summaryrefslogtreecommitdiff
path: root/chromium/media
diff options
context:
space:
mode:
authorAllan Sandfeld Jensen <allan.jensen@qt.io>2019-07-31 15:50:41 +0200
committerAllan Sandfeld Jensen <allan.jensen@qt.io>2019-08-30 12:35:23 +0000
commit7b2ffa587235a47d4094787d72f38102089f402a (patch)
tree30e82af9cbab08a7fa028bb18f4f2987a3f74dfa /chromium/media
parentd94af01c90575348c4e81a418257f254b6f8d225 (diff)
downloadqtwebengine-chromium-7b2ffa587235a47d4094787d72f38102089f402a.tar.gz
BASELINE: Update Chromium to 76.0.3809.94
Change-Id: I321c3f5f929c105aec0f98c5091ef6108822e647 Reviewed-by: Michael BrĂ¼ning <michael.bruning@qt.io>
Diffstat (limited to 'chromium/media')
-rw-r--r--chromium/media/BUILD.gn25
-rw-r--r--chromium/media/DEPS5
-rw-r--r--chromium/media/audio/alsa/alsa_output.cc4
-rw-r--r--chromium/media/audio/alsa/alsa_output.h1
-rw-r--r--chromium/media/audio/android/audio_android_unittest.cc39
-rw-r--r--chromium/media/audio/android/audio_manager_android.cc5
-rw-r--r--chromium/media/audio/android/audio_track_output_stream.cc4
-rw-r--r--chromium/media/audio/android/audio_track_output_stream.h1
-rw-r--r--chromium/media/audio/android/opensles_output.cc4
-rw-r--r--chromium/media/audio/android/opensles_output.h1
-rw-r--r--chromium/media/audio/audio_debug_file_writer_unittest.cc9
-rw-r--r--chromium/media/audio/audio_features.cc4
-rw-r--r--chromium/media/audio/audio_features.h4
-rw-r--r--chromium/media/audio/audio_io.h4
-rw-r--r--chromium/media/audio/audio_manager_unittest.cc5
-rw-r--r--chromium/media/audio/audio_output_controller.cc28
-rw-r--r--chromium/media/audio/audio_output_controller.h4
-rw-r--r--chromium/media/audio/audio_output_controller_unittest.cc130
-rw-r--r--chromium/media/audio/audio_output_delegate.h1
-rw-r--r--chromium/media/audio/audio_output_device.cc13
-rw-r--r--chromium/media/audio/audio_output_device.h2
-rw-r--r--chromium/media/audio/audio_output_device_unittest.cc16
-rw-r--r--chromium/media/audio/audio_output_dispatcher.h4
-rw-r--r--chromium/media/audio/audio_output_dispatcher_impl.cc4
-rw-r--r--chromium/media/audio/audio_output_dispatcher_impl.h1
-rw-r--r--chromium/media/audio/audio_output_ipc.h4
-rw-r--r--chromium/media/audio/audio_output_proxy.cc7
-rw-r--r--chromium/media/audio/audio_output_proxy.h1
-rw-r--r--chromium/media/audio/audio_output_proxy_unittest.cc1
-rw-r--r--chromium/media/audio/audio_output_resampler.cc7
-rw-r--r--chromium/media/audio/audio_output_resampler.h1
-rw-r--r--chromium/media/audio/audio_output_stream_sink.cc12
-rw-r--r--chromium/media/audio/audio_output_stream_sink.h2
-rw-r--r--chromium/media/audio/clockless_audio_sink.cc2
-rw-r--r--chromium/media/audio/clockless_audio_sink.h1
-rw-r--r--chromium/media/audio/cras/cras_unified.cc4
-rw-r--r--chromium/media/audio/cras/cras_unified.h1
-rw-r--r--chromium/media/audio/fake_audio_output_stream.cc2
-rw-r--r--chromium/media/audio/fake_audio_output_stream.h1
-rw-r--r--chromium/media/audio/fuchsia/audio_output_stream_fuchsia.cc8
-rw-r--r--chromium/media/audio/fuchsia/audio_output_stream_fuchsia.h1
-rw-r--r--chromium/media/audio/mac/audio_auhal_mac.cc4
-rw-r--r--chromium/media/audio/mac/audio_auhal_mac.h1
-rw-r--r--chromium/media/audio/mac/audio_manager_mac.cc65
-rw-r--r--chromium/media/audio/mac/audio_manager_mac.h2
-rw-r--r--chromium/media/audio/null_audio_sink.cc2
-rw-r--r--chromium/media/audio/null_audio_sink.h1
-rw-r--r--chromium/media/audio/pulse/audio_manager_pulse.cc3
-rw-r--r--chromium/media/audio/pulse/pulse_output.cc4
-rw-r--r--chromium/media/audio/pulse/pulse_output.h1
-rw-r--r--chromium/media/audio/pulse/pulse_util.cc6
-rw-r--r--chromium/media/audio/virtual_audio_output_stream.cc4
-rw-r--r--chromium/media/audio/virtual_audio_output_stream.h1
-rw-r--r--chromium/media/audio/win/audio_low_latency_input_win.cc55
-rw-r--r--chromium/media/audio/win/audio_low_latency_output_win.cc63
-rw-r--r--chromium/media/audio/win/audio_low_latency_output_win.h19
-rw-r--r--chromium/media/audio/win/audio_low_latency_output_win_unittest.cc54
-rw-r--r--chromium/media/audio/win/audio_manager_win.cc71
-rw-r--r--chromium/media/audio/win/core_audio_util_win.cc48
-rw-r--r--chromium/media/audio/win/core_audio_util_win.h2
-rw-r--r--chromium/media/audio/win/core_audio_util_win_unittest.cc50
-rw-r--r--chromium/media/audio/win/waveout_output_win.cc11
-rw-r--r--chromium/media/audio/win/waveout_output_win.h1
-rw-r--r--chromium/media/base/BUILD.gn7
-rw-r--r--chromium/media/base/android/media_codec_bridge_impl_unittest.cc24
-rw-r--r--chromium/media/base/android/media_codec_loop_unittest.cc1
-rw-r--r--chromium/media/base/android/media_codec_util.cc6
-rw-r--r--chromium/media/base/android/media_crypto_context.h6
-rw-r--r--chromium/media/base/android/media_crypto_context_impl.cc4
-rw-r--r--chromium/media/base/android/media_crypto_context_impl.h3
-rw-r--r--chromium/media/base/android/media_drm_bridge.cc22
-rw-r--r--chromium/media/base/android/media_drm_bridge.h2
-rw-r--r--chromium/media/base/android/media_drm_bridge_factory.cc2
-rw-r--r--chromium/media/base/android/mock_media_crypto_context.cc10
-rw-r--r--chromium/media/base/android/mock_media_crypto_context.h10
-rw-r--r--chromium/media/base/android/stream_texture_wrapper.h2
-rw-r--r--chromium/media/base/audio_buffer.cc4
-rw-r--r--chromium/media/base/audio_buffer.h6
-rw-r--r--chromium/media/base/audio_buffer_converter.cc43
-rw-r--r--chromium/media/base/audio_buffer_converter.h4
-rw-r--r--chromium/media/base/audio_buffer_converter_unittest.cc4
-rw-r--r--chromium/media/base/audio_buffer_queue.cc12
-rw-r--r--chromium/media/base/audio_buffer_queue.h2
-rw-r--r--chromium/media/base/audio_codecs.cc4
-rw-r--r--chromium/media/base/audio_decoder.h10
-rw-r--r--chromium/media/base/audio_discard_helper.cc7
-rw-r--r--chromium/media/base/audio_discard_helper.h2
-rw-r--r--chromium/media/base/audio_discard_helper_unittest.cc99
-rw-r--r--chromium/media/base/audio_parameters.h16
-rw-r--r--chromium/media/base/audio_push_fifo.h3
-rw-r--r--chromium/media/base/audio_renderer_mixer_input.cc4
-rw-r--r--chromium/media/base/audio_renderer_mixer_input.h1
-rw-r--r--chromium/media/base/audio_renderer_sink.h4
-rw-r--r--chromium/media/base/bitstream_buffer.cc29
-rw-r--r--chromium/media/base/bitstream_buffer.h46
-rw-r--r--chromium/media/base/callback_holder.h4
-rw-r--r--chromium/media/base/container_names_unittest.cc2
-rw-r--r--chromium/media/base/decoder_buffer.cc14
-rw-r--r--chromium/media/base/decoder_buffer.h9
-rw-r--r--chromium/media/base/decoder_buffer_unittest.cc82
-rw-r--r--chromium/media/base/decryptor.h7
-rw-r--r--chromium/media/base/demuxer_stream.h2
-rw-r--r--chromium/media/base/fake_audio_renderer_sink.cc4
-rw-r--r--chromium/media/base/fake_audio_renderer_sink.h1
-rw-r--r--chromium/media/base/fake_audio_worker_unittest.cc5
-rw-r--r--chromium/media/base/fake_demuxer_stream.cc2
-rw-r--r--chromium/media/base/fallback_video_decoder.cc29
-rw-r--r--chromium/media/base/fallback_video_decoder.h9
-rw-r--r--chromium/media/base/fallback_video_decoder_unittest.cc30
-rw-r--r--chromium/media/base/gmock_callback_support.h6
-rw-r--r--chromium/media/base/ipc/media_param_traits_macros.h2
-rw-r--r--chromium/media/base/key_systems_unittest.cc10
-rw-r--r--chromium/media/base/mac/video_frame_mac_unittests.cc2
-rw-r--r--chromium/media/base/media_client.h6
-rw-r--r--chromium/media/base/media_permission.h12
-rw-r--r--chromium/media/base/media_switches.cc62
-rw-r--r--chromium/media/base/media_switches.h11
-rw-r--r--chromium/media/base/mime_util_internal.cc36
-rw-r--r--chromium/media/base/mock_audio_renderer_sink.h1
-rw-r--r--chromium/media/base/mock_filters.h41
-rw-r--r--chromium/media/base/null_video_sink.cc4
-rw-r--r--chromium/media/base/null_video_sink.h4
-rw-r--r--chromium/media/base/null_video_sink_unittest.cc6
-rw-r--r--chromium/media/base/pipeline.h4
-rw-r--r--chromium/media/base/pipeline_impl.cc4
-rw-r--r--chromium/media/base/pipeline_impl.h2
-rw-r--r--chromium/media/base/pipeline_metadata.h2
-rw-r--r--chromium/media/base/routing_token_callback.h5
-rw-r--r--chromium/media/base/test_helpers.cc5
-rw-r--r--chromium/media/base/test_helpers.h14
-rw-r--r--chromium/media/base/unaligned_shared_memory.cc65
-rw-r--r--chromium/media/base/unaligned_shared_memory.h31
-rw-r--r--chromium/media/base/unaligned_shared_memory_unittest.cc137
-rw-r--r--chromium/media/base/vector_math.cc7
-rw-r--r--chromium/media/base/vector_math.h2
-rw-r--r--chromium/media/base/vector_math_unittest.cc10
-rw-r--r--chromium/media/base/video_decoder.h12
-rw-r--r--chromium/media/base/video_decoder_config.cc13
-rw-r--r--chromium/media/base/video_decoder_config.h10
-rw-r--r--chromium/media/base/video_decoder_config_unittest.cc14
-rw-r--r--chromium/media/base/video_frame.cc48
-rw-r--r--chromium/media/base/video_frame.h16
-rw-r--r--chromium/media/base/video_frame_layout.cc26
-rw-r--r--chromium/media/base/video_frame_layout.h24
-rw-r--r--chromium/media/base/video_frame_layout_unittest.cc37
-rw-r--r--chromium/media/base/video_frame_metadata.h4
-rw-r--r--chromium/media/base/video_frame_pool.cc2
-rw-r--r--chromium/media/base/video_frame_unittest.cc7
-rw-r--r--chromium/media/base/video_renderer_sink.h6
-rw-r--r--chromium/media/base/video_rotation.cc26
-rw-r--r--chromium/media/base/video_rotation.h26
-rw-r--r--chromium/media/base/video_thumbnail_decoder.cc2
-rw-r--r--chromium/media/base/video_thumbnail_decoder.h2
-rw-r--r--chromium/media/base/video_thumbnail_decoder_unittest.cc25
-rw-r--r--chromium/media/base/video_transformation.cc95
-rw-r--r--chromium/media/base/video_transformation.h61
-rw-r--r--chromium/media/base/video_types.h17
-rw-r--r--chromium/media/base/video_util.cc8
-rw-r--r--chromium/media/base/video_util.h2
-rw-r--r--chromium/media/blink/BUILD.gn4
-rw-r--r--chromium/media/blink/DEPS1
-rw-r--r--chromium/media/blink/key_system_config_selector_unittest.cc11
-rw-r--r--chromium/media/blink/multibuffer_data_source.cc6
-rw-r--r--chromium/media/blink/multibuffer_data_source.h2
-rw-r--r--chromium/media/blink/video_decode_stats_reporter_unittest.cc2
-rw-r--r--chromium/media/blink/video_frame_compositor.cc39
-rw-r--r--chromium/media/blink/video_frame_compositor.h7
-rw-r--r--chromium/media/blink/video_frame_compositor_unittest.cc16
-rw-r--r--chromium/media/blink/watch_time_reporter.cc51
-rw-r--r--chromium/media/blink/watch_time_reporter.h13
-rw-r--r--chromium/media/blink/watch_time_reporter_unittest.cc54
-rw-r--r--chromium/media/blink/webaudiosourceprovider_impl.cc6
-rw-r--r--chromium/media/blink/webaudiosourceprovider_impl.h1
-rw-r--r--chromium/media/blink/webcontentdecryptionmoduleaccess_impl.cc4
-rw-r--r--chromium/media/blink/webcontentdecryptionmoduleaccess_impl.h3
-rw-r--r--chromium/media/blink/webcontentdecryptionmodulesession_impl.cc6
-rw-r--r--chromium/media/blink/webmediacapabilitiesclient_impl.cc156
-rw-r--r--chromium/media/blink/webmediacapabilitiesclient_impl.h40
-rw-r--r--chromium/media/blink/webmediacapabilitiesclient_impl_unittest.cc95
-rw-r--r--chromium/media/blink/webmediaplayer_delegate.h4
-rw-r--r--chromium/media/blink/webmediaplayer_impl.cc436
-rw-r--r--chromium/media/blink/webmediaplayer_impl.h22
-rw-r--r--chromium/media/blink/webmediaplayer_impl_unittest.cc127
-rw-r--r--chromium/media/blink/webmediaplayer_params.h3
-rw-r--r--chromium/media/capabilities/learning_helper.cc124
-rw-r--r--chromium/media/capabilities/learning_helper.h13
-rw-r--r--chromium/media/capabilities/video_decode_stats.proto14
-rw-r--r--chromium/media/capabilities/video_decode_stats_db_impl.cc101
-rw-r--r--chromium/media/capabilities/video_decode_stats_db_impl.h5
-rw-r--r--chromium/media/capabilities/video_decode_stats_db_impl_unittest.cc99
-rw-r--r--chromium/media/capture/BUILD.gn29
-rw-r--r--chromium/media/capture/video/OWNERS2
-rw-r--r--chromium/media/capture/video/android/video_capture_device_android.cc8
-rw-r--r--chromium/media/capture/video/android/video_capture_device_android.h1
-rw-r--r--chromium/media/capture/video/android/video_capture_device_factory_android.cc6
-rw-r--r--chromium/media/capture/video/chromeos/camera_device_delegate.cc144
-rw-r--r--chromium/media/capture/video/chromeos/camera_device_delegate.h11
-rw-r--r--chromium/media/capture/video/chromeos/camera_device_delegate_unittest.cc7
-rw-r--r--chromium/media/capture/video/chromeos/camera_hal_dispatcher_impl.cc4
-rw-r--r--chromium/media/capture/video/chromeos/camera_hal_dispatcher_impl.h10
-rw-r--r--chromium/media/capture/video/chromeos/cros_image_capture_impl.cc24
-rw-r--r--chromium/media/capture/video/chromeos/cros_image_capture_impl.h13
-rw-r--r--chromium/media/capture/video/chromeos/mock_video_capture_client.cc1
-rw-r--r--chromium/media/capture/video/chromeos/mock_video_capture_client.h3
-rw-r--r--chromium/media/capture/video/chromeos/mojo/cros_camera_service.mojom6
-rw-r--r--chromium/media/capture/video/chromeos/mojo/cros_image_capture.mojom8
-rw-r--r--chromium/media/capture/video/chromeos/renderer_facing_cros_image_capture.cc17
-rw-r--r--chromium/media/capture/video/chromeos/renderer_facing_cros_image_capture.h9
-rw-r--r--chromium/media/capture/video/chromeos/reprocess_manager.cc60
-rw-r--r--chromium/media/capture/video/chromeos/reprocess_manager.h41
-rw-r--r--chromium/media/capture/video/chromeos/scoped_video_capture_jpeg_decoder.cc (renamed from chromium/media/capture/video/scoped_video_capture_jpeg_decoder.cc)2
-rw-r--r--chromium/media/capture/video/chromeos/scoped_video_capture_jpeg_decoder.h (renamed from chromium/media/capture/video/scoped_video_capture_jpeg_decoder.h)8
-rw-r--r--chromium/media/capture/video/chromeos/video_capture_device_chromeos_halv3.cc119
-rw-r--r--chromium/media/capture/video/chromeos/video_capture_device_chromeos_halv3.h19
-rw-r--r--chromium/media/capture/video/chromeos/video_capture_device_factory_chromeos.cc35
-rw-r--r--chromium/media/capture/video/chromeos/video_capture_device_factory_chromeos.h17
-rw-r--r--chromium/media/capture/video/chromeos/video_capture_jpeg_decoder.h (renamed from chromium/media/capture/video/video_capture_jpeg_decoder.h)6
-rw-r--r--chromium/media/capture/video/chromeos/video_capture_jpeg_decoder_impl.cc (renamed from chromium/media/capture/video/video_capture_jpeg_decoder_impl.cc)24
-rw-r--r--chromium/media/capture/video/chromeos/video_capture_jpeg_decoder_impl.h (renamed from chromium/media/capture/video/video_capture_jpeg_decoder_impl.h)24
-rw-r--r--chromium/media/capture/video/fake_video_capture_device.cc54
-rw-r--r--chromium/media/capture/video/fake_video_capture_device_unittest.cc14
-rw-r--r--chromium/media/capture/video/file_video_capture_device.cc9
-rw-r--r--chromium/media/capture/video/file_video_capture_device_unittest.cc5
-rw-r--r--chromium/media/capture/video/linux/v4l2_capture_delegate.cc8
-rw-r--r--chromium/media/capture/video/linux/v4l2_capture_delegate_unittest.cc2
-rw-r--r--chromium/media/capture/video/linux/video_capture_device_factory_linux_unittest.cc4
-rw-r--r--chromium/media/capture/video/mac/video_capture_device_avfoundation_mac.mm9
-rw-r--r--chromium/media/capture/video/mac/video_capture_device_decklink_mac.h1
-rw-r--r--chromium/media/capture/video/mac/video_capture_device_decklink_mac.mm10
-rw-r--r--chromium/media/capture/video/mac/video_capture_device_mac.h1
-rw-r--r--chromium/media/capture/video/mac/video_capture_device_mac.mm4
-rw-r--r--chromium/media/capture/video/mock_device.cc2
-rw-r--r--chromium/media/capture/video/mock_device_factory.cc4
-rw-r--r--chromium/media/capture/video/mock_video_capture_device_client.h6
-rw-r--r--chromium/media/capture/video/video_capture_device.h1
-rw-r--r--chromium/media/capture/video/video_capture_device_client.cc75
-rw-r--r--chromium/media/capture/video/video_capture_device_client.h11
-rw-r--r--chromium/media/capture/video/video_capture_device_client_unittest.cc62
-rw-r--r--chromium/media/capture/video/video_capture_device_factory.h4
-rw-r--r--chromium/media/capture/video/video_capture_device_unittest.cc19
-rw-r--r--chromium/media/capture/video/video_frame_receiver_on_task_runner.cc5
-rw-r--r--chromium/media/capture/video/win/sink_input_pin_win.cc8
-rw-r--r--chromium/media/capture/video/win/video_capture_device_factory_win.cc2
-rw-r--r--chromium/media/capture/video/win/video_capture_device_mf_win.cc6
-rw-r--r--chromium/media/capture/video/win/video_capture_device_mf_win_unittest.cc1
-rw-r--r--chromium/media/capture/video/win/video_capture_device_utils_win.cc8
-rw-r--r--chromium/media/capture/video/win/video_capture_device_win.cc77
-rw-r--r--chromium/media/capture/video/win/video_capture_device_win.h1
-rw-r--r--chromium/media/capture/video_capturer_source.h6
-rw-r--r--chromium/media/cast/cast_receiver.h10
-rw-r--r--chromium/media/cast/cast_sender.h7
-rw-r--r--chromium/media/cast/cast_sender_impl.cc14
-rw-r--r--chromium/media/cast/net/udp_transport_impl.cc3
-rw-r--r--chromium/media/cast/net/udp_transport_impl.h5
-rw-r--r--chromium/media/cast/net/udp_transport_unittest.cc10
-rw-r--r--chromium/media/cast/receiver/cast_receiver_impl.cc6
-rw-r--r--chromium/media/cast/receiver/cast_receiver_impl.h2
-rw-r--r--chromium/media/cast/receiver/video_decoder.h5
-rw-r--r--chromium/media/cast/receiver/video_decoder_unittest.cc8
-rw-r--r--chromium/media/cast/sender/external_video_encoder.cc17
-rw-r--r--chromium/media/cast/sender/external_video_encoder.h2
-rw-r--r--chromium/media/cast/sender/fake_software_video_encoder.cc2
-rw-r--r--chromium/media/cast/sender/fake_software_video_encoder.h2
-rw-r--r--chromium/media/cast/sender/h264_vt_encoder.cc2
-rw-r--r--chromium/media/cast/sender/h264_vt_encoder.h2
-rw-r--r--chromium/media/cast/sender/h264_vt_encoder_unittest.cc10
-rw-r--r--chromium/media/cast/sender/performance_metrics_overlay.cc2
-rw-r--r--chromium/media/cast/sender/size_adaptable_video_encoder_base.cc8
-rw-r--r--chromium/media/cast/sender/size_adaptable_video_encoder_base.h2
-rw-r--r--chromium/media/cast/sender/software_video_encoder.h2
-rw-r--r--chromium/media/cast/sender/video_encoder.h2
-rw-r--r--chromium/media/cast/sender/video_encoder_impl.cc21
-rw-r--r--chromium/media/cast/sender/video_encoder_impl.h2
-rw-r--r--chromium/media/cast/sender/video_encoder_unittest.cc4
-rw-r--r--chromium/media/cast/sender/video_sender.cc6
-rw-r--r--chromium/media/cast/sender/video_sender.h4
-rw-r--r--chromium/media/cast/sender/vp8_encoder.cc2
-rw-r--r--chromium/media/cast/sender/vp8_encoder.h2
-rw-r--r--chromium/media/cdm/library_cdm/clear_key_cdm/cdm_video_decoder.cc7
-rw-r--r--chromium/media/device_monitors/device_monitor_udev.cc6
-rw-r--r--chromium/media/ffmpeg/ffmpeg_common.cc6
-rw-r--r--chromium/media/filters/BUILD.gn44
-rw-r--r--chromium/media/filters/android/media_codec_audio_decoder.cc33
-rw-r--r--chromium/media/filters/android/media_codec_audio_decoder.h8
-rw-r--r--chromium/media/filters/aom_video_decoder.cc24
-rw-r--r--chromium/media/filters/aom_video_decoder.h5
-rw-r--r--chromium/media/filters/aom_video_decoder_unittest.cc4
-rw-r--r--chromium/media/filters/audio_decoder_stream_unittest.cc9
-rw-r--r--chromium/media/filters/audio_decoder_unittest.cc4
-rw-r--r--chromium/media/filters/audio_renderer_algorithm.cc62
-rw-r--r--chromium/media/filters/audio_renderer_algorithm.h15
-rw-r--r--chromium/media/filters/audio_timestamp_validator.cc8
-rw-r--r--chromium/media/filters/audio_timestamp_validator.h2
-rw-r--r--chromium/media/filters/audio_timestamp_validator_unittest.cc8
-rw-r--r--chromium/media/filters/audio_video_metadata_extractor_unittest.cc13
-rw-r--r--chromium/media/filters/blocking_url_protocol.cc11
-rw-r--r--chromium/media/filters/blocking_url_protocol_unittest.cc2
-rw-r--r--chromium/media/filters/chunk_demuxer.cc129
-rw-r--r--chromium/media/filters/chunk_demuxer.h41
-rw-r--r--chromium/media/filters/chunk_demuxer_unittest.cc300
-rw-r--r--chromium/media/filters/context_3d.h36
-rw-r--r--chromium/media/filters/dav1d_video_decoder.cc107
-rw-r--r--chromium/media/filters/dav1d_video_decoder.h7
-rw-r--r--chromium/media/filters/dav1d_video_decoder_unittest.cc4
-rw-r--r--chromium/media/filters/decoder_selector_unittest.cc24
-rw-r--r--chromium/media/filters/decoder_stream.cc23
-rw-r--r--chromium/media/filters/decoder_stream.h14
-rw-r--r--chromium/media/filters/decoder_stream_traits.cc28
-rw-r--r--chromium/media/filters/decoder_stream_traits.h8
-rw-r--r--chromium/media/filters/decrypting_audio_decoder.cc8
-rw-r--r--chromium/media/filters/decrypting_audio_decoder.h6
-rw-r--r--chromium/media/filters/decrypting_audio_decoder_unittest.cc2
-rw-r--r--chromium/media/filters/decrypting_video_decoder.cc19
-rw-r--r--chromium/media/filters/decrypting_video_decoder.h12
-rw-r--r--chromium/media/filters/decrypting_video_decoder_unittest.cc2
-rw-r--r--chromium/media/filters/fake_video_decoder.cc43
-rw-r--r--chromium/media/filters/fake_video_decoder.h21
-rw-r--r--chromium/media/filters/fake_video_decoder_unittest.cc4
-rw-r--r--chromium/media/filters/ffmpeg_audio_decoder.cc16
-rw-r--r--chromium/media/filters/ffmpeg_audio_decoder.h4
-rw-r--r--chromium/media/filters/ffmpeg_demuxer.h2
-rw-r--r--chromium/media/filters/ffmpeg_demuxer_unittest.cc8
-rw-r--r--chromium/media/filters/ffmpeg_glue.cc8
-rw-r--r--chromium/media/filters/ffmpeg_glue_unittest.cc2
-rw-r--r--chromium/media/filters/ffmpeg_video_decoder.cc26
-rw-r--r--chromium/media/filters/ffmpeg_video_decoder.h7
-rw-r--r--chromium/media/filters/ffmpeg_video_decoder_unittest.cc16
-rw-r--r--chromium/media/filters/frame_processor.cc91
-rw-r--r--chromium/media/filters/frame_processor.h7
-rw-r--r--chromium/media/filters/frame_processor_unittest.cc379
-rw-r--r--chromium/media/filters/fuchsia/fuchsia_video_decoder.cc101
-rw-r--r--chromium/media/filters/fuchsia/fuchsia_video_decoder_unittest.cc4
-rw-r--r--chromium/media/filters/gpu_video_decoder.cc83
-rw-r--r--chromium/media/filters/gpu_video_decoder.h11
-rw-r--r--chromium/media/filters/in_memory_url_protocol.cc9
-rw-r--r--chromium/media/filters/in_memory_url_protocol_unittest.cc2
-rw-r--r--chromium/media/filters/offloading_video_decoder.cc43
-rw-r--r--chromium/media/filters/offloading_video_decoder.h7
-rw-r--r--chromium/media/filters/offloading_video_decoder_unittest.cc104
-rw-r--r--chromium/media/filters/pipeline_controller.cc14
-rw-r--r--chromium/media/filters/pipeline_controller_unittest.cc43
-rw-r--r--chromium/media/filters/source_buffer_range.cc813
-rw-r--r--chromium/media/filters/source_buffer_range.h314
-rw-r--r--chromium/media/filters/source_buffer_range_by_dts.cc683
-rw-r--r--chromium/media/filters/source_buffer_range_by_dts.h268
-rw-r--r--chromium/media/filters/source_buffer_range_by_pts.cc829
-rw-r--r--chromium/media/filters/source_buffer_range_by_pts.h275
-rw-r--r--chromium/media/filters/source_buffer_state.cc2
-rw-r--r--chromium/media/filters/source_buffer_state.h4
-rw-r--r--chromium/media/filters/source_buffer_state_unittest.cc46
-rw-r--r--chromium/media/filters/source_buffer_stream.cc1258
-rw-r--r--chromium/media/filters/source_buffer_stream.h224
-rw-r--r--chromium/media/filters/source_buffer_stream_unittest.cc1067
-rw-r--r--chromium/media/filters/stream_parser_factory.cc11
-rw-r--r--chromium/media/filters/video_decoder_stream_unittest.cc7
-rw-r--r--chromium/media/filters/video_renderer_algorithm.cc51
-rw-r--r--chromium/media/filters/video_renderer_algorithm.h4
-rw-r--r--chromium/media/filters/video_renderer_algorithm_unittest.cc32
-rw-r--r--chromium/media/filters/vp9_parser.h4
-rw-r--r--chromium/media/filters/vp9_uncompressed_header_parser.cc5
-rw-r--r--chromium/media/filters/vpx_video_decoder.cc32
-rw-r--r--chromium/media/filters/vpx_video_decoder.h7
-rw-r--r--chromium/media/filters/vpx_video_decoder_fuzzertest.cc6
-rw-r--r--chromium/media/filters/vpx_video_decoder_unittest.cc4
-rw-r--r--chromium/media/formats/mp2t/es_adapter_video_unittest.cc2
-rw-r--r--chromium/media/formats/mp2t/es_parser_h264.cc2
-rw-r--r--chromium/media/formats/mp4/avc.cc9
-rw-r--r--chromium/media/formats/mp4/fourccs.h1
-rw-r--r--chromium/media/formats/mp4/mp4_stream_parser.cc57
-rw-r--r--chromium/media/formats/mp4/mp4_stream_parser.h4
-rw-r--r--chromium/media/formats/mp4/mp4_stream_parser_unittest.cc79
-rw-r--r--chromium/media/formats/webm/webm_colour_parser.cc26
-rw-r--r--chromium/media/formats/webm/webm_colour_parser.h3
-rw-r--r--chromium/media/formats/webm/webm_video_client.cc27
-rw-r--r--chromium/media/formats/webm/webm_video_client_unittest.cc83
-rw-r--r--chromium/media/gpu/BUILD.gn127
-rw-r--r--chromium/media/gpu/DEPS7
-rw-r--r--chromium/media/gpu/OWNERS3
-rw-r--r--chromium/media/gpu/accelerated_video_decoder.h10
-rw-r--r--chromium/media/gpu/android/android_video_decode_accelerator.cc59
-rw-r--r--chromium/media/gpu/android/android_video_decode_accelerator.h8
-rw-r--r--chromium/media/gpu/android/android_video_encode_accelerator.cc16
-rw-r--r--chromium/media/gpu/android/android_video_encode_accelerator.h5
-rw-r--r--chromium/media/gpu/android/android_video_surface_chooser.h2
-rw-r--r--chromium/media/gpu/android/codec_wrapper.cc10
-rw-r--r--chromium/media/gpu/android/codec_wrapper_unittest.cc4
-rw-r--r--chromium/media/gpu/android/image_reader_gl_owner.cc56
-rw-r--r--chromium/media/gpu/android/image_reader_gl_owner.h2
-rw-r--r--chromium/media/gpu/android/image_reader_gl_owner_unittest.cc45
-rw-r--r--chromium/media/gpu/android/media_codec_video_decoder.cc56
-rw-r--r--chromium/media/gpu/android/media_codec_video_decoder.h15
-rw-r--r--chromium/media/gpu/android/media_codec_video_decoder_unittest.cc27
-rw-r--r--chromium/media/gpu/android/shared_image_video.cc412
-rw-r--r--chromium/media/gpu/android/shared_image_video.h10
-rw-r--r--chromium/media/gpu/android/surface_chooser_helper.h2
-rw-r--r--chromium/media/gpu/android/surface_texture_gl_owner_unittest.cc4
-rw-r--r--chromium/media/gpu/android/texture_owner.cc8
-rw-r--r--chromium/media/gpu/android/texture_owner.h8
-rw-r--r--chromium/media/gpu/android/video_frame_factory.h3
-rw-r--r--chromium/media/gpu/android/video_frame_factory_impl.cc235
-rw-r--r--chromium/media/gpu/android/video_frame_factory_impl.h69
-rw-r--r--chromium/media/gpu/fake_mjpeg_decode_accelerator.cc106
-rw-r--r--chromium/media/gpu/fake_mjpeg_decode_accelerator.h67
-rw-r--r--chromium/media/gpu/format_utils.cc6
-rw-r--r--chromium/media/gpu/gpu_jpeg_encode_accelerator_factory.cc68
-rw-r--r--chromium/media/gpu/gpu_jpeg_encode_accelerator_factory.h33
-rw-r--r--chromium/media/gpu/gpu_mjpeg_decode_accelerator_factory.cc93
-rw-r--r--chromium/media/gpu/gpu_mjpeg_decode_accelerator_factory.h33
-rw-r--r--chromium/media/gpu/h264_decoder.cc29
-rw-r--r--chromium/media/gpu/h264_decoder.h13
-rw-r--r--chromium/media/gpu/h264_decoder_unittest.cc18
-rw-r--r--chromium/media/gpu/image_processor_test.cc76
-rw-r--r--chromium/media/gpu/ipc/client/gpu_video_decode_accelerator_host.cc24
-rw-r--r--chromium/media/gpu/ipc/client/gpu_video_decode_accelerator_host.h2
-rw-r--r--chromium/media/gpu/ipc/common/media_messages.h1
-rw-r--r--chromium/media/gpu/ipc/common/media_param_traits.cc4
-rw-r--r--chromium/media/gpu/ipc/service/gpu_video_decode_accelerator.cc5
-rw-r--r--chromium/media/gpu/ipc/service/gpu_video_decode_accelerator.h2
-rw-r--r--chromium/media/gpu/ipc/service/vda_video_decoder.cc38
-rw-r--r--chromium/media/gpu/ipc/service/vda_video_decoder.h9
-rw-r--r--chromium/media/gpu/ipc/service/vda_video_decoder_unittest.cc12
-rw-r--r--chromium/media/gpu/jpeg_decode_accelerator_unittest.cc903
-rw-r--r--chromium/media/gpu/jpeg_encode_accelerator_unittest.cc762
-rw-r--r--chromium/media/gpu/libyuv_image_processor.cc32
-rw-r--r--chromium/media/gpu/libyuv_image_processor.h5
-rw-r--r--chromium/media/gpu/linux/BUILD.gn5
-rw-r--r--chromium/media/gpu/linux/generic_dmabuf_video_frame_mapper.cc56
-rw-r--r--chromium/media/gpu/linux/generic_dmabuf_video_frame_mapper.h9
-rw-r--r--chromium/media/gpu/linux/mailbox_video_frame_converter.cc260
-rw-r--r--chromium/media/gpu/linux/mailbox_video_frame_converter.h112
-rw-r--r--chromium/media/gpu/linux/platform_video_frame_utils.cc20
-rw-r--r--chromium/media/gpu/mac/vt_video_decode_accelerator_mac.cc2
-rw-r--r--chromium/media/gpu/mac/vt_video_decode_accelerator_mac.h2
-rw-r--r--chromium/media/gpu/mac/vt_video_encode_accelerator_mac.cc32
-rw-r--r--chromium/media/gpu/mac/vt_video_encode_accelerator_mac.h7
-rw-r--r--chromium/media/gpu/v4l2/BUILD.gn19
-rw-r--r--chromium/media/gpu/v4l2/generic_v4l2_device.cc2
-rw-r--r--chromium/media/gpu/v4l2/v4l2_device.cc12
-rw-r--r--chromium/media/gpu/v4l2/v4l2_device.h9
-rw-r--r--chromium/media/gpu/v4l2/v4l2_device_unittest.cc27
-rw-r--r--chromium/media/gpu/v4l2/v4l2_h264_accelerator.cc21
-rw-r--r--chromium/media/gpu/v4l2/v4l2_h264_accelerator.h10
-rw-r--r--chromium/media/gpu/v4l2/v4l2_image_processor.cc16
-rw-r--r--chromium/media/gpu/v4l2/v4l2_jpeg_encode_accelerator.cc30
-rw-r--r--chromium/media/gpu/v4l2/v4l2_jpeg_encode_accelerator.h25
-rw-r--r--chromium/media/gpu/v4l2/v4l2_mjpeg_decode_accelerator.cc24
-rw-r--r--chromium/media/gpu/v4l2/v4l2_mjpeg_decode_accelerator.h17
-rw-r--r--chromium/media/gpu/v4l2/v4l2_slice_video_decode_accelerator.cc11
-rw-r--r--chromium/media/gpu/v4l2/v4l2_slice_video_decode_accelerator.h2
-rw-r--r--chromium/media/gpu/v4l2/v4l2_stateful_workaround.cc2
-rw-r--r--chromium/media/gpu/v4l2/v4l2_video_decode_accelerator.cc10
-rw-r--r--chromium/media/gpu/v4l2/v4l2_video_decode_accelerator.h2
-rw-r--r--chromium/media/gpu/v4l2/v4l2_video_encode_accelerator.cc30
-rw-r--r--chromium/media/gpu/v4l2/v4l2_video_encode_accelerator.h7
-rw-r--r--chromium/media/gpu/v4l2/v4l2_vp8_accelerator.cc2
-rw-r--r--chromium/media/gpu/v4l2/v4l2_vp9_accelerator.cc25
-rw-r--r--chromium/media/gpu/v4l2/v4l2_vp9_accelerator.h8
-rw-r--r--chromium/media/gpu/vaapi/BUILD.gn45
-rw-r--r--chromium/media/gpu/vaapi/h264_encoder.cc3
-rw-r--r--chromium/media/gpu/vaapi/va_surface.cc6
-rw-r--r--chromium/media/gpu/vaapi/va_surface.h9
-rw-r--r--chromium/media/gpu/vaapi/vaapi_dmabuf_video_frame_mapper.cc27
-rw-r--r--chromium/media/gpu/vaapi/vaapi_dmabuf_video_frame_mapper.h6
-rw-r--r--chromium/media/gpu/vaapi/vaapi_h264_accelerator.cc12
-rw-r--r--chromium/media/gpu/vaapi/vaapi_h264_accelerator.h8
-rw-r--r--chromium/media/gpu/vaapi/vaapi_jpeg_decode_accelerator_worker.cc114
-rw-r--r--chromium/media/gpu/vaapi/vaapi_jpeg_decode_accelerator_worker.h61
-rw-r--r--chromium/media/gpu/vaapi/vaapi_jpeg_decoder.cc30
-rw-r--r--chromium/media/gpu/vaapi/vaapi_jpeg_decoder.h37
-rw-r--r--chromium/media/gpu/vaapi/vaapi_jpeg_decoder_unittest.cc119
-rw-r--r--chromium/media/gpu/vaapi/vaapi_jpeg_encode_accelerator.cc277
-rw-r--r--chromium/media/gpu/vaapi/vaapi_jpeg_encode_accelerator.h25
-rw-r--r--chromium/media/gpu/vaapi/vaapi_jpeg_encoder.cc2
-rw-r--r--chromium/media/gpu/vaapi/vaapi_mjpeg_decode_accelerator.cc47
-rw-r--r--chromium/media/gpu/vaapi/vaapi_mjpeg_decode_accelerator.h17
-rw-r--r--chromium/media/gpu/vaapi/vaapi_picture_native_pixmap_ozone.cc6
-rw-r--r--chromium/media/gpu/vaapi/vaapi_utils_unittest.cc122
-rw-r--r--chromium/media/gpu/vaapi/vaapi_video_decode_accelerator.cc26
-rw-r--r--chromium/media/gpu/vaapi/vaapi_video_decode_accelerator.h11
-rw-r--r--chromium/media/gpu/vaapi/vaapi_video_decode_accelerator_unittest.cc45
-rw-r--r--chromium/media/gpu/vaapi/vaapi_video_encode_accelerator.cc67
-rw-r--r--chromium/media/gpu/vaapi/vaapi_video_encode_accelerator.h7
-rw-r--r--chromium/media/gpu/vaapi/vaapi_vp8_accelerator.h2
-rw-r--r--chromium/media/gpu/vaapi/vaapi_vp9_accelerator.cc6
-rw-r--r--chromium/media/gpu/vaapi/vaapi_vp9_accelerator.h6
-rw-r--r--chromium/media/gpu/vaapi/vaapi_wrapper.cc100
-rw-r--r--chromium/media/gpu/vaapi/vaapi_wrapper.h14
-rw-r--r--chromium/media/gpu/vaapi/vp8_encoder.h2
-rw-r--r--chromium/media/gpu/vaapi/vp9_encoder.cc13
-rw-r--r--chromium/media/gpu/vaapi/vp9_encoder.h8
-rw-r--r--chromium/media/gpu/video_decode_accelerator_perf_tests.cc84
-rw-r--r--chromium/media/gpu/video_decode_accelerator_unittest.cc13
-rw-r--r--chromium/media/gpu/video_encode_accelerator_unittest.cc177
-rw-r--r--chromium/media/gpu/video_frame_converter.cc25
-rw-r--r--chromium/media/gpu/video_frame_converter.h44
-rw-r--r--chromium/media/gpu/video_frame_mapper.h10
-rw-r--r--chromium/media/gpu/video_frame_mapper_factory.cc12
-rw-r--r--chromium/media/gpu/video_frame_mapper_factory.h5
-rw-r--r--chromium/media/gpu/vp8_decoder.cc4
-rw-r--r--chromium/media/gpu/vp8_decoder.h3
-rw-r--r--chromium/media/gpu/vp8_picture.h2
-rw-r--r--chromium/media/gpu/vp8_reference_frame_vector.h2
-rw-r--r--chromium/media/gpu/vp9_decoder.cc32
-rw-r--r--chromium/media/gpu/vp9_decoder.h15
-rw-r--r--chromium/media/gpu/windows/OWNERS2
-rw-r--r--chromium/media/gpu/windows/d3d11_h264_accelerator.cc36
-rw-r--r--chromium/media/gpu/windows/d3d11_h264_accelerator.h10
-rw-r--r--chromium/media/gpu/windows/d3d11_picture_buffer.cc172
-rw-r--r--chromium/media/gpu/windows/d3d11_picture_buffer.h75
-rw-r--r--chromium/media/gpu/windows/d3d11_texture_wrapper.cc170
-rw-r--r--chromium/media/gpu/windows/d3d11_texture_wrapper.h109
-rw-r--r--chromium/media/gpu/windows/d3d11_video_decoder.cc34
-rw-r--r--chromium/media/gpu/windows/d3d11_video_decoder.h7
-rw-r--r--chromium/media/gpu/windows/d3d11_vp9_accelerator.cc88
-rw-r--r--chromium/media/gpu/windows/d3d11_vp9_accelerator.h21
-rw-r--r--chromium/media/gpu/windows/dxva_picture_buffer_win.cc18
-rw-r--r--chromium/media/gpu/windows/dxva_video_decode_accelerator_win.cc179
-rw-r--r--chromium/media/gpu/windows/dxva_video_decode_accelerator_win.h10
-rw-r--r--chromium/media/gpu/windows/media_foundation_video_encode_accelerator_win.cc33
-rw-r--r--chromium/media/gpu/windows/media_foundation_video_encode_accelerator_win.h5
-rw-r--r--chromium/media/gpu/windows/output_with_release_mailbox_cb.h3
-rw-r--r--chromium/media/learning/common/BUILD.gn1
-rw-r--r--chromium/media/learning/common/DEPS3
-rw-r--r--chromium/media/learning/common/learning_task.cc6
-rw-r--r--chromium/media/learning/common/learning_task.h31
-rw-r--r--chromium/media/learning/common/learning_task_controller.h14
-rw-r--r--chromium/media/learning/impl/BUILD.gn2
-rw-r--r--chromium/media/learning/impl/DEPS9
-rw-r--r--chromium/media/learning/impl/distribution_reporter.cc62
-rw-r--r--chromium/media/learning/impl/distribution_reporter.h14
-rw-r--r--chromium/media/learning/impl/distribution_reporter_unittest.cc110
-rw-r--r--chromium/media/learning/impl/learning_session_impl.h3
-rw-r--r--chromium/media/learning/impl/learning_session_impl_unittest.cc7
-rw-r--r--chromium/media/learning/impl/learning_task_controller_helper.cc3
-rw-r--r--chromium/media/learning/impl/learning_task_controller_helper.h6
-rw-r--r--chromium/media/learning/impl/learning_task_controller_helper_unittest.cc16
-rw-r--r--chromium/media/learning/impl/learning_task_controller_impl.cc8
-rw-r--r--chromium/media/learning/impl/learning_task_controller_impl.h2
-rw-r--r--chromium/media/learning/impl/learning_task_controller_impl_unittest.cc7
-rw-r--r--chromium/media/media_options.gni3
-rw-r--r--chromium/media/midi/BUILD.gn4
-rw-r--r--chromium/media/midi/midi_device_android.cc11
-rw-r--r--chromium/media/midi/midi_manager_android.cc6
-rw-r--r--chromium/media/midi/midi_manager_win.cc6
-rw-r--r--chromium/media/midi/midi_manager_winrt.cc2
-rw-r--r--chromium/media/midi/usb_midi_device_factory_android.cc5
-rw-r--r--chromium/media/mojo/README.md4
-rw-r--r--chromium/media/mojo/clients/BUILD.gn17
-rw-r--r--chromium/media/mojo/clients/mojo_android_overlay.cc6
-rw-r--r--chromium/media/mojo/clients/mojo_android_overlay_unittest.cc5
-rw-r--r--chromium/media/mojo/clients/mojo_audio_decoder.cc16
-rw-r--r--chromium/media/mojo/clients/mojo_audio_decoder.h6
-rw-r--r--chromium/media/mojo/clients/mojo_audio_decoder_unittest.cc10
-rw-r--r--chromium/media/mojo/clients/mojo_decoder_factory.cc3
-rw-r--r--chromium/media/mojo/clients/mojo_decryptor.cc2
-rw-r--r--chromium/media/mojo/clients/mojo_decryptor.h3
-rw-r--r--chromium/media/mojo/clients/mojo_decryptor_unittest.cc3
-rw-r--r--chromium/media/mojo/clients/mojo_mjpeg_decode_accelerator.cc124
-rw-r--r--chromium/media/mojo/clients/mojo_mjpeg_decode_accelerator.h65
-rw-r--r--chromium/media/mojo/clients/mojo_renderer.cc5
-rw-r--r--chromium/media/mojo/clients/mojo_renderer.h1
-rw-r--r--chromium/media/mojo/clients/mojo_renderer_factory.cc3
-rw-r--r--chromium/media/mojo/clients/mojo_renderer_factory.h1
-rw-r--r--chromium/media/mojo/clients/mojo_video_decoder.cc51
-rw-r--r--chromium/media/mojo/clients/mojo_video_decoder.h9
-rw-r--r--chromium/media/mojo/clients/mojo_video_encode_accelerator.cc23
-rw-r--r--chromium/media/mojo/clients/mojo_video_encode_accelerator.h5
-rw-r--r--chromium/media/mojo/clients/mojo_video_encode_accelerator_unittest.cc11
-rw-r--r--chromium/media/mojo/common/media_type_converters.cc28
-rw-r--r--chromium/media/mojo/common/media_type_converters.h8
-rw-r--r--chromium/media/mojo/common/media_type_converters_unittest.cc38
-rw-r--r--chromium/media/mojo/interfaces/BUILD.gn14
-rw-r--r--chromium/media/mojo/interfaces/audio_output_stream.mojom3
-rw-r--r--chromium/media/mojo/interfaces/interface_factory.mojom4
-rw-r--r--chromium/media/mojo/interfaces/media_types.mojom12
-rw-r--r--chromium/media/mojo/interfaces/media_types.typemap13
-rw-r--r--chromium/media/mojo/interfaces/mjpeg_decode_accelerator.mojom72
-rw-r--r--chromium/media/mojo/interfaces/mjpeg_decode_accelerator.typemap28
-rw-r--r--chromium/media/mojo/interfaces/mjpeg_decode_accelerator_mojom_traits.cc128
-rw-r--r--chromium/media/mojo/interfaces/mjpeg_decode_accelerator_mojom_traits.h64
-rw-r--r--chromium/media/mojo/interfaces/renderer.mojom6
-rw-r--r--chromium/media/mojo/interfaces/renderer_extensions.mojom30
-rw-r--r--chromium/media/mojo/interfaces/typemaps.gni1
-rw-r--r--chromium/media/mojo/interfaces/video_decoder_config_struct_traits.cc9
-rw-r--r--chromium/media/mojo/interfaces/video_decoder_config_struct_traits.h5
-rw-r--r--chromium/media/mojo/interfaces/video_decoder_config_struct_traits_unittest.cc12
-rw-r--r--chromium/media/mojo/interfaces/video_frame_struct_traits.cc16
-rw-r--r--chromium/media/mojo/interfaces/video_frame_struct_traits.h7
-rw-r--r--chromium/media/mojo/interfaces/video_transformation_mojom_traits.cc22
-rw-r--r--chromium/media/mojo/interfaces/video_transformation_mojom_traits.h32
-rw-r--r--chromium/media/mojo/services/BUILD.gn18
-rw-r--r--chromium/media/mojo/services/DEPS3
-rw-r--r--chromium/media/mojo/services/cdm_manifest.cc9
-rw-r--r--chromium/media/mojo/services/cros_mojo_jpeg_encode_accelerator_service.cc214
-rw-r--r--chromium/media/mojo/services/cros_mojo_jpeg_encode_accelerator_service.h77
-rw-r--r--chromium/media/mojo/services/gpu_mojo_media_client.cc4
-rw-r--r--chromium/media/mojo/services/interface_factory_impl.cc1
-rw-r--r--chromium/media/mojo/services/interface_factory_impl.h6
-rw-r--r--chromium/media/mojo/services/main.cc7
-rw-r--r--chromium/media/mojo/services/media_manifest.cc13
-rw-r--r--chromium/media/mojo/services/media_service_unittest.cc10
-rw-r--r--chromium/media/mojo/services/mojo_audio_decoder_service.cc4
-rw-r--r--chromium/media/mojo/services/mojo_audio_decoder_service.h2
-rw-r--r--chromium/media/mojo/services/mojo_audio_output_stream.cc5
-rw-r--r--chromium/media/mojo/services/mojo_audio_output_stream.h1
-rw-r--r--chromium/media/mojo/services/mojo_audio_output_stream_provider_unittest.cc1
-rw-r--r--chromium/media/mojo/services/mojo_audio_output_stream_unittest.cc12
-rw-r--r--chromium/media/mojo/services/mojo_decryptor_service.cc4
-rw-r--r--chromium/media/mojo/services/mojo_decryptor_service.h2
-rw-r--r--chromium/media/mojo/services/mojo_mjpeg_decode_accelerator_service.cc246
-rw-r--r--chromium/media/mojo/services/mojo_mjpeg_decode_accelerator_service.h80
-rw-r--r--chromium/media/mojo/services/mojo_mjpeg_decode_accelerator_service_unittest.cc91
-rw-r--r--chromium/media/mojo/services/mojo_renderer_service.cc2
-rw-r--r--chromium/media/mojo/services/mojo_video_decoder_service.cc8
-rw-r--r--chromium/media/mojo/services/mojo_video_decoder_service.h2
-rw-r--r--chromium/media/mojo/services/mojo_video_encode_accelerator_service.cc14
-rw-r--r--chromium/media/mojo/services/video_decode_perf_history.cc15
-rw-r--r--chromium/media/parsers/BUILD.gn82
-rw-r--r--chromium/media/parsers/jpeg_parser.cc (renamed from chromium/media/filters/jpeg_parser.cc)2
-rw-r--r--chromium/media/parsers/jpeg_parser.h (renamed from chromium/media/filters/jpeg_parser.h)22
-rw-r--r--chromium/media/parsers/jpeg_parser_picture_fuzzertest.cc (renamed from chromium/media/filters/jpeg_parser_picture_fuzzertest.cc)2
-rw-r--r--chromium/media/parsers/jpeg_parser_unittest.cc (renamed from chromium/media/filters/jpeg_parser_unittest.cc)2
-rw-r--r--chromium/media/parsers/media_parsers_export.h12
-rw-r--r--chromium/media/parsers/vp8_bool_decoder.cc (renamed from chromium/media/filters/vp8_bool_decoder.cc)3
-rw-r--r--chromium/media/parsers/vp8_bool_decoder.h (renamed from chromium/media/filters/vp8_bool_decoder.h)10
-rw-r--r--chromium/media/parsers/vp8_bool_decoder_unittest.cc (renamed from chromium/media/filters/vp8_bool_decoder_unittest.cc)2
-rw-r--r--chromium/media/parsers/vp8_parser.cc (renamed from chromium/media/filters/vp8_parser.cc)3
-rw-r--r--chromium/media/parsers/vp8_parser.h (renamed from chromium/media/filters/vp8_parser.h)20
-rw-r--r--chromium/media/parsers/vp8_parser_fuzzertest.cc (renamed from chromium/media/filters/vp8_parser_fuzzertest.cc)2
-rw-r--r--chromium/media/parsers/vp8_parser_unittest.cc (renamed from chromium/media/filters/vp8_parser_unittest.cc)2
-rw-r--r--chromium/media/parsers/webp_parser.cc131
-rw-r--r--chromium/media/parsers/webp_parser.h38
-rw-r--r--chromium/media/parsers/webp_parser_fuzzertest.cc24
-rw-r--r--chromium/media/parsers/webp_parser_unittest.cc327
-rw-r--r--chromium/media/remoting/fake_media_resource.cc2
-rw-r--r--chromium/media/remoting/proto_utils.cc2
-rw-r--r--chromium/media/remoting/stream_provider.cc2
-rw-r--r--chromium/media/renderers/BUILD.gn1
-rw-r--r--chromium/media/renderers/audio_renderer_impl.cc54
-rw-r--r--chromium/media/renderers/audio_renderer_impl.h6
-rw-r--r--chromium/media/renderers/audio_renderer_impl_unittest.cc63
-rw-r--r--chromium/media/renderers/default_decoder_factory.cc19
-rw-r--r--chromium/media/renderers/paint_canvas_video_renderer.cc238
-rw-r--r--chromium/media/renderers/paint_canvas_video_renderer.h50
-rw-r--r--chromium/media/renderers/paint_canvas_video_renderer_unittest.cc93
-rw-r--r--chromium/media/renderers/video_renderer_impl.cc14
-rw-r--r--chromium/media/renderers/video_renderer_impl.h5
-rw-r--r--chromium/media/renderers/video_renderer_impl_unittest.cc25
-rw-r--r--chromium/media/renderers/video_resource_updater.cc68
-rw-r--r--chromium/media/renderers/video_resource_updater.h2
-rw-r--r--chromium/media/renderers/video_resource_updater_unittest.cc6
-rw-r--r--chromium/media/video/BUILD.gn4
-rw-r--r--chromium/media/video/fake_video_encode_accelerator.cc9
-rw-r--r--chromium/media/video/fake_video_encode_accelerator.h5
-rw-r--r--chromium/media/video/gpu_memory_buffer_video_frame_pool.cc99
-rw-r--r--chromium/media/video/gpu_memory_buffer_video_frame_pool.h8
-rw-r--r--chromium/media/video/gpu_memory_buffer_video_frame_pool_unittest.cc8
-rw-r--r--chromium/media/video/jpeg_encode_accelerator.cc11
-rw-r--r--chromium/media/video/jpeg_encode_accelerator.h111
-rw-r--r--chromium/media/video/mjpeg_decode_accelerator.cc11
-rw-r--r--chromium/media/video/mjpeg_decode_accelerator.h128
-rw-r--r--chromium/media/video/mock_gpu_memory_buffer_video_frame_pool.cc4
-rw-r--r--chromium/media/video/mock_gpu_memory_buffer_video_frame_pool.h2
-rw-r--r--chromium/media/video/mock_video_decode_accelerator.h2
-rw-r--r--chromium/media/video/mock_video_encode_accelerator.h5
-rw-r--r--chromium/media/video/video_decode_accelerator.cc11
-rw-r--r--chromium/media/video/video_decode_accelerator.h13
-rw-r--r--chromium/media/video/video_encode_accelerator.h5
-rw-r--r--chromium/media/webrtc/audio_processor.cc36
-rw-r--r--chromium/media/webrtc/audio_processor_unittest.cc13
670 files changed, 11491 insertions, 12940 deletions
diff --git a/chromium/media/BUILD.gn b/chromium/media/BUILD.gn
index f9ae1d92946..d53a0e58025 100644
--- a/chromium/media/BUILD.gn
+++ b/chromium/media/BUILD.gn
@@ -149,6 +149,7 @@ test("media_unittests") {
"//media/learning:unit_tests",
"//media/mojo:unit_tests",
"//media/muxers:unit_tests",
+ "//media/parsers:unit_tests",
"//media/renderers:unit_tests",
"//media/test:pipeline_integration_tests",
"//media/test:run_all_unittests",
@@ -314,30 +315,6 @@ if (proprietary_codecs) {
}
}
-fuzzer_test("media_jpeg_parser_picture_fuzzer") {
- sources = [
- "filters/jpeg_parser_picture_fuzzertest.cc",
- ]
- deps = [
- ":test_support",
- "//base",
- ]
- seed_corpus = "test/data"
- dict = "test/jpeg.dict"
-}
-
-fuzzer_test("media_vp8_parser_fuzzer") {
- sources = [
- "filters/vp8_parser_fuzzertest.cc",
- ]
- deps = [
- ":test_support",
- "//base",
- ]
- libfuzzer_options = [ "max_len = 400000" ]
- dict = "test/vp8.dict"
-}
-
fuzzer_test("media_vp9_parser_fuzzer") {
sources = [
"filters/vp9_parser_fuzzertest.cc",
diff --git a/chromium/media/DEPS b/chromium/media/DEPS
index e5e9d613c81..096cc14c361 100644
--- a/chromium/media/DEPS
+++ b/chromium/media/DEPS
@@ -5,11 +5,11 @@ include_rules = [
"+chromeos/audio",
"+crypto",
"+device/udev_linux",
- "+device/usb",
"+gpu",
"+jni",
"+mojo/public/cpp/bindings/callback_helpers.h",
"+mojo/public/cpp/system/platform_handle.h",
+ "+services/device/public",
"+services/ws/public/cpp/gpu/context_provider_command_buffer.h",
"+skia/ext",
"+third_party/dav1d",
@@ -43,5 +43,8 @@ specific_include_rules = {
],
"gpu_memory_buffer_video_frame_pool_unittest.cc": [
"+components/viz/test/test_context_provider.h",
+ ],
+ "null_video_sink_unittest.cc": [
+ "+components/viz/common/frame_sinks/begin_frame_args.h",
]
}
diff --git a/chromium/media/audio/alsa/alsa_output.cc b/chromium/media/audio/alsa/alsa_output.cc
index 254fd306554..7f72aefa73a 100644
--- a/chromium/media/audio/alsa/alsa_output.cc
+++ b/chromium/media/audio/alsa/alsa_output.cc
@@ -337,6 +337,10 @@ void AlsaPcmOutputStream::Stop() {
TransitionTo(kIsStopped);
}
+// This stream is always used with sub second buffer sizes, where it's
+// sufficient to simply always flush upon Start().
+void AlsaPcmOutputStream::Flush() {}
+
void AlsaPcmOutputStream::SetVolume(double volume) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
diff --git a/chromium/media/audio/alsa/alsa_output.h b/chromium/media/audio/alsa/alsa_output.h
index acf139b3dae..4efec65df09 100644
--- a/chromium/media/audio/alsa/alsa_output.h
+++ b/chromium/media/audio/alsa/alsa_output.h
@@ -81,6 +81,7 @@ class MEDIA_EXPORT AlsaPcmOutputStream : public AudioOutputStream {
void Close() override;
void Start(AudioSourceCallback* callback) override;
void Stop() override;
+ void Flush() override;
void SetVolume(double volume) override;
void GetVolume(double* volume) override;
diff --git a/chromium/media/audio/android/audio_android_unittest.cc b/chromium/media/audio/android/audio_android_unittest.cc
index 5af0c4a316f..34cc910087b 100644
--- a/chromium/media/audio/android/audio_android_unittest.cc
+++ b/chromium/media/audio/android/audio_android_unittest.cc
@@ -48,11 +48,6 @@ ACTION_P4(CheckCountAndPostQuitTask, count, limit, task_runner, quit_closure) {
task_runner->PostTask(FROM_HERE, quit_closure);
}
-const char kSpeechFile_16b_s_48k[] = "speech_16b_stereo_48kHz.raw";
-const char kSpeechFile_16b_m_48k[] = "speech_16b_mono_48kHz.raw";
-const char kSpeechFile_16b_s_44k[] = "speech_16b_stereo_44kHz.raw";
-const char kSpeechFile_16b_m_44k[] = "speech_16b_mono_44kHz.raw";
-
const float kCallbackTestTimeMs = 2000.0;
const int kBytesPerSample = 2;
const SampleFormat kSampleFormat = kSampleFormatS16;
@@ -840,40 +835,6 @@ TEST_F(AudioAndroidOutputTest, StartOutputStreamCallbacksNonDefaultParameters) {
StartOutputStreamCallbacks(params);
}
-// Play out a PCM file segment in real time and allow the user to verify that
-// the rendered audio sounds OK.
-// NOTE: this test requires user interaction and is not designed to run as an
-// automatized test on bots.
-TEST_F(AudioAndroidOutputTest, DISABLED_RunOutputStreamWithFileAsSource) {
- GetDefaultOutputStreamParametersOnAudioThread();
- DVLOG(1) << audio_output_parameters();
- MakeAudioOutputStreamOnAudioThread(audio_output_parameters());
-
- std::string file_name;
- const AudioParameters params = audio_output_parameters();
- if (params.sample_rate() == 48000 && params.channels() == 2) {
- file_name = kSpeechFile_16b_s_48k;
- } else if (params.sample_rate() == 48000 && params.channels() == 1) {
- file_name = kSpeechFile_16b_m_48k;
- } else if (params.sample_rate() == 44100 && params.channels() == 2) {
- file_name = kSpeechFile_16b_s_44k;
- } else if (params.sample_rate() == 44100 && params.channels() == 1) {
- file_name = kSpeechFile_16b_m_44k;
- } else {
- FAIL() << "This test supports 44.1kHz and 48kHz mono/stereo only.";
- return;
- }
-
- base::WaitableEvent event(base::WaitableEvent::ResetPolicy::AUTOMATIC,
- base::WaitableEvent::InitialState::NOT_SIGNALED);
- FileAudioSource source(&event, file_name);
-
- OpenAndStartAudioOutputStreamOnAudioThread(&source);
- DVLOG(0) << ">> Verify that the file is played out correctly...";
- EXPECT_TRUE(event.TimedWait(TestTimeouts::action_max_timeout()));
- StopAndCloseAudioOutputStreamOnAudioThread();
-}
-
// Start input streaming and run it for ten seconds while recording to a
// local audio file.
// NOTE: this test requires user interaction and is not designed to run as an
diff --git a/chromium/media/audio/android/audio_manager_android.cc b/chromium/media/audio/android/audio_manager_android.cc
index d322bc61a3f..599958e948b 100644
--- a/chromium/media/audio/android/audio_manager_android.cc
+++ b/chromium/media/audio/android/audio_manager_android.cc
@@ -113,11 +113,8 @@ void AudioManagerAndroid::GetAudioInputDeviceNames(
// MODIFY_AUDIO_SETTINGS or RECORD_AUDIO permissions.
return;
}
- jsize len = env->GetArrayLength(j_device_array.obj());
AudioDeviceName device;
- for (jsize i = 0; i < len; ++i) {
- ScopedJavaLocalRef<jobject> j_device(
- env, env->GetObjectArrayElement(j_device_array.obj(), i));
+ for (auto j_device : j_device_array.ReadElements<jobject>()) {
ScopedJavaLocalRef<jstring> j_device_name =
Java_AudioDeviceName_name(env, j_device);
ConvertJavaStringToUTF8(env, j_device_name.obj(), &device.device_name);
diff --git a/chromium/media/audio/android/audio_track_output_stream.cc b/chromium/media/audio/android/audio_track_output_stream.cc
index b271a8a5d21..1012622b888 100644
--- a/chromium/media/audio/android/audio_track_output_stream.cc
+++ b/chromium/media/audio/android/audio_track_output_stream.cc
@@ -88,6 +88,10 @@ void AudioTrackOutputStream::Close() {
audio_manager_->ReleaseOutputStream(this);
}
+// This stream is always used with sub second buffer sizes, where it's
+// sufficient to simply always flush upon Start().
+void AudioTrackOutputStream::Flush() {}
+
void AudioTrackOutputStream::SetMute(bool muted) {
if (params_.IsBitstreamFormat() && muted) {
LOG(WARNING)
diff --git a/chromium/media/audio/android/audio_track_output_stream.h b/chromium/media/audio/android/audio_track_output_stream.h
index 8cb2ba0e73d..500671bff04 100644
--- a/chromium/media/audio/android/audio_track_output_stream.h
+++ b/chromium/media/audio/android/audio_track_output_stream.h
@@ -31,6 +31,7 @@ class MEDIA_EXPORT AudioTrackOutputStream : public MuteableAudioOutputStream {
void SetVolume(double volume) override;
void GetVolume(double* volume) override;
void Close() override;
+ void Flush() override;
// MuteableAudioOutputStream implementation.
void SetMute(bool muted) override;
diff --git a/chromium/media/audio/android/opensles_output.cc b/chromium/media/audio/android/opensles_output.cc
index 0c0a1c920b3..17554fa2b09 100644
--- a/chromium/media/audio/android/opensles_output.cc
+++ b/chromium/media/audio/android/opensles_output.cc
@@ -220,6 +220,10 @@ void OpenSLESOutputStream::Close() {
audio_manager_->ReleaseOutputStream(this);
}
+// This stream is always used with sub second buffer sizes, where it's
+// sufficient to simply always flush upon Start().
+void OpenSLESOutputStream::Flush() {}
+
void OpenSLESOutputStream::SetVolume(double volume) {
DVLOG(2) << "OpenSLESOutputStream::SetVolume(" << volume << ")";
DCHECK(thread_checker_.CalledOnValidThread());
diff --git a/chromium/media/audio/android/opensles_output.h b/chromium/media/audio/android/opensles_output.h
index 6de09916315..4c1cec16350 100644
--- a/chromium/media/audio/android/opensles_output.h
+++ b/chromium/media/audio/android/opensles_output.h
@@ -42,6 +42,7 @@ class OpenSLESOutputStream : public MuteableAudioOutputStream {
// Implementation of MuteableAudioOutputStream.
bool Open() override;
void Close() override;
+ void Flush() override;
void Start(AudioSourceCallback* callback) override;
void Stop() override;
void SetVolume(double volume) override;
diff --git a/chromium/media/audio/audio_debug_file_writer_unittest.cc b/chromium/media/audio/audio_debug_file_writer_unittest.cc
index c13be180ba6..4d498690b43 100644
--- a/chromium/media/audio/audio_debug_file_writer_unittest.cc
+++ b/chromium/media/audio/audio_debug_file_writer_unittest.cc
@@ -50,7 +50,7 @@ class AudioDebugFileWriterTest
: public testing::TestWithParam<AudioDebugFileWriterTestData> {
public:
explicit AudioDebugFileWriterTest(
- base::test::ScopedTaskEnvironment::ExecutionMode execution_mode)
+ base::test::ScopedTaskEnvironment::ThreadPoolExecutionMode execution_mode)
: scoped_task_environment_(
base::test::ScopedTaskEnvironment::MainThreadType::DEFAULT,
execution_mode),
@@ -67,7 +67,8 @@ class AudioDebugFileWriterTest
}
AudioDebugFileWriterTest()
: AudioDebugFileWriterTest(
- base::test::ScopedTaskEnvironment::ExecutionMode::ASYNC) {}
+ base::test::ScopedTaskEnvironment::ThreadPoolExecutionMode::ASYNC) {
+ }
protected:
virtual ~AudioDebugFileWriterTest() = default;
@@ -231,8 +232,8 @@ class AudioDebugFileWriterBehavioralTest : public AudioDebugFileWriterTest {};
class AudioDebugFileWriterSingleThreadTest : public AudioDebugFileWriterTest {
public:
AudioDebugFileWriterSingleThreadTest()
- : AudioDebugFileWriterTest(
- base::test::ScopedTaskEnvironment::ExecutionMode::QUEUED) {}
+ : AudioDebugFileWriterTest(base::test::ScopedTaskEnvironment::
+ ThreadPoolExecutionMode::QUEUED) {}
};
TEST_P(AudioDebugFileWriterTest, WaveRecordingTest) {
diff --git a/chromium/media/audio/audio_features.cc b/chromium/media/audio/audio_features.cc
index e5fdc29917a..a36d1fcc840 100644
--- a/chromium/media/audio/audio_features.cc
+++ b/chromium/media/audio/audio_features.cc
@@ -28,4 +28,8 @@ const base::Feature kForceEnableSystemAec{"ForceEnableSystemAec",
base::FEATURE_DISABLED_BY_DEFAULT};
#endif
+#if defined(OS_WIN)
+const base::Feature kAllowIAudioClient3{"AllowIAudioClient3",
+ base::FEATURE_ENABLED_BY_DEFAULT};
+#endif
} // namespace features
diff --git a/chromium/media/audio/audio_features.h b/chromium/media/audio/audio_features.h
index 70e228d7d79..b219d86b471 100644
--- a/chromium/media/audio/audio_features.h
+++ b/chromium/media/audio/audio_features.h
@@ -23,6 +23,10 @@ MEDIA_EXPORT extern const base::Feature kCrOSSystemAECDeactivatedGroups;
MEDIA_EXPORT extern const base::Feature kForceEnableSystemAec;
#endif
+#if defined(OS_WIN)
+MEDIA_EXPORT extern const base::Feature kAllowIAudioClient3;
+#endif
+
} // namespace features
#endif // MEDIA_AUDIO_AUDIO_FEATURES_H_
diff --git a/chromium/media/audio/audio_io.h b/chromium/media/audio/audio_io.h
index ed22a2944a7..8f56ef08512 100644
--- a/chromium/media/audio/audio_io.h
+++ b/chromium/media/audio/audio_io.h
@@ -111,6 +111,10 @@ class MEDIA_EXPORT AudioOutputStream {
// Close the stream.
// After calling this method, the object should not be used anymore.
virtual void Close() = 0;
+
+ // Flushes the stream. This should only be called if the stream is not
+ // playing. (i.e. called after Stop or Open)
+ virtual void Flush() = 0;
};
// Models an audio sink receiving recorded audio from the audio driver.
diff --git a/chromium/media/audio/audio_manager_unittest.cc b/chromium/media/audio/audio_manager_unittest.cc
index 27cb93ff7fb..d1b75a911cf 100644
--- a/chromium/media/audio/audio_manager_unittest.cc
+++ b/chromium/media/audio/audio_manager_unittest.cc
@@ -268,7 +268,8 @@ class AudioManagerTest : public ::testing::Test {
chromeos::CrasAudioClient::InitializeFake();
chromeos::FakeCrasAudioClient::Get()->SetAudioNodesForTesting(audio_nodes);
audio_pref_handler_ = new chromeos::AudioDevicesPrefHandlerStub();
- chromeos::CrasAudioHandler::Initialize(audio_pref_handler_);
+ chromeos::CrasAudioHandler::Initialize(/*connector=*/nullptr,
+ audio_pref_handler_);
cras_audio_handler_ = chromeos::CrasAudioHandler::Get();
base::RunLoop().RunUntilIdle();
}
@@ -722,6 +723,8 @@ TEST_F(AudioManagerTest, CheckMakeOutputStreamWithPreferredParameters) {
AudioOutputStream* stream =
audio_manager_->MakeAudioOutputStreamProxy(params, "");
ASSERT_TRUE(stream);
+
+ stream->Close();
}
#if defined(OS_MACOSX) || defined(USE_CRAS)
diff --git a/chromium/media/audio/audio_output_controller.cc b/chromium/media/audio/audio_output_controller.cc
index 508c57133e8..88a5ddc45be 100644
--- a/chromium/media/audio/audio_output_controller.cc
+++ b/chromium/media/audio/audio_output_controller.cc
@@ -205,6 +205,19 @@ void AudioOutputController::Close(base::OnceClosure closed_task) {
base::WrapRefCounted(this), std::move(closed_task)));
}
+void AudioOutputController::Flush() {
+ CHECK_EQ(AudioManager::Get(), audio_manager_);
+ DCHECK_CALLED_ON_VALID_SEQUENCE(owning_sequence_);
+
+ if (task_runner_->BelongsToCurrentThread()) {
+ DoFlush();
+ return;
+ }
+
+ task_runner_->PostTask(FROM_HERE,
+ base::BindOnce(&AudioOutputController::DoFlush, this));
+}
+
void AudioOutputController::SetVolume(double volume) {
CHECK_EQ(AudioManager::Get(), audio_manager_);
DCHECK_CALLED_ON_VALID_SEQUENCE(owning_sequence_);
@@ -347,6 +360,21 @@ void AudioOutputController::DoClose() {
}
}
+void AudioOutputController::DoFlush() {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ SCOPED_UMA_HISTOGRAM_TIMER("Media.AudioOutputController.FlushTime");
+ TRACE_EVENT0("audio", "AudioOutputController::DoFlush");
+ handler_->OnLog("AOC::DoFlush");
+
+ if (stream_) {
+ if (state_ == kPlaying) {
+ handler_->OnControllerError();
+ } else {
+ stream_->Flush();
+ }
+ }
+}
+
void AudioOutputController::DoSetVolume(double volume) {
DCHECK(task_runner_->BelongsToCurrentThread());
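Flush() above reuses the controller's standard hop-to-the-audio-thread idiom: run DoFlush() inline when already on the audio task runner, otherwise post it there. A self-contained toy model of that idiom (ToyTaskRunner and every name below are illustrative, not Chromium API):

#include <functional>
#include <queue>
#include <thread>

// Toy stand-in for the BelongsToCurrentThread()/PostTask() pattern.
class ToyTaskRunner {
 public:
  explicit ToyTaskRunner(std::thread::id owner) : owner_(owner) {}
  bool BelongsToCurrentThread() const {
    return std::this_thread::get_id() == owner_;
  }
  void PostTask(std::function<void()> task) { queue_.push(std::move(task)); }
  void RunPendingTasks() {
    while (!queue_.empty()) {
      queue_.front()();
      queue_.pop();
    }
  }

 private:
  std::thread::id owner_;
  std::queue<std::function<void()>> queue_;
};

// Mirrors AudioOutputController::Flush(): inline when on the right thread,
// posted otherwise.
void FlushOnAudioThread(ToyTaskRunner& runner,
                        std::function<void()> do_flush) {
  if (runner.BelongsToCurrentThread()) {
    do_flush();
    return;
  }
  runner.PostTask(std::move(do_flush));
}

In the real controller, base::BindOnce(&AudioOutputController::DoFlush, this) additionally retains a reference, so the controller outlives the posted task.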
diff --git a/chromium/media/audio/audio_output_controller.h b/chromium/media/audio/audio_output_controller.h
index b3721f17ec7..77b793a0a0c 100644
--- a/chromium/media/audio/audio_output_controller.h
+++ b/chromium/media/audio/audio_output_controller.h
@@ -162,6 +162,9 @@ class MEDIA_EXPORT AudioOutputController
// case, it must be null.
void Close(base::OnceClosure closed_task);
+ // Flushes the audio output stream.
+ void Flush();
+
// Sets the volume of the audio output stream.
void SetVolume(double volume);
@@ -247,6 +250,7 @@ class MEDIA_EXPORT AudioOutputController
void DoPlay();
void DoPause();
void DoClose();
+ void DoFlush();
void DoSetVolume(double volume);
void DoReportError();
void DoStartDiverting(AudioOutputStream* to_stream);
diff --git a/chromium/media/audio/audio_output_controller_unittest.cc b/chromium/media/audio/audio_output_controller_unittest.cc
index 3cbbbfe3872..55e0b0879b6 100644
--- a/chromium/media/audio/audio_output_controller_unittest.cc
+++ b/chromium/media/audio/audio_output_controller_unittest.cc
@@ -25,6 +25,7 @@
#include "base/unguessable_token.h"
#include "media/audio/audio_device_description.h"
#include "media/audio/audio_source_diverter.h"
+#include "media/audio/mock_audio_manager.h"
#include "media/audio/test_audio_thread.h"
#include "media/base/audio_bus.h"
#include "media/base/audio_parameters.h"
@@ -93,20 +94,25 @@ class MockAudioOutputStream : public AudioOutputStream,
explicit MockAudioOutputStream(AudioManager* audio_manager)
: audio_manager_(audio_manager) {}
+ MockAudioOutputStream() : audio_manager_(nullptr) {}
+
// We forward to a fake stream to get automatic OnMoreData callbacks,
// required by some tests.
MOCK_METHOD0(DidOpen, void());
MOCK_METHOD0(DidStart, void());
MOCK_METHOD0(DidStop, void());
MOCK_METHOD0(DidClose, void());
+ MOCK_METHOD0(DidFlush, void());
MOCK_METHOD1(SetVolume, void(double));
MOCK_METHOD1(GetVolume, void(double* volume));
bool Open() override {
EXPECT_EQ(nullptr, impl_);
- impl_ =
- audio_manager_->MakeAudioOutputStreamProxy(AOCTestParams(), "default");
- impl_->Open();
+ if (audio_manager_) {
+ impl_ = audio_manager_->MakeAudioOutputStreamProxy(AOCTestParams(),
+ "default");
+ impl_->Open();
+ }
DidOpen();
return true;
}
@@ -114,22 +120,35 @@ class MockAudioOutputStream : public AudioOutputStream,
void Start(AudioOutputStream::AudioSourceCallback* cb) override {
EXPECT_EQ(nullptr, callback_);
callback_ = cb;
- impl_->Start(this);
+ if (impl_) {
+ impl_->Start(this);
+ }
DidStart();
}
void Stop() override {
- impl_->Stop();
+ if (impl_) {
+ impl_->Stop();
+ }
callback_ = nullptr;
DidStop();
}
void Close() override {
- impl_->Close();
+ if (impl_) {
+ impl_->Close();
+ }
impl_ = nullptr;
DidClose();
}
+ void Flush() override {
+ if (impl_) {
+ impl_->Flush();
+ }
+ DidFlush();
+ }
+
private:
int OnMoreData(base::TimeDelta delay,
base::TimeTicks delay_timestamp,
@@ -413,6 +432,89 @@ class AudioOutputControllerTest : public TestWithParam<bool> {
DISALLOW_COPY_AND_ASSIGN(AudioOutputControllerTest);
};
+class AudioOutputControllerMockTest : public TestWithParam<bool> {
+ public:
+ AudioOutputControllerMockTest()
+ : audio_manager_(std::make_unique<media::TestAudioThread>(true)) {
+ audio_manager_.SetMakeOutputStreamCB(
+ base::BindRepeating([](media::AudioOutputStream* stream,
+ const media::AudioParameters& params,
+ const std::string& device_id) { return stream; },
+ &mock_stream_));
+ }
+
+ ~AudioOutputControllerMockTest() { audio_manager_.Shutdown(); }
+
+ protected:
+ void Create() {
+ EXPECT_CALL(mock_event_handler_, OnControllerCreated());
+ EXPECT_CALL(mock_stream_, DidOpen());
+ EXPECT_CALL(mock_stream_, SetVolume(1)); // Default volume
+ controller_ = AudioOutputController::Create(
+ &audio_manager_, &mock_event_handler_, AOCTestParams(), std::string(),
+ base::UnguessableToken(), &mock_sync_reader_);
+ EXPECT_NE(nullptr, controller_.get());
+ EXPECT_CALL(mock_stream_, SetVolume(kTestVolume));
+ controller_->SetVolume(kTestVolume);
+ }
+
+ void Close() {
+ EXPECT_CALL(mock_sync_reader_, Close());
+ EXPECT_CALL(mock_stream_, DidClose());
+
+ base::RunLoop run_loop;
+ base::ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, base::BindOnce(&AudioOutputController::Close, controller_,
+ run_loop.QuitClosure()));
+ run_loop.Run();
+ }
+
+ void Play() {
+ base::RunLoop run_loop;
+ EXPECT_CALL(mock_stream_, DidStart())
+ .WillOnce(RunClosure(run_loop.QuitClosure()));
+ EXPECT_CALL(mock_event_handler_, OnControllerPlaying());
+ EXPECT_CALL(mock_sync_reader_, RequestMoreData(_, _, _))
+ .WillRepeatedly(Return());
+ controller_->Play();
+ run_loop.Run();
+ }
+
+ void Pause() {
+ base::RunLoop loop;
+ EXPECT_CALL(mock_stream_, DidStop());
+ EXPECT_CALL(mock_event_handler_, OnControllerPaused())
+ .WillOnce(RunOnceClosure(loop.QuitClosure()));
+ controller_->Pause();
+ loop.Run();
+ Mock::VerifyAndClearExpectations(&mock_event_handler_);
+ }
+
+ void Flush(bool expect_success) {
+ base::RunLoop loop;
+ if (expect_success) {
+ EXPECT_CALL(mock_stream_, DidFlush())
+ .WillOnce(RunOnceClosure(loop.QuitClosure()));
+ } else {
+ EXPECT_CALL(mock_event_handler_, OnControllerError())
+ .Times(1)
+ .WillOnce(RunOnceClosure(loop.QuitClosure()));
+ }
+
+ controller_->Flush();
+ loop.Run();
+ }
+
+ StrictMock<MockAudioOutputControllerEventHandler> mock_event_handler_;
+
+ private:
+ base::TestMessageLoop message_loop_;
+ MockAudioManager audio_manager_;
+ StrictMock<MockAudioOutputControllerSyncReader> mock_sync_reader_;
+ StrictMock<MockAudioOutputStream> mock_stream_;
+ scoped_refptr<AudioOutputController> controller_;
+};
+
TEST_P(AudioOutputControllerTest, CreateAndClose) {
Create();
Close();
@@ -520,6 +622,22 @@ TEST_P(AudioOutputControllerTest, DuplicateDivertInteract) {
Close();
}
+TEST_F(AudioOutputControllerMockTest, FlushWhenStreamIsPaused) {
+ Create();
+ Play();
+ Pause();
+ Flush(true);
+ Close();
+}
+
+TEST_F(AudioOutputControllerMockTest, FlushWhenStreamIsPlayingTriggersError) {
+ Create();
+ Play();
+ Flush(false);
+ Pause();
+ Close();
+}
+
INSTANTIATE_TEST_SUITE_P(AOC, AudioOutputControllerTest, Bool());
} // namespace media
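The mock tests above quit their RunLoops from inside gmock expectations via RunClosure/RunOnceClosure helpers. A minimal sketch of such an action, assuming gmock's ACTION_P macro (the helper Chromium actually uses may be defined elsewhere):

// Runs a base closure from within a gmock expectation.
ACTION_P(RunClosure, closure) {
  closure.Run();
}

// Typical use, as in Play() above:
//   base::RunLoop run_loop;
//   EXPECT_CALL(mock_stream_, DidStart())
//       .WillOnce(RunClosure(run_loop.QuitClosure()));
//   controller_->Play();
//   run_loop.Run();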
diff --git a/chromium/media/audio/audio_output_delegate.h b/chromium/media/audio/audio_output_delegate.h
index 8cd95aa9d40..ab14a28cbec 100644
--- a/chromium/media/audio/audio_output_delegate.h
+++ b/chromium/media/audio/audio_output_delegate.h
@@ -44,6 +44,7 @@ class MEDIA_EXPORT AudioOutputDelegate {
// Stream control:
virtual void OnPlayStream() = 0;
virtual void OnPauseStream() = 0;
+ virtual void OnFlushStream() = 0;
virtual void OnSetVolume(double volume) = 0;
};
diff --git a/chromium/media/audio/audio_output_device.cc b/chromium/media/audio/audio_output_device.cc
index 0cf69752aa7..0b4eda8f695 100644
--- a/chromium/media/audio/audio_output_device.cc
+++ b/chromium/media/audio/audio_output_device.cc
@@ -126,6 +126,12 @@ void AudioOutputDevice::Pause() {
FROM_HERE, base::BindOnce(&AudioOutputDevice::PauseOnIOThread, this));
}
+void AudioOutputDevice::Flush() {
+ TRACE_EVENT0("audio", "AudioOutputDevice::Flush");
+ io_task_runner_->PostTask(
+ FROM_HERE, base::BindOnce(&AudioOutputDevice::FlushOnIOThread, this));
+}
+
bool AudioOutputDevice::SetVolume(double volume) {
TRACE_EVENT1("audio", "AudioOutputDevice::Pause", "volume", volume);
@@ -232,6 +238,13 @@ void AudioOutputDevice::PauseOnIOThread() {
ipc_->PauseStream();
}
+void AudioOutputDevice::FlushOnIOThread() {
+ DCHECK(io_task_runner_->BelongsToCurrentThread());
+
+ if (ipc_)
+ ipc_->FlushStream();
+}
+
void AudioOutputDevice::ShutDownOnIOThread() {
DCHECK(io_task_runner_->BelongsToCurrentThread());
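Together with the IPC, delegate, and controller additions in this patch, the new method threads a flush from the renderer process all the way down to the platform stream. A summary of the intended call chain (the delegate-to-controller hop is inferred from the interfaces touched here, not shown verbatim in the diff):

AudioOutputDevice::Flush()                 // renderer side, any thread
  -> AudioOutputDevice::FlushOnIOThread()  // posted to the IO thread
    -> AudioOutputIPC::FlushStream()       // IPC to the audio service
      -> AudioOutputDelegate::OnFlushStream()
        -> AudioOutputController::Flush()  // hops to the audio thread
          -> AudioOutputStream::Flush()    // platform implementation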
diff --git a/chromium/media/audio/audio_output_device.h b/chromium/media/audio/audio_output_device.h
index a512e72b101..ff0ea95e6c8 100644
--- a/chromium/media/audio/audio_output_device.h
+++ b/chromium/media/audio/audio_output_device.h
@@ -108,6 +108,7 @@ class MEDIA_EXPORT AudioOutputDevice : public AudioRendererSink,
void Stop() override;
void Play() override;
void Pause() override;
+ void Flush() override;
bool SetVolume(double volume) override;
OutputDeviceInfo GetOutputDeviceInfo() override;
void GetOutputDeviceInfoAsync(OutputDeviceInfoCB info_cb) override;
@@ -160,6 +161,7 @@ class MEDIA_EXPORT AudioOutputDevice : public AudioRendererSink,
void CreateStreamOnIOThread();
void PlayOnIOThread();
void PauseOnIOThread();
+ void FlushOnIOThread();
void ShutDownOnIOThread();
void SetVolumeOnIOThread(double volume);
diff --git a/chromium/media/audio/audio_output_device_unittest.cc b/chromium/media/audio/audio_output_device_unittest.cc
index 34e2dda6a75..a5f87002819 100644
--- a/chromium/media/audio/audio_output_device_unittest.cc
+++ b/chromium/media/audio/audio_output_device_unittest.cc
@@ -85,6 +85,7 @@ class MockAudioOutputIPC : public AudioOutputIPC {
const base::Optional<base::UnguessableToken>& processing_id));
MOCK_METHOD0(PlayStream, void());
MOCK_METHOD0(PauseStream, void());
+ MOCK_METHOD0(FlushStream, void());
MOCK_METHOD0(CloseStream, void());
MOCK_METHOD1(SetVolume, void(double volume));
};
@@ -100,6 +101,7 @@ class AudioOutputDeviceTest : public testing::Test {
void StartAudioDevice();
void CallOnStreamCreated();
void StopAudioDevice();
+ void FlushAudioDevice();
void CreateDevice(const std::string& device_id,
base::TimeDelta timeout = kAuthTimeout);
void SetDevice(const std::string& device_id);
@@ -225,6 +227,14 @@ void AudioOutputDeviceTest::StopAudioDevice() {
task_env_.FastForwardBy(base::TimeDelta());
}
+void AudioOutputDeviceTest::FlushAudioDevice() {
+ if (device_status_ == OUTPUT_DEVICE_STATUS_OK)
+ EXPECT_CALL(*audio_output_ipc_, FlushStream());
+
+ audio_device_->Flush();
+ task_env_.FastForwardBy(base::TimeDelta());
+}
+
TEST_F(AudioOutputDeviceTest, Initialize) {
// Tests that the object can be constructed, initialized and destructed
// without having ever been started.
@@ -383,6 +393,12 @@ TEST_F(AudioOutputDeviceTest, GetOutputDeviceInfoAsync_Okay) {
task_env_.FastForwardBy(base::TimeDelta());
}
+TEST_F(AudioOutputDeviceTest, StreamIsFlushed) {
+ StartAudioDevice();
+ FlushAudioDevice();
+ StopAudioDevice();
+}
+
namespace {
// This struct collects useful stuff without doing anything magical. It is used
diff --git a/chromium/media/audio/audio_output_dispatcher.h b/chromium/media/audio/audio_output_dispatcher.h
index 22bcd7c8fd5..aa79817b8a9 100644
--- a/chromium/media/audio/audio_output_dispatcher.h
+++ b/chromium/media/audio/audio_output_dispatcher.h
@@ -58,6 +58,10 @@ class MEDIA_EXPORT AudioOutputDispatcher {
// Called by AudioOutputProxy when the stream is closed.
virtual void CloseStream(AudioOutputProxy* stream_proxy) = 0;
+ // Called by AudioOutputProxy to flush the stream. This should only be
+ // called when a stream is stopped.
+ virtual void FlushStream(AudioOutputProxy* stream_proxy) = 0;
+
protected:
AudioManager* audio_manager() const { return audio_manager_; }
diff --git a/chromium/media/audio/audio_output_dispatcher_impl.cc b/chromium/media/audio/audio_output_dispatcher_impl.cc
index 932be8196d7..ebe96ea0465 100644
--- a/chromium/media/audio/audio_output_dispatcher_impl.cc
+++ b/chromium/media/audio/audio_output_dispatcher_impl.cc
@@ -131,6 +131,10 @@ void AudioOutputDispatcherImpl::CloseStream(AudioOutputProxy* stream_proxy) {
close_timer_.Reset();
}
+// There is nothing to flush since the physical stream is removed during
+// StopStream().
+void AudioOutputDispatcherImpl::FlushStream(AudioOutputProxy* stream_proxy) {}
+
bool AudioOutputDispatcherImpl::HasOutputProxies() const {
DCHECK(audio_manager()->GetTaskRunner()->BelongsToCurrentThread());
return idle_proxies_ || !proxy_to_physical_map_.empty();
diff --git a/chromium/media/audio/audio_output_dispatcher_impl.h b/chromium/media/audio/audio_output_dispatcher_impl.h
index dcea811038d..488f0ab82bc 100644
--- a/chromium/media/audio/audio_output_dispatcher_impl.h
+++ b/chromium/media/audio/audio_output_dispatcher_impl.h
@@ -47,6 +47,7 @@ class MEDIA_EXPORT AudioOutputDispatcherImpl : public AudioOutputDispatcher {
void StopStream(AudioOutputProxy* stream_proxy) override;
void StreamVolumeSet(AudioOutputProxy* stream_proxy, double volume) override;
void CloseStream(AudioOutputProxy* stream_proxy) override;
+ void FlushStream(AudioOutputProxy* stream_proxy) override;
// Returns true if there are any open AudioOutputProxy objects.
bool HasOutputProxies() const;
diff --git a/chromium/media/audio/audio_output_ipc.h b/chromium/media/audio/audio_output_ipc.h
index 669a1c7052d..f41054e573b 100644
--- a/chromium/media/audio/audio_output_ipc.h
+++ b/chromium/media/audio/audio_output_ipc.h
@@ -95,6 +95,10 @@ class MEDIA_EXPORT AudioOutputIPC {
// AudioOutputController::Pause().
virtual void PauseStream() = 0;
+ // Flushes an audio stream. This should only be called when the stream is
+ // paused.
+ virtual void FlushStream() = 0;
+
// Closes the audio stream which should shut down the corresponding
// AudioOutputController in the peer process. Usage of an AudioOutputIPC must
// always end with a call to CloseStream(), and the |delegate| passed to other
diff --git a/chromium/media/audio/audio_output_proxy.cc b/chromium/media/audio/audio_output_proxy.cc
index f5dd3a12d87..f8d91be7cf9 100644
--- a/chromium/media/audio/audio_output_proxy.cc
+++ b/chromium/media/audio/audio_output_proxy.cc
@@ -92,4 +92,11 @@ void AudioOutputProxy::Close() {
delete this;
}
+void AudioOutputProxy::Flush() {
+ DCHECK(state_ != kPlaying);
+
+ if (dispatcher_)
+ dispatcher_->FlushStream(this);
+}
+
} // namespace media
diff --git a/chromium/media/audio/audio_output_proxy.h b/chromium/media/audio/audio_output_proxy.h
index d68622f84b6..9e132a1ea04 100644
--- a/chromium/media/audio/audio_output_proxy.h
+++ b/chromium/media/audio/audio_output_proxy.h
@@ -36,6 +36,7 @@ class MEDIA_EXPORT AudioOutputProxy : public AudioOutputStream {
void SetVolume(double volume) override;
void GetVolume(double* volume) override;
void Close() override;
+ void Flush() override;
AudioOutputDispatcher* get_dispatcher_for_testing() const {
return dispatcher_.get();
diff --git a/chromium/media/audio/audio_output_proxy_unittest.cc b/chromium/media/audio/audio_output_proxy_unittest.cc
index cc5bd957e1f..6f2902a732f 100644
--- a/chromium/media/audio/audio_output_proxy_unittest.cc
+++ b/chromium/media/audio/audio_output_proxy_unittest.cc
@@ -90,6 +90,7 @@ class MockAudioOutputStream : public AudioOutputStream {
MOCK_METHOD1(SetVolume, void(double volume));
MOCK_METHOD1(GetVolume, void(double* volume));
MOCK_METHOD0(Close, void());
+ MOCK_METHOD0(Flush, void());
private:
bool start_called_;
diff --git a/chromium/media/audio/audio_output_resampler.cc b/chromium/media/audio/audio_output_resampler.cc
index 67aa3b75b13..7f50515f0c0 100644
--- a/chromium/media/audio/audio_output_resampler.cc
+++ b/chromium/media/audio/audio_output_resampler.cc
@@ -464,6 +464,13 @@ void AudioOutputResampler::CloseStream(AudioOutputProxy* stream_proxy) {
}
}
+void AudioOutputResampler::FlushStream(AudioOutputProxy* stream_proxy) {
+ DCHECK(audio_manager()->GetTaskRunner()->BelongsToCurrentThread());
+ DCHECK(dispatcher_);
+
+ dispatcher_->FlushStream(stream_proxy);
+}
+
void AudioOutputResampler::StopStreamInternal(
const CallbackMap::value_type& item) {
DCHECK(audio_manager()->GetTaskRunner()->BelongsToCurrentThread());
diff --git a/chromium/media/audio/audio_output_resampler.h b/chromium/media/audio/audio_output_resampler.h
index 556d9e36f06..f866ab43733 100644
--- a/chromium/media/audio/audio_output_resampler.h
+++ b/chromium/media/audio/audio_output_resampler.h
@@ -54,6 +54,7 @@ class MEDIA_EXPORT AudioOutputResampler : public AudioOutputDispatcher {
void StopStream(AudioOutputProxy* stream_proxy) override;
void StreamVolumeSet(AudioOutputProxy* stream_proxy, double volume) override;
void CloseStream(AudioOutputProxy* stream_proxy) override;
+ void FlushStream(AudioOutputProxy* stream_proxy) override;
private:
using CallbackMap =
diff --git a/chromium/media/audio/audio_output_stream_sink.cc b/chromium/media/audio/audio_output_stream_sink.cc
index 7974224ad76..83343f43cbc 100644
--- a/chromium/media/audio/audio_output_stream_sink.cc
+++ b/chromium/media/audio/audio_output_stream_sink.cc
@@ -61,6 +61,11 @@ void AudioOutputStreamSink::Pause() {
FROM_HERE, base::BindOnce(&AudioOutputStreamSink::DoPause, this));
}
+void AudioOutputStreamSink::Flush() {
+ audio_task_runner_->PostTask(
+ FROM_HERE, base::BindOnce(&AudioOutputStreamSink::DoFlush, this));
+}
+
void AudioOutputStreamSink::Play() {
{
base::AutoLock al(callback_lock_);
@@ -152,6 +157,13 @@ void AudioOutputStreamSink::DoPause() {
stream_->Stop();
}
+void AudioOutputStreamSink::DoFlush() {
+ DCHECK(audio_task_runner_->BelongsToCurrentThread());
+ if (stream_) {
+ stream_->Flush();
+ }
+}
+
void AudioOutputStreamSink::DoPlay() {
DCHECK(audio_task_runner_->BelongsToCurrentThread());
stream_->Start(this);
diff --git a/chromium/media/audio/audio_output_stream_sink.h b/chromium/media/audio/audio_output_stream_sink.h
index e5fadfa90cf..612da724599 100644
--- a/chromium/media/audio/audio_output_stream_sink.h
+++ b/chromium/media/audio/audio_output_stream_sink.h
@@ -52,6 +52,7 @@ class MEDIA_EXPORT AudioOutputStreamSink
int prior_frames_skipped,
AudioBus* dest) override;
void OnError() override;
+ void Flush() override;
private:
~AudioOutputStreamSink() override;
@@ -61,6 +62,7 @@ class MEDIA_EXPORT AudioOutputStreamSink
void DoStart(const AudioParameters& params);
void DoStop();
void DoPause();
+ void DoFlush();
void DoPlay();
void DoSetVolume(double volume);
diff --git a/chromium/media/audio/clockless_audio_sink.cc b/chromium/media/audio/clockless_audio_sink.cc
index a41bf204dcb..ac1d2fc5830 100644
--- a/chromium/media/audio/clockless_audio_sink.cc
+++ b/chromium/media/audio/clockless_audio_sink.cc
@@ -107,6 +107,8 @@ void ClocklessAudioSink::Stop() {
Pause();
}
+void ClocklessAudioSink::Flush() {}
+
void ClocklessAudioSink::Play() {
DCHECK(initialized_);
diff --git a/chromium/media/audio/clockless_audio_sink.h b/chromium/media/audio/clockless_audio_sink.h
index 8dbfb52aad9..cf6d6a6e770 100644
--- a/chromium/media/audio/clockless_audio_sink.h
+++ b/chromium/media/audio/clockless_audio_sink.h
@@ -27,6 +27,7 @@ class MEDIA_EXPORT ClocklessAudioSink : public AudioRendererSink {
RenderCallback* callback) override;
void Start() override;
void Stop() override;
+ void Flush() override;
void Pause() override;
void Play() override;
bool SetVolume(double volume) override;
diff --git a/chromium/media/audio/cras/cras_unified.cc b/chromium/media/audio/cras/cras_unified.cc
index 2a8f92a6f49..0719abca359 100644
--- a/chromium/media/audio/cras/cras_unified.cc
+++ b/chromium/media/audio/cras/cras_unified.cc
@@ -131,6 +131,10 @@ void CrasUnifiedStream::Close() {
manager_->ReleaseOutputStream(this);
}
+// This stream is always used with sub-second buffer sizes, where it's
+// sufficient to simply always flush upon Start().
+void CrasUnifiedStream::Flush() {}
+
void CrasUnifiedStream::Start(AudioSourceCallback* callback) {
CHECK(callback);
diff --git a/chromium/media/audio/cras/cras_unified.h b/chromium/media/audio/cras/cras_unified.h
index 2d5eeee7030..c857ac48356 100644
--- a/chromium/media/audio/cras/cras_unified.h
+++ b/chromium/media/audio/cras/cras_unified.h
@@ -46,6 +46,7 @@ class MEDIA_EXPORT CrasUnifiedStream : public AudioOutputStream {
// Implementation of AudioOutputStream.
bool Open() override;
void Close() override;
+ void Flush() override;
void Start(AudioSourceCallback* callback) override;
void Stop() override;
void SetVolume(double volume) override;
diff --git a/chromium/media/audio/fake_audio_output_stream.cc b/chromium/media/audio/fake_audio_output_stream.cc
index b0ef0481129..13b04003cb4 100644
--- a/chromium/media/audio/fake_audio_output_stream.cc
+++ b/chromium/media/audio/fake_audio_output_stream.cc
@@ -56,6 +56,8 @@ void FakeAudioOutputStream::Close() {
audio_manager_->ReleaseOutputStream(this);
}
+void FakeAudioOutputStream::Flush() {}
+
void FakeAudioOutputStream::SetVolume(double volume) {}
void FakeAudioOutputStream::GetVolume(double* volume) {
diff --git a/chromium/media/audio/fake_audio_output_stream.h b/chromium/media/audio/fake_audio_output_stream.h
index 1233b2d87b5..3f97020b506 100644
--- a/chromium/media/audio/fake_audio_output_stream.h
+++ b/chromium/media/audio/fake_audio_output_stream.h
@@ -32,6 +32,7 @@ class MEDIA_EXPORT FakeAudioOutputStream : public MuteableAudioOutputStream {
void SetVolume(double volume) override;
void GetVolume(double* volume) override;
void Close() override;
+ void Flush() override;
void SetMute(bool muted) override;
private:
diff --git a/chromium/media/audio/fuchsia/audio_output_stream_fuchsia.cc b/chromium/media/audio/fuchsia/audio_output_stream_fuchsia.cc
index ae9435d30fc..a497b7baa73 100644
--- a/chromium/media/audio/fuchsia/audio_output_stream_fuchsia.cc
+++ b/chromium/media/audio/fuchsia/audio_output_stream_fuchsia.cc
@@ -82,6 +82,10 @@ void AudioOutputStreamFuchsia::Stop() {
timer_.Stop();
}
+// This stream is always used with sub-second buffer sizes, where it's
+// sufficient to simply always flush upon Start().
+void AudioOutputStreamFuchsia::Flush() {}
+
void AudioOutputStreamFuchsia::SetVolume(double volume) {
DCHECK(0.0 <= volume && volume <= 1.0) << volume;
volume_ = volume;
@@ -148,6 +152,10 @@ void AudioOutputStreamFuchsia::OnMinLeadTimeChanged(int64_t min_lead_time) {
if (payload_buffer_.IsValid() &&
GetMinBufferSize() > payload_buffer_.size()) {
payload_buffer_ = {};
+
+ // Discard all packets currently in flight. This is required because
+ // AddPayloadBuffer() will fail if there are any packets in flight.
+ audio_renderer_->DiscardAllPacketsNoReply();
}
}
diff --git a/chromium/media/audio/fuchsia/audio_output_stream_fuchsia.h b/chromium/media/audio/fuchsia/audio_output_stream_fuchsia.h
index cbfcbae7595..97277024230 100644
--- a/chromium/media/audio/fuchsia/audio_output_stream_fuchsia.h
+++ b/chromium/media/audio/fuchsia/audio_output_stream_fuchsia.h
@@ -26,6 +26,7 @@ class AudioOutputStreamFuchsia : public AudioOutputStream {
bool Open() override;
void Start(AudioSourceCallback* callback) override;
void Stop() override;
+ void Flush() override;
void SetVolume(double volume) override;
void GetVolume(double* volume) override;
void Close() override;
diff --git a/chromium/media/audio/mac/audio_auhal_mac.cc b/chromium/media/audio/mac/audio_auhal_mac.cc
index cd89860527e..5bbbd232094 100644
--- a/chromium/media/audio/mac/audio_auhal_mac.cc
+++ b/chromium/media/audio/mac/audio_auhal_mac.cc
@@ -255,6 +255,10 @@ void AUHALStream::Start(AudioSourceCallback* callback) {
callback->OnError();
}
+// This stream is always used with sub-second buffer sizes, where it's
+// sufficient to simply always flush upon Start().
+void AUHALStream::Flush() {}
+
void AUHALStream::Stop() {
DCHECK(thread_checker_.CalledOnValidThread());
deferred_start_cb_.Cancel();
diff --git a/chromium/media/audio/mac/audio_auhal_mac.h b/chromium/media/audio/mac/audio_auhal_mac.h
index 575a4df8cef..5cbbc246dff 100644
--- a/chromium/media/audio/mac/audio_auhal_mac.h
+++ b/chromium/media/audio/mac/audio_auhal_mac.h
@@ -86,6 +86,7 @@ class AUHALStream : public AudioOutputStream {
void Close() override;
void Start(AudioSourceCallback* callback) override;
void Stop() override;
+ void Flush() override;
void SetVolume(double volume) override;
void GetVolume(double* volume) override;
diff --git a/chromium/media/audio/mac/audio_manager_mac.cc b/chromium/media/audio/mac/audio_manager_mac.cc
index 11cc3843f58..a9d6babb03c 100644
--- a/chromium/media/audio/mac/audio_manager_mac.cc
+++ b/chromium/media/audio/mac/audio_manager_mac.cc
@@ -74,17 +74,17 @@ static OSStatus GetIOBufferFrameSizeRange(AudioDeviceID device_id,
kAudioDevicePropertyBufferFrameSizeRange, is_input);
AudioValueRange range = {0, 0};
UInt32 data_size = sizeof(AudioValueRange);
- OSStatus error = AudioObjectGetPropertyData(device_id, &address, 0, NULL,
- &data_size, &range);
- if (error != noErr) {
- OSSTATUS_DLOG(WARNING, error)
+ OSStatus result = AudioObjectGetPropertyData(device_id, &address, 0, NULL,
+ &data_size, &range);
+ if (result != noErr) {
+ OSSTATUS_DLOG(WARNING, result)
<< "Failed to query IO buffer size range for device: " << std::hex
<< device_id;
} else {
*minimum = range.mMinimum;
*maximum = range.mMaximum;
}
- return error;
+ return result;
}
static bool HasAudioHardware(AudioObjectPropertySelector selector) {
@@ -1026,8 +1026,13 @@ bool AudioManagerMac::MaybeChangeBufferSize(AudioDeviceID device_id,
// does in fact do this limitation internally and report noErr even if the
// user tries to set an invalid size. As an example, asking for a size of
// 4410 will on most devices be limited to 4096 without any further notice.
- UInt32 minimum, maximum;
- GetIOBufferFrameSizeRange(device_id, is_input, &minimum, &maximum);
+ UInt32 minimum = buffer_size;
+ UInt32 maximum = buffer_size;
+ result = GetIOBufferFrameSizeRange(device_id, is_input, &minimum, &maximum);
+ if (result != noErr) {
+ // OS error is logged in GetIOBufferFrameSizeRange().
+ return false;
+ }
DVLOG(1) << "valid IO buffer size range: [" << minimum << ", " << maximum
<< "]";
buffer_size = desired_buffer_size;
@@ -1187,19 +1192,19 @@ void AudioManagerMac::UnsuppressNoiseReduction(AudioDeviceID device_id) {
}
}
-bool AudioManagerMac::IncreaseIOBufferSizeIfPossible(AudioDeviceID device_id) {
+void AudioManagerMac::IncreaseIOBufferSizeIfPossible(AudioDeviceID device_id) {
DCHECK(GetTaskRunner()->BelongsToCurrentThread());
DVLOG(1) << "IncreaseIOBufferSizeIfPossible(id=0x" << std::hex << device_id
<< ")";
if (in_shutdown_) {
DVLOG(1) << "Disabled since we are shutting down";
- return false;
+ return;
}
- // Start by storing the actual I/O buffer size. Then scan all active output
+
+ // Start by getting the actual I/O buffer size. Then scan all active output
// streams using the specified |device_id| and find the minimum requested
// buffer size. In addition, store a reference to the audio unit of the first
// output stream using |device_id|.
- DCHECK(!output_io_buffer_size_map_.empty());
// All active output streams use the same actual I/O buffer size given
// a unique device ID.
// TODO(henrika): it would also be possible to use AudioUnitGetProperty(...,
@@ -1207,7 +1212,12 @@ bool AudioManagerMac::IncreaseIOBufferSizeIfPossible(AudioDeviceID device_id) {
// buffer size but I have chosen to use the map instead to avoid possibly
// expensive Core Audio API calls and the risk of failure when asking while
// closing a stream.
- const size_t& actual_size = output_io_buffer_size_map_[device_id];
+ // TODO(http://crbug.com/961629): There seems to be bugs in the caching.
+ const size_t actual_size =
+ output_io_buffer_size_map_.find(device_id) !=
+ output_io_buffer_size_map_.end()
+ ? output_io_buffer_size_map_[device_id]
+ : 0; // This leads to trying to update the buffer size below.
AudioUnit audio_unit;
size_t min_requested_size = std::numeric_limits<std::size_t>::max();
for (auto* stream : output_streams_) {
@@ -1225,7 +1235,7 @@ bool AudioManagerMac::IncreaseIOBufferSizeIfPossible(AudioDeviceID device_id) {
if (min_requested_size == std::numeric_limits<std::size_t>::max()) {
DVLOG(1) << "No action since there is no active stream for given device id";
- return false;
+ return;
}
// It is only possible to revert to a larger buffer size if the lowest
@@ -1236,22 +1246,20 @@ bool AudioManagerMac::IncreaseIOBufferSizeIfPossible(AudioDeviceID device_id) {
if (min_requested_size == actual_size) {
DVLOG(1) << "No action since lowest possible size is already in use: "
<< actual_size;
- return false;
+ return;
}
// It should now be safe to increase the I/O buffer size to a new (higher)
// value using the |min_requested_size|. Doing so will save system resources.
// All active output streams with the same |device_id| are affected by this
// change but it is only required to apply the change to one of the streams.
+ // We ignore the result from MaybeChangeBufferSize(). Logging is done in
+ // that function, and the call can legitimately fail if the device was
+ // removed during the operation.
DVLOG(1) << "min_requested_size: " << min_requested_size;
bool size_was_changed = false;
size_t io_buffer_frame_size = 0;
- bool result =
- MaybeChangeBufferSize(device_id, audio_unit, 0, min_requested_size,
- &size_was_changed, &io_buffer_frame_size);
- DCHECK_EQ(io_buffer_frame_size, min_requested_size);
- DCHECK(size_was_changed);
- return result;
+ MaybeChangeBufferSize(device_id, audio_unit, 0, min_requested_size,
+ &size_was_changed, &io_buffer_frame_size);
}
bool AudioManagerMac::AudioDeviceIsUsedForInput(AudioDeviceID device_id) {
@@ -1297,12 +1305,19 @@ void AudioManagerMac::ReleaseOutputStreamUsingRealDevice(
if (output_streams_.empty())
return;
- if (!AudioDeviceIsUsedForInput(device_id)) {
- // The current audio device is not used for input. See if it is possible to
- // increase the IO buffer size (saves power) given the remaining output
- // audio streams and their buffer size requirements.
+ // If the audio device exists (i.e. has not been removed from the system) and
+ // is not used for input, see if it is possible to increase the IO buffer size
+ // (saves power) given the remaining output audio streams and their buffer
+ // size requirements.
+ // TODO(grunell): When closing several idle streams
+ // (AudioOutputDispatcherImpl::CloseIdleStreams), we should ideally only
+ // update the buffer size once after closing all those streams.
+ std::vector<AudioObjectID> device_ids =
+ core_audio_mac::GetAllAudioDeviceIDs();
+ const bool device_exists = std::find(device_ids.begin(), device_ids.end(),
+ device_id) != device_ids.end();
+ if (device_exists && !AudioDeviceIsUsedForInput(device_id))
IncreaseIOBufferSizeIfPossible(device_id);
- }
}
void AudioManagerMac::ReleaseInputStream(AudioInputStream* stream) {
diff --git a/chromium/media/audio/mac/audio_manager_mac.h b/chromium/media/audio/mac/audio_manager_mac.h
index bb1977d5e94..c2fb8dec46c 100644
--- a/chromium/media/audio/mac/audio_manager_mac.h
+++ b/chromium/media/audio/mac/audio_manager_mac.h
@@ -182,7 +182,7 @@ class MEDIA_EXPORT AudioManagerMac : public AudioManagerBase {
// false otherwise.
// TODO(henrika): possibly extend the scheme to also take input streams into
// account.
- bool IncreaseIOBufferSizeIfPossible(AudioDeviceID device_id);
+ void IncreaseIOBufferSizeIfPossible(AudioDeviceID device_id);
std::string GetDefaultDeviceID(bool is_input);
diff --git a/chromium/media/audio/null_audio_sink.cc b/chromium/media/audio/null_audio_sink.cc
index 971197af6d4..56d7c23b9a1 100644
--- a/chromium/media/audio/null_audio_sink.cc
+++ b/chromium/media/audio/null_audio_sink.cc
@@ -72,6 +72,8 @@ void NullAudioSink::Pause() {
playing_ = false;
}
+void NullAudioSink::Flush() {}
+
bool NullAudioSink::SetVolume(double volume) {
// Audio is always muted.
return volume == 0.0;
diff --git a/chromium/media/audio/null_audio_sink.h b/chromium/media/audio/null_audio_sink.h
index 3ca75aa7c8d..1db35b9f885 100644
--- a/chromium/media/audio/null_audio_sink.h
+++ b/chromium/media/audio/null_audio_sink.h
@@ -32,6 +32,7 @@ class MEDIA_EXPORT NullAudioSink : public SwitchableAudioRendererSink {
void Stop() override;
void Pause() override;
void Play() override;
+ void Flush() override;
bool SetVolume(double volume) override;
OutputDeviceInfo GetOutputDeviceInfo() override;
void GetOutputDeviceInfoAsync(OutputDeviceInfoCB info_cb) override;
diff --git a/chromium/media/audio/pulse/audio_manager_pulse.cc b/chromium/media/audio/pulse/audio_manager_pulse.cc
index 3700506781f..d6cdac7358b 100644
--- a/chromium/media/audio/pulse/audio_manager_pulse.cc
+++ b/chromium/media/audio/pulse/audio_manager_pulse.cc
@@ -306,7 +306,8 @@ void AudioManagerPulse::AudioHardwareInfoCallback(pa_context* context,
manager->native_input_sample_rate_ = info->sample_spec.rate;
manager->native_channel_count_ = info->sample_spec.channels;
- manager->default_source_name_ = info->default_source_name;
+ if (info->default_source_name)
+ manager->default_source_name_ = info->default_source_name;
pa_threaded_mainloop_signal(manager->input_mainloop_, 0);
}
diff --git a/chromium/media/audio/pulse/pulse_output.cc b/chromium/media/audio/pulse/pulse_output.cc
index ca1181d3aa8..6b03e08bc55 100644
--- a/chromium/media/audio/pulse/pulse_output.cc
+++ b/chromium/media/audio/pulse/pulse_output.cc
@@ -124,6 +124,10 @@ void PulseAudioOutputStream::Close() {
manager_->ReleaseOutputStream(this);
}
+// This stream is always used with sub-second buffer sizes, where it's
+// sufficient to simply always flush upon Start().
+void PulseAudioOutputStream::Flush() {}
+
void PulseAudioOutputStream::FulfillWriteRequest(size_t requested_bytes) {
int bytes_remaining = requested_bytes;
while (bytes_remaining > 0) {
diff --git a/chromium/media/audio/pulse/pulse_output.h b/chromium/media/audio/pulse/pulse_output.h
index 3a7b3fe866d..638459a0b4b 100644
--- a/chromium/media/audio/pulse/pulse_output.h
+++ b/chromium/media/audio/pulse/pulse_output.h
@@ -48,6 +48,7 @@ class PulseAudioOutputStream : public AudioOutputStream {
// Implementation of AudioOutputStream.
bool Open() override;
void Close() override;
+ void Flush() override;
void Start(AudioSourceCallback* callback) override;
void Stop() override;
void SetVolume(double volume) override;
diff --git a/chromium/media/audio/pulse/pulse_util.cc b/chromium/media/audio/pulse/pulse_util.cc
index f2e686add5f..97debbdbc07 100644
--- a/chromium/media/audio/pulse/pulse_util.cc
+++ b/chromium/media/audio/pulse/pulse_util.cc
@@ -162,8 +162,10 @@ void GetDefaultDeviceIdCallback(pa_context* c,
const pa_server_info* info,
void* userdata) {
DefaultDevicesData* data = static_cast<DefaultDevicesData*>(userdata);
- data->input_ = info->default_source_name;
- data->output_ = info->default_sink_name;
+ if (info->default_source_name)
+ data->input_ = info->default_source_name;
+ if (info->default_sink_name)
+ data->output_ = info->default_sink_name;
pa_threaded_mainloop_signal(data->loop_, 0);
}
diff --git a/chromium/media/audio/virtual_audio_output_stream.cc b/chromium/media/audio/virtual_audio_output_stream.cc
index a7ba1dec76f..7bba9df7311 100644
--- a/chromium/media/audio/virtual_audio_output_stream.cc
+++ b/chromium/media/audio/virtual_audio_output_stream.cc
@@ -65,6 +65,10 @@ void VirtualAudioOutputStream::Close() {
}
}
+// There is nothing to flush since |this| is removed from the
+// |target_input_stream_|.
+void VirtualAudioOutputStream::Flush() {}
+
void VirtualAudioOutputStream::SetVolume(double volume) {
DCHECK(thread_checker_.CalledOnValidThread());
volume_ = volume;
diff --git a/chromium/media/audio/virtual_audio_output_stream.h b/chromium/media/audio/virtual_audio_output_stream.h
index 4f61f287357..706097b9da2 100644
--- a/chromium/media/audio/virtual_audio_output_stream.h
+++ b/chromium/media/audio/virtual_audio_output_stream.h
@@ -44,6 +44,7 @@ class MEDIA_EXPORT VirtualAudioOutputStream
void SetVolume(double volume) override;
void GetVolume(double* volume) override;
void Close() override;
+ void Flush() override;
private:
// AudioConverter::InputCallback:
diff --git a/chromium/media/audio/win/audio_low_latency_input_win.cc b/chromium/media/audio/win/audio_low_latency_input_win.cc
index 71c6ea7be28..b37ee05b7b2 100644
--- a/chromium/media/audio/win/audio_low_latency_input_win.cc
+++ b/chromium/media/audio/win/audio_low_latency_input_win.cc
@@ -618,38 +618,40 @@ HRESULT WASAPIAudioInputStream::SetCaptureDevice() {
// Retrieve the IMMDevice by using the specified role or the specified
// unique endpoint device-identification string.
- if (device_id_ == AudioDeviceDescription::kDefaultDeviceId) {
- // Retrieve the default capture audio endpoint for the specified role.
- // Note that, in Windows Vista, the MMDevice API supports device roles
- // but the system-supplied user interface programs do not.
- hr = enumerator->GetDefaultAudioEndpoint(eCapture, eConsole,
- endpoint_device_.GetAddressOf());
- } else if (device_id_ == AudioDeviceDescription::kCommunicationsDeviceId) {
- hr = enumerator->GetDefaultAudioEndpoint(eCapture, eCommunications,
- endpoint_device_.GetAddressOf());
- } else if (device_id_ == AudioDeviceDescription::kLoopbackWithMuteDeviceId) {
- // Capture the default playback stream.
- hr = enumerator->GetDefaultAudioEndpoint(eRender, eConsole,
- endpoint_device_.GetAddressOf());
-
- if (SUCCEEDED(hr)) {
- endpoint_device_->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL,
- NULL, &system_audio_volume_);
- }
- } else if (device_id_ == AudioDeviceDescription::kLoopbackInputDeviceId) {
- // Capture the default playback stream.
- hr = enumerator->GetDefaultAudioEndpoint(eRender, eConsole,
- endpoint_device_.GetAddressOf());
+ // To open a stream in loopback mode, the client must obtain an IMMDevice
+ // interface for the rendering endpoint device, so select eRender for
+ // loopback devices; otherwise use the default capture data-flow direction.
+ const EDataFlow data_flow =
+ AudioDeviceDescription::IsLoopbackDevice(device_id_) ? eRender : eCapture;
+ // Determine selected role to be used if the device is a default device.
+ const ERole role = AudioDeviceDescription::IsCommunicationsDevice(device_id_)
+ ? eCommunications
+ : eConsole;
+ if (AudioDeviceDescription::IsDefaultDevice(device_id_) ||
+ AudioDeviceDescription::IsCommunicationsDevice(device_id_) ||
+ AudioDeviceDescription::IsLoopbackDevice(device_id_)) {
+ hr =
+ enumerator->GetDefaultAudioEndpoint(data_flow, role, &endpoint_device_);
} else {
hr = enumerator->GetDevice(base::UTF8ToUTF16(device_id_).c_str(),
endpoint_device_.GetAddressOf());
}
-
if (FAILED(hr)) {
open_result_ = OPEN_RESULT_NO_ENDPOINT;
return hr;
}
+ // If loopback device with muted system audio is requested, get the volume
+ // interface for the endpoint.
+ if (device_id_ == AudioDeviceDescription::kLoopbackWithMuteDeviceId) {
+ hr = endpoint_device_->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL,
+ nullptr, &system_audio_volume_);
+ if (FAILED(hr)) {
+ open_result_ = OPEN_RESULT_ACTIVATION_FAILED;
+ return hr;
+ }
+ }
+
// Verify that the audio endpoint device is active, i.e., the audio
// adapter that connects to the endpoint device is present and enabled.
DWORD state = DEVICE_STATE_DISABLED;
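The rewrite above collapses the four-way if/else into a (data_flow, role) pair computed from the device ID. The resulting mapping, restated for reference:

  device ID kind                  data_flow   role
  default capture device          eCapture    eConsole
  communications capture device   eCapture    eCommunications
  loopback devices                eRender     eConsole
  explicit endpoint ID            (direct GetDevice() lookup; role unused)

The IAudioEndpointVolume activation for kLoopbackWithMuteDeviceId, previously folded into one of the branches, now happens in a separate step after the endpoint is obtained.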
@@ -807,7 +809,7 @@ HRESULT WASAPIAudioInputStream::InitializeAudioEngine() {
100 * 1000 * 10, // Buffer duration, 100 ms expressed in 100-ns units.
0, // Device period, n/a for shared mode.
reinterpret_cast<const WAVEFORMATEX*>(&input_format_),
- device_id_ == AudioDeviceDescription::kCommunicationsDeviceId
+ AudioDeviceDescription::IsCommunicationsDevice(device_id_)
? &kCommunicationsSessionId
: nullptr);
@@ -887,7 +889,10 @@ HRESULT WASAPIAudioInputStream::InitializeAudioEngine() {
hr = audio_render_client_for_loopback_->Initialize(
AUDCLNT_SHAREMODE_SHARED,
AUDCLNT_STREAMFLAGS_EVENTCALLBACK | AUDCLNT_STREAMFLAGS_NOPERSIST, 0, 0,
- reinterpret_cast<const WAVEFORMATEX*>(&input_format_), NULL);
+ reinterpret_cast<const WAVEFORMATEX*>(&input_format_),
+ AudioDeviceDescription::IsCommunicationsDevice(device_id_)
+ ? &kCommunicationsSessionId
+ : nullptr);
if (FAILED(hr)) {
open_result_ = OPEN_RESULT_LOOPBACK_INIT_FAILED;
return hr;
diff --git a/chromium/media/audio/win/audio_low_latency_output_win.cc b/chromium/media/audio/win/audio_low_latency_output_win.cc
index 4a3fa164637..fbb3bf47a01 100644
--- a/chromium/media/audio/win/audio_low_latency_output_win.cc
+++ b/chromium/media/audio/win/audio_low_latency_output_win.cc
@@ -12,6 +12,7 @@
#include "base/command_line.h"
#include "base/logging.h"
#include "base/metrics/histogram.h"
+#include "base/metrics/histogram_macros.h"
#include "base/stl_util.h"
#include "base/strings/utf_string_conversions.h"
#include "base/time/time.h"
@@ -286,6 +287,8 @@ void WASAPIAudioOutputStream::Start(AudioSourceCallback* callback) {
source_ = callback;
num_written_frames_ = endpoint_buffer_size_frames_;
+ last_position_ = 0;
+ last_qpc_position_ = 0;
// Create and start the thread that will drive the rendering by waiting for
// render events.
@@ -333,6 +336,8 @@ void WASAPIAudioOutputStream::Stop() {
callback->OnError();
}
+ ReportAndResetStats();
+
// Extra safety check to ensure that the buffers are cleared.
// If the buffers are not cleared correctly, the next call to Start()
// would fail with AUDCLNT_E_BUFFER_ERROR at IAudioRenderClient::GetBuffer().
@@ -357,6 +362,10 @@ void WASAPIAudioOutputStream::Close() {
manager_->ReleaseOutputStream(this);
}
+// This stream is always used with sub-second buffer sizes, where it's
+// sufficient to simply always flush upon Start().
+void WASAPIAudioOutputStream::Flush() {}
+
void WASAPIAudioOutputStream::SetVolume(double volume) {
DVLOG(1) << "SetVolume(volume=" << volume << ")";
float volume_float = static_cast<float>(volume);
@@ -527,6 +536,43 @@ bool WASAPIAudioOutputStream::RenderAudioFromSource(UINT64 device_frequency) {
const uint64_t played_out_frames =
format_.Format.nSamplesPerSec * position / device_frequency;
+ // Check for glitches. A glitch is recorded whenever the stream's position
+ // has moved forward significantly less than the performance counter has.
+ // The threshold is half the buffer duration, to limit false positives.
+ if (last_qpc_position_ != 0) {
+ const int64_t buffer_duration_us = packet_size_frames_ *
+ base::Time::kMicrosecondsPerSecond /
+ format_.Format.nSamplesPerSec;
+
+ const int64_t position_us =
+ position * base::Time::kMicrosecondsPerSecond / device_frequency;
+ const int64_t last_position_us = last_position_ *
+ base::Time::kMicrosecondsPerSecond /
+ device_frequency;
+ // The QPC values are in 100 ns units.
+ const int64_t qpc_position_us = qpc_position / 10;
+ const int64_t last_qpc_position_us = last_qpc_position_ / 10;
+
+ const int64_t position_diff_us = position_us - last_position_us;
+ const int64_t qpc_position_diff_us =
+ qpc_position_us - last_qpc_position_us;
+
+ if (qpc_position_diff_us - position_diff_us > buffer_duration_us / 2) {
+ ++num_glitches_detected_;
+
+ base::TimeDelta glitch_duration = base::TimeDelta::FromMicroseconds(
+ qpc_position_diff_us - position_diff_us);
+
+ if (glitch_duration > largest_glitch_)
+ largest_glitch_ = glitch_duration;
+
+ cumulative_audio_lost_ += glitch_duration;
+ }
+ }
+
+ last_position_ = position;
+ last_qpc_position_ = qpc_position;
+
// Number of frames that have been written to the buffer but not yet
// played out.
const uint64_t delay_frames = num_written_frames_ - played_out_frames;
@@ -667,4 +713,21 @@ void WASAPIAudioOutputStream::StopThread() {
source_ = NULL;
}
+void WASAPIAudioOutputStream::ReportAndResetStats() {
+ // Even if there weren't any glitches, record the zero count to get a feel
+ // for how often glitch-free playback occurs versus the alternative.
+ UMA_HISTOGRAM_CUSTOM_COUNTS("Media.Audio.Render.Glitches",
+ num_glitches_detected_, 1, 999999, 100);
+ // Don't record these unless there actually was a glitch, though.
+ if (num_glitches_detected_ != 0) {
+ UMA_HISTOGRAM_COUNTS_1M("Media.Audio.Render.LostFramesInMs",
+ cumulative_audio_lost_.InMilliseconds());
+ UMA_HISTOGRAM_COUNTS_1M("Media.Audio.Render.LargestGlitchMs",
+ largest_glitch_.InMilliseconds());
+ }
+ num_glitches_detected_ = 0;
+ cumulative_audio_lost_ = base::TimeDelta();
+ largest_glitch_ = base::TimeDelta();
+}
+
} // namespace media
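The glitch check compares how far the device's playback position advanced against how far the QPC wall clock advanced over the same interval. A standalone sketch of the same arithmetic with illustrative numbers (48 kHz, 10 ms packets; all values below are assumptions, not measurements):

#include <cstdint>
#include <iostream>

int main() {
  const int64_t kMicrosecondsPerSecond = 1000000;
  const uint64_t sample_rate = 48000;       // format_.Format.nSamplesPerSec
  const uint64_t packet_size_frames = 480;  // 10 ms per packet
  const int64_t buffer_duration_us =
      packet_size_frames * kMicrosecondsPerSecond / sample_rate;  // 10000

  // Suppose the device position advanced 10 ms since the last callback,
  // but QPC (reported in 100 ns units) advanced 180000 units = 18 ms.
  const int64_t position_diff_us = 10000;
  const int64_t qpc_position_diff_us = 180000 / 10;  // 18000

  // 18000 - 10000 = 8000 us > 10000 / 2, so an 8 ms glitch is recorded.
  if (qpc_position_diff_us - position_diff_us > buffer_duration_us / 2) {
    std::cout << "glitch: " << (qpc_position_diff_us - position_diff_us)
              << " us of audio lost\n";
  }
  return 0;
}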
diff --git a/chromium/media/audio/win/audio_low_latency_output_win.h b/chromium/media/audio/win/audio_low_latency_output_win.h
index ad70b3919e7..7609f2ea141 100644
--- a/chromium/media/audio/win/audio_low_latency_output_win.h
+++ b/chromium/media/audio/win/audio_low_latency_output_win.h
@@ -138,6 +138,7 @@ class MEDIA_EXPORT WASAPIAudioOutputStream :
void Start(AudioSourceCallback* callback) override;
void Stop() override;
void Close() override;
+ void Flush() override;
void SetVolume(double volume) override;
void GetVolume(double* volume) override;
@@ -170,6 +171,9 @@ class MEDIA_EXPORT WASAPIAudioOutputStream :
// |source_| is set to NULL.
void StopThread();
+ // Reports audio stream glitch stats and resets them to their initial values.
+ void ReportAndResetStats();
+
// Contains the thread ID of the creating thread.
const base::PlatformThreadId creating_thread_id_;
@@ -220,6 +224,21 @@ class MEDIA_EXPORT WASAPIAudioOutputStream :
// Counts the number of audio frames written to the endpoint buffer.
UINT64 num_written_frames_;
+ // The position read during the last call to RenderAudioFromSource.
+ UINT64 last_position_ = 0;
+
+ // The performance counter read during the last call to
+ // RenderAudioFromSource.
+ UINT64 last_qpc_position_ = 0;
+
+ // The number of glitches detected while this stream was active.
+ int num_glitches_detected_ = 0;
+
+ // The approximate amount of audio lost due to glitches.
+ base::TimeDelta cumulative_audio_lost_;
+
+ // The largest single glitch recorded.
+ base::TimeDelta largest_glitch_;
+
// Pointer to the client that will deliver audio samples to be played out.
AudioSourceCallback* source_;
diff --git a/chromium/media/audio/win/audio_low_latency_output_win_unittest.cc b/chromium/media/audio/win/audio_low_latency_output_win_unittest.cc
index 877c8832f93..9730cebe994 100644
--- a/chromium/media/audio/win/audio_low_latency_output_win_unittest.cc
+++ b/chromium/media/audio/win/audio_low_latency_output_win_unittest.cc
@@ -49,10 +49,6 @@ using ::testing::Return;
namespace media {
-static const char kSpeechFile_16b_s_48k[] = "speech_16b_stereo_48kHz.raw";
-static const char kSpeechFile_16b_s_44k[] = "speech_16b_stereo_44kHz.raw";
-static const size_t kFileDurationMs = 20000;
-static const size_t kNumFileSegments = 2;
static const int kBitsPerSample = 16;
static const size_t kMaxDeltaSamples = 1000;
static const char kDeltaTimeMsFileName[] = "delta_times_ms.txt";
@@ -392,56 +388,6 @@ TEST_F(WASAPIAudioOutputStreamTest, ValidPacketSize) {
aos->Close();
}
-// This test is intended for manual tests and should only be enabled
-// when it is required to play out data from a local PCM file.
-// By default, GTest will print out YOU HAVE 1 DISABLED TEST.
-// To include disabled tests in test execution, just invoke the test program
-// with --gtest_also_run_disabled_tests or set the GTEST_ALSO_RUN_DISABLED_TESTS
-// environment variable to a value greater than 0.
-// The test files are approximately 20 seconds long.
-TEST_F(WASAPIAudioOutputStreamTest, DISABLED_ReadFromStereoFile) {
- ABORT_AUDIO_TEST_IF_NOT(HasCoreAudioAndOutputDevices(audio_manager_.get()));
-
- AudioOutputStreamWrapper aosw(audio_manager_.get());
- AudioOutputStream* aos = aosw.Create();
- EXPECT_TRUE(aos->Open());
-
- std::string file_name;
- if (aosw.sample_rate() == 48000) {
- file_name = kSpeechFile_16b_s_48k;
- } else if (aosw.sample_rate() == 44100) {
- file_name = kSpeechFile_16b_s_44k;
- } else if (aosw.sample_rate() == 96000) {
- // Use 48kHz file at 96kHz as well. Will sound like Donald Duck.
- file_name = kSpeechFile_16b_s_48k;
- } else {
- FAIL() << "This test supports 44.1, 48kHz and 96kHz only.";
- return;
- }
- ReadFromFileAudioSource file_source(file_name);
-
- DVLOG(0) << "File name : " << file_name.c_str();
- DVLOG(0) << "Sample rate : " << aosw.sample_rate();
- DVLOG(0) << "#channels : " << aosw.channels();
- DVLOG(0) << "File size : " << file_source.file_size();
- DVLOG(0) << "#file segments : " << kNumFileSegments;
- DVLOG(0) << ">> Listen to the stereo file while playing...";
-
- for (size_t i = 0; i < kNumFileSegments; i++) {
- // Each segment will start with a short (~20ms) block of zeros, hence
- // some short glitches might be heard in this test if kNumFileSegments
- // is larger than one. The exact length of the silence period depends on
- // the selected sample rate.
- aos->Start(&file_source);
- base::PlatformThread::Sleep(
- base::TimeDelta::FromMilliseconds(kFileDurationMs / kNumFileSegments));
- aos->Stop();
- }
-
- DVLOG(0) << ">> Stereo file playout has stopped.";
- aos->Close();
-}
-
// Verify that we can open the output stream in exclusive mode using a
// certain set of audio parameters and a sample rate of 48kHz.
// The expected outcomes of each setting in this test has been derived
diff --git a/chromium/media/audio/win/audio_manager_win.cc b/chromium/media/audio/win/audio_manager_win.cc
index f7efeff67fa..d16ee80b80c 100644
--- a/chromium/media/audio/win/audio_manager_win.cc
+++ b/chromium/media/audio/win/audio_manager_win.cc
@@ -19,7 +19,6 @@
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/command_line.h"
-#include "base/metrics/histogram_functions.h"
#include "base/strings/string_number_conversions.h"
#include "base/win/windows_version.h"
#include "media/audio/audio_device_description.h"
@@ -80,71 +79,6 @@ static int NumberOfWaveOutBuffers() {
return 3;
}
-static bool IsSupported(HRESULT hr) {
- return hr != S_FALSE && SUCCEEDED(hr);
-}
-
-// Records bitstream output support to histograms. Follows information from:
-// https://docs.microsoft.com/en-us/windows/desktop/coreaudio/representing-formats-for-iec-61937-transmissions
-static void LogBitstreamOutputSupport() {
- auto client = CoreAudioUtil::CreateClient(
- AudioDeviceDescription::kDefaultDeviceId, eRender, eConsole);
-
- // Happens if no audio output devices are available.
- if (!client)
- return;
-
- WAVEFORMATEXTENSIBLE wfext;
- memset(&wfext, 0, sizeof(wfext));
-
- // See link in function comment for where each value comes from.
- wfext.Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE;
- wfext.Format.nChannels = 2;
- wfext.Format.nSamplesPerSec = 192000;
- wfext.Format.nAvgBytesPerSec = 768000;
- wfext.Format.nBlockAlign = 4;
- wfext.Format.wBitsPerSample = 16;
- wfext.Format.cbSize = sizeof(WAVEFORMATEXTENSIBLE) - sizeof(WAVEFORMATEX);
- wfext.Samples.wValidBitsPerSample = 16;
- wfext.dwChannelMask = KSAUDIO_SPEAKER_7POINT1_SURROUND;
-
- // Test Dolby Digital+ / Atmos support. For whatever reason Atmos doesn't use
- // the KSDATAFORMAT_SUBTYPE_IEC61937_DOLBY_DIGITAL_PLUS_ATMOS SubFormat.
- wfext.SubFormat = KSDATAFORMAT_SUBTYPE_IEC61937_DOLBY_DIGITAL_PLUS;
-
- HRESULT hr = client->IsFormatSupported(AUDCLNT_SHAREMODE_EXCLUSIVE,
- &wfext.Format, nullptr);
- base::UmaHistogramBoolean("Media.Audio.Bitstream.EAC3", IsSupported(hr));
-
- // Test Dolby TrueHD.
- wfext.SubFormat = KSDATAFORMAT_SUBTYPE_IEC61937_DOLBY_MLP;
- hr = client->IsFormatSupported(AUDCLNT_SHAREMODE_EXCLUSIVE, &wfext.Format,
- nullptr);
- base::UmaHistogramBoolean("Media.Audio.Bitstream.TrueHD", IsSupported(hr));
-
- // Test DTS-HD.
- wfext.SubFormat = KSDATAFORMAT_SUBTYPE_IEC61937_DTS_HD;
- hr = client->IsFormatSupported(AUDCLNT_SHAREMODE_EXCLUSIVE, &wfext.Format,
- nullptr);
- base::UmaHistogramBoolean("Media.Audio.Bitstream.DTS-HD", IsSupported(hr));
-
- // Older bitstream formats run at lower sampling rates.
- wfext.Format.nSamplesPerSec = 48000;
- wfext.Format.nAvgBytesPerSec = 192000;
-
- // Test AC3.
- wfext.SubFormat = KSDATAFORMAT_SUBTYPE_IEC61937_DOLBY_DIGITAL;
- hr = client->IsFormatSupported(AUDCLNT_SHAREMODE_EXCLUSIVE, &wfext.Format,
- nullptr);
- base::UmaHistogramBoolean("Media.Audio.Bitstream.AC3", IsSupported(hr));
-
- // Test DTS.
- wfext.SubFormat = KSDATAFORMAT_SUBTYPE_IEC61937_DTS;
- hr = client->IsFormatSupported(AUDCLNT_SHAREMODE_EXCLUSIVE, &wfext.Format,
- nullptr);
- base::UmaHistogramBoolean("Media.Audio.Bitstream.DTS", IsSupported(hr));
-}
-
AudioManagerWin::AudioManagerWin(std::unique_ptr<AudioThread> audio_thread,
AudioLogFactory* audio_log_factory)
: AudioManagerBase(std::move(audio_thread), audio_log_factory) {
@@ -196,11 +130,6 @@ bool AudioManagerWin::HasAudioInputDevices() {
void AudioManagerWin::InitializeOnAudioThread() {
DCHECK(GetTaskRunner()->BelongsToCurrentThread());
- // Delay metrics recording to avoid any issues at startup.
- GetTaskRunner()->PostDelayedTask(FROM_HERE,
- base::BindOnce(&LogBitstreamOutputSupport),
- base::TimeDelta::FromSeconds(15));
-
// AudioDeviceListenerWin must be initialized on a COM thread.
output_device_listener_.reset(new AudioDeviceListenerWin(BindToCurrentLoop(
base::Bind(&AudioManagerWin::NotifyAllOutputDeviceChangeListeners,
diff --git a/chromium/media/audio/win/core_audio_util_win.cc b/chromium/media/audio/win/core_audio_util_win.cc
index f95b498a8f1..04ab5e3402b 100644
--- a/chromium/media/audio/win/core_audio_util_win.cc
+++ b/chromium/media/audio/win/core_audio_util_win.cc
@@ -24,6 +24,7 @@
#include "base/win/scoped_variant.h"
#include "base/win/windows_version.h"
#include "media/audio/audio_device_description.h"
+#include "media/audio/audio_features.h"
#include "media/base/media_switches.h"
using Microsoft::WRL::ComPtr;
@@ -313,7 +314,8 @@ ChannelConfig GuessChannelConfig(WORD channels) {
}
bool IAudioClient3IsSupported() {
- return CoreAudioUtil::GetIAudioClientVersion() >= 3;
+ return base::FeatureList::IsEnabled(features::kAllowIAudioClient3) &&
+ CoreAudioUtil::GetIAudioClientVersion() >= 3;
}
std::string GetDeviceID(IMMDevice* device) {
@@ -443,6 +445,27 @@ ComPtr<IMMDevice> CreateDeviceInternal(const std::string& device_id,
ERole role,
const UMALogCallback& uma_log_cb) {
ComPtr<IMMDevice> endpoint_device;
+ // In loopback mode, a client of WASAPI can capture the audio stream that
+ // is being played by a rendering endpoint device.
+ // See https://crbug.com/956526 for why we use both a DCHECK and then deal
+ // with the error here and below.
+ DCHECK(!(AudioDeviceDescription::IsLoopbackDevice(device_id) &&
+ data_flow != eCapture));
+ if (AudioDeviceDescription::IsLoopbackDevice(device_id) &&
+ data_flow != eCapture) {
+ LOG(WARNING) << "Loopback device must be an input device";
+ return endpoint_device;
+ }
+
+ // Usage of AudioDeviceDescription::kCommunicationsDeviceId as |device_id|
+ // is not allowed. Instead, set |device_id| to kDefaultDeviceId and select
+ // between default device and default communication device by using different
+ // |role| values (eConsole or eCommunications).
+ DCHECK(!AudioDeviceDescription::IsCommunicationsDevice(device_id));
+ if (AudioDeviceDescription::IsCommunicationsDevice(device_id)) {
+ LOG(WARNING) << "Invalid device identifier";
+ return endpoint_device;
+ }
// Create the IMMDeviceEnumerator interface.
ComPtr<IMMDeviceEnumerator> device_enum(
@@ -455,9 +478,8 @@ ComPtr<IMMDevice> CreateDeviceInternal(const std::string& device_id,
hr =
device_enum->GetDefaultAudioEndpoint(data_flow, role, &endpoint_device);
} else if (AudioDeviceDescription::IsLoopbackDevice(device_id)) {
- // Obtain an IMMDevice interface for the rendering endpoint device since
- // loopback mode is selected.
- // TODO(http://crbug/956526): clean up code related to loopback mode.
+ // To open a stream in loopback mode, the client must obtain an IMMDevice
+ // interface for the *rendering* endpoint device.
hr = device_enum->GetDefaultAudioEndpoint(eRender, role, &endpoint_device);
} else {
hr = device_enum->GetDevice(base::UTF8ToUTF16(device_id).c_str(),
@@ -686,11 +708,11 @@ base::TimeDelta CoreAudioUtil::ReferenceTimeToTimeDelta(REFERENCE_TIME time) {
}
uint32_t CoreAudioUtil::GetIAudioClientVersion() {
- if (base::win::GetVersion() >= base::win::VERSION_WIN10) {
+ if (base::win::GetVersion() >= base::win::Version::WIN10) {
// Minimum supported client: Windows 10.
// Minimum supported server: Windows Server 2016
return 3;
- } else if (base::win::GetVersion() >= base::win::VERSION_WIN8) {
+ } else if (base::win::GetVersion() >= base::win::Version::WIN8) {
// Minimum supported client: Windows 8.
// Minimum supported server: Windows Server 2012.
return 2;
@@ -1068,6 +1090,15 @@ HRESULT CoreAudioUtil::GetPreferredAudioParameters(const std::string& device_id,
UMALogCallback uma_log_cb(
is_output_device ? base::BindRepeating(&LogUMAPreferredOutputParams)
: base::BindRepeating(&LogUMAEmptyCb));
+
+ // Loopback audio streams must be input streams.
+ DCHECK(!(AudioDeviceDescription::IsLoopbackDevice(device_id) &&
+ is_output_device));
+ if (AudioDeviceDescription::IsLoopbackDevice(device_id) && is_output_device) {
+ LOG(WARNING) << "Loopback device must be an input device";
+ return E_FAIL;
+ }
+
ComPtr<IMMDevice> device(
CreateDeviceByID(device_id, is_output_device, uma_log_cb));
if (!device.Get())
@@ -1101,7 +1132,10 @@ HRESULT CoreAudioUtil::GetPreferredAudioParameters(const std::string& device_id,
ChannelConfig CoreAudioUtil::GetChannelConfig(const std::string& device_id,
EDataFlow data_flow) {
- ComPtr<IAudioClient> client(CreateClient(device_id, data_flow, eConsole));
+ const ERole role = AudioDeviceDescription::IsCommunicationsDevice(device_id)
+ ? eCommunications
+ : eConsole;
+ ComPtr<IAudioClient> client(CreateClient(device_id, data_flow, role));
WAVEFORMATEXTENSIBLE mix_format;
if (!client.Get() ||
diff --git a/chromium/media/audio/win/core_audio_util_win.h b/chromium/media/audio/win/core_audio_util_win.h
index 61ed47221e4..1a797e3345b 100644
--- a/chromium/media/audio/win/core_audio_util_win.h
+++ b/chromium/media/audio/win/core_audio_util_win.h
@@ -205,8 +205,6 @@ class MEDIA_EXPORT CoreAudioUtil {
// speaker, and so on, continuing in the order defined in KsMedia.h.
// See http://msdn.microsoft.com/en-us/library/windows/hardware/ff537083(v=vs.85).aspx
// for more details.
- // To get the channel config of the default device, pass an empty string
- // for |device_id|.
static ChannelConfig GetChannelConfig(const std::string& device_id,
EDataFlow data_flow);
diff --git a/chromium/media/audio/win/core_audio_util_win_unittest.cc b/chromium/media/audio/win/core_audio_util_win_unittest.cc
index dffdc641e99..8fc1330f31b 100644
--- a/chromium/media/audio/win/core_audio_util_win_unittest.cc
+++ b/chromium/media/audio/win/core_audio_util_win_unittest.cc
@@ -131,6 +131,18 @@ TEST_F(CoreAudioUtilWinTest, CreateDeviceEnumerator) {
EXPECT_TRUE(enumerator.Get());
}
+TEST_F(CoreAudioUtilWinTest, GetDefaultDeviceIDs) {
+ ABORT_AUDIO_TEST_IF_NOT(DevicesAvailable());
+ std::string default_device_id = CoreAudioUtil::GetDefaultInputDeviceID();
+ EXPECT_FALSE(default_device_id.empty());
+ default_device_id = CoreAudioUtil::GetDefaultOutputDeviceID();
+ EXPECT_FALSE(default_device_id.empty());
+ default_device_id = CoreAudioUtil::GetCommunicationsInputDeviceID();
+ EXPECT_FALSE(default_device_id.empty());
+ default_device_id = CoreAudioUtil::GetCommunicationsOutputDeviceID();
+ EXPECT_FALSE(default_device_id.empty());
+}
+
TEST_F(CoreAudioUtilWinTest, CreateDefaultDevice) {
ABORT_AUDIO_TEST_IF_NOT(DevicesAvailable());
@@ -171,7 +183,7 @@ TEST_F(CoreAudioUtilWinTest, CreateDevice) {
EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetDeviceName(
default_render_device.Get(), &default_render_name)));
- // Use the uniqe ID as input to CreateDevice() and create a corresponding
+ // Use the unique ID as input to CreateDevice() and create a corresponding
// IMMDevice.
ComPtr<IMMDevice> audio_device = CoreAudioUtil::CreateDevice(
default_render_name.unique_id, EDataFlow(), ERole());
@@ -383,10 +395,17 @@ TEST_F(CoreAudioUtilWinTest, GetPreferredAudioParameters) {
// and capture devices.
for (size_t i = 0; i < base::size(data); ++i) {
AudioParameters params;
+ const bool is_output_device = (data[i] == eRender);
EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetPreferredAudioParameters(
- AudioDeviceDescription::kDefaultDeviceId, data[i] == eRender,
- &params)));
+ AudioDeviceDescription::kDefaultDeviceId, is_output_device, &params)));
EXPECT_TRUE(params.IsValid());
+ if (!is_output_device) {
+ // Loopback devices are supported for input streams.
+ EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetPreferredAudioParameters(
+ AudioDeviceDescription::kLoopbackInputDeviceId, is_output_device,
+ &params)));
+ EXPECT_TRUE(params.IsValid());
+ }
}
}
@@ -396,10 +415,22 @@ TEST_F(CoreAudioUtilWinTest, GetChannelConfig) {
EDataFlow data_flows[] = {eRender, eCapture};
for (auto data_flow : data_flows) {
- ChannelConfig config =
+ ChannelConfig config1 =
CoreAudioUtil::GetChannelConfig(std::string(), data_flow);
- EXPECT_NE(config, CHANNEL_LAYOUT_NONE);
- EXPECT_NE(config, CHANNEL_LAYOUT_UNSUPPORTED);
+ EXPECT_NE(config1, CHANNEL_LAYOUT_NONE);
+ EXPECT_NE(config1, CHANNEL_LAYOUT_UNSUPPORTED);
+ ChannelConfig config2 = CoreAudioUtil::GetChannelConfig(
+ AudioDeviceDescription::kDefaultDeviceId, data_flow);
+ EXPECT_EQ(config1, config2);
+ // For loopback input devices, verify that the channel configuration is the
+ // same as for the default output device.
+ if (data_flow == eCapture) {
+ config1 = CoreAudioUtil::GetChannelConfig(
+ AudioDeviceDescription::kLoopbackInputDeviceId, data_flow);
+ config2 = CoreAudioUtil::GetChannelConfig(
+ AudioDeviceDescription::kDefaultDeviceId, eRender);
+ EXPECT_EQ(config1, config2);
+ }
}
}
@@ -583,13 +614,6 @@ TEST_F(CoreAudioUtilWinTest, GetMatchingOutputDeviceID) {
EXPECT_TRUE(found_a_pair);
}
-TEST_F(CoreAudioUtilWinTest, GetDefaultOutputDeviceID) {
- ABORT_AUDIO_TEST_IF_NOT(DevicesAvailable());
-
- std::string default_device_id(CoreAudioUtil::GetDefaultOutputDeviceID());
- EXPECT_FALSE(default_device_id.empty());
-}
-
TEST_F(CoreAudioUtilWinTest, CheckGetPreferredAudioParametersUMAStats) {
base::HistogramTester tester;
ABORT_AUDIO_TEST_IF_NOT(DevicesAvailable());
diff --git a/chromium/media/audio/win/waveout_output_win.cc b/chromium/media/audio/win/waveout_output_win.cc
index 70d830f6213..64878cbcd8f 100644
--- a/chromium/media/audio/win/waveout_output_win.cc
+++ b/chromium/media/audio/win/waveout_output_win.cc
@@ -4,7 +4,8 @@
#include "media/audio/win/waveout_output_win.h"
-#include "base/atomicops.h"
+#include <atomic>
+
#include "base/logging.h"
#include "base/time/time.h"
#include "base/trace_event/trace_event.h"
@@ -211,7 +212,7 @@ void PCMWaveOutAudioOutputStream::Start(AudioSourceCallback* callback) {
// From now on |pending_bytes_| will be accessed by the callback thread.
// Most likely waveOutPause() or waveOutRestart() has its own memory barrier,
// but issuing our own is safer.
- base::subtle::MemoryBarrier();
+ std::atomic_thread_fence(std::memory_order_seq_cst);
MMRESULT result = ::waveOutPause(waveout_);
if (result != MMSYSERR_NOERROR) {
@@ -246,7 +247,7 @@ void PCMWaveOutAudioOutputStream::Stop() {
if (state_ != PCMA_PLAYING)
return;
state_ = PCMA_STOPPING;
- base::subtle::MemoryBarrier();
+ std::atomic_thread_fence(std::memory_order_seq_cst);
// Stop watching for buffer event, waits until outstanding callbacks finish.
if (waiting_handle_) {
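The two fence substitutions above are drop-in: std::atomic_thread_fence(std::memory_order_seq_cst) provides at least the ordering base::subtle::MemoryBarrier() did. As a self-contained illustration of the idiom (not code from this patch), a fence orders plain writes ahead of a subsequent relaxed flag store:

#include <atomic>

std::atomic<bool> ready{false};
int payload = 0;

void Producer() {
  payload = 42;                                         // plain store
  std::atomic_thread_fence(std::memory_order_seq_cst);  // fence after the data
  ready.store(true, std::memory_order_relaxed);
}

void Consumer() {
  while (!ready.load(std::memory_order_relaxed)) {
  }
  std::atomic_thread_fence(std::memory_order_seq_cst);  // pairs with the above
  // payload is guaranteed to read 42 here.
}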
@@ -301,6 +302,10 @@ void PCMWaveOutAudioOutputStream::Close() {
manager_->ReleaseOutputStream(this);
}
+// This stream is always used with sub-second buffer sizes, where it's
+// sufficient to simply always flush upon Start().
+void PCMWaveOutAudioOutputStream::Flush() {}
+
void PCMWaveOutAudioOutputStream::SetVolume(double volume) {
if (!waveout_)
return;
diff --git a/chromium/media/audio/win/waveout_output_win.h b/chromium/media/audio/win/waveout_output_win.h
index 76d81eab3a7..f61d79ced03 100644
--- a/chromium/media/audio/win/waveout_output_win.h
+++ b/chromium/media/audio/win/waveout_output_win.h
@@ -46,6 +46,7 @@ class PCMWaveOutAudioOutputStream : public AudioOutputStream {
// Implementation of AudioOutputStream.
bool Open() override;
void Close() override;
+ void Flush() override;
void Start(AudioSourceCallback* callback) override;
void Stop() override;
void SetVolume(double volume) override;
diff --git a/chromium/media/base/BUILD.gn b/chromium/media/base/BUILD.gn
index fcc6ab6cd3f..2ee16b0d5ec 100644
--- a/chromium/media/base/BUILD.gn
+++ b/chromium/media/base/BUILD.gn
@@ -285,10 +285,10 @@ jumbo_source_set("base") {
"video_frame_pool.h",
"video_renderer.cc",
"video_renderer.h",
- "video_rotation.cc",
- "video_rotation.h",
"video_thumbnail_decoder.cc",
"video_thumbnail_decoder.h",
+ "video_transformation.cc",
+ "video_transformation.h",
"video_types.cc",
"video_types.h",
"video_util.cc",
@@ -311,6 +311,7 @@ jumbo_source_set("base") {
deps = [
"//base/allocator:buildflags",
"//gpu/command_buffer/common",
+ "//gpu/ipc/common:common",
"//skia",
"//third_party/libyuv",
"//third_party/widevine/cdm:headers",
@@ -319,6 +320,7 @@ jumbo_source_set("base") {
"//ui/events:events_base",
"//url:url",
]
+
libs = []
configs += [
"//build/config:precompiled_headers",
@@ -550,6 +552,7 @@ source_set("unit_tests") {
]
deps = [
"//base/test:test_support",
+ "//components/viz/common",
"//gpu/command_buffer/common",
"//media:test_support",
"//skia",
diff --git a/chromium/media/base/android/media_codec_bridge_impl_unittest.cc b/chromium/media/base/android/media_codec_bridge_impl_unittest.cc
index e95a8c50c63..f4097fc50d4 100644
--- a/chromium/media/base/android/media_codec_bridge_impl_unittest.cc
+++ b/chromium/media/base/android/media_codec_bridge_impl_unittest.cc
@@ -439,8 +439,8 @@ TEST(MediaCodecBridgeTest, CreateUnsupportedCodec) {
TEST(MediaCodecBridgeTest, H264VideoEncodeAndValidate) {
SKIP_TEST_IF_HW_H264_IS_NOT_AVAILABLE();
- const int width = 320;
- const int height = 192;
+ const int width = 640;
+ const int height = 360;
const int bit_rate = 300000;
const int frame_rate = 30;
const int i_frame_interval = 20;
@@ -464,8 +464,8 @@ TEST(MediaCodecBridgeTest, H264VideoEncodeAndValidate) {
i_frame_interval, color_format));
ASSERT_THAT(media_codec, NotNull());
- const char* src_filename = "bear_320x192_40frames.yuv";
- base::FilePath src_file = GetTestDataFilePath(src_filename);
+ const char kSrcFileName[] = "bali_640x360_P420.yuv";
+ base::FilePath src_file = GetTestDataFilePath(kSrcFileName);
int64_t src_file_size = 0;
ASSERT_TRUE(base::GetFileSize(src_file, &src_file_size));
@@ -479,15 +479,15 @@ TEST(MediaCodecBridgeTest, H264VideoEncodeAndValidate) {
base::File src(src_file, base::File::FLAG_OPEN | base::File::FLAG_READ);
std::unique_ptr<uint8_t[]> frame_data =
std::make_unique<uint8_t[]>(frame_size);
- off_t src_offset = 0;
+ ASSERT_THAT(
+ src.Read(0, reinterpret_cast<char*>(frame_data.get()), frame_size),
+ frame_size);
+
// A monotonically-growing value.
base::TimeDelta input_timestamp;
- // Src_file should contain 40 frames. Here we only encode 3 of them.
- for (int frame = 0; frame < num_frames && frame < 3; frame++) {
- ASSERT_THAT(src.Read(src_offset, (char*)frame_data.get(), frame_size),
- frame_size);
- src_offset += static_cast<off_t>(frame_size);
+ // Src_file contains 1 frame. Encode it 3 times.
+ for (int frame = 0; frame < num_frames && frame < 3; frame++) {
input_timestamp += base::TimeDelta::FromMicroseconds(
base::Time::kMicrosecondsPerSecond / frame_rate);
EncodeMediaFrame(media_codec.get(), frame_data.get(), frame_size, width,
@@ -498,10 +498,6 @@ TEST(MediaCodecBridgeTest, H264VideoEncodeAndValidate) {
// also contain SPS/PPS NALUs.
media_codec->RequestKeyFrameSoon();
for (int frame = 0; frame < num_frames && frame < 3; frame++) {
- ASSERT_THAT(src.Read(src_offset, (char*)frame_data.get(), frame_size),
- frame_size);
- src_offset += static_cast<off_t>(frame_size);
-
input_timestamp += base::TimeDelta::FromMicroseconds(
base::Time::kMicrosecondsPerSecond / frame_rate);
EncodeMediaFrame(media_codec.get(), frame_data.get(), frame_size, width,
diff --git a/chromium/media/base/android/media_codec_loop_unittest.cc b/chromium/media/base/android/media_codec_loop_unittest.cc
index 1e2eb8c86e7..7e9a579e6ad 100644
--- a/chromium/media/base/android/media_codec_loop_unittest.cc
+++ b/chromium/media/base/android/media_codec_loop_unittest.cc
@@ -17,6 +17,7 @@
using ::testing::_;
using ::testing::AtLeast;
+using ::testing::DoAll;
using ::testing::Eq;
using ::testing::Field;
using ::testing::InSequence;
diff --git a/chromium/media/base/android/media_codec_util.cc b/chromium/media/base/android/media_codec_util.cc
index 51f30953c19..a6d6ee702de 100644
--- a/chromium/media/base/android/media_codec_util.cc
+++ b/chromium/media/base/android/media_codec_util.cc
@@ -263,10 +263,8 @@ bool MediaCodecUtil::AddSupportedCodecProfileLevels(
JNIEnv* env = AttachCurrentThread();
ScopedJavaLocalRef<jobjectArray> j_codec_profile_levels(
Java_MediaCodecUtil_getSupportedCodecProfileLevels(env));
- int java_array_length = env->GetArrayLength(j_codec_profile_levels.obj());
- for (int i = 0; i < java_array_length; ++i) {
- ScopedJavaLocalRef<jobject> java_codec_profile_level(
- env, env->GetObjectArrayElement(j_codec_profile_levels.obj(), i));
+ for (auto java_codec_profile_level :
+ j_codec_profile_levels.ReadElements<jobject>()) {
result->push_back(MediaCodecProfileLevelToChromiumProfileLevel(
env, java_codec_profile_level));
}
diff --git a/chromium/media/base/android/media_crypto_context.h b/chromium/media/base/android/media_crypto_context.h
index 77f5c318e53..1bc3686bf29 100644
--- a/chromium/media/base/android/media_crypto_context.h
+++ b/chromium/media/base/android/media_crypto_context.h
@@ -34,14 +34,14 @@ class MEDIA_EXPORT MediaCryptoContext : public PlayerTracker {
// Should be ignored if |media_crypto|
// contains a null MediaCrypto object.
using MediaCryptoReadyCB =
- base::Callback<void(JavaObjectPtr media_crypto,
- bool requires_secure_video_codec)>;
+ base::OnceCallback<void(JavaObjectPtr media_crypto,
+ bool requires_secure_video_codec)>;
MediaCryptoContext() {}
~MediaCryptoContext() override {}
virtual void SetMediaCryptoReadyCB(
- const MediaCryptoReadyCB& media_crypto_ready_cb) = 0;
+ MediaCryptoReadyCB media_crypto_ready_cb) = 0;
DISALLOW_COPY_AND_ASSIGN(MediaCryptoContext);
};
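The base::Callback to base::OnceCallback change here (and in the files that follow) tightens the ownership contract: a OnceCallback can be run at most once and must be moved, not copied. A minimal sketch of that contract using only //base primitives:

base::OnceCallback<void(int)> cb =
    base::BindOnce([](int value) { /* consume |value| */ });
// Move-only: passing it onward or running it consumes the callback.
std::move(cb).Run(42);
DCHECK(!cb);  // Run() nulled it out; a second Run() would be a bug.
// Where repeated invocation is genuinely needed, base::RepeatingCallback and
// base::BindRepeating retain the old copyable semantics.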
diff --git a/chromium/media/base/android/media_crypto_context_impl.cc b/chromium/media/base/android/media_crypto_context_impl.cc
index d50384894f3..434c94250ec 100644
--- a/chromium/media/base/android/media_crypto_context_impl.cc
+++ b/chromium/media/base/android/media_crypto_context_impl.cc
@@ -25,8 +25,8 @@ void MediaCryptoContextImpl::UnregisterPlayer(int registration_id) {
}
void MediaCryptoContextImpl::SetMediaCryptoReadyCB(
- const MediaCryptoReadyCB& media_crypto_ready_cb) {
- media_drm_bridge_->SetMediaCryptoReadyCB(media_crypto_ready_cb);
+ MediaCryptoReadyCB media_crypto_ready_cb) {
+ media_drm_bridge_->SetMediaCryptoReadyCB(std::move(media_crypto_ready_cb));
}
} // namespace media
diff --git a/chromium/media/base/android/media_crypto_context_impl.h b/chromium/media/base/android/media_crypto_context_impl.h
index a3b83fb4830..24a4ca1423a 100644
--- a/chromium/media/base/android/media_crypto_context_impl.h
+++ b/chromium/media/base/android/media_crypto_context_impl.h
@@ -41,8 +41,7 @@ class MEDIA_EXPORT MediaCryptoContextImpl : public MediaCryptoContext {
void UnregisterPlayer(int registration_id) final;
// MediaCryptoContext implementation.
- void SetMediaCryptoReadyCB(
- const MediaCryptoReadyCB& media_crypto_ready_cb) final;
+ void SetMediaCryptoReadyCB(MediaCryptoReadyCB media_crypto_ready_cb) final;
private:
MediaDrmBridge* const media_drm_bridge_;
diff --git a/chromium/media/base/android/media_drm_bridge.cc b/chromium/media/base/android/media_drm_bridge.cc
index 222e868bdc5..1a759fe57e2 100644
--- a/chromium/media/base/android/media_drm_bridge.cc
+++ b/chromium/media/base/android/media_drm_bridge.cc
@@ -38,10 +38,11 @@
#include "third_party/widevine/cdm/widevine_cdm_common.h"
using base::android::AttachCurrentThread;
-using base::android::ConvertUTF8ToJavaString;
using base::android::ConvertJavaStringToUTF8;
+using base::android::ConvertUTF8ToJavaString;
using base::android::JavaByteArrayToByteVector;
using base::android::JavaByteArrayToString;
+using base::android::JavaObjectArrayReader;
using base::android::JavaParamRef;
using base::android::ScopedJavaGlobalRef;
using base::android::ScopedJavaLocalRef;
@@ -625,12 +626,12 @@ void MediaDrmBridge::RejectPromise(uint32_t promise_id,
}
void MediaDrmBridge::SetMediaCryptoReadyCB(
- const MediaCryptoReadyCB& media_crypto_ready_cb) {
+ MediaCryptoReadyCB media_crypto_ready_cb) {
if (!task_runner_->BelongsToCurrentThread()) {
task_runner_->PostTask(
- FROM_HERE,
- base::BindOnce(&MediaDrmBridge::SetMediaCryptoReadyCB,
- weak_factory_.GetWeakPtr(), media_crypto_ready_cb));
+ FROM_HERE, base::BindOnce(&MediaDrmBridge::SetMediaCryptoReadyCB,
+ weak_factory_.GetWeakPtr(),
+ std::move(media_crypto_ready_cb)));
return;
}
@@ -642,7 +643,7 @@ void MediaDrmBridge::SetMediaCryptoReadyCB(
}
DCHECK(!media_crypto_ready_cb_);
- media_crypto_ready_cb_ = media_crypto_ready_cb;
+ media_crypto_ready_cb_ = std::move(media_crypto_ready_cb);
if (!j_media_crypto_)
return;
@@ -772,13 +773,10 @@ void MediaDrmBridge::OnSessionKeysChange(
CdmKeysInfo cdm_keys_info;
- size_t size = env->GetArrayLength(j_keys_info);
- DCHECK_GT(size, 0u);
-
- for (size_t i = 0; i < size; ++i) {
- ScopedJavaLocalRef<jobject> j_key_status(
- env, env->GetObjectArrayElement(j_keys_info, i));
+ JavaObjectArrayReader<jobject> j_keys_info_array(j_keys_info);
+ DCHECK_GT(j_keys_info_array.size(), 0);
+ for (auto j_key_status : j_keys_info_array) {
ScopedJavaLocalRef<jbyteArray> j_key_id =
Java_KeyStatus_getKeyId(env, j_key_status);
std::vector<uint8_t> key_id;
diff --git a/chromium/media/base/android/media_drm_bridge.h b/chromium/media/base/android/media_drm_bridge.h
index 852c8cb2d4d..79c12136cda 100644
--- a/chromium/media/base/android/media_drm_bridge.h
+++ b/chromium/media/base/android/media_drm_bridge.h
@@ -166,7 +166,7 @@ class MEDIA_EXPORT MediaDrmBridge : public ContentDecryptionModule,
// The registered callbacks will be fired on |task_runner_|. The caller
// should make sure that the callbacks are posted to the correct thread.
// TODO(xhwang): Move this up to be close to RegisterPlayer().
- void SetMediaCryptoReadyCB(const MediaCryptoReadyCB& media_crypto_ready_cb);
+ void SetMediaCryptoReadyCB(MediaCryptoReadyCB media_crypto_ready_cb);
// All the OnXxx functions below are called from Java. The implementation must
// only do minimal work and then post tasks to avoid reentrancy issues.
diff --git a/chromium/media/base/android/media_drm_bridge_factory.cc b/chromium/media/base/android/media_drm_bridge_factory.cc
index 0b9dd42a9fa..e8ce4c94ef1 100644
--- a/chromium/media/base/android/media_drm_bridge_factory.cc
+++ b/chromium/media/base/android/media_drm_bridge_factory.cc
@@ -122,7 +122,7 @@ void MediaDrmBridgeFactory::CreateMediaDrmBridge(const std::string& origin_id) {
return;
}
- media_drm_bridge_->SetMediaCryptoReadyCB(base::BindRepeating(
+ media_drm_bridge_->SetMediaCryptoReadyCB(base::BindOnce(
&MediaDrmBridgeFactory::OnMediaCryptoReady, weak_factory_.GetWeakPtr()));
}
diff --git a/chromium/media/base/android/mock_media_crypto_context.cc b/chromium/media/base/android/mock_media_crypto_context.cc
index cb0fbd42adf..c85f690c424 100644
--- a/chromium/media/base/android/mock_media_crypto_context.cc
+++ b/chromium/media/base/android/mock_media_crypto_context.cc
@@ -15,6 +15,12 @@ using ::testing::Return;
using ::testing::SaveArg;
using ::testing::_;
+ACTION_TEMPLATE(MoveArg,
+ HAS_1_TEMPLATE_PARAMS(int, k),
+ AND_1_VALUE_PARAMS(out)) {
+ *out = std::move(::testing::get<k>(args));
+}
+
namespace media {
MockMediaCryptoContext::MockMediaCryptoContext(bool has_media_crypto_context)
@@ -26,8 +32,8 @@ MockMediaCryptoContext::MockMediaCryptoContext(bool has_media_crypto_context)
ON_CALL(*this, RegisterPlayer(_, _))
.WillByDefault(DoAll(SaveArg<0>(&new_key_cb), SaveArg<1>(&cdm_unset_cb),
Return(kRegistrationId)));
- ON_CALL(*this, SetMediaCryptoReadyCB(_))
- .WillByDefault(SaveArg<0>(&media_crypto_ready_cb));
+ ON_CALL(*this, SetMediaCryptoReadyCB_(_))
+ .WillByDefault(MoveArg<0>(&media_crypto_ready_cb));
// Don't set any expectation on the number of correct calls to
// UnregisterPlayer, but expect no calls with the wrong registration id.
diff --git a/chromium/media/base/android/mock_media_crypto_context.h b/chromium/media/base/android/mock_media_crypto_context.h
index ceb42465988..6630574cd39 100644
--- a/chromium/media/base/android/mock_media_crypto_context.h
+++ b/chromium/media/base/android/mock_media_crypto_context.h
@@ -30,14 +30,20 @@ class MEDIA_EXPORT MockMediaCryptoContext
int(const base::Closure& new_key_cb,
const base::Closure& cdm_unset_cb));
MOCK_METHOD1(UnregisterPlayer, void(int registration_id));
- MOCK_METHOD1(SetMediaCryptoReadyCB,
- void(const MediaCryptoReadyCB& media_crypto_ready_cb));
+ void SetMediaCryptoReadyCB(
+ MediaCryptoReadyCB media_crypto_ready_cb) override {
+ SetMediaCryptoReadyCB_(media_crypto_ready_cb);
+ }
+ MOCK_METHOD1(SetMediaCryptoReadyCB_,
+ void(MediaCryptoReadyCB& media_crypto_ready_cb));
static constexpr int kRegistrationId = 1000;
base::Closure new_key_cb;
base::Closure cdm_unset_cb;
MediaCryptoReadyCB media_crypto_ready_cb;
+ // To be set to true when |media_crypto_ready_cb| is consumed and run.
+ bool ran_media_crypto_ready_cb = false;
private:
bool has_media_crypto_context_;
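The SetMediaCryptoReadyCB_ indirection above is the standard gMock workaround of this era for move-only parameters: MOCK_METHODn cannot accept a move-only type by value, so the real override takes the callback by value and forwards an lvalue reference to a mockable shim, from which the MoveArg action (defined in the .cc above) can steal it. A generic sketch of the pattern, with Sink and Accept as hypothetical names:

class MockSink : public Sink {
 public:
  // Real override: receives the move-only callback, then exposes it to the
  // mock method as an lvalue reference so actions can move from it.
  void Accept(base::OnceClosure cb) override { Accept_(cb); }
  MOCK_METHOD1(Accept_, void(base::OnceClosure& cb));
};

// In a test:
//   base::OnceClosure saved;
//   EXPECT_CALL(mock, Accept_(_)).WillOnce(MoveArg<0>(&saved));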
diff --git a/chromium/media/base/android/stream_texture_wrapper.h b/chromium/media/base/android/stream_texture_wrapper.h
index 78ac8388acf..13e06d8bf5e 100644
--- a/chromium/media/base/android/stream_texture_wrapper.h
+++ b/chromium/media/base/android/stream_texture_wrapper.h
@@ -22,7 +22,7 @@ class MEDIA_EXPORT StreamTextureWrapper {
// Initialize the underlying StreamTexture.
// See StreamTextureWrapperImpl.
virtual void Initialize(
- const base::Closure& received_frame_cb,
+ const base::RepeatingClosure& received_frame_cb,
const gfx::Size& natural_size,
scoped_refptr<base::SingleThreadTaskRunner> compositor_task_runner,
const StreamTextureWrapperInitCB& init_cb) = 0;
diff --git a/chromium/media/base/audio_buffer.cc b/chromium/media/base/audio_buffer.cc
index 17adcb578eb..6de13f0cc5e 100644
--- a/chromium/media/base/audio_buffer.cc
+++ b/chromium/media/base/audio_buffer.cc
@@ -240,7 +240,7 @@ void AudioBuffer::AdjustSampleRate(int sample_rate) {
void AudioBuffer::ReadFrames(int frames_to_copy,
int source_frame_offset,
int dest_frame_offset,
- AudioBus* dest) {
+ AudioBus* dest) const {
// Deinterleave each channel (if necessary) and convert to 32bit
// floating-point with nominal range -1.0 -> +1.0 (if necessary).
@@ -411,7 +411,7 @@ void AudioBuffer::TrimRange(int start, int end) {
TrimEnd(frames_to_trim);
}
-bool AudioBuffer::IsBitstreamFormat() {
+bool AudioBuffer::IsBitstreamFormat() const {
return IsBitstream(sample_format_);
}
diff --git a/chromium/media/base/audio_buffer.h b/chromium/media/base/audio_buffer.h
index abd2c779a4b..e96bfa8edf9 100644
--- a/chromium/media/base/audio_buffer.h
+++ b/chromium/media/base/audio_buffer.h
@@ -129,7 +129,7 @@ class MEDIA_EXPORT AudioBuffer
void ReadFrames(int frames_to_copy,
int source_frame_offset,
int dest_frame_offset,
- AudioBus* dest);
+ AudioBus* dest) const;
// Trim an AudioBuffer by removing |frames_to_trim| frames from the start.
// Timestamp and duration are adjusted to reflect the fewer frames.
@@ -146,7 +146,7 @@ class MEDIA_EXPORT AudioBuffer
void TrimRange(int start, int end);
// Return true if the buffer contains compressed bitstream.
- bool IsBitstreamFormat();
+ bool IsBitstreamFormat() const;
// Return the number of channels.
int channel_count() const { return channel_count_; }
@@ -183,7 +183,7 @@ class MEDIA_EXPORT AudioBuffer
// mojo::TypeConverter added as a friend so that AudioBuffer can be
// transferred across a mojo connection.
friend struct mojo::TypeConverter<mojo::StructPtr<mojom::AudioBuffer>,
- scoped_refptr<AudioBuffer>>;
+ AudioBuffer>;
// Allocates aligned contiguous buffer to hold all channel data (1 block for
// interleaved data, |channel_count| blocks for planar data), copies
diff --git a/chromium/media/base/audio_buffer_converter.cc b/chromium/media/base/audio_buffer_converter.cc
index 4026a2c95dc..17779fc6f92 100644
--- a/chromium/media/base/audio_buffer_converter.cc
+++ b/chromium/media/base/audio_buffer_converter.cc
@@ -19,10 +19,10 @@ namespace media {
// Is the config presented by |buffer| a config change from |params|?
static bool IsConfigChange(const AudioParameters& params,
- const scoped_refptr<AudioBuffer>& buffer) {
- return buffer->sample_rate() != params.sample_rate() ||
- buffer->channel_count() != params.channels() ||
- buffer->channel_layout() != params.channel_layout();
+ const AudioBuffer& buffer) {
+ return buffer.sample_rate() != params.sample_rate() ||
+ buffer.channel_count() != params.channels() ||
+ buffer.channel_layout() != params.channel_layout();
}
AudioBufferConverter::AudioBufferConverter(const AudioParameters& output_params)
@@ -38,29 +38,29 @@ AudioBufferConverter::AudioBufferConverter(const AudioParameters& output_params)
AudioBufferConverter::~AudioBufferConverter() = default;
-void AudioBufferConverter::AddInput(const scoped_refptr<AudioBuffer>& buffer) {
+void AudioBufferConverter::AddInput(scoped_refptr<AudioBuffer> buffer) {
// On EOS flush any remaining buffered data.
if (buffer->end_of_stream()) {
Flush();
- queued_outputs_.push_back(buffer);
+ queued_outputs_.push_back(std::move(buffer));
return;
}
// We'll need a new |audio_converter_| if there was a config change.
- if (IsConfigChange(input_params_, buffer))
- ResetConverter(buffer);
+ if (IsConfigChange(input_params_, *buffer))
+ ResetConverter(*buffer);
// Pass straight through if there's no work to be done.
if (!audio_converter_) {
- queued_outputs_.push_back(buffer);
+ queued_outputs_.push_back(std::move(buffer));
return;
}
if (timestamp_helper_.base_timestamp() == kNoTimestamp)
timestamp_helper_.SetBaseTimestamp(buffer->timestamp());
- queued_inputs_.push_back(buffer);
input_frames_ += buffer->frame_count();
+ queued_inputs_.push_back(std::move(buffer));
ConvertIfPossible();
}
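AddInput() switching from const scoped_refptr<AudioBuffer>& to a by-value scoped_refptr<AudioBuffer> lets callers std::move() their reference in, replacing an atomic AddRef/Release pair with a plain pointer move. The same idiom with std::shared_ptr, as a self-contained sketch:

#include <deque>
#include <memory>
#include <utility>

struct AudioBuffer { /* payload elided */ };

class BufferQueue {
 public:
  // By-value parameter: callers that std::move() in pay no refcount churn,
  // while callers that pass an lvalue still get exactly one copy.
  void Append(std::shared_ptr<AudioBuffer> buffer) {
    buffers_.push_back(std::move(buffer));
  }

 private:
  std::deque<std::shared_ptr<AudioBuffer>> buffers_;
};

int main() {
  BufferQueue queue;
  auto buffer = std::make_shared<AudioBuffer>();
  queue.Append(std::move(buffer));  // no atomic refcount increment here
}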
@@ -69,7 +69,7 @@ bool AudioBufferConverter::HasNextBuffer() { return !queued_outputs_.empty(); }
scoped_refptr<AudioBuffer> AudioBufferConverter::GetNextBuffer() {
DCHECK(!queued_outputs_.empty());
- scoped_refptr<AudioBuffer> out = queued_outputs_.front();
+ auto out = std::move(queued_outputs_.front());
queued_outputs_.pop_front();
return out;
}
@@ -98,13 +98,13 @@ double AudioBufferConverter::ProvideInput(AudioBus* audio_bus,
int dest_index = 0;
while (requested_frames_left > 0 && !queued_inputs_.empty()) {
- scoped_refptr<AudioBuffer> input_buffer = queued_inputs_.front();
+ const auto& input_buffer = queued_inputs_.front();
int frames_to_read =
std::min(requested_frames_left,
input_buffer->frame_count() - last_input_buffer_offset_);
- input_buffer->ReadFrames(
- frames_to_read, last_input_buffer_offset_, dest_index, audio_bus);
+ input_buffer->ReadFrames(frames_to_read, last_input_buffer_offset_,
+ dest_index, audio_bus);
last_input_buffer_offset_ += frames_to_read;
if (last_input_buffer_offset_ == input_buffer->frame_count()) {
@@ -135,21 +135,18 @@ double AudioBufferConverter::ProvideInput(AudioBus* audio_bus,
return 1.0;
}
-void AudioBufferConverter::ResetConverter(
- const scoped_refptr<AudioBuffer>& buffer) {
+void AudioBufferConverter::ResetConverter(const AudioBuffer& buffer) {
Flush();
audio_converter_.reset();
input_params_.Reset(
- input_params_.format(),
- buffer->channel_layout(),
- buffer->sample_rate(),
+ input_params_.format(), buffer.channel_layout(), buffer.sample_rate(),
// If resampling is needed and the FIFO disabled, the AudioConverter will
// always request SincResampler::kDefaultRequestSize frames. Otherwise it
// will use the output frame size.
- buffer->sample_rate() == output_params_.sample_rate()
+ buffer.sample_rate() == output_params_.sample_rate()
? output_params_.frames_per_buffer()
: SincResampler::kDefaultRequestSize);
- input_params_.set_channels_for_discrete(buffer->channel_count());
+ input_params_.set_channels_for_discrete(buffer.channel_count());
io_sample_rate_ratio_ = static_cast<double>(input_params_.sample_rate()) /
output_params_.sample_rate();
@@ -186,7 +183,7 @@ void AudioBufferConverter::ConvertIfPossible() {
if (!request_frames)
return;
- scoped_refptr<AudioBuffer> output_buffer = AudioBuffer::CreateBuffer(
+ auto output_buffer = AudioBuffer::CreateBuffer(
kSampleFormatPlanarF32, output_params_.channel_layout(),
output_params_.channels(), output_params_.sample_rate(), request_frames,
pool_);
@@ -226,7 +223,7 @@ void AudioBufferConverter::ConvertIfPossible() {
output_buffer->set_timestamp(timestamp_helper_.GetTimestamp());
timestamp_helper_.AddFrames(request_frames);
- queued_outputs_.push_back(output_buffer);
+ queued_outputs_.push_back(std::move(output_buffer));
}
void AudioBufferConverter::Flush() {
diff --git a/chromium/media/base/audio_buffer_converter.h b/chromium/media/base/audio_buffer_converter.h
index 3cccb208965..3a0320be67d 100644
--- a/chromium/media/base/audio_buffer_converter.h
+++ b/chromium/media/base/audio_buffer_converter.h
@@ -27,7 +27,7 @@ class MEDIA_EXPORT AudioBufferConverter : public AudioConverter::InputCallback {
explicit AudioBufferConverter(const AudioParameters& output_params);
~AudioBufferConverter() override;
- void AddInput(const scoped_refptr<AudioBuffer>& buffer);
+ void AddInput(scoped_refptr<AudioBuffer> buffer);
// Is an output buffer available via GetNextBuffer()?
bool HasNextBuffer();
@@ -54,7 +54,7 @@ class MEDIA_EXPORT AudioBufferConverter : public AudioConverter::InputCallback {
double ProvideInput(AudioBus* audio_bus, uint32_t frames_delayed) override;
// Reset the converter in response to a configuration change.
- void ResetConverter(const scoped_refptr<AudioBuffer>& input_buffer);
+ void ResetConverter(const AudioBuffer& input_buffer);
// Perform conversion if we have enough data.
void ConvertIfPossible();
diff --git a/chromium/media/base/audio_buffer_converter_unittest.cc b/chromium/media/base/audio_buffer_converter_unittest.cc
index 06d88ec058a..b57d8b456df 100644
--- a/chromium/media/base/audio_buffer_converter_unittest.cc
+++ b/chromium/media/base/audio_buffer_converter_unittest.cc
@@ -49,7 +49,7 @@ class AudioBufferConverterTest : public ::testing::Test {
output_frames_ = expected_output_frames_ = input_frames_ = 0;
}
- void AddInput(const scoped_refptr<AudioBuffer>& in) {
+ void AddInput(scoped_refptr<AudioBuffer> in) {
if (!in->end_of_stream()) {
input_frames_ += in->frame_count();
expected_output_frames_ +=
@@ -57,7 +57,7 @@ class AudioBufferConverterTest : public ::testing::Test {
(static_cast<double>(output_params_.sample_rate()) /
in->sample_rate());
}
- audio_buffer_converter_->AddInput(in);
+ audio_buffer_converter_->AddInput(std::move(in));
}
void ConsumeOutput() {
diff --git a/chromium/media/base/audio_buffer_queue.cc b/chromium/media/base/audio_buffer_queue.cc
index a436b1e4e0c..3e699663898 100644
--- a/chromium/media/base/audio_buffer_queue.cc
+++ b/chromium/media/base/audio_buffer_queue.cc
@@ -20,14 +20,14 @@ void AudioBufferQueue::Clear() {
frames_ = 0;
}
-void AudioBufferQueue::Append(const scoped_refptr<AudioBuffer>& buffer_in) {
- // Add the buffer to the queue. Inserting into deque invalidates all
- // iterators, so point to the first buffer.
- buffers_.push_back(buffer_in);
-
+void AudioBufferQueue::Append(scoped_refptr<AudioBuffer> buffer_in) {
// Update the |frames_| counter since we have added frames.
frames_ += buffer_in->frame_count();
CHECK_GT(frames_, 0); // make sure it doesn't overflow.
+
+ // Add the buffer to the queue. Inserting into deque invalidates all
+ // iterators, so point to the first buffer.
+ buffers_.push_back(std::move(buffer_in));
}
int AudioBufferQueue::ReadFrames(int frames,
@@ -71,7 +71,7 @@ int AudioBufferQueue::InternalRead(int frames,
dest_frame_offset == dest->GetBitstreamFrames());
DCHECK(!source_frame_offset);
- scoped_refptr<AudioBuffer> buffer = buffers_.front();
+ const auto& buffer = buffers_.front();
int taken = buffer->frame_count();
// if |dest| is NULL, there's no need to copy.
diff --git a/chromium/media/base/audio_buffer_queue.h b/chromium/media/base/audio_buffer_queue.h
index 5148fa3d24f..cb709e80e53 100644
--- a/chromium/media/base/audio_buffer_queue.h
+++ b/chromium/media/base/audio_buffer_queue.h
@@ -29,7 +29,7 @@ class MEDIA_EXPORT AudioBufferQueue {
void Clear();
// Appends |buffer_in| to this queue.
- void Append(const scoped_refptr<AudioBuffer>& buffer_in);
+ void Append(scoped_refptr<AudioBuffer> buffer_in);
// Reads a maximum of |frames| frames into |dest| from the current position.
// Returns the number of frames read. The current position will advance by the
diff --git a/chromium/media/base/audio_codecs.cc b/chromium/media/base/audio_codecs.cc
index 10e0286b600..5b39eb04738 100644
--- a/chromium/media/base/audio_codecs.cc
+++ b/chromium/media/base/audio_codecs.cc
@@ -64,8 +64,10 @@ AudioCodec StringToAudioCodec(const std::string& codec_id) {
return kCodecALAC;
if (codec_id == "flac")
return kCodecFLAC;
- if (base::StartsWith(codec_id, "mhm1.", base::CompareCase::SENSITIVE))
+ if (base::StartsWith(codec_id, "mhm1.", base::CompareCase::SENSITIVE) ||
+ base::StartsWith(codec_id, "mha1.", base::CompareCase::SENSITIVE)) {
return kCodecMpegHAudio;
+ }
if (codec_id == "opus")
return kCodecOpus;
if (codec_id == "vorbis")
diff --git a/chromium/media/base/audio_decoder.h b/chromium/media/base/audio_decoder.h
index e2d5926163b..39a893bd693 100644
--- a/chromium/media/base/audio_decoder.h
+++ b/chromium/media/base/audio_decoder.h
@@ -26,15 +26,15 @@ class CdmContext;
class MEDIA_EXPORT AudioDecoder {
public:
// Callback for VideoDecoder initialization.
- using InitCB = base::Callback<void(bool success)>;
+ using InitCB = base::OnceCallback<void(bool success)>;
// Callback for AudioDecoder to return a decoded frame whenever it becomes
// available. Only non-EOS frames should be returned via this callback.
- using OutputCB = base::Callback<void(const scoped_refptr<AudioBuffer>&)>;
+ using OutputCB = base::RepeatingCallback<void(scoped_refptr<AudioBuffer>)>;
// Callback for Decode(). Called after the decoder has accepted corresponding
// DecoderBuffer, indicating that the pipeline can send next buffer to decode.
- using DecodeCB = base::Callback<void(DecodeStatus)>;
+ using DecodeCB = base::RepeatingCallback<void(DecodeStatus)>;
AudioDecoder();
@@ -69,7 +69,7 @@ class MEDIA_EXPORT AudioDecoder {
// Initialize().
virtual void Initialize(const AudioDecoderConfig& config,
CdmContext* cdm_context,
- const InitCB& init_cb,
+ InitCB init_cb,
const OutputCB& output_cb,
const WaitingCB& waiting_cb) = 0;
@@ -89,7 +89,7 @@ class MEDIA_EXPORT AudioDecoder {
// Resets decoder state. All pending Decode() requests will be finished or
// aborted before |closure| is called.
- virtual void Reset(const base::Closure& closure) = 0;
+ virtual void Reset(base::OnceClosure closure) = 0;
// Returns true if the decoder needs bitstream conversion before decoding.
virtual bool NeedsBitstreamConversion() const;
diff --git a/chromium/media/base/audio_discard_helper.cc b/chromium/media/base/audio_discard_helper.cc
index f5bf3d68f04..ecb3e2a1437 100644
--- a/chromium/media/base/audio_discard_helper.cc
+++ b/chromium/media/base/audio_discard_helper.cc
@@ -49,9 +49,8 @@ void AudioDiscardHelper::Reset(size_t initial_discard) {
delayed_discard_padding_ = DecoderBuffer::DiscardPadding();
}
-bool AudioDiscardHelper::ProcessBuffers(
- const DecoderBuffer& encoded_buffer,
- const scoped_refptr<AudioBuffer>& decoded_buffer) {
+bool AudioDiscardHelper::ProcessBuffers(const DecoderBuffer& encoded_buffer,
+ AudioBuffer* decoded_buffer) {
DCHECK(!encoded_buffer.end_of_stream());
DCHECK(encoded_buffer.timestamp() != kNoTimestamp);
@@ -69,7 +68,7 @@ bool AudioDiscardHelper::ProcessBuffers(
}
DCHECK(initialized());
- if (!decoded_buffer.get()) {
+ if (!decoded_buffer) {
// If there's a one buffer delay for decoding, we need to save it so it can
// be processed with the next decoder buffer.
if (delayed_discard_)
diff --git a/chromium/media/base/audio_discard_helper.h b/chromium/media/base/audio_discard_helper.h
index 7154beedfb9..09956690136 100644
--- a/chromium/media/base/audio_discard_helper.h
+++ b/chromium/media/base/audio_discard_helper.h
@@ -61,7 +61,7 @@ class MEDIA_EXPORT AudioDiscardHelper {
// |decoded_buffer|s. If the first buffer has a negative timestamp it will be
// clamped to zero.
bool ProcessBuffers(const DecoderBuffer& encoded_buffer,
- const scoped_refptr<AudioBuffer>& decoded_buffer);
+ AudioBuffer* decoded_buffer);
// Whether any buffers have been processed.
bool initialized() const {
diff --git a/chromium/media/base/audio_discard_helper_unittest.cc b/chromium/media/base/audio_discard_helper_unittest.cc
index 3c2f4dc698b..324439d5f82 100644
--- a/chromium/media/base/audio_discard_helper_unittest.cc
+++ b/chromium/media/base/audio_discard_helper_unittest.cc
@@ -34,13 +34,12 @@ static scoped_refptr<AudioBuffer> CreateDecodedBuffer(int frames) {
kSampleRate, 0.0f, kDataStep, frames, kNoTimestamp);
}
-static float ExtractDecodedData(const scoped_refptr<AudioBuffer>& buffer,
- int index) {
+static float ExtractDecodedData(const AudioBuffer& buffer, int index) {
// This is really inefficient, but we can't access the raw AudioBuffer if any
// start trimming has been applied.
std::unique_ptr<AudioBus> temp_bus =
- AudioBus::Create(buffer->channel_count(), 1);
- buffer->ReadFrames(1, index, 0, temp_bus.get());
+ AudioBus::Create(buffer.channel_count(), 1);
+ buffer.ReadFrames(1, index, 0, temp_bus.get());
return temp_bus->channel(0)[0];
}
@@ -86,7 +85,8 @@ TEST(AudioDiscardHelperTest, BasicProcessBuffers) {
scoped_refptr<AudioBuffer> decoded_buffer = CreateDecodedBuffer(kTestFrames);
// Verify the basic case where nothing is discarded.
- ASSERT_TRUE(discard_helper.ProcessBuffers(*encoded_buffer, decoded_buffer));
+ ASSERT_TRUE(
+ discard_helper.ProcessBuffers(*encoded_buffer, decoded_buffer.get()));
ASSERT_TRUE(discard_helper.initialized());
EXPECT_EQ(kTimestamp, decoded_buffer->timestamp());
EXPECT_EQ(kActualDuration, decoded_buffer->duration());
@@ -113,7 +113,8 @@ TEST(AudioDiscardHelperTest, NegativeTimestampClampsToZero) {
scoped_refptr<AudioBuffer> decoded_buffer = CreateDecodedBuffer(kTestFrames);
// Verify the basic case where nothing is discarded.
- ASSERT_TRUE(discard_helper.ProcessBuffers(*encoded_buffer, decoded_buffer));
+ ASSERT_TRUE(
+ discard_helper.ProcessBuffers(*encoded_buffer, decoded_buffer.get()));
ASSERT_TRUE(discard_helper.initialized());
EXPECT_EQ(base::TimeDelta(), decoded_buffer->timestamp());
EXPECT_EQ(kDuration, decoded_buffer->duration());
@@ -137,13 +138,14 @@ TEST(AudioDiscardHelperTest, ProcessBuffersWithInitialDiscard) {
scoped_refptr<AudioBuffer> decoded_buffer = CreateDecodedBuffer(kTestFrames);
// Verify half the frames end up discarded.
- ASSERT_TRUE(discard_helper.ProcessBuffers(*encoded_buffer, decoded_buffer));
+ ASSERT_TRUE(
+ discard_helper.ProcessBuffers(*encoded_buffer, decoded_buffer.get()));
ASSERT_TRUE(discard_helper.initialized());
EXPECT_EQ(kTimestamp, decoded_buffer->timestamp());
EXPECT_EQ(kDuration / 2, decoded_buffer->duration());
EXPECT_EQ(kDiscardFrames, decoded_buffer->frame_count());
ASSERT_FLOAT_EQ(kDiscardFrames * kDataStep,
- ExtractDecodedData(decoded_buffer, 0));
+ ExtractDecodedData(*decoded_buffer, 0));
}
TEST(AudioDiscardHelperTest, ProcessBuffersWithLargeInitialDiscard) {
@@ -162,13 +164,15 @@ TEST(AudioDiscardHelperTest, ProcessBuffersWithLargeInitialDiscard) {
scoped_refptr<AudioBuffer> decoded_buffer = CreateDecodedBuffer(kTestFrames);
// The first call should fail since no output buffer remains.
- ASSERT_FALSE(discard_helper.ProcessBuffers(*encoded_buffer, decoded_buffer));
+ ASSERT_FALSE(
+ discard_helper.ProcessBuffers(*encoded_buffer, decoded_buffer.get()));
ASSERT_TRUE(discard_helper.initialized());
// Generate another set of buffers and expect half the output frames.
encoded_buffer = CreateEncodedBuffer(kTimestamp + kDuration, kDuration);
decoded_buffer = CreateDecodedBuffer(kTestFrames);
- ASSERT_TRUE(discard_helper.ProcessBuffers(*encoded_buffer, decoded_buffer));
+ ASSERT_TRUE(
+ discard_helper.ProcessBuffers(*encoded_buffer, decoded_buffer.get()));
// The timestamp should match that of the initial buffer.
const int kDiscardFrames = kTestFrames / 2;
@@ -176,7 +180,7 @@ TEST(AudioDiscardHelperTest, ProcessBuffersWithLargeInitialDiscard) {
EXPECT_EQ(kDuration / 2, decoded_buffer->duration());
EXPECT_EQ(kDiscardFrames, decoded_buffer->frame_count());
ASSERT_FLOAT_EQ(kDiscardFrames * kDataStep,
- ExtractDecodedData(decoded_buffer, 0));
+ ExtractDecodedData(*decoded_buffer, 0));
}
TEST(AudioDiscardHelperTest, AllowNonMonotonicTimestamps) {
@@ -191,7 +195,8 @@ TEST(AudioDiscardHelperTest, AllowNonMonotonicTimestamps) {
CreateEncodedBuffer(kTimestamp, kDuration);
scoped_refptr<AudioBuffer> decoded_buffer = CreateDecodedBuffer(kTestFrames);
- ASSERT_TRUE(discard_helper.ProcessBuffers(*encoded_buffer, decoded_buffer));
+ ASSERT_TRUE(
+ discard_helper.ProcessBuffers(*encoded_buffer, decoded_buffer.get()));
ASSERT_TRUE(discard_helper.initialized());
EXPECT_EQ(kTimestamp, decoded_buffer->timestamp());
EXPECT_EQ(kDuration, decoded_buffer->duration());
@@ -199,7 +204,8 @@ TEST(AudioDiscardHelperTest, AllowNonMonotonicTimestamps) {
// Process the same input buffer again to ensure input timestamps which go
// backwards in time are not errors.
- ASSERT_TRUE(discard_helper.ProcessBuffers(*encoded_buffer, decoded_buffer));
+ ASSERT_TRUE(
+ discard_helper.ProcessBuffers(*encoded_buffer, decoded_buffer.get()));
EXPECT_EQ(kTimestamp + kDuration, decoded_buffer->timestamp());
EXPECT_EQ(kDuration, decoded_buffer->duration());
EXPECT_EQ(kTestFrames, decoded_buffer->frame_count());
@@ -221,7 +227,8 @@ TEST(AudioDiscardHelperTest, DiscardEndPadding) {
encoded_buffer->set_discard_padding(
std::make_pair(base::TimeDelta(), kDuration / 2));
- ASSERT_TRUE(discard_helper.ProcessBuffers(*encoded_buffer, decoded_buffer));
+ ASSERT_TRUE(
+ discard_helper.ProcessBuffers(*encoded_buffer, decoded_buffer.get()));
ASSERT_TRUE(discard_helper.initialized());
EXPECT_EQ(kTimestamp, decoded_buffer->timestamp());
EXPECT_EQ(kDuration / 2, decoded_buffer->duration());
@@ -245,7 +252,8 @@ TEST(AudioDiscardHelperTest, BadDiscardEndPadding) {
std::make_pair(base::TimeDelta(), kDuration * 2));
// Verify the end discard padding is rejected.
- ASSERT_FALSE(discard_helper.ProcessBuffers(*encoded_buffer, decoded_buffer));
+ ASSERT_FALSE(
+ discard_helper.ProcessBuffers(*encoded_buffer, decoded_buffer.get()));
ASSERT_TRUE(discard_helper.initialized());
}
@@ -269,13 +277,14 @@ TEST(AudioDiscardHelperTest, InitialDiscardAndDiscardEndPadding) {
const int kDiscardFrames = kTestFrames / 4;
discard_helper.Reset(kDiscardFrames);
- ASSERT_TRUE(discard_helper.ProcessBuffers(*encoded_buffer, decoded_buffer));
+ ASSERT_TRUE(
+ discard_helper.ProcessBuffers(*encoded_buffer, decoded_buffer.get()));
ASSERT_TRUE(discard_helper.initialized());
EXPECT_EQ(kTimestamp, decoded_buffer->timestamp());
EXPECT_EQ(kDuration / 2, decoded_buffer->duration());
EXPECT_EQ(kTestFrames / 2, decoded_buffer->frame_count());
ASSERT_FLOAT_EQ(kDiscardFrames * kDataStep,
- ExtractDecodedData(decoded_buffer, 0));
+ ExtractDecodedData(*decoded_buffer, 0));
}
TEST(AudioDiscardHelperTest, InitialDiscardAndDiscardPadding) {
@@ -296,7 +305,8 @@ TEST(AudioDiscardHelperTest, InitialDiscardAndDiscardPadding) {
std::make_pair(kDuration / 8, kDuration / 16));
discard_helper.Reset(kDiscardFrames);
- ASSERT_TRUE(discard_helper.ProcessBuffers(*encoded_buffer, decoded_buffer));
+ ASSERT_TRUE(
+ discard_helper.ProcessBuffers(*encoded_buffer, decoded_buffer.get()));
ASSERT_TRUE(discard_helper.initialized());
EXPECT_EQ(kTimestamp, decoded_buffer->timestamp());
EXPECT_EQ(kDuration - kDuration / 4 - kDuration / 8 - kDuration / 16,
@@ -333,7 +343,8 @@ TEST(AudioDiscardHelperTest, InitialDiscardAndDiscardPaddingAndDecoderDelay) {
// |--------| |---------| |----|
// Decoded Discard Front Padding
//
- ASSERT_FALSE(discard_helper.ProcessBuffers(*encoded_buffer, decoded_buffer));
+ ASSERT_FALSE(
+ discard_helper.ProcessBuffers(*encoded_buffer, decoded_buffer.get()));
ASSERT_TRUE(discard_helper.initialized());
// Processing another buffer that has front discard set to half the buffer's
@@ -354,17 +365,18 @@ TEST(AudioDiscardHelperTest, InitialDiscardAndDiscardPaddingAndDecoderDelay) {
encoded_buffer->set_discard_padding(
std::make_pair(kDuration / 2, kDuration / 4));
decoded_buffer = CreateDecodedBuffer(kTestFrames);
- ASSERT_FLOAT_EQ(0.0f, ExtractDecodedData(decoded_buffer, 0));
+ ASSERT_FLOAT_EQ(0.0f, ExtractDecodedData(*decoded_buffer, 0));
ASSERT_NEAR(kDecoderDelay * kDataStep,
- ExtractDecodedData(decoded_buffer, kDecoderDelay),
+ ExtractDecodedData(*decoded_buffer, kDecoderDelay),
kDataStep / 1000);
- ASSERT_TRUE(discard_helper.ProcessBuffers(*encoded_buffer, decoded_buffer));
+ ASSERT_TRUE(
+ discard_helper.ProcessBuffers(*encoded_buffer, decoded_buffer.get()));
EXPECT_EQ(kTimestamp, decoded_buffer->timestamp());
EXPECT_EQ(kDuration / 2, decoded_buffer->duration());
EXPECT_EQ(kTestFrames / 2, decoded_buffer->frame_count());
// Verify it was actually the latter half of the buffer that was removed.
- ASSERT_FLOAT_EQ(0.0f, ExtractDecodedData(decoded_buffer, 0));
+ ASSERT_FLOAT_EQ(0.0f, ExtractDecodedData(*decoded_buffer, 0));
// Verify the end discard padding is carried over to the next buffer. Use
// kDuration / 2 for the end discard padding so that the next buffer has its
@@ -381,17 +393,18 @@ TEST(AudioDiscardHelperTest, InitialDiscardAndDiscardPaddingAndDecoderDelay) {
encoded_buffer->set_discard_padding(
std::make_pair(base::TimeDelta(), kDuration / 2));
decoded_buffer = CreateDecodedBuffer(kTestFrames);
- ASSERT_TRUE(discard_helper.ProcessBuffers(*encoded_buffer, decoded_buffer));
+ ASSERT_TRUE(
+ discard_helper.ProcessBuffers(*encoded_buffer, decoded_buffer.get()));
EXPECT_EQ(kTimestamp + kDuration / 2, decoded_buffer->timestamp());
EXPECT_EQ(3 * kDuration / 4, decoded_buffer->duration());
EXPECT_EQ(3 * kTestFrames / 4, decoded_buffer->frame_count());
// Verify it was actually the second quarter of the buffer that was removed.
const int kDiscardFrames = kTestFrames / 4;
- ASSERT_FLOAT_EQ(0.0f, ExtractDecodedData(decoded_buffer, 0));
+ ASSERT_FLOAT_EQ(0.0f, ExtractDecodedData(*decoded_buffer, 0));
ASSERT_FLOAT_EQ(
kDiscardFrames * 2 * kDataStep,
- ExtractDecodedData(decoded_buffer, kDecoderDelay - kDiscardFrames));
+ ExtractDecodedData(*decoded_buffer, kDecoderDelay - kDiscardFrames));
// One last test to ensure carryover discard from the start works.
//
@@ -405,14 +418,15 @@ TEST(AudioDiscardHelperTest, InitialDiscardAndDiscardPaddingAndDecoderDelay) {
encoded_buffer->set_timestamp(encoded_buffer->timestamp() + kDuration);
encoded_buffer->set_discard_padding(DecoderBuffer::DiscardPadding());
decoded_buffer = CreateDecodedBuffer(kTestFrames);
- ASSERT_FLOAT_EQ(0.0f, ExtractDecodedData(decoded_buffer, 0));
- ASSERT_TRUE(discard_helper.ProcessBuffers(*encoded_buffer, decoded_buffer));
+ ASSERT_FLOAT_EQ(0.0f, ExtractDecodedData(*decoded_buffer, 0));
+ ASSERT_TRUE(
+ discard_helper.ProcessBuffers(*encoded_buffer, decoded_buffer.get()));
EXPECT_EQ(kTimestamp + kDuration / 2 + 3 * kDuration / 4,
decoded_buffer->timestamp());
EXPECT_EQ(kDuration / 2, decoded_buffer->duration());
EXPECT_EQ(kTestFrames / 2, decoded_buffer->frame_count());
ASSERT_FLOAT_EQ(kTestFrames / 2 * kDataStep,
- ExtractDecodedData(decoded_buffer, 0));
+ ExtractDecodedData(*decoded_buffer, 0));
}
TEST(AudioDiscardHelperTest, DelayedDiscardInitialDiscardAndDiscardPadding) {
@@ -442,7 +456,8 @@ TEST(AudioDiscardHelperTest, DelayedDiscardInitialDiscardAndDiscardPadding) {
// Verify that when the decoded buffer is consumed, the discards from the
// previous encoded buffer are applied.
- ASSERT_TRUE(discard_helper.ProcessBuffers(*encoded_buffer, decoded_buffer));
+ ASSERT_TRUE(
+ discard_helper.ProcessBuffers(*encoded_buffer, decoded_buffer.get()));
EXPECT_EQ(kTimestamp, decoded_buffer->timestamp());
EXPECT_EQ(kDuration - kDuration / 4 - kDuration / 8 - kDuration / 16,
decoded_buffer->duration());
@@ -466,18 +481,20 @@ TEST(AudioDiscardHelperTest, CompleteDiscard) {
scoped_refptr<AudioBuffer> decoded_buffer = CreateDecodedBuffer(kTestFrames);
// Verify all of the first buffer is discarded.
- ASSERT_FALSE(discard_helper.ProcessBuffers(*encoded_buffer, decoded_buffer));
+ ASSERT_FALSE(
+ discard_helper.ProcessBuffers(*encoded_buffer, decoded_buffer.get()));
ASSERT_TRUE(discard_helper.initialized());
encoded_buffer->set_timestamp(kTimestamp + kDuration);
encoded_buffer->set_discard_padding(DecoderBuffer::DiscardPadding());
// Verify a second buffer goes through untouched.
decoded_buffer = CreateDecodedBuffer(kTestFrames / 2);
- ASSERT_TRUE(discard_helper.ProcessBuffers(*encoded_buffer, decoded_buffer));
+ ASSERT_TRUE(
+ discard_helper.ProcessBuffers(*encoded_buffer, decoded_buffer.get()));
EXPECT_EQ(kTimestamp, decoded_buffer->timestamp());
EXPECT_EQ(kDuration / 2, decoded_buffer->duration());
EXPECT_EQ(kTestFrames / 2, decoded_buffer->frame_count());
- ASSERT_FLOAT_EQ(0.0f, ExtractDecodedData(decoded_buffer, 0));
+ ASSERT_FLOAT_EQ(0.0f, ExtractDecodedData(*decoded_buffer, 0));
}
TEST(AudioDiscardHelperTest, CompleteDiscardWithDelayedDiscard) {
@@ -502,16 +519,18 @@ TEST(AudioDiscardHelperTest, CompleteDiscardWithDelayedDiscard) {
// Verify the first output buffer is dropped.
encoded_buffer->set_timestamp(kTimestamp + kDuration);
encoded_buffer->set_discard_padding(DecoderBuffer::DiscardPadding());
- ASSERT_FALSE(discard_helper.ProcessBuffers(*encoded_buffer, decoded_buffer));
+ ASSERT_FALSE(
+ discard_helper.ProcessBuffers(*encoded_buffer, decoded_buffer.get()));
// Verify the second buffer goes through untouched.
encoded_buffer->set_timestamp(kTimestamp + 2 * kDuration);
decoded_buffer = CreateDecodedBuffer(kTestFrames / 2);
- ASSERT_TRUE(discard_helper.ProcessBuffers(*encoded_buffer, decoded_buffer));
+ ASSERT_TRUE(
+ discard_helper.ProcessBuffers(*encoded_buffer, decoded_buffer.get()));
EXPECT_EQ(kTimestamp, decoded_buffer->timestamp());
EXPECT_EQ(kDuration / 2, decoded_buffer->duration());
EXPECT_EQ(kTestFrames / 2, decoded_buffer->frame_count());
- ASSERT_FLOAT_EQ(0.0f, ExtractDecodedData(decoded_buffer, 0));
+ ASSERT_FLOAT_EQ(0.0f, ExtractDecodedData(*decoded_buffer, 0));
}
TEST(AudioDiscardHelperTest, CompleteDiscardWithInitialDiscardDecoderDelay) {
@@ -532,19 +551,21 @@ TEST(AudioDiscardHelperTest, CompleteDiscardWithInitialDiscardDecoderDelay) {
scoped_refptr<AudioBuffer> decoded_buffer = CreateDecodedBuffer(kTestFrames);
// Verify all of the first buffer is discarded.
- ASSERT_FALSE(discard_helper.ProcessBuffers(*encoded_buffer, decoded_buffer));
+ ASSERT_FALSE(
+ discard_helper.ProcessBuffers(*encoded_buffer, decoded_buffer.get()));
ASSERT_TRUE(discard_helper.initialized());
encoded_buffer->set_timestamp(kTimestamp + kDuration);
encoded_buffer->set_discard_padding(DecoderBuffer::DiscardPadding());
// Verify 5ms off the front of the second buffer is discarded.
decoded_buffer = CreateDecodedBuffer(kTestFrames * 2);
- ASSERT_TRUE(discard_helper.ProcessBuffers(*encoded_buffer, decoded_buffer));
+ ASSERT_TRUE(
+ discard_helper.ProcessBuffers(*encoded_buffer, decoded_buffer.get()));
EXPECT_EQ(kTimestamp, decoded_buffer->timestamp());
EXPECT_EQ(kDuration * 2 - kDuration / 2, decoded_buffer->duration());
EXPECT_EQ(kTestFrames * 2 - kDecoderDelay, decoded_buffer->frame_count());
ASSERT_FLOAT_EQ(kDecoderDelay * kDataStep,
- ExtractDecodedData(decoded_buffer, 0));
+ ExtractDecodedData(*decoded_buffer, 0));
}
} // namespace media
diff --git a/chromium/media/base/audio_parameters.h b/chromium/media/base/audio_parameters.h
index d153110f63c..c5f9f6bc32c 100644
--- a/chromium/media/base/audio_parameters.h
+++ b/chromium/media/base/audio_parameters.h
@@ -78,6 +78,22 @@ struct MEDIA_SHMEM_EXPORT AudioOutputBuffer {
int8_t audio[1];
};
+struct MEDIA_SHMEM_EXPORT AudioRendererAlgorithmParameters {
+ // The maximum size for the audio buffer.
+ base::TimeDelta max_capacity;
+
+ // The minimum size for the audio buffer.
+ base::TimeDelta starting_capacity;
+
+ // The minimum size for the audio buffer for encrypted streams.
+ // Set this to be larger than |starting_capacity| because the
+ // performance of encrypted playback is always worse than clear playback, due
+ // to decryption and potentially IPC overhead. For context, see
+ // https://crbug.com/403462, https://crbug.com/718161 and
+ // https://crbug.com/879970.
+ base::TimeDelta starting_capacity_for_encrypted;
+};
+
// These convenience functions safely compute the size required for
// |shared_memory_count| AudioInputBuffers, with enough memory for AudioBus
// data, using |parameters| (or alternatively |channels| and |frames|). The
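To make the new struct concrete, a hedged sketch of how a client might populate it; the durations below are invented for illustration and are not defaults from this patch:

media::AudioRendererAlgorithmParameters params;
params.max_capacity = base::TimeDelta::FromSeconds(3);
params.starting_capacity = base::TimeDelta::FromMilliseconds(200);
// Larger than |starting_capacity| to absorb decryption and IPC overhead.
params.starting_capacity_for_encrypted =
    base::TimeDelta::FromMilliseconds(500);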
diff --git a/chromium/media/base/audio_push_fifo.h b/chromium/media/base/audio_push_fifo.h
index eca0722acb5..2eb6bfc467a 100644
--- a/chromium/media/base/audio_push_fifo.h
+++ b/chromium/media/base/audio_push_fifo.h
@@ -29,7 +29,8 @@ class MEDIA_EXPORT AudioPushFifo final {
// Push(). If zero or positive, the output contains data from the current
// call to Push(). Clients can use this to adjust timestamps.
using OutputCallback =
- base::Callback<void(const AudioBus& output_bus, int frame_delay)>;
+ base::RepeatingCallback<void(const AudioBus& output_bus,
+ int frame_delay)>;
// Creates a new AudioPushFifo which delivers re-buffered audio by running
// |callback|.
diff --git a/chromium/media/base/audio_renderer_mixer_input.cc b/chromium/media/base/audio_renderer_mixer_input.cc
index ee04a4677a7..b8b00249333 100644
--- a/chromium/media/base/audio_renderer_mixer_input.cc
+++ b/chromium/media/base/audio_renderer_mixer_input.cc
@@ -114,6 +114,10 @@ void AudioRendererMixerInput::Pause() {
playing_ = false;
}
+// Flush is not supported with mixed sinks due to how delayed pausing works in
+// the mixer.
+void AudioRendererMixerInput::Flush() {}
+
bool AudioRendererMixerInput::SetVolume(double volume) {
base::AutoLock auto_lock(volume_lock_);
volume_ = volume;
diff --git a/chromium/media/base/audio_renderer_mixer_input.h b/chromium/media/base/audio_renderer_mixer_input.h
index 0e47c24d9b8..53681859fd2 100644
--- a/chromium/media/base/audio_renderer_mixer_input.h
+++ b/chromium/media/base/audio_renderer_mixer_input.h
@@ -45,6 +45,7 @@ class MEDIA_EXPORT AudioRendererMixerInput
void Stop() override;
void Play() override;
void Pause() override;
+ void Flush() override;
bool SetVolume(double volume) override;
OutputDeviceInfo GetOutputDeviceInfo() override;
void GetOutputDeviceInfoAsync(OutputDeviceInfoCB info_cb) override;
diff --git a/chromium/media/base/audio_renderer_sink.h b/chromium/media/base/audio_renderer_sink.h
index fefc018b4b0..e04bcd0aef0 100644
--- a/chromium/media/base/audio_renderer_sink.h
+++ b/chromium/media/base/audio_renderer_sink.h
@@ -61,6 +61,10 @@ class AudioRendererSink
// Resumes playback after calling Pause().
virtual void Play() = 0;
+ // Flushes playback.
+ // This should only be called if the sink is not playing.
+ virtual void Flush() = 0;
+
// Sets the playback volume, with range [0.0, 1.0] inclusive.
// Returns |true| on success.
virtual bool SetVolume(double volume) = 0;
diff --git a/chromium/media/base/bitstream_buffer.cc b/chromium/media/base/bitstream_buffer.cc
index a4662663cce..e434157e7f0 100644
--- a/chromium/media/base/bitstream_buffer.cc
+++ b/chromium/media/base/bitstream_buffer.cc
@@ -9,26 +9,45 @@
namespace media {
BitstreamBuffer::BitstreamBuffer()
- : BitstreamBuffer(-1, base::SharedMemoryHandle(), 0) {}
+ : BitstreamBuffer(-1, base::subtle::PlatformSharedMemoryRegion(), 0) {}
+
+BitstreamBuffer::BitstreamBuffer(
+ int32_t id,
+ base::subtle::PlatformSharedMemoryRegion region,
+ size_t size,
+ off_t offset,
+ base::TimeDelta presentation_timestamp)
+ : id_(id),
+ region_(std::move(region)),
+ size_(size),
+ offset_(offset),
+ presentation_timestamp_(presentation_timestamp) {}
BitstreamBuffer::BitstreamBuffer(int32_t id,
base::SharedMemoryHandle handle,
+ bool read_only,
size_t size,
off_t offset,
base::TimeDelta presentation_timestamp)
: id_(id),
- handle_(handle),
+ region_(
+ base::subtle::PlatformSharedMemoryRegion::TakeFromSharedMemoryHandle(
+ handle.Duplicate(),
+ read_only
+ ? base::subtle::PlatformSharedMemoryRegion::Mode::kReadOnly
+ : base::subtle::PlatformSharedMemoryRegion::Mode::kUnsafe)),
size_(size),
offset_(offset),
presentation_timestamp_(presentation_timestamp) {}
-BitstreamBuffer::BitstreamBuffer(const BitstreamBuffer& other) = default;
+BitstreamBuffer::BitstreamBuffer(BitstreamBuffer&&) = default;
+BitstreamBuffer& BitstreamBuffer::operator=(BitstreamBuffer&&) = default;
BitstreamBuffer::~BitstreamBuffer() = default;
-scoped_refptr<DecoderBuffer> BitstreamBuffer::ToDecoderBuffer() const {
+scoped_refptr<DecoderBuffer> BitstreamBuffer::ToDecoderBuffer() {
scoped_refptr<DecoderBuffer> buffer =
- DecoderBuffer::FromSharedMemoryHandle(handle_, offset_, size_);
+ DecoderBuffer::FromSharedMemoryRegion(std::move(region_), offset_, size_);
if (!buffer)
return nullptr;
buffer->set_timestamp(presentation_timestamp_);
diff --git a/chromium/media/base/bitstream_buffer.h b/chromium/media/base/bitstream_buffer.h
index 524b4df7eaa..8a72eceb90c 100644
--- a/chromium/media/base/bitstream_buffer.h
+++ b/chromium/media/base/bitstream_buffer.h
@@ -9,8 +9,8 @@
#include <stdint.h>
#include "base/macros.h"
+#include "base/memory/platform_shared_memory_region.h"
#include "base/memory/scoped_refptr.h"
-#include "base/memory/shared_memory.h"
#include "base/time/time.h"
#include "media/base/decoder_buffer.h"
#include "media/base/decrypt_config.h"
@@ -37,23 +37,35 @@ class MEDIA_EXPORT BitstreamBuffer {
// When not provided, |presentation_timestamp| will be
// |media::kNoTimestamp|.
BitstreamBuffer(int32_t id,
+ base::subtle::PlatformSharedMemoryRegion region,
+ size_t size,
+ off_t offset = 0,
+ base::TimeDelta presentation_timestamp = kNoTimestamp);
+
+  // As above, but constructs the region by duplicating a SharedMemoryHandle.
+ // TODO(https://crbug.com/793446): remove once legacy shared memory has been
+ // converted.
+ BitstreamBuffer(int32_t id,
base::SharedMemoryHandle handle,
+ bool read_only,
size_t size,
off_t offset = 0,
base::TimeDelta presentation_timestamp = kNoTimestamp);
- BitstreamBuffer(const BitstreamBuffer& other);
+ // Move operations are allowed.
+ BitstreamBuffer(BitstreamBuffer&&);
+ BitstreamBuffer& operator=(BitstreamBuffer&&);
~BitstreamBuffer();
- // Produce an equivalent DecoderBuffer. This consumes handle(), even if
+ // Produce an equivalent DecoderBuffer. This consumes region(), even if
// nullptr is returned.
//
// This method is only intended to be used by VDAs that are being converted to
// use DecoderBuffer.
//
// TODO(sandersd): Remove once all VDAs are converted.
- scoped_refptr<DecoderBuffer> ToDecoderBuffer() const;
+ scoped_refptr<DecoderBuffer> ToDecoderBuffer();
// TODO(crbug.com/813845): As this is only used by Android, include
// EncryptionMode and optional EncryptionPattern when updating for Android.
@@ -61,8 +73,24 @@ class MEDIA_EXPORT BitstreamBuffer {
const std::string& iv,
const std::vector<SubsampleEntry>& subsamples);
+  // Taking the region invalidates the one in this BitstreamBuffer.
+ base::subtle::PlatformSharedMemoryRegion TakeRegion() {
+ return std::move(region_);
+ }
+
+ // If a region needs to be taken from a const BitstreamBuffer, it must be
+ // duplicated. This function makes that explicit.
+ // TODO(crbug.com/793446): this is probably only needed by legacy IPC, and can
+ // be removed once that is converted to the new shared memory API.
+ base::subtle::PlatformSharedMemoryRegion DuplicateRegion() const {
+ return region_.Duplicate();
+ }
+
+ const base::subtle::PlatformSharedMemoryRegion& region() const {
+ return region_;
+ }
+
int32_t id() const { return id_; }
- base::SharedMemoryHandle handle() const { return handle_; }
// The number of bytes of the actual bitstream data. It is the size of the
// content instead of the whole shared memory.
@@ -76,7 +104,9 @@ class MEDIA_EXPORT BitstreamBuffer {
return presentation_timestamp_;
}
- void set_handle(const base::SharedMemoryHandle& handle) { handle_ = handle; }
+ void set_region(base::subtle::PlatformSharedMemoryRegion region) {
+ region_ = std::move(region);
+ }
// The following methods come from SetDecryptionSettings().
const std::string& key_id() const { return key_id_; }
@@ -85,7 +115,7 @@ class MEDIA_EXPORT BitstreamBuffer {
private:
int32_t id_;
- base::SharedMemoryHandle handle_;
+ base::subtle::PlatformSharedMemoryRegion region_;
size_t size_;
off_t offset_;
@@ -105,7 +135,7 @@ class MEDIA_EXPORT BitstreamBuffer {
friend struct IPC::ParamTraits<media::BitstreamBuffer>;
- // Allow compiler-generated copy & assign constructors.
+ DISALLOW_COPY_AND_ASSIGN(BitstreamBuffer);
};
} // namespace media
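A rough usage sketch of the region-based API (the region setup is illustrative and mirrors the unit tests later in this patch, not code from this file): the buffer owns the region until it is taken or converted.

  // Sketch only; assumes a writable region created elsewhere.
  auto region = base::UnsafeSharedMemoryRegion::Create(4096);
  media::BitstreamBuffer bitstream(
      /*id=*/1,
      base::UnsafeSharedMemoryRegion::TakeHandleForSerialization(
          std::move(region)),
      /*size=*/4096);
  // ToDecoderBuffer() consumes region(); afterwards |bitstream| holds none.
  scoped_refptr<media::DecoderBuffer> decoder_buffer =
      bitstream.ToDecoderBuffer();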
diff --git a/chromium/media/base/callback_holder.h b/chromium/media/base/callback_holder.h
index b77f022adf2..ee2da8cef9a 100644
--- a/chromium/media/base/callback_holder.h
+++ b/chromium/media/base/callback_holder.h
@@ -22,10 +22,10 @@ template <typename CB> class CallbackHolder {
}
// Sets the callback to be potentially held.
- void SetCallback(const CB& cb) {
+ void SetCallback(CB cb) {
DCHECK(original_cb_.is_null());
DCHECK(held_cb_.is_null());
- original_cb_ = cb;
+ original_cb_ = std::move(cb);
}
bool IsNull() const {
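Passing the callback by value and moving it lets the holder accept move-only callback types. A hedged sketch, assuming the holder is instantiated with a once-style callback:

  media::CallbackHolder<base::OnceClosure> holder;
  holder.SetCallback(base::BindOnce([] { /* deferred work */ }));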
diff --git a/chromium/media/base/container_names_unittest.cc b/chromium/media/base/container_names_unittest.cc
index cc3f9c138cb..3b0d883dba1 100644
--- a/chromium/media/base/container_names_unittest.cc
+++ b/chromium/media/base/container_names_unittest.cc
@@ -244,8 +244,6 @@ TEST(ContainerNamesTest, FileCheckSWF) {
TEST(ContainerNamesTest, FileCheckUNKNOWN) {
TestFile(CONTAINER_UNKNOWN, GetTestDataFilePath("ten_byte_file"));
TestFile(CONTAINER_UNKNOWN, GetTestDataFilePath("README"));
- TestFile(CONTAINER_UNKNOWN, GetTestDataFilePath("bali_640x360_P422.yuv"));
- TestFile(CONTAINER_UNKNOWN, GetTestDataFilePath("bali_640x360_RGB24.rgb"));
TestFile(CONTAINER_UNKNOWN, GetTestDataFilePath("webm_vp8_track_entry"));
}
diff --git a/chromium/media/base/decoder_buffer.cc b/chromium/media/base/decoder_buffer.cc
index 8ededb93cc4..375b3814e15 100644
--- a/chromium/media/base/decoder_buffer.cc
+++ b/chromium/media/base/decoder_buffer.cc
@@ -116,11 +116,19 @@ scoped_refptr<DecoderBuffer> DecoderBuffer::CopyFrom(const uint8_t* data,
}
// static
-scoped_refptr<DecoderBuffer> DecoderBuffer::FromSharedMemoryHandle(
- const base::SharedMemoryHandle& handle,
+scoped_refptr<DecoderBuffer> DecoderBuffer::FromSharedMemoryRegion(
+ base::subtle::PlatformSharedMemoryRegion region,
off_t offset,
size_t size) {
- auto shm = std::make_unique<UnalignedSharedMemory>(handle, size, true);
+ // TODO(crbug.com/795291): when clients have converted to using
+ // base::ReadOnlySharedMemoryRegion the ugly mode check below will no longer
+ // be necessary.
+  const bool read_only =
+      region.GetMode() ==
+      base::subtle::PlatformSharedMemoryRegion::Mode::kReadOnly;
+  auto shm = std::make_unique<UnalignedSharedMemory>(
+      std::move(region), size, read_only);
if (size == 0 || !shm->MapAt(offset, size))
return nullptr;
return base::WrapRefCounted(new DecoderBuffer(std::move(shm), size));
diff --git a/chromium/media/base/decoder_buffer.h b/chromium/media/base/decoder_buffer.h
index 65dbf14e3b5..dba5253c694 100644
--- a/chromium/media/base/decoder_buffer.h
+++ b/chromium/media/base/decoder_buffer.h
@@ -68,15 +68,14 @@ class MEDIA_EXPORT DecoderBuffer
size_t side_data_size);
// Create a DecoderBuffer where data() of |size| bytes resides within the
- // memory referred to by |handle| at non-negative offset |offset|. The
+ // memory referred to by |region| at non-negative offset |offset|. The
// buffer's |is_key_frame_| will default to false.
//
// The shared memory will be mapped read-only.
//
- // If mapping fails, nullptr will be returned. In all cases |handle| is
- // consumed.
- static scoped_refptr<DecoderBuffer> FromSharedMemoryHandle(
- const base::SharedMemoryHandle& handle,
+ // If mapping fails, nullptr will be returned.
+ static scoped_refptr<DecoderBuffer> FromSharedMemoryRegion(
+ base::subtle::PlatformSharedMemoryRegion region,
off_t offset,
size_t size);
diff --git a/chromium/media/base/decoder_buffer_unittest.cc b/chromium/media/base/decoder_buffer_unittest.cc
index 1434fe701ff..49a393f9f1a 100644
--- a/chromium/media/base/decoder_buffer_unittest.cc
+++ b/chromium/media/base/decoder_buffer_unittest.cc
@@ -10,7 +10,7 @@
#include <memory>
#include "base/memory/read_only_shared_memory_region.h"
-#include "base/memory/shared_memory.h"
+#include "base/memory/unsafe_shared_memory_region.h"
#include "base/stl_util.h"
#include "base/strings/string_util.h"
#include "build/build_config.h"
@@ -63,16 +63,19 @@ TEST(DecoderBufferTest, CopyFrom) {
EXPECT_FALSE(buffer3->is_key_frame());
}
-TEST(DecoderBufferTest, FromSharedMemoryHandle) {
+TEST(DecoderBufferTest, FromPlatformSharedMemoryRegion) {
const uint8_t kData[] = "hello";
const size_t kDataSize = base::size(kData);
- base::SharedMemory mem;
- ASSERT_TRUE(mem.CreateAndMapAnonymous(kDataSize));
- memcpy(mem.memory(), kData, kDataSize);
+ auto region = base::UnsafeSharedMemoryRegion::Create(kDataSize);
+ auto mapping = region.Map();
+ ASSERT_TRUE(mapping.IsValid());
+ memcpy(mapping.GetMemoryAs<uint8_t>(), kData, kDataSize);
- scoped_refptr<DecoderBuffer> buffer(
- DecoderBuffer::FromSharedMemoryHandle(mem.TakeHandle(), 0, kDataSize));
+ scoped_refptr<DecoderBuffer> buffer(DecoderBuffer::FromSharedMemoryRegion(
+ base::UnsafeSharedMemoryRegion::TakeHandleForSerialization(
+ std::move(region)),
+ 0, kDataSize));
ASSERT_TRUE(buffer.get());
EXPECT_EQ(buffer->data_size(), kDataSize);
EXPECT_EQ(0, memcmp(buffer->data(), kData, kDataSize));
@@ -80,17 +83,20 @@ TEST(DecoderBufferTest, FromSharedMemoryHandle) {
EXPECT_FALSE(buffer->is_key_frame());
}
-TEST(DecoderBufferTest, FromSharedMemoryHandle_Unaligned) {
+TEST(DecoderBufferTest, FromPlatformSharedMemoryRegion_Unaligned) {
const uint8_t kData[] = "XXXhello";
const size_t kDataSize = base::size(kData);
const off_t kDataOffset = 3;
- base::SharedMemory mem;
- ASSERT_TRUE(mem.CreateAndMapAnonymous(kDataSize));
- memcpy(mem.memory(), kData, kDataSize);
+ auto region = base::UnsafeSharedMemoryRegion::Create(kDataSize);
+ auto mapping = region.Map();
+ ASSERT_TRUE(mapping.IsValid());
+ memcpy(mapping.GetMemoryAs<uint8_t>(), kData, kDataSize);
- scoped_refptr<DecoderBuffer> buffer(DecoderBuffer::FromSharedMemoryHandle(
- mem.TakeHandle(), kDataOffset, kDataSize - kDataOffset));
+ scoped_refptr<DecoderBuffer> buffer(DecoderBuffer::FromSharedMemoryRegion(
+ base::UnsafeSharedMemoryRegion::TakeHandleForSerialization(
+ std::move(region)),
+ kDataOffset, kDataSize - kDataOffset));
ASSERT_TRUE(buffer.get());
EXPECT_EQ(buffer->data_size(), kDataSize - kDataOffset);
EXPECT_EQ(
@@ -99,16 +105,19 @@ TEST(DecoderBufferTest, FromSharedMemoryHandle_Unaligned) {
EXPECT_FALSE(buffer->is_key_frame());
}
-TEST(DecoderBufferTest, FromSharedMemoryHandle_ZeroSize) {
+TEST(DecoderBufferTest, FromPlatformSharedMemoryRegion_ZeroSize) {
const uint8_t kData[] = "hello";
const size_t kDataSize = base::size(kData);
- base::SharedMemory mem;
- ASSERT_TRUE(mem.CreateAndMapAnonymous(kDataSize));
- memcpy(mem.memory(), kData, kDataSize);
+ auto region = base::UnsafeSharedMemoryRegion::Create(kDataSize);
+ auto mapping = region.Map();
+ ASSERT_TRUE(mapping.IsValid());
+ memcpy(mapping.memory(), kData, kDataSize);
- scoped_refptr<DecoderBuffer> buffer(
- DecoderBuffer::FromSharedMemoryHandle(mem.TakeHandle(), 0, 0));
+ scoped_refptr<DecoderBuffer> buffer(DecoderBuffer::FromSharedMemoryRegion(
+ base::UnsafeSharedMemoryRegion::TakeHandleForSerialization(
+ std::move(region)),
+ 0, 0));
ASSERT_FALSE(buffer.get());
}
@@ -116,8 +125,8 @@ TEST(DecoderBufferTest, FromSharedMemoryRegion) {
const uint8_t kData[] = "hello";
const size_t kDataSize = base::size(kData);
- base::MappedReadOnlyRegion mapping_region =
- base::ReadOnlySharedMemoryRegion::Create(kDataSize);
+ auto mapping_region = base::ReadOnlySharedMemoryRegion::Create(kDataSize);
+ ASSERT_TRUE(mapping_region.IsValid());
memcpy(mapping_region.mapping.GetMemoryAs<uint8_t>(), kData, kDataSize);
scoped_refptr<DecoderBuffer> buffer(DecoderBuffer::FromSharedMemoryRegion(
@@ -129,27 +138,13 @@ TEST(DecoderBufferTest, FromSharedMemoryRegion) {
EXPECT_FALSE(buffer->is_key_frame());
}
-TEST(DecoderBufferTest, FromSharedMemoryRegion_ZeroSize) {
- const uint8_t kData[] = "hello";
- const size_t kDataSize = base::size(kData);
-
- base::MappedReadOnlyRegion mapping_region =
- base::ReadOnlySharedMemoryRegion::Create(kDataSize);
- memcpy(mapping_region.mapping.GetMemoryAs<uint8_t>(), kData, kDataSize);
-
- scoped_refptr<DecoderBuffer> buffer(DecoderBuffer::FromSharedMemoryRegion(
- std::move(mapping_region.region), 0, 0));
-
- ASSERT_FALSE(buffer.get());
-}
-
TEST(DecoderBufferTest, FromSharedMemoryRegion_Unaligned) {
const uint8_t kData[] = "XXXhello";
const size_t kDataSize = base::size(kData);
const off_t kDataOffset = 3;
- base::MappedReadOnlyRegion mapping_region =
- base::ReadOnlySharedMemoryRegion::Create(kDataSize);
+ auto mapping_region = base::ReadOnlySharedMemoryRegion::Create(kDataSize);
+ ASSERT_TRUE(mapping_region.IsValid());
memcpy(mapping_region.mapping.GetMemoryAs<uint8_t>(), kData, kDataSize);
scoped_refptr<DecoderBuffer> buffer(DecoderBuffer::FromSharedMemoryRegion(
@@ -163,6 +158,19 @@ TEST(DecoderBufferTest, FromSharedMemoryRegion_Unaligned) {
EXPECT_FALSE(buffer->is_key_frame());
}
+TEST(DecoderBufferTest, FromSharedMemoryRegion_ZeroSize) {
+ const uint8_t kData[] = "hello";
+ const size_t kDataSize = base::size(kData);
+
+ auto mapping_region = base::ReadOnlySharedMemoryRegion::Create(kDataSize);
+ memcpy(mapping_region.mapping.GetMemoryAs<uint8_t>(), kData, kDataSize);
+
+ scoped_refptr<DecoderBuffer> buffer(DecoderBuffer::FromSharedMemoryRegion(
+ std::move(mapping_region.region), 0, 0));
+
+ ASSERT_FALSE(buffer.get());
+}
+
#if !defined(OS_ANDROID)
TEST(DecoderBufferTest, PaddingAlignment) {
const uint8_t kData[] = "hello";
diff --git a/chromium/media/base/decryptor.h b/chromium/media/base/decryptor.h
index b865cabc8b8..62b7902c6df 100644
--- a/chromium/media/base/decryptor.h
+++ b/chromium/media/base/decryptor.h
@@ -121,9 +121,10 @@ class MEDIA_EXPORT Decryptor {
// - Set to kError if unexpected error has occurred. In this case the
// returned frame(s) must be NULL/empty.
// Second parameter: The decoded video frame or audio buffers.
- typedef base::Callback<void(Status, const AudioFrames&)> AudioDecodeCB;
- typedef base::Callback<void(Status,
- const scoped_refptr<VideoFrame>&)> VideoDecodeCB;
+ typedef base::RepeatingCallback<void(Status, const AudioFrames&)>
+ AudioDecodeCB;
+ typedef base::RepeatingCallback<void(Status, scoped_refptr<VideoFrame>)>
+ VideoDecodeCB;
// Decrypts and decodes the |encrypted| buffer. The status and the decrypted
// buffer are returned via the provided callback.
diff --git a/chromium/media/base/demuxer_stream.h b/chromium/media/base/demuxer_stream.h
index 3888102a4be..fff2f3a132c 100644
--- a/chromium/media/base/demuxer_stream.h
+++ b/chromium/media/base/demuxer_stream.h
@@ -9,7 +9,7 @@
#include "base/memory/ref_counted.h"
#include "base/time/time.h"
#include "media/base/media_export.h"
-#include "media/base/video_rotation.h"
+#include "media/base/video_transformation.h"
namespace media {
diff --git a/chromium/media/base/fake_audio_renderer_sink.cc b/chromium/media/base/fake_audio_renderer_sink.cc
index 924691ead1b..bf5ca25b581 100644
--- a/chromium/media/base/fake_audio_renderer_sink.cc
+++ b/chromium/media/base/fake_audio_renderer_sink.cc
@@ -51,6 +51,10 @@ void FakeAudioRendererSink::Stop() {
ChangeState(kStopped);
}
+void FakeAudioRendererSink::Flush() {
+ DCHECK_NE(state_, kPlaying);
+}
+
void FakeAudioRendererSink::Pause() {
DCHECK(state_ == kStarted || state_ == kPlaying) << "state_ " << state_;
ChangeState(kPaused);
diff --git a/chromium/media/base/fake_audio_renderer_sink.h b/chromium/media/base/fake_audio_renderer_sink.h
index de599d2652b..9918eabad63 100644
--- a/chromium/media/base/fake_audio_renderer_sink.h
+++ b/chromium/media/base/fake_audio_renderer_sink.h
@@ -35,6 +35,7 @@ class FakeAudioRendererSink : public AudioRendererSink {
RenderCallback* callback) override;
void Start() override;
void Stop() override;
+ void Flush() override;
void Pause() override;
void Play() override;
bool SetVolume(double volume) override;
diff --git a/chromium/media/base/fake_audio_worker_unittest.cc b/chromium/media/base/fake_audio_worker_unittest.cc
index 91c3be11c8d..df0363d6869 100644
--- a/chromium/media/base/fake_audio_worker_unittest.cc
+++ b/chromium/media/base/fake_audio_worker_unittest.cc
@@ -116,7 +116,10 @@ TEST_F(FakeAudioWorkerTest, MAYBE_FakeBasicCallback) {
}
// Ensure the time between callbacks is sane.
-TEST_F(FakeAudioWorkerTest, TimeBetweenCallbacks) {
+//
+// TODO(https://crbug.com/960729): Test is flaky because its behavior depends on
+// real wallclock time. Need to mock time to fix this.
+TEST_F(FakeAudioWorkerTest, DISABLED_TimeBetweenCallbacks) {
message_loop_.task_runner()->PostTask(
FROM_HERE,
base::BindOnce(&FakeAudioWorkerTest::TimeCallbacksOnAudioThread,
diff --git a/chromium/media/base/fake_demuxer_stream.cc b/chromium/media/base/fake_demuxer_stream.cc
index 736f6944901..f71aff3b5ac 100644
--- a/chromium/media/base/fake_demuxer_stream.cc
+++ b/chromium/media/base/fake_demuxer_stream.cc
@@ -158,7 +158,7 @@ void FakeDemuxerStream::UpdateVideoDecoderConfig() {
const gfx::Rect kVisibleRect(kStartWidth, kStartHeight);
video_decoder_config_.Initialize(
kCodecVP8, VIDEO_CODEC_PROFILE_UNKNOWN, PIXEL_FORMAT_I420,
- VideoColorSpace(), VIDEO_ROTATION_0, next_coded_size_, kVisibleRect,
+ VideoColorSpace(), kNoTransformation, next_coded_size_, kVisibleRect,
next_coded_size_, EmptyExtraData(),
is_encrypted_ ? AesCtrEncryptionScheme() : Unencrypted());
next_coded_size_.Enlarge(kWidthDelta, kHeightDelta);
diff --git a/chromium/media/base/fallback_video_decoder.cc b/chromium/media/base/fallback_video_decoder.cc
index 0d5826f69a5..bf763ea5223 100644
--- a/chromium/media/base/fallback_video_decoder.cc
+++ b/chromium/media/base/fallback_video_decoder.cc
@@ -23,19 +23,20 @@ FallbackVideoDecoder::FallbackVideoDecoder(
void FallbackVideoDecoder::Initialize(const VideoDecoderConfig& config,
bool low_delay,
CdmContext* cdm_context,
- const InitCB& init_cb,
+ InitCB init_cb,
const OutputCB& output_cb,
const WaitingCB& waiting_cb) {
// If we've already fallen back, just reinitialize the selected decoder.
if (selected_decoder_ && did_fallback_) {
- selected_decoder_->Initialize(config, low_delay, cdm_context, init_cb,
- output_cb, waiting_cb);
+ selected_decoder_->Initialize(config, low_delay, cdm_context,
+ std::move(init_cb), output_cb, waiting_cb);
return;
}
- InitCB fallback_initialize_cb = base::BindRepeating(
- &FallbackVideoDecoder::FallbackInitialize, weak_factory_.GetWeakPtr(),
- config, low_delay, cdm_context, init_cb, output_cb, waiting_cb);
+ InitCB fallback_initialize_cb =
+ base::BindOnce(&FallbackVideoDecoder::FallbackInitialize,
+ weak_factory_.GetWeakPtr(), config, low_delay, cdm_context,
+ std::move(init_cb), output_cb, waiting_cb);
preferred_decoder_->Initialize(config, low_delay, cdm_context,
std::move(fallback_initialize_cb), output_cb,
@@ -45,14 +46,14 @@ void FallbackVideoDecoder::Initialize(const VideoDecoderConfig& config,
void FallbackVideoDecoder::FallbackInitialize(const VideoDecoderConfig& config,
bool low_delay,
CdmContext* cdm_context,
- const InitCB& init_cb,
+ InitCB init_cb,
const OutputCB& output_cb,
const WaitingCB& waiting_cb,
bool success) {
// The preferred decoder was successfully initialized.
if (success) {
selected_decoder_ = preferred_decoder_.get();
- init_cb.Run(true);
+ std::move(init_cb).Run(true);
return;
}
@@ -65,19 +66,19 @@ void FallbackVideoDecoder::FallbackInitialize(const VideoDecoderConfig& config,
base::BindOnce(base::DoNothing::Once<std::unique_ptr<VideoDecoder>>(),
std::move(preferred_decoder_)));
selected_decoder_ = fallback_decoder_.get();
- fallback_decoder_->Initialize(config, low_delay, cdm_context, init_cb,
- output_cb, waiting_cb);
+ fallback_decoder_->Initialize(config, low_delay, cdm_context,
+ std::move(init_cb), output_cb, waiting_cb);
}
void FallbackVideoDecoder::Decode(scoped_refptr<DecoderBuffer> buffer,
- const DecodeCB& decode_cb) {
+ DecodeCB decode_cb) {
DCHECK(selected_decoder_);
- selected_decoder_->Decode(std::move(buffer), decode_cb);
+ selected_decoder_->Decode(std::move(buffer), std::move(decode_cb));
}
-void FallbackVideoDecoder::Reset(const base::RepeatingClosure& reset_cb) {
+void FallbackVideoDecoder::Reset(base::OnceClosure reset_cb) {
DCHECK(selected_decoder_);
- selected_decoder_->Reset(reset_cb);
+ selected_decoder_->Reset(std::move(reset_cb));
}
bool FallbackVideoDecoder::NeedsBitstreamConversion() const {
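For orientation, a sketch of how the wrapper might be constructed and initialized; the decoder instances, |output_cb|, and |waiting_cb| are placeholders, and the (preferred, fallback) constructor ordering is an assumption based on the tests below.

  auto decoder = std::make_unique<media::FallbackVideoDecoder>(
      std::move(preferred_decoder), std::move(fallback_decoder));
  decoder->Initialize(config, /*low_delay=*/false, /*cdm_context=*/nullptr,
                      base::BindOnce([](bool ok) { DCHECK(ok); }),
                      output_cb, waiting_cb);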
diff --git a/chromium/media/base/fallback_video_decoder.h b/chromium/media/base/fallback_video_decoder.h
index 69ee1f2779e..dc064d90c1d 100644
--- a/chromium/media/base/fallback_video_decoder.h
+++ b/chromium/media/base/fallback_video_decoder.h
@@ -24,12 +24,11 @@ class MEDIA_EXPORT FallbackVideoDecoder : public VideoDecoder {
void Initialize(const VideoDecoderConfig& config,
bool low_delay,
CdmContext* cdm_context,
- const InitCB& init_cb,
+ InitCB init_cb,
const OutputCB& output_cb,
const WaitingCB& waiting_cb) override;
- void Decode(scoped_refptr<DecoderBuffer> buffer,
- const DecodeCB& decode_cb) override;
- void Reset(const base::RepeatingClosure& reset_cb) override;
+ void Decode(scoped_refptr<DecoderBuffer> buffer, DecodeCB decode_cb) override;
+ void Reset(base::OnceClosure reset_cb) override;
bool NeedsBitstreamConversion() const override;
bool CanReadWithoutStalling() const override;
int GetMaxDecodeRequests() const override;
@@ -41,7 +40,7 @@ class MEDIA_EXPORT FallbackVideoDecoder : public VideoDecoder {
void FallbackInitialize(const VideoDecoderConfig& config,
bool low_delay,
CdmContext* cdm_context,
- const InitCB& init_cb,
+ InitCB init_cb,
const OutputCB& output_cb,
const WaitingCB& waiting_cb,
bool success);
diff --git a/chromium/media/base/fallback_video_decoder_unittest.cc b/chromium/media/base/fallback_video_decoder_unittest.cc
index d2896e7f513..8f5725d725e 100644
--- a/chromium/media/base/fallback_video_decoder_unittest.cc
+++ b/chromium/media/base/fallback_video_decoder_unittest.cc
@@ -40,14 +40,14 @@ class FallbackVideoDecoderUnittest : public ::testing::TestWithParam<bool> {
StrictMock<MockVideoDecoder>* result = new StrictMock<MockVideoDecoder>(n);
if (is_fallback && !preferred_should_succeed) {
- EXPECT_CALL(*result, Initialize(_, _, _, _, _, _))
- .WillOnce(RunCallback<3>(true));
+ EXPECT_CALL(*result, Initialize_(_, _, _, _, _, _))
+ .WillOnce(RunOnceCallback<3>(true));
}
if (!is_fallback) {
preferred_decoder_ = result;
- EXPECT_CALL(*result, Initialize(_, _, _, _, _, _))
- .WillOnce(RunCallback<3>(preferred_should_succeed));
+ EXPECT_CALL(*result, Initialize_(_, _, _, _, _, _))
+ .WillOnce(RunOnceCallback<3>(preferred_should_succeed));
} else {
backup_decoder_ = result;
}
@@ -96,10 +96,10 @@ INSTANTIATE_TEST_SUITE_P(DoesPreferredInitFail,
TEST_P(FallbackVideoDecoderUnittest, MethodsRedirectedAsExpected) {
Initialize(PreferredShouldSucceed());
- EXPECT_ON_CORRECT_DECODER(Decode(_, _));
+ EXPECT_ON_CORRECT_DECODER(Decode_(_, _));
fallback_decoder_->Decode(nullptr, base::DoNothing());
- EXPECT_ON_CORRECT_DECODER(Reset(_));
+ EXPECT_ON_CORRECT_DECODER(Reset_(_));
fallback_decoder_->Reset(base::DoNothing());
EXPECT_ON_CORRECT_DECODER(NeedsBitstreamConversion());
@@ -123,11 +123,11 @@ TEST_P(FallbackVideoDecoderUnittest, ReinitializeWithPreferredFailing) {
// If we succeeded the first time, it should still be alive.
if (PreferredShouldSucceed()) {
- EXPECT_CALL(*preferred_decoder_, Initialize(_, _, _, _, _, _))
- .WillOnce(RunCallback<3>(false)); // fail initialization
+ EXPECT_CALL(*preferred_decoder_, Initialize_(_, _, _, _, _, _))
+ .WillOnce(RunOnceCallback<3>(false)); // fail initialization
}
- EXPECT_CALL(*backup_decoder_, Initialize(_, _, _, _, _, _))
- .WillOnce(RunCallback<3>(true));
+ EXPECT_CALL(*backup_decoder_, Initialize_(_, _, _, _, _, _))
+ .WillOnce(RunOnceCallback<3>(true));
fallback_decoder_->Initialize(
video_decoder_config_, false, nullptr,
@@ -146,17 +146,17 @@ TEST_P(FallbackVideoDecoderUnittest, ReinitializeWithPreferredSuccessful) {
// If we succeeded the first time, it should still be alive.
if (PreferredShouldSucceed()) {
- EXPECT_CALL(*preferred_decoder_, Initialize(_, _, _, _, _, _))
- .WillOnce(RunCallback<3>(true)); // pass initialization
+ EXPECT_CALL(*preferred_decoder_, Initialize_(_, _, _, _, _, _))
+ .WillOnce(RunOnceCallback<3>(true)); // pass initialization
} else {
// Otherwise, preferred was deleted, and only the backup still exists.
- EXPECT_CALL(*backup_decoder_, Initialize(_, _, _, _, _, _))
- .WillOnce(RunCallback<3>(true));
+ EXPECT_CALL(*backup_decoder_, Initialize_(_, _, _, _, _, _))
+ .WillOnce(RunOnceCallback<3>(true));
}
fallback_decoder_->Initialize(
video_decoder_config_, false, nullptr,
- base::BindRepeating([](bool success) { EXPECT_TRUE(success); }),
+ base::BindOnce([](bool success) { EXPECT_TRUE(success); }),
base::DoNothing(), base::DoNothing());
}
diff --git a/chromium/media/base/gmock_callback_support.h b/chromium/media/base/gmock_callback_support.h
index 5227875b72e..0c0aa8ea5b9 100644
--- a/chromium/media/base/gmock_callback_support.h
+++ b/chromium/media/base/gmock_callback_support.h
@@ -124,6 +124,12 @@ ACTION_P(RunOnceClosure, closure) {
ACTION_TEMPLATE(RunOnceCallback,
HAS_1_TEMPLATE_PARAMS(int, k),
+ AND_0_VALUE_PARAMS()) {
+ return std::move(std::get<k>(args)).Run();
+}
+
+ACTION_TEMPLATE(RunOnceCallback,
+ HAS_1_TEMPLATE_PARAMS(int, k),
AND_1_VALUE_PARAMS(p0)) {
return std::move(std::get<k>(args)).Run(p0);
}
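The new zero-argument overload lets a test run a once-callback argument that takes no parameters. A sketch of an expectation using it (the mock object and method are hypothetical):

  // Runs the base::OnceClosure passed as argument 0 of DoAsync().
  EXPECT_CALL(mock_widget, DoAsync(_))
      .WillOnce(RunOnceCallback<0>());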
diff --git a/chromium/media/base/ipc/media_param_traits_macros.h b/chromium/media/base/ipc/media_param_traits_macros.h
index 6590f055966..e093ed84b7b 100644
--- a/chromium/media/base/ipc/media_param_traits_macros.h
+++ b/chromium/media/base/ipc/media_param_traits_macros.h
@@ -32,7 +32,7 @@
#include "media/base/subsample_entry.h"
#include "media/base/video_codecs.h"
#include "media/base/video_color_space.h"
-#include "media/base/video_rotation.h"
+#include "media/base/video_transformation.h"
#include "media/base/video_types.h"
#include "media/base/waiting.h"
#include "media/base/watch_time_keys.h"
diff --git a/chromium/media/base/key_systems_unittest.cc b/chromium/media/base/key_systems_unittest.cc
index b2f3e288195..3782240064a 100644
--- a/chromium/media/base/key_systems_unittest.cc
+++ b/chromium/media/base/key_systems_unittest.cc
@@ -13,6 +13,7 @@
#include <vector>
#include "base/logging.h"
+#include "media/base/audio_parameters.h"
#include "media/base/decrypt_config.h"
#include "media/base/eme_constants.h"
#include "media/base/key_systems.h"
@@ -271,6 +272,9 @@ class TestMediaClient : public MediaClient {
// test the key system update case.
void DisableExternalKeySystemSupport();
+ base::Optional<::media::AudioRendererAlgorithmParameters>
+ GetAudioRendererAlgorithmParameters(AudioParameters audio_parameters) final;
+
private:
bool is_update_needed_;
bool supports_external_key_system_;
@@ -317,6 +321,12 @@ void TestMediaClient::DisableExternalKeySystemSupport() {
supports_external_key_system_ = false;
}
+base::Optional<::media::AudioRendererAlgorithmParameters>
+TestMediaClient::GetAudioRendererAlgorithmParameters(
+ AudioParameters audio_parameters) {
+ return base::nullopt;
+}
+
} // namespace
class KeySystemsTest : public testing::Test {
diff --git a/chromium/media/base/mac/video_frame_mac_unittests.cc b/chromium/media/base/mac/video_frame_mac_unittests.cc
index 86ecdf5bfe9..abe9e5142bb 100644
--- a/chromium/media/base/mac/video_frame_mac_unittests.cc
+++ b/chromium/media/base/mac/video_frame_mac_unittests.cc
@@ -94,7 +94,7 @@ TEST(VideoFrameMac, CheckLifetime) {
int instances_destroyed = 0;
auto wrapper_frame = VideoFrame::WrapVideoFrame(
- frame, frame->format(), frame->visible_rect(), frame->natural_size());
+ *frame, frame->format(), frame->visible_rect(), frame->natural_size());
wrapper_frame->AddDestructionObserver(
base::Bind(&Increment, &instances_destroyed));
ASSERT_TRUE(wrapper_frame.get());
diff --git a/chromium/media/base/media_client.h b/chromium/media/base/media_client.h
index f60faa08c26..5c7d6c06c68 100644
--- a/chromium/media/base/media_client.h
+++ b/chromium/media/base/media_client.h
@@ -9,7 +9,9 @@
#include <string>
#include <vector>
+#include "base/optional.h"
#include "media/base/audio_codecs.h"
+#include "media/base/audio_parameters.h"
#include "media/base/key_system_properties.h"
#include "media/base/media_export.h"
#include "media/base/media_types.h"
@@ -57,6 +59,10 @@ class MEDIA_EXPORT MediaClient {
// Returns true if the compressed audio |codec| format is supported by the
// audio sink.
virtual bool IsSupportedBitstreamAudioCodec(AudioCodec codec) = 0;
+
+ // Optionally returns audio renderer algorithm parameters.
+ virtual base::Optional<::media::AudioRendererAlgorithmParameters>
+ GetAudioRendererAlgorithmParameters(AudioParameters audio_parameters) = 0;
};
} // namespace media
diff --git a/chromium/media/base/media_permission.h b/chromium/media/base/media_permission.h
index 05b10a7bd0f..4226086ba47 100644
--- a/chromium/media/base/media_permission.h
+++ b/chromium/media/base/media_permission.h
@@ -14,7 +14,7 @@ namespace media {
// Interface to handle media related permission checks and requests.
class MEDIA_EXPORT MediaPermission {
public:
- typedef base::Callback<void(bool)> PermissionStatusCB;
+ using PermissionStatusCB = base::OnceCallback<void(bool)>;
enum Type {
PROTECTED_MEDIA_IDENTIFIER,
@@ -28,15 +28,13 @@ class MEDIA_EXPORT MediaPermission {
// Checks whether |type| is permitted without triggering user interaction
// (e.g. permission prompt). The status will be |false| if the permission
// has never been set.
- virtual void HasPermission(
- Type type,
- const PermissionStatusCB& permission_status_cb) = 0;
+ virtual void HasPermission(Type type,
+ PermissionStatusCB permission_status_cb) = 0;
// Requests |type| permission. This may trigger user interaction
// (e.g. permission prompt) if the permission has never been set.
- virtual void RequestPermission(
- Type type,
- const PermissionStatusCB& permission_status_cb) = 0;
+ virtual void RequestPermission(Type type,
+ PermissionStatusCB permission_status_cb) = 0;
// Whether to allow the use of Encrypted Media Extensions (EME), except for
// the use of Clear Key key systems, which is always allowed as required by
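With PermissionStatusCB now a OnceCallback, call sites bind with base::BindOnce and the callback is consumed on first run. A minimal sketch (the |media_permission| pointer is a placeholder):

  media_permission->HasPermission(
      media::MediaPermission::PROTECTED_MEDIA_IDENTIFIER,
      base::BindOnce([](bool granted) {
        DVLOG(1) << "permission granted: " << granted;
      }));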
diff --git a/chromium/media/base/media_switches.cc b/chromium/media/base/media_switches.cc
index 390522cabfc..8a48c2bb51a 100644
--- a/chromium/media/base/media_switches.cc
+++ b/chromium/media/base/media_switches.cc
@@ -199,9 +199,10 @@ const base::Feature kPictureInPicture {
#endif
};
-// Only decode preload=metadata elements upon visibility?
+// Only decode preload=metadata elements upon visibility.
+// TODO(crbug.com/879406): Remove this after M76 ships to stable
const base::Feature kPreloadMetadataLazyLoad{"PreloadMetadataLazyLoad",
- base::FEATURE_DISABLED_BY_DEFAULT};
+ base::FEATURE_ENABLED_BY_DEFAULT};
// Let videos be resumed via remote controls (for example, the notification)
// when in background.
@@ -231,10 +232,6 @@ const base::Feature kUseAndroidOverlay{"UseAndroidOverlay",
const base::Feature kUseAndroidOverlayAggressively{
"UseAndroidOverlayAggressively", base::FEATURE_ENABLED_BY_DEFAULT};
-// Let video track be unselected when video is playing in the background.
-const base::Feature kBackgroundSrcVideoTrackOptimization{
- "BackgroundSrcVideoTrackOptimization", base::FEATURE_ENABLED_BY_DEFAULT};
-
// Let video without audio be paused when it is playing in the background.
const base::Feature kBackgroundVideoPauseOptimization{
"BackgroundVideoPauseOptimization", base::FEATURE_ENABLED_BY_DEFAULT};
@@ -245,12 +242,7 @@ const base::Feature kBackgroundVideoPauseOptimization{
const base::Feature kMemoryPressureBasedSourceBufferGC{
"MemoryPressureBasedSourceBufferGC", base::FEATURE_DISABLED_BY_DEFAULT};
-// Enable MojoVideoDecoder, replacing GpuVideoDecoder.
-const base::Feature kMojoVideoDecoder{"MojoVideoDecoder",
- base::FEATURE_ENABLED_BY_DEFAULT};
-
-// Enable The D3D11 Video decoder. Must also enable MojoVideoDecoder for
-// this to have any effect.
+// Enable the D3D11 video decoder.
const base::Feature kD3D11VideoDecoder{"D3D11VideoDecoder",
base::FEATURE_DISABLED_BY_DEFAULT};
@@ -267,10 +259,6 @@ const base::Feature kD3D11VideoDecoderIgnoreWorkarounds{
const base::Feature kFallbackAfterDecodeError{"FallbackAfterDecodeError",
base::FEATURE_ENABLED_BY_DEFAULT};
-// Manage and report MSE buffered ranges by PTS intervals, not DTS intervals.
-const base::Feature kMseBufferByPts{"MseBufferByPts",
- base::FEATURE_ENABLED_BY_DEFAULT};
-
// Enable new cpu load estimator. Intended for evaluation in local
// testing and origin-trial.
// TODO(nisse): Delete once we have switched over to always using the
@@ -278,10 +266,6 @@ const base::Feature kMseBufferByPts{"MseBufferByPts",
const base::Feature kNewEncodeCpuLoadEstimator{
"NewEncodeCpuLoadEstimator", base::FEATURE_DISABLED_BY_DEFAULT};
-// Use the new RTC hardware decode path via RTCVideoDecoderAdapter.
-const base::Feature kRTCVideoDecoderAdapter{"RTCVideoDecoderAdapter",
- base::FEATURE_ENABLED_BY_DEFAULT};
-
// CanPlayThrough issued according to standard.
const base::Feature kSpecCompliantCanPlayThrough{
"SpecCompliantCanPlayThrough", base::FEATURE_ENABLED_BY_DEFAULT};
@@ -326,6 +310,13 @@ const base::Feature kVideoBlitColorAccuracy{"video-blit-color-accuracy",
const base::Feature kExternalClearKeyForTesting{
"ExternalClearKeyForTesting", base::FEATURE_DISABLED_BY_DEFAULT};
+// Prevents UrlProvisionFetcher from making a provisioning request. If
+// specified, any provisioning request made will not be sent to the provisioning
+// server, and the response will indicate a failure to communicate with the
+// provisioning server.
+const base::Feature kFailUrlProvisionFetcherForTesting{
+ "FailUrlProvisionFetcherForTesting", base::FEATURE_DISABLED_BY_DEFAULT};
+
// Enables hardware secure decryption if supported by hardware and CDM.
// TODO(xhwang): Currently this is only used for development of new features.
// Apply this to Android and ChromeOS as well where hardware secure decryption
@@ -333,6 +324,13 @@ const base::Feature kExternalClearKeyForTesting{
const base::Feature kHardwareSecureDecryption{
"HardwareSecureDecryption", base::FEATURE_DISABLED_BY_DEFAULT};
+// Enables encrypted AV1 support in EME requestMediaKeySystemAccess() query by
+// Widevine key system if it is also supported by the underlying Widevine CDM.
+// This feature does not affect the actual playback of encrypted AV1 if it's
+// served by the player regardless of the query result.
+const base::Feature kWidevineAv1{"WidevineAv1",
+ base::FEATURE_ENABLED_BY_DEFAULT};
+
// Enables handling of hardware media keys for controlling media.
const base::Feature kHardwareMediaKeyHandling{
"HardwareMediaKeyHandling",
@@ -397,6 +395,15 @@ const base::Feature kAImageReaderVideoOutput{"AImageReaderVideoOutput",
const base::Feature kDisableSurfaceLayerForVideo{
"DisableSurfaceLayerForVideo", base::FEATURE_DISABLED_BY_DEFAULT};
+// Enables CanPlayType() (and other queries) for HLS MIME types. Note that
+// disabling this also causes navigation to .m3u8 files to trigger downloading
+// instead of playback.
+const base::Feature kCanPlayHls{"CanPlayHls", base::FEATURE_ENABLED_BY_DEFAULT};
+
+// Enables the use of MediaPlayerRenderer for HLS playback. When disabled,
+// HLS manifests will fail to load (triggering source fallback or load error).
+const base::Feature kHlsPlayer{"HlsPlayer", base::FEATURE_ENABLED_BY_DEFAULT};
+
#endif // defined(OS_ANDROID)
#if defined(OS_WIN)
@@ -442,11 +449,6 @@ std::string GetEffectiveAutoplayPolicy(const base::CommandLine& command_line) {
const base::Feature kOverflowIconsForMediaControls{
"OverflowIconsForMediaControls", base::FEATURE_ENABLED_BY_DEFAULT};
-// Enables the new redesigned media controls.
-// TODO(steimel): Remove this.
-const base::Feature kUseModernMediaControls{"UseModernMediaControls",
- base::FEATURE_ENABLED_BY_DEFAULT};
-
// Enables Media Engagement Index recording. This data will be used to determine
// when to bypass autoplay policies. This is recorded on all platforms.
const base::Feature kRecordMediaEngagementScores{
@@ -475,6 +477,9 @@ const base::Feature kPreloadMediaEngagementData{
"PreloadMediaEngagementData", base::FEATURE_ENABLED_BY_DEFAULT};
#endif
+const base::Feature kMediaEngagementHTTPSOnly{
+ "MediaEngagementHTTPSOnly", base::FEATURE_DISABLED_BY_DEFAULT};
+
// Enables experimental local learning for media. Adds reporting only; does not
// change media behavior.
const base::Feature kMediaLearningExperiment{"MediaLearningExperiment",
@@ -491,6 +496,13 @@ const base::Feature kAudioFocusDuckFlash {
#endif
};
+// Only affects Android. Suspends a media session when audio focus is lost; when
+// this setting is disabled, an Android media session will not be suspended when
+// audio focus is lost. This is used by Cast, which sometimes needs to drive
+// multiple media sessions.
+const base::Feature kAudioFocusLossSuspendMediaSession{
+ "AudioFocusMediaSession", base::FEATURE_ENABLED_BY_DEFAULT};
+
// Enables the internal Media Session logic without enabling the Media Session
// service.
const base::Feature kInternalMediaSession {
diff --git a/chromium/media/base/media_switches.h b/chromium/media/base/media_switches.h
index ee0241ba570..267154b7afe 100644
--- a/chromium/media/base/media_switches.h
+++ b/chromium/media/base/media_switches.h
@@ -94,14 +94,15 @@ namespace media {
// alongside the definition of their values in the .cc file.
MEDIA_EXPORT extern const base::Feature kAudioFocusDuckFlash;
+MEDIA_EXPORT extern const base::Feature kAudioFocusLossSuspendMediaSession;
MEDIA_EXPORT extern const base::Feature kAutoplayIgnoreWebAudio;
MEDIA_EXPORT extern const base::Feature kAutoplayDisableSettings;
MEDIA_EXPORT extern const base::Feature kAutoplayWhitelistSettings;
-MEDIA_EXPORT extern const base::Feature kBackgroundSrcVideoTrackOptimization;
MEDIA_EXPORT extern const base::Feature kBackgroundVideoPauseOptimization;
MEDIA_EXPORT extern const base::Feature kD3D11VideoDecoder;
MEDIA_EXPORT extern const base::Feature kD3D11VideoDecoderIgnoreWorkarounds;
MEDIA_EXPORT extern const base::Feature kExternalClearKeyForTesting;
+MEDIA_EXPORT extern const base::Feature kFailUrlProvisionFetcherForTesting;
MEDIA_EXPORT extern const base::Feature kFallbackAfterDecodeError;
MEDIA_EXPORT extern const base::Feature kHardwareMediaKeyHandling;
MEDIA_EXPORT extern const base::Feature kHardwareSecureDecryption;
@@ -110,10 +111,9 @@ MEDIA_EXPORT extern const base::Feature kLowDelayVideoRenderingOnLiveStream;
MEDIA_EXPORT extern const base::Feature kMediaCapabilitiesWithParameters;
MEDIA_EXPORT extern const base::Feature kMediaCastOverlayButton;
MEDIA_EXPORT extern const base::Feature kMediaEngagementBypassAutoplayPolicies;
+MEDIA_EXPORT extern const base::Feature kMediaEngagementHTTPSOnly;
MEDIA_EXPORT extern const base::Feature kMediaLearningExperiment;
MEDIA_EXPORT extern const base::Feature kMemoryPressureBasedSourceBufferGC;
-MEDIA_EXPORT extern const base::Feature kMojoVideoDecoder;
-MEDIA_EXPORT extern const base::Feature kMseBufferByPts;
MEDIA_EXPORT extern const base::Feature kNewEncodeCpuLoadEstimator;
MEDIA_EXPORT extern const base::Feature kOverflowIconsForMediaControls;
MEDIA_EXPORT extern const base::Feature kOverlayFullscreenVideo;
@@ -121,7 +121,6 @@ MEDIA_EXPORT extern const base::Feature kPictureInPicture;
MEDIA_EXPORT extern const base::Feature kPreloadMediaEngagementData;
MEDIA_EXPORT extern const base::Feature kPreloadMetadataLazyLoad;
MEDIA_EXPORT extern const base::Feature kPreloadMetadataSuspend;
-MEDIA_EXPORT extern const base::Feature kRTCVideoDecoderAdapter;
MEDIA_EXPORT extern const base::Feature kRecordMediaEngagementScores;
MEDIA_EXPORT extern const base::Feature kRecordWebAudioEngagement;
MEDIA_EXPORT extern const base::Feature kResumeBackgroundVideo;
@@ -129,7 +128,6 @@ MEDIA_EXPORT extern const base::Feature kSpecCompliantCanPlayThrough;
MEDIA_EXPORT extern const base::Feature kUnifiedAutoplay;
MEDIA_EXPORT extern const base::Feature kUseAndroidOverlay;
MEDIA_EXPORT extern const base::Feature kUseAndroidOverlayAggressively;
-MEDIA_EXPORT extern const base::Feature kUseModernMediaControls;
MEDIA_EXPORT extern const base::Feature kUseNewMediaCache;
MEDIA_EXPORT extern const base::Feature kUseR16Texture;
MEDIA_EXPORT extern const base::Feature kUseSurfaceLayerForVideo;
@@ -137,6 +135,7 @@ MEDIA_EXPORT extern const base::Feature kVaapiLowPowerEncoder;
MEDIA_EXPORT extern const base::Feature kVaapiVP8Encoder;
MEDIA_EXPORT extern const base::Feature kVaapiVP9Encoder;
MEDIA_EXPORT extern const base::Feature kVideoBlitColorAccuracy;
+MEDIA_EXPORT extern const base::Feature kWidevineAv1;
#if defined(OS_ANDROID)
MEDIA_EXPORT extern const base::Feature kMediaControlsExpandGesture;
@@ -145,6 +144,8 @@ MEDIA_EXPORT extern const base::Feature kMediaDrmPreprovisioning;
MEDIA_EXPORT extern const base::Feature kMediaDrmPreprovisioningAtStartup;
MEDIA_EXPORT extern const base::Feature kAImageReaderVideoOutput;
MEDIA_EXPORT extern const base::Feature kDisableSurfaceLayerForVideo;
+MEDIA_EXPORT extern const base::Feature kCanPlayHls;
+MEDIA_EXPORT extern const base::Feature kHlsPlayer;
#endif // defined(OS_ANDROID)
#if defined(OS_WIN)
diff --git a/chromium/media/base/mime_util_internal.cc b/chromium/media/base/mime_util_internal.cc
index ce2aaa815e8..3b97368d5b3 100644
--- a/chromium/media/base/mime_util_internal.cc
+++ b/chromium/media/base/mime_util_internal.cc
@@ -5,6 +5,7 @@
#include "media/base/mime_util_internal.h"
#include "base/command_line.h"
+#include "base/feature_list.h"
#include "base/no_destructor.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
@@ -360,21 +361,23 @@ void MimeUtil::AddSupportedMediaFormats() {
AddContainerWithCodecs("video/mp2t", mp2t_codecs);
#endif // BUILDFLAG(ENABLE_MSE_MPEG2TS_STREAM_PARSER)
#if defined(OS_ANDROID)
- // HTTP Live Streaming (HLS).
- CodecSet hls_codecs{H264,
- // TODO(ddorwin): Is any MP3 codec string variant included
- // in real queries?
- MP3,
- // Android HLS only supports MPEG4_AAC (missing demuxer
- // support for MPEG2_AAC)
- MPEG4_AAC};
- AddContainerWithCodecs("application/x-mpegurl", hls_codecs);
- AddContainerWithCodecs("application/vnd.apple.mpegurl", hls_codecs);
- AddContainerWithCodecs("audio/mpegurl", hls_codecs);
- // Not documented by Apple, but unfortunately used extensively by Apple and
- // others for both audio-only and audio+video playlists. See
- // https://crbug.com/675552 for details and examples.
- AddContainerWithCodecs("audio/x-mpegurl", hls_codecs);
+ if (base::FeatureList::IsEnabled(kCanPlayHls)) {
+ // HTTP Live Streaming (HLS).
+ CodecSet hls_codecs{H264,
+ // TODO(ddorwin): Is any MP3 codec string variant
+ // included in real queries?
+ MP3,
+ // Android HLS only supports MPEG4_AAC (missing demuxer
+ // support for MPEG2_AAC)
+ MPEG4_AAC};
+ AddContainerWithCodecs("application/x-mpegurl", hls_codecs);
+ AddContainerWithCodecs("application/vnd.apple.mpegurl", hls_codecs);
+ AddContainerWithCodecs("audio/mpegurl", hls_codecs);
+ // Not documented by Apple, but unfortunately used extensively by Apple and
+ // others for both audio-only and audio+video playlists. See
+ // https://crbug.com/675552 for details and examples.
+ AddContainerWithCodecs("audio/x-mpegurl", hls_codecs);
+ }
#endif // defined(OS_ANDROID)
#endif // BUILDFLAG(USE_PROPRIETARY_CODECS)
}
@@ -838,7 +841,8 @@ bool MimeUtil::ParseCodecHelper(const std::string& mime_type_lower_case,
#endif
#if BUILDFLAG(ENABLE_MPEG_H_AUDIO_DEMUXING)
- if (base::StartsWith(codec_id, "mhm1.", base::CompareCase::SENSITIVE)) {
+ if (base::StartsWith(codec_id, "mhm1.", base::CompareCase::SENSITIVE) ||
+ base::StartsWith(codec_id, "mha1.", base::CompareCase::SENSITIVE)) {
out_result->codec = MimeUtil::MPEG_H_AUDIO;
return true;
}
diff --git a/chromium/media/base/mock_audio_renderer_sink.h b/chromium/media/base/mock_audio_renderer_sink.h
index 3c84881284b..06da1643fa9 100644
--- a/chromium/media/base/mock_audio_renderer_sink.h
+++ b/chromium/media/base/mock_audio_renderer_sink.h
@@ -26,6 +26,7 @@ class MockAudioRendererSink : public SwitchableAudioRendererSink {
MOCK_METHOD0(Start, void());
MOCK_METHOD0(Stop, void());
+ MOCK_METHOD0(Flush, void());
MOCK_METHOD0(Pause, void());
MOCK_METHOD0(Play, void());
MOCK_METHOD1(SetVolume, bool(double volume));
diff --git a/chromium/media/base/mock_filters.h b/chromium/media/base/mock_filters.h
index 8505f32908d..df5cb56dbe4 100644
--- a/chromium/media/base/mock_filters.h
+++ b/chromium/media/base/mock_filters.h
@@ -15,6 +15,7 @@
#include "base/macros.h"
#include "media/base/audio_decoder.h"
#include "media/base/audio_decoder_config.h"
+#include "media/base/audio_parameters.h"
#include "media/base/audio_renderer.h"
#include "media/base/cdm_config.h"
#include "media/base/cdm_context.h"
@@ -52,7 +53,7 @@ class MockPipelineClient : public Pipeline::Client {
MOCK_METHOD1(OnError, void(PipelineStatus));
MOCK_METHOD0(OnEnded, void());
- MOCK_METHOD1(OnMetadata, void(PipelineMetadata));
+ MOCK_METHOD1(OnMetadata, void(const PipelineMetadata&));
MOCK_METHOD1(OnBufferingStateChange, void(BufferingState));
MOCK_METHOD0(OnDurationChange, void());
MOCK_METHOD2(OnAddTextTrack,
@@ -208,16 +209,27 @@ class MockVideoDecoder : public VideoDecoder {
// VideoDecoder implementation.
std::string GetDisplayName() const override;
- MOCK_METHOD6(Initialize,
+ void Initialize(const VideoDecoderConfig& config,
+ bool low_delay,
+ CdmContext* cdm_context,
+ InitCB init_cb,
+ const OutputCB& output_cb,
+ const WaitingCB& waiting_cb) override {
+ Initialize_(config, low_delay, cdm_context, init_cb, output_cb, waiting_cb);
+ }
+ MOCK_METHOD6(Initialize_,
void(const VideoDecoderConfig& config,
bool low_delay,
CdmContext* cdm_context,
- const InitCB& init_cb,
+ InitCB& init_cb,
const OutputCB& output_cb,
const WaitingCB& waiting_cb));
- MOCK_METHOD2(Decode,
- void(scoped_refptr<DecoderBuffer> buffer, const DecodeCB&));
- MOCK_METHOD1(Reset, void(const base::Closure&));
+ void Decode(scoped_refptr<DecoderBuffer> buffer, DecodeCB cb) override {
+ Decode_(std::move(buffer), cb);
+ }
+ MOCK_METHOD2(Decode_, void(scoped_refptr<DecoderBuffer> buffer, DecodeCB&));
+ void Reset(base::OnceClosure cb) override { Reset_(cb); }
+ MOCK_METHOD1(Reset_, void(base::OnceClosure&));
MOCK_CONST_METHOD0(GetMaxDecodeRequests, int());
MOCK_CONST_METHOD0(CanReadWithoutStalling, bool());
MOCK_CONST_METHOD0(NeedsBitstreamConversion, bool());
@@ -235,15 +247,23 @@ class MockAudioDecoder : public AudioDecoder {
// AudioDecoder implementation.
std::string GetDisplayName() const override;
- MOCK_METHOD5(Initialize,
+ void Initialize(const AudioDecoderConfig& config,
+ CdmContext* cdm_context,
+ InitCB init_cb,
+ const OutputCB& output_cb,
+ const WaitingCB& waiting_cb) override {
+ Initialize_(config, cdm_context, init_cb, output_cb, waiting_cb);
+ }
+ MOCK_METHOD5(Initialize_,
void(const AudioDecoderConfig& config,
CdmContext* cdm_context,
- const InitCB& init_cb,
+ InitCB& init_cb,
const OutputCB& output_cb,
const WaitingCB& waiting_cb));
MOCK_METHOD2(Decode,
void(scoped_refptr<DecoderBuffer> buffer, const DecodeCB&));
- MOCK_METHOD1(Reset, void(const base::Closure&));
+ void Reset(base::OnceClosure cb) override { Reset_(cb); }
+ MOCK_METHOD1(Reset_, void(base::OnceClosure&));
private:
std::string decoder_name_;
@@ -622,6 +642,9 @@ class MockMediaClient : public media::MediaClient {
MOCK_METHOD1(IsSupportedAudioType, bool(const media::AudioType& type));
MOCK_METHOD1(IsSupportedVideoType, bool(const media::VideoType& type));
MOCK_METHOD1(IsSupportedBitstreamAudioCodec, bool(media::AudioCodec codec));
+ MOCK_METHOD1(GetAudioRendererAlgorithmParameters,
+ base::Optional<::media::AudioRendererAlgorithmParameters>(
+ media::AudioParameters audio_parameters));
private:
DISALLOW_COPY_AND_ASSIGN(MockMediaClient);
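The Initialize_/Decode_/Reset_ pattern above exists because classic MOCK_METHODn macros copy their arguments, which move-only callbacks forbid; the real override forwards to a mockable method that takes the callback by reference. A generic sketch of the idiom:

  class MockThing : public Thing {
   public:
    // Override forwards so gmock never copies the move-only callback.
    void Run(base::OnceClosure done) override { Run_(done); }
    MOCK_METHOD1(Run_, void(base::OnceClosure&));
  };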
diff --git a/chromium/media/base/null_video_sink.cc b/chromium/media/base/null_video_sink.cc
index a91a813f943..128212ef76a 100644
--- a/chromium/media/base/null_video_sink.cc
+++ b/chromium/media/base/null_video_sink.cc
@@ -87,14 +87,14 @@ void NullVideoSink::CallRender() {
delay);
}
-void NullVideoSink::PaintSingleFrame(const scoped_refptr<VideoFrame>& frame,
+void NullVideoSink::PaintSingleFrame(scoped_refptr<VideoFrame> frame,
bool repaint_duplicate_frame) {
if (!repaint_duplicate_frame && frame == last_frame_)
return;
last_frame_ = frame;
if (new_frame_cb_)
- new_frame_cb_.Run(frame);
+ new_frame_cb_.Run(std::move(frame));
}
} // namespace media
diff --git a/chromium/media/base/null_video_sink.h b/chromium/media/base/null_video_sink.h
index 54cd9549e62..1d5a31ad87b 100644
--- a/chromium/media/base/null_video_sink.h
+++ b/chromium/media/base/null_video_sink.h
@@ -20,7 +20,7 @@ namespace media {
class MEDIA_EXPORT NullVideoSink : public VideoRendererSink {
public:
- using NewFrameCB = base::Callback<void(const scoped_refptr<VideoFrame>&)>;
+ using NewFrameCB = base::RepeatingCallback<void(scoped_refptr<VideoFrame>)>;
// Periodically calls |callback| every |interval| on |task_runner| once the
// sink has been started. If |clockless| is true, the RenderCallback will
@@ -35,7 +35,7 @@ class MEDIA_EXPORT NullVideoSink : public VideoRendererSink {
// VideoRendererSink implementation.
void Start(RenderCallback* callback) override;
void Stop() override;
- void PaintSingleFrame(const scoped_refptr<VideoFrame>& frame,
+ void PaintSingleFrame(scoped_refptr<VideoFrame> frame,
bool repaint_duplicate_frame) override;
void set_tick_clock_for_testing(const base::TickClock* tick_clock) {
diff --git a/chromium/media/base/null_video_sink_unittest.cc b/chromium/media/base/null_video_sink_unittest.cc
index 2b4c5e15e8e..5909ec9bee3 100644
--- a/chromium/media/base/null_video_sink_unittest.cc
+++ b/chromium/media/base/null_video_sink_unittest.cc
@@ -9,6 +9,7 @@
#include "base/macros.h"
#include "base/message_loop/message_loop.h"
#include "base/test/simple_test_tick_clock.h"
+#include "components/viz/common/frame_sinks/begin_frame_args.h"
#include "media/base/gmock_callback_support.h"
#include "media/base/null_video_sink.h"
#include "media/base/test_helpers.h"
@@ -46,6 +47,9 @@ class NullVideoSinkTest : public testing::Test,
gfx::Rect(natural_size), natural_size,
timestamp);
}
+ base::TimeDelta GetPreferredRenderInterval() override {
+ return viz::BeginFrameArgs::MinInterval();
+ }
// VideoRendererSink::RenderCallback implementation.
MOCK_METHOD3(Render,
@@ -54,7 +58,7 @@ class NullVideoSinkTest : public testing::Test,
bool));
MOCK_METHOD0(OnFrameDropped, void());
- MOCK_METHOD1(FrameReceived, void(const scoped_refptr<VideoFrame>&));
+ MOCK_METHOD1(FrameReceived, void(scoped_refptr<VideoFrame>));
protected:
base::MessageLoop message_loop_;
diff --git a/chromium/media/base/pipeline.h b/chromium/media/base/pipeline.h
index 7b96cb29dd3..3e48fc7eed6 100644
--- a/chromium/media/base/pipeline.h
+++ b/chromium/media/base/pipeline.h
@@ -20,7 +20,7 @@
#include "media/base/ranges.h"
#include "media/base/text_track.h"
#include "media/base/video_decoder_config.h"
-#include "media/base/video_rotation.h"
+#include "media/base/video_transformation.h"
#include "media/base/waiting.h"
#include "ui/gfx/geometry/size.h"
@@ -45,7 +45,7 @@ class MEDIA_EXPORT Pipeline {
// Executed when the content duration, container video size, start time,
// and whether the content has audio and/or video in supported formats are
// known.
- virtual void OnMetadata(PipelineMetadata metadata) = 0;
+ virtual void OnMetadata(const PipelineMetadata& metadata) = 0;
// Executed whenever there are changes in the buffering state of the
// pipeline.
diff --git a/chromium/media/base/pipeline_impl.cc b/chromium/media/base/pipeline_impl.cc
index 686b2131964..33a6d052fd9 100644
--- a/chromium/media/base/pipeline_impl.cc
+++ b/chromium/media/base/pipeline_impl.cc
@@ -953,7 +953,7 @@ void PipelineImpl::RendererWrapper::ReportMetadata(StartType start_type) {
if (stream->type() == DemuxerStream::VIDEO && !metadata.has_video) {
metadata.has_video = true;
metadata.natural_size = GetRotatedVideoSize(
- stream->video_decoder_config().video_rotation(),
+ stream->video_decoder_config().video_transformation().rotation,
stream->video_decoder_config().natural_size());
metadata.video_decoder_config = stream->video_decoder_config();
}
@@ -1301,7 +1301,7 @@ void PipelineImpl::OnEnded() {
client_->OnEnded();
}
-void PipelineImpl::OnMetadata(PipelineMetadata metadata) {
+void PipelineImpl::OnMetadata(const PipelineMetadata& metadata) {
DVLOG(2) << __func__;
DCHECK(thread_checker_.CalledOnValidThread());
DCHECK(IsRunning());
diff --git a/chromium/media/base/pipeline_impl.h b/chromium/media/base/pipeline_impl.h
index f1240bb75d0..f5cadcaddcd 100644
--- a/chromium/media/base/pipeline_impl.h
+++ b/chromium/media/base/pipeline_impl.h
@@ -135,7 +135,7 @@ class MEDIA_EXPORT PipelineImpl : public Pipeline {
// Notifications from RendererWrapper.
void OnError(PipelineStatus error);
void OnEnded();
- void OnMetadata(PipelineMetadata metadata);
+ void OnMetadata(const PipelineMetadata& metadata);
void OnBufferingStateChange(BufferingState state);
void OnDurationChange(base::TimeDelta duration);
void OnWaiting(WaitingReason reason);
diff --git a/chromium/media/base/pipeline_metadata.h b/chromium/media/base/pipeline_metadata.h
index 1838222b1a9..4f34ff80061 100644
--- a/chromium/media/base/pipeline_metadata.h
+++ b/chromium/media/base/pipeline_metadata.h
@@ -8,7 +8,7 @@
#include "base/time/time.h"
#include "media/base/audio_decoder_config.h"
#include "media/base/video_decoder_config.h"
-#include "media/base/video_rotation.h"
+#include "media/base/video_transformation.h"
#include "ui/gfx/geometry/size.h"
namespace media {
diff --git a/chromium/media/base/routing_token_callback.h b/chromium/media/base/routing_token_callback.h
index 8dbeb4afb0b..9663c17f3f0 100644
--- a/chromium/media/base/routing_token_callback.h
+++ b/chromium/media/base/routing_token_callback.h
@@ -12,12 +12,13 @@ namespace media {
// Handy callback type to provide a routing token.
using RoutingTokenCallback =
- base::Callback<void(const base::UnguessableToken&)>;
+ base::OnceCallback<void(const base::UnguessableToken&)>;
// Callback to register a RoutingTokenCallback with something that can provide
// it. For example, RenderFrame(Impl) will provide this, while WMPI can choose
// to call it if it would like to be called back with a routing token.
-using RequestRoutingTokenCallback = base::Callback<void(RoutingTokenCallback)>;
+using RequestRoutingTokenCallback =
+ base::RepeatingCallback<void(RoutingTokenCallback)>;
} // namespace media
diff --git a/chromium/media/base/test_helpers.cc b/chromium/media/base/test_helpers.cc
index dff8401565c..80a26ca03d7 100644
--- a/chromium/media/base/test_helpers.cc
+++ b/chromium/media/base/test_helpers.cc
@@ -136,8 +136,9 @@ static VideoDecoderConfig GetTestConfig(VideoCodec codec,
gfx::Size natural_size = coded_size;
return VideoDecoderConfig(
- codec, profile, PIXEL_FORMAT_I420, color_space, rotation, coded_size,
- visible_rect, natural_size, EmptyExtraData(),
+ codec, profile, PIXEL_FORMAT_I420, color_space,
+ VideoTransformation(rotation), coded_size, visible_rect, natural_size,
+ EmptyExtraData(),
is_encrypted ? AesCtrEncryptionScheme() : Unencrypted());
}
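Throughout this patch, a bare VideoRotation is replaced by VideoTransformation; kNoTransformation is the identity and VideoTransformation(rotation) wraps a plain rotation, with the rotation readable via the .rotation field (both usages appear elsewhere in this diff).

  media::VideoTransformation identity = media::kNoTransformation;
  media::VideoTransformation rotated(media::VIDEO_ROTATION_90);
  DCHECK_EQ(rotated.rotation, media::VIDEO_ROTATION_90);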
diff --git a/chromium/media/base/test_helpers.h b/chromium/media/base/test_helpers.h
index 20fbf90a574..ff2a2431e49 100644
--- a/chromium/media/base/test_helpers.h
+++ b/chromium/media/base/test_helpers.h
@@ -391,18 +391,8 @@ MATCHER_P2(NoSpliceForBadMux, overlapped_buffer_count, splice_time_us, "") {
base::NumberToString(splice_time_us));
}
-MATCHER_P(BufferingByPtsDts, by_pts_bool, "") {
- return CONTAINS_STRING(arg, std::string("ChunkDemuxer: buffering by ") +
- (by_pts_bool ? "PTS" : "DTS"));
-}
-
-MATCHER_P3(NegativeDtsFailureWhenByDts, frame_type, pts_us, dts_us, "") {
- return CONTAINS_STRING(
- arg, std::string(frame_type) + " frame with PTS " +
- base::NumberToString(pts_us) + "us has negative DTS " +
- base::NumberToString(dts_us) +
- "us after applying timestampOffset, handling any discontinuity, "
- "and filtering against append window");
+MATCHER(ChunkDemuxerCtor, "") {
+ return CONTAINS_STRING(arg, "ChunkDemuxer");
}
MATCHER_P2(DiscardingEmptyFrame, pts_us, dts_us, "") {
diff --git a/chromium/media/base/unaligned_shared_memory.cc b/chromium/media/base/unaligned_shared_memory.cc
index f4b7de77234..3ac85579787 100644
--- a/chromium/media/base/unaligned_shared_memory.cc
+++ b/chromium/media/base/unaligned_shared_memory.cc
@@ -7,6 +7,7 @@
#include <limits>
#include "base/logging.h"
+#include "base/memory/read_only_shared_memory_region.h"
#include "base/system/sys_info.h"
#include "mojo/public/cpp/system/platform_handle.h"
@@ -29,9 +30,10 @@ bool CalculateMisalignmentAndOffset(size_t size,
// Note: result of % computation may be off_t or size_t, depending on the
// relative ranks of those types. In any case we assume that
// VMAllocationGranularity() fits in both types, so the final result does too.
+ DCHECK_GE(offset, 0);
*misalignment = offset % base::SysInfo::VMAllocationGranularity();
- // Above this |size_|, |size_| + |misalignment| overflows.
+ // Above this |max_size|, |size| + |*misalignment| overflows.
size_t max_size = std::numeric_limits<size_t>::max() - *misalignment;
if (size > max_size) {
DLOG(ERROR) << "Invalid size";
@@ -48,7 +50,13 @@ UnalignedSharedMemory::UnalignedSharedMemory(
const base::SharedMemoryHandle& handle,
size_t size,
bool read_only)
- : shm_(handle, read_only), size_(size), misalignment_(0) {}
+ : shm_(handle, read_only), read_only_(read_only), size_(size) {}
+
+UnalignedSharedMemory::UnalignedSharedMemory(
+ base::subtle::PlatformSharedMemoryRegion region,
+ size_t size,
+ bool read_only)
+ : region_(std::move(region)), read_only_(read_only), size_(size) {}
UnalignedSharedMemory::~UnalignedSharedMemory() = default;
@@ -66,19 +74,47 @@ bool UnalignedSharedMemory::MapAt(off_t offset, size_t size) {
return false;
}
- if (!shm_.MapAt(adjusted_offset, size + misalignment)) {
- DLOG(ERROR) << "Failed to map shared memory";
- return false;
+ if (region_.IsValid()) {
+ if (read_only_) {
+ auto shm =
+ base::ReadOnlySharedMemoryRegion::Deserialize(std::move(region_));
+ read_only_mapping_ = shm.MapAt(adjusted_offset, size + misalignment);
+ if (!read_only_mapping_.IsValid()) {
+ DLOG(ERROR) << "Failed to map shared memory";
+ return false;
+ }
+ // TODO(crbug.com/849207): this ugly const cast will go away when uses of
+ // UnalignedSharedMemory are converted to
+ // {Writable,ReadOnly}UnalignedMapping.
+ mapping_ptr_ = const_cast<uint8_t*>(
+ static_cast<const uint8_t*>(read_only_mapping_.memory()));
+ } else {
+ auto shm =
+ base::UnsafeSharedMemoryRegion::Deserialize(std::move(region_));
+ writable_mapping_ = shm.MapAt(adjusted_offset, size + misalignment);
+ if (!writable_mapping_.IsValid()) {
+ DLOG(ERROR) << "Failed to map shared memory";
+ return false;
+ }
+ mapping_ptr_ = static_cast<uint8_t*>(writable_mapping_.memory());
+ }
+ } else {
+ if (!shm_.MapAt(adjusted_offset, size + misalignment)) {
+ DLOG(ERROR) << "Failed to map shared memory";
+ return false;
+ }
+ mapping_ptr_ = static_cast<uint8_t*>(shm_.memory());
}
- misalignment_ = misalignment;
+ DCHECK(mapping_ptr_);
+ // There should be no way for the IsValid() checks above to succeed and yet
+ // |mapping_ptr_| remain null. However, since an invalid but non-null pointer
+ // could be disastrous, an extra-careful check is done.
+ if (mapping_ptr_)
+ mapping_ptr_ += misalignment;
return true;
}
-void* UnalignedSharedMemory::memory() const {
- return static_cast<uint8_t*>(shm_.memory()) + misalignment_;
-}
-
WritableUnalignedMapping::WritableUnalignedMapping(
const base::UnsafeSharedMemoryRegion& region,
size_t size,
@@ -111,15 +147,6 @@ WritableUnalignedMapping::WritableUnalignedMapping(
}
}
-WritableUnalignedMapping::WritableUnalignedMapping(
- const base::SharedMemoryHandle& handle,
- size_t size,
- off_t offset)
- : WritableUnalignedMapping(
- base::UnsafeSharedMemoryRegion::CreateFromHandle(handle),
- size,
- offset) {}
-
WritableUnalignedMapping::~WritableUnalignedMapping() = default;
void* WritableUnalignedMapping::memory() const {
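
A standalone arithmetic sketch (illustrative, not Chromium code) of how MapAt() handles an offset that is not a multiple of the allocation granularity: map from the previous granularity boundary and advance the returned pointer past the slack.

  #include <cstdint>
  #include <cstdio>

  int main() {
    const uint64_t kGranularity = 4096;  // stand-in for VMAllocationGranularity()
    const uint64_t offset = 4100;        // caller-requested, unaligned offset
    const uint64_t misalignment = offset % kGranularity;     // 4
    const uint64_t adjusted_offset = offset - misalignment;  // 4096
    // The mapping must cover size + misalignment bytes; memory() then returns
    // the mapped base advanced by the misalignment.
    std::printf("map at %llu, advance pointer by %llu\n",
                static_cast<unsigned long long>(adjusted_offset),
                static_cast<unsigned long long>(misalignment));
  }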
diff --git a/chromium/media/base/unaligned_shared_memory.h b/chromium/media/base/unaligned_shared_memory.h
index 7e3cbbaef62..46341ca775d 100644
--- a/chromium/media/base/unaligned_shared_memory.h
+++ b/chromium/media/base/unaligned_shared_memory.h
@@ -8,6 +8,7 @@
#include <stdint.h>
#include "base/macros.h"
+#include "base/memory/platform_shared_memory_region.h"
#include "base/memory/read_only_shared_memory_region.h"
#include "base/memory/shared_memory.h"
#include "base/memory/shared_memory_mapping.h"
@@ -26,23 +27,38 @@ class MEDIA_EXPORT UnalignedSharedMemory {
UnalignedSharedMemory(const base::SharedMemoryHandle& handle,
size_t size,
bool read_only);
+
+ // As above, but from a PlatformSharedMemoryRegion.
+ UnalignedSharedMemory(base::subtle::PlatformSharedMemoryRegion region,
+ size_t size,
+ bool read_only);
+
~UnalignedSharedMemory();
// Map the shared memory region. Note that the passed |size| parameter should
// be less than or equal to |size()|.
bool MapAt(off_t offset, size_t size);
size_t size() const { return size_; }
- void* memory() const;
+ void* memory() const { return mapping_ptr_; }
private:
+ // Either |shm_| is active, or else |region_| together with one of the
+ // mappings, depending on which constructor was used and the value of
+ // |read_only_|. These members are kept so that the shared memory mapping
+ // stays valid for the lifetime of this instance.
base::SharedMemory shm_;
+ base::subtle::PlatformSharedMemoryRegion region_;
+ base::WritableSharedMemoryMapping writable_mapping_;
+ base::ReadOnlySharedMemoryMapping read_only_mapping_;
+
+ // Whether the mapping should be made read-only.
+ bool read_only_;
// The size of the region associated with |shm_|.
size_t size_;
- // Offset withing |shm_| memory that data has been mapped; strictly less than
- // base::SysInfo::VMAllocationGranularity().
- size_t misalignment_;
+ // Pointer to the unaligned data in the shared memory mapping.
+ uint8_t* mapping_ptr_ = nullptr;
DISALLOW_COPY_AND_ASSIGN(UnalignedSharedMemory);
};
@@ -59,13 +75,6 @@ class MEDIA_EXPORT WritableUnalignedMapping {
size_t size,
off_t offset);
- // As above, but creates from a handle. This region will own the handle.
- // DEPRECATED: this should be used only for the legacy shared memory
- // conversion project, see https://crbug.com/795291.
- WritableUnalignedMapping(const base::SharedMemoryHandle& handle,
- size_t size,
- off_t offset);
-
~WritableUnalignedMapping();
size_t size() const { return size_; }
diff --git a/chromium/media/base/unaligned_shared_memory_unittest.cc b/chromium/media/base/unaligned_shared_memory_unittest.cc
index 067c3e15121..f7311ef5386 100644
--- a/chromium/media/base/unaligned_shared_memory_unittest.cc
+++ b/chromium/media/base/unaligned_shared_memory_unittest.cc
@@ -54,11 +54,32 @@ TEST(UnalignedSharedMemoryTest, CreateAndDestroy) {
UnalignedSharedMemory shm(handle, kDataSize, true);
}
+TEST(UnalignedSharedMemoryTest, CreateAndDestroyRegion) {
+ auto region = CreateRegion(kData, kDataSize);
+ UnalignedSharedMemory shm(
+ base::UnsafeSharedMemoryRegion::TakeHandleForSerialization(
+ std::move(region)),
+ kDataSize, false);
+}
+
+TEST(UnalignedSharedMemoryTest, CreateAndDestroyReadOnlyRegion) {
+ auto region = CreateReadOnlyRegion(kData, kDataSize);
+ UnalignedSharedMemory shm(
+ base::ReadOnlySharedMemoryRegion::TakeHandleForSerialization(
+ std::move(region)),
+ kDataSize, true);
+}
+
TEST(UnalignedSharedMemoryTest, CreateAndDestroy_InvalidHandle) {
base::SharedMemoryHandle handle;
UnalignedSharedMemory shm(handle, kDataSize, true);
}
+TEST(UnalignedSharedMemoryTest, CreateAndDestroy_InvalidRegion) {
+ UnalignedSharedMemory shm(base::subtle::PlatformSharedMemoryRegion(),
+ kDataSize, false);
+}
+
TEST(UnalignedSharedMemoryTest, Map) {
auto handle = CreateHandle(kData, kDataSize);
UnalignedSharedMemory shm(handle, kDataSize, true);
@@ -66,6 +87,26 @@ TEST(UnalignedSharedMemoryTest, Map) {
EXPECT_EQ(0, memcmp(shm.memory(), kData, kDataSize));
}
+TEST(UnalignedSharedMemoryTest, MapRegion) {
+ auto region = CreateRegion(kData, kDataSize);
+ UnalignedSharedMemory shm(
+ base::UnsafeSharedMemoryRegion::TakeHandleForSerialization(
+ std::move(region)),
+ kDataSize, false);
+ ASSERT_TRUE(shm.MapAt(0, kDataSize));
+ EXPECT_EQ(0, memcmp(shm.memory(), kData, kDataSize));
+}
+
+TEST(UnalignedSharedMemoryTest, MapReadOnlyRegion) {
+ auto region = CreateReadOnlyRegion(kData, kDataSize);
+ UnalignedSharedMemory shm(
+ base::ReadOnlySharedMemoryRegion::TakeHandleForSerialization(
+ std::move(region)),
+ kDataSize, true);
+ ASSERT_TRUE(shm.MapAt(0, kDataSize));
+ EXPECT_EQ(0, memcmp(shm.memory(), kData, kDataSize));
+}
+
TEST(UnalignedSharedMemoryTest, Map_Unaligned) {
auto handle = CreateHandle(kUnalignedData, kUnalignedDataSize);
UnalignedSharedMemory shm(handle, kUnalignedDataSize, true);
@@ -73,6 +114,26 @@ TEST(UnalignedSharedMemoryTest, Map_Unaligned) {
EXPECT_EQ(0, memcmp(shm.memory(), kData, kDataSize));
}
+TEST(UnalignedSharedMemoryTest, Map_UnalignedRegion) {
+ auto region = CreateRegion(kUnalignedData, kUnalignedDataSize);
+ UnalignedSharedMemory shm(
+ base::UnsafeSharedMemoryRegion::TakeHandleForSerialization(
+ std::move(region)),
+ kUnalignedDataSize, false);
+ ASSERT_TRUE(shm.MapAt(kUnalignedOffset, kDataSize));
+ EXPECT_EQ(0, memcmp(shm.memory(), kData, kDataSize));
+}
+
+TEST(UnalignedSharedMemoryTest, Map_UnalignedReadOnlyRegion) {
+ auto region = CreateReadOnlyRegion(kUnalignedData, kUnalignedDataSize);
+ UnalignedSharedMemory shm(
+ base::ReadOnlySharedMemoryRegion::TakeHandleForSerialization(
+ std::move(region)),
+ kUnalignedDataSize, true);
+ ASSERT_TRUE(shm.MapAt(kUnalignedOffset, kDataSize));
+ EXPECT_EQ(0, memcmp(shm.memory(), kData, kDataSize));
+}
+
TEST(UnalignedSharedMemoryTest, Map_InvalidHandle) {
base::SharedMemoryHandle handle;
UnalignedSharedMemory shm(handle, kDataSize, true);
@@ -80,39 +141,88 @@ TEST(UnalignedSharedMemoryTest, Map_InvalidHandle) {
EXPECT_EQ(shm.memory(), nullptr);
}
+TEST(UnalignedSharedMemoryTest, Map_InvalidRegion) {
+ UnalignedSharedMemory shm(base::subtle::PlatformSharedMemoryRegion(),
+ kDataSize, true);
+ ASSERT_FALSE(shm.MapAt(1, kDataSize));
+ EXPECT_EQ(shm.memory(), nullptr);
+}
+
TEST(UnalignedSharedMemoryTest, Map_NegativeOffset) {
auto handle = CreateHandle(kData, kDataSize);
UnalignedSharedMemory shm(handle, kDataSize, true);
ASSERT_FALSE(shm.MapAt(-1, kDataSize));
}
+TEST(UnalignedSharedMemoryTest, Map_NegativeOffsetRegion) {
+ auto region = CreateRegion(kData, kDataSize);
+ UnalignedSharedMemory shm(
+ base::UnsafeSharedMemoryRegion::TakeHandleForSerialization(
+ std::move(region)),
+ kDataSize, false);
+ ASSERT_FALSE(shm.MapAt(-1, kDataSize));
+}
+
+TEST(UnalignedSharedMemoryTest, Map_NegativeOffsetReadOnlyRegion) {
+ auto region = CreateReadOnlyRegion(kData, kDataSize);
+ UnalignedSharedMemory shm(
+ base::ReadOnlySharedMemoryRegion::TakeHandleForSerialization(
+ std::move(region)),
+ kDataSize, true);
+ ASSERT_FALSE(shm.MapAt(-1, kDataSize));
+}
+
TEST(UnalignedSharedMemoryTest, Map_SizeOverflow) {
auto handle = CreateHandle(kData, kDataSize);
UnalignedSharedMemory shm(handle, kDataSize, true);
ASSERT_FALSE(shm.MapAt(1, std::numeric_limits<size_t>::max()));
}
+TEST(UnalignedSharedMemoryTest, Map_SizeOverflowRegion) {
+ auto region = CreateRegion(kData, kDataSize);
+ UnalignedSharedMemory shm(
+ base::UnsafeSharedMemoryRegion::TakeHandleForSerialization(
+ std::move(region)),
+ kDataSize, false);
+ ASSERT_FALSE(shm.MapAt(1, std::numeric_limits<size_t>::max()));
+}
+
+TEST(UnalignedSharedMemoryTest, Map_SizeOverflowReadOnlyRegion) {
+ auto region = CreateReadOnlyRegion(kData, kDataSize);
+ UnalignedSharedMemory shm(
+ base::ReadOnlySharedMemoryRegion::TakeHandleForSerialization(
+ std::move(region)),
+ kDataSize, true);
+ ASSERT_FALSE(shm.MapAt(1, std::numeric_limits<size_t>::max()));
+}
+
TEST(UnalignedSharedMemoryTest, UnmappedIsNullptr) {
auto handle = CreateHandle(kData, kDataSize);
UnalignedSharedMemory shm(handle, kDataSize, true);
ASSERT_EQ(shm.memory(), nullptr);
}
-TEST(WritableUnalignedMappingTest, CreateAndDestroy) {
+TEST(UnalignedSharedMemoryTest, UnmappedRegionIsNullptr) {
auto region = CreateRegion(kData, kDataSize);
- WritableUnalignedMapping shm(region, kDataSize, 0);
- EXPECT_TRUE(shm.IsValid());
+ UnalignedSharedMemory shm(
+ base::UnsafeSharedMemoryRegion::TakeHandleForSerialization(
+ std::move(region)),
+ kDataSize, false);
+ ASSERT_EQ(shm.memory(), nullptr);
}
-TEST(WritableUnalignedMappingTest, CreateAndDestroy_InvalidHandle) {
- base::SharedMemoryHandle handle;
- WritableUnalignedMapping shm(handle, kDataSize, 0);
- EXPECT_FALSE(shm.IsValid());
+TEST(UnalignedSharedMemoryTest, UnmappedReadOnlyRegionIsNullptr) {
+ auto region = CreateReadOnlyRegion(kData, kDataSize);
+ UnalignedSharedMemory shm(
+ base::ReadOnlySharedMemoryRegion::TakeHandleForSerialization(
+ std::move(region)),
+ kDataSize, true);
+ ASSERT_EQ(shm.memory(), nullptr);
}
-TEST(WritableUnalignedMappingTest, CreateAndDestroyHandle) {
- auto handle = CreateHandle(kData, kDataSize);
- WritableUnalignedMapping shm(handle, kDataSize, 0);
+TEST(WritableUnalignedMappingTest, CreateAndDestroy) {
+ auto region = CreateRegion(kData, kDataSize);
+ WritableUnalignedMapping shm(region, kDataSize, 0);
EXPECT_TRUE(shm.IsValid());
}
@@ -136,13 +246,6 @@ TEST(WritableUnalignedMappingTest, Map_Unaligned) {
EXPECT_EQ(0, memcmp(shm.memory(), kData, kDataSize));
}
-TEST(WritableUnalignedMappingTest, Map_UnalignedHandle) {
- auto region = CreateHandle(kUnalignedData, kUnalignedDataSize);
- WritableUnalignedMapping shm(region, kDataSize, kUnalignedOffset);
- ASSERT_TRUE(shm.IsValid());
- EXPECT_EQ(0, memcmp(shm.memory(), kData, kDataSize));
-}
-
TEST(WritableUnalignedMappingTest, Map_InvalidRegion) {
base::UnsafeSharedMemoryRegion region;
WritableUnalignedMapping shm(region, kDataSize, 0);
diff --git a/chromium/media/base/vector_math.cc b/chromium/media/base/vector_math.cc
index bafb7f068be..e6e26922249 100644
--- a/chromium/media/base/vector_math.cc
+++ b/chromium/media/base/vector_math.cc
@@ -63,13 +63,6 @@ void FMUL_C(const float src[], float scale, int len, float dest[]) {
dest[i] = src[i] * scale;
}
-void Crossfade(const float src[], int len, float dest[]) {
- float cf_ratio = 0;
- const float cf_increment = 1.0f / len;
- for (int i = 0; i < len; ++i, cf_ratio += cf_increment)
- dest[i] = (1.0f - cf_ratio) * src[i] + cf_ratio * dest[i];
-}
-
std::pair<float, float> EWMAAndMaxPower(
float initial_value, const float src[], int len, float smoothing_factor) {
// Ensure |src| is 16-byte aligned.
diff --git a/chromium/media/base/vector_math.h b/chromium/media/base/vector_math.h
index 302b85f642a..66ca849fa98 100644
--- a/chromium/media/base/vector_math.h
+++ b/chromium/media/base/vector_math.h
@@ -42,8 +42,6 @@ MEDIA_SHMEM_EXPORT std::pair<float, float> EWMAAndMaxPower(
int len,
float smoothing_factor);
-MEDIA_SHMEM_EXPORT void Crossfade(const float src[], int len, float dest[]);
-
} // namespace vector_math
} // namespace media
diff --git a/chromium/media/base/vector_math_unittest.cc b/chromium/media/base/vector_math_unittest.cc
index 6be3eeb7b10..290a95cc8df 100644
--- a/chromium/media/base/vector_math_unittest.cc
+++ b/chromium/media/base/vector_math_unittest.cc
@@ -135,16 +135,6 @@ TEST_F(VectorMathTest, FMUL) {
#endif
}
-TEST_F(VectorMathTest, Crossfade) {
- FillTestVectors(0, 1);
- vector_math::Crossfade(
- input_vector_.get(), kVectorSize, output_vector_.get());
- for (int i = 0; i < kVectorSize; ++i) {
- ASSERT_FLOAT_EQ(i / static_cast<float>(kVectorSize), output_vector_[i])
- << "i=" << i;
- }
-}
-
class EWMATestScenario {
public:
EWMATestScenario(float initial_value, const float src[], int len,
diff --git a/chromium/media/base/video_decoder.h b/chromium/media/base/video_decoder.h
index 784bb73853c..52753e8c17e 100644
--- a/chromium/media/base/video_decoder.h
+++ b/chromium/media/base/video_decoder.h
@@ -26,16 +26,16 @@ class VideoFrame;
class MEDIA_EXPORT VideoDecoder {
public:
// Callback for VideoDecoder initialization.
- using InitCB = base::Callback<void(bool success)>;
+ using InitCB = base::OnceCallback<void(bool success)>;
// Callback for VideoDecoder to return a decoded frame whenever it becomes
// available. Only non-EOS frames should be returned via this callback.
- using OutputCB = base::Callback<void(const scoped_refptr<VideoFrame>&)>;
+ using OutputCB = base::RepeatingCallback<void(scoped_refptr<VideoFrame>)>;
// Callback type for Decode(). Called after the decoder has completed decoding
// corresponding DecoderBuffer, indicating that it's ready to accept another
// buffer to decode.
- using DecodeCB = base::Callback<void(DecodeStatus)>;
+ using DecodeCB = base::OnceCallback<void(DecodeStatus)>;
VideoDecoder();
@@ -84,7 +84,7 @@ class MEDIA_EXPORT VideoDecoder {
virtual void Initialize(const VideoDecoderConfig& config,
bool low_delay,
CdmContext* cdm_context,
- const InitCB& init_cb,
+ InitCB init_cb,
const OutputCB& output_cb,
const WaitingCB& waiting_cb) = 0;
@@ -107,12 +107,12 @@ class MEDIA_EXPORT VideoDecoder {
// |decode_cb| must be called after that. Callers will not call Decode()
// again until after the flush completes.
virtual void Decode(scoped_refptr<DecoderBuffer> buffer,
- const DecodeCB& decode_cb) = 0;
+ DecodeCB decode_cb) = 0;
// Resets decoder state. All pending Decode() requests will be finished or
// aborted before |closure| is called.
// Note: No VideoDecoder calls should be made before |closure| is executed.
- virtual void Reset(const base::Closure& closure) = 0;
+ virtual void Reset(base::OnceClosure closure) = 0;
// Returns true if the decoder needs bitstream conversion before decoding.
virtual bool NeedsBitstreamConversion() const;
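
A hedged call-site sketch of the OnceCallback migration (the decoder and buffer here are hypothetical): InitCB and DecodeCB are now move-only and run at most once, so callers hand them over with std::move().

  void DecodeOneBuffer(media::VideoDecoder* decoder,
                       scoped_refptr<media::DecoderBuffer> buffer) {
    media::VideoDecoder::DecodeCB decode_cb =
        base::BindOnce([](media::DecodeStatus status) {
          // React to the decode result here.
        });
    decoder->Decode(std::move(buffer), std::move(decode_cb));
  }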
diff --git a/chromium/media/base/video_decoder_config.cc b/chromium/media/base/video_decoder_config.cc
index f514abe250b..23aa265e0b0 100644
--- a/chromium/media/base/video_decoder_config.cc
+++ b/chromium/media/base/video_decoder_config.cc
@@ -62,14 +62,14 @@ VideoDecoderConfig::VideoDecoderConfig()
: codec_(kUnknownVideoCodec),
profile_(VIDEO_CODEC_PROFILE_UNKNOWN),
format_(PIXEL_FORMAT_UNKNOWN),
- rotation_(VIDEO_ROTATION_0) {}
+ transformation_(kNoTransformation) {}
VideoDecoderConfig::VideoDecoderConfig(
VideoCodec codec,
VideoCodecProfile profile,
VideoPixelFormat format,
const VideoColorSpace& color_space,
- VideoRotation rotation,
+ VideoTransformation rotation,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
const gfx::Size& natural_size,
@@ -105,7 +105,7 @@ void VideoDecoderConfig::Initialize(VideoCodec codec,
VideoCodecProfile profile,
VideoPixelFormat format,
const VideoColorSpace& color_space,
- VideoRotation rotation,
+ VideoTransformation transformation,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
const gfx::Size& natural_size,
@@ -114,7 +114,7 @@ void VideoDecoderConfig::Initialize(VideoCodec codec,
codec_ = codec;
profile_ = profile;
format_ = format;
- rotation_ = rotation;
+ transformation_ = transformation;
coded_size_ = coded_size;
visible_rect_ = visible_rect;
natural_size_ = natural_size;
@@ -133,7 +133,7 @@ bool VideoDecoderConfig::IsValidConfig() const {
bool VideoDecoderConfig::Matches(const VideoDecoderConfig& config) const {
return ((codec() == config.codec()) && (format() == config.format()) &&
(profile() == config.profile()) &&
- (video_rotation() == config.video_rotation()) &&
+ (video_transformation() == config.video_transformation()) &&
(coded_size() == config.coded_size()) &&
(visible_rect() == config.visible_rect()) &&
(natural_size() == config.natural_size()) &&
@@ -154,7 +154,8 @@ std::string VideoDecoderConfig::AsHumanReadableString() const {
<< natural_size().height() << "]"
<< ", has extra data: " << (extra_data().empty() ? "false" : "true")
<< ", encryption scheme: " << encryption_scheme()
- << ", rotation: " << VideoRotationToString(video_rotation())
+ << ", rotation: " << VideoRotationToString(video_transformation().rotation)
+ << ", flipped: " << video_transformation().mirrored
<< ", color space: " << color_space_info().ToGfxColorSpace().ToString();
if (hdr_metadata().has_value()) {
s << std::setprecision(4) << ", luminance range: "
diff --git a/chromium/media/base/video_decoder_config.h b/chromium/media/base/video_decoder_config.h
index bd65211a1f3..ef65fee31a3 100644
--- a/chromium/media/base/video_decoder_config.h
+++ b/chromium/media/base/video_decoder_config.h
@@ -17,7 +17,7 @@
#include "media/base/media_export.h"
#include "media/base/video_codecs.h"
#include "media/base/video_color_space.h"
-#include "media/base/video_rotation.h"
+#include "media/base/video_transformation.h"
#include "media/base/video_types.h"
#include "ui/gfx/geometry/rect.h"
#include "ui/gfx/geometry/size.h"
@@ -41,7 +41,7 @@ class MEDIA_EXPORT VideoDecoderConfig {
VideoCodecProfile profile,
VideoPixelFormat format,
const VideoColorSpace& color_space,
- VideoRotation rotation,
+ VideoTransformation transformation,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
const gfx::Size& natural_size,
@@ -57,7 +57,7 @@ class MEDIA_EXPORT VideoDecoderConfig {
VideoCodecProfile profile,
VideoPixelFormat format,
const VideoColorSpace& color_space,
- VideoRotation rotation,
+ VideoTransformation transformation,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
const gfx::Size& natural_size,
@@ -94,7 +94,7 @@ class MEDIA_EXPORT VideoDecoderConfig {
// scaling to natural_size().
//
// TODO(sandersd): Which direction is orientation measured in?
- VideoRotation video_rotation() const { return rotation_; }
+ VideoTransformation video_transformation() const { return transformation_; }
// Deprecated. TODO(wolenetz): Remove. See https://crbug.com/665539.
// Width and height of video frame immediately post-decode. Not all pixels
@@ -154,7 +154,7 @@ class MEDIA_EXPORT VideoDecoderConfig {
VideoPixelFormat format_;
- VideoRotation rotation_;
+ VideoTransformation transformation_;
// Deprecated. TODO(wolenetz): Remove. See https://crbug.com/665539.
gfx::Size coded_size_;
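
With the rotation argument widened to VideoTransformation, a mirrored stream can be described directly. A hedged construction sketch (sizes are placeholders; EmptyExtraData() and Unencrypted() are the test helpers used elsewhere in this patch):

  media::VideoDecoderConfig config(
      media::kCodecVP8, media::VP8PROFILE_ANY, media::PIXEL_FORMAT_I420,
      media::VideoColorSpace(),
      media::VideoTransformation(media::VIDEO_ROTATION_90, /*mirrored=*/true),
      /*coded_size=*/gfx::Size(320, 240), /*visible_rect=*/gfx::Rect(320, 240),
      /*natural_size=*/gfx::Size(320, 240), media::EmptyExtraData(),
      media::Unencrypted());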
diff --git a/chromium/media/base/video_decoder_config_unittest.cc b/chromium/media/base/video_decoder_config_unittest.cc
index 33d849e4ceb..3c7b4e3d98e 100644
--- a/chromium/media/base/video_decoder_config_unittest.cc
+++ b/chromium/media/base/video_decoder_config_unittest.cc
@@ -18,7 +18,7 @@ static const gfx::Size kNaturalSize(320, 240);
TEST(VideoDecoderConfigTest, Invalid_UnsupportedPixelFormat) {
VideoDecoderConfig config(kCodecVP8, VIDEO_CODEC_PROFILE_UNKNOWN,
PIXEL_FORMAT_UNKNOWN, VideoColorSpace(),
- VIDEO_ROTATION_0, kCodedSize, kVisibleRect,
+ kNoTransformation, kCodedSize, kVisibleRect,
kNaturalSize, EmptyExtraData(), Unencrypted());
EXPECT_FALSE(config.IsValidConfig());
}
@@ -26,7 +26,7 @@ TEST(VideoDecoderConfigTest, Invalid_UnsupportedPixelFormat) {
TEST(VideoDecoderConfigTest, Invalid_AspectRatioNumeratorZero) {
gfx::Size natural_size = GetNaturalSize(kVisibleRect.size(), 0, 1);
VideoDecoderConfig config(kCodecVP8, VP8PROFILE_ANY, kVideoFormat,
- VideoColorSpace(), VIDEO_ROTATION_0, kCodedSize,
+ VideoColorSpace(), kNoTransformation, kCodedSize,
kVisibleRect, natural_size, EmptyExtraData(),
Unencrypted());
EXPECT_FALSE(config.IsValidConfig());
@@ -35,7 +35,7 @@ TEST(VideoDecoderConfigTest, Invalid_AspectRatioNumeratorZero) {
TEST(VideoDecoderConfigTest, Invalid_AspectRatioDenominatorZero) {
gfx::Size natural_size = GetNaturalSize(kVisibleRect.size(), 1, 0);
VideoDecoderConfig config(kCodecVP8, VP8PROFILE_ANY, kVideoFormat,
- VideoColorSpace(), VIDEO_ROTATION_0, kCodedSize,
+ VideoColorSpace(), kNoTransformation, kCodedSize,
kVisibleRect, natural_size, EmptyExtraData(),
Unencrypted());
EXPECT_FALSE(config.IsValidConfig());
@@ -44,7 +44,7 @@ TEST(VideoDecoderConfigTest, Invalid_AspectRatioDenominatorZero) {
TEST(VideoDecoderConfigTest, Invalid_AspectRatioNumeratorNegative) {
gfx::Size natural_size = GetNaturalSize(kVisibleRect.size(), -1, 1);
VideoDecoderConfig config(kCodecVP8, VP8PROFILE_ANY, kVideoFormat,
- VideoColorSpace(), VIDEO_ROTATION_0, kCodedSize,
+ VideoColorSpace(), kNoTransformation, kCodedSize,
kVisibleRect, natural_size, EmptyExtraData(),
Unencrypted());
EXPECT_FALSE(config.IsValidConfig());
@@ -53,7 +53,7 @@ TEST(VideoDecoderConfigTest, Invalid_AspectRatioNumeratorNegative) {
TEST(VideoDecoderConfigTest, Invalid_AspectRatioDenominatorNegative) {
gfx::Size natural_size = GetNaturalSize(kVisibleRect.size(), 1, -1);
VideoDecoderConfig config(kCodecVP8, VP8PROFILE_ANY, kVideoFormat,
- VideoColorSpace(), VIDEO_ROTATION_0, kCodedSize,
+ VideoColorSpace(), kNoTransformation, kCodedSize,
kVisibleRect, natural_size, EmptyExtraData(),
Unencrypted());
EXPECT_FALSE(config.IsValidConfig());
@@ -64,7 +64,7 @@ TEST(VideoDecoderConfigTest, Invalid_AspectRatioNumeratorTooLarge) {
int num = ceil(static_cast<double>(limits::kMaxDimension + 1) / width);
gfx::Size natural_size = GetNaturalSize(kVisibleRect.size(), num, 1);
VideoDecoderConfig config(kCodecVP8, VP8PROFILE_ANY, kVideoFormat,
- VideoColorSpace(), VIDEO_ROTATION_0, kCodedSize,
+ VideoColorSpace(), kNoTransformation, kCodedSize,
kVisibleRect, natural_size, EmptyExtraData(),
Unencrypted());
EXPECT_FALSE(config.IsValidConfig());
@@ -78,7 +78,7 @@ TEST(VideoDecoderConfigTest, Invalid_AspectRatioDenominatorVeryLarge) {
EXPECT_EQ(320, natural_size.width());
EXPECT_EQ(240 * 641, natural_size.height());
VideoDecoderConfig config(kCodecVP8, VP8PROFILE_ANY, kVideoFormat,
- VideoColorSpace(), VIDEO_ROTATION_0, kCodedSize,
+ VideoColorSpace(), kNoTransformation, kCodedSize,
kVisibleRect, natural_size, EmptyExtraData(),
Unencrypted());
EXPECT_FALSE(config.IsValidConfig());
diff --git a/chromium/media/base/video_frame.cc b/chromium/media/base/video_frame.cc
index 064d6b2598e..63cae40e863 100644
--- a/chromium/media/base/video_frame.cc
+++ b/chromium/media/base/video_frame.cc
@@ -588,48 +588,48 @@ scoped_refptr<VideoFrame> VideoFrame::WrapCVPixelBuffer(
// static
scoped_refptr<VideoFrame> VideoFrame::WrapVideoFrame(
- const scoped_refptr<VideoFrame>& frame,
+ const VideoFrame& frame,
VideoPixelFormat format,
const gfx::Rect& visible_rect,
const gfx::Size& natural_size) {
// Frames with textures need mailbox info propagated, and there's no support
// for that here yet, see http://crbug/362521.
- CHECK(!frame->HasTextures());
- DCHECK(frame->visible_rect().Contains(visible_rect));
+ CHECK(!frame.HasTextures());
+ DCHECK(frame.visible_rect().Contains(visible_rect));
- if (!AreValidPixelFormatsForWrap(frame->format(), format)) {
+ if (!AreValidPixelFormatsForWrap(frame.format(), format)) {
DLOG(ERROR) << __func__ << " Invalid format conversion."
- << VideoPixelFormatToString(frame->format()) << " to "
+ << VideoPixelFormatToString(frame.format()) << " to "
<< VideoPixelFormatToString(format);
return nullptr;
}
- if (!IsValidConfig(format, frame->storage_type(), frame->coded_size(),
+ if (!IsValidConfig(format, frame.storage_type(), frame.coded_size(),
visible_rect, natural_size)) {
DLOG(ERROR) << __func__ << " Invalid config."
- << ConfigToString(format, frame->storage_type(),
- frame->coded_size(), visible_rect,
+ << ConfigToString(format, frame.storage_type(),
+ frame.coded_size(), visible_rect,
natural_size);
return nullptr;
}
scoped_refptr<VideoFrame> wrapping_frame(
- new VideoFrame(frame->layout(), frame->storage_type(), visible_rect,
- natural_size, frame->timestamp()));
+ new VideoFrame(frame.layout(), frame.storage_type(), visible_rect,
+ natural_size, frame.timestamp()));
// Copy all metadata to the wrapped frame.
- wrapping_frame->metadata()->MergeMetadataFrom(frame->metadata());
+ wrapping_frame->metadata()->MergeMetadataFrom(frame.metadata());
- if (frame->IsMappable()) {
+ if (frame.IsMappable()) {
for (size_t i = 0; i < NumPlanes(format); ++i) {
- wrapping_frame->data_[i] = frame->data(i);
+ wrapping_frame->data_[i] = frame.data_[i];
}
}
#if defined(OS_LINUX)
// If there are any |dmabuf_fds_| plugged in, we should duplicate them.
- if (frame->storage_type() == STORAGE_DMABUFS) {
- wrapping_frame->dmabuf_fds_ = DuplicateFDs(frame->dmabuf_fds_);
+ if (frame.storage_type() == STORAGE_DMABUFS) {
+ wrapping_frame->dmabuf_fds_ = DuplicateFDs(frame.dmabuf_fds_);
if (wrapping_frame->dmabuf_fds_.empty()) {
DLOG(ERROR) << __func__ << " Couldn't duplicate fds.";
return nullptr;
@@ -637,18 +637,18 @@ scoped_refptr<VideoFrame> VideoFrame::WrapVideoFrame(
}
#endif
- if (frame->storage_type() == STORAGE_SHMEM) {
- if (frame->read_only_shared_memory_region_) {
- DCHECK(frame->read_only_shared_memory_region_->IsValid());
+ if (frame.storage_type() == STORAGE_SHMEM) {
+ if (frame.read_only_shared_memory_region_) {
+ DCHECK(frame.read_only_shared_memory_region_->IsValid());
wrapping_frame->AddReadOnlySharedMemoryRegion(
- frame->read_only_shared_memory_region_);
- } else if (frame->unsafe_shared_memory_region_) {
- DCHECK(frame->unsafe_shared_memory_region_->IsValid());
+ frame.read_only_shared_memory_region_);
+ } else if (frame.unsafe_shared_memory_region_) {
+ DCHECK(frame.unsafe_shared_memory_region_->IsValid());
wrapping_frame->AddUnsafeSharedMemoryRegion(
- frame->unsafe_shared_memory_region_);
+ frame.unsafe_shared_memory_region_);
} else {
- DCHECK(frame->shared_memory_handle_.IsValid());
- wrapping_frame->AddSharedMemoryHandle(frame->shared_memory_handle_);
+ DCHECK(frame.shared_memory_handle_.IsValid());
+ wrapping_frame->AddSharedMemoryHandle(frame.shared_memory_handle_);
}
}
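
A hedged caller-side sketch of the new by-reference signature (|original|, |crop_rect|, and |natural_size| are hypothetical): since WrapVideoFrame() no longer takes a scoped_refptr, keeping the wrapped frame alive is explicitly the caller's job, as video_frame_pool.cc does further below.

  scoped_refptr<media::VideoFrame> wrapped = media::VideoFrame::WrapVideoFrame(
      *original, original->format(), crop_rect, natural_size);
  if (wrapped) {
    // Hold a ref to |original| until the wrapping frame is destroyed.
    wrapped->AddDestructionObserver(
        base::BindOnce([](scoped_refptr<media::VideoFrame>) {}, original));
  }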
diff --git a/chromium/media/base/video_frame.h b/chromium/media/base/video_frame.h
index 3ab20a32431..9f24faa1594 100644
--- a/chromium/media/base/video_frame.h
+++ b/chromium/media/base/video_frame.h
@@ -22,11 +22,13 @@
#include "base/memory/shared_memory.h"
#include "base/memory/shared_memory_handle.h"
#include "base/memory/unsafe_shared_memory_region.h"
+#include "base/optional.h"
#include "base/synchronization/lock.h"
#include "base/thread_annotations.h"
#include "base/unguessable_token.h"
#include "build/build_config.h"
#include "gpu/command_buffer/common/mailbox_holder.h"
+#include "gpu/ipc/common/vulkan_ycbcr_info.h"
#include "media/base/video_frame_layout.h"
#include "media/base/video_frame_metadata.h"
#include "media/base/video_types.h"
@@ -302,7 +304,7 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
// Wraps |frame|. |visible_rect| must be a sub rect within
// frame->visible_rect().
static scoped_refptr<VideoFrame> WrapVideoFrame(
- const scoped_refptr<VideoFrame>& frame,
+ const VideoFrame& frame,
VideoPixelFormat format,
const gfx::Rect& visible_rect,
const gfx::Size& natural_size);
@@ -434,6 +436,10 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
return data_[plane];
}
+ const base::Optional<gpu::VulkanYCbCrInfo>& ycbcr_info() const {
+ return ycbcr_info_;
+ }
+
// Returns pointer to the data in the visible region of the frame, for
// IsMappable() storage types. The returned pointer is offsetted into the
// plane buffer specified by visible_rect().origin(). Memory is owned by
@@ -537,6 +543,11 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
// Returns the number of bits per channel.
size_t BitDepth() const;
+ // Provide the sampler conversion information for the frame.
+ void set_ycbcr_info(const base::Optional<gpu::VulkanYCbCrInfo>& ycbcr_info) {
+ ycbcr_info_ = ycbcr_info;
+ }
+
protected:
friend class base::RefCountedThreadSafe<VideoFrame>;
@@ -678,6 +689,9 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
gfx::ColorSpace color_space_;
+ // Sampler conversion information, used in a Vulkan context on Android.
+ base::Optional<gpu::VulkanYCbCrInfo> ycbcr_info_;
+
DISALLOW_IMPLICIT_CONSTRUCTORS(VideoFrame);
};
diff --git a/chromium/media/base/video_frame_layout.cc b/chromium/media/base/video_frame_layout.cc
index 27121ce28e1..cfa26f8a9bb 100644
--- a/chromium/media/base/video_frame_layout.cc
+++ b/chromium/media/base/video_frame_layout.cc
@@ -108,7 +108,8 @@ base::Optional<VideoFrameLayout> VideoFrameLayout::CreateWithPlanes(
const gfx::Size& coded_size,
std::vector<Plane> planes,
std::vector<size_t> buffer_sizes,
- size_t buffer_addr_align) {
+ size_t buffer_addr_align,
+ uint64_t modifier) {
// NOTE: Even if format is UNKNOWN, it is valid if coded_sizes is not Empty().
// TODO(crbug.com/896135): Return base::nullopt,
// if (format != PIXEL_FORMAT_UNKNOWN || !coded_sizes.IsEmpty())
@@ -117,19 +118,21 @@ base::Optional<VideoFrameLayout> VideoFrameLayout::CreateWithPlanes(
// TODO(crbug.com/896135): Return base::nullopt,
// if (buffer_sizes.size() > planes.size())
return VideoFrameLayout(format, coded_size, std::move(planes),
- std::move(buffer_sizes), buffer_addr_align);
+ std::move(buffer_sizes), buffer_addr_align, modifier);
}
VideoFrameLayout::VideoFrameLayout(VideoPixelFormat format,
const gfx::Size& coded_size,
std::vector<Plane> planes,
std::vector<size_t> buffer_sizes,
- size_t buffer_addr_align)
+ size_t buffer_addr_align,
+ uint64_t modifier)
: format_(format),
coded_size_(coded_size),
planes_(std::move(planes)),
buffer_sizes_(std::move(buffer_sizes)),
- buffer_addr_align_(buffer_addr_align) {}
+ buffer_addr_align_(buffer_addr_align),
+ modifier_(modifier) {}
VideoFrameLayout::~VideoFrameLayout() = default;
VideoFrameLayout::VideoFrameLayout(const VideoFrameLayout&) = default;
@@ -143,15 +146,13 @@ size_t VideoFrameLayout::GetTotalBufferSize() const {
std::ostream& operator<<(std::ostream& ostream,
const VideoFrameLayout::Plane& plane) {
- ostream << "(" << plane.stride << ", " << plane.offset << ", "
- << plane.modifier << ")";
+ ostream << "(" << plane.stride << ", " << plane.offset << ")";
return ostream;
}
bool VideoFrameLayout::Plane::operator==(
const VideoFrameLayout::Plane& rhs) const {
- return stride == rhs.stride && offset == rhs.offset &&
- modifier == rhs.modifier;
+ return stride == rhs.stride && offset == rhs.offset;
}
bool VideoFrameLayout::Plane::operator!=(
@@ -162,7 +163,8 @@ bool VideoFrameLayout::Plane::operator!=(
bool VideoFrameLayout::operator==(const VideoFrameLayout& rhs) const {
return format_ == rhs.format_ && coded_size_ == rhs.coded_size_ &&
planes_ == rhs.planes_ && buffer_sizes_ == rhs.buffer_sizes_ &&
- buffer_addr_align_ == rhs.buffer_addr_align_;
+ buffer_addr_align_ == rhs.buffer_addr_align_ &&
+ modifier_ == rhs.modifier_;
}
bool VideoFrameLayout::operator!=(const VideoFrameLayout& rhs) const {
@@ -173,9 +175,9 @@ std::ostream& operator<<(std::ostream& ostream,
const VideoFrameLayout& layout) {
ostream << "VideoFrameLayout(format: " << layout.format()
<< ", coded_size: " << layout.coded_size().ToString()
- << ", planes (stride, offset, modifier): "
- << VectorToString(layout.planes())
- << ", buffer_sizes: " << VectorToString(layout.buffer_sizes()) << ")";
+ << ", planes (stride, offset): " << VectorToString(layout.planes())
+ << ", buffer_sizes: " << VectorToString(layout.buffer_sizes())
+ << ", modifier: " << layout.modifier() << ")";
return ostream;
}
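
A hedged sketch of the relocated modifier (values illustrative): the DRM format modifier is now a property of the whole layout rather than of each plane.

  auto layout = media::VideoFrameLayout::CreateWithPlanes(
      media::PIXEL_FORMAT_NV12, gfx::Size(320, 180),
      {media::VideoFrameLayout::Plane(/*stride=*/384, /*offset=*/0),
       media::VideoFrameLayout::Plane(/*stride=*/384, /*offset=*/69120)},
      /*buffer_sizes=*/{103680},
      media::VideoFrameLayout::kBufferAddressAlignment,
      /*modifier=*/gfx::NativePixmapHandle::kNoModifier);
  // layout->modifier() now reports the layout-wide value.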
diff --git a/chromium/media/base/video_frame_layout.h b/chromium/media/base/video_frame_layout.h
index 6825d7ced85..b328490464b 100644
--- a/chromium/media/base/video_frame_layout.h
+++ b/chromium/media/base/video_frame_layout.h
@@ -39,8 +39,6 @@ class MEDIA_EXPORT VideoFrameLayout {
struct Plane {
Plane() = default;
Plane(int32_t stride, size_t offset) : stride(stride), offset(offset) {}
- Plane(int32_t stride, size_t offset, uint64_t modifier)
- : stride(stride), offset(offset), modifier(modifier) {}
bool operator==(const Plane& rhs) const;
bool operator!=(const Plane& rhs) const;
@@ -52,11 +50,6 @@ class MEDIA_EXPORT VideoFrameLayout {
// Offset of a plane, which stands for the offset of a start point of a
// color plane from a buffer fd.
size_t offset = 0;
-
- // Modifier of a plane. The modifier is retrieved from GBM library. This can
- // be a different value from kNoModifier only if the VideoFrame is created
- // by using NativePixmap.
- uint64_t modifier = gfx::NativePixmapPlane::kNoModifier;
};
// Factory functions.
@@ -87,7 +80,8 @@ class MEDIA_EXPORT VideoFrameLayout {
const gfx::Size& coded_size,
std::vector<Plane> planes,
std::vector<size_t> buffer_sizes = {},
- size_t buffer_addr_align = kBufferAddressAlignment);
+ size_t buffer_addr_align = kBufferAddressAlignment,
+ uint64_t modifier = gfx::NativePixmapHandle::kNoModifier);
VideoFrameLayout() = delete;
VideoFrameLayout(const VideoFrameLayout&);
@@ -116,16 +110,17 @@ class MEDIA_EXPORT VideoFrameLayout {
bool operator!=(const VideoFrameLayout& rhs) const;
// Returns the required memory alignment for buffers.
- size_t buffer_addr_align() const {
- return buffer_addr_align_;
- }
+ size_t buffer_addr_align() const { return buffer_addr_align_; }
+ // Return the modifier of buffers.
+ uint64_t modifier() const { return modifier_; }
private:
VideoFrameLayout(VideoPixelFormat format,
const gfx::Size& coded_size,
std::vector<Plane> planes,
std::vector<size_t> buffer_sizes,
- size_t buffer_addr_align);
+ size_t buffer_addr_align,
+ uint64_t modifier);
VideoPixelFormat format_;
@@ -147,6 +142,11 @@ class MEDIA_EXPORT VideoFrameLayout {
// allocating physical memory for the buffer, so it doesn't need to be
// serialized when frames are passed through Mojo.
size_t buffer_addr_align_;
+
+ // Modifier of buffers. The modifier is retrieved from GBM library. This
+ // can be a different value from kNoModifier only if the VideoFrame is created
+ // by using NativePixmap.
+ uint64_t modifier_;
};
// Outputs VideoFrameLayout::Plane to stream.
diff --git a/chromium/media/base/video_frame_layout_unittest.cc b/chromium/media/base/video_frame_layout_unittest.cc
index 3c9309a3358..a83cf958643 100644
--- a/chromium/media/base/video_frame_layout_unittest.cc
+++ b/chromium/media/base/video_frame_layout_unittest.cc
@@ -237,12 +237,12 @@ TEST(VideoFrameLayout, ToString) {
std::ostringstream ostream;
ostream << *layout;
const std::string kNoModifier =
- std::to_string(gfx::NativePixmapPlane::kNoModifier);
+ std::to_string(gfx::NativePixmapHandle::kNoModifier);
EXPECT_EQ(ostream.str(),
"VideoFrameLayout(format: PIXEL_FORMAT_I420, coded_size: 320x180, "
- "planes (stride, offset, modifier): [(384, 0, " +
- kNoModifier + "), (192, 0, " + kNoModifier + "), (192, 0, " +
- kNoModifier + ")], buffer_sizes: [73728, 18432, 18432])");
+ "planes (stride, offset): [(384, 0), (192, 0), (192, 0)], "
+ "buffer_sizes: [73728, 18432, 18432], modifier: " +
+ kNoModifier + ")");
}
TEST(VideoFrameLayout, ToStringOneBuffer) {
@@ -258,11 +258,12 @@ TEST(VideoFrameLayout, ToStringOneBuffer) {
std::ostringstream ostream;
ostream << *layout;
const std::string kNoModifier =
- std::to_string(gfx::NativePixmapPlane::kNoModifier);
+ std::to_string(gfx::NativePixmapHandle::kNoModifier);
EXPECT_EQ(ostream.str(),
"VideoFrameLayout(format: PIXEL_FORMAT_NV12, coded_size: 320x180, "
- "planes (stride, offset, modifier): [(384, 100, " +
- kNoModifier + ")], buffer_sizes: [122880])");
+ "planes (stride, offset): [(384, 100)], buffer_sizes: [122880], "
+ "modifier: " +
+ kNoModifier + ")");
}
TEST(VideoFrameLayout, ToStringNoBufferInfo) {
@@ -273,12 +274,12 @@ TEST(VideoFrameLayout, ToStringNoBufferInfo) {
std::ostringstream ostream;
ostream << *layout;
const std::string kNoModifier =
- std::to_string(gfx::NativePixmapPlane::kNoModifier);
+ std::to_string(gfx::NativePixmapHandle::kNoModifier);
EXPECT_EQ(ostream.str(),
"VideoFrameLayout(format: PIXEL_FORMAT_NV12, coded_size: 320x180, "
- "planes (stride, offset, modifier): [(0, 0, " +
- kNoModifier + "), (0, 0, " + kNoModifier +
- ")], buffer_sizes: [])");
+ "planes (stride, offset): [(0, 0), (0, 0)], buffer_sizes: [], "
+ "modifier: " +
+ kNoModifier + ")");
}
TEST(VideoFrameLayout, EqualOperator) {
@@ -287,22 +288,23 @@ TEST(VideoFrameLayout, EqualOperator) {
std::vector<size_t> offsets = {0, 100, 200};
std::vector<size_t> buffer_sizes = {73728, 18432, 18432};
const size_t align = VideoFrameLayout::kBufferAddressAlignment;
+ const uint64_t modifier = 1;
auto layout = VideoFrameLayout::CreateWithPlanes(
PIXEL_FORMAT_I420, coded_size, CreatePlanes(strides, offsets),
- buffer_sizes, align);
+ buffer_sizes, align, modifier);
ASSERT_TRUE(layout.has_value());
auto same_layout = VideoFrameLayout::CreateWithPlanes(
PIXEL_FORMAT_I420, coded_size, CreatePlanes(strides, offsets),
- buffer_sizes, align);
+ buffer_sizes, align, modifier);
ASSERT_TRUE(same_layout.has_value());
EXPECT_EQ(*layout, *same_layout);
std::vector<size_t> another_buffer_sizes = {73728};
auto different_layout = VideoFrameLayout::CreateWithPlanes(
PIXEL_FORMAT_I420, coded_size, CreatePlanes(strides, offsets),
- another_buffer_sizes, align);
+ another_buffer_sizes, align, modifier);
ASSERT_TRUE(different_layout.has_value());
EXPECT_NE(*layout, *different_layout);
@@ -312,6 +314,13 @@ TEST(VideoFrameLayout, EqualOperator) {
buffer_sizes, another_align);
ASSERT_TRUE(different_layout.has_value());
EXPECT_NE(*layout, *different_layout);
+
+ const size_t another_modifier = 2;
+ different_layout = VideoFrameLayout::CreateWithPlanes(
+ PIXEL_FORMAT_I420, coded_size, CreatePlanes(strides, offsets),
+ buffer_sizes, align, another_modifier);
+ ASSERT_TRUE(different_layout.has_value());
+ EXPECT_NE(*layout, *different_layout);
}
} // namespace media
diff --git a/chromium/media/base/video_frame_metadata.h b/chromium/media/base/video_frame_metadata.h
index 206d3e981c0..86207131eb8 100644
--- a/chromium/media/base/video_frame_metadata.h
+++ b/chromium/media/base/video_frame_metadata.h
@@ -15,7 +15,7 @@
#include "base/values.h"
#include "build/build_config.h"
#include "media/base/media_export.h"
-#include "media/base/video_rotation.h"
+#include "media/base/video_transformation.h"
namespace gfx {
class Rect;
@@ -164,7 +164,7 @@ class MEDIA_EXPORT VideoFrameMetadata {
// If present, this field represents the local time at which the VideoFrame
// was decoded from whichever format it was encoded in.
// Use Get/SetTimeTicks() for this key.
- DECODE_COMPLETE_TIMESTAMP,
+ DECODE_TIME,
NUM_KEYS
};
diff --git a/chromium/media/base/video_frame_pool.cc b/chromium/media/base/video_frame_pool.cc
index b70a1f3a962..16156e9ff6c 100644
--- a/chromium/media/base/video_frame_pool.cc
+++ b/chromium/media/base/video_frame_pool.cc
@@ -111,7 +111,7 @@ scoped_refptr<VideoFrame> VideoFramePool::PoolImpl::CreateFrame(
}
scoped_refptr<VideoFrame> wrapped_frame = VideoFrame::WrapVideoFrame(
- frame, frame->format(), frame->visible_rect(), frame->natural_size());
+ *frame, frame->format(), frame->visible_rect(), frame->natural_size());
wrapped_frame->AddDestructionObserver(base::Bind(
&VideoFramePool::PoolImpl::FrameReleased, this, std::move(frame)));
return wrapped_frame;
diff --git a/chromium/media/base/video_frame_unittest.cc b/chromium/media/base/video_frame_unittest.cc
index 5ac862c33eb..88b83a7b4b5 100644
--- a/chromium/media/base/video_frame_unittest.cc
+++ b/chromium/media/base/video_frame_unittest.cc
@@ -259,9 +259,8 @@ TEST(VideoFrame, CreateBlackFrame) {
}
}
-static void FrameNoLongerNeededCallback(
- const scoped_refptr<media::VideoFrame>& frame,
- bool* triggered) {
+static void FrameNoLongerNeededCallback(scoped_refptr<media::VideoFrame> frame,
+ bool* triggered) {
*triggered = true;
}
@@ -282,7 +281,7 @@ TEST(VideoFrame, WrapVideoFrame) {
wrapped_frame->metadata()->SetTimeDelta(
media::VideoFrameMetadata::FRAME_DURATION, kFrameDuration);
frame = media::VideoFrame::WrapVideoFrame(
- wrapped_frame, wrapped_frame->format(), visible_rect, natural_size);
+ *wrapped_frame, wrapped_frame->format(), visible_rect, natural_size);
frame->AddDestructionObserver(base::Bind(
&FrameNoLongerNeededCallback, wrapped_frame, &done_callback_was_run));
EXPECT_EQ(wrapped_frame->coded_size(), frame->coded_size());
diff --git a/chromium/media/base/video_renderer_sink.h b/chromium/media/base/video_renderer_sink.h
index 5670f0424c1..2598d0c3b0a 100644
--- a/chromium/media/base/video_renderer_sink.h
+++ b/chromium/media/base/video_renderer_sink.h
@@ -38,6 +38,10 @@ class MEDIA_EXPORT VideoRendererSink {
// not actually rendered. Must be called before the next Render() call.
virtual void OnFrameDropped() = 0;
+ // Returns the interval at which the sink expects to have new frames for the
+ // client.
+ virtual base::TimeDelta GetPreferredRenderInterval() = 0;
+
virtual ~RenderCallback() {}
};
@@ -58,7 +62,7 @@ class MEDIA_EXPORT VideoRendererSink {
// useful for painting poster images or hole frames without having to issue a
// Start() -> Render() -> Stop(). Clients are free to mix usage of Render()
// based painting and PaintSingleFrame().
- virtual void PaintSingleFrame(const scoped_refptr<VideoFrame>& frame,
+ virtual void PaintSingleFrame(scoped_refptr<VideoFrame> frame,
bool repaint_duplicate_frame = false) = 0;
virtual ~VideoRendererSink() {}
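
A hedged client sketch: a RenderCallback implementation that asks to be driven at roughly 60 Hz (the interval value is illustrative).

  base::TimeDelta GetPreferredRenderInterval() override {
    return base::TimeDelta::FromSecondsD(1.0 / 60);
  }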
diff --git a/chromium/media/base/video_rotation.cc b/chromium/media/base/video_rotation.cc
deleted file mode 100644
index accfd553958..00000000000
--- a/chromium/media/base/video_rotation.cc
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/base/video_rotation.h"
-
-#include "base/logging.h"
-
-namespace media {
-
-std::string VideoRotationToString(VideoRotation rotation) {
- switch (rotation) {
- case VIDEO_ROTATION_0:
- return "0°";
- case VIDEO_ROTATION_90:
- return "90°";
- case VIDEO_ROTATION_180:
- return "180°";
- case VIDEO_ROTATION_270:
- return "270°";
- }
- NOTREACHED();
- return "";
-}
-
-} // namespace media
diff --git a/chromium/media/base/video_rotation.h b/chromium/media/base/video_rotation.h
deleted file mode 100644
index 05690aef559..00000000000
--- a/chromium/media/base/video_rotation.h
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_VIDEO_ROTATION_H_
-#define MEDIA_BASE_VIDEO_ROTATION_H_
-
-#include <string>
-
-namespace media {
-
-// Enumeration to represent 90 degree video rotation for MP4 videos
-// where it can be rotated by 90 degree intervals.
-enum VideoRotation : int {
- VIDEO_ROTATION_0 = 0,
- VIDEO_ROTATION_90,
- VIDEO_ROTATION_180,
- VIDEO_ROTATION_270,
- VIDEO_ROTATION_MAX = VIDEO_ROTATION_270
-};
-
-std::string VideoRotationToString(VideoRotation rotation);
-
-} // namespace media
-
-#endif // MEDIA_BASE_VIDEO_ROTATION_H_
diff --git a/chromium/media/base/video_thumbnail_decoder.cc b/chromium/media/base/video_thumbnail_decoder.cc
index 1c18ac253de..db709b27774 100644
--- a/chromium/media/base/video_thumbnail_decoder.cc
+++ b/chromium/media/base/video_thumbnail_decoder.cc
@@ -70,7 +70,7 @@ void VideoThumbnailDecoder::OnEosBufferDecoded(DecodeStatus status) {
}
void VideoThumbnailDecoder::OnVideoFrameDecoded(
- const scoped_refptr<VideoFrame>& frame) {
+ scoped_refptr<VideoFrame> frame) {
NotifyComplete(std::move(frame));
}
diff --git a/chromium/media/base/video_thumbnail_decoder.h b/chromium/media/base/video_thumbnail_decoder.h
index 8f9518fd683..f4553755d4f 100644
--- a/chromium/media/base/video_thumbnail_decoder.h
+++ b/chromium/media/base/video_thumbnail_decoder.h
@@ -43,7 +43,7 @@ class MEDIA_EXPORT VideoThumbnailDecoder {
void OnEosBufferDecoded(DecodeStatus status);
// Called when the output frame is generated.
- void OnVideoFrameDecoded(const scoped_refptr<VideoFrame>& frame);
+ void OnVideoFrameDecoded(scoped_refptr<VideoFrame> frame);
void NotifyComplete(scoped_refptr<VideoFrame> frame);
diff --git a/chromium/media/base/video_thumbnail_decoder_unittest.cc b/chromium/media/base/video_thumbnail_decoder_unittest.cc
index 1f6429118fc..75a1b9becb5 100644
--- a/chromium/media/base/video_thumbnail_decoder_unittest.cc
+++ b/chromium/media/base/video_thumbnail_decoder_unittest.cc
@@ -35,7 +35,7 @@ class VideoThumbnailDecoderTest : public testing::Test {
mock_video_decoder_ = mock_video_decoder.get();
VideoDecoderConfig valid_config(
kCodecVP8, VP8PROFILE_ANY, PIXEL_FORMAT_I420, VideoColorSpace(),
- VIDEO_ROTATION_0, gfx::Size(1, 1), gfx::Rect(1, 1), gfx::Size(1, 1),
+ kNoTransformation, gfx::Size(1, 1), gfx::Rect(1, 1), gfx::Size(1, 1),
EmptyExtraData(), Unencrypted());
thumbnail_decoder_ = std::make_unique<VideoThumbnailDecoder>(
@@ -60,7 +60,7 @@ class VideoThumbnailDecoderTest : public testing::Test {
return thumbnail_decoder_.get();
}
MockVideoDecoder* mock_video_decoder() { return mock_video_decoder_; }
- const scoped_refptr<VideoFrame>& frame() { return frame_; }
+ scoped_refptr<VideoFrame> frame() { return frame_; }
private:
void OnFrameDecoded(scoped_refptr<VideoFrame> frame) {
@@ -82,11 +82,12 @@ class VideoThumbnailDecoderTest : public testing::Test {
// the video frame.
TEST_F(VideoThumbnailDecoderTest, Success) {
auto expected_frame = CreateFrame();
- EXPECT_CALL(*mock_video_decoder(), Initialize(_, _, _, _, _, _))
- .WillOnce(DoAll(RunCallback<3>(true), RunCallback<4>(expected_frame)));
- EXPECT_CALL(*mock_video_decoder(), Decode(_, _))
+ EXPECT_CALL(*mock_video_decoder(), Initialize_(_, _, _, _, _, _))
+ .WillOnce(
+ DoAll(RunOnceCallback<3>(true), RunCallback<4>(expected_frame)));
+ EXPECT_CALL(*mock_video_decoder(), Decode_(_, _))
.Times(2)
- .WillRepeatedly(RunCallback<1>(DecodeStatus::OK));
+ .WillRepeatedly(RunOnceCallback<1>(DecodeStatus::OK));
Start();
EXPECT_TRUE(frame());
@@ -95,8 +96,8 @@ TEST_F(VideoThumbnailDecoderTest, Success) {
// No output video frame when decoder failed to initialize.
TEST_F(VideoThumbnailDecoderTest, InitializationFailed) {
auto expected_frame = CreateFrame();
- EXPECT_CALL(*mock_video_decoder(), Initialize(_, _, _, _, _, _))
- .WillOnce(RunCallback<3>(false));
+ EXPECT_CALL(*mock_video_decoder(), Initialize_(_, _, _, _, _, _))
+ .WillOnce(RunOnceCallback<3>(false));
Start();
EXPECT_FALSE(frame());
@@ -105,10 +106,10 @@ TEST_F(VideoThumbnailDecoderTest, InitializationFailed) {
// No output video frame when decoder failed to decode.
TEST_F(VideoThumbnailDecoderTest, DecodingFailed) {
auto expected_frame = CreateFrame();
- EXPECT_CALL(*mock_video_decoder(), Initialize(_, _, _, _, _, _))
- .WillOnce(RunCallback<3>(true));
- EXPECT_CALL(*mock_video_decoder(), Decode(_, _))
- .WillOnce(RunCallback<1>(DecodeStatus::DECODE_ERROR));
+ EXPECT_CALL(*mock_video_decoder(), Initialize_(_, _, _, _, _, _))
+ .WillOnce(RunOnceCallback<3>(true));
+ EXPECT_CALL(*mock_video_decoder(), Decode_(_, _))
+ .WillOnce(RunOnceCallback<1>(DecodeStatus::DECODE_ERROR));
Start();
EXPECT_FALSE(frame());
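
The expectations now target Initialize_/Decode_ because gmock cannot mock move-only parameters directly. A hedged sketch of the usual forwarding pattern (mock class abbreviated):

  class MockVideoDecoder : public media::VideoDecoder {
   public:
    void Decode(scoped_refptr<media::DecoderBuffer> buffer,
                DecodeCB decode_cb) override {
      // Forward to a mockable method that takes the callback by reference, so
      // actions like RunOnceCallback<1> can consume it.
      Decode_(buffer, decode_cb);
    }
    MOCK_METHOD2(Decode_,
                 void(scoped_refptr<media::DecoderBuffer>, DecodeCB&));
    // Initialize()/Initialize_() follow the same shape.
  };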
diff --git a/chromium/media/base/video_transformation.cc b/chromium/media/base/video_transformation.cc
new file mode 100644
index 00000000000..4ba3658ffae
--- /dev/null
+++ b/chromium/media/base/video_transformation.cc
@@ -0,0 +1,95 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/video_transformation.h"
+
+#include <math.h>
+#include <stddef.h>
+
+#include "base/logging.h"
+
+namespace media {
+namespace {
+
+double FixedToFloatingPoint(int32_t i) {
+ return static_cast<double>(i >> 16);
+}
+
+} // namespace
+
+std::string VideoRotationToString(VideoRotation rotation) {
+ switch (rotation) {
+ case VIDEO_ROTATION_0:
+ return "0°";
+ case VIDEO_ROTATION_90:
+ return "90°";
+ case VIDEO_ROTATION_180:
+ return "180°";
+ case VIDEO_ROTATION_270:
+ return "270°";
+ }
+ NOTREACHED();
+ return "";
+}
+
+bool operator==(const struct VideoTransformation& first,
+ const struct VideoTransformation& second) {
+ return first.rotation == second.rotation && first.mirrored == second.mirrored;
+}
+
+VideoTransformation::VideoTransformation(int32_t matrix[4]) {
+ // Rotation by angle Θ is represented in the matrix as:
+ // [ cos(Θ), -sin(Θ)]
+ // [ sin(Θ), cos(Θ)]
+ // A vertical flip is represented by the cosines having opposite signs,
+ // and a horizontal flip is represented by the sines having the same sign.
+
+ // Check the matrix for validity
+ if (abs(matrix[0]) != abs(matrix[3]) || abs(matrix[1]) != abs(matrix[2])) {
+ rotation = VIDEO_ROTATION_0;
+ mirrored = false;
+ return;
+ }
+
+ double angle = acos(FixedToFloatingPoint(matrix[0])) * 180 / base::kPiDouble;
+
+ // Calculate angle offsets for rotation - rotating about the X axis
+ // can be expressed as a 180 degree rotation and a Y axis rotation.
+ mirrored = false;
+ if (matrix[0] != matrix[3] && matrix[0] != 0) {
+ mirrored = !mirrored;
+ angle += 180;
+ }
+
+ if (matrix[1] == matrix[3] && matrix[1] != 0) {
+ mirrored = !mirrored;
+ }
+
+ // Normalize the angle
+ while (angle < 0)
+ angle += 360;
+
+ while (angle >= 360)
+ angle -= 360;
+
+ // 16 bits of fixed point decimal is enough to give 6 decimals of precision
+ // to cos(Θ). A delta of ±0.000001 causes acos(cos(Θ)) to differ by a minimum
+ // of 0.0002, which is why the angle only needs to be checked to within four
+ // decimal places. This is preferred to checking for a more precise accuracy,
+ // as the 'double' type is architecture-dependent and there may be variance
+ // in floating point errors.
+ if (abs(angle - 0) < 1e-4) {
+ rotation = VIDEO_ROTATION_0;
+ } else if (abs(angle - 180) < 1e-4) {
+ rotation = VIDEO_ROTATION_180;
+ } else if (abs(angle - 90) < 1e-4) {
+ bool quadrant = asin(FixedToFloatingPoint(matrix[2])) < 0;
+ rotation = quadrant ? VIDEO_ROTATION_90 : VIDEO_ROTATION_270;
+ } else {
+ rotation = VIDEO_ROTATION_0;
+ mirrored = false;
+ }
+}
+
+} // namespace media
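
Illustrative inputs for the matrix constructor (values assumed; entries are 16.16 fixed point, so 1.0 == 0x10000). The sign of the sine entry, matrix[2], is what disambiguates 90° from 270°.

  int32_t identity[4] = {0x10000, 0, 0, 0x10000};
  int32_t rotate_180[4] = {-0x10000, 0, 0, -0x10000};
  media::VideoTransformation t0(identity);      // rotation == VIDEO_ROTATION_0
  media::VideoTransformation t180(rotate_180);  // rotation == VIDEO_ROTATION_180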
diff --git a/chromium/media/base/video_transformation.h b/chromium/media/base/video_transformation.h
new file mode 100644
index 00000000000..eff89cd2aec
--- /dev/null
+++ b/chromium/media/base/video_transformation.h
@@ -0,0 +1,61 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_VIDEO_TRANSFORMATION_H_
+#define MEDIA_BASE_VIDEO_TRANSFORMATION_H_
+
+#include <string>
+
+#include "base/numerics/math_constants.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+// Enumeration to represent 90 degree video rotation for MP4 videos
+// where it can be rotated by 90 degree intervals.
+enum VideoRotation : int {
+ VIDEO_ROTATION_0 = 0,
+ VIDEO_ROTATION_90,
+ VIDEO_ROTATION_180,
+ VIDEO_ROTATION_270,
+ VIDEO_ROTATION_MAX = VIDEO_ROTATION_270
+};
+
+// Stores frame rotation & mirroring values. These are usually calculated from
+// a rotation matrix from a demuxer, and we only support 90 degree rotation
+// increments.
+struct MEDIA_EXPORT VideoTransformation {
+ constexpr VideoTransformation(VideoRotation rotation, bool mirrored)
+ : rotation(rotation), mirrored(mirrored) {}
+ constexpr VideoTransformation(VideoRotation r)
+ : VideoTransformation(r, false) {}
+ constexpr VideoTransformation()
+ : VideoTransformation(VIDEO_ROTATION_0, false) {}
+
+ // Rotation by angle Θ is represented in the matrix as:
+ // [ cos(Θ), -sin(Θ)]
+ // [ sin(Θ), cos(Θ)]
+ // A vertical flip is represented by the cosines having opposite signs,
+ // and a horizontal flip is represented by the sines having the same sign.
+ VideoTransformation(int32_t matrix[4]);
+
+ // The video rotation value, in 90 degree steps.
+ VideoRotation rotation;
+
+ // Whether the video should be flipped about its Y axis.
+ // This transformation takes place _after_ rotation, since they are not
+ // commutative.
+ bool mirrored;
+};
+
+MEDIA_EXPORT bool operator==(const struct VideoTransformation& first,
+ const struct VideoTransformation& second);
+
+constexpr VideoTransformation kNoTransformation = VideoTransformation();
+
+std::string VideoRotationToString(VideoRotation rotation);
+
+} // namespace media
+
+#endif // MEDIA_BASE_VIDEO_TRANSFORMATION_H_
diff --git a/chromium/media/base/video_types.h b/chromium/media/base/video_types.h
index 8803f605f51..af74e689b05 100644
--- a/chromium/media/base/video_types.h
+++ b/chromium/media/base/video_types.h
@@ -18,6 +18,9 @@ namespace media {
// http://www.fourcc.org/rgb.php and http://www.fourcc.org/yuv.php
// Logged to UMA, so never reuse values. Leave gaps if necessary.
// Ordered as planar, semi-planar, YUV-packed, and RGB formats.
+// When a VideoFrame is backed by native textures, VideoPixelFormat describes
+// how those textures should be sampled and combined to produce the final
+// pixels.
enum VideoPixelFormat {
PIXEL_FORMAT_UNKNOWN = 0, // Unknown or unspecified format value.
PIXEL_FORMAT_I420 =
@@ -38,10 +41,12 @@ enum VideoPixelFormat {
8, // 16bpp interleaved 2x1 U, 1x1 Y, 2x1 V, 1x1 Y samples.
PIXEL_FORMAT_YUY2 =
9, // 16bpp interleaved 1x1 Y, 2x1 U, 1x1 Y, 2x1 V samples.
- PIXEL_FORMAT_ARGB = 10, // 32bpp ARGB, 1 plane.
- PIXEL_FORMAT_XRGB = 11, // 24bpp XRGB, 1 plane.
- PIXEL_FORMAT_RGB24 = 12, // 24bpp BGR, 1 plane.
- PIXEL_FORMAT_RGB32 = 13, // 32bpp BGRA, 1 plane.
+ PIXEL_FORMAT_ARGB = 10, // 32bpp BGRA (byte-order), 1 plane.
+ PIXEL_FORMAT_XRGB = 11, // 24bpp BGRX (byte-order), 1 plane.
+ PIXEL_FORMAT_RGB24 = 12, // 24bpp BGR (byte-order), 1 plane.
+ // TODO(crbug.com/953128): Deprecate PIXEL_FORMAT_RGB32 in favor of
+ // PIXEL_FORMAT_ARGB.
+ PIXEL_FORMAT_RGB32 = 13, // 32bpp BGRA (byte-order), 1 plane.
PIXEL_FORMAT_MJPEG = 14, // MJPEG compressed.
// MediaTek proprietary format. MT21 is similar to NV21 except the memory
// layout and pixel layout (swizzles). 12bpp with Y plane followed by a 2x2
@@ -69,8 +74,8 @@ enum VideoPixelFormat {
/* PIXEL_FORMAT_Y8 = 25, Deprecated */
PIXEL_FORMAT_Y16 = 26, // single 16bpp plane.
- PIXEL_FORMAT_ABGR = 27, // 32bpp RGBA, 1 plane.
- PIXEL_FORMAT_XBGR = 28, // 24bpp RGB, 1 plane.
+ PIXEL_FORMAT_ABGR = 27, // 32bpp RGBA (byte-order), 1 plane.
+ PIXEL_FORMAT_XBGR = 28, // 24bpp RGBX (byte-order), 1 plane.
PIXEL_FORMAT_P016LE = 29, // 24bpp NV12, 16 bits per channel
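
A one-line illustration of the "byte-order" wording (values illustrative): PIXEL_FORMAT_ARGB means the bytes in memory are B, G, R, A regardless of CPU endianness.

  const uint8_t opaque_blue[4] = {0xFF, 0x00, 0x00, 0xFF};  // B, G, R, A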
diff --git a/chromium/media/base/video_util.cc b/chromium/media/base/video_util.cc
index cb29dcb1128..8f4e0c22074 100644
--- a/chromium/media/base/video_util.cc
+++ b/chromium/media/base/video_util.cc
@@ -18,7 +18,7 @@ namespace media {
namespace {
// Empty method used for keeping a reference to the original media::VideoFrame.
-void ReleaseOriginalFrame(const scoped_refptr<media::VideoFrame>& frame) {}
+void ReleaseOriginalFrame(scoped_refptr<media::VideoFrame> frame) {}
// Helper to apply padding to the region outside visible rect up to the coded
// size with the repeated last column / row of the visible rect.
@@ -423,18 +423,18 @@ void CopyRGBToVideoFrame(const uint8_t* source,
}
scoped_refptr<VideoFrame> WrapAsI420VideoFrame(
- const scoped_refptr<VideoFrame>& frame) {
+ scoped_refptr<VideoFrame> frame) {
DCHECK_EQ(VideoFrame::STORAGE_OWNED_MEMORY, frame->storage_type());
DCHECK_EQ(PIXEL_FORMAT_I420A, frame->format());
scoped_refptr<media::VideoFrame> wrapped_frame =
- media::VideoFrame::WrapVideoFrame(frame, PIXEL_FORMAT_I420,
+ media::VideoFrame::WrapVideoFrame(*frame, PIXEL_FORMAT_I420,
frame->visible_rect(),
frame->natural_size());
if (!wrapped_frame)
return nullptr;
wrapped_frame->AddDestructionObserver(
- base::Bind(&ReleaseOriginalFrame, frame));
+ base::BindOnce(&ReleaseOriginalFrame, std::move(frame)));
return wrapped_frame;
}
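
[Editor's note] The signature change from const scoped_refptr<VideoFrame>& to by-value, combined with base::BindOnce, is the usual keep-alive idiom: exactly one reference moves from the caller into the destruction observer. A sketch of the shape, assuming only //base and //media:

    #include <utility>

    #include "base/bind.h"
    #include "media/base/video_frame.h"

    // Body intentionally empty; its only job is to hold the reference.
    void KeepAlive(scoped_refptr<media::VideoFrame> frame) {}

    void AttachKeepAlive(scoped_refptr<media::VideoFrame> frame,
                         media::VideoFrame* wrapped) {
      // BindOnce moves |frame| into the closure: no ref-count churn, and the
      // closure runs at most once, when |wrapped| is destroyed.
      wrapped->AddDestructionObserver(
          base::BindOnce(&KeepAlive, std::move(frame)));
    }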
diff --git a/chromium/media/base/video_util.h b/chromium/media/base/video_util.h
index 2d5d47b2ac5..2b7e4f227da 100644
--- a/chromium/media/base/video_util.h
+++ b/chromium/media/base/video_util.h
@@ -133,7 +133,7 @@ MEDIA_EXPORT void CopyRGBToVideoFrame(const uint8_t* source,
// Converts a frame with YV12A format into I420 by dropping alpha channel.
MEDIA_EXPORT scoped_refptr<VideoFrame> WrapAsI420VideoFrame(
- const scoped_refptr<VideoFrame>& frame);
+ scoped_refptr<VideoFrame> frame);
// Copy I420 video frame to match the required coded size and pad the region
// outside the visible rect repeatedly with the last column / row up to the coded
diff --git a/chromium/media/blink/BUILD.gn b/chromium/media/blink/BUILD.gn
index 1220be98d87..acd0edfa9f2 100644
--- a/chromium/media/blink/BUILD.gn
+++ b/chromium/media/blink/BUILD.gn
@@ -61,8 +61,6 @@ component("blink") {
"webencryptedmediaclient_impl.h",
"webinbandtexttrack_impl.cc",
"webinbandtexttrack_impl.h",
- "webmediacapabilitiesclient_impl.cc",
- "webmediacapabilitiesclient_impl.h",
"webmediaplayer_delegate.h",
"webmediaplayer_params.cc",
"webmediaplayer_params.h",
@@ -79,6 +77,7 @@ component("blink") {
deps = [
"//base",
"//cc",
+ "//components/viz/common",
"//gpu",
"//media",
"//media:shared_memory_support",
@@ -155,7 +154,6 @@ test("media_blink_unittests") {
"watch_time_component_unittest.cc",
"watch_time_reporter_unittest.cc",
"webaudiosourceprovider_impl_unittest.cc",
- "webmediacapabilitiesclient_impl_unittest.cc",
"webmediaplayer_impl_unittest.cc",
"webmediaplayer_util_unittest.cc",
]
diff --git a/chromium/media/blink/DEPS b/chromium/media/blink/DEPS
index 39cc187dd9c..92a8bc084b0 100644
--- a/chromium/media/blink/DEPS
+++ b/chromium/media/blink/DEPS
@@ -4,6 +4,7 @@ include_rules = [
"+cc/layers/video_frame_provider.h",
"+cc/layers/video_layer.h",
"+components/scheduler", # Only allowed in tests.
+ "+components/viz/common/frame_sinks/begin_frame_args.h",
"+components/viz/common/gpu/context_provider.h",
"+components/viz/common/surfaces/frame_sink_id.h",
"+gin",
diff --git a/chromium/media/blink/key_system_config_selector_unittest.cc b/chromium/media/blink/key_system_config_selector_unittest.cc
index c2bd18abba2..2867bf2a758 100644
--- a/chromium/media/blink/key_system_config_selector_unittest.cc
+++ b/chromium/media/blink/key_system_config_selector_unittest.cc
@@ -335,15 +335,14 @@ class FakeMediaPermission : public MediaPermission {
public:
// MediaPermission implementation.
void HasPermission(Type type,
- const PermissionStatusCB& permission_status_cb) override {
- permission_status_cb.Run(is_granted);
+ PermissionStatusCB permission_status_cb) override {
+ std::move(permission_status_cb).Run(is_granted);
}
- void RequestPermission(
- Type type,
- const PermissionStatusCB& permission_status_cb) override {
+ void RequestPermission(Type type,
+ PermissionStatusCB permission_status_cb) override {
requests++;
- permission_status_cb.Run(is_granted);
+ std::move(permission_status_cb).Run(is_granted);
}
bool IsEncryptedMediaEnabled() override { return is_encrypted_media_enabled; }
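
[Editor's note] The pattern here is the base::OnceCallback migration: take the callback by value and consume it with std::move(cb).Run(...). A standalone sketch, assuming PermissionStatusCB is a base::OnceCallback<void(bool)> (its exact definition is not shown in this hunk):

    #include <utility>

    #include "base/callback.h"

    using PermissionStatusCB = base::OnceCallback<void(bool)>;

    void RespondGranted(PermissionStatusCB permission_status_cb) {
      // std::move() turns the named callback into an rvalue so Run() may
      // consume it; afterwards the callback is null and cannot run again.
      std::move(permission_status_cb).Run(true);
    }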
diff --git a/chromium/media/blink/multibuffer_data_source.cc b/chromium/media/blink/multibuffer_data_source.cc
index f5e776f90dc..f489e211c51 100644
--- a/chromium/media/blink/multibuffer_data_source.cc
+++ b/chromium/media/blink/multibuffer_data_source.cc
@@ -299,8 +299,7 @@ UrlData::CorsMode MultibufferDataSource::cors_mode() const {
void MultibufferDataSource::MediaPlaybackRateChanged(double playback_rate) {
DCHECK(render_task_runner_->BelongsToCurrentThread());
-
- if (playback_rate < 0.0)
+ if (playback_rate < 0 || playback_rate == playback_rate_)
return;
playback_rate_ = playback_rate;
@@ -310,6 +309,9 @@ void MultibufferDataSource::MediaPlaybackRateChanged(double playback_rate) {
void MultibufferDataSource::MediaIsPlaying() {
DCHECK(render_task_runner_->BelongsToCurrentThread());
+ if (media_has_played_)
+ return;
+
media_has_played_ = true;
cancel_on_defer_ = false;
// Once we start playing, we need preloading.
diff --git a/chromium/media/blink/multibuffer_data_source.h b/chromium/media/blink/multibuffer_data_source.h
index 9e3a38c0e47..3befff25db3 100644
--- a/chromium/media/blink/multibuffer_data_source.h
+++ b/chromium/media/blink/multibuffer_data_source.h
@@ -125,6 +125,8 @@ class MEDIA_BLINK_EXPORT MultibufferDataSource : public DataSource {
is_client_audio_element_ = is_client_audio_element;
}
+ bool cancel_on_defer_for_testing() const { return cancel_on_defer_; }
+
protected:
void OnRedirect(const scoped_refptr<UrlData>& destination);
diff --git a/chromium/media/blink/video_decode_stats_reporter_unittest.cc b/chromium/media/blink/video_decode_stats_reporter_unittest.cc
index e9d11ca2fa8..bc41f4cb515 100644
--- a/chromium/media/blink/video_decode_stats_reporter_unittest.cc
+++ b/chromium/media/blink/video_decode_stats_reporter_unittest.cc
@@ -48,7 +48,7 @@ VideoDecoderConfig MakeVideoConfig(VideoCodec codec,
gfx::Size coded_size = natural_size;
gfx::Rect visible_rect(coded_size.width(), coded_size.height());
return VideoDecoderConfig(codec, profile, PIXEL_FORMAT_I420,
- VideoColorSpace::JPEG(), VIDEO_ROTATION_0,
+ VideoColorSpace::JPEG(), kNoTransformation,
coded_size, visible_rect, natural_size,
EmptyExtraData(), Unencrypted());
}
diff --git a/chromium/media/blink/video_frame_compositor.cc b/chromium/media/blink/video_frame_compositor.cc
index a3fe778331a..ede81f7aecb 100644
--- a/chromium/media/blink/video_frame_compositor.cc
+++ b/chromium/media/blink/video_frame_compositor.cc
@@ -8,6 +8,7 @@
#include "base/callback_helpers.h"
#include "base/time/default_tick_clock.h"
#include "base/trace_event/trace_event.h"
+#include "components/viz/common/frame_sinks/begin_frame_args.h"
#include "media/base/bind_to_current_loop.h"
#include "media/base/media_switches.h"
#include "media/base/video_frame.h"
@@ -153,11 +154,10 @@ scoped_refptr<VideoFrame> VideoFrameCompositor::GetCurrentFrameOnAnyThread() {
return current_frame_;
}
-void VideoFrameCompositor::SetCurrentFrame(
- const scoped_refptr<VideoFrame>& frame) {
+void VideoFrameCompositor::SetCurrentFrame(scoped_refptr<VideoFrame> frame) {
DCHECK(task_runner_->BelongsToCurrentThread());
base::AutoLock lock(current_frame_lock_);
- current_frame_ = frame;
+ current_frame_ = std::move(frame);
}
void VideoFrameCompositor::PutCurrentFrame() {
@@ -176,6 +176,15 @@ bool VideoFrameCompositor::HasCurrentFrame() {
return static_cast<bool>(GetCurrentFrame());
}
+base::TimeDelta VideoFrameCompositor::GetPreferredRenderInterval() {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ base::AutoLock lock(callback_lock_);
+
+ if (!callback_)
+ return viz::BeginFrameArgs::MinInterval();
+ return callback_->GetPreferredRenderInterval();
+}
+
void VideoFrameCompositor::Start(RenderCallback* callback) {
// Called from the media thread, so acquire the callback under lock before
// returning in case a Stop() call comes in before the PostTask is processed.
@@ -184,7 +193,7 @@ void VideoFrameCompositor::Start(RenderCallback* callback) {
callback_ = callback;
task_runner_->PostTask(
FROM_HERE, base::BindOnce(&VideoFrameCompositor::OnRendererStateUpdate,
- base::Unretained(this), true));
+ weak_ptr_factory_.GetWeakPtr(), true));
}
void VideoFrameCompositor::Stop() {
@@ -196,20 +205,19 @@ void VideoFrameCompositor::Stop() {
callback_ = nullptr;
task_runner_->PostTask(
FROM_HERE, base::BindOnce(&VideoFrameCompositor::OnRendererStateUpdate,
- base::Unretained(this), false));
+ weak_ptr_factory_.GetWeakPtr(), false));
}
-void VideoFrameCompositor::PaintSingleFrame(
- const scoped_refptr<VideoFrame>& frame,
- bool repaint_duplicate_frame) {
+void VideoFrameCompositor::PaintSingleFrame(scoped_refptr<VideoFrame> frame,
+ bool repaint_duplicate_frame) {
if (!task_runner_->BelongsToCurrentThread()) {
task_runner_->PostTask(
- FROM_HERE,
- base::BindOnce(&VideoFrameCompositor::PaintSingleFrame,
- base::Unretained(this), frame, repaint_duplicate_frame));
+ FROM_HERE, base::BindOnce(&VideoFrameCompositor::PaintSingleFrame,
+ weak_ptr_factory_.GetWeakPtr(),
+ std::move(frame), repaint_duplicate_frame));
return;
}
- if (ProcessNewFrame(frame, repaint_duplicate_frame) &&
+ if (ProcessNewFrame(std::move(frame), repaint_duplicate_frame) &&
IsClientSinkAvailable()) {
client_->DidReceiveFrame();
}
@@ -251,9 +259,8 @@ void VideoFrameCompositor::SetOnNewProcessedFrameCallback(
new_processed_frame_cb_ = std::move(cb);
}
-bool VideoFrameCompositor::ProcessNewFrame(
- const scoped_refptr<VideoFrame>& frame,
- bool repaint_duplicate_frame) {
+bool VideoFrameCompositor::ProcessNewFrame(scoped_refptr<VideoFrame> frame,
+ bool repaint_duplicate_frame) {
DCHECK(task_runner_->BelongsToCurrentThread());
if (frame && GetCurrentFrame() && !repaint_duplicate_frame &&
@@ -265,7 +272,7 @@ bool VideoFrameCompositor::ProcessNewFrame(
// subsequent PutCurrentFrame() call it will mark it as rendered.
rendered_last_frame_ = false;
- SetCurrentFrame(frame);
+ SetCurrentFrame(std::move(frame));
if (new_processed_frame_cb_)
std::move(new_processed_frame_cb_).Run(base::TimeTicks::Now());
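
[Editor's note] The base::Unretained(this) to weak_ptr_factory_.GetWeakPtr() substitutions above make the posted tasks safe against the compositor being destroyed first: a task bound to an invalidated weak pointer becomes a no-op rather than a use-after-free. The pattern in isolation (a sketch; the member layout is an assumption, not this class's actual declaration order):

    #include "base/bind.h"
    #include "base/location.h"
    #include "base/memory/weak_ptr.h"
    #include "base/single_thread_task_runner.h"

    class Widget {
     public:
      void PostUpdate(base::SingleThreadTaskRunner* runner, bool new_state) {
        runner->PostTask(FROM_HERE,
                         base::BindOnce(&Widget::OnUpdate,
                                        weak_ptr_factory_.GetWeakPtr(),
                                        new_state));
      }

     private:
      void OnUpdate(bool new_state) {}

      // Declared last so pending weak pointers are invalidated before any
      // other member is torn down.
      base::WeakPtrFactory<Widget> weak_ptr_factory_{this};
    };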
diff --git a/chromium/media/blink/video_frame_compositor.h b/chromium/media/blink/video_frame_compositor.h
index 7626381433f..da94514fe61 100644
--- a/chromium/media/blink/video_frame_compositor.h
+++ b/chromium/media/blink/video_frame_compositor.h
@@ -93,6 +93,7 @@ class MEDIA_BLINK_EXPORT VideoFrameCompositor : public VideoRendererSink,
bool HasCurrentFrame() override;
scoped_refptr<VideoFrame> GetCurrentFrame() override;
void PutCurrentFrame() override;
+ base::TimeDelta GetPreferredRenderInterval() override;
// Returns |current_frame_|, without offering a guarantee as to how recently
// it was updated. In certain applications, one might need to periodically
@@ -104,7 +105,7 @@ class MEDIA_BLINK_EXPORT VideoFrameCompositor : public VideoRendererSink,
// same thread (typically the media thread).
void Start(RenderCallback* callback) override;
void Stop() override;
- void PaintSingleFrame(const scoped_refptr<VideoFrame>& frame,
+ void PaintSingleFrame(scoped_refptr<VideoFrame> frame,
bool repaint_duplicate_frame = false) override;
// If |client_| is not set, |callback_| is set, and |is_background_rendering_|
@@ -170,10 +171,10 @@ class MEDIA_BLINK_EXPORT VideoFrameCompositor : public VideoRendererSink,
void OnRendererStateUpdate(bool new_state);
// Handles setting of |current_frame_|.
- bool ProcessNewFrame(const scoped_refptr<VideoFrame>& frame,
+ bool ProcessNewFrame(scoped_refptr<VideoFrame> frame,
bool repaint_duplicate_frame);
- void SetCurrentFrame(const scoped_refptr<VideoFrame>& frame);
+ void SetCurrentFrame(scoped_refptr<VideoFrame> frame);
// Called by |background_rendering_timer_| when enough time elapses where we
// haven't seen a Render() call.
diff --git a/chromium/media/blink/video_frame_compositor_unittest.cc b/chromium/media/blink/video_frame_compositor_unittest.cc
index ebbf759e4be..7eb763100e8 100644
--- a/chromium/media/blink/video_frame_compositor_unittest.cc
+++ b/chromium/media/blink/video_frame_compositor_unittest.cc
@@ -8,6 +8,7 @@
#include "base/run_loop.h"
#include "base/test/scoped_feature_list.h"
#include "base/test/simple_test_tick_clock.h"
+#include "components/viz/common/frame_sinks/begin_frame_args.h"
#include "components/viz/common/surfaces/frame_sink_id.h"
#include "media/base/gmock_callback_support.h"
#include "media/base/video_frame.h"
@@ -107,6 +108,10 @@ class VideoFrameCompositorTest : public VideoRendererSink::RenderCallback,
bool));
MOCK_METHOD0(OnFrameDropped, void());
+ base::TimeDelta GetPreferredRenderInterval() override {
+ return preferred_render_interval_;
+ }
+
void StartVideoRendererSink() {
EXPECT_CALL(*submitter_, StartRendering());
const bool had_current_frame = !!compositor_->GetCurrentFrame();
@@ -131,6 +136,7 @@ class VideoFrameCompositorTest : public VideoRendererSink::RenderCallback,
compositor()->PutCurrentFrame();
}
+ base::TimeDelta preferred_render_interval_;
base::SimpleTestTickClock tick_clock_;
StrictMock<MockWebVideoFrameSubmitter>* submitter_;
std::unique_ptr<StrictMock<MockWebVideoFrameSubmitter>> client_;
@@ -341,6 +347,16 @@ TEST_P(VideoFrameCompositorTest, UpdateCurrentFrameIfStale) {
StopVideoRendererSink(false);
}
+TEST_P(VideoFrameCompositorTest, PreferredRenderInterval) {
+ preferred_render_interval_ = base::TimeDelta::FromSeconds(1);
+ compositor_->Start(this);
+ EXPECT_EQ(compositor_->GetPreferredRenderInterval(),
+ preferred_render_interval_);
+ compositor_->Stop();
+ EXPECT_EQ(compositor_->GetPreferredRenderInterval(),
+ viz::BeginFrameArgs::MinInterval());
+}
+
INSTANTIATE_TEST_SUITE_P(SubmitterEnabled,
VideoFrameCompositorTest,
::testing::Bool());
diff --git a/chromium/media/blink/watch_time_reporter.cc b/chromium/media/blink/watch_time_reporter.cc
index 52fe5c4aac5..317826da949 100644
--- a/chromium/media/blink/watch_time_reporter.cc
+++ b/chromium/media/blink/watch_time_reporter.cc
@@ -41,7 +41,7 @@ PropertyAction HandlePropertyChange(T new_value,
WatchTimeReporter::WatchTimeReporter(
mojom::PlaybackPropertiesPtr properties,
- const gfx::Size& initial_natural_size,
+ const gfx::Size& natural_size,
GetMediaTimeCB get_media_time_cb,
mojom::MediaMetricsProvider* provider,
scoped_refptr<base::SequencedTaskRunner> task_runner,
@@ -49,7 +49,7 @@ WatchTimeReporter::WatchTimeReporter(
: WatchTimeReporter(std::move(properties),
false /* is_background */,
false /* is_muted */,
- initial_natural_size,
+ natural_size,
std::move(get_media_time_cb),
provider,
task_runner,
@@ -59,7 +59,7 @@ WatchTimeReporter::WatchTimeReporter(
mojom::PlaybackPropertiesPtr properties,
bool is_background,
bool is_muted,
- const gfx::Size& initial_natural_size,
+ const gfx::Size& natural_size,
GetMediaTimeCB get_media_time_cb,
mojom::MediaMetricsProvider* provider,
scoped_refptr<base::SequencedTaskRunner> task_runner,
@@ -67,9 +67,9 @@ WatchTimeReporter::WatchTimeReporter(
: properties_(std::move(properties)),
is_background_(is_background),
is_muted_(is_muted),
- initial_natural_size_(initial_natural_size),
get_media_time_cb_(std::move(get_media_time_cb)),
- reporting_timer_(tick_clock) {
+ reporting_timer_(tick_clock),
+ natural_size_(natural_size) {
DCHECK(get_media_time_cb_);
DCHECK(properties_->has_audio || properties_->has_video);
DCHECK_EQ(is_background, properties_->is_background);
@@ -100,9 +100,8 @@ WatchTimeReporter::WatchTimeReporter(
display_type_component_ = CreateDisplayTypeComponent();
}
- // If this is a sub-reporter or we shouldn't report watch time, we're done. We
- // don't support muted+background reporting currently.
- if (is_background_ || is_muted_ || !ShouldReportWatchTime())
+ // If this is a sub-reporter we're done.
+ if (is_background_ || is_muted_)
return;
 // Background watch time is reported by creating a background only watch time
@@ -112,8 +111,7 @@ WatchTimeReporter::WatchTimeReporter(
prop_copy->is_background = true;
background_reporter_.reset(new WatchTimeReporter(
std::move(prop_copy), true /* is_background */, false /* is_muted */,
- initial_natural_size_, get_media_time_cb_, provider, task_runner,
- tick_clock));
+ natural_size_, get_media_time_cb_, provider, task_runner, tick_clock));
// Muted watch time is only reported for audio+video playback.
if (!properties_->has_video || !properties_->has_audio)
@@ -125,8 +123,7 @@ WatchTimeReporter::WatchTimeReporter(
prop_copy->is_muted = true;
muted_reporter_.reset(new WatchTimeReporter(
std::move(prop_copy), false /* is_background */, true /* is_muted */,
- initial_natural_size_, get_media_time_cb_, provider, task_runner,
- tick_clock));
+ natural_size_, get_media_time_cb_, provider, task_runner, tick_clock));
}
WatchTimeReporter::~WatchTimeReporter() {
@@ -280,7 +277,19 @@ void WatchTimeReporter::UpdateSecondaryProperties(
secondary_properties.Clone());
}
if (muted_reporter_)
- muted_reporter_->UpdateSecondaryProperties(std::move(secondary_properties));
+ muted_reporter_->UpdateSecondaryProperties(secondary_properties.Clone());
+
+ // A change in resolution may affect ShouldReportingTimerRun().
+ bool original_should_run = ShouldReportingTimerRun();
+ natural_size_ = secondary_properties->natural_size;
+ bool should_run = ShouldReportingTimerRun();
+ if (original_should_run != should_run) {
+ if (should_run) {
+ MaybeStartReportingTimer(get_media_time_cb_.Run());
+ } else {
+ MaybeFinalizeWatchTime(FinalizeTime::ON_NEXT_UPDATE);
+ }
+ }
}
void WatchTimeReporter::SetAutoplayInitiated(bool autoplay_initiated) {
@@ -332,8 +341,8 @@ void WatchTimeReporter::OnDisplayTypeChanged(DisplayType display_type) {
bool WatchTimeReporter::ShouldReportWatchTime() const {
// Report listen time or watch time for videos of sufficient size.
return properties_->has_video
- ? (initial_natural_size_.height() >= kMinimumVideoSize.height() &&
- initial_natural_size_.width() >= kMinimumVideoSize.width())
+ ? (natural_size_.height() >= kMinimumVideoSize.height() &&
+ natural_size_.width() >= kMinimumVideoSize.width())
: properties_->has_audio;
}
@@ -341,14 +350,20 @@ bool WatchTimeReporter::ShouldReportingTimerRun() const {
// TODO(dalecurtis): We should only consider |volume_| when there is actually
// an audio track; requires updating lots of tests to fix.
return ShouldReportWatchTime() && is_playing_ && volume_ && is_visible_ &&
- !in_shutdown_ && !is_seeking_;
+ !in_shutdown_ && !is_seeking_ && has_valid_start_timestamp_;
}
void WatchTimeReporter::MaybeStartReportingTimer(
base::TimeDelta start_timestamp) {
- DCHECK_NE(start_timestamp, kInfiniteDuration);
DCHECK_GE(start_timestamp, base::TimeDelta());
+  // It's possible for |start_timestamp| to be kInfiniteDuration here if the page
+ // seeks to kInfiniteDuration (2**64 - 1) when Duration() is infinite. There
+ // is no possible elapsed watch time when this occurs, so don't start the
+ // WatchTimeReporter at this time. If a later seek puts us earlier in the
+ // stream this method will be called again after OnSeeking().
+ has_valid_start_timestamp_ = start_timestamp != kInfiniteDuration;
+
// Don't start the timer if our state indicates we shouldn't; this check is
// important since the various event handlers do not have to care about the
// state of other events.
@@ -435,8 +450,6 @@ void WatchTimeReporter::RecordWatchTime() {
}
void WatchTimeReporter::UpdateWatchTime() {
- DCHECK(ShouldReportWatchTime());
-
// First record watch time.
RecordWatchTime();
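
[Editor's note] Taken together, the hunks above turn the once-fixed size gate into a dynamic one: UpdateSecondaryProperties() re-evaluates ShouldReportingTimerRun() around the natural-size swap, starting the timer when the video grows past kMinimumVideoSize and finalizing watch time when it shrinks below it. Reduced to its essentials (not verbatim code):

    bool ran_before = ShouldReportingTimerRun();        // Old natural_size_.
    natural_size_ = secondary_properties->natural_size;
    bool runs_after = ShouldReportingTimerRun();        // New natural_size_.
    if (ran_before != runs_after) {
      if (runs_after)
        MaybeStartReportingTimer(get_media_time_cb_.Run());    // Crossed upward.
      else
        MaybeFinalizeWatchTime(FinalizeTime::ON_NEXT_UPDATE);  // Crossed downward.
    }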
diff --git a/chromium/media/blink/watch_time_reporter.h b/chromium/media/blink/watch_time_reporter.h
index 19cb9ff48c5..eb143afe6b2 100644
--- a/chromium/media/blink/watch_time_reporter.h
+++ b/chromium/media/blink/watch_time_reporter.h
@@ -80,7 +80,7 @@ class MEDIA_BLINK_EXPORT WatchTimeReporter : base::PowerObserver {
// TODO(dalecurtis): Should we only report when rate == 1.0? Should we scale
// the elapsed media time instead?
WatchTimeReporter(mojom::PlaybackPropertiesPtr properties,
- const gfx::Size& initial_natural_size,
+ const gfx::Size& natural_size,
GetMediaTimeCB get_media_time_cb,
mojom::MediaMetricsProvider* provider,
scoped_refptr<base::SequencedTaskRunner> task_runner,
@@ -136,6 +136,9 @@ class MEDIA_BLINK_EXPORT WatchTimeReporter : base::PowerObserver {
// Mutates various properties that may change over the lifetime of a playback
// but for which we don't want to interrupt reporting for. UMA watch time will
// not be interrupted by changes to these properties, while UKM will.
+ //
+  // Note: Both UMA and UKM watch time will be interrupted if the natural size
+ // transitions above/below kMinimumVideoSize.
void UpdateSecondaryProperties(
mojom::SecondaryPlaybackPropertiesPtr secondary_properties);
@@ -154,7 +157,7 @@ class MEDIA_BLINK_EXPORT WatchTimeReporter : base::PowerObserver {
WatchTimeReporter(mojom::PlaybackPropertiesPtr properties,
bool is_background,
bool is_muted,
- const gfx::Size& initial_natural_size,
+ const gfx::Size& natural_size,
GetMediaTimeCB get_media_time_cb,
mojom::MediaMetricsProvider* provider,
scoped_refptr<base::SequencedTaskRunner> task_runner,
@@ -195,7 +198,6 @@ class MEDIA_BLINK_EXPORT WatchTimeReporter : base::PowerObserver {
const mojom::PlaybackPropertiesPtr properties_;
const bool is_background_;
const bool is_muted_;
- const gfx::Size initial_natural_size_;
const GetMediaTimeCB get_media_time_cb_;
mojom::WatchTimeRecorderPtr recorder_;
@@ -211,8 +213,13 @@ class MEDIA_BLINK_EXPORT WatchTimeReporter : base::PowerObserver {
bool is_visible_ = true;
bool is_seeking_ = false;
bool in_shutdown_ = false;
+ bool has_valid_start_timestamp_ = false;
double volume_ = 1.0;
+ // Updated by UpdateSecondaryProperties(); controls timer state when
+ // transitioning above/below kMinimumVideoSize.
+ gfx::Size natural_size_;
+
int underflow_count_ = 0;
std::vector<base::TimeDelta> pending_underflow_events_;
diff --git a/chromium/media/blink/watch_time_reporter_unittest.cc b/chromium/media/blink/watch_time_reporter_unittest.cc
index d1b54e34c29..2c99e3035d2 100644
--- a/chromium/media/blink/watch_time_reporter_unittest.cc
+++ b/chromium/media/blink/watch_time_reporter_unittest.cc
@@ -22,6 +22,7 @@
namespace media {
+constexpr gfx::Size kSizeTooSmall = gfx::Size(101, 101);
constexpr gfx::Size kSizeJustRight = gfx::Size(201, 201);
using blink::WebMediaPlayer;
@@ -658,6 +659,14 @@ TEST_P(WatchTimeReporterTest, WatchTimeReporter) {
wtr_.reset();
}
+TEST_P(WatchTimeReporterTest, WatchTimeReporterInfiniteStartTime) {
+ EXPECT_CALL(*this, GetCurrentMediaTime())
+ .WillRepeatedly(testing::Return(kInfiniteDuration));
+ Initialize(false, false, kSizeJustRight);
+ wtr_->OnPlaying();
+ EXPECT_FALSE(IsMonitoring());
+}
+
TEST_P(WatchTimeReporterTest, WatchTimeReporterBasic) {
constexpr base::TimeDelta kWatchTimeEarly = base::TimeDelta::FromSeconds(5);
constexpr base::TimeDelta kWatchTimeLate = base::TimeDelta::FromSeconds(10);
@@ -808,6 +817,51 @@ TEST_P(WatchTimeReporterTest, WatchTimeReporterSecondaryProperties) {
testing::Mock::VerifyAndClearExpectations(this);
}
+TEST_P(WatchTimeReporterTest, SecondaryProperties_SizeIncreased) {
+ if (!has_video_)
+ return;
+
+ EXPECT_CALL(*this, GetCurrentMediaTime())
+ .WillRepeatedly(testing::Return(base::TimeDelta()));
+ Initialize(false, false, kSizeTooSmall);
+ wtr_->OnPlaying();
+ EXPECT_FALSE(IsMonitoring());
+
+ EXPECT_CALL(*this, OnUpdateSecondaryProperties(_))
+ .Times((has_audio_ && has_video_) ? 3 : 2);
+ wtr_->UpdateSecondaryProperties(mojom::SecondaryPlaybackProperties::New(
+ kUnknownAudioCodec, kUnknownVideoCodec, "", "",
+ EncryptionMode::kUnencrypted, EncryptionMode::kUnencrypted,
+ kSizeJustRight));
+ EXPECT_TRUE(IsMonitoring());
+
+ EXPECT_WATCH_TIME_FINALIZED();
+ wtr_.reset();
+}
+
+TEST_P(WatchTimeReporterTest, SecondaryProperties_SizeDecreased) {
+ if (!has_video_)
+ return;
+
+ EXPECT_CALL(*this, GetCurrentMediaTime())
+ .WillRepeatedly(testing::Return(base::TimeDelta()));
+ Initialize(false, false, kSizeJustRight);
+ wtr_->OnPlaying();
+ EXPECT_TRUE(IsMonitoring());
+
+ EXPECT_CALL(*this, OnUpdateSecondaryProperties(_))
+ .Times((has_audio_ && has_video_) ? 3 : 2);
+ wtr_->UpdateSecondaryProperties(mojom::SecondaryPlaybackProperties::New(
+ kUnknownAudioCodec, kUnknownVideoCodec, "", "",
+ EncryptionMode::kUnencrypted, EncryptionMode::kUnencrypted,
+ kSizeTooSmall));
+ EXPECT_WATCH_TIME_FINALIZED();
+ CycleReportingTimer();
+
+ EXPECT_FALSE(IsMonitoring());
+ wtr_.reset();
+}
+
TEST_P(WatchTimeReporterTest, WatchTimeReporterAutoplayInitiated) {
Initialize(true, true, kSizeJustRight);
diff --git a/chromium/media/blink/webaudiosourceprovider_impl.cc b/chromium/media/blink/webaudiosourceprovider_impl.cc
index bc6d4544109..ec6596a215b 100644
--- a/chromium/media/blink/webaudiosourceprovider_impl.cc
+++ b/chromium/media/blink/webaudiosourceprovider_impl.cc
@@ -236,6 +236,12 @@ void WebAudioSourceProviderImpl::Pause() {
sink_->Pause();
}
+void WebAudioSourceProviderImpl::Flush() {
+ base::AutoLock auto_lock(sink_lock_);
+ if (!client_ && sink_)
+ sink_->Flush();
+}
+
bool WebAudioSourceProviderImpl::SetVolume(double volume) {
base::AutoLock auto_lock(sink_lock_);
volume_ = volume;
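
[Editor's note] Flush() follows the same sink_lock_ discipline as Play() and Pause() above, and deliberately does nothing while a WebAudio client is attached, since the sink is bypassed in that mode. A hypothetical call site (names illustrative, not from the patch):

    // Drop queued audio around a flush/seek without tearing the sink down.
    provider->Pause();
    provider->Flush();  // No-op when a WebAudio client owns rendering.
    provider->Play();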
diff --git a/chromium/media/blink/webaudiosourceprovider_impl.h b/chromium/media/blink/webaudiosourceprovider_impl.h
index 15038b8d522..6a7f6b809f9 100644
--- a/chromium/media/blink/webaudiosourceprovider_impl.h
+++ b/chromium/media/blink/webaudiosourceprovider_impl.h
@@ -65,6 +65,7 @@ class MEDIA_BLINK_EXPORT WebAudioSourceProviderImpl
void Stop() override;
void Play() override;
void Pause() override;
+ void Flush() override;
bool SetVolume(double volume) override;
OutputDeviceInfo GetOutputDeviceInfo() override;
void GetOutputDeviceInfoAsync(OutputDeviceInfoCB info_cb) override;
diff --git a/chromium/media/blink/webcontentdecryptionmoduleaccess_impl.cc b/chromium/media/blink/webcontentdecryptionmoduleaccess_impl.cc
index 7cf0831cdf8..23507d4f091 100644
--- a/chromium/media/blink/webcontentdecryptionmoduleaccess_impl.cc
+++ b/chromium/media/blink/webcontentdecryptionmoduleaccess_impl.cc
@@ -92,8 +92,8 @@ void WebContentDecryptionModuleAccessImpl::CreateContentDecryptionModule(
cdm_config_, base::Passed(&result_copy)));
}
-const CdmConfig& WebContentDecryptionModuleAccessImpl::GetCdmConfig() const {
- return cdm_config_;
+bool WebContentDecryptionModuleAccessImpl::UseHardwareSecureCodecs() const {
+ return cdm_config_.use_hw_secure_codecs;
}
} // namespace media
diff --git a/chromium/media/blink/webcontentdecryptionmoduleaccess_impl.h b/chromium/media/blink/webcontentdecryptionmoduleaccess_impl.h
index 7a02a4d48ad..4e71a3b2f39 100644
--- a/chromium/media/blink/webcontentdecryptionmoduleaccess_impl.h
+++ b/chromium/media/blink/webcontentdecryptionmoduleaccess_impl.h
@@ -47,8 +47,7 @@ class WebContentDecryptionModuleAccessImpl
void CreateContentDecryptionModule(
blink::WebContentDecryptionModuleResult result,
scoped_refptr<base::SingleThreadTaskRunner> task_runner) override;
-
- const CdmConfig& GetCdmConfig() const;
+ bool UseHardwareSecureCodecs() const override;
private:
const blink::WebString key_system_;
diff --git a/chromium/media/blink/webcontentdecryptionmodulesession_impl.cc b/chromium/media/blink/webcontentdecryptionmodulesession_impl.cc
index bc1bb19af21..5f081a98e2a 100644
--- a/chromium/media/blink/webcontentdecryptionmodulesession_impl.cc
+++ b/chromium/media/blink/webcontentdecryptionmodulesession_impl.cc
@@ -152,8 +152,12 @@ bool SanitizeSessionId(const blink::WebString& session_id,
if (sanitized_session_id->length() > limits::kMaxSessionIdLength)
return false;
+ // Check that |sanitized_session_id| only contains non-space printable
+ // characters for easier logging. Note that checking alphanumeric is too
+ // strict because there are key systems using Base64 session IDs. See
+ // https://crbug.com/902828.
for (const char c : *sanitized_session_id) {
- if (!base::IsAsciiAlpha(c) && !base::IsAsciiDigit(c))
+ if (!base::IsAsciiPrintable(c) || c == ' ')
return false;
}
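
[Editor's note] The relaxed predicate keeps session IDs loggable while admitting the '+', '/', and '=' characters that Base64 uses. As a standalone sketch (base::IsAsciiPrintable comes from base/strings/string_util.h):

    #include <string>

    #include "base/strings/string_util.h"

    bool IsLoggableSessionId(const std::string& id) {
      for (const char c : id) {
        // Printable ASCII minus space: alphanumerics and Base64 punctuation
        // pass; spaces and control characters are rejected.
        if (!base::IsAsciiPrintable(c) || c == ' ')
          return false;
      }
      return true;
    }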
diff --git a/chromium/media/blink/webmediacapabilitiesclient_impl.cc b/chromium/media/blink/webmediacapabilitiesclient_impl.cc
deleted file mode 100644
index 37b023d305a..00000000000
--- a/chromium/media/blink/webmediacapabilitiesclient_impl.cc
+++ /dev/null
@@ -1,156 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/blink/webmediacapabilitiesclient_impl.h"
-
-#include <string>
-#include <vector>
-
-#include "base/bind.h"
-#include "base/bind_helpers.h"
-#include "media/base/cdm_config.h"
-#include "media/base/key_system_names.h"
-#include "media/base/mime_util.h"
-#include "media/base/supported_types.h"
-#include "media/base/video_codecs.h"
-#include "media/base/video_color_space.h"
-#include "media/blink/webcontentdecryptionmoduleaccess_impl.h"
-#include "media/mojo/interfaces/media_types.mojom.h"
-#include "mojo/public/cpp/bindings/associated_interface_ptr.h"
-#include "services/service_manager/public/cpp/connector.h"
-#include "third_party/blink/public/platform/modules/media_capabilities/web_media_capabilities_info.h"
-#include "third_party/blink/public/platform/modules/media_capabilities/web_media_decoding_configuration.h"
-#include "third_party/blink/public/platform/modules/media_capabilities/web_video_configuration.h"
-#include "third_party/blink/public/platform/platform.h"
-#include "third_party/blink/public/platform/scoped_web_callbacks.h"
-
-namespace media {
-
-void BindToHistoryService(mojom::VideoDecodePerfHistoryPtr* history_ptr) {
- DVLOG(2) << __func__;
- blink::Platform* platform = blink::Platform::Current();
- service_manager::Connector* connector = platform->GetConnector();
-
- connector->BindInterface(platform->GetBrowserServiceName(),
- mojo::MakeRequest(history_ptr));
-}
-
-bool CheckVideoSupport(const blink::WebVideoConfiguration& video_config,
- VideoCodecProfile* out_video_profile) {
- bool video_supported = false;
- VideoCodec video_codec = kUnknownVideoCodec;
- uint8_t video_level = 0;
- VideoColorSpace video_color_space;
- bool is_video_codec_ambiguous = true;
-
- if (!ParseVideoCodecString(
- video_config.mime_type.Ascii(), video_config.codec.Ascii(),
- &is_video_codec_ambiguous, &video_codec, out_video_profile,
- &video_level, &video_color_space)) {
- DVLOG(2) << __func__ << " Failed to parse video contentType: "
- << video_config.mime_type.Ascii()
- << "; codecs=" << video_config.codec.Ascii();
- video_supported = false;
- } else if (is_video_codec_ambiguous) {
- DVLOG(2) << __func__ << " Invalid (ambiguous) video codec string:"
- << video_config.codec.Ascii();
- video_supported = false;
- } else {
- video_supported = IsSupportedVideoType(
- {video_codec, *out_video_profile, video_level, video_color_space});
- }
-
- return video_supported;
-}
-
-WebMediaCapabilitiesClientImpl::WebMediaCapabilitiesClientImpl() = default;
-
-WebMediaCapabilitiesClientImpl::~WebMediaCapabilitiesClientImpl() = default;
-
-namespace {
-void VideoPerfInfoCallback(
- blink::ScopedWebCallbacks<blink::WebMediaCapabilitiesDecodingInfoCallbacks>
- scoped_callbacks,
- std::unique_ptr<blink::WebMediaCapabilitiesDecodingInfo> info,
- bool is_smooth,
- bool is_power_efficient) {
- DCHECK(info->supported);
- info->smooth = is_smooth;
- info->power_efficient = is_power_efficient;
-
- scoped_callbacks.PassCallbacks()->OnSuccess(std::move(info));
-}
-
-void OnGetPerfInfoError(
- std::unique_ptr<blink::WebMediaCapabilitiesDecodingInfoCallbacks>
- callbacks) {
- callbacks->OnError();
-}
-} // namespace
-
-void WebMediaCapabilitiesClientImpl::DecodingInfo(
- const blink::WebMediaDecodingConfiguration& configuration,
- std::unique_ptr<blink::WebContentDecryptionModuleAccess> cdm_access,
- std::unique_ptr<blink::WebMediaCapabilitiesDecodingInfoCallbacks>
- callbacks) {
- std::unique_ptr<blink::WebMediaCapabilitiesDecodingInfo> info(
- new blink::WebMediaCapabilitiesDecodingInfo());
-
- DCHECK(configuration.video_configuration);
- const blink::WebVideoConfiguration& video_config =
- configuration.video_configuration.value();
- VideoCodecProfile video_profile = VIDEO_CODEC_PROFILE_UNKNOWN;
-
- // TODO(chcunningham): Skip the call to IsSupportedVideoType() when we already
- // have a |cdm_access|. In this case, we know the codec is supported, but we
- // still need to get the profile and validate the mime type is allowed (not
- // ambiguous) by MediaCapapbilities.
- bool video_supported = CheckVideoSupport(video_config, &video_profile);
-
- // Return early for unsupported configurations.
- if (!video_supported) {
- info->supported = info->smooth = info->power_efficient = video_supported;
- callbacks->OnSuccess(std::move(info));
- return;
- }
-
- // Video is supported! Check its performance history.
- info->supported = true;
-
- if (!decode_history_ptr_.is_bound())
- BindToHistoryService(&decode_history_ptr_);
- DCHECK(decode_history_ptr_.is_bound());
-
- std::string key_system = "";
- bool use_hw_secure_codecs = false;
- if (cdm_access) {
- WebContentDecryptionModuleAccessImpl* cdm_access_impl =
- WebContentDecryptionModuleAccessImpl::From(cdm_access.get());
-
- key_system = cdm_access_impl->GetKeySystem().Ascii();
- use_hw_secure_codecs = cdm_access_impl->GetCdmConfig().use_hw_secure_codecs;
-
- // EME is supported! Provide the MediaKeySystemAccess.
- info->content_decryption_module_access = std::move(cdm_access);
- }
-
- mojom::PredictionFeaturesPtr features = mojom::PredictionFeatures::New(
- video_profile, gfx::Size(video_config.width, video_config.height),
- video_config.framerate, key_system, use_hw_secure_codecs);
-
- decode_history_ptr_->GetPerfInfo(
- std::move(features),
- base::BindOnce(
- &VideoPerfInfoCallback,
- blink::MakeScopedWebCallbacks(std::move(callbacks),
- base::BindOnce(&OnGetPerfInfoError)),
- std::move(info)));
-}
-
-void WebMediaCapabilitiesClientImpl::BindVideoDecodePerfHistoryForTests(
- mojom::VideoDecodePerfHistoryPtr decode_history_ptr) {
- decode_history_ptr_ = std::move(decode_history_ptr);
-}
-
-} // namespace media
diff --git a/chromium/media/blink/webmediacapabilitiesclient_impl.h b/chromium/media/blink/webmediacapabilitiesclient_impl.h
deleted file mode 100644
index 594b0906ff2..00000000000
--- a/chromium/media/blink/webmediacapabilitiesclient_impl.h
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BLINK_WEBMEDIACAPABILITIESCLIENT_IMPL_H_
-#define MEDIA_BLINK_WEBMEDIACAPABILITIESCLIENT_IMPL_H_
-
-#include "base/compiler_specific.h"
-#include "base/macros.h"
-#include "media/blink/media_blink_export.h"
-#include "media/mojo/interfaces/video_decode_perf_history.mojom.h"
-#include "third_party/blink/public/platform/modules/media_capabilities/web_media_capabilities_client.h"
-
-namespace media {
-
-class MEDIA_BLINK_EXPORT WebMediaCapabilitiesClientImpl
- : public blink::WebMediaCapabilitiesClient {
- public:
- WebMediaCapabilitiesClientImpl();
- ~WebMediaCapabilitiesClientImpl() override;
-
- // Implementation of blink::WebMediaCapabilitiesClient.
- void DecodingInfo(
- const blink::WebMediaDecodingConfiguration&,
- std::unique_ptr<blink::WebContentDecryptionModuleAccess> cdm_access,
- std::unique_ptr<blink::WebMediaCapabilitiesDecodingInfoCallbacks>)
- override;
-
- void BindVideoDecodePerfHistoryForTests(
- mojom::VideoDecodePerfHistoryPtr decode_history_ptr);
-
- private:
- mojom::VideoDecodePerfHistoryPtr decode_history_ptr_;
-
- DISALLOW_COPY_AND_ASSIGN(WebMediaCapabilitiesClientImpl);
-};
-
-} // namespace media
-
-#endif // MEDIA_BLINK_WEBMEDIACAPABILITIESCLIENT_IMPL_H_
diff --git a/chromium/media/blink/webmediacapabilitiesclient_impl_unittest.cc b/chromium/media/blink/webmediacapabilitiesclient_impl_unittest.cc
deleted file mode 100644
index bf2a4980380..00000000000
--- a/chromium/media/blink/webmediacapabilitiesclient_impl_unittest.cc
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright 2018 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <memory>
-
-#include "base/run_loop.h"
-#include "base/test/scoped_task_environment.h"
-#include "media/blink/webmediacapabilitiesclient_impl.h"
-#include "media/mojo/interfaces/media_types.mojom.h"
-#include "media/mojo/interfaces/video_decode_perf_history.mojom.h"
-#include "mojo/public/cpp/bindings/binding.h"
-#include "testing/gmock/include/gmock/gmock.h"
-#include "testing/gtest/include/gtest/gtest.h"
-#include "third_party/blink/public/platform/modules/media_capabilities/web_media_capabilities_callbacks.h"
-#include "third_party/blink/public/platform/modules/media_capabilities/web_media_decoding_configuration.h"
-
-using ::testing::_;
-
-namespace media {
-
-class MockVideoDecodePerfHistory : public mojom::VideoDecodePerfHistory {
- public:
- explicit MockVideoDecodePerfHistory(
- mojom::VideoDecodePerfHistoryPtr* decode_perf_history_ptr)
- : binding_(this, mojo::MakeRequest(decode_perf_history_ptr)) {}
-
- MOCK_METHOD2(GetPerfInfo,
- void(mojom::PredictionFeaturesPtr, GetPerfInfoCallback));
-
- void CloseMojoBinding() { binding_.Close(); }
-
- private:
- mojo::Binding<mojom::VideoDecodePerfHistory> binding_;
-};
-
-class MockWebMediaCapabilitiesQueryCallbacks
- : public blink::WebMediaCapabilitiesDecodingInfoCallbacks {
- public:
- ~MockWebMediaCapabilitiesQueryCallbacks() override = default;
-
- void OnSuccess(
- std::unique_ptr<blink::WebMediaCapabilitiesDecodingInfo>) override {}
- MOCK_METHOD0(OnError, void());
-};
-
-// Verify that query callback is called even if mojo connection is lost while
-// waiting for the result of mojom.VideoDecodePerfHistory.GetPerfInfo() call.
-// See https://crbug.com/847211
-TEST(WebMediaCapabilitiesClientImplTest, RunCallbackEvenIfMojoDisconnects) {
- static const blink::WebVideoConfiguration kFakeVideoConfiguration{
- blink::WebString::FromASCII("video/webm"), // mime type
- blink::WebString::FromASCII("vp09.00.51.08.01.01.01.01"), // codec
- 1920, // width
- 1080, // height
- 2661034, // bitrate
- 25, // framerate
- };
-
- static const blink::WebMediaDecodingConfiguration kFakeMediaConfiguration{
- blink::MediaConfigurationType::kFile,
- base::nullopt, // audio configuration
- kFakeVideoConfiguration, // video configuration
- base::nullopt, // key system configuration
- };
-
- using ::testing::InvokeWithoutArgs;
-
- mojom::VideoDecodePerfHistoryPtr decode_perf_history_ptr;
- MockVideoDecodePerfHistory decode_perf_history_impl(&decode_perf_history_ptr);
-
- ASSERT_TRUE(decode_perf_history_ptr.is_bound());
-
- WebMediaCapabilitiesClientImpl media_capabilities_client_impl;
- media_capabilities_client_impl.BindVideoDecodePerfHistoryForTests(
- std::move(decode_perf_history_ptr));
-
- auto query_callbacks =
- std::make_unique<MockWebMediaCapabilitiesQueryCallbacks>();
-
- EXPECT_CALL(decode_perf_history_impl, GetPerfInfo(_, _))
- .WillOnce(
- InvokeWithoutArgs(&decode_perf_history_impl,
- &MockVideoDecodePerfHistory::CloseMojoBinding));
-
- EXPECT_CALL(*query_callbacks, OnError());
-
- media_capabilities_client_impl.DecodingInfo(kFakeMediaConfiguration,
- nullptr, /* cdm_access */
- std::move(query_callbacks));
-
- base::RunLoop().RunUntilIdle();
-}
-
-} // namespace media
diff --git a/chromium/media/blink/webmediaplayer_delegate.h b/chromium/media/blink/webmediaplayer_delegate.h
index 021af49f27b..b592b21dc80 100644
--- a/chromium/media/blink/webmediaplayer_delegate.h
+++ b/chromium/media/blink/webmediaplayer_delegate.h
@@ -69,10 +69,6 @@ class WebMediaPlayerDelegate {
// Called to set as the persistent video. A persistent video should hide its
// controls and go fullscreen.
virtual void OnBecamePersistentVideo(bool value) = 0;
-
- // Called when Picture-in-Picture mode is terminated from the
- // Picture-in-Picture window.
- virtual void OnPictureInPictureModeEnded() = 0;
};
// Returns true if the host frame is hidden or closed.
diff --git a/chromium/media/blink/webmediaplayer_impl.cc b/chromium/media/blink/webmediaplayer_impl.cc
index 4d842f49c43..93747dc7a82 100644
--- a/chromium/media/blink/webmediaplayer_impl.cc
+++ b/chromium/media/blink/webmediaplayer_impl.cc
@@ -116,13 +116,6 @@ bool IsResumeBackgroundVideosEnabled() {
return base::FeatureList::IsEnabled(kResumeBackgroundVideo);
}
-bool IsBackgroundVideoTrackOptimizationEnabled(
- WebMediaPlayer::LoadType load_type) {
- // Background video track optimization is always enabled for MSE videos.
- return load_type == WebMediaPlayer::LoadType::kLoadTypeMediaSource ||
- base::FeatureList::IsEnabled(kBackgroundSrcVideoTrackOptimization);
-}
-
bool IsBackgroundVideoPauseOptimizationEnabled() {
return base::FeatureList::IsEnabled(kBackgroundVideoPauseOptimization);
}
@@ -296,26 +289,12 @@ WebMediaPlayerImpl::WebMediaPlayerImpl(
media_task_runner_(params->media_task_runner()),
worker_task_runner_(params->worker_task_runner()),
media_log_(params->take_media_log()),
- pipeline_controller_(
- std::make_unique<PipelineImpl>(media_task_runner_,
- main_task_runner_,
- media_log_.get()),
- base::Bind(&WebMediaPlayerImpl::CreateRenderer,
- base::Unretained(this)),
- base::Bind(&WebMediaPlayerImpl::OnPipelineSeeked, AsWeakPtr()),
- base::Bind(&WebMediaPlayerImpl::OnPipelineSuspended, AsWeakPtr()),
- base::Bind(&WebMediaPlayerImpl::OnBeforePipelineResume, AsWeakPtr()),
- base::Bind(&WebMediaPlayerImpl::OnPipelineResumed, AsWeakPtr()),
- base::Bind(&WebMediaPlayerImpl::OnError, AsWeakPtr())),
client_(client),
encrypted_client_(encrypted_client),
delegate_(delegate),
defer_load_cb_(params->defer_load_cb()),
adjust_allocated_memory_cb_(params->adjust_allocated_memory_cb()),
tick_clock_(base::DefaultTickClock::GetInstance()),
- buffered_data_source_host_(
- base::Bind(&WebMediaPlayerImpl::OnProgress, AsWeakPtr()),
- tick_clock_),
url_index_(url_index),
context_provider_(params->context_provider()),
vfc_task_runner_(params->video_frame_compositor_task_runner()),
@@ -335,13 +314,32 @@ WebMediaPlayerImpl::WebMediaPlayerImpl(
is_background_video_playback_enabled_(
params->IsBackgroundVideoPlaybackEnabled()),
is_background_video_track_optimization_supported_(
- params->IsBackgroundVideoTrackOptimizationSupported()) {
+ params->IsBackgroundVideoTrackOptimizationSupported()),
+ weak_factory_(this) {
DVLOG(1) << __func__;
DCHECK(adjust_allocated_memory_cb_);
DCHECK(renderer_factory_selector_);
DCHECK(client_);
DCHECK(delegate_);
+ weak_this_ = weak_factory_.GetWeakPtr();
+
+ pipeline_controller_ = std::make_unique<PipelineController>(
+ std::make_unique<PipelineImpl>(media_task_runner_, main_task_runner_,
+ media_log_.get()),
+ base::BindRepeating(&WebMediaPlayerImpl::CreateRenderer,
+ base::Unretained(this)),
+ base::BindRepeating(&WebMediaPlayerImpl::OnPipelineSeeked, weak_this_),
+ base::BindRepeating(&WebMediaPlayerImpl::OnPipelineSuspended, weak_this_),
+ base::BindRepeating(&WebMediaPlayerImpl::OnBeforePipelineResume,
+ weak_this_),
+ base::BindRepeating(&WebMediaPlayerImpl::OnPipelineResumed, weak_this_),
+ base::BindRepeating(&WebMediaPlayerImpl::OnError, weak_this_));
+
+ buffered_data_source_host_ = std::make_unique<BufferedDataSourceHostImpl>(
+ base::BindRepeating(&WebMediaPlayerImpl::OnProgress, weak_this_),
+ tick_clock_);
+
// If we're supposed to force video overlays, then make sure that they're
// enabled all the time.
always_enable_overlays_ = base::CommandLine::ForCurrentProcess()->HasSwitch(
@@ -428,7 +426,7 @@ WebMediaPlayerImpl::~WebMediaPlayerImpl() {
// |demuxer_|, |data_source_|, |compositor_|, and |media_log_| must outlive
// this process. They will be destructed by the DestructionHelper below
// after trampolining through the media thread.
- pipeline_controller_.Stop();
+ pipeline_controller_->Stop();
if (last_reported_memory_usage_)
adjust_allocated_memory_cb_.Run(-last_reported_memory_usage_);
@@ -480,7 +478,6 @@ WebMediaPlayer::LoadTiming WebMediaPlayerImpl::Load(
LoadType load_type,
const blink::WebMediaPlayerSource& source,
CorsMode cors_mode) {
- DVLOG(1) << __func__;
// Only URL or MSE blob URL is supported.
DCHECK(source.IsURL());
blink::WebURL url = source.GetAsURL();
@@ -491,7 +488,7 @@ WebMediaPlayer::LoadTiming WebMediaPlayerImpl::Load(
if (defer_load_cb_) {
is_deferred = defer_load_cb_.Run(base::BindOnce(
- &WebMediaPlayerImpl::DoLoad, AsWeakPtr(), load_type, url, cors_mode));
+ &WebMediaPlayerImpl::DoLoad, weak_this_, load_type, url, cors_mode));
} else {
DoLoad(load_type, url, cors_mode);
}
@@ -536,7 +533,7 @@ void WebMediaPlayerImpl::EnableOverlay() {
overlay_mode_ == OverlayMode::kUseAndroidOverlay) {
overlay_routing_token_is_pending_ = true;
token_available_cb_.Reset(
- base::Bind(&WebMediaPlayerImpl::OnOverlayRoutingToken, AsWeakPtr()));
+ base::Bind(&WebMediaPlayerImpl::OnOverlayRoutingToken, weak_this_));
request_routing_token_cb_.Run(token_available_cb_.callback());
}
@@ -720,8 +717,8 @@ void WebMediaPlayerImpl::DoLoad(LoadType load_type,
loaded_url_ = GURL("data:,");
// Mark all the data as buffered.
- buffered_data_source_host_.SetTotalBytes(data.size());
- buffered_data_source_host_.AddBufferedByteRange(0, data.size());
+ buffered_data_source_host_->SetTotalBytes(data.size());
+ buffered_data_source_host_->AddBufferedByteRange(0, data.size());
DCHECK(!mb_data_source_);
data_source_.reset(new MemoryDataSource(std::move(data)));
@@ -732,18 +729,18 @@ void WebMediaPlayerImpl::DoLoad(LoadType load_type,
auto url_data =
url_index_->GetByUrl(url, static_cast<UrlData::CorsMode>(cors_mode));
// Notify |this| of bytes received by the network.
- url_data->AddBytesReceivedCallback(BindToCurrentLoop(base::BindRepeating(
- &WebMediaPlayerImpl::OnBytesReceived, AsWeakPtr())));
+ url_data->AddBytesReceivedCallback(BindToCurrentLoop(
+ base::BindRepeating(&WebMediaPlayerImpl::OnBytesReceived, weak_this_)));
mb_data_source_ = new MultibufferDataSource(
main_task_runner_, std::move(url_data), media_log_.get(),
- &buffered_data_source_host_,
+ buffered_data_source_host_.get(),
base::BindRepeating(&WebMediaPlayerImpl::NotifyDownloading,
- AsWeakPtr()));
+ weak_this_));
data_source_.reset(mb_data_source_);
mb_data_source_->SetPreload(preload_);
mb_data_source_->SetIsClientAudioElement(client_->IsAudioElement());
mb_data_source_->Initialize(
- base::Bind(&WebMediaPlayerImpl::DataSourceInitialized, AsWeakPtr()));
+ base::Bind(&WebMediaPlayerImpl::DataSourceInitialized, weak_this_));
}
}
@@ -758,12 +755,9 @@ void WebMediaPlayerImpl::Play() {
// TODO(sandersd): Do we want to reset the idle timer here?
delegate_->SetIdle(delegate_id_, false);
paused_ = false;
- pipeline_controller_.SetPlaybackRate(playback_rate_);
+ pipeline_controller_->SetPlaybackRate(playback_rate_);
background_pause_timer_.Stop();
- if (mb_data_source_)
- mb_data_source_->MediaIsPlaying();
-
if (observer_)
observer_->OnPlaying();
@@ -781,6 +775,8 @@ void WebMediaPlayerImpl::Play() {
video_decode_stats_reporter_->OnPlaying();
media_log_->AddEvent(media_log_->CreateEvent(MediaLogEvent::PLAY));
+
+ MaybeUpdateBufferSizesForPlayback();
UpdatePlayState();
}
@@ -800,8 +796,11 @@ void WebMediaPlayerImpl::Pause() {
if (blink::WebUserGestureIndicator::IsProcessingUserGesture(frame_))
video_locked_when_paused_when_hidden_ = true;
- pipeline_controller_.SetPlaybackRate(0.0);
- paused_time_ = pipeline_controller_.GetMediaTime();
+ pipeline_controller_->SetPlaybackRate(0.0);
+
+ // For states <= kReadyStateHaveMetadata, we may not have a renderer yet.
+ if (highest_ready_state_ > WebMediaPlayer::kReadyStateHaveMetadata)
+ paused_time_ = pipeline_controller_->GetMediaTime();
if (observer_)
observer_->OnPaused();
@@ -842,7 +841,7 @@ void WebMediaPlayerImpl::DoSeek(base::TimeDelta time, bool time_updated) {
// 2) For MSE.
// Because the buffers may have changed between seeks, MSE seeks are
// never elided.
- if (paused_ && pipeline_controller_.IsStable() &&
+ if (paused_ && pipeline_controller_->IsStable() &&
(paused_time_ == time ||
(ended_ && time == base::TimeDelta::FromSecondsD(Duration()))) &&
!chunk_demuxer_) {
@@ -852,7 +851,7 @@ void WebMediaPlayerImpl::DoSeek(base::TimeDelta time, bool time_updated) {
if (old_state == kReadyStateHaveEnoughData) {
main_task_runner_->PostTask(
FROM_HERE, base::BindOnce(&WebMediaPlayerImpl::OnBufferingStateChange,
- AsWeakPtr(), BUFFERING_HAVE_ENOUGH));
+ weak_this_, BUFFERING_HAVE_ENOUGH));
}
return;
}
@@ -874,7 +873,7 @@ void WebMediaPlayerImpl::DoSeek(base::TimeDelta time, bool time_updated) {
seek_time_ = time;
if (paused_)
paused_time_ = time;
- pipeline_controller_.Seek(time, time_updated);
+ pipeline_controller_->Seek(time, time_updated);
// This needs to be called after Seek() so that if a resume is triggered, it
// is to the correct time.
@@ -893,18 +892,17 @@ void WebMediaPlayerImpl::SetRate(double rate) {
}
playback_rate_ = rate;
- if (!paused_) {
- pipeline_controller_.SetPlaybackRate(rate);
- if (mb_data_source_)
- mb_data_source_->MediaPlaybackRateChanged(rate);
- }
+ if (!paused_)
+ pipeline_controller_->SetPlaybackRate(rate);
+
+ MaybeUpdateBufferSizesForPlayback();
}
void WebMediaPlayerImpl::SetVolume(double volume) {
DVLOG(1) << __func__ << "(" << volume << ")";
DCHECK(main_task_runner_->BelongsToCurrentThread());
volume_ = volume;
- pipeline_controller_.SetVolume(volume_ * volume_multiplier_);
+ pipeline_controller_->SetVolume(volume_ * volume_multiplier_);
if (watch_time_reporter_)
watch_time_reporter_->OnVolumeChange(volume);
delegate_->DidPlayerMutedStatusChange(delegate_id_, volume == 0.0);
@@ -974,7 +972,7 @@ void WebMediaPlayerImpl::EnabledAudioTracksChanged(
}
MEDIA_LOG(INFO, media_log_.get())
<< "Enabled audio tracks: [" << logstr.str() << "]";
- pipeline_controller_.OnEnabledAudioTracksChanged(enabledMediaTrackIds);
+ pipeline_controller_->OnEnabledAudioTracksChanged(enabledMediaTrackIds);
}
void WebMediaPlayerImpl::SelectedVideoTrackChanged(
@@ -987,7 +985,7 @@ void WebMediaPlayerImpl::SelectedVideoTrackChanged(
MEDIA_LOG(INFO, media_log_.get())
<< "Selected video track: [" << selected_video_track_id.value_or("")
<< "]";
- pipeline_controller_.OnSelectedVideoTrackChanged(selected_video_track_id);
+ pipeline_controller_->OnSelectedVideoTrackChanged(selected_video_track_id);
}
blink::WebSize WebMediaPlayerImpl::NaturalSize() const {
@@ -1008,7 +1006,7 @@ blink::WebSize WebMediaPlayerImpl::VisibleRect() const {
bool WebMediaPlayerImpl::Paused() const {
DCHECK(main_task_runner_->BelongsToCurrentThread());
- return pipeline_controller_.GetPlaybackRate() == 0.0f;
+ return pipeline_controller_->GetPlaybackRate() == 0.0f;
}
bool WebMediaPlayerImpl::PausedWhenHidden() const {
@@ -1055,7 +1053,6 @@ double WebMediaPlayerImpl::timelineOffset() const {
base::TimeDelta WebMediaPlayerImpl::GetCurrentTimeInternal() const {
DCHECK(main_task_runner_->BelongsToCurrentThread());
- DCHECK_NE(ready_state_, WebMediaPlayer::kReadyStateHaveNothing);
base::TimeDelta current_time;
if (Seeking())
@@ -1063,9 +1060,10 @@ base::TimeDelta WebMediaPlayerImpl::GetCurrentTimeInternal() const {
else if (paused_)
current_time = paused_time_;
else
- current_time = pipeline_controller_.GetMediaTime();
+ current_time = pipeline_controller_->GetMediaTime();
- DCHECK_NE(current_time, kInfiniteDuration);
+ // It's possible for |current_time| to be kInfiniteDuration here if the page
+ // seeks to kInfiniteDuration (2**64 - 1) when Duration() is infinite.
DCHECK_GE(current_time, base::TimeDelta());
return current_time;
}
@@ -1106,12 +1104,12 @@ blink::WebTimeRanges WebMediaPlayerImpl::Buffered() const {
DCHECK(main_task_runner_->BelongsToCurrentThread());
Ranges<base::TimeDelta> buffered_time_ranges =
- pipeline_controller_.GetBufferedTimeRanges();
+ pipeline_controller_->GetBufferedTimeRanges();
const base::TimeDelta duration = GetPipelineMediaDuration();
if (duration != kInfiniteDuration) {
- buffered_data_source_host_.AddBufferedTimeRanges(&buffered_time_ranges,
- duration);
+ buffered_data_source_host_->AddBufferedTimeRanges(&buffered_time_ranges,
+ duration);
}
return ConvertToWebTimeRanges(buffered_time_ranges);
}
@@ -1147,6 +1145,13 @@ bool WebMediaPlayerImpl::IsPrerollAttemptNeeded() {
// TODO(sandersd): Replace with |highest_ready_state_since_seek_| if we need
// to ensure that preroll always gets a chance to complete.
// See http://crbug.com/671525.
+ //
+ // Note: Even though we get play/pause signals at kReadyStateHaveMetadata, we
+ // must attempt to preroll until kReadyStateHaveFutureData so that the
+ // canplaythrough event will be fired to the page (which may be waiting).
+ //
+ // TODO(dalecurtis): We should try signaling kReadyStateHaveFutureData upon
+ // automatic-suspend of a non-playing element to avoid wasting resources.
if (highest_ready_state_ >= ReadyState::kReadyStateHaveFutureData)
return false;
@@ -1177,8 +1182,8 @@ bool WebMediaPlayerImpl::DidLoadingProgress() {
DCHECK(main_task_runner_->BelongsToCurrentThread());
// Note: Separate variables used to ensure both methods are called every time.
- const bool pipeline_progress = pipeline_controller_.DidLoadingProgress();
- const bool data_progress = buffered_data_source_host_.DidLoadingProgress();
+ const bool pipeline_progress = pipeline_controller_->DidLoadingProgress();
+ const bool data_progress = buffered_data_source_host_->DidLoadingProgress();
return pipeline_progress || data_progress;
}
@@ -1197,17 +1202,10 @@ void WebMediaPlayerImpl::Paint(cc::PaintCanvas* canvas,
scoped_refptr<VideoFrame> video_frame = GetCurrentFrameFromCompositor();
gfx::Rect gfx_rect(rect);
- Context3D context_3d;
- gpu::ContextSupport* context_support = nullptr;
if (video_frame.get() && video_frame->HasTextures()) {
- if (context_provider_) {
- context_3d = Context3D(context_provider_->ContextGL(),
- context_provider_->GrContext());
- context_support = context_provider_->ContextSupport();
- }
- if (!context_3d.gl)
+ if (!context_provider_)
return; // Unable to get/create a shared main thread context.
- if (!context_3d.gr_context)
+ if (!context_provider_->GrContext())
return; // The context has been lost since and can't setup a GrContext.
}
if (out_metadata && video_frame) {
@@ -1221,8 +1219,8 @@ void WebMediaPlayerImpl::Paint(cc::PaintCanvas* canvas,
}
video_renderer_.Paint(
video_frame, canvas, gfx::RectF(gfx_rect), flags,
- pipeline_metadata_.video_decoder_config.video_rotation(), context_3d,
- context_support);
+ pipeline_metadata_.video_decoder_config.video_transformation(),
+ context_provider_.get());
}
bool WebMediaPlayerImpl::WouldTaintOrigin() const {
@@ -1304,15 +1302,8 @@ bool WebMediaPlayerImpl::CopyVideoTextureToPlatformTexture(
}
}
- Context3D context_3d;
- gpu::ContextSupport* context_support = nullptr;
- if (context_provider_) {
- context_3d = Context3D(context_provider_->ContextGL(),
- context_provider_->GrContext());
- context_support = context_provider_->ContextSupport();
- }
return video_renderer_.CopyVideoFrameTexturesToGLTexture(
- context_3d, context_support, gl, video_frame.get(), target, texture,
+ context_provider_.get(), gl, video_frame.get(), target, texture,
internal_format, format, type, level, premultiply_alpha, flip_y);
}
@@ -1340,14 +1331,8 @@ bool WebMediaPlayerImpl::PrepareVideoFrameForWebGL(
}
}
- Context3D context_3d;
- if (context_provider_) {
- context_3d = Context3D(context_provider_->ContextGL(),
- context_provider_->GrContext());
- }
-
return video_renderer_.PrepareVideoFrameForWebGL(
- context_3d, gl, video_frame.get(), target, texture);
+ context_provider_.get(), gl, video_frame.get(), target, texture);
}
// static
@@ -1491,8 +1476,8 @@ void WebMediaPlayerImpl::SetCdmInternal(
// Keep the reference to the CDM, as it shouldn't be destroyed until
// after the pipeline is done with the |cdm_context|.
pending_cdm_context_ref_ = std::move(cdm_context_ref);
- pipeline_controller_.SetCdm(
- cdm_context, base::Bind(&WebMediaPlayerImpl::OnCdmAttached, AsWeakPtr()));
+ pipeline_controller_->SetCdm(
+ cdm_context, base::Bind(&WebMediaPlayerImpl::OnCdmAttached, weak_this_));
}
void WebMediaPlayerImpl::OnCdmAttached(bool success) {
@@ -1531,7 +1516,7 @@ void WebMediaPlayerImpl::OnPipelineSeeked(bool time_updated) {
seek_time_ = base::TimeDelta();
if (paused_) {
- paused_time_ = pipeline_controller_.GetMediaTime();
+ paused_time_ = pipeline_controller_->GetMediaTime();
} else {
DCHECK(watch_time_reporter_);
watch_time_reporter_->OnPlaying();
@@ -1547,26 +1532,21 @@ void WebMediaPlayerImpl::OnPipelineSeeked(bool time_updated) {
// is seeking.
UpdateBackgroundVideoOptimizationState();
- // If we successfully completed a suspended startup, lie about our buffering
- // state for the time being. While ultimately we want to avoid lying about the
- // buffering state, for the initial test of true preload=metadata, signal
- // BUFFERING_HAVE_ENOUGH so that canplay and canplaythrough fire correctly.
- //
- // Later we can experiment with the impact of removing this lie; initial data
- // suggests high disruption since we've also made preload=metadata the
- // default. Most sites are not prepared for a lack of canplay; even many of
- // our own tests don't function correctly. See https://crbug.com/694855.
- //
- // Note: This call is dual purpose, it is also responsible for triggering an
- // UpdatePlayState() call which may need to resume the pipeline once Blink
- // has been told about the ReadyState change.
+ // If we successfully completed a suspended startup, we need to make a call to
+ // UpdatePlayState() in case any events which should trigger a resume have
+ // occurred during startup.
if (attempting_suspended_start_ &&
- pipeline_controller_.IsPipelineSuspended()) {
- did_lazy_load_ = !has_poster_ && HasVideo();
- if (did_lazy_load_)
- DCHECK(base::FeatureList::IsEnabled(kPreloadMetadataLazyLoad));
-
+ pipeline_controller_->IsPipelineSuspended()) {
skip_metrics_due_to_startup_suspend_ = true;
+
+ // If we successfully completed a suspended startup, signal that we have
+ // reached BUFFERING_HAVE_ENOUGH so that canplay and canplaythrough fire
+ // correctly. We must unfortunately always do this because it's valid for
+ // elements to play while not visible nor even in the DOM.
+ //
+ // Note: This call is dual purpose, it is also responsible for triggering an
+ // UpdatePlayState() call which may need to resume the pipeline once Blink
+ // has been told about the ReadyState change.
OnBufferingStateChangeInternal(BUFFERING_HAVE_ENOUGH, true);
// If |skip_metrics_due_to_startup_suspend_| is unset by a resume started by
@@ -1589,10 +1569,39 @@ void WebMediaPlayerImpl::OnPipelineSuspended() {
// Add a log event so the player shows up as "SUSPENDED" in media-internals.
media_log_->AddEvent(media_log_->CreateEvent(MediaLogEvent::SUSPENDED));
+ if (attempting_suspended_start_) {
+ DCHECK(pipeline_controller_->IsSuspended());
+ did_lazy_load_ = !has_poster_ && HasVideo();
+ }
+
// Tell the data source we have enough data so that it may release the
- // connection.
- if (mb_data_source_)
- mb_data_source_->OnBufferingHaveEnough(true);
+ // connection (unless blink is waiting on us to signal play()).
+ if (mb_data_source_ && !client_->CouldPlayIfEnoughData()) {
+ // |attempting_suspended_start_| will be cleared by OnPipelineSeeked() which
+ // will occur after this method during a suspended startup.
+ if (attempting_suspended_start_ && did_lazy_load_) {
+ DCHECK(!has_first_frame_);
+ DCHECK(have_enough_after_lazy_load_cb_.IsCancelled());
+
+ // For lazy load, we won't know if the element is non-visible until a
+ // layout completes, so to avoid unnecessarily tearing down the network
+ // connection, briefly (250ms chosen arbitrarily) delay signaling "have
+ // enough" to the MultiBufferDataSource.
+ //
+ // base::Unretained() is safe here since the base::CancelableOnceClosure
+ // will cancel upon destruction of this class and |mb_data_source_| is
+ // gauranteeed to outlive us.
+ have_enough_after_lazy_load_cb_.Reset(
+ base::BindOnce(&MultibufferDataSource::OnBufferingHaveEnough,
+ base::Unretained(mb_data_source_), true));
+ main_task_runner_->PostDelayedTask(
+ FROM_HERE, have_enough_after_lazy_load_cb_.callback(),
+ base::TimeDelta::FromMilliseconds(250));
+ } else {
+ have_enough_after_lazy_load_cb_.Cancel();
+ mb_data_source_->OnBufferingHaveEnough(true);
+ }
+ }
ReportMemoryUsage();
@@ -1603,6 +1612,9 @@ void WebMediaPlayerImpl::OnPipelineSuspended() {
}
void WebMediaPlayerImpl::OnBeforePipelineResume() {
+ // Since we're resuming, cancel closing of the network connection.
+ have_enough_after_lazy_load_cb_.Cancel();
+
// We went through suspended startup, so the player is only just now spooling
// up for playback. As such adjust |load_start_time_| so it reports the same
// metric as what would be reported if we had not suspended at startup.
@@ -1679,7 +1691,8 @@ void WebMediaPlayerImpl::OnError(PipelineStatus status) {
#if defined(OS_ANDROID)
// |mb_data_source_| may be nullptr if someone passes in a m3u8 as a data://
// URL, since MediaPlayer doesn't support data:// URLs, fail playback now.
- const bool found_hls = status == PipelineStatus::DEMUXER_ERROR_DETECTED_HLS;
+ const bool found_hls = base::FeatureList::IsEnabled(kHlsPlayer) &&
+ status == PipelineStatus::DEMUXER_ERROR_DETECTED_HLS;
if (found_hls && mb_data_source_) {
demuxer_found_hls_ = true;
@@ -1709,7 +1722,7 @@ void WebMediaPlayerImpl::OnError(PipelineStatus status) {
data_source_->Stop();
mb_data_source_ = nullptr;
- pipeline_controller_.Stop();
+ pipeline_controller_->Stop();
SetMemoryReportingState(false);
// Trampoline through the media task runner to destruct the demuxer and
@@ -1727,7 +1740,7 @@ void WebMediaPlayerImpl::OnError(PipelineStatus status) {
std::move(start_pipeline_cb).Run();
},
std::move(demuxer_), std::move(data_source_),
- base::BindOnce(&WebMediaPlayerImpl::StartPipeline, AsWeakPtr()))));
+ base::BindOnce(&WebMediaPlayerImpl::StartPipeline, weak_this_))));
return;
}
@@ -1753,7 +1766,7 @@ void WebMediaPlayerImpl::OnError(PipelineStatus status) {
}
// PipelineController::Stop() is idempotent.
- pipeline_controller_.Stop();
+ pipeline_controller_->Stop();
UpdatePlayState();
}
@@ -1765,7 +1778,7 @@ void WebMediaPlayerImpl::OnEnded() {
DCHECK(main_task_runner_->BelongsToCurrentThread());
// Ignore state changes until we've completed all outstanding operations.
- if (!pipeline_controller_.IsStable())
+ if (!pipeline_controller_->IsStable())
return;
ended_ = true;
@@ -1781,7 +1794,7 @@ void WebMediaPlayerImpl::OnEnded() {
UpdatePlayState();
}
-void WebMediaPlayerImpl::OnMetadata(PipelineMetadata metadata) {
+void WebMediaPlayerImpl::OnMetadata(const PipelineMetadata& metadata) {
DVLOG(1) << __func__;
DCHECK(main_task_runner_->BelongsToCurrentThread());
@@ -1795,10 +1808,10 @@ void WebMediaPlayerImpl::OnMetadata(PipelineMetadata metadata) {
pipeline_metadata_ = metadata;
- SetReadyState(WebMediaPlayer::kReadyStateHaveMetadata);
- UMA_HISTOGRAM_ENUMERATION("Media.VideoRotation",
- metadata.video_decoder_config.video_rotation(),
- VIDEO_ROTATION_MAX + 1);
+ UMA_HISTOGRAM_ENUMERATION(
+ "Media.VideoRotation",
+ metadata.video_decoder_config.video_transformation().rotation,
+ VIDEO_ROTATION_MAX + 1);
if (HasAudio()) {
RecordEncryptionScheme("Audio",
@@ -1828,9 +1841,11 @@ void WebMediaPlayerImpl::OnMetadata(PipelineMetadata metadata) {
ActivateSurfaceLayerForVideo();
} else {
DCHECK(!video_layer_);
+ // TODO(tmathmeyer) does this need support for reflections as well?
video_layer_ = cc::VideoLayer::Create(
compositor_.get(),
- pipeline_metadata_.video_decoder_config.video_rotation());
+ pipeline_metadata_.video_decoder_config.video_transformation()
+ .rotation);
video_layer_->SetContentsOpaque(opaque_);
client_->SetCcLayer(video_layer_.get());
}
@@ -1845,6 +1860,11 @@ void WebMediaPlayerImpl::OnMetadata(PipelineMetadata metadata) {
CreateWatchTimeReporter();
CreateVideoDecodeStatsReporter();
+ // SetReadyState() may trigger all sorts of calls into this class (e.g.,
+ // Play(), Pause(), etc) so do it last to avoid unexpected states during the
+ // calls. An exception to this is UpdatePlayState(), which is safe to call and
+ // needs to use the new ReadyState in its calculations.
+ SetReadyState(WebMediaPlayer::kReadyStateHaveMetadata);
UpdatePlayState();
}
@@ -1864,14 +1884,17 @@ void WebMediaPlayerImpl::ActivateSurfaceLayerForVideo() {
.Run(this, compositor_->GetUpdateSubmissionStateCallback());
bridge_->CreateSurfaceLayer();
+ // TODO(tmathmeyer) does this need support for the reflection transformation
+ // as well?
vfc_task_runner_->PostTask(
FROM_HERE,
- base::BindOnce(&VideoFrameCompositor::EnableSubmission,
- base::Unretained(compositor_.get()),
- bridge_->GetSurfaceId(),
- bridge_->GetLocalSurfaceIdAllocationTime(),
- pipeline_metadata_.video_decoder_config.video_rotation(),
- IsInPictureInPicture()));
+ base::BindOnce(
+ &VideoFrameCompositor::EnableSubmission,
+ base::Unretained(compositor_.get()), bridge_->GetSurfaceId(),
+ bridge_->GetLocalSurfaceIdAllocationTime(),
+ pipeline_metadata_.video_decoder_config.video_transformation()
+ .rotation,
+ IsInPictureInPicture()));
bridge_->SetContentsOpaque(opaque_);
// If the element is already in Picture-in-Picture mode, it means that it
@@ -1944,7 +1967,7 @@ void WebMediaPlayerImpl::CreateVideoDecodeStatsReporter() {
void WebMediaPlayerImpl::OnProgress() {
DVLOG(4) << __func__;
- if (highest_ready_state_ < ReadyState::kReadyStateHaveFutureData) {
+ if (highest_ready_state_ < ReadyState::kReadyStateHaveMetadata) {
// Reset the preroll attempt clock.
preroll_attempt_pending_ = true;
preroll_attempt_start_time_ = base::TimeTicks();
@@ -1971,7 +1994,7 @@ bool WebMediaPlayerImpl::CanPlayThrough() {
// we're ever going to get, which means we say we can play through.
if (network_state_ == WebMediaPlayer::kNetworkStateIdle)
return true;
- return buffered_data_source_host_.CanPlayThrough(
+ return buffered_data_source_host_->CanPlayThrough(
base::TimeDelta::FromSecondsD(CurrentTime()),
base::TimeDelta::FromSecondsD(Duration()),
playback_rate_ == 0.0 ? 1.0 : playback_rate_);
@@ -1985,7 +2008,7 @@ void WebMediaPlayerImpl::OnBufferingStateChangeInternal(
// Ignore buffering state changes caused by back-to-back seeking, so as not
// to assume the second seek has finished when it was only the first seek.
- if (pipeline_controller_.IsPendingSeek())
+ if (pipeline_controller_->IsPendingSeek())
return;
auto log_event = media_log_->CreateBufferingStateChangedEvent(
@@ -2011,10 +2034,18 @@ void WebMediaPlayerImpl::OnBufferingStateChangeInternal(
SetReadyState(CanPlayThrough() ? WebMediaPlayer::kReadyStateHaveEnoughData
: WebMediaPlayer::kReadyStateHaveFutureData);
- // Let the DataSource know we have enough data. It may use this information
- // to release unused network connections.
- if (mb_data_source_ && !client_->CouldPlayIfEnoughData())
- mb_data_source_->OnBufferingHaveEnough(false);
+ // Let the DataSource know we have enough data -- this is the only function
+ // during which we advance to (or past) the kReadyStateHaveEnoughData state.
+ // It may use this information to update buffer sizes or release unused
+ // network connections.
+ MaybeUpdateBufferSizesForPlayback();
+ if (mb_data_source_ && !client_->CouldPlayIfEnoughData()) {
+ // For LazyLoad this will be handled during OnPipelineSuspended().
+ if (for_suspended_start && did_lazy_load_)
+ DCHECK(!have_enough_after_lazy_load_cb_.IsCancelled());
+ else
+ mb_data_source_->OnBufferingHaveEnough(false);
+ }
// Blink expects a timeChanged() in response to a seek().
if (should_notify_time_changed_) {
@@ -2113,7 +2144,7 @@ void WebMediaPlayerImpl::OnWaiting(WaitingReason reason) {
// picture on how to refactor WebMediaPlayerImpl, PipelineController and
// PipelineImpl.
case WaitingReason::kDecoderStateLost:
- pipeline_controller_.OnDecoderStateLost();
+ pipeline_controller_->OnDecoderStateLost();
return;
}
}
@@ -2127,7 +2158,8 @@ void WebMediaPlayerImpl::OnVideoNaturalSizeChange(const gfx::Size& size) {
// The input |size| is from the decoded video frame, which is the original
// natural size and need to be rotated accordingly.
gfx::Size rotated_size = GetRotatedVideoSize(
- pipeline_metadata_.video_decoder_config.video_rotation(), size);
+ pipeline_metadata_.video_decoder_config.video_transformation().rotation,
+ size);
RecordVideoNaturalSize(rotated_size);
@@ -2143,9 +2175,6 @@ void WebMediaPlayerImpl::OnVideoNaturalSizeChange(const gfx::Size& size) {
// in CreateWatchTimeReporter() that guesses the existence of a video track.
CreateWatchTimeReporter();
} else {
- // TODO(sandersd): If the size changed such that ShouldReportWatchTime()
- // changes, |watch_time_reporter_| should be reinitialized. This should be
- // internal to WatchTimeReporter.
UpdateSecondaryProperties();
}
@@ -2310,7 +2339,7 @@ void WebMediaPlayerImpl::OnFrameShown() {
if ((!paused_ && IsBackgroundOptimizationCandidate()) ||
paused_when_hidden_) {
frame_time_report_cb_.Reset(base::BindOnce(
- &WebMediaPlayerImpl::ReportTimeFromForegroundToFirstFrame, AsWeakPtr(),
+ &WebMediaPlayerImpl::ReportTimeFromForegroundToFirstFrame, weak_this_,
base::TimeTicks::Now()));
vfc_task_runner_->PostTask(
FROM_HERE,
@@ -2378,15 +2407,6 @@ void WebMediaPlayerImpl::OnBecamePersistentVideo(bool value) {
MaybeSendOverlayInfoToDecoder();
}
-void WebMediaPlayerImpl::OnPictureInPictureModeEnded() {
- // It is possible for this method to be called when the player is no longer in
- // Picture-in-Picture mode.
- if (!client_ || !IsInPictureInPicture())
- return;
-
- client_->PictureInPictureStopped();
-}
-
void WebMediaPlayerImpl::SendBytesReceivedUpdate() {
media_metrics_provider_->AddBytesReceived(bytes_received_since_last_update_);
bytes_received_since_last_update_ = 0;
@@ -2412,8 +2432,8 @@ void WebMediaPlayerImpl::OnBytesReceived(uint64_t data_length) {
void WebMediaPlayerImpl::ScheduleRestart() {
// TODO(watk): All restart logic should be moved into PipelineController.
- if (pipeline_controller_.IsPipelineRunning() &&
- !pipeline_controller_.IsPipelineSuspended()) {
+ if (pipeline_controller_->IsPipelineRunning() &&
+ !pipeline_controller_->IsPipelineSuspended()) {
pending_suspend_resume_cycle_ = true;
UpdatePlayState();
}
@@ -2585,7 +2605,7 @@ std::unique_ptr<Renderer> WebMediaPlayerImpl::CreateRenderer() {
RequestOverlayInfoCB request_overlay_info_cb;
#if defined(OS_ANDROID)
request_overlay_info_cb = BindToCurrentLoop(
- base::Bind(&WebMediaPlayerImpl::OnOverlayInfoRequested, AsWeakPtr()));
+ base::Bind(&WebMediaPlayerImpl::OnOverlayInfoRequested, weak_this_));
#endif
return renderer_factory_selector_->GetCurrentFactory()->CreateRenderer(
media_task_runner_, worker_task_runner_, audio_source_provider_.get(),
@@ -2597,14 +2617,14 @@ void WebMediaPlayerImpl::StartPipeline() {
Demuxer::EncryptedMediaInitDataCB encrypted_media_init_data_cb =
BindToCurrentLoop(base::Bind(
- &WebMediaPlayerImpl::OnEncryptedMediaInitData, AsWeakPtr()));
+ &WebMediaPlayerImpl::OnEncryptedMediaInitData, weak_this_));
vfc_task_runner_->PostTask(
FROM_HERE,
base::BindOnce(&VideoFrameCompositor::SetOnNewProcessedFrameCallback,
base::Unretained(compositor_.get()),
BindToCurrentLoop(base::BindOnce(
- &WebMediaPlayerImpl::OnFirstFrame, AsWeakPtr()))));
+ &WebMediaPlayerImpl::OnFirstFrame, weak_this_))));
#if defined(OS_ANDROID)
if (demuxer_found_hls_ ||
@@ -2621,8 +2641,8 @@ void WebMediaPlayerImpl::StartPipeline() {
demuxer_.reset(new MediaUrlDemuxer(
media_task_runner_, loaded_url_, frame_->GetDocument().SiteForCookies(),
allow_media_player_renderer_credentials_));
- pipeline_controller_.Start(Pipeline::StartType::kNormal, demuxer_.get(),
- this, false, false);
+ pipeline_controller_->Start(Pipeline::StartType::kNormal, demuxer_.get(),
+ this, false, false);
return;
}
#endif // defined(OS_ANDROID)
@@ -2635,7 +2655,7 @@ void WebMediaPlayerImpl::StartPipeline() {
#if BUILDFLAG(ENABLE_FFMPEG)
Demuxer::MediaTracksUpdatedCB media_tracks_updated_cb =
BindToCurrentLoop(base::Bind(
- &WebMediaPlayerImpl::OnFFmpegMediaTracksUpdated, AsWeakPtr()));
+ &WebMediaPlayerImpl::OnFFmpegMediaTracksUpdated, weak_this_));
demuxer_.reset(new FFmpegDemuxer(
media_task_runner_, data_source_.get(), encrypted_media_init_data_cb,
@@ -2648,16 +2668,15 @@ void WebMediaPlayerImpl::StartPipeline() {
DCHECK(!chunk_demuxer_);
DCHECK(!data_source_);
- chunk_demuxer_ = new ChunkDemuxer(
- BindToCurrentLoop(
- base::Bind(&WebMediaPlayerImpl::OnDemuxerOpened, AsWeakPtr())),
- BindToCurrentLoop(
- base::Bind(&WebMediaPlayerImpl::OnProgress, AsWeakPtr())),
- encrypted_media_init_data_cb, media_log_.get());
+ chunk_demuxer_ =
+ new ChunkDemuxer(BindToCurrentLoop(base::Bind(
+ &WebMediaPlayerImpl::OnDemuxerOpened, weak_this_)),
+ BindToCurrentLoop(base::Bind(
+ &WebMediaPlayerImpl::OnProgress, weak_this_)),
+ encrypted_media_init_data_cb, media_log_.get());
// Notify |this| of bytes that are received via MSE.
- chunk_demuxer_->AddBytesReceivedCallback(
- BindToCurrentLoop(base::BindRepeating(
- &WebMediaPlayerImpl::OnBytesReceived, AsWeakPtr())));
+ chunk_demuxer_->AddBytesReceivedCallback(BindToCurrentLoop(
+ base::BindRepeating(&WebMediaPlayerImpl::OnBytesReceived, weak_this_)));
demuxer_.reset(chunk_demuxer_);
if (base::FeatureList::IsEnabled(kMemoryPressureBasedSourceBufferGC)) {
@@ -2688,8 +2707,8 @@ void WebMediaPlayerImpl::StartPipeline() {
// ... and we're ready to go!
// TODO(sandersd): On Android, defer Start() if the tab is not visible.
seeking_ = true;
- pipeline_controller_.Start(start_type, demuxer_.get(), this, is_streaming,
- is_static);
+ pipeline_controller_->Start(start_type, demuxer_.get(), this, is_streaming,
+ is_static);
}
void WebMediaPlayerImpl::SetNetworkState(WebMediaPlayer::NetworkState state) {
@@ -2757,7 +2776,7 @@ void WebMediaPlayerImpl::UpdatePlayState() {
can_auto_suspend = false;
}
- bool is_suspended = pipeline_controller_.IsSuspended();
+ bool is_suspended = pipeline_controller_->IsSuspended();
bool is_backgrounded = IsBackgroundSuspendEnabled(this) && IsHidden();
PlayState state = UpdatePlayState_ComputePlayState(
is_flinging_, can_auto_suspend, is_suspended, is_backgrounded);
@@ -2844,14 +2863,14 @@ void WebMediaPlayerImpl::SetSuspendState(bool is_suspended) {
preroll_attempt_pending_ = true;
preroll_attempt_start_time_ = base::TimeTicks();
}
- pipeline_controller_.Suspend();
+ pipeline_controller_->Suspend();
} else {
// When resuming, start the preroll attempt clock.
if (preroll_attempt_pending_) {
preroll_attempt_pending_ = false;
preroll_attempt_start_time_ = tick_clock_->NowTicks();
}
- pipeline_controller_.Resume();
+ pipeline_controller_->Resume();
}
}
@@ -2874,19 +2893,19 @@ WebMediaPlayerImpl::UpdatePlayState_ComputePlayState(bool is_flinging,
// errors.
bool has_error = IsNetworkStateError(network_state_);
- // After HaveFutureData, Blink will call play() if the state is not paused;
- // prior to this point |paused_| is not accurate.
- bool have_future_data =
- highest_ready_state_ >= WebMediaPlayer::kReadyStateHaveFutureData;
+ // After kReadyStateHaveMetadata, Blink will call play() if the state is not
+ // paused; prior to this point |paused_| is not accurate.
+ bool have_metadata =
+ highest_ready_state_ >= WebMediaPlayer::kReadyStateHaveMetadata;
// Background suspend is only enabled for paused players.
// In the case of players with audio the session should be kept.
bool background_suspended =
- can_auto_suspend && is_backgrounded && paused_ && have_future_data;
+ can_auto_suspend && is_backgrounded && paused_ && have_metadata;
- // Idle suspension is allowed prior to have future data since there exist
- // mechanisms to exit the idle state when the player is capable of reaching
- // the have future data state; see didLoadingProgress().
+ // Idle suspension is allowed prior to kReadyStateHaveMetadata since there
+ // exist mechanisms to exit the idle state when the player is capable of
+ // reaching the kReadyStateHaveMetadata state; see didLoadingProgress().
//
// TODO(sandersd): Make the delegate suspend idle players immediately when
// hidden.
@@ -2894,9 +2913,10 @@ WebMediaPlayerImpl::UpdatePlayState_ComputePlayState(bool is_flinging,
!overlay_enabled_ && !needs_first_frame_;
// If we're already suspended, see if we can wait for user interaction. Prior
- // to HaveFutureData, we require |is_stale| to remain suspended. |is_stale|
- // will be cleared when we receive data which may take us to HaveFutureData.
- bool can_stay_suspended = (is_stale || have_future_data) && is_suspended &&
+ // to kReadyStateHaveMetadata, we require |is_stale| to remain suspended.
+ // |is_stale| will be cleared when we receive data which may take us to
+ // kReadyStateHaveMetadata.
+ bool can_stay_suspended = (is_stale || have_metadata) && is_suspended &&
paused_ && !seeking_ && !needs_first_frame_;
// Combined suspend state.
@@ -2907,8 +2927,7 @@ WebMediaPlayerImpl::UpdatePlayState_ComputePlayState(bool is_flinging,
<< ", idle_suspended=" << idle_suspended
<< ", background_suspended=" << background_suspended
<< ", can_stay_suspended=" << can_stay_suspended
- << ", is_stale=" << is_stale
- << ", have_future_data=" << have_future_data
+ << ", is_stale=" << is_stale << ", have_metadata=" << have_metadata
<< ", paused_=" << paused_ << ", seeking_=" << seeking_;
// We do not treat |playback_rate_| == 0 as paused. For the media session,
@@ -2926,14 +2945,14 @@ WebMediaPlayerImpl::UpdatePlayState_ComputePlayState(bool is_flinging,
// suspension does not destroy the media session, because we expect that the
// notification controls (and audio focus) remain. With some exceptions for
// background videos, the player only needs to have audio to have controls
- // (requires |have_future_data|).
+ // (requires |have_current_data|).
//
// |alive| indicates if the player should be present (not |GONE|) to the
// delegate, either paused or playing. The following must be true for the
// player:
- // - |have_future_data|, since we need to know whether we are paused to
- // correctly configure the session and also because the tracks and
- // duration are passed to DidPlay(),
+ // - |have_current_data|, since playback can't begin before that point, we
+ // need to know whether we are paused to correctly configure the session,
+ // and also because the tracks and duration are passed to DidPlay(),
// - |is_flinging| is false (RemotePlayback is not handled by the delegate)
// - |has_error| is false as player should have no errors,
// - |background_suspended| is false, otherwise |has_remote_controls| must
@@ -2948,7 +2967,8 @@ WebMediaPlayerImpl::UpdatePlayState_ComputePlayState(bool is_flinging,
bool backgrounded_video_has_no_remote_controls =
IsBackgroundSuspendEnabled(this) && !IsResumeBackgroundVideosEnabled() &&
is_backgrounded && HasVideo();
- bool can_play = !has_error && have_future_data;
+ bool have_current_data = highest_ready_state_ >= kReadyStateHaveCurrentData;
+ bool can_play = !has_error && have_current_data;
bool has_remote_controls =
HasAudio() && !backgrounded_video_has_no_remote_controls;
bool alive = can_play && !is_flinging && !must_suspend &&
@@ -2993,7 +3013,7 @@ void WebMediaPlayerImpl::ReportMemoryUsage() {
base::PostTaskAndReplyWithResult(
media_task_runner_.get(), FROM_HERE,
base::Bind(&Demuxer::GetMemoryUsage, base::Unretained(demuxer_.get())),
- base::Bind(&WebMediaPlayerImpl::FinishMemoryUsageReport, AsWeakPtr()));
+ base::Bind(&WebMediaPlayerImpl::FinishMemoryUsageReport, weak_this_));
} else {
FinishMemoryUsageReport(0);
}
@@ -3057,7 +3077,7 @@ void WebMediaPlayerImpl::ScheduleIdlePauseTimer() {
// Only schedule the pause timer if we're not paused or paused but going to
// resume when foregrounded, and are suspended and have audio.
if ((paused_ && !paused_when_hidden_) ||
- !pipeline_controller_.IsSuspended() || !HasAudio()) {
+ !pipeline_controller_->IsSuspended() || !HasAudio()) {
return;
}
@@ -3151,8 +3171,8 @@ bool WebMediaPlayerImpl::IsStreaming() const {
}
bool WebMediaPlayerImpl::DoesOverlaySupportMetadata() const {
- return pipeline_metadata_.video_decoder_config.video_rotation() ==
- VIDEO_ROTATION_0;
+ return pipeline_metadata_.video_decoder_config.video_transformation() ==
+ kNoTransformation;
}
void WebMediaPlayerImpl::ActivateViewportIntersectionMonitoring(bool activate) {
@@ -3175,7 +3195,7 @@ void WebMediaPlayerImpl::ForceStaleStateForTesting(ReadyState target_state) {
bool WebMediaPlayerImpl::IsSuspendedForTesting() {
// This intentionally uses IsPipelineSuspended since we need to know when the
// pipeline has reached the suspended state, not when it's in suspending.
- return pipeline_controller_.IsPipelineSuspended();
+ return pipeline_controller_->IsPipelineSuspended();
}
bool WebMediaPlayerImpl::DidLazyLoad() const {
@@ -3183,6 +3203,7 @@ bool WebMediaPlayerImpl::DidLazyLoad() const {
}
void WebMediaPlayerImpl::OnBecameVisible() {
+ have_enough_after_lazy_load_cb_.Cancel();
needs_first_frame_ = !has_first_frame_;
UpdatePlayState();
}
@@ -3201,6 +3222,10 @@ base::Optional<viz::SurfaceId> WebMediaPlayerImpl::GetSurfaceId() {
return bridge_->GetSurfaceId();
}
+base::WeakPtr<blink::WebMediaPlayer> WebMediaPlayerImpl::AsWeakPtr() {
+ return weak_this_;
+}
+
bool WebMediaPlayerImpl::ShouldPausePlaybackWhenHidden() const {
// Audio only stream is allowed to play when in background.
// TODO: We should check IsBackgroundOptimizationCandidate here. But we need
@@ -3234,8 +3259,7 @@ bool WebMediaPlayerImpl::ShouldDisableVideoWhenHidden() const {
// video. MSE video track switching on hide has gone through a field test.
// TODO(tmathmeyer): Passing load_type_ won't be needed after src= field
// testing is finished. see: http://crbug.com/709302
- if (!is_background_video_track_optimization_supported_ ||
- !IsBackgroundVideoTrackOptimizationEnabled(load_type_))
+ if (!is_background_video_track_optimization_supported_)
return false;
// Disable video track only for players with audio that match the criteria for
@@ -3309,7 +3333,7 @@ void WebMediaPlayerImpl::PauseVideoIfNeeded() {
// Don't pause video while the pipeline is stopped, resuming or seeking.
// Also if the video is paused already.
- if (!pipeline_controller_.IsPipelineRunning() || is_pipeline_resuming_ ||
+ if (!pipeline_controller_->IsPipelineRunning() || is_pipeline_resuming_ ||
seeking_ || paused_)
return;
@@ -3322,7 +3346,7 @@ void WebMediaPlayerImpl::PauseVideoIfNeeded() {
void WebMediaPlayerImpl::EnableVideoTrackIfNeeded() {
// Don't change video track while the pipeline is stopped, resuming or
// seeking.
- if (!pipeline_controller_.IsPipelineRunning() || is_pipeline_resuming_ ||
+ if (!pipeline_controller_->IsPipelineRunning() || is_pipeline_resuming_ ||
seeking_)
return;
@@ -3357,7 +3381,7 @@ PipelineStatistics WebMediaPlayerImpl::GetPipelineStatistics() const {
DCHECK(main_task_runner_->BelongsToCurrentThread());
return pipeline_statistics_for_test_.value_or(
- pipeline_controller_.GetStatistics());
+ pipeline_controller_->GetStatistics());
}
void WebMediaPlayerImpl::SetPipelineMediaDurationForTest(
@@ -3369,7 +3393,7 @@ base::TimeDelta WebMediaPlayerImpl::GetPipelineMediaDuration() const {
DCHECK(main_task_runner_->BelongsToCurrentThread());
return pipeline_media_duration_for_test_.value_or(
- pipeline_controller_.GetMediaDuration());
+ pipeline_controller_->GetMediaDuration());
}
void WebMediaPlayerImpl::ReportTimeFromForegroundToFirstFrame(
@@ -3471,7 +3495,7 @@ void WebMediaPlayerImpl::RecordVideoNaturalSize(const gfx::Size& natural_size) {
void WebMediaPlayerImpl::SetTickClockForTest(
const base::TickClock* tick_clock) {
tick_clock_ = tick_clock;
- buffered_data_source_host_.SetTickClockForTest(tick_clock);
+ buffered_data_source_host_->SetTickClockForTest(tick_clock);
}
void WebMediaPlayerImpl::OnFirstFrame(base::TimeTicks frame_time) {
@@ -3535,4 +3559,16 @@ void WebMediaPlayerImpl::MaybeSetContainerName() {
#endif
}
+void WebMediaPlayerImpl::MaybeUpdateBufferSizesForPlayback() {
+ // Don't increase the MultiBufferDataSource buffer size until we've reached
+ // kReadyStateHaveEnoughData. Otherwise we will unnecessarily slow down
+ // playback startup -- it can instead be done for free after playback starts.
+ if (!mb_data_source_ || highest_ready_state_ < kReadyStateHaveEnoughData)
+ return;
+
+ mb_data_source_->MediaPlaybackRateChanged(playback_rate_);
+ if (!paused_)
+ mb_data_source_->MediaIsPlaying();
+}
+
} // namespace media
diff --git a/chromium/media/blink/webmediaplayer_impl.h b/chromium/media/blink/webmediaplayer_impl.h
index cbef28577e3..71aceb128e8 100644
--- a/chromium/media/blink/webmediaplayer_impl.h
+++ b/chromium/media/blink/webmediaplayer_impl.h
@@ -89,8 +89,7 @@ class MEDIA_BLINK_EXPORT WebMediaPlayerImpl
public WebMediaPlayerDelegate::Observer,
public Pipeline::Client,
public MediaObserverClient,
- public blink::WebSurfaceLayerBridgeObserver,
- public base::SupportsWeakPtr<WebMediaPlayerImpl> {
+ public blink::WebSurfaceLayerBridgeObserver {
public:
// Constructs a WebMediaPlayer implementation using Chromium's media stack.
// |delegate| and |renderer_factory_selector| must not be null.
@@ -235,7 +234,6 @@ class MEDIA_BLINK_EXPORT WebMediaPlayerImpl
void OnSeekBackward(double seconds) override;
void OnVolumeMultiplierUpdate(double multiplier) override;
void OnBecamePersistentVideo(bool value) override;
- void OnPictureInPictureModeEnded() override;
// Callback for when bytes are received by |chunk_demuxer_| or the UrlData
// being loaded.
@@ -271,6 +269,8 @@ class MEDIA_BLINK_EXPORT WebMediaPlayerImpl
int GetDelegateId() override;
base::Optional<viz::SurfaceId> GetSurfaceId() override;
+ base::WeakPtr<blink::WebMediaPlayer> AsWeakPtr() override;
+
bool IsBackgroundMediaSuspendEnabled() const {
return is_background_suspend_enabled_;
}
@@ -322,7 +322,7 @@ class MEDIA_BLINK_EXPORT WebMediaPlayerImpl
// Pipeline::Client overrides.
void OnError(PipelineStatus status) override;
void OnEnded() override;
- void OnMetadata(PipelineMetadata metadata) override;
+ void OnMetadata(const PipelineMetadata& metadata) override;
void OnBufferingStateChange(BufferingState state) override;
void OnDurationChange() override;
void OnAddTextTrack(const TextTrackConfig& config,
@@ -580,6 +580,11 @@ class MEDIA_BLINK_EXPORT WebMediaPlayerImpl
void SendBytesReceivedUpdate();
+ // Notifies |mb_data_source_| of playback and rate changes which may increase
+ // the amount of data the DataSource buffers. Does nothing prior to reaching
+ // kReadyStateHaveEnoughData for the first time.
+ void MaybeUpdateBufferSizesForPlayback();
+
blink::WebLocalFrame* const frame_;
// The playback state last reported to |delegate_|, to avoid setting duplicate
@@ -609,7 +614,7 @@ class MEDIA_BLINK_EXPORT WebMediaPlayerImpl
std::unique_ptr<MediaLog> media_log_;
// |pipeline_controller_| owns an instance of Pipeline.
- PipelineController pipeline_controller_;
+ std::unique_ptr<PipelineController> pipeline_controller_;
// The LoadType passed in the |load_type| parameter of the load() call.
LoadType load_type_ = kLoadTypeURL;
@@ -712,7 +717,7 @@ class MEDIA_BLINK_EXPORT WebMediaPlayerImpl
const base::TickClock* tick_clock_ = nullptr;
- BufferedDataSourceHostImpl buffered_data_source_host_;
+ std::unique_ptr<BufferedDataSourceHostImpl> buffered_data_source_host_;
UrlIndex* const url_index_;
scoped_refptr<viz::ContextProvider> context_provider_;
@@ -970,6 +975,11 @@ class MEDIA_BLINK_EXPORT WebMediaPlayerImpl
// Whether background video optimization is supported on current platform.
bool is_background_video_track_optimization_supported_ = true;
+ base::CancelableOnceClosure have_enough_after_lazy_load_cb_;
+
+ base::WeakPtr<WebMediaPlayerImpl> weak_this_;
+ base::WeakPtrFactory<WebMediaPlayerImpl> weak_factory_;
+
DISALLOW_COPY_AND_ASSIGN(WebMediaPlayerImpl);
};
diff --git a/chromium/media/blink/webmediaplayer_impl_unittest.cc b/chromium/media/blink/webmediaplayer_impl_unittest.cc
index f40e4d6e26b..1519cb10cc9 100644
--- a/chromium/media/blink/webmediaplayer_impl_unittest.cc
+++ b/chromium/media/blink/webmediaplayer_impl_unittest.cc
@@ -456,7 +456,9 @@ class WebMediaPlayerImplTest : public testing::Test {
wmpi_->OnError(status);
}
- void OnMetadata(PipelineMetadata metadata) { wmpi_->OnMetadata(metadata); }
+ void OnMetadata(const PipelineMetadata& metadata) {
+ wmpi_->OnMetadata(metadata);
+ }
void OnWaiting(WaitingReason reason) { wmpi_->OnWaiting(reason); }
@@ -498,14 +500,14 @@ class WebMediaPlayerImplTest : public testing::Test {
return wmpi_->UpdatePlayState_ComputePlayState(false, false, false, true);
}
- bool IsSuspended() { return wmpi_->pipeline_controller_.IsSuspended(); }
+ bool IsSuspended() { return wmpi_->pipeline_controller_->IsSuspended(); }
int64_t GetDataSourceMemoryUsage() const {
return wmpi_->data_source_->GetMemoryUsage();
}
void AddBufferedRanges() {
- wmpi_->buffered_data_source_host_.AddBufferedByteRange(0, 1);
+ wmpi_->buffered_data_source_host_->AddBufferedByteRange(0, 1);
}
void SetDelegateState(WebMediaPlayerImpl::DelegateState state) {
@@ -575,6 +577,14 @@ class WebMediaPlayerImplTest : public testing::Test {
return GetVideoStatsReporter()->codec_profile_;
}
+ bool ShouldCancelUponDefer() const {
+ return wmpi_->mb_data_source_->cancel_on_defer_for_testing();
+ }
+
+ bool IsDataSourceMarkedAsPlaying() const {
+ return wmpi_->mb_data_source_->media_has_played();
+ }
+
enum class LoadType { kFullyBuffered, kStreaming };
void Load(std::string data_file,
LoadType load_type = LoadType::kFullyBuffered) {
@@ -632,14 +642,14 @@ class WebMediaPlayerImplTest : public testing::Test {
client->DidFinishLoading();
}
- void LoadAndWaitForMetadata(std::string data_file) {
+ // This runs until we reach the |ready_state_|. Attempting to wait for ready
+ // states < kReadyStateHaveCurrentData in non-startup-suspend test cases is
+ // unreliable due to asynchronous execution of tasks on the
+ // base::test:ScopedTaskEnvironment.
+ void LoadAndWaitForReadyState(std::string data_file,
+ blink::WebMediaPlayer::ReadyState ready_state) {
Load(data_file);
-
- // This runs until we reach the have current data state. Attempting to wait
- // for states < kReadyStateHaveCurrentData is unreliable due to asynchronous
- // execution of tasks on the base::test:ScopedTaskEnvironment.
- while (wmpi_->GetReadyState() <
- blink::WebMediaPlayer::kReadyStateHaveCurrentData) {
+ while (wmpi_->GetReadyState() < ready_state) {
base::RunLoop loop;
EXPECT_CALL(client_, ReadyStateChanged())
.WillRepeatedly(RunClosure(loop.QuitClosure()));
@@ -652,7 +662,14 @@ class WebMediaPlayerImplTest : public testing::Test {
// Verify we made it through pipeline startup.
EXPECT_TRUE(wmpi_->data_source_);
EXPECT_TRUE(wmpi_->demuxer_);
- EXPECT_FALSE(wmpi_->seeking_);
+
+ if (ready_state > blink::WebMediaPlayer::kReadyStateHaveCurrentData)
+ EXPECT_FALSE(wmpi_->seeking_);
+ }
+
+ void LoadAndWaitForCurrentData(std::string data_file) {
+ LoadAndWaitForReadyState(data_file,
+ blink::WebMediaPlayer::kReadyStateHaveCurrentData);
}
void CycleThreads() {
@@ -725,12 +742,12 @@ TEST_F(WebMediaPlayerImplTest, ConstructAndDestroy) {
EXPECT_FALSE(IsSuspended());
}
-// Verify LoadAndWaitForMetadata() functions without issue.
+// Verify LoadAndWaitForCurrentData() functions without issue.
TEST_F(WebMediaPlayerImplTest, LoadAndDestroy) {
InitializeWebMediaPlayerImpl();
EXPECT_FALSE(IsSuspended());
wmpi_->SetPreload(blink::WebMediaPlayer::kPreloadAuto);
- LoadAndWaitForMetadata(kAudioOnlyTestFile);
+ LoadAndWaitForCurrentData(kAudioOnlyTestFile);
EXPECT_FALSE(IsSuspended());
CycleThreads();
@@ -741,7 +758,7 @@ TEST_F(WebMediaPlayerImplTest, LoadAndDestroy) {
EXPECT_GT(reported_memory_ - data_source_size, 0);
}
-// Verify LoadAndWaitForMetadata() functions without issue.
+// Verify LoadAndWaitForCurrentData() functions without issue.
TEST_F(WebMediaPlayerImplTest, LoadAndDestroyDataUrl) {
InitializeWebMediaPlayerImpl();
EXPECT_FALSE(IsSuspended());
@@ -829,11 +846,13 @@ TEST_F(WebMediaPlayerImplTest, LoadPreloadMetadataSuspend) {
InitializeWebMediaPlayerImpl();
EXPECT_CALL(client_, CouldPlayIfEnoughData()).WillRepeatedly(Return(false));
wmpi_->SetPreload(blink::WebMediaPlayer::kPreloadMetaData);
- LoadAndWaitForMetadata(kAudioOnlyTestFile);
+ LoadAndWaitForReadyState(kAudioOnlyTestFile,
+ blink::WebMediaPlayer::kReadyStateHaveMetadata);
testing::Mock::VerifyAndClearExpectations(&client_);
EXPECT_CALL(client_, ReadyStateChanged()).Times(AnyNumber());
CycleThreads();
EXPECT_TRUE(IsSuspended());
+ EXPECT_TRUE(ShouldCancelUponDefer());
// The data source contains the entire file, so subtract it from the memory
// usage to ensure there's no other memory usage.
@@ -842,6 +861,32 @@ TEST_F(WebMediaPlayerImplTest, LoadPreloadMetadataSuspend) {
EXPECT_EQ(reported_memory_ - data_source_size, 0);
}
+// Verify that Play() before kReadyStateHaveEnough doesn't increase buffer size.
+TEST_F(WebMediaPlayerImplTest, NoBufferSizeIncreaseUntilHaveEnough) {
+ InitializeWebMediaPlayerImpl();
+ EXPECT_CALL(client_, CouldPlayIfEnoughData()).WillRepeatedly(Return(true));
+ wmpi_->SetPreload(blink::WebMediaPlayer::kPreloadAuto);
+ LoadAndWaitForReadyState(kAudioOnlyTestFile,
+ blink::WebMediaPlayer::kReadyStateHaveMetadata);
+ testing::Mock::VerifyAndClearExpectations(&client_);
+ EXPECT_CALL(client_, ReadyStateChanged()).Times(AnyNumber());
+ wmpi_->Play();
+ EXPECT_FALSE(IsDataSourceMarkedAsPlaying());
+
+ while (wmpi_->GetReadyState() <
+ blink::WebMediaPlayer::kReadyStateHaveEnoughData) {
+ // Clear the mock so it doesn't have a stale QuitClosure.
+ testing::Mock::VerifyAndClearExpectations(&client_);
+
+ base::RunLoop loop;
+ EXPECT_CALL(client_, ReadyStateChanged())
+ .WillRepeatedly(RunClosure(loop.QuitClosure()));
+ loop.Run();
+ }
+
+ EXPECT_TRUE(IsDataSourceMarkedAsPlaying());
+}
+
// Verify that preload=metadata suspend works properly for streaming sources.
TEST_F(WebMediaPlayerImplTest, LoadPreloadMetadataSuspendNoStreaming) {
InitializeWebMediaPlayerImpl();
@@ -853,11 +898,9 @@ TEST_F(WebMediaPlayerImplTest, LoadPreloadMetadataSuspendNoStreaming) {
constexpr char kLargeAudioOnlyTestFile[] = "bear_192kHz.wav";
Load(kLargeAudioOnlyTestFile, LoadType::kStreaming);
- // This runs until we reach the have current data state. Attempting to wait
- // for states < kReadyStateHaveCurrentData is unreliable due to asynchronous
- // execution of tasks on the base::test:ScopedTaskEnvironment.
+ // This runs until we reach the metadata state.
while (wmpi_->GetReadyState() <
- blink::WebMediaPlayer::kReadyStateHaveCurrentData) {
+ blink::WebMediaPlayer::kReadyStateHaveMetadata) {
base::RunLoop loop;
EXPECT_CALL(client_, ReadyStateChanged())
.WillRepeatedly(RunClosure(loop.QuitClosure()));
@@ -894,12 +937,14 @@ TEST_F(WebMediaPlayerImplTest, LazyLoadPreloadMetadataSuspend) {
EXPECT_CALL(*surface_layer_bridge_ptr_, SetContentsOpaque(false));
}
- LoadAndWaitForMetadata(kVideoOnlyTestFile);
+ LoadAndWaitForReadyState(kVideoOnlyTestFile,
+ blink::WebMediaPlayer::kReadyStateHaveMetadata);
testing::Mock::VerifyAndClearExpectations(&client_);
EXPECT_CALL(client_, ReadyStateChanged()).Times(AnyNumber());
CycleThreads();
EXPECT_TRUE(IsSuspended());
EXPECT_TRUE(wmpi_->DidLazyLoad());
+ EXPECT_FALSE(ShouldCancelUponDefer());
// The data source contains the entire file, so subtract it from the memory
// usage to ensure there's no other memory usage.
@@ -928,7 +973,8 @@ TEST_F(WebMediaPlayerImplTest, LoadPreloadMetadataSuspendNoVideoMemoryUsage) {
EXPECT_CALL(*surface_layer_bridge_ptr_, SetContentsOpaque(false));
}
- LoadAndWaitForMetadata(kVideoOnlyTestFile);
+ LoadAndWaitForReadyState(kVideoOnlyTestFile,
+ blink::WebMediaPlayer::kReadyStateHaveMetadata);
testing::Mock::VerifyAndClearExpectations(&client_);
EXPECT_CALL(client_, ReadyStateChanged()).Times(AnyNumber());
CycleThreads();
@@ -949,7 +995,7 @@ TEST_F(WebMediaPlayerImplTest, LoadPreloadMetadataSuspendCouldPlay) {
InitializeWebMediaPlayerImpl();
EXPECT_CALL(client_, CouldPlayIfEnoughData()).WillRepeatedly(Return(true));
wmpi_->SetPreload(blink::WebMediaPlayer::kPreloadMetaData);
- LoadAndWaitForMetadata(kAudioOnlyTestFile);
+ LoadAndWaitForCurrentData(kAudioOnlyTestFile);
testing::Mock::VerifyAndClearExpectations(&client_);
EXPECT_CALL(client_, ReadyStateChanged()).Times(AnyNumber());
base::RunLoop().RunUntilIdle();
@@ -1371,6 +1417,7 @@ TEST_F(WebMediaPlayerImplTest, Encrypted) {
TEST_F(WebMediaPlayerImplTest, Waiting_NoDecryptionKey) {
InitializeWebMediaPlayerImpl();
+ wmpi_->SetPreload(blink::WebMediaPlayer::kPreloadAuto);
scoped_refptr<cc::Layer> layer = cc::Layer::Create();
EXPECT_CALL(*surface_layer_bridge_ptr_, GetCcLayer())
@@ -1392,7 +1439,7 @@ TEST_F(WebMediaPlayerImplTest, Waiting_NoDecryptionKey) {
// Use non-encrypted file here since we don't have a CDM. Otherwise pipeline
// initialization will stall waiting for a CDM to be set.
- LoadAndWaitForMetadata(kVideoOnlyTestFile);
+ LoadAndWaitForCurrentData(kVideoOnlyTestFile);
EXPECT_CALL(encrypted_client_, DidBlockPlaybackWaitingForKey());
EXPECT_CALL(encrypted_client_, DidResumePlaybackBlockedForKey());
@@ -1746,18 +1793,17 @@ TEST_F(WebMediaPlayerImplTest, PictureInPictureStateChange) {
class WebMediaPlayerImplBackgroundBehaviorTest
: public WebMediaPlayerImplTest,
public ::testing::WithParamInterface<
- std::tuple<bool, bool, int, int, bool, bool, bool, bool, bool>> {
+ std::tuple<bool, int, int, bool, bool, bool, bool, bool>> {
public:
// Indices of the tuple parameters.
static const int kIsMediaSuspendEnabled = 0;
- static const int kIsBackgroundOptimizationEnabled = 1;
- static const int kDurationSec = 2;
- static const int kAverageKeyframeDistanceSec = 3;
- static const int kIsResumeBackgroundVideoEnabled = 4;
- static const int kIsMediaSource = 5;
- static const int kIsBackgroundPauseEnabled = 6;
- static const int kIsPictureInPictureEnabled = 7;
- static const int kIsBackgroundVideoPlaybackEnabled = 8;
+ static const int kDurationSec = 1;
+ static const int kAverageKeyframeDistanceSec = 2;
+ static const int kIsResumeBackgroundVideoEnabled = 3;
+ static const int kIsMediaSource = 4;
+ static const int kIsBackgroundPauseEnabled = 5;
+ static const int kIsPictureInPictureEnabled = 6;
+ static const int kIsBackgroundVideoPlaybackEnabled = 7;
void SetUp() override {
WebMediaPlayerImplTest::SetUp();
@@ -1766,11 +1812,6 @@ class WebMediaPlayerImplBackgroundBehaviorTest
std::string enabled_features;
std::string disabled_features;
- if (IsBackgroundOptimizationOn()) {
- enabled_features += kBackgroundSrcVideoTrackOptimization.name;
- } else {
- disabled_features += kBackgroundSrcVideoTrackOptimization.name;
- }
if (IsBackgroundPauseOn()) {
if (!enabled_features.empty())
@@ -1823,10 +1864,6 @@ class WebMediaPlayerImplBackgroundBehaviorTest
return std::get<kIsMediaSuspendEnabled>(GetParam());
}
- bool IsBackgroundOptimizationOn() {
- return std::get<kIsBackgroundOptimizationEnabled>(GetParam());
- }
-
bool IsResumeBackgroundVideoEnabled() {
return std::get<kIsResumeBackgroundVideoEnabled>(GetParam());
}
@@ -1915,8 +1952,7 @@ TEST_P(WebMediaPlayerImplBackgroundBehaviorTest, AudioVideo) {
(GetAverageKeyframeDistanceSec() < GetMaxKeyframeDistanceSec()));
EXPECT_EQ(matches_requirements, IsBackgroundOptimizationCandidate());
- EXPECT_EQ(IsBackgroundOptimizationOn() && matches_requirements,
- ShouldDisableVideoWhenHidden());
+ EXPECT_EQ(matches_requirements, ShouldDisableVideoWhenHidden());
// Only pause audible videos if both media suspend and resume background
// videos is on and background video playback is disabled. Background video
@@ -1926,8 +1962,8 @@ TEST_P(WebMediaPlayerImplBackgroundBehaviorTest, AudioVideo) {
(IsMediaSuspendOn() && IsResumeBackgroundVideoEnabled()),
ShouldPausePlaybackWhenHidden());
- if (!IsBackgroundOptimizationOn() || !matches_requirements ||
- !ShouldDisableVideoWhenHidden() || IsMediaSuspendOn()) {
+ if (!matches_requirements || !ShouldDisableVideoWhenHidden() ||
+ IsMediaSuspendOn()) {
return;
}
@@ -1952,7 +1988,6 @@ INSTANTIATE_TEST_SUITE_P(
WebMediaPlayerImplBackgroundBehaviorTest,
::testing::Combine(
::testing::Bool(),
- ::testing::Bool(),
::testing::Values(
WebMediaPlayerImpl::kMaxKeyframeDistanceToDisableBackgroundVideoMs /
base::Time::kMillisecondsPerSecond +
diff --git a/chromium/media/blink/webmediaplayer_params.h b/chromium/media/blink/webmediaplayer_params.h
index f88979c209f..dda284c8927 100644
--- a/chromium/media/blink/webmediaplayer_params.h
+++ b/chromium/media/blink/webmediaplayer_params.h
@@ -20,7 +20,6 @@
#include "media/base/media_switches.h"
#include "media/base/routing_token_callback.h"
#include "media/blink/media_blink_export.h"
-#include "media/filters/context_3d.h"
#include "media/mojo/interfaces/media_metrics_provider.mojom.h"
#include "third_party/blink/public/platform/web_media_player.h"
#include "third_party/blink/public/platform/web_video_frame_submitter.h"
@@ -52,8 +51,6 @@ class MEDIA_BLINK_EXPORT WebMediaPlayerParams {
// Returns true if load will deferred. False if it will run immediately.
using DeferLoadCB = base::RepeatingCallback<bool(base::OnceClosure)>;
- using Context3DCB = base::Callback<Context3D()>;
-
// Callback to obtain the media ContextProvider.
// Requires being called on the media thread.
// The argument callback is also called on the media thread as a reply.
diff --git a/chromium/media/capabilities/learning_helper.cc b/chromium/media/capabilities/learning_helper.cc
index bab24d24312..eee53f0532d 100644
--- a/chromium/media/capabilities/learning_helper.cc
+++ b/chromium/media/capabilities/learning_helper.cc
@@ -23,20 +23,23 @@ using learning::TargetValue;
// Remember that these are used to construct UMA histogram names! Be sure to
// update histograms.xml if you change them!
-// Dropped frame ratio, default features, regression tree.
-const char* const kDroppedFrameRatioBaseTreeTaskName = "BaseTree";
-// Dropped frame ratio, default+FeatureLibrary features, regression tree.
-const char* const kDroppedFrameRatioEnhancedTreeTaskName = "EnhancedTree";
-// Dropped frame ratio, default+FeatureLibrary features, regression tree,
-// examples are unweighted.
-const char* const kDroppedFrameRatioEnhancedUnweightedTreeTaskName =
- "EnhancedUnweightedTree";
-// Binary smoothness, default+FeatureLibrary features, regression tree,
-// examples are unweighted.
-const char* const kBinarySmoothnessEnhancedUnweightedTreeTaskName =
- "BinarySmoothnessTree";
-// Dropped frame ratio, default features, lookup table.
-const char* const kDroppedFrameRatioBaseTableTaskName = "BaseTable";
+
+// Dropped frame ratio, default features, unweighted regression tree.
+const char* const kDroppedFrameRatioBaseUnweightedTreeTaskName =
+ "BaseUnweightedTree";
+
+// Dropped frame ratio, default features, unweighted examples, lookup table.
+const char* const kDroppedFrameRatioBaseUnweightedTableTaskName =
+ "BaseUnweightedTable";
+
+// Same as BaseUnweightedTree, but with 200 training examples max.
+const char* const kDroppedFrameRatioBaseUnweightedTree200TaskName =
+ "BaseUnweightedTree200";
+
+// Dropped frame ratio, default+FeatureLibrary features, regression tree with
+// unweighted examples and 200 training examples max.
+const char* const kDroppedFrameRatioEnhancedUnweightedTree200TaskName =
+ "EnhancedUnweightedTree200";
// Threshold for the dropped frame to total frame ratio, at which we'll decide
// that the playback was not smooth.
@@ -57,7 +60,7 @@ LearningHelper::LearningHelper(FeatureProviderFactoryCB feature_factory) {
// We only do this here since we own the session. Normally, whatever creates
// the session would register all the learning tasks.
LearningTask dropped_frame_task(
- kDroppedFrameRatioBaseTableTaskName, LearningTask::Model::kLookupTable,
+ "no name", LearningTask::Model::kLookupTable,
{
{"codec_profile",
::media::learning::LearningTask::Ordering::kUnordered},
@@ -73,33 +76,40 @@ LearningHelper::LearningHelper(FeatureProviderFactoryCB feature_factory) {
dropped_frame_task.uma_hacky_aggregate_confusion_matrix = true;
dropped_frame_task.uma_hacky_by_training_weight_confusion_matrix = true;
- // Pick a max reporting weight that represents the total number of frames.
- // This will record in bucket [0, 4999], [5000, 9999], etc. Unlike the
- // existing mcap thresholds, these are not per-bucket. That's why they're 10x
- // higher than the per-bucket thresholds we're using there. Mcap allows on
- // the order of 2,500 frames in each of {resolution X fps X codec} buckets,
- // while the reported training weight here would be total for the whole set.
- // So, we multiply by about 20 to approximate the number of buckets to keep
- // it about the same as the size of the cross product.
- const double weighted_reporting_max = 49999.;
- dropped_frame_task.max_reporting_weight = weighted_reporting_max;
+ // Buckets will have 10 examples each, or 20 for the 200-set tasks.
+ const double data_set_size = 100;
+ const double big_data_set_size = 200;
+ // Unweighted table
+ dropped_frame_task.name = kDroppedFrameRatioBaseUnweightedTableTaskName;
+ dropped_frame_task.max_data_set_size = data_set_size;
learning_session_->RegisterTask(dropped_frame_task,
SequenceBoundFeatureProvider());
- base_table_controller_ =
+ base_unweighted_table_controller_ =
learning_session_->GetController(dropped_frame_task.name);
- // Modify the task to use ExtraTrees.
- dropped_frame_task.name = kDroppedFrameRatioBaseTreeTaskName;
+ // Unweighted base tree.
+ dropped_frame_task.name = kDroppedFrameRatioBaseUnweightedTreeTaskName;
dropped_frame_task.model = LearningTask::Model::kExtraTrees;
+ dropped_frame_task.max_data_set_size = data_set_size;
+ learning_session_->RegisterTask(dropped_frame_task,
+ SequenceBoundFeatureProvider());
+ base_unweighted_tree_controller_ =
+ learning_session_->GetController(dropped_frame_task.name);
+
+ // Unweighted tree with a larger training set.
+ dropped_frame_task.name = kDroppedFrameRatioBaseUnweightedTree200TaskName;
+ dropped_frame_task.max_data_set_size = big_data_set_size;
learning_session_->RegisterTask(dropped_frame_task,
SequenceBoundFeatureProvider());
- base_tree_controller_ =
+ base_unweighted_tree_200_controller_ =
learning_session_->GetController(dropped_frame_task.name);
// Add common features, if we have a factory.
if (feature_factory) {
- dropped_frame_task.name = kDroppedFrameRatioEnhancedTreeTaskName;
+ dropped_frame_task.name =
+ kDroppedFrameRatioEnhancedUnweightedTree200TaskName;
+ dropped_frame_task.max_data_set_size = big_data_set_size;
dropped_frame_task.feature_descriptions.push_back(
{"origin", ::media::learning::LearningTask::Ordering::kUnordered});
dropped_frame_task.feature_descriptions.push_back(
@@ -108,38 +118,7 @@ LearningHelper::LearningHelper(FeatureProviderFactoryCB feature_factory) {
FeatureLibrary::BatteryPower());
learning_session_->RegisterTask(dropped_frame_task,
feature_factory.Run(dropped_frame_task));
- enhanced_tree_controller_ =
- learning_session_->GetController(dropped_frame_task.name);
-
- // Duplicate the task with a new name and UMA histogram. We'll add
- // unweighted examples to it to see which one does better.
- dropped_frame_task.name = kDroppedFrameRatioEnhancedUnweightedTreeTaskName;
- // Adjust the reporting weight since we'll have 100 or fewer examples.
- dropped_frame_task.max_reporting_weight = 99.;
- learning_session_->RegisterTask(dropped_frame_task,
- feature_factory.Run(dropped_frame_task));
- unweighted_tree_controller_ =
- learning_session_->GetController(dropped_frame_task.name);
-
- // Set up the binary smoothness task. This has a nominal target, with
- // "smooth" as 0, and "not smooth" as 1. This is so that the low numbers
- // are still smooth, and the hight numbers are still not smooth. It makes
- // reporting the same for both.
- dropped_frame_task.name = kBinarySmoothnessEnhancedUnweightedTreeTaskName;
- /* TODO(liberato): DistributionReporter only supports regression, so we
- leave it as kNumeric. Since we only add 0,1 as targets, it's probably
- fairly close to the same thing.
- dropped_frame_task.target_description = {
- "is_smooth", ::media::learning::LearningTask::Ordering::kUnordered};
- */
- // We'll threshold the ratio when figuring out the binary label, so we just
- // want to pick the majority. Note that I have no idea if this is actually
- // the best threshold, but it seems like a good place to start.
- dropped_frame_task.smoothness_threshold = 0.5;
- dropped_frame_task.max_reporting_weight = weighted_reporting_max;
- learning_session_->RegisterTask(dropped_frame_task,
- feature_factory.Run(dropped_frame_task));
- binary_tree_controller_ =
+ enhanced_unweighted_tree_200_controller_ =
learning_session_->GetController(dropped_frame_task.name);
}
}
@@ -178,23 +157,16 @@ void LearningHelper::AppendStats(
// the examples is the right thing to do.
example.target_value = TargetValue(
static_cast<double>(new_stats.frames_dropped) / new_stats.frames_decoded);
- example.weight = new_stats.frames_decoded;
+ example.weight = 1u;
// Add this example to all tasks.
- AddExample(base_table_controller_.get(), example);
- AddExample(base_tree_controller_.get(), example);
- if (enhanced_tree_controller_) {
- example.features.push_back(origin);
- AddExample(enhanced_tree_controller_.get(), example);
+ AddExample(base_unweighted_table_controller_.get(), example);
+ AddExample(base_unweighted_tree_controller_.get(), example);
+ AddExample(base_unweighted_tree_200_controller_.get(), example);
- // Also add to the unweighted model.
- example.weight = 1u;
- AddExample(unweighted_tree_controller_.get(), example);
-
- // Threshold the target to 0 for "smooth", and 1 for "not smooth".
- example.target_value =
- TargetValue(example.target_value.value() > kSmoothnessThreshold);
- AddExample(binary_tree_controller_.get(), example);
+ if (enhanced_unweighted_tree_200_controller_) {
+ example.features.push_back(origin);
+ AddExample(enhanced_unweighted_tree_200_controller_.get(), example);
}
}
diff --git a/chromium/media/capabilities/learning_helper.h b/chromium/media/capabilities/learning_helper.h
index 32bf9cb0c52..7c1a0ef2294 100644
--- a/chromium/media/capabilities/learning_helper.h
+++ b/chromium/media/capabilities/learning_helper.h
@@ -45,11 +45,14 @@ class MEDIA_EXPORT LearningHelper {
std::unique_ptr<learning::LearningSessionImpl> learning_session_;
// Controllers for each task.
- std::unique_ptr<learning::LearningTaskController> base_table_controller_;
- std::unique_ptr<learning::LearningTaskController> base_tree_controller_;
- std::unique_ptr<learning::LearningTaskController> enhanced_tree_controller_;
- std::unique_ptr<learning::LearningTaskController> unweighted_tree_controller_;
- std::unique_ptr<learning::LearningTaskController> binary_tree_controller_;
+ std::unique_ptr<learning::LearningTaskController>
+ base_unweighted_table_controller_;
+ std::unique_ptr<learning::LearningTaskController>
+ base_unweighted_tree_controller_;
+ std::unique_ptr<learning::LearningTaskController>
+ base_unweighted_tree_200_controller_;
+ std::unique_ptr<learning::LearningTaskController>
+ enhanced_unweighted_tree_200_controller_;
};
} // namespace media
diff --git a/chromium/media/capabilities/video_decode_stats.proto b/chromium/media/capabilities/video_decode_stats.proto
index a809795037c..4298adc0263 100644
--- a/chromium/media/capabilities/video_decode_stats.proto
+++ b/chromium/media/capabilities/video_decode_stats.proto
@@ -26,4 +26,16 @@ message DecodeStatsProto {
// where a few bad outlier playbacks permanently define a machines
// capabilities.
optional double last_write_date = 7;
-} \ No newline at end of file
+
+ // Required (for those in the "unweighted" experiment). An unweighted average
+ // of dropped frames percentage from the last |num_unweighted_playbacks|.
+ optional double unweighted_average_frames_dropped = 8 [default = 0];
+
+ // Required (for those in the "unweighted" experiment). An unweighted average
+ // of efficient frames percentage from the last |num_unweighted_playbacks|.
+ optional double unweighted_average_frames_efficient = 9 [default = 0];
+
+ // Required (for those in the "unweighted" experiment). Count of playbacks
+ // contributing to |unweighted_average_frames_*| fields.
+ optional uint64 num_unweighted_playbacks = 10 [default = 0];
+}
diff --git a/chromium/media/capabilities/video_decode_stats_db_impl.cc b/chromium/media/capabilities/video_decode_stats_db_impl.cc
index 8dd9b0b693e..642b4a3addf 100644
--- a/chromium/media/capabilities/video_decode_stats_db_impl.cc
+++ b/chromium/media/capabilities/video_decode_stats_db_impl.cc
@@ -34,6 +34,8 @@ const int kMaxFramesPerBufferDefault = 2500;
const int kMaxDaysToKeepStatsDefault = 30;
+const bool kEnableUnweightedEntriesDefault = false;
+
} // namespace
const char VideoDecodeStatsDBImpl::kMaxFramesPerBufferParamName[] =
@@ -42,6 +44,9 @@ const char VideoDecodeStatsDBImpl::kMaxFramesPerBufferParamName[] =
const char VideoDecodeStatsDBImpl::kMaxDaysToKeepStatsParamName[] =
"db_days_to_keep_stats";
+const char VideoDecodeStatsDBImpl::kEnableUnweightedEntriesParamName[] =
+ "db_enable_unweighted_entries";
+
// static
int VideoDecodeStatsDBImpl::GetMaxFramesPerBuffer() {
return base::GetFieldTrialParamByFeatureAsDouble(
@@ -57,6 +62,13 @@ int VideoDecodeStatsDBImpl::GetMaxDaysToKeepStats() {
}
// static
+bool VideoDecodeStatsDBImpl::GetEnableUnweightedEntries() {
+ return base::GetFieldTrialParamByFeatureAsBool(
+ kMediaCapabilitiesWithParameters, kEnableUnweightedEntriesParamName,
+ kEnableUnweightedEntriesDefault);
+}
+
+// static
std::unique_ptr<VideoDecodeStatsDBImpl> VideoDecodeStatsDBImpl::Create(
base::FilePath db_dir) {
DVLOG(2) << __func__ << " db_dir:" << db_dir;
@@ -203,6 +215,16 @@ void VideoDecodeStatsDBImpl::WriteUpdatedEntry(
const uint64_t kMaxFramesPerBuffer = GetMaxFramesPerBuffer();
DCHECK_GT(kMaxFramesPerBuffer, 0UL);
+ double new_entry_dropped_ratio = 0;
+ double new_entry_efficient_ratio = 0;
+ if (new_entry.frames_decoded) {
+ new_entry_dropped_ratio = static_cast<double>(new_entry.frames_dropped) /
+ new_entry.frames_decoded;
+ new_entry_efficient_ratio =
+ static_cast<double>(new_entry.frames_power_efficient) /
+ new_entry.frames_decoded;
+ }
+
if (old_frames_decoded + new_entry.frames_decoded > kMaxFramesPerBuffer) {
// The |new_entry| is pushing out some or all of the old data. Achieve this
// by weighting the dropped and power efficiency stats by the ratio of the
@@ -215,12 +237,6 @@ void VideoDecodeStatsDBImpl::WriteUpdatedEntry(
static_cast<double>(old_frames_dropped) / old_frames_decoded;
double old_efficient_ratio =
static_cast<double>(old_frames_power_efficient) / old_frames_decoded;
- double new_entry_dropped_ratio =
- static_cast<double>(new_entry.frames_dropped) /
- new_entry.frames_decoded;
- double new_entry_efficient_ratio =
- static_cast<double>(new_entry.frames_power_efficient) /
- new_entry.frames_decoded;
double agg_dropped_ratio = fill_ratio * new_entry_dropped_ratio +
(1 - fill_ratio) * old_dropped_ratio;
@@ -243,6 +259,35 @@ void VideoDecodeStatsDBImpl::WriteUpdatedEntry(
old_frames_power_efficient);
}
+ if (GetEnableUnweightedEntries()) {
+ uint64_t old_num_unweighted_playbacks =
+ stats_proto->num_unweighted_playbacks();
+ double old_unweighted_drop_avg =
+ stats_proto->unweighted_average_frames_dropped();
+ double old_unweighted_efficient_avg =
+ stats_proto->unweighted_average_frames_efficient();
+
+ uint64_t new_num_unweighted_playbacks = old_num_unweighted_playbacks + 1;
+ double new_unweighted_drop_avg =
+ ((old_unweighted_drop_avg * old_num_unweighted_playbacks) +
+ new_entry_dropped_ratio) /
+ new_num_unweighted_playbacks;
+ double new_unweighted_efficient_avg =
+ ((old_unweighted_efficient_avg * old_num_unweighted_playbacks) +
+ new_entry_efficient_ratio) /
+ new_num_unweighted_playbacks;
+
+ stats_proto->set_num_unweighted_playbacks(new_num_unweighted_playbacks);
+ stats_proto->set_unweighted_average_frames_dropped(new_unweighted_drop_avg);
+ stats_proto->set_unweighted_average_frames_efficient(
+ new_unweighted_efficient_avg);
+
+ DVLOG(2) << __func__ << " Updating unweighted averages. dropped:"
+ << new_unweighted_drop_avg
+ << " efficient:" << new_unweighted_efficient_avg
+ << " num_playbacks:" << new_num_unweighted_playbacks;
+ }
+
// Update the time stamp for the current write.
stats_proto->set_last_write_date(wall_clock_->Now().ToJsTime());
@@ -278,9 +323,47 @@ void VideoDecodeStatsDBImpl::OnGotDecodeStats(
if (stats_proto && !AreStatsExpired(stats_proto.get())) {
DCHECK(success);
- entry = std::make_unique<DecodeStatsEntry>(
- stats_proto->frames_decoded(), stats_proto->frames_dropped(),
- stats_proto->frames_power_efficient());
+ if (GetEnableUnweightedEntries()) {
+ DCHECK_GE(stats_proto->unweighted_average_frames_dropped(), 0);
+ DCHECK_LE(stats_proto->unweighted_average_frames_dropped(), 1);
+ DCHECK_GE(stats_proto->unweighted_average_frames_efficient(), 0);
+ DCHECK_LE(stats_proto->unweighted_average_frames_efficient(), 1);
+
+ DVLOG(2) << __func__ << " Using unweighted averages. dropped:"
+ << stats_proto->unweighted_average_frames_dropped()
+ << " efficient:"
+ << stats_proto->unweighted_average_frames_efficient()
+ << " num_playbacks:" << stats_proto->num_unweighted_playbacks();
+
+ // The meaning of DecodeStatsEntry is a little different for folks in the
+ // unweighted experiment group:
+ // - The *ratios* of dropped / decoded and efficient / decoded are valid,
+ // which means no change to any math in the upper layer. The ratio is
+ // internally computed as an unweighted average of the dropped frames
+ // ratio over all the playbacks in this bucket.
+ // - The denominator "decoded" is actually the number of entries
+ // accumulated by this key scaled by 100,000. Scaling by 100,000
+ // preserves the precision of the dropped / decoded ratio to the 5th
+ // decimal place (i.e. 0.01234, or 1.234%)
+ // - The numerator "dropped" or "efficient" doesn't represent anything and
+ // is simply chosen to create the correct ratio.
+ //
+ // This is obviously not the most efficient or readable way to do this,
+ // but allows us to continue using the same proto and UKM reporting
+ // while we experiment with the unweighted approach. If this approach
+ // proves successful we will refactor the API and proto.
+ uint64_t frames_decoded_lie =
+ 100000 * stats_proto->num_unweighted_playbacks();
+ entry = std::make_unique<DecodeStatsEntry>(
+ frames_decoded_lie,
+ frames_decoded_lie * stats_proto->unweighted_average_frames_dropped(),
+ frames_decoded_lie *
+ stats_proto->unweighted_average_frames_efficient());
+ } else {
+ entry = std::make_unique<DecodeStatsEntry>(
+ stats_proto->frames_decoded(), stats_proto->frames_dropped(),
+ stats_proto->frames_power_efficient());
+ }
}
DVLOG(3) << __func__ << " read " << (success ? "succeeded" : "FAILED!")
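The scaled-denominator trick above is easiest to check with concrete numbers. A hypothetical helper (names invented for illustration) that mirrors the math in OnGotDecodeStats():

#include <cstdint>

constexpr uint64_t kScale = 100000;  // Preserves 5 decimal places of the ratio.

// Denominator reported to callers: playback count scaled by 100,000.
uint64_t Denominator(uint64_t num_playbacks) {
  return kScale * num_playbacks;
}

// Numerator chosen so that numerator / denominator reproduces |avg|.
uint64_t Numerator(uint64_t num_playbacks, double avg) {
  return static_cast<uint64_t>(Denominator(num_playbacks) * avg);
}

With 3 playbacks and an unweighted dropped average of 0.054115, Denominator(3) is 300000 and Numerator(3, 0.054115) is 16234 (300000 * 0.054115 = 16234.5, truncated), so the recovered ratio 16234 / 300000 ≈ 0.0541133 holds to the 5th decimal place, as the comment claims.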
diff --git a/chromium/media/capabilities/video_decode_stats_db_impl.h b/chromium/media/capabilities/video_decode_stats_db_impl.h
index 1dd9740db57..e2b61444928 100644
--- a/chromium/media/capabilities/video_decode_stats_db_impl.h
+++ b/chromium/media/capabilities/video_decode_stats_db_impl.h
@@ -31,6 +31,7 @@ class MEDIA_EXPORT VideoDecodeStatsDBImpl : public VideoDecodeStatsDB {
public:
static const char kMaxFramesPerBufferParamName[];
static const char kMaxDaysToKeepStatsParamName[];
+ static const char kEnableUnweightedEntriesParamName[];
// Create an instance! |db_dir| specifies where to store LevelDB files to
disk. LevelDB generates a handful of files, so it's recommended to provide a
@@ -72,6 +73,10 @@ class MEDIA_EXPORT VideoDecodeStatsDBImpl : public VideoDecodeStatsDB {
// been due to one-off circumstances.
static int GetMaxDaysToKeepStats();
+ // When true, each playback entry in the DB should be given equal weight
+ // regardless of how many frames were decoded.
+ static bool GetEnableUnweightedEntries();
+
// Called when the database has been initialized. Will immediately call
// |init_cb| to forward |success|.
void OnInit(InitializeCB init_cb, bool success);
diff --git a/chromium/media/capabilities/video_decode_stats_db_impl_unittest.cc b/chromium/media/capabilities/video_decode_stats_db_impl_unittest.cc
index a8ebbdcc3e3..06e22a4be39 100644
--- a/chromium/media/capabilities/video_decode_stats_db_impl_unittest.cc
+++ b/chromium/media/capabilities/video_decode_stats_db_impl_unittest.cc
@@ -74,6 +74,10 @@ class VideoDecodeStatsDBImplTest : public ::testing::Test {
return VideoDecodeStatsDBImpl::GetMaxDaysToKeepStats();
}
+ bool GetEnableUnweightedEntries() {
+ return VideoDecodeStatsDBImpl::GetEnableUnweightedEntries();
+ }
+
void SetDBClock(base::Clock* clock) {
stats_db_->set_wall_clock_for_test(clock);
}
@@ -551,4 +555,99 @@ TEST_F(VideoDecodeStatsDBImplTest, AppendAndExpire) {
VerifyEmptyStats(kStatsKeyVp9);
}
+TEST_F(VideoDecodeStatsDBImplTest, EnableUnweightedEntries) {
+ base::test::ScopedFeatureList scoped_feature_list;
+ std::unique_ptr<base::FieldTrialList> field_trial_list;
+
+ // Default is false.
+ EXPECT_FALSE(GetEnableUnweightedEntries());
+
+ // Override field trial.
+ std::map<std::string, std::string> params;
+ params[VideoDecodeStatsDBImpl::kEnableUnweightedEntriesParamName] = "true";
+
+ const std::string kTrialName = "TrialName";
+ const std::string kGroupName = "GroupName";
+
+ field_trial_list.reset();
+ field_trial_list.reset(new base::FieldTrialList(nullptr));
+ base::FieldTrialParamAssociator::GetInstance()->ClearAllParamsForTesting();
+
+ base::AssociateFieldTrialParams(kTrialName, kGroupName, params);
+ base::FieldTrial* field_trial =
+ base::FieldTrialList::CreateFieldTrial(kTrialName, kGroupName);
+
+ std::unique_ptr<base::FeatureList> feature_list(new base::FeatureList);
+ feature_list->RegisterFieldTrialOverride(
+ media::kMediaCapabilitiesWithParameters.name,
+ base::FeatureList::OVERRIDE_ENABLE_FEATURE, field_trial);
+ base::FeatureList::ClearInstanceForTesting();
+ scoped_feature_list.InitWithFeatureList(std::move(feature_list));
+
+ std::map<std::string, std::string> actual_params;
+ EXPECT_TRUE(base::GetFieldTrialParamsByFeature(
+ media::kMediaCapabilitiesWithParameters, &actual_params));
+ EXPECT_EQ(params, actual_params);
+
+ // Confirm field trial overridden.
+ EXPECT_TRUE(GetMaxDaysToKeepStats());
+
+ InitializeDB();
+
+ // Append 200 frames with 10% dropped, 1% efficient.
+ AppendStats(kStatsKeyVp9, DecodeStatsEntry(200, 0.10 * 200, 0.01 * 200));
+ // Use real doubles to track these values to make sure the precision math
+ // for repeating decimals works out with what's done internally.
+ int num_appends = 1;
+ double unweighted_smoothness_avg = 0.10;
+ double unweighted_efficiency_avg = 0.01;
+
+ // NOTE: the members of DecodeStatsEntry have a different meaning when using
+ // unweighted DB entries. The denominator is 100,000 * the number of appends
+ // and the numerator is whatever value achieves the correct unweighted ratio
+ // for those appends. See the detailed comment in
+ // VideoDecodeStatsDBImpl::OnGotDecodeStats().
+ const int kNumAppendScale = 100000;
+ int expected_denominator = kNumAppendScale * num_appends;
+ VerifyReadStats(
+ kStatsKeyVp9,
+ DecodeStatsEntry(expected_denominator,
+ unweighted_smoothness_avg * expected_denominator,
+ unweighted_efficiency_avg * expected_denominator));
+
+ // Append 20K frames with 5% dropped and 10% efficient.
+ AppendStats(kStatsKeyVp9,
+ DecodeStatsEntry(20000, 0.05 * 20000, 0.10 * 20000));
+ num_appends++;
+ unweighted_smoothness_avg = (0.10 + 0.05) / num_appends;
+ unweighted_efficiency_avg = (0.01 + 0.10) / num_appends;
+
+ // While the new record had 100x more frames than the previous append, the
+ // ratios should be an unweighted average of the two records (7.5% dropped
+ // and 5.5% efficient).
+ expected_denominator = kNumAppendScale * num_appends;
+ VerifyReadStats(
+ kStatsKeyVp9,
+ DecodeStatsEntry(expected_denominator,
+ unweighted_smoothness_avg * expected_denominator,
+ unweighted_efficiency_avg * expected_denominator));
+
+ // Append 1M frames with 1.2345% dropped and 3.4567% efficient.
+ AppendStats(kStatsKeyVp9, DecodeStatsEntry(1000000, 0.012345 * 1000000,
+ 0.034567 * 1000000));
+ num_appends++;
+ unweighted_smoothness_avg = (0.10 + 0.05 + 0.012345) / num_appends;
+ unweighted_efficiency_avg = (0.01 + 0.10 + 0.034567) / num_appends;
+
+ // Here, the ratios should still be averaged in the unweighted fashion, but
+ // truncated after the 3rd decimal place of the percentage (e.g. 1.234%),
+ // i.e. the 5th decimal place when expressed as a fraction of 1 (0.01234).
+ expected_denominator = kNumAppendScale * num_appends;
+ VerifyReadStats(
+ kStatsKeyVp9,
+ DecodeStatsEntry(expected_denominator,
+ unweighted_smoothness_avg * expected_denominator,
+ unweighted_efficiency_avg * expected_denominator));
+}
+
} // namespace media
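A quick numeric check of the test's final expectation (illustrative arithmetic only): after the three appends the unweighted averages are

  dropped   = (0.10 + 0.05 + 0.012345) / 3 = 0.054115
  efficient = (0.01 + 0.10 + 0.034567) / 3 = 0.048189

and with expected_denominator = 100000 * 3 = 300000, the last VerifyReadStats() call compares against approximately DecodeStatsEntry(300000, 16234, 14456) after truncation.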
diff --git a/chromium/media/capture/BUILD.gn b/chromium/media/capture/BUILD.gn
index f55f830b1e2..d4e634cace3 100644
--- a/chromium/media/capture/BUILD.gn
+++ b/chromium/media/capture/BUILD.gn
@@ -86,6 +86,7 @@ jumbo_source_set("capture_device_specific") {
"//media/capture/mojom:image_capture",
"//media/capture/mojom:image_capture_types",
"//media/mojo/interfaces:interfaces",
+ "//media/parsers",
"//third_party/libyuv",
"//ui/gfx",
]
@@ -97,8 +98,6 @@ jumbo_component("capture_lib") {
"video/create_video_capture_device_factory.cc",
"video/create_video_capture_device_factory.h",
"video/scoped_buffer_pool_reservation.h",
- "video/scoped_video_capture_jpeg_decoder.cc",
- "video/scoped_video_capture_jpeg_decoder.h",
"video/shared_memory_buffer_tracker.cc",
"video/shared_memory_buffer_tracker.h",
"video/shared_memory_handle_provider.cc",
@@ -112,9 +111,6 @@ jumbo_component("capture_lib") {
"video/video_capture_buffer_tracker_factory_impl.h",
"video/video_capture_device_client.cc",
"video/video_capture_device_client.h",
- "video/video_capture_jpeg_decoder.h",
- "video/video_capture_jpeg_decoder_impl.cc",
- "video/video_capture_jpeg_decoder_impl.h",
"video/video_capture_system.h",
"video/video_capture_system_impl.cc",
"video/video_capture_system_impl.h",
@@ -137,7 +133,6 @@ jumbo_component("capture_lib") {
"//media/capture/mojom:image_capture",
"//media/capture/mojom:image_capture_types",
"//media/capture/mojom:video_capture",
- "//media/mojo/clients:jpeg_decode_accelerator",
"//media/mojo/interfaces:interfaces",
"//services/service_manager/public/cpp",
"//third_party/libyuv",
@@ -273,6 +268,8 @@ jumbo_component("capture_lib") {
"video/chromeos/request_builder.h",
"video/chromeos/request_manager.cc",
"video/chromeos/request_manager.h",
+ "video/chromeos/scoped_video_capture_jpeg_decoder.cc",
+ "video/chromeos/scoped_video_capture_jpeg_decoder.h",
"video/chromeos/stream_buffer_manager.cc",
"video/chromeos/stream_buffer_manager.h",
"video/chromeos/vendor_tag_ops_delegate.cc",
@@ -281,11 +278,15 @@ jumbo_component("capture_lib") {
"video/chromeos/video_capture_device_chromeos_halv3.h",
"video/chromeos/video_capture_device_factory_chromeos.cc",
"video/chromeos/video_capture_device_factory_chromeos.h",
+ "video/chromeos/video_capture_jpeg_decoder.h",
+ "video/chromeos/video_capture_jpeg_decoder_impl.cc",
+ "video/chromeos/video_capture_jpeg_decoder_impl.h",
]
public_deps += [ "//media/capture/video/chromeos/public" ]
deps += [
"//build/config/linux/libdrm",
"//chromeos/dbus/power",
+ "//components/chromeos_camera:mojo_mjpeg_decode_accelerator",
"//components/chromeos_camera/common",
"//media/capture/video/chromeos/mojo:cros_camera",
"//third_party/libsync",
@@ -327,6 +328,20 @@ source_set("test_support") {
testonly = true
}
+if (is_chromeos) {
+ source_set("chromeos_test_utils") {
+ sources = [
+ "video/chromeos/local_gpu_memory_buffer_manager.cc",
+ ]
+
+ deps = [
+ "//base",
+ "//build/config/linux/libdrm",
+ "//third_party/minigbm",
+ ]
+ }
+}
+
test("capture_unittests") {
sources = [
"content/animated_content_sampler_unittest.cc",
@@ -402,7 +417,6 @@ test("capture_unittests") {
"video/chromeos/camera_device_delegate_unittest.cc",
"video/chromeos/camera_hal_delegate_unittest.cc",
"video/chromeos/camera_hal_dispatcher_impl_unittest.cc",
- "video/chromeos/local_gpu_memory_buffer_manager.cc",
"video/chromeos/mock_camera_module.cc",
"video/chromeos/mock_camera_module.h",
"video/chromeos/mock_vendor_tag_ops.cc",
@@ -412,6 +426,7 @@ test("capture_unittests") {
"video/chromeos/request_manager_unittest.cc",
]
deps += [
+ ":chromeos_test_utils",
"//build/config/linux/libdrm",
"//chromeos/dbus/power",
"//media/capture/video/chromeos/mojo:cros_camera",
diff --git a/chromium/media/capture/video/OWNERS b/chromium/media/capture/video/OWNERS
index 0a79c53912d..04487664136 100644
--- a/chromium/media/capture/video/OWNERS
+++ b/chromium/media/capture/video/OWNERS
@@ -1,8 +1,8 @@
-emircan@chromium.org
chfremer@chromium.org
tommi@chromium.org
# Original (legacy) owner.
+emircan@chromium.org
mcasas@chromium.org
# TEAM: webrtc-dev@chromium.org
diff --git a/chromium/media/capture/video/android/video_capture_device_android.cc b/chromium/media/capture/video/android/video_capture_device_android.cc
index 777e8229e36..e51c0f40ed7 100644
--- a/chromium/media/capture/video/android/video_capture_device_android.cc
+++ b/chromium/media/capture/video/android/video_capture_device_android.cc
@@ -145,6 +145,11 @@ void VideoCaptureDeviceAndroid::AllocateAndStart(
return;
}
+ // TODO(julien.isorce): Use Camera.SENSOR_COLOR_TRANSFORM2 to build a
+ // gfx::ColorSpace, and rename VideoCaptureDeviceAndroid::GetColorspace()
+ // to GetPixelFormat, see http://crbug.com/959901.
+ capture_color_space_ = gfx::ColorSpace();
+
capture_format_.frame_size.SetSize(
Java_VideoCapture_queryWidth(env, j_capture_),
Java_VideoCapture_queryHeight(env, j_capture_));
@@ -624,7 +629,8 @@ void VideoCaptureDeviceAndroid::SendIncomingDataToClient(
base::AutoLock lock(lock_);
if (!client_)
return;
- client_->OnIncomingCapturedData(data, length, capture_format_, rotation,
+ client_->OnIncomingCapturedData(data, length, capture_format_,
+ capture_color_space_, rotation,
reference_time, timestamp);
}
diff --git a/chromium/media/capture/video/android/video_capture_device_android.h b/chromium/media/capture/video/android/video_capture_device_android.h
index d234be83de8..587ec58d057 100644
--- a/chromium/media/capture/video/android/video_capture_device_android.h
+++ b/chromium/media/capture/video/android/video_capture_device_android.h
@@ -201,6 +201,7 @@ class CAPTURE_EXPORT VideoCaptureDeviceAndroid : public VideoCaptureDevice {
const VideoCaptureDeviceDescriptor device_descriptor_;
VideoCaptureFormat capture_format_;
+ gfx::ColorSpace capture_color_space_;
// Java VideoCaptureAndroid instance.
base::android::ScopedJavaLocalRef<jobject> j_capture_;
diff --git a/chromium/media/capture/video/android/video_capture_device_factory_android.cc b/chromium/media/capture/video/android/video_capture_device_factory_android.cc
index 4a7e12cc8e9..b57ab05f8aa 100644
--- a/chromium/media/capture/video/android/video_capture_device_factory_android.cc
+++ b/chromium/media/capture/video/android/video_capture_device_factory_android.cc
@@ -112,11 +112,7 @@ void VideoCaptureDeviceFactoryAndroid::GetSupportedFormats(
if (collected_formats.is_null())
return;
- jsize num_formats = env->GetArrayLength(collected_formats.obj());
- for (int i = 0; i < num_formats; ++i) {
- base::android::ScopedJavaLocalRef<jobject> format(
- env, env->GetObjectArrayElement(collected_formats.obj(), i));
-
+ for (auto format : collected_formats.ReadElements<jobject>()) {
VideoPixelFormat pixel_format = PIXEL_FORMAT_UNKNOWN;
switch (Java_VideoCaptureFactory_getCaptureFormatPixelFormat(env, format)) {
case VideoCaptureDeviceAndroid::ANDROID_IMAGE_FORMAT_YV12:
diff --git a/chromium/media/capture/video/chromeos/camera_device_delegate.cc b/chromium/media/capture/video/chromeos/camera_device_delegate.cc
index 3beb0dfe8a8..d352f7f3639 100644
--- a/chromium/media/capture/video/chromeos/camera_device_delegate.cc
+++ b/chromium/media/capture/video/chromeos/camera_device_delegate.cc
@@ -4,6 +4,7 @@
#include "media/capture/video/chromeos/camera_device_delegate.h"
+#include <algorithm>
#include <memory>
#include <string>
#include <utility>
@@ -31,12 +32,10 @@ namespace {
// The result of max_width and max_height could be zero if the stream
// is not in the pre-defined configuration.
-void GetMaxStreamResolution(
- const cros::mojom::CameraMetadataPtr& static_metadata,
- cros::mojom::Camera3StreamType stream_type,
- cros::mojom::HalPixelFormat stream_format,
- int32_t* max_width,
- int32_t* max_height) {
+void GetStreamResolutions(const cros::mojom::CameraMetadataPtr& static_metadata,
+ cros::mojom::Camera3StreamType stream_type,
+ cros::mojom::HalPixelFormat stream_format,
+ std::vector<gfx::Size>* resolutions) {
const cros::mojom::CameraMetadataEntryPtr* stream_configurations =
GetMetadataEntry(static_metadata,
cros::mojom::CameraMetadataTag::
@@ -51,8 +50,6 @@ void GetMaxStreamResolution(
const size_t kStreamConfigurationSize = 4;
int32_t* iter =
reinterpret_cast<int32_t*>((*stream_configurations)->data.data());
- *max_width = 0;
- *max_height = 0;
for (size_t i = 0; i < (*stream_configurations)->count;
i += kStreamConfigurationSize) {
auto format =
@@ -67,13 +64,13 @@ void GetMaxStreamResolution(
continue;
}
- // TODO(wtlee): Once we have resolution settings mechanism, we could set
- // stream resolution based on user's settings.
- if (width > *max_width && height > *max_height) {
- *max_width = width;
- *max_height = height;
- }
+ resolutions->emplace_back(width, height);
}
+
+ std::sort(resolutions->begin(), resolutions->end(),
+ [](const gfx::Size& a, const gfx::Size& b) -> bool {
+ return a.width() * a.height() < b.width() * b.height();
+ });
}
// VideoCaptureDevice::TakePhotoCallback is given by the application and is used
@@ -251,18 +248,26 @@ void CameraDeviceDelegate::GetPhotoState(
return;
}
- int32_t max_blob_width = 0, max_blob_height = 0;
- GetMaxStreamResolution(static_metadata_,
- cros::mojom::Camera3StreamType::CAMERA3_STREAM_OUTPUT,
- cros::mojom::HalPixelFormat::HAL_PIXEL_FORMAT_BLOB,
- &max_blob_width, &max_blob_height);
- photo_state->width->current = max_blob_width;
- photo_state->width->min = max_blob_width;
- photo_state->width->max = max_blob_width;
+ std::vector<gfx::Size> blob_resolutions;
+ GetStreamResolutions(
+ static_metadata_, cros::mojom::Camera3StreamType::CAMERA3_STREAM_OUTPUT,
+ cros::mojom::HalPixelFormat::HAL_PIXEL_FORMAT_BLOB, &blob_resolutions);
+ if (blob_resolutions.empty()) {
+ std::move(callback).Run(std::move(photo_state));
+ return;
+ }
+
+ // Set the min/max resolution range so that the resolution the caller
+ // requests when taking photos passes the range checks. Since we do not
+ // actually use this mechanism to get other resolutions, set the step to
+ // 0.0 here.
+ photo_state->width->current = current_blob_resolution_.width();
+ photo_state->width->min = blob_resolutions.front().width();
+ photo_state->width->max = blob_resolutions.back().width();
photo_state->width->step = 0.0;
- photo_state->height->current = max_blob_height;
- photo_state->height->min = max_blob_height;
- photo_state->height->max = max_blob_height;
+ photo_state->height->current = current_blob_resolution_.height();
+ photo_state->height->min = blob_resolutions.front().height();
+ photo_state->height->max = blob_resolutions.back().height();
photo_state->height->step = 0.0;
std::move(callback).Run(std::move(photo_state));
}
@@ -284,9 +289,25 @@ void CameraDeviceDelegate::SetPhotoOptions(
return;
}
- if (!request_manager_->HasStreamsConfiguredForTakePhoto()) {
- request_manager_->StopPreview(
- base::BindOnce(&CameraDeviceDelegate::OnFlushed, GetWeakPtr()));
+ bool is_resolution_specified = settings->has_width && settings->has_height;
+ bool should_reconfigure_streams =
+ is_resolution_specified && (current_blob_resolution_.IsEmpty() ||
+ current_blob_resolution_.width() !=
+ static_cast<int32_t>(settings->width) ||
+ current_blob_resolution_.height() !=
+ static_cast<int32_t>(settings->height));
+ if (!request_manager_->HasStreamsConfiguredForTakePhoto() ||
+ should_reconfigure_streams) {
+ if (is_resolution_specified) {
+ gfx::Size new_blob_resolution(static_cast<int32_t>(settings->width),
+ static_cast<int32_t>(settings->height));
+ request_manager_->StopPreview(
+ base::BindOnce(&CameraDeviceDelegate::OnFlushed, GetWeakPtr(),
+ std::move(new_blob_resolution)));
+ } else {
+ request_manager_->StopPreview(base::BindOnce(
+ &CameraDeviceDelegate::OnFlushed, GetWeakPtr(), base::nullopt));
+ }
set_photo_option_callback_ = std::move(callback);
} else {
set_photo_option_callback_.Reset();
@@ -352,7 +373,9 @@ void CameraDeviceDelegate::OnMojoConnectionError() {
}
}
-void CameraDeviceDelegate::OnFlushed(int32_t result) {
+void CameraDeviceDelegate::OnFlushed(
+ base::Optional<gfx::Size> new_blob_resolution,
+ int32_t result) {
DCHECK(ipc_task_runner_->BelongsToCurrentThread());
if (result) {
device_context_->SetErrorState(
@@ -362,7 +385,7 @@ void CameraDeviceDelegate::OnFlushed(int32_t result) {
return;
}
device_context_->SetState(CameraDeviceContext::State::kInitialized);
- ConfigureStreams(true);
+ ConfigureStreams(true, std::move(new_blob_resolution));
}
void CameraDeviceDelegate::OnClosed(int32_t result) {
@@ -376,6 +399,7 @@ void CameraDeviceDelegate::OnClosed(int32_t result) {
}
ResetMojoInterface();
device_context_ = nullptr;
+ current_blob_resolution_.SetSize(0, 0);
std::move(device_close_callback_).Run();
}
@@ -405,12 +429,12 @@ void CameraDeviceDelegate::OnGotCameraInfo(
FROM_HERE, "Failed to get camera info");
return;
}
+
+ reprocess_manager_->UpdateCameraInfo(device_descriptor_.device_id,
+ camera_info);
SortCameraMetadata(&camera_info->static_camera_characteristics);
static_metadata_ = std::move(camera_info->static_camera_characteristics);
- reprocess_manager_->UpdateSupportedEffects(device_descriptor_.device_id,
- static_metadata_);
-
const cros::mojom::CameraMetadataEntryPtr* sensor_orientation =
GetMetadataEntry(
static_metadata_,
@@ -499,10 +523,12 @@ void CameraDeviceDelegate::OnInitialized(int32_t result) {
return;
}
device_context_->SetState(CameraDeviceContext::State::kInitialized);
- ConfigureStreams(false);
+ ConfigureStreams(false, base::nullopt);
}
-void CameraDeviceDelegate::ConfigureStreams(bool require_photo) {
+void CameraDeviceDelegate::ConfigureStreams(
+ bool require_photo,
+ base::Optional<gfx::Size> new_blob_resolution) {
DCHECK(ipc_task_runner_->BelongsToCurrentThread());
DCHECK_EQ(device_context_->GetState(),
CameraDeviceContext::State::kInitialized);
@@ -529,22 +555,35 @@ void CameraDeviceDelegate::ConfigureStreams(bool require_photo) {
// Set up context for still capture stream. We set still capture stream to the
// JPEG stream configuration with maximum supported resolution.
- // TODO(jcliang): Once we support SetPhotoOptions() the still capture stream
- // should be configured dynamically per the photo options.
+ int32_t blob_width = 0;
+ int32_t blob_height = 0;
if (require_photo) {
- int32_t max_blob_width = 0, max_blob_height = 0;
- GetMaxStreamResolution(
+ std::vector<gfx::Size> blob_resolutions;
+ GetStreamResolutions(
static_metadata_, cros::mojom::Camera3StreamType::CAMERA3_STREAM_OUTPUT,
- cros::mojom::HalPixelFormat::HAL_PIXEL_FORMAT_BLOB, &max_blob_width,
- &max_blob_height);
+ cros::mojom::HalPixelFormat::HAL_PIXEL_FORMAT_BLOB, &blob_resolutions);
+ if (blob_resolutions.empty()) {
+ LOG(ERROR) << "Failed to configure streams: No BLOB resolution found.";
+ return;
+ }
+ if (new_blob_resolution.has_value() &&
+ std::find(blob_resolutions.begin(), blob_resolutions.end(),
+ *new_blob_resolution) != blob_resolutions.end()) {
+ blob_width = new_blob_resolution->width();
+ blob_height = new_blob_resolution->height();
+ } else {
+ // Use the largest resolution as default.
+ blob_width = blob_resolutions.back().width();
+ blob_height = blob_resolutions.back().height();
+ }
cros::mojom::Camera3StreamPtr still_capture_stream =
cros::mojom::Camera3Stream::New();
still_capture_stream->id = static_cast<uint64_t>(StreamType::kJpegOutput);
still_capture_stream->stream_type =
cros::mojom::Camera3StreamType::CAMERA3_STREAM_OUTPUT;
- still_capture_stream->width = max_blob_width;
- still_capture_stream->height = max_blob_height;
+ still_capture_stream->width = blob_width;
+ still_capture_stream->height = blob_height;
still_capture_stream->format =
cros::mojom::HalPixelFormat::HAL_PIXEL_FORMAT_BLOB;
still_capture_stream->data_space = 0;
@@ -589,10 +628,12 @@ void CameraDeviceDelegate::ConfigureStreams(bool require_photo) {
CAMERA3_STREAM_CONFIGURATION_NORMAL_MODE;
device_ops_->ConfigureStreams(
std::move(stream_config),
- base::BindOnce(&CameraDeviceDelegate::OnConfiguredStreams, GetWeakPtr()));
+ base::BindOnce(&CameraDeviceDelegate::OnConfiguredStreams, GetWeakPtr(),
+ gfx::Size(blob_width, blob_height)));
}
void CameraDeviceDelegate::OnConfiguredStreams(
+ gfx::Size blob_resolution,
int32_t result,
cros::mojom::Camera3StreamConfigurationPtr updated_config) {
DCHECK(ipc_task_runner_->BelongsToCurrentThread());
@@ -623,6 +664,9 @@ void CameraDeviceDelegate::OnConfiguredStreams(
return;
}
+ current_blob_resolution_.SetSize(blob_resolution.width(),
+ blob_resolution.height());
+
request_manager_->SetUpStreamsAndBuffers(
chrome_capture_params_.requested_format, static_metadata_,
std::move(updated_config->streams));
@@ -690,14 +734,16 @@ bool CameraDeviceDelegate::IsYUVReprocessingSupported(int* max_width,
return false;
}
- GetMaxStreamResolution(
+ std::vector<gfx::Size> yuv_resolutions;
+ GetStreamResolutions(
static_metadata_, cros::mojom::Camera3StreamType::CAMERA3_STREAM_INPUT,
- cros::mojom::HalPixelFormat::HAL_PIXEL_FORMAT_YCbCr_420_888, max_width,
- max_height);
- if (max_width == 0 || max_height == 0) {
+ cros::mojom::HalPixelFormat::HAL_PIXEL_FORMAT_YCbCr_420_888,
+ &yuv_resolutions);
+ if (yuv_resolutions.empty()) {
return false;
}
-
+ *max_width = yuv_resolutions.back().width();
+ *max_height = yuv_resolutions.back().height();
return true;
}
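The refactor above replaces the single max-resolution query with a list sorted ascending by pixel area, so resolutions.front() is the smallest supported size and resolutions.back() the largest. A standalone sketch of the comparator, using the same ordering as the patch:

#include <algorithm>
#include <vector>

#include "ui/gfx/geometry/size.h"

// Sorts ascending by pixel area; callers read front() for the minimum
// resolution and back() for the maximum.
void SortByArea(std::vector<gfx::Size>* resolutions) {
  std::sort(resolutions->begin(), resolutions->end(),
            [](const gfx::Size& a, const gfx::Size& b) {
              return a.width() * a.height() < b.width() * b.height();
            });
}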
diff --git a/chromium/media/capture/video/chromeos/camera_device_delegate.h b/chromium/media/capture/video/chromeos/camera_device_delegate.h
index fb3a2ae4ed4..b1d4c416d73 100644
--- a/chromium/media/capture/video/chromeos/camera_device_delegate.h
+++ b/chromium/media/capture/video/chromeos/camera_device_delegate.h
@@ -9,11 +9,13 @@
#include "base/macros.h"
#include "base/memory/weak_ptr.h"
+#include "base/optional.h"
#include "base/single_thread_task_runner.h"
#include "media/capture/video/chromeos/mojo/camera3.mojom.h"
#include "media/capture/video/chromeos/mojo/camera_common.mojom.h"
#include "media/capture/video/video_capture_device.h"
#include "media/capture/video_capture_types.h"
+#include "ui/gfx/geometry/size.h"
namespace media {
@@ -104,7 +106,7 @@ class CAPTURE_EXPORT CameraDeviceDelegate final {
void OnMojoConnectionError();
// Reconfigure streams for picture taking.
- void OnFlushed(int32_t result);
+ void OnFlushed(base::Optional<gfx::Size> new_blob_resolution, int32_t result);
// Callback method for the Close Mojo IPC call. This method resets the Mojo
// connection and closes the camera device.
@@ -131,8 +133,10 @@ class CAPTURE_EXPORT CameraDeviceDelegate final {
// indicates. If there's no error OnConfiguredStreams notifies
// |client_| the capture has started by calling OnStarted, and proceeds to
// ConstructDefaultRequestSettings.
- void ConfigureStreams(bool require_photo);
+ void ConfigureStreams(bool require_photo,
+ base::Optional<gfx::Size> new_blob_resolution);
void OnConfiguredStreams(
+ gfx::Size blob_resolution,
int32_t result,
cros::mojom::Camera3StreamConfigurationPtr updated_config);
@@ -171,6 +175,9 @@ class CAPTURE_EXPORT CameraDeviceDelegate final {
int32_t camera_id_;
+ // Current configured resolution of BLOB stream.
+ gfx::Size current_blob_resolution_;
+
const scoped_refptr<CameraHalDelegate> camera_hal_delegate_;
VideoCaptureParams chrome_capture_params_;
diff --git a/chromium/media/capture/video/chromeos/camera_device_delegate_unittest.cc b/chromium/media/capture/video/chromeos/camera_device_delegate_unittest.cc
index 48b29f160ad..d27ec7ce259 100644
--- a/chromium/media/capture/video/chromeos/camera_device_delegate_unittest.cc
+++ b/chromium/media/capture/video/chromeos/camera_device_delegate_unittest.cc
@@ -11,6 +11,7 @@
#include <utility>
#include "base/bind.h"
+#include "base/bind_helpers.h"
#include "base/run_loop.h"
#include "base/test/scoped_task_environment.h"
#include "media/base/bind_to_current_loop.h"
@@ -121,7 +122,7 @@ class CameraDeviceDelegateTest : public ::testing::Test {
hal_delegate_thread_.Start();
camera_hal_delegate_ =
new CameraHalDelegate(hal_delegate_thread_.task_runner());
- reprocess_manager_ = std::make_unique<ReprocessManager>();
+ reprocess_manager_ = std::make_unique<ReprocessManager>(base::DoNothing());
camera_hal_delegate_->SetCameraModule(
mock_camera_module_.GetInterfacePtrInfo());
}
@@ -386,9 +387,9 @@ class CameraDeviceDelegateTest : public ::testing::Test {
EXPECT_EQ(CameraDeviceContext::State::kStopped, GetState());
}
- unittest_internal::MockVideoCaptureClient* ResetDeviceContext() {
+ unittest_internal::NiceMockVideoCaptureClient* ResetDeviceContext() {
auto mock_client =
- std::make_unique<unittest_internal::MockVideoCaptureClient>();
+ std::make_unique<unittest_internal::NiceMockVideoCaptureClient>();
auto* client_ptr = mock_client.get();
device_context_ =
std::make_unique<CameraDeviceContext>(std::move(mock_client));
diff --git a/chromium/media/capture/video/chromeos/camera_hal_dispatcher_impl.cc b/chromium/media/capture/video/chromeos/camera_hal_dispatcher_impl.cc
index 2edb18c8906..bb42d28cbb4 100644
--- a/chromium/media/capture/video/chromeos/camera_hal_dispatcher_impl.cc
+++ b/chromium/media/capture/video/chromeos/camera_hal_dispatcher_impl.cc
@@ -197,12 +197,12 @@ void CameraHalDispatcherImpl::RegisterClient(
}
void CameraHalDispatcherImpl::GetJpegDecodeAccelerator(
- media::mojom::MjpegDecodeAcceleratorRequest jda_request) {
+ chromeos_camera::mojom::MjpegDecodeAcceleratorRequest jda_request) {
jda_factory_.Run(std::move(jda_request));
}
void CameraHalDispatcherImpl::GetJpegEncodeAccelerator(
- media::mojom::JpegEncodeAcceleratorRequest jea_request) {
+ chromeos_camera::mojom::JpegEncodeAcceleratorRequest jea_request) {
jea_factory_.Run(std::move(jea_request));
}
diff --git a/chromium/media/capture/video/chromeos/camera_hal_dispatcher_impl.h b/chromium/media/capture/video/chromeos/camera_hal_dispatcher_impl.h
index d36c792f218..828a9d35283 100644
--- a/chromium/media/capture/video/chromeos/camera_hal_dispatcher_impl.h
+++ b/chromium/media/capture/video/chromeos/camera_hal_dispatcher_impl.h
@@ -13,8 +13,10 @@
#include "base/memory/singleton.h"
#include "base/threading/thread.h"
#include "components/chromeos_camera/common/jpeg_encode_accelerator.mojom.h"
+#include "components/chromeos_camera/common/mjpeg_decode_accelerator.mojom.h"
#include "media/capture/capture_export.h"
#include "media/capture/video/chromeos/mojo/cros_camera_service.mojom.h"
+#include "media/capture/video/chromeos/video_capture_device_factory_chromeos.h"
#include "media/capture/video/video_capture_device_factory.h"
#include "mojo/public/cpp/bindings/binding_set.h"
#include "mojo/public/cpp/bindings/interface_ptr_set.h"
@@ -29,8 +31,8 @@ class WaitableEvent;
namespace media {
-using MojoJpegEncodeAcceleratorFactoryCB =
- base::RepeatingCallback<void(media::mojom::JpegEncodeAcceleratorRequest)>;
+using MojoJpegEncodeAcceleratorFactoryCB = base::RepeatingCallback<void(
+ chromeos_camera::mojom::JpegEncodeAcceleratorRequest)>;
class CAPTURE_EXPORT CameraClientObserver {
public:
@@ -65,9 +67,9 @@ class CAPTURE_EXPORT CameraHalDispatcherImpl final
void RegisterServer(cros::mojom::CameraHalServerPtr server) final;
void RegisterClient(cros::mojom::CameraHalClientPtr client) final;
void GetJpegDecodeAccelerator(
- media::mojom::MjpegDecodeAcceleratorRequest jda_request) final;
+ chromeos_camera::mojom::MjpegDecodeAcceleratorRequest jda_request) final;
void GetJpegEncodeAccelerator(
- media::mojom::JpegEncodeAcceleratorRequest jea_request) final;
+ chromeos_camera::mojom::JpegEncodeAcceleratorRequest jea_request) final;
// base::trace_event::TraceLog::EnabledStateObserver implementation.
void OnTraceLogEnabled() final;
diff --git a/chromium/media/capture/video/chromeos/cros_image_capture_impl.cc b/chromium/media/capture/video/chromeos/cros_image_capture_impl.cc
index 1bad8a6eb8f..1d14432f0cd 100644
--- a/chromium/media/capture/video/chromeos/cros_image_capture_impl.cc
+++ b/chromium/media/capture/video/chromeos/cros_image_capture_impl.cc
@@ -18,17 +18,11 @@ CrosImageCaptureImpl::CrosImageCaptureImpl(ReprocessManager* reprocess_manager)
CrosImageCaptureImpl::~CrosImageCaptureImpl() = default;
-void CrosImageCaptureImpl::BindRequest(
- cros::mojom::CrosImageCaptureRequest request) {
- bindings_.AddBinding(this, std::move(request));
-}
-
-void CrosImageCaptureImpl::GetSupportedEffects(
- const std::string& device_id,
- GetSupportedEffectsCallback callback) {
- reprocess_manager_->GetSupportedEffects(
+void CrosImageCaptureImpl::GetCameraInfo(const std::string& device_id,
+ GetCameraInfoCallback callback) {
+ reprocess_manager_->GetCameraInfo(
device_id, media::BindToCurrentLoop(base::BindOnce(
- &CrosImageCaptureImpl::OnGetSupportedEffects,
+ &CrosImageCaptureImpl::OnGotCameraInfo,
base::Unretained(this), std::move(callback))));
}
@@ -40,12 +34,10 @@ void CrosImageCaptureImpl::SetReprocessOption(
device_id, effect, media::BindToCurrentLoop(std::move(callback)));
}
-void CrosImageCaptureImpl::OnGetSupportedEffects(
- GetSupportedEffectsCallback callback,
- base::flat_set<cros::mojom::Effect> supported_effects) {
- std::vector<cros::mojom::Effect> effects(supported_effects.begin(),
- supported_effects.end());
- std::move(callback).Run(std::move(effects));
+void CrosImageCaptureImpl::OnGotCameraInfo(
+ GetCameraInfoCallback callback,
+ cros::mojom::CameraInfoPtr camera_info) {
+ std::move(callback).Run(std::move(camera_info));
}
} // namespace media
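Both calls above wrap their callbacks in media::BindToCurrentLoop, which captures the task runner that is current at bind time and posts the wrapped callback back to it regardless of which sequence eventually runs it. A hypothetical usage sketch (the lambda and its body are illustrative):

// Ensures the reply lands back on the loop that was current here, even if
// ReprocessManager invokes the callback from its own sequence.
auto callback = media::BindToCurrentLoop(
    base::BindOnce([](cros::mojom::CameraInfoPtr camera_info) {
      // Safe to touch state owned by this sequence.
    }));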
diff --git a/chromium/media/capture/video/chromeos/cros_image_capture_impl.h b/chromium/media/capture/video/chromeos/cros_image_capture_impl.h
index 278a403ffa0..9a5c78f1bc8 100644
--- a/chromium/media/capture/video/chromeos/cros_image_capture_impl.h
+++ b/chromium/media/capture/video/chromeos/cros_image_capture_impl.h
@@ -7,7 +7,7 @@
#include <string>
-#include "base/containers/flat_set.h"
+#include "media/capture/video/chromeos/mojo/camera_common.mojom.h"
#include "media/capture/video/chromeos/mojo/cros_image_capture.mojom.h"
#include "media/capture/video/chromeos/reprocess_manager.h"
#include "mojo/public/cpp/bindings/binding_set.h"
@@ -23,21 +23,18 @@ class CrosImageCaptureImpl : public cros::mojom::CrosImageCapture {
// cros::mojom::CrosImageCapture implementations.
- void GetSupportedEffects(const std::string& device_id,
- GetSupportedEffectsCallback callback) override;
+ void GetCameraInfo(const std::string& device_id,
+ GetCameraInfoCallback callback) override;
void SetReprocessOption(const std::string& device_id,
cros::mojom::Effect effect,
SetReprocessOptionCallback callback) override;
private:
- void OnGetSupportedEffects(
- GetSupportedEffectsCallback callback,
- base::flat_set<cros::mojom::Effect> supported_effects);
+ void OnGotCameraInfo(GetCameraInfoCallback callback,
+ cros::mojom::CameraInfoPtr camera_info);
ReprocessManager* reprocess_manager_; // weak
- mojo::BindingSet<cros::mojom::CrosImageCapture> bindings_;
-
DISALLOW_COPY_AND_ASSIGN(CrosImageCaptureImpl);
};
diff --git a/chromium/media/capture/video/chromeos/mock_video_capture_client.cc b/chromium/media/capture/video/chromeos/mock_video_capture_client.cc
index 04e26a48fba..23feb6e1cf6 100644
--- a/chromium/media/capture/video/chromeos/mock_video_capture_client.cc
+++ b/chromium/media/capture/video/chromeos/mock_video_capture_client.cc
@@ -42,6 +42,7 @@ void MockVideoCaptureClient::OnIncomingCapturedData(
const uint8_t* data,
int length,
const VideoCaptureFormat& format,
+ const gfx::ColorSpace& color_space,
int rotation,
base::TimeTicks reference_time,
base::TimeDelta timestamp,
diff --git a/chromium/media/capture/video/chromeos/mock_video_capture_client.h b/chromium/media/capture/video/chromeos/mock_video_capture_client.h
index 7ab44a083a1..e23c17c1090 100644
--- a/chromium/media/capture/video/chromeos/mock_video_capture_client.h
+++ b/chromium/media/capture/video/chromeos/mock_video_capture_client.h
@@ -42,6 +42,7 @@ class MockVideoCaptureClient : public VideoCaptureDevice::Client {
void OnIncomingCapturedData(const uint8_t* data,
int length,
const VideoCaptureFormat& format,
+ const gfx::ColorSpace& color_space,
int rotation,
base::TimeTicks reference_time,
base::TimeDelta timestamp,
@@ -75,6 +76,8 @@ class MockVideoCaptureClient : public VideoCaptureDevice::Client {
base::OnceClosure quit_cb_;
};
+using NiceMockVideoCaptureClient = ::testing::NiceMock<MockVideoCaptureClient>;
+
} // namespace unittest_internal
} // namespace media
diff --git a/chromium/media/capture/video/chromeos/mojo/cros_camera_service.mojom b/chromium/media/capture/video/chromeos/mojo/cros_camera_service.mojom
index db693175159..1bcd8c97dde 100644
--- a/chromium/media/capture/video/chromeos/mojo/cros_camera_service.mojom
+++ b/chromium/media/capture/video/chromeos/mojo/cros_camera_service.mojom
@@ -7,8 +7,8 @@
module cros.mojom;
import "components/chromeos_camera/common/jpeg_encode_accelerator.mojom";
+import "components/chromeos_camera/common/mjpeg_decode_accelerator.mojom";
import "media/capture/video/chromeos/mojo/camera_common.mojom";
-import "media/mojo/interfaces/mjpeg_decode_accelerator.mojom";
// The CrOS camera HAL v3 Mojo dispatcher. The dispatcher acts as a proxy and
// waits for the server and the clients to register. There can only be one
@@ -29,11 +29,11 @@ interface CameraHalDispatcher {
// Get JpegDecodeAccelerator from dispatcher.
[MinVersion=1] GetJpegDecodeAccelerator@2(
- media.mojom.MjpegDecodeAccelerator& jda_request);
+ chromeos_camera.mojom.MjpegDecodeAccelerator& jda_request);
// Get JpegEncodeAccelerator from dispatcher.
[MinVersion=2] GetJpegEncodeAccelerator@3(
- media.mojom.JpegEncodeAccelerator& jea_request);
+ chromeos_camera.mojom.JpegEncodeAccelerator& jea_request);
};
// The CrOS camera HAL v3 Mojo server.
diff --git a/chromium/media/capture/video/chromeos/mojo/cros_image_capture.mojom b/chromium/media/capture/video/chromeos/mojo/cros_image_capture.mojom
index 53a47fa4d27..e2adaf2310f 100644
--- a/chromium/media/capture/video/chromeos/mojo/cros_image_capture.mojom
+++ b/chromium/media/capture/video/chromeos/mojo/cros_image_capture.mojom
@@ -5,6 +5,7 @@
module cros.mojom;
import "media/capture/mojom/image_capture.mojom";
+import "media/capture/video/chromeos/mojo/camera_common.mojom";
// Effect that recognized by Chrome OS.
enum Effect {
@@ -18,9 +19,10 @@ enum Effect {
// translated to the actual video device id to be used in CrosImageCapture
// implementation.
interface CrosImageCapture {
- // Gets supported effects that recognized by CrOS for device. The |source_id|
- // might need translation to be actual video device id.
- GetSupportedEffects(string source_id) => (array<Effect> supported_effects);
+ // Gets camera information |camera_info| which includes camera facing,
+ // characteristics, orientation, etc. The |source_id| might need translation
+ // to be actual video device id.
+ GetCameraInfo(string source_id) => (CameraInfo camera_info);
// Sets reprocess option to bind with the coming take photo request. When this
// method is called, the reprocess option will be queued. All reprocess
diff --git a/chromium/media/capture/video/chromeos/renderer_facing_cros_image_capture.cc b/chromium/media/capture/video/chromeos/renderer_facing_cros_image_capture.cc
index 184c47ed45b..6d09adf509e 100644
--- a/chromium/media/capture/video/chromeos/renderer_facing_cros_image_capture.cc
+++ b/chromium/media/capture/video/chromeos/renderer_facing_cros_image_capture.cc
@@ -23,11 +23,11 @@ RendererFacingCrosImageCapture::RendererFacingCrosImageCapture(
RendererFacingCrosImageCapture::~RendererFacingCrosImageCapture() = default;
-void RendererFacingCrosImageCapture::GetSupportedEffectsWithRealId(
- GetSupportedEffectsCallback callback,
+void RendererFacingCrosImageCapture::GetCameraInfoWithRealId(
+ GetCameraInfoCallback callback,
const base::Optional<std::string>& device_id) {
DCHECK(device_id.has_value());
- cros_image_capture_->GetSupportedEffects(*device_id, std::move(callback));
+ cros_image_capture_->GetCameraInfo(*device_id, std::move(callback));
}
void RendererFacingCrosImageCapture::SetReprocessOptionWithRealId(
@@ -39,14 +39,13 @@ void RendererFacingCrosImageCapture::SetReprocessOptionWithRealId(
std::move(callback));
}
-void RendererFacingCrosImageCapture::GetSupportedEffects(
+void RendererFacingCrosImageCapture::GetCameraInfo(
const std::string& source_id,
- GetSupportedEffectsCallback callback) {
+ GetCameraInfoCallback callback) {
mapping_callback_.Run(
- source_id,
- media::BindToCurrentLoop(base::BindOnce(
- &RendererFacingCrosImageCapture::GetSupportedEffectsWithRealId,
- weak_ptr_factory_.GetWeakPtr(), std::move(callback))));
+ source_id, media::BindToCurrentLoop(base::BindOnce(
+ &RendererFacingCrosImageCapture::GetCameraInfoWithRealId,
+ weak_ptr_factory_.GetWeakPtr(), std::move(callback))));
}
void RendererFacingCrosImageCapture::SetReprocessOption(
diff --git a/chromium/media/capture/video/chromeos/renderer_facing_cros_image_capture.h b/chromium/media/capture/video/chromeos/renderer_facing_cros_image_capture.h
index 5eac4f9a1c6..ad6fa0b3a6a 100644
--- a/chromium/media/capture/video/chromeos/renderer_facing_cros_image_capture.h
+++ b/chromium/media/capture/video/chromeos/renderer_facing_cros_image_capture.h
@@ -33,9 +33,8 @@ class CAPTURE_EXPORT RendererFacingCrosImageCapture
DeviceIdMappingCallback mapping_callback);
~RendererFacingCrosImageCapture() override;
- void GetSupportedEffectsWithRealId(
- GetSupportedEffectsCallback callback,
- const base::Optional<std::string>& device_id);
+ void GetCameraInfoWithRealId(GetCameraInfoCallback callback,
+ const base::Optional<std::string>& device_id);
void SetReprocessOptionWithRealId(
cros::mojom::Effect effect,
@@ -43,8 +42,8 @@ class CAPTURE_EXPORT RendererFacingCrosImageCapture
const base::Optional<std::string>& device_id);
// cros::mojom::CrosImageCapture implementations.
- void GetSupportedEffects(const std::string& source_id,
- GetSupportedEffectsCallback callback) override;
+ void GetCameraInfo(const std::string& source_id,
+ GetCameraInfoCallback callback) override;
void SetReprocessOption(const std::string& source_id,
cros::mojom::Effect effect,
SetReprocessOptionCallback callback) override;
diff --git a/chromium/media/capture/video/chromeos/reprocess_manager.cc b/chromium/media/capture/video/chromeos/reprocess_manager.cc
index 64a7b2607a6..ed5e46661f6 100644
--- a/chromium/media/capture/video/chromeos/reprocess_manager.cc
+++ b/chromium/media/capture/video/chromeos/reprocess_manager.cc
@@ -44,10 +44,10 @@ int ReprocessManager::GetReprocessReturnCode(
return kReprocessSuccess;
}
-ReprocessManager::ReprocessManager()
+ReprocessManager::ReprocessManager(UpdateCameraInfoCallback callback)
: sequenced_task_runner_(base::CreateSequencedTaskRunnerWithTraits(
{base::TaskPriority::USER_VISIBLE})),
- impl(new ReprocessManager::ReprocessManagerImpl) {}
+ impl(new ReprocessManager::ReprocessManagerImpl(std::move(callback))) {}
ReprocessManager::~ReprocessManager() {
sequenced_task_runner_->DeleteSoon(FROM_HERE, std::move(impl));
@@ -86,27 +86,28 @@ void ReprocessManager::FlushReprocessOptions(const std::string& device_id) {
base::Unretained(impl.get()), device_id));
}
-void ReprocessManager::GetSupportedEffects(
- const std::string& device_id,
- GetSupportedEffectsCallback callback) {
+void ReprocessManager::GetCameraInfo(const std::string& device_id,
+ GetCameraInfoCallback callback) {
sequenced_task_runner_->PostTask(
FROM_HERE,
- base::BindOnce(
- &ReprocessManager::ReprocessManagerImpl::GetSupportedEffects,
- base::Unretained(impl.get()), device_id, std::move(callback)));
+ base::BindOnce(&ReprocessManager::ReprocessManagerImpl::GetCameraInfo,
+ base::Unretained(impl.get()), device_id,
+ std::move(callback)));
}
-void ReprocessManager::UpdateSupportedEffects(
+void ReprocessManager::UpdateCameraInfo(
const std::string& device_id,
- const cros::mojom::CameraMetadataPtr& metadata) {
+ const cros::mojom::CameraInfoPtr& camera_info) {
sequenced_task_runner_->PostTask(
FROM_HERE,
- base::BindOnce(
- &ReprocessManager::ReprocessManagerImpl::UpdateSupportedEffects,
- base::Unretained(impl.get()), device_id, metadata.Clone()));
+ base::BindOnce(&ReprocessManager::ReprocessManagerImpl::UpdateCameraInfo,
+ base::Unretained(impl.get()), device_id,
+ camera_info.Clone()));
}
-ReprocessManager::ReprocessManagerImpl::ReprocessManagerImpl() {}
+ReprocessManager::ReprocessManagerImpl::ReprocessManagerImpl(
+ UpdateCameraInfoCallback callback)
+ : update_camera_info_callback_(std::move(callback)) {}
ReprocessManager::ReprocessManagerImpl::~ReprocessManagerImpl() = default;
@@ -161,25 +162,26 @@ void ReprocessManager::ReprocessManagerImpl::FlushReprocessOptions(
reprocess_task_queue_map_[device_id].swap(empty_queue);
}
-void ReprocessManager::ReprocessManagerImpl::GetSupportedEffects(
+void ReprocessManager::ReprocessManagerImpl::GetCameraInfo(
const std::string& device_id,
- GetSupportedEffectsCallback callback) {
- std::move(callback).Run(
- base::flat_set<cros::mojom::Effect>(supported_effects_map_[device_id]));
+ GetCameraInfoCallback callback) {
+ if (camera_info_map_[device_id]) {
+ std::move(callback).Run(camera_info_map_[device_id].Clone());
+ } else {
+ get_camera_info_callback_queue_map_[device_id].push(std::move(callback));
+ update_camera_info_callback_.Run(device_id);
+ }
}
-void ReprocessManager::ReprocessManagerImpl::UpdateSupportedEffects(
+void ReprocessManager::ReprocessManagerImpl::UpdateCameraInfo(
const std::string& device_id,
- const cros::mojom::CameraMetadataPtr metadata) {
- const cros::mojom::CameraMetadataEntryPtr* portrait_mode =
- media::GetMetadataEntry(
- metadata,
- static_cast<cros::mojom::CameraMetadataTag>(kPortraitModeVendorKey));
- supported_effects_map_[device_id].clear();
- supported_effects_map_[device_id].insert(cros::mojom::Effect::NO_EFFECT);
- if (portrait_mode) {
- supported_effects_map_[device_id].insert(
- cros::mojom::Effect::PORTRAIT_MODE);
+ cros::mojom::CameraInfoPtr camera_info) {
+ camera_info_map_[device_id] = std::move(camera_info);
+
+ auto& callback_queue = get_camera_info_callback_queue_map_[device_id];
+ while (!callback_queue.empty()) {
+ std::move(callback_queue.front()).Run(camera_info_map_[device_id].Clone());
+ callback_queue.pop();
}
}
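GetCameraInfo() and UpdateCameraInfo() above form a cache-or-queue pattern: a lookup either answers immediately from the cached CameraInfoPtr or parks its callback until the next UpdateCameraInfo() flushes the queue. A self-contained sketch of the same pattern in plain standard C++ (all names hypothetical):

#include <functional>
#include <map>
#include <optional>
#include <queue>
#include <string>
#include <utility>

using InfoCallback = std::function<void(int)>;

struct InfoCache {
  std::map<std::string, std::optional<int>> cache;
  std::map<std::string, std::queue<InfoCallback>> pending;
  std::function<void(const std::string&)> request_update;

  void Get(const std::string& id, InfoCallback callback) {
    if (cache[id]) {
      callback(*cache[id]);  // Answer immediately from the cache.
    } else {
      pending[id].push(std::move(callback));  // Park until Update() arrives.
      request_update(id);
    }
  }

  void Update(const std::string& id, int info) {
    cache[id] = info;
    auto& waiters = pending[id];
    while (!waiters.empty()) {  // Flush every waiter with the fresh value.
      waiters.front()(info);
      waiters.pop();
    }
  }
};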
diff --git a/chromium/media/capture/video/chromeos/reprocess_manager.h b/chromium/media/capture/video/chromeos/reprocess_manager.h
index a9e2050b2ba..2f3624710a5 100644
--- a/chromium/media/capture/video/chromeos/reprocess_manager.h
+++ b/chromium/media/capture/video/chromeos/reprocess_manager.h
@@ -16,6 +16,7 @@
#include "media/capture/capture_export.h"
#include "media/capture/mojom/image_capture.mojom.h"
#include "media/capture/video/chromeos/mojo/camera3.mojom.h"
+#include "media/capture/video/chromeos/mojo/camera_common.mojom.h"
#include "media/capture/video/chromeos/mojo/cros_image_capture.mojom.h"
#include "mojo/public/cpp/bindings/binding.h"
@@ -46,12 +47,14 @@ constexpr int32_t kReprocessSuccess = 0;
// sequentialize to a single sequence.
class CAPTURE_EXPORT ReprocessManager {
public:
- using GetSupportedEffectsCallback =
- base::OnceCallback<void(base::flat_set<cros::mojom::Effect>)>;
+ using GetCameraInfoCallback =
+ base::OnceCallback<void(cros::mojom::CameraInfoPtr camera_info)>;
+ using UpdateCameraInfoCallback =
+ base::RepeatingCallback<void(const std::string& device_id)>;
class ReprocessManagerImpl {
public:
- ReprocessManagerImpl();
+ ReprocessManagerImpl(UpdateCameraInfoCallback callback);
~ReprocessManagerImpl();
void SetReprocessOption(
@@ -67,17 +70,22 @@ class CAPTURE_EXPORT ReprocessManager {
void FlushReprocessOptions(const std::string& device_id);
- void GetSupportedEffects(const std::string& device_id,
- GetSupportedEffectsCallback callback);
+ void GetCameraInfo(const std::string& device_id,
+ GetCameraInfoCallback callback);
- void UpdateSupportedEffects(const std::string& device_id,
- const cros::mojom::CameraMetadataPtr metadata);
+ void UpdateCameraInfo(const std::string& device_id,
+ cros::mojom::CameraInfoPtr camera_info);
private:
base::flat_map<std::string, base::queue<ReprocessTask>>
reprocess_task_queue_map_;
- base::flat_map<std::string, base::flat_set<cros::mojom::Effect>>
- supported_effects_map_;
+
+ base::flat_map<std::string, cros::mojom::CameraInfoPtr> camera_info_map_;
+
+ base::flat_map<std::string, base::queue<GetCameraInfoCallback>>
+ get_camera_info_callback_queue_map_;
+
+ UpdateCameraInfoCallback update_camera_info_callback_;
DISALLOW_COPY_AND_ASSIGN(ReprocessManagerImpl);
};
@@ -85,7 +93,7 @@ class CAPTURE_EXPORT ReprocessManager {
static int GetReprocessReturnCode(
cros::mojom::Effect effect,
const cros::mojom::CameraMetadataPtr* metadata);
- ReprocessManager();
+ ReprocessManager(UpdateCameraInfoCallback callback);
~ReprocessManager();
// Sets the reprocess option for given device id and effect. Each reprocess
@@ -106,14 +114,13 @@ class CAPTURE_EXPORT ReprocessManager {
// Clears all remaining ReprocessTasks in the queue for given device id.
void FlushReprocessOptions(const std::string& device_id);
- // Gets supported effects for current active device.
- void GetSupportedEffects(const std::string& device_id,
- GetSupportedEffectsCallback callback);
+ // Gets camera information for the currently active device.
+ void GetCameraInfo(const std::string& device_id,
+ GetCameraInfoCallback callback);
- // Updates supported effects for given device. This method should be triggered
- // whenever the camera characteristics is updated.
- void UpdateSupportedEffects(const std::string& device_id,
- const cros::mojom::CameraMetadataPtr& metadata);
+ // Updates camera information for the given device.
+ void UpdateCameraInfo(const std::string& device_id,
+ const cros::mojom::CameraInfoPtr& camera_info);
private:
scoped_refptr<base::SequencedTaskRunner> sequenced_task_runner_;
diff --git a/chromium/media/capture/video/scoped_video_capture_jpeg_decoder.cc b/chromium/media/capture/video/chromeos/scoped_video_capture_jpeg_decoder.cc
index 43590846179..6597bf323bc 100644
--- a/chromium/media/capture/video/scoped_video_capture_jpeg_decoder.cc
+++ b/chromium/media/capture/video/chromeos/scoped_video_capture_jpeg_decoder.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/capture/video/scoped_video_capture_jpeg_decoder.h"
+#include "media/capture/video/chromeos/scoped_video_capture_jpeg_decoder.h"
namespace media {
diff --git a/chromium/media/capture/video/scoped_video_capture_jpeg_decoder.h b/chromium/media/capture/video/chromeos/scoped_video_capture_jpeg_decoder.h
index 76b3436f222..a047ec729f6 100644
--- a/chromium/media/capture/video/scoped_video_capture_jpeg_decoder.h
+++ b/chromium/media/capture/video/chromeos/scoped_video_capture_jpeg_decoder.h
@@ -2,14 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_CAPTURE_VIDEO_SCOPED_VIDEO_CAPTURE_JPEG_DECODER_H_
-#define MEDIA_CAPTURE_VIDEO_SCOPED_VIDEO_CAPTURE_JPEG_DECODER_H_
+#ifndef MEDIA_CAPTURE_VIDEO_CHROMEOS_SCOPED_VIDEO_CAPTURE_JPEG_DECODER_H_
+#define MEDIA_CAPTURE_VIDEO_CHROMEOS_SCOPED_VIDEO_CAPTURE_JPEG_DECODER_H_
#include <memory>
#include "base/sequenced_task_runner.h"
#include "media/capture/capture_export.h"
-#include "media/capture/video/video_capture_jpeg_decoder.h"
+#include "media/capture/video/chromeos/video_capture_jpeg_decoder.h"
namespace media {
@@ -42,4 +42,4 @@ class CAPTURE_EXPORT ScopedVideoCaptureJpegDecoder
} // namespace media
-#endif // MEDIA_CAPTURE_VIDEO_SCOPED_VIDEO_CAPTURE_JPEG_DECODER_H_
+#endif // MEDIA_CAPTURE_VIDEO_CHROMEOS_SCOPED_VIDEO_CAPTURE_JPEG_DECODER_H_
diff --git a/chromium/media/capture/video/chromeos/video_capture_device_chromeos_halv3.cc b/chromium/media/capture/video/chromeos/video_capture_device_chromeos_halv3.cc
index 5d14a389c30..d651773c61b 100644
--- a/chromium/media/capture/video/chromeos/video_capture_device_chromeos_halv3.cc
+++ b/chromium/media/capture/video/chromeos/video_capture_device_chromeos_halv3.cc
@@ -14,6 +14,7 @@
#include "base/synchronization/waitable_event.h"
#include "base/threading/platform_thread.h"
#include "base/trace_event/trace_event.h"
+#include "chromeos/dbus/power/power_manager_client.h"
#include "media/base/bind_to_current_loop.h"
#include "media/capture/video/chromeos/camera_device_context.h"
#include "media/capture/video/chromeos/camera_device_delegate.h"
@@ -25,8 +26,83 @@
namespace media {
+class VideoCaptureDeviceChromeOSHalv3::PowerManagerClientProxy
+ : public base::RefCountedThreadSafe<PowerManagerClientProxy>,
+ public chromeos::PowerManagerClient::Observer {
+ public:
+ PowerManagerClientProxy() = default;
+
+ void Init(base::WeakPtr<VideoCaptureDeviceChromeOSHalv3> device,
+ scoped_refptr<base::SingleThreadTaskRunner> device_task_runner,
+ scoped_refptr<base::SingleThreadTaskRunner> dbus_task_runner) {
+ device_ = std::move(device);
+ device_task_runner_ = std::move(device_task_runner);
+ dbus_task_runner_ = std::move(dbus_task_runner);
+
+ dbus_task_runner_->PostTask(
+ FROM_HERE,
+ base::BindOnce(&PowerManagerClientProxy::InitOnDBusThread, this));
+ }
+
+ void Shutdown() {
+ dbus_task_runner_->PostTask(
+ FROM_HERE,
+ base::BindOnce(&PowerManagerClientProxy::ShutdownOnDBusThread, this));
+ }
+
+ void UnblockSuspend(const base::UnguessableToken& unblock_suspend_token) {
+ dbus_task_runner_->PostTask(
+ FROM_HERE,
+ base::BindOnce(&PowerManagerClientProxy::UnblockSuspendOnDBusThread,
+ this, unblock_suspend_token));
+ }
+
+ private:
+ friend class base::RefCountedThreadSafe<PowerManagerClientProxy>;
+
+ ~PowerManagerClientProxy() override = default;
+
+ void InitOnDBusThread() {
+ DCHECK(dbus_task_runner_->RunsTasksInCurrentSequence());
+ chromeos::PowerManagerClient::Get()->AddObserver(this);
+ }
+
+ void ShutdownOnDBusThread() {
+ DCHECK(dbus_task_runner_->RunsTasksInCurrentSequence());
+ chromeos::PowerManagerClient::Get()->RemoveObserver(this);
+ }
+
+ void UnblockSuspendOnDBusThread(
+ const base::UnguessableToken& unblock_suspend_token) {
+ DCHECK(dbus_task_runner_->RunsTasksInCurrentSequence());
+ chromeos::PowerManagerClient::Get()->UnblockSuspend(unblock_suspend_token);
+ }
+
+ // chromeos::PowerManagerClient::Observer:
+ void SuspendImminent(power_manager::SuspendImminent::Reason reason) final {
+ auto token = base::UnguessableToken::Create();
+ chromeos::PowerManagerClient::Get()->BlockSuspend(
+ token, "VideoCaptureDeviceChromeOSHalv3");
+ device_task_runner_->PostTask(
+ FROM_HERE, base::BindOnce(&VideoCaptureDeviceChromeOSHalv3::CloseDevice,
+ device_, token));
+ }
+
+ void SuspendDone(const base::TimeDelta& sleep_duration) final {
+ device_task_runner_->PostTask(
+ FROM_HERE,
+ base::BindOnce(&VideoCaptureDeviceChromeOSHalv3::OpenDevice, device_));
+ }
+
+ base::WeakPtr<VideoCaptureDeviceChromeOSHalv3> device_;
+ scoped_refptr<base::SingleThreadTaskRunner> device_task_runner_;
+ scoped_refptr<base::TaskRunner> dbus_task_runner_;
+
+ DISALLOW_COPY_AND_ASSIGN(PowerManagerClientProxy);
+};
+
VideoCaptureDeviceChromeOSHalv3::VideoCaptureDeviceChromeOSHalv3(
- scoped_refptr<base::SingleThreadTaskRunner> task_runner_for_screen_observer,
+ scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner,
const VideoCaptureDeviceDescriptor& device_descriptor,
scoped_refptr<CameraHalDelegate> camera_hal_delegate,
ReprocessManager* reprocess_manager)
@@ -35,9 +111,8 @@ VideoCaptureDeviceChromeOSHalv3::VideoCaptureDeviceChromeOSHalv3(
capture_task_runner_(base::ThreadTaskRunnerHandle::Get()),
camera_device_ipc_thread_(std::string("CameraDeviceIpcThread") +
device_descriptor.device_id),
- screen_observer_delegate_(ScreenObserverDelegate::Create(
- this,
- std::move(task_runner_for_screen_observer))),
+ screen_observer_delegate_(
+ ScreenObserverDelegate::Create(this, ui_task_runner)),
lens_facing_(device_descriptor.facing),
camera_orientation_(0),
// External cameras have lens_facing as MEDIA_VIDEO_FACING_NONE.
@@ -46,15 +121,19 @@ VideoCaptureDeviceChromeOSHalv3::VideoCaptureDeviceChromeOSHalv3(
VideoFacingMode::MEDIA_VIDEO_FACING_NONE),
rotation_(0),
reprocess_manager_(reprocess_manager),
+ power_manager_client_proxy_(
+ base::MakeRefCounted<PowerManagerClientProxy>()),
weak_ptr_factory_(this) {
- chromeos::PowerManagerClient::Get()->AddObserver(this);
+ power_manager_client_proxy_->Init(weak_ptr_factory_.GetWeakPtr(),
+ capture_task_runner_,
+ std::move(ui_task_runner));
}
VideoCaptureDeviceChromeOSHalv3::~VideoCaptureDeviceChromeOSHalv3() {
DCHECK(capture_task_runner_->BelongsToCurrentThread());
DCHECK(!camera_device_ipc_thread_.IsRunning());
screen_observer_delegate_->RemoveObserver();
- chromeos::PowerManagerClient::Get()->RemoveObserver(this);
+ power_manager_client_proxy_->Shutdown();
}
// VideoCaptureDevice implementation.
@@ -86,7 +165,7 @@ void VideoCaptureDeviceChromeOSHalv3::StopAndDeAllocate() {
if (!camera_device_delegate_) {
return;
}
- CloseDevice(base::OnceClosure());
+ CloseDevice(base::UnguessableToken());
camera_device_ipc_thread_.Stop();
camera_device_delegate_.reset();
device_context_.reset();
@@ -121,24 +200,6 @@ void VideoCaptureDeviceChromeOSHalv3::SetPhotoOptions(
base::Passed(&settings), base::Passed(&callback)));
}
-void VideoCaptureDeviceChromeOSHalv3::SuspendImminent(
- power_manager::SuspendImminent::Reason reason) {
- capture_task_runner_->PostTask(
- FROM_HERE,
- base::BindOnce(
- &VideoCaptureDeviceChromeOSHalv3::CloseDevice,
- weak_ptr_factory_.GetWeakPtr(),
- BindToCurrentLoop(chromeos::PowerManagerClient::Get()
- ->GetSuspendReadinessCallback(FROM_HERE))));
-}
-
-void VideoCaptureDeviceChromeOSHalv3::SuspendDone(
- const base::TimeDelta& sleep_duration) {
- capture_task_runner_->PostTask(
- FROM_HERE, base::BindOnce(&VideoCaptureDeviceChromeOSHalv3::OpenDevice,
- weak_ptr_factory_.GetWeakPtr()));
-}
-
void VideoCaptureDeviceChromeOSHalv3::OpenDevice() {
DCHECK(capture_task_runner_->BelongsToCurrentThread());
@@ -159,7 +220,8 @@ void VideoCaptureDeviceChromeOSHalv3::OpenDevice() {
camera_device_delegate_->GetWeakPtr(), rotation_));
}
-void VideoCaptureDeviceChromeOSHalv3::CloseDevice(base::OnceClosure callback) {
+void VideoCaptureDeviceChromeOSHalv3::CloseDevice(
+ base::UnguessableToken unblock_suspend_token) {
DCHECK(capture_task_runner_->BelongsToCurrentThread());
if (!camera_device_delegate_) {
@@ -182,9 +244,8 @@ void VideoCaptureDeviceChromeOSHalv3::CloseDevice(base::OnceClosure callback) {
base::Unretained(&device_closed))));
base::TimeDelta kWaitTimeoutSecs = base::TimeDelta::FromSeconds(3);
device_closed.TimedWait(kWaitTimeoutSecs);
- if (callback) {
- std::move(callback).Run();
- }
+ if (!unblock_suspend_token.is_empty())
+ power_manager_client_proxy_->UnblockSuspend(unblock_suspend_token);
}
void VideoCaptureDeviceChromeOSHalv3::SetDisplayRotation(
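The hunk above replaces the device's direct PowerManagerClient observation with a ref-counted proxy: SuspendImminent() takes a suspend-block token on the D-Bus thread, CloseDevice() runs on the capture task runner, and the token is handed back through UnblockSuspend() once the device is closed. A minimal standalone sketch of that token handshake, with std:: types standing in for base::UnguessableToken and chromeos::PowerManagerClient:

#include <cstdint>
#include <iostream>
#include <random>

struct Token {  // stand-in for base::UnguessableToken
  uint64_t value = 0;
  bool is_empty() const { return value == 0; }
};

struct PowerManager {  // stand-in for chromeos::PowerManagerClient
  void BlockSuspend(Token, const char* who) {
    std::cout << "suspend blocked by " << who << "\n";
  }
  void UnblockSuspend(Token) { std::cout << "suspend unblocked\n"; }
};

int main() {
  PowerManager pm;
  Token token{std::mt19937_64{std::random_device{}()}()};  // ~Token::Create()
  pm.BlockSuspend(token, "VideoCaptureDeviceChromeOSHalv3");
  // ... CloseDevice(token) runs on the capture task runner ...
  if (!token.is_empty())
    pm.UnblockSuspend(token);  // posted back to the D-Bus thread by the proxy
}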
diff --git a/chromium/media/capture/video/chromeos/video_capture_device_chromeos_halv3.h b/chromium/media/capture/video/chromeos/video_capture_device_chromeos_halv3.h
index 559907ae5c5..f1fe54f718f 100644
--- a/chromium/media/capture/video/chromeos/video_capture_device_chromeos_halv3.h
+++ b/chromium/media/capture/video/chromeos/video_capture_device_chromeos_halv3.h
@@ -11,7 +11,6 @@
#include "base/memory/weak_ptr.h"
#include "base/single_thread_task_runner.h"
#include "base/threading/thread.h"
-#include "chromeos/dbus/power/power_manager_client.h"
#include "media/capture/video/chromeos/display_rotation_observer.h"
#include "media/capture/video/video_capture_device.h"
#include "media/capture/video/video_capture_device_descriptor.h"
@@ -33,12 +32,10 @@ class ReprocessManager;
// Implementation of VideoCaptureDevice for ChromeOS with CrOS camera HALv3.
class CAPTURE_EXPORT VideoCaptureDeviceChromeOSHalv3 final
: public VideoCaptureDevice,
- public DisplayRotationObserver,
- public chromeos::PowerManagerClient::Observer {
+ public DisplayRotationObserver {
public:
VideoCaptureDeviceChromeOSHalv3(
- scoped_refptr<base::SingleThreadTaskRunner>
- task_runner_for_screen_observer,
+ scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner,
const VideoCaptureDeviceDescriptor& device_descriptor,
scoped_refptr<CameraHalDelegate> camera_hal_delegate,
ReprocessManager* reprocess_manager);
@@ -54,14 +51,12 @@ class CAPTURE_EXPORT VideoCaptureDeviceChromeOSHalv3 final
void SetPhotoOptions(mojom::PhotoSettingsPtr settings,
SetPhotoOptionsCallback callback) final;
- // chromeos::PowerManagerClient::Observer callbacks for system suspend and
- // resume events.
- void SuspendImminent(power_manager::SuspendImminent::Reason reason) final;
- void SuspendDone(const base::TimeDelta& sleep_duration) final;
-
private:
+ // Helper that interacts with PowerManagerClient on its original D-Bus thread.
+ class PowerManagerClientProxy;
+
void OpenDevice();
- void CloseDevice(base::OnceClosure callback);
+ void CloseDevice(base::UnguessableToken unblock_suspend_token);
// DisplayRotationDelegate implementation.
void SetDisplayRotation(const display::Display& display) final;
@@ -103,6 +98,8 @@ class CAPTURE_EXPORT VideoCaptureDeviceChromeOSHalv3 final
ReprocessManager* reprocess_manager_; // weak
+ scoped_refptr<PowerManagerClientProxy> power_manager_client_proxy_;
+
base::WeakPtrFactory<VideoCaptureDeviceChromeOSHalv3> weak_ptr_factory_;
DISALLOW_IMPLICIT_CONSTRUCTORS(VideoCaptureDeviceChromeOSHalv3);
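With the observer moved into the .cc, the header drops its chromeos/dbus include entirely: the helper is only forward-declared and held by scoped_refptr. A minimal sketch of that decoupling pattern, with std::shared_ptr standing in for scoped_refptr:

#include <memory>

class Device {
 public:
  Device();   // defined in the .cc file, where Helper is a complete type
  ~Device();

 private:
  class Helper;                     // stands in for PowerManagerClientProxy
  std::shared_ptr<Helper> helper_;  // a forward declaration suffices here
};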
diff --git a/chromium/media/capture/video/chromeos/video_capture_device_factory_chromeos.cc b/chromium/media/capture/video/chromeos/video_capture_device_factory_chromeos.cc
index 6d3ad46933b..86aed90831a 100644
--- a/chromium/media/capture/video/chromeos/video_capture_device_factory_chromeos.cc
+++ b/chromium/media/capture/video/chromeos/video_capture_device_factory_chromeos.cc
@@ -7,9 +7,11 @@
#include <utility>
#include "base/memory/ptr_util.h"
+#include "media/base/bind_to_current_loop.h"
#include "media/capture/video/chromeos/camera_hal_dispatcher_impl.h"
#include "media/capture/video/chromeos/cros_image_capture_impl.h"
#include "media/capture/video/chromeos/reprocess_manager.h"
+#include "mojo/public/cpp/bindings/strong_binding.h"
namespace media {
@@ -23,9 +25,13 @@ VideoCaptureDeviceFactoryChromeOS::VideoCaptureDeviceFactoryChromeOS(
scoped_refptr<base::SingleThreadTaskRunner> task_runner_for_screen_observer)
: task_runner_for_screen_observer_(task_runner_for_screen_observer),
camera_hal_ipc_thread_("CameraHalIpcThread"),
- reprocess_manager_(new ReprocessManager),
- cros_image_capture_(new CrosImageCaptureImpl(reprocess_manager_.get())),
- initialized_(Init()) {}
+ initialized_(Init()),
+ weak_ptr_factory_(this) {
+ auto callback =
+ base::BindRepeating(&VideoCaptureDeviceFactoryChromeOS::GetCameraInfo,
+ base::Unretained(this));
+ reprocess_manager_ = std::make_unique<ReprocessManager>(std::move(callback));
+}
VideoCaptureDeviceFactoryChromeOS::~VideoCaptureDeviceFactoryChromeOS() {
camera_hal_delegate_->Reset();
@@ -90,9 +96,30 @@ bool VideoCaptureDeviceFactoryChromeOS::Init() {
return true;
}
+void VideoCaptureDeviceFactoryChromeOS::GetCameraInfo(
+ const std::string& device_id) {
+ if (!initialized_) {
+ return;
+ }
+ camera_hal_delegate_->GetCameraInfo(
+ std::stoi(device_id),
+ BindToCurrentLoop(
+ base::BindOnce(&VideoCaptureDeviceFactoryChromeOS::OnGotCameraInfo,
+ weak_ptr_factory_.GetWeakPtr(), device_id)));
+}
+
+void VideoCaptureDeviceFactoryChromeOS::OnGotCameraInfo(
+ const std::string& device_id,
+ int32_t result,
+ cros::mojom::CameraInfoPtr camera_info) {
+ reprocess_manager_->UpdateCameraInfo(device_id, std::move(camera_info));
+}
+
void VideoCaptureDeviceFactoryChromeOS::BindCrosImageCaptureRequest(
cros::mojom::CrosImageCaptureRequest request) {
- cros_image_capture_->BindRequest(std::move(request));
+ mojo::MakeStrongBinding(
+ std::make_unique<CrosImageCaptureImpl>(reprocess_manager_.get()),
+ std::move(request));
}
} // namespace media
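BindCrosImageCaptureRequest() now mints a fresh CrosImageCaptureImpl per request and ties its lifetime to the message pipe via mojo::MakeStrongBinding(), rather than routing every request through one long-lived instance. A standalone sketch of the strong-binding ownership model, with plain C++ stand-ins for the mojo types:

#include <iostream>
#include <memory>
#include <utility>

class Impl {
 public:
  ~Impl() { std::cout << "impl destroyed with its pipe\n"; }
};

// Stand-in for mojo::MakeStrongBinding(): the binding owns the impl and
// destroys it when the pipe closes or reports an error.
class StrongBinding {
 public:
  explicit StrongBinding(std::unique_ptr<Impl> impl) : impl_(std::move(impl)) {}
  void OnConnectionError() { impl_.reset(); }

 private:
  std::unique_ptr<Impl> impl_;
};

int main() {
  StrongBinding binding(std::make_unique<Impl>());  // one impl per request
  binding.OnConnectionError();                      // pipe gone, impl gone
}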
diff --git a/chromium/media/capture/video/chromeos/video_capture_device_factory_chromeos.h b/chromium/media/capture/video/chromeos/video_capture_device_factory_chromeos.h
index 56c74b52e05..55f68ddce6f 100644
--- a/chromium/media/capture/video/chromeos/video_capture_device_factory_chromeos.h
+++ b/chromium/media/capture/video/chromeos/video_capture_device_factory_chromeos.h
@@ -9,13 +9,16 @@
#include "base/macros.h"
#include "base/single_thread_task_runner.h"
+#include "components/chromeos_camera/common/mjpeg_decode_accelerator.mojom.h"
#include "media/capture/video/chromeos/camera_hal_delegate.h"
#include "media/capture/video/chromeos/mojo/cros_image_capture.mojom.h"
#include "media/capture/video/video_capture_device_factory.h"
namespace media {
-class CrosImageCaptureImpl;
+using MojoMjpegDecodeAcceleratorFactoryCB = base::RepeatingCallback<void(
+ chromeos_camera::mojom::MjpegDecodeAcceleratorRequest)>;
+
class ReprocessManager;
class CAPTURE_EXPORT VideoCaptureDeviceFactoryChromeOS final
@@ -47,6 +50,14 @@ class CAPTURE_EXPORT VideoCaptureDeviceFactoryChromeOS final
// succeeds.
bool Init();
+ // Actively requests the camera's static metadata for the given |device_id|.
+ void GetCameraInfo(const std::string& device_id);
+
+ // Callback invoked with |camera_info|, which contains the camera's static metadata.
+ void OnGotCameraInfo(const std::string& device_id,
+ int32_t result,
+ cros::mojom::CameraInfoPtr camera_info);
+
const scoped_refptr<base::SingleThreadTaskRunner>
task_runner_for_screen_observer_;
@@ -62,10 +73,10 @@ class CAPTURE_EXPORT VideoCaptureDeviceFactoryChromeOS final
std::unique_ptr<ReprocessManager> reprocess_manager_;
- std::unique_ptr<CrosImageCaptureImpl> cros_image_capture_;
-
bool initialized_;
+ base::WeakPtrFactory<VideoCaptureDeviceFactoryChromeOS> weak_ptr_factory_;
+
DISALLOW_COPY_AND_ASSIGN(VideoCaptureDeviceFactoryChromeOS);
};
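GetCameraInfo() issues an asynchronous request whose reply, wrapped in BindToCurrentLoop(), hops back to the factory's own sequence and is bound to a WeakPtr, so a late reply is dropped if the factory has already been destroyed. A standalone analogue of that pattern using std::weak_ptr:

#include <cstdint>
#include <functional>
#include <iostream>
#include <memory>
#include <string>

struct Factory {
  void OnGotCameraInfo(const std::string& id, int32_t result) {
    std::cout << "camera " << id << ": result " << result << "\n";
  }
};

std::function<void(int32_t)> MakeReply(std::weak_ptr<Factory> weak,
                                       std::string id) {
  return [weak, id = std::move(id)](int32_t result) {
    if (auto factory = weak.lock())          // the WeakPtr validity check
      factory->OnGotCameraInfo(id, result);  // otherwise the reply is dropped
  };
}

int main() {
  auto factory = std::make_shared<Factory>();
  auto reply = MakeReply(factory, "0");
  reply(0);         // delivered
  factory.reset();
  reply(0);         // dropped: the factory is gone
}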
diff --git a/chromium/media/capture/video/video_capture_jpeg_decoder.h b/chromium/media/capture/video/chromeos/video_capture_jpeg_decoder.h
index e2da00d54c8..34ce0dba2cc 100644
--- a/chromium/media/capture/video/video_capture_jpeg_decoder.h
+++ b/chromium/media/capture/video/chromeos/video_capture_jpeg_decoder.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_CAPTURE_VIDEO_VIDEO_CAPTURE_JPEG_DECODER_H_
-#define MEDIA_CAPTURE_VIDEO_VIDEO_CAPTURE_JPEG_DECODER_H_
+#ifndef MEDIA_CAPTURE_VIDEO_CHROMEOS_VIDEO_CAPTURE_JPEG_DECODER_H_
+#define MEDIA_CAPTURE_VIDEO_CHROMEOS_VIDEO_CAPTURE_JPEG_DECODER_H_
#include "base/callback.h"
#include "media/capture/capture_export.h"
@@ -53,4 +53,4 @@ class CAPTURE_EXPORT VideoCaptureJpegDecoder {
} // namespace media
-#endif // MEDIA_CAPTURE_VIDEO_VIDEO_CAPTURE_JPEG_DECODER_H_
+#endif // MEDIA_CAPTURE_VIDEO_CHROMEOS_VIDEO_CAPTURE_JPEG_DECODER_H_
diff --git a/chromium/media/capture/video/video_capture_jpeg_decoder_impl.cc b/chromium/media/capture/video/chromeos/video_capture_jpeg_decoder_impl.cc
index fa59e2f3866..91f3d014750 100644
--- a/chromium/media/capture/video/video_capture_jpeg_decoder_impl.cc
+++ b/chromium/media/capture/video/chromeos/video_capture_jpeg_decoder_impl.cc
@@ -2,10 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/capture/video/video_capture_jpeg_decoder_impl.h"
+#include "media/capture/video/chromeos/video_capture_jpeg_decoder_impl.h"
#include "base/bind.h"
#include "base/metrics/histogram_macros.h"
+#include "components/chromeos_camera/mojo_mjpeg_decode_accelerator.h"
#include "media/base/media_switches.h"
namespace media {
@@ -21,7 +22,8 @@ VideoCaptureJpegDecoderImpl::VideoCaptureJpegDecoderImpl(
send_log_message_cb_(std::move(send_log_message_cb)),
has_received_decoded_frame_(false),
next_bitstream_buffer_id_(0),
- in_buffer_id_(media::MjpegDecodeAccelerator::kInvalidBitstreamBufferId),
+ in_buffer_id_(
+ chromeos_camera::MjpegDecodeAccelerator::kInvalidBitstreamBufferId),
decoder_status_(INIT_PENDING),
weak_ptr_factory_(this) {}
@@ -88,7 +90,7 @@ void VideoCaptureJpegDecoderImpl::DecodeCapturedData(
// No need to lock for |in_buffer_id_| since IsDecoding_Locked() is false.
in_buffer_id_ = next_bitstream_buffer_id_;
media::BitstreamBuffer in_buffer(in_buffer_id_, in_shared_memory_->handle(),
- in_buffer_size);
+ false /* read_only */, in_buffer_size);
// Mask against 30 bits, to avoid (undefined) wraparound on signed integer.
next_bitstream_buffer_id_ = (next_bitstream_buffer_id_ + 1) & 0x3FFFFFFF;
@@ -146,9 +148,10 @@ void VideoCaptureJpegDecoderImpl::DecodeCapturedData(
// base::Unretained is safe because |decoder_| is deleted on
// |decoder_task_runner_|.
decoder_task_runner_->PostTask(
- FROM_HERE, base::BindOnce(&media::MjpegDecodeAccelerator::Decode,
- base::Unretained(decoder_.get()), in_buffer,
- std::move(out_frame)));
+ FROM_HERE,
+ base::BindOnce(&chromeos_camera::MjpegDecodeAccelerator::Decode,
+ base::Unretained(decoder_.get()), std::move(in_buffer),
+ std::move(out_frame)));
}
void VideoCaptureJpegDecoderImpl::VideoFrameReady(int32_t bitstream_buffer_id) {
@@ -170,7 +173,8 @@ void VideoCaptureJpegDecoderImpl::VideoFrameReady(int32_t bitstream_buffer_id) {
<< ", expected " << in_buffer_id_;
return;
}
- in_buffer_id_ = media::MjpegDecodeAccelerator::kInvalidBitstreamBufferId;
+ in_buffer_id_ =
+ chromeos_camera::MjpegDecodeAccelerator::kInvalidBitstreamBufferId;
std::move(decode_done_closure_).Run();
@@ -180,7 +184,7 @@ void VideoCaptureJpegDecoderImpl::VideoFrameReady(int32_t bitstream_buffer_id) {
void VideoCaptureJpegDecoderImpl::NotifyError(
int32_t bitstream_buffer_id,
- media::MjpegDecodeAccelerator::Error error) {
+ chromeos_camera::MjpegDecodeAccelerator::Error error) {
DCHECK(decoder_task_runner_->RunsTasksInCurrentSequence());
LOG(ERROR) << "Decode error, bitstream_buffer_id=" << bitstream_buffer_id
<< ", error=" << error;
@@ -194,11 +198,11 @@ void VideoCaptureJpegDecoderImpl::FinishInitialization() {
TRACE_EVENT0("gpu", "VideoCaptureJpegDecoderImpl::FinishInitialization");
DCHECK(decoder_task_runner_->RunsTasksInCurrentSequence());
- media::mojom::MjpegDecodeAcceleratorPtr remote_decoder;
+ chromeos_camera::mojom::MjpegDecodeAcceleratorPtr remote_decoder;
jpeg_decoder_factory_.Run(mojo::MakeRequest(&remote_decoder));
base::AutoLock lock(lock_);
- decoder_ = std::make_unique<media::MojoMjpegDecodeAccelerator>(
+ decoder_ = std::make_unique<chromeos_camera::MojoMjpegDecodeAccelerator>(
decoder_task_runner_, remote_decoder.PassInterface());
decoder_->InitializeAsync(
diff --git a/chromium/media/capture/video/video_capture_jpeg_decoder_impl.h b/chromium/media/capture/video/chromeos/video_capture_jpeg_decoder_impl.h
index 1b446f1f5bc..86b14dfb293 100644
--- a/chromium/media/capture/video/video_capture_jpeg_decoder_impl.h
+++ b/chromium/media/capture/video/chromeos/video_capture_jpeg_decoder_impl.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_CAPTURE_VIDEO_VIDEO_CAPTURE_JPEG_DECODER_IMPL_H_
-#define MEDIA_CAPTURE_VIDEO_VIDEO_CAPTURE_JPEG_DECODER_IMPL_H_
+#ifndef MEDIA_CAPTURE_VIDEO_CHROMEOS_VIDEO_CAPTURE_JPEG_DECODER_IMPL_H_
+#define MEDIA_CAPTURE_VIDEO_CHROMEOS_VIDEO_CAPTURE_JPEG_DECODER_IMPL_H_
#include <stddef.h>
#include <stdint.h>
@@ -15,16 +15,17 @@
#include "base/macros.h"
#include "base/memory/weak_ptr.h"
#include "base/sequence_checker.h"
+#include "components/chromeos_camera/mojo_mjpeg_decode_accelerator.h"
#include "gpu/config/gpu_info.h"
#include "media/capture/capture_export.h"
+#include "media/capture/video/chromeos/video_capture_device_factory_chromeos.h"
+#include "media/capture/video/chromeos/video_capture_jpeg_decoder.h"
#include "media/capture/video/video_capture_device_factory.h"
-#include "media/capture/video/video_capture_jpeg_decoder.h"
-#include "media/mojo/clients/mojo_mjpeg_decode_accelerator.h"
namespace media {
// Implementation of media::VideoCaptureJpegDecoder that delegates to a
-// media::mojom::MjpegDecodeAccelerator. When a frame is received in
+// chromeos_camera::mojom::MjpegDecodeAccelerator. When a frame is received in
// DecodeCapturedData(), it is copied to |in_shared_memory| for IPC transport
// to |decoder_|. When the decoder is finished with the frame, |decode_done_cb_|
// is invoked. Until |decode_done_cb_| is invoked, subsequent calls to
@@ -34,7 +35,7 @@ namespace media {
// media::VideoCaptureJpegDecoder methods may be called from any thread.
class CAPTURE_EXPORT VideoCaptureJpegDecoderImpl
: public VideoCaptureJpegDecoder,
- public MjpegDecodeAccelerator::Client {
+ public chromeos_camera::MjpegDecodeAccelerator::Client {
public:
VideoCaptureJpegDecoderImpl(
MojoMjpegDecodeAcceleratorFactoryCB jpeg_decoder_factory,
@@ -54,11 +55,12 @@ class CAPTURE_EXPORT VideoCaptureJpegDecoderImpl
base::TimeDelta timestamp,
media::VideoCaptureDevice::Client::Buffer out_buffer) override;
- // MjpegDecodeAccelerator::Client implementation.
+ // chromeos_camera::MjpegDecodeAccelerator::Client implementation.
// These will be called on |decoder_task_runner|.
void VideoFrameReady(int32_t buffer_id) override;
- void NotifyError(int32_t buffer_id,
- media::MjpegDecodeAccelerator::Error error) override;
+ void NotifyError(
+ int32_t buffer_id,
+ chromeos_camera::MjpegDecodeAccelerator::Error error) override;
private:
void FinishInitialization();
@@ -76,7 +78,7 @@ class CAPTURE_EXPORT VideoCaptureJpegDecoderImpl
scoped_refptr<base::SequencedTaskRunner> decoder_task_runner_;
// The underlying JPEG decode accelerator.
- std::unique_ptr<media::MjpegDecodeAccelerator> decoder_;
+ std::unique_ptr<chromeos_camera::MjpegDecodeAccelerator> decoder_;
// The callback to run when decode succeeds.
const DecodeDoneCB decode_done_cb_;
@@ -111,4 +113,4 @@ class CAPTURE_EXPORT VideoCaptureJpegDecoderImpl
} // namespace media
-#endif // MEDIA_CAPTURE_VIDEO_VIDEO_CAPTURE_JPEG_DECODER_IMPL_H_
+#endif // MEDIA_CAPTURE_VIDEO_CHROMEOS_VIDEO_CAPTURE_JPEG_DECODER_IMPL_H_
diff --git a/chromium/media/capture/video/fake_video_capture_device.cc b/chromium/media/capture/video/fake_video_capture_device.cc
index e95dcfb127d..300f836e4f7 100644
--- a/chromium/media/capture/video/fake_video_capture_device.cc
+++ b/chromium/media/capture/video/fake_video_capture_device.cc
@@ -109,6 +109,44 @@ const VideoCaptureFormat& FindClosestSupportedFormat(
return supported_formats[best_index];
}
+gfx::ColorSpace GetDefaultColorSpace(VideoPixelFormat format) {
+ switch (format) {
+ case PIXEL_FORMAT_UYVY:
+ case PIXEL_FORMAT_YUY2:
+ case PIXEL_FORMAT_YV12:
+ case PIXEL_FORMAT_I420:
+ case PIXEL_FORMAT_I422:
+ case PIXEL_FORMAT_I420A:
+ case PIXEL_FORMAT_I444:
+ case PIXEL_FORMAT_NV12:
+ case PIXEL_FORMAT_NV21:
+ case PIXEL_FORMAT_MT21:
+ case PIXEL_FORMAT_YUV420P9:
+ case PIXEL_FORMAT_YUV420P10:
+ case PIXEL_FORMAT_YUV422P9:
+ case PIXEL_FORMAT_YUV422P10:
+ case PIXEL_FORMAT_YUV444P9:
+ case PIXEL_FORMAT_YUV444P10:
+ case PIXEL_FORMAT_YUV420P12:
+ case PIXEL_FORMAT_YUV422P12:
+ case PIXEL_FORMAT_YUV444P12:
+ case PIXEL_FORMAT_P016LE:
+ case PIXEL_FORMAT_Y16:
+ return gfx::ColorSpace::CreateREC601();
+ case PIXEL_FORMAT_ARGB:
+ case PIXEL_FORMAT_XRGB:
+ case PIXEL_FORMAT_RGB24:
+ case PIXEL_FORMAT_RGB32:
+ case PIXEL_FORMAT_MJPEG:
+ case PIXEL_FORMAT_ABGR:
+ case PIXEL_FORMAT_XBGR:
+ return gfx::ColorSpace::CreateSRGB();
+ case PIXEL_FORMAT_UNKNOWN:
+ return gfx::ColorSpace();
+ }
+ return gfx::ColorSpace();
+}
+
} // anonymous namespace
// Paints and delivers frames to a client, which is set via Initialize().
@@ -366,7 +404,7 @@ void PacmanFramePainter::DrawPacman(base::TimeDelta elapsed_time,
milliseconds, frame_count);
canvas.scale(3, 3);
canvas.drawSimpleText(time_string.data(), time_string.length(),
- kUTF8_SkTextEncoding, 30, 20, font, paint);
+ SkTextEncoding::kUTF8, 30, 20, font, paint);
if (pixel_format_ == Format::Y16) {
// Use 8 bit bitmap rendered to first half of the buffer as high byte values
@@ -592,9 +630,10 @@ void OwnBufferFrameDeliverer::PaintAndDeliverNextFrame(
memset(buffer_.get(), 0, frame_size);
frame_painter()->PaintFrame(timestamp_to_paint, buffer_.get());
base::TimeTicks now = base::TimeTicks::Now();
- client()->OnIncomingCapturedData(buffer_.get(), frame_size,
- device_state()->format, 0 /* rotation */,
- now, CalculateTimeSinceFirstInvocation(now));
+ client()->OnIncomingCapturedData(
+ buffer_.get(), frame_size, device_state()->format,
+ GetDefaultColorSpace(device_state()->format.pixel_format),
+ 0 /* rotation */, now, CalculateTimeSinceFirstInvocation(now));
}
ClientBufferFrameDeliverer::ClientBufferFrameDeliverer(
@@ -665,9 +704,10 @@ void JpegEncodingFrameDeliverer::PaintAndDeliverNextFrame(
const size_t frame_size = jpeg_buffer_.size();
base::TimeTicks now = base::TimeTicks::Now();
- client()->OnIncomingCapturedData(&jpeg_buffer_[0], frame_size,
- device_state()->format, 0 /* rotation */,
- now, CalculateTimeSinceFirstInvocation(now));
+ client()->OnIncomingCapturedData(
+ &jpeg_buffer_[0], frame_size, device_state()->format,
+ gfx::ColorSpace::CreateJpeg(), 0 /* rotation */, now,
+ CalculateTimeSinceFirstInvocation(now));
}
void FakeVideoCaptureDevice::BeepAndScheduleNextCapture(
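GetDefaultColorSpace() gives the fake device a deterministic color space per pixel format: every YUV flavor maps to Rec601, the RGB and MJPEG formats to sRGB, and PIXEL_FORMAT_UNKNOWN to an invalid (unset) space. A hypothetical spot-check of that mapping; the function is file-local, so this assumes it were exposed to a test target linking ui/gfx:

#include <cassert>
#include "ui/gfx/color_space.h"

void CheckDefaults() {
  assert(GetDefaultColorSpace(media::PIXEL_FORMAT_I420) ==
         gfx::ColorSpace::CreateREC601());
  assert(GetDefaultColorSpace(media::PIXEL_FORMAT_ARGB) ==
         gfx::ColorSpace::CreateSRGB());
  assert(!GetDefaultColorSpace(media::PIXEL_FORMAT_UNKNOWN).IsValid());
}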
diff --git a/chromium/media/capture/video/fake_video_capture_device_unittest.cc b/chromium/media/capture/video/fake_video_capture_device_unittest.cc
index c5b3d806ba6..387ad551990 100644
--- a/chromium/media/capture/video/fake_video_capture_device_unittest.cc
+++ b/chromium/media/capture/video/fake_video_capture_device_unittest.cc
@@ -147,7 +147,7 @@ class FakeVideoCaptureDeviceTestBase : public ::testing::Test {
void SetUp() override { EXPECT_CALL(*client_, OnError(_, _, _)).Times(0); }
std::unique_ptr<MockVideoCaptureDeviceClient> CreateClient() {
- auto result = std::make_unique<MockVideoCaptureDeviceClient>();
+ auto result = std::make_unique<NiceMockVideoCaptureDeviceClient>();
ON_CALL(*result, ReserveOutputBuffer(_, _, _, _))
.WillByDefault(
Invoke([](const gfx::Size& dimensions, VideoPixelFormat format, int,
@@ -157,12 +157,12 @@ class FakeVideoCaptureDeviceTestBase : public ::testing::Test {
*buffer = CreateStubBuffer(0, frame_format.ImageAllocationSize());
return VideoCaptureDevice::Client::ReserveResult::kSucceeded;
}));
- ON_CALL(*result, OnIncomingCapturedData(_, _, _, _, _, _, _))
- .WillByDefault(
- Invoke([this](const uint8_t*, int,
- const media::VideoCaptureFormat& frame_format, int,
- base::TimeTicks, base::TimeDelta,
- int) { OnFrameCaptured(frame_format); }));
+ ON_CALL(*result, OnIncomingCapturedData(_, _, _, _, _, _, _, _))
+ .WillByDefault(Invoke(
+ [this](const uint8_t*, int,
+ const media::VideoCaptureFormat& frame_format,
+ const gfx::ColorSpace&, int, base::TimeTicks,
+ base::TimeDelta, int) { OnFrameCaptured(frame_format); }));
ON_CALL(*result, OnIncomingCapturedGfxBuffer(_, _, _, _, _, _))
.WillByDefault(
Invoke([this](gfx::GpuMemoryBuffer*,
diff --git a/chromium/media/capture/video/file_video_capture_device.cc b/chromium/media/capture/video/file_video_capture_device.cc
index c67c9a2dcc9..18d506afd32 100644
--- a/chromium/media/capture/video/file_video_capture_device.cc
+++ b/chromium/media/capture/video/file_video_capture_device.cc
@@ -18,7 +18,7 @@
#include "media/capture/mojom/image_capture_types.h"
#include "media/capture/video/blob_utils.h"
#include "media/capture/video_capture_types.h"
-#include "media/filters/jpeg_parser.h"
+#include "media/parsers/jpeg_parser.h"
namespace media {
@@ -431,8 +431,11 @@ void FileVideoCaptureDevice::OnCaptureTask() {
const base::TimeTicks current_time = base::TimeTicks::Now();
if (first_ref_time_.is_null())
first_ref_time_ = current_time;
- client_->OnIncomingCapturedData(frame_ptr, frame_size, capture_format_, 0,
- current_time, current_time - first_ref_time_);
+ // Leave the color space unset for compatibility purposes, but this
+ // information should be retrieved from the container when possible.
+ client_->OnIncomingCapturedData(frame_ptr, frame_size, capture_format_,
+ gfx::ColorSpace(), 0, current_time,
+ current_time - first_ref_time_);
// Process waiting photo callbacks
while (!take_photo_callbacks_.empty()) {
diff --git a/chromium/media/capture/video/file_video_capture_device_unittest.cc b/chromium/media/capture/video/file_video_capture_device_unittest.cc
index 28ab439fd36..c32d147251f 100644
--- a/chromium/media/capture/video/file_video_capture_device_unittest.cc
+++ b/chromium/media/capture/video/file_video_capture_device_unittest.cc
@@ -50,7 +50,8 @@ class MockImageCaptureClient {
class FileVideoCaptureDeviceTest : public ::testing::Test {
protected:
- FileVideoCaptureDeviceTest() : client_(new MockVideoCaptureDeviceClient()) {}
+ FileVideoCaptureDeviceTest()
+ : client_(new NiceMockVideoCaptureDeviceClient()) {}
void SetUp() override {
EXPECT_CALL(*client_, OnError(_, _, _)).Times(0);
@@ -62,7 +63,7 @@ class FileVideoCaptureDeviceTest : public ::testing::Test {
void TearDown() override { device_->StopAndDeAllocate(); }
- std::unique_ptr<MockVideoCaptureDeviceClient> client_;
+ std::unique_ptr<NiceMockVideoCaptureDeviceClient> client_;
MockImageCaptureClient image_capture_client_;
std::unique_ptr<VideoCaptureDevice> device_;
VideoCaptureFormat last_format_;
diff --git a/chromium/media/capture/video/linux/v4l2_capture_delegate.cc b/chromium/media/capture/video/linux/v4l2_capture_delegate.cc
index d4842a881b6..839165ba082 100644
--- a/chromium/media/capture/video/linux/v4l2_capture_delegate.cc
+++ b/chromium/media/capture/video/linux/v4l2_capture_delegate.cc
@@ -891,9 +891,15 @@ void V4L2CaptureDelegate::DoCapture() {
client_->OnFrameDropped(
VideoCaptureFrameDropReason::kV4L2InvalidNumberOfBytesInBuffer);
} else {
+ // TODO(julien.isorce): build gfx color space from v4l2 color space.
+ // primary = v4l2_format->fmt.pix.colorspace;
+ // range = v4l2_format->fmt.pix.quantization;
+ // matrix = v4l2_format->fmt.pix.ycbcr_enc;
+ // transfer = v4l2_format->fmt.pix.xfer_func;
+ // See http://crbug.com/959919.
client_->OnIncomingCapturedData(
buffer_tracker->start(), buffer_tracker->payload_size(),
- capture_format_, rotation_, now, timestamp);
+ capture_format_, gfx::ColorSpace(), rotation_, now, timestamp);
}
while (!take_photo_callbacks_.empty()) {
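The TODO above leaves the V4L2 color description unused for now. A hypothetical sketch of what the mapping could look like for the common enum values; this is an illustration under stated assumptions, not the upstream fix tracked in crbug.com/959919:

#include <linux/videodev2.h>
#include "ui/gfx/color_space.h"

gfx::ColorSpace ColorSpaceFromV4L2(const v4l2_pix_format& fmt) {
  switch (fmt.colorspace) {
    case V4L2_COLORSPACE_SMPTE170M:
      return gfx::ColorSpace::CreateREC601();
    case V4L2_COLORSPACE_REC709:
      return gfx::ColorSpace::CreateREC709();
    case V4L2_COLORSPACE_SRGB:
    case V4L2_COLORSPACE_JPEG:
      return gfx::ColorSpace::CreateSRGB();
    default:
      return gfx::ColorSpace();  // unknown: leave unset, as the code does now
  }
}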
diff --git a/chromium/media/capture/video/linux/v4l2_capture_delegate_unittest.cc b/chromium/media/capture/video/linux/v4l2_capture_delegate_unittest.cc
index 34075f686a0..c186e14ede0 100644
--- a/chromium/media/capture/video/linux/v4l2_capture_delegate_unittest.cc
+++ b/chromium/media/capture/video/linux/v4l2_capture_delegate_unittest.cc
@@ -234,7 +234,7 @@ TEST_F(V4L2CaptureDelegateTest, MAYBE_CreateAndDestroyAndVerifyControls) {
base::RunLoop run_loop;
base::Closure quit_closure = run_loop.QuitClosure();
- EXPECT_CALL(*client_ptr, OnIncomingCapturedData(_, _, _, _, _, _, _))
+ EXPECT_CALL(*client_ptr, OnIncomingCapturedData(_, _, _, _, _, _, _, _))
.Times(1)
.WillOnce(RunClosure(quit_closure));
run_loop.Run();
diff --git a/chromium/media/capture/video/linux/video_capture_device_factory_linux_unittest.cc b/chromium/media/capture/video/linux/video_capture_device_factory_linux_unittest.cc
index bb16228e6c3..7224c5107ee 100644
--- a/chromium/media/capture/video/linux/video_capture_device_factory_linux_unittest.cc
+++ b/chromium/media/capture/video/linux/video_capture_device_factory_linux_unittest.cc
@@ -123,12 +123,12 @@ TEST_F(VideoCaptureDeviceFactoryLinuxTest,
arbitrary_params.requested_format.frame_size = gfx::Size(1280, 720);
arbitrary_params.requested_format.frame_rate = 30.0f;
arbitrary_params.requested_format.pixel_format = PIXEL_FORMAT_I420;
- auto client = std::make_unique<MockVideoCaptureDeviceClient>();
+ auto client = std::make_unique<NiceMockVideoCaptureDeviceClient>();
MockVideoCaptureDeviceClient* client_ptr = client.get();
base::RunLoop wait_loop;
static const int kFrameToReceive = 3;
- EXPECT_CALL(*client_ptr, OnIncomingCapturedData(_, _, _, _, _, _, _))
+ EXPECT_CALL(*client_ptr, OnIncomingCapturedData(_, _, _, _, _, _, _, _))
.WillRepeatedly(InvokeWithoutArgs([&wait_loop]() {
static int received_frame_count = 0;
received_frame_count++;
diff --git a/chromium/media/capture/video/mac/video_capture_device_avfoundation_mac.mm b/chromium/media/capture/video/mac/video_capture_device_avfoundation_mac.mm
index 132a1c3de1f..6e7d5ba9e1a 100644
--- a/chromium/media/capture/video/mac/video_capture_device_avfoundation_mac.mm
+++ b/chromium/media/capture/video/mac/video_capture_device_avfoundation_mac.mm
@@ -490,6 +490,7 @@ void ExtractBaseAddressAndLength(char** base_address,
const media::VideoCaptureFormat captureFormat(
gfx::Size(dimensions.width, dimensions.height), frameRate_,
FourCCToChromiumPixelFormat(fourcc));
+ gfx::ColorSpace colorSpace;
// We have certain format expectation for capture output:
// For MJPEG, |sampleBuffer| is expected to always be a CVBlockBuffer.
@@ -510,6 +511,11 @@ void ExtractBaseAddressAndLength(char** base_address,
baseAddress = static_cast<char*>(CVPixelBufferGetBaseAddress(videoFrame));
frameSize = CVPixelBufferGetHeight(videoFrame) *
CVPixelBufferGetBytesPerRow(videoFrame);
+
+ // TODO(julien.isorce): move GetImageBufferColorSpace(CVImageBufferRef)
+ // from media::VTVideoDecodeAccelerator to media/base/mac and call it
+ // here to get the color space. See https://crbug.com/959962.
+ // colorSpace = media::GetImageBufferColorSpace(videoFrame);
} else {
videoFrame = nil;
}
@@ -531,7 +537,8 @@ void ExtractBaseAddressAndLength(char** base_address,
if (frameReceiver_ && baseAddress) {
frameReceiver_->ReceiveFrame(reinterpret_cast<uint8_t*>(baseAddress),
- frameSize, captureFormat, 0, 0, timestamp);
+ frameSize, captureFormat, colorSpace, 0, 0,
+ timestamp);
}
}
diff --git a/chromium/media/capture/video/mac/video_capture_device_decklink_mac.h b/chromium/media/capture/video/mac/video_capture_device_decklink_mac.h
index e642354a4a6..98bc8fd276f 100644
--- a/chromium/media/capture/video/mac/video_capture_device_decklink_mac.h
+++ b/chromium/media/capture/video/mac/video_capture_device_decklink_mac.h
@@ -57,6 +57,7 @@ class CAPTURE_EXPORT VideoCaptureDeviceDeckLinkMac : public VideoCaptureDevice {
void OnIncomingCapturedData(const uint8_t* data,
size_t length,
const VideoCaptureFormat& frame_format,
+ const gfx::ColorSpace& color_space,
int rotation, // Clockwise.
base::TimeTicks reference_time,
base::TimeDelta timestamp);
diff --git a/chromium/media/capture/video/mac/video_capture_device_decklink_mac.mm b/chromium/media/capture/video/mac/video_capture_device_decklink_mac.mm
index 53e96499cc6..f49981cd623 100644
--- a/chromium/media/capture/video/mac/video_capture_device_decklink_mac.mm
+++ b/chromium/media/capture/video/mac/video_capture_device_decklink_mac.mm
@@ -292,9 +292,12 @@ HRESULT DeckLinkCaptureDelegate::VideoInputFrameArrived(
} else {
timestamp = now - first_ref_time_;
}
+ // TODO(julien.isorce): Build a gfx::ColorSpace from the DeckLink API, i.e.
+ // using BMDDisplayModeFlags or BMDDeckLinkFrameMetadataID. See
+ // http://crbug.com/959953.
frame_receiver_->OnIncomingCapturedData(
video_data, video_frame->GetRowBytes() * video_frame->GetHeight(),
- capture_format,
+ capture_format, gfx::ColorSpace(),
0, // Rotation.
now, timestamp);
}
@@ -486,13 +489,14 @@ void VideoCaptureDeviceDeckLinkMac::OnIncomingCapturedData(
const uint8_t* data,
size_t length,
const VideoCaptureFormat& frame_format,
+ const gfx::ColorSpace& color_space,
int rotation, // Clockwise.
base::TimeTicks reference_time,
base::TimeDelta timestamp) {
base::AutoLock lock(lock_);
if (client_) {
- client_->OnIncomingCapturedData(data, length, frame_format, rotation,
- reference_time, timestamp);
+ client_->OnIncomingCapturedData(data, length, frame_format, color_space,
+ rotation, reference_time, timestamp);
}
}
diff --git a/chromium/media/capture/video/mac/video_capture_device_mac.h b/chromium/media/capture/video/mac/video_capture_device_mac.h
index ef1dc218c73..ba02224944a 100644
--- a/chromium/media/capture/video/mac/video_capture_device_mac.h
+++ b/chromium/media/capture/video/mac/video_capture_device_mac.h
@@ -75,6 +75,7 @@ class VideoCaptureDeviceMac : public VideoCaptureDevice {
void ReceiveFrame(const uint8_t* video_frame,
int video_frame_length,
const VideoCaptureFormat& frame_format,
+ const gfx::ColorSpace color_space,
int aspect_numerator,
int aspect_denominator,
base::TimeDelta timestamp);
diff --git a/chromium/media/capture/video/mac/video_capture_device_mac.mm b/chromium/media/capture/video/mac/video_capture_device_mac.mm
index 39fc01f25ae..09748531281 100644
--- a/chromium/media/capture/video/mac/video_capture_device_mac.mm
+++ b/chromium/media/capture/video/mac/video_capture_device_mac.mm
@@ -442,6 +442,7 @@ bool VideoCaptureDeviceMac::Init(VideoCaptureApi capture_api_type) {
void VideoCaptureDeviceMac::ReceiveFrame(const uint8_t* video_frame,
int video_frame_length,
const VideoCaptureFormat& frame_format,
+ const gfx::ColorSpace color_space,
int aspect_numerator,
int aspect_denominator,
base::TimeDelta timestamp) {
@@ -454,7 +455,8 @@ void VideoCaptureDeviceMac::ReceiveFrame(const uint8_t* video_frame,
}
client_->OnIncomingCapturedData(video_frame, video_frame_length, frame_format,
- 0, base::TimeTicks::Now(), timestamp);
+ color_space, 0, base::TimeTicks::Now(),
+ timestamp);
}
void VideoCaptureDeviceMac::OnPhotoTaken(const uint8_t* image_data,
diff --git a/chromium/media/capture/video/mock_device.cc b/chromium/media/capture/video/mock_device.cc
index 080b8a40301..833cde009b9 100644
--- a/chromium/media/capture/video/mock_device.cc
+++ b/chromium/media/capture/video/mock_device.cc
@@ -21,7 +21,7 @@ void MockDevice::SendStubFrame(const media::VideoCaptureFormat& format,
stub_frame->data(0),
static_cast<int>(media::VideoFrame::AllocationSize(
stub_frame->format(), stub_frame->coded_size())),
- format, rotation, base::TimeTicks(), base::TimeDelta(),
+ format, gfx::ColorSpace(), rotation, base::TimeTicks(), base::TimeDelta(),
frame_feedback_id);
}
diff --git a/chromium/media/capture/video/mock_device_factory.cc b/chromium/media/capture/video/mock_device_factory.cc
index cea527f76ea..ebfa57074db 100644
--- a/chromium/media/capture/video/mock_device_factory.cc
+++ b/chromium/media/capture/video/mock_device_factory.cc
@@ -4,6 +4,8 @@
#include "media/capture/video/mock_device_factory.h"
+#include <utility>
+
namespace {
// Report a single hard-coded supported format to clients.
@@ -84,7 +86,7 @@ void MockDeviceFactory::GetSupportedFormats(
void MockDeviceFactory::GetCameraLocationsAsync(
std::unique_ptr<media::VideoCaptureDeviceDescriptors> device_descriptors,
DeviceDescriptorsCallback result_callback) {
- base::ResetAndReturn(&result_callback).Run(std::move(device_descriptors));
+ std::move(result_callback).Run(std::move(device_descriptors));
}
} // namespace media
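base::ResetAndReturn() predates base::OnceCallback; with a OnceCallback, plain std::move() expresses the same consume-exactly-once contract. A standalone analogue with move-only state and an rvalue-qualified Run():

#include <iostream>
#include <memory>
#include <utility>

// Minimal analogue of base::OnceCallback: move-only state, runnable once.
struct OnceClosure {
  std::unique_ptr<int> payload;
  void Run() && { std::cout << *payload << "\n"; }
};

int main() {
  OnceClosure cb{std::make_unique<int>(42)};
  std::move(cb).Run();  // same shape as std::move(result_callback).Run(...)
}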
diff --git a/chromium/media/capture/video/mock_video_capture_device_client.h b/chromium/media/capture/video/mock_video_capture_device_client.h
index b0e2d76f5e0..383df0ef586 100644
--- a/chromium/media/capture/video/mock_video_capture_device_client.h
+++ b/chromium/media/capture/video/mock_video_capture_device_client.h
@@ -15,10 +15,11 @@ class MockVideoCaptureDeviceClient : public VideoCaptureDevice::Client {
MockVideoCaptureDeviceClient();
~MockVideoCaptureDeviceClient() override;
- MOCK_METHOD7(OnIncomingCapturedData,
+ MOCK_METHOD8(OnIncomingCapturedData,
void(const uint8_t* data,
int length,
const media::VideoCaptureFormat& frame_format,
+ const gfx::ColorSpace& color_space,
int rotation,
base::TimeTicks reference_time,
base::TimeDelta timestamp,
@@ -68,6 +69,9 @@ class MockVideoCaptureDeviceClient : public VideoCaptureDevice::Client {
const media::VideoFrameMetadata& additional_metadata));
};
+using NiceMockVideoCaptureDeviceClient =
+ ::testing::NiceMock<MockVideoCaptureDeviceClient>;
+
} // namespace media
#endif // MEDIA_CAPTURE_VIDEO_MOCK_VIDEO_CAPTURE_DEVICE_CLIENT_H_
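The new alias wraps the mock in ::testing::NiceMock, which silences gmock's "uninteresting mock function call" warnings for methods a test sets no EXPECT_CALL on; each test file above switches to it with a one-word change. A minimal sketch of the same pattern, assuming gmock is available:

#include "testing/gmock/include/gmock/gmock.h"

struct MockSink {
  MOCK_METHOD1(OnFrame, void(int));
};
using NiceMockSink = ::testing::NiceMock<MockSink>;

// In a test body:
//   NiceMockSink sink;
//   sink.OnFrame(1);  // no warning, unlike a plain MockSink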
diff --git a/chromium/media/capture/video/video_capture_device.h b/chromium/media/capture/video/video_capture_device.h
index 3cef0193e4d..7255cf9bbef 100644
--- a/chromium/media/capture/video/video_capture_device.h
+++ b/chromium/media/capture/video/video_capture_device.h
@@ -147,6 +147,7 @@ class CAPTURE_EXPORT VideoCaptureDevice
virtual void OnIncomingCapturedData(const uint8_t* data,
int length,
const VideoCaptureFormat& frame_format,
+ const gfx::ColorSpace& color_space,
int clockwise_rotation,
base::TimeTicks reference_time,
base::TimeDelta timestamp,
diff --git a/chromium/media/capture/video/video_capture_device_client.cc b/chromium/media/capture/video/video_capture_device_client.cc
index 6c7cf02e6df..2ea5b303b35 100644
--- a/chromium/media/capture/video/video_capture_device_client.cc
+++ b/chromium/media/capture/video/video_capture_device_client.cc
@@ -19,11 +19,14 @@
#include "media/capture/video/scoped_buffer_pool_reservation.h"
#include "media/capture/video/video_capture_buffer_handle.h"
#include "media/capture/video/video_capture_buffer_pool.h"
-#include "media/capture/video/video_capture_jpeg_decoder.h"
#include "media/capture/video/video_frame_receiver.h"
#include "media/capture/video_capture_types.h"
#include "third_party/libyuv/include/libyuv.h"
+#if defined(OS_CHROMEOS)
+#include "media/capture/video/chromeos/video_capture_jpeg_decoder.h"
+#endif // defined(OS_CHROMEOS)
+
namespace {
bool IsFormatSupported(media::VideoPixelFormat pixel_format) {
@@ -65,6 +68,50 @@ void GetI420BufferAccess(
*uv_plane_stride = *y_plane_stride / 2;
}
+gfx::ColorSpace OverrideColorSpaceForLibYuvConversion(
+ const gfx::ColorSpace& color_space,
+ const media::VideoPixelFormat pixel_format) {
+ gfx::ColorSpace overriden_color_space = color_space;
+ switch (pixel_format) {
+ case media::PIXEL_FORMAT_UNKNOWN: // Color format not set.
+ break;
+ case media::PIXEL_FORMAT_ARGB:
+ case media::PIXEL_FORMAT_XRGB:
+ case media::PIXEL_FORMAT_RGB24:
+ case media::PIXEL_FORMAT_RGB32:
+ case media::PIXEL_FORMAT_ABGR:
+ case media::PIXEL_FORMAT_XBGR:
+ // Check if we can merge data's primaries and transfer function into the
+ // returned color space.
+ if (color_space.IsValid()) {
+ // The raw data is rgb so we expect its color space to only hold gamma
+ // correction.
+ DCHECK(color_space == color_space.GetAsFullRangeRGB());
+
+ // This captured ARGB data is going to be converted to yuv using libyuv
+ // ConvertToI420 which internally uses Rec601 coefficients. So build a
+ // combined colorspace that contains both the above gamma correction
+ // and the yuv conversion information.
+ // TODO(julien.isorce): instead pass color space information to libyuv
+ // once the support is added, see http://crbug.com/libyuv/835.
+ overriden_color_space = color_space.GetWithMatrixAndRange(
+ gfx::ColorSpace::MatrixID::SMPTE170M,
+ gfx::ColorSpace::RangeID::LIMITED);
+ } else {
+ // Color space is not specified, but it's probably safe to assume it's
+ // sRGB, and so it would be valid to assume that libyuv's
+ // ConvertToI420() is going to produce results in Rec601, or very close
+ // to it.
+ overriden_color_space = gfx::ColorSpace::CreateREC601();
+ }
+ break;
+ default:
+ break;
+ }
+
+ return overriden_color_space;
+}
+
} // anonymous namespace
namespace media {
@@ -96,6 +143,7 @@ class BufferPoolBufferHandleProvider
const int buffer_id_;
};
+#if defined(OS_CHROMEOS)
VideoCaptureDeviceClient::VideoCaptureDeviceClient(
VideoCaptureBufferType target_buffer_type,
std::unique_ptr<VideoFrameReceiver> receiver,
@@ -111,6 +159,16 @@ VideoCaptureDeviceClient::VideoCaptureDeviceClient(
base::Bind(&VideoFrameReceiver::OnStartedUsingGpuDecode,
base::Unretained(receiver_.get()));
}
+#else
+VideoCaptureDeviceClient::VideoCaptureDeviceClient(
+ VideoCaptureBufferType target_buffer_type,
+ std::unique_ptr<VideoFrameReceiver> receiver,
+ scoped_refptr<VideoCaptureBufferPool> buffer_pool)
+ : target_buffer_type_(target_buffer_type),
+ receiver_(std::move(receiver)),
+ buffer_pool_(std::move(buffer_pool)),
+ last_captured_pixel_format_(PIXEL_FORMAT_UNKNOWN) {}
+#endif // defined(OS_CHROMEOS)
VideoCaptureDeviceClient::~VideoCaptureDeviceClient() {
for (int buffer_id : buffer_ids_known_by_receiver_)
@@ -134,6 +192,7 @@ void VideoCaptureDeviceClient::OnIncomingCapturedData(
const uint8_t* data,
int length,
const VideoCaptureFormat& format,
+ const gfx::ColorSpace& data_color_space,
int rotation,
base::TimeTicks reference_time,
base::TimeDelta timestamp,
@@ -146,6 +205,7 @@ void VideoCaptureDeviceClient::OnIncomingCapturedData(
OnLog("Pixel format: " + VideoPixelFormatToString(format.pixel_format));
last_captured_pixel_format_ = format.pixel_format;
+#if defined(OS_CHROMEOS)
if (format.pixel_format == PIXEL_FORMAT_MJPEG &&
optional_jpeg_decoder_factory_callback_) {
external_jpeg_decoder_ =
@@ -153,6 +213,7 @@ void VideoCaptureDeviceClient::OnIncomingCapturedData(
DCHECK(external_jpeg_decoder_);
external_jpeg_decoder_->Initialize();
}
+#endif // defined(OS_CHROMEOS)
}
if (!format.IsValid()) {
@@ -203,7 +264,6 @@ void VideoCaptureDeviceClient::OnIncomingCapturedData(
int crop_x = 0;
int crop_y = 0;
libyuv::FourCC fourcc_format = libyuv::FOURCC_ANY;
- gfx::ColorSpace color_space;
bool flip = false;
switch (format.pixel_format) {
@@ -252,11 +312,6 @@ void VideoCaptureDeviceClient::OnIncomingCapturedData(
// that vertical flipping is needed.
flip = true;
#endif
- // We don't actually know, for sure, what the source color space is. It's
- // probably safe to assume its sRGB, though, and so it would be valid to
- // assume libyuv::ConvertToI420() is going to produce results in Rec601
- // (or very close to it).
- color_space = gfx::ColorSpace::CreateREC601();
break;
case PIXEL_FORMAT_RGB32:
// Fallback to PIXEL_FORMAT_ARGB setting |flip| in Windows
@@ -267,7 +322,6 @@ void VideoCaptureDeviceClient::OnIncomingCapturedData(
#endif
case PIXEL_FORMAT_ARGB:
fourcc_format = libyuv::FOURCC_ARGB;
- color_space = gfx::ColorSpace::CreateREC601();
break;
case PIXEL_FORMAT_MJPEG:
fourcc_format = libyuv::FOURCC_MJPG;
@@ -276,10 +330,14 @@ void VideoCaptureDeviceClient::OnIncomingCapturedData(
NOTREACHED();
}
+ const gfx::ColorSpace color_space = OverrideColorSpaceForLibYuvConversion(
+ data_color_space, format.pixel_format);
+
// The input |length| can be greater than the required buffer size because of
// paddings and/or alignments, but it cannot be smaller.
DCHECK_GE(static_cast<size_t>(length), format.ImageAllocationSize());
+#if defined(OS_CHROMEOS)
if (external_jpeg_decoder_) {
const VideoCaptureJpegDecoder::STATUS status =
external_jpeg_decoder_->GetStatus();
@@ -295,6 +353,7 @@ void VideoCaptureDeviceClient::OnIncomingCapturedData(
return;
}
}
+#endif // defined(OS_CHROMEOS)
// libyuv::ConvertToI420 use Rec601 to convert RGB to YUV.
if (libyuv::ConvertToI420(
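OverrideColorSpaceForLibYuvConversion() folds the Rec601 matrix that libyuv::ConvertToI420() applies into the color space reported for the frame. Illustrative only, reusing the calls from the hunk above:

// An sRGB-tagged ARGB frame about to be converted by ConvertToI420():
gfx::ColorSpace srgb = gfx::ColorSpace::CreateSRGB();
gfx::ColorSpace as_yuv = srgb.GetWithMatrixAndRange(
    gfx::ColorSpace::MatrixID::SMPTE170M, gfx::ColorSpace::RangeID::LIMITED);
// as_yuv keeps the sRGB primaries and transfer function but now also
// describes the limited-range Rec601 YUV encoding the conversion produced.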
diff --git a/chromium/media/capture/video/video_capture_device_client.h b/chromium/media/capture/video/video_capture_device_client.h
index dfea6ac3e0a..05fd66a144d 100644
--- a/chromium/media/capture/video/video_capture_device_client.h
+++ b/chromium/media/capture/video/video_capture_device_client.h
@@ -43,11 +43,17 @@ using VideoCaptureJpegDecoderFactoryCB =
class CAPTURE_EXPORT VideoCaptureDeviceClient
: public VideoCaptureDevice::Client {
public:
+#if defined(OS_CHROMEOS)
VideoCaptureDeviceClient(
VideoCaptureBufferType target_buffer_type,
std::unique_ptr<VideoFrameReceiver> receiver,
scoped_refptr<VideoCaptureBufferPool> buffer_pool,
- VideoCaptureJpegDecoderFactoryCB optional_jpeg_decoder_factory_callback);
+ VideoCaptureJpegDecoderFactoryCB jpeg_decoder_factory_callback);
+#else
+ VideoCaptureDeviceClient(VideoCaptureBufferType target_buffer_type,
+ std::unique_ptr<VideoFrameReceiver> receiver,
+ scoped_refptr<VideoCaptureBufferPool> buffer_pool);
+#endif // defined(OS_CHROMEOS)
~VideoCaptureDeviceClient() override;
static Buffer MakeBufferStruct(
@@ -59,6 +65,7 @@ class CAPTURE_EXPORT VideoCaptureDeviceClient
void OnIncomingCapturedData(const uint8_t* data,
int length,
const VideoCaptureFormat& frame_format,
+ const gfx::ColorSpace& color_space,
int clockwise_rotation,
base::TimeTicks reference_time,
base::TimeDelta timestamp,
@@ -108,9 +115,11 @@ class CAPTURE_EXPORT VideoCaptureDeviceClient
const std::unique_ptr<VideoFrameReceiver> receiver_;
std::vector<int> buffer_ids_known_by_receiver_;
+#if defined(OS_CHROMEOS)
VideoCaptureJpegDecoderFactoryCB optional_jpeg_decoder_factory_callback_;
std::unique_ptr<VideoCaptureJpegDecoder> external_jpeg_decoder_;
base::OnceClosure on_started_using_gpu_cb_;
+#endif // defined(OS_CHROMEOS)
// The pool of shared-memory buffers used for capturing.
const scoped_refptr<VideoCaptureBufferPool> buffer_pool_;
diff --git a/chromium/media/capture/video/video_capture_device_client_unittest.cc b/chromium/media/capture/video/video_capture_device_client_unittest.cc
index 5041270224b..a8f3368957b 100644
--- a/chromium/media/capture/video/video_capture_device_client_unittest.cc
+++ b/chromium/media/capture/video/video_capture_device_client_unittest.cc
@@ -17,25 +17,31 @@
#include "media/capture/video/mock_video_frame_receiver.h"
#include "media/capture/video/video_capture_buffer_pool_impl.h"
#include "media/capture/video/video_capture_buffer_tracker_factory_impl.h"
-#include "media/capture/video/video_capture_jpeg_decoder.h"
#include "media/capture/video/video_frame_receiver.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
+#if defined(OS_CHROMEOS)
+#include "media/capture/video/chromeos/video_capture_jpeg_decoder.h"
+#endif // defined(OS_CHROMEOS)
+
using ::testing::_;
using ::testing::AtLeast;
-using ::testing::Mock;
using ::testing::InSequence;
using ::testing::Invoke;
+using ::testing::Mock;
+using ::testing::NiceMock;
using ::testing::SaveArg;
namespace media {
namespace {
+#if defined(OS_CHROMEOS)
std::unique_ptr<VideoCaptureJpegDecoder> ReturnNullPtrAsJpecDecoder() {
return nullptr;
}
+#endif // defined(OS_CHROMEOS)
} // namespace
@@ -50,18 +56,24 @@ class VideoCaptureDeviceClientTest : public ::testing::Test {
scoped_refptr<VideoCaptureBufferPoolImpl> buffer_pool(
new VideoCaptureBufferPoolImpl(
std::make_unique<VideoCaptureBufferTrackerFactoryImpl>(), 2));
- auto controller = std::make_unique<MockVideoFrameReceiver>();
+ auto controller = std::make_unique<NiceMock<MockVideoFrameReceiver>>();
receiver_ = controller.get();
gpu_memory_buffer_manager_ =
std::make_unique<unittest_internal::MockGpuMemoryBufferManager>();
+#if defined(OS_CHROMEOS)
device_client_ = std::make_unique<VideoCaptureDeviceClient>(
VideoCaptureBufferType::kSharedMemory, std::move(controller),
buffer_pool, base::BindRepeating(&ReturnNullPtrAsJpecDecoder));
+#else
+ device_client_ = std::make_unique<VideoCaptureDeviceClient>(
+ VideoCaptureBufferType::kSharedMemory, std::move(controller),
+ buffer_pool);
+#endif // defined(OS_CHROMEOS)
}
~VideoCaptureDeviceClientTest() override = default;
protected:
- MockVideoFrameReceiver* receiver_;
+ NiceMock<MockVideoFrameReceiver>* receiver_;
std::unique_ptr<unittest_internal::MockGpuMemoryBufferManager>
gpu_memory_buffer_manager_;
std::unique_ptr<VideoCaptureDeviceClient> device_client_;
@@ -77,6 +89,7 @@ TEST_F(VideoCaptureDeviceClientTest, Minimal) {
unsigned char data[kScratchpadSizeInBytes] = {};
const VideoCaptureFormat kFrameFormat(gfx::Size(10, 10), 30.0f /*frame_rate*/,
PIXEL_FORMAT_I420);
+ const gfx::ColorSpace kColorSpace = gfx::ColorSpace::CreateREC601();
DCHECK(device_client_.get());
{
InSequence s;
@@ -85,9 +98,9 @@ TEST_F(VideoCaptureDeviceClientTest, Minimal) {
EXPECT_CALL(*receiver_, MockOnNewBufferHandle(expected_buffer_id));
EXPECT_CALL(*receiver_, MockOnFrameReadyInBuffer(expected_buffer_id, _, _));
}
- device_client_->OnIncomingCapturedData(data, kScratchpadSizeInBytes,
- kFrameFormat, 0 /*clockwise rotation*/,
- base::TimeTicks(), base::TimeDelta());
+ device_client_->OnIncomingCapturedData(
+ data, kScratchpadSizeInBytes, kFrameFormat, kColorSpace,
+ 0 /*clockwise rotation*/, base::TimeTicks(), base::TimeDelta());
const gfx::Size kBufferDimensions(10, 10);
const VideoCaptureFormat kFrameFormatNV12(
@@ -119,13 +132,14 @@ TEST_F(VideoCaptureDeviceClientTest, FailsSilentlyGivenInvalidFrameFormat) {
const VideoCaptureFormat kFrameFormat(
gfx::Size(limits::kMaxDimension + 1, limits::kMaxDimension),
limits::kMaxFramesPerSecond + 1, VideoPixelFormat::PIXEL_FORMAT_I420);
+ const gfx::ColorSpace kColorSpace = gfx::ColorSpace::CreateREC601();
DCHECK(device_client_.get());
// Expect the call to fail silently inside the VideoCaptureDeviceClient.
EXPECT_CALL(*receiver_, OnLog(_)).Times(AtLeast(1));
EXPECT_CALL(*receiver_, MockOnFrameReadyInBuffer(_, _, _)).Times(0);
- device_client_->OnIncomingCapturedData(data, kScratchpadSizeInBytes,
- kFrameFormat, 0 /*clockwise rotation*/,
- base::TimeTicks(), base::TimeDelta());
+ device_client_->OnIncomingCapturedData(
+ data, kScratchpadSizeInBytes, kFrameFormat, kColorSpace,
+ 0 /*clockwise rotation*/, base::TimeTicks(), base::TimeDelta());
const gfx::Size kBufferDimensions(10, 10);
const VideoCaptureFormat kFrameFormatNV12(
@@ -148,6 +162,7 @@ TEST_F(VideoCaptureDeviceClientTest, DropsFrameIfNoBuffer) {
unsigned char data[kScratchpadSizeInBytes] = {};
const VideoCaptureFormat kFrameFormat(gfx::Size(10, 10), 30.0f /*frame_rate*/,
PIXEL_FORMAT_I420);
+ const gfx::ColorSpace kColorSpace = gfx::ColorSpace::CreateREC601();
EXPECT_CALL(*receiver_, OnLog(_)).Times(1);
// Simulate that receiver still holds |buffer_read_permission| for the first
// two buffers when the third call to OnIncomingCapturedData comes in.
@@ -168,15 +183,15 @@ TEST_F(VideoCaptureDeviceClientTest, DropsFrameIfNoBuffer) {
read_permission.push_back(std::move(*buffer_read_permission));
}));
// Pass three frames. The third will be dropped.
- device_client_->OnIncomingCapturedData(data, kScratchpadSizeInBytes,
- kFrameFormat, 0 /*clockwise rotation*/,
- base::TimeTicks(), base::TimeDelta());
- device_client_->OnIncomingCapturedData(data, kScratchpadSizeInBytes,
- kFrameFormat, 0 /*clockwise rotation*/,
- base::TimeTicks(), base::TimeDelta());
- device_client_->OnIncomingCapturedData(data, kScratchpadSizeInBytes,
- kFrameFormat, 0 /*clockwise rotation*/,
- base::TimeTicks(), base::TimeDelta());
+ device_client_->OnIncomingCapturedData(
+ data, kScratchpadSizeInBytes, kFrameFormat, kColorSpace,
+ 0 /*clockwise rotation*/, base::TimeTicks(), base::TimeDelta());
+ device_client_->OnIncomingCapturedData(
+ data, kScratchpadSizeInBytes, kFrameFormat, kColorSpace,
+ 0 /*clockwise rotation*/, base::TimeTicks(), base::TimeDelta());
+ device_client_->OnIncomingCapturedData(
+ data, kScratchpadSizeInBytes, kFrameFormat, kColorSpace,
+ 0 /*clockwise rotation*/, base::TimeTicks(), base::TimeDelta());
Mock::VerifyAndClearExpectations(receiver_);
}
@@ -212,6 +227,7 @@ TEST_F(VideoCaptureDeviceClientTest, DataCaptureGoodPixelFormats) {
PIXEL_FORMAT_ARGB,
PIXEL_FORMAT_Y16,
};
+ const gfx::ColorSpace kColorSpace = gfx::ColorSpace::CreateSRGB();
for (VideoPixelFormat format : kSupportedFormats) {
params.requested_format.pixel_format = format;
@@ -220,8 +236,8 @@ TEST_F(VideoCaptureDeviceClientTest, DataCaptureGoodPixelFormats) {
EXPECT_CALL(*receiver_, MockOnFrameReadyInBuffer(_, _, _)).Times(1);
device_client_->OnIncomingCapturedData(
data, params.requested_format.ImageAllocationSize(),
- params.requested_format, 0 /* clockwise_rotation */, base::TimeTicks(),
- base::TimeDelta());
+ params.requested_format, kColorSpace, 0 /* clockwise_rotation */,
+ base::TimeTicks(), base::TimeDelta());
Mock::VerifyAndClearExpectations(receiver_);
}
}
@@ -260,8 +276,8 @@ TEST_F(VideoCaptureDeviceClientTest, CheckRotationsAndCrops) {
.WillOnce(SaveArg<2>(&coded_size));
device_client_->OnIncomingCapturedData(
data, params.requested_format.ImageAllocationSize(),
- params.requested_format, size_and_rotation.rotation, base::TimeTicks(),
- base::TimeDelta());
+ params.requested_format, gfx::ColorSpace(), size_and_rotation.rotation,
+ base::TimeTicks(), base::TimeDelta());
EXPECT_EQ(coded_size.width(), size_and_rotation.output_resolution.width());
EXPECT_EQ(coded_size.height(),
diff --git a/chromium/media/capture/video/video_capture_device_factory.h b/chromium/media/capture/video/video_capture_device_factory.h
index 2f95f55c936..57e8c87cb87 100644
--- a/chromium/media/capture/video/video_capture_device_factory.h
+++ b/chromium/media/capture/video/video_capture_device_factory.h
@@ -10,13 +10,9 @@
#include "base/threading/thread_checker.h"
#include "gpu/command_buffer/client/gpu_memory_buffer_manager.h"
#include "media/capture/video/video_capture_device.h"
-#include "media/mojo/interfaces/mjpeg_decode_accelerator.mojom.h"
namespace media {
-using MojoMjpegDecodeAcceleratorFactoryCB =
- base::RepeatingCallback<void(media::mojom::MjpegDecodeAcceleratorRequest)>;
-
// VideoCaptureDeviceFactory is the base class for creation of video capture
// devices in the different platforms. VCDFs are created by MediaStreamManager
// on UI thread and plugged into VideoCaptureManager, who owns and operates them
diff --git a/chromium/media/capture/video/video_capture_device_unittest.cc b/chromium/media/capture/video/video_capture_device_unittest.cc
index e5aceeaec31..d0387c9f9cf 100644
--- a/chromium/media/capture/video/video_capture_device_unittest.cc
+++ b/chromium/media/capture/video/video_capture_device_unittest.cc
@@ -47,6 +47,7 @@
#endif
#if defined(OS_CHROMEOS)
+#include "chromeos/dbus/power/power_manager_client.h"
#include "media/capture/video/chromeos/camera_buffer_factory.h"
#include "media/capture/video/chromeos/camera_hal_dispatcher_impl.h"
#include "media/capture/video/chromeos/local_gpu_memory_buffer_manager.h"
@@ -259,12 +260,13 @@ class VideoCaptureDeviceTest
std::make_unique<LocalGpuMemoryBufferManager>();
VideoCaptureDeviceFactoryChromeOS::SetGpuBufferManager(
local_gpu_memory_buffer_manager_.get());
- if (!CameraHalDispatcherImpl::GetInstance()->IsStarted()) {
+ if (media::ShouldUseCrosCameraService() &&
+ !CameraHalDispatcherImpl::GetInstance()->IsStarted()) {
CameraHalDispatcherImpl::GetInstance()->Start(
base::DoNothing::Repeatedly<
- media::mojom::MjpegDecodeAcceleratorRequest>(),
+ chromeos_camera::mojom::MjpegDecodeAcceleratorRequest>(),
base::DoNothing::Repeatedly<
- media::mojom::JpegEncodeAcceleratorRequest>());
+ chromeos_camera::mojom::JpegEncodeAcceleratorRequest>());
}
#endif
video_capture_device_factory_ =
@@ -299,17 +301,18 @@ class VideoCaptureDeviceTest
#endif
std::unique_ptr<MockVideoCaptureDeviceClient> CreateDeviceClient() {
- auto result = std::make_unique<MockVideoCaptureDeviceClient>();
+ auto result = std::make_unique<NiceMockVideoCaptureDeviceClient>();
ON_CALL(*result, OnError(_, _, _)).WillByDefault(Invoke(DumpError));
EXPECT_CALL(*result, ReserveOutputBuffer(_, _, _, _)).Times(0);
EXPECT_CALL(*result, DoOnIncomingCapturedBuffer(_, _, _, _)).Times(0);
EXPECT_CALL(*result, DoOnIncomingCapturedBufferExt(_, _, _, _, _, _, _))
.Times(0);
- ON_CALL(*result, OnIncomingCapturedData(_, _, _, _, _, _, _))
+ ON_CALL(*result, OnIncomingCapturedData(_, _, _, _, _, _, _, _))
.WillByDefault(
Invoke([this](const uint8_t* data, int length,
- const media::VideoCaptureFormat& frame_format, int,
- base::TimeTicks, base::TimeDelta, int) {
+ const media::VideoCaptureFormat& frame_format,
+ const gfx::ColorSpace&, int, base::TimeTicks,
+ base::TimeDelta, int) {
ASSERT_GT(length, 0);
ASSERT_TRUE(data);
main_thread_task_runner_->PostTask(
@@ -673,7 +676,7 @@ void VideoCaptureDeviceTest::RunCaptureMjpegTestCase() {
#if defined(OS_WIN)
base::win::Version version = base::win::GetVersion();
- if (version >= base::win::VERSION_WIN10) {
+ if (version >= base::win::Version::WIN10) {
VLOG(1) << "Skipped on Win10: http://crbug.com/570604, current: "
<< static_cast<int>(version);
return;
diff --git a/chromium/media/capture/video/video_frame_receiver_on_task_runner.cc b/chromium/media/capture/video/video_frame_receiver_on_task_runner.cc
index 7f6bb4be2f5..fc761bcb3a0 100644
--- a/chromium/media/capture/video/video_frame_receiver_on_task_runner.cc
+++ b/chromium/media/capture/video/video_frame_receiver_on_task_runner.cc
@@ -20,9 +20,8 @@ void VideoFrameReceiverOnTaskRunner::OnNewBuffer(
int buffer_id,
media::mojom::VideoBufferHandlePtr buffer_handle) {
task_runner_->PostTask(
- FROM_HERE,
- base::BindOnce(&VideoFrameReceiver::OnNewBuffer, receiver_, buffer_id,
- base::Passed(std::move(buffer_handle))));
+ FROM_HERE, base::BindOnce(&VideoFrameReceiver::OnNewBuffer, receiver_,
+ buffer_id, std::move(buffer_handle)));
}
void VideoFrameReceiverOnTaskRunner::OnFrameReadyInBuffer(
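This hunk drops base::Passed(): base::BindOnce binds move-only arguments directly with std::move. A minimal sketch under that assumption:

#include <memory>
#include <utility>

#include "base/bind.h"

void Consume(std::unique_ptr<int> value);

void PostExample() {
  auto value = std::make_unique<int>(42);
  // BindOnce takes ownership of the move-only argument; the Passed()
  // wrapper was only needed with the legacy repeating base::Bind.
  base::OnceClosure task = base::BindOnce(&Consume, std::move(value));
  std::move(task).Run();
}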
diff --git a/chromium/media/capture/video/win/sink_input_pin_win.cc b/chromium/media/capture/video/win/sink_input_pin_win.cc
index ed40946a997..92ae610bf73 100644
--- a/chromium/media/capture/video/win/sink_input_pin_win.cc
+++ b/chromium/media/capture/video/win/sink_input_pin_win.cc
@@ -13,6 +13,7 @@
#include "base/logging.h"
#include "base/stl_util.h"
+#include "base/win/win_util.h"
#include "media/base/timestamp_constants.h"
namespace media {
@@ -111,11 +112,8 @@ bool SinkInputPin::IsMediaTypeValid(const AM_MEDIA_TYPE* media_type) {
return true;
}
-#ifndef NDEBUG
- WCHAR guid_str[128];
- StringFromGUID2(sub_type, guid_str, base::size(guid_str));
- DVLOG(2) << __func__ << " unsupported media type: " << guid_str;
-#endif
+ DVLOG(2) << __func__ << " unsupported media type: "
+ << base::win::String16FromGUID(sub_type);
return false;
}
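base::win::String16FromGUID replaces the hand-rolled StringFromGUID2 buffer; the NDEBUG guard also goes away because DVLOG already compiles to a no-op in release builds. A sketch of the resulting logging helper:

#include "base/logging.h"
#include "base/win/win_util.h"

void LogUnsupportedSubtype(const GUID& sub_type) {
  // Formats the GUID as a string16 without a caller-managed WCHAR buffer.
  DVLOG(2) << "unsupported media type: "
           << base::win::String16FromGUID(sub_type);
}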
diff --git a/chromium/media/capture/video/win/video_capture_device_factory_win.cc b/chromium/media/capture/video/win/video_capture_device_factory_win.cc
index 673eeb541c5..fdcd6a863cd 100644
--- a/chromium/media/capture/video/win/video_capture_device_factory_win.cc
+++ b/chromium/media/capture/video/win/video_capture_device_factory_win.cc
@@ -326,7 +326,7 @@ void GetDeviceSupportedFormatsMediaFoundation(const Descriptor& descriptor,
bool IsEnclosureLocationSupported() {
// The DeviceInformation class is only available from Win10 onwards (v10.0.10240.0).
- if (base::win::GetVersion() < base::win::VERSION_WIN10) {
+ if (base::win::GetVersion() < base::win::Version::WIN10) {
DVLOG(1) << "DeviceInformation not supported before Windows 10";
return false;
}
diff --git a/chromium/media/capture/video/win/video_capture_device_mf_win.cc b/chromium/media/capture/video/win/video_capture_device_mf_win.cc
index 5ff5e5ebf73..86f096bcf27 100644
--- a/chromium/media/capture/video/win/video_capture_device_mf_win.cc
+++ b/chromium/media/capture/video/win/video_capture_device_mf_win.cc
@@ -1003,9 +1003,13 @@ void VideoCaptureDeviceMFWin::OnIncomingCapturedData(
client_->OnStarted();
}
+ // TODO(julien.isorce): retrieve the color space information using Media
+ // Foundation API, MFGetAttributeSize/MF_MT_VIDEO_PRIMARIES, in order to
+ // build a gfx::ColorSpace. See http://crbug.com/959988.
client_->OnIncomingCapturedData(
data, length, selected_video_capability_->supported_format,
- GetCameraRotation(facing_mode_), reference_time, timestamp);
+ gfx::ColorSpace(), GetCameraRotation(facing_mode_), reference_time,
+ timestamp);
}
while (!video_stream_take_photo_callbacks_.empty()) {
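VideoCaptureDevice::Client::OnIncomingCapturedData() gains a gfx::ColorSpace parameter between the format and rotation arguments. Until the TODO above is resolved, callers pass a default-constructed (i.e. unspecified) color space. A sketch of a conforming call site, with the helper and its arguments assumed from context:

#include "media/capture/video/video_capture_device.h"
#include "ui/gfx/color_space.h"

void ForwardFrame(media::VideoCaptureDevice::Client* client,
                  const uint8_t* data, int length,
                  const media::VideoCaptureFormat& format, int rotation,
                  base::TimeTicks reference_time, base::TimeDelta timestamp) {
  client->OnIncomingCapturedData(
      data, length, format,
      gfx::ColorSpace(),  // Unknown color space until read from the driver.
      rotation, reference_time, timestamp);
}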
diff --git a/chromium/media/capture/video/win/video_capture_device_mf_win_unittest.cc b/chromium/media/capture/video/win/video_capture_device_mf_win_unittest.cc
index f8c252f2a9e..6c96763597c 100644
--- a/chromium/media/capture/video/win/video_capture_device_mf_win_unittest.cc
+++ b/chromium/media/capture/video/win/video_capture_device_mf_win_unittest.cc
@@ -30,6 +30,7 @@ class MockClient : public VideoCaptureDevice::Client {
void OnIncomingCapturedData(const uint8_t* data,
int length,
const VideoCaptureFormat& frame_format,
+ const gfx::ColorSpace& color_space,
int clockwise_rotation,
base::TimeTicks reference_time,
base::TimeDelta timestamp,
diff --git a/chromium/media/capture/video/win/video_capture_device_utils_win.cc b/chromium/media/capture/video/win/video_capture_device_utils_win.cc
index d661a62072a..868ae1990b4 100644
--- a/chromium/media/capture/video/win/video_capture_device_utils_win.cc
+++ b/chromium/media/capture/video/win/video_capture_device_utils_win.cc
@@ -6,6 +6,7 @@
#include <iostream>
+#include "base/win/win_util.h"
#include "base/win/windows_version.h"
namespace media {
@@ -65,9 +66,8 @@ int GetCameraRotation(VideoFacingMode facing) {
bool IsAutoRotationEnabled() {
typedef BOOL(WINAPI * GetAutoRotationState)(PAR_STATE state);
- GetAutoRotationState get_rotation_state =
- reinterpret_cast<GetAutoRotationState>(::GetProcAddress(
- GetModuleHandle(L"user32.dll"), "GetAutoRotationState"));
+ static const auto get_rotation_state = reinterpret_cast<GetAutoRotationState>(
+ base::win::GetUser32FunctionPointer("GetAutoRotationState"));
if (get_rotation_state) {
AR_STATE auto_rotation_state;
@@ -87,7 +87,7 @@ bool IsAutoRotationEnabled() {
}
bool IsInternalCamera(VideoFacingMode facing) {
- if (base::win::GetVersion() < base::win::VERSION_WIN10) {
+ if (base::win::GetVersion() < base::win::Version::WIN10) {
return true;
}
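The IsAutoRotationEnabled() hunk above caches the user32 lookup in a function-local static, so the symbol is resolved once per process rather than on every call. A minimal sketch of the idiom, assuming the Windows SDK headers and base/win/win_util.h:

#include <windows.h>

#include "base/win/win_util.h"

bool QueryAutoRotationState() {
  using GetAutoRotationStateFn = BOOL(WINAPI*)(PAR_STATE);
  // Function-local statics initialize once, thread-safely (C++11), so the
  // GetProcAddress-style lookup cost is paid a single time.
  static const auto get_state = reinterpret_cast<GetAutoRotationStateFn>(
      base::win::GetUser32FunctionPointer("GetAutoRotationState"));
  if (!get_state)
    return false;
  AR_STATE state;
  return get_state(&state) && !(state & AR_DISABLED);
}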
diff --git a/chromium/media/capture/video/win/video_capture_device_win.cc b/chromium/media/capture/video/win/video_capture_device_win.cc
index ee71ff53031..9f5989920ad 100644
--- a/chromium/media/capture/video/win/video_capture_device_win.cc
+++ b/chromium/media/capture/video/win/video_capture_device_win.cc
@@ -17,6 +17,7 @@
#include "base/strings/sys_string_conversions.h"
#include "base/win/scoped_co_mem.h"
#include "base/win/scoped_variant.h"
+#include "base/win/win_util.h"
#include "media/base/media_switches.h"
#include "media/base/timestamp_constants.h"
#include "media/capture/mojom/image_capture_types.h"
@@ -28,6 +29,19 @@ using base::win::ScopedCoMem;
using base::win::ScopedVariant;
using Microsoft::WRL::ComPtr;
+namespace {
+const int kSecondsTo100MicroSeconds = 10000;
+
+// The Windows platform stores exposure times (min, max and current) in log
+// base 2 seconds: a value of n means an exposure time of 2^n seconds. The
+// spec expects exposure times in units of 100 microseconds.
+// https://docs.microsoft.com/en-us/previous-versions/ms784800(v%3Dvs.85)
+// spec: https://w3c.github.io/mediacapture-image/#exposure-time
+long ConvertWindowsTimeToSpec(long seconds) {
+ return (std::exp2(seconds) * kSecondsTo100MicroSeconds);
+}
+} // namespace
+
namespace media {
#if DCHECK_IS_ON()
@@ -338,11 +352,8 @@ VideoPixelFormat VideoCaptureDeviceWin::TranslateMediaSubtypeToPixelFormat(
if (sub_type == pixel_format.sub_type)
return pixel_format.format;
}
-#ifndef NDEBUG
- WCHAR guid_str[128];
- StringFromGUID2(sub_type, guid_str, base::size(guid_str));
- DVLOG(2) << "Device (also) supports an unknown media type " << guid_str;
-#endif
+ DVLOG(2) << "Device (also) supports an unknown media type "
+ << base::win::String16FromGUID(sub_type);
return PIXEL_FORMAT_UNKNOWN;
}
@@ -391,6 +402,7 @@ VideoCaptureDeviceWin::VideoCaptureDeviceWin(
state_(kIdle),
white_balance_mode_manual_(false),
exposure_mode_manual_(false),
+ focus_mode_manual_(false),
enable_get_photo_state_(
base::FeatureList::IsEnabled(media::kDirectShowGetPhotoState)) {
// TODO(mcasas): Check that CoInitializeEx() has been called with the
@@ -656,7 +668,7 @@ void VideoCaptureDeviceWin::GetPhotoState(GetPhotoStateCallback callback) {
auto photo_capabilities = mojo::CreateEmptyPhotoState();
- photo_capabilities->exposure_compensation = RetrieveControlRangeAndCurrent(
+ photo_capabilities->exposure_time = RetrieveControlRangeAndCurrent(
[this](auto... args) {
return this->camera_control_->getRange_Exposure(args...);
},
@@ -666,6 +678,17 @@ void VideoCaptureDeviceWin::GetPhotoState(GetPhotoStateCallback callback) {
&photo_capabilities->supported_exposure_modes,
&photo_capabilities->current_exposure_mode);
+ // Windows returns the exposure time in log base 2 seconds.
+ // If value is n, exposure time is 2^n seconds.
+ photo_capabilities->exposure_time->min =
+ ConvertWindowsTimeToSpec(photo_capabilities->exposure_time->min);
+ photo_capabilities->exposure_time->max =
+ ConvertWindowsTimeToSpec(photo_capabilities->exposure_time->max);
+ photo_capabilities->exposure_time->step =
+ std::exp2(photo_capabilities->exposure_time->step);
+ photo_capabilities->exposure_time->current =
+ ConvertWindowsTimeToSpec(photo_capabilities->exposure_time->current);
+
photo_capabilities->color_temperature = RetrieveControlRangeAndCurrent(
[this](auto... args) {
return this->video_control_->getRange_WhiteBalance(args...);
@@ -676,8 +699,7 @@ void VideoCaptureDeviceWin::GetPhotoState(GetPhotoStateCallback callback) {
&photo_capabilities->supported_white_balance_modes,
&photo_capabilities->current_white_balance_mode);
- // Ignore the returned Focus control range and status.
- RetrieveControlRangeAndCurrent(
+ photo_capabilities->focus_distance = RetrieveControlRangeAndCurrent(
[this](auto... args) {
return this->camera_control_->getRange_Focus(args...);
},
@@ -777,6 +799,26 @@ void VideoCaptureDeviceWin::SetPhotoOptions(
return;
}
+ if (settings->has_focus_mode) {
+ if (settings->focus_mode == mojom::MeteringMode::CONTINUOUS) {
+ hr = camera_control_->put_Focus(0L, VideoProcAmp_Flags_Auto);
+ DLOG_IF_FAILED_WITH_HRESULT("Auto focus config failed", hr);
+ if (FAILED(hr))
+ return;
+
+ focus_mode_manual_ = false;
+ } else {
+ focus_mode_manual_ = true;
+ }
+ }
+ if (focus_mode_manual_ && settings->has_focus_distance) {
+ hr = camera_control_->put_Focus(settings->focus_distance,
+ CameraControl_Flags_Manual);
+ DLOG_IF_FAILED_WITH_HRESULT("Focus Distance config failed", hr);
+ if (FAILED(hr))
+ return;
+ }
+
if (settings->has_exposure_mode) {
if (settings->exposure_mode == mojom::MeteringMode::CONTINUOUS) {
hr = camera_control_->put_Exposure(0L, VideoProcAmp_Flags_Auto);
@@ -789,14 +831,15 @@ void VideoCaptureDeviceWin::SetPhotoOptions(
exposure_mode_manual_ = true;
}
}
- if (exposure_mode_manual_ && settings->has_exposure_compensation) {
- hr = camera_control_->put_Exposure(settings->exposure_compensation,
- CameraControl_Flags_Manual);
- DLOG_IF_FAILED_WITH_HRESULT("Exposure Compensation config failed", hr);
+ if (exposure_mode_manual_ && settings->has_exposure_time) {
+ // Windows expects the exposure time in log base 2 seconds.
+ hr = camera_control_->put_Exposure(
+ std::log2(settings->exposure_time / kSecondsTo100MicroSeconds),
+ CameraControl_Flags_Manual);
+ DLOG_IF_FAILED_WITH_HRESULT("Exposure Time config failed", hr);
if (FAILED(hr))
return;
}
-
if (settings->has_brightness) {
hr = video_control_->put_Brightness(settings->brightness,
CameraControl_Flags_Manual);
@@ -883,7 +926,13 @@ void VideoCaptureDeviceWin::FrameReceived(const uint8_t* buffer,
if (timestamp == kNoTimestamp)
timestamp = base::TimeTicks::Now() - first_ref_time_;
- client_->OnIncomingCapturedData(buffer, length, format,
+ // TODO(julien.isorce): retrieve the color space information using the
+ // DirectShow API, AM_MEDIA_TYPE::VIDEOINFOHEADER2::dwControlFlags. If
+ // AMCONTROL_COLORINFO_PRESENT, then reinterpret dwControlFlags as a
+ // DXVA_ExtendedFormat. Then use its fields DXVA_VideoPrimaries,
+ // DXVA_VideoTransferMatrix, DXVA_VideoTransferFunction and
+ // DXVA_NominalRange to build a gfx::ColorSpace. See http://crbug.com/959992.
+ client_->OnIncomingCapturedData(buffer, length, format, gfx::ColorSpace(),
GetCameraRotation(device_descriptor_.facing),
base::TimeTicks::Now(), timestamp);
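The two conversions in this file map between DirectShow's exposure representation (log base 2 seconds) and the image-capture spec's unit of 100 microseconds. A small standalone self-check of the round-trip, independent of the capture code:

#include <cassert>
#include <cmath>

constexpr int kSecondsTo100Microseconds = 10000;

// A DirectShow value of n means 2^n seconds; the spec wants 100 us units.
long DshowToSpec(long log2_seconds) {
  return static_cast<long>(std::exp2(log2_seconds) * kSecondsTo100Microseconds);
}

long SpecToDshow(double spec_units) {
  return static_cast<long>(std::log2(spec_units / kSecondsTo100Microseconds));
}

int main() {
  // -5 -> 2^-5 s = 31.25 ms = 312 spec units (truncated), and back to -5.
  assert(DshowToSpec(-5) == 312);
  assert(SpecToDshow(DshowToSpec(-5)) == -5);
}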
diff --git a/chromium/media/capture/video/win/video_capture_device_win.h b/chromium/media/capture/video/win/video_capture_device_win.h
index e135f077618..cae13913c59 100644
--- a/chromium/media/capture/video/win/video_capture_device_win.h
+++ b/chromium/media/capture/video/win/video_capture_device_win.h
@@ -139,6 +139,7 @@ class VideoCaptureDeviceWin : public VideoCaptureDevice,
// These flags keep the manual/auto mode between cycles of SetPhotoOptions().
bool white_balance_mode_manual_;
bool exposure_mode_manual_;
+ bool focus_mode_manual_;
base::TimeTicks first_ref_time_;
diff --git a/chromium/media/capture/video_capturer_source.h b/chromium/media/capture/video_capturer_source.h
index b02a051557e..50fecbd6f28 100644
--- a/chromium/media/capture/video_capturer_source.h
+++ b/chromium/media/capture/video_capturer_source.h
@@ -42,11 +42,11 @@ class CAPTURE_EXPORT VideoCapturerSource {
// Because a source can start generating frames before a subscriber is added,
// the first video frame delivered may not have a timestamp equal to 0.
using VideoCaptureDeliverFrameCB =
- base::Callback<void(const scoped_refptr<media::VideoFrame>& video_frame,
- base::TimeTicks estimated_capture_time)>;
+ base::RepeatingCallback<void(scoped_refptr<media::VideoFrame> video_frame,
+ base::TimeTicks estimated_capture_time)>;
using VideoCaptureDeviceFormatsCB =
- base::Callback<void(const media::VideoCaptureFormats&)>;
+ base::OnceCallback<void(const media::VideoCaptureFormats&)>;
using RunningCallback = base::Callback<void(bool)>;
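The frame-delivery callback becomes a RepeatingCallback (run once per frame), while the formats enumeration reply becomes a OnceCallback (consumed by a single Run()). A minimal sketch of the semantic difference, assuming base/callback.h:

#include <utility>

#include "base/callback.h"

using FrameCB = base::RepeatingCallback<void(int frame_id)>;
using FormatsCB = base::OnceCallback<void(bool ok)>;

void Deliver(FrameCB on_frame, FormatsCB on_formats) {
  on_frame.Run(1);  // A RepeatingCallback may be run any number of times.
  on_frame.Run(2);
  std::move(on_formats).Run(true);  // A OnceCallback must be moved into Run().
}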
diff --git a/chromium/media/cast/cast_receiver.h b/chromium/media/cast/cast_receiver.h
index 638ee04294b..9c7fe54ae6a 100644
--- a/chromium/media/cast/cast_receiver.h
+++ b/chromium/media/cast/cast_receiver.h
@@ -30,13 +30,15 @@ namespace cast {
// discontinuities for playback. Note: A NULL pointer can be returned when data
// is not available (e.g., bad/missing packet).
typedef base::Callback<void(std::unique_ptr<AudioBus> audio_bus,
- const base::TimeTicks& playout_time,
+ base::TimeTicks playout_time,
bool is_continuous)>
AudioFrameDecodedCallback;
// TODO(miu): |video_frame| includes a timestamp, so use that instead.
-typedef base::Callback<void(const scoped_refptr<media::VideoFrame>& video_frame,
- const base::TimeTicks& playout_time,
- bool is_continuous)> VideoFrameDecodedCallback;
+typedef base::RepeatingCallback<void(
+ scoped_refptr<media::VideoFrame> video_frame,
+ base::TimeTicks playout_time,
+ bool is_continuous)>
+ VideoFrameDecodedCallback;
// The following callback delivers encoded frame data and metadata. The client
// should examine the |frame_id| field to determine whether any frames have been
diff --git a/chromium/media/cast/cast_sender.h b/chromium/media/cast/cast_sender.h
index 6628324b2a9..29b58598669 100644
--- a/chromium/media/cast/cast_sender.h
+++ b/chromium/media/cast/cast_sender.h
@@ -17,6 +17,7 @@
#include "base/time/tick_clock.h"
#include "base/time/time.h"
#include "media/base/audio_bus.h"
+#include "media/base/video_frame.h"
#include "media/cast/cast_config.h"
#include "media/cast/cast_environment.h"
#include "media/cast/constants.h"
@@ -27,7 +28,6 @@ class Size;
}
namespace media {
-class VideoFrame;
namespace cast {
@@ -35,9 +35,8 @@ class VideoFrameInput : public base::RefCountedThreadSafe<VideoFrameInput> {
public:
// Insert video frames into the Cast sender. Frames will be encoded, packetized
// and sent to the network.
- virtual void InsertRawVideoFrame(
- const scoped_refptr<media::VideoFrame>& video_frame,
- const base::TimeTicks& capture_time) = 0;
+ virtual void InsertRawVideoFrame(scoped_refptr<media::VideoFrame> video_frame,
+ base::TimeTicks capture_time) = 0;
// Creates a |VideoFrame| optimized for the encoder. When available, these
// frames offer performance benefits, such as memory copy elimination. The
diff --git a/chromium/media/cast/cast_sender_impl.cc b/chromium/media/cast/cast_sender_impl.cc
index 20411431b3d..7f7ed5555cc 100644
--- a/chromium/media/cast/cast_sender_impl.cc
+++ b/chromium/media/cast/cast_sender_impl.cc
@@ -26,14 +26,12 @@ class LocalVideoFrameInput : public VideoFrameInput {
video_sender.get() ?
video_sender->CreateVideoFrameFactory().release() : nullptr) {}
- void InsertRawVideoFrame(const scoped_refptr<media::VideoFrame>& video_frame,
- const base::TimeTicks& capture_time) final {
- cast_environment_->PostTask(CastEnvironment::MAIN,
- FROM_HERE,
- base::Bind(&VideoSender::InsertRawVideoFrame,
- video_sender_,
- video_frame,
- capture_time));
+ void InsertRawVideoFrame(scoped_refptr<media::VideoFrame> video_frame,
+ base::TimeTicks capture_time) final {
+ cast_environment_->PostTask(
+ CastEnvironment::MAIN, FROM_HERE,
+ base::BindRepeating(&VideoSender::InsertRawVideoFrame, video_sender_,
+ std::move(video_frame), capture_time));
}
scoped_refptr<VideoFrame> MaybeCreateOptimizedFrame(
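Throughout the cast sender and receiver, scoped_refptr<VideoFrame> parameters change from const-reference to by-value so the last consumer can take ownership with std::move, avoiding an AddRef/Release pair per hop. A minimal sketch of the idiom:

#include <utility>

#include "base/memory/ref_counted.h"

struct Frame : public base::RefCounted<Frame> {
 private:
  friend class base::RefCounted<Frame>;
  ~Frame() = default;
};

void Sink(scoped_refptr<Frame> frame) {
  // |frame| arrived by value; moving it onward transfers the reference
  // without touching the refcount.
  scoped_refptr<Frame> kept = std::move(frame);
}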
diff --git a/chromium/media/cast/net/udp_transport_impl.cc b/chromium/media/cast/net/udp_transport_impl.cc
index 8175004d0b8..f93d26b25d8 100644
--- a/chromium/media/cast/net/udp_transport_impl.cc
+++ b/chromium/media/cast/net/udp_transport_impl.cc
@@ -51,7 +51,6 @@ int32_t GetTransportSendBufferSize(const base::DictionaryValue& options) {
} // namespace
UdpTransportImpl::UdpTransportImpl(
- net::NetLog* net_log,
const scoped_refptr<base::SingleThreadTaskRunner>& io_thread_proxy,
const net::IPEndPoint& local_end_point,
const net::IPEndPoint& remote_end_point,
@@ -60,7 +59,7 @@ UdpTransportImpl::UdpTransportImpl(
local_addr_(local_end_point),
remote_addr_(remote_end_point),
udp_socket_(new net::UDPSocket(net::DatagramSocket::DEFAULT_BIND,
- net_log,
+ nullptr /* net_log */,
net::NetLogSource())),
send_pending_(false),
receive_pending_(false),
diff --git a/chromium/media/cast/net/udp_transport_impl.h b/chromium/media/cast/net/udp_transport_impl.h
index e5d3374ffd8..b2662b1736d 100644
--- a/chromium/media/cast/net/udp_transport_impl.h
+++ b/chromium/media/cast/net/udp_transport_impl.h
@@ -24,10 +24,6 @@
#include "net/socket/diff_serv_code_point.h"
#include "net/socket/udp_socket.h"
-namespace net {
-class NetLog;
-} // namespace net
-
namespace media {
namespace cast {
@@ -46,7 +42,6 @@ class UdpTransportImpl final : public PacketTransport, public UdpTransport {
// address of the first packet received.
// |send_buffer_size| specifies the size of the socket send buffer.
UdpTransportImpl(
- net::NetLog* net_log,
const scoped_refptr<base::SingleThreadTaskRunner>& io_thread_proxy,
const net::IPEndPoint& local_end_point,
const net::IPEndPoint& remote_end_point,
diff --git a/chromium/media/cast/net/udp_transport_unittest.cc b/chromium/media/cast/net/udp_transport_unittest.cc
index 618a1d9a4d9..50775a44aef 100644
--- a/chromium/media/cast/net/udp_transport_unittest.cc
+++ b/chromium/media/cast/net/udp_transport_unittest.cc
@@ -77,15 +77,13 @@ class UdpTransportImplTest : public ::testing::Test {
net::IPEndPoint free_local_port2 = test::GetFreeLocalPort();
send_transport_ = std::make_unique<UdpTransportImpl>(
- nullptr, scoped_task_environment_.GetMainThreadTaskRunner(),
- free_local_port1, free_local_port2,
- base::BindRepeating(&UpdateCastTransportStatus));
+ scoped_task_environment_.GetMainThreadTaskRunner(), free_local_port1,
+ free_local_port2, base::BindRepeating(&UpdateCastTransportStatus));
send_transport_->SetSendBufferSize(65536);
recv_transport_ = std::make_unique<UdpTransportImpl>(
- nullptr, scoped_task_environment_.GetMainThreadTaskRunner(),
- free_local_port2, free_local_port1,
- base::BindRepeating(&UpdateCastTransportStatus));
+ scoped_task_environment_.GetMainThreadTaskRunner(), free_local_port2,
+ free_local_port1, base::BindRepeating(&UpdateCastTransportStatus));
recv_transport_->SetSendBufferSize(65536);
}
diff --git a/chromium/media/cast/receiver/cast_receiver_impl.cc b/chromium/media/cast/receiver/cast_receiver_impl.cc
index 01e6cb4bc92..db343ac6dd4 100644
--- a/chromium/media/cast/receiver/cast_receiver_impl.cc
+++ b/chromium/media/cast/receiver/cast_receiver_impl.cc
@@ -197,11 +197,11 @@ void CastReceiverImpl::EmitDecodedVideoFrame(
FrameId frame_id,
RtpTimeTicks rtp_timestamp,
const base::TimeTicks& playout_time,
- const scoped_refptr<VideoFrame>& video_frame,
+ scoped_refptr<VideoFrame> video_frame,
bool is_continuous) {
DCHECK(cast_environment->CurrentlyOn(CastEnvironment::MAIN));
- if (video_frame.get()) {
+ if (video_frame) {
// TODO(miu): This is reporting incorrect timestamp and delay.
// http://crbug.com/547251
std::unique_ptr<FrameEvent> playout_event(new FrameEvent());
@@ -220,7 +220,7 @@ void CastReceiverImpl::EmitDecodedVideoFrame(
(playout_time - base::TimeTicks()).InMicroseconds());
}
- callback.Run(video_frame, playout_time, is_continuous);
+ callback.Run(std::move(video_frame), playout_time, is_continuous);
}
} // namespace cast
diff --git a/chromium/media/cast/receiver/cast_receiver_impl.h b/chromium/media/cast/receiver/cast_receiver_impl.h
index 6454d49203e..2b29b8527af 100644
--- a/chromium/media/cast/receiver/cast_receiver_impl.h
+++ b/chromium/media/cast/receiver/cast_receiver_impl.h
@@ -82,7 +82,7 @@ class CastReceiverImpl : public CastReceiver {
FrameId frame_id,
RtpTimeTicks rtp_timestamp,
const base::TimeTicks& playout_time,
- const scoped_refptr<VideoFrame>& video_frame,
+ scoped_refptr<VideoFrame> video_frame,
bool is_continuous);
const scoped_refptr<CastEnvironment> cast_environment_;
diff --git a/chromium/media/cast/receiver/video_decoder.h b/chromium/media/cast/receiver/video_decoder.h
index bde9b0ec87f..2116217cd53 100644
--- a/chromium/media/cast/receiver/video_decoder.h
+++ b/chromium/media/cast/receiver/video_decoder.h
@@ -26,8 +26,9 @@ class VideoDecoder {
// normally true, but will be false if the decoder has detected a frame skip
// since the last decode operation; and the client might choose to take steps
// to smooth/interpolate video discontinuities in this case.
- typedef base::Callback<void(const scoped_refptr<VideoFrame>& frame,
- bool is_continuous)> DecodeFrameCallback;
+ typedef base::RepeatingCallback<void(scoped_refptr<VideoFrame> frame,
+ bool is_continuous)>
+ DecodeFrameCallback;
VideoDecoder(const scoped_refptr<CastEnvironment>& cast_environment,
Codec codec);
diff --git a/chromium/media/cast/receiver/video_decoder_unittest.cc b/chromium/media/cast/receiver/video_decoder_unittest.cc
index e731618c1bb..d64c3bc659b 100644
--- a/chromium/media/cast/receiver/video_decoder_unittest.cc
+++ b/chromium/media/cast/receiver/video_decoder_unittest.cc
@@ -125,14 +125,14 @@ class VideoDecoderTest : public ::testing::TestWithParam<Codec> {
private:
// Called by |vp8_decoder_| to deliver each frame of decoded video.
- void OnDecodedFrame(const scoped_refptr<VideoFrame>& expected_video_frame,
+ void OnDecodedFrame(scoped_refptr<VideoFrame> expected_video_frame,
bool should_be_continuous,
- const scoped_refptr<VideoFrame>& video_frame,
+ scoped_refptr<VideoFrame> video_frame,
bool is_continuous) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
// A NULL |video_frame| indicates a decode error, which we don't expect.
- ASSERT_TRUE(video_frame.get());
+ ASSERT_TRUE(video_frame);
// Did the decoder detect whether frames were dropped?
EXPECT_EQ(should_be_continuous, is_continuous);
@@ -142,7 +142,7 @@ class VideoDecoderTest : public ::testing::TestWithParam<Codec> {
video_frame->coded_size().width());
EXPECT_EQ(expected_video_frame->coded_size().height(),
video_frame->coded_size().height());
- EXPECT_LT(40.0, I420PSNR(expected_video_frame, video_frame));
+ EXPECT_LT(40.0, I420PSNR(*expected_video_frame, *video_frame));
// TODO(miu): Once we start using VideoFrame::timestamp_, check that here.
// Signal the main test thread that more video was decoded.
diff --git a/chromium/media/cast/sender/external_video_encoder.cc b/chromium/media/cast/sender/external_video_encoder.cc
index f4095a60f64..cc13dc5bd5d 100644
--- a/chromium/media/cast/sender/external_video_encoder.cc
+++ b/chromium/media/cast/sender/external_video_encoder.cc
@@ -68,11 +68,11 @@ struct InProgressExternalVideoFrameEncode {
const base::TimeTicks start_time;
InProgressExternalVideoFrameEncode(
- const scoped_refptr<VideoFrame>& v_frame,
+ scoped_refptr<VideoFrame> v_frame,
base::TimeTicks r_time,
VideoEncoder::FrameEncodedCallback callback,
int bit_rate)
- : video_frame(v_frame),
+ : video_frame(std::move(v_frame)),
reference_time(r_time),
frame_encoded_callback(callback),
target_bit_rate(bit_rate),
@@ -157,7 +157,7 @@ class ExternalVideoEncoder::VEAClientImpl
}
void EncodeVideoFrame(
- const scoped_refptr<media::VideoFrame>& video_frame,
+ scoped_refptr<media::VideoFrame> video_frame,
const base::TimeTicks& reference_time,
bool key_frame_requested,
const VideoEncoder::FrameEncodedCallback& frame_encoded_callback) {
@@ -409,6 +409,7 @@ class ExternalVideoEncoder::VEAClientImpl
media::BitstreamBuffer(
bitstream_buffer_id,
output_buffers_[bitstream_buffer_id]->handle(),
+ false /* read_only */,
output_buffers_[bitstream_buffer_id]->mapped_size()));
}
}
@@ -453,9 +454,9 @@ class ExternalVideoEncoder::VEAClientImpl
// Immediately provide all output buffers to the VEA.
for (size_t i = 0; i < output_buffers_.size(); ++i) {
video_encode_accelerator_->UseOutputBitstreamBuffer(
- media::BitstreamBuffer(static_cast<int32_t>(i),
- output_buffers_[i]->handle(),
- output_buffers_[i]->mapped_size()));
+ media::BitstreamBuffer(
+ static_cast<int32_t>(i), output_buffers_[i]->handle(),
+ false /* read_only */, output_buffers_[i]->mapped_size()));
}
}
@@ -662,7 +663,7 @@ void ExternalVideoEncoder::DestroyClientSoon() {
}
bool ExternalVideoEncoder::EncodeVideoFrame(
- const scoped_refptr<media::VideoFrame>& video_frame,
+ scoped_refptr<media::VideoFrame> video_frame,
const base::TimeTicks& reference_time,
const FrameEncodedCallback& frame_encoded_callback) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
@@ -673,7 +674,7 @@ bool ExternalVideoEncoder::EncodeVideoFrame(
client_->task_runner()->PostTask(
FROM_HERE, base::BindOnce(&VEAClientImpl::EncodeVideoFrame, client_,
- video_frame, reference_time,
+ std::move(video_frame), reference_time,
key_frame_requested_, frame_encoded_callback));
key_frame_requested_ = false;
return true;
diff --git a/chromium/media/cast/sender/external_video_encoder.h b/chromium/media/cast/sender/external_video_encoder.h
index 44002c2513a..d9a40ca1bf2 100644
--- a/chromium/media/cast/sender/external_video_encoder.h
+++ b/chromium/media/cast/sender/external_video_encoder.h
@@ -43,7 +43,7 @@ class ExternalVideoEncoder : public VideoEncoder {
// VideoEncoder implementation.
bool EncodeVideoFrame(
- const scoped_refptr<media::VideoFrame>& video_frame,
+ scoped_refptr<media::VideoFrame> video_frame,
const base::TimeTicks& reference_time,
const FrameEncodedCallback& frame_encoded_callback) final;
void SetBitRate(int new_bit_rate) final;
diff --git a/chromium/media/cast/sender/fake_software_video_encoder.cc b/chromium/media/cast/sender/fake_software_video_encoder.cc
index 214929b8ad5..65f44b8bd1a 100644
--- a/chromium/media/cast/sender/fake_software_video_encoder.cc
+++ b/chromium/media/cast/sender/fake_software_video_encoder.cc
@@ -31,7 +31,7 @@ FakeSoftwareVideoEncoder::~FakeSoftwareVideoEncoder() = default;
void FakeSoftwareVideoEncoder::Initialize() {}
void FakeSoftwareVideoEncoder::Encode(
- const scoped_refptr<media::VideoFrame>& video_frame,
+ scoped_refptr<media::VideoFrame> video_frame,
const base::TimeTicks& reference_time,
SenderEncodedFrame* encoded_frame) {
DCHECK(encoded_frame);
diff --git a/chromium/media/cast/sender/fake_software_video_encoder.h b/chromium/media/cast/sender/fake_software_video_encoder.h
index 7061b7ee9e6..09106a2406b 100644
--- a/chromium/media/cast/sender/fake_software_video_encoder.h
+++ b/chromium/media/cast/sender/fake_software_video_encoder.h
@@ -21,7 +21,7 @@ class FakeSoftwareVideoEncoder : public SoftwareVideoEncoder {
// SoftwareVideoEncoder implementations.
void Initialize() final;
- void Encode(const scoped_refptr<media::VideoFrame>& video_frame,
+ void Encode(scoped_refptr<media::VideoFrame> video_frame,
const base::TimeTicks& reference_time,
SenderEncodedFrame* encoded_frame) final;
void UpdateRates(uint32_t new_bitrate) final;
diff --git a/chromium/media/cast/sender/h264_vt_encoder.cc b/chromium/media/cast/sender/h264_vt_encoder.cc
index e4dd235fed3..0cfb5733999 100644
--- a/chromium/media/cast/sender/h264_vt_encoder.cc
+++ b/chromium/media/cast/sender/h264_vt_encoder.cc
@@ -351,7 +351,7 @@ void H264VideoToolboxEncoder::DestroyCompressionSession() {
}
bool H264VideoToolboxEncoder::EncodeVideoFrame(
- const scoped_refptr<media::VideoFrame>& video_frame,
+ scoped_refptr<media::VideoFrame> video_frame,
const base::TimeTicks& reference_time,
const FrameEncodedCallback& frame_encoded_callback) {
DCHECK(thread_checker_.CalledOnValidThread());
diff --git a/chromium/media/cast/sender/h264_vt_encoder.h b/chromium/media/cast/sender/h264_vt_encoder.h
index 5a4178d3ea3..ab92ae79e9a 100644
--- a/chromium/media/cast/sender/h264_vt_encoder.h
+++ b/chromium/media/cast/sender/h264_vt_encoder.h
@@ -38,7 +38,7 @@ class H264VideoToolboxEncoder : public VideoEncoder,
// media::cast::VideoEncoder implementation
bool EncodeVideoFrame(
- const scoped_refptr<media::VideoFrame>& video_frame,
+ scoped_refptr<media::VideoFrame> video_frame,
const base::TimeTicks& reference_time,
const FrameEncodedCallback& frame_encoded_callback) final;
void SetBitRate(int new_bit_rate) final;
diff --git a/chromium/media/cast/sender/h264_vt_encoder_unittest.cc b/chromium/media/cast/sender/h264_vt_encoder_unittest.cc
index df56d9c9a34..60e02b31dda 100644
--- a/chromium/media/cast/sender/h264_vt_encoder_unittest.cc
+++ b/chromium/media/cast/sender/h264_vt_encoder_unittest.cc
@@ -138,8 +138,8 @@ class EndToEndFrameChecker
EXPECT_TRUE(decoder_init_result);
}
- void PushExpectation(const scoped_refptr<VideoFrame>& frame) {
- expectations_.push(frame);
+ void PushExpectation(scoped_refptr<VideoFrame> frame) {
+ expectations_.push(std::move(frame));
}
void EncodeDone(std::unique_ptr<SenderEncodedFrame> encoded_frame) {
@@ -149,11 +149,11 @@ class EndToEndFrameChecker
base::Unretained(this)));
}
- void CompareFrameWithExpected(const scoped_refptr<VideoFrame>& frame) {
+ void CompareFrameWithExpected(scoped_refptr<VideoFrame> frame) {
ASSERT_LT(0u, expectations_.size());
auto& e = expectations_.front();
expectations_.pop();
- EXPECT_LE(kVideoAcceptedPSNR, I420PSNR(e, frame));
+ EXPECT_LE(kVideoAcceptedPSNR, I420PSNR(*e, *frame));
++count_frames_checked_;
}
@@ -303,7 +303,7 @@ TEST_F(H264VideoToolboxEncoderTest, DISABLED_CheckFrameMetadataSequence) {
TEST_F(H264VideoToolboxEncoderTest, DISABLED_CheckFramesAreDecodable) {
VideoDecoderConfig config(
kCodecH264, H264PROFILE_MAIN, frame_->format(), VideoColorSpace(),
- VIDEO_ROTATION_0, frame_->coded_size(), frame_->visible_rect(),
+ kNoTransformation, frame_->coded_size(), frame_->visible_rect(),
frame_->natural_size(), EmptyExtraData(), Unencrypted());
scoped_refptr<EndToEndFrameChecker> checker(new EndToEndFrameChecker(config));
diff --git a/chromium/media/cast/sender/performance_metrics_overlay.cc b/chromium/media/cast/sender/performance_metrics_overlay.cc
index 9b35b42e9e8..c0f1c8139f0 100644
--- a/chromium/media/cast/sender/performance_metrics_overlay.cc
+++ b/chromium/media/cast/sender/performance_metrics_overlay.cc
@@ -274,7 +274,7 @@ scoped_refptr<VideoFrame> MaybeRenderPerformanceMetricsOverlay(
// are invoked.
frame->AddDestructionObserver(base::Bind(
[](const VideoFrameMetadata* sent_frame_metadata,
- const scoped_refptr<VideoFrame>& source_frame) {
+ scoped_refptr<VideoFrame> source_frame) {
source_frame->metadata()->Clear();
source_frame->metadata()->MergeMetadataFrom(sent_frame_metadata);
},
diff --git a/chromium/media/cast/sender/size_adaptable_video_encoder_base.cc b/chromium/media/cast/sender/size_adaptable_video_encoder_base.cc
index f4464ccaa7f..244d8511cc3 100644
--- a/chromium/media/cast/sender/size_adaptable_video_encoder_base.cc
+++ b/chromium/media/cast/sender/size_adaptable_video_encoder_base.cc
@@ -34,7 +34,7 @@ SizeAdaptableVideoEncoderBase::~SizeAdaptableVideoEncoderBase() {
}
bool SizeAdaptableVideoEncoderBase::EncodeVideoFrame(
- const scoped_refptr<media::VideoFrame>& video_frame,
+ scoped_refptr<media::VideoFrame> video_frame,
const base::TimeTicks& reference_time,
const FrameEncodedCallback& frame_encoded_callback) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
@@ -56,11 +56,9 @@ bool SizeAdaptableVideoEncoderBase::EncodeVideoFrame(
}
const bool is_frame_accepted = encoder_->EncodeVideoFrame(
- video_frame,
- reference_time,
+ std::move(video_frame), reference_time,
base::Bind(&SizeAdaptableVideoEncoderBase::OnEncodedVideoFrame,
- weak_factory_.GetWeakPtr(),
- frame_encoded_callback));
+ weak_factory_.GetWeakPtr(), frame_encoded_callback));
if (is_frame_accepted)
++frames_in_encoder_;
return is_frame_accepted;
diff --git a/chromium/media/cast/sender/size_adaptable_video_encoder_base.h b/chromium/media/cast/sender/size_adaptable_video_encoder_base.h
index 8443e2c09f6..c712787b1d4 100644
--- a/chromium/media/cast/sender/size_adaptable_video_encoder_base.h
+++ b/chromium/media/cast/sender/size_adaptable_video_encoder_base.h
@@ -37,7 +37,7 @@ class SizeAdaptableVideoEncoderBase : public VideoEncoder {
// VideoEncoder implementation.
bool EncodeVideoFrame(
- const scoped_refptr<media::VideoFrame>& video_frame,
+ scoped_refptr<media::VideoFrame> video_frame,
const base::TimeTicks& reference_time,
const FrameEncodedCallback& frame_encoded_callback) final;
void SetBitRate(int new_bit_rate) final;
diff --git a/chromium/media/cast/sender/software_video_encoder.h b/chromium/media/cast/sender/software_video_encoder.h
index c39028da64d..b73afa0c0fd 100644
--- a/chromium/media/cast/sender/software_video_encoder.h
+++ b/chromium/media/cast/sender/software_video_encoder.h
@@ -30,7 +30,7 @@ class SoftwareVideoEncoder {
virtual void Initialize() = 0;
// Encode a raw image (as part of a video stream).
- virtual void Encode(const scoped_refptr<media::VideoFrame>& video_frame,
+ virtual void Encode(scoped_refptr<media::VideoFrame> video_frame,
const base::TimeTicks& reference_time,
SenderEncodedFrame* encoded_frame) = 0;
diff --git a/chromium/media/cast/sender/video_encoder.h b/chromium/media/cast/sender/video_encoder.h
index 74cec93c431..363415a1c8a 100644
--- a/chromium/media/cast/sender/video_encoder.h
+++ b/chromium/media/cast/sender/video_encoder.h
@@ -50,7 +50,7 @@ class VideoEncoder {
// CastEnvironment thread with the result. If false is returned, nothing
// happens and the callback will not be run.
virtual bool EncodeVideoFrame(
- const scoped_refptr<media::VideoFrame>& video_frame,
+ scoped_refptr<media::VideoFrame> video_frame,
const base::TimeTicks& reference_time,
const FrameEncodedCallback& frame_encoded_callback) = 0;
diff --git a/chromium/media/cast/sender/video_encoder_impl.cc b/chromium/media/cast/sender/video_encoder_impl.cc
index e441c563053..ed3586e5e8d 100644
--- a/chromium/media/cast/sender/video_encoder_impl.cc
+++ b/chromium/media/cast/sender/video_encoder_impl.cc
@@ -29,7 +29,7 @@ void InitializeEncoderOnEncoderThread(
void EncodeVideoFrameOnEncoderThread(
scoped_refptr<CastEnvironment> environment,
SoftwareVideoEncoder* encoder,
- const scoped_refptr<media::VideoFrame>& video_frame,
+ scoped_refptr<media::VideoFrame> video_frame,
const base::TimeTicks& reference_time,
const VideoEncoderImpl::CodecDynamicConfig& dynamic_config,
const VideoEncoderImpl::FrameEncodedCallback& frame_encoded_callback) {
@@ -40,7 +40,7 @@ void EncodeVideoFrameOnEncoderThread(
encoder->UpdateRates(dynamic_config.bit_rate);
std::unique_ptr<SenderEncodedFrame> encoded_frame(new SenderEncodedFrame());
- encoder->Encode(video_frame, reference_time, encoded_frame.get());
+ encoder->Encode(std::move(video_frame), reference_time, encoded_frame.get());
encoded_frame->encode_completion_time = environment->Clock()->NowTicks();
environment->PostTask(
CastEnvironment::MAIN,
@@ -104,22 +104,19 @@ VideoEncoderImpl::~VideoEncoderImpl() {
}
bool VideoEncoderImpl::EncodeVideoFrame(
- const scoped_refptr<media::VideoFrame>& video_frame,
+ scoped_refptr<media::VideoFrame> video_frame,
const base::TimeTicks& reference_time,
const FrameEncodedCallback& frame_encoded_callback) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
DCHECK(!video_frame->visible_rect().IsEmpty());
DCHECK(!frame_encoded_callback.is_null());
- cast_environment_->PostTask(CastEnvironment::VIDEO,
- FROM_HERE,
- base::Bind(&EncodeVideoFrameOnEncoderThread,
- cast_environment_,
- encoder_.get(),
- video_frame,
- reference_time,
- dynamic_config_,
- frame_encoded_callback));
+ cast_environment_->PostTask(
+ CastEnvironment::VIDEO, FROM_HERE,
+ base::BindRepeating(&EncodeVideoFrameOnEncoderThread, cast_environment_,
+ encoder_.get(), std::move(video_frame),
+ reference_time, dynamic_config_,
+ frame_encoded_callback));
dynamic_config_.key_frame_requested = false;
return true;
diff --git a/chromium/media/cast/sender/video_encoder_impl.h b/chromium/media/cast/sender/video_encoder_impl.h
index cd350e627d0..673b79f3834 100644
--- a/chromium/media/cast/sender/video_encoder_impl.h
+++ b/chromium/media/cast/sender/video_encoder_impl.h
@@ -38,7 +38,7 @@ class VideoEncoderImpl : public VideoEncoder {
// VideoEncoder implementation.
bool EncodeVideoFrame(
- const scoped_refptr<media::VideoFrame>& video_frame,
+ scoped_refptr<media::VideoFrame> video_frame,
const base::TimeTicks& reference_time,
const FrameEncodedCallback& frame_encoded_callback) final;
void SetBitRate(int new_bit_rate) final;
diff --git a/chromium/media/cast/sender/video_encoder_unittest.cc b/chromium/media/cast/sender/video_encoder_unittest.cc
index 35fa76fccbf..6eba9d5459e 100644
--- a/chromium/media/cast/sender/video_encoder_unittest.cc
+++ b/chromium/media/cast/sender/video_encoder_unittest.cc
@@ -252,6 +252,7 @@ TEST_P(VideoEncoderTest, MAYBE_EncodesVariedFrameSizes) {
encoded_frames[encoded_frames.size() - 4])) {
auto video_frame = CreateTestVideoFrame(frame_size);
const base::TimeTicks reference_time = Now();
+ const base::TimeDelta timestamp = video_frame->timestamp();
const bool accepted_request = video_encoder()->EncodeVideoFrame(
std::move(video_frame), reference_time,
base::BindRepeating(
@@ -271,8 +272,7 @@ TEST_P(VideoEncoderTest, MAYBE_EncodesVariedFrameSizes) {
encoded_frames->emplace_back(std::move(encoded_frame));
},
encoded_frames_weak_factory.GetWeakPtr(),
- RtpTimeTicks::FromTimeDelta(video_frame->timestamp(),
- kVideoFrequency),
+ RtpTimeTicks::FromTimeDelta(timestamp, kVideoFrequency),
reference_time));
if (accepted_request) {
++count_frames_accepted;
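Note how the test now copies video_frame->timestamp() into a local before std::move(video_frame) hands the frame to the encoder; reading through the refptr after the move would dereference null. A sketch of the hazard, with Encode() and Use() hypothetical:

#include <utility>

#include "base/time/time.h"
#include "media/base/video_frame.h"

void Encode(scoped_refptr<media::VideoFrame> frame);  // Hypothetical sink.
void Use(base::TimeDelta timestamp);                  // Hypothetical consumer.

void Submit(scoped_refptr<media::VideoFrame> video_frame) {
  // Capture everything needed *before* the move; afterwards the local
  // refptr is empty and video_frame->timestamp() would crash.
  const base::TimeDelta timestamp = video_frame->timestamp();
  Encode(std::move(video_frame));
  Use(timestamp);  // Safe: the value was copied out first.
}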
diff --git a/chromium/media/cast/sender/video_sender.cc b/chromium/media/cast/sender/video_sender.cc
index 196e1119f72..e55536841b5 100644
--- a/chromium/media/cast/sender/video_sender.cc
+++ b/chromium/media/cast/sender/video_sender.cc
@@ -129,7 +129,7 @@ VideoSender::VideoSender(
VideoSender::~VideoSender() = default;
void VideoSender::InsertRawVideoFrame(
- const scoped_refptr<media::VideoFrame>& video_frame,
+ scoped_refptr<media::VideoFrame> video_frame,
const base::TimeTicks& reference_time) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
@@ -252,7 +252,7 @@ void VideoSender::InsertRawVideoFrame(
MaybeRenderPerformanceMetricsOverlay(
GetTargetPlayoutDelay(), low_latency_mode_, bitrate,
frames_in_encoder_ + 1, last_reported_encoder_utilization_,
- last_reported_lossy_utilization_, video_frame);
+ last_reported_lossy_utilization_, std::move(video_frame));
if (video_encoder_->EncodeVideoFrame(
frame_to_encode, reference_time,
base::Bind(&VideoSender::OnEncodedVideoFrame, AsWeakPtr(),
@@ -295,7 +295,7 @@ base::TimeDelta VideoSender::GetInFlightMediaDuration() const {
}
void VideoSender::OnEncodedVideoFrame(
- const scoped_refptr<media::VideoFrame>& video_frame,
+ scoped_refptr<media::VideoFrame> video_frame,
int encoder_bitrate,
std::unique_ptr<SenderEncodedFrame> encoded_frame) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
diff --git a/chromium/media/cast/sender/video_sender.h b/chromium/media/cast/sender/video_sender.h
index 910e155cd5d..7a8be22a958 100644
--- a/chromium/media/cast/sender/video_sender.h
+++ b/chromium/media/cast/sender/video_sender.h
@@ -52,7 +52,7 @@ class VideoSender : public FrameSender {
// Note: It is not guaranteed that |video_frame| will actually be encoded and
// sent, if VideoSender detects too many frames in flight. Therefore, clients
// should be careful about the rate at which this method is called.
- void InsertRawVideoFrame(const scoped_refptr<media::VideoFrame>& video_frame,
+ void InsertRawVideoFrame(scoped_refptr<media::VideoFrame> video_frame,
const base::TimeTicks& reference_time);
// Creates a |VideoFrameFactory| object to vend |VideoFrame| object with
@@ -68,7 +68,7 @@ class VideoSender : public FrameSender {
private:
// Called by the |video_encoder_| with the next EncodedFrame to send.
- void OnEncodedVideoFrame(const scoped_refptr<media::VideoFrame>& video_frame,
+ void OnEncodedVideoFrame(scoped_refptr<media::VideoFrame> video_frame,
int encoder_bitrate,
std::unique_ptr<SenderEncodedFrame> encoded_frame);
diff --git a/chromium/media/cast/sender/vp8_encoder.cc b/chromium/media/cast/sender/vp8_encoder.cc
index 8a211cec46e..8d7794ffa56 100644
--- a/chromium/media/cast/sender/vp8_encoder.cc
+++ b/chromium/media/cast/sender/vp8_encoder.cc
@@ -183,7 +183,7 @@ void Vp8Encoder::ConfigureForNewFrameSize(const gfx::Size& frame_size) {
VPX_CODEC_OK);
}
-void Vp8Encoder::Encode(const scoped_refptr<media::VideoFrame>& video_frame,
+void Vp8Encoder::Encode(scoped_refptr<media::VideoFrame> video_frame,
const base::TimeTicks& reference_time,
SenderEncodedFrame* encoded_frame) {
DCHECK(thread_checker_.CalledOnValidThread());
diff --git a/chromium/media/cast/sender/vp8_encoder.h b/chromium/media/cast/sender/vp8_encoder.h
index 2b2cfc51ebc..79468ca3485 100644
--- a/chromium/media/cast/sender/vp8_encoder.h
+++ b/chromium/media/cast/sender/vp8_encoder.h
@@ -32,7 +32,7 @@ class Vp8Encoder : public SoftwareVideoEncoder {
// SoftwareVideoEncoder implementations.
void Initialize() final;
- void Encode(const scoped_refptr<media::VideoFrame>& video_frame,
+ void Encode(scoped_refptr<media::VideoFrame> video_frame,
const base::TimeTicks& reference_time,
SenderEncodedFrame* encoded_frame) final;
void UpdateRates(uint32_t new_bitrate) final;
diff --git a/chromium/media/cdm/library_cdm/clear_key_cdm/cdm_video_decoder.cc b/chromium/media/cdm/library_cdm/clear_key_cdm/cdm_video_decoder.cc
index fc984efcfeb..9504b1fef4a 100644
--- a/chromium/media/cdm/library_cdm/clear_key_cdm/cdm_video_decoder.cc
+++ b/chromium/media/cdm/library_cdm/clear_key_cdm/cdm_video_decoder.cc
@@ -55,8 +55,7 @@ media::VideoDecoderConfig ToClearMediaVideoDecoderConfig(
VideoDecoderConfig media_config(
ToMediaVideoCodec(config.codec), ToMediaVideoCodecProfile(config.profile),
ToMediaVideoFormat(config.format), ToMediaColorSpace(config.color_space),
- VideoRotation::VIDEO_ROTATION_0, coded_size, gfx::Rect(coded_size),
- coded_size,
+ kNoTransformation, coded_size, gfx::Rect(coded_size), coded_size,
std::vector<uint8_t>(config.extra_data,
config.extra_data + config.extra_data_size),
Unencrypted());
@@ -252,12 +251,12 @@ class VideoDecoderAdapter : public CdmVideoDecoder {
std::move(quit_closure).Run();
}
- void OnVideoFrameReady(const scoped_refptr<VideoFrame>& video_frame) {
+ void OnVideoFrameReady(scoped_refptr<VideoFrame> video_frame) {
// Do not queue EOS frames; they are not needed.
if (video_frame->metadata()->IsTrue(VideoFrameMetadata::END_OF_STREAM))
return;
- decoded_video_frames_.push(video_frame);
+ decoded_video_frames_.push(std::move(video_frame));
}
void OnReset(base::OnceClosure quit_closure) {
diff --git a/chromium/media/device_monitors/device_monitor_udev.cc b/chromium/media/device_monitors/device_monitor_udev.cc
index a3707c29485..cd8ae1cffb6 100644
--- a/chromium/media/device_monitors/device_monitor_udev.cc
+++ b/chromium/media/device_monitors/device_monitor_udev.cc
@@ -54,6 +54,7 @@ class DeviceMonitorLinux::BlockingTaskRunnerHelper
// device::UdevWatcher::Observer overrides
void OnDeviceAdded(device::ScopedUdevDevicePtr device) override;
void OnDeviceRemoved(device::ScopedUdevDevicePtr device) override;
+ void OnDeviceChanged(device::ScopedUdevDevicePtr device) override;
std::unique_ptr<device::UdevWatcher> udev_watcher_;
@@ -87,6 +88,11 @@ void DeviceMonitorLinux::BlockingTaskRunnerHelper::OnDeviceRemoved(
OnDevicesChanged(std::move(device));
}
+void DeviceMonitorLinux::BlockingTaskRunnerHelper::OnDeviceChanged(
+ device::ScopedUdevDevicePtr device) {
+ OnDevicesChanged(std::move(device));
+}
+
void DeviceMonitorLinux::BlockingTaskRunnerHelper::OnDevicesChanged(
device::ScopedUdevDevicePtr device) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
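Change uevents (for example, a camera being claimed or released) are now forwarded as well, via the new UdevWatcher::Observer::OnDeviceChanged override; all three events funnel into the same handler. A sketch of that shape, using the names from the hunk:

#include "device/udev_linux/udev_watcher.h"

class Helper : public device::UdevWatcher::Observer {
 public:
  void OnDeviceAdded(device::ScopedUdevDevicePtr d) override {
    OnDevicesChanged(std::move(d));
  }
  void OnDeviceRemoved(device::ScopedUdevDevicePtr d) override {
    OnDevicesChanged(std::move(d));
  }
  void OnDeviceChanged(device::ScopedUdevDevicePtr d) override {
    OnDevicesChanged(std::move(d));
  }

 private:
  void OnDevicesChanged(device::ScopedUdevDevicePtr device);
};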
diff --git a/chromium/media/ffmpeg/ffmpeg_common.cc b/chromium/media/ffmpeg/ffmpeg_common.cc
index 142e992035c..1d5d08510a7 100644
--- a/chromium/media/ffmpeg/ffmpeg_common.cc
+++ b/chromium/media/ffmpeg/ffmpeg_common.cc
@@ -621,8 +621,10 @@ bool AVStreamToVideoDecoderConfig(const AVStream* stream,
extra_data.assign(codec_context->extradata,
codec_context->extradata + codec_context->extradata_size);
}
- config->Initialize(codec, profile, format, color_space, video_rotation,
- coded_size, visible_rect, natural_size, extra_data,
+ // TODO(tmathmeyer) ffmpeg can't provide us with an actual video rotation yet.
+ config->Initialize(codec, profile, format, color_space,
+ VideoTransformation(video_rotation), coded_size,
+ visible_rect, natural_size, extra_data,
GetEncryptionScheme(stream));
if (stream->nb_side_data) {
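VideoDecoderConfig::Initialize() now takes a VideoTransformation (rotation plus an optional mirror flag) rather than a bare VideoRotation; kNoTransformation is the identity, and a plain rotation is wrapped explicitly as above. A sketch under that assumption:

#include "media/base/video_transformation.h"

// Wrap a legacy rotation; mirroring defaults to false.
media::VideoTransformation FromRotation(media::VideoRotation rotation) {
  return media::VideoTransformation(rotation);
}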
diff --git a/chromium/media/filters/BUILD.gn b/chromium/media/filters/BUILD.gn
index ed193cb0857..d08e5944ec9 100644
--- a/chromium/media/filters/BUILD.gn
+++ b/chromium/media/filters/BUILD.gn
@@ -24,7 +24,6 @@ jumbo_source_set("filters") {
"audio_timestamp_validator.h",
"chunk_demuxer.cc",
"chunk_demuxer.h",
- "context_3d.h",
"decoder_selector.cc",
"decoder_selector.h",
"decoder_stream.cc",
@@ -56,10 +55,6 @@ jumbo_source_set("filters") {
"source_buffer_parse_warnings.h",
"source_buffer_range.cc",
"source_buffer_range.h",
- "source_buffer_range_by_dts.cc",
- "source_buffer_range_by_dts.h",
- "source_buffer_range_by_pts.cc",
- "source_buffer_range_by_pts.h",
"source_buffer_state.cc",
"source_buffer_state.h",
"source_buffer_stream.cc",
@@ -70,10 +65,6 @@ jumbo_source_set("filters") {
"video_cadence_estimator.h",
"video_renderer_algorithm.cc",
"video_renderer_algorithm.h",
- "vp8_bool_decoder.cc",
- "vp8_bool_decoder.h",
- "vp8_parser.cc",
- "vp8_parser.h",
"vp9_bool_decoder.cc",
"vp9_bool_decoder.h",
"vp9_compressed_header_parser.cc",
@@ -94,10 +85,6 @@ jumbo_source_set("filters") {
"//media:subcomponent_config",
]
- public_deps = [
- ":jpeg_parser",
- ]
-
deps = [
"//cc/base", # For MathUtil.
"//gpu/command_buffer/common",
@@ -231,34 +218,6 @@ jumbo_source_set("filters") {
}
}
-# This component allows other targets to use the JPEG parser as a standalone,
-# general-purpose utility without having to pull all of //media as a dependency
-# (which could potentially result in cycles).
-component("jpeg_parser") {
- output_name = "media_filters_jpeg_parser"
- sources = [
- "jpeg_parser.cc",
- "jpeg_parser.h",
- ]
- defines = [ "IS_JPEG_PARSER_IMPL" ]
- deps = [
- "//base",
- ]
-}
-
-source_set("jpeg_parser_unit_tests") {
- testonly = true
- sources = [
- "jpeg_parser_unittest.cc",
- ]
- deps = [
- ":jpeg_parser",
- "//base",
- "//media:test_support",
- "//testing/gtest",
- ]
-}
-
source_set("perftests") {
testonly = true
sources = []
@@ -326,14 +285,11 @@ source_set("unit_tests") {
"video_cadence_estimator_unittest.cc",
"video_decoder_stream_unittest.cc",
"video_renderer_algorithm_unittest.cc",
- "vp8_bool_decoder_unittest.cc",
- "vp8_parser_unittest.cc",
"vp9_parser_unittest.cc",
"vp9_raw_bits_reader_unittest.cc",
]
deps = [
- ":jpeg_parser_unit_tests",
"//base/test:test_support",
"//media:test_support",
"//testing/gmock",
diff --git a/chromium/media/filters/android/media_codec_audio_decoder.cc b/chromium/media/filters/android/media_codec_audio_decoder.cc
index b6e66352232..9040ff98048 100644
--- a/chromium/media/filters/android/media_codec_audio_decoder.cc
+++ b/chromium/media/filters/android/media_codec_audio_decoder.cc
@@ -63,7 +63,7 @@ std::string MediaCodecAudioDecoder::GetDisplayName() const {
void MediaCodecAudioDecoder::Initialize(const AudioDecoderConfig& config,
CdmContext* cdm_context,
- const InitCB& init_cb,
+ InitCB init_cb,
const OutputCB& output_cb,
const WaitingCB& waiting_cb) {
DVLOG(1) << __func__ << ": " << config.AsHumanReadableString();
@@ -76,7 +76,6 @@ void MediaCodecAudioDecoder::Initialize(const AudioDecoderConfig& config,
DCHECK(input_queue_.empty());
ClearInputQueue(DecodeStatus::ABORTED);
- InitCB bound_init_cb = BindToCurrentLoop(init_cb);
is_passthrough_ = MediaCodecUtil::IsPassthroughAudioFormat(config.codec());
sample_format_ = kSampleFormatS16;
@@ -89,7 +88,7 @@ void MediaCodecAudioDecoder::Initialize(const AudioDecoderConfig& config,
if (state_ == STATE_ERROR) {
DVLOG(1) << "Decoder is in error state.";
- bound_init_cb.Run(false);
+ BindToCurrentLoop(std::move(init_cb)).Run(false);
return;
}
@@ -101,7 +100,7 @@ void MediaCodecAudioDecoder::Initialize(const AudioDecoderConfig& config,
config.codec() == kCodecOpus || is_passthrough_;
if (!is_codec_supported) {
DVLOG(1) << "Unsuported codec " << GetCodecName(config.codec());
- bound_init_cb.Run(false);
+ BindToCurrentLoop(std::move(init_cb)).Run(false);
return;
}
@@ -120,24 +119,24 @@ void MediaCodecAudioDecoder::Initialize(const AudioDecoderConfig& config,
LOG(ERROR) << "The stream is encrypted but there is no CdmContext or "
"MediaCryptoContext is not supported";
SetState(STATE_ERROR);
- bound_init_cb.Run(false);
+ BindToCurrentLoop(std::move(init_cb)).Run(false);
return;
}
// Postpone initialization until MediaCrypto is available.
// SetCdm uses init_cb in a method that's already bound to the current loop.
SetState(STATE_WAITING_FOR_MEDIA_CRYPTO);
- SetCdm(init_cb);
+ SetCdm(std::move(init_cb));
return;
}
if (!CreateMediaCodecLoop()) {
- bound_init_cb.Run(false);
+ BindToCurrentLoop(std::move(init_cb)).Run(false);
return;
}
SetState(STATE_READY);
- bound_init_cb.Run(true);
+ BindToCurrentLoop(std::move(init_cb)).Run(true);
}
bool MediaCodecAudioDecoder::CreateMediaCodecLoop() {
@@ -205,7 +204,7 @@ void MediaCodecAudioDecoder::Decode(scoped_refptr<DecoderBuffer> buffer,
codec_loop_->ExpectWork();
}
-void MediaCodecAudioDecoder::Reset(const base::Closure& closure) {
+void MediaCodecAudioDecoder::Reset(base::OnceClosure closure) {
DVLOG(2) << __func__;
ClearInputQueue(DecodeStatus::ABORTED);
@@ -221,7 +220,7 @@ void MediaCodecAudioDecoder::Reset(const base::Closure& closure) {
SetState(success ? STATE_READY : STATE_ERROR);
- task_runner_->PostTask(FROM_HERE, closure);
+ task_runner_->PostTask(FROM_HERE, std::move(closure));
}
bool MediaCodecAudioDecoder::NeedsBitstreamConversion() const {
@@ -230,7 +229,7 @@ bool MediaCodecAudioDecoder::NeedsBitstreamConversion() const {
return config_.codec() == kCodecAAC;
}
-void MediaCodecAudioDecoder::SetCdm(const InitCB& init_cb) {
+void MediaCodecAudioDecoder::SetCdm(InitCB init_cb) {
DCHECK(media_crypto_context_);
// Register CDM callbacks. The callbacks registered will be posted back to
@@ -247,8 +246,8 @@ void MediaCodecAudioDecoder::SetCdm(const InitCB& init_cb) {
base::DoNothing());
media_crypto_context_->SetMediaCryptoReadyCB(media::BindToCurrentLoop(
- base::Bind(&MediaCodecAudioDecoder::OnMediaCryptoReady,
- weak_factory_.GetWeakPtr(), init_cb)));
+ base::BindOnce(&MediaCodecAudioDecoder::OnMediaCryptoReady,
+ weak_factory_.GetWeakPtr(), std::move(init_cb))));
}
void MediaCodecAudioDecoder::OnKeyAdded() {
@@ -261,7 +260,7 @@ void MediaCodecAudioDecoder::OnKeyAdded() {
}
void MediaCodecAudioDecoder::OnMediaCryptoReady(
- const InitCB& init_cb,
+ InitCB init_cb,
JavaObjectPtr media_crypto,
bool /*requires_secure_video_codec*/) {
DVLOG(1) << __func__;
@@ -272,7 +271,7 @@ void MediaCodecAudioDecoder::OnMediaCryptoReady(
if (media_crypto->is_null()) {
LOG(ERROR) << "MediaCrypto is not available, can't play encrypted stream.";
SetState(STATE_UNINITIALIZED);
- init_cb.Run(false);
+ std::move(init_cb).Run(false);
return;
}
@@ -285,12 +284,12 @@ void MediaCodecAudioDecoder::OnMediaCryptoReady(
// After receiving |media_crypto_| we can configure MediaCodec.
if (!CreateMediaCodecLoop()) {
SetState(STATE_UNINITIALIZED);
- init_cb.Run(false);
+ std::move(init_cb).Run(false);
return;
}
SetState(STATE_READY);
- init_cb.Run(true);
+ std::move(init_cb).Run(true);
}
bool MediaCodecAudioDecoder::IsAnyInputPending() const {
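With InitCB now a OnceCallback, the decoder can no longer bind the callback to the loop once up front and Run() it on several branches; each early-return path wraps and consumes it at the point of use. A minimal sketch of the pattern, assuming media::BindToCurrentLoop:

#include <utility>

#include "base/callback.h"
#include "media/base/bind_to_current_loop.h"

using InitCB = base::OnceCallback<void(bool success)>;

void Initialize(bool ok, InitCB init_cb) {
  if (!ok) {
    // Wrap and consume in one expression; the callback is posted back to
    // the calling sequence and cannot be touched again afterwards.
    media::BindToCurrentLoop(std::move(init_cb)).Run(false);
    return;
  }
  media::BindToCurrentLoop(std::move(init_cb)).Run(true);
}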
diff --git a/chromium/media/filters/android/media_codec_audio_decoder.h b/chromium/media/filters/android/media_codec_audio_decoder.h
index 923b3f79ec2..c735f5f3869 100644
--- a/chromium/media/filters/android/media_codec_audio_decoder.h
+++ b/chromium/media/filters/android/media_codec_audio_decoder.h
@@ -86,12 +86,12 @@ class MEDIA_EXPORT MediaCodecAudioDecoder : public AudioDecoder,
std::string GetDisplayName() const override;
void Initialize(const AudioDecoderConfig& config,
CdmContext* cdm_context,
- const InitCB& init_cb,
+ InitCB init_cb,
const OutputCB& output_cb,
const WaitingCB& waiting_cb) override;
void Decode(scoped_refptr<DecoderBuffer> buffer,
const DecodeCB& decode_cb) override;
- void Reset(const base::Closure& closure) override;
+ void Reset(base::OnceClosure closure) override;
bool NeedsBitstreamConversion() const override;
// MediaCodecLoop::Client implementation
@@ -126,10 +126,10 @@ class MEDIA_EXPORT MediaCodecAudioDecoder : public AudioDecoder,
// A helper method to start CDM initialization. This must be called if and
// only if we were constructed with |is_encrypted| set to true.
- void SetCdm(const InitCB& init_cb);
+ void SetCdm(InitCB init_cb);
// This callback is called after CDM obtained a MediaCrypto object.
- void OnMediaCryptoReady(const InitCB& init_cb,
+ void OnMediaCryptoReady(InitCB init_cb,
JavaObjectPtr media_crypto,
bool requires_secure_video_codec);
diff --git a/chromium/media/filters/aom_video_decoder.cc b/chromium/media/filters/aom_video_decoder.cc
index 14185a8ae90..5dbf0143a2f 100644
--- a/chromium/media/filters/aom_video_decoder.cc
+++ b/chromium/media/filters/aom_video_decoder.cc
@@ -139,15 +139,15 @@ std::string AomVideoDecoder::GetDisplayName() const {
void AomVideoDecoder::Initialize(const VideoDecoderConfig& config,
bool /* low_delay */,
CdmContext* /* cdm_context */,
- const InitCB& init_cb,
+ InitCB init_cb,
const OutputCB& output_cb,
const WaitingCB& /* waiting_cb */) {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
DCHECK(config.IsValidConfig());
- InitCB bound_init_cb = BindToCurrentLoop(init_cb);
+ InitCB bound_init_cb = BindToCurrentLoop(std::move(init_cb));
if (config.is_encrypted() || config.codec() != kCodecAV1) {
- bound_init_cb.Run(false);
+ std::move(bound_init_cb).Run(false);
return;
}
@@ -172,7 +172,7 @@ void AomVideoDecoder::Initialize(const VideoDecoderConfig& config,
0 /* flags */) != AOM_CODEC_OK) {
MEDIA_LOG(ERROR, media_log_) << "aom_codec_dec_init() failed: "
<< aom_codec_error(aom_decoder_.get());
- bound_init_cb.Run(false);
+ std::move(bound_init_cb).Run(false);
return;
}
@@ -184,7 +184,7 @@ void AomVideoDecoder::Initialize(const VideoDecoderConfig& config,
memory_pool_.get()) != AOM_CODEC_OK) {
DLOG(ERROR) << "Failed to configure external buffers. "
<< aom_codec_error(context.get());
- bound_init_cb.Run(false);
+ std::move(bound_init_cb).Run(false);
return;
}
@@ -192,21 +192,21 @@ void AomVideoDecoder::Initialize(const VideoDecoderConfig& config,
state_ = DecoderState::kNormal;
output_cb_ = BindToCurrentLoop(output_cb);
aom_decoder_ = std::move(context);
- bound_init_cb.Run(true);
+ std::move(bound_init_cb).Run(true);
}
void AomVideoDecoder::Decode(scoped_refptr<DecoderBuffer> buffer,
- const DecodeCB& decode_cb) {
+ DecodeCB decode_cb) {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
DCHECK(buffer);
DCHECK(decode_cb);
DCHECK_NE(state_, DecoderState::kUninitialized)
<< "Called Decode() before successful Initialize()";
- DecodeCB bound_decode_cb = BindToCurrentLoop(decode_cb);
+ DecodeCB bound_decode_cb = BindToCurrentLoop(std::move(decode_cb));
if (state_ == DecoderState::kError) {
- bound_decode_cb.Run(DecodeStatus::DECODE_ERROR);
+ std::move(bound_decode_cb).Run(DecodeStatus::DECODE_ERROR);
return;
}
@@ -215,18 +215,18 @@ void AomVideoDecoder::Decode(scoped_refptr<DecoderBuffer> buffer,
if (buffer->end_of_stream()) {
DCHECK_EQ(state_, DecoderState::kNormal);
state_ = DecoderState::kDecodeFinished;
- bound_decode_cb.Run(DecodeStatus::OK);
+ std::move(bound_decode_cb).Run(DecodeStatus::OK);
return;
}
if (!DecodeBuffer(buffer.get())) {
state_ = DecoderState::kError;
- bound_decode_cb.Run(DecodeStatus::DECODE_ERROR);
+ std::move(bound_decode_cb).Run(DecodeStatus::DECODE_ERROR);
return;
}
// VideoDecoderShim expects |decode_cb| to be called after |output_cb_|.
- bound_decode_cb.Run(DecodeStatus::OK);
+ std::move(bound_decode_cb).Run(DecodeStatus::OK);
}
void AomVideoDecoder::Reset(const base::Closure& reset_cb) {
diff --git a/chromium/media/filters/aom_video_decoder.h b/chromium/media/filters/aom_video_decoder.h
index 52f0b36ac3e..228cbca6399 100644
--- a/chromium/media/filters/aom_video_decoder.h
+++ b/chromium/media/filters/aom_video_decoder.h
@@ -31,11 +31,10 @@ class MEDIA_EXPORT AomVideoDecoder : public VideoDecoder {
void Initialize(const VideoDecoderConfig& config,
bool low_delay,
CdmContext* cdm_context,
- const InitCB& init_cb,
+ InitCB init_cb,
const OutputCB& output_cb,
const WaitingCB& waiting_cb) override;
- void Decode(scoped_refptr<DecoderBuffer> buffer,
- const DecodeCB& decode_cb) override;
+ void Decode(scoped_refptr<DecoderBuffer> buffer, DecodeCB decode_cb) override;
void Reset(const base::Closure& reset_cb) override;
private:
diff --git a/chromium/media/filters/aom_video_decoder_unittest.cc b/chromium/media/filters/aom_video_decoder_unittest.cc
index 9f65692603a..596b7610f32 100644
--- a/chromium/media/filters/aom_video_decoder_unittest.cc
+++ b/chromium/media/filters/aom_video_decoder_unittest.cc
@@ -155,9 +155,9 @@ class AomVideoDecoderTest : public testing::Test {
return status;
}
- void FrameReady(const scoped_refptr<VideoFrame>& frame) {
+ void FrameReady(scoped_refptr<VideoFrame> frame) {
DCHECK(!frame->metadata()->IsTrue(VideoFrameMetadata::END_OF_STREAM));
- output_frames_.push_back(frame);
+ output_frames_.push_back(std::move(frame));
}
MOCK_METHOD1(DecodeDone, void(DecodeStatus));
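The FrameReady change above is one instance of a pattern repeated throughout this patch: a sink parameter that will be stored switches from const scoped_refptr<T>& to by-value scoped_refptr<T> plus std::move, saving an AddRef/Release pair per call. A minimal sketch of the idiom (the Frame type here is illustrative):

  #include <utility>
  #include <vector>
  #include "base/memory/ref_counted.h"

  class Frame : public base::RefCountedThreadSafe<Frame> {
   private:
    friend class base::RefCountedThreadSafe<Frame>;
    ~Frame() = default;
  };

  std::vector<scoped_refptr<Frame>> output_frames_;

  // By-value lets the caller move its reference in; the std::move below hands
  // that same reference to the vector without touching the refcount again.
  void FrameReadySketch(scoped_refptr<Frame> frame) {
    output_frames_.push_back(std::move(frame));
  }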
diff --git a/chromium/media/filters/audio_decoder_stream_unittest.cc b/chromium/media/filters/audio_decoder_stream_unittest.cc
index f71d5832273..303e24d7ed5 100644
--- a/chromium/media/filters/audio_decoder_stream_unittest.cc
+++ b/chromium/media/filters/audio_decoder_stream_unittest.cc
@@ -95,8 +95,9 @@ class AudioDecoderStreamTest : public testing::Test {
private:
std::vector<std::unique_ptr<AudioDecoder>> CreateMockAudioDecoder() {
auto decoder = std::make_unique<MockAudioDecoder>();
- EXPECT_CALL(*decoder, Initialize(_, _, _, _, _))
- .WillOnce(DoAll(SaveArg<3>(&decoder_output_cb_), RunCallback<2>(true)));
+ EXPECT_CALL(*decoder, Initialize_(_, _, _, _, _))
+ .WillOnce(
+ DoAll(SaveArg<3>(&decoder_output_cb_), RunOnceCallback<2>(true)));
decoder_ = decoder.get();
std::vector<std::unique_ptr<AudioDecoder>> result;
@@ -106,7 +107,7 @@ class AudioDecoderStreamTest : public testing::Test {
void OnAudioBufferReadDone(base::OnceClosure closure,
AudioDecoderStream::Status status,
- const scoped_refptr<AudioBuffer>& audio_buffer) {
+ scoped_refptr<AudioBuffer> audio_buffer) {
std::move(closure).Run();
}
@@ -148,7 +149,7 @@ TEST_F(AudioDecoderStreamTest, FlushOnConfigChange) {
// Expect the decoder to be re-initialized when AudioDecoderStream finishes
// processing the last decode.
- EXPECT_CALL(*decoder(), Initialize(_, _, _, _, _));
+ EXPECT_CALL(*decoder(), Initialize_(_, _, _, _, _));
RunUntilIdle();
}
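The rename from Initialize to Initialize_ in these expectations reflects the usual gmock accommodation for move-only arguments: the mock's real virtual Initialize() forwards to a mockable Initialize_() that takes the callback by mutable reference, which actions such as RunOnceCallback<2> can then consume. A sketch of that thunk under those assumptions (parameter list inferred from the five-argument expectations above, remaining overrides elided):

  #include "media/base/audio_decoder.h"
  #include "testing/gmock/include/gmock/gmock.h"

  namespace media {

  // Sketch only: the other pure-virtual AudioDecoder overrides are elided.
  class MockAudioDecoderSketch : public AudioDecoder {
   public:
    void Initialize(const AudioDecoderConfig& config,
                    CdmContext* cdm_context,
                    InitCB init_cb,
                    const OutputCB& output_cb,
                    const WaitingCB& waiting_cb) override {
      // gmock cannot store a move-only InitCB, so forward a mutable reference
      // to a plain mock method; RunOnceCallback<2>(true) then consumes it.
      Initialize_(config, cdm_context, init_cb, output_cb, waiting_cb);
    }
    MOCK_METHOD5(Initialize_,
                 void(const AudioDecoderConfig& config,
                      CdmContext* cdm_context,
                      InitCB& init_cb,
                      const OutputCB& output_cb,
                      const WaitingCB& waiting_cb));
  };

  }  // namespace media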
diff --git a/chromium/media/filters/audio_decoder_unittest.cc b/chromium/media/filters/audio_decoder_unittest.cc
index 7d95ed09cd3..8ac201a7c6a 100644
--- a/chromium/media/filters/audio_decoder_unittest.cc
+++ b/chromium/media/filters/audio_decoder_unittest.cc
@@ -289,9 +289,9 @@ class AudioDecoderTest
ASSERT_TRUE(reader_->SeekForTesting(seek_time));
}
- void OnDecoderOutput(const scoped_refptr<AudioBuffer>& buffer) {
+ void OnDecoderOutput(scoped_refptr<AudioBuffer> buffer) {
EXPECT_FALSE(buffer->end_of_stream());
- decoded_audio_.push_back(buffer);
+ decoded_audio_.push_back(std::move(buffer));
}
void DecodeFinished(const base::Closure& quit_closure, DecodeStatus status) {
diff --git a/chromium/media/filters/audio_renderer_algorithm.cc b/chromium/media/filters/audio_renderer_algorithm.cc
index fc92e5d0610..e70d4db681d 100644
--- a/chromium/media/filters/audio_renderer_algorithm.cc
+++ b/chromium/media/filters/audio_renderer_algorithm.cc
@@ -10,6 +10,7 @@
#include "base/logging.h"
#include "cc/base/math_util.h"
#include "media/base/audio_bus.h"
+#include "media/base/audio_timestamp_helper.h"
#include "media/base/limits.h"
#include "media/filters/wsola_internals.h"
@@ -46,28 +47,38 @@ namespace media {
// |search_block_center_offset_|.
// Overlap-and-add window size.
-static const int kOlaWindowSizeMs = 20;
+constexpr base::TimeDelta kOlaWindowSize =
+ base::TimeDelta::FromMilliseconds(20);
// Size of the search interval. The search interval is [-delta, delta] around
// |output_index_| * |playback_rate|, so the total search interval is
// 2 * delta.
-static const int kWsolaSearchIntervalMs = 30;
+constexpr base::TimeDelta kWsolaSearchInterval =
+ base::TimeDelta::FromMilliseconds(30);
-// The maximum size in seconds for the |audio_buffer_|. Arbitrarily determined.
-static const int kMaxCapacityInSeconds = 3;
+// The maximum size for the |audio_buffer_|. Arbitrarily determined.
+constexpr base::TimeDelta kMaxCapacity = base::TimeDelta::FromSeconds(3);
-// The minimum size in ms for the |audio_buffer_|. Arbitrarily determined.
-static const int kStartingCapacityInMs = 200;
+// The minimum size for the |audio_buffer_|. Arbitrarily determined.
+constexpr base::TimeDelta kStartingCapacity =
+ base::TimeDelta::FromMilliseconds(200);
-// The minimum size in ms for the |audio_buffer_| for encrypted streams.
-// Set this to be larger than |kStartingCapacityInMs| because the performance of
+// The minimum size for the |audio_buffer_| for encrypted streams.
+// Set this to be larger than |kStartingCapacity| because the performance of
// encrypted playback is always worse than clear playback, due to decryption and
// potentially IPC overhead. For the context, see https://crbug.com/403462,
// https://crbug.com/718161 and https://crbug.com/879970.
-static const int kStartingCapacityForEncryptedInMs = 500;
+constexpr base::TimeDelta kStartingCapacityForEncrypted =
+ base::TimeDelta::FromMilliseconds(500);
AudioRendererAlgorithm::AudioRendererAlgorithm()
- : channels_(0),
+ : AudioRendererAlgorithm(
+ {kMaxCapacity, kStartingCapacity, kStartingCapacityForEncrypted}) {}
+
+AudioRendererAlgorithm::AudioRendererAlgorithm(
+ AudioRendererAlgorithmParameters params)
+ : audio_renderer_algorithm_params_(std::move(params)),
+ channels_(0),
samples_per_second_(0),
is_bitstream_format_(false),
capacity_(0),
@@ -92,14 +103,20 @@ void AudioRendererAlgorithm::Initialize(const AudioParameters& params,
samples_per_second_ = params.sample_rate();
is_bitstream_format_ = params.IsBitstreamFormat();
initial_capacity_ = capacity_ = std::max(
- params.frames_per_buffer() * 2,
- ConvertMillisecondsToFrames(is_encrypted
- ? kStartingCapacityForEncryptedInMs
- : kStartingCapacityInMs));
- max_capacity_ =
- std::max(initial_capacity_, kMaxCapacityInSeconds * samples_per_second_);
- num_candidate_blocks_ = ConvertMillisecondsToFrames(kWsolaSearchIntervalMs);
- ola_window_size_ = ConvertMillisecondsToFrames(kOlaWindowSizeMs);
+ static_cast<int64_t>(params.frames_per_buffer()) * 2,
+ AudioTimestampHelper::TimeToFrames(
+ is_encrypted
+ ? audio_renderer_algorithm_params_.starting_capacity_for_encrypted
+ : audio_renderer_algorithm_params_.starting_capacity,
+ samples_per_second_));
+ max_capacity_ = std::max(
+ initial_capacity_,
+ AudioTimestampHelper::TimeToFrames(
+ audio_renderer_algorithm_params_.max_capacity, samples_per_second_));
+ num_candidate_blocks_ = AudioTimestampHelper::TimeToFrames(
+ kWsolaSearchInterval, samples_per_second_);
+ ola_window_size_ =
+ AudioTimestampHelper::TimeToFrames(kOlaWindowSize, samples_per_second_);
  // Make sure the window size is an even number.
ola_window_size_ += ola_window_size_ & 1;
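With the constants now base::TimeDelta values, frame counts come from AudioTimestampHelper::TimeToFrames(), i.e. duration times sample rate. As a worked example, at 48 kHz the 20 ms OLA window is 0.020 * 48000 = 960 frames and the 3 s maximum capacity is 144000 frames; the snippet below (sample rate illustrative) spells that out:

  #include "base/time/time.h"
  #include "media/base/audio_timestamp_helper.h"

  constexpr int kSampleRate = 48000;
  // 20 ms * 48000 Hz = 960 frames.
  int64_t ola_frames = media::AudioTimestampHelper::TimeToFrames(
      base::TimeDelta::FromMilliseconds(20), kSampleRate);
  // 3 s * 48000 Hz = 144000 frames.
  int64_t max_frames = media::AudioTimestampHelper::TimeToFrames(
      base::TimeDelta::FromSeconds(3), kSampleRate);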
@@ -224,9 +241,9 @@ void AudioRendererAlgorithm::FlushBuffers() {
}
void AudioRendererAlgorithm::EnqueueBuffer(
- const scoped_refptr<AudioBuffer>& buffer_in) {
+ scoped_refptr<AudioBuffer> buffer_in) {
DCHECK(!buffer_in->end_of_stream());
- audio_buffer_.Append(buffer_in);
+ audio_buffer_.Append(std::move(buffer_in));
}
bool AudioRendererAlgorithm::IsQueueFull() {
@@ -249,11 +266,6 @@ bool AudioRendererAlgorithm::CanPerformWsola() const {
search_block_index_ + search_block_size <= frames;
}
-int AudioRendererAlgorithm::ConvertMillisecondsToFrames(int ms) const {
- return ms * (samples_per_second_ /
- static_cast<double>(base::Time::kMillisecondsPerSecond));
-}
-
bool AudioRendererAlgorithm::RunOneWsolaIteration(double playback_rate) {
if (!CanPerformWsola())
return false;
diff --git a/chromium/media/filters/audio_renderer_algorithm.h b/chromium/media/filters/audio_renderer_algorithm.h
index 899e8cd1457..05c066225a2 100644
--- a/chromium/media/filters/audio_renderer_algorithm.h
+++ b/chromium/media/filters/audio_renderer_algorithm.h
@@ -39,6 +39,7 @@ class AudioBus;
class MEDIA_EXPORT AudioRendererAlgorithm {
public:
AudioRendererAlgorithm();
+ AudioRendererAlgorithm(AudioRendererAlgorithmParameters params);
~AudioRendererAlgorithm();
// Initializes this object with information about the audio stream.
@@ -74,7 +75,7 @@ class MEDIA_EXPORT AudioRendererAlgorithm {
// Enqueues a buffer. It is called from the owner of the algorithm after a
// read completes.
- void EnqueueBuffer(const scoped_refptr<AudioBuffer>& buffer_in);
+ void EnqueueBuffer(scoped_refptr<AudioBuffer> buffer_in);
// Returns true if |audio_buffer_| is at or exceeds capacity.
bool IsQueueFull();
@@ -139,14 +140,14 @@ class MEDIA_EXPORT AudioRendererAlgorithm {
// Do we have enough data to perform one round of WSOLA?
bool CanPerformWsola() const;
- // Converts a time in milliseconds to frames using |samples_per_second_|.
- int ConvertMillisecondsToFrames(int ms) const;
-
// Creates or recreates |target_block_wrapper_| and |search_block_wrapper_|
// after a |channel_mask_| change. May be called at any time after a channel
// mask has been specified.
void CreateSearchWrappers();
+ // Parameters.
+ AudioRendererAlgorithmParameters audio_renderer_algorithm_params_;
+
// Number of channels in audio stream.
int channels_;
@@ -160,7 +161,7 @@ class MEDIA_EXPORT AudioRendererAlgorithm {
AudioBufferQueue audio_buffer_;
// How many frames to have in the queue before we report the queue is full.
- int capacity_;
+ int64_t capacity_;
// Book keeping of the current time of generated audio, in frames. This
// should be appropriately updated when out samples are generated, regardless
@@ -234,8 +235,8 @@ class MEDIA_EXPORT AudioRendererAlgorithm {
std::unique_ptr<AudioBus> target_block_wrapper_;
// The initial and maximum capacity calculated by Initialize().
- int initial_capacity_;
- int max_capacity_;
+ int64_t initial_capacity_;
+ int64_t max_capacity_;
DISALLOW_COPY_AND_ASSIGN(AudioRendererAlgorithm);
};
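The new constructor makes the buffering thresholds injectable rather than hard-coded. A hedged usage sketch, assuming AudioRendererAlgorithmParameters is a simple struct whose three fields match the names used in the .cc hunk (max_capacity, starting_capacity, starting_capacity_for_encrypted):

  media::AudioRendererAlgorithmParameters params;
  params.max_capacity = base::TimeDelta::FromSeconds(5);
  params.starting_capacity = base::TimeDelta::FromMilliseconds(100);
  params.starting_capacity_for_encrypted =
      base::TimeDelta::FromMilliseconds(250);
  media::AudioRendererAlgorithm algorithm(std::move(params));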
diff --git a/chromium/media/filters/audio_timestamp_validator.cc b/chromium/media/filters/audio_timestamp_validator.cc
index cd5d42aee7c..fb857664502 100644
--- a/chromium/media/filters/audio_timestamp_validator.cc
+++ b/chromium/media/filters/audio_timestamp_validator.cc
@@ -131,18 +131,18 @@ void AudioTimestampValidator::CheckForTimestampGap(
}
void AudioTimestampValidator::RecordOutputDuration(
- const scoped_refptr<AudioBuffer>& audio_buffer) {
+ const AudioBuffer& audio_buffer) {
if (!audio_output_ts_helper_) {
DCHECK_NE(audio_base_ts_, kNoTimestamp);
// SUBTLE: deliberately creating this with output buffer sample rate because
// demuxer stream config is potentially stale for implicit AAC.
audio_output_ts_helper_.reset(
- new AudioTimestampHelper(audio_buffer->sample_rate()));
+ new AudioTimestampHelper(audio_buffer.sample_rate()));
audio_output_ts_helper_->SetBaseTimestamp(audio_base_ts_);
}
- DVLOG(3) << __func__ << " " << audio_buffer->frame_count() << " frames";
- audio_output_ts_helper_->AddFrames(audio_buffer->frame_count());
+ DVLOG(3) << __func__ << " " << audio_buffer.frame_count() << " frames";
+ audio_output_ts_helper_->AddFrames(audio_buffer.frame_count());
}
} // namespace media
diff --git a/chromium/media/filters/audio_timestamp_validator.h b/chromium/media/filters/audio_timestamp_validator.h
index 665501c320e..7a2ebdf71f9 100644
--- a/chromium/media/filters/audio_timestamp_validator.h
+++ b/chromium/media/filters/audio_timestamp_validator.h
@@ -27,7 +27,7 @@ class MEDIA_EXPORT AudioTimestampValidator {
// timestamp should roughly equal the timestamp of the previous buffer offset
// by the previous buffer's duration.
void CheckForTimestampGap(const DecoderBuffer& buffer);
- void RecordOutputDuration(const scoped_refptr<AudioBuffer>& buffer);
+ void RecordOutputDuration(const AudioBuffer& buffer);
private:
bool has_codec_delay_;
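RecordOutputDuration() is the inverse of the by-value moves elsewhere in this patch: it only reads the buffer and never keeps a reference, so const AudioBuffer& states that contract in the signature and skips refcount traffic altogether. A minimal caller-side sketch:

  #include "base/memory/ref_counted.h"
  #include "media/base/audio_buffer.h"
  #include "media/filters/audio_timestamp_validator.h"

  // The caller keeps ownership; the validator only reads |decoded_buffer|.
  void RecordSketch(media::AudioTimestampValidator* validator,
                    const scoped_refptr<media::AudioBuffer>& decoded_buffer) {
    validator->RecordOutputDuration(*decoded_buffer);
  }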
diff --git a/chromium/media/filters/audio_timestamp_validator_unittest.cc b/chromium/media/filters/audio_timestamp_validator_unittest.cc
index b47704654ac..24240917622 100644
--- a/chromium/media/filters/audio_timestamp_validator_unittest.cc
+++ b/chromium/media/filters/audio_timestamp_validator_unittest.cc
@@ -104,7 +104,7 @@ TEST_P(AudioTimestampValidatorTest, WarnForEraticTimes) {
scoped_refptr<AudioBuffer> decoded_buffer = MakeAudioBuffer<float>(
kSampleFormat, kChannelLayout, kChannelCount, kSamplesPerSecond, 1.0f,
0.0f, kFramesPerBuffer, i * kBufferDuration);
- validator.RecordOutputDuration(decoded_buffer.get());
+ validator.RecordOutputDuration(*decoded_buffer);
}
}
}
@@ -143,7 +143,7 @@ TEST_P(AudioTimestampValidatorTest, NoWarningForValidTimes) {
scoped_refptr<AudioBuffer> decoded_buffer = MakeAudioBuffer<float>(
kSampleFormat, kChannelLayout, kChannelCount, kSamplesPerSecond, 1.0f,
0.0f, kFramesPerBuffer, i * kBufferDuration);
- validator.RecordOutputDuration(decoded_buffer.get());
+ validator.RecordOutputDuration(*decoded_buffer);
}
}
}
@@ -188,7 +188,7 @@ TEST_P(AudioTimestampValidatorTest, SingleWarnForSingleLargeGap) {
scoped_refptr<AudioBuffer> decoded_buffer = MakeAudioBuffer<float>(
kSampleFormat, kChannelLayout, kChannelCount, kSamplesPerSecond, 1.0f,
0.0f, kFramesPerBuffer, i * kBufferDuration);
- validator.RecordOutputDuration(decoded_buffer.get());
+ validator.RecordOutputDuration(*decoded_buffer);
}
}
}
@@ -238,7 +238,7 @@ TEST_P(AudioTimestampValidatorTest, RepeatedWarnForSlowAccumulatingDrift) {
scoped_refptr<AudioBuffer> decoded_buffer = MakeAudioBuffer<float>(
kSampleFormat, kChannelLayout, kChannelCount, kSamplesPerSecond, 1.0f,
0.0f, kFramesPerBuffer, i * kBufferDuration);
- validator.RecordOutputDuration(decoded_buffer.get());
+ validator.RecordOutputDuration(*decoded_buffer);
}
}
}
diff --git a/chromium/media/filters/audio_video_metadata_extractor_unittest.cc b/chromium/media/filters/audio_video_metadata_extractor_unittest.cc
index 237ee488f48..df1cb131eb3 100644
--- a/chromium/media/filters/audio_video_metadata_extractor_unittest.cc
+++ b/chromium/media/filters/audio_video_metadata_extractor_unittest.cc
@@ -47,7 +47,12 @@ const std::string GetTagValue(
const media::AudioVideoMetadataExtractor::TagDictionary& tags,
const char* tag_name) {
auto tag_data = tags.find(tag_name);
- return tag_data == tags.end() ? "" : tag_data->second;
+ if (tag_data == tags.end()) {
+ DLOG(WARNING) << "Tag name \"" << tag_name << "\" not found!";
+ return "";
+ }
+
+ return tag_data->second;
}
TEST(AudioVideoMetadataExtractorTest, InvalidFile) {
@@ -67,7 +72,7 @@ TEST(AudioVideoMetadataExtractorTest, AudioOGG) {
EXPECT_EQ(1u, extractor->stream_infos()[1].tags.size());
EXPECT_EQ("vorbis", extractor->stream_infos()[1].type);
EXPECT_EQ("Processed by SoX",
- GetTagValue(extractor->stream_infos()[1].tags, "COMMENT"));
+ GetTagValue(extractor->stream_infos()[1].tags, "Comment"));
EXPECT_EQ(0u, extractor->attached_images_bytes().size());
}
@@ -104,9 +109,9 @@ TEST(AudioVideoMetadataExtractorTest, AudioFLAC) {
EXPECT_EQ(2u, extractor->stream_infos()[0].tags.size());
EXPECT_EQ("Lavf55.43.100",
- GetTagValue(extractor->stream_infos()[0].tags, "ENCODER"));
+ GetTagValue(extractor->stream_infos()[0].tags, "encoder"));
EXPECT_EQ("Amadeus Pro",
- GetTagValue(extractor->stream_infos()[0].tags, "ENCODED_BY"));
+ GetTagValue(extractor->stream_infos()[0].tags, "encoded_by"));
EXPECT_EQ("flac", extractor->stream_infos()[1].type);
EXPECT_EQ(0u, extractor->stream_infos()[1].tags.size());
diff --git a/chromium/media/filters/blocking_url_protocol.cc b/chromium/media/filters/blocking_url_protocol.cc
index 93414f88ce3..5448d3df994 100644
--- a/chromium/media/filters/blocking_url_protocol.cc
+++ b/chromium/media/filters/blocking_url_protocol.cc
@@ -46,11 +46,16 @@ int BlockingUrlProtocol::Read(int size, uint8_t* data) {
return AVERROR(EIO);
}
- // Even though FFmpeg defines AVERROR_EOF, it's not to be used with I/O
- // routines. Instead return 0 for any read at or past EOF.
+  // It's unclear from the FFmpeg code whether this can happen, so guard
+  // against it.
+ if (size < 0)
+ return AVERROR(EIO);
+ if (!size)
+ return 0;
+
int64_t file_size;
if (data_source_->GetSize(&file_size) && read_position_ >= file_size)
- return 0;
+ return AVERROR_EOF;
// Blocking read from data source until either:
// 1) |last_read_bytes_| is set and |read_complete_| is signalled
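The EOF change above matches FFmpeg's AVIO contract: a custom read_packet callback installed through avio_alloc_context() should report end of stream as AVERROR_EOF rather than returning 0. A hedged sketch of a conforming callback, with the actual data-source plumbing elided:

  extern "C" {
  #include <libavformat/avio.h>
  #include <libavutil/error.h>
  }

  // |opaque| would point at the caller's data source; the error codes follow
  // the convention used in the hunk above.
  static int ReadPacketSketch(void* opaque, uint8_t* buf, int buf_size) {
    if (buf_size < 0)
      return AVERROR(EIO);
    if (buf_size == 0)
      return 0;
    int bytes_read = 0;  // Placeholder for a blocking read into |buf|.
    if (bytes_read == 0)
      return AVERROR_EOF;  // At or past EOF: say so explicitly.
    return bytes_read;
  }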
diff --git a/chromium/media/filters/blocking_url_protocol_unittest.cc b/chromium/media/filters/blocking_url_protocol_unittest.cc
index 9509e7b9506..362e0b772bc 100644
--- a/chromium/media/filters/blocking_url_protocol_unittest.cc
+++ b/chromium/media/filters/blocking_url_protocol_unittest.cc
@@ -69,7 +69,7 @@ TEST_F(BlockingUrlProtocolTest, Read) {
EXPECT_TRUE(url_protocol_->GetPosition(&position));
EXPECT_EQ(size, position);
- EXPECT_EQ(0, url_protocol_->Read(32, buffer));
+ EXPECT_EQ(AVERROR_EOF, url_protocol_->Read(32, buffer));
EXPECT_TRUE(url_protocol_->GetPosition(&position));
EXPECT_EQ(size, position);
}
diff --git a/chromium/media/filters/chunk_demuxer.cc b/chromium/media/filters/chunk_demuxer.cc
index d928477f478..6c20ba2acdb 100644
--- a/chromium/media/filters/chunk_demuxer.cc
+++ b/chromium/media/filters/chunk_demuxer.cc
@@ -19,7 +19,6 @@
#include "base/trace_event/trace_event.h"
#include "media/base/audio_decoder_config.h"
#include "media/base/bind_to_current_loop.h"
-#include "media/base/media_switches.h"
#include "media/base/media_tracks.h"
#include "media/base/mime_util.h"
#include "media/base/stream_parser_buffer.h"
@@ -32,28 +31,6 @@
using base::TimeDelta;
-// This macro determines which SourceBufferStream member various
-// ChunkDemuxerStream methods use based on the buffering API |range_api_| set at
-// construction time. See https://crbug.com/718641.
-#define SBSTREAM_IS_SET \
- (range_api_ == RangeApi::kLegacyByDts ? stream_dts_ != nullptr \
- : stream_pts_ != nullptr)
-
-#define SBSTREAM_OP(operation) \
- (range_api_ == RangeApi::kLegacyByDts ? stream_dts_->operation \
- : stream_pts_->operation)
-
-#define SBSTREAM_RESET(config, log) \
- { \
- if (range_api_ == RangeApi::kLegacyByDts) { \
- stream_dts_.reset( \
- new SourceBufferStream<SourceBufferRangeByDts>(config, log)); \
- } else { \
- stream_pts_.reset( \
- new SourceBufferStream<SourceBufferRangeByPts>(config, log)); \
- } \
- }
-
namespace {
// Helper to attempt construction of a StreamParser specific to |content_type|
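With SourceBufferStream no longer templated on the range type, the removed SBSTREAM_* macros' compile-time dual dispatch collapses into ordinary member calls on a single field. A sketch of the resulting call shape (constructing a real SourceBufferStream still needs a config and MediaLog, elided here):

  #include <memory>
  #include "base/time/time.h"
  #include "media/filters/source_buffer_stream.h"

  struct StreamHolderSketch {
    // Previously one of stream_dts_/stream_pts_, selected by SBSTREAM_OP().
    std::unique_ptr<media::SourceBufferStream> stream_;
    // Now a plain member call; no per-call-site macro dispatch.
    void SeekSketch(base::TimeDelta time) { stream_->Seek(time); }
  };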
@@ -87,11 +64,8 @@ std::string ExpectedCodecs(const std::string& content_type,
namespace media {
-ChunkDemuxerStream::ChunkDemuxerStream(Type type,
- MediaTrack::Id media_track_id,
- RangeApi range_api)
+ChunkDemuxerStream::ChunkDemuxerStream(Type type, MediaTrack::Id media_track_id)
: type_(type),
- range_api_(range_api),
liveness_(DemuxerStream::LIVENESS_UNKNOWN),
media_track_id_(media_track_id),
state_(UNINITIALIZED),
@@ -140,7 +114,7 @@ bool ChunkDemuxerStream::IsSeekWaitingForData() const {
// SourceBufferState::IsSeekWaitingForData().
DCHECK_NE(type_, DemuxerStream::TEXT);
- return SBSTREAM_OP(IsSeekPending());
+ return stream_->IsSeekPending();
}
void ChunkDemuxerStream::Seek(TimeDelta time) {
@@ -150,7 +124,7 @@ void ChunkDemuxerStream::Seek(TimeDelta time) {
DCHECK(state_ == UNINITIALIZED || state_ == RETURNING_ABORT_FOR_READS)
<< state_;
- SBSTREAM_OP(Seek(time));
+ stream_->Seek(time);
}
bool ChunkDemuxerStream::Append(const StreamParser::BufferQueue& buffers) {
@@ -162,7 +136,7 @@ bool ChunkDemuxerStream::Append(const StreamParser::BufferQueue& buffers) {
base::AutoLock auto_lock(lock_);
DCHECK_NE(state_, SHUTDOWN);
- if (!SBSTREAM_OP(Append(buffers))) {
+ if (!stream_->Append(buffers)) {
DVLOG(1) << "ChunkDemuxerStream::Append() : stream append failed";
return false;
}
@@ -176,7 +150,7 @@ bool ChunkDemuxerStream::Append(const StreamParser::BufferQueue& buffers) {
void ChunkDemuxerStream::Remove(TimeDelta start, TimeDelta end,
TimeDelta duration) {
base::AutoLock auto_lock(lock_);
- SBSTREAM_OP(Remove(start, end, duration));
+ stream_->Remove(start, end, duration);
}
bool ChunkDemuxerStream::EvictCodedFrames(base::TimeDelta media_time,
@@ -190,28 +164,25 @@ bool ChunkDemuxerStream::EvictCodedFrames(base::TimeDelta media_time,
// to collect unnecessary data that is earlier than the GOP containing
// |media_time|.
if (!is_enabled_)
- SBSTREAM_OP(Seek(media_time));
+ stream_->Seek(media_time);
- // Note: The direct conversion from PTS to DTS is safe here, since we don't
- // need to know currentTime precisely for GC. GC only needs to know which GOP
- // currentTime points to.
- DecodeTimestamp media_time_dts =
- DecodeTimestamp::FromPresentationTime(media_time);
- return SBSTREAM_OP(GarbageCollectIfNeeded(media_time_dts, newDataSize));
+ // |media_time| is allowed to be a little imprecise here. GC only needs to
+ // know which GOP currentTime points to.
+ return stream_->GarbageCollectIfNeeded(media_time, newDataSize);
}
void ChunkDemuxerStream::OnMemoryPressure(
- DecodeTimestamp media_time,
+ base::TimeDelta media_time,
base::MemoryPressureListener::MemoryPressureLevel memory_pressure_level,
bool force_instant_gc) {
base::AutoLock auto_lock(lock_);
- return SBSTREAM_OP(
- OnMemoryPressure(media_time, memory_pressure_level, force_instant_gc));
+ return stream_->OnMemoryPressure(media_time, memory_pressure_level,
+ force_instant_gc);
}
void ChunkDemuxerStream::OnSetDuration(TimeDelta duration) {
base::AutoLock auto_lock(lock_);
- SBSTREAM_OP(OnSetDuration(duration));
+ stream_->OnSetDuration(duration);
}
Ranges<TimeDelta> ChunkDemuxerStream::GetBufferedRanges(
@@ -228,7 +199,7 @@ Ranges<TimeDelta> ChunkDemuxerStream::GetBufferedRanges(
return text_range;
}
- Ranges<TimeDelta> range = SBSTREAM_OP(GetBufferedTime());
+ Ranges<TimeDelta> range = stream_->GetBufferedTime();
if (range.size() == 0u)
return range;
@@ -243,17 +214,17 @@ Ranges<TimeDelta> ChunkDemuxerStream::GetBufferedRanges(
TimeDelta ChunkDemuxerStream::GetHighestPresentationTimestamp() const {
base::AutoLock auto_lock(lock_);
- return SBSTREAM_OP(GetHighestPresentationTimestamp());
+ return stream_->GetHighestPresentationTimestamp();
}
TimeDelta ChunkDemuxerStream::GetBufferedDuration() const {
base::AutoLock auto_lock(lock_);
- return SBSTREAM_OP(GetBufferedDuration());
+ return stream_->GetBufferedDuration();
}
size_t ChunkDemuxerStream::GetBufferedSize() const {
base::AutoLock auto_lock(lock_);
- return SBSTREAM_OP(GetBufferedSize());
+ return stream_->GetBufferedSize();
}
void ChunkDemuxerStream::OnStartOfCodedFrameGroup(DecodeTimestamp start_dts,
@@ -266,7 +237,7 @@ void ChunkDemuxerStream::OnStartOfCodedFrameGroup(DecodeTimestamp start_dts,
group_start_observer_cb_.Run(start_dts, start_pts);
base::AutoLock auto_lock(lock_);
- SBSTREAM_OP(OnStartOfCodedFrameGroup(start_dts, start_pts));
+ stream_->OnStartOfCodedFrameGroup(start_pts);
}
bool ChunkDemuxerStream::UpdateAudioConfig(const AudioDecoderConfig& config,
@@ -275,13 +246,13 @@ bool ChunkDemuxerStream::UpdateAudioConfig(const AudioDecoderConfig& config,
DCHECK(config.IsValidConfig());
DCHECK_EQ(type_, AUDIO);
base::AutoLock auto_lock(lock_);
- if (!SBSTREAM_IS_SET) {
+ if (!stream_) {
DCHECK_EQ(state_, UNINITIALIZED);
- SBSTREAM_RESET(config, media_log);
+ stream_.reset(new SourceBufferStream(config, media_log));
return true;
}
- return SBSTREAM_OP(UpdateAudioConfig(config, allow_codec_change));
+ return stream_->UpdateAudioConfig(config, allow_codec_change);
}
bool ChunkDemuxerStream::UpdateVideoConfig(const VideoDecoderConfig& config,
@@ -291,32 +262,32 @@ bool ChunkDemuxerStream::UpdateVideoConfig(const VideoDecoderConfig& config,
DCHECK_EQ(type_, VIDEO);
base::AutoLock auto_lock(lock_);
- if (!SBSTREAM_IS_SET) {
+ if (!stream_) {
DCHECK_EQ(state_, UNINITIALIZED);
- SBSTREAM_RESET(config, media_log);
+ stream_.reset(new SourceBufferStream(config, media_log));
return true;
}
- return SBSTREAM_OP(UpdateVideoConfig(config, allow_codec_change));
+ return stream_->UpdateVideoConfig(config, allow_codec_change);
}
void ChunkDemuxerStream::UpdateTextConfig(const TextTrackConfig& config,
MediaLog* media_log) {
DCHECK_EQ(type_, TEXT);
base::AutoLock auto_lock(lock_);
- DCHECK(!SBSTREAM_IS_SET);
+ DCHECK(!stream_);
DCHECK_EQ(state_, UNINITIALIZED);
- SBSTREAM_RESET(config, media_log);
+ stream_.reset(new SourceBufferStream(config, media_log));
}
void ChunkDemuxerStream::MarkEndOfStream() {
base::AutoLock auto_lock(lock_);
- SBSTREAM_OP(MarkEndOfStream());
+ stream_->MarkEndOfStream();
}
void ChunkDemuxerStream::UnmarkEndOfStream() {
base::AutoLock auto_lock(lock_);
- SBSTREAM_OP(UnmarkEndOfStream());
+ stream_->UnmarkEndOfStream();
}
// DemuxerStream methods.
@@ -347,16 +318,16 @@ AudioDecoderConfig ChunkDemuxerStream::audio_decoder_config() {
CHECK_EQ(type_, AUDIO);
base::AutoLock auto_lock(lock_);
// Trying to track down crash. http://crbug.com/715761
- CHECK(SBSTREAM_IS_SET);
- return SBSTREAM_OP(GetCurrentAudioDecoderConfig());
+ CHECK(stream_);
+ return stream_->GetCurrentAudioDecoderConfig();
}
VideoDecoderConfig ChunkDemuxerStream::video_decoder_config() {
CHECK_EQ(type_, VIDEO);
base::AutoLock auto_lock(lock_);
// Trying to track down crash. http://crbug.com/715761
- CHECK(SBSTREAM_IS_SET);
- return SBSTREAM_OP(GetCurrentVideoDecoderConfig());
+ CHECK(stream_);
+ return stream_->GetCurrentVideoDecoderConfig();
}
bool ChunkDemuxerStream::SupportsConfigChanges() { return true; }
@@ -374,8 +345,8 @@ void ChunkDemuxerStream::SetEnabled(bool enabled, base::TimeDelta timestamp) {
is_enabled_ = enabled;
if (enabled) {
- DCHECK(SBSTREAM_IS_SET);
- SBSTREAM_OP(Seek(timestamp));
+ DCHECK(stream_);
+ stream_->Seek(timestamp);
} else if (read_cb_) {
DVLOG(1) << "Read from disabled stream, returning EOS";
std::move(read_cb_).Run(kOk, StreamParserBuffer::CreateEOSBuffer());
@@ -385,12 +356,12 @@ void ChunkDemuxerStream::SetEnabled(bool enabled, base::TimeDelta timestamp) {
TextTrackConfig ChunkDemuxerStream::text_track_config() {
CHECK_EQ(type_, TEXT);
base::AutoLock auto_lock(lock_);
- return SBSTREAM_OP(GetCurrentTextTrackConfig());
+ return stream_->GetCurrentTextTrackConfig();
}
void ChunkDemuxerStream::SetStreamMemoryLimit(size_t memory_limit) {
base::AutoLock auto_lock(lock_);
- SBSTREAM_OP(set_memory_limit(memory_limit));
+ stream_->set_memory_limit(memory_limit);
}
void ChunkDemuxerStream::SetLiveness(Liveness liveness) {
@@ -420,7 +391,7 @@ void ChunkDemuxerStream::CompletePendingReadIfPossible_Locked() {
NOTREACHED();
return;
case RETURNING_DATA_FOR_READS:
- switch (SBSTREAM_OP(GetNextBuffer(&buffer))) {
+ switch (stream_->GetNextBuffer(&buffer)) {
case SourceBufferStreamStatus::kSuccess:
status = DemuxerStream::kOk;
DVLOG(2) << __func__ << ": returning kOk, type " << type_ << ", dts "
@@ -482,13 +453,10 @@ ChunkDemuxer::ChunkDemuxer(
liveness_(DemuxerStream::LIVENESS_UNKNOWN),
detected_audio_track_count_(0),
detected_video_track_count_(0),
- detected_text_track_count_(0),
- buffering_by_pts_(base::FeatureList::IsEnabled(kMseBufferByPts)) {
+ detected_text_track_count_(0) {
DCHECK(open_cb_);
DCHECK(encrypted_media_init_data_cb_);
- MEDIA_LOG(INFO, media_log_)
- << GetDisplayName()
- << (buffering_by_pts_ ? ": buffering by PTS" : ": buffering by DTS");
+ MEDIA_LOG(INFO, media_log_) << GetDisplayName();
}
std::string ChunkDemuxer::GetDisplayName() const {
@@ -497,7 +465,7 @@ std::string ChunkDemuxer::GetDisplayName() const {
void ChunkDemuxer::Initialize(DemuxerHost* host,
const PipelineStatusCB& init_cb) {
- DVLOG(1) << "Init(), buffering_by_pts_=" << buffering_by_pts_;
+ DVLOG(1) << "Initialize()";
TRACE_EVENT_ASYNC_BEGIN0("media", "ChunkDemuxer::Initialize", this);
base::AutoLock auto_lock(lock_);
@@ -674,11 +642,9 @@ ChunkDemuxer::Status ChunkDemuxer::AddId(const std::string& id,
}
std::unique_ptr<FrameProcessor> frame_processor(new FrameProcessor(
- base::Bind(&ChunkDemuxer::IncreaseDurationIfNecessary,
- base::Unretained(this)),
- media_log_,
- buffering_by_pts_ ? ChunkDemuxerStream::RangeApi::kNewByPts
- : ChunkDemuxerStream::RangeApi::kLegacyByDts));
+ base::BindRepeating(&ChunkDemuxer::IncreaseDurationIfNecessary,
+ base::Unretained(this)),
+ media_log_));
std::unique_ptr<SourceBufferState> source_state(new SourceBufferState(
std::move(stream_parser), std::move(frame_processor),
@@ -839,11 +805,9 @@ void ChunkDemuxer::OnMemoryPressure(
base::TimeDelta currentMediaTime,
base::MemoryPressureListener::MemoryPressureLevel memory_pressure_level,
bool force_instant_gc) {
- DecodeTimestamp media_time_dts =
- DecodeTimestamp::FromPresentationTime(currentMediaTime);
base::AutoLock auto_lock(lock_);
for (const auto& itr : source_state_map_) {
- itr.second->OnMemoryPressure(media_time_dts, memory_pressure_level,
+ itr.second->OnMemoryPressure(currentMediaTime, memory_pressure_level,
force_instant_gc);
}
}
@@ -1370,10 +1334,7 @@ ChunkDemuxerStream* ChunkDemuxer::CreateDemuxerStream(
}
std::unique_ptr<ChunkDemuxerStream> stream =
- std::make_unique<ChunkDemuxerStream>(
- type, media_track_id,
- (buffering_by_pts_ ? ChunkDemuxerStream::RangeApi::kNewByPts
- : ChunkDemuxerStream::RangeApi::kLegacyByDts));
+ std::make_unique<ChunkDemuxerStream>(type, media_track_id);
DCHECK(track_id_to_demux_stream_map_.find(media_track_id) ==
track_id_to_demux_stream_map_.end());
track_id_to_demux_stream_map_[media_track_id] = stream.get();
diff --git a/chromium/media/filters/chunk_demuxer.h b/chromium/media/filters/chunk_demuxer.h
index 5cd48c0f67a..c7d06c3f654 100644
--- a/chromium/media/filters/chunk_demuxer.h
+++ b/chromium/media/filters/chunk_demuxer.h
@@ -26,8 +26,6 @@
#include "media/base/ranges.h"
#include "media/base/stream_parser.h"
#include "media/filters/source_buffer_parse_warnings.h"
-#include "media/filters/source_buffer_range_by_dts.h"
-#include "media/filters/source_buffer_range_by_pts.h"
#include "media/filters/source_buffer_state.h"
#include "media/filters/source_buffer_stream.h"
@@ -35,25 +33,11 @@ class MEDIA_EXPORT SourceBufferStream;
namespace media {
-template <>
-void SourceBufferStream<SourceBufferRangeByPts>::OnStartOfCodedFrameGroup(
- DecodeTimestamp coded_frame_group_start_dts,
- base::TimeDelta coded_frame_group_start_pts);
-
-template <>
-void SourceBufferStream<SourceBufferRangeByDts>::OnStartOfCodedFrameGroup(
- DecodeTimestamp coded_frame_group_start_dts,
- base::TimeDelta coded_frame_group_start_pts);
-
class MEDIA_EXPORT ChunkDemuxerStream : public DemuxerStream {
public:
using BufferQueue = base::circular_deque<scoped_refptr<StreamParserBuffer>>;
- enum class RangeApi { kLegacyByDts, kNewByPts };
-
- ChunkDemuxerStream(Type type,
- MediaTrack::Id media_track_id,
- RangeApi range_api);
+ ChunkDemuxerStream(Type type, MediaTrack::Id media_track_id);
~ChunkDemuxerStream() override;
// ChunkDemuxerStream control methods.
@@ -88,7 +72,7 @@ class MEDIA_EXPORT ChunkDemuxerStream : public DemuxerStream {
bool EvictCodedFrames(base::TimeDelta media_time, size_t newDataSize);
void OnMemoryPressure(
- DecodeTimestamp media_time,
+ base::TimeDelta media_time,
base::MemoryPressureListener::MemoryPressureLevel memory_pressure_level,
bool force_instant_gc);
@@ -110,8 +94,10 @@ class MEDIA_EXPORT ChunkDemuxerStream : public DemuxerStream {
size_t GetBufferedSize() const;
// Signal to the stream that buffers handed in through subsequent calls to
- // Append() belong to a coded frame group that starts at |start_dts| and
- // |start_pts|.
+ // Append() belong to a coded frame group that starts at |start_pts|.
+ // |start_dts| is used only to help tests verify correctness of calls to this
+ // method. If |group_start_observer_cb_| is set, first invokes this test-only
+ // callback with |start_dts| and |start_pts| to assist test verification.
void OnStartOfCodedFrameGroup(DecodeTimestamp start_dts,
base::TimeDelta start_pts);
@@ -182,19 +168,15 @@ class MEDIA_EXPORT ChunkDemuxerStream : public DemuxerStream {
// Specifies the type of the stream.
const Type type_;
- const RangeApi range_api_;
Liveness liveness_ GUARDED_BY(lock_);
- // Precisely one of these will be used by an instance, determined by
- // |range_api_| set in ctor. See https://crbug.com/718641.
- std::unique_ptr<SourceBufferStream<SourceBufferRangeByDts>> stream_dts_
- GUARDED_BY(lock_);
- std::unique_ptr<SourceBufferStream<SourceBufferRangeByPts>> stream_pts_
- GUARDED_BY(lock_);
+ std::unique_ptr<SourceBufferStream> stream_ GUARDED_BY(lock_);
const MediaTrack::Id media_track_id_;
+ // Test-only callbacks to assist verification of Append() and
+ // OnStartOfCodedFrameGroup() calls, respectively.
AppendObserverCB append_observer_cb_;
GroupStartObserverCB group_start_observer_cb_;
@@ -545,11 +527,6 @@ class MEDIA_EXPORT ChunkDemuxer : public Demuxer {
int detected_video_track_count_;
int detected_text_track_count_;
- // Caches whether |media::kMseBufferByPts| feature was enabled at ChunkDemuxer
- // construction time. This makes sure that all buffering for this ChunkDemuxer
- // uses the same behavior. See https://crbug.com/718641.
- const bool buffering_by_pts_;
-
// Callback for reporting the number of bytes appended to this ChunkDemuxer.
BytesReceivedCB bytes_received_cb_;
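The unified stream_ member keeps its GUARDED_BY(lock_) annotation, so Clang's thread-safety analysis still rejects unlocked access. A minimal self-contained sketch of that mechanism (builds with -Wthread-safety; names illustrative):

  #include "base/synchronization/lock.h"
  #include "base/thread_annotations.h"

  class GuardedCounterSketch {
   public:
    void Increment() {
      base::AutoLock auto_lock(lock_);  // Acquiring |lock_| satisfies the
      ++value_;                         // GUARDED_BY contract below.
    }

   private:
    base::Lock lock_;
    int value_ GUARDED_BY(lock_) = 0;  // Warns if touched without |lock_|.
  };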
diff --git a/chromium/media/filters/chunk_demuxer_unittest.cc b/chromium/media/filters/chunk_demuxer_unittest.cc
index 41f3e40f98e..9f36319c138 100644
--- a/chromium/media/filters/chunk_demuxer_unittest.cc
+++ b/chromium/media/filters/chunk_demuxer_unittest.cc
@@ -20,7 +20,6 @@
#include "base/strings/string_split.h"
#include "base/strings/string_util.h"
#include "base/synchronization/waitable_event.h"
-#include "base/test/scoped_feature_list.h"
#include "base/test/scoped_task_environment.h"
#include "media/base/audio_decoder_config.h"
#include "media/base/decoder_buffer.h"
@@ -49,18 +48,9 @@ using ::testing::Return;
using ::testing::SaveArg;
using ::testing::SetArgPointee;
using ::testing::StrictMock;
-using ::testing::Values;
using ::testing::WithParamInterface;
using ::testing::_;
-namespace {
-
-// See https://crbug.com/718641 and kMseBufferByPts. This controls which kind of
-// buffering implementation is constructed and tested.
-enum class BufferingApi { kLegacyByDts, kNewByPts };
-
-} // namespace
-
namespace media {
const uint8_t kTracksHeader[] = {
@@ -158,9 +148,7 @@ static void OnSeekDone_OKExpected(bool* called, PipelineStatus status) {
*called = true;
}
-// Test parameter determines if media::kMseBufferByPts feature should be forced
-// on or off for the test.
-class ChunkDemuxerTest : public ::testing::TestWithParam<BufferingApi> {
+class ChunkDemuxerTest : public ::testing::Test {
public:
// Public method because test cases use it directly.
MOCK_METHOD1(DemuxerInitialized, void(PipelineStatus));
@@ -189,15 +177,6 @@ class ChunkDemuxerTest : public ::testing::TestWithParam<BufferingApi> {
ChunkDemuxerTest()
: did_progress_(false),
append_window_end_for_next_append_(kInfiniteDuration) {
- buffering_api_ = GetParam();
- switch (buffering_api_) {
- case BufferingApi::kLegacyByDts:
- scoped_feature_list_.InitAndDisableFeature(media::kMseBufferByPts);
- break;
- case BufferingApi::kNewByPts:
- scoped_feature_list_.InitAndEnableFeature(media::kMseBufferByPts);
- break;
- }
init_segment_received_cb_ = base::Bind(
&ChunkDemuxerTest::InitSegmentReceived, base::Unretained(this));
CreateNewDemuxer();
@@ -211,8 +190,7 @@ class ChunkDemuxerTest : public ::testing::TestWithParam<BufferingApi> {
Demuxer::EncryptedMediaInitDataCB encrypted_media_init_data_cb =
base::BindRepeating(&ChunkDemuxerTest::OnEncryptedMediaInitData,
base::Unretained(this));
- EXPECT_MEDIA_LOG(
- BufferingByPtsDts(buffering_api_ == BufferingApi::kNewByPts));
+ EXPECT_MEDIA_LOG(ChunkDemuxerCtor());
demuxer_.reset(new ChunkDemuxer(open_cb, progress_cb,
encrypted_media_init_data_cb, &media_log_));
}
@@ -1283,9 +1261,6 @@ class ChunkDemuxerTest : public ::testing::TestWithParam<BufferingApi> {
base::TimeDelta append_window_start_for_next_append_;
base::TimeDelta append_window_end_for_next_append_;
- BufferingApi buffering_api_;
- base::test::ScopedFeatureList scoped_feature_list_;
-
// The size of coded frame data for a WebM SimpleBlock or BlockGroup muxed
// into a test cluster. This defaults to |kBlockSize|, but can be changed to
// test behavior.
@@ -1314,7 +1289,7 @@ class ChunkDemuxerTest : public ::testing::TestWithParam<BufferingApi> {
DISALLOW_COPY_AND_ASSIGN(ChunkDemuxerTest);
};
-TEST_P(ChunkDemuxerTest, Init) {
+TEST_F(ChunkDemuxerTest, Init) {
InSequence s;
// Test no streams, audio-only, video-only, and audio & video scenarios.
@@ -1382,7 +1357,7 @@ TEST_P(ChunkDemuxerTest, Init) {
}
}
-TEST_P(ChunkDemuxerTest, AudioVideoTrackIdsChange) {
+TEST_F(ChunkDemuxerTest, AudioVideoTrackIdsChange) {
// Test with 1 audio and 1 video stream. Send a second init segment in which
// the audio and video track IDs change. Verify that appended buffers before
// and after the second init segment map to the same underlying track buffers.
@@ -1411,7 +1386,7 @@ TEST_P(ChunkDemuxerTest, AudioVideoTrackIdsChange) {
ShutdownDemuxer();
}
-TEST_P(ChunkDemuxerTest, InitSegmentSetsNeedRandomAccessPointFlag) {
+TEST_F(ChunkDemuxerTest, InitSegmentSetsNeedRandomAccessPointFlag) {
// Tests that non-key-frames following an init segment are allowed
// and dropped, as expected if the initialization segment received
// algorithm correctly sets the needs random access point flag to true for all
@@ -1441,7 +1416,7 @@ TEST_P(ChunkDemuxerTest, InitSegmentSetsNeedRandomAccessPointFlag) {
CheckExpectedBuffers(video_stream, "30K 90K");
}
-TEST_P(ChunkDemuxerTest, Shutdown_BeforeAllInitSegmentsAppended) {
+TEST_F(ChunkDemuxerTest, Shutdown_BeforeAllInitSegmentsAppended) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(&host_, base::Bind(&ChunkDemuxerTest::DemuxerInitialized,
base::Unretained(this)));
@@ -1458,7 +1433,7 @@ TEST_P(ChunkDemuxerTest, Shutdown_BeforeAllInitSegmentsAppended) {
// Verifies that all streams waiting for data receive an end of stream
// buffer when Shutdown() is called.
-TEST_P(ChunkDemuxerTest, Shutdown_EndOfStreamWhileWaitingForData) {
+TEST_F(ChunkDemuxerTest, Shutdown_EndOfStreamWhileWaitingForData) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
DemuxerStream* audio_stream = GetStream(DemuxerStream::AUDIO);
@@ -1481,7 +1456,7 @@ TEST_P(ChunkDemuxerTest, Shutdown_EndOfStreamWhileWaitingForData) {
// Test that Seek() completes successfully when the first cluster
// arrives.
-TEST_P(ChunkDemuxerTest, AppendDataAfterSeek) {
+TEST_F(ChunkDemuxerTest, AppendDataAfterSeek) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
ASSERT_TRUE(AppendCluster(kDefaultFirstCluster()));
@@ -1503,7 +1478,7 @@ TEST_P(ChunkDemuxerTest, AppendDataAfterSeek) {
}
// Test that parsing errors are handled for clusters appended after init.
-TEST_P(ChunkDemuxerTest, ErrorWhileParsingClusterAfterInit) {
+TEST_F(ChunkDemuxerTest, ErrorWhileParsingClusterAfterInit) {
InSequence s;
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
@@ -1515,7 +1490,7 @@ TEST_P(ChunkDemuxerTest, ErrorWhileParsingClusterAfterInit) {
// Test the case where a Seek() is requested while the parser
// is in the middle of a cluster. This is to verify that the parser
// does not reset itself on a seek.
-TEST_P(ChunkDemuxerTest, SeekWhileParsingCluster) {
+TEST_F(ChunkDemuxerTest, SeekWhileParsingCluster) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
InSequence s;
@@ -1546,7 +1521,7 @@ TEST_P(ChunkDemuxerTest, SeekWhileParsingCluster) {
}
// Test the case where AppendData() is called before Init().
-TEST_P(ChunkDemuxerTest, AppendDataBeforeInit) {
+TEST_F(ChunkDemuxerTest, AppendDataBeforeInit) {
std::unique_ptr<uint8_t[]> info_tracks;
int info_tracks_size = 0;
CreateInitSegment(HAS_AUDIO | HAS_VIDEO,
@@ -1558,7 +1533,7 @@ TEST_P(ChunkDemuxerTest, AppendDataBeforeInit) {
}
// Make sure Read() callbacks are dispatched with the proper data.
-TEST_P(ChunkDemuxerTest, Read) {
+TEST_F(ChunkDemuxerTest, Read) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
ASSERT_TRUE(AppendCluster(kDefaultFirstCluster()));
@@ -1576,7 +1551,7 @@ TEST_P(ChunkDemuxerTest, Read) {
EXPECT_TRUE(video_read_done);
}
-TEST_P(ChunkDemuxerTest, OutOfOrderClusters) {
+TEST_F(ChunkDemuxerTest, OutOfOrderClusters) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
DemuxerStream* audio_stream = GetStream(DemuxerStream::AUDIO);
DemuxerStream* video_stream = GetStream(DemuxerStream::VIDEO);
@@ -1612,7 +1587,7 @@ TEST_P(ChunkDemuxerTest, OutOfOrderClusters) {
CheckExpectedBuffers(video_stream, "45K");
}
-TEST_P(ChunkDemuxerTest, NonMonotonicButAboveClusterTimecode) {
+TEST_F(ChunkDemuxerTest, NonMonotonicButAboveClusterTimecode) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
ASSERT_TRUE(AppendCluster(kDefaultFirstCluster()));
@@ -1639,7 +1614,7 @@ TEST_P(ChunkDemuxerTest, NonMonotonicButAboveClusterTimecode) {
&timestamp_offset_map_[kSourceId]));
}
-TEST_P(ChunkDemuxerTest, BackwardsAndBeforeClusterTimecode) {
+TEST_F(ChunkDemuxerTest, BackwardsAndBeforeClusterTimecode) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
ASSERT_TRUE(AppendCluster(kDefaultFirstCluster()));
@@ -1666,7 +1641,7 @@ TEST_P(ChunkDemuxerTest, BackwardsAndBeforeClusterTimecode) {
&timestamp_offset_map_[kSourceId]));
}
-TEST_P(ChunkDemuxerTest, PerStreamMonotonicallyIncreasingTimestamps) {
+TEST_F(ChunkDemuxerTest, PerStreamMonotonicallyIncreasingTimestamps) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
ASSERT_TRUE(AppendCluster(kDefaultFirstCluster()));
@@ -1688,7 +1663,7 @@ TEST_P(ChunkDemuxerTest, PerStreamMonotonicallyIncreasingTimestamps) {
// Test the case where a cluster is passed to AppendCluster() before
// INFO & TRACKS data.
-TEST_P(ChunkDemuxerTest, ClusterBeforeInitSegment) {
+TEST_F(ChunkDemuxerTest, ClusterBeforeInitSegment) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(&host_,
NewExpectedStatusCB(CHUNK_DEMUXER_ERROR_APPEND_FAILED));
@@ -1701,7 +1676,7 @@ TEST_P(ChunkDemuxerTest, ClusterBeforeInitSegment) {
}
// Test cases where we get a MarkEndOfStream() call during initialization.
-TEST_P(ChunkDemuxerTest, EOSDuringInit) {
+TEST_F(ChunkDemuxerTest, EOSDuringInit) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(&host_,
NewExpectedStatusCB(DEMUXER_ERROR_COULD_NOT_OPEN));
@@ -1709,7 +1684,7 @@ TEST_P(ChunkDemuxerTest, EOSDuringInit) {
MarkEndOfStream(PIPELINE_OK);
}
-TEST_P(ChunkDemuxerTest, EndOfStreamWithNoAppend) {
+TEST_F(ChunkDemuxerTest, EndOfStreamWithNoAppend) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(&host_,
NewExpectedStatusCB(DEMUXER_ERROR_COULD_NOT_OPEN));
@@ -1727,7 +1702,7 @@ TEST_P(ChunkDemuxerTest, EndOfStreamWithNoAppend) {
demuxer_.reset();
}
-TEST_P(ChunkDemuxerTest, EndOfStreamWithNoMediaAppend) {
+TEST_F(ChunkDemuxerTest, EndOfStreamWithNoMediaAppend) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
CheckExpectedRanges("{ }");
@@ -1735,7 +1710,7 @@ TEST_P(ChunkDemuxerTest, EndOfStreamWithNoMediaAppend) {
CheckExpectedRanges("{ }");
}
-TEST_P(ChunkDemuxerTest, DecodeErrorEndOfStream) {
+TEST_F(ChunkDemuxerTest, DecodeErrorEndOfStream) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
ASSERT_TRUE(AppendCluster(kDefaultFirstCluster()));
@@ -1747,7 +1722,7 @@ TEST_P(ChunkDemuxerTest, DecodeErrorEndOfStream) {
CheckExpectedRanges(kDefaultFirstClusterRange);
}
-TEST_P(ChunkDemuxerTest, NetworkErrorEndOfStream) {
+TEST_F(ChunkDemuxerTest, NetworkErrorEndOfStream) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
ASSERT_TRUE(AppendCluster(kDefaultFirstCluster()));
@@ -1805,7 +1780,7 @@ class EndOfStreamHelper {
// Make sure that all pending reads that we don't have media data for get an
// "end of stream" buffer when MarkEndOfStream() is called.
-TEST_P(ChunkDemuxerTest, EndOfStreamWithPendingReads) {
+TEST_F(ChunkDemuxerTest, EndOfStreamWithPendingReads) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
ASSERT_TRUE(AppendCluster(GenerateCluster(0, 2)));
@@ -1842,7 +1817,7 @@ TEST_P(ChunkDemuxerTest, EndOfStreamWithPendingReads) {
// Make sure that all Read() calls after we get a MarkEndOfStream()
// call return an "end of stream" buffer.
-TEST_P(ChunkDemuxerTest, ReadsAfterEndOfStream) {
+TEST_F(ChunkDemuxerTest, ReadsAfterEndOfStream) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
ASSERT_TRUE(AppendCluster(GenerateCluster(0, 2)));
@@ -1883,7 +1858,7 @@ TEST_P(ChunkDemuxerTest, ReadsAfterEndOfStream) {
end_of_stream_helper_3.CheckIfReadDonesWereCalled(true);
}
-TEST_P(ChunkDemuxerTest, EndOfStreamDuringCanceledSeek) {
+TEST_F(ChunkDemuxerTest, EndOfStreamDuringCanceledSeek) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
ASSERT_TRUE(AppendCluster(0, 10));
@@ -1914,7 +1889,7 @@ TEST_P(ChunkDemuxerTest, EndOfStreamDuringCanceledSeek) {
}
// Verify buffered range change behavior for audio/video/text tracks.
-TEST_P(ChunkDemuxerTest, EndOfStreamRangeChanges) {
+TEST_F(ChunkDemuxerTest, EndOfStreamRangeChanges) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendMuxedCluster(MuxedStreamInfo(kVideoTrackNum, "0K 33", 33),
@@ -1929,7 +1904,7 @@ TEST_P(ChunkDemuxerTest, EndOfStreamRangeChanges) {
}
// Make sure AppendData() will accept elements that span multiple calls.
-TEST_P(ChunkDemuxerTest, AppendingInPieces) {
+TEST_F(ChunkDemuxerTest, AppendingInPieces) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(&host_,
CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK));
@@ -1965,7 +1940,7 @@ TEST_P(ChunkDemuxerTest, AppendingInPieces) {
GenerateExpectedReads(0, 9);
}
-TEST_P(ChunkDemuxerTest, WebMFile_AudioAndVideo) {
+TEST_F(ChunkDemuxerTest, WebMFile_AudioAndVideo) {
struct BufferTimestamps buffer_timestamps[] = {
{0, 0},
{33, 3},
@@ -1987,7 +1962,7 @@ TEST_P(ChunkDemuxerTest, WebMFile_AudioAndVideo) {
EXPECT_EQ(212949, demuxer_->GetMemoryUsage());
}
-TEST_P(ChunkDemuxerTest, WebMFile_LiveAudioAndVideo) {
+TEST_F(ChunkDemuxerTest, WebMFile_LiveAudioAndVideo) {
struct BufferTimestamps buffer_timestamps[] = {
{0, 0},
{33, 3},
@@ -2009,7 +1984,7 @@ TEST_P(ChunkDemuxerTest, WebMFile_LiveAudioAndVideo) {
EXPECT_EQ(212949, demuxer_->GetMemoryUsage());
}
-TEST_P(ChunkDemuxerTest, WebMFile_AudioOnly) {
+TEST_F(ChunkDemuxerTest, WebMFile_AudioOnly) {
struct BufferTimestamps buffer_timestamps[] = {
{kSkip, 0},
{kSkip, 3},
@@ -2032,7 +2007,7 @@ TEST_P(ChunkDemuxerTest, WebMFile_AudioOnly) {
EXPECT_EQ(18624, demuxer_->GetMemoryUsage());
}
-TEST_P(ChunkDemuxerTest, WebMFile_VideoOnly) {
+TEST_F(ChunkDemuxerTest, WebMFile_VideoOnly) {
struct BufferTimestamps buffer_timestamps[] = {
{0, kSkip},
{33, kSkip},
@@ -2054,7 +2029,7 @@ TEST_P(ChunkDemuxerTest, WebMFile_VideoOnly) {
EXPECT_EQ(194325, demuxer_->GetMemoryUsage());
}
-TEST_P(ChunkDemuxerTest, WebMFile_AltRefFrames) {
+TEST_F(ChunkDemuxerTest, WebMFile_AltRefFrames) {
struct BufferTimestamps buffer_timestamps[] = {
{0, 0},
{33, 3},
@@ -2075,7 +2050,7 @@ TEST_P(ChunkDemuxerTest, WebMFile_AltRefFrames) {
}
// Verify that we output buffers before the entire cluster has been parsed.
-TEST_P(ChunkDemuxerTest, IncrementalClusterParsing) {
+TEST_F(ChunkDemuxerTest, IncrementalClusterParsing) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
std::unique_ptr<Cluster> cluster(GenerateCluster(0, 6));
@@ -2127,7 +2102,7 @@ TEST_P(ChunkDemuxerTest, IncrementalClusterParsing) {
EXPECT_TRUE(video_read_done);
}
-TEST_P(ChunkDemuxerTest, ParseErrorDuringInit) {
+TEST_F(ChunkDemuxerTest, ParseErrorDuringInit) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
&host_,
@@ -2142,7 +2117,7 @@ TEST_P(ChunkDemuxerTest, ParseErrorDuringInit) {
append_window_end_for_next_append_, &timestamp_offset_map_[kSourceId]));
}
-TEST_P(ChunkDemuxerTest, AVHeadersWithAudioOnlyType) {
+TEST_F(ChunkDemuxerTest, AVHeadersWithAudioOnlyType) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
&host_,
@@ -2156,7 +2131,7 @@ TEST_P(ChunkDemuxerTest, AVHeadersWithAudioOnlyType) {
ASSERT_FALSE(AppendInitSegment(HAS_AUDIO | HAS_VIDEO));
}
-TEST_P(ChunkDemuxerTest, AVHeadersWithVideoOnlyType) {
+TEST_F(ChunkDemuxerTest, AVHeadersWithVideoOnlyType) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
&host_,
@@ -2172,7 +2147,7 @@ TEST_P(ChunkDemuxerTest, AVHeadersWithVideoOnlyType) {
ASSERT_FALSE(AppendInitSegment(HAS_AUDIO | HAS_VIDEO));
}
-TEST_P(ChunkDemuxerTest, AudioOnlyHeaderWithAVType) {
+TEST_F(ChunkDemuxerTest, AudioOnlyHeaderWithAVType) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
&host_,
@@ -2188,7 +2163,7 @@ TEST_P(ChunkDemuxerTest, AudioOnlyHeaderWithAVType) {
ASSERT_FALSE(AppendInitSegment(HAS_AUDIO));
}
-TEST_P(ChunkDemuxerTest, VideoOnlyHeaderWithAVType) {
+TEST_F(ChunkDemuxerTest, VideoOnlyHeaderWithAVType) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
&host_,
@@ -2204,7 +2179,7 @@ TEST_P(ChunkDemuxerTest, VideoOnlyHeaderWithAVType) {
ASSERT_FALSE(AppendInitSegment(HAS_VIDEO));
}
-TEST_P(ChunkDemuxerTest, MultipleHeaders) {
+TEST_F(ChunkDemuxerTest, MultipleHeaders) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
ASSERT_TRUE(AppendCluster(kDefaultFirstCluster()));
@@ -2218,7 +2193,7 @@ TEST_P(ChunkDemuxerTest, MultipleHeaders) {
GenerateExpectedReads(0, 9);
}
-TEST_P(ChunkDemuxerTest, AddSeparateSourcesForAudioAndVideo) {
+TEST_F(ChunkDemuxerTest, AddSeparateSourcesForAudioAndVideo) {
std::string audio_id = "audio1";
std::string video_id = "video1";
ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
@@ -2234,7 +2209,7 @@ TEST_P(ChunkDemuxerTest, AddSeparateSourcesForAudioAndVideo) {
GenerateVideoStreamExpectedReads(0, 4);
}
-TEST_P(ChunkDemuxerTest, AddSeparateSourcesForAudioAndVideoText) {
+TEST_F(ChunkDemuxerTest, AddSeparateSourcesForAudioAndVideoText) {
std::string audio_id = "audio1";
std::string video_id = "video1";
@@ -2251,7 +2226,7 @@ TEST_P(ChunkDemuxerTest, AddSeparateSourcesForAudioAndVideoText) {
GenerateVideoStreamExpectedReads(0, 4);
}
-TEST_P(ChunkDemuxerTest, AddIdFailures) {
+TEST_F(ChunkDemuxerTest, AddIdFailures) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(&host_,
CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK));
@@ -2270,7 +2245,7 @@ TEST_P(ChunkDemuxerTest, AddIdFailures) {
}
// Test that Read() calls after a RemoveId() return "end of stream" buffers.
-TEST_P(ChunkDemuxerTest, RemoveId) {
+TEST_F(ChunkDemuxerTest, RemoveId) {
std::string audio_id = "audio1";
std::string video_id = "video1";
ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
@@ -2305,7 +2280,7 @@ TEST_P(ChunkDemuxerTest, RemoveId) {
// Test that removing an ID immediately after adding it does not interfere with
// quota for new IDs in the future.
-TEST_P(ChunkDemuxerTest, RemoveAndAddId) {
+TEST_F(ChunkDemuxerTest, RemoveAndAddId) {
demuxer_->Initialize(
&host_, base::BindRepeating(&ChunkDemuxerTest::DemuxerInitialized,
base::Unretained(this)));
@@ -2318,7 +2293,7 @@ TEST_P(ChunkDemuxerTest, RemoveAndAddId) {
ASSERT_TRUE(AddId(audio_id_2, HAS_AUDIO) == ChunkDemuxer::kOk);
}
-TEST_P(ChunkDemuxerTest, SeekCanceled) {
+TEST_F(ChunkDemuxerTest, SeekCanceled) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
// Append cluster at the beginning of the stream.
@@ -2348,7 +2323,7 @@ TEST_P(ChunkDemuxerTest, SeekCanceled) {
GenerateExpectedReads(0, 4);
}
-TEST_P(ChunkDemuxerTest, SeekCanceledWhileWaitingForSeek) {
+TEST_F(ChunkDemuxerTest, SeekCanceledWhileWaitingForSeek) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
// Append cluster at the beginning of the stream.
@@ -2377,7 +2352,7 @@ TEST_P(ChunkDemuxerTest, SeekCanceledWhileWaitingForSeek) {
}
// Test that Seek() successfully seeks to all source IDs.
-TEST_P(ChunkDemuxerTest, SeekAudioAndVideoSources) {
+TEST_F(ChunkDemuxerTest, SeekAudioAndVideoSources) {
std::string audio_id = "audio1";
std::string video_id = "video1";
ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
@@ -2434,7 +2409,7 @@ TEST_P(ChunkDemuxerTest, SeekAudioAndVideoSources) {
// is called before data is available for that seek point.
// This scenario might be useful if seeking past the end of stream
// of either audio or video (or both).
-TEST_P(ChunkDemuxerTest, EndOfStreamAfterPastEosSeek) {
+TEST_F(ChunkDemuxerTest, EndOfStreamAfterPastEosSeek) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendMuxedCluster(
@@ -2467,7 +2442,7 @@ TEST_P(ChunkDemuxerTest, EndOfStreamAfterPastEosSeek) {
// Test that EndOfStream is ignored if coming during a pending seek
// whose seek time is before some existing ranges.
-TEST_P(ChunkDemuxerTest, EndOfStreamDuringPendingSeek) {
+TEST_F(ChunkDemuxerTest, EndOfStreamDuringPendingSeek) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendMuxedCluster(
@@ -2509,7 +2484,7 @@ TEST_P(ChunkDemuxerTest, EndOfStreamDuringPendingSeek) {
}
// Test ranges in an audio-only stream.
-TEST_P(ChunkDemuxerTest, GetBufferedRanges_AudioIdOnly) {
+TEST_F(ChunkDemuxerTest, GetBufferedRanges_AudioIdOnly) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
// Test a simple cluster.
@@ -2526,7 +2501,7 @@ TEST_P(ChunkDemuxerTest, GetBufferedRanges_AudioIdOnly) {
}
// Test ranges in a video-only stream.
-TEST_P(ChunkDemuxerTest, GetBufferedRanges_VideoIdOnly) {
+TEST_F(ChunkDemuxerTest, GetBufferedRanges_VideoIdOnly) {
ASSERT_TRUE(InitDemuxer(HAS_VIDEO));
// Test a simple cluster.
@@ -2542,7 +2517,7 @@ TEST_P(ChunkDemuxerTest, GetBufferedRanges_VideoIdOnly) {
CheckExpectedRanges("{ [0,132) [200,299) }");
}
-TEST_P(ChunkDemuxerTest, GetBufferedRanges_SeparateStreams) {
+TEST_F(ChunkDemuxerTest, GetBufferedRanges_SeparateStreams) {
std::string audio_id = "audio1";
std::string video_id = "video1";
ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
@@ -2632,7 +2607,7 @@ TEST_P(ChunkDemuxerTest, GetBufferedRanges_SeparateStreams) {
"{ [0,23) [320,400) [620,670) [920,950) [1220,1250) }");
}
-TEST_P(ChunkDemuxerTest, GetBufferedRanges_AudioVideo) {
+TEST_F(ChunkDemuxerTest, GetBufferedRanges_AudioVideo) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
// Audio block: 0 -> 23
@@ -2721,7 +2696,7 @@ TEST_P(ChunkDemuxerTest, GetBufferedRanges_AudioVideo) {
// Once MarkEndOfStream() is called, GetBufferedRanges should not cut off any
// over-hanging tails at the end of the ranges as this is likely due to block
// duration differences.
-TEST_P(ChunkDemuxerTest, GetBufferedRanges_EndOfStream) {
+TEST_F(ChunkDemuxerTest, GetBufferedRanges_EndOfStream) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendMuxedCluster(MuxedStreamInfo(kAudioTrackNum, "0K 23K", 23),
@@ -2777,7 +2752,7 @@ TEST_P(ChunkDemuxerTest, GetBufferedRanges_EndOfStream) {
CheckExpectedRanges("{ [0,46) [200,266) [332,398) }");
}
-TEST_P(ChunkDemuxerTest, DifferentStreamTimecodes) {
+TEST_F(ChunkDemuxerTest, DifferentStreamTimecodes) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
// Create a cluster where the video timecode begins 25ms after the audio.
@@ -2795,7 +2770,7 @@ TEST_P(ChunkDemuxerTest, DifferentStreamTimecodes) {
GenerateExpectedReads(5025, 5000, 8);
}
-TEST_P(ChunkDemuxerTest, DifferentStreamTimecodesSeparateSources) {
+TEST_F(ChunkDemuxerTest, DifferentStreamTimecodesSeparateSources) {
std::string audio_id = "audio1";
std::string video_id = "video1";
ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
@@ -2817,7 +2792,7 @@ TEST_P(ChunkDemuxerTest, DifferentStreamTimecodesSeparateSources) {
GenerateVideoStreamExpectedReads(30, 4);
}
-TEST_P(ChunkDemuxerTest, DifferentStreamTimecodesOutOfRange) {
+TEST_F(ChunkDemuxerTest, DifferentStreamTimecodesOutOfRange) {
std::string audio_id = "audio1";
std::string video_id = "video1";
ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
@@ -2842,7 +2817,7 @@ TEST_P(ChunkDemuxerTest, DifferentStreamTimecodesOutOfRange) {
ExpectEndOfStream(DemuxerStream::VIDEO);
}
-TEST_P(ChunkDemuxerTest, CodecPrefixMatching) {
+TEST_F(ChunkDemuxerTest, CodecPrefixMatching) {
demuxer_->Initialize(
&host_, base::BindRepeating(&ChunkDemuxerTest::DemuxerInitialized,
base::Unretained(this)));
@@ -2866,7 +2841,7 @@ TEST_P(ChunkDemuxerTest, CodecPrefixMatching) {
// Test codec IDs that are not compliant with RFC6381, but have been
// seen in the wild.
-TEST_P(ChunkDemuxerTest, CodecIDsThatAreNotRFC6381Compliant) {
+TEST_F(ChunkDemuxerTest, CodecIDsThatAreNotRFC6381Compliant) {
ChunkDemuxer::Status expected = ChunkDemuxer::kNotSupported;
const char* codec_ids[] = {
@@ -2896,7 +2871,7 @@ TEST_P(ChunkDemuxerTest, CodecIDsThatAreNotRFC6381Compliant) {
}
}
-TEST_P(ChunkDemuxerTest, EndOfStreamStillSetAfterSeek) {
+TEST_F(ChunkDemuxerTest, EndOfStreamStillSetAfterSeek) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
EXPECT_CALL(host_, SetDuration(_))
@@ -2933,7 +2908,7 @@ TEST_P(ChunkDemuxerTest, EndOfStreamStillSetAfterSeek) {
EXPECT_EQ(kLastVideoTimestamp, last_timestamp);
}
-TEST_P(ChunkDemuxerTest, GetBufferedRangesBeforeInitSegment) {
+TEST_F(ChunkDemuxerTest, GetBufferedRangesBeforeInitSegment) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(&host_, base::Bind(&ChunkDemuxerTest::DemuxerInitialized,
base::Unretained(this)));
@@ -2946,7 +2921,7 @@ TEST_P(ChunkDemuxerTest, GetBufferedRangesBeforeInitSegment) {
// Test that Seek() completes successfully when the first cluster
// arrives.
-TEST_P(ChunkDemuxerTest, EndOfStreamDuringSeek) {
+TEST_F(ChunkDemuxerTest, EndOfStreamDuringSeek) {
InSequence s;
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
@@ -2973,7 +2948,7 @@ TEST_P(ChunkDemuxerTest, EndOfStreamDuringSeek) {
end_of_stream_helper.CheckIfReadDonesWereCalled(true);
}
-TEST_P(ChunkDemuxerTest, ConfigChange_Video) {
+TEST_F(ChunkDemuxerTest, ConfigChange_Video) {
InSequence s;
ASSERT_TRUE(InitDemuxerWithConfigChangeData());
@@ -3020,7 +2995,7 @@ TEST_P(ChunkDemuxerTest, ConfigChange_Video) {
ASSERT_EQ(status, DemuxerStream::kOk);
}
-TEST_P(ChunkDemuxerTest, ConfigChange_Audio) {
+TEST_F(ChunkDemuxerTest, ConfigChange_Audio) {
InSequence s;
ASSERT_TRUE(InitDemuxerWithConfigChangeData());
@@ -3062,7 +3037,7 @@ TEST_P(ChunkDemuxerTest, ConfigChange_Audio) {
EXPECT_EQ(last_timestamp.InMilliseconds(), 2744);
}
-TEST_P(ChunkDemuxerTest, ConfigChange_Seek) {
+TEST_F(ChunkDemuxerTest, ConfigChange_Seek) {
InSequence s;
ASSERT_TRUE(InitDemuxerWithConfigChangeData());
@@ -3109,7 +3084,7 @@ TEST_P(ChunkDemuxerTest, ConfigChange_Seek) {
ASSERT_TRUE(video_config_1.Matches(video->video_decoder_config()));
}
-TEST_P(ChunkDemuxerTest, TimestampPositiveOffset) {
+TEST_F(ChunkDemuxerTest, TimestampPositiveOffset) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
ASSERT_TRUE(SetTimestampOffset(kSourceId, base::TimeDelta::FromSeconds(30)));
@@ -3120,7 +3095,7 @@ TEST_P(ChunkDemuxerTest, TimestampPositiveOffset) {
GenerateExpectedReads(30000, 2);
}
-TEST_P(ChunkDemuxerTest, TimestampNegativeOffset) {
+TEST_F(ChunkDemuxerTest, TimestampNegativeOffset) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
ASSERT_TRUE(SetTimestampOffset(kSourceId, base::TimeDelta::FromSeconds(-1)));
@@ -3129,7 +3104,7 @@ TEST_P(ChunkDemuxerTest, TimestampNegativeOffset) {
GenerateExpectedReads(0, 2);
}
-TEST_P(ChunkDemuxerTest, TimestampOffsetSeparateStreams) {
+TEST_F(ChunkDemuxerTest, TimestampOffsetSeparateStreams) {
std::string audio_id = "audio1";
std::string video_id = "video1";
ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
@@ -3167,7 +3142,7 @@ TEST_P(ChunkDemuxerTest, TimestampOffsetSeparateStreams) {
GenerateAudioStreamExpectedReads(27300, 4);
}
-TEST_P(ChunkDemuxerTest, IsParsingMediaSegmentMidMediaSegment) {
+TEST_F(ChunkDemuxerTest, IsParsingMediaSegmentMidMediaSegment) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
std::unique_ptr<Cluster> cluster = GenerateCluster(0, 2);
@@ -3194,7 +3169,7 @@ const char* kMp2tMimeType = "video/mp2t";
const char* kMp2tCodecs = "mp4a.40.2,avc1.640028";
}
-TEST_P(ChunkDemuxerTest, EmitBuffersDuringAbort) {
+TEST_F(ChunkDemuxerTest, EmitBuffersDuringAbort) {
EXPECT_CALL(*this, DemuxerOpened());
EXPECT_MEDIA_LOG(FoundStream("audio"));
EXPECT_MEDIA_LOG(CodecName("audio", "aac"));
@@ -3222,18 +3197,14 @@ TEST_P(ChunkDemuxerTest, EmitBuffersDuringAbort) {
EXPECT_CALL(*this, InitSegmentReceivedMock(_));
// This mp2ts file contains buffers which can trigger media logs related to
- // splicing. Related logic occurs more deterministically (and frequently) when
- // buffering ByPts; we append in small chunks to force the same logic when
- // buffering by either Pts or Dts here.
+ // splicing, especially when appending in small chunks.
EXPECT_MEDIA_LOG(TrimmedSpliceOverlap(1655422, 1655419, 23217));
EXPECT_MEDIA_LOG(SkippingSpliceTooLittleOverlap(1957277, 4));
EXPECT_MEDIA_LOG(SkippingSpliceTooLittleOverlap(2514555, 6));
EXPECT_MEDIA_LOG(SkippingSpliceTooLittleOverlap(3071833, 6));
EXPECT_MEDIA_LOG(SkippingSpliceTooLittleOverlap(3652333, 6));
- // Append the media in small chunks. 1 byte chunks would cause test timeout;
- // 1k chunks appear to be small enough to let ByDts meet the logging
- // expectations of the more deterministic ByPts logic, simplifying this test.
+ // Append the media in small chunks.
size_t appended_bytes = 0;
const size_t chunk_size = 1024;
while (appended_bytes < buffer->data_size()) {
@@ -3264,7 +3235,7 @@ TEST_P(ChunkDemuxerTest, EmitBuffersDuringAbort) {
EXPECT_GT(range_after_abort.end(0), range_before_abort.end(0));
}
-TEST_P(ChunkDemuxerTest, SeekCompleteDuringAbort) {
+TEST_F(ChunkDemuxerTest, SeekCompleteDuringAbort) {
EXPECT_CALL(*this, DemuxerOpened());
EXPECT_MEDIA_LOG(FoundStream("audio"));
EXPECT_MEDIA_LOG(CodecName("audio", "aac"));
@@ -3292,18 +3263,14 @@ TEST_P(ChunkDemuxerTest, SeekCompleteDuringAbort) {
EXPECT_CALL(*this, InitSegmentReceivedMock(_));
// This mp2ts file contains buffers which can trigger media logs related to
- // splicing. Related logic occurs more deterministically (and frequently) when
- // buffering ByPts; we append in small chunks to force the same logic when
- // buffering by either Pts or Dts here.
+ // splicing, especially when appending in small chunks.
EXPECT_MEDIA_LOG(TrimmedSpliceOverlap(1655422, 1655419, 23217));
EXPECT_MEDIA_LOG(SkippingSpliceTooLittleOverlap(1957277, 4));
EXPECT_MEDIA_LOG(SkippingSpliceTooLittleOverlap(2514555, 6));
EXPECT_MEDIA_LOG(SkippingSpliceTooLittleOverlap(3071833, 6));
EXPECT_MEDIA_LOG(SkippingSpliceTooLittleOverlap(3652333, 6));
- // Append the media in small chunks. 1 byte chunks would cause test timeout;
- // 1k chunks appear to be small enough to let ByDts meet the logging
- // expectations of the more deterministic ByPts logic, simplifying this test.
+ // Append the media in small chunks.
size_t appended_bytes = 0;
const size_t chunk_size = 1024;
while (appended_bytes < buffer->data_size()) {
@@ -3332,7 +3299,7 @@ TEST_P(ChunkDemuxerTest, SeekCompleteDuringAbort) {
#endif
#endif
-TEST_P(ChunkDemuxerTest, WebMIsParsingMediaSegmentDetection) {
+TEST_F(ChunkDemuxerTest, WebMIsParsingMediaSegmentDetection) {
const uint8_t kBuffer[] = {
// CLUSTER (size = 10)
0x1F, 0x43, 0xB6, 0x75, 0x8A,
@@ -3411,7 +3378,7 @@ TEST_P(ChunkDemuxerTest, WebMIsParsingMediaSegmentDetection) {
}
}
-TEST_P(ChunkDemuxerTest, DurationChange) {
+TEST_F(ChunkDemuxerTest, DurationChange) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
const int kStreamDuration = kDefaultDuration().InMilliseconds();
@@ -3447,7 +3414,7 @@ TEST_P(ChunkDemuxerTest, DurationChange) {
CheckExpectedRanges("{ [201191,201290) }");
}
-TEST_P(ChunkDemuxerTest, DurationChangeTimestampOffset) {
+TEST_F(ChunkDemuxerTest, DurationChangeTimestampOffset) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
ASSERT_TRUE(SetTimestampOffset(kSourceId, kDefaultDuration()));
EXPECT_CALL(host_, SetDuration(
@@ -3456,7 +3423,7 @@ TEST_P(ChunkDemuxerTest, DurationChangeTimestampOffset) {
ASSERT_TRUE(AppendCluster(GenerateCluster(0, 4)));
}
-TEST_P(ChunkDemuxerTest, EndOfStreamTruncateDuration) {
+TEST_F(ChunkDemuxerTest, EndOfStreamTruncateDuration) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
ASSERT_TRUE(AppendCluster(kDefaultFirstCluster()));
@@ -3466,12 +3433,12 @@ TEST_P(ChunkDemuxerTest, EndOfStreamTruncateDuration) {
MarkEndOfStream(PIPELINE_OK);
}
-TEST_P(ChunkDemuxerTest, ZeroLengthAppend) {
+TEST_F(ChunkDemuxerTest, ZeroLengthAppend) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
ASSERT_TRUE(AppendData(NULL, 0));
}
-TEST_P(ChunkDemuxerTest, AppendAfterEndOfStream) {
+TEST_F(ChunkDemuxerTest, AppendAfterEndOfStream) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
EXPECT_CALL(host_, SetDuration(_))
@@ -3489,7 +3456,7 @@ TEST_P(ChunkDemuxerTest, AppendAfterEndOfStream) {
// Test receiving a Shutdown() call before we get an Initialize()
// call. This can happen if the video element gets destroyed before
// the pipeline has a chance to initialize the demuxer.
-TEST_P(ChunkDemuxerTest, Shutdown_BeforeInitialize) {
+TEST_F(ChunkDemuxerTest, Shutdown_BeforeInitialize) {
demuxer_->Shutdown();
demuxer_->Initialize(&host_, CreateInitDoneCB(DEMUXER_ERROR_COULD_NOT_OPEN));
base::RunLoop().RunUntilIdle();
@@ -3497,7 +3464,7 @@ TEST_P(ChunkDemuxerTest, Shutdown_BeforeInitialize) {
// Verifies that signaling end of stream while stalled at a gap
// boundary does not trigger end of stream buffers to be returned.
-TEST_P(ChunkDemuxerTest, EndOfStreamWhileWaitingForGapToBeFilled) {
+TEST_F(ChunkDemuxerTest, EndOfStreamWhileWaitingForGapToBeFilled) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
ASSERT_TRUE(AppendCluster(0, 10));
@@ -3560,7 +3527,7 @@ TEST_P(ChunkDemuxerTest, EndOfStreamWhileWaitingForGapToBeFilled) {
EXPECT_TRUE(video_read_done);
}
-TEST_P(ChunkDemuxerTest, CanceledSeekDuringInitialPreroll) {
+TEST_F(ChunkDemuxerTest, CanceledSeekDuringInitialPreroll) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
// Cancel preroll.
@@ -3574,7 +3541,7 @@ TEST_P(ChunkDemuxerTest, CanceledSeekDuringInitialPreroll) {
ASSERT_TRUE(AppendCluster(seek_time.InMilliseconds(), 10));
}
-TEST_P(ChunkDemuxerTest, SetMemoryLimitType) {
+TEST_F(ChunkDemuxerTest, SetMemoryLimitType) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
// Set different memory limits for audio and video.
@@ -3614,7 +3581,7 @@ TEST_P(ChunkDemuxerTest, SetMemoryLimitType) {
CheckExpectedRanges(DemuxerStream::VIDEO, "{ [1000,1165) }");
}
-TEST_P(ChunkDemuxerTest, GCDuringSeek_SingleRange_SeekForward) {
+TEST_F(ChunkDemuxerTest, GCDuringSeek_SingleRange_SeekForward) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
demuxer_->SetMemoryLimitsForTest(DemuxerStream::AUDIO, 10 * block_size_);
// Append some data at position 1000ms
@@ -3633,7 +3600,7 @@ TEST_P(ChunkDemuxerTest, GCDuringSeek_SingleRange_SeekForward) {
CheckExpectedRanges("{ [1115,1230) [2000,2115) }");
}
-TEST_P(ChunkDemuxerTest, GCDuringSeek_SingleRange_SeekBack) {
+TEST_F(ChunkDemuxerTest, GCDuringSeek_SingleRange_SeekBack) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
demuxer_->SetMemoryLimitsForTest(DemuxerStream::AUDIO, 10 * block_size_);
// Append some data at position 1000ms
@@ -3653,7 +3620,7 @@ TEST_P(ChunkDemuxerTest, GCDuringSeek_SingleRange_SeekBack) {
CheckExpectedRanges("{ [0,115) [1115,1230) }");
}
-TEST_P(ChunkDemuxerTest, GCDuringSeek_MultipleRanges_SeekForward) {
+TEST_F(ChunkDemuxerTest, GCDuringSeek_MultipleRanges_SeekForward) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
demuxer_->SetMemoryLimitsForTest(DemuxerStream::AUDIO, 10 * block_size_);
// Append some data at position 1000ms then at 2000ms
@@ -3673,7 +3640,7 @@ TEST_P(ChunkDemuxerTest, GCDuringSeek_MultipleRanges_SeekForward) {
CheckExpectedRanges("{ [2069,2115) [3000,3115) }");
}
-TEST_P(ChunkDemuxerTest, GCDuringSeek_MultipleRanges_SeekInbetween1) {
+TEST_F(ChunkDemuxerTest, GCDuringSeek_MultipleRanges_SeekInbetween1) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
demuxer_->SetMemoryLimitsForTest(DemuxerStream::AUDIO, 10 * block_size_);
// Append some data at position 1000ms then at 2000ms
@@ -3699,7 +3666,7 @@ TEST_P(ChunkDemuxerTest, GCDuringSeek_MultipleRanges_SeekInbetween1) {
CheckExpectedRanges("{ [1500,1615) [2069,2115) }");
}
-TEST_P(ChunkDemuxerTest, GCDuringSeek_MultipleRanges_SeekInbetween2) {
+TEST_F(ChunkDemuxerTest, GCDuringSeek_MultipleRanges_SeekInbetween2) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
demuxer_->SetMemoryLimitsForTest(DemuxerStream::AUDIO, 10 * block_size_);
@@ -3721,7 +3688,7 @@ TEST_P(ChunkDemuxerTest, GCDuringSeek_MultipleRanges_SeekInbetween2) {
CheckExpectedRanges("{ [1000,1115) [1500,1615) }");
}
-TEST_P(ChunkDemuxerTest, GCDuringSeek_MultipleRanges_SeekBack) {
+TEST_F(ChunkDemuxerTest, GCDuringSeek_MultipleRanges_SeekBack) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
demuxer_->SetMemoryLimitsForTest(DemuxerStream::AUDIO, 10 * block_size_);
// Append some data at position 1000ms then at 2000ms
@@ -3741,7 +3708,7 @@ TEST_P(ChunkDemuxerTest, GCDuringSeek_MultipleRanges_SeekBack) {
CheckExpectedRanges("{ [0,115) [2069,2115) }");
}
-TEST_P(ChunkDemuxerTest, GCDuringSeek) {
+TEST_F(ChunkDemuxerTest, GCDuringSeek) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
demuxer_->SetMemoryLimitsForTest(DemuxerStream::AUDIO, 5 * block_size_);
@@ -3786,7 +3753,7 @@ TEST_P(ChunkDemuxerTest, GCDuringSeek) {
CheckExpectedRanges("{ [500,615) [700,815) }");
}
-TEST_P(ChunkDemuxerTest, GCKeepPlayhead) {
+TEST_F(ChunkDemuxerTest, GCKeepPlayhead) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
demuxer_->SetMemoryLimitsForTest(DemuxerStream::AUDIO, 5 * block_size_);
@@ -3825,7 +3792,7 @@ TEST_P(ChunkDemuxerTest, GCKeepPlayhead) {
CheckExpectedRanges("{ [115,230) }");
}
-TEST_P(ChunkDemuxerTest, AppendWindow_Video) {
+TEST_F(ChunkDemuxerTest, AppendWindow_Video) {
ASSERT_TRUE(InitDemuxer(HAS_VIDEO));
DemuxerStream* stream = GetStream(DemuxerStream::VIDEO);
@@ -3861,7 +3828,7 @@ TEST_P(ChunkDemuxerTest, AppendWindow_Video) {
CheckExpectedRanges("{ [120,270) [420,630) }");
}
-TEST_P(ChunkDemuxerTest, AppendWindow_Audio) {
+TEST_F(ChunkDemuxerTest, AppendWindow_Audio) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
DemuxerStream* stream = GetStream(DemuxerStream::AUDIO);
@@ -3904,7 +3871,7 @@ TEST_P(ChunkDemuxerTest, AppendWindow_Audio) {
CheckExpectedRanges("{ [50,280) [360,650) }");
}
-TEST_P(ChunkDemuxerTest, AppendWindow_AudioOverlapStartAndEnd) {
+TEST_F(ChunkDemuxerTest, AppendWindow_AudioOverlapStartAndEnd) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
// Set the append window to [10,20).
@@ -3925,7 +3892,7 @@ TEST_P(ChunkDemuxerTest, AppendWindow_AudioOverlapStartAndEnd) {
CheckExpectedRanges("{ [10,20) }");
}
-TEST_P(ChunkDemuxerTest, AppendWindow_WebMFile_AudioOnly) {
+TEST_F(ChunkDemuxerTest, AppendWindow_WebMFile_AudioOnly) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
&host_,
@@ -3958,7 +3925,7 @@ TEST_P(ChunkDemuxerTest, AppendWindow_WebMFile_AudioOnly) {
CheckExpectedBuffers(stream, "50KP 50K 62K 86K 109K 122K 125K 128K");
}
-TEST_P(ChunkDemuxerTest, AppendWindow_AudioConfigUpdateRemovesPreroll) {
+TEST_F(ChunkDemuxerTest, AppendWindow_AudioConfigUpdateRemovesPreroll) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
&host_,
@@ -4006,7 +3973,7 @@ TEST_P(ChunkDemuxerTest, AppendWindow_AudioConfigUpdateRemovesPreroll) {
CheckExpectedBuffers(stream, "2768K 2789K 2811K 2832K");
}
-TEST_P(ChunkDemuxerTest, StartWaitingForSeekAfterParseError) {
+TEST_F(ChunkDemuxerTest, StartWaitingForSeekAfterParseError) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
EXPECT_MEDIA_LOG(StreamParsingFailed());
EXPECT_CALL(host_, OnDemuxerError(CHUNK_DEMUXER_ERROR_APPEND_FAILED));
@@ -4015,7 +3982,7 @@ TEST_P(ChunkDemuxerTest, StartWaitingForSeekAfterParseError) {
demuxer_->StartWaitingForSeek(seek_time);
}
-TEST_P(ChunkDemuxerTest, Remove_AudioVideoText) {
+TEST_F(ChunkDemuxerTest, Remove_AudioVideoText) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
DemuxerStream* audio_stream = GetStream(DemuxerStream::AUDIO);
@@ -4046,7 +4013,7 @@ TEST_P(ChunkDemuxerTest, Remove_AudioVideoText) {
CheckExpectedBuffers(video_stream, "1K 31 61 91 121K 151 181");
}
-TEST_P(ChunkDemuxerTest, Remove_StartAtDuration) {
+TEST_F(ChunkDemuxerTest, Remove_StartAtDuration) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
DemuxerStream* audio_stream = GetStream(DemuxerStream::AUDIO);
@@ -4076,7 +4043,7 @@ TEST_P(ChunkDemuxerTest, Remove_StartAtDuration) {
// Verifies that a Seek() will complete without text cues for
// the seek point and will return cues after the seek position
// when they are eventually appended.
-TEST_P(ChunkDemuxerTest, SeekCompletesWithoutTextCues) {
+TEST_F(ChunkDemuxerTest, SeekCompletesWithoutTextCues) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
DemuxerStream* audio_stream = GetStream(DemuxerStream::AUDIO);
@@ -4116,7 +4083,7 @@ TEST_P(ChunkDemuxerTest, SeekCompletesWithoutTextCues) {
CheckExpectedBuffers(video_stream, "180 210");
}
-TEST_P(ChunkDemuxerTest, ClusterWithUnknownSize) {
+TEST_F(ChunkDemuxerTest, ClusterWithUnknownSize) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
ASSERT_TRUE(AppendCluster(GenerateCluster(0, 0, 4, true)));
@@ -4127,7 +4094,7 @@ TEST_P(ChunkDemuxerTest, ClusterWithUnknownSize) {
CheckExpectedRanges("{ [0,115) }");
}
-TEST_P(ChunkDemuxerTest, CuesBetweenClustersWithUnknownSize) {
+TEST_F(ChunkDemuxerTest, CuesBetweenClustersWithUnknownSize) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
// Add two clusters separated by Cues in a single Append() call.
@@ -4141,7 +4108,7 @@ TEST_P(ChunkDemuxerTest, CuesBetweenClustersWithUnknownSize) {
CheckExpectedRanges("{ [0,115) }");
}
-TEST_P(ChunkDemuxerTest, CuesBetweenClusters) {
+TEST_F(ChunkDemuxerTest, CuesBetweenClusters) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
ASSERT_TRUE(AppendCluster(GenerateCluster(0, 0, 4)));
@@ -4150,7 +4117,7 @@ TEST_P(ChunkDemuxerTest, CuesBetweenClusters) {
CheckExpectedRanges("{ [0,115) }");
}
-TEST_P(ChunkDemuxerTest, EvictCodedFramesTest) {
+TEST_F(ChunkDemuxerTest, EvictCodedFramesTest) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
demuxer_->SetMemoryLimitsForTest(DemuxerStream::AUDIO, 10 * block_size_);
demuxer_->SetMemoryLimitsForTest(DemuxerStream::VIDEO, 15 * block_size_);
@@ -4200,38 +4167,38 @@ TEST_P(ChunkDemuxerTest, EvictCodedFramesTest) {
CheckExpectedBuffers(video_stream, "60K 70 80K 90 100K 110 120K 130 140K");
}
-TEST_P(ChunkDemuxerTest, SegmentMissingAudioFrame_AudioOnly) {
+TEST_F(ChunkDemuxerTest, SegmentMissingAudioFrame_AudioOnly) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
EXPECT_MEDIA_LOG(SegmentMissingFrames("2"));
ASSERT_TRUE(AppendCluster(GenerateEmptyCluster(0)));
}
-TEST_P(ChunkDemuxerTest, SegmentMissingVideoFrame_VideoOnly) {
+TEST_F(ChunkDemuxerTest, SegmentMissingVideoFrame_VideoOnly) {
ASSERT_TRUE(InitDemuxer(HAS_VIDEO));
EXPECT_MEDIA_LOG(SegmentMissingFrames("1"));
ASSERT_TRUE(AppendCluster(GenerateEmptyCluster(0)));
}
-TEST_P(ChunkDemuxerTest, SegmentMissingAudioFrame_AudioVideo) {
+TEST_F(ChunkDemuxerTest, SegmentMissingAudioFrame_AudioVideo) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
EXPECT_MEDIA_LOG(SegmentMissingFrames("2"));
AppendSingleStreamCluster(kSourceId, kVideoTrackNum, 0, 10);
}
-TEST_P(ChunkDemuxerTest, SegmentMissingVideoFrame_AudioVideo) {
+TEST_F(ChunkDemuxerTest, SegmentMissingVideoFrame_AudioVideo) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
EXPECT_MEDIA_LOG(SegmentMissingFrames("1"));
AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 0, 10);
}
-TEST_P(ChunkDemuxerTest, SegmentMissingAudioVideoFrames) {
+TEST_F(ChunkDemuxerTest, SegmentMissingAudioVideoFrames) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
EXPECT_MEDIA_LOG(SegmentMissingFrames("1"));
EXPECT_MEDIA_LOG(SegmentMissingFrames("2"));
ASSERT_TRUE(AppendCluster(GenerateEmptyCluster(0)));
}
-TEST_P(ChunkDemuxerTest, RelaxedKeyframe_FirstSegmentMissingKeyframe) {
+TEST_F(ChunkDemuxerTest, RelaxedKeyframe_FirstSegmentMissingKeyframe) {
// Append V:[n n n][n n K]
// Expect V: [K]
ASSERT_TRUE(InitDemuxer(HAS_VIDEO));
@@ -4244,7 +4211,7 @@ TEST_P(ChunkDemuxerTest, RelaxedKeyframe_FirstSegmentMissingKeyframe) {
CheckExpectedBuffers(video_stream, "50K");
}
-TEST_P(ChunkDemuxerTest, RelaxedKeyframe_SecondSegmentMissingKeyframe) {
+TEST_F(ChunkDemuxerTest, RelaxedKeyframe_SecondSegmentMissingKeyframe) {
// Append V:[K n n][n n n]
// Expect V:[K n n][n n n]
ASSERT_TRUE(InitDemuxer(HAS_VIDEO));
@@ -4257,7 +4224,7 @@ TEST_P(ChunkDemuxerTest, RelaxedKeyframe_SecondSegmentMissingKeyframe) {
CheckExpectedBuffers(video_stream, "0K 10 20 30 40 50");
}
-TEST_P(ChunkDemuxerTest, RelaxedKeyframe_RemoveInterruptsCodedFrameGroup_1) {
+TEST_F(ChunkDemuxerTest, RelaxedKeyframe_RemoveInterruptsCodedFrameGroup_1) {
// Append V:[K n n]
// Remove *****
// Append V: [n n n][n K n]
@@ -4275,7 +4242,7 @@ TEST_P(ChunkDemuxerTest, RelaxedKeyframe_RemoveInterruptsCodedFrameGroup_1) {
CheckExpectedBuffers(video_stream, "70K 80");
}
-TEST_P(ChunkDemuxerTest, RelaxedKeyframe_RemoveInterruptsCodedFrameGroup_2) {
+TEST_F(ChunkDemuxerTest, RelaxedKeyframe_RemoveInterruptsCodedFrameGroup_2) {
// Append V:[K n n][n n n][n K n]
// Remove *
// Expect: [K n]
@@ -4292,7 +4259,7 @@ TEST_P(ChunkDemuxerTest, RelaxedKeyframe_RemoveInterruptsCodedFrameGroup_2) {
CheckExpectedBuffers(video_stream, "70K 80");
}
-TEST_P(ChunkDemuxerTest, RelaxedKeyframe_RemoveInterruptsCodedFrameGroup_3) {
+TEST_F(ChunkDemuxerTest, RelaxedKeyframe_RemoveInterruptsCodedFrameGroup_3) {
// Append V:[K n n][n n n][n K n]
// Remove *
// Expect: [K n n..n n] [K n]
@@ -4311,7 +4278,7 @@ TEST_P(ChunkDemuxerTest, RelaxedKeyframe_RemoveInterruptsCodedFrameGroup_3) {
CheckExpectedBuffers(video_stream, "70K 80");
}
-TEST_P(ChunkDemuxerTest,
+TEST_F(ChunkDemuxerTest,
RelaxedKeyframe_RemoveInterruptsMuxedCodedFrameGroup_1) {
// Append muxed:
// A:[K K K]
@@ -4342,7 +4309,7 @@ TEST_P(ChunkDemuxerTest,
CheckExpectedBuffers(video_stream, "70K 80");
}
-TEST_P(ChunkDemuxerTest,
+TEST_F(ChunkDemuxerTest,
RelaxedKeyframe_RemoveInterruptsMuxedCodedFrameGroup_2) {
// Append muxed:
// A:[K K K]
@@ -4377,7 +4344,7 @@ TEST_P(ChunkDemuxerTest,
CheckExpectedBuffers(video_stream, "70K 80");
}
-TEST_P(ChunkDemuxerTest,
+TEST_F(ChunkDemuxerTest,
RelaxedKeyframe_RemoveInterruptsMuxedCodedFrameGroup_3) {
// Append muxed:
// A:[K K K
@@ -4484,7 +4451,7 @@ void DisableAndEnableDemuxerTracks(
}
}
-TEST_P(ChunkDemuxerTest, StreamStatusNotifications) {
+TEST_F(ChunkDemuxerTest, StreamStatusNotifications) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
ChunkDemuxerStream* audio_stream =
static_cast<ChunkDemuxerStream*>(GetStream(DemuxerStream::AUDIO));
@@ -4507,7 +4474,7 @@ TEST_P(ChunkDemuxerTest, StreamStatusNotifications) {
EXPECT_TRUE(read_done);
}
-TEST_P(ChunkDemuxerTest, MultipleIds) {
+TEST_F(ChunkDemuxerTest, MultipleIds) {
CreateNewDemuxer();
EXPECT_CALL(*this, DemuxerOpened());
EXPECT_CALL(host_, SetDuration(_)).Times(2);
@@ -4533,7 +4500,7 @@ TEST_P(ChunkDemuxerTest, MultipleIds) {
CheckExpectedRanges(kId2, "{ [0,10007) }");
}
-TEST_P(ChunkDemuxerTest, CompleteInitAfterIdRemoved) {
+TEST_F(ChunkDemuxerTest, CompleteInitAfterIdRemoved) {
CreateNewDemuxer();
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(&host_,
@@ -4556,7 +4523,7 @@ TEST_P(ChunkDemuxerTest, CompleteInitAfterIdRemoved) {
AppendSingleStreamCluster(kId1, kVideoTrackNum, "0K 30 60 90");
}
-TEST_P(ChunkDemuxerTest, RemovingIdMustRemoveStreams) {
+TEST_F(ChunkDemuxerTest, RemovingIdMustRemoveStreams) {
CreateNewDemuxer();
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(&host_,
@@ -4582,7 +4549,7 @@ TEST_P(ChunkDemuxerTest, RemovingIdMustRemoveStreams) {
EXPECT_EQ(nullptr, GetStream(DemuxerStream::VIDEO));
}
-TEST_P(ChunkDemuxerTest, SequenceModeMuxedAppendShouldWarn) {
+TEST_F(ChunkDemuxerTest, SequenceModeMuxedAppendShouldWarn) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
demuxer_->SetSequenceMode(kSourceId, true);
@@ -4594,7 +4561,7 @@ TEST_P(ChunkDemuxerTest, SequenceModeMuxedAppendShouldWarn) {
MuxedStreamInfo(kVideoTrackNum, "0D10K"));
}
-TEST_P(ChunkDemuxerTest, SequenceModeSingleTrackNoWarning) {
+TEST_F(ChunkDemuxerTest, SequenceModeSingleTrackNoWarning) {
std::string audio_id = "audio1";
std::string video_id = "video1";
@@ -4615,7 +4582,7 @@ TEST_P(ChunkDemuxerTest, SequenceModeSingleTrackNoWarning) {
video_id, GenerateSingleStreamCluster(0, 33, kVideoTrackNum, 33)));
}
-TEST_P(ChunkDemuxerTest, Mp4Vp9CodecSupport) {
+TEST_F(ChunkDemuxerTest, Mp4Vp9CodecSupport) {
demuxer_->Initialize(
&host_, base::BindRepeating(&ChunkDemuxerTest::DemuxerInitialized,
base::Unretained(this)));
@@ -4623,7 +4590,7 @@ TEST_P(ChunkDemuxerTest, Mp4Vp9CodecSupport) {
EXPECT_EQ(AddId("source_id", "video/mp4", "vp09.00.10.08"), expected);
}
-TEST_P(ChunkDemuxerTest, UnmarkEOSRetainsParseErrorState_BeforeInit) {
+TEST_F(ChunkDemuxerTest, UnmarkEOSRetainsParseErrorState_BeforeInit) {
InSequence s;
// Trigger a (fatal) parse error prior to successfully reaching source init.
EXPECT_CALL(*this, DemuxerOpened());
@@ -4647,7 +4614,7 @@ TEST_P(ChunkDemuxerTest, UnmarkEOSRetainsParseErrorState_BeforeInit) {
ASSERT_FALSE(AppendInitSegment(HAS_AUDIO | HAS_VIDEO));
}
-TEST_P(ChunkDemuxerTest, UnmarkEOSRetainsParseErrorState_AfterInit) {
+TEST_F(ChunkDemuxerTest, UnmarkEOSRetainsParseErrorState_AfterInit) {
InSequence s;
// Trigger a (fatal) parse error after successfully reaching source init.
InitDemuxer(HAS_AUDIO | HAS_VIDEO);
@@ -4674,7 +4641,7 @@ struct ZeroLengthFrameCase {
};
// Test that 0-length audio and video coded frames are dropped gracefully.
-TEST_P(ChunkDemuxerTest, ZeroLengthFramesDropped) {
+TEST_F(ChunkDemuxerTest, ZeroLengthFramesDropped) {
struct ZeroLengthFrameCase cases[] = {
{DemuxerStream::AUDIO, HAS_AUDIO, kAudioTrackNum},
{DemuxerStream::VIDEO, HAS_VIDEO, kVideoTrackNum}};
@@ -4725,13 +4692,4 @@ TEST_P(ChunkDemuxerTest, ZeroLengthFramesDropped) {
// same codec type in a single SourceBufferState, when WebM parser supports
// multiple tracks. crbug.com/646900
-// Though most of these ChunkDemuxerTests use WebM (where PTS==DTS), we still
-// need to ensure that both versions of the buffering API work.
-INSTANTIATE_TEST_SUITE_P(LegacyByDts,
- ChunkDemuxerTest,
- Values(BufferingApi::kLegacyByDts));
-INSTANTIATE_TEST_SUITE_P(NewByPts,
- ChunkDemuxerTest,
- Values(BufferingApi::kNewByPts));
-
} // namespace media
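The TEST_P to TEST_F conversions above all fall out of deleting the BufferingApi parameterization (the removed INSTANTIATE_TEST_SUITE_P block). As a minimal sketch of the gtest mechanics involved — not part of this patch; FakeBufferingApi and both fixtures are hypothetical stand-ins:

#include "testing/gtest/include/gtest/gtest.h"

enum class FakeBufferingApi { kLegacyByDts, kNewByPts };

// Before: a value-parameterized fixture; each TEST_P runs once per value
// supplied by INSTANTIATE_TEST_SUITE_P.
class ParamFixture : public testing::TestWithParam<FakeBufferingApi> {};

TEST_P(ParamFixture, DoesSomething) {
  const FakeBufferingApi api = GetParam();
  EXPECT_TRUE(api == FakeBufferingApi::kLegacyByDts ||
              api == FakeBufferingApi::kNewByPts);
}

INSTANTIATE_TEST_SUITE_P(All,
                         ParamFixture,
                         testing::Values(FakeBufferingApi::kLegacyByDts,
                                         FakeBufferingApi::kNewByPts));

// After: with the ByDts path gone there is only one behavior left, so a plain
// fixture suffices and the INSTANTIATE_TEST_SUITE_P boilerplate is deleted.
class PlainFixture : public testing::Test {};

TEST_F(PlainFixture, DoesSomething) {
  SUCCEED();
}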
diff --git a/chromium/media/filters/context_3d.h b/chromium/media/filters/context_3d.h
deleted file mode 100644
index ccb0c7c962a..00000000000
--- a/chromium/media/filters/context_3d.h
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright (c) 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_FILTERS_CONTEXT_3D_H_
-#define MEDIA_FILTERS_CONTEXT_3D_H_
-
-class GrContext;
-
-namespace gpu {
-namespace gles2 {
-class GLES2Interface;
-}
-}
-
-namespace media {
-
-// This struct can be used to make media use gpu::gles2::GLES2Interface and
-// GrContext.
-// Usage:
-// gpu::gles2::GLES2Interface* gl = ...;
-// GrContext* gr_context = ...;
-// Context3D context_3d(gl, gr_context);
-
-struct Context3D {
- Context3D() : gl(nullptr), gr_context(nullptr) {}
- Context3D(gpu::gles2::GLES2Interface* gl_, class GrContext* gr_context_)
- : gl(gl_), gr_context(gr_context_) {}
-
- gpu::gles2::GLES2Interface* gl;
- class GrContext* gr_context;
-};
-
-} // namespace media
-
-#endif // MEDIA_FILTERS_CONTEXT_3D_H_
diff --git a/chromium/media/filters/dav1d_video_decoder.cc b/chromium/media/filters/dav1d_video_decoder.cc
index a93e8a38ebb..27a40ddeeea 100644
--- a/chromium/media/filters/dav1d_video_decoder.cc
+++ b/chromium/media/filters/dav1d_video_decoder.cc
@@ -16,6 +16,7 @@
#include "base/threading/sequenced_task_runner_handle.h"
#include "media/base/bind_to_current_loop.h"
#include "media/base/decoder_buffer.h"
+#include "media/base/limits.h"
#include "media/base/media_log.h"
#include "media/base/video_util.h"
@@ -25,13 +26,27 @@ extern "C" {
namespace media {
-// Returns the number of threads.
-static int GetDecoderThreadCount(const VideoDecoderConfig& config) {
- // For AV1 decode when using the default thread count, increase the number
- // of decode threads to equal the maximum number of tiles possible for
- // higher resolution streams.
- return VideoDecoder::GetRecommendedThreadCount(config.coded_size().width() /
- 256);
+static void GetDecoderThreadCounts(const int coded_height,
+ int* tile_threads,
+ int* frame_threads) {
+ // Tile thread counts based on currently available content. Recommended by
+ // YouTube, while frame thread values fit within limits::kMaxVideoThreads.
+ if (coded_height >= 700) {
+ // Current 720p content is encoded in 5 tiles and 1080p content with 8
+ // tiles, but we'd exceed limits::kMaxVideoThreads by pairing 5+ tile
+ // threads with 3 frame threads (5 * 3 + 3 = 18 threads vs 16 max).
+ //
+ // Since 720p playback isn't smooth without 3 frame threads, we've
+ // chosen a slightly lower tile thread count.
+ *tile_threads = 4;
+ *frame_threads = 3;
+ } else if (coded_height >= 300) {
+ *tile_threads = 3;
+ *frame_threads = 2;
+ } else {
+ *tile_threads = 2;
+ *frame_threads = 2;
+ }
}
static VideoPixelFormat Dav1dImgFmtToVideoPixelFormat(
@@ -132,15 +147,16 @@ std::string Dav1dVideoDecoder::GetDisplayName() const {
void Dav1dVideoDecoder::Initialize(const VideoDecoderConfig& config,
bool low_delay,
CdmContext* /* cdm_context */,
- const InitCB& init_cb,
+ InitCB init_cb,
const OutputCB& output_cb,
const WaitingCB& /* waiting_cb */) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(config.IsValidConfig());
- InitCB bound_init_cb = bind_callbacks_ ? BindToCurrentLoop(init_cb) : init_cb;
+ InitCB bound_init_cb = bind_callbacks_ ? BindToCurrentLoop(std::move(init_cb))
+ : std::move(init_cb);
if (config.is_encrypted() || config.codec() != kCodecAV1) {
- bound_init_cb.Run(false);
+ std::move(bound_init_cb).Run(false);
return;
}
@@ -149,61 +165,102 @@ void Dav1dVideoDecoder::Initialize(const VideoDecoderConfig& config,
Dav1dSettings s;
dav1d_default_settings(&s);
- s.n_tile_threads = GetDecoderThreadCount(config);
- // Use only 1 frame thread in low delay mode, otherwise we'll require at least
- // two buffers before the first frame can be output.
- s.n_frame_threads = low_delay ? 1 : 2;
+ // Compute the ideal thread count values. We'll then clamp these based on the
+ // maximum number of recommended threads (using number of processors, etc).
+ //
+ // dav1d will spawn |n_tile_threads| per frame thread.
+ GetDecoderThreadCounts(config.coded_size().height(), &s.n_tile_threads,
+ &s.n_frame_threads);
+
+ const int max_threads = VideoDecoder::GetRecommendedThreadCount(
+ s.n_frame_threads * (s.n_tile_threads + 1));
+
+ // First clamp tile threads to the allowed maximum. We prefer tile threads
+ // over frame threads since dav1d folk indicate they are more efficient. In an
+ // ideal world this would be auto-detected by dav1d from the content.
+ //
+ // https://bugzilla.mozilla.org/show_bug.cgi?id=1536783#c0
+ s.n_tile_threads = std::min(max_threads, s.n_tile_threads);
+
+ // Now clamp frame threads based on the number of total threads that would be
+ // created with the given |n_tile_threads| value. Note: A thread count of 1
+ // generates no additional threads since the calling thread (this thread) is
+ // counted as a thread.
+ //
+ // We only want 1 frame thread in low delay mode, since otherwise we'll
+ // require at least two buffers before the first frame can be output.
+ //
+ // If a system has the cores for it, we'll end up using the following:
+ // <300p: 2 tile threads, 2 frame threads = 2 * 2 + 2 = 6 total threads.
+ // <700p: 3 tile threads, 2 frame threads = 3 * 2 + 2 = 8 total threads.
+ //
+ // For higher resolutions we hit limits::kMaxVideoThreads (16):
+ // >700p: 4 tile threads, 3 frame threads = 4 * 3 + 3 = 15 total threads.
+ //
+ // Due to the (surprising) performance issues that occurred when setting
+ // |n_frame_threads|=1 (https://crbug.com/957511), the minimum total number of
+ // threads is 6 (two tile and two frame) regardless of core count. The maximum
+ // is min(2 * base::SysInfo::NumberOfProcessors(), limits::kMaxVideoThreads).
+ if (low_delay)
+ s.n_frame_threads = 1;
+ else if (s.n_frame_threads * (s.n_tile_threads + 1) > max_threads)
+ s.n_frame_threads = std::max(2, max_threads / (s.n_tile_threads + 1));
// Route dav1d internal logs through Chrome's DLOG system.
s.logger = {nullptr, &LogDav1dMessage};
+ // Set a maximum frame size limit to avoid OOM'ing fuzzers.
+ s.frame_size_limit = limits::kMaxCanvas;
+
if (dav1d_open(&dav1d_decoder_, &s) < 0) {
- bound_init_cb.Run(false);
+ std::move(bound_init_cb).Run(false);
return;
}
config_ = config;
state_ = DecoderState::kNormal;
output_cb_ = output_cb;
- bound_init_cb.Run(true);
+ std::move(bound_init_cb).Run(true);
}
void Dav1dVideoDecoder::Decode(scoped_refptr<DecoderBuffer> buffer,
- const DecodeCB& decode_cb) {
+ DecodeCB decode_cb) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(buffer);
DCHECK(decode_cb);
DCHECK_NE(state_, DecoderState::kUninitialized)
<< "Called Decode() before successful Initialize()";
- DecodeCB bound_decode_cb =
- bind_callbacks_ ? BindToCurrentLoop(decode_cb) : decode_cb;
+ DecodeCB bound_decode_cb = bind_callbacks_
+ ? BindToCurrentLoop(std::move(decode_cb))
+ : std::move(decode_cb);
if (state_ == DecoderState::kError) {
- bound_decode_cb.Run(DecodeStatus::DECODE_ERROR);
+ std::move(bound_decode_cb).Run(DecodeStatus::DECODE_ERROR);
return;
}
if (!DecodeBuffer(std::move(buffer))) {
state_ = DecoderState::kError;
- bound_decode_cb.Run(DecodeStatus::DECODE_ERROR);
+ std::move(bound_decode_cb).Run(DecodeStatus::DECODE_ERROR);
return;
}
// VideoDecoderShim expects |decode_cb| call after |output_cb_|.
- bound_decode_cb.Run(DecodeStatus::OK);
+ std::move(bound_decode_cb).Run(DecodeStatus::OK);
}
-void Dav1dVideoDecoder::Reset(const base::RepeatingClosure& reset_cb) {
+void Dav1dVideoDecoder::Reset(base::OnceClosure reset_cb) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
state_ = DecoderState::kNormal;
dav1d_flush(dav1d_decoder_);
if (bind_callbacks_)
- base::SequencedTaskRunnerHandle::Get()->PostTask(FROM_HERE, reset_cb);
+ base::SequencedTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ std::move(reset_cb));
else
- reset_cb.Run();
+ std::move(reset_cb).Run();
}
void Dav1dVideoDecoder::Detach() {
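The thread budgeting logic above is easier to follow as arithmetic. A standalone sketch under the same rules — ClampDav1dThreads is a hypothetical stand-in, and max_threads plays the role of VideoDecoder::GetRecommendedThreadCount():

#include <algorithm>
#include <cstdio>

struct Dav1dThreads { int tile; int frame; };

// dav1d spawns |tile| tile threads per frame thread, so the total thread
// count is frame * (tile + 1), counting each frame thread itself.
static Dav1dThreads ClampDav1dThreads(int tile, int frame, int max_threads,
                                      bool low_delay) {
  tile = std::min(max_threads, tile);  // Prefer tile threads over frame ones.
  if (low_delay)
    frame = 1;  // More frame threads would delay the first output.
  else if (frame * (tile + 1) > max_threads)
    frame = std::max(2, max_threads / (tile + 1));  // Never below 2.
  return {tile, frame};
}

int main() {
  // >=700p ideal values on a big machine: 4 * 3 + 3 = 15 <= 16, unchanged.
  Dav1dThreads t = ClampDav1dThreads(4, 3, 16, false);
  std::printf("%d tile, %d frame\n", t.tile, t.frame);  // 4 tile, 3 frame
  // Same content with only an 8-thread budget: frame threads clamp to 2, but
  // the two-frame-thread floor means the total (10) can still exceed the
  // budget, matching the "minimum of 6 threads regardless of core count".
  t = ClampDav1dThreads(4, 3, 8, false);
  std::printf("%d tile, %d frame\n", t.tile, t.frame);  // 4 tile, 2 frame
  return 0;
}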
diff --git a/chromium/media/filters/dav1d_video_decoder.h b/chromium/media/filters/dav1d_video_decoder.h
index d400375c817..fe82a8e1f58 100644
--- a/chromium/media/filters/dav1d_video_decoder.h
+++ b/chromium/media/filters/dav1d_video_decoder.h
@@ -33,12 +33,11 @@ class MEDIA_EXPORT Dav1dVideoDecoder : public OffloadableVideoDecoder {
void Initialize(const VideoDecoderConfig& config,
bool low_delay,
CdmContext* cdm_context,
- const InitCB& init_cb,
+ InitCB init_cb,
const OutputCB& output_cb,
const WaitingCB& waiting_cb) override;
- void Decode(scoped_refptr<DecoderBuffer> buffer,
- const DecodeCB& decode_cb) override;
- void Reset(const base::RepeatingClosure& reset_cb) override;
+ void Decode(scoped_refptr<DecoderBuffer> buffer, DecodeCB decode_cb) override;
+ void Reset(base::OnceClosure reset_cb) override;
// OffloadableVideoDecoder implementation.
void Detach() override;
diff --git a/chromium/media/filters/dav1d_video_decoder_unittest.cc b/chromium/media/filters/dav1d_video_decoder_unittest.cc
index 5b2d43415b4..21f1e916552 100644
--- a/chromium/media/filters/dav1d_video_decoder_unittest.cc
+++ b/chromium/media/filters/dav1d_video_decoder_unittest.cc
@@ -163,9 +163,9 @@ class Dav1dVideoDecoderTest : public testing::Test {
return status;
}
- void FrameReady(const scoped_refptr<VideoFrame>& frame) {
+ void FrameReady(scoped_refptr<VideoFrame> frame) {
DCHECK(!frame->metadata()->IsTrue(VideoFrameMetadata::END_OF_STREAM));
- output_frames_.push_back(frame);
+ output_frames_.push_back(std::move(frame));
}
MOCK_METHOD1(DecodeDone, void(DecodeStatus));
diff --git a/chromium/media/filters/decoder_selector_unittest.cc b/chromium/media/filters/decoder_selector_unittest.cc
index e10585da935..bd613acb29f 100644
--- a/chromium/media/filters/decoder_selector_unittest.cc
+++ b/chromium/media/filters/decoder_selector_unittest.cc
@@ -89,13 +89,14 @@ class AudioDecoderSelectorTestParam {
// Decoder::Initialize() takes different parameters depending on the type.
static void ExpectInitialize(MockDecoder* decoder,
DecoderCapability capability) {
- EXPECT_CALL(*decoder, Initialize(_, _, _, _, _))
- .WillRepeatedly(
- [capability](const AudioDecoderConfig& config, CdmContext*,
- const AudioDecoder::InitCB& init_cb,
- const AudioDecoder::OutputCB&, const WaitingCB&) {
- init_cb.Run(IsConfigSupported(capability, config.is_encrypted()));
- });
+ EXPECT_CALL(*decoder, Initialize_(_, _, _, _, _))
+ .WillRepeatedly([capability](const AudioDecoderConfig& config,
+ CdmContext*, AudioDecoder::InitCB& init_cb,
+ const AudioDecoder::OutputCB&,
+ const WaitingCB&) {
+ std::move(init_cb).Run(
+ IsConfigSupported(capability, config.is_encrypted()));
+ });
}
};
@@ -125,12 +126,13 @@ class VideoDecoderSelectorTestParam {
static void ExpectInitialize(MockDecoder* decoder,
DecoderCapability capability) {
- EXPECT_CALL(*decoder, Initialize(_, _, _, _, _, _))
+ EXPECT_CALL(*decoder, Initialize_(_, _, _, _, _, _))
.WillRepeatedly(
[capability](const VideoDecoderConfig& config, bool low_delay,
- CdmContext*, const VideoDecoder::InitCB& init_cb,
+ CdmContext*, VideoDecoder::InitCB& init_cb,
const VideoDecoder::OutputCB&, const WaitingCB&) {
- init_cb.Run(IsConfigSupported(capability, config.is_encrypted()));
+ std::move(init_cb).Run(
+ IsConfigSupported(capability, config.is_encrypted()));
});
}
};
@@ -162,7 +164,7 @@ class DecoderSelectorTest : public ::testing::Test {
demuxer_stream_(TypeParam::kStreamType) {}
void OnWaiting(WaitingReason reason) { NOTREACHED(); }
- void OnOutput(const scoped_refptr<Output>& output) { NOTREACHED(); }
+ void OnOutput(scoped_refptr<Output> output) { NOTREACHED(); }
MOCK_METHOD2_T(OnDecoderSelected,
void(std::string, std::unique_ptr<DecryptingDemuxerStream>));
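The Initialize to Initialize_ renames in these expectations are the usual workaround for mocking methods that take move-only callbacks: the real signature forwards to a mocked overload taking a reference, whose action may then consume the callback. A minimal sketch — MockThing is a hypothetical stand-in:

#include <utility>

#include "base/callback.h"
#include "testing/gmock/include/gmock/gmock.h"

using InitCB = base::OnceCallback<void(bool success)>;

class MockThing {
 public:
  // The public entry point takes the move-only callback by value...
  void Initialize(InitCB init_cb) { Initialize_(init_cb); }
  // ...and forwards it by reference to the mocked method. An expectation can
  // then consume it, e.g.:
  //   EXPECT_CALL(mock, Initialize_(_))
  //       .WillOnce([](InitCB& cb) { std::move(cb).Run(true); });
  MOCK_METHOD1(Initialize_, void(InitCB&));
};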
diff --git a/chromium/media/filters/decoder_stream.cc b/chromium/media/filters/decoder_stream.cc
index 325a8b60e5e..18804c6b2f7 100644
--- a/chromium/media/filters/decoder_stream.cc
+++ b/chromium/media/filters/decoder_stream.cc
@@ -421,13 +421,12 @@ void DecoderStream<StreamType>::OnDecoderSelected(
}
template <DemuxerStream::Type StreamType>
-void DecoderStream<StreamType>::SatisfyRead(
- Status status,
- const scoped_refptr<Output>& output) {
+void DecoderStream<StreamType>::SatisfyRead(Status status,
+ scoped_refptr<Output> output) {
DCHECK(read_cb_);
TRACE_EVENT_ASYNC_END1("media", GetReadTraceString<StreamType>(), this,
"status", GetStatusString<StreamType>(status));
- std::move(read_cb_).Run(status, output);
+ std::move(read_cb_).Run(status, std::move(output));
}
template <DemuxerStream::Type StreamType>
@@ -590,7 +589,7 @@ void DecoderStream<StreamType>::OnDecodeDone(
template <DemuxerStream::Type StreamType>
void DecoderStream<StreamType>::OnDecodeOutputReady(
- const scoped_refptr<Output>& output) {
+ scoped_refptr<Output> output) {
FUNCTION_DVLOG(3) << ": " << output->timestamp().InMilliseconds() << " ms";
DCHECK(output);
DCHECK(state_ == STATE_NORMAL || state_ == STATE_FLUSHING_DECODER ||
@@ -618,12 +617,12 @@ void DecoderStream<StreamType>::OnDecodeOutputReady(
}
// If the frame should be dropped, exit early and decode another frame.
- if (traits_->OnDecodeDone(output) == PostDecodeAction::DROP)
+ if (traits_->OnDecodeDone(output.get()) == PostDecodeAction::DROP)
return;
if (prepare_cb_ && output->timestamp() + AverageDuration() >=
skip_prepare_until_timestamp_) {
- unprepared_outputs_.push_back(output);
+ unprepared_outputs_.push_back(std::move(output));
MaybePrepareAnotherOutput();
return;
}
@@ -632,12 +631,12 @@ void DecoderStream<StreamType>::OnDecodeOutputReady(
// If |ready_outputs_| was non-empty, the read would have already been
// satisfied by Read().
DCHECK(ready_outputs_.empty());
- SatisfyRead(OK, output);
+ SatisfyRead(OK, std::move(output));
return;
}
// Store decoded output.
- ready_outputs_.push_back(output);
+ ready_outputs_.push_back(std::move(output));
}
template <DemuxerStream::Type StreamType>
@@ -971,7 +970,7 @@ void DecoderStream<StreamType>::MaybePrepareAnotherOutput() {
template <DemuxerStream::Type StreamType>
void DecoderStream<StreamType>::OnPreparedOutputReady(
- const scoped_refptr<Output>& output) {
+ scoped_refptr<Output> output) {
FUNCTION_DVLOG(2);
DCHECK(task_runner_->BelongsToCurrentThread());
@@ -988,9 +987,9 @@ void DecoderStream<StreamType>::OnPreparedOutputReady(
CompletePrepare(output.get());
unprepared_outputs_.pop_front();
if (!read_cb_)
- ready_outputs_.emplace_back(output);
+ ready_outputs_.emplace_back(std::move(output));
else
- SatisfyRead(OK, output);
+ SatisfyRead(OK, std::move(output));
MaybePrepareAnotherOutput();
diff --git a/chromium/media/filters/decoder_stream.h b/chromium/media/filters/decoder_stream.h
index f2f128b9790..7f224532411 100644
--- a/chromium/media/filters/decoder_stream.h
+++ b/chromium/media/filters/decoder_stream.h
@@ -60,7 +60,7 @@ class MEDIA_EXPORT DecoderStream {
using InitCB = base::OnceCallback<void(bool success)>;
// Indicates completion of a DecoderStream read.
- using ReadCB = base::OnceCallback<void(Status, const scoped_refptr<Output>&)>;
+ using ReadCB = base::OnceCallback<void(Status, scoped_refptr<Output>)>;
DecoderStream(std::unique_ptr<DecoderStreamTraits<StreamType>> traits,
const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
@@ -120,9 +120,9 @@ class MEDIA_EXPORT DecoderStream {
// prepared at any one time; this alleviates resource usage issues incurred by
// the preparation process when a decoder has a burst of outputs after one
// Decode(). For more context on why, see https://crbug.com/820167.
- using OutputReadyCB = base::OnceCallback<void(const scoped_refptr<Output>&)>;
- using PrepareCB = base::RepeatingCallback<void(const scoped_refptr<Output>&,
- OutputReadyCB)>;
+ using OutputReadyCB = base::OnceCallback<void(scoped_refptr<Output>)>;
+ using PrepareCB =
+ base::RepeatingCallback<void(scoped_refptr<Output>, OutputReadyCB)>;
void SetPrepareCB(PrepareCB prepare_cb);
// Indicates that we won't need to prepare outputs before |start_timestamp|,
@@ -175,7 +175,7 @@ class MEDIA_EXPORT DecoderStream {
std::unique_ptr<DecryptingDemuxerStream> decrypting_demuxer_stream);
// Satisfy pending |read_cb_| with |status| and |output|.
- void SatisfyRead(Status status, const scoped_refptr<Output>& output);
+ void SatisfyRead(Status status, scoped_refptr<Output> output);
// Decodes |buffer| and returns the result via OnDecodeOutputReady().
// Saves |buffer| into |pending_buffers_| if appropriate.
@@ -195,7 +195,7 @@ class MEDIA_EXPORT DecoderStream {
DecodeStatus status);
// Output callback passed to Decoder::Initialize().
- void OnDecodeOutputReady(const scoped_refptr<Output>& output);
+ void OnDecodeOutputReady(scoped_refptr<Output> output);
// Reads a buffer from |stream_| and returns the result via OnBufferReady().
void ReadFromDemuxerStream();
@@ -216,7 +216,7 @@ class MEDIA_EXPORT DecoderStream {
void ClearOutputs();
void MaybePrepareAnotherOutput();
- void OnPreparedOutputReady(const scoped_refptr<Output>& frame);
+ void OnPreparedOutputReady(scoped_refptr<Output> frame);
void CompletePrepare(const Output* output);
std::unique_ptr<DecoderStreamTraits<StreamType>> traits_;
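The |const scoped_refptr<Output>&| to |scoped_refptr<Output>| changes in this header let each stage hand its reference off with std::move() instead of taking a new one per hop. A self-contained sketch of the idiom, with a hypothetical FakeFrame type:

#include <utility>

#include "base/memory/ref_counted.h"

struct FakeFrame : public base::RefCounted<FakeFrame> {
 private:
  friend class base::RefCounted<FakeFrame>;
  ~FakeFrame() = default;
};

void Sink(scoped_refptr<FakeFrame> frame) {}

// Taking |frame| by value and moving it onward bumps the refcount once, when
// the caller creates its reference, rather than once per pipeline stage.
void Forward(scoped_refptr<FakeFrame> frame) {
  Sink(std::move(frame));  // No AddRef/Release pair on this hop.
}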
diff --git a/chromium/media/filters/decoder_stream_traits.cc b/chromium/media/filters/decoder_stream_traits.cc
index eca17402179..69886c7ed24 100644
--- a/chromium/media/filters/decoder_stream_traits.cc
+++ b/chromium/media/filters/decoder_stream_traits.cc
@@ -61,7 +61,7 @@ void DecoderStreamTraits<DemuxerStream::AUDIO>::InitializeDecoder(
const DecoderConfigType& config,
bool /* low_delay */,
CdmContext* cdm_context,
- const InitCB& init_cb,
+ InitCB init_cb,
const OutputCB& output_cb,
const WaitingCB& waiting_cb) {
DCHECK(config.IsValidConfig());
@@ -71,7 +71,8 @@ void DecoderStreamTraits<DemuxerStream::AUDIO>::InitializeDecoder(
config_ = config;
stats_.audio_decoder_name = decoder->GetDisplayName();
- decoder->Initialize(config, cdm_context, init_cb, output_cb, waiting_cb);
+ decoder->Initialize(config, cdm_context, std::move(init_cb), output_cb,
+ waiting_cb);
}
void DecoderStreamTraits<DemuxerStream::AUDIO>::OnStreamReset(
@@ -89,8 +90,8 @@ void DecoderStreamTraits<DemuxerStream::AUDIO>::OnDecode(
}
PostDecodeAction DecoderStreamTraits<DemuxerStream::AUDIO>::OnDecodeDone(
- const scoped_refptr<OutputType>& buffer) {
- audio_ts_validator_->RecordOutputDuration(buffer);
+ OutputType* buffer) {
+ audio_ts_validator_->RecordOutputDuration(*buffer);
return PostDecodeAction::DELIVER;
}
@@ -153,14 +154,14 @@ void DecoderStreamTraits<DemuxerStream::VIDEO>::InitializeDecoder(
const DecoderConfigType& config,
bool low_delay,
CdmContext* cdm_context,
- const InitCB& init_cb,
+ InitCB init_cb,
const OutputCB& output_cb,
const WaitingCB& waiting_cb) {
DCHECK(config.IsValidConfig());
stats_.video_decoder_name = decoder->GetDisplayName();
DVLOG(2) << stats_.video_decoder_name;
- decoder->Initialize(config, low_delay, cdm_context, init_cb, output_cb,
- waiting_cb);
+ decoder->Initialize(config, low_delay, cdm_context, std::move(init_cb),
+ output_cb, waiting_cb);
}
void DecoderStreamTraits<DemuxerStream::VIDEO>::OnStreamReset(
@@ -198,7 +199,12 @@ void DecoderStreamTraits<DemuxerStream::VIDEO>::OnDecode(
}
PostDecodeAction DecoderStreamTraits<DemuxerStream::VIDEO>::OnDecodeDone(
- const scoped_refptr<OutputType>& buffer) {
+ OutputType* buffer) {
+ // Add a timestamp here (after decoding completed) to enable buffering delay
+ // measurements down the line.
+ buffer->metadata()->SetTimeTicks(media::VideoFrameMetadata::DECODE_TIME,
+ base::TimeTicks::Now());
+
auto it = frame_metadata_.find(buffer->timestamp());
// If the frame isn't in |frame_metadata_| it probably was erased below on a
@@ -218,12 +224,6 @@ PostDecodeAction DecoderStreamTraits<DemuxerStream::VIDEO>::OnDecodeDone(
it->second.duration);
}
- // Add a timestamp here (after decoding completed) to enable buffering delay
- // measurements down the line.
- buffer->metadata()->SetTimeTicks(
- media::VideoFrameMetadata::DECODE_COMPLETE_TIMESTAMP,
- base::TimeTicks::Now());
-
// We erase from the beginning onward to our target frame since frames should
// be returned in presentation order. It's possible to accumulate entries in
// this queue if playback begins at a non-keyframe; those frames may never be
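The DECODE_TIME stamp written at the top of OnDecodeDone() above is what downstream buffering-delay measurements key off. A sketch of a consumer — only the key name comes from this patch; the helper itself is hypothetical:

#include "base/time/time.h"
#include "media/base/video_frame_metadata.h"

base::TimeDelta BufferingDelaySoFar(
    const media::VideoFrameMetadata& metadata) {
  base::TimeTicks decode_time;
  // GetTimeTicks() returns false if the decoder never stamped this frame.
  if (!metadata.GetTimeTicks(media::VideoFrameMetadata::DECODE_TIME,
                             &decode_time)) {
    return base::TimeDelta();
  }
  return base::TimeTicks::Now() - decode_time;
}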
diff --git a/chromium/media/filters/decoder_stream_traits.h b/chromium/media/filters/decoder_stream_traits.h
index f97d02c38cd..4c5471721e9 100644
--- a/chromium/media/filters/decoder_stream_traits.h
+++ b/chromium/media/filters/decoder_stream_traits.h
@@ -50,12 +50,12 @@ class MEDIA_EXPORT DecoderStreamTraits<DemuxerStream::AUDIO> {
const DecoderConfigType& config,
bool low_delay,
CdmContext* cdm_context,
- const InitCB& init_cb,
+ InitCB init_cb,
const OutputCB& output_cb,
const WaitingCB& waiting_cb);
DecoderConfigType GetDecoderConfig(DemuxerStream* stream);
void OnDecode(const DecoderBuffer& buffer);
- PostDecodeAction OnDecodeDone(const scoped_refptr<OutputType>& buffer);
+ PostDecodeAction OnDecodeDone(OutputType* buffer);
void OnStreamReset(DemuxerStream* stream);
private:
@@ -94,11 +94,11 @@ class MEDIA_EXPORT DecoderStreamTraits<DemuxerStream::VIDEO> {
const DecoderConfigType& config,
bool low_delay,
CdmContext* cdm_context,
- const InitCB& init_cb,
+ InitCB init_cb,
const OutputCB& output_cb,
const WaitingCB& waiting_cb);
void OnDecode(const DecoderBuffer& buffer);
- PostDecodeAction OnDecodeDone(const scoped_refptr<OutputType>& buffer);
+ PostDecodeAction OnDecodeDone(OutputType* buffer);
void OnStreamReset(DemuxerStream* stream);
private:
diff --git a/chromium/media/filters/decrypting_audio_decoder.cc b/chromium/media/filters/decrypting_audio_decoder.cc
index ed41d539c67..17b16f9e931 100644
--- a/chromium/media/filters/decrypting_audio_decoder.cc
+++ b/chromium/media/filters/decrypting_audio_decoder.cc
@@ -45,7 +45,7 @@ std::string DecryptingAudioDecoder::GetDisplayName() const {
void DecryptingAudioDecoder::Initialize(const AudioDecoderConfig& config,
CdmContext* cdm_context,
- const InitCB& init_cb,
+ InitCB init_cb,
const OutputCB& output_cb,
const WaitingCB& waiting_cb) {
DVLOG(2) << "Initialize()";
@@ -53,7 +53,7 @@ void DecryptingAudioDecoder::Initialize(const AudioDecoderConfig& config,
DCHECK(!decode_cb_);
DCHECK(!reset_cb_);
- init_cb_ = BindToCurrentLoop(init_cb);
+ init_cb_ = BindToCurrentLoop(std::move(init_cb));
if (!cdm_context) {
// Once we have a CDM context, one should always be present.
DCHECK(!support_clear_content_);
@@ -131,7 +131,7 @@ void DecryptingAudioDecoder::Decode(scoped_refptr<DecoderBuffer> buffer,
DecodePendingBuffer();
}
-void DecryptingAudioDecoder::Reset(const base::Closure& closure) {
+void DecryptingAudioDecoder::Reset(base::OnceClosure closure) {
DVLOG(2) << "Reset() - state: " << state_;
DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK(state_ == kIdle || state_ == kPendingDecode ||
@@ -140,7 +140,7 @@ void DecryptingAudioDecoder::Reset(const base::Closure& closure) {
DCHECK(!init_cb_); // No Reset() during pending initialization.
DCHECK(!reset_cb_);
- reset_cb_ = BindToCurrentLoop(closure);
+ reset_cb_ = BindToCurrentLoop(std::move(closure));
decryptor_->ResetDecoder(Decryptor::kAudio);
diff --git a/chromium/media/filters/decrypting_audio_decoder.h b/chromium/media/filters/decrypting_audio_decoder.h
index 95f8d234295..4cb6be8c263 100644
--- a/chromium/media/filters/decrypting_audio_decoder.h
+++ b/chromium/media/filters/decrypting_audio_decoder.h
@@ -43,12 +43,12 @@ class MEDIA_EXPORT DecryptingAudioDecoder : public AudioDecoder {
std::string GetDisplayName() const override;
void Initialize(const AudioDecoderConfig& config,
CdmContext* cdm_context,
- const InitCB& init_cb,
+ InitCB init_cb,
const OutputCB& output_cb,
const WaitingCB& waiting_cb) override;
void Decode(scoped_refptr<DecoderBuffer> buffer,
const DecodeCB& decode_cb) override;
- void Reset(const base::Closure& closure) override;
+ void Reset(base::OnceClosure closure) override;
private:
// For a detailed state diagram please see this link: http://goo.gl/8jAok
@@ -97,7 +97,7 @@ class MEDIA_EXPORT DecryptingAudioDecoder : public AudioDecoder {
InitCB init_cb_;
OutputCB output_cb_;
DecodeCB decode_cb_;
- base::Closure reset_cb_;
+ base::OnceClosure reset_cb_;
WaitingCB waiting_cb_;
// The current decoder configuration.
diff --git a/chromium/media/filters/decrypting_audio_decoder_unittest.cc b/chromium/media/filters/decrypting_audio_decoder_unittest.cc
index 069f110c8d8..3364873d4f0 100644
--- a/chromium/media/filters/decrypting_audio_decoder_unittest.cc
+++ b/chromium/media/filters/decrypting_audio_decoder_unittest.cc
@@ -237,7 +237,7 @@ class DecryptingAudioDecoderTest : public testing::Test {
base::RunLoop().RunUntilIdle();
}
- MOCK_METHOD1(FrameReady, void(const scoped_refptr<AudioBuffer>&));
+ MOCK_METHOD1(FrameReady, void(scoped_refptr<AudioBuffer>));
MOCK_METHOD1(DecodeDone, void(DecodeStatus));
MOCK_METHOD1(OnWaiting, void(WaitingReason));
diff --git a/chromium/media/filters/decrypting_video_decoder.cc b/chromium/media/filters/decrypting_video_decoder.cc
index 1c0a3a975cf..0e2e37634dd 100644
--- a/chromium/media/filters/decrypting_video_decoder.cc
+++ b/chromium/media/filters/decrypting_video_decoder.cc
@@ -33,7 +33,7 @@ std::string DecryptingVideoDecoder::GetDisplayName() const {
void DecryptingVideoDecoder::Initialize(const VideoDecoderConfig& config,
bool /* low_delay */,
CdmContext* cdm_context,
- const InitCB& init_cb,
+ InitCB init_cb,
const OutputCB& output_cb,
const WaitingCB& waiting_cb) {
DVLOG(2) << __func__ << ": " << config.AsHumanReadableString();
@@ -46,7 +46,7 @@ void DecryptingVideoDecoder::Initialize(const VideoDecoderConfig& config,
DCHECK(!reset_cb_);
DCHECK(config.IsValidConfig());
- init_cb_ = BindToCurrentLoop(init_cb);
+ init_cb_ = BindToCurrentLoop(std::move(init_cb));
if (!cdm_context) {
// Once we have a CDM context, one should always be present.
DCHECK(!support_clear_content_);
@@ -91,7 +91,7 @@ void DecryptingVideoDecoder::Initialize(const VideoDecoderConfig& config,
}
void DecryptingVideoDecoder::Decode(scoped_refptr<DecoderBuffer> buffer,
- const DecodeCB& decode_cb) {
+ DecodeCB decode_cb) {
DVLOG(3) << "Decode()";
DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK(state_ == kIdle || state_ == kDecodeFinished || state_ == kError)
@@ -99,7 +99,7 @@ void DecryptingVideoDecoder::Decode(scoped_refptr<DecoderBuffer> buffer,
DCHECK(decode_cb);
CHECK(!decode_cb_) << "Overlapping decodes are not supported.";
- decode_cb_ = BindToCurrentLoop(decode_cb);
+ decode_cb_ = BindToCurrentLoop(std::move(decode_cb));
if (state_ == kError) {
std::move(decode_cb_).Run(DecodeStatus::DECODE_ERROR);
@@ -117,7 +117,7 @@ void DecryptingVideoDecoder::Decode(scoped_refptr<DecoderBuffer> buffer,
DecodePendingBuffer();
}
-void DecryptingVideoDecoder::Reset(const base::Closure& closure) {
+void DecryptingVideoDecoder::Reset(base::OnceClosure closure) {
DVLOG(2) << "Reset() - state: " << state_;
DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK(state_ == kIdle || state_ == kPendingDecode ||
@@ -127,7 +127,7 @@ void DecryptingVideoDecoder::Reset(const base::Closure& closure) {
DCHECK(!init_cb_); // No Reset() during pending initialization.
DCHECK(!reset_cb_);
- reset_cb_ = BindToCurrentLoop(closure);
+ reset_cb_ = BindToCurrentLoop(std::move(closure));
decryptor_->ResetDecoder(Decryptor::kVideo);
@@ -220,9 +220,8 @@ void DecryptingVideoDecoder::DecodePendingBuffer() {
&DecryptingVideoDecoder::DeliverFrame, weak_this_)));
}
-void DecryptingVideoDecoder::DeliverFrame(
- Decryptor::Status status,
- const scoped_refptr<VideoFrame>& frame) {
+void DecryptingVideoDecoder::DeliverFrame(Decryptor::Status status,
+ scoped_refptr<VideoFrame> frame) {
DVLOG(3) << "DeliverFrame() - status: " << status;
DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK_EQ(state_, kPendingDecode) << state_;
@@ -301,7 +300,7 @@ void DecryptingVideoDecoder::DeliverFrame(
frame->set_color_space(config_.color_space_info().ToGfxColorSpace());
}
- output_cb_.Run(frame);
+ output_cb_.Run(std::move(frame));
if (scoped_pending_buffer_to_decode->end_of_stream()) {
// Set |pending_buffer_to_decode_| back as we need to keep flushing the
diff --git a/chromium/media/filters/decrypting_video_decoder.h b/chromium/media/filters/decrypting_video_decoder.h
index 03cec553195..9e33c41caf7 100644
--- a/chromium/media/filters/decrypting_video_decoder.h
+++ b/chromium/media/filters/decrypting_video_decoder.h
@@ -41,12 +41,11 @@ class MEDIA_EXPORT DecryptingVideoDecoder : public VideoDecoder {
void Initialize(const VideoDecoderConfig& config,
bool low_delay,
CdmContext* cdm_context,
- const InitCB& init_cb,
+ InitCB init_cb,
const OutputCB& output_cb,
const WaitingCB& waiting_cb) override;
- void Decode(scoped_refptr<DecoderBuffer> buffer,
- const DecodeCB& decode_cb) override;
- void Reset(const base::Closure& closure) override;
+ void Decode(scoped_refptr<DecoderBuffer> buffer, DecodeCB decode_cb) override;
+ void Reset(base::OnceClosure closure) override;
static const char kDecoderName[];
@@ -70,8 +69,7 @@ class MEDIA_EXPORT DecryptingVideoDecoder : public VideoDecoder {
void DecodePendingBuffer();
// Callback for Decryptor::DecryptAndDecodeVideo().
- void DeliverFrame(Decryptor::Status status,
- const scoped_refptr<VideoFrame>& frame);
+ void DeliverFrame(Decryptor::Status status, scoped_refptr<VideoFrame> frame);
// Callback for the |decryptor_| to notify this object that a new key has been
// added.
@@ -93,7 +91,7 @@ class MEDIA_EXPORT DecryptingVideoDecoder : public VideoDecoder {
InitCB init_cb_;
OutputCB output_cb_;
DecodeCB decode_cb_;
- base::Closure reset_cb_;
+ base::OnceClosure reset_cb_;
WaitingCB waiting_cb_;
VideoDecoderConfig config_;
diff --git a/chromium/media/filters/decrypting_video_decoder_unittest.cc b/chromium/media/filters/decrypting_video_decoder_unittest.cc
index 4515c82d139..700c56e8cae 100644
--- a/chromium/media/filters/decrypting_video_decoder_unittest.cc
+++ b/chromium/media/filters/decrypting_video_decoder_unittest.cc
@@ -214,7 +214,7 @@ class DecryptingVideoDecoderTest : public testing::Test {
base::RunLoop().RunUntilIdle();
}
- MOCK_METHOD1(FrameReady, void(const scoped_refptr<VideoFrame>&));
+ MOCK_METHOD1(FrameReady, void(scoped_refptr<VideoFrame>));
MOCK_METHOD1(DecodeDone, void(DecodeStatus));
MOCK_METHOD1(OnWaiting, void(WaitingReason));
diff --git a/chromium/media/filters/fake_video_decoder.cc b/chromium/media/filters/fake_video_decoder.cc
index 9081bb77849..23c0b9a3130 100644
--- a/chromium/media/filters/fake_video_decoder.cc
+++ b/chromium/media/filters/fake_video_decoder.cc
@@ -59,7 +59,7 @@ std::string FakeVideoDecoder::GetDisplayName() const {
void FakeVideoDecoder::Initialize(const VideoDecoderConfig& config,
bool low_delay,
CdmContext* cdm_context,
- const InitCB& init_cb,
+ InitCB init_cb,
const OutputCB& output_cb,
const WaitingCB& waiting_cb) {
DVLOG(1) << decoder_name_ << ": " << __func__;
@@ -70,7 +70,7 @@ void FakeVideoDecoder::Initialize(const VideoDecoderConfig& config,
DCHECK(reset_cb_.IsNull()) << "No reinitialization during pending reset.";
current_config_ = config;
- init_cb_.SetCallback(BindToCurrentLoop(init_cb));
+ init_cb_.SetCallback(BindToCurrentLoop(std::move(init_cb)));
// Don't need BindToCurrentLoop() because |output_cb_| is only called from
// RunDecodeCallback() which is posted from Decode().
@@ -100,7 +100,7 @@ void FakeVideoDecoder::Initialize(const VideoDecoderConfig& config,
}
void FakeVideoDecoder::Decode(scoped_refptr<DecoderBuffer> buffer,
- const DecodeCB& decode_cb) {
+ DecodeCB decode_cb) {
DCHECK(thread_checker_.CalledOnValidThread());
DCHECK(reset_cb_.IsNull());
DCHECK_LE(decoded_frames_.size(),
@@ -110,13 +110,12 @@ void FakeVideoDecoder::Decode(scoped_refptr<DecoderBuffer> buffer,
DCHECK_NE(state_, STATE_END_OF_STREAM);
int buffer_size = buffer->end_of_stream() ? 0 : buffer->data_size();
- DecodeCB wrapped_decode_cb = base::Bind(&FakeVideoDecoder::OnFrameDecoded,
- weak_factory_.GetWeakPtr(),
- buffer_size,
- BindToCurrentLoop(decode_cb));
+ DecodeCB wrapped_decode_cb = base::BindOnce(
+ &FakeVideoDecoder::OnFrameDecoded, weak_factory_.GetWeakPtr(),
+ buffer_size, BindToCurrentLoop(std::move(decode_cb)));
if (state_ == STATE_ERROR) {
- wrapped_decode_cb.Run(DecodeStatus::DECODE_ERROR);
+ std::move(wrapped_decode_cb).Run(DecodeStatus::DECODE_ERROR);
return;
}
@@ -129,14 +128,14 @@ void FakeVideoDecoder::Decode(scoped_refptr<DecoderBuffer> buffer,
decoded_frames_.push_back(video_frame);
}
- RunOrHoldDecode(wrapped_decode_cb);
+ RunOrHoldDecode(std::move(wrapped_decode_cb));
}
-void FakeVideoDecoder::Reset(const base::Closure& closure) {
+void FakeVideoDecoder::Reset(base::OnceClosure closure) {
DCHECK(thread_checker_.CalledOnValidThread());
DCHECK(reset_cb_.IsNull());
- reset_cb_.SetCallback(BindToCurrentLoop(closure));
+ reset_cb_.SetCallback(BindToCurrentLoop(std::move(closure)));
decoded_frames_.clear();
// Defer the reset if a decode is pending.
@@ -184,9 +183,9 @@ void FakeVideoDecoder::SatisfySingleDecode() {
DCHECK(thread_checker_.CalledOnValidThread());
DCHECK(!held_decode_callbacks_.empty());
- DecodeCB decode_cb = held_decode_callbacks_.front();
+ DecodeCB decode_cb = std::move(held_decode_callbacks_.front());
held_decode_callbacks_.pop_front();
- RunDecodeCallback(decode_cb);
+ RunDecodeCallback(std::move(decode_cb));
if (!reset_cb_.IsNull() && held_decode_callbacks_.empty())
DoReset();
@@ -203,7 +202,7 @@ void FakeVideoDecoder::SimulateError() {
state_ = STATE_ERROR;
while (!held_decode_callbacks_.empty()) {
- held_decode_callbacks_.front().Run(DecodeStatus::DECODE_ERROR);
+ std::move(held_decode_callbacks_.front()).Run(DecodeStatus::DECODE_ERROR);
held_decode_callbacks_.pop_front();
}
decoded_frames_.clear();
@@ -218,7 +217,7 @@ int FakeVideoDecoder::GetMaxDecodeRequests() const {
}
void FakeVideoDecoder::OnFrameDecoded(int buffer_size,
- const DecodeCB& decode_cb,
+ DecodeCB decode_cb,
DecodeStatus status) {
DCHECK(thread_checker_.CalledOnValidThread());
@@ -226,26 +225,26 @@ void FakeVideoDecoder::OnFrameDecoded(int buffer_size,
total_bytes_decoded_ += buffer_size;
bytes_decoded_cb_.Run(buffer_size);
}
- decode_cb.Run(status);
+ std::move(decode_cb).Run(status);
}
-void FakeVideoDecoder::RunOrHoldDecode(const DecodeCB& decode_cb) {
+void FakeVideoDecoder::RunOrHoldDecode(DecodeCB decode_cb) {
DCHECK(thread_checker_.CalledOnValidThread());
if (hold_decode_) {
- held_decode_callbacks_.push_back(decode_cb);
+ held_decode_callbacks_.push_back(std::move(decode_cb));
} else {
DCHECK(held_decode_callbacks_.empty());
- RunDecodeCallback(decode_cb);
+ RunDecodeCallback(std::move(decode_cb));
}
}
-void FakeVideoDecoder::RunDecodeCallback(const DecodeCB& decode_cb) {
+void FakeVideoDecoder::RunDecodeCallback(DecodeCB decode_cb) {
DCHECK(thread_checker_.CalledOnValidThread());
if (!reset_cb_.IsNull()) {
DCHECK(decoded_frames_.empty());
- decode_cb.Run(DecodeStatus::ABORTED);
+ std::move(decode_cb).Run(DecodeStatus::ABORTED);
return;
}
@@ -270,7 +269,7 @@ void FakeVideoDecoder::RunDecodeCallback(const DecodeCB& decode_cb) {
}
}
- decode_cb.Run(DecodeStatus::OK);
+ std::move(decode_cb).Run(DecodeStatus::OK);
}
void FakeVideoDecoder::DoReset() {
diff --git a/chromium/media/filters/fake_video_decoder.h b/chromium/media/filters/fake_video_decoder.h
index c8fe39ac762..ddae5f580bc 100644
--- a/chromium/media/filters/fake_video_decoder.h
+++ b/chromium/media/filters/fake_video_decoder.h
@@ -23,11 +23,9 @@
#include "media/base/video_frame.h"
#include "ui/gfx/geometry/size.h"
-using base::ResetAndReturn;
-
namespace media {
-typedef base::Callback<void(int)> BytesDecodedCB;
+using BytesDecodedCB = base::RepeatingCallback<void(int)>;
class FakeVideoDecoder : public VideoDecoder {
public:
@@ -48,12 +46,11 @@ class FakeVideoDecoder : public VideoDecoder {
void Initialize(const VideoDecoderConfig& config,
bool low_delay,
CdmContext* cdm_context,
- const InitCB& init_cb,
+ InitCB init_cb,
const OutputCB& output_cb,
const WaitingCB& waiting_cb) override;
- void Decode(scoped_refptr<DecoderBuffer> buffer,
- const DecodeCB& decode_cb) override;
- void Reset(const base::Closure& closure) override;
+ void Decode(scoped_refptr<DecoderBuffer> buffer, DecodeCB decode_cb) override;
+ void Reset(base::OnceClosure closure) override;
int GetMaxDecodeRequests() const override;
base::WeakPtr<FakeVideoDecoder> GetWeakPtr();
@@ -87,16 +84,14 @@ class FakeVideoDecoder : public VideoDecoder {
};
// Callback for updating |total_bytes_decoded_|.
- void OnFrameDecoded(int buffer_size,
- const DecodeCB& decode_cb,
- DecodeStatus status);
+ void OnFrameDecoded(int buffer_size, DecodeCB decode_cb, DecodeStatus status);
// Runs |decode_cb| or puts it to |held_decode_callbacks_| depending on
// current value of |hold_decode_|.
- void RunOrHoldDecode(const DecodeCB& decode_cb);
+ void RunOrHoldDecode(DecodeCB decode_cb);
// Runs |decode_cb| with a frame from |decoded_frames_|.
- void RunDecodeCallback(const DecodeCB& decode_cb);
+ void RunDecodeCallback(DecodeCB decode_cb);
void DoReset();
@@ -112,7 +107,7 @@ class FakeVideoDecoder : public VideoDecoder {
State state_;
CallbackHolder<InitCB> init_cb_;
- CallbackHolder<base::Closure> reset_cb_;
+ CallbackHolder<base::OnceClosure> reset_cb_;
OutputCB output_cb_;
diff --git a/chromium/media/filters/fake_video_decoder_unittest.cc b/chromium/media/filters/fake_video_decoder_unittest.cc
index 17a8194b8c4..5718aced400 100644
--- a/chromium/media/filters/fake_video_decoder_unittest.cc
+++ b/chromium/media/filters/fake_video_decoder_unittest.cc
@@ -81,9 +81,9 @@ class FakeVideoDecoderTest
last_decode_status_ = status;
}
- void FrameReady(const scoped_refptr<VideoFrame>& frame) {
+ void FrameReady(scoped_refptr<VideoFrame> frame) {
DCHECK(!frame->metadata()->IsTrue(VideoFrameMetadata::END_OF_STREAM));
- last_decoded_frame_ = frame;
+ last_decoded_frame_ = std::move(frame);
num_decoded_frames_++;
}
diff --git a/chromium/media/filters/ffmpeg_audio_decoder.cc b/chromium/media/filters/ffmpeg_audio_decoder.cc
index caf8e1a1fda..4f594854ba4 100644
--- a/chromium/media/filters/ffmpeg_audio_decoder.cc
+++ b/chromium/media/filters/ffmpeg_audio_decoder.cc
@@ -69,22 +69,22 @@ std::string FFmpegAudioDecoder::GetDisplayName() const {
void FFmpegAudioDecoder::Initialize(const AudioDecoderConfig& config,
CdmContext* /* cdm_context */,
- const InitCB& init_cb,
+ InitCB init_cb,
const OutputCB& output_cb,
const WaitingCB& /* waiting_cb */) {
DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK(config.IsValidConfig());
- InitCB bound_init_cb = BindToCurrentLoop(init_cb);
+ InitCB bound_init_cb = BindToCurrentLoop(std::move(init_cb));
if (config.is_encrypted()) {
- bound_init_cb.Run(false);
+ std::move(bound_init_cb).Run(false);
return;
}
if (!ConfigureDecoder(config)) {
av_sample_format_ = 0;
- bound_init_cb.Run(false);
+ std::move(bound_init_cb).Run(false);
return;
}
@@ -92,7 +92,7 @@ void FFmpegAudioDecoder::Initialize(const AudioDecoderConfig& config,
config_ = config;
output_cb_ = BindToCurrentLoop(output_cb);
state_ = kNormal;
- bound_init_cb.Run(true);
+ std::move(bound_init_cb).Run(true);
}
void FFmpegAudioDecoder::Decode(scoped_refptr<DecoderBuffer> buffer,
@@ -116,13 +116,13 @@ void FFmpegAudioDecoder::Decode(scoped_refptr<DecoderBuffer> buffer,
DecodeBuffer(*buffer, decode_cb_bound);
}
-void FFmpegAudioDecoder::Reset(const base::Closure& closure) {
+void FFmpegAudioDecoder::Reset(base::OnceClosure closure) {
DCHECK(task_runner_->BelongsToCurrentThread());
avcodec_flush_buffers(codec_context_.get());
state_ = kNormal;
ResetTimestampState(config_);
- task_runner_->PostTask(FROM_HERE, closure);
+ task_runner_->PostTask(FROM_HERE, std::move(closure));
}
void FFmpegAudioDecoder::DecodeBuffer(const DecoderBuffer& buffer,
@@ -267,7 +267,7 @@ bool FFmpegAudioDecoder::OnNewFrame(const DecoderBuffer& buffer,
output->TrimEnd(unread_frames);
*decoded_frame_this_loop = true;
- if (discard_helper_->ProcessBuffers(buffer, output)) {
+ if (discard_helper_->ProcessBuffers(buffer, output.get())) {
if (is_config_change &&
output->sample_rate() != config_.samples_per_second()) {
// At the boundary of the config change, FFmpeg's AAC decoder gives the
diff --git a/chromium/media/filters/ffmpeg_audio_decoder.h b/chromium/media/filters/ffmpeg_audio_decoder.h
index 923e8cd1ec3..51e3d5da435 100644
--- a/chromium/media/filters/ffmpeg_audio_decoder.h
+++ b/chromium/media/filters/ffmpeg_audio_decoder.h
@@ -42,12 +42,12 @@ class MEDIA_EXPORT FFmpegAudioDecoder : public AudioDecoder {
std::string GetDisplayName() const override;
void Initialize(const AudioDecoderConfig& config,
CdmContext* cdm_context,
- const InitCB& init_cb,
+ InitCB init_cb,
const OutputCB& output_cb,
const WaitingCB& waiting_cb) override;
void Decode(scoped_refptr<DecoderBuffer> buffer,
const DecodeCB& decode_cb) override;
- void Reset(const base::Closure& closure) override;
+ void Reset(base::OnceClosure closure) override;
// Callback called from within FFmpeg to allocate a buffer based on the
// properties of |codec_context| and |frame|. See AVCodecContext.get_buffer2
diff --git a/chromium/media/filters/ffmpeg_demuxer.h b/chromium/media/filters/ffmpeg_demuxer.h
index e67c3c434ba..8246f096ee6 100644
--- a/chromium/media/filters/ffmpeg_demuxer.h
+++ b/chromium/media/filters/ffmpeg_demuxer.h
@@ -341,7 +341,7 @@ class MEDIA_EXPORT FFmpegDemuxer : public Demuxer {
scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
// Task runner on which all blocking FFmpeg operations are executed; retrieved
- // from base::ThreadPool.
+ // from base::ThreadPoolInstance.
scoped_refptr<base::SequencedTaskRunner> blocking_task_runner_;
PipelineStatusCB init_cb_;
diff --git a/chromium/media/filters/ffmpeg_demuxer_unittest.cc b/chromium/media/filters/ffmpeg_demuxer_unittest.cc
index cada939e160..db87c5fc8fd 100644
--- a/chromium/media/filters/ffmpeg_demuxer_unittest.cc
+++ b/chromium/media/filters/ffmpeg_demuxer_unittest.cc
@@ -1288,7 +1288,7 @@ TEST_F(FFmpegDemuxerTest, Rotate_Metadata_0) {
ASSERT_TRUE(stream);
const VideoDecoderConfig& video_config = stream->video_decoder_config();
- ASSERT_EQ(VIDEO_ROTATION_0, video_config.video_rotation());
+ ASSERT_EQ(VIDEO_ROTATION_0, video_config.video_transformation().rotation);
}
TEST_F(FFmpegDemuxerTest, Rotate_Metadata_90) {
@@ -1299,7 +1299,7 @@ TEST_F(FFmpegDemuxerTest, Rotate_Metadata_90) {
ASSERT_TRUE(stream);
const VideoDecoderConfig& video_config = stream->video_decoder_config();
- ASSERT_EQ(VIDEO_ROTATION_90, video_config.video_rotation());
+ ASSERT_EQ(VIDEO_ROTATION_90, video_config.video_transformation().rotation);
}
TEST_F(FFmpegDemuxerTest, Rotate_Metadata_180) {
@@ -1310,7 +1310,7 @@ TEST_F(FFmpegDemuxerTest, Rotate_Metadata_180) {
ASSERT_TRUE(stream);
const VideoDecoderConfig& video_config = stream->video_decoder_config();
- ASSERT_EQ(VIDEO_ROTATION_180, video_config.video_rotation());
+ ASSERT_EQ(VIDEO_ROTATION_180, video_config.video_transformation().rotation);
}
TEST_F(FFmpegDemuxerTest, Rotate_Metadata_270) {
@@ -1321,7 +1321,7 @@ TEST_F(FFmpegDemuxerTest, Rotate_Metadata_270) {
ASSERT_TRUE(stream);
const VideoDecoderConfig& video_config = stream->video_decoder_config();
- ASSERT_EQ(VIDEO_ROTATION_270, video_config.video_rotation());
+ ASSERT_EQ(VIDEO_ROTATION_270, video_config.video_transformation().rotation);
}
TEST_F(FFmpegDemuxerTest, NaturalSizeWithoutPASP) {
diff --git a/chromium/media/filters/ffmpeg_glue.cc b/chromium/media/filters/ffmpeg_glue.cc
index bd498dcedbe..e0ac8452536 100644
--- a/chromium/media/filters/ffmpeg_glue.cc
+++ b/chromium/media/filters/ffmpeg_glue.cc
@@ -19,11 +19,7 @@ namespace media {
enum { kBufferSize = 32 * 1024 };
static int AVIOReadOperation(void* opaque, uint8_t* buf, int buf_size) {
- FFmpegURLProtocol* protocol = reinterpret_cast<FFmpegURLProtocol*>(opaque);
- int result = protocol->Read(buf_size, buf);
- if (result < 0)
- result = AVERROR(EIO);
- return result;
+ return reinterpret_cast<FFmpegURLProtocol*>(opaque)->Read(buf_size, buf);
}
static int64_t AVIOSeekOperation(void* opaque, int64_t offset, int whence) {
@@ -58,8 +54,6 @@ static int64_t AVIOSeekOperation(void* opaque, int64_t offset, int whence) {
default:
NOTREACHED();
}
- if (new_offset < 0)
- new_offset = AVERROR(EIO);
return new_offset;
}
diff --git a/chromium/media/filters/ffmpeg_glue_unittest.cc b/chromium/media/filters/ffmpeg_glue_unittest.cc
index 660b4d4654a..edb50ad0240 100644
--- a/chromium/media/filters/ffmpeg_glue_unittest.cc
+++ b/chromium/media/filters/ffmpeg_glue_unittest.cc
@@ -157,7 +157,7 @@ TEST_F(FFmpegGlueTest, Read) {
EXPECT_CALL(*protocol_, Read(kBufferSize, buffer))
.WillOnce(Return(kBufferSize));
EXPECT_CALL(*protocol_, Read(kBufferSize, buffer))
- .WillOnce(Return(DataSource::kReadError));
+ .WillOnce(Return(AVERROR(EIO)));
EXPECT_EQ(0, ReadPacket(0, buffer));
EXPECT_EQ(kBufferSize, ReadPacket(kBufferSize, buffer));
diff --git a/chromium/media/filters/ffmpeg_video_decoder.cc b/chromium/media/filters/ffmpeg_video_decoder.cc
index 14ee37261fd..1a5f0e16744 100644
--- a/chromium/media/filters/ffmpeg_video_decoder.cc
+++ b/chromium/media/filters/ffmpeg_video_decoder.cc
@@ -208,7 +208,7 @@ std::string FFmpegVideoDecoder::GetDisplayName() const {
void FFmpegVideoDecoder::Initialize(const VideoDecoderConfig& config,
bool low_delay,
CdmContext* /* cdm_context */,
- const InitCB& init_cb,
+ InitCB init_cb,
const OutputCB& output_cb,
const WaitingCB& /* waiting_cb */) {
DVLOG(1) << __func__ << ": " << config.AsHumanReadableString();
@@ -216,15 +216,15 @@ void FFmpegVideoDecoder::Initialize(const VideoDecoderConfig& config,
DCHECK(config.IsValidConfig());
DCHECK(output_cb);
- InitCB bound_init_cb = BindToCurrentLoop(init_cb);
+ InitCB bound_init_cb = BindToCurrentLoop(std::move(init_cb));
if (config.is_encrypted()) {
- bound_init_cb.Run(false);
+ std::move(bound_init_cb).Run(false);
return;
}
if (!ConfigureDecoder(config, low_delay)) {
- bound_init_cb.Run(false);
+ std::move(bound_init_cb).Run(false);
return;
}
@@ -232,26 +232,26 @@ void FFmpegVideoDecoder::Initialize(const VideoDecoderConfig& config,
config_ = config;
output_cb_ = output_cb;
state_ = kNormal;
- bound_init_cb.Run(true);
+ std::move(bound_init_cb).Run(true);
}
void FFmpegVideoDecoder::Decode(scoped_refptr<DecoderBuffer> buffer,
- const DecodeCB& decode_cb) {
+ DecodeCB decode_cb) {
DVLOG(3) << __func__;
DCHECK(thread_checker_.CalledOnValidThread());
DCHECK(buffer.get());
DCHECK(decode_cb);
CHECK_NE(state_, kUninitialized);
- DecodeCB decode_cb_bound = BindToCurrentLoop(decode_cb);
+ DecodeCB decode_cb_bound = BindToCurrentLoop(std::move(decode_cb));
if (state_ == kError) {
- decode_cb_bound.Run(DecodeStatus::DECODE_ERROR);
+ std::move(decode_cb_bound).Run(DecodeStatus::DECODE_ERROR);
return;
}
if (state_ == kDecodeFinished) {
- decode_cb_bound.Run(DecodeStatus::OK);
+ std::move(decode_cb_bound).Run(DecodeStatus::OK);
return;
}
@@ -277,7 +277,7 @@ void FFmpegVideoDecoder::Decode(scoped_refptr<DecoderBuffer> buffer,
if (!FFmpegDecode(*buffer)) {
state_ = kError;
- decode_cb_bound.Run(DecodeStatus::DECODE_ERROR);
+ std::move(decode_cb_bound).Run(DecodeStatus::DECODE_ERROR);
return;
}
@@ -286,17 +286,17 @@ void FFmpegVideoDecoder::Decode(scoped_refptr<DecoderBuffer> buffer,
// VideoDecoderShim expects that |decode_cb| is called only after
// |output_cb_|.
- decode_cb_bound.Run(DecodeStatus::OK);
+ std::move(decode_cb_bound).Run(DecodeStatus::OK);
}
-void FFmpegVideoDecoder::Reset(const base::Closure& closure) {
+void FFmpegVideoDecoder::Reset(base::OnceClosure closure) {
DVLOG(2) << __func__;
DCHECK(thread_checker_.CalledOnValidThread());
avcodec_flush_buffers(codec_context_.get());
state_ = kNormal;
// PostTask() to avoid calling |closure| inmediately.
- base::ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, closure);
+ base::ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, std::move(closure));
}
FFmpegVideoDecoder::~FFmpegVideoDecoder() {
diff --git a/chromium/media/filters/ffmpeg_video_decoder.h b/chromium/media/filters/ffmpeg_video_decoder.h
index c16141b2d72..f13ce417f8b 100644
--- a/chromium/media/filters/ffmpeg_video_decoder.h
+++ b/chromium/media/filters/ffmpeg_video_decoder.h
@@ -42,12 +42,11 @@ class MEDIA_EXPORT FFmpegVideoDecoder : public VideoDecoder {
void Initialize(const VideoDecoderConfig& config,
bool low_delay,
CdmContext* cdm_context,
- const InitCB& init_cb,
+ InitCB init_cb,
const OutputCB& output_cb,
const WaitingCB& waiting_cb) override;
- void Decode(scoped_refptr<DecoderBuffer> buffer,
- const DecodeCB& decode_cb) override;
- void Reset(const base::Closure& closure) override;
+ void Decode(scoped_refptr<DecoderBuffer> buffer, DecodeCB decode_cb) override;
+ void Reset(base::OnceClosure closure) override;
// Callback called from within FFmpeg to allocate a buffer based on
// the dimensions of |codec_context|. See AVCodecContext.get_buffer2
diff --git a/chromium/media/filters/ffmpeg_video_decoder_unittest.cc b/chromium/media/filters/ffmpeg_video_decoder_unittest.cc
index a91884196c5..782d4bcf2b6 100644
--- a/chromium/media/filters/ffmpeg_video_decoder_unittest.cc
+++ b/chromium/media/filters/ffmpeg_video_decoder_unittest.cc
@@ -59,10 +59,7 @@ MATCHER(ContainsFailedToSendLog, "") {
class FFmpegVideoDecoderTest : public testing::Test {
public:
- FFmpegVideoDecoderTest()
- : decoder_(new FFmpegVideoDecoder(&media_log_)),
- decode_cb_(base::Bind(&FFmpegVideoDecoderTest::DecodeDone,
- base::Unretained(this))) {
+ FFmpegVideoDecoderTest() : decoder_(new FFmpegVideoDecoder(&media_log_)) {
// Initialize various test buffers.
frame_buffer_.reset(new uint8_t[kCodedSize.GetArea()]);
end_of_stream_buffer_ = DecoderBuffer::CreateEOSBuffer();
@@ -187,16 +184,17 @@ class FFmpegVideoDecoderTest : public testing::Test {
DecodeStatus status;
EXPECT_CALL(*this, DecodeDone(_)).WillOnce(SaveArg<0>(&status));
- decoder_->Decode(buffer, decode_cb_);
+ decoder_->Decode(buffer, base::BindOnce(&FFmpegVideoDecoderTest::DecodeDone,
+ base::Unretained(this)));
base::RunLoop().RunUntilIdle();
return status;
}
- void FrameReady(const scoped_refptr<VideoFrame>& frame) {
+ void FrameReady(scoped_refptr<VideoFrame> frame) {
DCHECK(!frame->metadata()->IsTrue(VideoFrameMetadata::END_OF_STREAM));
- output_frames_.push_back(frame);
+ output_frames_.push_back(std::move(frame));
}
MOCK_METHOD1(DecodeDone, void(DecodeStatus));
@@ -206,8 +204,6 @@ class FFmpegVideoDecoderTest : public testing::Test {
base::MessageLoop message_loop_;
std::unique_ptr<FFmpegVideoDecoder> decoder_;
- VideoDecoder::DecodeCB decode_cb_;
-
// Various buffers for testing.
std::unique_ptr<uint8_t[]> frame_buffer_;
scoped_refptr<DecoderBuffer> end_of_stream_buffer_;
@@ -227,7 +223,7 @@ TEST_F(FFmpegVideoDecoderTest, Initialize_Normal) {
TEST_F(FFmpegVideoDecoderTest, Initialize_OpenDecoderFails) {
// Specify Theora w/o extra data so that avcodec_open2() fails.
VideoDecoderConfig config(kCodecTheora, VIDEO_CODEC_PROFILE_UNKNOWN,
- kVideoFormat, VideoColorSpace(), VIDEO_ROTATION_0,
+ kVideoFormat, VideoColorSpace(), kNoTransformation,
kCodedSize, kVisibleRect, kNaturalSize,
EmptyExtraData(), Unencrypted());
InitializeWithConfigWithResult(config, false);
diff --git a/chromium/media/filters/frame_processor.cc b/chromium/media/filters/frame_processor.cc
index a6c22878b9d..3d4e312a849 100644
--- a/chromium/media/filters/frame_processor.cc
+++ b/chromium/media/filters/frame_processor.cc
@@ -32,8 +32,7 @@ class MseTrackBuffer {
public:
MseTrackBuffer(ChunkDemuxerStream* stream,
MediaLog* media_log,
- const SourceBufferParseWarningCB& parse_warning_cb,
- ChunkDemuxerStream::RangeApi range_api);
+ const SourceBufferParseWarningCB& parse_warning_cb);
~MseTrackBuffer();
// Get/set |last_decode_timestamp_|.
@@ -96,10 +95,9 @@ class MseTrackBuffer {
// monotonically increasing.
void SetHighestPresentationTimestampIfIncreased(base::TimeDelta timestamp);
- // Adds |frame| to the end of |processed_frames_|. In some BufferingByPts
- // SAP-Type-2 conditions, may also flush any previously enqueued frames, which
- // can fail. Returns the result of such flushing, or true if no flushing was
- // done.
+ // Adds |frame| to the end of |processed_frames_|. In some SAP-Type-2
+ // conditions, may also flush any previously enqueued frames, which can fail.
+ // Returns the result of such flushing, or true if no flushing was done.
bool EnqueueProcessedFrame(scoped_refptr<StreamParserBuffer> frame);
// Appends |processed_frames_|, if not empty, to |stream_| and clears
@@ -154,7 +152,6 @@ class MseTrackBuffer {
// EnqueueProcessedFrame().
base::TimeDelta last_signalled_group_start_pts_;
bool have_flushed_since_last_group_start_;
- ChunkDemuxerStream::RangeApi range_api_;
// The coded frame duration of the last coded frame appended in the current
// coded frame group. Initially kNoTimestamp, meaning "unset".
@@ -196,15 +193,13 @@ class MseTrackBuffer {
MseTrackBuffer::MseTrackBuffer(
ChunkDemuxerStream* stream,
MediaLog* media_log,
- const SourceBufferParseWarningCB& parse_warning_cb,
- ChunkDemuxerStream::RangeApi range_api)
+ const SourceBufferParseWarningCB& parse_warning_cb)
: last_decode_timestamp_(kNoDecodeTimestamp()),
last_processed_decode_timestamp_(DecodeTimestamp()),
pending_group_start_pts_(kNoTimestamp),
last_keyframe_presentation_timestamp_(kNoTimestamp),
last_signalled_group_start_pts_(kNoTimestamp),
have_flushed_since_last_group_start_(false),
- range_api_(range_api),
last_frame_duration_(kNoTimestamp),
highest_presentation_timestamp_(kNoTimestamp),
needs_random_access_point_(true),
@@ -276,17 +271,16 @@ bool MseTrackBuffer::EnqueueProcessedFrame(
"well supported by MSE; buffered range reporting may be less "
"precise.";
- // SAP-Type-2 GOPs (when buffering ByPts), by definition, contain at
- // least one non-keyframe with PTS prior to the keyframe's PTS, with DTS
- // continuous from keyframe forward to at least that non-keyframe. If
- // such a non-keyframe overlaps the end of a previously buffered GOP
- // sufficiently (such that, say, some previous GOP's non-keyframes
- // depending on the overlapped non-keyframe(s) must be dropped), then a
- // gap might need to result. But if we attempt to buffer the new GOP's
- // keyframe through at least that first non-keyframe that does such
- // overlapping all at once, the buffering mechanism doesn't expect such
- // a discontinuity could occur (failing assumptions in places like
- // SourceBufferRangeByPts).
+ // SAP-Type-2 GOPs, by definition, contain at least one non-keyframe with
+ // PTS prior to the keyframe's PTS, with DTS continuous from keyframe
+ // forward to at least that non-keyframe. If such a non-keyframe overlaps
+ // the end of a previously buffered GOP sufficiently (such that, say, some
+ // previous GOP's non-keyframes depending on the overlapped
+ // non-keyframe(s) must be dropped), then a gap might need to result. But
+ // if we attempt to buffer the new GOP's keyframe through at least that
+ // first non-keyframe that does such overlapping all at once, the
+ // buffering mechanism doesn't expect such a discontinuity could occur
+ // (failing assumptions in places like SourceBufferRange).
//
// To prevent such failure, we can first flush what's previously been
// enqueued (if anything), but do this conservatively to not flush
@@ -295,8 +289,7 @@ bool MseTrackBuffer::EnqueueProcessedFrame(
// this track and no flush has yet occurred for this track since then, or
// if there has been a flush since then but this nonkeyframe's PTS is no
// lower than the PTS of the first frame pending flush currently.
- if (range_api_ == ChunkDemuxerStream::RangeApi::kNewByPts &&
- !processed_frames_.empty()) {
+ if (!processed_frames_.empty()) {
DCHECK(kNoTimestamp != last_signalled_group_start_pts_);
if (!have_flushed_since_last_group_start_) {
@@ -347,12 +340,10 @@ void MseTrackBuffer::NotifyStartOfCodedFrameGroup(DecodeTimestamp start_dts,
}
FrameProcessor::FrameProcessor(const UpdateDurationCB& update_duration_cb,
- MediaLog* media_log,
- ChunkDemuxerStream::RangeApi range_api)
+ MediaLog* media_log)
: group_start_timestamp_(kNoTimestamp),
update_duration_cb_(update_duration_cb),
- media_log_(media_log),
- range_api_(range_api) {
+ media_log_(media_log) {
DVLOG(2) << __func__ << "()";
DCHECK(update_duration_cb);
}
@@ -486,8 +477,8 @@ bool FrameProcessor::AddTrack(StreamParser::TrackId id,
return false;
}
- track_buffers_[id] = std::make_unique<MseTrackBuffer>(
- stream, media_log_, parse_warning_cb_, range_api_);
+ track_buffers_[id] =
+ std::make_unique<MseTrackBuffer>(stream, media_log_, parse_warning_cb_);
return true;
}
@@ -927,22 +918,6 @@ bool FrameProcessor::ProcessFrame(scoped_refptr<StreamParserBuffer> frame,
}
DCHECK(presentation_timestamp >= base::TimeDelta());
- if (decode_timestamp < DecodeTimestamp() &&
- range_api_ == ChunkDemuxerStream::RangeApi::kLegacyByDts) {
- // B-frames may still result in negative DTS here after being shifted by
- // |timestamp_offset_|.
- // TODO(wolenetz): This is no longer a step in the CFP, since negative DTS
- // are allowed. Remove this parse failure and error log as part of fixing
- // PTS/DTS conflation in SourceBufferStream. See https://crbug.com/398141
- // and https://crbug.com/718641.
- MEDIA_LOG(ERROR, media_log_)
- << frame->GetTypeName() << " frame with PTS "
- << presentation_timestamp.InMicroseconds() << "us has negative DTS "
- << decode_timestamp.InMicroseconds()
- << "us after applying timestampOffset, handling any discontinuity, "
- "and filtering against append window";
- return false;
- }
// 10. If the need random access point flag on track buffer equals true,
// then run the following steps:
@@ -985,13 +960,11 @@ bool FrameProcessor::ProcessFrame(scoped_refptr<StreamParserBuffer> frame,
(track_buffer->pending_group_start_pts() != kNoTimestamp &&
track_buffer->pending_group_start_pts() > presentation_timestamp);
- if (range_api_ == ChunkDemuxerStream::RangeApi::kNewByPts &&
- frame->is_key_frame()) {
- // When buffering by PTS intervals and a keyframe is discovered to have a
- // decreasing PTS versus the previous highest presentation timestamp for
- // that track in the current coded frame group, signal a new coded frame
- // group for that track buffer so that it can correctly process
- // overlap-removals for the new GOP.
+ if (frame->is_key_frame()) {
+ // When a keyframe is discovered to have a decreasing PTS versus the
+ // previous highest presentation timestamp for that track in the current
+ // coded frame group, signal a new coded frame group for that track buffer
+ // so that it can correctly process overlap-removals for the new GOP.
if (track_buffer->highest_presentation_timestamp() != kNoTimestamp &&
track_buffer->highest_presentation_timestamp() >
presentation_timestamp) {
@@ -1004,13 +977,13 @@ bool FrameProcessor::ProcessFrame(scoped_refptr<StreamParserBuffer> frame,
track_buffer->ResetHighestPresentationTimestamp();
}
- // When buffering by PTS intervals and an otherwise continuous coded frame
- // group (by DTS, and with non-decreasing keyframe PTS) contains a
- // keyframe with PTS in the future significantly far enough that it may be
- // outside of buffering fudge room, signal a new coded frame group with
- // start time set to the previous highest frame end time in the coded
- // frame group for this track. This lets the stream coalesce a potential
- // gap, and also pass internal buffer adjacency checks.
+ // When an otherwise continuous coded frame group (by DTS, and with
+ // non-decreasing keyframe PTS) contains a keyframe with PTS in the future
+ // significantly far enough that it may be outside of buffering fudge
+ // room, signal a new coded frame group with start time set to the
+ // previous highest frame end time in the coded frame group for this
+ // track. This lets the stream coalesce a potential gap, and also pass
+ // internal buffer adjacency checks.
signal_new_cfg |=
track_buffer->highest_presentation_timestamp() != kNoTimestamp &&
track_buffer->highest_presentation_timestamp() + frame->duration() <
diff --git a/chromium/media/filters/frame_processor.h b/chromium/media/filters/frame_processor.h
index 819b1a5e812..de9007dd252 100644
--- a/chromium/media/filters/frame_processor.h
+++ b/chromium/media/filters/frame_processor.h
@@ -28,8 +28,7 @@ class MEDIA_EXPORT FrameProcessor {
typedef base::Callback<void(base::TimeDelta)> UpdateDurationCB;
FrameProcessor(const UpdateDurationCB& update_duration_cb,
- MediaLog* media_log,
- ChunkDemuxerStream::RangeApi range_api);
+ MediaLog* media_log);
~FrameProcessor();
// This must be called exactly once, before doing any track buffer creation or
@@ -177,10 +176,6 @@ class MEDIA_EXPORT FrameProcessor {
// MediaLog for reporting messages and properties to debug content and engine.
MediaLog* media_log_;
- // For differentiating behavior based on buffering by DTS interval versus PTS
- // interval. See https://crbug.com/718641.
- const ChunkDemuxerStream::RangeApi range_api_;
-
// Callback for reporting problematic conditions that are not necessarily
// errors.
SourceBufferParseWarningCB parse_warning_cb_;
diff --git a/chromium/media/filters/frame_processor_unittest.cc b/chromium/media/filters/frame_processor_unittest.cc
index 122fc1a6276..ba297313462 100644
--- a/chromium/media/filters/frame_processor_unittest.cc
+++ b/chromium/media/filters/frame_processor_unittest.cc
@@ -35,20 +35,6 @@ using ::testing::Values;
namespace {
-struct FrameProcessorTestParams {
- public:
- FrameProcessorTestParams(const bool use_sequence_mode,
- const media::ChunkDemuxerStream::RangeApi range_api)
- : use_sequence_mode(use_sequence_mode), range_api(range_api) {}
-
- // Test will use 'sequence' append mode if true, or 'segments' if false.
- const bool use_sequence_mode;
-
- // Determines if media::kMseBufferByPts feature should be forced on or off for
- // the test, and is also used in tests' ChunkDemuxerStream constructions.
- const media::ChunkDemuxerStream::RangeApi range_api;
-};
-
// Helper to shorten "base::TimeDelta::FromMilliseconds(...)" in these test
// cases for integer milliseconds.
constexpr base::TimeDelta Milliseconds(int64_t milliseconds) {
@@ -92,23 +78,19 @@ class FrameProcessorTestCallbackHelper {
DISALLOW_COPY_AND_ASSIGN(FrameProcessorTestCallbackHelper);
};
-class FrameProcessorTest
- : public ::testing::TestWithParam<FrameProcessorTestParams> {
+class FrameProcessorTest : public ::testing::TestWithParam<bool> {
protected:
FrameProcessorTest()
: append_window_end_(kInfiniteDuration),
frame_duration_(Milliseconds(10)),
audio_id_(1),
video_id_(2) {
- const FrameProcessorTestParams& params = GetParam();
- use_sequence_mode_ = params.use_sequence_mode;
- range_api_ = params.range_api;
-
+ use_sequence_mode_ = GetParam();
frame_processor_ = std::make_unique<FrameProcessor>(
base::Bind(
&FrameProcessorTestCallbackHelper::OnPossibleDurationIncrease,
base::Unretained(&callbacks_)),
- &media_log_, range_api_);
+ &media_log_);
frame_processor_->SetParseWarningCallback(
base::Bind(&FrameProcessorTestCallbackHelper::OnParseWarning,
base::Unretained(&callbacks_)));
@@ -321,7 +303,6 @@ class FrameProcessorTest
StrictMock<FrameProcessorTestCallbackHelper> callbacks_;
bool use_sequence_mode_;
- ChunkDemuxerStream::RangeApi range_api_;
std::unique_ptr<FrameProcessor> frame_processor_;
base::TimeDelta append_window_start_;
@@ -362,8 +343,7 @@ class FrameProcessorTest
switch (type) {
case DemuxerStream::AUDIO: {
ASSERT_FALSE(audio_);
- audio_.reset(
- new ChunkDemuxerStream(DemuxerStream::AUDIO, "1", range_api_));
+ audio_.reset(new ChunkDemuxerStream(DemuxerStream::AUDIO, "1"));
AudioDecoderConfig decoder_config(kCodecVorbis, kSampleFormatPlanarF32,
CHANNEL_LAYOUT_STEREO, 1000,
EmptyExtraData(), Unencrypted());
@@ -376,8 +356,7 @@ class FrameProcessorTest
}
case DemuxerStream::VIDEO: {
ASSERT_FALSE(video_);
- video_.reset(
- new ChunkDemuxerStream(DemuxerStream::VIDEO, "2", range_api_));
+ video_.reset(new ChunkDemuxerStream(DemuxerStream::VIDEO, "2"));
ASSERT_TRUE(video_->UpdateVideoConfig(TestVideoConfig::Normal(), false,
&media_log_));
stream = video_.get();
@@ -891,12 +870,10 @@ TEST_P(FrameProcessorTest, AppendWindowFilterWithInexactPreroll_2) {
SetTimestampOffset(Milliseconds(-10));
EXPECT_MEDIA_LOG(DroppedFrame("audio", -10000));
- // When buffering ByDts, splice trimming checks are done only on every audio
- // frame following either a discontinuity or the beginning of ProcessFrames().
- // When buffering ByPts, splice trimming checks are also done on audio frames
- // with PTS not directly continuous with the highest frame end PTS already
- // processed. To simplify the test to have the same splice logging
- // expectations, process each frame by itself here.
+ // Splice trimming checks are done on every audio frame following either a
+ // discontinuity or the beginning of ProcessFrames(), and are also done on
+ // audio frames with PTS not directly continuous with the highest frame end
+ // PTS already processed.
if (use_sequence_mode_)
EXPECT_CALL(callbacks_, PossibleDurationIncrease(Milliseconds(-10)));
else
@@ -987,13 +964,8 @@ TEST_P(FrameProcessorTest,
// Frame B is relocated by 7 to PTS 10, DTS 27, duration 10.
EXPECT_EQ(Milliseconds(7), timestamp_offset_);
- if (range_api_ == ChunkDemuxerStream::RangeApi::kLegacyByDts) {
- // By DTS, start of frame A (17) through end of frame B (27+10).
- CheckExpectedRangesByTimestamp(audio_.get(), "{ [17,37) }");
- } else {
- // By PTS, start of frame A (0) through end of frame B (10+10).
- CheckExpectedRangesByTimestamp(audio_.get(), "{ [0,20) }");
- }
+ // Start of frame A (0) through end of frame B (10+10).
+ CheckExpectedRangesByTimestamp(audio_.get(), "{ [0,20) }");
// Frame A is now at PTS 0 (originally at PTS -7)
// Frame B is now at PTS 10 (originally at PTS 3)
@@ -1011,13 +983,8 @@ TEST_P(FrameProcessorTest,
// Frame B is buffered at PTS 3, DTS 20, duration 10.
EXPECT_EQ(Milliseconds(0), timestamp_offset_);
- if (range_api_ == ChunkDemuxerStream::RangeApi::kLegacyByDts) {
- // By DTS, start of frame A (17) through end of frame B (20+10).
- CheckExpectedRangesByTimestamp(audio_.get(), "{ [17,30) }");
- } else {
- // By PTS, start of frame A (0) through end of frame B (3+10).
- CheckExpectedRangesByTimestamp(audio_.get(), "{ [0,13) }");
- }
+ // Start of frame A (0) through end of frame B (3+10).
+ CheckExpectedRangesByTimestamp(audio_.get(), "{ [0,13) }");
// Frame A is now at PTS 0 (originally at PTS -7)
// Frame B is now at PTS 3 (same as it was originally)
@@ -1158,17 +1125,11 @@ TEST_P(FrameProcessorTest,
EXPECT_CALL(callbacks_, PossibleDurationIncrease(Milliseconds(70)));
EXPECT_TRUE(ProcessFrames("", "40|70")); // PTS=40, DTS=70
- if (range_api_ == ChunkDemuxerStream::RangeApi::kLegacyByDts) {
- // Verify DTS-based range is increased.
- CheckExpectedRangesByTimestamp(video_.get(), "{ [50,80) }");
- } else {
- // This reflects the expectation that PTS start is not "pulled backward" for
- // the new frame at PTS=40 because current spec text doesn't support SAP
- // Type 2; it has no steps in the coded frame processing algorithm that
- // would do that "pulling backward". See https://crbug.com/718641 and
- // https://github.com/w3c/media-source/issues/187.
- CheckExpectedRangesByTimestamp(video_.get(), "{ [50,70) }");
- }
+ // This reflects the expectation that PTS start is not "pulled backward" for
+ // the new frame at PTS=40 because current spec doesn't support SAP Type 2; it
+ // has no steps in the coded frame processing algorithm that would do that
+ // "pulling backward". See https://github.com/w3c/media-source/issues/187.
+ CheckExpectedRangesByTimestamp(video_.get(), "{ [50,70) }");
SeekStream(video_.get(), Milliseconds(0));
CheckReadsThenReadStalls(video_.get(), "50 60 40");
@@ -1193,24 +1154,17 @@ TEST_P(FrameProcessorTest, OOOKeyframePts_1) {
EXPECT_TRUE(ProcessFrames("500|100K", ""));
EXPECT_EQ(Milliseconds(0), timestamp_offset_);
- if (range_api_ == ChunkDemuxerStream::RangeApi::kLegacyByDts) {
- CheckExpectedRangesByTimestamp(audio_.get(), "{ [0,30) [100,110) }");
- CheckReadsThenReadStalls(audio_.get(), "0 1000 100"); // Verifies PTS
- SeekStream(audio_.get(), Milliseconds(100));
- CheckReadsThenReadStalls(audio_.get(), "500"); // Verifies PTS
- } else {
- // Note that the PTS discontinuity (100ms) in the first ProcessFrames()
- // call, above, overlaps the previously buffered range [0,1010), so the
- // frame at 100ms is processed with an adjusted coded frame group start to
- // be 0.001ms, which is just after the highest timestamp before it in the
- // overlapped range. This enables it to be continuous with the frame before
- // it. The remainder of the overlapped range (the buffer at [1000,1010)) is
- // adjusted to have a range start time at the split point (110), and is
- // within fudge room and merged into [0,110). The same happens with the
- // buffer appended [500,510).
- CheckExpectedRangesByTimestamp(audio_.get(), "{ [0,1010) }");
- CheckReadsThenReadStalls(audio_.get(), "0 100 500 1000");
- }
+ // Note that the PTS discontinuity (100ms) in the first ProcessFrames() call,
+ // above, overlaps the previously buffered range [0,1010), so the frame at
+ // 100ms is processed with an adjusted coded frame group start to be 0.001ms,
+ // which is just after the highest timestamp before it in the overlapped
+ // range. This enables it to be continuous with the frame before it. The
+ // remainder of the overlapped range (the buffer at [1000,1010)) is adjusted
+ // to have a range start time at the split point (110), and is within fudge
+ // room and merged into [0,110). The same happens with the buffer appended
+ // [500,510).
+ CheckExpectedRangesByTimestamp(audio_.get(), "{ [0,1010) }");
+ CheckReadsThenReadStalls(audio_.get(), "0 100 500 1000");
}
TEST_P(FrameProcessorTest, OOOKeyframePts_2) {
@@ -1224,21 +1178,16 @@ TEST_P(FrameProcessorTest, OOOKeyframePts_2) {
EXPECT_CALL(callbacks_, PossibleDurationIncrease(Milliseconds(1010)));
EXPECT_TRUE(ProcessFrames("100|20K", ""));
- if (range_api_ == ChunkDemuxerStream::RangeApi::kLegacyByDts) {
- CheckExpectedRangesByTimestamp(audio_.get(), "{ [0,30) }");
- CheckReadsThenReadStalls(audio_.get(), "0 1000 100"); // Verifies PTS
- } else {
- // Note that the PTS discontinuity (100ms) in the first ProcessFrames()
- // call, above, overlaps the previously buffered range [0,1010), so the
- // frame at 100ms is processed with an adjusted coded frame group start to
- // be 0.001ms, which is just after the highest timestamp before it in the
- // overlapped range. This enables it to be continuous with the frame before
- // it. The remainder of the overlapped range (the buffer at [1000,1010)) is
- // adjusted to have a range start time at the split point (110), and is
- // within fudge room and merged into [0,110).
- CheckExpectedRangesByTimestamp(audio_.get(), "{ [0,1010) }");
- CheckReadsThenReadStalls(audio_.get(), "0 100 1000");
- }
+ // Note that the PTS discontinuity (100ms) in the first ProcessFrames() call,
+ // above, overlaps the previously buffered range [0,1010), so the frame at
+ // 100ms is processed with an adjusted coded frame group start to be 0.001ms,
+ // which is just after the highest timestamp before it in the overlapped
+ // range. This enables it to be continuous with the frame before it. The
+ // remainder of the overlapped range (the buffer at [1000,1010)) is adjusted
+ // to have a range start time at the split point (110), and is within fudge
+ // room and merged into [0,110).
+ CheckExpectedRangesByTimestamp(audio_.get(), "{ [0,1010) }");
+ CheckReadsThenReadStalls(audio_.get(), "0 100 1000");
}
TEST_P(FrameProcessorTest, AudioNonKeyframeChangedToKeyframe) {
@@ -1268,9 +1217,8 @@ TEST_P(FrameProcessorTest, AudioNonKeyframeChangedToKeyframe) {
}
TEST_P(FrameProcessorTest, TimestampOffsetNegativeDts) {
- // Shift a GOP earlier using timestampOffset such that the GOP has
- // starts with negative DTS, but PTS 0. Expect ByDts parse error, ByPts
- // success.
+ // Shift a GOP earlier using timestampOffset such that the GOP
+ // starts with negative DTS, but PTS 0.
InSequence s;
AddTestTracks(HAS_VIDEO);
frame_processor_->SetSequenceMode(use_sequence_mode_);
@@ -1281,24 +1229,18 @@ TEST_P(FrameProcessorTest, TimestampOffsetNegativeDts) {
SetTimestampOffset(Milliseconds(-100));
}
- if (range_api_ == ChunkDemuxerStream::RangeApi::kLegacyByDts) {
- EXPECT_MEDIA_LOG(NegativeDtsFailureWhenByDts("video", 0, -30000));
- EXPECT_FALSE(ProcessFrames("", "100|70K 130|80"));
- } else {
- EXPECT_CALL(callbacks_, PossibleDurationIncrease(Milliseconds(40)));
- EXPECT_TRUE(ProcessFrames("", "100|70K 130|80"));
- EXPECT_EQ(Milliseconds(-100), timestamp_offset_);
- CheckExpectedRangesByTimestamp(video_.get(), "{ [0,40) }");
- SeekStream(video_.get(), Milliseconds(0));
- CheckReadsThenReadStalls(video_.get(), "0:100 30:130");
- }
+ EXPECT_CALL(callbacks_, PossibleDurationIncrease(Milliseconds(40)));
+ EXPECT_TRUE(ProcessFrames("", "100|70K 130|80"));
+ EXPECT_EQ(Milliseconds(-100), timestamp_offset_);
+ CheckExpectedRangesByTimestamp(video_.get(), "{ [0,40) }");
+ SeekStream(video_.get(), Milliseconds(0));
+ CheckReadsThenReadStalls(video_.get(), "0:100 30:130");
}
TEST_P(FrameProcessorTest, LargeTimestampOffsetJumpForward) {
// Verifies that jumps forward in buffers emitted from the coded frame
// processing algorithm can create discontinuous buffered ranges if those
- // jumps are large enough, in both kinds of AppendMode, and in both kinds of
- // RangeApi.
+ // jumps are large enough, in both kinds of AppendMode.
InSequence s;
AddTestTracks(HAS_AUDIO);
frame_processor_->SetSequenceMode(use_sequence_mode_);
@@ -1323,41 +1265,20 @@ TEST_P(FrameProcessorTest, LargeTimestampOffsetJumpForward) {
EXPECT_EQ(Milliseconds(5000), timestamp_offset_);
}
- if (range_api_ == ChunkDemuxerStream::RangeApi::kLegacyByDts) {
- if (use_sequence_mode_) {
- CheckExpectedRangesByTimestamp(audio_.get(), "{ [0,10) [100,110) }");
- CheckReadsThenReadStalls(audio_.get(), "0");
- SeekStream(audio_.get(), Milliseconds(100));
- CheckReadsThenReadStalls(audio_.get(), "5000"); // Util verifies PTS.
- } else {
- CheckExpectedRangesByTimestamp(audio_.get(), "{ [0,10) [5100,5110) }");
- CheckReadsThenReadStalls(audio_.get(), "0");
- SeekStream(audio_.get(), Milliseconds(5100));
- CheckReadsThenReadStalls(audio_.get(),
- "10000:5000"); // Util verifies PTS.
- }
+ if (use_sequence_mode_) {
+ CheckExpectedRangesByTimestamp(audio_.get(), "{ [0,10) [5000,5010) }");
+ CheckReadsThenReadStalls(audio_.get(), "0");
+ SeekStream(audio_.get(), Milliseconds(5000));
+ CheckReadsThenReadStalls(audio_.get(), "5000");
} else {
- if (use_sequence_mode_) {
- CheckExpectedRangesByTimestamp(audio_.get(), "{ [0,10) [5000,5010) }");
- CheckReadsThenReadStalls(audio_.get(), "0");
- SeekStream(audio_.get(), Milliseconds(5000));
- CheckReadsThenReadStalls(audio_.get(), "5000");
- } else {
- CheckExpectedRangesByTimestamp(audio_.get(), "{ [0,10) [10000,10010) }");
- CheckReadsThenReadStalls(audio_.get(), "0");
- SeekStream(audio_.get(), Milliseconds(10000));
- CheckReadsThenReadStalls(audio_.get(), "10000:5000");
- }
+ CheckExpectedRangesByTimestamp(audio_.get(), "{ [0,10) [10000,10010) }");
+ CheckReadsThenReadStalls(audio_.get(), "0");
+ SeekStream(audio_.get(), Milliseconds(10000));
+ CheckReadsThenReadStalls(audio_.get(), "10000:5000");
}
}
-TEST_P(FrameProcessorTest,
- BufferingByPts_ContinuousDts_SapType2_and_PtsJumpForward) {
- if (range_api_ == ChunkDemuxerStream::RangeApi::kLegacyByDts) {
- DVLOG(1) << "Skipping kLegacyByDts versions of this test";
- return;
- }
-
+TEST_P(FrameProcessorTest, ContinuousDts_SapType2_and_PtsJumpForward) {
InSequence s;
AddTestTracks(HAS_VIDEO | OBSERVE_APPENDS_AND_GROUP_STARTS);
frame_processor_->SetSequenceMode(use_sequence_mode_);
@@ -1418,17 +1339,11 @@ TEST_P(FrameProcessorTest,
CheckReadsThenReadStalls(video_.get(), "1130 1070 1120 1080 1110 1090 1100");
}
-TEST_P(FrameProcessorTest,
- BufferingByPts_ContinuousDts_NewGopEndOverlapsLastGop_1) {
+TEST_P(FrameProcessorTest, ContinuousDts_NewGopEndOverlapsLastGop_1) {
// API user might craft a continuous-in-DTS-with-previous-append GOP that has
// PTS interval overlapping the previous append.
// Tests SAP-Type-1 GOPs, where newly appended GOP overlaps a nonkeyframe of
// the last GOP appended.
- if (range_api_ == ChunkDemuxerStream::RangeApi::kLegacyByDts) {
- DVLOG(1) << "Skipping kLegacyByDts versions of this test";
- return;
- }
-
InSequence s;
AddTestTracks(HAS_VIDEO | OBSERVE_APPENDS_AND_GROUP_STARTS);
frame_processor_->SetSequenceMode(use_sequence_mode_);
@@ -1459,17 +1374,11 @@ TEST_P(FrameProcessorTest,
CheckReadsThenReadStalls(video_.get(), "100 110 120 125 135 145 155");
}
-TEST_P(FrameProcessorTest,
- BufferingByPts_ContinuousDts_NewGopEndOverlapsLastGop_2) {
+TEST_P(FrameProcessorTest, ContinuousDts_NewGopEndOverlapsLastGop_2) {
// API user might craft a continuous-in-DTS-with-previous-append GOP that has
// PTS interval overlapping the previous append.
// Tests SAP-Type 1 GOPs, where newly appended GOP overlaps the keyframe of
// the last GOP appended.
- if (range_api_ == ChunkDemuxerStream::RangeApi::kLegacyByDts) {
- DVLOG(1) << "Skipping kLegacyByDts versions of this test";
- return;
- }
-
InSequence s;
AddTestTracks(HAS_VIDEO | OBSERVE_APPENDS_AND_GROUP_STARTS);
frame_processor_->SetSequenceMode(use_sequence_mode_);
@@ -1502,17 +1411,11 @@ TEST_P(FrameProcessorTest,
CheckReadsThenReadStalls(video_.get(), "100 110 115 125");
}
-TEST_P(FrameProcessorTest,
- BufferingByPts_ContinuousDts_NewSap2GopEndOverlapsLastGop_1) {
+TEST_P(FrameProcessorTest, ContinuousDts_NewSap2GopEndOverlapsLastGop_1) {
// API user might craft a continuous-in-DTS-with-previous-append GOP that has
// PTS interval overlapping the previous append, using SAP Type 2 GOPs.
// Tests SAP-Type 2 GOPs, where newly appended GOP overlaps nonkeyframes of
// the last GOP appended.
- if (range_api_ == ChunkDemuxerStream::RangeApi::kLegacyByDts) {
- DVLOG(1) << "Skipping kLegacyByDts versions of this test";
- return;
- }
-
InSequence s;
AddTestTracks(HAS_VIDEO | OBSERVE_APPENDS_AND_GROUP_STARTS);
frame_processor_->SetSequenceMode(use_sequence_mode_);
@@ -1551,17 +1454,11 @@ TEST_P(FrameProcessorTest,
CheckReadsThenReadStalls(video_.get(), "145 125 155 135");
}
-TEST_P(FrameProcessorTest,
- BufferingByPts_ContinuousDts_NewSap2GopEndOverlapsLastGop_2) {
+TEST_P(FrameProcessorTest, ContinuousDts_NewSap2GopEndOverlapsLastGop_2) {
// API user might craft a continuous-in-DTS-with-previous-append GOP that has
// PTS interval overlapping the previous append, using SAP Type 2 GOPs.
// Tests SAP-Type 2 GOPs, where newly appended GOP overlaps the keyframe of
// last GOP appended.
- if (range_api_ == ChunkDemuxerStream::RangeApi::kLegacyByDts) {
- DVLOG(1) << "Skipping kLegacyByDts versions of this test";
- return;
- }
-
InSequence s;
AddTestTracks(HAS_VIDEO | OBSERVE_APPENDS_AND_GROUP_STARTS);
frame_processor_->SetSequenceMode(use_sequence_mode_);
@@ -1615,7 +1512,7 @@ TEST_P(FrameProcessorTest,
}
TEST_P(FrameProcessorTest,
- BufferingByPts_ContinuousDts_NewSap2GopEndOverlapsLastGop_3_GopByGop) {
+ ContinuousDts_NewSap2GopEndOverlapsLastGop_3_GopByGop) {
// API user might craft a continuous-in-DTS-with-previous-append GOP that has
// PTS interval overlapping the previous append, using SAP Type 2 GOPs. Tests
// SAP-Type 2 GOPs, where newly appended GOP overlaps enough nonkeyframes of
@@ -1624,11 +1521,6 @@ TEST_P(FrameProcessorTest,
// flushed at the same time as its keyframe, but the second GOP's keyframe PTS
// is close enough to the end of the first GOP's presentation interval to not
// signal a new coded frame group start.
- if (range_api_ == ChunkDemuxerStream::RangeApi::kLegacyByDts) {
- DVLOG(1) << "Skipping kLegacyByDts versions of this test";
- return;
- }
-
InSequence s;
AddTestTracks(HAS_VIDEO | OBSERVE_APPENDS_AND_GROUP_STARTS);
frame_processor_->SetSequenceMode(use_sequence_mode_);
@@ -1659,17 +1551,11 @@ TEST_P(FrameProcessorTest,
CheckReadsThenReadStalls(video_.get(), "500 520 510 540 520 530");
}
-TEST_P(
- FrameProcessorTest,
- BufferingByPts_ContinuousDts_NewSap2GopEndOverlapsLastGop_3_FrameByFrame) {
+TEST_P(FrameProcessorTest,
+ ContinuousDts_NewSap2GopEndOverlapsLastGop_3_FrameByFrame) {
// Tests that the buffered range results match the previous GopByGop test if
// each frame of the second GOP is explicitly appended by the app
// one-at-a-time.
- if (range_api_ == ChunkDemuxerStream::RangeApi::kLegacyByDts) {
- DVLOG(1) << "Skipping kLegacyByDts versions of this test";
- return;
- }
-
InSequence s;
AddTestTracks(HAS_VIDEO | OBSERVE_APPENDS_AND_GROUP_STARTS);
frame_processor_->SetSequenceMode(use_sequence_mode_);
@@ -1708,18 +1594,13 @@ TEST_P(
}
TEST_P(FrameProcessorTest,
- BufferingByPts_ContinuousDts_NewSap2GopEndOverlapsLastGop_4_GopByGop) {
+ ContinuousDts_NewSap2GopEndOverlapsLastGop_4_GopByGop) {
// API user might craft a continuous-in-DTS-with-previous-append GOP that has
// PTS interval overlapping the previous append, using SAP Type 2 GOPs. Tests
// SAP-Type 2 GOPs, where newly appended GOP overlaps enough nonkeyframes of
// the previous GOP such that dropped decode dependencies might cause problems
// if the first nonkeyframe with PTS prior to the GOP's keyframe PTS is
// flushed at the same time as its keyframe.
- if (range_api_ == ChunkDemuxerStream::RangeApi::kLegacyByDts) {
- DVLOG(1) << "Skipping kLegacyByDts versions of this test";
- return;
- }
-
InSequence s;
AddTestTracks(HAS_VIDEO | OBSERVE_APPENDS_AND_GROUP_STARTS);
frame_processor_->SetSequenceMode(use_sequence_mode_);
@@ -1755,17 +1636,11 @@ TEST_P(FrameProcessorTest,
CheckReadsThenReadStalls(video_.get(), "500 520 510 550 520 530 540");
}
-TEST_P(
- FrameProcessorTest,
- BufferingByPts_ContinuousDts_NewSap2GopEndOverlapsLastGop_4_FrameByFrame) {
+TEST_P(FrameProcessorTest,
+ ContinuousDts_NewSap2GopEndOverlapsLastGop_4_FrameByFrame) {
// Tests that the buffered range results match the previous GopByGop test if
// each frame of the second GOP is explicitly appended by the app
// one-at-a-time.
- if (range_api_ == ChunkDemuxerStream::RangeApi::kLegacyByDts) {
- DVLOG(1) << "Skipping kLegacyByDts versions of this test";
- return;
- }
-
InSequence s;
AddTestTracks(HAS_VIDEO | OBSERVE_APPENDS_AND_GROUP_STARTS);
frame_processor_->SetSequenceMode(use_sequence_mode_);
@@ -1812,8 +1687,7 @@ TEST_P(
CheckReadsThenReadStalls(video_.get(), "500 520 510 550 520 530 540");
}
-TEST_P(FrameProcessorTest,
- BufferingByPts_ContinuousDts_GopKeyframePtsOrder_2_1_3) {
+TEST_P(FrameProcessorTest, ContinuousDts_GopKeyframePtsOrder_2_1_3) {
// White-box test, demonstrating expected behavior for a specially crafted
// sequence that "should" be unusual, but gracefully handled:
// SAP-Type 1 GOPs for simplicity of test. First appended GOP is highest in
@@ -1825,11 +1699,6 @@ TEST_P(FrameProcessorTest,
// GOPs). Note that MseTrackBuffer::ResetHighestPresentationTimestamp() done
// at the beginning of the second appended GOP is the key to gracefully
// handling the third appended GOP.
- if (range_api_ == ChunkDemuxerStream::RangeApi::kLegacyByDts) {
- DVLOG(1) << "Skipping kLegacyByDts versions of this test";
- return;
- }
-
InSequence s;
AddTestTracks(HAS_VIDEO | OBSERVE_APPENDS_AND_GROUP_STARTS);
frame_processor_->SetSequenceMode(use_sequence_mode_);
@@ -1877,13 +1746,8 @@ TEST_P(FrameProcessorTest,
TEST_P(FrameProcessorTest, ContinuousPts_DiscontinuousDts_AcrossGops) {
// GOPs which overlap in DTS, but are continuous in PTS should be buffered
- // correctly (though with different results if the overlap is significant
- // enough) in each of ByPts and ByDts buffering modes. In particular,
- // monotonic increase of DTS in continuous-in-PTS append sequences is not
- // required across GOPs when buffering by PTS (just within GOPs). When
- // buffering by DTS, a continuous sequence is determined by DTS (not PTS), so
- // the append sequence is required to have monotonically increasing DTS (even
- // across GOPs).
+ // correctly. In particular, monotonic increase of DTS in continuous-in-PTS
+ // append sequences is not required across GOPs (just within GOPs).
InSequence s;
AddTestTracks(HAS_VIDEO | OBSERVE_APPENDS_AND_GROUP_STARTS);
frame_processor_->SetSequenceMode(use_sequence_mode_);
@@ -1910,7 +1774,6 @@ TEST_P(FrameProcessorTest, ContinuousPts_DiscontinuousDts_AcrossGops) {
DecodeTimestamp::FromPresentationTime(Milliseconds(225)),
Milliseconds(240)));
EXPECT_CALL(callbacks_, OnAppend(DemuxerStream::VIDEO, _));
- // Note that duration is reported based on PTS regardless of buffering model.
EXPECT_CALL(callbacks_, PossibleDurationIncrease(Milliseconds(280)));
// Append a second GOP whose first DTS is below the last DTS of the first GOP,
// but whose PTS interval is continuous with the end of the first GOP.
@@ -1918,15 +1781,8 @@ TEST_P(FrameProcessorTest, ContinuousPts_DiscontinuousDts_AcrossGops) {
EXPECT_EQ(Milliseconds(0), timestamp_offset_);
SeekStream(video_.get(), Milliseconds(200));
- if (range_api_ == ChunkDemuxerStream::RangeApi::kLegacyByDts) {
- CheckExpectedRangesByTimestamp(video_.get(), "{ [200,265) }");
- // Note that even when buffering by DTS, this verification method checks the
- // PTS sequence of frames read from the stream.
- CheckReadsThenReadStalls(video_.get(), "200 210 220 240 250 260 270");
- } else {
- CheckExpectedRangesByTimestamp(video_.get(), "{ [200,280) }");
- CheckReadsThenReadStalls(video_.get(), "200 210 220 230 240 250 260 270");
- }
+ CheckExpectedRangesByTimestamp(video_.get(), "{ [200,280) }");
+ CheckReadsThenReadStalls(video_.get(), "200 210 220 230 240 250 260 270");
}
TEST_P(FrameProcessorTest, OnlyKeyframes_ContinuousDts_ContinousPts_1) {
@@ -1974,11 +1830,9 @@ TEST_P(FrameProcessorTest, OnlyKeyframes_ContinuousDts_ContinuousPts_2) {
TEST_P(FrameProcessorTest,
OnlyKeyframes_ContinuousDts_DiscontinuousPtsJustBeyondFudgeRoom) {
- // Verifies that, in ByPts, multiple group starts and distinct appends occur
+ // Verifies that multiple group starts and distinct appends occur
// when processing a single DTS-continuous set of frames with PTS deltas that
// just barely exceed the adjacency assumption in FrameProcessor.
- // Verifies that, in ByDts, precisely one group start and one stream append
- // occur.
InSequence s;
AddTestTracks(HAS_AUDIO | OBSERVE_APPENDS_AND_GROUP_STARTS);
if (use_sequence_mode_)
@@ -1989,67 +1843,44 @@ TEST_P(FrameProcessorTest,
EXPECT_CALL(callbacks_, OnGroupStart(DemuxerStream::AUDIO, DecodeTimestamp(),
base::TimeDelta()));
EXPECT_CALL(callbacks_, OnAppend(DemuxerStream::AUDIO, _));
- if (range_api_ == ChunkDemuxerStream::RangeApi::kNewByPts) {
- // Frame "10|5K" following "0K" triggers start of new group and eventual
- // append.
- EXPECT_CALL(callbacks_, OnGroupStart(DemuxerStream::AUDIO,
- DecodeTimestamp(), frame_duration_));
- EXPECT_CALL(callbacks_, OnAppend(DemuxerStream::AUDIO, _));
-
- // Frame "20|10K" following "10|5K" triggers start of new group and eventual
- // append.
- EXPECT_CALL(
- callbacks_,
- OnGroupStart(DemuxerStream::AUDIO,
- DecodeTimestamp::FromPresentationTime(Milliseconds(5)),
- Milliseconds(10) + frame_duration_));
- EXPECT_CALL(callbacks_, OnAppend(DemuxerStream::AUDIO, _));
-
- // Frame "30|15K" following "20|10K" triggers start of new group and
- // eventual append.
- EXPECT_CALL(
- callbacks_,
- OnGroupStart(DemuxerStream::AUDIO,
- DecodeTimestamp::FromPresentationTime(Milliseconds(10)),
- Milliseconds(20) + frame_duration_));
- EXPECT_CALL(callbacks_, OnAppend(DemuxerStream::AUDIO, _));
- }
+ // Frame "10|5K" following "0K" triggers start of new group and eventual
+ // append.
+ EXPECT_CALL(callbacks_, OnGroupStart(DemuxerStream::AUDIO, DecodeTimestamp(),
+ frame_duration_));
+ EXPECT_CALL(callbacks_, OnAppend(DemuxerStream::AUDIO, _));
+
+ // Frame "20|10K" following "10|5K" triggers start of new group and eventual
+ // append.
+ EXPECT_CALL(
+ callbacks_,
+ OnGroupStart(DemuxerStream::AUDIO,
+ DecodeTimestamp::FromPresentationTime(Milliseconds(5)),
+ Milliseconds(10) + frame_duration_));
+ EXPECT_CALL(callbacks_, OnAppend(DemuxerStream::AUDIO, _));
+
+ // Frame "30|15K" following "20|10K" triggers start of new group and
+ // eventual append.
+ EXPECT_CALL(
+ callbacks_,
+ OnGroupStart(DemuxerStream::AUDIO,
+ DecodeTimestamp::FromPresentationTime(Milliseconds(10)),
+ Milliseconds(20) + frame_duration_));
+ EXPECT_CALL(callbacks_, OnAppend(DemuxerStream::AUDIO, _));
+
EXPECT_CALL(callbacks_, PossibleDurationIncrease(
base::TimeDelta::FromMicroseconds(34999)));
EXPECT_TRUE(ProcessFrames("0K 10|5K 20|10K 30|15K", ""));
EXPECT_EQ(Milliseconds(0), timestamp_offset_);
- if (range_api_ == ChunkDemuxerStream::RangeApi::kNewByPts) {
- // Note that the ByPts result is still buffered continuous since DTS was
- // continuous and PTS was monotonically increasing (such that each group
- // start was signalled by FrameProcessor to be continuous with the end of
- // the previous group, if any.)
- CheckExpectedRangesByTimestamp(audio_.get(), "{ [0,34) }");
- } else {
- CheckExpectedRangesByTimestamp(audio_.get(), "{ [0,19) }");
- }
+  // Note that the result is still a single continuous buffered range, since
+  // DTS was continuous and PTS was monotonically increasing (such that each
+  // group start was signalled by FrameProcessor to be continuous with the end
+  // of the previous group, if any).
+ CheckExpectedRangesByTimestamp(audio_.get(), "{ [0,34) }");
CheckReadsThenReadStalls(audio_.get(), "0 10 20 30");
}
-INSTANTIATE_TEST_SUITE_P(SequenceModeLegacyByDts,
- FrameProcessorTest,
- Values(FrameProcessorTestParams(
- true,
- ChunkDemuxerStream::RangeApi::kLegacyByDts)));
-INSTANTIATE_TEST_SUITE_P(SegmentsModeLegacyByDts,
- FrameProcessorTest,
- Values(FrameProcessorTestParams(
- false,
- ChunkDemuxerStream::RangeApi::kLegacyByDts)));
-INSTANTIATE_TEST_SUITE_P(
- SequenceModeNewByPts,
- FrameProcessorTest,
- Values(FrameProcessorTestParams(true,
- ChunkDemuxerStream::RangeApi::kNewByPts)));
-INSTANTIATE_TEST_SUITE_P(
- SegmentsModeNewByPts,
- FrameProcessorTest,
- Values(FrameProcessorTestParams(false,
- ChunkDemuxerStream::RangeApi::kNewByPts)));
+INSTANTIATE_TEST_SUITE_P(SequenceMode, FrameProcessorTest, Values(true));
+INSTANTIATE_TEST_SUITE_P(SegmentsMode, FrameProcessorTest, Values(false));
} // namespace media
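
With the range-API parameter removed above, FrameProcessorTest is parameterized on a single bool (sequence vs. segments mode). A minimal sketch of the resulting gtest pattern, using an illustrative suite name in place of the real fixture:

    #include "testing/gtest/include/gtest/gtest.h"

    // Illustrative stand-in for FrameProcessorTest's remaining parameter.
    class SequenceModeSketchTest : public ::testing::TestWithParam<bool> {
     protected:
      bool use_sequence_mode() const { return GetParam(); }
    };

    TEST_P(SequenceModeSketchTest, ReadsParam) {
      // Each instantiation below runs this body once: GetParam() is true
      // for the SequenceMode suite and false for SegmentsMode.
      EXPECT_EQ(use_sequence_mode(), GetParam());
    }

    INSTANTIATE_TEST_SUITE_P(SequenceMode,
                             SequenceModeSketchTest,
                             ::testing::Values(true));
    INSTANTIATE_TEST_SUITE_P(SegmentsMode,
                             SequenceModeSketchTest,
                             ::testing::Values(false));
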
diff --git a/chromium/media/filters/fuchsia/fuchsia_video_decoder.cc b/chromium/media/filters/fuchsia/fuchsia_video_decoder.cc
index b572934f2db..17416179a12 100644
--- a/chromium/media/filters/fuchsia/fuchsia_video_decoder.cc
+++ b/chromium/media/filters/fuchsia/fuchsia_video_decoder.cc
@@ -69,7 +69,7 @@ class PendingDecode {
public:
PendingDecode(scoped_refptr<DecoderBuffer> buffer,
VideoDecoder::DecodeCB decode_cb)
- : buffer_(buffer), decode_cb_(decode_cb) {
+ : buffer_(buffer), decode_cb_(std::move(decode_cb)) {
DCHECK(buffer_);
}
~PendingDecode() {
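
PendingDecode now takes its DecodeCB by value and moves it into the member, which is the required handling once DecodeCB becomes a base::OnceCallback. A minimal sketch of the pattern, assuming only base's callback types (PendingWork and Finish are illustrative names):

    #include <utility>

    #include "base/callback.h"

    // A once-callback must be moved into storage and moved out again to
    // run; copying is not available.
    struct PendingWork {
      explicit PendingWork(base::OnceClosure cb) : done_cb(std::move(cb)) {}
      base::OnceClosure done_cb;
    };

    void Finish(PendingWork* work) {
      // std::move() consumes the stored callback; Run() may execute once.
      std::move(work->done_cb).Run();
    }
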
@@ -282,12 +282,11 @@ class FuchsiaVideoDecoder : public VideoDecoder {
void Initialize(const VideoDecoderConfig& config,
bool low_delay,
CdmContext* cdm_context,
- const InitCB& init_cb,
+ InitCB init_cb,
const OutputCB& output_cb,
const WaitingCB& waiting_cb) override;
- void Decode(scoped_refptr<DecoderBuffer> buffer,
- const DecodeCB& decode_cb) override;
- void Reset(const base::Closure& closure) override;
+ void Decode(scoped_refptr<DecoderBuffer> buffer, DecodeCB decode_cb) override;
+ void Reset(base::OnceClosure closure) override;
bool NeedsBitstreamConversion() const override;
bool CanReadWithoutStalling() const override;
int GetMaxDecodeRequests() const override;
@@ -298,7 +297,9 @@ class FuchsiaVideoDecoder : public VideoDecoder {
void OnInputConstraints(
fuchsia::media::StreamBufferConstraints input_constraints);
void OnFreeInputPacket(fuchsia::media::PacketHeader free_input_packet);
- void OnOutputConfig(fuchsia::media::StreamOutputConfig output_config);
+ void OnOutputConstraints(
+ fuchsia::media::StreamOutputConstraints output_constraints);
+ void OnOutputFormat(fuchsia::media::StreamOutputFormat output_format);
void OnOutputPacket(fuchsia::media::Packet output_packet,
bool error_detected_before,
bool error_detected_during);
@@ -373,13 +374,13 @@ std::string FuchsiaVideoDecoder::GetDisplayName() const {
void FuchsiaVideoDecoder::Initialize(const VideoDecoderConfig& config,
bool low_delay,
CdmContext* cdm_context,
- const InitCB& init_cb,
+ InitCB init_cb,
const OutputCB& output_cb,
const WaitingCB& waiting_cb) {
output_cb_ = output_cb;
container_pixel_aspect_ratio_ = config.GetPixelAspectRatio();
- auto done_callback = BindToCurrentLoop(init_cb);
+ auto done_callback = BindToCurrentLoop(std::move(init_cb));
fuchsia::mediacodec::CreateDecoder_Params codec_params;
codec_params.mutable_input_details()->set_format_details_version_ordinal(0);
@@ -402,7 +403,7 @@ void FuchsiaVideoDecoder::Initialize(const VideoDecoderConfig& config,
break;
default:
- done_callback.Run(false);
+ std::move(done_callback).Run(false);
return;
}
@@ -427,8 +428,10 @@ void FuchsiaVideoDecoder::Initialize(const VideoDecoderConfig& config,
fit::bind_member(this, &FuchsiaVideoDecoder::OnInputConstraints);
codec_.events().OnFreeInputPacket =
fit::bind_member(this, &FuchsiaVideoDecoder::OnFreeInputPacket);
- codec_.events().OnOutputConfig =
- fit::bind_member(this, &FuchsiaVideoDecoder::OnOutputConfig);
+ codec_.events().OnOutputConstraints =
+ fit::bind_member(this, &FuchsiaVideoDecoder::OnOutputConstraints);
+ codec_.events().OnOutputFormat =
+ fit::bind_member(this, &FuchsiaVideoDecoder::OnOutputFormat);
codec_.events().OnOutputPacket =
fit::bind_member(this, &FuchsiaVideoDecoder::OnOutputPacket);
codec_.events().OnOutputEndOfStream =
@@ -436,11 +439,11 @@ void FuchsiaVideoDecoder::Initialize(const VideoDecoderConfig& config,
codec_->EnableOnStreamFailed();
- done_callback.Run(true);
+ std::move(done_callback).Run(true);
}
void FuchsiaVideoDecoder::Decode(scoped_refptr<DecoderBuffer> buffer,
- const DecodeCB& decode_cb) {
+ DecodeCB decode_cb) {
DCHECK_LT(static_cast<int>(pending_decodes_.size()) + num_used_input_buffers_,
GetMaxDecodeRequests());
@@ -448,15 +451,16 @@ void FuchsiaVideoDecoder::Decode(scoped_refptr<DecoderBuffer> buffer,
// Post the callback to the current sequence as DecoderStream doesn't expect
// Decode() to complete synchronously.
base::SequencedTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, base::BindOnce(decode_cb, DecodeStatus::DECODE_ERROR));
+ FROM_HERE,
+ base::BindOnce(std::move(decode_cb), DecodeStatus::DECODE_ERROR));
return;
}
- pending_decodes_.push_back(PendingDecode(buffer, decode_cb));
+ pending_decodes_.push_back(PendingDecode(buffer, std::move(decode_cb)));
PumpInput();
}
-void FuchsiaVideoDecoder::Reset(const base::Closure& closure) {
+void FuchsiaVideoDecoder::Reset(base::OnceClosure closure) {
// Call DecodeCB(ABORTED) for all active decode requests.
for (auto& buffer : input_buffers_) {
buffer.CallDecodeCallbackIfAny(DecodeStatus::ABORTED);
@@ -473,7 +477,7 @@ void FuchsiaVideoDecoder::Reset(const base::Closure& closure) {
active_stream_ = false;
}
- BindToCurrentLoop(closure).Run();
+ BindToCurrentLoop(std::move(closure)).Run();
}
bool FuchsiaVideoDecoder::NeedsBitstreamConversion() const {
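
Reset() now wraps the move-only closure with BindToCurrentLoop before running it. A condensed sketch of what that wrapper provides for once-closures, under the assumption that the essential behavior is "post back to the creating sequence" (the name here is illustrative, not media's actual implementation):

    #include <utility>

    #include "base/bind.h"
    #include "base/location.h"
    #include "base/sequenced_task_runner.h"
    #include "base/threading/sequenced_task_runner_handle.h"

    // Returns a closure that, when run from any thread, posts |closure|
    // back to the sequence this function was called on.
    base::OnceClosure PostBackToCurrentSequence(base::OnceClosure closure) {
      return base::BindOnce(
          [](scoped_refptr<base::SequencedTaskRunner> task_runner,
             base::OnceClosure closure) {
            task_runner->PostTask(FROM_HERE, std::move(closure));
          },
          base::SequencedTaskRunnerHandle::Get(), std::move(closure));
    }
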
@@ -541,43 +545,58 @@ void FuchsiaVideoDecoder::OnFreeInputPacket(
PumpInput();
}
-void FuchsiaVideoDecoder::OnOutputConfig(
- fuchsia::media::StreamOutputConfig output_config) {
- if (!output_config.has_stream_lifetime_ordinal() ||
- !output_config.has_format_details()) {
- DLOG(ERROR) << "Received OnOutputConfig() with missing required fields.";
+void FuchsiaVideoDecoder::OnOutputConstraints(
+ fuchsia::media::StreamOutputConstraints output_constraints) {
+ if (!output_constraints.has_stream_lifetime_ordinal()) {
+ DLOG(ERROR) << "Received OnOutputConstraints() with missing required "
+ "fields.";
OnError();
return;
}
- if (output_config.stream_lifetime_ordinal() != stream_lifetime_ordinal_) {
+ if (output_constraints.stream_lifetime_ordinal() !=
+ stream_lifetime_ordinal_) {
return;
}
- auto* format = output_config.mutable_format_details();
-
- if (!format->has_domain() || !format->domain().is_video() ||
- !format->domain().video().is_uncompressed()) {
- DLOG(ERROR) << "Received OnOutputConfig() with invalid format.";
- OnError();
- return;
- }
-
- if (output_config.has_buffer_constraints_action_required() &&
- output_config.buffer_constraints_action_required()) {
- if (!output_config.has_buffer_constraints()) {
- DLOG(ERROR) << "Received OnOutputConfig() which requires buffer "
+ if (output_constraints.has_buffer_constraints_action_required() &&
+ output_constraints.buffer_constraints_action_required()) {
+ if (!output_constraints.has_buffer_constraints()) {
+ DLOG(ERROR) << "Received OnOutputConstraints() which requires buffer "
"constraints action, but without buffer constraints.";
OnError();
return;
}
if (!InitializeOutputBuffers(
- std::move(*output_config.mutable_buffer_constraints()))) {
+ std::move(*output_constraints.mutable_buffer_constraints()))) {
DLOG(ERROR) << "Failed to initialize output buffers.";
OnError();
return;
}
}
+}
+
+void FuchsiaVideoDecoder::OnOutputFormat(
+ fuchsia::media::StreamOutputFormat output_format) {
+ if (!output_format.has_stream_lifetime_ordinal() ||
+ !output_format.has_format_details()) {
+ DLOG(ERROR) << "Received OnOutputFormat() with missing required fields.";
+ OnError();
+ return;
+ }
+
+ if (output_format.stream_lifetime_ordinal() != stream_lifetime_ordinal_) {
+ return;
+ }
+
+ auto* format = output_format.mutable_format_details();
+
+ if (!format->has_domain() || !format->domain().is_video() ||
+ !format->domain().video().is_uncompressed()) {
+ DLOG(ERROR) << "Received OnOutputFormat() with invalid format.";
+ OnError();
+ return;
+ }
output_format_ = std::move(format->mutable_domain()->video().uncompressed());
}
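
The old single OnOutputConfig() event is split into OnOutputConstraints() (buffer setup) and OnOutputFormat() (frame layout), each validated against the current stream lifetime ordinal. A schematic sketch of that pattern with hypothetical payload types, not the real Fuchsia FIDL structs:

    #include <cstdint>

    // Hypothetical stand-ins for the two event payloads.
    struct Constraints { uint64_t stream_lifetime_ordinal = 0; };
    struct Format { uint64_t stream_lifetime_ordinal = 0; };

    class OutputState {
     public:
      void OnConstraints(const Constraints& c) {
        if (c.stream_lifetime_ordinal != current_ordinal_)
          return;  // Stale event from a previous stream; ignore.
        // (Re)allocate output buffers here when action is required.
      }
      void OnFormat(const Format& f) {
        if (f.stream_lifetime_ordinal != current_ordinal_)
          return;  // Stale event from a previous stream; ignore.
        // Record frame geometry for subsequent output packets.
      }

     private:
      uint64_t current_ordinal_ = 1;
    };
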
@@ -587,7 +606,8 @@ void FuchsiaVideoDecoder::OnOutputPacket(fuchsia::media::Packet output_packet,
bool error_detected_during) {
if (!output_packet.has_header() ||
!output_packet.header().has_buffer_lifetime_ordinal() ||
- !output_packet.header().has_packet_index()) {
+ !output_packet.header().has_packet_index() ||
+ !output_packet.has_buffer_index()) {
DLOG(ERROR) << "Received OnOutputPacket() with missing required fields.";
OnError();
return;
@@ -648,8 +668,11 @@ void FuchsiaVideoDecoder::OnOutputPacket(fuchsia::media::Packet output_packet,
}
auto packet_index = output_packet.header().packet_index();
- auto& buffer = output_buffers_[packet_index];
+ auto buffer_index = output_packet.buffer_index();
+ auto& buffer = output_buffers_[buffer_index];
+ // We're not using single buffer mode, so packet count will be equal to buffer
+ // count.
DCHECK_LT(num_used_output_buffers_, static_cast<int>(output_buffers_.size()));
num_used_output_buffers_++;
diff --git a/chromium/media/filters/fuchsia/fuchsia_video_decoder_unittest.cc b/chromium/media/filters/fuchsia/fuchsia_video_decoder_unittest.cc
index 4e6adad56b4..14568a6640d 100644
--- a/chromium/media/filters/fuchsia/fuchsia_video_decoder_unittest.cc
+++ b/chromium/media/filters/fuchsia/fuchsia_video_decoder_unittest.cc
@@ -41,9 +41,9 @@ class FuchsiaVideoDecoderTest : public testing::Test {
return init_cb_result;
}
- void OnVideoFrame(const scoped_refptr<VideoFrame>& frame) {
+ void OnVideoFrame(scoped_refptr<VideoFrame> frame) {
num_output_frames_++;
- output_frames_.push_back(frame);
+ output_frames_.push_back(std::move(frame));
while (output_frames_.size() > frames_to_keep_) {
output_frames_.pop_front();
}
diff --git a/chromium/media/filters/gpu_video_decoder.cc b/chromium/media/filters/gpu_video_decoder.cc
index 72088c75a66..f057b8e2af6 100644
--- a/chromium/media/filters/gpu_video_decoder.cc
+++ b/chromium/media/filters/gpu_video_decoder.cc
@@ -56,9 +56,8 @@ enum { kMaxInFlightDecodes = 4 };
enum { kBufferCountBeforeGC = 1024 };
struct GpuVideoDecoder::PendingDecoderBuffer {
- PendingDecoderBuffer(std::unique_ptr<base::SharedMemory> s,
- const DecodeCB& done_cb)
- : shared_memory(std::move(s)), done_cb(done_cb) {}
+ PendingDecoderBuffer(std::unique_ptr<base::SharedMemory> s, DecodeCB done_cb)
+ : shared_memory(std::move(s)), done_cb(std::move(done_cb)) {}
std::unique_ptr<base::SharedMemory> shared_memory;
DecodeCB done_cb;
};
@@ -101,24 +100,26 @@ GpuVideoDecoder::GpuVideoDecoder(
this, "media::GpuVideoDecoder", base::ThreadTaskRunnerHandle::Get());
}
-void GpuVideoDecoder::Reset(const base::Closure& closure) {
+void GpuVideoDecoder::Reset(base::OnceClosure closure) {
DVLOG(3) << "Reset()";
DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
if (state_ == kDrainingDecoder) {
base::ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, base::BindOnce(&GpuVideoDecoder::Reset,
- weak_factory_.GetWeakPtr(), closure));
+ FROM_HERE,
+ base::BindOnce(&GpuVideoDecoder::Reset, weak_factory_.GetWeakPtr(),
+ std::move(closure)));
return;
}
if (!vda_) {
- base::ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, closure);
+ base::ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ std::move(closure));
return;
}
DCHECK(!pending_reset_cb_);
- pending_reset_cb_ = BindToCurrentLoop(closure);
+ pending_reset_cb_ = BindToCurrentLoop(std::move(closure));
vda_->Reset();
}
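
While draining, Reset() re-posts itself with a weak pointer and the moved closure, so the retry is dropped harmlessly if the decoder is destroyed first. A self-contained sketch of that pattern (Worker and is_busy_ are illustrative):

    #include <utility>

    #include "base/bind.h"
    #include "base/callback.h"
    #include "base/location.h"
    #include "base/memory/weak_ptr.h"
    #include "base/threading/thread_task_runner_handle.h"

    class Worker {
     public:
      void Reset(base::OnceClosure done) {
        if (is_busy_) {
          // Retry later; a dead weak pointer silently drops the task
          // (and |done| with it).
          base::ThreadTaskRunnerHandle::Get()->PostTask(
              FROM_HERE,
              base::BindOnce(&Worker::Reset, weak_factory_.GetWeakPtr(),
                             std::move(done)));
          return;
        }
        std::move(done).Run();
      }

     private:
      bool is_busy_ = false;
      base::WeakPtrFactory<Worker> weak_factory_{this};
    };
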
@@ -136,7 +137,7 @@ static bool IsCodedSizeSupported(const gfx::Size& coded_size,
// UMA stat reported because the UMA_HISTOGRAM_ENUMERATION API requires a
// callsite to always be called with the same stat name (can't parameterize it).
static void ReportGpuVideoDecoderInitializeStatusToUMAAndRunCB(
- const VideoDecoder::InitCB& cb,
+ VideoDecoder::InitCB cb,
MediaLog* media_log,
bool success) {
// TODO(xhwang): Report |success| directly.
@@ -149,7 +150,7 @@ static void ReportGpuVideoDecoderInitializeStatusToUMAAndRunCB(
"Media.OriginUrl.GpuVideoDecoderInitFailure");
}
- cb.Run(success);
+ std::move(cb).Run(success);
}
bool GpuVideoDecoder::IsPlatformDecoder() const {
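
ReportGpuVideoDecoderInitializeStatusToUMAAndRunCB now receives the InitCB by value, and callers chain it with BindOnce so the reporting step consumes and forwards the move-only callback exactly once. A minimal sketch of that chaining, with a hypothetical reporting step standing in for the UMA call:

    #include <utility>

    #include "base/bind.h"
    #include "base/callback.h"

    using InitCB = base::OnceCallback<void(bool)>;

    // Hypothetical wrapper: do some extra work, then forward |success| to
    // the original callback.
    void ReportThenRun(InitCB cb, bool success) {
      // ...report |success| to metrics here...
      std::move(cb).Run(success);
    }

    InitCB WrapWithReport(InitCB cb) {
      return base::BindOnce(&ReportThenRun, std::move(cb));
    }
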
@@ -163,7 +164,7 @@ std::string GpuVideoDecoder::GetDisplayName() const {
void GpuVideoDecoder::Initialize(const VideoDecoderConfig& config,
bool /* low_delay */,
CdmContext* cdm_context,
- const InitCB& init_cb,
+ InitCB init_cb,
const OutputCB& output_cb,
const WaitingCB& /* waiting_cb */) {
DVLOG(3) << "Initialize()";
@@ -171,8 +172,8 @@ void GpuVideoDecoder::Initialize(const VideoDecoderConfig& config,
DCHECK(config.IsValidConfig());
InitCB bound_init_cb =
- base::Bind(&ReportGpuVideoDecoderInitializeStatusToUMAAndRunCB,
- BindToCurrentLoop(init_cb), media_log_);
+ base::BindOnce(&ReportGpuVideoDecoderInitializeStatusToUMAAndRunCB,
+ BindToCurrentLoop(std::move(init_cb)), media_log_);
bool previously_initialized = config_.IsValidConfig();
DVLOG(1) << (previously_initialized ? "Reinitializing" : "Initializing")
@@ -182,14 +183,14 @@ void GpuVideoDecoder::Initialize(const VideoDecoderConfig& config,
if (encryption_mode != EncryptionScheme::CIPHER_MODE_UNENCRYPTED &&
encryption_mode != EncryptionScheme::CIPHER_MODE_AES_CTR) {
DVLOG(1) << "VDAs only support clear or cenc encrypted streams.";
- bound_init_cb.Run(false);
+ std::move(bound_init_cb).Run(false);
return;
}
// Disallow codec changes between configuration changes.
if (previously_initialized && config_.codec() != config.codec()) {
DVLOG(1) << "Codec changed, cannot reinitialize.";
- bound_init_cb.Run(false);
+ std::move(bound_init_cb).Run(false);
return;
}
@@ -197,7 +198,7 @@ void GpuVideoDecoder::Initialize(const VideoDecoderConfig& config,
// hardware decoder which supports alpha formats.
if (config.format() == PIXEL_FORMAT_I420A) {
DVLOG(1) << "Alpha transparency formats are not supported.";
- bound_init_cb.Run(false);
+ std::move(bound_init_cb).Run(false);
return;
}
@@ -209,7 +210,7 @@ void GpuVideoDecoder::Initialize(const VideoDecoderConfig& config,
VideoDecodeAccelerator::Capabilities::SUPPORTS_ENCRYPTED_STREAMS;
if (config.is_encrypted() && (!cdm_context || !supports_encrypted_streams)) {
DVLOG(1) << "Encrypted stream not supported.";
- bound_init_cb.Run(false);
+ std::move(bound_init_cb).Run(false);
return;
}
@@ -219,7 +220,7 @@ void GpuVideoDecoder::Initialize(const VideoDecoderConfig& config,
<< ", unsupported coded size " << config.coded_size().ToString()
<< ", or accelerator should only be used for encrypted content. "
<< " is_encrypted: " << (config.is_encrypted() ? "yes." : "no.");
- bound_init_cb.Run(false);
+ std::move(bound_init_cb).Run(false);
return;
}
@@ -258,7 +259,7 @@ void GpuVideoDecoder::Initialize(const VideoDecoderConfig& config,
if (config.is_encrypted() && !supports_deferred_initialization_) {
DVLOG(1) << __func__
<< " Encrypted stream requires deferred initialialization.";
- bound_init_cb.Run(false);
+ std::move(bound_init_cb).Run(false);
return;
}
@@ -268,14 +269,14 @@ void GpuVideoDecoder::Initialize(const VideoDecoderConfig& config,
// Reinitialization with a different config (but same codec and profile).
// VDA should handle it by detecting this in-stream by itself,
// no need to notify it.
- bound_init_cb.Run(true);
+ std::move(bound_init_cb).Run(true);
return;
}
vda_ = factories_->CreateVideoDecodeAccelerator();
if (!vda_) {
DVLOG(1) << "Failed to create a VDA.";
- bound_init_cb.Run(false);
+ std::move(bound_init_cb).Run(false);
return;
}
@@ -284,11 +285,11 @@ void GpuVideoDecoder::Initialize(const VideoDecoderConfig& config,
if (config.is_encrypted() && cdm_id_ == CdmContext::kInvalidCdmId) {
DVLOG(1) << "CDM ID not available.";
- bound_init_cb.Run(false);
+ std::move(bound_init_cb).Run(false);
return;
}
- init_cb_ = bound_init_cb;
+ init_cb_ = std::move(bound_init_cb);
const bool supports_external_output_surface = !!(
capabilities.flags &
@@ -413,16 +414,14 @@ void GpuVideoDecoder::DestroyVDA() {
}
void GpuVideoDecoder::Decode(scoped_refptr<DecoderBuffer> buffer,
- const DecodeCB& decode_cb) {
+ DecodeCB decode_cb) {
DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
DCHECK(!pending_reset_cb_);
DVLOG(3) << __func__ << " " << buffer->AsHumanReadableString();
- DecodeCB bound_decode_cb = BindToCurrentLoop(decode_cb);
-
if (state_ == kError || !vda_) {
- bound_decode_cb.Run(DecodeStatus::DECODE_ERROR);
+ BindToCurrentLoop(std::move(decode_cb)).Run(DecodeStatus::DECODE_ERROR);
return;
}
@@ -443,7 +442,7 @@ void GpuVideoDecoder::Decode(scoped_refptr<DecoderBuffer> buffer,
if (buffer->end_of_stream()) {
DVLOG(3) << __func__ << " Initiating Flush for EOS.";
state_ = kDrainingDecoder;
- eos_decode_cb_ = bound_decode_cb;
+ eos_decode_cb_ = BindToCurrentLoop(std::move(decode_cb));
vda_->Flush();
return;
}
@@ -451,16 +450,16 @@ void GpuVideoDecoder::Decode(scoped_refptr<DecoderBuffer> buffer,
size_t size = buffer->data_size();
auto shared_memory = GetSharedMemory(size);
if (!shared_memory) {
- bound_decode_cb.Run(DecodeStatus::DECODE_ERROR);
+ BindToCurrentLoop(std::move(decode_cb)).Run(DecodeStatus::DECODE_ERROR);
return;
}
memcpy(shared_memory->memory(), buffer->data(), size);
// AndroidVideoDecodeAccelerator needs the timestamp to output frames in
// presentation order.
- BitstreamBuffer bitstream_buffer(next_bitstream_buffer_id_,
- shared_memory->handle(), size, 0,
- buffer->timestamp());
+ BitstreamBuffer bitstream_buffer(
+ next_bitstream_buffer_id_, shared_memory->handle(), false /* read_only */,
+ size, 0, buffer->timestamp());
if (buffer->decrypt_config()) {
bitstream_buffer.SetDecryptionSettings(
@@ -476,11 +475,11 @@ void GpuVideoDecoder::Decode(scoped_refptr<DecoderBuffer> buffer,
bitstream_buffers_in_decoder_.emplace(
bitstream_buffer.id(),
- PendingDecoderBuffer(std::move(shared_memory), decode_cb));
+ PendingDecoderBuffer(std::move(shared_memory), std::move(decode_cb)));
DCHECK_LE(static_cast<int>(bitstream_buffers_in_decoder_.size()),
kMaxInFlightDecodes);
- vda_->Decode(bitstream_buffer);
+ vda_->Decode(std::move(bitstream_buffer));
}
void GpuVideoDecoder::RecordBufferData(const BitstreamBuffer& bitstream_buffer,
@@ -707,7 +706,7 @@ void GpuVideoDecoder::PictureReady(const media::Picture& picture) {
DeliverFrame(frame);
}
-void GpuVideoDecoder::DeliverFrame(const scoped_refptr<VideoFrame>& frame) {
+void GpuVideoDecoder::DeliverFrame(scoped_refptr<VideoFrame> frame) {
DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
// During a pending vda->Reset(), we don't accumulate frames. Drop it on the
@@ -717,7 +716,7 @@ void GpuVideoDecoder::DeliverFrame(const scoped_refptr<VideoFrame>& frame) {
frame->metadata()->SetBoolean(VideoFrameMetadata::POWER_EFFICIENT, true);
- output_cb_.Run(frame);
+ output_cb_.Run(std::move(frame));
}
// static
@@ -817,8 +816,8 @@ void GpuVideoDecoder::NotifyEndOfBitstreamBuffer(int32_t id) {
}
PutSharedMemory(std::move(it->second.shared_memory), id);
- it->second.done_cb.Run(state_ == kError ? DecodeStatus::DECODE_ERROR
- : DecodeStatus::OK);
+ std::move(it->second.done_cb)
+ .Run(state_ == kError ? DecodeStatus::DECODE_ERROR : DecodeStatus::OK);
bitstream_buffers_in_decoder_.erase(it);
}
@@ -839,10 +838,8 @@ GpuVideoDecoder::~GpuVideoDecoder() {
std::move(request_overlay_info_cb_).Run(false, ProvideOverlayInfoCB());
}
- for (auto it = bitstream_buffers_in_decoder_.begin();
- it != bitstream_buffers_in_decoder_.end(); ++it) {
- it->second.done_cb.Run(DecodeStatus::ABORTED);
- }
+ for (auto& pair : bitstream_buffers_in_decoder_)
+ std::move(pair.second.done_cb).Run(DecodeStatus::ABORTED);
bitstream_buffers_in_decoder_.clear();
if (pending_reset_cb_)
@@ -888,7 +885,7 @@ void GpuVideoDecoder::NotifyError(media::VideoDecodeAccelerator::Error error) {
// won't be another decode request to report the error.
if (!bitstream_buffers_in_decoder_.empty()) {
auto it = bitstream_buffers_in_decoder_.begin();
- it->second.done_cb.Run(DecodeStatus::DECODE_ERROR);
+ std::move(it->second.done_cb).Run(DecodeStatus::DECODE_ERROR);
bitstream_buffers_in_decoder_.erase(it);
}
diff --git a/chromium/media/filters/gpu_video_decoder.h b/chromium/media/filters/gpu_video_decoder.h
index 1f2ae7f0e2f..ef04bf9ad64 100644
--- a/chromium/media/filters/gpu_video_decoder.h
+++ b/chromium/media/filters/gpu_video_decoder.h
@@ -61,12 +61,11 @@ class MEDIA_EXPORT GpuVideoDecoder
void Initialize(const VideoDecoderConfig& config,
bool low_delay,
CdmContext* cdm_context,
- const InitCB& init_cb,
+ InitCB init_cb,
const OutputCB& output_cb,
const WaitingCB& waiting_cb) override;
- void Decode(scoped_refptr<DecoderBuffer> buffer,
- const DecodeCB& decode_cb) override;
- void Reset(const base::Closure& closure) override;
+ void Decode(scoped_refptr<DecoderBuffer> buffer, DecodeCB decode_cb) override;
+ void Reset(base::OnceClosure closure) override;
bool NeedsBitstreamConversion() const override;
bool CanReadWithoutStalling() const override;
int GetMaxDecodeRequests() const override;
@@ -104,7 +103,7 @@ class MEDIA_EXPORT GpuVideoDecoder
typedef std::map<int32_t, PictureBuffer> PictureBufferMap;
- void DeliverFrame(const scoped_refptr<VideoFrame>& frame);
+ void DeliverFrame(scoped_refptr<VideoFrame> frame);
// Static method is to allow it to run even after GVD is deleted.
static void ReleaseMailbox(base::WeakPtr<GpuVideoDecoder> decoder,
@@ -189,7 +188,7 @@ class MEDIA_EXPORT GpuVideoDecoder
DecodeCB eos_decode_cb_;
// Not null only during reset.
- base::Closure pending_reset_cb_;
+ base::OnceClosure pending_reset_cb_;
State state_;
diff --git a/chromium/media/filters/in_memory_url_protocol.cc b/chromium/media/filters/in_memory_url_protocol.cc
index 4a484f1bbd4..6f75bb9a251 100644
--- a/chromium/media/filters/in_memory_url_protocol.cc
+++ b/chromium/media/filters/in_memory_url_protocol.cc
@@ -19,10 +19,17 @@ InMemoryUrlProtocol::InMemoryUrlProtocol(const uint8_t* data,
InMemoryUrlProtocol::~InMemoryUrlProtocol() = default;
int InMemoryUrlProtocol::Read(int size, uint8_t* data) {
+  // It's unclear from the ffmpeg code whether a negative size can occur, so
+  // guard against it.
if (size < 0)
return AVERROR(EIO);
+ if (!size)
+ return 0;
+
+ const int64_t available_bytes = size_ - position_;
+ if (available_bytes <= 0)
+ return AVERROR_EOF;
- int64_t available_bytes = size_ - position_;
if (size > available_bytes)
size = available_bytes;
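
With this change, a read positioned at or past the end reports AVERROR_EOF instead of a zero-byte success, while zero-sized reads still return 0, matching ffmpeg's AVIO expectations. A usage sketch (constructor arguments follow the unit tests; the trailing bool is assumed to be the streaming flag):

    #include <cstdint>

    #include "media/filters/in_memory_url_protocol.h"

    void DemonstrateEofRead() {
      const uint8_t kData[] = {1, 2, 3, 4};
      media::InMemoryUrlProtocol protocol(kData, sizeof(kData), false);

      uint8_t out[4];
      int result = protocol.Read(sizeof(out), out);  // 4: whole buffer read.
      result = protocol.Read(sizeof(out), out);      // AVERROR_EOF: at end.
      result = protocol.Read(0, out);                // 0: empty read is OK.
    }
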
diff --git a/chromium/media/filters/in_memory_url_protocol_unittest.cc b/chromium/media/filters/in_memory_url_protocol_unittest.cc
index 3368d5fa1ee..212bb9120b1 100644
--- a/chromium/media/filters/in_memory_url_protocol_unittest.cc
+++ b/chromium/media/filters/in_memory_url_protocol_unittest.cc
@@ -43,7 +43,7 @@ TEST(InMemoryUrlProtocolTest, SetPosition) {
uint8_t out;
EXPECT_TRUE(protocol.SetPosition(sizeof(kData)));
- EXPECT_EQ(0, protocol.Read(1, &out));
+ EXPECT_EQ(AVERROR_EOF, protocol.Read(1, &out));
int i = sizeof(kData) / 2;
EXPECT_TRUE(protocol.SetPosition(i));
diff --git a/chromium/media/filters/offloading_video_decoder.cc b/chromium/media/filters/offloading_video_decoder.cc
index c923c25a74e..944e1cd4330 100644
--- a/chromium/media/filters/offloading_video_decoder.cc
+++ b/chromium/media/filters/offloading_video_decoder.cc
@@ -27,23 +27,23 @@ class CancellationHelper {
void Cancel() { cancellation_flag_->Set(); }
void Decode(scoped_refptr<DecoderBuffer> buffer,
- const VideoDecoder::DecodeCB& decode_cb) {
+ VideoDecoder::DecodeCB decode_cb) {
if (cancellation_flag_->IsSet()) {
- decode_cb.Run(DecodeStatus::ABORTED);
+ std::move(decode_cb).Run(DecodeStatus::ABORTED);
return;
}
- decoder_->Decode(std::move(buffer), decode_cb);
+ decoder_->Decode(std::move(buffer), std::move(decode_cb));
}
- void Reset(const base::Closure& reset_cb) {
+ void Reset(base::OnceClosure reset_cb) {
// OffloadableVideoDecoders are required to have a synchronous Reset(), so
// we don't need to wait for the Reset to complete. Despite this, we don't
// want to run |reset_cb| before we've reset the cancellation flag or the
// client may end up issuing another Reset() before this code runs.
decoder_->Reset(base::DoNothing());
cancellation_flag_.reset(new base::AtomicFlag());
- reset_cb.Run();
+ std::move(reset_cb).Run();
}
OffloadableVideoDecoder* decoder() const { return decoder_.get(); }
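
CancellationHelper's flag swap is the crux: Cancel() marks in-flight offloaded work as abandoned, and Reset() installs a fresh flag so subsequent decodes proceed. A condensed sketch of the same pattern with illustrative names:

    #include <memory>
    #include <utility>

    #include "base/callback.h"
    #include "base/synchronization/atomic_flag.h"

    class Canceler {
     public:
      void Cancel() { flag_->Set(); }

      void RunUnlessCancelled(base::OnceClosure work,
                              base::OnceClosure aborted_cb) {
        if (flag_->IsSet()) {
          std::move(aborted_cb).Run();  // Skip the work, report the abort.
          return;
        }
        std::move(work).Run();
      }

      // AtomicFlag cannot be un-set, so cancellation is cleared by
      // replacing the flag outright.
      void Reset() { flag_ = std::make_unique<base::AtomicFlag>(); }

     private:
      std::unique_ptr<base::AtomicFlag> flag_ =
          std::make_unique<base::AtomicFlag>();
    };
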
@@ -83,7 +83,7 @@ std::string OffloadingVideoDecoder::GetDisplayName() const {
void OffloadingVideoDecoder::Initialize(const VideoDecoderConfig& config,
bool low_delay,
CdmContext* cdm_context,
- const InitCB& init_cb,
+ InitCB init_cb,
const OutputCB& output_cb,
const WaitingCB& waiting_cb) {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
@@ -109,7 +109,8 @@ void OffloadingVideoDecoder::Initialize(const VideoDecoderConfig& config,
// possible for this class to be destroyed during Initialize().
base::BindOnce(&OffloadingVideoDecoder::Initialize,
weak_factory_.GetWeakPtr(), config, low_delay,
- cdm_context, init_cb, output_cb, waiting_cb));
+ cdm_context, std::move(init_cb), output_cb,
+ waiting_cb));
return;
}
@@ -124,14 +125,15 @@ void OffloadingVideoDecoder::Initialize(const VideoDecoderConfig& config,
// Offloaded decoders expect asynchronous execution of callbacks; even if we
// aren't currently using the offload thread.
- InitCB bound_init_cb = BindToCurrentLoop(init_cb);
+ InitCB bound_init_cb = BindToCurrentLoop(std::move(init_cb));
OutputCB bound_output_cb = BindToCurrentLoop(output_cb);
// If we're not offloading just pass through to the wrapped decoder.
if (disable_offloading) {
offload_task_runner_ = nullptr;
helper_->decoder()->Initialize(config, low_delay, cdm_context,
- bound_init_cb, bound_output_cb, waiting_cb);
+ std::move(bound_init_cb), bound_output_cb,
+ waiting_cb);
return;
}
@@ -144,39 +146,40 @@ void OffloadingVideoDecoder::Initialize(const VideoDecoderConfig& config,
FROM_HERE,
base::BindOnce(&OffloadableVideoDecoder::Initialize,
base::Unretained(helper_->decoder()), config, low_delay,
- cdm_context, bound_init_cb, bound_output_cb, waiting_cb));
+ cdm_context, std::move(bound_init_cb), bound_output_cb,
+ waiting_cb));
}
void OffloadingVideoDecoder::Decode(scoped_refptr<DecoderBuffer> buffer,
- const DecodeCB& decode_cb) {
+ DecodeCB decode_cb) {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
DCHECK(buffer);
DCHECK(decode_cb);
- DecodeCB bound_decode_cb = BindToCurrentLoop(decode_cb);
+ DecodeCB bound_decode_cb = BindToCurrentLoop(std::move(decode_cb));
if (!offload_task_runner_) {
- helper_->decoder()->Decode(std::move(buffer), bound_decode_cb);
+ helper_->decoder()->Decode(std::move(buffer), std::move(bound_decode_cb));
return;
}
offload_task_runner_->PostTask(
FROM_HERE, base::BindOnce(&CancellationHelper::Decode,
base::Unretained(helper_.get()),
- std::move(buffer), bound_decode_cb));
+ std::move(buffer), std::move(bound_decode_cb)));
}
-void OffloadingVideoDecoder::Reset(const base::Closure& reset_cb) {
+void OffloadingVideoDecoder::Reset(base::OnceClosure reset_cb) {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
- base::Closure bound_reset_cb = BindToCurrentLoop(reset_cb);
+ base::OnceClosure bound_reset_cb = BindToCurrentLoop(std::move(reset_cb));
if (!offload_task_runner_) {
- helper_->Reset(bound_reset_cb);
+ helper_->Reset(std::move(bound_reset_cb));
} else {
helper_->Cancel();
offload_task_runner_->PostTask(
- FROM_HERE,
- base::BindOnce(&CancellationHelper::Reset,
- base::Unretained(helper_.get()), bound_reset_cb));
+ FROM_HERE, base::BindOnce(&CancellationHelper::Reset,
+ base::Unretained(helper_.get()),
+ std::move(bound_reset_cb)));
}
}
diff --git a/chromium/media/filters/offloading_video_decoder.h b/chromium/media/filters/offloading_video_decoder.h
index 88ab010bec9..de193178691 100644
--- a/chromium/media/filters/offloading_video_decoder.h
+++ b/chromium/media/filters/offloading_video_decoder.h
@@ -92,12 +92,11 @@ class MEDIA_EXPORT OffloadingVideoDecoder : public VideoDecoder {
void Initialize(const VideoDecoderConfig& config,
bool low_delay,
CdmContext* cdm_context,
- const InitCB& init_cb,
+ InitCB init_cb,
const OutputCB& output_cb,
const WaitingCB& waiting_cb) override;
- void Decode(scoped_refptr<DecoderBuffer> buffer,
- const DecodeCB& decode_cb) override;
- void Reset(const base::Closure& reset_cb) override;
+ void Decode(scoped_refptr<DecoderBuffer> buffer, DecodeCB decode_cb) override;
+ void Reset(base::OnceClosure reset_cb) override;
int GetMaxDecodeRequests() const override;
private:
diff --git a/chromium/media/filters/offloading_video_decoder_unittest.cc b/chromium/media/filters/offloading_video_decoder_unittest.cc
index a79f524df18..f672a059ae6 100644
--- a/chromium/media/filters/offloading_video_decoder_unittest.cc
+++ b/chromium/media/filters/offloading_video_decoder_unittest.cc
@@ -36,16 +36,27 @@ class MockOffloadableVideoDecoder : public OffloadableVideoDecoder {
std::string GetDisplayName() const override {
return "MockOffloadableVideoDecoder";
}
- MOCK_METHOD6(Initialize,
+ void Initialize(const VideoDecoderConfig& config,
+ bool low_delay,
+ CdmContext* cdm_context,
+ InitCB init_cb,
+ const OutputCB& output_cb,
+ const WaitingCB& waiting_cb) override {
+ Initialize_(config, low_delay, cdm_context, init_cb, output_cb, waiting_cb);
+ }
+ MOCK_METHOD6(Initialize_,
void(const VideoDecoderConfig& config,
bool low_delay,
CdmContext* cdm_context,
- const InitCB& init_cb,
+ InitCB& init_cb,
const OutputCB& output_cb,
const WaitingCB& waiting_cb));
- MOCK_METHOD2(Decode,
- void(scoped_refptr<DecoderBuffer> buffer, const DecodeCB&));
- MOCK_METHOD1(Reset, void(const base::Closure&));
+ void Decode(scoped_refptr<DecoderBuffer> buffer, DecodeCB cb) override {
+ Decode_(std::move(buffer), cb);
+ }
+ MOCK_METHOD2(Decode_, void(scoped_refptr<DecoderBuffer> buffer, DecodeCB&));
+ void Reset(base::OnceClosure cb) override { Reset_(cb); }
+ MOCK_METHOD1(Reset_, void(base::OnceClosure&));
MOCK_METHOD0(Detach, void(void));
};
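
Because MOCK_METHODn in this gMock version cannot accept move-only parameters, each mocked entry point above takes the callback by value and forwards it by reference to a trailing-underscore mock, which tests then exercise with the RunOnceCallback/RunOnceClosure actions. A minimal sketch of the workaround:

    #include "base/callback.h"
    #include "testing/gmock/include/gmock/gmock.h"

    class MockDecoderSketch {
     public:
      // The public override takes the move-only closure by value...
      void Reset(base::OnceClosure cb) { Reset_(cb); }
      // ...and the mock sees a mutable reference it can consume.
      MOCK_METHOD1(Reset_, void(base::OnceClosure&));
    };
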
@@ -53,7 +64,8 @@ class OffloadingVideoDecoderTest : public testing::Test {
public:
OffloadingVideoDecoderTest()
: task_env_(base::test::ScopedTaskEnvironment::MainThreadType::DEFAULT,
- base::test::ScopedTaskEnvironment::ExecutionMode::QUEUED) {}
+ base::test::ScopedTaskEnvironment::ThreadPoolExecutionMode::
+ QUEUED) {}
void CreateWrapper(int offload_width, VideoCodec codec) {
decoder_ = new testing::StrictMock<MockOffloadableVideoDecoder>();
@@ -101,26 +113,26 @@ class OffloadingVideoDecoderTest : public testing::Test {
// Verify methods are called on the current thread since the offload codec
// requirement is not satisfied.
VideoDecoder::OutputCB output_cb;
- EXPECT_CALL(*decoder_, Initialize(_, false, nullptr, _, _, _))
+ EXPECT_CALL(*decoder_, Initialize_(_, false, nullptr, _, _, _))
.WillOnce(DoAll(VerifyOn(task_env_.GetMainThreadTaskRunner()),
- RunCallback<3>(true), SaveArg<4>(&output_cb)));
+ RunOnceCallback<3>(true), SaveArg<4>(&output_cb)));
offloading_decoder_->Initialize(config, false, nullptr, ExpectInitCB(true),
ExpectOutputCB(), base::NullCallback());
task_env_.RunUntilIdle();
// Verify decode works and is called on the right thread.
- EXPECT_CALL(*decoder_, Decode(_, _))
+ EXPECT_CALL(*decoder_, Decode_(_, _))
.WillOnce(DoAll(VerifyOn(task_env_.GetMainThreadTaskRunner()),
RunClosure(base::Bind(output_cb, nullptr)),
- RunCallback<1>(DecodeStatus::OK)));
+ RunOnceCallback<1>(DecodeStatus::OK)));
offloading_decoder_->Decode(DecoderBuffer::CreateEOSBuffer(),
ExpectDecodeCB(DecodeStatus::OK));
task_env_.RunUntilIdle();
// Reset so we can call Initialize() again.
- EXPECT_CALL(*decoder_, Reset(_))
+ EXPECT_CALL(*decoder_, Reset_(_))
.WillOnce(DoAll(VerifyOn(task_env_.GetMainThreadTaskRunner()),
- RunCallback<0>()));
+ RunOnceCallback<0>()));
offloading_decoder_->Reset(ExpectResetCB());
task_env_.RunUntilIdle();
}
@@ -142,9 +154,9 @@ class OffloadingVideoDecoderTest : public testing::Test {
}
offloading_decoder_->Initialize(config, false, nullptr, ExpectInitCB(true),
ExpectOutputCB(), base::NullCallback());
- EXPECT_CALL(*decoder_, Initialize(_, false, nullptr, _, _, _))
+ EXPECT_CALL(*decoder_, Initialize_(_, false, nullptr, _, _, _))
.WillOnce(DoAll(VerifyNotOn(task_env_.GetMainThreadTaskRunner()),
- RunCallback<3>(true), SaveArg<4>(&output_cb)));
+ RunOnceCallback<3>(true), SaveArg<4>(&output_cb)));
task_env_.RunUntilIdle();
// When offloading decodes should be parallelized.
@@ -153,22 +165,22 @@ class OffloadingVideoDecoderTest : public testing::Test {
// Verify decode works and is called on the right thread.
offloading_decoder_->Decode(DecoderBuffer::CreateEOSBuffer(),
ExpectDecodeCB(DecodeStatus::OK));
- EXPECT_CALL(*decoder_, Decode(_, _))
+ EXPECT_CALL(*decoder_, Decode_(_, _))
.WillOnce(DoAll(VerifyNotOn(task_env_.GetMainThreadTaskRunner()),
RunClosure(base::Bind(output_cb, nullptr)),
- RunCallback<1>(DecodeStatus::OK)));
+ RunOnceCallback<1>(DecodeStatus::OK)));
task_env_.RunUntilIdle();
// Reset so we can call Initialize() again.
offloading_decoder_->Reset(ExpectResetCB());
- EXPECT_CALL(*decoder_, Reset(_))
+ EXPECT_CALL(*decoder_, Reset_(_))
.WillOnce(DoAll(VerifyNotOn(task_env_.GetMainThreadTaskRunner()),
- RunCallback<0>()));
+ RunOnceCallback<0>()));
task_env_.RunUntilIdle();
}
MOCK_METHOD1(InitDone, void(bool));
- MOCK_METHOD1(OutputDone, void(const scoped_refptr<VideoFrame>&));
+ MOCK_METHOD1(OutputDone, void(scoped_refptr<VideoFrame>));
MOCK_METHOD1(DecodeDone, void(DecodeStatus));
MOCK_METHOD0(ResetDone, void(void));
@@ -226,9 +238,9 @@ TEST_F(OffloadingVideoDecoderTest, OffloadingAfterNoOffloading) {
base::NullCallback());
EXPECT_CALL(*decoder_, Detach())
.WillOnce(VerifyNotOn(task_env_.GetMainThreadTaskRunner()));
- EXPECT_CALL(*decoder_, Initialize(_, false, nullptr, _, _, _))
+ EXPECT_CALL(*decoder_, Initialize_(_, false, nullptr, _, _, _))
.WillOnce(DoAll(VerifyOn(task_env_.GetMainThreadTaskRunner()),
- RunCallback<3>(true), SaveArg<4>(&output_cb)));
+ RunOnceCallback<3>(true), SaveArg<4>(&output_cb)));
task_env_.RunUntilIdle();
}
@@ -253,25 +265,29 @@ TEST_F(OffloadingVideoDecoderTest, ParallelizedOffloading) {
base::BindRepeating(&OffloadingVideoDecoderTest::OutputDone,
base::Unretained(this)),
base::NullCallback());
- EXPECT_CALL(*decoder_, Initialize(_, false, nullptr, _, _, _))
+ EXPECT_CALL(*decoder_, Initialize_(_, false, nullptr, _, _, _))
.WillOnce(DoAll(VerifyNotOn(task_env_.GetMainThreadTaskRunner()),
- RunCallback<3>(true), SaveArg<4>(&output_cb)));
+ RunOnceCallback<3>(true), SaveArg<4>(&output_cb)));
task_env_.RunUntilIdle();
// When offloading decodes should be parallelized.
EXPECT_GT(offloading_decoder_->GetMaxDecodeRequests(), 1);
// Verify decode works and is called on the right thread.
- VideoDecoder::DecodeCB decode_cb = base::BindRepeating(
- &OffloadingVideoDecoderTest::DecodeDone, base::Unretained(this));
- offloading_decoder_->Decode(DecoderBuffer::CreateEOSBuffer(), decode_cb);
- offloading_decoder_->Decode(DecoderBuffer::CreateEOSBuffer(), decode_cb);
-
- EXPECT_CALL(*decoder_, Decode(_, _))
+ offloading_decoder_->Decode(
+ DecoderBuffer::CreateEOSBuffer(),
+ base::BindOnce(&OffloadingVideoDecoderTest::DecodeDone,
+ base::Unretained(this)));
+ offloading_decoder_->Decode(
+ DecoderBuffer::CreateEOSBuffer(),
+ base::BindOnce(&OffloadingVideoDecoderTest::DecodeDone,
+ base::Unretained(this)));
+
+ EXPECT_CALL(*decoder_, Decode_(_, _))
.Times(2)
.WillRepeatedly(DoAll(VerifyNotOn(task_env_.GetMainThreadTaskRunner()),
RunClosure(base::BindRepeating(output_cb, nullptr)),
- RunCallback<1>(DecodeStatus::OK)));
+ RunOnceCallback<1>(DecodeStatus::OK)));
EXPECT_CALL(*this, DecodeDone(DecodeStatus::OK))
.Times(2)
.WillRepeatedly(VerifyOn(task_env_.GetMainThreadTaskRunner()));
@@ -282,9 +298,9 @@ TEST_F(OffloadingVideoDecoderTest, ParallelizedOffloading) {
// Reset so we can call Initialize() again.
offloading_decoder_->Reset(ExpectResetCB());
- EXPECT_CALL(*decoder_, Reset(_))
+ EXPECT_CALL(*decoder_, Reset_(_))
.WillOnce(DoAll(VerifyNotOn(task_env_.GetMainThreadTaskRunner()),
- RunCallback<0>()));
+ RunOnceCallback<0>()));
task_env_.RunUntilIdle();
}
@@ -300,28 +316,32 @@ TEST_F(OffloadingVideoDecoderTest, ParallelizedOffloadingResetAbortsDecodes) {
base::BindRepeating(&OffloadingVideoDecoderTest::OutputDone,
base::Unretained(this)),
base::NullCallback());
- EXPECT_CALL(*decoder_, Initialize(_, false, nullptr, _, _, _))
+ EXPECT_CALL(*decoder_, Initialize_(_, false, nullptr, _, _, _))
.WillOnce(DoAll(VerifyNotOn(task_env_.GetMainThreadTaskRunner()),
- RunCallback<3>(true), SaveArg<4>(&output_cb)));
+ RunOnceCallback<3>(true), SaveArg<4>(&output_cb)));
task_env_.RunUntilIdle();
// When offloading decodes should be parallelized.
EXPECT_GT(offloading_decoder_->GetMaxDecodeRequests(), 1);
// Verify decode works and is called on the right thread.
- VideoDecoder::DecodeCB decode_cb = base::BindRepeating(
- &OffloadingVideoDecoderTest::DecodeDone, base::Unretained(this));
- offloading_decoder_->Decode(DecoderBuffer::CreateEOSBuffer(), decode_cb);
- offloading_decoder_->Decode(DecoderBuffer::CreateEOSBuffer(), decode_cb);
-
- EXPECT_CALL(*decoder_, Decode(_, _)).Times(0);
+ offloading_decoder_->Decode(
+ DecoderBuffer::CreateEOSBuffer(),
+ base::BindOnce(&OffloadingVideoDecoderTest::DecodeDone,
+ base::Unretained(this)));
+ offloading_decoder_->Decode(
+ DecoderBuffer::CreateEOSBuffer(),
+ base::BindOnce(&OffloadingVideoDecoderTest::DecodeDone,
+ base::Unretained(this)));
+
+ EXPECT_CALL(*decoder_, Decode_(_, _)).Times(0);
EXPECT_CALL(*this, DecodeDone(DecodeStatus::ABORTED))
.Times(2)
.WillRepeatedly(VerifyOn(task_env_.GetMainThreadTaskRunner()));
offloading_decoder_->Reset(ExpectResetCB());
- EXPECT_CALL(*decoder_, Reset(_))
+ EXPECT_CALL(*decoder_, Reset_(_))
.WillOnce(DoAll(VerifyNotOn(task_env_.GetMainThreadTaskRunner()),
- RunCallback<0>()));
+ RunOnceClosure<0>()));
task_env_.RunUntilIdle();
}
diff --git a/chromium/media/filters/pipeline_controller.cc b/chromium/media/filters/pipeline_controller.cc
index bd05ebdc78a..5d429445ed7 100644
--- a/chromium/media/filters/pipeline_controller.cc
+++ b/chromium/media/filters/pipeline_controller.cc
@@ -177,6 +177,11 @@ void PipelineController::OnPipelineStatus(State expected_state,
if (state_ == State::PLAYING_OR_SUSPENDED) {
waiting_for_seek_ = false;
state_ = pipeline_->IsSuspended() ? State::SUSPENDED : State::PLAYING;
+
+ // It's possible for a Suspend() call to come in during startup. If we've
+ // completed a suspended startup, we should clear that now.
+ if (state_ == State::SUSPENDED)
+ pending_suspend_ = false;
}
if (state_ == State::PLAYING) {
@@ -188,12 +193,15 @@ void PipelineController::OnPipelineStatus(State expected_state,
// properly fixed.
if (old_state == State::RESUMING) {
DCHECK(!pipeline_->IsSuspended());
+ DCHECK(!pending_resume_);
+
resumed_cb_.Run();
}
}
if (state_ == State::SUSPENDED) {
DCHECK(pipeline_->IsSuspended());
+ DCHECK(!pending_suspend_);
// Warning: possibly reentrant. The state may change inside this callback.
// It must be safe to call Dispatch() twice in a row here.
@@ -219,7 +227,11 @@ void PipelineController::Dispatch() {
return;
}
- if (pending_resume_ && state_ == State::SUSPENDED) {
+  // In addition to the standard |pending_resume_| case, if we completed a
+ // suspended startup, but a Seek() came in, we need to resume the pipeline to
+ // complete the seek before calling |seeked_cb_|.
+ if ((pending_resume_ || (pending_startup_ && pending_seek_)) &&
+ state_ == State::SUSPENDED) {
// If there is a pending seek, resume to that time instead...
if (pending_seek_) {
seek_time_ = pending_seek_time_;
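
The new Dispatch() condition can be read as a single predicate: from SUSPENDED, resume either on an explicit pending resume or when a Seek() arrived during a suspended startup. A condensed sketch (parameter names mirror the PipelineController members):

    bool ShouldResumeFromSuspended(bool pending_resume,
                                   bool pending_startup,
                                   bool pending_seek,
                                   bool is_suspended) {
      return is_suspended &&
             (pending_resume || (pending_startup && pending_seek));
    }
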
diff --git a/chromium/media/filters/pipeline_controller_unittest.cc b/chromium/media/filters/pipeline_controller_unittest.cc
index bbf3e237a58..56ba583965f 100644
--- a/chromium/media/filters/pipeline_controller_unittest.cc
+++ b/chromium/media/filters/pipeline_controller_unittest.cc
@@ -142,7 +142,7 @@ class PipelineControllerTest : public ::testing::Test, public Pipeline::Client {
// Pipeline::Client overrides
void OnError(PipelineStatus status) override { NOTREACHED(); }
void OnEnded() override {}
- void OnMetadata(PipelineMetadata metadata) override {}
+ void OnMetadata(const PipelineMetadata& metadata) override {}
void OnBufferingStateChange(BufferingState state) override {}
void OnDurationChange() override {}
void OnAddTextTrack(const TextTrackConfig& config,
@@ -184,6 +184,47 @@ TEST_F(PipelineControllerTest, Startup) {
EXPECT_TRUE(pipeline_controller_.IsStable());
}
+TEST_F(PipelineControllerTest, StartSuspendedSeekAndResume) {
+ EXPECT_FALSE(pipeline_controller_.IsStable());
+ PipelineStatusCB start_cb;
+ EXPECT_CALL(*pipeline_, Start(_, _, _, _, _)).WillOnce(SaveArg<4>(&start_cb));
+ pipeline_controller_.Start(Pipeline::StartType::kSuspendAfterMetadata,
+ &demuxer_, this, false, true);
+ Mock::VerifyAndClear(pipeline_);
+
+ // Initiate a seek before the pipeline completes suspended startup.
+ base::TimeDelta seek_time = base::TimeDelta::FromSeconds(5);
+ EXPECT_CALL(demuxer_, StartWaitingForSeek(seek_time));
+ pipeline_controller_.Seek(seek_time, true);
+ base::RunLoop().RunUntilIdle();
+ EXPECT_FALSE(was_seeked_);
+
+ PipelineStatusCB resume_cb;
+ EXPECT_CALL(*pipeline_, Resume(_, _, _))
+ .WillOnce(DoAll(SaveArg<2>(&resume_cb)));
+ EXPECT_CALL(*pipeline_, GetMediaTime())
+ .WillRepeatedly(Return(base::TimeDelta()));
+
+ EXPECT_CALL(*pipeline_, IsSuspended()).WillRepeatedly(Return(true));
+ EXPECT_FALSE(pipeline_controller_.IsStable());
+ Complete(start_cb);
+
+ EXPECT_FALSE(pipeline_controller_.IsStable());
+ EXPECT_FALSE(pipeline_controller_.IsPipelineSuspended());
+ EXPECT_FALSE(pipeline_controller_.IsSuspended());
+ Mock::VerifyAndClear(pipeline_);
+
+ EXPECT_CALL(*pipeline_, IsSuspended()).WillRepeatedly(Return(false));
+ Complete(resume_cb);
+ EXPECT_TRUE(was_seeked_);
+ was_seeked_ = false;
+
+ EXPECT_TRUE(pipeline_controller_.IsStable());
+ EXPECT_FALSE(pipeline_controller_.IsPipelineSuspended());
+ EXPECT_FALSE(pipeline_controller_.IsSuspended());
+ Mock::VerifyAndClear(pipeline_);
+}
+
TEST_F(PipelineControllerTest, StartSuspendedAndResume) {
EXPECT_FALSE(pipeline_controller_.IsStable());
PipelineStatusCB start_cb;
diff --git a/chromium/media/filters/source_buffer_range.cc b/chromium/media/filters/source_buffer_range.cc
index e1d454124cf..08a332675e9 100644
--- a/chromium/media/filters/source_buffer_range.cc
+++ b/chromium/media/filters/source_buffer_range.cc
@@ -4,29 +4,42 @@
#include "media/filters/source_buffer_range.h"
+#include <algorithm>
+#include <sstream>
+#include <string>
+
+#include "base/logging.h"
#include "media/base/timestamp_constants.h"
namespace media {
-// static
-bool SourceBufferRange::IsUncommonSameTimestampSequence(
- bool prev_is_keyframe,
- bool current_is_keyframe) {
- return current_is_keyframe && !prev_is_keyframe;
-}
-
SourceBufferRange::SourceBufferRange(
GapPolicy gap_policy,
+ const BufferQueue& new_buffers,
+ base::TimeDelta range_start_pts,
const InterbufferDistanceCB& interbuffer_distance_cb)
: gap_policy_(gap_policy),
next_buffer_index_(-1),
interbuffer_distance_cb_(interbuffer_distance_cb),
- size_in_bytes_(0) {
+ size_in_bytes_(0),
+ range_start_pts_(range_start_pts),
+ keyframe_map_index_base_(0) {
+ DVLOG(3) << __func__;
DCHECK(interbuffer_distance_cb);
+ CHECK(!new_buffers.empty());
+ DCHECK(new_buffers.front()->is_key_frame());
+ AppendBuffersToEnd(new_buffers, range_start_pts_);
}
SourceBufferRange::~SourceBufferRange() = default;
+void SourceBufferRange::DeleteAll(BufferQueue* deleted_buffers) {
+ DVLOG(1) << __func__;
+ DVLOG(4) << ToStringForDebugging();
+
+ TruncateAt(0u, deleted_buffers);
+}
+
void SourceBufferRange::SeekToStart() {
CHECK(!buffers_.empty());
next_buffer_index_ = 0;
@@ -62,18 +75,600 @@ void SourceBufferRange::ResetNextBufferPosition() {
next_buffer_index_ = -1;
}
-void SourceBufferRange::GetRangeEndTimesForTesting(
- base::TimeDelta* highest_pts,
- base::TimeDelta* end_time) const {
- if (highest_frame_) {
- *highest_pts = highest_frame_->timestamp();
- *end_time = *highest_pts + highest_frame_->duration();
- DCHECK_NE(*highest_pts, kNoTimestamp);
- DCHECK_NE(*end_time, kNoTimestamp);
- return;
+void SourceBufferRange::AppendRangeToEnd(const SourceBufferRange& range,
+ bool transfer_current_position) {
+ DVLOG(1) << __func__;
+ DVLOG(4) << ToStringForDebugging();
+
+ DCHECK(CanAppendRangeToEnd(range));
+ DCHECK(!buffers_.empty());
+
+ if (transfer_current_position && range.next_buffer_index_ >= 0)
+ next_buffer_index_ = range.next_buffer_index_ + buffers_.size();
+
+ AppendBuffersToEnd(range.buffers_,
+ NextRangeStartTimeForAppendRangeToEnd(range));
+}
+
+bool SourceBufferRange::CanAppendRangeToEnd(
+ const SourceBufferRange& range) const {
+ DVLOG(1) << __func__;
+ DVLOG(4) << ToStringForDebugging();
+
+ return CanAppendBuffersToEnd(range.buffers_,
+ NextRangeStartTimeForAppendRangeToEnd(range));
+}
+
+void SourceBufferRange::AppendBuffersToEnd(
+ const BufferQueue& new_buffers,
+ base::TimeDelta new_buffers_group_start_pts) {
+ DVLOG(1) << __func__;
+ DVLOG(4) << ToStringForDebugging();
+
+ CHECK(buffers_.empty() ||
+ CanAppendBuffersToEnd(new_buffers, new_buffers_group_start_pts));
+
+ DCHECK(new_buffers_group_start_pts == kNoTimestamp ||
+ new_buffers.front()->is_key_frame())
+ << range_start_pts_ << ", " << new_buffers.front()->is_key_frame();
+
+ AdjustEstimatedDurationForNewAppend(new_buffers);
+
+ for (BufferQueue::const_iterator itr = new_buffers.begin();
+ itr != new_buffers.end(); ++itr) {
+ DCHECK((*itr)->timestamp() != kNoTimestamp);
+ DCHECK((*itr)->GetDecodeTimestamp() != kNoDecodeTimestamp());
+
+ buffers_.push_back(*itr);
+ UpdateEndTime(*itr);
+ size_in_bytes_ += (*itr)->data_size();
+
+ if ((*itr)->is_key_frame()) {
+ keyframe_map_.insert(std::make_pair(
+ (*itr)->timestamp(), buffers_.size() - 1 + keyframe_map_index_base_));
+ }
+ }
+
+ DVLOG(4) << __func__ << " Result: " << ToStringForDebugging();
+}
+
+bool SourceBufferRange::AllowableAppendAfterEstimatedDuration(
+ const BufferQueue& buffers,
+ base::TimeDelta new_buffers_group_start_pts) const {
+ if (buffers_.empty() || !buffers_.back()->is_duration_estimated() ||
+ buffers.empty() || !buffers.front()->is_key_frame()) {
+ return false;
+ }
+
+ if (new_buffers_group_start_pts == kNoTimestamp) {
+ return GetBufferedEndTimestamp() == buffers.front()->timestamp();
+ }
+
+ return GetBufferedEndTimestamp() == new_buffers_group_start_pts;
+}
+
+bool SourceBufferRange::CanAppendBuffersToEnd(
+ const BufferQueue& buffers,
+ base::TimeDelta new_buffers_group_start_pts) const {
+ DVLOG(1) << __func__;
+ DVLOG(4) << ToStringForDebugging();
+
+ DCHECK(!buffers_.empty());
+ if (new_buffers_group_start_pts == kNoTimestamp) {
+ return buffers.front()->is_key_frame()
+ ? (IsNextInPresentationSequence(buffers.front()->timestamp()) ||
+ AllowableAppendAfterEstimatedDuration(
+ buffers, new_buffers_group_start_pts))
+ : IsNextInDecodeSequence(buffers.front()->GetDecodeTimestamp());
+ }
+ CHECK(buffers.front()->is_key_frame());
+ DCHECK(new_buffers_group_start_pts >= GetEndTimestamp());
+ DCHECK(buffers.front()->timestamp() >= new_buffers_group_start_pts);
+ return IsNextInPresentationSequence(new_buffers_group_start_pts) ||
+ AllowableAppendAfterEstimatedDuration(buffers,
+ new_buffers_group_start_pts);
+}
+
+void SourceBufferRange::Seek(base::TimeDelta timestamp) {
+ DVLOG(1) << __func__;
+ DVLOG(4) << ToStringForDebugging();
+
+ DCHECK(CanSeekTo(timestamp));
+ DCHECK(!keyframe_map_.empty());
+
+ auto result = GetFirstKeyframeAtOrBefore(timestamp);
+ next_buffer_index_ = result->second - keyframe_map_index_base_;
+ CHECK_LT(next_buffer_index_, static_cast<int>(buffers_.size()))
+ << next_buffer_index_ << ", size = " << buffers_.size();
+}
+
+bool SourceBufferRange::CanSeekTo(base::TimeDelta timestamp) const {
+ DVLOG(1) << __func__;
+ DVLOG(4) << ToStringForDebugging();
+
+ base::TimeDelta start_timestamp =
+ std::max(base::TimeDelta(), GetStartTimestamp() - GetFudgeRoom());
+ return !keyframe_map_.empty() && start_timestamp <= timestamp &&
+ timestamp < GetBufferedEndTimestamp();
+}
+
+int SourceBufferRange::GetConfigIdAtTime(base::TimeDelta timestamp) const {
+ DVLOG(1) << __func__;
+ DVLOG(4) << ToStringForDebugging();
+
+ DCHECK(CanSeekTo(timestamp));
+ DCHECK(!keyframe_map_.empty());
+
+ auto result = GetFirstKeyframeAtOrBefore(timestamp);
+ CHECK(result != keyframe_map_.end());
+ size_t buffer_index = result->second - keyframe_map_index_base_;
+ CHECK_LT(buffer_index, buffers_.size())
+ << buffer_index << ", size = " << buffers_.size();
+
+ return buffers_[buffer_index]->GetConfigId();
+}
+
+bool SourceBufferRange::SameConfigThruRange(base::TimeDelta start,
+ base::TimeDelta end) const {
+ DVLOG(1) << __func__;
+ DVLOG(4) << ToStringForDebugging();
+
+ DCHECK(CanSeekTo(start));
+ DCHECK(CanSeekTo(end));
+ DCHECK(start <= end);
+ DCHECK(!keyframe_map_.empty());
+
+ if (start == end)
+ return true;
+
+ auto result = GetFirstKeyframeAtOrBefore(start);
+ CHECK(result != keyframe_map_.end());
+ size_t buffer_index = result->second - keyframe_map_index_base_;
+ CHECK_LT(buffer_index, buffers_.size())
+ << buffer_index << ", size = " << buffers_.size();
+
+ int start_config = buffers_[buffer_index]->GetConfigId();
+ buffer_index++;
+ while (buffer_index < buffers_.size() &&
+ buffers_[buffer_index]->timestamp() <= end) {
+ if (buffers_[buffer_index]->GetConfigId() != start_config)
+ return false;
+ buffer_index++;
+ }
+
+ return true;
+}
+
+std::unique_ptr<SourceBufferRange> SourceBufferRange::SplitRange(
+ base::TimeDelta timestamp) {
+ DVLOG(1) << __func__;
+ DVLOG(4) << ToStringForDebugging();
+
+ CHECK(!buffers_.empty());
+
+ // Find the first keyframe at or after |timestamp|.
+ auto new_beginning_keyframe = GetFirstKeyframeAt(timestamp, false);
+
+ // If there is no keyframe at or after |timestamp|, we can't split the range.
+ if (new_beginning_keyframe == keyframe_map_.end())
+ return nullptr;
+
+ // Remove the data beginning at |keyframe_index| from |buffers_| and save it
+ // into |removed_buffers|.
+ int keyframe_index =
+ new_beginning_keyframe->second - keyframe_map_index_base_;
+ CHECK_LT(keyframe_index, static_cast<int>(buffers_.size()));
+ BufferQueue::iterator starting_point = buffers_.begin() + keyframe_index;
+ BufferQueue removed_buffers(starting_point, buffers_.end());
+
+ base::TimeDelta new_range_start_pts =
+ std::max(timestamp, GetStartTimestamp());
+ DCHECK(new_range_start_pts <= removed_buffers.front()->timestamp());
+
+ keyframe_map_.erase(new_beginning_keyframe, keyframe_map_.end());
+ FreeBufferRange(starting_point, buffers_.end());
+ UpdateEndTimeUsingLastGOP();
+
+ // Create a new range with |removed_buffers|.
+ std::unique_ptr<SourceBufferRange> split_range =
+ std::make_unique<SourceBufferRange>(gap_policy_, removed_buffers,
+ new_range_start_pts,
+ interbuffer_distance_cb_);
+
+ // If the next buffer position is now in |split_range|, update the state of
+ // this range and |split_range| accordingly.
+ if (next_buffer_index_ >= static_cast<int>(buffers_.size())) {
+ split_range->next_buffer_index_ = next_buffer_index_ - keyframe_index;
+
+ int split_range_next_buffer_index = split_range->next_buffer_index_;
+ CHECK_GE(split_range_next_buffer_index, 0);
+ // Note that a SourceBufferRange's |next_buffer_index_| can be the index
+ // of a buffer one beyond what is currently in |buffers_|.
+ CHECK_LE(split_range_next_buffer_index,
+ static_cast<int>(split_range->buffers_.size()));
+
+ ResetNextBufferPosition();
+ }
+
+ return split_range;
+}
+
+bool SourceBufferRange::TruncateAt(base::TimeDelta timestamp,
+ BufferQueue* deleted_buffers,
+ bool is_exclusive) {
+ DVLOG(1) << __func__;
+ DVLOG(4) << ToStringForDebugging();
+
+ // Find the place in |buffers_| where we will begin deleting data, then
+ // truncate from there.
+ return TruncateAt(GetBufferIndexAt(timestamp, is_exclusive), deleted_buffers);
+}
+
+size_t SourceBufferRange::DeleteGOPFromFront(BufferQueue* deleted_buffers) {
+ DVLOG(1) << __func__;
+ DVLOG(4) << ToStringForDebugging();
+
+ DCHECK(!buffers_.empty());
+ DCHECK(!FirstGOPContainsNextBufferPosition());
+ DCHECK(deleted_buffers);
+
+ int buffers_deleted = 0;
+ size_t total_bytes_deleted = 0;
+
+ KeyframeMap::const_iterator front = keyframe_map_.begin();
+ DCHECK(front != keyframe_map_.end());
+
+ // Delete the keyframe at the start of |keyframe_map_|.
+ keyframe_map_.erase(front);
+
+ // Now we need to delete all the buffers that depend on the keyframe we've
+ // just deleted.
+ int end_index = keyframe_map_.size() > 0
+ ? keyframe_map_.begin()->second - keyframe_map_index_base_
+ : buffers_.size();
+
+ // Delete buffers from the beginning of the buffered range up until (but not
+ // including) the next keyframe.
+ for (int i = 0; i < end_index; i++) {
+ size_t bytes_deleted = buffers_.front()->data_size();
+ DCHECK_GE(size_in_bytes_, bytes_deleted);
+ size_in_bytes_ -= bytes_deleted;
+ total_bytes_deleted += bytes_deleted;
+ deleted_buffers->push_back(buffers_.front());
+ buffers_.pop_front();
+ ++buffers_deleted;
+ }
+
+ // Update |keyframe_map_index_base_| to account for the deleted buffers.
+ keyframe_map_index_base_ += buffers_deleted;
+
+ if (next_buffer_index_ > -1) {
+ next_buffer_index_ -= buffers_deleted;
+ CHECK_GE(next_buffer_index_, 0)
+ << next_buffer_index_ << ", deleted " << buffers_deleted;
+ }
+
+ // Invalidate range start time if we've deleted the first buffer of the range.
+ if (buffers_deleted > 0) {
+ range_start_pts_ = kNoTimestamp;
+ // Reset the range end time tracking if there are no more buffers in the
+ // range.
+ if (buffers_.empty())
+ highest_frame_ = nullptr;
+ }
+
+ return total_bytes_deleted;
+}
+
+size_t SourceBufferRange::DeleteGOPFromBack(BufferQueue* deleted_buffers) {
+ DVLOG(1) << __func__;
+ DVLOG(4) << ToStringForDebugging();
+
+ DCHECK(!buffers_.empty());
+ DCHECK(!LastGOPContainsNextBufferPosition());
+ DCHECK(deleted_buffers);
+
+ // Remove the last GOP's keyframe from the |keyframe_map_|.
+ KeyframeMap::const_iterator back = keyframe_map_.end();
+ DCHECK_GT(keyframe_map_.size(), 0u);
+ --back;
+
+ // The index of the first buffer in the last GOP is equal to the new size of
+ // |buffers_| after that GOP is deleted.
+ size_t goal_size = back->second - keyframe_map_index_base_;
+ keyframe_map_.erase(back);
+
+ size_t total_bytes_deleted = 0;
+ while (buffers_.size() != goal_size) {
+ size_t bytes_deleted = buffers_.back()->data_size();
+ DCHECK_GE(size_in_bytes_, bytes_deleted);
+ size_in_bytes_ -= bytes_deleted;
+ total_bytes_deleted += bytes_deleted;
+    // We're removing buffers from the back, so push each removed buffer to
+    // the front of |deleted_buffers| so that it stays in nondecreasing
+    // timestamp order.
+ deleted_buffers->push_front(buffers_.back());
+ buffers_.pop_back();
+ }
+
+ UpdateEndTimeUsingLastGOP();
+
+ return total_bytes_deleted;
+}
+
+size_t SourceBufferRange::GetRemovalGOP(
+ base::TimeDelta start_timestamp,
+ base::TimeDelta end_timestamp,
+ size_t total_bytes_to_free,
+ base::TimeDelta* removal_end_timestamp) const {
+ DVLOG(1) << __func__;
+ DVLOG(4) << ToStringForDebugging();
+
+ size_t bytes_removed = 0;
+
+ auto gop_itr = GetFirstKeyframeAt(start_timestamp, false);
+ if (gop_itr == keyframe_map_.end())
+ return 0;
+ int keyframe_index = gop_itr->second - keyframe_map_index_base_;
+ BufferQueue::const_iterator buffer_itr = buffers_.begin() + keyframe_index;
+ auto gop_end = keyframe_map_.end();
+ if (end_timestamp < GetBufferedEndTimestamp())
+ gop_end = GetFirstKeyframeAtOrBefore(end_timestamp);
+
+ // Check if the removal range is within a GOP and skip the loop if so.
+ // [keyframe]...[start_timestamp]...[end_timestamp]...[keyframe]
+ auto gop_itr_prev = gop_itr;
+ if (gop_itr_prev != keyframe_map_.begin() && --gop_itr_prev == gop_end)
+ gop_end = gop_itr;
+
+ while (gop_itr != gop_end && bytes_removed < total_bytes_to_free) {
+ ++gop_itr;
+
+ size_t gop_size = 0;
+ int next_gop_index = gop_itr == keyframe_map_.end()
+ ? buffers_.size()
+ : gop_itr->second - keyframe_map_index_base_;
+ BufferQueue::const_iterator next_gop_start =
+ buffers_.begin() + next_gop_index;
+ for (; buffer_itr != next_gop_start; ++buffer_itr) {
+ gop_size += (*buffer_itr)->data_size();
+ }
+
+ bytes_removed += gop_size;
+ }
+ if (bytes_removed > 0) {
+ *removal_end_timestamp = gop_itr == keyframe_map_.end()
+ ? GetBufferedEndTimestamp()
+ : gop_itr->first;
+ }
+ return bytes_removed;
+}
+
+bool SourceBufferRange::FirstGOPEarlierThanMediaTime(
+ base::TimeDelta media_time) const {
+ DVLOG(1) << __func__;
+ DVLOG(4) << ToStringForDebugging();
+
+ if (keyframe_map_.size() == 1u)
+ return (GetBufferedEndTimestamp() <= media_time);
+
+ auto second_gop = keyframe_map_.begin();
+ ++second_gop;
+ return second_gop->first <= media_time;
+}
+
+bool SourceBufferRange::FirstGOPContainsNextBufferPosition() const {
+ DVLOG(1) << __func__;
+ DVLOG(4) << ToStringForDebugging();
+
+ if (!HasNextBufferPosition())
+ return false;
+
+ // If there is only one GOP, it must contain the next buffer position.
+ if (keyframe_map_.size() == 1u)
+ return true;
+
+ auto second_gop = keyframe_map_.begin();
+ ++second_gop;
+ return next_buffer_index_ < second_gop->second - keyframe_map_index_base_;
+}
+
+bool SourceBufferRange::LastGOPContainsNextBufferPosition() const {
+ DVLOG(1) << __func__;
+ DVLOG(4) << ToStringForDebugging();
+
+ if (!HasNextBufferPosition())
+ return false;
+
+ // If there is only one GOP, it must contain the next buffer position.
+ if (keyframe_map_.size() == 1u)
+ return true;
+
+ auto last_gop = keyframe_map_.end();
+ --last_gop;
+ return last_gop->second - keyframe_map_index_base_ <= next_buffer_index_;
+}
+
+base::TimeDelta SourceBufferRange::GetNextTimestamp() const {
+ DVLOG(1) << __func__;
+ DVLOG(4) << ToStringForDebugging();
+
+ CHECK(!buffers_.empty()) << next_buffer_index_;
+ CHECK(HasNextBufferPosition())
+ << next_buffer_index_ << ", size=" << buffers_.size();
+
+ if (next_buffer_index_ >= static_cast<int>(buffers_.size())) {
+ return kNoTimestamp;
+ }
+
+ return buffers_[next_buffer_index_]->timestamp();
+}
+
+base::TimeDelta SourceBufferRange::GetStartTimestamp() const {
+ DVLOG(1) << __func__;
+ DVLOG(4) << ToStringForDebugging();
+
+ DCHECK(!buffers_.empty());
+ base::TimeDelta start_timestamp = range_start_pts_;
+ if (start_timestamp == kNoTimestamp)
+ start_timestamp = buffers_.front()->timestamp();
+ return start_timestamp;
+}
+
+base::TimeDelta SourceBufferRange::GetEndTimestamp() const {
+ DVLOG(1) << __func__;
+ DVLOG(4) << ToStringForDebugging();
+
+ DCHECK(!buffers_.empty());
+ return highest_frame_->timestamp();
+}
+
+base::TimeDelta SourceBufferRange::GetBufferedEndTimestamp() const {
+ DVLOG(1) << __func__;
+ DVLOG(4) << ToStringForDebugging();
+
+ DCHECK(!buffers_.empty());
+ base::TimeDelta duration = highest_frame_->duration();
+
+ // FrameProcessor should protect against unknown buffer durations.
+ DCHECK_NE(duration, kNoTimestamp);
+
+ // Because media::Ranges<base::TimeDelta>::Add() ignores 0 duration ranges,
+ // report 1 microsecond for the last buffer's duration if it is a 0 duration
+ // buffer.
+ if (duration.is_zero())
+ duration = base::TimeDelta::FromMicroseconds(1);
+
+ return GetEndTimestamp() + duration;
+}
+
+bool SourceBufferRange::BelongsToRange(base::TimeDelta timestamp) const {
+ DVLOG(1) << __func__;
+ DVLOG(4) << ToStringForDebugging();
+
+ DCHECK(!buffers_.empty());
+
+ return (IsNextInPresentationSequence(timestamp) ||
+ (GetStartTimestamp() <= timestamp && timestamp <= GetEndTimestamp()));
+}
+
+base::TimeDelta SourceBufferRange::FindHighestBufferedTimestampAtOrBefore(
+ base::TimeDelta timestamp) const {
+ DVLOG(1) << __func__;
+ DVLOG(4) << ToStringForDebugging();
+
+ DCHECK(!buffers_.empty());
+ DCHECK(BelongsToRange(timestamp));
+
+ if (keyframe_map_.begin()->first > timestamp) {
+ // If the first keyframe in the range starts after |timestamp|, then
+ // return the range start time (which could be earlier due to coded frame
+ // group signalling.)
+ base::TimeDelta range_start = GetStartTimestamp();
+ DCHECK(timestamp >= range_start) << "BelongsToRange() semantics failed.";
+ return range_start;
}
- *highest_pts = *end_time = kNoTimestamp;
+ if (keyframe_map_.begin()->first == timestamp) {
+ return timestamp;
+ }
+
+ auto key_iter = GetFirstKeyframeAtOrBefore(timestamp);
+ DCHECK(key_iter != keyframe_map_.end())
+ << "BelongsToRange() semantics failed.";
+ DCHECK(key_iter->first <= timestamp);
+
+ // Scan forward in |buffers_| to find the highest frame with timestamp <=
+ // |timestamp|. Stop once a frame with timestamp > |timestamp| is encountered.
+ size_t key_index = key_iter->second - keyframe_map_index_base_;
+ SourceBufferRange::BufferQueue::const_iterator search_iter =
+ buffers_.begin() + key_index;
+ CHECK(search_iter != buffers_.end());
+ base::TimeDelta cur_frame_time = (*search_iter)->timestamp();
+ base::TimeDelta result = cur_frame_time;
+ while (true) {
+ result = std::max(result, cur_frame_time);
+ search_iter++;
+ if (search_iter == buffers_.end())
+ return result;
+ cur_frame_time = (*search_iter)->timestamp();
+ if (cur_frame_time > timestamp)
+ return result;
+ }
+
+ NOTREACHED();
+ return base::TimeDelta();
+}
+
+base::TimeDelta SourceBufferRange::NextKeyframeTimestamp(
+ base::TimeDelta timestamp) const {
+ DVLOG(1) << __func__;
+ DVLOG(4) << ToStringForDebugging();
+
+ DCHECK(!keyframe_map_.empty());
+
+ if (timestamp < GetStartTimestamp() || timestamp >= GetBufferedEndTimestamp())
+ return kNoTimestamp;
+
+ auto itr = GetFirstKeyframeAt(timestamp, false);
+ if (itr == keyframe_map_.end())
+ return kNoTimestamp;
+
+ // If the timestamp is inside the gap between the start of the coded frame
+ // group and the first buffer, then just pretend there is a keyframe at the
+ // specified timestamp.
+ if (itr == keyframe_map_.begin() && timestamp > range_start_pts_ &&
+ timestamp < itr->first) {
+ return timestamp;
+ }
+
+ return itr->first;
+}
+
+base::TimeDelta SourceBufferRange::KeyframeBeforeTimestamp(
+ base::TimeDelta timestamp) const {
+ DVLOG(1) << __func__;
+ DVLOG(4) << ToStringForDebugging();
+
+ DCHECK(!keyframe_map_.empty());
+
+ if (timestamp < GetStartTimestamp() || timestamp >= GetBufferedEndTimestamp())
+ return kNoTimestamp;
+
+ return GetFirstKeyframeAtOrBefore(timestamp)->first;
+}
+
+bool SourceBufferRange::GetBuffersInRange(base::TimeDelta start,
+ base::TimeDelta end,
+ BufferQueue* buffers) const {
+ DVLOG(1) << __func__;
+ DVLOG(4) << ToStringForDebugging();
+
+ // Find the nearest buffer with a timestamp <= start.
+ const base::TimeDelta first_timestamp = KeyframeBeforeTimestamp(start);
+ if (first_timestamp == kNoTimestamp)
+ return false;
+
+ // Find all buffers involved in the range.
+ const size_t previous_size = buffers->size();
+ for (BufferQueue::const_iterator it = GetBufferItrAt(first_timestamp, false);
+ it != buffers_.end(); ++it) {
+ scoped_refptr<StreamParserBuffer> buffer = *it;
+ // Buffers without duration are not supported, so bail if we encounter any.
+ if (buffer->duration() == kNoTimestamp ||
+ buffer->duration() <= base::TimeDelta()) {
+ return false;
+ }
+ if (buffer->timestamp() >= end)
+ break;
+
+ if (buffer->timestamp() + buffer->duration() <= start)
+ continue;
+
+ DCHECK(buffer->is_key_frame());
+ buffers->emplace_back(std::move(buffer));
+ }
+ return previous_size < buffers->size();
}
void SourceBufferRange::AdjustEstimatedDurationForNewAppend(
@@ -184,4 +779,186 @@ bool SourceBufferRange::IsNextInDecodeSequence(
decode_timestamp <= end + GetFudgeRoom())));
}
+base::TimeDelta SourceBufferRange::NextRangeStartTimeForAppendRangeToEnd(
+ const SourceBufferRange& range) const {
+ DCHECK(!buffers_.empty());
+ DCHECK(!range.buffers_.empty());
+
+ base::TimeDelta next_range_first_buffer_time =
+ range.buffers_.front()->timestamp();
+ base::TimeDelta this_range_end_time = GetEndTimestamp();
+ if (next_range_first_buffer_time < this_range_end_time)
+ return kNoTimestamp;
+
+ base::TimeDelta next_range_start_time = range.GetStartTimestamp();
+ DCHECK(next_range_start_time <= next_range_first_buffer_time);
+
+ if (next_range_start_time >= this_range_end_time)
+ return next_range_start_time;
+
+ return this_range_end_time;
+}
+
+size_t SourceBufferRange::GetBufferIndexAt(base::TimeDelta timestamp,
+ bool skip_given_timestamp) const {
+ DVLOG(1) << __func__;
+ DVLOG(4) << ToStringForDebugging();
+
+ // Find the GOP containing |timestamp| (or trivial buffers_.size() if none
+ // contain |timestamp|).
+ auto gop_iter = GetFirstKeyframeAtOrBefore(timestamp);
+ if (gop_iter == keyframe_map_.end())
+ return buffers_.size();
+
+ // Then scan forward in this GOP in decode sequence for the first frame with
+ // PTS >= |timestamp| (or strictly > if |skip_given_timestamp| is true). If
+ // this GOP doesn't contain such a frame, returns the index of the keyframe of
+ // the next GOP (which could be the index of end() of |buffers_| if this was
+ // the last GOP in |buffers_|). We do linear scan of the GOP here because we
+ // don't know the DTS for the searched-for frame, and the PTS sequence within
+ // a GOP may not match the DTS-sorted sequence of frames within the GOP.
+ DCHECK_GT(buffers_.size(), 0u);
+ size_t search_index = gop_iter->second - keyframe_map_index_base_;
+ SourceBufferRange::BufferQueue::const_iterator search_iter =
+ buffers_.begin() + search_index;
+ gop_iter++;
+
+ SourceBufferRange::BufferQueue::const_iterator next_gop_start =
+ gop_iter == keyframe_map_.end()
+ ? buffers_.end()
+ : buffers_.begin() + (gop_iter->second - keyframe_map_index_base_);
+
+ while (search_iter != next_gop_start) {
+ if (((*search_iter)->timestamp() > timestamp) ||
+ (!skip_given_timestamp && (*search_iter)->timestamp() == timestamp)) {
+ break;
+ }
+ search_index++;
+ search_iter++;
+ }
+
+ return search_index;
+}
+
+SourceBufferRange::BufferQueue::const_iterator
+SourceBufferRange::GetBufferItrAt(base::TimeDelta timestamp,
+ bool skip_given_timestamp) const {
+ DVLOG(1) << __func__;
+ DVLOG(4) << ToStringForDebugging();
+
+ return buffers_.begin() + GetBufferIndexAt(timestamp, skip_given_timestamp);
+}
+
+SourceBufferRange::KeyframeMap::const_iterator
+SourceBufferRange::GetFirstKeyframeAt(base::TimeDelta timestamp,
+ bool skip_given_timestamp) const {
+ DVLOG(1) << __func__;
+ DVLOG(4) << ToStringForDebugging();
+
+ return skip_given_timestamp ? keyframe_map_.upper_bound(timestamp)
+ : keyframe_map_.lower_bound(timestamp);
+}
+
+SourceBufferRange::KeyframeMap::const_iterator
+SourceBufferRange::GetFirstKeyframeAtOrBefore(base::TimeDelta timestamp) const {
+ DVLOG(1) << __func__;
+ DVLOG(4) << ToStringForDebugging();
+
+ auto result = keyframe_map_.lower_bound(timestamp);
+ // lower_bound() returns the first element >= |timestamp|, so we want the
+ // previous element if it did not return the element exactly equal to
+ // |timestamp|.
+ if (result != keyframe_map_.begin() &&
+ (result == keyframe_map_.end() || result->first != timestamp)) {
+ --result;
+ }
+ return result;
+}
+
+bool SourceBufferRange::TruncateAt(const size_t starting_point,
+ BufferQueue* deleted_buffers) {
+ DVLOG(1) << __func__;
+ DVLOG(4) << ToStringForDebugging();
+
+ CHECK_LE(starting_point, buffers_.size());
+ DCHECK(!deleted_buffers || deleted_buffers->empty());
+
+ // Return if we're not deleting anything.
+ if (starting_point == buffers_.size())
+ return buffers_.empty();
+
+ // Reset the next buffer index if we will be deleting the buffer that's next
+ // in sequence.
+ if (HasNextBufferPosition()) {
+ if (static_cast<size_t>(next_buffer_index_) >= starting_point) {
+ if (HasNextBuffer() && deleted_buffers) {
+ BufferQueue saved(buffers_.begin() + next_buffer_index_,
+ buffers_.end());
+ deleted_buffers->swap(saved);
+ }
+ ResetNextBufferPosition();
+ }
+ }
+
+ const BufferQueue::const_iterator starting_point_iter =
+ buffers_.begin() + starting_point;
+
+ // Remove keyframes from |starting_point| onward.
+ KeyframeMap::const_iterator starting_point_keyframe =
+ keyframe_map_.lower_bound((*starting_point_iter)->timestamp());
+ keyframe_map_.erase(starting_point_keyframe, keyframe_map_.end());
+
+ // Remove everything from |starting_point| onward.
+ FreeBufferRange(starting_point_iter, buffers_.end());
+
+ UpdateEndTimeUsingLastGOP();
+ return buffers_.empty();
+}
+
+void SourceBufferRange::UpdateEndTimeUsingLastGOP() {
+ DVLOG(1) << __func__;
+ DVLOG(4) << ToStringForDebugging();
+
+ if (buffers_.empty()) {
+ DVLOG(1) << __func__ << " Empty range, resetting range end";
+ highest_frame_ = nullptr;
+ return;
+ }
+
+ highest_frame_ = nullptr;
+
+ KeyframeMap::const_iterator last_gop = keyframe_map_.end();
+ CHECK_GT(keyframe_map_.size(), 0u);
+ --last_gop;
+
+ // Iterate through the frames of the last GOP in this range, finding the
+ // frame with the highest PTS.
+ for (BufferQueue::const_iterator buffer_itr =
+ buffers_.begin() + (last_gop->second - keyframe_map_index_base_);
+ buffer_itr != buffers_.end(); ++buffer_itr) {
+ UpdateEndTime(*buffer_itr);
+ }
+
+ DVLOG(1) << __func__ << " Updated range end time to "
+ << highest_frame_->timestamp() << ", "
+ << highest_frame_->timestamp() + highest_frame_->duration();
+}
+
+std::string SourceBufferRange::ToStringForDebugging() const {
+ std::stringstream result;
+
+#if !defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)
+ result << "keyframe_map_index_base_=" << keyframe_map_index_base_
+ << ", buffers.size()=" << buffers_.size()
+ << ", keyframe_map_.size()=" << keyframe_map_.size()
+ << ", keyframe_map_:\n";
+ for (const auto& entry : keyframe_map_) {
+ result << "\t pts " << entry.first.InMicroseconds()
+ << ", unadjusted idx = " << entry.second << "\n";
+ }
+#endif // !defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)
+
+ return result.str();
+}
+
} // namespace media
diff --git a/chromium/media/filters/source_buffer_range.h b/chromium/media/filters/source_buffer_range.h
index b8ff274fa44..ef4426406ff 100644
--- a/chromium/media/filters/source_buffer_range.h
+++ b/chromium/media/filters/source_buffer_range.h
@@ -5,6 +5,10 @@
#ifndef MEDIA_FILTERS_SOURCE_BUFFER_RANGE_H_
#define MEDIA_FILTERS_SOURCE_BUFFER_RANGE_H_
+#include <stddef.h>
+#include <map>
+#include <memory>
+
#include "base/callback.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
@@ -37,40 +41,29 @@ class MEDIA_EXPORT SourceBufferRange {
ALLOW_GAPS
};
- // Sequential buffers with the same decode timestamp make sense under certain
- // conditions, typically when the first buffer is a keyframe. Due to some
- // atypical media append behaviors where a new keyframe might have the same
- // decode timestamp as a previous non-keyframe, the playback of the sequence
- // might involve some throwaway decode work. This method supports detecting
- // this situation so that callers can log warnings (it returns true in this
- // case only).
- // For all other cases, including more typical same-DTS sequences, this method
- // returns false. Examples of typical situations where DTS of two consecutive
- // frames can be equal:
- // - Video: VP8 Alt-Ref frames.
- // - Video: IPBPBP...: DTS for I frame and for P frame can be equal.
- // - Text track cues that start at same time.
- // Returns true if |prev_is_keyframe| and |current_is_keyframe| indicate a
- // same timestamp situation that is atypical. False is returned otherwise.
- static bool IsUncommonSameTimestampSequence(bool prev_is_keyframe,
- bool current_is_keyframe);
-
+ // Creates a range with |new_buffers|. |new_buffers| cannot be empty and the
+ // front of |new_buffers| must be a keyframe.
+ // |range_start_pts| refers to the starting timestamp for the coded
+ // frame group to which these buffers belong.
SourceBufferRange(GapPolicy gap_policy,
+ const BufferQueue& new_buffers,
+ base::TimeDelta range_start_pts,
const InterbufferDistanceCB& interbuffer_distance_cb);
- virtual ~SourceBufferRange();
+ ~SourceBufferRange();
// Deletes all buffers in range.
- virtual void DeleteAll(BufferQueue* deleted_buffers) = 0;
+ void DeleteAll(BufferQueue* deleted_buffers);
// Seeks to the beginning of the range.
void SeekToStart();
- // Updates |out_buffer| with the next buffer in presentation order. Seek()
- // must be called before calls to GetNextBuffer(), and buffers are returned
- // in order from the last call to Seek(). Returns true if |out_buffer| is
- // filled with a valid buffer, false if there is not enough data to fulfill
- // the request.
+ // Updates |out_buffer| with the next buffer in presentation order by GOP and
+ // by decode order within each GOP (in general, in sequence to feed a
+ // decoder). Seek() must be called before calls to GetNextBuffer(), and
+ // buffers are returned in order from the last call to Seek(). Returns true if
+ // |out_buffer| is filled with a valid buffer, false if there is not enough
+ // data to fulfill the request.
bool GetNextBuffer(scoped_refptr<StreamParserBuffer>* out_buffer);
bool HasNextBuffer() const;
@@ -86,18 +79,178 @@ class MEDIA_EXPORT SourceBufferRange {
// Resets this range to an "unseeked" state.
void ResetNextBufferPosition();
- // TODO(wolenetz): Remove in favor of
- // GetEndTimestamp()/GetBufferedEndTimestamp() once they report in PTS, not
- // DTS. See https://crbug.com/718641.
- void GetRangeEndTimesForTesting(base::TimeDelta* highest_pts,
- base::TimeDelta* end_time) const;
+ // Appends the buffers from |range| into this range.
+ // The first buffer in |range| must come directly after the last buffer
+ // in this range.
+ // If |transfer_current_position| is true, |range|'s |next_buffer_index_|
+ // is transferred to this SourceBufferRange.
+ // Note: Use these only to merge existing ranges. |range|'s first buffer
+ // timestamp must be adjacent to this range. No group start timestamp
+ // adjacency is involved in these methods.
+ // During append, |highest_frame_| is updated, if necessary.
+ void AppendRangeToEnd(const SourceBufferRange& range,
+ bool transfer_current_position);
+ bool CanAppendRangeToEnd(const SourceBufferRange& range) const;
+
+ // Appends |buffers| to the end of the range and updates |keyframe_map_| as
+ // it encounters new keyframes.
+ // If |new_buffers_group_start_pts| is kNoTimestamp, then the
+ // first buffer in |buffers| must come directly after the last buffer in this
+ // range (within the fudge room) - specifically, if the first buffer in
+ // |buffers| is not a keyframe, then it must be next in DTS order w.r.t. last
+ // buffer in |buffers|. Otherwise, it's a keyframe that must be next in PTS
+ // order w.r.t. |highest_frame_| or be immediately adjacent to the last buffer
+ // in this range if that buffer has estimated duration (only allowed in WebM
+ // streams).
+ // If |new_buffers_group_start_pts| is set otherwise, then that time must come
+ // directly after |highest_frame_| (within the fudge room), or directly after
+ // the last buffered frame if it has estimated duration (only allowed in WebM
+ // streams), and the first buffer in |buffers| must be a keyframe.
+ // The latter scenario is required when a muxed coded frame group has such a
+ // large jagged start across tracks that its first buffer is not within the
+ // fudge room, yet its group start was.
+ // The conditions around estimated duration are handled by
+ // AllowableAppendAfterEstimatedDuration, and are intended to solve the edge
+ // case in the SourceBufferStreamTest
+ // MergeAllowedIfRangeEndTimeWithEstimatedDurationMatchesNextRangeStart.
+ // During append, |highest_frame_| is updated, if necessary.
+ void AppendBuffersToEnd(const BufferQueue& buffers,
+ base::TimeDelta new_buffers_group_start_timestamp);
+ bool AllowableAppendAfterEstimatedDuration(
+ const BufferQueue& buffers,
+ base::TimeDelta new_buffers_group_start_pts) const;
+ bool CanAppendBuffersToEnd(const BufferQueue& buffers,
+ base::TimeDelta new_buffers_group_start_pts) const;
+
+ // Updates |next_buffer_index_| to point to the keyframe with presentation
+ // timestamp at or before |timestamp|. Assumes |timestamp| is valid and in
+ // this range.
+ void Seek(base::TimeDelta timestamp);
+
+ // Returns true if the range has enough data to seek to the specified
+ // |timestamp|, false otherwise.
+ bool CanSeekTo(base::TimeDelta timestamp) const;
+
+ // Return the config ID for the buffer at |timestamp|. Precondition: callers
+ // must first verify CanSeekTo(timestamp) == true.
+ int GetConfigIdAtTime(base::TimeDelta timestamp) const;
+
+ // Return true if all buffers in range of [start, end] have the same config
+ // ID. Precondition: callers must first verify that
+ // CanSeekTo(start) == CanSeekTo(end) == true.
+ bool SameConfigThruRange(base::TimeDelta start, base::TimeDelta end) const;
+
+ // Finds the next keyframe from |buffers_| starting at or after |timestamp|
+ // and creates and returns a new SourceBufferRange with the buffers from
+ // that keyframe onward. The buffers in the new SourceBufferRange are
+ // moved out of this range. The start time of the new SourceBufferRange
+ // is set to the later of |timestamp| and this range's GetStartTimestamp().
+ // Note that this may result in temporary overlap of the new range and this
+ // range until the caller truncates any nonkeyframes out of this range with
+ // time > |timestamp|. If there is no keyframe at or after |timestamp|,
+ // SplitRange() returns null and this range is unmodified. This range can
+ // become empty if |timestamp| <= the PTS of the first buffer in this range.
+ // |highest_frame_| is updated, if necessary.
+ std::unique_ptr<SourceBufferRange> SplitRange(base::TimeDelta timestamp);
+
+ // Deletes the buffers from this range starting at |timestamp|, exclusive if
+ // |is_exclusive| is true, inclusive otherwise.
+ // Resets |next_buffer_index_| if the buffer at |next_buffer_index_| was
+ // deleted, and deletes the |keyframe_map_| entries for the buffers that
+ // were removed.
+ // |highest_frame_| is updated, if necessary.
+ // |deleted_buffers| contains the buffers that were deleted from this range,
+ // starting at the buffer that had been at |next_buffer_index_|.
+ // Returns true if everything in the range was deleted. Otherwise
+ // returns false.
+ bool TruncateAt(base::TimeDelta timestamp,
+ BufferQueue* deleted_buffers,
+ bool is_exclusive);
+
+ // Deletes a GOP from the front or back of the range and moves these
+ // buffers into |deleted_buffers|. Returns the number of bytes deleted from
+ // the range (i.e. the size in bytes of |deleted_buffers|).
+ // |highest_frame_| is updated, if necessary.
+ // This range must NOT be empty when these methods are called.
+ // The GOP being deleted must NOT contain the next buffer position.
+ size_t DeleteGOPFromFront(BufferQueue* deleted_buffers);
+ size_t DeleteGOPFromBack(BufferQueue* deleted_buffers);
+
+ // Gets the range of GOP to secure at least |bytes_to_free| from
+ // [|start_timestamp|, |end_timestamp|).
+ // Returns the size of the buffers to secure if the buffers of
+ // [|start_timestamp|, |end_removal_timestamp|) is removed.
+ // Will not update |end_removal_timestamp| if the returned size is 0.
+ size_t GetRemovalGOP(base::TimeDelta start_timestamp,
+ base::TimeDelta end_timestamp,
+ size_t bytes_to_free,
+ base::TimeDelta* end_removal_timestamp) const;
+
+ // Returns true iff the buffered end time of the first GOP in this range is
+ // at or before |media_time|.
+ bool FirstGOPEarlierThanMediaTime(base::TimeDelta media_time) const;
+
+ // Indicates whether the GOP at the beginning or end of the range contains the
+ // next buffer position.
+ bool FirstGOPContainsNextBufferPosition() const;
+ bool LastGOPContainsNextBufferPosition() const;
+
+ // Returns the timestamp of the next buffer that will be returned from
+ // GetNextBuffer(), or kNoTimestamp if the timestamp is unknown.
+ base::TimeDelta GetNextTimestamp() const;
+
+ // Returns the start timestamp of the range.
+ base::TimeDelta GetStartTimestamp() const;
+
+ // Returns the highest presentation timestamp of frames in the last GOP in the
+ // range.
+ base::TimeDelta GetEndTimestamp() const;
+
+ // Returns the timestamp for the end of the buffered region in this range.
+ // This is an approximation if the duration for the buffer with highest PTS in
+ // the last GOP in the range is unset.
+ base::TimeDelta GetBufferedEndTimestamp() const;
+
+ // Returns whether a buffer with a starting timestamp of |timestamp| would
+ // belong in this range. This includes a buffer that would be appended to
+ // the end of the range.
+ bool BelongsToRange(base::TimeDelta timestamp) const;
+
+ // Returns the highest time from among GetStartTimestamp() and frame timestamp
+ // (in order in |buffers_| beginning at the first keyframe at or before
+ // |timestamp|) for buffers in this range up to and including |timestamp|.
+ // Note that |timestamp| must belong to this range.
+ base::TimeDelta FindHighestBufferedTimestampAtOrBefore(
+ base::TimeDelta timestamp) const;
+
+ // Gets the timestamp for the keyframe that is at or after |timestamp|. If
+ // there isn't such a keyframe in the range then kNoTimestamp is returned.
+ // If |timestamp| is in the "gap" between the value returned by
+ // GetStartTimestamp() and the timestamp on the first buffer in |buffers_|,
+ // then |timestamp| is returned.
+ base::TimeDelta NextKeyframeTimestamp(base::TimeDelta timestamp) const;
+
+ // Gets the timestamp for the closest keyframe that is <= |timestamp|. If
+ // there isn't a keyframe before |timestamp| or |timestamp| is outside
+ // this range, then kNoTimestamp is returned.
+ base::TimeDelta KeyframeBeforeTimestamp(base::TimeDelta timestamp) const;
+
+ // Adds all buffers which overlap [start, end) to the end of |buffers|. If
+ // no buffers exist in the range returns false, true otherwise.
+ // This method is used for finding audio splice overlap buffers, so all
+ // buffers are expected to be keyframes here (so DTS doesn't matter at all).
+ bool GetBuffersInRange(base::TimeDelta start,
+ base::TimeDelta end,
+ BufferQueue* buffers) const;
size_t size_in_bytes() const { return size_in_bytes_; }
- protected:
- // Friend of protected is only for IsNextInPresentationSequence testing.
+ private:
+ // Friend of private is only for IsNextInPresentationSequence testing.
friend class SourceBufferStreamTest;
+ using KeyframeMap = std::map<base::TimeDelta, int>;
+
// Called during AppendBuffersToEnd to adjust estimated duration at the
// end of the last append to match the delta in timestamps between
// the last append and the upcoming append. This is a workaround for
@@ -108,30 +261,27 @@ class MEDIA_EXPORT SourceBufferRange {
// Frees the buffers in |buffers_| from [|start_point|,|ending_point|) and
// updates the |size_in_bytes_| accordingly. Note, this does not update
// |keyframe_map_|.
- // TODO(wolenetz): elevate keyframe_map_ to base class so this comment has
- // better context. See https://crbug.com/718641.
void FreeBufferRange(const BufferQueue::const_iterator& starting_point,
const BufferQueue::const_iterator& ending_point);
// Returns the distance in time estimating how far from the beginning or end
- // of this range a buffer can be to considered in the range.
+ // of this range a buffer can be to be considered in the range.
base::TimeDelta GetFudgeRoom() const;
// Returns the approximate duration of a buffer in this range.
base::TimeDelta GetApproximateDuration() const;
// Updates |highest_frame_| if |new_buffer| has a higher PTS than
- // |highest_frame_| or if the range was previously empty.
+ // |highest_frame_|, |new_buffer| has the same PTS as |highest_frame_| and
+ // duration at least as long as |highest_frame_|, or if the range was
+ // previously empty.
void UpdateEndTime(scoped_refptr<StreamParserBuffer> new_buffer);
// Returns true if |timestamp| is allowed in this range as the timestamp of
// the next buffer in presentation sequence at or after |highest_frame_|.
// |buffers_| must not be empty, and |highest_frame_| must not be nullptr.
// Uses |gap_policy_| to potentially allow gaps.
- // TODO(wolenetz): Switch to using this helper in CanAppendBuffersToEnd(),
- // etc, when switching to managing ranges by their presentation interval, and
- // not necessarily just their decode times. See https://crbug.com/718641. Once
- // being used and not just tested, the following also applies:
+ //
// Due to potential for out-of-order decode vs presentation time, this method
// should only be used to determine adjacency of keyframes with the end of
// |buffers_|.
@@ -141,10 +291,7 @@ class MEDIA_EXPORT SourceBufferRange {
// timestamp of the next buffer in decode sequence at or after the last buffer
// in |buffers_|'s decode timestamp. |buffers_| must not be empty. Uses
// |gap_policy_| to potentially allow gaps.
- // TODO(wolenetz): Switch to using this helper in CanAppendBuffersToEnd(),
- // etc, appropriately when switching to managing ranges by their presentation
- // interval between GOPs, and by their decode sequence within GOPs. See
- // https://crbug.com/718641. Once that's done, the following also would apply:
+ //
// Due to potential for out-of-order decode vs presentation time, this method
// should only be used to determine adjacency of non-keyframes with the end of
// |buffers_|, when determining if a non-keyframe with |decode_timestamp|
@@ -152,19 +299,71 @@ class MEDIA_EXPORT SourceBufferRange {
// |buffers_|.
bool IsNextInDecodeSequence(DecodeTimestamp decode_timestamp) const;
+ // Helper method for Appending |range| to the end of this range. If |range|'s
+ // first buffer time is before the time of the last buffer in this range,
+ // returns kNoTimestamp. Otherwise, returns the closest time within
+ // [|range|'s start time, |range|'s first buffer time] that is at or after the
+ // this range's GetEndTimestamp(). This allows |range| to potentially be
+ // determined to be adjacent within fudge room for appending to the end of
+ // this range, especially if |range| has a start time that is before its first
+ // buffer's time.
+ base::TimeDelta NextRangeStartTimeForAppendRangeToEnd(
+ const SourceBufferRange& range) const;
+
+ // Returns an index (or iterator) into |buffers_| pointing to the first buffer
+ // at or after |timestamp|. If |skip_given_timestamp| is true, this returns
+ // the first buffer with timestamp strictly greater than |timestamp|. If
+ // |buffers_| has no such buffer, returns |buffers_.size()| (or
+ // |buffers_.end()|).
+ size_t GetBufferIndexAt(base::TimeDelta timestamp,
+ bool skip_given_timestamp) const;
+ BufferQueue::const_iterator GetBufferItrAt(base::TimeDelta timestamp,
+ bool skip_given_timestamp) const;
+
+ // Returns an iterator in |keyframe_map_| pointing to the next keyframe after
+ // |timestamp|. If |skip_given_timestamp| is true, this returns the first
+ // keyframe with a timestamp strictly greater than |timestamp|.
+ KeyframeMap::const_iterator GetFirstKeyframeAt(
+ base::TimeDelta timestamp,
+ bool skip_given_timestamp) const;
+
+ // Returns an iterator in |keyframe_map_| pointing to the first keyframe
+ // before or at |timestamp|.
+ KeyframeMap::const_iterator GetFirstKeyframeAtOrBefore(
+ base::TimeDelta timestamp) const;
+
+ // Helper method to delete buffers in |buffers_| starting at
+ // |starting_point|, an index in |buffers_|.
+ // Returns true if everything in the range was removed. Returns
+ // false if the range still contains buffers.
+ bool TruncateAt(const size_t starting_point, BufferQueue* deleted_buffers);
+
+ // Updates |highest_frame_| to be the frame with highest PTS in the last GOP
+ // in this range. If there are no buffers in this range, resets
+ // |highest_frame_|.
+ // Normally, incremental additions to this range should just use
+ // UpdateEndTime(). When removing buffers from this range (which could be out
+ // of order presentation vs decode order), inspecting the last buffer in
+ // decode order of this range can be insufficient to determine the correct
+ // presentation end time of this range. Hence this helper method.
+ void UpdateEndTimeUsingLastGOP();
+
+ // Helper for debugging state.
+ std::string ToStringForDebugging() const;
+
// Keeps track of whether gaps are allowed.
const GapPolicy gap_policy_;
- // An ordered list of buffers in this range.
+ // The ordered list of buffers in this range.
BufferQueue buffers_;
// Index into |buffers_| for the next buffer to be returned by
- // GetNextBuffer(), set to -1 before Seek().
+ // GetNextBuffer(), set to -1 by ResetNextBufferPosition().
int next_buffer_index_;
// Caches the buffer, if any, with the highest PTS currently in |buffers_|.
// This is nullptr if this range is empty. This is useful in determining
- // range membership and adjacency in SourceBufferRangeByPts.
+ // range membership and adjacency.
scoped_refptr<StreamParserBuffer> highest_frame_;
// Called to get the largest interbuffer distance seen so far in the stream.
@@ -173,7 +372,26 @@ class MEDIA_EXPORT SourceBufferRange {
// Stores the amount of memory taken up by the data in |buffers_|.
size_t size_in_bytes_;
- private:
+ // If the first buffer in this range is the beginning of a coded frame group,
+ // |range_start_pts_| is the presentation time when the coded frame group
+ // begins. This is especially important in muxed media where the first coded
+ // frames for each track do not necessarily begin at the same time.
+ // |range_start_pts_| may be <= the timestamp of the first buffer in
+ // |buffers_|. |range_start_pts_| is kNoTimestamp if this range does not start
+ // at the beginning of a coded frame group, which can happen by range removal
+ // or split when we don't have a way of knowing, across potentially multiple
+ // muxed streams, the coded frame group start timestamp for the new range.
+ base::TimeDelta range_start_pts_;
+
+ // Index base of all positions in |keyframe_map_|. In other words, the
+ // real position of entry |k| of |keyframe_map_| in the range is:
+ // keyframe_map_[k] - keyframe_map_index_base_
+ int keyframe_map_index_base_;
+
+ // Maps keyframe presentation timestamps to GOP start index of |buffers_|
+ // (with index adjusted by |keyframe_map_index_base_|);
+ KeyframeMap keyframe_map_;
+
DISALLOW_COPY_AND_ASSIGN(SourceBufferRange);
};
diff --git a/chromium/media/filters/source_buffer_range_by_dts.cc b/chromium/media/filters/source_buffer_range_by_dts.cc
deleted file mode 100644
index 4b3fe7f85b8..00000000000
--- a/chromium/media/filters/source_buffer_range_by_dts.cc
+++ /dev/null
@@ -1,683 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/filters/source_buffer_range_by_dts.h"
-
-#include <algorithm>
-#include <memory>
-
-#include "media/base/timestamp_constants.h"
-
-namespace media {
-
-// Comparison operators for std::upper_bound() and std::lower_bound().
-static bool CompareDecodeTimestampToStreamParserBuffer(
- const DecodeTimestamp& decode_timestamp,
- const scoped_refptr<StreamParserBuffer>& buffer) {
- return decode_timestamp < buffer->GetDecodeTimestamp();
-}
-static bool CompareStreamParserBufferToDecodeTimestamp(
- const scoped_refptr<StreamParserBuffer>& buffer,
- const DecodeTimestamp& decode_timestamp) {
- return buffer->GetDecodeTimestamp() < decode_timestamp;
-}
-
-SourceBufferRangeByDts::SourceBufferRangeByDts(
- GapPolicy gap_policy,
- const BufferQueue& new_buffers,
- DecodeTimestamp range_start_decode_time,
- const InterbufferDistanceCB& interbuffer_distance_cb)
- : SourceBufferRange(gap_policy, interbuffer_distance_cb),
- range_start_decode_time_(range_start_decode_time),
- keyframe_map_index_base_(0) {
- DVLOG(3) << __func__;
- CHECK(!new_buffers.empty());
- DCHECK(new_buffers.front()->is_key_frame());
- AppendBuffersToEnd(new_buffers, range_start_decode_time_);
-}
-
-SourceBufferRangeByDts::~SourceBufferRangeByDts() = default;
-
-DecodeTimestamp SourceBufferRangeByDts::NextRangeStartTimeForAppendRangeToEnd(
- const SourceBufferRangeByDts& range) const {
- DCHECK(!buffers_.empty());
- DCHECK(!range.buffers_.empty());
-
- DecodeTimestamp next_range_first_buffer_time =
- range.buffers_.front()->GetDecodeTimestamp();
- DecodeTimestamp this_range_end_time = GetEndTimestamp();
- if (next_range_first_buffer_time < this_range_end_time)
- return kNoDecodeTimestamp();
-
- DecodeTimestamp next_range_start_time = range.GetStartTimestamp();
- DCHECK(next_range_start_time <= next_range_first_buffer_time);
-
- if (next_range_start_time >= this_range_end_time)
- return next_range_start_time;
-
- return this_range_end_time;
-}
-
-void SourceBufferRangeByDts::AppendRangeToEnd(
- const SourceBufferRangeByDts& range,
- bool transfer_current_position) {
- DCHECK(CanAppendRangeToEnd(range));
- DCHECK(!buffers_.empty());
-
- if (transfer_current_position && range.next_buffer_index_ >= 0)
- next_buffer_index_ = range.next_buffer_index_ + buffers_.size();
-
- AppendBuffersToEnd(range.buffers_,
- NextRangeStartTimeForAppendRangeToEnd(range));
-}
-
-void SourceBufferRangeByDts::DeleteAll(BufferQueue* deleted_buffers) {
- TruncateAt(buffers_.begin(), deleted_buffers);
-}
-
-bool SourceBufferRangeByDts::CanAppendRangeToEnd(
- const SourceBufferRangeByDts& range) const {
- return CanAppendBuffersToEnd(range.buffers_,
- NextRangeStartTimeForAppendRangeToEnd(range));
-}
-
-void SourceBufferRangeByDts::AppendBuffersToEnd(
- const BufferQueue& new_buffers,
- DecodeTimestamp new_buffers_group_start_timestamp) {
- CHECK(buffers_.empty() ||
- CanAppendBuffersToEnd(new_buffers, new_buffers_group_start_timestamp));
- DCHECK(range_start_decode_time_ == kNoDecodeTimestamp() ||
- range_start_decode_time_ <= new_buffers.front()->GetDecodeTimestamp());
-
- AdjustEstimatedDurationForNewAppend(new_buffers);
-
- for (BufferQueue::const_iterator itr = new_buffers.begin();
- itr != new_buffers.end(); ++itr) {
- DCHECK((*itr)->GetDecodeTimestamp() != kNoDecodeTimestamp());
-
- buffers_.push_back(*itr);
- UpdateEndTime(*itr);
- size_in_bytes_ += (*itr)->data_size();
-
- if ((*itr)->is_key_frame()) {
- keyframe_map_.insert(
- std::make_pair((*itr)->GetDecodeTimestamp(),
- buffers_.size() - 1 + keyframe_map_index_base_));
- }
- }
-}
-
-bool SourceBufferRangeByDts::AllowableAppendAfterEstimatedDuration(
- const BufferQueue& buffers,
- DecodeTimestamp new_buffers_group_start_timestamp) const {
- if (buffers_.empty() || !buffers_.back()->is_duration_estimated() ||
- buffers.empty() || !buffers.front()->is_key_frame()) {
- return false;
- }
-
- if (new_buffers_group_start_timestamp == kNoDecodeTimestamp()) {
- return GetBufferedEndTimestamp() == buffers.front()->GetDecodeTimestamp();
- }
-
- return GetBufferedEndTimestamp() == new_buffers_group_start_timestamp;
-}
-
-bool SourceBufferRangeByDts::CanAppendBuffersToEnd(
- const BufferQueue& buffers,
- DecodeTimestamp new_buffers_group_start_timestamp) const {
- DCHECK(!buffers_.empty());
- if (new_buffers_group_start_timestamp == kNoDecodeTimestamp()) {
- return IsNextInDecodeSequence(buffers.front()->GetDecodeTimestamp()) ||
- AllowableAppendAfterEstimatedDuration(
- buffers, new_buffers_group_start_timestamp);
- }
- DCHECK(new_buffers_group_start_timestamp >= GetEndTimestamp());
- DCHECK(buffers.front()->GetDecodeTimestamp() >=
- new_buffers_group_start_timestamp);
- return IsNextInDecodeSequence(new_buffers_group_start_timestamp) ||
- AllowableAppendAfterEstimatedDuration(
- buffers, new_buffers_group_start_timestamp);
-}
-
-void SourceBufferRangeByDts::Seek(DecodeTimestamp timestamp) {
- DCHECK(CanSeekTo(timestamp));
- DCHECK(!keyframe_map_.empty());
-
- auto result = GetFirstKeyframeAtOrBefore(timestamp);
- next_buffer_index_ = result->second - keyframe_map_index_base_;
- CHECK_LT(next_buffer_index_, static_cast<int>(buffers_.size()))
- << next_buffer_index_ << ", size = " << buffers_.size();
-}
-
-int SourceBufferRangeByDts::GetConfigIdAtTime(DecodeTimestamp timestamp) const {
- DCHECK(CanSeekTo(timestamp));
- DCHECK(!keyframe_map_.empty());
-
- auto result = GetFirstKeyframeAtOrBefore(timestamp);
- CHECK(result != keyframe_map_.end());
- size_t buffer_index = result->second - keyframe_map_index_base_;
- CHECK_LT(buffer_index, buffers_.size())
- << buffer_index << ", size = " << buffers_.size();
-
- return buffers_[buffer_index]->GetConfigId();
-}
-
-bool SourceBufferRangeByDts::SameConfigThruRange(DecodeTimestamp start,
- DecodeTimestamp end) const {
- DCHECK(CanSeekTo(start));
- DCHECK(CanSeekTo(end));
- DCHECK(start <= end);
- DCHECK(!keyframe_map_.empty());
-
- if (start == end)
- return true;
-
- auto result = GetFirstKeyframeAtOrBefore(start);
- CHECK(result != keyframe_map_.end());
- size_t buffer_index = result->second - keyframe_map_index_base_;
- CHECK_LT(buffer_index, buffers_.size())
- << buffer_index << ", size = " << buffers_.size();
-
- int start_config = buffers_[buffer_index]->GetConfigId();
- buffer_index++;
- while (buffer_index < buffers_.size() &&
- buffers_[buffer_index]->GetDecodeTimestamp() <= end) {
- if (buffers_[buffer_index]->GetConfigId() != start_config)
- return false;
- buffer_index++;
- }
-
- return true;
-}
-
-std::unique_ptr<SourceBufferRangeByDts> SourceBufferRangeByDts::SplitRange(
- DecodeTimestamp timestamp) {
- CHECK(!buffers_.empty());
-
- // Find the first keyframe at or after |timestamp|.
- auto new_beginning_keyframe = GetFirstKeyframeAt(timestamp, false);
-
- // If there is no keyframe after |timestamp|, we can't split the range.
- if (new_beginning_keyframe == keyframe_map_.end()) {
- return NULL;
- }
-
- // Remove the data beginning at |keyframe_index| from |buffers_| and save it
- // into |removed_buffers|.
- int keyframe_index =
- new_beginning_keyframe->second - keyframe_map_index_base_;
- DCHECK_LT(keyframe_index, static_cast<int>(buffers_.size()));
- BufferQueue::iterator starting_point = buffers_.begin() + keyframe_index;
- BufferQueue removed_buffers(starting_point, buffers_.end());
-
- DecodeTimestamp new_range_start_decode_timestamp =
- std::max(timestamp, GetStartTimestamp());
- DCHECK(new_range_start_decode_timestamp <=
- removed_buffers.front()->GetDecodeTimestamp());
-
- keyframe_map_.erase(new_beginning_keyframe, keyframe_map_.end());
- FreeBufferRange(starting_point, buffers_.end());
- UpdateEndTimeUsingLastGOP();
-
- // Create a new range with |removed_buffers|.
- std::unique_ptr<SourceBufferRangeByDts> split_range =
- std::make_unique<SourceBufferRangeByDts>(gap_policy_, removed_buffers,
- new_range_start_decode_timestamp,
- interbuffer_distance_cb_);
-
- // If the next buffer position is now in |split_range|, update the state of
- // this range and |split_range| accordingly.
- if (next_buffer_index_ >= static_cast<int>(buffers_.size())) {
- split_range->next_buffer_index_ = next_buffer_index_ - keyframe_index;
-
- int split_range_next_buffer_index = split_range->next_buffer_index_;
- CHECK_GE(split_range_next_buffer_index, 0);
- // Note that a SourceBufferRange's |next_buffer_index_| can be the index
- // of a buffer one beyond what is currently in |buffers_|.
- CHECK_LE(split_range_next_buffer_index,
- static_cast<int>(split_range->buffers_.size()));
-
- ResetNextBufferPosition();
- }
-
- return split_range;
-}
-
-bool SourceBufferRangeByDts::TruncateAt(DecodeTimestamp timestamp,
- BufferQueue* deleted_buffers,
- bool is_exclusive) {
- // Find the place in |buffers_| where we will begin deleting data.
- BufferQueue::const_iterator starting_point =
- GetBufferItrAt(timestamp, is_exclusive);
- return TruncateAt(starting_point, deleted_buffers);
-}
-
-size_t SourceBufferRangeByDts::DeleteGOPFromFront(
- BufferQueue* deleted_buffers) {
- DCHECK(!buffers_.empty());
- DCHECK(!FirstGOPContainsNextBufferPosition());
- DCHECK(deleted_buffers);
-
- int buffers_deleted = 0;
- size_t total_bytes_deleted = 0;
-
- KeyframeMap::const_iterator front = keyframe_map_.begin();
- DCHECK(front != keyframe_map_.end());
-
- // Delete the keyframe at the start of |keyframe_map_|.
- keyframe_map_.erase(front);
-
- // Now we need to delete all the buffers that depend on the keyframe we've
- // just deleted.
- int end_index = keyframe_map_.size() > 0
- ? keyframe_map_.begin()->second - keyframe_map_index_base_
- : buffers_.size();
-
- // Delete buffers from the beginning of the buffered range up until (but not
- // including) the next keyframe.
- for (int i = 0; i < end_index; i++) {
- size_t bytes_deleted = buffers_.front()->data_size();
- DCHECK_GE(size_in_bytes_, bytes_deleted);
- size_in_bytes_ -= bytes_deleted;
- total_bytes_deleted += bytes_deleted;
- deleted_buffers->push_back(buffers_.front());
- buffers_.pop_front();
- ++buffers_deleted;
- }
-
- // Update |keyframe_map_index_base_| to account for the deleted buffers.
- keyframe_map_index_base_ += buffers_deleted;
-
- if (next_buffer_index_ > -1) {
- next_buffer_index_ -= buffers_deleted;
- CHECK_GE(next_buffer_index_, 0)
- << next_buffer_index_ << ", deleted " << buffers_deleted;
- }
-
- // Invalidate range start time if we've deleted the first buffer of the range.
- if (buffers_deleted > 0) {
- range_start_decode_time_ = kNoDecodeTimestamp();
- // Reset the range end time tracking if there are no more buffers in the
- // range.
- if (buffers_.empty())
- highest_frame_ = nullptr;
- }
-
- return total_bytes_deleted;
-}
-
-size_t SourceBufferRangeByDts::DeleteGOPFromBack(BufferQueue* deleted_buffers) {
- DCHECK(!buffers_.empty());
- DCHECK(!LastGOPContainsNextBufferPosition());
- DCHECK(deleted_buffers);
-
- // Remove the last GOP's keyframe from the |keyframe_map_|.
- KeyframeMap::const_iterator back = keyframe_map_.end();
- DCHECK_GT(keyframe_map_.size(), 0u);
- --back;
-
- // The index of the first buffer in the last GOP is equal to the new size of
- // |buffers_| after that GOP is deleted.
- size_t goal_size = back->second - keyframe_map_index_base_;
- keyframe_map_.erase(back);
-
- size_t total_bytes_deleted = 0;
- while (buffers_.size() != goal_size) {
- size_t bytes_deleted = buffers_.back()->data_size();
- DCHECK_GE(size_in_bytes_, bytes_deleted);
- size_in_bytes_ -= bytes_deleted;
- total_bytes_deleted += bytes_deleted;
- // We're removing buffers from the back, so push each removed buffer to the
- // front of |deleted_buffers| so that |deleted_buffers| are in nondecreasing
- // order.
- deleted_buffers->push_front(buffers_.back());
- buffers_.pop_back();
- }
-
- UpdateEndTimeUsingLastGOP();
-
- return total_bytes_deleted;
-}
-
-size_t SourceBufferRangeByDts::GetRemovalGOP(
- DecodeTimestamp start_timestamp,
- DecodeTimestamp end_timestamp,
- size_t total_bytes_to_free,
- DecodeTimestamp* removal_end_timestamp) const {
- size_t bytes_removed = 0;
-
- auto gop_itr = GetFirstKeyframeAt(start_timestamp, false);
- if (gop_itr == keyframe_map_.end())
- return 0;
- int keyframe_index = gop_itr->second - keyframe_map_index_base_;
- BufferQueue::const_iterator buffer_itr = buffers_.begin() + keyframe_index;
- auto gop_end = keyframe_map_.end();
- if (end_timestamp < GetBufferedEndTimestamp())
- gop_end = GetFirstKeyframeAtOrBefore(end_timestamp);
-
- // Check if the removal range is within a GOP and skip the loop if so.
- // [keyframe]...[start_timestamp]...[end_timestamp]...[keyframe]
- auto gop_itr_prev = gop_itr;
- if (gop_itr_prev != keyframe_map_.begin() && --gop_itr_prev == gop_end)
- gop_end = gop_itr;
-
- while (gop_itr != gop_end && bytes_removed < total_bytes_to_free) {
- ++gop_itr;
-
- size_t gop_size = 0;
- int next_gop_index = gop_itr == keyframe_map_.end()
- ? buffers_.size()
- : gop_itr->second - keyframe_map_index_base_;
- BufferQueue::const_iterator next_gop_start =
- buffers_.begin() + next_gop_index;
- for (; buffer_itr != next_gop_start; ++buffer_itr) {
- gop_size += (*buffer_itr)->data_size();
- }
-
- bytes_removed += gop_size;
- }
- if (bytes_removed > 0) {
- *removal_end_timestamp = gop_itr == keyframe_map_.end()
- ? GetBufferedEndTimestamp()
- : gop_itr->first;
- }
- return bytes_removed;
-}
-
-bool SourceBufferRangeByDts::FirstGOPEarlierThanMediaTime(
- DecodeTimestamp media_time) const {
- if (keyframe_map_.size() == 1u)
- return (GetBufferedEndTimestamp() <= media_time);
-
- auto second_gop = keyframe_map_.begin();
- ++second_gop;
- return second_gop->first <= media_time;
-}
-
-bool SourceBufferRangeByDts::FirstGOPContainsNextBufferPosition() const {
- if (!HasNextBufferPosition())
- return false;
-
- // If there is only one GOP, it must contain the next buffer position.
- if (keyframe_map_.size() == 1u)
- return true;
-
- auto second_gop = keyframe_map_.begin();
- ++second_gop;
- return next_buffer_index_ < second_gop->second - keyframe_map_index_base_;
-}
-
-bool SourceBufferRangeByDts::LastGOPContainsNextBufferPosition() const {
- if (!HasNextBufferPosition())
- return false;
-
- // If there is only one GOP, it must contain the next buffer position.
- if (keyframe_map_.size() == 1u)
- return true;
-
- auto last_gop = keyframe_map_.end();
- --last_gop;
- return last_gop->second - keyframe_map_index_base_ <= next_buffer_index_;
-}
-
-DecodeTimestamp SourceBufferRangeByDts::GetNextTimestamp() const {
- CHECK(!buffers_.empty()) << next_buffer_index_;
- CHECK(HasNextBufferPosition())
- << next_buffer_index_ << ", size=" << buffers_.size();
-
- if (next_buffer_index_ >= static_cast<int>(buffers_.size())) {
- return kNoDecodeTimestamp();
- }
-
- return buffers_[next_buffer_index_]->GetDecodeTimestamp();
-}
-
-DecodeTimestamp SourceBufferRangeByDts::GetStartTimestamp() const {
- DCHECK(!buffers_.empty());
- DecodeTimestamp start_timestamp = range_start_decode_time_;
- if (start_timestamp == kNoDecodeTimestamp())
- start_timestamp = buffers_.front()->GetDecodeTimestamp();
- return start_timestamp;
-}
-
-DecodeTimestamp SourceBufferRangeByDts::GetEndTimestamp() const {
- DCHECK(!buffers_.empty());
- return buffers_.back()->GetDecodeTimestamp();
-}
-
-DecodeTimestamp SourceBufferRangeByDts::GetBufferedEndTimestamp() const {
- DCHECK(!buffers_.empty());
- base::TimeDelta duration = buffers_.back()->duration();
-
- // FrameProcessor should protect against unknown buffer durations.
- DCHECK_NE(duration, kNoTimestamp);
-
- // Because media::Ranges<base::TimeDelta>::Add() ignores 0 duration ranges,
- // report 1 microsecond for the last buffer's duration if it is a 0 duration
- // buffer.
- if (duration.is_zero())
- duration = base::TimeDelta::FromMicroseconds(1);
-
- return GetEndTimestamp() + duration;
-}
-
-bool SourceBufferRangeByDts::BelongsToRange(DecodeTimestamp timestamp) const {
- DCHECK(!buffers_.empty());
-
- return (IsNextInDecodeSequence(timestamp) ||
- (GetStartTimestamp() <= timestamp && timestamp <= GetEndTimestamp()));
-}
-
-DecodeTimestamp SourceBufferRangeByDts::FindHighestBufferedTimestampAtOrBefore(
- DecodeTimestamp timestamp) const {
- DCHECK(!buffers_.empty());
- DCHECK(BelongsToRange(timestamp));
-
- if (keyframe_map_.begin()->first > timestamp) {
- // If the first keyframe in the range starts after |timestamp|, then return
- // the range start time (which could be earlier due to coded frame group
- // signalling.)
- DecodeTimestamp range_start = GetStartTimestamp();
-
- DCHECK(timestamp >= range_start) << "BelongsToRange() semantics failed.";
- return range_start;
- }
-
- if (keyframe_map_.begin()->first == timestamp) {
- return timestamp;
- }
-
- auto key_iter = GetFirstKeyframeAtOrBefore(timestamp);
- DCHECK(key_iter != keyframe_map_.end())
- << "BelongsToRange() semantics failed.";
- DCHECK(key_iter->first <= timestamp);
-
- // Scan forward in |buffers_| to find the highest frame decode timestamp <=
- // |timestamp|.
- size_t key_index = key_iter->second - keyframe_map_index_base_;
- SourceBufferRange::BufferQueue::const_iterator search_iter =
- buffers_.begin() + key_index;
- CHECK(search_iter != buffers_.end());
- DecodeTimestamp result = (*search_iter)->GetDecodeTimestamp();
- while (true) {
- search_iter++;
- if (search_iter == buffers_.end())
- return result;
- DecodeTimestamp cur_frame_time = (*search_iter)->GetDecodeTimestamp();
- if (cur_frame_time > timestamp)
- return result;
- result = cur_frame_time;
- }
-
- NOTREACHED();
- return DecodeTimestamp();
-}
-
-DecodeTimestamp SourceBufferRangeByDts::NextKeyframeTimestamp(
- DecodeTimestamp timestamp) const {
- DCHECK(!keyframe_map_.empty());
-
- if (timestamp < GetStartTimestamp() || timestamp >= GetBufferedEndTimestamp())
- return kNoDecodeTimestamp();
-
- auto itr = GetFirstKeyframeAt(timestamp, false);
- if (itr == keyframe_map_.end())
- return kNoDecodeTimestamp();
-
- // If the timestamp is inside the gap between the start of the coded frame
- // group and the first buffer, then just pretend there is a keyframe at the
- // specified timestamp.
- if (itr == keyframe_map_.begin() && timestamp > range_start_decode_time_ &&
- timestamp < itr->first) {
- return timestamp;
- }
-
- return itr->first;
-}
-
-DecodeTimestamp SourceBufferRangeByDts::KeyframeBeforeTimestamp(
- DecodeTimestamp timestamp) const {
- DCHECK(!keyframe_map_.empty());
-
- if (timestamp < GetStartTimestamp() || timestamp >= GetBufferedEndTimestamp())
- return kNoDecodeTimestamp();
-
- return GetFirstKeyframeAtOrBefore(timestamp)->first;
-}
-
-bool SourceBufferRangeByDts::CanSeekTo(DecodeTimestamp timestamp) const {
- DecodeTimestamp start_timestamp =
- std::max(DecodeTimestamp(), GetStartTimestamp() - GetFudgeRoom());
- return !keyframe_map_.empty() && start_timestamp <= timestamp &&
- timestamp < GetBufferedEndTimestamp();
-}
-
-bool SourceBufferRangeByDts::GetBuffersInRange(DecodeTimestamp start,
- DecodeTimestamp end,
- BufferQueue* buffers) const {
- // Find the nearest buffer with a decode timestamp <= start.
- const DecodeTimestamp first_timestamp = KeyframeBeforeTimestamp(start);
- if (first_timestamp == kNoDecodeTimestamp())
- return false;
-
- // Find all buffers involved in the range.
- const size_t previous_size = buffers->size();
- for (BufferQueue::const_iterator it = GetBufferItrAt(first_timestamp, false);
- it != buffers_.end(); ++it) {
- scoped_refptr<StreamParserBuffer> buffer = *it;
- // Buffers without duration are not supported, so bail if we encounter any.
- if (buffer->duration() == kNoTimestamp ||
- buffer->duration() <= base::TimeDelta()) {
- return false;
- }
- if (buffer->timestamp() >= end.ToPresentationTime())
- break;
-
- if (buffer->timestamp() + buffer->duration() <= start.ToPresentationTime())
- continue;
- buffers->emplace_back(std::move(buffer));
- }
- return previous_size < buffers->size();
-}
-
-SourceBufferRange::BufferQueue::const_iterator
-SourceBufferRangeByDts::GetBufferItrAt(DecodeTimestamp timestamp,
- bool skip_given_timestamp) const {
- return skip_given_timestamp
- ? std::upper_bound(buffers_.begin(), buffers_.end(), timestamp,
- CompareDecodeTimestampToStreamParserBuffer)
- : std::lower_bound(buffers_.begin(), buffers_.end(), timestamp,
- CompareStreamParserBufferToDecodeTimestamp);
-}
-
-SourceBufferRangeByDts::KeyframeMap::const_iterator
-SourceBufferRangeByDts::GetFirstKeyframeAt(DecodeTimestamp timestamp,
- bool skip_given_timestamp) const {
- return skip_given_timestamp ? keyframe_map_.upper_bound(timestamp)
- : keyframe_map_.lower_bound(timestamp);
-}
-
-SourceBufferRangeByDts::KeyframeMap::const_iterator
-SourceBufferRangeByDts::GetFirstKeyframeAtOrBefore(
- DecodeTimestamp timestamp) const {
- auto result = keyframe_map_.lower_bound(timestamp);
- // lower_bound() returns the first element >= |timestamp|, so we want the
- // previous element if it did not return the element exactly equal to
- // |timestamp|.
- if (result != keyframe_map_.begin() &&
- (result == keyframe_map_.end() || result->first != timestamp)) {
- --result;
- }
- return result;
-}
-
-bool SourceBufferRangeByDts::TruncateAt(
- const BufferQueue::const_iterator& starting_point,
- BufferQueue* deleted_buffers) {
- DCHECK(!deleted_buffers || deleted_buffers->empty());
-
- // Return if we're not deleting anything.
- if (starting_point == buffers_.end())
- return buffers_.empty();
-
- // Reset the next buffer index if we will be deleting the buffer that's next
- // in sequence.
- if (HasNextBufferPosition()) {
- DecodeTimestamp next_buffer_timestamp = GetNextTimestamp();
- if (next_buffer_timestamp == kNoDecodeTimestamp() ||
- next_buffer_timestamp >= (*starting_point)->GetDecodeTimestamp()) {
- if (HasNextBuffer() && deleted_buffers) {
- int starting_offset = starting_point - buffers_.begin();
- int next_buffer_offset = next_buffer_index_ - starting_offset;
- DCHECK_GE(next_buffer_offset, 0);
- int deleted_begin_offset = starting_offset + next_buffer_offset;
- CHECK_GE(deleted_begin_offset, 0);
-
- BufferQueue saved(buffers_.begin() + deleted_begin_offset,
- buffers_.end());
- deleted_buffers->swap(saved);
- }
- ResetNextBufferPosition();
- }
- }
-
- // Remove keyframes from |starting_point| onward.
- KeyframeMap::const_iterator starting_point_keyframe =
- keyframe_map_.lower_bound((*starting_point)->GetDecodeTimestamp());
- keyframe_map_.erase(starting_point_keyframe, keyframe_map_.end());
-
- // Remove everything from |starting_point| onward.
- FreeBufferRange(starting_point, buffers_.end());
-
- UpdateEndTimeUsingLastGOP();
- return buffers_.empty();
-}
-
-void SourceBufferRangeByDts::UpdateEndTimeUsingLastGOP() {
- if (buffers_.empty()) {
- DVLOG(1) << __func__ << " Empty range, resetting range end";
- highest_frame_ = nullptr;
- return;
- }
-
- highest_frame_ = nullptr;
-
- KeyframeMap::const_iterator last_gop = keyframe_map_.end();
- CHECK_GT(keyframe_map_.size(), 0u);
- --last_gop;
-
- // Iterate through the frames of the last GOP in this range, finding the
- // frame with the highest PTS.
- for (BufferQueue::const_iterator buffer_itr =
- buffers_.begin() + (last_gop->second - keyframe_map_index_base_);
- buffer_itr != buffers_.end(); ++buffer_itr) {
- UpdateEndTime(*buffer_itr);
- }
-
- DVLOG(1) << __func__ << " Updated range end time to "
- << highest_frame_->timestamp() << ", "
- << highest_frame_->timestamp() + highest_frame_->duration();
-}
-
-} // namespace media
diff --git a/chromium/media/filters/source_buffer_range_by_dts.h b/chromium/media/filters/source_buffer_range_by_dts.h
deleted file mode 100644
index 9681260283d..00000000000
--- a/chromium/media/filters/source_buffer_range_by_dts.h
+++ /dev/null
@@ -1,268 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_FILTERS_SOURCE_BUFFER_RANGE_BY_DTS_H_
-#define MEDIA_FILTERS_SOURCE_BUFFER_RANGE_BY_DTS_H_
-
-#include <stddef.h>
-#include <map>
-#include <memory>
-
-#include "media/filters/source_buffer_range.h"
-
-namespace media {
-
-// Helper class representing a continuous range of buffered data in the
-// presentation timeline. All buffers in a SourceBufferRangeByDts are ordered
-// sequentially in decode timestamp order with no gaps. This class conflates
-// decode intervals (and decode gaps) with presentation intervals and gaps; it
-// provides the "LegacyByDts" buffering implementation used prior to fixing
-// https://crbug.com/718641.
-class MEDIA_EXPORT SourceBufferRangeByDts : public SourceBufferRange {
- public:
- // Creates a range with |new_buffers|. |new_buffers| cannot be empty and the
- // front of |new_buffers| must be a keyframe.
- // |range_start_decode_time| refers to the starting timestamp for the coded
- // frame group to which these buffers belong.
- SourceBufferRangeByDts(GapPolicy gap_policy,
- const BufferQueue& new_buffers,
- DecodeTimestamp range_start_decode_time,
- const InterbufferDistanceCB& interbuffer_distance_cb);
-
- ~SourceBufferRangeByDts() override;
-
- void DeleteAll(BufferQueue* deleted_buffers) override;
-
- // Appends the buffers from |range| into this range.
- // The first buffer in |range| must come directly after the last buffer
- // in this range.
- // If |transfer_current_position| is true, |range|'s |next_buffer_index_|
- // is transfered to this SourceBufferRange.
- // Note: Use these only to merge existing ranges. |range|'s first buffer
- // timestamp must be adjacent to this range. No group start timestamp
- // adjacency is involved in these methods.
- // During append, |highest_frame_| is updated, if necessary.
- void AppendRangeToEnd(const SourceBufferRangeByDts& range,
- bool transfer_current_position);
- bool CanAppendRangeToEnd(const SourceBufferRangeByDts& range) const;
-
- // Appends |buffers| to the end of the range and updates |keyframe_map_| as
- // it encounters new keyframes.
- // If |new_buffers_group_start_timestamp| is kNoDecodeTimestamp(), then the
- // first buffer in |buffers| must come directly after the last buffer in this
- // range (within the fudge room), or be a keyframe that starts precisely where
- // the last buffer in this range ends and that last buffer has estimated
- // duration.
- // If |new_buffers_group_start_timestamp| is set otherwise, then that time
- // must come directly after the last buffer in this range (within the fudge
- // room) or precisely where the last buffer in this range ends and that last
- // buffer has estimated duration.
- // The latter scenario is required when a muxed coded frame group has such a
- // large jagged start across tracks that its first buffer is not within the
- // fudge room, yet its group start was.
- // The conditions around estimated duration are handled by
- // AllowableAppendAfterEstimatedDuration, and are intended to solve the edge
- // case in the SourceBufferStreamTest
- // MergeAllowedIfRangeEndTimeWithEstimatedDurationMatchesNextRangeStart.
- // During append, |highest_frame_| is updated, if necessary.
- void AppendBuffersToEnd(const BufferQueue& buffers,
- DecodeTimestamp new_buffers_group_start_timestamp);
- bool AllowableAppendAfterEstimatedDuration(
- const BufferQueue& buffers,
- DecodeTimestamp new_buffers_group_start_timestamp) const;
- bool CanAppendBuffersToEnd(
- const BufferQueue& buffers,
- DecodeTimestamp new_buffers_group_start_timestamp) const;
-
- // Updates |next_buffer_index_| to point to the Buffer containing |timestamp|.
- // Assumes |timestamp| is valid and in this range.
- void Seek(DecodeTimestamp timestamp);
-
- // Return the config ID for the buffer at |timestamp|. Precondition: callers
- // must first verify CanSeekTo(timestamp) == true.
- int GetConfigIdAtTime(DecodeTimestamp timestamp) const;
-
- // Return true if all buffers in range of [start, end] have the same config
- // ID. Precondition: callers must first verify that
- // CanSeekTo(start) == CanSeekTo(end) == true.
- bool SameConfigThruRange(DecodeTimestamp start, DecodeTimestamp end) const;
-
- // Finds the next keyframe from |buffers_| starting at or after |timestamp|
- // and creates and returns a new SourceBufferRangeByDts with the buffers from
- // that keyframe onward. The buffers in the new SourceBufferRangeByDts are
- // moved out of this range. The start time of the new SourceBufferRangeByDts
- // is set to the later of |timestamp| and this range's GetStartTimestamp().
- // If there is no keyframe at or after |timestamp|, SplitRange() returns null
- // and this range is unmodified. This range can become empty if |timestamp| <=
- // the DTS of the first buffer in this range. |highest_frame_| is updated, if
- // necessary.
- std::unique_ptr<SourceBufferRangeByDts> SplitRange(DecodeTimestamp timestamp);
-
- // Deletes the buffers from this range starting at |timestamp|, exclusive if
- // |is_exclusive| is true, inclusive otherwise.
- // Resets |next_buffer_index_| if the buffer at |next_buffer_index_| was
- // deleted, and deletes the |keyframe_map_| entries for the buffers that
- // were removed.
- // |highest_frame_| is updated, if necessary.
- // |deleted_buffers| contains the buffers that were deleted from this range,
- // starting at the buffer that had been at |next_buffer_index_|.
- // Returns true if everything in the range was deleted. Otherwise
- // returns false.
- bool TruncateAt(DecodeTimestamp timestamp,
- BufferQueue* deleted_buffers,
- bool is_exclusive);
-
- // Deletes a GOP from the front or back of the range and moves these
- // buffers into |deleted_buffers|. Returns the number of bytes deleted from
- // the range (i.e. the size in bytes of |deleted_buffers|).
- // |highest_frame_| is updated, if necessary.
- // This range must NOT be empty when these methods are called.
- // The GOP being deleted must NOT contain the next buffer position.
- size_t DeleteGOPFromFront(BufferQueue* deleted_buffers);
- size_t DeleteGOPFromBack(BufferQueue* deleted_buffers);
-
- // Gets the range of GOP to secure at least |bytes_to_free| from
- // [|start_timestamp|, |end_timestamp|).
- // Returns the size of the buffers to secure if the buffers of
- // [|start_timestamp|, |end_removal_timestamp|) is removed.
- // Will not update |end_removal_timestamp| if the returned size is 0.
- size_t GetRemovalGOP(DecodeTimestamp start_timestamp,
- DecodeTimestamp end_timestamp,
- size_t bytes_to_free,
- DecodeTimestamp* end_removal_timestamp) const;
-
- // Returns true iff the buffered end time of the first GOP in this range is
- // at or before |media_time|.
- bool FirstGOPEarlierThanMediaTime(DecodeTimestamp media_time) const;
-
- // Indicates whether the GOP at the beginning or end of the range contains the
- // next buffer position.
- bool FirstGOPContainsNextBufferPosition() const;
- bool LastGOPContainsNextBufferPosition() const;
-
- // Returns the timestamp of the next buffer that will be returned from
- // GetNextBuffer(), or kNoTimestamp if the timestamp is unknown.
- DecodeTimestamp GetNextTimestamp() const;
-
- // Returns the start timestamp of the range.
- DecodeTimestamp GetStartTimestamp() const;
-
- // Returns the timestamp of the last buffer in the range.
- DecodeTimestamp GetEndTimestamp() const;
-
- // Returns the timestamp for the end of the buffered region in this range.
- // This is an approximation if the duration for the last buffer in the range
- // is unset.
- DecodeTimestamp GetBufferedEndTimestamp() const;
-
- // Returns whether a buffer with a starting timestamp of |timestamp| would
- // belong in this range. This includes a buffer that would be appended to
- // the end of the range.
- bool BelongsToRange(DecodeTimestamp timestamp) const;
-
- // Returns the highest time from among GetStartTimestamp() and frame decode
- // timestamp (in order in |buffers_| beginning at the first keyframe at or
- // before |timestamp|) for buffers in this range up to and including
- // |timestamp|.
- // Note that |timestamp| must belong to this range.
- DecodeTimestamp FindHighestBufferedTimestampAtOrBefore(
- DecodeTimestamp timestamp) const;
-
- // Gets the timestamp for the keyframe that is after |timestamp|. If
- // there isn't a keyframe in the range after |timestamp| then kNoTimestamp
- // is returned. If |timestamp| is in the "gap" between the value returned by
- // GetStartTimestamp() and the timestamp on the first buffer in |buffers_|,
- // then |timestamp| is returned.
- DecodeTimestamp NextKeyframeTimestamp(DecodeTimestamp timestamp) const;
-
- // Gets the timestamp for the closest keyframe that is <= |timestamp|. If
- // there isn't a keyframe before |timestamp| or |timestamp| is outside
- // this range, then kNoTimestamp is returned.
- DecodeTimestamp KeyframeBeforeTimestamp(DecodeTimestamp timestamp) const;
-
- // Returns true if the range has enough data to seek to the specified
- // |timestamp|, false otherwise.
- bool CanSeekTo(DecodeTimestamp timestamp) const;
-
- // Adds all buffers which overlap [start, end) to the end of |buffers|. If
- // no buffers exist in the range returns false, true otherwise.
- bool GetBuffersInRange(DecodeTimestamp start,
- DecodeTimestamp end,
- BufferQueue* buffers) const;
-
- private:
- typedef std::map<DecodeTimestamp, int> KeyframeMap;
-
- // Helper method for Appending |range| to the end of this range. If |range|'s
- // first buffer time is before the time of the last buffer in this range,
- // returns kNoDecodeTimestamp(). Otherwise, returns the closest time within
- // [|range|'s start time, |range|'s first buffer time] that is at or after the
- // time of the last buffer in this range. This allows |range| to potentially
- // be determined to be adjacent within fudge room for appending to the end of
- // this range, especially if |range| has a start time that is before its first
- // buffer's time.
- DecodeTimestamp NextRangeStartTimeForAppendRangeToEnd(
- const SourceBufferRangeByDts& range) const;
-
- // Helper method to delete buffers in |buffers_| starting at
- // |starting_point|, an iterator in |buffers_|.
- // Returns true if everything in the range was removed. Returns
- // false if the range still contains buffers.
- bool TruncateAt(const BufferQueue::const_iterator& starting_point,
- BufferQueue* deleted_buffers);
-
- // Returns an iterator in |buffers_| pointing to the buffer at |timestamp|.
- // If |skip_given_timestamp| is true, this returns the first buffer with
- // timestamp greater than |timestamp|.
- BufferQueue::const_iterator GetBufferItrAt(DecodeTimestamp timestamp,
- bool skip_given_timestamp) const;
-
- // Returns an iterator in |keyframe_map_| pointing to the next keyframe after
- // |timestamp|. If |skip_given_timestamp| is true, this returns the first
- // keyframe with a timestamp strictly greater than |timestamp|.
- KeyframeMap::const_iterator GetFirstKeyframeAt(
- DecodeTimestamp timestamp,
- bool skip_given_timestamp) const;
-
- // Returns an iterator in |keyframe_map_| pointing to the first keyframe
- // before or at |timestamp|.
- KeyframeMap::const_iterator GetFirstKeyframeAtOrBefore(
- DecodeTimestamp timestamp) const;
-
- // Updates |highest_frame_| to be the frame with highest PTS in the last GOP
- // in this range. If there are no buffers in this range, resets
- // |highest_frame_|.
- // Normally, incremental additions to this range should just use
- // UpdateEndTime(). When removing buffers from this range (which could be out
- // of order presentation vs decode order), inspecting the last buffer in
- // decode order of this range can be insufficient to determine the correct
- // presentation end time of this range. Hence this helper method.
- void UpdateEndTimeUsingLastGOP();
-
- // If the first buffer in this range is the beginning of a coded frame group,
- // |range_start_decode_time_| is the time when the coded frame group begins.
- // This is especially important in muxed media where the first coded frames
- // for each track do not necessarily begin at the same time.
- // |range_start_decode_time_| may be <= the timestamp of the first buffer in
- // |buffers_|. |range_start_decode_time_| is kNoDecodeTimestamp() if this
- // range does not start at the beginning of a coded frame group, which can
- // happen by range removal or split when we don't have a way of knowing,
- // across potentially multiple muxed streams, the coded frame group start
- // timestamp for the new range.
- DecodeTimestamp range_start_decode_time_;
-
- // Index base of all positions in |keyframe_map_|. In other words, the
- // real position of entry |k| of |keyframe_map_| in the range is:
- // keyframe_map_[k] - keyframe_map_index_base_
- int keyframe_map_index_base_;
-
- // Maps keyframe decode timestamps to its index position in |buffers_|.
- KeyframeMap keyframe_map_;
-
- DISALLOW_COPY_AND_ASSIGN(SourceBufferRangeByDts);
-};
-
-} // namespace media
-
-#endif // MEDIA_FILTERS_SOURCE_BUFFER_RANGE_BY_DTS_H_
diff --git a/chromium/media/filters/source_buffer_range_by_pts.cc b/chromium/media/filters/source_buffer_range_by_pts.cc
deleted file mode 100644
index 3226bc310bd..00000000000
--- a/chromium/media/filters/source_buffer_range_by_pts.cc
+++ /dev/null
@@ -1,829 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/filters/source_buffer_range_by_pts.h"
-
-#include <algorithm>
-#include <memory>
-#include <sstream>
-#include <string>
-
-#include "base/logging.h"
-#include "media/base/timestamp_constants.h"
-
-namespace media {
-
-SourceBufferRangeByPts::SourceBufferRangeByPts(
- GapPolicy gap_policy,
- const BufferQueue& new_buffers,
- base::TimeDelta range_start_pts,
- const InterbufferDistanceCB& interbuffer_distance_cb)
- : SourceBufferRange(gap_policy, interbuffer_distance_cb),
- range_start_pts_(range_start_pts),
- keyframe_map_index_base_(0) {
- DVLOG(3) << __func__;
- CHECK(!new_buffers.empty());
- DCHECK(new_buffers.front()->is_key_frame());
- AppendBuffersToEnd(new_buffers, range_start_pts_);
-}
-
-SourceBufferRangeByPts::~SourceBufferRangeByPts() = default;
-
-void SourceBufferRangeByPts::DeleteAll(BufferQueue* deleted_buffers) {
- DVLOG(1) << __func__;
- DVLOG(4) << ToStringForDebugging();
-
- TruncateAt(0u, deleted_buffers);
-}
-
-void SourceBufferRangeByPts::AppendRangeToEnd(
- const SourceBufferRangeByPts& range,
- bool transfer_current_position) {
- DVLOG(1) << __func__;
- DVLOG(4) << ToStringForDebugging();
-
- DCHECK(CanAppendRangeToEnd(range));
- DCHECK(!buffers_.empty());
-
- if (transfer_current_position && range.next_buffer_index_ >= 0)
- next_buffer_index_ = range.next_buffer_index_ + buffers_.size();
-
- AppendBuffersToEnd(range.buffers_,
- NextRangeStartTimeForAppendRangeToEnd(range));
-}
-
-bool SourceBufferRangeByPts::CanAppendRangeToEnd(
- const SourceBufferRangeByPts& range) const {
- DVLOG(1) << __func__;
- DVLOG(4) << ToStringForDebugging();
-
- return CanAppendBuffersToEnd(range.buffers_,
- NextRangeStartTimeForAppendRangeToEnd(range));
-}
-
-bool SourceBufferRangeByPts::AllowableAppendAfterEstimatedDuration(
- const BufferQueue& buffers,
- base::TimeDelta new_buffers_group_start_pts) const {
- if (buffers_.empty() || !buffers_.back()->is_duration_estimated() ||
- buffers.empty() || !buffers.front()->is_key_frame()) {
- return false;
- }
-
- if (new_buffers_group_start_pts == kNoTimestamp) {
- return GetBufferedEndTimestamp() == buffers.front()->timestamp();
- }
-
- return GetBufferedEndTimestamp() == new_buffers_group_start_pts;
-}
-
-bool SourceBufferRangeByPts::CanAppendBuffersToEnd(
- const BufferQueue& buffers,
- base::TimeDelta new_buffers_group_start_pts) const {
- DVLOG(1) << __func__;
- DVLOG(4) << ToStringForDebugging();
-
- DCHECK(!buffers_.empty());
- if (new_buffers_group_start_pts == kNoTimestamp) {
- return buffers.front()->is_key_frame()
- ? (IsNextInPresentationSequence(buffers.front()->timestamp()) ||
- AllowableAppendAfterEstimatedDuration(
- buffers, new_buffers_group_start_pts))
- : IsNextInDecodeSequence(buffers.front()->GetDecodeTimestamp());
- }
- CHECK(buffers.front()->is_key_frame());
- DCHECK(new_buffers_group_start_pts >= GetEndTimestamp());
- DCHECK(buffers.front()->timestamp() >= new_buffers_group_start_pts);
- return IsNextInPresentationSequence(new_buffers_group_start_pts) ||
- AllowableAppendAfterEstimatedDuration(buffers,
- new_buffers_group_start_pts);
-}
-
-void SourceBufferRangeByPts::AppendBuffersToEnd(
- const BufferQueue& new_buffers,
- base::TimeDelta new_buffers_group_start_pts) {
- DVLOG(1) << __func__;
- DVLOG(4) << ToStringForDebugging();
-
- CHECK(buffers_.empty() ||
- CanAppendBuffersToEnd(new_buffers, new_buffers_group_start_pts));
-
- DCHECK(new_buffers_group_start_pts == kNoTimestamp ||
- new_buffers.front()->is_key_frame())
- << range_start_pts_ << ", " << new_buffers.front()->is_key_frame();
-
- // TODO(wolenetz): Uncomment this DCHECK once SAP-Type-2 is more fully
- // supported. It is hit by NewByPts versions of
- // FrameProcessorTest.OOOKeyframePrecededByDependantNonKeyframeShouldWarn. See
- // https://crbug.com/718641.
- // DCHECK(range_start_pts_ == kNoTimestamp ||
- // range_start_pts_ <= new_buffers.front()->timestamp());
-
- AdjustEstimatedDurationForNewAppend(new_buffers);
-
- for (BufferQueue::const_iterator itr = new_buffers.begin();
- itr != new_buffers.end(); ++itr) {
- DCHECK((*itr)->timestamp() != kNoTimestamp);
- DCHECK((*itr)->GetDecodeTimestamp() != kNoDecodeTimestamp());
-
- buffers_.push_back(*itr);
- UpdateEndTime(*itr);
- size_in_bytes_ += (*itr)->data_size();
-
- if ((*itr)->is_key_frame()) {
- keyframe_map_.insert(std::make_pair(
- (*itr)->timestamp(), buffers_.size() - 1 + keyframe_map_index_base_));
- }
- }
-
- DVLOG(4) << __func__ << " Result: " << ToStringForDebugging();
-}
-
-void SourceBufferRangeByPts::Seek(base::TimeDelta timestamp) {
- DVLOG(1) << __func__;
- DVLOG(4) << ToStringForDebugging();
-
- DCHECK(CanSeekTo(timestamp));
- DCHECK(!keyframe_map_.empty());
-
- auto result = GetFirstKeyframeAtOrBefore(timestamp);
- next_buffer_index_ = result->second - keyframe_map_index_base_;
- CHECK_LT(next_buffer_index_, static_cast<int>(buffers_.size()))
- << next_buffer_index_ << ", size = " << buffers_.size();
-}
-
-bool SourceBufferRangeByPts::CanSeekTo(base::TimeDelta timestamp) const {
- DVLOG(1) << __func__;
- DVLOG(4) << ToStringForDebugging();
-
- base::TimeDelta start_timestamp =
- std::max(base::TimeDelta(), GetStartTimestamp() - GetFudgeRoom());
- return !keyframe_map_.empty() && start_timestamp <= timestamp &&
- timestamp < GetBufferedEndTimestamp();
-}
-
-int SourceBufferRangeByPts::GetConfigIdAtTime(base::TimeDelta timestamp) const {
- DVLOG(1) << __func__;
- DVLOG(4) << ToStringForDebugging();
-
- DCHECK(CanSeekTo(timestamp));
- DCHECK(!keyframe_map_.empty());
-
- auto result = GetFirstKeyframeAtOrBefore(timestamp);
- CHECK(result != keyframe_map_.end());
- size_t buffer_index = result->second - keyframe_map_index_base_;
- CHECK_LT(buffer_index, buffers_.size())
- << buffer_index << ", size = " << buffers_.size();
-
- return buffers_[buffer_index]->GetConfigId();
-}
-
-bool SourceBufferRangeByPts::SameConfigThruRange(base::TimeDelta start,
- base::TimeDelta end) const {
- DVLOG(1) << __func__;
- DVLOG(4) << ToStringForDebugging();
-
- DCHECK(CanSeekTo(start));
- DCHECK(CanSeekTo(end));
- DCHECK(start <= end);
- DCHECK(!keyframe_map_.empty());
-
- if (start == end)
- return true;
-
- auto result = GetFirstKeyframeAtOrBefore(start);
- CHECK(result != keyframe_map_.end());
- size_t buffer_index = result->second - keyframe_map_index_base_;
- CHECK_LT(buffer_index, buffers_.size())
- << buffer_index << ", size = " << buffers_.size();
-
- int start_config = buffers_[buffer_index]->GetConfigId();
- buffer_index++;
- while (buffer_index < buffers_.size() &&
- buffers_[buffer_index]->timestamp() <= end) {
- if (buffers_[buffer_index]->GetConfigId() != start_config)
- return false;
- buffer_index++;
- }
-
- return true;
-}
-
-std::unique_ptr<SourceBufferRangeByPts> SourceBufferRangeByPts::SplitRange(
- base::TimeDelta timestamp) {
- DVLOG(1) << __func__;
- DVLOG(4) << ToStringForDebugging();
-
- CHECK(!buffers_.empty());
-
- // Find the first keyframe at or after |timestamp|.
- auto new_beginning_keyframe = GetFirstKeyframeAt(timestamp, false);
-
- // If there is no keyframe at or after |timestamp|, we can't split the range.
- if (new_beginning_keyframe == keyframe_map_.end())
- return nullptr;
-
- // Remove the data beginning at |keyframe_index| from |buffers_| and save it
- // into |removed_buffers|.
- int keyframe_index =
- new_beginning_keyframe->second - keyframe_map_index_base_;
- CHECK_LT(keyframe_index, static_cast<int>(buffers_.size()));
- BufferQueue::iterator starting_point = buffers_.begin() + keyframe_index;
- BufferQueue removed_buffers(starting_point, buffers_.end());
-
- base::TimeDelta new_range_start_pts =
- std::max(timestamp, GetStartTimestamp());
- DCHECK(new_range_start_pts <= removed_buffers.front()->timestamp());
-
- keyframe_map_.erase(new_beginning_keyframe, keyframe_map_.end());
- FreeBufferRange(starting_point, buffers_.end());
- UpdateEndTimeUsingLastGOP();
-
- // Create a new range with |removed_buffers|.
- std::unique_ptr<SourceBufferRangeByPts> split_range =
- std::make_unique<SourceBufferRangeByPts>(gap_policy_, removed_buffers,
- new_range_start_pts,
- interbuffer_distance_cb_);
-
- // If the next buffer position is now in |split_range|, update the state of
- // this range and |split_range| accordingly.
- if (next_buffer_index_ >= static_cast<int>(buffers_.size())) {
- split_range->next_buffer_index_ = next_buffer_index_ - keyframe_index;
-
- int split_range_next_buffer_index = split_range->next_buffer_index_;
- CHECK_GE(split_range_next_buffer_index, 0);
- // Note that a SourceBufferRange's |next_buffer_index_| can be the index
- // of a buffer one beyond what is currently in |buffers_|.
- CHECK_LE(split_range_next_buffer_index,
- static_cast<int>(split_range->buffers_.size()));
-
- ResetNextBufferPosition();
- }
-
- return split_range;
-}
-
-bool SourceBufferRangeByPts::TruncateAt(base::TimeDelta timestamp,
- BufferQueue* deleted_buffers,
- bool is_exclusive) {
- DVLOG(1) << __func__;
- DVLOG(4) << ToStringForDebugging();
-
- // Find the place in |buffers_| where we will begin deleting data, then
- // truncate from there.
- return TruncateAt(GetBufferIndexAt(timestamp, is_exclusive), deleted_buffers);
-}
-
-size_t SourceBufferRangeByPts::DeleteGOPFromFront(
- BufferQueue* deleted_buffers) {
- DVLOG(1) << __func__;
- DVLOG(4) << ToStringForDebugging();
-
- DCHECK(!buffers_.empty());
- DCHECK(!FirstGOPContainsNextBufferPosition());
- DCHECK(deleted_buffers);
-
- int buffers_deleted = 0;
- size_t total_bytes_deleted = 0;
-
- KeyframeMap::const_iterator front = keyframe_map_.begin();
- DCHECK(front != keyframe_map_.end());
-
- // Delete the keyframe at the start of |keyframe_map_|.
- keyframe_map_.erase(front);
-
- // Now we need to delete all the buffers that depend on the keyframe we've
- // just deleted.
- int end_index = keyframe_map_.size() > 0
- ? keyframe_map_.begin()->second - keyframe_map_index_base_
- : buffers_.size();
-
- // Delete buffers from the beginning of the buffered range up until (but not
- // including) the next keyframe.
- for (int i = 0; i < end_index; i++) {
- size_t bytes_deleted = buffers_.front()->data_size();
- DCHECK_GE(size_in_bytes_, bytes_deleted);
- size_in_bytes_ -= bytes_deleted;
- total_bytes_deleted += bytes_deleted;
- deleted_buffers->push_back(buffers_.front());
- buffers_.pop_front();
- ++buffers_deleted;
- }
-
- // Update |keyframe_map_index_base_| to account for the deleted buffers.
- keyframe_map_index_base_ += buffers_deleted;
-
- if (next_buffer_index_ > -1) {
- next_buffer_index_ -= buffers_deleted;
- CHECK_GE(next_buffer_index_, 0)
- << next_buffer_index_ << ", deleted " << buffers_deleted;
- }
-
- // Invalidate range start time if we've deleted the first buffer of the range.
- if (buffers_deleted > 0) {
- range_start_pts_ = kNoTimestamp;
- // Reset the range end time tracking if there are no more buffers in the
- // range.
- if (buffers_.empty())
- highest_frame_ = nullptr;
- }
-
- return total_bytes_deleted;
-}
-
-size_t SourceBufferRangeByPts::DeleteGOPFromBack(BufferQueue* deleted_buffers) {
- DVLOG(1) << __func__;
- DVLOG(4) << ToStringForDebugging();
-
- DCHECK(!buffers_.empty());
- DCHECK(!LastGOPContainsNextBufferPosition());
- DCHECK(deleted_buffers);
-
- // Remove the last GOP's keyframe from the |keyframe_map_|.
- KeyframeMap::const_iterator back = keyframe_map_.end();
- DCHECK_GT(keyframe_map_.size(), 0u);
- --back;
-
- // The index of the first buffer in the last GOP is equal to the new size of
- // |buffers_| after that GOP is deleted.
- size_t goal_size = back->second - keyframe_map_index_base_;
- keyframe_map_.erase(back);
-
- size_t total_bytes_deleted = 0;
- while (buffers_.size() != goal_size) {
- size_t bytes_deleted = buffers_.back()->data_size();
- DCHECK_GE(size_in_bytes_, bytes_deleted);
- size_in_bytes_ -= bytes_deleted;
- total_bytes_deleted += bytes_deleted;
- // We're removing buffers from the back, so push each removed buffer to the
- // front of |deleted_buffers| so that |deleted_buffers| are in nondecreasing
- // order.
- deleted_buffers->push_front(buffers_.back());
- buffers_.pop_back();
- }
-
- UpdateEndTimeUsingLastGOP();
-
- return total_bytes_deleted;
-}
-
-size_t SourceBufferRangeByPts::GetRemovalGOP(
- base::TimeDelta start_timestamp,
- base::TimeDelta end_timestamp,
- size_t total_bytes_to_free,
- base::TimeDelta* removal_end_timestamp) const {
- DVLOG(1) << __func__;
- DVLOG(4) << ToStringForDebugging();
-
- size_t bytes_removed = 0;
-
- auto gop_itr = GetFirstKeyframeAt(start_timestamp, false);
- if (gop_itr == keyframe_map_.end())
- return 0;
- int keyframe_index = gop_itr->second - keyframe_map_index_base_;
- BufferQueue::const_iterator buffer_itr = buffers_.begin() + keyframe_index;
- auto gop_end = keyframe_map_.end();
- if (end_timestamp < GetBufferedEndTimestamp())
- gop_end = GetFirstKeyframeAtOrBefore(end_timestamp);
-
- // Check if the removal range is within a GOP and skip the loop if so.
- // [keyframe]...[start_timestamp]...[end_timestamp]...[keyframe]
- auto gop_itr_prev = gop_itr;
- if (gop_itr_prev != keyframe_map_.begin() && --gop_itr_prev == gop_end)
- gop_end = gop_itr;
-
- while (gop_itr != gop_end && bytes_removed < total_bytes_to_free) {
- ++gop_itr;
-
- size_t gop_size = 0;
- int next_gop_index = gop_itr == keyframe_map_.end()
- ? buffers_.size()
- : gop_itr->second - keyframe_map_index_base_;
- BufferQueue::const_iterator next_gop_start =
- buffers_.begin() + next_gop_index;
- for (; buffer_itr != next_gop_start; ++buffer_itr) {
- gop_size += (*buffer_itr)->data_size();
- }
-
- bytes_removed += gop_size;
- }
- if (bytes_removed > 0) {
- *removal_end_timestamp = gop_itr == keyframe_map_.end()
- ? GetBufferedEndTimestamp()
- : gop_itr->first;
- }
- return bytes_removed;
-}
-
-bool SourceBufferRangeByPts::FirstGOPEarlierThanMediaTime(
- base::TimeDelta media_time) const {
- DVLOG(1) << __func__;
- DVLOG(4) << ToStringForDebugging();
-
- if (keyframe_map_.size() == 1u)
- return (GetBufferedEndTimestamp() <= media_time);
-
- auto second_gop = keyframe_map_.begin();
- ++second_gop;
- return second_gop->first <= media_time;
-}
-
-bool SourceBufferRangeByPts::FirstGOPContainsNextBufferPosition() const {
- DVLOG(1) << __func__;
- DVLOG(4) << ToStringForDebugging();
-
- if (!HasNextBufferPosition())
- return false;
-
- // If there is only one GOP, it must contain the next buffer position.
- if (keyframe_map_.size() == 1u)
- return true;
-
- auto second_gop = keyframe_map_.begin();
- ++second_gop;
- return next_buffer_index_ < second_gop->second - keyframe_map_index_base_;
-}
-
-bool SourceBufferRangeByPts::LastGOPContainsNextBufferPosition() const {
- DVLOG(1) << __func__;
- DVLOG(4) << ToStringForDebugging();
-
- if (!HasNextBufferPosition())
- return false;
-
- // If there is only one GOP, it must contain the next buffer position.
- if (keyframe_map_.size() == 1u)
- return true;
-
- auto last_gop = keyframe_map_.end();
- --last_gop;
- return last_gop->second - keyframe_map_index_base_ <= next_buffer_index_;
-}
-
-base::TimeDelta SourceBufferRangeByPts::GetNextTimestamp() const {
- DVLOG(1) << __func__;
- DVLOG(4) << ToStringForDebugging();
-
- CHECK(!buffers_.empty()) << next_buffer_index_;
- CHECK(HasNextBufferPosition())
- << next_buffer_index_ << ", size=" << buffers_.size();
-
- if (next_buffer_index_ >= static_cast<int>(buffers_.size())) {
- return kNoTimestamp;
- }
-
- return buffers_[next_buffer_index_]->timestamp();
-}
-
-base::TimeDelta SourceBufferRangeByPts::GetStartTimestamp() const {
- DVLOG(1) << __func__;
- DVLOG(4) << ToStringForDebugging();
-
- DCHECK(!buffers_.empty());
- base::TimeDelta start_timestamp = range_start_pts_;
- if (start_timestamp == kNoTimestamp)
- start_timestamp = buffers_.front()->timestamp();
- return start_timestamp;
-}
-
-base::TimeDelta SourceBufferRangeByPts::GetEndTimestamp() const {
- DVLOG(1) << __func__;
- DVLOG(4) << ToStringForDebugging();
-
- DCHECK(!buffers_.empty());
- return highest_frame_->timestamp();
-}
-
-base::TimeDelta SourceBufferRangeByPts::GetBufferedEndTimestamp() const {
- DVLOG(1) << __func__;
- DVLOG(4) << ToStringForDebugging();
-
- DCHECK(!buffers_.empty());
- base::TimeDelta duration = highest_frame_->duration();
-
- // FrameProcessor should protect against unknown buffer durations.
- DCHECK_NE(duration, kNoTimestamp);
-
- // Because media::Ranges<base::TimeDelta>::Add() ignores 0 duration ranges,
- // report 1 microsecond for the last buffer's duration if it is a 0 duration
- // buffer.
- if (duration.is_zero())
- duration = base::TimeDelta::FromMicroseconds(1);
-
- return GetEndTimestamp() + duration;
-}
-
-bool SourceBufferRangeByPts::BelongsToRange(base::TimeDelta timestamp) const {
- DVLOG(1) << __func__;
- DVLOG(4) << ToStringForDebugging();
-
- DCHECK(!buffers_.empty());
-
- return (IsNextInPresentationSequence(timestamp) ||
- (GetStartTimestamp() <= timestamp && timestamp <= GetEndTimestamp()));
-}
-
-base::TimeDelta SourceBufferRangeByPts::FindHighestBufferedTimestampAtOrBefore(
- base::TimeDelta timestamp) const {
- DVLOG(1) << __func__;
- DVLOG(4) << ToStringForDebugging();
-
- DCHECK(!buffers_.empty());
- DCHECK(BelongsToRange(timestamp));
-
- if (keyframe_map_.begin()->first > timestamp) {
- // If the first keyframe in the range starts after |timestamp|, then
- // return the range start time (which could be earlier due to coded frame
- // group signalling.)
- base::TimeDelta range_start = GetStartTimestamp();
- DCHECK(timestamp >= range_start) << "BelongsToRange() semantics failed.";
- return range_start;
- }
-
- if (keyframe_map_.begin()->first == timestamp) {
- return timestamp;
- }
-
- auto key_iter = GetFirstKeyframeAtOrBefore(timestamp);
- DCHECK(key_iter != keyframe_map_.end())
- << "BelongsToRange() semantics failed.";
- DCHECK(key_iter->first <= timestamp);
-
- // Scan forward in |buffers_| to find the highest frame with timestamp <=
- // |timestamp|. Stop once a frame with timestamp > |timestamp| is encountered.
- size_t key_index = key_iter->second - keyframe_map_index_base_;
- SourceBufferRange::BufferQueue::const_iterator search_iter =
- buffers_.begin() + key_index;
- CHECK(search_iter != buffers_.end());
- base::TimeDelta cur_frame_time = (*search_iter)->timestamp();
- base::TimeDelta result = cur_frame_time;
- while (true) {
- result = std::max(result, cur_frame_time);
- search_iter++;
- if (search_iter == buffers_.end())
- return result;
- cur_frame_time = (*search_iter)->timestamp();
- if (cur_frame_time > timestamp)
- return result;
- }
-
- NOTREACHED();
- return base::TimeDelta();
-}
-
-base::TimeDelta SourceBufferRangeByPts::NextKeyframeTimestamp(
- base::TimeDelta timestamp) const {
- DVLOG(1) << __func__;
- DVLOG(4) << ToStringForDebugging();
-
- DCHECK(!keyframe_map_.empty());
-
- if (timestamp < GetStartTimestamp() || timestamp >= GetBufferedEndTimestamp())
- return kNoTimestamp;
-
- auto itr = GetFirstKeyframeAt(timestamp, false);
- if (itr == keyframe_map_.end())
- return kNoTimestamp;
-
- // If the timestamp is inside the gap between the start of the coded frame
- // group and the first buffer, then just pretend there is a keyframe at the
- // specified timestamp.
- if (itr == keyframe_map_.begin() && timestamp > range_start_pts_ &&
- timestamp < itr->first) {
- return timestamp;
- }
-
- return itr->first;
-}
-
-base::TimeDelta SourceBufferRangeByPts::KeyframeBeforeTimestamp(
- base::TimeDelta timestamp) const {
- DVLOG(1) << __func__;
- DVLOG(4) << ToStringForDebugging();
-
- DCHECK(!keyframe_map_.empty());
-
- if (timestamp < GetStartTimestamp() || timestamp >= GetBufferedEndTimestamp())
- return kNoTimestamp;
-
- return GetFirstKeyframeAtOrBefore(timestamp)->first;
-}
-
-bool SourceBufferRangeByPts::GetBuffersInRange(base::TimeDelta start,
- base::TimeDelta end,
- BufferQueue* buffers) const {
- DVLOG(1) << __func__;
- DVLOG(4) << ToStringForDebugging();
-
- // Find the nearest buffer with a timestamp <= start.
- const base::TimeDelta first_timestamp = KeyframeBeforeTimestamp(start);
- if (first_timestamp == kNoTimestamp)
- return false;
-
- // Find all buffers involved in the range.
- const size_t previous_size = buffers->size();
- for (BufferQueue::const_iterator it = GetBufferItrAt(first_timestamp, false);
- it != buffers_.end(); ++it) {
- scoped_refptr<StreamParserBuffer> buffer = *it;
- // Buffers without duration are not supported, so bail if we encounter any.
- if (buffer->duration() == kNoTimestamp ||
- buffer->duration() <= base::TimeDelta()) {
- return false;
- }
- if (buffer->timestamp() >= end)
- break;
-
- if (buffer->timestamp() + buffer->duration() <= start)
- continue;
-
- DCHECK(buffer->is_key_frame());
- buffers->emplace_back(std::move(buffer));
- }
- return previous_size < buffers->size();
-}
-
-base::TimeDelta SourceBufferRangeByPts::NextRangeStartTimeForAppendRangeToEnd(
- const SourceBufferRangeByPts& range) const {
- DCHECK(!buffers_.empty());
- DCHECK(!range.buffers_.empty());
-
- base::TimeDelta next_range_first_buffer_time =
- range.buffers_.front()->timestamp();
- base::TimeDelta this_range_end_time = GetEndTimestamp();
- if (next_range_first_buffer_time < this_range_end_time)
- return kNoTimestamp;
-
- base::TimeDelta next_range_start_time = range.GetStartTimestamp();
- DCHECK(next_range_start_time <= next_range_first_buffer_time);
-
- if (next_range_start_time >= this_range_end_time)
- return next_range_start_time;
-
- return this_range_end_time;
-}
-
-size_t SourceBufferRangeByPts::GetBufferIndexAt(
- base::TimeDelta timestamp,
- bool skip_given_timestamp) const {
- DVLOG(1) << __func__;
- DVLOG(4) << ToStringForDebugging();
-
- // Find the GOP containing |timestamp| (or trivial buffers_.size() if none
- // contain |timestamp|).
- auto gop_iter = GetFirstKeyframeAtOrBefore(timestamp);
- if (gop_iter == keyframe_map_.end())
- return buffers_.size();
-
- // Then scan forward in this GOP in decode sequence for the first frame with
- // PTS >= |timestamp| (or strictly > if |skip_given_timestamp| is true). If
- // this GOP doesn't contain such a frame, returns the index of the keyframe of
- // the next GOP (which could be the index of end() of |buffers_| if this was
- // the last GOP in |buffers_|). We do linear scan of the GOP here because we
- // don't know the DTS for the searched-for frame, and the PTS sequence within
- // a GOP may not match the DTS-sorted sequence of frames within the GOP.
- DCHECK_GT(buffers_.size(), 0u);
- size_t search_index = gop_iter->second - keyframe_map_index_base_;
- SourceBufferRange::BufferQueue::const_iterator search_iter =
- buffers_.begin() + search_index;
- gop_iter++;
-
- SourceBufferRange::BufferQueue::const_iterator next_gop_start =
- gop_iter == keyframe_map_.end()
- ? buffers_.end()
- : buffers_.begin() + (gop_iter->second - keyframe_map_index_base_);
-
- while (search_iter != next_gop_start) {
- if (((*search_iter)->timestamp() > timestamp) ||
- (!skip_given_timestamp && (*search_iter)->timestamp() == timestamp)) {
- break;
- }
- search_index++;
- search_iter++;
- }
-
- return search_index;
-}
-
-SourceBufferRange::BufferQueue::const_iterator
-SourceBufferRangeByPts::GetBufferItrAt(base::TimeDelta timestamp,
- bool skip_given_timestamp) const {
- DVLOG(1) << __func__;
- DVLOG(4) << ToStringForDebugging();
-
- return buffers_.begin() + GetBufferIndexAt(timestamp, skip_given_timestamp);
-}
-
-SourceBufferRangeByPts::KeyframeMap::const_iterator
-SourceBufferRangeByPts::GetFirstKeyframeAt(base::TimeDelta timestamp,
- bool skip_given_timestamp) const {
- DVLOG(1) << __func__;
- DVLOG(4) << ToStringForDebugging();
-
- return skip_given_timestamp ? keyframe_map_.upper_bound(timestamp)
- : keyframe_map_.lower_bound(timestamp);
-}
-
-SourceBufferRangeByPts::KeyframeMap::const_iterator
-SourceBufferRangeByPts::GetFirstKeyframeAtOrBefore(
- base::TimeDelta timestamp) const {
- DVLOG(1) << __func__;
- DVLOG(4) << ToStringForDebugging();
-
- auto result = keyframe_map_.lower_bound(timestamp);
- // lower_bound() returns the first element >= |timestamp|, so we want the
- // previous element if it did not return the element exactly equal to
- // |timestamp|.
- if (result != keyframe_map_.begin() &&
- (result == keyframe_map_.end() || result->first != timestamp)) {
- --result;
- }
- return result;
-}
-
-bool SourceBufferRangeByPts::TruncateAt(const size_t starting_point,
- BufferQueue* deleted_buffers) {
- DVLOG(1) << __func__;
- DVLOG(4) << ToStringForDebugging();
-
- CHECK_LE(starting_point, buffers_.size());
- DCHECK(!deleted_buffers || deleted_buffers->empty());
-
- // Return if we're not deleting anything.
- if (starting_point == buffers_.size())
- return buffers_.empty();
-
- // Reset the next buffer index if we will be deleting the buffer that's next
- // in sequence.
- if (HasNextBufferPosition()) {
- if (static_cast<size_t>(next_buffer_index_) >= starting_point) {
- if (HasNextBuffer() && deleted_buffers) {
- BufferQueue saved(buffers_.begin() + next_buffer_index_,
- buffers_.end());
- deleted_buffers->swap(saved);
- }
- ResetNextBufferPosition();
- }
- }
-
- const BufferQueue::const_iterator starting_point_iter =
- buffers_.begin() + starting_point;
-
- // Remove keyframes from |starting_point| onward.
- KeyframeMap::const_iterator starting_point_keyframe =
- keyframe_map_.lower_bound((*starting_point_iter)->timestamp());
- keyframe_map_.erase(starting_point_keyframe, keyframe_map_.end());
-
- // Remove everything from |starting_point| onward.
- FreeBufferRange(starting_point_iter, buffers_.end());
-
- UpdateEndTimeUsingLastGOP();
- return buffers_.empty();
-}
-
-void SourceBufferRangeByPts::UpdateEndTimeUsingLastGOP() {
- DVLOG(1) << __func__;
- DVLOG(4) << ToStringForDebugging();
-
- if (buffers_.empty()) {
- DVLOG(1) << __func__ << " Empty range, resetting range end";
- highest_frame_ = nullptr;
- return;
- }
-
- highest_frame_ = nullptr;
-
- KeyframeMap::const_iterator last_gop = keyframe_map_.end();
- CHECK_GT(keyframe_map_.size(), 0u);
- --last_gop;
-
- // Iterate through the frames of the last GOP in this range, finding the
- // frame with the highest PTS.
- for (BufferQueue::const_iterator buffer_itr =
- buffers_.begin() + (last_gop->second - keyframe_map_index_base_);
- buffer_itr != buffers_.end(); ++buffer_itr) {
- UpdateEndTime(*buffer_itr);
- }
-
- DVLOG(1) << __func__ << " Updated range end time to "
- << highest_frame_->timestamp() << ", "
- << highest_frame_->timestamp() + highest_frame_->duration();
-}
-
-std::string SourceBufferRangeByPts::ToStringForDebugging() const {
- std::stringstream result;
-
-#if !defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)
- result << "keyframe_map_index_base_=" << keyframe_map_index_base_
- << ", buffers.size()=" << buffers_.size()
- << ", keyframe_map_.size()=" << keyframe_map_.size()
- << ", keyframe_map_:\n";
- for (const auto& entry : keyframe_map_) {
- result << "\t pts " << entry.first.InMicroseconds()
- << ", unadjusted idx = " << entry.second << "\n";
- }
-#endif // !defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)
-
- return result.str();
-}
-
-} // namespace media
diff --git a/chromium/media/filters/source_buffer_range_by_pts.h b/chromium/media/filters/source_buffer_range_by_pts.h
deleted file mode 100644
index 353f744f9d0..00000000000
--- a/chromium/media/filters/source_buffer_range_by_pts.h
+++ /dev/null
@@ -1,275 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_FILTERS_SOURCE_BUFFER_RANGE_BY_PTS_H_
-#define MEDIA_FILTERS_SOURCE_BUFFER_RANGE_BY_PTS_H_
-
-#include <stddef.h>
-#include <map>
-#include <memory>
-
-#include "media/filters/source_buffer_range.h"
-
-namespace media {
-
-class MEDIA_EXPORT SourceBufferRangeByPts : public SourceBufferRange {
- public:
- // Creates a range with |new_buffers|. |new_buffers| cannot be empty and the
- // front of |new_buffers| must be a keyframe.
- // |range_start_pts| refers to the starting timestamp for the coded
- // frame group to which these buffers belong.
- SourceBufferRangeByPts(GapPolicy gap_policy,
- const BufferQueue& new_buffers,
- base::TimeDelta range_start_pts,
- const InterbufferDistanceCB& interbuffer_distance_cb);
-
- ~SourceBufferRangeByPts() override;
-
- void DeleteAll(BufferQueue* deleted_buffers) override;
-
- // Appends the buffers from |range| into this range.
- // The first buffer in |range| must come directly after the last buffer
- // in this range.
- // If |transfer_current_position| is true, |range|'s |next_buffer_index_|
- // is transfered to this SourceBufferRange.
- // Note: Use these only to merge existing ranges. |range|'s first buffer
- // timestamp must be adjacent to this range. No group start timestamp
- // adjacency is involved in these methods.
- // During append, |highest_frame_| is updated, if necessary.
- void AppendRangeToEnd(const SourceBufferRangeByPts& range,
- bool transfer_current_position);
- bool CanAppendRangeToEnd(const SourceBufferRangeByPts& range) const;
-
- // Appends |buffers| to the end of the range and updates |keyframe_map_| as
- // it encounters new keyframes.
- // If |new_buffers_group_start_pts| is kNoTimestamp, then the
- // first buffer in |buffers| must come directly after the last buffer in this
- // range (within the fudge room) - specifically, if the first buffer in
- // |buffers| is not a keyframe, then it must be next in DTS order w.r.t. last
- // buffer in |buffers|. Otherwise, it's a keyframe that must be next in PTS
- // order w.r.t. |highest_frame_| or be immediately adjacent to the last buffer
- // in this range if that buffer has estimated duration (only allowed in WebM
- // streams).
- // If |new_buffers_group_start_pts| is set otherwise, then that time must come
- // directly after |highest_frame_| (within the fudge room), or directly after
- // the last buffered frame if it has estimated duration (only allowed in WebM
- // streams), and the first buffer in |buffers| must be a keyframe.
- // The latter scenario is required when a muxed coded frame group has such a
- // large jagged start across tracks that its first buffer is not within the
- // fudge room, yet its group start was.
- // The conditions around estimated duration are handled by
- // AllowableAppendAfterEstimatedDuration, and are intended to solve the edge
- // case in the SourceBufferStreamTest
- // MergeAllowedIfRangeEndTimeWithEstimatedDurationMatchesNextRangeStart.
- // During append, |highest_frame_| is updated, if necessary.
- void AppendBuffersToEnd(const BufferQueue& buffers,
- base::TimeDelta new_buffers_group_start_timestamp);
- bool AllowableAppendAfterEstimatedDuration(
- const BufferQueue& buffers,
- base::TimeDelta new_buffers_group_start_pts) const;
- bool CanAppendBuffersToEnd(const BufferQueue& buffers,
- base::TimeDelta new_buffers_group_start_pts) const;
-
- // Updates |next_buffer_index_| to point to the keyframe with presentation
- // timestamp at or before |timestamp|. Assumes |timestamp| is valid and in
- // this range.
- void Seek(base::TimeDelta timestamp);
-
- // Returns true if the range has enough data to seek to the specified
- // |timestamp|, false otherwise.
- bool CanSeekTo(base::TimeDelta timestamp) const;
-
- // Return the config ID for the buffer at |timestamp|. Precondition: callers
- // must first verify CanSeekTo(timestamp) == true.
- int GetConfigIdAtTime(base::TimeDelta timestamp) const;
-
- // Return true if all buffers in range of [start, end] have the same config
- // ID. Precondition: callers must first verify that
- // CanSeekTo(start) == CanSeekTo(end) == true.
- bool SameConfigThruRange(base::TimeDelta start, base::TimeDelta end) const;
-
- // Finds the next keyframe from |buffers_| starting at or after |timestamp|
- // and creates and returns a new SourceBufferRangeByPts with the buffers from
- // that keyframe onward. The buffers in the new SourceBufferRangeByPts are
- // moved out of this range. The start time of the new SourceBufferRangeByPts
- // is set to the later of |timestamp| and this range's GetStartTimestamp().
- // Note that this may result in temporary overlap of the new range and this
- // range until the caller truncates any nonkeyframes out of this range with
- // time > |timestamp|. If there is no keyframe at or after |timestamp|,
- // SplitRange() returns null and this range is unmodified. This range can
- // become empty if |timestamp| <= the PTS of the first buffer in this range.
- // |highest_frame_| is updated, if necessary.
- std::unique_ptr<SourceBufferRangeByPts> SplitRange(base::TimeDelta timestamp);
-
- // Deletes the buffers from this range starting at |timestamp|, exclusive if
- // |is_exclusive| is true, inclusive otherwise.
- // Resets |next_buffer_index_| if the buffer at |next_buffer_index_| was
- // deleted, and deletes the |keyframe_map_| entries for the buffers that
- // were removed.
- // |highest_frame_| is updated, if necessary.
- // |deleted_buffers| contains the buffers that were deleted from this range,
- // starting at the buffer that had been at |next_buffer_index_|.
- // Returns true if everything in the range was deleted. Otherwise
- // returns false.
- bool TruncateAt(base::TimeDelta timestamp,
- BufferQueue* deleted_buffers,
- bool is_exclusive);
-
- // Deletes a GOP from the front or back of the range and moves these
- // buffers into |deleted_buffers|. Returns the number of bytes deleted from
- // the range (i.e. the size in bytes of |deleted_buffers|).
- // |highest_frame_| is updated, if necessary.
- // This range must NOT be empty when these methods are called.
- // The GOP being deleted must NOT contain the next buffer position.
- size_t DeleteGOPFromFront(BufferQueue* deleted_buffers);
- size_t DeleteGOPFromBack(BufferQueue* deleted_buffers);
-
- // Gets the range of GOP to secure at least |bytes_to_free| from
- // [|start_timestamp|, |end_timestamp|).
- // Returns the size of the buffers to secure if the buffers of
- // [|start_timestamp|, |end_removal_timestamp|) is removed.
- // Will not update |end_removal_timestamp| if the returned size is 0.
- size_t GetRemovalGOP(base::TimeDelta start_timestamp,
- base::TimeDelta end_timestamp,
- size_t bytes_to_free,
- base::TimeDelta* end_removal_timestamp) const;
-
- // Returns true iff the buffered end time of the first GOP in this range is
- // at or before |media_time|.
- bool FirstGOPEarlierThanMediaTime(base::TimeDelta media_time) const;
-
- // Indicates whether the GOP at the beginning or end of the range contains the
- // next buffer position.
- bool FirstGOPContainsNextBufferPosition() const;
- bool LastGOPContainsNextBufferPosition() const;
-
- // Returns the timestamp of the next buffer that will be returned from
- // GetNextBuffer(), or kNoTimestamp if the timestamp is unknown.
- base::TimeDelta GetNextTimestamp() const;
-
- // Returns the start timestamp of the range.
- base::TimeDelta GetStartTimestamp() const;
-
- // Returns the highest presentation timestamp of frames in the last GOP in the
- // range.
- base::TimeDelta GetEndTimestamp() const;
-
- // Returns the timestamp for the end of the buffered region in this range.
- // This is an approximation if the duration for the buffer with highest PTS in
- // the last GOP in the range is unset.
- base::TimeDelta GetBufferedEndTimestamp() const;
-
- // Returns whether a buffer with a starting timestamp of |timestamp| would
- // belong in this range. This includes a buffer that would be appended to
- // the end of the range.
- bool BelongsToRange(base::TimeDelta timestamp) const;
-
- // Returns the highest time from among GetStartTimestamp() and frame timestamp
- // (in order in |buffers_| beginning at the first keyframe at or before
- // |timestamp|) for buffers in this range up to and including |timestamp|.
- // Note that |timestamp| must belong to this range.
- base::TimeDelta FindHighestBufferedTimestampAtOrBefore(
- base::TimeDelta timestamp) const;
-
- // Gets the timestamp for the keyframe that is at or after |timestamp|. If
- // there isn't such a keyframe in the range then kNoTimestamp is returned.
- // If |timestamp| is in the "gap" between the value returned by
- // GetStartTimestamp() and the timestamp on the first buffer in |buffers_|,
- // then |timestamp| is returned.
- base::TimeDelta NextKeyframeTimestamp(base::TimeDelta timestamp) const;
-
- // Gets the timestamp for the closest keyframe that is <= |timestamp|. If
- // there isn't a keyframe before |timestamp| or |timestamp| is outside
- // this range, then kNoTimestamp is returned.
- base::TimeDelta KeyframeBeforeTimestamp(base::TimeDelta timestamp) const;
-
- // Adds all buffers which overlap [start, end) to the end of |buffers|. If
- // no buffers exist in the range returns false, true otherwise.
- // This method is used for finding audio splice overlap buffers, so all
- // buffers are expected to be keyframes here (so DTS doesn't matter at all).
- bool GetBuffersInRange(base::TimeDelta start,
- base::TimeDelta end,
- BufferQueue* buffers) const;
-
- private:
- typedef std::map<base::TimeDelta, int> KeyframeMap;
-
- // Helper method for Appending |range| to the end of this range. If |range|'s
- // first buffer time is before the time of the last buffer in this range,
- // returns kNoTimestamp. Otherwise, returns the closest time within
- // [|range|'s start time, |range|'s first buffer time] that is at or after the
- // this range's GetEndTimestamp(). This allows |range| to potentially be
- // determined to be adjacent within fudge room for appending to the end of
- // this range, especially if |range| has a start time that is before its first
- // buffer's time.
- base::TimeDelta NextRangeStartTimeForAppendRangeToEnd(
- const SourceBufferRangeByPts& range) const;
-
- // Returns an index (or iterator) into |buffers_| pointing to the first buffer
- // at or after |timestamp|. If |skip_given_timestamp| is true, this returns
- // the first buffer with timestamp strictly greater than |timestamp|. If
- // |buffers_| has no such buffer, returns |buffers_.size()| (or
- // |buffers_.end()|).
- size_t GetBufferIndexAt(base::TimeDelta timestamp,
- bool skip_given_timestamp) const;
- BufferQueue::const_iterator GetBufferItrAt(base::TimeDelta timestamp,
- bool skip_given_timestamp) const;
-
- // Returns an iterator in |keyframe_map_| pointing to the next keyframe after
- // |timestamp|. If |skip_given_timestamp| is true, this returns the first
- // keyframe with a timestamp strictly greater than |timestamp|.
- KeyframeMap::const_iterator GetFirstKeyframeAt(
- base::TimeDelta timestamp,
- bool skip_given_timestamp) const;
-
- // Returns an iterator in |keyframe_map_| pointing to the first keyframe
- // before or at |timestamp|.
- KeyframeMap::const_iterator GetFirstKeyframeAtOrBefore(
- base::TimeDelta timestamp) const;
-
- // Helper method to delete buffers in |buffers_| starting at
- // |starting_point|, an index in |buffers_|.
- // Returns true if everything in the range was removed. Returns
- // false if the range still contains buffers.
- bool TruncateAt(const size_t starting_point, BufferQueue* deleted_buffers);
-
- // Updates |highest_frame_| to be the frame with highest PTS in the last GOP
- // in this range. If there are no buffers in this range, resets
- // |highest_frame_|.
- // Normally, incremental additions to this range should just use
- // UpdateEndTime(). When removing buffers from this range (which could be out
- // of order presentation vs decode order), inspecting the last buffer in
- // decode order of this range can be insufficient to determine the correct
- // presentation end time of this range. Hence this helper method.
- void UpdateEndTimeUsingLastGOP();
-
- // Helper for debugging state.
- std::string ToStringForDebugging() const;
-
- // If the first buffer in this range is the beginning of a coded frame group,
- // |range_start_pts_| is the presentation time when the coded frame group
- // begins. This is especially important in muxed media where the first coded
- // frames for each track do not necessarily begin at the same time.
- // |range_start_pts_| may be <= the timestamp of the first buffer in
- // |buffers_|. |range_start_pts_| is kNoTimestamp if this range does not start
- // at the beginning of a coded frame group, which can happen by range removal
- // or split when we don't have a way of knowing, across potentially multiple
- // muxed streams, the coded frame group start timestamp for the new range.
- base::TimeDelta range_start_pts_;
-
- // Index base of all positions in |keyframe_map_|. In other words, the
- // real position of entry |k| of |keyframe_map_| in the range is:
- // keyframe_map_[k] - keyframe_map_index_base_
- int keyframe_map_index_base_;
-
- // Maps keyframe presentation timestamps to GOP start index of |buffers_|
- // (with index adjusted by |keyframe_map_index_base_|);
- KeyframeMap keyframe_map_;
-
- DISALLOW_COPY_AND_ASSIGN(SourceBufferRangeByPts);
-};
-
-} // namespace media
-
-#endif // MEDIA_FILTERS_SOURCE_BUFFER_RANGE_BY_PTS_H_
diff --git a/chromium/media/filters/source_buffer_state.cc b/chromium/media/filters/source_buffer_state.cc
index 4e723a0ffe6..fdb3d3547f7 100644
--- a/chromium/media/filters/source_buffer_state.cc
+++ b/chromium/media/filters/source_buffer_state.cc
@@ -313,7 +313,7 @@ bool SourceBufferState::EvictCodedFrames(base::TimeDelta media_time,
}
void SourceBufferState::OnMemoryPressure(
- DecodeTimestamp media_time,
+ base::TimeDelta media_time,
base::MemoryPressureListener::MemoryPressureLevel memory_pressure_level,
bool force_instant_gc) {
// Notify video streams about memory pressure first, since video typically
diff --git a/chromium/media/filters/source_buffer_state.h b/chromium/media/filters/source_buffer_state.h
index 7bac0c5616d..989c44e27c0 100644
--- a/chromium/media/filters/source_buffer_state.h
+++ b/chromium/media/filters/source_buffer_state.h
@@ -86,11 +86,11 @@ class MEDIA_EXPORT SourceBufferState {
// Gets invoked when the system is experiencing memory pressure, i.e. there's
// not enough free memory. The |media_time| is the media playback position at
// the time of memory pressure notification (needed for accurate GC). The
- // |memory_pressure_listener| indicates memory pressure severity. The
+ // |memory_pressure_level| indicates memory pressure severity. The
// |force_instant_gc| is used to force the MSE garbage collection algorithm to
// be run right away, without waiting for the next append.
void OnMemoryPressure(
- DecodeTimestamp media_time,
+ base::TimeDelta media_time,
base::MemoryPressureListener::MemoryPressureLevel memory_pressure_level,
bool force_instant_gc);
diff --git a/chromium/media/filters/source_buffer_state_unittest.cc b/chromium/media/filters/source_buffer_state_unittest.cc
index 2f3139eabab..e99e9a59cc6 100644
--- a/chromium/media/filters/source_buffer_state_unittest.cc
+++ b/chromium/media/filters/source_buffer_state_unittest.cc
@@ -22,7 +22,6 @@ namespace media {
using testing::_;
using testing::SaveArg;
-using testing::Values;
namespace {
@@ -37,7 +36,7 @@ VideoDecoderConfig CreateVideoConfig(VideoCodec codec, int w, int h) {
gfx::Rect visible_rect(size);
return VideoDecoderConfig(codec, VIDEO_CODEC_PROFILE_UNKNOWN,
PIXEL_FORMAT_I420, VideoColorSpace::REC709(),
- VIDEO_ROTATION_0, size, visible_rect, size,
+ kNoTransformation, size, visible_rect, size,
EmptyExtraData(), Unencrypted());
}
@@ -55,18 +54,15 @@ void InvokeCbAndSaveResult(const base::Callback<bool()>& cb, bool* result) {
}
}
-class SourceBufferStateTest
- : public ::testing::TestWithParam<ChunkDemuxerStream::RangeApi> {
+class SourceBufferStateTest : public ::testing::Test {
public:
- SourceBufferStateTest() : mock_stream_parser_(nullptr) {
- range_api_ = GetParam();
- }
+ SourceBufferStateTest() : mock_stream_parser_(nullptr) {}
std::unique_ptr<SourceBufferState> CreateSourceBufferState() {
std::unique_ptr<FrameProcessor> frame_processor = base::WrapUnique(
new FrameProcessor(base::Bind(&SourceBufferStateTest::OnUpdateDuration,
base::Unretained(this)),
- &media_log_, range_api_));
+ &media_log_));
mock_stream_parser_ = new testing::StrictMock<MockStreamParser>();
return base::WrapUnique(new SourceBufferState(
base::WrapUnique(mock_stream_parser_), std::move(frame_processor),
@@ -145,8 +141,8 @@ class SourceBufferStateTest
ChunkDemuxerStream* CreateDemuxerStream(DemuxerStream::Type type) {
static unsigned track_id = 0;
- demuxer_streams_.push_back(base::WrapUnique(new ChunkDemuxerStream(
- type, base::NumberToString(++track_id), range_api_)));
+ demuxer_streams_.push_back(base::WrapUnique(
+ new ChunkDemuxerStream(type, base::NumberToString(++track_id))));
return demuxer_streams_.back().get();
}
@@ -154,10 +150,9 @@ class SourceBufferStateTest
std::vector<std::unique_ptr<ChunkDemuxerStream>> demuxer_streams_;
MockStreamParser* mock_stream_parser_;
StreamParser::NewConfigCB new_config_cb_;
- ChunkDemuxerStream::RangeApi range_api_;
};
-TEST_P(SourceBufferStateTest, InitSingleAudioTrack) {
+TEST_F(SourceBufferStateTest, InitSingleAudioTrack) {
std::unique_ptr<SourceBufferState> sbs =
CreateAndInitSourceBufferState("vorbis");
@@ -170,7 +165,7 @@ TEST_P(SourceBufferStateTest, InitSingleAudioTrack) {
EXPECT_TRUE(AppendDataAndReportTracks(sbs, std::move(tracks)));
}
-TEST_P(SourceBufferStateTest, InitSingleVideoTrack) {
+TEST_F(SourceBufferStateTest, InitSingleVideoTrack) {
std::unique_ptr<SourceBufferState> sbs =
CreateAndInitSourceBufferState("vp8");
@@ -183,7 +178,7 @@ TEST_P(SourceBufferStateTest, InitSingleVideoTrack) {
EXPECT_TRUE(AppendDataAndReportTracks(sbs, std::move(tracks)));
}
-TEST_P(SourceBufferStateTest, InitMultipleTracks) {
+TEST_F(SourceBufferStateTest, InitMultipleTracks) {
std::unique_ptr<SourceBufferState> sbs =
CreateAndInitSourceBufferState("vorbis,vp8,opus,vp9");
@@ -203,7 +198,7 @@ TEST_P(SourceBufferStateTest, InitMultipleTracks) {
EXPECT_TRUE(AppendDataAndReportTracks(sbs, std::move(tracks)));
}
-TEST_P(SourceBufferStateTest, AudioStreamMismatchesExpectedCodecs) {
+TEST_F(SourceBufferStateTest, AudioStreamMismatchesExpectedCodecs) {
std::unique_ptr<SourceBufferState> sbs =
CreateAndInitSourceBufferState("opus");
std::unique_ptr<MediaTracks> tracks(new MediaTracks());
@@ -212,7 +207,7 @@ TEST_P(SourceBufferStateTest, AudioStreamMismatchesExpectedCodecs) {
EXPECT_FALSE(AppendDataAndReportTracks(sbs, std::move(tracks)));
}
-TEST_P(SourceBufferStateTest, VideoStreamMismatchesExpectedCodecs) {
+TEST_F(SourceBufferStateTest, VideoStreamMismatchesExpectedCodecs) {
std::unique_ptr<SourceBufferState> sbs =
CreateAndInitSourceBufferState("vp9");
std::unique_ptr<MediaTracks> tracks(new MediaTracks());
@@ -221,7 +216,7 @@ TEST_P(SourceBufferStateTest, VideoStreamMismatchesExpectedCodecs) {
EXPECT_FALSE(AppendDataAndReportTracks(sbs, std::move(tracks)));
}
-TEST_P(SourceBufferStateTest, MissingExpectedAudioStream) {
+TEST_F(SourceBufferStateTest, MissingExpectedAudioStream) {
std::unique_ptr<SourceBufferState> sbs =
CreateAndInitSourceBufferState("opus,vp9");
std::unique_ptr<MediaTracks> tracks(new MediaTracks());
@@ -232,7 +227,7 @@ TEST_P(SourceBufferStateTest, MissingExpectedAudioStream) {
EXPECT_FALSE(AppendDataAndReportTracks(sbs, std::move(tracks)));
}
-TEST_P(SourceBufferStateTest, MissingExpectedVideoStream) {
+TEST_F(SourceBufferStateTest, MissingExpectedVideoStream) {
std::unique_ptr<SourceBufferState> sbs =
CreateAndInitSourceBufferState("opus,vp9");
std::unique_ptr<MediaTracks> tracks(new MediaTracks());
@@ -243,7 +238,7 @@ TEST_P(SourceBufferStateTest, MissingExpectedVideoStream) {
EXPECT_FALSE(AppendDataAndReportTracks(sbs, std::move(tracks)));
}
-TEST_P(SourceBufferStateTest, TrackIdsChangeInSecondInitSegment) {
+TEST_F(SourceBufferStateTest, TrackIdsChangeInSecondInitSegment) {
std::unique_ptr<SourceBufferState> sbs =
CreateAndInitSourceBufferState("opus,vp9");
@@ -268,7 +263,7 @@ TEST_P(SourceBufferStateTest, TrackIdsChangeInSecondInitSegment) {
AppendDataAndReportTracks(sbs, std::move(tracks2));
}
-TEST_P(SourceBufferStateTest, TrackIdChangeWithTwoAudioTracks) {
+TEST_F(SourceBufferStateTest, TrackIdChangeWithTwoAudioTracks) {
std::unique_ptr<SourceBufferState> sbs =
CreateAndInitSourceBufferState("vorbis,opus");
@@ -298,7 +293,7 @@ TEST_P(SourceBufferStateTest, TrackIdChangeWithTwoAudioTracks) {
EXPECT_FALSE(AppendDataAndReportTracks(sbs, std::move(tracks3)));
}
-TEST_P(SourceBufferStateTest, TrackIdChangeWithTwoVideoTracks) {
+TEST_F(SourceBufferStateTest, TrackIdChangeWithTwoVideoTracks) {
std::unique_ptr<SourceBufferState> sbs =
CreateAndInitSourceBufferState("vp8,vp9");
@@ -328,7 +323,7 @@ TEST_P(SourceBufferStateTest, TrackIdChangeWithTwoVideoTracks) {
EXPECT_FALSE(AppendDataAndReportTracks(sbs, std::move(tracks3)));
}
-TEST_P(SourceBufferStateTest, TrackIdsSwappedInSecondInitSegment) {
+TEST_F(SourceBufferStateTest, TrackIdsSwappedInSecondInitSegment) {
std::unique_ptr<SourceBufferState> sbs =
CreateAndInitSourceBufferState("opus,vp9");
@@ -350,11 +345,4 @@ TEST_P(SourceBufferStateTest, TrackIdsSwappedInSecondInitSegment) {
AppendDataAndReportTracks(sbs, std::move(tracks2));
}
-INSTANTIATE_TEST_SUITE_P(LegacyByDts,
- SourceBufferStateTest,
- Values(ChunkDemuxerStream::RangeApi::kLegacyByDts));
-INSTANTIATE_TEST_SUITE_P(NewByPts,
- SourceBufferStateTest,
- Values(ChunkDemuxerStream::RangeApi::kNewByPts));
-
} // namespace media
diff --git a/chromium/media/filters/source_buffer_stream.cc b/chromium/media/filters/source_buffer_stream.cc
index f6c02b793a7..1645bbb5739 100644
--- a/chromium/media/filters/source_buffer_stream.cc
+++ b/chromium/media/filters/source_buffer_stream.cc
@@ -17,8 +17,6 @@
#include "media/base/demuxer_memory_limit.h"
#include "media/base/media_switches.h"
#include "media/base/timestamp_constants.h"
-#include "media/filters/source_buffer_range_by_dts.h"
-#include "media/filters/source_buffer_range_by_pts.h"
namespace media {
@@ -37,32 +35,9 @@ const int kMaxGarbageCollectAlgorithmWarningLogs = 20;
// Limit the number of MEDIA_LOG() logs for splice overlap trimming.
const int kMaxAudioSpliceLogs = 20;
-// Limit the number of MEDIA_LOG() logs for same DTS for non-keyframe followed
-// by keyframe. Prior to relaxing the "media segments must begin with a
-// keyframe" requirement, we issued decode error for this situation. That was
-// likely too strict, and now that the keyframe requirement is relaxed, we have
-// no knowledge of media segment boundaries here. Now, we log but don't trigger
-// decode error, since we allow these sequences which may cause extra decoder
-// work or other side-effects.
-const int kMaxStrangeSameTimestampsLogs = 20;
-
// Helper method that returns true if |ranges| is sorted in increasing order,
// false otherwise.
-bool IsRangeListSorted(
- const typename SourceBufferStream<SourceBufferRangeByDts>::RangeList&
- ranges) {
- DecodeTimestamp prev = kNoDecodeTimestamp();
- for (const auto& range_ptr : ranges) {
- if (prev != kNoDecodeTimestamp() && prev >= range_ptr->GetStartTimestamp())
- return false;
- prev = range_ptr->GetBufferedEndTimestamp();
- }
- return true;
-}
-
-bool IsRangeListSorted(
- const typename SourceBufferStream<SourceBufferRangeByPts>::RangeList&
- ranges) {
+bool IsRangeListSorted(const SourceBufferStream::RangeList& ranges) {
base::TimeDelta prev = kNoTimestamp;
for (const auto& range_ptr : ranges) {
if (prev != kNoTimestamp && prev >= range_ptr->GetStartTimestamp())
@@ -110,8 +85,7 @@ std::string StatusToString(const SourceBufferStreamStatus& status) {
}
// Helper method for logging, converts a range into a readable string.
-template <typename RangeClass>
-std::string RangeToString(const RangeClass& range) {
+std::string RangeToString(const SourceBufferRange& range) {
if (range.size_in_bytes() == 0) {
return "[]";
}
@@ -123,9 +97,7 @@ std::string RangeToString(const RangeClass& range) {
}
// Helper method for logging, converts a set of ranges into a readable string.
-template <typename RangeClass>
-std::string RangesToString(
- const typename SourceBufferStream<RangeClass>::RangeList& ranges) {
+std::string RangesToString(const SourceBufferStream::RangeList& ranges) {
if (ranges.empty())
return "<EMPTY>";
@@ -138,9 +110,8 @@ std::string RangesToString(
return ss.str();
}
-template <typename RangeClass>
std::string BufferQueueBuffersToLogString(
- const typename SourceBufferStream<RangeClass>::BufferQueue& buffers) {
+ const SourceBufferStream::BufferQueue& buffers) {
std::stringstream result;
result << "Buffers:\n";
@@ -154,14 +125,13 @@ std::string BufferQueueBuffersToLogString(
return result.str();
}
-template <typename RangeClass>
std::string BufferQueueMetadataToLogString(
- const typename SourceBufferStream<RangeClass>::BufferQueue& buffers) {
+ const SourceBufferStream::BufferQueue& buffers) {
std::stringstream result;
- DecodeTimestamp pts_interval_start;
- DecodeTimestamp pts_interval_end;
- SourceBufferStream<SourceBufferRangeByPts>::GetTimestampInterval(
- buffers, &pts_interval_start, &pts_interval_end);
+ base::TimeDelta pts_interval_start;
+ base::TimeDelta pts_interval_end;
+ SourceBufferStream::GetTimestampInterval(buffers, &pts_interval_start,
+ &pts_interval_end);
result << "dts=[" << buffers.front()->GetDecodeTimestamp().InMicroseconds()
<< "us;" << buffers.back()->GetDecodeTimestamp().InMicroseconds()
@@ -171,7 +141,6 @@ std::string BufferQueueMetadataToLogString(
return result.str();
}
-template <typename RangeClass>
SourceBufferRange::GapPolicy TypeToGapPolicy(SourceBufferStreamType type) {
switch (type) {
case SourceBufferStreamType::kAudio:
@@ -187,15 +156,13 @@ SourceBufferRange::GapPolicy TypeToGapPolicy(SourceBufferStreamType type) {
} // namespace
-template <typename RangeClass>
-SourceBufferStream<RangeClass>::SourceBufferStream(
- const AudioDecoderConfig& audio_config,
- MediaLog* media_log)
+SourceBufferStream::SourceBufferStream(const AudioDecoderConfig& audio_config,
+ MediaLog* media_log)
: media_log_(media_log),
seek_buffer_timestamp_(kNoTimestamp),
- coded_frame_group_start_time_(kNoDecodeTimestamp()),
+ coded_frame_group_start_pts_(kNoTimestamp),
range_for_next_append_(ranges_.end()),
- highest_output_buffer_timestamp_(kNoDecodeTimestamp()),
+ highest_output_buffer_timestamp_(kNoTimestamp),
max_interbuffer_distance_(
base::TimeDelta::FromMilliseconds(kMinimumInterbufferDistanceInMs)),
memory_limit_(GetDemuxerStreamAudioMemoryLimit()) {
@@ -203,15 +170,13 @@ SourceBufferStream<RangeClass>::SourceBufferStream(
audio_configs_.push_back(audio_config);
}
-template <typename RangeClass>
-SourceBufferStream<RangeClass>::SourceBufferStream(
- const VideoDecoderConfig& video_config,
- MediaLog* media_log)
+SourceBufferStream::SourceBufferStream(const VideoDecoderConfig& video_config,
+ MediaLog* media_log)
: media_log_(media_log),
seek_buffer_timestamp_(kNoTimestamp),
- coded_frame_group_start_time_(kNoDecodeTimestamp()),
+ coded_frame_group_start_pts_(kNoTimestamp),
range_for_next_append_(ranges_.end()),
- highest_output_buffer_timestamp_(kNoDecodeTimestamp()),
+ highest_output_buffer_timestamp_(kNoTimestamp),
max_interbuffer_distance_(
base::TimeDelta::FromMilliseconds(kMinimumInterbufferDistanceInMs)),
memory_limit_(GetDemuxerStreamVideoMemoryLimit()) {
@@ -219,60 +184,36 @@ SourceBufferStream<RangeClass>::SourceBufferStream(
video_configs_.push_back(video_config);
}
-template <typename RangeClass>
-SourceBufferStream<RangeClass>::SourceBufferStream(
- const TextTrackConfig& text_config,
- MediaLog* media_log)
+SourceBufferStream::SourceBufferStream(const TextTrackConfig& text_config,
+ MediaLog* media_log)
: media_log_(media_log),
text_track_config_(text_config),
seek_buffer_timestamp_(kNoTimestamp),
- coded_frame_group_start_time_(kNoDecodeTimestamp()),
+ coded_frame_group_start_pts_(kNoTimestamp),
range_for_next_append_(ranges_.end()),
- highest_output_buffer_timestamp_(kNoDecodeTimestamp()),
+ highest_output_buffer_timestamp_(kNoTimestamp),
max_interbuffer_distance_(
base::TimeDelta::FromMilliseconds(kMinimumInterbufferDistanceInMs)),
memory_limit_(GetDemuxerStreamAudioMemoryLimit()) {}
-template <typename RangeClass>
-SourceBufferStream<RangeClass>::~SourceBufferStream() = default;
+SourceBufferStream::~SourceBufferStream() = default;
-template <>
-void SourceBufferStream<SourceBufferRangeByDts>::OnStartOfCodedFrameGroup(
- DecodeTimestamp coded_frame_group_start_dts,
+void SourceBufferStream::OnStartOfCodedFrameGroup(
base::TimeDelta coded_frame_group_start_pts) {
- DVLOG(1) << __func__ << " " << GetStreamTypeName() << " (dts "
- << coded_frame_group_start_dts.InMicroseconds() << "us, pts "
+ DVLOG(1) << __func__ << " " << GetStreamTypeName() << " (pts "
<< coded_frame_group_start_pts.InMicroseconds() << "us)";
DCHECK(!end_of_stream_);
- OnStartOfCodedFrameGroupInternal(coded_frame_group_start_dts);
-}
-
-template <>
-void SourceBufferStream<SourceBufferRangeByPts>::OnStartOfCodedFrameGroup(
- DecodeTimestamp coded_frame_group_start_dts,
- base::TimeDelta coded_frame_group_start_pts) {
- DVLOG(1) << __func__ << " " << GetStreamTypeName() << " (dts "
- << coded_frame_group_start_dts.InMicroseconds() << "us, pts "
- << coded_frame_group_start_pts.InMicroseconds() << "us)";
- DCHECK(!end_of_stream_);
- OnStartOfCodedFrameGroupInternal(
- DecodeTimestamp::FromPresentationTime(coded_frame_group_start_pts));
-}
-
-template <typename RangeClass>
-void SourceBufferStream<RangeClass>::OnStartOfCodedFrameGroupInternal(
- DecodeTimestamp coded_frame_group_start_time) {
- coded_frame_group_start_time_ = coded_frame_group_start_time;
+ coded_frame_group_start_pts_ = coded_frame_group_start_pts;
new_coded_frame_group_ = true;
auto last_range = range_for_next_append_;
- range_for_next_append_ = FindExistingRangeFor(coded_frame_group_start_time);
+ range_for_next_append_ = FindExistingRangeFor(coded_frame_group_start_pts_);
// Only reset |last_appended_buffer_timestamp_| if this new coded frame group
// is not adjacent to the previous coded frame group appended to the stream.
if (range_for_next_append_ == ranges_.end() ||
!IsNextGopAdjacentToEndOfCurrentAppendSequence(
- coded_frame_group_start_time)) {
+ coded_frame_group_start_pts_)) {
ResetLastAppendedState();
DVLOG(3) << __func__ << " next appended buffers will "
<< (range_for_next_append_ == ranges_.end()
@@ -283,17 +224,18 @@ void SourceBufferStream<RangeClass>::OnStartOfCodedFrameGroupInternal(
// If this new coded frame group overlaps an existing range, preserve
// continuity from that range to the new group by moving the start time
// earlier (but not at or beyond the most recent buffered frame's time
- // before |coded_frame_group_start_time| in the range, and not beyond the
+ // before |coded_frame_group_start_pts_| in the range, and not beyond the
// range's start time. This update helps prevent discontinuity from being
// introduced by the ::RemoveInternal processing during the next ::Append
// call.
- DecodeTimestamp adjusted_start_time =
- RangeFindHighestBufferedTimestampAtOrBefore(
- range_for_next_append_->get(), coded_frame_group_start_time_);
- if (adjusted_start_time < coded_frame_group_start_time_) {
+ base::TimeDelta adjusted_start_time =
+ (*range_for_next_append_)
+ ->FindHighestBufferedTimestampAtOrBefore(
+ coded_frame_group_start_pts_);
+ if (adjusted_start_time < coded_frame_group_start_pts_) {
// Exclude removal of that earlier frame during later Append
// processing by adjusting the removal range slightly forward.
- coded_frame_group_start_time_ =
+ coded_frame_group_start_pts_ =
adjusted_start_time + base::TimeDelta::FromMicroseconds(1);
}
}
@@ -304,33 +246,27 @@ void SourceBufferStream<RangeClass>::OnStartOfCodedFrameGroupInternal(
}
}
-template <typename RangeClass>
-bool SourceBufferStream<RangeClass>::Append(const BufferQueue& buffers) {
+bool SourceBufferStream::Append(const BufferQueue& buffers) {
TRACE_EVENT2("media", "SourceBufferStream::Append",
"stream type", GetStreamTypeName(),
"buffers to append", buffers.size());
DCHECK(!buffers.empty());
- DCHECK(coded_frame_group_start_time_ != kNoDecodeTimestamp());
+ DCHECK(coded_frame_group_start_pts_ != kNoTimestamp);
DCHECK(!end_of_stream_);
DVLOG(1) << __func__ << " " << GetStreamTypeName() << ": buffers "
- << BufferQueueMetadataToLogString<RangeClass>(buffers);
- DVLOG(4) << BufferQueueBuffersToLogString<RangeClass>(buffers);
-
- // TODO(wolenetz): Make this DCHECK also applicable to ByPts once SAP-Type-2
- // is more fully supported such that the NewByPts versions of
- // FrameProcessorTest.OOOKeyframePrecededByDependantNonKeyframeShouldWarn
- // don't crash. See https://crbug.com/718641.
- DCHECK(BufferingByPts() ||
- coded_frame_group_start_time_ <= BufferGetTimestamp(buffers.front()));
- DVLOG_IF(2, BufferingByPts() && coded_frame_group_start_time_ >
- BufferGetTimestamp(buffers.front()))
+ << BufferQueueMetadataToLogString(buffers);
+ DVLOG(4) << BufferQueueBuffersToLogString(buffers);
+
+ DCHECK(!buffers.front()->is_key_frame() ||
+ coded_frame_group_start_pts_ <= buffers.front()->timestamp());
+ DVLOG_IF(2, coded_frame_group_start_pts_ > buffers.front()->timestamp())
<< __func__
- << " Suspected SAP-Type-2 occurrence: coded_frame_group_start_time_="
- << coded_frame_group_start_time_.InMicroseconds()
+ << " Suspected SAP-Type-2 occurrence: coded_frame_group_start_pts_="
+ << coded_frame_group_start_pts_.InMicroseconds()
<< "us, first new buffer has timestamp="
- << BufferGetTimestamp(buffers.front()).InMicroseconds() << "us";
+ << buffers.front()->timestamp().InMicroseconds() << "us";
// New coded frame groups emitted by the coded frame processor must begin with
// a keyframe. TODO(wolenetz): Change this to [DCHECK + MEDIA_LOG(ERROR...) +
@@ -338,20 +274,15 @@ bool SourceBufferStream<RangeClass>::Append(const BufferQueue& buffers) {
// https://crbug.com/580621.
CHECK(!new_coded_frame_group_ || buffers.front()->is_key_frame());
- // Buffers within a coded frame group (when buffering by DTS) or within each
- // GOP in a coded frame group (when buffering by PTS) must be monotonically
+ // Buffers within each GOP in a coded frame group must be monotonically
// increasing in DTS order.
- // TODO(wolenetz): Relax to a DCHECK once this has baked long enough with a
- // large enough population of MseBufferByPts.
- CHECK(IsDtsMonotonicallyIncreasing(buffers));
+ DCHECK(IsDtsMonotonicallyIncreasing(buffers));
// Both of these checks enforce what should be guaranteed by how
// FrameProcessor signals OnStartOfCodedFrameGroup and the buffers it tells us
// to Append.
- // TODO(wolenetz): Relax to DCHECKS once this has baked long enough with a
- // large enough population of MseBufferByPts.
- CHECK(coded_frame_group_start_time_ >= DecodeTimestamp());
- CHECK(BufferGetTimestamp(buffers.front()) >= DecodeTimestamp());
+ DCHECK(coded_frame_group_start_pts_ >= base::TimeDelta());
+ DCHECK(buffers.front()->timestamp() >= base::TimeDelta());
if (UpdateMaxInterbufferDtsDistance(buffers)) {
// Coalesce |ranges_| using the new fudge room. This helps keep |ranges_|
@@ -362,7 +293,7 @@ bool SourceBufferStream<RangeClass>::Append(const BufferQueue& buffers) {
SetConfigIds(buffers);
// Save a snapshot of stream state before range modifications are made.
- DecodeTimestamp next_buffer_timestamp = GetNextBufferTimestamp();
+ base::TimeDelta next_buffer_timestamp = GetNextBufferTimestamp();
BufferQueue deleted_buffers;
PrepareRangesForNextAppend(buffers, &deleted_buffers);
@@ -377,26 +308,25 @@ bool SourceBufferStream<RangeClass>::Append(const BufferQueue& buffers) {
// A large gap (larger than our normal buffer adjacency test) can occur in
// a muxed set of streams (which share a common coded frame group start
// time) with a significantly jagged start across the streams.
- RangeAppendBuffersToEnd(range_for_next_append_->get(), buffers,
- coded_frame_group_start_time_);
+ (*range_for_next_append_)
+ ->AppendBuffersToEnd(buffers, coded_frame_group_start_pts_);
} else {
// Otherwise, use the first new buffer as proof of adjacency.
- RangeAppendBuffersToEnd(range_for_next_append_->get(), buffers,
- kNoDecodeTimestamp());
+ (*range_for_next_append_)->AppendBuffersToEnd(buffers, kNoTimestamp);
}
- last_appended_buffer_timestamp_ = BufferGetTimestamp(buffers.back());
+ last_appended_buffer_timestamp_ = buffers.back()->timestamp();
last_appended_buffer_duration_ = buffers.back()->duration();
last_appended_buffer_is_keyframe_ = buffers.back()->is_key_frame();
last_appended_buffer_decode_timestamp_ =
buffers.back()->GetDecodeTimestamp();
highest_timestamp_in_append_sequence_ =
- RangeGetEndTimestamp(range_for_next_append_->get());
+ (*range_for_next_append_)->GetEndTimestamp();
highest_buffered_end_time_in_append_sequence_ =
- RangeGetBufferedEndTimestamp(range_for_next_append_->get());
+ (*range_for_next_append_)->GetBufferedEndTimestamp();
} else {
- DecodeTimestamp new_range_start_time = std::min(
- coded_frame_group_start_time_, BufferGetTimestamp(buffers.front()));
+ base::TimeDelta new_range_start_time =
+ std::min(coded_frame_group_start_pts_, buffers.front()->timestamp());
const BufferQueue* buffers_for_new_range = &buffers;
BufferQueue trimmed_buffers;
@@ -415,7 +345,7 @@ bool SourceBufferStream<RangeClass>::Append(const BufferQueue& buffers) {
// If we didn't find a key frame, then update the last appended
// buffer state and return.
if (itr == buffers.end()) {
- last_appended_buffer_timestamp_ = BufferGetTimestamp(buffers.back());
+ last_appended_buffer_timestamp_ = buffers.back()->timestamp();
last_appended_buffer_duration_ = buffers.back()->duration();
last_appended_buffer_is_keyframe_ = buffers.back()->is_key_frame();
last_appended_buffer_decode_timestamp_ =
@@ -427,7 +357,7 @@ bool SourceBufferStream<RangeClass>::Append(const BufferQueue& buffers) {
" keyframe that has been removed, and contain no keyframes."
" Skipping further processing.";
DVLOG(1) << __func__ << " " << GetStreamTypeName()
- << ": done. ranges_=" << RangesToString<RangeClass>(ranges_);
+ << ": done. ranges_=" << RangesToString(ranges_);
return true;
} else if (itr != buffers.begin()) {
// Copy the first key frame and everything after it into
@@ -436,33 +366,34 @@ bool SourceBufferStream<RangeClass>::Append(const BufferQueue& buffers) {
buffers_for_new_range = &trimmed_buffers;
}
- new_range_start_time = BufferGetTimestamp(buffers_for_new_range->front());
+ new_range_start_time = buffers_for_new_range->front()->timestamp();
}
- range_for_next_append_ =
- AddToRanges(RangeNew(*buffers_for_new_range, new_range_start_time));
+ range_for_next_append_ = AddToRanges(std::make_unique<SourceBufferRange>(
+ TypeToGapPolicy(GetType()), *buffers_for_new_range,
+ new_range_start_time,
+ base::BindRepeating(&SourceBufferStream::GetMaxInterbufferDistance,
+ base::Unretained(this))));
last_appended_buffer_timestamp_ =
- BufferGetTimestamp(buffers_for_new_range->back());
+ buffers_for_new_range->back()->timestamp();
last_appended_buffer_duration_ = buffers_for_new_range->back()->duration();
last_appended_buffer_is_keyframe_ =
buffers_for_new_range->back()->is_key_frame();
last_appended_buffer_decode_timestamp_ =
buffers_for_new_range->back()->GetDecodeTimestamp();
highest_timestamp_in_append_sequence_ =
- RangeGetEndTimestamp(range_for_next_append_->get());
+ (*range_for_next_append_)->GetEndTimestamp();
highest_buffered_end_time_in_append_sequence_ =
- RangeGetBufferedEndTimestamp(range_for_next_append_->get());
+ (*range_for_next_append_)->GetBufferedEndTimestamp();
}
new_coded_frame_group_ = false;
MergeWithNextRangeIfNecessary(range_for_next_append_);
- // Some SAP-Type-2 append sequences, when buffering ByPts, require that we
- // coalesce |range_for_next_append_| with the range that is *before* it.
- // Likewise, some overlap buffering sequences, when buffering ByDts, require
- // similar.
+ // Some SAP-Type-2 append sequences require that we coalesce
+ // |range_for_next_append_| with the range that is *before* it.
if (range_for_next_append_ != ranges_.begin()) {
auto prior_range = range_for_next_append_;
prior_range--;
@@ -491,25 +422,24 @@ bool SourceBufferStream<RangeClass>::Append(const BufferQueue& buffers) {
// Prune any extra buffers in |track_buffer_| if new keyframes
// are appended to the range covered by |track_buffer_|.
if (!track_buffer_.empty()) {
- DecodeTimestamp keyframe_timestamp =
- FindKeyframeAfterTimestamp(BufferGetTimestamp(track_buffer_.front()));
- if (keyframe_timestamp != kNoDecodeTimestamp())
+ base::TimeDelta keyframe_timestamp =
+ FindKeyframeAfterTimestamp(track_buffer_.front()->timestamp());
+ if (keyframe_timestamp != kNoTimestamp)
PruneTrackBuffer(keyframe_timestamp);
}
SetSelectedRangeIfNeeded(next_buffer_timestamp);
DVLOG(1) << __func__ << " " << GetStreamTypeName()
- << ": done. ranges_=" << RangesToString<RangeClass>(ranges_);
+ << ": done. ranges_=" << RangesToString(ranges_);
DCHECK(IsRangeListSorted(ranges_));
DCHECK(OnlySelectedRangeIsSeeked());
return true;
}
-template <typename RangeClass>
-void SourceBufferStream<RangeClass>::Remove(base::TimeDelta start,
- base::TimeDelta end,
- base::TimeDelta duration) {
+void SourceBufferStream::Remove(base::TimeDelta start,
+ base::TimeDelta end,
+ base::TimeDelta duration) {
DVLOG(1) << __func__ << " " << GetStreamTypeName() << " ("
<< start.InMicroseconds() << "us, " << end.InMicroseconds() << "us, "
<< duration.InMicroseconds() << "us)";
@@ -518,24 +448,21 @@ void SourceBufferStream<RangeClass>::Remove(base::TimeDelta start,
<< end.InMicroseconds() << "us";
DCHECK(duration != kNoTimestamp);
- DecodeTimestamp start_dts = DecodeTimestamp::FromPresentationTime(start);
- DecodeTimestamp end_dts = DecodeTimestamp::FromPresentationTime(end);
- DecodeTimestamp remove_end_timestamp =
- DecodeTimestamp::FromPresentationTime(duration);
- DecodeTimestamp keyframe_timestamp = FindKeyframeAfterTimestamp(end_dts);
- if (keyframe_timestamp != kNoDecodeTimestamp()) {
+ base::TimeDelta remove_end_timestamp = duration;
+ base::TimeDelta keyframe_timestamp = FindKeyframeAfterTimestamp(end);
+ if (keyframe_timestamp != kNoTimestamp) {
remove_end_timestamp = keyframe_timestamp;
- } else if (end_dts < remove_end_timestamp) {
- remove_end_timestamp = end_dts;
+ } else if (end < remove_end_timestamp) {
+ remove_end_timestamp = end;
}
BufferQueue deleted_buffers;
- RemoveInternal(start_dts, remove_end_timestamp, false, &deleted_buffers);
+ RemoveInternal(start, remove_end_timestamp, false, &deleted_buffers);
if (!deleted_buffers.empty()) {
// Buffers for the current position have been removed.
- SetSelectedRangeIfNeeded(BufferGetTimestamp(deleted_buffers.front()));
- if (highest_output_buffer_timestamp_ == kNoDecodeTimestamp()) {
+ SetSelectedRangeIfNeeded(deleted_buffers.front()->timestamp());
+ if (highest_output_buffer_timestamp_ == kNoTimestamp) {
// We just removed buffers for the current playback position for this
// stream, yet we also had output no buffer since the last Seek.
// Re-seek to prevent stall.
@@ -550,31 +477,27 @@ void SourceBufferStream<RangeClass>::Remove(base::TimeDelta start,
DCHECK(IsRangeListSorted(ranges_));
}
-template <typename RangeClass>
-DecodeTimestamp SourceBufferStream<RangeClass>::PotentialNextAppendTimestamp()
- const {
- // The next potential append will either be just at or after (if buffering
- // ByDts), or in a GOP adjacent if ByPts, to
+base::TimeDelta SourceBufferStream::PotentialNextAppendTimestamp() const {
+ // The next potential append will either be in a GOP adjacent to
// |highest_timestamp_in_append_sequence_| (if known), or if unknown and we
// are still at the beginning of a new coded frame group, then will be into
- // the range (if any) to which |coded_frame_group_start_time_| belongs.
- if (highest_timestamp_in_append_sequence_ != kNoDecodeTimestamp())
+ // the range (if any) to which |coded_frame_group_start_pts_| belongs.
+ if (highest_timestamp_in_append_sequence_ != kNoTimestamp)
return highest_timestamp_in_append_sequence_;
if (new_coded_frame_group_)
- return coded_frame_group_start_time_;
+ return coded_frame_group_start_pts_;
// If we still don't know a potential next append timestamp, then we have
- // removed the ranged to which it previously belonged and have not completed a
+ // removed the range to which it previously belonged and have not completed a
// subsequent append or received a subsequent OnStartOfCodedFrameGroup()
// signal.
- return kNoDecodeTimestamp();
+ return kNoTimestamp;
}
-template <typename RangeClass>
-void SourceBufferStream<RangeClass>::UpdateLastAppendStateForRemove(
- DecodeTimestamp remove_start,
- DecodeTimestamp remove_end,
+void SourceBufferStream::UpdateLastAppendStateForRemove(
+ base::TimeDelta remove_start,
+ base::TimeDelta remove_end,
bool exclude_start) {
// TODO(chcunningham): change exclude_start to include_start in this class and
// SourceBufferRange. Negatives are hard to reason about.
@@ -586,11 +509,11 @@ void SourceBufferStream<RangeClass>::UpdateLastAppendStateForRemove(
return;
if (range_for_next_append_ != ranges_.end()) {
- if (last_appended_buffer_timestamp_ != kNoDecodeTimestamp()) {
+ if (last_appended_buffer_timestamp_ != kNoTimestamp) {
// Note start and end of last appended GOP.
- DecodeTimestamp gop_end = highest_timestamp_in_append_sequence_;
- DecodeTimestamp gop_start =
- RangeKeyframeBeforeTimestamp(range_for_next_append_->get(), gop_end);
+ base::TimeDelta gop_end = highest_timestamp_in_append_sequence_;
+ base::TimeDelta gop_start =
+ (*range_for_next_append_)->KeyframeBeforeTimestamp(gop_end);
// If last append is about to be disrupted, reset associated state so we
// know to create a new range for future appends and require an initial
@@ -614,35 +537,34 @@ void SourceBufferStream<RangeClass>::UpdateLastAppendStateForRemove(
}
}
-template <typename RangeClass>
-void SourceBufferStream<RangeClass>::RemoveInternal(
- DecodeTimestamp start,
- DecodeTimestamp end,
- bool exclude_start,
- BufferQueue* deleted_buffers) {
+void SourceBufferStream::RemoveInternal(base::TimeDelta start,
+ base::TimeDelta end,
+ bool exclude_start,
+ BufferQueue* deleted_buffers) {
DVLOG(2) << __func__ << " " << GetStreamTypeName() << " ("
<< start.InMicroseconds() << "us, " << end.InMicroseconds() << "us, "
<< exclude_start << ")";
DVLOG(3) << __func__ << " " << GetStreamTypeName()
- << ": before remove ranges_=" << RangesToString<RangeClass>(ranges_);
+ << ": before remove ranges_=" << RangesToString(ranges_);
- DCHECK(start >= DecodeTimestamp());
+ DCHECK(start >= base::TimeDelta());
DCHECK(start < end) << "start " << start.InMicroseconds() << "us, end "
<< end.InMicroseconds() << "us";
DCHECK(deleted_buffers);
- // Doing this upfront simplifies decisions about range_for_next_append_ below.
+ // Doing this up-front simplifies decisions about |range_for_next_append_|
+ // below.
UpdateLastAppendStateForRemove(start, end, exclude_start);
auto itr = ranges_.begin();
while (itr != ranges_.end()) {
- RangeClass* range = itr->get();
- if (RangeGetStartTimestamp(range) >= end)
+ SourceBufferRange* range = itr->get();
+ if (range->GetStartTimestamp() >= end)
break;
// Split off any remaining GOPs starting at or after |end| and add it to
// |ranges_|.
- std::unique_ptr<RangeClass> new_range = RangeSplitRange(range, end);
+ std::unique_ptr<SourceBufferRange> new_range = range->SplitRange(end);
if (new_range) {
itr = ranges_.insert(++itr, std::move(new_range));
@@ -650,10 +572,10 @@ void SourceBufferStream<RangeClass>::RemoveInternal(
// be the new range (that |itr| is at) now.
if (range_for_next_append_ != ranges_.end() &&
range_for_next_append_->get() == range) {
- DecodeTimestamp potential_next_append_timestamp =
+ base::TimeDelta potential_next_append_timestamp =
PotentialNextAppendTimestamp();
- if (potential_next_append_timestamp != kNoDecodeTimestamp() &&
- RangeBelongsToRange(itr->get(), potential_next_append_timestamp)) {
+ if (potential_next_append_timestamp != kNoTimestamp &&
+ (*itr)->BelongsToRange(potential_next_append_timestamp)) {
range_for_next_append_ = itr;
}
}
@@ -669,11 +591,10 @@ void SourceBufferStream<RangeClass>::RemoveInternal(
// Truncate the current range so that it only contains data before
// the removal range.
BufferQueue saved_buffers;
- bool delete_range =
- RangeTruncateAt(range, start, &saved_buffers, exclude_start);
+ bool delete_range = range->TruncateAt(start, &saved_buffers, exclude_start);
- // Check to see if the current playback position was removed and
- // update the selected range appropriately.
+ // Check to see if the current playback position was removed and update the
+ // selected range appropriately.
if (!saved_buffers.empty()) {
DCHECK(!range->HasNextBufferPosition());
DCHECK(deleted_buffers->empty());
@@ -696,10 +617,10 @@ void SourceBufferStream<RangeClass>::RemoveInternal(
// to the current range.
if (range_for_next_append_ != ranges_.end() &&
range_for_next_append_->get() == range) {
- DecodeTimestamp potential_next_append_timestamp =
+ base::TimeDelta potential_next_append_timestamp =
PotentialNextAppendTimestamp();
- if (!RangeBelongsToRange(range, potential_next_append_timestamp)) {
+ if (!range->BelongsToRange(potential_next_append_timestamp)) {
DVLOG(1) << "Resetting range_for_next_append_ since the next append"
<< " can't add to the current range.";
range_for_next_append_ =
@@ -712,54 +633,49 @@ void SourceBufferStream<RangeClass>::RemoveInternal(
}
DVLOG(3) << __func__ << " " << GetStreamTypeName()
- << ": after remove ranges_=" << RangesToString<RangeClass>(ranges_);
+ << ": after remove ranges_=" << RangesToString(ranges_);
DCHECK(OnlySelectedRangeIsSeeked());
}
-template <typename RangeClass>
-void SourceBufferStream<RangeClass>::ResetSeekState() {
+void SourceBufferStream::ResetSeekState() {
SetSelectedRange(NULL);
track_buffer_.clear();
config_change_pending_ = false;
- highest_output_buffer_timestamp_ = kNoDecodeTimestamp();
+ highest_output_buffer_timestamp_ = kNoTimestamp;
just_exhausted_track_buffer_ = false;
pending_buffer_ = NULL;
pending_buffers_complete_ = false;
}
-template <typename RangeClass>
-void SourceBufferStream<RangeClass>::ResetLastAppendedState() {
- last_appended_buffer_timestamp_ = kNoDecodeTimestamp();
+void SourceBufferStream::ResetLastAppendedState() {
+ last_appended_buffer_timestamp_ = kNoTimestamp;
last_appended_buffer_duration_ = kNoTimestamp;
last_appended_buffer_is_keyframe_ = false;
last_appended_buffer_decode_timestamp_ = kNoDecodeTimestamp();
- highest_timestamp_in_append_sequence_ = kNoDecodeTimestamp();
- highest_buffered_end_time_in_append_sequence_ = kNoDecodeTimestamp();
+ highest_timestamp_in_append_sequence_ = kNoTimestamp;
+ highest_buffered_end_time_in_append_sequence_ = kNoTimestamp;
}
-template <typename RangeClass>
-bool SourceBufferStream<RangeClass>::ShouldSeekToStartOfBuffered(
+bool SourceBufferStream::ShouldSeekToStartOfBuffered(
base::TimeDelta seek_timestamp) const {
if (ranges_.empty())
return false;
- base::TimeDelta beginning_of_buffered =
- RangeGetStartTimestamp(ranges_.front().get()).ToPresentationTime();
+ base::TimeDelta beginning_of_buffered = ranges_.front()->GetStartTimestamp();
return (seek_timestamp <= beginning_of_buffered &&
beginning_of_buffered < kSeekToStartFudgeRoom());
}
-template <typename RangeClass>
-bool SourceBufferStream<RangeClass>::IsDtsMonotonicallyIncreasing(
+bool SourceBufferStream::IsDtsMonotonicallyIncreasing(
const BufferQueue& buffers) {
DCHECK(!buffers.empty());
- DecodeTimestamp prev_timestamp = last_appended_buffer_decode_timestamp_;
+ DecodeTimestamp prev_dts = last_appended_buffer_decode_timestamp_;
bool prev_is_keyframe = last_appended_buffer_is_keyframe_;
for (BufferQueue::const_iterator itr = buffers.begin();
itr != buffers.end(); ++itr) {
- DecodeTimestamp current_timestamp = (*itr)->GetDecodeTimestamp();
+ DecodeTimestamp current_dts = (*itr)->GetDecodeTimestamp();
bool current_is_keyframe = (*itr)->is_key_frame();
- DCHECK(current_timestamp != kNoDecodeTimestamp());
+ DCHECK(current_dts != kNoDecodeTimestamp());
DCHECK((*itr)->duration() >= base::TimeDelta())
<< "Packet with invalid duration."
<< " pts " << (*itr)->timestamp().InMicroseconds() << "us dts "
@@ -770,39 +686,27 @@ bool SourceBufferStream<RangeClass>::IsDtsMonotonicallyIncreasing(
// already.
DCHECK(current_is_keyframe || GetType() != SourceBufferStreamType::kAudio);
- // When buffering by PTS, only verify DTS monotonicity within the current
- // GOP.
- if (current_is_keyframe && BufferingByPts()) {
- // Reset prev_timestamp DTS tracking since a new GOP is starting.
- prev_timestamp = kNoDecodeTimestamp();
+ // Only verify DTS monotonicity within the current GOP.
+ if (current_is_keyframe) {
+ // Reset prev_dts tracking since a new GOP is starting.
+ prev_dts = kNoDecodeTimestamp();
}
- if (prev_timestamp != kNoDecodeTimestamp()) {
- if (current_timestamp < prev_timestamp) {
+ if (prev_dts != kNoDecodeTimestamp()) {
+ if (current_dts < prev_dts) {
MEDIA_LOG(ERROR, media_log_)
<< "Buffers did not monotonically increase.";
return false;
}
-
- if (current_timestamp == prev_timestamp &&
- SourceBufferRange::IsUncommonSameTimestampSequence(
- prev_is_keyframe, current_is_keyframe)) {
- LIMITED_MEDIA_LOG(DEBUG, media_log_, num_strange_same_timestamps_logs_,
- kMaxStrangeSameTimestampsLogs)
- << "Detected an append sequence with keyframe following a "
- "non-keyframe, both with the same decode timestamp of "
- << current_timestamp.InSecondsF();
- }
}
- prev_timestamp = current_timestamp;
+ prev_dts = current_dts;
prev_is_keyframe = current_is_keyframe;
}
return true;
}
-template <typename RangeClass>
-bool SourceBufferStream<RangeClass>::OnlySelectedRangeIsSeeked() const {
+bool SourceBufferStream::OnlySelectedRangeIsSeeked() const {
for (auto itr = ranges_.begin(); itr != ranges_.end(); ++itr) {
if ((*itr)->HasNextBufferPosition() && itr->get() != selected_range_)
return false;
@@ -810,30 +714,29 @@ bool SourceBufferStream<RangeClass>::OnlySelectedRangeIsSeeked() const {
return !selected_range_ || selected_range_->HasNextBufferPosition();
}
-template <typename RangeClass>
-bool SourceBufferStream<RangeClass>::UpdateMaxInterbufferDtsDistance(
+bool SourceBufferStream::UpdateMaxInterbufferDtsDistance(
const BufferQueue& buffers) {
DCHECK(!buffers.empty());
base::TimeDelta old_distance = max_interbuffer_distance_;
- DecodeTimestamp prev_timestamp = last_appended_buffer_decode_timestamp_;
+ DecodeTimestamp prev_dts = last_appended_buffer_decode_timestamp_;
for (BufferQueue::const_iterator itr = buffers.begin();
itr != buffers.end(); ++itr) {
- DecodeTimestamp current_timestamp = (*itr)->GetDecodeTimestamp();
- DCHECK(current_timestamp != kNoDecodeTimestamp());
+ DecodeTimestamp current_dts = (*itr)->GetDecodeTimestamp();
+ DCHECK(current_dts != kNoDecodeTimestamp());
base::TimeDelta interbuffer_distance = (*itr)->duration();
DCHECK(interbuffer_distance >= base::TimeDelta());
- if (prev_timestamp != kNoDecodeTimestamp()) {
+ if (prev_dts != kNoDecodeTimestamp()) {
interbuffer_distance =
- std::max(current_timestamp - prev_timestamp, interbuffer_distance);
+ std::max(current_dts - prev_dts, interbuffer_distance);
}
DCHECK(max_interbuffer_distance_ >=
base::TimeDelta::FromMilliseconds(kMinimumInterbufferDistanceInMs));
max_interbuffer_distance_ =
std::max(max_interbuffer_distance_, interbuffer_distance);
- prev_timestamp = current_timestamp;
+ prev_dts = current_dts;
}
bool changed_max = max_interbuffer_distance_ != old_distance;
DVLOG_IF(2, changed_max) << __func__ << " " << GetStreamTypeName()
@@ -844,17 +747,15 @@ bool SourceBufferStream<RangeClass>::UpdateMaxInterbufferDtsDistance(
return changed_max;
}
-template <typename RangeClass>
-void SourceBufferStream<RangeClass>::SetConfigIds(const BufferQueue& buffers) {
+void SourceBufferStream::SetConfigIds(const BufferQueue& buffers) {
for (BufferQueue::const_iterator itr = buffers.begin();
itr != buffers.end(); ++itr) {
(*itr)->SetConfigId(append_config_index_);
}
}
-template <typename RangeClass>
-void SourceBufferStream<RangeClass>::OnMemoryPressure(
- DecodeTimestamp media_time,
+void SourceBufferStream::OnMemoryPressure(
+ base::TimeDelta media_time,
base::MemoryPressureListener::MemoryPressureLevel memory_pressure_level,
bool force_instant_gc) {
DVLOG(4) << __func__ << " level=" << memory_pressure_level;
@@ -864,11 +765,9 @@ void SourceBufferStream<RangeClass>::OnMemoryPressure(
GarbageCollectIfNeeded(media_time, 0);
}
-template <typename RangeClass>
-bool SourceBufferStream<RangeClass>::GarbageCollectIfNeeded(
- DecodeTimestamp media_time,
- size_t newDataSize) {
- DCHECK(media_time != kNoDecodeTimestamp());
+bool SourceBufferStream::GarbageCollectIfNeeded(base::TimeDelta media_time,
+ size_t newDataSize) {
+ DCHECK(media_time != kNoTimestamp);
// Garbage collection should only happen before/during appending new data,
// which should not happen in end-of-stream state. Unless we also allow GC to
// happen on memory pressure notifications, which might happen even in EOS
@@ -916,7 +815,7 @@ bool SourceBufferStream<RangeClass>::GarbageCollectIfNeeded(
DVLOG(2) << __func__ << " " << GetStreamTypeName()
<< ": Before GC media_time=" << media_time.InMicroseconds()
- << "us ranges_=" << RangesToString<RangeClass>(ranges_)
+ << "us ranges_=" << RangesToString(ranges_)
<< " seek_pending_=" << seek_pending_
<< " ranges_size=" << ranges_size << " newDataSize=" << newDataSize
<< " memory_limit_=" << memory_limit_
@@ -930,7 +829,7 @@ bool SourceBufferStream<RangeClass>::GarbageCollectIfNeeded(
<< "us";
if (selected_range_ && !seek_pending_ &&
- media_time > RangeGetBufferedEndTimestamp(selected_range_)) {
+ media_time > selected_range_->GetBufferedEndTimestamp()) {
// Strictly speaking |media_time| (taken from HTMLMediaElement::currentTime)
// should always be in the buffered ranges, but media::Pipeline uses audio
// stream as the main time source, when audio is present.
@@ -939,8 +838,8 @@ bool SourceBufferStream<RangeClass>::GarbageCollectIfNeeded(
// range. In those cases we need to clamp |media_time| value to the current
// stream buffered ranges, to ensure the MSE garbage collection algorithm
// works correctly (see crbug.com/563292 for details).
- DecodeTimestamp selected_buffered_end =
- RangeGetBufferedEndTimestamp(selected_range_);
+ base::TimeDelta selected_buffered_end =
+ selected_range_->GetBufferedEndTimestamp();
DVLOG(2) << __func__ << " media_time " << media_time.InMicroseconds()
<< "us is outside of selected_range_=["
@@ -955,14 +854,14 @@ bool SourceBufferStream<RangeClass>::GarbageCollectIfNeeded(
// If last appended buffer position was earlier than the current playback time
// then try deleting data between last append and current media_time.
- if (last_appended_buffer_timestamp_ != kNoDecodeTimestamp() &&
+ if (last_appended_buffer_timestamp_ != kNoTimestamp &&
last_appended_buffer_duration_ != kNoTimestamp &&
- highest_buffered_end_time_in_append_sequence_ != kNoDecodeTimestamp() &&
+ highest_buffered_end_time_in_append_sequence_ != kNoTimestamp &&
media_time > highest_buffered_end_time_in_append_sequence_) {
size_t between = FreeBuffersAfterLastAppended(bytes_to_free, media_time);
DVLOG(3) << __func__ << " FreeBuffersAfterLastAppended "
<< " released " << between << " bytes"
- << " ranges_=" << RangesToString<RangeClass>(ranges_);
+ << " ranges_=" << RangesToString(ranges_);
bytes_freed += between;
// Some players start appending data at the new seek target position before
@@ -974,9 +873,8 @@ bool SourceBufferStream<RangeClass>::GarbageCollectIfNeeded(
// the most recently appended data, i.e. data belonging to the same buffered
// range as the most recent append.
if (range_for_next_append_ != ranges_.end()) {
- DCHECK(RangeGetStartTimestamp(range_for_next_append_->get()) <=
- media_time);
- media_time = RangeGetStartTimestamp(range_for_next_append_->get());
+ DCHECK((*range_for_next_append_)->GetStartTimestamp() <= media_time);
+ media_time = (*range_for_next_append_)->GetStartTimestamp();
DVLOG(3) << __func__ << " media_time adjusted to "
<< media_time.InMicroseconds() << "us";
}
@@ -991,8 +889,7 @@ bool SourceBufferStream<RangeClass>::GarbageCollectIfNeeded(
// All data earlier than the seek target |media_time| can be removed safely
size_t front = FreeBuffers(bytes_to_free - bytes_freed, media_time, false);
DVLOG(3) << __func__ << " Removed " << front
- << " bytes from the front. ranges_="
- << RangesToString<RangeClass>(ranges_);
+ << " bytes from the front. ranges_=" << RangesToString(ranges_);
bytes_freed += front;
// If removing data earlier than |media_time| didn't free up enough space,
@@ -1000,20 +897,17 @@ bool SourceBufferStream<RangeClass>::GarbageCollectIfNeeded(
if (bytes_freed < bytes_to_free) {
size_t back = FreeBuffers(bytes_to_free - bytes_freed, media_time, true);
DVLOG(3) << __func__ << " Removed " << back
- << " bytes from the back. ranges_="
- << RangesToString<RangeClass>(ranges_);
+ << " bytes from the back. ranges_=" << RangesToString(ranges_);
bytes_freed += back;
}
// If even that wasn't enough, then try greedily deleting from the front,
// that should allow us to remove as much data as necessary to succeed.
if (bytes_freed < bytes_to_free) {
- size_t front2 =
- FreeBuffers(bytes_to_free - bytes_freed,
- RangeGetEndTimestamp(ranges_.back().get()), false);
+ size_t front2 = FreeBuffers(bytes_to_free - bytes_freed,
+ ranges_.back()->GetEndTimestamp(), false);
DVLOG(3) << __func__ << " Removed " << front2
- << " bytes from the front. ranges_="
- << RangesToString<RangeClass>(ranges_);
+ << " bytes from the front. ranges_=" << RangesToString(ranges_);
bytes_freed += front2;
}
DCHECK(bytes_freed >= bytes_to_free);
@@ -1024,8 +918,7 @@ bool SourceBufferStream<RangeClass>::GarbageCollectIfNeeded(
if (bytes_freed < bytes_to_free) {
size_t front = FreeBuffers(bytes_to_free - bytes_freed, media_time, false);
DVLOG(3) << __func__ << " Removed " << front
- << " bytes from the front. ranges_="
- << RangesToString<RangeClass>(ranges_);
+ << " bytes from the front. ranges_=" << RangesToString(ranges_);
bytes_freed += front;
}
@@ -1034,8 +927,7 @@ bool SourceBufferStream<RangeClass>::GarbageCollectIfNeeded(
if (bytes_freed < bytes_to_free) {
size_t back = FreeBuffers(bytes_to_free - bytes_freed, media_time, true);
DVLOG(3) << __func__ << " Removed " << back
- << " bytes from the back. ranges_="
- << RangesToString<RangeClass>(ranges_);
+ << " bytes from the back. ranges_=" << RangesToString(ranges_);
bytes_freed += back;
}
@@ -1043,56 +935,50 @@ bool SourceBufferStream<RangeClass>::GarbageCollectIfNeeded(
<< ": After GC bytes_to_free=" << bytes_to_free
<< " bytes_freed=" << bytes_freed
<< " bytes_over_hard_memory_limit=" << bytes_over_hard_memory_limit
- << " ranges_=" << RangesToString<RangeClass>(ranges_);
+ << " ranges_=" << RangesToString(ranges_);
return bytes_freed >= bytes_over_hard_memory_limit;
}
-template <typename RangeClass>
-size_t SourceBufferStream<RangeClass>::FreeBuffersAfterLastAppended(
+size_t SourceBufferStream::FreeBuffersAfterLastAppended(
size_t total_bytes_to_free,
- DecodeTimestamp media_time) {
+ base::TimeDelta media_time) {
DVLOG(4) << __func__ << " highest_buffered_end_time_in_append_sequence_="
<< highest_buffered_end_time_in_append_sequence_.InMicroseconds()
<< "us media_time=" << media_time.InMicroseconds() << "us";
- DecodeTimestamp remove_range_start =
+ base::TimeDelta remove_range_start =
highest_buffered_end_time_in_append_sequence_;
if (last_appended_buffer_is_keyframe_)
remove_range_start += GetMaxInterbufferDistance();
- DecodeTimestamp remove_range_start_keyframe = FindKeyframeAfterTimestamp(
- remove_range_start);
- if (remove_range_start_keyframe != kNoDecodeTimestamp())
+ base::TimeDelta remove_range_start_keyframe =
+ FindKeyframeAfterTimestamp(remove_range_start);
+ if (remove_range_start_keyframe != kNoTimestamp)
remove_range_start = remove_range_start_keyframe;
if (remove_range_start >= media_time)
return 0;
- DecodeTimestamp remove_range_end;
+ base::TimeDelta remove_range_end;
size_t bytes_freed = GetRemovalRange(remove_range_start,
media_time,
total_bytes_to_free,
&remove_range_end);
if (bytes_freed > 0) {
- DVLOG(4) << __func__ << " removing ["
- << remove_range_start.ToPresentationTime().InMicroseconds()
- << "us;" << remove_range_end.ToPresentationTime().InMicroseconds()
- << "us]";
- Remove(remove_range_start.ToPresentationTime(),
- remove_range_end.ToPresentationTime(),
- media_time.ToPresentationTime());
+ DVLOG(4) << __func__ << " removing [" << remove_range_start.InMicroseconds()
+ << "us;" << remove_range_end.InMicroseconds() << "us]";
+ Remove(remove_range_start, remove_range_end, media_time);
}
return bytes_freed;
}
-template <typename RangeClass>
-size_t SourceBufferStream<RangeClass>::GetRemovalRange(
- DecodeTimestamp start_timestamp,
- DecodeTimestamp end_timestamp,
+size_t SourceBufferStream::GetRemovalRange(
+ base::TimeDelta start_timestamp,
+ base::TimeDelta end_timestamp,
size_t total_bytes_to_free,
- DecodeTimestamp* removal_end_timestamp) {
- DCHECK(start_timestamp >= DecodeTimestamp())
+ base::TimeDelta* removal_end_timestamp) {
+ DCHECK(start_timestamp >= base::TimeDelta())
<< start_timestamp.InMicroseconds() << "us";
DCHECK(start_timestamp < end_timestamp)
<< "start " << start_timestamp.InMicroseconds() << "us, end "
@@ -1102,25 +988,23 @@ size_t SourceBufferStream<RangeClass>::GetRemovalRange(
for (auto itr = ranges_.begin();
itr != ranges_.end() && bytes_freed < total_bytes_to_free; ++itr) {
- RangeClass* range = itr->get();
- if (RangeGetStartTimestamp(range) >= end_timestamp)
+ SourceBufferRange* range = itr->get();
+ if (range->GetStartTimestamp() >= end_timestamp)
break;
- if (RangeGetEndTimestamp(range) < start_timestamp)
+ if (range->GetEndTimestamp() < start_timestamp)
continue;
size_t bytes_to_free = total_bytes_to_free - bytes_freed;
- size_t bytes_removed =
- RangeGetRemovalGOP(range, start_timestamp, end_timestamp, bytes_to_free,
- removal_end_timestamp);
+ size_t bytes_removed = range->GetRemovalGOP(
+ start_timestamp, end_timestamp, bytes_to_free, removal_end_timestamp);
bytes_freed += bytes_removed;
}
return bytes_freed;
}
-template <typename RangeClass>
-size_t SourceBufferStream<RangeClass>::FreeBuffers(size_t total_bytes_to_free,
- DecodeTimestamp media_time,
- bool reverse_direction) {
+size_t SourceBufferStream::FreeBuffers(size_t total_bytes_to_free,
+ base::TimeDelta media_time,
+ bool reverse_direction) {
TRACE_EVENT2("media", "SourceBufferStream::FreeBuffers",
"total bytes to free", total_bytes_to_free,
"reverse direction", reverse_direction);
@@ -1130,10 +1014,10 @@ size_t SourceBufferStream<RangeClass>::FreeBuffers(size_t total_bytes_to_free,
// This range will save the last GOP appended to |range_for_next_append_|
// if the buffers surrounding it get deleted during garbage collection.
- std::unique_ptr<RangeClass> new_range_for_append;
+ std::unique_ptr<SourceBufferRange> new_range_for_append;
while (!ranges_.empty() && bytes_freed < total_bytes_to_free) {
- RangeClass* current_range = NULL;
+ SourceBufferRange* current_range = NULL;
BufferQueue buffers;
size_t bytes_deleted = 0;
@@ -1157,7 +1041,7 @@ size_t SourceBufferStream<RangeClass>::FreeBuffers(size_t total_bytes_to_free,
// FirstGOPContainsNextBufferPosition() is useful here especially if
// |!seek_pending_| to protect against DeleteGOPFromFront() if
// FirstGOPEarlierThanMediaTime() was insufficient alone.
- if (!RangeFirstGOPEarlierThanMediaTime(current_range, media_time) ||
+ if (!current_range->FirstGOPEarlierThanMediaTime(media_time) ||
current_range->FirstGOPContainsNextBufferPosition()) {
// We have removed all data up to the GOP that contains current playback
// position, we can't delete any further.
@@ -1172,13 +1056,17 @@ size_t SourceBufferStream<RangeClass>::FreeBuffers(size_t total_bytes_to_free,
}
// Check to see if we've just deleted the GOP that was last appended.
- DecodeTimestamp end_timestamp = BufferGetTimestamp(buffers.back());
+ base::TimeDelta end_timestamp = buffers.back()->timestamp();
if (end_timestamp == last_appended_buffer_timestamp_) {
- DCHECK(last_appended_buffer_timestamp_ != kNoDecodeTimestamp());
+ DCHECK(last_appended_buffer_timestamp_ != kNoTimestamp);
DCHECK(!new_range_for_append);
// Create a new range containing these buffers.
- new_range_for_append = RangeNew(buffers, kNoDecodeTimestamp());
+ new_range_for_append = std::make_unique<SourceBufferRange>(
+ TypeToGapPolicy(GetType()), buffers, kNoTimestamp,
+ base::BindRepeating(&SourceBufferStream::GetMaxInterbufferDistance,
+ base::Unretained(this)));
+
range_for_next_append_ = ranges_.end();
} else {
bytes_freed += bytes_deleted;
@@ -1220,17 +1108,13 @@ size_t SourceBufferStream<RangeClass>::FreeBuffers(size_t total_bytes_to_free,
return bytes_freed;
}
-template <typename RangeClass>
-void SourceBufferStream<RangeClass>::TrimSpliceOverlap(
- const BufferQueue& new_buffers) {
+void SourceBufferStream::TrimSpliceOverlap(const BufferQueue& new_buffers) {
DCHECK(!new_buffers.empty());
DCHECK_EQ(SourceBufferStreamType::kAudio, GetType());
// Find the overlapped range (if any).
const base::TimeDelta splice_timestamp = new_buffers.front()->timestamp();
- const DecodeTimestamp splice_dts =
- DecodeTimestamp::FromPresentationTime(splice_timestamp);
- auto range_itr = FindExistingRangeFor(splice_dts);
+ auto range_itr = FindExistingRangeFor(splice_timestamp);
if (range_itr == ranges_.end()) {
DVLOG(3) << __func__ << " No splice trimming. No range overlap at time "
<< splice_timestamp.InMicroseconds();
@@ -1239,13 +1123,14 @@ void SourceBufferStream<RangeClass>::TrimSpliceOverlap(
// Search for overlapped buffer needs exclusive end value. Choosing smallest
// possible value.
- const DecodeTimestamp end_dts =
- splice_dts + base::TimeDelta::FromMicroseconds(1);
+ const base::TimeDelta end_pts =
+ splice_timestamp + base::TimeDelta::FromMicroseconds(1);
// Find if new buffer's start would overlap an existing buffer.
BufferQueue overlapped_buffers;
- if (!RangeGetBuffersInRange(range_itr->get(), splice_dts, end_dts,
- &overlapped_buffers)) {
+ if (!(*range_itr)
+ ->GetBuffersInRange(splice_timestamp, end_pts,
+ &overlapped_buffers)) {
// Bail if no overlapped buffers found.
DVLOG(3) << __func__ << " No splice trimming. No buffer overlap at time "
<< splice_timestamp.InMicroseconds();
@@ -1340,8 +1225,7 @@ void SourceBufferStream<RangeClass>::TrimSpliceOverlap(
DVLOG(1) << __func__ << log_string.str();
}
-template <typename RangeClass>
-void SourceBufferStream<RangeClass>::PrepareRangesForNextAppend(
+void SourceBufferStream::PrepareRangesForNextAppend(
const BufferQueue& new_buffers,
BufferQueue* deleted_buffers) {
DCHECK(deleted_buffers);
@@ -1349,26 +1233,26 @@ void SourceBufferStream<RangeClass>::PrepareRangesForNextAppend(
if (GetType() == SourceBufferStreamType::kAudio)
TrimSpliceOverlap(new_buffers);
- DecodeTimestamp buffers_start_timestamp = kNoDecodeTimestamp();
- DecodeTimestamp buffers_end_timestamp = kNoDecodeTimestamp();
+ base::TimeDelta buffers_start_timestamp = kNoTimestamp;
+ base::TimeDelta buffers_end_timestamp = kNoTimestamp;
GetTimestampInterval(new_buffers, &buffers_start_timestamp,
&buffers_end_timestamp);
- DCHECK(buffers_start_timestamp != kNoDecodeTimestamp());
- DCHECK(buffers_end_timestamp != kNoDecodeTimestamp());
+ DCHECK(buffers_start_timestamp != kNoTimestamp);
+ DCHECK(buffers_end_timestamp != kNoTimestamp);
// 1. Clean up the old buffers between the last appended buffers and the
// beginning of |new_buffers|.
- if (highest_timestamp_in_append_sequence_ != kNoDecodeTimestamp() &&
+ if (highest_timestamp_in_append_sequence_ != kNoTimestamp &&
highest_timestamp_in_append_sequence_ < buffers_start_timestamp) {
RemoveInternal(highest_timestamp_in_append_sequence_,
buffers_start_timestamp, true, deleted_buffers);
}
// 2. Delete the buffers that |new_buffers| overlaps.
- // When buffering ByPts, there may be buffers in |new_buffers| with timestamp
- // before |highest_timestamp_in_append_sequence_| that shouldn't trigger
- // removal of stuff before |highest_timestamp_in_append_sequence_|.
- if (highest_timestamp_in_append_sequence_ != kNoDecodeTimestamp() &&
+ // There may be buffers in |new_buffers| with timestamp before
+ // |highest_timestamp_in_append_sequence_| that shouldn't trigger removal of
+ // stuff before |highest_timestamp_in_append_sequence_|.
+ if (highest_timestamp_in_append_sequence_ != kNoTimestamp &&
buffers_start_timestamp < highest_timestamp_in_append_sequence_) {
DCHECK(highest_timestamp_in_append_sequence_ <=
highest_buffered_end_time_in_append_sequence_);
@@ -1378,31 +1262,27 @@ void SourceBufferStream<RangeClass>::PrepareRangesForNextAppend(
if (new_coded_frame_group_) {
// Extend the deletion range earlier to the coded frame group start time if
// this is the first append in a new coded frame group.
- DCHECK(coded_frame_group_start_time_ != kNoDecodeTimestamp());
+ DCHECK(coded_frame_group_start_pts_ != kNoTimestamp);
buffers_start_timestamp =
- std::min(coded_frame_group_start_time_, buffers_start_timestamp);
+ std::min(coded_frame_group_start_pts_, buffers_start_timestamp);
}
- // Return early if no further overlap removal is needed. When buffering by PTS
- // intervals, first check if |buffers_start_timestamp| is in the middle of the
- // range; we could be overlap-appending the middle of a previous coded frame
- // sequence's range with non-keyframes prior to
- // |highest_timestamp_in_append_sequence_|, so we need to split that range
- // appropriately here and then return early. If we don't return early here,
- // overlap removal (including any necessary range splitting) will occur.
+ // Return early if no further overlap removal is needed. First check if
+ // |buffers_start_timestamp| is in the middle of the range; we could be
+ // overlap-appending the middle of a previous coded frame sequence's range
+ // with non-keyframes prior to |highest_timestamp_in_append_sequence_|, so we
+ // need to split that range appropriately here and then return early. If we
+ // don't return early here, overlap removal (including any necessary range
+ // splitting) will occur.
if (buffers_start_timestamp >= buffers_end_timestamp) {
- if (!BufferingByPts())
- return;
-
- DCHECK(highest_timestamp_in_append_sequence_ != kNoDecodeTimestamp());
+ DCHECK(highest_timestamp_in_append_sequence_ != kNoTimestamp);
DCHECK(range_for_next_append_ != ranges_.end());
- DCHECK(RangeBelongsToRange(range_for_next_append_->get(),
- buffers_start_timestamp));
+ DCHECK((*range_for_next_append_)->BelongsToRange(buffers_start_timestamp));
// Split the range at |buffers_start_timestamp|, if necessary, then return
// early.
- std::unique_ptr<RangeClass> new_range =
- RangeSplitRange(range_for_next_append_->get(), buffers_start_timestamp);
+ std::unique_ptr<SourceBufferRange> new_range =
+ (*range_for_next_append_)->SplitRange(buffers_start_timestamp);
if (!new_range)
return;
@@ -1434,7 +1314,7 @@ void SourceBufferStream<RangeClass>::PrepareRangesForNextAppend(
// A/V sync (see AudioClock).
const bool exclude_start =
highest_timestamp_in_append_sequence_ ==
- BufferGetTimestamp(new_buffers.front()) &&
+ new_buffers.front()->timestamp() &&
(GetType() == SourceBufferStreamType::kVideo ||
GetType() == SourceBufferStreamType::kText ||
last_appended_buffer_duration_ == base::TimeDelta());
@@ -1445,38 +1325,9 @@ void SourceBufferStream<RangeClass>::PrepareRangesForNextAppend(
}
// static
-template <>
-void SourceBufferStream<SourceBufferRangeByDts>::GetTimestampInterval(
- const BufferQueue& buffers,
- DecodeTimestamp* start,
- DecodeTimestamp* end) {
- *start = buffers.front()->GetDecodeTimestamp();
- *end = buffers.back()->GetDecodeTimestamp();
-
- // Set end time to include the duration of last buffer. If the duration is
- // estimated, use 1 microsecond instead to ensure frames are not accidentally
- // removed due to over-estimation.
- base::TimeDelta duration = buffers.back()->duration();
-
- // FrameProcessor should protect against unknown buffer durations.
- DCHECK_NE(duration, kNoTimestamp);
-
- if (duration > base::TimeDelta() &&
- !buffers.back()->is_duration_estimated()) {
- *end += duration;
- } else {
- // TODO(chcunningham): Emit warning when 0ms durations are not expected.
- // http://crbug.com/312836
- *end += base::TimeDelta::FromMicroseconds(1);
- }
-}
-
-// static
-template <>
-void SourceBufferStream<SourceBufferRangeByPts>::GetTimestampInterval(
- const BufferQueue& buffers,
- DecodeTimestamp* start,
- DecodeTimestamp* end) {
+void SourceBufferStream::GetTimestampInterval(const BufferQueue& buffers,
+ base::TimeDelta* start,
+ base::TimeDelta* end) {
base::TimeDelta start_pts = buffers.front()->timestamp();
base::TimeDelta end_pts = start_pts;
@@ -1497,15 +1348,13 @@ void SourceBufferStream<SourceBufferRangeByPts>::GetTimestampInterval(
}
end_pts = std::max(timestamp, end_pts);
}
- *start = DecodeTimestamp::FromPresentationTime(start_pts);
- *end = DecodeTimestamp::FromPresentationTime(end_pts);
+ *start = start_pts;
+ *end = end_pts;
}
-template <typename RangeClass>
-bool SourceBufferStream<RangeClass>::
- IsNextGopAdjacentToEndOfCurrentAppendSequence(
- DecodeTimestamp next_gop_timestamp) const {
- DecodeTimestamp upper_bound = highest_timestamp_in_append_sequence_ +
+bool SourceBufferStream::IsNextGopAdjacentToEndOfCurrentAppendSequence(
+ base::TimeDelta next_gop_timestamp) const {
+ base::TimeDelta upper_bound = highest_timestamp_in_append_sequence_ +
ComputeFudgeRoom(GetMaxInterbufferDistance());
DVLOG(4) << __func__ << " " << GetStreamTypeName()
<< " next_gop_timestamp=" << next_gop_timestamp.InMicroseconds()
@@ -1516,18 +1365,14 @@ bool SourceBufferStream<RangeClass>::
next_gop_timestamp <= upper_bound;
}
-template <typename RangeClass>
-void SourceBufferStream<RangeClass>::PruneTrackBuffer(
- const DecodeTimestamp timestamp) {
- // If we don't have the next timestamp, we don't have anything to delete.
- if (timestamp == kNoDecodeTimestamp())
- return;
+void SourceBufferStream::PruneTrackBuffer(const base::TimeDelta timestamp) {
+ DCHECK(timestamp != kNoTimestamp);
// Scan forward until we find a buffer with timestamp at or beyond the limit.
// Then remove all those at and beyond that point.
size_t goal_size = 0; // The number of buffers we will keep in the result.
for (const auto& buf : track_buffer_) {
- if (BufferGetTimestamp(buf) >= timestamp)
+ if (buf->timestamp() >= timestamp)
break;
goal_size++;
}
@@ -1537,18 +1382,18 @@ void SourceBufferStream<RangeClass>::PruneTrackBuffer(
}
DVLOG(3) << __func__ << " " << GetStreamTypeName()
- << " Removed all buffers with timestamp >= "
+ << " Removed all buffers in track buffer sequence starting with the "
+ "first at timestamp >= "
<< timestamp.InMicroseconds()
<< "us. New track buffer size:" << track_buffer_.size();
}
-template <typename RangeClass>
-void SourceBufferStream<RangeClass>::MergeWithNextRangeIfNecessary(
- const typename RangeList::iterator& range_with_new_buffers_itr) {
+void SourceBufferStream::MergeWithNextRangeIfNecessary(
+ const RangeList::iterator& range_with_new_buffers_itr) {
DCHECK(range_with_new_buffers_itr != ranges_.end());
- RangeClass* range_with_new_buffers = range_with_new_buffers_itr->get();
- typename RangeList::iterator next_range_itr = range_with_new_buffers_itr;
+ SourceBufferRange* range_with_new_buffers = range_with_new_buffers_itr->get();
+ RangeList::iterator next_range_itr = range_with_new_buffers_itr;
++next_range_itr;
if (next_range_itr == ranges_.end() ||
@@ -1573,10 +1418,9 @@ void SourceBufferStream<RangeClass>::MergeWithNextRangeIfNecessary(
DeleteAndRemoveRange(&next_range_itr);
}
-template <typename RangeClass>
-void SourceBufferStream<RangeClass>::MergeAllAdjacentRanges() {
+void SourceBufferStream::MergeAllAdjacentRanges() {
DVLOG(1) << __func__ << " " << GetStreamTypeName()
- << ": Before: ranges_=" << RangesToString<RangeClass>(ranges_);
+ << ": Before: ranges_=" << RangesToString(ranges_);
auto range_itr = ranges_.begin();
@@ -1590,11 +1434,10 @@ void SourceBufferStream<RangeClass>::MergeAllAdjacentRanges() {
}
DVLOG(1) << __func__ << " " << GetStreamTypeName()
- << ": After: ranges_=" << RangesToString<RangeClass>(ranges_);
+ << ": After: ranges_=" << RangesToString(ranges_);
}
-template <typename RangeClass>
-void SourceBufferStream<RangeClass>::Seek(base::TimeDelta timestamp) {
+void SourceBufferStream::Seek(base::TimeDelta timestamp) {
DCHECK(timestamp >= base::TimeDelta());
DVLOG(1) << __func__ << " " << GetStreamTypeName() << " ("
<< timestamp.InMicroseconds() << "us)";
@@ -1610,11 +1453,9 @@ void SourceBufferStream<RangeClass>::Seek(base::TimeDelta timestamp) {
return;
}
- DecodeTimestamp seek_dts = DecodeTimestamp::FromPresentationTime(timestamp);
-
auto itr = ranges_.end();
for (itr = ranges_.begin(); itr != ranges_.end(); ++itr) {
- if (RangeCanSeekTo(itr->get(), seek_dts))
+ if ((*itr)->CanSeekTo(timestamp))
break;
}
@@ -1622,35 +1463,34 @@ void SourceBufferStream<RangeClass>::Seek(base::TimeDelta timestamp) {
return;
if (!audio_configs_.empty()) {
- // Adjust |seek_dts| for an Opus stream backward up to the config's seek
+ // Adjust |timestamp| for an Opus stream backward up to the config's seek
// preroll, but not further than the range start time, and not at all if
// there is a config change in the middle of that preroll interval. If
- // |seek_dts| is already before the range start time, as can happen due to
+ // |timestamp| is already before the range start time, as can happen due to
// fudge room, do not adjust it.
- const auto& config =
- audio_configs_[RangeGetConfigIdAtTime(itr->get(), seek_dts)];
+ const auto& config = audio_configs_[(*itr)->GetConfigIdAtTime(timestamp)];
if (config.codec() == kCodecOpus &&
- seek_dts > RangeGetStartTimestamp(itr->get())) {
- DecodeTimestamp preroll_dts = std::max(
- seek_dts - config.seek_preroll(), RangeGetStartTimestamp(itr->get()));
- if (RangeCanSeekTo(itr->get(), preroll_dts) &&
- RangeSameConfigThruRange(itr->get(), preroll_dts, seek_dts)) {
- seek_dts = preroll_dts;
+ timestamp > (*itr)->GetStartTimestamp()) {
+ base::TimeDelta preroll_timestamp = std::max(
+ timestamp - config.seek_preroll(), (*itr)->GetStartTimestamp());
+ if ((*itr)->CanSeekTo(preroll_timestamp) &&
+ (*itr)->SameConfigThruRange(preroll_timestamp, timestamp)) {
+ timestamp = preroll_timestamp;
}
}
}
- SeekAndSetSelectedRange(itr->get(), seek_dts);
+ SeekAndSetSelectedRange(itr->get(), timestamp);
seek_pending_ = false;
}
-template <typename RangeClass>
-bool SourceBufferStream<RangeClass>::IsSeekPending() const {
+bool SourceBufferStream::IsSeekPending() const {
return seek_pending_ && !IsEndOfStreamReached();
}
-template <typename RangeClass>
-void SourceBufferStream<RangeClass>::OnSetDuration(base::TimeDelta duration) {
+// TODO(wolenetz): Disallow duration changes that truncate buffered media. See
+// https://crbug.com/623729.
+void SourceBufferStream::OnSetDuration(base::TimeDelta duration) {
DVLOG(1) << __func__ << " " << GetStreamTypeName() << " ("
<< duration.InMicroseconds() << "us)";
DCHECK(!end_of_stream_);
@@ -1658,8 +1498,8 @@ void SourceBufferStream<RangeClass>::OnSetDuration(base::TimeDelta duration) {
if (ranges_.empty())
return;
- DecodeTimestamp start = DecodeTimestamp::FromPresentationTime(duration);
- DecodeTimestamp end = RangeGetBufferedEndTimestamp(ranges_.back().get());
+ base::TimeDelta start = duration;
+ base::TimeDelta end = ranges_.back()->GetBufferedEndTimestamp();
// Trim the end if it exceeds the new duration.
if (start < end) {
@@ -1673,8 +1513,7 @@ void SourceBufferStream<RangeClass>::OnSetDuration(base::TimeDelta duration) {
}
}
-template <typename RangeClass>
-SourceBufferStreamStatus SourceBufferStream<RangeClass>::GetNextBuffer(
+SourceBufferStreamStatus SourceBufferStream::GetNextBuffer(
scoped_refptr<StreamParserBuffer>* out_buffer) {
DVLOG(2) << __func__ << " " << GetStreamTypeName();
if (!pending_buffer_.get()) {
@@ -1698,9 +1537,7 @@ SourceBufferStreamStatus SourceBufferStream<RangeClass>::GetNextBuffer(
return status;
}
-template <typename RangeClass>
-SourceBufferStreamStatus
-SourceBufferStream<RangeClass>::HandleNextBufferWithPreroll(
+SourceBufferStreamStatus SourceBufferStream::HandleNextBufferWithPreroll(
scoped_refptr<StreamParserBuffer>* out_buffer) {
// Any config change should have already been handled.
DCHECK_EQ(current_config_index_, pending_buffer_->GetConfigId());
@@ -1717,8 +1554,7 @@ SourceBufferStream<RangeClass>::HandleNextBufferWithPreroll(
return SourceBufferStreamStatus::kSuccess;
}
-template <typename RangeClass>
-SourceBufferStreamStatus SourceBufferStream<RangeClass>::GetNextBufferInternal(
+SourceBufferStreamStatus SourceBufferStream::GetNextBufferInternal(
scoped_refptr<StreamParserBuffer>* out_buffer) {
CHECK(!config_change_pending_);
@@ -1735,8 +1571,8 @@ SourceBufferStreamStatus SourceBufferStream<RangeClass>::GetNextBufferInternal(
*out_buffer = std::move(track_buffer_.front());
track_buffer_.pop_front();
WarnIfTrackBufferExhaustionSkipsForward(*out_buffer);
- highest_output_buffer_timestamp_ = std::max(
- highest_output_buffer_timestamp_, BufferGetTimestamp(*out_buffer));
+ highest_output_buffer_timestamp_ =
+ std::max(highest_output_buffer_timestamp_, (*out_buffer)->timestamp());
// If the track buffer becomes empty, then try to set the selected range
// based on the timestamp of this buffer being returned.
@@ -1768,89 +1604,66 @@ SourceBufferStreamStatus SourceBufferStream<RangeClass>::GetNextBufferInternal(
CHECK(selected_range_->GetNextBuffer(out_buffer));
WarnIfTrackBufferExhaustionSkipsForward(*out_buffer);
- highest_output_buffer_timestamp_ = std::max(highest_output_buffer_timestamp_,
- BufferGetTimestamp(*out_buffer));
+ highest_output_buffer_timestamp_ =
+ std::max(highest_output_buffer_timestamp_, (*out_buffer)->timestamp());
return SourceBufferStreamStatus::kSuccess;
}
-template <typename RangeClass>
-void SourceBufferStream<RangeClass>::WarnIfTrackBufferExhaustionSkipsForward(
+void SourceBufferStream::WarnIfTrackBufferExhaustionSkipsForward(
scoped_refptr<StreamParserBuffer> next_buffer) {
if (!just_exhausted_track_buffer_)
return;
just_exhausted_track_buffer_ = false;
DCHECK(next_buffer->is_key_frame());
- DecodeTimestamp next_output_buffer_timestamp =
- next_buffer->GetDecodeTimestamp();
+ base::TimeDelta next_output_buffer_timestamp = next_buffer->timestamp();
base::TimeDelta delta =
next_output_buffer_timestamp - highest_output_buffer_timestamp_;
if (delta > GetMaxInterbufferDistance()) {
LIMITED_MEDIA_LOG(DEBUG, media_log_, num_track_buffer_gap_warning_logs_,
kMaxTrackBufferGapWarningLogs)
- << "Media append that overlapped current playback position caused time "
- "gap in playing "
+ << "Media append that overlapped current playback position may cause "
+ "time gap in playing "
<< GetStreamTypeName() << " stream because the next keyframe is "
- << delta.InMilliseconds() << "ms beyond last overlapped frame. Media "
- "may appear temporarily frozen.";
+ << delta.InMilliseconds()
+ << "ms beyond last overlapped frame. Media may appear temporarily "
+ "frozen.";
}
}
-template <>
-DecodeTimestamp
-SourceBufferStream<SourceBufferRangeByDts>::GetNextBufferTimestamp() {
+base::TimeDelta SourceBufferStream::GetNextBufferTimestamp() {
if (!track_buffer_.empty())
- return track_buffer_.front()->GetDecodeTimestamp();
+ return track_buffer_.front()->timestamp();
if (!selected_range_)
- return kNoDecodeTimestamp();
+ return kNoTimestamp;
DCHECK(selected_range_->HasNextBufferPosition());
return selected_range_->GetNextTimestamp();
}
-template <>
-DecodeTimestamp
-SourceBufferStream<SourceBufferRangeByPts>::GetNextBufferTimestamp() {
- if (!track_buffer_.empty())
- return DecodeTimestamp::FromPresentationTime(
- track_buffer_.front()->timestamp());
-
- if (!selected_range_)
- return kNoDecodeTimestamp();
-
- DCHECK(selected_range_->HasNextBufferPosition());
- return DecodeTimestamp::FromPresentationTime(
- selected_range_->GetNextTimestamp());
-}
-
-template <typename RangeClass>
-typename SourceBufferStream<RangeClass>::RangeList::iterator
-SourceBufferStream<RangeClass>::FindExistingRangeFor(
- DecodeTimestamp start_timestamp) {
+SourceBufferStream::RangeList::iterator
+SourceBufferStream::FindExistingRangeFor(base::TimeDelta start_timestamp) {
for (auto itr = ranges_.begin(); itr != ranges_.end(); ++itr) {
- if (RangeBelongsToRange(itr->get(), start_timestamp))
+ if ((*itr)->BelongsToRange(start_timestamp))
return itr;
}
return ranges_.end();
}
-template <typename RangeClass>
-typename SourceBufferStream<RangeClass>::RangeList::iterator
-SourceBufferStream<RangeClass>::AddToRanges(
- std::unique_ptr<RangeClass> new_range) {
- DecodeTimestamp start_timestamp = RangeGetStartTimestamp(new_range.get());
+SourceBufferStream::RangeList::iterator SourceBufferStream::AddToRanges(
+ std::unique_ptr<SourceBufferRange> new_range) {
+ base::TimeDelta start_timestamp = new_range->GetStartTimestamp();
auto itr = ranges_.end();
for (itr = ranges_.begin(); itr != ranges_.end(); ++itr) {
- if (RangeGetStartTimestamp(itr->get()) > start_timestamp)
+ if ((*itr)->GetStartTimestamp() > start_timestamp)
break;
}
return ranges_.insert(itr, std::move(new_range));
}
-template <typename RangeClass>
-typename SourceBufferStream<RangeClass>::RangeList::iterator
-SourceBufferStream<RangeClass>::GetSelectedRangeItr() {
+SourceBufferStream::RangeList::iterator
+SourceBufferStream::GetSelectedRangeItr() {
DCHECK(selected_range_);
auto itr = ranges_.end();
for (itr = ranges_.begin(); itr != ranges_.end(); ++itr) {
@@ -1861,17 +1674,15 @@ SourceBufferStream<RangeClass>::GetSelectedRangeItr() {
return itr;
}
-template <typename RangeClass>
-void SourceBufferStream<RangeClass>::SeekAndSetSelectedRange(
- RangeClass* range,
- DecodeTimestamp seek_timestamp) {
+void SourceBufferStream::SeekAndSetSelectedRange(
+ SourceBufferRange* range,
+ base::TimeDelta seek_timestamp) {
if (range)
- RangeSeek(range, seek_timestamp);
+ range->Seek(seek_timestamp);
SetSelectedRange(range);
}
-template <typename RangeClass>
-void SourceBufferStream<RangeClass>::SetSelectedRange(RangeClass* range) {
+void SourceBufferStream::SetSelectedRange(SourceBufferRange* range) {
DVLOG(1) << __func__ << " " << GetStreamTypeName() << ": " << selected_range_
<< " " << (selected_range_ ? RangeToString(*selected_range_) : "")
<< " -> " << range << " " << (range ? RangeToString(*range) : "");
@@ -1881,57 +1692,46 @@ void SourceBufferStream<RangeClass>::SetSelectedRange(RangeClass* range) {
selected_range_ = range;
}
-template <typename RangeClass>
-Ranges<base::TimeDelta> SourceBufferStream<RangeClass>::GetBufferedTime()
- const {
+Ranges<base::TimeDelta> SourceBufferStream::GetBufferedTime() const {
Ranges<base::TimeDelta> ranges;
for (auto itr = ranges_.begin(); itr != ranges_.end(); ++itr) {
- ranges.Add(RangeGetStartTimestamp(itr->get()).ToPresentationTime(),
- RangeGetBufferedEndTimestamp(itr->get()).ToPresentationTime());
+ ranges.Add((*itr)->GetStartTimestamp(), (*itr)->GetBufferedEndTimestamp());
}
return ranges;
}
-template <typename RangeClass>
-base::TimeDelta
-SourceBufferStream<RangeClass>::GetHighestPresentationTimestamp() const {
+base::TimeDelta SourceBufferStream::GetHighestPresentationTimestamp() const {
if (ranges_.empty())
return base::TimeDelta();
- return RangeGetEndTimestamp(ranges_.back().get()).ToPresentationTime();
+ return ranges_.back()->GetEndTimestamp();
}
-template <typename RangeClass>
-base::TimeDelta SourceBufferStream<RangeClass>::GetBufferedDuration() const {
+base::TimeDelta SourceBufferStream::GetBufferedDuration() const {
if (ranges_.empty())
return base::TimeDelta();
- return RangeGetBufferedEndTimestamp(ranges_.back().get())
- .ToPresentationTime();
+ return ranges_.back()->GetBufferedEndTimestamp();
}
-template <typename RangeClass>
-size_t SourceBufferStream<RangeClass>::GetBufferedSize() const {
+size_t SourceBufferStream::GetBufferedSize() const {
size_t ranges_size = 0;
for (const auto& range_ptr : ranges_)
ranges_size += range_ptr->size_in_bytes();
return ranges_size;
}
-template <typename RangeClass>
-void SourceBufferStream<RangeClass>::MarkEndOfStream() {
+void SourceBufferStream::MarkEndOfStream() {
DCHECK(!end_of_stream_);
end_of_stream_ = true;
}
-template <typename RangeClass>
-void SourceBufferStream<RangeClass>::UnmarkEndOfStream() {
+void SourceBufferStream::UnmarkEndOfStream() {
DCHECK(end_of_stream_);
end_of_stream_ = false;
}
-template <typename RangeClass>
-bool SourceBufferStream<RangeClass>::IsEndOfStreamReached() const {
+bool SourceBufferStream::IsEndOfStreamReached() const {
if (!end_of_stream_ || !track_buffer_.empty())
return false;
@@ -1940,7 +1740,7 @@ bool SourceBufferStream<RangeClass>::IsEndOfStreamReached() const {
if (seek_pending_) {
base::TimeDelta last_range_end_time =
- RangeGetBufferedEndTimestamp(ranges_.back().get()).ToPresentationTime();
+ ranges_.back()->GetBufferedEndTimestamp();
return seek_buffer_timestamp_ >= last_range_end_time;
}
@@ -1950,9 +1750,7 @@ bool SourceBufferStream<RangeClass>::IsEndOfStreamReached() const {
return selected_range_ == ranges_.back().get();
}
-template <typename RangeClass>
-const AudioDecoderConfig&
-SourceBufferStream<RangeClass>::GetCurrentAudioDecoderConfig() {
+const AudioDecoderConfig& SourceBufferStream::GetCurrentAudioDecoderConfig() {
if (config_change_pending_)
CompleteConfigChange();
// Trying to track down crash. http://crbug.com/715761
@@ -1961,9 +1759,7 @@ SourceBufferStream<RangeClass>::GetCurrentAudioDecoderConfig() {
return audio_configs_[current_config_index_];
}
-template <typename RangeClass>
-const VideoDecoderConfig&
-SourceBufferStream<RangeClass>::GetCurrentVideoDecoderConfig() {
+const VideoDecoderConfig& SourceBufferStream::GetCurrentVideoDecoderConfig() {
if (config_change_pending_)
CompleteConfigChange();
// Trying to track down crash. http://crbug.com/715761
@@ -1972,22 +1768,16 @@ SourceBufferStream<RangeClass>::GetCurrentVideoDecoderConfig() {
return video_configs_[current_config_index_];
}
-template <typename RangeClass>
-const TextTrackConfig&
-SourceBufferStream<RangeClass>::GetCurrentTextTrackConfig() {
+const TextTrackConfig& SourceBufferStream::GetCurrentTextTrackConfig() {
return text_track_config_;
}
-template <typename RangeClass>
-base::TimeDelta SourceBufferStream<RangeClass>::GetMaxInterbufferDistance()
- const {
+base::TimeDelta SourceBufferStream::GetMaxInterbufferDistance() const {
return max_interbuffer_distance_;
}
-template <typename RangeClass>
-bool SourceBufferStream<RangeClass>::UpdateAudioConfig(
- const AudioDecoderConfig& config,
- bool allow_codec_change) {
+bool SourceBufferStream::UpdateAudioConfig(const AudioDecoderConfig& config,
+ bool allow_codec_change) {
DCHECK(!audio_configs_.empty());
DCHECK(video_configs_.empty());
DVLOG(3) << "UpdateAudioConfig.";
@@ -2020,10 +1810,8 @@ bool SourceBufferStream<RangeClass>::UpdateAudioConfig(
return true;
}
-template <typename RangeClass>
-bool SourceBufferStream<RangeClass>::UpdateVideoConfig(
- const VideoDecoderConfig& config,
- bool allow_codec_change) {
+bool SourceBufferStream::UpdateVideoConfig(const VideoDecoderConfig& config,
+ bool allow_codec_change) {
DCHECK(!video_configs_.empty());
DCHECK(audio_configs_.empty());
DVLOG(3) << "UpdateVideoConfig.";
@@ -2056,8 +1844,7 @@ bool SourceBufferStream<RangeClass>::UpdateVideoConfig(
return true;
}
-template <typename RangeClass>
-void SourceBufferStream<RangeClass>::CompleteConfigChange() {
+void SourceBufferStream::CompleteConfigChange() {
config_change_pending_ = false;
if (!track_buffer_.empty()) {
@@ -2069,9 +1856,8 @@ void SourceBufferStream<RangeClass>::CompleteConfigChange() {
current_config_index_ = selected_range_->GetNextConfigId();
}
-template <typename RangeClass>
-void SourceBufferStream<RangeClass>::SetSelectedRangeIfNeeded(
- const DecodeTimestamp timestamp) {
+void SourceBufferStream::SetSelectedRangeIfNeeded(
+ const base::TimeDelta timestamp) {
DVLOG(2) << __func__ << " " << GetStreamTypeName() << "("
<< timestamp.InMicroseconds() << "us)";
@@ -2085,12 +1871,12 @@ void SourceBufferStream<RangeClass>::SetSelectedRangeIfNeeded(
return;
}
- DecodeTimestamp start_timestamp = timestamp;
+ base::TimeDelta start_timestamp = timestamp;
// If the next buffer timestamp is not known then use a timestamp just after
// the timestamp on the last buffer returned by GetNextBuffer().
- if (start_timestamp == kNoDecodeTimestamp()) {
- if (highest_output_buffer_timestamp_ == kNoDecodeTimestamp()) {
+ if (start_timestamp == kNoTimestamp) {
+ if (highest_output_buffer_timestamp_ == kNoTimestamp) {
DVLOG(2) << __func__ << " " << GetStreamTypeName()
<< " no previous output timestamp";
return;
@@ -2100,11 +1886,11 @@ void SourceBufferStream<RangeClass>::SetSelectedRangeIfNeeded(
highest_output_buffer_timestamp_ + base::TimeDelta::FromMicroseconds(1);
}
- DecodeTimestamp seek_timestamp =
+ base::TimeDelta seek_timestamp =
FindNewSelectedRangeSeekTimestamp(start_timestamp);
// If we don't have buffered data to seek to, then return.
- if (seek_timestamp == kNoDecodeTimestamp()) {
+ if (seek_timestamp == kNoTimestamp) {
DVLOG(2) << __func__ << " " << GetStreamTypeName()
<< " couldn't find new selected range seek timestamp";
return;
@@ -2115,63 +1901,59 @@ void SourceBufferStream<RangeClass>::SetSelectedRangeIfNeeded(
seek_timestamp);
}
-template <typename RangeClass>
-DecodeTimestamp
-SourceBufferStream<RangeClass>::FindNewSelectedRangeSeekTimestamp(
- const DecodeTimestamp start_timestamp) {
- DCHECK(start_timestamp != kNoDecodeTimestamp());
- DCHECK(start_timestamp >= DecodeTimestamp());
+base::TimeDelta SourceBufferStream::FindNewSelectedRangeSeekTimestamp(
+ const base::TimeDelta start_timestamp) {
+ DCHECK(start_timestamp != kNoTimestamp);
+ DCHECK(start_timestamp >= base::TimeDelta());
auto itr = ranges_.begin();
// When checking a range to see if it has or begins soon enough after
// |start_timestamp|, use the fudge room to determine "soon enough".
- DecodeTimestamp start_timestamp_plus_fudge =
+ base::TimeDelta start_timestamp_plus_fudge =
start_timestamp + ComputeFudgeRoom(GetMaxInterbufferDistance());
// Multiple ranges could be within the fudge room, because the fudge room is
// dynamic based on max inter-buffer distance seen so far. Optimistically
// check the earliest ones first.
for (; itr != ranges_.end(); ++itr) {
- DecodeTimestamp range_start = RangeGetStartTimestamp(itr->get());
+ base::TimeDelta range_start = (*itr)->GetStartTimestamp();
if (range_start >= start_timestamp_plus_fudge)
break;
- if (RangeGetEndTimestamp(itr->get()) < start_timestamp)
+ if ((*itr)->GetEndTimestamp() < start_timestamp)
continue;
- DecodeTimestamp search_timestamp = start_timestamp;
+ base::TimeDelta search_timestamp = start_timestamp;
if (start_timestamp < range_start &&
start_timestamp_plus_fudge >= range_start) {
search_timestamp = range_start;
}
- DecodeTimestamp keyframe_timestamp =
- RangeNextKeyframeTimestamp(itr->get(), search_timestamp);
- if (keyframe_timestamp != kNoDecodeTimestamp())
+ base::TimeDelta keyframe_timestamp =
+ (*itr)->NextKeyframeTimestamp(search_timestamp);
+ if (keyframe_timestamp != kNoTimestamp)
return keyframe_timestamp;
}
DVLOG(2) << __func__ << " " << GetStreamTypeName()
- << " no buffered data for dts=" << start_timestamp.InMicroseconds()
+ << " no buffered data for pts=" << start_timestamp.InMicroseconds()
<< "us";
- return kNoDecodeTimestamp();
+ return kNoTimestamp;
}
-template <typename RangeClass>
-DecodeTimestamp SourceBufferStream<RangeClass>::FindKeyframeAfterTimestamp(
- const DecodeTimestamp timestamp) {
- DCHECK(timestamp != kNoDecodeTimestamp());
+base::TimeDelta SourceBufferStream::FindKeyframeAfterTimestamp(
+ const base::TimeDelta timestamp) {
+ DCHECK(timestamp != kNoTimestamp);
auto itr = FindExistingRangeFor(timestamp);
if (itr == ranges_.end())
- return kNoDecodeTimestamp();
+ return kNoTimestamp;
// First check for a keyframe timestamp >= |timestamp|
// in the current range.
- return RangeNextKeyframeTimestamp(itr->get(), timestamp);
+ return (*itr)->NextKeyframeTimestamp(timestamp);
}
-template <typename RangeClass>
-std::string SourceBufferStream<RangeClass>::GetStreamTypeName() const {
+std::string SourceBufferStream::GetStreamTypeName() const {
switch (GetType()) {
case SourceBufferStreamType::kAudio:
return "AUDIO";
@@ -2184,8 +1966,7 @@ std::string SourceBufferStream<RangeClass>::GetStreamTypeName() const {
return "";
}
-template <typename RangeClass>
-SourceBufferStreamType SourceBufferStream<RangeClass>::GetType() const {
+SourceBufferStreamType SourceBufferStream::GetType() const {
if (!audio_configs_.empty())
return SourceBufferStreamType::kAudio;
if (!video_configs_.empty())
@@ -2194,9 +1975,7 @@ SourceBufferStreamType SourceBufferStream<RangeClass>::GetType() const {
return SourceBufferStreamType::kText;
}
-template <typename RangeClass>
-void SourceBufferStream<RangeClass>::DeleteAndRemoveRange(
- typename RangeList::iterator* itr) {
+void SourceBufferStream::DeleteAndRemoveRange(RangeList::iterator* itr) {
DVLOG(1) << __func__;
DCHECK(*itr != ranges_.end());
@@ -2214,8 +1993,7 @@ void SourceBufferStream<RangeClass>::DeleteAndRemoveRange(
*itr = ranges_.erase(*itr);
}
-template <typename RangeClass>
-bool SourceBufferStream<RangeClass>::SetPendingBuffer(
+bool SourceBufferStream::SetPendingBuffer(
scoped_refptr<StreamParserBuffer>* out_buffer) {
DCHECK(out_buffer->get());
DCHECK(!pending_buffer_.get());
@@ -2230,336 +2008,4 @@ bool SourceBufferStream<RangeClass>::SetPendingBuffer(
return true;
}
-template <>
-constexpr bool SourceBufferStream<SourceBufferRangeByDts>::BufferingByPts() {
- return false;
-}
-
-template <>
-constexpr bool SourceBufferStream<SourceBufferRangeByPts>::BufferingByPts() {
- return true;
-}
-
-template <>
-DecodeTimestamp SourceBufferStream<SourceBufferRangeByDts>::BufferGetTimestamp(
- scoped_refptr<StreamParserBuffer> buffer) {
- return buffer->GetDecodeTimestamp();
-}
-
-template <>
-DecodeTimestamp SourceBufferStream<SourceBufferRangeByPts>::BufferGetTimestamp(
- scoped_refptr<StreamParserBuffer> buffer) {
- return DecodeTimestamp::FromPresentationTime(buffer->timestamp());
-}
-
-template <>
-void SourceBufferStream<SourceBufferRangeByDts>::RangeAppendBuffersToEnd(
- SourceBufferRangeByDts* range,
- const BufferQueue& buffers,
- DecodeTimestamp group_start_time) {
- range->AppendBuffersToEnd(buffers, group_start_time);
-}
-
-template <>
-void SourceBufferStream<SourceBufferRangeByPts>::RangeAppendBuffersToEnd(
- SourceBufferRangeByPts* range,
- const BufferQueue& buffers,
- DecodeTimestamp group_start_time) {
- range->AppendBuffersToEnd(buffers, group_start_time.ToPresentationTime());
-}
-
-template <>
-DecodeTimestamp
-SourceBufferStream<SourceBufferRangeByDts>::RangeGetBufferedEndTimestamp(
- SourceBufferRangeByDts* range) const {
- return range->GetBufferedEndTimestamp();
-}
-
-template <>
-DecodeTimestamp
-SourceBufferStream<SourceBufferRangeByPts>::RangeGetBufferedEndTimestamp(
- SourceBufferRangeByPts* range) const {
- return DecodeTimestamp::FromPresentationTime(
- range->GetBufferedEndTimestamp());
-}
-
-template <>
-DecodeTimestamp
-SourceBufferStream<SourceBufferRangeByDts>::RangeGetEndTimestamp(
- SourceBufferRangeByDts* range) const {
- return range->GetEndTimestamp();
-}
-
-template <>
-DecodeTimestamp
-SourceBufferStream<SourceBufferRangeByPts>::RangeGetEndTimestamp(
- SourceBufferRangeByPts* range) const {
- return DecodeTimestamp::FromPresentationTime(range->GetEndTimestamp());
-}
-
-template <>
-DecodeTimestamp
-SourceBufferStream<SourceBufferRangeByDts>::RangeGetStartTimestamp(
- SourceBufferRangeByDts* range) const {
- return range->GetStartTimestamp();
-}
-
-template <>
-DecodeTimestamp
-SourceBufferStream<SourceBufferRangeByPts>::RangeGetStartTimestamp(
- SourceBufferRangeByPts* range) const {
- return DecodeTimestamp::FromPresentationTime(range->GetStartTimestamp());
-}
-
-template <>
-bool SourceBufferStream<SourceBufferRangeByDts>::RangeCanSeekTo(
- SourceBufferRangeByDts* range,
- DecodeTimestamp seek_time) const {
- return range->CanSeekTo(seek_time);
-}
-
-template <>
-bool SourceBufferStream<SourceBufferRangeByPts>::RangeCanSeekTo(
- SourceBufferRangeByPts* range,
- DecodeTimestamp seek_time) const {
- return range->CanSeekTo(seek_time.ToPresentationTime());
-}
-
-template <>
-int SourceBufferStream<SourceBufferRangeByDts>::RangeGetConfigIdAtTime(
- SourceBufferRangeByDts* range,
- DecodeTimestamp config_time) {
- return range->GetConfigIdAtTime(config_time);
-}
-
-template <>
-int SourceBufferStream<SourceBufferRangeByPts>::RangeGetConfigIdAtTime(
- SourceBufferRangeByPts* range,
- DecodeTimestamp config_time) {
- return range->GetConfigIdAtTime(config_time.ToPresentationTime());
-}
-
-template <>
-bool SourceBufferStream<SourceBufferRangeByDts>::RangeSameConfigThruRange(
- SourceBufferRangeByDts* range,
- DecodeTimestamp start,
- DecodeTimestamp end) {
- return range->SameConfigThruRange(start, end);
-}
-
-template <>
-bool SourceBufferStream<SourceBufferRangeByPts>::RangeSameConfigThruRange(
- SourceBufferRangeByPts* range,
- DecodeTimestamp start,
- DecodeTimestamp end) {
- return range->SameConfigThruRange(start.ToPresentationTime(),
- end.ToPresentationTime());
-}
-
-template <>
-bool SourceBufferStream<SourceBufferRangeByDts>::
- RangeFirstGOPEarlierThanMediaTime(SourceBufferRangeByDts* range,
- DecodeTimestamp media_time) const {
- return range->FirstGOPEarlierThanMediaTime(media_time);
-}
-
-template <>
-bool SourceBufferStream<SourceBufferRangeByPts>::
- RangeFirstGOPEarlierThanMediaTime(SourceBufferRangeByPts* range,
- DecodeTimestamp media_time) const {
- return range->FirstGOPEarlierThanMediaTime(media_time.ToPresentationTime());
-}
-
-template <>
-size_t SourceBufferStream<SourceBufferRangeByDts>::RangeGetRemovalGOP(
- SourceBufferRangeByDts* range,
- DecodeTimestamp start_timestamp,
- DecodeTimestamp end_timestamp,
- size_t bytes_to_free,
- DecodeTimestamp* end_removal_timestamp) {
- return range->GetRemovalGOP(start_timestamp, end_timestamp, bytes_to_free,
- end_removal_timestamp);
-}
-
-template <>
-size_t SourceBufferStream<SourceBufferRangeByPts>::RangeGetRemovalGOP(
- SourceBufferRangeByPts* range,
- DecodeTimestamp start_timestamp,
- DecodeTimestamp end_timestamp,
- size_t bytes_to_free,
- DecodeTimestamp* end_removal_timestamp) {
- base::TimeDelta end_removal_pts = end_removal_timestamp->ToPresentationTime();
- size_t result = range->GetRemovalGOP(start_timestamp.ToPresentationTime(),
- end_timestamp.ToPresentationTime(),
- bytes_to_free, &end_removal_pts);
- *end_removal_timestamp =
- DecodeTimestamp::FromPresentationTime(end_removal_pts);
- return result;
-}
-
-template <>
-bool SourceBufferStream<SourceBufferRangeByDts>::RangeBelongsToRange(
- SourceBufferRangeByDts* range,
- DecodeTimestamp timestamp) const {
- return range->BelongsToRange(timestamp);
-}
-
-template <>
-bool SourceBufferStream<SourceBufferRangeByPts>::RangeBelongsToRange(
- SourceBufferRangeByPts* range,
- DecodeTimestamp timestamp) const {
- return range->BelongsToRange(timestamp.ToPresentationTime());
-}
-
-template <>
-DecodeTimestamp SourceBufferStream<SourceBufferRangeByDts>::
- RangeFindHighestBufferedTimestampAtOrBefore(
- SourceBufferRangeByDts* range,
- DecodeTimestamp timestamp) const {
- return range->FindHighestBufferedTimestampAtOrBefore(timestamp);
-}
-
-template <>
-DecodeTimestamp SourceBufferStream<SourceBufferRangeByPts>::
- RangeFindHighestBufferedTimestampAtOrBefore(
- SourceBufferRangeByPts* range,
- DecodeTimestamp timestamp) const {
- return DecodeTimestamp::FromPresentationTime(
- range->FindHighestBufferedTimestampAtOrBefore(
- timestamp.ToPresentationTime()));
-}
-
-template <>
-void SourceBufferStream<SourceBufferRangeByDts>::RangeSeek(
- SourceBufferRangeByDts* range,
- DecodeTimestamp timestamp) {
- range->Seek(timestamp);
-}
-
-template <>
-void SourceBufferStream<SourceBufferRangeByPts>::RangeSeek(
- SourceBufferRangeByPts* range,
- DecodeTimestamp timestamp) {
- range->Seek(timestamp.ToPresentationTime());
-}
-
-template <>
-DecodeTimestamp
-SourceBufferStream<SourceBufferRangeByDts>::RangeNextKeyframeTimestamp(
- SourceBufferRangeByDts* range,
- DecodeTimestamp timestamp) {
- return range->NextKeyframeTimestamp(timestamp);
-}
-
-template <>
-DecodeTimestamp
-SourceBufferStream<SourceBufferRangeByPts>::RangeNextKeyframeTimestamp(
- SourceBufferRangeByPts* range,
- DecodeTimestamp timestamp) {
- return DecodeTimestamp::FromPresentationTime(
- range->NextKeyframeTimestamp(timestamp.ToPresentationTime()));
-}
-
-template <>
-bool SourceBufferStream<SourceBufferRangeByDts>::RangeGetBuffersInRange(
- SourceBufferRangeByDts* range,
- DecodeTimestamp start,
- DecodeTimestamp end,
- BufferQueue* buffers) {
- return range->GetBuffersInRange(start, end, buffers);
-}
-
-template <>
-bool SourceBufferStream<SourceBufferRangeByPts>::RangeGetBuffersInRange(
- SourceBufferRangeByPts* range,
- DecodeTimestamp start,
- DecodeTimestamp end,
- BufferQueue* buffers) {
- return range->GetBuffersInRange(start.ToPresentationTime(),
- end.ToPresentationTime(), buffers);
-}
-
-template <>
-std::unique_ptr<SourceBufferRangeByDts>
-SourceBufferStream<SourceBufferRangeByDts>::RangeSplitRange(
- SourceBufferRangeByDts* range,
- DecodeTimestamp timestamp) {
- return range->SplitRange(timestamp);
-}
-
-template <>
-std::unique_ptr<SourceBufferRangeByPts>
-SourceBufferStream<SourceBufferRangeByPts>::RangeSplitRange(
- SourceBufferRangeByPts* range,
- DecodeTimestamp timestamp) {
- return range->SplitRange(timestamp.ToPresentationTime());
-}
-
-template <>
-bool SourceBufferStream<SourceBufferRangeByDts>::RangeTruncateAt(
- SourceBufferRangeByDts* range,
- DecodeTimestamp timestamp,
- BufferQueue* deleted_buffers,
- bool is_exclusive) {
- return range->TruncateAt(timestamp, deleted_buffers, is_exclusive);
-}
-
-template <>
-bool SourceBufferStream<SourceBufferRangeByPts>::RangeTruncateAt(
- SourceBufferRangeByPts* range,
- DecodeTimestamp timestamp,
- BufferQueue* deleted_buffers,
- bool is_exclusive) {
- return range->TruncateAt(timestamp.ToPresentationTime(), deleted_buffers,
- is_exclusive);
-}
-
-template <>
-DecodeTimestamp
-SourceBufferStream<SourceBufferRangeByDts>::RangeKeyframeBeforeTimestamp(
- SourceBufferRangeByDts* range,
- DecodeTimestamp timestamp) {
- return range->KeyframeBeforeTimestamp(timestamp);
-}
-
-template <>
-DecodeTimestamp
-SourceBufferStream<SourceBufferRangeByPts>::RangeKeyframeBeforeTimestamp(
- SourceBufferRangeByPts* range,
- DecodeTimestamp timestamp) {
- return DecodeTimestamp::FromPresentationTime(
- range->KeyframeBeforeTimestamp(timestamp.ToPresentationTime()));
-}
-
-template <>
-std::unique_ptr<SourceBufferRangeByDts>
-SourceBufferStream<SourceBufferRangeByDts>::RangeNew(
- const BufferQueue& new_buffers,
- DecodeTimestamp range_start_time) {
- return std::make_unique<SourceBufferRangeByDts>(
- TypeToGapPolicy<SourceBufferRangeByDts>(GetType()), new_buffers,
- range_start_time,
- base::BindRepeating(
- &SourceBufferStream<
- SourceBufferRangeByDts>::GetMaxInterbufferDistance,
- base::Unretained(this)));
-}
-
-template <>
-std::unique_ptr<SourceBufferRangeByPts>
-SourceBufferStream<SourceBufferRangeByPts>::RangeNew(
- const BufferQueue& new_buffers,
- DecodeTimestamp range_start_time) {
- return std::make_unique<SourceBufferRangeByPts>(
- TypeToGapPolicy<SourceBufferRangeByPts>(GetType()), new_buffers,
- range_start_time.ToPresentationTime(),
- base::BindRepeating(
- &SourceBufferStream<
- SourceBufferRangeByPts>::GetMaxInterbufferDistance,
- base::Unretained(this)));
-}
-
-template class SourceBufferStream<SourceBufferRangeByDts>;
-template class SourceBufferStream<SourceBufferRangeByPts>;
-
} // namespace media
diff --git a/chromium/media/filters/source_buffer_stream.h b/chromium/media/filters/source_buffer_stream.h
index 590d88d85cb..62a75d8c0ac 100644
--- a/chromium/media/filters/source_buffer_stream.h
+++ b/chromium/media/filters/source_buffer_stream.h
@@ -5,7 +5,9 @@
// SourceBufferStream is a data structure that stores media Buffers in ranges.
// Buffers can be appended out of presentation order. Buffers are retrieved by
// seeking to the desired start point and calling GetNextBuffer(). Buffers are
-// returned in sequential presentation order.
+// returned in sequential order to feed decoder, generally near presentation
+// order though not necessarily the same as presentation order within GOPs of
+// out-of-order codecs.
#ifndef MEDIA_FILTERS_SOURCE_BUFFER_STREAM_H_
#define MEDIA_FILTERS_SOURCE_BUFFER_STREAM_H_
@@ -48,24 +50,16 @@ enum class SourceBufferStreamStatus {
enum class SourceBufferStreamType { kAudio, kVideo, kText };
// See file-level comment for complete description.
-// Template parameter determines which kind of buffering behavior is used. See
-// https://crbug.com/718641 and media::MseBufferByPts feature.
-template <typename RangeClass>
class MEDIA_EXPORT SourceBufferStream {
public:
- static_assert(
- std::is_base_of<SourceBufferRange, RangeClass>::value &&
- !std::is_abstract<RangeClass>::value,
- "RangeClass must be a concrete class having SourceBufferRange as base");
-
using BufferQueue = StreamParser::BufferQueue;
- using RangeList = std::list<std::unique_ptr<RangeClass>>;
+ using RangeList = std::list<std::unique_ptr<SourceBufferRange>>;
// Helper for PrepareRangesForNextAppend and BufferQueueToLogString that
// populates |start| and |end| with the presentation interval of |buffers|.
static void GetTimestampInterval(const BufferQueue& buffers,
- DecodeTimestamp* start,
- DecodeTimestamp* end);
+ base::TimeDelta* start,
+ base::TimeDelta* end);
SourceBufferStream(const AudioDecoderConfig& audio_config,
MediaLog* media_log);
@@ -76,10 +70,8 @@ class MEDIA_EXPORT SourceBufferStream {
~SourceBufferStream();
// Signals that the next buffers appended are part of a new coded frame group
- // starting at |coded_frame_group_start_{dts,pts}|, differentiated in impl
- // based on SBRByDts/Pts respectively.
- void OnStartOfCodedFrameGroup(DecodeTimestamp coded_frame_group_start_dts,
- base::TimeDelta coded_frame_group_start_pts);
+ // starting at |coded_frame_group_start_pts|.
+ void OnStartOfCodedFrameGroup(base::TimeDelta coded_frame_group_start_pts);
// Add the |buffers| to the SourceBufferStream. Buffers within the queue are
// expected to be in order, but multiple calls to Append() may add buffers out
@@ -100,17 +92,16 @@ class MEDIA_EXPORT SourceBufferStream {
// Frees up space if the SourceBufferStream is taking up too much memory.
// |media_time| is current playback position.
- bool GarbageCollectIfNeeded(DecodeTimestamp media_time,
- size_t newDataSize);
+ bool GarbageCollectIfNeeded(base::TimeDelta media_time, size_t newDataSize);
// Gets invoked when the system is experiencing memory pressure, i.e. there's
// not enough free memory. The |media_time| is the media playback position at
// the time of memory pressure notification (needed for accurate GC). The
- // |memory_pressure_listener| indicates memory pressure severity. The
+ // |memory_pressure_level| indicates memory pressure severity. The
// |force_instant_gc| is used to force the MSE garbage collection algorithm to
// be run right away, without waiting for the next append.
void OnMemoryPressure(
- DecodeTimestamp media_time,
+ base::TimeDelta media_time,
base::MemoryPressureListener::MemoryPressureLevel memory_pressure_level,
bool force_instant_gc);
@@ -188,35 +179,31 @@ class MEDIA_EXPORT SourceBufferStream {
private:
friend class SourceBufferStreamTest;
- // Helper that does the bulk of OnStartOfCodedFrameGroup() processing,
- // where caller differentiates ByPts or ByDts.
- void OnStartOfCodedFrameGroupInternal(
- DecodeTimestamp coded_frame_group_start_time);
-
// Attempts to delete approximately |total_bytes_to_free| amount of data
// |ranges_|, starting at the front of |ranges_| and moving linearly forward
// through the buffers. Deletes starting from the back if |reverse_direction|
// is true. |media_time| is current playback position.
// Returns the number of bytes freed.
size_t FreeBuffers(size_t total_bytes_to_free,
- DecodeTimestamp media_time,
+ base::TimeDelta media_time,
bool reverse_direction);
// Attempts to delete approximately |total_bytes_to_free| amount of data from
// |ranges_|, starting after the last appended media
- // (|highest_timestamp_in_append_sequence_|) but before the current playback
- // position |media_time|.
+ // (|highest_buffered_end_time_in_append_sequence_|) but before the current
+ // playback position |media_time|.
size_t FreeBuffersAfterLastAppended(size_t total_bytes_to_free,
- DecodeTimestamp media_time);
+ base::TimeDelta media_time);
- // Gets the removal range to secure |byte_to_free| from
+ // Gets the removal range to secure |total_bytes_to_free| from
// [|start_timestamp|, |end_timestamp|).
// Returns the size of buffers to secure if future
// Remove(|start_timestamp|, |removal_end_timestamp|, duration) is called.
// Will not update |removal_end_timestamp| if the returned size is 0.
- size_t GetRemovalRange(DecodeTimestamp start_timestamp,
- DecodeTimestamp end_timestamp, size_t byte_to_free,
- DecodeTimestamp* removal_end_timestamp);
+ size_t GetRemovalRange(base::TimeDelta start_timestamp,
+ base::TimeDelta end_timestamp,
+ size_t total_bytes_to_free,
+ base::TimeDelta* removal_end_timestamp);
// Prepares |range_for_next_append_| so |new_buffers| can be appended.
// This involves removing buffers between the end of the previous append
@@ -228,13 +215,16 @@ class MEDIA_EXPORT SourceBufferStream {
BufferQueue* deleted_buffers);
// Removes buffers, from the |track_buffer_|, that come after |timestamp|.
- void PruneTrackBuffer(const DecodeTimestamp timestamp);
+ // Due to out-of-order decode versus presentation times for some kinds of
+ // media, |timestamp| should be the time of a keyframe known by the caller.
+ // |timestamp| must not be kNoTimestamp.
+ void PruneTrackBuffer(const base::TimeDelta timestamp);
// Checks to see if |range_with_new_buffers_itr| can be merged with the range
// next to it, and merges them if so while preserving correctness of
// |range_for_next_append_| and |selected_range_|.
void MergeWithNextRangeIfNecessary(
- const typename RangeList::iterator& range_with_new_buffers_itr);
+ const RangeList::iterator& range_with_new_buffers_itr);
// Merges any adjacent ranges while preserving correctness of
// |range_for_next_append_| and |selected_range_|.
@@ -243,37 +233,35 @@ class MEDIA_EXPORT SourceBufferStream {
// Returns true if |next_gop_timestamp| follows
// |highest_timestamp_in_append_sequence_| within fudge room.
bool IsNextGopAdjacentToEndOfCurrentAppendSequence(
- DecodeTimestamp next_gop_timestamp) const;
+ base::TimeDelta next_gop_timestamp) const;
// Helper method that returns the timestamp for the next buffer that
// |selected_range_| will return from GetNextBuffer() call, or kNoTimestamp
// if in between seeking (i.e. |selected_range_| is null).
- DecodeTimestamp GetNextBufferTimestamp();
+ base::TimeDelta GetNextBufferTimestamp();
// Finds the range that should contain a coded frame group that begins with
- // |start_timestamp| and returns the iterator pointing to it. Returns
- // |ranges_.end()| if there's no such existing range.
- typename RangeList::iterator FindExistingRangeFor(
- DecodeTimestamp start_timestamp);
+ // |start_timestamp| (presentation time) and returns the iterator pointing to
+ // it. Returns |ranges_.end()| if there's no such existing range.
+ RangeList::iterator FindExistingRangeFor(base::TimeDelta start_timestamp);
// Inserts |new_range| into |ranges_| preserving sorted order. Returns an
// iterator in |ranges_| that points to |new_range|. |new_range| becomes owned
// by |ranges_|.
- typename RangeList::iterator AddToRanges(
- std::unique_ptr<RangeClass> new_range);
+ RangeList::iterator AddToRanges(std::unique_ptr<SourceBufferRange> new_range);
// Returns an iterator that points to the place in |ranges_| where
// |selected_range_| lives.
- typename RangeList::iterator GetSelectedRangeItr();
+ RangeList::iterator GetSelectedRangeItr();
// Sets the |selected_range_| to |range| and resets the next buffer position
// for the previous |selected_range_|.
- void SetSelectedRange(RangeClass* range);
+ void SetSelectedRange(SourceBufferRange* range);
// Seeks |range| to |seek_timestamp| and then calls SetSelectedRange() with
// |range|.
- void SeekAndSetSelectedRange(RangeClass* range,
- DecodeTimestamp seek_timestamp);
+ void SeekAndSetSelectedRange(SourceBufferRange* range,
+ base::TimeDelta seek_timestamp);
// Resets this stream back to an unseeked state.
void ResetSeekState();
@@ -286,8 +274,8 @@ class MEDIA_EXPORT SourceBufferStream {
bool ShouldSeekToStartOfBuffered(base::TimeDelta seek_timestamp) const;
// Returns true if the decode timestamps of |buffers| are monotonically
- // increasing since the previous append to the coded frame group, false
- // otherwise.
+ // increasing (within each GOP) since the previous append to the coded frame
+ // group, false otherwise.
bool IsDtsMonotonicallyIncreasing(const BufferQueue& buffers);
// Returns true if |selected_range_| is the only range in |ranges_| that
@@ -313,19 +301,19 @@ class MEDIA_EXPORT SourceBufferStream {
// |timestamp| if necessary and possible. This method only attempts to
// set |selected_range_| if |seleted_range_| is null and |track_buffer_|
// is empty.
- void SetSelectedRangeIfNeeded(const DecodeTimestamp timestamp);
+ void SetSelectedRangeIfNeeded(const base::TimeDelta timestamp);
// Find a keyframe timestamp that is >= |start_timestamp| and can be used to
// find a new selected range.
// Returns kNoTimestamp if an appropriate keyframe timestamp could not be
// found.
- DecodeTimestamp FindNewSelectedRangeSeekTimestamp(
- const DecodeTimestamp start_timestamp);
+ base::TimeDelta FindNewSelectedRangeSeekTimestamp(
+ const base::TimeDelta start_timestamp);
// Searches |ranges_| for the first keyframe timestamp that is >= |timestamp|.
// If |ranges_| doesn't contain a GOP that covers |timestamp| or doesn't
// have a keyframe after |timestamp| then kNoTimestamp is returned.
- DecodeTimestamp FindKeyframeAfterTimestamp(const DecodeTimestamp timestamp);
+ base::TimeDelta FindKeyframeAfterTimestamp(const base::TimeDelta timestamp);
// Returns "VIDEO" for a video SourceBufferStream, "AUDIO" for an audio
// stream, and "TEXT" for a text stream.
@@ -346,16 +334,16 @@ class MEDIA_EXPORT SourceBufferStream {
// If |*itr| points to |selected_range_|, then |selected_range_| is set to
// NULL. After the range is removed, |*itr| is to the range after the one that
// was removed or to |ranges_.end()| if the last range was removed.
- void DeleteAndRemoveRange(typename RangeList::iterator* itr);
+ void DeleteAndRemoveRange(RangeList::iterator* itr);
- // Helper function used when updating |range_for_next_append_|.
- // Returns a guess of what the next append timestamp will be based on
+ // Helper function used when updating |range_for_next_append_|. Returns a
+ // guess of what the next append timestamp will be based on
// |last_appended_buffer_timestamp_|, |new_coded_frame_group_| and
- // |coded_frame_group_start_time_|. Returns kNoDecodeTimestamp() if unable to
- // guess, which can occur prior to first OnStartOfCodedFrameGroup(), or
- // when the most recent GOP appended to since the last
- // OnStartOfCodedFrameGroup() is removed.
- DecodeTimestamp PotentialNextAppendTimestamp() const;
+ // |coded_frame_group_start_pts_|. Returns kNoTimestamp if unable to guess,
+ // which can occur prior to first OnStartOfCodedFrameGroup(), or when the most
+ // recent GOP appended to since the last OnStartOfCodedFrameGroup() is
+ // removed.
+ base::TimeDelta PotentialNextAppendTimestamp() const;
// Helper function used by Remove() and PrepareRangesForNextAppend() to
// remove buffers and ranges between |start| and |end|.
@@ -365,8 +353,8 @@ class MEDIA_EXPORT SourceBufferStream {
// |*deleted_buffers| - Filled with buffers for the current playback position
// if the removal range included the current playback position. These buffers
// can be used as candidates for placing in the |track_buffer_|.
- void RemoveInternal(DecodeTimestamp start,
- DecodeTimestamp end,
+ void RemoveInternal(base::TimeDelta start,
+ base::TimeDelta end,
bool exclude_start,
BufferQueue* deleted_buffers);
@@ -374,8 +362,8 @@ class MEDIA_EXPORT SourceBufferStream {
// disrupt the last appended GOP. If disruption is expected, reset state
// tracking the last append. This will trigger frame filtering in Append()
// until a new key frame is provided.
- void UpdateLastAppendStateForRemove(DecodeTimestamp remove_start,
- DecodeTimestamp remove_end,
+ void UpdateLastAppendStateForRemove(base::TimeDelta remove_start,
+ base::TimeDelta remove_end,
bool exclude_start);
SourceBufferStreamType GetType() const;
@@ -403,54 +391,6 @@ class MEDIA_EXPORT SourceBufferStream {
// returns true. Otherwise returns false.
bool SetPendingBuffer(scoped_refptr<StreamParserBuffer>* out_buffer);
- // Helpers that adapt StreamParserBuffer, SBRByPts and SBRByDts to a common
- // internal interface until SBRByDts can be dropped. See
- // https://crbug.com/718641.
- // TODO(wolenetz): Consider refactoring to reference a "buffering timestamp"
- // type (DTS for ByDts, PTS for ByPts) defined in RangeClass to reduce the
- // need for some of these helpers. See https://crbug.com/718641.
- static constexpr bool BufferingByPts();
- DecodeTimestamp BufferGetTimestamp(scoped_refptr<StreamParserBuffer> buffer);
- void RangeAppendBuffersToEnd(RangeClass* range,
- const BufferQueue& buffers,
- DecodeTimestamp group_start_time);
- DecodeTimestamp RangeGetBufferedEndTimestamp(RangeClass* range) const;
- DecodeTimestamp RangeGetEndTimestamp(RangeClass* range) const;
- DecodeTimestamp RangeGetStartTimestamp(RangeClass* range) const;
- bool RangeCanSeekTo(RangeClass* range, DecodeTimestamp seek_time) const;
- int RangeGetConfigIdAtTime(RangeClass* range, DecodeTimestamp config_time);
- bool RangeSameConfigThruRange(RangeClass* range,
- DecodeTimestamp start,
- DecodeTimestamp end);
- bool RangeFirstGOPEarlierThanMediaTime(RangeClass* range,
- DecodeTimestamp media_time) const;
- size_t RangeGetRemovalGOP(RangeClass* range,
- DecodeTimestamp start_timestamp,
- DecodeTimestamp end_timestamp,
- size_t bytes_to_free,
- DecodeTimestamp* end_removal_timestamp);
- bool RangeBelongsToRange(RangeClass* range, DecodeTimestamp timestamp) const;
- DecodeTimestamp RangeFindHighestBufferedTimestampAtOrBefore(
- RangeClass* range,
- DecodeTimestamp timestamp) const;
- void RangeSeek(RangeClass* range, DecodeTimestamp timestamp);
- DecodeTimestamp RangeNextKeyframeTimestamp(RangeClass* range,
- DecodeTimestamp timestamp);
- bool RangeGetBuffersInRange(RangeClass* range,
- DecodeTimestamp start,
- DecodeTimestamp end,
- BufferQueue* buffers);
- std::unique_ptr<RangeClass> RangeSplitRange(RangeClass* range,
- DecodeTimestamp timestamp);
- bool RangeTruncateAt(RangeClass* range,
- DecodeTimestamp timestamp,
- BufferQueue* deleted_buffers,
- bool is_exclusive);
- DecodeTimestamp RangeKeyframeBeforeTimestamp(RangeClass* range,
- DecodeTimestamp timestamp);
- std::unique_ptr<RangeClass> RangeNew(const BufferQueue& new_buffers,
- DecodeTimestamp range_start_time);
-
// Used to report log messages that can help the web developer figure out what
// is wrong with the content.
MediaLog* media_log_;
@@ -490,7 +430,7 @@ class MEDIA_EXPORT SourceBufferStream {
// Pointer to the seeked-to Range. This is the range from which
// GetNextBuffer() calls are fulfilled after the |track_buffer_| has been
// emptied.
- RangeClass* selected_range_ = nullptr;
+ SourceBufferRange* selected_range_ = nullptr;
// Queue of the next buffers to be returned from calls to GetNextBuffer(). If
// |track_buffer_| is empty, return buffers from |selected_range_|.
@@ -500,50 +440,47 @@ class MEDIA_EXPORT SourceBufferStream {
// emitted buffer emptied |track_buffer_|.
bool just_exhausted_track_buffer_ = false;
- // The start time of the current coded frame group being appended.
- // When ByDts, this is DTS; when ByPts, this is PTS converted to DTS type.
- // TODO(wolenetz): Make this pure PTS when ByPts ships always-on. See
- // https://crbug.com/718641.
- DecodeTimestamp coded_frame_group_start_time_;
+ // The start presentation time of the current coded frame group being
+ // appended.
+ base::TimeDelta coded_frame_group_start_pts_;
// Points to the range containing the current coded frame group being
// appended.
- typename RangeList::iterator range_for_next_append_;
+ RangeList::iterator range_for_next_append_;
// True when the next call to Append() begins a new coded frame group.
// TODO(wolenetz): Simplify by passing this flag into Append().
bool new_coded_frame_group_ = false;
// The timestamp of the last buffer appended to the coded frame group, set to
- // kNoDecodeTimestamp() if the beginning of the group.
- DecodeTimestamp last_appended_buffer_timestamp_ = kNoDecodeTimestamp();
+ // kNoTimestamp if the beginning of the group.
+ base::TimeDelta last_appended_buffer_timestamp_ = kNoTimestamp;
base::TimeDelta last_appended_buffer_duration_ = kNoTimestamp;
bool last_appended_buffer_is_keyframe_ = false;
- // When buffering ByPts, yet needing still to verify coded frame group is
- // monotically increasing in DTS sequence and to update max interbuffer
- // distance also by DTS deltas within a coded frame group, the following is
- // needed.
+ // When buffering GOPs by keyframe PTS and intra-gop by nonkeyframe DTS, to
+ // verify monotonically increasing intra-GOP DTS sequence and to update max
+ // interbuffer distance also by DTS deltas within a coded frame group, the
+ // following is needed.
DecodeTimestamp last_appended_buffer_decode_timestamp_ = kNoDecodeTimestamp();
- // The following is the highest timestamp appended so far in this coded frame
- // group. In ByPts buffering, this is a PTS in DTS type, and isn't necessarily
- // the most recently appended frame. This is used as the lower bound of
- // removing previously buffered media when processing new appends.
- DecodeTimestamp highest_timestamp_in_append_sequence_ = kNoDecodeTimestamp();
+ // The following is the highest presentation timestamp appended so far in this
+ // coded frame group. Due to potentially out-of-order decode versus
+ // presentation time sequence, this isn't necessarily the most recently
+ // appended frame. This is used as the lower bound of removing previously
+ // buffered media when processing new appends.
+ base::TimeDelta highest_timestamp_in_append_sequence_ = kNoTimestamp;
// The following is used in determining if FreeBuffersAfterLastAppended() is
- // allowed during garbage collection. In ByPts buffering, this is a PTS in DTS
- // type, and isn't necessarily the end time of the most recently appended
- // frame.
- DecodeTimestamp highest_buffered_end_time_in_append_sequence_ =
- kNoDecodeTimestamp();
-
- // The highest timestamp (DTS if ByDts, PTS as DTS type if ByPts) for buffers
- // returned by recent GetNextBuffer() calls. Set to kNoDecodeTimestamp() if
- // GetNextBuffer() hasn't been called yet or a seek has happened since the
- // last GetNextBuffer() call.
- DecodeTimestamp highest_output_buffer_timestamp_;
+ // allowed during garbage collection. Due to out-of-order decode versus
+ // presentation sequence, this isn't necessarily the end time of the most
+ // recently appended frame.
+ base::TimeDelta highest_buffered_end_time_in_append_sequence_ = kNoTimestamp;
+
+ // The highest presentation timestamp for buffers returned by recent
+ // GetNextBuffer() calls. Set to kNoTimestamp if GetNextBuffer() hasn't been
+ // called yet or a seek has happened since the last GetNextBuffer() call.
+ base::TimeDelta highest_output_buffer_timestamp_;
// Stores the largest distance between two adjacent buffers in this stream.
base::TimeDelta max_interbuffer_distance_;
@@ -571,9 +508,8 @@ class MEDIA_EXPORT SourceBufferStream {
int num_splice_logs_ = 0;
int num_track_buffer_gap_warning_logs_ = 0;
int num_garbage_collect_algorithm_logs_ = 0;
- int num_strange_same_timestamps_logs_ = 0;
- DISALLOW_COPY_AND_ASSIGN(SourceBufferStream<RangeClass>);
+ DISALLOW_COPY_AND_ASSIGN(SourceBufferStream);
};
} // namespace media
diff --git a/chromium/media/filters/source_buffer_stream_unittest.cc b/chromium/media/filters/source_buffer_stream_unittest.cc
index 2e79af78d1f..5e78090671e 100644
--- a/chromium/media/filters/source_buffer_stream_unittest.cc
+++ b/chromium/media/filters/source_buffer_stream_unittest.cc
@@ -28,23 +28,16 @@
#include "media/base/timestamp_constants.h"
#include "media/base/webvtt_util.h"
#include "media/filters/source_buffer_range.h"
-#include "media/filters/source_buffer_range_by_dts.h"
-#include "media/filters/source_buffer_range_by_pts.h"
#include "testing/gtest/include/gtest/gtest.h"
using ::testing::HasSubstr;
using ::testing::InSequence;
using ::testing::StrictMock;
-using ::testing::Values;
namespace {
enum class TimeGranularity { kMicrosecond, kMillisecond };
-// See https://crbug.com/718641 and kMseBufferByPts. This controls which kind of
-// buffering implementation is constructed and tested.
-enum class BufferingApi { kLegacyByDts, kNewByPts };
-
} // namespace
namespace media {
@@ -58,66 +51,34 @@ static const uint8_t kDataB = 0x33;
static const int kDataSize = 1;
// Matchers for verifying common media log entry strings.
-MATCHER(ContainsSameTimestampAt30MillisecondsLog, "") {
- return CONTAINS_STRING(arg,
- "Detected an append sequence with keyframe following "
- "a non-keyframe, both with the same decode timestamp "
- "of 0.03");
-}
-
MATCHER_P(ContainsTrackBufferExhaustionSkipLog, skip_milliseconds, "") {
return CONTAINS_STRING(arg,
"Media append that overlapped current playback "
- "position caused time gap in playing VIDEO stream "
+ "position may cause time gap in playing VIDEO stream "
"because the next keyframe is " +
base::NumberToString(skip_milliseconds) +
"ms beyond last overlapped frame. Media may "
"appear temporarily frozen.");
}
-// Based on runtime SourceBufferStreamTest parameter, picks the associated
-// stream member pointer and performs |operation| on it through the pointer.
-#define STREAM_OP(operation) \
- (buffering_api_ == BufferingApi::kLegacyByDts ? stream_dts_->operation \
- : stream_pts_->operation)
-
-#define STREAM_RESET(config) \
- { \
- if (buffering_api_ == BufferingApi::kLegacyByDts) { \
- stream_dts_.reset(new SourceBufferStream<SourceBufferRangeByDts>( \
- config, &media_log_)); \
- } else { \
- stream_pts_.reset(new SourceBufferStream<SourceBufferRangeByPts>( \
- config, &media_log_)); \
- } \
- }
-
#define EXPECT_STATUS_FOR_STREAM_OP(status_suffix, operation) \
- { \
- if (buffering_api_ == BufferingApi::kLegacyByDts) { \
- EXPECT_EQ(SourceBufferStreamStatus::status_suffix, \
- stream_dts_->operation); \
- } else { \
- EXPECT_EQ(SourceBufferStreamStatus::status_suffix, \
- stream_pts_->operation); \
- } \
- }
+ { EXPECT_EQ(SourceBufferStreamStatus::status_suffix, stream_->operation); }
-// Test parameter determines which kind of SourceBufferRange we use in the test
-// instance's SourceBufferStream<>. An attempt to used TYPED_TEST instead of
-// TEST_P led to most lines in test cases needing prefix "this->", so we instead
-// use TestWithParam, conditional fixture logic and helper macros.
-class SourceBufferStreamTest : public testing::TestWithParam<BufferingApi> {
+class SourceBufferStreamTest : public testing::Test {
protected:
SourceBufferStreamTest() {
- buffering_api_ = GetParam();
video_config_ = TestVideoConfig::Normal();
SetStreamInfo(kDefaultFramesPerSecond, kDefaultKeyframesPerSecond);
- STREAM_RESET(video_config_);
+ ResetStream<>(video_config_);
+ }
+
+ template <typename ConfigT>
+ void ResetStream(const ConfigT& config) {
+ stream_.reset(new SourceBufferStream(config, &media_log_));
}
void SetMemoryLimit(size_t buffers_of_data) {
- STREAM_OP(set_memory_limit(buffers_of_data * kDataSize));
+ stream_->set_memory_limit(buffers_of_data * kDataSize);
}
void SetStreamInfo(int frames_per_second, int keyframes_per_second) {
@@ -129,7 +90,7 @@ class SourceBufferStreamTest : public testing::TestWithParam<BufferingApi> {
void SetTextStream() {
video_config_ = TestVideoConfig::Invalid();
TextTrackConfig config(kTextSubtitles, "", "", "");
- STREAM_RESET(config);
+ ResetStream<>(config);
SetStreamInfo(2, 2);
}
@@ -138,7 +99,7 @@ class SourceBufferStreamTest : public testing::TestWithParam<BufferingApi> {
audio_config_.Initialize(kCodecVorbis, kSampleFormatPlanarF32,
CHANNEL_LAYOUT_STEREO, 1000, EmptyExtraData(),
Unencrypted(), base::TimeDelta(), 0);
- STREAM_RESET(audio_config_);
+ ResetStream<>(audio_config_);
// Equivalent to 2ms per frame.
SetStreamInfo(500, 500);
@@ -212,21 +173,19 @@ class SourceBufferStreamTest : public testing::TestWithParam<BufferingApi> {
AppendBuffers(buffers_to_append, false, kNoTimestamp, false, false);
}
- void Seek(int position) { STREAM_OP(Seek(position * frame_duration_)); }
+ void Seek(int position) { stream_->Seek(position * frame_duration_); }
void SeekToTimestampMs(int64_t timestamp_ms) {
- STREAM_OP(Seek(base::TimeDelta::FromMilliseconds(timestamp_ms)));
+ stream_->Seek(base::TimeDelta::FromMilliseconds(timestamp_ms));
}
bool GarbageCollect(base::TimeDelta media_time, int new_data_size) {
- return STREAM_OP(GarbageCollectIfNeeded(
- DecodeTimestamp::FromPresentationTime(media_time), new_data_size));
+ return stream_->GarbageCollectIfNeeded(media_time, new_data_size);
}
- bool GarbageCollectWithPlaybackAtBuffer(int position, int newDataBuffers) {
- return STREAM_OP(GarbageCollectIfNeeded(
- DecodeTimestamp::FromPresentationTime(position * frame_duration_),
- newDataBuffers * kDataSize));
+ bool GarbageCollectWithPlaybackAtBuffer(int position, int new_data_buffers) {
+ return GarbageCollect(position * frame_duration_,
+ new_data_buffers * kDataSize);
}
void RemoveInMs(int start, int end, int duration) {
@@ -237,29 +196,27 @@ class SourceBufferStreamTest : public testing::TestWithParam<BufferingApi> {
void Remove(base::TimeDelta start, base::TimeDelta end,
base::TimeDelta duration) {
- STREAM_OP(Remove(start, end, duration));
+ stream_->Remove(start, end, duration);
}
void SignalStartOfCodedFrameGroup(base::TimeDelta start_timestamp) {
- STREAM_OP(OnStartOfCodedFrameGroup(
- DecodeTimestamp::FromPresentationTime(start_timestamp),
- start_timestamp));
+ stream_->OnStartOfCodedFrameGroup(start_timestamp);
}
int GetRemovalRangeInMs(int start, int end, int bytes_to_free,
int* removal_end) {
- DecodeTimestamp removal_end_timestamp =
- DecodeTimestamp::FromMilliseconds(*removal_end);
+ base::TimeDelta removal_end_timestamp =
+ base::TimeDelta::FromMilliseconds(*removal_end);
int bytes_removed =
- STREAM_OP(GetRemovalRange(DecodeTimestamp::FromMilliseconds(start),
- DecodeTimestamp::FromMilliseconds(end),
- bytes_to_free, &removal_end_timestamp));
+ stream_->GetRemovalRange(base::TimeDelta::FromMilliseconds(start),
+ base::TimeDelta::FromMilliseconds(end),
+ bytes_to_free, &removal_end_timestamp);
*removal_end = removal_end_timestamp.InMilliseconds();
return bytes_removed;
}
void CheckExpectedRanges(const std::string& expected) {
- Ranges<base::TimeDelta> r = STREAM_OP(GetBufferedTime());
+ Ranges<base::TimeDelta> r = stream_->GetBufferedTime();
std::stringstream ss;
ss << "{ ";
@@ -275,7 +232,7 @@ class SourceBufferStreamTest : public testing::TestWithParam<BufferingApi> {
void CheckExpectedRangesByTimestamp(
const std::string& expected,
TimeGranularity granularity = TimeGranularity::kMillisecond) {
- Ranges<base::TimeDelta> r = STREAM_OP(GetBufferedTime());
+ Ranges<base::TimeDelta> r = stream_->GetBufferedTime();
std::stringstream ss;
ss << "{ ";
@@ -295,25 +252,11 @@ class SourceBufferStreamTest : public testing::TestWithParam<BufferingApi> {
void CheckExpectedRangeEndTimes(const std::string& expected) {
std::stringstream ss;
ss << "{ ";
- switch (buffering_api_) {
- case BufferingApi::kLegacyByDts:
- for (const auto& r : stream_dts_->ranges_) {
- base::TimeDelta highest_pts;
- base::TimeDelta end_time;
- r->GetRangeEndTimesForTesting(&highest_pts, &end_time);
- ss << "<" << highest_pts.InMilliseconds() << ","
- << end_time.InMilliseconds() << "> ";
- }
- break;
- case BufferingApi::kNewByPts:
- for (const auto& r : stream_pts_->ranges_) {
- base::TimeDelta highest_pts;
- base::TimeDelta end_time;
- r->GetRangeEndTimesForTesting(&highest_pts, &end_time);
- ss << "<" << highest_pts.InMilliseconds() << ","
- << end_time.InMilliseconds() << "> ";
- }
- break;
+ for (const auto& r : stream_->ranges_) {
+ base::TimeDelta highest_pts = r->GetEndTimestamp();
+ base::TimeDelta end_time = r->GetBufferedEndTimestamp();
+ ss << "<" << highest_pts.InMilliseconds() << ","
+ << end_time.InMilliseconds() << "> ";
}
ss << "}";
EXPECT_EQ(expected, ss.str());
@@ -321,23 +264,10 @@ class SourceBufferStreamTest : public testing::TestWithParam<BufferingApi> {
void CheckIsNextInPTSSequenceWithFirstRange(int64_t pts_in_ms,
bool expectation) {
- ASSERT_GE(STREAM_OP(ranges_.size()), 1u);
- switch (buffering_api_) {
- case BufferingApi::kLegacyByDts: {
- const auto& range_ptr = *(stream_dts_->ranges_.begin());
- EXPECT_EQ(expectation,
- range_ptr->IsNextInPresentationSequence(
- base::TimeDelta::FromMilliseconds(pts_in_ms)));
- break;
- }
- case BufferingApi::kNewByPts: {
- const auto& range_ptr = *(stream_pts_->ranges_.begin());
- EXPECT_EQ(expectation,
- range_ptr->IsNextInPresentationSequence(
- base::TimeDelta::FromMilliseconds(pts_in_ms)));
- break;
- }
- }
+ ASSERT_GE(stream_->ranges_.size(), 1u);
+ const auto& range_ptr = *(stream_->ranges_.begin());
+ EXPECT_EQ(expectation, range_ptr->IsNextInPresentationSequence(
+ base::TimeDelta::FromMilliseconds(pts_in_ms)));
}
void CheckExpectedBuffers(
@@ -374,7 +304,7 @@ class SourceBufferStreamTest : public testing::TestWithParam<BufferingApi> {
int current_position = starting_position;
for (; current_position <= ending_position; current_position++) {
scoped_refptr<StreamParserBuffer> buffer;
- SourceBufferStreamStatus status = STREAM_OP(GetNextBuffer(&buffer));
+ SourceBufferStreamStatus status = stream_->GetNextBuffer(&buffer);
EXPECT_NE(status, SourceBufferStreamStatus::kConfigChange);
if (status != SourceBufferStreamStatus::kSuccess)
break;
@@ -404,10 +334,10 @@ class SourceBufferStreamTest : public testing::TestWithParam<BufferingApi> {
std::vector<std::string> timestamps = base::SplitString(
expected, " ", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
std::stringstream ss;
- const SourceBufferStreamType type = STREAM_OP(GetType());
+ const SourceBufferStreamType type = stream_->GetType();
for (size_t i = 0; i < timestamps.size(); i++) {
scoped_refptr<StreamParserBuffer> buffer;
- SourceBufferStreamStatus status = STREAM_OP(GetNextBuffer(&buffer));
+ SourceBufferStreamStatus status = stream_->GetNextBuffer(&buffer);
if (i > 0)
ss << " ";
@@ -415,13 +345,13 @@ class SourceBufferStreamTest : public testing::TestWithParam<BufferingApi> {
if (status == SourceBufferStreamStatus::kConfigChange) {
switch (type) {
case SourceBufferStreamType::kVideo:
- STREAM_OP(GetCurrentVideoDecoderConfig());
+ stream_->GetCurrentVideoDecoderConfig();
break;
case SourceBufferStreamType::kAudio:
- STREAM_OP(GetCurrentAudioDecoderConfig());
+ stream_->GetCurrentAudioDecoderConfig();
break;
case SourceBufferStreamType::kText:
- STREAM_OP(GetCurrentTextTrackConfig());
+ stream_->GetCurrentTextTrackConfig();
break;
}
@@ -497,16 +427,14 @@ class SourceBufferStreamTest : public testing::TestWithParam<BufferingApi> {
}
void CheckVideoConfig(const VideoDecoderConfig& config) {
- const VideoDecoderConfig& actual =
- STREAM_OP(GetCurrentVideoDecoderConfig());
+ const VideoDecoderConfig& actual = stream_->GetCurrentVideoDecoderConfig();
EXPECT_TRUE(actual.Matches(config))
<< "Expected: " << config.AsHumanReadableString()
<< "\nActual: " << actual.AsHumanReadableString();
}
void CheckAudioConfig(const AudioDecoderConfig& config) {
- const AudioDecoderConfig& actual =
- STREAM_OP(GetCurrentAudioDecoderConfig());
+ const AudioDecoderConfig& actual = stream_->GetCurrentAudioDecoderConfig();
EXPECT_TRUE(actual.Matches(config))
<< "Expected: " << config.AsHumanReadableString()
<< "\nActual: " << actual.AsHumanReadableString();
@@ -515,16 +443,14 @@ class SourceBufferStreamTest : public testing::TestWithParam<BufferingApi> {
base::TimeDelta frame_duration() const { return frame_duration_; }
StrictMock<MockMediaLog> media_log_;
- std::unique_ptr<SourceBufferStream<SourceBufferRangeByDts>> stream_dts_;
- std::unique_ptr<SourceBufferStream<SourceBufferRangeByPts>> stream_pts_;
+ std::unique_ptr<SourceBufferStream> stream_;
VideoDecoderConfig video_config_;
AudioDecoderConfig audio_config_;
- BufferingApi buffering_api_;
private:
DemuxerStream::Type GetStreamType() {
- switch (STREAM_OP(GetType())) {
+ switch (stream_->GetType()) {
case SourceBufferStreamType::kAudio:
return DemuxerStream::AUDIO;
case SourceBufferStreamType::kVideo:
@@ -550,10 +476,7 @@ class SourceBufferStreamTest : public testing::TestWithParam<BufferingApi> {
const uint8_t* data,
int size) {
if (begin_coded_frame_group) {
- STREAM_OP(OnStartOfCodedFrameGroup(
- DecodeTimestamp::FromPresentationTime(starting_position *
- frame_duration_),
- starting_position * frame_duration_));
+ stream_->OnStartOfCodedFrameGroup(starting_position * frame_duration_);
}
int keyframe_interval = frames_per_second_ / keyframes_per_second_;
@@ -591,7 +514,7 @@ class SourceBufferStreamTest : public testing::TestWithParam<BufferingApi> {
queue.push_back(buffer);
}
if (!queue.empty())
- EXPECT_EQ(expect_success, STREAM_OP(Append(queue)));
+ EXPECT_EQ(expect_success, stream_->Append(queue));
}
void UpdateLastBufferDuration(DecodeTimestamp current_dts,
@@ -763,25 +686,20 @@ class SourceBufferStreamTest : public testing::TestWithParam<BufferingApi> {
BufferQueue buffers = StringToBufferQueue(buffers_to_append);
if (start_new_coded_frame_group) {
- DecodeTimestamp start_timestamp = DecodeTimestamp::FromPresentationTime(
- coded_frame_group_start_timestamp);
+ base::TimeDelta start_timestamp = coded_frame_group_start_timestamp;
- DecodeTimestamp buffers_start_timestamp =
- buffering_api_ == BufferingApi::kLegacyByDts
- ? buffers[0]->GetDecodeTimestamp()
- : DecodeTimestamp::FromPresentationTime(buffers[0]->timestamp());
+ base::TimeDelta buffers_start_timestamp = buffers[0]->timestamp();
- if (start_timestamp == kNoDecodeTimestamp())
+ if (start_timestamp == kNoTimestamp)
start_timestamp = buffers_start_timestamp;
else
ASSERT_TRUE(start_timestamp <= buffers_start_timestamp);
- STREAM_OP(OnStartOfCodedFrameGroup(start_timestamp,
- start_timestamp.ToPresentationTime()));
+ stream_->OnStartOfCodedFrameGroup(start_timestamp);
}
if (!one_by_one) {
- EXPECT_EQ(expect_success, STREAM_OP(Append(buffers)));
+ EXPECT_EQ(expect_success, stream_->Append(buffers));
return;
}
@@ -789,7 +707,7 @@ class SourceBufferStreamTest : public testing::TestWithParam<BufferingApi> {
for (size_t i = 0; i < buffers.size(); i++) {
BufferQueue wrapper;
wrapper.push_back(buffers[i]);
- EXPECT_TRUE(STREAM_OP(Append(wrapper)));
+ EXPECT_TRUE(stream_->Append(wrapper));
}
}
@@ -799,7 +717,7 @@ class SourceBufferStreamTest : public testing::TestWithParam<BufferingApi> {
DISALLOW_COPY_AND_ASSIGN(SourceBufferStreamTest);
};
-TEST_P(SourceBufferStreamTest, Append_SingleRange) {
+TEST_F(SourceBufferStreamTest, Append_SingleRange) {
// Append 15 buffers at positions 0 through 14.
NewCodedFrameGroupAppend(0, 15);
@@ -810,7 +728,7 @@ TEST_P(SourceBufferStreamTest, Append_SingleRange) {
CheckExpectedBuffers(0, 14);
}
-TEST_P(SourceBufferStreamTest, Append_SingleRange_OneBufferAtATime) {
+TEST_F(SourceBufferStreamTest, Append_SingleRange_OneBufferAtATime) {
// Append 15 buffers starting at position 0, one buffer at a time.
NewCodedFrameGroupAppend(0, 1);
for (int i = 1; i < 15; i++)
@@ -823,7 +741,7 @@ TEST_P(SourceBufferStreamTest, Append_SingleRange_OneBufferAtATime) {
CheckExpectedBuffers(0, 14);
}
-TEST_P(SourceBufferStreamTest, Append_DisjointRanges) {
+TEST_F(SourceBufferStreamTest, Append_DisjointRanges) {
// Append 5 buffers at positions 0 through 4.
NewCodedFrameGroupAppend(0, 5);
@@ -839,7 +757,7 @@ TEST_P(SourceBufferStreamTest, Append_DisjointRanges) {
CheckExpectedBuffers(15, 24);
}
-TEST_P(SourceBufferStreamTest, Append_AdjacentRanges) {
+TEST_F(SourceBufferStreamTest, Append_AdjacentRanges) {
// Append 10 buffers at positions 0 through 9.
NewCodedFrameGroupAppend(0, 10);
@@ -856,7 +774,7 @@ TEST_P(SourceBufferStreamTest, Append_AdjacentRanges) {
CheckExpectedBuffers(0, 25);
}
-TEST_P(SourceBufferStreamTest, Complete_Overlap) {
+TEST_F(SourceBufferStreamTest, Complete_Overlap) {
// Append 5 buffers at positions 5 through 9.
NewCodedFrameGroupAppend(5, 5);
@@ -870,7 +788,7 @@ TEST_P(SourceBufferStreamTest, Complete_Overlap) {
CheckExpectedBuffers(0, 14);
}
-TEST_P(SourceBufferStreamTest,
+TEST_F(SourceBufferStreamTest,
Complete_Overlap_AfterGroupTimestampAndBeforeFirstBufferTimestamp) {
// Append a coded frame group with a start timestamp of 0, but the first
// buffer starts at 30ms. This can happen in muxed content where the
@@ -892,7 +810,7 @@ TEST_P(SourceBufferStreamTest,
CheckExpectedBuffers("20K 50K 80K 110K 120K");
}
-TEST_P(SourceBufferStreamTest, Complete_Overlap_EdgeCase) {
+TEST_F(SourceBufferStreamTest, Complete_Overlap_EdgeCase) {
// Make each frame a keyframe so that it's okay to overlap frames at any point
// (instead of needing to respect keyframe boundaries).
SetStreamInfo(30, 30);
@@ -910,7 +828,7 @@ TEST_P(SourceBufferStreamTest, Complete_Overlap_EdgeCase) {
CheckExpectedBuffers(5, 12);
}
-TEST_P(SourceBufferStreamTest, Start_Overlap) {
+TEST_F(SourceBufferStreamTest, Start_Overlap) {
// Append 10 buffers at positions 5 through 14.
NewCodedFrameGroupAppend(5, 5);
@@ -924,7 +842,7 @@ TEST_P(SourceBufferStreamTest, Start_Overlap) {
CheckExpectedBuffers(5, 15);
}
-TEST_P(SourceBufferStreamTest, End_Overlap) {
+TEST_F(SourceBufferStreamTest, End_Overlap) {
// Append 10 buffers at positions 10 through 19.
NewCodedFrameGroupAppend(10, 10);
@@ -944,7 +862,7 @@ TEST_P(SourceBufferStreamTest, End_Overlap) {
// old  : A a a a a A a a a a
// new : B b b b b B b b
// after: B b b b b B b b A a a a a
-TEST_P(SourceBufferStreamTest, End_Overlap_Several) {
+TEST_F(SourceBufferStreamTest, End_Overlap_Several) {
// Append 10 buffers at positions 10 through 19 (DTS and PTS).
NewCodedFrameGroupAppend(10, 10, &kDataA);
@@ -955,11 +873,11 @@ TEST_P(SourceBufferStreamTest, End_Overlap_Several) {
// Check expected ranges: stream should not have kept buffers at DTS 13,14;
// PTS 12,13 because the keyframe on which they depended (10, PTS=DTS) was
// overwritten. Note that partial second GOP of B includes PTS [10,14), DTS
- // [10,12). In both ByDts and ByPts, these are continuous with the overlapped
- // original range's next GOP at (15, PTS=DTS).
+ // [10,12). These are continuous with the overlapped original range's next GOP
+ // at (15, PTS=DTS).
// Unlike the rest of the position based test API used in this case, these
// range expectation strings are the actual timestamps (divided by
- // frame_duration_), in DTS if ByDts, in PTS if ByPts.
+ // frame_duration_).
CheckExpectedRanges("{ [5,19) }");
// Check buffers in range.
@@ -976,7 +894,7 @@ TEST_P(SourceBufferStreamTest, End_Overlap_Several) {
// new : 0K
// after: 0K 120K 150
// track:
-TEST_P(SourceBufferStreamTest, End_Overlap_SingleBuffer) {
+TEST_F(SourceBufferStreamTest, End_Overlap_SingleBuffer) {
// Seek to start of stream.
SeekToTimestampMs(0);
@@ -996,7 +914,7 @@ TEST_P(SourceBufferStreamTest, End_Overlap_SingleBuffer) {
// old  : A a A a A a
// new : B b b b b B b b b b B b b b b B b b b b B b b b b B b b b b
// after == new
-TEST_P(SourceBufferStreamTest, Complete_Overlap_Several) {
+TEST_F(SourceBufferStreamTest, Complete_Overlap_Several) {
// Append 2 buffers at positions 5 through 6 (DTS); 5 through 9 (PTS) partial
// GOP.
NewCodedFrameGroupAppend(5, 2, &kDataA);
@@ -1011,11 +929,8 @@ TEST_P(SourceBufferStreamTest, Complete_Overlap_Several) {
// Check expected ranges. Unlike the rest of the position based test API used
// in this case, these range expectation strings are the actual timestamps
- // (divided by frame_duration_), in DTS if ByDts, in PTS if ByPts.
- if (buffering_api_ == BufferingApi::kLegacyByDts)
- CheckExpectedRanges("{ [5,6) [15,16) [25,26) }");
- else
- CheckExpectedRanges("{ [5,9) [15,19) [25,29) }");
+ // (divided by frame_duration_).
+ CheckExpectedRanges("{ [5,9) [15,19) [25,29) }");
// Append buffers at positions 0 through 29 (DTS and PTS).
NewCodedFrameGroupAppend(0, 30, &kDataB);
@@ -1032,7 +947,7 @@ TEST_P(SourceBufferStreamTest, Complete_Overlap_Several) {
// PTS:0 4 1 2 3 5 9 6 7 8 0 4 1 2 3 5 9 6 7 8 0 4 1 2 3 5 9 6 7 8 0 4 1 2 3 5 9
// old: A a A a A a A a
// new:B b b b b B b b b b B b b b b B b b b b B b b b b B b b b b B b b b b
-TEST_P(SourceBufferStreamTest, Complete_Overlap_Several_Then_Merge) {
+TEST_F(SourceBufferStreamTest, Complete_Overlap_Several_Then_Merge) {
// Append 2 buffers at positions 5 through 6 (DTS); 5 through 9 (PTS) partial
// GOP.
NewCodedFrameGroupAppend(5, 2, &kDataA);
@@ -1054,11 +969,8 @@ TEST_P(SourceBufferStreamTest, Complete_Overlap_Several_Then_Merge) {
// Check expected ranges. Unlike the rest of the position based test API used
// in this case, these range expectation strings are the actual timestamps
- // (divided by frame_duration_), in DTS if ByDts, in PTS if ByPts.
- if (buffering_api_ == BufferingApi::kLegacyByDts)
- CheckExpectedRanges("{ [0,36) }");
- else
- CheckExpectedRanges("{ [0,39) }");
+ // (divided by frame_duration_).
+ CheckExpectedRanges("{ [0,39) }");
// Check buffers in range.
Seek(0);
@@ -1066,7 +978,7 @@ TEST_P(SourceBufferStreamTest, Complete_Overlap_Several_Then_Merge) {
CheckExpectedBuffers(35, 36, &kDataA);
}
-TEST_P(SourceBufferStreamTest, Complete_Overlap_Selected) {
+TEST_F(SourceBufferStreamTest, Complete_Overlap_Selected) {
// Append 10 buffers at positions 5 through 14.
NewCodedFrameGroupAppend(5, 10, &kDataA);
@@ -1087,7 +999,7 @@ TEST_P(SourceBufferStreamTest, Complete_Overlap_Selected) {
// overlaps the range from which the client is currently grabbing buffers. We
// would expect that the SourceBufferStream would return old data until it hits
// the keyframe of the new data, after which it will return the new data.
-TEST_P(SourceBufferStreamTest, Complete_Overlap_Selected_TrackBuffer) {
+TEST_F(SourceBufferStreamTest, Complete_Overlap_Selected_TrackBuffer) {
// Append 10 buffers at positions 5 through 14.
NewCodedFrameGroupAppend(5, 10, &kDataA);
@@ -1116,7 +1028,7 @@ TEST_P(SourceBufferStreamTest, Complete_Overlap_Selected_TrackBuffer) {
CheckExpectedRanges("{ [0,19) }");
}
-TEST_P(SourceBufferStreamTest, Complete_Overlap_Selected_EdgeCase) {
+TEST_F(SourceBufferStreamTest, Complete_Overlap_Selected_EdgeCase) {
// Append 10 buffers at positions 5 through 14.
NewCodedFrameGroupAppend(5, 10, &kDataA);
@@ -1145,7 +1057,7 @@ TEST_P(SourceBufferStreamTest, Complete_Overlap_Selected_EdgeCase) {
CheckExpectedRanges("{ [5,14) }");
}
-TEST_P(SourceBufferStreamTest, Complete_Overlap_Selected_Multiple) {
+TEST_F(SourceBufferStreamTest, Complete_Overlap_Selected_Multiple) {
static const uint8_t kDataC = 0x55;
static const uint8_t kDataD = 0x77;
@@ -1181,7 +1093,7 @@ TEST_P(SourceBufferStreamTest, Complete_Overlap_Selected_Multiple) {
CheckExpectedBuffers(5, 14, &kDataD);
}
-TEST_P(SourceBufferStreamTest, Start_Overlap_Selected) {
+TEST_F(SourceBufferStreamTest, Start_Overlap_Selected) {
// Append 10 buffers at positions 0 through 9.
NewCodedFrameGroupAppend(0, 10, &kDataA);
@@ -1201,7 +1113,7 @@ TEST_P(SourceBufferStreamTest, Start_Overlap_Selected) {
CheckExpectedBuffers(5, 14, &kDataB);
}
-TEST_P(SourceBufferStreamTest, Start_Overlap_Selected_TrackBuffer) {
+TEST_F(SourceBufferStreamTest, Start_Overlap_Selected_TrackBuffer) {
// Append 15 buffers at positions 0 through 14.
NewCodedFrameGroupAppend(0, 15, &kDataA);
@@ -1233,7 +1145,7 @@ TEST_P(SourceBufferStreamTest, Start_Overlap_Selected_TrackBuffer) {
CheckExpectedRanges("{ [0,19) }");
}
-TEST_P(SourceBufferStreamTest, Start_Overlap_Selected_EdgeCase) {
+TEST_F(SourceBufferStreamTest, Start_Overlap_Selected_EdgeCase) {
// Append 10 buffers at positions 5 through 14.
NewCodedFrameGroupAppend(5, 10, &kDataA);
@@ -1270,7 +1182,7 @@ TEST_P(SourceBufferStreamTest, Start_Overlap_Selected_EdgeCase) {
// old  : *A*a a a a A a a a a
// new  : B b b b b B b b b b
// after: B b b b b*B*b b b b A a a a a
-TEST_P(SourceBufferStreamTest, End_Overlap_Selected) {
+TEST_F(SourceBufferStreamTest, End_Overlap_Selected) {
// Append 10 buffers at positions 5 through 14.
NewCodedFrameGroupAppend(5, 10, &kDataA);
@@ -1299,7 +1211,7 @@ TEST_P(SourceBufferStreamTest, End_Overlap_Selected) {
// old  : |A a a a a A a a*a*a|
// new  : B b b b b B b b b b
// after: |B b b b b B b b b b A a a*a*a|
-TEST_P(SourceBufferStreamTest, End_Overlap_Selected_AfterEndOfNew_1) {
+TEST_F(SourceBufferStreamTest, End_Overlap_Selected_AfterEndOfNew_1) {
// Append 10 buffers at positions 5 through 14.
NewCodedFrameGroupAppend(5, 10, &kDataA);
@@ -1331,7 +1243,7 @@ TEST_P(SourceBufferStreamTest, End_Overlap_Selected_AfterEndOfNew_1) {
// old  : A a a a a A a a*a*a
// new  : B b b b b B b b
// after: B b b b b B b b A a a*a*a
-TEST_P(SourceBufferStreamTest, End_Overlap_Selected_AfterEndOfNew_2) {
+TEST_F(SourceBufferStreamTest, End_Overlap_Selected_AfterEndOfNew_2) {
// Append 10 buffers at positions 5 through 14 (DTS and PTS, 2 full GOPs)
NewCodedFrameGroupAppend(5, 10, &kDataA);
@@ -1346,11 +1258,11 @@ TEST_P(SourceBufferStreamTest, End_Overlap_Selected_AfterEndOfNew_2) {
// Check expected ranges: stream should not have kept buffers at DTS 8,9;
// PTS 7,8 because the keyframe on which they depended (5, PTS=DTS) was
// overwritten. Note that partial second GOP of B includes PTS [5,9), DTS
- // [5,7). In both ByDts and ByPts, these are continuous with the overlapped
- // original range's next GOP at (10, PTS=DTS).
+ // [5,7). These are continuous with the overlapped original range's next GOP
+ // at (10, PTS=DTS).
// Unlike the rest of the position based test API used in this case, these
// range expectation strings are the actual timestamps (divided by
- // frame_duration_), in DTS if ByDts, in PTS if ByPts.
+ // frame_duration_).
CheckExpectedRanges("{ [0,14) }");
// Make sure rest of data is as expected.
@@ -1374,7 +1286,7 @@ TEST_P(SourceBufferStreamTest, End_Overlap_Selected_AfterEndOfNew_2) {
// new  : B b b b b B b b
// after: B b b b b B b b A a a a a
// track: a a
-TEST_P(SourceBufferStreamTest, End_Overlap_Selected_AfterEndOfNew_3) {
+TEST_F(SourceBufferStreamTest, End_Overlap_Selected_AfterEndOfNew_3) {
// Append 10 buffers at positions 5 through 14 (DTS and PTS, 2 full GOPs)
NewCodedFrameGroupAppend(5, 10, &kDataA);
@@ -1390,11 +1302,11 @@ TEST_P(SourceBufferStreamTest, End_Overlap_Selected_AfterEndOfNew_3) {
// PTS 7,8 because the keyframe on which they depended (5, PTS=DTS) was
// overwritten. However, they were in the GOP being read from, so were put
// into the track buffer. Note that partial second GOP of B includes PTS
- // [5,9), DTS [5,7). In both ByDts and ByPts, these are continuous with the
- // overlapped original range's next GOP at (10, PTS=DTS).
+ // [5,9), DTS [5,7). These are continuous with the overlapped original range's
+ // next GOP at (10, PTS=DTS).
// Unlike the rest of the position based test API used in this case, these
// range expectation strings are the actual timestamps (divided by
- // frame_duration_), in DTS if ByDts, in PTS if ByPts.
+ // frame_duration_).
CheckExpectedRanges("{ [0,14) }");
// Check for data in the track buffer.
@@ -1418,7 +1330,7 @@ TEST_P(SourceBufferStreamTest, End_Overlap_Selected_AfterEndOfNew_3) {
// new  : B b b b b B b b b b
// after: |B b b b b B b b b b A a a a a|
// track: |a a|
-TEST_P(SourceBufferStreamTest, End_Overlap_Selected_OverlappedByNew_1) {
+TEST_F(SourceBufferStreamTest, End_Overlap_Selected_OverlappedByNew_1) {
// Append 10 buffers at positions 5 through 14.
NewCodedFrameGroupAppend(5, 10, &kDataA);
@@ -1453,7 +1365,7 @@ TEST_P(SourceBufferStreamTest, End_Overlap_Selected_OverlappedByNew_1) {
// new  : B b b b b B b
// after: B b b b b B b A a a a a
// track: a a a a
-TEST_P(SourceBufferStreamTest, End_Overlap_Selected_OverlappedByNew_2) {
+TEST_F(SourceBufferStreamTest, End_Overlap_Selected_OverlappedByNew_2) {
// Append 10 buffers at positions 5 through 14 (PTS and DTS, 2 full GOPs).
NewCodedFrameGroupAppend(5, 10, &kDataA);
@@ -1469,11 +1381,11 @@ TEST_P(SourceBufferStreamTest, End_Overlap_Selected_OverlappedByNew_2) {
// PTS 6,7,8 because the keyframe on which they depended (5, PTS=DTS) was
// overwritten. However, they were in the GOP being read from, so were put
// into the track buffer. Note that partial second GOP of B includes PTS
- // [5,9), DTS [5,6). In both ByDts and ByPts, these are continuous with the
- // overlapped original range's next GOP at (10, PTS=DTS).
+ // [5,9), DTS [5,6). These are continuous with the overlapped original range's
+ // next GOP at (10, PTS=DTS).
// Unlike the rest of the position based test API used in this case, these
// range expectation strings are the actual timestamps (divided by
- // frame_duration_), in DTS if ByDts, in PTS if ByPts.
+ // frame_duration_).
CheckExpectedRanges("{ [0,14) }");
// Check for data in the track buffer.
@@ -1501,7 +1413,7 @@ TEST_P(SourceBufferStreamTest, End_Overlap_Selected_OverlappedByNew_2) {
// new  : B b b b b B b b b b B
// after: B b b b b B b b b b B A a a a a
// track: a a a a
-TEST_P(SourceBufferStreamTest, End_Overlap_Selected_OverlappedByNew_3) {
+TEST_F(SourceBufferStreamTest, End_Overlap_Selected_OverlappedByNew_3) {
// Append 15 buffers at positions 5 through 19 (PTS and DTS, 3 full GOPs).
NewCodedFrameGroupAppend(5, 15, &kDataA);
@@ -1517,11 +1429,11 @@ TEST_P(SourceBufferStreamTest, End_Overlap_Selected_OverlappedByNew_3) {
// and PTS) because the keyframe on which they depended (10, PTS=DTS) was
// overwritten. The GOP being read from was overwritten, so track buffer
// should contain DTS 6-9 (PTS 9,6,7,8). Note that the partial third GOP of B
- // includes (10, PTS=DTS). In both ByDts and ByPts, this partial GOP is
- // continuous with the overlapped original range's next GOP at (15, PTS=DTS).
+ // includes (10, PTS=DTS). This partial GOP is continuous with the overlapped
+ // original range's next GOP at (15, PTS=DTS).
// Unlike the rest of the position based test API used in this case, these
// range expectation strings are the actual timestamps (divided by
- // frame_duration_), in DTS if ByDts, in PTS if ByPts.
+ // frame_duration_).
CheckExpectedRanges("{ [0,19) }");
// Check for data in the track buffer.
@@ -1544,7 +1456,7 @@ TEST_P(SourceBufferStreamTest, End_Overlap_Selected_OverlappedByNew_3) {
// new  : B b b b b B
// after: |B b b b b B|
// track: |a a a a|
-TEST_P(SourceBufferStreamTest, End_Overlap_Selected_NoKeyframeAfterNew) {
+TEST_F(SourceBufferStreamTest, End_Overlap_Selected_NoKeyframeAfterNew) {
// Append 5 buffers at positions 5 through 9.
NewCodedFrameGroupAppend(5, 5, &kDataA);
@@ -1585,7 +1497,7 @@ TEST_P(SourceBufferStreamTest, End_Overlap_Selected_NoKeyframeAfterNew) {
// track: a
// new : B b b b b B
// after: A a a a a A B b b b b B b b b b B
-TEST_P(SourceBufferStreamTest, End_Overlap_Selected_NoKeyframeAfterNew2) {
+TEST_F(SourceBufferStreamTest, End_Overlap_Selected_NoKeyframeAfterNew2) {
// Append 7 buffers at positions 10 through 16 (DTS); 10 through 19 (PTS) with
// a partial second GOP.
NewCodedFrameGroupAppend(10, 7, &kDataA);
@@ -1604,12 +1516,12 @@ TEST_P(SourceBufferStreamTest, End_Overlap_Selected_NoKeyframeAfterNew2) {
// should contain DTS 16, PTS 19.
// Unlike the rest of the position based test API used in this case,
// CheckExpectedRanges() uses expectation strings containing actual timestamps
- // (divided by frame_duration_), in DTS if ByDts, in PTS if ByPts.
+ // (divided by frame_duration_).
CheckExpectedRanges("{ [5,15) }");
- // Now do another end-overlap. Append one full GOP plus keyframe of 2nd.
- // Note that this new keyframe at (5, PTS=DTS) is continuous in both ByPts and
- // ByDts with the overlapped range's next GOP (B) at (10, PTS=DTS).
+ // Now do another end-overlap. Append one full GOP plus keyframe of 2nd. Note
+ // that this new keyframe at (5, PTS=DTS) is continuous with the overlapped
+ // range's next GOP (B) at (10, PTS=DTS).
NewCodedFrameGroupAppend(0, 6, &kDataA);
CheckExpectedRanges("{ [0,15) }");
@@ -1632,12 +1544,8 @@ TEST_P(SourceBufferStreamTest, End_Overlap_Selected_NoKeyframeAfterNew2) {
// Now append a keyframe at PTS=DTS=20.
AppendBuffers(20, 1, &kDataB);
- // If ByPts, the buffer at position 16 (PTS 19) in track buffer is adjacent to
- // the next keyframe, so no warning should be emitted on that track buffer
- // exhaustion.
- if (buffering_api_ == BufferingApi::kLegacyByDts)
- EXPECT_MEDIA_LOG(ContainsTrackBufferExhaustionSkipLog(133));
-
+ // The buffer at position 16 (PTS 19) in track buffer is adjacent to the next
+ // keyframe, so no warning should be emitted on that track buffer exhaustion.
// We should be able to get the next buffer (no longer from the track buffer).
CheckExpectedBuffers(20, 20, &kDataB, true);
CheckNoNextBuffer();
@@ -1658,7 +1566,7 @@ TEST_P(SourceBufferStreamTest, End_Overlap_Selected_NoKeyframeAfterNew2) {
// new  : B b b b b B
// after: |B b b b b B| |A a a a a|
// track: |a a a a|
-TEST_P(SourceBufferStreamTest, End_Overlap_Selected_NoKeyframeAfterNew3) {
+TEST_F(SourceBufferStreamTest, End_Overlap_Selected_NoKeyframeAfterNew3) {
// Append 5 buffers at positions 5 through 9.
NewCodedFrameGroupAppend(5, 5, &kDataA);
@@ -1704,7 +1612,7 @@ TEST_P(SourceBufferStreamTest, End_Overlap_Selected_NoKeyframeAfterNew3) {
// old  : A a a a a*A*a a a a A a a a a
// new  : B b b b b
// after: A a a a a*B*b b b b A a a a a
-TEST_P(SourceBufferStreamTest, Middle_Overlap_Selected_1) {
+TEST_F(SourceBufferStreamTest, Middle_Overlap_Selected_1) {
// Append 15 buffers at positions 0 through 14.
NewCodedFrameGroupAppend(0, 15, &kDataA);
@@ -1735,7 +1643,7 @@ TEST_P(SourceBufferStreamTest, Middle_Overlap_Selected_1) {
// old  : A a a a a A a a a a A*a*a a a
// new  : B b b b b
// after: A a a a a B b b b b A*a*a a a
-TEST_P(SourceBufferStreamTest, Middle_Overlap_Selected_2) {
+TEST_F(SourceBufferStreamTest, Middle_Overlap_Selected_2) {
// Append 15 buffers at positions 0 through 14.
NewCodedFrameGroupAppend(0, 15, &kDataA);
@@ -1767,7 +1675,7 @@ TEST_P(SourceBufferStreamTest, Middle_Overlap_Selected_2) {
// old  : A a*a*a a A a a a a A a a a a
// new  : B
// after: A a*a*a a B A a a a a
-TEST_P(SourceBufferStreamTest, Middle_Overlap_Selected_3) {
+TEST_F(SourceBufferStreamTest, Middle_Overlap_Selected_3) {
// Append 15 buffers at positions 0 through 14.
NewCodedFrameGroupAppend(0, 15, &kDataA);
@@ -1806,7 +1714,7 @@ TEST_P(SourceBufferStreamTest, Middle_Overlap_Selected_3) {
// new  : B
// after: A a a a a B A a a a a
// track: a a
-TEST_P(SourceBufferStreamTest, Middle_Overlap_Selected_4) {
+TEST_F(SourceBufferStreamTest, Middle_Overlap_Selected_4) {
// Append 15 buffers at positions 0 through 14.
NewCodedFrameGroupAppend(0, 15, &kDataA);
@@ -1835,7 +1743,7 @@ TEST_P(SourceBufferStreamTest, Middle_Overlap_Selected_4) {
CheckNoNextBuffer();
}
-TEST_P(SourceBufferStreamTest, Overlap_OneByOne) {
+TEST_F(SourceBufferStreamTest, Overlap_OneByOne) {
// Append 5 buffers starting at 10ms, 30ms apart.
NewCodedFrameGroupAppendOneByOne("10K 40 70 100 130");
@@ -1854,7 +1762,7 @@ TEST_P(SourceBufferStreamTest, Overlap_OneByOne) {
CheckExpectedBuffers(0, 9, &kDataB);
}
-TEST_P(SourceBufferStreamTest, Overlap_OneByOne_DeleteGroup) {
+TEST_F(SourceBufferStreamTest, Overlap_OneByOne_DeleteGroup) {
NewCodedFrameGroupAppendOneByOne("10K 40 70 100 130K");
CheckExpectedRangesByTimestamp("{ [10,160) }");
@@ -1872,7 +1780,7 @@ TEST_P(SourceBufferStreamTest, Overlap_OneByOne_DeleteGroup) {
CheckExpectedBuffers("0K 120 130K");
}
-TEST_P(SourceBufferStreamTest, Overlap_OneByOne_BetweenCodedFrameGroups) {
+TEST_F(SourceBufferStreamTest, Overlap_OneByOne_BetweenCodedFrameGroups) {
// Append 5 buffers starting at 110ms, 30ms apart.
NewCodedFrameGroupAppendOneByOne("110K 140 170 200 230");
CheckExpectedRangesByTimestamp("{ [110,260) }");
@@ -1893,7 +1801,7 @@ TEST_P(SourceBufferStreamTest, Overlap_OneByOne_BetweenCodedFrameGroups) {
// old : 10K 40 *70* 100K 125 130K
// new : 0K 30 60 90 120K
// after: 0K 30 60 90 *120K* 130K
-TEST_P(SourceBufferStreamTest, Overlap_OneByOne_TrackBuffer) {
+TEST_F(SourceBufferStreamTest, Overlap_OneByOne_TrackBuffer) {
EXPECT_MEDIA_LOG(ContainsTrackBufferExhaustionSkipLog(50));
NewCodedFrameGroupAppendOneByOne("10K 40 70 100K 125 130D30K");
@@ -1925,7 +1833,7 @@ TEST_P(SourceBufferStreamTest, Overlap_OneByOne_TrackBuffer) {
// track: 70
// new : 110K 130
// after: 0K 30 60 90 *110K* 130
-TEST_P(SourceBufferStreamTest, Overlap_OneByOne_TrackBuffer2) {
+TEST_F(SourceBufferStreamTest, Overlap_OneByOne_TrackBuffer2) {
EXPECT_MEDIA_LOG(ContainsTrackBufferExhaustionSkipLog(40));
NewCodedFrameGroupAppendOneByOne("10K 40 70 100K 125 130D30K");
@@ -1958,7 +1866,7 @@ TEST_P(SourceBufferStreamTest, Overlap_OneByOne_TrackBuffer2) {
// new : 50K 80 110 140
// after: 0K 30 50K 80 110 140 * (waiting for keyframe)
// track: 70
-TEST_P(SourceBufferStreamTest, Overlap_OneByOne_TrackBuffer3) {
+TEST_F(SourceBufferStreamTest, Overlap_OneByOne_TrackBuffer3) {
EXPECT_MEDIA_LOG(ContainsTrackBufferExhaustionSkipLog(80));
NewCodedFrameGroupAppendOneByOne("10K 40 70 100K 125 130D30K");
@@ -1996,7 +1904,7 @@ TEST_P(SourceBufferStreamTest, Overlap_OneByOne_TrackBuffer3) {
// new : 80K 110 140
// after: 0K 30 60 *80K* 110 140
// track: 70
-TEST_P(SourceBufferStreamTest, Overlap_OneByOne_TrackBuffer4) {
+TEST_F(SourceBufferStreamTest, Overlap_OneByOne_TrackBuffer4) {
NewCodedFrameGroupAppendOneByOne("10K 40 70 100K 125 130D30K");
CheckExpectedRangesByTimestamp("{ [10,160) }");
@@ -2027,7 +1935,7 @@ TEST_P(SourceBufferStreamTest, Overlap_OneByOne_TrackBuffer4) {
// new : 80K 110 140
// after: 0K 30 60 *80K* 110 140
// track: 70
-TEST_P(SourceBufferStreamTest, Overlap_OneByOne_TrackBuffer5) {
+TEST_F(SourceBufferStreamTest, Overlap_OneByOne_TrackBuffer5) {
NewCodedFrameGroupAppendOneByOne("10K 40 70 100K");
CheckExpectedRangesByTimestamp("{ [10,130) }");
@@ -2057,7 +1965,7 @@ TEST_P(SourceBufferStreamTest, Overlap_OneByOne_TrackBuffer5) {
// new : 260K 290
// after: 0K 30 60 90 *120K* 130K ... 200K 230 260K 290
// track: 70
-TEST_P(SourceBufferStreamTest, Overlap_OneByOne_TrackBuffer6) {
+TEST_F(SourceBufferStreamTest, Overlap_OneByOne_TrackBuffer6) {
EXPECT_MEDIA_LOG(ContainsTrackBufferExhaustionSkipLog(50));
NewCodedFrameGroupAppendOneByOne("10K 40 70 100K 125 130D30K");
@@ -2088,7 +1996,57 @@ TEST_P(SourceBufferStreamTest, Overlap_OneByOne_TrackBuffer6) {
CheckNoNextBuffer();
}
-TEST_P(SourceBufferStreamTest, Seek_Keyframe) {
+// Test that overlap-appending with a GOP that begins with time of next track
+// buffer frame drops that track buffer frame and buffers the new GOP correctly.
+// append : 10K 40 70 100
+// read the first two buffers
+// after : 10K 40 *70* 100
+//
+// append : 0K 30 60 90 120
+// after : 0K 30 60 90 120
+// track : *70* 100
+//
+// read the buffer at 70ms from track
+// after : 0K 30 60 90 120
+// track : *100*
+//
+// append : 100K 130
+// after : 0K 30 60 90 *100K* 130
+// track : (empty)
+// 100K, not 100, should be the next buffer read.
+TEST_F(SourceBufferStreamTest,
+ Overlap_That_Prunes_All_of_Previous_TrackBuffer) {
+ NewCodedFrameGroupAppend("10K 40 70 100");
+ CheckExpectedRangesByTimestamp("{ [10,130) }");
+
+ // Seek to 70ms.
+ SeekToTimestampMs(70);
+ CheckExpectedBuffers("10K 40");
+
+ // Overlap with a new coded frame group from 0 to 120ms, leaving the original
+ // nonkeyframes at 70ms and 100ms in the track buffer.
+ NewCodedFrameGroupAppend("0K 30 60 90 120");
+ CheckExpectedRangesByTimestamp("{ [0,150) }");
+
+ // Verify that 70 gets read out of the track buffer, leaving the nonkeyframe
+ // at 100ms in the track buffer.
+ CheckExpectedBuffers("70");
+
+ // Overlap with a coded frame group having a keyframe at 100ms. This should
+ // clear the track buffer and serve that keyframe, not the original
+ // nonkeyframe at time 100ms on the next read call.
+ NewCodedFrameGroupAppend("100K 130");
+ CheckExpectedRangesByTimestamp("{ [0,160) }");
+ CheckExpectedBuffers("100K 130");
+ CheckNoNextBuffer();
+
+ // Check the final result: should not include data from the track buffer.
+ SeekToTimestampMs(0);
+ CheckExpectedBuffers("0K 30 60 90 100K 130");
+ CheckNoNextBuffer();
+}
+
+TEST_F(SourceBufferStreamTest, Seek_Keyframe) {
// Append 6 buffers at positions 0 through 5.
NewCodedFrameGroupAppend(0, 6);
@@ -2097,7 +2055,7 @@ TEST_P(SourceBufferStreamTest, Seek_Keyframe) {
CheckExpectedBuffers(0, 5, true);
}
-TEST_P(SourceBufferStreamTest, Seek_NonKeyframe) {
+TEST_F(SourceBufferStreamTest, Seek_NonKeyframe) {
// Append 15 buffers at positions 0 through 14.
NewCodedFrameGroupAppend(0, 15);
@@ -2114,7 +2072,7 @@ TEST_P(SourceBufferStreamTest, Seek_NonKeyframe) {
CheckExpectedBuffers(0, 3, true);
}
-TEST_P(SourceBufferStreamTest, Seek_NotBuffered) {
+TEST_F(SourceBufferStreamTest, Seek_NotBuffered) {
// Seek to beginning.
SeekToTimestampMs(0);
@@ -2133,7 +2091,7 @@ TEST_P(SourceBufferStreamTest, Seek_NotBuffered) {
CheckNoNextBuffer();
}
-TEST_P(SourceBufferStreamTest, Seek_InBetweenTimestamps) {
+TEST_F(SourceBufferStreamTest, Seek_InBetweenTimestamps) {
// Append 10 buffers at positions 0 through 9.
NewCodedFrameGroupAppend(0, 10);
@@ -2141,11 +2099,11 @@ TEST_P(SourceBufferStreamTest, Seek_InBetweenTimestamps) {
CHECK(bump > base::TimeDelta());
// Seek to buffer a little after position 5.
- STREAM_OP(Seek(5 * frame_duration() + bump));
+ stream_->Seek(5 * frame_duration() + bump);
CheckExpectedBuffers(5, 5, true);
// Seek to buffer a little before position 5.
- STREAM_OP(Seek(5 * frame_duration() - bump));
+ stream_->Seek(5 * frame_duration() - bump);
CheckExpectedBuffers(0, 0, true);
}
@@ -2153,7 +2111,7 @@ TEST_P(SourceBufferStreamTest, Seek_InBetweenTimestamps) {
// buffers to the track buffers. Then the test does a seek to another part of
// the stream. The SourceBufferStream should clear its internal track buffer in
// response to the Seek().
-TEST_P(SourceBufferStreamTest, Seek_After_TrackBuffer_Filled) {
+TEST_F(SourceBufferStreamTest, Seek_After_TrackBuffer_Filled) {
// Append 10 buffers at positions 5 through 14.
NewCodedFrameGroupAppend(5, 10, &kDataA);
@@ -2175,7 +2133,7 @@ TEST_P(SourceBufferStreamTest, Seek_After_TrackBuffer_Filled) {
CheckExpectedRanges("{ [0,19) }");
}
-TEST_P(SourceBufferStreamTest, Seek_StartOfGroup) {
+TEST_F(SourceBufferStreamTest, Seek_StartOfGroup) {
base::TimeDelta bump = frame_duration() / 4;
CHECK(bump > base::TimeDelta());
@@ -2209,7 +2167,7 @@ TEST_P(SourceBufferStreamTest, Seek_StartOfGroup) {
CheckExpectedBuffers(16, 19);
}
-TEST_P(SourceBufferStreamTest, Seek_BeforeStartOfGroup) {
+TEST_F(SourceBufferStreamTest, Seek_BeforeStartOfGroup) {
// Append 10 buffers at positions 5 through 14.
NewCodedFrameGroupAppend(5, 10);
@@ -2220,7 +2178,7 @@ TEST_P(SourceBufferStreamTest, Seek_BeforeStartOfGroup) {
CheckExpectedBuffers(5, 14);
}
-TEST_P(SourceBufferStreamTest, OldSeekPoint_CompleteOverlap) {
+TEST_F(SourceBufferStreamTest, OldSeekPoint_CompleteOverlap) {
// Append 5 buffers at positions 0 through 4.
NewCodedFrameGroupAppend(0, 4);
@@ -2239,7 +2197,7 @@ TEST_P(SourceBufferStreamTest, OldSeekPoint_CompleteOverlap) {
CheckExpectedBuffers(0, 0);
}
-TEST_P(SourceBufferStreamTest, OldSeekPoint_CompleteOverlap_Pending) {
+TEST_F(SourceBufferStreamTest, OldSeekPoint_CompleteOverlap_Pending) {
// Append 2 buffers at positions 0 through 1.
NewCodedFrameGroupAppend(0, 2);
@@ -2258,7 +2216,7 @@ TEST_P(SourceBufferStreamTest, OldSeekPoint_CompleteOverlap_Pending) {
CheckNoNextBuffer();
}
-TEST_P(SourceBufferStreamTest, OldSeekPoint_MiddleOverlap) {
+TEST_F(SourceBufferStreamTest, OldSeekPoint_MiddleOverlap) {
// Append 1 buffer at position 0, duration 10ms.
NewCodedFrameGroupAppend("0D10K");
@@ -2287,7 +2245,7 @@ TEST_P(SourceBufferStreamTest, OldSeekPoint_MiddleOverlap) {
CheckNoNextBuffer();
}
-TEST_P(SourceBufferStreamTest, OldSeekPoint_MiddleOverlap_Pending) {
+TEST_F(SourceBufferStreamTest, OldSeekPoint_MiddleOverlap_Pending) {
// Append 1 buffer at position 0, duration 10ms.
NewCodedFrameGroupAppend("0D10K");
@@ -2318,7 +2276,7 @@ TEST_P(SourceBufferStreamTest, OldSeekPoint_MiddleOverlap_Pending) {
CheckNoNextBuffer();
}
-TEST_P(SourceBufferStreamTest, OldSeekPoint_StartOverlap) {
+TEST_F(SourceBufferStreamTest, OldSeekPoint_StartOverlap) {
// Append 2 buffers at positions 0 through 1.
NewCodedFrameGroupAppend(0, 2);
@@ -2336,7 +2294,7 @@ TEST_P(SourceBufferStreamTest, OldSeekPoint_StartOverlap) {
CheckExpectedBuffers(0, 0);
}
-TEST_P(SourceBufferStreamTest, OldSeekPoint_StartOverlap_Pending) {
+TEST_F(SourceBufferStreamTest, OldSeekPoint_StartOverlap_Pending) {
// Append 2 buffers at positions 0 through 1.
NewCodedFrameGroupAppend(0, 2);
@@ -2354,7 +2312,7 @@ TEST_P(SourceBufferStreamTest, OldSeekPoint_StartOverlap_Pending) {
CheckNoNextBuffer();
}
-TEST_P(SourceBufferStreamTest, OldSeekPoint_EndOverlap) {
+TEST_F(SourceBufferStreamTest, OldSeekPoint_EndOverlap) {
// Append 5 buffers at positions 0 through 4.
NewCodedFrameGroupAppend(0, 4);
@@ -2372,7 +2330,7 @@ TEST_P(SourceBufferStreamTest, OldSeekPoint_EndOverlap) {
CheckExpectedBuffers(0, 0);
}
-TEST_P(SourceBufferStreamTest, OldSeekPoint_EndOverlap_Pending) {
+TEST_F(SourceBufferStreamTest, OldSeekPoint_EndOverlap_Pending) {
// Append 2 buffers at positions 0 through 1.
NewCodedFrameGroupAppend(0, 2);
@@ -2390,7 +2348,7 @@ TEST_P(SourceBufferStreamTest, OldSeekPoint_EndOverlap_Pending) {
CheckNoNextBuffer();
}
-TEST_P(SourceBufferStreamTest, GetNextBuffer_AfterMerges) {
+TEST_F(SourceBufferStreamTest, GetNextBuffer_AfterMerges) {
// Append 5 buffers at positions 10 through 14.
NewCodedFrameGroupAppend(10, 5);
@@ -2414,7 +2372,7 @@ TEST_P(SourceBufferStreamTest, GetNextBuffer_AfterMerges) {
CheckExpectedBuffers(11, 14);
}
-TEST_P(SourceBufferStreamTest, GetNextBuffer_ExhaustThenAppend) {
+TEST_F(SourceBufferStreamTest, GetNextBuffer_ExhaustThenAppend) {
// Append 4 buffers at positions 0 through 3.
NewCodedFrameGroupAppend(0, 4);
@@ -2432,7 +2390,7 @@ TEST_P(SourceBufferStreamTest, GetNextBuffer_ExhaustThenAppend) {
// This test covers the case where new buffers start-overlap a range whose next
// buffer is not buffered.
-TEST_P(SourceBufferStreamTest, GetNextBuffer_ExhaustThenStartOverlap) {
+TEST_F(SourceBufferStreamTest, GetNextBuffer_ExhaustThenStartOverlap) {
// Append 10 buffers at positions 0 through 9 and exhaust the buffers.
NewCodedFrameGroupAppend(0, 10, &kDataA);
Seek(0);
@@ -2463,7 +2421,7 @@ TEST_P(SourceBufferStreamTest, GetNextBuffer_ExhaustThenStartOverlap) {
// buffer that was returned by GetNextBuffer(). This test verifies that
// GetNextBuffer() skips to second GOP in the newly appended data instead
// of returning two buffers with the same timestamp.
-TEST_P(SourceBufferStreamTest, GetNextBuffer_ExhaustThenStartOverlap2) {
+TEST_F(SourceBufferStreamTest, GetNextBuffer_ExhaustThenStartOverlap2) {
NewCodedFrameGroupAppend("0K 30 60 90 120");
Seek(0);
@@ -2487,7 +2445,7 @@ TEST_P(SourceBufferStreamTest, GetNextBuffer_ExhaustThenStartOverlap2) {
// This test covers the case where new buffers completely overlap a range
// whose next buffer is not buffered.
-TEST_P(SourceBufferStreamTest, GetNextBuffer_ExhaustThenCompleteOverlap) {
+TEST_F(SourceBufferStreamTest, GetNextBuffer_ExhaustThenCompleteOverlap) {
// Append 5 buffers at positions 10 through 14 and exhaust the buffers.
NewCodedFrameGroupAppend(10, 5, &kDataA);
Seek(10);
@@ -2515,7 +2473,7 @@ TEST_P(SourceBufferStreamTest, GetNextBuffer_ExhaustThenCompleteOverlap) {
// This test covers the case where a range is stalled waiting for its next
// buffer, then an end-overlap causes the end of the range to be deleted.
-TEST_P(SourceBufferStreamTest, GetNextBuffer_ExhaustThenEndOverlap) {
+TEST_F(SourceBufferStreamTest, GetNextBuffer_ExhaustThenEndOverlap) {
// Append 5 buffers at positions 10 through 14 and exhaust the buffers.
NewCodedFrameGroupAppend(10, 5, &kDataA);
Seek(10);
@@ -2547,7 +2505,7 @@ TEST_P(SourceBufferStreamTest, GetNextBuffer_ExhaustThenEndOverlap) {
// to fulfill the request. The SourceBufferStream should be able to fulfill the
// request when the data is later appended, and should not lose track of the
// "next buffer" position.
-TEST_P(SourceBufferStreamTest, GetNextBuffer_Overlap_Selected_Complete) {
+TEST_F(SourceBufferStreamTest, GetNextBuffer_Overlap_Selected_Complete) {
// Append 5 buffers at positions 5 through 9.
NewCodedFrameGroupAppend(5, 5, &kDataA);
@@ -2570,7 +2528,7 @@ TEST_P(SourceBufferStreamTest, GetNextBuffer_Overlap_Selected_Complete) {
CheckExpectedBuffers(10, 14, &kDataB);
}
-TEST_P(SourceBufferStreamTest, PresentationTimestampIndependence) {
+TEST_F(SourceBufferStreamTest, PresentationTimestampIndependence) {
// Append 20 buffers at position 0.
NewCodedFrameGroupAppend(0, 20);
Seek(0);
@@ -2603,7 +2561,7 @@ TEST_P(SourceBufferStreamTest, PresentationTimestampIndependence) {
}
}
-TEST_P(SourceBufferStreamTest, GarbageCollection_DeleteFront) {
+TEST_F(SourceBufferStreamTest, GarbageCollection_DeleteFront) {
// Set memory limit to 20 buffers.
SetMemoryLimit(20);
@@ -2635,7 +2593,7 @@ TEST_P(SourceBufferStreamTest, GarbageCollection_DeleteFront) {
CheckExpectedBuffers(5, 9, &kDataA);
}
-TEST_P(SourceBufferStreamTest,
+TEST_F(SourceBufferStreamTest,
GarbageCollection_DeleteFront_PreserveSeekedGOP) {
// Set memory limit to 15 buffers.
SetMemoryLimit(15);
@@ -2644,7 +2602,7 @@ TEST_P(SourceBufferStreamTest,
NewCodedFrameGroupAppend("1000K 1010 1020 1030 1040");
// GC should be a no-op, since we are just under memory limit.
- EXPECT_TRUE(STREAM_OP(GarbageCollectIfNeeded(DecodeTimestamp(), 0)));
+ EXPECT_TRUE(GarbageCollect(base::TimeDelta(), 0));
CheckExpectedRangesByTimestamp("{ [0,100) [1000,1050) }");
// Seek to the near the end of the first range
@@ -2656,12 +2614,11 @@ TEST_P(SourceBufferStreamTest,
// GOP in that first range. Neither can it collect the last appended GOP
// (which is the entire second range), so GC should return false since it
// couldn't collect enough.
- EXPECT_FALSE(STREAM_OP(
- GarbageCollectIfNeeded(DecodeTimestamp::FromMilliseconds(95), 7)));
+ EXPECT_FALSE(GarbageCollect(base::TimeDelta::FromMilliseconds(95), 7));
CheckExpectedRangesByTimestamp("{ [50,100) [1000,1050) }");
}
-TEST_P(SourceBufferStreamTest, GarbageCollection_DeleteFrontGOPsAtATime) {
+TEST_F(SourceBufferStreamTest, GarbageCollection_DeleteFrontGOPsAtATime) {
// Set memory limit to 20 buffers.
SetMemoryLimit(20);
@@ -2684,7 +2641,7 @@ TEST_P(SourceBufferStreamTest, GarbageCollection_DeleteFrontGOPsAtATime) {
CheckExpectedBuffers(5, 9, &kDataA);
}
-TEST_P(SourceBufferStreamTest, GarbageCollection_DeleteBack) {
+TEST_F(SourceBufferStreamTest, GarbageCollection_DeleteBack) {
// Set memory limit to 5 buffers.
SetMemoryLimit(5);
@@ -2704,7 +2661,7 @@ TEST_P(SourceBufferStreamTest, GarbageCollection_DeleteBack) {
CheckExpectedBuffers(0, 4, &kDataA);
}
-TEST_P(SourceBufferStreamTest, GarbageCollection_DeleteFrontAndBack) {
+TEST_F(SourceBufferStreamTest, GarbageCollection_DeleteFrontAndBack) {
// Set memory limit to 3 buffers.
SetMemoryLimit(3);
@@ -2727,7 +2684,7 @@ TEST_P(SourceBufferStreamTest, GarbageCollection_DeleteFrontAndBack) {
CheckNoNextBuffer();
}
-TEST_P(SourceBufferStreamTest, GarbageCollection_DeleteSeveralRanges) {
+TEST_F(SourceBufferStreamTest, GarbageCollection_DeleteSeveralRanges) {
// Append 5 buffers at positions 0 through 4.
NewCodedFrameGroupAppend(0, 5);
@@ -2786,7 +2743,7 @@ TEST_P(SourceBufferStreamTest, GarbageCollection_DeleteSeveralRanges) {
CheckNoNextBuffer();
}
-TEST_P(SourceBufferStreamTest, GarbageCollection_DeleteAfterLastAppend) {
+TEST_F(SourceBufferStreamTest, GarbageCollection_DeleteAfterLastAppend) {
// Set memory limit to 10 buffers.
SetMemoryLimit(10);
@@ -2807,14 +2764,13 @@ TEST_P(SourceBufferStreamTest, GarbageCollection_DeleteAfterLastAppend) {
// So the ranges before GC are "{ [100,280) [310,400) [490,670) }".
NewCodedFrameGroupAppend("100K 130 160 190K 220 250K");
- EXPECT_TRUE(STREAM_OP(
- GarbageCollectIfNeeded(DecodeTimestamp::FromMilliseconds(580), 0)));
+ EXPECT_TRUE(GarbageCollect(base::TimeDelta::FromMilliseconds(580), 0));
// Should save the newly appended GOPs.
CheckExpectedRangesByTimestamp("{ [100,280) [580,670) }");
}
-TEST_P(SourceBufferStreamTest, GarbageCollection_DeleteAfterLastAppendMerged) {
+TEST_F(SourceBufferStreamTest, GarbageCollection_DeleteAfterLastAppendMerged) {
// Set memory limit to 10 buffers.
SetMemoryLimit(10);
@@ -2828,14 +2784,13 @@ TEST_P(SourceBufferStreamTest, GarbageCollection_DeleteAfterLastAppendMerged) {
// range. So the range before GC is "{ [220,670) }".
NewCodedFrameGroupAppend("220K 250 280 310K 340 370");
- EXPECT_TRUE(STREAM_OP(
- GarbageCollectIfNeeded(DecodeTimestamp::FromMilliseconds(580), 0)));
+ EXPECT_TRUE(GarbageCollect(base::TimeDelta::FromMilliseconds(580), 0));
// Should save the newly appended GOPs.
CheckExpectedRangesByTimestamp("{ [220,400) [580,670) }");
}
-TEST_P(SourceBufferStreamTest, GarbageCollection_NoSeek) {
+TEST_F(SourceBufferStreamTest, GarbageCollection_NoSeek) {
// Set memory limit to 20 buffers.
SetMemoryLimit(20);
@@ -2855,7 +2810,7 @@ TEST_P(SourceBufferStreamTest, GarbageCollection_NoSeek) {
CheckExpectedBuffers(5, 24, &kDataA);
}
-TEST_P(SourceBufferStreamTest, GarbageCollection_PendingSeek) {
+TEST_F(SourceBufferStreamTest, GarbageCollection_PendingSeek) {
// Append 10 buffers at positions 0 through 9.
NewCodedFrameGroupAppend(0, 10, &kDataA);
@@ -2893,7 +2848,7 @@ TEST_P(SourceBufferStreamTest, GarbageCollection_PendingSeek) {
CheckExpectedBuffers(30, 34, &kDataA);
}
-TEST_P(SourceBufferStreamTest, GarbageCollection_NeedsMoreData) {
+TEST_F(SourceBufferStreamTest, GarbageCollection_NeedsMoreData) {
// Set memory limit to 15 buffers.
SetMemoryLimit(15);
@@ -2945,7 +2900,7 @@ TEST_P(SourceBufferStreamTest, GarbageCollection_NeedsMoreData) {
// after: B b b b b*B*b b b b
// -- Garbage Collect --
// after: *B*b b b b
-TEST_P(SourceBufferStreamTest, GarbageCollection_TrackBuffer) {
+TEST_F(SourceBufferStreamTest, GarbageCollection_TrackBuffer) {
// Set memory limit to 3 buffers.
SetMemoryLimit(3);
@@ -2961,11 +2916,8 @@ TEST_P(SourceBufferStreamTest, GarbageCollection_TrackBuffer) {
// GC should leave GOP containing seek position (15,16,17 DTS; 15,19,16 PTS).
// Unlike the rest of the position based test API used in this case,
// CheckExpectedRanges() uses expectation strings containing actual timestamps
- // (divided by frame_duration_), in DTS if ByDts, in PTS if ByPts.
- if (buffering_api_ == BufferingApi::kLegacyByDts)
- CheckExpectedRanges("{ [15,17) }");
- else
- CheckExpectedRanges("{ [15,19) }");
+ // (divided by frame_duration_).
+ CheckExpectedRanges("{ [15,19) }");
// Move next buffer position to 16.
CheckExpectedBuffers(15, 15, &kDataA);
@@ -2995,22 +2947,16 @@ TEST_P(SourceBufferStreamTest, GarbageCollection_TrackBuffer) {
// the keyframe.
CheckExpectedRanges("{ [20,24) }");
- // If ByPts, the buffer at position 16 (PTS 19) in track buffer was adjacent
+ // The buffer at position 16 (PTS 19) in the track buffer was adjacent
// to the next keyframe (PTS=DTS=20), so no warning should be emitted on that
// track buffer exhaustion, even though the last frame read out of the track
// buffer before exhaustion was position 17 (PTS 16).
- // If ByDts, exhaustion jumps from highest DTS in track buffer (17) to next
- // keyframe (20). The test infra uses 33ms durations for these position based
- // tests, resulting in a 99ms warning:
- if (buffering_api_ == BufferingApi::kLegacyByDts)
- EXPECT_MEDIA_LOG(ContainsTrackBufferExhaustionSkipLog(99));
-
CheckExpectedBuffers(20, 24, &kDataB);
CheckNoNextBuffer();
}
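// Editor's note: "track buffer" in the test above is the MSE notion of
// frames that were already scheduled for playback but whose GOP was
// overwritten by a newer append. A hedged sketch of the behavior the
// expectations rely on:
//
//   // When an overlapping append removes the GOP being played, reads are
//   // served from the retained track buffer until the next keyframe in the
//   // updated ranges is reached, then playback continues from new data.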
// Test GC preserves data starting at first GOP containing playback position.
-TEST_P(SourceBufferStreamTest, GarbageCollection_SaveDataAtPlaybackPosition) {
+TEST_F(SourceBufferStreamTest, GarbageCollection_SaveDataAtPlaybackPosition) {
// Set memory limit to 30 buffers = 1 second of data.
SetMemoryLimit(30);
// And append 300 buffers = 10 seconds of data.
@@ -3018,57 +2964,49 @@ TEST_P(SourceBufferStreamTest, GarbageCollection_SaveDataAtPlaybackPosition) {
CheckExpectedRanges("{ [0,299) }");
// Playback position at 0, all data must be preserved.
- EXPECT_FALSE(STREAM_OP(
- GarbageCollectIfNeeded(DecodeTimestamp::FromMilliseconds(0), 0)));
+ EXPECT_FALSE(GarbageCollect(base::TimeDelta::FromMilliseconds(0), 0));
CheckExpectedRanges("{ [0,299) }");
// Playback position at 1 sec, the first second of data [0,29) should be
// collected, since we are way over memory limit.
- EXPECT_FALSE(STREAM_OP(
- GarbageCollectIfNeeded(DecodeTimestamp::FromMilliseconds(1000), 0)));
+ EXPECT_FALSE(GarbageCollect(base::TimeDelta::FromMilliseconds(1000), 0));
CheckExpectedRanges("{ [30,299) }");
// Playback position at 1.1 sec, no new data can be collected, since the
// playback position is still in the first GOP of buffered data.
- EXPECT_FALSE(STREAM_OP(
- GarbageCollectIfNeeded(DecodeTimestamp::FromMilliseconds(1100), 0)));
+ EXPECT_FALSE(GarbageCollect(base::TimeDelta::FromMilliseconds(1100), 0));
CheckExpectedRanges("{ [30,299) }");
// Playback position at 5.166 sec, just at the very end of the GOP
// corresponding to buffer range 150-155, which should be preserved.
- EXPECT_FALSE(STREAM_OP(
- GarbageCollectIfNeeded(DecodeTimestamp::FromMilliseconds(5166), 0)));
+ EXPECT_FALSE(GarbageCollect(base::TimeDelta::FromMilliseconds(5166), 0));
CheckExpectedRanges("{ [150,299) }");
// Playback position at 5.167 sec, just past the end of the GOP corresponding
// to buffer range 150-155, so it should be garbage collected now.
- EXPECT_FALSE(STREAM_OP(
- GarbageCollectIfNeeded(DecodeTimestamp::FromMilliseconds(5167), 0)));
+ EXPECT_FALSE(GarbageCollect(base::TimeDelta::FromMilliseconds(5167), 0));
CheckExpectedRanges("{ [155,299) }");
// Playback at 9.0 sec, we can now successfully collect all data except the
// last second and we are back under memory limit of 30 buffers, so GCIfNeeded
// should return true.
- EXPECT_TRUE(STREAM_OP(
- GarbageCollectIfNeeded(DecodeTimestamp::FromMilliseconds(9000), 0)));
+ EXPECT_TRUE(GarbageCollect(base::TimeDelta::FromMilliseconds(9000), 0));
CheckExpectedRanges("{ [270,299) }");
// Playback at 9.999 sec, GC succeeds, since we are under memory limit even
// without removing any data.
- EXPECT_TRUE(STREAM_OP(
- GarbageCollectIfNeeded(DecodeTimestamp::FromMilliseconds(9999), 0)));
+ EXPECT_TRUE(GarbageCollect(base::TimeDelta::FromMilliseconds(9999), 0));
CheckExpectedRanges("{ [270,299) }");
// Playback at 15 sec. This should never happen during regular playback in the
// browser, since this position has no data buffered, but it should still cause
// no problems for the GC algorithm, so test it just in case.
- EXPECT_TRUE(STREAM_OP(
- GarbageCollectIfNeeded(DecodeTimestamp::FromMilliseconds(15000), 0)));
+ EXPECT_TRUE(GarbageCollect(base::TimeDelta::FromMilliseconds(15000), 0));
CheckExpectedRanges("{ [270,299) }");
}
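// Editor's note: the 5166/5167 ms boundary above follows from the test
// defaults, assuming 30 buffers per second grouped into 5-frame GOPs: the
// GOP holding buffers [150,155) spans PTS [150 * 1000/30, 155 * 1000/30) =
// [5000 ms, ~5166.7 ms), so media time 5166 ms still falls inside that GOP
// while 5167 ms lands just past it, making the GOP eligible for collection.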
// Test saving the last GOP appended when this GOP is the only GOP in its range.
-TEST_P(SourceBufferStreamTest, GarbageCollection_SaveAppendGOP) {
+TEST_F(SourceBufferStreamTest, GarbageCollection_SaveAppendGOP) {
// Set memory limit to 3 and make sure the 4-byte GOP is not garbage
// collected.
SetMemoryLimit(3);
@@ -3093,22 +3031,20 @@ TEST_P(SourceBufferStreamTest, GarbageCollection_SaveAppendGOP) {
// GC. Because it is after 290ms, this tests that the GOP is saved when
// deleting from the back.
NewCodedFrameGroupAppend("500K 530 560 590");
- EXPECT_FALSE(STREAM_OP(
- GarbageCollectIfNeeded(DecodeTimestamp::FromMilliseconds(290), 0)));
+ EXPECT_FALSE(GarbageCollect(base::TimeDelta::FromMilliseconds(290), 0));
// Should save GOPs between 290ms and the last GOP appended.
CheckExpectedRangesByTimestamp("{ [290,380) [500,620) }");
// Continue appending to this GOP after GC.
AppendBuffers("620D30");
- EXPECT_FALSE(STREAM_OP(
- GarbageCollectIfNeeded(DecodeTimestamp::FromMilliseconds(290), 0)));
+ EXPECT_FALSE(GarbageCollect(base::TimeDelta::FromMilliseconds(290), 0));
CheckExpectedRangesByTimestamp("{ [290,380) [500,650) }");
}
// Test saving the last GOP appended when this GOP is in the middle of a
// non-selected range.
-TEST_P(SourceBufferStreamTest, GarbageCollection_SaveAppendGOP_Middle) {
+TEST_F(SourceBufferStreamTest, GarbageCollection_SaveAppendGOP_Middle) {
// Append 3 GOPs starting at 0ms, 30ms apart.
NewCodedFrameGroupAppend("0K 30 60 90K 120 150 180K 210 240");
CheckExpectedRangesByTimestamp("{ [0,270) }");
@@ -3120,13 +3056,11 @@ TEST_P(SourceBufferStreamTest, GarbageCollection_SaveAppendGOP_Middle) {
// This whole GOP should be saved after GC, which will fail since the GOP is
// larger than the 1-buffer memory limit.
- EXPECT_FALSE(STREAM_OP(
- GarbageCollectIfNeeded(DecodeTimestamp::FromMilliseconds(80), 0)));
+ EXPECT_FALSE(GarbageCollect(base::TimeDelta::FromMilliseconds(80), 0));
CheckExpectedRangesByTimestamp("{ [80,170) }");
// We should still be able to continue appending data to GOP
AppendBuffers("170D30");
- EXPECT_FALSE(STREAM_OP(
- GarbageCollectIfNeeded(DecodeTimestamp::FromMilliseconds(80), 0)));
+ EXPECT_FALSE(GarbageCollect(base::TimeDelta::FromMilliseconds(80), 0));
CheckExpectedRangesByTimestamp("{ [80,200) }");
// Append a 2nd range after this range, without triggering GC.
@@ -3140,22 +3074,20 @@ TEST_P(SourceBufferStreamTest, GarbageCollection_SaveAppendGOP_Middle) {
// it is after the selected range, this tests that the GOP is saved when
// deleting from the back.
NewCodedFrameGroupAppend("500K 530 560 590");
- EXPECT_FALSE(STREAM_OP(
- GarbageCollectIfNeeded(DecodeTimestamp::FromMilliseconds(80), 0)));
+ EXPECT_FALSE(GarbageCollect(base::TimeDelta::FromMilliseconds(80), 0));
// Should save the GOPs between the seek point and GOP that was last appended
CheckExpectedRangesByTimestamp("{ [80,200) [400,620) }");
// Continue appending to this GOP after GC.
AppendBuffers("620D30");
- EXPECT_FALSE(STREAM_OP(
- GarbageCollectIfNeeded(DecodeTimestamp::FromMilliseconds(80), 0)));
+ EXPECT_FALSE(GarbageCollect(base::TimeDelta::FromMilliseconds(80), 0));
CheckExpectedRangesByTimestamp("{ [80,200) [400,650) }");
}
// Test saving the last GOP appended when the GOP containing the next buffer is
// adjacent to the last GOP appended.
-TEST_P(SourceBufferStreamTest, GarbageCollection_SaveAppendGOP_Selected1) {
+TEST_F(SourceBufferStreamTest, GarbageCollection_SaveAppendGOP_Selected1) {
// Append 3 GOPs at 0ms, 90ms, and 180ms.
NewCodedFrameGroupAppend("0K 30 60 90K 120 150 180K 210 240");
CheckExpectedRangesByTimestamp("{ [0,270) }");
@@ -3169,8 +3101,7 @@ TEST_P(SourceBufferStreamTest, GarbageCollection_SaveAppendGOP_Selected1) {
// GC should save the GOPs at 0ms and 90ms, and will fail since the saved
// GOPs are larger than the 1-buffer memory limit.
- EXPECT_FALSE(STREAM_OP(
- GarbageCollectIfNeeded(DecodeTimestamp::FromMilliseconds(90), 0)));
+ EXPECT_FALSE(GarbageCollect(base::TimeDelta::FromMilliseconds(90), 0));
CheckExpectedRangesByTimestamp("{ [0,180) }");
// Seek to 0 and check all buffers.
@@ -3183,8 +3114,7 @@ TEST_P(SourceBufferStreamTest, GarbageCollection_SaveAppendGOP_Selected1) {
NewCodedFrameGroupAppend("180K 210 240");
// Should save the GOP at 90ms and the GOP at 180ms.
- EXPECT_FALSE(STREAM_OP(
- GarbageCollectIfNeeded(DecodeTimestamp::FromMilliseconds(90), 0)));
+ EXPECT_FALSE(GarbageCollect(base::TimeDelta::FromMilliseconds(90), 0));
CheckExpectedRangesByTimestamp("{ [90,270) }");
CheckExpectedBuffers("90K 120 150 180K 210 240");
CheckNoNextBuffer();
@@ -3193,7 +3123,7 @@ TEST_P(SourceBufferStreamTest, GarbageCollection_SaveAppendGOP_Selected1) {
// Test saving the last GOP appended when it is at the beginning or end of the
// selected range. This tests when the last GOP appended is before or after the
// GOP containing the next buffer, but not directly adjacent to this GOP.
-TEST_P(SourceBufferStreamTest, GarbageCollection_SaveAppendGOP_Selected2) {
+TEST_F(SourceBufferStreamTest, GarbageCollection_SaveAppendGOP_Selected2) {
// Append 4 GOPs starting at positions 0ms, 90ms, 180ms, 270ms.
NewCodedFrameGroupAppend("0K 30 60 90K 120 150 180K 210 240 270K 300 330");
CheckExpectedRangesByTimestamp("{ [0,360) }");
@@ -3207,8 +3137,7 @@ TEST_P(SourceBufferStreamTest, GarbageCollection_SaveAppendGOP_Selected2) {
// GC will save data in the range where the most recent append has happened
// [0; 180) and the range where the next read position is [270;360)
- EXPECT_FALSE(STREAM_OP(
- GarbageCollectIfNeeded(DecodeTimestamp::FromMilliseconds(270), 0)));
+ EXPECT_FALSE(GarbageCollect(base::TimeDelta::FromMilliseconds(270), 0));
CheckExpectedRangesByTimestamp("{ [0,180) [270,360) }");
// Add 3 GOPs to the end of the selected range at 360ms, 450ms, and 540ms.
@@ -3218,8 +3147,7 @@ TEST_P(SourceBufferStreamTest, GarbageCollection_SaveAppendGOP_Selected2) {
// Overlap the GOP at 450ms and garbage collect to test deleting from the
// back.
NewCodedFrameGroupAppend("450K 480 510");
- EXPECT_FALSE(STREAM_OP(
- GarbageCollectIfNeeded(DecodeTimestamp::FromMilliseconds(270), 0)));
+ EXPECT_FALSE(GarbageCollect(base::TimeDelta::FromMilliseconds(270), 0));
// Should save GOPs from GOP at 270ms to GOP at 450ms.
CheckExpectedRangesByTimestamp("{ [270,540) }");
@@ -3227,7 +3155,7 @@ TEST_P(SourceBufferStreamTest, GarbageCollection_SaveAppendGOP_Selected2) {
// Test saving the last GOP appended when it is the same as the GOP containing
// the next buffer.
-TEST_P(SourceBufferStreamTest, GarbageCollection_SaveAppendGOP_Selected3) {
+TEST_F(SourceBufferStreamTest, GarbageCollection_SaveAppendGOP_Selected3) {
// Seek to start of stream.
SeekToTimestampMs(0);
@@ -3242,8 +3170,7 @@ TEST_P(SourceBufferStreamTest, GarbageCollection_SaveAppendGOP_Selected3) {
// GC should save the newly appended GOP, which is also the next GOP that
// will be returned from the seek request.
- EXPECT_FALSE(STREAM_OP(
- GarbageCollectIfNeeded(DecodeTimestamp::FromMilliseconds(0), 0)));
+ EXPECT_FALSE(GarbageCollect(base::TimeDelta::FromMilliseconds(0), 0));
CheckExpectedRangesByTimestamp("{ [0,60) }");
// Check the buffers in the range.
@@ -3255,15 +3182,14 @@ TEST_P(SourceBufferStreamTest, GarbageCollection_SaveAppendGOP_Selected3) {
// GC should still save the rest of this GOP and should be able to fulfill
// the read.
- EXPECT_FALSE(STREAM_OP(
- GarbageCollectIfNeeded(DecodeTimestamp::FromMilliseconds(0), 0)));
+ EXPECT_FALSE(GarbageCollect(base::TimeDelta::FromMilliseconds(0), 0));
CheckExpectedRangesByTimestamp("{ [0,120) }");
CheckExpectedBuffers("60 90");
CheckNoNextBuffer();
}
// Test the performance of garbage collection.
-TEST_P(SourceBufferStreamTest, GarbageCollection_Performance) {
+TEST_F(SourceBufferStreamTest, GarbageCollection_Performance) {
// Force |keyframes_per_second_| to be equal to kDefaultFramesPerSecond.
SetStreamInfo(kDefaultFramesPerSecond, kDefaultFramesPerSecond);
@@ -3283,7 +3209,7 @@ TEST_P(SourceBufferStreamTest, GarbageCollection_Performance) {
}
}
-TEST_P(SourceBufferStreamTest, GarbageCollection_MediaTimeAfterLastAppendTime) {
+TEST_F(SourceBufferStreamTest, GarbageCollection_MediaTimeAfterLastAppendTime) {
// Set memory limit to 10 buffers.
SetMemoryLimit(10);
@@ -3295,15 +3221,14 @@ TEST_P(SourceBufferStreamTest, GarbageCollection_MediaTimeAfterLastAppendTime) {
// the last appended buffer (330), but still within buffered ranges, taking
// into account the duration of the last frame (timestamp of the last frame is
// 330, duration is 30, so the latest valid buffered position is 330+30=360).
- EXPECT_TRUE(STREAM_OP(
- GarbageCollectIfNeeded(DecodeTimestamp::FromMilliseconds(360), 0)));
+ EXPECT_TRUE(GarbageCollect(base::TimeDelta::FromMilliseconds(360), 0));
// GC should collect one GOP from the front to bring us back under memory
// limit of 10 buffers.
CheckExpectedRangesByTimestamp("{ [120,360) }");
}
-TEST_P(SourceBufferStreamTest,
+TEST_F(SourceBufferStreamTest,
GarbageCollection_MediaTimeOutsideOfStreamBufferedRange) {
// Set memory limit to 10 buffers.
SetMemoryLimit(10);
@@ -3323,15 +3248,14 @@ TEST_P(SourceBufferStreamTest,
// return a media_time that is slightly outside of video buffered range). In
// those cases the GC algorithm should clamp the media_time value to the
// buffered ranges to work correctly (see crbug.com/563292).
- EXPECT_TRUE(STREAM_OP(
- GarbageCollectIfNeeded(DecodeTimestamp::FromMilliseconds(361), 0)));
+ EXPECT_TRUE(GarbageCollect(base::TimeDelta::FromMilliseconds(361), 0));
// GC should collect one GOP from the front to bring us back under memory
// limit of 10 buffers.
CheckExpectedRangesByTimestamp("{ [120,360) }");
}
-TEST_P(SourceBufferStreamTest, GetRemovalRange_BytesToFree) {
+TEST_F(SourceBufferStreamTest, GetRemovalRange_BytesToFree) {
// Append 2 GOPs starting at 300ms, 30ms apart.
NewCodedFrameGroupAppend("300K 330 360 390K 420 450");
@@ -3393,7 +3317,7 @@ TEST_P(SourceBufferStreamTest, GetRemovalRange_BytesToFree) {
EXPECT_EQ(18, bytes_removed);
}
-TEST_P(SourceBufferStreamTest, GetRemovalRange_Range) {
+TEST_F(SourceBufferStreamTest, GetRemovalRange_Range) {
// Append 2 GOPs starting at 300ms, 30ms apart.
NewCodedFrameGroupAppend("300K 330 360 390K 420 450");
@@ -3459,7 +3383,7 @@ TEST_P(SourceBufferStreamTest, GetRemovalRange_Range) {
EXPECT_EQ(18, bytes_removed);
}
-TEST_P(SourceBufferStreamTest, ConfigChange_Basic) {
+TEST_F(SourceBufferStreamTest, ConfigChange_Basic) {
VideoDecoderConfig new_config = TestVideoConfig::Large();
ASSERT_FALSE(new_config.Matches(video_config_));
@@ -3472,7 +3396,7 @@ TEST_P(SourceBufferStreamTest, ConfigChange_Basic) {
CheckVideoConfig(video_config_);
// Signal a config change.
- STREAM_OP(UpdateVideoConfig(new_config, false));
+ stream_->UpdateVideoConfig(new_config, false);
// Make sure updating the config doesn't change anything since new_config
// should not be associated with the buffer GetNextBuffer() will return.
@@ -3502,13 +3426,13 @@ TEST_P(SourceBufferStreamTest, ConfigChange_Basic) {
}
}
-TEST_P(SourceBufferStreamTest, ConfigChange_Seek) {
+TEST_F(SourceBufferStreamTest, ConfigChange_Seek) {
scoped_refptr<StreamParserBuffer> buffer;
VideoDecoderConfig new_config = TestVideoConfig::Large();
Seek(0);
NewCodedFrameGroupAppend(0, 5, &kDataA);
- STREAM_OP(UpdateVideoConfig(new_config, false));
+ stream_->UpdateVideoConfig(new_config, false);
NewCodedFrameGroupAppend(5, 5, &kDataB);
// Seek to the start of the buffers with the new config and make sure a
@@ -3539,37 +3463,28 @@ TEST_P(SourceBufferStreamTest, ConfigChange_Seek) {
CheckExpectedBuffers(0, 4, &kDataA);
}
-TEST_P(SourceBufferStreamTest, SetExplicitDuration) {
+TEST_F(SourceBufferStreamTest, SetExplicitDuration) {
// Append 3 discontinuous partial GOPs.
NewCodedFrameGroupAppend("50K 90|60");
NewCodedFrameGroupAppend("150K 190|160");
NewCodedFrameGroupAppend("250K 290|260");
- // Check expected ranges.
- if (buffering_api_ == BufferingApi::kLegacyByDts)
- CheckExpectedRangesByTimestamp("{ [50,70) [150,170) [250,270) }");
- else
- CheckExpectedRangesByTimestamp("{ [50,100) [150,200) [250,300) }");
-
- // Set duration to be 80ms.
- STREAM_OP(OnSetDuration(base::TimeDelta::FromMilliseconds(80)));
-
- // Truncate the buffered data after 80ms (DTS if ByDts, PTS if ByPts).
- if (buffering_api_ == BufferingApi::kLegacyByDts) {
- CheckExpectedRangesByTimestamp("{ [50,70) }");
- } else {
- // In ByPts buffering, the simulated P-frame at PTS 90ms should have been
- // removed by the duration truncation. Only the frame at PTS 50ms should
- // remain.
- CheckExpectedRangesByTimestamp("{ [50,60) }");
- }
+ CheckExpectedRangesByTimestamp("{ [50,100) [150,200) [250,300) }");
+
+ // Set duration to be 80ms. Truncates the buffered data after 80ms.
+ stream_->OnSetDuration(base::TimeDelta::FromMilliseconds(80));
+
+ // The simulated P-frame at PTS 90ms should have been
+ // removed by the duration truncation. Only the frame at PTS 50ms should
+ // remain.
+ CheckExpectedRangesByTimestamp("{ [50,60) }");
// Adding data past the previous duration should still work.
NewCodedFrameGroupAppend("0D50K 50 100K");
CheckExpectedRangesByTimestamp("{ [0,150) }");
}
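// Editor's note: a quick key to the buffer-string syntax, as inferred from
// the cases in this file: each token is a presentation timestamp in ms,
// "|" separates PTS|DTS for out-of-decode-order frames ("90|60" is PTS 90,
// DTS 60), "K" marks a keyframe, "Dnn" sets an explicit duration in ms,
// "E" flags an estimated duration, and "P" marks a preroll buffer. So
// "0D50K 50 100K" appends a 50 ms keyframe at PTS 0, a frame at PTS 50,
// and a keyframe at PTS 100.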
-TEST_P(SourceBufferStreamTest, SetExplicitDuration_EdgeCase) {
+TEST_F(SourceBufferStreamTest, SetExplicitDuration_EdgeCase) {
// Append 10 buffers at positions 10 through 19.
NewCodedFrameGroupAppend(10, 10);
@@ -3580,13 +3495,13 @@ TEST_P(SourceBufferStreamTest, SetExplicitDuration_EdgeCase) {
CheckExpectedRanges("{ [10,19) [25,29) }");
// Set duration to be right before buffer 25.
- STREAM_OP(OnSetDuration(frame_duration() * 25));
+ stream_->OnSetDuration(frame_duration() * 25);
// Should truncate the last range.
CheckExpectedRanges("{ [10,19) }");
}
-TEST_P(SourceBufferStreamTest, SetExplicitDuration_EdgeCase2) {
+TEST_F(SourceBufferStreamTest, SetExplicitDuration_EdgeCase2) {
// This test requires specific relative proportions for fudge room, append
// size, and duration truncation amounts. See details at:
// https://codereview.chromium.org/2385423002
@@ -3599,7 +3514,7 @@ TEST_P(SourceBufferStreamTest, SetExplicitDuration_EdgeCase2) {
// Trim off last 2 buffers, totaling 8 ms. Notably less than the current fudge
// room of 10 ms.
- STREAM_OP(OnSetDuration(base::TimeDelta::FromMilliseconds(5)));
+ stream_->OnSetDuration(base::TimeDelta::FromMilliseconds(5));
// Verify truncation.
CheckExpectedRangesByTimestamp("{ [0,5) }");
@@ -3611,7 +3526,7 @@ TEST_P(SourceBufferStreamTest, SetExplicitDuration_EdgeCase2) {
CheckExpectedRangesByTimestamp("{ [0,5) [11,19) }");
}
-TEST_P(SourceBufferStreamTest, RemoveWithinFudgeRoom) {
+TEST_F(SourceBufferStreamTest, RemoveWithinFudgeRoom) {
// This test requires specific relative proportions for fudge room, append
// size, and removal amounts. See details at:
// https://codereview.chromium.org/2385423002
@@ -3636,7 +3551,7 @@ TEST_P(SourceBufferStreamTest, RemoveWithinFudgeRoom) {
CheckExpectedRangesByTimestamp("{ [0,5) [11,19) }");
}
-TEST_P(SourceBufferStreamTest, SetExplicitDuration_DeletePartialRange) {
+TEST_F(SourceBufferStreamTest, SetExplicitDuration_DeletePartialRange) {
// Append IPBBB GOPs into 3 discontinuous ranges.
NewCodedFrameGroupAppend("0K 40|10 10|20 20|30 30|40");
NewCodedFrameGroupAppend(
@@ -3647,35 +3562,26 @@ TEST_P(SourceBufferStreamTest, SetExplicitDuration_DeletePartialRange) {
// Check expected ranges.
CheckExpectedRangesByTimestamp("{ [0,50) [100,200) [250,300) }");
- STREAM_OP(OnSetDuration(base::TimeDelta::FromMilliseconds(140)));
+ stream_->OnSetDuration(base::TimeDelta::FromMilliseconds(140));
- if (buffering_api_ == BufferingApi::kLegacyByDts) {
- // Should truncate the data after 140ms.
- CheckExpectedRangesByTimestamp("{ [0,50) [100,140) }");
- } else {
- // The B-frames at PTS 110-130 were in the GOP in decode order after
- // the simulated P-frame at PTS 140 which was truncated, so those B-frames
- // are also removed.
- CheckExpectedRangesByTimestamp("{ [0,50) [100,110) }");
- }
+ // The B-frames at PTS 110-130 were in the GOP in decode order after
+ // the simulated P-frame at PTS 140 which was truncated, so those B-frames
+ // are also removed.
+ CheckExpectedRangesByTimestamp("{ [0,50) [100,110) }");
}
-TEST_P(SourceBufferStreamTest, SetExplicitDuration_DeleteSelectedRange) {
+TEST_F(SourceBufferStreamTest, SetExplicitDuration_DeleteSelectedRange) {
// Append 3 discontinuous partial GOPs.
NewCodedFrameGroupAppend("50K 90|60");
NewCodedFrameGroupAppend("150K 190|160");
NewCodedFrameGroupAppend("250K 290|260");
- // Check expected ranges.
- if (buffering_api_ == BufferingApi::kLegacyByDts)
- CheckExpectedRangesByTimestamp("{ [50,70) [150,170) [250,270) }");
- else
- CheckExpectedRangesByTimestamp("{ [50,100) [150,200) [250,300) }");
+ CheckExpectedRangesByTimestamp("{ [50,100) [150,200) [250,300) }");
SeekToTimestampMs(150);
// Set duration to 50ms.
- STREAM_OP(OnSetDuration(base::TimeDelta::FromMilliseconds(50)));
+ stream_->OnSetDuration(base::TimeDelta::FromMilliseconds(50));
// Expect everything to be deleted, and should not have next buffer anymore.
CheckNoNextBuffer();
@@ -3691,7 +3597,7 @@ TEST_P(SourceBufferStreamTest, SetExplicitDuration_DeleteSelectedRange) {
CheckExpectedRangesByTimestamp("{ [0,250) }");
}
-TEST_P(SourceBufferStreamTest, SetExplicitDuration_DeletePartialSelectedRange) {
+TEST_F(SourceBufferStreamTest, SetExplicitDuration_DeletePartialSelectedRange) {
// Append 5 buffers at positions 0 through 4.
NewCodedFrameGroupAppend(0, 5);
@@ -3705,7 +3611,7 @@ TEST_P(SourceBufferStreamTest, SetExplicitDuration_DeletePartialSelectedRange) {
Seek(10);
// Set duration to be between buffers 24 and 25.
- STREAM_OP(OnSetDuration(frame_duration() * 25));
+ stream_->OnSetDuration(frame_duration() * 25);
// Should truncate the data after 24.
CheckExpectedRanges("{ [0,4) [10,24) }");
@@ -3714,7 +3620,7 @@ TEST_P(SourceBufferStreamTest, SetExplicitDuration_DeletePartialSelectedRange) {
CheckExpectedBuffers(10, 10);
// Now set the duration immediately after buffer 10.
- STREAM_OP(OnSetDuration(frame_duration() * 11));
+ stream_->OnSetDuration(frame_duration() * 11);
// Seek position should be reset.
CheckNoNextBuffer();
@@ -3725,7 +3631,7 @@ TEST_P(SourceBufferStreamTest, SetExplicitDuration_DeletePartialSelectedRange) {
// already start passing the data to the decoding pipeline. The selected range,
// when invalidated by truncation, should be updated to NULL accordingly so
// that successive append operations keep working.
-TEST_P(SourceBufferStreamTest, SetExplicitDuration_UpdateSelectedRange) {
+TEST_F(SourceBufferStreamTest, SetExplicitDuration_UpdateSelectedRange) {
// Seek to start of stream.
SeekToTimestampMs(0);
@@ -3735,7 +3641,7 @@ TEST_P(SourceBufferStreamTest, SetExplicitDuration_UpdateSelectedRange) {
CheckExpectedBuffers("0K 30");
// Set duration to be right before buffer 1.
- STREAM_OP(OnSetDuration(base::TimeDelta::FromMilliseconds(60)));
+ stream_->OnSetDuration(base::TimeDelta::FromMilliseconds(60));
// Verify that there is no next buffer.
CheckNoNextBuffer();
@@ -3746,7 +3652,7 @@ TEST_P(SourceBufferStreamTest, SetExplicitDuration_UpdateSelectedRange) {
CheckExpectedRangesByTimestamp("{ [0,60) [120,180) }");
}
-TEST_P(SourceBufferStreamTest,
+TEST_F(SourceBufferStreamTest,
SetExplicitDuration_AfterGroupTimestampAndBeforeFirstBufferTimestamp) {
NewCodedFrameGroupAppend("0K 30K 60K");
@@ -3760,58 +3666,45 @@ TEST_P(SourceBufferStreamTest,
CheckExpectedRangesByTimestamp("{ [0,90) [200,350) [400,490) }");
- STREAM_OP(OnSetDuration(base::TimeDelta::FromMilliseconds(120)));
+ stream_->OnSetDuration(base::TimeDelta::FromMilliseconds(120));
// Verify that the buffered ranges are updated properly and we don't crash.
CheckExpectedRangesByTimestamp("{ [0,90) }");
}
-TEST_P(SourceBufferStreamTest, SetExplicitDuration_MarkEOS) {
+TEST_F(SourceBufferStreamTest, SetExplicitDuration_MarkEOS) {
// Append 1 full and 1 partial GOP: IPBBBIPBB
NewCodedFrameGroupAppend(
"0K 40|10 10|20 20|30 30|40 "
"50K 90|60 60|70 70|80");
- // Check expected ranges.
- if (buffering_api_ == BufferingApi::kLegacyByDts)
- CheckExpectedRangesByTimestamp("{ [0,90) }");
- else
- CheckExpectedRangesByTimestamp("{ [0,100) }");
+ CheckExpectedRangesByTimestamp("{ [0,100) }");
SeekToTimestampMs(50);
// Set duration to be before the seeked to position.
// This will result in truncation of the selected range and a reset
// of NextBufferPosition.
- STREAM_OP(OnSetDuration(base::TimeDelta::FromMilliseconds(40)));
-
- // Check the expected ranges.
- if (buffering_api_ == BufferingApi::kLegacyByDts) {
- CheckExpectedRangesByTimestamp("{ [0,40) }");
- } else {
- // The P-frame at PTS 40ms was removed, so its dependent
- // B-frames at PTS 10-30 were also removed.
- CheckExpectedRangesByTimestamp("{ [0,10) }");
- }
+ stream_->OnSetDuration(base::TimeDelta::FromMilliseconds(40));
+
+ // The P-frame at PTS 40ms was removed, so its dependent B-frames at PTS 10-30
+ // were also removed.
+ CheckExpectedRangesByTimestamp("{ [0,10) }");
// Mark EOS reached.
- STREAM_OP(MarkEndOfStream());
+ stream_->MarkEndOfStream();
// Expect EOS to be reached.
CheckEOSReached();
}
-TEST_P(SourceBufferStreamTest, SetExplicitDuration_MarkEOS_IsSeekPending) {
+TEST_F(SourceBufferStreamTest, SetExplicitDuration_MarkEOS_IsSeekPending) {
// Append 1 full and 1 partial GOP: IPBBBIPBB
NewCodedFrameGroupAppend(
"0K 40|10 10|20 20|30 30|40 "
"50K 90|60 60|70 70|80");
- // Check expected ranges.
- if (buffering_api_ == BufferingApi::kLegacyByDts)
- CheckExpectedRangesByTimestamp("{ [0,90) }");
- else
- CheckExpectedRangesByTimestamp("{ [0,100) }");
+ CheckExpectedRangesByTimestamp("{ [0,100) }");
// Seek to 100ms will result in a pending seek.
SeekToTimestampMs(100);
@@ -3819,26 +3712,21 @@ TEST_P(SourceBufferStreamTest, SetExplicitDuration_MarkEOS_IsSeekPending) {
// Set duration to be before the seeked to position.
// This will result in truncation of the selected range and a reset
// of NextBufferPosition.
- STREAM_OP(OnSetDuration(base::TimeDelta::FromMilliseconds(40)));
-
- // Check the expected ranges.
- if (buffering_api_ == BufferingApi::kLegacyByDts) {
- CheckExpectedRangesByTimestamp("{ [0,40) }");
- } else {
- // The P-frame at PTS 40ms was removed, so its dependent
- // B-frames at PTS 10-30 were also removed.
- CheckExpectedRangesByTimestamp("{ [0,10) }");
- }
+ stream_->OnSetDuration(base::TimeDelta::FromMilliseconds(40));
+
+ // The P-frame at PTS 40ms was removed, so its dependent B-frames at PTS 10-30
+ // were also removed.
+ CheckExpectedRangesByTimestamp("{ [0,10) }");
- EXPECT_TRUE(STREAM_OP(IsSeekPending()));
+ EXPECT_TRUE(stream_->IsSeekPending());
// Mark EOS reached.
- STREAM_OP(MarkEndOfStream());
- EXPECT_FALSE(STREAM_OP(IsSeekPending()));
+ stream_->MarkEndOfStream();
+ EXPECT_FALSE(stream_->IsSeekPending());
}
// Test the case where the current playback position is at the end of the
// buffered data and several overlaps occur.
-TEST_P(SourceBufferStreamTest, OverlapWhileWaitingForMoreData) {
+TEST_F(SourceBufferStreamTest, OverlapWhileWaitingForMoreData) {
// Seek to start of stream.
SeekToTimestampMs(0);
@@ -3875,7 +3763,7 @@ TEST_P(SourceBufferStreamTest, OverlapWhileWaitingForMoreData) {
// Verify that a single coded frame at the current read position unblocks the
// read even if the frame is buffered after the previously read position is
// removed.
-TEST_P(SourceBufferStreamTest, AfterRemove_SingleFrameRange_Unblocks_Read) {
+TEST_F(SourceBufferStreamTest, AfterRemove_SingleFrameRange_Unblocks_Read) {
Seek(0);
NewCodedFrameGroupAppend("0K 30 60 90D30");
CheckExpectedRangesByTimestamp("{ [0,120) }");
@@ -3893,7 +3781,7 @@ TEST_P(SourceBufferStreamTest, AfterRemove_SingleFrameRange_Unblocks_Read) {
// Verify that multiple short (relative to max-inter-buffer-distance * 2) coded
// frames at the current read position unblock the read even if the frames are
// buffered after the previously read position is removed.
-TEST_P(SourceBufferStreamTest, AfterRemove_TinyFrames_Unblock_Read_1) {
+TEST_F(SourceBufferStreamTest, AfterRemove_TinyFrames_Unblock_Read_1) {
Seek(0);
NewCodedFrameGroupAppend("0K 30 60 90D30");
CheckExpectedRangesByTimestamp("{ [0,120) }");
@@ -3911,7 +3799,7 @@ TEST_P(SourceBufferStreamTest, AfterRemove_TinyFrames_Unblock_Read_1) {
// Verify that multiple short (relative to max-inter-buffer-distance * 2) coded
// frames starting at the fudge room boundary unblock the read even if the
// frames are buffered after the previously read position is removed.
-TEST_P(SourceBufferStreamTest, AfterRemove_TinyFrames_Unblock_Read_2) {
+TEST_F(SourceBufferStreamTest, AfterRemove_TinyFrames_Unblock_Read_2) {
Seek(0);
NewCodedFrameGroupAppend("0K 30 60 90D30");
CheckExpectedRangesByTimestamp("{ [0,120) }");
@@ -3928,7 +3816,7 @@ TEST_P(SourceBufferStreamTest, AfterRemove_TinyFrames_Unblock_Read_2) {
// Verify that coded frames starting after the fudge room boundary do not
// unblock the read when buffered after the previously read position is removed.
-TEST_P(SourceBufferStreamTest, AfterRemove_BeyondFudge_Stalled) {
+TEST_F(SourceBufferStreamTest, AfterRemove_BeyondFudge_Stalled) {
Seek(0);
NewCodedFrameGroupAppend("0K 30 60 90D30");
CheckExpectedRangesByTimestamp("{ [0,120) }");
@@ -3944,15 +3832,23 @@ TEST_P(SourceBufferStreamTest, AfterRemove_BeyondFudge_Stalled) {
// Verify that non-keyframes with the same timestamp in the same
// append are handled correctly.
-TEST_P(SourceBufferStreamTest, SameTimestamp_Video_SingleAppend) {
+TEST_F(SourceBufferStreamTest, SameTimestamp_Video_SingleAppend) {
Seek(0);
NewCodedFrameGroupAppend("0K 30 30 60 90 120K 150");
CheckExpectedBuffers("0K 30 30 60 90 120K 150");
}
+// Verify that a non-keyframe followed by a keyframe with the same timestamp
+// is allowed.
+TEST_F(SourceBufferStreamTest, SameTimestamp_Video_SingleAppend2) {
+ Seek(0);
+ NewCodedFrameGroupAppend("0K 30 30K 60");
+ CheckExpectedBuffers("0K 30 30K 60");
+}
+
// Verify that non-keyframes with the same timestamp can occur
// in different appends.
-TEST_P(SourceBufferStreamTest, SameTimestamp_Video_TwoAppends) {
+TEST_F(SourceBufferStreamTest, SameTimestamp_Video_TwoAppends) {
Seek(0);
NewCodedFrameGroupAppend("0K 30D0");
AppendBuffers("30 60 90 120K 150");
@@ -3960,22 +3856,8 @@ TEST_P(SourceBufferStreamTest, SameTimestamp_Video_TwoAppends) {
}
// Verify that a non-keyframe followed by a keyframe with the same timestamp
-// is allowed (and also results in a MediaLog when buffering by DTS).
-TEST_P(SourceBufferStreamTest, SameTimestamp_Video_SingleAppend_Warning) {
- if (buffering_api_ == BufferingApi::kLegacyByDts)
- EXPECT_MEDIA_LOG(ContainsSameTimestampAt30MillisecondsLog());
-
- Seek(0);
- NewCodedFrameGroupAppend("0K 30 30K 60");
- CheckExpectedBuffers("0K 30 30K 60");
-}
-
-// Verify that a non-keyframe followed by a keyframe with the same timestamp
-// is allowed (and also results in a MediaLog when buffering by DTS).
-TEST_P(SourceBufferStreamTest, SameTimestamp_Video_TwoAppends_Warning) {
- if (buffering_api_ == BufferingApi::kLegacyByDts)
- EXPECT_MEDIA_LOG(ContainsSameTimestampAt30MillisecondsLog());
-
+// is allowed.
+TEST_F(SourceBufferStreamTest, SameTimestamp_Video_TwoAppends2) {
Seek(0);
NewCodedFrameGroupAppend("0K 30D0");
AppendBuffers("30K 60");
@@ -3984,20 +3866,20 @@ TEST_P(SourceBufferStreamTest, SameTimestamp_Video_TwoAppends_Warning) {
// Verify that a keyframe followed by a non-keyframe with the same timestamp
// is allowed.
-TEST_P(SourceBufferStreamTest, SameTimestamp_VideoKeyFrame_TwoAppends) {
+TEST_F(SourceBufferStreamTest, SameTimestamp_VideoKeyFrame_TwoAppends) {
Seek(0);
NewCodedFrameGroupAppend("0K 30D0K");
AppendBuffers("30 60");
CheckExpectedBuffers("0K 30K 30 60");
}
-TEST_P(SourceBufferStreamTest, SameTimestamp_VideoKeyFrame_SingleAppend) {
+TEST_F(SourceBufferStreamTest, SameTimestamp_VideoKeyFrame_SingleAppend) {
Seek(0);
NewCodedFrameGroupAppend("0K 30K 30 60");
CheckExpectedBuffers("0K 30K 30 60");
}
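// Editor's note: taken together, the SameTimestamp_* cases document the
// adjacency rules after this change: equal-timestamp frames are legal
// within a single append and across appends, in either keyframe order,
// and the DTS-only same-timestamp MediaLog warning is gone along with the
// ByDts buffering mode.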
-TEST_P(SourceBufferStreamTest, SameTimestamp_Video_Overlap_1) {
+TEST_F(SourceBufferStreamTest, SameTimestamp_Video_Overlap_1) {
Seek(0);
NewCodedFrameGroupAppend("0K 30 60 60 90 120K 150");
@@ -4005,14 +3887,14 @@ TEST_P(SourceBufferStreamTest, SameTimestamp_Video_Overlap_1) {
CheckExpectedBuffers("0K 30 60K 91 121K 151");
}
-TEST_P(SourceBufferStreamTest, SameTimestamp_Video_Overlap_2) {
+TEST_F(SourceBufferStreamTest, SameTimestamp_Video_Overlap_2) {
Seek(0);
NewCodedFrameGroupAppend("0K 30 60 60 90 120K 150");
NewCodedFrameGroupAppend("0K 30 61");
CheckExpectedBuffers("0K 30 61 120K 150");
}
-TEST_P(SourceBufferStreamTest, SameTimestamp_Video_Overlap_3) {
+TEST_F(SourceBufferStreamTest, SameTimestamp_Video_Overlap_3) {
Seek(0);
NewCodedFrameGroupAppend("0K 20 40 60 80 100K 101 102 103K");
NewCodedFrameGroupAppend("0K 20 40 60 80 90D0");
@@ -4025,10 +3907,10 @@ TEST_P(SourceBufferStreamTest, SameTimestamp_Video_Overlap_3) {
}
// Test all the valid same timestamp cases for audio.
-TEST_P(SourceBufferStreamTest, SameTimestamp_Audio) {
+TEST_F(SourceBufferStreamTest, SameTimestamp_Audio) {
AudioDecoderConfig config(kCodecMP3, kSampleFormatF32, CHANNEL_LAYOUT_STEREO,
44100, EmptyExtraData(), Unencrypted());
- STREAM_RESET(config);
+ ResetStream<>(config);
Seek(0);
NewCodedFrameGroupAppend("0K 0K 30K 30K");
CheckExpectedBuffers("0K 0K 30K 30K");
@@ -4037,19 +3919,19 @@ TEST_P(SourceBufferStreamTest, SameTimestamp_Audio) {
// If seeking past any existing range and the seek is pending
// because no data has been provided for that position,
// the stream position can be considered as the end of stream.
-TEST_P(SourceBufferStreamTest, EndSelected_During_PendingSeek) {
+TEST_F(SourceBufferStreamTest, EndSelected_During_PendingSeek) {
// Append 15 buffers at positions 0 through 14.
NewCodedFrameGroupAppend(0, 15);
Seek(20);
- EXPECT_TRUE(STREAM_OP(IsSeekPending()));
- STREAM_OP(MarkEndOfStream());
- EXPECT_FALSE(STREAM_OP(IsSeekPending()));
+ EXPECT_TRUE(stream_->IsSeekPending());
+ stream_->MarkEndOfStream();
+ EXPECT_FALSE(stream_->IsSeekPending());
}
// If there is a pending seek between 2 existing ranges,
// the end of the stream has not been reached.
-TEST_P(SourceBufferStreamTest, EndNotSelected_During_PendingSeek) {
+TEST_F(SourceBufferStreamTest, EndNotSelected_During_PendingSeek) {
// Append:
// - 10 buffers at positions 0 through 9.
// - 10 buffers at positions 30 through 39
@@ -4057,14 +3939,13 @@ TEST_P(SourceBufferStreamTest, EndNotSelected_During_PendingSeek) {
NewCodedFrameGroupAppend(30, 10);
Seek(20);
- EXPECT_TRUE(STREAM_OP(IsSeekPending()));
- STREAM_OP(MarkEndOfStream());
- EXPECT_TRUE(STREAM_OP(IsSeekPending()));
+ EXPECT_TRUE(stream_->IsSeekPending());
+ stream_->MarkEndOfStream();
+ EXPECT_TRUE(stream_->IsSeekPending());
}
-
// Removing exact start & end of a range.
-TEST_P(SourceBufferStreamTest, Remove_WholeRange1) {
+TEST_F(SourceBufferStreamTest, Remove_WholeRange1) {
Seek(0);
NewCodedFrameGroupAppend("10K 40 70K 100 130K");
CheckExpectedRangesByTimestamp("{ [10,160) }");
@@ -4073,7 +3954,7 @@ TEST_P(SourceBufferStreamTest, Remove_WholeRange1) {
}
// Removal range starts before range and ends exactly at end.
-TEST_P(SourceBufferStreamTest, Remove_WholeRange2) {
+TEST_F(SourceBufferStreamTest, Remove_WholeRange2) {
Seek(0);
NewCodedFrameGroupAppend("10K 40 70K 100 130K");
CheckExpectedRangesByTimestamp("{ [10,160) }");
@@ -4083,7 +3964,7 @@ TEST_P(SourceBufferStreamTest, Remove_WholeRange2) {
// Removal range starts at the start of a range and ends beyond the
// range end.
-TEST_P(SourceBufferStreamTest, Remove_WholeRange3) {
+TEST_F(SourceBufferStreamTest, Remove_WholeRange3) {
Seek(0);
NewCodedFrameGroupAppend("10K 40 70K 100 130K");
CheckExpectedRangesByTimestamp("{ [10,160) }");
@@ -4092,7 +3973,7 @@ TEST_P(SourceBufferStreamTest, Remove_WholeRange3) {
}
// Removal range starts before range start and ends after the range end.
-TEST_P(SourceBufferStreamTest, Remove_WholeRange4) {
+TEST_F(SourceBufferStreamTest, Remove_WholeRange4) {
Seek(0);
NewCodedFrameGroupAppend("10K 40 70K 100 130K");
CheckExpectedRangesByTimestamp("{ [10,160) }");
@@ -4101,7 +3982,7 @@ TEST_P(SourceBufferStreamTest, Remove_WholeRange4) {
}
// Removes multiple ranges.
-TEST_P(SourceBufferStreamTest, Remove_WholeRange5) {
+TEST_F(SourceBufferStreamTest, Remove_WholeRange5) {
Seek(0);
NewCodedFrameGroupAppend("10K 40 70K 100 130K");
NewCodedFrameGroupAppend("1000K 1030 1060K 1090 1120K");
@@ -4112,7 +3993,7 @@ TEST_P(SourceBufferStreamTest, Remove_WholeRange5) {
}
// Verifies a [0-infinity) range removes everything.
-TEST_P(SourceBufferStreamTest, Remove_ZeroToInfinity) {
+TEST_F(SourceBufferStreamTest, Remove_ZeroToInfinity) {
Seek(0);
NewCodedFrameGroupAppend("10K 40 70K 100 130K");
NewCodedFrameGroupAppend("1000K 1030 1060K 1090 1120K");
@@ -4124,7 +4005,7 @@ TEST_P(SourceBufferStreamTest, Remove_ZeroToInfinity) {
// Removal range starts at the beginning of the range and ends in the
// middle of the range. This test verifies that full GOPs are removed.
-TEST_P(SourceBufferStreamTest, Remove_Partial1) {
+TEST_F(SourceBufferStreamTest, Remove_Partial1) {
Seek(0);
NewCodedFrameGroupAppend("10K 40 70K 100 130K");
NewCodedFrameGroupAppend("1000K 1030 1060K 1090 1120K");
@@ -4135,7 +4016,7 @@ TEST_P(SourceBufferStreamTest, Remove_Partial1) {
// Removal range starts in the middle of a range and ends at the exact
// end of the range.
-TEST_P(SourceBufferStreamTest, Remove_Partial2) {
+TEST_F(SourceBufferStreamTest, Remove_Partial2) {
Seek(0);
NewCodedFrameGroupAppend("10K 40 70K 100 130K");
NewCodedFrameGroupAppend("1000K 1030 1060K 1090 1120K");
@@ -4145,7 +4026,7 @@ TEST_P(SourceBufferStreamTest, Remove_Partial2) {
}
// Removal range starts and ends within a range.
-TEST_P(SourceBufferStreamTest, Remove_Partial3) {
+TEST_F(SourceBufferStreamTest, Remove_Partial3) {
Seek(0);
NewCodedFrameGroupAppend("10K 40 70K 100 130K");
NewCodedFrameGroupAppend("1000K 1030 1060K 1090 1120K");
@@ -4156,7 +4037,7 @@ TEST_P(SourceBufferStreamTest, Remove_Partial3) {
// Removal range starts in the middle of one range and ends in the
// middle of another range.
-TEST_P(SourceBufferStreamTest, Remove_Partial4) {
+TEST_F(SourceBufferStreamTest, Remove_Partial4) {
Seek(0);
NewCodedFrameGroupAppend("10K 40 70K 100 130K");
NewCodedFrameGroupAppend("1000K 1030 1060K 1090 1120K");
@@ -4168,7 +4049,7 @@ TEST_P(SourceBufferStreamTest, Remove_Partial4) {
// Test behavior when the current position is removed and new buffers
// are appended over the removal range.
-TEST_P(SourceBufferStreamTest, Remove_CurrentPosition) {
+TEST_F(SourceBufferStreamTest, Remove_CurrentPosition) {
Seek(0);
NewCodedFrameGroupAppend("0K 30 60 90K 120 150 180K 210 240 270K 300 330");
CheckExpectedRangesByTimestamp("{ [0,360) }");
@@ -4192,7 +4073,7 @@ TEST_P(SourceBufferStreamTest, Remove_CurrentPosition) {
// Test behavior when buffers in the selected range before the current position
// are removed.
-TEST_P(SourceBufferStreamTest, Remove_BeforeCurrentPosition) {
+TEST_F(SourceBufferStreamTest, Remove_BeforeCurrentPosition) {
Seek(0);
NewCodedFrameGroupAppend("0K 30 60 90K 120 150 180K 210 240 270K 300 330");
CheckExpectedRangesByTimestamp("{ [0,360) }");
@@ -4207,7 +4088,7 @@ TEST_P(SourceBufferStreamTest, Remove_BeforeCurrentPosition) {
// Test removing the preliminary portion of the current coded frame group being
// appended.
-TEST_P(SourceBufferStreamTest, Remove_MidGroup) {
+TEST_F(SourceBufferStreamTest, Remove_MidGroup) {
Seek(0);
NewCodedFrameGroupAppend("0K 30 60 90 120K 150 180 210");
CheckExpectedRangesByTimestamp("{ [0,240) }");
@@ -4244,7 +4125,7 @@ TEST_P(SourceBufferStreamTest, Remove_MidGroup) {
// Test removing the current GOP being appended, while not removing
// the entire range the GOP belongs to.
-TEST_P(SourceBufferStreamTest, Remove_GOPBeingAppended) {
+TEST_F(SourceBufferStreamTest, Remove_GOPBeingAppended) {
Seek(0);
NewCodedFrameGroupAppend("0K 30 60 90 120K 150 180");
CheckExpectedRangesByTimestamp("{ [0,210) }");
@@ -4268,7 +4149,7 @@ TEST_P(SourceBufferStreamTest, Remove_GOPBeingAppended) {
CheckExpectedBuffers("240K 270 300");
}
-TEST_P(SourceBufferStreamTest, Remove_WholeGOPBeingAppended) {
+TEST_F(SourceBufferStreamTest, Remove_WholeGOPBeingAppended) {
SeekToTimestampMs(1000);
NewCodedFrameGroupAppend("1000K 1030 1060 1090");
CheckExpectedRangesByTimestamp("{ [1000,1120) }");
@@ -4295,7 +4176,7 @@ TEST_P(SourceBufferStreamTest, Remove_WholeGOPBeingAppended) {
CheckExpectedBuffers("1270K 1300");
}
-TEST_P(SourceBufferStreamTest,
+TEST_F(SourceBufferStreamTest,
Remove_PreviousAppendDestroyedAndOverwriteExistingRange) {
SeekToTimestampMs(90);
@@ -4319,7 +4200,7 @@ TEST_P(SourceBufferStreamTest,
CheckExpectedBuffers("90K 121 151");
}
-TEST_P(SourceBufferStreamTest, Remove_GapAtBeginningOfGroup) {
+TEST_F(SourceBufferStreamTest, Remove_GapAtBeginningOfGroup) {
Seek(0);
// Append a coded frame group that has a gap at the beginning of it.
@@ -4349,7 +4230,7 @@ TEST_P(SourceBufferStreamTest, Remove_GapAtBeginningOfGroup) {
CheckExpectedRangesByTimestamp("{ [120,180) }");
}
-TEST_P(SourceBufferStreamTest,
+TEST_F(SourceBufferStreamTest,
OverlappingAppendRangeMembership_OneMicrosecond_Video) {
NewCodedFrameGroupAppend("10D20K");
CheckExpectedRangesByTimestamp("{ [10000,30000) }",
@@ -4371,7 +4252,7 @@ TEST_P(SourceBufferStreamTest,
CheckNoNextBuffer();
}
-TEST_P(SourceBufferStreamTest,
+TEST_F(SourceBufferStreamTest,
OverlappingAppendRangeMembership_TwoMicroseconds_Video) {
NewCodedFrameGroupAppend("10D20K");
CheckExpectedRangesByTimestamp("{ [10000,30000) }",
@@ -4393,7 +4274,7 @@ TEST_P(SourceBufferStreamTest,
CheckNoNextBuffer();
}
-TEST_P(SourceBufferStreamTest, Text_Append_SingleRange) {
+TEST_F(SourceBufferStreamTest, Text_Append_SingleRange) {
SetTextStream();
NewCodedFrameGroupAppend("0K 500K 1000K");
CheckExpectedRangesByTimestamp("{ [0,1500) }");
@@ -4402,7 +4283,7 @@ TEST_P(SourceBufferStreamTest, Text_Append_SingleRange) {
CheckExpectedBuffers("0K 500K 1000K");
}
-TEST_P(SourceBufferStreamTest, Text_Append_DisjointAfter) {
+TEST_F(SourceBufferStreamTest, Text_Append_DisjointAfter) {
SetTextStream();
NewCodedFrameGroupAppend("0K 500K 1000K");
CheckExpectedRangesByTimestamp("{ [0,1500) }");
@@ -4413,7 +4294,7 @@ TEST_P(SourceBufferStreamTest, Text_Append_DisjointAfter) {
CheckExpectedBuffers("0K 500K 1000K 3000K 3500K 4000K");
}
-TEST_P(SourceBufferStreamTest, Text_Append_DisjointBefore) {
+TEST_F(SourceBufferStreamTest, Text_Append_DisjointBefore) {
SetTextStream();
NewCodedFrameGroupAppend("3000K 3500K 4000K");
CheckExpectedRangesByTimestamp("{ [3000,4500) }");
@@ -4424,7 +4305,7 @@ TEST_P(SourceBufferStreamTest, Text_Append_DisjointBefore) {
CheckExpectedBuffers("0K 500K 1000K 3000K 3500K 4000K");
}
-TEST_P(SourceBufferStreamTest, Text_CompleteOverlap) {
+TEST_F(SourceBufferStreamTest, Text_CompleteOverlap) {
SetTextStream();
NewCodedFrameGroupAppend("3000K 3500K 4000K");
CheckExpectedRangesByTimestamp("{ [3000,4500) }");
@@ -4438,7 +4319,7 @@ TEST_P(SourceBufferStreamTest, Text_CompleteOverlap) {
"3001K 3501K 4001K 4501K 5001K");
}
-TEST_P(SourceBufferStreamTest, Text_OverlapAfter) {
+TEST_F(SourceBufferStreamTest, Text_OverlapAfter) {
SetTextStream();
NewCodedFrameGroupAppend("0K 500K 1000K 1500K 2000K");
CheckExpectedRangesByTimestamp("{ [0,2500) }");
@@ -4449,7 +4330,7 @@ TEST_P(SourceBufferStreamTest, Text_OverlapAfter) {
CheckExpectedBuffers("0K 500K 1000K 1499K 2001K 2501K 3001K");
}
-TEST_P(SourceBufferStreamTest, Text_OverlapBefore) {
+TEST_F(SourceBufferStreamTest, Text_OverlapBefore) {
SetTextStream();
NewCodedFrameGroupAppend("1500K 2000K 2500K 3000K 3500K");
CheckExpectedRangesByTimestamp("{ [1500,4000) }");
@@ -4460,7 +4341,7 @@ TEST_P(SourceBufferStreamTest, Text_OverlapBefore) {
CheckExpectedBuffers("0K 501K 1001K 1501K 2001K 3000K 3500K");
}
-TEST_P(SourceBufferStreamTest, Audio_SpliceTrimmingForOverlap) {
+TEST_F(SourceBufferStreamTest, Audio_SpliceTrimmingForOverlap) {
SetAudioStream();
Seek(0);
NewCodedFrameGroupAppend("0K 2K 4K 6K 8K 10K 12K");
@@ -4486,7 +4367,7 @@ TEST_P(SourceBufferStreamTest, Audio_SpliceTrimmingForOverlap) {
// Test that a splice is not created if an end timestamp and start timestamp
// perfectly overlap.
-TEST_P(SourceBufferStreamTest, Audio_SpliceFrame_NoSplice) {
+TEST_F(SourceBufferStreamTest, Audio_SpliceFrame_NoSplice) {
SetAudioStream();
Seek(0);
@@ -4510,7 +4391,7 @@ TEST_P(SourceBufferStreamTest, Audio_SpliceFrame_NoSplice) {
CheckNoNextBuffer();
}
-TEST_P(SourceBufferStreamTest, Audio_NoSpliceForBadOverlap) {
+TEST_F(SourceBufferStreamTest, Audio_NoSpliceForBadOverlap) {
SetAudioStream();
Seek(0);
@@ -4535,7 +4416,7 @@ TEST_P(SourceBufferStreamTest, Audio_NoSpliceForBadOverlap) {
CheckNoNextBuffer();
}
-TEST_P(SourceBufferStreamTest, Audio_NoSpliceForEstimatedDuration) {
+TEST_F(SourceBufferStreamTest, Audio_NoSpliceForEstimatedDuration) {
SetAudioStream();
Seek(0);
@@ -4556,7 +4437,7 @@ TEST_P(SourceBufferStreamTest, Audio_NoSpliceForEstimatedDuration) {
CheckNoNextBuffer();
}
-TEST_P(SourceBufferStreamTest, Audio_SpliceTrimming_ExistingTrimming) {
+TEST_F(SourceBufferStreamTest, Audio_SpliceTrimming_ExistingTrimming) {
const base::TimeDelta kDuration = base::TimeDelta::FromMilliseconds(4);
const base::TimeDelta kNoDiscard = base::TimeDelta();
const bool is_keyframe = true;
@@ -4609,11 +4490,10 @@ TEST_P(SourceBufferStreamTest, Audio_SpliceTrimming_ExistingTrimming) {
B_buffers.push_back(bufferB2);
// Append buffers, trigger splice trimming.
- STREAM_OP(OnStartOfCodedFrameGroup(bufferA1->GetDecodeTimestamp(),
- bufferA1->timestamp()));
- STREAM_OP(Append(A_buffers));
+ stream_->OnStartOfCodedFrameGroup(bufferA1->timestamp());
+ stream_->Append(A_buffers);
EXPECT_MEDIA_LOG(TrimmedSpliceOverlap(3000, 2000, 1000));
- STREAM_OP(Append(B_buffers));
+ stream_->Append(B_buffers);
// Verify buffers.
scoped_refptr<StreamParserBuffer> read_buffer;
@@ -4651,14 +4531,14 @@ TEST_P(SourceBufferStreamTest, Audio_SpliceTrimming_ExistingTrimming) {
CheckNoNextBuffer();
}
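// Editor's note: the TrimmedSpliceOverlap(3000, 2000, 1000) expectation
// above presumably reads as (splice PTS in us, overlapped buffer PTS in us,
// trimmed duration in us): with 4 ms buffers, the new frame at 3 ms overlaps
// the tail of the existing frame at 2 ms by 1 ms, and the overlap is
// resolved by trimming the existing frame rather than inserting a splice
// frame.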
-TEST_P(SourceBufferStreamTest, Audio_SpliceFrame_NoMillisecondSplices) {
+TEST_F(SourceBufferStreamTest, Audio_SpliceFrame_NoMillisecondSplices) {
EXPECT_MEDIA_LOG(SkippingSpliceTooLittleOverlap(1250, 250));
video_config_ = TestVideoConfig::Invalid();
audio_config_.Initialize(kCodecVorbis, kSampleFormatPlanarF32,
CHANNEL_LAYOUT_STEREO, 4000, EmptyExtraData(),
Unencrypted(), base::TimeDelta(), 0);
- STREAM_RESET(audio_config_);
+ ResetStream<>(audio_config_);
// Equivalent to 0.5ms per frame.
SetStreamInfo(2000, 2000);
Seek(0);
@@ -4680,14 +4560,14 @@ TEST_P(SourceBufferStreamTest, Audio_SpliceFrame_NoMillisecondSplices) {
CheckNoNextBuffer();
}
-TEST_P(SourceBufferStreamTest, Audio_PrerollFrame) {
+TEST_F(SourceBufferStreamTest, Audio_PrerollFrame) {
Seek(0);
NewCodedFrameGroupAppend("0K 3P 6K");
CheckExpectedBuffers("0K 3P 6K");
CheckNoNextBuffer();
}
-TEST_P(SourceBufferStreamTest, Audio_ConfigChangeWithPreroll) {
+TEST_F(SourceBufferStreamTest, Audio_ConfigChangeWithPreroll) {
AudioDecoderConfig new_config(kCodecVorbis, kSampleFormatPlanarF32,
CHANNEL_LAYOUT_MONO, 2000, EmptyExtraData(),
Unencrypted());
@@ -4699,7 +4579,7 @@ TEST_P(SourceBufferStreamTest, Audio_ConfigChangeWithPreroll) {
NewCodedFrameGroupAppend("0K 3K 6K");
// Update the configuration.
- STREAM_OP(UpdateAudioConfig(new_config, false));
+ stream_->UpdateAudioConfig(new_config, false);
// We haven't read any buffers at this point, so the config for the next
// buffer at time 0 should still be the original config.
@@ -4727,7 +4607,7 @@ TEST_P(SourceBufferStreamTest, Audio_ConfigChangeWithPreroll) {
CheckNoNextBuffer();
}
-TEST_P(SourceBufferStreamTest, Audio_Opus_SeekToJustBeforeRangeStart) {
+TEST_F(SourceBufferStreamTest, Audio_Opus_SeekToJustBeforeRangeStart) {
// Seek to a time within the fudge room of seekability to a buffered Opus
// audio frame's range, but before the range's start. Use small seek_preroll
// in case the associated logic to check same config in the preroll time
@@ -4737,7 +4617,7 @@ TEST_P(SourceBufferStreamTest, Audio_Opus_SeekToJustBeforeRangeStart) {
CHANNEL_LAYOUT_STEREO, 1000, EmptyExtraData(),
Unencrypted(), base::TimeDelta::FromMilliseconds(10),
0);
- STREAM_RESET(audio_config_);
+ ResetStream<>(audio_config_);
// Equivalent to 1s per frame.
SetStreamInfo(1, 1);
@@ -4753,7 +4633,7 @@ TEST_P(SourceBufferStreamTest, Audio_Opus_SeekToJustBeforeRangeStart) {
CheckNoNextBuffer();
}
-TEST_P(SourceBufferStreamTest, BFrames) {
+TEST_F(SourceBufferStreamTest, BFrames) {
Seek(0);
NewCodedFrameGroupAppend("0K 120|30 30|60 60|90 90|120");
CheckExpectedRangesByTimestamp("{ [0,150) }");
@@ -4762,7 +4642,7 @@ TEST_P(SourceBufferStreamTest, BFrames) {
CheckNoNextBuffer();
}
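// Editor's note: a worked reading of the BFrames append string above:
// "0K 120|30 30|60 60|90 90|120" delivers frames in decode order with PTS
// 0, 120, 30, 60, 90 ms (DTS 0, 30, 60, 90, 120 ms), i.e. the keyframe and
// the forward-referenced P-frame first, then the B-frames. The single
// buffered range [0,150) and the in-order reads verify that the stream
// tracks both timelines.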
-TEST_P(SourceBufferStreamTest, RemoveShouldAlwaysExcludeEnd) {
+TEST_F(SourceBufferStreamTest, RemoveShouldAlwaysExcludeEnd) {
NewCodedFrameGroupAppend("10D2K 12D2 14D2");
CheckExpectedRangesByTimestamp("{ [10,16) }");
@@ -4788,7 +4668,7 @@ TEST_P(SourceBufferStreamTest, RemoveShouldAlwaysExcludeEnd) {
CheckNoNextBuffer();
}
-TEST_P(SourceBufferStreamTest, RefinedDurationEstimates_BackOverlap) {
+TEST_F(SourceBufferStreamTest, RefinedDurationEstimates_BackOverlap) {
// Append a few buffers, the last one having estimated duration.
NewCodedFrameGroupAppend("0K 5 10 20D10E");
CheckExpectedRangesByTimestamp("{ [0,30) }");
@@ -4814,7 +4694,7 @@ TEST_P(SourceBufferStreamTest, RefinedDurationEstimates_BackOverlap) {
CheckNoNextBuffer();
}
-TEST_P(SourceBufferStreamTest, RefinedDurationEstimates_FrontOverlap) {
+TEST_F(SourceBufferStreamTest, RefinedDurationEstimates_FrontOverlap) {
// Append a few buffers.
NewCodedFrameGroupAppend("10K 15 20D5");
CheckExpectedRangesByTimestamp("{ [10,25) }");
@@ -4840,7 +4720,7 @@ TEST_P(SourceBufferStreamTest, RefinedDurationEstimates_FrontOverlap) {
CheckNoNextBuffer();
}
-TEST_P(SourceBufferStreamTest, SeekToStartSatisfiedUpToThreshold) {
+TEST_F(SourceBufferStreamTest, SeekToStartSatisfiedUpToThreshold) {
NewCodedFrameGroupAppend("999K 1010 1020D10");
CheckExpectedRangesByTimestamp("{ [999,1030) }");
@@ -4849,7 +4729,7 @@ TEST_P(SourceBufferStreamTest, SeekToStartSatisfiedUpToThreshold) {
CheckNoNextBuffer();
}
-TEST_P(SourceBufferStreamTest, SeekToStartUnsatisfiedBeyondThreshold) {
+TEST_F(SourceBufferStreamTest, SeekToStartUnsatisfiedBeyondThreshold) {
NewCodedFrameGroupAppend("1000K 1010 1020D10");
CheckExpectedRangesByTimestamp("{ [1000,1030) }");
@@ -4857,7 +4737,7 @@ TEST_P(SourceBufferStreamTest, SeekToStartUnsatisfiedBeyondThreshold) {
CheckNoNextBuffer();
}
-TEST_P(SourceBufferStreamTest,
+TEST_F(SourceBufferStreamTest,
ReSeekToStartSatisfiedUpToThreshold_SameTimestamps) {
// Append a few buffers.
NewCodedFrameGroupAppend("999K 1010 1020D10");
@@ -4876,7 +4756,7 @@ TEST_P(SourceBufferStreamTest,
CheckNoNextBuffer();
}
-TEST_P(SourceBufferStreamTest,
+TEST_F(SourceBufferStreamTest,
ReSeekToStartSatisfiedUpToThreshold_EarlierTimestamps) {
// Append a few buffers.
NewCodedFrameGroupAppend("999K 1010 1020D10");
@@ -4896,7 +4776,7 @@ TEST_P(SourceBufferStreamTest,
CheckNoNextBuffer();
}
-TEST_P(SourceBufferStreamTest,
+TEST_F(SourceBufferStreamTest,
ReSeekToStartSatisfiedUpToThreshold_LaterTimestamps) {
// Append a few buffers.
NewCodedFrameGroupAppend("500K 510 520D10");
@@ -4917,7 +4797,7 @@ TEST_P(SourceBufferStreamTest,
CheckNoNextBuffer();
}
-TEST_P(SourceBufferStreamTest, ReSeekBeyondStartThreshold_SameTimestamps) {
+TEST_F(SourceBufferStreamTest, ReSeekBeyondStartThreshold_SameTimestamps) {
// Append a few buffers.
NewCodedFrameGroupAppend("1000K 1010 1020D10");
CheckExpectedRangesByTimestamp("{ [1000,1030) }");
@@ -4935,7 +4815,7 @@ TEST_P(SourceBufferStreamTest, ReSeekBeyondStartThreshold_SameTimestamps) {
CheckNoNextBuffer();
}
-TEST_P(SourceBufferStreamTest, ReSeekBeyondThreshold_EarlierTimestamps) {
+TEST_F(SourceBufferStreamTest, ReSeekBeyondThreshold_EarlierTimestamps) {
// Append a few buffers.
NewCodedFrameGroupAppend("2000K 2010 2020D10");
CheckExpectedRangesByTimestamp("{ [2000,2030) }");
@@ -4955,11 +4835,11 @@ TEST_P(SourceBufferStreamTest, ReSeekBeyondThreshold_EarlierTimestamps) {
CheckNoNextBuffer();
}
-TEST_P(SourceBufferStreamTest, ConfigChange_ReSeek) {
+TEST_F(SourceBufferStreamTest, ConfigChange_ReSeek) {
// Append a few buffers, with a config change in the middle.
VideoDecoderConfig new_config = TestVideoConfig::Large();
NewCodedFrameGroupAppend("2000K 2010 2020D10");
- STREAM_OP(UpdateVideoConfig(new_config, false));
+ stream_->UpdateVideoConfig(new_config, false);
NewCodedFrameGroupAppend("2030K 2040 2050D10");
CheckExpectedRangesByTimestamp("{ [2000,2060) }");
@@ -5001,7 +4881,7 @@ TEST_P(SourceBufferStreamTest, ConfigChange_ReSeek) {
CheckVideoConfig(new_config);
}
-TEST_P(SourceBufferStreamTest, TrackBuffer_ExhaustionWithSkipForward) {
+TEST_F(SourceBufferStreamTest, TrackBuffer_ExhaustionWithSkipForward) {
NewCodedFrameGroupAppend("0K 10 20 30 40");
// Read the first 4 buffers, so next buffer is at time 40.
@@ -5026,7 +4906,7 @@ TEST_P(SourceBufferStreamTest, TrackBuffer_ExhaustionWithSkipForward) {
CheckNoNextBuffer();
}
-TEST_P(SourceBufferStreamTest,
+TEST_F(SourceBufferStreamTest,
TrackBuffer_ExhaustionAndImmediateNewTrackBuffer) {
NewCodedFrameGroupAppend("0K 10 20 30 40");
@@ -5060,7 +4940,7 @@ TEST_P(SourceBufferStreamTest,
CheckNoNextBuffer();
}
-TEST_P(
+TEST_F(
SourceBufferStreamTest,
AdjacentCodedFrameGroupContinuation_NoGapCreatedByTinyGapInGroupContinuation) {
NewCodedFrameGroupAppend("0K 10 20K 30 40K 50D10");
@@ -5072,7 +4952,7 @@ TEST_P(
CheckExpectedRangesByTimestamp("{ [0,81) }");
}
-TEST_P(SourceBufferStreamTest,
+TEST_F(SourceBufferStreamTest,
AdjacentCodedFrameGroupContinuation_NoGapCreatedPrefixRemoved) {
NewCodedFrameGroupAppend("0K 10 20K 30 40K 50D10");
CheckExpectedRangesByTimestamp("{ [0,60) }");
@@ -5086,7 +4966,7 @@ TEST_P(SourceBufferStreamTest,
CheckExpectedRangesByTimestamp("{ [40,81) }");
}
-TEST_P(SourceBufferStreamTest,
+TEST_F(SourceBufferStreamTest,
AdjacentNewCodedFrameGroupContinuation_NoGapCreatedPrefixRemoved) {
NewCodedFrameGroupAppend("0K 10 20K 30 40K 50D10");
CheckExpectedRangesByTimestamp("{ [0,60) }");
@@ -5103,7 +4983,7 @@ TEST_P(SourceBufferStreamTest,
CheckExpectedRangesByTimestamp("{ [40,81) }");
}
-TEST_P(SourceBufferStreamTest,
+TEST_F(SourceBufferStreamTest,
StartCodedFrameGroup_RemoveThenAppendMoreMuchLater) {
NewCodedFrameGroupAppend("1000K 1010 1020 1030K 1040 1050 1060K 1070 1080");
NewCodedFrameGroupAppend("0K 10 20");
@@ -5129,7 +5009,7 @@ TEST_P(SourceBufferStreamTest,
CheckNoNextBuffer();
}
-TEST_P(SourceBufferStreamTest,
+TEST_F(SourceBufferStreamTest,
StartCodedFrameGroup_InExisting_AppendMuchLater) {
NewCodedFrameGroupAppend("0K 10 20 30K 40 50");
SignalStartOfCodedFrameGroup(base::TimeDelta::FromMilliseconds(45));
@@ -5142,7 +5022,7 @@ TEST_P(SourceBufferStreamTest,
CheckNoNextBuffer();
}
-TEST_P(SourceBufferStreamTest,
+TEST_F(SourceBufferStreamTest,
StartCodedFrameGroup_InExisting_RemoveGOP_ThenAppend_1) {
NewCodedFrameGroupAppend("0K 10 20 30K 40 50");
SignalStartOfCodedFrameGroup(base::TimeDelta::FromMilliseconds(30));
@@ -5156,7 +5036,7 @@ TEST_P(SourceBufferStreamTest,
CheckNoNextBuffer();
}
-TEST_P(SourceBufferStreamTest,
+TEST_F(SourceBufferStreamTest,
StartCodedFrameGroup_InExisting_RemoveGOP_ThenAppend_2) {
NewCodedFrameGroupAppend("0K 10 20 30K 40 50");
// Though we signal 45ms, it's adjusted internally (due to detected overlap)
@@ -5180,7 +5060,7 @@ TEST_P(SourceBufferStreamTest,
CheckNoNextBuffer();
}
-TEST_P(SourceBufferStreamTest,
+TEST_F(SourceBufferStreamTest,
StartCodedFrameGroup_InExisting_RemoveMostRecentAppend_ThenAppend_1) {
NewCodedFrameGroupAppend("0K 10 20 30K 40 50");
SignalStartOfCodedFrameGroup(base::TimeDelta::FromMilliseconds(45));
@@ -5194,7 +5074,7 @@ TEST_P(SourceBufferStreamTest,
CheckNoNextBuffer();
}
-TEST_P(SourceBufferStreamTest,
+TEST_F(SourceBufferStreamTest,
StartCodedFrameGroup_InExisting_RemoveMostRecentAppend_ThenAppend_2) {
NewCodedFrameGroupAppend("0K 10 20 30K 40 50");
SignalStartOfCodedFrameGroup(base::TimeDelta::FromMilliseconds(50));
@@ -5208,29 +5088,29 @@ TEST_P(SourceBufferStreamTest,
CheckNoNextBuffer();
}
-TEST_P(SourceBufferStreamTest, GetHighestPresentationTimestamp) {
- EXPECT_EQ(base::TimeDelta(), STREAM_OP(GetHighestPresentationTimestamp()));
+TEST_F(SourceBufferStreamTest, GetHighestPresentationTimestamp) {
+ EXPECT_EQ(base::TimeDelta(), stream_->GetHighestPresentationTimestamp());
NewCodedFrameGroupAppend("0K 10K");
EXPECT_EQ(base::TimeDelta::FromMilliseconds(10),
- STREAM_OP(GetHighestPresentationTimestamp()));
+ stream_->GetHighestPresentationTimestamp());
RemoveInMs(0, 10, 20);
EXPECT_EQ(base::TimeDelta::FromMilliseconds(10),
- STREAM_OP(GetHighestPresentationTimestamp()));
+ stream_->GetHighestPresentationTimestamp());
RemoveInMs(10, 20, 20);
- EXPECT_EQ(base::TimeDelta(), STREAM_OP(GetHighestPresentationTimestamp()));
+ EXPECT_EQ(base::TimeDelta(), stream_->GetHighestPresentationTimestamp());
NewCodedFrameGroupAppend("0K 10K");
EXPECT_EQ(base::TimeDelta::FromMilliseconds(10),
- STREAM_OP(GetHighestPresentationTimestamp()));
+ stream_->GetHighestPresentationTimestamp());
RemoveInMs(10, 20, 20);
- EXPECT_EQ(base::TimeDelta(), STREAM_OP(GetHighestPresentationTimestamp()));
+ EXPECT_EQ(base::TimeDelta(), stream_->GetHighestPresentationTimestamp());
}
-TEST_P(SourceBufferStreamTest, GarbageCollectionUnderMemoryPressure) {
+TEST_F(SourceBufferStreamTest, GarbageCollectionUnderMemoryPressure) {
SetMemoryLimit(16);
NewCodedFrameGroupAppend("0K 1 2 3K 4 5 6K 7 8 9K 10 11 12K 13 14 15K");
CheckExpectedRangesByTimestamp("{ [0,16) }");
@@ -5238,9 +5118,9 @@ TEST_P(SourceBufferStreamTest, GarbageCollectionUnderMemoryPressure) {
// This feature is disabled by default, so by default memory pressure
// notification takes no effect and the memory limits and won't remove
// anything from buffered ranges, since we are under the limit of 20 bytes.
- STREAM_OP(OnMemoryPressure(
- DecodeTimestamp::FromMilliseconds(0),
- base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE, false));
+ stream_->OnMemoryPressure(
+ base::TimeDelta::FromMilliseconds(0),
+ base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE, false);
EXPECT_TRUE(GarbageCollect(base::TimeDelta::FromMilliseconds(8), 0));
CheckExpectedRangesByTimestamp("{ [0,16) }");
@@ -5250,9 +5130,9 @@ TEST_P(SourceBufferStreamTest, GarbageCollectionUnderMemoryPressure) {
scoped_feature_list.InitAndEnableFeature(kMemoryPressureBasedSourceBufferGC);
// Verify that effective MSE memory limit is reduced under memory pressure.
- STREAM_OP(OnMemoryPressure(
- DecodeTimestamp::FromMilliseconds(0),
- base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE, false));
+ stream_->OnMemoryPressure(
+ base::TimeDelta::FromMilliseconds(0),
+ base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE, false);
// Effective memory limit is now 8 buffers, but we still will not collect any
// data between the current playback position 3 and last append position 15.
@@ -5267,9 +5147,9 @@ TEST_P(SourceBufferStreamTest, GarbageCollectionUnderMemoryPressure) {
// If memory pressure becomes critical, the garbage collection algorithm
// becomes even more aggressive and collects everything up to the current
// playback position.
- STREAM_OP(OnMemoryPressure(
- DecodeTimestamp::FromMilliseconds(0),
- base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL, false));
+ stream_->OnMemoryPressure(
+ base::TimeDelta::FromMilliseconds(0),
+ base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL, false);
EXPECT_TRUE(GarbageCollect(base::TimeDelta::FromMilliseconds(13), 0));
CheckExpectedRangesByTimestamp("{ [12,16) }");
@@ -5282,7 +5162,7 @@ TEST_P(SourceBufferStreamTest, GarbageCollectionUnderMemoryPressure) {
CheckExpectedRangesByTimestamp("{ [12,28) }");
}
-TEST_P(SourceBufferStreamTest, InstantGarbageCollectionUnderMemoryPressure) {
+TEST_F(SourceBufferStreamTest, InstantGarbageCollectionUnderMemoryPressure) {
SetMemoryLimit(16);
NewCodedFrameGroupAppend("0K 1 2 3K 4 5 6K 7 8 9K 10 11 12K 13 14 15K");
CheckExpectedRangesByTimestamp("{ [0,16) }");
@@ -5294,18 +5174,18 @@ TEST_P(SourceBufferStreamTest, InstantGarbageCollectionUnderMemoryPressure) {
// |scoped_feature_list_|.)
base::test::ScopedFeatureList scoped_feature_list;
scoped_feature_list.InitAndEnableFeature(kMemoryPressureBasedSourceBufferGC);
- STREAM_OP(OnMemoryPressure(
- DecodeTimestamp::FromMilliseconds(7),
- base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL, true));
+ stream_->OnMemoryPressure(
+ base::TimeDelta::FromMilliseconds(7),
+ base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL, true);
CheckExpectedRangesByTimestamp("{ [6,16) }");
- STREAM_OP(OnMemoryPressure(
- DecodeTimestamp::FromMilliseconds(9),
- base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL, true));
+ stream_->OnMemoryPressure(
+ base::TimeDelta::FromMilliseconds(9),
+ base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL, true);
CheckExpectedRangesByTimestamp("{ [9,16) }");
}
-TEST_P(SourceBufferStreamTest, GCFromFrontThenExplicitRemoveFromMiddleToEnd) {
- // Attempts to exercise SBRByPts::GetBufferIndexAt() after its
+TEST_F(SourceBufferStreamTest, GCFromFrontThenExplicitRemoveFromMiddleToEnd) {
+ // Attempts to exercise SourceBufferRange::GetBufferIndexAt() after its
// |keyframe_map_index_base_| has been increased, and when there is a GOP
// following the search timestamp. GC followed by an explicit remove may
// trigger that code path.
@@ -5320,7 +5200,7 @@ TEST_P(SourceBufferStreamTest, GCFromFrontThenExplicitRemoveFromMiddleToEnd) {
CheckExpectedRangesByTimestamp("{ [0,150) }");
// Seek to the second GOP's keyframe to allow GC to collect all of the first
- // GOP (ostensibly increasing SBR's |keyframe_map_index_base_|).
+ // GOP (ostensibly increasing SourceBufferRange's |keyframe_map_index_base_|).
SeekToTimestampMs(50);
GarbageCollect(base::TimeDelta::FromMilliseconds(50), 0);
CheckExpectedRangesByTimestamp("{ [50,150) }");
@@ -5330,26 +5210,18 @@ TEST_P(SourceBufferStreamTest, GCFromFrontThenExplicitRemoveFromMiddleToEnd) {
CheckExpectedRangesByTimestamp("{ [50,60) }");
}
-TEST_P(SourceBufferStreamTest, BFrames_WithoutEditList) {
+TEST_F(SourceBufferStreamTest, BFrames_WithoutEditList) {
// Simulates B-frame content where MP4 edit lists are not used to shift PTS so
// it matches DTS. From acolwell@chromium.org in https://crbug.com/398130
Seek(0);
-
- if (buffering_api_ == BufferingApi::kLegacyByDts) {
- NewCodedFrameGroupAppend(base::TimeDelta(),
- "60|0K 180|30 90|60 120|90 150|120");
- CheckExpectedRangesByTimestamp("{ [0,150) }");
- } else {
- NewCodedFrameGroupAppend(base::TimeDelta::FromMilliseconds(60),
- "60|0K 180|30 90|60 120|90 150|120");
- CheckExpectedRangesByTimestamp("{ [60,210) }");
- }
-
+ NewCodedFrameGroupAppend(base::TimeDelta::FromMilliseconds(60),
+ "60|0K 180|30 90|60 120|90 150|120");
+ CheckExpectedRangesByTimestamp("{ [60,210) }");
CheckExpectedBuffers("60|0K 180|30 90|60 120|90 150|120");
CheckNoNextBuffer();
}
-TEST_P(SourceBufferStreamTest, OverlapSameTimestampWithinSameGOP) {
+TEST_F(SourceBufferStreamTest, OverlapSameTimestampWithinSameGOP) {
// We use distinct appends here to make sure the intended frame durations
// are respected by the test helpers (which the OneByOne helper doesn't
// respect always). We need granular appends of this GOP for at least the
@@ -5373,7 +5245,7 @@ struct VideoEndTimeCase {
int64_t expected_end_time;
};
-TEST_P(SourceBufferStreamTest, VideoRangeEndTimeCases) {
+TEST_F(SourceBufferStreamTest, VideoRangeEndTimeCases) {
// With a basic range containing just a single keyframe [10,20), verify
// various keyframe overlap append cases' results on the range end time.
const VideoEndTimeCase kCases[] = {
@@ -5418,7 +5290,7 @@ struct AudioEndTimeCase {
bool expect_splice;
};
-TEST_P(SourceBufferStreamTest, AudioRangeEndTimeCases) {
+TEST_F(SourceBufferStreamTest, AudioRangeEndTimeCases) {
// With a basic range containing just a single keyframe [10,20), verify
// various keyframe overlap append cases' results on the range end time.
const AudioEndTimeCase kCases[] = {
@@ -5461,7 +5333,7 @@ TEST_P(SourceBufferStreamTest, AudioRangeEndTimeCases) {
}
}
-TEST_P(SourceBufferStreamTest, SameTimestampEstimatedDurations_Video) {
+TEST_F(SourceBufferStreamTest, SameTimestampEstimatedDurations_Video) {
// Start a coded frame group with a frame having a non-estimated duration.
NewCodedFrameGroupAppend("10D10K");
@@ -5485,7 +5357,7 @@ TEST_P(SourceBufferStreamTest, SameTimestampEstimatedDurations_Video) {
CheckNoNextBuffer();
}
-TEST_P(SourceBufferStreamTest, RangeIsNextInPTS_Simple) {
+TEST_F(SourceBufferStreamTest, RangeIsNextInPTS_Simple) {
// Append a simple GOP where DTS==PTS, perform basic PTS continuity checks.
NewCodedFrameGroupAppend("10D10K");
CheckIsNextInPTSSequenceWithFirstRange(9, false);
@@ -5495,7 +5367,7 @@ TEST_P(SourceBufferStreamTest, RangeIsNextInPTS_Simple) {
CheckIsNextInPTSSequenceWithFirstRange(31, false);
}
-TEST_P(SourceBufferStreamTest, RangeIsNextInPTS_OutOfOrder) {
+TEST_F(SourceBufferStreamTest, RangeIsNextInPTS_OutOfOrder) {
// Append a GOP where DTS != PTS such that a timestamp used as DTS would not
// be continuous, but used as PTS is, and verify PTS continuity.
NewCodedFrameGroupAppend("1000|0K 1120|30 1030|60 1060|90 1090|120");
@@ -5516,7 +5388,7 @@ TEST_P(SourceBufferStreamTest, RangeIsNextInPTS_OutOfOrder) {
CheckIsNextInPTSSequenceWithFirstRange(1181, false);
}
-TEST_P(SourceBufferStreamTest, RangeCoalescenceOnFudgeRoomIncrease_1) {
+TEST_F(SourceBufferStreamTest, RangeCoalescenceOnFudgeRoomIncrease_1) {
// Change the fudge room (by increasing frame duration) and verify coalescence
// behavior.
NewCodedFrameGroupAppend("0K 10K");
@@ -5548,7 +5420,7 @@ TEST_P(SourceBufferStreamTest, RangeCoalescenceOnFudgeRoomIncrease_1) {
CheckNoNextBuffer();
}
-TEST_P(SourceBufferStreamTest, RangeCoalescenceOnFudgeRoomIncrease_2) {
+TEST_F(SourceBufferStreamTest, RangeCoalescenceOnFudgeRoomIncrease_2) {
// Change the fudge room (by increasing frame duration) and verify coalescence
// behavior.
NewCodedFrameGroupAppend("0K 10K");
@@ -5572,7 +5444,7 @@ TEST_P(SourceBufferStreamTest, RangeCoalescenceOnFudgeRoomIncrease_2) {
CheckNoNextBuffer();
}
-TEST_P(SourceBufferStreamTest, NoRangeGapWhenIncrementallyOverlapped) {
+TEST_F(SourceBufferStreamTest, NoRangeGapWhenIncrementallyOverlapped) {
// Append 2 SAP-Type-1 GOPs continuous in DTS and PTS interval and with frame
// durations and number of frames per GOP such that the first keyframe by
// itself would not be considered "adjacent" to the second GOP by our fudge
@@ -5619,7 +5491,7 @@ TEST_P(SourceBufferStreamTest, NoRangeGapWhenIncrementallyOverlapped) {
CheckNoNextBuffer();
}
-TEST_P(SourceBufferStreamTest, AllowIncrementalAppendsToCoalesceRangeGap) {
+TEST_F(SourceBufferStreamTest, AllowIncrementalAppendsToCoalesceRangeGap) {
// Append a SAP-Type-1 GOP with a coded frame group start time far before the
// timestamp of the first GOP (beyond any fudge room possible in this test).
// This simulates one of multiple muxed tracks with jagged start times
@@ -5683,7 +5555,7 @@ TEST_P(SourceBufferStreamTest, AllowIncrementalAppendsToCoalesceRangeGap) {
CheckNoNextBuffer();
}
-TEST_P(SourceBufferStreamTest, PreciselyOverlapLastAudioFrameAppended_1) {
+TEST_F(SourceBufferStreamTest, PreciselyOverlapLastAudioFrameAppended_1) {
// Appends an audio frame, A, which is then immediately followed by a
// subsequent frame, B. Then appends a new frame, C, which precisely overlaps
// frame B, and verifies that there is exactly 1 buffered range resulting.
@@ -5705,15 +5577,11 @@ TEST_P(SourceBufferStreamTest, PreciselyOverlapLastAudioFrameAppended_1) {
CheckExpectedBuffers("0K 10K");
CheckNoNextBuffer();
- // Frame C. FrameProcessor won't signal a new CFG here when buffering by DTS,
- // because the DTS remains continuous per MSE spec. When buffering by PTS,
- // though, FrameProcessor signals new CFG more granularly, including in this
- // case.
- if (buffering_api_ == BufferingApi::kLegacyByDts) {
- AppendBuffers("10D10K");
- } else {
- NewCodedFrameGroupAppend("10D10K");
- }
+ // Frame C.
+ // Though DTS is continuous per MSE spec, FrameProcessor signals new CFG more
+ // granularly, including in this case.
+ NewCodedFrameGroupAppend("10D10K");
+
SeekToTimestampMs(0);
CheckExpectedRangesByTimestamp("{ [0,20) }");
CheckExpectedRangeEndTimes("{ <10,20> }");
@@ -5721,7 +5589,7 @@ TEST_P(SourceBufferStreamTest, PreciselyOverlapLastAudioFrameAppended_1) {
CheckNoNextBuffer();
}
-TEST_P(SourceBufferStreamTest, PreciselyOverlapLastAudioFrameAppended_2) {
+TEST_F(SourceBufferStreamTest, PreciselyOverlapLastAudioFrameAppended_2) {
// Appends an audio frame, A, which is then splice-trim-truncated by a
// subsequent frame, B. Then appends a new frame, C, which precisely overlaps
// frame B, and verifies that there is exactly 1 buffered range resulting.
@@ -5744,15 +5612,11 @@ TEST_P(SourceBufferStreamTest, PreciselyOverlapLastAudioFrameAppended_2) {
CheckExpectedBuffers("0K 60K");
CheckNoNextBuffer();
- // Frame C. FrameProcessor won't signal a new CFG here when buffering by DTS,
- // because the DTS remains continuous per MSE spec. When buffering by PTS,
- // though, FrameProcessor signals new CFG more granularly, including in this
- // case.
- if (buffering_api_ == BufferingApi::kLegacyByDts) {
- AppendBuffers("60D10K");
- } else {
- NewCodedFrameGroupAppend("60D10K");
- }
+ // Frame C.
+ // Though DTS is continuous per MSE spec, FrameProcessor signals new CFG more
+ // granularly, including in this case.
+ NewCodedFrameGroupAppend("60D10K");
+
SeekToTimestampMs(0);
CheckExpectedRangesByTimestamp("{ [0,70) }");
CheckExpectedRangeEndTimes("{ <60,70> }");
@@ -5760,7 +5624,7 @@ TEST_P(SourceBufferStreamTest, PreciselyOverlapLastAudioFrameAppended_2) {
CheckNoNextBuffer();
}
-TEST_P(SourceBufferStreamTest, ZeroDurationBuffersThenIncreasingFudgeRoom) {
+TEST_F(SourceBufferStreamTest, ZeroDurationBuffersThenIncreasingFudgeRoom) {
// Appends some zero duration buffers to result in disjoint buffered ranges.
// Verifies that increasing the fudge room allows those that become within
// adjacency threshold to merge, including those for which the new fudge room
@@ -5802,7 +5666,7 @@ TEST_P(SourceBufferStreamTest, ZeroDurationBuffersThenIncreasingFudgeRoom) {
CheckNoNextBuffer();
}
-TEST_P(SourceBufferStreamTest, NonZeroDurationBuffersThenIncreasingFudgeRoom) {
+TEST_F(SourceBufferStreamTest, NonZeroDurationBuffersThenIncreasingFudgeRoom) {
// Verifies that a single fudge room increase which merges more than 2
// previously disjoint ranges in a row performs the merging correctly.
NewCodedFrameGroupAppend("0D10K");
@@ -5826,7 +5690,7 @@ TEST_P(SourceBufferStreamTest, NonZeroDurationBuffersThenIncreasingFudgeRoom) {
CheckNoNextBuffer();
}
-TEST_P(SourceBufferStreamTest, SapType2WithNonkeyframePtsInEarlierRange) {
+TEST_F(SourceBufferStreamTest, SapType2WithNonkeyframePtsInEarlierRange) {
// Buffer a standalone GOP [0,10).
NewCodedFrameGroupAppend("0D10K");
CheckExpectedRangesByTimestamp("{ [0,10) }");
@@ -5836,21 +5700,13 @@ TEST_P(SourceBufferStreamTest, SapType2WithNonkeyframePtsInEarlierRange) {
// range: a SAP-2 GOP with a nonkeyframe with PTS belonging to the first
// range, and a subsequent minimal GOP.
NewCodedFrameGroupAppend("30D10K 1|40D10");
- if (buffering_api_ == BufferingApi::kLegacyByDts) {
- CheckExpectedRangesByTimestamp("{ [0,10) [30,50) }");
- } else {
- CheckExpectedRangesByTimestamp("{ [0,10) [30,40) }");
- }
-
+ CheckExpectedRangesByTimestamp("{ [0,10) [30,40) }");
NewCodedFrameGroupAppend("40|50D10K");
// Verify that there are two distinct ranges, and that the SAP-2 nonkeyframe
// is buffered as part of the second range's first GOP.
- if (buffering_api_ == BufferingApi::kLegacyByDts) {
- CheckExpectedRangesByTimestamp("{ [0,10) [30,60) }");
- } else {
- CheckExpectedRangesByTimestamp("{ [0,10) [30,50) }");
- }
+ CheckExpectedRangesByTimestamp("{ [0,10) [30,50) }");
+
SeekToTimestampMs(0);
CheckExpectedBuffers("0K");
CheckNoNextBuffer();
@@ -5859,7 +5715,7 @@ TEST_P(SourceBufferStreamTest, SapType2WithNonkeyframePtsInEarlierRange) {
CheckNoNextBuffer();
}
-TEST_P(SourceBufferStreamTest,
+TEST_F(SourceBufferStreamTest,
MergeAllowedIfRangeEndTimeWithEstimatedDurationMatchesNextRangeStart) {
// Tests the edge case where fudge room is not increased when an estimated
// duration is increased due to overlap appends, causing two ranges to not be
@@ -5904,11 +5760,4 @@ TEST_P(SourceBufferStreamTest,
CheckNoNextBuffer();
}
-INSTANTIATE_TEST_SUITE_P(LegacyByDts,
- SourceBufferStreamTest,
- Values(BufferingApi::kLegacyByDts));
-INSTANTIATE_TEST_SUITE_P(NewByPts,
- SourceBufferStreamTest,
- Values(BufferingApi::kNewByPts));
-
} // namespace media
diff --git a/chromium/media/filters/stream_parser_factory.cc b/chromium/media/filters/stream_parser_factory.cc
index 5b457773322..2f29cef3692 100644
--- a/chromium/media/filters/stream_parser_factory.cc
+++ b/chromium/media/filters/stream_parser_factory.cc
@@ -209,8 +209,10 @@ static const CodecInfo kEAC3CodecInfo3 = {"mp4a.A6", CodecInfo::AUDIO, nullptr,
#endif // BUILDFLAG(ENABLE_AC3_EAC3_AUDIO_DEMUXING)
#if BUILDFLAG(ENABLE_MPEG_H_AUDIO_DEMUXING)
-static const CodecInfo kMpegHAudioCodecInfo = {
+static const CodecInfo kMpegHAudioCodecInfo1 = {
"mhm1.*", CodecInfo::AUDIO, nullptr, CodecInfo::HISTOGRAM_MPEG_H_AUDIO};
+static const CodecInfo kMpegHAudioCodecInfo2 = {
+ "mha1.*", CodecInfo::AUDIO, nullptr, CodecInfo::HISTOGRAM_MPEG_H_AUDIO};
#endif
#endif // BUILDFLAG(USE_PROPRIETARY_CODECS)
@@ -230,6 +232,7 @@ static const CodecInfo kMPEG4FLACCodecInfo = {"flac", CodecInfo::AUDIO, nullptr,
CodecInfo::HISTOGRAM_FLAC};
static const CodecInfo* const kVideoMP4Codecs[] = {&kMPEG4FLACCodecInfo,
+ &kOpusCodecInfo,
&kMPEG4VP09CodecInfo,
#if BUILDFLAG(USE_PROPRIETARY_CODECS)
&kH264AVC1CodecInfo,
@@ -249,7 +252,8 @@ static const CodecInfo* const kVideoMP4Codecs[] = {&kMPEG4FLACCodecInfo,
&kMPEG4AACCodecInfo,
&kMPEG2AACLCCodecInfo,
#if BUILDFLAG(ENABLE_MPEG_H_AUDIO_DEMUXING)
- &kMpegHAudioCodecInfo,
+ &kMpegHAudioCodecInfo1,
+ &kMpegHAudioCodecInfo2,
#endif
#endif // BUILDFLAG(USE_PROPRIETARY_CODECS)
#if BUILDFLAG(ENABLE_AV1_DECODER)
@@ -263,7 +267,8 @@ static const CodecInfo* const kAudioMP4Codecs[] = {&kMPEG4FLACCodecInfo,
&kMPEG4AACCodecInfo,
&kMPEG2AACLCCodecInfo,
#if BUILDFLAG(ENABLE_MPEG_H_AUDIO_DEMUXING)
- &kMpegHAudioCodecInfo,
+ &kMpegHAudioCodecInfo1,
+ &kMpegHAudioCodecInfo2,
#endif
#if BUILDFLAG(ENABLE_AC3_EAC3_AUDIO_DEMUXING)
&kAC3CodecInfo1,
diff --git a/chromium/media/filters/video_decoder_stream_unittest.cc b/chromium/media/filters/video_decoder_stream_unittest.cc
index a66b499d40b..d68466e1d84 100644
--- a/chromium/media/filters/video_decoder_stream_unittest.cc
+++ b/chromium/media/filters/video_decoder_stream_unittest.cc
@@ -135,11 +135,12 @@ class VideoDecoderStreamTest
DCHECK(!pending_stop_);
}
- void PrepareFrame(const scoped_refptr<VideoFrame>& frame,
+ void PrepareFrame(scoped_refptr<VideoFrame> frame,
VideoDecoderStream::OutputReadyCB output_ready_cb) {
// Simulate some delay in return of the output.
message_loop_.task_runner()->PostTask(
- FROM_HERE, base::BindOnce(std::move(output_ready_cb), frame));
+ FROM_HERE,
+ base::BindOnce(std::move(output_ready_cb), std::move(frame)));
}
void OnBytesDecoded(int count) { num_decoded_bytes_unreported_ += count; }
@@ -303,7 +304,7 @@ class VideoDecoderStreamTest
// Callback for VideoDecoderStream::Read().
void FrameReady(VideoDecoderStream::Status status,
- const scoped_refptr<VideoFrame>& frame) {
+ scoped_refptr<VideoFrame> frame) {
DCHECK(pending_read_);
frame_read_ = frame;
last_read_status_ = status;
diff --git a/chromium/media/filters/video_renderer_algorithm.cc b/chromium/media/filters/video_renderer_algorithm.cc
index fa31cc30cc5..afa76b44352 100644
--- a/chromium/media/filters/video_renderer_algorithm.cc
+++ b/chromium/media/filters/video_renderer_algorithm.cc
@@ -14,13 +14,12 @@ namespace media {
const int kMaxOutOfOrderFrameLogs = 10;
VideoRendererAlgorithm::ReadyFrame::ReadyFrame(
- const scoped_refptr<VideoFrame>& ready_frame)
- : frame(ready_frame),
+ scoped_refptr<VideoFrame> ready_frame)
+ : frame(std::move(ready_frame)),
has_estimated_end_time(true),
ideal_render_count(0),
render_count(0),
- drop_count(0) {
-}
+ drop_count(0) {}
VideoRendererAlgorithm::ReadyFrame::ReadyFrame(const ReadyFrame& other) =
default;
@@ -331,12 +330,17 @@ int64_t VideoRendererAlgorithm::GetMemoryUsage() const {
return allocation_size;
}
-void VideoRendererAlgorithm::EnqueueFrame(
- const scoped_refptr<VideoFrame>& frame) {
+void VideoRendererAlgorithm::EnqueueFrame(scoped_refptr<VideoFrame> frame) {
DCHECK(frame);
DCHECK(!frame->metadata()->IsTrue(VideoFrameMetadata::END_OF_STREAM));
- ReadyFrame ready_frame(frame);
+ // Note: Not all frames have duration. E.g., this class is used with WebRTC
+ // which does not provide duration information for its frames.
+ base::TimeDelta metadata_frame_duration;
+ auto has_duration = frame->metadata()->GetTimeDelta(
+ VideoFrameMetadata::FRAME_DURATION, &metadata_frame_duration);
+ auto timestamp = frame->timestamp();
+ ReadyFrame ready_frame(std::move(frame));
auto it = frame_queue_.empty()
? frame_queue_.end()
: std::lower_bound(frame_queue_.begin(), frame_queue_.end(),
@@ -349,7 +353,7 @@ void VideoRendererAlgorithm::EnqueueFrame(
if (new_frame_index <= 0 && have_rendered_frames_) {
LIMITED_MEDIA_LOG(INFO, media_log_, out_of_order_frame_logs_,
kMaxOutOfOrderFrameLogs)
- << "Dropping frame with timestamp " << frame->timestamp()
+ << "Dropping frame with timestamp " << timestamp
<< ", which is earlier than the last rendered frame ("
<< frame_queue_.front().frame->timestamp() << ").";
++frames_dropped_during_enqueue_;
@@ -359,15 +363,13 @@ void VideoRendererAlgorithm::EnqueueFrame(
// Drop any frames which are less than a millisecond apart in media time (even
// those with timestamps matching an already enqueued frame), there's no way
// we can reasonably render these frames; it's effectively a 1000fps limit.
- const base::TimeDelta delta =
- std::min(new_frame_index < frame_queue_.size()
- ? frame_queue_[new_frame_index].frame->timestamp() -
- frame->timestamp()
- : base::TimeDelta::Max(),
- new_frame_index > 0
- ? frame->timestamp() -
- frame_queue_[new_frame_index - 1].frame->timestamp()
- : base::TimeDelta::Max());
+ const base::TimeDelta delta = std::min(
+ new_frame_index < frame_queue_.size()
+ ? frame_queue_[new_frame_index].frame->timestamp() - timestamp
+ : base::TimeDelta::Max(),
+ new_frame_index > 0
+ ? timestamp - frame_queue_[new_frame_index - 1].frame->timestamp()
+ : base::TimeDelta::Max());
if (delta < base::TimeDelta::FromMilliseconds(1)) {
DVLOG(2) << "Dropping frame too close to an already enqueued frame: "
<< delta.InMicroseconds() << " us";
@@ -378,7 +380,7 @@ void VideoRendererAlgorithm::EnqueueFrame(
// Calculate an accurate start time and an estimated end time if possible for
// the new frame; this allows EffectiveFramesQueued() to be relatively correct
// immediately after a new frame is queued.
- std::vector<base::TimeDelta> media_timestamps(1, frame->timestamp());
+ std::vector<base::TimeDelta> media_timestamps(1, timestamp);
// If there are not enough frames to estimate duration based on end time, ask
// the WallClockTimeCB to convert the estimated frame duration into wall clock
@@ -386,15 +388,9 @@ void VideoRendererAlgorithm::EnqueueFrame(
//
// Note: This duration value is not compensated for playback rate and
// thus is different than |average_frame_duration_| which is compensated.
- //
- // Note: Not all frames have duration. E.g., this class is used with WebRTC
- // which does not provide duration information for its frames.
- base::TimeDelta metadata_frame_duration;
- if (!frame_duration_calculator_.count() &&
- frame->metadata()->GetTimeDelta(VideoFrameMetadata::FRAME_DURATION,
- &metadata_frame_duration) &&
+ if (!frame_duration_calculator_.count() && has_duration &&
metadata_frame_duration > base::TimeDelta()) {
- media_timestamps.push_back(frame->timestamp() + metadata_frame_duration);
+ media_timestamps.push_back(timestamp + metadata_frame_duration);
}
std::vector<base::TimeTicks> wall_clock_times;
@@ -410,8 +406,7 @@ void VideoRendererAlgorithm::EnqueueFrame(
if (it != frame_queue_.end()) {
LIMITED_MEDIA_LOG(INFO, media_log_, out_of_order_frame_logs_,
kMaxOutOfOrderFrameLogs)
- << "Decoded frame with timestamp " << frame->timestamp()
- << " is out of order.";
+ << "Decoded frame with timestamp " << timestamp << " is out of order.";
}
frame_queue_.insert(it, ready_frame);
diff --git a/chromium/media/filters/video_renderer_algorithm.h b/chromium/media/filters/video_renderer_algorithm.h
index e2fe47c1484..c6ae18fb1a9 100644
--- a/chromium/media/filters/video_renderer_algorithm.h
+++ b/chromium/media/filters/video_renderer_algorithm.h
@@ -103,7 +103,7 @@ class MEDIA_EXPORT VideoRendererAlgorithm {
// time of the frame based on previous frames or the value of
// VideoFrameMetadata::FRAME_DURATION if no previous frames, so that
// EffectiveFramesQueued() is relatively accurate immediately after this call.
- void EnqueueFrame(const scoped_refptr<VideoFrame>& frame);
+ void EnqueueFrame(scoped_refptr<VideoFrame> frame);
// Removes all frames from the |frame_queue_| and clears predictors. The
// algorithm will be as if freshly constructed after this call. By default
@@ -181,7 +181,7 @@ class MEDIA_EXPORT VideoRendererAlgorithm {
// Metadata container for enqueued frames. See |frame_queue_| below.
struct ReadyFrame {
- ReadyFrame(const scoped_refptr<VideoFrame>& frame);
+ ReadyFrame(scoped_refptr<VideoFrame> frame);
ReadyFrame(const ReadyFrame& other);
~ReadyFrame();
diff --git a/chromium/media/filters/video_renderer_algorithm_unittest.cc b/chromium/media/filters/video_renderer_algorithm_unittest.cc
index c8b963d7b8a..6ed5aa06705 100644
--- a/chromium/media/filters/video_renderer_algorithm_unittest.cc
+++ b/chromium/media/filters/video_renderer_algorithm_unittest.cc
@@ -878,7 +878,7 @@ TEST_F(VideoRendererAlgorithmTest, BestFrameByCadence) {
RunFramePumpTest(
true, &frame_tg, &display_tg,
[&current_frame, &actual_frame_pattern, desired_frame_pattern, this](
- const scoped_refptr<VideoFrame>& frame, size_t frames_dropped) {
+ scoped_refptr<VideoFrame> frame, size_t frames_dropped) {
ASSERT_TRUE(frame);
ASSERT_EQ(0u, frames_dropped);
@@ -1124,18 +1124,18 @@ TEST_F(VideoRendererAlgorithmTest, BestFrameByFractionalCadence) {
TickGenerator display_tg(tick_clock_->NowTicks(), test_rate[1]);
scoped_refptr<VideoFrame> current_frame;
- RunFramePumpTest(
- true, &frame_tg, &display_tg,
- [&current_frame, this](const scoped_refptr<VideoFrame>& frame,
- size_t frames_dropped) {
- ASSERT_TRUE(frame);
-
- // We don't count frames dropped that cadence says we should skip.
- ASSERT_EQ(0u, frames_dropped);
- ASSERT_NE(current_frame, frame);
- ASSERT_TRUE(is_using_cadence());
- current_frame = frame;
- });
+ RunFramePumpTest(true, &frame_tg, &display_tg,
+ [&current_frame, this](scoped_refptr<VideoFrame> frame,
+ size_t frames_dropped) {
+ ASSERT_TRUE(frame);
+
+ // We don't count frames dropped that cadence says we
+ // should skip.
+ ASSERT_EQ(0u, frames_dropped);
+ ASSERT_NE(current_frame, frame);
+ ASSERT_TRUE(is_using_cadence());
+ current_frame = frame;
+ });
if (HasFatalFailure())
return;
@@ -1157,7 +1157,7 @@ TEST_F(VideoRendererAlgorithmTest, FilmCadence) {
RunFramePumpTest(
true, &frame_tg, &display_tg,
[&current_frame, &actual_frame_pattern, &desired_frame_pattern, this](
- const scoped_refptr<VideoFrame>& frame, size_t frames_dropped) {
+ scoped_refptr<VideoFrame> frame, size_t frames_dropped) {
ASSERT_TRUE(frame);
ASSERT_EQ(0u, frames_dropped);
@@ -1362,7 +1362,7 @@ TEST_P(VideoRendererAlgorithmCadenceTest, CadenceTest) {
TickGenerator display_tg(tick_clock_->NowTicks(), display_rate);
RunFramePumpTest(
true, &frame_tg, &display_tg,
- [](const scoped_refptr<VideoFrame>& frame, size_t frames_dropped) {});
+ [](scoped_refptr<VideoFrame> frame, size_t frames_dropped) {});
}
// Common display rates.
@@ -1399,7 +1399,7 @@ TEST_F(VideoRendererAlgorithmTest, VariablePlaybackRateCadence) {
time_source_.SetPlaybackRate(playback_rate);
RunFramePumpTest(
false, &frame_tg, &display_tg,
- [](const scoped_refptr<VideoFrame>& frame, size_t frames_dropped) {});
+ [](scoped_refptr<VideoFrame> frame, size_t frames_dropped) {});
if (HasFatalFailure())
return;
diff --git a/chromium/media/filters/vp9_parser.h b/chromium/media/filters/vp9_parser.h
index 95e98f9770d..e4e3be0e6bd 100644
--- a/chromium/media/filters/vp9_parser.h
+++ b/chromium/media/filters/vp9_parser.h
@@ -260,6 +260,10 @@ struct MEDIA_EXPORT Vp9FrameHeader {
Vp9FrameContext initial_frame_context;
// Current frame entropy context after header parsing.
Vp9FrameContext frame_context;
+
+ // Segmentation and loop filter params from uncompressed header
+ Vp9SegmentationParams segmentation;
+ Vp9LoopFilterParams loop_filter;
};
// A parser for VP9 bitstream.
diff --git a/chromium/media/filters/vp9_uncompressed_header_parser.cc b/chromium/media/filters/vp9_uncompressed_header_parser.cc
index d69fa929bcd..6c99ef5b468 100644
--- a/chromium/media/filters/vp9_uncompressed_header_parser.cc
+++ b/chromium/media/filters/vp9_uncompressed_header_parser.cc
@@ -1083,10 +1083,13 @@ bool Vp9UncompressedHeaderParser::Parse(const uint8_t* stream,
}
ReadLoopFilterParams();
+ // Update loop_filter in current_frame_hdr
+ fhdr->loop_filter = context_->loop_filter_;
ReadQuantizationParams(&fhdr->quant_params);
if (!ReadSegmentationParams())
return false;
-
+ // Update segmentation in current_frame_hdr
+ fhdr->segmentation = context_->segmentation_;
if (!ReadTileInfo(fhdr))
return false;
diff --git a/chromium/media/filters/vpx_video_decoder.cc b/chromium/media/filters/vpx_video_decoder.cc
index a851960deb1..b27a9f7e62b 100644
--- a/chromium/media/filters/vpx_video_decoder.cc
+++ b/chromium/media/filters/vpx_video_decoder.cc
@@ -113,7 +113,7 @@ std::string VpxVideoDecoder::GetDisplayName() const {
void VpxVideoDecoder::Initialize(const VideoDecoderConfig& config,
bool /* low_delay */,
CdmContext* /* cdm_context */,
- const InitCB& init_cb,
+ InitCB init_cb,
const OutputCB& output_cb,
const WaitingCB& /* waiting_cb */) {
DVLOG(1) << __func__ << ": " << config.AsHumanReadableString();
@@ -122,9 +122,10 @@ void VpxVideoDecoder::Initialize(const VideoDecoderConfig& config,
CloseDecoder();
- InitCB bound_init_cb = bind_callbacks_ ? BindToCurrentLoop(init_cb) : init_cb;
+ InitCB bound_init_cb = bind_callbacks_ ? BindToCurrentLoop(std::move(init_cb))
+ : std::move(init_cb);
if (config.is_encrypted() || !ConfigureDecoder(config)) {
- bound_init_cb.Run(false);
+ std::move(bound_init_cb).Run(false);
return;
}
@@ -132,11 +133,11 @@ void VpxVideoDecoder::Initialize(const VideoDecoderConfig& config,
config_ = config;
state_ = kNormal;
output_cb_ = output_cb;
- bound_init_cb.Run(true);
+ std::move(bound_init_cb).Run(true);
}
void VpxVideoDecoder::Decode(scoped_refptr<DecoderBuffer> buffer,
- const DecodeCB& decode_cb) {
+ DecodeCB decode_cb) {
DVLOG(3) << __func__ << ": " << buffer->AsHumanReadableString();
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(buffer);
@@ -144,29 +145,30 @@ void VpxVideoDecoder::Decode(scoped_refptr<DecoderBuffer> buffer,
DCHECK_NE(state_, kUninitialized)
<< "Called Decode() before successful Initialize()";
- DecodeCB bound_decode_cb =
- bind_callbacks_ ? BindToCurrentLoop(decode_cb) : decode_cb;
+ DecodeCB bound_decode_cb = bind_callbacks_
+ ? BindToCurrentLoop(std::move(decode_cb))
+ : std::move(decode_cb);
if (state_ == kError) {
- bound_decode_cb.Run(DecodeStatus::DECODE_ERROR);
+ std::move(bound_decode_cb).Run(DecodeStatus::DECODE_ERROR);
return;
}
if (state_ == kDecodeFinished) {
- bound_decode_cb.Run(DecodeStatus::OK);
+ std::move(bound_decode_cb).Run(DecodeStatus::OK);
return;
}
if (state_ == kNormal && buffer->end_of_stream()) {
state_ = kDecodeFinished;
- bound_decode_cb.Run(DecodeStatus::OK);
+ std::move(bound_decode_cb).Run(DecodeStatus::OK);
return;
}
scoped_refptr<VideoFrame> video_frame;
if (!VpxDecode(buffer.get(), &video_frame)) {
state_ = kError;
- bound_decode_cb.Run(DecodeStatus::DECODE_ERROR);
+ std::move(bound_decode_cb).Run(DecodeStatus::DECODE_ERROR);
return;
}
@@ -179,17 +181,17 @@ void VpxVideoDecoder::Decode(scoped_refptr<DecoderBuffer> buffer,
}
// VideoDecoderShim expects |decode_cb| call after |output_cb_|.
- bound_decode_cb.Run(DecodeStatus::OK);
+ std::move(bound_decode_cb).Run(DecodeStatus::OK);
}
-void VpxVideoDecoder::Reset(const base::Closure& reset_cb) {
+void VpxVideoDecoder::Reset(base::OnceClosure reset_cb) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
state_ = kNormal;
if (bind_callbacks_)
- BindToCurrentLoop(reset_cb).Run();
+ BindToCurrentLoop(std::move(reset_cb)).Run();
else
- reset_cb.Run();
+ std::move(reset_cb).Run();
// Allow Initialize() to be called on another thread now.
DETACH_FROM_SEQUENCE(sequence_checker_);
diff --git a/chromium/media/filters/vpx_video_decoder.h b/chromium/media/filters/vpx_video_decoder.h
index 7c33f5ee650..98eff915fe8 100644
--- a/chromium/media/filters/vpx_video_decoder.h
+++ b/chromium/media/filters/vpx_video_decoder.h
@@ -37,12 +37,11 @@ class MEDIA_EXPORT VpxVideoDecoder : public OffloadableVideoDecoder {
void Initialize(const VideoDecoderConfig& config,
bool low_delay,
CdmContext* cdm_context,
- const InitCB& init_cb,
+ InitCB init_cb,
const OutputCB& output_cb,
const WaitingCB& waiting_cb) override;
- void Decode(scoped_refptr<DecoderBuffer> buffer,
- const DecodeCB& decode_cb) override;
- void Reset(const base::Closure& reset_cb) override;
+ void Decode(scoped_refptr<DecoderBuffer> buffer, DecodeCB decode_cb) override;
+ void Reset(base::OnceClosure reset_cb) override;
// OffloadableVideoDecoder implementation.
void Detach() override;
diff --git a/chromium/media/filters/vpx_video_decoder_fuzzertest.cc b/chromium/media/filters/vpx_video_decoder_fuzzertest.cc
index b4279e5fced..d784d58a4b1 100644
--- a/chromium/media/filters/vpx_video_decoder_fuzzertest.cc
+++ b/chromium/media/filters/vpx_video_decoder_fuzzertest.cc
@@ -43,7 +43,7 @@ void OnInitDone(const base::Closure& quit_closure,
quit_closure.Run();
}
-void OnOutputComplete(const scoped_refptr<VideoFrame>& frame) {}
+void OnOutputComplete(scoped_refptr<VideoFrame> frame) {}
// Entry point for LibFuzzer.
extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
@@ -95,8 +95,10 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
auto coded_size = gfx::Size(1 + (rng() % 127), 1 + (rng() % 127));
auto visible_rect = gfx::Rect(coded_size);
auto natural_size = gfx::Size(1 + (rng() % 127), 1 + (rng() % 127));
+ uint8_t reflection = rng() % 4;
- VideoDecoderConfig config(codec, profile, pixel_format, color_space, rotation,
+ VideoDecoderConfig config(codec, profile, pixel_format, color_space,
+ VideoTransformation(rotation, reflection),
coded_size, visible_rect, natural_size,
EmptyExtraData(), Unencrypted());
diff --git a/chromium/media/filters/vpx_video_decoder_unittest.cc b/chromium/media/filters/vpx_video_decoder_unittest.cc
index 1ab0b1a7131..befb24ebbda 100644
--- a/chromium/media/filters/vpx_video_decoder_unittest.cc
+++ b/chromium/media/filters/vpx_video_decoder_unittest.cc
@@ -155,9 +155,9 @@ class VpxVideoDecoderTest : public testing::Test {
return status;
}
- void FrameReady(const scoped_refptr<VideoFrame>& frame) {
+ void FrameReady(scoped_refptr<VideoFrame> frame) {
DCHECK(!frame->metadata()->IsTrue(VideoFrameMetadata::END_OF_STREAM));
- output_frames_.push_back(frame);
+ output_frames_.push_back(std::move(frame));
}
MOCK_METHOD1(DecodeDone, void(DecodeStatus));
diff --git a/chromium/media/formats/mp2t/es_adapter_video_unittest.cc b/chromium/media/formats/mp2t/es_adapter_video_unittest.cc
index 3c078a2f03a..6ae36597850 100644
--- a/chromium/media/formats/mp2t/es_adapter_video_unittest.cc
+++ b/chromium/media/formats/mp2t/es_adapter_video_unittest.cc
@@ -33,7 +33,7 @@ VideoDecoderConfig CreateFakeVideoConfig() {
gfx::Rect visible_rect(0, 0, 320, 240);
gfx::Size natural_size(320, 240);
return VideoDecoderConfig(kCodecH264, H264PROFILE_MAIN, PIXEL_FORMAT_I420,
- VideoColorSpace(), VIDEO_ROTATION_0, coded_size,
+ VideoColorSpace(), kNoTransformation, coded_size,
visible_rect, natural_size, EmptyExtraData(),
Unencrypted());
}
diff --git a/chromium/media/formats/mp2t/es_parser_h264.cc b/chromium/media/formats/mp2t/es_parser_h264.cc
index 41d8555536e..29e577b7940 100644
--- a/chromium/media/formats/mp2t/es_parser_h264.cc
+++ b/chromium/media/formats/mp2t/es_parser_h264.cc
@@ -527,7 +527,7 @@ bool EsParserH264::UpdateVideoDecoderConfig(const H264SPS* sps,
VideoDecoderConfig video_decoder_config(
kCodecH264, profile, PIXEL_FORMAT_I420, VideoColorSpace::REC709(),
- VIDEO_ROTATION_0, coded_size.value(), visible_rect.value(), natural_size,
+ kNoTransformation, coded_size.value(), visible_rect.value(), natural_size,
EmptyExtraData(), scheme);
if (!video_decoder_config.IsValidConfig()) {
diff --git a/chromium/media/formats/mp4/avc.cc b/chromium/media/formats/mp4/avc.cc
index 1a85b67d5f7..65f64281a05 100644
--- a/chromium/media/formats/mp4/avc.cc
+++ b/chromium/media/formats/mp4/avc.cc
@@ -8,10 +8,8 @@
#include <memory>
#include <utility>
-#include "base/feature_list.h"
#include "base/logging.h"
#include "media/base/decrypt_config.h"
-#include "media/base/media_switches.h"
#include "media/formats/mp4/box_definitions.h"
#include "media/formats/mp4/box_reader.h"
#include "media/video/h264_parser.h"
@@ -358,15 +356,12 @@ bool AVCBitstreamConverter::ConvertAndAnalyzeFrame(
subsamples));
// |is_keyframe| may be incorrect. Analyze the frame to see if it is a
- // keyframe. |is_keyframe| will be used if the analysis is inconclusive or if
- // not kMseBufferByPts.
+ // keyframe. |is_keyframe| will be used if the analysis is inconclusive.
// Also, provide the analysis result to the caller via out parameter
// |analysis_result|.
*analysis_result = Analyze(frame_buf, subsamples);
- if (base::FeatureList::IsEnabled(kMseBufferByPts)
- ? analysis_result->is_keyframe.value_or(is_keyframe)
- : is_keyframe) {
+ if (analysis_result->is_keyframe.value_or(is_keyframe)) {
// If this is a keyframe, we (re-)inject SPS and PPS headers at the start of
// a frame. If subsample info is present, we also update the clear byte
// count for that first subsample.
diff --git a/chromium/media/formats/mp4/fourccs.h b/chromium/media/formats/mp4/fourccs.h
index 723f2ec835c..adec8666fd0 100644
--- a/chromium/media/formats/mp4/fourccs.h
+++ b/chromium/media/formats/mp4/fourccs.h
@@ -71,6 +71,7 @@ enum FourCC {
FOURCC_MFRA = 0x6d667261,
#if BUILDFLAG(ENABLE_MPEG_H_AUDIO_DEMUXING)
FOURCC_MHM1 = 0x6d686d31,
+ FOURCC_MHA1 = 0x6d686131,
#endif
FOURCC_MINF = 0x6d696e66,
FOURCC_MOOF = 0x6d6f6f66,
diff --git a/chromium/media/formats/mp4/mp4_stream_parser.cc b/chromium/media/formats/mp4/mp4_stream_parser.cc
index 623bc534833..76eb891f8e9 100644
--- a/chromium/media/formats/mp4/mp4_stream_parser.cc
+++ b/chromium/media/formats/mp4/mp4_stream_parser.cc
@@ -12,7 +12,6 @@
#include <vector>
#include "base/callback_helpers.h"
-#include "base/feature_list.h"
#include "base/logging.h"
#include "base/numerics/math_constants.h"
#include "base/strings/string_number_conversions.h"
@@ -21,7 +20,6 @@
#include "media/base/audio_decoder_config.h"
#include "media/base/encryption_pattern.h"
#include "media/base/encryption_scheme.h"
-#include "media/base/media_switches.h"
#include "media/base/media_tracks.h"
#include "media/base/media_util.h"
#include "media/base/stream_parser_buffer.h"
@@ -245,12 +243,9 @@ ParseResult MP4StreamParser::ParseBox() {
return ParseResult::kOk;
}
-static inline double FixedToFloatingPoint(const int32_t& i) {
- return static_cast<double>(i >> 16);
-}
-
-VideoRotation MP4StreamParser::CalculateRotation(const TrackHeader& track,
- const MovieHeader& movie) {
+VideoTransformation MP4StreamParser::CalculateRotation(
+ const TrackHeader& track,
+ const MovieHeader& movie) {
static_assert(kDisplayMatrixDimension == 9, "Display matrix must be 3x3");
// 3x3 matrix: [ a b c ]
// [ d e f ]
@@ -275,39 +270,9 @@ VideoRotation MP4StreamParser::CalculateRotation(const TrackHeader& track,
}
}
- // Rotation by angle Θ is represented in the matrix as:
- // [ cos(Θ), -sin(Θ), ...]
- // [ sin(Θ), cos(Θ), ...]
- // [ ..., ..., 1 ]
- // But we only need cos(Θ) for the angle and sin(Θ) for the quadrant.
- double angle = acos(FixedToFloatingPoint(rotation_matrix[0]))
- * 180 / base::kPiDouble;
-
- if (angle < 0)
- angle += 360;
-
- if (angle >= 360)
- angle -= 360;
-
- // 16 bits of fixed point decimal is enough to give 6 decimals of precision
- // to cos(Θ). A delta of ±0.000001 causes acos(cos(Θ)) to differ by a minimum
- // of 0.0002, which is why we only need to check that the angle is only
- // accurate to within four decimal places. This is preferred to checking for
- // a more precise accuracy, as the 'double' type is architecture dependant and
- // ther may variance in floating point errors.
- if (abs(angle - 0) < 1e-4)
- return VIDEO_ROTATION_0;
-
- if (abs(angle - 180) < 1e-4)
- return VIDEO_ROTATION_180;
-
- if (abs(angle - 90) < 1e-4) {
- bool quadrant = asin(FixedToFloatingPoint(rotation_matrix[3])) < 0;
- return quadrant ? VIDEO_ROTATION_90 : VIDEO_ROTATION_270;
- }
-
- // TODO(tmathmeyer): Record this event and the faulty matrix somewhere.
- return VIDEO_ROTATION_0;
+ int32_t rotation_only[4] = {rotation_matrix[0], rotation_matrix[1],
+ rotation_matrix[3], rotation_matrix[4]};
+ return VideoTransformation(rotation_only);
}
bool MP4StreamParser::ParseMoov(BoxReader* reader) {
@@ -368,7 +333,7 @@ bool MP4StreamParser::ParseMoov(BoxReader* reader) {
audio_format != FOURCC_AC3 && audio_format != FOURCC_EAC3 &&
#endif
#if BUILDFLAG(ENABLE_MPEG_H_AUDIO_DEMUXING)
- audio_format != FOURCC_MHM1 &&
+ audio_format != FOURCC_MHM1 && audio_format != FOURCC_MHA1 &&
#endif
audio_format != FOURCC_MP4A) {
MEDIA_LOG(ERROR, media_log_)
@@ -406,7 +371,7 @@ bool MP4StreamParser::ParseMoov(BoxReader* reader) {
extra_data = entry.dfla.stream_info;
#if BUILDFLAG(USE_PROPRIETARY_CODECS)
#if BUILDFLAG(ENABLE_MPEG_H_AUDIO_DEMUXING)
- } else if (audio_format == FOURCC_MHM1) {
+ } else if (audio_format == FOURCC_MHM1 || audio_format == FOURCC_MHA1) {
codec = kCodecMpegHAudio;
channel_layout = CHANNEL_LAYOUT_BITSTREAM;
sample_per_second = entry.samplerate;
@@ -850,11 +815,7 @@ ParseResult MP4StreamParser::EnqueueSample(BufferQueueMap* buffers) {
// they mismatch. If other out-of-order codecs in mp4 (e.g. HEVC, DV)
// implement keyframe analysis in their frame_bitstream_converter, we'll
// similarly trust that analysis instead of the mp4.
- // We'll only use the analysis to override the MP4 keyframeness if
- // |media::kMseBufferByPts| is enabled.
- if (base::FeatureList::IsEnabled(kMseBufferByPts)) {
- is_keyframe = analysis.is_keyframe.value();
- }
+ is_keyframe = analysis.is_keyframe.value();
}
}
}
diff --git a/chromium/media/formats/mp4/mp4_stream_parser.h b/chromium/media/formats/mp4/mp4_stream_parser.h
index 909bf5563db..86ef7467c52 100644
--- a/chromium/media/formats/mp4/mp4_stream_parser.h
+++ b/chromium/media/formats/mp4/mp4_stream_parser.h
@@ -52,8 +52,8 @@ class MEDIA_EXPORT MP4StreamParser : public StreamParser {
bool Parse(const uint8_t* buf, int size) override;
// Calculates the rotation value from the track header display matricies.
- VideoRotation CalculateRotation(const TrackHeader& track,
- const MovieHeader& movie);
+ VideoTransformation CalculateRotation(const TrackHeader& track,
+ const MovieHeader& movie);
private:
enum State {
diff --git a/chromium/media/formats/mp4/mp4_stream_parser_unittest.cc b/chromium/media/formats/mp4/mp4_stream_parser_unittest.cc
index afd3e559407..38169606d2d 100644
--- a/chromium/media/formats/mp4/mp4_stream_parser_unittest.cc
+++ b/chromium/media/formats/mp4/mp4_stream_parser_unittest.cc
@@ -17,7 +17,6 @@
#include "base/logging.h"
#include "base/memory/ref_counted.h"
#include "base/test/metrics/histogram_tester.h"
-#include "base/test/scoped_feature_list.h"
#include "base/time/time.h"
#include "media/base/audio_decoder_config.h"
#include "media/base/decoder_buffer.h"
@@ -353,37 +352,13 @@ TEST_F(MP4StreamParserTest, AVC_KeyAndNonKeyframeness_Match_Container) {
ParseMP4File("bear-640x360-v-2frames_frag.mp4", 512);
}
-TEST_F(MP4StreamParserTest, LegacyByDts_AVC_Keyframeness_Mismatches_Container) {
+TEST_F(MP4StreamParserTest, AVC_Keyframeness_Mismatches_Container) {
// The first AVC video frame's keyframe-ness metadata matches the MP4:
// Frame 0: AVC IDR, trun.first_sample_flags: NOT sync sample, DEPENDS on
// others.
// Frame 1: AVC Non-IDR, tfhd.default_sample_flags: not sync sample, depends
// on others.
InSequence s; // The EXPECT* sequence matters for this test.
- base::test::ScopedFeatureList scoped_feature_list;
- scoped_feature_list.InitAndDisableFeature(kMseBufferByPts);
- auto params = GetDefaultInitParametersExpectations();
- params.detected_audio_track_count = 0;
- InitializeParserWithInitParametersExpectations(params);
- verifying_keyframeness_sequence_ = true;
- EXPECT_MEDIA_LOG(DebugLog(
- "ISO-BMFF container metadata for video frame indicates that the frame is "
- "not a keyframe, but the video frame contents indicate the opposite."));
- EXPECT_CALL(*this, ParsedNonKeyframe());
- EXPECT_CALL(*this, ParsedNonKeyframe());
- ParseMP4File("bear-640x360-v-2frames-keyframe-is-non-sync-sample_frag.mp4",
- 512);
-}
-
-TEST_F(MP4StreamParserTest, NewByPts_AVC_Keyframeness_Mismatches_Container) {
- // The first AVC video frame's keyframe-ness metadata matches the MP4:
- // Frame 0: AVC IDR, trun.first_sample_flags: NOT sync sample, DEPENDS on
- // others.
- // Frame 1: AVC Non-IDR, tfhd.default_sample_flags: not sync sample, depends
- // on others.
- InSequence s; // The EXPECT* sequence matters for this test.
- base::test::ScopedFeatureList scoped_feature_list;
- scoped_feature_list.InitAndEnableFeature(kMseBufferByPts);
auto params = GetDefaultInitParametersExpectations();
params.detected_audio_track_count = 0;
InitializeParserWithInitParametersExpectations(params);
@@ -397,38 +372,13 @@ TEST_F(MP4StreamParserTest, NewByPts_AVC_Keyframeness_Mismatches_Container) {
512);
}
-TEST_F(MP4StreamParserTest,
- LegacyByDts_AVC_NonKeyframeness_Mismatches_Container) {
- // The second AVC video frame's keyframe-ness metadata matches the MP4:
- // Frame 0: AVC IDR, trun.first_sample_flags: sync sample that doesn't
- // depend on others.
- // Frame 1: AVC Non-IDR, tfhd.default_sample_flags: SYNC sample, DOES NOT
- // depend on others.
- InSequence s; // The EXPECT* sequence matters for this test.
- base::test::ScopedFeatureList scoped_feature_list;
- scoped_feature_list.InitAndDisableFeature(kMseBufferByPts);
- auto params = GetDefaultInitParametersExpectations();
- params.detected_audio_track_count = 0;
- InitializeParserWithInitParametersExpectations(params);
- verifying_keyframeness_sequence_ = true;
- EXPECT_CALL(*this, ParsedKeyframe());
- EXPECT_MEDIA_LOG(DebugLog(
- "ISO-BMFF container metadata for video frame indicates that the frame is "
- "a keyframe, but the video frame contents indicate the opposite."));
- EXPECT_CALL(*this, ParsedKeyframe());
- ParseMP4File("bear-640x360-v-2frames-nonkeyframe-is-sync-sample_frag.mp4",
- 512);
-}
-
-TEST_F(MP4StreamParserTest, NewByPts_AVC_NonKeyframeness_Mismatches_Container) {
+TEST_F(MP4StreamParserTest, AVC_NonKeyframeness_Mismatches_Container) {
// The second AVC video frame's keyframe-ness metadata matches the MP4:
// Frame 0: AVC IDR, trun.first_sample_flags: sync sample that doesn't
// depend on others.
// Frame 1: AVC Non-IDR, tfhd.default_sample_flags: SYNC sample, DOES NOT
// depend on others.
InSequence s; // The EXPECT* sequence matters for this test.
- base::test::ScopedFeatureList scoped_feature_list;
- scoped_feature_list.InitAndEnableFeature(kMseBufferByPts);
auto params = GetDefaultInitParametersExpectations();
params.detected_audio_track_count = 0;
InitializeParserWithInitParametersExpectations(params);
@@ -738,7 +688,8 @@ TEST_F(MP4StreamParserTest, MultiTrackFile) {
}
// <cos(θ), sin(θ), θ expressed as a rotation Enum>
-using MatrixRotationTestCaseParam = std::tuple<double, double, VideoRotation>;
+using MatrixRotationTestCaseParam =
+ std::tuple<double, double, VideoTransformation>;
class MP4StreamParserRotationMatrixEvaluatorTest
: public ::testing::TestWithParam<MatrixRotationTestCaseParam> {
@@ -771,17 +722,23 @@ TEST_P(MP4StreamParserRotationMatrixEvaluatorTest, RotationCalculation) {
track_header.display_matrix[1] = -(std::get<1>(data) * (1 << 16));
track_header.display_matrix[3] = std::get<1>(data) * (1 << 16);
- EXPECT_EQ(parser_->CalculateRotation(track_header, movie_header),
- std::get<2>(data));
+ VideoTransformation expected = std::get<2>(data);
+ VideoTransformation actual =
+ parser_->CalculateRotation(track_header, movie_header);
+ EXPECT_EQ(actual.rotation, expected.rotation);
+ EXPECT_EQ(actual.mirrored, expected.mirrored);
}
MatrixRotationTestCaseParam rotation_test_cases[6] = {
- {1, 0, VIDEO_ROTATION_0}, // cos(0) = 1, sin(0) = 0
- {0, -1, VIDEO_ROTATION_90}, // cos(90) = 0, sin(90) =-1
- {-1, 0, VIDEO_ROTATION_180}, // cos(180)=-1, sin(180)= 0
- {0, 1, VIDEO_ROTATION_270}, // cos(270)= 0, sin(270)= 1
- {1, 1, VIDEO_ROTATION_0}, // Error case
- {5, 5, VIDEO_ROTATION_0}, // Error case
+ {1, 0, VideoTransformation(VIDEO_ROTATION_0)}, // cos(0) = 1, sin(0) = 0
+ {0, -1,
+ VideoTransformation(VIDEO_ROTATION_90)}, // cos(90) = 0, sin(90) =-1
+ {-1, 0,
+ VideoTransformation(VIDEO_ROTATION_180)}, // cos(180)=-1, sin(180)= 0
+ {0, 1,
+ VideoTransformation(VIDEO_ROTATION_270)}, // cos(270)= 0, sin(270)= 1
+ {1, 1, VideoTransformation(VIDEO_ROTATION_0)}, // Error case
+ {5, 5, VideoTransformation(VIDEO_ROTATION_0)}, // Error case
};
INSTANTIATE_TEST_SUITE_P(CheckMath,
MP4StreamParserRotationMatrixEvaluatorTest,
diff --git a/chromium/media/formats/webm/webm_colour_parser.cc b/chromium/media/formats/webm/webm_colour_parser.cc
index c8b65f081a6..a8049316bb0 100644
--- a/chromium/media/formats/webm/webm_colour_parser.cc
+++ b/chromium/media/formats/webm/webm_colour_parser.cc
@@ -190,17 +190,25 @@ WebMColorMetadata WebMColourParser::GetWebMColorMetadata() const {
color_metadata.color_space = VideoColorSpace(
primaries_, transfer_characteristics_, matrix_coefficients_, range_id);
- if (max_content_light_level_ != -1)
- color_metadata.hdr_metadata.max_content_light_level =
- max_content_light_level_;
+ if (max_content_light_level_ != -1 || max_frame_average_light_level_ != -1 ||
+ mastering_metadata_parsed_) {
+ color_metadata.hdr_metadata = HDRMetadata();
- if (max_frame_average_light_level_ != -1)
- color_metadata.hdr_metadata.max_frame_average_light_level =
- max_frame_average_light_level_;
+ if (max_content_light_level_ != -1) {
+ color_metadata.hdr_metadata->max_content_light_level =
+ max_content_light_level_;
+ }
+
+ if (max_frame_average_light_level_ != -1) {
+ color_metadata.hdr_metadata->max_frame_average_light_level =
+ max_frame_average_light_level_;
+ }
- if (mastering_metadata_parsed_)
- color_metadata.hdr_metadata.mastering_metadata =
- mastering_metadata_parser_.GetMasteringMetadata();
+ if (mastering_metadata_parsed_) {
+ color_metadata.hdr_metadata->mastering_metadata =
+ mastering_metadata_parser_.GetMasteringMetadata();
+ }
+ }
return color_metadata;
}
diff --git a/chromium/media/formats/webm/webm_colour_parser.h b/chromium/media/formats/webm/webm_colour_parser.h
index 636bdc98bf6..af05479174a 100644
--- a/chromium/media/formats/webm/webm_colour_parser.h
+++ b/chromium/media/formats/webm/webm_colour_parser.h
@@ -6,6 +6,7 @@
#define MEDIA_FORMATS_WEBM_WEBM_COLOUR_PARSER_H_
#include "base/macros.h"
+#include "base/optional.h"
#include "media/base/hdr_metadata.h"
#include "media/base/video_color_space.h"
#include "media/formats/webm/webm_parser.h"
@@ -25,7 +26,7 @@ struct MEDIA_EXPORT WebMColorMetadata {
VideoColorSpace color_space;
- HDRMetadata hdr_metadata;
+ base::Optional<HDRMetadata> hdr_metadata;
WebMColorMetadata();
WebMColorMetadata(const WebMColorMetadata& rhs);
diff --git a/chromium/media/formats/webm/webm_video_client.cc b/chromium/media/formats/webm/webm_video_client.cc
index 231723bba05..9a986badfa3 100644
--- a/chromium/media/formats/webm/webm_video_client.cc
+++ b/chromium/media/formats/webm/webm_video_client.cc
@@ -13,14 +13,15 @@ namespace media {
namespace {
// Tries to parse |data| to extract the VP9 Profile ID, or returns Profile 0.
-media::VideoCodecProfile GetVP9CodecProfile(const std::vector<uint8_t>& data) {
+media::VideoCodecProfile GetVP9CodecProfile(const std::vector<uint8_t>& data,
+ bool is_probably_10bit) {
// VP9 CodecPrivate (http://wiki.webmproject.org/vp9-codecprivate) might have
// Profile information in the first field, if present.
constexpr uint8_t kVP9ProfileFieldId = 0x01;
constexpr uint8_t kVP9ProfileFieldLength = 1;
if (data.size() < 3 || data[0] != kVP9ProfileFieldId ||
data[1] != kVP9ProfileFieldLength || data[2] > 3) {
- return VP9PROFILE_PROFILE0;
+ return is_probably_10bit ? VP9PROFILE_PROFILE2 : VP9PROFILE_PROFILE0;
}
return static_cast<VideoCodecProfile>(
@@ -56,6 +57,16 @@ bool WebMVideoClient::InitializeConfig(
VideoDecoderConfig* config) {
DCHECK(config);
+ bool is_8bit = true;
+ VideoColorSpace color_space = VideoColorSpace::REC709();
+ if (colour_parsed_) {
+ WebMColorMetadata color_metadata = colour_parser_.GetWebMColorMetadata();
+ color_space = color_metadata.color_space;
+ if (color_metadata.hdr_metadata.has_value())
+ config->set_hdr_metadata(*color_metadata.hdr_metadata);
+ is_8bit = color_metadata.BitsPerChannel <= 8;
+ }
+
VideoCodec video_codec = kUnknownVideoCodec;
VideoCodecProfile profile = VIDEO_CODEC_PROFILE_UNKNOWN;
if (codec_id == "V_VP8") {
@@ -63,7 +74,9 @@ bool WebMVideoClient::InitializeConfig(
profile = VP8PROFILE_ANY;
} else if (codec_id == "V_VP9") {
video_codec = kCodecVP9;
- profile = GetVP9CodecProfile(codec_private);
+ profile = GetVP9CodecProfile(
+ codec_private, color_space.ToGfxColorSpace().IsHDR() ||
+ config->hdr_metadata().has_value() || !is_8bit);
#if BUILDFLAG(ENABLE_AV1_DECODER)
} else if (codec_id == "V_AV1") {
// TODO(dalecurtis): AV1 profiles in WebM are not finalized, this needs
@@ -121,14 +134,8 @@ bool WebMVideoClient::InitializeConfig(
}
gfx::Size natural_size = gfx::Size(display_width_, display_height_);
- VideoColorSpace color_space = VideoColorSpace::REC709();
- if (colour_parsed_) {
- WebMColorMetadata color_metadata = colour_parser_.GetWebMColorMetadata();
- color_space = color_metadata.color_space;
- config->set_hdr_metadata(color_metadata.hdr_metadata);
- }
config->Initialize(video_codec, profile, format, color_space,
- VIDEO_ROTATION_0, coded_size, visible_rect, natural_size,
+ kNoTransformation, coded_size, visible_rect, natural_size,
codec_private, encryption_scheme);
return config->IsValidConfig();
}
diff --git a/chromium/media/formats/webm/webm_video_client_unittest.cc b/chromium/media/formats/webm/webm_video_client_unittest.cc
index dd9e33da747..b3ebf0beae0 100644
--- a/chromium/media/formats/webm/webm_video_client_unittest.cc
+++ b/chromium/media/formats/webm/webm_video_client_unittest.cc
@@ -41,12 +41,93 @@ class WebMVideoClientTest : public testing::TestWithParam<CodecTestParams> {
webm_video_client_.OnUInt(kWebMIdPixelHeight, kCodedSize.height());
}
+ WebMParserClient* OnListStart(int id) {
+ return webm_video_client_.OnListStart(id);
+ }
+
+ void OnListEnd(int id) { webm_video_client_.OnListEnd(id); }
+
testing::StrictMock<MockMediaLog> media_log_;
WebMVideoClient webm_video_client_;
DISALLOW_COPY_AND_ASSIGN(WebMVideoClientTest);
};
+TEST_P(WebMVideoClientTest, AutodetectVp9Profile2NoDetection) {
+ const bool has_valid_codec_private = GetParam().codec_private.size() > 3;
+
+ auto* parser = OnListStart(kWebMIdColour);
+ // Set 8bit and SDR fields.
+ parser->OnUInt(kWebMIdBitsPerChannel, 8);
+ parser->OnUInt(kWebMIdTransferCharacteristics,
+ static_cast<int64_t>(VideoColorSpace::TransferID::BT709));
+ OnListEnd(kWebMIdColour);
+
+ VideoDecoderConfig config;
+ EXPECT_TRUE(webm_video_client_.InitializeConfig(
+ "V_VP9", GetParam().codec_private, EncryptionScheme(), &config));
+
+ if (!has_valid_codec_private)
+ EXPECT_EQ(config.profile(), VP9PROFILE_PROFILE0);
+ else
+ EXPECT_EQ(config.profile(), GetParam().profile);
+}
+
+TEST_P(WebMVideoClientTest, AutodetectVp9Profile2BitsPerChannel) {
+ const bool has_valid_codec_private = GetParam().codec_private.size() > 3;
+
+ auto* parser = OnListStart(kWebMIdColour);
+ parser->OnUInt(kWebMIdBitsPerChannel, 10);
+ OnListEnd(kWebMIdColour);
+
+ VideoDecoderConfig config;
+ EXPECT_TRUE(webm_video_client_.InitializeConfig(
+ "V_VP9", GetParam().codec_private, EncryptionScheme(), &config));
+
+ if (!has_valid_codec_private)
+ EXPECT_EQ(config.profile(), VP9PROFILE_PROFILE2);
+ else
+ EXPECT_EQ(config.profile(), GetParam().profile);
+}
+
+TEST_P(WebMVideoClientTest, AutodetectVp9Profile2HDRMetaData) {
+ const bool has_valid_codec_private = GetParam().codec_private.size() > 3;
+
+ auto* color_parser = OnListStart(kWebMIdColour);
+ auto* metadata_parser = color_parser->OnListStart(kWebMIdMasteringMetadata);
+ metadata_parser->OnFloat(kWebMIdPrimaryRChromaticityX, 1.0);
+ color_parser->OnListEnd(kWebMIdMasteringMetadata);
+ OnListEnd(kWebMIdColour);
+
+ VideoDecoderConfig config;
+ EXPECT_TRUE(webm_video_client_.InitializeConfig(
+ "V_VP9", GetParam().codec_private, EncryptionScheme(), &config));
+
+ if (!has_valid_codec_private)
+ EXPECT_EQ(config.profile(), VP9PROFILE_PROFILE2);
+ else
+ EXPECT_EQ(config.profile(), GetParam().profile);
+}
+
+TEST_P(WebMVideoClientTest, AutodetectVp9Profile2HDRColorSpace) {
+ const bool has_valid_codec_private = GetParam().codec_private.size() > 3;
+
+ auto* parser = OnListStart(kWebMIdColour);
+ parser->OnUInt(
+ kWebMIdTransferCharacteristics,
+ static_cast<int64_t>(VideoColorSpace::TransferID::SMPTEST2084));
+ OnListEnd(kWebMIdColour);
+
+ VideoDecoderConfig config;
+ EXPECT_TRUE(webm_video_client_.InitializeConfig(
+ "V_VP9", GetParam().codec_private, EncryptionScheme(), &config));
+
+ if (!has_valid_codec_private)
+ EXPECT_EQ(config.profile(), VP9PROFILE_PROFILE2);
+ else
+ EXPECT_EQ(config.profile(), GetParam().profile);
+}
+
TEST_P(WebMVideoClientTest, InitializeConfigVP9Profiles) {
const std::string kCodecId = "V_VP9";
const VideoCodecProfile profile = GetParam().profile;
@@ -58,7 +139,7 @@ TEST_P(WebMVideoClientTest, InitializeConfigVP9Profiles) {
VideoDecoderConfig expected_config(
kCodecVP9, profile, PIXEL_FORMAT_I420, VideoColorSpace::REC709(),
- VIDEO_ROTATION_0, kCodedSize, gfx::Rect(kCodedSize), kCodedSize,
+ kNoTransformation, kCodedSize, gfx::Rect(kCodedSize), kCodedSize,
codec_private, Unencrypted());
EXPECT_TRUE(config.Matches(expected_config))
diff --git a/chromium/media/gpu/BUILD.gn b/chromium/media/gpu/BUILD.gn
index 6eed6db69ad..fa578804a74 100644
--- a/chromium/media/gpu/BUILD.gn
+++ b/chromium/media/gpu/BUILD.gn
@@ -5,6 +5,7 @@
import("//build/buildflag_header.gni")
import("//build/config/features.gni")
import("//build/config/ui.gni")
+import("//gpu/vulkan/features.gni")
import("//media/gpu/args.gni")
import("//media/media_options.gni")
import("//testing/test.gni")
@@ -50,16 +51,6 @@ component("gpu") {
defines = [ "MEDIA_GPU_IMPLEMENTATION" ]
sources = [
- "command_buffer_helper.cc",
- "command_buffer_helper.h",
- "fake_mjpeg_decode_accelerator.cc",
- "fake_mjpeg_decode_accelerator.h",
- "gles2_decoder_helper.cc",
- "gles2_decoder_helper.h",
- "gpu_jpeg_encode_accelerator_factory.cc",
- "gpu_jpeg_encode_accelerator_factory.h",
- "gpu_mjpeg_decode_accelerator_factory.cc",
- "gpu_mjpeg_decode_accelerator_factory.h",
"gpu_video_accelerator_util.cc",
"gpu_video_accelerator_util.h",
"gpu_video_decode_accelerator_factory.cc",
@@ -70,6 +61,7 @@ component("gpu") {
public_deps = [
":buildflags",
+ ":command_buffer_helper",
":common",
":image_processor",
":video_frame_mapper",
@@ -80,9 +72,6 @@ component("gpu") {
"//ui/gfx/geometry",
]
deps = [
- "//gpu/command_buffer/common:gles2_utils",
- "//gpu/command_buffer/service:gles2",
- "//gpu/ipc/service",
"//third_party/libyuv",
"//ui/base",
"//ui/display/types",
@@ -158,6 +147,9 @@ component("gpu") {
"//services/service_manager/public/cpp:cpp",
"//ui/gl:gl_jni_headers",
]
+ if (enable_vulkan) {
+ deps += [ "//gpu/vulkan:vulkan" ]
+ }
# TODO(crbug.com/789435): This is needed for AVDA to access the CDM
# directly. Remove this dependency after VDAs are also running as part of
@@ -181,6 +173,8 @@ component("gpu") {
"windows/d3d11_h264_accelerator.h",
"windows/d3d11_picture_buffer.cc",
"windows/d3d11_picture_buffer.h",
+ "windows/d3d11_texture_wrapper.cc",
+ "windows/d3d11_texture_wrapper.h",
"windows/d3d11_video_context_wrapper.cc",
"windows/d3d11_video_context_wrapper.h",
"windows/d3d11_video_decoder.cc",
@@ -257,6 +251,8 @@ source_set("common") {
"h264_dpb.cc",
"h264_dpb.h",
"macros.h",
+ "video_frame_converter.cc",
+ "video_frame_converter.h",
"vp9_decoder.cc",
"vp9_decoder.h",
"vp9_picture.cc",
@@ -280,12 +276,33 @@ source_set("common") {
":buildflags",
"//base",
"//media",
+ "//media/parsers",
"//ui/gfx:buffer_types",
"//ui/gfx:memory_buffer",
"//ui/gfx/geometry",
]
}
+source_set("command_buffer_helper") {
+ defines = [ "MEDIA_GPU_IMPLEMENTATION" ]
+ sources = [
+ "command_buffer_helper.cc",
+ "command_buffer_helper.h",
+ "gles2_decoder_helper.cc",
+ "gles2_decoder_helper.h",
+ ]
+
+ public_deps = [
+ "//base",
+ "//gpu/command_buffer/common",
+ "//gpu/command_buffer/common:gles2_utils",
+ "//gpu/command_buffer/service",
+ "//gpu/command_buffer/service:gles2",
+ "//gpu/ipc/service",
+ "//ui/gl",
+ ]
+}
+
source_set("image_processor") {
defines = [ "MEDIA_GPU_IMPLEMENTATION" ]
sources = [
@@ -299,6 +316,7 @@ source_set("image_processor") {
]
deps = [
+ ":video_frame_mapper",
"//third_party/libyuv",
"//ui/gl",
]
@@ -306,9 +324,6 @@ source_set("image_processor") {
if (use_v4l2_codec) {
deps += [ "//media/gpu/v4l2" ]
}
- if (is_linux) {
- deps += [ ":video_frame_mapper" ]
- }
}
source_set("image_processor_common") {
@@ -488,39 +503,10 @@ if (use_v4l2_codec || use_vaapi || is_mac || is_win) {
"//base/test:test_support",
"//media:test_support",
"//media/gpu",
+ "//media/parsers",
"//mojo/core/embedder",
"//testing/gtest",
- "//ui/base",
- "//ui/gfx",
- "//ui/gfx:test_support",
- "//ui/gfx/geometry",
- "//ui/gl",
- "//ui/gl:test_support",
- ]
- configs += [ "//third_party/libyuv:libyuv_config" ]
- sources = [
- "video_encode_accelerator_unittest.cc",
- ]
- if (use_x11) {
- deps += [ "//ui/gfx/x" ]
- }
- if (use_ozone) {
- deps += [ "//ui/ozone" ]
- }
- }
-}
-
-if (use_v4l2_codec || use_vaapi) {
- test("jpeg_encode_accelerator_unittest") {
- deps = [
- "test:helpers",
- "//base",
- "//base/test:test_support",
- "//media:test_support",
- "//media/gpu",
- "//mojo/core/embedder",
- "//testing/gtest",
- "//third_party:jpeg",
+ "//third_party/ffmpeg",
"//third_party/libyuv",
"//ui/base",
"//ui/gfx",
@@ -531,49 +517,8 @@ if (use_v4l2_codec || use_vaapi) {
]
configs += [ "//third_party/libyuv:libyuv_config" ]
sources = [
- "jpeg_encode_accelerator_unittest.cc",
- ]
- if (use_x11) {
- deps += [ "//ui/gfx/x" ]
- }
- if (use_ozone) {
- deps += [ "//ui/ozone" ]
- }
- }
-}
-
-if (is_chromeos || is_linux) {
- test("jpeg_decode_accelerator_unittest") {
- deps = [
- "test:helpers",
- "//base",
- "//media:test_support",
- "//media/gpu",
- "//media/mojo/services",
- "//mojo/core/embedder",
- "//testing/gtest",
- "//third_party/libyuv",
- "//ui/base",
- "//ui/gfx",
- "//ui/gfx:test_support",
- "//ui/gfx/geometry",
- "//ui/gl",
- "//ui/gl:test_support",
- ]
- configs += [ "//third_party/libyuv:libyuv_config" ]
- sources = [
- "jpeg_decode_accelerator_unittest.cc",
- ]
- data = [
- "//media/test/data/peach_pi-1280x720.jpg",
- "//media/test/data/peach_pi-40x23.jpg",
- "//media/test/data/peach_pi-41x22.jpg",
- "//media/test/data/peach_pi-41x23.jpg",
+ "video_encode_accelerator_unittest.cc",
]
- if (use_vaapi) {
- deps += [ "//media/gpu/vaapi:jpeg_decoder_unit_test" ]
- data += [ "//media/test/data/pixel-1280x720.jpg" ]
- }
if (use_x11) {
deps += [ "//ui/gfx/x" ]
}
@@ -591,10 +536,8 @@ static_library("test_support") {
"test/fake_command_buffer_helper.h",
]
configs += [ "//media:media_config" ]
- deps = [
- ":gpu",
- ]
public_deps = [
+ ":gpu",
"//base",
"//media",
]
diff --git a/chromium/media/gpu/DEPS b/chromium/media/gpu/DEPS
index b01fca34695..b3dc163b21a 100644
--- a/chromium/media/gpu/DEPS
+++ b/chromium/media/gpu/DEPS
@@ -13,11 +13,16 @@ include_rules = [
"+ui/display/manager",
"+ui/display/types",
"+ui/platform_window",
+ "+components/viz/common/gpu/vulkan_context_provider.h",
# media/gpu is not part of "media" target and should not use MEDIA_EXPORT.
"-media/base/media_export.h",
# SharedImageVideo uses it.
"+components/viz/common/resources/resource_format_utils.h",
- "+components/viz/common/resources/resource_sizes.h"
+ "+components/viz/common/resources/resource_sizes.h",
+
+ # Chrome OS specific JEA/MJDA.
+ "+components/chromeos_camera/jpeg_encode_accelerator.h",
+ "+components/chromeos_camera/mjpeg_decode_accelerator.h",
]
diff --git a/chromium/media/gpu/OWNERS b/chromium/media/gpu/OWNERS
index 1ae3f8c0918..5aaff9603d0 100644
--- a/chromium/media/gpu/OWNERS
+++ b/chromium/media/gpu/OWNERS
@@ -10,6 +10,3 @@ sandersd@chromium.org
# For Android media gpu files.
per-file *android*=liberato@chromium.org
per-file *avda*=liberato@chromium.org
-
-# For Mac encoder files.
-per-file *vt_video_encode*=emircan@chromium.org
diff --git a/chromium/media/gpu/accelerated_video_decoder.h b/chromium/media/gpu/accelerated_video_decoder.h
index fb7acd82a36..11c4ad9699e 100644
--- a/chromium/media/gpu/accelerated_video_decoder.h
+++ b/chromium/media/gpu/accelerated_video_decoder.h
@@ -11,6 +11,7 @@
#include "base/macros.h"
#include "media/base/decrypt_config.h"
#include "media/gpu/media_gpu_export.h"
+#include "ui/gfx/geometry/rect.h"
#include "ui/gfx/geometry/size.h"
namespace media {
@@ -66,11 +67,12 @@ class MEDIA_GPU_EXPORT AcceleratedVideoDecoder {
// we need a new set of them, or when an error occurs.
virtual DecodeResult Decode() WARN_UNUSED_RESULT = 0;
- // Return dimensions/required number of pictures that client should be ready
- // to provide for the decoder to function properly (of which up to
- // GetNumReferenceFrames() might be needed for internal decoding). To be used
- // after Decode() returns kAllocateNewSurfaces.
+ // Return dimensions/visible rectangle/required number of pictures that client
+ // should be ready to provide for the decoder to function properly (of which
+ // up to GetNumReferenceFrames() might be needed for internal decoding). To be
+ // used after Decode() returns kAllocateNewSurfaces.
virtual gfx::Size GetPicSize() const = 0;
+ virtual gfx::Rect GetVisibleRect() const = 0;
virtual size_t GetRequiredNumOfPictures() const = 0;
virtual size_t GetNumReferenceFrames() const = 0;
diff --git a/chromium/media/gpu/android/android_video_decode_accelerator.cc b/chromium/media/gpu/android/android_video_decode_accelerator.cc
index ddb37504362..934c4a7b281 100644
--- a/chromium/media/gpu/android/android_video_decode_accelerator.cc
+++ b/chromium/media/gpu/android/android_video_decode_accelerator.cc
@@ -244,17 +244,16 @@ static AVDAManager* GetManager() {
}
AndroidVideoDecodeAccelerator::BitstreamRecord::BitstreamRecord(
- const BitstreamBuffer& bitstream_buffer)
- : buffer(bitstream_buffer) {
+ BitstreamBuffer bitstream_buffer)
+ : buffer(std::move(bitstream_buffer)) {
if (buffer.id() != -1) {
memory.reset(
- new UnalignedSharedMemory(buffer.handle(), buffer.size(), true));
+ new UnalignedSharedMemory(buffer.TakeRegion(), buffer.size(), true));
}
}
AndroidVideoDecodeAccelerator::BitstreamRecord::BitstreamRecord(
- BitstreamRecord&& other)
- : buffer(std::move(other.buffer)), memory(std::move(other.memory)) {}
+ BitstreamRecord&& other) = default;
AndroidVideoDecodeAccelerator::BitstreamRecord::~BitstreamRecord() {}
@@ -638,9 +637,10 @@ bool AndroidVideoDecodeAccelerator::QueueInput() {
DCHECK_NE(input_buf_index, -1);
- BitstreamBuffer bitstream_buffer = pending_bitstream_records_.front().buffer;
+ BitstreamBuffer* bitstream_buffer =
+ &pending_bitstream_records_.front().buffer;
- if (bitstream_buffer.id() == -1) {
+ if (bitstream_buffer->id() == -1) {
pending_bitstream_records_.pop();
TRACE_COUNTER1("media", "AVDA::PendingBitstreamBufferCount",
pending_bitstream_records_.size());
@@ -653,19 +653,18 @@ bool AndroidVideoDecodeAccelerator::QueueInput() {
if (pending_input_buf_index_ == -1) {
// When |pending_input_buf_index_| is not -1, the buffer is already dequeued
- // from MediaCodec, filled with data and bitstream_buffer.handle() is
- // closed.
+ // from MediaCodec and filled with data. The buffer shared memory handle is
+ // held by the front pending bitstream record.
shm = std::move(pending_bitstream_records_.front().memory);
- auto* buffer = &pending_bitstream_records_.front().buffer;
- if (!shm->MapAt(buffer->offset(), buffer->size())) {
+ if (!shm->MapAt(bitstream_buffer->offset(), bitstream_buffer->size())) {
NOTIFY_ERROR(UNREADABLE_INPUT, "UnalignedSharedMemory::Map() failed");
return false;
}
}
const base::TimeDelta presentation_timestamp =
- bitstream_buffer.presentation_timestamp();
+ bitstream_buffer->presentation_timestamp();
DCHECK(presentation_timestamp != kNoTimestamp)
<< "Bitstream buffers must have valid presentation timestamps";
@@ -675,25 +674,27 @@ bool AndroidVideoDecodeAccelerator::QueueInput() {
// buffer id in the returned Pictures to map a bitstream buffer back to a
// timestamp on their side, so either one of the bitstream buffer ids will
// result in them finding the right timestamp.
- bitstream_buffers_in_decoder_[presentation_timestamp] = bitstream_buffer.id();
+ bitstream_buffers_in_decoder_[presentation_timestamp] =
+ bitstream_buffer->id();
// Notice that |memory| will be null if we repeatedly enqueue the same buffer,
// this happens after MEDIA_CODEC_NO_KEY.
const uint8_t* memory =
shm ? static_cast<const uint8_t*>(shm->memory()) : nullptr;
- const std::string& key_id = bitstream_buffer.key_id();
- const std::string& iv = bitstream_buffer.iv();
- const std::vector<SubsampleEntry>& subsamples = bitstream_buffer.subsamples();
+ const std::string& key_id = bitstream_buffer->key_id();
+ const std::string& iv = bitstream_buffer->iv();
+ const std::vector<SubsampleEntry>& subsamples =
+ bitstream_buffer->subsamples();
MediaCodecStatus status;
if (key_id.empty() || iv.empty()) {
status = media_codec_->QueueInputBuffer(input_buf_index, memory,
- bitstream_buffer.size(),
+ bitstream_buffer->size(),
presentation_timestamp);
} else {
// VDAs only support "cenc" encryption scheme.
status = media_codec_->QueueSecureInputBuffer(
- input_buf_index, memory, bitstream_buffer.size(), key_id, iv,
+ input_buf_index, memory, bitstream_buffer->size(), key_id, iv,
subsamples, AesCtrEncryptionScheme(), presentation_timestamp);
}
@@ -711,6 +712,8 @@ bool AndroidVideoDecodeAccelerator::QueueInput() {
}
pending_input_buf_index_ = -1;
+ // Popping the pending record invalides |bitstream_buffer|.
+ int32_t pending_bitstream_buffer_id = bitstream_buffer->id();
pending_bitstream_records_.pop();
TRACE_COUNTER1("media", "AVDA::PendingBitstreamBufferCount",
pending_bitstream_records_.size());
@@ -724,8 +727,9 @@ bool AndroidVideoDecodeAccelerator::QueueInput() {
base::ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE,
base::BindOnce(&AndroidVideoDecodeAccelerator::NotifyEndOfBitstreamBuffer,
- weak_this_factory_.GetWeakPtr(), bitstream_buffer.id()));
- bitstreams_notified_in_advance_.push_back(bitstream_buffer.id());
+ weak_this_factory_.GetWeakPtr(),
+ pending_bitstream_buffer_id));
+ bitstreams_notified_in_advance_.push_back(pending_bitstream_buffer_id);
if (status != MEDIA_CODEC_OK) {
NOTIFY_ERROR(PLATFORM_FAILURE, "QueueInputBuffer failed:" << status);
@@ -975,8 +979,7 @@ void AndroidVideoDecodeAccelerator::SendDecodedFrameToClient(
picture_buffer);
}
-void AndroidVideoDecodeAccelerator::Decode(
- const BitstreamBuffer& bitstream_buffer) {
+void AndroidVideoDecodeAccelerator::Decode(BitstreamBuffer bitstream_buffer) {
DCHECK(thread_checker_.CalledOnValidThread());
// If we deferred getting a surface, then start getting one now.
@@ -1000,13 +1003,10 @@ void AndroidVideoDecodeAccelerator::Decode(
}
if (bitstream_buffer.id() >= 0 && bitstream_buffer.size() > 0) {
- DecodeBuffer(bitstream_buffer);
+ DecodeBuffer(std::move(bitstream_buffer));
return;
}
- if (base::SharedMemory::IsHandleValid(bitstream_buffer.handle()))
- base::SharedMemory::CloseHandle(bitstream_buffer.handle());
-
if (bitstream_buffer.id() < 0) {
NOTIFY_ERROR(INVALID_ARGUMENT,
"Invalid bistream_buffer, id: " << bitstream_buffer.id());
@@ -1020,8 +1020,8 @@ void AndroidVideoDecodeAccelerator::Decode(
}
void AndroidVideoDecodeAccelerator::DecodeBuffer(
- const BitstreamBuffer& bitstream_buffer) {
- pending_bitstream_records_.push(BitstreamRecord(bitstream_buffer));
+ BitstreamBuffer bitstream_buffer) {
+ pending_bitstream_records_.push(BitstreamRecord(std::move(bitstream_buffer)));
TRACE_COUNTER1("media", "AVDA::PendingBitstreamBufferCount",
pending_bitstream_records_.size());
@@ -1182,7 +1182,8 @@ void AndroidVideoDecodeAccelerator::StartCodecDrain(DrainType drain_type) {
// Queue EOS if one is not already queued.
if (!previous_drain_type)
- DecodeBuffer(BitstreamBuffer(-1, base::SharedMemoryHandle(), 0));
+ DecodeBuffer(BitstreamBuffer(-1, base::SharedMemoryHandle(),
+ false /* read_only */, 0));
}
bool AndroidVideoDecodeAccelerator::IsDrainingForResetOrDestroy() const {
diff --git a/chromium/media/gpu/android/android_video_decode_accelerator.h b/chromium/media/gpu/android/android_video_decode_accelerator.h
index 5fc9bd4cf2f..d338828d408 100644
--- a/chromium/media/gpu/android/android_video_decode_accelerator.h
+++ b/chromium/media/gpu/android/android_video_decode_accelerator.h
@@ -62,7 +62,7 @@ class MEDIA_GPU_EXPORT AndroidVideoDecodeAccelerator
// VideoDecodeAccelerator implementation:
bool Initialize(const Config& config, Client* client) override;
- void Decode(const BitstreamBuffer& bitstream_buffer) override;
+ void Decode(BitstreamBuffer bitstream_buffer) override;
void AssignPictureBuffers(const std::vector<PictureBuffer>& buffers) override;
void ReusePictureBuffer(int32_t picture_buffer_id) override;
void Flush() override;
@@ -193,7 +193,7 @@ class MEDIA_GPU_EXPORT AndroidVideoDecodeAccelerator
// Decode the content in the |bitstream_buffer|. Note that a
// |bitstream_buffer| of id as -1 indicates a flush command.
- void DecodeBuffer(const BitstreamBuffer& bitstream_buffer);
+ void DecodeBuffer(BitstreamBuffer bitstream_buffer);
// Called during Initialize() for encrypted streams to set up the CDM.
void InitializeCdm();
@@ -308,10 +308,12 @@ class MEDIA_GPU_EXPORT AndroidVideoDecodeAccelerator
// if any. The goal is to prevent leaving a BitstreamBuffer's shared memory
// handle open.
struct BitstreamRecord {
- BitstreamRecord(const BitstreamBuffer&);
+ BitstreamRecord(BitstreamBuffer);
BitstreamRecord(BitstreamRecord&& other);
~BitstreamRecord();
+ // The region in this buffer will not be valid, as it will have been passed
+ // to |memory|, below.
BitstreamBuffer buffer;
// |memory| may be null if buffer has no data.
diff --git a/chromium/media/gpu/android/android_video_encode_accelerator.cc b/chromium/media/gpu/android/android_video_encode_accelerator.cc
index da452f63269..c5270706bc6 100644
--- a/chromium/media/gpu/android/android_video_encode_accelerator.cc
+++ b/chromium/media/gpu/android/android_video_encode_accelerator.cc
@@ -229,9 +229,8 @@ void AndroidVideoEncodeAccelerator::MaybeStopIOTimer() {
}
}
-void AndroidVideoEncodeAccelerator::Encode(
- const scoped_refptr<VideoFrame>& frame,
- bool force_keyframe) {
+void AndroidVideoEncodeAccelerator::Encode(scoped_refptr<VideoFrame> frame,
+ bool force_keyframe) {
DVLOG(3) << __PRETTY_FUNCTION__ << ": " << force_keyframe;
DCHECK(thread_checker_.CalledOnValidThread());
RETURN_ON_FAILURE(frame->format() == PIXEL_FORMAT_I420, "Unexpected format",
@@ -251,15 +250,15 @@ void AndroidVideoEncodeAccelerator::Encode(
kInvalidArgumentError);
pending_frames_.push(
- std::make_tuple(frame, force_keyframe, base::Time::Now()));
+ std::make_tuple(std::move(frame), force_keyframe, base::Time::Now()));
DoIOTask();
}
void AndroidVideoEncodeAccelerator::UseOutputBitstreamBuffer(
- const BitstreamBuffer& buffer) {
+ BitstreamBuffer buffer) {
DVLOG(3) << __PRETTY_FUNCTION__ << ": bitstream_buffer_id=" << buffer.id();
DCHECK(thread_checker_.CalledOnValidThread());
- available_bitstream_buffers_.push_back(buffer);
+ available_bitstream_buffers_.push_back(std::move(buffer));
DoIOTask();
}
@@ -422,10 +421,11 @@ void AndroidVideoEncodeAccelerator::DequeueOutput() {
const base::TimeDelta frame_timestamp = it->second;
frame_timestamp_map_.erase(it);
- BitstreamBuffer bitstream_buffer = available_bitstream_buffers_.back();
+ BitstreamBuffer bitstream_buffer =
+ std::move(available_bitstream_buffers_.back());
available_bitstream_buffers_.pop_back();
auto shm = std::make_unique<UnalignedSharedMemory>(
- bitstream_buffer.handle(), bitstream_buffer.size(), false);
+ bitstream_buffer.TakeRegion(), bitstream_buffer.size(), false);
RETURN_ON_FAILURE(
shm->MapAt(bitstream_buffer.offset(), bitstream_buffer.size()),
"Failed to map SHM", kPlatformFailureError);
diff --git a/chromium/media/gpu/android/android_video_encode_accelerator.h b/chromium/media/gpu/android/android_video_encode_accelerator.h
index f7bd9015bc6..bc0d850f197 100644
--- a/chromium/media/gpu/android/android_video_encode_accelerator.h
+++ b/chromium/media/gpu/android/android_video_encode_accelerator.h
@@ -41,9 +41,8 @@ class MEDIA_GPU_EXPORT AndroidVideoEncodeAccelerator
// VideoEncodeAccelerator implementation.
VideoEncodeAccelerator::SupportedProfiles GetSupportedProfiles() override;
bool Initialize(const Config& config, Client* client) override;
- void Encode(const scoped_refptr<VideoFrame>& frame,
- bool force_keyframe) override;
- void UseOutputBitstreamBuffer(const BitstreamBuffer& buffer) override;
+ void Encode(scoped_refptr<VideoFrame> frame, bool force_keyframe) override;
+ void UseOutputBitstreamBuffer(BitstreamBuffer buffer) override;
void RequestEncodingParametersChange(uint32_t bitrate,
uint32_t framerate) override;
void Destroy() override;
diff --git a/chromium/media/gpu/android/android_video_surface_chooser.h b/chromium/media/gpu/android/android_video_surface_chooser.h
index c5254c6e8a4..3b88a20655e 100644
--- a/chromium/media/gpu/android/android_video_surface_chooser.h
+++ b/chromium/media/gpu/android/android_video_surface_chooser.h
@@ -10,7 +10,7 @@
#include "base/memory/weak_ptr.h"
#include "base/optional.h"
#include "media/base/android/android_overlay.h"
-#include "media/base/video_rotation.h"
+#include "media/base/video_transformation.h"
#include "media/gpu/media_gpu_export.h"
#include "ui/gfx/geometry/rect.h"
diff --git a/chromium/media/gpu/android/codec_wrapper.cc b/chromium/media/gpu/android/codec_wrapper.cc
index b12a163a3ac..3da6c191f04 100644
--- a/chromium/media/gpu/android/codec_wrapper.cc
+++ b/chromium/media/gpu/android/codec_wrapper.cc
@@ -418,17 +418,9 @@ bool CodecWrapperImpl::ReleaseCodecOutputBuffer(int64_t id, bool render) {
if (!valid)
return false;
- // Discard the buffers preceding the one we're releasing. The buffers are in
- // presentation order because the ids are generated in presentation order.
- for (auto it = buffer_ids_.begin(); it < buffer_it; ++it) {
- int index = it->second;
- codec_->ReleaseOutputBuffer(index, false);
- DVLOG(2) << __func__ << " discarded " << index;
- }
-
int index = buffer_it->second;
codec_->ReleaseOutputBuffer(index, render);
- buffer_ids_.erase(buffer_ids_.begin(), buffer_it + 1);
+ buffer_ids_.erase(buffer_it);
if (output_buffer_release_cb_) {
output_buffer_release_cb_.Run(state_ == State::kDrained ||
state_ == State::kDraining);
diff --git a/chromium/media/gpu/android/codec_wrapper_unittest.cc b/chromium/media/gpu/android/codec_wrapper_unittest.cc
index dc9ea6e896b..222a567c828 100644
--- a/chromium/media/gpu/android/codec_wrapper_unittest.cc
+++ b/chromium/media/gpu/android/codec_wrapper_unittest.cc
@@ -169,11 +169,11 @@ TEST_F(CodecWrapperTest, DeletingCodecOutputBuffersAfterTheCodecIsSafe) {
codec_buffer = nullptr;
}
-TEST_F(CodecWrapperTest, CodecOutputBufferReleaseInvalidatesEarlierOnes) {
+TEST_F(CodecWrapperTest, CodecOutputBufferReleaseDoesNotInvalidateEarlierOnes) {
auto codec_buffer1 = DequeueCodecOutputBuffer();
auto codec_buffer2 = DequeueCodecOutputBuffer();
codec_buffer2->ReleaseToSurface();
- ASSERT_FALSE(codec_buffer1->ReleaseToSurface());
+ EXPECT_TRUE(codec_buffer1->ReleaseToSurface());
}
TEST_F(CodecWrapperTest, CodecOutputBufferReleaseDoesNotInvalidateLaterOnes) {
diff --git a/chromium/media/gpu/android/image_reader_gl_owner.cc b/chromium/media/gpu/android/image_reader_gl_owner.cc
index 400194433d7..e2d764f8b98 100644
--- a/chromium/media/gpu/android/image_reader_gl_owner.cc
+++ b/chromium/media/gpu/android/image_reader_gl_owner.cc
@@ -27,6 +27,23 @@
namespace media {
+namespace {
+bool IsSurfaceControl(TextureOwner::Mode mode) {
+ switch (mode) {
+ case TextureOwner::Mode::kAImageReaderInsecureSurfaceControl:
+ case TextureOwner::Mode::kAImageReaderSecureSurfaceControl:
+ return true;
+ case TextureOwner::Mode::kAImageReaderInsecure:
+ return false;
+ case TextureOwner::Mode::kSurfaceTextureInsecure:
+ NOTREACHED();
+ return false;
+ }
+ NOTREACHED();
+ return false;
+}
+} // namespace
+
// FrameAvailableEvent_ImageReader is a RefCounted wrapper for a WaitableEvent
// (it's not possible to put one in RefCountedData). This lets us safely signal
// an event on any thread.
@@ -69,8 +86,10 @@ class ImageReaderGLOwner::ScopedHardwareBufferImpl
}
void SetReadFence(base::ScopedFD fence_fd, bool has_context) final {
- DCHECK(!read_fence_.is_valid());
- read_fence_ = std::move(fence_fd);
+ // Client can call this method multiple times for a hardware buffer. Hence
+ // all the client provided sync_fd should be merged. Eg: BeginReadAccess()
+ // can be called multiple times for a SharedImageVideo representation.
+ read_fence_ = gl::MergeFDs(std::move(read_fence_), std::move(fence_fd));
}
private:
@@ -94,21 +113,31 @@ ImageReaderGLOwner::ImageReaderGLOwner(
// Set the width, height and format to some default value. This parameters
// are/maybe overriden by the producer sending buffers to this imageReader's
// Surface.
- int32_t width = 1, height = 1, max_images = 4;
- AIMAGE_FORMATS format = mode == Mode::kAImageReaderSecure
+ int32_t width = 1, height = 1;
+
+ // This should be as small as possible to limit the memory usage.
+ // ImageReader needs 2 images to mimic the behavior of SurfaceTexture. For
+ // SurfaceControl we need 3 images instead of 2 since 1 frame(and hence image
+ // associated with it) will be with system compositor and 2 frames will be in
+ // flight. Also note that we always acquire an image before deleting the
+ // previous acquired image. This causes 2 acquired images to be in flight at
+ // the image acquisition point until the previous image is deleted.
+ max_images_ = IsSurfaceControl(mode) ? 3 : 2;
+ AIMAGE_FORMATS format = mode == Mode::kAImageReaderSecureSurfaceControl
? AIMAGE_FORMAT_PRIVATE
: AIMAGE_FORMAT_YUV_420_888;
AImageReader* reader = nullptr;
+
// The usage flag below should be used when the buffer will be read from by
// the GPU as a texture.
- uint64_t usage = mode == Mode::kAImageReaderSecure
+ uint64_t usage = mode == Mode::kAImageReaderSecureSurfaceControl
? AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT
: AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE;
usage |= gl::SurfaceControl::RequiredUsage();
// Create a new reader for images of the desired size and format.
media_status_t return_code = loader_.AImageReader_newWithUsage(
- width, height, format, usage, max_images, &reader);
+ width, height, format, usage, max_images_, &reader);
if (return_code != AMEDIA_OK) {
LOG(ERROR) << " Image reader creation failed.";
if (return_code == AMEDIA_ERROR_INVALID_PARAMETER)
@@ -210,8 +239,19 @@ void ImageReaderGLOwner::UpdateTexImage() {
AImage* image = nullptr;
int acquire_fence_fd = -1;
media_status_t return_code = AMEDIA_OK;
- return_code = loader_.AImageReader_acquireLatestImageAsync(
- image_reader_, &image, &acquire_fence_fd);
+ DCHECK_GT(max_images_, static_cast<int32_t>(image_refs_.size()));
+ if (max_images_ - image_refs_.size() < 2) {
+ // acquireNextImageAsync is required here since as per the spec calling
+ // AImageReader_acquireLatestImage with less than two images of margin, that
+ // is (maxImages - currentAcquiredImages < 2) will not discard as expected.
+ // We always have currentAcquiredImages as 1 since we delete a previous
+ // image only after acquiring a new image.
+ return_code = loader_.AImageReader_acquireNextImageAsync(
+ image_reader_, &image, &acquire_fence_fd);
+ } else {
+ return_code = loader_.AImageReader_acquireLatestImageAsync(
+ image_reader_, &image, &acquire_fence_fd);
+ }
// TODO(http://crbug.com/846050).
// Need to add some better error handling if below error occurs. Currently we
diff --git a/chromium/media/gpu/android/image_reader_gl_owner.h b/chromium/media/gpu/android/image_reader_gl_owner.h
index 6ef30b305ee..8d9fd9c3236 100644
--- a/chromium/media/gpu/android/image_reader_gl_owner.h
+++ b/chromium/media/gpu/android/image_reader_gl_owner.h
@@ -46,6 +46,7 @@ class MEDIA_GPU_EXPORT ImageReaderGLOwner : public TextureOwner {
GetAHardwareBuffer() override;
const AImageReader* image_reader_for_testing() const { return image_reader_; }
+ int32_t max_images_for_testing() const { return max_images_; }
protected:
void OnTextureDestroyed(gpu::gles2::AbstractTexture*) override;
@@ -125,6 +126,7 @@ class MEDIA_GPU_EXPORT ImageReaderGLOwner : public TextureOwner {
// IgnorePendingRelease() or WaitForFrameAvailable() have been called since.
base::TimeTicks release_time_;
scoped_refptr<FrameAvailableEvent_ImageReader> frame_available_event_;
+ int32_t max_images_ = 0;
THREAD_CHECKER(thread_checker_);
DISALLOW_COPY_AND_ASSIGN(ImageReaderGLOwner);
diff --git a/chromium/media/gpu/android/image_reader_gl_owner_unittest.cc b/chromium/media/gpu/android/image_reader_gl_owner_unittest.cc
index 791059c0a98..d31a025c0c8 100644
--- a/chromium/media/gpu/android/image_reader_gl_owner_unittest.cc
+++ b/chromium/media/gpu/android/image_reader_gl_owner_unittest.cc
@@ -143,14 +143,25 @@ TEST_F(ImageReaderGLOwnerTest, DestructionWorksWithWrongContext) {
new_surface = nullptr;
}
-class ImageReaderGLOwnerSecureTest : public ImageReaderGLOwnerTest {
+// The max number of images used by the ImageReader must be 2 for non-Surface
+// control.
+TEST_F(ImageReaderGLOwnerTest, MaxImageExpectation) {
+ if (!IsImageReaderSupported())
+ return;
+ EXPECT_EQ(static_cast<ImageReaderGLOwner*>(image_reader_.get())
+ ->max_images_for_testing(),
+ 2);
+}
+
+class ImageReaderGLOwnerSecureSurfaceControlTest
+ : public ImageReaderGLOwnerTest {
public:
TextureOwner::Mode SecureMode() final {
- return TextureOwner::Mode::kAImageReaderSecure;
+ return TextureOwner::Mode::kAImageReaderSecureSurfaceControl;
}
};
-TEST_F(ImageReaderGLOwnerSecureTest, CreatesSecureAImageReader) {
+TEST_F(ImageReaderGLOwnerSecureSurfaceControlTest, CreatesSecureAImageReader) {
if (!IsImageReaderSupported())
return;
@@ -163,4 +174,32 @@ TEST_F(ImageReaderGLOwnerSecureTest, CreatesSecureAImageReader) {
EXPECT_EQ(format, AIMAGE_FORMAT_PRIVATE);
}
+// The max number of images used by the ImageReader must be 3 for Surface
+// control.
+TEST_F(ImageReaderGLOwnerSecureSurfaceControlTest, MaxImageExpectation) {
+ if (!IsImageReaderSupported())
+ return;
+ EXPECT_EQ(static_cast<ImageReaderGLOwner*>(image_reader_.get())
+ ->max_images_for_testing(),
+ 3);
+}
+
+class ImageReaderGLOwnerInsecureSurfaceControlTest
+ : public ImageReaderGLOwnerTest {
+ public:
+ TextureOwner::Mode SecureMode() final {
+ return TextureOwner::Mode::kAImageReaderInsecureSurfaceControl;
+ }
+};
+
+// The max number of images used by the ImageReader must be 3 for Surface
+// control.
+TEST_F(ImageReaderGLOwnerInsecureSurfaceControlTest, MaxImageExpectation) {
+ if (!IsImageReaderSupported())
+ return;
+ EXPECT_EQ(static_cast<ImageReaderGLOwner*>(image_reader_.get())
+ ->max_images_for_testing(),
+ 3);
+}
+
} // namespace media
diff --git a/chromium/media/gpu/android/media_codec_video_decoder.cc b/chromium/media/gpu/android/media_codec_video_decoder.cc
index a4f26750a39..a63fa947d8e 100644
--- a/chromium/media/gpu/android/media_codec_video_decoder.cc
+++ b/chromium/media/gpu/android/media_codec_video_decoder.cc
@@ -254,7 +254,7 @@ void MediaCodecVideoDecoder::Destroy() {
void MediaCodecVideoDecoder::Initialize(const VideoDecoderConfig& config,
bool low_delay,
CdmContext* cdm_context,
- const InitCB& init_cb,
+ InitCB init_cb,
const OutputCB& output_cb,
const WaitingCB& waiting_cb) {
DCHECK(output_cb);
@@ -265,21 +265,21 @@ void MediaCodecVideoDecoder::Initialize(const VideoDecoderConfig& config,
<< " MCVD with config: " << config.AsHumanReadableString()
<< ", cdm_context = " << cdm_context;
- InitCB bound_init_cb = BindToCurrentLoop(init_cb);
if (!ConfigSupported(config, device_info_)) {
- bound_init_cb.Run(false);
+ BindToCurrentLoop(std::move(init_cb)).Run(false);
return;
}
// Disallow codec changes when reinitializing.
if (!first_init && decoder_config_.codec() != config.codec()) {
DVLOG(1) << "Codec changed: cannot reinitialize";
- bound_init_cb.Run(false);
+ BindToCurrentLoop(std::move(init_cb)).Run(false);
return;
}
decoder_config_ = config;
- surface_chooser_helper_.SetVideoRotation(decoder_config_.video_rotation());
+ surface_chooser_helper_.SetVideoRotation(
+ decoder_config_.video_transformation().rotation);
output_cb_ = output_cb;
waiting_cb_ = waiting_cb;
@@ -294,22 +294,21 @@ void MediaCodecVideoDecoder::Initialize(const VideoDecoderConfig& config,
// encrypted config later.
if (first_init && cdm_context && cdm_context->GetMediaCryptoContext()) {
DCHECK(media_crypto_.is_null());
- SetCdm(cdm_context, init_cb);
+ SetCdm(cdm_context, std::move(init_cb));
return;
}
if (config.is_encrypted() && media_crypto_.is_null()) {
DVLOG(1) << "No MediaCrypto to handle encrypted config";
- bound_init_cb.Run(false);
+ BindToCurrentLoop(std::move(init_cb)).Run(false);
return;
}
// Do the rest of the initialization lazily on the first decode.
- init_cb.Run(true);
+ BindToCurrentLoop(std::move(init_cb)).Run(true);
}
-void MediaCodecVideoDecoder::SetCdm(CdmContext* cdm_context,
- const InitCB& init_cb) {
+void MediaCodecVideoDecoder::SetCdm(CdmContext* cdm_context, InitCB init_cb) {
DVLOG(1) << __func__;
DCHECK(cdm_context) << "No CDM provided";
DCHECK(cdm_context->GetMediaCryptoContext());
@@ -319,12 +318,12 @@ void MediaCodecVideoDecoder::SetCdm(CdmContext* cdm_context,
// Register CDM callbacks. The callbacks registered will be posted back to
// this thread via BindToCurrentLoop.
media_crypto_context_->SetMediaCryptoReadyCB(media::BindToCurrentLoop(
- base::Bind(&MediaCodecVideoDecoder::OnMediaCryptoReady,
- weak_factory_.GetWeakPtr(), init_cb)));
+ base::BindOnce(&MediaCodecVideoDecoder::OnMediaCryptoReady,
+ weak_factory_.GetWeakPtr(), std::move(init_cb))));
}
void MediaCodecVideoDecoder::OnMediaCryptoReady(
- const InitCB& init_cb,
+ InitCB init_cb,
JavaObjectPtr media_crypto,
bool requires_secure_video_codec) {
DVLOG(1) << __func__
@@ -334,21 +333,20 @@ void MediaCodecVideoDecoder::OnMediaCryptoReady(
DCHECK(media_crypto);
if (media_crypto->is_null()) {
- media_crypto_context_->SetMediaCryptoReadyCB(
- MediaCryptoContext::MediaCryptoReadyCB());
+ media_crypto_context_->SetMediaCryptoReadyCB(base::NullCallback());
media_crypto_context_ = nullptr;
if (decoder_config_.is_encrypted()) {
LOG(ERROR) << "MediaCrypto is not available";
EnterTerminalState(State::kError);
- init_cb.Run(false);
+ std::move(init_cb).Run(false);
return;
}
// MediaCrypto is not available, but the stream is clear. So we can still
// play the current stream. But if we switch to an encrypted stream playback
// will fail.
- init_cb.Run(true);
+ std::move(init_cb).Run(true);
return;
}
@@ -373,7 +371,7 @@ void MediaCodecVideoDecoder::OnMediaCryptoReady(
: SurfaceChooserHelper::SecureSurfaceMode::kRequested);
// Signal success, and create the codec lazily on the first decode.
- init_cb.Run(true);
+ std::move(init_cb).Run(true);
}
void MediaCodecVideoDecoder::OnKeyAdded() {
@@ -596,10 +594,10 @@ void MediaCodecVideoDecoder::OnCodecConfigured(
}
void MediaCodecVideoDecoder::Decode(scoped_refptr<DecoderBuffer> buffer,
- const DecodeCB& decode_cb) {
+ DecodeCB decode_cb) {
DVLOG(3) << __func__ << ": " << buffer->AsHumanReadableString();
if (state_ == State::kError) {
- decode_cb.Run(DecodeStatus::DECODE_ERROR);
+ std::move(decode_cb).Run(DecodeStatus::DECODE_ERROR);
return;
}
pending_decodes_.emplace_back(std::move(buffer), std::move(decode_cb));
@@ -752,7 +750,7 @@ bool MediaCodecVideoDecoder::QueueInput() {
DCHECK(!eos_decode_cb_);
eos_decode_cb_ = std::move(pending_decode.decode_cb);
} else {
- pending_decode.decode_cb.Run(DecodeStatus::OK);
+ std::move(pending_decode.decode_cb).Run(DecodeStatus::OK);
}
pending_decodes_.pop_front();
return true;
@@ -849,14 +847,22 @@ void MediaCodecVideoDecoder::RunEosDecodeCb(int reset_generation) {
void MediaCodecVideoDecoder::ForwardVideoFrame(
int reset_generation,
std::unique_ptr<ScopedAsyncTrace> async_trace,
- const scoped_refptr<VideoFrame>& frame) {
+ scoped_refptr<VideoFrame> frame) {
DVLOG(3) << __func__ << " : "
<< (frame ? frame->AsHumanReadableString() : "null");
+
+ // No |frame| indicates an error creating it.
+ if (!frame) {
+ DLOG(ERROR) << __func__ << " |frame| is null";
+ EnterTerminalState(State::kError);
+ return;
+ }
+
if (reset_generation == reset_generation_) {
// TODO(liberato): We might actually have a SW decoder. Consider setting
// this to false if so, especially for higher bitrates.
frame->metadata()->SetBoolean(VideoFrameMetadata::POWER_EFFICIENT, true);
- output_cb_.Run(frame);
+ output_cb_.Run(std::move(frame));
}
}
@@ -864,7 +870,7 @@ void MediaCodecVideoDecoder::ForwardVideoFrame(
// After |closure| runs:
// 1) no VideoFrames from before the Reset() will be output, and
// 2) no DecodeCBs (including EOS) from before the Reset() will be run.
-void MediaCodecVideoDecoder::Reset(const base::Closure& closure) {
+void MediaCodecVideoDecoder::Reset(base::OnceClosure closure) {
DVLOG(2) << __func__;
DCHECK(!reset_cb_);
reset_generation_++;
@@ -961,7 +967,7 @@ bool MediaCodecVideoDecoder::InTerminalState() {
void MediaCodecVideoDecoder::CancelPendingDecodes(DecodeStatus status) {
for (auto& pending_decode : pending_decodes_)
- pending_decode.decode_cb.Run(status);
+ std::move(pending_decode.decode_cb).Run(status);
pending_decodes_.clear();
if (eos_decode_cb_)
std::move(eos_decode_cb_).Run(status);
diff --git a/chromium/media/gpu/android/media_codec_video_decoder.h b/chromium/media/gpu/android/media_codec_video_decoder.h
index ecb467c2915..abb5c639ba6 100644
--- a/chromium/media/gpu/android/media_codec_video_decoder.h
+++ b/chromium/media/gpu/android/media_codec_video_decoder.h
@@ -75,12 +75,11 @@ class MEDIA_GPU_EXPORT MediaCodecVideoDecoder : public VideoDecoder,
void Initialize(const VideoDecoderConfig& config,
bool low_delay,
CdmContext* cdm_context,
- const InitCB& init_cb,
+ InitCB init_cb,
const OutputCB& output_cb,
const WaitingCB& waiting_cb) override;
- void Decode(scoped_refptr<DecoderBuffer> buffer,
- const DecodeCB& decode_cb) override;
- void Reset(const base::Closure& closure) override;
+ void Decode(scoped_refptr<DecoderBuffer> buffer, DecodeCB decode_cb) override;
+ void Reset(base::OnceClosure closure) override;
bool NeedsBitstreamConversion() const override;
bool CanReadWithoutStalling() const override;
int GetMaxDecodeRequests() const override;
@@ -92,11 +91,11 @@ class MEDIA_GPU_EXPORT MediaCodecVideoDecoder : public VideoDecoder,
// Set up |cdm_context| as part of initialization. Guarantees that |init_cb|
// will be called depending on the outcome, though not necessarily before this
// function returns.
- void SetCdm(CdmContext* cdm_context, const InitCB& init_cb);
+ void SetCdm(CdmContext* cdm_context, InitCB init_cb);
// Called when the Cdm provides |media_crypto|. Will signal |init_cb| based
// on the result, and set the codec config properly.
- void OnMediaCryptoReady(const InitCB& init_cb,
+ void OnMediaCryptoReady(InitCB init_cb,
JavaObjectPtr media_crypto,
bool requires_secure_video_codec);
@@ -177,7 +176,7 @@ class MEDIA_GPU_EXPORT MediaCodecVideoDecoder : public VideoDecoder,
// started when we dequeued the corresponding output buffer.
void ForwardVideoFrame(int reset_generation,
std::unique_ptr<ScopedAsyncTrace> async_trace,
- const scoped_refptr<VideoFrame>& frame);
+ scoped_refptr<VideoFrame> frame);
// Starts draining the codec by queuing an EOS if required. It skips the drain
// if possible.
@@ -224,7 +223,7 @@ class MEDIA_GPU_EXPORT MediaCodecVideoDecoder : public VideoDecoder,
base::Optional<DrainType> drain_type_;
// The current reset cb if a Reset() is in progress.
- base::Closure reset_cb_;
+ base::OnceClosure reset_cb_;
// A generation counter that's incremented every time Reset() is called.
int reset_generation_ = 0;
diff --git a/chromium/media/gpu/android/media_codec_video_decoder_unittest.cc b/chromium/media/gpu/android/media_codec_video_decoder_unittest.cc
index c89270a976b..145dd8b8032 100644
--- a/chromium/media/gpu/android/media_codec_video_decoder_unittest.cc
+++ b/chromium/media/gpu/android/media_codec_video_decoder_unittest.cc
@@ -38,8 +38,8 @@ namespace media {
namespace {
void OutputCb(scoped_refptr<VideoFrame>* output,
- const scoped_refptr<VideoFrame>& frame) {
- *output = frame;
+ scoped_refptr<VideoFrame> frame) {
+ *output = std::move(frame);
}
std::unique_ptr<AndroidOverlay> CreateAndroidOverlayCb(
@@ -184,19 +184,21 @@ class MediaCodecVideoDecoderTest : public testing::TestWithParam<VideoCodec> {
CreateMcvd();
bool result = false;
auto init_cb = [](bool* result_out, bool result) { *result_out = result; };
- mcvd_->Initialize(config, false, cdm_.get(), base::Bind(init_cb, &result),
- base::BindRepeating(&OutputCb, &most_recent_frame_),
- base::DoNothing());
+ mcvd_->Initialize(
+ config, false, cdm_.get(), base::BindOnce(init_cb, &result),
+ base::BindRepeating(&OutputCb, &most_recent_frame_), base::DoNothing());
base::RunLoop().RunUntilIdle();
// If there is a CDM available, then we expect that MCVD will be waiting
// for the media crypto object.
// TODO(liberato): why does CreateJavaObjectPtr() not link?
if (cdm_ && cdm_->media_crypto_ready_cb) {
- cdm_->media_crypto_ready_cb.Run(
- std::make_unique<base::android::ScopedJavaGlobalRef<jobject>>(
- media_crypto_),
- require_secure_video_decoder_);
+ std::move(cdm_->media_crypto_ready_cb)
+ .Run(std::make_unique<base::android::ScopedJavaGlobalRef<jobject>>(
+ media_crypto_),
+ require_secure_video_decoder_);
+ // The callback is consumed, mark that we ran it so tests can verify.
+ cdm_->ran_media_crypto_ready_cb = true;
base::RunLoop().RunUntilIdle();
}
@@ -838,7 +840,7 @@ TEST_P(MediaCodecVideoDecoderTest, CdmInitializationWorksForL3) {
TestVideoConfig::NormalEncrypted(codec_));
ASSERT_TRUE(!!cdm_->new_key_cb);
ASSERT_TRUE(!!cdm_->cdm_unset_cb);
- ASSERT_TRUE(!!cdm_->media_crypto_ready_cb);
+ ASSERT_TRUE(!!cdm_->ran_media_crypto_ready_cb);
ASSERT_EQ(surface_chooser_->current_state_.is_secure, true);
ASSERT_EQ(surface_chooser_->current_state_.is_required, false);
ASSERT_FALSE(codec_allocator_->most_recent_config->requires_secure_codec);
@@ -857,7 +859,7 @@ TEST_P(MediaCodecVideoDecoderTest, CdmInitializationWorksForL1) {
TestVideoConfig::NormalEncrypted(codec_));
ASSERT_TRUE(!!cdm_->new_key_cb);
ASSERT_TRUE(!!cdm_->cdm_unset_cb);
- ASSERT_TRUE(!!cdm_->media_crypto_ready_cb);
+ ASSERT_TRUE(!!cdm_->ran_media_crypto_ready_cb);
ASSERT_EQ(surface_chooser_->current_state_.is_secure, true);
ASSERT_EQ(surface_chooser_->current_state_.is_required, true);
ASSERT_TRUE(codec_allocator_->most_recent_config->requires_secure_codec);
@@ -876,7 +878,7 @@ TEST_P(MediaCodecVideoDecoderTest, CdmIsSetEvenForClearStream) {
InitializeWithOverlay_OneDecodePending(TestVideoConfig::Large(codec_));
ASSERT_TRUE(!!cdm_->new_key_cb);
ASSERT_TRUE(!!cdm_->cdm_unset_cb);
- ASSERT_TRUE(!!cdm_->media_crypto_ready_cb);
+ ASSERT_TRUE(!!cdm_->ran_media_crypto_ready_cb);
ASSERT_EQ(surface_chooser_->current_state_.is_secure, true);
ASSERT_EQ(surface_chooser_->current_state_.is_required, false);
ASSERT_FALSE(codec_allocator_->most_recent_config->requires_secure_codec);
@@ -895,6 +897,7 @@ TEST_P(MediaCodecVideoDecoderTest, NoMediaCryptoContext_ClearStream) {
ASSERT_FALSE(!!cdm_->new_key_cb);
ASSERT_FALSE(!!cdm_->cdm_unset_cb);
ASSERT_FALSE(!!cdm_->media_crypto_ready_cb);
+ ASSERT_FALSE(!!cdm_->ran_media_crypto_ready_cb);
ASSERT_EQ(surface_chooser_->current_state_.is_secure, false);
ASSERT_EQ(surface_chooser_->current_state_.is_required, false);
ASSERT_FALSE(codec_allocator_->most_recent_config->requires_secure_codec);
diff --git a/chromium/media/gpu/android/shared_image_video.cc b/chromium/media/gpu/android/shared_image_video.cc
index 26ecfa18121..49e6dd39ad3 100644
--- a/chromium/media/gpu/android/shared_image_video.cc
+++ b/chromium/media/gpu/android/shared_image_video.cc
@@ -6,24 +6,115 @@
#include <utility>
+#include "base/android/scoped_hardware_buffer_fence_sync.h"
+#include "base/android/scoped_hardware_buffer_handle.h"
+#include "components/viz/common/gpu/vulkan_context_provider.h"
#include "components/viz/common/resources/resource_format_utils.h"
#include "components/viz/common/resources/resource_sizes.h"
#include "gpu/command_buffer/common/shared_image_usage.h"
#include "gpu/command_buffer/service/abstract_texture.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
#include "gpu/command_buffer/service/memory_tracking.h"
+#include "gpu/command_buffer/service/shared_context_state.h"
#include "gpu/command_buffer/service/shared_image_representation.h"
+#include "gpu/command_buffer/service/skia_utils.h"
#include "gpu/command_buffer/service/texture_manager.h"
+#include "gpu/vulkan/vulkan_device_queue.h"
+#include "gpu/vulkan/vulkan_fence_helper.h"
+#include "gpu/vulkan/vulkan_function_pointers.h"
+#include "gpu/vulkan/vulkan_implementation.h"
+#include "gpu/vulkan/vulkan_util.h"
#include "media/gpu/android/codec_image.h"
+#include "third_party/skia/include/core/SkPromiseImageTexture.h"
+#include "third_party/skia/include/gpu/GrBackendSemaphore.h"
+#include "third_party/skia/include/gpu/GrBackendSurface.h"
namespace media {
+namespace {
+sk_sp<SkPromiseImageTexture> CreatePromiseTexture(
+ viz::VulkanContextProvider* context_provider,
+ base::android::ScopedHardwareBufferHandle ahb_handle,
+ gfx::Size size,
+ viz::ResourceFormat format) {
+ gpu::VulkanImplementation* vk_implementation =
+ context_provider->GetVulkanImplementation();
+ VkDevice vk_device = context_provider->GetDeviceQueue()->GetVulkanDevice();
+ VkPhysicalDevice vk_physical_device =
+ context_provider->GetDeviceQueue()->GetVulkanPhysicalDevice();
+
+ // Create a VkImage and import AHB.
+ VkImage vk_image;
+ VkImageCreateInfo vk_image_info;
+ VkDeviceMemory vk_device_memory;
+ VkDeviceSize mem_allocation_size;
+ gpu::VulkanYCbCrInfo ycbcr_info;
+ if (!vk_implementation->CreateVkImageAndImportAHB(
+ vk_device, vk_physical_device, size, std::move(ahb_handle), &vk_image,
+ &vk_image_info, &vk_device_memory, &mem_allocation_size,
+ &ycbcr_info)) {
+ return nullptr;
+ }
+
+ GrVkYcbcrConversionInfo fYcbcrConversionInfo(
+ static_cast<VkSamplerYcbcrModelConversion>(
+ ycbcr_info.suggested_ycbcr_model),
+ static_cast<VkSamplerYcbcrRange>(ycbcr_info.suggested_ycbcr_range),
+ static_cast<VkChromaLocation>(ycbcr_info.suggested_xchroma_offset),
+ static_cast<VkChromaLocation>(ycbcr_info.suggested_ychroma_offset),
+ VK_FILTER_LINEAR, // VkFilter
+ 0, // VkBool32 forceExplicitReconstruction
+ ycbcr_info.external_format,
+ static_cast<VkFormatFeatureFlags>(ycbcr_info.format_features));
+
+ // Create backend texture from the VkImage.
+ GrVkAlloc alloc = {vk_device_memory, 0, mem_allocation_size, 0};
+ GrVkImageInfo vk_info = {vk_image,
+ alloc,
+ vk_image_info.tiling,
+ vk_image_info.initialLayout,
+ vk_image_info.format,
+ vk_image_info.mipLevels,
+ VK_QUEUE_FAMILY_EXTERNAL,
+ fYcbcrConversionInfo};
+
+ // TODO(bsalomon): Determine whether it makes sense to attempt to reuse this
+ // if the vk_info stays the same on subsequent calls.
+ auto promise_texture = SkPromiseImageTexture::Make(
+ GrBackendTexture(size.width(), size.height(), vk_info));
+ if (!promise_texture) {
+ vkDestroyImage(vk_device, vk_image, nullptr);
+ vkFreeMemory(vk_device, vk_device_memory, nullptr);
+ return nullptr;
+ }
+
+ return promise_texture;
+}
+
+void DestroyVkPromiseTexture(viz::VulkanContextProvider* context_provider,
+ sk_sp<SkPromiseImageTexture> promise_texture) {
+ DCHECK(promise_texture);
+ DCHECK(promise_texture->unique());
+
+ GrVkImageInfo vk_image_info;
+ bool result =
+ promise_texture->backendTexture().getVkImageInfo(&vk_image_info);
+ DCHECK(result);
+
+ gpu::VulkanFenceHelper* fence_helper =
+ context_provider->GetDeviceQueue()->GetFenceHelper();
+ fence_helper->EnqueueImageCleanupForSubmittedWork(
+ vk_image_info.fImage, vk_image_info.fAlloc.fMemory);
+}
+
+} // namespace
+
SharedImageVideo::SharedImageVideo(
const gpu::Mailbox& mailbox,
const gfx::ColorSpace color_space,
scoped_refptr<CodecImage> codec_image,
std::unique_ptr<gpu::gles2::AbstractTexture> abstract_texture,
- scoped_refptr<gpu::SharedContextState> shared_context_state,
+ scoped_refptr<gpu::SharedContextState> context_state,
bool is_thread_safe)
: SharedImageBacking(
mailbox,
@@ -37,19 +128,19 @@ SharedImageVideo::SharedImageVideo(
is_thread_safe),
codec_image_(std::move(codec_image)),
abstract_texture_(std::move(abstract_texture)),
- shared_context_state_(std::move(shared_context_state)) {
+ context_state_(std::move(context_state)) {
DCHECK(codec_image_);
- DCHECK(shared_context_state_);
+ DCHECK(context_state_);
// Currently this backing is not thread safe.
DCHECK(!is_thread_safe);
- shared_context_state_->AddContextLostObserver(this);
+ context_state_->AddContextLostObserver(this);
}
SharedImageVideo::~SharedImageVideo() {
codec_image_->ReleaseCodecBuffer();
- if (shared_context_state_)
- shared_context_state_->RemoveContextLostObserver(this);
+ if (context_state_)
+ context_state_->RemoveContextLostObserver(this);
}
bool SharedImageVideo::IsCleared() const {
@@ -81,11 +172,41 @@ void SharedImageVideo::OnContextLost() {
// texture owner's texture was created on shared context. Once shared context
// is lost, no one should try to use that texture.
codec_image_->ReleaseCodecBuffer();
- shared_context_state_->RemoveContextLostObserver(this);
- shared_context_state_ = nullptr;
+ context_state_->RemoveContextLostObserver(this);
+ context_state_ = nullptr;
}
-// Representation of a SharedImageCodecImage as a GL Texture.
+base::Optional<gpu::VulkanYCbCrInfo> SharedImageVideo::GetYcbcrInfo() {
+ // For non-vulkan context, return null.
+ if (!context_state_->GrContextIsVulkan())
+ return base::nullopt;
+
+ // Render the codec image.
+ codec_image_->RenderToFrontBuffer();
+
+ // Get the AHB from the latest image.
+ auto scoped_hardware_buffer =
+ codec_image_->texture_owner()->GetAHardwareBuffer();
+ if (!scoped_hardware_buffer) {
+ return base::nullopt;
+ }
+
+ DCHECK(scoped_hardware_buffer->buffer());
+ auto* context_provider = context_state_->vk_context_provider();
+ gpu::VulkanImplementation* vk_implementation =
+ context_provider->GetVulkanImplementation();
+ VkDevice vk_device = context_provider->GetDeviceQueue()->GetVulkanDevice();
+
+ gpu::VulkanYCbCrInfo ycbcr_info;
+ if (!vk_implementation->GetSamplerYcbcrConversionInfo(
+ vk_device, scoped_hardware_buffer->TakeBuffer(), &ycbcr_info)) {
+ LOG(ERROR) << "Failed to get the ycbcr info.";
+ return base::nullopt;
+ }
+ return base::Optional<gpu::VulkanYCbCrInfo>(ycbcr_info);
+}
+
+// Representation of SharedImageVideo as a GL Texture.
class SharedImageRepresentationGLTextureVideo
: public gpu::SharedImageRepresentationGLTexture {
public:
@@ -101,61 +222,280 @@ class SharedImageRepresentationGLTextureVideo
bool BeginAccess(GLenum mode) override {
auto* video_backing = static_cast<SharedImageVideo*>(backing());
DCHECK(video_backing);
-
- // For (old) overlays, we don't have a texture owner, but overlay promotion
- // might not happen for some reasons. In that case, it will try to draw
- // which should results in no image.
- if (!texture_owner())
- return true;
+ auto* codec_image = video_backing->codec_image_.get();
+ auto* texture_owner = codec_image->texture_owner().get();
// Render the codec image.
- codec_image()->RenderToFrontBuffer();
+ codec_image->RenderToFrontBuffer();
- // Bind the tex image if its not already bound.
- if (!texture_owner()->binds_texture_on_update())
- texture_owner()->EnsureTexImageBound();
+ // Bind the tex image if it's not already bound.
+ if (!texture_owner->binds_texture_on_update())
+ texture_owner->EnsureTexImageBound();
return true;
}
void EndAccess() override {}
private:
- SharedImageVideo* video_backing() {
+ gpu::gles2::Texture* texture_;
+
+ DISALLOW_COPY_AND_ASSIGN(SharedImageRepresentationGLTextureVideo);
+};
+
+// GL backed Skia representation of SharedImageVideo.
+class SharedImageRepresentationVideoSkiaGL
+ : public gpu::SharedImageRepresentationSkia {
+ public:
+ SharedImageRepresentationVideoSkiaGL(gpu::SharedImageManager* manager,
+ gpu::SharedImageBacking* backing,
+ gpu::MemoryTypeTracker* tracker)
+ : gpu::SharedImageRepresentationSkia(manager, backing, tracker) {}
+
+ ~SharedImageRepresentationVideoSkiaGL() override = default;
+
+ sk_sp<SkSurface> BeginWriteAccess(
+ int final_msaa_count,
+ const SkSurfaceProps& surface_props,
+ std::vector<GrBackendSemaphore>* begin_semaphores,
+ std::vector<GrBackendSemaphore>* end_semaphores) override {
+ // Writes are not intended to used for video backed representations.
+ NOTIMPLEMENTED();
+ return nullptr;
+ }
+
+ void EndWriteAccess(sk_sp<SkSurface> surface) override { NOTIMPLEMENTED(); }
+
+ sk_sp<SkPromiseImageTexture> BeginReadAccess(
+ std::vector<GrBackendSemaphore>* begin_semaphores,
+ std::vector<GrBackendSemaphore>* end_semaphores) override {
+ if (promise_texture_)
+ return promise_texture_;
+
auto* video_backing = static_cast<SharedImageVideo*>(backing());
- return video_backing;
+ DCHECK(video_backing);
+ auto* codec_image = video_backing->codec_image_.get();
+ auto* texture_owner = codec_image->texture_owner().get();
+
+ // Render the codec image.
+ codec_image->RenderToFrontBuffer();
+
+ // Bind the tex image if it's not already bound.
+ if (!texture_owner->binds_texture_on_update())
+ texture_owner->EnsureTexImageBound();
+ GrBackendTexture backend_texture;
+ if (!gpu::GetGrBackendTexture(gl::GLContext::GetCurrent()->GetVersionInfo(),
+ GL_TEXTURE_EXTERNAL_OES, size(),
+ texture_owner->GetTextureId(), format(),
+ &backend_texture)) {
+ return nullptr;
+ }
+ promise_texture_ = SkPromiseImageTexture::Make(backend_texture);
+ return promise_texture_;
}
- CodecImage* codec_image() {
- DCHECK(video_backing());
- return video_backing()->codec_image_.get();
+ void EndReadAccess() override {}
+
+ private:
+ sk_sp<SkPromiseImageTexture> promise_texture_;
+};
+
+// Vulkan backed Skia representation of SharedImageVideo.
+class SharedImageRepresentationVideoSkiaVk
+ : public gpu::SharedImageRepresentationSkia {
+ public:
+ SharedImageRepresentationVideoSkiaVk(
+ gpu::SharedImageManager* manager,
+ gpu::SharedImageBacking* backing,
+ scoped_refptr<gpu::SharedContextState> context_state,
+ gpu::MemoryTypeTracker* tracker)
+ : gpu::SharedImageRepresentationSkia(manager, backing, tracker),
+ context_state_(std::move(context_state)) {
+ DCHECK(context_state_);
+ DCHECK(context_state_->vk_context_provider());
}
- TextureOwner* texture_owner() { return codec_image()->texture_owner().get(); }
+ ~SharedImageRepresentationVideoSkiaVk() override {
+ DCHECK(end_access_semaphore_ == VK_NULL_HANDLE);
- gpu::gles2::Texture* texture_;
+ // |promise_texture_| could be null if we never being read.
+ if (!promise_texture_)
+ return;
+ DestroyVkPromiseTexture(context_state_->vk_context_provider(),
+ std::move(promise_texture_));
+ }
- DISALLOW_COPY_AND_ASSIGN(SharedImageRepresentationGLTextureVideo);
+ sk_sp<SkSurface> BeginWriteAccess(
+ int final_msaa_count,
+ const SkSurfaceProps& surface_props,
+ std::vector<GrBackendSemaphore>* begin_semaphores,
+ std::vector<GrBackendSemaphore>* end_semaphores) override {
+ // Writes are not intended to used for video backed representations.
+ NOTIMPLEMENTED();
+ return nullptr;
+ }
+
+ void EndWriteAccess(sk_sp<SkSurface> surface) override { NOTIMPLEMENTED(); }
+
+ sk_sp<SkPromiseImageTexture> BeginReadAccess(
+ std::vector<GrBackendSemaphore>* begin_semaphores,
+ std::vector<GrBackendSemaphore>* end_semaphores) override {
+ if (!scoped_hardware_buffer_) {
+ auto* video_backing = static_cast<SharedImageVideo*>(backing());
+ DCHECK(video_backing);
+ auto* codec_image = video_backing->codec_image_.get();
+ auto* texture_owner = codec_image->texture_owner().get();
+
+ // Render the codec image and get AHB from latest image.
+ codec_image->RenderToFrontBuffer();
+ scoped_hardware_buffer_ = texture_owner->GetAHardwareBuffer();
+ if (!scoped_hardware_buffer_) {
+ LOG(ERROR) << "Failed to get the hardware buffer.";
+ return nullptr;
+ }
+ }
+ DCHECK(scoped_hardware_buffer_->buffer());
+
+ // Wait on the sync fd attached to the buffer to make sure buffer is
+ // ready before the read. This is done by inserting the sync fd semaphore
+ // into begin_semaphore vector which client will wait on.
+ base::ScopedFD sync_fd = scoped_hardware_buffer_->TakeFence();
+ if (!BeginRead(begin_semaphores, end_semaphores, std::move(sync_fd))) {
+ return nullptr;
+ }
+
+ if (!promise_texture_) {
+ // Create the promise texture.
+ promise_texture_ = CreatePromiseTexture(
+ context_state_->vk_context_provider(),
+ scoped_hardware_buffer_->TakeBuffer(), size(), format());
+ }
+ return promise_texture_;
+ }
+
+ void EndReadAccess() override {
+ DCHECK(end_access_semaphore_ != VK_NULL_HANDLE);
+
+ gpu::SemaphoreHandle semaphore_handle =
+ vk_implementation()->GetSemaphoreHandle(vk_device(),
+ end_access_semaphore_);
+ auto sync_fd = semaphore_handle.TakeHandle();
+ DCHECK(sync_fd.is_valid());
+
+ // Pass the end access sync fd to the scoped hardware buffer. This will make
+ // sure that the AImage associated with the hardware buffer will be deleted
+ // only when the read access is ending.
+ scoped_hardware_buffer_->SetReadFence(std::move(sync_fd), true);
+ fence_helper()->EnqueueSemaphoreCleanupForSubmittedWork(
+ end_access_semaphore_);
+ end_access_semaphore_ = VK_NULL_HANDLE;
+ }
+
+ private:
+ bool BeginRead(std::vector<GrBackendSemaphore>* begin_semaphores,
+ std::vector<GrBackendSemaphore>* end_semaphores,
+ base::ScopedFD sync_fd) {
+ DCHECK(begin_semaphores);
+ DCHECK(end_semaphores);
+ DCHECK(end_access_semaphore_ == VK_NULL_HANDLE);
+
+ VkSemaphore begin_access_semaphore = VK_NULL_HANDLE;
+ if (sync_fd.is_valid()) {
+ begin_access_semaphore = vk_implementation()->ImportSemaphoreHandle(
+ vk_device(),
+ gpu::SemaphoreHandle(VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT,
+ std::move(sync_fd)));
+ if (begin_access_semaphore == VK_NULL_HANDLE) {
+ DLOG(ERROR) << "Failed to import semaphore from sync_fd.";
+ return false;
+ }
+ }
+
+ end_access_semaphore_ =
+ vk_implementation()->CreateExternalSemaphore(vk_device());
+
+ if (end_access_semaphore_ == VK_NULL_HANDLE) {
+ DLOG(ERROR) << "Failed to create the external semaphore.";
+ if (begin_access_semaphore != VK_NULL_HANDLE) {
+ vkDestroySemaphore(vk_device(), begin_access_semaphore,
+ nullptr /* pAllocator */);
+ }
+ return false;
+ }
+ end_semaphores->emplace_back();
+ end_semaphores->back().initVulkan(end_access_semaphore_);
+
+ if (begin_access_semaphore != VK_NULL_HANDLE) {
+ begin_semaphores->emplace_back();
+ begin_semaphores->back().initVulkan(begin_access_semaphore);
+ }
+ return true;
+ }
+
+ VkDevice vk_device() {
+ return context_state_->vk_context_provider()
+ ->GetDeviceQueue()
+ ->GetVulkanDevice();
+ }
+
+ gpu::VulkanImplementation* vk_implementation() {
+ return context_state_->vk_context_provider()->GetVulkanImplementation();
+ }
+
+ gpu::VulkanFenceHelper* fence_helper() {
+ return context_state_->vk_context_provider()
+ ->GetDeviceQueue()
+ ->GetFenceHelper();
+ }
+
+ sk_sp<SkPromiseImageTexture> promise_texture_;
+ scoped_refptr<gpu::SharedContextState> context_state_;
+ std::unique_ptr<base::android::ScopedHardwareBufferFenceSync>
+ scoped_hardware_buffer_;
+ VkSemaphore end_access_semaphore_ = VK_NULL_HANDLE;
};
+// TODO(vikassoni): Currently GLRenderer doesn't support overlays with shared
+// image. Add support for overlays in GLRenderer as well as overlay
+// representations of shared image.
std::unique_ptr<gpu::SharedImageRepresentationGLTexture>
SharedImageVideo::ProduceGLTexture(gpu::SharedImageManager* manager,
gpu::MemoryTypeTracker* tracker) {
- // TODO(vikassoni): Also fix how overlays work with shared images to enable
- // this representation. To make overlays work, we need to add a new overlay
- // representation which can notify promotion hints and schedule overlay
- // planes via |codec_image_|.
- NOTIMPLEMENTED();
- return nullptr;
+ // For (old) overlays, we don't have a texture owner, but overlay promotion
+ // might not happen for some reasons. In that case, it will try to draw
+ // which should result in no image.
+ if (!codec_image_->texture_owner())
+ return nullptr;
+ auto* texture = gpu::gles2::Texture::CheckedCast(
+ codec_image_->texture_owner()->GetTextureBase());
+ DCHECK(texture);
+
+ return std::make_unique<SharedImageRepresentationGLTextureVideo>(
+ manager, this, tracker, texture);
}
+// Currently SkiaRenderer doesn't support overlays.
std::unique_ptr<gpu::SharedImageRepresentationSkia>
SharedImageVideo::ProduceSkia(
gpu::SharedImageManager* manager,
gpu::MemoryTypeTracker* tracker,
scoped_refptr<gpu::SharedContextState> context_state) {
- // TODO(vikassoni): Implement in follow up patch.
- NOTIMPLEMENTED();
- return nullptr;
+ DCHECK(context_state);
+
+ // For (old) overlays, we don't have a texture owner, but overlay promotion
+ // might not happen for some reasons. In that case, it will try to draw
+ // which should result in no image.
+ if (!codec_image_->texture_owner())
+ return nullptr;
+
+ if (context_state->GrContextIsVulkan()) {
+ return std::make_unique<SharedImageRepresentationVideoSkiaVk>(
+ manager, this, std::move(context_state), tracker);
+ }
+
+ DCHECK(context_state->GrContextIsGL());
+ // In GL mode, use the texture id of the TextureOwner.
+ return std::make_unique<SharedImageRepresentationVideoSkiaGL>(manager, this,
+ tracker);
}
} // namespace media
diff --git a/chromium/media/gpu/android/shared_image_video.h b/chromium/media/gpu/android/shared_image_video.h
index 0cd0875d7d5..dc4d567c584 100644
--- a/chromium/media/gpu/android/shared_image_video.h
+++ b/chromium/media/gpu/android/shared_image_video.h
@@ -8,8 +8,10 @@
#include <memory>
#include "base/memory/scoped_refptr.h"
+#include "base/optional.h"
#include "gpu/command_buffer/service/shared_context_state.h"
#include "gpu/command_buffer/service/shared_image_backing.h"
+#include "gpu/ipc/common/vulkan_ycbcr_info.h"
#include "media/gpu/media_gpu_export.h"
namespace gpu {
@@ -53,6 +55,10 @@ class MEDIA_GPU_EXPORT SharedImageVideo
// SharedContextState::ContextLostObserver implementation.
void OnContextLost() override;
+ // Returns ycbcr information. This is only valid in vulkan context and
+ // nullopt for other context.
+ base::Optional<gpu::VulkanYCbCrInfo> GetYcbcrInfo();
+
protected:
std::unique_ptr<gpu::SharedImageRepresentationGLTexture> ProduceGLTexture(
gpu::SharedImageManager* manager,
@@ -68,12 +74,14 @@ class MEDIA_GPU_EXPORT SharedImageVideo
private:
friend class SharedImageRepresentationGLTextureVideo;
+ friend class SharedImageRepresentationVideoSkiaGL;
+ friend class SharedImageRepresentationVideoSkiaVk;
scoped_refptr<CodecImage> codec_image_;
// |abstract_texture_| is only used for legacy mailbox.
std::unique_ptr<gpu::gles2::AbstractTexture> abstract_texture_;
- scoped_refptr<gpu::SharedContextState> shared_context_state_;
+ scoped_refptr<gpu::SharedContextState> context_state_;
DISALLOW_COPY_AND_ASSIGN(SharedImageVideo);
};
diff --git a/chromium/media/gpu/android/surface_chooser_helper.h b/chromium/media/gpu/android/surface_chooser_helper.h
index 5d77f8adb08..a019d031e15 100644
--- a/chromium/media/gpu/android/surface_chooser_helper.h
+++ b/chromium/media/gpu/android/surface_chooser_helper.h
@@ -9,7 +9,7 @@
#include "base/macros.h"
#include "base/time/time.h"
-#include "media/base/video_rotation.h"
+#include "media/base/video_transformation.h"
#include "media/gpu/android/android_video_surface_chooser.h"
#include "media/gpu/android/promotion_hint_aggregator.h"
#include "media/gpu/media_gpu_export.h"
diff --git a/chromium/media/gpu/android/surface_texture_gl_owner_unittest.cc b/chromium/media/gpu/android/surface_texture_gl_owner_unittest.cc
index 3d4245801ed..9eace9c4737 100644
--- a/chromium/media/gpu/android/surface_texture_gl_owner_unittest.cc
+++ b/chromium/media/gpu/android/surface_texture_gl_owner_unittest.cc
@@ -10,7 +10,7 @@
#include "base/bind.h"
#include "base/logging.h"
-#include "base/message_loop/message_loop.h"
+#include "base/test/scoped_task_environment.h"
#include "media/gpu/android/mock_abstract_texture.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -74,7 +74,7 @@ class SurfaceTextureGLOwnerTest : public testing::Test {
scoped_refptr<gl::GLContext> context_;
scoped_refptr<gl::GLShareGroup> share_group_;
scoped_refptr<gl::GLSurface> surface_;
- base::MessageLoop message_loop_;
+ base::test::ScopedTaskEnvironment scoped_task_environment_;
};
TEST_F(SurfaceTextureGLOwnerTest, OwnerReturnsServiceId) {
diff --git a/chromium/media/gpu/android/texture_owner.cc b/chromium/media/gpu/android/texture_owner.cc
index d00fd6abedd..8e6ed6d4c32 100644
--- a/chromium/media/gpu/android/texture_owner.cc
+++ b/chromium/media/gpu/android/texture_owner.cc
@@ -43,9 +43,9 @@ scoped_refptr<TextureOwner> TextureOwner::Create(
std::unique_ptr<gpu::gles2::AbstractTexture> texture,
Mode mode) {
switch (mode) {
- case Mode::kAImageReaderSecure:
- return new ImageReaderGLOwner(std::move(texture), mode);
case Mode::kAImageReaderInsecure:
+ case Mode::kAImageReaderInsecureSurfaceControl:
+ case Mode::kAImageReaderSecureSurfaceControl:
return new ImageReaderGLOwner(std::move(texture), mode);
case Mode::kSurfaceTextureInsecure:
return new SurfaceTextureGLOwner(std::move(texture));
@@ -57,7 +57,7 @@ scoped_refptr<TextureOwner> TextureOwner::Create(
// static
std::unique_ptr<gpu::gles2::AbstractTexture> TextureOwner::CreateTexture(
- gpu::SharedContextState* context_state) {
+ scoped_refptr<gpu::SharedContextState> context_state) {
DCHECK(context_state);
// This assumes a non-passthrough (validating) command decoder, which is safe
@@ -70,7 +70,7 @@ std::unique_ptr<gpu::gles2::AbstractTexture> TextureOwner::CreateTexture(
0, // height
1, // depth
0, // border
- GL_RGBA, GL_UNSIGNED_BYTE, context_state);
+ GL_RGBA, GL_UNSIGNED_BYTE, std::move(context_state));
}
GLuint TextureOwner::GetTextureId() const {
diff --git a/chromium/media/gpu/android/texture_owner.h b/chromium/media/gpu/android/texture_owner.h
index 93edf5bd235..af807f09a19 100644
--- a/chromium/media/gpu/android/texture_owner.h
+++ b/chromium/media/gpu/android/texture_owner.h
@@ -47,10 +47,12 @@ class MEDIA_GPU_EXPORT TextureOwner
// |texture| should be either from CreateAbstractTexture() or a mock. The
// corresponding GL context must be current.
// Mode indicates which framework API to use and whether the video textures
- // created using this owner should be hardware protected.
+ // created using this owner should be hardware protected. It also indicates
+ // whether SurfaceControl is being used or not.
enum class Mode {
- kAImageReaderSecure,
kAImageReaderInsecure,
+ kAImageReaderInsecureSurfaceControl,
+ kAImageReaderSecureSurfaceControl,
kSurfaceTextureInsecure
};
static scoped_refptr<TextureOwner> Create(
@@ -59,7 +61,7 @@ class MEDIA_GPU_EXPORT TextureOwner
// Create a texture that's appropriate for a TextureOwner.
static std::unique_ptr<gpu::gles2::AbstractTexture> CreateTexture(
- gpu::SharedContextState* context_state);
+ scoped_refptr<gpu::SharedContextState> context_state);
scoped_refptr<base::SingleThreadTaskRunner> task_runner() {
return task_runner_;
diff --git a/chromium/media/gpu/android/video_frame_factory.h b/chromium/media/gpu/android/video_frame_factory.h
index c4708dae9b7..1650038e63f 100644
--- a/chromium/media/gpu/android/video_frame_factory.h
+++ b/chromium/media/gpu/android/video_frame_factory.h
@@ -32,8 +32,7 @@ class MEDIA_GPU_EXPORT VideoFrameFactory {
public:
using GetStubCb = base::Callback<gpu::CommandBufferStub*()>;
using InitCb = base::RepeatingCallback<void(scoped_refptr<TextureOwner>)>;
- using OnceOutputCb =
- base::OnceCallback<void(const scoped_refptr<VideoFrame>&)>;
+ using OnceOutputCb = base::OnceCallback<void(scoped_refptr<VideoFrame>)>;
VideoFrameFactory() = default;
virtual ~VideoFrameFactory() = default;
diff --git a/chromium/media/gpu/android/video_frame_factory_impl.cc b/chromium/media/gpu/android/video_frame_factory_impl.cc
index 06e74b98202..c65ed6ad333 100644
--- a/chromium/media/gpu/android/video_frame_factory_impl.cc
+++ b/chromium/media/gpu/android/video_frame_factory_impl.cc
@@ -60,10 +60,10 @@ TextureOwner::Mode GetTextureOwnerMode(
: TextureOwner::Mode::kSurfaceTextureInsecure;
case VideoFrameFactory::OverlayMode::kSurfaceControlSecure:
DCHECK(a_image_reader_supported);
- return TextureOwner::Mode::kAImageReaderSecure;
+ return TextureOwner::Mode::kAImageReaderSecureSurfaceControl;
case VideoFrameFactory::OverlayMode::kSurfaceControlInsecure:
DCHECK(a_image_reader_supported);
- return TextureOwner::Mode::kAImageReaderInsecure;
+ return TextureOwner::Mode::kAImageReaderInsecureSurfaceControl;
}
NOTREACHED();
@@ -91,9 +91,12 @@ using gpu::gles2::AbstractTexture;
VideoFrameFactoryImpl::VideoFrameFactoryImpl(
scoped_refptr<base::SingleThreadTaskRunner> gpu_task_runner,
- GetStubCb get_stub_cb)
+ GetStubCb get_stub_cb,
+ const gpu::GpuPreferences& gpu_preferences)
: gpu_task_runner_(std::move(gpu_task_runner)),
- get_stub_cb_(std::move(get_stub_cb)) {}
+ get_stub_cb_(std::move(get_stub_cb)),
+ enable_threaded_texture_mailboxes_(
+ gpu_preferences.enable_threaded_texture_mailboxes) {}
VideoFrameFactoryImpl::~VideoFrameFactoryImpl() {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
@@ -105,6 +108,7 @@ void VideoFrameFactoryImpl::Initialize(OverlayMode overlay_mode,
InitCb init_cb) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(!gpu_video_frame_factory_);
+ overlay_mode_ = overlay_mode;
gpu_video_frame_factory_ = std::make_unique<GpuVideoFrameFactory>();
base::PostTaskAndReplyWithResult(
gpu_task_runner_.get(), FROM_HERE,
@@ -151,16 +155,122 @@ void VideoFrameFactoryImpl::CreateVideoFrame(
PromotionHintAggregator::NotifyPromotionHintCB promotion_hint_cb,
OnceOutputCb output_cb) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+ gfx::Size coded_size = output_buffer->size();
+ gfx::Rect visible_rect(coded_size);
+ // The pixel format doesn't matter as long as it's valid for texture frames.
+ VideoPixelFormat pixel_format = PIXEL_FORMAT_ARGB;
+
+ // Check that we can create a VideoFrame for this config before trying to
+ // create the textures for it.
+ if (!VideoFrame::IsValidConfig(pixel_format, VideoFrame::STORAGE_OPAQUE,
+ coded_size, visible_rect, natural_size)) {
+ LOG(ERROR) << __func__ << " unsupported video frame format";
+ std::move(output_cb).Run(nullptr);
+ return;
+ }
+
+ auto image_ready_cb = base::BindOnce(
+ &VideoFrameFactoryImpl::OnImageReady, std::move(output_cb), timestamp,
+ coded_size, natural_size, texture_owner_, pixel_format, overlay_mode_,
+ enable_threaded_texture_mailboxes_);
+
gpu_task_runner_->PostTask(
FROM_HERE,
- base::BindOnce(&GpuVideoFrameFactory::CreateVideoFrame,
+ base::BindOnce(&GpuVideoFrameFactory::CreateImage,
base::Unretained(gpu_video_frame_factory_.get()),
- base::Passed(&output_buffer), texture_owner_, timestamp,
- natural_size, std::move(promotion_hint_cb),
- std::move(output_cb),
+ base::Passed(&output_buffer), texture_owner_,
+ std::move(promotion_hint_cb), std::move(image_ready_cb),
base::ThreadTaskRunnerHandle::Get()));
}
+// static
+void VideoFrameFactoryImpl::OnImageReady(
+ OnceOutputCb output_cb,
+ base::TimeDelta timestamp,
+ gfx::Size coded_size,
+ gfx::Size natural_size,
+ scoped_refptr<TextureOwner> texture_owner,
+ VideoPixelFormat pixel_format,
+ OverlayMode overlay_mode,
+ bool enable_threaded_texture_mailboxes,
+ gpu::Mailbox mailbox,
+ VideoFrame::ReleaseMailboxCB release_cb,
+ base::Optional<gpu::VulkanYCbCrInfo> ycbcr_info) {
+ TRACE_EVENT0("media", "VideoVideoFrameFactoryImpl::OnVideoFrameImageReady");
+
+ gpu::MailboxHolder mailbox_holders[VideoFrame::kMaxPlanes];
+ mailbox_holders[0] =
+ gpu::MailboxHolder(mailbox, gpu::SyncToken(), GL_TEXTURE_EXTERNAL_OES);
+
+ // TODO(liberato): We should set the promotion hint cb here on the image. We
+ // should also set the output buffer params; we shouldn't send the output
+ // buffer to the gpu thread, since the codec image isn't in use anyway. We
+ // can access it on any thread. We'll also need to get new images when we
+ // switch texture owners. That's left for future work.
+
+ // TODO(liberato): When we switch to a pool, we need to provide some way to
+ // call MaybeRenderEarly that doesn't depend on |release_cb|. I suppose we
+ // could get a RepeatingCallback that's a "reuse cb", that we'd attach to the
+ // VideoFrame's release cb, since we have to wait for the sync token anyway.
+ // That would run on the gpu thread, and could MaybeRenderEarly.
+
+ gfx::Rect visible_rect(coded_size);
+
+ auto frame = VideoFrame::WrapNativeTextures(
+ pixel_format, mailbox_holders, VideoFrame::ReleaseMailboxCB(), coded_size,
+ visible_rect, natural_size, timestamp);
+
+ frame->set_ycbcr_info(ycbcr_info);
+ // If, for some reason, we failed to create a frame, then fail. Note that we
+ // don't need to call |release_cb|; dropping it is okay since the api says so.
+ if (!frame) {
+ LOG(ERROR) << __func__ << " failed to create video frame";
+ std::move(output_cb).Run(nullptr);
+ return;
+ }
+
+ // The frames must be copied when threaded texture mailboxes are in use
+ // (http://crbug.com/582170).
+ if (enable_threaded_texture_mailboxes)
+ frame->metadata()->SetBoolean(VideoFrameMetadata::COPY_REQUIRED, true);
+
+ const bool is_surface_control =
+ overlay_mode == OverlayMode::kSurfaceControlSecure ||
+ overlay_mode == OverlayMode::kSurfaceControlInsecure;
+ const bool wants_promotion_hints =
+ overlay_mode == OverlayMode::kRequestPromotionHints;
+
+ // Remember that we can't access |texture_owner|, but we can check if we have
+ // one here.
+ bool allow_overlay = false;
+ if (is_surface_control) {
+ DCHECK(texture_owner);
+ allow_overlay = true;
+ } else {
+ // We unconditionally mark the picture as overlayable, even if
+ // |!texture_owner|, if we want to get hints. It's required, else we won't
+ // get hints.
+ allow_overlay = !texture_owner || wants_promotion_hints;
+ }
+
+ frame->metadata()->SetBoolean(VideoFrameMetadata::ALLOW_OVERLAY,
+ allow_overlay);
+ frame->metadata()->SetBoolean(VideoFrameMetadata::WANTS_PROMOTION_HINT,
+ wants_promotion_hints);
+ frame->metadata()->SetBoolean(VideoFrameMetadata::TEXTURE_OWNER,
+ !!texture_owner);
+
+ frame->SetReleaseMailboxCB(std::move(release_cb));
+
+ // Note that we don't want to handle the CodecImageGroup here. It needs to be
+ // accessed on the gpu thread. Once we move to pooling, only the initial
+ // create / destroy operations will affect it anyway, so it might as well stay
+ // on the gpu thread.
+
+ std::move(output_cb).Run(std::move(frame));
+}
+
void VideoFrameFactoryImpl::RunAfterPendingVideoFrames(
base::OnceClosure closure) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
@@ -183,7 +293,6 @@ scoped_refptr<TextureOwner> GpuVideoFrameFactory::Initialize(
VideoFrameFactoryImpl::OverlayMode overlay_mode,
VideoFrameFactoryImpl::GetStubCb get_stub_cb) {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
- overlay_mode_ = overlay_mode;
stub_ = get_stub_cb.Run();
if (!MakeContextCurrent(stub_))
return nullptr;
@@ -209,26 +318,26 @@ scoped_refptr<TextureOwner> GpuVideoFrameFactory::Initialize(
ContextStateResultUMA(result);
return nullptr;
}
- return TextureOwner::Create(TextureOwner::CreateTexture(shared_context.get()),
- GetTextureOwnerMode(overlay_mode_));
+ return TextureOwner::Create(TextureOwner::CreateTexture(shared_context),
+ GetTextureOwnerMode(overlay_mode));
}
-void GpuVideoFrameFactory::CreateVideoFrame(
+void GpuVideoFrameFactory::CreateImage(
std::unique_ptr<CodecOutputBuffer> output_buffer,
scoped_refptr<TextureOwner> texture_owner_,
- base::TimeDelta timestamp,
- gfx::Size natural_size,
PromotionHintAggregator::NotifyPromotionHintCB promotion_hint_cb,
- VideoFrameFactory::OnceOutputCb output_cb,
+ VideoFrameFactoryImpl::ImageReadyCB image_ready_cb,
scoped_refptr<base::SingleThreadTaskRunner> task_runner) {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
- scoped_refptr<VideoFrame> frame;
- CodecImage* codec_image = nullptr;
- CreateVideoFrameInternal(std::move(output_buffer), std::move(texture_owner_),
- timestamp, natural_size,
- std::move(promotion_hint_cb), &frame, &codec_image);
+
+ // Generate a shared image mailbox.
+ auto mailbox = gpu::Mailbox::GenerateForSharedImage();
+
+ bool success =
+ CreateImageInternal(std::move(output_buffer), std::move(texture_owner_),
+ mailbox, std::move(promotion_hint_cb));
TRACE_EVENT0("media", "GpuVideoFrameFactory::CreateVideoFrame");
- if (!frame)
+ if (!success)
return;
// Try to render this frame if possible.
@@ -241,48 +350,35 @@ void GpuVideoFrameFactory::CreateVideoFrame(
// image stub destruction will cause all the shared images to be destroyed.
auto destroy_shared_image =
stub_->channel()->shared_image_stub()->GetSharedImageDestructionCallback(
- frame->mailbox_holder(0).mailbox);
+ mailbox);
// Guarantee that the SharedImage is destroyed even if the VideoFrame is
// dropped. Otherwise we could keep shared images we don't need alive.
auto release_cb = mojo::WrapCallbackWithDefaultInvokeIfNotRun(
BindToCurrentLoop(std::move(destroy_shared_image)), gpu::SyncToken());
- frame->SetReleaseMailboxCB(std::move(release_cb));
+
task_runner->PostTask(FROM_HERE,
- base::BindOnce(std::move(output_cb), std::move(frame)));
+ base::BindOnce(std::move(image_ready_cb), mailbox,
+ std::move(release_cb), ycbcr_info_));
}
-void GpuVideoFrameFactory::CreateVideoFrameInternal(
+bool GpuVideoFrameFactory::CreateImageInternal(
std::unique_ptr<CodecOutputBuffer> output_buffer,
scoped_refptr<TextureOwner> texture_owner_,
- base::TimeDelta timestamp,
- gfx::Size natural_size,
- PromotionHintAggregator::NotifyPromotionHintCB promotion_hint_cb,
- scoped_refptr<VideoFrame>* video_frame_out,
- CodecImage** codec_image_out) {
+ gpu::Mailbox mailbox,
+ PromotionHintAggregator::NotifyPromotionHintCB promotion_hint_cb) {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
if (!MakeContextCurrent(stub_))
- return;
+ return false;
gpu::gles2::ContextGroup* group = stub_->decoder_context()->GetContextGroup();
if (!group)
- return;
+ return false;
gpu::gles2::TextureManager* texture_manager = group->texture_manager();
if (!texture_manager)
- return;
+ return false;
gfx::Size size = output_buffer->size();
- gfx::Rect visible_rect(size);
- // The pixel format doesn't matter as long as it's valid for texture frames.
- VideoPixelFormat pixel_format = PIXEL_FORMAT_ARGB;
-
- // Check that we can create a VideoFrame for this config before creating the
- // TextureRef so that we don't have to clean up the TextureRef if creating the
- // frame fails.
- if (!VideoFrame::IsValidConfig(pixel_format, VideoFrame::STORAGE_OPAQUE, size,
- visible_rect, natural_size)) {
- return;
- }
// Create a Texture and a CodecImage to back it.
std::unique_ptr<AbstractTexture> texture = decoder_helper_->CreateTexture(
@@ -291,7 +387,6 @@ void GpuVideoFrameFactory::CreateVideoFrameInternal(
auto image = base::MakeRefCounted<CodecImage>(
std::move(output_buffer), texture_owner_, std::move(promotion_hint_cb));
images_.push_back(image.get());
- *codec_image_out = image.get();
// Add |image| to our current image group. This makes sure that any overlay
// lasts as long as the images. For TextureOwner, it doesn't do much.
@@ -317,12 +412,9 @@ void GpuVideoFrameFactory::CreateVideoFrameInternal(
if (!shared_context) {
LOG(ERROR) << "GpuVideoFrameFactory: Unable to get a shared context.";
ContextStateResultUMA(result);
- return;
+ return false;
}
- // Generate a shared image mailbox.
- auto mailbox = gpu::Mailbox::GenerateForSharedImage();
-
// Create a shared image.
// TODO(vikassoni): Hardcoding colorspace to SRGB. Figure how if media has a
// colorspace and wire it here.
@@ -333,51 +425,18 @@ void GpuVideoFrameFactory::CreateVideoFrameInternal(
std::move(texture), std::move(shared_context),
false /* is_thread_safe */);
- // Register it with shared image mailbox as well as legacy mailbox.
+ if (!ycbcr_info_)
+ ycbcr_info_ = shared_image->GetYcbcrInfo();
+
+ // Register it with shared image mailbox as well as legacy mailbox. This
+ // keeps |shared_image| around until its destruction cb is called.
// NOTE: Currently none of the video mailbox consumer uses shared image
// mailbox.
DCHECK(stub_->channel()->gpu_channel_manager()->shared_image_manager());
stub_->channel()->shared_image_stub()->factory()->RegisterBacking(
std::move(shared_image), /* legacy_mailbox */ true);
- gpu::MailboxHolder mailbox_holders[VideoFrame::kMaxPlanes];
- mailbox_holders[0] =
- gpu::MailboxHolder(mailbox, gpu::SyncToken(), GL_TEXTURE_EXTERNAL_OES);
-
- auto frame = VideoFrame::WrapNativeTextures(
- pixel_format, mailbox_holders, VideoFrame::ReleaseMailboxCB(), size,
- visible_rect, natural_size, timestamp);
-
- // The frames must be copied when threaded texture mailboxes are in use
- // (http://crbug.com/582170).
- if (group->gpu_preferences().enable_threaded_texture_mailboxes)
- frame->metadata()->SetBoolean(VideoFrameMetadata::COPY_REQUIRED, true);
-
- const bool is_surface_control =
- overlay_mode_ == VideoFrameFactory::OverlayMode::kSurfaceControlSecure ||
- overlay_mode_ == VideoFrameFactory::OverlayMode::kSurfaceControlInsecure;
- const bool wants_promotion_hints =
- overlay_mode_ == VideoFrameFactory::OverlayMode::kRequestPromotionHints;
-
- bool allow_overlay = false;
- if (is_surface_control) {
- DCHECK(texture_owner_);
- allow_overlay = true;
- } else {
- // We unconditionally mark the picture as overlayable, even if
- // |!texture_owner_|, if we want to get hints. It's required, else we won't
- // get hints.
- allow_overlay = !texture_owner_ || wants_promotion_hints;
- }
-
- frame->metadata()->SetBoolean(VideoFrameMetadata::ALLOW_OVERLAY,
- allow_overlay);
- frame->metadata()->SetBoolean(VideoFrameMetadata::WANTS_PROMOTION_HINT,
- wants_promotion_hints);
- frame->metadata()->SetBoolean(VideoFrameMetadata::TEXTURE_OWNER,
- !!texture_owner_);
-
- *video_frame_out = std::move(frame);
+ return true;
}
void GpuVideoFrameFactory::OnWillDestroyStub(bool have_context) {
diff --git a/chromium/media/gpu/android/video_frame_factory_impl.h b/chromium/media/gpu/android/video_frame_factory_impl.h
index e6d4918c401..85e5a7aaceb 100644
--- a/chromium/media/gpu/android/video_frame_factory_impl.h
+++ b/chromium/media/gpu/android/video_frame_factory_impl.h
@@ -13,6 +13,7 @@
#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
#include "gpu/command_buffer/service/shared_image_representation.h"
#include "gpu/command_buffer/service/texture_manager.h"
+#include "gpu/ipc/common/vulkan_ycbcr_info.h"
#include "gpu/ipc/service/command_buffer_stub.h"
#include "media/base/video_frame.h"
#include "media/gpu/android/codec_image.h"
@@ -34,10 +35,19 @@ class GpuVideoFrameFactory;
// to a helper class hosted on the gpu thread.
class MEDIA_GPU_EXPORT VideoFrameFactoryImpl : public VideoFrameFactory {
public:
+ // Callback used to return a mailbox and release callback for an image. The
+ // release callback may be dropped without being run, and the image will be
+ // cleaned up properly. The release callback may be called from any thread.
+ using ImageReadyCB =
+ base::OnceCallback<void(gpu::Mailbox mailbox,
+ VideoFrame::ReleaseMailboxCB release_cb,
+ base::Optional<gpu::VulkanYCbCrInfo> ycbcr_info)>;
+
// |get_stub_cb| will be run on |gpu_task_runner|.
VideoFrameFactoryImpl(
scoped_refptr<base::SingleThreadTaskRunner> gpu_task_runner,
- GetStubCb get_stub_cb);
+ GetStubCb get_stub_cb,
+ const gpu::GpuPreferences& gpu_preferences);
~VideoFrameFactoryImpl() override;
void Initialize(OverlayMode overlay_mode, InitCb init_cb) override;
@@ -52,6 +62,29 @@ class MEDIA_GPU_EXPORT VideoFrameFactoryImpl : public VideoFrameFactory {
void RunAfterPendingVideoFrames(base::OnceClosure closure) override;
private:
+ // ImageReadyCB that will construct a VideoFrame, and forward it to
+ // |output_cb| if construction succeeds. This is static for two reasons.
+ // First, we want to snapshot the state of the world when the request is made,
+ // in case things like the texture owner change before it's returned. While
+ // it's unclear that MCVD would actually do this (it drains output buffers
+ // before switching anything, which guarantees that the VideoFrame has been
+ // created and sent to the renderer), it's still much simpler to think about
+ // if this uses the same state as the CreateVideoFrame call.
+ //
+ // Second, this way we don't care about the lifetime of |this|; |output_cb|
+ // can worry about it.
+ static void OnImageReady(OnceOutputCb output_cb,
+ base::TimeDelta timestamp,
+ gfx::Size coded_size,
+ gfx::Size natural_size,
+ scoped_refptr<TextureOwner> texture_owner,
+ VideoPixelFormat pixel_format,
+ OverlayMode overlay_mode,
+ bool enable_threaded_texture_mailboxes,
+ gpu::Mailbox mailbox,
+ VideoFrame::ReleaseMailboxCB release_cb,
+ base::Optional<gpu::VulkanYCbCrInfo> ycbcr_info);
+
// The gpu thread side of the implementation.
std::unique_ptr<GpuVideoFrameFactory> gpu_video_frame_factory_;
scoped_refptr<base::SingleThreadTaskRunner> gpu_task_runner_;
@@ -60,6 +93,11 @@ class MEDIA_GPU_EXPORT VideoFrameFactoryImpl : public VideoFrameFactory {
// The texture owner that video frames should use, or nullptr.
scoped_refptr<TextureOwner> texture_owner_;
+ OverlayMode overlay_mode_ = OverlayMode::kDontRequestPromotionHints;
+
+ // Is the sync mailbox manager enabled?
+ bool enable_threaded_texture_mailboxes_ = false;
+
SEQUENCE_CHECKER(sequence_checker_);
DISALLOW_COPY_AND_ASSIGN(VideoFrameFactoryImpl);
};
@@ -76,14 +114,12 @@ class GpuVideoFrameFactory
VideoFrameFactory::OverlayMode overlay_mode,
VideoFrameFactory::GetStubCb get_stub_cb);
- // Creates and returns a VideoFrame with its ReleaseMailboxCB.
- void CreateVideoFrame(
+ // Creates a SharedImage for |output_buffer|, and returns it via the callback.
+ void CreateImage(
std::unique_ptr<CodecOutputBuffer> output_buffer,
scoped_refptr<TextureOwner> texture_owner,
- base::TimeDelta timestamp,
- gfx::Size natural_size,
PromotionHintAggregator::NotifyPromotionHintCB promotion_hint_cb,
- VideoFrameFactory::OnceOutputCb output_cb,
+ VideoFrameFactoryImpl::ImageReadyCB image_ready_cb,
scoped_refptr<base::SingleThreadTaskRunner> task_runner);
// Set our image group. Must be called before the first call to
@@ -91,15 +127,12 @@ class GpuVideoFrameFactory
void SetImageGroup(scoped_refptr<CodecImageGroup> image_group);
private:
- // Creates an AbstractTexture and VideoFrame.
- void CreateVideoFrameInternal(
+ // Creates a SharedImage for |mailbox|, and returns success or failure.
+ bool CreateImageInternal(
std::unique_ptr<CodecOutputBuffer> output_buffer,
scoped_refptr<TextureOwner> texture_owner,
- base::TimeDelta timestamp,
- gfx::Size natural_size,
- PromotionHintAggregator::NotifyPromotionHintCB promotion_hint_cb,
- scoped_refptr<VideoFrame>* video_frame_out,
- CodecImage** codec_image_out);
+ gpu::Mailbox mailbox,
+ PromotionHintAggregator::NotifyPromotionHintCB promotion_hint_cb);
void OnWillDestroyStub(bool have_context) override;
@@ -111,12 +144,6 @@ class GpuVideoFrameFactory
gpu::CommandBufferStub* stub_ = nullptr;
- // Callback to notify us that an image has been destroyed.
- CodecImage::DestructionCb destruction_cb_;
-
- VideoFrameFactory::OverlayMode overlay_mode_ =
- VideoFrameFactory::OverlayMode::kDontRequestPromotionHints;
-
// A helper for creating textures. Only valid while |stub_| is valid.
std::unique_ptr<GLES2DecoderHelper> decoder_helper_;
@@ -124,6 +151,10 @@ class GpuVideoFrameFactory
// replace this when SetImageGroup() is called.
scoped_refptr<CodecImageGroup> image_group_;
+ // Sampler conversion information which is used in vulkan context. This is
+ // constant for all the frames in a video and hence we cache it.
+ base::Optional<gpu::VulkanYCbCrInfo> ycbcr_info_;
+
THREAD_CHECKER(thread_checker_);
base::WeakPtrFactory<GpuVideoFrameFactory> weak_factory_;
DISALLOW_COPY_AND_ASSIGN(GpuVideoFrameFactory);
diff --git a/chromium/media/gpu/fake_mjpeg_decode_accelerator.cc b/chromium/media/gpu/fake_mjpeg_decode_accelerator.cc
deleted file mode 100644
index 66ce3ddc208..00000000000
--- a/chromium/media/gpu/fake_mjpeg_decode_accelerator.cc
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/gpu/fake_mjpeg_decode_accelerator.h"
-
-#include "base/bind.h"
-#include "base/single_thread_task_runner.h"
-#include "base/threading/thread_task_runner_handle.h"
-#include "media/base/unaligned_shared_memory.h"
-
-namespace media {
-
-FakeMjpegDecodeAccelerator::FakeMjpegDecodeAccelerator(
- const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner)
- : client_task_runner_(base::ThreadTaskRunnerHandle::Get()),
- io_task_runner_(std::move(io_task_runner)),
- decoder_thread_("FakeMjpegDecoderThread"),
- weak_factory_(this) {}
-
-FakeMjpegDecodeAccelerator::~FakeMjpegDecodeAccelerator() {
- DCHECK(client_task_runner_->BelongsToCurrentThread());
-}
-
-bool FakeMjpegDecodeAccelerator::Initialize(
- MjpegDecodeAccelerator::Client* client) {
- DCHECK(client_task_runner_->BelongsToCurrentThread());
- client_ = client;
-
- if (!decoder_thread_.Start()) {
- DLOG(ERROR) << "Failed to start decoding thread.";
- return false;
- }
- decoder_task_runner_ = decoder_thread_.task_runner();
-
- return true;
-}
-
-void FakeMjpegDecodeAccelerator::Decode(
- const BitstreamBuffer& bitstream_buffer,
- const scoped_refptr<VideoFrame>& video_frame) {
- DCHECK(io_task_runner_->BelongsToCurrentThread());
-
- std::unique_ptr<WritableUnalignedMapping> src_shm(
- new WritableUnalignedMapping(bitstream_buffer.handle(),
- bitstream_buffer.size(),
- bitstream_buffer.offset()));
- if (!src_shm->IsValid()) {
- DLOG(ERROR) << "Unable to map shared memory in FakeMjpegDecodeAccelerator";
- NotifyError(bitstream_buffer.id(),
- MjpegDecodeAccelerator::UNREADABLE_INPUT);
- return;
- }
-
- // Unretained |this| is safe because |this| owns |decoder_thread_|.
- decoder_task_runner_->PostTask(
- FROM_HERE,
- base::BindOnce(&FakeMjpegDecodeAccelerator::DecodeOnDecoderThread,
- base::Unretained(this), bitstream_buffer, video_frame,
- base::Passed(&src_shm)));
-}
-
-void FakeMjpegDecodeAccelerator::DecodeOnDecoderThread(
- const BitstreamBuffer& bitstream_buffer,
- const scoped_refptr<VideoFrame>& video_frame,
- std::unique_ptr<WritableUnalignedMapping> src_shm) {
- DCHECK(decoder_task_runner_->BelongsToCurrentThread());
-
- // Do not actually decode the Jpeg data.
- // Instead, just fill the output buffer with zeros.
- size_t allocation_size =
- VideoFrame::AllocationSize(PIXEL_FORMAT_I420, video_frame->coded_size());
- memset(video_frame->data(0), 0, allocation_size);
-
- client_task_runner_->PostTask(
- FROM_HERE,
- base::BindOnce(&FakeMjpegDecodeAccelerator::OnDecodeDoneOnClientThread,
- weak_factory_.GetWeakPtr(), bitstream_buffer.id()));
-}
-
-bool FakeMjpegDecodeAccelerator::IsSupported() {
- return true;
-}
-
-void FakeMjpegDecodeAccelerator::NotifyError(int32_t bitstream_buffer_id,
- Error error) {
- client_task_runner_->PostTask(
- FROM_HERE,
- base::BindOnce(&FakeMjpegDecodeAccelerator::NotifyErrorOnClientThread,
- weak_factory_.GetWeakPtr(), bitstream_buffer_id, error));
-}
-
-void FakeMjpegDecodeAccelerator::NotifyErrorOnClientThread(
- int32_t bitstream_buffer_id,
- Error error) {
- DCHECK(client_task_runner_->BelongsToCurrentThread());
- client_->NotifyError(bitstream_buffer_id, error);
-}
-
-void FakeMjpegDecodeAccelerator::OnDecodeDoneOnClientThread(
- int32_t input_buffer_id) {
- DCHECK(client_task_runner_->BelongsToCurrentThread());
- client_->VideoFrameReady(input_buffer_id);
-}
-
-} // namespace media
diff --git a/chromium/media/gpu/fake_mjpeg_decode_accelerator.h b/chromium/media/gpu/fake_mjpeg_decode_accelerator.h
deleted file mode 100644
index 30d91788200..00000000000
--- a/chromium/media/gpu/fake_mjpeg_decode_accelerator.h
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_GPU_FAKE_MJPEG_DECODE_ACCELERATOR_H_
-#define MEDIA_GPU_FAKE_MJPEG_DECODE_ACCELERATOR_H_
-
-#include <stdint.h>
-
-#include <memory>
-
-#include "base/macros.h"
-#include "base/memory/weak_ptr.h"
-#include "base/threading/thread.h"
-#include "media/base/bitstream_buffer.h"
-#include "media/gpu/media_gpu_export.h"
-#include "media/video/mjpeg_decode_accelerator.h"
-
-namespace base {
-class SingleThreadTaskRunner;
-}
-
-namespace media {
-
-// Uses software-based decoding. The purpose of this class is to enable testing
-// of communication to the MjpegDecodeAccelerator without requiring an actual
-// hardware decoder.
-class MEDIA_GPU_EXPORT FakeMjpegDecodeAccelerator
- : public MjpegDecodeAccelerator {
- public:
- FakeMjpegDecodeAccelerator(
- const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner);
- ~FakeMjpegDecodeAccelerator() override;
-
- // MjpegDecodeAccelerator implementation.
- bool Initialize(MjpegDecodeAccelerator::Client* client) override;
- void Decode(const BitstreamBuffer& bitstream_buffer,
- const scoped_refptr<VideoFrame>& video_frame) override;
- bool IsSupported() override;
-
- private:
- void DecodeOnDecoderThread(const BitstreamBuffer& bitstream_buffer,
- const scoped_refptr<VideoFrame>& video_frame,
- std::unique_ptr<WritableUnalignedMapping> src_shm);
- void NotifyError(int32_t bitstream_buffer_id, Error error);
- void NotifyErrorOnClientThread(int32_t bitstream_buffer_id, Error error);
- void OnDecodeDoneOnClientThread(int32_t input_buffer_id);
-
- // Task runner for calls to |client_|.
- const scoped_refptr<base::SingleThreadTaskRunner> client_task_runner_;
-
- // GPU IO task runner.
- const scoped_refptr<base::SingleThreadTaskRunner> io_task_runner_;
-
- Client* client_ = nullptr;
-
- base::Thread decoder_thread_;
- scoped_refptr<base::SingleThreadTaskRunner> decoder_task_runner_;
-
- base::WeakPtrFactory<FakeMjpegDecodeAccelerator> weak_factory_;
-
- DISALLOW_COPY_AND_ASSIGN(FakeMjpegDecodeAccelerator);
-};
-
-} // namespace media
-
-#endif // MEDIA_GPU_FAKE_MJPEG_DECODE_ACCELERATOR_H_
diff --git a/chromium/media/gpu/format_utils.cc b/chromium/media/gpu/format_utils.cc
index fc4029e9d4a..007bebc733a 100644
--- a/chromium/media/gpu/format_utils.cc
+++ b/chromium/media/gpu/format_utils.cc
@@ -52,6 +52,12 @@ gfx::BufferFormat VideoPixelFormatToGfxBufferFormat(
case PIXEL_FORMAT_NV12:
return gfx::BufferFormat::YUV_420_BIPLANAR;
+ case PIXEL_FORMAT_ABGR:
+ return gfx::BufferFormat::RGBA_8888;
+
+ case PIXEL_FORMAT_XBGR:
+ return gfx::BufferFormat::RGBX_8888;
+
default:
LOG(FATAL) << "Unsupported VideoPixelFormat: " << pixel_format;
return gfx::BufferFormat::BGRX_8888;
diff --git a/chromium/media/gpu/gpu_jpeg_encode_accelerator_factory.cc b/chromium/media/gpu/gpu_jpeg_encode_accelerator_factory.cc
deleted file mode 100644
index bde032c8778..00000000000
--- a/chromium/media/gpu/gpu_jpeg_encode_accelerator_factory.cc
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2018 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/gpu/gpu_jpeg_encode_accelerator_factory.h"
-
-#include "base/bind.h"
-#include "base/single_thread_task_runner.h"
-#include "base/threading/thread_task_runner_handle.h"
-#include "build/build_config.h"
-#include "media/base/media_switches.h"
-#include "media/gpu/buildflags.h"
-
-#if BUILDFLAG(USE_V4L2_CODEC) && defined(ARCH_CPU_ARM_FAMILY)
-#define USE_V4L2_JEA
-#endif
-
-#if BUILDFLAG(USE_VAAPI)
-#include "media/gpu/vaapi/vaapi_jpeg_encode_accelerator.h"
-#endif
-
-#if defined(USE_V4L2_JEA)
-#include "media/gpu/v4l2/v4l2_device.h"
-#include "media/gpu/v4l2/v4l2_jpeg_encode_accelerator.h"
-#endif
-
-namespace media {
-
-namespace {
-
-#if defined(USE_V4L2_JEA)
-std::unique_ptr<JpegEncodeAccelerator> CreateV4L2JEA(
- scoped_refptr<base::SingleThreadTaskRunner> io_task_runner) {
- return std::make_unique<V4L2JpegEncodeAccelerator>(std::move(io_task_runner));
-}
-#endif
-
-#if BUILDFLAG(USE_VAAPI)
-std::unique_ptr<JpegEncodeAccelerator> CreateVaapiJEA(
- scoped_refptr<base::SingleThreadTaskRunner> io_task_runner) {
- return std::make_unique<VaapiJpegEncodeAccelerator>(
- std::move(io_task_runner));
-}
-#endif
-
-} // namespace
-
-// static
-bool GpuJpegEncodeAcceleratorFactory::IsAcceleratedJpegEncodeSupported() {
- auto accelerator_factory_functions = GetAcceleratorFactories();
- return !accelerator_factory_functions.empty();
-}
-
-// static
-std::vector<GpuJpegEncodeAcceleratorFactory::CreateAcceleratorCB>
-GpuJpegEncodeAcceleratorFactory::GetAcceleratorFactories() {
- // This list is ordered by priority of use.
- std::vector<CreateAcceleratorCB> result;
-#if defined(USE_V4L2_JEA)
- result.push_back(base::BindRepeating(&CreateV4L2JEA));
-#endif
-#if BUILDFLAG(USE_VAAPI)
- result.push_back(base::BindRepeating(&CreateVaapiJEA));
-#endif
- return result;
-}
-
-} // namespace media
diff --git a/chromium/media/gpu/gpu_jpeg_encode_accelerator_factory.h b/chromium/media/gpu/gpu_jpeg_encode_accelerator_factory.h
deleted file mode 100644
index 765562eef9a..00000000000
--- a/chromium/media/gpu/gpu_jpeg_encode_accelerator_factory.h
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2018 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_GPU_GPU_JPEG_ENCODE_ACCELERATOR_FACTORY_H_
-#define MEDIA_GPU_GPU_JPEG_ENCODE_ACCELERATOR_FACTORY_H_
-
-#include "base/memory/ref_counted.h"
-#include "media/gpu/media_gpu_export.h"
-#include "media/video/jpeg_encode_accelerator.h"
-
-namespace base {
-class SingleThreadTaskRunner;
-}
-
-namespace media {
-
-class MEDIA_GPU_EXPORT GpuJpegEncodeAcceleratorFactory {
- public:
- using CreateAcceleratorCB =
- base::RepeatingCallback<std::unique_ptr<JpegEncodeAccelerator>(
- scoped_refptr<base::SingleThreadTaskRunner>)>;
-
- // Static query for JPEG supported. This query calls the appropriate
- // platform-specific version.
- static bool IsAcceleratedJpegEncodeSupported();
-
- static std::vector<CreateAcceleratorCB> GetAcceleratorFactories();
-};
-
-} // namespace media
-
-#endif // MEDIA_GPU_GPU_JPEG_ENCODE_ACCELERATOR_FACTORY_H_
diff --git a/chromium/media/gpu/gpu_mjpeg_decode_accelerator_factory.cc b/chromium/media/gpu/gpu_mjpeg_decode_accelerator_factory.cc
deleted file mode 100644
index 2fc34021882..00000000000
--- a/chromium/media/gpu/gpu_mjpeg_decode_accelerator_factory.cc
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/gpu/gpu_mjpeg_decode_accelerator_factory.h"
-
-#include "base/bind.h"
-#include "base/command_line.h"
-#include "base/single_thread_task_runner.h"
-#include "base/threading/thread_task_runner_handle.h"
-#include "build/build_config.h"
-#include "media/base/media_switches.h"
-#include "media/gpu/buildflags.h"
-#include "media/gpu/fake_mjpeg_decode_accelerator.h"
-
-#if BUILDFLAG(USE_V4L2_CODEC) && defined(ARCH_CPU_ARM_FAMILY)
-#define USE_V4L2_MJPEG_DECODE_ACCELERATOR
-#endif
-
-#if BUILDFLAG(USE_VAAPI)
-#include "media/gpu/vaapi/vaapi_mjpeg_decode_accelerator.h"
-#endif
-
-#if defined(USE_V4L2_MJPEG_DECODE_ACCELERATOR)
-#include "media/gpu/v4l2/v4l2_device.h"
-#include "media/gpu/v4l2/v4l2_mjpeg_decode_accelerator.h"
-#endif
-
-namespace media {
-
-namespace {
-
-#if defined(USE_V4L2_MJPEG_DECODE_ACCELERATOR)
-std::unique_ptr<MjpegDecodeAccelerator> CreateV4L2MjpegDecodeAccelerator(
- scoped_refptr<base::SingleThreadTaskRunner> io_task_runner) {
- std::unique_ptr<MjpegDecodeAccelerator> decoder;
- scoped_refptr<V4L2Device> device = V4L2Device::Create();
- if (device) {
- decoder.reset(
- new V4L2MjpegDecodeAccelerator(device, std::move(io_task_runner)));
- }
- return decoder;
-}
-#endif
-
-#if BUILDFLAG(USE_VAAPI)
-std::unique_ptr<MjpegDecodeAccelerator> CreateVaapiMjpegDecodeAccelerator(
- scoped_refptr<base::SingleThreadTaskRunner> io_task_runner) {
- return std::make_unique<VaapiMjpegDecodeAccelerator>(
- std::move(io_task_runner));
-}
-#endif
-
-std::unique_ptr<MjpegDecodeAccelerator> CreateFakeMjpegDecodeAccelerator(
- scoped_refptr<base::SingleThreadTaskRunner> io_task_runner) {
- return std::make_unique<FakeMjpegDecodeAccelerator>(
- std::move(io_task_runner));
-}
-
-} // namespace
-
-// static
-bool GpuMjpegDecodeAcceleratorFactory::IsAcceleratedJpegDecodeSupported() {
- auto accelerator_factory_functions = GetAcceleratorFactories();
- for (const auto& factory_function : accelerator_factory_functions) {
- std::unique_ptr<MjpegDecodeAccelerator> accelerator =
- factory_function.Run(base::ThreadTaskRunnerHandle::Get());
- if (accelerator && accelerator->IsSupported())
- return true;
- }
- return false;
-}
-
-// static
-std::vector<GpuMjpegDecodeAcceleratorFactory::CreateAcceleratorCB>
-GpuMjpegDecodeAcceleratorFactory::GetAcceleratorFactories() {
- if (base::CommandLine::ForCurrentProcess()->HasSwitch(
- switches::kUseFakeMjpegDecodeAccelerator)) {
- return {base::Bind(&CreateFakeMjpegDecodeAccelerator)};
- }
-
- // This list is ordered by priority of use.
- std::vector<CreateAcceleratorCB> result;
-#if defined(USE_V4L2_MJPEG_DECODE_ACCELERATOR)
- result.push_back(base::Bind(&CreateV4L2MjpegDecodeAccelerator));
-#endif
-#if BUILDFLAG(USE_VAAPI)
- result.push_back(base::Bind(&CreateVaapiMjpegDecodeAccelerator));
-#endif
- return result;
-}
-
-} // namespace media
diff --git a/chromium/media/gpu/gpu_mjpeg_decode_accelerator_factory.h b/chromium/media/gpu/gpu_mjpeg_decode_accelerator_factory.h
deleted file mode 100644
index c31ac3a6baa..00000000000
--- a/chromium/media/gpu/gpu_mjpeg_decode_accelerator_factory.h
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_GPU_GPU_MJPEG_DECODE_ACCELERATOR_FACTORY_H_
-#define MEDIA_GPU_GPU_MJPEG_DECODE_ACCELERATOR_FACTORY_H_
-
-#include "base/memory/ref_counted.h"
-#include "media/gpu/media_gpu_export.h"
-#include "media/video/mjpeg_decode_accelerator.h"
-
-namespace base {
-class SingleThreadTaskRunner;
-}
-
-namespace media {
-
-class MEDIA_GPU_EXPORT GpuMjpegDecodeAcceleratorFactory {
- public:
- using CreateAcceleratorCB =
- base::Callback<std::unique_ptr<MjpegDecodeAccelerator>(
- scoped_refptr<base::SingleThreadTaskRunner>)>;
-
- // Static query for JPEG supported. This query calls the appropriate
- // platform-specific version.
- static bool IsAcceleratedJpegDecodeSupported();
-
- static std::vector<CreateAcceleratorCB> GetAcceleratorFactories();
-};
-
-} // namespace media
-
-#endif // MEDIA_GPU_GPU_MJPEG_DECODE_ACCELERATOR_FACTORY_H_
diff --git a/chromium/media/gpu/h264_decoder.cc b/chromium/media/gpu/h264_decoder.cc
index f2b3df50731..f6216952d04 100644
--- a/chromium/media/gpu/h264_decoder.cc
+++ b/chromium/media/gpu/h264_decoder.cc
@@ -446,20 +446,17 @@ void H264Decoder::ConstructReferencePicListsB(
}
// See 8.2.4
-int H264Decoder::PicNumF(const scoped_refptr<H264Picture>& pic) {
- if (!pic)
- return -1;
-
- if (!pic->long_term)
- return pic->pic_num;
+int H264Decoder::PicNumF(const H264Picture& pic) {
+ if (!pic.long_term)
+ return pic.pic_num;
else
return max_pic_num_;
}
// See 8.2.4
-int H264Decoder::LongTermPicNumF(const scoped_refptr<H264Picture>& pic) {
- if (pic->ref && pic->long_term)
- return pic->long_term_pic_num;
+int H264Decoder::LongTermPicNumF(const H264Picture& pic) {
+ if (pic.ref && pic.long_term)
+ return pic.long_term_pic_num;
else
return 2 * (max_long_term_frame_idx_ + 1);
}
@@ -469,7 +466,7 @@ int H264Decoder::LongTermPicNumF(const scoped_refptr<H264Picture>& pic) {
static void ShiftRightAndInsert(H264Picture::Vector* v,
int from,
int to,
- const scoped_refptr<H264Picture>& pic) {
+ scoped_refptr<H264Picture> pic) {
// Security checks, do not disable in Debug mode.
CHECK(from <= to);
CHECK(to <= std::numeric_limits<int>::max() - 2);
@@ -484,7 +481,7 @@ static void ShiftRightAndInsert(H264Picture::Vector* v,
for (int i = to + 1; i > from; --i)
(*v)[i] = (*v)[i - 1];
- (*v)[from] = pic;
+ (*v)[from] = std::move(pic);
}
bool H264Decoder::ModifyReferencePicList(const H264SliceHeader* slice_hdr,
@@ -573,7 +570,9 @@ bool H264Decoder::ModifyReferencePicList(const H264SliceHeader* slice_hdr,
for (int src = ref_idx_lx, dst = ref_idx_lx;
src <= num_ref_idx_lX_active_minus1 + 1; ++src) {
- if (PicNumF((*ref_pic_listx)[src]) != pic_num_lx)
+ auto* src_pic = (*ref_pic_listx)[src].get();
+ int src_pic_num_lx = src_pic ? PicNumF(*src_pic) : -1;
+ if (src_pic_num_lx != pic_num_lx)
(*ref_pic_listx)[dst++] = (*ref_pic_listx)[src];
}
break;
@@ -594,7 +593,7 @@ bool H264Decoder::ModifyReferencePicList(const H264SliceHeader* slice_hdr,
for (int src = ref_idx_lx, dst = ref_idx_lx;
src <= num_ref_idx_lX_active_minus1 + 1; ++src) {
- if (LongTermPicNumF((*ref_pic_listx)[src]) !=
+ if (LongTermPicNumF(*(*ref_pic_listx)[src]) !=
static_cast<int>(list_mod->long_term_pic_num))
(*ref_pic_listx)[dst++] = (*ref_pic_listx)[src];
}
@@ -1417,6 +1416,10 @@ gfx::Size H264Decoder::GetPicSize() const {
return pic_size_;
}
+gfx::Rect H264Decoder::GetVisibleRect() const {
+ return visible_rect_;
+}
+
size_t H264Decoder::GetRequiredNumOfPictures() const {
constexpr size_t kPicsInPipeline = limits::kMaxVideoFrames + 1;
return GetNumReferenceFrames() + kPicsInPipeline;
diff --git a/chromium/media/gpu/h264_decoder.h b/chromium/media/gpu/h264_decoder.h
index 66bb08917ed..0c8250ff676 100644
--- a/chromium/media/gpu/h264_decoder.h
+++ b/chromium/media/gpu/h264_decoder.h
@@ -88,7 +88,7 @@ class MEDIA_GPU_EXPORT H264Decoder : public AcceleratedVideoDecoder {
const H264Picture::Vector& ref_pic_listp0,
const H264Picture::Vector& ref_pic_listb0,
const H264Picture::Vector& ref_pic_listb1,
- const scoped_refptr<H264Picture>& pic) = 0;
+ scoped_refptr<H264Picture> pic) = 0;
// Submit one slice for the current frame, passing the current |pps| and
// |pic| (same as in SubmitFrameMetadata()), the parsed header for the
@@ -106,7 +106,7 @@ class MEDIA_GPU_EXPORT H264Decoder : public AcceleratedVideoDecoder {
const H264SliceHeader* slice_hdr,
const H264Picture::Vector& ref_pic_list0,
const H264Picture::Vector& ref_pic_list1,
- const scoped_refptr<H264Picture>& pic,
+ scoped_refptr<H264Picture> pic,
const uint8_t* data,
size_t size,
const std::vector<SubsampleEntry>& subsamples) = 0;
@@ -116,7 +116,7 @@ class MEDIA_GPU_EXPORT H264Decoder : public AcceleratedVideoDecoder {
// the previous call to SubmitDecode().
// Returns kOk if successful, kFail if there are errors, or kTryAgain if
// the accelerator needs additional data before being able to proceed.
- virtual Status SubmitDecode(const scoped_refptr<H264Picture>& pic) = 0;
+ virtual Status SubmitDecode(scoped_refptr<H264Picture> pic) = 0;
// Schedule output (display) of |pic|. Note that returning from this
// method does not mean that |pic| has already been outputted (displayed),
@@ -124,7 +124,7 @@ class MEDIA_GPU_EXPORT H264Decoder : public AcceleratedVideoDecoder {
// as this method was called for them. Decoder may drop its reference
// to |pic| after calling this method.
// Return true if successful.
- virtual bool OutputPicture(const scoped_refptr<H264Picture>& pic) = 0;
+ virtual bool OutputPicture(scoped_refptr<H264Picture> pic) = 0;
// Reset any current state that may be cached in the accelerator, dropping
// any cached parameters/slices that have not been committed yet.
@@ -159,6 +159,7 @@ class MEDIA_GPU_EXPORT H264Decoder : public AcceleratedVideoDecoder {
void Reset() override;
DecodeResult Decode() override WARN_UNUSED_RESULT;
gfx::Size GetPicSize() const override;
+ gfx::Rect GetVisibleRect() const override;
size_t GetRequiredNumOfPictures() const override;
size_t GetNumReferenceFrames() const override;
@@ -234,8 +235,8 @@ class MEDIA_GPU_EXPORT H264Decoder : public AcceleratedVideoDecoder {
void ConstructReferencePicListsB(const H264SliceHeader* slice_hdr);
// Helper functions for reference list construction, per spec.
- int PicNumF(const scoped_refptr<H264Picture>& pic);
- int LongTermPicNumF(const scoped_refptr<H264Picture>& pic);
+ int PicNumF(const H264Picture& pic);
+ int LongTermPicNumF(const H264Picture& pic);
// Perform the reference picture lists' modification (reordering), as
// specified in spec (8.2.4).
diff --git a/chromium/media/gpu/h264_decoder_unittest.cc b/chromium/media/gpu/h264_decoder_unittest.cc
index f48003fb177..d4d94a285e1 100644
--- a/chromium/media/gpu/h264_decoder_unittest.cc
+++ b/chromium/media/gpu/h264_decoder_unittest.cc
@@ -47,8 +47,7 @@ const std::string kHighFrame3 = "bear-320x192-high-frame-3.h264";
// Checks whether the decrypt config in the picture matches the decrypt config
// passed to this matcher.
MATCHER_P(DecryptConfigMatches, decrypt_config, "") {
- const scoped_refptr<H264Picture>& pic = arg;
- return pic->decrypt_config()->Matches(*decrypt_config);
+ return arg->decrypt_config()->Matches(*decrypt_config);
}
MATCHER(SubsampleSizeMatches, "Verify subsample sizes match buffer size") {
@@ -107,7 +106,7 @@ class MockH264Accelerator : public H264Decoder::H264Accelerator {
MockH264Accelerator() = default;
MOCK_METHOD0(CreateH264Picture, scoped_refptr<H264Picture>());
- MOCK_METHOD1(SubmitDecode, Status(const scoped_refptr<H264Picture>& pic));
+ MOCK_METHOD1(SubmitDecode, Status(scoped_refptr<H264Picture> pic));
MOCK_METHOD7(SubmitFrameMetadata,
Status(const H264SPS* sps,
const H264PPS* pps,
@@ -115,17 +114,17 @@ class MockH264Accelerator : public H264Decoder::H264Accelerator {
const H264Picture::Vector& ref_pic_listp0,
const H264Picture::Vector& ref_pic_listb0,
const H264Picture::Vector& ref_pic_listb1,
- const scoped_refptr<H264Picture>& pic));
+ scoped_refptr<H264Picture> pic));
MOCK_METHOD8(SubmitSlice,
Status(const H264PPS* pps,
const H264SliceHeader* slice_hdr,
const H264Picture::Vector& ref_pic_list0,
const H264Picture::Vector& ref_pic_list1,
- const scoped_refptr<H264Picture>& pic,
+ scoped_refptr<H264Picture> pic,
const uint8_t* data,
size_t size,
const std::vector<SubsampleEntry>& subsamples));
- MOCK_METHOD1(OutputPicture, bool(const scoped_refptr<H264Picture>& pic));
+ MOCK_METHOD1(OutputPicture, bool(scoped_refptr<H264Picture> pic));
MOCK_METHOD2(SetStream,
Status(base::span<const uint8_t> stream,
const DecryptConfig* decrypt_config));
@@ -204,12 +203,11 @@ AcceleratedVideoDecoder::DecodeResult H264DecoderTest::Decode() {
}
// To have better description on mismatch.
-class WithPocMatcher
- : public MatcherInterface<const scoped_refptr<H264Picture>&> {
+class WithPocMatcher : public MatcherInterface<scoped_refptr<H264Picture>> {
public:
explicit WithPocMatcher(int expected_poc) : expected_poc_(expected_poc) {}
- bool MatchAndExplain(const scoped_refptr<H264Picture>& p,
+ bool MatchAndExplain(scoped_refptr<H264Picture> p,
MatchResultListener* listener) const override {
if (p->pic_order_cnt == expected_poc_)
return true;
@@ -225,7 +223,7 @@ class WithPocMatcher
int expected_poc_;
};
-inline Matcher<const scoped_refptr<H264Picture>&> WithPoc(int expected_poc) {
+inline Matcher<scoped_refptr<H264Picture>> WithPoc(int expected_poc) {
return MakeMatcher(new WithPocMatcher(expected_poc));
}
diff --git a/chromium/media/gpu/image_processor_test.cc b/chromium/media/gpu/image_processor_test.cc
index e0d15935cc7..2a515d21b23 100644
--- a/chromium/media/gpu/image_processor_test.cc
+++ b/chromium/media/gpu/image_processor_test.cc
@@ -19,6 +19,7 @@
#include "media/gpu/test/image_processor/image_processor_client.h"
#include "media/gpu/test/video_frame_helpers.h"
#include "media/gpu/test/video_frame_validator.h"
+#include "media/gpu/test/video_test_environment.h"
#include "mojo/core/embedder/embedder.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "ui/gfx/geometry/size.h"
@@ -26,8 +27,11 @@
namespace media {
namespace {
-constexpr const base::FilePath::CharType* kI420Image =
- FILE_PATH_LITERAL("bear_320x192.i420.yuv");
+// TODO(crbug.com/944822): Use kI420Image for I420 -> NV12 test case. It is
+// currently disabled because there is currently no way of creating DMABUF I420
+// buffer by NativePixmap.
+// constexpr const base::FilePath::CharType* kI420Image =
+// FILE_PATH_LITERAL("bear_320x192.i420.yuv");
constexpr const base::FilePath::CharType* kNV12Image =
FILE_PATH_LITERAL("bear_320x192.nv12.yuv");
constexpr const base::FilePath::CharType* kRGBAImage =
@@ -42,13 +46,14 @@ class ImageProcessorSimpleParamTest
public ::testing::WithParamInterface<
std::tuple<base::FilePath, base::FilePath>> {
public:
- // TODO(crbug.com/917951): Initialize Ozone once.
void SetUp() override {}
void TearDown() override {}
std::unique_ptr<test::ImageProcessorClient> CreateImageProcessorClient(
const test::Image& input_image,
- const test::Image& output_image) {
+ const std::vector<VideoFrame::StorageType>& input_storage_types,
+ const test::Image& output_image,
+ const std::vector<VideoFrame::StorageType>& output_storage_types) {
const VideoPixelFormat input_format = input_image.PixelFormat();
const VideoPixelFormat output_format = output_image.PixelFormat();
auto input_config_layout = test::CreateVideoFrameLayout(
@@ -58,12 +63,10 @@ class ImageProcessorSimpleParamTest
VideoFrame::NumPlanes(output_format));
LOG_ASSERT(input_config_layout);
LOG_ASSERT(output_config_layout);
- ImageProcessor::PortConfig input_config(*input_config_layout,
- input_image.Size(),
- {VideoFrame::STORAGE_OWNED_MEMORY});
+ ImageProcessor::PortConfig input_config(
+ *input_config_layout, input_image.Size(), input_storage_types);
ImageProcessor::PortConfig output_config(
- *output_config_layout, output_image.Size(),
- {VideoFrame::STORAGE_OWNED_MEMORY});
+ *output_config_layout, output_image.Size(), output_storage_types);
// TODO(crbug.com/917951): Select more appropriate number of buffers.
constexpr size_t kNumBuffers = 1;
LOG_ASSERT(output_image.IsMetadataLoaded());
@@ -81,7 +84,7 @@ class ImageProcessorSimpleParamTest
}
};
-TEST_P(ImageProcessorSimpleParamTest, ConvertOneTimeFromMemToMem) {
+TEST_P(ImageProcessorSimpleParamTest, ConvertOneTime_MemToMem) {
// Load the test input image. We only need the output image's metadata so we
// can compare checksums.
test::Image input_image(std::get<0>(GetParam()));
@@ -89,23 +92,54 @@ TEST_P(ImageProcessorSimpleParamTest, ConvertOneTimeFromMemToMem) {
ASSERT_TRUE(input_image.Load());
ASSERT_TRUE(output_image.LoadMetadata());
- auto ip_client = CreateImageProcessorClient(input_image, output_image);
+ auto ip_client = CreateImageProcessorClient(
+ input_image, {VideoFrame::STORAGE_OWNED_MEMORY}, output_image,
+ {VideoFrame::STORAGE_OWNED_MEMORY});
+
+ ip_client->Process(input_image, output_image);
+
+ EXPECT_TRUE(ip_client->WaitUntilNumImageProcessed(1u));
+ EXPECT_EQ(ip_client->GetErrorCount(), 0u);
+ EXPECT_EQ(ip_client->GetNumOfProcessedImages(), 1u);
+ EXPECT_TRUE(ip_client->WaitForFrameProcessors());
+}
+
+#if defined(OS_CHROMEOS)
+// We don't yet have the function to create Dmabuf-backed VideoFrame on
+// platforms except ChromeOS. So MemToDmabuf test is limited on ChromeOS.
+TEST_P(ImageProcessorSimpleParamTest, ConvertOneTime_MemToDmabuf) {
+ // Load the test input image. We only need the output image's metadata so we
+ // can compare checksums.
+ test::Image input_image(std::get<0>(GetParam()));
+ test::Image output_image(std::get<1>(GetParam()));
+ ASSERT_TRUE(input_image.Load());
+ ASSERT_TRUE(output_image.LoadMetadata());
+
+ auto ip_client = CreateImageProcessorClient(
+ input_image, {VideoFrame::STORAGE_OWNED_MEMORY}, output_image,
+ {VideoFrame::STORAGE_OWNED_MEMORY});
+
ip_client->Process(input_image, output_image);
+
EXPECT_TRUE(ip_client->WaitUntilNumImageProcessed(1u));
EXPECT_EQ(ip_client->GetErrorCount(), 0u);
EXPECT_EQ(ip_client->GetNumOfProcessedImages(), 1u);
EXPECT_TRUE(ip_client->WaitForFrameProcessors());
}
+#endif // defined(OS_CHROMEOS)
-// BGRA->NV12
-// I420->NV12
-// RGBA->NV12
-// YV12->NV12
+// BGRA -> NV12
+// I420 -> NV12
+// RGBA -> NV12
+// YV12 -> NV12
INSTANTIATE_TEST_SUITE_P(
ConvertToNV12,
ImageProcessorSimpleParamTest,
::testing::Values(std::make_tuple(kBGRAImage, kNV12Image),
- std::make_tuple(kI420Image, kNV12Image),
+ // TODO(crbug.com/944822): Add I420 -> NV12 test case.
+ // There is currently no way of creating DMABUF
+ // I420 buffer by NativePixmap.
+ // std::make_tuple(kI420Image, kNV12Image),
std::make_tuple(kRGBAImage, kNV12Image),
std::make_tuple(kYV12Image, kNV12Image)));
@@ -121,14 +155,8 @@ INSTANTIATE_TEST_SUITE_P(
int main(int argc, char** argv) {
testing::InitGoogleTest(&argc, argv);
base::CommandLine::Init(argc, argv);
- // Using shared memory requires mojo to be initialized (crbug.com/849207).
- mojo::core::Init();
- base::ShadowingAtExitManager at_exit_manager;
-
- // Needed to enable DVLOG through --vmodule.
- logging::LoggingSettings settings;
- settings.logging_dest = logging::LOG_TO_SYSTEM_DEBUG_LOG;
- LOG_ASSERT(logging::InitLogging(settings));
+ auto* const test_environment = new media::test::VideoTestEnvironment;
+ testing::AddGlobalTestEnvironment(test_environment);
return RUN_ALL_TESTS();
}
diff --git a/chromium/media/gpu/ipc/client/gpu_video_decode_accelerator_host.cc b/chromium/media/gpu/ipc/client/gpu_video_decode_accelerator_host.cc
index 4f2cf300836..350a47d762a 100644
--- a/chromium/media/gpu/ipc/client/gpu_video_decode_accelerator_host.cc
+++ b/chromium/media/gpu/ipc/client/gpu_video_decode_accelerator_host.cc
@@ -104,21 +104,23 @@ bool GpuVideoDecodeAcceleratorHost::Initialize(const Config& config,
return true;
}
-void GpuVideoDecodeAcceleratorHost::Decode(
- const BitstreamBuffer& bitstream_buffer) {
+void GpuVideoDecodeAcceleratorHost::Decode(BitstreamBuffer bitstream_buffer) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
if (!channel_)
return;
- BitstreamBuffer buffer_to_send = bitstream_buffer;
- base::SharedMemoryHandle handle =
- channel_->ShareToGpuProcess(bitstream_buffer.handle());
- if (!base::SharedMemory::IsHandleValid(handle)) {
- NOTREACHED() << "Failed to duplicate buffer handler";
- return;
+ if (channel_->IsLost()) {
+ Send(new AcceleratedVideoDecoderMsg_Decode(
+ decoder_route_id_,
+ BitstreamBuffer(bitstream_buffer.id(),
+ base::subtle::PlatformSharedMemoryRegion(),
+ bitstream_buffer.size(), bitstream_buffer.offset(),
+ bitstream_buffer.presentation_timestamp())));
+ } else {
+ // The legacy IPC call will duplicate the shared memory region in
+ // bitstream_buffer.
+ Send(new AcceleratedVideoDecoderMsg_Decode(decoder_route_id_,
+ bitstream_buffer));
}
- buffer_to_send.set_handle(handle);
- Send(
- new AcceleratedVideoDecoderMsg_Decode(decoder_route_id_, buffer_to_send));
}
void GpuVideoDecodeAcceleratorHost::AssignPictureBuffers(
diff --git a/chromium/media/gpu/ipc/client/gpu_video_decode_accelerator_host.h b/chromium/media/gpu/ipc/client/gpu_video_decode_accelerator_host.h
index 48351b115b2..3b4c840abcf 100644
--- a/chromium/media/gpu/ipc/client/gpu_video_decode_accelerator_host.h
+++ b/chromium/media/gpu/ipc/client/gpu_video_decode_accelerator_host.h
@@ -42,7 +42,7 @@ class GpuVideoDecodeAcceleratorHost
// VideoDecodeAccelerator implementation.
bool Initialize(const Config& config, Client* client) override;
- void Decode(const BitstreamBuffer& bitstream_buffer) override;
+ void Decode(BitstreamBuffer bitstream_buffer) override;
void AssignPictureBuffers(const std::vector<PictureBuffer>& buffers) override;
void ReusePictureBuffer(int32_t picture_buffer_id) override;
void Flush() override;
diff --git a/chromium/media/gpu/ipc/common/media_messages.h b/chromium/media/gpu/ipc/common/media_messages.h
index cbb7fd3d43d..3f47f08cdf6 100644
--- a/chromium/media/gpu/ipc/common/media_messages.h
+++ b/chromium/media/gpu/ipc/common/media_messages.h
@@ -14,7 +14,6 @@
#include "ipc/param_traits_macros.h"
#include "media/base/overlay_info.h"
#include "media/gpu/ipc/common/media_param_traits.h"
-#include "media/video/mjpeg_decode_accelerator.h"
#include "media/video/video_decode_accelerator.h"
#include "ui/gfx/ipc/color/gfx_param_traits.h"
#include "ui/gfx/ipc/gfx_param_traits.h"
diff --git a/chromium/media/gpu/ipc/common/media_param_traits.cc b/chromium/media/gpu/ipc/common/media_param_traits.cc
index 3ef6325d665..334d9d4596c 100644
--- a/chromium/media/gpu/ipc/common/media_param_traits.cc
+++ b/chromium/media/gpu/ipc/common/media_param_traits.cc
@@ -24,7 +24,7 @@ void ParamTraits<media::BitstreamBuffer>::Write(base::Pickle* m,
WriteParam(m, p.iv());
WriteParam(m, p.subsamples());
}
- WriteParam(m, p.handle());
+ WriteParam(m, p.DuplicateRegion());
}
bool ParamTraits<media::BitstreamBuffer>::Read(const base::Pickle* m,
@@ -58,7 +58,7 @@ bool ParamTraits<media::BitstreamBuffer>::Read(const base::Pickle* m,
return false;
}
- return ReadParam(m, iter, &r->handle_);
+ return ReadParam(m, iter, &r->region_);
}
void ParamTraits<media::BitstreamBuffer>::Log(const param_type& p,
diff --git a/chromium/media/gpu/ipc/service/gpu_video_decode_accelerator.cc b/chromium/media/gpu/ipc/service/gpu_video_decode_accelerator.cc
index 206c9ec59f0..a2975051ab2 100644
--- a/chromium/media/gpu/ipc/service/gpu_video_decode_accelerator.cc
+++ b/chromium/media/gpu/ipc/service/gpu_video_decode_accelerator.cc
@@ -408,10 +408,9 @@ bool GpuVideoDecodeAccelerator::Initialize(
// Runs on IO thread if VDA::TryToSetupDecodeOnSeparateThread() succeeded,
// otherwise on the main thread.
-void GpuVideoDecodeAccelerator::OnDecode(
- const BitstreamBuffer& bitstream_buffer) {
+void GpuVideoDecodeAccelerator::OnDecode(BitstreamBuffer bitstream_buffer) {
DCHECK(video_decode_accelerator_);
- video_decode_accelerator_->Decode(bitstream_buffer);
+ video_decode_accelerator_->Decode(std::move(bitstream_buffer));
}
void GpuVideoDecodeAccelerator::OnAssignPictureBuffers(
diff --git a/chromium/media/gpu/ipc/service/gpu_video_decode_accelerator.h b/chromium/media/gpu/ipc/service/gpu_video_decode_accelerator.h
index d3b652ec0ae..7afdc8ccbe0 100644
--- a/chromium/media/gpu/ipc/service/gpu_video_decode_accelerator.h
+++ b/chromium/media/gpu/ipc/service/gpu_video_decode_accelerator.h
@@ -91,7 +91,7 @@ class GpuVideoDecodeAccelerator
~GpuVideoDecodeAccelerator() override;
// Handlers for IPC messages.
- void OnDecode(const BitstreamBuffer& bitstream_buffer);
+ void OnDecode(BitstreamBuffer bitstream_buffer);
void OnAssignPictureBuffers(
const std::vector<int32_t>& buffer_ids,
const std::vector<PictureBuffer::TextureIds>& texture_ids);
diff --git a/chromium/media/gpu/ipc/service/vda_video_decoder.cc b/chromium/media/gpu/ipc/service/vda_video_decoder.cc
index bec30f19855..57f15d568c4 100644
--- a/chromium/media/gpu/ipc/service/vda_video_decoder.cc
+++ b/chromium/media/gpu/ipc/service/vda_video_decoder.cc
@@ -212,7 +212,7 @@ std::string VdaVideoDecoder::GetDisplayName() const {
void VdaVideoDecoder::Initialize(const VideoDecoderConfig& config,
bool low_delay,
CdmContext* cdm_context,
- const InitCB& init_cb,
+ InitCB init_cb,
const OutputCB& output_cb,
const WaitingCB& waiting_cb) {
DVLOG(1) << __func__ << "(" << config.AsHumanReadableString() << ")";
@@ -224,7 +224,8 @@ void VdaVideoDecoder::Initialize(const VideoDecoderConfig& config,
DCHECK(decode_cbs_.empty());
if (has_error_) {
- parent_task_runner_->PostTask(FROM_HERE, base::BindOnce(init_cb, false));
+ parent_task_runner_->PostTask(FROM_HERE,
+ base::BindOnce(std::move(init_cb), false));
return;
}
@@ -232,7 +233,7 @@ void VdaVideoDecoder::Initialize(const VideoDecoderConfig& config,
// Store |init_cb| ASAP so that EnterErrorState() can use it. Leave |config_|
// alone for now so that the checks can inspect it.
- init_cb_ = init_cb;
+ init_cb_ = std::move(init_cb);
output_cb_ = output_cb;
// Verify that the configuration is supported.
@@ -397,7 +398,7 @@ void VdaVideoDecoder::InitializeDone(bool status) {
}
void VdaVideoDecoder::Decode(scoped_refptr<DecoderBuffer> buffer,
- const DecodeCB& decode_cb) {
+ DecodeCB decode_cb) {
DVLOG(3) << __func__ << "(" << (buffer->end_of_stream() ? "EOS" : "") << ")";
DCHECK(parent_task_runner_->BelongsToCurrentThread());
DCHECK(!init_cb_);
@@ -407,13 +408,14 @@ void VdaVideoDecoder::Decode(scoped_refptr<DecoderBuffer> buffer,
if (has_error_) {
parent_task_runner_->PostTask(
- FROM_HERE, base::BindOnce(decode_cb, DecodeStatus::DECODE_ERROR));
+ FROM_HERE,
+ base::BindOnce(std::move(decode_cb), DecodeStatus::DECODE_ERROR));
return;
}
// Convert EOS frame to Flush().
if (buffer->end_of_stream()) {
- flush_cb_ = decode_cb;
+ flush_cb_ = std::move(decode_cb);
gpu_task_runner_->PostTask(
FROM_HERE,
base::BindOnce(&VideoDecodeAccelerator::Flush, gpu_weak_vda_));
@@ -423,7 +425,7 @@ void VdaVideoDecoder::Decode(scoped_refptr<DecoderBuffer> buffer,
// Assign a bitstream buffer ID and record the decode request.
int32_t bitstream_buffer_id = NextID(&bitstream_buffer_id_);
timestamps_.Put(bitstream_buffer_id, buffer->timestamp());
- decode_cbs_[bitstream_buffer_id] = decode_cb;
+ decode_cbs_[bitstream_buffer_id] = std::move(decode_cb);
if (decode_on_parent_thread_) {
vda_->Decode(std::move(buffer), bitstream_buffer_id);
@@ -447,7 +449,7 @@ void VdaVideoDecoder::DecodeOnGpuThread(scoped_refptr<DecoderBuffer> buffer,
vda_->Decode(std::move(buffer), bitstream_id);
}
-void VdaVideoDecoder::Reset(const base::RepeatingClosure& reset_cb) {
+void VdaVideoDecoder::Reset(base::OnceClosure reset_cb) {
DVLOG(2) << __func__;
DCHECK(parent_task_runner_->BelongsToCurrentThread());
DCHECK(!init_cb_);
@@ -456,11 +458,11 @@ void VdaVideoDecoder::Reset(const base::RepeatingClosure& reset_cb) {
DCHECK(!reset_cb_);
if (has_error_) {
- parent_task_runner_->PostTask(FROM_HERE, reset_cb);
+ parent_task_runner_->PostTask(FROM_HERE, std::move(reset_cb));
return;
}
- reset_cb_ = reset_cb;
+ reset_cb_ = std::move(reset_cb);
gpu_task_runner_->PostTask(
FROM_HERE, base::BindOnce(&VideoDecodeAccelerator::Reset, gpu_weak_vda_));
}
@@ -666,9 +668,9 @@ void VdaVideoDecoder::NotifyEndOfBitstreamBufferOnParentThread(
}
// Run a local copy in case the decode callback modifies |decode_cbs_|.
- DecodeCB decode_cb = decode_cb_it->second;
+ DecodeCB decode_cb = std::move(decode_cb_it->second);
decode_cbs_.erase(decode_cb_it);
- decode_cb.Run(DecodeStatus::OK);
+ std::move(decode_cb).Run(DecodeStatus::OK);
}
void VdaVideoDecoder::NotifyFlushDone() {
@@ -723,10 +725,10 @@ void VdaVideoDecoder::NotifyResetDoneOnParentThread() {
// them.
base::WeakPtr<VdaVideoDecoder> weak_this = parent_weak_this_;
- std::map<int32_t, DecodeCB> local_decode_cbs = decode_cbs_;
+ std::map<int32_t, DecodeCB> local_decode_cbs = std::move(decode_cbs_);
decode_cbs_.clear();
- for (const auto& it : local_decode_cbs) {
- it.second.Run(DecodeStatus::ABORTED);
+ for (auto& it : local_decode_cbs) {
+ std::move(it.second).Run(DecodeStatus::ABORTED);
if (!weak_this)
return;
}
@@ -799,10 +801,10 @@ void VdaVideoDecoder::DestroyCallbacks() {
// when |has_error_| is set.
base::WeakPtr<VdaVideoDecoder> weak_this = parent_weak_this_;
- std::map<int32_t, DecodeCB> local_decode_cbs = decode_cbs_;
+ std::map<int32_t, DecodeCB> local_decode_cbs = std::move(decode_cbs_);
decode_cbs_.clear();
- for (const auto& it : local_decode_cbs) {
- it.second.Run(DecodeStatus::DECODE_ERROR);
+ for (auto& it : local_decode_cbs) {
+ std::move(it.second).Run(DecodeStatus::DECODE_ERROR);
if (!weak_this)
return;
}
diff --git a/chromium/media/gpu/ipc/service/vda_video_decoder.h b/chromium/media/gpu/ipc/service/vda_video_decoder.h
index 39c6478968b..7299594cc23 100644
--- a/chromium/media/gpu/ipc/service/vda_video_decoder.h
+++ b/chromium/media/gpu/ipc/service/vda_video_decoder.h
@@ -101,12 +101,11 @@ class VdaVideoDecoder : public VideoDecoder,
void Initialize(const VideoDecoderConfig& config,
bool low_delay,
CdmContext* cdm_context,
- const InitCB& init_cb,
+ InitCB init_cb,
const OutputCB& output_cb,
const WaitingCB& waiting_cb) override;
- void Decode(scoped_refptr<DecoderBuffer> buffer,
- const DecodeCB& decode_cb) override;
- void Reset(const base::RepeatingClosure& reset_cb) override;
+ void Decode(scoped_refptr<DecoderBuffer> buffer, DecodeCB decode_cb) override;
+ void Reset(base::OnceClosure reset_cb) override;
bool NeedsBitstreamConversion() const override;
bool CanReadWithoutStalling() const override;
int GetMaxDecodeRequests() const override;
@@ -179,7 +178,7 @@ class VdaVideoDecoder : public VideoDecoder,
InitCB init_cb_;
OutputCB output_cb_;
DecodeCB flush_cb_;
- base::RepeatingClosure reset_cb_;
+ base::OnceClosure reset_cb_;
int32_t bitstream_buffer_id_ = 0;
std::map<int32_t, DecodeCB> decode_cbs_;
diff --git a/chromium/media/gpu/ipc/service/vda_video_decoder_unittest.cc b/chromium/media/gpu/ipc/service/vda_video_decoder_unittest.cc
index c7c79fe1803..4e34ed6cfe7 100644
--- a/chromium/media/gpu/ipc/service/vda_video_decoder_unittest.cc
+++ b/chromium/media/gpu/ipc/service/vda_video_decoder_unittest.cc
@@ -23,7 +23,7 @@
#include "media/base/simple_sync_token_client.h"
#include "media/base/video_codecs.h"
#include "media/base/video_frame.h"
-#include "media/base/video_rotation.h"
+#include "media/base/video_transformation.h"
#include "media/base/video_types.h"
#include "media/gpu/ipc/service/picture_buffer_manager.h"
#include "media/gpu/test/fake_command_buffer_helper.h"
@@ -142,7 +142,7 @@ class VdaVideoDecoderTest : public testing::TestWithParam<bool> {
EXPECT_CALL(init_cb_, Run(true));
InitializeWithConfig(VideoDecoderConfig(
kCodecVP9, VP9PROFILE_PROFILE0, PIXEL_FORMAT_I420,
- VideoColorSpace::REC709(), VIDEO_ROTATION_0, gfx::Size(1920, 1088),
+ VideoColorSpace::REC709(), kNoTransformation, gfx::Size(1920, 1088),
gfx::Rect(1920, 1080), gfx::Size(1920, 1080), EmptyExtraData(),
Unencrypted()));
RunUntilIdle();
@@ -319,7 +319,7 @@ TEST_P(VdaVideoDecoderTest, Initialize) {
TEST_P(VdaVideoDecoderTest, Initialize_UnsupportedSize) {
InitializeWithConfig(
VideoDecoderConfig(kCodecVP9, VP9PROFILE_PROFILE0, PIXEL_FORMAT_I420,
- VideoColorSpace::REC601(), VIDEO_ROTATION_0,
+ VideoColorSpace::REC601(), kNoTransformation,
gfx::Size(320, 240), gfx::Rect(320, 240),
gfx::Size(320, 240), EmptyExtraData(), Unencrypted()));
EXPECT_CALL(init_cb_, Run(false));
@@ -329,7 +329,7 @@ TEST_P(VdaVideoDecoderTest, Initialize_UnsupportedSize) {
TEST_P(VdaVideoDecoderTest, Initialize_UnsupportedCodec) {
InitializeWithConfig(VideoDecoderConfig(
kCodecH264, H264PROFILE_BASELINE, PIXEL_FORMAT_I420,
- VideoColorSpace::REC709(), VIDEO_ROTATION_0, gfx::Size(1920, 1088),
+ VideoColorSpace::REC709(), kNoTransformation, gfx::Size(1920, 1088),
gfx::Rect(1920, 1080), gfx::Size(1920, 1080), EmptyExtraData(),
Unencrypted()));
EXPECT_CALL(init_cb_, Run(false));
@@ -340,7 +340,7 @@ TEST_P(VdaVideoDecoderTest, Initialize_RejectedByVda) {
EXPECT_CALL(*vda_, Initialize(_, vdavd_.get())).WillOnce(Return(false));
InitializeWithConfig(VideoDecoderConfig(
kCodecVP9, VP9PROFILE_PROFILE0, PIXEL_FORMAT_I420,
- VideoColorSpace::REC709(), VIDEO_ROTATION_0, gfx::Size(1920, 1088),
+ VideoColorSpace::REC709(), kNoTransformation, gfx::Size(1920, 1088),
gfx::Rect(1920, 1080), gfx::Size(1920, 1080), EmptyExtraData(),
Unencrypted()));
EXPECT_CALL(init_cb_, Run(false));
@@ -423,7 +423,7 @@ TEST_P(VdaVideoDecoderTest, Decode_Output_MaintainsAspect) {
.WillOnce(Return(GetParam()));
InitializeWithConfig(VideoDecoderConfig(
kCodecVP9, VP9PROFILE_PROFILE0, PIXEL_FORMAT_I420,
- VideoColorSpace::REC709(), VIDEO_ROTATION_0, gfx::Size(640, 480),
+ VideoColorSpace::REC709(), kNoTransformation, gfx::Size(640, 480),
gfx::Rect(640, 480), gfx::Size(1280, 480), EmptyExtraData(),
Unencrypted()));
EXPECT_CALL(init_cb_, Run(true));
diff --git a/chromium/media/gpu/jpeg_decode_accelerator_unittest.cc b/chromium/media/gpu/jpeg_decode_accelerator_unittest.cc
deleted file mode 100644
index ec5e528274d..00000000000
--- a/chromium/media/gpu/jpeg_decode_accelerator_unittest.cc
+++ /dev/null
@@ -1,903 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <stddef.h>
-#include <stdint.h>
-#include <string.h>
-
-#include <memory>
-
-#include "base/at_exit.h"
-#include "base/bind.h"
-#include "base/command_line.h"
-#include "base/files/file_util.h"
-#include "base/logging.h"
-#include "base/macros.h"
-#include "base/numerics/safe_conversions.h"
-#include "base/path_service.h"
-#include "base/stl_util.h"
-#include "base/strings/string_split.h"
-#include "base/strings/stringprintf.h"
-#include "base/threading/thread.h"
-#include "base/threading/thread_task_runner_handle.h"
-#include "base/timer/elapsed_timer.h"
-#include "build/build_config.h"
-#include "media/base/test_data_util.h"
-#include "media/filters/jpeg_parser.h"
-#include "media/gpu/buildflags.h"
-#include "media/gpu/gpu_mjpeg_decode_accelerator_factory.h"
-#include "media/gpu/test/video_accelerator_unittest_helpers.h"
-#include "media/video/mjpeg_decode_accelerator.h"
-#include "mojo/core/embedder/embedder.h"
-#include "testing/gtest/include/gtest/gtest.h"
-#include "third_party/libyuv/include/libyuv.h"
-#include "ui/gfx/codec/jpeg_codec.h"
-#include "ui/gfx/codec/png_codec.h"
-
-#if BUILDFLAG(USE_VAAPI)
-#include "media/gpu/vaapi/vaapi_wrapper.h"
-#endif
-
-namespace media {
-namespace {
-
-// Default test image file.
-const base::FilePath::CharType* kDefaultJpegFilename =
- FILE_PATH_LITERAL("peach_pi-1280x720.jpg");
-// Images with at least one odd dimension.
-const base::FilePath::CharType* kOddJpegFilenames[] = {
- FILE_PATH_LITERAL("peach_pi-40x23.jpg"),
- FILE_PATH_LITERAL("peach_pi-41x22.jpg"),
- FILE_PATH_LITERAL("peach_pi-41x23.jpg")};
-constexpr int kDefaultPerfDecodeTimes = 600;
-// Decide to save decode results to files or not. Output files will be saved
-// in the same directory with unittest. File name is like input file but
-// changing the extension to "yuv".
-bool g_save_to_file = false;
-// Threshold for mean absolute difference of hardware and software decode.
-// Absolute difference is to calculate the difference between each pixel in two
-// images. This is used for measuring of the similarity of two images.
-constexpr double kDecodeSimilarityThreshold = 1.25;
-
-// Environment to create test data for all test cases.
-class JpegDecodeAcceleratorTestEnvironment;
-JpegDecodeAcceleratorTestEnvironment* g_env;
-
-// This struct holds a parsed, complete JPEG blob. It can be created from a
-// FilePath or can be simply a black image.
-struct ParsedJpegImage {
- static std::unique_ptr<ParsedJpegImage> CreateFromFile(
- const base::FilePath& file_path) {
- auto image = std::make_unique<ParsedJpegImage>(file_path);
-
- LOG_ASSERT(base::ReadFileToString(file_path, &image->data_str))
- << file_path;
-
- JpegParseResult parse_result;
- LOG_ASSERT(ParseJpegPicture(
- reinterpret_cast<const uint8_t*>(image->data_str.data()),
- image->data_str.size(), &parse_result));
-
- image->InitializeSizes(parse_result.frame_header.visible_width,
- parse_result.frame_header.visible_height);
- return image;
- }
-
- static std::unique_ptr<ParsedJpegImage> CreateBlackImage(
- int width,
- int height,
- SkJpegEncoder::Downsample downsample = SkJpegEncoder::Downsample::k420) {
- // Generate a black image with the specified resolution.
- constexpr size_t kBytesPerPixel = 4;
- const std::vector<unsigned char> input_buffer(width * height *
- kBytesPerPixel);
- const SkImageInfo info = SkImageInfo::Make(
- width, height, kRGBA_8888_SkColorType, kOpaque_SkAlphaType);
- const SkPixmap src(info, input_buffer.data(), width * kBytesPerPixel);
-
- // Encode the generated image in the JPEG format, the output buffer will be
- // automatically resized while encoding.
- constexpr int kJpegQuality = 100;
- std::vector<unsigned char> encoded;
- LOG_ASSERT(gfx::JPEGCodec::Encode(src, kJpegQuality, downsample, &encoded));
-
- base::FilePath filename;
- LOG_ASSERT(base::GetTempDir(&filename));
- filename =
- filename.Append(base::StringPrintf("black-%dx%d.jpg", width, height));
-
- auto image = std::make_unique<ParsedJpegImage>(filename);
- image->data_str.append(encoded.begin(), encoded.end());
- image->InitializeSizes(width, height);
- return image;
- }
-
- explicit ParsedJpegImage(const base::FilePath& path) : file_path(path) {}
-
- void InitializeSizes(int width, int height) {
- visible_size.SetSize(width, height);
- // The parse result yields a coded size that rounds up to a whole MCU.
- // However, we can use a smaller coded size for the decode result. Here, we
- // simply round up to the next even dimension. That way, when we are
- // building the video frame to hold the result of the decoding, the strides
- // and pointers for the UV planes are computed correctly for JPEGs that
- // require even-sized allocation (see
- // VideoFrame::RequiresEvenSizeAllocation()) and whose visible size has at
- // least one odd dimension.
- coded_size.SetSize((visible_size.width() + 1) & ~1,
- (visible_size.height() + 1) & ~1);
- // The JPEG decoder will always return the decoded frame in I420 format.
- output_size = VideoFrame::AllocationSize(PIXEL_FORMAT_I420, coded_size);
- }
-
- const base::FilePath::StringType& filename() const {
- return file_path.value();
- }
-
- const base::FilePath file_path;
-
- std::string data_str;
- gfx::Size visible_size;
- gfx::Size coded_size;
- size_t output_size;
-};
-
-// Global singleton to hold on to common data and other user-defined options.
-class JpegDecodeAcceleratorTestEnvironment : public ::testing::Environment {
- public:
- JpegDecodeAcceleratorTestEnvironment(
- const base::FilePath::CharType* jpeg_filenames,
- const base::FilePath::CharType* test_data_path,
- int perf_decode_times)
- : perf_decode_times_(perf_decode_times ? perf_decode_times
- : kDefaultPerfDecodeTimes),
- user_jpeg_filenames_(jpeg_filenames ? jpeg_filenames
- : kDefaultJpegFilename),
- test_data_path_(test_data_path) {}
-
- void SetUp() override;
-
- // Resolve the specified file path. The file path can be either an absolute
- // path, relative to the current directory, or relative to the test data path.
- // This is either a custom test data path provided by --test_data_path, or the
- // default test data path (//media/test/data).
- base::FilePath GetOriginalOrTestDataFilePath(const std::string& file_path) {
- const base::FilePath original_file_path = base::FilePath(file_path);
- if (base::PathExists(original_file_path))
- return original_file_path;
- if (test_data_path_)
- return base::FilePath(test_data_path_).Append(original_file_path);
- return GetTestDataFilePath(file_path);
- }
-
- // Used for InputSizeChange test case. The image size should be smaller than
- // |kDefaultJpegFilename|.
- std::unique_ptr<ParsedJpegImage> image_data_1280x720_black_;
- // Used for ResolutionChange test case.
- std::unique_ptr<ParsedJpegImage> image_data_640x368_black_;
- // Used for testing some drivers which will align the output resolution to a
- // multiple of 16. 640x360 will be aligned to 640x368.
- std::unique_ptr<ParsedJpegImage> image_data_640x360_black_;
- // Generated black image used to test different JPEG sampling formats.
- std::unique_ptr<ParsedJpegImage> image_data_640x368_422_black_;
- // Parsed data of "peach_pi-1280x720.jpg".
- std::unique_ptr<ParsedJpegImage> image_data_1280x720_default_;
- // Parsed data of failure image.
- std::unique_ptr<ParsedJpegImage> image_data_invalid_;
- // Parsed data for images with at least one odd dimension.
- std::vector<std::unique_ptr<ParsedJpegImage>> image_data_odd_;
- // Parsed data from command line.
- std::vector<std::unique_ptr<ParsedJpegImage>> image_data_user_;
- // Decode times for performance measurement.
- int perf_decode_times_;
-
- private:
- const base::FilePath::CharType* user_jpeg_filenames_;
- const base::FilePath::CharType* test_data_path_;
-};
-
-void JpegDecodeAcceleratorTestEnvironment::SetUp() {
- image_data_1280x720_black_ = ParsedJpegImage::CreateBlackImage(1280, 720);
- image_data_640x368_black_ = ParsedJpegImage::CreateBlackImage(640, 368);
- image_data_640x360_black_ = ParsedJpegImage::CreateBlackImage(640, 360);
- image_data_640x368_422_black_ = ParsedJpegImage::CreateBlackImage(
- 640, 368, SkJpegEncoder::Downsample::k422);
-
- image_data_1280x720_default_ = ParsedJpegImage::CreateFromFile(
- GetOriginalOrTestDataFilePath(kDefaultJpegFilename));
-
- image_data_invalid_ =
- std::make_unique<ParsedJpegImage>(base::FilePath("failure.jpg"));
- image_data_invalid_->data_str.resize(100, 0);
- image_data_invalid_->InitializeSizes(1280, 720);
-
- // Load test images with at least one odd dimension.
- for (const auto* filename : kOddJpegFilenames) {
- const base::FilePath input_file = GetOriginalOrTestDataFilePath(filename);
- auto image_data = ParsedJpegImage::CreateFromFile(input_file);
- image_data_odd_.push_back(std::move(image_data));
- }
-
- // |user_jpeg_filenames_| may include many files and use ';' as delimiter.
- std::vector<base::FilePath::StringType> filenames = base::SplitString(
- user_jpeg_filenames_, base::FilePath::StringType(1, ';'),
- base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
- for (const auto& filename : filenames) {
- const base::FilePath input_file = GetOriginalOrTestDataFilePath(filename);
- auto image_data = ParsedJpegImage::CreateFromFile(input_file);
- image_data_user_.push_back(std::move(image_data));
- }
-}
-
-enum ClientState {
- CS_CREATED,
- CS_INITIALIZED,
- CS_DECODE_PASS,
- CS_ERROR,
-};
-
-class JpegClient : public MjpegDecodeAccelerator::Client {
- public:
- // JpegClient takes ownership of |note|.
- JpegClient(
- const std::vector<ParsedJpegImage*>& test_image_files,
- std::unique_ptr<media::test::ClientStateNotification<ClientState>> note,
- bool is_skip);
- ~JpegClient() override;
- void CreateJpegDecoder();
- void StartDecode(int32_t bitstream_buffer_id, bool do_prepare_memory = true);
- void PrepareMemory(int32_t bitstream_buffer_id);
- bool GetSoftwareDecodeResult(int32_t bitstream_buffer_id);
-
- // MjpegDecodeAccelerator::Client implementation.
- void VideoFrameReady(int32_t bitstream_buffer_id) override;
- void NotifyError(int32_t bitstream_buffer_id,
- MjpegDecodeAccelerator::Error error) override;
-
- // Accessors.
- media::test::ClientStateNotification<ClientState>* note() const {
- return note_.get();
- }
-
- private:
- FRIEND_TEST_ALL_PREFIXES(JpegClientTest, GetMeanAbsoluteDifference);
-
- void SetState(ClientState new_state);
-
- // Save a video frame that contains a decoded JPEG. The output is a PNG file.
- // The suffix will be added before the .png extension.
- void SaveToFile(int32_t bitstream_buffer_id,
- const scoped_refptr<VideoFrame>& in_frame,
- const std::string& suffix = "");
-
- // Calculate mean absolute difference of hardware and software decode results
- // to check the similarity.
- double GetMeanAbsoluteDifference();
-
- // JpegClient doesn't own |test_image_files_|.
- const std::vector<ParsedJpegImage*>& test_image_files_;
-
- ClientState state_;
-
- // Used to notify another thread about the state. JpegClient owns this.
- std::unique_ptr<media::test::ClientStateNotification<ClientState>> note_;
-
- // Skip JDA decode result. Used for testing performance.
- bool is_skip_;
-
- // Mapped memory of input file.
- std::unique_ptr<base::SharedMemory> in_shm_;
- // Mapped memory of output buffer from hardware decoder.
- std::unique_ptr<base::SharedMemory> hw_out_shm_;
- // Video frame corresponding to the output of the hardware decoder.
- scoped_refptr<VideoFrame> hw_out_frame_;
- // Mapped memory of output buffer from software decoder.
- std::unique_ptr<base::SharedMemory> sw_out_shm_;
- // Video frame corresponding to the output of the software decoder.
- scoped_refptr<VideoFrame> sw_out_frame_;
-
- // This should be the first member to get destroyed because |decoder_|
- // potentially uses other members in the JpegClient instance. For example,
- // as decode tasks finish in a new thread spawned by |decoder_|, |hw_out_shm_|
- // can be accessed.
- std::unique_ptr<MjpegDecodeAccelerator> decoder_;
-
- DISALLOW_COPY_AND_ASSIGN(JpegClient);
-};
-
-JpegClient::JpegClient(
- const std::vector<ParsedJpegImage*>& test_image_files,
- std::unique_ptr<media::test::ClientStateNotification<ClientState>> note,
- bool is_skip)
- : test_image_files_(test_image_files),
- state_(CS_CREATED),
- note_(std::move(note)),
- is_skip_(is_skip) {}
-
-JpegClient::~JpegClient() {}
-
-void JpegClient::CreateJpegDecoder() {
- decoder_ = nullptr;
-
- auto jda_factories =
- GpuMjpegDecodeAcceleratorFactory::GetAcceleratorFactories();
- if (jda_factories.empty()) {
- LOG(ERROR) << "JpegDecodeAccelerator not supported on this platform.";
- SetState(CS_ERROR);
- return;
- }
-
- for (const auto& create_jda_func : jda_factories) {
- decoder_ = create_jda_func.Run(base::ThreadTaskRunnerHandle::Get());
- if (decoder_)
- break;
- }
- if (!decoder_) {
- LOG(ERROR) << "Failed to create JpegDecodeAccelerator.";
- SetState(CS_ERROR);
- return;
- }
-
- if (!decoder_->Initialize(this)) {
- LOG(ERROR) << "JpegDecodeAccelerator::Initialize() failed";
- SetState(CS_ERROR);
- return;
- }
- SetState(CS_INITIALIZED);
-}
-
-void JpegClient::VideoFrameReady(int32_t bitstream_buffer_id) {
- if (is_skip_) {
- SetState(CS_DECODE_PASS);
- return;
- }
-
- if (!GetSoftwareDecodeResult(bitstream_buffer_id)) {
- SetState(CS_ERROR);
- return;
- }
- if (g_save_to_file) {
- SaveToFile(bitstream_buffer_id, hw_out_frame_, "_hw");
- SaveToFile(bitstream_buffer_id, sw_out_frame_, "_sw");
- }
-
- double difference = GetMeanAbsoluteDifference();
- if (difference <= kDecodeSimilarityThreshold) {
- SetState(CS_DECODE_PASS);
- } else {
- LOG(ERROR) << "The mean absolute difference between software and hardware "
- << "decode is " << difference;
- SetState(CS_ERROR);
- }
-}
-
-void JpegClient::NotifyError(int32_t bitstream_buffer_id,
- MjpegDecodeAccelerator::Error error) {
- LOG(ERROR) << "Notifying of error " << error << " for buffer id "
- << bitstream_buffer_id;
- SetState(CS_ERROR);
-}
-
-void JpegClient::PrepareMemory(int32_t bitstream_buffer_id) {
- ParsedJpegImage* image_file = test_image_files_[bitstream_buffer_id];
-
- size_t input_size = image_file->data_str.size();
- if (!in_shm_.get() || input_size > in_shm_->mapped_size()) {
- in_shm_.reset(new base::SharedMemory);
- LOG_ASSERT(in_shm_->CreateAndMapAnonymous(input_size));
- }
- memcpy(in_shm_->memory(), image_file->data_str.data(), input_size);
-
- if (!hw_out_shm_.get() ||
- image_file->output_size > hw_out_shm_->mapped_size()) {
- hw_out_shm_.reset(new base::SharedMemory);
- LOG_ASSERT(hw_out_shm_->CreateAndMapAnonymous(image_file->output_size));
- }
- memset(hw_out_shm_->memory(), 0, image_file->output_size);
-
- if (!sw_out_shm_.get() ||
- image_file->output_size > sw_out_shm_->mapped_size()) {
- sw_out_shm_.reset(new base::SharedMemory);
- LOG_ASSERT(sw_out_shm_->CreateAndMapAnonymous(image_file->output_size));
- }
- memset(sw_out_shm_->memory(), 0, image_file->output_size);
-}
-
-void JpegClient::SetState(ClientState new_state) {
- DVLOG(2) << "Changing state " << state_ << "->" << new_state;
- note_->Notify(new_state);
- state_ = new_state;
-}
-
-void JpegClient::SaveToFile(int32_t bitstream_buffer_id,
- const scoped_refptr<VideoFrame>& in_frame,
- const std::string& suffix) {
- LOG_ASSERT(in_frame.get());
- ParsedJpegImage* image_file = test_image_files_[bitstream_buffer_id];
-
- // First convert to ARGB format. Note that in our case, the coded size and the
- // visible size will be the same.
- scoped_refptr<VideoFrame> argb_out_frame = VideoFrame::CreateFrame(
- VideoPixelFormat::PIXEL_FORMAT_ARGB, image_file->visible_size,
- gfx::Rect(image_file->visible_size), image_file->visible_size,
- base::TimeDelta());
- LOG_ASSERT(argb_out_frame);
- LOG_ASSERT(in_frame->visible_rect() == argb_out_frame->visible_rect());
-
- // Note that we use J420ToARGB instead of I420ToARGB so that the
- // kYuvJPEGConstants YUV-to-RGB conversion matrix is used.
- const int conversion_status =
- libyuv::J420ToARGB(in_frame->data(VideoFrame::kYPlane),
- in_frame->stride(VideoFrame::kYPlane),
- in_frame->data(VideoFrame::kUPlane),
- in_frame->stride(VideoFrame::kUPlane),
- in_frame->data(VideoFrame::kVPlane),
- in_frame->stride(VideoFrame::kVPlane),
- argb_out_frame->data(VideoFrame::kARGBPlane),
- argb_out_frame->stride(VideoFrame::kARGBPlane),
- argb_out_frame->visible_rect().width(),
- argb_out_frame->visible_rect().height());
- LOG_ASSERT(conversion_status == 0);
-
- // Save as a PNG.
- std::vector<uint8_t> png_output;
- const bool png_encode_status = gfx::PNGCodec::Encode(
- argb_out_frame->data(VideoFrame::kARGBPlane), gfx::PNGCodec::FORMAT_BGRA,
- argb_out_frame->visible_rect().size(),
- argb_out_frame->stride(VideoFrame::kARGBPlane),
- true, /* discard_transparency */
- std::vector<gfx::PNGCodec::Comment>(), &png_output);
- LOG_ASSERT(png_encode_status);
- const base::FilePath in_filename(image_file->filename());
- const base::FilePath out_filename =
- in_filename.ReplaceExtension(".png").InsertBeforeExtension(suffix);
- const int size = base::checked_cast<int>(png_output.size());
- const int file_written_bytes = base::WriteFile(
- out_filename, reinterpret_cast<char*>(png_output.data()), size);
- LOG_ASSERT(file_written_bytes == size);
-}
-
-double JpegClient::GetMeanAbsoluteDifference() {
- double mean_abs_difference = 0;
- size_t num_samples = 0;
- const size_t planes[] = {VideoFrame::kYPlane, VideoFrame::kUPlane,
- VideoFrame::kVPlane};
- for (size_t plane : planes) {
- const uint8_t* hw_data = hw_out_frame_->data(plane);
- const uint8_t* sw_data = sw_out_frame_->data(plane);
- LOG_ASSERT(hw_out_frame_->visible_rect() == sw_out_frame_->visible_rect());
- const size_t rows = VideoFrame::Rows(
- plane, PIXEL_FORMAT_I420, hw_out_frame_->visible_rect().height());
- const size_t columns = VideoFrame::Columns(
- plane, PIXEL_FORMAT_I420, hw_out_frame_->visible_rect().width());
- LOG_ASSERT(hw_out_frame_->stride(plane) == sw_out_frame_->stride(plane));
- const int stride = hw_out_frame_->stride(plane);
- for (size_t row = 0; row < rows; ++row) {
- for (size_t col = 0; col < columns; ++col)
- mean_abs_difference += std::abs(hw_data[col] - sw_data[col]);
- hw_data += stride;
- sw_data += stride;
- }
- num_samples += rows * columns;
- }
- LOG_ASSERT(num_samples > 0);
- mean_abs_difference /= num_samples;
- return mean_abs_difference;
-}
-
-void JpegClient::StartDecode(int32_t bitstream_buffer_id,
- bool do_prepare_memory) {
- DCHECK_LT(static_cast<size_t>(bitstream_buffer_id), test_image_files_.size());
- ParsedJpegImage* image_file = test_image_files_[bitstream_buffer_id];
-
- if (do_prepare_memory)
- PrepareMemory(bitstream_buffer_id);
-
- base::SharedMemoryHandle dup_handle;
- dup_handle = base::SharedMemory::DuplicateHandle(in_shm_->handle());
- BitstreamBuffer bitstream_buffer(bitstream_buffer_id, dup_handle,
- image_file->data_str.size());
-
- hw_out_frame_ = VideoFrame::WrapExternalSharedMemory(
- PIXEL_FORMAT_I420, image_file->coded_size,
- gfx::Rect(image_file->visible_size), image_file->visible_size,
- static_cast<uint8_t*>(hw_out_shm_->memory()), image_file->output_size,
- hw_out_shm_->handle(), 0, base::TimeDelta());
- LOG_ASSERT(hw_out_frame_.get());
-
- decoder_->Decode(bitstream_buffer, hw_out_frame_);
-}
-
-bool JpegClient::GetSoftwareDecodeResult(int32_t bitstream_buffer_id) {
- ParsedJpegImage* image_file = test_image_files_[bitstream_buffer_id];
- sw_out_frame_ = VideoFrame::WrapExternalSharedMemory(
- PIXEL_FORMAT_I420, image_file->coded_size,
- gfx::Rect(image_file->visible_size), image_file->visible_size,
- static_cast<uint8_t*>(sw_out_shm_->memory()), image_file->output_size,
- sw_out_shm_->handle(), 0, base::TimeDelta());
- LOG_ASSERT(sw_out_shm_.get());
-
- if (libyuv::ConvertToI420(static_cast<uint8_t*>(in_shm_->memory()),
- image_file->data_str.size(),
- sw_out_frame_->data(VideoFrame::kYPlane),
- sw_out_frame_->stride(VideoFrame::kYPlane),
- sw_out_frame_->data(VideoFrame::kUPlane),
- sw_out_frame_->stride(VideoFrame::kUPlane),
- sw_out_frame_->data(VideoFrame::kVPlane),
- sw_out_frame_->stride(VideoFrame::kVPlane), 0, 0,
- sw_out_frame_->visible_rect().width(),
- sw_out_frame_->visible_rect().height(),
- sw_out_frame_->visible_rect().width(),
- sw_out_frame_->visible_rect().height(),
- libyuv::kRotate0, libyuv::FOURCC_MJPG) != 0) {
- LOG(ERROR) << "Software decode " << image_file->filename() << " failed.";
- return false;
- }
- return true;
-}
-
-// This class holds a |client| that will be deleted on |task_runner|. This is
-// necessary because |client->decoder_| expects to be destroyed on the thread on
-// which it was created.
-class ScopedJpegClient {
- public:
- ScopedJpegClient(scoped_refptr<base::SingleThreadTaskRunner> task_runner,
- std::unique_ptr<JpegClient> client)
- : task_runner_(task_runner), client_(std::move(client)) {}
- ~ScopedJpegClient() {
- task_runner_->DeleteSoon(FROM_HERE, std::move(client_));
- }
- JpegClient* client() const { return client_.get(); }
-
- private:
- scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
- std::unique_ptr<JpegClient> client_;
-
- DISALLOW_COPY_AND_ASSIGN(ScopedJpegClient);
-};
-
-class JpegDecodeAcceleratorTest : public ::testing::Test {
- protected:
- JpegDecodeAcceleratorTest() = default;
-
- void TestDecode(const std::vector<ParsedJpegImage*>& images,
- const std::vector<ClientState>& expected_status,
- size_t num_concurrent_decoders = 1);
- void PerfDecodeByJDA(int decode_times,
- const std::vector<ParsedJpegImage*>& images);
- void PerfDecodeBySW(int decode_times,
- const std::vector<ParsedJpegImage*>& images);
-
- protected:
- DISALLOW_COPY_AND_ASSIGN(JpegDecodeAcceleratorTest);
-};
-
-void JpegDecodeAcceleratorTest::TestDecode(
- const std::vector<ParsedJpegImage*>& images,
- const std::vector<ClientState>& expected_status,
- size_t num_concurrent_decoders) {
- LOG_ASSERT(images.size() >= expected_status.size());
- base::Thread decoder_thread("DecoderThread");
- ASSERT_TRUE(decoder_thread.Start());
-
- std::vector<std::unique_ptr<ScopedJpegClient>> scoped_clients;
-
- for (size_t i = 0; i < num_concurrent_decoders; i++) {
- auto client = std::make_unique<JpegClient>(
- images,
- std::make_unique<media::test::ClientStateNotification<ClientState>>(),
- false /* is_skip */);
- scoped_clients.emplace_back(
- new ScopedJpegClient(decoder_thread.task_runner(), std::move(client)));
-
- decoder_thread.task_runner()->PostTask(
- FROM_HERE,
- base::BindOnce(&JpegClient::CreateJpegDecoder,
- base::Unretained(scoped_clients.back()->client())));
- ASSERT_EQ(scoped_clients.back()->client()->note()->Wait(), CS_INITIALIZED);
- }
-
- for (size_t index = 0; index < images.size(); index++) {
- for (const auto& scoped_client : scoped_clients) {
- decoder_thread.task_runner()->PostTask(
- FROM_HERE, base::BindOnce(&JpegClient::StartDecode,
- base::Unretained(scoped_client->client()),
- index, true));
- }
- if (index < expected_status.size()) {
- for (const auto& scoped_client : scoped_clients) {
- ASSERT_EQ(scoped_client->client()->note()->Wait(),
- expected_status[index]);
- }
- }
- }
-}
-
-void JpegDecodeAcceleratorTest::PerfDecodeByJDA(
- int decode_times,
- const std::vector<ParsedJpegImage*>& images) {
- LOG_ASSERT(images.size() == 1);
- base::Thread decoder_thread("DecoderThread");
- ASSERT_TRUE(decoder_thread.Start());
-
- auto client = std::make_unique<JpegClient>(
- images,
- std::make_unique<media::test::ClientStateNotification<ClientState>>(),
- true /* is_skip */);
- auto scoped_client = std::make_unique<ScopedJpegClient>(
- decoder_thread.task_runner(), std::move(client));
-
- decoder_thread.task_runner()->PostTask(
- FROM_HERE, base::BindOnce(&JpegClient::CreateJpegDecoder,
- base::Unretained(scoped_client->client())));
- ASSERT_EQ(scoped_client->client()->note()->Wait(), CS_INITIALIZED);
-
- const int32_t bitstream_buffer_id = 0;
- scoped_client->client()->PrepareMemory(bitstream_buffer_id);
- const base::ElapsedTimer timer;
- for (int index = 0; index < decode_times; index++) {
- decoder_thread.task_runner()->PostTask(
- FROM_HERE, base::BindOnce(&JpegClient::StartDecode,
- base::Unretained(scoped_client->client()),
- bitstream_buffer_id, false));
- ASSERT_EQ(scoped_client->client()->note()->Wait(), CS_DECODE_PASS);
- }
- const base::TimeDelta elapsed_time = timer.Elapsed();
- LOG(INFO) << elapsed_time << " for " << decode_times
- << " iterations (avg: " << elapsed_time / decode_times << ") -- "
- << images[0]->visible_size.ToString() << ", ("
- << images[0]->visible_size.GetArea() << " pixels) "
- << images[0]->filename();
-}
-
-void JpegDecodeAcceleratorTest::PerfDecodeBySW(
- int decode_times,
- const std::vector<ParsedJpegImage*>& images) {
- LOG_ASSERT(images.size() == 1);
-
- std::unique_ptr<JpegClient> client = std::make_unique<JpegClient>(
- images,
- std::make_unique<media::test::ClientStateNotification<ClientState>>(),
- true /* is_skip */);
-
- const int32_t bitstream_buffer_id = 0;
- client->PrepareMemory(bitstream_buffer_id);
- const base::ElapsedTimer timer;
- for (int index = 0; index < decode_times; index++)
- client->GetSoftwareDecodeResult(bitstream_buffer_id);
- const base::TimeDelta elapsed_time = timer.Elapsed();
- LOG(INFO) << elapsed_time << " for " << decode_times
- << " iterations (avg: " << elapsed_time / decode_times << ") -- "
- << images[0]->visible_size.ToString() << ", ("
- << images[0]->visible_size.GetArea() << " pixels) "
- << images[0]->filename();
-}
-
-// Returns a VideoFrame that contains YUV data using 4:2:0 subsampling. The
-// visible size is 3x3, and the coded size is 4x4 which is 3x3 rounded up to the
-// next even dimensions.
-scoped_refptr<VideoFrame> GetTestDecodedData() {
- scoped_refptr<VideoFrame> frame = VideoFrame::CreateZeroInitializedFrame(
- PIXEL_FORMAT_I420, gfx::Size(4, 4) /* coded_size */,
- gfx::Rect(3, 3) /* visible_rect */, gfx::Size(3, 3) /* natural_size */,
- base::TimeDelta());
- LOG_ASSERT(frame.get());
- uint8_t* y_data = frame->data(VideoFrame::kYPlane);
- int y_stride = frame->stride(VideoFrame::kYPlane);
- uint8_t* u_data = frame->data(VideoFrame::kUPlane);
- int u_stride = frame->stride(VideoFrame::kUPlane);
- uint8_t* v_data = frame->data(VideoFrame::kVPlane);
- int v_stride = frame->stride(VideoFrame::kVPlane);
-
- // Data for the Y plane.
- memcpy(&y_data[0 * y_stride], "\x01\x02\x03", 3);
- memcpy(&y_data[1 * y_stride], "\x04\x05\x06", 3);
- memcpy(&y_data[2 * y_stride], "\x07\x08\x09", 3);
-
- // Data for the U plane.
- memcpy(&u_data[0 * u_stride], "\x0A\x0B", 2);
- memcpy(&u_data[1 * u_stride], "\x0C\x0D", 2);
-
- // Data for the V plane.
- memcpy(&v_data[0 * v_stride], "\x0E\x0F", 2);
- memcpy(&v_data[1 * v_stride], "\x10\x11", 2);
-
- return frame;
-}
-
-TEST(JpegClientTest, GetMeanAbsoluteDifference) {
- JpegClient client(std::vector<ParsedJpegImage*>(), nullptr, false);
- client.hw_out_frame_ = GetTestDecodedData();
- client.sw_out_frame_ = GetTestDecodedData();
-
- uint8_t* y_data = client.sw_out_frame_->data(VideoFrame::kYPlane);
- const int y_stride = client.sw_out_frame_->stride(VideoFrame::kYPlane);
- uint8_t* u_data = client.sw_out_frame_->data(VideoFrame::kUPlane);
- const int u_stride = client.sw_out_frame_->stride(VideoFrame::kUPlane);
- uint8_t* v_data = client.sw_out_frame_->data(VideoFrame::kVPlane);
- const int v_stride = client.sw_out_frame_->stride(VideoFrame::kVPlane);
-
- // Change some visible data in the software decoding result.
- double expected_abs_mean_diff = 0;
- y_data[0] = 0xF0; // Previously 0x01.
- expected_abs_mean_diff += 0xF0 - 0x01;
- y_data[y_stride + 1] = 0x8A; // Previously 0x05.
- expected_abs_mean_diff += 0x8A - 0x05;
- u_data[u_stride] = 0x02; // Previously 0x0C.
- expected_abs_mean_diff += 0x0C - 0x02;
- v_data[v_stride + 1] = 0x54; // Previously 0x11.
- expected_abs_mean_diff += 0x54 - 0x11;
- expected_abs_mean_diff /= 3 * 3 + 2 * 2 * 2;
-
- constexpr double kMaxAllowedDifference = 1e-7;
- EXPECT_NEAR(expected_abs_mean_diff, client.GetMeanAbsoluteDifference(),
- kMaxAllowedDifference);
-
- // Change some non-visible data in the software decoding result, i.e., part of
- // the stride padding. This should not affect the absolute mean difference.
- y_data[3] = 0xAB;
- EXPECT_NEAR(expected_abs_mean_diff, client.GetMeanAbsoluteDifference(),
- kMaxAllowedDifference);
-}
-
-TEST_F(JpegDecodeAcceleratorTest, SimpleDecode) {
- std::vector<ParsedJpegImage*> images;
- for (auto& image : g_env->image_data_user_)
- images.push_back(image.get());
- const std::vector<ClientState> expected_status(images.size(), CS_DECODE_PASS);
- TestDecode(images, expected_status);
-}
-
-TEST_F(JpegDecodeAcceleratorTest, MultipleDecoders) {
- std::vector<ParsedJpegImage*> images;
- for (auto& image : g_env->image_data_user_)
- images.push_back(image.get());
- const std::vector<ClientState> expected_status(images.size(), CS_DECODE_PASS);
- TestDecode(images, expected_status, 3 /* num_concurrent_decoders */);
-}
-
-TEST_F(JpegDecodeAcceleratorTest, OddDimensions) {
- std::vector<ParsedJpegImage*> images;
- for (auto& image : g_env->image_data_odd_)
- images.push_back(image.get());
- const std::vector<ClientState> expected_status(images.size(), CS_DECODE_PASS);
- TestDecode(images, expected_status);
-}
-
-TEST_F(JpegDecodeAcceleratorTest, InputSizeChange) {
- // The size of |image_data_1280x720_black_| is smaller than
- // |image_data_1280x720_default_|.
- const std::vector<ParsedJpegImage*> images = {
- g_env->image_data_1280x720_black_.get(),
- g_env->image_data_1280x720_default_.get(),
- g_env->image_data_1280x720_black_.get()};
- const std::vector<ClientState> expected_status(images.size(), CS_DECODE_PASS);
- TestDecode(images, expected_status);
-}
-
-TEST_F(JpegDecodeAcceleratorTest, ResolutionChange) {
- const std::vector<ParsedJpegImage*> images = {
- g_env->image_data_640x368_black_.get(),
- g_env->image_data_1280x720_default_.get(),
- g_env->image_data_640x368_black_.get()};
- const std::vector<ClientState> expected_status(images.size(), CS_DECODE_PASS);
- TestDecode(images, expected_status);
-}
-
-TEST_F(JpegDecodeAcceleratorTest, CodedSizeAlignment) {
- const std::vector<ParsedJpegImage*> images = {
- g_env->image_data_640x360_black_.get()};
- const std::vector<ClientState> expected_status = {CS_DECODE_PASS};
- TestDecode(images, expected_status);
-}
-
-// Tests whether different JPEG sampling formats will be decoded correctly.
-TEST_F(JpegDecodeAcceleratorTest, SamplingFormatChange) {
- const std::vector<ParsedJpegImage*> images = {
- g_env->image_data_640x368_black_.get(),
- g_env->image_data_640x368_422_black_.get()};
- const std::vector<ClientState> expected_status(images.size(), CS_DECODE_PASS);
- TestDecode(images, expected_status);
-}
-
-TEST_F(JpegDecodeAcceleratorTest, FailureJpeg) {
- const std::vector<ParsedJpegImage*> images = {
- g_env->image_data_invalid_.get()};
- const std::vector<ClientState> expected_status = {CS_ERROR};
- TestDecode(images, expected_status);
-}
-
-TEST_F(JpegDecodeAcceleratorTest, KeepDecodeAfterFailure) {
- const std::vector<ParsedJpegImage*> images = {
- g_env->image_data_invalid_.get(),
- g_env->image_data_1280x720_default_.get()};
- const std::vector<ClientState> expected_status = {CS_ERROR, CS_DECODE_PASS};
- TestDecode(images, expected_status);
-}
-
-TEST_F(JpegDecodeAcceleratorTest, Abort) {
- constexpr size_t kNumOfJpegToDecode = 5;
- const std::vector<ParsedJpegImage*> images(
- kNumOfJpegToDecode, g_env->image_data_1280x720_default_.get());
- // Verify only one decode success to ensure both decoders have started the
- // decoding. Then destroy the first decoder when it is still decoding. The
- // kernel should not crash during this test.
- const std::vector<ClientState> expected_status = {CS_DECODE_PASS};
- TestDecode(images, expected_status, 2 /* num_concurrent_decoders */);
-}
-
-TEST_F(JpegDecodeAcceleratorTest, PerfJDA) {
- // Only the first image will be used for perf testing.
- ASSERT_GE(g_env->image_data_user_.size(), 1u);
- const std::vector<ParsedJpegImage*> images = {
- g_env->image_data_user_[0].get()};
- PerfDecodeByJDA(g_env->perf_decode_times_, images);
-}
-
-TEST_F(JpegDecodeAcceleratorTest, PerfSW) {
- // Only the first image will be used for perf testing.
- ASSERT_GE(g_env->image_data_user_.size(), 1u);
- const std::vector<ParsedJpegImage*> images = {
- g_env->image_data_user_[0].get()};
- PerfDecodeBySW(g_env->perf_decode_times_, images);
-}
-
-} // namespace
-} // namespace media
-
-int main(int argc, char** argv) {
- testing::InitGoogleTest(&argc, argv);
- base::CommandLine::Init(argc, argv);
- mojo::core::Init();
- base::ShadowingAtExitManager at_exit_manager;
-
- // Needed to enable DVLOG through --vmodule.
- logging::LoggingSettings settings;
- settings.logging_dest = logging::LOG_TO_SYSTEM_DEBUG_LOG;
- LOG_ASSERT(logging::InitLogging(settings));
-
- const base::CommandLine* cmd_line = base::CommandLine::ForCurrentProcess();
- DCHECK(cmd_line);
-
- const base::FilePath::CharType* jpeg_filenames = nullptr;
- const base::FilePath::CharType* test_data_path = nullptr;
- int perf_decode_times = 0;
- base::CommandLine::SwitchMap switches = cmd_line->GetSwitches();
- for (base::CommandLine::SwitchMap::const_iterator it = switches.begin();
- it != switches.end(); ++it) {
- // jpeg_filenames can include one or many files and use ';' as delimiter.
- if (it->first == "jpeg_filenames") {
- jpeg_filenames = it->second.c_str();
- continue;
- }
- if (it->first == "test_data_path") {
- test_data_path = it->second.c_str();
- continue;
- }
- if (it->first == "perf_decode_times") {
- perf_decode_times = std::stoi(it->second);
- continue;
- }
- if (it->first == "save_to_file") {
- media::g_save_to_file = true;
- continue;
- }
- if (it->first == "v" || it->first == "vmodule")
- continue;
- if (it->first == "h" || it->first == "help")
- continue;
- LOG(ERROR) << "Unexpected switch: " << it->first << ":" << it->second;
- return -EINVAL;
- }
-#if BUILDFLAG(USE_VAAPI)
- media::VaapiWrapper::PreSandboxInitialization();
-#endif
-
- media::g_env = reinterpret_cast<media::JpegDecodeAcceleratorTestEnvironment*>(
- testing::AddGlobalTestEnvironment(
- new media::JpegDecodeAcceleratorTestEnvironment(
- jpeg_filenames, test_data_path, perf_decode_times)));
-
- return RUN_ALL_TESTS();
-}
diff --git a/chromium/media/gpu/jpeg_encode_accelerator_unittest.cc b/chromium/media/gpu/jpeg_encode_accelerator_unittest.cc
deleted file mode 100644
index f6fe7bccc44..00000000000
--- a/chromium/media/gpu/jpeg_encode_accelerator_unittest.cc
+++ /dev/null
@@ -1,762 +0,0 @@
-// Copyright 2018 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-#include <stddef.h>
-#include <stdint.h>
-#include <string.h>
-
-#include <memory>
-
-#include "base/at_exit.h"
-#include "base/bind.h"
-#include "base/command_line.h"
-#include "base/files/file_util.h"
-#include "base/logging.h"
-#include "base/macros.h"
-#include "base/path_service.h"
-#include "base/strings/string_number_conversions.h"
-#include "base/strings/string_piece.h"
-#include "base/strings/string_split.h"
-#include "base/strings/stringprintf.h"
-#include "base/test/scoped_task_environment.h"
-#include "base/test/test_timeouts.h"
-#include "base/threading/thread.h"
-#include "base/threading/thread_task_runner_handle.h"
-#include "build/build_config.h"
-#include "media/base/test_data_util.h"
-#include "media/filters/jpeg_parser.h"
-#include "media/gpu/buildflags.h"
-#include "media/gpu/gpu_jpeg_encode_accelerator_factory.h"
-#include "media/gpu/test/video_accelerator_unittest_helpers.h"
-#include "media/video/jpeg_encode_accelerator.h"
-#include "mojo/core/embedder/embedder.h"
-#include "testing/gtest/include/gtest/gtest.h"
-#include "third_party/libyuv/include/libyuv.h"
-#include "ui/gfx/codec/jpeg_codec.h"
-
-#if BUILDFLAG(USE_VAAPI)
-#include "media/gpu/vaapi/vaapi_wrapper.h"
-#endif
-
-namespace media {
-namespace {
-
-// Default test image file.
-const base::FilePath::CharType kDefaultYuvFilename[] =
- FILE_PATH_LITERAL("bali_640x360_P420.yuv:640x360");
-// Whether to save encode results to files. Output files will be saved in the
-// same directory as the input files, with the '.jpg' extension appended to
-// their names. The encode result of generated images is written to the current
-// folder using HxW_[black|white].jpg as output file name.
-bool g_save_to_file = false;
-
-const double kMeanDiffThreshold = 10.0;
-const int kJpegDefaultQuality = 90;
-
-// Environment to create test data for all test cases.
-class JpegEncodeAcceleratorTestEnvironment;
-JpegEncodeAcceleratorTestEnvironment* g_env;
-
-struct TestImage {
- TestImage(std::vector<uint8_t> image_data,
- const gfx::Size& visible_size,
- const base::FilePath output_filename)
- : image_data(std::move(image_data)),
- visible_size(visible_size),
- output_filename(output_filename) {}
-
- // Test image data.
- std::vector<uint8_t> image_data;
- gfx::Size visible_size;
-
- // Output filename, only used when '--save_to_file' is specified.
- base::FilePath output_filename;
- size_t output_size = 0;
-};
-
-enum class ClientState {
- CREATED,
- INITIALIZED,
- ENCODE_PASS,
- ERROR,
-};
-
-class JpegEncodeAcceleratorTestEnvironment : public ::testing::Environment {
- public:
- JpegEncodeAcceleratorTestEnvironment(
- const base::FilePath::CharType* yuv_filenames,
- const base::FilePath log_path,
- const int repeat)
- : repeat_(repeat), log_path_(log_path) {
- user_yuv_files_ = yuv_filenames ? yuv_filenames : kDefaultYuvFilename;
- }
- void SetUp() override;
- void TearDown() override;
-
- void LogToFile(const std::string& key, const std::string& value);
-
- // Read image from |filename| to |image_data|.
- std::unique_ptr<TestImage> ReadTestYuvImage(const base::FilePath& filename,
- const gfx::Size& image_size);
-
- // Returns a file path for a file in what name specified or media/test/data
- // directory. If the original file path is existed, returns it first.
- base::FilePath GetOriginalOrTestDataFilePath(const std::string& name);
-
- // Parsed data from command line.
- std::vector<std::unique_ptr<TestImage>> image_data_user_;
-
- // Generated 2560x1920 white I420 image.
- std::unique_ptr<TestImage> image_data_2560x1920_white_;
- // Scarlet doesn't support 1080 width, it only suports 1088 width.
- // Generated 1280x720 white I420 image.
- std::unique_ptr<TestImage> image_data_1280x720_white_;
- // Generated 640x480 black I420 image.
- std::unique_ptr<TestImage> image_data_640x480_black_;
- // Generated 640x368 black I420 image.
- std::unique_ptr<TestImage> image_data_640x368_black_;
- // Generated 640x360 black I420 image.
- std::unique_ptr<TestImage> image_data_640x360_black_;
-
- // Number of times SimpleEncodeTest should repeat for an image.
- const size_t repeat_;
-
- private:
- // Create black or white test image with specified |size|.
- std::unique_ptr<TestImage> CreateTestYuvImage(const gfx::Size& image_size,
- bool is_black);
-
- const base::FilePath::CharType* user_yuv_files_;
- const base::FilePath log_path_;
- std::unique_ptr<base::File> log_file_;
-};
-
-void JpegEncodeAcceleratorTestEnvironment::SetUp() {
- // Since base::test::ScopedTaskEnvironment will call
- // TestTimeouts::action_max_timeout(), TestTimeouts::Initialize() needs to be
- // called in advance.
- TestTimeouts::Initialize();
-
- if (!log_path_.empty()) {
- log_file_.reset(new base::File(
- log_path_, base::File::FLAG_CREATE_ALWAYS | base::File::FLAG_WRITE));
- LOG_ASSERT(log_file_->IsValid());
- }
-
- image_data_2560x1920_white_ =
- CreateTestYuvImage(gfx::Size(2560, 1920), false);
- image_data_1280x720_white_ = CreateTestYuvImage(gfx::Size(1280, 720), false);
- image_data_640x480_black_ = CreateTestYuvImage(gfx::Size(640, 480), true);
- image_data_640x368_black_ = CreateTestYuvImage(gfx::Size(640, 368), true);
- image_data_640x360_black_ = CreateTestYuvImage(gfx::Size(640, 360), true);
-
- // |user_yuv_files_| may include many files and use ';' as delimiter.
- std::vector<base::FilePath::StringType> files =
- base::SplitString(user_yuv_files_, base::FilePath::StringType(1, ';'),
- base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
- for (const auto& file : files) {
- std::vector<base::FilePath::StringType> filename_and_size =
- base::SplitString(file, base::FilePath::StringType(1, ':'),
- base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
- ASSERT_EQ(2u, filename_and_size.size());
- base::FilePath::StringType filename(filename_and_size[0]);
-
- std::vector<base::FilePath::StringType> image_resolution =
- base::SplitString(filename_and_size[1],
- base::FilePath::StringType(1, 'x'),
- base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
- ASSERT_EQ(2u, image_resolution.size());
- int width = 0, height = 0;
- ASSERT_TRUE(base::StringToInt(image_resolution[0], &width));
- ASSERT_TRUE(base::StringToInt(image_resolution[1], &height));
-
- gfx::Size image_size(width, height);
- ASSERT_TRUE(!image_size.IsEmpty());
-
- base::FilePath input_file = GetOriginalOrTestDataFilePath(filename);
- auto image_data = ReadTestYuvImage(input_file, image_size);
- image_data_user_.push_back(std::move(image_data));
- }
-}
-
-void JpegEncodeAcceleratorTestEnvironment::TearDown() {
- log_file_.reset();
-}
-
-void JpegEncodeAcceleratorTestEnvironment::LogToFile(const std::string& key,
- const std::string& value) {
- std::string s = base::StringPrintf("%s: %s\n", key.c_str(), value.c_str());
- LOG(INFO) << s;
- if (log_file_) {
- log_file_->WriteAtCurrentPos(s.data(), static_cast<int>(s.length()));
- }
-}
-
-std::unique_ptr<TestImage>
-JpegEncodeAcceleratorTestEnvironment::CreateTestYuvImage(
- const gfx::Size& image_size,
- bool is_black) {
- const size_t num_pixels = image_size.width() * image_size.height();
- std::vector<uint8_t> image_data(num_pixels * 3 / 2);
-
- // Fill in Y values.
- std::fill(image_data.begin(), image_data.begin() + num_pixels,
- is_black ? 0 : 255);
- // Fill in U and V values.
- std::fill(image_data.begin() + num_pixels, image_data.end(), 128);
-
- base::FilePath output_filename(std::to_string(image_size.width()) + "x" +
- std::to_string(image_size.height()) +
- (is_black ? "_black.jpg" : "_white.jpg"));
- return std::make_unique<TestImage>(std::move(image_data), image_size,
- output_filename);
-}
-
-std::unique_ptr<TestImage>
-JpegEncodeAcceleratorTestEnvironment::ReadTestYuvImage(
- const base::FilePath& input_file,
- const gfx::Size& image_size) {
- int64_t file_size = 0;
- LOG_ASSERT(GetFileSize(input_file, &file_size));
- std::vector<uint8_t> image_data(file_size);
- LOG_ASSERT(ReadFile(input_file, reinterpret_cast<char*>(image_data.data()),
- file_size) == file_size);
-
- base::FilePath output_filename = input_file.AddExtension(".jpg");
- return std::make_unique<TestImage>(std::move(image_data), image_size,
- output_filename);
-}
-
-base::FilePath
-JpegEncodeAcceleratorTestEnvironment::GetOriginalOrTestDataFilePath(
- const std::string& name) {
- base::FilePath file_path = base::FilePath(name);
- if (!PathExists(file_path)) {
- file_path = GetTestDataFilePath(name);
- }
- VLOG(3) << "Using file path " << file_path.value();
- return file_path;
-}
-
-class JpegClient : public JpegEncodeAccelerator::Client {
- public:
- JpegClient(const std::vector<TestImage*>& test_aligned_images,
- const std::vector<TestImage*>& test_images,
- media::test::ClientStateNotification<ClientState>* note);
- ~JpegClient() override;
- void CreateJpegEncoder();
- void DestroyJpegEncoder();
- void StartEncode(int32_t bitstream_buffer_id);
-
- // JpegEncodeAccelerator::Client implementation.
- void VideoFrameReady(int32_t buffer_id, size_t encoded_picture_size) override;
- void NotifyError(int32_t buffer_id,
- JpegEncodeAccelerator::Status status) override;
-
- private:
- // Get the related test image file.
- TestImage* GetTestImage(int32_t bitstream_buffer_id);
- void PrepareMemory(int32_t bitstream_buffer_id);
- void SetState(ClientState new_state);
- void SaveToFile(TestImage* test_image, size_t hw_size, size_t sw_size);
- bool CompareHardwareAndSoftwareResults(int width,
- int height,
- size_t hw_encoded_size,
- size_t sw_encoded_size);
-
- // Calculate mean absolute difference of hardware and software encode results
- // for verifying the similarity.
- double GetMeanAbsoluteDifference(uint8_t* hw_yuv_result,
- uint8_t* sw_yuv_result,
- size_t yuv_size);
-
- // Generate software encode result and populate it into |sw_out_shm_|.
- bool GetSoftwareEncodeResult(int width,
- int height,
- size_t* sw_encoded_size,
- base::TimeDelta* sw_encode_time);
-
- // JpegClient doesn't own |test_aligned_images_|.
- // The resolutions of these images are all aligned. HW Accelerator must
- // support them.
- const std::vector<TestImage*>& test_aligned_images_;
-
- // JpegClient doesn't own |test_images_|.
- // The resolutions of these images may be unaligned.
- const std::vector<TestImage*>& test_images_;
-
- // A map that stores HW encoding start timestamp for each output buffer id.
- std::map<int, base::TimeTicks> buffer_id_to_start_time_;
-
- std::unique_ptr<JpegEncodeAccelerator> encoder_;
- ClientState state_;
-
- // Used to notify another thread about the state. JpegClient does not own
- // this.
- media::test::ClientStateNotification<ClientState>* note_;
-
- // Output buffer prepared for JpegEncodeAccelerator.
- std::unique_ptr<BitstreamBuffer> encoded_buffer_;
-
- // Mapped memory of input file.
- std::unique_ptr<base::SharedMemory> in_shm_;
- // Mapped memory of output buffer from hardware encoder.
- std::unique_ptr<base::SharedMemory> hw_out_shm_;
- // Mapped memory of output buffer from software encoder.
- std::unique_ptr<base::SharedMemory> sw_out_shm_;
-
- DISALLOW_COPY_AND_ASSIGN(JpegClient);
-};
-
-JpegClient::JpegClient(const std::vector<TestImage*>& test_aligned_images,
- const std::vector<TestImage*>& test_images,
- media::test::ClientStateNotification<ClientState>* note)
- : test_aligned_images_(test_aligned_images),
- test_images_(test_images),
- state_(ClientState::CREATED),
- note_(note) {}
-
-JpegClient::~JpegClient() {}
-
-void JpegClient::CreateJpegEncoder() {
- auto jea_factories =
- GpuJpegEncodeAcceleratorFactory::GetAcceleratorFactories();
- if (jea_factories.size() == 0) {
- LOG(ERROR) << "JpegEncodeAccelerator is not supported on this platform.";
- SetState(ClientState::ERROR);
- return;
- }
-
- for (const auto& create_jea_func : jea_factories) {
- encoder_ = create_jea_func.Run(base::ThreadTaskRunnerHandle::Get());
- if (encoder_)
- break;
- }
-
- if (!encoder_) {
- LOG(ERROR) << "Failed to create JpegEncodeAccelerator.";
- SetState(ClientState::ERROR);
- return;
- }
-
- JpegEncodeAccelerator::Status status = encoder_->Initialize(this);
- if (status != JpegEncodeAccelerator::ENCODE_OK) {
- LOG(ERROR) << "JpegEncodeAccelerator::Initialize() failed: " << status;
- SetState(ClientState::ERROR);
- return;
- }
- SetState(ClientState::INITIALIZED);
-}
-
-void JpegClient::DestroyJpegEncoder() {
- encoder_.reset();
-}
-
-void JpegClient::VideoFrameReady(int32_t buffer_id, size_t hw_encoded_size) {
- base::TimeTicks hw_encode_end = base::TimeTicks::Now();
- base::TimeDelta elapsed_hw =
- hw_encode_end - buffer_id_to_start_time_[buffer_id];
-
- TestImage* test_image;
- if (buffer_id < static_cast<int32_t>(test_aligned_images_.size())) {
- test_image = test_aligned_images_[buffer_id];
- } else {
- test_image = test_images_[buffer_id - test_aligned_images_.size()];
- }
-
- size_t sw_encoded_size = 0;
- base::TimeDelta elapsed_sw;
- LOG_ASSERT(GetSoftwareEncodeResult(test_image->visible_size.width(),
- test_image->visible_size.height(),
- &sw_encoded_size, &elapsed_sw));
-
- g_env->LogToFile("hw_encode_time",
- base::NumberToString(elapsed_hw.InMicroseconds()));
- g_env->LogToFile("sw_encode_time",
- base::NumberToString(elapsed_sw.InMicroseconds()));
-
- if (g_save_to_file) {
- SaveToFile(test_image, hw_encoded_size, sw_encoded_size);
- }
-
- if (!CompareHardwareAndSoftwareResults(test_image->visible_size.width(),
- test_image->visible_size.height(),
- hw_encoded_size, sw_encoded_size)) {
- SetState(ClientState::ERROR);
- } else {
- SetState(ClientState::ENCODE_PASS);
- }
-
- encoded_buffer_.reset(nullptr);
-}
-
-bool JpegClient::GetSoftwareEncodeResult(int width,
- int height,
- size_t* sw_encoded_size,
- base::TimeDelta* sw_encode_time) {
- base::TimeTicks sw_encode_start = base::TimeTicks::Now();
- int y_stride = width;
- int u_stride = width / 2;
- int v_stride = u_stride;
- uint8_t* yuv_src = static_cast<uint8_t*>(in_shm_->memory());
- const int kBytesPerPixel = 4;
- std::vector<uint8_t> rgba_buffer(width * height * kBytesPerPixel);
- std::vector<uint8_t> encoded;
- libyuv::I420ToABGR(yuv_src, y_stride, yuv_src + y_stride * height, u_stride,
- yuv_src + y_stride * height + u_stride * height / 2,
- v_stride, rgba_buffer.data(), width * kBytesPerPixel,
- width, height);
-
- SkImageInfo info = SkImageInfo::Make(width, height, kRGBA_8888_SkColorType,
- kOpaque_SkAlphaType);
- SkPixmap src(info, &rgba_buffer[0], width * kBytesPerPixel);
- if (!gfx::JPEGCodec::Encode(src, kJpegDefaultQuality, &encoded)) {
- return false;
- }
-
- memcpy(sw_out_shm_->memory(), encoded.data(), encoded.size());
- *sw_encoded_size = encoded.size();
- *sw_encode_time = base::TimeTicks::Now() - sw_encode_start;
- return true;
-}
-
-bool JpegClient::CompareHardwareAndSoftwareResults(int width,
- int height,
- size_t hw_encoded_size,
- size_t sw_encoded_size) {
- size_t yuv_size = width * height * 3 / 2;
- uint8_t* hw_yuv_result = new uint8_t[yuv_size];
- int y_stride = width;
- int u_stride = width / 2;
- int v_stride = u_stride;
- if (libyuv::ConvertToI420(
- static_cast<const uint8_t*>(hw_out_shm_->memory()), hw_encoded_size,
- hw_yuv_result, y_stride, hw_yuv_result + y_stride * height, u_stride,
- hw_yuv_result + y_stride * height + u_stride * height / 2, v_stride,
- 0, 0, width, height, width, height, libyuv::kRotate0,
- libyuv::FOURCC_MJPG)) {
- LOG(ERROR) << "Convert HW encoded result to YUV failed";
- }
-
- uint8_t* sw_yuv_result = new uint8_t[yuv_size];
- if (libyuv::ConvertToI420(
- static_cast<const uint8_t*>(sw_out_shm_->memory()), sw_encoded_size,
- sw_yuv_result, y_stride, sw_yuv_result + y_stride * height, u_stride,
- sw_yuv_result + y_stride * height + u_stride * height / 2, v_stride,
- 0, 0, width, height, width, height, libyuv::kRotate0,
- libyuv::FOURCC_MJPG)) {
- LOG(ERROR) << "Convert SW encoded result to YUV failed";
- }
-
- double difference =
- GetMeanAbsoluteDifference(hw_yuv_result, sw_yuv_result, yuv_size);
- delete[] hw_yuv_result;
- delete[] sw_yuv_result;
-
- if (difference > kMeanDiffThreshold) {
- LOG(ERROR) << "HW and SW encode results are not similar enough. diff = "
- << difference;
- return false;
- } else {
- return true;
- }
-}
-
-double JpegClient::GetMeanAbsoluteDifference(uint8_t* hw_yuv_result,
- uint8_t* sw_yuv_result,
- size_t yuv_size) {
- double total_difference = 0;
- for (size_t i = 0; i < yuv_size; i++)
- total_difference += std::abs(hw_yuv_result[i] - sw_yuv_result[i]);
- return total_difference / yuv_size;
-}
-
-void JpegClient::NotifyError(int32_t buffer_id,
- JpegEncodeAccelerator::Status status) {
- LOG(ERROR) << "Notifying of error " << status << " for output buffer id "
- << buffer_id;
- SetState(ClientState::ERROR);
- encoded_buffer_.reset(nullptr);
-}
-
-TestImage* JpegClient::GetTestImage(int32_t bitstream_buffer_id) {
- DCHECK_LT(static_cast<size_t>(bitstream_buffer_id),
- test_aligned_images_.size() + test_images_.size());
- TestImage* image_file;
- if (bitstream_buffer_id < static_cast<int32_t>(test_aligned_images_.size())) {
- image_file = test_aligned_images_[bitstream_buffer_id];
- } else {
- image_file =
- test_images_[bitstream_buffer_id - test_aligned_images_.size()];
- }
-
- return image_file;
-}
-
-void JpegClient::PrepareMemory(int32_t bitstream_buffer_id) {
- TestImage* test_image = GetTestImage(bitstream_buffer_id);
-
- size_t input_size = test_image->image_data.size();
- if (!in_shm_.get() || input_size > in_shm_->mapped_size()) {
- in_shm_.reset(new base::SharedMemory);
- LOG_ASSERT(in_shm_->CreateAndMapAnonymous(input_size));
- }
- memcpy(in_shm_->memory(), test_image->image_data.data(), input_size);
-
- if (!hw_out_shm_.get() ||
- test_image->output_size > hw_out_shm_->mapped_size()) {
- hw_out_shm_.reset(new base::SharedMemory);
- LOG_ASSERT(hw_out_shm_->CreateAndMapAnonymous(test_image->output_size));
- }
- memset(hw_out_shm_->memory(), 0, test_image->output_size);
-
- if (!sw_out_shm_.get() ||
- test_image->output_size > sw_out_shm_->mapped_size()) {
- sw_out_shm_.reset(new base::SharedMemory);
- LOG_ASSERT(sw_out_shm_->CreateAndMapAnonymous(test_image->output_size));
- }
- memset(sw_out_shm_->memory(), 0, test_image->output_size);
-}
-
-void JpegClient::SetState(ClientState new_state) {
- DVLOG(2) << "Changing state "
- << static_cast<std::underlying_type<ClientState>::type>(state_)
- << "->"
- << static_cast<std::underlying_type<ClientState>::type>(new_state);
- note_->Notify(new_state);
- state_ = new_state;
-}
-
-void JpegClient::SaveToFile(TestImage* test_image,
- size_t hw_size,
- size_t sw_size) {
- DCHECK_NE(nullptr, test_image);
-
- base::FilePath out_filename_hw = test_image->output_filename;
- LOG(INFO) << "Writing HW encode results to "
- << out_filename_hw.MaybeAsASCII();
- ASSERT_EQ(
- static_cast<int>(hw_size),
- base::WriteFile(out_filename_hw,
- static_cast<char*>(hw_out_shm_->memory()), hw_size));
-
- base::FilePath out_filename_sw = out_filename_hw.InsertBeforeExtension("_sw");
- LOG(INFO) << "Writing SW encode results to "
- << out_filename_sw.MaybeAsASCII();
- ASSERT_EQ(
- static_cast<int>(sw_size),
- base::WriteFile(out_filename_sw,
- static_cast<char*>(sw_out_shm_->memory()), sw_size));
-}
-
-void JpegClient::StartEncode(int32_t bitstream_buffer_id) {
- TestImage* test_image = GetTestImage(bitstream_buffer_id);
-
- test_image->output_size =
- encoder_->GetMaxCodedBufferSize(test_image->visible_size);
- PrepareMemory(bitstream_buffer_id);
-
- base::SharedMemoryHandle dup_handle;
- dup_handle = base::SharedMemory::DuplicateHandle(hw_out_shm_->handle());
- encoded_buffer_ = std::make_unique<BitstreamBuffer>(
- bitstream_buffer_id, dup_handle, test_image->output_size);
- scoped_refptr<VideoFrame> input_frame_ = VideoFrame::WrapExternalSharedMemory(
- PIXEL_FORMAT_I420, test_image->visible_size,
- gfx::Rect(test_image->visible_size), test_image->visible_size,
- static_cast<uint8_t*>(in_shm_->memory()), test_image->image_data.size(),
- in_shm_->handle(), 0, base::TimeDelta());
-
- LOG_ASSERT(input_frame_.get());
-
- buffer_id_to_start_time_[bitstream_buffer_id] = base::TimeTicks::Now();
- encoder_->Encode(input_frame_, kJpegDefaultQuality, nullptr,
- *encoded_buffer_);
-}
-
-class JpegEncodeAcceleratorTest : public ::testing::Test {
- protected:
- JpegEncodeAcceleratorTest() {}
-
- void TestEncode(size_t num_concurrent_encoders);
-
- // This is needed to allow the usage of methods in post_task.h in
- // JpegEncodeAccelerator implementations.
- base::test::ScopedTaskEnvironment scoped_task_environment_;
-
- // The elements of |test_aligned_images_| and |test_images_| are
- // owned by JpegEncodeAcceleratorTestEnvironment.
- std::vector<TestImage*> test_aligned_images_;
- std::vector<TestImage*> test_images_;
-
- protected:
- DISALLOW_COPY_AND_ASSIGN(JpegEncodeAcceleratorTest);
-};
-
-void JpegEncodeAcceleratorTest::TestEncode(size_t num_concurrent_encoders) {
- base::Thread encoder_thread("EncoderThread");
- ASSERT_TRUE(encoder_thread.Start());
-
- std::vector<
- std::unique_ptr<media::test::ClientStateNotification<ClientState>>>
- notes;
- std::vector<std::unique_ptr<JpegClient>> clients;
-
- for (size_t i = 0; i < num_concurrent_encoders; i++) {
- notes.push_back(
- std::make_unique<media::test::ClientStateNotification<ClientState>>());
- clients.push_back(std::make_unique<JpegClient>(
- test_aligned_images_, test_images_, notes.back().get()));
- encoder_thread.task_runner()->PostTask(
- FROM_HERE, base::BindOnce(&JpegClient::CreateJpegEncoder,
- base::Unretained(clients.back().get())));
- ASSERT_EQ(notes[i]->Wait(), ClientState::INITIALIZED);
- }
-
- for (size_t index = 0; index < test_aligned_images_.size(); index++) {
- VLOG(3) << index
- << ",width:" << test_aligned_images_[index]->visible_size.width();
- VLOG(3) << index
- << ",height:" << test_aligned_images_[index]->visible_size.height();
- for (size_t i = 0; i < num_concurrent_encoders; i++) {
- encoder_thread.task_runner()->PostTask(
- FROM_HERE, base::BindOnce(&JpegClient::StartEncode,
- base::Unretained(clients[i].get()), index));
- }
- for (size_t i = 0; i < num_concurrent_encoders; i++) {
- ASSERT_EQ(notes[i]->Wait(), ClientState::ENCODE_PASS);
- }
- }
-
- for (size_t index = 0; index < test_images_.size(); index++) {
- int buffer_id = index + test_aligned_images_.size();
- VLOG(3) << buffer_id
- << ",width:" << test_images_[index]->visible_size.width();
- VLOG(3) << buffer_id
- << ",height:" << test_images_[index]->visible_size.height();
- for (size_t i = 0; i < num_concurrent_encoders; i++) {
- encoder_thread.task_runner()->PostTask(
- FROM_HERE,
- base::BindOnce(&JpegClient::StartEncode,
- base::Unretained(clients[i].get()), buffer_id));
- }
-
- for (size_t i = 0; i < num_concurrent_encoders; i++) {
-// For unaligned images, V4L2 may not be able to encode them.
-#if BUILDFLAG(USE_V4L2_CODEC) && defined(ARCH_CPU_ARM_FAMILY)
- ClientState status = notes[i]->Wait();
- ASSERT_TRUE(status == ClientState::ENCODE_PASS ||
- status == ClientState::ERROR);
-#else
- ASSERT_EQ(notes[i]->Wait(), ClientState::ENCODE_PASS);
-#endif
- }
- }
-
- for (size_t i = 0; i < num_concurrent_encoders; i++) {
- encoder_thread.task_runner()->PostTask(
- FROM_HERE, base::BindOnce(&JpegClient::DestroyJpegEncoder,
- base::Unretained(clients[i].get())));
- }
- encoder_thread.Stop();
- VLOG(1) << "Exit TestEncode";
-}
-
-TEST_F(JpegEncodeAcceleratorTest, SimpleEncode) {
- for (size_t i = 0; i < g_env->repeat_; i++) {
- for (auto& image : g_env->image_data_user_) {
- test_images_.push_back(image.get());
- }
- }
- TestEncode(1);
-}
-
-TEST_F(JpegEncodeAcceleratorTest, MultipleEncoders) {
- for (auto& image : g_env->image_data_user_) {
- test_images_.push_back(image.get());
- }
- TestEncode(3);
-}
-
-TEST_F(JpegEncodeAcceleratorTest, ResolutionChange) {
- test_images_.push_back(g_env->image_data_640x368_black_.get());
- test_images_.push_back(g_env->image_data_640x360_black_.get());
- test_aligned_images_.push_back(g_env->image_data_1280x720_white_.get());
- TestEncode(1);
-}
-
-TEST_F(JpegEncodeAcceleratorTest, AlignedSizes) {
- test_aligned_images_.push_back(g_env->image_data_2560x1920_white_.get());
- test_aligned_images_.push_back(g_env->image_data_1280x720_white_.get());
- test_aligned_images_.push_back(g_env->image_data_640x480_black_.get());
- TestEncode(1);
-}
-
-TEST_F(JpegEncodeAcceleratorTest, CodedSizeAlignment) {
- test_images_.push_back(g_env->image_data_640x360_black_.get());
- TestEncode(1);
-}
-
-} // namespace
-} // namespace media
-
-int main(int argc, char** argv) {
- testing::InitGoogleTest(&argc, argv);
- base::CommandLine::Init(argc, argv);
- mojo::core::Init();
- base::ShadowingAtExitManager at_exit_manager;
-
- // Needed to enable DVLOG through --vmodule.
- logging::LoggingSettings settings;
- settings.logging_dest = logging::LOG_TO_SYSTEM_DEBUG_LOG;
- LOG_ASSERT(logging::InitLogging(settings));
-
- const base::CommandLine* cmd_line = base::CommandLine::ForCurrentProcess();
- DCHECK(cmd_line);
-
- const base::FilePath::CharType* yuv_filenames = nullptr;
- base::FilePath log_path;
- size_t repeat = 1;
- base::CommandLine::SwitchMap switches = cmd_line->GetSwitches();
- for (base::CommandLine::SwitchMap::const_iterator it = switches.begin();
- it != switches.end(); ++it) {
- // yuv_filenames can include one or many files and use ';' as delimiter.
- // For each file, it should follow the format "[filename]:[width]x[height]".
- // For example, "lake.yuv:4160x3120".
- if (it->first == "yuv_filenames") {
- yuv_filenames = it->second.c_str();
- continue;
- }
- if (it->first == "output_log") {
- log_path = base::FilePath(
- base::FilePath::StringType(it->second.begin(), it->second.end()));
- continue;
- }
- if (it->first == "repeat") {
- if (!base::StringToSizeT(it->second, &repeat)) {
- LOG(INFO) << "Can't parse parameter |repeat|: " << it->second;
- repeat = 1;
- }
- continue;
- }
- if (it->first == "save_to_file") {
- media::g_save_to_file = true;
- continue;
- }
- if (it->first == "v" || it->first == "vmodule")
- continue;
- if (it->first == "h" || it->first == "help")
- continue;
- LOG(ERROR) << "Unexpected switch: " << it->first << ":" << it->second;
- return -EINVAL;
- }
-#if BUILDFLAG(USE_VAAPI)
- media::VaapiWrapper::PreSandboxInitialization();
-#endif
-
- media::g_env = reinterpret_cast<media::JpegEncodeAcceleratorTestEnvironment*>(
- testing::AddGlobalTestEnvironment(
- new media::JpegEncodeAcceleratorTestEnvironment(yuv_filenames,
- log_path, repeat)));
-
- return RUN_ALL_TESTS();
-}
diff --git a/chromium/media/gpu/libyuv_image_processor.cc b/chromium/media/gpu/libyuv_image_processor.cc
index 12ed6fe705f..09c053f96d7 100644
--- a/chromium/media/gpu/libyuv_image_processor.cc
+++ b/chromium/media/gpu/libyuv_image_processor.cc
@@ -9,6 +9,8 @@
#include "base/memory/ptr_util.h"
#include "media/base/bind_to_current_loop.h"
#include "media/gpu/macros.h"
+#include "media/gpu/video_frame_mapper.h"
+#include "media/gpu/video_frame_mapper_factory.h"
#include "third_party/libyuv/include/libyuv/convert.h"
#include "third_party/libyuv/include/libyuv/convert_from.h"
#include "third_party/libyuv/include/libyuv/convert_from_argb.h"
@@ -57,6 +59,7 @@ LibYUVImageProcessor::LibYUVImageProcessor(
const VideoFrameLayout& output_layout,
const gfx::Size& output_visible_size,
VideoFrame::StorageType output_storage_type,
+ std::unique_ptr<VideoFrameMapper> video_frame_mapper,
ErrorCB error_cb)
: ImageProcessor(input_layout,
input_storage_type,
@@ -65,6 +68,7 @@ LibYUVImageProcessor::LibYUVImageProcessor(
OutputMode::IMPORT),
input_visible_rect_(input_visible_size),
output_visible_rect_(output_visible_size),
+ video_frame_mapper_(std::move(video_frame_mapper)),
error_cb_(error_cb),
process_thread_("LibYUVImageProcessorThread") {}
@@ -82,9 +86,22 @@ std::unique_ptr<LibYUVImageProcessor> LibYUVImageProcessor::Create(
const ImageProcessor::OutputMode output_mode,
ErrorCB error_cb) {
VLOGF(2);
+
+ std::unique_ptr<VideoFrameMapper> video_frame_mapper;
// LibYUVImageProcessor supports only memory-based video frame for input.
VideoFrame::StorageType input_storage_type = VideoFrame::STORAGE_UNKNOWN;
for (auto input_type : input_config.preferred_storage_types) {
+#if defined(OS_LINUX)
+ if (input_type == VideoFrame::STORAGE_DMABUFS) {
+ video_frame_mapper = VideoFrameMapperFactory::CreateMapper(
+ input_config.layout.format(), true);
+ if (video_frame_mapper) {
+ input_storage_type = input_type;
+ break;
+ }
+ }
+#endif // defined(OS_LINUX)
+
if (VideoFrame::IsStorageTypeMappable(input_type)) {
input_storage_type = input_type;
break;
@@ -124,6 +141,7 @@ std::unique_ptr<LibYUVImageProcessor> LibYUVImageProcessor::Create(
auto processor = base::WrapUnique(new LibYUVImageProcessor(
input_config.layout, input_config.visible_size, input_storage_type,
output_config.layout, output_config.visible_size, output_storage_type,
+ std::move(video_frame_mapper),
media::BindToCurrentLoop(std::move(error_cb))));
if (res == SupportResult::SupportedWithPivot) {
processor->intermediate_frame_ =
@@ -166,7 +184,8 @@ bool LibYUVImageProcessor::ProcessInternal(
DCHECK(input_frame->layout().coded_size() == input_layout_.coded_size());
DCHECK_EQ(output_frame->layout().format(), output_layout_.format());
DCHECK(output_frame->layout().coded_size() == output_layout_.coded_size());
- DCHECK(VideoFrame::IsStorageTypeMappable(input_frame->storage_type()));
+ DCHECK(input_storage_type_ == input_frame->storage_type() ||
+ VideoFrame::IsStorageTypeMappable(input_frame->storage_type()));
DCHECK(VideoFrame::IsStorageTypeMappable(output_frame->storage_type()));
// Since process_thread_ is owned by this class. base::Unretained(this) and
@@ -184,6 +203,17 @@ void LibYUVImageProcessor::ProcessTask(scoped_refptr<VideoFrame> input_frame,
FrameReadyCB cb) {
DCHECK(process_thread_.task_runner()->BelongsToCurrentThread());
DVLOGF(4);
+#if defined(OS_LINUX)
+ if (input_frame->storage_type() == VideoFrame::STORAGE_DMABUFS) {
+ DCHECK_NE(video_frame_mapper_.get(), nullptr);
+ input_frame = video_frame_mapper_->Map(std::move(input_frame));
+ if (!input_frame) {
+ VLOGF(1) << "Failed to map input VideoFrame";
+ NotifyError();
+ return;
+ }
+ }
+#endif // defined(OS_LINUX)
int res = DoConversion(input_frame.get(), output_frame.get());
if (res != 0) {
diff --git a/chromium/media/gpu/libyuv_image_processor.h b/chromium/media/gpu/libyuv_image_processor.h
index 57f80389f77..5a86b7c68ba 100644
--- a/chromium/media/gpu/libyuv_image_processor.h
+++ b/chromium/media/gpu/libyuv_image_processor.h
@@ -26,6 +26,8 @@
namespace media {
+class VideoFrameMapper;
+
// A software image processor which uses libyuv to perform format conversion.
// It expects input VideoFrame is mapped into CPU space, and output VideoFrame
// is allocated in user space.
@@ -53,6 +55,7 @@ class MEDIA_GPU_EXPORT LibYUVImageProcessor : public ImageProcessor {
const VideoFrameLayout& output_layout,
const gfx::Size& output_visible_size,
VideoFrame::StorageType output_storage_type,
+ std::unique_ptr<VideoFrameMapper> video_frame_mapper,
ErrorCB error_cb);
// ImageProcessor override
@@ -76,6 +79,8 @@ class MEDIA_GPU_EXPORT LibYUVImageProcessor : public ImageProcessor {
const gfx::Rect input_visible_rect_;
const gfx::Rect output_visible_rect_;
+ std::unique_ptr<VideoFrameMapper> video_frame_mapper_;
+
// A VideoFrame for intermediate format conversion when there is no direct
// conversion method in libyuv, e.g., RGBA -> I420 (pivot) -> NV12.
scoped_refptr<VideoFrame> intermediate_frame_;
diff --git a/chromium/media/gpu/linux/BUILD.gn b/chromium/media/gpu/linux/BUILD.gn
index 034c9a2f49a..de42139a999 100644
--- a/chromium/media/gpu/linux/BUILD.gn
+++ b/chromium/media/gpu/linux/BUILD.gn
@@ -10,6 +10,8 @@ assert(is_linux)
source_set("linux") {
defines = [ "MEDIA_GPU_IMPLEMENTATION" ]
sources = [
+ "mailbox_video_frame_converter.cc",
+ "mailbox_video_frame_converter.h",
"platform_video_frame_utils.cc",
"platform_video_frame_utils.h",
]
@@ -17,9 +19,11 @@ source_set("linux") {
deps = [
"//base",
"//media",
+ "//media/gpu:command_buffer_helper",
"//media/gpu:common",
"//ui/gfx:buffer_types",
"//ui/gfx:memory_buffer",
+ "//ui/gl",
]
if (use_ozone) {
@@ -37,6 +41,7 @@ source_set("video_frame_mapper") {
deps = [
"//base",
"//media",
+ "//media/gpu:command_buffer_helper",
"//media/gpu:common",
"//media/gpu:video_frame_mapper_common",
]
diff --git a/chromium/media/gpu/linux/generic_dmabuf_video_frame_mapper.cc b/chromium/media/gpu/linux/generic_dmabuf_video_frame_mapper.cc
index cf1d1a64132..f7b59c02e77 100644
--- a/chromium/media/gpu/linux/generic_dmabuf_video_frame_mapper.cc
+++ b/chromium/media/gpu/linux/generic_dmabuf_video_frame_mapper.cc
@@ -11,6 +11,7 @@
#include <vector>
#include "base/bind.h"
+#include "base/memory/ptr_util.h"
#include "media/gpu/macros.h"
namespace media {
@@ -35,7 +36,7 @@ void MunmapBuffers(const std::vector<std::pair<uint8_t*, size_t>>& chunks,
}
}
-// Create VideoFrame whose dtor deallocates memory in mapped planes referred
+// Create VideoFrame whose dtor unmaps memory in mapped planes referred
// by |plane_addrs|. |plane_addrs| are addresses to (Y, U, V) in this order.
// |chunks| is the vector of pair of (address, size) to be called in munmap().
// |src_video_frame| is the video frame that owns dmabufs to the mapped planes.
@@ -63,9 +64,10 @@ scoped_refptr<VideoFrame> CreateMappedVideoFrame(
<< ", plane_size=" << plane_size;
return nullptr;
}
- video_frame = VideoFrame::WrapExternalData(
- layout.format(), layout.coded_size(), visible_rect, visible_rect.size(),
- plane_addrs[0], plane_size, src_video_frame->timestamp());
+
+ video_frame = VideoFrame::WrapExternalDataWithLayout(
+ layout, visible_rect, visible_rect.size(), plane_addrs[0], plane_size,
+ src_video_frame->timestamp());
}
if (!video_frame) {
return nullptr;
@@ -79,10 +81,15 @@ scoped_refptr<VideoFrame> CreateMappedVideoFrame(
bool IsFormatSupported(VideoPixelFormat format) {
constexpr VideoPixelFormat supported_formats[] = {
+ // RGB pixel formats.
+ PIXEL_FORMAT_ABGR,
+ PIXEL_FORMAT_ARGB,
+ PIXEL_FORMAT_XBGR,
+
+ // YUV pixel formats.
PIXEL_FORMAT_I420,
- PIXEL_FORMAT_YV12,
PIXEL_FORMAT_NV12,
- PIXEL_FORMAT_RGB32,
+ PIXEL_FORMAT_YV12,
};
return std::find(std::cbegin(supported_formats), std::cend(supported_formats),
format);
@@ -90,6 +97,20 @@ bool IsFormatSupported(VideoPixelFormat format) {
} // namespace
+// static
+std::unique_ptr<GenericDmaBufVideoFrameMapper>
+GenericDmaBufVideoFrameMapper::Create(VideoPixelFormat format) {
+ if (!IsFormatSupported(format)) {
+ VLOGF(1) << "Unsupported format: " << format;
+ return nullptr;
+ }
+ return base::WrapUnique(new GenericDmaBufVideoFrameMapper(format));
+}
+
+GenericDmaBufVideoFrameMapper::GenericDmaBufVideoFrameMapper(
+ VideoPixelFormat format)
+ : VideoFrameMapper(format) {}
+
scoped_refptr<VideoFrame> GenericDmaBufVideoFrameMapper::Map(
scoped_refptr<const VideoFrame> video_frame) const {
if (video_frame->storage_type() != VideoFrame::StorageType::STORAGE_DMABUFS) {
@@ -98,10 +119,9 @@ scoped_refptr<VideoFrame> GenericDmaBufVideoFrameMapper::Map(
return nullptr;
}
- // TODO(crbug.com/952147): Create GenericDmaBufVideoFrameMapper with pixel
- // format, and only the format should be acceptable here.
- if (!IsFormatSupported(video_frame->format())) {
- VLOGF(1) << "Unsupported format: " << video_frame->format();
+ if (video_frame->format() != format_) {
+ VLOGF(1) << "Unexpected format: " << video_frame->format()
+ << ", expected: " << format_;
return nullptr;
}
@@ -112,9 +132,9 @@ scoped_refptr<VideoFrame> GenericDmaBufVideoFrameMapper::Map(
std::vector<std::pair<uint8_t*, size_t>> chunks;
const auto& buffer_sizes = layout.buffer_sizes();
std::vector<uint8_t*> buffer_addrs(buffer_sizes.size(), nullptr);
- DCHECK_EQ(buffer_addrs.size(), dmabuf_fds.size());
+ DCHECK_LE(buffer_addrs.size(), dmabuf_fds.size());
DCHECK_LE(buffer_addrs.size(), VideoFrame::kMaxPlanes);
- for (size_t i = 0; i < dmabuf_fds.size(); i++) {
+ for (size_t i = 0; i < buffer_sizes.size(); i++) {
buffer_addrs[i] = Mmap(buffer_sizes[i], dmabuf_fds[i].get());
if (!buffer_addrs[i]) {
MunmapBuffers(chunks, std::move(video_frame));
@@ -129,14 +149,10 @@ scoped_refptr<VideoFrame> GenericDmaBufVideoFrameMapper::Map(
const auto& planes = layout.planes();
const size_t num_of_planes = layout.num_planes();
uint8_t* plane_addrs[VideoFrame::kMaxPlanes] = {};
- if (dmabuf_fds.size() == 1) {
- for (size_t i = 0; i < num_of_planes; i++) {
- plane_addrs[i] = buffer_addrs[0] + planes[i].offset;
- }
- } else {
- for (size_t i = 0; i < num_of_planes; i++) {
- plane_addrs[i] = buffer_addrs[i] + planes[i].offset;
- }
+ for (size_t i = 0; i < num_of_planes; i++) {
+ uint8_t* buffer =
+ i < buffer_addrs.size() ? buffer_addrs[i] : buffer_addrs.back();
+ plane_addrs[i] = buffer + planes[i].offset;
}
return CreateMappedVideoFrame(std::move(video_frame), plane_addrs, chunks);
}
diff --git a/chromium/media/gpu/linux/generic_dmabuf_video_frame_mapper.h b/chromium/media/gpu/linux/generic_dmabuf_video_frame_mapper.h
index dddd56cb3ad..89d19411a43 100644
--- a/chromium/media/gpu/linux/generic_dmabuf_video_frame_mapper.h
+++ b/chromium/media/gpu/linux/generic_dmabuf_video_frame_mapper.h
@@ -14,12 +14,17 @@ namespace media {
// backed video frames into memory.
class MEDIA_GPU_EXPORT GenericDmaBufVideoFrameMapper : public VideoFrameMapper {
public:
- GenericDmaBufVideoFrameMapper() = default;
- ~GenericDmaBufVideoFrameMapper() override = default;
+ static std::unique_ptr<GenericDmaBufVideoFrameMapper> Create(
+ VideoPixelFormat format);
+ ~GenericDmaBufVideoFrameMapper() override = default;
// VideoFrameMapper implementation.
scoped_refptr<VideoFrame> Map(
scoped_refptr<const VideoFrame> video_frame) const override;
+
+ private:
+ explicit GenericDmaBufVideoFrameMapper(VideoPixelFormat format);
+
DISALLOW_COPY_AND_ASSIGN(GenericDmaBufVideoFrameMapper);
};
diff --git a/chromium/media/gpu/linux/mailbox_video_frame_converter.cc b/chromium/media/gpu/linux/mailbox_video_frame_converter.cc
new file mode 100644
index 00000000000..6b85bafc30b
--- /dev/null
+++ b/chromium/media/gpu/linux/mailbox_video_frame_converter.cc
@@ -0,0 +1,260 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/gpu/linux/mailbox_video_frame_converter.h"
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/callback_helpers.h"
+#include "base/optional.h"
+#include "base/task/post_task.h"
+#include "media/gpu/format_utils.h"
+#include "media/gpu/linux/platform_video_frame_utils.h"
+#include "media/gpu/macros.h"
+#include "ui/gfx/gpu_memory_buffer.h"
+#include "ui/gfx/native_pixmap.h"
+#include "ui/gl/gl_image_native_pixmap.h"
+#include "ui/gl/scoped_binders.h"
+
+#if defined(USE_OZONE)
+#include "ui/ozone/public/ozone_platform.h"
+#include "ui/ozone/public/surface_factory_ozone.h"
+#endif // defined(USE_OZONE)
+
+namespace media {
+
+namespace {
+
+constexpr GLenum kTextureTarget = GL_TEXTURE_EXTERNAL_OES;
+
+// Destroy the GL texture. This is called when the origin DMA-buf VideoFrame
+// is destroyed.
+void DestroyTexture(scoped_refptr<base::SingleThreadTaskRunner> gpu_task_runner,
+ scoped_refptr<CommandBufferHelper> command_buffer_helper,
+ GLuint service_id) {
+ DVLOGF(4);
+
+ if (!gpu_task_runner->BelongsToCurrentThread()) {
+ gpu_task_runner->PostTask(
+ FROM_HERE,
+ base::BindOnce(&DestroyTexture, std::move(gpu_task_runner),
+ std::move(command_buffer_helper), service_id));
+ return;
+ }
+
+ if (!command_buffer_helper->MakeContextCurrent()) {
+ VLOGF(1) << "Failed to make context current";
+ return;
+ }
+ command_buffer_helper->DestroyTexture(service_id);
+}
+
+// ReleaseMailbox callback of the mailbox VideoFrame.
+// Keep the wrapped DMA-buf VideoFrame until WaitForSyncToken() is done.
+void WaitForSyncToken(
+ scoped_refptr<base::SingleThreadTaskRunner> gpu_task_runner,
+ scoped_refptr<CommandBufferHelper> command_buffer_helper,
+ scoped_refptr<VideoFrame> frame,
+ const gpu::SyncToken& sync_token) {
+ DVLOGF(4);
+
+ gpu_task_runner->PostTask(
+ FROM_HERE,
+ base::BindOnce(
+ &CommandBufferHelper::WaitForSyncToken,
+ std::move(command_buffer_helper), sync_token,
+ base::BindOnce(base::DoNothing::Once<scoped_refptr<VideoFrame>>(),
+ std::move(frame))));
+}
+
+} // namespace
+
+MailboxVideoFrameConverter::MailboxVideoFrameConverter(
+ UnwrapFrameCB unwrap_frame_cb,
+ scoped_refptr<base::SingleThreadTaskRunner> gpu_task_runner,
+ GetCommandBufferStubCB get_stub_cb)
+ : unwrap_frame_cb_(std::move(unwrap_frame_cb)),
+ gpu_task_runner_(std::move(gpu_task_runner)),
+ get_stub_cb_(std::move(get_stub_cb)),
+ weak_this_factory_(this) {
+ weak_this_ = weak_this_factory_.GetWeakPtr();
+}
+
+MailboxVideoFrameConverter::~MailboxVideoFrameConverter() {
+ DCHECK(parent_task_runner_->RunsTasksInCurrentSequence());
+
+ weak_this_factory_.InvalidateWeakPtrs();
+}
+
+bool MailboxVideoFrameConverter::CreateCommandBufferHelper() {
+ DCHECK(gpu_task_runner_->BelongsToCurrentThread());
+ DCHECK(get_stub_cb_);
+ DVLOGF(4);
+
+ gpu::CommandBufferStub* stub = std::move(get_stub_cb_).Run();
+ if (!stub) {
+ VLOGF(1) << "Failed to obtain command buffer stub";
+ return false;
+ }
+
+ command_buffer_helper_ = CommandBufferHelper::Create(stub);
+ return command_buffer_helper_ != nullptr;
+}
+
+scoped_refptr<VideoFrame> MailboxVideoFrameConverter::ConvertFrame(
+ scoped_refptr<VideoFrame> frame) {
+ DCHECK(parent_task_runner_->RunsTasksInCurrentSequence());
+ DVLOGF(4);
+
+ if (!frame) {
+ DVLOGF(1) << "nullptr input.";
+ return nullptr;
+ }
+ if (!frame->HasDmaBufs()) {
+ DVLOGF(1) << "Only converting DMA-buf frames is supported.";
+ return nullptr;
+ }
+
+ VideoFrame* origin_frame = unwrap_frame_cb_.Run(*frame);
+ gpu::Mailbox mailbox;
+ auto it = mailbox_table_.find(origin_frame->unique_id());
+ if (it != mailbox_table_.end())
+ mailbox = it->second;
+
+ if (mailbox.IsZero()) {
+ base::WaitableEvent event;
+ // We wait until GenerateMailbox() finished, so base::Unretained(this) is
+ // safe.
+ gpu_task_runner_->PostTask(
+ FROM_HERE,
+ base::BindOnce(&MailboxVideoFrameConverter::GenerateMailbox,
+ base::Unretained(this), base::Unretained(origin_frame),
+ base::Unretained(&mailbox), base::Unretained(&event)));
+ event.Wait();
+
+ if (mailbox.IsZero()) {
+ VLOGF(1) << "Failed to create mailbox.";
+ return nullptr;
+ }
+
+ RegisterMailbox(origin_frame, mailbox);
+ }
+
+ gpu::MailboxHolder mailbox_holders[VideoFrame::kMaxPlanes];
+ mailbox_holders[0] =
+ gpu::MailboxHolder(mailbox, gpu::SyncToken(), kTextureTarget);
+ scoped_refptr<VideoFrame> mailbox_frame = VideoFrame::WrapNativeTextures(
+ frame->format(), mailbox_holders,
+ base::BindOnce(&WaitForSyncToken, gpu_task_runner_,
+ command_buffer_helper_, frame),
+ frame->coded_size(), frame->visible_rect(), frame->natural_size(),
+ frame->timestamp());
+ mailbox_frame->metadata()->MergeMetadataFrom(frame->metadata());
+ return mailbox_frame;
+}
+
+void MailboxVideoFrameConverter::GenerateMailbox(VideoFrame* origin_frame,
+ gpu::Mailbox* mailbox,
+ base::WaitableEvent* event) {
+ DCHECK(gpu_task_runner_->BelongsToCurrentThread());
+ DVLOGF(4);
+
+ // Signal the event when leaving the method.
+ base::ScopedClosureRunner signal_event(
+ base::BindOnce(&base::WaitableEvent::Signal, base::Unretained(event)));
+
+ // CreateCommandBufferHelper() should be called on |gpu_task_runner_| so we
+ // call it here lazily instead of at constructor.
+ if (!command_buffer_helper_ && !CreateCommandBufferHelper()) {
+ VLOGF(1) << "Failed to create command buffer helper.";
+ return;
+ }
+
+ // Get NativePixmap.
+ scoped_refptr<gfx::NativePixmap> pixmap;
+ gfx::BufferFormat buffer_format =
+ VideoPixelFormatToGfxBufferFormat(origin_frame->format());
+#if defined(USE_OZONE)
+ gfx::GpuMemoryBufferHandle handle = CreateGpuMemoryBufferHandle(origin_frame);
+ pixmap = ui::OzonePlatform::GetInstance()
+ ->GetSurfaceFactoryOzone()
+ ->CreateNativePixmapFromHandle(
+ gfx::kNullAcceleratedWidget, origin_frame->coded_size(),
+ buffer_format, std::move(handle.native_pixmap_handle));
+#endif // defined(USE_OZONE)
+ if (!pixmap) {
+ VLOGF(1) << "Cannot create NativePixmap.";
+ return;
+ }
+
+ // Create GLImage.
+ auto image = base::MakeRefCounted<gl::GLImageNativePixmap>(
+ origin_frame->coded_size(), buffer_format);
+ if (!image->Initialize(std::move(pixmap))) {
+ VLOGF(1) << "Failed to initialize GLImage.";
+ return;
+ }
+
+ // Create texture and bind image to texture.
+ if (!command_buffer_helper_->MakeContextCurrent()) {
+ VLOGF(1) << "Failed to make context current.";
+ return;
+ }
+ GLuint service_id = command_buffer_helper_->CreateTexture(
+ kTextureTarget, GL_RGBA, origin_frame->coded_size().width(),
+ origin_frame->coded_size().height(), GL_RGBA, GL_UNSIGNED_BYTE);
+ DCHECK(service_id);
+ gl::ScopedTextureBinder bind_restore(kTextureTarget, service_id);
+ bool ret = image->BindTexImage(kTextureTarget);
+ DCHECK(ret);
+ command_buffer_helper_->BindImage(service_id, image.get(), true);
+ command_buffer_helper_->SetCleared(service_id);
+ *mailbox = command_buffer_helper_->CreateMailbox(service_id);
+
+ // Destroy the texture after the DMA-buf VideoFrame is destructed.
+ origin_frame->AddDestructionObserver(base::BindOnce(
+ &DestroyTexture, gpu_task_runner_, command_buffer_helper_, service_id));
+ return;
+}
+
+void MailboxVideoFrameConverter::RegisterMailbox(VideoFrame* origin_frame,
+ const gpu::Mailbox& mailbox) {
+ DCHECK(parent_task_runner_->RunsTasksInCurrentSequence());
+ DCHECK(!mailbox.IsZero());
+ DVLOGF(4);
+
+ auto ret =
+ mailbox_table_.insert(std::make_pair(origin_frame->unique_id(), mailbox));
+ DCHECK(ret.second);
+ origin_frame->AddDestructionObserver(base::BindOnce(
+ &MailboxVideoFrameConverter::UnregisterMailboxThunk, parent_task_runner_,
+ weak_this_, origin_frame->unique_id()));
+}
+
+// static
+void MailboxVideoFrameConverter::UnregisterMailboxThunk(
+ scoped_refptr<base::SequencedTaskRunner> task_runner,
+ base::Optional<base::WeakPtr<MailboxVideoFrameConverter>> converter,
+ int origin_frame_id) {
+ DCHECK(converter);
+ DVLOGF(4);
+
+ // MailboxVideoFrameConverter might have already been destroyed when this
+ // method is called. In this case, the WeakPtr will have been invalidated at
+ // |parent_task_runner_|, and UnregisterMailbox() will not get executed.
+ task_runner->PostTask(
+ FROM_HERE, base::BindOnce(&MailboxVideoFrameConverter::UnregisterMailbox,
+ *converter, origin_frame_id));
+}
+
+void MailboxVideoFrameConverter::UnregisterMailbox(int origin_frame_id) {
+ DCHECK(parent_task_runner_->RunsTasksInCurrentSequence());
+ DVLOGF(4);
+
+ auto it = mailbox_table_.find(origin_frame_id);
+ DCHECK(it != mailbox_table_.end());
+ mailbox_table_.erase(it);
+}
+
+} // namespace media
diff --git a/chromium/media/gpu/linux/mailbox_video_frame_converter.h b/chromium/media/gpu/linux/mailbox_video_frame_converter.h
new file mode 100644
index 00000000000..67a5d235e22
--- /dev/null
+++ b/chromium/media/gpu/linux/mailbox_video_frame_converter.h
@@ -0,0 +1,112 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_GPU_LINUX_MAILBOX_VIDEO_FRAME_CONVERTER_H_
+#define MEDIA_GPU_LINUX_MAILBOX_VIDEO_FRAME_CONVERTER_H_
+
+#include <map>
+
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_refptr.h"
+#include "base/memory/weak_ptr.h"
+#include "base/single_thread_task_runner.h"
+#include "base/synchronization/waitable_event.h"
+#include "media/base/video_decoder.h"
+#include "media/base/video_frame.h"
+#include "media/gpu/command_buffer_helper.h"
+#include "media/gpu/media_gpu_export.h"
+#include "media/gpu/video_frame_converter.h"
+
+namespace media {
+
+// The linux VideoDecoder implementations request DMA-buf VideoFrame from the
+// DmabufVideoFramePool, and store the decoded data into DMA-buf. However the
+// client of the VideoDecoder may only accept mailbox VideoFrame.
+// This class is used for converting DMA-buf VideoFrame to mailbox VideoFrame.
+// After conversion, the mailbox VideoFrame will retain a reference of the
+// VideoFrame passed to ConvertFrame().
+class MEDIA_GPU_EXPORT MailboxVideoFrameConverter : public VideoFrameConverter {
+ public:
+ using UnwrapFrameCB =
+ base::RepeatingCallback<VideoFrame*(const VideoFrame& wrapped_frame)>;
+ using GetCommandBufferStubCB = base::OnceCallback<gpu::CommandBufferStub*()>;
+
+ // In order to recycle VideoFrame, the DmabufVideoFramePool implementation may
+ // wrap the frame. We want to create texture only once for the same buffer, so
+ // we need to get the original frame at ConvertFrame(). |unwrap_frame_cb| is
+ // the callback used to get the original frame.
+ // |get_stub_cb| is the callback used to get the CommandBufferStub, which is
+ // used to create CommandBufferHelper.
+ MailboxVideoFrameConverter(
+ UnwrapFrameCB unwrap_frame_cb,
+ scoped_refptr<base::SingleThreadTaskRunner> gpu_task_runner,
+ GetCommandBufferStubCB get_stub_cb);
+ ~MailboxVideoFrameConverter() override;
+
+ // Convert DMA-buf VideoFrame to mailbox VideoFrame.
+ // For each frame, we bind DMA-buf to GL texture and create mailbox at GPU
+ // thread, and block working thread waiting for the result.
+ // The mailbox of each frame will be stored at |mailbox_table_|. When
+ // converting a frame second time, we just lookup the table instead of
+ // creating texture and mailbox at GPU thread.
+ scoped_refptr<VideoFrame> ConvertFrame(
+ scoped_refptr<VideoFrame> frame) override;
+
+ private:
+ bool CreateCommandBufferHelper();
+
+ // Generate mailbox for the DMA-buf VideoFrame. This method runs on the GPU
+ // thread.
+ // |origin_frame| is unwrapped from |frame| passed from ConvertFrame().
+ void GenerateMailbox(VideoFrame* origin_frame,
+ gpu::Mailbox* mailbox,
+ base::WaitableEvent* event);
+
+ // Register the mapping between DMA-buf VideoFrame and the mailbox.
+ void RegisterMailbox(VideoFrame* origin_frame, const gpu::Mailbox& mailbox);
+
+ // Thunk for calling UnregisterMailbox() on |task_runner|.
+ // Because this thunk may be called in any thread, We cannot dereference
+ // WeakPtr. Therefore we wrap the WeakPtr by base::Optional to avoid task
+ // runner defererence the WeakPtr.
+ static void UnregisterMailboxThunk(
+ scoped_refptr<base::SequencedTaskRunner> task_runner,
+ base::Optional<base::WeakPtr<MailboxVideoFrameConverter>> converter,
+ int origin_frame_id);
+ // Remove the mapping between DMA-buf VideoFrame and the mailbox.
+ void UnregisterMailbox(int origin_frame_id);
+
+ // Destruction callback of converted frame. |frame| is the frame passed from
+ // ConvertFrame().
+ void OnMailboxHoldersReleased(scoped_refptr<VideoFrame> frame,
+ const gpu::SyncToken& sync_token);
+
+ // In DmabufVideoFramePool, we recycle the unused frames. To do that, each
+ // time a frame is requested from the pool it is wrapped inside another frame.
+ // A destruction callback is then added to this wrapped frame to automatically
+ // return it to the pool upon destruction. Unfortunately this means that a new
+ // frame is returned each time, and we need a way to uniquely identify the
+ // underlying frame to avoid converting the same frame multiple times.
+ // |unwrap_frame_cb_| is used to get the origin frame.
+ UnwrapFrameCB unwrap_frame_cb_;
+
+ scoped_refptr<base::SingleThreadTaskRunner> gpu_task_runner_;
+ GetCommandBufferStubCB get_stub_cb_;
+ // The interface to communicate with command buffer. We use this to create and
+ // destroy texture, wait for SyncToken, and generate mailbox.
+ scoped_refptr<CommandBufferHelper> command_buffer_helper_;
+
+ // Mapping from the unique_id of origin frame to its corresponding mailbox.
+ std::map<int, gpu::Mailbox> mailbox_table_;
+
+ // The weak pointer of this, bound to |parent_task_runner_|.
+ // Used at the VideoFrame destruction callback.
+ base::WeakPtr<MailboxVideoFrameConverter> weak_this_;
+ base::WeakPtrFactory<MailboxVideoFrameConverter> weak_this_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(MailboxVideoFrameConverter);
+};
+
+} // namespace media
+#endif // MEDIA_GPU_LINUX_MAILBOX_VIDEO_FRAME_CONVERTER_H_
diff --git a/chromium/media/gpu/linux/platform_video_frame_utils.cc b/chromium/media/gpu/linux/platform_video_frame_utils.cc
index 72cf6f1956a..d71bb94b7ce 100644
--- a/chromium/media/gpu/linux/platform_video_frame_utils.cc
+++ b/chromium/media/gpu/linux/platform_video_frame_utils.cc
@@ -37,8 +37,11 @@ scoped_refptr<VideoFrame> CreateVideoFrameOzone(VideoPixelFormat pixel_format,
gfx::BufferFormat buffer_format =
VideoPixelFormatToGfxBufferFormat(pixel_format);
- auto pixmap = factory->CreateNativePixmap(
- gfx::kNullAcceleratedWidget, coded_size, buffer_format, buffer_usage);
+ auto pixmap =
+ factory->CreateNativePixmap(gfx::kNullAcceleratedWidget, VK_NULL_HANDLE,
+ coded_size, buffer_format, buffer_usage);
+ if (!pixmap)
+ return nullptr;
const size_t num_planes = VideoFrame::NumPlanes(pixel_format);
std::vector<VideoFrameLayout::Plane> planes(num_planes);
@@ -46,14 +49,15 @@ scoped_refptr<VideoFrame> CreateVideoFrameOzone(VideoPixelFormat pixel_format,
for (size_t i = 0; i < num_planes; ++i) {
planes[i].stride = pixmap->GetDmaBufPitch(i);
planes[i].offset = pixmap->GetDmaBufOffset(i);
- planes[i].modifier = pixmap->GetDmaBufModifier(i);
buffer_sizes[i] = planes[i].offset +
planes[i].stride * VideoFrame::Rows(i, pixel_format,
coded_size.height());
}
-
auto layout = VideoFrameLayout::CreateWithPlanes(
- pixel_format, coded_size, std::move(planes), std::move(buffer_sizes));
+ pixel_format, coded_size, std::move(planes), std::move(buffer_sizes),
+ VideoFrameLayout::kBufferAddressAlignment,
+ pixmap->GetBufferFormatModifier());
+
if (!layout)
return nullptr;
@@ -111,16 +115,14 @@ gfx::GpuMemoryBufferHandle CreateGpuMemoryBufferHandle(
const size_t num_planes = VideoFrame::NumPlanes(video_frame->format());
const size_t num_buffers = video_frame->layout().buffer_sizes().size();
DCHECK_EQ(video_frame->layout().planes().size(), num_planes);
-
- // TODO(crbug.com/946880): Handles case that num_planes mismatches num_buffers
+ handle.native_pixmap_handle.modifier = video_frame->layout().modifier();
for (size_t i = 0; i < num_planes; ++i) {
const auto& plane = video_frame->layout().planes()[i];
size_t buffer_size = 0;
if (i < num_buffers)
buffer_size = video_frame->layout().buffer_sizes()[i];
handle.native_pixmap_handle.planes.emplace_back(
- plane.stride, plane.offset, buffer_size, std::move(duped_fds[i]),
- plane.modifier);
+ plane.stride, plane.offset, buffer_size, std::move(duped_fds[i]));
}
#else
NOTREACHED();
diff --git a/chromium/media/gpu/mac/vt_video_decode_accelerator_mac.cc b/chromium/media/gpu/mac/vt_video_decode_accelerator_mac.cc
index f01a29b09f1..e92a946ce42 100644
--- a/chromium/media/gpu/mac/vt_video_decode_accelerator_mac.cc
+++ b/chromium/media/gpu/mac/vt_video_decode_accelerator_mac.cc
@@ -1113,7 +1113,7 @@ void VTVideoDecodeAccelerator::FlushDone(TaskType type) {
ProcessWorkQueues();
}
-void VTVideoDecodeAccelerator::Decode(const BitstreamBuffer& bitstream) {
+void VTVideoDecodeAccelerator::Decode(BitstreamBuffer bitstream) {
Decode(bitstream.ToDecoderBuffer(), bitstream.id());
}
diff --git a/chromium/media/gpu/mac/vt_video_decode_accelerator_mac.h b/chromium/media/gpu/mac/vt_video_decode_accelerator_mac.h
index 5ecad5a8a38..07ae070c023 100644
--- a/chromium/media/gpu/mac/vt_video_decode_accelerator_mac.h
+++ b/chromium/media/gpu/mac/vt_video_decode_accelerator_mac.h
@@ -45,7 +45,7 @@ class VTVideoDecodeAccelerator : public VideoDecodeAccelerator,
// VideoDecodeAccelerator implementation.
bool Initialize(const Config& config, Client* client) override;
- void Decode(const BitstreamBuffer& bitstream) override;
+ void Decode(BitstreamBuffer bitstream) override;
void Decode(scoped_refptr<DecoderBuffer> buffer,
int32_t bitstream_id) override;
void AssignPictureBuffers(
diff --git a/chromium/media/gpu/mac/vt_video_encode_accelerator_mac.cc b/chromium/media/gpu/mac/vt_video_encode_accelerator_mac.cc
index 243bf4b1a91..84ba11e6f85 100644
--- a/chromium/media/gpu/mac/vt_video_encode_accelerator_mac.cc
+++ b/chromium/media/gpu/mac/vt_video_encode_accelerator_mac.cc
@@ -6,6 +6,8 @@
#include <memory>
+#include "base/memory/shared_memory_mapping.h"
+#include "base/memory/unsafe_shared_memory_region.h"
#include "base/threading/thread_task_runner_handle.h"
#include "media/base/mac/video_frame_mac.h"
@@ -71,11 +73,11 @@ struct VTVideoEncodeAccelerator::EncodeOutput {
struct VTVideoEncodeAccelerator::BitstreamBufferRef {
BitstreamBufferRef(int32_t id,
- std::unique_ptr<base::SharedMemory> shm,
+ base::WritableSharedMemoryMapping mapping,
size_t size)
- : id(id), shm(std::move(shm)), size(size) {}
+ : id(id), mapping(std::move(mapping)), size(size) {}
const int32_t id;
- const std::unique_ptr<base::SharedMemory> shm;
+ const base::WritableSharedMemoryMapping mapping;
const size_t size;
private:
@@ -177,18 +179,19 @@ bool VTVideoEncodeAccelerator::Initialize(const Config& config,
return true;
}
-void VTVideoEncodeAccelerator::Encode(const scoped_refptr<VideoFrame>& frame,
+void VTVideoEncodeAccelerator::Encode(scoped_refptr<VideoFrame> frame,
bool force_keyframe) {
DVLOG(3) << __func__;
DCHECK(thread_checker_.CalledOnValidThread());
encoder_thread_task_runner_->PostTask(
- FROM_HERE, base::BindOnce(&VTVideoEncodeAccelerator::EncodeTask,
- base::Unretained(this), frame, force_keyframe));
+ FROM_HERE,
+ base::BindOnce(&VTVideoEncodeAccelerator::EncodeTask,
+ base::Unretained(this), std::move(frame), force_keyframe));
}
void VTVideoEncodeAccelerator::UseOutputBitstreamBuffer(
- const BitstreamBuffer& buffer) {
+ BitstreamBuffer buffer) {
DVLOG(3) << __func__ << ": buffer size=" << buffer.size();
DCHECK(thread_checker_.CalledOnValidThread());
@@ -199,16 +202,16 @@ void VTVideoEncodeAccelerator::UseOutputBitstreamBuffer(
return;
}
- std::unique_ptr<base::SharedMemory> shm(
- new base::SharedMemory(buffer.handle(), false));
- if (!shm->Map(buffer.size())) {
+ auto mapping =
+ base::UnsafeSharedMemoryRegion::Deserialize(buffer.TakeRegion()).Map();
+ if (!mapping.IsValid()) {
DLOG(ERROR) << "Failed mapping shared memory.";
client_->NotifyError(kPlatformFailureError);
return;
}
std::unique_ptr<BitstreamBufferRef> buffer_ref(
- new BitstreamBufferRef(buffer.id(), std::move(shm), buffer.size()));
+ new BitstreamBufferRef(buffer.id(), std::move(mapping), buffer.size()));
encoder_thread_task_runner_->PostTask(
FROM_HERE,
@@ -249,9 +252,8 @@ void VTVideoEncodeAccelerator::Destroy() {
delete this;
}
-void VTVideoEncodeAccelerator::EncodeTask(
- const scoped_refptr<VideoFrame>& frame,
- bool force_keyframe) {
+void VTVideoEncodeAccelerator::EncodeTask(scoped_refptr<VideoFrame> frame,
+ bool force_keyframe) {
DCHECK(encoder_thread_task_runner_->BelongsToCurrentThread());
DCHECK(compression_session_);
DCHECK(frame);
@@ -452,7 +454,7 @@ void VTVideoEncodeAccelerator::ReturnBitstreamBuffer(
size_t used_buffer_size = 0;
const bool copy_rv = video_toolbox::CopySampleBufferToAnnexBBuffer(
encode_output->sample_buffer.get(), keyframe, buffer_ref->size,
- static_cast<char*>(buffer_ref->shm->memory()), &used_buffer_size);
+ static_cast<char*>(buffer_ref->mapping.memory()), &used_buffer_size);
if (!copy_rv) {
DLOG(ERROR) << "Cannot copy output from SampleBuffer to AnnexBBuffer.";
used_buffer_size = 0;
diff --git a/chromium/media/gpu/mac/vt_video_encode_accelerator_mac.h b/chromium/media/gpu/mac/vt_video_encode_accelerator_mac.h
index 255bd5ddf24..5f955e53721 100644
--- a/chromium/media/gpu/mac/vt_video_encode_accelerator_mac.h
+++ b/chromium/media/gpu/mac/vt_video_encode_accelerator_mac.h
@@ -33,9 +33,8 @@ class MEDIA_GPU_EXPORT VTVideoEncodeAccelerator
// VideoEncodeAccelerator implementation.
VideoEncodeAccelerator::SupportedProfiles GetSupportedProfiles() override;
bool Initialize(const Config& config, Client* client) override;
- void Encode(const scoped_refptr<VideoFrame>& frame,
- bool force_keyframe) override;
- void UseOutputBitstreamBuffer(const BitstreamBuffer& buffer) override;
+ void Encode(scoped_refptr<VideoFrame> frame, bool force_keyframe) override;
+ void UseOutputBitstreamBuffer(BitstreamBuffer buffer) override;
void RequestEncodingParametersChange(uint32_t bitrate,
uint32_t framerate) override;
void Destroy() override;
@@ -51,7 +50,7 @@ class MEDIA_GPU_EXPORT VTVideoEncodeAccelerator
struct BitstreamBufferRef;
// Encoding tasks to be run on |encoder_thread_|.
- void EncodeTask(const scoped_refptr<VideoFrame>& frame, bool force_keyframe);
+ void EncodeTask(scoped_refptr<VideoFrame> frame, bool force_keyframe);
void UseOutputBitstreamBufferTask(
std::unique_ptr<BitstreamBufferRef> buffer_ref);
void RequestEncodingParametersChangeTask(uint32_t bitrate,
diff --git a/chromium/media/gpu/v4l2/BUILD.gn b/chromium/media/gpu/v4l2/BUILD.gn
index 1ba24ce5e5d..906be540c54 100644
--- a/chromium/media/gpu/v4l2/BUILD.gn
+++ b/chromium/media/gpu/v4l2/BUILD.gn
@@ -37,10 +37,6 @@ source_set("v4l2") {
"v4l2_h264_accelerator.h",
"v4l2_image_processor.cc",
"v4l2_image_processor.h",
- "v4l2_jpeg_encode_accelerator.cc",
- "v4l2_jpeg_encode_accelerator.h",
- "v4l2_mjpeg_decode_accelerator.cc",
- "v4l2_mjpeg_decode_accelerator.h",
"v4l2_slice_video_decode_accelerator.cc",
"v4l2_slice_video_decode_accelerator.h",
"v4l2_stateful_workaround.cc",
@@ -85,6 +81,21 @@ source_set("v4l2") {
if (use_v4lplugin) {
deps += [ ":libv4l2_stubs" ]
}
+
+ if (is_chromeos) {
+ sources += [
+ "v4l2_jpeg_encode_accelerator.cc",
+ "v4l2_jpeg_encode_accelerator.h",
+ "v4l2_mjpeg_decode_accelerator.cc",
+ "v4l2_mjpeg_decode_accelerator.h",
+ ]
+
+ deps += [
+ "//components/chromeos_camera:jpeg_encode_accelerator",
+ "//components/chromeos_camera:mjpeg_decode_accelerator",
+ "//media/parsers",
+ ]
+ }
}
source_set("unit_test") {
diff --git a/chromium/media/gpu/v4l2/generic_v4l2_device.cc b/chromium/media/gpu/v4l2/generic_v4l2_device.cc
index d6bb0c60983..530a7f81b9c 100644
--- a/chromium/media/gpu/v4l2/generic_v4l2_device.cc
+++ b/chromium/media/gpu/v4l2/generic_v4l2_device.cc
@@ -200,7 +200,7 @@ std::vector<base::ScopedFD> GenericV4L2Device::GetDmabufsForV4L2Buffer(
bool GenericV4L2Device::CanCreateEGLImageFrom(uint32_t v4l2_pixfmt) {
static uint32_t kEGLImageDrmFmtsSupported[] = {
DRM_FORMAT_ARGB8888,
-#if defined(ARCH_CPU_ARMEL)
+#if defined(ARCH_CPU_ARM_FAMILY)
DRM_FORMAT_NV12,
DRM_FORMAT_YVU420,
#endif
diff --git a/chromium/media/gpu/v4l2/v4l2_device.cc b/chromium/media/gpu/v4l2/v4l2_device.cc
index 65b3ae4b35e..1945fb1b11c 100644
--- a/chromium/media/gpu/v4l2/v4l2_device.cc
+++ b/chromium/media/gpu/v4l2/v4l2_device.cc
@@ -45,7 +45,7 @@ class V4L2Buffer {
void* GetPlaneMapping(const size_t plane);
size_t GetMemoryUsage() const;
const struct v4l2_buffer* v4l2_buffer() const { return &v4l2_buffer_; }
- const scoped_refptr<VideoFrame>& GetVideoFrame();
+ scoped_refptr<VideoFrame> GetVideoFrame();
private:
V4L2Buffer(scoped_refptr<V4L2Device> device,
@@ -185,7 +185,7 @@ scoped_refptr<VideoFrame> V4L2Buffer::CreateVideoFrame() {
*layout, gfx::Rect(size), size, std::move(dmabuf_fds), base::TimeDelta());
}
-const scoped_refptr<VideoFrame>& V4L2Buffer::GetVideoFrame() {
+scoped_refptr<VideoFrame> V4L2Buffer::GetVideoFrame() {
// We can create the VideoFrame only when using MMAP buffers.
if (v4l2_buffer_.memory != V4L2_MEMORY_MMAP) {
DVLOGF(1) << "Cannot create video frame from non-MMAP buffer";
@@ -262,7 +262,7 @@ class V4L2BufferRefBase {
bool QueueBuffer();
void* GetPlaneMapping(const size_t plane);
- const scoped_refptr<VideoFrame>& GetVideoFrame();
+ scoped_refptr<VideoFrame> GetVideoFrame();
// Data from the buffer, that users can query and/or write.
struct v4l2_buffer v4l2_buffer_;
@@ -330,7 +330,7 @@ void* V4L2BufferRefBase::GetPlaneMapping(const size_t plane) {
return queue_->buffers_[BufferId()]->GetPlaneMapping(plane);
}
-const scoped_refptr<VideoFrame>& V4L2BufferRefBase::GetVideoFrame() {
+scoped_refptr<VideoFrame> V4L2BufferRefBase::GetVideoFrame() {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
// Used so we can return a const scoped_refptr& in all cases.
@@ -383,7 +383,7 @@ V4L2WritableBufferRef& V4L2WritableBufferRef::operator=(
return *this;
}
-const scoped_refptr<VideoFrame>& V4L2WritableBufferRef::GetVideoFrame() {
+scoped_refptr<VideoFrame> V4L2WritableBufferRef::GetVideoFrame() {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
return buffer_data_->GetVideoFrame();
@@ -564,7 +564,7 @@ V4L2ReadableBuffer::V4L2ReadableBuffer(const struct v4l2_buffer* v4l2_buffer,
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
}
-const scoped_refptr<VideoFrame>& V4L2ReadableBuffer::GetVideoFrame() {
+scoped_refptr<VideoFrame> V4L2ReadableBuffer::GetVideoFrame() {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
return buffer_data_->GetVideoFrame();
diff --git a/chromium/media/gpu/v4l2/v4l2_device.h b/chromium/media/gpu/v4l2/v4l2_device.h
index 2e9fd584ed5..bc209f2355d 100644
--- a/chromium/media/gpu/v4l2/v4l2_device.h
+++ b/chromium/media/gpu/v4l2/v4l2_device.h
@@ -112,7 +112,7 @@ class MEDIA_GPU_EXPORT V4L2WritableBufferRef {
// V4L2ReadableBufferRef if both references point to the same V4L2 buffer.
// Note: at the moment, this method is valid for MMAP buffers only. It will
// return nullptr for any other buffer type.
- const scoped_refptr<VideoFrame>& GetVideoFrame() WARN_UNUSED_RESULT;
+ scoped_refptr<VideoFrame> GetVideoFrame() WARN_UNUSED_RESULT;
// Return the V4L2 buffer ID of the underlying buffer.
// TODO(acourbot) This is used for legacy clients but should be ultimately
@@ -171,7 +171,7 @@ class MEDIA_GPU_EXPORT V4L2ReadableBuffer
// V4L2ReadableBufferRef if both references point to the same V4L2 buffer.
// Note: at the moment, this method is valid for MMAP buffers only. It will
// return nullptr for any other buffer type.
- const scoped_refptr<VideoFrame>& GetVideoFrame() WARN_UNUSED_RESULT;
+ scoped_refptr<VideoFrame> GetVideoFrame() WARN_UNUSED_RESULT;
private:
friend class V4L2BufferRefFactory;
@@ -302,8 +302,6 @@ class MEDIA_GPU_EXPORT V4L2Queue
// Callback to call in this queue's destructor.
base::OnceClosure destroy_cb_;
- base::WeakPtrFactory<V4L2Queue> weak_this_factory_;
-
V4L2Queue(scoped_refptr<V4L2Device> dev,
enum v4l2_buf_type type,
base::OnceClosure destroy_cb);
@@ -312,6 +310,9 @@ class MEDIA_GPU_EXPORT V4L2Queue
friend class base::RefCountedThreadSafe<V4L2Queue>;
SEQUENCE_CHECKER(sequence_checker_);
+
+ base::WeakPtrFactory<V4L2Queue> weak_this_factory_;
+
DISALLOW_COPY_AND_ASSIGN(V4L2Queue);
};
diff --git a/chromium/media/gpu/v4l2/v4l2_device_unittest.cc b/chromium/media/gpu/v4l2/v4l2_device_unittest.cc
index df0f2455283..f6c34125751 100644
--- a/chromium/media/gpu/v4l2/v4l2_device_unittest.cc
+++ b/chromium/media/gpu/v4l2/v4l2_device_unittest.cc
@@ -78,20 +78,20 @@ TEST(V4L2DeviceTest, V4L2FormatToVideoFrameLayoutNV12) {
ASSERT_TRUE(layout.has_value());
EXPECT_EQ(PIXEL_FORMAT_NV12, layout->format());
EXPECT_EQ(gfx::Size(300, 180), layout->coded_size());
- constexpr uint64_t kNoModifier = gfx::NativePixmapPlane::kNoModifier;
std::vector<VideoFrameLayout::Plane> expected_planes(
- {{320, 0u, kNoModifier}, {320, 57600u, kNoModifier}});
+ {{320, 0u}, {320, 57600u}});
EXPECT_EQ(expected_planes, layout->planes());
EXPECT_EQ(std::vector<size_t>({86400u}), layout->buffer_sizes());
EXPECT_EQ(86400u, layout->GetTotalBufferSize());
std::ostringstream ostream;
ostream << *layout;
- const std::string kNoModifierStr = std::to_string(kNoModifier);
+ const std::string kNoModifierStr =
+ std::to_string(gfx::NativePixmapHandle::kNoModifier);
EXPECT_EQ(ostream.str(),
"VideoFrameLayout(format: PIXEL_FORMAT_NV12, coded_size: 300x180, "
- "planes (stride, offset, modifier): [(320, 0, " +
- kNoModifierStr + "), (320, 57600, " + kNoModifierStr +
- ")], buffer_sizes: [86400])");
+ "planes (stride, offset): [(320, 0), (320, 57600)], buffer_sizes: "
+ "[86400], modifier: " +
+ kNoModifierStr + ")");
}
// Test V4L2FormatToVideoFrameLayout with YUV420 pixelformat, which has one
@@ -103,23 +103,20 @@ TEST(V4L2DeviceTest, V4L2FormatToVideoFrameLayoutYUV420) {
ASSERT_TRUE(layout.has_value());
EXPECT_EQ(PIXEL_FORMAT_I420, layout->format());
EXPECT_EQ(gfx::Size(300, 180), layout->coded_size());
- constexpr uint64_t kNoModifier = gfx::NativePixmapPlane::kNoModifier;
std::vector<VideoFrameLayout::Plane> expected_planes(
- {{320, 0u, kNoModifier},
- {160, 57600u, kNoModifier},
- {160, 72000u, kNoModifier}});
+ {{320, 0u}, {160, 57600u}, {160, 72000u}});
EXPECT_EQ(expected_planes, layout->planes());
EXPECT_EQ(std::vector<size_t>({86400u}), layout->buffer_sizes());
EXPECT_EQ(86400u, layout->GetTotalBufferSize());
std::ostringstream ostream;
ostream << *layout;
- const std::string kNoModifierStr = std::to_string(kNoModifier);
+ const std::string kNoModifierStr =
+ std::to_string(gfx::NativePixmapHandle::kNoModifier);
EXPECT_EQ(ostream.str(),
"VideoFrameLayout(format: PIXEL_FORMAT_I420, coded_size: 300x180, "
- "planes (stride, offset, modifier): [(320, 0, " +
- kNoModifierStr + "), (160, 57600, " + kNoModifierStr +
- "), (160, 72000, " + kNoModifierStr +
- ")], buffer_sizes: [86400])");
+ "planes (stride, offset): [(320, 0), (160, 57600), (160, 72000)], "
+ "buffer_sizes: [86400], modifier: " +
+ kNoModifierStr + ")");
}
// Test V4L2FormatToVideoFrameLayout with single planar v4l2_format.
diff --git a/chromium/media/gpu/v4l2/v4l2_h264_accelerator.cc b/chromium/media/gpu/v4l2/v4l2_h264_accelerator.cc
index 5fab11f50ea..f648e539525 100644
--- a/chromium/media/gpu/v4l2/v4l2_h264_accelerator.cc
+++ b/chromium/media/gpu/v4l2/v4l2_h264_accelerator.cc
@@ -76,7 +76,7 @@ void V4L2H264Accelerator::H264DPBToV4L2DPB(
int index = VIDEO_MAX_FRAME;
if (!pic->nonexisting) {
scoped_refptr<V4L2DecodeSurface> dec_surface =
- H264PictureToV4L2DecodeSurface(pic);
+ H264PictureToV4L2DecodeSurface(pic.get());
index = dec_surface->GetReferenceID();
ref_surfaces->push_back(dec_surface);
}
@@ -99,7 +99,7 @@ H264Decoder::H264Accelerator::Status V4L2H264Accelerator::SubmitFrameMetadata(
const H264Picture::Vector& ref_pic_listp0,
const H264Picture::Vector& ref_pic_listb0,
const H264Picture::Vector& ref_pic_listb1,
- const scoped_refptr<H264Picture>& pic) {
+ scoped_refptr<H264Picture> pic) {
struct v4l2_ext_control ctrl;
std::vector<struct v4l2_ext_control> ctrls;
@@ -251,7 +251,7 @@ H264Decoder::H264Accelerator::Status V4L2H264Accelerator::SubmitFrameMetadata(
ctrls.push_back(ctrl);
scoped_refptr<V4L2DecodeSurface> dec_surface =
- H264PictureToV4L2DecodeSurface(pic);
+ H264PictureToV4L2DecodeSurface(pic.get());
struct v4l2_ext_controls ext_ctrls;
memset(&ext_ctrls, 0, sizeof(ext_ctrls));
@@ -282,7 +282,7 @@ H264Decoder::H264Accelerator::Status V4L2H264Accelerator::SubmitSlice(
const H264SliceHeader* slice_hdr,
const H264Picture::Vector& ref_pic_list0,
const H264Picture::Vector& ref_pic_list1,
- const scoped_refptr<H264Picture>& pic,
+ scoped_refptr<H264Picture> pic,
const uint8_t* data,
size_t size,
const std::vector<SubsampleEntry>& subsamples) {
@@ -384,7 +384,7 @@ H264Decoder::H264Accelerator::Status V4L2H264Accelerator::SubmitSlice(
v4l2_slice_param.ref_pic_list1);
scoped_refptr<V4L2DecodeSurface> dec_surface =
- H264PictureToV4L2DecodeSurface(pic);
+ H264PictureToV4L2DecodeSurface(pic.get());
v4l2_decode_param_.nal_ref_idc = slice_hdr->nal_ref_idc;
@@ -402,9 +402,9 @@ H264Decoder::H264Accelerator::Status V4L2H264Accelerator::SubmitSlice(
}
H264Decoder::H264Accelerator::Status V4L2H264Accelerator::SubmitDecode(
- const scoped_refptr<H264Picture>& pic) {
+ scoped_refptr<H264Picture> pic) {
scoped_refptr<V4L2DecodeSurface> dec_surface =
- H264PictureToV4L2DecodeSurface(pic);
+ H264PictureToV4L2DecodeSurface(pic.get());
v4l2_decode_param_.num_slices = num_slices_;
v4l2_decode_param_.idr_pic_flag = pic->idr;
@@ -443,9 +443,9 @@ H264Decoder::H264Accelerator::Status V4L2H264Accelerator::SubmitDecode(
return Status::kOk;
}
-bool V4L2H264Accelerator::OutputPicture(const scoped_refptr<H264Picture>& pic) {
+bool V4L2H264Accelerator::OutputPicture(scoped_refptr<H264Picture> pic) {
// TODO(crbug.com/647725): Insert correct color space.
- surface_handler_->SurfaceReady(H264PictureToV4L2DecodeSurface(pic),
+ surface_handler_->SurfaceReady(H264PictureToV4L2DecodeSurface(pic.get()),
pic->bitstream_id(), pic->visible_rect(),
VideoColorSpace());
return true;
@@ -458,8 +458,7 @@ void V4L2H264Accelerator::Reset() {
}
scoped_refptr<V4L2DecodeSurface>
-V4L2H264Accelerator::H264PictureToV4L2DecodeSurface(
- const scoped_refptr<H264Picture>& pic) {
+V4L2H264Accelerator::H264PictureToV4L2DecodeSurface(H264Picture* pic) {
V4L2H264Picture* v4l2_pic = pic->AsV4L2H264Picture();
CHECK(v4l2_pic);
return v4l2_pic->dec_surface();
diff --git a/chromium/media/gpu/v4l2/v4l2_h264_accelerator.h b/chromium/media/gpu/v4l2/v4l2_h264_accelerator.h
index 2a6c179c535..f15e00ac3b3 100644
--- a/chromium/media/gpu/v4l2/v4l2_h264_accelerator.h
+++ b/chromium/media/gpu/v4l2/v4l2_h264_accelerator.h
@@ -36,17 +36,17 @@ class V4L2H264Accelerator : public H264Decoder::H264Accelerator {
const H264Picture::Vector& ref_pic_listp0,
const H264Picture::Vector& ref_pic_listb0,
const H264Picture::Vector& ref_pic_listb1,
- const scoped_refptr<H264Picture>& pic) override;
+ scoped_refptr<H264Picture> pic) override;
Status SubmitSlice(const H264PPS* pps,
const H264SliceHeader* slice_hdr,
const H264Picture::Vector& ref_pic_list0,
const H264Picture::Vector& ref_pic_list1,
- const scoped_refptr<H264Picture>& pic,
+ scoped_refptr<H264Picture> pic,
const uint8_t* data,
size_t size,
const std::vector<SubsampleEntry>& subsamples) override;
- Status SubmitDecode(const scoped_refptr<H264Picture>& pic) override;
- bool OutputPicture(const scoped_refptr<H264Picture>& pic) override;
+ Status SubmitDecode(scoped_refptr<H264Picture> pic) override;
+ bool OutputPicture(scoped_refptr<H264Picture> pic) override;
void Reset() override;
private:
@@ -61,7 +61,7 @@ class V4L2H264Accelerator : public H264Decoder::H264Accelerator {
const H264DPB& dpb,
std::vector<scoped_refptr<V4L2DecodeSurface>>* ref_surfaces);
scoped_refptr<V4L2DecodeSurface> H264PictureToV4L2DecodeSurface(
- const scoped_refptr<H264Picture>& pic);
+ H264Picture* pic);
size_t num_slices_;
V4L2DecodeSurfaceHandler* const surface_handler_;
diff --git a/chromium/media/gpu/v4l2/v4l2_image_processor.cc b/chromium/media/gpu/v4l2/v4l2_image_processor.cc
index f8bd3bf5e53..f56b3e3ffe4 100644
--- a/chromium/media/gpu/v4l2/v4l2_image_processor.cc
+++ b/chromium/media/gpu/v4l2/v4l2_image_processor.cc
@@ -732,13 +732,15 @@ void V4L2ImageProcessor::Dequeue() {
case V4L2_MEMORY_MMAP:
// Wrap the V4L2 VideoFrame into another one with a destruction observer
// so we can reuse the MMAP buffer once the client is done with it.
- output_frame = buffer->GetVideoFrame();
- output_frame = VideoFrame::WrapVideoFrame(
- output_frame, output_frame->format(), output_frame->visible_rect(),
- output_frame->natural_size());
- output_frame->AddDestructionObserver(BindToCurrentLoop(
- base::BindOnce(&V4L2ImageProcessor::V4L2VFDestructionObserver,
- weak_this_factory_.GetWeakPtr(), buffer)));
+ {
+ const auto& orig_frame = buffer->GetVideoFrame();
+ output_frame = VideoFrame::WrapVideoFrame(
+ *orig_frame, orig_frame->format(), orig_frame->visible_rect(),
+ orig_frame->natural_size());
+ output_frame->AddDestructionObserver(BindToCurrentLoop(
+ base::BindOnce(&V4L2ImageProcessor::V4L2VFDestructionObserver,
+ weak_this_factory_.GetWeakPtr(), buffer)));
+ }
break;
case V4L2_MEMORY_DMABUF:
diff --git a/chromium/media/gpu/v4l2/v4l2_jpeg_encode_accelerator.cc b/chromium/media/gpu/v4l2/v4l2_jpeg_encode_accelerator.cc
index 74a2c58bbe4..25de2a4d490 100644
--- a/chromium/media/gpu/v4l2/v4l2_jpeg_encode_accelerator.cc
+++ b/chromium/media/gpu/v4l2/v4l2_jpeg_encode_accelerator.cc
@@ -62,16 +62,16 @@ V4L2JpegEncodeAccelerator::JpegBufferRecord::~JpegBufferRecord() {}
V4L2JpegEncodeAccelerator::JobRecord::JobRecord(
scoped_refptr<VideoFrame> input_frame,
int quality,
- const BitstreamBuffer* exif_buffer,
- const BitstreamBuffer& output_buffer)
+ BitstreamBuffer* exif_buffer,
+ BitstreamBuffer output_buffer)
: input_frame_(input_frame),
quality(quality),
buffer_id_(output_buffer.id()),
- output_shm(output_buffer.handle(), output_buffer.size(), false),
+ output_shm(output_buffer.TakeRegion(), output_buffer.size(), false),
output_offset(output_buffer.offset()),
exif_shm(nullptr) {
if (exif_buffer) {
- exif_shm.reset(new UnalignedSharedMemory(exif_buffer->handle(),
+ exif_shm.reset(new UnalignedSharedMemory(exif_buffer->TakeRegion(),
exif_buffer->size(), true));
exif_offset = exif_buffer->offset();
}
@@ -965,8 +965,9 @@ void V4L2JpegEncodeAccelerator::NotifyError(int32_t buffer_id, Status status) {
client_->NotifyError(buffer_id, status);
}
-JpegEncodeAccelerator::Status V4L2JpegEncodeAccelerator::Initialize(
- Client* client) {
+chromeos_camera::JpegEncodeAccelerator::Status
+V4L2JpegEncodeAccelerator::Initialize(
+ chromeos_camera::JpegEncodeAccelerator::Client* client) {
DCHECK(child_task_runner_->BelongsToCurrentThread());
std::unique_ptr<EncodedInstance> encoded_device(new EncodedInstance(this));
@@ -998,8 +999,8 @@ size_t V4L2JpegEncodeAccelerator::GetMaxCodedBufferSize(
void V4L2JpegEncodeAccelerator::Encode(
scoped_refptr<media::VideoFrame> video_frame,
int quality,
- const BitstreamBuffer* exif_buffer,
- const BitstreamBuffer& output_buffer) {
+ BitstreamBuffer* exif_buffer,
+ BitstreamBuffer output_buffer) {
DCHECK(io_task_runner_->BelongsToCurrentThread());
DVLOGF(4) << "buffer_id=" << output_buffer.id()
@@ -1025,8 +1026,8 @@ void V4L2JpegEncodeAccelerator::Encode(
}
}
- std::unique_ptr<JobRecord> job_record(
- new JobRecord(video_frame, quality, exif_buffer, output_buffer));
+ std::unique_ptr<JobRecord> job_record(new JobRecord(
+ video_frame, quality, exif_buffer, std::move(output_buffer)));
encoder_task_runner_->PostTask(
FROM_HERE,
@@ -1034,6 +1035,15 @@ void V4L2JpegEncodeAccelerator::Encode(
base::Unretained(this), base::Passed(&job_record)));
}
+void V4L2JpegEncodeAccelerator::EncodeWithDmaBuf(
+ scoped_refptr<VideoFrame> input_frame,
+ scoped_refptr<VideoFrame> output_frame,
+ int quality,
+ int32_t task_id,
+ BitstreamBuffer* exif_buffer) {
+ NOTIMPLEMENTED();
+}
+
void V4L2JpegEncodeAccelerator::EncodeTask(
std::unique_ptr<JobRecord> job_record) {
DCHECK(encoder_task_runner_->BelongsToCurrentThread());
diff --git a/chromium/media/gpu/v4l2/v4l2_jpeg_encode_accelerator.h b/chromium/media/gpu/v4l2/v4l2_jpeg_encode_accelerator.h
index 48b9d1f434a..9ccc3401e91 100644
--- a/chromium/media/gpu/v4l2/v4l2_jpeg_encode_accelerator.h
+++ b/chromium/media/gpu/v4l2/v4l2_jpeg_encode_accelerator.h
@@ -17,13 +17,13 @@
#include "base/memory/weak_ptr.h"
#include "base/single_thread_task_runner.h"
#include "base/threading/thread.h"
+#include "components/chromeos_camera/jpeg_encode_accelerator.h"
#include "media/base/bitstream_buffer.h"
#include "media/base/unaligned_shared_memory.h"
#include "media/base/video_frame.h"
-#include "media/filters/jpeg_parser.h"
#include "media/gpu/media_gpu_export.h"
#include "media/gpu/v4l2/v4l2_device.h"
-#include "media/video/jpeg_encode_accelerator.h"
+#include "media/parsers/jpeg_parser.h"
namespace {
@@ -46,19 +46,26 @@ static_assert(
namespace media {
class MEDIA_GPU_EXPORT V4L2JpegEncodeAccelerator
- : public JpegEncodeAccelerator {
+ : public chromeos_camera::JpegEncodeAccelerator {
public:
V4L2JpegEncodeAccelerator(
const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner);
~V4L2JpegEncodeAccelerator() override;
// JpegEncodeAccelerator implementation.
- JpegEncodeAccelerator::Status Initialize(Client* client) override;
+ chromeos_camera::JpegEncodeAccelerator::Status Initialize(
+ chromeos_camera::JpegEncodeAccelerator::Client* client) override;
size_t GetMaxCodedBufferSize(const gfx::Size& picture_size) override;
void Encode(scoped_refptr<media::VideoFrame> video_frame,
int quality,
- const BitstreamBuffer* exif_buffer,
- const BitstreamBuffer& output_buffer) override;
+ BitstreamBuffer* exif_buffer,
+ BitstreamBuffer output_buffer) override;
+
+ void EncodeWithDmaBuf(scoped_refptr<VideoFrame> input_frame,
+ scoped_refptr<VideoFrame> output_frame,
+ int quality,
+ int32_t task_id,
+ BitstreamBuffer* exif_buffer) override;
private:
// Record for input buffers.
@@ -92,8 +99,8 @@ class MEDIA_GPU_EXPORT V4L2JpegEncodeAccelerator
struct JobRecord {
JobRecord(scoped_refptr<VideoFrame> input_frame,
int quality,
- const BitstreamBuffer* exif_buffer,
- const BitstreamBuffer& output_buffer);
+ BitstreamBuffer* exif_buffer,
+ BitstreamBuffer output_buffer);
~JobRecord();
// Input frame buffer.
@@ -260,7 +267,7 @@ class MEDIA_GPU_EXPORT V4L2JpegEncodeAccelerator
scoped_refptr<base::SingleThreadTaskRunner> io_task_runner_;
// The client of this class.
- Client* client_;
+ chromeos_camera::JpegEncodeAccelerator::Client* client_;
// Thread to communicate with the device.
base::Thread encoder_thread_;
diff --git a/chromium/media/gpu/v4l2/v4l2_mjpeg_decode_accelerator.cc b/chromium/media/gpu/v4l2/v4l2_mjpeg_decode_accelerator.cc
index 7b4f30fa32e..d1f4194c9ee 100644
--- a/chromium/media/gpu/v4l2/v4l2_mjpeg_decode_accelerator.cc
+++ b/chromium/media/gpu/v4l2/v4l2_mjpeg_decode_accelerator.cc
@@ -16,8 +16,8 @@
#include "base/numerics/safe_conversions.h"
#include "base/stl_util.h"
#include "base/threading/thread_task_runner_handle.h"
-#include "media/filters/jpeg_parser.h"
#include "media/gpu/macros.h"
+#include "media/parsers/jpeg_parser.h"
#include "third_party/libyuv/include/libyuv.h"
#define IOCTL_OR_ERROR_RETURN_VALUE(type, arg, value, type_name) \
@@ -128,10 +128,12 @@ V4L2MjpegDecodeAccelerator::BufferRecord::BufferRecord() : at_device(false) {
V4L2MjpegDecodeAccelerator::BufferRecord::~BufferRecord() {}
V4L2MjpegDecodeAccelerator::JobRecord::JobRecord(
- const BitstreamBuffer& bitstream_buffer,
+ BitstreamBuffer bitstream_buffer,
scoped_refptr<VideoFrame> video_frame)
: bitstream_buffer_id(bitstream_buffer.id()),
- shm(bitstream_buffer.handle(), bitstream_buffer.size(), true),
+ shm(bitstream_buffer.TakeRegion(),
+ bitstream_buffer.size(),
+ false /* read_only */),
offset(bitstream_buffer.offset()),
out_frame(video_frame) {}
@@ -201,7 +203,8 @@ void V4L2MjpegDecodeAccelerator::PostNotifyError(int32_t bitstream_buffer_id,
weak_ptr_, bitstream_buffer_id, error));
}
-bool V4L2MjpegDecodeAccelerator::Initialize(Client* client) {
+bool V4L2MjpegDecodeAccelerator::Initialize(
+ chromeos_camera::MjpegDecodeAccelerator::Client* client) {
DCHECK(child_task_runner_->BelongsToCurrentThread());
if (!device_->Open(V4L2Device::Type::kJpegDecoder, V4L2_PIX_FMT_JPEG)) {
@@ -247,17 +250,14 @@ bool V4L2MjpegDecodeAccelerator::Initialize(Client* client) {
return true;
}
-void V4L2MjpegDecodeAccelerator::Decode(
- const BitstreamBuffer& bitstream_buffer,
- const scoped_refptr<VideoFrame>& video_frame) {
+void V4L2MjpegDecodeAccelerator::Decode(BitstreamBuffer bitstream_buffer,
+ scoped_refptr<VideoFrame> video_frame) {
DVLOGF(4) << "input_id=" << bitstream_buffer.id()
<< ", size=" << bitstream_buffer.size();
DCHECK(io_task_runner_->BelongsToCurrentThread());
if (bitstream_buffer.id() < 0) {
VLOGF(1) << "Invalid bitstream_buffer, id: " << bitstream_buffer.id();
- if (base::SharedMemory::IsHandleValid(bitstream_buffer.handle()))
- base::SharedMemory::CloseHandle(bitstream_buffer.handle());
PostNotifyError(bitstream_buffer.id(), INVALID_ARGUMENT);
return;
}
@@ -268,7 +268,7 @@ void V4L2MjpegDecodeAccelerator::Decode(
}
std::unique_ptr<JobRecord> job_record(
- new JobRecord(bitstream_buffer, video_frame));
+ new JobRecord(std::move(bitstream_buffer), std::move(video_frame)));
decoder_task_runner_->PostTask(
FROM_HERE,
@@ -672,7 +672,7 @@ void V4L2MjpegDecodeAccelerator::EnqueueOutput() {
bool V4L2MjpegDecodeAccelerator::ConvertOutputImage(
const BufferRecord& output_buffer,
- const scoped_refptr<VideoFrame>& dst_frame) {
+ VideoFrame* dst_frame) {
uint8_t* dst_y = dst_frame->data(VideoFrame::kYPlane);
uint8_t* dst_u = dst_frame->data(VideoFrame::kUPlane);
uint8_t* dst_v = dst_frame->data(VideoFrame::kVPlane);
@@ -822,7 +822,7 @@ void V4L2MjpegDecodeAccelerator::Dequeue() {
// Copy the decoded data from output buffer to the buffer provided by the
// client. Do format conversion when output format is not
// V4L2_PIX_FMT_YUV420.
- if (!ConvertOutputImage(output_record, job_record->out_frame)) {
+ if (!ConvertOutputImage(output_record, job_record->out_frame.get())) {
PostNotifyError(job_record->bitstream_buffer_id, PLATFORM_FAILURE);
return;
}
diff --git a/chromium/media/gpu/v4l2/v4l2_mjpeg_decode_accelerator.h b/chromium/media/gpu/v4l2/v4l2_mjpeg_decode_accelerator.h
index c41dd7e72b7..b0c9442ce36 100644
--- a/chromium/media/gpu/v4l2/v4l2_mjpeg_decode_accelerator.h
+++ b/chromium/media/gpu/v4l2/v4l2_mjpeg_decode_accelerator.h
@@ -17,17 +17,17 @@
#include "base/memory/weak_ptr.h"
#include "base/single_thread_task_runner.h"
#include "base/threading/thread.h"
+#include "components/chromeos_camera/mjpeg_decode_accelerator.h"
#include "media/base/bitstream_buffer.h"
#include "media/base/unaligned_shared_memory.h"
#include "media/base/video_frame.h"
#include "media/gpu/media_gpu_export.h"
#include "media/gpu/v4l2/v4l2_device.h"
-#include "media/video/mjpeg_decode_accelerator.h"
namespace media {
class MEDIA_GPU_EXPORT V4L2MjpegDecodeAccelerator
- : public MjpegDecodeAccelerator {
+ : public chromeos_camera::MjpegDecodeAccelerator {
public:
V4L2MjpegDecodeAccelerator(
const scoped_refptr<V4L2Device>& device,
@@ -35,9 +35,10 @@ class MEDIA_GPU_EXPORT V4L2MjpegDecodeAccelerator
~V4L2MjpegDecodeAccelerator() override;
// MjpegDecodeAccelerator implementation.
- bool Initialize(Client* client) override;
- void Decode(const BitstreamBuffer& bitstream_buffer,
- const scoped_refptr<VideoFrame>& video_frame) override;
+ bool Initialize(
+ chromeos_camera::MjpegDecodeAccelerator::Client* client) override;
+ void Decode(BitstreamBuffer bitstream_buffer,
+ scoped_refptr<VideoFrame> video_frame) override;
bool IsSupported() override;
private:
@@ -59,7 +60,7 @@ class MEDIA_GPU_EXPORT V4L2MjpegDecodeAccelerator
// the time of submission we may not have one available (and don't need one
// to submit input to the device).
struct JobRecord {
- JobRecord(const BitstreamBuffer& bitstream_buffer,
+ JobRecord(BitstreamBuffer bitstream_buffer,
scoped_refptr<VideoFrame> video_frame);
~JobRecord();
@@ -89,7 +90,7 @@ class MEDIA_GPU_EXPORT V4L2MjpegDecodeAccelerator
// - V4L2_PIX_FMT_YUV_420M
// - V4L2_PIX_FMT_YUV_422M
bool ConvertOutputImage(const BufferRecord& output_buffer,
- const scoped_refptr<VideoFrame>& dst_frame);
+ VideoFrame* dst_frame);
// Return the number of input/output buffers enqueued to the device.
size_t InputBufferQueuedCount();
@@ -146,7 +147,7 @@ class MEDIA_GPU_EXPORT V4L2MjpegDecodeAccelerator
scoped_refptr<base::SingleThreadTaskRunner> io_task_runner_;
// The client of this class.
- Client* client_;
+ chromeos_camera::MjpegDecodeAccelerator::Client* client_;
// The V4L2Device this class is operating upon.
scoped_refptr<V4L2Device> device_;
diff --git a/chromium/media/gpu/v4l2/v4l2_slice_video_decode_accelerator.cc b/chromium/media/gpu/v4l2/v4l2_slice_video_decode_accelerator.cc
index 29e5625887b..8ae6c270e77 100644
--- a/chromium/media/gpu/v4l2/v4l2_slice_video_decode_accelerator.cc
+++ b/chromium/media/gpu/v4l2/v4l2_slice_video_decode_accelerator.cc
@@ -538,12 +538,12 @@ bool V4L2SliceVideoDecodeAccelerator::CreateOutputBuffers() {
VideoPixelFormat pixel_format =
V4L2Device::V4L2PixFmtToVideoPixelFormat(output_format_fourcc_);
-
child_task_runner_->PostTask(
FROM_HERE,
- base::BindOnce(&VideoDecodeAccelerator::Client::ProvidePictureBuffers,
- client_, num_pictures, pixel_format, 1, coded_size_,
- device_->GetTextureTarget()));
+ base::BindOnce(
+ &VideoDecodeAccelerator::Client::ProvidePictureBuffersWithVisibleRect,
+ client_, num_pictures, pixel_format, 1, coded_size_,
+ decoder_->GetVisibleRect(), device_->GetTextureTarget()));
// Go into kAwaitingPictureBuffers to prevent us from doing any more decoding
// or event handling while we are waiting for AssignPictureBuffers(). Not
@@ -1044,8 +1044,7 @@ bool V4L2SliceVideoDecodeAccelerator::StopDevicePoll(bool keep_input_state) {
return true;
}
-void V4L2SliceVideoDecodeAccelerator::Decode(
- const BitstreamBuffer& bitstream_buffer) {
+void V4L2SliceVideoDecodeAccelerator::Decode(BitstreamBuffer bitstream_buffer) {
Decode(bitstream_buffer.ToDecoderBuffer(), bitstream_buffer.id());
}
diff --git a/chromium/media/gpu/v4l2/v4l2_slice_video_decode_accelerator.h b/chromium/media/gpu/v4l2/v4l2_slice_video_decode_accelerator.h
index eb73f699338..905df661b8e 100644
--- a/chromium/media/gpu/v4l2/v4l2_slice_video_decode_accelerator.h
+++ b/chromium/media/gpu/v4l2/v4l2_slice_video_decode_accelerator.h
@@ -52,7 +52,7 @@ class MEDIA_GPU_EXPORT V4L2SliceVideoDecodeAccelerator
// VideoDecodeAccelerator implementation.
bool Initialize(const Config& config, Client* client) override;
- void Decode(const BitstreamBuffer& bitstream_buffer) override;
+ void Decode(BitstreamBuffer bitstream_buffer) override;
void Decode(scoped_refptr<DecoderBuffer> buffer,
int32_t bitstream_id) override;
void AssignPictureBuffers(const std::vector<PictureBuffer>& buffers) override;
diff --git a/chromium/media/gpu/v4l2/v4l2_stateful_workaround.cc b/chromium/media/gpu/v4l2/v4l2_stateful_workaround.cc
index 7f504a41797..8ce8798497a 100644
--- a/chromium/media/gpu/v4l2/v4l2_stateful_workaround.cc
+++ b/chromium/media/gpu/v4l2/v4l2_stateful_workaround.cc
@@ -11,7 +11,7 @@
#include "base/containers/small_map.h"
#include "base/memory/ptr_util.h"
#include "media/gpu/macros.h"
-#include "media/filters/vp8_parser.h"
+#include "media/parsers/vp8_parser.h"
#include "media/video/video_decode_accelerator.h"
namespace media {
diff --git a/chromium/media/gpu/v4l2/v4l2_video_decode_accelerator.cc b/chromium/media/gpu/v4l2/v4l2_video_decode_accelerator.cc
index 614b4c7a3e2..7bd8ca696ef 100644
--- a/chromium/media/gpu/v4l2/v4l2_video_decode_accelerator.cc
+++ b/chromium/media/gpu/v4l2/v4l2_video_decode_accelerator.cc
@@ -339,8 +339,7 @@ bool V4L2VideoDecodeAccelerator::CheckConfig(const Config& config) {
return true;
}
-void V4L2VideoDecodeAccelerator::Decode(
- const BitstreamBuffer& bitstream_buffer) {
+void V4L2VideoDecodeAccelerator::Decode(BitstreamBuffer bitstream_buffer) {
Decode(bitstream_buffer.ToDecoderBuffer(), bitstream_buffer.id());
}
@@ -2565,9 +2564,10 @@ bool V4L2VideoDecodeAccelerator::CreateOutputBuffers() {
: PIXEL_FORMAT_UNKNOWN;
child_task_runner_->PostTask(
- FROM_HERE, base::BindOnce(&Client::ProvidePictureBuffers, client_,
- buffer_count, pixel_format, 1, egl_image_size_,
- device_->GetTextureTarget()));
+ FROM_HERE,
+ base::BindOnce(&Client::ProvidePictureBuffersWithVisibleRect, client_,
+ buffer_count, pixel_format, 1, egl_image_size_,
+ gfx::Rect(visible_size_), device_->GetTextureTarget()));
// Go into kAwaitingPictureBuffers to prevent us from doing any more decoding
// or event handling while we are waiting for AssignPictureBuffers(). Not
diff --git a/chromium/media/gpu/v4l2/v4l2_video_decode_accelerator.h b/chromium/media/gpu/v4l2/v4l2_video_decode_accelerator.h
index ffa9c2a2358..e42749a1fa1 100644
--- a/chromium/media/gpu/v4l2/v4l2_video_decode_accelerator.h
+++ b/chromium/media/gpu/v4l2/v4l2_video_decode_accelerator.h
@@ -110,7 +110,7 @@ class MEDIA_GPU_EXPORT V4L2VideoDecodeAccelerator
// VideoDecodeAccelerator implementation.
// Note: Initialize() and Destroy() are synchronous.
bool Initialize(const Config& config, Client* client) override;
- void Decode(const BitstreamBuffer& bitstream_buffer) override;
+ void Decode(BitstreamBuffer bitstream_buffer) override;
void Decode(scoped_refptr<DecoderBuffer> buffer,
int32_t bitstream_id) override;
void AssignPictureBuffers(const std::vector<PictureBuffer>& buffers) override;
diff --git a/chromium/media/gpu/v4l2/v4l2_video_encode_accelerator.cc b/chromium/media/gpu/v4l2/v4l2_video_encode_accelerator.cc
index e75c42e4ba4..bd430c0acce 100644
--- a/chromium/media/gpu/v4l2/v4l2_video_encode_accelerator.cc
+++ b/chromium/media/gpu/v4l2/v4l2_video_encode_accelerator.cc
@@ -152,6 +152,7 @@ V4L2VideoEncodeAccelerator::~V4L2VideoEncodeAccelerator() {
bool V4L2VideoEncodeAccelerator::Initialize(const Config& config,
Client* client) {
+ TRACE_EVENT0("media,gpu", "V4L2VEA::Initialize");
VLOGF(2) << ": " << config.AsHumanReadableString();
visible_size_ = config.input_visible_size;
@@ -362,7 +363,7 @@ void V4L2VideoEncodeAccelerator::ImageProcessorError() {
NOTIFY_ERROR(kPlatformFailureError);
}
-void V4L2VideoEncodeAccelerator::Encode(const scoped_refptr<VideoFrame>& frame,
+void V4L2VideoEncodeAccelerator::Encode(scoped_refptr<VideoFrame> frame,
bool force_keyframe) {
DVLOGF(4) << "force_keyframe=" << force_keyframe;
DCHECK(child_task_runner_->BelongsToCurrentThread());
@@ -382,7 +383,7 @@ void V4L2VideoEncodeAccelerator::Encode(const scoped_refptr<VideoFrame>& frame,
ImageProcessor::OutputMode::IMPORT) {
const auto& buf = image_processor_output_buffers_[output_buffer_index];
auto output_frame = VideoFrame::WrapVideoFrame(
- buf, buf->format(), buf->visible_rect(), buf->natural_size());
+ *buf, buf->format(), buf->visible_rect(), buf->natural_size());
// We have to bind |weak_this| for FrameProcessed, because child
// thread is outlive this V4L2VideoEncodeAccelerator.
@@ -404,18 +405,18 @@ void V4L2VideoEncodeAccelerator::Encode(const scoped_refptr<VideoFrame>& frame,
}
}
} else {
- image_processor_input_queue_.emplace(frame, force_keyframe);
+ image_processor_input_queue_.emplace(std::move(frame), force_keyframe);
}
} else {
encoder_thread_.task_runner()->PostTask(
- FROM_HERE,
- base::BindOnce(&V4L2VideoEncodeAccelerator::EncodeTask,
- base::Unretained(this), frame, force_keyframe));
+ FROM_HERE, base::BindOnce(&V4L2VideoEncodeAccelerator::EncodeTask,
+ base::Unretained(this), std::move(frame),
+ force_keyframe));
}
}
void V4L2VideoEncodeAccelerator::UseOutputBitstreamBuffer(
- const BitstreamBuffer& buffer) {
+ BitstreamBuffer buffer) {
DVLOGF(4) << "id=" << buffer.id();
DCHECK(child_task_runner_->BelongsToCurrentThread());
@@ -424,7 +425,7 @@ void V4L2VideoEncodeAccelerator::UseOutputBitstreamBuffer(
return;
}
- auto shm = std::make_unique<UnalignedSharedMemory>(buffer.handle(),
+ auto shm = std::make_unique<UnalignedSharedMemory>(buffer.TakeRegion(),
buffer.size(), false);
if (!shm->MapAt(buffer.offset(), buffer.size())) {
NOTIFY_ERROR(kPlatformFailureError);
@@ -627,9 +628,8 @@ size_t V4L2VideoEncodeAccelerator::CopyIntoOutputBuffer(
return buffer_ref->shm->size() - remaining_dst_size;
}
-void V4L2VideoEncodeAccelerator::EncodeTask(
- const scoped_refptr<VideoFrame>& frame,
- bool force_keyframe) {
+void V4L2VideoEncodeAccelerator::EncodeTask(scoped_refptr<VideoFrame> frame,
+ bool force_keyframe) {
DVLOGF(4) << "force_keyframe=" << force_keyframe;
DCHECK(encoder_thread_.task_runner()->BelongsToCurrentThread());
DCHECK_NE(encoder_state_, kUninitialized);
@@ -639,7 +639,7 @@ void V4L2VideoEncodeAccelerator::EncodeTask(
return;
}
- encoder_input_queue_.emplace(frame, force_keyframe);
+ encoder_input_queue_.emplace(std::move(frame), force_keyframe);
Enqueue();
}
@@ -720,6 +720,7 @@ void V4L2VideoEncodeAccelerator::ServiceDeviceTask() {
void V4L2VideoEncodeAccelerator::Enqueue() {
DCHECK(encoder_thread_.task_runner()->BelongsToCurrentThread());
+ TRACE_EVENT0("media,gpu", "V4L2VEA::Enqueue");
DVLOGF(4) << "free_input_buffers: " << free_input_buffers_.size()
<< "input_queue: " << encoder_input_queue_.size();
@@ -792,6 +793,7 @@ void V4L2VideoEncodeAccelerator::Enqueue() {
void V4L2VideoEncodeAccelerator::Dequeue() {
DVLOGF(4);
DCHECK(encoder_thread_.task_runner()->BelongsToCurrentThread());
+ TRACE_EVENT0("media,gpu", "V4L2VEA::Dequeue");
// Dequeue completed input (VIDEO_OUTPUT) buffers, and recycle to the free
// list.
@@ -893,6 +895,7 @@ bool V4L2VideoEncodeAccelerator::EnqueueInputRecord() {
DVLOGF(4);
DCHECK(!free_input_buffers_.empty());
DCHECK(!encoder_input_queue_.empty());
+ TRACE_EVENT0("media,gpu", "V4L2VEA::EnqueueInputRecord");
// Enqueue an input (VIDEO_OUTPUT) buffer.
InputFrameInfo frame_info = encoder_input_queue_.front();
@@ -989,6 +992,7 @@ bool V4L2VideoEncodeAccelerator::EnqueueOutputRecord() {
DVLOGF(4);
DCHECK(!free_output_buffers_.empty());
DCHECK(!encoder_output_queue_.empty());
+ TRACE_EVENT0("media,gpu", "V4L2VEA::EnqueueOutputRecord");
// Enqueue an output (VIDEO_CAPTURE) buffer.
const int index = free_output_buffers_.back();
@@ -1139,6 +1143,8 @@ void V4L2VideoEncodeAccelerator::RequestEncodingParametersChangeTask(
uint32_t framerate) {
VLOGF(2) << "bitrate=" << bitrate << ", framerate=" << framerate;
DCHECK(encoder_thread_.task_runner()->BelongsToCurrentThread());
+ TRACE_EVENT2("media,gpu", "V4L2VEA::RequestEncodingParametersChangeTask",
+ "bitrate", bitrate, "framerate", framerate);
DCHECK_GT(bitrate, 0u);
DCHECK_GT(framerate, 0u);
diff --git a/chromium/media/gpu/v4l2/v4l2_video_encode_accelerator.h b/chromium/media/gpu/v4l2/v4l2_video_encode_accelerator.h
index aff19bb782a..05a88fabe07 100644
--- a/chromium/media/gpu/v4l2/v4l2_video_encode_accelerator.h
+++ b/chromium/media/gpu/v4l2/v4l2_video_encode_accelerator.h
@@ -50,9 +50,8 @@ class MEDIA_GPU_EXPORT V4L2VideoEncodeAccelerator
// VideoEncodeAccelerator implementation.
VideoEncodeAccelerator::SupportedProfiles GetSupportedProfiles() override;
bool Initialize(const Config& config, Client* client) override;
- void Encode(const scoped_refptr<VideoFrame>& frame,
- bool force_keyframe) override;
- void UseOutputBitstreamBuffer(const BitstreamBuffer& buffer) override;
+ void Encode(scoped_refptr<VideoFrame> frame, bool force_keyframe) override;
+ void UseOutputBitstreamBuffer(BitstreamBuffer buffer) override;
void RequestEncodingParametersChange(uint32_t bitrate,
uint32_t framerate) override;
void Destroy() override;
@@ -127,7 +126,7 @@ class MEDIA_GPU_EXPORT V4L2VideoEncodeAccelerator
// Encoding tasks, to be run on encode_thread_.
//
- void EncodeTask(const scoped_refptr<VideoFrame>& frame, bool force_keyframe);
+ void EncodeTask(scoped_refptr<VideoFrame> frame, bool force_keyframe);
// Add a BitstreamBuffer to the queue of buffers ready to be used for encoder
// output.
diff --git a/chromium/media/gpu/v4l2/v4l2_vp8_accelerator.cc b/chromium/media/gpu/v4l2/v4l2_vp8_accelerator.cc
index 314bc49278e..90a4a38df9e 100644
--- a/chromium/media/gpu/v4l2/v4l2_vp8_accelerator.cc
+++ b/chromium/media/gpu/v4l2/v4l2_vp8_accelerator.cc
@@ -11,12 +11,12 @@
#include "base/logging.h"
#include "base/numerics/safe_conversions.h"
#include "base/stl_util.h"
-#include "media/filters/vp8_parser.h"
#include "media/gpu/macros.h"
#include "media/gpu/v4l2/v4l2_decode_surface.h"
#include "media/gpu/v4l2/v4l2_decode_surface_handler.h"
#include "media/gpu/v4l2/v4l2_device.h"
#include "media/gpu/vp8_picture.h"
+#include "media/parsers/vp8_parser.h"
namespace media {
namespace {
diff --git a/chromium/media/gpu/v4l2/v4l2_vp9_accelerator.cc b/chromium/media/gpu/v4l2/v4l2_vp9_accelerator.cc
index 32e14f84444..00aa0a7fb2c 100644
--- a/chromium/media/gpu/v4l2/v4l2_vp9_accelerator.cc
+++ b/chromium/media/gpu/v4l2/v4l2_vp9_accelerator.cc
@@ -160,8 +160,8 @@ void FillVp9FrameContext(struct v4l2_vp9_entropy_ctx& v4l2_entropy_ctx,
class V4L2VP9Picture : public VP9Picture {
public:
- explicit V4L2VP9Picture(const scoped_refptr<V4L2DecodeSurface>& dec_surface)
- : dec_surface_(dec_surface) {}
+ explicit V4L2VP9Picture(scoped_refptr<V4L2DecodeSurface> dec_surface)
+ : dec_surface_(std::move(dec_surface)) {}
V4L2VP9Picture* AsV4L2VP9Picture() override { return this; }
scoped_refptr<V4L2DecodeSurface> dec_surface() { return dec_surface_; }
@@ -202,10 +202,10 @@ scoped_refptr<VP9Picture> V4L2VP9Accelerator::CreateVP9Picture() {
if (!dec_surface)
return nullptr;
- return new V4L2VP9Picture(dec_surface);
+ return new V4L2VP9Picture(std::move(dec_surface));
}
-bool V4L2VP9Accelerator::SubmitDecode(const scoped_refptr<VP9Picture>& pic,
+bool V4L2VP9Accelerator::SubmitDecode(scoped_refptr<VP9Picture> pic,
const Vp9SegmentationParams& segm_params,
const Vp9LoopFilterParams& lf_params,
const Vp9ReferenceFrameVector& ref_frames,
@@ -280,7 +280,7 @@ bool V4L2VP9Accelerator::SubmitDecode(const scoped_refptr<VP9Picture>& pic,
auto ref_pic = ref_frames.GetFrame(i);
if (ref_pic) {
scoped_refptr<V4L2DecodeSurface> ref_surface =
- VP9PictureToV4L2DecodeSurface(ref_pic);
+ VP9PictureToV4L2DecodeSurface(ref_pic.get());
v4l2_decode_param.ref_frames[i] = ref_surface->GetReferenceID();
ref_surfaces.push_back(ref_surface);
@@ -304,7 +304,7 @@ bool V4L2VP9Accelerator::SubmitDecode(const scoped_refptr<VP9Picture>& pic,
scoped_refptr<VP9Picture> ref_pic = ref_frames.GetFrame(idx);
if (ref_pic) {
scoped_refptr<V4L2DecodeSurface> ref_surface =
- VP9PictureToV4L2DecodeSurface(ref_pic);
+ VP9PictureToV4L2DecodeSurface(ref_pic.get());
v4l2_ref_frame->buf_index = ref_surface->GetReferenceID();
#define REF_TO_V4L2_REF(a) v4l2_ref_frame->a = ref_pic->frame_hdr->a
REF_TO_V4L2_REF(frame_width);
@@ -344,7 +344,7 @@ bool V4L2VP9Accelerator::SubmitDecode(const scoped_refptr<VP9Picture>& pic,
}
scoped_refptr<V4L2DecodeSurface> dec_surface =
- VP9PictureToV4L2DecodeSurface(pic);
+ VP9PictureToV4L2DecodeSurface(pic.get());
struct v4l2_ext_controls ext_ctrls;
memset(&ext_ctrls, 0, sizeof(ext_ctrls));
@@ -368,15 +368,15 @@ bool V4L2VP9Accelerator::SubmitDecode(const scoped_refptr<VP9Picture>& pic,
return true;
}
-bool V4L2VP9Accelerator::OutputPicture(const scoped_refptr<VP9Picture>& pic) {
+bool V4L2VP9Accelerator::OutputPicture(scoped_refptr<VP9Picture> pic) {
// TODO(crbug.com/647725): Insert correct color space.
- surface_handler_->SurfaceReady(VP9PictureToV4L2DecodeSurface(pic),
+ surface_handler_->SurfaceReady(VP9PictureToV4L2DecodeSurface(pic.get()),
pic->bitstream_id(), pic->visible_rect(),
VideoColorSpace());
return true;
}
-bool V4L2VP9Accelerator::GetFrameContext(const scoped_refptr<VP9Picture>& pic,
+bool V4L2VP9Accelerator::GetFrameContext(scoped_refptr<VP9Picture> pic,
Vp9FrameContext* frame_ctx) {
struct v4l2_ctrl_vp9_entropy v4l2_entropy;
memset(&v4l2_entropy, 0, sizeof(v4l2_entropy));
@@ -388,7 +388,7 @@ bool V4L2VP9Accelerator::GetFrameContext(const scoped_refptr<VP9Picture>& pic,
ctrl.p_vp9_entropy = &v4l2_entropy;
scoped_refptr<V4L2DecodeSurface> dec_surface =
- VP9PictureToV4L2DecodeSurface(pic);
+ VP9PictureToV4L2DecodeSurface(pic.get());
struct v4l2_ext_controls ext_ctrls;
memset(&ext_ctrls, 0, sizeof(ext_ctrls));
@@ -409,8 +409,7 @@ bool V4L2VP9Accelerator::IsFrameContextRequired() const {
}
scoped_refptr<V4L2DecodeSurface>
-V4L2VP9Accelerator::VP9PictureToV4L2DecodeSurface(
- const scoped_refptr<VP9Picture>& pic) {
+V4L2VP9Accelerator::VP9PictureToV4L2DecodeSurface(VP9Picture* pic) {
V4L2VP9Picture* v4l2_pic = pic->AsV4L2VP9Picture();
CHECK(v4l2_pic);
return v4l2_pic->dec_surface();
diff --git a/chromium/media/gpu/v4l2/v4l2_vp9_accelerator.h b/chromium/media/gpu/v4l2/v4l2_vp9_accelerator.h
index 31f1dc1ed28..a85265cad52 100644
--- a/chromium/media/gpu/v4l2/v4l2_vp9_accelerator.h
+++ b/chromium/media/gpu/v4l2/v4l2_vp9_accelerator.h
@@ -28,22 +28,22 @@ class V4L2VP9Accelerator : public VP9Decoder::VP9Accelerator {
// VP9Decoder::VP9Accelerator implementation.
scoped_refptr<VP9Picture> CreateVP9Picture() override;
- bool SubmitDecode(const scoped_refptr<VP9Picture>& pic,
+ bool SubmitDecode(scoped_refptr<VP9Picture> pic,
const Vp9SegmentationParams& segm_params,
const Vp9LoopFilterParams& lf_params,
const Vp9ReferenceFrameVector& reference_frames,
const base::Closure& done_cb) override;
- bool OutputPicture(const scoped_refptr<VP9Picture>& pic) override;
+ bool OutputPicture(scoped_refptr<VP9Picture> pic) override;
- bool GetFrameContext(const scoped_refptr<VP9Picture>& pic,
+ bool GetFrameContext(scoped_refptr<VP9Picture> pic,
Vp9FrameContext* frame_ctx) override;
bool IsFrameContextRequired() const override;
private:
scoped_refptr<V4L2DecodeSurface> VP9PictureToV4L2DecodeSurface(
- const scoped_refptr<VP9Picture>& pic);
+ VP9Picture* pic);
bool device_needs_frame_context_;
diff --git a/chromium/media/gpu/vaapi/BUILD.gn b/chromium/media/gpu/vaapi/BUILD.gn
index 36c50c85981..2bac145bbcf 100644
--- a/chromium/media/gpu/vaapi/BUILD.gn
+++ b/chromium/media/gpu/vaapi/BUILD.gn
@@ -41,14 +41,12 @@ source_set("vaapi") {
"vaapi_dmabuf_video_frame_mapper.h",
"vaapi_h264_accelerator.cc",
"vaapi_h264_accelerator.h",
+ "vaapi_jpeg_decode_accelerator_worker.cc",
+ "vaapi_jpeg_decode_accelerator_worker.h",
"vaapi_jpeg_decoder.cc",
"vaapi_jpeg_decoder.h",
- "vaapi_jpeg_encode_accelerator.cc",
- "vaapi_jpeg_encode_accelerator.h",
"vaapi_jpeg_encoder.cc",
"vaapi_jpeg_encoder.h",
- "vaapi_mjpeg_decode_accelerator.cc",
- "vaapi_mjpeg_decode_accelerator.h",
"vaapi_picture.cc",
"vaapi_picture.h",
"vaapi_picture_factory.cc",
@@ -76,14 +74,33 @@ source_set("vaapi") {
deps = [
":libva_stubs",
"//base",
+ "//gpu/ipc/common",
"//gpu/ipc/service",
"//media",
"//media/gpu:common",
"//media/gpu:video_frame_mapper_common",
+ "//media/gpu/linux",
+ "//media/parsers",
+ "//mojo/public/cpp/bindings",
"//third_party/libyuv",
+ "//ui/gfx",
"//ui/gfx/geometry",
]
+ if (is_chromeos) {
+ sources += [
+ "vaapi_jpeg_encode_accelerator.cc",
+ "vaapi_jpeg_encode_accelerator.h",
+ "vaapi_mjpeg_decode_accelerator.cc",
+ "vaapi_mjpeg_decode_accelerator.h",
+ ]
+
+ deps += [
+ "//components/chromeos_camera:jpeg_encode_accelerator",
+ "//components/chromeos_camera:mjpeg_decode_accelerator",
+ ]
+ }
+
if (is_linux) {
configs += [ "//build/config/linux/libva" ]
deps += [ "//media/gpu/linux" ]
@@ -144,9 +161,10 @@ source_set("jpeg_decoder_unit_test") {
]
deps = [
":vaapi",
+ ":vaapi_utils_unittest",
"//base",
- "//base/test:test_support",
"//media:test_support",
+ "//media/parsers",
"//skia",
"//testing/gtest",
"//third_party/libyuv:libyuv",
@@ -154,3 +172,20 @@ source_set("jpeg_decoder_unit_test") {
"//ui/gfx/geometry",
]
}
+
+source_set("vaapi_utils_unittest") {
+ testonly = true
+ sources = [
+ "vaapi_utils_unittest.cc",
+ ]
+ deps = [
+ ":vaapi",
+ "//base",
+ "//base/test:test_support",
+ "//testing/gtest",
+ "//ui/gfx/geometry",
+
+ # Indirect dependency from vaapi_wrapper.h.
+ "//skia",
+ ]
+}
diff --git a/chromium/media/gpu/vaapi/h264_encoder.cc b/chromium/media/gpu/vaapi/h264_encoder.cc
index 68843519782..ddde6ee49a9 100644
--- a/chromium/media/gpu/vaapi/h264_encoder.cc
+++ b/chromium/media/gpu/vaapi/h264_encoder.cc
@@ -254,8 +254,9 @@ void H264Encoder::UpdateSPS() {
// constrained and non-constrained baseline profiles. Since many codecs
// can't do non-constrained, and constrained is usually what we mean (and
// it's a subset of non-constrained), default to it.
- current_sps_.profile_idc = H264SPS::kProfileIDCBaseline;
+ current_sps_.profile_idc = H264SPS::kProfileIDCConstrainedBaseline;
current_sps_.constraint_set0_flag = true;
+ current_sps_.constraint_set1_flag = true;
break;
case H264PROFILE_MAIN:
current_sps_.profile_idc = H264SPS::kProfileIDCMain;
diff --git a/chromium/media/gpu/vaapi/va_surface.cc b/chromium/media/gpu/vaapi/va_surface.cc
index 66006daf492..64d213c050f 100644
--- a/chromium/media/gpu/vaapi/va_surface.cc
+++ b/chromium/media/gpu/vaapi/va_surface.cc
@@ -9,16 +9,16 @@ namespace media {
VASurface::VASurface(VASurfaceID va_surface_id,
const gfx::Size& size,
unsigned int format,
- const ReleaseCB& release_cb)
+ ReleaseCB release_cb)
: va_surface_id_(va_surface_id),
size_(size),
format_(format),
- release_cb_(release_cb) {
+ release_cb_(std::move(release_cb)) {
DCHECK(release_cb_);
}
VASurface::~VASurface() {
- release_cb_.Run(va_surface_id_);
+ std::move(release_cb_).Run(va_surface_id_);
}
} // namespace media
diff --git a/chromium/media/gpu/vaapi/va_surface.h b/chromium/media/gpu/vaapi/va_surface.h
index 79f8cbe020d..a6f4ac5ea2f 100644
--- a/chromium/media/gpu/vaapi/va_surface.h
+++ b/chromium/media/gpu/vaapi/va_surface.h
@@ -85,15 +85,12 @@ namespace media {
//
class VASurface : public base::RefCountedThreadSafe<VASurface> {
public:
- // Provided by user, will be called when all references to the surface
- // are released.
- // TODO(mcasas): make this a OnceCallback, https://crbug.com/822346.
- using ReleaseCB = base::Callback<void(VASurfaceID)>;
+ using ReleaseCB = base::OnceCallback<void(VASurfaceID)>;
VASurface(VASurfaceID va_surface_id,
const gfx::Size& size,
unsigned int format,
- const ReleaseCB& release_cb);
+ ReleaseCB release_cb);
VASurfaceID id() const { return va_surface_id_; }
const gfx::Size& size() const { return size_; }
@@ -106,7 +103,7 @@ class VASurface : public base::RefCountedThreadSafe<VASurface> {
const VASurfaceID va_surface_id_;
const gfx::Size size_;
const unsigned int format_;
- const ReleaseCB release_cb_;
+ ReleaseCB release_cb_;
DISALLOW_COPY_AND_ASSIGN(VASurface);
};
diff --git a/chromium/media/gpu/vaapi/vaapi_dmabuf_video_frame_mapper.cc b/chromium/media/gpu/vaapi/vaapi_dmabuf_video_frame_mapper.cc
index d3007fdaa90..2235b37fa4d 100644
--- a/chromium/media/gpu/vaapi/vaapi_dmabuf_video_frame_mapper.cc
+++ b/chromium/media/gpu/vaapi/vaapi_dmabuf_video_frame_mapper.cc
@@ -75,14 +75,25 @@ scoped_refptr<VideoFrame> CreateMappedVideoFrame(
return video_frame;
}
+bool IsFormatSupported(VideoPixelFormat format) {
+ return format == PIXEL_FORMAT_NV12;
+}
+
} // namespace
// static
-std::unique_ptr<VideoFrameMapper> VaapiDmaBufVideoFrameMapper::Create() {
- auto video_frame_mapper = base::WrapUnique(new VaapiDmaBufVideoFrameMapper);
- if (video_frame_mapper->vaapi_wrapper_ == nullptr) {
+std::unique_ptr<VideoFrameMapper> VaapiDmaBufVideoFrameMapper::Create(
+ VideoPixelFormat format) {
+ if (!IsFormatSupported(format)) {
+ VLOGF(1) << " Unsupported format: " << format;
return nullptr;
}
+
+ auto video_frame_mapper =
+ base::WrapUnique(new VaapiDmaBufVideoFrameMapper(format));
+ if (!video_frame_mapper->vaapi_wrapper_)
+ return nullptr;
+
return video_frame_mapper;
}
@@ -90,8 +101,10 @@ std::unique_ptr<VideoFrameMapper> VaapiDmaBufVideoFrameMapper::Create() {
// not required for VaapiWrapper to perform pixel format conversion.
// TODO(crbug.com/898423): Create a VaapiWrapper only for pixel format
// conversion. Either mode or profile isn't required to create the VaapiWrapper.
-VaapiDmaBufVideoFrameMapper::VaapiDmaBufVideoFrameMapper()
- : vaapi_wrapper_(VaapiWrapper::CreateForVideoCodec(VaapiWrapper::kDecode,
+VaapiDmaBufVideoFrameMapper::VaapiDmaBufVideoFrameMapper(
+ VideoPixelFormat format)
+ : VideoFrameMapper(format),
+ vaapi_wrapper_(VaapiWrapper::CreateForVideoCodec(VaapiWrapper::kDecode,
H264PROFILE_MAIN,
base::DoNothing())),
vaapi_picture_factory_(new VaapiPictureFactory()) {}
@@ -105,8 +118,8 @@ scoped_refptr<VideoFrame> VaapiDmaBufVideoFrameMapper::Map(
if (!video_frame->HasDmaBufs()) {
return nullptr;
}
- if (video_frame->format() != PIXEL_FORMAT_NV12) {
- NOTIMPLEMENTED() << " Unsupported PixelFormat: " << video_frame->format();
+ if (video_frame->format() != format_) {
+ VLOGF(1) << "Unexpected format: " << video_frame->format();
return nullptr;
}
diff --git a/chromium/media/gpu/vaapi/vaapi_dmabuf_video_frame_mapper.h b/chromium/media/gpu/vaapi/vaapi_dmabuf_video_frame_mapper.h
index db3aff962e2..0f1efc554d3 100644
--- a/chromium/media/gpu/vaapi/vaapi_dmabuf_video_frame_mapper.h
+++ b/chromium/media/gpu/vaapi/vaapi_dmabuf_video_frame_mapper.h
@@ -21,16 +21,16 @@ class VaapiWrapper;
// VideoFrame and use the VaapiWrapper to access the memory there.
class MEDIA_GPU_EXPORT VaapiDmaBufVideoFrameMapper : public VideoFrameMapper {
public:
- ~VaapiDmaBufVideoFrameMapper() override;
+ static std::unique_ptr<VideoFrameMapper> Create(VideoPixelFormat format);
- static std::unique_ptr<VideoFrameMapper> Create();
+ ~VaapiDmaBufVideoFrameMapper() override;
// VideoFrameMapper override.
scoped_refptr<VideoFrame> Map(
scoped_refptr<const VideoFrame> video_frame) const override;
private:
- VaapiDmaBufVideoFrameMapper();
+ explicit VaapiDmaBufVideoFrameMapper(VideoPixelFormat format);
// Vaapi components for mapping.
const scoped_refptr<VaapiWrapper> vaapi_wrapper_;
diff --git a/chromium/media/gpu/vaapi/vaapi_h264_accelerator.cc b/chromium/media/gpu/vaapi/vaapi_h264_accelerator.cc
index 2775fdde266..bab74119aeb 100644
--- a/chromium/media/gpu/vaapi/vaapi_h264_accelerator.cc
+++ b/chromium/media/gpu/vaapi/vaapi_h264_accelerator.cc
@@ -72,7 +72,7 @@ Status VaapiH264Accelerator::SubmitFrameMetadata(
const H264Picture::Vector& ref_pic_listp0,
const H264Picture::Vector& ref_pic_listb0,
const H264Picture::Vector& ref_pic_listb1,
- const scoped_refptr<H264Picture>& pic) {
+ scoped_refptr<H264Picture> pic) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
VAPictureParameterBufferH264 pic_param;
memset(&pic_param, 0, sizeof(pic_param));
@@ -131,7 +131,7 @@ Status VaapiH264Accelerator::SubmitFrameMetadata(
pic_param.frame_num = pic->frame_num;
InitVAPicture(&pic_param.CurrPic);
- FillVAPicture(&pic_param.CurrPic, pic);
+ FillVAPicture(&pic_param.CurrPic, std::move(pic));
// Init reference pictures' array.
for (int i = 0; i < 16; ++i)
@@ -185,7 +185,7 @@ Status VaapiH264Accelerator::SubmitSlice(
const H264SliceHeader* slice_hdr,
const H264Picture::Vector& ref_pic_list0,
const H264Picture::Vector& ref_pic_list1,
- const scoped_refptr<H264Picture>& pic,
+ scoped_refptr<H264Picture> pic,
const uint8_t* data,
size_t size,
const std::vector<SubsampleEntry>& subsamples) {
@@ -286,8 +286,7 @@ Status VaapiH264Accelerator::SubmitSlice(
: Status::kFail;
}
-Status VaapiH264Accelerator::SubmitDecode(
- const scoped_refptr<H264Picture>& pic) {
+Status VaapiH264Accelerator::SubmitDecode(scoped_refptr<H264Picture> pic) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
const bool success = vaapi_wrapper_->ExecuteAndDestroyPendingBuffers(
@@ -295,8 +294,7 @@ Status VaapiH264Accelerator::SubmitDecode(
return success ? Status::kOk : Status::kFail;
}
-bool VaapiH264Accelerator::OutputPicture(
- const scoped_refptr<H264Picture>& pic) {
+bool VaapiH264Accelerator::OutputPicture(scoped_refptr<H264Picture> pic) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
const VaapiH264Picture* vaapi_pic = pic->AsVaapiH264Picture();
diff --git a/chromium/media/gpu/vaapi/vaapi_h264_accelerator.h b/chromium/media/gpu/vaapi/vaapi_h264_accelerator.h
index 0182cbe8ed3..f817558e549 100644
--- a/chromium/media/gpu/vaapi/vaapi_h264_accelerator.h
+++ b/chromium/media/gpu/vaapi/vaapi_h264_accelerator.h
@@ -33,17 +33,17 @@ class VaapiH264Accelerator : public H264Decoder::H264Accelerator {
const H264Picture::Vector& ref_pic_listp0,
const H264Picture::Vector& ref_pic_listb0,
const H264Picture::Vector& ref_pic_listb1,
- const scoped_refptr<H264Picture>& pic) override;
+ scoped_refptr<H264Picture> pic) override;
Status SubmitSlice(const H264PPS* pps,
const H264SliceHeader* slice_hdr,
const H264Picture::Vector& ref_pic_list0,
const H264Picture::Vector& ref_pic_list1,
- const scoped_refptr<H264Picture>& pic,
+ scoped_refptr<H264Picture> pic,
const uint8_t* data,
size_t size,
const std::vector<SubsampleEntry>& subsamples) override;
- Status SubmitDecode(const scoped_refptr<H264Picture>& pic) override;
- bool OutputPicture(const scoped_refptr<H264Picture>& pic) override;
+ Status SubmitDecode(scoped_refptr<H264Picture> pic) override;
+ bool OutputPicture(scoped_refptr<H264Picture> pic) override;
void Reset() override;
private:
diff --git a/chromium/media/gpu/vaapi/vaapi_jpeg_decode_accelerator_worker.cc b/chromium/media/gpu/vaapi/vaapi_jpeg_decode_accelerator_worker.cc
new file mode 100644
index 00000000000..df73e051fa8
--- /dev/null
+++ b/chromium/media/gpu/vaapi/vaapi_jpeg_decode_accelerator_worker.cc
@@ -0,0 +1,114 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/gpu/vaapi/vaapi_jpeg_decode_accelerator_worker.h"
+
+#include <utility>
+
+#include <va/va.h>
+
+#include "base/bind.h"
+#include "base/containers/span.h"
+#include "base/location.h"
+#include "base/logging.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/sequenced_task_runner.h"
+#include "base/task/post_task.h"
+#include "base/trace_event/trace_event.h"
+#include "media/gpu/macros.h"
+#include "media/gpu/vaapi/va_surface.h"
+#include "media/gpu/vaapi/vaapi_jpeg_decoder.h"
+#include "media/gpu/vaapi/vaapi_utils.h"
+#include "mojo/public/cpp/bindings/callback_helpers.h"
+#include "ui/gfx/geometry/size.h"
+
+namespace media {
+
+namespace {
+
+// These values are persisted to logs. Entries should not be renumbered and
+// numeric values should never be reused.
+enum class VAJDAWorkerDecoderFailure {
+ kVaapiError = 0,
+ kMaxValue = kVaapiError,
+};
+
+void ReportToVAJDAWorkerDecoderFailureUMA(VAJDAWorkerDecoderFailure failure) {
+ UMA_HISTOGRAM_ENUMERATION("Media.VAJDAWorker.DecoderFailure", failure);
+}
+
+// Uses |decoder| to decode the JPEG corresponding to |encoded_data|.
+// |decode_cb| is called when finished or when an error is encountered. We don't
+// support decoding to scale, so |output_size| is only used for tracing.
+void DecodeTask(
+ VaapiJpegDecoder* decoder,
+ std::vector<uint8_t> encoded_data,
+ const gfx::Size& output_size,
+ gpu::ImageDecodeAcceleratorWorker::CompletedDecodeCB decode_cb) {
+ TRACE_EVENT2("jpeg", "VaapiJpegDecodeAcceleratorWorker::DecodeTask",
+ "encoded_bytes", encoded_data.size(), "output_size",
+ output_size.ToString());
+ gpu::ImageDecodeAcceleratorWorker::CompletedDecodeCB scoped_decode_callback =
+ mojo::WrapCallbackWithDefaultInvokeIfNotRun(std::move(decode_cb),
+ nullptr);
+ VaapiJpegDecodeStatus status;
+ decoder->Decode(
+ base::make_span<const uint8_t>(encoded_data.data(), encoded_data.size()),
+ &status);
+ if (status != VaapiJpegDecodeStatus::kSuccess) {
+ VLOGF(1) << "Failed to decode - status = " << static_cast<uint32_t>(status);
+ return;
+ }
+ std::unique_ptr<ScopedVAImage> scoped_image =
+ decoder->GetImage(VA_FOURCC_RGBX /* preferred_image_fourcc */, &status);
+ if (status != VaapiJpegDecodeStatus::kSuccess) {
+ VLOGF(1) << "Failed to get image - status = "
+ << static_cast<uint32_t>(status);
+ return;
+ }
+
+ // TODO(crbug.com/868400): output the decoded data.
+ DCHECK(scoped_image);
+ std::move(scoped_decode_callback).Run(nullptr);
+}
+
+} // namespace
+
+VaapiJpegDecodeAcceleratorWorker::VaapiJpegDecodeAcceleratorWorker()
+ : decoder_(std::make_unique<VaapiJpegDecoder>()) {
+ if (!decoder_->Initialize(
+ base::BindRepeating(&ReportToVAJDAWorkerDecoderFailureUMA,
+ VAJDAWorkerDecoderFailure::kVaapiError))) {
+ return;
+ }
+ decoder_task_runner_ = base::CreateSequencedTaskRunnerWithTraits({});
+ DCHECK(decoder_task_runner_);
+}
+
+VaapiJpegDecodeAcceleratorWorker::~VaapiJpegDecodeAcceleratorWorker() {
+ if (decoder_task_runner_)
+ decoder_task_runner_->DeleteSoon(FROM_HERE, std::move(decoder_));
+}
+
+bool VaapiJpegDecodeAcceleratorWorker::IsValid() const {
+ // If |decoder_task_runner_| is nullptr, it means that the initialization of
+ // |decoder_| failed.
+ return !!decoder_task_runner_;
+}
+
+void VaapiJpegDecodeAcceleratorWorker::Decode(std::vector<uint8_t> encoded_data,
+ const gfx::Size& output_size,
+ CompletedDecodeCB decode_cb) {
+ if (!IsValid()) {
+ NOTREACHED();
+ return;
+ }
+ DCHECK(!decoder_task_runner_->RunsTasksInCurrentSequence());
+ decoder_task_runner_->PostTask(
+ FROM_HERE,
+ base::BindOnce(&DecodeTask, decoder_.get(), std::move(encoded_data),
+ output_size, std::move(decode_cb)));
+}
+
+} // namespace media
diff --git a/chromium/media/gpu/vaapi/vaapi_jpeg_decode_accelerator_worker.h b/chromium/media/gpu/vaapi/vaapi_jpeg_decode_accelerator_worker.h
new file mode 100644
index 00000000000..86e45fa6428
--- /dev/null
+++ b/chromium/media/gpu/vaapi/vaapi_jpeg_decode_accelerator_worker.h
@@ -0,0 +1,61 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_GPU_VAAPI_VAAPI_JPEG_DECODE_ACCELERATOR_WORKER_H_
+#define MEDIA_GPU_VAAPI_VAAPI_JPEG_DECODE_ACCELERATOR_WORKER_H_
+
+#include <stdint.h>
+
+#include <memory>
+#include <vector>
+
+#include "base/macros.h"
+#include "base/memory/scoped_refptr.h"
+#include "gpu/ipc/service/image_decode_accelerator_worker.h"
+
+namespace base {
+class SequencedTaskRunner;
+}
+
+namespace gfx {
+class Size;
+}
+
+namespace media {
+
+class VaapiJpegDecoder;
+
+// This class uses the VAAPI to provide JPEG decode acceleration. The
+// interaction with the VAAPI is done on |decoder_task_runner_|. Objects of this
+// class can be created/destroyed on any thread, and the public interface of
+// this class is thread-safe.
+class VaapiJpegDecodeAcceleratorWorker
+ : public gpu::ImageDecodeAcceleratorWorker {
+ public:
+ VaapiJpegDecodeAcceleratorWorker();
+ ~VaapiJpegDecodeAcceleratorWorker() override;
+
+ // Returns true if the internal state was initialized correctly. If false,
+ // clients should not call Decode().
+ bool IsValid() const;
+
+ // gpu::ImageDecodeAcceleratorWorker implementation.
+ void Decode(std::vector<uint8_t> encoded_data,
+ const gfx::Size& output_size,
+ CompletedDecodeCB decode_cb) override;
+
+ private:
+ // We delegate the decoding to |decoder_| which is constructed on the ctor and
+ // then used and destroyed on |decoder_task_runner_| (unless initialization
+ // failed, in which case it doesn't matter where it's destroyed since no tasks
+ // using |decoder_| should have been posted to |decoder_task_runner_|).
+ std::unique_ptr<VaapiJpegDecoder> decoder_;
+ scoped_refptr<base::SequencedTaskRunner> decoder_task_runner_;
+
+ DISALLOW_COPY_AND_ASSIGN(VaapiJpegDecodeAcceleratorWorker);
+};
+
+} // namespace media
+
+#endif // MEDIA_GPU_VAAPI_VAAPI_JPEG_DECODE_ACCELERATOR_WORKER_H_
diff --git a/chromium/media/gpu/vaapi/vaapi_jpeg_decoder.cc b/chromium/media/gpu/vaapi/vaapi_jpeg_decoder.cc
index 026bc8b0736..364a874f7ce 100644
--- a/chromium/media/gpu/vaapi/vaapi_jpeg_decoder.cc
+++ b/chromium/media/gpu/vaapi/vaapi_jpeg_decoder.cc
@@ -12,14 +12,16 @@
#include <va/va.h>
+#include "base/bind_helpers.h"
#include "base/logging.h"
#include "base/numerics/safe_conversions.h"
#include "base/stl_util.h"
#include "media/base/video_types.h"
-#include "media/filters/jpeg_parser.h"
#include "media/gpu/macros.h"
+#include "media/gpu/vaapi/va_surface.h"
#include "media/gpu/vaapi/vaapi_utils.h"
#include "media/gpu/vaapi/vaapi_wrapper.h"
+#include "media/parsers/jpeg_parser.h"
namespace media {
@@ -223,9 +225,8 @@ bool VaapiJpegDecoder::Initialize(const base::RepeatingClosure& error_uma_cb) {
return true;
}
-std::unique_ptr<ScopedVAImage> VaapiJpegDecoder::DoDecode(
+scoped_refptr<VASurface> VaapiJpegDecoder::Decode(
base::span<const uint8_t> encoded_image,
- uint32_t preferred_image_fourcc,
VaapiJpegDecodeStatus* status) {
if (!vaapi_wrapper_) {
VLOGF(1) << "VaapiJpegDecoder has not been initialized";
@@ -332,7 +333,21 @@ std::unique_ptr<ScopedVAImage> VaapiJpegDecoder::DoDecode(
return nullptr;
}
- // Get the decode output as a ScopedVAImage.
+ *status = VaapiJpegDecodeStatus::kSuccess;
+ return base::MakeRefCounted<VASurface>(va_surface_id_, coded_size_,
+ va_rt_format_,
+ base::DoNothing() /* release_cb */);
+}
+
+std::unique_ptr<ScopedVAImage> VaapiJpegDecoder::GetImage(
+ uint32_t preferred_image_fourcc,
+ VaapiJpegDecodeStatus* status) {
+ if (va_surface_id_ == VA_INVALID_ID) {
+ VLOGF(1) << "No decoded JPEG available";
+ *status = VaapiJpegDecodeStatus::kInvalidState;
+ return nullptr;
+ }
+
uint32_t image_fourcc;
if (!VaapiWrapper::GetJpegDecodeSuitableImageFourCC(
va_rt_format_, preferred_image_fourcc, &image_fourcc)) {
@@ -354,11 +369,4 @@ std::unique_ptr<ScopedVAImage> VaapiJpegDecoder::DoDecode(
return scoped_image;
}
-std::unique_ptr<ScopedVAImage> VaapiJpegDecoder::DoDecode(
- base::span<const uint8_t> encoded_image,
- VaapiJpegDecodeStatus* status) {
- return DoDecode(encoded_image, VA_FOURCC_I420 /* preferred_image_fourcc */,
- status);
-}
-
} // namespace media
diff --git a/chromium/media/gpu/vaapi/vaapi_jpeg_decoder.h b/chromium/media/gpu/vaapi/vaapi_jpeg_decoder.h
index 2b544f6efba..c5fcdb4f877 100644
--- a/chromium/media/gpu/vaapi/vaapi_jpeg_decoder.h
+++ b/chromium/media/gpu/vaapi/vaapi_jpeg_decoder.h
@@ -11,7 +11,6 @@
#include "base/callback_forward.h"
#include "base/containers/span.h"
-#include "base/gtest_prod_util.h"
#include "base/macros.h"
#include "base/memory/scoped_refptr.h"
#include "ui/gfx/geometry/size.h"
@@ -23,9 +22,10 @@ namespace media {
struct JpegFrameHeader;
class ScopedVAImage;
+class VASurface;
class VaapiWrapper;
-enum class VaapiJpegDecodeStatus {
+enum class VaapiJpegDecodeStatus : uint32_t {
kSuccess,
kParseJpegFailed,
kUnsupportedJpeg,
@@ -49,6 +49,10 @@ constexpr unsigned int kInvalidVaRtFormat = 0u;
// or 4:4:4, returns kInvalidVaRtFormat.
unsigned int VaSurfaceFormatForJpeg(const JpegFrameHeader& frame_header);
+// Encapsulates a VaapiWrapper for the purpose of performing
+// hardware-accelerated JPEG decodes. Objects of this class are not thread-safe,
+// but they are also not thread-affine, i.e., the caller is free to call the
+// methods on any thread, but calls must be synchronized externally.
class VaapiJpegDecoder final {
public:
VaapiJpegDecoder();
@@ -60,27 +64,22 @@ class VaapiJpegDecoder final {
// Decodes a JPEG picture. It will fill VA-API parameters and call the
// corresponding VA-API methods according to the JPEG in |encoded_image|.
- // Decoded data will be returned as a ScopedVAImage. The VAImage's format will
- // be either |preferred_image_fourcc| if the conversion from the internal
- // format is supported or a fallback FOURCC (see
+ // The image will be decoded into an internally allocated VA surface. It
+ // will be returned as an unowned VASurface, which remains valid until the
+ // next Decode() call or destruction of this class. Returns nullptr on
+ // failure and sets *|status| to the reason for failure.
+ scoped_refptr<VASurface> Decode(base::span<const uint8_t> encoded_image,
+ VaapiJpegDecodeStatus* status);
+
+ // Get the decoded data from the last Decode() call as a ScopedVAImage. The
+ // VAImage's format will be either |preferred_image_fourcc| if the conversion
+ // from the internal format is supported or a fallback FOURCC (see
// VaapiWrapper::GetJpegDecodeSuitableImageFourCC() for details). Returns
// nullptr on failure and sets *|status| to the reason for failure.
- std::unique_ptr<ScopedVAImage> DoDecode(
- base::span<const uint8_t> encoded_image,
- uint32_t preferred_image_fourcc,
- VaapiJpegDecodeStatus* status);
-
- // Calls DoDecode() above with |preferred_image_fourcc| = VA_FOURCC_I420.
- std::unique_ptr<ScopedVAImage> DoDecode(
- base::span<const uint8_t> encoded_image,
- VaapiJpegDecodeStatus* status);
+ std::unique_ptr<ScopedVAImage> GetImage(uint32_t preferred_image_fourcc,
+ VaapiJpegDecodeStatus* status);
private:
- // TODO(andrescj): move vaapi_utils tests out of vaapi_jpeg_decoder_unittest
- // and remove this friend declaration.
- friend class VaapiJpegDecoderTest;
- FRIEND_TEST_ALL_PREFIXES(VaapiJpegDecoderTest, ScopedVAImage);
-
scoped_refptr<VaapiWrapper> vaapi_wrapper_;
// The current VA surface for decoding.
diff --git a/chromium/media/gpu/vaapi/vaapi_jpeg_decoder_unittest.cc b/chromium/media/gpu/vaapi/vaapi_jpeg_decoder_unittest.cc
index 5bd8da28988..a410bcb3a69 100644
--- a/chromium/media/gpu/vaapi/vaapi_jpeg_decoder_unittest.cc
+++ b/chromium/media/gpu/vaapi/vaapi_jpeg_decoder_unittest.cc
@@ -21,17 +21,15 @@
#include "base/files/file_path.h"
#include "base/files/file_util.h"
#include "base/logging.h"
+#include "base/memory/scoped_refptr.h"
#include "base/numerics/safe_conversions.h"
#include "base/strings/string_util.h"
-#include "base/synchronization/lock.h"
-#include "base/test/gtest_util.h"
-#include "base/thread_annotations.h"
#include "media/base/test_data_util.h"
#include "media/base/video_types.h"
-#include "media/filters/jpeg_parser.h"
#include "media/gpu/vaapi/vaapi_jpeg_decoder.h"
#include "media/gpu/vaapi/vaapi_utils.h"
#include "media/gpu/vaapi/vaapi_wrapper.h"
+#include "media/parsers/jpeg_parser.h"
#include "third_party/libyuv/include/libyuv.h"
#include "third_party/skia/include/core/SkColor.h"
#include "third_party/skia/include/core/SkImageInfo.h"
@@ -65,12 +63,6 @@ constexpr double kMinSsim = 0.997;
// the decode result.
constexpr const char* kUnsupportedFilename = "pixel-1280x720-grayscale.jpg";
-constexpr VAImageFormat kImageFormatI420 = {
- .fourcc = VA_FOURCC_I420,
- .byte_order = VA_LSB_FIRST,
- .bits_per_pixel = 12,
-};
-
// The size of the minimum coded unit for a YUV 4:2:0 image (both the width and
// the height of the MCU are the same for 4:2:0).
constexpr int k420MCUSize = 16;
@@ -342,6 +334,8 @@ int GetMaxSupportedDimension(int max_surface_supported) {
} // namespace
+class VASurface;
+
class VaapiJpegDecoderTest : public testing::TestWithParam<TestParam> {
protected:
VaapiJpegDecoderTest() {
@@ -366,16 +360,6 @@ class VaapiJpegDecoderTest : public testing::TestWithParam<TestParam> {
base::span<const uint8_t> encoded_image,
VaapiJpegDecodeStatus* status = nullptr);
- base::Lock* GetVaapiWrapperLock() const
- LOCK_RETURNED(decoder_.vaapi_wrapper_->va_lock_) {
- return decoder_.vaapi_wrapper_->va_lock_;
- }
-
- VADisplay GetVaapiWrapperVaDisplay() const
- EXCLUSIVE_LOCKS_REQUIRED(decoder_.vaapi_wrapper_->va_lock_) {
- return decoder_.vaapi_wrapper_->va_display_;
- }
-
protected:
std::string test_data_path_;
VaapiJpegDecoder decoder_;
@@ -399,12 +383,22 @@ std::unique_ptr<ScopedVAImage> VaapiJpegDecoderTest::Decode(
base::span<const uint8_t> encoded_image,
uint32_t preferred_fourcc,
VaapiJpegDecodeStatus* status) {
- VaapiJpegDecodeStatus tmp_status;
- std::unique_ptr<ScopedVAImage> scoped_image =
- decoder_.DoDecode(encoded_image, preferred_fourcc, &tmp_status);
- EXPECT_EQ(!!scoped_image, tmp_status == VaapiJpegDecodeStatus::kSuccess);
- if (status)
- *status = tmp_status;
+ VaapiJpegDecodeStatus decode_status;
+ scoped_refptr<VASurface> surface =
+ decoder_.Decode(encoded_image, &decode_status);
+ EXPECT_EQ(!!surface, decode_status == VaapiJpegDecodeStatus::kSuccess);
+
+ // Still try to get image when decode fails.
+ VaapiJpegDecodeStatus image_status;
+ std::unique_ptr<ScopedVAImage> scoped_image;
+ scoped_image = decoder_.GetImage(preferred_fourcc, &image_status);
+ EXPECT_EQ(!!scoped_image, image_status == VaapiJpegDecodeStatus::kSuccess);
+
+ // Record the first fail status.
+ if (status) {
+ *status = decode_status != VaapiJpegDecodeStatus::kSuccess ? decode_status
+ : image_status;
+ }
return scoped_image;
}
@@ -662,79 +656,6 @@ TEST_F(VaapiJpegDecoderTest, DecodeFails) {
EXPECT_EQ(VaapiJpegDecodeStatus::kUnsupportedSubsampling, status);
}
-// This test exercises the usual ScopedVAImage lifetime.
-//
-// TODO(andrescj): move ScopedVAImage and ScopedVABufferMapping to a separate
-// file so that we don't have to use |decoder_.vaapi_wrapper_|. See
-// https://crbug.com/924310.
-TEST_F(VaapiJpegDecoderTest, ScopedVAImage) {
- std::vector<VASurfaceID> va_surfaces;
- const gfx::Size coded_size(64, 64);
- ASSERT_TRUE(decoder_.vaapi_wrapper_->CreateContextAndSurfaces(
- VA_RT_FORMAT_YUV420, coded_size, 1, &va_surfaces));
- ASSERT_EQ(va_surfaces.size(), 1u);
-
- std::unique_ptr<ScopedVAImage> scoped_image;
- {
- // On Stoney-Ridge devices the output image format is dependent on the
- // surface format. However when DoDecode() is not called the output image
- // format seems to default to I420. https://crbug.com/828119
- VAImageFormat va_image_format = kImageFormatI420;
- base::AutoLock auto_lock(*GetVaapiWrapperLock());
- scoped_image = std::make_unique<ScopedVAImage>(
- GetVaapiWrapperLock(), GetVaapiWrapperVaDisplay(), va_surfaces[0],
- &va_image_format, coded_size);
-
- EXPECT_TRUE(scoped_image->image());
- ASSERT_TRUE(scoped_image->IsValid());
- EXPECT_TRUE(scoped_image->va_buffer()->IsValid());
- EXPECT_TRUE(scoped_image->va_buffer()->data());
- }
-}
-
-// This test exercises creation of a ScopedVAImage with a bad VASurfaceID.
-TEST_F(VaapiJpegDecoderTest, BadScopedVAImage) {
-#if DCHECK_IS_ON()
- ::testing::FLAGS_gtest_death_test_style = "threadsafe";
-#endif
-
- const std::vector<VASurfaceID> va_surfaces = {VA_INVALID_ID};
- const gfx::Size coded_size(64, 64);
-
- std::unique_ptr<ScopedVAImage> scoped_image;
- {
- VAImageFormat va_image_format = kImageFormatI420;
- base::AutoLock auto_lock(*GetVaapiWrapperLock());
- scoped_image = std::make_unique<ScopedVAImage>(
- GetVaapiWrapperLock(), GetVaapiWrapperVaDisplay(), va_surfaces[0],
- &va_image_format, coded_size);
-
- EXPECT_TRUE(scoped_image->image());
- EXPECT_FALSE(scoped_image->IsValid());
-#if DCHECK_IS_ON()
- EXPECT_DCHECK_DEATH(scoped_image->va_buffer());
-#else
- EXPECT_FALSE(scoped_image->va_buffer());
-#endif
- }
-}
-
-// This test exercises creation of a ScopedVABufferMapping with bad VABufferIDs.
-TEST_F(VaapiJpegDecoderTest, BadScopedVABufferMapping) {
- ::testing::FLAGS_gtest_death_test_style = "threadsafe";
- base::AutoLock auto_lock(*GetVaapiWrapperLock());
-
- // A ScopedVABufferMapping with a VA_INVALID_ID VABufferID is DCHECK()ed.
- EXPECT_DCHECK_DEATH(std::make_unique<ScopedVABufferMapping>(
- GetVaapiWrapperLock(), GetVaapiWrapperVaDisplay(), VA_INVALID_ID));
-
- // This should not hit any DCHECK() but will create an invalid
- // ScopedVABufferMapping.
- auto scoped_buffer = std::make_unique<ScopedVABufferMapping>(
- GetVaapiWrapperLock(), GetVaapiWrapperVaDisplay(), VA_INVALID_ID - 1);
- EXPECT_FALSE(scoped_buffer->IsValid());
-}
-
std::string TestParamToString(
const testing::TestParamInfo<TestParam>& param_info) {
return param_info.param.test_name;
diff --git a/chromium/media/gpu/vaapi/vaapi_jpeg_encode_accelerator.cc b/chromium/media/gpu/vaapi/vaapi_jpeg_encode_accelerator.cc
index 046b9348446..2c67d19b868 100644
--- a/chromium/media/gpu/vaapi/vaapi_jpeg_encode_accelerator.cc
+++ b/chromium/media/gpu/vaapi/vaapi_jpeg_encode_accelerator.cc
@@ -10,17 +10,22 @@
#include <utility>
#include "base/bind.h"
+#include "base/bind_helpers.h"
#include "base/logging.h"
+#include "base/memory/writable_shared_memory_region.h"
#include "base/metrics/histogram_macros.h"
#include "base/sequence_checker.h"
#include "base/task/post_task.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/trace_event.h"
+#include "gpu/ipc/common/gpu_memory_buffer_support.h"
#include "media/base/bind_to_current_loop.h"
#include "media/base/video_frame.h"
-#include "media/filters/jpeg_parser.h"
+#include "media/gpu/linux/platform_video_frame_utils.h"
#include "media/gpu/macros.h"
#include "media/gpu/vaapi/vaapi_jpeg_encoder.h"
+#include "media/parsers/jpeg_parser.h"
+#include "ui/gfx/linux/native_pixmap_dmabuf.h"
namespace media {
@@ -42,12 +47,12 @@ static void ReportToUMA(VAJEAEncoderResult result) {
} // namespace
VaapiJpegEncodeAccelerator::EncodeRequest::EncodeRequest(
- int32_t buffer_id,
+ int32_t task_id,
scoped_refptr<VideoFrame> video_frame,
std::unique_ptr<UnalignedSharedMemory> exif_shm,
std::unique_ptr<UnalignedSharedMemory> output_shm,
int quality)
- : buffer_id(buffer_id),
+ : task_id(task_id),
video_frame(std::move(video_frame)),
exif_shm(std::move(exif_shm)),
output_shm(std::move(output_shm)),
@@ -62,6 +67,14 @@ class VaapiJpegEncodeAccelerator::Encoder {
base::RepeatingCallback<void(int32_t, Status)> notify_error_cb);
~Encoder();
+ // Processes one encode task with DMA-buf.
+ void EncodeWithDmaBufTask(
+ scoped_refptr<VideoFrame> input_frame,
+ scoped_refptr<VideoFrame> output_frame,
+ int32_t task_id,
+ int quality,
+ std::unique_ptr<WritableUnalignedMapping> exif_mapping);
+
// Processes one encode |request|.
void EncodeTask(std::unique_ptr<EncodeRequest> request);
@@ -75,6 +88,7 @@ class VaapiJpegEncodeAccelerator::Encoder {
std::unique_ptr<VaapiJpegEncoder> jpeg_encoder_;
scoped_refptr<VaapiWrapper> vaapi_wrapper_;
+ std::unique_ptr<gpu::GpuMemoryBufferSupport> gpu_memory_buffer_support_;
base::RepeatingCallback<void(int32_t, size_t)> video_frame_ready_cb_;
base::RepeatingCallback<void(int32_t, Status)> notify_error_cb_;
@@ -96,6 +110,7 @@ VaapiJpegEncodeAccelerator::Encoder::Encoder(
: cached_output_buffer_size_(0),
jpeg_encoder_(new VaapiJpegEncoder(vaapi_wrapper)),
vaapi_wrapper_(std::move(vaapi_wrapper)),
+ gpu_memory_buffer_support_(new gpu::GpuMemoryBufferSupport()),
video_frame_ready_cb_(std::move(video_frame_ready_cb)),
notify_error_cb_(std::move(notify_error_cb)),
va_surface_id_(VA_INVALID_SURFACE) {
@@ -106,13 +121,156 @@ VaapiJpegEncodeAccelerator::Encoder::~Encoder() {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
}
+void VaapiJpegEncodeAccelerator::Encoder::EncodeWithDmaBufTask(
+ scoped_refptr<VideoFrame> input_frame,
+ scoped_refptr<VideoFrame> output_frame,
+ int32_t task_id,
+ int quality,
+ std::unique_ptr<WritableUnalignedMapping> exif_mapping) {
+ DVLOGF(4);
+ TRACE_EVENT0("jpeg", "EncodeWithDmaBufTask");
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
+ gfx::Size input_size = input_frame->coded_size();
+
+ // Construct GBM Handle from VideoFrame.
+ gfx::GpuMemoryBufferHandle input_gmb_handle =
+ CreateGpuMemoryBufferHandle(input_frame.get());
+ if (input_gmb_handle.is_null()) {
+ VLOGF(1) << "Failed to create input gmb handle";
+ notify_error_cb_.Run(task_id, PLATFORM_FAILURE);
+ return;
+ }
+
+ // Create pixmap for input handle and create VA surface.
+ auto num_planes_input = VideoFrame::NumPlanes(input_frame->format());
+
+ // We only support NV12 format currently. Modify the check and |buffer_format|
+ // once we support other formats.
+ DCHECK(num_planes_input == 2);
+ gfx::BufferFormat buffer_format = gfx::BufferFormat::YUV_420_BIPLANAR;
+
+ auto va_surface = vaapi_wrapper_->CreateVASurfaceForPixmap(
+ base::WrapRefCounted(new gfx::NativePixmapDmaBuf(
+ input_size, buffer_format,
+ std::move(input_gmb_handle.native_pixmap_handle))));
+ if (!va_surface) {
+ VLOGF(1) << "Failed to create input va surface";
+ notify_error_cb_.Run(task_id, PLATFORM_FAILURE);
+ return;
+ }
+ va_surface_id_ = va_surface->id();
+
+ vaapi_wrapper_->DestroyContextAndSurfaces();
+ const bool success = vaapi_wrapper_->CreateContext(
+ VaapiWrapper::BufferFormatToVARTFormat(buffer_format), input_size);
+ if (!success) {
+ VLOGF(1) << "Failed to create context";
+ notify_error_cb_.Run(task_id, PLATFORM_FAILURE);
+ return;
+ }
+
+ // Create output buffer for encoding result.
+ size_t max_coded_buffer_size =
+ VaapiJpegEncoder::GetMaxCodedBufferSize(input_size);
+ if (max_coded_buffer_size > cached_output_buffer_size_) {
+ vaapi_wrapper_->DestroyVABuffers();
+ cached_output_buffer_size_ = 0;
+
+ VABufferID output_buffer_id;
+ if (!vaapi_wrapper_->CreateVABuffer(max_coded_buffer_size,
+ &output_buffer_id)) {
+ VLOGF(1) << "Failed to create VA buffer for encoding output";
+ notify_error_cb_.Run(task_id, PLATFORM_FAILURE);
+ return;
+ }
+ cached_output_buffer_size_ = max_coded_buffer_size;
+ cached_output_buffer_id_ = output_buffer_id;
+ }
+
+ // Prepare exif.
+ const uint8_t* exif_buffer;
+ size_t exif_buffer_size = 0;
+ if (exif_mapping) {
+ exif_buffer = static_cast<const uint8_t*>(exif_mapping->memory());
+ exif_buffer_size = exif_mapping->size();
+ } else {
+ exif_buffer = nullptr;
+ }
+ // When the exif buffer contains a thumbnail, the VAAPI encoder would
+ // generate a corrupted JPEG. We can work around the problem by supplying an
+ // all-zero buffer with the same size and fill in the real exif buffer after
+ // encoding.
+ // TODO(shenghao): Remove this mechanism after b/79840013 is fixed.
+ std::vector<uint8_t> exif_buffer_dummy(exif_buffer_size, 0);
+ size_t exif_offset = 0;
+
+ if (!jpeg_encoder_->Encode(input_size, exif_buffer_dummy.data(),
+ exif_buffer_size, quality, va_surface_id_,
+ cached_output_buffer_id_, &exif_offset)) {
+ VLOGF(1) << "Encode JPEG failed";
+ notify_error_cb_.Run(task_id, PLATFORM_FAILURE);
+ return;
+ }
+
+ // Create gmb buffer from output VideoFrame.
+ auto output_gmb_handle = CreateGpuMemoryBufferHandle(output_frame.get());
+ if (output_gmb_handle.is_null()) {
+ VLOGF(1) << "Failed to create GpuMemoryBufferHandle";
+ notify_error_cb_.Run(task_id, PLATFORM_FAILURE);
+ return;
+ }
+ auto output_gmb_buffer =
+ gpu_memory_buffer_support_->CreateGpuMemoryBufferImplFromHandle(
+ std::move(output_gmb_handle), output_frame->coded_size(),
+ gfx::BufferFormat::R_8, gfx::BufferUsage::SCANOUT_CAMERA_READ_WRITE,
+ base::DoNothing());
+ if (output_gmb_buffer == nullptr) {
+ VLOGF(1) << "Failed to create GpuMemoryBufferImpl from handle";
+ notify_error_cb_.Run(task_id, PLATFORM_FAILURE);
+ return;
+ }
+
+ bool isMapped = output_gmb_buffer->Map();
+ if (!isMapped) {
+ VLOGF(1) << "Map the output gmb buffer failed";
+ notify_error_cb_.Run(task_id, PLATFORM_FAILURE);
+ return;
+ }
+
+ // Get the encoded output. DownloadFromVABuffer() is a blocking call. It
+ // would wait until encoding is finished.
+ uint8_t* output_memory = static_cast<uint8_t*>(output_gmb_buffer->memory(0));
+ size_t encoded_size = 0;
+ // Since the format of |output_gmb_buffer| is gfx::BufferFormat::R_8, we can
+ // use its area as the maximum bytes we need to download to avoid buffer
+ // overflow.
+ if (!vaapi_wrapper_->DownloadFromVABuffer(
+ cached_output_buffer_id_, va_surface_id_,
+ static_cast<uint8_t*>(output_memory),
+ output_gmb_buffer->GetSize().GetArea(), &encoded_size)) {
+ VLOGF(1) << "Failed to retrieve output image from VA coded buffer";
+ notify_error_cb_.Run(task_id, PLATFORM_FAILURE);
+
+ output_gmb_buffer->Unmap();
+ return;
+ }
+
+ // Copy the real exif buffer into preserved space.
+ memcpy(static_cast<uint8_t*>(output_memory) + exif_offset, exif_buffer,
+ exif_buffer_size);
+
+ output_gmb_buffer->Unmap();
+ video_frame_ready_cb_.Run(task_id, encoded_size);
+}
+
void VaapiJpegEncodeAccelerator::Encoder::EncodeTask(
std::unique_ptr<EncodeRequest> request) {
DVLOGF(4);
TRACE_EVENT0("jpeg", "EncodeTask");
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
- const int buffer_id = request->buffer_id;
+ const int task_id = request->task_id;
gfx::Size input_size = request->video_frame->coded_size();
// Recreate VASurface if the video frame's size changed.
@@ -125,17 +283,17 @@ void VaapiJpegEncodeAccelerator::Encoder::EncodeTask(
if (!vaapi_wrapper_->CreateContextAndSurfaces(
VA_RT_FORMAT_YUV420, input_size, 1, &va_surfaces)) {
VLOGF(1) << "Failed to create VA surface";
- notify_error_cb_.Run(buffer_id, PLATFORM_FAILURE);
+ notify_error_cb_.Run(task_id, PLATFORM_FAILURE);
return;
}
va_surface_id_ = va_surfaces[0];
surface_size_ = input_size;
}
- if (!vaapi_wrapper_->UploadVideoFrameToSurface(request->video_frame,
+ if (!vaapi_wrapper_->UploadVideoFrameToSurface(*request->video_frame,
va_surface_id_)) {
VLOGF(1) << "Failed to upload video frame to VA surface";
- notify_error_cb_.Run(buffer_id, PLATFORM_FAILURE);
+ notify_error_cb_.Run(task_id, PLATFORM_FAILURE);
return;
}
@@ -150,7 +308,7 @@ void VaapiJpegEncodeAccelerator::Encoder::EncodeTask(
if (!vaapi_wrapper_->CreateVABuffer(max_coded_buffer_size,
&output_buffer_id)) {
VLOGF(1) << "Failed to create VA buffer for encoding output";
- notify_error_cb_.Run(buffer_id, PLATFORM_FAILURE);
+ notify_error_cb_.Run(task_id, PLATFORM_FAILURE);
return;
}
cached_output_buffer_size_ = max_coded_buffer_size;
@@ -175,7 +333,7 @@ void VaapiJpegEncodeAccelerator::Encoder::EncodeTask(
exif_buffer_size, request->quality, va_surface_id_,
cached_output_buffer_id_, &exif_offset)) {
VLOGF(1) << "Encode JPEG failed";
- notify_error_cb_.Run(buffer_id, PLATFORM_FAILURE);
+ notify_error_cb_.Run(task_id, PLATFORM_FAILURE);
return;
}
@@ -187,14 +345,14 @@ void VaapiJpegEncodeAccelerator::Encoder::EncodeTask(
static_cast<uint8_t*>(request->output_shm->memory()),
request->output_shm->size(), &encoded_size)) {
VLOGF(1) << "Failed to retrieve output image from VA coded buffer";
- notify_error_cb_.Run(buffer_id, PLATFORM_FAILURE);
+ notify_error_cb_.Run(task_id, PLATFORM_FAILURE);
}
// Copy the real exif buffer into preserved space.
memcpy(static_cast<uint8_t*>(request->output_shm->memory()) + exif_offset,
exif_buffer, exif_buffer_size);
- video_frame_ready_cb_.Run(buffer_id, encoded_size);
+ video_frame_ready_cb_.Run(task_id, encoded_size);
}
VaapiJpegEncodeAccelerator::VaapiJpegEncodeAccelerator(
@@ -216,25 +374,25 @@ VaapiJpegEncodeAccelerator::~VaapiJpegEncodeAccelerator() {
}
}
-void VaapiJpegEncodeAccelerator::NotifyError(int32_t buffer_id, Status status) {
+void VaapiJpegEncodeAccelerator::NotifyError(int32_t task_id, Status status) {
DCHECK(task_runner_->BelongsToCurrentThread());
- VLOGF(1) << "output_buffer_id=" << buffer_id << ", status=" << status;
+ VLOGF(1) << "task_id=" << task_id << ", status=" << status;
DCHECK(client_);
- client_->NotifyError(buffer_id, status);
+ client_->NotifyError(task_id, status);
}
-void VaapiJpegEncodeAccelerator::VideoFrameReady(int32_t buffer_id,
+void VaapiJpegEncodeAccelerator::VideoFrameReady(int32_t task_id,
size_t encoded_picture_size) {
- DVLOGF(4) << "output_buffer_id=" << buffer_id
- << ", size=" << encoded_picture_size;
+ DVLOGF(4) << "task_id=" << task_id << ", size=" << encoded_picture_size;
DCHECK(task_runner_->BelongsToCurrentThread());
ReportToUMA(VAJEAEncoderResult::VAAPI_SUCCESS);
- client_->VideoFrameReady(buffer_id, encoded_picture_size);
+ client_->VideoFrameReady(task_id, encoded_picture_size);
}
-JpegEncodeAccelerator::Status VaapiJpegEncodeAccelerator::Initialize(
- JpegEncodeAccelerator::Client* client) {
+chromeos_camera::JpegEncodeAccelerator::Status
+VaapiJpegEncodeAccelerator::Initialize(
+ chromeos_camera::JpegEncodeAccelerator::Client* client) {
VLOGF(2);
DCHECK(task_runner_->BelongsToCurrentThread());
@@ -277,58 +435,58 @@ size_t VaapiJpegEncodeAccelerator::GetMaxCodedBufferSize(
void VaapiJpegEncodeAccelerator::Encode(scoped_refptr<VideoFrame> video_frame,
int quality,
- const BitstreamBuffer* exif_buffer,
- const BitstreamBuffer& output_buffer) {
+ BitstreamBuffer* exif_buffer,
+ BitstreamBuffer output_buffer) {
DVLOGF(4);
DCHECK(io_task_runner_->BelongsToCurrentThread());
- int32_t buffer_id = output_buffer.id();
- TRACE_EVENT1("jpeg", "Encode", "output_buffer_id", buffer_id);
+ int32_t task_id = output_buffer.id();
+ TRACE_EVENT1("jpeg", "Encode", "task_id", task_id);
// TODO(shenghao): support other YUV formats.
if (video_frame->format() != VideoPixelFormat::PIXEL_FORMAT_I420) {
VLOGF(1) << "Unsupported input format: " << video_frame->format();
task_runner_->PostTask(
FROM_HERE, base::BindOnce(&VaapiJpegEncodeAccelerator::NotifyError,
- weak_this_, buffer_id, INVALID_ARGUMENT));
+ weak_this_, task_id, INVALID_ARGUMENT));
return;
}
std::unique_ptr<UnalignedSharedMemory> exif_shm;
if (exif_buffer) {
- // |exif_shm| will take ownership of the |exif_buffer->handle()|.
+ // |exif_shm| will take ownership of the |exif_buffer->region()|.
exif_shm = std::make_unique<UnalignedSharedMemory>(
- exif_buffer->handle(), exif_buffer->size(), true);
+ exif_buffer->TakeRegion(), exif_buffer->size(), false);
if (!exif_shm->MapAt(exif_buffer->offset(), exif_buffer->size())) {
VLOGF(1) << "Failed to map exif buffer";
task_runner_->PostTask(
FROM_HERE, base::BindOnce(&VaapiJpegEncodeAccelerator::NotifyError,
- weak_this_, buffer_id, PLATFORM_FAILURE));
+ weak_this_, task_id, PLATFORM_FAILURE));
return;
}
if (exif_shm->size() > kMaxMarkerSizeAllowed) {
VLOGF(1) << "Exif buffer too big: " << exif_shm->size();
task_runner_->PostTask(
FROM_HERE, base::BindOnce(&VaapiJpegEncodeAccelerator::NotifyError,
- weak_this_, buffer_id, INVALID_ARGUMENT));
+ weak_this_, task_id, INVALID_ARGUMENT));
return;
}
}
// |output_shm| will take ownership of the |output_buffer.handle()|.
auto output_shm = std::make_unique<UnalignedSharedMemory>(
- output_buffer.handle(), output_buffer.size(), false);
+ output_buffer.TakeRegion(), output_buffer.size(), false);
if (!output_shm->MapAt(output_buffer.offset(), output_buffer.size())) {
VLOGF(1) << "Failed to map output buffer";
task_runner_->PostTask(
FROM_HERE,
base::BindOnce(&VaapiJpegEncodeAccelerator::NotifyError, weak_this_,
- buffer_id, INACCESSIBLE_OUTPUT_BUFFER));
+ task_id, INACCESSIBLE_OUTPUT_BUFFER));
return;
}
auto request = std::make_unique<EncodeRequest>(
- buffer_id, std::move(video_frame), std::move(exif_shm),
+ task_id, std::move(video_frame), std::move(exif_shm),
std::move(output_shm), quality);
encoder_task_runner_->PostTask(
@@ -337,4 +495,59 @@ void VaapiJpegEncodeAccelerator::Encode(scoped_refptr<VideoFrame> video_frame,
base::Unretained(encoder_.get()), std::move(request)));
}
+void VaapiJpegEncodeAccelerator::EncodeWithDmaBuf(
+ scoped_refptr<VideoFrame> input_frame,
+ scoped_refptr<VideoFrame> output_frame,
+ int quality,
+ int32_t task_id,
+ BitstreamBuffer* exif_buffer) {
+ DVLOGF(4);
+ DCHECK(io_task_runner_->BelongsToCurrentThread());
+ TRACE_EVENT1("jpeg", "Encode", "task_id", task_id);
+
+ // TODO(wtlee): Supports other formats.
+ if (input_frame->format() != VideoPixelFormat::PIXEL_FORMAT_NV12) {
+ VLOGF(1) << "Unsupported input format: " << input_frame->format();
+ task_runner_->PostTask(
+ FROM_HERE, base::BindOnce(&VaapiJpegEncodeAccelerator::NotifyError,
+ weak_this_, task_id, INVALID_ARGUMENT));
+ return;
+ }
+ if (output_frame->format() != VideoPixelFormat::PIXEL_FORMAT_MJPEG) {
+ VLOGF(1) << "Unsupported output format: " << output_frame->format();
+ task_runner_->PostTask(
+ FROM_HERE, base::BindOnce(&VaapiJpegEncodeAccelerator::NotifyError,
+ weak_this_, task_id, INVALID_ARGUMENT));
+ return;
+ }
+
+ std::unique_ptr<WritableUnalignedMapping> exif_mapping;
+ if (exif_buffer) {
+ // |exif_mapping| will take ownership of the |exif_buffer->region()|.
+ exif_mapping = std::make_unique<WritableUnalignedMapping>(
+ base::UnsafeSharedMemoryRegion::Deserialize(exif_buffer->TakeRegion()),
+ exif_buffer->size(), exif_buffer->offset());
+ if (!exif_mapping->IsValid()) {
+ LOG(ERROR) << "Failed to map exif buffer";
+ task_runner_->PostTask(
+ FROM_HERE, base::BindOnce(&VaapiJpegEncodeAccelerator::NotifyError,
+ weak_this_, task_id, PLATFORM_FAILURE));
+ return;
+ }
+ if (exif_mapping->size() > kMaxMarkerSizeAllowed) {
+ LOG(ERROR) << "Exif buffer too big: " << exif_mapping->size();
+ task_runner_->PostTask(
+ FROM_HERE, base::BindOnce(&VaapiJpegEncodeAccelerator::NotifyError,
+ weak_this_, task_id, INVALID_ARGUMENT));
+ return;
+ }
+ }
+
+ encoder_task_runner_->PostTask(
+ FROM_HERE,
+ base::BindOnce(&VaapiJpegEncodeAccelerator::Encoder::EncodeWithDmaBufTask,
+ base::Unretained(encoder_.get()), input_frame,
+ output_frame, task_id, quality, std::move(exif_mapping)));
+}
+
} // namespace media
diff --git a/chromium/media/gpu/vaapi/vaapi_jpeg_encode_accelerator.h b/chromium/media/gpu/vaapi/vaapi_jpeg_encode_accelerator.h
index 315b439f406..fe857ee4b23 100644
--- a/chromium/media/gpu/vaapi/vaapi_jpeg_encode_accelerator.h
+++ b/chromium/media/gpu/vaapi/vaapi_jpeg_encode_accelerator.h
@@ -10,11 +10,11 @@
#include "base/macros.h"
#include "base/memory/weak_ptr.h"
#include "base/single_thread_task_runner.h"
+#include "components/chromeos_camera/jpeg_encode_accelerator.h"
#include "media/base/bitstream_buffer.h"
#include "media/base/unaligned_shared_memory.h"
#include "media/gpu/media_gpu_export.h"
#include "media/gpu/vaapi/vaapi_wrapper.h"
-#include "media/video/jpeg_encode_accelerator.h"
namespace media {
@@ -28,34 +28,41 @@ namespace media {
// a weak this can be run on the encoder thread because it can assume
// VaapiJpegEncodeAccelerator is still alive.
class MEDIA_GPU_EXPORT VaapiJpegEncodeAccelerator
- : public JpegEncodeAccelerator {
+ : public chromeos_camera::JpegEncodeAccelerator {
public:
explicit VaapiJpegEncodeAccelerator(
scoped_refptr<base::SingleThreadTaskRunner> io_task_runner);
~VaapiJpegEncodeAccelerator() override;
// JpegEncodeAccelerator implementation.
- Status Initialize(JpegEncodeAccelerator::Client* client) override;
+ chromeos_camera::JpegEncodeAccelerator::Status Initialize(
+ chromeos_camera::JpegEncodeAccelerator::Client* client) override;
size_t GetMaxCodedBufferSize(const gfx::Size& picture_size) override;
// Currently only I420 format is supported for |video_frame|.
void Encode(scoped_refptr<VideoFrame> video_frame,
int quality,
- const BitstreamBuffer* exif_buffer,
- const BitstreamBuffer& output_buffer) override;
+ BitstreamBuffer* exif_buffer,
+ BitstreamBuffer output_buffer) override;
+
+ void EncodeWithDmaBuf(scoped_refptr<VideoFrame> input_frame,
+ scoped_refptr<VideoFrame> output_frame,
+ int quality,
+ int32_t task_id,
+ BitstreamBuffer* exif_buffer) override;
private:
// An input video frame and the corresponding output buffer awaiting
// consumption, provided by the client.
struct EncodeRequest {
- EncodeRequest(int32_t buffer_id,
+ EncodeRequest(int32_t task_id,
scoped_refptr<VideoFrame> video_frame,
std::unique_ptr<UnalignedSharedMemory> exif_shm,
std::unique_ptr<UnalignedSharedMemory> output_shm,
int quality);
~EncodeRequest();
- int32_t buffer_id;
+ int32_t task_id;
scoped_refptr<VideoFrame> video_frame;
std::unique_ptr<UnalignedSharedMemory> exif_shm;
std::unique_ptr<UnalignedSharedMemory> output_shm;
@@ -70,9 +77,9 @@ class MEDIA_GPU_EXPORT VaapiJpegEncodeAccelerator
// Notifies the client that an error has occurred and encoding cannot
// continue.
- void NotifyError(int32_t buffer_id, Status status);
+ void NotifyError(int32_t task_id, Status status);
- void VideoFrameReady(int32_t buffer_id, size_t encoded_picture_size);
+ void VideoFrameReady(int32_t task_id, size_t encoded_picture_size);
// ChildThread's task runner.
scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
diff --git a/chromium/media/gpu/vaapi/vaapi_jpeg_encoder.cc b/chromium/media/gpu/vaapi/vaapi_jpeg_encoder.cc
index e4bd4f1738f..ebfaa37ae7f 100644
--- a/chromium/media/gpu/vaapi/vaapi_jpeg_encoder.cc
+++ b/chromium/media/gpu/vaapi/vaapi_jpeg_encoder.cc
@@ -13,9 +13,9 @@
#include "base/logging.h"
#include "base/numerics/safe_conversions.h"
#include "base/stl_util.h"
-#include "media/filters/jpeg_parser.h"
#include "media/gpu/macros.h"
#include "media/gpu/vaapi/vaapi_wrapper.h"
+#include "media/parsers/jpeg_parser.h"
namespace media {
diff --git a/chromium/media/gpu/vaapi/vaapi_mjpeg_decode_accelerator.cc b/chromium/media/gpu/vaapi/vaapi_mjpeg_decode_accelerator.cc
index 07ec901f206..35eabc431b7 100644
--- a/chromium/media/gpu/vaapi/vaapi_mjpeg_decode_accelerator.cc
+++ b/chromium/media/gpu/vaapi/vaapi_mjpeg_decode_accelerator.cc
@@ -24,6 +24,7 @@
#include "media/base/video_frame.h"
#include "media/base/video_types.h"
#include "media/gpu/macros.h"
+#include "media/gpu/vaapi/va_surface.h"
#include "media/gpu/vaapi/vaapi_utils.h"
#include "media/gpu/vaapi/vaapi_wrapper.h"
#include "third_party/libyuv/include/libyuv.h"
@@ -45,23 +46,23 @@ static void ReportToVAJDADecoderFailureUMA(VAJDADecoderFailure failure) {
}
static void ReportToVAJDAResponseToClientUMA(
- MjpegDecodeAccelerator::Error response) {
+ chromeos_camera::MjpegDecodeAccelerator::Error response) {
UMA_HISTOGRAM_ENUMERATION(
"Media.VAJDA.ResponseToClient", response,
- MjpegDecodeAccelerator::Error::MJDA_ERROR_CODE_MAX + 1);
+ chromeos_camera::MjpegDecodeAccelerator::Error::MJDA_ERROR_CODE_MAX + 1);
}
-static MjpegDecodeAccelerator::Error VaapiJpegDecodeStatusToError(
- VaapiJpegDecodeStatus status) {
+static chromeos_camera::MjpegDecodeAccelerator::Error
+VaapiJpegDecodeStatusToError(VaapiJpegDecodeStatus status) {
switch (status) {
case VaapiJpegDecodeStatus::kSuccess:
- return MjpegDecodeAccelerator::Error::NO_ERRORS;
+ return chromeos_camera::MjpegDecodeAccelerator::Error::NO_ERRORS;
case VaapiJpegDecodeStatus::kParseJpegFailed:
- return MjpegDecodeAccelerator::Error::PARSE_JPEG_FAILED;
+ return chromeos_camera::MjpegDecodeAccelerator::Error::PARSE_JPEG_FAILED;
case VaapiJpegDecodeStatus::kUnsupportedSubsampling:
- return MjpegDecodeAccelerator::Error::UNSUPPORTED_JPEG;
+ return chromeos_camera::MjpegDecodeAccelerator::Error::UNSUPPORTED_JPEG;
default:
- return MjpegDecodeAccelerator::Error::PLATFORM_FAILURE;
+ return chromeos_camera::MjpegDecodeAccelerator::Error::PLATFORM_FAILURE;
}
}
@@ -94,7 +95,7 @@ void VaapiMjpegDecodeAccelerator::NotifyError(int32_t bitstream_buffer_id,
VLOGF(1) << "Notifying of error " << error;
// |error| shouldn't be NO_ERRORS because successful decodes should be handled
// by VideoFrameReady().
- DCHECK_NE(MjpegDecodeAccelerator::Error::NO_ERRORS, error);
+ DCHECK_NE(chromeos_camera::MjpegDecodeAccelerator::Error::NO_ERRORS, error);
ReportToVAJDAResponseToClientUMA(error);
DCHECK(client_);
client_->NotifyError(bitstream_buffer_id, error);
@@ -102,7 +103,8 @@ void VaapiMjpegDecodeAccelerator::NotifyError(int32_t bitstream_buffer_id,
void VaapiMjpegDecodeAccelerator::VideoFrameReady(int32_t bitstream_buffer_id) {
DCHECK(task_runner_->BelongsToCurrentThread());
- ReportToVAJDAResponseToClientUMA(MjpegDecodeAccelerator::Error::NO_ERRORS);
+ ReportToVAJDAResponseToClientUMA(
+ chromeos_camera::MjpegDecodeAccelerator::Error::NO_ERRORS);
client_->VideoFrameReady(bitstream_buffer_id);
}
@@ -122,7 +124,8 @@ VaapiMjpegDecodeAccelerator::~VaapiMjpegDecodeAccelerator() {
decoder_thread_.Stop();
}
-bool VaapiMjpegDecodeAccelerator::Initialize(Client* client) {
+bool VaapiMjpegDecodeAccelerator::Initialize(
+ chromeos_camera::MjpegDecodeAccelerator::Client* client) {
VLOGF(2);
DCHECK(task_runner_->BelongsToCurrentThread());
@@ -145,7 +148,7 @@ bool VaapiMjpegDecodeAccelerator::Initialize(Client* client) {
bool VaapiMjpegDecodeAccelerator::OutputPictureOnTaskRunner(
std::unique_ptr<ScopedVAImage> scoped_image,
int32_t input_buffer_id,
- const scoped_refptr<VideoFrame>& video_frame) {
+ scoped_refptr<VideoFrame> video_frame) {
DCHECK(decoder_task_runner_->BelongsToCurrentThread());
TRACE_EVENT1("jpeg", "VaapiMjpegDecodeAccelerator::OutputPictureOnTaskRunner",
@@ -224,25 +227,30 @@ void VaapiMjpegDecodeAccelerator::DecodeTask(
TRACE_EVENT0("jpeg", "DecodeTask");
VaapiJpegDecodeStatus status;
- std::unique_ptr<ScopedVAImage> image = decoder_.DoDecode(
- base::make_span<const uint8_t>(static_cast<const uint8_t*>(shm->memory()),
- shm->size()),
+ decoder_.Decode(
+ base::make_span(static_cast<const uint8_t*>(shm->memory()), shm->size()),
&status);
if (status != VaapiJpegDecodeStatus::kSuccess) {
NotifyError(bitstream_buffer_id, VaapiJpegDecodeStatusToError(status));
return;
}
+ std::unique_ptr<ScopedVAImage> image =
+ decoder_.GetImage(VA_FOURCC_I420 /* preferred_image_fourcc */, &status);
+ if (status != VaapiJpegDecodeStatus::kSuccess) {
+ NotifyError(bitstream_buffer_id, VaapiJpegDecodeStatusToError(status));
+ return;
+ }
if (!OutputPictureOnTaskRunner(std::move(image), bitstream_buffer_id,
- video_frame)) {
+ std::move(video_frame))) {
VLOGF(1) << "Output picture failed";
NotifyError(bitstream_buffer_id, PLATFORM_FAILURE);
}
}
void VaapiMjpegDecodeAccelerator::Decode(
- const BitstreamBuffer& bitstream_buffer,
- const scoped_refptr<VideoFrame>& video_frame) {
+ BitstreamBuffer bitstream_buffer,
+ scoped_refptr<VideoFrame> video_frame) {
DCHECK(io_task_runner_->BelongsToCurrentThread());
TRACE_EVENT1("jpeg", "Decode", "input_id", bitstream_buffer.id());
@@ -251,7 +259,8 @@ void VaapiMjpegDecodeAccelerator::Decode(
// UnalignedSharedMemory will take over the |bitstream_buffer.handle()|.
auto shm = std::make_unique<UnalignedSharedMemory>(
- bitstream_buffer.handle(), bitstream_buffer.size(), true);
+ bitstream_buffer.TakeRegion(), bitstream_buffer.size(),
+ false /* read_only */);
if (bitstream_buffer.id() < 0) {
VLOGF(1) << "Invalid bitstream_buffer, id: " << bitstream_buffer.id();
diff --git a/chromium/media/gpu/vaapi/vaapi_mjpeg_decode_accelerator.h b/chromium/media/gpu/vaapi/vaapi_mjpeg_decode_accelerator.h
index 06b03c2a7e5..c32ceb5bc22 100644
--- a/chromium/media/gpu/vaapi/vaapi_mjpeg_decode_accelerator.h
+++ b/chromium/media/gpu/vaapi/vaapi_mjpeg_decode_accelerator.h
@@ -13,9 +13,9 @@
#include "base/memory/scoped_refptr.h"
#include "base/memory/weak_ptr.h"
#include "base/threading/thread.h"
+#include "components/chromeos_camera/mjpeg_decode_accelerator.h"
#include "media/gpu/media_gpu_export.h"
#include "media/gpu/vaapi/vaapi_jpeg_decoder.h"
-#include "media/video/mjpeg_decode_accelerator.h"
namespace base {
class SingleThreadTaskRunner;
@@ -37,16 +37,17 @@ class VideoFrame;
// stopped during |this->Destroy()|, so any tasks posted to the decoder thread
// can assume |*this| is still alive. See |weak_this_| below for more details.
class MEDIA_GPU_EXPORT VaapiMjpegDecodeAccelerator
- : public MjpegDecodeAccelerator {
+ : public chromeos_camera::MjpegDecodeAccelerator {
public:
VaapiMjpegDecodeAccelerator(
const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner);
~VaapiMjpegDecodeAccelerator() override;
- // MjpegDecodeAccelerator implementation.
- bool Initialize(MjpegDecodeAccelerator::Client* client) override;
- void Decode(const BitstreamBuffer& bitstream_buffer,
- const scoped_refptr<VideoFrame>& video_frame) override;
+ // chromeos_camera::MjpegDecodeAccelerator implementation.
+ bool Initialize(
+ chromeos_camera::MjpegDecodeAccelerator::Client* client) override;
+ void Decode(BitstreamBuffer bitstream_buffer,
+ scoped_refptr<VideoFrame> video_frame) override;
bool IsSupported() override;
private:
@@ -68,7 +69,7 @@ class MEDIA_GPU_EXPORT VaapiMjpegDecodeAccelerator
// |input_buffer_id| of the resulting picture to client for output.
bool OutputPictureOnTaskRunner(std::unique_ptr<ScopedVAImage> image,
int32_t input_buffer_id,
- const scoped_refptr<VideoFrame>& video_frame);
+ scoped_refptr<VideoFrame> video_frame);
// ChildThread's task runner.
const scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
@@ -77,7 +78,7 @@ class MEDIA_GPU_EXPORT VaapiMjpegDecodeAccelerator
const scoped_refptr<base::SingleThreadTaskRunner> io_task_runner_;
// The client of this class.
- Client* client_;
+ chromeos_camera::MjpegDecodeAccelerator::Client* client_;
VaapiJpegDecoder decoder_;
diff --git a/chromium/media/gpu/vaapi/vaapi_picture_native_pixmap_ozone.cc b/chromium/media/gpu/vaapi/vaapi_picture_native_pixmap_ozone.cc
index a4a9445b34e..0c3a6d8f520 100644
--- a/chromium/media/gpu/vaapi/vaapi_picture_native_pixmap_ozone.cc
+++ b/chromium/media/gpu/vaapi/vaapi_picture_native_pixmap_ozone.cc
@@ -97,9 +97,9 @@ bool VaapiPictureNativePixmapOzone::Allocate(gfx::BufferFormat format) {
ui::OzonePlatform* platform = ui::OzonePlatform::GetInstance();
ui::SurfaceFactoryOzone* factory = platform->GetSurfaceFactoryOzone();
- pixmap_ =
- factory->CreateNativePixmap(gfx::kNullAcceleratedWidget, size_, format,
- gfx::BufferUsage::SCANOUT_VDA_WRITE);
+ pixmap_ = factory->CreateNativePixmap(gfx::kNullAcceleratedWidget,
+ VK_NULL_HANDLE, size_, format,
+ gfx::BufferUsage::SCANOUT_VDA_WRITE);
if (!pixmap_) {
LOG(ERROR) << "Failed allocating a pixmap";
return false;
diff --git a/chromium/media/gpu/vaapi/vaapi_utils_unittest.cc b/chromium/media/gpu/vaapi/vaapi_utils_unittest.cc
new file mode 100644
index 00000000000..f019f6a59d1
--- /dev/null
+++ b/chromium/media/gpu/vaapi/vaapi_utils_unittest.cc
@@ -0,0 +1,122 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <memory>
+#include <vector>
+
+#include <va/va.h>
+
+// This has to be included first.
+// See http://code.google.com/p/googletest/issues/detail?id=371
+#include "testing/gtest/include/gtest/gtest.h"
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/scoped_refptr.h"
+#include "base/synchronization/lock.h"
+#include "base/test/gtest_util.h"
+#include "media/gpu/vaapi/vaapi_utils.h"
+#include "media/gpu/vaapi/vaapi_wrapper.h"
+#include "ui/gfx/geometry/size.h"
+
+namespace media {
+namespace {
+
+constexpr VAImageFormat kImageFormatI420 = {
+ .fourcc = VA_FOURCC_I420,
+ .byte_order = VA_LSB_FIRST,
+ .bits_per_pixel = 12,
+};
+
+} // namespace
+
+class VaapiUtilsTest : public testing::Test {
+ protected:
+ VaapiUtilsTest() = default;
+
+ void SetUp() override {
+ // Create a VaapiWrapper for testing.
+ vaapi_wrapper_ = VaapiWrapper::Create(
+ VaapiWrapper::kDecode, VAProfileJPEGBaseline,
+ base::BindRepeating([]() { LOG(FATAL) << "Oh noes! Decoder failed"; }));
+ ASSERT_TRUE(vaapi_wrapper_);
+ }
+
+ protected:
+ scoped_refptr<VaapiWrapper> vaapi_wrapper_;
+
+ DISALLOW_COPY_AND_ASSIGN(VaapiUtilsTest);
+};
+
+// This test exercises the usual ScopedVAImage lifetime.
+TEST_F(VaapiUtilsTest, ScopedVAImage) {
+ std::vector<VASurfaceID> va_surfaces;
+ const gfx::Size coded_size(64, 64);
+ ASSERT_TRUE(vaapi_wrapper_->CreateContextAndSurfaces(
+ VA_RT_FORMAT_YUV420, coded_size, 1, &va_surfaces));
+ ASSERT_EQ(va_surfaces.size(), 1u);
+
+ std::unique_ptr<ScopedVAImage> scoped_image;
+ {
+ // On Stoney-Ridge devices the output image format is dependent on the
+ // surface format. However when context has not been executed the output
+ // image format seems to default to I420. https://crbug.com/828119
+ VAImageFormat va_image_format = kImageFormatI420;
+ base::AutoLock auto_lock(*vaapi_wrapper_->va_lock_);
+ scoped_image = std::make_unique<ScopedVAImage>(
+ vaapi_wrapper_->va_lock_, vaapi_wrapper_->va_display_, va_surfaces[0],
+ &va_image_format, coded_size);
+
+ EXPECT_TRUE(scoped_image->image());
+ ASSERT_TRUE(scoped_image->IsValid());
+ EXPECT_TRUE(scoped_image->va_buffer()->IsValid());
+ EXPECT_TRUE(scoped_image->va_buffer()->data());
+ }
+}
+
+// This test exercises creation of a ScopedVAImage with a bad VASurfaceID.
+TEST_F(VaapiUtilsTest, BadScopedVAImage) {
+#if DCHECK_IS_ON()
+ ::testing::FLAGS_gtest_death_test_style = "threadsafe";
+#endif
+
+ const std::vector<VASurfaceID> va_surfaces = {VA_INVALID_ID};
+ const gfx::Size coded_size(64, 64);
+
+ std::unique_ptr<ScopedVAImage> scoped_image;
+ {
+ VAImageFormat va_image_format = kImageFormatI420;
+ base::AutoLock auto_lock(*vaapi_wrapper_->va_lock_);
+ scoped_image = std::make_unique<ScopedVAImage>(
+ vaapi_wrapper_->va_lock_, vaapi_wrapper_->va_display_, va_surfaces[0],
+ &va_image_format, coded_size);
+
+ EXPECT_TRUE(scoped_image->image());
+ EXPECT_FALSE(scoped_image->IsValid());
+#if DCHECK_IS_ON()
+ EXPECT_DCHECK_DEATH(scoped_image->va_buffer());
+#else
+ EXPECT_FALSE(scoped_image->va_buffer());
+#endif
+ }
+}
+
+// This test exercises creation of a ScopedVABufferMapping with bad VABufferIDs.
+TEST_F(VaapiUtilsTest, BadScopedVABufferMapping) {
+ ::testing::FLAGS_gtest_death_test_style = "threadsafe";
+ base::AutoLock auto_lock(*vaapi_wrapper_->va_lock_);
+
+ // A ScopedVABufferMapping with a VA_INVALID_ID VABufferID is DCHECK()ed.
+ EXPECT_DCHECK_DEATH(std::make_unique<ScopedVABufferMapping>(
+ vaapi_wrapper_->va_lock_, vaapi_wrapper_->va_display_, VA_INVALID_ID));
+
+ // This should not hit any DCHECK() but will create an invalid
+ // ScopedVABufferMapping.
+ auto scoped_buffer = std::make_unique<ScopedVABufferMapping>(
+ vaapi_wrapper_->va_lock_, vaapi_wrapper_->va_display_, VA_INVALID_ID - 1);
+ EXPECT_FALSE(scoped_buffer->IsValid());
+}
+
+} // namespace media
diff --git a/chromium/media/gpu/vaapi/vaapi_video_decode_accelerator.cc b/chromium/media/gpu/vaapi/vaapi_video_decode_accelerator.cc
index 3c1316dca64..ac0c9e15464 100644
--- a/chromium/media/gpu/vaapi/vaapi_video_decode_accelerator.cc
+++ b/chromium/media/gpu/vaapi/vaapi_video_decode_accelerator.cc
@@ -157,8 +157,8 @@ VaapiVideoDecodeAccelerator::VaapiVideoDecodeAccelerator(
bind_image_cb_(bind_image_cb),
weak_this_factory_(this) {
weak_this_ = weak_this_factory_.GetWeakPtr();
- va_surface_release_cb_ = BindToCurrentLoop(
- base::Bind(&VaapiVideoDecodeAccelerator::RecycleVASurfaceID, weak_this_));
+ va_surface_release_cb_ = BindToCurrentLoop(base::BindRepeating(
+ &VaapiVideoDecodeAccelerator::RecycleVASurfaceID, weak_this_));
base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
this, "media::VaapiVideoDecodeAccelerator",
base::ThreadTaskRunnerHandle::Get());
@@ -454,7 +454,8 @@ void VaapiVideoDecodeAccelerator::DecodeTask() {
base::BindOnce(
&VaapiVideoDecodeAccelerator::InitiateSurfaceSetChange,
weak_this_, decoder_->GetRequiredNumOfPictures(),
- decoder_->GetPicSize(), decoder_->GetNumReferenceFrames()));
+ decoder_->GetPicSize(), decoder_->GetNumReferenceFrames(),
+ decoder_->GetVisibleRect()));
// We'll get rescheduled once ProvidePictureBuffers() finishes.
return;
@@ -494,7 +495,8 @@ void VaapiVideoDecodeAccelerator::DecodeTask() {
void VaapiVideoDecodeAccelerator::InitiateSurfaceSetChange(
size_t num_pics,
gfx::Size size,
- size_t num_reference_frames) {
+ size_t num_reference_frames,
+ const gfx::Rect& visible_rect) {
DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK(!awaiting_va_surfaces_recycle_);
DCHECK_GT(num_pics, num_reference_frames);
@@ -508,6 +510,7 @@ void VaapiVideoDecodeAccelerator::InitiateSurfaceSetChange(
awaiting_va_surfaces_recycle_ = true;
requested_pic_size_ = size;
+ requested_visible_rect_ = visible_rect;
if (buffer_allocation_mode_ == BufferAllocationMode::kSuperReduced) {
// Add one to the reference frames for the one being currently egressed.
@@ -578,15 +581,14 @@ void VaapiVideoDecodeAccelerator::TryFinishSurfaceSetChange() {
const VideoPixelFormat format = GfxBufferFormatToVideoPixelFormat(
vaapi_picture_factory_->GetBufferFormat());
task_runner_->PostTask(
- FROM_HERE,
- base::BindOnce(&Client::ProvidePictureBuffers, client_,
- requested_num_pics_, format, 1, requested_pic_size_,
- vaapi_picture_factory_->GetGLTextureTarget()));
+ FROM_HERE, base::BindOnce(&Client::ProvidePictureBuffersWithVisibleRect,
+ client_, requested_num_pics_, format, 1,
+ requested_pic_size_, requested_visible_rect_,
+ vaapi_picture_factory_->GetGLTextureTarget()));
// |client_| may respond via AssignPictureBuffers().
}
-void VaapiVideoDecodeAccelerator::Decode(
- const BitstreamBuffer& bitstream_buffer) {
+void VaapiVideoDecodeAccelerator::Decode(BitstreamBuffer bitstream_buffer) {
Decode(bitstream_buffer.ToDecoderBuffer(), bitstream_buffer.id());
}
@@ -1011,7 +1013,7 @@ scoped_refptr<VASurface> VaapiVideoDecodeAccelerator::CreateSurface() {
return new VASurface(id, requested_pic_size_,
vaapi_wrapper_->va_surface_format(),
- va_surface_release_cb_);
+ base::BindOnce(va_surface_release_cb_));
}
// Find the first |available_va_surfaces_| id such that the associated
@@ -1027,7 +1029,7 @@ scoped_refptr<VASurface> VaapiVideoDecodeAccelerator::CreateSurface() {
base::Erase(available_va_surfaces_, va_surface_id);
return new VASurface(va_surface_id, requested_pic_size_,
vaapi_wrapper_->va_surface_format(),
- va_surface_release_cb_);
+ base::BindOnce(va_surface_release_cb_));
}
}
}
diff --git a/chromium/media/gpu/vaapi/vaapi_video_decode_accelerator.h b/chromium/media/gpu/vaapi/vaapi_video_decode_accelerator.h
index c6fff14fb0d..2d7fb0b0469 100644
--- a/chromium/media/gpu/vaapi/vaapi_video_decode_accelerator.h
+++ b/chromium/media/gpu/vaapi/vaapi_video_decode_accelerator.h
@@ -67,7 +67,7 @@ class MEDIA_GPU_EXPORT VaapiVideoDecodeAccelerator
// VideoDecodeAccelerator implementation.
bool Initialize(const Config& config, Client* client) override;
- void Decode(const BitstreamBuffer& bitstream_buffer) override;
+ void Decode(BitstreamBuffer bitstream_buffer) override;
void Decode(scoped_refptr<DecoderBuffer> buffer,
int32_t bitstream_id) override;
void AssignPictureBuffers(const std::vector<PictureBuffer>& buffers) override;
@@ -183,7 +183,8 @@ class MEDIA_GPU_EXPORT VaapiVideoDecodeAccelerator
// by |decoder_|.
void InitiateSurfaceSetChange(size_t num_pics,
gfx::Size size,
- size_t num_reference_frames);
+ size_t num_reference_frames,
+ const gfx::Rect& visible_rect);
// Check if the surfaces have been released or post ourselves for later.
void TryFinishSurfaceSetChange();
@@ -289,7 +290,7 @@ class MEDIA_GPU_EXPORT VaapiVideoDecodeAccelerator
base::WeakPtr<VaapiVideoDecodeAccelerator> weak_this_;
// Callback used when creating VASurface objects. Only used on |task_runner_|.
- VASurface::ReleaseCB va_surface_release_cb_;
+ base::RepeatingCallback<void(VASurfaceID)> va_surface_release_cb_;
// To expose client callbacks from VideoDecodeAccelerator. Used only on
// |task_runner_|.
@@ -313,9 +314,11 @@ class MEDIA_GPU_EXPORT VaapiVideoDecodeAccelerator
// to be returned before we can free them. Only used on |task_runner_|.
bool awaiting_va_surfaces_recycle_;
- // Last requested number/resolution of output PictureBuffers.
+ // Last requested number/resolution/visible rectangle of output
+ // PictureBuffers.
size_t requested_num_pics_;
gfx::Size requested_pic_size_;
+ gfx::Rect requested_visible_rect_;
// Potential extra PictureBuffers to request, used only on
// BufferAllocationMode::kNone, see DecideBufferAllocationMode().
size_t num_extra_pics_ = 0;
diff --git a/chromium/media/gpu/vaapi/vaapi_video_decode_accelerator_unittest.cc b/chromium/media/gpu/vaapi/vaapi_video_decode_accelerator_unittest.cc
index acb803ea127..ebff5f270f8 100644
--- a/chromium/media/gpu/vaapi/vaapi_video_decode_accelerator_unittest.cc
+++ b/chromium/media/gpu/vaapi/vaapi_video_decode_accelerator_unittest.cc
@@ -60,6 +60,7 @@ class MockAcceleratedVideoDecoder : public AcceleratedVideoDecoder {
MOCK_METHOD0(Reset, void());
MOCK_METHOD0(Decode, DecodeResult());
MOCK_CONST_METHOD0(GetPicSize, gfx::Size());
+ MOCK_CONST_METHOD0(GetVisibleRect, gfx::Rect());
MOCK_CONST_METHOD0(GetRequiredNumOfPictures, size_t());
MOCK_CONST_METHOD0(GetNumReferenceFrames, size_t());
};
@@ -178,8 +179,7 @@ class VaapiVideoDecodeAcceleratorTest : public TestWithParam<TestParams>,
~VaapiVideoDecodeAcceleratorTest() {}
void SetUp() override {
- in_shm_.reset(new base::SharedMemory);
- ASSERT_TRUE(in_shm_->CreateAndMapAnonymous(kInputSize));
+ in_shm_ = base::UnsafeSharedMemoryRegion::Create(kInputSize);
}
void SetVdaStateToUnitialized() {
@@ -187,9 +187,9 @@ class VaapiVideoDecodeAcceleratorTest : public TestWithParam<TestParams>,
vda_.state_ = VaapiVideoDecodeAccelerator::kUninitialized;
}
- void QueueInputBuffer(const BitstreamBuffer& bitstream_buffer) {
- vda_.QueueInputBuffer(bitstream_buffer.ToDecoderBuffer(),
- bitstream_buffer.id());
+ void QueueInputBuffer(BitstreamBuffer bitstream_buffer) {
+ auto id = bitstream_buffer.id();
+ vda_.QueueInputBuffer(bitstream_buffer.ToDecoderBuffer(), id);
}
void AssignPictureBuffers(const std::vector<PictureBuffer>& picture_buffers) {
@@ -247,11 +247,12 @@ class VaapiVideoDecodeAcceleratorTest : public TestWithParam<TestParams>,
1, picture_size, _))
.WillOnce(RunClosure(quit_closure));
- base::SharedMemoryHandle handle;
- handle = base::SharedMemory::DuplicateHandle(in_shm_->handle());
- BitstreamBuffer bitstream_buffer(bitstream_id, handle, kInputSize);
+ auto region = base::UnsafeSharedMemoryRegion::TakeHandleForSerialization(
+ in_shm_.Duplicate());
+ BitstreamBuffer bitstream_buffer(bitstream_id, std::move(region),
+ kInputSize);
- QueueInputBuffer(bitstream_buffer);
+ QueueInputBuffer(std::move(bitstream_buffer));
run_loop.Run();
}
@@ -330,11 +331,11 @@ class VaapiVideoDecodeAcceleratorTest : public TestWithParam<TestParams>,
EXPECT_CALL(*this, NotifyEndOfBitstreamBuffer(bitstream_id))
.WillOnce(RunClosure(quit_closure));
- base::SharedMemoryHandle handle;
- handle = base::SharedMemory::DuplicateHandle(in_shm_->handle());
- BitstreamBuffer bitstream_buffer(bitstream_id, handle, kInputSize);
+ auto region = base::UnsafeSharedMemoryRegion::TakeHandleForSerialization(
+ in_shm_.Duplicate());
+ QueueInputBuffer(
+ BitstreamBuffer(bitstream_id, std::move(region), kInputSize));
- QueueInputBuffer(bitstream_buffer);
run_loop.Run();
}
@@ -363,7 +364,7 @@ class VaapiVideoDecodeAcceleratorTest : public TestWithParam<TestParams>,
scoped_refptr<MockVaapiWrapper> mock_vaapi_wrapper_;
scoped_refptr<MockVaapiWrapper> mock_vpp_vaapi_wrapper_;
- std::unique_ptr<base::SharedMemory> in_shm_;
+ base::UnsafeSharedMemoryRegion in_shm_;
private:
base::WeakPtrFactory<VaapiVideoDecodeAcceleratorTest> weak_ptr_factory_;
@@ -391,20 +392,20 @@ TEST_P(VaapiVideoDecodeAcceleratorTest, SupportedPlatforms) {
TEST_P(VaapiVideoDecodeAcceleratorTest, QueueInputBufferAndError) {
SetVdaStateToUnitialized();
- base::SharedMemoryHandle handle;
- handle = base::SharedMemory::DuplicateHandle(in_shm_->handle());
- BitstreamBuffer bitstream_buffer(kBitstreamId, handle, kInputSize);
+ auto region = base::UnsafeSharedMemoryRegion::TakeHandleForSerialization(
+ in_shm_.Duplicate());
+ BitstreamBuffer bitstream_buffer(kBitstreamId, std::move(region), kInputSize);
EXPECT_CALL(*this,
NotifyError(VaapiVideoDecodeAccelerator::PLATFORM_FAILURE));
- QueueInputBuffer(bitstream_buffer);
+ QueueInputBuffer(std::move(bitstream_buffer));
}
// Verifies that Decode() returning kDecodeError ends up pinging NotifyError().
TEST_P(VaapiVideoDecodeAcceleratorTest, QueueInputBufferAndDecodeError) {
- base::SharedMemoryHandle handle;
- handle = base::SharedMemory::DuplicateHandle(in_shm_->handle());
- BitstreamBuffer bitstream_buffer(kBitstreamId, handle, kInputSize);
+ auto region = base::UnsafeSharedMemoryRegion::TakeHandleForSerialization(
+ in_shm_.Duplicate());
+ BitstreamBuffer bitstream_buffer(kBitstreamId, std::move(region), kInputSize);
base::RunLoop run_loop;
base::Closure quit_closure = run_loop.QuitClosure();
@@ -414,7 +415,7 @@ TEST_P(VaapiVideoDecodeAcceleratorTest, QueueInputBufferAndDecodeError) {
EXPECT_CALL(*this, NotifyError(VaapiVideoDecodeAccelerator::PLATFORM_FAILURE))
.WillOnce(RunClosure(quit_closure));
- QueueInputBuffer(bitstream_buffer);
+ QueueInputBuffer(std::move(bitstream_buffer));
run_loop.Run();
}
diff --git a/chromium/media/gpu/vaapi/vaapi_video_encode_accelerator.cc b/chromium/media/gpu/vaapi/vaapi_video_encode_accelerator.cc
index 0a70709bbf3..ea93b9bb575 100644
--- a/chromium/media/gpu/vaapi/vaapi_video_encode_accelerator.cc
+++ b/chromium/media/gpu/vaapi/vaapi_video_encode_accelerator.cc
@@ -130,9 +130,9 @@ struct VaapiVideoEncodeAccelerator::InputFrameRef {
};
struct VaapiVideoEncodeAccelerator::BitstreamBufferRef {
- BitstreamBufferRef(int32_t id, const BitstreamBuffer& buffer)
+ BitstreamBufferRef(int32_t id, BitstreamBuffer buffer)
: id(id),
- shm(std::make_unique<UnalignedSharedMemory>(buffer.handle(),
+ shm(std::make_unique<UnalignedSharedMemory>(buffer.TakeRegion(),
buffer.size(),
false)),
offset(buffer.offset()) {}
@@ -212,7 +212,8 @@ class VaapiVideoEncodeAccelerator::VP9Accelerator
AcceleratedVideoEncoder::EncodeJob* job,
const VP9Encoder::EncodeParams& encode_params,
scoped_refptr<VP9Picture> pic,
- const Vp9ReferenceFrameVector& ref_frames) override;
+ const Vp9ReferenceFrameVector& ref_frames,
+ const std::array<bool, kVp9NumRefsPerFrame>& ref_frames_used) override;
private:
VaapiVideoEncodeAccelerator* const vea_;
@@ -375,8 +376,8 @@ void VaapiVideoEncodeAccelerator::InitializeTask(const Config& config) {
DVLOGF(1) << "Frames in flight: " << num_frames_in_flight;
va_surface_release_cb_ = BindToCurrentLoop(
- base::Bind(&VaapiVideoEncodeAccelerator::RecycleVASurfaceID,
- base::Unretained(this)));
+ base::BindRepeating(&VaapiVideoEncodeAccelerator::RecycleVASurfaceID,
+ base::Unretained(this)));
va_surfaces_per_video_frame_ =
kNumSurfacesForOutputPicture +
@@ -417,7 +418,7 @@ void VaapiVideoEncodeAccelerator::UploadFrame(scoped_refptr<VideoFrame> frame,
VASurfaceID va_surface_id) {
DCHECK(encoder_thread_task_runner_->BelongsToCurrentThread());
DVLOGF(4) << "frame is uploading: " << va_surface_id;
- if (!vaapi_wrapper_->UploadVideoFrameToSurface(frame, va_surface_id))
+ if (!vaapi_wrapper_->UploadVideoFrameToSurface(*frame, va_surface_id))
NOTIFY_ERROR(kPlatformFailureError, "Failed to upload frame");
}
@@ -499,15 +500,16 @@ void VaapiVideoEncodeAccelerator::ReturnBitstreamBuffer(
buffer->id, encode_job->Metadata(data_size)));
}
-void VaapiVideoEncodeAccelerator::Encode(const scoped_refptr<VideoFrame>& frame,
+void VaapiVideoEncodeAccelerator::Encode(scoped_refptr<VideoFrame> frame,
bool force_keyframe) {
DVLOGF(4) << "Frame timestamp: " << frame->timestamp().InMilliseconds()
<< " force_keyframe: " << force_keyframe;
DCHECK(child_task_runner_->BelongsToCurrentThread());
encoder_thread_task_runner_->PostTask(
- FROM_HERE, base::BindOnce(&VaapiVideoEncodeAccelerator::EncodeTask,
- base::Unretained(this), frame, force_keyframe));
+ FROM_HERE,
+ base::BindOnce(&VaapiVideoEncodeAccelerator::EncodeTask,
+ base::Unretained(this), std::move(frame), force_keyframe));
}
void VaapiVideoEncodeAccelerator::EncodeTask(scoped_refptr<VideoFrame> frame,
@@ -515,7 +517,8 @@ void VaapiVideoEncodeAccelerator::EncodeTask(scoped_refptr<VideoFrame> frame,
DCHECK(encoder_thread_task_runner_->BelongsToCurrentThread());
DCHECK_NE(state_, kUninitialized);
- input_queue_.push(std::make_unique<InputFrameRef>(frame, force_keyframe));
+ input_queue_.push(
+ std::make_unique<InputFrameRef>(std::move(frame), force_keyframe));
EncodePendingInputs();
}
@@ -579,11 +582,13 @@ scoped_refptr<VaapiEncodeJob> VaapiVideoEncodeAccelerator::CreateEncodeJob(
scoped_refptr<VASurface> input_surface = new VASurface(
va_input_surface_id, coded_size_, vaapi_wrapper_->va_surface_format(),
- native_input_mode_ ? base::DoNothing() : va_surface_release_cb_);
+ native_input_mode_ ? base::DoNothing()
+ : base::BindOnce(va_surface_release_cb_));
- scoped_refptr<VASurface> reconstructed_surface = new VASurface(
- available_va_surface_ids_.back(), coded_size_,
- vaapi_wrapper_->va_surface_format(), va_surface_release_cb_);
+ scoped_refptr<VASurface> reconstructed_surface =
+ new VASurface(available_va_surface_ids_.back(), coded_size_,
+ vaapi_wrapper_->va_surface_format(),
+ base::BindOnce(va_surface_release_cb_));
available_va_surface_ids_.pop_back();
auto job = base::MakeRefCounted<VaapiEncodeJob>(
@@ -636,7 +641,7 @@ void VaapiVideoEncodeAccelerator::EncodePendingInputs() {
}
void VaapiVideoEncodeAccelerator::UseOutputBitstreamBuffer(
- const BitstreamBuffer& buffer) {
+ BitstreamBuffer buffer) {
DVLOGF(4) << "id: " << buffer.id();
DCHECK(child_task_runner_->BelongsToCurrentThread());
@@ -645,7 +650,8 @@ void VaapiVideoEncodeAccelerator::UseOutputBitstreamBuffer(
return;
}
- auto buffer_ref = std::make_unique<BitstreamBufferRef>(buffer.id(), buffer);
+ auto buffer_ref =
+ std::make_unique<BitstreamBufferRef>(buffer.id(), std::move(buffer));
encoder_thread_task_runner_->PostTask(
FROM_HERE,
@@ -1229,7 +1235,8 @@ bool VaapiVideoEncodeAccelerator::VP9Accelerator::SubmitFrameParameters(
AcceleratedVideoEncoder::EncodeJob* job,
const VP9Encoder::EncodeParams& encode_params,
scoped_refptr<VP9Picture> pic,
- const Vp9ReferenceFrameVector& ref_frames) {
+ const Vp9ReferenceFrameVector& ref_frames,
+ const std::array<bool, kVp9NumRefsPerFrame>& ref_frames_used) {
VAEncSequenceParameterBufferVP9 seq_param = {};
const auto& frame_header = pic->frame_hdr;
@@ -1265,11 +1272,17 @@ bool VaapiVideoEncodeAccelerator::VP9Accelerator::SubmitFrameParameters(
if (frame_header->IsKeyframe()) {
pic_param.ref_flags.bits.force_kf = true;
} else {
- // use golden, altref and last for prediction
- pic_param.ref_flags.bits.ref_frame_ctrl_l0 = 0x07;
- pic_param.ref_flags.bits.ref_last_idx = frame_header->ref_frame_idx[0];
- pic_param.ref_flags.bits.ref_gf_idx = frame_header->ref_frame_idx[1];
- pic_param.ref_flags.bits.ref_arf_idx = frame_header->ref_frame_idx[2];
+ for (size_t i = 0; i < kVp9NumRefsPerFrame; i++) {
+ if (ref_frames_used[i])
+ pic_param.ref_flags.bits.ref_frame_ctrl_l0 |= (1 << i);
+ }
+
+ if (ref_frames_used[0])
+ pic_param.ref_flags.bits.ref_last_idx = frame_header->ref_frame_idx[0];
+ if (ref_frames_used[1])
+ pic_param.ref_flags.bits.ref_gf_idx = frame_header->ref_frame_idx[1];
+ if (ref_frames_used[2])
+ pic_param.ref_flags.bits.ref_arf_idx = frame_header->ref_frame_idx[2];
}
pic_param.pic_flags.bits.frame_type = frame_header->frame_type;
@@ -1295,15 +1308,7 @@ bool VaapiVideoEncodeAccelerator::VP9Accelerator::SubmitFrameParameters(
pic_param.luma_dc_qindex_delta = frame_header->quant_params.delta_q_y_dc;
pic_param.chroma_ac_qindex_delta = frame_header->quant_params.delta_q_uv_ac;
pic_param.chroma_dc_qindex_delta = frame_header->quant_params.delta_q_uv_dc;
-
- // TODO(crbug.com/924786): Unlike the current vp8 implementation,
- // SegmentationParams and LoopFilterParams are the part of Parser structure
- // rather than included them in FrameHeader. So, for now, we are not taking
- // segmentation and loopfilter related parameter from frame_hdr. But since the
- // filter level may affect on quality at lower bitrates, we set a constant
- // value (== 10) which is what other VA-API implementations like libyami and
- // gstreamer-vaapi are using.
- pic_param.filter_level = 10;
+ pic_param.filter_level = frame_header->loop_filter.level;
pic_param.log2_tile_rows = frame_header->tile_rows_log2;
pic_param.log2_tile_columns = frame_header->tile_cols_log2;
diff --git a/chromium/media/gpu/vaapi/vaapi_video_encode_accelerator.h b/chromium/media/gpu/vaapi/vaapi_video_encode_accelerator.h
index 16e5891f898..9aef0e07204 100644
--- a/chromium/media/gpu/vaapi/vaapi_video_encode_accelerator.h
+++ b/chromium/media/gpu/vaapi/vaapi_video_encode_accelerator.h
@@ -37,9 +37,8 @@ class MEDIA_GPU_EXPORT VaapiVideoEncodeAccelerator
// VideoEncodeAccelerator implementation.
VideoEncodeAccelerator::SupportedProfiles GetSupportedProfiles() override;
bool Initialize(const Config& config, Client* client) override;
- void Encode(const scoped_refptr<VideoFrame>& frame,
- bool force_keyframe) override;
- void UseOutputBitstreamBuffer(const BitstreamBuffer& buffer) override;
+ void Encode(scoped_refptr<VideoFrame> frame, bool force_keyframe) override;
+ void UseOutputBitstreamBuffer(BitstreamBuffer buffer) override;
void RequestEncodingParametersChange(uint32_t bitrate,
uint32_t framerate) override;
void RequestEncodingParametersChange(
@@ -186,7 +185,7 @@ class MEDIA_GPU_EXPORT VaapiVideoEncodeAccelerator
std::vector<VABufferID> available_va_buffer_ids_;
// Callback via which finished VA surfaces are returned to us.
- VASurface::ReleaseCB va_surface_release_cb_;
+ base::RepeatingCallback<void(VASurfaceID)> va_surface_release_cb_;
// Queue of input frames to be encoded.
base::queue<std::unique_ptr<InputFrameRef>> input_queue_;
diff --git a/chromium/media/gpu/vaapi/vaapi_vp8_accelerator.h b/chromium/media/gpu/vaapi/vaapi_vp8_accelerator.h
index a02b3604730..a3bb9f04f91 100644
--- a/chromium/media/gpu/vaapi/vaapi_vp8_accelerator.h
+++ b/chromium/media/gpu/vaapi/vaapi_vp8_accelerator.h
@@ -6,8 +6,8 @@
#define MEDIA_GPU_VAAPI_VAAPI_VP8_ACCELERATOR_H_
#include "base/sequence_checker.h"
-#include "media/filters/vp8_parser.h"
#include "media/gpu/vp8_decoder.h"
+#include "media/parsers/vp8_parser.h"
namespace media {
diff --git a/chromium/media/gpu/vaapi/vaapi_vp9_accelerator.cc b/chromium/media/gpu/vaapi/vaapi_vp9_accelerator.cc
index 049322112b7..8ab85af30d6 100644
--- a/chromium/media/gpu/vaapi/vaapi_vp9_accelerator.cc
+++ b/chromium/media/gpu/vaapi/vaapi_vp9_accelerator.cc
@@ -39,7 +39,7 @@ scoped_refptr<VP9Picture> VaapiVP9Accelerator::CreateVP9Picture() {
}
bool VaapiVP9Accelerator::SubmitDecode(
- const scoped_refptr<VP9Picture>& pic,
+ scoped_refptr<VP9Picture> pic,
const Vp9SegmentationParams& seg,
const Vp9LoopFilterParams& lf,
const Vp9ReferenceFrameVector& ref_frames,
@@ -158,7 +158,7 @@ bool VaapiVP9Accelerator::SubmitDecode(
pic->AsVaapiVP9Picture()->va_surface()->id());
}
-bool VaapiVP9Accelerator::OutputPicture(const scoped_refptr<VP9Picture>& pic) {
+bool VaapiVP9Accelerator::OutputPicture(scoped_refptr<VP9Picture> pic) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
const VaapiVP9Picture* vaapi_pic = pic->AsVaapiVP9Picture();
@@ -172,7 +172,7 @@ bool VaapiVP9Accelerator::IsFrameContextRequired() const {
return false;
}
-bool VaapiVP9Accelerator::GetFrameContext(const scoped_refptr<VP9Picture>& pic,
+bool VaapiVP9Accelerator::GetFrameContext(scoped_refptr<VP9Picture> pic,
Vp9FrameContext* frame_ctx) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
NOTIMPLEMENTED() << "Frame context update not supported";
diff --git a/chromium/media/gpu/vaapi/vaapi_vp9_accelerator.h b/chromium/media/gpu/vaapi/vaapi_vp9_accelerator.h
index 7fc9969ca05..b7e553cb52a 100644
--- a/chromium/media/gpu/vaapi/vaapi_vp9_accelerator.h
+++ b/chromium/media/gpu/vaapi/vaapi_vp9_accelerator.h
@@ -24,15 +24,15 @@ class VaapiVP9Accelerator : public VP9Decoder::VP9Accelerator {
// VP9Decoder::VP9Accelerator implementation.
scoped_refptr<VP9Picture> CreateVP9Picture() override;
- bool SubmitDecode(const scoped_refptr<VP9Picture>& pic,
+ bool SubmitDecode(scoped_refptr<VP9Picture> pic,
const Vp9SegmentationParams& seg,
const Vp9LoopFilterParams& lf,
const Vp9ReferenceFrameVector& reference_frames,
const base::Closure& done_cb) override;
- bool OutputPicture(const scoped_refptr<VP9Picture>& pic) override;
+ bool OutputPicture(scoped_refptr<VP9Picture> pic) override;
bool IsFrameContextRequired() const override;
- bool GetFrameContext(const scoped_refptr<VP9Picture>& pic,
+ bool GetFrameContext(scoped_refptr<VP9Picture> pic,
Vp9FrameContext* frame_ctx) override;
private:
diff --git a/chromium/media/gpu/vaapi/vaapi_wrapper.cc b/chromium/media/gpu/vaapi/vaapi_wrapper.cc
index f6008d28861..f66ede4e80f 100644
--- a/chromium/media/gpu/vaapi/vaapi_wrapper.cc
+++ b/chromium/media/gpu/vaapi/vaapi_wrapper.cc
@@ -31,6 +31,7 @@
#include "build/build_config.h"
#include "media/base/media_switches.h"
+#include "media/base/video_frame.h"
#include "media/base/video_types.h"
// Auto-generated for dlopen libva libraries
@@ -109,23 +110,6 @@ uint32_t BufferFormatToVAFourCC(gfx::BufferFormat fmt) {
}
}
-uint32_t BufferFormatToVARTFormat(gfx::BufferFormat fmt) {
- switch (fmt) {
- case gfx::BufferFormat::UYVY_422:
- return VA_RT_FORMAT_YUV422;
- case gfx::BufferFormat::BGRX_8888:
- case gfx::BufferFormat::BGRA_8888:
- case gfx::BufferFormat::RGBX_8888:
- return VA_RT_FORMAT_RGB32;
- case gfx::BufferFormat::YVU_420:
- case gfx::BufferFormat::YUV_420_BIPLANAR:
- return VA_RT_FORMAT_YUV420;
- default:
- NOTREACHED();
- return 0;
- }
-}
-
} // namespace
namespace media {
@@ -1206,6 +1190,24 @@ VaapiWrapper::GetSupportedImageFormatsForTesting() {
return VASupportedImageFormats::Get().GetSupportedImageFormats();
}
+// static
+uint32_t VaapiWrapper::BufferFormatToVARTFormat(gfx::BufferFormat fmt) {
+ switch (fmt) {
+ case gfx::BufferFormat::UYVY_422:
+ return VA_RT_FORMAT_YUV422;
+ case gfx::BufferFormat::BGRX_8888:
+ case gfx::BufferFormat::BGRA_8888:
+ case gfx::BufferFormat::RGBX_8888:
+ return VA_RT_FORMAT_RGB32;
+ case gfx::BufferFormat::YVU_420:
+ case gfx::BufferFormat::YUV_420_BIPLANAR:
+ return VA_RT_FORMAT_YUV420;
+ default:
+ NOTREACHED();
+ return 0;
+ }
+}
+
bool VaapiWrapper::CreateContextAndSurfaces(
unsigned int va_format,
const gfx::Size& size,
@@ -1287,19 +1289,16 @@ void VaapiWrapper::DestroyContextAndSurfaces() {
scoped_refptr<VASurface> VaapiWrapper::CreateVASurfaceForPixmap(
const scoped_refptr<gfx::NativePixmap>& pixmap) {
- // Create a VASurface for a NativePixmap by importing the underlying dmabufs.
- VASurfaceAttribExternalBuffers va_attrib_extbuf;
- memset(&va_attrib_extbuf, 0, sizeof(va_attrib_extbuf));
+ const gfx::BufferFormat buffer_format = pixmap->GetBufferFormat();
- va_attrib_extbuf.pixel_format =
- BufferFormatToVAFourCC(pixmap->GetBufferFormat());
- gfx::Size size = pixmap->GetBufferSize();
+ // Create a VASurface for a NativePixmap by importing the underlying dmabufs.
+ const gfx::Size size = pixmap->GetBufferSize();
+ VASurfaceAttribExternalBuffers va_attrib_extbuf{};
+ va_attrib_extbuf.pixel_format = BufferFormatToVAFourCC(buffer_format);
va_attrib_extbuf.width = size.width();
va_attrib_extbuf.height = size.height();
- size_t num_planes =
- gfx::NumberOfPlanesForBufferFormat(pixmap->GetBufferFormat());
- std::vector<uintptr_t> fds(num_planes);
+ const size_t num_planes = gfx::NumberOfPlanesForBufferFormat(buffer_format);
for (size_t i = 0; i < num_planes; ++i) {
va_attrib_extbuf.pitches[i] = pixmap->GetDmaBufPitch(i);
va_attrib_extbuf.offsets[i] = pixmap->GetDmaBufOffset(i);
@@ -1318,8 +1317,8 @@ scoped_refptr<VASurface> VaapiWrapper::CreateVASurfaceForPixmap(
va_attrib_extbuf.buffers = &fd;
va_attrib_extbuf.num_buffers = 1u;
- va_attrib_extbuf.flags = 0;
- va_attrib_extbuf.private_data = NULL;
+ DCHECK_EQ(va_attrib_extbuf.flags, 0u);
+ DCHECK_EQ(va_attrib_extbuf.private_data, nullptr);
std::vector<VASurfaceAttrib> va_attribs(2);
@@ -1333,8 +1332,7 @@ scoped_refptr<VASurface> VaapiWrapper::CreateVASurfaceForPixmap(
va_attribs[1].value.type = VAGenericValueTypePointer;
va_attribs[1].value.value.p = &va_attrib_extbuf;
- const unsigned int va_format =
- BufferFormatToVARTFormat(pixmap->GetBufferFormat());
+ const unsigned int va_format = BufferFormatToVARTFormat(buffer_format);
VASurfaceID va_surface_id = VA_INVALID_ID;
{
@@ -1345,11 +1343,8 @@ scoped_refptr<VASurface> VaapiWrapper::CreateVASurfaceForPixmap(
VA_SUCCESS_OR_RETURN(va_res, "Failed to create unowned VASurface", nullptr);
}
- // It's safe to use Unretained() here, because the caller takes care of the
- // destruction order. All the surfaces will be destroyed before VaapiWrapper.
- return new VASurface(
- va_surface_id, size, va_format,
- base::Bind(&VaapiWrapper::DestroySurface, base::Unretained(this)));
+ return new VASurface(va_surface_id, size, va_format,
+ base::BindOnce(&VaapiWrapper::DestroySurface, this));
}
bool VaapiWrapper::SubmitBuffer(VABufferType va_buffer_type,
@@ -1476,12 +1471,11 @@ std::unique_ptr<ScopedVAImage> VaapiWrapper::CreateVaImage(
return scoped_image->IsValid() ? std::move(scoped_image) : nullptr;
}
-bool VaapiWrapper::UploadVideoFrameToSurface(
- const scoped_refptr<VideoFrame>& frame,
- VASurfaceID va_surface_id) {
+bool VaapiWrapper::UploadVideoFrameToSurface(const VideoFrame& frame,
+ VASurfaceID va_surface_id) {
base::AutoLock auto_lock(*va_lock_);
- const gfx::Size size = frame->coded_size();
+ const gfx::Size size = frame.coded_size();
bool va_create_put_fallback = false;
VAImage image;
VAStatus va_res = vaDeriveImage(va_display_, va_surface_id, &image);
@@ -1518,30 +1512,28 @@ bool VaapiWrapper::UploadVideoFrameToSurface(
int ret = 0;
{
base::AutoUnlock auto_unlock(*va_lock_);
- switch (frame->format()) {
+ switch (frame.format()) {
case PIXEL_FORMAT_I420:
- ret = libyuv::I420ToNV12(frame->data(VideoFrame::kYPlane),
- frame->stride(VideoFrame::kYPlane),
- frame->data(VideoFrame::kUPlane),
- frame->stride(VideoFrame::kUPlane),
- frame->data(VideoFrame::kVPlane),
- frame->stride(VideoFrame::kVPlane),
- image_ptr + image.offsets[0], image.pitches[0],
- image_ptr + image.offsets[1], image.pitches[1],
- image.width, image.height);
+ ret = libyuv::I420ToNV12(
+ frame.data(VideoFrame::kYPlane), frame.stride(VideoFrame::kYPlane),
+ frame.data(VideoFrame::kUPlane), frame.stride(VideoFrame::kUPlane),
+ frame.data(VideoFrame::kVPlane), frame.stride(VideoFrame::kVPlane),
+ image_ptr + image.offsets[0], image.pitches[0],
+ image_ptr + image.offsets[1], image.pitches[1], image.width,
+ image.height);
break;
case PIXEL_FORMAT_NV12:
- libyuv::CopyPlane(frame->data(VideoFrame::kYPlane),
- frame->stride(VideoFrame::kYPlane),
+ libyuv::CopyPlane(frame.data(VideoFrame::kYPlane),
+ frame.stride(VideoFrame::kYPlane),
image_ptr + image.offsets[0], image.pitches[0],
image.width, image.height);
- libyuv::CopyPlane(frame->data(VideoFrame::kUVPlane),
- frame->stride(VideoFrame::kUVPlane),
+ libyuv::CopyPlane(frame.data(VideoFrame::kUVPlane),
+ frame.stride(VideoFrame::kUVPlane),
image_ptr + image.offsets[1], image.pitches[1],
image.width, image.height / 2);
break;
default:
- LOG(ERROR) << "Unsupported pixel format: " << frame->format();
+ LOG(ERROR) << "Unsupported pixel format: " << frame.format();
return false;
}
}
diff --git a/chromium/media/gpu/vaapi/vaapi_wrapper.h b/chromium/media/gpu/vaapi/vaapi_wrapper.h
index d9e29195d8d..c2d0cda1111 100644
--- a/chromium/media/gpu/vaapi/vaapi_wrapper.h
+++ b/chromium/media/gpu/vaapi/vaapi_wrapper.h
@@ -20,15 +20,13 @@
#include <va/va.h>
#include "base/files/file.h"
+#include "base/gtest_prod_util.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/synchronization/lock.h"
#include "base/thread_annotations.h"
-#include "media/base/video_decoder_config.h"
-#include "media/base/video_frame.h"
#include "media/gpu/media_gpu_export.h"
#include "media/gpu/vaapi/va_surface.h"
-#include "media/video/mjpeg_decode_accelerator.h"
#include "media/video/video_decode_accelerator.h"
#include "media/video/video_encode_accelerator.h"
#include "ui/gfx/geometry/size.h"
@@ -44,6 +42,7 @@ class NativePixmap;
namespace media {
class ScopedVAImage;
+class VideoFrame;
// This class handles VA-API calls and ensures proper locking of VA-API calls
// to libva, the userspace shim to the HW codec driver. libva is not
@@ -141,6 +140,8 @@ class MEDIA_GPU_EXPORT VaapiWrapper
// Returns the list of VAImageFormats supported by the driver.
static const std::vector<VAImageFormat>& GetSupportedImageFormatsForTesting();
+ static uint32_t BufferFormatToVARTFormat(gfx::BufferFormat fmt);
+
// Creates |num_surfaces| backing surfaces in driver for VASurfaces of
// |va_format|, each of size |size| and initializes |va_context_id_| with
// |format| and |size|. Returns true when successful, with the created IDs in
@@ -221,7 +222,7 @@ class MEDIA_GPU_EXPORT VaapiWrapper
const gfx::Size& size);
// Upload contents of |frame| into |va_surface_id| for encode.
- bool UploadVideoFrameToSurface(const scoped_refptr<VideoFrame>& frame,
+ bool UploadVideoFrameToSurface(const VideoFrame& frame,
VASurfaceID va_surface_id);
// Create a buffer of |size| bytes to be used as encode output.
@@ -276,7 +277,10 @@ class MEDIA_GPU_EXPORT VaapiWrapper
private:
friend class base::RefCountedThreadSafe<VaapiWrapper>;
- friend class VaapiJpegDecoderTest;
+
+ FRIEND_TEST_ALL_PREFIXES(VaapiUtilsTest, ScopedVAImage);
+ FRIEND_TEST_ALL_PREFIXES(VaapiUtilsTest, BadScopedVAImage);
+ FRIEND_TEST_ALL_PREFIXES(VaapiUtilsTest, BadScopedVABufferMapping);
bool Initialize(CodecMode mode, VAProfile va_profile);
void Deinitialize();
diff --git a/chromium/media/gpu/vaapi/vp8_encoder.h b/chromium/media/gpu/vaapi/vp8_encoder.h
index 5fb94b3f258..9395461c57a 100644
--- a/chromium/media/gpu/vaapi/vp8_encoder.h
+++ b/chromium/media/gpu/vaapi/vp8_encoder.h
@@ -11,10 +11,10 @@
#include "base/macros.h"
#include "base/sequence_checker.h"
#include "media/base/video_bitrate_allocation.h"
-#include "media/filters/vp8_parser.h"
#include "media/gpu/vaapi/accelerated_video_encoder.h"
#include "media/gpu/vp8_picture.h"
#include "media/gpu/vp8_reference_frame_vector.h"
+#include "media/parsers/vp8_parser.h"
namespace media {
diff --git a/chromium/media/gpu/vaapi/vp9_encoder.cc b/chromium/media/gpu/vaapi/vp9_encoder.cc
index 41d53675d15..36d013d0153 100644
--- a/chromium/media/gpu/vaapi/vp9_encoder.cc
+++ b/chromium/media/gpu/vaapi/vp9_encoder.cc
@@ -21,6 +21,11 @@ constexpr int kCPBWindowSizeMs = 500;
constexpr int kMinQP = 4;
constexpr int kMaxQP = 112;
constexpr int kDefaultQP = (3 * kMinQP + kMaxQP) / 4;
+
+// filter level may affect on quality at lower bitrates; for now,
+// we set a constant value (== 10) which is what other VA-API
+// implementations like libyami and gstreamer-vaapi are using.
+constexpr uint8_t kDefaultLfLevel = 10;
} // namespace
VP9Encoder::EncodeParams::EncodeParams()
@@ -112,8 +117,12 @@ bool VP9Encoder::PrepareEncodeJob(EncodeJob* encode_job) {
*picture->frame_hdr = current_frame_hdr_;
+ // Use last, golden and altref for references.
+ constexpr std::array<bool, kVp9NumRefsPerFrame> ref_frames_used = {true, true,
+ true};
if (!accelerator_->SubmitFrameParameters(encode_job, current_params_, picture,
- reference_frames_)) {
+ reference_frames_,
+ ref_frames_used)) {
LOG(ERROR) << "Failed submitting frame parameters";
return false;
}
@@ -157,7 +166,7 @@ void VP9Encoder::InitializeFrameHeader() {
DCHECK_EQ(current_params_.initial_qp, kDefaultQP);
constexpr uint8_t kDefaultQPACQIndex = 24;
current_frame_hdr_.quant_params.base_q_idx = kDefaultQPACQIndex;
-
+ current_frame_hdr_.loop_filter.level = kDefaultLfLevel;
current_frame_hdr_.show_frame = true;
}
diff --git a/chromium/media/gpu/vaapi/vp9_encoder.h b/chromium/media/gpu/vaapi/vp9_encoder.h
index 60e2cf2af21..2ba6094a873 100644
--- a/chromium/media/gpu/vaapi/vp9_encoder.h
+++ b/chromium/media/gpu/vaapi/vp9_encoder.h
@@ -59,12 +59,16 @@ class VP9Encoder : public AcceleratedVideoEncoder {
// Initializes |job| to use the provided |encode_params| as its parameters,
// and |pic| as the target, as well as |ref_frames| as reference frames for
- // it. Returns true on success.
+ // it. |ref_frames_used| is to specify whether each of |ref_frame_idx| of
+ // VP9FrameHeader in |pic| is used. If |ref_frames_used[i]| is true,
+ // ref_frame_idx[i] will be used as a reference frame. Returns true on
+ // success.
virtual bool SubmitFrameParameters(
EncodeJob* job,
const VP9Encoder::EncodeParams& encode_params,
scoped_refptr<VP9Picture> pic,
- const Vp9ReferenceFrameVector& ref_frames) = 0;
+ const Vp9ReferenceFrameVector& ref_frames,
+ const std::array<bool, kVp9NumRefsPerFrame>& ref_frames_used) = 0;
DISALLOW_COPY_AND_ASSIGN(Accelerator);
};
diff --git a/chromium/media/gpu/video_decode_accelerator_perf_tests.cc b/chromium/media/gpu/video_decode_accelerator_perf_tests.cc
index 27f35597eb2..54aec2c388d 100644
--- a/chromium/media/gpu/video_decode_accelerator_perf_tests.cc
+++ b/chromium/media/gpu/video_decode_accelerator_perf_tests.cc
@@ -56,9 +56,12 @@ struct PerformanceMetrics {
// Total measurement duration.
base::TimeDelta total_duration_;
// The number of frames decoded.
- size_t frame_decoded_count_ = 0;
+ size_t frames_decoded_ = 0;
// The overall number of frames decoded per second.
double frames_per_second_ = 0.0;
+ // The number of frames dropped because of the decoder running behind, only
+ // relevant for capped performance tests.
+ size_t frames_dropped_ = 0;
// The average time between subsequent frame deliveries.
double avg_frame_delivery_time_ms_ = 0.0;
// The median time between decode start and frame delivery.
@@ -71,6 +74,11 @@ struct PerformanceMetrics {
// enabled as this affects test results.
class PerformanceEvaluator : public VideoFrameProcessor {
public:
+ // Create a new performance evaluator. The caller should makes sure
+ // |frame_renderer| outlives the performance evaluator.
+ explicit PerformanceEvaluator(const FrameRendererDummy* const frame_renderer)
+ : frame_renderer_(frame_renderer) {}
+
// Interface VideoFrameProcessor
void ProcessVideoFrame(scoped_refptr<const VideoFrame> video_frame,
size_t frame_index) override;
@@ -97,6 +105,10 @@ class PerformanceEvaluator : public VideoFrameProcessor {
// Collection of various performance metrics.
PerformanceMetrics perf_metrics_;
+
+ // Frame renderer used to get the dropped frame rate, owned by the creator of
+ // the performance evaluator.
+ const FrameRendererDummy* const frame_renderer_;
};
void PerformanceEvaluator::ProcessVideoFrame(
@@ -111,7 +123,7 @@ void PerformanceEvaluator::ProcessVideoFrame(
base::TimeDelta decode_time = now.since_origin() - video_frame->timestamp();
frame_decode_times_.push_back(decode_time.InMillisecondsF());
- perf_metrics_.frame_decoded_count_++;
+ perf_metrics_.frames_decoded_++;
}
void PerformanceEvaluator::StartMeasuring() {
@@ -122,12 +134,13 @@ void PerformanceEvaluator::StartMeasuring() {
void PerformanceEvaluator::StopMeasuring() {
end_time_ = base::TimeTicks::Now();
perf_metrics_.total_duration_ = end_time_ - start_time_;
- perf_metrics_.frames_per_second_ = perf_metrics_.frame_decoded_count_ /
+ perf_metrics_.frames_per_second_ = perf_metrics_.frames_decoded_ /
perf_metrics_.total_duration_.InSecondsF();
+ perf_metrics_.frames_dropped_ = frame_renderer_->FramesDropped();
perf_metrics_.avg_frame_delivery_time_ms_ =
perf_metrics_.total_duration_.InMillisecondsF() /
- perf_metrics_.frame_decoded_count_;
+ perf_metrics_.frames_decoded_;
std::sort(frame_decode_times_.begin(), frame_decode_times_.end());
size_t median_index = frame_decode_times_.size() / 2;
@@ -138,10 +151,11 @@ void PerformanceEvaluator::StopMeasuring() {
frame_decode_times_[median_index]) /
2.0;
- VLOG(0) << "Number of frames decoded: " << perf_metrics_.frame_decoded_count_;
- VLOG(0) << "Total duration: "
+ VLOG(0) << "Frames decoded: " << perf_metrics_.frames_decoded_;
+ VLOG(0) << "Total duration: "
<< perf_metrics_.total_duration_.InMillisecondsF() << "ms";
- VLOG(0) << "FPS: " << perf_metrics_.frames_per_second_;
+ VLOG(0) << "FPS: " << perf_metrics_.frames_per_second_;
+ VLOG(0) << "Frames Dropped: " << perf_metrics_.frames_dropped_;
VLOG(0) << "Avg. frame delivery time: "
<< perf_metrics_.avg_frame_delivery_time_ms_ << "ms";
VLOG(0) << "Median frame decode time: "
@@ -150,11 +164,12 @@ void PerformanceEvaluator::StopMeasuring() {
void PerformanceEvaluator::WriteMetricsToFile() const {
std::string str = base::StringPrintf(
- "Number of frames decoded: %zu\nTotal duration: %fms\nFPS: %f\nAvg. "
- "frame delivery time: %fms\nMedian frame decode time: %fms\n",
- perf_metrics_.frame_decoded_count_,
+ "Frames decoded: %zu\nTotal duration: %fms\nFPS: %f\n"
+ "Frames dropped: %zu\nAvg. frame delivery time: %fms\n"
+ "Median frame decode time: %fms\n",
+ perf_metrics_.frames_decoded_,
perf_metrics_.total_duration_.InMillisecondsF(),
- perf_metrics_.frames_per_second_,
+ perf_metrics_.frames_per_second_, perf_metrics_.frames_dropped_,
perf_metrics_.avg_frame_delivery_time_ms_,
perf_metrics_.median_frame_decode_time_ms_);
@@ -189,10 +204,28 @@ void PerformanceEvaluator::WriteMetricsToFile() const {
// Video decode test class. Performs setup and teardown for each single test.
class VideoDecoderTest : public ::testing::Test {
public:
- std::unique_ptr<VideoPlayer> CreateVideoPlayer(const Video* video) {
+ // Create a new video player instance. |render_frame_rate| is the rate at
+ // which the video player will simulate rendering frames, if 0 no rendering is
+ // simulated. The |vsync_rate| is used during simulated rendering, if 0 Vsync
+ // is disabled.
+ std::unique_ptr<VideoPlayer> CreateVideoPlayer(const Video* video,
+ uint32_t render_frame_rate = 0,
+ uint32_t vsync_rate = 0) {
LOG_ASSERT(video);
+
+ // Create dummy frame renderer, simulates rendering at specified frame rate.
+ base::TimeDelta frame_duration;
+ base::TimeDelta vsync_interval_duration;
+ if (render_frame_rate > 0) {
+ frame_duration = base::TimeDelta::FromSeconds(1) / render_frame_rate;
+ vsync_interval_duration = base::TimeDelta::FromSeconds(1) / vsync_rate;
+ }
+ auto frame_renderer =
+ FrameRendererDummy::Create(frame_duration, vsync_interval_duration);
+
std::vector<std::unique_ptr<VideoFrameProcessor>> frame_processors;
- auto performance_evaluator = std::make_unique<PerformanceEvaluator>();
+ auto performance_evaluator =
+ std::make_unique<PerformanceEvaluator>(frame_renderer.get());
performance_evaluator_ = performance_evaluator.get();
frame_processors.push_back(std::move(performance_evaluator));
@@ -200,7 +233,7 @@ class VideoDecoderTest : public ::testing::Test {
VideoDecoderClientConfig config;
config.use_vd = g_env->UseVD();
- return VideoPlayer::Create(video, FrameRendererDummy::Create(),
+ return VideoPlayer::Create(video, std::move(frame_renderer),
std::move(frame_processors), config);
}
@@ -209,9 +242,9 @@ class VideoDecoderTest : public ::testing::Test {
} // namespace
-// Play video from start to end while measuring performance.
-// TODO(dstaessens@) Add a test to measure capped decode performance, measuring
-// the number of frames dropped.
+// Play video from start to end while measuring uncapped performance. This test
+// will decode a video as fast as possible, and gives an idea about the maximum
+// output of the decoder.
TEST_F(VideoDecoderTest, MeasureUncappedPerformance) {
auto tvp = CreateVideoPlayer(g_env->Video());
@@ -225,6 +258,23 @@ TEST_F(VideoDecoderTest, MeasureUncappedPerformance) {
EXPECT_EQ(tvp->GetFrameDecodedCount(), g_env->Video()->NumFrames());
}
+// Play video from start to end while measuring capped performance. This test
+// will simulate rendering the video at its actual frame rate, and will
+// calculate the number of frames that were dropped. Vsync is enabled at 60 FPS.
+TEST_F(VideoDecoderTest, MeasureCappedPerformance) {
+ auto tvp = CreateVideoPlayer(g_env->Video(), g_env->Video()->FrameRate(), 60);
+
+ performance_evaluator_->StartMeasuring();
+ tvp->Play();
+ EXPECT_TRUE(tvp->WaitForFlushDone());
+ tvp->WaitForRenderer();
+ performance_evaluator_->StopMeasuring();
+ performance_evaluator_->WriteMetricsToFile();
+
+ EXPECT_EQ(tvp->GetFlushDoneCount(), 1u);
+ EXPECT_EQ(tvp->GetFrameDecodedCount(), g_env->Video()->NumFrames());
+}
+
} // namespace test
} // namespace media
diff --git a/chromium/media/gpu/video_decode_accelerator_unittest.cc b/chromium/media/gpu/video_decode_accelerator_unittest.cc
index 56b88d3b70e..f9b23614085 100644
--- a/chromium/media/gpu/video_decode_accelerator_unittest.cc
+++ b/chromium/media/gpu/video_decode_accelerator_unittest.cc
@@ -868,17 +868,13 @@ void GLRenderingVDAClient::DecodeNextFragment() {
base::SharedMemory shm;
LOG_ASSERT(shm.CreateAndMapAnonymous(next_fragment_size));
memcpy(shm.memory(), next_fragment_bytes.data(), next_fragment_size);
- base::SharedMemoryHandle dup_handle = shm.handle().Duplicate();
- LOG_ASSERT(dup_handle.IsValid());
- // TODO(erikchen): This may leak the SharedMemoryHandle.
- // https://crbug.com/640840.
- BitstreamBuffer bitstream_buffer(next_bitstream_buffer_id_, dup_handle,
- next_fragment_size);
+ BitstreamBuffer bitstream_buffer(next_bitstream_buffer_id_, shm.handle(),
+ false /* read_only */, next_fragment_size);
decode_start_time_[next_bitstream_buffer_id_] = base::TimeTicks::Now();
// Mask against 30 bits, to avoid (undefined) wraparound on signed integer.
next_bitstream_buffer_id_ = (next_bitstream_buffer_id_ + 1) & 0x3FFFFFFF;
- decoder_->Decode(bitstream_buffer);
+ decoder_->Decode(std::move(bitstream_buffer));
++outstanding_decodes_;
if (IsLastPlayThrough() &&
-config_.delete_decoder_state == next_bitstream_buffer_id_) {
@@ -1756,7 +1752,8 @@ int main(int argc, char** argv) {
// Needed to enable DVLOG through --vmodule.
logging::LoggingSettings settings;
- settings.logging_dest = logging::LOG_TO_SYSTEM_DEBUG_LOG;
+ settings.logging_dest =
+ logging::LOG_TO_SYSTEM_DEBUG_LOG | logging::LOG_TO_STDERR;
LOG_ASSERT(logging::InitLogging(settings));
const base::CommandLine* cmd_line = base::CommandLine::ForCurrentProcess();
diff --git a/chromium/media/gpu/video_encode_accelerator_unittest.cc b/chromium/media/gpu/video_encode_accelerator_unittest.cc
index 72984ea8622..bc1c9c385e2 100644
--- a/chromium/media/gpu/video_encode_accelerator_unittest.cc
+++ b/chromium/media/gpu/video_encode_accelerator_unittest.cc
@@ -44,27 +44,32 @@
#include "media/base/bitstream_buffer.h"
#include "media/base/cdm_context.h"
#include "media/base/decoder_buffer.h"
+#include "media/base/media.h"
#include "media/base/media_switches.h"
#include "media/base/media_util.h"
#include "media/base/test_data_util.h"
#include "media/base/video_decoder.h"
#include "media/base/video_frame.h"
+#include "media/ffmpeg/ffmpeg_common.h"
#include "media/filters/ffmpeg_video_decoder.h"
+#include "media/filters/in_memory_url_protocol.h"
#include "media/filters/ivf_parser.h"
-#include "media/filters/vp8_parser.h"
#include "media/filters/vp9_parser.h"
+#include "media/filters/vpx_video_decoder.h"
#include "media/gpu/buildflags.h"
#include "media/gpu/gpu_video_encode_accelerator_factory.h"
#include "media/gpu/h264_decoder.h"
#include "media/gpu/h264_dpb.h"
#include "media/gpu/test/video_accelerator_unittest_helpers.h"
#include "media/gpu/test/video_frame_helpers.h"
+#include "media/parsers/vp8_parser.h"
#include "media/video/fake_video_encode_accelerator.h"
#include "media/video/h264_level_limits.h"
#include "media/video/h264_parser.h"
#include "media/video/video_encode_accelerator.h"
#include "mojo/core/embedder/embedder.h"
#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/libyuv/include/libyuv/planar_functions.h"
#if BUILDFLAG(USE_VAAPI)
#include "media/gpu/vaapi/vaapi_wrapper.h"
@@ -159,25 +164,25 @@ const unsigned int kFlushTimeoutMs = 2000;
// h264_parser.h. Use kDefaultH264Level if not provided.
#if defined(OS_CHROMEOS) || defined(OS_LINUX)
-const char* g_default_in_filename = "bear_320x192_40frames.yuv";
-const base::FilePath::CharType* g_default_in_parameters =
+const char kDefaultInputFileName[] = "bear_320x192_40frames.yuv.webm";
+const base::FilePath::CharType kDefaultInputParameters[] =
FILE_PATH_LITERAL(":320:192:1:out.h264:200000");
#elif defined(OS_MACOSX)
// VideoToolbox falls back to SW encoder with resolutions lower than this.
-const char* g_default_in_filename = "bear_640x384_40frames.yuv";
-const base::FilePath::CharType* g_default_in_parameters =
+const char kDefaultInputFileName[] = "bear_640x384_40frames.yuv.webm";
+const base::FilePath::CharType kDefaultInputParameters[] =
FILE_PATH_LITERAL(":640:384:1:out.h264:200000");
#elif defined(OS_WIN)
-const char* g_default_in_filename = "bear_320x192_40frames.yuv";
-const base::FilePath::CharType* g_default_in_parameters =
+const char kDefaultInputFileName[] = "bear_320x192_40frames.yuv.webm";
+const base::FilePath::CharType kDefaultInputParameters[] =
FILE_PATH_LITERAL(",320,192,0,out.h264,200000");
#endif // defined(OS_CHROMEOS) || defined(OS_LINUX)
// Default params that can be overriden via command line.
std::unique_ptr<base::FilePath::StringType> g_test_stream_data(
new base::FilePath::StringType(
- media::GetTestDataFilePath(media::g_default_in_filename).value() +
- media::g_default_in_parameters));
+ media::GetTestDataFilePath(media::kDefaultInputFileName).value() +
+ media::kDefaultInputParameters));
base::FilePath g_log_path;
@@ -341,6 +346,105 @@ static std::string FilePathStringTypeToString(
#endif // defined(OS_WIN)
}
+// Decodes webm vp9 |src_file| into |test_stream_->aligned_in_file_data|. Used
+// to save storage size in media/test/data since raw YUV files are huge.
+static bool DecodeFile(const base::FilePath& src_file,
+ TestStream* test_stream) {
+ InitializeMediaLibrary();
+
+ const int file_size = base::checked_cast<int>([src_file]() {
+ int64_t tmp = 0;
+ CHECK(base::GetFileSize(src_file, &tmp))
+ << "Failed to get file size for '" << src_file << "'";
+ return tmp;
+ }());
+
+ // Read file data into memory.
+ auto buffer = base::MakeRefCounted<DecoderBuffer>(file_size);
+ auto* data = reinterpret_cast<char*>(buffer->writable_data());
+ CHECK_EQ(file_size, base::ReadFile(src_file, data, file_size))
+ << "Failed to read '" << src_file << "'";
+
+ // Initialize ffmpeg with the file data.
+ InMemoryUrlProtocol protocol(buffer->data(), buffer->data_size(), false);
+ FFmpegGlue glue(&protocol);
+ CHECK(glue.OpenContext());
+
+ // Find first vp9 stream in the file.
+ int stream_index = -1;
+ VideoDecoderConfig config;
+ for (size_t i = 0; i < glue.format_context()->nb_streams; ++i) {
+ AVStream* stream = glue.format_context()->streams[i];
+ const AVCodecParameters* codec_parameters = stream->codecpar;
+ const AVMediaType codec_type = codec_parameters->codec_type;
+ const AVCodecID codec_id = codec_parameters->codec_id;
+ if (codec_type == AVMEDIA_TYPE_VIDEO && codec_id == AV_CODEC_ID_VP9) {
+ CHECK(AVStreamToVideoDecoderConfig(stream, &config));
+ stream_index = i;
+ break;
+ }
+ }
+
+ CHECK(config.IsValidConfig());
+
+ test_stream->num_frames = 0;
+ test_stream->aligned_in_file_data.clear();
+
+ // Writes VideoFrames into the |test_stream_->aligned_in_file_data| structure.
+ class FrameWriter {
+ public:
+ explicit FrameWriter(TestStream* test_stream) : test_stream_(test_stream) {}
+ ~FrameWriter() = default;
+
+ void FrameReady(scoped_refptr<VideoFrame> frame) {
+ const size_t previous_end = test_stream_->aligned_in_file_data.size();
+
+ ++test_stream_->num_frames;
+ test_stream_->aligned_in_file_data.resize(
+ test_stream_->num_frames * test_stream_->aligned_buffer_size);
+ uint8_t* dest = reinterpret_cast<uint8_t*>(
+ &test_stream_->aligned_in_file_data[previous_end]);
+
+ for (size_t plane = 0;
+ plane < VideoFrame::NumPlanes(test_stream_->pixel_format); plane++) {
+ libyuv::CopyPlane(
+ frame->data(plane), frame->stride(plane), dest,
+ VideoFrame::RowBytes(plane, test_stream_->pixel_format,
+ test_stream_->coded_size.width()),
+ VideoFrame::RowBytes(plane, test_stream_->pixel_format,
+ test_stream_->visible_size.width()),
+ VideoFrame::Rows(plane, test_stream_->pixel_format,
+ test_stream_->visible_size.height()));
+ dest += test_stream_->aligned_plane_size[plane];
+ }
+ }
+
+ private:
+ TestStream* const test_stream_;
+ DISALLOW_COPY_AND_ASSIGN(FrameWriter);
+ } frame_writer(test_stream);
+
+ // Setup decoder.
+ VpxVideoDecoder decoder;
+ decoder.Initialize(config, false, nullptr, base::DoNothing(),
+ base::BindRepeating(&FrameWriter::FrameReady,
+ base::Unretained(&frame_writer)),
+ base::NullCallback());
+
+ // Decode frames. No need to flush since VpxVideoDecoder is 1 in 1 out.
+ AVPacket packet = {};
+ while (av_read_frame(glue.format_context(), &packet) >= 0) {
+ if (packet.stream_index == stream_index) {
+ decoder.Decode(DecoderBuffer::CopyFrom(packet.data, packet.size),
+ base::DoNothing());
+ base::RunLoop().RunUntilIdle();
+ }
+ av_packet_unref(&packet);
+ }
+
+ return true;
+}
+
// Some platforms may have requirements on physical memory buffer alignment.
// Since we are just mapping and passing chunks of the input file directly to
// the VEA as input frames, to avoid copying large chunks of raw data on each
@@ -394,6 +498,13 @@ static void CreateAlignedInputStreamFile(const gfx::Size& coded_size,
}
base::FilePath src_file(StringToFilePathStringType(test_stream->in_filename));
+
+ // File is encoded and must be decoded first.
+ if (src_file.MatchesExtension(FILE_PATH_LITERAL(".webm"))) {
+ ASSERT_TRUE(DecodeFile(src_file, test_stream));
+ return;
+ }
+
int64_t src_file_size = 0;
LOG_ASSERT(base::GetFileSize(src_file, &src_file_size));
@@ -939,7 +1050,7 @@ class VideoFrameQualityValidator
void InitializeCB(bool success);
void DecodeDone(DecodeStatus status);
void FlushDone(DecodeStatus status);
- void VerifyOutputFrame(const scoped_refptr<VideoFrame>& output_frame);
+ void VerifyOutputFrame(scoped_refptr<VideoFrame> output_frame);
void Decode();
void WriteFrameStats();
@@ -958,9 +1069,6 @@ class VideoFrameQualityValidator
const VideoPixelFormat pixel_format_;
const bool verify_quality_;
std::unique_ptr<FFmpegVideoDecoder> decoder_;
- VideoDecoder::DecodeCB decode_cb_;
- // Decode callback of an EOS buffer.
- VideoDecoder::DecodeCB eos_decode_cb_;
// Callback of Flush(). Called after all frames are decoded.
const base::Closure flush_complete_cb_;
const base::Closure decode_error_cb_;
@@ -981,10 +1089,6 @@ VideoFrameQualityValidator::VideoFrameQualityValidator(
pixel_format_(pixel_format),
verify_quality_(verify_quality),
decoder_(new FFmpegVideoDecoder(&media_log_)),
- decode_cb_(base::BindRepeating(&VideoFrameQualityValidator::DecodeDone,
- AsWeakPtr())),
- eos_decode_cb_(base::BindRepeating(&VideoFrameQualityValidator::FlushDone,
- AsWeakPtr())),
flush_complete_cb_(flush_complete_cb),
decode_error_cb_(decode_error_cb),
decoder_state_(UNINITIALIZED) {
@@ -1003,17 +1107,17 @@ void VideoFrameQualityValidator::Initialize(const gfx::Size& coded_size,
VideoDecoderConfig config;
if (IsVP8(profile_)) {
config.Initialize(kCodecVP8, VP8PROFILE_ANY, pixel_format_,
- VideoColorSpace(), VIDEO_ROTATION_0, coded_size,
+ VideoColorSpace(), kNoTransformation, coded_size,
visible_size, natural_size, EmptyExtraData(),
Unencrypted());
} else if (IsVP9(profile_)) {
config.Initialize(kCodecVP9, VP9PROFILE_PROFILE0, pixel_format_,
- VideoColorSpace(), VIDEO_ROTATION_0, coded_size,
+ VideoColorSpace(), kNoTransformation, coded_size,
visible_size, natural_size, EmptyExtraData(),
Unencrypted());
} else if (IsH264(profile_)) {
config.Initialize(kCodecH264, H264PROFILE_MAIN, pixel_format_,
- VideoColorSpace(), VIDEO_ROTATION_0, coded_size,
+ VideoColorSpace(), kNoTransformation, coded_size,
visible_size, natural_size, EmptyExtraData(),
Unencrypted());
} else {
@@ -1126,10 +1230,15 @@ void VideoFrameQualityValidator::Decode() {
scoped_refptr<DecoderBuffer> next_buffer = decode_buffers_.front();
decode_buffers_.pop();
decoder_state_ = DECODING;
- if (next_buffer->end_of_stream())
- decoder_->Decode(next_buffer, eos_decode_cb_);
- else
- decoder_->Decode(next_buffer, decode_cb_);
+ if (next_buffer->end_of_stream()) {
+ decoder_->Decode(
+ next_buffer,
+ base::BindOnce(&VideoFrameQualityValidator::FlushDone, AsWeakPtr()));
+ } else {
+ decoder_->Decode(
+ next_buffer,
+ base::BindOnce(&VideoFrameQualityValidator::DecodeDone, AsWeakPtr()));
+ }
}
}
@@ -1285,7 +1394,7 @@ VideoFrameQualityValidator::CompareFrames(const VideoFrame& original_frame,
}
void VideoFrameQualityValidator::VerifyOutputFrame(
- const scoped_refptr<VideoFrame>& output_frame) {
+ scoped_refptr<VideoFrame> output_frame) {
DCHECK(thread_checker_.CalledOnValidThread());
scoped_refptr<VideoFrame> original_frame = original_frames_.front();
@@ -2061,18 +2170,13 @@ void VEAClient::FeedEncoderWithOutput(base::SharedMemory* shm) {
if (state_ != CS_ENCODING && state_ != CS_FLUSHING)
return;
- base::SharedMemoryHandle dup_handle = shm->handle().Duplicate();
- LOG_ASSERT(dup_handle.IsValid());
-
- // TODO(erikchen): This may leak the SharedMemoryHandle.
- // https://crbug.com/640840.
- BitstreamBuffer bitstream_buffer(next_output_buffer_id_++, dup_handle,
- output_buffer_size_);
+ BitstreamBuffer bitstream_buffer(next_output_buffer_id_++, shm->handle(),
+ false /* read_only */, output_buffer_size_);
LOG_ASSERT(output_buffers_at_client_
.insert(std::make_pair(bitstream_buffer.id(), shm))
.second);
- encoder_->UseOutputBitstreamBuffer(bitstream_buffer);
+ encoder_->UseOutputBitstreamBuffer(std::move(bitstream_buffer));
}
bool VEAClient::HandleEncodedFrame(bool keyframe,
@@ -2390,8 +2494,8 @@ void SimpleVEAClientBase::FeedEncoderWithOutput(base::SharedMemory* shm,
LOG_ASSERT(dup_handle.IsValid());
BitstreamBuffer bitstream_buffer(next_output_buffer_id_++, dup_handle,
- output_size);
- encoder_->UseOutputBitstreamBuffer(bitstream_buffer);
+ false /* read_only */, output_size);
+ encoder_->UseOutputBitstreamBuffer(std::move(bitstream_buffer));
}
// This client is only used to make sure the encoder does not return an encoded
@@ -2983,7 +3087,8 @@ int main(int argc, char** argv) {
// Needed to enable DVLOG through --vmodule.
logging::LoggingSettings settings;
- settings.logging_dest = logging::LOG_TO_SYSTEM_DEBUG_LOG;
+ settings.logging_dest =
+ logging::LOG_TO_SYSTEM_DEBUG_LOG | logging::LOG_TO_STDERR;
LOG_ASSERT(logging::InitLogging(settings));
const base::CommandLine* cmd_line = base::CommandLine::ForCurrentProcess();
diff --git a/chromium/media/gpu/video_frame_converter.cc b/chromium/media/gpu/video_frame_converter.cc
new file mode 100644
index 00000000000..8d46b05d759
--- /dev/null
+++ b/chromium/media/gpu/video_frame_converter.cc
@@ -0,0 +1,25 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/gpu/video_frame_converter.h"
+
+namespace media {
+
+VideoFrameConverter::VideoFrameConverter() = default;
+
+VideoFrameConverter::~VideoFrameConverter() = default;
+
+void VideoFrameConverter::set_parent_task_runner(
+ scoped_refptr<base::SequencedTaskRunner> task_runner) {
+ parent_task_runner_ = std::move(task_runner);
+}
+
+scoped_refptr<VideoFrame> VideoFrameConverter::ConvertFrame(
+ scoped_refptr<VideoFrame> frame) {
+ DCHECK(parent_task_runner_->RunsTasksInCurrentSequence());
+
+ return frame;
+}
+
+} // namespace media
diff --git a/chromium/media/gpu/video_frame_converter.h b/chromium/media/gpu/video_frame_converter.h
new file mode 100644
index 00000000000..4139aa0f7c4
--- /dev/null
+++ b/chromium/media/gpu/video_frame_converter.h
@@ -0,0 +1,44 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_GPU_VIDEO_FRAME_CONVERTER_H_
+#define MEDIA_GPU_VIDEO_FRAME_CONVERTER_H_
+
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_refptr.h"
+#include "base/sequenced_task_runner.h"
+#include "media/base/video_frame.h"
+#include "media/gpu/media_gpu_export.h"
+
+namespace media {
+
+// Video decoders make use of a video frame pool to allocate output frames,
+// which are sent to the client after decoding. However, the storage type of the
+// allocated frame can be different from what the client expects. This class
+// can be used to convert the type of output video frame in this case.
+class MEDIA_GPU_EXPORT VideoFrameConverter {
+ public:
+ VideoFrameConverter();
+ virtual ~VideoFrameConverter();
+
+ // Setter method of |parent_task_runner_|. This method should be called before
+ // any ConvertFrame() is called.
+ void set_parent_task_runner(
+ scoped_refptr<base::SequencedTaskRunner> parent_task_runner);
+
+ // Convert the frame. The default implementation returns the passed frame
+ // as-is.
+ virtual scoped_refptr<VideoFrame> ConvertFrame(
+ scoped_refptr<VideoFrame> frame);
+
+ protected:
+ // The working task runner. ConvertFrame() should be called on this.
+ scoped_refptr<base::SequencedTaskRunner> parent_task_runner_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(VideoFrameConverter);
+};
+
+} // namespace media
+#endif // MEDIA_GPU_VIDEO_FRAME_CONVERTER_H_
diff --git a/chromium/media/gpu/video_frame_mapper.h b/chromium/media/gpu/video_frame_mapper.h
index 9ba8c051b0c..9dcffc730e6 100644
--- a/chromium/media/gpu/video_frame_mapper.h
+++ b/chromium/media/gpu/video_frame_mapper.h
@@ -8,6 +8,7 @@
#include "base/macros.h"
#include "base/memory/scoped_refptr.h"
#include "media/base/video_frame.h"
+#include "media/base/video_types.h"
#include "media/gpu/media_gpu_export.h"
namespace media {
@@ -24,8 +25,15 @@ class MEDIA_GPU_EXPORT VideoFrameMapper {
virtual scoped_refptr<VideoFrame> Map(
scoped_refptr<const VideoFrame> video_frame) const = 0;
+ // Returns the allowed pixel format of video frames on Map().
+ VideoPixelFormat pixel_format() const { return format_; }
+
protected:
- VideoFrameMapper() = default;
+ explicit VideoFrameMapper(VideoPixelFormat format) : format_(format) {}
+
+ // The allowed pixel format of video frames on Map().
+ VideoPixelFormat format_;
+
DISALLOW_COPY_AND_ASSIGN(VideoFrameMapper);
};
diff --git a/chromium/media/gpu/video_frame_mapper_factory.cc b/chromium/media/gpu/video_frame_mapper_factory.cc
index a3f3ad5ea69..41f452b0f6b 100644
--- a/chromium/media/gpu/video_frame_mapper_factory.cc
+++ b/chromium/media/gpu/video_frame_mapper_factory.cc
@@ -18,24 +18,26 @@
namespace media {
// static
-std::unique_ptr<VideoFrameMapper> VideoFrameMapperFactory::CreateMapper() {
+std::unique_ptr<VideoFrameMapper> VideoFrameMapperFactory::CreateMapper(
+ VideoPixelFormat format) {
#if BUILDFLAG(USE_VAAPI) && defined(OS_LINUX)
- return CreateMapper(false);
+ return CreateMapper(format, false);
#else
- return CreateMapper(true);
+ return CreateMapper(format, true);
#endif // BUILDFLAG(USE_VAAPI) && defined(OS_LINUX)
}
// static
std::unique_ptr<VideoFrameMapper> VideoFrameMapperFactory::CreateMapper(
+ VideoPixelFormat format,
bool linear_buffer_mapper) {
#if defined(OS_LINUX)
if (linear_buffer_mapper)
- return std::make_unique<GenericDmaBufVideoFrameMapper>();
+ return GenericDmaBufVideoFrameMapper::Create(format);
#endif // defined(OS_LINUX)
#if BUILDFLAG(USE_VAAPI) && defined(OS_LINUX)
- return VaapiDmaBufVideoFrameMapper::Create();
+ return VaapiDmaBufVideoFrameMapper::Create(format);
#endif // BUILDFLAG(USE_VAAPI) && defined(OS_LINUX)
return nullptr;
diff --git a/chromium/media/gpu/video_frame_mapper_factory.h b/chromium/media/gpu/video_frame_mapper_factory.h
index d237d035486..6f4ca117143 100644
--- a/chromium/media/gpu/video_frame_mapper_factory.h
+++ b/chromium/media/gpu/video_frame_mapper_factory.h
@@ -7,6 +7,7 @@
#include <memory>
+#include "media/base/video_types.h"
#include "media/gpu/media_gpu_export.h"
#include "media/gpu/video_frame_mapper.h"
@@ -17,11 +18,13 @@ namespace media {
class MEDIA_GPU_EXPORT VideoFrameMapperFactory {
public:
// Create an instance of the frame mapper.
- static std::unique_ptr<VideoFrameMapper> CreateMapper();
+ static std::unique_ptr<VideoFrameMapper> CreateMapper(
+ VideoPixelFormat format);
// |linear_buffer_mapper| stands for a created mapper type. If true, the
// mapper will expect frames passed to it to be in linear format.
static std::unique_ptr<VideoFrameMapper> CreateMapper(
+ VideoPixelFormat format,
bool force_linear_buffer_mapper);
};
diff --git a/chromium/media/gpu/vp8_decoder.cc b/chromium/media/gpu/vp8_decoder.cc
index 94570a6fac1..840b5faebf0 100644
--- a/chromium/media/gpu/vp8_decoder.cc
+++ b/chromium/media/gpu/vp8_decoder.cc
@@ -168,6 +168,10 @@ gfx::Size VP8Decoder::GetPicSize() const {
return pic_size_;
}
+gfx::Rect VP8Decoder::GetVisibleRect() const {
+ return gfx::Rect(pic_size_);
+}
+
size_t VP8Decoder::GetRequiredNumOfPictures() const {
constexpr size_t kPicsInPipeline = limits::kMaxVideoFrames + 1;
return kVP8NumFramesActive + kPicsInPipeline;
diff --git a/chromium/media/gpu/vp8_decoder.h b/chromium/media/gpu/vp8_decoder.h
index 8ed398b89f6..ecc773106d1 100644
--- a/chromium/media/gpu/vp8_decoder.h
+++ b/chromium/media/gpu/vp8_decoder.h
@@ -12,10 +12,10 @@
#include "base/macros.h"
#include "base/memory/ref_counted.h"
-#include "media/filters/vp8_parser.h"
#include "media/gpu/accelerated_video_decoder.h"
#include "media/gpu/vp8_picture.h"
#include "media/gpu/vp8_reference_frame_vector.h"
+#include "media/parsers/vp8_parser.h"
namespace media {
@@ -71,6 +71,7 @@ class MEDIA_GPU_EXPORT VP8Decoder : public AcceleratedVideoDecoder {
void Reset() override;
DecodeResult Decode() override WARN_UNUSED_RESULT;
gfx::Size GetPicSize() const override;
+ gfx::Rect GetVisibleRect() const override;
size_t GetRequiredNumOfPictures() const override;
size_t GetNumReferenceFrames() const override;
diff --git a/chromium/media/gpu/vp8_picture.h b/chromium/media/gpu/vp8_picture.h
index 1e02dede13e..2db751093fb 100644
--- a/chromium/media/gpu/vp8_picture.h
+++ b/chromium/media/gpu/vp8_picture.h
@@ -6,8 +6,8 @@
#define MEDIA_GPU_VP8_PICTURE_H_
#include "base/macros.h"
-#include "media/filters/vp8_parser.h"
#include "media/gpu/codec_picture.h"
+#include "media/parsers/vp8_parser.h"
namespace media {
diff --git a/chromium/media/gpu/vp8_reference_frame_vector.h b/chromium/media/gpu/vp8_reference_frame_vector.h
index cdb7c564020..6f0604fe4b6 100644
--- a/chromium/media/gpu/vp8_reference_frame_vector.h
+++ b/chromium/media/gpu/vp8_reference_frame_vector.h
@@ -9,7 +9,7 @@
#include "base/memory/scoped_refptr.h"
#include "base/sequence_checker.h"
-#include "media/filters/vp8_parser.h"
+#include "media/parsers/vp8_parser.h"
namespace media {
diff --git a/chromium/media/gpu/vp9_decoder.cc b/chromium/media/gpu/vp9_decoder.cc
index d0cf58a980f..6cb9071a6b5 100644
--- a/chromium/media/gpu/vp9_decoder.cc
+++ b/chromium/media/gpu/vp9_decoder.cc
@@ -135,8 +135,17 @@ VP9Decoder::DecodeResult VP9Decoder::Decode() {
gfx::Size new_pic_size(curr_frame_hdr_->frame_width,
curr_frame_hdr_->frame_height);
- DCHECK(!new_pic_size.IsEmpty());
+ gfx::Rect new_render_rect(curr_frame_hdr_->render_width,
+ curr_frame_hdr_->render_height);
+ // For safety, check the validity of render size or leave it as (0, 0).
+ if (!gfx::Rect(new_pic_size).Contains(new_render_rect)) {
+ DVLOG(1) << "Render size exceeds picture size. render size: "
+ << new_render_rect.ToString()
+ << ", picture size: " << new_pic_size.ToString();
+ new_render_rect = gfx::Rect();
+ }
+ DCHECK(!new_pic_size.IsEmpty());
if (new_pic_size != pic_size_) {
DVLOG(1) << "New resolution: " << new_pic_size.ToString();
@@ -164,6 +173,7 @@ VP9Decoder::DecodeResult VP9Decoder::Decode() {
ref_frames_.Clear();
pic_size_ = new_pic_size;
+ visible_rect_ = new_render_rect;
size_change_failure_counter_ = 0;
return kAllocateNewSurfaces;
}
@@ -171,16 +181,6 @@ VP9Decoder::DecodeResult VP9Decoder::Decode() {
scoped_refptr<VP9Picture> pic = accelerator_->CreateVP9Picture();
if (!pic)
return kRanOutOfSurfaces;
-
- gfx::Rect new_render_rect(curr_frame_hdr_->render_width,
- curr_frame_hdr_->render_height);
- // For safety, check the validity of render size or leave it as (0, 0).
- if (!gfx::Rect(pic_size_).Contains(new_render_rect)) {
- DVLOG(1) << "Render size exceeds picture size. render size: "
- << new_render_rect.ToString()
- << ", picture size: " << pic_size_.ToString();
- new_render_rect = gfx::Rect();
- }
DVLOG(2) << "Render resolution: " << new_render_rect.ToString();
pic->set_visible_rect(new_render_rect);
@@ -204,13 +204,13 @@ VP9Decoder::DecodeResult VP9Decoder::Decode() {
}
void VP9Decoder::UpdateFrameContext(
- const scoped_refptr<VP9Picture>& pic,
+ scoped_refptr<VP9Picture> pic,
const base::Callback<void(const Vp9FrameContext&)>& context_refresh_cb) {
DCHECK(context_refresh_cb);
Vp9FrameContext frame_ctx;
memset(&frame_ctx, 0, sizeof(frame_ctx));
- if (!accelerator_->GetFrameContext(pic, &frame_ctx)) {
+ if (!accelerator_->GetFrameContext(std::move(pic), &frame_ctx)) {
SetError();
return;
}
@@ -239,7 +239,7 @@ bool VP9Decoder::DecodeAndOutputPicture(scoped_refptr<VP9Picture> pic) {
return false;
}
- ref_frames_.Refresh(pic);
+ ref_frames_.Refresh(std::move(pic));
return true;
}
@@ -252,6 +252,10 @@ gfx::Size VP9Decoder::GetPicSize() const {
return pic_size_;
}
+gfx::Rect VP9Decoder::GetVisibleRect() const {
+ return visible_rect_;
+}
+
size_t VP9Decoder::GetRequiredNumOfPictures() const {
constexpr size_t kPicsInPipeline = limits::kMaxVideoFrames + 1;
return kPicsInPipeline + GetNumReferenceFrames();
diff --git a/chromium/media/gpu/vp9_decoder.h b/chromium/media/gpu/vp9_decoder.h
index 50cd8f7abf4..0bc33f06861 100644
--- a/chromium/media/gpu/vp9_decoder.h
+++ b/chromium/media/gpu/vp9_decoder.h
@@ -60,7 +60,7 @@ class MEDIA_GPU_EXPORT VP9Decoder : public AcceleratedVideoDecoder {
// |lf_params| does not need to remain valid after this method returns.
//
// Return true when successful, false otherwise.
- virtual bool SubmitDecode(const scoped_refptr<VP9Picture>& pic,
+ virtual bool SubmitDecode(scoped_refptr<VP9Picture> pic,
const Vp9SegmentationParams& segm_params,
const Vp9LoopFilterParams& lf_params,
const Vp9ReferenceFrameVector& reference_frames,
@@ -76,7 +76,7 @@ class MEDIA_GPU_EXPORT VP9Decoder : public AcceleratedVideoDecoder {
// immediately after calling this method.
//
// Return true when successful, false otherwise.
- virtual bool OutputPicture(const scoped_refptr<VP9Picture>& pic) = 0;
+ virtual bool OutputPicture(scoped_refptr<VP9Picture> pic) = 0;
// Return true if the accelerator requires the client to provide frame
// context in order to decode. If so, the Vp9FrameHeader provided by the
@@ -85,7 +85,7 @@ class MEDIA_GPU_EXPORT VP9Decoder : public AcceleratedVideoDecoder {
// Set |frame_ctx| to the state after decoding |pic|, returning true on
// success, false otherwise.
- virtual bool GetFrameContext(const scoped_refptr<VP9Picture>& pic,
+ virtual bool GetFrameContext(scoped_refptr<VP9Picture> pic,
Vp9FrameContext* frame_ctx) = 0;
private:
@@ -106,13 +106,11 @@ class MEDIA_GPU_EXPORT VP9Decoder : public AcceleratedVideoDecoder {
void Reset() override;
DecodeResult Decode() override WARN_UNUSED_RESULT;
gfx::Size GetPicSize() const override;
+ gfx::Rect GetVisibleRect() const override;
size_t GetRequiredNumOfPictures() const override;
size_t GetNumReferenceFrames() const override;
private:
- // Update ref_frames_ based on the information in current frame header.
- void RefreshReferenceFrames(const scoped_refptr<VP9Picture>& pic);
-
// Decode and possibly output |pic| (if the picture is to be shown).
// Return true on success, false otherwise.
bool DecodeAndOutputPicture(scoped_refptr<VP9Picture> pic);
@@ -120,7 +118,7 @@ class MEDIA_GPU_EXPORT VP9Decoder : public AcceleratedVideoDecoder {
// Get frame context state after decoding |pic| from the accelerator, and call
// |context_refresh_cb| with the acquired state.
void UpdateFrameContext(
- const scoped_refptr<VP9Picture>& pic,
+ scoped_refptr<VP9Picture> pic,
const base::Callback<void(const Vp9FrameContext&)>& context_refresh_cb);
// Called on error, when decoding cannot continue. Sets state_ to kError and
@@ -152,6 +150,9 @@ class MEDIA_GPU_EXPORT VP9Decoder : public AcceleratedVideoDecoder {
// Current coded resolution.
gfx::Size pic_size_;
+ // Visible rectangle on the most recent allocation.
+ gfx::Rect visible_rect_;
+
size_t size_change_failure_counter_ = 0;
const std::unique_ptr<VP9Accelerator> accelerator_;
diff --git a/chromium/media/gpu/windows/OWNERS b/chromium/media/gpu/windows/OWNERS
deleted file mode 100644
index badaf4dea92..00000000000
--- a/chromium/media/gpu/windows/OWNERS
+++ /dev/null
@@ -1,2 +0,0 @@
-# For encoder files.
-per-file *encode*=emircan@chromium.org
diff --git a/chromium/media/gpu/windows/d3d11_h264_accelerator.cc b/chromium/media/gpu/windows/d3d11_h264_accelerator.cc
index d189d2234a5..edfcb6bbbcc 100644
--- a/chromium/media/gpu/windows/d3d11_h264_accelerator.cc
+++ b/chromium/media/gpu/windows/d3d11_h264_accelerator.cc
@@ -97,7 +97,7 @@ Status D3D11H264Accelerator::SubmitFrameMetadata(
const H264Picture::Vector& ref_pic_listp0,
const H264Picture::Vector& ref_pic_listb0,
const H264Picture::Vector& ref_pic_listb1,
- const scoped_refptr<H264Picture>& pic) {
+ scoped_refptr<H264Picture> pic) {
const bool is_encrypted = pic->decrypt_config();
std::unique_ptr<D3D11_VIDEO_DECODER_BEGIN_FRAME_CRYPTO_SESSION> content_key;
@@ -123,13 +123,11 @@ Status D3D11H264Accelerator::SubmitFrameMetadata(
pic->decrypt_config()->iv().end());
}
- scoped_refptr<D3D11H264Picture> our_pic(
- static_cast<D3D11H264Picture*>(pic.get()));
-
HRESULT hr;
for (;;) {
hr = video_context_->DecoderBeginFrame(
- video_decoder_.Get(), our_pic->picture->output_view().Get(),
+ video_decoder_.Get(),
+ static_cast<D3D11H264Picture*>(pic.get())->picture->output_view().Get(),
content_key ? sizeof(*content_key) : 0, content_key.get());
if (hr == E_PENDING || hr == D3DERR_WASSTILLDRAWING) {
@@ -158,8 +156,7 @@ Status D3D11H264Accelerator::SubmitFrameMetadata(
int i = 0;
for (auto it = dpb.begin(); it != dpb.end(); i++, it++) {
- scoped_refptr<D3D11H264Picture> our_ref_pic(
- static_cast<D3D11H264Picture*>(it->get()));
+ D3D11H264Picture* our_ref_pic = static_cast<D3D11H264Picture*>(it->get());
if (!our_ref_pic->ref)
continue;
ref_frame_list_[i].Index7Bits = our_ref_pic->level_;
@@ -305,9 +302,8 @@ void D3D11H264Accelerator::PicParamsFromSliceHeader(
pic_param->IntraPicFlag = slice_hdr->IsISlice();
}
-void D3D11H264Accelerator::PicParamsFromPic(
- DXVA_PicParams_H264* pic_param,
- const scoped_refptr<H264Picture>& pic) {
+void D3D11H264Accelerator::PicParamsFromPic(DXVA_PicParams_H264* pic_param,
+ scoped_refptr<H264Picture> pic) {
pic_param->CurrPic.Index7Bits =
static_cast<D3D11H264Picture*>(pic.get())->level_;
pic_param->RefPicFlag = pic->ref;
@@ -330,12 +326,11 @@ Status D3D11H264Accelerator::SubmitSlice(
const H264SliceHeader* slice_hdr,
const H264Picture::Vector& ref_pic_list0,
const H264Picture::Vector& ref_pic_list1,
- const scoped_refptr<H264Picture>& pic,
+ scoped_refptr<H264Picture> pic,
const uint8_t* data,
size_t size,
const std::vector<SubsampleEntry>& subsamples) {
- scoped_refptr<D3D11H264Picture> our_pic(
- static_cast<D3D11H264Picture*>(pic.get()));
+ const bool is_encrypted = pic->decrypt_config();
DXVA_PicParams_H264 pic_param = {};
FillPicParamsWithConstants(&pic_param);
@@ -343,7 +338,7 @@ Status D3D11H264Accelerator::SubmitSlice(
if (!PicParamsFromPPS(&pic_param, pps))
return Status::kFail;
PicParamsFromSliceHeader(&pic_param, slice_hdr);
- PicParamsFromPic(&pic_param, pic);
+ PicParamsFromPic(&pic_param, std::move(pic));
memcpy(pic_param.RefFrameList, ref_frame_list_,
sizeof pic_param.RefFrameList);
@@ -423,8 +418,6 @@ Status D3D11H264Accelerator::SubmitSlice(
size_t remaining_bitstream = out_bitstream_size;
size_t start_location = 0;
- const bool is_encrypted = pic->decrypt_config();
-
if (is_encrypted) {
// For now, the entire frame has to fit into the bitstream buffer. This way
// the subsample ClearSize adjustment below should work.
@@ -574,8 +567,7 @@ bool D3D11H264Accelerator::SubmitSliceData() {
return true;
}
-Status D3D11H264Accelerator::SubmitDecode(
- const scoped_refptr<H264Picture>& pic) {
+Status D3D11H264Accelerator::SubmitDecode(scoped_refptr<H264Picture> pic) {
if (!SubmitSliceData()) {
RecordFailure("SubmitSliceData failed");
return Status::kFail;
@@ -603,12 +595,10 @@ void D3D11H264Accelerator::Reset() {
CHECK(SUCCEEDED(hr));
}
-bool D3D11H264Accelerator::OutputPicture(
- const scoped_refptr<H264Picture>& pic) {
- scoped_refptr<D3D11H264Picture> our_pic(
- static_cast<D3D11H264Picture*>(pic.get()));
+bool D3D11H264Accelerator::OutputPicture(scoped_refptr<H264Picture> pic) {
+ D3D11H264Picture* our_pic = static_cast<D3D11H264Picture*>(pic.get());
- client_->OutputResult(pic.get(), our_pic->picture);
+ client_->OutputResult(our_pic, our_pic->picture);
return true;
}
diff --git a/chromium/media/gpu/windows/d3d11_h264_accelerator.h b/chromium/media/gpu/windows/d3d11_h264_accelerator.h
index 653b2bb38a9..997d56956fa 100644
--- a/chromium/media/gpu/windows/d3d11_h264_accelerator.h
+++ b/chromium/media/gpu/windows/d3d11_h264_accelerator.h
@@ -49,18 +49,18 @@ class D3D11H264Accelerator : public H264Decoder::H264Accelerator {
const H264Picture::Vector& ref_pic_listp0,
const H264Picture::Vector& ref_pic_listb0,
const H264Picture::Vector& ref_pic_listb1,
- const scoped_refptr<H264Picture>& pic) override;
+ scoped_refptr<H264Picture> pic) override;
Status SubmitSlice(const H264PPS* pps,
const H264SliceHeader* slice_hdr,
const H264Picture::Vector& ref_pic_list0,
const H264Picture::Vector& ref_pic_list1,
- const scoped_refptr<H264Picture>& pic,
+ scoped_refptr<H264Picture> pic,
const uint8_t* data,
size_t size,
const std::vector<SubsampleEntry>& subsamples) override;
- Status SubmitDecode(const scoped_refptr<H264Picture>& pic) override;
+ Status SubmitDecode(scoped_refptr<H264Picture> pic) override;
void Reset() override;
- bool OutputPicture(const scoped_refptr<H264Picture>& pic) override;
+ bool OutputPicture(scoped_refptr<H264Picture> pic) override;
// Gets a pic params struct with the constant fields set.
void FillPicParamsWithConstants(DXVA_PicParams_H264* pic_param);
@@ -78,7 +78,7 @@ class D3D11H264Accelerator : public H264Decoder::H264Accelerator {
const H264SliceHeader* pps);
void PicParamsFromPic(DXVA_PicParams_H264* pic_param,
- const scoped_refptr<H264Picture>& pic);
+ scoped_refptr<H264Picture> pic);
private:
bool SubmitSliceData();
diff --git a/chromium/media/gpu/windows/d3d11_picture_buffer.cc b/chromium/media/gpu/windows/d3d11_picture_buffer.cc
index 960d85d653a..19c2838f911 100644
--- a/chromium/media/gpu/windows/d3d11_picture_buffer.cc
+++ b/chromium/media/gpu/windows/d3d11_picture_buffer.cc
@@ -7,177 +7,71 @@
#include <d3d11.h>
#include <d3d11_1.h>
#include <windows.h>
+#include <wrl/client.h>
#include <memory>
#include "gpu/command_buffer/service/mailbox_manager.h"
#include "gpu/command_buffer/service/texture_manager.h"
+#include "media/base/media_log.h"
#include "media/gpu/windows/return_on_failure.h"
#include "third_party/angle/include/EGL/egl.h"
#include "third_party/angle/include/EGL/eglext.h"
#include "ui/gfx/color_space.h"
-#include "ui/gl/gl_bindings.h"
-#include "ui/gl/gl_context.h"
-#include "ui/gl/gl_image_dxgi.h"
-#include "ui/gl/gl_surface_egl.h"
-#include "ui/gl/scoped_binders.h"
namespace media {
-D3D11PictureBuffer::D3D11PictureBuffer(GLenum target,
- gfx::Size size,
- size_t level)
- : target_(target), size_(size), level_(level) {}
+D3D11PictureBuffer::D3D11PictureBuffer(
+ GLenum target,
+ std::unique_ptr<Texture2DWrapper> texture_wrapper,
+ gfx::Size size,
+ size_t level)
+ : target_(target),
+ texture_wrapper_(std::move(texture_wrapper)),
+ size_(size),
+ level_(level) {}
D3D11PictureBuffer::~D3D11PictureBuffer() {
// TODO(liberato): post destruction of |gpu_resources_| to the gpu thread.
}
-bool D3D11PictureBuffer::Init(
- base::RepeatingCallback<scoped_refptr<CommandBufferHelper>()> get_helper_cb,
- Microsoft::WRL::ComPtr<ID3D11VideoDevice> video_device,
- Microsoft::WRL::ComPtr<ID3D11Texture2D> texture,
- const GUID& decoder_guid,
- int textures_per_picture) {
- texture_ = texture;
+bool D3D11PictureBuffer::Init(GetCommandBufferHelperCB get_helper_cb,
+ ComD3D11VideoDevice video_device,
+ const GUID& decoder_guid,
+ int textures_per_picture,
+ std::unique_ptr<MediaLog> media_log) {
D3D11_VIDEO_DECODER_OUTPUT_VIEW_DESC view_desc = {};
view_desc.DecodeProfile = decoder_guid;
view_desc.ViewDimension = D3D11_VDOV_DIMENSION_TEXTURE2D;
view_desc.Texture2D.ArraySlice = (UINT)level_;
- HRESULT hr = video_device->CreateVideoDecoderOutputView(
- texture.Get(), &view_desc, output_view_.GetAddressOf());
-
- if (!SUCCEEDED(hr))
+ if (!texture_wrapper_->Init(std::move(get_helper_cb), level_, target_, size_,
+ textures_per_picture)) {
+ media_log->AddEvent(
+ media_log->CreateStringEvent(MediaLogEvent::MEDIA_ERROR_LOG_ENTRY,
+ "error", "Failed to Init the wrapper"));
return false;
-
- // Generate mailboxes and holders.
- std::vector<gpu::Mailbox> mailboxes;
- for (int texture_idx = 0; texture_idx < textures_per_picture; texture_idx++) {
- mailboxes.push_back(gpu::Mailbox::Generate());
- mailbox_holders_[texture_idx] = gpu::MailboxHolder(
- mailboxes[texture_idx], gpu::SyncToken(), GL_TEXTURE_EXTERNAL_OES);
}
- // Start construction of the GpuResources.
- // We send the texture itself, since we assume that we're using the angle
- // device for decoding. Sharing seems not to work very well. Otherwise, we
- // would create the texture with KEYED_MUTEX and NTHANDLE, then send along
- // a handle that we get from |texture| as an IDXGIResource1.
- // TODO(liberato): this should happen on the gpu thread.
- gpu_resources_ = std::make_unique<GpuResources>();
- if (!gpu_resources_->Init(std::move(get_helper_cb), level_,
- std::move(mailboxes), target_, size_, texture,
- textures_per_picture))
+ HRESULT hr = video_device->CreateVideoDecoderOutputView(
+ Texture().Get(), &view_desc, &output_view_);
+
+ if (!SUCCEEDED(hr)) {
+ media_log->AddEvent(media_log->CreateStringEvent(
+ MediaLogEvent::MEDIA_ERROR_LOG_ENTRY, "error",
+ "Failed to CreateVideoDecoderOutputView"));
return false;
+ }
return true;
}
-D3D11PictureBuffer::GpuResources::GpuResources() {}
-
-D3D11PictureBuffer::GpuResources::~GpuResources() {
- if (helper_ && helper_->MakeContextCurrent()) {
- for (uint32_t service_id : service_ids_)
- helper_->DestroyTexture(service_id);
- }
+const MailboxHolderArray& D3D11PictureBuffer::ProcessTexture() const {
+ return texture_wrapper_->ProcessTexture(this);
}
-bool D3D11PictureBuffer::GpuResources::Init(
- base::RepeatingCallback<scoped_refptr<CommandBufferHelper>()> get_helper_cb,
- int level,
- const std::vector<gpu::Mailbox> mailboxes,
- GLenum target,
- gfx::Size size,
- Microsoft::WRL::ComPtr<ID3D11Texture2D> angle_texture,
- int textures_per_picture) {
- helper_ = get_helper_cb.Run();
-
- if (!helper_ || !helper_->MakeContextCurrent())
- return false;
-
- // Create the textures and attach them to the mailboxes.
- for (int texture_idx = 0; texture_idx < textures_per_picture; texture_idx++) {
- uint32_t service_id =
- helper_->CreateTexture(target, GL_RGBA, size.width(), size.height(),
- GL_RGBA, GL_UNSIGNED_BYTE);
- service_ids_.push_back(service_id);
- helper_->ProduceTexture(mailboxes[texture_idx], service_id);
- }
-
- // Create the stream for zero-copy use by gl.
- EGLDisplay egl_display = gl::GLSurfaceEGL::GetHardwareDisplay();
- const EGLint stream_attributes[] = {
- EGL_CONSUMER_LATENCY_USEC_KHR,
- 0,
- EGL_CONSUMER_ACQUIRE_TIMEOUT_USEC_KHR,
- 0,
- EGL_NONE,
- };
- EGLStreamKHR stream = eglCreateStreamKHR(egl_display, stream_attributes);
- RETURN_ON_FAILURE(!!stream, "Could not create stream", false);
-
- // |stream| will be destroyed when the GLImage is.
- // TODO(liberato): for tests, it will be destroyed pretty much at the end of
- // this function unless |helper_| retains it. Also, this won't work if we
- // have a FakeCommandBufferHelper since the service IDs aren't meaningful.
- scoped_refptr<gl::GLImage> gl_image =
- base::MakeRefCounted<gl::GLImageDXGI>(size, stream);
- gl::ScopedActiveTexture texture0(GL_TEXTURE0);
- gl::ScopedTextureBinder texture0_binder(GL_TEXTURE_EXTERNAL_OES,
- service_ids_[0]);
- gl::ScopedActiveTexture texture1(GL_TEXTURE1);
- gl::ScopedTextureBinder texture1_binder(GL_TEXTURE_EXTERNAL_OES,
- service_ids_[1]);
-
- EGLAttrib consumer_attributes[] = {
- EGL_COLOR_BUFFER_TYPE,
- EGL_YUV_BUFFER_EXT,
- EGL_YUV_NUMBER_OF_PLANES_EXT,
- 2,
- EGL_YUV_PLANE0_TEXTURE_UNIT_NV,
- 0,
- EGL_YUV_PLANE1_TEXTURE_UNIT_NV,
- 1,
- EGL_NONE,
- };
- EGLBoolean result = eglStreamConsumerGLTextureExternalAttribsNV(
- egl_display, stream, consumer_attributes);
- RETURN_ON_FAILURE(result, "Could not set stream consumer", false);
-
- EGLAttrib producer_attributes[] = {
- EGL_NONE,
- };
-
- result = eglCreateStreamProducerD3DTextureANGLE(egl_display, stream,
- producer_attributes);
- RETURN_ON_FAILURE(result, "Could not create stream", false);
-
- EGLAttrib frame_attributes[] = {
- EGL_D3D_TEXTURE_SUBRESOURCE_ID_ANGLE, level, EGL_NONE,
- };
-
- result = eglStreamPostD3DTextureANGLE(egl_display, stream,
- static_cast<void*>(angle_texture.Get()),
- frame_attributes);
- RETURN_ON_FAILURE(result, "Could not post texture", false);
-
- result = eglStreamConsumerAcquireKHR(egl_display, stream);
-
- RETURN_ON_FAILURE(result, "Could not post acquire stream", false);
- gl::GLImageDXGI* gl_image_dxgi =
- static_cast<gl::GLImageDXGI*>(gl_image.get());
-
- gl_image_dxgi->SetTexture(angle_texture, level);
-
- // Bind the image to each texture.
- for (size_t texture_idx = 0; texture_idx < service_ids_.size();
- texture_idx++) {
- helper_->BindImage(service_ids_[texture_idx], gl_image.get(),
- false /* client_managed */);
- }
-
- return true;
+ComD3D11Texture2D D3D11PictureBuffer::Texture() const {
+ return texture_wrapper_->Texture();
}
} // namespace media
diff --git a/chromium/media/gpu/windows/d3d11_picture_buffer.h b/chromium/media/gpu/windows/d3d11_picture_buffer.h
index 677a9a43ebb..26874eeeb45 100644
--- a/chromium/media/gpu/windows/d3d11_picture_buffer.h
+++ b/chromium/media/gpu/windows/d3d11_picture_buffer.h
@@ -9,15 +9,19 @@
#include <dxva.h>
#include <wrl/client.h>
+#include <memory>
#include <vector>
#include "base/memory/ref_counted.h"
+#include "gpu/command_buffer/service/mailbox_manager.h"
#include "gpu/command_buffer/service/texture_manager.h"
#include "gpu/ipc/service/command_buffer_stub.h"
+#include "media/base/media_log.h"
#include "media/base/video_frame.h"
#include "media/gpu/command_buffer_helper.h"
#include "media/gpu/media_gpu_export.h"
+#include "media/gpu/windows/d3d11_texture_wrapper.h"
#include "media/video/picture.h"
#include "third_party/angle/include/EGL/egl.h"
#include "third_party/angle/include/EGL/eglext.h"
@@ -25,6 +29,8 @@
namespace media {
+class Texture2DWrapper;
+
// PictureBuffer that owns Chrome Textures to display it, and keep a reference
// to the D3D texture that backs the image.
//
@@ -41,20 +47,26 @@ namespace media {
class MEDIA_GPU_EXPORT D3D11PictureBuffer
: public base::RefCountedThreadSafe<D3D11PictureBuffer> {
public:
- using MailboxHolderArray = gpu::MailboxHolder[VideoFrame::kMaxPlanes];
-
- D3D11PictureBuffer(GLenum target, gfx::Size size, size_t level);
-
- bool Init(base::RepeatingCallback<scoped_refptr<CommandBufferHelper>()>
- get_helper_cb,
- Microsoft::WRL::ComPtr<ID3D11VideoDevice> video_device,
- Microsoft::WRL::ComPtr<ID3D11Texture2D> texture,
+ // |texture_wrapper| is responsible for controlling mailbox access to
+ // the ID3D11Texture2D,
+ // |level| is the picturebuffer index inside the Array-type ID3D11Texture2D.
+ D3D11PictureBuffer(GLenum target,
+ std::unique_ptr<Texture2DWrapper> texture_wrapper,
+ gfx::Size size,
+ size_t level);
+
+ bool Init(GetCommandBufferHelperCB get_helper_cb,
+ ComD3D11VideoDevice video_device,
const GUID& decoder_guid,
- int textures_per_picture);
+ int textures_per_picture,
+ std::unique_ptr<MediaLog> media_log);
+
+ // Return the mailbox holders that can be used to create a VideoFrame for us.
+ const MailboxHolderArray& ProcessTexture() const;
+ ComD3D11Texture2D Texture() const;
const gfx::Size& size() const { return size_; }
size_t level() const { return level_; }
- Microsoft::WRL::ComPtr<ID3D11Texture2D> texture() const { return texture_; }
// Is this PictureBuffer backing a VideoFrame right now?
bool in_client_use() const { return in_client_use_; }
@@ -65,14 +77,10 @@ class MEDIA_GPU_EXPORT D3D11PictureBuffer
void set_in_client_use(bool use) { in_client_use_ = use; }
void set_in_picture_use(bool use) { in_picture_use_ = use; }
- const Microsoft::WRL::ComPtr<ID3D11VideoDecoderOutputView>& output_view()
- const {
+ const ComD3D11VideoDecoderOutputView& output_view() const {
return output_view_;
}
- // Return the mailbox holders that can be used to create a VideoFrame for us.
- const MailboxHolderArray& mailbox_holders() const { return mailbox_holders_; }
-
// Shouldn't be here, but simpler for now.
base::TimeDelta timestamp_;
@@ -81,46 +89,13 @@ class MEDIA_GPU_EXPORT D3D11PictureBuffer
friend class base::RefCountedThreadSafe<D3D11PictureBuffer>;
GLenum target_;
+ std::unique_ptr<Texture2DWrapper> texture_wrapper_;
gfx::Size size_;
bool in_picture_use_ = false;
bool in_client_use_ = false;
size_t level_;
- // TODO(liberato): I don't think that we need to remember |texture_|. The
- // GLImage will do so, so it will last long enough for any VideoFrames that
- // reference it.
- Microsoft::WRL::ComPtr<ID3D11Texture2D> texture_;
- Microsoft::WRL::ComPtr<ID3D11VideoDecoderOutputView> output_view_;
-
- MailboxHolderArray mailbox_holders_;
-
- // Things that are to be accessed / freed only on the main thread. In
- // addition to setting up the textures to render from a D3D11 texture,
- // these also hold the chrome GL Texture objects so that the client
- // can use the mailbox.
- class GpuResources {
- public:
- GpuResources();
- ~GpuResources();
-
- bool Init(base::RepeatingCallback<scoped_refptr<CommandBufferHelper>()>
- get_helper_cb,
- int level,
- const std::vector<gpu::Mailbox> mailboxes,
- GLenum target,
- gfx::Size size,
- Microsoft::WRL::ComPtr<ID3D11Texture2D> angle_texture,
- int textures_per_picture);
-
- std::vector<uint32_t> service_ids_;
-
- private:
- scoped_refptr<CommandBufferHelper> helper_;
-
- DISALLOW_COPY_AND_ASSIGN(GpuResources);
- };
-
- std::unique_ptr<GpuResources> gpu_resources_;
+ ComD3D11VideoDecoderOutputView output_view_;
DISALLOW_COPY_AND_ASSIGN(D3D11PictureBuffer);
};
diff --git a/chromium/media/gpu/windows/d3d11_texture_wrapper.cc b/chromium/media/gpu/windows/d3d11_texture_wrapper.cc
new file mode 100644
index 00000000000..66b9e73ad58
--- /dev/null
+++ b/chromium/media/gpu/windows/d3d11_texture_wrapper.cc
@@ -0,0 +1,170 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/gpu/windows/d3d11_texture_wrapper.h"
+
+#include <memory>
+
+#include "gpu/command_buffer/service/mailbox_manager.h"
+#include "media/gpu/windows/return_on_failure.h"
+
+namespace media {
+
+Texture2DWrapper::Texture2DWrapper(ComD3D11Texture2D texture)
+ : texture_(texture) {}
+
+Texture2DWrapper::~Texture2DWrapper() {}
+
+const ComD3D11Texture2D Texture2DWrapper::Texture() const {
+ return texture_;
+}
+
+DefaultTexture2DWrapper::DefaultTexture2DWrapper(ComD3D11Texture2D texture)
+ : Texture2DWrapper(texture) {}
+DefaultTexture2DWrapper::~DefaultTexture2DWrapper() {}
+
+const MailboxHolderArray& DefaultTexture2DWrapper::ProcessTexture(
+ const D3D11PictureBuffer* owner_pb) {
+ return mailbox_holders_;
+}
+
+bool DefaultTexture2DWrapper::Init(GetCommandBufferHelperCB get_helper_cb,
+ size_t array_slice,
+ GLenum target,
+ gfx::Size size,
+ int textures_per_picture) {
+ gpu_resources_ = std::make_unique<GpuResources>();
+ if (!gpu_resources_)
+ return false;
+
+ // Generate mailboxes and holders.
+ std::vector<gpu::Mailbox> mailboxes;
+ for (int texture_idx = 0; texture_idx < textures_per_picture; texture_idx++) {
+ mailboxes.push_back(gpu::Mailbox::Generate());
+ mailbox_holders_[texture_idx] = gpu::MailboxHolder(
+ mailboxes[texture_idx], gpu::SyncToken(), GL_TEXTURE_EXTERNAL_OES);
+ }
+
+ // Start construction of the GpuResources.
+ // We send the texture itself, since we assume that we're using the angle
+ // device for decoding. Sharing seems not to work very well. Otherwise, we
+ // would create the texture with KEYED_MUTEX and NTHANDLE, then send along
+ // a handle that we get from |texture| as an IDXGIResource1.
+ // TODO(liberato): this should happen on the gpu thread.
+ return gpu_resources_->Init(std::move(get_helper_cb), array_slice,
+ std::move(mailboxes), target, size, Texture(),
+ textures_per_picture);
+
+ return true;
+}
+
+DefaultTexture2DWrapper::GpuResources::GpuResources() {}
+
+DefaultTexture2DWrapper::GpuResources::~GpuResources() {
+ if (helper_ && helper_->MakeContextCurrent()) {
+ for (uint32_t service_id : service_ids_)
+ helper_->DestroyTexture(service_id);
+ }
+}
+
+bool DefaultTexture2DWrapper::GpuResources::Init(
+ GetCommandBufferHelperCB get_helper_cb,
+ int array_slice,
+ const std::vector<gpu::Mailbox> mailboxes,
+ GLenum target,
+ gfx::Size size,
+ ComD3D11Texture2D angle_texture,
+ int textures_per_picture) {
+ helper_ = get_helper_cb.Run();
+
+ if (!helper_ || !helper_->MakeContextCurrent())
+ return false;
+
+ // Create the textures and attach them to the mailboxes.
+ for (int texture_idx = 0; texture_idx < textures_per_picture; texture_idx++) {
+ uint32_t service_id =
+ helper_->CreateTexture(target, GL_RGBA, size.width(), size.height(),
+ GL_RGBA, GL_UNSIGNED_BYTE);
+ service_ids_.push_back(service_id);
+ helper_->ProduceTexture(mailboxes[texture_idx], service_id);
+ }
+
+ // Create the stream for zero-copy use by gl.
+ EGLDisplay egl_display = gl::GLSurfaceEGL::GetHardwareDisplay();
+ const EGLint stream_attributes[] = {
+ EGL_CONSUMER_LATENCY_USEC_KHR,
+ 0,
+ EGL_CONSUMER_ACQUIRE_TIMEOUT_USEC_KHR,
+ 0,
+ EGL_NONE,
+ };
+ EGLStreamKHR stream = eglCreateStreamKHR(egl_display, stream_attributes);
+ RETURN_ON_FAILURE(!!stream, "Could not create stream", false);
+
+ // |stream| will be destroyed when the GLImage is.
+ // TODO(liberato): for tests, it will be destroyed pretty much at the end of
+ // this function unless |helper_| retains it. Also, this won't work if we
+ // have a FakeCommandBufferHelper since the service IDs aren't meaningful.
+ scoped_refptr<gl::GLImage> gl_image =
+ base::MakeRefCounted<gl::GLImageDXGI>(size, stream);
+ gl::ScopedActiveTexture texture0(GL_TEXTURE0);
+ gl::ScopedTextureBinder texture0_binder(GL_TEXTURE_EXTERNAL_OES,
+ service_ids_[0]);
+ gl::ScopedActiveTexture texture1(GL_TEXTURE1);
+ gl::ScopedTextureBinder texture1_binder(GL_TEXTURE_EXTERNAL_OES,
+ service_ids_[1]);
+
+ EGLAttrib consumer_attributes[] = {
+ EGL_COLOR_BUFFER_TYPE,
+ EGL_YUV_BUFFER_EXT,
+ EGL_YUV_NUMBER_OF_PLANES_EXT,
+ 2,
+ EGL_YUV_PLANE0_TEXTURE_UNIT_NV,
+ 0,
+ EGL_YUV_PLANE1_TEXTURE_UNIT_NV,
+ 1,
+ EGL_NONE,
+ };
+ EGLBoolean result = eglStreamConsumerGLTextureExternalAttribsNV(
+ egl_display, stream, consumer_attributes);
+ RETURN_ON_FAILURE(result, "Could not set stream consumer", false);
+
+ EGLAttrib producer_attributes[] = {
+ EGL_NONE,
+ };
+
+ result = eglCreateStreamProducerD3DTextureANGLE(egl_display, stream,
+ producer_attributes);
+ RETURN_ON_FAILURE(result, "Could not create stream", false);
+
+ EGLAttrib frame_attributes[] = {
+ EGL_D3D_TEXTURE_SUBRESOURCE_ID_ANGLE,
+ array_slice,
+ EGL_NONE,
+ };
+
+ result = eglStreamPostD3DTextureANGLE(egl_display, stream,
+ static_cast<void*>(angle_texture.Get()),
+ frame_attributes);
+ RETURN_ON_FAILURE(result, "Could not post texture", false);
+
+ result = eglStreamConsumerAcquireKHR(egl_display, stream);
+
+ RETURN_ON_FAILURE(result, "Could not post acquire stream", false);
+ gl::GLImageDXGI* gl_image_dxgi =
+ static_cast<gl::GLImageDXGI*>(gl_image.get());
+
+ gl_image_dxgi->SetTexture(angle_texture, array_slice);
+
+ // Bind the image to each texture.
+ for (size_t texture_idx = 0; texture_idx < service_ids_.size();
+ texture_idx++) {
+ helper_->BindImage(service_ids_[texture_idx], gl_image.get(),
+ false /* client_managed */);
+ }
+
+ return true;
+}
+
+} // namespace media
diff --git a/chromium/media/gpu/windows/d3d11_texture_wrapper.h b/chromium/media/gpu/windows/d3d11_texture_wrapper.h
new file mode 100644
index 00000000000..0ed14d7f450
--- /dev/null
+++ b/chromium/media/gpu/windows/d3d11_texture_wrapper.h
@@ -0,0 +1,109 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_GPU_WINDOWS_D3D11_TEXTURE_WRAPPER_H_
+#define MEDIA_GPU_WINDOWS_D3D11_TEXTURE_WRAPPER_H_
+
+#include <d3d11.h>
+#include <wrl/client.h>
+#include <memory>
+#include <vector>
+
+#include "gpu/command_buffer/service/mailbox_manager.h"
+#include "gpu/command_buffer/service/texture_manager.h"
+#include "media/base/video_frame.h"
+#include "media/gpu/command_buffer_helper.h"
+#include "media/gpu/media_gpu_export.h"
+#include "ui/gl/gl_bindings.h"
+#include "ui/gl/gl_context.h"
+#include "ui/gl/gl_image_dxgi.h"
+#include "ui/gl/gl_surface_egl.h"
+#include "ui/gl/scoped_binders.h"
+
+namespace media {
+
+using CommandBufferHelperPtr = scoped_refptr<CommandBufferHelper>;
+using MailboxHolderArray = gpu::MailboxHolder[VideoFrame::kMaxPlanes];
+using GetCommandBufferHelperCB =
+ base::RepeatingCallback<CommandBufferHelperPtr()>;
+using ComD3D11VideoDevice = Microsoft::WRL::ComPtr<ID3D11VideoDevice>;
+using ComD3D11Texture2D = Microsoft::WRL::ComPtr<ID3D11Texture2D>;
+using ComD3D11VideoDecoderOutputView =
+ Microsoft::WRL::ComPtr<ID3D11VideoDecoderOutputView>;
+
+class D3D11PictureBuffer;
+
+// Support different strategies for processing pictures - some may need copying,
+// for example.
+class MEDIA_GPU_EXPORT Texture2DWrapper {
+ public:
+ Texture2DWrapper(ComD3D11Texture2D texture);
+ virtual ~Texture2DWrapper();
+
+ virtual const ComD3D11Texture2D Texture() const;
+
+ // This pointer can be raw, since each Texture2DWrapper is directly owned
+ // by the D3D11PictureBuffer through a unique_ptr.
+ virtual const MailboxHolderArray& ProcessTexture(
+ const D3D11PictureBuffer* owner_pb) = 0;
+
+ virtual bool Init(GetCommandBufferHelperCB get_helper_cb,
+ size_t array_slice,
+ GLenum target,
+ gfx::Size size,
+ int textures_per_picture) = 0;
+
+ private:
+ ComD3D11Texture2D texture_;
+};
+
+// The default texture wrapper that uses GPUResources to talk to hardware
+// on behalf of a Texture2D.
+class MEDIA_GPU_EXPORT DefaultTexture2DWrapper : public Texture2DWrapper {
+ public:
+ DefaultTexture2DWrapper(ComD3D11Texture2D texture);
+ ~DefaultTexture2DWrapper() override;
+
+ bool Init(GetCommandBufferHelperCB get_helper_cb,
+ size_t array_slice,
+ GLenum target,
+ gfx::Size size,
+ int textures_per_picture) override;
+
+ const MailboxHolderArray& ProcessTexture(
+ const D3D11PictureBuffer* owner_pb) override;
+
+ private:
+ // Things that are to be accessed / freed only on the main thread. In
+ // addition to setting up the textures to render from a D3D11 texture,
+ // these also hold the chrome GL Texture objects so that the client
+ // can use the mailbox.
+ class GpuResources {
+ public:
+ GpuResources();
+ ~GpuResources();
+
+ bool Init(GetCommandBufferHelperCB get_helper_cb,
+ int array_slice,
+ const std::vector<gpu::Mailbox> mailboxes,
+ GLenum target,
+ gfx::Size size,
+ ComD3D11Texture2D angle_texture,
+ int textures_per_picture);
+
+ std::vector<uint32_t> service_ids_;
+
+ private:
+ scoped_refptr<CommandBufferHelper> helper_;
+
+ DISALLOW_COPY_AND_ASSIGN(GpuResources);
+ };
+
+ std::unique_ptr<GpuResources> gpu_resources_;
+ MailboxHolderArray mailbox_holders_;
+};
+
+} // namespace media
+
+#endif // MEDIA_GPU_WINDOWS_D3D11_TEXTURE_WRAPPER_H_
diff --git a/chromium/media/gpu/windows/d3d11_video_decoder.cc b/chromium/media/gpu/windows/d3d11_video_decoder.cc
index cd6f6370e71..5f4d9ca662a 100644
--- a/chromium/media/gpu/windows/d3d11_video_decoder.cc
+++ b/chromium/media/gpu/windows/d3d11_video_decoder.cc
@@ -12,7 +12,6 @@
#include "base/feature_list.h"
#include "base/memory/ref_counted_delete_on_sequence.h"
#include "base/metrics/histogram_macros.h"
-#include "gpu/config/gpu_finch_features.h"
#include "media/base/bind_to_current_loop.h"
#include "media/base/cdm_context.h"
#include "media/base/decoder_buffer.h"
@@ -26,6 +25,7 @@
#include "media/gpu/windows/d3d11_video_context_wrapper.h"
#include "media/gpu/windows/d3d11_video_decoder_impl.h"
#include "ui/gl/gl_angle_util_win.h"
+#include "ui/gl/gl_switches.h"
namespace media {
@@ -264,7 +264,7 @@ HRESULT D3D11VideoDecoder::InitializeAcceleratedDecoder(
void D3D11VideoDecoder::Initialize(const VideoDecoderConfig& config,
bool low_delay,
CdmContext* cdm_context,
- const InitCB& init_cb,
+ InitCB init_cb,
const OutputCB& output_cb,
const WaitingCB& waiting_cb) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
@@ -274,7 +274,7 @@ void D3D11VideoDecoder::Initialize(const VideoDecoderConfig& config,
state_ = State::kInitializing;
config_ = config;
- init_cb_ = init_cb;
+ init_cb_ = std::move(init_cb);
output_cb_ = output_cb;
waiting_cb_ = waiting_cb;
@@ -497,16 +497,17 @@ void D3D11VideoDecoder::OnGpuInitComplete(bool success) {
}
void D3D11VideoDecoder::Decode(scoped_refptr<DecoderBuffer> buffer,
- const DecodeCB& decode_cb) {
+ DecodeCB decode_cb) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
if (state_ == State::kError) {
// TODO(liberato): consider posting, though it likely doesn't matter.
- decode_cb.Run(DecodeStatus::DECODE_ERROR);
+ std::move(decode_cb).Run(DecodeStatus::DECODE_ERROR);
return;
}
- input_buffer_queue_.push_back(std::make_pair(std::move(buffer), decode_cb));
+ input_buffer_queue_.push_back(
+ std::make_pair(std::move(buffer), std::move(decode_cb)));
// Post, since we're not supposed to call back before this returns. It
// probably doesn't matter since we're in the gpu process anyway.
@@ -529,7 +530,7 @@ void D3D11VideoDecoder::DoDecode() {
return;
}
current_buffer_ = std::move(input_buffer_queue_.front().first);
- current_decode_cb_ = input_buffer_queue_.front().second;
+ current_decode_cb_ = std::move(input_buffer_queue_.front().second);
input_buffer_queue_.pop_front();
if (current_buffer_->end_of_stream()) {
// Flush, then signal the decode cb once all pictures have been output.
@@ -591,7 +592,7 @@ void D3D11VideoDecoder::DoDecode() {
base::BindOnce(&D3D11VideoDecoder::DoDecode, weak_factory_.GetWeakPtr()));
}
-void D3D11VideoDecoder::Reset(const base::RepeatingClosure& closure) {
+void D3D11VideoDecoder::Reset(base::OnceClosure closure) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK_NE(state_, State::kInitializing);
@@ -600,7 +601,7 @@ void D3D11VideoDecoder::Reset(const base::RepeatingClosure& closure) {
std::move(current_decode_cb_).Run(DecodeStatus::ABORTED);
for (auto& queue_pair : input_buffer_queue_)
- queue_pair.second.Run(DecodeStatus::ABORTED);
+ std::move(queue_pair.second).Run(DecodeStatus::ABORTED);
input_buffer_queue_.clear();
// TODO(liberato): how do we signal an error?
@@ -621,7 +622,7 @@ void D3D11VideoDecoder::Reset(const base::RepeatingClosure& closure) {
if (state_ == State::kWaitingForNewKey || state_ == State::kWaitingForReset)
state_ = State::kRunning;
- closure.Run();
+ std::move(closure).Run();
}
bool D3D11VideoDecoder::NeedsBitstreamConversion() const {
@@ -670,11 +671,12 @@ void D3D11VideoDecoder::CreatePictureBuffers() {
// Create each picture buffer.
const int textures_per_picture = 2; // From the VDA
for (size_t i = 0; i < TextureSelector::BUFFER_COUNT; i++) {
- picture_buffers_.push_back(
- new D3D11PictureBuffer(GL_TEXTURE_EXTERNAL_OES, size, i));
- if (!picture_buffers_[i]->Init(get_helper_cb_, video_device_, out_texture,
+ auto processor = std::make_unique<DefaultTexture2DWrapper>(out_texture);
+ picture_buffers_.push_back(new D3D11PictureBuffer(
+ GL_TEXTURE_EXTERNAL_OES, std::move(processor), size, i));
+ if (!picture_buffers_[i]->Init(get_helper_cb_, video_device_,
texture_selector_->decoder_guid,
- textures_per_picture)) {
+ textures_per_picture, media_log_->Clone())) {
NotifyError("Unable to allocate PictureBuffer");
return;
}
@@ -712,7 +714,7 @@ void D3D11VideoDecoder::OutputResult(const CodecPicture* picture,
base::TimeDelta timestamp = picture_buffer->timestamp_;
scoped_refptr<VideoFrame> frame = VideoFrame::WrapNativeTextures(
- texture_selector_->pixel_format, picture_buffer->mailbox_holders(),
+ texture_selector_->pixel_format, picture_buffer->ProcessTexture(),
VideoFrame::ReleaseMailboxCB(), picture_buffer->size(), visible_rect,
GetNaturalSize(visible_rect, pixel_aspect_ratio), timestamp);
@@ -779,7 +781,7 @@ void D3D11VideoDecoder::NotifyError(const char* reason) {
std::move(current_decode_cb_).Run(DecodeStatus::DECODE_ERROR);
for (auto& queue_pair : input_buffer_queue_)
- queue_pair.second.Run(DecodeStatus::DECODE_ERROR);
+ std::move(queue_pair.second).Run(DecodeStatus::DECODE_ERROR);
input_buffer_queue_.clear();
}
diff --git a/chromium/media/gpu/windows/d3d11_video_decoder.h b/chromium/media/gpu/windows/d3d11_video_decoder.h
index 82660c56fb0..e54297adc5c 100644
--- a/chromium/media/gpu/windows/d3d11_video_decoder.h
+++ b/chromium/media/gpu/windows/d3d11_video_decoder.h
@@ -103,12 +103,11 @@ class MEDIA_GPU_EXPORT D3D11VideoDecoder : public VideoDecoder,
void Initialize(const VideoDecoderConfig& config,
bool low_delay,
CdmContext* cdm_context,
- const InitCB& init_cb,
+ InitCB init_cb,
const OutputCB& output_cb,
const WaitingCB& waiting_cb) override;
- void Decode(scoped_refptr<DecoderBuffer> buffer,
- const DecodeCB& decode_cb) override;
- void Reset(const base::RepeatingClosure& closure) override;
+ void Decode(scoped_refptr<DecoderBuffer> buffer, DecodeCB decode_cb) override;
+ void Reset(base::OnceClosure closure) override;
bool NeedsBitstreamConversion() const override;
bool CanReadWithoutStalling() const override;
int GetMaxDecodeRequests() const override;
diff --git a/chromium/media/gpu/windows/d3d11_vp9_accelerator.cc b/chromium/media/gpu/windows/d3d11_vp9_accelerator.cc
index c035e54f558..c2157b5c8cd 100644
--- a/chromium/media/gpu/windows/d3d11_vp9_accelerator.cc
+++ b/chromium/media/gpu/windows/d3d11_vp9_accelerator.cc
@@ -70,12 +70,12 @@ scoped_refptr<VP9Picture> D3D11VP9Accelerator::CreateVP9Picture() {
return base::MakeRefCounted<D3D11VP9Picture>(picture_buffer);
}
-bool D3D11VP9Accelerator::BeginFrame(D3D11VP9Picture* pic) {
+bool D3D11VP9Accelerator::BeginFrame(const D3D11VP9Picture& pic) {
// This |decrypt_context| has to be outside the if block because pKeyInfo in
// D3D11_VIDEO_DECODER_BEGIN_FRAME_CRYPTO_SESSION is a pointer (to a GUID).
base::Optional<CdmProxyContext::D3D11DecryptContext> decrypt_context;
std::unique_ptr<D3D11_VIDEO_DECODER_BEGIN_FRAME_CRYPTO_SESSION> content_key;
- if (const DecryptConfig* config = pic->decrypt_config()) {
+ if (const DecryptConfig* config = pic.decrypt_config()) {
DCHECK(cdm_proxy_context_) << "No CdmProxyContext but picture is encrypted";
decrypt_context = cdm_proxy_context_->GetD3D11DecryptContext(
CdmProxy::KeyType::kDecryptAndDecode, config->key_id());
@@ -96,7 +96,7 @@ bool D3D11VP9Accelerator::BeginFrame(D3D11VP9Picture* pic) {
HRESULT hr;
do {
hr = video_context_->DecoderBeginFrame(
- video_decoder_.Get(), pic->picture_buffer()->output_view().Get(),
+ video_decoder_.Get(), pic.picture_buffer()->output_view().Get(),
content_key ? sizeof(*content_key) : 0, content_key.get());
} while (hr == E_PENDING || hr == D3DERR_WASSTILLDRAWING);
@@ -108,10 +108,9 @@ bool D3D11VP9Accelerator::BeginFrame(D3D11VP9Picture* pic) {
return true;
}
-void D3D11VP9Accelerator::CopyFrameParams(
- const scoped_refptr<D3D11VP9Picture>& pic,
- DXVA_PicParams_VP9* pic_params) {
-#define SET_PARAM(a, b) pic_params->a = pic->frame_hdr->b
+void D3D11VP9Accelerator::CopyFrameParams(const D3D11VP9Picture& pic,
+ DXVA_PicParams_VP9* pic_params) {
+#define SET_PARAM(a, b) pic_params->a = pic.frame_hdr->b
#define COPY_PARAM(a) SET_PARAM(a, a)
COPY_PARAM(profile);
@@ -132,10 +131,11 @@ void D3D11VP9Accelerator::CopyFrameParams(
// extra_plane, BitDepthMinus8Luma, and BitDepthMinus8Chroma are initialized
// at 0 already.
- pic_params->CurrPic.Index7Bits = pic->level();
- pic_params->frame_type = !pic->frame_hdr->IsKeyframe();
- pic_params->subsampling_x = pic->frame_hdr->subsampling_x;
- pic_params->subsampling_y = pic->frame_hdr->subsampling_y;
+ pic_params->CurrPic.Index7Bits = pic.level();
+ pic_params->frame_type = !pic.frame_hdr->IsKeyframe();
+
+ COPY_PARAM(subsampling_x);
+ COPY_PARAM(subsampling_y);
SET_PARAM(width, frame_width);
SET_PARAM(height, frame_height);
@@ -147,11 +147,11 @@ void D3D11VP9Accelerator::CopyFrameParams(
}
void D3D11VP9Accelerator::CopyReferenceFrames(
- const scoped_refptr<D3D11VP9Picture>& pic,
+ const D3D11VP9Picture& pic,
DXVA_PicParams_VP9* pic_params,
const Vp9ReferenceFrameVector& ref_frames) {
D3D11_TEXTURE2D_DESC texture_descriptor;
- pic->picture_buffer()->texture()->GetDesc(&texture_descriptor);
+ pic.picture_buffer()->Texture()->GetDesc(&texture_descriptor);
for (size_t i = 0; i < base::size(pic_params->ref_frame_map); i++) {
auto ref_pic = ref_frames.GetFrame(i);
@@ -169,16 +169,15 @@ void D3D11VP9Accelerator::CopyReferenceFrames(
}
}
-void D3D11VP9Accelerator::CopyFrameRefs(
- DXVA_PicParams_VP9* pic_params,
- const scoped_refptr<D3D11VP9Picture>& pic) {
+void D3D11VP9Accelerator::CopyFrameRefs(DXVA_PicParams_VP9* pic_params,
+ const D3D11VP9Picture& pic) {
for (size_t i = 0; i < base::size(pic_params->frame_refs); i++) {
pic_params->frame_refs[i] =
- pic_params->ref_frame_map[pic->frame_hdr->ref_frame_idx[i]];
+ pic_params->ref_frame_map[pic.frame_hdr->ref_frame_idx[i]];
}
for (size_t i = 0; i < base::size(pic_params->ref_frame_sign_bias); i++) {
- pic_params->ref_frame_sign_bias[i] = pic->frame_hdr->ref_frame_sign_bias[i];
+ pic_params->ref_frame_sign_bias[i] = pic.frame_hdr->ref_frame_sign_bias[i];
}
}
@@ -210,10 +209,9 @@ void D3D11VP9Accelerator::CopyLoopFilterParams(
}
}
-void D3D11VP9Accelerator::CopyQuantParams(
- DXVA_PicParams_VP9* pic_params,
- const scoped_refptr<D3D11VP9Picture>& pic) {
-#define SET_PARAM(a, b) pic_params->a = pic->frame_hdr->quant_params.b
+void D3D11VP9Accelerator::CopyQuantParams(DXVA_PicParams_VP9* pic_params,
+ const D3D11VP9Picture& pic) {
+#define SET_PARAM(a, b) pic_params->a = pic.frame_hdr->quant_params.b
SET_PARAM(base_qindex, base_q_idx);
SET_PARAM(y_dc_delta_q, delta_q_y_dc);
SET_PARAM(uv_dc_delta_q, delta_q_uv_dc);
@@ -250,13 +248,12 @@ void D3D11VP9Accelerator::CopySegmentationParams(
#undef SET_PARAM
}
-void D3D11VP9Accelerator::CopyHeaderSizeAndID(
- DXVA_PicParams_VP9* pic_params,
- const scoped_refptr<D3D11VP9Picture>& pic) {
+void D3D11VP9Accelerator::CopyHeaderSizeAndID(DXVA_PicParams_VP9* pic_params,
+ const D3D11VP9Picture& pic) {
pic_params->uncompressed_header_size_byte_aligned =
- static_cast<USHORT>(pic->frame_hdr->uncompressed_header_size);
+ static_cast<USHORT>(pic.frame_hdr->uncompressed_header_size);
pic_params->first_partition_size =
- static_cast<USHORT>(pic->frame_hdr->header_size_in_bytes);
+ static_cast<USHORT>(pic.frame_hdr->header_size_in_bytes);
// StatusReportFeedbackNumber "should not be equal to 0".
pic_params->StatusReportFeedbackNumber = ++status_feedback_;
@@ -264,7 +261,7 @@ void D3D11VP9Accelerator::CopyHeaderSizeAndID(
bool D3D11VP9Accelerator::SubmitDecoderBuffer(
const DXVA_PicParams_VP9& pic_params,
- const scoped_refptr<D3D11VP9Picture>& pic) {
+ const D3D11VP9Picture& pic) {
#define GET_BUFFER(type) \
RETURN_ON_HR_FAILURE(GetDecoderBuffer, \
video_context_->GetDecoderBuffer( \
@@ -282,15 +279,15 @@ bool D3D11VP9Accelerator::SubmitDecoderBuffer(
RELEASE_BUFFER(D3D11_VIDEO_DECODER_BUFFER_PICTURE_PARAMETERS);
size_t buffer_offset = 0;
- while (buffer_offset < pic->frame_hdr->frame_size) {
+ while (buffer_offset < pic.frame_hdr->frame_size) {
GET_BUFFER(D3D11_VIDEO_DECODER_BUFFER_BITSTREAM);
- size_t copy_size = pic->frame_hdr->frame_size - buffer_offset;
+ size_t copy_size = pic.frame_hdr->frame_size - buffer_offset;
bool contains_end = true;
if (copy_size > buffer_size) {
copy_size = buffer_size;
contains_end = false;
}
- memcpy(buffer, pic->frame_hdr->data + buffer_offset, copy_size);
+ memcpy(buffer, pic.frame_hdr->data + buffer_offset, copy_size);
RELEASE_BUFFER(D3D11_VIDEO_DECODER_BUFFER_BITSTREAM);
DXVA_Slice_VPx_Short slice_info;
@@ -325,7 +322,7 @@ bool D3D11VP9Accelerator::SubmitDecoderBuffer(
buffers[2].DataOffset = 0;
buffers[2].DataSize = copy_size;
- const DecryptConfig* config = pic->decrypt_config();
+ const DecryptConfig* config = pic.decrypt_config();
if (config) {
buffers[2].pIV = const_cast<char*>(config->iv().data());
buffers[2].IVSize = config->iv().size();
@@ -349,27 +346,26 @@ bool D3D11VP9Accelerator::SubmitDecoderBuffer(
}
bool D3D11VP9Accelerator::SubmitDecode(
- const scoped_refptr<VP9Picture>& picture,
+ scoped_refptr<VP9Picture> picture,
const Vp9SegmentationParams& segmentation_params,
const Vp9LoopFilterParams& loop_filter_params,
const Vp9ReferenceFrameVector& reference_frames,
const base::Closure& on_finished_cb) {
- scoped_refptr<D3D11VP9Picture> pic(
- static_cast<D3D11VP9Picture*>(picture.get()));
+ D3D11VP9Picture* pic = static_cast<D3D11VP9Picture*>(picture.get());
- if (!BeginFrame(pic.get()))
+ if (!BeginFrame(*pic))
return false;
DXVA_PicParams_VP9 pic_params = {};
- CopyFrameParams(pic, &pic_params);
- CopyReferenceFrames(pic, &pic_params, reference_frames);
- CopyFrameRefs(&pic_params, pic);
+ CopyFrameParams(*pic, &pic_params);
+ CopyReferenceFrames(*pic, &pic_params, reference_frames);
+ CopyFrameRefs(&pic_params, *pic);
CopyLoopFilterParams(&pic_params, loop_filter_params);
- CopyQuantParams(&pic_params, pic);
+ CopyQuantParams(&pic_params, *pic);
CopySegmentationParams(&pic_params, segmentation_params);
- CopyHeaderSizeAndID(&pic_params, pic);
+ CopyHeaderSizeAndID(&pic_params, *pic);
- if (!SubmitDecoderBuffer(pic_params, pic))
+ if (!SubmitDecoderBuffer(pic_params, *pic))
return false;
RETURN_ON_HR_FAILURE(DecoderEndFrame,
@@ -379,8 +375,7 @@ bool D3D11VP9Accelerator::SubmitDecode(
return true;
}
-bool D3D11VP9Accelerator::OutputPicture(
- const scoped_refptr<VP9Picture>& picture) {
+bool D3D11VP9Accelerator::OutputPicture(scoped_refptr<VP9Picture> picture) {
D3D11VP9Picture* pic = static_cast<D3D11VP9Picture*>(picture.get());
client_->OutputResult(picture.get(), pic->picture_buffer());
return true;
@@ -390,9 +385,8 @@ bool D3D11VP9Accelerator::IsFrameContextRequired() const {
return false;
}
-bool D3D11VP9Accelerator::GetFrameContext(
- const scoped_refptr<VP9Picture>& picture,
- Vp9FrameContext* frame_context) {
+bool D3D11VP9Accelerator::GetFrameContext(scoped_refptr<VP9Picture> picture,
+ Vp9FrameContext* frame_context) {
return false;
}
diff --git a/chromium/media/gpu/windows/d3d11_vp9_accelerator.h b/chromium/media/gpu/windows/d3d11_vp9_accelerator.h
index a96e8b80db8..c75505d7a5b 100644
--- a/chromium/media/gpu/windows/d3d11_vp9_accelerator.h
+++ b/chromium/media/gpu/windows/d3d11_vp9_accelerator.h
@@ -31,42 +31,41 @@ class D3D11VP9Accelerator : public VP9Decoder::VP9Accelerator {
scoped_refptr<VP9Picture> CreateVP9Picture() override;
- // TODO(crbug/890054): Use constref instead of scoped_refptr.
- bool SubmitDecode(const scoped_refptr<VP9Picture>& picture,
+ bool SubmitDecode(scoped_refptr<VP9Picture> picture,
const Vp9SegmentationParams& segmentation_params,
const Vp9LoopFilterParams& loop_filter_params,
const Vp9ReferenceFrameVector& reference_frames,
const base::Closure& on_finished_cb) override;
- bool OutputPicture(const scoped_refptr<VP9Picture>& picture) override;
+ bool OutputPicture(scoped_refptr<VP9Picture> picture) override;
bool IsFrameContextRequired() const override;
- bool GetFrameContext(const scoped_refptr<VP9Picture>& picture,
+ bool GetFrameContext(scoped_refptr<VP9Picture> picture,
Vp9FrameContext* frame_context) override;
private:
// Helper methods for SubmitDecode
- bool BeginFrame(D3D11VP9Picture* pic);
+ bool BeginFrame(const D3D11VP9Picture& pic);
// TODO(crbug/890054): Use constref instead of scoped_refptr.
- void CopyFrameParams(const scoped_refptr<D3D11VP9Picture>& pic,
+ void CopyFrameParams(const D3D11VP9Picture& pic,
DXVA_PicParams_VP9* pic_params);
- void CopyReferenceFrames(const scoped_refptr<D3D11VP9Picture>& pic,
+ void CopyReferenceFrames(const D3D11VP9Picture& pic,
DXVA_PicParams_VP9* pic_params,
const Vp9ReferenceFrameVector& ref_frames);
void CopyFrameRefs(DXVA_PicParams_VP9* pic_params,
- const scoped_refptr<D3D11VP9Picture>& picture);
+ const D3D11VP9Picture& picture);
void CopyLoopFilterParams(DXVA_PicParams_VP9* pic_params,
const Vp9LoopFilterParams& loop_filter_params);
void CopyQuantParams(DXVA_PicParams_VP9* pic_params,
- const scoped_refptr<D3D11VP9Picture>& pic);
+ const D3D11VP9Picture& pic);
void CopySegmentationParams(DXVA_PicParams_VP9* pic_params,
const Vp9SegmentationParams& segmentation_params);
void CopyHeaderSizeAndID(DXVA_PicParams_VP9* pic_params,
- const scoped_refptr<D3D11VP9Picture>& pic);
+ const D3D11VP9Picture& pic);
bool SubmitDecoderBuffer(const DXVA_PicParams_VP9& pic_params,
- const scoped_refptr<D3D11VP9Picture>& pic);
+ const D3D11VP9Picture& pic);
void RecordFailure(const std::string& fail_type, const std::string& reason);
diff --git a/chromium/media/gpu/windows/dxva_picture_buffer_win.cc b/chromium/media/gpu/windows/dxva_picture_buffer_win.cc
index bef6ac5741a..1e82740ab54 100644
--- a/chromium/media/gpu/windows/dxva_picture_buffer_win.cc
+++ b/chromium/media/gpu/windows/dxva_picture_buffer_win.cc
@@ -312,9 +312,12 @@ bool PbufferPictureBuffer::CopyOutputSampleDataToPictureBuffer(
// when we receive a notification that the copy was completed or when the
// DXVAPictureBuffer instance is destroyed.
decoder_dx11_texture_ = dx11_texture;
- decoder->CopyTexture(dx11_texture, dx11_decoding_texture_.Get(),
- dx11_keyed_mutex_, keyed_mutex_value_, id(),
- input_buffer_id, color_space_);
+ if (!decoder->CopyTexture(dx11_texture, dx11_decoding_texture_.Get(),
+ dx11_keyed_mutex_, keyed_mutex_value_, id(),
+ input_buffer_id, color_space_)) {
+ // |this| might be destroyed.
+ return false;
+ }
return true;
}
D3DSURFACE_DESC surface_desc;
@@ -772,9 +775,12 @@ bool EGLStreamCopyPictureBuffer::CopyOutputSampleDataToPictureBuffer(
// when we receive a notification that the copy was completed or when the
// DXVAPictureBuffer instance is destroyed.
dx11_decoding_texture_ = dx11_texture;
- decoder->CopyTexture(dx11_texture, decoder_copy_texture_.Get(),
- dx11_keyed_mutex_, keyed_mutex_value_, id(),
- input_buffer_id, color_space_);
+ if (!decoder->CopyTexture(dx11_texture, decoder_copy_texture_.Get(),
+ dx11_keyed_mutex_, keyed_mutex_value_, id(),
+ input_buffer_id, color_space_)) {
+ // |this| might be destroyed
+ return false;
+ }
// The texture copy will acquire the current keyed mutex value and release
// with the value + 1.
keyed_mutex_value_++;
diff --git a/chromium/media/gpu/windows/dxva_video_decode_accelerator_win.cc b/chromium/media/gpu/windows/dxva_video_decode_accelerator_win.cc
index e8106226265..1e5ba7fdf62 100644
--- a/chromium/media/gpu/windows/dxva_video_decode_accelerator_win.cc
+++ b/chromium/media/gpu/windows/dxva_video_decode_accelerator_win.cc
@@ -44,7 +44,6 @@
#include "base/win/windows_version.h"
#include "build/build_config.h"
#include "gpu/config/gpu_driver_bug_workarounds.h"
-#include "gpu/config/gpu_finch_features.h"
#include "gpu/config/gpu_preferences.h"
#include "media/base/media_log.h"
#include "media/base/media_switches.h"
@@ -63,39 +62,12 @@
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_fence.h"
#include "ui/gl/gl_surface_egl.h"
+#include "ui/gl/gl_switches.h"
namespace {
-#if defined(ARCH_CPU_X86_FAMILY)
-// AMD
-// Path is appended on to the PROGRAM_FILES base path.
-const wchar_t kAMDVPXDecoderDLLPath[] =
- L"Common Files\\ATI Technologies\\Multimedia\\";
-
-const wchar_t kAMDVP9DecoderDLLName[] =
-#if defined(ARCH_CPU_X86)
- L"amf-mft-decvp9-decoder32.dll";
-#elif defined(ARCH_CPU_X86_64)
- L"amf-mft-decvp9-decoder64.dll";
-#else
-#error Unsupported Windows CPU Architecture
-#endif
-
-const CLSID CLSID_AMDWebmMfVp9Dec = {
- 0x2d2d728a,
- 0x67d6,
- 0x48ab,
- {0x89, 0xfb, 0xa6, 0xec, 0x65, 0x55, 0x49, 0x70}};
-#endif
-
const wchar_t kMSVP9DecoderDLLName[] = L"MSVP9DEC.dll";
-const CLSID MEDIASUBTYPE_VP80 = {
- 0x30385056,
- 0x0000,
- 0x0010,
- {0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}};
-
const CLSID MEDIASUBTYPE_VP90 = {
0x30395056,
0x0000,
@@ -269,12 +241,13 @@ bool IsLegacyGPU(ID3D11Device* device) {
// on the given |video_device|.
bool IsResolutionSupportedForDevice(const gfx::Size& resolution_to_test,
const GUID& decoder_guid,
- ID3D11VideoDevice* video_device) {
+ ID3D11VideoDevice* video_device,
+ DXGI_FORMAT format) {
D3D11_VIDEO_DECODER_DESC desc = {
decoder_guid, // Guid
resolution_to_test.width(), // SampleWidth
resolution_to_test.height(), // SampleHeight
- DXGI_FORMAT_NV12 // OutputFormat
+ format // OutputFormat
};
// We've chosen the least expensive test for identifying if a given resolution
@@ -301,7 +274,8 @@ ResolutionPair GetMaxResolutionsForGUIDs(
const gfx::Size& default_max,
ID3D11VideoDevice* video_device,
const std::vector<GUID>& valid_guids,
- const std::vector<gfx::Size>& resolutions_to_test) {
+ const std::vector<gfx::Size>& resolutions_to_test,
+ DXGI_FORMAT format = DXGI_FORMAT_NV12) {
TRACE_EVENT0("gpu,startup", "GetMaxResolutionsForGUIDs");
ResolutionPair result(default_max, gfx::Size());
@@ -328,16 +302,20 @@ ResolutionPair GetMaxResolutionsForGUIDs(
}));
for (const auto& res : resolutions_to_test) {
- if (!IsResolutionSupportedForDevice(res, decoder_guid, video_device))
+ if (!IsResolutionSupportedForDevice(res, decoder_guid, video_device,
+ format)) {
break;
+ }
result.first = res;
}
// The max supported portrait resolution should be just be a w/h flip of the
// max supported landscape resolution.
gfx::Size flipped(result.first.height(), result.first.width());
- if (IsResolutionSupportedForDevice(flipped, decoder_guid, video_device))
+ if (IsResolutionSupportedForDevice(flipped, decoder_guid, video_device,
+ format)) {
result.second = flipped;
+ }
return result;
}
@@ -347,8 +325,8 @@ ResolutionPair GetMaxResolutionsForGUIDs(
namespace media {
static const VideoCodecProfile kSupportedProfiles[] = {
- H264PROFILE_BASELINE, H264PROFILE_MAIN, H264PROFILE_HIGH,
- VP8PROFILE_ANY, VP9PROFILE_PROFILE0, VP9PROFILE_PROFILE2};
+ H264PROFILE_BASELINE, H264PROFILE_MAIN, H264PROFILE_HIGH,
+ VP9PROFILE_PROFILE0, VP9PROFILE_PROFILE2};
CreateDXGIDeviceManager
DXVAVideoDecodeAccelerator::create_dxgi_device_manager_ = NULL;
@@ -723,9 +701,7 @@ DXVAVideoDecodeAccelerator::DXVAVideoDecodeAccelerator(
use_keyed_mutex_(false),
using_angle_device_(false),
enable_accelerated_vpx_decode_(
- workarounds.disable_accelerated_vpx_decode
- ? gpu::GpuPreferences::VpxDecodeVendors::VPX_VENDOR_NONE
- : gpu_preferences.enable_accelerated_vpx_decode),
+ !workarounds.disable_accelerated_vpx_decode),
processing_config_changed_(false),
weak_this_factory_(this) {
weak_ptr_ = weak_this_factory_.GetWeakPtr();
@@ -773,7 +749,7 @@ bool DXVAVideoDecodeAccelerator::Initialize(const Config& config,
break;
}
}
- RETURN_ON_FAILURE(profile_supported, "Unsupported h.264, vp8, or vp9 profile",
+ RETURN_ON_FAILURE(profile_supported, "Unsupported h.264 or vp9 profile",
false);
if (config.profile == VP9PROFILE_PROFILE2 ||
@@ -806,7 +782,7 @@ bool DXVAVideoDecodeAccelerator::Initialize(const Config& config,
// copy does not exist on Windows 7. Look into an alternate approach
// and enable the code below.
#if defined(ENABLE_DX11_FOR_WIN7)
- if (base::win::GetVersion() == base::win::VERSION_WIN7) {
+ if (base::win::GetVersion() == base::win::Version::WIN7) {
dxgi_manager_dll = ::GetModuleHandle(L"mshtmlmedia.dll");
RETURN_ON_FAILURE(dxgi_manager_dll,
"mshtmlmedia.dll is required for decoding", false);
@@ -1122,7 +1098,7 @@ bool DXVAVideoDecodeAccelerator::CreateDX11DevManager() {
return true;
}
-void DXVAVideoDecodeAccelerator::Decode(const BitstreamBuffer& bitstream) {
+void DXVAVideoDecodeAccelerator::Decode(BitstreamBuffer bitstream) {
Decode(bitstream.ToDecoderBuffer(), bitstream.id());
}
@@ -1443,10 +1419,11 @@ DXVAVideoDecodeAccelerator::GetSupportedProfiles(
// 1920 x 1088. We use 1088 to account for 16x16 macroblocks.
ResolutionPair max_h264_resolutions(gfx::Size(1920, 1088), gfx::Size());
- // VPX has no default resolutions since it may not even be supported.
- ResolutionPair max_vpx_resolutions;
+ // VP9 has no default resolutions since it may not even be supported.
+ ResolutionPair max_vp9_profile0_resolutions;
+ ResolutionPair max_vp9_profile2_resolutions;
- if (base::win::GetVersion() > base::win::VERSION_WIN7) {
+ if (base::win::GetVersion() > base::win::Version::WIN7) {
// To detect if a driver supports the desired resolutions, we try and create
// a DXVA decoder instance for that resolution and profile. If that succeeds
// we assume that the driver supports decoding for that resolution.
@@ -1464,57 +1441,72 @@ DXVAVideoDecodeAccelerator::GetSupportedProfiles(
{gfx::Size(2560, 1440), gfx::Size(3840, 2160),
gfx::Size(4096, 2160), gfx::Size(4096, 2304)});
- // Despite the name this is the GUID for VP8/VP9.
- if (preferences.enable_accelerated_vpx_decode &&
- !workarounds.disable_accelerated_vpx_decode) {
- max_vpx_resolutions = GetMaxResolutionsForGUIDs(
- max_vpx_resolutions.first, video_device.Get(),
+ if (!workarounds.disable_accelerated_vpx_decode) {
+ max_vp9_profile0_resolutions = GetMaxResolutionsForGUIDs(
+ max_vp9_profile0_resolutions.first, video_device.Get(),
{D3D11_DECODER_PROFILE_VP9_VLD_PROFILE0},
{gfx::Size(4096, 2160), gfx::Size(4096, 2304),
gfx::Size(7680, 4320), gfx::Size(8192, 4320),
gfx::Size(8192, 8192)});
+
+ // RS3 has issues with VP9.2 decoding. See https://crbug.com/937108.
+ if (base::win::GetVersion() != base::win::Version::WIN10_RS3) {
+ max_vp9_profile2_resolutions = GetMaxResolutionsForGUIDs(
+ max_vp9_profile2_resolutions.first, video_device.Get(),
+ {D3D11_DECODER_PROFILE_VP9_VLD_10BIT_PROFILE2},
+ {gfx::Size(4096, 2160), gfx::Size(4096, 2304),
+ gfx::Size(7680, 4320), gfx::Size(8192, 4320),
+ gfx::Size(8192, 8192)},
+ DXGI_FORMAT_P010);
+ }
}
}
}
}
for (const auto& supported_profile : kSupportedProfiles) {
- const bool kIsVPX = supported_profile >= VP8PROFILE_MIN &&
+ const bool is_h264 = supported_profile >= H264PROFILE_MIN &&
+ supported_profile <= H264PROFILE_MAX;
+ const bool is_vp9 = supported_profile >= VP9PROFILE_MIN &&
supported_profile <= VP9PROFILE_MAX;
+ DCHECK(is_h264 || is_vp9);
+
+ ResolutionPair max_resolutions;
+ if (is_h264) {
+ max_resolutions = max_h264_resolutions;
+ } else if (supported_profile == VP9PROFILE_PROFILE0) {
+ max_resolutions = max_vp9_profile0_resolutions;
+ } else if (supported_profile == VP9PROFILE_PROFILE2) {
+ max_resolutions = max_vp9_profile2_resolutions;
+ }
- // Skip adding VPX profiles if it's not supported or disabled.
- if (kIsVPX && max_vpx_resolutions.first.IsEmpty())
+ // Skip adding VP9 profiles if it's not supported or disabled.
+ if (is_vp9 && max_resolutions.first.IsEmpty())
continue;
- const bool kIsH264 = supported_profile >= H264PROFILE_MIN &&
- supported_profile <= H264PROFILE_MAX;
- DCHECK(kIsH264 || kIsVPX);
-
// Windows Media Foundation H.264 decoding does not support decoding videos
// with any dimension smaller than 48 pixels:
// http://msdn.microsoft.com/en-us/library/windows/desktop/dd797815
//
// TODO(dalecurtis): These values are too low. We should only be using
// hardware decode for videos above ~360p, see http://crbug.com/684792.
- const gfx::Size kMinResolution =
- kIsH264 ? gfx::Size(48, 48) : gfx::Size(16, 16);
+ const gfx::Size min_resolution =
+ is_h264 ? gfx::Size(48, 48) : gfx::Size(16, 16);
{
SupportedProfile profile;
profile.profile = supported_profile;
- profile.min_resolution = kMinResolution;
- profile.max_resolution =
- kIsH264 ? max_h264_resolutions.first : max_vpx_resolutions.first;
+ profile.min_resolution = min_resolution;
+ profile.max_resolution = max_resolutions.first;
profiles.push_back(profile);
}
- const gfx::Size kPortraitMax =
- kIsH264 ? max_h264_resolutions.second : max_vpx_resolutions.second;
- if (!kPortraitMax.IsEmpty()) {
+ const gfx::Size portrait_max_resolution = max_resolutions.second;
+ if (!portrait_max_resolution.IsEmpty()) {
SupportedProfile profile;
profile.profile = supported_profile;
- profile.min_resolution = kMinResolution;
- profile.max_resolution = kPortraitMax;
+ profile.min_resolution = min_resolution;
+ profile.max_resolution = portrait_max_resolution;
profiles.push_back(profile);
}
}
@@ -1528,7 +1520,7 @@ void DXVAVideoDecodeAccelerator::PreSandboxInitialization() {
::LoadLibrary(mfdll);
::LoadLibrary(L"dxva2.dll");
- if (base::win::GetVersion() >= base::win::VERSION_WIN8) {
+ if (base::win::GetVersion() >= base::win::Version::WIN8) {
LoadLibrary(L"msvproc.dll");
} else {
#if defined(ENABLE_DX11_FOR_WIN7)
@@ -1566,43 +1558,13 @@ bool DXVAVideoDecodeAccelerator::InitDecoder(VideoCodecProfile profile) {
codec_ = kCodecH264;
clsid = __uuidof(CMSH264DecoderMFT);
} else if (enable_accelerated_vpx_decode_ &&
- (profile == VP8PROFILE_ANY || profile == VP9PROFILE_PROFILE0 ||
- profile == VP9PROFILE_PROFILE1 ||
- profile == VP9PROFILE_PROFILE2 ||
- profile == VP9PROFILE_PROFILE3)) {
- if (profile != VP8PROFILE_ANY &&
- (enable_accelerated_vpx_decode_ &
- gpu::GpuPreferences::VPX_VENDOR_MICROSOFT)) {
- codec_ = kCodecVP9;
- clsid = CLSID_MSVPxDecoder;
- decoder_dll = ::LoadLibrary(kMSVP9DecoderDLLName);
- if (decoder_dll)
- using_ms_vp9_mft_ = true;
- }
-
- int program_files_key = base::DIR_PROGRAM_FILES;
- if (base::win::OSInfo::GetInstance()->wow64_status() ==
- base::win::OSInfo::WOW64_ENABLED) {
- program_files_key = base::DIR_PROGRAM_FILES6432;
- }
-
-// Avoid loading AMD VP9 decoder on Windows ARM64.
-#if defined(ARCH_CPU_X86_FAMILY)
- // AMD
- if (!decoder_dll &&
- enable_accelerated_vpx_decode_ & gpu::GpuPreferences::VPX_VENDOR_AMD &&
- profile == VP9PROFILE_PROFILE0) {
- base::FilePath dll_path;
- if (base::PathService::Get(program_files_key, &dll_path)) {
- codec_ = media::kCodecVP9;
- dll_path = dll_path.Append(kAMDVPXDecoderDLLPath);
- dll_path = dll_path.Append(kAMDVP9DecoderDLLName);
- clsid = CLSID_AMDWebmMfVp9Dec;
- decoder_dll = ::LoadLibraryEx(dll_path.value().data(), NULL,
- LOAD_WITH_ALTERED_SEARCH_PATH);
- }
- }
-#endif
+ (profile >= VP9PROFILE_PROFILE0 &&
+ profile <= VP9PROFILE_PROFILE3)) {
+ codec_ = kCodecVP9;
+ clsid = CLSID_MSVPxDecoder;
+ decoder_dll = ::LoadLibrary(kMSVP9DecoderDLLName);
+ if (decoder_dll)
+ using_ms_vp9_mft_ = true;
}
if (!decoder_dll) {
@@ -1784,8 +1746,6 @@ bool DXVAVideoDecodeAccelerator::SetDecoderInputMediaType() {
if (codec_ == kCodecH264) {
hr = media_type->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_H264);
- } else if (codec_ == kCodecVP8) {
- hr = media_type->SetGUID(MF_MT_SUBTYPE, MEDIASUBTYPE_VP80);
} else if (codec_ == kCodecVP9) {
hr = media_type->SetGUID(MF_MT_SUBTYPE, MEDIASUBTYPE_VP90);
} else {
@@ -2745,7 +2705,7 @@ void DXVAVideoDecodeAccelerator::BindPictureBufferToSample(
base::Unretained(this)));
}
-void DXVAVideoDecodeAccelerator::CopyTexture(
+bool DXVAVideoDecodeAccelerator::CopyTexture(
ID3D11Texture2D* src_texture,
ID3D11Texture2D* dest_texture,
Microsoft::WRL::ComPtr<IDXGIKeyedMutex> dest_keyed_mutex,
@@ -2770,7 +2730,7 @@ void DXVAVideoDecodeAccelerator::CopyTexture(
color_space)) {
RETURN_AND_NOTIFY_ON_FAILURE(false,
"Failed to initialize D3D11 video processor.",
- PLATFORM_FAILURE, );
+ PLATFORM_FAILURE, false);
}
OutputBuffers::iterator it = output_picture_buffers_.find(picture_buffer_id);
@@ -2793,6 +2753,7 @@ void DXVAVideoDecodeAccelerator::CopyTexture(
dest_keyed_mutex, keyed_mutex_value,
input_sample_for_conversion, picture_buffer_id,
input_buffer_id));
+ return true;
}
void DXVAVideoDecodeAccelerator::CopyTextureOnDecoderThread(
diff --git a/chromium/media/gpu/windows/dxva_video_decode_accelerator_win.h b/chromium/media/gpu/windows/dxva_video_decode_accelerator_win.h
index c5d45ad88de..93ec74c5f5a 100644
--- a/chromium/media/gpu/windows/dxva_video_decode_accelerator_win.h
+++ b/chromium/media/gpu/windows/dxva_video_decode_accelerator_win.h
@@ -103,7 +103,7 @@ class MEDIA_GPU_EXPORT DXVAVideoDecodeAccelerator
// VideoDecodeAccelerator implementation.
bool Initialize(const Config& config, Client* client) override;
- void Decode(const BitstreamBuffer& bitstream) override;
+ void Decode(BitstreamBuffer bitstream) override;
void Decode(scoped_refptr<DecoderBuffer> buffer,
int32_t bitstream_id) override;
void AssignPictureBuffers(const std::vector<PictureBuffer>& buffers) override;
@@ -300,8 +300,8 @@ class MEDIA_GPU_EXPORT DXVAVideoDecodeAccelerator
int input_buffer_id);
// Copies the source texture |src_texture| to the destination |dest_texture|.
- // The copying is done on the decoder thread.
- void CopyTexture(ID3D11Texture2D* src_texture,
+ // The copying is done on the decoder thread. Returns true on success.
+ bool CopyTexture(ID3D11Texture2D* src_texture,
ID3D11Texture2D* dest_texture,
Microsoft::WRL::ComPtr<IDXGIKeyedMutex> dest_keyed_mutex,
uint64_t keyed_mutex_value,
@@ -559,8 +559,8 @@ class MEDIA_GPU_EXPORT DXVAVideoDecodeAccelerator
// Set to true if we are sharing ANGLE's device.
bool using_angle_device_;
- // Enables experimental hardware acceleration for VP8/VP9 video decoding.
- const gpu::GpuPreferences::VpxDecodeVendors enable_accelerated_vpx_decode_;
+ // Enables hardware acceleration for VP9 video decoding.
+ const bool enable_accelerated_vpx_decode_;
// The media foundation H.264 decoder has problems handling changes like
// resolution change, bitrate change etc. If we reinitialize the decoder
diff --git a/chromium/media/gpu/windows/media_foundation_video_encode_accelerator_win.cc b/chromium/media/gpu/windows/media_foundation_video_encode_accelerator_win.cc
index a493a9b4af5..593a8dc8c2b 100644
--- a/chromium/media/gpu/windows/media_foundation_video_encode_accelerator_win.cc
+++ b/chromium/media/gpu/windows/media_foundation_video_encode_accelerator_win.cc
@@ -16,6 +16,8 @@
#include <utility>
#include <vector>
+#include "base/memory/shared_memory_mapping.h"
+#include "base/memory/unsafe_shared_memory_region.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/trace_event.h"
#include "base/win/scoped_co_mem.h"
@@ -57,7 +59,7 @@ eAVEncH264VProfile GetH264VProfile(VideoCodecProfile profile) {
return eAVEncH264VProfile_Main;
case H264PROFILE_HIGH: {
// eAVEncH264VProfile_High requires Windows 8.
- if (base::win::GetVersion() < base::win::VERSION_WIN8) {
+ if (base::win::GetVersion() < base::win::Version::WIN8) {
return eAVEncH264VProfile_unknown;
}
return eAVEncH264VProfile_High;
@@ -89,11 +91,11 @@ class MediaFoundationVideoEncodeAccelerator::EncodeOutput {
struct MediaFoundationVideoEncodeAccelerator::BitstreamBufferRef {
BitstreamBufferRef(int32_t id,
- std::unique_ptr<base::SharedMemory> shm,
+ base::WritableSharedMemoryMapping mapping,
size_t size)
- : id(id), shm(std::move(shm)), size(size) {}
+ : id(id), mapping(std::move(mapping)), size(size) {}
const int32_t id;
- const std::unique_ptr<base::SharedMemory> shm;
+ const base::WritableSharedMemoryMapping mapping;
const size_t size;
private:
@@ -254,7 +256,7 @@ bool MediaFoundationVideoEncodeAccelerator::Initialize(const Config& config,
}
void MediaFoundationVideoEncodeAccelerator::Encode(
- const scoped_refptr<VideoFrame>& frame,
+ scoped_refptr<VideoFrame> frame,
bool force_keyframe) {
DVLOG(3) << __func__;
DCHECK(main_client_task_runner_->BelongsToCurrentThread());
@@ -262,12 +264,12 @@ void MediaFoundationVideoEncodeAccelerator::Encode(
encoder_thread_task_runner_->PostTask(
FROM_HERE,
base::BindOnce(&MediaFoundationVideoEncodeAccelerator::EncodeTask,
- encoder_task_weak_factory_.GetWeakPtr(), frame,
+ encoder_task_weak_factory_.GetWeakPtr(), std::move(frame),
force_keyframe));
}
void MediaFoundationVideoEncodeAccelerator::UseOutputBitstreamBuffer(
- const BitstreamBuffer& buffer) {
+ BitstreamBuffer buffer) {
DVLOG(3) << __func__ << ": buffer size=" << buffer.size();
DCHECK(main_client_task_runner_->BelongsToCurrentThread());
@@ -278,16 +280,19 @@ void MediaFoundationVideoEncodeAccelerator::UseOutputBitstreamBuffer(
return;
}
- std::unique_ptr<base::SharedMemory> shm(
- new base::SharedMemory(buffer.handle(), false));
- if (!shm->Map(buffer.size())) {
+ auto region =
+ base::UnsafeSharedMemoryRegion::Deserialize(buffer.TakeRegion());
+ auto mapping = region.Map();
+ if (!region.IsValid() || !mapping.IsValid()) {
DLOG(ERROR) << "Failed mapping shared memory.";
NotifyError(kPlatformFailureError);
return;
}
+ // After mapping, |region| is no longer necessary and it can be
+ // destroyed. |mapping| will keep the shared memory region open.
std::unique_ptr<BitstreamBufferRef> buffer_ref(
- new BitstreamBufferRef(buffer.id(), std::move(shm), buffer.size()));
+ new BitstreamBufferRef(buffer.id(), std::move(mapping), buffer.size()));
encoder_thread_task_runner_->PostTask(
FROM_HERE,
base::BindOnce(
@@ -343,7 +348,7 @@ bool MediaFoundationVideoEncodeAccelerator::CreateHardwareEncoderMFT() {
DCHECK(main_client_task_runner_->BelongsToCurrentThread());
if (!compatible_with_win7_ &&
- base::win::GetVersion() < base::win::VERSION_WIN8) {
+ base::win::GetVersion() < base::win::Version::WIN8) {
DVLOG(ERROR) << "Windows versions earlier than 8 are not supported.";
return false;
}
@@ -661,7 +666,7 @@ void MediaFoundationVideoEncodeAccelerator::ProcessOutput() {
{
MediaBufferScopedPointer scoped_buffer(output_buffer.Get());
- memcpy(buffer_ref->shm->memory(), scoped_buffer.get(), size);
+ memcpy(buffer_ref->mapping.memory(), scoped_buffer.get(), size);
}
main_client_task_runner_->PostTask(
@@ -699,7 +704,7 @@ void MediaFoundationVideoEncodeAccelerator::ReturnBitstreamBuffer(
DVLOG(3) << __func__;
DCHECK(encoder_thread_task_runner_->BelongsToCurrentThread());
- memcpy(buffer_ref->shm->memory(), encode_output->memory(),
+ memcpy(buffer_ref->mapping.memory(), encode_output->memory(),
encode_output->size());
main_client_task_runner_->PostTask(
FROM_HERE,
diff --git a/chromium/media/gpu/windows/media_foundation_video_encode_accelerator_win.h b/chromium/media/gpu/windows/media_foundation_video_encode_accelerator_win.h
index 4355387004b..df7052d9fa0 100644
--- a/chromium/media/gpu/windows/media_foundation_video_encode_accelerator_win.h
+++ b/chromium/media/gpu/windows/media_foundation_video_encode_accelerator_win.h
@@ -41,9 +41,8 @@ class MEDIA_GPU_EXPORT MediaFoundationVideoEncodeAccelerator
// VideoEncodeAccelerator implementation.
VideoEncodeAccelerator::SupportedProfiles GetSupportedProfiles() override;
bool Initialize(const Config& config, Client* client) override;
- void Encode(const scoped_refptr<VideoFrame>& frame,
- bool force_keyframe) override;
- void UseOutputBitstreamBuffer(const BitstreamBuffer& buffer) override;
+ void Encode(scoped_refptr<VideoFrame> frame, bool force_keyframe) override;
+ void UseOutputBitstreamBuffer(BitstreamBuffer buffer) override;
void RequestEncodingParametersChange(uint32_t bitrate,
uint32_t framerate) override;
void Destroy() override;
diff --git a/chromium/media/gpu/windows/output_with_release_mailbox_cb.h b/chromium/media/gpu/windows/output_with_release_mailbox_cb.h
index 499d6e5a352..d653c3a1d32 100644
--- a/chromium/media/gpu/windows/output_with_release_mailbox_cb.h
+++ b/chromium/media/gpu/windows/output_with_release_mailbox_cb.h
@@ -18,8 +18,7 @@ namespace deprecated {
// Similar to VideoFrame::ReleaseMailboxCB for now.
using ReleaseMailboxCB = base::OnceCallback<void(const gpu::SyncToken&)>;
using OutputWithReleaseMailboxCB =
- base::RepeatingCallback<void(ReleaseMailboxCB,
- const scoped_refptr<VideoFrame>&)>;
+ base::RepeatingCallback<void(ReleaseMailboxCB, scoped_refptr<VideoFrame>)>;
} // namespace deprecated
} // namespace media
diff --git a/chromium/media/learning/common/BUILD.gn b/chromium/media/learning/common/BUILD.gn
index 20e2cac8fd8..4e208d3a9b7 100644
--- a/chromium/media/learning/common/BUILD.gn
+++ b/chromium/media/learning/common/BUILD.gn
@@ -38,6 +38,7 @@ component("common") {
deps = [
"//base",
+ "//services/metrics/public/cpp:metrics_cpp",
]
}
diff --git a/chromium/media/learning/common/DEPS b/chromium/media/learning/common/DEPS
new file mode 100644
index 00000000000..cde7e700d55
--- /dev/null
+++ b/chromium/media/learning/common/DEPS
@@ -0,0 +1,3 @@
+include_rules = [
+ "+services/metrics",
+]
diff --git a/chromium/media/learning/common/learning_task.cc b/chromium/media/learning/common/learning_task.cc
index 07cc4079005..fa5c088c8e2 100644
--- a/chromium/media/learning/common/learning_task.cc
+++ b/chromium/media/learning/common/learning_task.cc
@@ -4,6 +4,8 @@
#include "media/learning/common/learning_task.h"
+#include "base/hash/hash.h"
+
namespace media {
namespace learning {
@@ -23,5 +25,9 @@ LearningTask::LearningTask(const LearningTask&) = default;
LearningTask::~LearningTask() = default;
+LearningTask::Id LearningTask::GetId() const {
+ return base::PersistentHash(name);
+}
+
} // namespace learning
} // namespace media
diff --git a/chromium/media/learning/common/learning_task.h b/chromium/media/learning/common/learning_task.h
index ed5f4f76ac3..258dd39016f 100644
--- a/chromium/media/learning/common/learning_task.h
+++ b/chromium/media/learning/common/learning_task.h
@@ -23,6 +23,9 @@ namespace learning {
// TODO(liberato): should this be in impl? Probably not if we want to allow
// registering tasks.
struct COMPONENT_EXPORT(LEARNING_COMMON) LearningTask {
+ // Numeric ID for this task for UKM reporting.
+ using Id = uint64_t;
+
// Not all models support all feature / target descriptions. For example,
// NaiveBayes requires kUnordered features. Similarly, LogLinear woudln't
// support kUnordered features or targets. kRandomForest might support more
@@ -84,7 +87,11 @@ struct COMPONENT_EXPORT(LEARNING_COMMON) LearningTask {
LearningTask(const LearningTask&);
~LearningTask();
- // Unique name for this learner.
+ // Return a stable, unique numeric ID for this task. This requires a stable,
+ // unique |name| for the task. This is used to identify this task in UKM.
+ Id GetId() const;
+
+ // Unique name for this task.
std::string name;
Model model = Model::kExtraTrees;
@@ -171,7 +178,13 @@ struct COMPONENT_EXPORT(LEARNING_COMMON) LearningTask {
// into different confusion matrices in the same histogram, evenly spaced
// from 0 to |max_reporting_weight|, with one additional bucket for everything
// larger than that. The number of buckets is |num_reporting_weight_buckets|.
- double max_reporting_weight = 99.;
+ // The default value of 0 is special; it means that we should split up the
+ // buckets such that the last bucket means "entirely full training set", while
+ // the remainder are evenly spaced. This is the same as setting it to
+ // |max_data_set_size - 1|. Of course, |max_data_set_size| is a number of
+ // examples, not a weight, so this only makes any sense at all if all of the
+ // examples have the default weight of 1.
+ double max_reporting_weight = 0.;
// Number of buckets that we'll use to split out the confusion matrix by
// training weight. The last one is reserved for "all", while the others are
@@ -183,6 +196,20 @@ struct COMPONENT_EXPORT(LEARNING_COMMON) LearningTask {
// [0-9] [10-19] ... [90-99] [100 and up]. This makes sense if the training
// set maximum size is the default of 100, and each example has a weight of 1.
int num_reporting_weight_buckets = 11;
+
+ // If set, then we'll record results to UKM. Note that this may require an
+ // additional privacy review for your learning task! Also note that it is
+ // currently exclusive with |uma_hacky_confusion_matrix| for no technical
+ // reason whatsoever.
+ bool report_via_ukm = false;
+
+ // When reporting via UKM, we will scale observed / predicted values. These
+ // are the minimum and maximum target / observed values that will be
+ // representable. The UKM record will scale / translate this range into
+ // 0-100 integer, inclusive. This is intended for regression targets.
+ // Classification will do something else.
+ double ukm_min_input_value = 0.0;
+ double ukm_max_input_value = 1.0;
};
} // namespace learning
diff --git a/chromium/media/learning/common/learning_task_controller.h b/chromium/media/learning/common/learning_task_controller.h
index 1e224bde59e..ef098330458 100644
--- a/chromium/media/learning/common/learning_task_controller.h
+++ b/chromium/media/learning/common/learning_task_controller.h
@@ -11,6 +11,7 @@
#include "base/unguessable_token.h"
#include "media/learning/common/labelled_example.h"
#include "media/learning/common/learning_task.h"
+#include "services/metrics/public/cpp/ukm_source_id.h"
namespace media {
namespace learning {
@@ -21,13 +22,16 @@ namespace learning {
// SourceId, which most callers don't care about.
struct ObservationCompletion {
ObservationCompletion() = default;
- /* implicit */ ObservationCompletion(const TargetValue& target)
- : target_value(target) {}
- ObservationCompletion(const TargetValue& target, WeightType w)
- : target_value(target), weight(w) {}
+ /* implicit */ ObservationCompletion(const TargetValue& target,
+ WeightType w = 1.,
+ ukm::SourceId id = ukm::kInvalidSourceId)
+ : target_value(target), weight(w), source_id(id) {}
TargetValue target_value;
- WeightType weight = 1u;
+ WeightType weight;
+
+ // Optional, and ignored from the renderer.
+ ukm::SourceId source_id;
};
// Client for a single learning task. Intended to be the primary API for client
diff --git a/chromium/media/learning/impl/BUILD.gn b/chromium/media/learning/impl/BUILD.gn
index 27ed8c2dd8b..32ffc7471d8 100644
--- a/chromium/media/learning/impl/BUILD.gn
+++ b/chromium/media/learning/impl/BUILD.gn
@@ -50,6 +50,7 @@ component("impl") {
deps = [
"//base",
"//services/metrics/public/cpp:metrics_cpp",
+ "//services/metrics/public/cpp:ukm_builders",
]
public_deps = [
@@ -80,6 +81,7 @@ source_set("unit_tests") {
deps = [
":impl",
"//base/test:test_support",
+ "//components/ukm:test_support",
"//media:test_support",
"//media/learning/impl",
"//testing/gtest",
diff --git a/chromium/media/learning/impl/DEPS b/chromium/media/learning/impl/DEPS
new file mode 100644
index 00000000000..9eaf6b73a54
--- /dev/null
+++ b/chromium/media/learning/impl/DEPS
@@ -0,0 +1,9 @@
+include_rules = [
+ "+services/metrics",
+]
+
+specific_include_rules = {
+ "distribution_reporter_unittest.cc" : [
+ "+components/ukm",
+ ],
+}
diff --git a/chromium/media/learning/impl/distribution_reporter.cc b/chromium/media/learning/impl/distribution_reporter.cc
index a42f2c504a9..3f110be746e 100644
--- a/chromium/media/learning/impl/distribution_reporter.cc
+++ b/chromium/media/learning/impl/distribution_reporter.cc
@@ -6,6 +6,8 @@
#include "base/bind.h"
#include "base/metrics/histogram_functions.h"
+#include "services/metrics/public/cpp/ukm_builders.h"
+#include "services/metrics/public/cpp/ukm_recorder.h"
namespace media {
namespace learning {
@@ -144,6 +146,12 @@ class UmaRegressionReporter : public DistributionReporter {
const int n_buckets = task().num_reporting_weight_buckets;
DCHECK_LE(n_buckets, max_buckets);
+ // If the max reporting weight is zero, then default to splitting the
+ // buckets evenly, with the last bucket being "completely full set".
+ const int max_reporting_weight = task().max_reporting_weight
+ ? task().max_reporting_weight
+ : task().max_data_set_size - 1;
+
// We use one fewer buckets, to save one for the overflow. Buckets are
// numbered from 0 to |n_buckets-1|, inclusive. In other words, when the
// training weight is equal to |max_reporting_weight|, we still want to
@@ -151,7 +159,7 @@ class UmaRegressionReporter : public DistributionReporter {
// we divide; only things over the max go into the last bucket.
uma_bucket_number =
std::min<int>((n_buckets - 1) * info.total_training_weight /
- (task().max_reporting_weight + 1),
+ (max_reporting_weight + 1),
n_buckets - 1);
std::string base(kByTrainingWeightBase);
@@ -162,16 +170,68 @@ class UmaRegressionReporter : public DistributionReporter {
}
};
+// Ukm-based reporter.
+class UkmRegressionReporter : public DistributionReporter {
+ public:
+ UkmRegressionReporter(const LearningTask& task)
+ : DistributionReporter(task) {}
+
+ void OnPrediction(const PredictionInfo& info,
+ TargetHistogram predicted) override {
+ DCHECK_EQ(task().target_description.ordering,
+ LearningTask::Ordering::kNumeric);
+
+ DCHECK_NE(info.source_id, ukm::kInvalidSourceId);
+
+ ukm::UkmRecorder* ukm_recorder = ukm::UkmRecorder::Get();
+ if (!ukm_recorder)
+ return;
+
+ ukm::builders::Media_Learning_PredictionRecord builder(info.source_id);
+ builder.SetLearningTask(task().GetId());
+ builder.SetObservedValue(Bucketize(info.observed.value()));
+ builder.SetPredictedValue(Bucketize(predicted.Average()));
+ builder.SetTrainingDataTotalWeight(info.total_training_weight);
+ builder.SetTrainingDataSize(info.total_training_examples);
+ // TODO(liberato): we'd add feature subsets here.
+
+ builder.Record(ukm_recorder);
+ }
+
+ // Scale and translate |value| from the range specified in the task to 0-100.
+ // We scale it so that the buckets have an equal amount of the input range in
+ // each of them.
+ int Bucketize(double value) {
+ const int output_min = 0;
+ const int output_max = 100;
+ // Scale it so that input_min -> output_min and input_max -> output_max.
+ // Note that the input width is |input_max - input_min|, but there are
+ // |output_max - output_min + 1| output buckets. That's why we don't
+ // add one to the denominator, but we do add one to the numerator.
+ double scaled_value =
+ ((output_max - output_min + 1) * (value - task().ukm_min_input_value)) /
+ (task().ukm_max_input_value - task().ukm_min_input_value) +
+ output_min;
+ // Clip to [0, 100] and truncate to an integer.
+ return std::min(std::max(static_cast<int>(scaled_value), output_min),
+ output_max);
+ }
+};
+
std::unique_ptr<DistributionReporter> DistributionReporter::Create(
const LearningTask& task) {
// We only know how to report regression tasks right now.
if (task.target_description.ordering != LearningTask::Ordering::kNumeric)
return nullptr;
+ // We can report hacky UMA or non-hacky UKM. We could report both if we had
+ // a DistributionReporter that forwarded predictions to each, but we don't.
if (task.uma_hacky_aggregate_confusion_matrix ||
task.uma_hacky_by_training_weight_confusion_matrix ||
task.uma_hacky_by_feature_subset_confusion_matrix) {
return std::make_unique<UmaRegressionReporter>(task);
+ } else if (task.report_via_ukm) {
+ return std::make_unique<UkmRegressionReporter>(task);
}
return nullptr;
diff --git a/chromium/media/learning/impl/distribution_reporter.h b/chromium/media/learning/impl/distribution_reporter.h
index 99310aa0ed8..349302e5535 100644
--- a/chromium/media/learning/impl/distribution_reporter.h
+++ b/chromium/media/learning/impl/distribution_reporter.h
@@ -15,6 +15,7 @@
#include "media/learning/common/learning_task.h"
#include "media/learning/impl/model.h"
#include "media/learning/impl/target_histogram.h"
+#include "services/metrics/public/cpp/ukm_source_id.h"
namespace media {
namespace learning {
@@ -29,6 +30,19 @@ class COMPONENT_EXPORT(LEARNING_IMPL) DistributionReporter {
// What value was observed?
TargetValue observed;
+ // UKM source id to use when logging this result.
+ // This will be filled in by the LearningTaskController. For example, the
+ // MojoLearningTaskControllerService will be created in the browser by the
+ // MediaMetricsProvider, which gets the SourceId via callback from the
+ // RenderFrameHostDelegate on construction.
+ //
+ // TODO(liberato): Right now, this is not filled in anywhere. When the
+ // mojo service is created (MediaMetricsProvider), record the source id and
+ // memorize it in any MojoLearningTaskControllerService that's created by
+ // the MediaMetricsProvider, either directly or in a wrapper for the
+ // mojo controller.
+ ukm::SourceId source_id = ukm::kInvalidSourceId;
+
// Total weight of the training data used to create this model.
double total_training_weight = 0.;
diff --git a/chromium/media/learning/impl/distribution_reporter_unittest.cc b/chromium/media/learning/impl/distribution_reporter_unittest.cc
index 43679be9a14..55f1f4a2f47 100644
--- a/chromium/media/learning/impl/distribution_reporter_unittest.cc
+++ b/chromium/media/learning/impl/distribution_reporter_unittest.cc
@@ -7,6 +7,7 @@
#include "base/bind.h"
#include "base/test/scoped_task_environment.h"
+#include "components/ukm/test_ukm_recorder.h"
#include "media/learning/common/learning_task.h"
#include "media/learning/impl/distribution_reporter.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -16,7 +17,9 @@ namespace learning {
class DistributionReporterTest : public testing::Test {
public:
- DistributionReporterTest() {
+ DistributionReporterTest()
+ : ukm_recorder_(std::make_unique<ukm::TestAutoSetUkmRecorder>()),
+ source_id_(123) {
task_.name = "TaskName";
// UMA reporting requires a numeric target.
task_.target_description.ordering = LearningTask::Ordering::kNumeric;
@@ -24,9 +27,19 @@ class DistributionReporterTest : public testing::Test {
base::test::ScopedTaskEnvironment scoped_task_environment_;
+ std::unique_ptr<ukm::TestAutoSetUkmRecorder> ukm_recorder_;
+
LearningTask task_;
+ ukm::SourceId source_id_;
+
std::unique_ptr<DistributionReporter> reporter_;
+
+ TargetHistogram HistogramFor(double value) {
+ TargetHistogram histogram;
+ histogram += TargetValue(value);
+ return histogram;
+ }
};
TEST_F(DistributionReporterTest, DistributionReporterDoesNotCrash) {
@@ -50,8 +63,47 @@ TEST_F(DistributionReporterTest, DistributionReporterDoesNotCrash) {
std::move(cb).Run(predicted);
}
-TEST_F(DistributionReporterTest, DistributionReporterMustBeRequested) {
+TEST_F(DistributionReporterTest, CallbackRecordsRegressionPredictions) {
+ // Make sure that |reporter_| records everything correctly for regressions.
+ task_.target_description.ordering = LearningTask::Ordering::kNumeric;
+ // Scale 1-2 => 0->100.
+ task_.ukm_min_input_value = 1.;
+ task_.ukm_max_input_value = 2.;
+ task_.report_via_ukm = true;
+ reporter_ = DistributionReporter::Create(task_);
+ EXPECT_NE(reporter_, nullptr);
+
+ DistributionReporter::PredictionInfo info;
+ info.observed = TargetValue(1.1); // => 10
+ info.source_id = source_id_;
+ auto cb = reporter_->GetPredictionCallback(info);
+
+ TargetHistogram predicted;
+ const TargetValue One(1);
+ const TargetValue Five(5);
+ // Predict an average of 1.5 => 50 in the 0-100 scale.
+ predicted[One] = 70;
+ predicted[Five] = 10;
+ ASSERT_EQ(predicted.Average(), 1.5);
+ std::move(cb).Run(predicted);
+
+ // The record should show the correct averages, scaled by |fixed_point_scale|.
+ std::vector<const ukm::mojom::UkmEntry*> entries =
+ ukm_recorder_->GetEntriesByName("Media.Learning.PredictionRecord");
+ EXPECT_EQ(entries.size(), 1u);
+ ukm::TestUkmRecorder::ExpectEntryMetric(entries[0], "LearningTask",
+ task_.GetId());
+ ukm::TestUkmRecorder::ExpectEntryMetric(entries[0], "ObservedValue", 10);
+ ukm::TestUkmRecorder::ExpectEntryMetric(entries[0], "PredictedValue", 50);
+}
+
+TEST_F(DistributionReporterTest, DistributionReporterNeedsUmaNameOrUkm) {
// Make sure that we don't get a reporter if we don't request any reporting.
+ task_.target_description.ordering = LearningTask::Ordering::kNumeric;
+ task_.uma_hacky_aggregate_confusion_matrix = false;
+ task_.uma_hacky_by_training_weight_confusion_matrix = false;
+ task_.uma_hacky_by_feature_subset_confusion_matrix = false;
+ task_.report_via_ukm = false;
reporter_ = DistributionReporter::Create(task_);
EXPECT_EQ(reporter_, nullptr);
}
@@ -83,5 +135,59 @@ TEST_F(DistributionReporterTest, ProvidesByFeatureSubsetReporter) {
EXPECT_NE(reporter_, nullptr);
}
+TEST_F(DistributionReporterTest, UkmBucketizesProperly) {
+ task_.target_description.ordering = LearningTask::Ordering::kNumeric;
+ // Scale [1000, 2000] => [0, 100]
+ task_.ukm_min_input_value = 1000;
+ task_.ukm_max_input_value = 2000;
+ task_.report_via_ukm = true;
+
+ reporter_ = DistributionReporter::Create(task_);
+ DistributionReporter::PredictionInfo info;
+ info.source_id = source_id_;
+
+ // Add a few predictions / observations. We rotate the predicted / observed
+ // just to be sure they end up in the right UKM field.
+
+ // Inputs less than min scale to 0.
+ info.observed = TargetValue(900);
+ reporter_->GetPredictionCallback(info).Run(HistogramFor(1500));
+
+ // Inputs exactly at min scale to 0.
+ info.observed = TargetValue(1000);
+ reporter_->GetPredictionCallback(info).Run(HistogramFor(2000));
+
+ // Inputs in the middle scale to 50.
+ info.observed = TargetValue(1500);
+ reporter_->GetPredictionCallback(info).Run(HistogramFor(2100));
+
+ // Inputs at max scale to 100.
+ info.observed = TargetValue(2000);
+ reporter_->GetPredictionCallback(info).Run(HistogramFor(900));
+
+ // Inputs greater than max scale to 100.
+ info.observed = TargetValue(2100);
+ reporter_->GetPredictionCallback(info).Run(HistogramFor(1000));
+
+ std::vector<const ukm::mojom::UkmEntry*> entries =
+ ukm_recorder_->GetEntriesByName("Media.Learning.PredictionRecord");
+ EXPECT_EQ(entries.size(), 5u);
+
+ ukm::TestUkmRecorder::ExpectEntryMetric(entries[0], "ObservedValue", 0);
+ ukm::TestUkmRecorder::ExpectEntryMetric(entries[0], "PredictedValue", 50);
+
+ ukm::TestUkmRecorder::ExpectEntryMetric(entries[1], "ObservedValue", 0);
+ ukm::TestUkmRecorder::ExpectEntryMetric(entries[1], "PredictedValue", 100);
+
+ ukm::TestUkmRecorder::ExpectEntryMetric(entries[2], "ObservedValue", 50);
+ ukm::TestUkmRecorder::ExpectEntryMetric(entries[2], "PredictedValue", 100);
+
+ ukm::TestUkmRecorder::ExpectEntryMetric(entries[3], "ObservedValue", 100);
+ ukm::TestUkmRecorder::ExpectEntryMetric(entries[3], "PredictedValue", 0);
+
+ ukm::TestUkmRecorder::ExpectEntryMetric(entries[4], "ObservedValue", 100);
+ ukm::TestUkmRecorder::ExpectEntryMetric(entries[4], "PredictedValue", 0);
+}
+
} // namespace learning
} // namespace media
diff --git a/chromium/media/learning/impl/learning_session_impl.h b/chromium/media/learning/impl/learning_session_impl.h
index 01402f5d163..1ada2b5a28d 100644
--- a/chromium/media/learning/impl/learning_session_impl.h
+++ b/chromium/media/learning/impl/learning_session_impl.h
@@ -24,7 +24,8 @@ class COMPONENT_EXPORT(LEARNING_IMPL) LearningSessionImpl
: public LearningSession {
public:
// We will create LearningTaskControllers that run on |task_runner|.
- LearningSessionImpl(scoped_refptr<base::SequencedTaskRunner> task_runner);
+ explicit LearningSessionImpl(
+ scoped_refptr<base::SequencedTaskRunner> task_runner);
~LearningSessionImpl() override;
// Create a SequenceBound controller for |task| on |task_runner|.
diff --git a/chromium/media/learning/impl/learning_session_impl_unittest.cc b/chromium/media/learning/impl/learning_session_impl_unittest.cc
index 2cb878a19d6..d69ec98280d 100644
--- a/chromium/media/learning/impl/learning_session_impl_unittest.cc
+++ b/chromium/media/learning/impl/learning_session_impl_unittest.cc
@@ -100,6 +100,13 @@ class LearningSessionImplTest : public testing::Test {
task_1_.name = "task_1";
}
+ ~LearningSessionImplTest() override {
+ // To prevent a memory leak, reset the session. This will post destruction
+ // of other objects, so RunUntilIdle().
+ session_.reset();
+ scoped_task_environment_.RunUntilIdle();
+ }
+
base::test::ScopedTaskEnvironment scoped_task_environment_;
scoped_refptr<base::SequencedTaskRunner> task_runner_;
diff --git a/chromium/media/learning/impl/learning_task_controller_helper.cc b/chromium/media/learning/impl/learning_task_controller_helper.cc
index 01c8972e67b..6b0506759c8 100644
--- a/chromium/media/learning/impl/learning_task_controller_helper.cc
+++ b/chromium/media/learning/impl/learning_task_controller_helper.cc
@@ -50,6 +50,7 @@ void LearningTaskControllerHelper::CompleteObservation(
iter->second.example.target_value = completion.target_value;
iter->second.example.weight = completion.weight;
iter->second.target_done = true;
+ iter->second.source_id = completion.source_id;
ProcessExampleIfFinished(std::move(iter));
}
@@ -98,7 +99,7 @@ void LearningTaskControllerHelper::ProcessExampleIfFinished(
if (!iter->second.features_done || !iter->second.target_done)
return;
- add_example_cb_.Run(std::move(iter->second.example));
+ add_example_cb_.Run(std::move(iter->second.example), iter->second.source_id);
pending_examples_.erase(iter);
// TODO(liberato): If we receive FeatureVector f1 then f2, and start filling
diff --git a/chromium/media/learning/impl/learning_task_controller_helper.h b/chromium/media/learning/impl/learning_task_controller_helper.h
index 7e9bb6f5a7f..28318655246 100644
--- a/chromium/media/learning/impl/learning_task_controller_helper.h
+++ b/chromium/media/learning/impl/learning_task_controller_helper.h
@@ -16,6 +16,7 @@
#include "base/threading/sequence_bound.h"
#include "media/learning/common/learning_task_controller.h"
#include "media/learning/impl/feature_provider.h"
+#include "services/metrics/public/cpp/ukm_source_id.h"
namespace media {
namespace learning {
@@ -35,7 +36,8 @@ class COMPONENT_EXPORT(LEARNING_IMPL) LearningTaskControllerHelper
: public base::SupportsWeakPtr<LearningTaskControllerHelper> {
public:
// Callback to add labelled examples as training data.
- using AddExampleCB = base::RepeatingCallback<void(LabelledExample)>;
+ using AddExampleCB =
+ base::RepeatingCallback<void(LabelledExample, ukm::SourceId)>;
// TODO(liberato): Consider making the FP not optional.
LearningTaskControllerHelper(const LearningTask& task,
@@ -61,6 +63,8 @@ class COMPONENT_EXPORT(LEARNING_IMPL) LearningTaskControllerHelper
// Has the client added a TargetValue?
// TODO(liberato): Should it provide a weight with the target value?
bool target_done = false;
+
+ ukm::SourceId source_id = ukm::kInvalidSourceId;
};
// [non-repeating int] = example
diff --git a/chromium/media/learning/impl/learning_task_controller_helper_unittest.cc b/chromium/media/learning/impl/learning_task_controller_helper_unittest.cc
index de756f0cf48..606c7a7ece2 100644
--- a/chromium/media/learning/impl/learning_task_controller_helper_unittest.cc
+++ b/chromium/media/learning/impl/learning_task_controller_helper_unittest.cc
@@ -47,6 +47,13 @@ class LearningTaskControllerHelperTest : public testing::Test {
id_ = base::UnguessableToken::Create();
}
+ ~LearningTaskControllerHelperTest() override {
+ // To prevent a memory leak, reset the helper. This will post destruction
+ // of other objects, so RunUntilIdle().
+ helper_.reset();
+ scoped_task_environment_.RunUntilIdle();
+ }
+
void CreateClient(bool include_fp) {
// Create the fake feature provider, and get a pointer to it.
base::SequenceBound<FakeFeatureProvider> sb_fp;
@@ -65,8 +72,9 @@ class LearningTaskControllerHelperTest : public testing::Test {
std::move(sb_fp));
}
- void OnLabelledExample(LabelledExample example) {
+ void OnLabelledExample(LabelledExample example, ukm::SourceId source_id) {
most_recent_example_ = std::move(example);
+ most_recent_source_id_ = source_id;
}
// Since we're friends but the tests aren't.
@@ -86,6 +94,7 @@ class LearningTaskControllerHelperTest : public testing::Test {
// Most recently added example via OnLabelledExample, if any.
base::Optional<LabelledExample> most_recent_example_;
+ ukm::SourceId most_recent_source_id_;
LearningTask task_;
@@ -98,13 +107,16 @@ TEST_F(LearningTaskControllerHelperTest, AddingAnExampleWithoutFPWorks) {
// A helper that doesn't use a FeatureProvider should forward examples as soon
// as they're done.
CreateClient(false);
+ ukm::SourceId source_id = 2;
helper_->BeginObservation(id_, example_.features);
EXPECT_EQ(pending_example_count(), 1u);
helper_->CompleteObservation(
- id_, ObservationCompletion(example_.target_value, example_.weight));
+ id_,
+ ObservationCompletion(example_.target_value, example_.weight, source_id));
EXPECT_TRUE(most_recent_example_);
EXPECT_EQ(*most_recent_example_, example_);
EXPECT_EQ(most_recent_example_->weight, example_.weight);
+ EXPECT_EQ(most_recent_source_id_, source_id);
EXPECT_EQ(pending_example_count(), 0u);
}
diff --git a/chromium/media/learning/impl/learning_task_controller_impl.cc b/chromium/media/learning/impl/learning_task_controller_impl.cc
index 544218aaef6..50a89482cdb 100644
--- a/chromium/media/learning/impl/learning_task_controller_impl.cc
+++ b/chromium/media/learning/impl/learning_task_controller_impl.cc
@@ -75,7 +75,8 @@ void LearningTaskControllerImpl::CancelObservation(base::UnguessableToken id) {
helper_->CancelObservation(id);
}
-void LearningTaskControllerImpl::AddFinishedExample(LabelledExample example) {
+void LearningTaskControllerImpl::AddFinishedExample(LabelledExample example,
+ ukm::SourceId source_id) {
// Verify that we have a trainer and that we got the right number of features.
// We don't compare to |task_.feature_descriptions.size()| since that has been
// adjusted to the subset size already. We expect the original count.
@@ -110,6 +111,7 @@ void LearningTaskControllerImpl::AddFinishedExample(LabelledExample example) {
DistributionReporter::PredictionInfo info;
info.observed = example.target_value;
+ info.source_id = source_id;
info.total_training_weight = last_training_weight_;
info.total_training_examples = last_training_size_;
reporter_->GetPredictionCallback(info).Run(predicted);
@@ -127,6 +129,10 @@ void LearningTaskControllerImpl::AddFinishedExample(LabelledExample example) {
num_untrained_examples_ = 0;
+ // Record these for metrics.
+ last_training_weight_ = training_data_->total_weight();
+ last_training_size_ = training_data_->size();
+
TrainedModelCB model_cb =
base::BindOnce(&LearningTaskControllerImpl::OnModelTrained, AsWeakPtr(),
training_data_->total_weight(), training_data_->size());
diff --git a/chromium/media/learning/impl/learning_task_controller_impl.h b/chromium/media/learning/impl/learning_task_controller_impl.h
index eae031be307..06df120b045 100644
--- a/chromium/media/learning/impl/learning_task_controller_impl.h
+++ b/chromium/media/learning/impl/learning_task_controller_impl.h
@@ -54,7 +54,7 @@ class COMPONENT_EXPORT(LEARNING_IMPL) LearningTaskControllerImpl
private:
// Add |example| to the training data, and process it.
- void AddFinishedExample(LabelledExample example);
+ void AddFinishedExample(LabelledExample example, ukm::SourceId source_id);
// Called by |training_cb_| when the model is trained. |training_weight| and
// |training_size| are the training set's total weight and number of examples.
diff --git a/chromium/media/learning/impl/learning_task_controller_impl_unittest.cc b/chromium/media/learning/impl/learning_task_controller_impl_unittest.cc
index e1620c0858d..9daec3aeaf1 100644
--- a/chromium/media/learning/impl/learning_task_controller_impl_unittest.cc
+++ b/chromium/media/learning/impl/learning_task_controller_impl_unittest.cc
@@ -108,6 +108,13 @@ class LearningTaskControllerImplTest : public testing::Test {
task_.min_new_data_fraction = 0.1;
}
+ ~LearningTaskControllerImplTest() override {
+ // To prevent a memory leak, reset the controller. This may post
+ // destruction of other objects, so RunUntilIdle().
+ controller_.reset();
+ scoped_task_environment_.RunUntilIdle();
+ }
+
void CreateController(SequenceBoundFeatureProvider feature_provider =
SequenceBoundFeatureProvider()) {
std::unique_ptr<FakeDistributionReporter> reporter =
diff --git a/chromium/media/media_options.gni b/chromium/media/media_options.gni
index 0c62e9b4070..b335dfa7571 100644
--- a/chromium/media/media_options.gni
+++ b/chromium/media/media_options.gni
@@ -85,8 +85,7 @@ declare_args() {
# are combined and we could override more logging than expected.
enable_logging_override = !use_jumbo_build && is_chromecast
- # TODO(crbug.com/930300): Fix media_unittests for fuchsia X64.
- enable_dav1d_decoder = !is_android && !is_ios && !is_fuchsia
+ enable_dav1d_decoder = !is_android && !is_ios
}
declare_args() {
diff --git a/chromium/media/midi/BUILD.gn b/chromium/media/midi/BUILD.gn
index 278e308b917..8e723950944 100644
--- a/chromium/media/midi/BUILD.gn
+++ b/chromium/media/midi/BUILD.gn
@@ -137,7 +137,7 @@ component("midi") {
}
if (is_win) {
- deps += [ "//device/usb" ]
+ deps += [ "//services/device/public/cpp/usb" ]
sources += [
"midi_manager_win.cc",
"midi_manager_win.h",
@@ -145,6 +145,8 @@ component("midi") {
"midi_manager_winrt.h",
]
+ libs += [ "setupapi.lib" ]
+
# This library is included in base in static builds.
if (is_component_build) {
libs += [ "cfgmgr32.lib" ]
diff --git a/chromium/media/midi/midi_device_android.cc b/chromium/media/midi/midi_device_android.cc
index 26e8524de83..608c160c402 100644
--- a/chromium/media/midi/midi_device_android.cc
+++ b/chromium/media/midi/midi_device_android.cc
@@ -32,21 +32,14 @@ MidiDeviceAndroid::MidiDeviceAndroid(JNIEnv* env,
: raw_device_(raw_device) {
ScopedJavaLocalRef<jobjectArray> raw_input_ports =
Java_MidiDeviceAndroid_getInputPorts(env, raw_device);
- jsize num_input_ports = env->GetArrayLength(raw_input_ports.obj());
-
- for (jsize i = 0; i < num_input_ports; ++i) {
- ScopedJavaLocalRef<jobject> j_port(
- env, env->GetObjectArrayElement(raw_input_ports.obj(), i));
+ for (auto j_port : raw_input_ports.ReadElements<jobject>()) {
input_ports_.push_back(
std::make_unique<MidiInputPortAndroid>(env, j_port.obj(), delegate));
}
ScopedJavaLocalRef<jobjectArray> raw_output_ports =
Java_MidiDeviceAndroid_getOutputPorts(env, raw_device);
- jsize num_output_ports = env->GetArrayLength(raw_output_ports.obj());
- for (jsize i = 0; i < num_output_ports; ++i) {
- ScopedJavaLocalRef<jobject> j_port(
- env, env->GetObjectArrayElement(raw_output_ports.obj(), i));
+ for (auto j_port : raw_output_ports.ReadElements<jobject>()) {
output_ports_.push_back(
std::make_unique<MidiOutputPortAndroid>(env, j_port.obj()));
}
diff --git a/chromium/media/midi/midi_manager_android.cc b/chromium/media/midi/midi_manager_android.cc
index 5f6500afcd2..662d0268821 100644
--- a/chromium/media/midi/midi_manager_android.cc
+++ b/chromium/media/midi/midi_manager_android.cc
@@ -120,11 +120,7 @@ void MidiManagerAndroid::OnInitialized(
JNIEnv* env,
const JavaParamRef<jobject>& caller,
const JavaParamRef<jobjectArray>& devices) {
- jsize length = env->GetArrayLength(devices);
-
- for (jsize i = 0; i < length; ++i) {
- base::android::ScopedJavaLocalRef<jobject> raw_device(
- env, env->GetObjectArrayElement(devices, i));
+ for (auto raw_device : devices.ReadElements<jobject>()) {
AddDevice(std::make_unique<MidiDeviceAndroid>(env, raw_device, this));
}
service()->task_service()->PostBoundTask(
diff --git a/chromium/media/midi/midi_manager_win.cc b/chromium/media/midi/midi_manager_win.cc
index e543077db38..5215411262d 100644
--- a/chromium/media/midi/midi_manager_win.cc
+++ b/chromium/media/midi/midi_manager_win.cc
@@ -13,7 +13,9 @@
#include <algorithm>
#include <limits>
+#include <map>
#include <string>
+#include <utility>
#include "base/bind.h"
#include "base/bind_helpers.h"
@@ -27,12 +29,12 @@
#include "base/strings/utf_string_conversions.h"
#include "base/synchronization/lock.h"
#include "base/win/windows_version.h"
-#include "device/usb/usb_ids.h"
#include "media/midi/message_util.h"
#include "media/midi/midi_manager_winrt.h"
#include "media/midi/midi_service.h"
#include "media/midi/midi_service.mojom.h"
#include "media/midi/midi_switches.h"
+#include "services/device/public/cpp/usb/usb_ids.h"
namespace midi {
@@ -891,7 +893,7 @@ void MidiManagerWin::SendOnTaskRunner(MidiManagerClient* client,
MidiManager* MidiManager::Create(MidiService* service) {
if (base::FeatureList::IsEnabled(features::kMidiManagerWinrt) &&
- base::win::GetVersion() >= base::win::VERSION_WIN10) {
+ base::win::GetVersion() >= base::win::Version::WIN10) {
return new MidiManagerWinrt(service);
}
return new MidiManagerWin(service);
diff --git a/chromium/media/midi/midi_manager_winrt.cc b/chromium/media/midi/midi_manager_winrt.cc
index 4c0fdedefef..d89b59d92b5 100644
--- a/chromium/media/midi/midi_manager_winrt.cc
+++ b/chromium/media/midi/midi_manager_winrt.cc
@@ -6,6 +6,8 @@
#pragma warning(disable : 4467)
+#define INITGUID
+
#include <windows.h>
#include <cfgmgr32.h>
diff --git a/chromium/media/midi/usb_midi_device_factory_android.cc b/chromium/media/midi/usb_midi_device_factory_android.cc
index 1d1e90c7d8a..292874b40ec 100644
--- a/chromium/media/midi/usb_midi_device_factory_android.cc
+++ b/chromium/media/midi/usb_midi_device_factory_android.cc
@@ -55,11 +55,8 @@ void UsbMidiDeviceFactoryAndroid::OnUsbMidiDeviceRequestDone(
JNIEnv* env,
const JavaParamRef<jobject>& caller,
const JavaParamRef<jobjectArray>& devices) {
- size_t size = env->GetArrayLength(devices);
UsbMidiDevice::Devices devices_to_pass;
- for (size_t i = 0; i < size; ++i) {
- base::android::ScopedJavaLocalRef<jobject> raw_device(
- env, env->GetObjectArrayElement(devices, i));
+ for (auto raw_device : devices.ReadElements<jobject>()) {
devices_to_pass.push_back(
std::make_unique<UsbMidiDeviceAndroid>(raw_device, delegate_));
}
diff --git a/chromium/media/mojo/README.md b/chromium/media/mojo/README.md
index 0bcfc021016..8e3e9bb77da 100644
--- a/chromium/media/mojo/README.md
+++ b/chromium/media/mojo/README.md
@@ -65,9 +65,7 @@ following gn arguments, the media pipeline will enable `MojoRenderer` and
enable_mojo_media = true
mojo_media_services = ["renderer", "cdm"]
```
-Note that you must set `enable_mojo_media` first. Also, some remote media
-components are also controlled by run time features, e.g.
-`media::kMojoVideoDecoder`.
+Note that you must set `enable_mojo_media` first.
### Media Mojo Interface Factory
diff --git a/chromium/media/mojo/clients/BUILD.gn b/chromium/media/mojo/clients/BUILD.gn
index d6052b8baf2..e5d488f15e4 100644
--- a/chromium/media/mojo/clients/BUILD.gn
+++ b/chromium/media/mojo/clients/BUILD.gn
@@ -88,23 +88,6 @@ jumbo_source_set("clients") {
}
}
-source_set("jpeg_decode_accelerator") {
- visibility = [
- "//content/browser",
- "//media/capture:capture_lib",
- ]
-
- sources = [
- "mojo_mjpeg_decode_accelerator.cc",
- "mojo_mjpeg_decode_accelerator.h",
- ]
-
- deps = [
- "//base",
- "//media/mojo/interfaces",
- ]
-}
-
source_set("unit_tests") {
testonly = true
diff --git a/chromium/media/mojo/clients/mojo_android_overlay.cc b/chromium/media/mojo/clients/mojo_android_overlay.cc
index d3d2a896360..ae2d8a40020 100644
--- a/chromium/media/mojo/clients/mojo_android_overlay.cc
+++ b/chromium/media/mojo/clients/mojo_android_overlay.cc
@@ -52,8 +52,10 @@ void MojoAndroidOverlay::OnSurfaceReady(uint64_t surface_key) {
received_surface_ = true;
// Get the surface and notify our client.
- surface_ =
- gpu::GpuSurfaceLookup::GetInstance()->AcquireJavaSurface(surface_key);
+ bool can_be_used_with_surface_control = false;
+ surface_ = gpu::GpuSurfaceLookup::GetInstance()->AcquireJavaSurface(
+ surface_key, &can_be_used_with_surface_control);
+ DCHECK(!can_be_used_with_surface_control);
// If no surface was returned, then fail instead.
if (surface_.IsEmpty()) {
diff --git a/chromium/media/mojo/clients/mojo_android_overlay_unittest.cc b/chromium/media/mojo/clients/mojo_android_overlay_unittest.cc
index a6eaac332a6..ca214cdb453 100644
--- a/chromium/media/mojo/clients/mojo_android_overlay_unittest.cc
+++ b/chromium/media/mojo/clients/mojo_android_overlay_unittest.cc
@@ -135,8 +135,9 @@ class MojoAndroidOverlayTest : public ::testing::Test {
surface_texture_ = gl::SurfaceTexture::Create(0);
surface_ = gl::ScopedJavaSurface(surface_texture_.get());
surface_key_ = gpu::GpuSurfaceTracker::Get()->AddSurfaceForNativeWidget(
- gpu::GpuSurfaceTracker::SurfaceRecord(gfx::kNullAcceleratedWidget,
- surface_.j_surface().obj()));
+ gpu::GpuSurfaceTracker::SurfaceRecord(
+ gfx::kNullAcceleratedWidget, surface_.j_surface().obj(),
+ false /* can_be_used_with_surface_control */));
mock_provider_.client_->OnSurfaceReady(surface_key_);
base::RunLoop().RunUntilIdle();
diff --git a/chromium/media/mojo/clients/mojo_audio_decoder.cc b/chromium/media/mojo/clients/mojo_audio_decoder.cc
index cbbf36ae74b..cfae495219a 100644
--- a/chromium/media/mojo/clients/mojo_audio_decoder.cc
+++ b/chromium/media/mojo/clients/mojo_audio_decoder.cc
@@ -44,7 +44,7 @@ bool MojoAudioDecoder::IsPlatformDecoder() const {
void MojoAudioDecoder::Initialize(const AudioDecoderConfig& config,
CdmContext* cdm_context,
- const InitCB& init_cb,
+ InitCB init_cb,
const OutputCB& output_cb,
const WaitingCB& waiting_cb) {
DVLOG(1) << __func__;
@@ -56,7 +56,8 @@ void MojoAudioDecoder::Initialize(const AudioDecoderConfig& config,
// This could happen during reinitialization.
if (remote_decoder_.encountered_error()) {
DVLOG(1) << __func__ << ": Connection error happened.";
- task_runner_->PostTask(FROM_HERE, base::BindOnce(init_cb, false));
+ task_runner_->PostTask(FROM_HERE,
+ base::BindOnce(std::move(init_cb), false));
return;
}
@@ -67,11 +68,12 @@ void MojoAudioDecoder::Initialize(const AudioDecoderConfig& config,
if (config.is_encrypted() && CdmContext::kInvalidCdmId == cdm_id) {
DVLOG(1) << __func__ << ": Invalid CdmContext.";
- task_runner_->PostTask(FROM_HERE, base::BindOnce(init_cb, false));
+ task_runner_->PostTask(FROM_HERE,
+ base::BindOnce(std::move(init_cb), false));
return;
}
- init_cb_ = init_cb;
+ init_cb_ = std::move(init_cb);
output_cb_ = output_cb;
waiting_cb_ = waiting_cb;
@@ -109,7 +111,7 @@ void MojoAudioDecoder::Decode(scoped_refptr<DecoderBuffer> media_buffer,
base::Bind(&MojoAudioDecoder::OnDecodeStatus, base::Unretained(this)));
}
-void MojoAudioDecoder::Reset(const base::Closure& closure) {
+void MojoAudioDecoder::Reset(base::OnceClosure closure) {
DVLOG(2) << __func__;
DCHECK(task_runner_->BelongsToCurrentThread());
@@ -120,12 +122,12 @@ void MojoAudioDecoder::Reset(const base::Closure& closure) {
base::BindOnce(std::move(decode_cb_), DecodeStatus::DECODE_ERROR));
}
- task_runner_->PostTask(FROM_HERE, closure);
+ task_runner_->PostTask(FROM_HERE, std::move(closure));
return;
}
DCHECK(!reset_cb_);
- reset_cb_ = closure;
+ reset_cb_ = std::move(closure);
remote_decoder_->Reset(
base::Bind(&MojoAudioDecoder::OnResetDone, base::Unretained(this)));
}
diff --git a/chromium/media/mojo/clients/mojo_audio_decoder.h b/chromium/media/mojo/clients/mojo_audio_decoder.h
index 2c77d69afdc..5095e798629 100644
--- a/chromium/media/mojo/clients/mojo_audio_decoder.h
+++ b/chromium/media/mojo/clients/mojo_audio_decoder.h
@@ -35,12 +35,12 @@ class MojoAudioDecoder : public AudioDecoder, public mojom::AudioDecoderClient {
bool IsPlatformDecoder() const final;
void Initialize(const AudioDecoderConfig& config,
CdmContext* cdm_context,
- const InitCB& init_cb,
+ InitCB init_cb,
const OutputCB& output_cb,
const WaitingCB& waiting_cb) final;
void Decode(scoped_refptr<DecoderBuffer> buffer,
const DecodeCB& decode_cb) final;
- void Reset(const base::Closure& closure) final;
+ void Reset(base::OnceClosure closure) final;
bool NeedsBitstreamConversion() const final;
// AudioDecoderClient implementation.
@@ -89,7 +89,7 @@ class MojoAudioDecoder : public AudioDecoder, public mojom::AudioDecoderClient {
// |decode_cb_| and |reset_cb_| are replaced by every by Decode() and Reset().
DecodeCB decode_cb_;
- base::Closure reset_cb_;
+ base::OnceClosure reset_cb_;
// Flag telling whether this decoder requires bitstream conversion.
// Passed from |remote_decoder_| as a result of its initialization.
diff --git a/chromium/media/mojo/clients/mojo_audio_decoder_unittest.cc b/chromium/media/mojo/clients/mojo_audio_decoder_unittest.cc
index 00f2fe32bdd..f9522831748 100644
--- a/chromium/media/mojo/clients/mojo_audio_decoder_unittest.cc
+++ b/chromium/media/mojo/clients/mojo_audio_decoder_unittest.cc
@@ -78,7 +78,7 @@ class MojoAudioDecoderTest : public ::testing::Test {
// Completion callbacks.
MOCK_METHOD1(OnInitialized, void(bool));
- MOCK_METHOD1(OnOutput, void(const scoped_refptr<AudioBuffer>&));
+ MOCK_METHOD1(OnOutput, void(scoped_refptr<AudioBuffer>));
MOCK_METHOD1(OnWaiting, void(WaitingReason));
MOCK_METHOD1(OnDecoded, void(DecodeStatus));
MOCK_METHOD0(OnReset, void());
@@ -110,15 +110,15 @@ class MojoAudioDecoderTest : public ::testing::Test {
new StrictMock<MockAudioDecoder>());
mock_audio_decoder_ = mock_audio_decoder.get();
- EXPECT_CALL(*mock_audio_decoder_, Initialize(_, _, _, _, _))
+ EXPECT_CALL(*mock_audio_decoder_, Initialize_(_, _, _, _, _))
.WillRepeatedly(DoAll(SaveArg<3>(&output_cb_), SaveArg<4>(&waiting_cb_),
- RunCallback<2>(true)));
+ RunOnceCallback<2>(true)));
EXPECT_CALL(*mock_audio_decoder_, Decode(_, _))
.WillRepeatedly(
DoAll(InvokeWithoutArgs(this, &MojoAudioDecoderTest::ReturnOutput),
RunCallback<1>(DecodeStatus::OK)));
- EXPECT_CALL(*mock_audio_decoder_, Reset(_))
- .WillRepeatedly(RunCallback<0>());
+ EXPECT_CALL(*mock_audio_decoder_, Reset_(_))
+ .WillRepeatedly(RunOnceCallback<0>());
mojo::MakeStrongBinding(
std::make_unique<MojoAudioDecoderService>(
diff --git a/chromium/media/mojo/clients/mojo_decoder_factory.cc b/chromium/media/mojo/clients/mojo_decoder_factory.cc
index fa80f910d2a..a435f3f81fe 100644
--- a/chromium/media/mojo/clients/mojo_decoder_factory.cc
+++ b/chromium/media/mojo/clients/mojo_decoder_factory.cc
@@ -48,9 +48,6 @@ void MojoDecoderFactory::CreateVideoDecoders(
const gfx::ColorSpace& target_color_space,
std::vector<std::unique_ptr<VideoDecoder>>* video_decoders) {
#if BUILDFLAG(ENABLE_MOJO_VIDEO_DECODER)
- // If MojoVideoDecoder is not enabled, then return without adding anything.
- if (!base::FeatureList::IsEnabled(media::kMojoVideoDecoder))
- return;
mojom::VideoDecoderPtr video_decoder_ptr;
#if defined(OS_WIN)
diff --git a/chromium/media/mojo/clients/mojo_decryptor.cc b/chromium/media/mojo/clients/mojo_decryptor.cc
index f19d1d7a02f..0b07d5bd9d9 100644
--- a/chromium/media/mojo/clients/mojo_decryptor.cc
+++ b/chromium/media/mojo/clients/mojo_decryptor.cc
@@ -287,7 +287,7 @@ void MojoDecryptor::OnConnectionError(uint32_t custom_reason,
DCHECK(thread_checker_.CalledOnValidThread());
// All pending callbacks will be fired automatically because they are wrapped
- // in ScopedCallbackRunner.
+ // in WrapCallbackWithDefaultInvokeIfNotRun.
}
} // namespace media
diff --git a/chromium/media/mojo/clients/mojo_decryptor.h b/chromium/media/mojo/clients/mojo_decryptor.h
index 87ebac50687..db77f5329de 100644
--- a/chromium/media/mojo/clients/mojo_decryptor.h
+++ b/chromium/media/mojo/clients/mojo_decryptor.h
@@ -56,7 +56,8 @@ class MojoDecryptor : public Decryptor {
private:
// These are once callbacks corresponding to repeating callbacks DecryptCB,
// DecoderInitCB, AudioDecodeCB and VideoDecodeCB. They are needed so that we
- // can use ScopedCallbackRunner to make sure callbacks always run.
+ // can use WrapCallbackWithDefaultInvokeIfNotRun to make sure callbacks always
+ // run.
// TODO(xhwang): Update Decryptor to use OnceCallback. The change is easy,
// but updating tests is hard given gmock doesn't support move-only types.
// See http://crbug.com/751838
diff --git a/chromium/media/mojo/clients/mojo_decryptor_unittest.cc b/chromium/media/mojo/clients/mojo_decryptor_unittest.cc
index 2a8b00b2e94..9e0c010471d 100644
--- a/chromium/media/mojo/clients/mojo_decryptor_unittest.cc
+++ b/chromium/media/mojo/clients/mojo_decryptor_unittest.cc
@@ -107,8 +107,7 @@ class MojoDecryptorTest : public ::testing::Test {
void(Decryptor::Status status,
const Decryptor::AudioFrames& frames));
MOCK_METHOD2(VideoDecoded,
- void(Decryptor::Status status,
- const scoped_refptr<VideoFrame>& frame));
+ void(Decryptor::Status status, scoped_refptr<VideoFrame> frame));
MOCK_METHOD0(OnConnectionClosed, void());
MOCK_METHOD0(OnFrameDestroyed, void());
diff --git a/chromium/media/mojo/clients/mojo_mjpeg_decode_accelerator.cc b/chromium/media/mojo/clients/mojo_mjpeg_decode_accelerator.cc
deleted file mode 100644
index 54abaa574e2..00000000000
--- a/chromium/media/mojo/clients/mojo_mjpeg_decode_accelerator.cc
+++ /dev/null
@@ -1,124 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/mojo/clients/mojo_mjpeg_decode_accelerator.h"
-
-#include <stddef.h>
-
-#include "base/bind.h"
-#include "base/logging.h"
-#include "base/macros.h"
-#include "base/memory/shared_memory_handle.h"
-#include "base/single_thread_task_runner.h"
-#include "build/build_config.h"
-#include "mojo/public/cpp/system/platform_handle.h"
-
-namespace media {
-
-MojoMjpegDecodeAccelerator::MojoMjpegDecodeAccelerator(
- scoped_refptr<base::SequencedTaskRunner> io_task_runner,
- mojom::MjpegDecodeAcceleratorPtrInfo jpeg_decoder)
- : io_task_runner_(std::move(io_task_runner)),
- jpeg_decoder_info_(std::move(jpeg_decoder)) {}
-
-MojoMjpegDecodeAccelerator::~MojoMjpegDecodeAccelerator() {
- DCHECK(io_task_runner_->RunsTasksInCurrentSequence());
-}
-
-bool MojoMjpegDecodeAccelerator::Initialize(
- MjpegDecodeAccelerator::Client* /*client*/) {
- NOTIMPLEMENTED();
- return false;
-}
-
-void MojoMjpegDecodeAccelerator::InitializeAsync(Client* client,
- InitCB init_cb) {
- DCHECK(io_task_runner_->RunsTasksInCurrentSequence());
-
- jpeg_decoder_.Bind(std::move(jpeg_decoder_info_));
-
- // base::Unretained is safe because |this| owns |jpeg_decoder_|.
- jpeg_decoder_.set_connection_error_handler(
- base::Bind(&MojoMjpegDecodeAccelerator::OnLostConnectionToJpegDecoder,
- base::Unretained(this)));
- jpeg_decoder_->Initialize(
- base::Bind(&MojoMjpegDecodeAccelerator::OnInitializeDone,
- base::Unretained(this), std::move(init_cb), client));
-}
-
-void MojoMjpegDecodeAccelerator::Decode(
- const BitstreamBuffer& bitstream_buffer,
- const scoped_refptr<VideoFrame>& video_frame) {
- DCHECK(io_task_runner_->RunsTasksInCurrentSequence());
- DCHECK(jpeg_decoder_.is_bound());
-
- DCHECK(
- base::SharedMemory::IsHandleValid(video_frame->shared_memory_handle()));
-
- base::SharedMemoryHandle output_handle =
- base::SharedMemory::DuplicateHandle(video_frame->shared_memory_handle());
- if (!base::SharedMemory::IsHandleValid(output_handle)) {
- DLOG(ERROR) << "Failed to duplicate handle of VideoFrame";
- return;
- }
-
- size_t output_buffer_size = VideoFrame::AllocationSize(
- video_frame->format(), video_frame->coded_size());
- mojo::ScopedSharedBufferHandle output_frame_handle =
- mojo::WrapSharedMemoryHandle(
- output_handle, output_buffer_size,
- mojo::UnwrappedSharedMemoryHandleProtection::kReadWrite);
-
- // base::Unretained is safe because |this| owns |jpeg_decoder_|.
- jpeg_decoder_->Decode(bitstream_buffer, video_frame->coded_size(),
- std::move(output_frame_handle),
- base::checked_cast<uint32_t>(output_buffer_size),
- base::Bind(&MojoMjpegDecodeAccelerator::OnDecodeAck,
- base::Unretained(this)));
-}
-
-bool MojoMjpegDecodeAccelerator::IsSupported() {
- return true;
-}
-
-void MojoMjpegDecodeAccelerator::OnInitializeDone(
- InitCB init_cb,
- MjpegDecodeAccelerator::Client* client,
- bool success) {
- DCHECK(io_task_runner_->RunsTasksInCurrentSequence());
-
- if (success)
- client_ = client;
-
- std::move(init_cb).Run(success);
-}
-
-void MojoMjpegDecodeAccelerator::OnDecodeAck(
- int32_t bitstream_buffer_id,
- ::media::MjpegDecodeAccelerator::Error error) {
- DCHECK(io_task_runner_->RunsTasksInCurrentSequence());
-
- if (!client_)
- return;
-
- if (error == ::media::MjpegDecodeAccelerator::Error::NO_ERRORS) {
- client_->VideoFrameReady(bitstream_buffer_id);
- return;
- }
-
- // Only NotifyError once.
- // Client::NotifyError() may trigger deletion of |this|, so calling it needs
- // to be the last thing done on this stack!
- Client* client = nullptr;
- std::swap(client, client_);
- client->NotifyError(bitstream_buffer_id, error);
-}
-
-void MojoMjpegDecodeAccelerator::OnLostConnectionToJpegDecoder() {
- DCHECK(io_task_runner_->RunsTasksInCurrentSequence());
- OnDecodeAck(kInvalidBitstreamBufferId,
- ::media::MjpegDecodeAccelerator::Error::PLATFORM_FAILURE);
-}
-
-} // namespace media
diff --git a/chromium/media/mojo/clients/mojo_mjpeg_decode_accelerator.h b/chromium/media/mojo/clients/mojo_mjpeg_decode_accelerator.h
deleted file mode 100644
index acdddbdc300..00000000000
--- a/chromium/media/mojo/clients/mojo_mjpeg_decode_accelerator.h
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_MOJO_CLIENTS_MOJO_MJPEG_DECODE_ACCELERATOR_H_
-#define MEDIA_MOJO_CLIENTS_MOJO_MJPEG_DECODE_ACCELERATOR_H_
-
-#include <stdint.h>
-
-#include <memory>
-
-#include "base/macros.h"
-#include "media/mojo/interfaces/mjpeg_decode_accelerator.mojom.h"
-#include "media/video/mjpeg_decode_accelerator.h"
-
-namespace base {
-class SequencedTaskRunner;
-}
-
-namespace media {
-
-// A MjpegDecodeAccelerator, for use in the browser process, that proxies to a
-// mojom::MjpegDecodeAccelerator. Created on the owner's thread, otherwise
-// operating and deleted on |io_task_runner|.
-class MojoMjpegDecodeAccelerator : public MjpegDecodeAccelerator {
- public:
- MojoMjpegDecodeAccelerator(
- scoped_refptr<base::SequencedTaskRunner> io_task_runner,
- mojom::MjpegDecodeAcceleratorPtrInfo jpeg_decoder);
- ~MojoMjpegDecodeAccelerator() override;
-
- // MjpegDecodeAccelerator implementation.
- // |client| is called on the IO thread, but is never called into after the
- // MojoMjpegDecodeAccelerator is destroyed.
- bool Initialize(Client* client) override;
- void InitializeAsync(Client* client, InitCB init_cb) override;
- void Decode(const BitstreamBuffer& bitstream_buffer,
- const scoped_refptr<VideoFrame>& video_frame) override;
- bool IsSupported() override;
-
- private:
- void OnInitializeDone(InitCB init_cb,
- MjpegDecodeAccelerator::Client* client,
- bool success);
- void OnDecodeAck(int32_t bitstream_buffer_id,
- ::media::MjpegDecodeAccelerator::Error error);
- void OnLostConnectionToJpegDecoder();
-
- scoped_refptr<base::SequencedTaskRunner> io_task_runner_;
-
- Client* client_ = nullptr;
-
- // Used to safely pass the mojom::MjpegDecodeAcceleratorPtr from one thread to
- // another. It is set in the constructor and consumed in InitializeAsync().
- // TODO(mcasas): s/jpeg_decoder_/jda_/ https://crbug.com/699255.
- mojom::MjpegDecodeAcceleratorPtrInfo jpeg_decoder_info_;
-
- mojom::MjpegDecodeAcceleratorPtr jpeg_decoder_;
-
- DISALLOW_COPY_AND_ASSIGN(MojoMjpegDecodeAccelerator);
-};
-
-} // namespace media
-
-#endif // MEDIA_MOJO_CLIENTS_MOJO_MJPEG_DECODE_ACCELERATOR_H_
diff --git a/chromium/media/mojo/clients/mojo_renderer.cc b/chromium/media/mojo/clients/mojo_renderer.cc
index 34312db0743..be6d8263ef5 100644
--- a/chromium/media/mojo/clients/mojo_renderer.cc
+++ b/chromium/media/mojo/clients/mojo_renderer.cc
@@ -265,11 +265,6 @@ void MojoRenderer::OnVideoNaturalSizeChange(const gfx::Size& size) {
client_->OnVideoNaturalSizeChange(size);
}
-void MojoRenderer::OnRemotePlayStateChange(media::MediaStatus::State state) {
- DVLOG(2) << __func__ << ": state [" << static_cast<int>(state) << "]";
- client_->OnRemotePlayStateChange(state);
-}
-
void MojoRenderer::OnVideoOpacityChange(bool opaque) {
DVLOG(2) << __func__ << ": " << opaque;
DCHECK(task_runner_->BelongsToCurrentThread());
diff --git a/chromium/media/mojo/clients/mojo_renderer.h b/chromium/media/mojo/clients/mojo_renderer.h
index 149bddc76c9..bfe05b487ce 100644
--- a/chromium/media/mojo/clients/mojo_renderer.h
+++ b/chromium/media/mojo/clients/mojo_renderer.h
@@ -76,7 +76,6 @@ class MojoRenderer : public Renderer, public mojom::RendererClient {
void OnVideoOpacityChange(bool opaque) override;
void OnWaiting(WaitingReason reason) override;
void OnStatisticsUpdate(const PipelineStatistics& stats) override;
- void OnRemotePlayStateChange(media::MediaStatus::State state) override;
// Binds |remote_renderer_| to the mojo message pipe. Can be called multiple
// times. If an error occurs during connection, OnConnectionError will be
diff --git a/chromium/media/mojo/clients/mojo_renderer_factory.cc b/chromium/media/mojo/clients/mojo_renderer_factory.cc
index 1947ec52fc6..f0e95159bf0 100644
--- a/chromium/media/mojo/clients/mojo_renderer_factory.cc
+++ b/chromium/media/mojo/clients/mojo_renderer_factory.cc
@@ -49,11 +49,14 @@ std::unique_ptr<Renderer> MojoRendererFactory::CreateRenderer(
#if defined(OS_ANDROID)
std::unique_ptr<MojoRenderer> MojoRendererFactory::CreateFlingingRenderer(
const std::string& presentation_id,
+ mojom::FlingingRendererClientExtensionPtr client_extension_ptr,
const scoped_refptr<base::SingleThreadTaskRunner>& media_task_runner,
VideoRendererSink* video_renderer_sink) {
DCHECK(interface_factory_);
mojom::RendererPtr renderer_ptr;
+
interface_factory_->CreateFlingingRenderer(presentation_id,
+ std::move(client_extension_ptr),
mojo::MakeRequest(&renderer_ptr));
return std::make_unique<MojoRenderer>(
diff --git a/chromium/media/mojo/clients/mojo_renderer_factory.h b/chromium/media/mojo/clients/mojo_renderer_factory.h
index 16fdf0f09f9..f4f08551af7 100644
--- a/chromium/media/mojo/clients/mojo_renderer_factory.h
+++ b/chromium/media/mojo/clients/mojo_renderer_factory.h
@@ -50,6 +50,7 @@ class MojoRendererFactory : public RendererFactory {
#if defined(OS_ANDROID)
std::unique_ptr<MojoRenderer> CreateFlingingRenderer(
const std::string& presentation_id,
+ mojom::FlingingRendererClientExtensionPtr client_extenion_ptr,
const scoped_refptr<base::SingleThreadTaskRunner>& media_task_runner,
VideoRendererSink* video_renderer_sink);
diff --git a/chromium/media/mojo/clients/mojo_video_decoder.cc b/chromium/media/mojo/clients/mojo_video_decoder.cc
index 32f72b3dcdb..d8f503e9a6e 100644
--- a/chromium/media/mojo/clients/mojo_video_decoder.cc
+++ b/chromium/media/mojo/clients/mojo_video_decoder.cc
@@ -33,7 +33,7 @@ namespace media {
namespace {
void ReportMojoVideoDecoderInitializeStatusToUMAAndRunCB(
- const VideoDecoder::InitCB& init_cb,
+ VideoDecoder::InitCB init_cb,
bool success) {
// Send the same histogram as GpuVideoDecoder to avoid breaking the existing
// tests.
@@ -42,11 +42,11 @@ void ReportMojoVideoDecoderInitializeStatusToUMAAndRunCB(
UMA_HISTOGRAM_ENUMERATION("Media.GpuVideoDecoderInitializeStatus", status,
PIPELINE_STATUS_MAX + 1);
- init_cb.Run(success);
+ std::move(init_cb).Run(success);
}
void ReportMojoVideoDecoderErrorStatusToUMAAndRunCB(
- const VideoDecoder::DecodeCB& decode_cb,
+ VideoDecoder::DecodeCB decode_cb,
DecodeStatus status) {
// Send the same histogram as GpuVideoDecoder to avoid breaking the existing
// tests.
@@ -57,7 +57,7 @@ void ReportMojoVideoDecoderErrorStatusToUMAAndRunCB(
media::VideoDecodeAccelerator::ERROR_MAX + 1);
}
- decode_cb.Run(status);
+ std::move(decode_cb).Run(status);
}
} // namespace
@@ -148,19 +148,19 @@ std::string MojoVideoDecoder::GetDisplayName() const {
void MojoVideoDecoder::Initialize(const VideoDecoderConfig& config,
bool low_delay,
CdmContext* cdm_context,
- const InitCB& init_cb,
+ InitCB init_cb,
const OutputCB& output_cb,
const WaitingCB& waiting_cb) {
DVLOG(1) << __func__;
DCHECK(task_runner_->BelongsToCurrentThread());
- InitCB bound_init_cb =
- base::Bind(&ReportMojoVideoDecoderInitializeStatusToUMAAndRunCB, init_cb);
+ InitCB bound_init_cb = base::BindOnce(
+ &ReportMojoVideoDecoderInitializeStatusToUMAAndRunCB, std::move(init_cb));
// Fail immediately if we know that the remote side cannot support |config|.
if (gpu_factories_ && !gpu_factories_->IsDecoderConfigSupported(
video_decoder_implementation_, config)) {
task_runner_->PostTask(FROM_HERE,
- base::BindRepeating(bound_init_cb, false));
+ base::BindOnce(std::move(bound_init_cb), false));
return;
}
@@ -174,7 +174,8 @@ void MojoVideoDecoder::Initialize(const VideoDecoderConfig& config,
// is passed for reinitialization.
if (config.is_encrypted() && CdmContext::kInvalidCdmId == cdm_id) {
DVLOG(1) << __func__ << ": Invalid CdmContext.";
- task_runner_->PostTask(FROM_HERE, base::BindOnce(bound_init_cb, false));
+ task_runner_->PostTask(FROM_HERE,
+ base::BindOnce(std::move(bound_init_cb), false));
return;
}
@@ -183,12 +184,12 @@ void MojoVideoDecoder::Initialize(const VideoDecoderConfig& config,
if (has_connection_error_) {
task_runner_->PostTask(FROM_HERE,
- base::BindRepeating(bound_init_cb, false));
+ base::BindOnce(std::move(bound_init_cb), false));
return;
}
initialized_ = false;
- init_cb_ = bound_init_cb;
+ init_cb_ = std::move(bound_init_cb);
output_cb_ = output_cb;
waiting_cb_ = waiting_cb;
@@ -209,16 +210,17 @@ void MojoVideoDecoder::OnInitializeDone(bool status,
}
void MojoVideoDecoder::Decode(scoped_refptr<DecoderBuffer> buffer,
- const DecodeCB& decode_cb) {
+ DecodeCB decode_cb) {
DVLOG(3) << __func__ << ": " << buffer->AsHumanReadableString();
DCHECK(task_runner_->BelongsToCurrentThread());
- DecodeCB bound_decode_cb =
- base::Bind(&ReportMojoVideoDecoderErrorStatusToUMAAndRunCB, decode_cb);
+ DecodeCB bound_decode_cb = base::BindOnce(
+ &ReportMojoVideoDecoderErrorStatusToUMAAndRunCB, std::move(decode_cb));
if (has_connection_error_) {
task_runner_->PostTask(
- FROM_HERE, base::BindOnce(bound_decode_cb, DecodeStatus::DECODE_ERROR));
+ FROM_HERE,
+ base::BindOnce(std::move(bound_decode_cb), DecodeStatus::DECODE_ERROR));
return;
}
@@ -226,12 +228,13 @@ void MojoVideoDecoder::Decode(scoped_refptr<DecoderBuffer> buffer,
mojo_decoder_buffer_writer_->WriteDecoderBuffer(std::move(buffer));
if (!mojo_buffer) {
task_runner_->PostTask(
- FROM_HERE, base::BindOnce(bound_decode_cb, DecodeStatus::DECODE_ERROR));
+ FROM_HERE,
+ base::BindOnce(std::move(bound_decode_cb), DecodeStatus::DECODE_ERROR));
return;
}
uint64_t decode_id = decode_counter_++;
- pending_decodes_[decode_id] = bound_decode_cb;
+ pending_decodes_[decode_id] = std::move(bound_decode_cb);
remote_decoder_->Decode(std::move(mojo_buffer),
base::Bind(&MojoVideoDecoder::OnDecodeDone,
base::Unretained(this), decode_id));
@@ -268,21 +271,21 @@ void MojoVideoDecoder::OnDecodeDone(uint64_t decode_id, DecodeStatus status) {
Stop();
return;
}
- DecodeCB decode_cb = it->second;
+ DecodeCB decode_cb = std::move(it->second);
pending_decodes_.erase(it);
- decode_cb.Run(status);
+ std::move(decode_cb).Run(status);
}
-void MojoVideoDecoder::Reset(const base::Closure& reset_cb) {
+void MojoVideoDecoder::Reset(base::OnceClosure reset_cb) {
DVLOG(2) << __func__;
DCHECK(task_runner_->BelongsToCurrentThread());
if (has_connection_error_) {
- task_runner_->PostTask(FROM_HERE, reset_cb);
+ task_runner_->PostTask(FROM_HERE, std::move(reset_cb));
return;
}
- reset_cb_ = reset_cb;
+ reset_cb_ = std::move(reset_cb);
remote_decoder_->Reset(
base::Bind(&MojoVideoDecoder::OnResetDone, base::Unretained(this)));
}
@@ -406,8 +409,8 @@ void MojoVideoDecoder::Stop() {
if (!weak_this)
return;
- for (const auto& pending_decode : pending_decodes_) {
- pending_decode.second.Run(DecodeStatus::DECODE_ERROR);
+ for (auto& pending_decode : pending_decodes_) {
+ std::move(pending_decode.second).Run(DecodeStatus::DECODE_ERROR);
if (!weak_this)
return;
}
diff --git a/chromium/media/mojo/clients/mojo_video_decoder.h b/chromium/media/mojo/clients/mojo_video_decoder.h
index 086aeb2560d..91d801a8b8a 100644
--- a/chromium/media/mojo/clients/mojo_video_decoder.h
+++ b/chromium/media/mojo/clients/mojo_video_decoder.h
@@ -49,12 +49,11 @@ class MojoVideoDecoder final : public VideoDecoder,
void Initialize(const VideoDecoderConfig& config,
bool low_delay,
CdmContext* cdm_context,
- const InitCB& init_cb,
+ InitCB init_cb,
const OutputCB& output_cb,
const WaitingCB& waiting_cb) final;
- void Decode(scoped_refptr<DecoderBuffer> buffer,
- const DecodeCB& decode_cb) final;
- void Reset(const base::Closure& closure) final;
+ void Decode(scoped_refptr<DecoderBuffer> buffer, DecodeCB decode_cb) final;
+ void Reset(base::OnceClosure closure) final;
bool NeedsBitstreamConversion() const final;
bool CanReadWithoutStalling() const final;
int GetMaxDecodeRequests() const final;
@@ -103,7 +102,7 @@ class MojoVideoDecoder final : public VideoDecoder,
WaitingCB waiting_cb_;
uint64_t decode_counter_ = 0;
std::map<uint64_t, DecodeCB> pending_decodes_;
- base::Closure reset_cb_;
+ base::OnceClosure reset_cb_;
mojom::VideoDecoderPtr remote_decoder_;
std::unique_ptr<MojoDecoderBufferWriter> mojo_decoder_buffer_writer_;
diff --git a/chromium/media/mojo/clients/mojo_video_encode_accelerator.cc b/chromium/media/mojo/clients/mojo_video_encode_accelerator.cc
index d4b479f55d2..3ed37fc7d7e 100644
--- a/chromium/media/mojo/clients/mojo_video_encode_accelerator.cc
+++ b/chromium/media/mojo/clients/mojo_video_encode_accelerator.cc
@@ -5,6 +5,7 @@
#include "media/mojo/clients/mojo_video_encode_accelerator.h"
#include "base/bind.h"
+#include "base/bind_helpers.h"
#include "base/logging.h"
#include "gpu/ipc/client/gpu_channel_host.h"
#include "media/base/video_frame.h"
@@ -17,9 +18,6 @@ namespace media {
namespace {
-// Does nothing but keeping |frame| alive.
-void KeepVideoFrameAlive(const scoped_refptr<VideoFrame>& frame) {}
-
// File-static mojom::VideoEncodeAcceleratorClient implementation to trampoline
// method calls to its |client_|. Note that this class is thread hostile when
// bound.
@@ -117,7 +115,7 @@ bool MojoVideoEncodeAccelerator::Initialize(const Config& config,
return result;
}
-void MojoVideoEncodeAccelerator::Encode(const scoped_refptr<VideoFrame>& frame,
+void MojoVideoEncodeAccelerator::Encode(scoped_refptr<VideoFrame> frame,
bool force_keyframe) {
DVLOG(2) << __func__ << " tstamp=" << frame->timestamp();
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
@@ -157,23 +155,22 @@ void MojoVideoEncodeAccelerator::Encode(const scoped_refptr<VideoFrame>& frame,
// this gets destroyed and probably recycle its shared_memory_handle(): keep
// the former alive until the remote end is actually finished.
DCHECK(vea_.is_bound());
- vea_->Encode(mojo_frame, force_keyframe,
- base::Bind(&KeepVideoFrameAlive, frame));
+ vea_->Encode(
+ std::move(mojo_frame), force_keyframe,
+ base::BindOnce(base::DoNothing::Once<scoped_refptr<VideoFrame>>(),
+ std::move(frame)));
}
void MojoVideoEncodeAccelerator::UseOutputBitstreamBuffer(
- const BitstreamBuffer& buffer) {
+ BitstreamBuffer buffer) {
DVLOG(2) << __func__ << " buffer.id()= " << buffer.id()
<< " buffer.size()= " << buffer.size() << "B";
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
- DCHECK(buffer.handle().IsValid());
+ DCHECK(buffer.region().IsValid());
- // TODO(https://crbug.com/793446): Only wrap read-only handles here and change
- // the protection status to kReadOnly.
- mojo::ScopedSharedBufferHandle buffer_handle = mojo::WrapSharedMemoryHandle(
- buffer.handle().Duplicate(), buffer.size(),
- mojo::UnwrappedSharedMemoryHandleProtection::kReadWrite);
+ auto buffer_handle =
+ mojo::WrapPlatformSharedMemoryRegion(buffer.TakeRegion());
vea_->UseOutputBitstreamBuffer(buffer.id(), std::move(buffer_handle));
}
diff --git a/chromium/media/mojo/clients/mojo_video_encode_accelerator.h b/chromium/media/mojo/clients/mojo_video_encode_accelerator.h
index f555e0f5985..6b620938ec1 100644
--- a/chromium/media/mojo/clients/mojo_video_encode_accelerator.h
+++ b/chromium/media/mojo/clients/mojo_video_encode_accelerator.h
@@ -35,9 +35,8 @@ class MojoVideoEncodeAccelerator : public VideoEncodeAccelerator {
// VideoEncodeAccelerator implementation.
SupportedProfiles GetSupportedProfiles() override;
bool Initialize(const Config& config, Client* client) override;
- void Encode(const scoped_refptr<VideoFrame>& frame,
- bool force_keyframe) override;
- void UseOutputBitstreamBuffer(const BitstreamBuffer& buffer) override;
+ void Encode(scoped_refptr<VideoFrame> frame, bool force_keyframe) override;
+ void UseOutputBitstreamBuffer(BitstreamBuffer buffer) override;
void RequestEncodingParametersChange(uint32_t bitrate,
uint32_t framerate_num) override;
void RequestEncodingParametersChange(const VideoBitrateAllocation& bitrate,
diff --git a/chromium/media/mojo/clients/mojo_video_encode_accelerator_unittest.cc b/chromium/media/mojo/clients/mojo_video_encode_accelerator_unittest.cc
index bd85a1a4946..82e39c25fca 100644
--- a/chromium/media/mojo/clients/mojo_video_encode_accelerator_unittest.cc
+++ b/chromium/media/mojo/clients/mojo_video_encode_accelerator_unittest.cc
@@ -202,13 +202,14 @@ TEST_F(MojoVideoEncodeAcceleratorTest, EncodeOneFrame) {
const int32_t kBitstreamBufferId = 17;
{
const int32_t kShMemSize = 10;
- base::SharedMemory shmem;
- shmem.CreateAnonymous(kShMemSize);
+ auto shmem = base::UnsafeSharedMemoryRegion::Create(kShMemSize);
EXPECT_CALL(*mock_mojo_vea(),
DoUseOutputBitstreamBuffer(kBitstreamBufferId, _));
- mojo_vea()->UseOutputBitstreamBuffer(
- BitstreamBuffer(kBitstreamBufferId, shmem.handle(), kShMemSize,
- 0 /* offset */, base::TimeDelta()));
+ mojo_vea()->UseOutputBitstreamBuffer(BitstreamBuffer(
+ kBitstreamBufferId,
+ base::UnsafeSharedMemoryRegion::TakeHandleForSerialization(
+ std::move(shmem)),
+ kShMemSize, 0 /* offset */, base::TimeDelta()));
base::RunLoop().RunUntilIdle();
}
diff --git a/chromium/media/mojo/common/media_type_converters.cc b/chromium/media/mojo/common/media_type_converters.cc
index ea1e77d9106..012d818c5bc 100644
--- a/chromium/media/mojo/common/media_type_converters.cc
+++ b/chromium/media/mojo/common/media_type_converters.cc
@@ -119,21 +119,21 @@ TypeConverter<scoped_refptr<media::DecoderBuffer>,
// static
media::mojom::AudioBufferPtr
-TypeConverter<media::mojom::AudioBufferPtr, scoped_refptr<media::AudioBuffer>>::
- Convert(const scoped_refptr<media::AudioBuffer>& input) {
+TypeConverter<media::mojom::AudioBufferPtr, media::AudioBuffer>::Convert(
+ const media::AudioBuffer& input) {
media::mojom::AudioBufferPtr buffer(media::mojom::AudioBuffer::New());
- buffer->sample_format = input->sample_format_;
- buffer->channel_layout = input->channel_layout();
- buffer->channel_count = input->channel_count();
- buffer->sample_rate = input->sample_rate();
- buffer->frame_count = input->frame_count();
- buffer->end_of_stream = input->end_of_stream();
- buffer->timestamp = input->timestamp();
-
- if (input->data_) {
- DCHECK_GT(input->data_size(), 0u);
- buffer->data.assign(input->data_.get(),
- input->data_.get() + input->data_size_);
+ buffer->sample_format = input.sample_format_;
+ buffer->channel_layout = input.channel_layout();
+ buffer->channel_count = input.channel_count();
+ buffer->sample_rate = input.sample_rate();
+ buffer->frame_count = input.frame_count();
+ buffer->end_of_stream = input.end_of_stream();
+ buffer->timestamp = input.timestamp();
+
+ if (input.data_) {
+ DCHECK_GT(input.data_size(), 0u);
+ buffer->data.assign(input.data_.get(),
+ input.data_.get() + input.data_size_);
}
return buffer;
diff --git a/chromium/media/mojo/common/media_type_converters.h b/chromium/media/mojo/common/media_type_converters.h
index 95bd75d972a..7d512755987 100644
--- a/chromium/media/mojo/common/media_type_converters.h
+++ b/chromium/media/mojo/common/media_type_converters.h
@@ -16,7 +16,7 @@ namespace media {
class AudioBuffer;
class DecoderBuffer;
class DecryptConfig;
-}
+} // namespace media
// These are specializations of mojo::TypeConverter and have to be in the mojo
// namespace.
@@ -47,10 +47,8 @@ struct TypeConverter<scoped_refptr<media::DecoderBuffer>,
};
template <>
-struct TypeConverter<media::mojom::AudioBufferPtr,
- scoped_refptr<media::AudioBuffer>> {
- static media::mojom::AudioBufferPtr Convert(
- const scoped_refptr<media::AudioBuffer>& input);
+struct TypeConverter<media::mojom::AudioBufferPtr, media::AudioBuffer> {
+ static media::mojom::AudioBufferPtr Convert(const media::AudioBuffer& input);
};
template <>
struct TypeConverter<scoped_refptr<media::AudioBuffer>,
diff --git a/chromium/media/mojo/common/media_type_converters_unittest.cc b/chromium/media/mojo/common/media_type_converters_unittest.cc
index 1074c3bfd11..7f64616fb00 100644
--- a/chromium/media/mojo/common/media_type_converters_unittest.cc
+++ b/chromium/media/mojo/common/media_type_converters_unittest.cc
@@ -30,30 +30,30 @@ void CompareBytes(uint8_t* original_data, uint8_t* result_data, size_t length) {
}
void CompareAudioBuffers(SampleFormat sample_format,
- const scoped_refptr<AudioBuffer>& original,
- const scoped_refptr<AudioBuffer>& result) {
- EXPECT_EQ(original->frame_count(), result->frame_count());
- EXPECT_EQ(original->timestamp(), result->timestamp());
- EXPECT_EQ(original->duration(), result->duration());
- EXPECT_EQ(original->sample_rate(), result->sample_rate());
- EXPECT_EQ(original->channel_count(), result->channel_count());
- EXPECT_EQ(original->channel_layout(), result->channel_layout());
- EXPECT_EQ(original->end_of_stream(), result->end_of_stream());
+ const AudioBuffer& original,
+ const AudioBuffer& result) {
+ EXPECT_EQ(original.frame_count(), result.frame_count());
+ EXPECT_EQ(original.timestamp(), result.timestamp());
+ EXPECT_EQ(original.duration(), result.duration());
+ EXPECT_EQ(original.sample_rate(), result.sample_rate());
+ EXPECT_EQ(original.channel_count(), result.channel_count());
+ EXPECT_EQ(original.channel_layout(), result.channel_layout());
+ EXPECT_EQ(original.end_of_stream(), result.end_of_stream());
// Compare bytes in buffer.
int bytes_per_channel =
- original->frame_count() * SampleFormatToBytesPerChannel(sample_format);
+ original.frame_count() * SampleFormatToBytesPerChannel(sample_format);
if (IsPlanar(sample_format)) {
- for (int i = 0; i < original->channel_count(); ++i) {
- CompareBytes(original->channel_data()[i], result->channel_data()[i],
+ for (int i = 0; i < original.channel_count(); ++i) {
+ CompareBytes(original.channel_data()[i], result.channel_data()[i],
bytes_per_channel);
}
return;
}
DCHECK(IsInterleaved(sample_format)) << sample_format;
- CompareBytes(original->channel_data()[0], result->channel_data()[0],
- bytes_per_channel * original->channel_count());
+ CompareBytes(original.channel_data()[0], result.channel_data()[0],
+ bytes_per_channel * original.channel_count());
}
} // namespace
@@ -206,7 +206,7 @@ TEST(MediaTypeConvertersTest, ConvertAudioBuffer_EOS) {
scoped_refptr<AudioBuffer> buffer(AudioBuffer::CreateEOSBuffer());
// Convert to and back.
- mojom::AudioBufferPtr ptr(mojom::AudioBuffer::From(buffer));
+ mojom::AudioBufferPtr ptr(mojom::AudioBuffer::From(*buffer));
scoped_refptr<AudioBuffer> result(ptr.To<scoped_refptr<AudioBuffer>>());
// Compare.
@@ -223,11 +223,11 @@ TEST(MediaTypeConvertersTest, ConvertAudioBuffer_MONO) {
kSampleRate / 100, base::TimeDelta());
// Convert to and back.
- mojom::AudioBufferPtr ptr(mojom::AudioBuffer::From(buffer));
+ mojom::AudioBufferPtr ptr(mojom::AudioBuffer::From(*buffer));
scoped_refptr<AudioBuffer> result(ptr.To<scoped_refptr<AudioBuffer>>());
// Compare.
- CompareAudioBuffers(kSampleFormatU8, buffer, result);
+ CompareAudioBuffers(kSampleFormatU8, *buffer, *result);
}
TEST(MediaTypeConvertersTest, ConvertAudioBuffer_FLOAT) {
@@ -240,11 +240,11 @@ TEST(MediaTypeConvertersTest, ConvertAudioBuffer_FLOAT) {
ChannelLayoutToChannelCount(kChannelLayout), kSampleRate, 0.0f, 1.0f,
kSampleRate / 10, start_time);
// Convert to and back.
- mojom::AudioBufferPtr ptr(mojom::AudioBuffer::From(buffer));
+ mojom::AudioBufferPtr ptr(mojom::AudioBuffer::From(*buffer));
scoped_refptr<AudioBuffer> result(ptr.To<scoped_refptr<AudioBuffer>>());
// Compare.
- CompareAudioBuffers(kSampleFormatPlanarF32, buffer, result);
+ CompareAudioBuffers(kSampleFormatPlanarF32, *buffer, *result);
}
} // namespace media
diff --git a/chromium/media/mojo/interfaces/BUILD.gn b/chromium/media/mojo/interfaces/BUILD.gn
index 6da484ec893..32da32cdf5d 100644
--- a/chromium/media/mojo/interfaces/BUILD.gn
+++ b/chromium/media/mojo/interfaces/BUILD.gn
@@ -28,7 +28,6 @@ mojom("interfaces") {
"media_metrics_provider.mojom",
"media_service.mojom",
"media_types.mojom",
- "mjpeg_decode_accelerator.mojom",
"output_protection.mojom",
"platform_verification.mojom",
"provision_fetcher.mojom",
@@ -68,18 +67,13 @@ mojom("interfaces") {
public_deps += [ "//sandbox/mac/mojom" ]
}
- # Windows component builds require this to avoid link errors related to URL
- # classes. Enabling this for other builds would result in ODR violations.
- # TODO(crbug.com/921170): Remove this once the issue is resolved.
- if (is_win && is_component_build) {
- export_class_attribute_blink = "BLINK_PLATFORM_EXPORT"
- export_define_blink = "BLINK_PLATFORM_IMPLEMENTATION=1"
- export_header_blink = "third_party/blink/public/platform/web_common.h"
- }
-
if (enable_cast_renderer) {
enabled_features = [ "enable_cast_renderer" ]
}
+
+ export_class_attribute_blink = "BLINK_PLATFORM_EXPORT"
+ export_define_blink = "BLINK_PLATFORM_IMPLEMENTATION=1"
+ export_header_blink = "third_party/blink/public/platform/web_common.h"
}
mojom("constants") {
diff --git a/chromium/media/mojo/interfaces/audio_output_stream.mojom b/chromium/media/mojo/interfaces/audio_output_stream.mojom
index 9683e762127..1253a928d8e 100644
--- a/chromium/media/mojo/interfaces/audio_output_stream.mojom
+++ b/chromium/media/mojo/interfaces/audio_output_stream.mojom
@@ -22,6 +22,9 @@ interface AudioOutputStream {
// this.
Pause();
+ // Flushes buffered audio. This should not be called when playing.
+ Flush();
+
// Sets volume. Volume must be in the range [0, 1].
SetVolume(double volume);
};
diff --git a/chromium/media/mojo/interfaces/interface_factory.mojom b/chromium/media/mojo/interfaces/interface_factory.mojom
index abd68a1e4b5..38b42f10614 100644
--- a/chromium/media/mojo/interfaces/interface_factory.mojom
+++ b/chromium/media/mojo/interfaces/interface_factory.mojom
@@ -51,7 +51,9 @@ interface InterfaceFactory {
// Creates a FlingingRenderer (FlingingRendererFactory).
// The |presentation_id| is used to find an already set-up RemotePlayback
// session (see blink::RemotePlayback).
- CreateFlingingRenderer(string presentation_id, Renderer& renderer);
+ CreateFlingingRenderer(string presentation_id,
+ FlingingRendererClientExtension client_extension,
+ Renderer& renderer);
// Creates a CDM based on the |key_system| provided. A |key_system| is a
// generic term for a decryption mechanism and/or content protection provider.
diff --git a/chromium/media/mojo/interfaces/media_types.mojom b/chromium/media/mojo/interfaces/media_types.mojom
index 31b47d6ae6d..b7b72f01ca1 100644
--- a/chromium/media/mojo/interfaces/media_types.mojom
+++ b/chromium/media/mojo/interfaces/media_types.mojom
@@ -5,6 +5,7 @@
module media.mojom;
import "gpu/ipc/common/mailbox_holder.mojom";
+import "gpu/ipc/common/vulkan_ycbcr_info.mojom";
import "mojo/public/mojom/base/time.mojom";
import "mojo/public/mojom/base/values.mojom";
import "ui/gfx/geometry/mojo/geometry.mojom";
@@ -58,10 +59,16 @@ enum VideoCodecProfile;
[Native]
enum VideoPixelFormat;
-// See media/base/video_rotation.h for descriptions.
+// See media/base/video_transformation.h for descriptions.
[Native]
enum VideoRotation;
+// See media/base/video_transformation.h for descriptions.
+struct VideoTransformation {
+ VideoRotation rotation;
+ bool mirrored;
+};
+
// See media/base/waiting.h for descriptions.
[Native]
enum WaitingReason;
@@ -156,7 +163,7 @@ struct VideoDecoderConfig {
VideoCodec codec;
VideoCodecProfile profile;
VideoPixelFormat format;
- VideoRotation video_rotation;
+ VideoTransformation transformation;
gfx.mojom.Size coded_size;
gfx.mojom.Rect visible_rect;
gfx.mojom.Size natural_size;
@@ -292,6 +299,7 @@ struct SharedBufferVideoFrameData {
struct MailboxVideoFrameData {
// Size must be kept in sync with media::VideoFrame::kMaxPlanes.
array<gpu.mojom.MailboxHolder, 4> mailbox_holder;
+ gpu.mojom.VulkanYCbCrInfo? ycbcr_data;
};
struct PipelineStatistics {
diff --git a/chromium/media/mojo/interfaces/media_types.typemap b/chromium/media/mojo/interfaces/media_types.typemap
index 65ea50f26c5..4488db03ee3 100644
--- a/chromium/media/mojo/interfaces/media_types.typemap
+++ b/chromium/media/mojo/interfaces/media_types.typemap
@@ -21,19 +21,27 @@ public_headers = [
"//media/base/sample_format.h",
"//media/base/subsample_entry.h",
"//media/base/video_codecs.h",
- "//media/base/video_rotation.h",
+ "//media/base/video_transformation.h",
"//media/base/video_types.h",
"//media/base/waiting.h",
"//media/base/watch_time_keys.h",
]
-traits_headers = [ "//media/base/ipc/media_param_traits_macros.h" ]
+traits_headers = [
+ "//media/base/ipc/media_param_traits_macros.h",
+ "//media/mojo/interfaces/video_transformation_mojom_traits.h",
+]
public_deps = [
"//media",
"//media/base/ipc",
]
+sources = [
+ "//media/mojo/interfaces/video_transformation_mojom_traits.cc",
+ "//media/mojo/interfaces/video_transformation_mojom_traits.h",
+]
+
type_mappings = [
"media.mojom.AudioCodec=media::AudioCodec",
"media.mojom.BufferingState=media::BufferingState",
@@ -52,6 +60,7 @@ type_mappings = [
"media.mojom.VideoCodecProfile=media::VideoCodecProfile",
"media.mojom.VideoPixelFormat=media::VideoPixelFormat",
"media.mojom.VideoRotation=media::VideoRotation",
+ "media.mojom.VideoTransformation=media::VideoTransformation",
"media.mojom.WaitingReason=media::WaitingReason",
"media.mojom.WatchTimeKey=media::WatchTimeKey",
"media.mojom.EncryptionPattern=media::EncryptionPattern",
diff --git a/chromium/media/mojo/interfaces/mjpeg_decode_accelerator.mojom b/chromium/media/mojo/interfaces/mjpeg_decode_accelerator.mojom
deleted file mode 100644
index a525d2366b4..00000000000
--- a/chromium/media/mojo/interfaces/mjpeg_decode_accelerator.mojom
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-module media.mojom;
-
-import "media/mojo/interfaces/media_types.mojom";
-import "mojo/public/mojom/base/time.mojom";
-import "ui/gfx/geometry/mojo/geometry.mojom";
-
-// Decode errors (see media/video/jpeg_decode_accelerator.h).
-enum DecodeError {
- NO_ERRORS,
- INVALID_ARGUMENT,
- UNREADABLE_INPUT,
- PARSE_JPEG_FAILED,
- UNSUPPORTED_JPEG,
- PLATFORM_FAILURE,
-};
-
-// This defines a mojo transport format for media::BitstreamBuffer (see
-// media/base/bitstream_buffer.h).
-struct BitstreamBuffer {
- int32 id;
- handle<shared_buffer> memory_handle;
- uint32 size;
- int64 offset;
- mojo_base.mojom.TimeDelta timestamp;
- string key_id;
- string iv;
- array<SubsampleEntry> subsamples;
-};
-
-// GPU process interface exposed to the browser for decoding MJPEG streams.
-interface MjpegDecodeAccelerator {
- // Initializes the MJPEG decoder. Should be called once per decoder
- // construction and before using Decode(). This call returns true if
- // initialization is successful.
- Initialize() => (bool success);
-
- // Decodes the given bitstream buffer that contains one JPEG image.
- // The image is decoded from shared memory |input_buffer.memory_handle|
- // with size |input_buffer.size|. The input buffer is associated with
- // |input_buffer.id|and the size of JPEG image is |coded_size|. Decoded I420
- // frame data will be put onto shared memory associated with |output_handle|
- // with allocated size |output_buffer_size|.
- // Returns |bitstream_buffer_id| and |error| in a callback to notify the
- // decode status. |bitstream_buffer_id| is the id of BitstreamBuffer
- // |input_buffer| and |error| is the error code.
- Decode(BitstreamBuffer input_buffer, gfx.mojom.Size coded_size,
- handle<shared_buffer> output_handle, uint32 output_buffer_size)
- => (int32 bitstream_buffer_id, DecodeError error);
-
- // Decodes the given buffer that contains one JPEG image.
- // |input_fd| and |output_fd| are file descriptors of shared memory.
- // The image is decoded from memory of |input_fd|
- // with size |input_buffer_size|. The input buffer is associated with
- // |buffer_id| and the size of JPEG image is |coded_size|. Decoded I420
- // frame data will be put onto memory associated with |output_fd|
- // with allocated size |output_buffer_size|.
- // Returns |buffer_id| and |error| in a callback to notify the
- // decode status. |buffer_id| is the id of |input_buffer| and |error| is the
- // error code.
- DecodeWithFD(int32 buffer_id, handle input_fd, uint32 input_buffer_size,
- int32 coded_size_width, int32 coded_size_height,
- handle output_fd, uint32 output_buffer_size)
- => (int32 buffer_id, DecodeError error);
-
- // TODO(c.padhi): This method might not be required, see
- // http://crbug.com/699255.
- Uninitialize();
-};
diff --git a/chromium/media/mojo/interfaces/mjpeg_decode_accelerator.typemap b/chromium/media/mojo/interfaces/mjpeg_decode_accelerator.typemap
deleted file mode 100644
index ea55ccb0095..00000000000
--- a/chromium/media/mojo/interfaces/mjpeg_decode_accelerator.typemap
+++ /dev/null
@@ -1,28 +0,0 @@
-# Copyright 2017 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-mojom = "//media/mojo/interfaces/mjpeg_decode_accelerator.mojom"
-
-public_headers = [
- "//media/base/bitstream_buffer.h",
- "//media/video/mjpeg_decode_accelerator.h",
-]
-
-traits_headers =
- [ "//media/mojo/interfaces/mjpeg_decode_accelerator_mojom_traits.h" ]
-
-sources = [
- "//media/mojo/interfaces/mjpeg_decode_accelerator_mojom_traits.cc",
-]
-
-deps = [
- "//base",
- "//media",
- "//media/base/ipc",
-]
-
-type_mappings = [
- "media.mojom.BitstreamBuffer=media::BitstreamBuffer",
- "media.mojom.DecodeError=media::MjpegDecodeAccelerator::Error",
-]
diff --git a/chromium/media/mojo/interfaces/mjpeg_decode_accelerator_mojom_traits.cc b/chromium/media/mojo/interfaces/mjpeg_decode_accelerator_mojom_traits.cc
deleted file mode 100644
index 15cb9f82ba7..00000000000
--- a/chromium/media/mojo/interfaces/mjpeg_decode_accelerator_mojom_traits.cc
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/mojo/interfaces/mjpeg_decode_accelerator_mojom_traits.h"
-
-#include "base/logging.h"
-#include "media/base/ipc/media_param_traits_macros.h"
-#include "mojo/public/cpp/base/time_mojom_traits.h"
-#include "mojo/public/cpp/system/platform_handle.h"
-
-namespace mojo {
-
-// static
-media::mojom::DecodeError
-EnumTraits<media::mojom::DecodeError, media::MjpegDecodeAccelerator::Error>::
- ToMojom(media::MjpegDecodeAccelerator::Error error) {
- switch (error) {
- case media::MjpegDecodeAccelerator::NO_ERRORS:
- return media::mojom::DecodeError::NO_ERRORS;
- case media::MjpegDecodeAccelerator::INVALID_ARGUMENT:
- return media::mojom::DecodeError::INVALID_ARGUMENT;
- case media::MjpegDecodeAccelerator::UNREADABLE_INPUT:
- return media::mojom::DecodeError::UNREADABLE_INPUT;
- case media::MjpegDecodeAccelerator::PARSE_JPEG_FAILED:
- return media::mojom::DecodeError::PARSE_JPEG_FAILED;
- case media::MjpegDecodeAccelerator::UNSUPPORTED_JPEG:
- return media::mojom::DecodeError::UNSUPPORTED_JPEG;
- case media::MjpegDecodeAccelerator::PLATFORM_FAILURE:
- return media::mojom::DecodeError::PLATFORM_FAILURE;
- }
- NOTREACHED();
- return media::mojom::DecodeError::NO_ERRORS;
-}
-
-// static
-bool EnumTraits<media::mojom::DecodeError,
- media::MjpegDecodeAccelerator::Error>::
- FromMojom(media::mojom::DecodeError error,
- media::MjpegDecodeAccelerator::Error* out) {
- switch (error) {
- case media::mojom::DecodeError::NO_ERRORS:
- *out = media::MjpegDecodeAccelerator::Error::NO_ERRORS;
- return true;
- case media::mojom::DecodeError::INVALID_ARGUMENT:
- *out = media::MjpegDecodeAccelerator::Error::INVALID_ARGUMENT;
- return true;
- case media::mojom::DecodeError::UNREADABLE_INPUT:
- *out = media::MjpegDecodeAccelerator::Error::UNREADABLE_INPUT;
- return true;
- case media::mojom::DecodeError::PARSE_JPEG_FAILED:
- *out = media::MjpegDecodeAccelerator::Error::PARSE_JPEG_FAILED;
- return true;
- case media::mojom::DecodeError::UNSUPPORTED_JPEG:
- *out = media::MjpegDecodeAccelerator::Error::UNSUPPORTED_JPEG;
- return true;
- case media::mojom::DecodeError::PLATFORM_FAILURE:
- *out = media::MjpegDecodeAccelerator::Error::PLATFORM_FAILURE;
- return true;
- }
- NOTREACHED();
- return false;
-}
-
-// static
-mojo::ScopedSharedBufferHandle
-StructTraits<media::mojom::BitstreamBufferDataView, media::BitstreamBuffer>::
- memory_handle(const media::BitstreamBuffer& input) {
- base::SharedMemoryHandle input_handle =
- base::SharedMemory::DuplicateHandle(input.handle());
- if (!base::SharedMemory::IsHandleValid(input_handle)) {
- DLOG(ERROR) << "Failed to duplicate handle of BitstreamBuffer";
- return mojo::ScopedSharedBufferHandle();
- }
-
- // TODO(https://crbug.com/793446): Update this to |kReadOnly| protection once
- // BitstreamBuffer can guarantee that its handle() field always corresponds to
- // a read-only SharedMemoryHandle.
- return mojo::WrapSharedMemoryHandle(
- input_handle, input.size(),
- mojo::UnwrappedSharedMemoryHandleProtection::kReadWrite);
-}
-
-// static
-bool StructTraits<
- media::mojom::BitstreamBufferDataView,
- media::BitstreamBuffer>::Read(media::mojom::BitstreamBufferDataView input,
- media::BitstreamBuffer* output) {
- base::TimeDelta timestamp;
- if (!input.ReadTimestamp(&timestamp))
- return false;
-
- std::string key_id;
- if (!input.ReadKeyId(&key_id))
- return false;
-
- std::string iv;
- if (!input.ReadIv(&iv))
- return false;
-
- std::vector<media::SubsampleEntry> subsamples;
- if (!input.ReadSubsamples(&subsamples))
- return false;
-
- mojo::ScopedSharedBufferHandle handle = input.TakeMemoryHandle();
- if (!handle.is_valid())
- return false;
-
- base::SharedMemoryHandle memory_handle;
- MojoResult unwrap_result = mojo::UnwrapSharedMemoryHandle(
- std::move(handle), &memory_handle, nullptr, nullptr);
- if (unwrap_result != MOJO_RESULT_OK)
- return false;
-
- media::BitstreamBuffer bitstream_buffer(
- input.id(), memory_handle, input.size(),
- base::checked_cast<off_t>(input.offset()), timestamp);
- if (key_id.size()) {
- // Note that BitstreamBuffer currently ignores how each buffer is
- // encrypted and uses the settings from the Audio/VideoDecoderConfig.
- bitstream_buffer.SetDecryptionSettings(key_id, iv, subsamples);
- }
- *output = bitstream_buffer;
-
- return true;
-}
-
-} // namespace mojo
diff --git a/chromium/media/mojo/interfaces/mjpeg_decode_accelerator_mojom_traits.h b/chromium/media/mojo/interfaces/mjpeg_decode_accelerator_mojom_traits.h
deleted file mode 100644
index 72e1a590eaf..00000000000
--- a/chromium/media/mojo/interfaces/mjpeg_decode_accelerator_mojom_traits.h
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_MOJO_INTERFACES_MJPEG_DECODE_ACCELERATOR_MOJOM_TRAITS_H_
-#define MEDIA_MOJO_INTERFACES_MJPEG_DECODE_ACCELERATOR_MOJOM_TRAITS_H_
-
-#include "base/numerics/safe_conversions.h"
-#include "media/base/bitstream_buffer.h"
-#include "media/mojo/interfaces/mjpeg_decode_accelerator.mojom.h"
-#include "media/video/mjpeg_decode_accelerator.h"
-
-namespace mojo {
-
-template <>
-struct EnumTraits<media::mojom::DecodeError,
- media::MjpegDecodeAccelerator::Error> {
- static media::mojom::DecodeError ToMojom(
- media::MjpegDecodeAccelerator::Error error);
-
- static bool FromMojom(media::mojom::DecodeError input,
- media::MjpegDecodeAccelerator::Error* out);
-};
-
-template <>
-struct StructTraits<media::mojom::BitstreamBufferDataView,
- media::BitstreamBuffer> {
- static int32_t id(const media::BitstreamBuffer& input) { return input.id(); }
-
- static mojo::ScopedSharedBufferHandle memory_handle(
- const media::BitstreamBuffer& input);
-
- static uint32_t size(const media::BitstreamBuffer& input) {
- return base::checked_cast<uint32_t>(input.size());
- }
-
- static int64_t offset(const media::BitstreamBuffer& input) {
- return base::checked_cast<int64_t>(input.offset());
- }
-
- static base::TimeDelta timestamp(const media::BitstreamBuffer& input) {
- return input.presentation_timestamp();
- }
-
- static const std::string& key_id(const media::BitstreamBuffer& input) {
- return input.key_id();
- }
-
- static const std::string& iv(const media::BitstreamBuffer& input) {
- return input.iv();
- }
-
- static const std::vector<media::SubsampleEntry>& subsamples(
- const media::BitstreamBuffer& input) {
- return input.subsamples();
- }
-
- static bool Read(media::mojom::BitstreamBufferDataView input,
- media::BitstreamBuffer* output);
-};
-
-} // namespace mojo
-
-#endif // MEDIA_MOJO_INTERFACES_MJPEG_DECODE_ACCELERATOR_MOJOM_TRAITS_H_
diff --git a/chromium/media/mojo/interfaces/renderer.mojom b/chromium/media/mojo/interfaces/renderer.mojom
index 15e26bd330a..ccb0c511fdc 100644
--- a/chromium/media/mojo/interfaces/renderer.mojom
+++ b/chromium/media/mojo/interfaces/renderer.mojom
@@ -81,10 +81,4 @@ interface RendererClient {
// Called when the remote renderering service is waiting for |reason|,
// e.g. waiting for decryption key.
OnWaiting(WaitingReason reason);
-
- // Executed whenever a renderer receives notification of a status change that
- // was not originated by its owner.
- // Only used with the FlingingRenderer (when external devices play/pause the
- // video playing remotely).
- OnRemotePlayStateChange(MediaStatusState state);
};
diff --git a/chromium/media/mojo/interfaces/renderer_extensions.mojom b/chromium/media/mojo/interfaces/renderer_extensions.mojom
index c4b857f0c79..595f0b1950e 100644
--- a/chromium/media/mojo/interfaces/renderer_extensions.mojom
+++ b/chromium/media/mojo/interfaces/renderer_extensions.mojom
@@ -4,13 +4,17 @@
module media.mojom;
+import "media/mojo/interfaces/media_types.mojom";
import "mojo/public/mojom/base/time.mojom";
import "mojo/public/mojom/base/unguessable_token.mojom";
import "ui/gfx/geometry/mojo/geometry.mojom";
-// Extension of the mojo::RendererClient communication layer, exposing
-// renderer-side events handlers to the Browser process.
-// Backed by MediaPlayerRendererClient, called by MediaPlayerRenderer.
+// Extension of the mojo::RendererClient communication layer for HLS and Android
+// software rendering fallback paths.
+// This allows the Browser side to call
+// back into the Renderer side. Concretely, the MediaPlayerRenderer uses these
+// methods to propagate events it raises to the MediaPlayerRendererClient, which
+// lives in the Renderer process.
interface MediaPlayerRendererClientExtension {
// Called when the first time the metadata is updated, and whenever the
// metadata changes.
@@ -18,9 +22,11 @@ interface MediaPlayerRendererClientExtension {
OnDurationChange(mojo_base.mojom.TimeDelta duration);
};
-// Extension of the mojo::Renderer communication layer, exposing browser-side
-// methods to the Renderer process.
-// Backed by MediaPlayerRenderer, called by MediaPlayerRendererClient.
+// Extension of the mojo::RendererClient communication layer for HLS and Android
+// software rendering fallback paths.
+// This allows the Renderer side to call into the Browser side.
+// Concretely, the MediaPlayerRendererClient uses these methods to send commands
+// to MediaPlayerRenderer, which lives in the Browser process.
interface MediaPlayerRendererExtension {
// Registers a new request in the ScopedSurfaceRequestManager, and returns
// its token.
@@ -29,3 +35,15 @@ interface MediaPlayerRendererExtension {
InitiateScopedSurfaceRequest()
=> (mojo_base.mojom.UnguessableToken request_token);
};
+
+// Extension of the mojo::RendererClient communication layer for media flinging,
+// a.k.a RemotePlayback, when playing media on a remote Cast device.
+// This allows the Browser side to call back into the Renderer side.
+// Concretely, the FlingingRenderer uses these methods to propagate events it
+// raises to the FlingingRendererClient, which lives in the Renderer process.
+interface FlingingRendererClientExtension {
+ // Called when the play state of a casted device goes out of sync with WMPI's
+ // play state (e.g. when another phone play/pauses a cast device on the same
+ // network).
+ OnRemotePlayStateChange(MediaStatusState state);
+};
diff --git a/chromium/media/mojo/interfaces/typemaps.gni b/chromium/media/mojo/interfaces/typemaps.gni
index f4879add2e3..1de4153b77c 100644
--- a/chromium/media/mojo/interfaces/typemaps.gni
+++ b/chromium/media/mojo/interfaces/typemaps.gni
@@ -14,7 +14,6 @@ typemaps = [
"//media/mojo/interfaces/hdr_metadata.typemap",
"//media/mojo/interfaces/media_drm_storage.typemap",
"//media/mojo/interfaces/media_types.typemap",
- "//media/mojo/interfaces/mjpeg_decode_accelerator.typemap",
"//media/mojo/interfaces/pipeline_statistics.typemap",
"//media/mojo/interfaces/video_color_space.typemap",
"//media/mojo/interfaces/video_decoder.typemap",
diff --git a/chromium/media/mojo/interfaces/video_decoder_config_struct_traits.cc b/chromium/media/mojo/interfaces/video_decoder_config_struct_traits.cc
index ee076d7a602..df3f80f2727 100644
--- a/chromium/media/mojo/interfaces/video_decoder_config_struct_traits.cc
+++ b/chromium/media/mojo/interfaces/video_decoder_config_struct_traits.cc
@@ -23,8 +23,8 @@ bool StructTraits<media::mojom::VideoDecoderConfigDataView,
if (!input.ReadFormat(&format))
return false;
- media::VideoRotation rotation;
- if (!input.ReadVideoRotation(&rotation))
+ media::VideoTransformation transformation;
+ if (!input.ReadTransformation(&transformation))
return false;
gfx::Size coded_size;
@@ -55,8 +55,9 @@ bool StructTraits<media::mojom::VideoDecoderConfigDataView,
if (!input.ReadHdrMetadata(&hdr_metadata))
return false;
- output->Initialize(codec, profile, format, color_space, rotation, coded_size,
- visible_rect, natural_size, extra_data, encryption_scheme);
+ output->Initialize(codec, profile, format, color_space, transformation,
+ coded_size, visible_rect, natural_size, extra_data,
+ encryption_scheme);
if (hdr_metadata)
output->set_hdr_metadata(hdr_metadata.value());
diff --git a/chromium/media/mojo/interfaces/video_decoder_config_struct_traits.h b/chromium/media/mojo/interfaces/video_decoder_config_struct_traits.h
index 6161bd880c3..4f2ab00f86b 100644
--- a/chromium/media/mojo/interfaces/video_decoder_config_struct_traits.h
+++ b/chromium/media/mojo/interfaces/video_decoder_config_struct_traits.h
@@ -11,6 +11,7 @@
#include "media/mojo/interfaces/hdr_metadata_struct_traits.h"
#include "media/mojo/interfaces/media_types.mojom.h"
#include "media/mojo/interfaces/video_color_space_struct_traits.h"
+#include "media/mojo/interfaces/video_transformation_mojom_traits.h"
#include "ui/gfx/geometry/mojo/geometry_struct_traits.h"
namespace mojo {
@@ -59,9 +60,9 @@ struct StructTraits<media::mojom::VideoDecoderConfigDataView,
return input.color_space_info();
}
- static media::VideoRotation video_rotation(
+ static media::VideoTransformation transformation(
const media::VideoDecoderConfig& input) {
- return input.video_rotation();
+ return input.video_transformation();
}
static const base::Optional<media::HDRMetadata>& hdr_metadata(
diff --git a/chromium/media/mojo/interfaces/video_decoder_config_struct_traits_unittest.cc b/chromium/media/mojo/interfaces/video_decoder_config_struct_traits_unittest.cc
index d028eb5d10b..6619e13eebf 100644
--- a/chromium/media/mojo/interfaces/video_decoder_config_struct_traits_unittest.cc
+++ b/chromium/media/mojo/interfaces/video_decoder_config_struct_traits_unittest.cc
@@ -26,7 +26,7 @@ TEST(VideoDecoderConfigStructTraitsTest, ConvertVideoDecoderConfig_Normal) {
const std::vector<uint8_t> kExtraDataVector(
&kExtraData[0], &kExtraData[0] + base::size(kExtraData));
VideoDecoderConfig input(kCodecVP8, VP8PROFILE_ANY, PIXEL_FORMAT_I420,
- VideoColorSpace(), VIDEO_ROTATION_0, kCodedSize,
+ VideoColorSpace(), kNoTransformation, kCodedSize,
kVisibleRect, kNaturalSize, kExtraDataVector,
Unencrypted());
std::vector<uint8_t> data =
@@ -40,7 +40,7 @@ TEST(VideoDecoderConfigStructTraitsTest, ConvertVideoDecoderConfig_Normal) {
TEST(VideoDecoderConfigStructTraitsTest,
ConvertVideoDecoderConfig_EmptyExtraData) {
VideoDecoderConfig input(kCodecVP8, VP8PROFILE_ANY, PIXEL_FORMAT_I420,
- VideoColorSpace(), VIDEO_ROTATION_0, kCodedSize,
+ VideoColorSpace(), kNoTransformation, kCodedSize,
kVisibleRect, kNaturalSize, EmptyExtraData(),
Unencrypted());
std::vector<uint8_t> data =
@@ -53,7 +53,7 @@ TEST(VideoDecoderConfigStructTraitsTest,
TEST(VideoDecoderConfigStructTraitsTest, ConvertVideoDecoderConfig_Encrypted) {
VideoDecoderConfig input(kCodecVP8, VP8PROFILE_ANY, PIXEL_FORMAT_I420,
- VideoColorSpace(), VIDEO_ROTATION_0, kCodedSize,
+ VideoColorSpace(), kNoTransformation, kCodedSize,
kVisibleRect, kNaturalSize, EmptyExtraData(),
AesCtrEncryptionScheme());
std::vector<uint8_t> data =
@@ -72,7 +72,7 @@ TEST(VideoDecoderConfigStructTraitsTest,
VideoColorSpace::TransferID::SMPTEST2084,
VideoColorSpace::MatrixID::BT2020_CL,
gfx::ColorSpace::RangeID::LIMITED),
- VIDEO_ROTATION_0, kCodedSize, kVisibleRect, kNaturalSize,
+ kNoTransformation, kCodedSize, kVisibleRect, kNaturalSize,
EmptyExtraData(), Unencrypted());
std::vector<uint8_t> data =
media::mojom::VideoDecoderConfig::Serialize(&input);
@@ -85,7 +85,7 @@ TEST(VideoDecoderConfigStructTraitsTest,
TEST(VideoDecoderConfigStructTraitsTest,
ConvertVideoDecoderConfig_HDRMetadata) {
VideoDecoderConfig input(kCodecVP8, VP8PROFILE_ANY, PIXEL_FORMAT_I420,
- VideoColorSpace(), VIDEO_ROTATION_0, kCodedSize,
+ VideoColorSpace(), kNoTransformation, kCodedSize,
kVisibleRect, kNaturalSize, EmptyExtraData(),
Unencrypted());
HDRMetadata hdr_metadata;
@@ -127,7 +127,7 @@ TEST(VideoDecoderConfigStructTraitsTest,
// Next try an non-empty invalid config. Natural size must not be zero.
const gfx::Size kInvalidNaturalSize(0, 0);
input.Initialize(kCodecVP8, VP8PROFILE_ANY, PIXEL_FORMAT_I420,
- VideoColorSpace(), VIDEO_ROTATION_0, kCodedSize,
+ VideoColorSpace(), kNoTransformation, kCodedSize,
kVisibleRect, kInvalidNaturalSize, EmptyExtraData(),
Unencrypted());
EXPECT_FALSE(input.IsValidConfig());
diff --git a/chromium/media/mojo/interfaces/video_frame_struct_traits.cc b/chromium/media/mojo/interfaces/video_frame_struct_traits.cc
index 6eb6dac3cf6..8b377289633 100644
--- a/chromium/media/mojo/interfaces/video_frame_struct_traits.cc
+++ b/chromium/media/mojo/interfaces/video_frame_struct_traits.cc
@@ -18,15 +18,15 @@ namespace mojo {
namespace {
media::mojom::VideoFrameDataPtr MakeVideoFrameData(
- const scoped_refptr<media::VideoFrame>& input) {
+ const media::VideoFrame* input) {
if (input->metadata()->IsTrue(media::VideoFrameMetadata::END_OF_STREAM)) {
return media::mojom::VideoFrameData::NewEosData(
media::mojom::EosVideoFrameData::New());
}
if (input->storage_type() == media::VideoFrame::STORAGE_MOJO_SHARED_BUFFER) {
- media::MojoSharedBufferVideoFrame* mojo_frame =
- static_cast<media::MojoSharedBufferVideoFrame*>(input.get());
+ const media::MojoSharedBufferVideoFrame* mojo_frame =
+ static_cast<const media::MojoSharedBufferVideoFrame*>(input);
// TODO(https://crbug.com/803136): This should duplicate as READ_ONLY, but
// can't because there is no guarantee that the input handle is sharable as
@@ -53,7 +53,8 @@ media::mojom::VideoFrameDataPtr MakeVideoFrameData(
for (size_t i = 0; i < num_planes; i++)
mailbox_holder[i] = input->mailbox_holder(i);
return media::mojom::VideoFrameData::NewMailboxData(
- media::mojom::MailboxVideoFrameData::New(std::move(mailbox_holder)));
+ media::mojom::MailboxVideoFrameData::New(
+ std::move(mailbox_holder), std::move(input->ycbcr_info())));
}
NOTREACHED() << "Unsupported VideoFrame conversion";
@@ -66,7 +67,7 @@ media::mojom::VideoFrameDataPtr MakeVideoFrameData(
media::mojom::VideoFrameDataPtr StructTraits<media::mojom::VideoFrameDataView,
scoped_refptr<media::VideoFrame>>::
data(const scoped_refptr<media::VideoFrame>& input) {
- return media::mojom::VideoFrameDataPtr(MakeVideoFrameData(input));
+ return media::mojom::VideoFrameDataPtr(MakeVideoFrameData(input.get()));
}
// static
@@ -133,9 +134,14 @@ bool StructTraits<media::mojom::VideoFrameDataView,
for (size_t i = 0; i < media::VideoFrame::kMaxPlanes; i++)
mailbox_holder_array[i] = mailbox_holder[i];
+ base::Optional<gpu::VulkanYCbCrInfo> ycbcr_info;
+ if (!mailbox_data.ReadYcbcrData(&ycbcr_info))
+ return false;
+
frame = media::VideoFrame::WrapNativeTextures(
format, mailbox_holder_array, media::VideoFrame::ReleaseMailboxCB(),
coded_size, visible_rect, natural_size, timestamp);
+ frame->set_ycbcr_info(ycbcr_info);
} else {
// TODO(sandersd): Switch on the union tag to avoid this ugliness?
NOTREACHED();
diff --git a/chromium/media/mojo/interfaces/video_frame_struct_traits.h b/chromium/media/mojo/interfaces/video_frame_struct_traits.h
index 418c31e4eab..6491b0f7470 100644
--- a/chromium/media/mojo/interfaces/video_frame_struct_traits.h
+++ b/chromium/media/mojo/interfaces/video_frame_struct_traits.h
@@ -6,8 +6,10 @@
#define MEDIA_MOJO_INTERFACES_VIDEO_FRAME_STRUCT_TRAITS_H_
#include "base/memory/ref_counted.h"
+#include "base/optional.h"
#include "base/values.h"
#include "gpu/ipc/common/mailbox_holder_struct_traits.h"
+#include "gpu/ipc/common/vulkan_ycbcr_info_mojom_traits.h"
#include "media/base/ipc/media_param_traits_macros.h"
#include "media/base/video_frame.h"
#include "media/mojo/interfaces/media_types.mojom.h"
@@ -60,6 +62,11 @@ struct StructTraits<media::mojom::VideoFrameDataView,
return input->ColorSpace();
}
+ static const base::Optional<gpu::VulkanYCbCrInfo>& ycbcr_info(
+ const scoped_refptr<media::VideoFrame>& input) {
+ return input->ycbcr_info();
+ }
+
static media::mojom::VideoFrameDataPtr data(
const scoped_refptr<media::VideoFrame>& input);
diff --git a/chromium/media/mojo/interfaces/video_transformation_mojom_traits.cc b/chromium/media/mojo/interfaces/video_transformation_mojom_traits.cc
new file mode 100644
index 00000000000..b8b51db6d34
--- /dev/null
+++ b/chromium/media/mojo/interfaces/video_transformation_mojom_traits.cc
@@ -0,0 +1,22 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/mojo/interfaces/video_transformation_mojom_traits.h"
+
+namespace mojo {
+
+// static
+bool StructTraits<media::mojom::VideoTransformationDataView,
+ media::VideoTransformation>::
+ Read(media::mojom::VideoTransformationDataView input,
+ media::VideoTransformation* output) {
+ if (!input.ReadRotation(&output->rotation))
+ return false;
+
+ output->mirrored = input.mirrored();
+
+ return true;
+}
+
+} // namespace mojo
diff --git a/chromium/media/mojo/interfaces/video_transformation_mojom_traits.h b/chromium/media/mojo/interfaces/video_transformation_mojom_traits.h
new file mode 100644
index 00000000000..fe9c6306563
--- /dev/null
+++ b/chromium/media/mojo/interfaces/video_transformation_mojom_traits.h
@@ -0,0 +1,32 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_MOJO_INTERFACES_VIDEO_TRANSFORMATION_MOJOM_TRAITS_H_
+#define MEDIA_MOJO_INTERFACES_VIDEO_TRANSFORMATION_MOJOM_TRAITS_H_
+
+#include "media/base/ipc/media_param_traits.h"
+#include "media/base/video_transformation.h"
+#include "media/mojo/interfaces/media_types.mojom.h"
+
+namespace mojo {
+
+template <>
+struct StructTraits<media::mojom::VideoTransformationDataView,
+ media::VideoTransformation> {
+ static media::VideoRotation rotation(
+ const media::VideoTransformation& input) {
+ return input.rotation;
+ }
+
+ static bool mirrored(const media::VideoTransformation& input) {
+ return input.mirrored;
+ }
+
+ static bool Read(media::mojom::VideoTransformationDataView input,
+ media::VideoTransformation* output);
+};
+
+} // namespace mojo
+
+#endif // MEDIA_MOJO_INTERFACES_VIDEO_TRANSFORMATION_MOJOM_TRAITS_H_
diff --git a/chromium/media/mojo/services/BUILD.gn b/chromium/media/mojo/services/BUILD.gn
index 2e0cabec9e4..d43ba943cac 100644
--- a/chromium/media/mojo/services/BUILD.gn
+++ b/chromium/media/mojo/services/BUILD.gn
@@ -49,8 +49,6 @@ jumbo_component("services") {
"mojo_media_client.h",
"mojo_media_log.cc",
"mojo_media_log.h",
- "mojo_mjpeg_decode_accelerator_service.cc",
- "mojo_mjpeg_decode_accelerator_service.h",
"mojo_provision_fetcher.cc",
"mojo_provision_fetcher.h",
"mojo_renderer_service.cc",
@@ -116,14 +114,6 @@ jumbo_component("services") {
]
}
- if (is_chromeos) {
- sources += [
- "cros_mojo_jpeg_encode_accelerator_service.cc",
- "cros_mojo_jpeg_encode_accelerator_service.h",
- ]
- deps += [ "//components/chromeos_camera/common" ]
- }
-
if (enable_library_cdms) {
sources += [
"cdm_service.cc",
@@ -173,6 +163,7 @@ source_set("media_manifest") {
]
deps = [
"//base",
+ "//media/mojo:buildflags",
"//media/mojo/interfaces",
"//media/mojo/interfaces:constants",
"//services/service_manager/public/cpp",
@@ -194,7 +185,6 @@ source_set("unit_tests") {
"mojo_audio_input_stream_unittest.cc",
"mojo_audio_output_stream_provider_unittest.cc",
"mojo_audio_output_stream_unittest.cc",
- "mojo_mjpeg_decode_accelerator_service_unittest.cc",
"mojo_video_encode_accelerator_service_unittest.cc",
"test_helpers.cc",
"test_helpers.h",
@@ -227,6 +217,12 @@ source_set("unit_tests") {
deps += [ "//media/cdm:cdm_api" ]
}
+
+ if (is_chromeos) {
+ deps += [
+ "//components/chromeos_camera:mjpeg_decode_accelerator_service_unittest",
+ ]
+ }
}
# Service Tests
diff --git a/chromium/media/mojo/services/DEPS b/chromium/media/mojo/services/DEPS
index 12bf0a05164..f3ef94643cf 100644
--- a/chromium/media/mojo/services/DEPS
+++ b/chromium/media/mojo/services/DEPS
@@ -2,8 +2,5 @@ specific_include_rules = {
"media_manifest\.cc": [
"+chromecast/common/mojom",
],
- "cros_mojo_jpeg_encode_accelerator_service\.h": [
- "+components/chromeos_camera/common",
- ],
}
diff --git a/chromium/media/mojo/services/cdm_manifest.cc b/chromium/media/mojo/services/cdm_manifest.cc
index 91a0f281d50..2ed9f3381e6 100644
--- a/chromium/media/mojo/services/cdm_manifest.cc
+++ b/chromium/media/mojo/services/cdm_manifest.cc
@@ -16,9 +16,12 @@ const service_manager::Manifest& GetCdmManifest() {
service_manager::ManifestBuilder()
.WithServiceName(mojom::kCdmServiceName)
.WithDisplayName("Content Decryption Module Service")
- .WithOptions(service_manager::ManifestOptionsBuilder()
- .WithSandboxType("cdm")
- .Build())
+ .WithOptions(
+ service_manager::ManifestOptionsBuilder()
+ .WithExecutionMode(service_manager::Manifest::ExecutionMode::
+ kOutOfProcessBuiltin)
+ .WithSandboxType("cdm")
+ .Build())
.ExposeCapability(
"media:cdm",
service_manager::Manifest::InterfaceList<mojom::CdmService>())
diff --git a/chromium/media/mojo/services/cros_mojo_jpeg_encode_accelerator_service.cc b/chromium/media/mojo/services/cros_mojo_jpeg_encode_accelerator_service.cc
deleted file mode 100644
index ae0ed92eb26..00000000000
--- a/chromium/media/mojo/services/cros_mojo_jpeg_encode_accelerator_service.cc
+++ /dev/null
@@ -1,214 +0,0 @@
-// Copyright 2018 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/mojo/services/cros_mojo_jpeg_encode_accelerator_service.h"
-
-#include <stdint.h>
-
-#include <memory>
-#include <utility>
-
-#include "base/bind.h"
-#include "base/bind_helpers.h"
-#include "base/logging.h"
-#include "base/memory/ptr_util.h"
-#include "base/memory/shared_memory.h"
-#include "base/threading/thread_task_runner_handle.h"
-#include "base/trace_event/trace_event.h"
-#include "media/base/bind_to_current_loop.h"
-#include "mojo/public/cpp/bindings/strong_binding.h"
-#include "mojo/public/cpp/system/platform_handle.h"
-#include "ui/gfx/geometry/size.h"
-
-namespace {
-
-#if defined(OS_CHROMEOS)
-const int kJpegQuality = 90;
-#endif
-
-} // namespace
-
-namespace media {
-
-// static
-void CrOSMojoJpegEncodeAcceleratorService::Create(
- mojom::JpegEncodeAcceleratorRequest request) {
- auto* jpeg_encoder = new CrOSMojoJpegEncodeAcceleratorService();
- mojo::MakeStrongBinding(base::WrapUnique(jpeg_encoder), std::move(request));
-}
-
-CrOSMojoJpegEncodeAcceleratorService::CrOSMojoJpegEncodeAcceleratorService()
- : accelerator_factory_functions_(
- GpuJpegEncodeAcceleratorFactory::GetAcceleratorFactories()) {}
-
-CrOSMojoJpegEncodeAcceleratorService::~CrOSMojoJpegEncodeAcceleratorService() {
- DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
-}
-
-void CrOSMojoJpegEncodeAcceleratorService::VideoFrameReady(
- int32_t bitstream_buffer_id,
- size_t encoded_picture_size) {
- DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
- NotifyEncodeStatus(bitstream_buffer_id, encoded_picture_size,
- ::media::JpegEncodeAccelerator::Status::ENCODE_OK);
-}
-
-void CrOSMojoJpegEncodeAcceleratorService::NotifyError(
- int32_t bitstream_buffer_id,
- ::media::JpegEncodeAccelerator::Status error) {
- DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
- NotifyEncodeStatus(bitstream_buffer_id, 0, error);
-}
-
-void CrOSMojoJpegEncodeAcceleratorService::Initialize(
- InitializeCallback callback) {
- DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
-
- // When adding non-chromeos platforms, VideoCaptureGpuJpegEncoder::Initialize
- // needs to be updated.
-
- std::unique_ptr<::media::JpegEncodeAccelerator> accelerator;
- for (const auto& create_jea_function : accelerator_factory_functions_) {
- std::unique_ptr<::media::JpegEncodeAccelerator> tmp_accelerator =
- create_jea_function.Run(base::ThreadTaskRunnerHandle::Get());
- if (tmp_accelerator &&
- tmp_accelerator->Initialize(this) ==
- ::media::JpegEncodeAccelerator::Status::ENCODE_OK) {
- accelerator = std::move(tmp_accelerator);
- break;
- }
- }
-
- if (!accelerator) {
- DLOG(ERROR) << "JPEG accelerator initialization failed";
- std::move(callback).Run(false);
- return;
- }
-
- accelerator_ = std::move(accelerator);
- std::move(callback).Run(true);
-}
-
-void CrOSMojoJpegEncodeAcceleratorService::EncodeWithFD(
- int32_t buffer_id,
- mojo::ScopedHandle input_handle,
- uint32_t input_buffer_size,
- int32_t coded_size_width,
- int32_t coded_size_height,
- mojo::ScopedHandle exif_handle,
- uint32_t exif_buffer_size,
- mojo::ScopedHandle output_handle,
- uint32_t output_buffer_size,
- EncodeWithFDCallback callback) {
-#if defined(OS_CHROMEOS)
- DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
- base::PlatformFile input_fd;
- base::PlatformFile exif_fd;
- base::PlatformFile output_fd;
- MojoResult result;
-
- if (coded_size_width <= 0 || coded_size_height <= 0) {
- std::move(callback).Run(
- buffer_id, 0, ::media::JpegEncodeAccelerator::Status::INVALID_ARGUMENT);
- return;
- }
-
- result = mojo::UnwrapPlatformFile(std::move(input_handle), &input_fd);
- if (result != MOJO_RESULT_OK) {
- std::move(callback).Run(
- buffer_id, 0, ::media::JpegEncodeAccelerator::Status::PLATFORM_FAILURE);
- return;
- }
-
- result = mojo::UnwrapPlatformFile(std::move(exif_handle), &exif_fd);
- if (result != MOJO_RESULT_OK) {
- std::move(callback).Run(
- buffer_id, 0, ::media::JpegEncodeAccelerator::Status::PLATFORM_FAILURE);
- return;
- }
-
- result = mojo::UnwrapPlatformFile(std::move(output_handle), &output_fd);
- if (result != MOJO_RESULT_OK) {
- std::move(callback).Run(
- buffer_id, 0, ::media::JpegEncodeAccelerator::Status::PLATFORM_FAILURE);
- return;
- }
-
- base::UnguessableToken input_guid = base::UnguessableToken::Create();
- base::UnguessableToken exif_guid = base::UnguessableToken::Create();
- base::UnguessableToken output_guid = base::UnguessableToken::Create();
- base::SharedMemoryHandle input_shm_handle(
- base::FileDescriptor(input_fd, true), 0u, input_guid);
- base::SharedMemoryHandle exif_shm_handle(base::FileDescriptor(exif_fd, true),
- 0u, exif_guid);
- base::SharedMemoryHandle output_shm_handle(
- base::FileDescriptor(output_fd, true), 0u, output_guid);
-
- media::BitstreamBuffer output_buffer(buffer_id, output_shm_handle,
- output_buffer_size);
- std::unique_ptr<media::BitstreamBuffer> exif_buffer;
- if (exif_buffer_size > 0) {
- exif_buffer = std::make_unique<media::BitstreamBuffer>(
- buffer_id, exif_shm_handle, exif_buffer_size);
- }
- gfx::Size coded_size(coded_size_width, coded_size_height);
-
- if (encode_cb_map_.find(buffer_id) != encode_cb_map_.end()) {
- mojo::ReportBadMessage("buffer_id is already registered in encode_cb_map_");
- return;
- }
- encode_cb_map_.emplace(buffer_id, std::move(callback));
-
- auto input_shm = std::make_unique<base::SharedMemory>(input_shm_handle, true);
- if (!input_shm->Map(input_buffer_size)) {
- DLOG(ERROR) << "Could not map input shared memory for buffer id "
- << buffer_id;
- NotifyEncodeStatus(
- buffer_id, 0, ::media::JpegEncodeAccelerator::Status::PLATFORM_FAILURE);
- return;
- }
-
- uint8_t* input_shm_memory = static_cast<uint8_t*>(input_shm->memory());
- scoped_refptr<VideoFrame> frame = VideoFrame::WrapExternalSharedMemory(
- PIXEL_FORMAT_I420, // format
- coded_size, // coded_size
- gfx::Rect(coded_size), // visible_rect
- coded_size, // natural_size
- input_shm_memory, // data
- input_buffer_size, // data_size
- input_shm_handle, // handle
- 0, // data_offset
- base::TimeDelta()); // timestamp
- if (!frame.get()) {
- LOG(ERROR) << "Could not create VideoFrame for buffer id " << buffer_id;
- NotifyEncodeStatus(
- buffer_id, 0, ::media::JpegEncodeAccelerator::Status::PLATFORM_FAILURE);
- return;
- }
- // Keep |input_shm| referenced until |frame| is destructed.
- frame->AddDestructionObserver(base::BindOnce(
- base::DoNothing::Once<std::unique_ptr<base::SharedMemory>>(),
- base::Passed(&input_shm)));
-
- DCHECK(accelerator_);
- accelerator_->Encode(frame, kJpegQuality, exif_buffer.get(), output_buffer);
-#else
- NOTREACHED();
-#endif
-}
-
-void CrOSMojoJpegEncodeAcceleratorService::NotifyEncodeStatus(
- int32_t bitstream_buffer_id,
- size_t encoded_picture_size,
- ::media::JpegEncodeAccelerator::Status error) {
- DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
-
- auto iter = encode_cb_map_.find(bitstream_buffer_id);
- DCHECK(iter != encode_cb_map_.end());
- EncodeWithFDCallback encode_cb = std::move(iter->second);
- encode_cb_map_.erase(iter);
- std::move(encode_cb).Run(bitstream_buffer_id, encoded_picture_size, error);
-}
-
-} // namespace media
diff --git a/chromium/media/mojo/services/cros_mojo_jpeg_encode_accelerator_service.h b/chromium/media/mojo/services/cros_mojo_jpeg_encode_accelerator_service.h
deleted file mode 100644
index 2f9fc3c4dd0..00000000000
--- a/chromium/media/mojo/services/cros_mojo_jpeg_encode_accelerator_service.h
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2018 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_MOJO_SERVICES_CROS_MOJO_JPEG_ENCODE_ACCELERATOR_SERVICE_H_
-#define MEDIA_MOJO_SERVICES_CROS_MOJO_JPEG_ENCODE_ACCELERATOR_SERVICE_H_
-
-#include <stdint.h>
-
-#include <memory>
-
-#include "base/macros.h"
-#include "base/memory/ref_counted.h"
-#include "base/threading/thread_checker.h"
-#include "components/chromeos_camera/common/jpeg_encode_accelerator.mojom.h"
-#include "media/gpu/gpu_jpeg_encode_accelerator_factory.h"
-#include "media/mojo/services/media_mojo_export.h"
-#include "media/video/jpeg_encode_accelerator.h"
-
-namespace media {
-
-// Implementation of a mojom::JpegEncodeAccelerator which runs in the GPU
-// process, and wraps a JpegEncodeAccelerator.
-class MEDIA_MOJO_EXPORT CrOSMojoJpegEncodeAcceleratorService
- : public mojom::JpegEncodeAccelerator,
- public JpegEncodeAccelerator::Client {
- public:
- static void Create(mojom::JpegEncodeAcceleratorRequest request);
-
- ~CrOSMojoJpegEncodeAcceleratorService() override;
-
- // JpegEncodeAccelerator::Client implementation.
- void VideoFrameReady(int32_t buffer_id, size_t encoded_picture_size) override;
- void NotifyError(int32_t buffer_id,
- ::media::JpegEncodeAccelerator::Status status) override;
-
- private:
- using EncodeCallbackMap = std::unordered_map<int32_t, EncodeWithFDCallback>;
-
- // This constructor internally calls
- // GpuJpegEncodeAcceleratorFactory::GetAcceleratorFactories() to
- // fill |accelerator_factory_functions_|.
- CrOSMojoJpegEncodeAcceleratorService();
-
- // mojom::JpegEncodeAccelerator implementation.
- void Initialize(InitializeCallback callback) override;
- void EncodeWithFD(int32_t buffer_id,
- mojo::ScopedHandle input_fd,
- uint32_t input_buffer_size,
- int32_t coded_size_width,
- int32_t coded_size_height,
- mojo::ScopedHandle exif_fd,
- uint32_t exif_buffer_size,
- mojo::ScopedHandle output_fd,
- uint32_t output_buffer_size,
- EncodeWithFDCallback callback) override;
-
- void NotifyEncodeStatus(int32_t bitstream_buffer_id,
- size_t encoded_picture_size,
- ::media::JpegEncodeAccelerator::Status status);
-
- const std::vector<GpuJpegEncodeAcceleratorFactory::CreateAcceleratorCB>
- accelerator_factory_functions_;
-
- // A map from bitstream_buffer_id to EncodeCallback.
- EncodeCallbackMap encode_cb_map_;
-
- std::unique_ptr<::media::JpegEncodeAccelerator> accelerator_;
-
- THREAD_CHECKER(thread_checker_);
-
- DISALLOW_COPY_AND_ASSIGN(CrOSMojoJpegEncodeAcceleratorService);
-};
-
-} // namespace media
-
-#endif // MEDIA_MOJO_SERVICES_CROS_MOJO_JPEG_ENCODE_ACCELERATOR_SERVICE_H_
diff --git a/chromium/media/mojo/services/gpu_mojo_media_client.cc b/chromium/media/mojo/services/gpu_mojo_media_client.cc
index f898c21d0b8..21c0633d250 100644
--- a/chromium/media/mojo/services/gpu_mojo_media_client.cc
+++ b/chromium/media/mojo/services/gpu_mojo_media_client.cc
@@ -196,8 +196,8 @@ std::unique_ptr<VideoDecoder> GpuMojoMediaClient::CreateVideoDecoder(
std::make_unique<AndroidVideoSurfaceChooserImpl>(
DeviceInfo::GetInstance()->IsSetOutputSurfaceSupported()),
android_overlay_factory_cb_, std::move(request_overlay_info_cb),
- std::make_unique<VideoFrameFactoryImpl>(gpu_task_runner_,
- std::move(get_stub_cb)));
+ std::make_unique<VideoFrameFactoryImpl>(
+ gpu_task_runner_, std::move(get_stub_cb), gpu_preferences_));
#elif defined(OS_CHROMEOS) || defined(OS_MACOSX) || defined(OS_WIN) || \
defined(OS_LINUX)
video_decoder = VdaVideoDecoder::Create(
diff --git a/chromium/media/mojo/services/interface_factory_impl.cc b/chromium/media/mojo/services/interface_factory_impl.cc
index 00031915861..77a4840fa28 100644
--- a/chromium/media/mojo/services/interface_factory_impl.cc
+++ b/chromium/media/mojo/services/interface_factory_impl.cc
@@ -170,6 +170,7 @@ void InterfaceFactoryImpl::CreateMediaPlayerRenderer(
void InterfaceFactoryImpl::CreateFlingingRenderer(
const std::string& audio_device_id,
+ mojom::FlingingRendererClientExtensionPtr client_extension,
mojo::InterfaceRequest<mojom::Renderer> request) {
NOTREACHED();
}
diff --git a/chromium/media/mojo/services/interface_factory_impl.h b/chromium/media/mojo/services/interface_factory_impl.h
index 77f7e784fd7..bffc86c1a58 100644
--- a/chromium/media/mojo/services/interface_factory_impl.h
+++ b/chromium/media/mojo/services/interface_factory_impl.h
@@ -53,8 +53,10 @@ class InterfaceFactoryImpl : public DeferredDestroy<mojom::InterfaceFactory> {
mojom::RendererRequest request,
mojom::MediaPlayerRendererExtensionRequest renderer_extension_request)
final;
- void CreateFlingingRenderer(const std::string& presentation_id,
- mojom::RendererRequest request) final;
+ void CreateFlingingRenderer(
+ const std::string& presentation_id,
+ mojom::FlingingRendererClientExtensionPtr client_extension,
+ mojom::RendererRequest request) final;
#endif // defined(OS_ANDROID)
void CreateCdm(const std::string& key_system,
mojom::ContentDecryptionModuleRequest request) final;
diff --git a/chromium/media/mojo/services/main.cc b/chromium/media/mojo/services/main.cc
index b8d4318dd7c..eea5f6dbf13 100644
--- a/chromium/media/mojo/services/main.cc
+++ b/chromium/media/mojo/services/main.cc
@@ -3,17 +3,18 @@
// found in the LICENSE file.
#include "base/logging.h"
-#include "base/message_loop/message_loop.h"
+#include "base/task/single_thread_task_executor.h"
#include "media/mojo/services/media_service_factory.h"
#include "services/service_manager/public/cpp/service_executable/service_main.h"
#include "services/service_manager/public/mojom/service.mojom.h"
void ServiceMain(service_manager::mojom::ServiceRequest request) {
logging::LoggingSettings settings;
- settings.logging_dest = logging::LOG_TO_SYSTEM_DEBUG_LOG;
+ settings.logging_dest =
+ logging::LOG_TO_SYSTEM_DEBUG_LOG | logging::LOG_TO_STDERR;
logging::InitLogging(settings);
- base::MessageLoop message_loop;
+ base::SingleThreadTaskExecutor main_thread_task_executor;
media::CreateMediaServiceForTesting(std::move(request))
->RunUntilTermination();
}
diff --git a/chromium/media/mojo/services/media_manifest.cc b/chromium/media/mojo/services/media_manifest.cc
index 7fa40040439..e5b88567282 100644
--- a/chromium/media/mojo/services/media_manifest.cc
+++ b/chromium/media/mojo/services/media_manifest.cc
@@ -5,6 +5,7 @@
#include "media/mojo/services/media_manifest.h"
#include "base/no_destructor.h"
+#include "media/mojo/buildflags.h"
#include "media/mojo/interfaces/constants.mojom.h"
#include "media/mojo/interfaces/media_service.mojom.h"
#include "services/service_manager/public/cpp/manifest_builder.h"
@@ -20,6 +21,18 @@ const service_manager::Manifest& GetMediaManifest() {
service_manager::ManifestBuilder()
.WithServiceName(mojom::kMediaServiceName)
.WithDisplayName("Media Service")
+ .WithOptions(
+ service_manager::ManifestOptionsBuilder()
+#if BUILDFLAG(ENABLE_MOJO_MEDIA_IN_UTILITY_PROCESS) || \
+ BUILDFLAG(ENABLE_MOJO_MEDIA_IN_GPU_PROCESS)
+ .WithExecutionMode(service_manager::Manifest::ExecutionMode::
+ kOutOfProcessBuiltin)
+ .WithSandboxType("utility")
+#else
+ .WithExecutionMode(
+ service_manager::Manifest::ExecutionMode::kInProcessBuiltin)
+#endif
+ .Build())
.ExposeCapability(
"media:media",
service_manager::Manifest::InterfaceList<mojom::MediaService>())
diff --git a/chromium/media/mojo/services/media_service_unittest.cc b/chromium/media/mojo/services/media_service_unittest.cc
index 442aa990d39..2752b1a5a48 100644
--- a/chromium/media/mojo/services/media_service_unittest.cc
+++ b/chromium/media/mojo/services/media_service_unittest.cc
@@ -120,6 +120,14 @@ ACTION_P(QuitLoop, run_loop) {
base::PostTask(FROM_HERE, run_loop->QuitClosure());
}
+service_manager::Manifest MakeMediaManifestForExecutable() {
+ service_manager::Manifest manifest = GetMediaManifest();
+ manifest.options.sandbox_type = "none";
+ manifest.options.execution_mode =
+ service_manager::Manifest::ExecutionMode::kStandaloneExecutable;
+ return manifest;
+}
+
const char kTestServiceName[] = "media_service_unittests";
// Tests MediaService built into a standalone mojo service binary (see
@@ -131,7 +139,7 @@ class MediaServiceTest : public testing::Test {
public:
MediaServiceTest()
: test_service_manager_(
- {GetMediaManifest(),
+ {MakeMediaManifestForExecutable(),
service_manager::ManifestBuilder()
.WithServiceName(kTestServiceName)
.RequireCapability(mojom::kMediaServiceName, "media:media")
diff --git a/chromium/media/mojo/services/mojo_audio_decoder_service.cc b/chromium/media/mojo/services/mojo_audio_decoder_service.cc
index eba8b656e92..72a1d7dea8e 100644
--- a/chromium/media/mojo/services/mojo_audio_decoder_service.cc
+++ b/chromium/media/mojo/services/mojo_audio_decoder_service.cc
@@ -133,11 +133,11 @@ void MojoAudioDecoderService::OnResetDone(ResetCallback callback) {
}
void MojoAudioDecoderService::OnAudioBufferReady(
- const scoped_refptr<AudioBuffer>& audio_buffer) {
+ scoped_refptr<AudioBuffer> audio_buffer) {
DVLOG(1) << __func__;
// TODO(timav): Use DataPipe.
- client_->OnBufferDecoded(mojom::AudioBuffer::From(audio_buffer));
+ client_->OnBufferDecoded(mojom::AudioBuffer::From(*audio_buffer));
}
void MojoAudioDecoderService::OnWaiting(WaitingReason reason) {
diff --git a/chromium/media/mojo/services/mojo_audio_decoder_service.h b/chromium/media/mojo/services/mojo_audio_decoder_service.h
index 2cb52f44e4e..ff7e430228e 100644
--- a/chromium/media/mojo/services/mojo_audio_decoder_service.h
+++ b/chromium/media/mojo/services/mojo_audio_decoder_service.h
@@ -57,7 +57,7 @@ class MEDIA_MOJO_EXPORT MojoAudioDecoderService : public mojom::AudioDecoder {
void OnResetDone(ResetCallback callback);
// Called by |decoder_| for each decoded buffer.
- void OnAudioBufferReady(const scoped_refptr<AudioBuffer>& audio_buffer);
+ void OnAudioBufferReady(scoped_refptr<AudioBuffer> audio_buffer);
// Called by |decoder_| when it's waiting because of |reason|, e.g. waiting
// for decryption key.
diff --git a/chromium/media/mojo/services/mojo_audio_output_stream.cc b/chromium/media/mojo/services/mojo_audio_output_stream.cc
index 27ee8f7fdec..a7944253090 100644
--- a/chromium/media/mojo/services/mojo_audio_output_stream.cc
+++ b/chromium/media/mojo/services/mojo_audio_output_stream.cc
@@ -52,6 +52,11 @@ void MojoAudioOutputStream::Pause() {
delegate_->OnPauseStream();
}
+void MojoAudioOutputStream::Flush() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ delegate_->OnFlushStream();
+}
+
void MojoAudioOutputStream::SetVolume(double volume) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
if (volume < 0 || volume > 1) {
diff --git a/chromium/media/mojo/services/mojo_audio_output_stream.h b/chromium/media/mojo/services/mojo_audio_output_stream.h
index 1556ca59032..5d0e84fdee4 100644
--- a/chromium/media/mojo/services/mojo_audio_output_stream.h
+++ b/chromium/media/mojo/services/mojo_audio_output_stream.h
@@ -46,6 +46,7 @@ class MEDIA_MOJO_EXPORT MojoAudioOutputStream
// mojom::AudioOutputStream implementation.
void Play() override;
void Pause() override;
+ void Flush() override;
void SetVolume(double volume) override;
// AudioOutputDelegate::EventHandler implementation.
diff --git a/chromium/media/mojo/services/mojo_audio_output_stream_provider_unittest.cc b/chromium/media/mojo/services/mojo_audio_output_stream_provider_unittest.cc
index 34543b4c715..1d6cf50db72 100644
--- a/chromium/media/mojo/services/mojo_audio_output_stream_provider_unittest.cc
+++ b/chromium/media/mojo/services/mojo_audio_output_stream_provider_unittest.cc
@@ -51,6 +51,7 @@ class FakeDelegate : public AudioOutputDelegate {
int GetStreamId() override { return 0; }
void OnPlayStream() override {}
void OnPauseStream() override {}
+ void OnFlushStream() override {}
void OnSetVolume(double) override {}
private:
diff --git a/chromium/media/mojo/services/mojo_audio_output_stream_unittest.cc b/chromium/media/mojo/services/mojo_audio_output_stream_unittest.cc
index e5d80a1234b..e1d8748485b 100644
--- a/chromium/media/mojo/services/mojo_audio_output_stream_unittest.cc
+++ b/chromium/media/mojo/services/mojo_audio_output_stream_unittest.cc
@@ -66,6 +66,7 @@ class MockDelegate : public AudioOutputDelegate {
MOCK_METHOD0(OnPlayStream, void());
MOCK_METHOD0(OnPauseStream, void());
MOCK_METHOD1(OnSetVolume, void(double));
+ MOCK_METHOD0(OnFlushStream, void());
};
class MockDelegateFactory {
@@ -228,6 +229,17 @@ TEST_F(MojoAudioOutputStreamTest, SetVolume_SetsVolume) {
base::RunLoop().RunUntilIdle();
}
+TEST_F(MojoAudioOutputStreamTest, Flush_FlushesStream) {
+ AudioOutputStreamPtr audio_output_ptr = CreateAudioOutput();
+
+ EXPECT_CALL(client_, GotNotification());
+ EXPECT_CALL(*delegate_, OnFlushStream());
+ delegate_event_handler_->OnStreamCreated(kStreamId, std::move(mem_),
+ std::move(foreign_socket_));
+ audio_output_ptr->Flush();
+ base::RunLoop().RunUntilIdle();
+}
+
TEST_F(MojoAudioOutputStreamTest, DestructWithCallPending_Safe) {
AudioOutputStreamPtr audio_output_ptr = CreateAudioOutput();
EXPECT_CALL(client_, GotNotification());
diff --git a/chromium/media/mojo/services/mojo_decryptor_service.cc b/chromium/media/mojo/services/mojo_decryptor_service.cc
index 8a08b90f305..952fad2fd20 100644
--- a/chromium/media/mojo/services/mojo_decryptor_service.cc
+++ b/chromium/media/mojo/services/mojo_decryptor_service.cc
@@ -265,7 +265,7 @@ void MojoDecryptorService::OnAudioDecoded(
// improved to use shared memory (http://crbug.com/593896).
std::vector<mojom::AudioBufferPtr> audio_buffers;
for (const auto& frame : frames)
- audio_buffers.push_back(mojom::AudioBuffer::From(frame));
+ audio_buffers.push_back(mojom::AudioBuffer::From(*frame));
std::move(callback).Run(status, std::move(audio_buffers));
}
@@ -273,7 +273,7 @@ void MojoDecryptorService::OnAudioDecoded(
void MojoDecryptorService::OnVideoDecoded(
DecryptAndDecodeVideoCallback callback,
Status status,
- const scoped_refptr<VideoFrame>& frame) {
+ scoped_refptr<VideoFrame> frame) {
DVLOG_IF(1, status != Status::kSuccess)
<< __func__ << ": status = " << status;
DVLOG_IF(3, status == Status::kSuccess) << __func__;
diff --git a/chromium/media/mojo/services/mojo_decryptor_service.h b/chromium/media/mojo/services/mojo_decryptor_service.h
index 17e82580997..8d18baf4994 100644
--- a/chromium/media/mojo/services/mojo_decryptor_service.h
+++ b/chromium/media/mojo/services/mojo_decryptor_service.h
@@ -92,7 +92,7 @@ class MEDIA_MOJO_EXPORT MojoDecryptorService : public mojom::Decryptor {
const media::Decryptor::AudioFrames& frames);
void OnVideoDecoded(DecryptAndDecodeVideoCallback callback,
Status status,
- const scoped_refptr<VideoFrame>& frame);
+ scoped_refptr<VideoFrame> frame);
// Returns audio/video buffer reader according to the |stream_type|.
MojoDecoderBufferReader* GetBufferReader(StreamType stream_type) const;
diff --git a/chromium/media/mojo/services/mojo_mjpeg_decode_accelerator_service.cc b/chromium/media/mojo/services/mojo_mjpeg_decode_accelerator_service.cc
deleted file mode 100644
index 242c694a30e..00000000000
--- a/chromium/media/mojo/services/mojo_mjpeg_decode_accelerator_service.cc
+++ /dev/null
@@ -1,246 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/mojo/services/mojo_mjpeg_decode_accelerator_service.h"
-
-#include <stdint.h>
-
-#include <memory>
-#include <utility>
-
-#include "base/bind.h"
-#include "base/logging.h"
-#include "base/memory/ptr_util.h"
-#include "base/memory/shared_memory.h"
-#include "base/threading/thread_task_runner_handle.h"
-#include "base/trace_event/trace_event.h"
-#include "media/base/bind_to_current_loop.h"
-#include "mojo/public/cpp/bindings/strong_binding.h"
-#include "mojo/public/cpp/system/platform_handle.h"
-#include "ui/gfx/geometry/size.h"
-
-namespace {
-
-void DecodeFinished(std::unique_ptr<base::SharedMemory> shm) {
- // Do nothing. Because VideoFrame is backed by |shm|, the purpose of this
- // function is to just keep reference of |shm| to make sure it lives until
- // decode finishes.
-}
-
-bool VerifyDecodeParams(const gfx::Size& coded_size,
- mojo::ScopedSharedBufferHandle* output_handle,
- uint32_t output_buffer_size) {
- const int kJpegMaxDimension = UINT16_MAX;
- if (coded_size.IsEmpty() || coded_size.width() > kJpegMaxDimension ||
- coded_size.height() > kJpegMaxDimension) {
- LOG(ERROR) << "invalid coded_size " << coded_size.ToString();
- return false;
- }
-
- if (!output_handle->is_valid()) {
- LOG(ERROR) << "invalid output_handle";
- return false;
- }
-
- uint32_t allocation_size =
- media::VideoFrame::AllocationSize(media::PIXEL_FORMAT_I420, coded_size);
- if (output_buffer_size < allocation_size) {
- DLOG(ERROR) << "output_buffer_size is too small: " << output_buffer_size
- << ". It needs: " << allocation_size;
- return false;
- }
-
- return true;
-}
-
-} // namespace
-
-namespace media {
-
-// static
-void MojoMjpegDecodeAcceleratorService::Create(
- mojom::MjpegDecodeAcceleratorRequest request) {
- auto* jpeg_decoder = new MojoMjpegDecodeAcceleratorService();
- mojo::MakeStrongBinding(base::WrapUnique(jpeg_decoder), std::move(request));
-}
-
-MojoMjpegDecodeAcceleratorService::MojoMjpegDecodeAcceleratorService()
- : accelerator_factory_functions_(
- GpuMjpegDecodeAcceleratorFactory::GetAcceleratorFactories()) {}
-
-MojoMjpegDecodeAcceleratorService::~MojoMjpegDecodeAcceleratorService() {
- DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
-}
-
-void MojoMjpegDecodeAcceleratorService::VideoFrameReady(
- int32_t bitstream_buffer_id) {
- DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
- NotifyDecodeStatus(bitstream_buffer_id,
- ::media::MjpegDecodeAccelerator::Error::NO_ERRORS);
-}
-
-void MojoMjpegDecodeAcceleratorService::NotifyError(
- int32_t bitstream_buffer_id,
- ::media::MjpegDecodeAccelerator::Error error) {
- DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
- NotifyDecodeStatus(bitstream_buffer_id, error);
-}
-
-void MojoMjpegDecodeAcceleratorService::Initialize(
- InitializeCallback callback) {
- DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
-
- // When adding non-chromeos platforms, VideoCaptureGpuJpegDecoder::Initialize
- // needs to be updated.
-
- std::unique_ptr<::media::MjpegDecodeAccelerator> accelerator;
- for (const auto& create_jda_function : accelerator_factory_functions_) {
- std::unique_ptr<::media::MjpegDecodeAccelerator> tmp_accelerator =
- create_jda_function.Run(base::ThreadTaskRunnerHandle::Get());
- if (tmp_accelerator && tmp_accelerator->Initialize(this)) {
- accelerator = std::move(tmp_accelerator);
- break;
- }
- }
-
- if (!accelerator) {
- DLOG(ERROR) << "JPEG accelerator initialization failed";
- std::move(callback).Run(false);
- return;
- }
-
- accelerator_ = std::move(accelerator);
- std::move(callback).Run(true);
-}
-
-void MojoMjpegDecodeAcceleratorService::Decode(
- const BitstreamBuffer& input_buffer,
- const gfx::Size& coded_size,
- mojo::ScopedSharedBufferHandle output_handle,
- uint32_t output_buffer_size,
- DecodeCallback callback) {
- DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
- TRACE_EVENT0("jpeg", "MojoMjpegDecodeAcceleratorService::Decode");
-
- DCHECK_EQ(decode_cb_map_.count(input_buffer.id()), 0u);
- decode_cb_map_[input_buffer.id()] = std::move(callback);
-
- if (!VerifyDecodeParams(coded_size, &output_handle, output_buffer_size)) {
- NotifyDecodeStatus(
- input_buffer.id(),
- ::media::MjpegDecodeAccelerator::Error::INVALID_ARGUMENT);
- return;
- }
-
- base::SharedMemoryHandle memory_handle;
- MojoResult result = mojo::UnwrapSharedMemoryHandle(
- std::move(output_handle), &memory_handle, nullptr, nullptr);
- DCHECK_EQ(MOJO_RESULT_OK, result);
-
- std::unique_ptr<base::SharedMemory> output_shm(
- new base::SharedMemory(memory_handle, false));
- if (!output_shm->Map(output_buffer_size)) {
- LOG(ERROR) << "Could not map output shared memory for input buffer id "
- << input_buffer.id();
- NotifyDecodeStatus(
- input_buffer.id(),
- ::media::MjpegDecodeAccelerator::Error::PLATFORM_FAILURE);
- return;
- }
-
- uint8_t* shm_memory = static_cast<uint8_t*>(output_shm->memory());
- scoped_refptr<VideoFrame> frame = VideoFrame::WrapExternalSharedMemory(
- PIXEL_FORMAT_I420, // format
- coded_size, // coded_size
- gfx::Rect(coded_size), // visible_rect
- coded_size, // natural_size
- shm_memory, // data
- output_buffer_size, // data_size
- memory_handle, // handle
- 0, // data_offset
- base::TimeDelta()); // timestamp
- if (!frame.get()) {
- LOG(ERROR) << "Could not create VideoFrame for input buffer id "
- << input_buffer.id();
- NotifyDecodeStatus(
- input_buffer.id(),
- ::media::MjpegDecodeAccelerator::Error::PLATFORM_FAILURE);
- return;
- }
- frame->AddDestructionObserver(
- base::Bind(DecodeFinished, base::Passed(&output_shm)));
-
- DCHECK(accelerator_);
- accelerator_->Decode(input_buffer, frame);
-}
-
-void MojoMjpegDecodeAcceleratorService::DecodeWithFD(
- int32_t buffer_id,
- mojo::ScopedHandle input_handle,
- uint32_t input_buffer_size,
- int32_t coded_size_width,
- int32_t coded_size_height,
- mojo::ScopedHandle output_handle,
- uint32_t output_buffer_size,
- DecodeWithFDCallback callback) {
-#if defined(OS_CHROMEOS)
- DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
- base::PlatformFile input_fd;
- base::PlatformFile output_fd;
- MojoResult result;
-
- result = mojo::UnwrapPlatformFile(std::move(input_handle), &input_fd);
- if (result != MOJO_RESULT_OK) {
- std::move(callback).Run(
- buffer_id, ::media::MjpegDecodeAccelerator::Error::PLATFORM_FAILURE);
- return;
- }
-
- result = mojo::UnwrapPlatformFile(std::move(output_handle), &output_fd);
- if (result != MOJO_RESULT_OK) {
- std::move(callback).Run(
- buffer_id, ::media::MjpegDecodeAccelerator::Error::PLATFORM_FAILURE);
- return;
- }
-
- base::UnguessableToken guid = base::UnguessableToken::Create();
- base::SharedMemoryHandle input_shm_handle(
- base::FileDescriptor(input_fd, true), 0u, guid);
- base::SharedMemoryHandle output_shm_handle(
- base::FileDescriptor(output_fd, true), 0u, guid);
-
- media::BitstreamBuffer in_buffer(buffer_id, input_shm_handle,
- input_buffer_size);
- gfx::Size coded_size(coded_size_width, coded_size_height);
-
- mojo::ScopedSharedBufferHandle output_scoped_handle =
- mojo::WrapSharedMemoryHandle(
- output_shm_handle, output_buffer_size,
- mojo::UnwrappedSharedMemoryHandleProtection::kReadWrite);
-
- Decode(in_buffer, coded_size, std::move(output_scoped_handle),
- output_buffer_size, std::move(callback));
-#else
- NOTREACHED();
-#endif
-}
-
-void MojoMjpegDecodeAcceleratorService::Uninitialize() {
- // TODO(c.padhi): see http://crbug.com/699255.
- NOTIMPLEMENTED();
-}
-
-void MojoMjpegDecodeAcceleratorService::NotifyDecodeStatus(
- int32_t bitstream_buffer_id,
- ::media::MjpegDecodeAccelerator::Error error) {
- DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
-
- auto iter = decode_cb_map_.find(bitstream_buffer_id);
- DCHECK(iter != decode_cb_map_.end());
- DecodeCallback decode_cb = std::move(iter->second);
- decode_cb_map_.erase(iter);
- std::move(decode_cb).Run(bitstream_buffer_id, error);
-}
-
-} // namespace media
diff --git a/chromium/media/mojo/services/mojo_mjpeg_decode_accelerator_service.h b/chromium/media/mojo/services/mojo_mjpeg_decode_accelerator_service.h
deleted file mode 100644
index bb0c6349d6b..00000000000
--- a/chromium/media/mojo/services/mojo_mjpeg_decode_accelerator_service.h
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_MOJO_SERVICES_MOJO_MJPEG_DECODE_ACCELERATOR_SERVICE_H_
-#define MEDIA_MOJO_SERVICES_MOJO_MJPEG_DECODE_ACCELERATOR_SERVICE_H_
-
-#include <stdint.h>
-
-#include <memory>
-
-#include "base/macros.h"
-#include "base/memory/ref_counted.h"
-#include "base/threading/thread_checker.h"
-#include "media/gpu/gpu_mjpeg_decode_accelerator_factory.h"
-#include "media/mojo/interfaces/mjpeg_decode_accelerator.mojom.h"
-#include "media/mojo/services/media_mojo_export.h"
-#include "media/video/mjpeg_decode_accelerator.h"
-
-namespace media {
-
-// Implementation of a mojom::MjpegDecodeAccelerator which runs in the GPU
-// process, and wraps a JpegDecodeAccelerator.
-class MEDIA_MOJO_EXPORT MojoMjpegDecodeAcceleratorService
- : public mojom::MjpegDecodeAccelerator,
- public MjpegDecodeAccelerator::Client {
- public:
- static void Create(mojom::MjpegDecodeAcceleratorRequest request);
-
- ~MojoMjpegDecodeAcceleratorService() override;
-
- // MjpegDecodeAccelerator::Client implementation.
- void VideoFrameReady(int32_t buffer_id) override;
- void NotifyError(int32_t buffer_id,
- ::media::MjpegDecodeAccelerator::Error error) override;
-
- private:
- using DecodeCallbackMap = std::unordered_map<int32_t, DecodeCallback>;
-
- // This constructor internally calls
- // GpuMjpegDecodeAcceleratorFactory::GetAcceleratorFactories() to
- // fill |accelerator_factory_functions_|.
- MojoMjpegDecodeAcceleratorService();
-
- // mojom::MjpegDecodeAccelerator implementation.
- void Initialize(InitializeCallback callback) override;
- void Decode(const BitstreamBuffer& input_buffer,
- const gfx::Size& coded_size,
- mojo::ScopedSharedBufferHandle output_handle,
- uint32_t output_buffer_size,
- DecodeCallback callback) override;
- void DecodeWithFD(int32_t buffer_id,
- mojo::ScopedHandle input_fd,
- uint32_t input_buffer_size,
- int32_t coded_size_width,
- int32_t coded_size_height,
- mojo::ScopedHandle output_fd,
- uint32_t output_buffer_size,
- DecodeWithFDCallback callback) override;
- void Uninitialize() override;
-
- void NotifyDecodeStatus(int32_t bitstream_buffer_id,
- ::media::MjpegDecodeAccelerator::Error error);
-
- const std::vector<GpuMjpegDecodeAcceleratorFactory::CreateAcceleratorCB>
- accelerator_factory_functions_;
-
- // A map from bitstream_buffer_id to DecodeCallback.
- DecodeCallbackMap decode_cb_map_;
-
- std::unique_ptr<::media::MjpegDecodeAccelerator> accelerator_;
-
- THREAD_CHECKER(thread_checker_);
-
- DISALLOW_COPY_AND_ASSIGN(MojoMjpegDecodeAcceleratorService);
-};
-
-} // namespace media
-
-#endif // MEDIA_MOJO_SERVICES_MOJO_MJPEG_DECODE_ACCELERATOR_SERVICE_H_
diff --git a/chromium/media/mojo/services/mojo_mjpeg_decode_accelerator_service_unittest.cc b/chromium/media/mojo/services/mojo_mjpeg_decode_accelerator_service_unittest.cc
deleted file mode 100644
index 91403b4a05c..00000000000
--- a/chromium/media/mojo/services/mojo_mjpeg_decode_accelerator_service_unittest.cc
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/mojo/services/mojo_mjpeg_decode_accelerator_service.h"
-
-#include "base/bind.h"
-#include "base/command_line.h"
-#include "base/run_loop.h"
-#include "base/test/scoped_task_environment.h"
-#include "base/threading/thread.h"
-#include "media/base/media_switches.h"
-#include "mojo/public/cpp/system/platform_handle.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace media {
-
-static const int32_t kArbitraryBitstreamBufferId = 123;
-
-// Test fixture for the unit that is created via the mojom interface for
-// class MojoMjpegDecodeAcceleratorService. Uses a FakeJpegDecodeAccelerator to
-// simulate the actual decoding without the need for special hardware.
-class MojoMjpegDecodeAcceleratorServiceTest : public ::testing::Test {
- public:
- MojoMjpegDecodeAcceleratorServiceTest() = default;
- ~MojoMjpegDecodeAcceleratorServiceTest() override = default;
-
- void SetUp() override {
- base::CommandLine::ForCurrentProcess()->AppendSwitch(
- switches::kUseFakeMjpegDecodeAccelerator);
- }
-
- void OnInitializeDone(const base::Closure& continuation, bool success) {
- EXPECT_TRUE(success);
- continuation.Run();
- }
-
- void OnDecodeAck(const base::Closure& continuation,
- int32_t bitstream_buffer_id,
- MjpegDecodeAccelerator::Error error) {
- EXPECT_EQ(kArbitraryBitstreamBufferId, bitstream_buffer_id);
- continuation.Run();
- }
-
- private:
- // This is required to allow base::ThreadTaskRunnerHandle::Get() from the
- // test execution thread.
- base::test::ScopedTaskEnvironment scoped_task_environment_;
-};
-
-TEST_F(MojoMjpegDecodeAcceleratorServiceTest, InitializeAndDecode) {
- mojom::MjpegDecodeAcceleratorPtr jpeg_decoder;
- MojoMjpegDecodeAcceleratorService::Create(mojo::MakeRequest(&jpeg_decoder));
-
- base::RunLoop run_loop;
- jpeg_decoder->Initialize(
- base::Bind(&MojoMjpegDecodeAcceleratorServiceTest::OnInitializeDone,
- base::Unretained(this), run_loop.QuitClosure()));
- run_loop.Run();
-
- const size_t kInputBufferSizeInBytes = 512;
- const size_t kOutputFrameSizeInBytes = 1024;
- const gfx::Size kDummyFrameCodedSize(10, 10);
- const char kKeyId[] = "key id";
- const char kIv[] = "0123456789abcdef";
- std::vector<SubsampleEntry> subsamples;
- subsamples.push_back(SubsampleEntry(10, 5));
- subsamples.push_back(SubsampleEntry(15, 7));
-
- base::RunLoop run_loop2;
- base::SharedMemory shm;
- ASSERT_TRUE(shm.CreateAndMapAnonymous(kInputBufferSizeInBytes));
-
- mojo::ScopedSharedBufferHandle output_frame_handle =
- mojo::SharedBufferHandle::Create(kOutputFrameSizeInBytes);
-
- BitstreamBuffer bitstream_buffer(
- kArbitraryBitstreamBufferId,
- base::SharedMemory::DuplicateHandle(shm.handle()),
- kInputBufferSizeInBytes);
- bitstream_buffer.SetDecryptionSettings(kKeyId, kIv, subsamples);
-
- jpeg_decoder->Decode(
- bitstream_buffer, kDummyFrameCodedSize, std::move(output_frame_handle),
- base::checked_cast<uint32_t>(kOutputFrameSizeInBytes),
- base::Bind(&MojoMjpegDecodeAcceleratorServiceTest::OnDecodeAck,
- base::Unretained(this), run_loop2.QuitClosure()));
- run_loop2.Run();
-}
-
-} // namespace media
diff --git a/chromium/media/mojo/services/mojo_renderer_service.cc b/chromium/media/mojo/services/mojo_renderer_service.cc
index 7fe6e962fb8..c781657c331 100644
--- a/chromium/media/mojo/services/mojo_renderer_service.cc
+++ b/chromium/media/mojo/services/mojo_renderer_service.cc
@@ -189,7 +189,7 @@ void MojoRendererService::OnVideoNaturalSizeChange(const gfx::Size& size) {
}
void MojoRendererService::OnRemotePlayStateChange(MediaStatus::State state) {
- client_->OnRemotePlayStateChange(state);
+ // TODO(https://crbug.com/956677, tguilbert): Remove this function.
}
void MojoRendererService::OnVideoOpacityChange(bool opaque) {
diff --git a/chromium/media/mojo/services/mojo_video_decoder_service.cc b/chromium/media/mojo/services/mojo_video_decoder_service.cc
index b35480ae8d1..b1f05f006ac 100644
--- a/chromium/media/mojo/services/mojo_video_decoder_service.cc
+++ b/chromium/media/mojo/services/mojo_video_decoder_service.cc
@@ -313,9 +313,8 @@ void MojoVideoDecoderService::OnDecoderReset() {
std::move(reset_cb_).Run();
}
-void MojoVideoDecoderService::OnDecoderOutput(
- const scoped_refptr<VideoFrame>& frame) {
- DVLOG(3) << __func__;
+void MojoVideoDecoderService::OnDecoderOutput(scoped_refptr<VideoFrame> frame) {
+ DVLOG(3) << __func__ << " pts=" << frame->timestamp().InMilliseconds();
DCHECK(client_);
DCHECK(decoder_);
TRACE_EVENT1("media", "MojoVideoDecoderService::OnDecoderOutput",
@@ -336,7 +335,8 @@ void MojoVideoDecoderService::OnDecoderOutput(
release_token = releaser->RegisterVideoFrame(frame);
}
- client_->OnVideoFrameDecoded(frame, decoder_->CanReadWithoutStalling(),
+ client_->OnVideoFrameDecoded(std::move(frame),
+ decoder_->CanReadWithoutStalling(),
std::move(release_token));
}
diff --git a/chromium/media/mojo/services/mojo_video_decoder_service.h b/chromium/media/mojo/services/mojo_video_decoder_service.h
index 39c24400e51..d509efeecab 100644
--- a/chromium/media/mojo/services/mojo_video_decoder_service.h
+++ b/chromium/media/mojo/services/mojo_video_decoder_service.h
@@ -75,7 +75,7 @@ class MEDIA_MOJO_EXPORT MojoVideoDecoderService final
void OnReaderFlushed();
void OnDecoderReset();
- void OnDecoderOutput(const scoped_refptr<VideoFrame>& frame);
+ void OnDecoderOutput(scoped_refptr<VideoFrame> frame);
void OnDecoderWaiting(WaitingReason reason);
diff --git a/chromium/media/mojo/services/mojo_video_encode_accelerator_service.cc b/chromium/media/mojo/services/mojo_video_encode_accelerator_service.cc
index edc143e46be..f3d1a547d73 100644
--- a/chromium/media/mojo/services/mojo_video_encode_accelerator_service.cc
+++ b/chromium/media/mojo/services/mojo_video_encode_accelerator_service.cc
@@ -122,16 +122,10 @@ void MojoVideoEncodeAcceleratorService::UseOutputBitstreamBuffer(
return;
}
- base::SharedMemoryHandle handle;
- size_t memory_size = 0;
- auto result = mojo::UnwrapSharedMemoryHandle(std::move(buffer), &handle,
- &memory_size, nullptr);
- if (result != MOJO_RESULT_OK || memory_size == 0u) {
- DLOG(ERROR) << __func__ << " mojo::UnwrapSharedMemoryHandle() failed";
- NotifyError(::media::VideoEncodeAccelerator::kPlatformFailureError);
- return;
- }
+ base::subtle::PlatformSharedMemoryRegion region =
+ mojo::UnwrapPlatformSharedMemoryRegion(std::move(buffer));
+ auto memory_size = region.GetSize();
if (memory_size < output_buffer_size_) {
DLOG(ERROR) << __func__ << " bitstream_buffer_id=" << bitstream_buffer_id
<< " has a size of " << memory_size
@@ -141,7 +135,7 @@ void MojoVideoEncodeAcceleratorService::UseOutputBitstreamBuffer(
}
encoder_->UseOutputBitstreamBuffer(
- BitstreamBuffer(bitstream_buffer_id, handle, memory_size));
+ BitstreamBuffer(bitstream_buffer_id, std::move(region), memory_size));
}
void MojoVideoEncodeAcceleratorService::RequestEncodingParametersChange(
diff --git a/chromium/media/mojo/services/video_decode_perf_history.cc b/chromium/media/mojo/services/video_decode_perf_history.cc
index 902b14aa044..b5bd0f55ccb 100644
--- a/chromium/media/mojo/services/video_decode_perf_history.cc
+++ b/chromium/media/mojo/services/video_decode_perf_history.cc
@@ -216,13 +216,14 @@ void VideoDecodePerfHistory::SavePerfRecord(ukm::SourceId source_id,
uint64_t player_id,
base::OnceClosure save_done_cb) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
- DVLOG(3) << __func__
- << base::StringPrintf(
- " profile:%s size:%s fps:%d decoded:%d dropped:%d",
- GetProfileName(features.profile).c_str(),
- features.video_size.ToString().c_str(),
- features.frames_per_sec, targets.frames_decoded,
- targets.frames_dropped);
+ DVLOG(3)
+ << __func__
+ << base::StringPrintf(
+ " profile:%s size:%s fps:%d decoded:%d dropped:%d efficient:%d",
+ GetProfileName(features.profile).c_str(),
+ features.video_size.ToString().c_str(), features.frames_per_sec,
+ targets.frames_decoded, targets.frames_dropped,
+ targets.frames_power_efficient);
if (db_init_status_ == FAILED) {
DVLOG(3) << __func__ << " Can't save stats. No DB!";
diff --git a/chromium/media/parsers/BUILD.gn b/chromium/media/parsers/BUILD.gn
new file mode 100644
index 00000000000..140d1aae981
--- /dev/null
+++ b/chromium/media/parsers/BUILD.gn
@@ -0,0 +1,82 @@
+# Copyright 2019 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//media/media_options.gni")
+
+component("parsers") {
+ sources = [
+ "jpeg_parser.cc",
+ "jpeg_parser.h",
+ "media_parsers_export.h",
+ "vp8_bool_decoder.cc",
+ "vp8_bool_decoder.h",
+ "vp8_parser.cc",
+ "vp8_parser.h",
+ "webp_parser.cc",
+ "webp_parser.h",
+ ]
+ defines = [ "IS_MEDIA_PARSER_IMPL" ]
+ deps = [
+ "//base",
+ ]
+
+ # This target is used in GPU IPC code and cannot depend on any //media code.
+ assert_no_deps = [
+ "//media",
+ "//media:shared_memory_support",
+ ]
+}
+
+source_set("unit_tests") {
+ testonly = true
+ sources = [
+ "jpeg_parser_unittest.cc",
+ "vp8_bool_decoder_unittest.cc",
+ "vp8_parser_unittest.cc",
+ "webp_parser_unittest.cc",
+ ]
+ deps = [
+ ":parsers",
+ "//base",
+ "//media:test_support",
+ "//testing/gtest",
+ ]
+}
+
+fuzzer_test("media_jpeg_parser_picture_fuzzer") {
+ sources = [
+ "jpeg_parser_picture_fuzzertest.cc",
+ ]
+ deps = [
+ ":parsers",
+ "//base",
+ ]
+ seed_corpus = "//media/test/data"
+ dict = "//media/test/jpeg.dict"
+}
+
+fuzzer_test("media_vp8_parser_fuzzer") {
+ sources = [
+ "vp8_parser_fuzzertest.cc",
+ ]
+ deps = [
+ ":parsers",
+ "//base",
+ "//media:test_support",
+ ]
+ libfuzzer_options = [ "max_len = 400000" ]
+ dict = "//media/test/vp8.dict"
+}
+
+fuzzer_test("media_webp_parser_fuzzer") {
+ sources = [
+ "webp_parser_fuzzertest.cc",
+ ]
+ deps = [
+ ":parsers",
+ "//base",
+ ]
+ seed_corpus = "//media/test/data"
+ dict = "//media/test/webp.dict"
+}
diff --git a/chromium/media/filters/jpeg_parser.cc b/chromium/media/parsers/jpeg_parser.cc
index e2b943212c9..344f7c36995 100644
--- a/chromium/media/filters/jpeg_parser.cc
+++ b/chromium/media/parsers/jpeg_parser.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/filters/jpeg_parser.h"
+#include "media/parsers/jpeg_parser.h"
#include "base/big_endian.h"
#include "base/logging.h"
diff --git a/chromium/media/filters/jpeg_parser.h b/chromium/media/parsers/jpeg_parser.h
index 0717756e023..bd054f555b8 100644
--- a/chromium/media/filters/jpeg_parser.h
+++ b/chromium/media/parsers/jpeg_parser.h
@@ -1,14 +1,14 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-#ifndef MEDIA_FILTERS_JPEG_PARSER_H_
-#define MEDIA_FILTERS_JPEG_PARSER_H_
+
+#ifndef MEDIA_PARSERS_JPEG_PARSER_H_
+#define MEDIA_PARSERS_JPEG_PARSER_H_
#include <stddef.h>
#include <stdint.h>
-#include "base/component_export.h"
+#include "media/parsers/media_parsers_export.h"
namespace media {
@@ -81,11 +81,11 @@ struct JpegHuffmanTable {
};
// K.3.3.1 "Specification of typical tables for DC difference coding"
-COMPONENT_EXPORT(JPEG_PARSER)
+MEDIA_PARSERS_EXPORT
extern const JpegHuffmanTable kDefaultDcTable[kJpegMaxHuffmanTableNumBaseline];
// K.3.3.2 "Specification of typical tables for AC coefficient coding"
-COMPONENT_EXPORT(JPEG_PARSER)
+MEDIA_PARSERS_EXPORT
extern const JpegHuffmanTable kDefaultAcTable[kJpegMaxHuffmanTableNumBaseline];
// Parsing result of JPEG DQT marker.
@@ -94,11 +94,11 @@ struct JpegQuantizationTable {
uint8_t value[kDctSize]; // baseline only supports 8 bits quantization table
};
-COMPONENT_EXPORT(JPEG_PARSER) extern const uint8_t kZigZag8x8[64];
+MEDIA_PARSERS_EXPORT extern const uint8_t kZigZag8x8[64];
// Table K.1 Luminance quantization table
// Table K.2 Chrominance quantization table
-COMPONENT_EXPORT(JPEG_PARSER)
+MEDIA_PARSERS_EXPORT
extern const JpegQuantizationTable kDefaultQuantTable[2];
// Parsing result of a JPEG component.
@@ -146,7 +146,7 @@ struct JpegParseResult {
// Parses JPEG picture in |buffer| with |length|. Returns true iff header is
// valid and JPEG baseline sequential process is present. If parsed
// successfully, |result| is the parsed result.
-COMPONENT_EXPORT(JPEG_PARSER)
+MEDIA_PARSERS_EXPORT
bool ParseJpegPicture(const uint8_t* buffer,
size_t length,
JpegParseResult* result);
@@ -154,11 +154,11 @@ bool ParseJpegPicture(const uint8_t* buffer,
// Parses the first image of JPEG stream in |buffer| with |length|. Returns
// true iff header is valid and JPEG baseline sequential process is present.
// If parsed successfully, |result| is the parsed result.
-COMPONENT_EXPORT(JPEG_PARSER)
+MEDIA_PARSERS_EXPORT
bool ParseJpegStream(const uint8_t* buffer,
size_t length,
JpegParseResult* result);
} // namespace media
-#endif // MEDIA_FILTERS_JPEG_PARSER_H_
+#endif // MEDIA_PARSERS_JPEG_PARSER_H_
diff --git a/chromium/media/filters/jpeg_parser_picture_fuzzertest.cc b/chromium/media/parsers/jpeg_parser_picture_fuzzertest.cc
index d41fcb6a233..31d9cae7e0f 100644
--- a/chromium/media/filters/jpeg_parser_picture_fuzzertest.cc
+++ b/chromium/media/parsers/jpeg_parser_picture_fuzzertest.cc
@@ -6,7 +6,7 @@
#include <stdint.h>
#include "base/logging.h"
-#include "media/filters/jpeg_parser.h"
+#include "media/parsers/jpeg_parser.h"
struct Environment {
Environment() { logging::SetMinLogLevel(logging::LOG_FATAL); }
diff --git a/chromium/media/filters/jpeg_parser_unittest.cc b/chromium/media/parsers/jpeg_parser_unittest.cc
index b29ebad0f99..72b9ae957aa 100644
--- a/chromium/media/filters/jpeg_parser_unittest.cc
+++ b/chromium/media/parsers/jpeg_parser_unittest.cc
@@ -8,7 +8,7 @@
#include "base/files/memory_mapped_file.h"
#include "base/path_service.h"
#include "media/base/test_data_util.h"
-#include "media/filters/jpeg_parser.h"
+#include "media/parsers/jpeg_parser.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace media {
diff --git a/chromium/media/parsers/media_parsers_export.h b/chromium/media/parsers/media_parsers_export.h
new file mode 100644
index 00000000000..72277ea7a92
--- /dev/null
+++ b/chromium/media/parsers/media_parsers_export.h
@@ -0,0 +1,12 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_PARSERS_MEDIA_PARSERS_EXPORT_H_
+#define MEDIA_PARSERS_MEDIA_PARSERS_EXPORT_H_
+
+#include "base/component_export.h"
+
+#define MEDIA_PARSERS_EXPORT COMPONENT_EXPORT(MEDIA_PARSER)
+
+#endif // MEDIA_PARSERS_MEDIA_PARSERS_EXPORT_H_
diff --git a/chromium/media/filters/vp8_bool_decoder.cc b/chromium/media/parsers/vp8_bool_decoder.cc
index 7ec4859c1bc..4f156ad8daa 100644
--- a/chromium/media/filters/vp8_bool_decoder.cc
+++ b/chromium/media/parsers/vp8_bool_decoder.cc
@@ -40,12 +40,13 @@
// project. (http://www.webmproject.org/code)
// It is used to decode bits from a vp8 stream.
+#include "media/parsers/vp8_bool_decoder.h"
+
#include <limits.h>
#include <algorithm>
#include "base/numerics/safe_conversions.h"
-#include "media/filters/vp8_bool_decoder.h"
namespace media {
diff --git a/chromium/media/filters/vp8_bool_decoder.h b/chromium/media/parsers/vp8_bool_decoder.h
index 16a98bdb73e..0f407cfbcc9 100644
--- a/chromium/media/filters/vp8_bool_decoder.h
+++ b/chromium/media/parsers/vp8_bool_decoder.h
@@ -40,8 +40,8 @@
// project. (http://www.webmproject.org/code)
// It is used to decode bits from a vp8 stream.
-#ifndef MEDIA_FILTERS_VP8_BOOL_DECODER_H_
-#define MEDIA_FILTERS_VP8_BOOL_DECODER_H_
+#ifndef MEDIA_PARSERS_VP8_BOOL_DECODER_H_
+#define MEDIA_PARSERS_VP8_BOOL_DECODER_H_
#include <stddef.h>
#include <stdint.h>
@@ -49,13 +49,13 @@
#include "base/logging.h"
#include "base/macros.h"
-#include "media/base/media_export.h"
+#include "media/parsers/media_parsers_export.h"
namespace media {
// A class to decode the VP8's boolean entropy coded stream. It's a variant of
// arithmetic coding. See RFC 6386 - Chapter 7. Boolean Entropy Decoder.
-class MEDIA_EXPORT Vp8BoolDecoder {
+class MEDIA_PARSERS_EXPORT Vp8BoolDecoder {
public:
Vp8BoolDecoder();
@@ -132,4 +132,4 @@ class MEDIA_EXPORT Vp8BoolDecoder {
} // namespace media
-#endif // MEDIA_FILTERS_VP8_BOOL_DECODER_H_
+#endif // MEDIA_PARSERS_VP8_BOOL_DECODER_H_
diff --git a/chromium/media/filters/vp8_bool_decoder_unittest.cc b/chromium/media/parsers/vp8_bool_decoder_unittest.cc
index f1f020b0cf9..87968f4fc26 100644
--- a/chromium/media/filters/vp8_bool_decoder_unittest.cc
+++ b/chromium/media/parsers/vp8_bool_decoder_unittest.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/filters/vp8_bool_decoder.h"
+#include "media/parsers/vp8_bool_decoder.h"
#include <stddef.h>
#include <stdint.h>
diff --git a/chromium/media/filters/vp8_parser.cc b/chromium/media/parsers/vp8_parser.cc
index cfdcce2ccca..04345b5f110 100644
--- a/chromium/media/filters/vp8_parser.cc
+++ b/chromium/media/parsers/vp8_parser.cc
@@ -5,8 +5,9 @@
// This file contains an implementation of a VP8 raw stream parser,
// as defined in RFC 6386.
+#include "media/parsers/vp8_parser.h"
+
#include "base/logging.h"
-#include "media/filters/vp8_parser.h"
namespace media {
diff --git a/chromium/media/filters/vp8_parser.h b/chromium/media/parsers/vp8_parser.h
index c1a95cc8a98..11585bc04e1 100644
--- a/chromium/media/filters/vp8_parser.h
+++ b/chromium/media/parsers/vp8_parser.h
@@ -5,15 +5,15 @@
// This file contains an implementation of a VP8 raw stream parser,
// as defined in RFC 6386.
-#ifndef MEDIA_FILTERS_VP8_PARSER_H_
-#define MEDIA_FILTERS_VP8_PARSER_H_
+#ifndef MEDIA_PARSERS_VP8_PARSER_H_
+#define MEDIA_PARSERS_VP8_PARSER_H_
#include <stddef.h>
#include <stdint.h>
#include "base/macros.h"
-#include "media/base/media_export.h"
-#include "media/filters/vp8_bool_decoder.h"
+#include "media/parsers/media_parsers_export.h"
+#include "media/parsers/vp8_bool_decoder.h"
namespace media {
@@ -23,7 +23,7 @@ const size_t kNumMBFeatureTreeProbs = 3;
// Member of Vp8FrameHeader and will be 0-initialized
// in Vp8FrameHeader's constructor.
-struct MEDIA_EXPORT Vp8SegmentationHeader {
+struct Vp8SegmentationHeader {
enum SegmentFeatureMode { FEATURE_MODE_DELTA = 0, FEATURE_MODE_ABSOLUTE = 1 };
bool segmentation_enabled;
@@ -41,7 +41,7 @@ const size_t kNumBlockContexts = 4;
// Member of Vp8FrameHeader and will be 0-initialized
// in Vp8FrameHeader's constructor.
-struct MEDIA_EXPORT Vp8LoopFilterHeader {
+struct Vp8LoopFilterHeader {
enum Type { LOOP_FILTER_TYPE_NORMAL = 0, LOOP_FILTER_TYPE_SIMPLE = 1 };
Type type;
uint8_t level;
@@ -55,7 +55,7 @@ struct MEDIA_EXPORT Vp8LoopFilterHeader {
// Member of Vp8FrameHeader and will be 0-initialized
// in Vp8FrameHeader's constructor.
-struct MEDIA_EXPORT Vp8QuantizationHeader {
+struct Vp8QuantizationHeader {
uint8_t y_ac_qi;
int8_t y_dc_delta;
int8_t y2_dc_delta;
@@ -96,7 +96,7 @@ enum Vp8RefType : size_t {
VP8_FRAME_ALTREF = 2,
};
-struct MEDIA_EXPORT Vp8FrameHeader {
+struct MEDIA_PARSERS_EXPORT Vp8FrameHeader {
Vp8FrameHeader();
enum FrameType { KEYFRAME = 0, INTERFRAME = 1 };
@@ -164,7 +164,7 @@ struct MEDIA_EXPORT Vp8FrameHeader {
};
// A parser for raw VP8 streams as specified in RFC 6386.
-class MEDIA_EXPORT Vp8Parser {
+class MEDIA_PARSERS_EXPORT Vp8Parser {
public:
Vp8Parser();
~Vp8Parser();
@@ -205,4 +205,4 @@ class MEDIA_EXPORT Vp8Parser {
} // namespace media
-#endif // MEDIA_FILTERS_VP8_PARSER_H_
+#endif // MEDIA_PARSERS_VP8_PARSER_H_
diff --git a/chromium/media/filters/vp8_parser_fuzzertest.cc b/chromium/media/parsers/vp8_parser_fuzzertest.cc
index 1d3f972e466..75cc6bb317b 100644
--- a/chromium/media/filters/vp8_parser_fuzzertest.cc
+++ b/chromium/media/parsers/vp8_parser_fuzzertest.cc
@@ -7,7 +7,7 @@
#include "base/numerics/safe_conversions.h"
#include "media/filters/ivf_parser.h"
-#include "media/filters/vp8_parser.h"
+#include "media/parsers/vp8_parser.h"
// Entry point for LibFuzzer.
extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
diff --git a/chromium/media/filters/vp8_parser_unittest.cc b/chromium/media/parsers/vp8_parser_unittest.cc
index 966a0b8c5d7..cbe4ff07b5f 100644
--- a/chromium/media/filters/vp8_parser_unittest.cc
+++ b/chromium/media/parsers/vp8_parser_unittest.cc
@@ -9,7 +9,7 @@
#include "base/logging.h"
#include "media/base/test_data_util.h"
#include "media/filters/ivf_parser.h"
-#include "media/filters/vp8_parser.h"
+#include "media/parsers/vp8_parser.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace media {
diff --git a/chromium/media/parsers/webp_parser.cc b/chromium/media/parsers/webp_parser.cc
new file mode 100644
index 00000000000..b0348037e79
--- /dev/null
+++ b/chromium/media/parsers/webp_parser.cc
@@ -0,0 +1,131 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/parsers/webp_parser.h"
+
+#include <limits.h>
+#include <stddef.h>
+#include <string.h>
+
+#include "base/bits.h"
+#include "base/logging.h"
+#include "base/numerics/safe_conversions.h"
+#include "build/build_config.h"
+#include "media/parsers/vp8_parser.h"
+
+#if !defined(ARCH_CPU_LITTLE_ENDIAN)
+#error Big-Endian architecture not supported.
+#endif
+
+namespace media {
+
+namespace {
+
+// The byte position storing the size of the file.
+constexpr size_t kFileSizeBytePosition = 4u;
+
+// The byte position in which the WebP image data begins.
+constexpr size_t kWebPFileBeginBytePosition = 8u;
+
+// The byte position storing the size of the VP8 frame.
+constexpr size_t kVp8FrameSizePosition = 16u;
+
+// The 12 bytes that include the FourCC "WEBPVP8 " plus the VP8 chunk size info.
+constexpr size_t kWebPFileHeaderByteSize = 12u;
+
+// A valid WebP image header and VP8 chunk header require 20 bytes.
+// The VP8 Key Frame's payload also begins at byte 20.
+constexpr size_t kWebPFileAndVp8ChunkHeaderSizeInBytes = 20u;
+
+// The max WebP file size is (2^32 - 10) per the WebP spec:
+// https://developers.google.com/speed/webp/docs/riff_container#webp_file_header
+constexpr uint32_t kMaxWebPFileSize = (1ull << 32) - 10u;
+
+constexpr size_t kSizeOfUint32t = sizeof(uint32_t);
+
+} // namespace
+
+bool IsLossyWebPImage(base::span<const uint8_t> encoded_data) {
+ if (encoded_data.size() < kWebPFileAndVp8ChunkHeaderSizeInBytes)
+ return false;
+
+ DCHECK(encoded_data.data());
+
+ return !memcmp(encoded_data.data(), "RIFF", 4) &&
+ !memcmp(encoded_data.data() + kWebPFileBeginBytePosition, "WEBPVP8 ",
+ 8);
+}
+
+std::unique_ptr<Vp8FrameHeader> ParseWebPImage(
+ base::span<const uint8_t> encoded_data) {
+ if (!IsLossyWebPImage(encoded_data))
+ return nullptr;
+
+ static_assert(CHAR_BIT == 8, "Size of a char is not 8 bits.");
+ static_assert(kSizeOfUint32t == 4u, "Size of uint32_t is not 4 bytes.");
+
+ // Try to acquire the WebP file size. IsLossyWebPImage() has ensured
+ // that we have enough data to read the file size.
+ DCHECK_GE(encoded_data.size(), kFileSizeBytePosition + kSizeOfUint32t);
+
+ // No need to worry about endianness because we assert little-endianness.
+ const uint32_t file_size = *reinterpret_cast<const uint32_t*>(
+ encoded_data.data() + kFileSizeBytePosition);
+
+ // Check that |file_size| is even, per the WebP spec:
+ // https://developers.google.com/speed/webp/docs/riff_container#webp_file_header
+ if (file_size % 2 != 0)
+ return nullptr;
+
+ // Check that |file_size| <= 2^32 - 10, per the WebP spec:
+ // https://developers.google.com/speed/webp/docs/riff_container#webp_file_header
+ if (file_size > kMaxWebPFileSize)
+ return nullptr;
+
+ // Check that the file size in the header matches the encoded data's size.
+ if (base::strict_cast<size_t>(file_size) !=
+ encoded_data.size() - kWebPFileBeginBytePosition) {
+ return nullptr;
+ }
+
+ // Try to acquire the VP8 key frame size and validate that it fits within the
+ // encoded data's size.
+ DCHECK_GE(encoded_data.size(), kVp8FrameSizePosition + kSizeOfUint32t);
+
+ const uint32_t vp8_frame_size = *reinterpret_cast<const uint32_t*>(
+ encoded_data.data() + kVp8FrameSizePosition);
+
+ // Check that the VP8 frame size is bounded by the WebP size.
+ if (base::strict_cast<size_t>(file_size) - kWebPFileHeaderByteSize <
+ base::strict_cast<size_t>(vp8_frame_size)) {
+ return nullptr;
+ }
+
+ // Check that the size of the encoded data is consistent.
+ const size_t vp8_padded_frame_size =
+ base::bits::Align(base::strict_cast<size_t>(vp8_frame_size), 2u);
+ if (encoded_data.size() - kWebPFileAndVp8ChunkHeaderSizeInBytes !=
+ vp8_padded_frame_size) {
+ return nullptr;
+ }
+
+ // Check that the last byte is 0 if |vp8_frame_size| is odd per WebP specs:
+ // https://developers.google.com/speed/webp/docs/riff_container#riff_file_format
+ if (vp8_frame_size % 2 &&
+ encoded_data.data()[encoded_data.size() - 1] != 0u) {
+ return nullptr;
+ }
+
+ // Attempt to parse the VP8 frame.
+ Vp8Parser vp8_parser;
+ auto result = std::make_unique<Vp8FrameHeader>();
+ if (vp8_parser.ParseFrame(
+ encoded_data.data() + kWebPFileAndVp8ChunkHeaderSizeInBytes,
+ base::strict_cast<size_t>(vp8_frame_size), result.get())) {
+ return result;
+ }
+ return nullptr;
+}
+
+} // namespace media
diff --git a/chromium/media/parsers/webp_parser.h b/chromium/media/parsers/webp_parser.h
new file mode 100644
index 00000000000..436d6c74210
--- /dev/null
+++ b/chromium/media/parsers/webp_parser.h
@@ -0,0 +1,38 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_PARSERS_WEBP_PARSER_H_
+#define MEDIA_PARSERS_WEBP_PARSER_H_
+
+#include <stdint.h>
+#include <memory>
+
+#include "base/containers/span.h"
+#include "media/parsers/media_parsers_export.h"
+
+namespace media {
+
+struct Vp8FrameHeader;
+
+// A lightweight WebP file header parser to extract feature and size
+// information. It validates that a given data stream encodes a simple lossy
+// WebP image and populates a Vp8FrameHeader upon successful parsing.
+// For more information, see the WebP Container Specification:
+// https://developers.google.com/speed/webp/docs/riff_container
+
+// Returns true if |encoded_data| claims to encode a simple (non-extended) lossy
+// WebP image. Returns false otherwise.
+MEDIA_PARSERS_EXPORT
+bool IsLossyWebPImage(base::span<const uint8_t> encoded_data);
+
+// Parses a simple (non-extended) lossy WebP image and returns a Vp8FrameHeader
+// containing the parsed VP8 frame contained by the image. Returns nullptr on
+// failure.
+MEDIA_PARSERS_EXPORT
+std::unique_ptr<Vp8FrameHeader> ParseWebPImage(
+ base::span<const uint8_t> encoded_data);
+
+} // namespace media
+
+#endif // MEDIA_PARSERS_WEBP_PARSER_H_
diff --git a/chromium/media/parsers/webp_parser_fuzzertest.cc b/chromium/media/parsers/webp_parser_fuzzertest.cc
new file mode 100644
index 00000000000..d10c830c0af
--- /dev/null
+++ b/chromium/media/parsers/webp_parser_fuzzertest.cc
@@ -0,0 +1,24 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/containers/span.h"
+#include "base/logging.h"
+#include "media/parsers/vp8_parser.h"
+#include "media/parsers/webp_parser.h"
+
+struct Environment {
+ Environment() { logging::SetMinLogLevel(logging::LOG_FATAL); }
+};
+
+Environment* env = new Environment();
+
+// Entry point for LibFuzzer.
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+ base::span<const uint8_t> encoded_data(data, size);
+ media::ParseWebPImage(encoded_data);
+ return 0;
+}
diff --git a/chromium/media/parsers/webp_parser_unittest.cc b/chromium/media/parsers/webp_parser_unittest.cc
new file mode 100644
index 00000000000..4c9d99765b1
--- /dev/null
+++ b/chromium/media/parsers/webp_parser_unittest.cc
@@ -0,0 +1,327 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+
+#include "base/base_paths.h"
+#include "base/containers/span.h"
+#include "base/files/file_path.h"
+#include "base/files/memory_mapped_file.h"
+#include "base/path_service.h"
+#include "media/parsers/vp8_parser.h"
+#include "media/parsers/webp_parser.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+namespace {
+
+constexpr size_t kWebPFileAndVp8ChunkHeaderSizeInBytes = 20u;
+// clang-format off
+constexpr uint8_t kLossyWebPFileHeader[] = {
+ 'R', 'I', 'F', 'F',
+ 0x0c, 0x00, 0x00, 0x00, // == 12 (little endian)
+ 'W', 'E', 'B', 'P',
+ 'V', 'P', '8', ' ',
+ 0x00, 0x00, 0x00, 0x00 // == 0
+};
+constexpr base::span<const uint8_t> kLossyWebPEncodedData(
+ kLossyWebPFileHeader,
+ kWebPFileAndVp8ChunkHeaderSizeInBytes);
+constexpr base::span<const uint8_t> kInvalidWebPEncodedDataSize(
+ kLossyWebPFileHeader,
+ kWebPFileAndVp8ChunkHeaderSizeInBytes - 5u);
+
+constexpr uint8_t kLosslessWebPFileHeader[] = {
+ 'R', 'I', 'F', 'F',
+ 0x0c, 0x00, 0x00, 0x00,
+ 'W', 'E', 'B', 'P',
+ 'V', 'P', '8', 'L',
+ 0x00, 0x00, 0x00, 0x00
+};
+constexpr base::span<const uint8_t> kLosslessWebPEncodedData(
+ kLosslessWebPFileHeader,
+ kWebPFileAndVp8ChunkHeaderSizeInBytes);
+
+constexpr uint8_t kExtendedWebPFileHeader[] = {
+ 'R', 'I', 'F', 'F',
+ 0x0c, 0x00, 0x00, 0x00,
+ 'W', 'E', 'B', 'P',
+ 'V', 'P', '8', 'X',
+ 0x00, 0x00, 0x00, 0x00
+};
+constexpr base::span<const uint8_t> kExtendedWebPEncodedData(
+ kExtendedWebPFileHeader,
+ kWebPFileAndVp8ChunkHeaderSizeInBytes);
+
+constexpr uint8_t kUnknownWebPFileHeader[] = {
+ 'R', 'I', 'F', 'F',
+ 0x0c, 0x00, 0x00, 0x00,
+ 'W', 'E', 'B', 'P',
+ 'V', 'P', '8', '~',
+ 0x00, 0x00, 0x00, 0x00
+};
+constexpr base::span<const uint8_t> kUnknownWebPEncodedData(
+ kUnknownWebPFileHeader,
+ kWebPFileAndVp8ChunkHeaderSizeInBytes);
+
+constexpr uint8_t kInvalidRiffWebPFileHeader[] = {
+ 'X', 'I', 'F', 'F',
+ 0x0c, 0x00, 0x00, 0x00,
+ 'W', 'E', 'B', 'P',
+ 'V', 'P', '8', ' ',
+ 0x00, 0x00, 0x00, 0x00
+};
+constexpr base::span<const uint8_t> kInvalidRiffWebPEncodedData(
+ kInvalidRiffWebPFileHeader,
+ kWebPFileAndVp8ChunkHeaderSizeInBytes);
+
+constexpr uint8_t kInvalidOddFileSizeInWebPFileHeader[] = {
+ 'R', 'I', 'F', 'F',
+ 0x0d, 0x00, 0x00, 0x00, // == 13 (Invalid: should be even)
+ 'W', 'E', 'B', 'P',
+ 'V', 'P', '8', ' ',
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00
+};
+constexpr base::span<const uint8_t> kInvalidOddFileSizeInHeaderWebPEncodedData(
+ kInvalidOddFileSizeInWebPFileHeader,
+ kWebPFileAndVp8ChunkHeaderSizeInBytes + 1u); // Match the reported size
+
+constexpr uint8_t kInvalidLargerThanLimitFileSizeInWebPFileHeader[] = {
+ 'R', 'I', 'F', 'F',
+ 0xfe, 0xff, 0xff, 0xff, // == 2^32 - 2 (Invalid: should be <= 2^32 - 10)
+ 'W', 'E', 'B', 'P',
+ 'V', 'P', '8', ' ',
+ 0x00, 0x00, 0x00, 0x00
+};
+constexpr base::span<const uint8_t>
+kInvalidLargerThanLimitFileSizeInHeaderWebPEncodedData(
+ kInvalidLargerThanLimitFileSizeInWebPFileHeader,
+ kWebPFileAndVp8ChunkHeaderSizeInBytes);
+
+constexpr uint8_t kInvalidLargerFileSizeInWebPFileHeader[] = {
+ 'R', 'I', 'F', 'F',
+ 0x10, 0x00, 0x00, 0x00, // == 16 (Invalid: should be 12)
+ 'W', 'E', 'B', 'P',
+ 'V', 'P', '8', ' ',
+ 0x00, 0x00, 0x00, 0x00
+};
+constexpr base::span<const uint8_t>
+kInvalidLargerFileSizeInHeaderWebPEncodedData(
+ kInvalidLargerFileSizeInWebPFileHeader,
+ kWebPFileAndVp8ChunkHeaderSizeInBytes);
+
+constexpr uint8_t kInvalidKeyFrameSizeInWebPFileHeader[] = {
+ 'R', 'I', 'F', 'F',
+ 0x0c, 0x00, 0x00, 0x00, // == 12
+ 'W', 'E', 'B', 'P',
+ 'V', 'P', '8', ' ',
+ 0xc8, 0x00, 0x00, 0x00 // == 200 (Invalid: should be 0)
+};
+constexpr base::span<const uint8_t> kInvalidKeyFrameSizeInWebPEncodedData(
+ kInvalidKeyFrameSizeInWebPFileHeader,
+ kWebPFileAndVp8ChunkHeaderSizeInBytes);
+
+constexpr uint8_t kMismatchingOddVp8FrameSizeAndDataSize[] = {
+ 'R', 'I', 'F', 'F',
+ 0x12, 0x00, 0x00, 0x00, // == 18
+ 'W', 'E', 'B', 'P',
+ 'V', 'P', '8', ' ',
+ 0x03, 0x00, 0x00, 0x00, // == 3
+ 0x11, 0xa0, 0x23, 0x00, // Valid padding byte
+ 0xfa, 0xcc // Should not exist.
+};
+constexpr base::span<const uint8_t>
+kMismatchingOddVp8FrameSizeAndDataSizeEncodedData(
+ kMismatchingOddVp8FrameSizeAndDataSize,
+ kWebPFileAndVp8ChunkHeaderSizeInBytes + 6u);
+
+constexpr uint8_t kMismatchingEvenVp8FrameSizeAndDataSize[] = {
+ 'R', 'I', 'F', 'F',
+ 0x12, 0x00, 0x00, 0x00, // == 18
+ 'W', 'E', 'B', 'P',
+ 'V', 'P', '8', ' ',
+ 0x04, 0x00, 0x00, 0x00, // == 4
+ 0x11, 0xa0, 0x23, 0x12,
+ 0xfc, 0xcd // Should not exist.
+};
+constexpr base::span<const uint8_t>
+kMismatchingEvenVp8FrameSizeAndDataSizeEncodedData(
+ kMismatchingEvenVp8FrameSizeAndDataSize,
+ kWebPFileAndVp8ChunkHeaderSizeInBytes + 6u);
+
+constexpr uint8_t kInvalidPaddingByteInVp8DataChunk[] = {
+ 'R', 'I', 'F', 'F',
+ 0x10, 0x00, 0x00, 0x00, // == 16
+ 'W', 'E', 'B', 'P',
+ 'V', 'P', '8', ' ',
+ 0x03, 0x00, 0x00, 0x00, // == 3
+ 0x11, 0xa0, 0x23, 0xff // Invalid: last byte should be 0
+};
+constexpr base::span<const uint8_t>
+kInvalidPaddingByteInVp8DataChunkEncodedData(
+ kInvalidPaddingByteInVp8DataChunk,
+ kWebPFileAndVp8ChunkHeaderSizeInBytes + 4u);
+// clang-format on
+
+} // namespace
+
+TEST(WebPParserTest, WebPImageFileValidator) {
+ // Verify that only lossy WebP formats pass.
+ ASSERT_TRUE(IsLossyWebPImage(kLossyWebPEncodedData));
+
+ // Verify that lossless, extended, and unknown WebP formats fail.
+ ASSERT_FALSE(IsLossyWebPImage(kLosslessWebPEncodedData));
+ ASSERT_FALSE(IsLossyWebPImage(kExtendedWebPEncodedData));
+ ASSERT_FALSE(IsLossyWebPImage(kUnknownWebPEncodedData));
+
+ // Verify that invalid WebP file headers and sizes fail.
+ ASSERT_FALSE(IsLossyWebPImage(kInvalidRiffWebPEncodedData));
+ ASSERT_FALSE(IsLossyWebPImage(kInvalidWebPEncodedDataSize));
+}
+
+TEST(WebPParserTest, ParseLossyWebP) {
+ base::FilePath data_dir;
+ ASSERT_TRUE(base::PathService::Get(base::DIR_SOURCE_ROOT, &data_dir));
+
+ base::FilePath file_path = data_dir.AppendASCII("media")
+ .AppendASCII("test")
+ .AppendASCII("data")
+ .AppendASCII("red_green_gradient_lossy.webp");
+
+ base::MemoryMappedFile stream;
+ ASSERT_TRUE(stream.Initialize(file_path))
+ << "Couldn't open stream file: " << file_path.MaybeAsASCII();
+
+ std::unique_ptr<Vp8FrameHeader> result =
+ ParseWebPImage(base::span<const uint8_t>(stream.data(), stream.length()));
+ ASSERT_TRUE(result);
+
+ ASSERT_TRUE(result->IsKeyframe());
+ ASSERT_TRUE(result->data);
+
+ // Original image is 3000x3000.
+ ASSERT_EQ(3000u, result->width);
+ ASSERT_EQ(3000u, result->height);
+}
+
+TEST(WebPParserTest, ParseLosslessWebP) {
+ base::FilePath data_dir;
+ ASSERT_TRUE(base::PathService::Get(base::DIR_SOURCE_ROOT, &data_dir));
+
+ base::FilePath file_path =
+ data_dir.AppendASCII("media")
+ .AppendASCII("test")
+ .AppendASCII("data")
+ .AppendASCII("yellow_pink_gradient_lossless.webp");
+
+ base::MemoryMappedFile stream;
+ ASSERT_TRUE(stream.Initialize(file_path))
+ << "Couldn't open stream file: " << file_path.MaybeAsASCII();
+
+ // Should fail because WebP parser does not parse lossless webp images.
+ std::unique_ptr<Vp8FrameHeader> result =
+ ParseWebPImage(base::span<const uint8_t>(stream.data(), stream.length()));
+ ASSERT_FALSE(result);
+}
+
+TEST(WebPParserTest, ParseExtendedWebP) {
+ base::FilePath data_dir;
+ ASSERT_TRUE(base::PathService::Get(base::DIR_SOURCE_ROOT, &data_dir));
+
+ base::FilePath file_path = data_dir.AppendASCII("media")
+ .AppendASCII("test")
+ .AppendASCII("data")
+ .AppendASCII("bouncy_ball.webp");
+
+ base::MemoryMappedFile stream;
+ ASSERT_TRUE(stream.Initialize(file_path))
+ << "Couldn't open stream file: " << file_path.MaybeAsASCII();
+
+ // Should fail because WebP parser does not parse extended webp images.
+ std::unique_ptr<Vp8FrameHeader> result =
+ ParseWebPImage(base::span<const uint8_t>(stream.data(), stream.length()));
+ ASSERT_FALSE(result);
+}
+
+TEST(WebPParserTest, ParseWebPWithUnknownFormat) {
+ // Should fail when the specifier byte at position 16 holds anything but ' '.
+ std::unique_ptr<Vp8FrameHeader> result =
+ ParseWebPImage(kUnknownWebPEncodedData);
+ ASSERT_FALSE(result);
+}
+
+TEST(WebPParserTest, ParseWebPWithInvalidHeaders) {
+ // Should fail because the header is an invalid WebP container.
+ std::unique_ptr<Vp8FrameHeader> result =
+ ParseWebPImage(kInvalidRiffWebPEncodedData);
+ ASSERT_FALSE(result);
+
+ // Should fail because the header has an invalid size.
+ result = ParseWebPImage(kInvalidWebPEncodedDataSize);
+ ASSERT_FALSE(result);
+}
+
+TEST(WebPParserTest, ParseWebPWithInvalidOddSizeInHeader) {
+ // Should fail because the size reported in the header is odd.
+ std::unique_ptr<Vp8FrameHeader> result =
+ ParseWebPImage(kInvalidOddFileSizeInHeaderWebPEncodedData);
+ ASSERT_FALSE(result);
+}
+
+TEST(WebPParserTest, ParseWebPWithInvalidLargerThanLimitSizeInHeader) {
+ // Should fail because the size reported in the header is larger than
+ // 2^32 - 10 per the WebP spec.
+ std::unique_ptr<Vp8FrameHeader> result =
+ ParseWebPImage(kInvalidLargerThanLimitFileSizeInHeaderWebPEncodedData);
+ ASSERT_FALSE(result);
+}
+
+TEST(WebPParserTest, ParseWebPWithInvalidFileSizeInHeader) {
+ // Should fail because the size reported in the header does not match the
+ // actual data size.
+ std::unique_ptr<Vp8FrameHeader> result =
+ ParseWebPImage(kInvalidLargerFileSizeInHeaderWebPEncodedData);
+ ASSERT_FALSE(result);
+}
+
+TEST(WebPParserTest, ParseWebPWithEmptyVp8KeyFrameAndIncorrectKeyFrameSize) {
+ // Should fail because the reported VP8 key frame size is larger than the
+ // the existing data.
+ std::unique_ptr<Vp8FrameHeader> result =
+ ParseWebPImage(kInvalidKeyFrameSizeInWebPEncodedData);
+ ASSERT_FALSE(result);
+}
+
+TEST(WebPParserTest, ParseWebPWithMismatchingVp8FrameAndDataSize) {
+ // Should fail because the reported VP8 key frame size (even or odd) does not
+ // match the encoded data's size.
+ std::unique_ptr<Vp8FrameHeader> result =
+ ParseWebPImage(kMismatchingOddVp8FrameSizeAndDataSizeEncodedData);
+ ASSERT_FALSE(result);
+
+ result = ParseWebPImage(kMismatchingEvenVp8FrameSizeAndDataSizeEncodedData);
+ ASSERT_FALSE(result);
+}
+
+TEST(WebPParserTest, ParseWebPWithInvalidPaddingByteInVp8DataChunk) {
+ // Should fail because the reported VP8 key frame size is odd and the added
+ // padding byte is not 0.
+ std::unique_ptr<Vp8FrameHeader> result =
+ ParseWebPImage(kInvalidPaddingByteInVp8DataChunkEncodedData);
+ ASSERT_FALSE(result);
+}
+
+TEST(WebPParserTest, ParseWebPWithEmptyVp8KeyFrame) {
+ // Should fail because the VP8 parser is passed a data chunk of size 0.
+ std::unique_ptr<Vp8FrameHeader> result =
+ ParseWebPImage(kLossyWebPEncodedData);
+ ASSERT_FALSE(result);
+}
+
+} // namespace media
diff --git a/chromium/media/remoting/fake_media_resource.cc b/chromium/media/remoting/fake_media_resource.cc
index 7fb2baff9ae..1611d2e2811 100644
--- a/chromium/media/remoting/fake_media_resource.cc
+++ b/chromium/media/remoting/fake_media_resource.cc
@@ -29,7 +29,7 @@ FakeDemuxerStream::FakeDemuxerStream(bool is_audio) {
gfx::Rect rect(0, 0, 640, 480);
video_config_.Initialize(kCodecH264, H264PROFILE_BASELINE,
PIXEL_FORMAT_I420, VideoColorSpace::REC601(),
- VIDEO_ROTATION_0, size, rect, size,
+ kNoTransformation, size, rect, size,
std::vector<uint8_t>(), Unencrypted());
}
ON_CALL(*this, Read(_))
diff --git a/chromium/media/remoting/proto_utils.cc b/chromium/media/remoting/proto_utils.cc
index ae23fe79b1a..daac2b92cfb 100644
--- a/chromium/media/remoting/proto_utils.cc
+++ b/chromium/media/remoting/proto_utils.cc
@@ -377,7 +377,7 @@ bool ConvertProtoToVideoDecoderConfig(
ToMediaVideoCodec(video_message.codec()).value(),
ToMediaVideoCodecProfile(video_message.profile()).value(),
ToMediaVideoPixelFormat(video_message.format()).value(), color_space,
- VIDEO_ROTATION_0,
+ kNoTransformation,
gfx::Size(video_message.coded_size().width(),
video_message.coded_size().height()),
gfx::Rect(video_message.visible_rect().x(),
diff --git a/chromium/media/remoting/stream_provider.cc b/chromium/media/remoting/stream_provider.cc
index df7482d17ee..12598b59e1f 100644
--- a/chromium/media/remoting/stream_provider.cc
+++ b/chromium/media/remoting/stream_provider.cc
@@ -10,7 +10,7 @@
#include "base/containers/circular_deque.h"
#include "base/logging.h"
#include "media/base/decoder_buffer.h"
-#include "media/base/video_rotation.h"
+#include "media/base/video_transformation.h"
#include "media/remoting/proto_enum_utils.h"
#include "media/remoting/proto_utils.h"
diff --git a/chromium/media/renderers/BUILD.gn b/chromium/media/renderers/BUILD.gn
index 6105faa04a0..838b4d86a70 100644
--- a/chromium/media/renderers/BUILD.gn
+++ b/chromium/media/renderers/BUILD.gn
@@ -39,6 +39,7 @@ source_set("renderers") {
"//cc/base", # For MathUtil.
"//cc/paint",
"//components/viz/client",
+ "//components/viz/common",
"//gpu/command_buffer/client:gles2_interface",
"//gpu/command_buffer/common",
"//media:media_buildflags",
diff --git a/chromium/media/renderers/audio_renderer_impl.cc b/chromium/media/renderers/audio_renderer_impl.cc
index 167f91818d9..69d9bf01d21 100644
--- a/chromium/media/renderers/audio_renderer_impl.cc
+++ b/chromium/media/renderers/audio_renderer_impl.cc
@@ -275,6 +275,11 @@ void AudioRendererImpl::Flush(const base::Closure& callback) {
DCHECK(task_runner_->BelongsToCurrentThread());
TRACE_EVENT_ASYNC_BEGIN0("media", "AudioRendererImpl::Flush", this);
+ // Flush |sink_| now. |sink_| must only be accessed on |task_runner_| and not
+ // be called under |lock_|.
+ DCHECK(!sink_playing_);
+ sink_->Flush();
+
base::AutoLock auto_lock(lock_);
DCHECK_EQ(state_, kPlaying);
DCHECK(!flush_cb_);
@@ -613,7 +618,16 @@ void AudioRendererImpl::OnAudioDecoderStreamInitialized(bool success) {
// We're all good! Continue initializing the rest of the audio renderer
// based on the decoder format.
- algorithm_.reset(new AudioRendererAlgorithm());
+ auto* media_client = GetMediaClient();
+ auto params =
+ (media_client ? media_client->GetAudioRendererAlgorithmParameters(
+ audio_parameters_)
+ : base::nullopt);
+ if (params) {
+ algorithm_ = std::make_unique<AudioRendererAlgorithm>(params.value());
+ } else {
+ algorithm_ = std::make_unique<AudioRendererAlgorithm>();
+ }
algorithm_->Initialize(audio_parameters_, is_encrypted_);
ConfigureChannelMask();
@@ -693,9 +707,8 @@ void AudioRendererImpl::SetPlayDelayCBForTesting(PlayDelayCBForTesting cb) {
play_delay_cb_for_testing_ = std::move(cb);
}
-void AudioRendererImpl::DecodedAudioReady(
- AudioDecoderStream::Status status,
- const scoped_refptr<AudioBuffer>& buffer) {
+void AudioRendererImpl::DecodedAudioReady(AudioDecoderStream::Status status,
+ scoped_refptr<AudioBuffer> buffer) {
DVLOG(2) << __func__ << "(" << status << ")";
DCHECK(task_runner_->BelongsToCurrentThread());
@@ -717,7 +730,7 @@ void AudioRendererImpl::DecodedAudioReady(
}
DCHECK_EQ(status, AudioDecoderStream::OK);
- DCHECK(buffer.get());
+ DCHECK(buffer);
if (state_ == kFlushing) {
ChangeState_Locked(kFlushed);
@@ -756,7 +769,7 @@ void AudioRendererImpl::DecodedAudioReady(
}
DCHECK(buffer_converter_);
- buffer_converter_->AddInput(buffer);
+ buffer_converter_->AddInput(std::move(buffer));
while (buffer_converter_->HasNextBuffer()) {
need_another_buffer =
@@ -783,7 +796,7 @@ void AudioRendererImpl::DecodedAudioReady(
return;
}
- need_another_buffer = HandleDecodedBuffer_Locked(buffer);
+ need_another_buffer = HandleDecodedBuffer_Locked(std::move(buffer));
}
if (!need_another_buffer && !CanRead_Locked())
@@ -793,13 +806,13 @@ void AudioRendererImpl::DecodedAudioReady(
}
bool AudioRendererImpl::HandleDecodedBuffer_Locked(
- const scoped_refptr<AudioBuffer>& buffer) {
+ scoped_refptr<AudioBuffer> buffer) {
lock_.AssertAcquired();
if (buffer->end_of_stream()) {
received_end_of_stream_ = true;
} else {
if (buffer->IsBitstreamFormat() && state_ == kPlaying) {
- if (IsBeforeStartTime(buffer))
+ if (IsBeforeStartTime(*buffer))
return true;
// Adjust the start time since we are unable to trim a compressed audio
@@ -811,7 +824,7 @@ bool AudioRendererImpl::HandleDecodedBuffer_Locked(
audio_parameters_.sample_rate()));
}
} else if (state_ == kPlaying) {
- if (IsBeforeStartTime(buffer))
+ if (IsBeforeStartTime(*buffer))
return true;
// Trim off any additional time before the start timestamp.
@@ -831,15 +844,15 @@ bool AudioRendererImpl::HandleDecodedBuffer_Locked(
return true;
}
+ // Store the timestamp of the first packet so we know when to start actual
+ // audio playback.
+ if (first_packet_timestamp_ == kNoTimestamp)
+ first_packet_timestamp_ = buffer->timestamp();
+
if (state_ != kUninitialized)
- algorithm_->EnqueueBuffer(buffer);
+ algorithm_->EnqueueBuffer(std::move(buffer));
}
- // Store the timestamp of the first packet so we know when to start actual
- // audio playback.
- if (first_packet_timestamp_ == kNoTimestamp)
- first_packet_timestamp_ = buffer->timestamp();
-
const size_t memory_usage = algorithm_->GetMemoryUsage();
PipelineStatistics stats;
stats.audio_memory_usage = memory_usage - last_audio_memory_usage_;
@@ -860,7 +873,7 @@ bool AudioRendererImpl::HandleDecodedBuffer_Locked(
return false;
case kPlaying:
- if (buffer->end_of_stream() || algorithm_->IsQueueFull()) {
+ if (received_end_of_stream_ || algorithm_->IsQueueFull()) {
if (buffering_state_ == BUFFERING_HAVE_NOTHING)
SetBufferingState_Locked(BUFFERING_HAVE_ENOUGH);
return false;
@@ -945,11 +958,10 @@ void AudioRendererImpl::SetPlaybackRate(double playback_rate) {
}
}
-bool AudioRendererImpl::IsBeforeStartTime(
- const scoped_refptr<AudioBuffer>& buffer) {
+bool AudioRendererImpl::IsBeforeStartTime(const AudioBuffer& buffer) {
DCHECK_EQ(state_, kPlaying);
- return buffer.get() && !buffer->end_of_stream() &&
- (buffer->timestamp() + buffer->duration()) < start_timestamp_;
+ return !buffer.end_of_stream() &&
+ (buffer.timestamp() + buffer.duration()) < start_timestamp_;
}
int AudioRendererImpl::Render(base::TimeDelta delay,
diff --git a/chromium/media/renderers/audio_renderer_impl.h b/chromium/media/renderers/audio_renderer_impl.h
index 8a522bb2a8a..65538e98d75 100644
--- a/chromium/media/renderers/audio_renderer_impl.h
+++ b/chromium/media/renderers/audio_renderer_impl.h
@@ -132,12 +132,12 @@ class MEDIA_EXPORT AudioRendererImpl
// Callback from the audio decoder delivering decoded audio samples.
void DecodedAudioReady(AudioDecoderStream::Status status,
- const scoped_refptr<AudioBuffer>& buffer);
+ scoped_refptr<AudioBuffer> buffer);
// Handles buffers that come out of decoder (MSE: after passing through
// |buffer_converter_|).
// Returns true if more buffers are needed.
- bool HandleDecodedBuffer_Locked(const scoped_refptr<AudioBuffer>& buffer);
+ bool HandleDecodedBuffer_Locked(scoped_refptr<AudioBuffer> buffer);
// Helper functions for DecodeStatus values passed to
// DecodedAudioReady().
@@ -182,7 +182,7 @@ class MEDIA_EXPORT AudioRendererImpl
// Returns true if the data in the buffer is all before |start_timestamp_|.
// This can only return true while in the kPlaying state.
- bool IsBeforeStartTime(const scoped_refptr<AudioBuffer>& buffer);
+ bool IsBeforeStartTime(const AudioBuffer& buffer);
// Called upon AudioDecoderStream initialization, or failure thereof
// (indicated by the value of |success|).
diff --git a/chromium/media/renderers/audio_renderer_impl_unittest.cc b/chromium/media/renderers/audio_renderer_impl_unittest.cc
index 4e78e33c75a..c553b7cf9c6 100644
--- a/chromium/media/renderers/audio_renderer_impl_unittest.cc
+++ b/chromium/media/renderers/audio_renderer_impl_unittest.cc
@@ -23,6 +23,7 @@
#include "media/base/gmock_callback_support.h"
#include "media/base/media_client.h"
#include "media/base/media_util.h"
+#include "media/base/mock_audio_renderer_sink.h"
#include "media/base/mock_filters.h"
#include "media/base/test_helpers.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -65,7 +66,7 @@ static double kOutputMicrosPerFrame =
kOutputSamplesPerSecond;
ACTION_P(EnterPendingDecoderInitStateAction, test) {
- test->EnterPendingDecoderInitState(arg2);
+ test->EnterPendingDecoderInitState(std::move(arg2));
}
class AudioRendererImplTest : public ::testing::Test, public RendererClient {
@@ -73,16 +74,16 @@ class AudioRendererImplTest : public ::testing::Test, public RendererClient {
std::vector<std::unique_ptr<AudioDecoder>> CreateAudioDecoderForTest() {
auto decoder = std::make_unique<MockAudioDecoder>();
if (!enter_pending_decoder_init_) {
- EXPECT_CALL(*decoder, Initialize(_, _, _, _, _))
+ EXPECT_CALL(*decoder, Initialize_(_, _, _, _, _))
.WillOnce(DoAll(SaveArg<3>(&output_cb_),
- RunCallback<2>(expected_init_result_)));
+ RunOnceCallback<2>(expected_init_result_)));
} else {
- EXPECT_CALL(*decoder, Initialize(_, _, _, _, _))
+ EXPECT_CALL(*decoder, Initialize_(_, _, _, _, _))
.WillOnce(EnterPendingDecoderInitStateAction(this));
}
EXPECT_CALL(*decoder, Decode(_, _))
.WillRepeatedly(Invoke(this, &AudioRendererImplTest::DecodeDecoder));
- EXPECT_CALL(*decoder, Reset(_))
+ EXPECT_CALL(*decoder, Reset_(_))
.WillRepeatedly(Invoke(this, &AudioRendererImplTest::ResetDecoder));
std::vector<std::unique_ptr<AudioDecoder>> decoders;
decoders.push_back(std::move(decoder));
@@ -162,6 +163,17 @@ class AudioRendererImplTest : public ::testing::Test, public RendererClient {
ConfigureDemuxerStream(true);
}
+ void ConfigureMockRenderer(const AudioParameters& params) {
+ mock_sink_ = new MockAudioRendererSink();
+ renderer_.reset(new AudioRendererImpl(
+ main_thread_task_runner_, mock_sink_.get(),
+ base::Bind(&AudioRendererImplTest::CreateAudioDecoderForTest,
+ base::Unretained(this)),
+ &media_log_));
+ testing::Mock::VerifyAndClearExpectations(&demuxer_stream_);
+ ConfigureDemuxerStream(true);
+ }
+
// RendererClient implementation.
MOCK_METHOD1(OnError, void(PipelineStatus));
void OnEnded() override {
@@ -253,8 +265,8 @@ class AudioRendererImplTest : public ::testing::Test, public RendererClient {
event.RunAndWaitForStatus(PIPELINE_ERROR_ABORT);
}
- void EnterPendingDecoderInitState(const AudioDecoder::InitCB& cb) {
- init_decoder_cb_ = cb;
+ void EnterPendingDecoderInitState(AudioDecoder::InitCB cb) {
+ init_decoder_cb_ = std::move(cb);
}
void FlushDuringPendingRead() {
@@ -329,7 +341,7 @@ class AudioRendererImplTest : public ::testing::Test, public RendererClient {
}
next_timestamp_->AddFrames(frames.value);
- DeliverBuffer(DecodeStatus::OK, buffer);
+ DeliverBuffer(DecodeStatus::OK, std::move(buffer));
}
void DeliverEndOfStream() {
@@ -465,23 +477,22 @@ class AudioRendererImplTest : public ::testing::Test, public RendererClient {
std::move(wait_for_pending_decode_cb_).Run();
}
- void ResetDecoder(const base::Closure& reset_cb) {
+ void ResetDecoder(base::OnceClosure& reset_cb) {
if (decode_cb_) {
// |reset_cb| will be called in DeliverBuffer(), after the decoder is
// flushed.
- reset_cb_ = reset_cb;
+ reset_cb_ = std::move(reset_cb);
return;
}
- main_thread_task_runner_->PostTask(FROM_HERE, reset_cb);
+ main_thread_task_runner_->PostTask(FROM_HERE, std::move(reset_cb));
}
- void DeliverBuffer(DecodeStatus status,
- const scoped_refptr<AudioBuffer>& buffer) {
+ void DeliverBuffer(DecodeStatus status, scoped_refptr<AudioBuffer> buffer) {
CHECK(decode_cb_);
if (buffer.get() && !buffer->end_of_stream())
- output_cb_.Run(buffer);
+ output_cb_.Run(std::move(buffer));
std::move(decode_cb_).Run(status);
if (reset_cb_)
@@ -497,6 +508,7 @@ class AudioRendererImplTest : public ::testing::Test, public RendererClient {
NullMediaLog media_log_;
std::unique_ptr<AudioRendererImpl> renderer_;
scoped_refptr<FakeAudioRendererSink> sink_;
+ scoped_refptr<MockAudioRendererSink> mock_sink_;
base::SimpleTestTickClock tick_clock_;
PipelineStatistics last_statistics_;
@@ -506,7 +518,7 @@ class AudioRendererImplTest : public ::testing::Test, public RendererClient {
// Used for satisfying reads.
AudioDecoder::OutputCB output_cb_;
AudioDecoder::DecodeCB decode_cb_;
- base::Closure reset_cb_;
+ base::OnceClosure reset_cb_;
std::unique_ptr<AudioTimestampHelper> next_timestamp_;
// Run during DecodeDecoder() to unblock WaitForPendingRead().
@@ -676,6 +688,7 @@ TEST_F(AudioRendererImplTest, Underflow_CapacityResetsAfterFlush) {
EXPECT_GT(buffer_capacity().value, initial_capacity.value);
// Verify that the buffer capacity is restored to the |initial_capacity|.
+ StopTicking();
FlushDuringPendingRead();
EXPECT_EQ(buffer_capacity().value, initial_capacity.value);
}
@@ -739,7 +752,7 @@ TEST_F(AudioRendererImplTest, ChannelMask) {
scoped_refptr<AudioBuffer> buffer = MakeAudioBuffer<float>(
kSampleFormat, hw_params.channel_layout(), hw_params.channels(),
kInputSamplesPerSecond, 1.0f, 0.0f, 256, base::TimeDelta());
- DeliverBuffer(DecodeStatus::OK, buffer);
+ DeliverBuffer(DecodeStatus::OK, std::move(buffer));
// All channels should now be enabled.
mask = channel_mask();
@@ -1227,4 +1240,22 @@ TEST_F(AudioRendererImplTest, BitstreamEndOfStream) {
SetMediaClient(nullptr);
}
+TEST_F(AudioRendererImplTest, SinkIsFlushed) {
+ ConfigureMockRenderer(AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ kChannelLayout, kOutputSamplesPerSecond,
+ 1024 * 15));
+
+ Initialize();
+ Preroll();
+ StartTicking();
+ WaitForPendingRead();
+ StopTicking();
+
+ // Start flushing.
+ EXPECT_CALL(*mock_sink_, Flush());
+ WaitableMessageLoopEvent flush_event;
+ renderer_->Flush(flush_event.GetClosure());
+ renderer_.reset();
+}
+
} // namespace media
diff --git a/chromium/media/renderers/default_decoder_factory.cc b/chromium/media/renderers/default_decoder_factory.cc
index a6a9e091cad..755b06c9c0c 100644
--- a/chromium/media/renderers/default_decoder_factory.cc
+++ b/chromium/media/renderers/default_decoder_factory.cc
@@ -11,9 +11,7 @@
#include "build/build_config.h"
#include "build/buildflag.h"
#include "media/base/decoder_factory.h"
-#include "media/base/media_log.h"
#include "media/base/media_switches.h"
-#include "media/filters/gpu_video_decoder.h"
#include "media/media_buildflags.h"
#include "media/video/gpu_video_accelerator_factories.h"
#include "third_party/libaom/libaom_buildflags.h"
@@ -93,23 +91,16 @@ void DefaultDecoderFactory::CreateVideoDecoders(
// Perfer an external decoder since one will only exist if it is hardware
// accelerated.
- if (gpu_factories && gpu_factories->IsGpuVideoAcceleratorEnabled()) {
+ if (external_decoder_factory_ && gpu_factories &&
+ gpu_factories->IsGpuVideoAcceleratorEnabled()) {
// |gpu_factories_| requires that its entry points be called on its
// |GetTaskRunner()|. Since |pipeline_| will own decoders created from the
// factories, require that their message loops are identical.
DCHECK_EQ(gpu_factories->GetTaskRunner(), task_runner);
- // MojoVideoDecoder replaces any VDA for this platform when it's enabled.
- if (external_decoder_factory_ &&
- base::FeatureList::IsEnabled(kMojoVideoDecoder)) {
- external_decoder_factory_->CreateVideoDecoders(
- task_runner, gpu_factories, media_log, request_overlay_info_cb,
- target_color_space, video_decoders);
- } else {
- video_decoders->push_back(std::make_unique<GpuVideoDecoder>(
- gpu_factories, request_overlay_info_cb, target_color_space,
- media_log));
- }
+ external_decoder_factory_->CreateVideoDecoders(
+ task_runner, gpu_factories, media_log, request_overlay_info_cb,
+ target_color_space, video_decoders);
}
#if defined(OS_FUCHSIA)
diff --git a/chromium/media/renderers/paint_canvas_video_renderer.cc b/chromium/media/renderers/paint_canvas_video_renderer.cc
index 25c4c88b70c..2c3feeb1970 100644
--- a/chromium/media/renderers/paint_canvas_video_renderer.cc
+++ b/chromium/media/renderers/paint_canvas_video_renderer.cc
@@ -14,6 +14,7 @@
#include "cc/paint/paint_flags.h"
#include "cc/paint/paint_image.h"
#include "cc/paint/paint_image_builder.h"
+#include "components/viz/common/gpu/context_provider.h"
#include "gpu/GLES2/gl2extchromium.h"
#include "gpu/command_buffer/client/context_support.h"
#include "gpu/command_buffer/client/gles2_interface.h"
@@ -128,10 +129,10 @@ struct YUVPlaneTextureInfo {
using YUVTexturesInfo = std::array<YUVPlaneTextureInfo, kNumYUVPlanes>;
YUVTexturesInfo GetYUVTexturesInfo(const VideoFrame* video_frame,
- const Context3D& context_3d) {
+ viz::ContextProvider* context_provider) {
YUVTexturesInfo yuv_textures_info;
- gpu::gles2::GLES2Interface* gl = context_3d.gl;
+ gpu::gles2::GLES2Interface* gl = context_provider->ContextGL();
DCHECK(gl);
// TODO(bsalomon): Use GL_RGB8 once Skia supports it.
// skbug.com/7533
@@ -179,9 +180,9 @@ YUVTexturesInfo GetYUVTexturesInfo(const VideoFrame* video_frame,
}
void DeleteYUVTextures(const VideoFrame* video_frame,
- const Context3D& context_3d,
+ viz::ContextProvider* context_provider,
const YUVTexturesInfo& yuv_textures_info) {
- gpu::gles2::GLES2Interface* gl = context_3d.gl;
+ gpu::gles2::GLES2Interface* gl = context_provider->ContextGL();
DCHECK(gl);
for (size_t i = 0; i < video_frame->NumTextures(); ++i) {
@@ -197,8 +198,10 @@ void DeleteYUVTextures(const VideoFrame* video_frame,
sk_sp<SkImage> NewSkImageFromVideoFrameYUVTextures(
const VideoFrame* video_frame,
- const Context3D& context_3d) {
+ viz::ContextProvider* context_provider) {
DCHECK(video_frame->HasTextures());
+ GrContext* gr_context = context_provider->GrContext();
+ DCHECK(gr_context);
// TODO: We should compare the DCHECK vs when UpdateLastImage calls this
// function. (crbug.com/674185)
DCHECK(video_frame->format() == PIXEL_FORMAT_I420 ||
@@ -209,7 +212,7 @@ sk_sp<SkImage> NewSkImageFromVideoFrameYUVTextures(
(ya_tex_size.height() + 1) / 2);
YUVTexturesInfo yuv_textures_info =
- GetYUVTexturesInfo(video_frame, context_3d);
+ GetYUVTexturesInfo(video_frame, context_provider);
GrBackendTexture yuv_textures[3] = {
GrBackendTexture(ya_tex_size.width(), ya_tex_size.height(),
@@ -220,22 +223,24 @@ sk_sp<SkImage> NewSkImageFromVideoFrameYUVTextures(
GrMipMapped::kNo, yuv_textures_info[2].texture),
};
- sk_sp<SkImage> img = YUVGrBackendTexturesToSkImage(
- context_3d.gr_context, video_frame->ColorSpace(), video_frame->format(),
- yuv_textures);
- context_3d.gr_context->flush();
+ sk_sp<SkImage> img =
+ YUVGrBackendTexturesToSkImage(gr_context, video_frame->ColorSpace(),
+ video_frame->format(), yuv_textures);
+ gr_context->flush();
- DeleteYUVTextures(video_frame, context_3d, yuv_textures_info);
+ DeleteYUVTextures(video_frame, context_provider, yuv_textures_info);
return img;
}
sk_sp<SkImage> NewSkImageFromVideoFrameYUVTexturesWithExternalBackend(
const VideoFrame* video_frame,
- const Context3D& context_3d,
+ viz::ContextProvider* context_provider,
unsigned int texture_target,
unsigned int texture_id) {
DCHECK(video_frame->HasTextures());
+ GrContext* gr_context = context_provider->GrContext();
+ DCHECK(gr_context);
// TODO: We should compare the DCHECK vs when UpdateLastImage calls this
// function. (https://crbug.com/674185)
DCHECK(video_frame->format() == PIXEL_FORMAT_I420 ||
@@ -248,7 +253,7 @@ sk_sp<SkImage> NewSkImageFromVideoFrameYUVTexturesWithExternalBackend(
GrGLTextureInfo backend_texture{};
YUVTexturesInfo yuv_textures_info =
- GetYUVTexturesInfo(video_frame, context_3d);
+ GetYUVTexturesInfo(video_frame, context_provider);
GrBackendTexture yuv_textures[3] = {
GrBackendTexture(ya_tex_size.width(), ya_tex_size.height(),
@@ -268,19 +273,22 @@ sk_sp<SkImage> NewSkImageFromVideoFrameYUVTexturesWithExternalBackend(
};
sk_sp<SkImage> img = YUVGrBackendTexturesToSkImage(
- context_3d.gr_context, video_frame->ColorSpace(), video_frame->format(),
+ gr_context, video_frame->ColorSpace(), video_frame->format(),
yuv_textures, result_texture);
- context_3d.gr_context->flush();
+ gr_context->flush();
- DeleteYUVTextures(video_frame, context_3d, yuv_textures_info);
+ DeleteYUVTextures(video_frame, context_provider, yuv_textures_info);
return img;
}
// Creates a SkImage from a |video_frame| backed by native resources.
// The SkImage will take ownership of the underlying resource.
-sk_sp<SkImage> NewSkImageFromVideoFrameNative(VideoFrame* video_frame,
- const Context3D& context_3d) {
+sk_sp<SkImage> NewSkImageFromVideoFrameNative(
+ VideoFrame* video_frame,
+ viz::ContextProvider* context_provider,
+ bool allow_wrap_texture,
+ bool* wrapped_video_frame_texture) {
DCHECK(PIXEL_FORMAT_ARGB == video_frame->format() ||
PIXEL_FORMAT_XRGB == video_frame->format() ||
PIXEL_FORMAT_RGB24 == video_frame->format() ||
@@ -296,24 +304,29 @@ sk_sp<SkImage> NewSkImageFromVideoFrameNative(VideoFrame* video_frame,
<< "Unsupported texture target " << std::hex << std::showbase
<< mailbox_holder.texture_target;
- gpu::gles2::GLES2Interface* gl = context_3d.gl;
+ gpu::gles2::GLES2Interface* gl = context_provider->ContextGL();
unsigned source_texture = 0;
gfx::ColorSpace color_space_for_skia;
- if (mailbox_holder.texture_target != GL_TEXTURE_2D) {
+ *wrapped_video_frame_texture =
+ mailbox_holder.texture_target == GL_TEXTURE_2D && allow_wrap_texture;
+ if (*wrapped_video_frame_texture) {
+ // Fast path where we can avoid a copy, by having last_image_ directly wrap
+ // the VideoFrame texture.
+ gl->WaitSyncTokenCHROMIUM(mailbox_holder.sync_token.GetConstData());
+ source_texture =
+ gl->CreateAndConsumeTextureCHROMIUM(mailbox_holder.mailbox.name);
+ color_space_for_skia = video_frame->ColorSpace();
+ } else {
// TODO(dcastagna): At the moment Skia doesn't support targets different
// than GL_TEXTURE_2D. Avoid this copy once
- // https://code.google.com/p/skia/issues/detail?id=3868 is addressed.
+ // https://code.google.com/p/skia/issues/detail?id=3868 is addressed, when
+ // we allow wrapping.
gl->GenTextures(1, &source_texture);
DCHECK(source_texture);
gl->BindTexture(GL_TEXTURE_2D, source_texture);
PaintCanvasVideoRenderer::CopyVideoFrameSingleTextureToGLTexture(
gl, video_frame, GL_TEXTURE_2D, source_texture, GL_RGBA, GL_RGBA,
GL_UNSIGNED_BYTE, 0, false, false);
- } else {
- gl->WaitSyncTokenCHROMIUM(mailbox_holder.sync_token.GetConstData());
- source_texture =
- gl->CreateAndConsumeTextureCHROMIUM(mailbox_holder.mailbox.name);
- color_space_for_skia = video_frame->ColorSpace();
}
GrGLTextureInfo source_texture_info;
source_texture_info.fID = source_texture;
@@ -327,8 +340,8 @@ sk_sp<SkImage> NewSkImageFromVideoFrameNative(VideoFrame* video_frame,
video_frame->coded_size().width(), video_frame->coded_size().height(),
GrMipMapped::kNo, source_texture_info);
return SkImage::MakeFromAdoptedTexture(
- context_3d.gr_context, source_backend_texture, kTopLeft_GrSurfaceOrigin,
- kRGBA_8888_SkColorType, kPremul_SkAlphaType,
+ context_provider->GrContext(), source_backend_texture,
+ kTopLeft_GrSurfaceOrigin, kRGBA_8888_SkColorType, kPremul_SkAlphaType,
color_space_for_skia.ToSkColorSpace());
}
@@ -405,11 +418,11 @@ void SynchronizeVideoFrameRead(scoped_refptr<VideoFrame> video_frame,
// Generates an RGB image from a VideoFrame. Convert YUV to RGB plain on GPU.
class VideoImageGenerator : public cc::PaintImageGenerator {
public:
- VideoImageGenerator(const scoped_refptr<VideoFrame>& frame)
+ VideoImageGenerator(scoped_refptr<VideoFrame> frame)
: cc::PaintImageGenerator(
SkImageInfo::MakeN32Premul(frame->visible_rect().width(),
frame->visible_rect().height())),
- frame_(frame) {
+ frame_(std::move(frame)) {
DCHECK(!frame_->HasTextures());
}
~VideoImageGenerator() override = default;
@@ -567,14 +580,12 @@ PaintCanvasVideoRenderer::~PaintCanvasVideoRenderer() {
ResetCache();
}
-void PaintCanvasVideoRenderer::Paint(
- const scoped_refptr<VideoFrame>& video_frame,
- cc::PaintCanvas* canvas,
- const gfx::RectF& dest_rect,
- cc::PaintFlags& flags,
- VideoRotation video_rotation,
- const Context3D& context_3d,
- gpu::ContextSupport* context_support) {
+void PaintCanvasVideoRenderer::Paint(scoped_refptr<VideoFrame> video_frame,
+ cc::PaintCanvas* canvas,
+ const gfx::RectF& dest_rect,
+ cc::PaintFlags& flags,
+ VideoTransformation video_transformation,
+ viz::ContextProvider* context_provider) {
DCHECK(thread_checker_.CalledOnValidThread());
if (flags.getAlpha() == 0) {
return;
@@ -596,8 +607,10 @@ void PaintCanvasVideoRenderer::Paint(
return;
}
- gpu::gles2::GLES2Interface* gl = context_3d.gl;
- if (!UpdateLastImage(video_frame, context_3d))
+ // Don't allow wrapping the VideoFrame texture, as we want to be able to cache
+ // the PaintImage, to avoid redundant readbacks if the canvas is software.
+ if (!UpdateLastImage(video_frame, context_provider,
+ false /* allow_wrap_texture */))
return;
cc::PaintFlags video_flags;
@@ -605,10 +618,11 @@ void PaintCanvasVideoRenderer::Paint(
video_flags.setBlendMode(flags.getBlendMode());
video_flags.setFilterQuality(flags.getFilterQuality());
- const bool need_rotation = video_rotation != VIDEO_ROTATION_0;
+ const bool need_rotation = video_transformation.rotation != VIDEO_ROTATION_0;
const bool need_scaling =
dest_rect.size() != gfx::SizeF(last_image_.width(), last_image_.height());
const bool need_translation = !dest_rect.origin().IsOrigin();
+ // TODO(tmathmeyer): apply horizontal / vertical mirroring if needed.
bool need_transform = need_rotation || need_scaling || need_translation;
if (need_transform) {
canvas->save();
@@ -616,7 +630,7 @@ void PaintCanvasVideoRenderer::Paint(
SkFloatToScalar(dest_rect.x() + (dest_rect.width() * 0.5f)),
SkFloatToScalar(dest_rect.y() + (dest_rect.height() * 0.5f)));
SkScalar angle = SkFloatToScalar(0.0f);
- switch (video_rotation) {
+ switch (video_transformation.rotation) {
case VIDEO_ROTATION_0:
break;
case VIDEO_ROTATION_90:
@@ -632,8 +646,8 @@ void PaintCanvasVideoRenderer::Paint(
canvas->rotate(angle);
gfx::SizeF rotated_dest_size = dest_rect.size();
- if (video_rotation == VIDEO_ROTATION_90 ||
- video_rotation == VIDEO_ROTATION_270) {
+ if (video_transformation.rotation == VIDEO_ROTATION_90 ||
+ video_transformation.rotation == VIDEO_ROTATION_270) {
rotated_dest_size =
gfx::SizeF(rotated_dest_size.height(), rotated_dest_size.width());
}
@@ -669,21 +683,26 @@ void PaintCanvasVideoRenderer::Paint(
if (video_frame->HasTextures()) {
// Synchronize |video_frame| with the read operations in UpdateLastImage(),
// which are triggered by canvas->flush().
- SynchronizeVideoFrameRead(video_frame, gl, context_support);
+ SynchronizeVideoFrameRead(std::move(video_frame),
+ context_provider->ContextGL(),
+ context_provider->ContextSupport());
}
+ // Because we are not retaining a reference to the VideoFrame, it would be
+ // invalid for last_image_ to directly wrap its texture(s), as they will be
+ // recycled.
+ DCHECK(!last_image_wraps_video_frame_texture_);
}
-void PaintCanvasVideoRenderer::Copy(
- const scoped_refptr<VideoFrame>& video_frame,
- cc::PaintCanvas* canvas,
- const Context3D& context_3d,
- gpu::ContextSupport* context_support) {
+void PaintCanvasVideoRenderer::Copy(scoped_refptr<VideoFrame> video_frame,
+ cc::PaintCanvas* canvas,
+ viz::ContextProvider* context_provider) {
cc::PaintFlags flags;
flags.setBlendMode(SkBlendMode::kSrc);
flags.setFilterQuality(kLow_SkFilterQuality);
- Paint(video_frame, canvas,
- gfx::RectF(gfx::SizeF(video_frame->visible_rect().size())), flags,
- media::VIDEO_ROTATION_0, context_3d, context_support);
+
+ auto dest_rect = gfx::RectF(gfx::SizeF(video_frame->visible_rect().size()));
+ Paint(std::move(video_frame), canvas, dest_rect, flags,
+ media::kNoTransformation, context_provider);
}
namespace {
@@ -1080,10 +1099,9 @@ void PaintCanvasVideoRenderer::CopyVideoFrameSingleTextureToGLTexture(
}
bool PaintCanvasVideoRenderer::CopyVideoFrameTexturesToGLTexture(
- const Context3D& context_3d,
- gpu::ContextSupport* context_support,
+ viz::ContextProvider* context_provider,
gpu::gles2::GLES2Interface* destination_gl,
- const scoped_refptr<VideoFrame>& video_frame,
+ scoped_refptr<VideoFrame> video_frame,
unsigned int target,
unsigned int texture,
unsigned int internal_format,
@@ -1098,10 +1116,15 @@ bool PaintCanvasVideoRenderer::CopyVideoFrameTexturesToGLTexture(
if (video_frame->NumTextures() > 1 ||
video_frame->metadata()->IsTrue(
VideoFrameMetadata::READ_LOCK_FENCES_ENABLED)) {
- if (!context_3d.gr_context)
+ if (!context_provider)
+ return false;
+ GrContext* gr_context = context_provider->GrContext();
+ if (!gr_context)
return false;
- if (!UpdateLastImage(video_frame, context_3d))
+ if (!UpdateLastImage(video_frame, context_provider,
+ true /* allow_wrap_texture */)) {
return false;
+ }
GrBackendTexture backend_texture =
last_image_.GetSkImage()->getBackendTexture(true);
@@ -1111,11 +1134,7 @@ bool PaintCanvasVideoRenderer::CopyVideoFrameTexturesToGLTexture(
if (!backend_texture.getGLTextureInfo(&texture_info))
return false;
- // Synchronize |video_frame| with the read operations in UpdateLastImage(),
- // which are triggered by getBackendTexture().
- gpu::gles2::GLES2Interface* canvas_gl = context_3d.gl;
- SynchronizeVideoFrameRead(video_frame, canvas_gl, context_support);
-
+ gpu::gles2::GLES2Interface* canvas_gl = context_provider->ContextGL();
gpu::MailboxHolder mailbox_holder;
mailbox_holder.texture_target = texture_info.fTarget;
canvas_gl->ProduceTextureDirectCHROMIUM(texture_info.fID,
@@ -1143,20 +1162,33 @@ bool PaintCanvasVideoRenderer::CopyVideoFrameTexturesToGLTexture(
gpu::SyncToken dest_sync_token;
destination_gl->GenUnverifiedSyncTokenCHROMIUM(dest_sync_token.GetData());
canvas_gl->WaitSyncTokenCHROMIUM(dest_sync_token.GetConstData());
+
+ // Because we are not retaining a reference to the VideoFrame, it would be
+ // invalid to keep last_image_ around if it directly wraps the VideoFrame
+ // texture(s), as they will be recycled.
+ if (last_image_wraps_video_frame_texture_)
+ ResetCache();
+
+ // Synchronize |video_frame| with the read operations in UpdateLastImage(),
+ // which are triggered by getBackendTexture() or CopyTextureCHROMIUM (in the
+ // case last_image_ was referencing its texture(s) directly).
+ SynchronizeVideoFrameRead(std::move(video_frame), canvas_gl,
+ context_provider->ContextSupport());
} else {
CopyVideoFrameSingleTextureToGLTexture(
destination_gl, video_frame.get(), target, texture, internal_format,
format, type, level, premultiply_alpha, flip_y);
- SynchronizeVideoFrameRead(video_frame, destination_gl, nullptr);
+ SynchronizeVideoFrameRead(std::move(video_frame), destination_gl, nullptr);
}
+ DCHECK(!last_image_wraps_video_frame_texture_);
return true;
}
bool PaintCanvasVideoRenderer::PrepareVideoFrameForWebGL(
- const Context3D& context_3d,
+ viz::ContextProvider* context_provider,
gpu::gles2::GLES2Interface* destination_gl,
- const scoped_refptr<VideoFrame>& video_frame,
+ scoped_refptr<VideoFrame> video_frame,
unsigned int target,
unsigned int texture) {
DCHECK(thread_checker_.CalledOnValidThread());
@@ -1173,7 +1205,7 @@ bool PaintCanvasVideoRenderer::PrepareVideoFrameForWebGL(
return false;
}
- if (!context_3d.gr_context)
+ if (!context_provider || !context_provider->GrContext())
return false;
// Take webgl video texture as 2D texture. Setting it as external render
@@ -1184,7 +1216,7 @@ bool PaintCanvasVideoRenderer::PrepareVideoFrameForWebGL(
video_frame->coded_size().height(), 0, GL_RGBA,
GL_UNSIGNED_BYTE, nullptr);
- gpu::gles2::GLES2Interface* source_gl = context_3d.gl;
+ gpu::gles2::GLES2Interface* source_gl = context_provider->ContextGL();
gpu::MailboxHolder mailbox_holder;
mailbox_holder.texture_target = target;
destination_gl->ProduceTextureDirectCHROMIUM(texture,
@@ -1198,7 +1230,8 @@ bool PaintCanvasVideoRenderer::PrepareVideoFrameForWebGL(
uint32_t shared_texture =
source_gl->CreateAndConsumeTextureCHROMIUM(mailbox_holder.mailbox.name);
- if (!PrepareVideoFrame(video_frame, context_3d, target, shared_texture)) {
+ if (!PrepareVideoFrame(video_frame, context_provider, target,
+ shared_texture)) {
return false;
}
@@ -1223,13 +1256,14 @@ bool PaintCanvasVideoRenderer::PrepareVideoFrameForWebGL(
SyncTokenClientImpl client(source_gl);
video_frame->UpdateReleaseSyncToken(&client);
+ DCHECK(!last_image_wraps_video_frame_texture_);
return true;
}
bool PaintCanvasVideoRenderer::CopyVideoFrameYUVDataToGLTexture(
- const Context3D& context_3d,
+ viz::ContextProvider* context_provider,
gpu::gles2::GLES2Interface* destination_gl,
- const scoped_refptr<VideoFrame>& video_frame,
+ const VideoFrame& video_frame,
unsigned int target,
unsigned int texture,
unsigned int internal_format,
@@ -1238,21 +1272,23 @@ bool PaintCanvasVideoRenderer::CopyVideoFrameYUVDataToGLTexture(
int level,
bool premultiply_alpha,
bool flip_y) {
- if (!context_3d.gr_context) {
+ DCHECK(context_provider);
+ GrContext* gr_context = context_provider->GrContext();
+ if (!gr_context) {
return false;
}
- if (!video_frame || !video_frame->IsMappable()) {
+ if (!video_frame.IsMappable()) {
return false;
}
- if (video_frame->format() != media::PIXEL_FORMAT_I420) {
+ if (video_frame.format() != media::PIXEL_FORMAT_I420) {
return false;
}
// Could handle NV12 here as well. See NewSkImageFromVideoFrameYUVTextures.
static constexpr size_t kNumPlanes = 3;
- DCHECK_EQ(video_frame->NumPlanes(video_frame->format()), kNumPlanes);
+ DCHECK_EQ(video_frame.NumPlanes(video_frame.format()), kNumPlanes);
// Y,U,V GPU-side SkImages. (These must outlive the yuv_textures).
sk_sp<SkImage> yuv_images[kNumPlanes]{};
// Y,U,V GPU textures from those SkImages.
@@ -1260,13 +1296,13 @@ bool PaintCanvasVideoRenderer::CopyVideoFrameYUVDataToGLTexture(
GrBackendTexture yuv_textures[kNumPlanes]{};
// Upload the whole coded image area (not visible rect).
- gfx::Size y_tex_size = video_frame->coded_size();
+ gfx::Size y_tex_size = video_frame.coded_size();
gfx::Size uv_tex_size((y_tex_size.width() + 1) / 2,
(y_tex_size.height() + 1) / 2);
for (size_t plane = 0; plane < kNumPlanes; ++plane) {
- const uint8_t* data = video_frame->data(plane);
- int plane_stride = video_frame->stride(plane);
+ const uint8_t* data = video_frame.data(plane);
+ int plane_stride = video_frame.stride(plane);
bool is_y_plane = plane == media::VideoFrame::kYPlane;
gfx::Size tex_size = is_y_plane ? y_tex_size : uv_tex_size;
@@ -1284,8 +1320,7 @@ bool PaintCanvasVideoRenderer::CopyVideoFrameYUVDataToGLTexture(
// Upload the CPU-side SkImage into a GPU-side SkImage.
// (Note the original video_frame data is no longer used after this point.)
- yuv_images[plane] =
- plane_image_cpu->makeTextureImage(context_3d.gr_context, nullptr);
+ yuv_images[plane] = plane_image_cpu->makeTextureImage(gr_context, nullptr);
DCHECK(yuv_images[plane]);
// Extract the backend texture from the GPU-side image.
@@ -1294,8 +1329,7 @@ bool PaintCanvasVideoRenderer::CopyVideoFrameYUVDataToGLTexture(
// Decode 3 GPU-side Y,U,V SkImages into a GPU-side RGB SkImage.
sk_sp<SkImage> yuv_image = YUVGrBackendTexturesToSkImage(
- context_3d.gr_context, video_frame->ColorSpace(), video_frame->format(),
- yuv_textures);
+ gr_context, video_frame.ColorSpace(), video_frame.format(), yuv_textures);
if (!yuv_image) {
return false;
}
@@ -1303,7 +1337,7 @@ bool PaintCanvasVideoRenderer::CopyVideoFrameYUVDataToGLTexture(
GrGLTextureInfo src_texture_info{};
yuv_image->getBackendTexture(true).getGLTextureInfo(&src_texture_info);
- gpu::gles2::GLES2Interface* source_gl = context_3d.gl;
+ gpu::gles2::GLES2Interface* source_gl = context_provider->ContextGL();
gpu::MailboxHolder mailbox_holder;
mailbox_holder.texture_target = src_texture_info.fTarget;
source_gl->ProduceTextureDirectCHROMIUM(src_texture_info.fID,
@@ -1320,7 +1354,7 @@ bool PaintCanvasVideoRenderer::CopyVideoFrameYUVDataToGLTexture(
destination_gl->CreateAndConsumeTextureCHROMIUM(
mailbox_holder.mailbox.name);
VideoFrameCopyTextureOrSubTexture(
- destination_gl, video_frame->coded_size(), video_frame->visible_rect(),
+ destination_gl, video_frame.coded_size(), video_frame.visible_rect(),
intermediate_texture, target, texture, internal_format, format, type,
level, premultiply_alpha, flip_y);
destination_gl->DeleteTextures(1, &intermediate_texture);
@@ -1416,11 +1450,14 @@ void PaintCanvasVideoRenderer::ResetCache() {
// Clear cached values.
last_image_ = cc::PaintImage();
last_id_.reset();
+ last_image_wraps_video_frame_texture_ = false;
}
bool PaintCanvasVideoRenderer::UpdateLastImage(
- const scoped_refptr<VideoFrame>& video_frame,
- const Context3D& context_3d) {
+ scoped_refptr<VideoFrame> video_frame,
+ viz::ContextProvider* context_provider,
+ bool allow_wrap_texture) {
+ DCHECK(!last_image_wraps_video_frame_texture_);
if (!last_image_ || video_frame->unique_id() != last_id_ ||
!last_image_.GetSkImage()->getBackendTexture(true).isValid()) {
ResetCache();
@@ -1437,15 +1474,18 @@ bool PaintCanvasVideoRenderer::UpdateLastImage(
// Holding |video_frame| longer than this call when using GPUVideoDecoder
// could cause problems since the pool of VideoFrames has a fixed size.
if (video_frame->HasTextures()) {
- DCHECK(context_3d.gr_context);
- DCHECK(context_3d.gl);
+ DCHECK(context_provider);
+ DCHECK(context_provider->GrContext());
+ DCHECK(context_provider->ContextGL());
if (video_frame->NumTextures() > 1) {
- paint_image_builder.set_image(
- NewSkImageFromVideoFrameYUVTextures(video_frame.get(), context_3d),
- cc::PaintImage::GetNextContentId());
+ paint_image_builder.set_image(NewSkImageFromVideoFrameYUVTextures(
+ video_frame.get(), context_provider),
+ cc::PaintImage::GetNextContentId());
} else {
paint_image_builder.set_image(
- NewSkImageFromVideoFrameNative(video_frame.get(), context_3d),
+ NewSkImageFromVideoFrameNative(
+ video_frame.get(), context_provider, allow_wrap_texture,
+ &last_image_wraps_video_frame_texture_),
cc::PaintImage::GetNextContentId());
}
} else {
@@ -1465,10 +1505,13 @@ bool PaintCanvasVideoRenderer::UpdateLastImage(
}
bool PaintCanvasVideoRenderer::PrepareVideoFrame(
- const scoped_refptr<VideoFrame>& video_frame,
- const Context3D& context_3d,
+ scoped_refptr<VideoFrame> video_frame,
+ viz::ContextProvider* context_provider,
unsigned int textureTarget,
unsigned int texture) {
+ // We are about to replace last_image_, make sure to reset related state such
+ // as last_id_ or last_image_wraps_video_frame_texture_.
+ ResetCache();
auto paint_image_builder =
cc::PaintImageBuilder::WithDefault()
.set_id(renderer_stable_id_)
@@ -1481,12 +1524,13 @@ bool PaintCanvasVideoRenderer::PrepareVideoFrame(
// Holding |video_frame| longer than this call when using GPUVideoDecoder
// could cause problems since the pool of VideoFrames has a fixed size.
if (video_frame->HasTextures()) {
- DCHECK(context_3d.gr_context);
- DCHECK(context_3d.gl);
+ DCHECK(context_provider);
+ DCHECK(context_provider->GrContext());
+ DCHECK(context_provider->ContextGL());
if (video_frame->NumTextures() > 1) {
paint_image_builder.set_image(
NewSkImageFromVideoFrameYUVTexturesWithExternalBackend(
- video_frame.get(), context_3d, textureTarget, texture),
+ video_frame.get(), context_provider, textureTarget, texture),
cc::PaintImage::GetNextContentId());
} else {
// We don't support Android now.
diff --git a/chromium/media/renderers/paint_canvas_video_renderer.h b/chromium/media/renderers/paint_canvas_video_renderer.h
index 795a992ca15..5b483c9fda0 100644
--- a/chromium/media/renderers/paint_canvas_video_renderer.h
+++ b/chromium/media/renderers/paint_canvas_video_renderer.h
@@ -20,8 +20,7 @@
#include "media/base/media_export.h"
#include "media/base/timestamp_constants.h"
#include "media/base/video_frame.h"
-#include "media/base/video_rotation.h"
-#include "media/filters/context_3d.h"
+#include "media/base/video_transformation.h"
namespace gfx {
class RectF;
@@ -29,7 +28,14 @@ class RectF;
namespace gpu {
struct Capabilities;
-class ContextSupport;
+
+namespace gles2 {
+class GLES2Interface;
+}
+} // namespace gpu
+
+namespace viz {
+class ContextProvider;
}
namespace media {
@@ -47,22 +53,20 @@ class MEDIA_EXPORT PaintCanvasVideoRenderer {
//
// If |video_frame| is nullptr or an unsupported format, |dest_rect| will be
// painted black.
- void Paint(const scoped_refptr<VideoFrame>& video_frame,
+ void Paint(scoped_refptr<VideoFrame> video_frame,
cc::PaintCanvas* canvas,
const gfx::RectF& dest_rect,
cc::PaintFlags& flags,
- VideoRotation video_rotation,
- const Context3D& context_3d,
- gpu::ContextSupport* context_support);
+ VideoTransformation video_transformation,
+ viz::ContextProvider* context_provider);
// Paints |video_frame| scaled to its visible size on |canvas|.
//
// If the format of |video_frame| is PIXEL_FORMAT_NATIVE_TEXTURE, |context_3d|
// and |context_support| must be provided.
- void Copy(const scoped_refptr<VideoFrame>& video_frame,
+ void Copy(scoped_refptr<VideoFrame> video_frame,
cc::PaintCanvas* canvas,
- const Context3D& context_3d,
- gpu::ContextSupport* context_support);
+ viz::ContextProvider* context_provider);
// Convert the contents of |video_frame| to raw RGB pixels. |rgb_pixels|
// should point into a buffer large enough to hold as many 32 bit RGBA pixels
@@ -91,10 +95,9 @@ class MEDIA_EXPORT PaintCanvasVideoRenderer {
//
// The format of |video_frame| must be VideoFrame::NATIVE_TEXTURE.
bool CopyVideoFrameTexturesToGLTexture(
- const Context3D& context_3d,
- gpu::ContextSupport* context_support,
+ viz::ContextProvider* context_provider,
gpu::gles2::GLES2Interface* destination_gl,
- const scoped_refptr<VideoFrame>& video_frame,
+ scoped_refptr<VideoFrame> video_frame,
unsigned int target,
unsigned int texture,
unsigned int internal_format,
@@ -104,9 +107,9 @@ class MEDIA_EXPORT PaintCanvasVideoRenderer {
bool premultiply_alpha,
bool flip_y);
- bool PrepareVideoFrameForWebGL(const Context3D& context_3d,
+ bool PrepareVideoFrameForWebGL(viz::ContextProvider* context_provider,
gpu::gles2::GLES2Interface* gl,
- const scoped_refptr<VideoFrame>& video_frame,
+ scoped_refptr<VideoFrame> video_frame,
unsigned int target,
unsigned int texture);
@@ -118,9 +121,9 @@ class MEDIA_EXPORT PaintCanvasVideoRenderer {
// CorrectLastImageDimensions() ensures that the source texture will be
// cropped to |visible_rect|. Returns true on success.
bool CopyVideoFrameYUVDataToGLTexture(
- const Context3D& context_3d,
+ viz::ContextProvider* context_provider,
gpu::gles2::GLES2Interface* destination_gl,
- const scoped_refptr<VideoFrame>& video_frame,
+ const VideoFrame& video_frame,
unsigned int target,
unsigned int texture,
unsigned int internal_format,
@@ -182,19 +185,24 @@ class MEDIA_EXPORT PaintCanvasVideoRenderer {
private:
// Update the cache holding the most-recently-painted frame. Returns false
// if the image couldn't be updated.
- bool UpdateLastImage(const scoped_refptr<VideoFrame>& video_frame,
- const Context3D& context_3d);
+ bool UpdateLastImage(scoped_refptr<VideoFrame> video_frame,
+ viz::ContextProvider* context_provider,
+ bool allow_wrap_texture);
void CorrectLastImageDimensions(const SkIRect& visible_rect);
- bool PrepareVideoFrame(const scoped_refptr<VideoFrame>& video_frame,
- const Context3D& context_3d,
+ bool PrepareVideoFrame(scoped_refptr<VideoFrame> video_frame,
+ viz::ContextProvider* context_provider,
unsigned int textureTarget,
unsigned int texture);
// Last image used to draw to the canvas.
cc::PaintImage last_image_;
+ // last_image_ directly wraps a texture from a VideoFrame, in which case we
+ // need to synchronize access before releasing the VideoFrame.
+ bool last_image_wraps_video_frame_texture_ = false;
+
// VideoFrame::unique_id() of the videoframe used to generate |last_image_|.
base::Optional<int> last_id_;
diff --git a/chromium/media/renderers/paint_canvas_video_renderer_unittest.cc b/chromium/media/renderers/paint_canvas_video_renderer_unittest.cc
index 5e7c448a269..991c8647359 100644
--- a/chromium/media/renderers/paint_canvas_video_renderer_unittest.cc
+++ b/chromium/media/renderers/paint_canvas_video_renderer_unittest.cc
@@ -11,9 +11,12 @@
#include "base/message_loop/message_loop.h"
#include "cc/paint/paint_flags.h"
#include "cc/paint/skia_paint_canvas.h"
+#include "components/viz/common/gpu/context_provider.h"
+#include "components/viz/test/test_context_provider.h"
#include "gpu/GLES2/gl2extchromium.h"
#include "gpu/command_buffer/client/gles2_interface_stub.h"
#include "gpu/command_buffer/common/capabilities.h"
+#include "gpu/config/gpu_feature_info.h"
#include "media/base/timestamp_constants.h"
#include "media/base/video_frame.h"
#include "media/base/video_util.h"
@@ -76,18 +79,17 @@ class PaintCanvasVideoRendererTest : public testing::Test {
// Paints the |video_frame| to the |canvas| using |renderer_|, setting the
// color of |video_frame| to |color| first.
- void Paint(const scoped_refptr<VideoFrame>& video_frame,
+ void Paint(scoped_refptr<VideoFrame> video_frame,
cc::PaintCanvas* canvas,
Color color);
- void PaintRotated(const scoped_refptr<VideoFrame>& video_frame,
+ void PaintRotated(scoped_refptr<VideoFrame> video_frame,
cc::PaintCanvas* canvas,
const gfx::RectF& dest_rect,
Color color,
SkBlendMode mode,
- VideoRotation video_rotation);
+ VideoTransformation video_transformation);
- void Copy(const scoped_refptr<VideoFrame>& video_frame,
- cc::PaintCanvas* canvas);
+ void Copy(scoped_refptr<VideoFrame> video_frame, cc::PaintCanvas* canvas);
// Getters for various frame sizes.
scoped_refptr<VideoFrame> natural_frame() { return natural_frame_; }
@@ -224,25 +226,24 @@ PaintCanvasVideoRendererTest::~PaintCanvasVideoRendererTest() = default;
void PaintCanvasVideoRendererTest::PaintWithoutFrame(cc::PaintCanvas* canvas) {
cc::PaintFlags flags;
flags.setFilterQuality(kLow_SkFilterQuality);
- renderer_.Paint(nullptr, canvas, kNaturalRect, flags, VIDEO_ROTATION_0,
- Context3D(), nullptr);
+ renderer_.Paint(nullptr, canvas, kNaturalRect, flags, kNoTransformation,
+ nullptr);
}
-void PaintCanvasVideoRendererTest::Paint(
- const scoped_refptr<VideoFrame>& video_frame,
- cc::PaintCanvas* canvas,
- Color color) {
- PaintRotated(video_frame, canvas, kNaturalRect, color, SkBlendMode::kSrcOver,
- VIDEO_ROTATION_0);
+void PaintCanvasVideoRendererTest::Paint(scoped_refptr<VideoFrame> video_frame,
+ cc::PaintCanvas* canvas,
+ Color color) {
+ PaintRotated(std::move(video_frame), canvas, kNaturalRect, color,
+ SkBlendMode::kSrcOver, kNoTransformation);
}
void PaintCanvasVideoRendererTest::PaintRotated(
- const scoped_refptr<VideoFrame>& video_frame,
+ scoped_refptr<VideoFrame> video_frame,
cc::PaintCanvas* canvas,
const gfx::RectF& dest_rect,
Color color,
SkBlendMode mode,
- VideoRotation video_rotation) {
+ VideoTransformation video_transformation) {
switch (color) {
case kNone:
break;
@@ -259,14 +260,13 @@ void PaintCanvasVideoRendererTest::PaintRotated(
cc::PaintFlags flags;
flags.setBlendMode(mode);
flags.setFilterQuality(kLow_SkFilterQuality);
- renderer_.Paint(video_frame, canvas, dest_rect, flags, video_rotation,
- Context3D(), nullptr);
+ renderer_.Paint(std::move(video_frame), canvas, dest_rect, flags,
+ video_transformation, nullptr);
}
-void PaintCanvasVideoRendererTest::Copy(
- const scoped_refptr<VideoFrame>& video_frame,
- cc::PaintCanvas* canvas) {
- renderer_.Copy(video_frame, canvas, Context3D(), nullptr);
+void PaintCanvasVideoRendererTest::Copy(scoped_refptr<VideoFrame> video_frame,
+ cc::PaintCanvas* canvas) {
+ renderer_.Copy(std::move(video_frame), canvas, nullptr);
}
TEST_F(PaintCanvasVideoRendererTest, NoFrame) {
@@ -281,7 +281,7 @@ TEST_F(PaintCanvasVideoRendererTest, TransparentFrame) {
PaintRotated(
VideoFrame::CreateTransparentFrame(gfx::Size(kWidth, kHeight)).get(),
target_canvas(), kNaturalRect, kNone, SkBlendMode::kSrcOver,
- VIDEO_ROTATION_0);
+ kNoTransformation);
EXPECT_EQ(static_cast<SkColor>(SK_ColorRED), bitmap()->getColor(0, 0));
}
@@ -291,7 +291,7 @@ TEST_F(PaintCanvasVideoRendererTest, TransparentFrameSrcMode) {
PaintRotated(
VideoFrame::CreateTransparentFrame(gfx::Size(kWidth, kHeight)).get(),
target_canvas(), kNaturalRect, kNone, SkBlendMode::kSrc,
- VIDEO_ROTATION_0);
+ kNoTransformation);
EXPECT_EQ(static_cast<SkColor>(SK_ColorTRANSPARENT),
bitmap()->getColor(0, 0));
}
@@ -383,7 +383,7 @@ TEST_F(PaintCanvasVideoRendererTest, Video_Rotation_90) {
SkBitmap bitmap = AllocBitmap(kWidth, kHeight);
cc::SkiaPaintCanvas canvas(bitmap);
PaintRotated(cropped_frame(), &canvas, kNaturalRect, kNone,
- SkBlendMode::kSrcOver, VIDEO_ROTATION_90);
+ SkBlendMode::kSrcOver, VideoTransformation(VIDEO_ROTATION_90));
// Check the corners.
EXPECT_EQ(SK_ColorGREEN, bitmap.getColor(0, 0));
EXPECT_EQ(SK_ColorBLACK, bitmap.getColor(kWidth - 1, 0));
@@ -395,7 +395,7 @@ TEST_F(PaintCanvasVideoRendererTest, Video_Rotation_180) {
SkBitmap bitmap = AllocBitmap(kWidth, kHeight);
cc::SkiaPaintCanvas canvas(bitmap);
PaintRotated(cropped_frame(), &canvas, kNaturalRect, kNone,
- SkBlendMode::kSrcOver, VIDEO_ROTATION_180);
+ SkBlendMode::kSrcOver, VideoTransformation(VIDEO_ROTATION_180));
// Check the corners.
EXPECT_EQ(SK_ColorBLUE, bitmap.getColor(0, 0));
EXPECT_EQ(SK_ColorGREEN, bitmap.getColor(kWidth - 1, 0));
@@ -407,7 +407,7 @@ TEST_F(PaintCanvasVideoRendererTest, Video_Rotation_270) {
SkBitmap bitmap = AllocBitmap(kWidth, kHeight);
cc::SkiaPaintCanvas canvas(bitmap);
PaintRotated(cropped_frame(), &canvas, kNaturalRect, kNone,
- SkBlendMode::kSrcOver, VIDEO_ROTATION_270);
+ SkBlendMode::kSrcOver, VideoTransformation(VIDEO_ROTATION_270));
// Check the corners.
EXPECT_EQ(SK_ColorRED, bitmap.getColor(0, 0));
EXPECT_EQ(SK_ColorBLUE, bitmap.getColor(kWidth - 1, 0));
@@ -422,7 +422,7 @@ TEST_F(PaintCanvasVideoRendererTest, Video_Translate) {
PaintRotated(cropped_frame(), &canvas,
gfx::RectF(kWidth / 2, kHeight / 2, kWidth / 2, kHeight / 2),
- kNone, SkBlendMode::kSrcOver, VIDEO_ROTATION_0);
+ kNone, SkBlendMode::kSrcOver, kNoTransformation);
// Check the corners of quadrant 2 and 4.
EXPECT_EQ(SK_ColorMAGENTA, bitmap.getColor(0, 0));
EXPECT_EQ(SK_ColorMAGENTA, bitmap.getColor((kWidth / 2) - 1, 0));
@@ -442,7 +442,8 @@ TEST_F(PaintCanvasVideoRendererTest, Video_Translate_Rotation_90) {
PaintRotated(cropped_frame(), &canvas,
gfx::RectF(kWidth / 2, kHeight / 2, kWidth / 2, kHeight / 2),
- kNone, SkBlendMode::kSrcOver, VIDEO_ROTATION_90);
+ kNone, SkBlendMode::kSrcOver,
+ VideoTransformation(VIDEO_ROTATION_90));
// Check the corners of quadrant 2 and 4.
EXPECT_EQ(SK_ColorMAGENTA, bitmap.getColor(0, 0));
EXPECT_EQ(SK_ColorMAGENTA, bitmap.getColor((kWidth / 2) - 1, 0));
@@ -462,7 +463,8 @@ TEST_F(PaintCanvasVideoRendererTest, Video_Translate_Rotation_180) {
PaintRotated(cropped_frame(), &canvas,
gfx::RectF(kWidth / 2, kHeight / 2, kWidth / 2, kHeight / 2),
- kNone, SkBlendMode::kSrcOver, VIDEO_ROTATION_180);
+ kNone, SkBlendMode::kSrcOver,
+ VideoTransformation(VIDEO_ROTATION_180));
// Check the corners of quadrant 2 and 4.
EXPECT_EQ(SK_ColorMAGENTA, bitmap.getColor(0, 0));
EXPECT_EQ(SK_ColorMAGENTA, bitmap.getColor((kWidth / 2) - 1, 0));
@@ -482,7 +484,8 @@ TEST_F(PaintCanvasVideoRendererTest, Video_Translate_Rotation_270) {
PaintRotated(cropped_frame(), &canvas,
gfx::RectF(kWidth / 2, kHeight / 2, kWidth / 2, kHeight / 2),
- kNone, SkBlendMode::kSrcOver, VIDEO_ROTATION_270);
+ kNone, SkBlendMode::kSrcOver,
+ VideoTransformation(VIDEO_ROTATION_270));
// Check the corners of quadrant 2 and 4.
EXPECT_EQ(SK_ColorMAGENTA, bitmap.getColor(0, 0));
EXPECT_EQ(SK_ColorMAGENTA, bitmap.getColor((kWidth / 2) - 1, 0));
@@ -555,16 +558,16 @@ TEST_F(PaintCanvasVideoRendererTest, Y16) {
static_cast<unsigned char*>(base::AlignedAlloc(
byte_size, media::VideoFrame::kFrameAddressAlignment)));
const gfx::Rect rect(offset_x, offset_y, bitmap.width(), bitmap.height());
- scoped_refptr<media::VideoFrame> video_frame =
+ auto video_frame =
CreateTestY16Frame(gfx::Size(stride, offset_y + bitmap.height()), rect,
memory.get(), cropped_frame()->timestamp());
cc::SkiaPaintCanvas canvas(bitmap);
cc::PaintFlags flags;
flags.setFilterQuality(kNone_SkFilterQuality);
- renderer_.Paint(video_frame, &canvas,
+ renderer_.Paint(std::move(video_frame), &canvas,
gfx::RectF(bitmap.width(), bitmap.height()), flags,
- VIDEO_ROTATION_0, Context3D(), nullptr);
+ kNoTransformation, nullptr);
for (int j = 0; j < bitmap.height(); j++) {
for (int i = 0; i < bitmap.width(); i++) {
const int value = i + j * bitmap.width();
@@ -633,21 +636,19 @@ class TestGLES2Interface : public gpu::gles2::GLES2InterfaceStub {
const void* pixels)>
texsubimage2d_callback_;
};
+
void MailboxHoldersReleased(const gpu::SyncToken& sync_token) {}
} // namespace
// Test that PaintCanvasVideoRenderer::Paint doesn't crash when GrContext is
-// unable to wrap a video frame texture (eg due to being abandoned). The mock
-// GrContext will fail to wrap the texture even if it is not abandoned, but we
-// leave the abandonContext call in place, in case that behavior changes.
+// unable to wrap a video frame texture (eg due to being abandoned).
TEST_F(PaintCanvasVideoRendererTest, ContextLost) {
- sk_sp<GrContext> gr_context = GrContext::MakeMock(nullptr);
- gr_context->abandonContext();
+ auto context_provider = viz::TestContextProvider::Create();
+ context_provider->BindToCurrentThread();
+ context_provider->GrContext()->abandonContext();
cc::SkiaPaintCanvas canvas(AllocBitmap(kWidth, kHeight));
- TestGLES2Interface gles2;
- Context3D context_3d(&gles2, gr_context.get());
gfx::Size size(kWidth, kHeight);
gpu::MailboxHolder holders[VideoFrame::kMaxPlanes] = {gpu::MailboxHolder(
gpu::Mailbox::Generate(), gpu::SyncToken(), GL_TEXTURE_RECTANGLE_ARB)};
@@ -657,8 +658,8 @@ TEST_F(PaintCanvasVideoRendererTest, ContextLost) {
cc::PaintFlags flags;
flags.setFilterQuality(kLow_SkFilterQuality);
- renderer_.Paint(video_frame, &canvas, kNaturalRect, flags, VIDEO_ROTATION_90,
- context_3d, nullptr);
+ renderer_.Paint(std::move(video_frame), &canvas, kNaturalRect, flags,
+ kNoTransformation, context_provider.get());
}
void EmptyCallback(const gpu::SyncToken& sync_token) {}
@@ -682,8 +683,8 @@ TEST_F(PaintCanvasVideoRendererTest, CorrectFrameSizeToVisibleRect) {
gfx::RectF visible_rect(visible_size.width(), visible_size.height());
cc::PaintFlags flags;
- renderer_.Paint(video_frame, &canvas, visible_rect, flags, VIDEO_ROTATION_0,
- Context3D(), nullptr);
+ renderer_.Paint(std::move(video_frame), &canvas, visible_rect, flags,
+ kNoTransformation, nullptr);
EXPECT_EQ(fWidth / 2, renderer_.LastImageDimensionsForTesting().width());
EXPECT_EQ(fWidth / 2, renderer_.LastImageDimensionsForTesting().height());
@@ -702,7 +703,7 @@ TEST_F(PaintCanvasVideoRendererTest, TexImage2D_Y16_RGBA32F) {
static_cast<unsigned char*>(base::AlignedAlloc(
byte_size, media::VideoFrame::kFrameAddressAlignment)));
const gfx::Rect rect(offset_x, offset_y, width, height);
- scoped_refptr<media::VideoFrame> video_frame =
+ auto video_frame =
CreateTestY16Frame(gfx::Size(stride, offset_y + height), rect,
memory.get(), cropped_frame()->timestamp());
@@ -750,7 +751,7 @@ TEST_F(PaintCanvasVideoRendererTest, TexSubImage2D_Y16_R32F) {
static_cast<unsigned char*>(base::AlignedAlloc(
byte_size, media::VideoFrame::kFrameAddressAlignment)));
const gfx::Rect rect(offset_x, offset_y, width, height);
- scoped_refptr<media::VideoFrame> video_frame =
+ auto video_frame =
CreateTestY16Frame(gfx::Size(stride, offset_y + height), rect,
memory.get(), cropped_frame()->timestamp());
diff --git a/chromium/media/renderers/video_renderer_impl.cc b/chromium/media/renderers/video_renderer_impl.cc
index 8339e9d8782..c4c79da0c1d 100644
--- a/chromium/media/renderers/video_renderer_impl.cc
+++ b/chromium/media/renderers/video_renderer_impl.cc
@@ -328,6 +328,11 @@ void VideoRendererImpl::OnFrameDropped() {
algorithm_->OnLastFrameDropped();
}
+base::TimeDelta VideoRendererImpl::GetPreferredRenderInterval() {
+ base::AutoLock auto_lock(lock_);
+ return algorithm_->average_frame_duration();
+}
+
void VideoRendererImpl::OnVideoDecoderStreamInitialized(bool success) {
DCHECK(task_runner_->BelongsToCurrentThread());
base::AutoLock auto_lock(lock_);
@@ -472,7 +477,7 @@ void VideoRendererImpl::OnTimeStopped() {
}
void VideoRendererImpl::FrameReady(VideoDecoderStream::Status status,
- const scoped_refptr<VideoFrame>& frame) {
+ scoped_refptr<VideoFrame> frame) {
DCHECK(task_runner_->BelongsToCurrentThread());
base::AutoLock auto_lock(lock_);
DCHECK_EQ(state_, kPlaying);
@@ -530,7 +535,7 @@ void VideoRendererImpl::FrameReady(VideoDecoderStream::Status status,
video_decoder_stream_->AverageDuration());
}
- AddReadyFrame_Locked(frame);
+ AddReadyFrame_Locked(std::move(frame));
}
// Attempt to purge bad frames in case of underflow or backgrounding.
@@ -627,8 +632,7 @@ void VideoRendererImpl::TransitionToHaveNothing_Locked() {
weak_factory_.GetWeakPtr(), buffering_state_));
}
-void VideoRendererImpl::AddReadyFrame_Locked(
- const scoped_refptr<VideoFrame>& frame) {
+void VideoRendererImpl::AddReadyFrame_Locked(scoped_refptr<VideoFrame> frame) {
DCHECK(task_runner_->BelongsToCurrentThread());
lock_.AssertAcquired();
DCHECK(!frame->metadata()->IsTrue(VideoFrameMetadata::END_OF_STREAM));
@@ -642,7 +646,7 @@ void VideoRendererImpl::AddReadyFrame_Locked(
++stats_.video_frames_decoded_power_efficient;
}
- algorithm_->EnqueueFrame(frame);
+ algorithm_->EnqueueFrame(std::move(frame));
}
void VideoRendererImpl::AttemptRead_Locked() {
diff --git a/chromium/media/renderers/video_renderer_impl.h b/chromium/media/renderers/video_renderer_impl.h
index 852e28877e6..27a85cdf032 100644
--- a/chromium/media/renderers/video_renderer_impl.h
+++ b/chromium/media/renderers/video_renderer_impl.h
@@ -84,6 +84,7 @@ class MEDIA_EXPORT VideoRendererImpl
base::TimeTicks deadline_max,
bool background_rendering) override;
void OnFrameDropped() override;
+ base::TimeDelta GetPreferredRenderInterval() override;
private:
// Callback for |video_decoder_stream_| initialization.
@@ -106,10 +107,10 @@ class MEDIA_EXPORT VideoRendererImpl
// Callback for |video_decoder_stream_| to deliver decoded video frames and
// report video decoding status.
void FrameReady(VideoDecoderStream::Status status,
- const scoped_refptr<VideoFrame>& frame);
+ scoped_refptr<VideoFrame> frame);
// Helper method for enqueueing a frame to |alogorithm_|.
- void AddReadyFrame_Locked(const scoped_refptr<VideoFrame>& frame);
+ void AddReadyFrame_Locked(scoped_refptr<VideoFrame> frame);
// Helper method that schedules an asynchronous read from the
// |video_decoder_stream_| as long as there isn't a pending read and we have
diff --git a/chromium/media/renderers/video_renderer_impl_unittest.cc b/chromium/media/renderers/video_renderer_impl_unittest.cc
index a7cdc49eef9..dbea3303460 100644
--- a/chromium/media/renderers/video_renderer_impl_unittest.cc
+++ b/chromium/media/renderers/video_renderer_impl_unittest.cc
@@ -41,6 +41,7 @@
using ::testing::_;
using ::testing::AnyNumber;
using ::testing::CreateFunctor;
+using ::testing::DoAll;
using ::testing::Invoke;
using ::testing::Mock;
using ::testing::NiceMock;
@@ -61,13 +62,13 @@ class VideoRendererImplTest : public testing::Test {
decoder_ = new NiceMock<MockVideoDecoder>();
std::vector<std::unique_ptr<VideoDecoder>> decoders;
decoders.push_back(base::WrapUnique(decoder_));
- ON_CALL(*decoder_, Initialize(_, _, _, _, _, _))
+ ON_CALL(*decoder_, Initialize_(_, _, _, _, _, _))
.WillByDefault(DoAll(SaveArg<4>(&output_cb_),
- RunCallback<3>(expect_init_success_)));
+ RunOnceCallback<3>(expect_init_success_)));
// Monitor decodes from the decoder.
- ON_CALL(*decoder_, Decode(_, _))
+ ON_CALL(*decoder_, Decode_(_, _))
.WillByDefault(Invoke(this, &VideoRendererImplTest::DecodeRequested));
- ON_CALL(*decoder_, Reset(_))
+ ON_CALL(*decoder_, Reset_(_))
.WillByDefault(Invoke(this, &VideoRendererImplTest::FlushRequested));
ON_CALL(*decoder_, GetMaxDecodeRequests()).WillByDefault(Return(1));
return decoders;
@@ -459,7 +460,7 @@ class VideoRendererImplTest : public testing::Test {
// Use StrictMock<T> to catch missing/extra callbacks.
class MockCB : public MockRendererClient {
public:
- MOCK_METHOD1(FrameReceived, void(const scoped_refptr<VideoFrame>&));
+ MOCK_METHOD1(FrameReceived, void(scoped_refptr<VideoFrame>));
};
StrictMock<MockCB> mock_cb_;
@@ -470,11 +471,11 @@ class VideoRendererImplTest : public testing::Test {
private:
void DecodeRequested(scoped_refptr<DecoderBuffer> buffer,
- const VideoDecoder::DecodeCB& decode_cb) {
+ VideoDecoder::DecodeCB& decode_cb) {
EXPECT_TRUE(
task_environment_.GetMainThreadTaskRunner()->BelongsToCurrentThread());
CHECK(!decode_cb_);
- decode_cb_ = decode_cb;
+ decode_cb_ = std::move(decode_cb);
// Wake up WaitForPendingDecode() if needed.
if (wait_for_pending_decode_cb_)
@@ -489,7 +490,7 @@ class VideoRendererImplTest : public testing::Test {
SatisfyPendingDecode();
}
- void FlushRequested(const base::Closure& callback) {
+ void FlushRequested(base::OnceClosure& callback) {
EXPECT_TRUE(
task_environment_.GetMainThreadTaskRunner()->BelongsToCurrentThread());
decode_results_.clear();
@@ -498,7 +499,8 @@ class VideoRendererImplTest : public testing::Test {
SatisfyPendingDecode();
}
- task_environment_.GetMainThreadTaskRunner()->PostTask(FROM_HERE, callback);
+ task_environment_.GetMainThreadTaskRunner()->PostTask(FROM_HERE,
+ std::move(callback));
}
// Used to protect |time_|.
@@ -511,7 +513,7 @@ class VideoRendererImplTest : public testing::Test {
base::TimeDelta next_frame_timestamp_;
// Run during DecodeRequested() to unblock WaitForPendingDecode().
- base::Closure wait_for_pending_decode_cb_;
+ base::OnceClosure wait_for_pending_decode_cb_;
base::circular_deque<std::pair<DecodeStatus, scoped_refptr<VideoFrame>>>
decode_results_;
@@ -885,6 +887,9 @@ TEST_F(VideoRendererImplTest, RenderingStartedThenStopped) {
// the previous call, the total should be 4 * 115200.
EXPECT_EQ(115200, last_pipeline_statistics.video_memory_usage);
+ EXPECT_EQ(renderer_->GetPreferredRenderInterval(),
+ last_pipeline_statistics.video_frame_duration_average);
+
// Consider the case that rendering is faster than we setup the test event.
// In that case, when we run out of the frames, BUFFERING_HAVE_NOTHING will
// be called. And then during SatisfyPendingDecodeWithEndOfStream,
diff --git a/chromium/media/renderers/video_resource_updater.cc b/chromium/media/renderers/video_resource_updater.cc
index 5220564a42c..000466484be 100644
--- a/chromium/media/renderers/video_resource_updater.cc
+++ b/chromium/media/renderers/video_resource_updater.cc
@@ -13,7 +13,8 @@
#include "base/atomic_sequence_num.h"
#include "base/bind.h"
#include "base/bit_cast.h"
-#include "base/memory/shared_memory.h"
+#include "base/memory/shared_memory_mapping.h"
+#include "base/memory/unsafe_shared_memory_region.h"
#include "base/strings/stringprintf.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/memory_dump_manager.h"
@@ -61,14 +62,15 @@ VideoFrameResourceType ExternalResourceTypeForHardwarePlanes(
VideoPixelFormat format,
GLuint target,
int num_textures,
- gfx::BufferFormat* buffer_format,
+ gfx::BufferFormat buffer_formats[VideoFrame::kMaxPlanes],
bool use_stream_video_draw_quad) {
- *buffer_format = gfx::BufferFormat::RGBA_8888;
switch (format) {
case PIXEL_FORMAT_ARGB:
case PIXEL_FORMAT_XRGB:
case PIXEL_FORMAT_RGB32:
case PIXEL_FORMAT_UYVY:
+ DCHECK_EQ(num_textures, 1);
+ buffer_formats[0] = gfx::BufferFormat::RGBA_8888;
switch (target) {
case GL_TEXTURE_EXTERNAL_OES:
if (use_stream_video_draw_quad)
@@ -85,16 +87,25 @@ VideoFrameResourceType ExternalResourceTypeForHardwarePlanes(
}
break;
case PIXEL_FORMAT_I420:
+ DCHECK(num_textures == 3);
+ buffer_formats[0] = gfx::BufferFormat::R_8;
+ buffer_formats[1] = gfx::BufferFormat::R_8;
+ buffer_formats[2] = gfx::BufferFormat::R_8;
return VideoFrameResourceType::YUV;
case PIXEL_FORMAT_NV12:
DCHECK(target == GL_TEXTURE_EXTERNAL_OES || target == GL_TEXTURE_2D ||
target == GL_TEXTURE_RECTANGLE_ARB)
<< "Unsupported target " << gl::GLEnums::GetStringEnum(target);
+ DCHECK(num_textures <= 2);
+
// Single plane textures can be sampled as RGB.
- if (num_textures > 1)
+ if (num_textures == 2) {
+ buffer_formats[0] = gfx::BufferFormat::R_8;
+ buffer_formats[1] = gfx::BufferFormat::RG_88;
return VideoFrameResourceType::YUV;
+ }
- *buffer_format = gfx::BufferFormat::YUV_420_BIPLANAR;
+ buffer_formats[0] = gfx::BufferFormat::YUV_420_BIPLANAR;
return VideoFrameResourceType::RGB;
case PIXEL_FORMAT_YV12:
case PIXEL_FORMAT_I422:
@@ -269,14 +280,13 @@ class VideoResourceUpdater::SoftwarePlaneResource
DCHECK(shared_bitmap_reporter_);
// Allocate SharedMemory and notify display compositor of the allocation.
- shared_memory_ = viz::bitmap_allocation::AllocateMappedBitmap(
- resource_size(), viz::ResourceFormat::RGBA_8888);
- mojo::ScopedSharedBufferHandle handle =
- viz::bitmap_allocation::DuplicateAndCloseMappedBitmap(
- shared_memory_.get(), resource_size(),
- viz::ResourceFormat::RGBA_8888);
- shared_bitmap_reporter_->DidAllocateSharedBitmap(std::move(handle),
- shared_bitmap_id_);
+ base::MappedReadOnlyRegion shm =
+ viz::bitmap_allocation::AllocateSharedBitmap(
+ resource_size(), viz::ResourceFormat::RGBA_8888);
+ shared_mapping_ = std::move(shm.mapping);
+ shared_bitmap_reporter_->DidAllocateSharedBitmap(
+ viz::bitmap_allocation::ToMojoHandle(std::move(shm.region)),
+ shared_bitmap_id_);
}
~SoftwarePlaneResource() override {
shared_bitmap_reporter_->DidDeleteSharedBitmap(shared_bitmap_id_);
@@ -285,17 +295,17 @@ class VideoResourceUpdater::SoftwarePlaneResource
const viz::SharedBitmapId& shared_bitmap_id() const {
return shared_bitmap_id_;
}
- void* pixels() { return shared_memory_->memory(); }
+ void* pixels() { return shared_mapping_.memory(); }
// Returns a memory dump GUID consistent across processes.
base::UnguessableToken GetSharedMemoryGuid() const {
- return shared_memory_->mapped_id();
+ return shared_mapping_.guid();
}
private:
viz::SharedBitmapReporter* const shared_bitmap_reporter_;
const viz::SharedBitmapId shared_bitmap_id_;
- std::unique_ptr<base::SharedMemory> shared_memory_;
+ base::WritableSharedMemoryMapping shared_mapping_;
DISALLOW_COPY_AND_ASSIGN(SoftwarePlaneResource);
};
@@ -629,7 +639,7 @@ void VideoResourceUpdater::AppendQuads(viz::RenderPass* render_pass,
stream_video_quad->SetNew(shared_quad_state, quad_rect, visible_quad_rect,
needs_blending, frame_resources_[0].id,
frame_resources_[0].size_in_pixels, uv_top_left,
- uv_bottom_right);
+ uv_bottom_right, frame->ycbcr_info());
for (viz::ResourceId resource_id : stream_video_quad->resources) {
resource_provider_->ValidateResource(resource_id);
}
@@ -772,7 +782,8 @@ void VideoResourceUpdater::CopyHardwarePlane(
gpu::SyncToken sync_token = video_frame->UpdateReleaseSyncToken(&client);
auto transferable_resource = viz::TransferableResource::MakeGL(
- hardware_resource->mailbox(), GL_LINEAR, GL_TEXTURE_2D, sync_token);
+ hardware_resource->mailbox(), GL_LINEAR, GL_TEXTURE_2D, sync_token,
+ output_plane_resource_size, false /* is_overlay_candidate */);
transferable_resource.color_space = resource_color_space;
transferable_resource.format = copy_resource_format;
external_resources->resources.push_back(std::move(transferable_resource));
@@ -800,9 +811,9 @@ VideoFrameExternalResources VideoResourceUpdater::CreateForHardwarePlanes(
if (copy_required)
target = GL_TEXTURE_2D;
- gfx::BufferFormat buffer_format;
+ gfx::BufferFormat buffer_formats[VideoFrame::kMaxPlanes];
external_resources.type = ExternalResourceTypeForHardwarePlanes(
- video_frame->format(), target, video_frame->NumTextures(), &buffer_format,
+ video_frame->format(), target, video_frame->NumTextures(), buffer_formats,
use_stream_video_draw_quad_);
if (external_resources.type == VideoFrameResourceType::NONE) {
@@ -832,7 +843,7 @@ VideoFrameExternalResources VideoResourceUpdater::CreateForHardwarePlanes(
const size_t height =
VideoFrame::Rows(i, video_frame->format(), coded_size.height());
const gfx::Size plane_size(width, height);
- auto transfer_resource = viz::TransferableResource::MakeGLOverlay(
+ auto transfer_resource = viz::TransferableResource::MakeGL(
mailbox_holder.mailbox, GL_LINEAR, mailbox_holder.texture_target,
mailbox_holder.sync_token, plane_size,
video_frame->metadata()->IsTrue(VideoFrameMetadata::ALLOW_OVERLAY));
@@ -840,7 +851,7 @@ VideoFrameExternalResources VideoResourceUpdater::CreateForHardwarePlanes(
transfer_resource.read_lock_fences_enabled =
video_frame->metadata()->IsTrue(
VideoFrameMetadata::READ_LOCK_FENCES_ENABLED);
- transfer_resource.format = viz::GetResourceFormat(buffer_format);
+ transfer_resource.format = viz::GetResourceFormat(buffer_formats[i]);
#if defined(OS_ANDROID)
transfer_resource.is_backed_by_surface_texture =
@@ -972,7 +983,7 @@ VideoFrameExternalResources VideoResourceUpdater::CreateForSoftwarePlanes(
// This is software path, so canvas and video_frame are always backed
// by software.
- video_renderer_->Copy(video_frame, &canvas, Context3D(), nullptr);
+ video_renderer_->Copy(video_frame, &canvas, nullptr);
} else {
HardwarePlaneResource* hardware_resource = plane_resource->AsHardware();
size_t bytes_per_row = viz::ResourceSizes::CheckedWidthInBytes<size_t>(
@@ -1024,7 +1035,7 @@ VideoFrameExternalResources VideoResourceUpdater::CreateForSoftwarePlanes(
? raster_context_provider_->ContextGL()
: context_provider_->ContextGL();
GenerateCompositorSyncToken(gl, &sync_token);
- transferable_resource = viz::TransferableResource::MakeGLOverlay(
+ transferable_resource = viz::TransferableResource::MakeGL(
hardware_resource->mailbox(), GL_LINEAR,
hardware_resource->texture_target(), sync_token,
hardware_resource->resource_size(),
@@ -1178,7 +1189,7 @@ VideoFrameExternalResources VideoResourceUpdater::CreateForSoftwarePlanes(
for (size_t i = 0; i < plane_resources.size(); ++i) {
HardwarePlaneResource* plane_resource = plane_resources[i]->AsHardware();
- auto transferable_resource = viz::TransferableResource::MakeGLOverlay(
+ auto transferable_resource = viz::TransferableResource::MakeGL(
plane_resource->mailbox(), GL_LINEAR, plane_resource->texture_target(),
sync_token, plane_resource->resource_size(),
plane_resource->overlay_candidate());
@@ -1194,10 +1205,9 @@ VideoFrameExternalResources VideoResourceUpdater::CreateForSoftwarePlanes(
return external_resources;
}
-void VideoResourceUpdater::ReturnTexture(
- const scoped_refptr<VideoFrame>& video_frame,
- const gpu::SyncToken& sync_token,
- bool lost_resource) {
+void VideoResourceUpdater::ReturnTexture(scoped_refptr<VideoFrame> video_frame,
+ const gpu::SyncToken& sync_token,
+ bool lost_resource) {
// TODO(dshwang): Forward to the decoder as a lost resource.
if (lost_resource)
return;
diff --git a/chromium/media/renderers/video_resource_updater.h b/chromium/media/renderers/video_resource_updater.h
index 783780c050f..90691903b8b 100644
--- a/chromium/media/renderers/video_resource_updater.h
+++ b/chromium/media/renderers/video_resource_updater.h
@@ -185,7 +185,7 @@ class MEDIA_EXPORT VideoResourceUpdater
void RecycleResource(uint32_t plane_resource_id,
const gpu::SyncToken& sync_token,
bool lost_resource);
- void ReturnTexture(const scoped_refptr<VideoFrame>& video_frame,
+ void ReturnTexture(scoped_refptr<VideoFrame> video_frame,
const gpu::SyncToken& sync_token,
bool lost_resource);
diff --git a/chromium/media/renderers/video_resource_updater_unittest.cc b/chromium/media/renderers/video_resource_updater_unittest.cc
index fb39b4dfe41..af40660c958 100644
--- a/chromium/media/renderers/video_resource_updater_unittest.cc
+++ b/chromium/media/renderers/video_resource_updater_unittest.cc
@@ -717,7 +717,8 @@ TEST_F(VideoResourceUpdaterTest, CreateForHardwarePlanes_DualNV12) {
EXPECT_EQ((GLenum)GL_TEXTURE_EXTERNAL_OES,
resources.resources[0].mailbox_holder.texture_target);
// |updater| doesn't set |buffer_format| in this case.
- EXPECT_EQ(viz::RGBA_8888, resources.resources[0].format);
+ EXPECT_EQ(viz::RED_8, resources.resources[0].format);
+ EXPECT_EQ(viz::RG_88, resources.resources[1].format);
video_frame = CreateTestYuvHardwareVideoFrame(media::PIXEL_FORMAT_NV12, 2,
GL_TEXTURE_RECTANGLE_ARB);
@@ -726,7 +727,8 @@ TEST_F(VideoResourceUpdaterTest, CreateForHardwarePlanes_DualNV12) {
EXPECT_EQ(2u, resources.resources.size());
EXPECT_EQ((GLenum)GL_TEXTURE_RECTANGLE_ARB,
resources.resources[0].mailbox_holder.texture_target);
- EXPECT_EQ(viz::RGBA_8888, resources.resources[0].format);
+ EXPECT_EQ(viz::RED_8, resources.resources[0].format);
+ EXPECT_EQ(viz::RG_88, resources.resources[1].format);
EXPECT_EQ(0u, GetSharedImageCount());
}
diff --git a/chromium/media/video/BUILD.gn b/chromium/media/video/BUILD.gn
index 6b9bc81dea7..10172beeafc 100644
--- a/chromium/media/video/BUILD.gn
+++ b/chromium/media/video/BUILD.gn
@@ -29,10 +29,6 @@ source_set("video") {
"h264_poc.h",
"half_float_maker.cc",
"half_float_maker.h",
- "jpeg_encode_accelerator.cc",
- "jpeg_encode_accelerator.h",
- "mjpeg_decode_accelerator.cc",
- "mjpeg_decode_accelerator.h",
"picture.cc",
"picture.h",
"supported_video_decoder_config.cc",
diff --git a/chromium/media/video/fake_video_encode_accelerator.cc b/chromium/media/video/fake_video_encode_accelerator.cc
index 1a311131127..2a551e34fac 100644
--- a/chromium/media/video/fake_video_encode_accelerator.cc
+++ b/chromium/media/video/fake_video_encode_accelerator.cc
@@ -58,17 +58,16 @@ bool FakeVideoEncodeAccelerator::Initialize(const Config& config,
return true;
}
-void FakeVideoEncodeAccelerator::Encode(
- const scoped_refptr<VideoFrame>& frame,
- bool force_keyframe) {
+void FakeVideoEncodeAccelerator::Encode(scoped_refptr<VideoFrame> frame,
+ bool force_keyframe) {
DCHECK(client_);
queued_frames_.push(force_keyframe);
EncodeTask();
}
void FakeVideoEncodeAccelerator::UseOutputBitstreamBuffer(
- const BitstreamBuffer& buffer) {
- available_buffers_.push_back(buffer);
+ BitstreamBuffer buffer) {
+ available_buffers_.push_back(std::move(buffer));
EncodeTask();
}
diff --git a/chromium/media/video/fake_video_encode_accelerator.h b/chromium/media/video/fake_video_encode_accelerator.h
index 445fc18ef85..f9fd28ea537 100644
--- a/chromium/media/video/fake_video_encode_accelerator.h
+++ b/chromium/media/video/fake_video_encode_accelerator.h
@@ -35,9 +35,8 @@ class FakeVideoEncodeAccelerator : public VideoEncodeAccelerator {
VideoEncodeAccelerator::SupportedProfiles GetSupportedProfiles() override;
bool Initialize(const Config& config, Client* client) override;
- void Encode(const scoped_refptr<VideoFrame>& frame,
- bool force_keyframe) override;
- void UseOutputBitstreamBuffer(const BitstreamBuffer& buffer) override;
+ void Encode(scoped_refptr<VideoFrame> frame, bool force_keyframe) override;
+ void UseOutputBitstreamBuffer(BitstreamBuffer buffer) override;
void RequestEncodingParametersChange(uint32_t bitrate,
uint32_t framerate) override;
void RequestEncodingParametersChange(const VideoBitrateAllocation& bitrate,
diff --git a/chromium/media/video/gpu_memory_buffer_video_frame_pool.cc b/chromium/media/video/gpu_memory_buffer_video_frame_pool.cc
index 78d1d08686d..e9e7777701d 100644
--- a/chromium/media/video/gpu_memory_buffer_video_frame_pool.cc
+++ b/chromium/media/video/gpu_memory_buffer_video_frame_pool.cc
@@ -71,7 +71,7 @@ class GpuMemoryBufferVideoFramePool::PoolImpl
// asynchronously posting tasks to |worker_task_runner_|, while
// |frame_ready_cb| will be called on |media_task_runner_| once all the data
// has been copied.
- void CreateHardwareFrame(const scoped_refptr<VideoFrame>& video_frame,
+ void CreateHardwareFrame(scoped_refptr<VideoFrame> video_frame,
FrameReadyCB cb);
bool OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
@@ -129,7 +129,7 @@ class GpuMemoryBufferVideoFramePool::PoolImpl
VideoFrameCopyRequest(scoped_refptr<VideoFrame> video_frame,
FrameReadyCB frame_ready_cb,
bool passthrough)
- : video_frame(video_frame),
+ : video_frame(std::move(video_frame)),
frame_ready_cb(std::move(frame_ready_cb)),
passthrough(passthrough) {}
scoped_refptr<VideoFrame> video_frame;
@@ -144,19 +144,18 @@ class GpuMemoryBufferVideoFramePool::PoolImpl
// Copy |video_frame| data into |frame_resources| and calls |frame_ready_cb|
// when done.
- void CopyVideoFrameToGpuMemoryBuffers(
- const scoped_refptr<VideoFrame>& video_frame,
- FrameResources* frame_resources);
+ void CopyVideoFrameToGpuMemoryBuffers(scoped_refptr<VideoFrame> video_frame,
+ FrameResources* frame_resources);
// Called when all the data has been copied.
- void OnCopiesDone(const scoped_refptr<VideoFrame>& video_frame,
+ void OnCopiesDone(scoped_refptr<VideoFrame> video_frame,
FrameResources* frame_resources);
// Prepares GL resources, mailboxes and calls |frame_ready_cb| with the new
// VideoFrame. This has to be run on |media_task_runner_| where
// |frame_ready_cb| associated with video_frame will also be run.
void BindAndCreateMailboxesHardwareFrameResources(
- const scoped_refptr<VideoFrame>& video_frame,
+ scoped_refptr<VideoFrame> video_frame,
FrameResources* frame_resources);
// Return true if |resources| can be used to represent a frame for
@@ -178,7 +177,7 @@ class GpuMemoryBufferVideoFramePool::PoolImpl
// |frame_copy_requests_| and attempts to start another copy if there are
// other |frame_copy_requests_| elements.
void CompleteCopyRequestAndMaybeStartNextCopy(
- const scoped_refptr<VideoFrame>& video_frame);
+ scoped_refptr<VideoFrame> video_frame);
// Callback called when a VideoFrame generated with GetFrameResources is no
// longer referenced.
@@ -380,7 +379,7 @@ void CopyRowsToI420Buffer(int first_row,
void CopyRowsToNV12Buffer(int first_row,
int rows,
int bytes_per_row,
- const scoped_refptr<VideoFrame>& source_frame,
+ const VideoFrame* source_frame,
uint8_t* dest_y,
int dest_stride_y,
uint8_t* dest_uv,
@@ -418,7 +417,7 @@ void CopyRowsToNV12Buffer(int first_row,
void CopyRowsToUYVYBuffer(int first_row,
int rows,
int width,
- const scoped_refptr<VideoFrame>& source_frame,
+ const VideoFrame* source_frame,
uint8_t* output,
int dest_stride,
base::OnceClosure done) {
@@ -451,7 +450,7 @@ void CopyRowsToRGB10Buffer(bool is_argb,
int first_row,
int rows,
int width,
- const scoped_refptr<VideoFrame>& source_frame,
+ const VideoFrame* source_frame,
uint8_t* output,
int dest_stride,
base::OnceClosure done) {
@@ -510,7 +509,7 @@ void CopyRowsToRGBABuffer(bool is_rgba,
int first_row,
int rows,
int width,
- const scoped_refptr<VideoFrame>& source_frame,
+ const VideoFrame* source_frame,
uint8_t* output,
int dest_stride,
base::OnceClosure done) {
@@ -546,7 +545,7 @@ void CopyRowsToRGBABuffer(bool is_rgba,
1 /* attenuate, meaning premultiply */);
}
-gfx::Size CodedSize(const scoped_refptr<VideoFrame>& video_frame,
+gfx::Size CodedSize(const VideoFrame* video_frame,
GpuVideoAcceleratorFactories::OutputFormat output_format) {
DCHECK(gfx::Rect(video_frame->coded_size())
.Contains(video_frame->visible_rect()));
@@ -582,7 +581,7 @@ gfx::Size CodedSize(const scoped_refptr<VideoFrame>& video_frame,
// |frame_ready_cb|.
// This has to be called on the thread where |media_task_runner_| is current.
void GpuMemoryBufferVideoFramePool::PoolImpl::CreateHardwareFrame(
- const scoped_refptr<VideoFrame>& video_frame,
+ scoped_refptr<VideoFrame> video_frame,
FrameReadyCB frame_ready_cb) {
DCHECK(media_task_runner_->BelongsToCurrentThread());
// Lazily initialize |output_format_| since VideoFrameOutputFormat() has to be
@@ -593,7 +592,7 @@ void GpuMemoryBufferVideoFramePool::PoolImpl::CreateHardwareFrame(
// Bail if we have a change of GpuVideoAcceleratorFactories::OutputFormat;
// such changes should not happen in general (see https://crbug.com/875158).
if (output_format_ != gpu_factories_->VideoFrameOutputFormat(pixel_format)) {
- std::move(frame_ready_cb).Run(video_frame);
+ std::move(frame_ready_cb).Run(std::move(video_frame));
return;
}
@@ -667,8 +666,8 @@ void GpuMemoryBufferVideoFramePool::PoolImpl::CreateHardwareFrame(
passthrough = true;
}
- frame_copy_requests_.emplace_back(video_frame, std::move(frame_ready_cb),
- passthrough);
+ frame_copy_requests_.emplace_back(std::move(video_frame),
+ std::move(frame_ready_cb), passthrough);
if (frame_copy_requests_.size() == 1u)
StartCopy();
}
@@ -717,7 +716,7 @@ void GpuMemoryBufferVideoFramePool::PoolImpl::Abort() {
}
void GpuMemoryBufferVideoFramePool::PoolImpl::OnCopiesDone(
- const scoped_refptr<VideoFrame>& video_frame,
+ scoped_refptr<VideoFrame> video_frame,
FrameResources* frame_resources) {
for (const auto& plane_resource : frame_resources->plane_resources) {
if (plane_resource.gpu_memory_buffer) {
@@ -733,7 +732,7 @@ void GpuMemoryBufferVideoFramePool::PoolImpl::OnCopiesDone(
media_task_runner_->PostTask(
FROM_HERE,
base::BindOnce(&PoolImpl::BindAndCreateMailboxesHardwareFrameResources,
- this, video_frame, frame_resources));
+ this, std::move(video_frame), frame_resources));
}
void GpuMemoryBufferVideoFramePool::PoolImpl::StartCopy() {
@@ -747,10 +746,10 @@ void GpuMemoryBufferVideoFramePool::PoolImpl::StartCopy() {
request.passthrough
? nullptr
: GetOrCreateFrameResources(
- CodedSize(request.video_frame, output_format_),
+ CodedSize(request.video_frame.get(), output_format_),
output_format_);
if (!frame_resources) {
- std::move(request.frame_ready_cb).Run(request.video_frame);
+ std::move(request.frame_ready_cb).Run(std::move(request.video_frame));
frame_copy_requests_.pop_front();
continue;
}
@@ -766,12 +765,12 @@ void GpuMemoryBufferVideoFramePool::PoolImpl::StartCopy() {
// that will be synchronized by a barrier.
// After the barrier is passed OnCopiesDone will be called.
void GpuMemoryBufferVideoFramePool::PoolImpl::CopyVideoFrameToGpuMemoryBuffers(
- const scoped_refptr<VideoFrame>& video_frame,
+ scoped_refptr<VideoFrame> video_frame,
FrameResources* frame_resources) {
// Compute the number of tasks to post and create the barrier.
const size_t num_planes = VideoFrame::NumPlanes(VideoFormat(output_format_));
const size_t planes_per_copy = PlanesPerCopy(output_format_);
- const gfx::Size coded_size = CodedSize(video_frame, output_format_);
+ const gfx::Size coded_size = CodedSize(video_frame.get(), output_format_);
size_t copies = 0;
for (size_t i = 0; i < num_planes; i += planes_per_copy) {
const int rows =
@@ -783,6 +782,7 @@ void GpuMemoryBufferVideoFramePool::PoolImpl::CopyVideoFrameToGpuMemoryBuffers(
++copies;
}
+ // |barrier| keeps refptr of |video_frame| until all copy tasks are done.
const base::RepeatingClosure barrier = base::BarrierClosure(
copies, base::BindOnce(&PoolImpl::OnCopiesDone, this, video_frame,
frame_resources));
@@ -827,32 +827,41 @@ void GpuMemoryBufferVideoFramePool::PoolImpl::CopyVideoFrameToGpuMemoryBuffers(
break;
}
case GpuVideoAcceleratorFactories::OutputFormat::NV12_SINGLE_GMB:
+ // Using base::Unretained(video_frame) here is safe because |barrier|
+ // keeps refptr of |video_frame| until all copy tasks are done.
worker_task_runner_->PostTask(
FROM_HERE,
base::BindOnce(
&CopyRowsToNV12Buffer, row, rows_to_copy, coded_size.width(),
- video_frame, static_cast<uint8_t*>(buffer->memory(0)),
- buffer->stride(0), static_cast<uint8_t*>(buffer->memory(1)),
- buffer->stride(1), barrier));
+ base::Unretained(video_frame.get()),
+ static_cast<uint8_t*>(buffer->memory(0)), buffer->stride(0),
+ static_cast<uint8_t*>(buffer->memory(1)), buffer->stride(1),
+ barrier));
break;
case GpuVideoAcceleratorFactories::OutputFormat::NV12_DUAL_GMB: {
gfx::GpuMemoryBuffer* buffer2 =
frame_resources->plane_resources[1].gpu_memory_buffer.get();
+ // Using base::Unretained(video_frame) here is safe because |barrier|
+ // keeps refptr of |video_frame| until all copy tasks are done.
worker_task_runner_->PostTask(
FROM_HERE,
base::BindOnce(
&CopyRowsToNV12Buffer, row, rows_to_copy, coded_size.width(),
- video_frame, static_cast<uint8_t*>(buffer->memory(0)),
- buffer->stride(0), static_cast<uint8_t*>(buffer2->memory(0)),
- buffer2->stride(0), barrier));
+ base::Unretained(video_frame.get()),
+ static_cast<uint8_t*>(buffer->memory(0)), buffer->stride(0),
+ static_cast<uint8_t*>(buffer2->memory(0)), buffer2->stride(0),
+ barrier));
break;
}
case GpuVideoAcceleratorFactories::OutputFormat::UYVY:
+ // Using base::Unretained(video_frame) here is safe because |barrier|
+ // keeps refptr of |video_frame| until all copy tasks are done.
worker_task_runner_->PostTask(
FROM_HERE,
base::BindOnce(&CopyRowsToUYVYBuffer, row, rows_to_copy,
- coded_size.width(), video_frame,
+ coded_size.width(),
+ base::Unretained(video_frame.get()),
static_cast<uint8_t*>(buffer->memory(0)),
buffer->stride(0), barrier));
break;
@@ -861,10 +870,13 @@ void GpuMemoryBufferVideoFramePool::PoolImpl::CopyVideoFrameToGpuMemoryBuffers(
case GpuVideoAcceleratorFactories::OutputFormat::XB30: {
const bool is_argb = output_format_ ==
GpuVideoAcceleratorFactories::OutputFormat::XR30;
+ // Using base::Unretained(video_frame) here is safe because |barrier|
+ // keeps refptr of |video_frame| until all copy tasks are done.
worker_task_runner_->PostTask(
FROM_HERE,
base::BindOnce(&CopyRowsToRGB10Buffer, is_argb, row, rows_to_copy,
- coded_size.width(), video_frame,
+ coded_size.width(),
+ base::Unretained(video_frame.get()),
static_cast<uint8_t*>(buffer->memory(0)),
buffer->stride(0), barrier));
break;
@@ -874,10 +886,13 @@ void GpuMemoryBufferVideoFramePool::PoolImpl::CopyVideoFrameToGpuMemoryBuffers(
case GpuVideoAcceleratorFactories::OutputFormat::BGRA: {
const bool is_rgba = output_format_ ==
GpuVideoAcceleratorFactories::OutputFormat::RGBA;
+ // Using base::Unretained(video_frame) here is safe because |barrier|
+ // keeps refptr of |video_frame| until all copy tasks are done.
worker_task_runner_->PostTask(
FROM_HERE,
base::BindOnce(&CopyRowsToRGBABuffer, is_rgba, row, rows_to_copy,
- coded_size.width(), video_frame,
+ coded_size.width(),
+ base::Unretained(video_frame.get()),
static_cast<uint8_t*>(buffer->memory(0)),
buffer->stride(0), barrier));
break;
@@ -892,16 +907,16 @@ void GpuMemoryBufferVideoFramePool::PoolImpl::CopyVideoFrameToGpuMemoryBuffers(
void GpuMemoryBufferVideoFramePool::PoolImpl::
BindAndCreateMailboxesHardwareFrameResources(
- const scoped_refptr<VideoFrame>& video_frame,
+ scoped_refptr<VideoFrame> video_frame,
FrameResources* frame_resources) {
gpu::SharedImageInterface* sii = gpu_factories_->SharedImageInterface();
if (!sii) {
frame_resources->MarkUnused(tick_clock_->NowTicks());
- CompleteCopyRequestAndMaybeStartNextCopy(video_frame);
+ CompleteCopyRequestAndMaybeStartNextCopy(std::move(video_frame));
return;
}
- const gfx::Size coded_size = CodedSize(video_frame, output_format_);
+ const gfx::Size coded_size = CodedSize(video_frame.get(), output_format_);
gpu::MailboxHolder mailbox_holders[VideoFrame::kMaxPlanes];
// Set up the planes creating the mailboxes needed to refer to the textures.
for (size_t i = 0; i < NumGpuMemoryBuffers(output_format_); i++) {
@@ -945,7 +960,7 @@ void GpuMemoryBufferVideoFramePool::PoolImpl::
if (!frame) {
frame_resources->MarkUnused(tick_clock_->NowTicks());
MailboxHoldersReleased(frame_resources, gpu::SyncToken());
- CompleteCopyRequestAndMaybeStartNextCopy(video_frame);
+ CompleteCopyRequestAndMaybeStartNextCopy(std::move(video_frame));
return;
}
frame->SetReleaseMailboxCB(
@@ -992,7 +1007,7 @@ void GpuMemoryBufferVideoFramePool::PoolImpl::
frame->metadata()->SetBoolean(VideoFrameMetadata::READ_LOCK_FENCES_ENABLED,
true);
- CompleteCopyRequestAndMaybeStartNextCopy(frame);
+ CompleteCopyRequestAndMaybeStartNextCopy(std::move(frame));
}
// Destroy all the resources posting one task per FrameResources
@@ -1072,10 +1087,11 @@ GpuMemoryBufferVideoFramePool::PoolImpl::GetOrCreateFrameResources(
void GpuMemoryBufferVideoFramePool::PoolImpl::
CompleteCopyRequestAndMaybeStartNextCopy(
- const scoped_refptr<VideoFrame>& video_frame) {
+ scoped_refptr<VideoFrame> video_frame) {
DCHECK(!frame_copy_requests_.empty());
- std::move(frame_copy_requests_.front().frame_ready_cb).Run(video_frame);
+ std::move(frame_copy_requests_.front().frame_ready_cb)
+ .Run(std::move(video_frame));
frame_copy_requests_.pop_front();
if (!frame_copy_requests_.empty())
StartCopy();
@@ -1182,10 +1198,11 @@ GpuMemoryBufferVideoFramePool::~GpuMemoryBufferVideoFramePool() {
}
void GpuMemoryBufferVideoFramePool::MaybeCreateHardwareFrame(
- const scoped_refptr<VideoFrame>& video_frame,
+ scoped_refptr<VideoFrame> video_frame,
FrameReadyCB frame_ready_cb) {
DCHECK(video_frame);
- pool_impl_->CreateHardwareFrame(video_frame, std::move(frame_ready_cb));
+ pool_impl_->CreateHardwareFrame(std::move(video_frame),
+ std::move(frame_ready_cb));
}
void GpuMemoryBufferVideoFramePool::Abort() {
diff --git a/chromium/media/video/gpu_memory_buffer_video_frame_pool.h b/chromium/media/video/gpu_memory_buffer_video_frame_pool.h
index 7b86cedc6ae..1f0e178e67e 100644
--- a/chromium/media/video/gpu_memory_buffer_video_frame_pool.h
+++ b/chromium/media/video/gpu_memory_buffer_video_frame_pool.h
@@ -42,8 +42,7 @@ class MEDIA_EXPORT GpuMemoryBufferVideoFramePool {
// Callback used by MaybeCreateHardwareFrame to deliver a new VideoFrame
// after it has been copied to GpuMemoryBuffers.
- using FrameReadyCB =
- base::OnceCallback<void(const scoped_refptr<VideoFrame>&)>;
+ using FrameReadyCB = base::OnceCallback<void(scoped_refptr<VideoFrame>)>;
// Calls |cb| on |media_worker_pool| with a new VideoFrame containing only
// mailboxes to native resources. |cb| will be destroyed on
@@ -52,9 +51,8 @@ class MEDIA_EXPORT GpuMemoryBufferVideoFramePool {
// |video_frame|.
// If it's not possible to create a new hardware VideoFrame, |video_frame|
// itself will passed to |cb|.
- virtual void MaybeCreateHardwareFrame(
- const scoped_refptr<VideoFrame>& video_frame,
- FrameReadyCB frame_ready_cb);
+ virtual void MaybeCreateHardwareFrame(scoped_refptr<VideoFrame> video_frame,
+ FrameReadyCB frame_ready_cb);
// Aborts any pending copies. Previously provided |frame_ready_cb| callbacks
// may still be called if the copy has already started.
diff --git a/chromium/media/video/gpu_memory_buffer_video_frame_pool_unittest.cc b/chromium/media/video/gpu_memory_buffer_video_frame_pool_unittest.cc
index 589c643e03e..7ab1e9362ae 100644
--- a/chromium/media/video/gpu_memory_buffer_video_frame_pool_unittest.cc
+++ b/chromium/media/video/gpu_memory_buffer_video_frame_pool_unittest.cc
@@ -140,15 +140,15 @@ class GpuMemoryBufferVideoFramePoolTest : public ::testing::Test {
void MaybeCreateHardwareFrameCallback(
scoped_refptr<VideoFrame>* video_frame_output,
- const scoped_refptr<VideoFrame>& video_frame) {
- *video_frame_output = video_frame;
+ scoped_refptr<VideoFrame> video_frame) {
+ *video_frame_output = std::move(video_frame);
}
void MaybeCreateHardwareFrameCallbackAndTrackTime(
scoped_refptr<VideoFrame>* video_frame_output,
base::TimeTicks* output_time,
- const scoped_refptr<VideoFrame>& video_frame) {
- *video_frame_output = video_frame;
+ scoped_refptr<VideoFrame> video_frame) {
+ *video_frame_output = std::move(video_frame);
*output_time = base::TimeTicks::Now();
}
diff --git a/chromium/media/video/jpeg_encode_accelerator.cc b/chromium/media/video/jpeg_encode_accelerator.cc
deleted file mode 100644
index 91b2bf2e735..00000000000
--- a/chromium/media/video/jpeg_encode_accelerator.cc
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2018 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/video/jpeg_encode_accelerator.h"
-
-namespace media {
-
-JpegEncodeAccelerator::~JpegEncodeAccelerator() = default;
-
-} // namespace media
diff --git a/chromium/media/video/jpeg_encode_accelerator.h b/chromium/media/video/jpeg_encode_accelerator.h
deleted file mode 100644
index 6653a0359b0..00000000000
--- a/chromium/media/video/jpeg_encode_accelerator.h
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_VIDEO_JPEG_ENCODE_ACCELERATOR_H_
-#define MEDIA_VIDEO_JPEG_ENCODE_ACCELERATOR_H_
-
-#include <stdint.h>
-
-#include "media/base/bitstream_buffer.h"
-#include "media/base/media_export.h"
-#include "media/base/video_frame.h"
-
-namespace media {
-
-// JPEG encoder interface.
-class MEDIA_EXPORT JpegEncodeAccelerator {
- public:
- static constexpr int32_t kInvalidBitstreamBufferId = -1;
- enum Status {
- ENCODE_OK,
-
- HW_JPEG_ENCODE_NOT_SUPPORTED,
-
- // Eg. creation of encoder thread failed.
- THREAD_CREATION_FAILED,
-
- // Invalid argument was passed to an API method, e.g. the format of
- // VideoFrame is not supported.
- INVALID_ARGUMENT,
-
- // Output buffer is inaccessible, e.g. failed to map on another process.
- INACCESSIBLE_OUTPUT_BUFFER,
-
- // Failed to parse the incoming YUV image.
- PARSE_IMAGE_FAILED,
-
- // A fatal failure occurred in the GPU process layer or one of its
- // dependencies. Examples of such failures include hardware failures,
- // driver failures, library failures, and so on. Client is responsible for
- // destroying JEA after receiving this.
- PLATFORM_FAILURE,
-
- // Largest used enum. This should be adjusted when new errors are added.
- LARGEST_ERROR_ENUM = PLATFORM_FAILURE,
- };
-
- class MEDIA_EXPORT Client {
- public:
- // Callback called after each successful Encode().
- // Parameters:
- // |buffer_id| is |output_buffer.id()| of the corresponding Encode() call.
- // |encoded_picture_size| is the actual size of encoded JPEG image in
- // the BitstreamBuffer provided through encode().
- virtual void VideoFrameReady(int32_t buffer_id,
- size_t encoded_picture_size) = 0;
-
- // Callback to notify errors. Client is responsible for destroying JEA when
- // receiving a fatal error, i.e. PLATFORM_FAILURE. For other errors, client
- // is informed about the buffer that failed to encode and may continue
- // using the same instance of JEA.
- // Parameters:
- // |buffer_id| is |output_buffer.id()| of the corresponding Encode() call
- // that resulted in the error.
- // |status| would be one of the values of Status except ENCODE_OK.
- virtual void NotifyError(int32_t buffer_id, Status status) = 0;
-
- protected:
- virtual ~Client() {}
- };
-
- // Destroys the encoder: all pending inputs are dropped immediately. This
- // call may asynchronously free system resources, but its client-visible
- // effects are synchronous. After destructor returns, no more callbacks
- // will be made on the client.
- virtual ~JpegEncodeAccelerator() = 0;
-
- // Initializes the JPEG encoder. Should be called once per encoder
- // construction. This call is synchronous and returns ENCODE_OK iff
- // initialization is successful.
- // Parameters:
- // |client| is the Client interface for encode callback. The provided
- // pointer must be valid until destructor is called.
- virtual Status Initialize(Client* client) = 0;
-
- // Gets the maximum possible encoded result size.
- virtual size_t GetMaxCodedBufferSize(const gfx::Size& picture_size) = 0;
-
- // Encodes the given |video_frame| that contains a YUV image. Client will
- // receive the encoded result in Client::VideoFrameReady() callback with the
- // corresponding |output_buffer.id()|, or receive
- // Client::NotifyError() callback.
- // Parameters:
- // |video_frame| contains the YUV image to be encoded.
- // |quality| of JPEG image. The range is from 1~100. High value means high
- // quality.
- // |exif_buffer| contains Exif data to be inserted into JPEG image. If it's
- // nullptr, the JFIF APP0 segment will be inserted.
- // |output_buffer| that contains output buffer for encoded result. Clients
- // should call GetMaxCodedBufferSize() and allocate the buffer accordingly.
- // The buffer needs to be valid until VideoFrameReady() or NotifyError() is
- // called.
- virtual void Encode(scoped_refptr<media::VideoFrame> video_frame,
- int quality,
- const BitstreamBuffer* exif_buffer,
- const BitstreamBuffer& output_buffer) = 0;
-};
-
-} // namespace media
-
-#endif // MEDIA_VIDEO_JPEG_ENCODE_ACCELERATOR_H_
diff --git a/chromium/media/video/mjpeg_decode_accelerator.cc b/chromium/media/video/mjpeg_decode_accelerator.cc
deleted file mode 100644
index b538ce87c4d..00000000000
--- a/chromium/media/video/mjpeg_decode_accelerator.cc
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/video/mjpeg_decode_accelerator.h"
-
-namespace media {
-
-MjpegDecodeAccelerator::~MjpegDecodeAccelerator() = default;
-
-} // namespace media
diff --git a/chromium/media/video/mjpeg_decode_accelerator.h b/chromium/media/video/mjpeg_decode_accelerator.h
deleted file mode 100644
index 6d023f4fa0d..00000000000
--- a/chromium/media/video/mjpeg_decode_accelerator.h
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_VIDEO_MJPEG_DECODE_ACCELERATOR_H_
-#define MEDIA_VIDEO_MJPEG_DECODE_ACCELERATOR_H_
-
-#include <stdint.h>
-
-#include "media/base/bitstream_buffer.h"
-#include "media/base/media_export.h"
-#include "media/base/video_frame.h"
-
-namespace media {
-
-// MJPEG decoder interface.
-// The input are JPEG images including headers (Huffman tables may be omitted).
-// The output color format is I420. The decoder will convert the color format
-// to I420 if the color space or subsampling does not match that and if it is
-// capable of doing so. The client is responsible for allocating buffers and
-// keeps the ownership of them.
-// The intended use case of this interface is decoding MJPEG images coming
-// from camera capture. It can also be used for normal still JPEG image
-// decoding, but normal JPEG images may use more JPEG features that may not be
-// supported by a particular accelerator implementation and/or platform.
-class MEDIA_EXPORT MjpegDecodeAccelerator {
- public:
- // Callback for JPEG decoder initialization.
- typedef base::Callback<void(bool success)> InitCB;
-
- static const int32_t kInvalidBitstreamBufferId = -1;
-
- // Enumeration of decode errors generated by NotifyError callback. These
- // values are persisted to logs. Entries should not be renumbered and numeric
- // values should never be reused.
- enum Error {
- // No error. Decode succeeded.
- NO_ERRORS = 0,
- // Invalid argument was passed to an API method, e.g. the output buffer is
- // too small, JPEG width/height are too big for JDA.
- INVALID_ARGUMENT = 1,
- // Encoded input is unreadable, e.g. failed to map on another process.
- UNREADABLE_INPUT = 2,
- // Failed to parse compressed JPEG picture.
- PARSE_JPEG_FAILED = 3,
- // Failed to decode JPEG due to unsupported JPEG features, such as profiles,
- // coding mode, or color formats.
- UNSUPPORTED_JPEG = 4,
- // A fatal failure occurred in the GPU process layer or one of its
- // dependencies. Examples of such failures include hardware failures,
- // driver failures, library failures, browser programming errors, and so
- // on. Client is responsible for destroying JDA after receiving this.
- PLATFORM_FAILURE = 5,
- // Largest used enum. This should be adjusted when new errors are added.
- MJDA_ERROR_CODE_MAX = PLATFORM_FAILURE,
- };
-
- class MEDIA_EXPORT Client {
- public:
- // Callback called after each successful Decode().
- // Parameters:
- // |bitstream_buffer_id| is the id of BitstreamBuffer corresponding to
- // Decode() call.
- virtual void VideoFrameReady(int32_t bitstream_buffer_id) = 0;
-
- // Callback to notify errors. Client is responsible for destroying JDA when
- // receiving a fatal error, i.e. PLATFORM_FAILURE. For other errors, client
- // is informed about the buffer that failed to decode and may continue
- // using the same instance of JDA.
- // Parameters:
- // |error| is the error code.
- // |bitstream_buffer_id| is the bitstream buffer id that resulted in the
- // recoverable error. For PLATFORM_FAILURE, |bitstream_buffer_id| may be
- // kInvalidBitstreamBufferId if the error was not related to any
- // particular buffer being processed.
- virtual void NotifyError(int32_t bitstream_buffer_id, Error error) = 0;
-
- protected:
- virtual ~Client() {}
- };
-
- // Destroys the decoder: all pending inputs are dropped immediately. This
- // call may asynchronously free system resources, but its client-visible
- // effects are synchronous. After destructor returns, no more callbacks
- // will be made on the client.
- virtual ~MjpegDecodeAccelerator() = 0;
-
- // Initializes the MJPEG decoder. Should be called once per decoder
- // construction. This call is synchronous and returns true iff initialization
- // is successful.
- // Parameters:
- // |client| is the Client interface for decode callback. The provided
- // pointer must be valid until destructor is called.
- virtual bool Initialize(Client* client) = 0;
-
- // TODO(c.padhi): Remove the sync version and rename this to Initialize.
- // Async version of above Initialize(..) function. Executes the |init_cb|
- // upon completion.
- virtual void InitializeAsync(Client* client, InitCB init_cb) {}
-
- // Decodes the given bitstream buffer that contains one JPEG frame. It
- // supports at least baseline encoding defined in JPEG ISO/IEC 10918-1. The
- // decoder will convert the color format to I420 or return UNSUPPORTED_JPEG
- // if it cannot convert. Client still owns this buffer, but should deallocate
- // or access the buffer only after receiving a decode callback VideoFrameReady
- // with the corresponding bitstream_buffer_id, or NotifyError.
- // Parameters:
- // |bitstream_buffer| contains encoded JPEG frame.
- // |video_frame| contains an allocated video frame for the output.
- // Client is responsible for filling the coded_size of video_frame and
- // allocating its backing buffer. For now, only shared memory backed
- // VideoFrames are supported. After decode completes, the decoded JPEG frame
- // will be filled into the |video_frame|.
- // Ownership of the |bitstream_buffer| and |video_frame| remains with the
- // client. The client is not allowed to deallocate them before
- // VideoFrameReady or NotifyError() is invoked for given id of
- // |bitstream_buffer|, or destructor returns.
- virtual void Decode(const BitstreamBuffer& bitstream_buffer,
- const scoped_refptr<media::VideoFrame>& video_frame) = 0;
-
- // Returns true when the JPEG decoder is supported. This can be called before
- // Initialize().
- virtual bool IsSupported() = 0;
-};
-
-} // namespace media
-
-#endif // MEDIA_VIDEO_MJPEG_DECODE_ACCELERATOR_H_
diff --git a/chromium/media/video/mock_gpu_memory_buffer_video_frame_pool.cc b/chromium/media/video/mock_gpu_memory_buffer_video_frame_pool.cc
index 5534ea5e171..80af4a0264a 100644
--- a/chromium/media/video/mock_gpu_memory_buffer_video_frame_pool.cc
+++ b/chromium/media/video/mock_gpu_memory_buffer_video_frame_pool.cc
@@ -15,10 +15,10 @@ MockGpuMemoryBufferVideoFramePool::MockGpuMemoryBufferVideoFramePool(
MockGpuMemoryBufferVideoFramePool::~MockGpuMemoryBufferVideoFramePool() {}
void MockGpuMemoryBufferVideoFramePool::MaybeCreateHardwareFrame(
- const scoped_refptr<VideoFrame>& video_frame,
+ scoped_refptr<VideoFrame> video_frame,
FrameReadyCB frame_ready_cb) {
frame_ready_cbs_->push_back(
- base::BindOnce(std::move(frame_ready_cb), video_frame));
+ base::BindOnce(std::move(frame_ready_cb), std::move(video_frame)));
}
} // namespace media
diff --git a/chromium/media/video/mock_gpu_memory_buffer_video_frame_pool.h b/chromium/media/video/mock_gpu_memory_buffer_video_frame_pool.h
index c16f89b1195..2509460b8e9 100644
--- a/chromium/media/video/mock_gpu_memory_buffer_video_frame_pool.h
+++ b/chromium/media/video/mock_gpu_memory_buffer_video_frame_pool.h
@@ -17,7 +17,7 @@ class MockGpuMemoryBufferVideoFramePool : public GpuMemoryBufferVideoFramePool {
std::vector<base::OnceClosure>* frame_ready_cbs);
~MockGpuMemoryBufferVideoFramePool() override;
- void MaybeCreateHardwareFrame(const scoped_refptr<VideoFrame>& video_frame,
+ void MaybeCreateHardwareFrame(scoped_refptr<VideoFrame> video_frame,
FrameReadyCB frame_ready_cb) override;
MOCK_METHOD0(Abort, void());
diff --git a/chromium/media/video/mock_video_decode_accelerator.h b/chromium/media/video/mock_video_decode_accelerator.h
index bcbddede81d..bdb89164994 100644
--- a/chromium/media/video/mock_video_decode_accelerator.h
+++ b/chromium/media/video/mock_video_decode_accelerator.h
@@ -27,7 +27,7 @@ class MockVideoDecodeAccelerator : public VideoDecodeAccelerator {
~MockVideoDecodeAccelerator() override;
MOCK_METHOD2(Initialize, bool(const Config& config, Client* client));
- MOCK_METHOD1(Decode, void(const BitstreamBuffer& bitstream_buffer));
+ MOCK_METHOD1(Decode, void(BitstreamBuffer bitstream_buffer));
MOCK_METHOD2(Decode,
void(scoped_refptr<DecoderBuffer> buffer, int32_t bitstream_id));
MOCK_METHOD1(AssignPictureBuffers,
diff --git a/chromium/media/video/mock_video_encode_accelerator.h b/chromium/media/video/mock_video_encode_accelerator.h
index bbc4d647663..bc72c163fcc 100644
--- a/chromium/media/video/mock_video_encode_accelerator.h
+++ b/chromium/media/video/mock_video_encode_accelerator.h
@@ -23,9 +23,8 @@ class MockVideoEncodeAccelerator : public VideoEncodeAccelerator {
bool(const VideoEncodeAccelerator::Config& config,
VideoEncodeAccelerator::Client* client));
MOCK_METHOD2(Encode,
- void(const scoped_refptr<VideoFrame>& frame,
- bool force_keyframe));
- MOCK_METHOD1(UseOutputBitstreamBuffer, void(const BitstreamBuffer& buffer));
+ void(scoped_refptr<VideoFrame> frame, bool force_keyframe));
+ MOCK_METHOD1(UseOutputBitstreamBuffer, void(BitstreamBuffer buffer));
MOCK_METHOD2(RequestEncodingParametersChange,
void(uint32_t bitrate, uint32_t framerate));
MOCK_METHOD0(Destroy, void());
diff --git a/chromium/media/video/video_decode_accelerator.cc b/chromium/media/video/video_decode_accelerator.cc
index e3d4880b7ac..58f6665113c 100644
--- a/chromium/media/video/video_decode_accelerator.cc
+++ b/chromium/media/video/video_decode_accelerator.cc
@@ -29,6 +29,17 @@ void VideoDecodeAccelerator::Client::NotifyInitializationComplete(
NOTREACHED() << "By default deferred initialization is not supported.";
}
+void VideoDecodeAccelerator::Client::ProvidePictureBuffersWithVisibleRect(
+ uint32_t requested_num_of_buffers,
+ VideoPixelFormat format,
+ uint32_t textures_per_buffer,
+ const gfx::Size& dimensions,
+ const gfx::Rect& visible_rect,
+ uint32_t texture_target) {
+ ProvidePictureBuffers(requested_num_of_buffers, format, textures_per_buffer,
+ dimensions, texture_target);
+}
+
VideoDecodeAccelerator::~VideoDecodeAccelerator() = default;
void VideoDecodeAccelerator::Decode(scoped_refptr<DecoderBuffer> buffer,
diff --git a/chromium/media/video/video_decode_accelerator.h b/chromium/media/video/video_decode_accelerator.h
index d357945e694..b0a5448e40a 100644
--- a/chromium/media/video/video_decode_accelerator.h
+++ b/chromium/media/video/video_decode_accelerator.h
@@ -212,6 +212,17 @@ class MEDIA_EXPORT VideoDecodeAccelerator {
const gfx::Size& dimensions,
uint32_t texture_target) = 0;
+ // This is the same as ProvidePictureBuffers() except that |visible_rect| is
+ // also included. The default implementation of VDA would call
+ // ProvidePictureBuffers().
+ virtual void ProvidePictureBuffersWithVisibleRect(
+ uint32_t requested_num_of_buffers,
+ VideoPixelFormat format,
+ uint32_t textures_per_buffer,
+ const gfx::Size& dimensions,
+ const gfx::Rect& visible_rect,
+ uint32_t texture_target);
+
// Callback to dismiss picture buffer that was assigned earlier.
virtual void DismissPictureBuffer(int32_t picture_buffer_id) = 0;
@@ -270,7 +281,7 @@ class MEDIA_EXPORT VideoDecodeAccelerator {
// NotifyEndOfBitstreamBuffer() with the bitstream buffer id.
// Parameters:
// |bitstream_buffer| is the input bitstream that is sent for decoding.
- virtual void Decode(const BitstreamBuffer& bitstream_buffer) = 0;
+ virtual void Decode(BitstreamBuffer bitstream_buffer) = 0;
// Decodes given decoder buffer that contains at most one frame. Once
// decoder is done with processing |buffer| it will call
diff --git a/chromium/media/video/video_encode_accelerator.h b/chromium/media/video/video_encode_accelerator.h
index e58d0036051..90ca6a11fbf 100644
--- a/chromium/media/video/video_encode_accelerator.h
+++ b/chromium/media/video/video_encode_accelerator.h
@@ -243,15 +243,14 @@ class MEDIA_EXPORT VideoEncodeAccelerator {
// Parameters:
// |frame| is the VideoFrame that is to be encoded.
// |force_keyframe| forces the encoding of a keyframe for this frame.
- virtual void Encode(const scoped_refptr<VideoFrame>& frame,
- bool force_keyframe) = 0;
+ virtual void Encode(scoped_refptr<VideoFrame> frame, bool force_keyframe) = 0;
// Send a bitstream buffer to the encoder to be used for storing future
// encoded output. Each call here with a given |buffer| will cause the buffer
// to be filled once, then returned with BitstreamBufferReady().
// Parameters:
// |buffer| is the bitstream buffer to use for output.
- virtual void UseOutputBitstreamBuffer(const BitstreamBuffer& buffer) = 0;
+ virtual void UseOutputBitstreamBuffer(BitstreamBuffer buffer) = 0;
// Request a change to the encoding parameters. This is only a request,
// fulfilled on a best-effort basis.
diff --git a/chromium/media/webrtc/audio_processor.cc b/chromium/media/webrtc/audio_processor.cc
index fb905116be0..f095b159db5 100644
--- a/chromium/media/webrtc/audio_processor.cc
+++ b/chromium/media/webrtc/audio_processor.cc
@@ -75,8 +75,6 @@ AudioProcessor::AudioProcessor(const AudioParameters& audio_parameters,
AudioProcessor::~AudioProcessor() {
StopEchoCancellationDump();
- if (audio_processing_)
- audio_processing_->UpdateHistogramsOnCallEnd();
}
// Process the audio from source and return a pointer to the processed data.
@@ -240,22 +238,6 @@ void AudioProcessor::InitializeAPM() {
// Audio processing module construction.
audio_processing_ = base::WrapUnique(ap_builder.Create(ap_config));
- // Noise suppression setup part 2.
- if (settings_.noise_suppression != NoiseSuppressionType::kDisabled) {
- int err = audio_processing_->noise_suppression()->set_level(
- webrtc::NoiseSuppression::kHigh);
- err |= audio_processing_->noise_suppression()->Enable(true);
- DCHECK_EQ(err, 0);
- }
-
- // AGC setup part 2.
- if (settings_.automatic_gain_control != AutomaticGainControlType::kDisabled) {
- int err = audio_processing_->gain_control()->set_mode(
- webrtc::GainControl::kAdaptiveAnalog);
- err |= audio_processing_->gain_control()->Enable(true);
- DCHECK_EQ(err, 0);
- }
-
webrtc::AudioProcessing::Config apm_config = audio_processing_->GetConfig();
// Typing detection setup.
@@ -275,7 +257,17 @@ void AudioProcessor::InitializeAPM() {
// High-pass filter setup.
apm_config.high_pass_filter.enabled = settings_.high_pass_filter;
- // AGC setup part 3.
+ // Noise suppression setup part 2.
+ apm_config.noise_suppression.enabled =
+ settings_.noise_suppression != NoiseSuppressionType::kDisabled;
+ apm_config.noise_suppression.level =
+ webrtc::AudioProcessing::Config::NoiseSuppression::kHigh;
+
+ // AGC setup part 2.
+ apm_config.gain_controller1.enabled =
+ settings_.automatic_gain_control != AutomaticGainControlType::kDisabled;
+ apm_config.gain_controller1.mode =
+ webrtc::AudioProcessing::Config::GainController1::kAdaptiveAnalog;
if (settings_.automatic_gain_control ==
AutomaticGainControlType::kExperimental ||
settings_.automatic_gain_control ==
@@ -324,9 +316,7 @@ void AudioProcessor::UpdateAnalogLevel(double volume) {
DCHECK_LE(volume, 1.0);
constexpr double kWebRtcMaxVolume = 255;
const int webrtc_volume = volume * kWebRtcMaxVolume;
- int err =
- audio_processing_->gain_control()->set_stream_analog_level(webrtc_volume);
- DCHECK_EQ(err, 0) << "set_stream_analog_level() error: " << err;
+ audio_processing_->set_stream_analog_level(webrtc_volume);
}
void AudioProcessor::FeedDataToAPM(const AudioBus& source) {
@@ -360,7 +350,7 @@ base::Optional<double> AudioProcessor::GetNewVolumeFromAGC(double volume) {
constexpr double kWebRtcMaxVolume = 255;
const int webrtc_volume = volume * kWebRtcMaxVolume;
const int new_webrtc_volume =
- audio_processing_->gain_control()->stream_analog_level();
+ audio_processing_->recommended_stream_analog_level();
return new_webrtc_volume == webrtc_volume
? base::nullopt
diff --git a/chromium/media/webrtc/audio_processor_unittest.cc b/chromium/media/webrtc/audio_processor_unittest.cc
index acfb1ac18a6..31d5d1eeea2 100644
--- a/chromium/media/webrtc/audio_processor_unittest.cc
+++ b/chromium/media/webrtc/audio_processor_unittest.cc
@@ -133,14 +133,13 @@ class WebRtcAudioProcessorTest : public ::testing::Test {
EXPECT_TRUE(ap_config.echo_canceller.enabled);
EXPECT_FALSE(ap_config.echo_canceller.mobile_mode);
EXPECT_TRUE(ap_config.high_pass_filter.enabled);
+ EXPECT_TRUE(ap_config.gain_controller1.enabled);
+ EXPECT_EQ(ap_config.gain_controller1.mode,
+ ap_config.gain_controller1.kAdaptiveAnalog);
+ EXPECT_TRUE(ap_config.noise_suppression.enabled);
+ EXPECT_EQ(ap_config.noise_suppression.level,
+ ap_config.noise_suppression.kHigh);
EXPECT_TRUE(ap_config.voice_detection.enabled);
-
- EXPECT_TRUE(audio_processing->noise_suppression()->is_enabled());
- EXPECT_TRUE(audio_processing->noise_suppression()->level() ==
- webrtc::NoiseSuppression::kHigh);
- EXPECT_TRUE(audio_processing->gain_control()->is_enabled());
- EXPECT_TRUE(audio_processing->gain_control()->mode() ==
- webrtc::GainControl::kAdaptiveAnalog);
}
AudioProcessingSettings GetEnabledAudioProcessingSettings() const {