summaryrefslogtreecommitdiff
path: root/chromium/media
diff options
context:
space:
mode:
authorAllan Sandfeld Jensen <allan.jensen@qt.io>2019-02-13 16:23:34 +0100
committerAllan Sandfeld Jensen <allan.jensen@qt.io>2019-02-14 10:37:21 +0000
commit38a9a29f4f9436cace7f0e7abf9c586057df8a4e (patch)
treec4e8c458dc595bc0ddb435708fa2229edfd00bd4 /chromium/media
parente684a3455bcc29a6e3e66a004e352dea4e1141e7 (diff)
downloadqtwebengine-chromium-38a9a29f4f9436cace7f0e7abf9c586057df8a4e.tar.gz
BASELINE: Update Chromium to 73.0.3683.37
Change-Id: I08c9af2948b645f671e5d933aca1f7a90ea372f2 Reviewed-by: Michael Brüning <michael.bruning@qt.io>
Diffstat (limited to 'chromium/media')
-rw-r--r--chromium/media/BUILD.gn23
-rw-r--r--chromium/media/OWNERS1
-rw-r--r--chromium/media/PRESUBMIT.py21
-rw-r--r--chromium/media/audio/BUILD.gn62
-rw-r--r--chromium/media/audio/alsa/alsa_input.cc14
-rw-r--r--chromium/media/audio/alsa/alsa_output_unittest.cc37
-rw-r--r--chromium/media/audio/alsa/alsa_util.cc223
-rw-r--r--chromium/media/audio/alsa/alsa_util.h21
-rw-r--r--chromium/media/audio/alsa/alsa_util_unittest.cc44
-rw-r--r--chromium/media/audio/alsa/alsa_wrapper.cc115
-rw-r--r--chromium/media/audio/alsa/alsa_wrapper.h55
-rw-r--r--chromium/media/audio/alsa/audio_manager_alsa.cc5
-rw-r--r--chromium/media/audio/alsa/mock_alsa_wrapper.cc13
-rw-r--r--chromium/media/audio/alsa/mock_alsa_wrapper.h188
-rw-r--r--chromium/media/audio/android/opensles_input.cc12
-rw-r--r--chromium/media/audio/android/opensles_output.cc12
-rw-r--r--chromium/media/audio/audio_device_description.cc2
-rw-r--r--chromium/media/audio/audio_features.cc6
-rw-r--r--chromium/media/audio/audio_features.h4
-rw-r--r--chromium/media/audio/audio_input_device.cc47
-rw-r--r--chromium/media/audio/audio_input_device.h9
-rw-r--r--chromium/media/audio/audio_input_device_unittest.cc9
-rw-r--r--chromium/media/audio/audio_manager.cc14
-rw-r--r--chromium/media/audio/audio_manager_unittest.cc2
-rw-r--r--chromium/media/audio/audio_output_device.cc2
-rw-r--r--chromium/media/audio/cras/audio_manager_cras.cc2
-rw-r--r--chromium/media/audio/cras/cras_input.cc8
-rw-r--r--chromium/media/audio/cras/cras_unified.cc4
-rw-r--r--chromium/media/audio/sounds/audio_stream_handler_unittest.cc4
-rw-r--r--chromium/media/audio/sounds/sounds_manager.h1
-rw-r--r--chromium/media/audio/sounds/sounds_manager_unittest.cc6
-rw-r--r--chromium/media/audio/sounds/test_data.h4
-rw-r--r--chromium/media/audio/sounds/wav_audio_handler_unittest.cc8
-rw-r--r--chromium/media/audio/win/audio_low_latency_input_win.cc21
-rw-r--r--chromium/media/audio/win/audio_low_latency_output_win.cc8
-rw-r--r--chromium/media/audio/win/avrt_wrapper_win.cc4
-rw-r--r--chromium/media/base/BUILD.gn20
-rw-r--r--chromium/media/base/android/media_codec_loop.cc1
-rw-r--r--chromium/media/base/android/media_codec_loop.h4
-rw-r--r--chromium/media/base/android/media_codec_loop_unittest.cc4
-rw-r--r--chromium/media/base/android/media_drm_bridge.cc65
-rw-r--r--chromium/media/base/android/media_drm_bridge.h21
-rw-r--r--chromium/media/base/android/media_drm_bridge_client.h4
-rw-r--r--chromium/media/base/android/media_drm_bridge_unittest.cc164
-rw-r--r--chromium/media/base/android/media_player_bridge_unittest.cc1
-rw-r--r--chromium/media/base/audio_bus_unittest.cc28
-rw-r--r--chromium/media/base/audio_decoder.h21
-rw-r--r--chromium/media/base/audio_hash.cc8
-rw-r--r--chromium/media/base/audio_parameters_unittest.cc6
-rw-r--r--chromium/media/base/audio_renderer_mixer_unittest.cc8
-rw-r--r--chromium/media/base/audio_shifter.cc8
-rw-r--r--chromium/media/base/audio_shifter.h28
-rw-r--r--chromium/media/base/audio_timestamp_helper_unittest.cc4
-rw-r--r--chromium/media/base/bit_reader_unittest.cc6
-rw-r--r--chromium/media/base/cdm_context.cc4
-rw-r--r--chromium/media/base/cdm_context.h37
-rw-r--r--chromium/media/base/cdm_promise_adapter.cc2
-rw-r--r--chromium/media/base/channel_layout.cc8
-rw-r--r--chromium/media/base/channel_mixer_unittest.cc14
-rw-r--r--chromium/media/base/channel_mixing_matrix_unittest.cc4
-rw-r--r--chromium/media/base/container_names.cc4
-rw-r--r--chromium/media/base/data_buffer_unittest.cc8
-rw-r--r--chromium/media/base/data_source.cc9
-rw-r--r--chromium/media/base/data_source.h10
-rw-r--r--chromium/media/base/decode_capabilities.h33
-rw-r--r--chromium/media/base/decoder_buffer.h2
-rw-r--r--chromium/media/base/decoder_buffer_unittest.cc14
-rw-r--r--chromium/media/base/decryptor.cc4
-rw-r--r--chromium/media/base/decryptor.h4
-rw-r--r--chromium/media/base/fake_demuxer_stream.cc7
-rw-r--r--chromium/media/base/fallback_video_decoder.cc37
-rw-r--r--chromium/media/base/fallback_video_decoder.h28
-rw-r--r--chromium/media/base/ipc/media_param_traits_macros.h18
-rw-r--r--chromium/media/base/key_systems.cc14
-rw-r--r--chromium/media/base/key_systems_unittest.cc8
-rw-r--r--chromium/media/base/logging_override_if_enabled.h22
-rw-r--r--chromium/media/base/media.cc3
-rw-r--r--chromium/media/base/media_client.h6
-rw-r--r--chromium/media/base/media_log.cc9
-rw-r--r--chromium/media/base/media_log.h13
-rw-r--r--chromium/media/base/media_log_event.h5
-rw-r--r--chromium/media/base/media_log_unittest.cc11
-rw-r--r--chromium/media/base/media_observer.h3
-rw-r--r--chromium/media/base/media_status.h9
-rw-r--r--chromium/media/base/media_switches.cc67
-rw-r--r--chromium/media/base/media_switches.h7
-rw-r--r--chromium/media/base/media_types.h32
-rw-r--r--chromium/media/base/mime_util_internal.cc123
-rw-r--r--chromium/media/base/mime_util_internal.h8
-rw-r--r--chromium/media/base/mime_util_unittest.cc9
-rw-r--r--chromium/media/base/mock_filters.cc4
-rw-r--r--chromium/media/base/mock_filters.h47
-rw-r--r--chromium/media/base/overlay_info.h3
-rw-r--r--chromium/media/base/pipeline.h12
-rw-r--r--chromium/media/base/pipeline_impl.cc194
-rw-r--r--chromium/media/base/pipeline_impl.h3
-rw-r--r--chromium/media/base/pipeline_impl_unittest.cc6
-rw-r--r--chromium/media/base/reentrancy_checker.cc26
-rw-r--r--chromium/media/base/reentrancy_checker.h64
-rw-r--r--chromium/media/base/reentrancy_checker_unittest.cc38
-rw-r--r--chromium/media/base/renderer_client.h12
-rw-r--r--chromium/media/base/seekable_buffer_unittest.cc4
-rw-r--r--chromium/media/base/serial_runner.h1
-rw-r--r--chromium/media/base/supported_types.cc (renamed from chromium/media/base/decode_capabilities.cc)91
-rw-r--r--chromium/media/base/supported_types.h26
-rw-r--r--chromium/media/base/supported_types_unittest.cc (renamed from chromium/media/base/decode_capabilities_unittest.cc)86
-rw-r--r--chromium/media/base/test_data_util.cc5
-rw-r--r--chromium/media/base/test_helpers.cc33
-rw-r--r--chromium/media/base/test_helpers.h17
-rw-r--r--chromium/media/base/unaligned_shared_memory_unittest.cc6
-rw-r--r--chromium/media/base/video_bitrate_allocation.h2
-rw-r--r--chromium/media/base/video_decoder.h24
-rw-r--r--chromium/media/base/video_decoder_config.h32
-rw-r--r--chromium/media/base/video_frame.cc79
-rw-r--r--chromium/media/base/video_frame.h15
-rw-r--r--chromium/media/base/video_frame_layout.cc14
-rw-r--r--chromium/media/base/video_frame_layout.h8
-rw-r--r--chromium/media/base/video_frame_layout_unittest.cc34
-rw-r--r--chromium/media/base/video_frame_unittest.cc9
-rw-r--r--chromium/media/base/video_types.cc5
-rw-r--r--chromium/media/base/video_types.h4
-rw-r--r--chromium/media/base/waiting.h43
-rw-r--r--chromium/media/base/win/BUILD.gn25
-rw-r--r--chromium/media/base/win/d3d11_create_device_cb.h (renamed from chromium/media/gpu/windows/d3d11_create_device_cb.h)6
-rw-r--r--chromium/media/base/win/d3d11_mocks.cc (renamed from chromium/media/gpu/windows/d3d11_mocks.cc)3
-rw-r--r--chromium/media/base/win/d3d11_mocks.h (renamed from chromium/media/gpu/windows/d3d11_mocks.h)33
-rw-r--r--chromium/media/blink/DEPS3
-rw-r--r--chromium/media/blink/cache_util.cc6
-rw-r--r--chromium/media/blink/cache_util_unittest.cc4
-rw-r--r--chromium/media/blink/cdm_session_adapter.h7
-rw-r--r--chromium/media/blink/key_system_config_selector.cc1
-rw-r--r--chromium/media/blink/lru.h4
-rw-r--r--chromium/media/blink/multibuffer.h8
-rw-r--r--chromium/media/blink/multibuffer_data_source.cc104
-rw-r--r--chromium/media/blink/multibuffer_data_source.h9
-rw-r--r--chromium/media/blink/multibuffer_data_source_unittest.cc57
-rw-r--r--chromium/media/blink/resource_multibuffer_data_provider.cc49
-rw-r--r--chromium/media/blink/resource_multibuffer_data_provider.h8
-rw-r--r--chromium/media/blink/run_all_unittests.cc52
-rw-r--r--chromium/media/blink/url_index.cc142
-rw-r--r--chromium/media/blink/url_index.h63
-rw-r--r--chromium/media/blink/url_index_unittest.cc35
-rw-r--r--chromium/media/blink/video_frame_compositor.cc63
-rw-r--r--chromium/media/blink/video_frame_compositor.h44
-rw-r--r--chromium/media/blink/video_frame_compositor_unittest.cc40
-rw-r--r--chromium/media/blink/webaudiosourceprovider_impl_unittest.cc4
-rw-r--r--chromium/media/blink/webcontentdecryptionmoduleaccess_impl.cc5
-rw-r--r--chromium/media/blink/webcontentdecryptionmoduleaccess_impl.h3
-rw-r--r--chromium/media/blink/webmediacapabilitiesclient_impl.cc7
-rw-r--r--chromium/media/blink/webmediaplayer_cast_android.cc34
-rw-r--r--chromium/media/blink/webmediaplayer_cast_android.h2
-rw-r--r--chromium/media/blink/webmediaplayer_delegate.h4
-rw-r--r--chromium/media/blink/webmediaplayer_impl.cc424
-rw-r--r--chromium/media/blink/webmediaplayer_impl.h38
-rw-r--r--chromium/media/blink/webmediaplayer_impl_unittest.cc270
-rw-r--r--chromium/media/blink/webmediaplayer_params.cc12
-rw-r--r--chromium/media/blink/webmediaplayer_params.h25
-rw-r--r--chromium/media/blink/websourcebuffer_impl.cc5
-rw-r--r--chromium/media/capabilities/BUILD.gn7
-rw-r--r--chromium/media/capabilities/in_memory_video_decode_stats_db_impl.h2
-rw-r--r--chromium/media/capabilities/learning_helper.cc78
-rw-r--r--chromium/media/capabilities/learning_helper.h35
-rw-r--r--chromium/media/capabilities/video_decode_stats_db_impl.cc4
-rw-r--r--chromium/media/capabilities/video_decode_stats_db_impl.h2
-rw-r--r--chromium/media/capture/BUILD.gn10
-rw-r--r--chromium/media/capture/content/capture_resolution_chooser_unittest.cc8
-rw-r--r--chromium/media/capture/content/smooth_event_sampler_unittest.cc11
-rw-r--r--chromium/media/capture/mojom/video_capture_types.mojom1
-rw-r--r--chromium/media/capture/mojom/video_capture_types_mojom_traits.cc5
-rw-r--r--chromium/media/capture/video/android/java/src/org/chromium/media/VideoCaptureCamera2.java392
-rw-r--r--chromium/media/capture/video/android/video_capture_device_android.cc13
-rw-r--r--chromium/media/capture/video/chromeos/OWNERS1
-rw-r--r--chromium/media/capture/video/chromeos/camera_hal_dispatcher_impl.cc3
-rw-r--r--chromium/media/capture/video/chromeos/stream_buffer_manager.cc5
-rw-r--r--chromium/media/capture/video/fake_video_capture_device.cc6
-rw-r--r--chromium/media/capture/video/linux/camera_config_chromeos_unittest.cc9
-rw-r--r--chromium/media/capture/video/linux/v4l2_capture_device.h4
-rw-r--r--chromium/media/capture/video/mac/video_capture_device_factory_mac.mm4
-rw-r--r--chromium/media/capture/video/scoped_video_capture_jpeg_decoder.cc40
-rw-r--r--chromium/media/capture/video/scoped_video_capture_jpeg_decoder.h45
-rw-r--r--chromium/media/capture/video/video_capture_device.cc4
-rw-r--r--chromium/media/capture/video/video_capture_device_client.cc7
-rw-r--r--chromium/media/capture/video/video_capture_device_unittest.cc17
-rw-r--r--chromium/media/capture/video/video_capture_jpeg_decoder_impl.cc25
-rw-r--r--chromium/media/capture/video/video_capture_jpeg_decoder_impl.h15
-rw-r--r--chromium/media/capture/video/win/sink_input_pin_win.cc4
-rw-r--r--chromium/media/capture/video/win/video_capture_device_factory_win.cc19
-rw-r--r--chromium/media/capture/video/win/video_capture_device_win.cc4
-rw-r--r--chromium/media/capture/video_capture_types.cc6
-rw-r--r--chromium/media/cast/logging/log_deserializer.cc16
-rw-r--r--chromium/media/cast/logging/log_deserializer.h14
-rw-r--r--chromium/media/cast/logging/raw_event_subscriber_bundle.cc12
-rw-r--r--chromium/media/cast/logging/raw_event_subscriber_bundle.h7
-rw-r--r--chromium/media/cast/logging/serialize_deserialize_test.cc25
-rw-r--r--chromium/media/cast/logging/stats_event_subscriber.cc40
-rw-r--r--chromium/media/cast/logging/stats_event_subscriber.h3
-rw-r--r--chromium/media/cast/receiver/audio_decoder_unittest.cc4
-rw-r--r--chromium/media/cast/sender/audio_encoder_unittest.cc42
-rw-r--r--chromium/media/cast/sender/external_video_encoder.cc6
-rw-r--r--chromium/media/cast/sender/h264_vt_encoder.cc8
-rw-r--r--chromium/media/cast/sender/h264_vt_encoder_unittest.cc3
-rw-r--r--chromium/media/cdm/aes_decryptor.cc8
-rw-r--r--chromium/media/cdm/aes_decryptor.h5
-rw-r--r--chromium/media/cdm/aes_decryptor_unittest.cc40
-rw-r--r--chromium/media/cdm/cdm_adapter.cc9
-rw-r--r--chromium/media/cdm/cdm_adapter.h3
-rw-r--r--chromium/media/cdm/cdm_adapter_unittest.cc16
-rw-r--r--chromium/media/cdm/cenc_decryptor_unittest.cc15
-rw-r--r--chromium/media/cdm/cenc_utils.cc7
-rw-r--r--chromium/media/cdm/cenc_utils_unittest.cc42
-rw-r--r--chromium/media/cdm/json_web_key_unittest.cc73
-rw-r--r--chromium/media/cdm/library_cdm/clear_key_cdm/cdm_file_io_test.cc6
-rw-r--r--chromium/media/cdm/library_cdm/clear_key_cdm/cdm_video_decoder.cc2
-rw-r--r--chromium/media/device_monitors/system_message_window_win.cc8
-rw-r--r--chromium/media/ffmpeg/ffmpeg_common.cc5
-rw-r--r--chromium/media/ffmpeg/ffmpeg_common_unittest.cc6
-rw-r--r--chromium/media/filters/BUILD.gn3
-rw-r--r--chromium/media/filters/android/media_codec_audio_decoder.cc22
-rw-r--r--chromium/media/filters/android/media_codec_audio_decoder.h14
-rw-r--r--chromium/media/filters/aom_video_decoder.cc13
-rw-r--r--chromium/media/filters/aom_video_decoder.h13
-rw-r--r--chromium/media/filters/audio_decoder_stream_unittest.cc4
-rw-r--r--chromium/media/filters/audio_decoder_unittest.cc4
-rw-r--r--chromium/media/filters/audio_renderer_algorithm_unittest.cc4
-rw-r--r--chromium/media/filters/audio_timestamp_validator_unittest.cc5
-rw-r--r--chromium/media/filters/blocking_url_protocol.cc4
-rw-r--r--chromium/media/filters/chunk_demuxer.cc11
-rw-r--r--chromium/media/filters/chunk_demuxer.h17
-rw-r--r--chromium/media/filters/chunk_demuxer_unittest.cc18
-rw-r--r--chromium/media/filters/decoder_selector.cc15
-rw-r--r--chromium/media/filters/decoder_selector.h5
-rw-r--r--chromium/media/filters/decoder_selector_unittest.cc14
-rw-r--r--chromium/media/filters/decoder_stream.cc19
-rw-r--r--chromium/media/filters/decoder_stream.h5
-rw-r--r--chromium/media/filters/decoder_stream_traits.cc11
-rw-r--r--chromium/media/filters/decoder_stream_traits.h32
-rw-r--r--chromium/media/filters/decrypting_audio_decoder.cc25
-rw-r--r--chromium/media/filters/decrypting_audio_decoder.h27
-rw-r--r--chromium/media/filters/decrypting_audio_decoder_unittest.cc35
-rw-r--r--chromium/media/filters/decrypting_demuxer_stream.cc6
-rw-r--r--chromium/media/filters/decrypting_demuxer_stream.h5
-rw-r--r--chromium/media/filters/decrypting_demuxer_stream_unittest.cc14
-rw-r--r--chromium/media/filters/decrypting_media_resource.cc104
-rw-r--r--chromium/media/filters/decrypting_media_resource.h83
-rw-r--r--chromium/media/filters/decrypting_media_resource_unittest.cc228
-rw-r--r--chromium/media/filters/decrypting_video_decoder.cc27
-rw-r--r--chromium/media/filters/decrypting_video_decoder.h29
-rw-r--r--chromium/media/filters/decrypting_video_decoder_unittest.cc25
-rw-r--r--chromium/media/filters/demuxer_perftest.cc4
-rw-r--r--chromium/media/filters/fake_video_decoder.cc13
-rw-r--r--chromium/media/filters/fake_video_decoder.h13
-rw-r--r--chromium/media/filters/ffmpeg_audio_decoder.cc11
-rw-r--r--chromium/media/filters/ffmpeg_audio_decoder.h11
-rw-r--r--chromium/media/filters/ffmpeg_demuxer.cc41
-rw-r--r--chromium/media/filters/ffmpeg_demuxer_unittest.cc26
-rw-r--r--chromium/media/filters/ffmpeg_glue.cc13
-rw-r--r--chromium/media/filters/ffmpeg_h264_to_annex_b_bitstream_converter.cc20
-rw-r--r--chromium/media/filters/ffmpeg_video_decoder.cc13
-rw-r--r--chromium/media/filters/ffmpeg_video_decoder.h13
-rw-r--r--chromium/media/filters/frame_processor.cc81
-rw-r--r--chromium/media/filters/frame_processor_unittest.cc450
-rw-r--r--chromium/media/filters/fuchsia/fuchsia_video_decoder.cc67
-rw-r--r--chromium/media/filters/fuchsia/fuchsia_video_decoder.h9
-rw-r--r--chromium/media/filters/fuchsia/fuchsia_video_decoder_unittest.cc17
-rw-r--r--chromium/media/filters/gpu_video_decoder.cc62
-rw-r--r--chromium/media/filters/gpu_video_decoder.h21
-rw-r--r--chromium/media/filters/ivf_parser.cc10
-rw-r--r--chromium/media/filters/jpeg_parser.cc6
-rw-r--r--chromium/media/filters/memory_data_source.cc5
-rw-r--r--chromium/media/filters/memory_data_source.h7
-rw-r--r--chromium/media/filters/offloading_video_decoder.cc22
-rw-r--r--chromium/media/filters/offloading_video_decoder.h13
-rw-r--r--chromium/media/filters/offloading_video_decoder_unittest.cc15
-rw-r--r--chromium/media/filters/pipeline_controller.cc32
-rw-r--r--chromium/media/filters/pipeline_controller.h7
-rw-r--r--chromium/media/filters/pipeline_controller_unittest.cc37
-rw-r--r--chromium/media/filters/stream_parser_factory.cc7
-rw-r--r--chromium/media/filters/video_decoder_stream_unittest.cc6
-rw-r--r--chromium/media/filters/video_renderer_algorithm_unittest.cc20
-rw-r--r--chromium/media/filters/vp9_bool_decoder.cc3
-rw-r--r--chromium/media/filters/vp9_compressed_header_parser.cc3
-rw-r--r--chromium/media/filters/vp9_parser.cc44
-rw-r--r--chromium/media/filters/vp9_parser.h2
-rw-r--r--chromium/media/filters/vp9_parser_encrypted_fuzzertest.cc70
-rw-r--r--chromium/media/filters/vp9_uncompressed_header_parser.cc4
-rw-r--r--chromium/media/filters/vpx_video_decoder.cc13
-rw-r--r--chromium/media/filters/vpx_video_decoder.h13
-rw-r--r--chromium/media/formats/ac3/ac3_util.cc6
-rw-r--r--chromium/media/formats/common/stream_parser_test_base.h4
-rw-r--r--chromium/media/formats/mp2t/es_adapter_video_unittest.cc18
-rw-r--r--chromium/media/formats/mp2t/es_parser_mpeg1audio_unittest.cc4
-rw-r--r--chromium/media/formats/mp2t/mp2t_stream_parser_unittest.cc4
-rw-r--r--chromium/media/formats/mp2t/timestamp_unroller_unittest.cc4
-rw-r--r--chromium/media/formats/mp4/avc.cc19
-rw-r--r--chromium/media/formats/mp4/avc_unittest.cc8
-rw-r--r--chromium/media/formats/mp4/box_definitions.cc3
-rw-r--r--chromium/media/formats/mp4/box_reader.h2
-rw-r--r--chromium/media/formats/mp4/dolby_vision.cc3
-rw-r--r--chromium/media/formats/mp4/hevc.cc3
-rw-r--r--chromium/media/formats/mp4/sample_to_group_iterator_unittest.cc6
-rw-r--r--chromium/media/formats/mp4/track_run_iterator.cc6
-rw-r--r--chromium/media/formats/mp4/track_run_iterator_unittest.cc83
-rw-r--r--chromium/media/formats/mpeg/adts_constants.cc6
-rw-r--r--chromium/media/formats/webm/webm_cluster_parser.cc4
-rw-r--r--chromium/media/formats/webm/webm_cluster_parser_unittest.cc56
-rw-r--r--chromium/media/formats/webm/webm_content_encodings_client.cc35
-rw-r--r--chromium/media/formats/webm/webm_parser.cc19
-rw-r--r--chromium/media/formats/webm/webm_parser_unittest.cc6
-rw-r--r--chromium/media/gpu/BUILD.gn101
-rw-r--r--chromium/media/gpu/OWNERS2
-rw-r--r--chromium/media/gpu/accelerated_video_decoder.h8
-rw-r--r--chromium/media/gpu/android/android_video_surface_chooser.h3
-rw-r--r--chromium/media/gpu/android/android_video_surface_chooser_impl.cc5
-rw-r--r--chromium/media/gpu/android/android_video_surface_chooser_impl_unittest.cc54
-rw-r--r--chromium/media/gpu/android/codec_image.cc3
-rw-r--r--chromium/media/gpu/android/codec_image.h9
-rw-r--r--chromium/media/gpu/android/image_reader_gl_owner.cc9
-rw-r--r--chromium/media/gpu/android/image_reader_gl_owner.h10
-rw-r--r--chromium/media/gpu/android/media_codec_video_decoder.cc20
-rw-r--r--chromium/media/gpu/android/media_codec_video_decoder.h18
-rw-r--r--chromium/media/gpu/android/media_codec_video_decoder_unittest.cc2
-rw-r--r--chromium/media/gpu/android/mock_texture_owner.h7
-rw-r--r--chromium/media/gpu/android/surface_chooser_helper.cc4
-rw-r--r--chromium/media/gpu/android/surface_chooser_helper.h3
-rw-r--r--chromium/media/gpu/android/surface_chooser_helper_unittest.cc10
-rw-r--r--chromium/media/gpu/android/surface_texture_gl_owner.cc5
-rw-r--r--chromium/media/gpu/android/surface_texture_gl_owner.h13
-rw-r--r--chromium/media/gpu/android/texture_owner.h8
-rw-r--r--chromium/media/gpu/command_buffer_helper.cc17
-rw-r--r--chromium/media/gpu/command_buffer_helper.h10
-rw-r--r--chromium/media/gpu/fake_command_buffer_helper.cc8
-rw-r--r--chromium/media/gpu/fake_command_buffer_helper.h2
-rw-r--r--chromium/media/gpu/fake_jpeg_decode_accelerator.cc2
-rw-r--r--chromium/media/gpu/h264_decoder.cc49
-rw-r--r--chromium/media/gpu/h264_decoder.h24
-rw-r--r--chromium/media/gpu/h264_decoder_unittest.cc38
-rw-r--r--chromium/media/gpu/image_processor.cc50
-rw-r--r--chromium/media/gpu/image_processor.h137
-rw-r--r--chromium/media/gpu/image_processor_factory.cc46
-rw-r--r--chromium/media/gpu/image_processor_factory.h56
-rw-r--r--chromium/media/gpu/image_processor_test.cc129
-rw-r--r--chromium/media/gpu/ipc/service/picture_buffer_manager.cc171
-rw-r--r--chromium/media/gpu/ipc/service/picture_buffer_manager_unittest.cc67
-rw-r--r--chromium/media/gpu/ipc/service/vda_video_decoder.cc15
-rw-r--r--chromium/media/gpu/ipc/service/vda_video_decoder.h16
-rw-r--r--chromium/media/gpu/ipc/service/vda_video_decoder_unittest.cc8
-rw-r--r--chromium/media/gpu/jpeg_decode_accelerator_unittest.cc46
-rw-r--r--chromium/media/gpu/jpeg_encode_accelerator_unittest.cc19
-rw-r--r--chromium/media/gpu/libyuv_image_processor.cc192
-rw-r--r--chromium/media/gpu/libyuv_image_processor.h103
-rw-r--r--chromium/media/gpu/platform_video_frame.cc109
-rw-r--r--chromium/media/gpu/platform_video_frame.h26
-rw-r--r--chromium/media/gpu/test/BUILD.gn107
-rw-r--r--chromium/media/gpu/v4l2/generic_v4l2_device.cc22
-rw-r--r--chromium/media/gpu/v4l2/generic_v4l2_device.h3
-rw-r--r--chromium/media/gpu/v4l2/tegra_v4l2_device.cc9
-rw-r--r--chromium/media/gpu/v4l2/tegra_v4l2_device.h4
-rw-r--r--chromium/media/gpu/v4l2/v4l2_decode_surface.cc93
-rw-r--r--chromium/media/gpu/v4l2/v4l2_decode_surface.h122
-rw-r--r--chromium/media/gpu/v4l2/v4l2_decode_surface_handler.h38
-rw-r--r--chromium/media/gpu/v4l2/v4l2_device.cc84
-rw-r--r--chromium/media/gpu/v4l2/v4l2_device.h21
-rw-r--r--chromium/media/gpu/v4l2/v4l2_device_unittest.cc22
-rw-r--r--chromium/media/gpu/v4l2/v4l2_h264_accelerator.cc468
-rw-r--r--chromium/media/gpu/v4l2/v4l2_h264_accelerator.h78
-rw-r--r--chromium/media/gpu/v4l2/v4l2_image_processor.cc200
-rw-r--r--chromium/media/gpu/v4l2/v4l2_image_processor.h90
-rw-r--r--chromium/media/gpu/v4l2/v4l2_jpeg_decode_accelerator.cc27
-rw-r--r--chromium/media/gpu/v4l2/v4l2_jpeg_decode_accelerator.h5
-rw-r--r--chromium/media/gpu/v4l2/v4l2_jpeg_encode_accelerator.h1
-rw-r--r--chromium/media/gpu/v4l2/v4l2_slice_video_decode_accelerator.cc1503
-rw-r--r--chromium/media/gpu/v4l2/v4l2_slice_video_decode_accelerator.h78
-rw-r--r--chromium/media/gpu/v4l2/v4l2_video_decode_accelerator.cc149
-rw-r--r--chromium/media/gpu/v4l2/v4l2_video_decode_accelerator.h15
-rw-r--r--chromium/media/gpu/v4l2/v4l2_video_encode_accelerator.cc293
-rw-r--r--chromium/media/gpu/v4l2/v4l2_video_encode_accelerator.h31
-rw-r--r--chromium/media/gpu/v4l2/v4l2_vp8_accelerator.cc259
-rw-r--r--chromium/media/gpu/v4l2/v4l2_vp8_accelerator.h44
-rw-r--r--chromium/media/gpu/v4l2/v4l2_vp9_accelerator.cc419
-rw-r--r--chromium/media/gpu/v4l2/v4l2_vp9_accelerator.h58
-rw-r--r--chromium/media/gpu/vaapi/BUILD.gn47
-rw-r--r--chromium/media/gpu/vaapi/vaapi_h264_accelerator.cc15
-rw-r--r--chromium/media/gpu/vaapi/vaapi_jpeg_decode_accelerator.cc5
-rw-r--r--chromium/media/gpu/vaapi/vaapi_jpeg_decode_accelerator_unittest.cc10
-rw-r--r--chromium/media/gpu/vaapi/vaapi_jpeg_encode_accelerator.cc10
-rw-r--r--chromium/media/gpu/vaapi/vaapi_jpeg_encoder.cc41
-rw-r--r--chromium/media/gpu/vaapi/vaapi_picture_native_pixmap.cc22
-rw-r--r--chromium/media/gpu/vaapi/vaapi_picture_native_pixmap.h2
-rw-r--r--chromium/media/gpu/vaapi/vaapi_picture_native_pixmap_egl.cc5
-rw-r--r--chromium/media/gpu/vaapi/vaapi_picture_native_pixmap_ozone.cc5
-rw-r--r--chromium/media/gpu/vaapi/vaapi_utils.cc18
-rw-r--r--chromium/media/gpu/vaapi/vaapi_video_decode_accelerator.cc341
-rw-r--r--chromium/media/gpu/vaapi/vaapi_video_decode_accelerator.h93
-rw-r--r--chromium/media/gpu/vaapi/vaapi_video_decode_accelerator_unittest.cc88
-rw-r--r--chromium/media/gpu/vaapi/vaapi_video_encode_accelerator.cc28
-rw-r--r--chromium/media/gpu/vaapi/vaapi_vp9_accelerator.cc16
-rw-r--r--chromium/media/gpu/vaapi/vaapi_wrapper.cc284
-rw-r--r--chromium/media/gpu/vaapi/vaapi_wrapper.h53
-rw-r--r--chromium/media/gpu/vaapi/vp8_encoder.cc11
-rw-r--r--chromium/media/gpu/video_decode_accelerator_tests.cc208
-rw-r--r--chromium/media/gpu/video_decode_accelerator_unittest.cc68
-rw-r--r--chromium/media/gpu/video_encode_accelerator_unittest.cc164
-rw-r--r--chromium/media/gpu/vp8_decoder.cc13
-rw-r--r--chromium/media/gpu/vp8_decoder.h1
-rw-r--r--chromium/media/gpu/vp9_decoder.cc11
-rw-r--r--chromium/media/gpu/vp9_decoder.h1
-rw-r--r--chromium/media/gpu/vt_video_decode_accelerator_mac.cc33
-rw-r--r--chromium/media/gpu/vt_video_decode_accelerator_mac.h15
-rw-r--r--chromium/media/gpu/windows/d3d11_cdm_proxy.cc32
-rw-r--r--chromium/media/gpu/windows/d3d11_cdm_proxy.h2
-rw-r--r--chromium/media/gpu/windows/d3d11_cdm_proxy_unittest.cc357
-rw-r--r--chromium/media/gpu/windows/d3d11_decryptor.h2
-rw-r--r--chromium/media/gpu/windows/d3d11_decryptor_unittest.cc75
-rw-r--r--chromium/media/gpu/windows/d3d11_h264_accelerator.cc14
-rw-r--r--chromium/media/gpu/windows/d3d11_video_decoder.cc429
-rw-r--r--chromium/media/gpu/windows/d3d11_video_decoder.h95
-rw-r--r--chromium/media/gpu/windows/d3d11_video_decoder_unittest.cc245
-rw-r--r--chromium/media/gpu/windows/d3d11_vp9_accelerator.cc16
-rw-r--r--chromium/media/gpu/windows/dxva_picture_buffer_win.cc26
-rw-r--r--chromium/media/gpu/windows/dxva_picture_buffer_win.h3
-rw-r--r--chromium/media/gpu/windows/dxva_video_decode_accelerator_win.cc70
-rw-r--r--chromium/media/gpu/windows/dxva_video_decode_accelerator_win.h3
-rw-r--r--chromium/media/learning/common/BUILD.gn9
-rw-r--r--chromium/media/learning/common/labelled_example.cc94
-rw-r--r--chromium/media/learning/common/labelled_example.h118
-rw-r--r--chromium/media/learning/common/labelled_example_unittest.cc233
-rw-r--r--chromium/media/learning/common/learning_session.h4
-rw-r--r--chromium/media/learning/common/learning_task.cc12
-rw-r--r--chromium/media/learning/common/learning_task.h53
-rw-r--r--chromium/media/learning/common/training_example.cc69
-rw-r--r--chromium/media/learning/common/training_example.h142
-rw-r--r--chromium/media/learning/common/training_example_unittest.cc134
-rw-r--r--chromium/media/learning/common/value.cc6
-rw-r--r--chromium/media/learning/common/value.h15
-rw-r--r--chromium/media/learning/common/value_unittest.cc13
-rw-r--r--chromium/media/learning/impl/BUILD.gn30
-rw-r--r--chromium/media/learning/impl/distribution_reporter.cc78
-rw-r--r--chromium/media/learning/impl/distribution_reporter.h54
-rw-r--r--chromium/media/learning/impl/distribution_reporter_unittest.cc71
-rw-r--r--chromium/media/learning/impl/extra_trees_trainer.cc69
-rw-r--r--chromium/media/learning/impl/extra_trees_trainer.h61
-rw-r--r--chromium/media/learning/impl/extra_trees_trainer_unittest.cc211
-rw-r--r--chromium/media/learning/impl/fisher_iris_dataset.cc198
-rw-r--r--chromium/media/learning/impl/fisher_iris_dataset.h39
-rw-r--r--chromium/media/learning/impl/learning_session_impl.cc6
-rw-r--r--chromium/media/learning/impl/learning_session_impl.h2
-rw-r--r--chromium/media/learning/impl/learning_session_impl_unittest.cc8
-rw-r--r--chromium/media/learning/impl/learning_task_controller.h4
-rw-r--r--chromium/media/learning/impl/learning_task_controller_impl.cc72
-rw-r--r--chromium/media/learning/impl/learning_task_controller_impl.h46
-rw-r--r--chromium/media/learning/impl/learning_task_controller_impl_unittest.cc143
-rw-r--r--chromium/media/learning/impl/lookup_table_trainer.cc50
-rw-r--r--chromium/media/learning/impl/lookup_table_trainer.h37
-rw-r--r--chromium/media/learning/impl/lookup_table_trainer_unittest.cc176
-rw-r--r--chromium/media/learning/impl/model.h7
-rw-r--r--chromium/media/learning/impl/one_hot.cc121
-rw-r--r--chromium/media/learning/impl/one_hot.h83
-rw-r--r--chromium/media/learning/impl/one_hot_unittest.cc118
-rw-r--r--chromium/media/learning/impl/random_number_generator.cc60
-rw-r--r--chromium/media/learning/impl/random_number_generator.h62
-rw-r--r--chromium/media/learning/impl/random_number_generator_unittest.cc102
-rw-r--r--chromium/media/learning/impl/random_tree_trainer.cc383
-rw-r--r--chromium/media/learning/impl/random_tree_trainer.h73
-rw-r--r--chromium/media/learning/impl/random_tree_trainer_unittest.cc239
-rw-r--r--chromium/media/learning/impl/target_distribution.cc45
-rw-r--r--chromium/media/learning/impl/target_distribution.h44
-rw-r--r--chromium/media/learning/impl/target_distribution_unittest.cc48
-rw-r--r--chromium/media/learning/impl/test_random_number_generator.cc26
-rw-r--r--chromium/media/learning/impl/test_random_number_generator.h28
-rw-r--r--chromium/media/learning/impl/training_algorithm.h21
-rw-r--r--chromium/media/learning/impl/voting_ensemble.cc26
-rw-r--r--chromium/media/learning/impl/voting_ensemble.h38
-rw-r--r--chromium/media/learning/mojo/mojo_learning_session_impl.cc2
-rw-r--r--chromium/media/learning/mojo/mojo_learning_session_impl.h2
-rw-r--r--chromium/media/learning/mojo/mojo_learning_session_impl_unittest.cc8
-rw-r--r--chromium/media/learning/mojo/public/cpp/learning_mojom_traits.cc8
-rw-r--r--chromium/media/learning/mojo/public/cpp/learning_mojom_traits.h12
-rw-r--r--chromium/media/learning/mojo/public/cpp/mojo_learning_session.cc2
-rw-r--r--chromium/media/learning/mojo/public/cpp/mojo_learning_session.h2
-rw-r--r--chromium/media/learning/mojo/public/cpp/mojo_learning_session_unittest.cc6
-rw-r--r--chromium/media/learning/mojo/public/mojom/learning_session.mojom2
-rw-r--r--chromium/media/learning/mojo/public/mojom/learning_types.mojom4
-rw-r--r--chromium/media/learning/mojo/public/mojom/learning_types.typemap4
-rw-r--r--chromium/media/media_options.gni12
-rw-r--r--chromium/media/midi/java/src/org/chromium/midi/MidiInputPortAndroid.java9
-rw-r--r--chromium/media/midi/java/src/org/chromium/midi/MidiManagerAndroid.java46
-rw-r--r--chromium/media/midi/message_util_unittest.cc11
-rw-r--r--chromium/media/midi/midi_manager.cc19
-rw-r--r--chromium/media/midi/midi_manager.h6
-rw-r--r--chromium/media/midi/midi_manager_alsa.cc4
-rw-r--r--chromium/media/midi/midi_manager_alsa.h6
-rw-r--r--chromium/media/midi/midi_manager_android.cc2
-rw-r--r--chromium/media/midi/midi_manager_android.h6
-rw-r--r--chromium/media/midi/midi_manager_usb.h8
-rw-r--r--chromium/media/midi/midi_manager_usb_unittest.cc4
-rw-r--r--chromium/media/midi/midi_manager_win.cc3
-rw-r--r--chromium/media/midi/midi_service.cc1
-rw-r--r--chromium/media/midi/usb_midi_descriptor_parser_unittest.cc12
-rw-r--r--chromium/media/midi/usb_midi_device_factory_android.cc1
-rw-r--r--chromium/media/midi/usb_midi_input_stream.h1
-rw-r--r--chromium/media/midi/usb_midi_input_stream_unittest.cc16
-rw-r--r--chromium/media/midi/usb_midi_output_stream.cc12
-rw-r--r--chromium/media/mojo/clients/mojo_audio_decoder.cc19
-rw-r--r--chromium/media/mojo/clients/mojo_audio_decoder.h18
-rw-r--r--chromium/media/mojo/clients/mojo_audio_decoder_unittest.cc23
-rw-r--r--chromium/media/mojo/clients/mojo_cdm.cc9
-rw-r--r--chromium/media/mojo/clients/mojo_cdm.h2
-rw-r--r--chromium/media/mojo/clients/mojo_cdm_factory.cc12
-rw-r--r--chromium/media/mojo/clients/mojo_cdm_unittest.cc4
-rw-r--r--chromium/media/mojo/clients/mojo_renderer.cc16
-rw-r--r--chromium/media/mojo/clients/mojo_renderer.h3
-rw-r--r--chromium/media/mojo/clients/mojo_renderer_factory.cc10
-rw-r--r--chromium/media/mojo/clients/mojo_renderer_factory.h12
-rw-r--r--chromium/media/mojo/clients/mojo_video_decoder.cc90
-rw-r--r--chromium/media/mojo/clients/mojo_video_decoder.h15
-rw-r--r--chromium/media/mojo/common/media_type_converters.cc22
-rw-r--r--chromium/media/mojo/common/media_type_converters.h14
-rw-r--r--chromium/media/mojo/common/media_type_converters_unittest.cc12
-rw-r--r--chromium/media/mojo/common/mojo_decoder_buffer_converter_unittest.cc26
-rw-r--r--chromium/media/mojo/interfaces/BUILD.gn10
-rw-r--r--chromium/media/mojo/interfaces/audio_decoder.mojom4
-rw-r--r--chromium/media/mojo/interfaces/audio_decoder_config_struct_traits_unittest.cc4
-rw-r--r--chromium/media/mojo/interfaces/cdm_key_information.typemap23
-rw-r--r--chromium/media/mojo/interfaces/cdm_key_information_mojom_traits.cc85
-rw-r--r--chromium/media/mojo/interfaces/cdm_key_information_mojom_traits.h47
-rw-r--r--chromium/media/mojo/interfaces/cdm_key_information_mojom_traits_unittest.cc26
-rw-r--r--chromium/media/mojo/interfaces/content_decryption_module.mojom11
-rw-r--r--chromium/media/mojo/interfaces/content_decryption_module.typemap1
-rw-r--r--chromium/media/mojo/interfaces/media_types.mojom8
-rw-r--r--chromium/media/mojo/interfaces/media_types.typemap4
-rw-r--r--chromium/media/mojo/interfaces/renderer.mojom12
-rw-r--r--chromium/media/mojo/interfaces/supported_video_decoder_config_struct_traits.cc32
-rw-r--r--chromium/media/mojo/interfaces/supported_video_decoder_config_struct_traits.h54
-rw-r--r--chromium/media/mojo/interfaces/typemaps.gni1
-rw-r--r--chromium/media/mojo/interfaces/video_decoder.mojom4
-rw-r--r--chromium/media/mojo/interfaces/video_decoder.typemap20
-rw-r--r--chromium/media/mojo/interfaces/video_decoder_config_struct_traits_unittest.cc4
-rw-r--r--chromium/media/mojo/services/BUILD.gn4
-rw-r--r--chromium/media/mojo/services/gpu_mojo_media_client.cc43
-rw-r--r--chromium/media/mojo/services/gpu_mojo_media_client.h10
-rw-r--r--chromium/media/mojo/services/interface_factory_impl.cc7
-rw-r--r--chromium/media/mojo/services/interface_factory_impl.h6
-rw-r--r--chromium/media/mojo/services/main.cc15
-rw-r--r--chromium/media/mojo/services/media_service.cc6
-rw-r--r--chromium/media/mojo/services/media_service.h4
-rw-r--r--chromium/media/mojo/services/media_service_unittest.cc3
-rw-r--r--chromium/media/mojo/services/mojo_audio_decoder_service.cc7
-rw-r--r--chromium/media/mojo/services/mojo_audio_decoder_service.h4
-rw-r--r--chromium/media/mojo/services/mojo_cdm_service.cc5
-rw-r--r--chromium/media/mojo/services/mojo_cdm_service_context.cc6
-rw-r--r--chromium/media/mojo/services/mojo_media_client.cc4
-rw-r--r--chromium/media/mojo/services/mojo_media_client.h3
-rw-r--r--chromium/media/mojo/services/mojo_renderer_service.cc8
-rw-r--r--chromium/media/mojo/services/mojo_renderer_service.h3
-rw-r--r--chromium/media/mojo/services/mojo_video_decoder_service.cc17
-rw-r--r--chromium/media/mojo/services/mojo_video_decoder_service.h2
-rw-r--r--chromium/media/mojo/services/video_decode_perf_history.cc14
-rw-r--r--chromium/media/mojo/services/video_decode_perf_history.h5
-rw-r--r--chromium/media/muxers/webm_muxer_fuzzertest.cc7
-rw-r--r--chromium/media/remoting/courier_renderer.cc3
-rw-r--r--chromium/media/remoting/courier_renderer_unittest.cc3
-rw-r--r--chromium/media/remoting/media_remoting_rpc.proto1
-rw-r--r--chromium/media/remoting/proto_enum_utils.cc2
-rw-r--r--chromium/media/remoting/proto_utils.cc5
-rw-r--r--chromium/media/remoting/receiver.cc8
-rw-r--r--chromium/media/remoting/receiver.h3
-rw-r--r--chromium/media/remoting/renderer_controller.cc44
-rw-r--r--chromium/media/remoting/renderer_controller_unittest.cc10
-rw-r--r--chromium/media/renderers/BUILD.gn5
-rw-r--r--chromium/media/renderers/audio_renderer_impl.cc6
-rw-r--r--chromium/media/renderers/audio_renderer_impl.h2
-rw-r--r--chromium/media/renderers/audio_renderer_impl_unittest.cc15
-rw-r--r--chromium/media/renderers/decrypting_renderer.cc197
-rw-r--r--chromium/media/renderers/decrypting_renderer.h95
-rw-r--r--chromium/media/renderers/decrypting_renderer_factory.cc34
-rw-r--r--chromium/media/renderers/decrypting_renderer_factory.h50
-rw-r--r--chromium/media/renderers/decrypting_renderer_unittest.cc268
-rw-r--r--chromium/media/renderers/default_decoder_factory.cc9
-rw-r--r--chromium/media/renderers/paint_canvas_video_renderer.cc8
-rw-r--r--chromium/media/renderers/renderer_impl.cc12
-rw-r--r--chromium/media/renderers/renderer_impl.h3
-rw-r--r--chromium/media/renderers/renderer_impl_unittest.cc2
-rw-r--r--chromium/media/renderers/video_renderer_impl.cc6
-rw-r--r--chromium/media/renderers/video_renderer_impl.h2
-rw-r--r--chromium/media/renderers/video_renderer_impl_unittest.cc5
-rw-r--r--chromium/media/renderers/video_resource_updater.cc145
-rw-r--r--chromium/media/renderers/video_resource_updater_unittest.cc47
-rw-r--r--chromium/media/test/BUILD.gn6
-rw-r--r--chromium/media/video/BUILD.gn5
-rw-r--r--chromium/media/video/gpu_memory_buffer_video_frame_pool.cc1
-rw-r--r--chromium/media/video/gpu_video_accelerator_factories.h4
-rw-r--r--chromium/media/video/h264_parser.cc18
-rw-r--r--chromium/media/video/h264_poc.cc4
-rw-r--r--chromium/media/video/mock_gpu_video_accelerator_factories.h1
-rw-r--r--chromium/media/video/supported_video_decoder_config.cc53
-rw-r--r--chromium/media/video/supported_video_decoder_config.h54
-rw-r--r--chromium/media/video/supported_video_decoder_config_unittest.cc104
-rw-r--r--chromium/media/video/trace_util.cc19
-rw-r--r--chromium/media/video/trace_util.h24
-rw-r--r--chromium/media/webrtc/audio_processor.cc39
-rw-r--r--chromium/media/webrtc/audio_processor_controls.h2
-rw-r--r--chromium/media/webrtc/audio_processor_unittest.cc4
602 files changed, 16098 insertions, 7065 deletions
diff --git a/chromium/media/BUILD.gn b/chromium/media/BUILD.gn
index b4b5298e615..f07b064dfa9 100644
--- a/chromium/media/BUILD.gn
+++ b/chromium/media/BUILD.gn
@@ -21,19 +21,20 @@ buildflag_header("media_buildflags") {
"ENABLE_AC3_EAC3_AUDIO_DEMUXING=$enable_ac3_eac3_audio_demuxing",
"ENABLE_CBCS_ENCRYPTION_SCHEME=$enable_cbcs_encryption_scheme",
"ENABLE_CDM_HOST_VERIFICATION=$enable_cdm_host_verification",
- "ENABLE_HEVC_DEMUXING=$enable_hevc_demuxing",
+ "ENABLE_CDM_STORAGE_ID=$enable_cdm_storage_id",
"ENABLE_DOLBY_VISION_DEMUXING=$enable_dolby_vision_demuxing",
"ENABLE_FFMPEG=$media_use_ffmpeg",
"ENABLE_FFMPEG_VIDEO_DECODERS=$enable_ffmpeg_video_decoders",
+ "ENABLE_HEVC_DEMUXING=$enable_hevc_demuxing",
"ENABLE_HLS_SAMPLE_AES=$enable_hls_sample_aes",
"ENABLE_LIBRARY_CDMS=$enable_library_cdms",
"ENABLE_LIBVPX=$media_use_libvpx",
- "ENABLE_MSE_MPEG2TS_STREAM_PARSER=$enable_mse_mpeg2ts_stream_parser",
- "ENABLE_MPEG_H_AUDIO_DEMUXING=$enable_mpeg_h_audio_demuxing",
- "ENABLE_RUNTIME_MEDIA_RENDERER_SELECTION=$enable_runtime_media_renderer_selection",
- "ENABLE_CDM_STORAGE_ID=$enable_cdm_storage_id",
+ "ENABLE_LOGGING_OVERRIDE=$enable_logging_override",
"ENABLE_MEDIA_REMOTING=$enable_media_remoting",
"ENABLE_MEDIA_REMOTING_RPC=$enable_media_remoting_rpc",
+ "ENABLE_MPEG_H_AUDIO_DEMUXING=$enable_mpeg_h_audio_demuxing",
+ "ENABLE_MSE_MPEG2TS_STREAM_PARSER=$enable_mse_mpeg2ts_stream_parser",
+ "ENABLE_RUNTIME_MEDIA_RENDERER_SELECTION=$enable_runtime_media_renderer_selection",
"USE_PROPRIETARY_CODECS=$proprietary_codecs",
]
}
@@ -335,6 +336,18 @@ fuzzer_test("media_vp9_parser_fuzzer") {
libfuzzer_options = [ "max_len = 400000" ]
}
+fuzzer_test("media_vp9_parser_encrypted_fuzzer") {
+ sources = [
+ "filters/vp9_parser_encrypted_fuzzertest.cc",
+ ]
+ deps = [
+ ":test_support",
+ "//base",
+ "//base/test:test_support",
+ ]
+ seed_corpus = "//media/test/data"
+}
+
fuzzer_test("media_vpx_video_decoder_fuzzer") {
sources = [
"filters/vpx_video_decoder_fuzzertest.cc",
diff --git a/chromium/media/OWNERS b/chromium/media/OWNERS
index 8dc88520e2e..6d0a3132f95 100644
--- a/chromium/media/OWNERS
+++ b/chromium/media/OWNERS
@@ -10,7 +10,6 @@
chcunningham@chromium.org
dalecurtis@chromium.org
-hubbe@chromium.org
jrummell@chromium.org
liberato@chromium.org
sandersd@chromium.org
diff --git a/chromium/media/PRESUBMIT.py b/chromium/media/PRESUBMIT.py
index b0a96fe1f99..fa07bf7b06f 100644
--- a/chromium/media/PRESUBMIT.py
+++ b/chromium/media/PRESUBMIT.py
@@ -184,6 +184,26 @@ def _CheckForUseOfLazyInstance(input_api, output_api):
'base::LazyInstance is deprecated; use a thread safe static.', problems)]
return []
+def _CheckNoLoggingOverrideInHeaders(input_api, output_api):
+ """Checks to make sure no .h files include logging_override_if_enabled.h."""
+ files = []
+ pattern = input_api.re.compile(
+ r'^#include\s*"media/base/logging_override_if_enabled.h"',
+ input_api.re.MULTILINE)
+ for f in input_api.AffectedSourceFiles(input_api.FilterSourceFile):
+ if not f.LocalPath().endswith('.h'):
+ continue
+ contents = input_api.ReadFile(f)
+ if pattern.search(contents):
+ files.append(f)
+
+ if len(files):
+ return [output_api.PresubmitError(
+ 'Do not #include "logging_override_if_enabled.h" in header files, '
+ 'since it overrides DVLOG() in every file including the header. '
+ 'Instead, only include it in source files.',
+ files) ]
+ return []
def _CheckChange(input_api, output_api):
results = []
@@ -191,6 +211,7 @@ def _CheckChange(input_api, output_api):
results.extend(_CheckPassByValue(input_api, output_api))
results.extend(_CheckForHistogramOffByOne(input_api, output_api))
results.extend(_CheckForUseOfLazyInstance(input_api, output_api))
+ results.extend(_CheckNoLoggingOverrideInHeaders(input_api, output_api))
return results
diff --git a/chromium/media/audio/BUILD.gn b/chromium/media/audio/BUILD.gn
index 69dd2acc691..982904023e2 100644
--- a/chromium/media/audio/BUILD.gn
+++ b/chromium/media/audio/BUILD.gn
@@ -4,48 +4,28 @@
import("//build/config/linux/pkg_config.gni")
import("//media/media_options.gni")
+import("//tools/generate_stubs/rules.gni")
# When libpulse is not directly linked, use stubs to allow for dlopening of the
# binary.
-if (!link_pulseaudio) {
- action("pulse_generate_stubs") {
+if (use_pulseaudio && !link_pulseaudio) {
+ generate_stubs("libpulse_stubs") {
extra_header = "pulse/pulse_stub_header.fragment"
-
- script = "../../tools/generate_stubs/generate_stubs.py"
- sources = [
- "pulse/pulse.sigs",
- ]
- inputs = [
- extra_header,
- ]
- stubs_filename_root = "pulse_stubs"
-
- outputs = [
- "$target_gen_dir/pulse/$stubs_filename_root.cc",
- "$target_gen_dir/pulse/$stubs_filename_root.h",
- ]
- args = [
- "-i",
- rebase_path("$target_gen_dir/pulse", root_build_dir),
- "-o",
- rebase_path("$target_gen_dir/pulse", root_build_dir),
- "-t",
- "posix_stubs",
- "-e",
- rebase_path(extra_header, root_build_dir),
- "-s",
- stubs_filename_root,
- "-p",
- "media/audio/pulse",
+ sigs = [ "pulse/pulse.sigs" ]
+ output_name = "pulse/pulse_stubs"
+ deps = [
+ "//base",
]
-
- args += rebase_path(sources, root_build_dir)
}
}
config("platform_config") {
+ defines = []
+ if (is_chromecast) {
+ defines += [ "IS_CHROMECAST" ]
+ }
if (use_alsa) {
- defines = [ "USE_ALSA" ]
+ defines += [ "USE_ALSA" ]
}
}
@@ -292,7 +272,7 @@ source_set("audio") {
"cras/cras_unified.h",
]
configs += [ ":libcras" ]
- deps += [ "//chromeos:chromeos" ]
+ deps += [ "//chromeos/audio" ]
}
if (use_pulseaudio) {
@@ -310,9 +290,7 @@ source_set("audio") {
if (link_pulseaudio) {
configs += [ ":libpulse" ]
} else {
- libs += [ "dl" ]
- deps += [ ":pulse_generate_stubs" ]
- sources += get_target_outputs(":pulse_generate_stubs")
+ deps += [ ":libpulse_stubs" ]
}
}
@@ -379,6 +357,12 @@ static_library("test_support") {
"//testing/gmock",
"//testing/gtest",
]
+ if (use_alsa) {
+ sources += [
+ "alsa/mock_alsa_wrapper.cc",
+ "alsa/mock_alsa_wrapper.h",
+ ]
+ }
}
source_set("unit_tests") {
@@ -447,7 +431,10 @@ source_set("unit_tests") {
]
if (!is_chromecast) {
- deps += [ "//chromeos:chromeos" ]
+ deps += [
+ "//chromeos/audio",
+ "//chromeos/dbus:test_support",
+ ]
}
if (use_cras) {
@@ -472,6 +459,7 @@ source_set("unit_tests") {
if (use_alsa) {
sources += [
"alsa/alsa_output_unittest.cc",
+ "alsa/alsa_util_unittest.cc",
"audio_low_latency_input_output_unittest.cc",
]
}
diff --git a/chromium/media/audio/alsa/alsa_input.cc b/chromium/media/audio/alsa/alsa_input.cc
index 13fb7c6070c..a70748274ae 100644
--- a/chromium/media/audio/alsa/alsa_input.cc
+++ b/chromium/media/audio/alsa/alsa_input.cc
@@ -9,8 +9,8 @@
#include "base/bind.h"
#include "base/location.h"
#include "base/logging.h"
-#include "base/macros.h"
#include "base/single_thread_task_runner.h"
+#include "base/stl_util.h"
#include "media/audio/alsa/alsa_output.h"
#include "media/audio/alsa/alsa_util.h"
#include "media/audio/alsa/alsa_wrapper.h"
@@ -56,18 +56,18 @@ bool AlsaPcmInputStream::Open() {
if (device_handle_)
return false; // Already open.
- uint32_t latency_us =
- buffer_duration_.InMicroseconds() * kNumPacketsInRingBuffer;
+ uint32_t packet_us = buffer_duration_.InMicroseconds();
+ uint32_t buffer_us = packet_us * kNumPacketsInRingBuffer;
// Use the same minimum required latency as output.
- latency_us = std::max(latency_us, AlsaPcmOutputStream::kMinLatencyMicros);
+ buffer_us = std::max(buffer_us, AlsaPcmOutputStream::kMinLatencyMicros);
if (device_name_ == kAutoSelectDevice) {
const char* device_names[] = { kDefaultDevice1, kDefaultDevice2 };
- for (size_t i = 0; i < arraysize(device_names); ++i) {
+ for (size_t i = 0; i < base::size(device_names); ++i) {
device_handle_ = alsa_util::OpenCaptureDevice(
wrapper_, device_names[i], params_.channels(), params_.sample_rate(),
- kAlsaSampleFormat, latency_us);
+ kAlsaSampleFormat, buffer_us, packet_us);
if (device_handle_) {
device_name_ = device_names[i];
@@ -77,7 +77,7 @@ bool AlsaPcmInputStream::Open() {
} else {
device_handle_ = alsa_util::OpenCaptureDevice(
wrapper_, device_name_.c_str(), params_.channels(),
- params_.sample_rate(), kAlsaSampleFormat, latency_us);
+ params_.sample_rate(), kAlsaSampleFormat, buffer_us, packet_us);
}
if (device_handle_) {
diff --git a/chromium/media/audio/alsa/alsa_output_unittest.cc b/chromium/media/audio/alsa/alsa_output_unittest.cc
index 8150c329533..bca316b71d4 100644
--- a/chromium/media/audio/alsa/alsa_output_unittest.cc
+++ b/chromium/media/audio/alsa/alsa_output_unittest.cc
@@ -15,6 +15,7 @@
#include "media/audio/alsa/alsa_output.h"
#include "media/audio/alsa/alsa_wrapper.h"
#include "media/audio/alsa/audio_manager_alsa.h"
+#include "media/audio/alsa/mock_alsa_wrapper.h"
#include "media/audio/fake_audio_log_factory.h"
#include "media/audio/mock_audio_source_callback.h"
#include "media/audio/test_audio_thread.h"
@@ -42,42 +43,6 @@ using testing::Unused;
namespace media {
-class MockAlsaWrapper : public AlsaWrapper {
- public:
- MOCK_METHOD3(DeviceNameHint, int(int card,
- const char* iface,
- void*** hints));
- MOCK_METHOD2(DeviceNameGetHint, char*(const void* hint, const char* id));
- MOCK_METHOD1(DeviceNameFreeHint, int(void** hints));
-
- MOCK_METHOD4(PcmOpen, int(snd_pcm_t** handle, const char* name,
- snd_pcm_stream_t stream, int mode));
- MOCK_METHOD1(PcmClose, int(snd_pcm_t* handle));
- MOCK_METHOD1(PcmPrepare, int(snd_pcm_t* handle));
- MOCK_METHOD1(PcmDrop, int(snd_pcm_t* handle));
- MOCK_METHOD2(PcmDelay, int(snd_pcm_t* handle, snd_pcm_sframes_t* delay));
- MOCK_METHOD3(PcmWritei, snd_pcm_sframes_t(snd_pcm_t* handle,
- const void* buffer,
- snd_pcm_uframes_t size));
- MOCK_METHOD3(PcmReadi, snd_pcm_sframes_t(snd_pcm_t* handle,
- void* buffer,
- snd_pcm_uframes_t size));
- MOCK_METHOD3(PcmRecover, int(snd_pcm_t* handle, int err, int silent));
- MOCK_METHOD7(PcmSetParams, int(snd_pcm_t* handle, snd_pcm_format_t format,
- snd_pcm_access_t access, unsigned int channels,
- unsigned int rate, int soft_resample,
- unsigned int latency));
- MOCK_METHOD3(PcmGetParams, int(snd_pcm_t* handle,
- snd_pcm_uframes_t* buffer_size,
- snd_pcm_uframes_t* period_size));
- MOCK_METHOD1(PcmName, const char*(snd_pcm_t* handle));
- MOCK_METHOD1(PcmAvailUpdate, snd_pcm_sframes_t(snd_pcm_t* handle));
- MOCK_METHOD1(PcmState, snd_pcm_state_t(snd_pcm_t* handle));
- MOCK_METHOD1(PcmStart, int(snd_pcm_t* handle));
-
- MOCK_METHOD1(StrError, const char*(int errnum));
-};
-
class MockAudioManagerAlsa : public AudioManagerAlsa {
public:
MockAudioManagerAlsa()
diff --git a/chromium/media/audio/alsa/alsa_util.cc b/chromium/media/audio/alsa/alsa_util.cc
index 127e1719e8b..84b4133507f 100644
--- a/chromium/media/audio/alsa/alsa_util.cc
+++ b/chromium/media/audio/alsa/alsa_util.cc
@@ -6,33 +6,231 @@
#include <stddef.h>
+#include <functional>
+#include <memory>
+
#include "base/logging.h"
+#include "base/time/time.h"
#include "media/audio/alsa/alsa_wrapper.h"
namespace alsa_util {
+namespace {
+
+// Set hardware parameters of PCM. It does the same thing as the corresponding
+// part in snd_pcm_set_params() (https://www.alsa-project.org, source code:
+// https://github.com/tiwai/alsa-lib/blob/master/src/pcm/pcm.c#L8459), except
+// that it configures buffer size and period size both to closest available
+// values instead of forcing the buffer size be 4 times of the period size.
+int ConfigureHwParams(media::AlsaWrapper* wrapper,
+ snd_pcm_t* handle,
+ snd_pcm_format_t format,
+ snd_pcm_access_t access,
+ unsigned int channels,
+ unsigned int sample_rate,
+ int soft_resample,
+ snd_pcm_uframes_t frames_per_buffer,
+ snd_pcm_uframes_t frames_per_period) {
+ int error = 0;
+
+ snd_pcm_hw_params_t* hw_params = nullptr;
+ error = wrapper->PcmHwParamsMalloc(&hw_params);
+ if (error < 0) {
+ LOG(ERROR) << "PcmHwParamsMalloc: " << wrapper->StrError(error);
+ return error;
+ }
+ // |snd_pcm_hw_params_t| is not exposed and requires memory allocation through
+ // ALSA API. Therefore, use a smart pointer to pointer to insure freeing
+ // memory when the function returns.
+ std::unique_ptr<snd_pcm_hw_params_t*,
+ std::function<void(snd_pcm_hw_params_t**)>>
+ params_holder(&hw_params, [wrapper](snd_pcm_hw_params_t** params) {
+ wrapper->PcmHwParamsFree(*params);
+ });
+
+ error = wrapper->PcmHwParamsAny(handle, hw_params);
+ if (error < 0) {
+ LOG(ERROR) << "PcmHwParamsAny: " << wrapper->StrError(error);
+ return error;
+ }
+
+ error = wrapper->PcmHwParamsSetRateResample(handle, hw_params, soft_resample);
+ if (error < 0) {
+ LOG(ERROR) << "PcmHwParamsSetRateResample: " << wrapper->StrError(error);
+ return error;
+ }
+
+ error = wrapper->PcmHwParamsSetAccess(handle, hw_params, access);
+ if (error < 0) {
+ LOG(ERROR) << "PcmHwParamsSetAccess: " << wrapper->StrError(error);
+ return error;
+ }
+
+ error = wrapper->PcmHwParamsSetFormat(handle, hw_params, format);
+ if (error < 0) {
+ LOG(ERROR) << "PcmHwParamsSetFormat: " << wrapper->StrError(error);
+ return error;
+ }
+
+ error = wrapper->PcmHwParamsSetChannels(handle, hw_params, channels);
+ if (error < 0) {
+ LOG(ERROR) << "PcmHwParamsSetChannels: " << wrapper->StrError(error);
+ return error;
+ }
+
+ unsigned int rate = sample_rate;
+ error = wrapper->PcmHwParamsSetRateNear(handle, hw_params, &rate, nullptr);
+ if (error < 0) {
+ LOG(ERROR) << "PcmHwParamsSetRateNear: " << wrapper->StrError(error);
+ return error;
+ }
+ if (rate != sample_rate) {
+ LOG(ERROR) << "Rate doesn't match, required: " << sample_rate
+ << "Hz, but get: " << rate << "Hz.";
+ return -EINVAL;
+ }
+
+ error = wrapper->PcmHwParamsSetBufferSizeNear(handle, hw_params,
+ &frames_per_buffer);
+ if (error < 0) {
+ LOG(ERROR) << "PcmHwParamsSetBufferSizeNear: " << wrapper->StrError(error);
+ return error;
+ }
+
+ int direction = 0;
+ error = wrapper->PcmHwParamsSetPeriodSizeNear(handle, hw_params,
+ &frames_per_period, &direction);
+ if (error < 0) {
+ LOG(ERROR) << "PcmHwParamsSetPeriodSizeNear: " << wrapper->StrError(error);
+ return error;
+ }
+
+ if (frames_per_period > frames_per_buffer / 2) {
+ LOG(ERROR) << "Period size (" << frames_per_period
+ << ") is too big; buffer size = " << frames_per_buffer;
+ return -EINVAL;
+ }
+
+ error = wrapper->PcmHwParams(handle, hw_params);
+ if (error < 0)
+ LOG(ERROR) << "PcmHwParams: " << wrapper->StrError(error);
+
+ return error;
+}
+
+// Set software parameters of PCM. It does the same thing as the corresponding
+// part in snd_pcm_set_params()
+// (https://github.com/tiwai/alsa-lib/blob/master/src/pcm/pcm.c#L8603).
+int ConfigureSwParams(media::AlsaWrapper* wrapper,
+ snd_pcm_t* handle,
+ snd_pcm_uframes_t frames_per_buffer,
+ snd_pcm_uframes_t frames_per_period) {
+ int error = 0;
+
+ snd_pcm_sw_params_t* sw_params = nullptr;
+ error = wrapper->PcmSwParamsMalloc(&sw_params);
+ if (error < 0) {
+ LOG(ERROR) << "PcmSwParamsMalloc: " << wrapper->StrError(error);
+ return error;
+ }
+ // |snd_pcm_sw_params_t| is not exposed and thus use a smart pointer to
+ // pointer to insure freeing memory when the function returns.
+ std::unique_ptr<snd_pcm_sw_params_t*,
+ std::function<void(snd_pcm_sw_params_t**)>>
+ params_holder(&sw_params, [wrapper](snd_pcm_sw_params_t** params) {
+ wrapper->PcmSwParamsFree(*params);
+ });
+
+ error = wrapper->PcmSwParamsCurrent(handle, sw_params);
+ if (error < 0) {
+ LOG(ERROR) << "PcmSwParamsCurrent: " << wrapper->StrError(error);
+ return error;
+ }
+
+ // For playback, start the transfer when the buffer is almost full.
+ int start_threshold =
+ (frames_per_buffer / frames_per_period) * frames_per_period;
+ error =
+ wrapper->PcmSwParamsSetStartThreshold(handle, sw_params, start_threshold);
+ if (error < 0) {
+ LOG(ERROR) << "PcmSwParamsSetStartThreshold: " << wrapper->StrError(error);
+ return error;
+ }
+
+ // For capture, wake capture thread as soon as possible (1 period).
+ error = wrapper->PcmSwParamsSetAvailMin(handle, sw_params, frames_per_period);
+ if (error < 0) {
+ LOG(ERROR) << "PcmSwParamsSetAvailMin: " << wrapper->StrError(error);
+ return error;
+ }
+
+ error = wrapper->PcmSwParams(handle, sw_params);
+ if (error < 0)
+ LOG(ERROR) << "PcmSwParams: " << wrapper->StrError(error);
+
+ return error;
+}
+
+int SetParams(media::AlsaWrapper* wrapper,
+ snd_pcm_t* handle,
+ snd_pcm_format_t format,
+ unsigned int channels,
+ unsigned int rate,
+ unsigned int frames_per_buffer,
+ unsigned int frames_per_period) {
+ int error = ConfigureHwParams(
+ wrapper, handle, format, SND_PCM_ACCESS_RW_INTERLEAVED, channels, rate,
+ 1 /* Enable resampling */, frames_per_buffer, frames_per_period);
+ if (error == 0) {
+ error = ConfigureSwParams(wrapper, handle, frames_per_buffer,
+ frames_per_period);
+ }
+ return error;
+}
+
+} // namespace
+
static snd_pcm_t* OpenDevice(media::AlsaWrapper* wrapper,
const char* device_name,
snd_pcm_stream_t type,
int channels,
int sample_rate,
snd_pcm_format_t pcm_format,
- int latency_us) {
+ int buffer_us,
+ int period_us = 0) {
snd_pcm_t* handle = NULL;
int error = wrapper->PcmOpen(&handle, device_name, type, SND_PCM_NONBLOCK);
if (error < 0) {
- LOG(WARNING) << "PcmOpen: " << device_name << ","
- << wrapper->StrError(error);
+ LOG(ERROR) << "PcmOpen: " << device_name << "," << wrapper->StrError(error);
return NULL;
}
- error = wrapper->PcmSetParams(handle, pcm_format,
- SND_PCM_ACCESS_RW_INTERLEAVED, channels,
- sample_rate, 1, latency_us);
+ error =
+ wrapper->PcmSetParams(handle, pcm_format, SND_PCM_ACCESS_RW_INTERLEAVED,
+ channels, sample_rate, 1, buffer_us);
if (error < 0) {
LOG(WARNING) << "PcmSetParams: " << device_name << ", "
- << wrapper->StrError(error) << " - Format: " << pcm_format
- << " Channels: " << channels << " Latency: " << latency_us;
+ << wrapper->StrError(error);
+ // Default parameter setting function failed, try again with the customized
+ // one if |period_us| is set, which is the case for capture but not for
+ // playback.
+ if (period_us > 0) {
+ const unsigned int frames_per_buffer = static_cast<unsigned int>(
+ static_cast<int64_t>(buffer_us) * sample_rate /
+ base::Time::kMicrosecondsPerSecond);
+ const unsigned int frames_per_period = static_cast<unsigned int>(
+ static_cast<int64_t>(period_us) * sample_rate /
+ base::Time::kMicrosecondsPerSecond);
+ LOG(WARNING) << "SetParams: " << device_name
+ << " - Format: " << pcm_format << " Channels: " << channels
+ << " Sample rate: " << sample_rate
+ << " Buffer size: " << frames_per_buffer
+ << " Period size: " << frames_per_period;
+ error = SetParams(wrapper, handle, pcm_format, channels, sample_rate,
+ frames_per_buffer, frames_per_period);
+ }
+ }
+ if (error < 0) {
if (alsa_util::CloseDevice(wrapper, handle) < 0) {
// TODO(ajwong): Retry on certain errors?
LOG(WARNING) << "Unable to close audio device. Leaking handle.";
@@ -78,9 +276,10 @@ snd_pcm_t* OpenCaptureDevice(media::AlsaWrapper* wrapper,
int channels,
int sample_rate,
snd_pcm_format_t pcm_format,
- int latency_us) {
+ int buffer_us,
+ int period_us) {
return OpenDevice(wrapper, device_name, SND_PCM_STREAM_CAPTURE, channels,
- sample_rate, pcm_format, latency_us);
+ sample_rate, pcm_format, buffer_us, period_us);
}
snd_pcm_t* OpenPlaybackDevice(media::AlsaWrapper* wrapper,
@@ -88,9 +287,9 @@ snd_pcm_t* OpenPlaybackDevice(media::AlsaWrapper* wrapper,
int channels,
int sample_rate,
snd_pcm_format_t pcm_format,
- int latency_us) {
+ int buffer_us) {
return OpenDevice(wrapper, device_name, SND_PCM_STREAM_PLAYBACK, channels,
- sample_rate, pcm_format, latency_us);
+ sample_rate, pcm_format, buffer_us);
}
snd_mixer_t* OpenMixer(media::AlsaWrapper* wrapper,
diff --git a/chromium/media/audio/alsa/alsa_util.h b/chromium/media/audio/alsa/alsa_util.h
index d24584a523d..47b5d1b6594 100644
--- a/chromium/media/audio/alsa/alsa_util.h
+++ b/chromium/media/audio/alsa/alsa_util.h
@@ -8,25 +8,32 @@
#include <alsa/asoundlib.h>
#include <string>
+#include "media/base/media_export.h"
+
namespace media {
class AlsaWrapper;
}
namespace alsa_util {
-snd_pcm_t* OpenCaptureDevice(media::AlsaWrapper* wrapper,
- const char* device_name,
- int channels,
- int sample_rate,
- snd_pcm_format_t pcm_format,
- int latency_us);
+// When opening ALSA devices, |period_us| is the size of a packet and
+// |buffer_us| is the size of the ring buffer, which consists of multiple
+// packets. In capture devices, the latency relies more on |period_us|, and thus
+// one may require more details upon the value implicitly set by ALSA.
+MEDIA_EXPORT snd_pcm_t* OpenCaptureDevice(media::AlsaWrapper* wrapper,
+ const char* device_name,
+ int channels,
+ int sample_rate,
+ snd_pcm_format_t pcm_format,
+ int buffer_us,
+ int period_us);
snd_pcm_t* OpenPlaybackDevice(media::AlsaWrapper* wrapper,
const char* device_name,
int channels,
int sample_rate,
snd_pcm_format_t pcm_format,
- int latency_us);
+ int buffer_us);
int CloseDevice(media::AlsaWrapper* wrapper, snd_pcm_t* handle);
diff --git a/chromium/media/audio/alsa/alsa_util_unittest.cc b/chromium/media/audio/alsa/alsa_util_unittest.cc
new file mode 100644
index 00000000000..343730dc07b
--- /dev/null
+++ b/chromium/media/audio/alsa/alsa_util_unittest.cc
@@ -0,0 +1,44 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/alsa/alsa_util.h"
+#include "media/audio/alsa/mock_alsa_wrapper.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace alsa_util {
+
+namespace {
+
+using ::testing::_;
+using ::testing::InSequence;
+using ::testing::Invoke;
+using ::testing::Return;
+
+} // namespace
+
+TEST(AlsaUtilTest, FreeHwParams) {
+ InSequence seq;
+ media::MockAlsaWrapper mock_alsa_wrapper;
+ snd_pcm_hw_params_t* params_ptr = (snd_pcm_hw_params_t*)malloc(1);
+ EXPECT_CALL(mock_alsa_wrapper, PcmOpen(_, _, _, _)).WillOnce(Return(0));
+ EXPECT_CALL(mock_alsa_wrapper, PcmSetParams(_, _, _, _, _, _, _))
+ .WillOnce(Return(-1));
+ EXPECT_CALL(mock_alsa_wrapper, StrError(_)).WillOnce(Return("error"));
+ EXPECT_CALL(mock_alsa_wrapper, PcmHwParamsMalloc(_))
+ .WillOnce(Invoke([params_ptr](snd_pcm_hw_params_t** params) {
+ *params = params_ptr;
+ return 0;
+ }));
+ EXPECT_CALL(mock_alsa_wrapper, PcmHwParamsAny(_, _)).WillOnce(Return(-1));
+ EXPECT_CALL(mock_alsa_wrapper, StrError(_)).WillOnce(Return("error"));
+ EXPECT_CALL(mock_alsa_wrapper, PcmHwParamsFree(params_ptr));
+ EXPECT_CALL(mock_alsa_wrapper, PcmName(_)).WillOnce(Return("default"));
+ EXPECT_CALL(mock_alsa_wrapper, PcmClose(_)).WillOnce(Return(0));
+ snd_pcm_t* handle = OpenCaptureDevice(&mock_alsa_wrapper, "default", 2, 48000,
+ SND_PCM_FORMAT_S16, 40000, 10000);
+ EXPECT_EQ(handle, nullptr);
+ free(params_ptr);
+}
+
+} // namespace alsa_util
diff --git a/chromium/media/audio/alsa/alsa_wrapper.cc b/chromium/media/audio/alsa/alsa_wrapper.cc
index a11a4a4eb58..0696fbe087d 100644
--- a/chromium/media/audio/alsa/alsa_wrapper.cc
+++ b/chromium/media/audio/alsa/alsa_wrapper.cc
@@ -89,6 +89,121 @@ int AlsaWrapper::PcmGetParams(snd_pcm_t* handle, snd_pcm_uframes_t* buffer_size,
return snd_pcm_get_params(handle, buffer_size, period_size);
}
+int AlsaWrapper::PcmHwParamsMalloc(snd_pcm_hw_params_t** hw_params) {
+ return snd_pcm_hw_params_malloc(hw_params);
+}
+
+int AlsaWrapper::PcmHwParamsAny(snd_pcm_t* handle,
+ snd_pcm_hw_params_t* hw_params) {
+ return snd_pcm_hw_params_any(handle, hw_params);
+}
+
+int AlsaWrapper::PcmHwParamsSetRateResample(snd_pcm_t* handle,
+ snd_pcm_hw_params_t* hw_params,
+ unsigned int value) {
+ return snd_pcm_hw_params_set_rate_resample(handle, hw_params, value);
+}
+
+int AlsaWrapper::PcmHwParamsSetRateNear(snd_pcm_t* handle,
+ snd_pcm_hw_params_t* hw_params,
+ unsigned int* rate,
+ int* direction) {
+ return snd_pcm_hw_params_set_rate_near(handle, hw_params, rate, direction);
+}
+
+int AlsaWrapper::PcmHwParamsTestFormat(snd_pcm_t* handle,
+ snd_pcm_hw_params_t* hw_params,
+ snd_pcm_format_t format) {
+ return snd_pcm_hw_params_test_format(handle, hw_params, format);
+}
+
+int AlsaWrapper::PcmFormatSize(snd_pcm_format_t format, size_t samples) {
+ return snd_pcm_format_size(format, samples);
+}
+
+int AlsaWrapper::PcmHwParamsGetChannelsMin(const snd_pcm_hw_params_t* hw_params,
+ unsigned int* min_channels) {
+ return snd_pcm_hw_params_get_channels_min(hw_params, min_channels);
+}
+
+int AlsaWrapper::PcmHwParamsGetChannelsMax(const snd_pcm_hw_params_t* hw_params,
+ unsigned int* max_channels) {
+ return snd_pcm_hw_params_get_channels_min(hw_params, max_channels);
+}
+
+int AlsaWrapper::PcmHwParamsSetFormat(snd_pcm_t* handle,
+ snd_pcm_hw_params_t* hw_params,
+ snd_pcm_format_t format) {
+ return snd_pcm_hw_params_set_format(handle, hw_params, format);
+}
+
+int AlsaWrapper::PcmHwParamsSetAccess(snd_pcm_t* handle,
+ snd_pcm_hw_params_t* hw_params,
+ snd_pcm_access_t access) {
+ return snd_pcm_hw_params_set_access(handle, hw_params, access);
+}
+
+int AlsaWrapper::PcmHwParamsSetChannels(snd_pcm_t* handle,
+ snd_pcm_hw_params_t* hw_params,
+ unsigned int channels) {
+ return snd_pcm_hw_params_set_channels(handle, hw_params, channels);
+}
+
+int AlsaWrapper::PcmHwParamsSetBufferSizeNear(snd_pcm_t* handle,
+ snd_pcm_hw_params_t* hw_params,
+ snd_pcm_uframes_t* buffer_size) {
+ return snd_pcm_hw_params_set_buffer_size_near(handle, hw_params, buffer_size);
+}
+
+int AlsaWrapper::PcmHwParamsSetPeriodSizeNear(snd_pcm_t* handle,
+ snd_pcm_hw_params_t* hw_params,
+ snd_pcm_uframes_t* period_size,
+ int* direction) {
+ return snd_pcm_hw_params_set_period_size_near(handle, hw_params, period_size,
+ direction);
+}
+
+int AlsaWrapper::PcmHwParams(snd_pcm_t* handle,
+ snd_pcm_hw_params_t* hw_params) {
+ return snd_pcm_hw_params(handle, hw_params);
+}
+
+void AlsaWrapper::PcmHwParamsFree(snd_pcm_hw_params_t* hw_params) {
+ return snd_pcm_hw_params_free(hw_params);
+}
+
+int AlsaWrapper::PcmSwParamsMalloc(snd_pcm_sw_params_t** sw_params) {
+ return snd_pcm_sw_params_malloc(sw_params);
+}
+
+int AlsaWrapper::PcmSwParamsCurrent(snd_pcm_t* handle,
+ snd_pcm_sw_params_t* sw_params) {
+ return snd_pcm_sw_params_current(handle, sw_params);
+}
+
+int AlsaWrapper::PcmSwParamsSetStartThreshold(
+ snd_pcm_t* handle,
+ snd_pcm_sw_params_t* sw_params,
+ snd_pcm_uframes_t start_threshold) {
+ return snd_pcm_sw_params_set_start_threshold(handle, sw_params,
+ start_threshold);
+}
+
+int AlsaWrapper::PcmSwParamsSetAvailMin(snd_pcm_t* handle,
+ snd_pcm_sw_params_t* sw_params,
+ snd_pcm_uframes_t period_size) {
+ return snd_pcm_sw_params_set_avail_min(handle, sw_params, period_size);
+}
+
+int AlsaWrapper::PcmSwParams(snd_pcm_t* handle,
+ snd_pcm_sw_params_t* sw_params) {
+ return snd_pcm_sw_params(handle, sw_params);
+}
+
+void AlsaWrapper::PcmSwParamsFree(snd_pcm_sw_params_t* sw_params) {
+ return snd_pcm_sw_params_free(sw_params);
+}
+
snd_pcm_sframes_t AlsaWrapper::PcmAvailUpdate(snd_pcm_t* handle) {
return snd_pcm_avail_update(handle);
}
diff --git a/chromium/media/audio/alsa/alsa_wrapper.h b/chromium/media/audio/alsa/alsa_wrapper.h
index bd70f5373b0..6998ca04218 100644
--- a/chromium/media/audio/alsa/alsa_wrapper.h
+++ b/chromium/media/audio/alsa/alsa_wrapper.h
@@ -46,6 +46,52 @@ class MEDIA_EXPORT AlsaWrapper {
unsigned int latency);
virtual int PcmGetParams(snd_pcm_t* handle, snd_pcm_uframes_t* buffer_size,
snd_pcm_uframes_t* period_size);
+ virtual int PcmHwParamsMalloc(snd_pcm_hw_params_t** hw_params);
+ virtual int PcmHwParamsAny(snd_pcm_t* handle, snd_pcm_hw_params_t* hw_params);
+ virtual int PcmHwParamsSetRateResample(snd_pcm_t* handle,
+ snd_pcm_hw_params_t* hw_params,
+ unsigned int value);
+ virtual int PcmHwParamsSetRateNear(snd_pcm_t* handle,
+ snd_pcm_hw_params_t* hw_params,
+ unsigned int* rate,
+ int* direction);
+ virtual int PcmHwParamsTestFormat(snd_pcm_t* handle,
+ snd_pcm_hw_params_t* hw_params,
+ snd_pcm_format_t format);
+ virtual int PcmFormatSize(snd_pcm_format_t format, size_t samples);
+ virtual int PcmHwParamsGetChannelsMin(const snd_pcm_hw_params_t* hw_params,
+ unsigned int* min_channels);
+ virtual int PcmHwParamsGetChannelsMax(const snd_pcm_hw_params_t* hw_params,
+ unsigned int* max_channels);
+ virtual int PcmHwParamsSetFormat(snd_pcm_t* handle,
+ snd_pcm_hw_params_t* hw_params,
+ snd_pcm_format_t format);
+ virtual int PcmHwParamsSetAccess(snd_pcm_t* handle,
+ snd_pcm_hw_params_t* hw_params,
+ snd_pcm_access_t access);
+ virtual int PcmHwParamsSetChannels(snd_pcm_t* handle,
+ snd_pcm_hw_params_t* hw_params,
+ unsigned int channels);
+ virtual int PcmHwParamsSetBufferSizeNear(snd_pcm_t* handle,
+ snd_pcm_hw_params_t* hw_params,
+ snd_pcm_uframes_t* buffer_size);
+ virtual int PcmHwParamsSetPeriodSizeNear(snd_pcm_t* handle,
+ snd_pcm_hw_params_t* hw_params,
+ snd_pcm_uframes_t* period_size,
+ int* direction);
+ virtual int PcmHwParams(snd_pcm_t* handle, snd_pcm_hw_params_t* hw_params);
+ virtual void PcmHwParamsFree(snd_pcm_hw_params_t* hw_params);
+ virtual int PcmSwParamsMalloc(snd_pcm_sw_params_t** sw_params);
+ virtual int PcmSwParamsCurrent(snd_pcm_t* handle,
+ snd_pcm_sw_params_t* sw_params);
+ virtual int PcmSwParamsSetStartThreshold(snd_pcm_t* handle,
+ snd_pcm_sw_params_t* sw_params,
+ snd_pcm_uframes_t start_threshold);
+ virtual int PcmSwParamsSetAvailMin(snd_pcm_t* handle,
+ snd_pcm_sw_params_t* sw_params,
+ snd_pcm_uframes_t period_size);
+ virtual int PcmSwParams(snd_pcm_t* handle, snd_pcm_sw_params_t* sw_params);
+ virtual void PcmSwParamsFree(snd_pcm_sw_params_t* sw_params);
virtual const char* PcmName(snd_pcm_t* handle);
virtual snd_pcm_sframes_t PcmAvailUpdate(snd_pcm_t* handle);
virtual snd_pcm_state_t PcmState(snd_pcm_t* handle);
@@ -105,15 +151,6 @@ class MEDIA_EXPORT AlsaWrapper {
virtual const char* StrError(int errnum);
- private:
- int ConfigureHwParams(snd_pcm_t* handle,
- snd_pcm_hw_params_t* hw_params,
- snd_pcm_format_t format,
- snd_pcm_access_t access,
- unsigned int channels,
- unsigned int rate,
- int soft_resample,
- unsigned int latency);
DISALLOW_COPY_AND_ASSIGN(AlsaWrapper);
};
diff --git a/chromium/media/audio/alsa/audio_manager_alsa.cc b/chromium/media/audio/alsa/audio_manager_alsa.cc
index 91108f0f920..3f0c60ecee9 100644
--- a/chromium/media/audio/alsa/audio_manager_alsa.cc
+++ b/chromium/media/audio/alsa/audio_manager_alsa.cc
@@ -8,7 +8,6 @@
#include "base/command_line.h"
#include "base/logging.h"
-#include "base/macros.h"
#include "base/memory/free_deleter.h"
#include "base/metrics/histogram.h"
#include "base/stl_util.h"
@@ -175,7 +174,7 @@ bool AudioManagerAlsa::IsAlsaDeviceAvailable(
// it or not.
if (type == kStreamCapture) {
// Check if the device is in the list of invalid devices.
- for (size_t i = 0; i < arraysize(kInvalidAudioInputDevices); ++i) {
+ for (size_t i = 0; i < base::size(kInvalidAudioInputDevices); ++i) {
if (strncmp(kInvalidAudioInputDevices[i], device_name,
strlen(kInvalidAudioInputDevices[i])) == 0)
return false;
@@ -190,7 +189,7 @@ bool AudioManagerAlsa::IsAlsaDeviceAvailable(
// TODO(joi): Should we prefer "hw" instead?
static const char kDeviceTypeDesired[] = "plughw";
return strncmp(kDeviceTypeDesired, device_name,
- arraysize(kDeviceTypeDesired) - 1) == 0;
+ base::size(kDeviceTypeDesired) - 1) == 0;
}
// static
diff --git a/chromium/media/audio/alsa/mock_alsa_wrapper.cc b/chromium/media/audio/alsa/mock_alsa_wrapper.cc
new file mode 100644
index 00000000000..fdb9573a85a
--- /dev/null
+++ b/chromium/media/audio/alsa/mock_alsa_wrapper.cc
@@ -0,0 +1,13 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/alsa/mock_alsa_wrapper.h"
+
+namespace media {
+
+MockAlsaWrapper::MockAlsaWrapper() {}
+
+MockAlsaWrapper::~MockAlsaWrapper() = default;
+
+} // namespace media
diff --git a/chromium/media/audio/alsa/mock_alsa_wrapper.h b/chromium/media/audio/alsa/mock_alsa_wrapper.h
new file mode 100644
index 00000000000..f4fd16eb64f
--- /dev/null
+++ b/chromium/media/audio/alsa/mock_alsa_wrapper.h
@@ -0,0 +1,188 @@
+// Copyright (c) 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_ALSA_MOCK_ALSA_WRAPPER_H_
+#define MEDIA_AUDIO_ALSA_MOCK_ALSA_WRAPPER_H_
+
+#include "base/macros.h"
+#include "media/audio/alsa/alsa_wrapper.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace media {
+
+class MockAlsaWrapper : public AlsaWrapper {
+ public:
+ MockAlsaWrapper();
+
+ ~MockAlsaWrapper() override;
+
+ MOCK_METHOD3(DeviceNameHint, int(int card, const char* iface, void*** hints));
+ MOCK_METHOD2(DeviceNameGetHint, char*(const void* hint, const char* id));
+ MOCK_METHOD1(DeviceNameFreeHint, int(void** hints));
+ MOCK_METHOD1(CardNext, int(int* rcard));
+ MOCK_METHOD4(PcmOpen,
+ int(snd_pcm_t** handle,
+ const char* name,
+ snd_pcm_stream_t stream,
+ int mode));
+ MOCK_METHOD1(PcmClose, int(snd_pcm_t* handle));
+ MOCK_METHOD1(PcmPrepare, int(snd_pcm_t* handle));
+ MOCK_METHOD1(PcmDrain, int(snd_pcm_t* handle));
+ MOCK_METHOD1(PcmDrop, int(snd_pcm_t* handle));
+ MOCK_METHOD2(PcmDelay, int(snd_pcm_t* handle, snd_pcm_sframes_t* delay));
+ MOCK_METHOD3(PcmWritei,
+ snd_pcm_sframes_t(snd_pcm_t* handle,
+ const void* buffer,
+ snd_pcm_uframes_t size));
+ MOCK_METHOD3(PcmReadi,
+ snd_pcm_sframes_t(snd_pcm_t* handle,
+ void* buffer,
+ snd_pcm_uframes_t size));
+ MOCK_METHOD3(PcmRecover, int(snd_pcm_t* handle, int err, int silent));
+ MOCK_METHOD7(PcmSetParams,
+ int(snd_pcm_t* handle,
+ snd_pcm_format_t format,
+ snd_pcm_access_t access,
+ unsigned int channels,
+ unsigned int rate,
+ int soft_resample,
+ unsigned int latency));
+ MOCK_METHOD3(PcmGetParams,
+ int(snd_pcm_t* handle,
+ snd_pcm_uframes_t* buffer_size,
+ snd_pcm_uframes_t* period_size));
+ MOCK_METHOD1(PcmHwParamsMalloc, int(snd_pcm_hw_params_t** hw_params));
+ MOCK_METHOD2(PcmHwParamsAny,
+ int(snd_pcm_t* handle, snd_pcm_hw_params_t* hw_params));
+ MOCK_METHOD3(PcmHwParamsSetRateResample,
+ int(snd_pcm_t* handle,
+ snd_pcm_hw_params_t* hw_params,
+ unsigned int value));
+ MOCK_METHOD4(PcmHwParamsSetRateNear,
+ int(snd_pcm_t* handle,
+ snd_pcm_hw_params_t* hw_params,
+ unsigned int* rate,
+ int* direction));
+ MOCK_METHOD3(PcmHwParamsTestFormat,
+ int(snd_pcm_t* handle,
+ snd_pcm_hw_params_t* hw_params,
+ snd_pcm_format_t format));
+ MOCK_METHOD2(PcmFormatSize, int(snd_pcm_format_t format, size_t samples));
+ MOCK_METHOD2(PcmHwParamsGetChannelsMin,
+ int(const snd_pcm_hw_params_t* hw_params,
+ unsigned int* min_channels));
+ MOCK_METHOD2(PcmHwParamsGetChannelsMax,
+ int(const snd_pcm_hw_params_t* hw_params,
+ unsigned int* max_channels));
+ MOCK_METHOD3(PcmHwParamsSetFormat,
+ int(snd_pcm_t* handle,
+ snd_pcm_hw_params_t* hw_params,
+ snd_pcm_format_t format));
+ MOCK_METHOD3(PcmHwParamsSetAccess,
+ int(snd_pcm_t* handle,
+ snd_pcm_hw_params_t* hw_params,
+ snd_pcm_access_t access));
+ MOCK_METHOD3(PcmHwParamsSetChannels,
+ int(snd_pcm_t* handle,
+ snd_pcm_hw_params_t* hw_params,
+ unsigned int channels));
+ MOCK_METHOD3(PcmHwParamsSetBufferSizeNear,
+ int(snd_pcm_t* handle,
+ snd_pcm_hw_params_t* hw_params,
+ snd_pcm_uframes_t* buffer_size));
+ MOCK_METHOD4(PcmHwParamsSetPeriodSizeNear,
+ int(snd_pcm_t* handle,
+ snd_pcm_hw_params_t* hw_params,
+ snd_pcm_uframes_t* period_size,
+ int* direction));
+ MOCK_METHOD2(PcmHwParams,
+ int(snd_pcm_t* handle, snd_pcm_hw_params_t* hw_params));
+ MOCK_METHOD1(PcmHwParamsFree, void(snd_pcm_hw_params_t* hw_params));
+ MOCK_METHOD1(PcmSwParamsMalloc, int(snd_pcm_sw_params_t** sw_params));
+ MOCK_METHOD2(PcmSwParamsCurrent,
+ int(snd_pcm_t* handle, snd_pcm_sw_params_t* sw_params));
+ MOCK_METHOD3(PcmSwParamsSetStartThreshold,
+ int(snd_pcm_t* handle,
+ snd_pcm_sw_params_t* sw_params,
+ snd_pcm_uframes_t start_threshold));
+ MOCK_METHOD3(PcmSwParamsSetAvailMin,
+ int(snd_pcm_t* handle,
+ snd_pcm_sw_params_t* sw_params,
+ snd_pcm_uframes_t period_size));
+ MOCK_METHOD2(PcmSwParams,
+ int(snd_pcm_t* handle, snd_pcm_sw_params_t* sw_params));
+ MOCK_METHOD1(PcmSwParamsFree, void(snd_pcm_sw_params_t* sw_params));
+ MOCK_METHOD1(PcmName, const char*(snd_pcm_t* handle));
+ MOCK_METHOD1(PcmAvailUpdate, snd_pcm_sframes_t(snd_pcm_t* handle));
+ MOCK_METHOD1(PcmState, snd_pcm_state_t(snd_pcm_t* handle));
+ MOCK_METHOD1(PcmStart, int(snd_pcm_t* handle));
+ MOCK_METHOD2(MixerOpen, int(snd_mixer_t** mixer, int mode));
+ MOCK_METHOD2(MixerAttach, int(snd_mixer_t* mixer, const char* name));
+ MOCK_METHOD3(MixerElementRegister,
+ int(snd_mixer_t* mixer,
+ struct snd_mixer_selem_regopt* options,
+ snd_mixer_class_t** classp));
+ MOCK_METHOD1(MixerFree, void(snd_mixer_t* mixer));
+ MOCK_METHOD2(MixerDetach, int(snd_mixer_t* mixer, const char* name));
+ MOCK_METHOD1(MixerClose, int(snd_mixer_t* mixer));
+ MOCK_METHOD1(MixerLoad, int(snd_mixer_t* mixer));
+ MOCK_METHOD1(MixerFirstElem, snd_mixer_elem_t*(snd_mixer_t* mixer));
+ MOCK_METHOD1(MixerNextElem, snd_mixer_elem_t*(snd_mixer_elem_t* elem));
+ MOCK_METHOD1(MixerSelemIsActive, int(snd_mixer_elem_t* elem));
+ MOCK_METHOD1(MixerSelemName, const char*(snd_mixer_elem_t* elem));
+ MOCK_METHOD2(MixerSelemSetCaptureVolumeAll,
+ int(snd_mixer_elem_t* elem, long value));
+ MOCK_METHOD3(MixerSelemGetCaptureVolume,
+ int(snd_mixer_elem_t* elem,
+ snd_mixer_selem_channel_id_t channel,
+ long* value));
+ MOCK_METHOD1(MixerSelemHasCaptureVolume, int(snd_mixer_elem_t* elem));
+ MOCK_METHOD3(MixerSelemGetCaptureVolumeRange,
+ int(snd_mixer_elem_t* elem, long* min, long* max));
+ MOCK_METHOD1(MixerElemGetCallbackPrivate, void*(const snd_mixer_elem_t* obj));
+ MOCK_METHOD2(MixerElemSetCallback,
+ void(snd_mixer_elem_t* obj, snd_mixer_elem_callback_t val));
+ MOCK_METHOD2(MixerElemSetCallbackPrivate,
+ void(snd_mixer_elem_t* obj, void* val));
+ MOCK_METHOD2(MixerFindSelem,
+ snd_mixer_elem_t*(snd_mixer_t* mixer,
+ const snd_mixer_selem_id_t* id));
+ MOCK_METHOD1(MixerHandleEvents, int(snd_mixer_t* mixer));
+ MOCK_METHOD3(MixerPollDescriptors,
+ int(snd_mixer_t* mixer,
+ struct pollfd* pfds,
+ unsigned int space));
+ MOCK_METHOD1(MixerPollDescriptorsCount, int(snd_mixer_t* mixer));
+ MOCK_METHOD3(MixerSelemGetPlaybackSwitch,
+ int(snd_mixer_elem_t* elem,
+ snd_mixer_selem_channel_id_t channel,
+ int* value));
+ MOCK_METHOD3(MixerSelemGetPlaybackVolume,
+ int(snd_mixer_elem_t* elem,
+ snd_mixer_selem_channel_id_t channel,
+ long* value));
+ MOCK_METHOD3(MixerSelemGetPlaybackVolumeRange,
+ int(snd_mixer_elem_t* elem, long* min, long* max));
+ MOCK_METHOD1(MixerSelemHasPlaybackSwitch, int(snd_mixer_elem_t* elem));
+ MOCK_METHOD2(MixerSelemIdSetIndex,
+ void(snd_mixer_selem_id_t* obj, unsigned int val));
+ MOCK_METHOD2(MixerSelemIdSetName,
+ void(snd_mixer_selem_id_t* obj, const char* val));
+ MOCK_METHOD3(MixerSelemSetPlaybackSwitch,
+ int(snd_mixer_elem_t* elem,
+ snd_mixer_selem_channel_id_t channel,
+ int value));
+ MOCK_METHOD2(MixerSelemSetPlaybackVolumeAll,
+ int(snd_mixer_elem_t* elem, long value));
+ MOCK_METHOD1(MixerSelemIdMalloc, int(snd_mixer_selem_id_t** ptr));
+ MOCK_METHOD1(MixerSelemIdFree, void(snd_mixer_selem_id_t* obj));
+ MOCK_METHOD1(StrError, const char*(int errnum));
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MockAlsaWrapper);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_ALSA_MOCK_ALSA_WRAPPER_H_
diff --git a/chromium/media/audio/android/opensles_input.cc b/chromium/media/audio/android/opensles_input.cc
index 5a06fd3b72b..09edf52163a 100644
--- a/chromium/media/audio/android/opensles_input.cc
+++ b/chromium/media/audio/android/opensles_input.cc
@@ -5,7 +5,7 @@
#include "media/audio/android/opensles_input.h"
#include "base/logging.h"
-#include "base/macros.h"
+#include "base/stl_util.h"
#include "base/trace_event/trace_event.h"
#include "media/audio/android/audio_manager_android.h"
#include "media/base/audio_bus.h"
@@ -237,13 +237,9 @@ bool OpenSLESInputStream::CreateRecorder() {
// Create AudioRecorder and specify SL_IID_ANDROIDCONFIGURATION.
LOG_ON_FAILURE_AND_RETURN(
- (*engine)->CreateAudioRecorder(engine,
- recorder_object_.Receive(),
- &audio_source,
- &audio_sink,
- arraysize(interface_id),
- interface_id,
- interface_required),
+ (*engine)->CreateAudioRecorder(
+ engine, recorder_object_.Receive(), &audio_source, &audio_sink,
+ base::size(interface_id), interface_id, interface_required),
false);
SLAndroidConfigurationItf recorder_config;
diff --git a/chromium/media/audio/android/opensles_output.cc b/chromium/media/audio/android/opensles_output.cc
index 61693f56c11..0c0a1c920b3 100644
--- a/chromium/media/audio/android/opensles_output.cc
+++ b/chromium/media/audio/android/opensles_output.cc
@@ -6,7 +6,7 @@
#include "base/android/build_info.h"
#include "base/logging.h"
-#include "base/macros.h"
+#include "base/stl_util.h"
#include "base/strings/string_util.h"
#include "base/time/time.h"
#include "base/trace_event/trace_event.h"
@@ -303,13 +303,9 @@ bool OpenSLESOutputStream::CreatePlayer() {
const SLboolean interface_required[] = {SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE,
SL_BOOLEAN_TRUE};
LOG_ON_FAILURE_AND_RETURN(
- (*engine)->CreateAudioPlayer(engine,
- player_object_.Receive(),
- &audio_source,
- &audio_sink,
- arraysize(interface_id),
- interface_id,
- interface_required),
+ (*engine)->CreateAudioPlayer(
+ engine, player_object_.Receive(), &audio_source, &audio_sink,
+ base::size(interface_id), interface_id, interface_required),
false);
// Create AudioPlayer and specify SL_IID_ANDROIDCONFIGURATION.
diff --git a/chromium/media/audio/audio_device_description.cc b/chromium/media/audio/audio_device_description.cc
index ebca4a61b23..87f8f37d366 100644
--- a/chromium/media/audio/audio_device_description.cc
+++ b/chromium/media/audio/audio_device_description.cc
@@ -56,6 +56,8 @@ std::string AudioDeviceDescription::GetDefaultDeviceName() {
std::string AudioDeviceDescription::GetCommunicationsDeviceName() {
#if defined(OS_WIN)
return GetLocalizedStringUTF8(COMMUNICATIONS_AUDIO_DEVICE_NAME);
+#elif defined(IS_CHROMECAST)
+ return "";
#else
NOTREACHED();
return "";
diff --git a/chromium/media/audio/audio_features.cc b/chromium/media/audio/audio_features.cc
index 61a5e7df051..ae38fa4d85b 100644
--- a/chromium/media/audio/audio_features.cc
+++ b/chromium/media/audio/audio_features.cc
@@ -22,12 +22,6 @@ const base::Feature kCrOSSystemAECDeactivatedGroups{
"CrOSSystemAECDeactivatedGroups", base::FEATURE_ENABLED_BY_DEFAULT};
#endif
-#if defined(OS_WIN)
-// Increases the input audio endpoint buffer size. http://crbug.com/830624.
-const base::Feature kIncreaseInputAudioBufferSize{
- "IncreaseInputAudioBufferSize", base::FEATURE_ENABLED_BY_DEFAULT};
-#endif
-
#if defined(OS_MACOSX) || defined(OS_CHROMEOS)
const base::Feature kForceEnableSystemAec{"ForceEnableSystemAec",
base::FEATURE_DISABLED_BY_DEFAULT};
diff --git a/chromium/media/audio/audio_features.h b/chromium/media/audio/audio_features.h
index 614edd51f4c..0d686a33120 100644
--- a/chromium/media/audio/audio_features.h
+++ b/chromium/media/audio/audio_features.h
@@ -19,10 +19,6 @@ MEDIA_EXPORT extern const base::Feature kCrOSSystemAEC;
MEDIA_EXPORT extern const base::Feature kCrOSSystemAECDeactivatedGroups;
#endif
-#if defined(OS_WIN)
-MEDIA_EXPORT extern const base::Feature kIncreaseInputAudioBufferSize;
-#endif
-
#if defined(OS_MACOSX) || defined(OS_CHROMEOS)
MEDIA_EXPORT extern const base::Feature kForceEnableSystemAec;
#endif
diff --git a/chromium/media/audio/audio_input_device.cc b/chromium/media/audio/audio_input_device.cc
index c831d58e77b..f2ed25e76fb 100644
--- a/chromium/media/audio/audio_input_device.cc
+++ b/chromium/media/audio/audio_input_device.cc
@@ -45,6 +45,16 @@ const int kCheckMissingCallbacksIntervalSeconds = 5;
// data from the source.
const int kGotDataCallbackIntervalSeconds = 1;
+base::ThreadPriority ThreadPriorityFromPurpose(
+ AudioInputDevice::Purpose purpose) {
+ switch (purpose) {
+ case AudioInputDevice::Purpose::kUserInput:
+ return base::ThreadPriority::REALTIME_AUDIO;
+ case AudioInputDevice::Purpose::kLoopback:
+ return base::ThreadPriority::NORMAL;
+ }
+}
+
} // namespace
// Takes care of invoking the capture callback on the audio thread.
@@ -56,6 +66,7 @@ class AudioInputDevice::AudioThreadCallback
AudioThreadCallback(const AudioParameters& audio_parameters,
base::ReadOnlySharedMemoryRegion shared_memory_region,
uint32_t total_segments,
+ bool enable_uma,
CaptureCallback* capture_callback,
base::RepeatingClosure got_data_callback);
~AudioThreadCallback() override;
@@ -66,6 +77,7 @@ class AudioInputDevice::AudioThreadCallback
void Process(uint32_t pending_data) override;
private:
+ const bool enable_uma_;
base::ReadOnlySharedMemoryRegion shared_memory_region_;
base::ReadOnlySharedMemoryMapping shared_memory_mapping_;
const base::TimeTicks start_time_;
@@ -87,8 +99,9 @@ class AudioInputDevice::AudioThreadCallback
};
AudioInputDevice::AudioInputDevice(std::unique_ptr<AudioInputIPC> ipc,
- base::ThreadPriority thread_priority)
- : thread_priority_(thread_priority),
+ Purpose purpose)
+ : thread_priority_(ThreadPriorityFromPurpose(purpose)),
+ enable_uma_(purpose == AudioInputDevice::Purpose::kUserInput),
callback_(nullptr),
ipc_(std::move(ipc)),
state_(IDLE),
@@ -129,12 +142,14 @@ void AudioInputDevice::Stop() {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
TRACE_EVENT0("audio", "AudioInputDevice::Stop");
- UMA_HISTOGRAM_BOOLEAN(
- "Media.Audio.Capture.DetectedMissingCallbacks",
- alive_checker_ ? alive_checker_->DetectedDead() : false);
+ if (enable_uma_) {
+ UMA_HISTOGRAM_BOOLEAN(
+ "Media.Audio.Capture.DetectedMissingCallbacks",
+ alive_checker_ ? alive_checker_->DetectedDead() : false);
- UMA_HISTOGRAM_ENUMERATION("Media.Audio.Capture.StreamCallbackError2",
- had_error_);
+ UMA_HISTOGRAM_ENUMERATION("Media.Audio.Capture.StreamCallbackError2",
+ had_error_);
+ }
had_error_ = kNoError;
// Close the stream, if we haven't already.
@@ -150,7 +165,7 @@ void AudioInputDevice::Stop() {
// audio_thread_.reset(). In most cases, the thread will already be stopped.
//
// |alive_checker_| must outlive |audio_callback_|.
- base::ScopedAllowBlocking allow_blocking;
+ base::ScopedAllowBaseSyncPrimitivesOutsideBlockingScope allow_thread_join;
audio_thread_.reset();
audio_callback_.reset();
alive_checker_.reset();
@@ -249,7 +264,7 @@ void AudioInputDevice::OnStreamCreated(
// Unretained is safe since |alive_checker_| outlives |audio_callback_|.
audio_callback_ = std::make_unique<AudioInputDevice::AudioThreadCallback>(
audio_parameters_, std::move(shared_memory_region),
- kRequestedSharedMemoryCount, callback_,
+ kRequestedSharedMemoryCount, enable_uma_, callback_,
base::BindRepeating(&AliveChecker::NotifyAlive,
base::Unretained(alive_checker_.get())));
audio_thread_ =
@@ -332,12 +347,14 @@ AudioInputDevice::AudioThreadCallback::AudioThreadCallback(
const AudioParameters& audio_parameters,
base::ReadOnlySharedMemoryRegion shared_memory_region,
uint32_t total_segments,
+ bool enable_uma,
CaptureCallback* capture_callback,
base::RepeatingClosure got_data_callback_)
: AudioDeviceThread::Callback(
audio_parameters,
ComputeAudioInputBufferSize(audio_parameters, 1u),
total_segments),
+ enable_uma_(enable_uma),
shared_memory_region_(std::move(shared_memory_region)),
start_time_(base::TimeTicks::Now()),
no_callbacks_received_(true),
@@ -354,8 +371,10 @@ AudioInputDevice::AudioThreadCallback::AudioThreadCallback(
}
AudioInputDevice::AudioThreadCallback::~AudioThreadCallback() {
- UMA_HISTOGRAM_LONG_TIMES("Media.Audio.Capture.InputStreamDuration",
- base::TimeTicks::Now() - start_time_);
+ if (enable_uma_) {
+ UMA_HISTOGRAM_LONG_TIMES("Media.Audio.Capture.InputStreamDuration",
+ base::TimeTicks::Now() - start_time_);
+ }
}
void AudioInputDevice::AudioThreadCallback::MapSharedMemory() {
@@ -383,8 +402,10 @@ void AudioInputDevice::AudioThreadCallback::Process(uint32_t pending_data) {
TRACE_EVENT_BEGIN0("audio", "AudioInputDevice::AudioThreadCallback::Process");
if (no_callbacks_received_) {
- UMA_HISTOGRAM_TIMES("Media.Audio.Render.InputDeviceStartTime",
- base::TimeTicks::Now() - start_time_);
+ if (enable_uma_) {
+ UMA_HISTOGRAM_TIMES("Media.Audio.Render.InputDeviceStartTime",
+ base::TimeTicks::Now() - start_time_);
+ }
no_callbacks_received_ = false;
}
diff --git a/chromium/media/audio/audio_input_device.h b/chromium/media/audio/audio_input_device.h
index 17351241557..83a149b318e 100644
--- a/chromium/media/audio/audio_input_device.h
+++ b/chromium/media/audio/audio_input_device.h
@@ -67,9 +67,12 @@ namespace media {
class MEDIA_EXPORT AudioInputDevice : public AudioCapturerSource,
public AudioInputIPCDelegate {
public:
+ enum Purpose : int8_t { kUserInput, kLoopback };
+
// NOTE: Clients must call Initialize() before using.
- AudioInputDevice(std::unique_ptr<AudioInputIPC> ipc,
- base::ThreadPriority thread_priority);
+ // |enable_uma| controls logging of UMA stats. It is used to ensure that
+ // stats are not logged for mirroring service streams.
+ AudioInputDevice(std::unique_ptr<AudioInputIPC> ipc, Purpose purpose);
// AudioCapturerSource implementation.
void Initialize(const AudioParameters& params,
@@ -122,6 +125,8 @@ class MEDIA_EXPORT AudioInputDevice : public AudioCapturerSource,
const base::ThreadPriority thread_priority_;
+ const bool enable_uma_;
+
CaptureCallback* callback_;
// A pointer to the IPC layer that takes care of sending requests over to
diff --git a/chromium/media/audio/audio_input_device_unittest.cc b/chromium/media/audio/audio_input_device_unittest.cc
index e1f7a552267..59104cb5b11 100644
--- a/chromium/media/audio/audio_input_device_unittest.cc
+++ b/chromium/media/audio/audio_input_device_unittest.cc
@@ -3,6 +3,9 @@
// found in the LICENSE file.
#include "media/audio/audio_input_device.h"
+
+#include <utility>
+
#include "base/memory/ptr_util.h"
#include "base/memory/shared_memory.h"
#include "base/message_loop/message_loop.h"
@@ -67,7 +70,7 @@ TEST(AudioInputDeviceTest, Noop) {
base::MessageLoopForIO io_loop;
MockAudioInputIPC* input_ipc = new MockAudioInputIPC();
scoped_refptr<AudioInputDevice> device(new AudioInputDevice(
- base::WrapUnique(input_ipc), base::ThreadPriority::REALTIME_AUDIO));
+ base::WrapUnique(input_ipc), AudioInputDevice::Purpose::kUserInput));
}
ACTION_P(ReportStateChange, device) {
@@ -82,7 +85,7 @@ TEST(AudioInputDeviceTest, FailToCreateStream) {
MockCaptureCallback callback;
MockAudioInputIPC* input_ipc = new MockAudioInputIPC();
scoped_refptr<AudioInputDevice> device(new AudioInputDevice(
- base::WrapUnique(input_ipc), base::ThreadPriority::REALTIME_AUDIO));
+ base::WrapUnique(input_ipc), AudioInputDevice::Purpose::kUserInput));
device->Initialize(params, &callback);
EXPECT_CALL(*input_ipc, CreateStream(_, _, _, _))
.WillOnce(ReportStateChange(device.get()));
@@ -118,7 +121,7 @@ TEST(AudioInputDeviceTest, CreateStream) {
MockCaptureCallback callback;
MockAudioInputIPC* input_ipc = new MockAudioInputIPC();
scoped_refptr<AudioInputDevice> device(new AudioInputDevice(
- base::WrapUnique(input_ipc), base::ThreadPriority::REALTIME_AUDIO));
+ base::WrapUnique(input_ipc), AudioInputDevice::Purpose::kUserInput));
device->Initialize(params, &callback);
EXPECT_CALL(*input_ipc, CreateStream(_, _, _, _))
diff --git a/chromium/media/audio/audio_manager.cc b/chromium/media/audio/audio_manager.cc
index 7379337212a..97871cd9e30 100644
--- a/chromium/media/audio/audio_manager.cc
+++ b/chromium/media/audio/audio_manager.cc
@@ -148,15 +148,17 @@ AudioManager* AudioManager::Get() {
bool AudioManager::Shutdown() {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
- // Do not attempt to stop the audio thread if it is hung.
- // Otherwise the current thread will hang too: crbug.com/729494
- // TODO(olka, grunell): Will be fixed when audio is its own process.
- if (audio_thread_->IsHung())
- return false;
-
if (audio_thread_->GetTaskRunner()->BelongsToCurrentThread()) {
+ // If this is the audio thread, there is no need to check if it's hung
+ // (since it's clearly not). https://crbug.com/919854.
ShutdownOnAudioThread();
} else {
+ // Do not attempt to stop the audio thread if it is hung.
+ // Otherwise the current thread will hang too: https://crbug.com/729494
+ // TODO(olka, grunell): Will be fixed when audio is its own process.
+ if (audio_thread_->IsHung())
+ return false;
+
audio_thread_->GetTaskRunner()->PostTask(
FROM_HERE, base::BindOnce(&AudioManager::ShutdownOnAudioThread,
base::Unretained(this)));
diff --git a/chromium/media/audio/audio_manager_unittest.cc b/chromium/media/audio/audio_manager_unittest.cc
index 7a5f3a50a4e..c4940178357 100644
--- a/chromium/media/audio/audio_manager_unittest.cc
+++ b/chromium/media/audio/audio_manager_unittest.cc
@@ -56,7 +56,7 @@
#if defined(USE_CRAS)
#include "chromeos/audio/audio_devices_pref_handler_stub.h"
#include "chromeos/audio/cras_audio_handler.h"
-#include "chromeos/dbus/dbus_switches.h"
+#include "chromeos/dbus/constants/dbus_switches.h"
#include "chromeos/dbus/dbus_thread_manager.h"
#include "chromeos/dbus/fake_cras_audio_client.h"
#include "media/audio/cras/audio_manager_cras.h"
diff --git a/chromium/media/audio/audio_output_device.cc b/chromium/media/audio/audio_output_device.cc
index b298f7a3b93..0cf69752aa7 100644
--- a/chromium/media/audio/audio_output_device.cc
+++ b/chromium/media/audio/audio_output_device.cc
@@ -256,7 +256,7 @@ void AudioOutputDevice::ShutDownOnIOThread() {
// in which case, we cannot use the message loop to close the thread handle
// and can't rely on the main thread existing either.
base::AutoLock auto_lock_(audio_thread_lock_);
- base::ThreadRestrictions::ScopedAllowIO allow_io;
+ base::ScopedAllowBaseSyncPrimitivesOutsideBlockingScope allow_thread_join;
audio_thread_.reset();
audio_callback_.reset();
stopping_hack_ = false;
diff --git a/chromium/media/audio/cras/audio_manager_cras.cc b/chromium/media/audio/cras/audio_manager_cras.cc
index 38a0e4f6ee7..c974385ad19 100644
--- a/chromium/media/audio/cras/audio_manager_cras.cc
+++ b/chromium/media/audio/cras/audio_manager_cras.cc
@@ -560,7 +560,7 @@ void AudioManagerCras::GetSystemAecGroupIdOnMainThread(
void AudioManagerCras::WaitEventOrShutdown(base::WaitableEvent* event) {
base::WaitableEvent* waitables[] = {event, &on_shutdown_};
- base::WaitableEvent::WaitMany(waitables, arraysize(waitables));
+ base::WaitableEvent::WaitMany(waitables, base::size(waitables));
}
} // namespace media
diff --git a/chromium/media/audio/cras/cras_input.cc b/chromium/media/audio/cras/cras_input.cc
index 1fa221dd4c8..79b9e8673a7 100644
--- a/chromium/media/audio/cras/cras_input.cc
+++ b/chromium/media/audio/cras/cras_input.cc
@@ -8,7 +8,7 @@
#include <algorithm>
#include "base/logging.h"
-#include "base/macros.h"
+#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/time/time.h"
#include "media/audio/audio_device_description.h"
@@ -145,7 +145,7 @@ void CrasInputStream::Start(AudioInputCallback* callback) {
CRAS_CH_SL,
CRAS_CH_SR
};
- static_assert(arraysize(kChannelMap) == CHANNELS_MAX + 1,
+ static_assert(base::size(kChannelMap) == CHANNELS_MAX + 1,
"kChannelMap array size should match");
// If already playing, stop before re-starting.
@@ -170,12 +170,12 @@ void CrasInputStream::Start(AudioInputCallback* callback) {
// Initialize channel layout to all -1 to indicate that none of
// the channels is set in the layout.
int8_t layout[CRAS_CH_MAX];
- for (size_t i = 0; i < arraysize(layout); ++i)
+ for (size_t i = 0; i < base::size(layout); ++i)
layout[i] = -1;
// Converts to CRAS defined channels. ChannelOrder will return -1
// for channels that are not present in params_.channel_layout().
- for (size_t i = 0; i < arraysize(kChannelMap); ++i) {
+ for (size_t i = 0; i < base::size(kChannelMap); ++i) {
layout[kChannelMap[i]] = ChannelOrder(params_.channel_layout(),
static_cast<Channels>(i));
}
diff --git a/chromium/media/audio/cras/cras_unified.cc b/chromium/media/audio/cras/cras_unified.cc
index 9687e633834..2a8f92a6f49 100644
--- a/chromium/media/audio/cras/cras_unified.cc
+++ b/chromium/media/audio/cras/cras_unified.cc
@@ -7,7 +7,7 @@
#include <algorithm>
#include "base/logging.h"
-#include "base/macros.h"
+#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "media/audio/cras/audio_manager_cras.h"
@@ -172,7 +172,7 @@ void CrasUnifiedStream::Start(AudioSourceCallback* callback) {
// Converts to CRAS defined channels. ChannelOrder will return -1
// for channels that does not present in params_.channel_layout().
- for (size_t i = 0; i < arraysize(kChannelMap); ++i)
+ for (size_t i = 0; i < base::size(kChannelMap); ++i)
layout[kChannelMap[i]] = ChannelOrder(params_.channel_layout(),
static_cast<Channels>(i));
diff --git a/chromium/media/audio/sounds/audio_stream_handler_unittest.cc b/chromium/media/audio/sounds/audio_stream_handler_unittest.cc
index 79073fe7df4..a6555b9abde 100644
--- a/chromium/media/audio/sounds/audio_stream_handler_unittest.cc
+++ b/chromium/media/audio/sounds/audio_stream_handler_unittest.cc
@@ -10,9 +10,9 @@
#include "base/bind_helpers.h"
#include "base/compiler_specific.h"
#include "base/location.h"
-#include "base/macros.h"
#include "base/run_loop.h"
#include "base/single_thread_task_runner.h"
+#include "base/stl_util.h"
#include "base/test/test_message_loop.h"
#include "base/threading/thread_task_runner_handle.h"
#include "media/audio/audio_io.h"
@@ -35,7 +35,7 @@ class AudioStreamHandlerTest : public testing::Test {
AudioManager::CreateForTesting(std::make_unique<TestAudioThread>());
base::RunLoop().RunUntilIdle();
- base::StringPiece data(kTestAudioData, arraysize(kTestAudioData));
+ base::StringPiece data(kTestAudioData, base::size(kTestAudioData));
audio_stream_handler_.reset(new AudioStreamHandler(data));
}
diff --git a/chromium/media/audio/sounds/sounds_manager.h b/chromium/media/audio/sounds/sounds_manager.h
index 6faf65231d3..27ec5fb72ae 100644
--- a/chromium/media/audio/sounds/sounds_manager.h
+++ b/chromium/media/audio/sounds/sounds_manager.h
@@ -5,7 +5,6 @@
#ifndef MEDIA_AUDIO_SOUNDS_SOUNDS_MANAGER_H_
#define MEDIA_AUDIO_SOUNDS_SOUNDS_MANAGER_H_
-#include "base/containers/hash_tables.h"
#include "base/macros.h"
#include "base/sequence_checker.h"
#include "base/strings/string_piece.h"
diff --git a/chromium/media/audio/sounds/sounds_manager_unittest.cc b/chromium/media/audio/sounds/sounds_manager_unittest.cc
index ddc433b03cc..47f147492ca 100644
--- a/chromium/media/audio/sounds/sounds_manager_unittest.cc
+++ b/chromium/media/audio/sounds/sounds_manager_unittest.cc
@@ -6,8 +6,8 @@
#include "base/compiler_specific.h"
#include "base/logging.h"
-#include "base/macros.h"
#include "base/run_loop.h"
+#include "base/stl_util.h"
#include "base/strings/string_piece.h"
#include "base/test/test_message_loop.h"
#include "base/threading/thread_task_runner_handle.h"
@@ -63,7 +63,7 @@ TEST_F(SoundsManagerTest, Play) {
ASSERT_TRUE(SoundsManager::Get()->Initialize(
kTestAudioKey,
- base::StringPiece(kTestAudioData, arraysize(kTestAudioData))));
+ base::StringPiece(kTestAudioData, base::size(kTestAudioData))));
ASSERT_EQ(20,
SoundsManager::Get()->GetDuration(kTestAudioKey).InMicroseconds());
ASSERT_TRUE(SoundsManager::Get()->Play(kTestAudioKey));
@@ -86,7 +86,7 @@ TEST_F(SoundsManagerTest, Stop) {
ASSERT_TRUE(SoundsManager::Get()->Initialize(
kTestAudioKey,
- base::StringPiece(kTestAudioData, arraysize(kTestAudioData))));
+ base::StringPiece(kTestAudioData, base::size(kTestAudioData))));
// This overrides the wav data set by kTestAudioData and results in
// a never-ending sine wave being played.
diff --git a/chromium/media/audio/sounds/test_data.h b/chromium/media/audio/sounds/test_data.h
index 8e14076d370..b7b2966a47b 100644
--- a/chromium/media/audio/sounds/test_data.h
+++ b/chromium/media/audio/sounds/test_data.h
@@ -9,9 +9,9 @@
#include "base/callback.h"
#include "base/compiler_specific.h"
-#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/single_thread_task_runner.h"
+#include "base/stl_util.h"
#include "media/audio/sounds/audio_stream_handler.h"
namespace media {
@@ -22,7 +22,7 @@ const char kTestAudioData[] =
"RIFF\x28\x00\x00\x00WAVEfmt \x10\x00\x00\x00"
"\x01\x00\x02\x00\x80\xbb\x00\x00\x00\x77\x01\x00\x02\x00\x10\x00"
"data\x04\x00\x00\x00\x01\x00\x01\x00";
-const size_t kTestAudioDataSize = arraysize(kTestAudioData) - 1;
+const size_t kTestAudioDataSize = base::size(kTestAudioData) - 1;
class TestObserver : public AudioStreamHandler::TestObserver {
public:
diff --git a/chromium/media/audio/sounds/wav_audio_handler_unittest.cc b/chromium/media/audio/sounds/wav_audio_handler_unittest.cc
index cc0018fbeb4..21691cc9e2d 100644
--- a/chromium/media/audio/sounds/wav_audio_handler_unittest.cc
+++ b/chromium/media/audio/sounds/wav_audio_handler_unittest.cc
@@ -9,7 +9,7 @@
#include <memory>
#include "base/logging.h"
-#include "base/macros.h"
+#include "base/stl_util.h"
#include "base/strings/string_piece.h"
#include "media/audio/sounds/test_data.h"
#include "media/base/audio_bus.h"
@@ -47,7 +47,7 @@ TEST(WavAudioHandlerTest, SampleDataTest) {
ASSERT_EQ(4U, handler->data().size());
const char kData[] = "\x01\x00\x01\x00";
- ASSERT_EQ(base::StringPiece(kData, arraysize(kData) - 1), handler->data());
+ ASSERT_EQ(base::StringPiece(kData, base::size(kData) - 1), handler->data());
std::unique_ptr<AudioBus> bus =
AudioBus::Create(handler->num_channels(),
@@ -106,7 +106,7 @@ TEST(WavAudioHandlerTest, TestTooBigTotalSizeIsOkay) {
ASSERT_EQ(4U, handler->data().size());
const char kData[] = "\x01\x00\x01\x00";
- ASSERT_EQ(base::StringPiece(kData, arraysize(kData) - 1), handler->data());
+ ASSERT_EQ(base::StringPiece(kData, base::size(kData) - 1), handler->data());
}
TEST(WavAudioHandlerTest, TestTooBigDataChunkSizeIsOkay) {
@@ -128,7 +128,7 @@ TEST(WavAudioHandlerTest, TestTooBigDataChunkSizeIsOkay) {
ASSERT_EQ(4U, handler->data().size());
const char kData[] = "\x01\x00\x01\x00";
- ASSERT_EQ(base::StringPiece(kData, arraysize(kData) - 1), handler->data());
+ ASSERT_EQ(base::StringPiece(kData, base::size(kData) - 1), handler->data());
}
TEST(WavAudioHandlerTest, TestTooSmallFormatSizeIsNotValid) {
diff --git a/chromium/media/audio/win/audio_low_latency_input_win.cc b/chromium/media/audio/win/audio_low_latency_input_win.cc
index 3079281431e..85942602b70 100644
--- a/chromium/media/audio/win/audio_low_latency_input_win.cc
+++ b/chromium/media/audio/win/audio_low_latency_input_win.cc
@@ -783,19 +783,16 @@ HRESULT WASAPIAudioInputStream::InitializeAudioEngine() {
// Initialize the audio stream between the client and the device.
// We connect indirectly through the audio engine by using shared mode.
- // The buffer duration is normally set to 0, which ensures that the buffer
- // size is the minimum buffer size needed to ensure that glitches do not occur
- // between the periodic processing passes. It can be set to 100 ms via a
- // feature.
- // Note: if the value is changed, update the description in
- // chrome/browser/flag_descriptions.cc.
- REFERENCE_TIME buffer_duration =
- base::FeatureList::IsEnabled(features::kIncreaseInputAudioBufferSize)
- ? 100 * 1000 * 10 // 100 ms expressed in 100-ns units.
- : 0;
+ // The buffer duration is set to 100 ms, which reduces the risk of glitches.
+ // It would normally be set to 0 and the minimum buffer size to ensure that
+ // glitches do not occur would be used (typically around 22 ms). There are
+ // however cases when there are glitches anyway and it's avoided by setting a
+ // larger buffer size. The larger size does not create higher latency for
+ // properly implemented drivers.
HRESULT hr = audio_client_->Initialize(
- AUDCLNT_SHAREMODE_SHARED, flags, buffer_duration,
- 0, // device period, n/a for shared mode.
+ AUDCLNT_SHAREMODE_SHARED, flags,
+ 100 * 1000 * 10, // Buffer duration, 100 ms expressed in 100-ns units.
+ 0, // Device period, n/a for shared mode.
reinterpret_cast<const WAVEFORMATEX*>(&input_format_),
device_id_ == AudioDeviceDescription::kCommunicationsDeviceId
? &kCommunicationsSessionId
diff --git a/chromium/media/audio/win/audio_low_latency_output_win.cc b/chromium/media/audio/win/audio_low_latency_output_win.cc
index dfd700c754b..4a3fa164637 100644
--- a/chromium/media/audio/win/audio_low_latency_output_win.cc
+++ b/chromium/media/audio/win/audio_low_latency_output_win.cc
@@ -11,8 +11,8 @@
#include "base/command_line.h"
#include "base/logging.h"
-#include "base/macros.h"
#include "base/metrics/histogram.h"
+#include "base/stl_util.h"
#include "base/strings/utf_string_conversions.h"
#include "base/time/time.h"
#include "base/trace_event/trace_event.h"
@@ -407,10 +407,8 @@ void WASAPIAudioOutputStream::Run() {
// is signaled. An error event can also break the main thread loop.
while (playing && !error) {
// Wait for a close-down event, stream-switch event or a new render event.
- DWORD wait_result = WaitForMultipleObjects(arraysize(wait_array),
- wait_array,
- FALSE,
- INFINITE);
+ DWORD wait_result = WaitForMultipleObjects(base::size(wait_array),
+ wait_array, FALSE, INFINITE);
switch (wait_result) {
case WAIT_OBJECT_0 + 0:
diff --git a/chromium/media/audio/win/avrt_wrapper_win.cc b/chromium/media/audio/win/avrt_wrapper_win.cc
index ace5cba6527..2fb716fcb59 100644
--- a/chromium/media/audio/win/avrt_wrapper_win.cc
+++ b/chromium/media/audio/win/avrt_wrapper_win.cc
@@ -5,7 +5,7 @@
#include "media/audio/win/avrt_wrapper_win.h"
#include "base/logging.h"
-#include "base/macros.h"
+#include "base/stl_util.h"
namespace avrt {
@@ -24,7 +24,7 @@ bool Initialize() {
// The avrt.dll is available on Windows Vista and later.
wchar_t path[MAX_PATH] = {0};
ExpandEnvironmentStrings(L"%WINDIR%\\system32\\avrt.dll", path,
- arraysize(path));
+ base::size(path));
g_avrt = LoadLibraryExW(path, NULL, LOAD_WITH_ALTERED_SEARCH_PATH);
if (!g_avrt)
return false;
diff --git a/chromium/media/base/BUILD.gn b/chromium/media/base/BUILD.gn
index 7afd69d42d8..c102902e7d6 100644
--- a/chromium/media/base/BUILD.gn
+++ b/chromium/media/base/BUILD.gn
@@ -6,8 +6,8 @@ import("//build/config/android/config.gni")
import("//build/config/arm.gni")
import("//build/config/features.gni")
import("//build/config/jumbo.gni")
-import("//build/config/ui.gni")
import("//build/config/linux/pkg_config.gni")
+import("//build/config/ui.gni")
import("//media/media_options.gni")
import("//testing/libfuzzer/fuzzer_test.gni")
@@ -112,8 +112,6 @@ jumbo_source_set("base") {
"data_buffer.h",
"data_source.cc",
"data_source.h",
- "decode_capabilities.cc",
- "decode_capabilities.h",
"decode_status.cc",
"decode_status.h",
"decoder_buffer.cc",
@@ -154,6 +152,7 @@ jumbo_source_set("base") {
"key_systems.h",
"localized_strings.cc",
"localized_strings.h",
+ "logging_override_if_enabled.h",
"loopback_audio_converter.cc",
"loopback_audio_converter.h",
"media.cc",
@@ -182,6 +181,7 @@ jumbo_source_set("base") {
"media_track.h",
"media_tracks.cc",
"media_tracks.h",
+ "media_types.h",
"media_url_demuxer.cc",
"media_url_demuxer.h",
"media_url_params.h",
@@ -213,6 +213,8 @@ jumbo_source_set("base") {
"provision_fetcher.h",
"ranges.cc",
"ranges.h",
+ "reentrancy_checker.cc",
+ "reentrancy_checker.h",
"renderer.cc",
"renderer.h",
"renderer_client.h",
@@ -239,6 +241,8 @@ jumbo_source_set("base") {
"stream_parser_buffer.h",
"subsample_entry.cc",
"subsample_entry.h",
+ "supported_types.cc",
+ "supported_types.h",
"text_cue.cc",
"text_cue.h",
"text_ranges.cc",
@@ -284,6 +288,7 @@ jumbo_source_set("base") {
"video_types.h",
"video_util.cc",
"video_util.h",
+ "waiting.h",
"wall_clock_time_source.cc",
"wall_clock_time_source.h",
"watch_time_keys.cc",
@@ -369,7 +374,7 @@ jumbo_source_set("base") {
}
if (is_win) {
- deps += [ "//media/base/win" ]
+ public_deps += [ "//media/base/win:d3d11" ]
}
if (is_chromecast) {
@@ -452,6 +457,10 @@ static_library("test_support") {
"//ui/gfx:test_support",
"//url",
]
+
+ if (is_win) {
+ public_deps += [ "//media/base/win:d3d11_test_support" ]
+ }
}
source_set("unit_tests") {
@@ -484,7 +493,6 @@ source_set("unit_tests") {
"channel_mixing_matrix_unittest.cc",
"container_names_unittest.cc",
"data_buffer_unittest.cc",
- "decode_capabilities_unittest.cc",
"decoder_buffer_queue_unittest.cc",
"decoder_buffer_unittest.cc",
"decrypt_config_unittest.cc",
@@ -503,6 +511,7 @@ source_set("unit_tests") {
"null_video_sink_unittest.cc",
"pipeline_impl_unittest.cc",
"ranges_unittest.cc",
+ "reentrancy_checker_unittest.cc",
"renderer_factory_selector_unittest.cc",
"seekable_buffer_unittest.cc",
"serial_runner_unittest.cc",
@@ -510,6 +519,7 @@ source_set("unit_tests") {
"sinc_resampler_unittest.cc",
"stream_parser_unittest.cc",
"subsample_entry_unittest.cc",
+ "supported_types_unittest.cc",
"text_ranges_unittest.cc",
"text_renderer_unittest.cc",
"time_delta_interpolator_unittest.cc",
diff --git a/chromium/media/base/android/media_codec_loop.cc b/chromium/media/base/android/media_codec_loop.cc
index bf8418a7216..94f49f0e321 100644
--- a/chromium/media/base/android/media_codec_loop.cc
+++ b/chromium/media/base/android/media_codec_loop.cc
@@ -234,6 +234,7 @@ void MediaCodecLoop::EnqueueInputBuffer(const InputBuffer& input_buffer) {
// to send in nullptr for the source. Note that the client doesn't
// guarantee that the pointer will remain valid after we return anyway.
pending_input_buf_data_.memory = nullptr;
+ client_->OnWaiting(WaitingReason::kNoDecryptionKey);
SetState(STATE_WAITING_FOR_KEY);
// Do not call OnInputDataQueued yet.
break;
diff --git a/chromium/media/base/android/media_codec_loop.h b/chromium/media/base/android/media_codec_loop.h
index 8b5924edc96..4791db07db9 100644
--- a/chromium/media/base/android/media_codec_loop.h
+++ b/chromium/media/base/android/media_codec_loop.h
@@ -21,6 +21,7 @@
#include "media/base/encryption_scheme.h"
#include "media/base/media_export.h"
#include "media/base/subsample_entry.h"
+#include "media/base/waiting.h"
// MediaCodecLoop is based on Android's MediaCodec API.
// The MediaCodec API is required to play encrypted (as in EME) content on
@@ -184,6 +185,9 @@ class MEDIA_EXPORT MediaCodecLoop {
// If this returns false, then we transition to STATE_ERROR.
virtual bool OnDecodedFrame(const OutputBuffer& out) = 0;
+ // Notify the client when waiting for |reason|, e.g. STATE_WAITING_FOR_KEY.
+ virtual void OnWaiting(WaitingReason reason) = 0;
+
// Processes the output format change on |media_codec|. Returns true on
// success, or false to transition to the error state.
virtual bool OnOutputFormatChanged() = 0;
diff --git a/chromium/media/base/android/media_codec_loop_unittest.cc b/chromium/media/base/android/media_codec_loop_unittest.cc
index d257535a84e..1e2eb8c86e7 100644
--- a/chromium/media/base/android/media_codec_loop_unittest.cc
+++ b/chromium/media/base/android/media_codec_loop_unittest.cc
@@ -11,6 +11,7 @@
#include "base/threading/thread_task_runner_handle.h"
#include "media/base/android/media_codec_bridge.h"
#include "media/base/android/mock_media_codec_bridge.h"
+#include "media/base/waiting.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -35,6 +36,7 @@ class MockMediaCodecLoopClient : public StrictMock<MediaCodecLoop::Client> {
MOCK_METHOD1(OnInputDataQueued, void(bool));
MOCK_METHOD1(OnDecodedEos, bool(const MediaCodecLoop::OutputBuffer&));
MOCK_METHOD1(OnDecodedFrame, bool(const MediaCodecLoop::OutputBuffer&));
+ MOCK_METHOD1(OnWaiting, void(WaitingReason reason));
MOCK_METHOD0(OnOutputFormatChanged, bool());
MOCK_METHOD0(OnCodecLoopError, void());
};
@@ -449,6 +451,8 @@ TEST_F(MediaCodecLoopTest, TestOnKeyAdded) {
// Notify MCL that it's missing the key.
ExpectQueueInputBuffer(input_buffer_index, data, MEDIA_CODEC_NO_KEY);
+ EXPECT_CALL(*client_, OnWaiting(WaitingReason::kNoDecryptionKey)).Times(1);
+
// MCL should now try for output buffers.
ExpectDequeueOutputBuffer(MEDIA_CODEC_TRY_AGAIN_LATER);
diff --git a/chromium/media/base/android/media_drm_bridge.cc b/chromium/media/base/android/media_drm_bridge.cc
index c576edf83f1..9d2d1f09c96 100644
--- a/chromium/media/base/android/media_drm_bridge.cc
+++ b/chromium/media/base/android/media_drm_bridge.cc
@@ -14,13 +14,12 @@
#include "base/android/jni_string.h"
#include "base/bind.h"
#include "base/callback_helpers.h"
-#include "base/containers/hash_tables.h"
#include "base/feature_list.h"
#include "base/location.h"
#include "base/logging.h"
-#include "base/macros.h"
#include "base/metrics/histogram_macros.h"
#include "base/single_thread_task_runner.h"
+#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_util.h"
#include "base/sys_byteorder.h"
@@ -162,7 +161,7 @@ class KeySystemManager {
KeySystemManager::KeySystemManager() {
// Widevine is always supported in Android.
key_system_uuid_map_[kWidevineKeySystem] =
- UUID(kWidevineUuid, kWidevineUuid + arraysize(kWidevineUuid));
+ UUID(kWidevineUuid, kWidevineUuid + base::size(kWidevineUuid));
MediaDrmBridgeClient* client = GetMediaDrmBridgeClient();
if (client)
client->AddKeySystemUUIDMappings(&key_system_uuid_map_);
@@ -264,14 +263,6 @@ bool AreMediaDrmApisAvailable() {
return true;
}
-bool IsPersistentLicenseTypeSupportedByMediaDrm() {
- return MediaDrmBridge::IsAvailable() &&
- // In development. See http://crbug.com/493521
- base::FeatureList::IsEnabled(kMediaDrmPersistentLicense) &&
- base::android::BuildInfo::GetInstance()->sdk_int() >=
- base::android::SDK_VERSION_MARSHMALLOW;
-}
-
} // namespace
// MediaDrm is not generally usable without MediaCodec. Thus, both the MediaDrm
@@ -290,11 +281,20 @@ bool MediaDrmBridge::IsKeySystemSupported(const std::string& key_system) {
}
// static
+bool MediaDrmBridge::IsPerOriginProvisioningSupported() {
+ return base::android::BuildInfo::GetInstance()->sdk_int() >=
+ base::android::SDK_VERSION_MARSHMALLOW;
+}
+
+// static
bool MediaDrmBridge::IsPersistentLicenseTypeSupported(
- const std::string& key_system) {
+ const std::string& /* key_system */) {
// TODO(yucliu): Check |key_system| if persistent license is supported by
// MediaDrm.
- return IsPersistentLicenseTypeSupportedByMediaDrm();
+ return MediaDrmBridge::IsAvailable() &&
+ // In development. See http://crbug.com/493521
+ base::FeatureList::IsEnabled(kMediaDrmPersistentLicense) &&
+ IsPerOriginProvisioningSupported();
}
// static
@@ -343,7 +343,7 @@ scoped_refptr<MediaDrmBridge> MediaDrmBridge::CreateInternal(
std::move(storage), create_fetcher_cb, session_message_cb,
session_closed_cb, session_keys_change_cb, session_expiration_update_cb));
- if (media_drm_bridge->j_media_drm_.is_null())
+ if (!media_drm_bridge->j_media_drm_)
return nullptr;
return media_drm_bridge;
@@ -433,7 +433,7 @@ void MediaDrmBridge::CreateSessionAndGenerateRequest(
}
}
- if (j_init_data.is_null()) {
+ if (!j_init_data) {
j_init_data =
base::android::ToJavaByteArray(env, init_data.data(), init_data.size());
}
@@ -455,7 +455,8 @@ void MediaDrmBridge::LoadSession(
DCHECK(task_runner_->BelongsToCurrentThread());
DVLOG(2) << __func__;
- DCHECK(IsPersistentLicenseTypeSupportedByMediaDrm());
+ // Key system is not used, so just pass an empty string here.
+ DCHECK(IsPersistentLicenseTypeSupported(""));
if (session_type != CdmSessionType::kPersistentLicense) {
promise->reject(
@@ -561,6 +562,17 @@ bool MediaDrmBridge::IsSecureCodecRequired() {
return true;
}
+void MediaDrmBridge::Provision(
+ base::OnceCallback<void(bool)> provisioning_complete_cb) {
+ DVLOG(1) << __func__;
+ DCHECK(provisioning_complete_cb);
+ DCHECK(!provisioning_complete_cb_);
+ provisioning_complete_cb_ = std::move(provisioning_complete_cb);
+
+ JNIEnv* env = AttachCurrentThread();
+ Java_MediaDrmBridge_provision(env, j_media_drm_);
+}
+
void MediaDrmBridge::Unprovision() {
DVLOG(1) << __func__;
@@ -632,7 +644,7 @@ void MediaDrmBridge::OnMediaCryptoReady(
base::Passed(CreateJavaObjectPtr(j_media_crypto.obj()))));
}
-void MediaDrmBridge::OnStartProvisioning(
+void MediaDrmBridge::OnProvisionRequest(
JNIEnv* env,
const JavaParamRef<jobject>& j_media_drm,
const JavaParamRef<jstring>& j_default_url,
@@ -648,6 +660,18 @@ void MediaDrmBridge::OnStartProvisioning(
std::move(request_data)));
}
+void MediaDrmBridge::OnProvisioningComplete(
+ JNIEnv* env,
+ const base::android::JavaParamRef<jobject>& j_media_drm,
+ bool success) {
+ DVLOG(1) << __func__;
+
+ // This should only be called as result of a call to Provision().
+ DCHECK(provisioning_complete_cb_);
+ task_runner_->PostTask(
+ FROM_HERE, base::BindOnce(std::move(provisioning_complete_cb_), success));
+}
+
void MediaDrmBridge::OnPromiseResolved(JNIEnv* env,
const JavaParamRef<jobject>& j_media_drm,
jint j_promise_id) {
@@ -828,9 +852,8 @@ MediaDrmBridge::MediaDrmBridge(
// TODO(yucliu): Remove the check once persistent storage is fully
// supported and check if origin is valid.
base::FeatureList::IsEnabled(kMediaDrmPersistentLicense) &&
- // MediaDrm implements origin isolated storage on Marshmallow.
- base::android::BuildInfo::GetInstance()->sdk_int() >=
- base::android::SDK_VERSION_MARSHMALLOW &&
+ // Per-origin provisioning must be supported for origin isolated storage.
+ IsPerOriginProvisioningSupported() &&
// origin id can be empty when MediaDrmBridge is created by
// CreateWithoutSessionSupport, which is used for unprovisioning.
!origin_id.empty();
@@ -853,7 +876,7 @@ MediaDrmBridge::~MediaDrmBridge() {
// After the call to Java_MediaDrmBridge_destroy() Java won't call native
// methods anymore, this is ensured by MediaDrmBridge.java.
- if (!j_media_drm_.is_null())
+ if (j_media_drm_)
Java_MediaDrmBridge_destroy(env, j_media_drm_);
player_tracker_.NotifyCdmUnset();
diff --git a/chromium/media/base/android/media_drm_bridge.h b/chromium/media/base/android/media_drm_bridge.h
index 6ee3b9861a6..c801361461d 100644
--- a/chromium/media/base/android/media_drm_bridge.h
+++ b/chromium/media/base/android/media_drm_bridge.h
@@ -74,6 +74,10 @@ class MEDIA_EXPORT MediaDrmBridge : public ContentDecryptionModule,
const std::string& key_system,
const std::string& container_mime_type);
+ // Whether per-origin provisioning (setting "origin" property on MediaDrm) is
+ // supported or not. If false, per-device provisioning is used.
+ static bool IsPerOriginProvisioningSupported();
+
static bool IsPersistentLicenseTypeSupported(const std::string& key_system);
// Returns the list of the platform-supported key system names that
@@ -120,6 +124,11 @@ class MEDIA_EXPORT MediaDrmBridge : public ContentDecryptionModule,
// CdmContext implementation.
MediaCryptoContext* GetMediaCryptoContext() override;
+ // Provision the origin bound with |this|. |provisioning_complete_cb| will be
+ // called asynchronously to indicate whether this was successful or not.
+ // MediaDrmBridge must be created with a valid origin ID.
+ void Provision(base::OnceCallback<void(bool)> provisioning_complete_cb);
+
// Unprovision the origin bound with |this|. This will remove the cert for
// current origin and leave the offline licenses in invalid state (offline
// licenses can't be used anymore).
@@ -165,12 +174,19 @@ class MEDIA_EXPORT MediaDrmBridge : public ContentDecryptionModule,
const base::android::JavaParamRef<jobject>& j_media_crypto);
// Called by Java when we need to send a provisioning request,
- void OnStartProvisioning(
+ void OnProvisionRequest(
JNIEnv* env,
const base::android::JavaParamRef<jobject>& j_media_drm,
const base::android::JavaParamRef<jstring>& j_default_url,
const base::android::JavaParamRef<jbyteArray>& j_request_data);
+ // Called by Java when provisioning is complete. This is only in response to a
+ // provision() request.
+ void OnProvisioningComplete(
+ JNIEnv* env,
+ const base::android::JavaParamRef<jobject>& j_media_drm,
+ bool success);
+
// Callbacks to resolve the promise for |promise_id|.
void OnPromiseResolved(
JNIEnv* env,
@@ -312,6 +328,9 @@ class MEDIA_EXPORT MediaDrmBridge : public ContentDecryptionModule,
// Non-null iff when a provision request is pending.
std::unique_ptr<ProvisionFetcher> provision_fetcher_;
+ // The callback to be called when provisioning is complete.
+ base::OnceCallback<void(bool)> provisioning_complete_cb_;
+
// Callbacks for firing session events.
SessionMessageCB session_message_cb_;
SessionClosedCB session_closed_cb_;
diff --git a/chromium/media/base/android/media_drm_bridge_client.h b/chromium/media/base/android/media_drm_bridge_client.h
index dca8161a0c5..fc083a40e50 100644
--- a/chromium/media/base/android/media_drm_bridge_client.h
+++ b/chromium/media/base/android/media_drm_bridge_client.h
@@ -8,10 +8,10 @@
#include <stdint.h>
#include <string>
+#include <unordered_map>
#include <utility>
#include <vector>
-#include "base/containers/hash_tables.h"
#include "base/macros.h"
#include "media/base/media_export.h"
@@ -36,7 +36,7 @@ using UUID = std::vector<uint8_t>;
// provide customized additions to Android's media handling.
class MEDIA_EXPORT MediaDrmBridgeClient {
public:
- typedef base::hash_map<std::string, UUID> KeySystemUuidMap;
+ typedef std::unordered_map<std::string, UUID> KeySystemUuidMap;
MediaDrmBridgeClient();
virtual ~MediaDrmBridgeClient();
diff --git a/chromium/media/base/android/media_drm_bridge_unittest.cc b/chromium/media/base/android/media_drm_bridge_unittest.cc
index 2458dfd045f..664854d7605 100644
--- a/chromium/media/base/android/media_drm_bridge_unittest.cc
+++ b/chromium/media/base/android/media_drm_bridge_unittest.cc
@@ -6,14 +6,20 @@
#include "base/android/build_info.h"
#include "base/bind.h"
+#include "base/bind_helpers.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
-#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "base/test/scoped_task_environment.h"
#include "media/base/android/media_drm_bridge.h"
#include "media/base/provision_fetcher.h"
+#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "third_party/widevine/cdm/widevine_cdm_common.h"
+using ::testing::_;
+using ::testing::StrictMock;
+
namespace media {
#define EXPECT_TRUE_IF_WIDEVINE_AVAILABLE(a) \
@@ -36,6 +42,7 @@ const MediaDrmBridge::SecurityLevel kDefault =
const MediaDrmBridge::SecurityLevel kL1 = MediaDrmBridge::SECURITY_LEVEL_1;
const MediaDrmBridge::SecurityLevel kL3 = MediaDrmBridge::SECURITY_LEVEL_3;
const char kTestOrigin[] = "http://www.example.com";
+const char kEmptyOrigin[] = "";
// Helper functions to avoid typing "MediaDrmBridge::" in tests.
@@ -47,21 +54,72 @@ static bool IsKeySystemSupportedWithType(
}
namespace {
-// Mock ProvisionFetcher.
-class MockProvisionFetcher : public ProvisionFetcher {
+
+// This class is simply a wrapper that passes on calls to Retrieve() to another
+// implementation that is provided to the constructor. This is created as
+// MediaDrmBridge::CreateWithoutSessionSupport() requires the creation of a new
+// ProvisionFetcher each time it needs to retrieve a license from the license
+// server.
+class ProvisionFetcherWrapper : public ProvisionFetcher {
public:
- static std::unique_ptr<ProvisionFetcher> Create() {
- return base::WrapUnique(new MockProvisionFetcher);
- }
+ explicit ProvisionFetcherWrapper(ProvisionFetcher* provision_fetcher)
+ : provision_fetcher_(provision_fetcher) {}
// ProvisionFetcher implementation.
void Retrieve(const std::string& default_url,
const std::string& request_data,
- const ResponseCB& response_cb) override {}
+ const ResponseCB& response_cb) override {
+ provision_fetcher_->Retrieve(default_url, request_data, response_cb);
+ }
+
+ private:
+ ProvisionFetcher* provision_fetcher_;
};
-} // namespace (anonymous)
-TEST(MediaDrmBridgeTest, IsKeySystemSupported_Widevine) {
+} // namespace
+
+class MediaDrmBridgeTest : public ProvisionFetcher, public testing::Test {
+ public:
+ MediaDrmBridgeTest() {}
+
+ void CreateWithoutSessionSupport(
+ const std::string& key_system,
+ const std::string& origin_id,
+ MediaDrmBridge::SecurityLevel security_level) {
+ media_drm_bridge_ = MediaDrmBridge::CreateWithoutSessionSupport(
+ key_system, origin_id, security_level,
+ base::BindRepeating(&MediaDrmBridgeTest::CreateProvisionFetcher,
+ base::Unretained(this)));
+ }
+
+ // ProvisionFetcher implementation. Done as a mock method so we can properly
+ // check if |media_drm_bridge_| invokes it or not.
+ MOCK_METHOD3(Retrieve,
+ void(const std::string& default_url,
+ const std::string& request_data,
+ const ResponseCB& response_cb));
+
+ void Provision() {
+ media_drm_bridge_->Provision(base::BindOnce(
+ &MediaDrmBridgeTest::ProvisioningDone, base::Unretained(this)));
+ }
+
+ // MediaDrmBridge::Provision() requires a callback that is called when
+ // provisioning completes and indicates if it succeeds or not.
+ MOCK_METHOD1(ProvisioningDone, void(bool));
+
+ protected:
+ scoped_refptr<MediaDrmBridge> media_drm_bridge_;
+
+ private:
+ std::unique_ptr<ProvisionFetcher> CreateProvisionFetcher() {
+ return std::make_unique<ProvisionFetcherWrapper>(this);
+ }
+
+ base::test::ScopedTaskEnvironment scoped_task_environment_;
+};
+
+TEST_F(MediaDrmBridgeTest, IsKeySystemSupported_Widevine) {
// TODO(xhwang): Enable when b/13564917 is fixed.
// EXPECT_TRUE_IF_AVAILABLE(
// IsKeySystemSupportedWithType(kWidevineKeySystem, kAudioMp4));
@@ -85,7 +143,7 @@ TEST(MediaDrmBridgeTest, IsKeySystemSupported_Widevine) {
}
// Invalid key system is NOT supported regardless whether MediaDrm is available.
-TEST(MediaDrmBridgeTest, IsKeySystemSupported_InvalidKeySystem) {
+TEST_F(MediaDrmBridgeTest, IsKeySystemSupported_InvalidKeySystem) {
EXPECT_FALSE(MediaDrmBridge::IsKeySystemSupported(kInvalidKeySystem));
EXPECT_FALSE(IsKeySystemSupportedWithType(kInvalidKeySystem, kAudioMp4));
EXPECT_FALSE(IsKeySystemSupportedWithType(kInvalidKeySystem, kVideoMp4));
@@ -96,33 +154,81 @@ TEST(MediaDrmBridgeTest, IsKeySystemSupported_InvalidKeySystem) {
EXPECT_FALSE(IsKeySystemSupportedWithType(kInvalidKeySystem, "audio/mp3"));
}
-TEST(MediaDrmBridgeTest, CreateWithoutSessionSupport_Widevine) {
- base::MessageLoop message_loop_;
- EXPECT_TRUE_IF_WIDEVINE_AVAILABLE(MediaDrmBridge::CreateWithoutSessionSupport(
- kWidevineKeySystem, kTestOrigin, kDefault,
- base::Bind(&MockProvisionFetcher::Create)));
+TEST_F(MediaDrmBridgeTest, CreateWithoutSessionSupport_Widevine) {
+ CreateWithoutSessionSupport(kWidevineKeySystem, kTestOrigin, kDefault);
+ EXPECT_TRUE_IF_WIDEVINE_AVAILABLE(media_drm_bridge_);
}
// Invalid key system is NOT supported regardless whether MediaDrm is available.
-TEST(MediaDrmBridgeTest, CreateWithoutSessionSupport_InvalidKeySystem) {
- base::MessageLoop message_loop_;
- EXPECT_FALSE(MediaDrmBridge::CreateWithoutSessionSupport(
- kInvalidKeySystem, kTestOrigin, kDefault,
- base::Bind(&MockProvisionFetcher::Create)));
+TEST_F(MediaDrmBridgeTest, CreateWithoutSessionSupport_InvalidKeySystem) {
+ CreateWithoutSessionSupport(kInvalidKeySystem, kTestOrigin, kDefault);
+ EXPECT_FALSE(media_drm_bridge_);
}
-TEST(MediaDrmBridgeTest, CreateWithSecurityLevel_Widevine) {
- base::MessageLoop message_loop_;
-
+TEST_F(MediaDrmBridgeTest, CreateWithSecurityLevel_Widevine) {
// We test "L3" fully. But for "L1" we don't check the result as it depends on
// whether the test device supports "L1".
- EXPECT_TRUE_IF_WIDEVINE_AVAILABLE(MediaDrmBridge::CreateWithoutSessionSupport(
- kWidevineKeySystem, kTestOrigin, kL3,
- base::Bind(&MockProvisionFetcher::Create)));
+ CreateWithoutSessionSupport(kWidevineKeySystem, kTestOrigin, kL3);
+ EXPECT_TRUE_IF_WIDEVINE_AVAILABLE(media_drm_bridge_);
+
+ CreateWithoutSessionSupport(kWidevineKeySystem, kTestOrigin, kL1);
+}
+
+TEST_F(MediaDrmBridgeTest, Provision_Widevine) {
+ // Only test this if Widevine is supported. Otherwise
+ // CreateWithoutSessionSupport() will return null and it can't be tested.
+ if (!MediaDrmBridge::IsKeySystemSupported(kWidevineKeySystem)) {
+ VLOG(0) << "Widevine not supported on device.";
+ return;
+ }
+
+ // Provisioning requires the use of origin isolated storage, so skip this test
+ // if it's not supported.
+ if (!MediaDrmBridge::IsPerOriginProvisioningSupported()) {
+ VLOG(0) << "Origin isolated storage not supported on device.";
+ return;
+ }
+
+ // Calling Provision() later should trigger a provisioning request. As we
+ // can't pass the request to a license server,
+ // MockProvisionFetcher::Retrieve() simply drops the request and never
+ // responds. As a result, there should be a call to Retrieve() but not to
+ // ProvisioningDone() (CB passed to Provision()) as the provisioning never
+ // completes.
+ EXPECT_CALL(*this, Retrieve(_, _, _));
+ EXPECT_CALL(*this, ProvisioningDone(_)).Times(0);
+
+ // Create MediaDrmBridge. We only test "L3" as "L1" depends on whether the
+ // test device supports it or not.
+ CreateWithoutSessionSupport(kWidevineKeySystem, kTestOrigin, kL3);
+ EXPECT_TRUE(media_drm_bridge_);
+ Provision();
+
+ // ProvisioningDone() callback is executed asynchronously.
+ base::RunLoop().RunUntilIdle();
+}
+
+TEST_F(MediaDrmBridgeTest, Provision_Widevine_NoOrigin) {
+ // Only test this if Widevine is supported. Otherwise
+ // CreateWithoutSessionSupport() will return null and it can't be tested.
+ if (!MediaDrmBridge::IsKeySystemSupported(kWidevineKeySystem)) {
+ VLOG(0) << "Widevine not supported on device.";
+ return;
+ }
+
+ // Calling Provision() later should fail as the origin is not provided (or
+ // origin isolated storage is not available). No provisioning request should
+ // be attempted.
+ EXPECT_CALL(*this, ProvisioningDone(false));
+
+ // Create MediaDrmBridge. We only test "L3" as "L1" depends on whether the
+ // test device supports it or not.
+ CreateWithoutSessionSupport(kWidevineKeySystem, kEmptyOrigin, kL3);
+ EXPECT_TRUE(media_drm_bridge_);
+ Provision();
- MediaDrmBridge::CreateWithoutSessionSupport(
- kWidevineKeySystem, kTestOrigin, kL1,
- base::Bind(&MockProvisionFetcher::Create));
+ // ProvisioningDone() callback is executed asynchronously.
+ base::RunLoop().RunUntilIdle();
}
} // namespace media
diff --git a/chromium/media/base/android/media_player_bridge_unittest.cc b/chromium/media/base/android/media_player_bridge_unittest.cc
index 48a9bf1fb01..6a9ac2ac96d 100644
--- a/chromium/media/base/android/media_player_bridge_unittest.cc
+++ b/chromium/media/base/android/media_player_bridge_unittest.cc
@@ -35,7 +35,6 @@ class MockMediaPlayerManager : public MediaPlayerManager {
MOCK_METHOD2(OnError, void(int player_id, int error));
MOCK_METHOD3(OnVideoSizeChanged, void(int player_id, int width, int height));
MOCK_METHOD2(OnAudibleStateChanged, void(int player_id, bool is_audible_now));
- MOCK_METHOD1(OnWaitingForDecryptionKey, void(int player_id));
MOCK_METHOD1(GetPlayer, MediaPlayerAndroid*(int player_id));
MOCK_METHOD3(RequestPlay,
bool(int player_id, base::TimeDelta duration, bool has_audio));
diff --git a/chromium/media/base/audio_bus_unittest.cc b/chromium/media/base/audio_bus_unittest.cc
index 39f2f425f9e..aacafb95558 100644
--- a/chromium/media/base/audio_bus_unittest.cc
+++ b/chromium/media/base/audio_bus_unittest.cc
@@ -8,8 +8,8 @@
#include <limits>
#include <memory>
-#include "base/macros.h"
#include "base/memory/aligned_memory.h"
+#include "base/stl_util.h"
#include "base/strings/stringprintf.h"
#include "base/time/time.h"
#include "build/build_config.h"
@@ -305,7 +305,7 @@ static const int kTestVectorFrameCount = kTestVectorSize / 2;
static const float kTestVectorResult[][kTestVectorFrameCount] = {
{-1.0f, 1.0f, 0.5f, 0.0f, 0.0f},
{0.0f, -1.0f, -0.5f, 1.0f, 0.0f}};
-static const int kTestVectorChannelCount = arraysize(kTestVectorResult);
+static const int kTestVectorChannelCount = base::size(kTestVectorResult);
// Verify FromInterleaved() deinterleaves audio in supported formats correctly.
TEST_F(AudioBusTest, FromInterleaved) {
@@ -442,26 +442,26 @@ TEST_F(AudioBusTest, ToInterleaved) {
// Test deprecated version that takes |bytes_per_sample| as an input.
{
SCOPED_TRACE("uint8_t");
- uint8_t test_array[arraysize(kTestVectorUint8)];
+ uint8_t test_array[base::size(kTestVectorUint8)];
bus->ToInterleaved(bus->frames(), sizeof(*kTestVectorUint8), test_array);
ASSERT_EQ(0,
memcmp(test_array, kTestVectorUint8, sizeof(kTestVectorUint8)));
}
{
SCOPED_TRACE("int16_t");
- int16_t test_array[arraysize(kTestVectorInt16)];
+ int16_t test_array[base::size(kTestVectorInt16)];
bus->ToInterleaved(bus->frames(), sizeof(*kTestVectorInt16), test_array);
ASSERT_EQ(0,
memcmp(test_array, kTestVectorInt16, sizeof(kTestVectorInt16)));
}
{
SCOPED_TRACE("int32_t");
- int32_t test_array[arraysize(kTestVectorInt32)];
+ int32_t test_array[base::size(kTestVectorInt32)];
bus->ToInterleaved(bus->frames(), sizeof(*kTestVectorInt32), test_array);
// Some compilers get better precision than others on the half-max test, so
// let the test pass with an off by one check on the half-max.
- int32_t alternative_acceptable_result[arraysize(kTestVectorInt32)];
+ int32_t alternative_acceptable_result[base::size(kTestVectorInt32)];
memcpy(alternative_acceptable_result, kTestVectorInt32,
sizeof(kTestVectorInt32));
ASSERT_EQ(alternative_acceptable_result[4],
@@ -478,26 +478,26 @@ TEST_F(AudioBusTest, ToInterleaved) {
// parameter.
{
SCOPED_TRACE("UnsignedInt8SampleTypeTraits");
- uint8_t test_array[arraysize(kTestVectorUint8)];
+ uint8_t test_array[base::size(kTestVectorUint8)];
bus->ToInterleaved<UnsignedInt8SampleTypeTraits>(bus->frames(), test_array);
ASSERT_EQ(0,
memcmp(test_array, kTestVectorUint8, sizeof(kTestVectorUint8)));
}
{
SCOPED_TRACE("SignedInt16SampleTypeTraits");
- int16_t test_array[arraysize(kTestVectorInt16)];
+ int16_t test_array[base::size(kTestVectorInt16)];
bus->ToInterleaved<SignedInt16SampleTypeTraits>(bus->frames(), test_array);
ASSERT_EQ(0,
memcmp(test_array, kTestVectorInt16, sizeof(kTestVectorInt16)));
}
{
SCOPED_TRACE("SignedInt32SampleTypeTraits");
- int32_t test_array[arraysize(kTestVectorInt32)];
+ int32_t test_array[base::size(kTestVectorInt32)];
bus->ToInterleaved<SignedInt32SampleTypeTraits>(bus->frames(), test_array);
// Some compilers get better precision than others on the half-max test, so
// let the test pass with an off by one check on the half-max.
- int32_t alternative_acceptable_result[arraysize(kTestVectorInt32)];
+ int32_t alternative_acceptable_result[base::size(kTestVectorInt32)];
memcpy(alternative_acceptable_result, kTestVectorInt32,
sizeof(kTestVectorInt32));
ASSERT_EQ(alternative_acceptable_result[4],
@@ -511,7 +511,7 @@ TEST_F(AudioBusTest, ToInterleaved) {
}
{
SCOPED_TRACE("Float32SampleTypeTraits");
- float test_array[arraysize(kTestVectorFloat32)];
+ float test_array[base::size(kTestVectorFloat32)];
bus->ToInterleaved<Float32SampleTypeTraits>(bus->frames(), test_array);
ASSERT_EQ(
0, memcmp(test_array, kTestVectorFloat32, sizeof(kTestVectorFloat32)));
@@ -538,7 +538,7 @@ TEST_F(AudioBusTest, ToInterleavedSanitized) {
bus->frames());
// Verify FromInterleaved applied no sanity.
ASSERT_EQ(bus->channel(0)[0], kTestVectorFloat32Invalid[0]);
- float test_array[arraysize(kTestVectorFloat32)];
+ float test_array[base::size(kTestVectorFloat32)];
bus->ToInterleaved<Float32SampleTypeTraits>(bus->frames(), test_array);
ASSERT_EQ(0,
memcmp(test_array, kTestVectorFloat32, sizeof(kTestVectorFloat32)));
@@ -561,7 +561,7 @@ TEST_F(AudioBusTest, ToInterleavedPartial) {
// Test deprecated version that takes |bytes_per_sample| as an input.
{
SCOPED_TRACE("int16_t");
- int16_t test_array[arraysize(kTestVectorInt16)];
+ int16_t test_array[base::size(kTestVectorInt16)];
expected->ToInterleavedPartial(kPartialStart, kPartialFrames,
sizeof(*kTestVectorInt16), test_array);
ASSERT_EQ(0, memcmp(test_array, kTestVectorInt16 +
@@ -574,7 +574,7 @@ TEST_F(AudioBusTest, ToInterleavedPartial) {
// parameter.
{
SCOPED_TRACE("Float32SampleTypeTraits");
- float test_array[arraysize(kTestVectorFloat32)];
+ float test_array[base::size(kTestVectorFloat32)];
expected->ToInterleavedPartial<Float32SampleTypeTraits>(
kPartialStart, kPartialFrames, test_array);
ASSERT_EQ(0, memcmp(test_array, kTestVectorFloat32 +
diff --git a/chromium/media/base/audio_decoder.h b/chromium/media/base/audio_decoder.h
index c66ffa76ce8..e2d5926163b 100644
--- a/chromium/media/base/audio_decoder.h
+++ b/chromium/media/base/audio_decoder.h
@@ -16,6 +16,7 @@
#include "media/base/decoder_buffer.h"
#include "media/base/media_export.h"
#include "media/base/pipeline_status.h"
+#include "media/base/waiting.h"
namespace media {
@@ -35,10 +36,6 @@ class MEDIA_EXPORT AudioDecoder {
// DecoderBuffer, indicating that the pipeline can send next buffer to decode.
using DecodeCB = base::Callback<void(DecodeStatus)>;
- // Callback for whenever the key needed to decrypt the stream is not
- // available. May be called at any time after Initialize().
- using WaitingForDecryptionKeyCB = base::RepeatingClosure;
-
AudioDecoder();
// Fires any pending callbacks, stops and destroys the decoder.
@@ -67,14 +64,14 @@ class MEDIA_EXPORT AudioDecoder {
// stream is not encrypted.
// |init_cb| is used to return initialization status.
// |output_cb| is called for decoded audio buffers (see Decode()).
- // |waiting_for_decryption_key_cb| is called whenever the key needed to
- // decrypt the stream is not available.
- virtual void Initialize(
- const AudioDecoderConfig& config,
- CdmContext* cdm_context,
- const InitCB& init_cb,
- const OutputCB& output_cb,
- const WaitingForDecryptionKeyCB& waiting_for_decryption_key_cb) = 0;
+ // |waiting_cb| is called whenever the decoder is stalled waiting for
+ // something, e.g. decryption key. May be called at any time after
+ // Initialize().
+ virtual void Initialize(const AudioDecoderConfig& config,
+ CdmContext* cdm_context,
+ const InitCB& init_cb,
+ const OutputCB& output_cb,
+ const WaitingCB& waiting_cb) = 0;
// Requests samples to be decoded. Only one decode may be in flight at any
// given time. Once the buffer is decoded the decoder calls |decode_cb|.
diff --git a/chromium/media/base/audio_hash.cc b/chromium/media/base/audio_hash.cc
index 404e6f47468..9abafb3efca 100644
--- a/chromium/media/base/audio_hash.cc
+++ b/chromium/media/base/audio_hash.cc
@@ -7,8 +7,8 @@
#include "media/base/audio_hash.h"
-#include "base/macros.h"
#include "base/numerics/math_constants.h"
+#include "base/stl_util.h"
#include "base/strings/stringprintf.h"
#include "media/base/audio_bus.h"
@@ -29,7 +29,7 @@ void AudioHash::Update(const AudioBus* audio_bus, int frames) {
for (uint32_t i = 0; i < static_cast<uint32_t>(frames); ++i) {
const uint32_t kSampleIndex = sample_count_ + i;
const uint32_t kHashIndex =
- (kSampleIndex * (ch + 1)) % arraysize(audio_hash_);
+ (kSampleIndex * (ch + 1)) % base::size(audio_hash_);
// Mix in a sine wave with the result so we ensure that sequences of empty
// buffers don't result in an empty hash.
@@ -48,7 +48,7 @@ void AudioHash::Update(const AudioBus* audio_bus, int frames) {
std::string AudioHash::ToString() const {
std::string result;
- for (size_t i = 0; i < arraysize(audio_hash_); ++i)
+ for (size_t i = 0; i < base::size(audio_hash_); ++i)
result += base::StringPrintf("%.2f,", audio_hash_[i]);
return result;
}
@@ -58,7 +58,7 @@ bool AudioHash::IsEquivalent(const std::string& other, double tolerance) const {
char comma;
std::stringstream is(other);
- for (size_t i = 0; i < arraysize(audio_hash_); ++i) {
+ for (size_t i = 0; i < base::size(audio_hash_); ++i) {
is >> other_hash >> comma;
if (std::fabs(audio_hash_[i] - other_hash) > tolerance)
return false;
diff --git a/chromium/media/base/audio_parameters_unittest.cc b/chromium/media/base/audio_parameters_unittest.cc
index d3ecf4be962..3d7684d3ca4 100644
--- a/chromium/media/base/audio_parameters_unittest.cc
+++ b/chromium/media/base/audio_parameters_unittest.cc
@@ -4,7 +4,7 @@
#include <stddef.h>
-#include "base/macros.h"
+#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "media/base/audio_parameters.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -129,8 +129,8 @@ TEST(AudioParameters, Compare) {
CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC, 2000, 200),
};
- for (size_t i = 0; i < arraysize(values); ++i) {
- for (size_t j = 0; j < arraysize(values); ++j) {
+ for (size_t i = 0; i < base::size(values); ++i) {
+ for (size_t j = 0; j < base::size(values); ++j) {
SCOPED_TRACE("i=" + base::NumberToString(i) +
" j=" + base::NumberToString(j));
EXPECT_EQ(i < j, values[i] < values[j]);
diff --git a/chromium/media/base/audio_renderer_mixer_unittest.cc b/chromium/media/base/audio_renderer_mixer_unittest.cc
index 1c00ec274b7..a29616a23f6 100644
--- a/chromium/media/base/audio_renderer_mixer_unittest.cc
+++ b/chromium/media/base/audio_renderer_mixer_unittest.cc
@@ -14,7 +14,7 @@
#include "base/bind.h"
#include "base/bind_helpers.h"
-#include "base/macros.h"
+#include "base/stl_util.h"
#include "base/synchronization/waitable_event.h"
#include "base/test/scoped_task_environment.h"
#include "base/threading/platform_thread.h"
@@ -540,19 +540,19 @@ INSTANTIATE_TEST_CASE_P(
// Downsampling, multuple input sample rates.
std::make_tuple(static_cast<const int* const>(kTestInput3Rates),
- arraysize(kTestInput3Rates),
+ base::size(kTestInput3Rates),
kTestInput3Rates[0],
0.01),
// Upsampling, multiple sinput sample rates.
std::make_tuple(static_cast<const int* const>(kTestInput3Rates),
- arraysize(kTestInput3Rates),
+ base::size(kTestInput3Rates),
kTestInput3Rates[2],
0.01),
// Both downsampling and upsampling, multiple input sample rates
std::make_tuple(static_cast<const int* const>(kTestInput3Rates),
- arraysize(kTestInput3Rates),
+ base::size(kTestInput3Rates),
kTestInput3Rates[1],
0.01)));
diff --git a/chromium/media/base/audio_shifter.cc b/chromium/media/base/audio_shifter.cc
index 98ca87b460f..16cf224bd0a 100644
--- a/chromium/media/base/audio_shifter.cc
+++ b/chromium/media/base/audio_shifter.cc
@@ -79,11 +79,11 @@ class ClockSmoother {
};
AudioShifter::AudioQueueEntry::AudioQueueEntry(
- base::TimeTicks target_playout_time_,
- std::unique_ptr<AudioBus> audio_)
- : target_playout_time(target_playout_time_), audio(audio_.release()) {}
+ base::TimeTicks target_playout_time,
+ std::unique_ptr<AudioBus> audio)
+ : target_playout_time(target_playout_time), audio(std::move(audio)) {}
-AudioShifter::AudioQueueEntry::AudioQueueEntry(const AudioQueueEntry& other) =
+AudioShifter::AudioQueueEntry::AudioQueueEntry(AudioQueueEntry&& other) =
default;
AudioShifter::AudioQueueEntry::~AudioQueueEntry() = default;
diff --git a/chromium/media/base/audio_shifter.h b/chromium/media/base/audio_shifter.h
index 737f00a7881..48cbdb36326 100644
--- a/chromium/media/base/audio_shifter.h
+++ b/chromium/media/base/audio_shifter.h
@@ -10,7 +10,6 @@
#include <memory>
#include "base/containers/circular_deque.h"
-#include "base/memory/linked_ptr.h"
#include "base/time/time.h"
#include "media/base/media_export.h"
#include "media/base/multi_channel_resampler.h"
@@ -41,14 +40,14 @@ class MEDIA_EXPORT AudioShifter {
// |max_buffer_size| is how much audio we are allowed to buffer.
// Often, this can be set fairly large as Push() will limit the
// size when it specifies when to play the audio.
- // |clock_accuracy| is used to determine if a skip has occured
+ // |clock_accuracy| is used to determine if a skip has occurred
// in the audio (as opposed to an inaccuracy in the timestamp.)
// It also limits the smallest amount of buffering allowed.
// |adjustement_time| specifies how long time should be used
// to adjust the audio. This should normally at least a few
// seconds. The larger the value, the smoother and less audible
// the transitions will be. (But it means that perfect audio
- // sync will take longer to achive.)
+ // sync will take longer to achieve.)
// |rate| is audio frames per second, eg 48000.
// |channels| is number of channels in input and output audio.
// TODO(hubbe): Allow input rate and output rate to be different
@@ -71,9 +70,9 @@ class MEDIA_EXPORT AudioShifter {
// Given audio from an a microphone, a reasonable way to calculate
// playout_time would be now + 30ms.
// Ideally playout_time is some time in the future, in which case
- // the samples will be buffered until the approperiate time. If
+ // the samples will be buffered until the appropriate time. If
// playout_time is in the past, everything will still work, and we'll
- // try to keep the buffring to a minimum.
+ // try to keep the buffering to a minimum.
void Push(std::unique_ptr<AudioBus> input, base::TimeTicks playout_time);
// Fills out |output| with samples. Tries to stretch/shrink the audio
@@ -87,16 +86,14 @@ private:
void ResamplerCallback(int frame_delay, AudioBus* destination);
struct AudioQueueEntry {
- AudioQueueEntry(base::TimeTicks target_playout_time_,
- std::unique_ptr<AudioBus> audio_);
- AudioQueueEntry(const AudioQueueEntry& other);
+ AudioQueueEntry(base::TimeTicks target_playout_time,
+ std::unique_ptr<AudioBus> audio);
+ AudioQueueEntry(AudioQueueEntry&& other);
~AudioQueueEntry();
base::TimeTicks target_playout_time;
- linked_ptr<AudioBus> audio;
+ std::unique_ptr<AudioBus> audio;
};
- using AudioShifterQueue = base::circular_deque<AudioQueueEntry>;
-
// Set from constructor.
const base::TimeDelta max_buffer_size_;
const base::TimeDelta clock_accuracy_;
@@ -116,18 +113,19 @@ private:
size_t position_;
// Queue of data provided to us.
- AudioShifterQueue queue_;
+ base::circular_deque<AudioQueueEntry> queue_;
- // Timestamp from alst Pull() call.
+ // Timestamp from last Pull() call.
base::TimeTicks previous_playout_time_;
- // Number of rames requested in last Pull call.
+
+ // Number of frames requested in last Pull call.
size_t previous_requested_samples_;
// Timestamp at the end of last audio bus
// consumed by resampler.
base::TimeTicks end_of_last_consumed_audiobus_;
- // If Push() timestamps are in the past, we have to decidede the playout delay
+ // If Push() timestamps are in the past, we have to decide the playout delay
// ourselves. The delay is then stored here.
base::TimeDelta bias_;
diff --git a/chromium/media/base/audio_timestamp_helper_unittest.cc b/chromium/media/base/audio_timestamp_helper_unittest.cc
index 0ae2ad54f95..7a06ee8fd39 100644
--- a/chromium/media/base/audio_timestamp_helper_unittest.cc
+++ b/chromium/media/base/audio_timestamp_helper_unittest.cc
@@ -5,7 +5,7 @@
#include <stddef.h>
#include <stdint.h>
-#include "base/macros.h"
+#include "base/stl_util.h"
#include "media/base/audio_timestamp_helper.h"
#include "media/base/timestamp_constants.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -132,7 +132,7 @@ TEST_F(AudioTimestampHelperTest, GetDuration) {
int frame_count = 5;
int64_t expected_durations[] = {113, 113, 114, 113, 113, 114};
- for (size_t i = 0; i < arraysize(expected_durations); ++i) {
+ for (size_t i = 0; i < base::size(expected_durations); ++i) {
base::TimeDelta duration = helper_.GetFrameDuration(frame_count);
EXPECT_EQ(expected_durations[i], duration.InMicroseconds());
diff --git a/chromium/media/base/bit_reader_unittest.cc b/chromium/media/base/bit_reader_unittest.cc
index 491aaf26051..01f35965433 100644
--- a/chromium/media/base/bit_reader_unittest.cc
+++ b/chromium/media/base/bit_reader_unittest.cc
@@ -7,7 +7,7 @@
#include <stddef.h>
#include <stdint.h>
-#include "base/macros.h"
+#include "base/stl_util.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace media {
@@ -97,7 +97,7 @@ TEST(BitReaderTest, VariableSkipBitsTest) {
// Set bits to one only for the first and last bit of each read
// in the pattern.
size_t pos = 0;
- for (size_t k = 0; k < arraysize(pattern_read_skip); ++k) {
+ for (size_t k = 0; k < base::size(pattern_read_skip); ++k) {
const size_t read_bit_count = pattern_read_skip[k][0];
if (read_bit_count > 0) {
SetBit(buffer, sizeof(buffer), pos);
@@ -110,7 +110,7 @@ TEST(BitReaderTest, VariableSkipBitsTest) {
// Run the test.
BitReader bit_reader(buffer, sizeof(buffer));
EXPECT_EQ(bit_reader.bits_available(), static_cast<int>(sizeof(buffer) * 8));
- for (size_t k = 0; k < arraysize(pattern_read_skip); ++k) {
+ for (size_t k = 0; k < base::size(pattern_read_skip); ++k) {
const size_t read_bit_count = pattern_read_skip[k][0];
if (read_bit_count > 0) {
int value;
diff --git a/chromium/media/base/cdm_context.cc b/chromium/media/base/cdm_context.cc
index 1eb70b442c4..6f8fadbd7b9 100644
--- a/chromium/media/base/cdm_context.cc
+++ b/chromium/media/base/cdm_context.cc
@@ -12,8 +12,8 @@ CdmContext::CdmContext() = default;
CdmContext::~CdmContext() = default;
-std::unique_ptr<CallbackRegistration> CdmContext::RegisterNewKeyCB(
- base::RepeatingClosure new_key_cb) {
+std::unique_ptr<CallbackRegistration> CdmContext::RegisterEventCB(
+ EventCB /* event_cb */) {
return nullptr;
}
diff --git a/chromium/media/base/cdm_context.h b/chromium/media/base/cdm_context.h
index 9c3dfadc0a2..cd59098fc15 100644
--- a/chromium/media/base/cdm_context.h
+++ b/chromium/media/base/cdm_context.h
@@ -28,27 +28,44 @@ class MediaCryptoContext;
//
// Thread Model: Since this interface is used in many different contexts (e.g.
// different processes or platforms), the thread model is not defined as part
-// of this interface. Subclasses must ensure thread safty.
+// of this interface. Subclasses must ensure thread safety.
class MEDIA_EXPORT CdmContext {
public:
// Indicates an invalid CDM ID. See GetCdmId() for details.
enum { kInvalidCdmId = 0 };
+ // Events happening in a CDM that a media player should be aware of.
+ enum class Event {
+ // A key is newly usable, e.g. new key available, or previously expired key
+ // has been renewed, etc.
+ kHasAdditionalUsableKey,
+
+ // A hardware reset happened. Some hardware context, e.g. hardware decoder
+ // context may be lost.
+ kHardwareContextLost,
+ };
+
+ // Callback to notify the occurrence of an Event.
+ using EventCB = base::RepeatingCallback<void(Event)>;
+
virtual ~CdmContext();
- // Registers a callback which will be called when an additional usable key is
- // available in the CDM. Can be called multiple times to register multiple
- // callbacks, all of which will be called when a new usable key is available.
- // Lifetime: The caller should keep the returned CallbackRegistration object
+ // Registers a callback which will be called when an event happens in the CDM.
+ // Returns null if the registration fails, otherwise the caller should hold
+ // the returned CallbackRegistration (see "Lifetime" notes below). Can be
+ // called multiple times to register multiple callbacks, all of which will be
+ // called when an event happens.
+ // Notes:
+ // - Lifetime: The caller should keep the returned CallbackRegistration object
// to keep the callback registered. The callback will be unregistered upon the
// destruction of the returned CallbackRegistration object. The returned
// CallbackRegistration object can be destructed on any thread.
- // Thread Model: Can be called on any thread. The registered callback will
- // always be called on the thread where RegisterNewKeyCB() is called.
- // TODO(xhwang): We are not using base::CallbackList because it is not thread-
+ // - Thread Model: Can be called on any thread. The registered callback will
+ // always be called on the thread where RegisterEventCB() is called.
+ // - TODO(xhwang): Not using base::CallbackList because it is not thread-
// safe. Consider refactoring base::CallbackList to avoid code duplication.
- virtual std::unique_ptr<CallbackRegistration> RegisterNewKeyCB(
- base::RepeatingClosure new_key_cb);
+ virtual std::unique_ptr<CallbackRegistration> RegisterEventCB(
+ EventCB event_cb);
// Gets the Decryptor object associated with the CDM. Returns nullptr if the
// CDM does not support a Decryptor (i.e. platform-based CDMs where decryption
diff --git a/chromium/media/base/cdm_promise_adapter.cc b/chromium/media/base/cdm_promise_adapter.cc
index 6b9fe520fae..44912469d64 100644
--- a/chromium/media/base/cdm_promise_adapter.cc
+++ b/chromium/media/base/cdm_promise_adapter.cc
@@ -12,8 +12,8 @@ CdmPromiseAdapter::CdmPromiseAdapter()
: next_promise_id_(kInvalidPromiseId + 1) {}
CdmPromiseAdapter::~CdmPromiseAdapter() {
- DCHECK(promises_.empty());
DCHECK(thread_checker_.CalledOnValidThread());
+ DLOG_IF(WARNING, !promises_.empty()) << "There are unfulfilled promises";
Clear();
}
diff --git a/chromium/media/base/channel_layout.cc b/chromium/media/base/channel_layout.cc
index d0ba542e852..d7378d2619c 100644
--- a/chromium/media/base/channel_layout.cc
+++ b/chromium/media/base/channel_layout.cc
@@ -7,7 +7,7 @@
#include <stddef.h>
#include "base/logging.h"
-#include "base/macros.h"
+#include "base/stl_util.h"
namespace media {
@@ -163,7 +163,7 @@ static const int kChannelOrderings[CHANNEL_LAYOUT_MAX + 1][CHANNELS_MAX + 1] = {
};
int ChannelLayoutToChannelCount(ChannelLayout layout) {
- DCHECK_LT(static_cast<size_t>(layout), arraysize(kLayoutToChannels));
+ DCHECK_LT(static_cast<size_t>(layout), base::size(kLayoutToChannels));
DCHECK_LE(kLayoutToChannels[layout], kMaxConcurrentChannels);
return kLayoutToChannels[layout];
}
@@ -194,8 +194,8 @@ ChannelLayout GuessChannelLayout(int channels) {
}
int ChannelOrder(ChannelLayout layout, Channels channel) {
- DCHECK_LT(static_cast<size_t>(layout), arraysize(kChannelOrderings));
- DCHECK_LT(static_cast<size_t>(channel), arraysize(kChannelOrderings[0]));
+ DCHECK_LT(static_cast<size_t>(layout), base::size(kChannelOrderings));
+ DCHECK_LT(static_cast<size_t>(channel), base::size(kChannelOrderings[0]));
return kChannelOrderings[layout][channel];
}
diff --git a/chromium/media/base/channel_mixer_unittest.cc b/chromium/media/base/channel_mixer_unittest.cc
index a4ea611bb68..d0a16b12206 100644
--- a/chromium/media/base/channel_mixer_unittest.cc
+++ b/chromium/media/base/channel_mixer_unittest.cc
@@ -4,7 +4,7 @@
#include <memory>
-#include "base/macros.h"
+#include "base/stl_util.h"
#include "base/strings/stringprintf.h"
#include "media/base/audio_bus.h"
#include "media/base/audio_parameters.h"
@@ -166,35 +166,35 @@ INSTANTIATE_TEST_CASE_P(
testing::Values(ChannelMixerTestData(CHANNEL_LAYOUT_STEREO,
CHANNEL_LAYOUT_MONO,
kStereoToMonoValues,
- arraysize(kStereoToMonoValues),
+ base::size(kStereoToMonoValues),
0.5f),
ChannelMixerTestData(CHANNEL_LAYOUT_MONO,
CHANNEL_LAYOUT_STEREO,
kMonoToStereoValues,
- arraysize(kMonoToStereoValues),
+ base::size(kMonoToStereoValues),
1.0f),
ChannelMixerTestData(CHANNEL_LAYOUT_5_1,
CHANNEL_LAYOUT_MONO,
kFiveOneToMonoValues,
- arraysize(kFiveOneToMonoValues),
+ base::size(kFiveOneToMonoValues),
ChannelMixer::kHalfPower),
ChannelMixerTestData(CHANNEL_LAYOUT_DISCRETE,
2,
CHANNEL_LAYOUT_DISCRETE,
2,
kStereoToMonoValues,
- arraysize(kStereoToMonoValues)),
+ base::size(kStereoToMonoValues)),
ChannelMixerTestData(CHANNEL_LAYOUT_DISCRETE,
2,
CHANNEL_LAYOUT_DISCRETE,
5,
kStereoToMonoValues,
- arraysize(kStereoToMonoValues)),
+ base::size(kStereoToMonoValues)),
ChannelMixerTestData(CHANNEL_LAYOUT_DISCRETE,
5,
CHANNEL_LAYOUT_DISCRETE,
2,
kFiveDiscreteValues,
- arraysize(kFiveDiscreteValues))));
+ base::size(kFiveDiscreteValues))));
} // namespace media
diff --git a/chromium/media/base/channel_mixing_matrix_unittest.cc b/chromium/media/base/channel_mixing_matrix_unittest.cc
index a64ae05020f..77a542fb6ba 100644
--- a/chromium/media/base/channel_mixing_matrix_unittest.cc
+++ b/chromium/media/base/channel_mixing_matrix_unittest.cc
@@ -6,7 +6,7 @@
#include <stddef.h>
-#include "base/macros.h"
+#include "base/stl_util.h"
#include "base/strings/stringprintf.h"
#include "media/base/channel_mixer.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -131,7 +131,7 @@ TEST(ChannelMixingMatrixTest, DiscreteToDiscrete) {
{2, 2}, {2, 5}, {5, 2},
};
- for (size_t n = 0; n < arraysize(test_case); n++) {
+ for (size_t n = 0; n < base::size(test_case); n++) {
int input_channels = test_case[n].input_channels;
int output_channels = test_case[n].output_channels;
ChannelMixingMatrix matrix_builder(CHANNEL_LAYOUT_DISCRETE,
diff --git a/chromium/media/base/container_names.cc b/chromium/media/base/container_names.cc
index afa71cad34e..434aa0adbfb 100644
--- a/chromium/media/base/container_names.cc
+++ b/chromium/media/base/container_names.cc
@@ -10,8 +10,8 @@
#include <limits>
#include "base/logging.h"
-#include "base/macros.h"
#include "base/numerics/safe_conversions.h"
+#include "base/stl_util.h"
#include "media/base/bit_reader.h"
namespace media {
@@ -375,7 +375,7 @@ static bool CheckDV(const uint8_t* buffer, int buffer_size) {
reader.SkipBits(3);
RCHECK(ReadBits(&reader, 24) == 0xffffff);
current_sequence_number = sequence_number;
- for (size_t i = 0; i < arraysize(last_block_number); ++i)
+ for (size_t i = 0; i < base::size(last_block_number); ++i)
last_block_number[i] = -1;
} else {
// Sequence number must match (this will also fail if no header seen).
diff --git a/chromium/media/base/data_buffer_unittest.cc b/chromium/media/base/data_buffer_unittest.cc
index 690b7ec3c82..102f20c4e9d 100644
--- a/chromium/media/base/data_buffer_unittest.cc
+++ b/chromium/media/base/data_buffer_unittest.cc
@@ -8,7 +8,7 @@
#include <memory>
#include <utility>
-#include "base/macros.h"
+#include "base/stl_util.h"
#include "base/strings/string_util.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -49,7 +49,7 @@ TEST(DataBufferTest, Constructor_ScopedArray) {
TEST(DataBufferTest, CopyFrom) {
const uint8_t kTestData[] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77};
- const int kTestDataSize = arraysize(kTestData);
+ const int kTestDataSize = base::size(kTestData);
scoped_refptr<DataBuffer> buffer =
DataBuffer::CopyFrom(kTestData, kTestDataSize);
@@ -99,9 +99,9 @@ TEST(DataBufferTest, Duration) {
TEST(DataBufferTest, ReadingWriting) {
const char kData[] = "hello";
- const int kDataSize = arraysize(kData);
+ const int kDataSize = base::size(kData);
const char kNewData[] = "chromium";
- const int kNewDataSize = arraysize(kNewData);
+ const int kNewDataSize = base::size(kNewData);
// Create a DataBuffer.
scoped_refptr<DataBuffer> buffer(new DataBuffer(kDataSize));
diff --git a/chromium/media/base/data_source.cc b/chromium/media/base/data_source.cc
index 474d65e8bfc..0ded6d30d37 100644
--- a/chromium/media/base/data_source.cc
+++ b/chromium/media/base/data_source.cc
@@ -12,4 +12,13 @@ DataSource::DataSource() = default;
DataSource::~DataSource() = default;
+bool DataSource::AssumeFullyBuffered() const {
+ return true;
+}
+
+int64_t DataSource::GetMemoryUsage() {
+ int64_t temp;
+ return GetSize(&temp) ? temp : 0;
+}
+
} // namespace media
diff --git a/chromium/media/base/data_source.h b/chromium/media/base/data_source.h
index 697b2ce0dae..27902b2c87c 100644
--- a/chromium/media/base/data_source.h
+++ b/chromium/media/base/data_source.h
@@ -33,7 +33,9 @@ class MEDIA_EXPORT DataSource {
const DataSource::ReadCB& read_cb) = 0;
// Stops the DataSource. Once this is called all future Read() calls will
- // return an error.
+ // return an error. This is a synchronous call and may be called from any
+ // thread. Once called, the DataSource may no longer be used and should be
+ // destructed shortly thereafter.
virtual void Stop() = 0;
// Similar to Stop(), but only aborts current reads and not future reads.
@@ -51,6 +53,12 @@ class MEDIA_EXPORT DataSource {
// Values of |bitrate| <= 0 are invalid and should be ignored.
virtual void SetBitrate(int bitrate) = 0;
+ // Assume fully bufferred by default.
+ virtual bool AssumeFullyBuffered() const;
+
+ // By default this just returns GetSize().
+ virtual int64_t GetMemoryUsage();
+
private:
DISALLOW_COPY_AND_ASSIGN(DataSource);
};
diff --git a/chromium/media/base/decode_capabilities.h b/chromium/media/base/decode_capabilities.h
deleted file mode 100644
index 4ae095fc524..00000000000
--- a/chromium/media/base/decode_capabilities.h
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_BASE_DECODE_CAPABILITIES_
-#define MEDIA_BASE_DECODE_CAPABILITIES_
-
-#include "media/base/audio_codecs.h"
-#include "media/base/media_export.h"
-#include "media/base/video_codecs.h"
-#include "media/base/video_color_space.h"
-
-namespace media {
-
-// APIs to media's decoder capabilities. Embedders may customize decoder
-// capabilities via MediaClient. See usage in mime_util_internal.cc.
-struct MEDIA_EXPORT AudioConfig {
- AudioCodec codec;
-};
-
-struct MEDIA_EXPORT VideoConfig {
- VideoCodec codec;
- VideoCodecProfile profile;
- int level;
- VideoColorSpace color_space;
-};
-
-MEDIA_EXPORT bool IsSupportedAudioConfig(const AudioConfig& config);
-MEDIA_EXPORT bool IsSupportedVideoConfig(const VideoConfig& config);
-
-} // namespace media
-
-#endif // MEDIA_BASE_DECODE_CAPABILITIES_
diff --git a/chromium/media/base/decoder_buffer.h b/chromium/media/base/decoder_buffer.h
index 32392820e62..20f2b419878 100644
--- a/chromium/media/base/decoder_buffer.h
+++ b/chromium/media/base/decoder_buffer.h
@@ -147,6 +147,8 @@ class MEDIA_EXPORT DecoderBuffer
discard_padding_ = discard_padding;
}
+ // Returns DecryptConfig associated with |this|. Returns null iff |this| is
+ // not encrypted.
const DecryptConfig* decrypt_config() const {
DCHECK(!end_of_stream());
return decrypt_config_.get();
diff --git a/chromium/media/base/decoder_buffer_unittest.cc b/chromium/media/base/decoder_buffer_unittest.cc
index fb8249687ad..d8a4444238e 100644
--- a/chromium/media/base/decoder_buffer_unittest.cc
+++ b/chromium/media/base/decoder_buffer_unittest.cc
@@ -9,8 +9,8 @@
#include <memory>
-#include "base/macros.h"
#include "base/memory/shared_memory.h"
+#include "base/stl_util.h"
#include "base/strings/string_util.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -37,7 +37,7 @@ TEST(DecoderBufferTest, CreateEOSBuffer) {
TEST(DecoderBufferTest, CopyFrom) {
const uint8_t kData[] = "hello";
- const size_t kDataSize = arraysize(kData);
+ const size_t kDataSize = base::size(kData);
scoped_refptr<DecoderBuffer> buffer2(DecoderBuffer::CopyFrom(
reinterpret_cast<const uint8_t*>(&kData), kDataSize));
@@ -64,7 +64,7 @@ TEST(DecoderBufferTest, CopyFrom) {
TEST(DecoderBufferTest, FromSharedMemoryHandle) {
const uint8_t kData[] = "hello";
- const size_t kDataSize = arraysize(kData);
+ const size_t kDataSize = base::size(kData);
base::SharedMemory mem;
ASSERT_TRUE(mem.CreateAndMapAnonymous(kDataSize));
@@ -81,7 +81,7 @@ TEST(DecoderBufferTest, FromSharedMemoryHandle) {
TEST(DecoderBufferTest, FromSharedMemoryHandle_Unaligned) {
const uint8_t kData[] = "XXXhello";
- const size_t kDataSize = arraysize(kData);
+ const size_t kDataSize = base::size(kData);
const off_t kDataOffset = 3;
base::SharedMemory mem;
@@ -100,7 +100,7 @@ TEST(DecoderBufferTest, FromSharedMemoryHandle_Unaligned) {
TEST(DecoderBufferTest, FromSharedMemoryHandle_ZeroSize) {
const uint8_t kData[] = "hello";
- const size_t kDataSize = arraysize(kData);
+ const size_t kDataSize = base::size(kData);
base::SharedMemory mem;
ASSERT_TRUE(mem.CreateAndMapAnonymous(kDataSize));
@@ -114,7 +114,7 @@ TEST(DecoderBufferTest, FromSharedMemoryHandle_ZeroSize) {
#if !defined(OS_ANDROID)
TEST(DecoderBufferTest, PaddingAlignment) {
const uint8_t kData[] = "hello";
- const size_t kDataSize = arraysize(kData);
+ const size_t kDataSize = base::size(kData);
scoped_refptr<DecoderBuffer> buffer2(DecoderBuffer::CopyFrom(
reinterpret_cast<const uint8_t*>(&kData), kDataSize));
ASSERT_TRUE(buffer2.get());
@@ -142,7 +142,7 @@ TEST(DecoderBufferTest, PaddingAlignment) {
TEST(DecoderBufferTest, ReadingWriting) {
const char kData[] = "hello";
- const size_t kDataSize = arraysize(kData);
+ const size_t kDataSize = base::size(kData);
scoped_refptr<DecoderBuffer> buffer(new DecoderBuffer(kDataSize));
ASSERT_TRUE(buffer.get());
diff --git a/chromium/media/base/decryptor.cc b/chromium/media/base/decryptor.cc
index f37fda5ad45..8d24de8d34d 100644
--- a/chromium/media/base/decryptor.cc
+++ b/chromium/media/base/decryptor.cc
@@ -24,4 +24,8 @@ Decryptor::Decryptor() = default;
Decryptor::~Decryptor() = default;
+bool Decryptor::CanAlwaysDecrypt() {
+ return false;
+}
+
} // namespace media
diff --git a/chromium/media/base/decryptor.h b/chromium/media/base/decryptor.h
index f662f32ebc0..b865cabc8b8 100644
--- a/chromium/media/base/decryptor.h
+++ b/chromium/media/base/decryptor.h
@@ -53,6 +53,7 @@ class MEDIA_EXPORT Decryptor {
// If this function is called multiple times for the same |stream_type|, the
// previously registered callback will be replaced. In other words,
// registering a null callback cancels the originally registered callback.
+ // TODO(crbug.com/821288): Replace this with CdmContext::RegisterEventCB().
virtual void RegisterNewKeyCB(StreamType stream_type,
const NewKeyCB& key_added_cb) = 0;
@@ -157,6 +158,9 @@ class MEDIA_EXPORT Decryptor {
// The decoder can be reinitialized after it is uninitialized.
virtual void DeinitializeDecoder(StreamType stream_type) = 0;
+ // Returns whether or not the decryptor implementation supports decrypt-only.
+ virtual bool CanAlwaysDecrypt();
+
private:
DISALLOW_COPY_AND_ASSIGN(Decryptor);
};
diff --git a/chromium/media/base/fake_demuxer_stream.cc b/chromium/media/base/fake_demuxer_stream.cc
index 0345ce1fcab..736f6944901 100644
--- a/chromium/media/base/fake_demuxer_stream.cc
+++ b/chromium/media/base/fake_demuxer_stream.cc
@@ -13,8 +13,8 @@
#include "base/callback_helpers.h"
#include "base/location.h"
#include "base/logging.h"
-#include "base/macros.h"
#include "base/single_thread_task_runner.h"
+#include "base/stl_util.h"
#include "base/threading/thread_task_runner_handle.h"
#include "media/base/bind_to_current_loop.h"
#include "media/base/decoder_buffer.h"
@@ -190,8 +190,9 @@ void FakeDemuxerStream::DoRead() {
// TODO(xhwang): Output out-of-order buffers if needed.
if (is_encrypted_) {
buffer->set_decrypt_config(DecryptConfig::CreateCencConfig(
- std::string(kKeyId, kKeyId + arraysize(kKeyId)),
- std::string(kIv, kIv + arraysize(kIv)), std::vector<SubsampleEntry>()));
+ std::string(kKeyId, kKeyId + base::size(kKeyId)),
+ std::string(kIv, kIv + base::size(kIv)),
+ std::vector<SubsampleEntry>()));
}
buffer->set_timestamp(current_timestamp_);
buffer->set_duration(duration_);
diff --git a/chromium/media/base/fallback_video_decoder.cc b/chromium/media/base/fallback_video_decoder.cc
index 6d73bce38b1..0d5826f69a5 100644
--- a/chromium/media/base/fallback_video_decoder.cc
+++ b/chromium/media/base/fallback_video_decoder.cc
@@ -20,38 +20,35 @@ FallbackVideoDecoder::FallbackVideoDecoder(
fallback_decoder_(std::move(fallback)),
weak_factory_(this) {}
-void FallbackVideoDecoder::Initialize(
- const VideoDecoderConfig& config,
- bool low_delay,
- CdmContext* cdm_context,
- const InitCB& init_cb,
- const OutputCB& output_cb,
- const WaitingForDecryptionKeyCB& waiting_for_decryption_key_cb) {
+void FallbackVideoDecoder::Initialize(const VideoDecoderConfig& config,
+ bool low_delay,
+ CdmContext* cdm_context,
+ const InitCB& init_cb,
+ const OutputCB& output_cb,
+ const WaitingCB& waiting_cb) {
// If we've already fallen back, just reinitialize the selected decoder.
if (selected_decoder_ && did_fallback_) {
selected_decoder_->Initialize(config, low_delay, cdm_context, init_cb,
- output_cb, waiting_for_decryption_key_cb);
+ output_cb, waiting_cb);
return;
}
InitCB fallback_initialize_cb = base::BindRepeating(
&FallbackVideoDecoder::FallbackInitialize, weak_factory_.GetWeakPtr(),
- config, low_delay, cdm_context, init_cb, output_cb,
- waiting_for_decryption_key_cb);
+ config, low_delay, cdm_context, init_cb, output_cb, waiting_cb);
preferred_decoder_->Initialize(config, low_delay, cdm_context,
std::move(fallback_initialize_cb), output_cb,
- waiting_for_decryption_key_cb);
+ waiting_cb);
}
-void FallbackVideoDecoder::FallbackInitialize(
- const VideoDecoderConfig& config,
- bool low_delay,
- CdmContext* cdm_context,
- const InitCB& init_cb,
- const OutputCB& output_cb,
- const WaitingForDecryptionKeyCB& waiting_for_decryption_key_cb,
- bool success) {
+void FallbackVideoDecoder::FallbackInitialize(const VideoDecoderConfig& config,
+ bool low_delay,
+ CdmContext* cdm_context,
+ const InitCB& init_cb,
+ const OutputCB& output_cb,
+ const WaitingCB& waiting_cb,
+ bool success) {
// The preferred decoder was successfully initialized.
if (success) {
selected_decoder_ = preferred_decoder_.get();
@@ -69,7 +66,7 @@ void FallbackVideoDecoder::FallbackInitialize(
std::move(preferred_decoder_)));
selected_decoder_ = fallback_decoder_.get();
fallback_decoder_->Initialize(config, low_delay, cdm_context, init_cb,
- output_cb, waiting_for_decryption_key_cb);
+ output_cb, waiting_cb);
}
void FallbackVideoDecoder::Decode(scoped_refptr<DecoderBuffer> buffer,
diff --git a/chromium/media/base/fallback_video_decoder.h b/chromium/media/base/fallback_video_decoder.h
index ed7bcd3d7d6..69ee1f2779e 100644
--- a/chromium/media/base/fallback_video_decoder.h
+++ b/chromium/media/base/fallback_video_decoder.h
@@ -21,13 +21,12 @@ class MEDIA_EXPORT FallbackVideoDecoder : public VideoDecoder {
// media::VideoDecoder implementation.
std::string GetDisplayName() const override;
- void Initialize(
- const VideoDecoderConfig& config,
- bool low_delay,
- CdmContext* cdm_context,
- const InitCB& init_cb,
- const OutputCB& output_cb,
- const WaitingForDecryptionKeyCB& waiting_for_decryption_key_cb) override;
+ void Initialize(const VideoDecoderConfig& config,
+ bool low_delay,
+ CdmContext* cdm_context,
+ const InitCB& init_cb,
+ const OutputCB& output_cb,
+ const WaitingCB& waiting_cb) override;
void Decode(scoped_refptr<DecoderBuffer> buffer,
const DecodeCB& decode_cb) override;
void Reset(const base::RepeatingClosure& reset_cb) override;
@@ -39,14 +38,13 @@ class MEDIA_EXPORT FallbackVideoDecoder : public VideoDecoder {
~FallbackVideoDecoder() override;
private:
- void FallbackInitialize(
- const VideoDecoderConfig& config,
- bool low_delay,
- CdmContext* cdm_context,
- const InitCB& init_cb,
- const OutputCB& output_cb,
- const WaitingForDecryptionKeyCB& waiting_for_decryption_key_cb,
- bool success);
+ void FallbackInitialize(const VideoDecoderConfig& config,
+ bool low_delay,
+ CdmContext* cdm_context,
+ const InitCB& init_cb,
+ const OutputCB& output_cb,
+ const WaitingCB& waiting_cb,
+ bool success);
std::unique_ptr<media::VideoDecoder> preferred_decoder_;
std::unique_ptr<media::VideoDecoder> fallback_decoder_;
diff --git a/chromium/media/base/ipc/media_param_traits_macros.h b/chromium/media/base/ipc/media_param_traits_macros.h
index 26115f13b9d..e1a6a2bd4cb 100644
--- a/chromium/media/base/ipc/media_param_traits_macros.h
+++ b/chromium/media/base/ipc/media_param_traits_macros.h
@@ -24,6 +24,7 @@
#include "media/base/encryption_scheme.h"
#include "media/base/hdr_metadata.h"
#include "media/base/media_log_event.h"
+#include "media/base/media_status.h"
#include "media/base/output_device_info.h"
#include "media/base/overlay_info.h"
#include "media/base/pipeline_status.h"
@@ -33,6 +34,7 @@
#include "media/base/video_color_space.h"
#include "media/base/video_rotation.h"
#include "media/base/video_types.h"
+#include "media/base/waiting.h"
#include "media/base/watch_time_keys.h"
// TODO(crbug.com/676224): When EnabledIf attribute is supported in mojom files,
// move CdmProxy related code into #if BUILDFLAG(ENABLE_LIBRARY_CDMS).
@@ -57,9 +59,6 @@ IPC_ENUM_TRAITS_MAX_VALUE(media::AudioParameters::Format,
IPC_ENUM_TRAITS_MAX_VALUE(media::BufferingState,
media::BufferingState::BUFFERING_STATE_MAX)
-IPC_ENUM_TRAITS_MAX_VALUE(media::CdmKeyInformation::KeyStatus,
- media::CdmKeyInformation::KEY_STATUS_MAX)
-
IPC_ENUM_TRAITS_MAX_VALUE(media::CdmMessageType,
media::CdmMessageType::MESSAGE_TYPE_MAX)
@@ -112,6 +111,9 @@ IPC_ENUM_TRAITS_MAX_VALUE(media::HdcpVersion,
IPC_ENUM_TRAITS_MAX_VALUE(media::MediaLogEvent::Type,
media::MediaLogEvent::TYPE_LAST)
+IPC_ENUM_TRAITS_MAX_VALUE(media::MediaStatus::State,
+ media::MediaStatus::State::STATE_MAX)
+
IPC_ENUM_TRAITS_MAX_VALUE(media::OutputDeviceStatus,
media::OUTPUT_DEVICE_STATUS_MAX)
@@ -122,6 +124,9 @@ IPC_ENUM_TRAITS_MAX_VALUE(media::SampleFormat, media::kSampleFormatMax)
IPC_ENUM_TRAITS_MAX_VALUE(media::VideoCodec, media::kVideoCodecMax)
+IPC_ENUM_TRAITS_MAX_VALUE(media::WaitingReason,
+ media::WaitingReason::kMaxValue);
+
IPC_ENUM_TRAITS_MAX_VALUE(media::WatchTimeKey,
media::WatchTimeKey::kWatchTimeKeyMax);
@@ -168,12 +173,6 @@ IPC_STRUCT_TRAITS_BEGIN(media::CdmConfig)
IPC_STRUCT_TRAITS_MEMBER(use_hw_secure_codecs)
IPC_STRUCT_TRAITS_END()
-IPC_STRUCT_TRAITS_BEGIN(media::CdmKeyInformation)
- IPC_STRUCT_TRAITS_MEMBER(key_id)
- IPC_STRUCT_TRAITS_MEMBER(status)
- IPC_STRUCT_TRAITS_MEMBER(system_code)
-IPC_STRUCT_TRAITS_END()
-
IPC_STRUCT_TRAITS_BEGIN(media::MediaLogEvent)
IPC_STRUCT_TRAITS_MEMBER(id)
IPC_STRUCT_TRAITS_MEMBER(type)
@@ -211,6 +210,7 @@ IPC_STRUCT_TRAITS_END()
IPC_STRUCT_TRAITS_BEGIN(media::OverlayInfo)
IPC_STRUCT_TRAITS_MEMBER(routing_token)
IPC_STRUCT_TRAITS_MEMBER(is_fullscreen)
+ IPC_STRUCT_TRAITS_MEMBER(is_persistent_video)
IPC_STRUCT_TRAITS_END()
#endif // MEDIA_BASE_IPC_MEDIA_PARAM_TRAITS_MACROS_H_
diff --git a/chromium/media/base/key_systems.cc b/chromium/media/base/key_systems.cc
index 46cac7e4ff2..3cd0e5c8b8c 100644
--- a/chromium/media/base/key_systems.cc
+++ b/chromium/media/base/key_systems.cc
@@ -7,10 +7,10 @@
#include <stddef.h>
#include <memory>
+#include <unordered_map>
-#include "base/containers/hash_tables.h"
#include "base/logging.h"
-#include "base/macros.h"
+#include "base/stl_util.h"
#include "base/strings/string_util.h"
#include "base/threading/thread_checker.h"
#include "base/time/time.h"
@@ -285,11 +285,11 @@ class KeySystemsImpl : public KeySystems {
bool IsValidMimeTypeCodecsCombination(const std::string& mime_type,
SupportedCodecs codecs) const;
- typedef base::hash_map<std::string, std::unique_ptr<KeySystemProperties>>
+ typedef std::unordered_map<std::string, std::unique_ptr<KeySystemProperties>>
KeySystemPropertiesMap;
- typedef base::hash_map<std::string, SupportedCodecs> MimeTypeToCodecsMap;
- typedef base::hash_map<std::string, EmeCodec> CodecMap;
- typedef base::hash_map<std::string, EmeInitDataType> InitDataTypesMap;
+ typedef std::unordered_map<std::string, SupportedCodecs> MimeTypeToCodecsMap;
+ typedef std::unordered_map<std::string, EmeCodec> CodecMap;
+ typedef std::unordered_map<std::string, EmeInitDataType> InitDataTypesMap;
// TODO(sandersd): Separate container enum from codec mask value.
// http://crbug.com/417440
@@ -332,7 +332,7 @@ KeySystemsImpl* KeySystemsImpl::GetInstance() {
KeySystemsImpl::KeySystemsImpl()
: audio_codec_mask_(EME_CODEC_AUDIO_ALL),
video_codec_mask_(EME_CODEC_VIDEO_ALL) {
- for (size_t i = 0; i < arraysize(kMimeTypeToCodecsMap); ++i) {
+ for (size_t i = 0; i < base::size(kMimeTypeToCodecsMap); ++i) {
RegisterMimeType(kMimeTypeToCodecsMap[i].mime_type,
kMimeTypeToCodecsMap[i].codecs);
}
diff --git a/chromium/media/base/key_systems_unittest.cc b/chromium/media/base/key_systems_unittest.cc
index cbfc657bf76..b2f3e288195 100644
--- a/chromium/media/base/key_systems_unittest.cc
+++ b/chromium/media/base/key_systems_unittest.cc
@@ -259,8 +259,8 @@ class TestMediaClient : public MediaClient {
bool IsKeySystemsUpdateNeeded() final;
void AddSupportedKeySystems(std::vector<std::unique_ptr<KeySystemProperties>>*
key_systems_properties) override;
- bool IsSupportedAudioConfig(const media::AudioConfig& config) final;
- bool IsSupportedVideoConfig(const media::VideoConfig& config) final;
+ bool IsSupportedAudioType(const media::AudioType& type) final;
+ bool IsSupportedVideoType(const media::VideoType& type) final;
bool IsSupportedBitstreamAudioCodec(AudioCodec codec) final;
// Helper function to test the case where IsKeySystemsUpdateNeeded() is true
@@ -297,11 +297,11 @@ void TestMediaClient::AddSupportedKeySystems(
is_update_needed_ = false;
}
-bool TestMediaClient::IsSupportedAudioConfig(const media::AudioConfig& config) {
+bool TestMediaClient::IsSupportedAudioType(const media::AudioType& type) {
return true;
}
-bool TestMediaClient::IsSupportedVideoConfig(const media::VideoConfig& config) {
+bool TestMediaClient::IsSupportedVideoType(const media::VideoType& type) {
return true;
}
diff --git a/chromium/media/base/logging_override_if_enabled.h b/chromium/media/base/logging_override_if_enabled.h
new file mode 100644
index 00000000000..302a5f0ea20
--- /dev/null
+++ b/chromium/media/base/logging_override_if_enabled.h
@@ -0,0 +1,22 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_LOGGING_OVERRIDE_IF_ENABLED_H_
+#define MEDIA_BASE_LOGGING_OVERRIDE_IF_ENABLED_H_
+
+// Provides a way to override DVLOGs to at build time.
+// Warning: Do NOT include this file in .h files to avoid unexpected override.
+// TODO(xhwang): Provide a way to choose which |verboselevel| to override.
+
+#include "media/media_buildflags.h"
+
+#if BUILDFLAG(ENABLE_LOGGING_OVERRIDE)
+#if !defined(DVLOG)
+#error This file must be included after base/logging.h.
+#endif
+#undef DVLOG
+#define DVLOG(verboselevel) LOG(INFO)
+#endif // BUILDFLAG(ENABLE_LOGGING_OVERRIDE)
+
+#endif // MEDIA_BASE_LOGGING_OVERRIDE_IF_ENABLED_H_
diff --git a/chromium/media/base/media.cc b/chromium/media/base/media.cc
index b9dcc90bc54..32272c14260 100644
--- a/chromium/media/base/media.cc
+++ b/chromium/media/base/media.cc
@@ -32,9 +32,6 @@ namespace media {
class MediaInitializer {
public:
MediaInitializer() {
- TRACE_EVENT_WARMUP_CATEGORY("audio");
- TRACE_EVENT_WARMUP_CATEGORY("media");
-
// Initializing the CPU flags may query /proc for details on the current CPU
// for NEON, VFP, etc optimizations. If in a sandboxed process, they should
// have been forced (see InitializeMediaLibraryInSandbox).
diff --git a/chromium/media/base/media_client.h b/chromium/media/base/media_client.h
index da43b06b059..f60faa08c26 100644
--- a/chromium/media/base/media_client.h
+++ b/chromium/media/base/media_client.h
@@ -10,9 +10,9 @@
#include <vector>
#include "media/base/audio_codecs.h"
-#include "media/base/decode_capabilities.h"
#include "media/base/key_system_properties.h"
#include "media/base/media_export.h"
+#include "media/base/media_types.h"
#include "media/base/video_codecs.h"
#include "media/base/video_color_space.h"
#include "ui/gfx/color_space.h"
@@ -49,10 +49,10 @@ class MEDIA_EXPORT MediaClient {
virtual bool IsKeySystemsUpdateNeeded() = 0;
// Returns true if the given audio config is supported.
- virtual bool IsSupportedAudioConfig(const AudioConfig& config) = 0;
+ virtual bool IsSupportedAudioType(const AudioType& type) = 0;
// Returns true if the given video config is supported.
- virtual bool IsSupportedVideoConfig(const VideoConfig& config) = 0;
+ virtual bool IsSupportedVideoType(const VideoType& type) = 0;
// Returns true if the compressed audio |codec| format is supported by the
// audio sink.
diff --git a/chromium/media/base/media_log.cc b/chromium/media/base/media_log.cc
index c3990189754..fd4c41b83fc 100644
--- a/chromium/media/base/media_log.cc
+++ b/chromium/media/base/media_log.cc
@@ -84,6 +84,8 @@ std::string MediaLog::EventTypeToString(MediaLogEvent::Type type) {
return "MEDIA_DEBUG_LOG_ENTRY";
case MediaLogEvent::PROPERTY_CHANGE:
return "PROPERTY_CHANGE";
+ case MediaLogEvent::SUSPENDED:
+ return "SUSPENDED";
}
NOTREACHED();
return NULL;
@@ -263,10 +265,11 @@ std::unique_ptr<MediaLogEvent> MediaLog::CreateTimeEvent(
const std::string& property,
base::TimeDelta value) {
std::unique_ptr<MediaLogEvent> event(CreateEvent(type));
- if (value.is_max())
- event->params.SetString(property, "unknown");
+ double value_in_seconds = value.InSecondsF();
+ if (std::isfinite(value_in_seconds))
+ event->params.SetDouble(property, value_in_seconds);
else
- event->params.SetDouble(property, value.InSecondsF());
+ event->params.SetString(property, "unknown");
return event;
}
diff --git a/chromium/media/base/media_log.h b/chromium/media/base/media_log.h
index 3c529ed38d0..164b6e942aa 100644
--- a/chromium/media/base/media_log.h
+++ b/chromium/media/base/media_log.h
@@ -74,7 +74,8 @@ class MEDIA_EXPORT MediaLog {
// with whitespace in the latter kind of events.
static std::string MediaEventToMessageString(const MediaLogEvent& event);
- MediaLog();
+ // Constructor is protected, see below.
+
virtual ~MediaLog();
// Add an event to this log. Inheritors should override AddEventLocked to
@@ -139,6 +140,9 @@ class MEDIA_EXPORT MediaLog {
virtual std::unique_ptr<MediaLog> Clone();
protected:
+ // Ensures only subclasses and factories (e.g. Clone()) can create MediaLog.
+ MediaLog();
+
// Methods that may be overridden by inheritors. All calls may arrive on any
// thread, but will be synchronized with respect to any other *Locked calls on
// any other thread, and with any parent log invalidation.
@@ -175,9 +179,7 @@ class MEDIA_EXPORT MediaLog {
MediaLog(scoped_refptr<ParentLogRecord> parent_log_record);
private:
- // The underlying media log.
- scoped_refptr<ParentLogRecord> parent_log_record_;
-
+ // Allows MediaLogTest to construct MediaLog directly for testing.
friend class MediaLogTest;
FRIEND_TEST_ALL_PREFIXES(MediaLogTest, EventsAreForwarded);
FRIEND_TEST_ALL_PREFIXES(MediaLogTest, EventsAreNotForwardedAfterInvalidate);
@@ -192,6 +194,9 @@ class MEDIA_EXPORT MediaLog {
// the event, and sets the last 3 characters to an ellipsis.
static std::string TruncateUrlString(std::string log_string);
+ // The underlying media log.
+ scoped_refptr<ParentLogRecord> parent_log_record_;
+
// A unique (to this process) id for this MediaLog.
int32_t id_;
DISALLOW_COPY_AND_ASSIGN(MediaLog);
diff --git a/chromium/media/base/media_log_event.h b/chromium/media/base/media_log_event.h
index 27fb20e9b43..adb5035da1d 100644
--- a/chromium/media/base/media_log_event.h
+++ b/chromium/media/base/media_log_event.h
@@ -89,7 +89,10 @@ struct MediaLogEvent {
// A property has changed without any special event occurring.
PROPERTY_CHANGE,
- TYPE_LAST = PROPERTY_CHANGE
+ // Issued when a player is suspended.
+ SUSPENDED,
+
+ TYPE_LAST = SUSPENDED
};
int32_t id;
diff --git a/chromium/media/base/media_log_unittest.cc b/chromium/media/base/media_log_unittest.cc
index a6a1806d9ad..741806b8c55 100644
--- a/chromium/media/base/media_log_unittest.cc
+++ b/chromium/media/base/media_log_unittest.cc
@@ -18,6 +18,9 @@ namespace media {
class MediaLogTest : public testing::Test {
public:
static constexpr size_t kMaxUrlLength = MediaLog::kMaxUrlLength;
+
+ protected:
+ MediaLog media_log;
};
constexpr size_t MediaLogTest::kMaxUrlLength;
@@ -28,14 +31,14 @@ TEST_F(MediaLogTest, DontTruncateShortUrlString) {
// Verify that CreatedEvent does not truncate the short URL.
std::unique_ptr<MediaLogEvent> created_event =
- MediaLog().CreateCreatedEvent(short_url);
+ media_log.CreateCreatedEvent(short_url);
std::string stored_url;
created_event->params.GetString("origin_url", &stored_url);
EXPECT_EQ(stored_url, short_url);
// Verify that LoadEvent does not truncate the short URL.
std::unique_ptr<MediaLogEvent> load_event =
- MediaLog().CreateLoadEvent(short_url);
+ media_log.CreateLoadEvent(short_url);
load_event->params.GetString("url", &stored_url);
EXPECT_EQ(stored_url, short_url);
}
@@ -52,7 +55,7 @@ TEST_F(MediaLogTest, TruncateLongUrlStrings) {
// Verify that long CreatedEvent URL...
std::unique_ptr<MediaLogEvent> created_event =
- MediaLog().CreateCreatedEvent(long_url);
+ media_log.CreateCreatedEvent(long_url);
std::string stored_url;
created_event->params.GetString("origin_url", &stored_url);
@@ -67,7 +70,7 @@ TEST_F(MediaLogTest, TruncateLongUrlStrings) {
// Verify that long LoadEvent URL...
std::unique_ptr<MediaLogEvent> load_event =
- MediaLog().CreateCreatedEvent(long_url);
+ media_log.CreateCreatedEvent(long_url);
load_event->params.GetString("url", &stored_url);
// ... is truncated
EXPECT_EQ(stored_url.length(), MediaLogTest::kMaxUrlLength);
diff --git a/chromium/media/base/media_observer.h b/chromium/media/base/media_observer.h
index f6a5efa3a2d..1bb629930e5 100644
--- a/chromium/media/base/media_observer.h
+++ b/chromium/media/base/media_observer.h
@@ -77,7 +77,8 @@ class MEDIA_EXPORT MediaObserver {
// Called when the data source is asynchronously initialized.
virtual void OnDataSourceInitialized(const GURL& url_after_redirects) = 0;
- // Set the MediaObserverClient.
+ // Set the MediaObserverClient. May be called with nullptr to disconnect the
+ // the client from the observer.
virtual void SetClient(MediaObserverClient* client) = 0;
};
diff --git a/chromium/media/base/media_status.h b/chromium/media/base/media_status.h
index 8d913e3b434..5dc4ce9e1e5 100644
--- a/chromium/media/base/media_status.h
+++ b/chromium/media/base/media_status.h
@@ -16,7 +16,14 @@ namespace media {
// TODO(https://crbug.com/820277): Deduplicate media_router::MediaStatus.
struct MEDIA_EXPORT MediaStatus {
public:
- enum class State { UNKNOWN, PLAYING, PAUSED, BUFFERING, STOPPED };
+ enum class State {
+ UNKNOWN,
+ PLAYING,
+ PAUSED,
+ BUFFERING,
+ STOPPED,
+ STATE_MAX = STOPPED,
+ };
MediaStatus();
MediaStatus(const MediaStatus& other);
diff --git a/chromium/media/base/media_switches.cc b/chromium/media/base/media_switches.cc
index b0c5e587faf..5a9a768a8e1 100644
--- a/chromium/media/base/media_switches.cc
+++ b/chromium/media/base/media_switches.cc
@@ -184,11 +184,6 @@ const char kNoUserGestureRequiredPolicy[] = "no-user-gesture-required";
// Autoplay policy to require a user gesture in order to play.
const char kUserGestureRequiredPolicy[] = "user-gesture-required";
-// Autoplay policy to require a user gesture in order to play for cross origin
-// iframes.
-const char kUserGestureRequiredForCrossOriginPolicy[] =
- "user-gesture-required-for-cross-origin";
-
} // namespace autoplay
} // namespace switches
@@ -243,7 +238,7 @@ const base::Feature kUseAndroidOverlayAggressively{
// Let video track be unselected when video is playing in the background.
const base::Feature kBackgroundSrcVideoTrackOptimization{
- "BackgroundSrcVideoTrackOptimization", base::FEATURE_DISABLED_BY_DEFAULT};
+ "BackgroundSrcVideoTrackOptimization", base::FEATURE_ENABLED_BY_DEFAULT};
// Let video without audio be paused when it is playing in the background.
const base::Feature kBackgroundVideoPauseOptimization{
@@ -256,31 +251,14 @@ const base::Feature kMemoryPressureBasedSourceBufferGC{
"MemoryPressureBasedSourceBufferGC", base::FEATURE_DISABLED_BY_DEFAULT};
// Enable MojoVideoDecoder, replacing GpuVideoDecoder.
-const base::Feature kMojoVideoDecoder {
- "MojoVideoDecoder",
-#if defined(OS_CHROMEOS)
- // TODO(posciak): Re-enable once the feature is verified on CrOS.
- // https://crbug.com/902968.
- base::FEATURE_DISABLED_BY_DEFAULT
-#else
- base::FEATURE_ENABLED_BY_DEFAULT
-#endif
-};
+const base::Feature kMojoVideoDecoder{"MojoVideoDecoder",
+ base::FEATURE_ENABLED_BY_DEFAULT};
// Enable The D3D11 Video decoder. Must also enable MojoVideoDecoder for
// this to have any effect.
const base::Feature kD3D11VideoDecoder{"D3D11VideoDecoder",
base::FEATURE_DISABLED_BY_DEFAULT};
-// Allow playback of encrypted media through the D3D11 decoder. Requires
-// D3D11VideoDecoder to be enabled also.
-const base::Feature kD3D11EncryptedMedia{"D3D11EncryptedMedia",
- base::FEATURE_DISABLED_BY_DEFAULT};
-
-// Enable VP9 decoding in the D3D11VideoDecoder.
-const base::Feature kD3D11VP9Decoder{"D3D11VP9Decoder",
- base::FEATURE_DISABLED_BY_DEFAULT};
-
// Falls back to other decoders after audio/video decode error happens. The
// implementation may choose different strategies on when to fallback. See
// DecoderStream for details. When disabled, playback will fail immediately
@@ -302,11 +280,11 @@ const base::Feature kNewEncodeCpuLoadEstimator{
// Use the new Remote Playback / media flinging pipeline.
const base::Feature kNewRemotePlaybackPipeline{
- "NewRemotePlaybackPipeline", base::FEATURE_DISABLED_BY_DEFAULT};
+ "NewRemotePlaybackPipeline", base::FEATURE_ENABLED_BY_DEFAULT};
// Use the new RTC hardware decode path via RTCVideoDecoderAdapter.
const base::Feature kRTCVideoDecoderAdapter{"RTCVideoDecoderAdapter",
- base::FEATURE_DISABLED_BY_DEFAULT};
+ base::FEATURE_ENABLED_BY_DEFAULT};
// CanPlayThrough issued according to standard.
const base::Feature kSpecCompliantCanPlayThrough{
@@ -359,12 +337,13 @@ const base::Feature kHardwareSecureDecryption{
// Enables handling of hardware media keys for controlling media.
const base::Feature kHardwareMediaKeyHandling{
- "HardwareMediaKeyHandling", base::FEATURE_DISABLED_BY_DEFAULT};
-
-// Limits number of media tags loading in parallel to 6. This speeds up
-// preloading of any media that requires multiple requests to preload.
-const base::Feature kLimitParallelMediaPreloading{
- "LimitParallelMediaPreloading", base::FEATURE_DISABLED_BY_DEFAULT};
+ "HardwareMediaKeyHandling",
+#if defined(OS_CHROMEOS)
+ base::FEATURE_ENABLED_BY_DEFAULT
+#else
+ base::FEATURE_DISABLED_BY_DEFAULT
+#endif
+};
// Enables low-delay video rendering in media pipeline on "live" stream.
const base::Feature kLowDelayVideoRenderingOnLiveStream{
@@ -425,7 +404,7 @@ const base::Feature kMediaFoundationH264Encoding{
// Enables MediaFoundation based video capture
const base::Feature kMediaFoundationVideoCapture{
- "MediaFoundationVideoCapture", base::FEATURE_DISABLED_BY_DEFAULT};
+ "MediaFoundationVideoCapture", base::FEATURE_ENABLED_BY_DEFAULT};
// Enables DirectShow GetPhotoState implementation
// Created to act as a kill switch by disabling it, in the case of the
@@ -487,6 +466,26 @@ const base::Feature kPreloadMediaEngagementData{
"PreloadMediaEngagementData", base::FEATURE_ENABLED_BY_DEFAULT};
#endif
+// Enables experimental local learning for media. Adds reporting only; does not
+// change media behavior.
+const base::Feature kMediaLearningExperiment{"MediaLearningExperiment",
+ base::FEATURE_DISABLED_BY_DEFAULT};
+
+// Enables flash to be ducked by audio focus.
+const base::Feature kAudioFocusDuckFlash{"AudioFocusDuckFlash",
+ base::FEATURE_DISABLED_BY_DEFAULT};
+
+// Enables the internal Media Session logic without enabling the Media Session
+// service.
+const base::Feature kInternalMediaSession {
+ "InternalMediaSession",
+#if defined(OS_ANDROID)
+ base::FEATURE_ENABLED_BY_DEFAULT
+#else
+ base::FEATURE_DISABLED_BY_DEFAULT
+#endif
+};
+
bool IsVideoCaptureAcceleratedJpegDecodingEnabled() {
if (base::CommandLine::ForCurrentProcess()->HasSwitch(
switches::kDisableAcceleratedMjpegDecode)) {
diff --git a/chromium/media/base/media_switches.h b/chromium/media/base/media_switches.h
index 0a0655ea600..46306f9f689 100644
--- a/chromium/media/base/media_switches.h
+++ b/chromium/media/base/media_switches.h
@@ -87,7 +87,6 @@ namespace autoplay {
MEDIA_EXPORT extern const char kDocumentUserActivationRequiredPolicy[];
MEDIA_EXPORT extern const char kNoUserGestureRequiredPolicy[];
MEDIA_EXPORT extern const char kUserGestureRequiredPolicy[];
-MEDIA_EXPORT extern const char kUserGestureRequiredForCrossOriginPolicy[];
} // namespace autoplay
@@ -98,23 +97,23 @@ namespace media {
// All features in alphabetical order. The features should be documented
// alongside the definition of their values in the .cc file.
+MEDIA_EXPORT extern const base::Feature kAudioFocusDuckFlash;
MEDIA_EXPORT extern const base::Feature kAutoplayIgnoreWebAudio;
MEDIA_EXPORT extern const base::Feature kAutoplayDisableSettings;
MEDIA_EXPORT extern const base::Feature kAutoplayWhitelistSettings;
MEDIA_EXPORT extern const base::Feature kBackgroundSrcVideoTrackOptimization;
MEDIA_EXPORT extern const base::Feature kBackgroundVideoPauseOptimization;
-MEDIA_EXPORT extern const base::Feature kD3D11EncryptedMedia;
-MEDIA_EXPORT extern const base::Feature kD3D11VP9Decoder;
MEDIA_EXPORT extern const base::Feature kD3D11VideoDecoder;
MEDIA_EXPORT extern const base::Feature kExternalClearKeyForTesting;
MEDIA_EXPORT extern const base::Feature kFallbackAfterDecodeError;
MEDIA_EXPORT extern const base::Feature kHardwareMediaKeyHandling;
MEDIA_EXPORT extern const base::Feature kHardwareSecureDecryption;
-MEDIA_EXPORT extern const base::Feature kLimitParallelMediaPreloading;
+MEDIA_EXPORT extern const base::Feature kInternalMediaSession;
MEDIA_EXPORT extern const base::Feature kLowDelayVideoRenderingOnLiveStream;
MEDIA_EXPORT extern const base::Feature kMediaCapabilitiesWithParameters;
MEDIA_EXPORT extern const base::Feature kMediaCastOverlayButton;
MEDIA_EXPORT extern const base::Feature kMediaEngagementBypassAutoplayPolicies;
+MEDIA_EXPORT extern const base::Feature kMediaLearningExperiment;
MEDIA_EXPORT extern const base::Feature kMemoryPressureBasedSourceBufferGC;
MEDIA_EXPORT extern const base::Feature kMojoVideoDecoder;
MEDIA_EXPORT extern const base::Feature kMseBufferByPts;
diff --git a/chromium/media/base/media_types.h b/chromium/media/base/media_types.h
new file mode 100644
index 00000000000..b1b9498e6cc
--- /dev/null
+++ b/chromium/media/base/media_types.h
@@ -0,0 +1,32 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_MEDIA_TYPES_H_
+#define MEDIA_BASE_MEDIA_TYPES_H_
+
+#include "media/base/audio_codecs.h"
+#include "media/base/media_export.h"
+#include "media/base/video_codecs.h"
+#include "media/base/video_color_space.h"
+
+namespace media {
+
+// These structures represent parsed audio/video content types (mime strings).
+// These are a subset of {Audio|Video}DecoderConfig classes, which can only be
+// created after demuxing.
+
+struct MEDIA_EXPORT AudioType {
+ AudioCodec codec;
+};
+
+struct MEDIA_EXPORT VideoType {
+ VideoCodec codec;
+ VideoCodecProfile profile;
+ int level;
+ VideoColorSpace color_space;
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_MEDIA_TYPES_H_
diff --git a/chromium/media/base/mime_util_internal.cc b/chromium/media/base/mime_util_internal.cc
index 424e101d326..021151fedf4 100644
--- a/chromium/media/base/mime_util_internal.cc
+++ b/chromium/media/base/mime_util_internal.cc
@@ -11,8 +11,8 @@
#include "base/strings/string_util.h"
#include "build/build_config.h"
#include "media/base/media.h"
-#include "media/base/media_client.h"
#include "media/base/media_switches.h"
+#include "media/base/supported_types.h"
#include "media/base/video_codecs.h"
#include "media/base/video_color_space.h"
#include "media/media_buildflags.h"
@@ -123,7 +123,7 @@ static bool IsValidH264Level(uint8_t level_idc) {
(level_idc >= 50 && level_idc <= 52));
}
-MimeUtil::MimeUtil() : allow_proprietary_codecs_(false) {
+MimeUtil::MimeUtil() {
#if defined(OS_ANDROID)
// When the unified media pipeline is enabled, we need support for both GPU
// video decoders and MediaCodec; indicated by HasPlatformDecoderSupport().
@@ -255,10 +255,6 @@ SupportsType MimeUtil::AreSupportedCodecs(
}
void MimeUtil::InitializeMimeTypeMaps() {
-#if BUILDFLAG(USE_PROPRIETARY_CODECS)
- allow_proprietary_codecs_ = true;
-#endif
-
AddSupportedMediaFormats();
}
@@ -329,36 +325,40 @@ void MimeUtil::AddSupportedMediaFormats() {
mp4_codecs.insert(mp4_video_codecs.begin(), mp4_video_codecs.end());
const CodecSet implicit_codec;
- AddContainerWithCodecs("audio/wav", wav_codecs, false);
- AddContainerWithCodecs("audio/x-wav", wav_codecs, false);
- AddContainerWithCodecs("audio/webm", webm_audio_codecs, false);
+ AddContainerWithCodecs("audio/wav", wav_codecs);
+ AddContainerWithCodecs("audio/x-wav", wav_codecs);
+ AddContainerWithCodecs("audio/webm", webm_audio_codecs);
DCHECK(!webm_video_codecs.empty());
- AddContainerWithCodecs("video/webm", webm_codecs, false);
- AddContainerWithCodecs("audio/ogg", ogg_audio_codecs, false);
+ AddContainerWithCodecs("video/webm", webm_codecs);
+ AddContainerWithCodecs("audio/ogg", ogg_audio_codecs);
// video/ogg is only supported if an appropriate video codec is supported.
// Note: This assumes such codecs cannot be later excluded.
if (!ogg_video_codecs.empty())
- AddContainerWithCodecs("video/ogg", ogg_codecs, false);
+ AddContainerWithCodecs("video/ogg", ogg_codecs);
// TODO(ddorwin): Should the application type support Opus?
- AddContainerWithCodecs("application/ogg", ogg_codecs, false);
- AddContainerWithCodecs("audio/flac", implicit_codec, false);
- AddContainerWithCodecs("audio/mpeg", mp3_codecs, false); // Allow "mp3".
- AddContainerWithCodecs("audio/mp3", implicit_codec, false);
- AddContainerWithCodecs("audio/x-mp3", implicit_codec, false);
- AddContainerWithCodecs("audio/mp4", mp4_audio_codecs, false);
+ AddContainerWithCodecs("application/ogg", ogg_codecs);
+ AddContainerWithCodecs("audio/flac", implicit_codec);
+ AddContainerWithCodecs("audio/mpeg", mp3_codecs); // Allow "mp3".
+ AddContainerWithCodecs("audio/mp3", implicit_codec);
+ AddContainerWithCodecs("audio/x-mp3", implicit_codec);
+ AddContainerWithCodecs("audio/mp4", mp4_audio_codecs);
DCHECK(!mp4_video_codecs.empty());
- AddContainerWithCodecs("video/mp4", mp4_codecs, false);
+ AddContainerWithCodecs("video/mp4", mp4_codecs);
#if BUILDFLAG(USE_PROPRIETARY_CODECS)
- AddContainerWithCodecs("audio/aac", implicit_codec, true); // AAC / ADTS.
+ AddContainerWithCodecs("audio/aac", implicit_codec); // AAC / ADTS.
// These strings are supported for backwards compatibility only and thus only
// support the codecs needed for compatibility.
- AddContainerWithCodecs("audio/x-m4a", aac, true);
- AddContainerWithCodecs("video/x-m4v", avc_and_aac, true);
+ AddContainerWithCodecs("audio/x-m4a", aac);
+ AddContainerWithCodecs("video/x-m4v", avc_and_aac);
+
+ CodecSet video_3gpp_codecs(aac);
+ video_3gpp_codecs.emplace(H264);
+ AddContainerWithCodecs("video/3gpp", video_3gpp_codecs);
#if BUILDFLAG(ENABLE_MSE_MPEG2TS_STREAM_PARSER)
CodecSet mp2t_codecs{H264, MPEG2_AAC, MPEG4_AAC, MP3};
- AddContainerWithCodecs("video/mp2t", mp2t_codecs, true);
+ AddContainerWithCodecs("video/mp2t", mp2t_codecs);
#endif // BUILDFLAG(ENABLE_MSE_MPEG2TS_STREAM_PARSER)
#if defined(OS_ANDROID)
// HTTP Live Streaming (HLS).
@@ -369,28 +369,20 @@ void MimeUtil::AddSupportedMediaFormats() {
// Android HLS only supports MPEG4_AAC (missing demuxer
// support for MPEG2_AAC)
MPEG4_AAC};
- AddContainerWithCodecs("application/x-mpegurl", hls_codecs, true);
- AddContainerWithCodecs("application/vnd.apple.mpegurl", hls_codecs, true);
- AddContainerWithCodecs("audio/mpegurl", hls_codecs, true);
+ AddContainerWithCodecs("application/x-mpegurl", hls_codecs);
+ AddContainerWithCodecs("application/vnd.apple.mpegurl", hls_codecs);
+ AddContainerWithCodecs("audio/mpegurl", hls_codecs);
// Not documented by Apple, but unfortunately used extensively by Apple and
// others for both audio-only and audio+video playlists. See
// https://crbug.com/675552 for details and examples.
- AddContainerWithCodecs("audio/x-mpegurl", hls_codecs, true);
+ AddContainerWithCodecs("audio/x-mpegurl", hls_codecs);
#endif // defined(OS_ANDROID)
#endif // BUILDFLAG(USE_PROPRIETARY_CODECS)
}
void MimeUtil::AddContainerWithCodecs(const std::string& mime_type,
- const CodecSet& codecs,
- bool is_proprietary_mime_type) {
-#if !BUILDFLAG(USE_PROPRIETARY_CODECS)
- DCHECK(!is_proprietary_mime_type);
-#endif
-
+ const CodecSet& codecs) {
media_format_map_[mime_type] = codecs;
-
- if (is_proprietary_mime_type)
- proprietary_media_containers_.push_back(mime_type);
}
bool MimeUtil::IsSupportedMediaMimeType(const std::string& mime_type) const {
@@ -879,11 +871,6 @@ SupportsType MimeUtil::IsCodecSupported(const std::string& mime_type_lower_case,
DCHECK_GT(video_level, 0);
}
- // Bail early for disabled proprietary codecs
- if (!allow_proprietary_codecs_ && IsCodecProprietary(codec)) {
- return IsNotSupported;
- }
-
// Check for cases of ambiguous platform support.
// TODO(chcunningham): DELETE THIS. Platform should know its capabilities.
// Answer should come from MediaClient.
@@ -911,37 +898,19 @@ SupportsType MimeUtil::IsCodecSupported(const std::string& mime_type_lower_case,
AudioCodec audio_codec = MimeUtilToAudioCodec(codec);
if (audio_codec != kUnknownAudioCodec) {
- AudioConfig audio_config = {audio_codec};
-
- // If MediaClient is provided use it to check for decoder support.
- MediaClient* media_client = GetMediaClient();
- if (media_client && !media_client->IsSupportedAudioConfig(audio_config))
- return IsNotSupported;
-
- // When no MediaClient is provided, assume default decoders are available
- // as described by media::IsSupportedAudioConfig().
- if (!media_client && !IsSupportedAudioConfig(audio_config))
+ if (!IsSupportedAudioType({audio_codec}))
return IsNotSupported;
}
if (video_codec != kUnknownVideoCodec) {
- VideoConfig video_config = {video_codec, video_profile, video_level,
- color_space};
-
- // If MediaClient is provided use it to check for decoder support.
- MediaClient* media_client = GetMediaClient();
- if (media_client && !media_client->IsSupportedVideoConfig(video_config))
- return IsNotSupported;
-
- // When no MediaClient is provided, assume default decoders are available
- // as described by media::IsSupportedVideoConfig().
- if (!media_client && !IsSupportedVideoConfig(video_config))
+ if (!IsSupportedVideoType(
+ {video_codec, video_profile, video_level, color_space}))
return IsNotSupported;
}
#if defined(OS_ANDROID)
// TODO(chcunningham): Delete this. Android platform support should be
- // handled by (android specific) media::IsSupportedVideoConfig() above.
+ // handled by (android specific) media::IsSupportedVideoType() above.
if (!IsCodecSupportedOnAndroid(codec, mime_type_lower_case, is_encrypted,
platform_info_)) {
return IsNotSupported;
@@ -951,34 +920,6 @@ SupportsType MimeUtil::IsCodecSupported(const std::string& mime_type_lower_case,
return ambiguous_platform_support ? MayBeSupported : IsSupported;
}
-bool MimeUtil::IsCodecProprietary(Codec codec) const {
- switch (codec) {
- case INVALID_CODEC:
- case AC3:
- case EAC3:
- case MPEG_H_AUDIO:
- case MPEG2_AAC:
- case MPEG4_AAC:
- case H264:
- case HEVC:
- case DOLBY_VISION:
- return true;
-
- case MP3:
- case PCM:
- case VORBIS:
- case OPUS:
- case FLAC:
- case VP8:
- case VP9:
- case THEORA:
- case AV1:
- return false;
- }
-
- return true;
-}
-
bool MimeUtil::GetDefaultCodec(const std::string& mime_type,
Codec* default_codec) const {
// Codecs below are unambiguously implied by the mime type string. DO NOT add
diff --git a/chromium/media/base/mime_util_internal.h b/chromium/media/base/mime_util_internal.h
index 38fe0689284..1bd0196e41d 100644
--- a/chromium/media/base/mime_util_internal.h
+++ b/chromium/media/base/mime_util_internal.h
@@ -109,8 +109,7 @@ class MEDIA_EXPORT MimeUtil {
// Adds |mime_type| with the specified codecs to |media_format_map_|.
void AddContainerWithCodecs(const std::string& mime_type,
- const CodecSet& codecs_list,
- bool is_proprietary_mime_type);
+ const CodecSet& codecs_list);
// Returns IsSupported if all codec IDs in |codecs| are unambiguous and are
// supported in |mime_type_lower_case|. MayBeSupported is returned if at least
@@ -190,11 +189,6 @@ class MEDIA_EXPORT MimeUtil {
// A map of mime_types and hash map of the supported codecs for the mime_type.
MediaFormatMappings media_format_map_;
- // List of proprietary containers in |media_format_map_|.
- std::vector<std::string> proprietary_media_containers_;
- // Whether proprietary codec support should be advertised to callers.
- bool allow_proprietary_codecs_;
-
DISALLOW_COPY_AND_ASSIGN(MimeUtil);
};
diff --git a/chromium/media/base/mime_util_unittest.cc b/chromium/media/base/mime_util_unittest.cc
index b66bbe1ca7c..630efa8a4f5 100644
--- a/chromium/media/base/mime_util_unittest.cc
+++ b/chromium/media/base/mime_util_unittest.cc
@@ -4,7 +4,7 @@
#include <stddef.h>
-#include "base/macros.h"
+#include "base/stl_util.h"
#include "base/strings/string_split.h"
#include "base/strings/stringprintf.h"
#include "base/test/scoped_command_line.h"
@@ -51,7 +51,8 @@ static std::vector<bool> CreateTestVector(bool test_all_values,
bool single_value) {
const bool kTestStates[] = {true, false};
if (test_all_values)
- return std::vector<bool>(kTestStates, kTestStates + arraysize(kTestStates));
+ return std::vector<bool>(kTestStates,
+ kTestStates + base::size(kTestStates));
return std::vector<bool>(1, single_value);
}
@@ -183,6 +184,7 @@ TEST(MimeUtilTest, CommonMediaMimeType) {
EXPECT_TRUE(IsSupportedMediaMimeType("audio/x-m4a"));
EXPECT_TRUE(IsSupportedMediaMimeType("video/x-m4v"));
EXPECT_TRUE(IsSupportedMediaMimeType("audio/aac"));
+ EXPECT_TRUE(IsSupportedMediaMimeType("video/3gpp"));
#if BUILDFLAG(ENABLE_MSE_MPEG2TS_STREAM_PARSER)
EXPECT_TRUE(IsSupportedMediaMimeType("video/mp2t"));
@@ -194,6 +196,7 @@ TEST(MimeUtilTest, CommonMediaMimeType) {
EXPECT_FALSE(IsSupportedMediaMimeType("audio/x-m4a"));
EXPECT_FALSE(IsSupportedMediaMimeType("video/x-m4v"));
EXPECT_FALSE(IsSupportedMediaMimeType("audio/aac"));
+ EXPECT_FALSE(IsSupportedMediaMimeType("video/3gpp"));
#endif // USE_PROPRIETARY_CODECS
EXPECT_FALSE(IsSupportedMediaMimeType("video/mp3"));
@@ -229,7 +232,7 @@ TEST(MimeUtilTest, SplitAndStripCodecs) {
{",", 2, {"", ""}, {"", ""}},
};
- for (size_t i = 0; i < arraysize(tests); ++i) {
+ for (size_t i = 0; i < base::size(tests); ++i) {
std::vector<std::string> codecs_out;
SplitCodecs(tests[i].original, &codecs_out);
diff --git a/chromium/media/base/mock_filters.cc b/chromium/media/base/mock_filters.cc
index 0cb8b4115fc..249413d61da 100644
--- a/chromium/media/base/mock_filters.cc
+++ b/chromium/media/base/mock_filters.cc
@@ -37,6 +37,10 @@ void MockPipeline::Resume(std::unique_ptr<Renderer> renderer,
Resume(&renderer, timestamp, seek_cb);
}
+MockMediaResource::MockMediaResource() = default;
+
+MockMediaResource::~MockMediaResource() = default;
+
MockDemuxer::MockDemuxer() = default;
MockDemuxer::~MockDemuxer() = default;
diff --git a/chromium/media/base/mock_filters.h b/chromium/media/base/mock_filters.h
index a21f29f2c56..1c4414feecc 100644
--- a/chromium/media/base/mock_filters.h
+++ b/chromium/media/base/mock_filters.h
@@ -55,7 +55,7 @@ class MockPipelineClient : public Pipeline::Client {
MOCK_METHOD0(OnDurationChange, void());
MOCK_METHOD2(OnAddTextTrack,
void(const TextTrackConfig&, const AddTextTrackDoneCB&));
- MOCK_METHOD0(OnWaitingForDecryptionKey, void());
+ MOCK_METHOD1(OnWaiting, void(WaitingReason));
MOCK_METHOD1(OnAudioConfigChange, void(const AudioDecoderConfig&));
MOCK_METHOD1(OnVideoConfigChange, void(const VideoDecoderConfig&));
MOCK_METHOD1(OnVideoNaturalSizeChange, void(const gfx::Size&));
@@ -63,6 +63,7 @@ class MockPipelineClient : public Pipeline::Client {
MOCK_METHOD0(OnVideoAverageKeyframeDistanceUpdate, void());
MOCK_METHOD1(OnAudioDecoderChange, void(const std::string&));
MOCK_METHOD1(OnVideoDecoderChange, void(const std::string&));
+ MOCK_METHOD1(OnRemotePlayStateChange, void(MediaStatus::State state));
};
class MockPipeline : public Pipeline {
@@ -127,6 +128,18 @@ class MockPipeline : public Pipeline {
DISALLOW_COPY_AND_ASSIGN(MockPipeline);
};
+class MockMediaResource : public MediaResource {
+ public:
+ MockMediaResource();
+ ~MockMediaResource() override;
+
+ // MediaResource implementation.
+ MOCK_CONST_METHOD0(GetType, MediaResource::Type());
+ MOCK_METHOD0(GetAllStreams, std::vector<DemuxerStream*>());
+ MOCK_METHOD1(GetFirstStream, DemuxerStream*(DemuxerStream::Type type));
+ MOCK_CONST_METHOD0(GetMediaUrlParams, MediaUrlParams());
+};
+
class MockDemuxer : public Demuxer {
public:
MockDemuxer();
@@ -193,14 +206,13 @@ class MockVideoDecoder : public VideoDecoder {
// VideoDecoder implementation.
std::string GetDisplayName() const override;
- MOCK_METHOD6(
- Initialize,
- void(const VideoDecoderConfig& config,
- bool low_delay,
- CdmContext* cdm_context,
- const InitCB& init_cb,
- const OutputCB& output_cb,
- const WaitingForDecryptionKeyCB& waiting_for_decryption_key_cb));
+ MOCK_METHOD6(Initialize,
+ void(const VideoDecoderConfig& config,
+ bool low_delay,
+ CdmContext* cdm_context,
+ const InitCB& init_cb,
+ const OutputCB& output_cb,
+ const WaitingCB& waiting_cb));
MOCK_METHOD2(Decode,
void(scoped_refptr<DecoderBuffer> buffer, const DecodeCB&));
MOCK_METHOD1(Reset, void(const base::Closure&));
@@ -221,13 +233,12 @@ class MockAudioDecoder : public AudioDecoder {
// AudioDecoder implementation.
std::string GetDisplayName() const override;
- MOCK_METHOD5(
- Initialize,
- void(const AudioDecoderConfig& config,
- CdmContext* cdm_context,
- const InitCB& init_cb,
- const OutputCB& output_cb,
- const WaitingForDecryptionKeyCB& waiting_for_decryption_key_cb));
+ MOCK_METHOD5(Initialize,
+ void(const AudioDecoderConfig& config,
+ CdmContext* cdm_context,
+ const InitCB& init_cb,
+ const OutputCB& output_cb,
+ const WaitingCB& waiting_cb));
MOCK_METHOD2(Decode,
void(scoped_refptr<DecoderBuffer> buffer, const DecodeCB&));
MOCK_METHOD1(Reset, void(const base::Closure&));
@@ -247,12 +258,13 @@ class MockRendererClient : public RendererClient {
MOCK_METHOD0(OnEnded, void());
MOCK_METHOD1(OnStatisticsUpdate, void(const PipelineStatistics&));
MOCK_METHOD1(OnBufferingStateChange, void(BufferingState));
- MOCK_METHOD0(OnWaitingForDecryptionKey, void());
+ MOCK_METHOD1(OnWaiting, void(WaitingReason));
MOCK_METHOD1(OnAudioConfigChange, void(const AudioDecoderConfig&));
MOCK_METHOD1(OnVideoConfigChange, void(const VideoDecoderConfig&));
MOCK_METHOD1(OnVideoNaturalSizeChange, void(const gfx::Size&));
MOCK_METHOD1(OnVideoOpacityChange, void(bool));
MOCK_METHOD1(OnDurationChange, void(base::TimeDelta));
+ MOCK_METHOD1(OnRemotePlayStateChange, void(MediaStatus::State state));
};
class MockVideoRenderer : public VideoRenderer {
@@ -420,6 +432,7 @@ class MockDecryptor : public Decryptor {
const VideoDecodeCB& video_decode_cb));
MOCK_METHOD1(ResetDecoder, void(StreamType stream_type));
MOCK_METHOD1(DeinitializeDecoder, void(StreamType stream_type));
+ MOCK_METHOD0(CanAlwaysDecrypt, bool());
private:
DISALLOW_COPY_AND_ASSIGN(MockDecryptor);
diff --git a/chromium/media/base/overlay_info.h b/chromium/media/base/overlay_info.h
index e4af30f153b..489e0ec2e61 100644
--- a/chromium/media/base/overlay_info.h
+++ b/chromium/media/base/overlay_info.h
@@ -35,6 +35,9 @@ struct MEDIA_EXPORT OverlayInfo {
// Is the player in fullscreen?
bool is_fullscreen = false;
+
+ // Is the player persistent video (PiP)?
+ bool is_persistent_video = false;
};
using ProvideOverlayInfoCB = base::Callback<void(const OverlayInfo&)>;
diff --git a/chromium/media/base/pipeline.h b/chromium/media/base/pipeline.h
index 0df4066d957..7b96cb29dd3 100644
--- a/chromium/media/base/pipeline.h
+++ b/chromium/media/base/pipeline.h
@@ -13,6 +13,7 @@
#include "media/base/buffering_state.h"
#include "media/base/cdm_context.h"
#include "media/base/media_export.h"
+#include "media/base/media_status.h"
#include "media/base/media_track.h"
#include "media/base/pipeline_metadata.h"
#include "media/base/pipeline_status.h"
@@ -20,6 +21,7 @@
#include "media/base/text_track.h"
#include "media/base/video_decoder_config.h"
#include "media/base/video_rotation.h"
+#include "media/base/waiting.h"
#include "ui/gfx/geometry/size.h"
namespace media {
@@ -57,8 +59,8 @@ class MEDIA_EXPORT Pipeline {
virtual void OnAddTextTrack(const TextTrackConfig& config,
const AddTextTrackDoneCB& done_cb) = 0;
- // Executed whenever the key needed to decrypt the stream is not available.
- virtual void OnWaitingForDecryptionKey() = 0;
+ // Executed whenever the pipeline is waiting because of |reason|.
+ virtual void OnWaiting(WaitingReason reason) = 0;
// Executed for the first video frame and whenever natural size changes.
virtual void OnVideoNaturalSizeChange(const gfx::Size& size) = 0;
@@ -78,6 +80,12 @@ class MEDIA_EXPORT Pipeline {
// during playback.
virtual void OnAudioDecoderChange(const std::string& name) = 0;
virtual void OnVideoDecoderChange(const std::string& name) = 0;
+
+ // Executed whenever an important status change has happened, and that this
+ // change was not initiated by Pipeline or Pipeline::Client.
+ // Only used with FlingingRenderer, when an external device pauses/resumes
+ // a video that is playing remotely.
+ virtual void OnRemotePlayStateChange(MediaStatus::State state) = 0;
};
virtual ~Pipeline() {}
diff --git a/chromium/media/base/pipeline_impl.cc b/chromium/media/base/pipeline_impl.cc
index 2739aa7c1af..e3a45af4793 100644
--- a/chromium/media/base/pipeline_impl.cc
+++ b/chromium/media/base/pipeline_impl.cc
@@ -56,7 +56,7 @@ class PipelineImpl::RendererWrapper : public DemuxerHost,
Demuxer* demuxer,
std::unique_ptr<Renderer> renderer,
base::WeakPtr<PipelineImpl> weak_pipeline);
- void Stop(const base::Closure& stop_cb);
+ void Stop();
void Seek(base::TimeDelta time);
void Suspend();
void Resume(std::unique_ptr<Renderer> renderer, base::TimeDelta time);
@@ -132,12 +132,13 @@ class PipelineImpl::RendererWrapper : public DemuxerHost,
void OnEnded() final;
void OnStatisticsUpdate(const PipelineStatistics& stats) final;
void OnBufferingStateChange(BufferingState state) final;
- void OnWaitingForDecryptionKey() final;
+ void OnWaiting(WaitingReason reason) final;
void OnAudioConfigChange(const AudioDecoderConfig& config) final;
void OnVideoConfigChange(const VideoDecoderConfig& config) final;
void OnVideoNaturalSizeChange(const gfx::Size& size) final;
void OnVideoOpacityChange(bool opaque) final;
void OnDurationChange(base::TimeDelta duration) final;
+ void OnRemotePlayStateChange(MediaStatus::State state) final;
// Common handlers for notifications from renderers and demuxer.
void OnPipelineError(PipelineStatus error);
@@ -188,7 +189,9 @@ class PipelineImpl::RendererWrapper : public DemuxerHost,
// Series of tasks to Start(), Seek(), and Resume().
std::unique_ptr<SerialRunner> pending_callbacks_;
- base::WeakPtr<RendererWrapper> weak_this_;
+ // Called from non-media threads when an error occurs.
+ PipelineStatusCB error_cb_;
+
base::WeakPtrFactory<RendererWrapper> weak_factory_;
DISALLOW_COPY_AND_ASSIGN(RendererWrapper);
};
@@ -209,7 +212,6 @@ PipelineImpl::RendererWrapper::RendererWrapper(
renderer_ended_(false),
text_renderer_ended_(false),
weak_factory_(this) {
- weak_this_ = weak_factory_.GetWeakPtr();
}
PipelineImpl::RendererWrapper::~RendererWrapper() {
@@ -248,29 +250,35 @@ void PipelineImpl::RendererWrapper::Start(
}
weak_pipeline_ = weak_pipeline;
+ // Setup |error_cb_| on the media thread.
+ error_cb_ = base::BindRepeating(&RendererWrapper::OnPipelineError,
+ weak_factory_.GetWeakPtr());
+
// Queue asynchronous actions required to start.
DCHECK(!pending_callbacks_);
SerialRunner::Queue fns;
// Initialize demuxer.
- fns.Push(base::Bind(&RendererWrapper::InitializeDemuxer, weak_this_));
+ fns.Push(base::BindRepeating(&RendererWrapper::InitializeDemuxer,
+ weak_factory_.GetWeakPtr()));
// Once the demuxer is initialized successfully, media metadata must be
// available - report the metadata to client. If starting without a renderer
// we'll complete initialization at this point.
- fns.Push(
- base::Bind(&RendererWrapper::ReportMetadata, weak_this_, start_type));
+ fns.Push(base::BindRepeating(&RendererWrapper::ReportMetadata,
+ weak_factory_.GetWeakPtr(), start_type));
// Initialize renderer.
- fns.Push(base::Bind(&RendererWrapper::InitializeRenderer, weak_this_));
+ fns.Push(base::BindRepeating(&RendererWrapper::InitializeRenderer,
+ weak_factory_.GetWeakPtr()));
// Run tasks.
- pending_callbacks_ =
- SerialRunner::Run(fns, base::Bind(&RendererWrapper::CompleteSeek,
- weak_this_, base::TimeDelta()));
+ pending_callbacks_ = SerialRunner::Run(
+ fns, base::BindRepeating(&RendererWrapper::CompleteSeek,
+ weak_factory_.GetWeakPtr(), base::TimeDelta()));
}
-void PipelineImpl::RendererWrapper::Stop(const base::Closure& stop_cb) {
+void PipelineImpl::RendererWrapper::Stop() {
DCHECK(media_task_runner_->BelongsToCurrentThread());
DCHECK(state_ != kStopping && state_ != kStopped);
@@ -286,6 +294,7 @@ void PipelineImpl::RendererWrapper::Stop(const base::Closure& stop_cb) {
// the pipeline is stopped before it had a chance to complete outstanding
// tasks.
pending_callbacks_.reset();
+ weak_factory_.InvalidateWeakPtrs();
DestroyRenderer();
@@ -296,17 +305,9 @@ void PipelineImpl::RendererWrapper::Stop(const base::Closure& stop_cb) {
SetState(kStopped);
- // Reset the status. Otherwise, if we encountered an error, new erros will
+ // Reset the status. Otherwise, if we encountered an error, new errors will
// never be propagated. See https://crbug.com/812465.
status_ = PIPELINE_OK;
-
- // Post the stop callback to enqueue it after the tasks that may have been
- // posted by Demuxer and Renderer during stopping. Note that in theory the
- // tasks posted by Demuxer/Renderer may post even more tasks that will get
- // enqueued after |stop_cb|. This may be problematic because Demuxer may
- // get destroyed as soon as |stop_cb| is run. In practice this is not a
- // problem, but ideally Demuxer should be destroyed on the media thread.
- media_task_runner_->PostTask(FROM_HERE, stop_cb);
}
void PipelineImpl::RendererWrapper::Seek(base::TimeDelta time) {
@@ -336,17 +337,18 @@ void PipelineImpl::RendererWrapper::Seek(base::TimeDelta time) {
// Flush.
DCHECK(shared_state_.renderer);
- bound_fns.Push(base::Bind(&Renderer::Flush,
- base::Unretained(shared_state_.renderer.get())));
+ bound_fns.Push(base::BindRepeating(
+ &Renderer::Flush, base::Unretained(shared_state_.renderer.get())));
// Seek demuxer.
- bound_fns.Push(
- base::Bind(&Demuxer::Seek, base::Unretained(demuxer_), seek_timestamp));
+ bound_fns.Push(base::BindRepeating(&Demuxer::Seek, base::Unretained(demuxer_),
+ seek_timestamp));
// Run tasks.
pending_callbacks_ = SerialRunner::Run(
bound_fns,
- base::Bind(&RendererWrapper::CompleteSeek, weak_this_, seek_timestamp));
+ base::BindRepeating(&RendererWrapper::CompleteSeek,
+ weak_factory_.GetWeakPtr(), seek_timestamp));
}
void PipelineImpl::RendererWrapper::Suspend() {
@@ -377,7 +379,8 @@ void PipelineImpl::RendererWrapper::Suspend() {
// No need to flush the renderer since it's going to be destroyed.
pending_callbacks_ = SerialRunner::Run(
- fns, base::Bind(&RendererWrapper::CompleteSuspend, weak_this_));
+ fns, base::BindRepeating(&RendererWrapper::CompleteSuspend,
+ weak_factory_.GetWeakPtr()));
}
void PipelineImpl::RendererWrapper::Resume(std::unique_ptr<Renderer> renderer,
@@ -412,14 +415,15 @@ void PipelineImpl::RendererWrapper::Resume(std::unique_ptr<Renderer> renderer,
// Queue the asynchronous actions required to start playback.
SerialRunner::Queue fns;
- fns.Push(
- base::Bind(&Demuxer::Seek, base::Unretained(demuxer_), start_timestamp));
+ fns.Push(base::BindRepeating(&Demuxer::Seek, base::Unretained(demuxer_),
+ start_timestamp));
- fns.Push(base::Bind(&RendererWrapper::InitializeRenderer, weak_this_));
+ fns.Push(base::BindRepeating(&RendererWrapper::InitializeRenderer,
+ weak_factory_.GetWeakPtr()));
pending_callbacks_ = SerialRunner::Run(
- fns,
- base::Bind(&RendererWrapper::CompleteSeek, weak_this_, start_timestamp));
+ fns, base::BindRepeating(&RendererWrapper::CompleteSeek,
+ weak_factory_.GetWeakPtr(), start_timestamp));
}
void PipelineImpl::RendererWrapper::SetPlaybackRate(double playback_rate) {
@@ -484,8 +488,9 @@ void PipelineImpl::RendererWrapper::SetCdm(
}
shared_state_.renderer->SetCdm(
- cdm_context, base::Bind(&RendererWrapper::OnCdmAttached, weak_this_,
- cdm_attached_cb, cdm_context));
+ cdm_context, base::BindRepeating(&RendererWrapper::OnCdmAttached,
+ weak_factory_.GetWeakPtr(),
+ cdm_attached_cb, cdm_context));
}
void PipelineImpl::RendererWrapper::OnBufferedTimeRangesChanged(
@@ -507,24 +512,21 @@ void PipelineImpl::RendererWrapper::SetDuration(base::TimeDelta duration) {
base::TimeDelta::FromDays(1), 50 /* bucket_count */);
main_task_runner_->PostTask(
- FROM_HERE,
- base::Bind(&PipelineImpl::OnDurationChange, weak_pipeline_, duration));
+ FROM_HERE, base::BindOnce(&PipelineImpl::OnDurationChange, weak_pipeline_,
+ duration));
}
void PipelineImpl::RendererWrapper::OnDemuxerError(PipelineStatus error) {
// TODO(alokp): Add thread DCHECK after ensuring that all Demuxer
// implementations call DemuxerHost on the media thread.
- media_task_runner_->PostTask(
- FROM_HERE,
- base::Bind(&RendererWrapper::OnPipelineError, weak_this_, error));
+ DCHECK(error_cb_);
+ media_task_runner_->PostTask(FROM_HERE, base::BindOnce(error_cb_, error));
}
void PipelineImpl::RendererWrapper::OnError(PipelineStatus error) {
DCHECK(media_task_runner_->BelongsToCurrentThread());
-
- media_task_runner_->PostTask(
- FROM_HERE,
- base::Bind(&RendererWrapper::OnPipelineError, weak_this_, error));
+ DCHECK(error_cb_);
+ media_task_runner_->PostTask(FROM_HERE, base::BindOnce(error_cb_, error));
}
void PipelineImpl::RendererWrapper::OnEnded() {
@@ -586,7 +588,8 @@ void PipelineImpl::RendererWrapper::OnEnabledAudioTracksChanged(
demuxer_->OnEnabledAudioTracksChanged(
enabled_track_ids, GetCurrentTimestamp(),
base::BindOnce(&RendererWrapper::OnDemuxerCompletedTrackChange,
- weak_this_, base::Passed(&change_completed_cb)));
+ weak_factory_.GetWeakPtr(),
+ base::Passed(&change_completed_cb)));
}
void PipelineImpl::OnSelectedVideoTrackChanged(
@@ -625,7 +628,8 @@ void PipelineImpl::RendererWrapper::OnSelectedVideoTrackChanged(
demuxer_->OnSelectedVideoTrackChanged(
tracks, GetCurrentTimestamp(),
base::BindOnce(&RendererWrapper::OnDemuxerCompletedTrackChange,
- weak_this_, base::Passed(&change_completed_cb)));
+ weak_factory_.GetWeakPtr(),
+ base::Passed(&change_completed_cb)));
}
void PipelineImpl::RendererWrapper::OnDemuxerCompletedTrackChange(
@@ -701,8 +705,8 @@ void PipelineImpl::RendererWrapper::OnStatisticsUpdate(
old_key_frame_distance_average) {
main_task_runner_->PostTask(
FROM_HERE,
- base::Bind(&PipelineImpl::OnVideoAverageKeyframeDistanceUpdate,
- weak_pipeline_));
+ base::BindOnce(&PipelineImpl::OnVideoAverageKeyframeDistanceUpdate,
+ weak_pipeline_));
}
}
@@ -712,16 +716,16 @@ void PipelineImpl::RendererWrapper::OnBufferingStateChange(
DVLOG(2) << __func__ << "(" << state << ") ";
main_task_runner_->PostTask(
- FROM_HERE,
- base::Bind(&PipelineImpl::OnBufferingStateChange, weak_pipeline_, state));
+ FROM_HERE, base::BindOnce(&PipelineImpl::OnBufferingStateChange,
+ weak_pipeline_, state));
}
-void PipelineImpl::RendererWrapper::OnWaitingForDecryptionKey() {
+void PipelineImpl::RendererWrapper::OnWaiting(WaitingReason reason) {
DCHECK(media_task_runner_->BelongsToCurrentThread());
main_task_runner_->PostTask(
FROM_HERE,
- base::Bind(&PipelineImpl::OnWaitingForDecryptionKey, weak_pipeline_));
+ base::BindOnce(&PipelineImpl::OnWaiting, weak_pipeline_, reason));
}
void PipelineImpl::RendererWrapper::OnVideoNaturalSizeChange(
@@ -737,26 +741,26 @@ void PipelineImpl::RendererWrapper::OnVideoOpacityChange(bool opaque) {
DCHECK(media_task_runner_->BelongsToCurrentThread());
main_task_runner_->PostTask(
- FROM_HERE,
- base::Bind(&PipelineImpl::OnVideoOpacityChange, weak_pipeline_, opaque));
+ FROM_HERE, base::BindOnce(&PipelineImpl::OnVideoOpacityChange,
+ weak_pipeline_, opaque));
}
void PipelineImpl::RendererWrapper::OnAudioConfigChange(
const AudioDecoderConfig& config) {
DCHECK(media_task_runner_->BelongsToCurrentThread());
- main_task_runner_->PostTask(
- FROM_HERE,
- base::Bind(&PipelineImpl::OnAudioConfigChange, weak_pipeline_, config));
+ main_task_runner_->PostTask(FROM_HERE,
+ base::BindOnce(&PipelineImpl::OnAudioConfigChange,
+ weak_pipeline_, config));
}
void PipelineImpl::RendererWrapper::OnVideoConfigChange(
const VideoDecoderConfig& config) {
DCHECK(media_task_runner_->BelongsToCurrentThread());
- main_task_runner_->PostTask(
- FROM_HERE,
- base::Bind(&PipelineImpl::OnVideoConfigChange, weak_pipeline_, config));
+ main_task_runner_->PostTask(FROM_HERE,
+ base::BindOnce(&PipelineImpl::OnVideoConfigChange,
+ weak_pipeline_, config));
}
void PipelineImpl::RendererWrapper::OnDurationChange(base::TimeDelta duration) {
@@ -764,6 +768,15 @@ void PipelineImpl::RendererWrapper::OnDurationChange(base::TimeDelta duration) {
SetDuration(duration);
}
+void PipelineImpl::RendererWrapper::OnRemotePlayStateChange(
+ MediaStatus::State state) {
+ DCHECK(media_task_runner_->BelongsToCurrentThread());
+
+ main_task_runner_->PostTask(
+ FROM_HERE, base::BindOnce(&PipelineImpl::OnRemotePlayStateChange,
+ weak_pipeline_, state));
+}
+
void PipelineImpl::RendererWrapper::OnPipelineError(PipelineStatus error) {
DCHECK(media_task_runner_->BelongsToCurrentThread());
DCHECK_NE(PIPELINE_OK, error) << "PIPELINE_OK isn't an error!";
@@ -913,7 +926,7 @@ void PipelineImpl::RendererWrapper::InitializeRenderer(
if (cdm_context_) {
shared_state_.renderer->SetCdm(cdm_context_,
- base::Bind(&IgnoreCdmAttached));
+ base::BindRepeating(&IgnoreCdmAttached));
}
shared_state_.renderer->Initialize(demuxer_, this, done_cb);
@@ -1036,10 +1049,10 @@ void PipelineImpl::Start(StartType start_type,
seek_time_ = kNoTimestamp;
media_task_runner_->PostTask(
- FROM_HERE,
- base::Bind(&RendererWrapper::Start,
- base::Unretained(renderer_wrapper_.get()), start_type, demuxer,
- base::Passed(&renderer), weak_factory_.GetWeakPtr()));
+ FROM_HERE, base::BindOnce(&RendererWrapper::Start,
+ base::Unretained(renderer_wrapper_.get()),
+ start_type, demuxer, base::Passed(&renderer),
+ weak_factory_.GetWeakPtr()));
}
void PipelineImpl::Stop() {
@@ -1053,29 +1066,13 @@ void PipelineImpl::Stop() {
if (media_task_runner_->BelongsToCurrentThread()) {
// This path is executed by unittests that share media and main threads.
- renderer_wrapper_->Stop(base::DoNothing());
+ renderer_wrapper_->Stop();
} else {
// This path is executed by production code where the two task runners -
// main and media - live on different threads.
- //
- // TODO(alokp): We should not have to wait for the RendererWrapper::Stop.
- // RendererWrapper holds a raw reference to Demuxer, which in turn holds a
- // raw reference to DataSource. Both Demuxer and DataSource need to live
- // until RendererWrapper is stopped. If RendererWrapper owned Demuxer and
- // Demuxer owned DataSource, we could simply let RendererWrapper get lazily
- // destroyed on the media thread.
- base::WaitableEvent waiter(base::WaitableEvent::ResetPolicy::AUTOMATIC,
- base::WaitableEvent::InitialState::NOT_SIGNALED);
- base::Closure stop_cb =
- base::Bind(&base::WaitableEvent::Signal, base::Unretained(&waiter));
- // If posting the task fails or the posted task fails to run,
- // we will wait here forever. So add a CHECK to make sure we do not run
- // into those situations.
- CHECK(media_task_runner_->PostTask(
- FROM_HERE,
- base::Bind(&RendererWrapper::Stop,
- base::Unretained(renderer_wrapper_.get()), stop_cb)));
- waiter.Wait();
+ media_task_runner_->PostTask(
+ FROM_HERE, base::BindOnce(&RendererWrapper::Stop,
+ base::Unretained(renderer_wrapper_.get())));
}
// Once the pipeline is stopped, nothing is reported back to the client.
@@ -1167,9 +1164,9 @@ void PipelineImpl::SetPlaybackRate(double playback_rate) {
playback_rate_ = playback_rate;
media_task_runner_->PostTask(
- FROM_HERE,
- base::Bind(&RendererWrapper::SetPlaybackRate,
- base::Unretained(renderer_wrapper_.get()), playback_rate_));
+ FROM_HERE, base::BindOnce(&RendererWrapper::SetPlaybackRate,
+ base::Unretained(renderer_wrapper_.get()),
+ playback_rate_));
}
float PipelineImpl::GetVolume() const {
@@ -1187,8 +1184,8 @@ void PipelineImpl::SetVolume(float volume) {
volume_ = volume;
media_task_runner_->PostTask(
FROM_HERE,
- base::Bind(&RendererWrapper::SetVolume,
- base::Unretained(renderer_wrapper_.get()), volume_));
+ base::BindOnce(&RendererWrapper::SetVolume,
+ base::Unretained(renderer_wrapper_.get()), volume_));
}
base::TimeDelta PipelineImpl::GetMediaTime() const {
@@ -1250,9 +1247,9 @@ void PipelineImpl::SetCdm(CdmContext* cdm_context,
media_task_runner_->PostTask(
FROM_HERE,
- base::Bind(&RendererWrapper::SetCdm,
- base::Unretained(renderer_wrapper_.get()), cdm_context,
- media::BindToCurrentLoop(cdm_attached_cb)));
+ base::BindOnce(&RendererWrapper::SetCdm,
+ base::Unretained(renderer_wrapper_.get()), cdm_context,
+ BindToCurrentLoop(cdm_attached_cb)));
}
#define RETURN_STRING(state) \
@@ -1339,13 +1336,13 @@ void PipelineImpl::OnDurationChange(base::TimeDelta duration) {
client_->OnDurationChange();
}
-void PipelineImpl::OnWaitingForDecryptionKey() {
+void PipelineImpl::OnWaiting(WaitingReason reason) {
DVLOG(2) << __func__;
DCHECK(thread_checker_.CalledOnValidThread());
DCHECK(IsRunning());
DCHECK(client_);
- client_->OnWaitingForDecryptionKey();
+ client_->OnWaiting(reason);
}
void PipelineImpl::OnVideoNaturalSizeChange(const gfx::Size& size) {
@@ -1411,6 +1408,15 @@ void PipelineImpl::OnVideoDecoderChange(const std::string& name) {
client_->OnVideoDecoderChange(name);
}
+void PipelineImpl::OnRemotePlayStateChange(MediaStatus::State state) {
+ DVLOG(2) << __func__;
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(IsRunning());
+
+ DCHECK(client_);
+ client_->OnRemotePlayStateChange(state);
+}
+
void PipelineImpl::OnSeekDone(bool is_suspended) {
DVLOG(3) << __func__;
DCHECK(thread_checker_.CalledOnValidThread());
diff --git a/chromium/media/base/pipeline_impl.h b/chromium/media/base/pipeline_impl.h
index c89b88aa7f7..f1240bb75d0 100644
--- a/chromium/media/base/pipeline_impl.h
+++ b/chromium/media/base/pipeline_impl.h
@@ -138,7 +138,7 @@ class MEDIA_EXPORT PipelineImpl : public Pipeline {
void OnMetadata(PipelineMetadata metadata);
void OnBufferingStateChange(BufferingState state);
void OnDurationChange(base::TimeDelta duration);
- void OnWaitingForDecryptionKey();
+ void OnWaiting(WaitingReason reason);
void OnAudioConfigChange(const AudioDecoderConfig& config);
void OnVideoConfigChange(const VideoDecoderConfig& config);
void OnVideoNaturalSizeChange(const gfx::Size& size);
@@ -146,6 +146,7 @@ class MEDIA_EXPORT PipelineImpl : public Pipeline {
void OnVideoAverageKeyframeDistanceUpdate();
void OnAudioDecoderChange(const std::string& name);
void OnVideoDecoderChange(const std::string& name);
+ void OnRemotePlayStateChange(MediaStatus::State state);
// Task completion callbacks from RendererWrapper.
void OnSeekDone(bool is_suspended);
diff --git a/chromium/media/base/pipeline_impl_unittest.cc b/chromium/media/base/pipeline_impl_unittest.cc
index 1d3c087b7e6..10a9974db89 100644
--- a/chromium/media/base/pipeline_impl_unittest.cc
+++ b/chromium/media/base/pipeline_impl_unittest.cc
@@ -22,7 +22,7 @@
#include "base/time/clock.h"
#include "media/base/fake_text_track_stream.h"
#include "media/base/gmock_callback_support.h"
-#include "media/base/media_log.h"
+#include "media/base/media_util.h"
#include "media/base/mock_filters.h"
#include "media/base/test_helpers.h"
#include "media/base/text_renderer.h"
@@ -178,7 +178,7 @@ class PipelineImplTest : public ::testing::Test {
void StartPipeline(
Pipeline::StartType start_type = Pipeline::StartType::kNormal) {
- EXPECT_CALL(callbacks_, OnWaitingForDecryptionKey()).Times(0);
+ EXPECT_CALL(callbacks_, OnWaiting(_)).Times(0);
pipeline_->Start(
start_type, demuxer_.get(), std::move(scoped_renderer_), &callbacks_,
base::Bind(&CallbackHelper::OnStart, base::Unretained(&callbacks_)));
@@ -322,7 +322,7 @@ class PipelineImplTest : public ::testing::Test {
StrictMock<CallbackHelper> callbacks_;
base::SimpleTestTickClock test_tick_clock_;
base::MessageLoop message_loop_;
- MediaLog media_log_;
+ NullMediaLog media_log_;
std::unique_ptr<PipelineImpl> pipeline_;
std::unique_ptr<StrictMock<MockDemuxer>> demuxer_;
diff --git a/chromium/media/base/reentrancy_checker.cc b/chromium/media/base/reentrancy_checker.cc
new file mode 100644
index 00000000000..296503debd6
--- /dev/null
+++ b/chromium/media/base/reentrancy_checker.cc
@@ -0,0 +1,26 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/reentrancy_checker.h"
+
+namespace media {
+
+NonReentrantScope::NonReentrantScope(base::Lock& lock) : lock_(lock) {
+ is_lock_holder_ = lock_.Try();
+
+ // TODO(sandersd): Allow the caller to provide the message? The macro knows
+ // the name of the scope.
+ if (!is_lock_holder_)
+ LOG(FATAL) << "Non-reentrant scope was reentered";
+}
+
+NonReentrantScope::~NonReentrantScope() {
+ if (!is_lock_holder_)
+ return;
+
+ lock_.AssertAcquired();
+ lock_.Release();
+}
+
+} // namespace media
diff --git a/chromium/media/base/reentrancy_checker.h b/chromium/media/base/reentrancy_checker.h
new file mode 100644
index 00000000000..4ac134b13be
--- /dev/null
+++ b/chromium/media/base/reentrancy_checker.h
@@ -0,0 +1,64 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_REENTRANCY_CHECKER_H_
+#define MEDIA_BASE_REENTRANCY_CHECKER_H_
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/synchronization/lock.h"
+#include "base/thread_annotations.h"
+#include "media/base/media_export.h"
+
+// Asserts that a non-reentrant scope (which can span multiple methods) is not
+// reentered.
+//
+// No-op and zero-sized when DCHECKs are disabled at build time.
+//
+// Failures are reported by LOG(FATAL):
+// [...:FATAL:reentrancy_checker.cc(15)] Non-reentrant scope was reentered
+// #0 0x7f12ef2ee8dd base::debug::StackTrace::StackTrace()
+// #1 0x7f12eefdffaa base::debug::StackTrace::StackTrace()
+// #2 0x7f12ef051e0b logging::LogMessage::~LogMessage()
+// #3 0x7f12edc8bfc6 media::NonReentrantScope::NonReentrantScope()
+// #4 0x7f12edd93e41 MyClass::MyMethod()
+//
+// Usage:
+// class MyClass {
+// public:
+// void MyMethod() {
+// NON_REENTRANT_SCOPE(my_reentrancy_checker_);
+// ...
+// }
+//
+// private:
+// REENTRANCY_CHECKER(my_reentrancy_checker_);
+// };
+
+#if DCHECK_IS_ON()
+#define REENTRANCY_CHECKER(name) ::base::Lock name
+#define NON_REENTRANT_SCOPE(name) ::media::NonReentrantScope name##scope(name)
+#else // DCHECK_IS_ON()
+#define REENTRANCY_CHECKER(name)
+#define NON_REENTRANT_SCOPE(name)
+#endif // DCHECK_IS_ON()
+
+namespace media {
+
+// Implementation of NON_REENTRANT_SCOPE(). Do not use directly.
+class SCOPED_LOCKABLE MEDIA_EXPORT NonReentrantScope {
+ public:
+ explicit NonReentrantScope(base::Lock& lock) EXCLUSIVE_LOCK_FUNCTION(lock);
+ ~NonReentrantScope() UNLOCK_FUNCTION();
+
+ private:
+ base::Lock& lock_;
+ bool is_lock_holder_ = false;
+
+ DISALLOW_COPY_AND_ASSIGN(NonReentrantScope);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_REENTRANCY_CHECKER_H_
diff --git a/chromium/media/base/reentrancy_checker_unittest.cc b/chromium/media/base/reentrancy_checker_unittest.cc
new file mode 100644
index 00000000000..7eff2e7fa65
--- /dev/null
+++ b/chromium/media/base/reentrancy_checker_unittest.cc
@@ -0,0 +1,38 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/reentrancy_checker.h"
+
+#include "base/logging.h"
+#include "base/test/gtest_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+class ReentrancyCheckerTest : public testing::Test {
+ public:
+ void UseNonReentrantScope() { NON_REENTRANT_SCOPE(reentrancy_checker_); }
+
+ protected:
+ REENTRANCY_CHECKER(reentrancy_checker_);
+};
+
+TEST_F(ReentrancyCheckerTest, Construct) {}
+
+TEST_F(ReentrancyCheckerTest, NonReentrantUse) {
+ NON_REENTRANT_SCOPE(reentrancy_checker_);
+}
+
+TEST_F(ReentrancyCheckerTest, ReentrantUse) {
+ NON_REENTRANT_SCOPE(reentrancy_checker_);
+
+#if DCHECK_IS_ON()
+ EXPECT_DEATH(UseNonReentrantScope(), "reentered");
+#else
+ // Does nothing if DCHECKs are off.
+ UseNonReentrantScope();
+#endif // DCHECK_IS_ON()
+}
+
+} // namespace media
diff --git a/chromium/media/base/renderer_client.h b/chromium/media/base/renderer_client.h
index 3fc8919c1b5..703ecfe6992 100644
--- a/chromium/media/base/renderer_client.h
+++ b/chromium/media/base/renderer_client.h
@@ -7,8 +7,10 @@
#include "base/time/time.h"
#include "media/base/audio_decoder_config.h"
+#include "media/base/media_status.h"
#include "media/base/pipeline_status.h"
#include "media/base/video_decoder_config.h"
+#include "media/base/waiting.h"
#include "ui/gfx/geometry/size.h"
namespace media {
@@ -29,8 +31,8 @@ class RendererClient {
// Executed when buffering state is changed.
virtual void OnBufferingStateChange(BufferingState state) = 0;
- // Executed whenever the key needed to decrypt the stream is not available.
- virtual void OnWaitingForDecryptionKey() = 0;
+ // Executed whenever the Renderer is waiting because of |reason|.
+ virtual void OnWaiting(WaitingReason reason) = 0;
// Executed whenever DemuxerStream status returns kConfigChange. Initial
// configs provided by OnMetadata.
@@ -48,6 +50,12 @@ class RendererClient {
// Executed when video metadata is first read, and whenever it changes.
// Only used when we are using a URL demuxer (e.g. for MediaPlayerRenderer).
virtual void OnDurationChange(base::TimeDelta duration) = 0;
+
+ // Executed when the status of a video playing remotely is changed, without
+ // the change originating from the media::Pipeline that owns |this|.
+ // Only used with the FlingingRenderer, when an external device play/pauses
+ // videos, and WMPI needs to be updated accordingly.
+ virtual void OnRemotePlayStateChange(media::MediaStatus::State state) = 0;
};
} // namespace media
diff --git a/chromium/media/base/seekable_buffer_unittest.cc b/chromium/media/base/seekable_buffer_unittest.cc
index f8178c3430c..e3895620650 100644
--- a/chromium/media/base/seekable_buffer_unittest.cc
+++ b/chromium/media/base/seekable_buffer_unittest.cc
@@ -10,7 +10,7 @@
#include <cstdlib>
#include "base/logging.h"
-#include "base/macros.h"
+#include "base/stl_util.h"
#include "base/time/time.h"
#include "media/base/data_buffer.h"
#include "media/base/timestamp_constants.h"
@@ -338,7 +338,7 @@ TEST_F(SeekableBufferTest, GetTime) {
scoped_refptr<DataBuffer> buffer = DataBuffer::CopyFrom(data_, kWriteSize);
- for (size_t i = 0; i < arraysize(tests); ++i) {
+ for (size_t i = 0; i < base::size(tests); ++i) {
buffer->set_timestamp(base::TimeDelta::FromMicroseconds(
tests[i].first_time_useconds));
buffer->set_duration(base::TimeDelta::FromMicroseconds(
diff --git a/chromium/media/base/serial_runner.h b/chromium/media/base/serial_runner.h
index 31cb5ced5e6..e2cd59306fe 100644
--- a/chromium/media/base/serial_runner.h
+++ b/chromium/media/base/serial_runner.h
@@ -26,6 +26,7 @@ namespace media {
// the completion callback as the series progresses.
class MEDIA_EXPORT SerialRunner {
public:
+ // TODO(dalecurtis): Change SerialRunner to use OnceCallback.
typedef base::Callback<void(const base::Closure&)> BoundClosure;
typedef base::Callback<void(const PipelineStatusCB&)> BoundPipelineStatusCB;
diff --git a/chromium/media/base/decode_capabilities.cc b/chromium/media/base/supported_types.cc
index 9799a04bdb7..1795a783208 100644
--- a/chromium/media/base/decode_capabilities.cc
+++ b/chromium/media/base/supported_types.cc
@@ -2,10 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/base/decode_capabilities.h"
+#include "media/base/supported_types.h"
#include "base/command_line.h"
#include "base/feature_list.h"
+#include "media/base/media_client.h"
#include "media/base/media_switches.h"
#include "media/media_buildflags.h"
#include "third_party/libaom/av1_buildflags.h"
@@ -18,6 +19,22 @@
namespace media {
+bool IsSupportedAudioType(const AudioType& type) {
+ MediaClient* media_client = GetMediaClient();
+ if (media_client)
+ return media_client->IsSupportedAudioType(type);
+
+ return IsDefaultSupportedAudioType(type);
+}
+
+bool IsSupportedVideoType(const VideoType& type) {
+ MediaClient* media_client = GetMediaClient();
+ if (media_client)
+ return media_client->IsSupportedVideoType(type);
+
+ return IsDefaultSupportedVideoType(type);
+}
+
bool IsColorSpaceSupported(const media::VideoColorSpace& color_space) {
switch (color_space.primaries) {
case media::VideoColorSpace::PrimaryID::EBU_3213_E:
@@ -122,8 +139,39 @@ bool IsVp9ProfileSupported(VideoCodecProfile profile) {
return false;
}
-bool IsSupportedAudioConfig(const AudioConfig& config) {
- switch (config.codec) {
+bool IsAudioCodecProprietary(AudioCodec codec) {
+ switch (codec) {
+ case media::kCodecAAC:
+ case media::kCodecAC3:
+ case media::kCodecEAC3:
+ case media::kCodecAMR_NB:
+ case media::kCodecAMR_WB:
+ case media::kCodecGSM_MS:
+ case media::kCodecALAC:
+ case media::kCodecMpegHAudio:
+ return true;
+
+ case media::kCodecFLAC:
+ case media::kCodecMP3:
+ case media::kCodecOpus:
+ case media::kCodecVorbis:
+ case media::kCodecPCM:
+ case media::kCodecPCM_MULAW:
+ case media::kCodecPCM_S16BE:
+ case media::kCodecPCM_S24BE:
+ case media::kCodecPCM_ALAW:
+ case media::kUnknownAudioCodec:
+ return false;
+ }
+}
+
+bool IsDefaultSupportedAudioType(const AudioType& type) {
+#if !BUILDFLAG(USE_PROPRIETARY_CODECS)
+ if (IsAudioCodecProprietary(type.codec))
+ return false;
+#endif
+
+ switch (type.codec) {
case media::kCodecAAC:
case media::kCodecFLAC:
case media::kCodecMP3:
@@ -157,21 +205,44 @@ bool IsSupportedAudioConfig(const AudioConfig& config) {
return false;
}
-// TODO(chcunningham): Query decoders for codec profile support. Add platform
-// specific logic for Android (move from MimeUtilIntenral).
-bool IsSupportedVideoConfig(const VideoConfig& config) {
- switch (config.codec) {
+bool IsVideoCodecProprietary(VideoCodec codec) {
+ switch (codec) {
+ case kCodecVC1:
+ case kCodecH264:
+ case kCodecMPEG2:
+ case kCodecMPEG4:
+ case kCodecHEVC:
+ case kCodecDolbyVision:
+ return true;
+ case kUnknownVideoCodec:
+ case kCodecTheora:
+ case kCodecVP8:
+ case kCodecVP9:
+ case kCodecAV1:
+ return false;
+ }
+}
+
+// TODO(chcunningham): Add platform specific logic for Android (move from
+// MimeUtilInternal).
+bool IsDefaultSupportedVideoType(const VideoType& type) {
+#if !BUILDFLAG(USE_PROPRIETARY_CODECS)
+ if (IsVideoCodecProprietary(type.codec))
+ return false;
+#endif
+
+ switch (type.codec) {
case media::kCodecAV1:
#if BUILDFLAG(ENABLE_AV1_DECODER)
- return IsColorSpaceSupported(config.color_space);
+ return IsColorSpaceSupported(type.color_space);
#else
return false;
#endif
case media::kCodecVP9:
// Color management required for HDR to not look terrible.
- return IsColorSpaceSupported(config.color_space) &&
- IsVp9ProfileSupported(config.profile);
+ return IsColorSpaceSupported(type.color_space) &&
+ IsVp9ProfileSupported(type.profile);
case media::kCodecH264:
case media::kCodecVP8:
case media::kCodecTheora:
diff --git a/chromium/media/base/supported_types.h b/chromium/media/base/supported_types.h
new file mode 100644
index 00000000000..ba4ab01c37e
--- /dev/null
+++ b/chromium/media/base/supported_types.h
@@ -0,0 +1,26 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_SUPPORTED_TYPES_H_
+#define MEDIA_BASE_SUPPORTED_TYPES_H_
+
+#include "media/base/media_types.h"
+
+namespace media {
+
+// These functions will attempt to delegate to MediaClient (when present) to
+// describe what types of media are supported. When no MediaClient is provided,
+// they will fall back to calling the Default functions below.
+MEDIA_EXPORT bool IsSupportedAudioType(const AudioType& type);
+MEDIA_EXPORT bool IsSupportedVideoType(const VideoType& type);
+
+// These functions describe what media/ alone supports. They do not call out to
+// MediaClient and do not describe media/ embedder customization. Callers should
+// generally prefer the non-Default APIs above.
+MEDIA_EXPORT bool IsDefaultSupportedAudioType(const AudioType& type);
+MEDIA_EXPORT bool IsDefaultSupportedVideoType(const VideoType& type);
+
+} // namespace media
+
+#endif // MEDIA_BASE_SUPPORTED_TYPES_H_
diff --git a/chromium/media/base/decode_capabilities_unittest.cc b/chromium/media/base/supported_types_unittest.cc
index 7c8b79c317c..b8b2c7a9b5f 100644
--- a/chromium/media/base/decode_capabilities_unittest.cc
+++ b/chromium/media/base/supported_types_unittest.cc
@@ -2,13 +2,19 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/base/decode_capabilities.h"
+#include "media/base/supported_types.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace media {
-TEST(DecodeCapabilitiesTest, IsSupportedVideoConfigBasics) {
+#if BUILDFLAG(USE_PROPRIETARY_CODECS)
+const bool kPropCodecsEnabled = true;
+#else
+const bool kPropCodecsEnabled = false;
+#endif
+
+TEST(SupportedTypesTest, IsSupportedVideoTypeBasics) {
// Default to common 709.
const media::VideoColorSpace kColorSpace = media::VideoColorSpace::REC709();
@@ -16,36 +22,40 @@ TEST(DecodeCapabilitiesTest, IsSupportedVideoConfigBasics) {
const int kUnspecifiedLevel = 0;
// Expect support for baseline configuration of known codecs.
- EXPECT_TRUE(IsSupportedVideoConfig(
- {media::kCodecH264, media::H264PROFILE_BASELINE, 1, kColorSpace}));
- EXPECT_TRUE(IsSupportedVideoConfig({media::kCodecVP8, media::VP8PROFILE_ANY,
- kUnspecifiedLevel, kColorSpace}));
+ EXPECT_TRUE(IsSupportedVideoType({media::kCodecVP8, media::VP8PROFILE_ANY,
+ kUnspecifiedLevel, kColorSpace}));
EXPECT_TRUE(
- IsSupportedVideoConfig({media::kCodecVP9, media::VP9PROFILE_PROFILE0,
- kUnspecifiedLevel, kColorSpace}));
- EXPECT_TRUE(IsSupportedVideoConfig({media::kCodecTheora,
- media::VIDEO_CODEC_PROFILE_UNKNOWN,
- kUnspecifiedLevel, kColorSpace}));
+ IsSupportedVideoType({media::kCodecVP9, media::VP9PROFILE_PROFILE0,
+ kUnspecifiedLevel, kColorSpace}));
+ EXPECT_TRUE(IsSupportedVideoType({media::kCodecTheora,
+ media::VIDEO_CODEC_PROFILE_UNKNOWN,
+ kUnspecifiedLevel, kColorSpace}));
// Expect non-support for the following.
- EXPECT_FALSE(IsSupportedVideoConfig({media::kUnknownVideoCodec,
- media::VIDEO_CODEC_PROFILE_UNKNOWN,
- kUnspecifiedLevel, kColorSpace}));
- EXPECT_FALSE(IsSupportedVideoConfig({media::kCodecVC1,
- media::VIDEO_CODEC_PROFILE_UNKNOWN,
- kUnspecifiedLevel, kColorSpace}));
- EXPECT_FALSE(IsSupportedVideoConfig({media::kCodecMPEG2,
- media::VIDEO_CODEC_PROFILE_UNKNOWN,
- kUnspecifiedLevel, kColorSpace}));
- EXPECT_FALSE(IsSupportedVideoConfig({media::kCodecMPEG4,
- media::VIDEO_CODEC_PROFILE_UNKNOWN,
- kUnspecifiedLevel, kColorSpace}));
- EXPECT_FALSE(IsSupportedVideoConfig({media::kCodecHEVC,
- media::VIDEO_CODEC_PROFILE_UNKNOWN,
- kUnspecifiedLevel, kColorSpace}));
+ EXPECT_FALSE(IsSupportedVideoType({media::kUnknownVideoCodec,
+ media::VIDEO_CODEC_PROFILE_UNKNOWN,
+ kUnspecifiedLevel, kColorSpace}));
+ EXPECT_FALSE(IsSupportedVideoType({media::kCodecVC1,
+ media::VIDEO_CODEC_PROFILE_UNKNOWN,
+ kUnspecifiedLevel, kColorSpace}));
+ EXPECT_FALSE(IsSupportedVideoType({media::kCodecMPEG2,
+ media::VIDEO_CODEC_PROFILE_UNKNOWN,
+ kUnspecifiedLevel, kColorSpace}));
+ EXPECT_FALSE(IsSupportedVideoType({media::kCodecMPEG4,
+ media::VIDEO_CODEC_PROFILE_UNKNOWN,
+ kUnspecifiedLevel, kColorSpace}));
+ EXPECT_FALSE(IsSupportedVideoType({media::kCodecHEVC,
+ media::VIDEO_CODEC_PROFILE_UNKNOWN,
+ kUnspecifiedLevel, kColorSpace}));
+
+ // Expect conditional support for the following.
+ EXPECT_EQ(
+ kPropCodecsEnabled,
+ IsSupportedVideoType(
+ {media::kCodecH264, media::H264PROFILE_BASELINE, 1, kColorSpace}));
}
-TEST(DecodeCapabilitiesTest, IsSupportedVideoConfig_VP9TransferFunctions) {
+TEST(SupportedTypesTest, IsSupportedVideoType_VP9TransferFunctions) {
size_t num_found = 0;
// TODO(hubbe): Verify support for HDR codecs when color management enabled.
const std::set<media::VideoColorSpace::TransferID> kSupportedTransfers = {
@@ -76,14 +86,14 @@ TEST(DecodeCapabilitiesTest, IsSupportedVideoConfig_VP9TransferFunctions) {
kSupportedTransfers.end();
if (found)
num_found++;
- EXPECT_EQ(found, IsSupportedVideoConfig({media::kCodecVP9,
- media::VP9PROFILE_PROFILE0, 1,
- color_space}));
+ EXPECT_EQ(found, IsSupportedVideoType({media::kCodecVP9,
+ media::VP9PROFILE_PROFILE0, 1,
+ color_space}));
}
EXPECT_EQ(kSupportedTransfers.size(), num_found);
}
-TEST(DecodeCapabilitiesTest, IsSupportedVideoConfig_VP9Primaries) {
+TEST(SupportedTypesTest, IsSupportedVideoType_VP9Primaries) {
size_t num_found = 0;
// TODO(hubbe): Verify support for HDR codecs when color management enabled.
const std::set<media::VideoColorSpace::PrimaryID> kSupportedPrimaries = {
@@ -108,14 +118,14 @@ TEST(DecodeCapabilitiesTest, IsSupportedVideoConfig_VP9Primaries) {
kSupportedPrimaries.end();
if (found)
num_found++;
- EXPECT_EQ(found, IsSupportedVideoConfig({media::kCodecVP9,
- media::VP9PROFILE_PROFILE0, 1,
- color_space}));
+ EXPECT_EQ(found, IsSupportedVideoType({media::kCodecVP9,
+ media::VP9PROFILE_PROFILE0, 1,
+ color_space}));
}
EXPECT_EQ(kSupportedPrimaries.size(), num_found);
}
-TEST(DecodeCapabilitiesTest, IsSupportedVideoConfig_VP9Matrix) {
+TEST(SupportedTypesTest, IsSupportedVideoType_VP9Matrix) {
size_t num_found = 0;
// TODO(hubbe): Verify support for HDR codecs when color management enabled.
const std::set<media::VideoColorSpace::MatrixID> kSupportedMatrix = {
@@ -140,9 +150,9 @@ TEST(DecodeCapabilitiesTest, IsSupportedVideoConfig_VP9Matrix) {
kSupportedMatrix.find(color_space.matrix) != kSupportedMatrix.end();
if (found)
num_found++;
- EXPECT_EQ(found, IsSupportedVideoConfig({media::kCodecVP9,
- media::VP9PROFILE_PROFILE0, 1,
- color_space}));
+ EXPECT_EQ(found, IsSupportedVideoType({media::kCodecVP9,
+ media::VP9PROFILE_PROFILE0, 1,
+ color_space}));
}
EXPECT_EQ(kSupportedMatrix.size(), num_found);
}
diff --git a/chromium/media/base/test_data_util.cc b/chromium/media/base/test_data_util.cc
index 567029400b4..bb615c1290f 100644
--- a/chromium/media/base/test_data_util.cc
+++ b/chromium/media/base/test_data_util.cc
@@ -12,6 +12,7 @@
#include "base/no_destructor.h"
#include "base/numerics/safe_conversions.h"
#include "base/path_service.h"
+#include "base/stl_util.h"
#include "media/base/decoder_buffer.h"
namespace media {
@@ -221,13 +222,13 @@ scoped_refptr<DecoderBuffer> ReadTestDataFile(const std::string& name) {
bool LookupTestKeyVector(const std::vector<uint8_t>& key_id,
bool allow_rotation,
std::vector<uint8_t>* key) {
- std::vector<uint8_t> starting_key_id(kKeyId, kKeyId + arraysize(kKeyId));
+ std::vector<uint8_t> starting_key_id(kKeyId, kKeyId + base::size(kKeyId));
size_t rotate_limit = allow_rotation ? starting_key_id.size() : 1;
for (size_t pos = 0; pos < rotate_limit; ++pos) {
std::rotate(starting_key_id.begin(), starting_key_id.begin() + pos,
starting_key_id.end());
if (key_id == starting_key_id) {
- key->assign(kSecretKey, kSecretKey + arraysize(kSecretKey));
+ key->assign(kSecretKey, kSecretKey + base::size(kSecretKey));
std::rotate(key->begin(), key->begin() + pos, key->end());
return true;
}
diff --git a/chromium/media/base/test_helpers.cc b/chromium/media/base/test_helpers.cc
index 8a1f79eb415..cac13df4d34 100644
--- a/chromium/media/base/test_helpers.cc
+++ b/chromium/media/base/test_helpers.cc
@@ -18,6 +18,7 @@
#include "media/base/bind_to_current_loop.h"
#include "media/base/decoder_buffer.h"
#include "media/base/media_util.h"
+#include "media/base/mock_filters.h"
#include "ui/gfx/geometry/rect.h"
using ::testing::_;
@@ -180,10 +181,10 @@ VideoDecoderConfig TestVideoConfig::NormalCodecProfile(
}
// static
-VideoDecoderConfig TestVideoConfig::NormalEncrypted(VideoCodec codec) {
- return GetTestConfig(codec, VIDEO_CODEC_PROFILE_UNKNOWN,
- VideoColorSpace::JPEG(), VIDEO_ROTATION_0, kNormalSize,
- true);
+VideoDecoderConfig TestVideoConfig::NormalEncrypted(VideoCodec codec,
+ VideoCodecProfile profile) {
+ return GetTestConfig(codec, profile, VideoColorSpace::JPEG(),
+ VIDEO_ROTATION_0, kNormalSize, true);
}
// static
@@ -372,4 +373,28 @@ bool VerifyFakeVideoBufferForTest(const DecoderBuffer& buffer,
height == config.coded_size().height());
}
+std::unique_ptr<StrictMock<MockDemuxerStream>> CreateMockDemuxerStream(
+ DemuxerStream::Type type,
+ bool encrypted) {
+ auto stream = std::make_unique<StrictMock<MockDemuxerStream>>(type);
+
+ switch (type) {
+ case DemuxerStream::AUDIO:
+ stream->set_audio_decoder_config(encrypted
+ ? TestAudioConfig::NormalEncrypted()
+ : TestAudioConfig::Normal());
+ break;
+ case DemuxerStream::VIDEO:
+ stream->set_video_decoder_config(encrypted
+ ? TestVideoConfig::NormalEncrypted()
+ : TestVideoConfig::Normal());
+ break;
+ default:
+ NOTREACHED();
+ break;
+ }
+
+ return stream;
+}
+
} // namespace media
diff --git a/chromium/media/base/test_helpers.h b/chromium/media/base/test_helpers.h
index e16c6e1398c..0bb2b06d8d9 100644
--- a/chromium/media/base/test_helpers.h
+++ b/chromium/media/base/test_helpers.h
@@ -33,6 +33,7 @@ namespace media {
class AudioBuffer;
class AudioBus;
class DecoderBuffer;
+class MockDemuxerStream;
// Return a callback that expects to be run once.
base::Closure NewExpectedClosure();
@@ -96,7 +97,9 @@ class TestVideoConfig {
static VideoDecoderConfig NormalCodecProfile(
VideoCodec codec = kCodecVP8,
VideoCodecProfile profile = VIDEO_CODEC_PROFILE_UNKNOWN);
- static VideoDecoderConfig NormalEncrypted(VideoCodec codec = kCodecVP8);
+ static VideoDecoderConfig NormalEncrypted(
+ VideoCodec codec = kCodecVP8,
+ VideoCodecProfile = VIDEO_CODEC_PROFILE_UNKNOWN);
static VideoDecoderConfig NormalRotated(VideoRotation rotation);
// Returns a configuration that is larger in dimensions than Normal().
@@ -194,6 +197,10 @@ scoped_refptr<DecoderBuffer> CreateFakeVideoBufferForTest(
bool VerifyFakeVideoBufferForTest(const DecoderBuffer& buffer,
const VideoDecoderConfig& config);
+// Create a MockDemuxerStream for testing purposes.
+std::unique_ptr<::testing::StrictMock<MockDemuxerStream>>
+CreateMockDemuxerStream(DemuxerStream::Type type, bool encrypted);
+
// Compares two {Audio|Video}DecoderConfigs
MATCHER_P(DecoderConfigEq, config, "") {
return arg.Matches(config);
@@ -208,6 +215,14 @@ MATCHER(IsEndOfStream, "") {
return arg.get() && arg->end_of_stream();
}
+MATCHER(EosBeforeHaveMetadata, "") {
+ return CONTAINS_STRING(
+ arg,
+ "MediaSource endOfStream before demuxer initialization completes (before "
+ "HAVE_METADATA) is treated as an error. This may also occur as "
+ "consequence of other MediaSource errors before HAVE_METADATA.");
+}
+
MATCHER_P(SegmentMissingFrames, track_id, "") {
return CONTAINS_STRING(
arg, "Media segment did not contain any coded frames for track " +
diff --git a/chromium/media/base/unaligned_shared_memory_unittest.cc b/chromium/media/base/unaligned_shared_memory_unittest.cc
index 34b287e7273..35c3010b42a 100644
--- a/chromium/media/base/unaligned_shared_memory_unittest.cc
+++ b/chromium/media/base/unaligned_shared_memory_unittest.cc
@@ -10,8 +10,8 @@
#include <limits>
#include "base/logging.h"
-#include "base/macros.h"
#include "base/memory/shared_memory.h"
+#include "base/stl_util.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace media {
@@ -19,11 +19,11 @@ namespace media {
namespace {
const uint8_t kUnalignedData[] = "XXXhello";
-const size_t kUnalignedDataSize = arraysize(kUnalignedData);
+const size_t kUnalignedDataSize = base::size(kUnalignedData);
const off_t kUnalignedOffset = 3;
const uint8_t kData[] = "hello";
-const size_t kDataSize = arraysize(kData);
+const size_t kDataSize = base::size(kData);
base::SharedMemoryHandle CreateHandle(const uint8_t* data, size_t size) {
base::SharedMemory shm;
diff --git a/chromium/media/base/video_bitrate_allocation.h b/chromium/media/base/video_bitrate_allocation.h
index 7d5059a9814..f56c0b072f9 100644
--- a/chromium/media/base/video_bitrate_allocation.h
+++ b/chromium/media/base/video_bitrate_allocation.h
@@ -39,7 +39,7 @@ class MEDIA_EXPORT VideoBitrateAllocation {
}
private:
- int sum_; // Cached sum of all elements of |bitrates_|, for perfomance.
+ int sum_; // Cached sum of all elements of |bitrates_|, for performance.
int bitrates_[kMaxSpatialLayers][kMaxTemporalLayers];
};
diff --git a/chromium/media/base/video_decoder.h b/chromium/media/base/video_decoder.h
index f9deaf3bc13..784bb73853c 100644
--- a/chromium/media/base/video_decoder.h
+++ b/chromium/media/base/video_decoder.h
@@ -13,6 +13,7 @@
#include "media/base/decode_status.h"
#include "media/base/media_export.h"
#include "media/base/pipeline_status.h"
+#include "media/base/waiting.h"
#include "ui/gfx/geometry/size.h"
namespace media {
@@ -36,10 +37,6 @@ class MEDIA_EXPORT VideoDecoder {
// buffer to decode.
using DecodeCB = base::Callback<void(DecodeStatus)>;
- // Callback for whenever the key needed to decrypt the stream is not
- // available. May be called at any time after Initialize().
- using WaitingForDecryptionKeyCB = base::RepeatingClosure;
-
VideoDecoder();
// Returns the name of the decoder for logging and decoder selection purposes.
@@ -70,8 +67,9 @@ class MEDIA_EXPORT VideoDecoder {
// |cdm_context| can be used to handle encrypted buffers. May be null if the
// stream is not encrypted.
//
- // |waiting_for_decryption_key_cb| is called whenever the key needed to
- // decrypt the stream is not available.
+ // |waiting_cb| is called whenever the decoder is stalled waiting for
+ // something, e.g. decryption key. May be called at any time after
+ // Initialize().
//
// Note:
// 1) The VideoDecoder will be reinitialized if it was initialized before.
@@ -82,13 +80,13 @@ class MEDIA_EXPORT VideoDecoder {
// is ready (i.e. w/o thread trampolining) since it can strongly affect frame
// delivery times with high-frame-rate material. See Decode() for additional
// notes.
- virtual void Initialize(
- const VideoDecoderConfig& config,
- bool low_delay,
- CdmContext* cdm_context,
- const InitCB& init_cb,
- const OutputCB& output_cb,
- const WaitingForDecryptionKeyCB& waiting_for_decryption_key_cb) = 0;
+ // 5) |init_cb| may be called before this returns.
+ virtual void Initialize(const VideoDecoderConfig& config,
+ bool low_delay,
+ CdmContext* cdm_context,
+ const InitCB& init_cb,
+ const OutputCB& output_cb,
+ const WaitingCB& waiting_cb) = 0;
// Requests a |buffer| to be decoded. The status of the decoder and decoded
// frame are returned via the provided callback. Some decoders may allow
diff --git a/chromium/media/base/video_decoder_config.h b/chromium/media/base/video_decoder_config.h
index 0ef659e1da0..fa52c39c2c3 100644
--- a/chromium/media/base/video_decoder_config.h
+++ b/chromium/media/base/video_decoder_config.h
@@ -27,6 +27,8 @@ namespace media {
MEDIA_EXPORT VideoCodec
VideoCodecProfileToVideoCodec(VideoCodecProfile profile);
+// Describes the content of a video stream, as described by the media container
+// (or otherwise determined by the demuxer).
class MEDIA_EXPORT VideoDecoderConfig {
public:
// Constructs an uninitialized object. Clients should call Initialize() with
@@ -77,13 +79,21 @@ class MEDIA_EXPORT VideoDecoderConfig {
static std::string GetHumanReadableProfile(VideoCodecProfile profile);
+ // Video codec and profile.
VideoCodec codec() const { return codec_; }
VideoCodecProfile profile() const { return profile_; }
- // Video format used to determine YUV buffer sizes.
+ // Encoded video pixel format. Lossy codecs rarely have actual pixel formats,
+ // this should usually be interpreted as a subsampling specification. (The
+ // decoder determines the actual pixel format.)
VideoPixelFormat format() const { return format_; }
- // Default is VIDEO_ROTATION_0.
+ // Difference between encoded and display orientation.
+ //
+ // Default is VIDEO_ROTATION_0. Note that rotation should be applied after
+ // scaling to natural_size().
+ //
+ // TODO(sandersd): Which direction is orientation measured in?
VideoRotation video_rotation() const { return rotation_; }
// Deprecated. TODO(wolenetz): Remove. See https://crbug.com/665539.
@@ -91,20 +101,28 @@ class MEDIA_EXPORT VideoDecoderConfig {
// in this region are valid.
const gfx::Size& coded_size() const { return coded_size_; }
- // Region of |coded_size_| that is visible.
+ // Region of coded_size() that contains image data, also known as the clean
+ // aperture. Usually, but not always, origin-aligned (top-left).
const gfx::Rect& visible_rect() const { return visible_rect_; }
// Final visible width and height of a video frame with aspect ratio taken
- // into account.
+ // into account. Image data in the visible_rect() should be scaled to this
+ // size for display.
const gfx::Size& natural_size() const { return natural_size_; }
+ // The shape of encoded pixels. Given visible_rect() and a pixel aspect ratio,
+ // it is possible to compute natural_size() (see video_util.h).
+ //
// TODO(crbug.com/837337): This should be explicitly set (replacing
// |natural_size|). It should also be possible to determine whether it was set
// at all, since in-stream information may override it if it was not.
double GetPixelAspectRatio() const;
- // Optional byte data required to initialize video decoders, such as H.264
- // AVCC data.
+ // Optional video decoder initialization data, such as H.264 AVCC.
+ //
+ // Note: FFmpegVideoDecoder assumes that H.264 is in AVC format if there is
+ // |extra_data|, and in Annex B format if there is not. We should probably add
+ // explicit signaling of encoded format.
void SetExtraData(const std::vector<uint8_t>& extra_data);
const std::vector<uint8_t>& extra_data() const { return extra_data_; }
@@ -118,8 +136,10 @@ class MEDIA_EXPORT VideoDecoderConfig {
return encryption_scheme_;
}
+ // Color space of the image data.
const VideoColorSpace& color_space_info() const;
+ // Dynamic range of the image data.
void set_hdr_metadata(const HDRMetadata& hdr_metadata);
const base::Optional<HDRMetadata>& hdr_metadata() const;
diff --git a/chromium/media/base/video_frame.cc b/chromium/media/base/video_frame.cc
index 9cf68b397d6..77d1d4f87e8 100644
--- a/chromium/media/base/video_frame.cc
+++ b/chromium/media/base/video_frame.cc
@@ -11,10 +11,11 @@
#include "base/atomic_sequence_num.h"
#include "base/bind.h"
+#include "base/bits.h"
#include "base/callback_helpers.h"
#include "base/logging.h"
-#include "base/macros.h"
#include "base/memory/aligned_memory.h"
+#include "base/stl_util.h"
#include "base/strings/string_piece.h"
#include "base/strings/stringprintf.h"
#include "build/build_config.h"
@@ -39,20 +40,6 @@ gfx::Rect Intersection(gfx::Rect a, const gfx::Rect& b) {
// Static constexpr class for generating unique identifiers for each VideoFrame.
static base::AtomicSequenceNumber g_unique_id_generator;
-static bool IsPowerOfTwo(size_t x) {
- return x != 0 && (x & (x - 1)) == 0;
-}
-
-static inline size_t RoundUp(size_t value, size_t alignment) {
- DCHECK(IsPowerOfTwo(alignment));
- return ((value + (alignment - 1)) & ~(alignment - 1));
-}
-
-static inline size_t RoundDown(size_t value, size_t alignment) {
- DCHECK(IsPowerOfTwo(alignment));
- return value & ~(alignment - 1);
-}
-
static std::string StorageTypeToString(
const VideoFrame::StorageType storage_type) {
switch (storage_type) {
@@ -78,9 +65,8 @@ static std::string StorageTypeToString(
return "INVALID";
}
-// Returns true if |frame| is accesible mapped in the VideoFrame memory space.
// static
-static bool IsStorageTypeMappable(VideoFrame::StorageType storage_type) {
+bool VideoFrame::IsStorageTypeMappable(VideoFrame::StorageType storage_type) {
return
#if defined(OS_LINUX)
// This is not strictly needed but makes explicit that, at VideoFrame
@@ -137,6 +123,7 @@ static bool RequiresEvenSizeAllocation(VideoPixelFormat format) {
case PIXEL_FORMAT_YUV444P12:
case PIXEL_FORMAT_I420A:
case PIXEL_FORMAT_UYVY:
+ case PIXEL_FORMAT_P016LE:
return true;
case PIXEL_FORMAT_UNKNOWN:
break;
@@ -225,7 +212,7 @@ bool VideoFrame::IsValidConfig(VideoPixelFormat format,
return true;
// Make sure new formats are properly accounted for in the method.
- static_assert(PIXEL_FORMAT_MAX == 28,
+ static_assert(PIXEL_FORMAT_MAX == 29,
"Added pixel format, please review IsValidConfig()");
if (format == PIXEL_FORMAT_UNKNOWN) {
@@ -707,8 +694,8 @@ gfx::Size VideoFrame::PlaneSize(VideoPixelFormat format,
// Align to multiple-of-two size overall. This ensures that non-subsampled
// planes can be addressed by pixel with the same scaling as the subsampled
// planes.
- width = RoundUp(width, 2);
- height = RoundUp(height, 2);
+ width = base::bits::Align(width, 2);
+ height = base::bits::Align(height, 2);
}
const gfx::Size subsample = SampleSize(format, plane);
@@ -765,12 +752,13 @@ int VideoFrame::BytesPerElement(VideoPixelFormat format, size_t plane) {
case PIXEL_FORMAT_YUV420P12:
case PIXEL_FORMAT_YUV422P12:
case PIXEL_FORMAT_YUV444P12:
+ case PIXEL_FORMAT_P016LE:
return 2;
case PIXEL_FORMAT_NV12:
case PIXEL_FORMAT_NV21:
case PIXEL_FORMAT_MT21: {
static const int bytes_per_element[] = {1, 2};
- DCHECK_LT(plane, arraysize(bytes_per_element));
+ DCHECK_LT(plane, base::size(bytes_per_element));
return bytes_per_element[plane];
}
case PIXEL_FORMAT_YV12:
@@ -789,17 +777,33 @@ int VideoFrame::BytesPerElement(VideoPixelFormat format, size_t plane) {
}
// static
+std::vector<int32_t> VideoFrame::ComputeStrides(VideoPixelFormat format,
+ const gfx::Size& coded_size) {
+ std::vector<int32_t> strides;
+ const size_t num_planes = NumPlanes(format);
+ if (num_planes == 1) {
+ strides.push_back(RowBytes(0, format, coded_size.width()));
+ } else {
+ for (size_t plane = 0; plane < num_planes; ++plane) {
+ strides.push_back(base::bits::Align(
+ RowBytes(plane, format, coded_size.width()), kFrameAddressAlignment));
+ }
+ }
+ return strides;
+}
+
+// static
size_t VideoFrame::Rows(size_t plane, VideoPixelFormat format, int height) {
DCHECK(IsValidPlane(plane, format));
const int sample_height = SampleSize(format, plane).height();
- return RoundUp(height, sample_height) / sample_height;
+ return base::bits::Align(height, sample_height) / sample_height;
}
// static
size_t VideoFrame::Columns(size_t plane, VideoPixelFormat format, int width) {
DCHECK(IsValidPlane(plane, format));
const int sample_width = SampleSize(format, plane).width();
- return RoundUp(width, sample_width) / sample_width;
+ return base::bits::Align(width, sample_width) / sample_width;
}
// static
@@ -856,8 +860,9 @@ const uint8_t* VideoFrame::visible_data(size_t plane) const {
// Calculate an offset that is properly aligned for all planes.
const gfx::Size alignment = CommonAlignment(format());
- const gfx::Point offset(RoundDown(visible_rect_.x(), alignment.width()),
- RoundDown(visible_rect_.y(), alignment.height()));
+ const gfx::Point offset(
+ base::bits::AlignDown(visible_rect_.x(), alignment.width()),
+ base::bits::AlignDown(visible_rect_.y(), alignment.height()));
const gfx::Size subsample = SampleSize(format(), plane);
DCHECK(offset.x() % subsample.width() == 0);
@@ -1109,8 +1114,8 @@ gfx::Size VideoFrame::DetermineAlignedSize(VideoPixelFormat format,
const gfx::Size& dimensions) {
const gfx::Size alignment = CommonAlignment(format);
const gfx::Size adjusted =
- gfx::Size(RoundUp(dimensions.width(), alignment.width()),
- RoundUp(dimensions.height(), alignment.height()));
+ gfx::Size(base::bits::Align(dimensions.width(), alignment.width()),
+ base::bits::Align(dimensions.height(), alignment.height()));
DCHECK((adjusted.width() % alignment.width() == 0) &&
(adjusted.height() % alignment.height() == 0));
return adjusted;
@@ -1199,6 +1204,7 @@ gfx::Size VideoFrame::SampleSize(VideoPixelFormat format, size_t plane) {
case PIXEL_FORMAT_YUV420P9:
case PIXEL_FORMAT_YUV420P10:
case PIXEL_FORMAT_YUV420P12:
+ case PIXEL_FORMAT_P016LE:
return gfx::Size(2, 2);
case PIXEL_FORMAT_UNKNOWN:
@@ -1258,22 +1264,6 @@ void VideoFrame::AllocateMemory(bool zero_initialize_memory) {
}
}
-// static
-std::vector<int32_t> VideoFrame::ComputeStrides(VideoPixelFormat format,
- const gfx::Size& coded_size) {
- std::vector<int32_t> strides;
- const size_t num_planes = NumPlanes(format);
- if (num_planes == 1) {
- strides.push_back(RowBytes(0, format, coded_size.width()));
- } else {
- for (size_t plane = 0; plane < num_planes; ++plane) {
- strides.push_back(RoundUp(RowBytes(plane, format, coded_size.width()),
- kFrameAddressAlignment));
- }
- }
- return strides;
-}
-
std::vector<size_t> VideoFrame::CalculatePlaneSize() const {
const size_t num_planes = NumPlanes(format());
const size_t num_buffers = layout_.num_buffers();
@@ -1308,7 +1298,8 @@ std::vector<size_t> VideoFrame::CalculatePlaneSize() const {
// These values were chosen to mirror ffmpeg's get_video_buffer().
// TODO(dalecurtis): This should be configurable; eventually ffmpeg wants
// us to use av_cpu_max_align(), but... for now, they just hard-code 32.
- const size_t height = RoundUp(rows(plane), kFrameAddressAlignment);
+ const size_t height =
+ base::bits::Align(rows(plane), kFrameAddressAlignment);
const size_t width = std::abs(stride(plane));
plane_size.push_back(width * height);
}
diff --git a/chromium/media/base/video_frame.h b/chromium/media/base/video_frame.h
index 596eb136fcd..d8e7afe3d25 100644
--- a/chromium/media/base/video_frame.h
+++ b/chromium/media/base/video_frame.h
@@ -331,6 +331,10 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
// Returns the number of bytes per element for given |plane| and |format|.
static int BytesPerElement(VideoPixelFormat format, size_t plane);
+ // Calculates strides for each plane based on |format| and |coded_size|.
+ static std::vector<int32_t> ComputeStrides(VideoPixelFormat format,
+ const gfx::Size& coded_size);
+
// Returns the number of rows for the given plane, format, and height.
// The height may be aligned to format requirements.
static size_t Rows(size_t plane, VideoPixelFormat format, int height);
@@ -344,6 +348,10 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
static void HashFrameForTesting(base::MD5Context* context,
const scoped_refptr<VideoFrame>& frame);
+ // Returns true if |frame| is accesible mapped in the VideoFrame memory space.
+ // static
+ static bool IsStorageTypeMappable(VideoFrame::StorageType storage_type);
+
// Returns true if |frame| is accessible and mapped in the VideoFrame memory
// space. If false, clients should refrain from accessing data(),
// visible_data() etc.
@@ -567,13 +575,6 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
// alignment for each individual plane.
static gfx::Size CommonAlignment(VideoPixelFormat format);
- // Calculates strides if unassigned.
- // For the case that plane stride is not assigned, i.e. 0, in the layout_
- // object, it calculates strides for each plane based on frame format and
- // coded size then writes them back.
- static std::vector<int32_t> ComputeStrides(VideoPixelFormat format,
- const gfx::Size& coded_size);
-
void AllocateMemory(bool zero_initialize_memory);
// Calculates plane size.
diff --git a/chromium/media/base/video_frame_layout.cc b/chromium/media/base/video_frame_layout.cc
index d541d96b8ea..27121ce28e1 100644
--- a/chromium/media/base/video_frame_layout.cc
+++ b/chromium/media/base/video_frame_layout.cc
@@ -4,6 +4,7 @@
#include "media/base/video_frame_layout.h"
+#include <string.h>
#include <numeric>
#include <sstream>
@@ -56,6 +57,7 @@ size_t VideoFrameLayout::NumPlanes(VideoPixelFormat format) {
case PIXEL_FORMAT_NV12:
case PIXEL_FORMAT_NV21:
case PIXEL_FORMAT_MT21:
+ case PIXEL_FORMAT_P016LE:
return 2;
case PIXEL_FORMAT_I420:
case PIXEL_FORMAT_YV12:
@@ -141,13 +143,15 @@ size_t VideoFrameLayout::GetTotalBufferSize() const {
std::ostream& operator<<(std::ostream& ostream,
const VideoFrameLayout::Plane& plane) {
- ostream << "(" << plane.stride << ", " << plane.offset << ")";
+ ostream << "(" << plane.stride << ", " << plane.offset << ", "
+ << plane.modifier << ")";
return ostream;
}
bool VideoFrameLayout::Plane::operator==(
const VideoFrameLayout::Plane& rhs) const {
- return stride == rhs.stride && offset == rhs.offset;
+ return stride == rhs.stride && offset == rhs.offset &&
+ modifier == rhs.modifier;
}
bool VideoFrameLayout::Plane::operator!=(
@@ -157,7 +161,8 @@ bool VideoFrameLayout::Plane::operator!=(
bool VideoFrameLayout::operator==(const VideoFrameLayout& rhs) const {
return format_ == rhs.format_ && coded_size_ == rhs.coded_size_ &&
- planes_ == rhs.planes_ && buffer_sizes_ == rhs.buffer_sizes_;
+ planes_ == rhs.planes_ && buffer_sizes_ == rhs.buffer_sizes_ &&
+ buffer_addr_align_ == rhs.buffer_addr_align_;
}
bool VideoFrameLayout::operator!=(const VideoFrameLayout& rhs) const {
@@ -168,7 +173,8 @@ std::ostream& operator<<(std::ostream& ostream,
const VideoFrameLayout& layout) {
ostream << "VideoFrameLayout(format: " << layout.format()
<< ", coded_size: " << layout.coded_size().ToString()
- << ", planes (stride, offset): " << VectorToString(layout.planes())
+ << ", planes (stride, offset, modifier): "
+ << VectorToString(layout.planes())
<< ", buffer_sizes: " << VectorToString(layout.buffer_sizes()) << ")";
return ostream;
}
diff --git a/chromium/media/base/video_frame_layout.h b/chromium/media/base/video_frame_layout.h
index d69d0bf3776..b8ec34a1b92 100644
--- a/chromium/media/base/video_frame_layout.h
+++ b/chromium/media/base/video_frame_layout.h
@@ -17,6 +17,7 @@
#include "media/base/media_export.h"
#include "media/base/video_types.h"
#include "ui/gfx/geometry/size.h"
+#include "ui/gfx/native_pixmap_handle.h"
namespace media {
@@ -38,6 +39,8 @@ class MEDIA_EXPORT VideoFrameLayout {
struct Plane {
Plane() = default;
Plane(int32_t stride, size_t offset) : stride(stride), offset(offset) {}
+ Plane(int32_t stride, size_t offset, uint64_t modifier)
+ : stride(stride), offset(offset), modifier(modifier) {}
bool operator==(const Plane& rhs) const;
bool operator!=(const Plane& rhs) const;
@@ -50,6 +53,11 @@ class MEDIA_EXPORT VideoFrameLayout {
// Offset of a plane, which stands for the offset of a start point of a
// color plane from a buffer fd.
size_t offset = 0;
+
+ // Modifier of a plane. The modifier is retrieved from GBM library. This can
+ // be a different value from kNoModifier only if the VideoFrame is created
+ // by using NativePixmap.
+ uint64_t modifier = gfx::NativePixmapPlane::kNoModifier;
};
// Factory functions.
diff --git a/chromium/media/base/video_frame_layout_unittest.cc b/chromium/media/base/video_frame_layout_unittest.cc
index a1d2b2b3866..3c9309a3358 100644
--- a/chromium/media/base/video_frame_layout_unittest.cc
+++ b/chromium/media/base/video_frame_layout_unittest.cc
@@ -16,6 +16,7 @@
#include "media/base/video_types.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "ui/gfx/geometry/size.h"
+#include "ui/gfx/native_pixmap_handle.h"
namespace media {
@@ -235,10 +236,13 @@ TEST(VideoFrameLayout, ToString) {
std::ostringstream ostream;
ostream << *layout;
+ const std::string kNoModifier =
+ std::to_string(gfx::NativePixmapPlane::kNoModifier);
EXPECT_EQ(ostream.str(),
"VideoFrameLayout(format: PIXEL_FORMAT_I420, coded_size: 320x180, "
- "planes (stride, offset): [(384, 0), (192, 0), (192, 0)], "
- "buffer_sizes: [73728, 18432, 18432])");
+ "planes (stride, offset, modifier): [(384, 0, " +
+ kNoModifier + "), (192, 0, " + kNoModifier + "), (192, 0, " +
+ kNoModifier + ")], buffer_sizes: [73728, 18432, 18432])");
}
TEST(VideoFrameLayout, ToStringOneBuffer) {
@@ -253,9 +257,12 @@ TEST(VideoFrameLayout, ToStringOneBuffer) {
std::ostringstream ostream;
ostream << *layout;
+ const std::string kNoModifier =
+ std::to_string(gfx::NativePixmapPlane::kNoModifier);
EXPECT_EQ(ostream.str(),
"VideoFrameLayout(format: PIXEL_FORMAT_NV12, coded_size: 320x180, "
- "planes (stride, offset): [(384, 100)], buffer_sizes: [122880])");
+ "planes (stride, offset, modifier): [(384, 100, " +
+ kNoModifier + ")], buffer_sizes: [122880])");
}
TEST(VideoFrameLayout, ToStringNoBufferInfo) {
@@ -265,9 +272,13 @@ TEST(VideoFrameLayout, ToStringNoBufferInfo) {
std::ostringstream ostream;
ostream << *layout;
+ const std::string kNoModifier =
+ std::to_string(gfx::NativePixmapPlane::kNoModifier);
EXPECT_EQ(ostream.str(),
"VideoFrameLayout(format: PIXEL_FORMAT_NV12, coded_size: 320x180, "
- "planes (stride, offset): [(0, 0), (0, 0)], buffer_sizes: [])");
+ "planes (stride, offset, modifier): [(0, 0, " +
+ kNoModifier + "), (0, 0, " + kNoModifier +
+ ")], buffer_sizes: [])");
}
TEST(VideoFrameLayout, EqualOperator) {
@@ -275,21 +286,30 @@ TEST(VideoFrameLayout, EqualOperator) {
std::vector<int32_t> strides = {384, 192, 192};
std::vector<size_t> offsets = {0, 100, 200};
std::vector<size_t> buffer_sizes = {73728, 18432, 18432};
+ const size_t align = VideoFrameLayout::kBufferAddressAlignment;
+
auto layout = VideoFrameLayout::CreateWithPlanes(
PIXEL_FORMAT_I420, coded_size, CreatePlanes(strides, offsets),
- buffer_sizes);
+ buffer_sizes, align);
ASSERT_TRUE(layout.has_value());
auto same_layout = VideoFrameLayout::CreateWithPlanes(
PIXEL_FORMAT_I420, coded_size, CreatePlanes(strides, offsets),
- buffer_sizes);
+ buffer_sizes, align);
ASSERT_TRUE(same_layout.has_value());
EXPECT_EQ(*layout, *same_layout);
std::vector<size_t> another_buffer_sizes = {73728};
auto different_layout = VideoFrameLayout::CreateWithPlanes(
PIXEL_FORMAT_I420, coded_size, CreatePlanes(strides, offsets),
- another_buffer_sizes);
+ another_buffer_sizes, align);
+ ASSERT_TRUE(different_layout.has_value());
+ EXPECT_NE(*layout, *different_layout);
+
+ const size_t another_align = 0x1000;
+ different_layout = VideoFrameLayout::CreateWithPlanes(
+ PIXEL_FORMAT_I420, coded_size, CreatePlanes(strides, offsets),
+ buffer_sizes, another_align);
ASSERT_TRUE(different_layout.has_value());
EXPECT_NE(*layout, *different_layout);
}
diff --git a/chromium/media/base/video_frame_unittest.cc b/chromium/media/base/video_frame_unittest.cc
index 56576fc933c..c0a78ff2b5b 100644
--- a/chromium/media/base/video_frame_unittest.cc
+++ b/chromium/media/base/video_frame_unittest.cc
@@ -11,11 +11,11 @@
#include "base/bind.h"
#include "base/callback_helpers.h"
#include "base/format_macros.h"
-#include "base/macros.h"
#include "base/memory/aligned_memory.h"
#include "base/memory/read_only_shared_memory_region.h"
#include "base/memory/shared_memory.h"
#include "base/memory/unsafe_shared_memory_region.h"
+#include "base/stl_util.h"
#include "base/strings/stringprintf.h"
#include "build/build_config.h"
#include "gpu/command_buffer/common/mailbox_holder.h"
@@ -244,15 +244,15 @@ TEST(VideoFrame, CreateBlackFrame) {
// Test frames themselves.
uint8_t* y_plane = frame->data(VideoFrame::kYPlane);
for (int y = 0; y < frame->coded_size().height(); ++y) {
- EXPECT_EQ(0, memcmp(kExpectedYRow, y_plane, arraysize(kExpectedYRow)));
+ EXPECT_EQ(0, memcmp(kExpectedYRow, y_plane, base::size(kExpectedYRow)));
y_plane += frame->stride(VideoFrame::kYPlane);
}
uint8_t* u_plane = frame->data(VideoFrame::kUPlane);
uint8_t* v_plane = frame->data(VideoFrame::kVPlane);
for (int y = 0; y < frame->coded_size().height() / 2; ++y) {
- EXPECT_EQ(0, memcmp(kExpectedUVRow, u_plane, arraysize(kExpectedUVRow)));
- EXPECT_EQ(0, memcmp(kExpectedUVRow, v_plane, arraysize(kExpectedUVRow)));
+ EXPECT_EQ(0, memcmp(kExpectedUVRow, u_plane, base::size(kExpectedUVRow)));
+ EXPECT_EQ(0, memcmp(kExpectedUVRow, v_plane, base::size(kExpectedUVRow)));
u_plane += frame->stride(VideoFrame::kUPlane);
v_plane += frame->stride(VideoFrame::kVPlane);
}
@@ -629,6 +629,7 @@ TEST(VideoFrame, AllocationSize_OddSize) {
case PIXEL_FORMAT_RGB32:
case PIXEL_FORMAT_ABGR:
case PIXEL_FORMAT_XBGR:
+ case PIXEL_FORMAT_P016LE:
EXPECT_EQ(60u, VideoFrame::AllocationSize(format, size))
<< VideoPixelFormatToString(format);
break;
diff --git a/chromium/media/base/video_types.cc b/chromium/media/base/video_types.cc
index 815fb5dca05..4c7a74749b1 100644
--- a/chromium/media/base/video_types.cc
+++ b/chromium/media/base/video_types.cc
@@ -67,6 +67,8 @@ std::string VideoPixelFormatToString(VideoPixelFormat format) {
return "PIXEL_FORMAT_ABGR";
case PIXEL_FORMAT_XBGR:
return "PIXEL_FORMAT_XBGR";
+ case PIXEL_FORMAT_P016LE:
+ return "PIXEL_FORMAT_P016LE";
}
NOTREACHED() << "Invalid VideoPixelFormat provided: " << format;
return "";
@@ -107,6 +109,7 @@ bool IsYuvPlanar(VideoPixelFormat format) {
case PIXEL_FORMAT_YUV420P12:
case PIXEL_FORMAT_YUV422P12:
case PIXEL_FORMAT_YUV444P12:
+ case PIXEL_FORMAT_P016LE:
return true;
case PIXEL_FORMAT_UNKNOWN:
@@ -151,6 +154,7 @@ bool IsOpaque(VideoPixelFormat format) {
case PIXEL_FORMAT_YUV444P12:
case PIXEL_FORMAT_Y16:
case PIXEL_FORMAT_XBGR:
+ case PIXEL_FORMAT_P016LE:
return true;
case PIXEL_FORMAT_I420A:
case PIXEL_FORMAT_ARGB:
@@ -197,6 +201,7 @@ size_t BitDepth(VideoPixelFormat format) {
case PIXEL_FORMAT_YUV444P12:
return 12;
case PIXEL_FORMAT_Y16:
+ case PIXEL_FORMAT_P016LE:
return 16;
}
NOTREACHED();
diff --git a/chromium/media/base/video_types.h b/chromium/media/base/video_types.h
index edf07692dd0..8803f605f51 100644
--- a/chromium/media/base/video_types.h
+++ b/chromium/media/base/video_types.h
@@ -72,9 +72,11 @@ enum VideoPixelFormat {
PIXEL_FORMAT_ABGR = 27, // 32bpp RGBA, 1 plane.
PIXEL_FORMAT_XBGR = 28, // 24bpp RGB, 1 plane.
+ PIXEL_FORMAT_P016LE = 29, // 24bpp NV12, 16 bits per channel
+
// Please update UMA histogram enumeration when adding new formats here.
PIXEL_FORMAT_MAX =
- PIXEL_FORMAT_XBGR, // Must always be equal to largest entry logged.
+ PIXEL_FORMAT_P016LE, // Must always be equal to largest entry logged.
};
// Returns the name of a Format as a string.
diff --git a/chromium/media/base/waiting.h b/chromium/media/base/waiting.h
new file mode 100644
index 00000000000..2b0cc47c41f
--- /dev/null
+++ b/chromium/media/base/waiting.h
@@ -0,0 +1,43 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_WAITING_H_
+#define MEDIA_BASE_WAITING_H_
+
+#include "base/callback_forward.h"
+
+namespace media {
+
+// Here "waiting" refers to the state that media pipeline stalls waiting because
+// of some reason, e.g. no decryption key. It could cause Javascript events like
+// "waitingforkey" [1], but not necessarily.
+// Note: this generally does not cause the "waiting" event on HTML5 media
+// elements [2], which is tightly related to the buffering state change (see
+// buffering_state.h).
+// [1] https://www.w3.org/TR/encrypted-media/#dom-evt-waitingforkey
+// [2]
+// https://www.w3.org/TR/html5/semantics-embedded-content.html#eventdef-media-waiting
+
+enum class WaitingReason {
+ // The playback cannot proceed because some decryption key is not available.
+ // This could happen when the license exchange is delayed or failed. The
+ // playback will resume after the decryption key becomes available.
+ kNoDecryptionKey,
+
+ // The playback cannot proceed because the decoder has lost its state, e.g.
+ // information about reference frames. Usually this only happens to hardware
+ // decoders. To recover from this state, reset the decoder and start decoding
+ // from a key frame, which can typically be accomplished by a pipeline seek.
+ kDecoderStateLost,
+
+ // Must be assigned with the last enum value above.
+ kMaxValue = kDecoderStateLost,
+};
+
+// Callback to notify waiting state and the reason.
+using WaitingCB = base::RepeatingCallback<void(WaitingReason)>;
+
+} // namespace media
+
+#endif // MEDIA_BASE_WAITING_H_
diff --git a/chromium/media/base/win/BUILD.gn b/chromium/media/base/win/BUILD.gn
index 1950ff1b44e..d851563429a 100644
--- a/chromium/media/base/win/BUILD.gn
+++ b/chromium/media/base/win/BUILD.gn
@@ -14,8 +14,7 @@ config("delay_load_mf") {
]
}
-jumbo_component("win") {
- output_name = "media_win_util"
+jumbo_component("media_foundation_util") {
defines = [ "MF_INITIALIZER_IMPLEMENTATION" ]
set_sources_assignment_filter([])
sources = [
@@ -44,3 +43,25 @@ jumbo_component("win") {
# MediaFoundation is not available on Windows N, so must be delay loaded.
all_dependent_configs = [ ":delay_load_mf" ]
}
+
+source_set("d3d11") {
+ sources = [
+ "d3d11_create_device_cb.h",
+ ]
+ deps = [
+ "//base",
+ ]
+}
+
+source_set("d3d11_test_support") {
+ testonly = true
+ sources = [
+ "d3d11_mocks.cc",
+ "d3d11_mocks.h",
+ ]
+ deps = [
+ ":d3d11",
+ "//base",
+ "//testing/gmock",
+ ]
+}
diff --git a/chromium/media/gpu/windows/d3d11_create_device_cb.h b/chromium/media/base/win/d3d11_create_device_cb.h
index 5b505298bcf..3f9cf8f956c 100644
--- a/chromium/media/gpu/windows/d3d11_create_device_cb.h
+++ b/chromium/media/base/win/d3d11_create_device_cb.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_GPU_WINDOWS_D3D11_CREATE_DEVICE_CB_H_
-#define MEDIA_GPU_WINDOWS_D3D11_CREATE_DEVICE_CB_H_
+#ifndef MEDIA_BASE_WIN_D3D11_CREATE_DEVICE_CB_H_
+#define MEDIA_BASE_WIN_D3D11_CREATE_DEVICE_CB_H_
#include <d3d11_1.h>
#include <wrl/client.h>
@@ -30,4 +30,4 @@ using D3D11CreateDeviceCB =
ID3D11DeviceContext**)>;
} // namespace media
-#endif // MEDIA_GPU_WINDOWS_D3D11_CREATE_DEVICE_CB_H_
+#endif // MEDIA_BASE_WIN_D3D11_CREATE_DEVICE_CB_H_
diff --git a/chromium/media/gpu/windows/d3d11_mocks.cc b/chromium/media/base/win/d3d11_mocks.cc
index a32f2990b09..a2e34df5c45 100644
--- a/chromium/media/gpu/windows/d3d11_mocks.cc
+++ b/chromium/media/base/win/d3d11_mocks.cc
@@ -2,7 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/gpu/windows/d3d11_mocks.h"
+#include "media/base/win/d3d11_mocks.h"
+
namespace media {
D3D11CreateDeviceMock::D3D11CreateDeviceMock() = default;
diff --git a/chromium/media/gpu/windows/d3d11_mocks.h b/chromium/media/base/win/d3d11_mocks.h
index 522fd6f5406..eacb856fde8 100644
--- a/chromium/media/gpu/windows/d3d11_mocks.h
+++ b/chromium/media/base/win/d3d11_mocks.h
@@ -1,8 +1,9 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_GPU_WINDOWS_D3D11_MOCKS_H_
-#define MEDIA_GPU_WINDOWS_D3D11_MOCKS_H_
+
+#ifndef MEDIA_BASE_WIN_D3D11_MOCKS_H_
+#define MEDIA_BASE_WIN_D3D11_MOCKS_H_
#include <d3d11.h>
#include <d3d11_1.h>
@@ -11,7 +12,7 @@
#include <wrl/implements.h>
#include "base/win/iunknown_impl.h"
-#include "media/gpu/windows/d3d11_create_device_cb.h"
+#include "media/base/win/d3d11_create_device_cb.h"
#include "testing/gmock/include/gmock/gmock.h"
#define MOCK_STDCALL_METHOD0(Name, Types) \
@@ -44,18 +45,36 @@
#define MOCK_STDCALL_METHOD9(Name, Types) \
MOCK_METHOD9_WITH_CALLTYPE(STDMETHODCALLTYPE, Name, Types)
+// Helper ON_CALL and EXPECT_CALL for Microsoft::WRL::ComPtr, e.g.
+// COM_EXPECT_CALL(foo_, Bar());
+// where |foo_| is ComPtr<D3D11FooMock>.
+#define COM_ON_CALL(obj, call) ON_CALL(*obj.Get(), call)
+#define COM_EXPECT_CALL(obj, call) EXPECT_CALL(*obj.Get(), call)
+
namespace media {
// Use this action when using SetArgPointee with COM pointers.
// e.g.
-// EXPECT_CALL(*device_mock_.Get(), QueryInterface(IID_ID3D11VideoDevice, _))
+// COM_EXPECT_CALL(device_mock_, QueryInterface(IID_ID3D11VideoDevice, _))
// .WillRepeatedly(DoAll(
-// AddRefAndSetArgPointee<1>(video_device_mock_.Get()), Return(S_OK)));
-ACTION_TEMPLATE(AddRefAndSetArgPointee,
+// SetComPointee<1>(video_device_mock_.Get()), Return(S_OK)));
+ACTION_TEMPLATE(SetComPointee,
+ HAS_1_TEMPLATE_PARAMS(int, k),
+ AND_1_VALUE_PARAMS(p)) {
+ p->AddRef();
+ *std::get<k>(args) = p;
+}
+
+// Same as above, but returns S_OK for convenience.
+// e.g.
+// COM_EXPECT_CALL(device_mock_, QueryInterface(IID_ID3D11VideoDevice, _))
+// .WillRepeatedly(SetComPointeeAndReturnOk<1>(video_device_mock_.Get()));
+ACTION_TEMPLATE(SetComPointeeAndReturnOk,
HAS_1_TEMPLATE_PARAMS(int, k),
AND_1_VALUE_PARAMS(p)) {
p->AddRef();
*std::get<k>(args) = p;
+ return S_OK;
}
// Use this function to create a mock so that they are ref-counted correctly.
@@ -1576,4 +1595,4 @@ class D3D11DeviceContextMock : public MockCOMInterface<ID3D11DeviceContext> {
#undef MOCK_STDCALL_METHOD8
#undef MOCK_STDCALL_METHOD9
-#endif // MEDIA_GPU_WINDOWS_D3D11_MOCKS_H_
+#endif // MEDIA_BASE_WIN_D3D11_MOCKS_H_
diff --git a/chromium/media/blink/DEPS b/chromium/media/blink/DEPS
index 06b6b6bf8e1..d7d401da6b0 100644
--- a/chromium/media/blink/DEPS
+++ b/chromium/media/blink/DEPS
@@ -15,9 +15,10 @@ include_rules = [
"+services/network/public/cpp",
"+services/network/public/mojom",
"+services/service_manager/public/cpp",
+ "+third_party/blink/public/common",
+ "+third_party/blink/public/mojom",
"+third_party/blink/public/platform",
"+third_party/blink/public/web",
- "+third_party/blink/public/common",
# media/mojo is not part of "media" target and should not use MEDIA_EXPORT.
"-media/base/media_export.h"
diff --git a/chromium/media/blink/cache_util.cc b/chromium/media/blink/cache_util.cc
index d235c56fbea..97618b30204 100644
--- a/chromium/media/blink/cache_util.cc
+++ b/chromium/media/blink/cache_util.cc
@@ -8,7 +8,7 @@
#include <string>
-#include "base/macros.h"
+#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_util.h"
#include "base/time/time.h"
@@ -65,7 +65,7 @@ uint32_t GetReasonsForUncacheability(const WebURLResponse& response) {
TimeDelta::FromSeconds(3600); // Arbitrary value.
const char kMaxAgePrefix[] = "max-age=";
- const size_t kMaxAgePrefixLen = arraysize(kMaxAgePrefix) - 1;
+ const size_t kMaxAgePrefixLen = base::size(kMaxAgePrefix) - 1;
if (cache_control_header.substr(0, kMaxAgePrefixLen) == kMaxAgePrefix) {
int64_t max_age_seconds;
base::StringToInt64(
@@ -101,7 +101,7 @@ base::TimeDelta GetCacheValidUntil(const WebURLResponse& response) {
base::TimeDelta ret = base::TimeDelta::FromDays(30);
const char kMaxAgePrefix[] = "max-age=";
- const size_t kMaxAgePrefixLen = arraysize(kMaxAgePrefix) - 1;
+ const size_t kMaxAgePrefixLen = base::size(kMaxAgePrefix) - 1;
if (cache_control_header.substr(0, kMaxAgePrefixLen) == kMaxAgePrefix) {
int64_t max_age_seconds;
base::StringToInt64(
diff --git a/chromium/media/blink/cache_util_unittest.cc b/chromium/media/blink/cache_util_unittest.cc
index ea9f8398ddc..e7ef430d3b1 100644
--- a/chromium/media/blink/cache_util_unittest.cc
+++ b/chromium/media/blink/cache_util_unittest.cc
@@ -10,7 +10,7 @@
#include <string>
#include "base/format_macros.h"
-#include "base/macros.h"
+#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
#include "base/strings/string_util.h"
@@ -73,7 +73,7 @@ TEST(CacheUtilTest, GetReasonsForUncacheability) {
{WebURLResponse::kHTTPVersion_1_1, 200,
"cache-control: no-cache\ncache-control: no-store", kNoCache | kNoStore},
};
- for (size_t i = 0; i < arraysize(tests); ++i) {
+ for (size_t i = 0; i < base::size(tests); ++i) {
SCOPED_TRACE(base::StringPrintf("case: %" PRIuS
", version: %d, code: %d, headers: %s",
i, tests[i].version, tests[i].status_code,
diff --git a/chromium/media/blink/cdm_session_adapter.h b/chromium/media/blink/cdm_session_adapter.h
index 0cb6cf55862..b5c2c8e2cdd 100644
--- a/chromium/media/blink/cdm_session_adapter.h
+++ b/chromium/media/blink/cdm_session_adapter.h
@@ -10,9 +10,9 @@
#include <map>
#include <memory>
#include <string>
+#include <unordered_map>
#include <vector>
-#include "base/containers/hash_tables.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
@@ -112,8 +112,9 @@ class CdmSessionAdapter : public base::RefCounted<CdmSessionAdapter> {
friend class base::RefCounted<CdmSessionAdapter>;
// Session ID to WebContentDecryptionModuleSessionImpl mapping.
- typedef base::hash_map<std::string,
- base::WeakPtr<WebContentDecryptionModuleSessionImpl> >
+ typedef std::unordered_map<
+ std::string,
+ base::WeakPtr<WebContentDecryptionModuleSessionImpl>>
SessionMap;
~CdmSessionAdapter();
diff --git a/chromium/media/blink/key_system_config_selector.cc b/chromium/media/blink/key_system_config_selector.cc
index 27326374598..5f2bf5ccfc6 100644
--- a/chromium/media/blink/key_system_config_selector.cc
+++ b/chromium/media/blink/key_system_config_selector.cc
@@ -15,6 +15,7 @@
#include "media/base/cdm_config.h"
#include "media/base/key_system_names.h"
#include "media/base/key_systems.h"
+#include "media/base/logging_override_if_enabled.h"
#include "media/base/media_permission.h"
#include "media/base/mime_util.h"
#include "media/blink/webmediaplayer_util.h"
diff --git a/chromium/media/blink/lru.h b/chromium/media/blink/lru.h
index 7ad96371839..60ef27df645 100644
--- a/chromium/media/blink/lru.h
+++ b/chromium/media/blink/lru.h
@@ -8,8 +8,8 @@
#include <stddef.h>
#include <list>
+#include <unordered_map>
-#include "base/containers/hash_tables.h"
#include "base/macros.h"
namespace media {
@@ -85,7 +85,7 @@ class LRU {
// Maps element values to positions in the list so that we
// can quickly remove elements.
- base::hash_map<T, typename std::list<T>::iterator> pos_;
+ std::unordered_map<T, typename std::list<T>::iterator> pos_;
DISALLOW_COPY_AND_ASSIGN(LRU);
};
diff --git a/chromium/media/blink/multibuffer.h b/chromium/media/blink/multibuffer.h
index be1ea23cb78..987326f1c20 100644
--- a/chromium/media/blink/multibuffer.h
+++ b/chromium/media/blink/multibuffer.h
@@ -12,10 +12,10 @@
#include <map>
#include <memory>
#include <set>
+#include <unordered_map>
#include <vector>
#include "base/callback.h"
-#include "base/containers/hash_tables.h"
#include "base/hash.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
@@ -41,7 +41,7 @@ typedef std::pair<MultiBuffer*, MultiBufferBlockId> MultiBufferGlobalBlockId;
} // namespace media
-namespace BASE_HASH_NAMESPACE {
+namespace std {
template <>
struct hash<media::MultiBufferGlobalBlockId> {
@@ -50,7 +50,7 @@ struct hash<media::MultiBufferGlobalBlockId> {
}
};
-} // namespace BASE_HASH_NAMESPACE
+} // namespace std
namespace media {
@@ -210,7 +210,7 @@ class MEDIA_BLINK_EXPORT MultiBuffer {
// Block numbers can be calculated from byte positions as:
// block_num = byte_pos >> block_size_shift
typedef MultiBufferBlockId BlockId;
- typedef base::hash_map<BlockId, scoped_refptr<DataBuffer>> DataMap;
+ typedef std::unordered_map<BlockId, scoped_refptr<DataBuffer>> DataMap;
// Registers a reader at the given position.
// If the cache does not already contain |pos|, it will activate
diff --git a/chromium/media/blink/multibuffer_data_source.cc b/chromium/media/blink/multibuffer_data_source.cc
index 0c99bb24f4d..3e0e78432e3 100644
--- a/chromium/media/blink/multibuffer_data_source.cc
+++ b/chromium/media/blink/multibuffer_data_source.cc
@@ -129,6 +129,7 @@ MultibufferDataSource::MultibufferDataSource(
loading_(false),
failed_(false),
render_task_runner_(task_runner),
+ url_data_(std::move(url_data_arg)),
stop_signal_received_(false),
media_has_played_(false),
single_origin_(true),
@@ -140,14 +141,13 @@ MultibufferDataSource::MultibufferDataSource(
host_(host),
downloading_cb_(downloading_cb),
weak_factory_(this) {
- url_data_and_loading_state_.SetUrlData(std::move(url_data_arg));
weak_ptr_ = weak_factory_.GetWeakPtr();
DCHECK(host_);
DCHECK(downloading_cb_);
DCHECK(render_task_runner_->BelongsToCurrentThread());
- DCHECK(url_data());
- url_data()->Use();
- url_data()->OnRedirect(
+ DCHECK(url_data_.get());
+ url_data_->Use();
+ url_data_->OnRedirect(
base::Bind(&MultibufferDataSource::OnRedirect, weak_ptr_));
}
@@ -159,8 +159,9 @@ bool MultibufferDataSource::media_has_played() const {
return media_has_played_;
}
-bool MultibufferDataSource::assume_fully_buffered() {
- return !url_data()->url().SchemeIsHTTPOrHTTPS();
+bool MultibufferDataSource::AssumeFullyBuffered() const {
+ DCHECK(url_data_);
+ return !url_data_->url().SchemeIsHTTPOrHTTPS();
}
void MultibufferDataSource::SetReader(MultiBufferReader* reader) {
@@ -174,7 +175,7 @@ void MultibufferDataSource::CreateResourceLoader(int64_t first_byte_position,
DCHECK(render_task_runner_->BelongsToCurrentThread());
SetReader(new MultiBufferReader(
- url_data()->multibuffer(), first_byte_position, last_byte_position,
+ url_data_->multibuffer(), first_byte_position, last_byte_position,
base::Bind(&MultibufferDataSource::ProgressCallback, weak_ptr_)));
reader_->SetIsClientAudioElement(is_client_audio_element_);
UpdateBufferSizes();
@@ -187,7 +188,7 @@ void MultibufferDataSource::CreateResourceLoader_Locked(
lock_.AssertAcquired();
reader_.reset(new MultiBufferReader(
- url_data()->multibuffer(), first_byte_position, last_byte_position,
+ url_data_->multibuffer(), first_byte_position, last_byte_position,
base::Bind(&MultibufferDataSource::ProgressCallback, weak_ptr_)));
UpdateBufferSizes();
}
@@ -199,8 +200,6 @@ void MultibufferDataSource::Initialize(const InitializeCB& init_cb) {
init_cb_ = init_cb;
- url_data_and_loading_state_.SetLoadingState(
- UrlData::UrlDataWithLoadingState::LoadingState::kPreload);
CreateResourceLoader(0, kPositionNotSpecified);
// We're not allowed to call Wait() if data is already available.
@@ -238,14 +237,14 @@ void MultibufferDataSource::OnRedirect(
StopLoader();
return;
}
- if (url_data()->url().GetOrigin() != destination->url().GetOrigin()) {
+ if (url_data_->url().GetOrigin() != destination->url().GetOrigin()) {
single_origin_ = false;
}
SetReader(nullptr);
- url_data_and_loading_state_.SetUrlData(std::move(destination));
+ url_data_ = std::move(destination);
- if (url_data()) {
- url_data()->OnRedirect(
+ if (url_data_) {
+ url_data_->OnRedirect(
base::Bind(&MultibufferDataSource::OnRedirect, weak_ptr_));
if (init_cb_) {
@@ -287,11 +286,11 @@ bool MultibufferDataSource::HasSingleOrigin() {
}
bool MultibufferDataSource::IsCorsCrossOrigin() const {
- return url_data()->is_cors_cross_origin();
+ return url_data_->is_cors_cross_origin();
}
UrlData::CorsMode MultibufferDataSource::cors_mode() const {
- return url_data()->cors_mode();
+ return url_data_->cors_mode();
}
void MultibufferDataSource::MediaPlaybackRateChanged(double playback_rate) {
@@ -311,8 +310,6 @@ void MultibufferDataSource::MediaIsPlaying() {
cancel_on_defer_ = false;
// Once we start playing, we need preloading.
preload_ = AUTO;
- url_data_and_loading_state_.SetLoadingState(
- UrlData::UrlDataWithLoadingState::LoadingState::kHasPlayed);
UpdateBufferSizes();
}
@@ -322,6 +319,13 @@ void MultibufferDataSource::Stop() {
{
base::AutoLock auto_lock(lock_);
StopInternal_Locked();
+
+ // Cleanup resources immediately if we're already on the right thread.
+ if (render_task_runner_->BelongsToCurrentThread()) {
+ reader_.reset();
+ url_data_.reset();
+ return;
+ }
}
render_task_runner_->PostTask(FROM_HERE,
@@ -348,8 +352,6 @@ void MultibufferDataSource::SetBitrate(int bitrate) {
void MultibufferDataSource::OnBufferingHaveEnough(bool always_cancel) {
DCHECK(render_task_runner_->BelongsToCurrentThread());
- url_data_and_loading_state_.SetLoadingState(
- UrlData::UrlDataWithLoadingState::LoadingState::kIdle);
if (reader_ && (always_cancel || (preload_ == METADATA &&
!media_has_played_ && !IsStreaming()))) {
cancel_on_defer_ = true;
@@ -367,14 +369,14 @@ void MultibufferDataSource::OnBufferingHaveEnough(bool always_cancel) {
}
}
-int64_t MultibufferDataSource::GetMemoryUsage() const {
+int64_t MultibufferDataSource::GetMemoryUsage() {
// TODO(hubbe): Make more accurate when url_data_ is shared.
- return base::checked_cast<int64_t>(url_data()->CachedSize())
- << url_data()->multibuffer()->block_size_shift();
+ return base::checked_cast<int64_t>(url_data_->CachedSize())
+ << url_data_->multibuffer()->block_size_shift();
}
GURL MultibufferDataSource::GetUrlAfterRedirects() const {
- return url_data()->url();
+ return url_data_->url();
}
void MultibufferDataSource::Read(int64_t position,
@@ -504,7 +506,7 @@ void MultibufferDataSource::SeekTask_Locked() {
if (read_op_)
return;
- url_data()->AddBytesRead(bytes_read_);
+ url_data_->AddBytesRead(bytes_read_);
bytes_read_ = 0;
if (reader_) {
@@ -580,18 +582,18 @@ void MultibufferDataSource::StartCallback() {
// All responses must be successful. Resources that are assumed to be fully
// buffered must have a known content length.
- bool success = reader_ && reader_->Available() > 0 && url_data() &&
- (!assume_fully_buffered() ||
- url_data()->length() != kPositionNotSpecified);
+ bool success =
+ reader_ && reader_->Available() > 0 && url_data_ &&
+ (!AssumeFullyBuffered() || url_data_->length() != kPositionNotSpecified);
if (success) {
{
base::AutoLock auto_lock(lock_);
- total_bytes_ = url_data()->length();
+ total_bytes_ = url_data_->length();
}
streaming_ =
- !assume_fully_buffered() && (total_bytes_ == kPositionNotSpecified ||
- !url_data()->range_supported());
+ !AssumeFullyBuffered() && (total_bytes_ == kPositionNotSpecified ||
+ !url_data_->range_supported());
media_log_->SetDoubleProperty("total_bytes",
static_cast<double>(total_bytes_));
@@ -609,7 +611,7 @@ void MultibufferDataSource::StartCallback() {
if (success) {
if (total_bytes_ != kPositionNotSpecified) {
host_->SetTotalBytes(total_bytes_);
- if (assume_fully_buffered())
+ if (AssumeFullyBuffered())
host_->AddBufferedByteRange(0, total_bytes_);
}
@@ -617,7 +619,7 @@ void MultibufferDataSource::StartCallback() {
// make sure that we update single_origin_ now.
media_log_->SetBooleanProperty("single_origin", single_origin_);
media_log_->SetBooleanProperty("range_header_supported",
- url_data()->range_supported());
+ url_data_->range_supported());
}
render_task_runner_->PostTask(FROM_HERE,
@@ -634,32 +636,28 @@ void MultibufferDataSource::ProgressCallback(int64_t begin, int64_t end) {
DVLOG(1) << __func__ << "(" << begin << ", " << end << ")";
DCHECK(render_task_runner_->BelongsToCurrentThread());
- if (assume_fully_buffered())
- return;
-
base::AutoLock auto_lock(lock_);
+ if (stop_signal_received_)
+ return;
- if (end > begin) {
- // TODO(scherkus): we shouldn't have to lock to signal host(), see
- // http://crbug.com/113712 for details.
- if (stop_signal_received_)
- return;
+ if (AssumeFullyBuffered())
+ return;
+ if (end > begin)
host_->AddBufferedByteRange(begin, end);
- }
- if (buffer_size_update_counter_ > 0) {
+ if (buffer_size_update_counter_ > 0)
buffer_size_update_counter_--;
- } else {
+ else
UpdateBufferSizes();
- }
+
UpdateLoadingState_Locked(false);
}
void MultibufferDataSource::UpdateLoadingState_Locked(bool force_loading) {
DVLOG(1) << __func__;
lock_.AssertAcquired();
- if (assume_fully_buffered())
+ if (AssumeFullyBuffered())
return;
// Update loading state.
bool is_loading = !!reader_ && reader_->IsLoading();
@@ -721,7 +719,7 @@ void MultibufferDataSource::UpdateBufferSizes() {
// Increase buffering slowly at a rate of 10% of data downloaded so
// far, maxing out at the preload size.
int64_t extra_buffer = std::min(
- preload, url_data()->BytesReadFromCache() * kSlowPreloadPercentage / 100);
+ preload, url_data_->BytesReadFromCache() * kSlowPreloadPercentage / 100);
// Add extra buffer to preload.
preload += extra_buffer;
@@ -749,14 +747,14 @@ void MultibufferDataSource::UpdateBufferSizes() {
extra_buffer * 3,
preload_high + pin_backward + extra_buffer);
- if (url_data()->FullyCached() ||
- (url_data()->length() != kPositionNotSpecified &&
- url_data()->length() < kDefaultPinSize)) {
+ if (url_data_->FullyCached() ||
+ (url_data_->length() != kPositionNotSpecified &&
+ url_data_->length() < kDefaultPinSize)) {
// We just make pin_forwards/backwards big enough to encompass the
// whole file regardless of where we are, with some extra margins.
- pin_forward = std::max(pin_forward, url_data()->length() * 2);
- pin_backward = std::max(pin_backward, url_data()->length() * 2);
- buffer_size = url_data()->length();
+ pin_forward = std::max(pin_forward, url_data_->length() * 2);
+ pin_backward = std::max(pin_backward, url_data_->length() * 2);
+ buffer_size = url_data_->length();
}
reader_->SetMaxBuffer(buffer_size);
diff --git a/chromium/media/blink/multibuffer_data_source.h b/chromium/media/blink/multibuffer_data_source.h
index 8a938b772e1..51b3cb4c614 100644
--- a/chromium/media/blink/multibuffer_data_source.h
+++ b/chromium/media/blink/multibuffer_data_source.h
@@ -13,7 +13,6 @@
#include "base/callback.h"
#include "base/macros.h"
-#include "base/memory/linked_ptr.h"
#include "base/memory/weak_ptr.h"
#include "base/synchronization/lock.h"
#include "media/base/data_source.h"
@@ -93,7 +92,7 @@ class MEDIA_BLINK_EXPORT MultibufferDataSource : public DataSource {
bool media_has_played() const;
// Returns true if the resource is local.
- bool assume_fully_buffered();
+ bool AssumeFullyBuffered() const override;
// Cancels any open network connections once reaching the deferred state. If
// |always_cancel| is false this is done only for preload=metadata, non-
@@ -102,7 +101,7 @@ class MEDIA_BLINK_EXPORT MultibufferDataSource : public DataSource {
// deferred, connections will be immediately closed.
void OnBufferingHaveEnough(bool always_cancel);
- int64_t GetMemoryUsage() const;
+ int64_t GetMemoryUsage() override;
GURL GetUrlAfterRedirects() const;
@@ -123,8 +122,6 @@ class MEDIA_BLINK_EXPORT MultibufferDataSource : public DataSource {
}
protected:
- UrlData* url_data() const { return url_data_and_loading_state_.url_data(); }
-
void OnRedirect(const scoped_refptr<UrlData>& destination);
// A factory method to create a BufferedResourceLoader based on the read
@@ -213,7 +210,7 @@ class MEDIA_BLINK_EXPORT MultibufferDataSource : public DataSource {
const scoped_refptr<base::SingleThreadTaskRunner> render_task_runner_;
// URL of the resource requested.
- UrlData::UrlDataWithLoadingState url_data_and_loading_state_;
+ scoped_refptr<UrlData> url_data_;
// A resource reader for the media resource.
std::unique_ptr<MultiBufferReader> reader_;
diff --git a/chromium/media/blink/multibuffer_data_source_unittest.cc b/chromium/media/blink/multibuffer_data_source_unittest.cc
index 6bf7d48cb99..421ecba8b9a 100644
--- a/chromium/media/blink/multibuffer_data_source_unittest.cc
+++ b/chromium/media/blink/multibuffer_data_source_unittest.cc
@@ -6,12 +6,12 @@
#include <stdint.h>
#include "base/bind.h"
-#include "base/macros.h"
#include "base/run_loop.h"
+#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/test/scoped_feature_list.h"
-#include "media/base/media_log.h"
#include "media/base/media_switches.h"
+#include "media/base/media_util.h"
#include "media/base/mock_filters.h"
#include "media/base/test_helpers.h"
#include "media/blink/buffered_data_source_host_impl.h"
@@ -190,13 +190,13 @@ class MockMultibufferDataSource : public MultibufferDataSource {
bool downloading() { return downloading_; }
void set_downloading(bool downloading) { downloading_ = downloading; }
- bool range_supported() { return url_data()->range_supported(); }
+ bool range_supported() { return url_data_->range_supported(); }
void CallSeekTask() { SeekTask(); }
private:
// Whether the resource is downloading or deferred.
bool downloading_;
- MediaLog media_log_;
+ NullMediaLog media_log_;
DISALLOW_COPY_AND_ASSIGN(MockMultibufferDataSource);
};
@@ -445,8 +445,8 @@ class MultibufferDataSourceTest : public testing::Test {
return loader()->current_buffer_size_ * 32768 /* block size */;
}
double data_source_playback_rate() { return data_source_->playback_rate_; }
- bool is_local_source() { return data_source_->assume_fully_buffered(); }
- scoped_refptr<UrlData> url_data() { return data_source_->url_data(); }
+ bool is_local_source() { return data_source_->AssumeFullyBuffered(); }
+ scoped_refptr<UrlData> url_data() { return data_source_->url_data_; }
void set_might_be_reused_from_cache_in_future(bool value) {
url_data()->set_cacheable(value);
}
@@ -704,7 +704,7 @@ TEST_F(MultibufferDataSourceTest,
response_generator_->GeneratePartial206(0, kDataSize - 1);
WebURLResponse response2 =
response_generator_->GeneratePartial206(kDataSize, kDataSize * 2 - 1);
- response2.SetURL(GURL(kHttpDifferentPathUrl));
+ response2.SetCurrentRequestUrl(GURL(kHttpDifferentPathUrl));
// The origin URL of response1 and response2 are same. So no error should
// occur.
ExecuteMixedResponseSuccessTest(response1, response2);
@@ -717,7 +717,7 @@ TEST_F(MultibufferDataSourceTest,
response_generator_->GeneratePartial206(0, kDataSize - 1);
WebURLResponse response2 =
response_generator_->GeneratePartial206(kDataSize, kDataSize * 2 - 1);
- response2.SetURL(GURL(kHttpDifferentOriginUrl));
+ response2.SetCurrentRequestUrl(GURL(kHttpDifferentOriginUrl));
// The origin URL of response1 and response2 are different. So an error should
// occur.
ExecuteMixedResponseFailureTest(response1, response2);
@@ -887,7 +887,7 @@ TEST_F(MultibufferDataSourceTest, StopDuringRead) {
InitializeWith206Response();
uint8_t buffer[256];
- data_source_->Read(kDataSize, arraysize(buffer), buffer,
+ data_source_->Read(kDataSize, base::size(buffer), buffer,
base::Bind(&MultibufferDataSourceTest::ReadCallback,
base::Unretained(this)));
@@ -1820,43 +1820,4 @@ TEST_F(MultibufferDataSourceTest, Http_Seek_Back) {
Stop();
}
-TEST_F(MultibufferDataSourceTest, LoadLimitTest) {
- base::test::ScopedFeatureList feature_list;
- feature_list.InitFromCommandLine(kLimitParallelMediaPreloading.name, "");
-
- StrictMock<MockBufferedDataSourceHost> hosts[7];
- std::vector<std::unique_ptr<MockMultibufferDataSource>> sources;
- for (size_t i = 0; i < base::size(hosts); i++) {
- sources.push_back(std::make_unique<MockMultibufferDataSource>(
- base::ThreadTaskRunnerHandle::Get(),
- url_index_->GetByUrl(
- GURL(std::string(kHttpUrl) + "?" + base::IntToString(i)),
- UrlData::CORS_UNSPECIFIED),
- &hosts[i]));
- sources[i]->SetPreload(preload_);
- sources[i]->Initialize(base::Bind(&MultibufferDataSourceTest::OnInitialize,
- base::Unretained(this)));
- }
- base::RunLoop().RunUntilIdle();
- EXPECT_EQ(1UL, url_index_->load_queue_size());
-}
-
-TEST_F(MultibufferDataSourceTest, LoadLimitTestNoLimit) {
- StrictMock<MockBufferedDataSourceHost> hosts[7];
- std::vector<std::unique_ptr<MockMultibufferDataSource>> sources;
- for (size_t i = 0; i < base::size(hosts); i++) {
- sources.push_back(std::make_unique<MockMultibufferDataSource>(
- base::ThreadTaskRunnerHandle::Get(),
- url_index_->GetByUrl(
- GURL(std::string(kHttpUrl) + "?" + base::IntToString(i)),
- UrlData::CORS_UNSPECIFIED),
- &hosts[i]));
- sources[i]->SetPreload(preload_);
- sources[i]->Initialize(base::Bind(&MultibufferDataSourceTest::OnInitialize,
- base::Unretained(this)));
- }
- base::RunLoop().RunUntilIdle();
- EXPECT_EQ(0UL, url_index_->load_queue_size());
-}
-
} // namespace media
diff --git a/chromium/media/blink/resource_multibuffer_data_provider.cc b/chromium/media/blink/resource_multibuffer_data_provider.cc
index 01c6bafb5c5..4653b6cfb8d 100644
--- a/chromium/media/blink/resource_multibuffer_data_provider.cc
+++ b/chromium/media/blink/resource_multibuffer_data_provider.cc
@@ -80,11 +80,11 @@ void ResourceMultiBufferDataProvider::Start() {
}
// Prepare the request.
- auto request = std::make_unique<WebURLRequest>(url_data_->url());
- request->SetRequestContext(is_client_audio_element_
- ? blink::mojom::RequestContextType::AUDIO
- : blink::mojom::RequestContextType::VIDEO);
- request->SetHTTPHeaderField(
+ WebURLRequest request(url_data_->url());
+ request.SetRequestContext(is_client_audio_element_
+ ? blink::mojom::RequestContextType::AUDIO
+ : blink::mojom::RequestContextType::VIDEO);
+ request.SetHTTPHeaderField(
WebString::FromUTF8(net::HttpRequestHeaders::kRange),
WebString::FromUTF8(
net::HttpByteRange::RightUnbounded(byte_pos()).GetHeaderValue()));
@@ -94,8 +94,8 @@ void ResourceMultiBufferDataProvider::Start() {
// This lets the data reduction proxy know that we don't have anything
// previously cached data for this resource. We can only send it if this is
// the first request for this resource.
- request->SetHTTPHeaderField(WebString::FromUTF8("chrome-proxy"),
- WebString::FromUTF8("frfr"));
+ request.SetHTTPHeaderField(WebString::FromUTF8("chrome-proxy"),
+ WebString::FromUTF8("frfr"));
}
// We would like to send an if-match header with the request to
@@ -105,7 +105,7 @@ void ResourceMultiBufferDataProvider::Start() {
// along the way. See crbug/504194 and crbug/689989 for more information.
// Disable compression, compression for audio/video doesn't make sense...
- request->SetHTTPHeaderField(
+ request.SetHTTPHeaderField(
WebString::FromUTF8(net::HttpRequestHeaders::kAcceptEncoding),
WebString::FromUTF8("identity;q=1, *;q=0"));
@@ -117,24 +117,16 @@ void ResourceMultiBufferDataProvider::Start() {
options.preflight_policy =
network::mojom::CorsPreflightPolicy::kPreventPreflight;
- request->SetFetchRequestMode(network::mojom::FetchRequestMode::kCors);
+ request.SetFetchRequestMode(network::mojom::FetchRequestMode::kCors);
if (url_data_->cors_mode() != UrlData::CORS_USE_CREDENTIALS) {
- request->SetFetchCredentialsMode(
+ request.SetFetchCredentialsMode(
network::mojom::FetchCredentialsMode::kSameOrigin);
}
}
- url_data_->WaitToLoad(
- base::BindOnce(&ResourceMultiBufferDataProvider::StartLoading,
- weak_factory_.GetWeakPtr(), std::move(request), options));
-}
-
-void ResourceMultiBufferDataProvider::StartLoading(
- std::unique_ptr<WebURLRequest> request,
- const blink::WebAssociatedURLLoaderOptions& options) {
active_loader_ =
url_data_->url_index()->fetch_context()->CreateUrlLoader(options);
- active_loader_->LoadAsynchronously(*request, this);
+ active_loader_->LoadAsynchronously(request, this);
}
/////////////////////////////////////////////////////////////////////////////
@@ -365,11 +357,19 @@ void ResourceMultiBufferDataProvider::DidReceiveResponse(
return; // "this" may be deleted now.
}
+ // Get the response URL since it can differ from the request URL when a
+ // service worker provided the response. Normally we would just use
+ // ResponseUrl(), but ResourceMultibufferDataProvider disallows mixing
+ // constructed responses (new Response()) and native server responses, even if
+ // they have the same response URL.
+ GURL response_url;
+ if (!response.WasFetchedViaServiceWorker() ||
+ response.HasUrlListViaServiceWorker()) {
+ response_url = response.ResponseUrl();
+ }
+
// This test is vital for security!
- const GURL& original_url = response.WasFetchedViaServiceWorker()
- ? response.OriginalURLViaServiceWorker()
- : response.Url();
- if (!url_data_->ValidateDataOrigin(original_url.GetOrigin())) {
+ if (!url_data_->ValidateDataOrigin(response_url.GetOrigin())) {
active_loader_.reset();
url_data_->Fail();
return; // "this" may be deleted now.
@@ -421,7 +421,8 @@ void ResourceMultiBufferDataProvider::DidReceiveData(const char* data,
// Beware, this object might be deleted here.
}
-void ResourceMultiBufferDataProvider::DidDownloadData(int dataLength) {
+void ResourceMultiBufferDataProvider::DidDownloadData(
+ unsigned long long dataLength) {
NOTIMPLEMENTED();
}
diff --git a/chromium/media/blink/resource_multibuffer_data_provider.h b/chromium/media/blink/resource_multibuffer_data_provider.h
index 484f79a9ae2..8da76d2e1c2 100644
--- a/chromium/media/blink/resource_multibuffer_data_provider.h
+++ b/chromium/media/blink/resource_multibuffer_data_provider.h
@@ -22,7 +22,6 @@
namespace blink {
class WebAssociatedURLLoader;
-struct WebAssociatedURLLoaderOptions;
} // namespace blink
namespace media {
@@ -56,7 +55,7 @@ class MEDIA_BLINK_EXPORT ResourceMultiBufferDataProvider
void DidSendData(unsigned long long bytesSent,
unsigned long long totalBytesToBeSent) override;
void DidReceiveResponse(const blink::WebURLResponse& response) override;
- void DidDownloadData(int data_length) override;
+ void DidDownloadData(unsigned long long data_length) override;
void DidReceiveData(const char* data, int data_length) override;
void DidReceiveCachedMetadata(const char* data, int dataLength) override;
void DidFinishLoading() override;
@@ -71,11 +70,6 @@ class MEDIA_BLINK_EXPORT ResourceMultiBufferDataProvider
// Callback used when we're asked to fetch data after the end of the file.
void Terminate();
- // At the end of Start(), we potentially wait for other loaders to
- // finish, when they do a callback calls this function.
- void StartLoading(std::unique_ptr<blink::WebURLRequest> request,
- const blink::WebAssociatedURLLoaderOptions& options);
-
// Parse a Content-Range header into its component pieces and return true if
// each of the expected elements was found & parsed correctly.
// |*instance_size| may be set to kPositionNotSpecified if the range ends in
diff --git a/chromium/media/blink/run_all_unittests.cc b/chromium/media/blink/run_all_unittests.cc
index b2653b39a7b..1e61f7d4cbf 100644
--- a/chromium/media/blink/run_all_unittests.cc
+++ b/chromium/media/blink/run_all_unittests.cc
@@ -57,37 +57,53 @@ class BlinkPlatformWithTaskEnvironment : public blink::Platform {
DISALLOW_COPY_AND_ASSIGN(BlinkPlatformWithTaskEnvironment);
};
-static int RunTests(base::TestSuite* test_suite) {
+class MediaBlinkTestSuite : public base::TestSuite {
+ public:
+ MediaBlinkTestSuite(int argc, char** argv) : base::TestSuite(argc, argv) {}
+
+ private:
+ void Initialize() override {
+ base::TestSuite::Initialize();
+
#if defined(OS_ANDROID)
- if (media::MediaCodecUtil::IsMediaCodecAvailable())
- media::EnablePlatformDecoderSupport();
+ if (media::MediaCodecUtil::IsMediaCodecAvailable())
+ media::EnablePlatformDecoderSupport();
#endif
- // Run this here instead of main() to ensure an AtExitManager is already
- // present.
- media::InitializeMediaLibrary();
+ // Run this here instead of main() to ensure an AtExitManager is already
+ // present.
+ media::InitializeMediaLibrary();
#if defined(V8_USE_EXTERNAL_STARTUP_DATA)
- gin::V8Initializer::LoadV8Snapshot(kSnapshotType);
- gin::V8Initializer::LoadV8Natives();
+ gin::V8Initializer::LoadV8Snapshot(kSnapshotType);
+ gin::V8Initializer::LoadV8Natives();
#endif
#if !defined(OS_IOS)
- // Initialize mojo firstly to enable Blink initialization to use it.
- mojo::core::Init();
+ // Initialize mojo firstly to enable Blink initialization to use it.
+ mojo::core::Init();
#endif
- BlinkPlatformWithTaskEnvironment platform_;
- service_manager::BinderRegistry empty_registry;
- blink::Initialize(&platform_, &empty_registry,
- platform_.GetMainThreadScheduler());
+ platform_ = std::make_unique<BlinkPlatformWithTaskEnvironment>();
+ blink::Initialize(platform_.get(), &empty_registry_,
+ platform_->GetMainThreadScheduler());
+ }
+
+ void Shutdown() override {
+ platform_.reset();
+ base::TestSuite::Shutdown();
+ }
- return test_suite->Run();
-}
+ std::unique_ptr<BlinkPlatformWithTaskEnvironment> platform_;
+ service_manager::BinderRegistry empty_registry_;
+
+ DISALLOW_COPY_AND_ASSIGN(MediaBlinkTestSuite);
+};
int main(int argc, char** argv) {
- base::TestSuite test_suite(argc, argv);
+ MediaBlinkTestSuite test_suite(argc, argv);
return base::LaunchUnitTests(
argc, argv,
- base::BindRepeating(&RunTests, base::Unretained(&test_suite)));
+ base::BindRepeating(&MediaBlinkTestSuite::Run,
+ base::Unretained(&test_suite)));
}
diff --git a/chromium/media/blink/url_index.cc b/chromium/media/blink/url_index.cc
index dc9fd338762..d3c381a7fea 100644
--- a/chromium/media/blink/url_index.cc
+++ b/chromium/media/blink/url_index.cc
@@ -22,18 +22,6 @@ namespace media {
const int kBlockSizeShift = 15; // 1<<15 == 32kb
const int kUrlMappingTimeoutSeconds = 300;
-// Max number of resource preloading in parallel.
-const size_t kMaxParallelPreload = 6;
-
-namespace {
-// Helper function, return max parallel preloads.
-size_t GetMaxParallelPreload() {
- if (base::FeatureList::IsEnabled(media::kLimitParallelMediaPreloading))
- return kMaxParallelPreload;
- return std::numeric_limits<size_t>::max();
-}
-}; // namespace
-
ResourceMultiBuffer::ResourceMultiBuffer(UrlData* url_data, int block_shift)
: MultiBuffer(block_shift, url_data->url_index_->lru_),
url_data_(url_data) {}
@@ -73,8 +61,6 @@ UrlData::~UrlData() {
BytesReadFromCache() >> 10);
UMA_HISTOGRAM_MEMORY_KB("Media.BytesReadFromNetwork",
BytesReadFromNetwork() >> 10);
- DCHECK_EQ(0, playing_);
- DCHECK_EQ(0, preloading_);
}
std::pair<GURL, UrlData::CorsMode> UrlData::key() const {
@@ -242,132 +228,6 @@ size_t UrlData::CachedSize() {
return multibuffer()->map().size();
}
-UrlData::UrlDataWithLoadingState::UrlDataWithLoadingState() {}
-UrlData::UrlDataWithLoadingState::~UrlDataWithLoadingState() {
- SetLoadingState(LoadingState::kIdle);
-}
-
-void UrlData::UrlDataWithLoadingState::SetLoadingState(
- LoadingState loading_state) {
- DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
- if (!url_data_)
- return;
- // Note that we increase loading state first and decrease afterwards to avoid
- // having the loading/playing counts go to zero temporarily.
- url_data_->IncreaseLoadersInState(loading_state);
- url_data_->DecreaseLoadersInState(loading_state_);
- loading_state_ = loading_state;
-}
-
-void UrlData::UrlDataWithLoadingState::SetUrlData(
- scoped_refptr<UrlData> url_data) {
- DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
- // Note that we increase loading state first and decrease afterwards to avoid
- // having the loading/playing counts go to zero temporarily.
- if (url_data)
- url_data->IncreaseLoadersInState(loading_state_);
- if (url_data_)
- url_data_->DecreaseLoadersInState(loading_state_);
- url_data_ = std::move(url_data);
-}
-
-bool UrlData::IsPreloading() const {
- return preloading_ > 0 && playing_ == 0;
-}
-
-void UrlData::IncreaseLoadersInState(
- UrlDataWithLoadingState::LoadingState state) {
- switch (state) {
- case UrlDataWithLoadingState::LoadingState::kIdle:
- break;
- case UrlDataWithLoadingState::LoadingState::kPreload:
- preloading_++;
- break;
- case UrlDataWithLoadingState::LoadingState::kHasPlayed:
- playing_++;
- if (playing_ == 1)
- url_index_->RemoveLoading(this);
- break;
- }
-}
-
-void UrlData::DecreaseLoadersInState(
- UrlDataWithLoadingState::LoadingState state) {
- switch (state) {
- case UrlDataWithLoadingState::LoadingState::kIdle:
- return;
- case UrlDataWithLoadingState::LoadingState::kPreload:
- preloading_--;
- DCHECK_GE(preloading_, 0);
- break;
- case UrlDataWithLoadingState::LoadingState::kHasPlayed:
- playing_--;
- DCHECK_GE(playing_, 0);
- break;
- }
- if (preloading_ == 0 && playing_ == 0)
- url_index_->RemoveLoading(this);
-}
-
-void UrlData::WaitToLoad(base::OnceClosure cb) {
- // We only limit and queue preloading requests.
- if (!IsPreloading()) {
- std::move(cb).Run();
- } else {
- waiting_load_callbacks_.emplace_back(std::move(cb));
- if (waiting_load_callbacks_.size() == 1)
- url_index_->WaitToLoad(this, true);
- }
-}
-
-void UrlData::LoadNow(bool immediate) {
- // Move the callbacks into local variables in case
- // any of the callbacks decide to call WaitToLoad().
- std::vector<base::OnceClosure> waiting_load_callbacks;
- std::swap(waiting_load_callbacks, waiting_load_callbacks_);
- for (auto& i : waiting_load_callbacks) {
- if (immediate) {
- std::move(i).Run();
- } else {
- base::ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, std::move(i));
- }
- }
-}
-
-void UrlIndex::WaitToLoad(UrlData* url_data, bool immediate) {
- if (loading_.find(url_data) != loading_.end()) {
- // Already loading
- url_data->LoadNow(immediate);
- return;
- }
- if (loading_.size() < GetMaxParallelPreload()) {
- loading_.insert(url_data);
- url_data->LoadNow(immediate);
- return;
- }
- loading_queue_.push_back(url_data);
-}
-
-void UrlIndex::RemoveLoading(UrlData* url_data) {
- auto i = loading_.find(url_data);
- if (i == loading_.end())
- return;
- loading_.erase(i);
- while (loading_.size() < GetMaxParallelPreload() && !loading_queue_.empty()) {
- auto url_data = loading_queue_.front();
- loading_queue_.pop_front();
- if (url_data->IsPreloading()) {
- WaitToLoad(url_data.get(), false);
- } else {
- url_data->LoadNow(false);
- }
- }
-}
-
-bool UrlIndex::HasReachedMaxParallelPreload() const {
- return loading_.size() >= kMaxParallelPreload;
-}
-
UrlIndex::UrlIndex(ResourceFetchContext* fetch_context)
: UrlIndex(fetch_context, kBlockSizeShift) {}
@@ -394,8 +254,6 @@ void UrlIndex::RemoveUrlData(const scoped_refptr<UrlData>& url_data) {
auto i = indexed_data_.find(url_data->key());
if (i != indexed_data_.end() && i->second == url_data)
indexed_data_.erase(i);
-
- RemoveLoading(url_data.get());
}
scoped_refptr<UrlData> UrlIndex::GetByUrl(const GURL& gurl,
diff --git a/chromium/media/blink/url_index.h b/chromium/media/blink/url_index.h
index 4b0ede71410..1c933968b7e 100644
--- a/chromium/media/blink/url_index.h
+++ b/chromium/media/blink/url_index.h
@@ -61,31 +61,6 @@ class MEDIA_BLINK_EXPORT UrlData : public base::RefCounted<UrlData> {
enum CorsMode { CORS_UNSPECIFIED, CORS_ANONYMOUS, CORS_USE_CREDENTIALS };
typedef std::pair<GURL, CorsMode> KeyType;
- // UrlData keeps track of how many clients are preloading or
- // playing from this resource. This class encapsulates the
- // adding and removing of counts to guarantee that the counts
- // are accurate. Clients who wish to change the loading
- // counts need to have one of these, assign an UrlData to it
- // and call SetLoadingState() to releflect what the client is
- // currently doing.
- class MEDIA_BLINK_EXPORT UrlDataWithLoadingState {
- public:
- UrlDataWithLoadingState();
- ~UrlDataWithLoadingState();
-
- enum class LoadingState { kIdle, kPreload, kHasPlayed };
-
- void SetLoadingState(LoadingState loading_state);
- void SetUrlData(scoped_refptr<UrlData> url_data);
- UrlData* url_data() const { return url_data_.get(); }
-
- private:
- LoadingState loading_state_ = LoadingState::kIdle;
- scoped_refptr<UrlData> url_data_;
- SEQUENCE_CHECKER(sequence_checker_);
- DISALLOW_COPY_AND_ASSIGN(UrlDataWithLoadingState);
- };
-
// Accessors
const GURL& url() const { return url_; }
@@ -183,10 +158,6 @@ class MEDIA_BLINK_EXPORT UrlData : public base::RefCounted<UrlData> {
void AddBytesReadFromNetwork(int64_t b);
int64_t BytesReadFromNetwork() const { return bytes_read_from_network_; }
- // Call |cb| when it's ok to start preloading an URL.
- // Note that |cb| may be called directly from inside this function.
- void WaitToLoad(base::OnceClosure cb);
-
protected:
UrlData(const GURL& url, CorsMode cors_mode, UrlIndex* url_index);
virtual ~UrlData();
@@ -197,26 +168,9 @@ class MEDIA_BLINK_EXPORT UrlData : public base::RefCounted<UrlData> {
friend class UrlIndexTest;
friend class base::RefCounted<UrlData>;
- // Returns true if one or more clients are prelaoding and no clients
- // are currently playing.
- bool IsPreloading() const;
-
- // Called by url_index when it's time to fire callbacks sent to WaitToLoad().
- // |immediate| is true if this call was not delayed in any way.
- void LoadNow(bool immediate);
-
void OnEmpty();
void MergeFrom(const scoped_refptr<UrlData>& other);
- // These two are called from UrlDataWithLoadingState to
- // increase and decrease |playing_| and |preloading_|.
- // They will also call the UrlIndex and and tell it to
- // de-queue other resources waiting to load as needed.
- void IncreaseLoadersInState(
- UrlDataWithLoadingState::LoadingState loading_state);
- void DecreaseLoadersInState(
- UrlDataWithLoadingState::LoadingState loading_state);
-
// Url we represent, note that there may be multiple UrlData for
// the same url.
const GURL url_;
@@ -272,12 +226,6 @@ class MEDIA_BLINK_EXPORT UrlData : public base::RefCounted<UrlData> {
std::vector<BytesReceivedCB> bytes_received_callbacks_;
- // Number of data sources that are currently preloading this url.
- int preloading_ = 0;
-
- // Number of data sources that are playing this url.
- int playing_ = 0;
-
std::vector<base::OnceClosure> waiting_load_callbacks_;
base::ThreadChecker thread_checker_;
@@ -331,16 +279,6 @@ class MEDIA_BLINK_EXPORT UrlIndex {
friend class UrlIndexTest;
void RemoveUrlData(const scoped_refptr<UrlData>& url_data);
- // Call url_data->LoadNow() when it's ok to start preloading.
- // Note that LoadNow may be called immediately.
- // |immediate| shold be true if this was called directly from
- // UrlData::WaitToLoad.
- void WaitToLoad(UrlData* url_data, bool immediate);
-
- // Let us know that |url_data| is done preloading. If other resources
- // are waiting, we will let one of them know it's ok to load now.
- void RemoveLoading(UrlData* url_data);
-
// Virtual so we can override it in tests.
virtual scoped_refptr<UrlData> NewUrlData(const GURL& url,
UrlData::CorsMode cors_mode);
@@ -357,7 +295,6 @@ class MEDIA_BLINK_EXPORT UrlIndex {
// Currently only changed for testing purposes.
const int block_shift_;
- std::set<UrlData*> loading_;
std::deque<scoped_refptr<UrlData>> loading_queue_;
base::MemoryPressureListener memory_pressure_listener_;
diff --git a/chromium/media/blink/url_index_unittest.cc b/chromium/media/blink/url_index_unittest.cc
index 01d53a54a82..1dc131a2348 100644
--- a/chromium/media/blink/url_index_unittest.cc
+++ b/chromium/media/blink/url_index_unittest.cc
@@ -30,15 +30,6 @@ class UrlIndexTest : public testing::Test {
return ret;
}
- void AddToLoadQueue(UrlData* url_data, base::OnceClosure cb) {
- url_data->waiting_load_callbacks_.emplace_back(std::move(cb));
- url_index_.loading_queue_.push_back(url_data);
- }
-
- void AddToLoading(UrlData* url_data) {
- url_index_.loading_.insert(url_data);
- }
-
UrlIndex url_index_;
};
@@ -166,30 +157,4 @@ TEST_F(UrlIndexTest, TryInsert) {
EXPECT_EQ(b, GetByUrl(url, UrlData::CORS_UNSPECIFIED));
}
-namespace {
-void SetBoolWhenCalled(bool* b) {
- *b = true;
-}
-}; // namespace
-
-TEST_F(UrlIndexTest, SetLoadingState) {
- bool called = false;
- GURL url_a("http://foo.bar.com");
- scoped_refptr<UrlData> a = GetByUrl(url_a, UrlData::CORS_UNSPECIFIED);
- AddToLoadQueue(a.get(), base::BindOnce(&SetBoolWhenCalled, &called));
- UrlData::UrlDataWithLoadingState url_data_with_loading_state;
- url_data_with_loading_state.SetUrlData(a);
- base::RunLoop().RunUntilIdle();
- EXPECT_FALSE(called);
- url_data_with_loading_state.SetLoadingState(
- UrlData::UrlDataWithLoadingState::LoadingState::kPreload);
- AddToLoading(a.get());
- base::RunLoop().RunUntilIdle();
- EXPECT_FALSE(called);
- url_data_with_loading_state.SetLoadingState(
- UrlData::UrlDataWithLoadingState::LoadingState::kHasPlayed);
- base::RunLoop().RunUntilIdle();
- EXPECT_TRUE(called);
-}
-
} // namespace media
diff --git a/chromium/media/blink/video_frame_compositor.cc b/chromium/media/blink/video_frame_compositor.cc
index 213e7c359ff..e2782a00505 100644
--- a/chromium/media/blink/video_frame_compositor.cc
+++ b/chromium/media/blink/video_frame_compositor.cc
@@ -28,30 +28,20 @@ VideoFrameCompositor::VideoFrameCompositor(
std::unique_ptr<blink::WebVideoFrameSubmitter> submitter)
: task_runner_(task_runner),
tick_clock_(base::DefaultTickClock::GetInstance()),
- background_rendering_enabled_(true),
background_rendering_timer_(
FROM_HERE,
base::TimeDelta::FromMilliseconds(kBackgroundRenderingTimeoutMs),
base::Bind(&VideoFrameCompositor::BackgroundRender,
base::Unretained(this))),
- client_(nullptr),
- rendering_(false),
- rendered_last_frame_(false),
- is_background_rendering_(false),
- new_background_frame_(false),
- // Assume 60Hz before the first UpdateCurrentFrame() call.
- last_interval_(base::TimeDelta::FromSecondsD(1.0 / 60)),
- callback_(nullptr),
submitter_(std::move(submitter)),
weak_ptr_factory_(this) {
- background_rendering_timer_.SetTaskRunner(task_runner_);
- if (submitter_.get()) {
+ if (submitter_) {
task_runner_->PostTask(
FROM_HERE, base::BindOnce(&VideoFrameCompositor::InitializeSubmitter,
weak_ptr_factory_.GetWeakPtr()));
- update_submission_state_callback_ = media::BindToLoop(
+ update_submission_state_callback_ = BindToLoop(
task_runner_,
- base::BindRepeating(&VideoFrameCompositor::UpdateSubmissionState,
+ base::BindRepeating(&VideoFrameCompositor::SetIsSurfaceVisible,
weak_ptr_factory_.GetWeakPtr()));
}
}
@@ -61,9 +51,9 @@ VideoFrameCompositor::GetUpdateSubmissionStateCallback() {
return update_submission_state_callback_;
}
-void VideoFrameCompositor::UpdateSubmissionState(bool is_visible) {
+void VideoFrameCompositor::SetIsSurfaceVisible(bool is_visible) {
DCHECK(task_runner_->BelongsToCurrentThread());
- submitter_->UpdateSubmissionState(is_visible);
+ submitter_->SetIsSurfaceVisible(is_visible);
}
void VideoFrameCompositor::InitializeSubmitter() {
@@ -82,10 +72,8 @@ VideoFrameCompositor::~VideoFrameCompositor() {
void VideoFrameCompositor::EnableSubmission(
const viz::SurfaceId& id,
base::TimeTicks local_surface_id_allocation_time,
- media::VideoRotation rotation,
- bool force_submit,
- bool is_opaque,
- blink::WebFrameSinkDestroyedCallback frame_sink_destroyed_callback) {
+ VideoRotation rotation,
+ bool force_submit) {
DCHECK(task_runner_->BelongsToCurrentThread());
// If we're switching to |submitter_| from some other client, then tell it.
@@ -94,9 +82,7 @@ void VideoFrameCompositor::EnableSubmission(
submitter_->SetRotation(rotation);
submitter_->SetForceSubmit(force_submit);
- submitter_->SetIsOpaque(is_opaque);
- submitter_->EnableSubmission(id, local_surface_id_allocation_time,
- std::move(frame_sink_destroyed_callback));
+ submitter_->EnableSubmission(id, local_surface_id_allocation_time);
client_ = submitter_.get();
if (rendering_)
client_->StartRendering();
@@ -287,6 +273,22 @@ bool VideoFrameCompositor::ProcessNewFrame(
return true;
}
+void VideoFrameCompositor::UpdateRotation(VideoRotation rotation) {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ submitter_->SetRotation(rotation);
+}
+
+void VideoFrameCompositor::SetIsPageVisible(bool is_visible) {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ if (submitter_)
+ submitter_->SetIsPageVisible(is_visible);
+}
+
+void VideoFrameCompositor::SetForceSubmit(bool force_submit) {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ submitter_->SetForceSubmit(force_submit);
+}
+
void VideoFrameCompositor::BackgroundRender() {
DCHECK(task_runner_->BelongsToCurrentThread());
const base::TimeTicks now = tick_clock_->NowTicks();
@@ -337,22 +339,5 @@ bool VideoFrameCompositor::CallRender(base::TimeTicks deadline_min,
return new_frame || had_new_background_frame;
}
-void VideoFrameCompositor::UpdateRotation(media::VideoRotation rotation) {
- DCHECK(task_runner_->BelongsToCurrentThread());
-
- submitter_->SetRotation(rotation);
-}
-
-void VideoFrameCompositor::SetForceSubmit(bool force_submit) {
- DCHECK(task_runner_->BelongsToCurrentThread());
-
- submitter_->SetForceSubmit(force_submit);
-}
-
-void VideoFrameCompositor::UpdateIsOpaque(bool is_opaque) {
- DCHECK(task_runner_->BelongsToCurrentThread());
-
- submitter_->SetIsOpaque(is_opaque);
-}
} // namespace media
diff --git a/chromium/media/blink/video_frame_compositor.h b/chromium/media/blink/video_frame_compositor.h
index 287d7583a5b..7626381433f 100644
--- a/chromium/media/blink/video_frame_compositor.h
+++ b/chromium/media/blink/video_frame_compositor.h
@@ -81,10 +81,8 @@ class MEDIA_BLINK_EXPORT VideoFrameCompositor : public VideoRendererSink,
virtual void EnableSubmission(
const viz::SurfaceId& id,
base::TimeTicks local_surface_id_allocation_time,
- media::VideoRotation rotation,
- bool force_submit,
- bool is_opaque,
- blink::WebFrameSinkDestroyedCallback frame_sink_destroyed_callback);
+ VideoRotation rotation,
+ bool force_submit);
// cc::VideoFrameProvider implementation. These methods must be called on the
// |task_runner_|.
@@ -128,13 +126,13 @@ class MEDIA_BLINK_EXPORT VideoFrameCompositor : public VideoRendererSink,
virtual void SetOnNewProcessedFrameCallback(OnNewProcessedFrameCB cb);
// Updates the rotation information for frames given to |submitter_|.
- void UpdateRotation(media::VideoRotation rotation);
+ void UpdateRotation(VideoRotation rotation);
- // Notifies the |submitter_| that the frames must be submitted.
- void SetForceSubmit(bool);
+ // Should be called when page visibility changes. Notifies |submitter_|.
+ virtual void SetIsPageVisible(bool is_visible);
- // Updates the opacity inforamtion for frames given to |submitter_|.
- void UpdateIsOpaque(bool);
+ // Notifies the |submitter_| that the frames must be submitted.
+ void SetForceSubmit(bool force_submit);
void set_tick_clock_for_testing(const base::TickClock* tick_clock) {
tick_clock_ = tick_clock;
@@ -157,12 +155,12 @@ class MEDIA_BLINK_EXPORT VideoFrameCompositor : public VideoRendererSink,
// TracingCategory name for |auto_open_close_|.
static constexpr const char kTracingCategory[] = "media,rail";
- // Ran on the |task_runner_| to initalize |submitter_|;
+ // Ran on the |task_runner_| to initialize |submitter_|;
void InitializeSubmitter();
- // Signals the VideoFrameSubmitter to stop submitting frames. |is_visible|
- // indicates whether or not the consumer of the frames is (probably) visible.
- void UpdateSubmissionState(bool is_visible);
+ // Signals the VideoFrameSubmitter to stop submitting frames. Sets whether the
+ // video surface is visible within the view port.
+ void SetIsSurfaceVisible(bool is_visible);
// Indicates whether the endpoint for the VideoFrame exists.
bool IsClientSinkAvailable();
@@ -197,7 +195,7 @@ class MEDIA_BLINK_EXPORT VideoFrameCompositor : public VideoRendererSink,
const base::TickClock* tick_clock_;
// Allows tests to disable the background rendering task.
- bool background_rendering_enabled_;
+ bool background_rendering_enabled_ = true;
// Manages UpdateCurrentFrame() callbacks if |client_| has stopped sending
// them for various reasons. Runs on |task_runner_| and is reset
@@ -205,12 +203,15 @@ class MEDIA_BLINK_EXPORT VideoFrameCompositor : public VideoRendererSink,
base::RetainingOneShotTimer background_rendering_timer_;
// These values are only set and read on the compositor thread.
- cc::VideoFrameProvider::Client* client_;
- bool rendering_;
- bool rendered_last_frame_;
- bool is_background_rendering_;
- bool new_background_frame_;
- base::TimeDelta last_interval_;
+ cc::VideoFrameProvider::Client* client_ = nullptr;
+ bool rendering_ = false;
+ bool rendered_last_frame_ = false;
+ bool is_background_rendering_ = false;
+ bool new_background_frame_ = false;
+
+ // Assume 60Hz before the first UpdateCurrentFrame() call.
+ base::TimeDelta last_interval_ = base::TimeDelta::FromSecondsD(1.0 / 60);
+
base::TimeTicks last_background_render_;
OnNewProcessedFrameCB new_processed_frame_cb_;
cc::UpdateSubmissionStateCB update_submission_state_callback_;
@@ -222,7 +223,8 @@ class MEDIA_BLINK_EXPORT VideoFrameCompositor : public VideoRendererSink,
// These values are updated and read from the media and compositor threads.
base::Lock callback_lock_;
- VideoRendererSink::RenderCallback* callback_ GUARDED_BY(callback_lock_);
+ VideoRendererSink::RenderCallback* callback_ GUARDED_BY(callback_lock_) =
+ nullptr;
// AutoOpenCloseEvent for begin/end events.
std::unique_ptr<base::trace_event::AutoOpenCloseEvent<kTracingCategory>>
diff --git a/chromium/media/blink/video_frame_compositor_unittest.cc b/chromium/media/blink/video_frame_compositor_unittest.cc
index 003e9b0a5a6..a867eca9fc1 100644
--- a/chromium/media/blink/video_frame_compositor_unittest.cc
+++ b/chromium/media/blink/video_frame_compositor_unittest.cc
@@ -28,17 +28,14 @@ class MockWebVideoFrameSubmitter : public blink::WebVideoFrameSubmitter {
public:
// blink::WebVideoFrameSubmitter implementation.
void StopUsingProvider() override {}
- MOCK_METHOD3(EnableSubmission,
- void(viz::SurfaceId,
- base::TimeTicks,
- blink::WebFrameSinkDestroyedCallback));
+ MOCK_METHOD2(EnableSubmission, void(viz::SurfaceId, base::TimeTicks));
MOCK_METHOD0(StartRendering, void());
MOCK_METHOD0(StopRendering, void());
MOCK_CONST_METHOD0(IsDrivingFrameUpdates, bool(void));
MOCK_METHOD1(Initialize, void(cc::VideoFrameProvider*));
MOCK_METHOD1(SetRotation, void(media::VideoRotation));
- MOCK_METHOD1(SetIsOpaque, void(bool));
- MOCK_METHOD1(UpdateSubmissionState, void(bool));
+ MOCK_METHOD1(SetIsSurfaceVisible, void(bool));
+ MOCK_METHOD1(SetIsPageVisible, void(bool));
MOCK_METHOD1(SetForceSubmit, void(bool));
void DidReceiveFrame() override { ++did_receive_frame_count_; }
@@ -77,11 +74,10 @@ class VideoFrameCompositorTest : public VideoRendererSink::RenderCallback,
EXPECT_CALL(*submitter_,
SetRotation(Eq(media::VideoRotation::VIDEO_ROTATION_90)));
EXPECT_CALL(*submitter_, SetForceSubmit(false));
- EXPECT_CALL(*submitter_, EnableSubmission(Eq(viz::SurfaceId()), _, _));
- EXPECT_CALL(*submitter_, SetIsOpaque(true));
+ EXPECT_CALL(*submitter_, EnableSubmission(Eq(viz::SurfaceId()), _));
compositor_->EnableSubmission(viz::SurfaceId(), base::TimeTicks(),
media::VideoRotation::VIDEO_ROTATION_90,
- false, true, base::BindRepeating([] {}));
+ false);
}
compositor_->set_tick_clock_for_testing(&tick_clock_);
@@ -150,6 +146,32 @@ TEST_P(VideoFrameCompositorTest, InitialValues) {
EXPECT_FALSE(compositor()->GetCurrentFrame().get());
}
+TEST_P(VideoFrameCompositorTest, SetIsSurfaceVisible) {
+ if (!IsSurfaceLayerForVideoEnabled())
+ return;
+
+ auto cb = compositor()->GetUpdateSubmissionStateCallback();
+
+ EXPECT_CALL(*submitter_, SetIsSurfaceVisible(true));
+ cb.Run(true);
+ base::RunLoop().RunUntilIdle();
+
+ EXPECT_CALL(*submitter_, SetIsSurfaceVisible(false));
+ cb.Run(false);
+ base::RunLoop().RunUntilIdle();
+}
+
+TEST_P(VideoFrameCompositorTest, SetIsPageVisible) {
+ if (!IsSurfaceLayerForVideoEnabled())
+ return;
+
+ EXPECT_CALL(*submitter_, SetIsPageVisible(true));
+ compositor()->SetIsPageVisible(true);
+
+ EXPECT_CALL(*submitter_, SetIsPageVisible(false));
+ compositor()->SetIsPageVisible(false);
+}
+
TEST_P(VideoFrameCompositorTest, PaintSingleFrame) {
scoped_refptr<VideoFrame> expected = VideoFrame::CreateEOSFrame();
diff --git a/chromium/media/blink/webaudiosourceprovider_impl_unittest.cc b/chromium/media/blink/webaudiosourceprovider_impl_unittest.cc
index 18628a537be..21955a34cda 100644
--- a/chromium/media/blink/webaudiosourceprovider_impl_unittest.cc
+++ b/chromium/media/blink/webaudiosourceprovider_impl_unittest.cc
@@ -9,7 +9,7 @@
#include "base/run_loop.h"
#include "media/base/audio_parameters.h"
#include "media/base/fake_audio_render_callback.h"
-#include "media/base/media_log.h"
+#include "media/base/media_util.h"
#include "media/base/mock_audio_renderer_sink.h"
#include "media/blink/webaudiosourceprovider_impl.h"
#include "testing/gmock/include/gmock/gmock.h"
@@ -106,7 +106,7 @@ class WebAudioSourceProviderImplTest
protected:
AudioParameters params_;
FakeAudioRenderCallback fake_callback_;
- MediaLog media_log_;
+ NullMediaLog media_log_;
scoped_refptr<MockAudioRendererSink> mock_sink_;
scoped_refptr<WebAudioSourceProviderImpl> wasp_impl_;
diff --git a/chromium/media/blink/webcontentdecryptionmoduleaccess_impl.cc b/chromium/media/blink/webcontentdecryptionmoduleaccess_impl.cc
index 85597e7ee2e..fdc2b253959 100644
--- a/chromium/media/blink/webcontentdecryptionmoduleaccess_impl.cc
+++ b/chromium/media/blink/webcontentdecryptionmoduleaccess_impl.cc
@@ -71,14 +71,15 @@ WebContentDecryptionModuleAccessImpl::GetConfiguration() {
}
void WebContentDecryptionModuleAccessImpl::CreateContentDecryptionModule(
- blink::WebContentDecryptionModuleResult result) {
+ blink::WebContentDecryptionModuleResult result,
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner) {
// This method needs to run asynchronously, as it may need to load the CDM.
// As this object's lifetime is controlled by MediaKeySystemAccess on the
// blink side, copy all values needed by CreateCdm() in case the blink object
// gets garbage-collected.
std::unique_ptr<blink::WebContentDecryptionModuleResult> result_copy(
new blink::WebContentDecryptionModuleResult(result));
- base::ThreadTaskRunnerHandle::Get()->PostTask(
+ task_runner->PostTask(
FROM_HERE,
base::BindOnce(&CreateCdm, client_, key_system_, security_origin_,
cdm_config_, base::Passed(&result_copy)));
diff --git a/chromium/media/blink/webcontentdecryptionmoduleaccess_impl.h b/chromium/media/blink/webcontentdecryptionmoduleaccess_impl.h
index b01e186fc32..821d1f39e32 100644
--- a/chromium/media/blink/webcontentdecryptionmoduleaccess_impl.h
+++ b/chromium/media/blink/webcontentdecryptionmoduleaccess_impl.h
@@ -33,7 +33,8 @@ class WebContentDecryptionModuleAccessImpl
blink::WebString GetKeySystem() override;
blink::WebMediaKeySystemConfiguration GetConfiguration() override;
void CreateContentDecryptionModule(
- blink::WebContentDecryptionModuleResult result) override;
+ blink::WebContentDecryptionModuleResult result,
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner) override;
private:
WebContentDecryptionModuleAccessImpl(
diff --git a/chromium/media/blink/webmediacapabilitiesclient_impl.cc b/chromium/media/blink/webmediacapabilitiesclient_impl.cc
index 886ede236c6..e4993cb999a 100644
--- a/chromium/media/blink/webmediacapabilitiesclient_impl.cc
+++ b/chromium/media/blink/webmediacapabilitiesclient_impl.cc
@@ -9,9 +9,9 @@
#include "base/bind_helpers.h"
#include "media/base/audio_codecs.h"
-#include "media/base/decode_capabilities.h"
#include "media/base/key_system_names.h"
#include "media/base/mime_util.h"
+#include "media/base/supported_types.h"
#include "media/base/video_codecs.h"
#include "media/base/video_color_space.h"
#include "media/blink/webcontentdecryptionmoduleaccess_impl.h"
@@ -56,8 +56,7 @@ bool CheckAudioSupport(const blink::WebAudioConfiguration& audio_config) {
<< audio_config.codec.Ascii();
audio_supported = false;
} else {
- AudioConfig audio_config = {audio_codec};
- audio_supported = IsSupportedAudioConfig(audio_config);
+ audio_supported = IsSupportedAudioType({audio_codec});
}
return audio_supported;
@@ -84,7 +83,7 @@ bool CheckVideoSupport(const blink::WebVideoConfiguration& video_config,
<< video_config.codec.Ascii();
video_supported = false;
} else {
- video_supported = IsSupportedVideoConfig(
+ video_supported = IsSupportedVideoType(
{video_codec, *out_video_profile, video_level, video_color_space});
}
diff --git a/chromium/media/blink/webmediaplayer_cast_android.cc b/chromium/media/blink/webmediaplayer_cast_android.cc
index 4244e07339e..898ab6c16bf 100644
--- a/chromium/media/blink/webmediaplayer_cast_android.cc
+++ b/chromium/media/blink/webmediaplayer_cast_android.cc
@@ -15,6 +15,7 @@
#include "third_party/blink/public/web/web_document.h"
#include "third_party/blink/public/web/web_local_frame.h"
#include "third_party/skia/include/core/SkCanvas.h"
+#include "third_party/skia/include/core/SkFont.h"
#include "third_party/skia/include/core/SkFontStyle.h"
#include "third_party/skia/include/core/SkPaint.h"
#include "third_party/skia/include/core/SkTypeface.h"
@@ -61,20 +62,21 @@ scoped_refptr<VideoFrame> MakeTextFrameForCast(
const SkScalar kMinPadding(40);
SkPaint paint;
- paint.setAntiAlias(true);
- paint.setFilterQuality(kHigh_SkFilterQuality);
paint.setColor(SK_ColorWHITE);
- paint.setTypeface(SkTypeface::MakeFromName("sans", SkFontStyle::Bold()));
- paint.setTextSize(kTextSize);
+
+ SkFont font;
+ font.setTypeface(SkTypeface::MakeFromName("sans", SkFontStyle::Bold()));
+ font.setSize(kTextSize);
// Calculate the vertical margin from the top
SkFontMetrics font_metrics;
- paint.getFontMetrics(&font_metrics);
+ font.getMetrics(&font_metrics);
SkScalar sk_vertical_margin = kMinPadding - font_metrics.fAscent;
// Measure the width of the entire text to display
- size_t display_text_width = paint.measureText(remote_playback_message.c_str(),
- remote_playback_message.size());
+ size_t display_text_width =
+ font.measureText(remote_playback_message.c_str(),
+ remote_playback_message.size(), kUTF8_SkTextEncoding);
std::string display_text(remote_playback_message);
if (display_text_width + (kMinPadding * 2) > canvas_size.width()) {
@@ -83,31 +85,33 @@ scoped_refptr<VideoFrame> MakeTextFrameForCast(
// First, figure out how much of the canvas the '...' will take up.
const std::string kTruncationEllipsis("\xE2\x80\xA6");
- SkScalar sk_ellipse_width = paint.measureText(kTruncationEllipsis.c_str(),
- kTruncationEllipsis.size());
+ SkScalar sk_ellipse_width =
+ font.measureText(kTruncationEllipsis.c_str(),
+ kTruncationEllipsis.size(), kUTF8_SkTextEncoding);
// Then calculate how much of the text can be drawn with the '...' appended
// to the end of the string.
SkScalar sk_max_original_text_width(canvas_size.width() -
(kMinPadding * 2) - sk_ellipse_width);
- size_t sk_max_original_text_length = paint.breakText(
+ size_t sk_max_original_text_length = font.breakText(
remote_playback_message.c_str(), remote_playback_message.size(),
- sk_max_original_text_width);
+ kUTF8_SkTextEncoding, sk_max_original_text_width);
// Remove the part of the string that doesn't fit and append '...'.
display_text.erase(
sk_max_original_text_length,
remote_playback_message.size() - sk_max_original_text_length);
display_text.append(kTruncationEllipsis);
- display_text_width =
- paint.measureText(display_text.c_str(), display_text.size());
+ display_text_width = font.measureText(
+ display_text.c_str(), display_text.size(), kUTF8_SkTextEncoding);
}
// Center the text horizontally.
SkScalar sk_horizontal_margin =
(canvas_size.width() - display_text_width) / 2.0;
- canvas.drawText(display_text.c_str(), display_text.size(),
- sk_horizontal_margin, sk_vertical_margin, paint);
+ canvas.drawSimpleText(display_text.c_str(), display_text.size(),
+ kUTF8_SkTextEncoding, sk_horizontal_margin,
+ sk_vertical_margin, font, paint);
GLES2Interface* gl = context_3d_cb.Run();
diff --git a/chromium/media/blink/webmediaplayer_cast_android.h b/chromium/media/blink/webmediaplayer_cast_android.h
index 01d7ce0f5b1..ae69e574371 100644
--- a/chromium/media/blink/webmediaplayer_cast_android.h
+++ b/chromium/media/blink/webmediaplayer_cast_android.h
@@ -68,7 +68,7 @@ class WebMediaPlayerCast : public RendererMediaPlayerInterface {
void OnTimeUpdate(base::TimeDelta current_timestamp,
base::TimeTicks current_time_ticks) override;
- // void OnWaitingForDecryptionKey() override;
+ // void OnWaiting(WaitingReason reason) override;
void OnPlayerReleased() override;
// Functions called when media player status changes.
diff --git a/chromium/media/blink/webmediaplayer_delegate.h b/chromium/media/blink/webmediaplayer_delegate.h
index 906d4c9735d..af33ac9e1d7 100644
--- a/chromium/media/blink/webmediaplayer_delegate.h
+++ b/chromium/media/blink/webmediaplayer_delegate.h
@@ -186,10 +186,6 @@ class WebMediaPlayerDelegate {
int player_id,
blink::WebFullscreenVideoStatus fullscreen_video_status) = 0;
- // Returns |true| if player should be suspended automatically when tab is
- // in background.
- virtual bool IsBackgroundMediaSuspendEnabled() = 0;
-
protected:
WebMediaPlayerDelegate() = default;
virtual ~WebMediaPlayerDelegate() = default;
diff --git a/chromium/media/blink/webmediaplayer_impl.cc b/chromium/media/blink/webmediaplayer_impl.cc
index fc7eef41e12..a25744ab843 100644
--- a/chromium/media/blink/webmediaplayer_impl.cc
+++ b/chromium/media/blink/webmediaplayer_impl.cc
@@ -54,7 +54,9 @@
#include "media/blink/webmediasource_impl.h"
#include "media/filters/chunk_demuxer.h"
#include "media/filters/ffmpeg_demuxer.h"
+#include "media/filters/memory_data_source.h"
#include "media/media_buildflags.h"
+#include "net/base/data_url.h"
#include "third_party/blink/public/common/picture_in_picture/picture_in_picture_control_info.h"
#include "third_party/blink/public/platform/web_encrypted_media_types.h"
#include "third_party/blink/public/platform/web_localized_string.h"
@@ -99,7 +101,7 @@ void SetSinkIdOnMediaThread(scoped_refptr<WebAudioSourceProviderImpl> sink,
sink->SwitchOutputDevice(device_id, std::move(callback));
}
-bool IsBackgroundSuspendEnabled(WebMediaPlayerDelegate* delegate) {
+bool IsBackgroundSuspendEnabled(const WebMediaPlayerImpl* wmpi) {
// TODO(crbug.com/867146): remove these switches.
if (base::CommandLine::ForCurrentProcess()->HasSwitch(
switches::kDisableMediaSuspend))
@@ -108,7 +110,7 @@ bool IsBackgroundSuspendEnabled(WebMediaPlayerDelegate* delegate) {
switches::kEnableMediaSuspend))
return true;
- return delegate->IsBackgroundMediaSuspendEnabled();
+ return wmpi->IsBackgroundMediaSuspendEnabled();
}
bool IsResumeBackgroundVideosEnabled() {
@@ -210,6 +212,70 @@ bool IsLocalFile(const GURL& url) {
}
#endif
+// Handles destruction of media::Renderer dependent components after the
+// renderer has been destructed on the media thread.
+void DestructionHelper(
+ scoped_refptr<base::SingleThreadTaskRunner> main_task_runner,
+ scoped_refptr<base::SingleThreadTaskRunner> vfc_task_runner,
+ std::unique_ptr<Demuxer> demuxer,
+ std::unique_ptr<DataSource> data_source,
+ std::unique_ptr<VideoFrameCompositor> compositor,
+ std::unique_ptr<CdmContextRef> cdm_context_1,
+ std::unique_ptr<CdmContextRef> cdm_context_2,
+ std::unique_ptr<MediaLog> media_log,
+ std::unique_ptr<RendererFactorySelector> renderer_factory_selector,
+ std::unique_ptr<blink::WebSurfaceLayerBridge> bridge,
+ bool is_chunk_demuxer) {
+ // We release |bridge| after pipeline stop to ensure layout tests receive
+ // painted video frames before test harness exit.
+ main_task_runner->DeleteSoon(FROM_HERE, std::move(bridge));
+
+ // Since the media::Renderer is gone we can now destroy the compositor and
+ // renderer factory selector.
+ vfc_task_runner->DeleteSoon(FROM_HERE, std::move(compositor));
+ main_task_runner->DeleteSoon(FROM_HERE, std::move(renderer_factory_selector));
+
+ // ChunkDemuxer can be deleted on any thread, but other demuxers are bound to
+ // the main thread and must be deleted there now that the renderer is gone.
+ if (!is_chunk_demuxer) {
+ main_task_runner->DeleteSoon(FROM_HERE, std::move(demuxer));
+ main_task_runner->DeleteSoon(FROM_HERE, std::move(data_source));
+ main_task_runner->DeleteSoon(FROM_HERE, std::move(cdm_context_1));
+ main_task_runner->DeleteSoon(FROM_HERE, std::move(cdm_context_2));
+ main_task_runner->DeleteSoon(FROM_HERE, std::move(media_log));
+ return;
+ }
+
+ // ChunkDemuxer's streams may contain much buffered, compressed media that
+ // may need to be paged back in during destruction. Paging delay may exceed
+ // the renderer hang monitor's threshold on at least Windows while also
+ // blocking other work on the renderer main thread, so we do the actual
+ // destruction in the background without blocking WMPI destruction or
+ // |task_runner|. On advice of task_scheduler OWNERS, MayBlock() is not
+ // used because virtual memory overhead is not considered blocking I/O; and
+ // CONTINUE_ON_SHUTDOWN is used to allow process termination to not block on
+ // completing the task.
+ base::PostTaskWithTraits(
+ FROM_HERE,
+ {base::TaskPriority::BEST_EFFORT,
+ base::TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN},
+ base::BindOnce(
+ [](scoped_refptr<base::SingleThreadTaskRunner> main_task_runner,
+ std::unique_ptr<Demuxer> demuxer_to_destroy,
+ std::unique_ptr<CdmContextRef> cdm_context_1,
+ std::unique_ptr<CdmContextRef> cdm_context_2,
+ std::unique_ptr<MediaLog> media_log) {
+ SCOPED_UMA_HISTOGRAM_TIMER("Media.MSE.DemuxerDestructionTime");
+ demuxer_to_destroy.reset();
+ main_task_runner->DeleteSoon(FROM_HERE, std::move(cdm_context_1));
+ main_task_runner->DeleteSoon(FROM_HERE, std::move(cdm_context_2));
+ main_task_runner->DeleteSoon(FROM_HERE, std::move(media_log));
+ },
+ std::move(main_task_runner), std::move(demuxer),
+ std::move(cdm_context_1), std::move(cdm_context_2),
+ std::move(media_log)));
+}
+
} // namespace
class BufferedDataSourceHostImpl;
@@ -272,7 +338,12 @@ WebMediaPlayerImpl::WebMediaPlayerImpl(
create_bridge_callback_(params->create_bridge_callback()),
request_routing_token_cb_(params->request_routing_token_cb()),
overlay_routing_token_(OverlayInfo::RoutingToken()),
- media_metrics_provider_(params->take_metrics_provider()) {
+ media_metrics_provider_(params->take_metrics_provider()),
+ is_background_suspend_enabled_(params->IsBackgroundSuspendEnabled()),
+ is_background_video_playback_enabled_(
+ params->IsBackgroundVideoPlaybackEnabled()),
+ is_background_video_track_optimization_supported_(
+ params->IsBackgroundVideoTrackOptimizationSupported()) {
DVLOG(1) << __func__;
DCHECK(adjust_allocated_memory_cb_);
DCHECK(renderer_factory_selector_);
@@ -284,9 +355,8 @@ WebMediaPlayerImpl::WebMediaPlayerImpl(
always_enable_overlays_ = base::CommandLine::ForCurrentProcess()->HasSwitch(
switches::kForceVideoOverlays);
- if (base::FeatureList::IsEnabled(media::kOverlayFullscreenVideo)) {
- bool use_android_overlay =
- base::FeatureList::IsEnabled(media::kUseAndroidOverlay);
+ if (base::FeatureList::IsEnabled(kOverlayFullscreenVideo)) {
+ bool use_android_overlay = base::FeatureList::IsEnabled(kUseAndroidOverlay);
overlay_mode_ = use_android_overlay ? OverlayMode::kUseAndroidOverlay
: OverlayMode::kUseContentVideoView;
} else {
@@ -360,6 +430,11 @@ WebMediaPlayerImpl::~WebMediaPlayerImpl() {
watch_time_reporter_.reset();
// The underlying Pipeline must be stopped before it is destroyed.
+ //
+ // Note: This destruction happens synchronously on the media thread and
+ // |demuxer_|, |data_source_|, |compositor_|, and |media_log_| must outlive
+ // this process. They will be destructed by the DestructionHelper below
+ // after trampolining through the media thread.
pipeline_controller_.Stop();
if (last_reported_memory_usage_)
@@ -371,48 +446,41 @@ WebMediaPlayerImpl::~WebMediaPlayerImpl() {
client_->MediaRemotingStopped(
blink::WebLocalizedString::kMediaRemotingStopNoText);
- if (!surface_layer_for_video_enabled_ && video_layer_) {
+ if (!surface_layer_for_video_enabled_ && video_layer_)
video_layer_->StopUsingProvider();
- }
-
- vfc_task_runner_->DeleteSoon(FROM_HERE, std::move(compositor_));
-
- if (chunk_demuxer_) {
- // Continue destruction of |chunk_demuxer_| on the |media_task_runner_| to
- // avoid racing other pending tasks on |chunk_demuxer_| on that runner while
- // not further blocking |main_task_runner_| to perform the destruction.
- media_task_runner_->PostTask(
- FROM_HERE, base::BindOnce(&WebMediaPlayerImpl::DemuxerDestructionHelper,
- media_task_runner_, std::move(demuxer_)));
- }
media_log_->AddEvent(
media_log_->CreateEvent(MediaLogEvent::WEBMEDIAPLAYER_DESTROYED));
-}
-// static
-void WebMediaPlayerImpl::DemuxerDestructionHelper(
- scoped_refptr<base::SingleThreadTaskRunner> task_runner,
- std::unique_ptr<Demuxer> demuxer) {
- DCHECK(task_runner->BelongsToCurrentThread());
- // ChunkDemuxer's streams may contain much buffered, compressed media that may
- // need to be paged back in during destruction. Paging delay may exceed the
- // renderer hang monitor's threshold on at least Windows while also blocking
- // other work on the renderer main thread, so we do the actual destruction in
- // the background without blocking WMPI destruction or |task_runner|. On
- // advice of task_scheduler OWNERS, MayBlock() is not used because virtual
- // memory overhead is not considered blocking I/O; and CONTINUE_ON_SHUTDOWN is
- // used to allow process termination to not block on completing the task.
- base::PostTaskWithTraits(
+ if (data_source_)
+ data_source_->Stop();
+
+ // Disconnect from the surface layer. We still preserve the |bridge_| until
+ // after pipeline shutdown to ensure any pending frames are painted for tests.
+ if (bridge_)
+ bridge_->ClearObserver();
+
+ // Disconnect from the MediaObserver implementation since it's lifetime is
+ // tied to the RendererFactorySelector which can't be destroyed until after
+ // the Pipeline stops.
+ //
+ // Note: We can't use a WeakPtr with the RendererFactory because its methods
+ // are called on the media thread and this destruction takes place on the
+ // renderer thread.
+ if (observer_)
+ observer_->SetClient(nullptr);
+
+ // Handle destruction of things that need to be destructed after the pipeline
+ // completes stopping on the media thread.
+ media_task_runner_->PostTask(
FROM_HERE,
- {base::TaskPriority::BEST_EFFORT,
- base::TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN},
- base::BindOnce(
- [](std::unique_ptr<Demuxer> demuxer_to_destroy) {
- SCOPED_UMA_HISTOGRAM_TIMER("Media.MSE.DemuxerDestructionTime");
- demuxer_to_destroy.reset();
- },
- std::move(demuxer)));
+ base::BindOnce(&DestructionHelper, std::move(main_task_runner_),
+ std::move(vfc_task_runner_), std::move(demuxer_),
+ std::move(data_source_), std::move(compositor_),
+ std::move(cdm_context_ref_),
+ std::move(pending_cdm_context_ref_), std::move(media_log_),
+ std::move(renderer_factory_selector_), std::move(bridge_),
+ !!chunk_demuxer_));
}
WebMediaPlayer::LoadTiming WebMediaPlayerImpl::Load(
@@ -584,6 +652,12 @@ void WebMediaPlayerImpl::OnDisplayTypeChanged(
break;
case WebMediaPlayer::DisplayType::kPictureInPicture:
watch_time_reporter_->OnDisplayTypePictureInPicture();
+
+ // Resumes playback if it was paused when hidden.
+ if (paused_when_hidden_) {
+ paused_when_hidden_ = false;
+ OnPlay();
+ }
break;
}
}
@@ -595,8 +669,11 @@ void WebMediaPlayerImpl::DoLoad(LoadType load_type,
DVLOG(1) << __func__;
DCHECK(main_task_runner_->BelongsToCurrentThread());
- GURL gurl(url);
- ReportMetrics(load_type, gurl, *frame_, media_log_.get());
+ // Note: |url| may be very large, take care when making copies.
+ loaded_url_ = GURL(url);
+ load_type_ = load_type;
+
+ ReportMetrics(load_type, loaded_url_, *frame_, media_log_.get());
// Report poster availability for SRC=.
if (load_type == kLoadTypeURL) {
@@ -607,16 +684,11 @@ void WebMediaPlayerImpl::DoLoad(LoadType load_type,
}
}
- // Set subresource URL for crash reporting.
+ // Set subresource URL for crash reporting; will be truncated to 256 bytes.
static base::debug::CrashKeyString* subresource_url =
base::debug::AllocateCrashKeyString("subresource_url",
base::debug::CrashKeySize::Size256);
- base::debug::SetCrashKeyString(subresource_url, gurl.spec());
-
- // Used for HLS playback.
- loaded_url_ = gurl;
-
- load_type_ = load_type;
+ base::debug::SetCrashKeyString(subresource_url, loaded_url_.spec());
SetNetworkState(WebMediaPlayer::kNetworkStateLoading);
SetReadyState(WebMediaPlayer::kReadyStateHaveNothing);
@@ -632,18 +704,43 @@ void WebMediaPlayerImpl::DoLoad(LoadType load_type,
if (load_type == kLoadTypeMediaSource) {
StartPipeline();
} else {
+ // Short circuit the more complex loading path for data:// URLs. Sending
+ // them through the network based loading path just wastes memory and causes
+ // worse performance since reads become asynchronous.
+ if (loaded_url_.SchemeIs(url::kDataScheme)) {
+ std::string mime_type, charset, data;
+ if (!net::DataURL::Parse(loaded_url_, &mime_type, &charset, &data)) {
+ DataSourceInitialized(false);
+ return;
+ }
+
+ // Replace |loaded_url_| with an empty data:// URL since it may be large.
+ loaded_url_ = GURL("data:,");
+
+ // Mark all the data as buffered.
+ buffered_data_source_host_.SetTotalBytes(data.size());
+ buffered_data_source_host_.AddBufferedByteRange(0, data.size());
+
+ DCHECK(!mb_data_source_);
+ data_source_.reset(new MemoryDataSource(std::move(data)));
+ DataSourceInitialized(true);
+ return;
+ }
+
auto url_data =
url_index_->GetByUrl(url, static_cast<UrlData::CorsMode>(cors_mode));
// Notify |this| of bytes received by the network.
url_data->AddBytesReceivedCallback(BindToCurrentLoop(base::BindRepeating(
&WebMediaPlayerImpl::OnBytesReceived, AsWeakPtr())));
- data_source_.reset(new MultibufferDataSource(
+ mb_data_source_ = new MultibufferDataSource(
main_task_runner_, std::move(url_data), media_log_.get(),
&buffered_data_source_host_,
- base::Bind(&WebMediaPlayerImpl::NotifyDownloading, AsWeakPtr())));
- data_source_->SetPreload(preload_);
- data_source_->SetIsClientAudioElement(client_->IsAudioElement());
- data_source_->Initialize(
+ base::BindRepeating(&WebMediaPlayerImpl::NotifyDownloading,
+ AsWeakPtr()));
+ data_source_.reset(mb_data_source_);
+ mb_data_source_->SetPreload(preload_);
+ mb_data_source_->SetIsClientAudioElement(client_->IsAudioElement());
+ mb_data_source_->Initialize(
base::Bind(&WebMediaPlayerImpl::DataSourceInitialized, AsWeakPtr()));
}
@@ -672,8 +769,8 @@ void WebMediaPlayerImpl::Play() {
pipeline_controller_.SetPlaybackRate(playback_rate_);
background_pause_timer_.Stop();
- if (data_source_)
- data_source_->MediaIsPlaying();
+ if (mb_data_source_)
+ mb_data_source_->MediaIsPlaying();
if (observer_)
observer_->OnPlaying();
@@ -819,8 +916,8 @@ void WebMediaPlayerImpl::SetRate(double rate) {
playback_rate_ = rate;
if (!paused_) {
pipeline_controller_.SetPlaybackRate(rate);
- if (data_source_)
- data_source_->MediaPlaybackRateChanged(rate);
+ if (mb_data_source_)
+ mb_data_source_->MediaPlaybackRateChanged(rate);
}
}
@@ -884,8 +981,8 @@ void WebMediaPlayerImpl::SetSinkId(
DCHECK(main_task_runner_->BelongsToCurrentThread());
DVLOG(1) << __func__;
- media::OutputDeviceStatusCB callback =
- media::ConvertToOutputDeviceStatusCB(std::move(web_callback));
+ OutputDeviceStatusCB callback =
+ ConvertToOutputDeviceStatusCB(std::move(web_callback));
media_task_runner_->PostTask(
FROM_HERE, base::BindOnce(&SetSinkIdOnMediaThread, audio_source_provider_,
sink_id.Utf8(), std::move(callback)));
@@ -901,8 +998,8 @@ void WebMediaPlayerImpl::SetPreload(WebMediaPlayer::Preload preload) {
DCHECK(main_task_runner_->BelongsToCurrentThread());
preload_ = static_cast<MultibufferDataSource::Preload>(preload);
- if (data_source_)
- data_source_->SetPreload(preload_);
+ if (mb_data_source_)
+ mb_data_source_->SetPreload(preload_);
}
bool WebMediaPlayerImpl::HasVideo() const {
@@ -973,6 +1070,10 @@ bool WebMediaPlayerImpl::Paused() const {
return pipeline_controller_.GetPlaybackRate() == 0.0f;
}
+bool WebMediaPlayerImpl::PausedWhenHidden() const {
+ return paused_when_hidden_;
+}
+
bool WebMediaPlayerImpl::Seeking() const {
DCHECK(main_task_runner_->BelongsToCurrentThread());
@@ -1088,7 +1189,8 @@ blink::WebTimeRanges WebMediaPlayerImpl::Seekable() const {
// Allow a special exception for seeks to zero for streaming sources with a
// finite duration; this allows looping to work.
- const bool is_finite_stream = data_source_ && data_source_->IsStreaming() &&
+ const bool is_finite_stream = mb_data_source_ &&
+ mb_data_source_->IsStreaming() &&
std::isfinite(seekable_end);
// Do not change the seekable range when using the MediaPlayerRenderer. It
@@ -1193,18 +1295,18 @@ bool WebMediaPlayerImpl::WouldTaintOrigin() const {
return true;
}
- if (!data_source_)
+ if (!mb_data_source_)
return false;
// When the resource is redirected to another origin we think it as
// tainted. This is actually not specified, and is under discussion.
// See https://github.com/whatwg/fetch/issues/737.
- if (!data_source_->HasSingleOrigin() &&
- data_source_->cors_mode() == UrlData::CORS_UNSPECIFIED) {
+ if (!mb_data_source_->HasSingleOrigin() &&
+ mb_data_source_->cors_mode() == UrlData::CORS_UNSPECIFIED) {
return true;
}
- return data_source_->IsCorsCrossOrigin();
+ return mb_data_source_->IsCorsCrossOrigin();
}
double WebMediaPlayerImpl::MediaTimeForTimeValue(double timeValue) const {
@@ -1509,6 +1611,9 @@ void WebMediaPlayerImpl::OnPipelineSeeked(bool time_updated) {
}
void WebMediaPlayerImpl::OnPipelineSuspended() {
+ // Add a log event so the player shows up as "SUSPENDED" in media-internals.
+ media_log_->AddEvent(media_log_->CreateEvent(MediaLogEvent::SUSPENDED));
+
#if defined(OS_ANDROID)
if (IsRemote() && !IsNewRemotePlaybackPipelineEnabled()) {
scoped_refptr<VideoFrame> frame = cast_impl_.GetCastingBanner();
@@ -1519,8 +1624,8 @@ void WebMediaPlayerImpl::OnPipelineSuspended() {
// Tell the data source we have enough data so that it may release the
// connection.
- if (data_source_)
- data_source_->OnBufferingHaveEnough(true);
+ if (mb_data_source_)
+ mb_data_source_->OnBufferingHaveEnough(true);
ReportMemoryUsage();
@@ -1605,19 +1710,45 @@ void WebMediaPlayerImpl::OnError(PipelineStatus status) {
return;
#if defined(OS_ANDROID)
- if (status == PipelineStatus::DEMUXER_ERROR_DETECTED_HLS) {
- demuxer_found_hls_ = true;
+ // |mb_data_source_| may be nullptr if someone passes in a m3u8 as a data://
+ // URL, since MediaPlayer doesn't support data:// URLs, fail playback now.
+ const bool found_hls = status == PipelineStatus::DEMUXER_ERROR_DETECTED_HLS;
+ if (found_hls && mb_data_source_) {
+ demuxer_found_hls_ = found_hls;
renderer_factory_selector_->SetUseMediaPlayer(true);
+ loaded_url_ = mb_data_source_->GetUrlAfterRedirects();
+ DCHECK(data_source_);
+ data_source_->Stop();
+ mb_data_source_ = nullptr;
+
pipeline_controller_.Stop();
SetMemoryReportingState(false);
- main_task_runner_->PostTask(
+ // Trampoline through the media task runner to destruct the demuxer and
+ // data source now that we're switching to HLS playback.
+ media_task_runner_->PostTask(
FROM_HERE,
- base::BindOnce(&WebMediaPlayerImpl::StartPipeline, AsWeakPtr()));
+ BindToCurrentLoop(base::BindOnce(
+ [](std::unique_ptr<Demuxer> demuxer,
+ std::unique_ptr<DataSource> data_source,
+ base::OnceClosure start_pipeline_cb) {
+ // Release resources before starting HLS.
+ demuxer.reset();
+ data_source.reset();
+
+ std::move(start_pipeline_cb).Run();
+ },
+ std::move(demuxer_), std::move(data_source_),
+ base::BindOnce(&WebMediaPlayerImpl::StartPipeline, AsWeakPtr()))));
+
return;
}
+
+ // We found hls in a data:// URL, fail immediately.
+ if (found_hls)
+ status = PIPELINE_ERROR_EXTERNAL_RENDERER_FAILED;
#endif
MaybeSetContainerName();
@@ -1749,14 +1880,12 @@ void WebMediaPlayerImpl::ActivateSurfaceLayerForVideo() {
vfc_task_runner_->PostTask(
FROM_HERE,
- base::BindOnce(
- &VideoFrameCompositor::EnableSubmission,
- base::Unretained(compositor_.get()), bridge_->GetSurfaceId(),
- bridge_->GetLocalSurfaceIdAllocationTime(),
- pipeline_metadata_.video_decoder_config.video_rotation(),
- IsInPictureInPicture(), opaque_,
- BindToCurrentLoop(base::BindRepeating(
- &WebMediaPlayerImpl::OnFrameSinkDestroyed, AsWeakPtr()))));
+ base::BindOnce(&VideoFrameCompositor::EnableSubmission,
+ base::Unretained(compositor_.get()),
+ bridge_->GetSurfaceId(),
+ bridge_->GetLocalSurfaceIdAllocationTime(),
+ pipeline_metadata_.video_decoder_config.video_rotation(),
+ IsInPictureInPicture()));
bridge_->SetContentsOpaque(opaque_);
// If the element is already in Picture-in-Picture mode, it means that it
@@ -1767,14 +1896,8 @@ void WebMediaPlayerImpl::ActivateSurfaceLayerForVideo() {
// TODO(872056): the surface should be activated but for some reasons, it
// does not. It is possible that this will no longer be needed after 872056
// is fixed.
- if (client_->DisplayType() ==
- WebMediaPlayer::DisplayType::kPictureInPicture) {
+ if (IsInPictureInPicture())
OnSurfaceIdUpdated(bridge_->GetSurfaceId());
- }
-}
-
-void WebMediaPlayerImpl::OnFrameSinkDestroyed() {
- bridge_->ClearSurfaceId();
}
void WebMediaPlayerImpl::OnBufferingStateChange(BufferingState state) {
@@ -1843,7 +1966,7 @@ bool WebMediaPlayerImpl::CanPlayThrough() {
return true;
if (chunk_demuxer_)
return true;
- if (data_source_ && data_source_->assume_fully_buffered())
+ if (data_source_ && data_source_->AssumeFullyBuffered())
return true;
// If we're not currently downloading, we have as much buffer as
// we're ever going to get, which means we say we can play through.
@@ -1891,8 +2014,8 @@ void WebMediaPlayerImpl::OnBufferingStateChangeInternal(
// Let the DataSource know we have enough data. It may use this information
// to release unused network connections.
- if (data_source_ && !client_->CouldPlayIfEnoughData())
- data_source_->OnBufferingHaveEnough(false);
+ if (mb_data_source_ && !client_->CouldPlayIfEnoughData())
+ mb_data_source_->OnBufferingHaveEnough(false);
// Blink expects a timeChanged() in response to a seek().
if (should_notify_time_changed_) {
@@ -1934,6 +2057,13 @@ void WebMediaPlayerImpl::OnBufferingStateChangeInternal(
void WebMediaPlayerImpl::OnDurationChange() {
DCHECK(main_task_runner_->BelongsToCurrentThread());
+ if (frame_->IsAdSubframe()) {
+ UMA_HISTOGRAM_CUSTOM_TIMES("Ads.Media.Duration", GetPipelineMediaDuration(),
+ base::TimeDelta::FromMilliseconds(1),
+ base::TimeDelta::FromDays(1),
+ 50 /* bucket_count */);
+ }
+
// TODO(sandersd): We should call delegate_->DidPlay() with the new duration,
// especially if it changed from <5s to >5s.
if (ready_state_ == WebMediaPlayer::kReadyStateHaveNothing)
@@ -1958,20 +2088,35 @@ void WebMediaPlayerImpl::OnAddTextTrack(const TextTrackConfig& config,
std::unique_ptr<WebInbandTextTrackImpl> web_inband_text_track(
new WebInbandTextTrackImpl(web_kind, web_label, web_language, web_id));
- std::unique_ptr<media::TextTrack> text_track(new TextTrackImpl(
+ std::unique_ptr<TextTrack> text_track(new TextTrackImpl(
main_task_runner_, client_, std::move(web_inband_text_track)));
done_cb.Run(std::move(text_track));
}
-void WebMediaPlayerImpl::OnWaitingForDecryptionKey() {
+void WebMediaPlayerImpl::OnWaiting(WaitingReason reason) {
DCHECK(main_task_runner_->BelongsToCurrentThread());
- encrypted_client_->DidBlockPlaybackWaitingForKey();
- // TODO(jrummell): didResumePlaybackBlockedForKey() should only be called
- // when a key has been successfully added (e.g. OnSessionKeysChange() with
- // |has_additional_usable_key| = true). http://crbug.com/461903
- encrypted_client_->DidResumePlaybackBlockedForKey();
+ switch (reason) {
+ case WaitingReason::kNoDecryptionKey:
+ encrypted_client_->DidBlockPlaybackWaitingForKey();
+ // TODO(jrummell): didResumePlaybackBlockedForKey() should only be called
+ // when a key has been successfully added (e.g. OnSessionKeysChange() with
+ // |has_additional_usable_key| = true). http://crbug.com/461903
+ encrypted_client_->DidResumePlaybackBlockedForKey();
+ return;
+
+ // Ideally this should be handled by PipelineController directly without
+ // being proxied here. But currently Pipeline::Client (|this|) is passed to
+ // PipelineImpl directly without going through |pipeline_controller_|,
+ // making it difficult to do.
+ // TODO(xhwang): Handle this in PipelineController when we have a clearer
+ // picture on how to refactor WebMediaPlayerImpl, PipelineController and
+ // PipelineImpl.
+ case WaitingReason::kDecoderStateLost:
+ pipeline_controller_.OnDecoderStateLost();
+ return;
+ }
}
void WebMediaPlayerImpl::OnVideoNaturalSizeChange(const gfx::Size& size) {
@@ -2010,16 +2155,10 @@ void WebMediaPlayerImpl::OnVideoOpacityChange(bool opaque) {
DCHECK_NE(ready_state_, WebMediaPlayer::kReadyStateHaveNothing);
opaque_ = opaque;
- if (!surface_layer_for_video_enabled_) {
- if (video_layer_)
- video_layer_->SetContentsOpaque(opaque_);
- } else if (bridge_->GetCcLayer()) {
+ if (!surface_layer_for_video_enabled_ && video_layer_)
+ video_layer_->SetContentsOpaque(opaque_);
+ else if (bridge_->GetCcLayer())
bridge_->SetContentsOpaque(opaque_);
- vfc_task_runner_->PostTask(
- FROM_HERE,
- base::BindOnce(&VideoFrameCompositor::UpdateIsOpaque,
- base::Unretained(compositor_.get()), opaque_));
- }
}
void WebMediaPlayerImpl::OnAudioConfigChange(const AudioDecoderConfig& config) {
@@ -2088,6 +2227,18 @@ void WebMediaPlayerImpl::OnVideoDecoderChange(const std::string& name) {
UpdateSecondaryProperties();
}
+void WebMediaPlayerImpl::OnRemotePlayStateChange(MediaStatus::State state) {
+ DCHECK(is_flinging_);
+
+ if (state == MediaStatus::State::PLAYING && Paused()) {
+ DVLOG(1) << __func__ << " requesting PLAY.";
+ client_->RequestPlay();
+ } else if (state == MediaStatus::State::PAUSED && !Paused()) {
+ DVLOG(1) << __func__ << " requesting PAUSE.";
+ client_->RequestPause();
+ }
+}
+
void WebMediaPlayerImpl::OnFrameHidden() {
DCHECK(main_task_runner_->BelongsToCurrentThread());
@@ -2107,6 +2258,12 @@ void WebMediaPlayerImpl::OnFrameHidden() {
// Schedule suspended playing media to be paused if the user doesn't come back
// to it within some timeout period to avoid any autoplay surprises.
ScheduleIdlePauseTimer();
+
+ // Notify the compositor of our page visibility status.
+ vfc_task_runner_->PostTask(
+ FROM_HERE,
+ base::BindOnce(&VideoFrameCompositor::SetIsPageVisible,
+ base::Unretained(compositor_.get()), !IsHidden()));
}
void WebMediaPlayerImpl::OnFrameClosed() {
@@ -2128,6 +2285,12 @@ void WebMediaPlayerImpl::OnFrameShown() {
if (video_decode_stats_reporter_)
video_decode_stats_reporter_->OnShown();
+ // Notify the compositor of our page visibility status.
+ vfc_task_runner_->PostTask(
+ FROM_HERE,
+ base::BindOnce(&VideoFrameCompositor::SetIsPageVisible,
+ base::Unretained(compositor_.get()), !IsHidden()));
+
// Only track the time to the first frame if playing or about to play because
// of being shown and only for videos we would optimize background playback
// for.
@@ -2194,6 +2357,8 @@ void WebMediaPlayerImpl::OnVolumeMultiplierUpdate(double multiplier) {
void WebMediaPlayerImpl::OnBecamePersistentVideo(bool value) {
client_->OnBecamePersistentVideo(value);
+ overlay_info_.is_persistent_video = value;
+ MaybeSendOverlayInfoToDecoder();
}
void WebMediaPlayerImpl::OnPictureInPictureModeEnded() {
@@ -2364,8 +2529,8 @@ void WebMediaPlayerImpl::DataSourceInitialized(bool success) {
DVLOG(1) << __func__;
DCHECK(main_task_runner_->BelongsToCurrentThread());
- if (observer_ && IsNewRemotePlaybackPipelineEnabled() && data_source_)
- observer_->OnDataSourceInitialized(data_source_->GetUrlAfterRedirects());
+ if (observer_ && IsNewRemotePlaybackPipelineEnabled() && mb_data_source_)
+ observer_->OnDataSourceInitialized(mb_data_source_->GetUrlAfterRedirects());
if (!success) {
SetNetworkState(WebMediaPlayer::kNetworkStateFormatError);
@@ -2379,8 +2544,9 @@ void WebMediaPlayerImpl::DataSourceInitialized(bool success) {
}
// No point in preloading data as we'll probably just throw it away anyways.
- if (IsStreaming() && preload_ > MultibufferDataSource::METADATA) {
- data_source_->SetPreload(MultibufferDataSource::METADATA);
+ if (IsStreaming() && preload_ > MultibufferDataSource::METADATA &&
+ mb_data_source_) {
+ mb_data_source_->SetPreload(MultibufferDataSource::METADATA);
}
StartPipeline();
@@ -2510,9 +2676,6 @@ void WebMediaPlayerImpl::StartPipeline() {
if (renderer_factory_selector_->GetCurrentFactory()
->GetRequiredMediaResourceType() == MediaResource::Type::URL) {
- if (data_source_)
- loaded_url_ = data_source_->GetUrlAfterRedirects();
-
// MediaPlayerRendererClient factory is the only factory that a
// MediaResource::Type::URL for the moment. This might no longer be true
// when we remove WebMediaPlayerCast.
@@ -2609,9 +2772,10 @@ void WebMediaPlayerImpl::SetReadyState(WebMediaPlayer::ReadyState state) {
DCHECK(main_task_runner_->BelongsToCurrentThread());
if (state == WebMediaPlayer::kReadyStateHaveEnoughData && data_source_ &&
- data_source_->assume_fully_buffered() &&
- network_state_ == WebMediaPlayer::kNetworkStateLoading)
+ data_source_->AssumeFullyBuffered() &&
+ network_state_ == WebMediaPlayer::kNetworkStateLoading) {
SetNetworkState(WebMediaPlayer::kNetworkStateLoaded);
+ }
ready_state_ = state;
highest_ready_state_ = std::max(highest_ready_state_, ready_state_);
@@ -2668,7 +2832,7 @@ void WebMediaPlayerImpl::UpdatePlayState() {
#endif
bool is_suspended = pipeline_controller_.IsSuspended();
- bool is_backgrounded = IsBackgroundSuspendEnabled(delegate_) && IsHidden();
+ bool is_backgrounded = IsBackgroundSuspendEnabled(this) && IsHidden();
PlayState state = UpdatePlayState_ComputePlayState(
is_remote, is_flinging_, can_auto_suspend, is_suspended, is_backgrounded);
SetDelegateState(state.delegate_state, state.is_idle);
@@ -2710,7 +2874,7 @@ void WebMediaPlayerImpl::SetDelegateState(DelegateState new_state,
delegate_->DidPlayerSizeChange(delegate_id_, NaturalSize());
delegate_->DidPlay(
delegate_id_, HasVideo(), has_audio,
- media::DurationToMediaContentType(GetPipelineMediaDuration()));
+ DurationToMediaContentType(GetPipelineMediaDuration()));
break;
}
case DelegateState::PAUSED:
@@ -2868,8 +3032,8 @@ WebMediaPlayerImpl::UpdatePlayState_ComputePlayState(bool is_remote,
// suspend is enabled and resuming background videos is not (original Android
// behavior).
bool backgrounded_video_has_no_remote_controls =
- IsBackgroundSuspendEnabled(delegate_) &&
- !IsResumeBackgroundVideosEnabled() && is_backgrounded && HasVideo();
+ IsBackgroundSuspendEnabled(this) && !IsResumeBackgroundVideosEnabled() &&
+ is_backgrounded && HasVideo();
bool can_play = !has_error && have_future_data;
bool has_remote_controls =
HasAudio() && !backgrounded_video_has_no_remote_controls;
@@ -3104,10 +3268,17 @@ void WebMediaPlayerImpl::OnBecameVisible() {
UpdatePlayState();
}
+bool WebMediaPlayerImpl::IsOpaque() const {
+ return opaque_;
+}
+
bool WebMediaPlayerImpl::ShouldPauseVideoWhenHidden() const {
+ if (!is_background_video_playback_enabled_)
+ return true;
+
// If suspending background video, pause any video that's not remoted or
// not unlocked to play in the background.
- if (IsBackgroundSuspendEnabled(delegate_)) {
+ if (IsBackgroundSuspendEnabled(this)) {
if (!HasVideo())
return false;
@@ -3131,7 +3302,8 @@ bool WebMediaPlayerImpl::ShouldDisableVideoWhenHidden() const {
// video. MSE video track switching on hide has gone through a field test.
// TODO(tmathmeyer): Passing load_type_ won't be needed after src= field
// testing is finished. see: http://crbug.com/709302
- if (!IsBackgroundVideoTrackOptimizationEnabled(load_type_))
+ if (!is_background_video_track_optimization_supported_ ||
+ !IsBackgroundVideoTrackOptimizationEnabled(load_type_))
return false;
// Disable video track only for players with audio that match the criteria for
@@ -3371,10 +3543,6 @@ void WebMediaPlayerImpl::OnFirstFrame(base::TimeTicks frame_time) {
const base::TimeDelta elapsed = frame_time - load_start_time_;
media_metrics_provider_->SetTimeToFirstFrame(elapsed);
RecordTimingUMA("Media.TimeToFirstFrame", elapsed);
- if (url_index_->HasReachedMaxParallelPreload()) {
- base::UmaHistogramMediumTimes("Media.TimeToFirstFrame.SRC.ManyVideos",
- elapsed);
- }
}
void WebMediaPlayerImpl::RecordTimingUMA(const std::string& key,
diff --git a/chromium/media/blink/webmediaplayer_impl.h b/chromium/media/blink/webmediaplayer_impl.h
index 1d67327a206..28dc55d4a61 100644
--- a/chromium/media/blink/webmediaplayer_impl.h
+++ b/chromium/media/blink/webmediaplayer_impl.h
@@ -109,15 +109,6 @@ class MEDIA_BLINK_EXPORT WebMediaPlayerImpl
std::unique_ptr<WebMediaPlayerParams> params);
~WebMediaPlayerImpl() override;
- // Destroys |demuxer| and records a UMA for the time taken to destroy it.
- // |task_runner| is the expected runner on which this method is called, and is
- // used as a parameter to ensure a scheduled task bound to this method is run
- // (to prevent uncontrolled |demuxer| destruction if |task_runner| has no
- // other references before such task is executed.)
- static void DemuxerDestructionHelper(
- scoped_refptr<base::SingleThreadTaskRunner> task_runner,
- std::unique_ptr<Demuxer> demuxer);
-
// WebSurfaceLayerBridgeObserver implementation.
void OnWebLayerUpdated() override;
void RegisterContentsLayer(cc::Layer* layer) override;
@@ -181,6 +172,8 @@ class MEDIA_BLINK_EXPORT WebMediaPlayerImpl
virtual double timelineOffset() const;
double CurrentTime() const override;
+ bool PausedWhenHidden() const override;
+
// Internal states of loading and network.
// TODO(hclam): Ask the pipeline about the state rather than having reading
// them from members which would cause race conditions.
@@ -293,6 +286,11 @@ class MEDIA_BLINK_EXPORT WebMediaPlayerImpl
bool DidLazyLoad() const override;
void OnBecameVisible() override;
+ bool IsOpaque() const override;
+
+ bool IsBackgroundMediaSuspendEnabled() const {
+ return is_background_suspend_enabled_;
+ }
// Called from WebMediaPlayerCast.
// TODO(hubbe): WMPI_CAST make private.
@@ -349,7 +347,7 @@ class MEDIA_BLINK_EXPORT WebMediaPlayerImpl
void OnDurationChange() override;
void OnAddTextTrack(const TextTrackConfig& config,
const AddTextTrackDoneCB& done_cb) override;
- void OnWaitingForDecryptionKey() override;
+ void OnWaiting(WaitingReason reason) override;
void OnAudioConfigChange(const AudioDecoderConfig& config) override;
void OnVideoConfigChange(const VideoDecoderConfig& config) override;
void OnVideoNaturalSizeChange(const gfx::Size& size) override;
@@ -357,11 +355,7 @@ class MEDIA_BLINK_EXPORT WebMediaPlayerImpl
void OnVideoAverageKeyframeDistanceUpdate() override;
void OnAudioDecoderChange(const std::string& name) override;
void OnVideoDecoderChange(const std::string& name) override;
-
- // When we lose the context_provider, we destroy the CompositorFrameSink to
- // prevent frames from being submitted. The current surface_ids become
- // invalid.
- void OnFrameSinkDestroyed();
+ void OnRemotePlayStateChange(MediaStatus::State state) override;
// Actually seek. Avoids causing |should_notify_time_changed_| to be set when
// |time_updated| is false.
@@ -735,7 +729,8 @@ class MEDIA_BLINK_EXPORT WebMediaPlayerImpl
//
// |demuxer_| will contain the appropriate demuxer based on which resource
// load strategy we're using.
- std::unique_ptr<MultibufferDataSource> data_source_;
+ MultibufferDataSource* mb_data_source_ = nullptr;
+ std::unique_ptr<DataSource> data_source_;
std::unique_ptr<Demuxer> demuxer_;
ChunkDemuxer* chunk_demuxer_ = nullptr;
@@ -975,6 +970,17 @@ class MEDIA_BLINK_EXPORT WebMediaPlayerImpl
// True if StartPipeline() completed a lazy load startup.
bool did_lazy_load_ = false;
+ // Whether the renderer should automatically suspend media playback in
+ // background tabs.
+ bool is_background_suspend_enabled_ = false;
+
+ // If disabled, video will be auto paused when in background. Affects the
+ // value of ShouldPauseVideoWhenHidden().
+ bool is_background_video_playback_enabled_ = true;
+
+ // Whether background video optimization is supported on current platform.
+ bool is_background_video_track_optimization_supported_ = true;
+
DISALLOW_COPY_AND_ASSIGN(WebMediaPlayerImpl);
};
diff --git a/chromium/media/blink/webmediaplayer_impl_unittest.cc b/chromium/media/blink/webmediaplayer_impl_unittest.cc
index 8c5eb9b6e94..a3a7f8d3e2d 100644
--- a/chromium/media/blink/webmediaplayer_impl_unittest.cc
+++ b/chromium/media/blink/webmediaplayer_impl_unittest.cc
@@ -46,9 +46,11 @@
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "third_party/blink/public/common/picture_in_picture/picture_in_picture_control_info.h"
+#include "third_party/blink/public/mojom/frame/document_interface_broker.mojom.h"
#include "third_party/blink/public/platform/web_fullscreen_video_status.h"
#include "third_party/blink/public/platform/web_media_player.h"
#include "third_party/blink/public/platform/web_media_player_client.h"
+#include "third_party/blink/public/platform/web_media_player_encrypted_media_client.h"
#include "third_party/blink/public/platform/web_media_player_source.h"
#include "third_party/blink/public/platform/web_security_origin.h"
#include "third_party/blink/public/platform/web_size.h"
@@ -65,8 +67,10 @@
#include "media/blink/renderer_media_player_interface.h"
#endif
+using ::testing::_;
using ::testing::AnyNumber;
using ::testing::Eq;
+using ::testing::Gt;
using ::testing::InSequence;
using ::testing::Invoke;
using ::testing::NiceMock;
@@ -74,12 +78,12 @@ using ::testing::NotNull;
using ::testing::Return;
using ::testing::ReturnRef;
using ::testing::StrictMock;
-using ::testing::_;
namespace media {
constexpr char kAudioOnlyTestFile[] = "sfx-opus-441.webm";
constexpr char kVideoOnlyTestFile[] = "bear-320x240-video-only.webm";
+constexpr char kEncryptedVideoOnlyTestFile[] = "bear-320x240-av_enc-v.webm";
MATCHER(WmpiDestroyed, "") {
return CONTAINS_STRING(arg, "WEBMEDIAPLAYER_DESTROYED {}");
@@ -91,6 +95,12 @@ MATCHER_P2(PlaybackRateChanged, old_rate_string, new_rate_string, "") {
std::string(new_rate_string));
}
+// returns a valid handle that can be passed to WebLocalFrame constructor
+mojo::ScopedMessagePipeHandle CreateStubDocumentInterfaceBrokerHandle() {
+ blink::mojom::DocumentInterfaceBrokerPtrInfo info;
+ return mojo::MakeRequest(&info).PassMessagePipe();
+}
+
#if defined(OS_ANDROID)
class MockRendererMediaPlayerManager
: public RendererMediaPlayerManagerInterface {
@@ -183,6 +193,22 @@ class MockWebMediaPlayerClient : public blink::WebMediaPlayerClient {
DISALLOW_COPY_AND_ASSIGN(MockWebMediaPlayerClient);
};
+class MockWebMediaPlayerEncryptedMediaClient
+ : public blink::WebMediaPlayerEncryptedMediaClient {
+ public:
+ MockWebMediaPlayerEncryptedMediaClient() = default;
+
+ MOCK_METHOD3(Encrypted,
+ void(blink::WebEncryptedMediaInitDataType,
+ const unsigned char*,
+ unsigned));
+ MOCK_METHOD0(DidBlockPlaybackWaitingForKey, void());
+ MOCK_METHOD0(DidResumePlaybackBlockedForKey, void());
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MockWebMediaPlayerEncryptedMediaClient);
+};
+
class MockWebMediaPlayerDelegate : public WebMediaPlayerDelegate {
public:
MockWebMediaPlayerDelegate() = default;
@@ -259,10 +285,6 @@ class MockWebMediaPlayerDelegate : public WebMediaPlayerDelegate {
bool IsFrameClosed() override { return is_closed_; }
- bool IsBackgroundMediaSuspendEnabled() override {
- return is_background_media_suspend_enabled_;
- }
-
void SetIdleForTesting(bool is_idle) { is_idle_ = is_idle; }
void SetStaleForTesting(bool is_stale) {
@@ -284,10 +306,6 @@ class MockWebMediaPlayerDelegate : public WebMediaPlayerDelegate {
void SetFrameClosedForTesting(bool is_closed) { is_closed_ = is_closed; }
- void SetBackgroundMediaSuspendEnabledForTesting(bool enable) {
- is_background_media_suspend_enabled_ = enable;
- }
-
int player_id() { return player_id_; }
private:
@@ -297,7 +315,6 @@ class MockWebMediaPlayerDelegate : public WebMediaPlayerDelegate {
bool is_stale_ = false;
bool is_hidden_ = false;
bool is_closed_ = false;
- bool is_background_media_suspend_enabled_ = false;
};
class MockSurfaceLayerBridge : public blink::WebSurfaceLayerBridge {
@@ -309,6 +326,7 @@ class MockSurfaceLayerBridge : public blink::WebSurfaceLayerBridge {
MOCK_METHOD0(ClearSurfaceId, void());
MOCK_METHOD1(SetContentsOpaque, void(bool));
MOCK_METHOD0(CreateSurfaceLayer, void());
+ MOCK_METHOD0(ClearObserver, void());
};
class MockVideoFrameCompositor : public VideoFrameCompositor {
@@ -320,30 +338,27 @@ class MockVideoFrameCompositor : public VideoFrameCompositor {
// MOCK_METHOD doesn't like OnceCallback.
void SetOnNewProcessedFrameCallback(OnNewProcessedFrameCB cb) override {}
+ MOCK_METHOD1(SetIsPageVisible, void(bool));
MOCK_METHOD0(GetCurrentFrameAndUpdateIfStale, scoped_refptr<VideoFrame>());
- MOCK_METHOD6(EnableSubmission,
- void(const viz::SurfaceId&,
- base::TimeTicks,
- media::VideoRotation,
- bool,
- bool,
- blink::WebFrameSinkDestroyedCallback));
+ MOCK_METHOD4(
+ EnableSubmission,
+ void(const viz::SurfaceId&, base::TimeTicks, media::VideoRotation, bool));
};
class WebMediaPlayerImplTest : public testing::Test {
public:
WebMediaPlayerImplTest()
: media_thread_("MediaThreadForTest"),
- web_view_(
- blink::WebView::Create(/*client=*/nullptr,
- /*widget_client=*/nullptr,
- blink::mojom::PageVisibilityState::kVisible,
- nullptr)),
- web_local_frame_(
- blink::WebLocalFrame::CreateMainFrame(web_view_,
- &web_frame_client_,
- nullptr,
- nullptr)),
+ web_view_(blink::WebView::Create(/*client=*/nullptr,
+ /*is_hidden=*/false,
+ /*compositing_enabled=*/false,
+ nullptr)),
+ web_local_frame_(blink::WebLocalFrame::CreateMainFrame(
+ web_view_,
+ &web_frame_client_,
+ nullptr,
+ CreateStubDocumentInterfaceBrokerHandle(),
+ nullptr)),
context_provider_(viz::TestContextProvider::Create()),
audio_parameters_(TestAudioParameters::Normal()) {
media_thread_.StartAndWaitForTesting();
@@ -398,14 +413,16 @@ class WebMediaPlayerImplTest : public testing::Test {
viz::TestContextProvider::Create(),
base::FeatureList::IsEnabled(media::kUseSurfaceLayerForVideo)
? blink::WebMediaPlayer::SurfaceLayerMode::kAlways
- : blink::WebMediaPlayer::SurfaceLayerMode::kNever);
+ : blink::WebMediaPlayer::SurfaceLayerMode::kNever,
+ is_background_suspend_enabled_, is_background_video_playback_enabled_,
+ true);
auto compositor = std::make_unique<StrictMock<MockVideoFrameCompositor>>(
params->video_frame_compositor_task_runner());
compositor_ = compositor.get();
wmpi_ = std::make_unique<WebMediaPlayerImpl>(
- web_local_frame_, &client_, nullptr, &delegate_,
+ web_local_frame_, &client_, &encrypted_client_, &delegate_,
std::move(factory_selector), url_index_.get(), std::move(compositor),
std::move(params));
@@ -417,6 +434,7 @@ class WebMediaPlayerImplTest : public testing::Test {
~WebMediaPlayerImplTest() override {
EXPECT_CALL(client_, SetCcLayer(nullptr));
EXPECT_CALL(client_, MediaRemotingStopped(_));
+
// Destruct WebMediaPlayerImpl and pump the message loop to ensure that
// objects passed to the message loop for destruction are released.
//
@@ -424,7 +442,7 @@ class WebMediaPlayerImplTest : public testing::Test {
// destructed since WMPI may reference them during destruction.
wmpi_.reset();
- base::RunLoop().RunUntilIdle();
+ CycleThreads();
web_view_->MainFrameWidget()->Close();
}
@@ -495,6 +513,8 @@ class WebMediaPlayerImplTest : public testing::Test {
void OnMetadata(PipelineMetadata metadata) { wmpi_->OnMetadata(metadata); }
+ void OnWaiting(WaitingReason reason) { wmpi_->OnWaiting(reason); }
+
void OnVideoNaturalSizeChange(const gfx::Size& size) {
wmpi_->OnVideoNaturalSizeChange(size);
}
@@ -556,7 +576,11 @@ class WebMediaPlayerImplTest : public testing::Test {
}
void SetUpMediaSuspend(bool enable) {
- delegate_.SetBackgroundMediaSuspendEnabledForTesting(enable);
+ is_background_suspend_enabled_ = enable;
+ }
+
+ void SetUpBackgroundVideoPlayback(bool enable) {
+ is_background_video_playback_enabled_ = enable;
}
bool IsVideoLockedWhenPausedWhenHidden() const {
@@ -564,12 +588,14 @@ class WebMediaPlayerImplTest : public testing::Test {
}
void BackgroundPlayer() {
+ EXPECT_CALL(*compositor_, SetIsPageVisible(false));
delegate_.SetFrameHiddenForTesting(true);
delegate_.SetFrameClosedForTesting(false);
wmpi_->OnFrameHidden();
}
void ForegroundPlayer() {
+ EXPECT_CALL(*compositor_, SetIsPageVisible(true));
delegate_.SetFrameHiddenForTesting(false);
delegate_.SetFrameClosedForTesting(false);
wmpi_->OnFrameShown();
@@ -603,7 +629,7 @@ class WebMediaPlayerImplTest : public testing::Test {
return wmpi_->pipeline_metadata_.natural_size;
}
- void LoadAndWaitForMetadata(std::string data_file) {
+ void Load(std::string data_file) {
// URL doesn't matter, it's value is unknown to the underlying demuxer.
const GURL kTestURL("file://example.com/sample.webm");
@@ -644,6 +670,10 @@ class WebMediaPlayerImplTest : public testing::Test {
client->DidReceiveData(reinterpret_cast<const char*>(data->data()),
data->data_size());
client->DidFinishLoading();
+ }
+
+ void LoadAndWaitForMetadata(std::string data_file) {
+ Load(data_file);
// This runs until we reach the have current data state. Attempting to wait
// for states < kReadyStateHaveCurrentData is unreliable due to asynchronous
@@ -669,13 +699,8 @@ class WebMediaPlayerImplTest : public testing::Test {
// Ensure any tasks waiting to be posted to the media thread are posted.
base::RunLoop().RunUntilIdle();
- // Cycle media thread.
- {
- base::RunLoop loop;
- media_thread_.task_runner()->PostTaskAndReply(
- FROM_HERE, base::DoNothing(), loop.QuitClosure());
- loop.Run();
- }
+ // Flush all media tasks.
+ media_thread_.FlushForTesting();
// Cycle anything that was posted back from the media thread.
base::RunLoop().RunUntilIdle();
@@ -700,8 +725,12 @@ class WebMediaPlayerImplTest : public testing::Test {
// Audio hardware configuration.
AudioParameters audio_parameters_;
+ bool is_background_suspend_enabled_ = false;
+ bool is_background_video_playback_enabled_ = true;
+
// The client interface used by |wmpi_|.
NiceMock<MockWebMediaPlayerClient> client_;
+ MockWebMediaPlayerEncryptedMediaClient encrypted_client_;
#if defined(OS_ANDROID)
NiceMock<MockRendererMediaPlayerManager> mock_media_player_manager_;
@@ -756,6 +785,89 @@ TEST_F(WebMediaPlayerImplTest, LoadAndDestroy) {
EXPECT_GT(reported_memory_ - data_source_size, 0);
}
+// Verify LoadAndWaitForMetadata() functions without issue.
+TEST_F(WebMediaPlayerImplTest, LoadAndDestroyDataUrl) {
+ InitializeWebMediaPlayerImpl();
+ EXPECT_FALSE(IsSuspended());
+ wmpi_->SetPreload(blink::WebMediaPlayer::kPreloadAuto);
+
+ const GURL kMp3DataUrl(
+ "data://audio/mp3;base64,SUQzAwAAAAAAFlRFTkMAAAAMAAAAQW1hZGV1cyBQcm//"
+ "+5DEAAAAAAAAAAAAAAAAAAAAAABYaW5nAAAADwAAAAwAAAftABwcHBwcHBwcMTExMTExMTFG"
+ "RkZGRkZGRlpaWlpaWlpaWm9vb29vb29vhISEhISEhISYmJiYmJiYmJitra2tra2trcLCwsLC"
+ "wsLC3t7e3t7e3t7e7+/v7+/v7+///////////"
+ "wAAADxMQU1FMy45OHIErwAAAAAudQAANCAkCK9BAAHMAAAHbZV/"
+ "jdYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
+ "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
+ "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
+ "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
+ "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD/"
+ "+0DEAAABcAOH1AAAIhcbaLc80EgAAAAAAPAAAP/"
+ "0QfAAQdBwGBwf8AwGAgAAMBYxgxBgaTANj4NBIJgwVX+"
+ "jXKCAMFgC8LgBGBmB3KTAhAT8wIQFjATAWhyLf4TUFdHcW4WkdwwxdMT3EaJEeo4UknR8dww"
+ "wlxIj1RZJJ98S0khhhhiaPX/8LqO4YYS4kRhRhf/8nD2HsYj1HqZF4vf9YKiIKgqIlQAMA3/"
+ "kQAMHsToyV0cDdv/"
+ "7IMQHgEgUaSld4QAg1A0lde4cXDskP7w0MysiKzEUARMCEBQwLQPAwC8VADBwCsOwF+v///"
+ "6ydVW3tR1HNzg22xv+3Z9gAAOgA//"
+ "pg1gxGG0G6aJdDp5LCgnFycZmDJi0ADQhRrZGzKGQAqP//3t3Xe3pUv19yF6v7FIAAiMAb/"
+ "3/"
+ "+yDEAwBGpBsprn9gIN0NZOn9lFyAAGa1QaI6ZhLqtGY3QFgnJ4BlymYWTBYNQ4LcX88rfX/"
+ "1Yu+8WKLoSm09u7Fd1QADgbfwwBECUMBpB+TDDGAUySsMLO80jP18xowMNGTBgotYkm3gPv/"
+ "/6P1v2pspRShZJjXgT7V1AAAoAG/9//"
+ "sgxAMCxzRpKa9k5KDODOUR7ihciAAsEwYdoVZqATrn1uJSYowIBg9gKn0MboJlBF3Fh4YAfX"
+ "//9+52v6qhZt7o244rX/JfRoADB+B5MPsQ401sRj4pGKOeGUzuJDGwHEhUhAvBuMNAM1b//"
+ "t9kSl70NlDrbJecU/t99aoAACMAD//7IMQCggY0Gymuf2Ag7A0k9f2UXPwAAGaFSZ/"
+ "7BhFSu4Yy2FjHCYZlKoYQTiEMTLaGxV5nNu/8UddjmtWbl6r/SYAN/pAADACAI8wHQHwMM/"
+ "XrDJuAv48nRNEXDHS8w4YMJCy0aSDbgm3//26S0noiIgkPfZn1Sa9V16dNAAAgAA//"
+ "+yDEAoBHCGkpr2SkoMgDZXW/cAT4iAA8FEYeASxqGx/H20IYYpYHJg+AHH2GbgBlgl/"
+ "1yQ2AFP///YpK32okeasc/f/+xXsAAJ1AA/"
+ "9Ntaj1Pc0K7Yzw6FrOHlozEHzFYEEg6NANZbIn9a8p//j7HC6VvlmStt3o+pUAACMADfyA//"
+ "sgxAOCRkwbKa5/YCDUDWU1/ZxcAGZVQZ27Zg/KweYuMFmm74hkSqYKUCINS0ZoxZ5XOv/"
+ "8X7EgE4lCZDu7fc4AN/6BQHQwG0GpMMAUczI/wpM7iuM9TTGCQwsRMEBi8Cl7yAnv//"
+ "2+belL59SGkk1ENqvyagAAKAAP/aAAEBGmGv/"
+ "7IMQGAobYaSuvcOLgzA1lNe4cXGDeaOzj56RhnnIBMZrA4GMAKF4GBCJjK4gC+v///"
+ "uh3b1WWRQNv2e/syS7ABAADCACBMPUSw0sNqj23G4OZHMzmKjGgLDBMkAzxpMNAE1b///"
+ "od72VdCOtlpw1/764AAhwAf/0AAGUkeZb0Bgz/"
+ "+yDEB4CGMBsrrn9gINgNJXX9qFxCcAYkOE7GsVJi6QBCEZCEEav2owqE3f4+KbGKLWKN29/"
+ "YsAAC0AUAARAL5gMgLQYWGjRGQkBGh1MmZseGKjpgwUYCBoprUgcDlG//7372tX0y/"
+ "zl33dN2ugIf/yIADoERhDlqm9CtAfsRzhlK//"
+ "tAxAoAB7RpKPXhACHRkia3PPAAEkGL4EUFgCTA3BTMDkAcEgMgoCeefz/////"
+ "oxOy73ryRx97nI2//YryIAhX0mveu/"
+ "3tEgAAAABh2nnnBAAOYOK6ZtxB4mEYkiaDwX5gzgHGAkAUYGwB0kMGQFaKGBEAwDgHAUAcvP"
+ "KwDfJeHEGqcMk3iN5blKocU8c6FA4FxhTqXf/OtXzv37ErkOYWXP/"
+ "93kTV91+YNo3Lh8ECwliUABv7/"
+ "+xDEAYPIREMrXcMAKAAAP8AAAARfwAADHinN1RU5NKTjkHN1Mc08dTJQjL4GBwgYEAK/"
+ "X2a8/1qZjMtcFCUTiSXmteUeFNBWIqEKCioLiKyO10VVTEFNRTMuOTguMlVVVVVVVVVVVf/"
+ "7EMQJg8AAAaQAAAAgAAA0gAAABFVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV"
+ "VVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVEFHAAA"
+ "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
+ "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
+ "AAAAAAAAAAAAAAAAAAP8=");
+
+ wmpi_->Load(blink::WebMediaPlayer::kLoadTypeURL,
+ blink::WebMediaPlayerSource(blink::WebURL(kMp3DataUrl)),
+ blink::WebMediaPlayer::kCorsModeUnspecified);
+
+ base::RunLoop().RunUntilIdle();
+
+ // This runs until we reach the have current data state. Attempting to wait
+ // for states < kReadyStateHaveCurrentData is unreliable due to asynchronous
+ // execution of tasks on the base::test:ScopedTaskEnvironment.
+ while (wmpi_->GetReadyState() <
+ blink::WebMediaPlayer::kReadyStateHaveCurrentData) {
+ base::RunLoop loop;
+ EXPECT_CALL(client_, ReadyStateChanged())
+ .WillRepeatedly(RunClosure(loop.QuitClosure()));
+ loop.Run();
+
+ // Clear the mock so it doesn't have a stale QuitClosure.
+ testing::Mock::VerifyAndClearExpectations(&client_);
+ }
+
+ EXPECT_FALSE(IsSuspended());
+ CycleThreads();
+}
+
// Verify that preload=metadata suspend works properly.
TEST_F(WebMediaPlayerImplTest, LoadPreloadMetadataSuspend) {
InitializeWebMediaPlayerImpl();
@@ -791,7 +903,7 @@ TEST_F(WebMediaPlayerImplTest, LazyLoadPreloadMetadataSuspend) {
.WillOnce(ReturnRef(surface_id_));
EXPECT_CALL(*surface_layer_bridge_ptr_, GetLocalSurfaceIdAllocationTime())
.WillOnce(Return(base::TimeTicks()));
- EXPECT_CALL(*compositor_, EnableSubmission(_, _, _, _, false, _));
+ EXPECT_CALL(*compositor_, EnableSubmission(_, _, _, _));
EXPECT_CALL(*surface_layer_bridge_ptr_, SetContentsOpaque(false));
}
@@ -823,7 +935,7 @@ TEST_F(WebMediaPlayerImplTest, LoadPreloadMetadataSuspendNoVideoMemoryUsage) {
.WillOnce(ReturnRef(surface_id_));
EXPECT_CALL(*surface_layer_bridge_ptr_, GetLocalSurfaceIdAllocationTime())
.WillOnce(Return(base::TimeTicks()));
- EXPECT_CALL(*compositor_, EnableSubmission(_, _, _, _, false, _));
+ EXPECT_CALL(*compositor_, EnableSubmission(_, _, _, _));
EXPECT_CALL(*surface_layer_bridge_ptr_, SetContentsOpaque(false));
}
@@ -1253,13 +1365,45 @@ TEST_F(WebMediaPlayerImplTest, NoStreams) {
if (base::FeatureList::IsEnabled(media::kUseSurfaceLayerForVideo)) {
EXPECT_CALL(*surface_layer_bridge_ptr_, CreateSurfaceLayer()).Times(0);
EXPECT_CALL(*surface_layer_bridge_ptr_, GetSurfaceId()).Times(0);
- EXPECT_CALL(*compositor_, EnableSubmission(_, _, _, _, _, _)).Times(0);
+ EXPECT_CALL(*compositor_, EnableSubmission(_, _, _, _)).Times(0);
}
// Nothing should happen. In particular, no assertions should fail.
OnMetadata(metadata);
}
+// TODO(xhwang): Use MockCdm in encrypted media related tests.
+
+TEST_F(WebMediaPlayerImplTest, Encrypted) {
+ InitializeWebMediaPlayerImpl();
+
+ base::RunLoop loop;
+ EXPECT_CALL(
+ encrypted_client_,
+ Encrypted(blink::WebEncryptedMediaInitDataType::kWebm, NotNull(), Gt(0u)))
+ .WillOnce(RunClosure(loop.QuitClosure()));
+
+ // Cannot wait for metadata since we don't have a CDM and pipeline
+ // initialization will stall waiting for a CDM to be set. But Encrypted()
+ // should still be called.
+ Load(kEncryptedVideoOnlyTestFile);
+
+ loop.Run();
+}
+
+TEST_F(WebMediaPlayerImplTest, Waiting_NoDecryptionKey) {
+ InitializeWebMediaPlayerImpl();
+
+ // Use non-encrypted file here since we don't have a CDM. Otherwise pipeline
+ // initialization will stall waiting for a CDM to be set.
+ LoadAndWaitForMetadata(kVideoOnlyTestFile);
+
+ EXPECT_CALL(encrypted_client_, DidBlockPlaybackWaitingForKey());
+ EXPECT_CALL(encrypted_client_, DidResumePlaybackBlockedForKey());
+
+ OnWaiting(WaitingReason::kNoDecryptionKey);
+}
+
TEST_F(WebMediaPlayerImplTest, NaturalSizeChange) {
InitializeWebMediaPlayerImpl();
PipelineMetadata metadata;
@@ -1274,7 +1418,7 @@ TEST_F(WebMediaPlayerImplTest, NaturalSizeChange) {
.WillOnce(ReturnRef(surface_id_));
EXPECT_CALL(*surface_layer_bridge_ptr_, GetLocalSurfaceIdAllocationTime())
.WillOnce(Return(base::TimeTicks()));
- EXPECT_CALL(*compositor_, EnableSubmission(_, _, _, _, false, _));
+ EXPECT_CALL(*compositor_, EnableSubmission(_, _, _, _));
EXPECT_CALL(*surface_layer_bridge_ptr_, SetContentsOpaque(false));
} else {
EXPECT_CALL(client_, SetCcLayer(NotNull()));
@@ -1303,7 +1447,7 @@ TEST_F(WebMediaPlayerImplTest, NaturalSizeChange_Rotated) {
.WillOnce(ReturnRef(surface_id_));
EXPECT_CALL(*surface_layer_bridge_ptr_, GetLocalSurfaceIdAllocationTime())
.WillOnce(Return(base::TimeTicks()));
- EXPECT_CALL(*compositor_, EnableSubmission(_, _, _, _, false, _));
+ EXPECT_CALL(*compositor_, EnableSubmission(_, _, _, _));
EXPECT_CALL(*surface_layer_bridge_ptr_, SetContentsOpaque(false));
} else {
EXPECT_CALL(client_, SetCcLayer(NotNull()));
@@ -1333,7 +1477,7 @@ TEST_F(WebMediaPlayerImplTest, VideoLockedWhenPausedWhenHidden) {
.WillOnce(ReturnRef(surface_id_));
EXPECT_CALL(*surface_layer_bridge_ptr_, GetLocalSurfaceIdAllocationTime())
.WillOnce(Return(base::TimeTicks()));
- EXPECT_CALL(*compositor_, EnableSubmission(_, _, _, _, false, _));
+ EXPECT_CALL(*compositor_, EnableSubmission(_, _, _, _));
EXPECT_CALL(*surface_layer_bridge_ptr_, SetContentsOpaque(false));
} else {
EXPECT_CALL(client_, SetCcLayer(NotNull()));
@@ -1411,7 +1555,7 @@ TEST_F(WebMediaPlayerImplTest, InfiniteDuration) {
.WillOnce(ReturnRef(surface_id_));
EXPECT_CALL(*surface_layer_bridge_ptr_, GetLocalSurfaceIdAllocationTime())
.WillOnce(Return(base::TimeTicks()));
- EXPECT_CALL(*compositor_, EnableSubmission(_, _, _, _, false, _));
+ EXPECT_CALL(*compositor_, EnableSubmission(_, _, _, _));
EXPECT_CALL(*surface_layer_bridge_ptr_, SetContentsOpaque(false));
} else {
EXPECT_CALL(client_, SetCcLayer(NotNull()));
@@ -1452,7 +1596,7 @@ TEST_F(WebMediaPlayerImplTest, SetContentsLayerGetsWebLayerFromBridge) {
EXPECT_CALL(*surface_layer_bridge_ptr_, GetLocalSurfaceIdAllocationTime())
.WillOnce(Return(base::TimeTicks()));
EXPECT_CALL(*surface_layer_bridge_ptr_, SetContentsOpaque(false));
- EXPECT_CALL(*compositor_, EnableSubmission(_, _, _, _, false, _));
+ EXPECT_CALL(*compositor_, EnableSubmission(_, _, _, _));
// We only call the callback to create the bridge in OnMetadata, so we need
// to call it.
@@ -1465,6 +1609,8 @@ TEST_F(WebMediaPlayerImplTest, SetContentsLayerGetsWebLayerFromBridge) {
EXPECT_CALL(client_, SetCcLayer(Eq(layer.get())));
EXPECT_CALL(*surface_layer_bridge_ptr_, SetContentsOpaque(false));
wmpi_->RegisterContentsLayer(layer.get());
+
+ EXPECT_CALL(*surface_layer_bridge_ptr_, ClearObserver());
}
TEST_F(WebMediaPlayerImplTest, PlaybackRateChangeMediaLogs) {
@@ -1495,7 +1641,7 @@ TEST_F(WebMediaPlayerImplTest, PictureInPictureTriggerCallback) {
.WillRepeatedly(ReturnRef(surface_id_));
EXPECT_CALL(*surface_layer_bridge_ptr_, GetLocalSurfaceIdAllocationTime())
.WillRepeatedly(Return(base::TimeTicks()));
- EXPECT_CALL(*compositor_, EnableSubmission(_, _, _, _, false, _));
+ EXPECT_CALL(*compositor_, EnableSubmission(_, _, _, _));
EXPECT_CALL(*surface_layer_bridge_ptr_, SetContentsOpaque(false));
PipelineMetadata metadata;
@@ -1522,6 +1668,7 @@ TEST_F(WebMediaPlayerImplTest, PictureInPictureTriggerCallback) {
// Updating SurfaceId should NOT exit Picture-in-Picture.
EXPECT_CALL(delegate_, DidPictureInPictureModeEnd(delegate_.player_id(), _))
.Times(0);
+ EXPECT_CALL(*surface_layer_bridge_ptr_, ClearObserver());
}
// Tests delegate methods are called with the appropriate play/pause button
@@ -1539,7 +1686,7 @@ TEST_F(WebMediaPlayerImplTest,
.WillRepeatedly(ReturnRef(surface_id_));
EXPECT_CALL(*surface_layer_bridge_ptr_, GetLocalSurfaceIdAllocationTime())
.WillRepeatedly(Return(base::TimeTicks()));
- EXPECT_CALL(*compositor_, EnableSubmission(_, _, _, _, false, _));
+ EXPECT_CALL(*compositor_, EnableSubmission(_, _, _, _));
EXPECT_CALL(*surface_layer_bridge_ptr_, SetContentsOpaque(false));
PipelineMetadata metadata;
@@ -1566,12 +1713,13 @@ TEST_F(WebMediaPlayerImplTest,
// Updating SurfaceId should NOT exit Picture-in-Picture.
EXPECT_CALL(delegate_, DidPictureInPictureModeEnd(delegate_.player_id(), _))
.Times(0);
+ EXPECT_CALL(*surface_layer_bridge_ptr_, ClearObserver());
}
class WebMediaPlayerImplBackgroundBehaviorTest
: public WebMediaPlayerImplTest,
public ::testing::WithParamInterface<
- std::tuple<bool, bool, int, int, bool, bool, bool, bool>> {
+ std::tuple<bool, bool, int, int, bool, bool, bool, bool, bool>> {
public:
// Indices of the tuple parameters.
static const int kIsMediaSuspendEnabled = 0;
@@ -1582,10 +1730,12 @@ class WebMediaPlayerImplBackgroundBehaviorTest
static const int kIsMediaSource = 5;
static const int kIsBackgroundPauseEnabled = 6;
static const int kIsPictureInPictureEnabled = 7;
+ static const int kIsBackgroundVideoPlaybackEnabled = 8;
void SetUp() override {
WebMediaPlayerImplTest::SetUp();
SetUpMediaSuspend(IsMediaSuspendOn());
+ SetUpBackgroundVideoPlayback(IsBackgroundVideoPlaybackEnabled());
std::string enabled_features;
std::string disabled_features;
@@ -1662,6 +1812,10 @@ class WebMediaPlayerImplBackgroundBehaviorTest
return std::get<kIsPictureInPictureEnabled>(GetParam());
}
+ bool IsBackgroundVideoPlaybackEnabled() {
+ return std::get<kIsBackgroundVideoPlaybackEnabled>(GetParam());
+ }
+
int GetDurationSec() const { return std::get<kDurationSec>(GetParam()); }
int GetAverageKeyframeDistanceSec() const {
@@ -1701,7 +1855,8 @@ TEST_P(WebMediaPlayerImplBackgroundBehaviorTest, AudioOnly) {
// Never optimize or pause an audio-only player.
SetMetadata(true, false);
EXPECT_FALSE(IsBackgroundOptimizationCandidate());
- EXPECT_FALSE(ShouldPauseVideoWhenHidden());
+ EXPECT_FALSE(IsBackgroundVideoPlaybackEnabled() &&
+ ShouldPauseVideoWhenHidden());
EXPECT_FALSE(ShouldDisableVideoWhenHidden());
}
@@ -1718,8 +1873,9 @@ TEST_P(WebMediaPlayerImplBackgroundBehaviorTest, VideoOnly) {
// Video is always paused when suspension is on and only if matches the
// optimization criteria if the optimization is on.
- bool should_pause =
- IsMediaSuspendOn() || (IsBackgroundPauseOn() && matches_requirements);
+ bool should_pause = !IsBackgroundVideoPlaybackEnabled() ||
+ IsMediaSuspendOn() ||
+ (IsBackgroundPauseOn() && matches_requirements);
EXPECT_EQ(should_pause, ShouldPauseVideoWhenHidden());
}
@@ -1737,8 +1893,11 @@ TEST_P(WebMediaPlayerImplBackgroundBehaviorTest, AudioVideo) {
ShouldDisableVideoWhenHidden());
// Only pause audible videos if both media suspend and resume background
- // videos is on. Both are on by default on Android and off on desktop.
- EXPECT_EQ(IsMediaSuspendOn() && IsResumeBackgroundVideoEnabled(),
+ // videos is on and background video playback is disabled. Background video
+ // playback is enabled by default. Both media suspend and resume background
+ // videos are on by default on Android and off on desktop.
+ EXPECT_EQ(!IsBackgroundVideoPlaybackEnabled() ||
+ (IsMediaSuspendOn() && IsResumeBackgroundVideoEnabled()),
ShouldPauseVideoWhenHidden());
if (!IsBackgroundOptimizationOn() || !matches_requirements ||
@@ -1781,6 +1940,7 @@ INSTANTIATE_TEST_CASE_P(
::testing::Bool(),
::testing::Bool(),
::testing::Bool(),
+ ::testing::Bool(),
::testing::Bool()));
} // namespace media
diff --git a/chromium/media/blink/webmediaplayer_params.cc b/chromium/media/blink/webmediaplayer_params.cc
index 25385207f49..9a4b5caf6ff 100644
--- a/chromium/media/blink/webmediaplayer_params.cc
+++ b/chromium/media/blink/webmediaplayer_params.cc
@@ -28,7 +28,10 @@ WebMediaPlayerParams::WebMediaPlayerParams(
mojom::MediaMetricsProviderPtr metrics_provider,
CreateSurfaceLayerBridgeCB create_bridge_callback,
scoped_refptr<viz::ContextProvider> context_provider,
- blink::WebMediaPlayer::SurfaceLayerMode use_surface_layer_for_video)
+ blink::WebMediaPlayer::SurfaceLayerMode use_surface_layer_for_video,
+ bool is_background_suspend_enabled,
+ bool is_background_video_playback_enabled,
+ bool is_background_video_track_optimization_supported)
: defer_load_cb_(defer_load_cb),
audio_renderer_sink_(audio_renderer_sink),
media_log_(std::move(media_log)),
@@ -45,7 +48,12 @@ WebMediaPlayerParams::WebMediaPlayerParams(
metrics_provider_(std::move(metrics_provider)),
create_bridge_callback_(std::move(create_bridge_callback)),
context_provider_(std::move(context_provider)),
- use_surface_layer_for_video_(use_surface_layer_for_video) {}
+ use_surface_layer_for_video_(use_surface_layer_for_video),
+ is_background_suspend_enabled_(is_background_suspend_enabled),
+ is_background_video_playback_enabled_(
+ is_background_video_playback_enabled),
+ is_background_video_track_optimization_supported_(
+ is_background_video_track_optimization_supported) {}
WebMediaPlayerParams::~WebMediaPlayerParams() = default;
diff --git a/chromium/media/blink/webmediaplayer_params.h b/chromium/media/blink/webmediaplayer_params.h
index 73b3d0dd774..f88979c209f 100644
--- a/chromium/media/blink/webmediaplayer_params.h
+++ b/chromium/media/blink/webmediaplayer_params.h
@@ -87,7 +87,10 @@ class MEDIA_BLINK_EXPORT WebMediaPlayerParams {
mojom::MediaMetricsProviderPtr metrics_provider,
CreateSurfaceLayerBridgeCB bridge_callback,
scoped_refptr<viz::ContextProvider> context_provider,
- blink::WebMediaPlayer::SurfaceLayerMode use_surface_layer_for_video);
+ blink::WebMediaPlayer::SurfaceLayerMode use_surface_layer_for_video,
+ bool is_background_suspend_enabled,
+ bool is_background_video_play_enabled,
+ bool is_background_video_track_optimization_supported);
~WebMediaPlayerParams();
@@ -158,6 +161,18 @@ class MEDIA_BLINK_EXPORT WebMediaPlayerParams {
return use_surface_layer_for_video_;
}
+ bool IsBackgroundSuspendEnabled() const {
+ return is_background_suspend_enabled_;
+ }
+
+ bool IsBackgroundVideoPlaybackEnabled() const {
+ return is_background_video_playback_enabled_;
+ }
+
+ bool IsBackgroundVideoTrackOptimizationSupported() const {
+ return is_background_video_track_optimization_supported_;
+ }
+
private:
DeferLoadCB defer_load_cb_;
scoped_refptr<SwitchableAudioRendererSink> audio_renderer_sink_;
@@ -179,6 +194,14 @@ class MEDIA_BLINK_EXPORT WebMediaPlayerParams {
scoped_refptr<viz::ContextProvider> context_provider_;
blink::WebMediaPlayer::SurfaceLayerMode use_surface_layer_for_video_;
+ // Whether the renderer should automatically suspend media playback in
+ // background tabs.
+ bool is_background_suspend_enabled_ = false;
+ // Whether the renderer is allowed to play video in background tabs.
+ bool is_background_video_playback_enabled_ = true;
+ // Whether background video optimization is supported on current platform.
+ bool is_background_video_track_optimization_supported_ = true;
+
DISALLOW_IMPLICIT_CONSTRUCTORS(WebMediaPlayerParams);
};
diff --git a/chromium/media/blink/websourcebuffer_impl.cc b/chromium/media/blink/websourcebuffer_impl.cc
index ac7ae84132f..2c656781b74 100644
--- a/chromium/media/blink/websourcebuffer_impl.cc
+++ b/chromium/media/blink/websourcebuffer_impl.cc
@@ -75,10 +75,7 @@ WebSourceBufferImpl::WebSourceBufferImpl(const std::string& id,
base::Unretained(this)));
}
-WebSourceBufferImpl::~WebSourceBufferImpl() {
- DCHECK(!demuxer_) << "Object destroyed w/o removedFromMediaSource() call";
- DCHECK(!client_);
-}
+WebSourceBufferImpl::~WebSourceBufferImpl() = default;
void WebSourceBufferImpl::SetClient(blink::WebSourceBufferClient* client) {
DCHECK(client);
diff --git a/chromium/media/capabilities/BUILD.gn b/chromium/media/capabilities/BUILD.gn
index 9dcfb7dcdf6..31066aa3639 100644
--- a/chromium/media/capabilities/BUILD.gn
+++ b/chromium/media/capabilities/BUILD.gn
@@ -22,6 +22,8 @@ source_set("capabilities") {
"bucket_utility.h",
"in_memory_video_decode_stats_db_impl.cc",
"in_memory_video_decode_stats_db_impl.h",
+ "learning_helper.cc",
+ "learning_helper.h",
"video_decode_stats_db.cc",
"video_decode_stats_db.h",
"video_decode_stats_db_impl.cc",
@@ -38,6 +40,11 @@ source_set("capabilities") {
"//ui/gfx/geometry",
]
+ deps = [
+ "//media/learning/common",
+ "//media/learning/impl",
+ ]
+
configs += [ "//media:subcomponent_config" ]
}
diff --git a/chromium/media/capabilities/in_memory_video_decode_stats_db_impl.h b/chromium/media/capabilities/in_memory_video_decode_stats_db_impl.h
index 746b78a4593..6cf05257154 100644
--- a/chromium/media/capabilities/in_memory_video_decode_stats_db_impl.h
+++ b/chromium/media/capabilities/in_memory_video_decode_stats_db_impl.h
@@ -10,7 +10,7 @@
#include "base/files/file_path.h"
#include "base/memory/weak_ptr.h"
-#include "components/leveldb_proto/proto_database.h"
+#include "components/leveldb_proto/public/proto_database.h"
#include "media/base/media_export.h"
#include "media/base/video_codecs.h"
#include "media/capabilities/video_decode_stats_db.h"
diff --git a/chromium/media/capabilities/learning_helper.cc b/chromium/media/capabilities/learning_helper.cc
new file mode 100644
index 00000000000..a7c0634cf07
--- /dev/null
+++ b/chromium/media/capabilities/learning_helper.cc
@@ -0,0 +1,78 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/capabilities/learning_helper.h"
+
+#include "media/learning/common/learning_task.h"
+
+namespace media {
+
+using learning::FeatureValue;
+using learning::LabelledExample;
+using learning::LearningTask;
+using learning::TargetValue;
+
+const char* kDroppedFrameRatioTaskName = "DroppedFrameRatioTask";
+
+LearningHelper::LearningHelper() {
+ // Register a few learning tasks.
+ //
+ // We only do this here since we own the session. Normally, whatever creates
+ // the session would register all the learning tasks.
+ LearningTask dropped_frame_task(
+ kDroppedFrameRatioTaskName, LearningTask::Model::kExtraTrees,
+ {
+ {"codec_profile",
+ ::media::learning::LearningTask::Ordering::kUnordered},
+ {"width", ::media::learning::LearningTask::Ordering::kNumeric},
+ {"height", ::media::learning::LearningTask::Ordering::kNumeric},
+ {"frame_rate", ::media::learning::LearningTask::Ordering::kNumeric},
+ },
+ LearningTask::ValueDescription(
+ {"dropped_ratio", LearningTask::Ordering::kNumeric}));
+ // Enable hacky reporting of accuracy.
+ dropped_frame_task.uma_hacky_confusion_matrix =
+ "Media.Learning.MediaCapabilities.DroppedFrameRatioTask.BaseTree";
+ learning_session_.RegisterTask(dropped_frame_task);
+}
+
+LearningHelper::~LearningHelper() = default;
+
+void LearningHelper::AppendStats(
+ const VideoDecodeStatsDB::VideoDescKey& video_key,
+ const VideoDecodeStatsDB::DecodeStatsEntry& new_stats) {
+ // If no frames were recorded, then do nothing.
+ if (new_stats.frames_decoded == 0)
+ return;
+
+ // Sanity.
+ if (new_stats.frames_dropped > new_stats.frames_decoded)
+ return;
+
+ // Add a training example for |new_stats|.
+ LabelledExample example;
+
+ // Extract features from |video_key|.
+ example.features.push_back(FeatureValue(video_key.codec_profile));
+ example.features.push_back(FeatureValue(video_key.size.width()));
+ example.features.push_back(FeatureValue(video_key.size.height()));
+ example.features.push_back(FeatureValue(video_key.frame_rate));
+ // TODO(liberato): Other features?
+
+ // Record the ratio of dropped frames to non-dropped frames. Weight this
+ // example by the total number of frames, since we want to predict the
+ // aggregate dropped frames ratio. That lets us compare with the current
+ // implementation directly.
+ //
+ // It's also not clear that we want to do this; we might want to weight each
+ // playback equally and predict the dropped frame ratio. For example, if
+ // there is a dependence on video length, then it's unclear that weighting
+ // the examples is the right thing to do.
+ example.target_value = TargetValue(
+ static_cast<double>(new_stats.frames_dropped) / new_stats.frames_decoded);
+ example.weight = new_stats.frames_decoded;
+ learning_session_.AddExample(kDroppedFrameRatioTaskName, example);
+}
+
+} // namespace media
diff --git a/chromium/media/capabilities/learning_helper.h b/chromium/media/capabilities/learning_helper.h
new file mode 100644
index 00000000000..27d8ca467a1
--- /dev/null
+++ b/chromium/media/capabilities/learning_helper.h
@@ -0,0 +1,35 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAPABILITIES_LEARNING_HELPER_H_
+#define MEDIA_CAPABILITIES_LEARNING_HELPER_H_
+
+#include "base/macros.h"
+#include "media/base/media_export.h"
+#include "media/capabilities/video_decode_stats_db.h"
+#include "media/learning/impl/learning_session_impl.h"
+
+namespace media {
+
+// Helper class to allow MediaCapabilities to log training examples to a
+// media::learning LearningTask.
+class MEDIA_EXPORT LearningHelper {
+ public:
+ LearningHelper();
+ ~LearningHelper();
+
+ void AppendStats(const VideoDecodeStatsDB::VideoDescKey& video_key,
+ const VideoDecodeStatsDB::DecodeStatsEntry& new_stats);
+
+ private:
+ // Learning session for our profile. Normally, we'd not have one of these
+ // directly, but would instead get one that's connected to a browser profile.
+ // For now, however, we just instantiate one and assume that we'll be
+ // destroyed when the profile changes / history is cleared.
+ learning::LearningSessionImpl learning_session_;
+};
+
+} // namespace media
+
+#endif // MEDIA_CAPABILITIES_LEARNING_HELPER_H_
diff --git a/chromium/media/capabilities/video_decode_stats_db_impl.cc b/chromium/media/capabilities/video_decode_stats_db_impl.cc
index e5640e38672..67398707c17 100644
--- a/chromium/media/capabilities/video_decode_stats_db_impl.cc
+++ b/chromium/media/capabilities/video_decode_stats_db_impl.cc
@@ -15,7 +15,7 @@
#include "base/sequence_checker.h"
#include "base/task/post_task.h"
#include "base/time/default_clock.h"
-#include "components/leveldb_proto/proto_database_impl.h"
+#include "components/leveldb_proto/public/proto_database_provider.h"
#include "media/base/media_switches.h"
#include "media/capabilities/video_decode_stats.pb.h"
@@ -61,7 +61,7 @@ std::unique_ptr<VideoDecodeStatsDBImpl> VideoDecodeStatsDBImpl::Create(
DVLOG(2) << __func__ << " db_dir:" << db_dir;
auto proto_db =
- std::make_unique<leveldb_proto::ProtoDatabaseImpl<DecodeStatsProto>>(
+ leveldb_proto::ProtoDatabaseProvider::CreateUniqueDB<DecodeStatsProto>(
base::CreateSequencedTaskRunnerWithTraits(
{base::MayBlock(), base::TaskPriority::BEST_EFFORT,
base::TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN}));
diff --git a/chromium/media/capabilities/video_decode_stats_db_impl.h b/chromium/media/capabilities/video_decode_stats_db_impl.h
index bec7fec7dba..1dd9740db57 100644
--- a/chromium/media/capabilities/video_decode_stats_db_impl.h
+++ b/chromium/media/capabilities/video_decode_stats_db_impl.h
@@ -9,7 +9,7 @@
#include "base/files/file_path.h"
#include "base/memory/weak_ptr.h"
-#include "components/leveldb_proto/proto_database.h"
+#include "components/leveldb_proto/public/proto_database.h"
#include "media/base/media_export.h"
#include "media/base/video_codecs.h"
#include "media/capabilities/video_decode_stats_db.h"
diff --git a/chromium/media/capture/BUILD.gn b/chromium/media/capture/BUILD.gn
index 43eb62bf1c5..fd588806036 100644
--- a/chromium/media/capture/BUILD.gn
+++ b/chromium/media/capture/BUILD.gn
@@ -97,6 +97,8 @@ jumbo_component("capture_lib") {
"video/create_video_capture_device_factory.cc",
"video/create_video_capture_device_factory.h",
"video/scoped_buffer_pool_reservation.h",
+ "video/scoped_video_capture_jpeg_decoder.cc",
+ "video/scoped_video_capture_jpeg_decoder.h",
"video/shared_memory_buffer_tracker.cc",
"video/shared_memory_buffer_tracker.h",
"video/shared_memory_handle_provider.cc",
@@ -200,7 +202,7 @@ jumbo_component("capture_lib") {
"video/win/video_capture_device_win.cc",
"video/win/video_capture_device_win.h",
]
- deps += [ "//media/base/win" ]
+ deps += [ "//media/base/win:media_foundation_util" ]
libs = [
"mf.lib",
"mfplat.lib",
@@ -268,7 +270,7 @@ jumbo_component("capture_lib") {
public_deps += [ "//media/capture/video/chromeos/public" ]
deps += [
"//build/config/linux/libdrm",
- "//chromeos:chromeos",
+ "//chromeos/dbus",
"//media/capture/video/chromeos/mojo:cros_camera",
"//third_party/libsync",
]
@@ -281,6 +283,7 @@ jumbo_component("capture_lib") {
]
}
}
+
source_set("test_support") {
testonly = true
@@ -392,8 +395,9 @@ test("capture_unittests") {
]
deps += [
"//build/config/linux/libdrm",
- "//chromeos:chromeos",
+ "//chromeos/dbus:test_support",
"//media/capture/video/chromeos/mojo:cros_camera",
+ "//media/capture/video/chromeos/public",
"//mojo/core/embedder",
"//third_party/libsync",
"//third_party/minigbm",
diff --git a/chromium/media/capture/content/capture_resolution_chooser_unittest.cc b/chromium/media/capture/content/capture_resolution_chooser_unittest.cc
index 988756bbf7e..8b3e43ee4b1 100644
--- a/chromium/media/capture/content/capture_resolution_chooser_unittest.cc
+++ b/chromium/media/capture/content/capture_resolution_chooser_unittest.cc
@@ -7,7 +7,7 @@
#include <stddef.h>
#include "base/location.h"
-#include "base/macros.h"
+#include "base/stl_util.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "ui/gfx/geometry/size.h"
@@ -88,7 +88,7 @@ void TestSnappedFrameSizes(CaptureResolutionChooser* chooser,
// Test the "find Nth lower size" logic.
for (size_t skips = 1; skips < 4; ++skips) {
- for (size_t i = skips; i < arraysize(kSizes); ++i) {
+ for (size_t i = skips; i < base::size(kSizes); ++i) {
EXPECT_EQ(
gfx::Size(kSizes[i][0], kSizes[i][1]),
chooser->FindSmallerFrameSize(
@@ -99,7 +99,7 @@ void TestSnappedFrameSizes(CaptureResolutionChooser* chooser,
// Test the "find Nth higher size" logic.
for (size_t skips = 1; skips < 4; ++skips) {
- for (size_t i = skips; i < arraysize(kSizes); ++i) {
+ for (size_t i = skips; i < base::size(kSizes); ++i) {
EXPECT_EQ(gfx::Size(kSizes[i - skips][0], kSizes[i - skips][1]),
chooser->FindLargerFrameSize(
gfx::Size(kSizes[i][0], kSizes[i][1]).GetArea(), skips));
@@ -107,7 +107,7 @@ void TestSnappedFrameSizes(CaptureResolutionChooser* chooser,
}
// Test the "find nearest size" logic.
- for (size_t i = 1; i < arraysize(kSizes) - 1; ++i) {
+ for (size_t i = 1; i < base::size(kSizes) - 1; ++i) {
const gfx::Size size(kSizes[i][0], kSizes[i][1]);
const int a_somewhat_smaller_area =
gfx::Size((kSizes[i - 1][0] + 3 * kSizes[i][0]) / 4,
diff --git a/chromium/media/capture/content/smooth_event_sampler_unittest.cc b/chromium/media/capture/content/smooth_event_sampler_unittest.cc
index abd7d4ccfa7..4de65f7df7d 100644
--- a/chromium/media/capture/content/smooth_event_sampler_unittest.cc
+++ b/chromium/media/capture/content/smooth_event_sampler_unittest.cc
@@ -7,7 +7,7 @@
#include <stddef.h>
#include <stdint.h>
-#include "base/macros.h"
+#include "base/stl_util.h"
#include "base/strings/stringprintf.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -369,7 +369,8 @@ TEST(SmoothEventSamplerTest, DrawingAt24FpsWith60HzVsyncSampledAt30Hertz) {
{false, 0}};
SmoothEventSampler sampler(base::TimeDelta::FromSeconds(1) / 30);
- ReplayCheckingSamplerDecisions(data_points, arraysize(data_points), &sampler);
+ ReplayCheckingSamplerDecisions(data_points, base::size(data_points),
+ &sampler);
}
TEST(SmoothEventSamplerTest, DrawingAt30FpsWith60HzVsyncSampledAt30Hertz) {
@@ -478,7 +479,8 @@ TEST(SmoothEventSamplerTest, DrawingAt30FpsWith60HzVsyncSampledAt30Hertz) {
{true, 33.44}};
SmoothEventSampler sampler(base::TimeDelta::FromSeconds(1) / 30);
- ReplayCheckingSamplerDecisions(data_points, arraysize(data_points), &sampler);
+ ReplayCheckingSamplerDecisions(data_points, base::size(data_points),
+ &sampler);
}
TEST(SmoothEventSamplerTest, DrawingAt60FpsWith60HzVsyncSampledAt30Hertz) {
@@ -611,7 +613,8 @@ TEST(SmoothEventSamplerTest, DrawingAt60FpsWith60HzVsyncSampledAt30Hertz) {
{true, 50.16}};
SmoothEventSampler sampler(base::TimeDelta::FromSeconds(1) / 30);
- ReplayCheckingSamplerDecisions(data_points, arraysize(data_points), &sampler);
+ ReplayCheckingSamplerDecisions(data_points, base::size(data_points),
+ &sampler);
}
} // namespace media
diff --git a/chromium/media/capture/mojom/video_capture_types.mojom b/chromium/media/capture/mojom/video_capture_types.mojom
index 36f64e59ece..8dd373122e0 100644
--- a/chromium/media/capture/mojom/video_capture_types.mojom
+++ b/chromium/media/capture/mojom/video_capture_types.mojom
@@ -40,6 +40,7 @@ enum VideoCapturePixelFormat {
Y16,
ABGR,
XBGR,
+ P016LE,
};
enum ResolutionChangePolicy {
diff --git a/chromium/media/capture/mojom/video_capture_types_mojom_traits.cc b/chromium/media/capture/mojom/video_capture_types_mojom_traits.cc
index f35e8aa2c6c..1220ecc4b70 100644
--- a/chromium/media/capture/mojom/video_capture_types_mojom_traits.cc
+++ b/chromium/media/capture/mojom/video_capture_types_mojom_traits.cc
@@ -143,6 +143,8 @@ EnumTraits<media::mojom::VideoCapturePixelFormat,
return media::mojom::VideoCapturePixelFormat::ABGR;
case media::VideoPixelFormat::PIXEL_FORMAT_XBGR:
return media::mojom::VideoCapturePixelFormat::XBGR;
+ case media::VideoPixelFormat::PIXEL_FORMAT_P016LE:
+ return media::mojom::VideoCapturePixelFormat::P016LE;
}
NOTREACHED();
return media::mojom::VideoCapturePixelFormat::I420;
@@ -238,6 +240,9 @@ bool EnumTraits<media::mojom::VideoCapturePixelFormat,
case media::mojom::VideoCapturePixelFormat::XBGR:
*output = media::PIXEL_FORMAT_XBGR;
return true;
+ case media::mojom::VideoCapturePixelFormat::P016LE:
+ *output = media::PIXEL_FORMAT_P016LE;
+ return true;
}
NOTREACHED();
return false;
diff --git a/chromium/media/capture/video/android/java/src/org/chromium/media/VideoCaptureCamera2.java b/chromium/media/capture/video/android/java/src/org/chromium/media/VideoCaptureCamera2.java
index d558a12d503..256bce94c7e 100644
--- a/chromium/media/capture/video/android/java/src/org/chromium/media/VideoCaptureCamera2.java
+++ b/chromium/media/capture/video/android/java/src/org/chromium/media/VideoCaptureCamera2.java
@@ -33,6 +33,7 @@ import android.view.Surface;
import org.chromium.base.ContextUtils;
import org.chromium.base.Log;
+import org.chromium.base.TraceEvent;
import org.chromium.base.annotations.JNINamespace;
import java.nio.ByteBuffer;
@@ -234,10 +235,13 @@ public class VideoCaptureCamera2 extends VideoCapture {
@Override
public void onConfigured(CameraCaptureSession session) {
+ TraceEvent.instant("VideoCaptureCamera2.java", "CrPhotoSessionListener.onConfigured");
assert mCameraThreadHandler.getLooper() == Looper.myLooper() : "called on wrong thread";
Log.d(TAG, "CrPhotoSessionListener.onConfigured");
try {
+ TraceEvent.instant(
+ "VideoCaptureCamera2.java", "Calling CameraCaptureSession.capture()");
// This line triggers a single photo capture. No |listener| is registered, so we
// will get notified via a CrPhotoSessionListener. Since |handler| is null, we'll
// work on the current Thread Looper.
@@ -292,6 +296,8 @@ public class VideoCaptureCamera2 extends VideoCapture {
@Override
public void onImageAvailable(ImageReader reader) {
+ TraceEvent.instant(
+ "VideoCaptureCamera2.java", "CrPhotoReaderListener.onImageAvailable");
assert mCameraThreadHandler.getLooper() == Looper.myLooper() : "called on wrong thread";
try (Image image = reader.acquireLatestImage()) {
@@ -817,6 +823,7 @@ public class VideoCaptureCamera2 extends VideoCapture {
@Override
public void run() {
assert mCameraThreadHandler.getLooper() == Looper.myLooper() : "called on wrong thread";
+ TraceEvent.instant("VideoCaptureCamera2.java", "TakePhotoTask.run");
if (mCameraDevice == null || mCameraState != CameraState.STARTED) {
Log.e(TAG,
@@ -837,6 +844,8 @@ public class VideoCaptureCamera2 extends VideoCapture {
if (closestSize != null) {
Log.d(TAG, " matched (%dx%d)", closestSize.getWidth(), closestSize.getHeight());
}
+ TraceEvent.instant(
+ "VideoCaptureCamera2.java", "TakePhotoTask.run creating ImageReader");
final ImageReader imageReader = ImageReader.newInstance(
(closestSize != null) ? closestSize.getWidth() : mCaptureFormat.getWidth(),
(closestSize != null) ? closestSize.getHeight() : mCaptureFormat.getHeight(),
@@ -867,12 +876,18 @@ public class VideoCaptureCamera2 extends VideoCapture {
photoRequestBuilder.addTarget(imageReader.getSurface());
photoRequestBuilder.set(CaptureRequest.JPEG_ORIENTATION, getCameraRotation());
+ TraceEvent.instant("VideoCaptureCamera2.java",
+ "TakePhotoTask.run calling configureCommonCaptureSettings");
configureCommonCaptureSettings(photoRequestBuilder);
+ TraceEvent.instant("VideoCaptureCamera2.java",
+ "TakePhotoTask.run calling photoRequestBuilder.build()");
final CaptureRequest photoRequest = photoRequestBuilder.build();
final CrPhotoSessionListener sessionListener =
new CrPhotoSessionListener(imageReader, photoRequest, mCallbackId);
try {
+ TraceEvent.instant("VideoCaptureCamera2.java",
+ "TakePhotoTask.run calling mCameraDevice.createCaptureSession()");
mCameraDevice.createCaptureSession(
surfaceList, sessionListener, mCameraThreadHandler);
} catch (CameraAccessException | IllegalArgumentException | SecurityException ex) {
@@ -971,199 +986,213 @@ public class VideoCaptureCamera2 extends VideoCapture {
assert mCameraThreadHandler.getLooper() == Looper.myLooper() : "called on wrong thread";
if (mCameraDevice == null) return false;
- // Create an ImageReader and plug a thread looper into it to have
- // readback take place on its own thread.
- mImageReader = ImageReader.newInstance(mCaptureFormat.getWidth(),
- mCaptureFormat.getHeight(), mCaptureFormat.getPixelFormat(), 2 /* maxImages */);
- final CrPreviewReaderListener imageReaderListener = new CrPreviewReaderListener();
- mImageReader.setOnImageAvailableListener(imageReaderListener, mCameraThreadHandler);
+ try (TraceEvent trace_event = TraceEvent.scoped(
+ "VideoCaptureCamera2.createPreviewObjectsAndStartPreview")) {
+ // Create an ImageReader and plug a thread looper into it to have
+ // readback take place on its own thread.
+ mImageReader = ImageReader.newInstance(mCaptureFormat.getWidth(),
+ mCaptureFormat.getHeight(), mCaptureFormat.getPixelFormat(), 2 /* maxImages */);
+ final CrPreviewReaderListener imageReaderListener = new CrPreviewReaderListener();
+ mImageReader.setOnImageAvailableListener(imageReaderListener, mCameraThreadHandler);
- try {
- // TEMPLATE_PREVIEW specifically means "high frame rate is given
- // priority over the highest-quality post-processing".
- mPreviewRequestBuilder =
- mCameraDevice.createCaptureRequest(CameraDevice.TEMPLATE_PREVIEW);
- } catch (CameraAccessException | IllegalArgumentException | SecurityException ex) {
- Log.e(TAG, "createCaptureRequest: ", ex);
- return false;
- }
+ try {
+ // TEMPLATE_PREVIEW specifically means "high frame rate is given
+ // priority over the highest-quality post-processing".
+ mPreviewRequestBuilder =
+ mCameraDevice.createCaptureRequest(CameraDevice.TEMPLATE_PREVIEW);
+ } catch (CameraAccessException | IllegalArgumentException | SecurityException ex) {
+ Log.e(TAG, "createCaptureRequest: ", ex);
+ return false;
+ }
- if (mPreviewRequestBuilder == null) {
- Log.e(TAG, "mPreviewRequestBuilder error");
- return false;
- }
+ if (mPreviewRequestBuilder == null) {
+ Log.e(TAG, "mPreviewRequestBuilder error");
+ return false;
+ }
- // Construct an ImageReader Surface and plug it into our CaptureRequest.Builder.
- mPreviewRequestBuilder.addTarget(mImageReader.getSurface());
+ // Construct an ImageReader Surface and plug it into our CaptureRequest.Builder.
+ mPreviewRequestBuilder.addTarget(mImageReader.getSurface());
- // A series of configuration options in the PreviewBuilder
- mPreviewRequestBuilder.set(CaptureRequest.CONTROL_MODE, CameraMetadata.CONTROL_MODE_AUTO);
- mPreviewRequestBuilder.set(
- CaptureRequest.NOISE_REDUCTION_MODE, CameraMetadata.NOISE_REDUCTION_MODE_FAST);
- mPreviewRequestBuilder.set(CaptureRequest.EDGE_MODE, CameraMetadata.EDGE_MODE_FAST);
+ // A series of configuration options in the PreviewBuilder
+ mPreviewRequestBuilder.set(
+ CaptureRequest.CONTROL_MODE, CameraMetadata.CONTROL_MODE_AUTO);
+ mPreviewRequestBuilder.set(
+ CaptureRequest.NOISE_REDUCTION_MODE, CameraMetadata.NOISE_REDUCTION_MODE_FAST);
+ mPreviewRequestBuilder.set(CaptureRequest.EDGE_MODE, CameraMetadata.EDGE_MODE_FAST);
- // Depending on the resolution and other parameters, stabilization might not be available,
- // see https://crbug.com/718387.
- // https://developer.android.com/reference/android/hardware/camera2/CaptureRequest.html#CONTROL_VIDEO_STABILIZATION_MODE
- final CameraCharacteristics cameraCharacteristics = getCameraCharacteristics(mId);
- final int[] stabilizationModes = cameraCharacteristics.get(
- CameraCharacteristics.CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES);
- for (int mode : stabilizationModes) {
- if (mode == CameraMetadata.CONTROL_VIDEO_STABILIZATION_MODE_ON) {
- mPreviewRequestBuilder.set(CaptureRequest.CONTROL_VIDEO_STABILIZATION_MODE,
- CameraMetadata.CONTROL_VIDEO_STABILIZATION_MODE_ON);
- break;
+ // Depending on the resolution and other parameters, stabilization might not be
+ // available, see https://crbug.com/718387.
+ // https://developer.android.com/reference/android/hardware/camera2/CaptureRequest.html#CONTROL_VIDEO_STABILIZATION_MODE
+ final CameraCharacteristics cameraCharacteristics = getCameraCharacteristics(mId);
+ final int[] stabilizationModes = cameraCharacteristics.get(
+ CameraCharacteristics.CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES);
+ for (int mode : stabilizationModes) {
+ if (mode == CameraMetadata.CONTROL_VIDEO_STABILIZATION_MODE_ON) {
+ mPreviewRequestBuilder.set(CaptureRequest.CONTROL_VIDEO_STABILIZATION_MODE,
+ CameraMetadata.CONTROL_VIDEO_STABILIZATION_MODE_ON);
+ break;
+ }
}
- }
- configureCommonCaptureSettings(mPreviewRequestBuilder);
+ configureCommonCaptureSettings(mPreviewRequestBuilder);
- // Overwrite settings to enable face detection.
- if (mEnableFaceDetection) {
- mPreviewRequestBuilder.set(
- CaptureRequest.CONTROL_MODE, CameraMetadata.CONTROL_MODE_USE_SCENE_MODE);
- mPreviewRequestBuilder.set(CaptureRequest.CONTROL_SCENE_MODE,
- CameraMetadata.CONTROL_SCENE_MODE_FACE_PRIORITY);
- }
+ // Overwrite settings to enable face detection.
+ if (mEnableFaceDetection) {
+ mPreviewRequestBuilder.set(
+ CaptureRequest.CONTROL_MODE, CameraMetadata.CONTROL_MODE_USE_SCENE_MODE);
+ mPreviewRequestBuilder.set(CaptureRequest.CONTROL_SCENE_MODE,
+ CameraMetadata.CONTROL_SCENE_MODE_FACE_PRIORITY);
+ }
- List<Surface> surfaceList = new ArrayList<Surface>(1);
- // TODO(mcasas): release this Surface when not needed, https://crbug.com/643884.
- surfaceList.add(mImageReader.getSurface());
+ List<Surface> surfaceList = new ArrayList<Surface>(1);
+ // TODO(mcasas): release this Surface when not needed, https://crbug.com/643884.
+ surfaceList.add(mImageReader.getSurface());
- mPreviewRequest = mPreviewRequestBuilder.build();
+ mPreviewRequest = mPreviewRequestBuilder.build();
- try {
- mCameraDevice.createCaptureSession(
- surfaceList, new CrPreviewSessionListener(mPreviewRequest), null);
- } catch (CameraAccessException | IllegalArgumentException | SecurityException ex) {
- Log.e(TAG, "createCaptureSession: ", ex);
- return false;
+ try {
+ mCameraDevice.createCaptureSession(
+ surfaceList, new CrPreviewSessionListener(mPreviewRequest), null);
+ } catch (CameraAccessException | IllegalArgumentException | SecurityException ex) {
+ Log.e(TAG, "createCaptureSession: ", ex);
+ return false;
+ }
+ // Wait for trigger on CrPreviewSessionListener.onConfigured();
+ return true;
}
- // Wait for trigger on CrPreviewSessionListener.onConfigured();
- return true;
}
private void configureCommonCaptureSettings(CaptureRequest.Builder requestBuilder) {
assert mCameraThreadHandler.getLooper() == Looper.myLooper() : "called on wrong thread";
+ try (TraceEvent trace_event =
+ TraceEvent.scoped("VideoCaptureCamera2.configureCommonCaptureSettings")) {
+ final CameraCharacteristics cameraCharacteristics = getCameraCharacteristics(mId);
- final CameraCharacteristics cameraCharacteristics = getCameraCharacteristics(mId);
+ // |mFocusMode| indicates if we're in auto/continuous, single-shot or manual mode.
+ // AndroidMeteringMode.SINGLE_SHOT is dealt with independently since it needs to be
+ // triggered by a capture.
+ if (mFocusMode == AndroidMeteringMode.CONTINUOUS) {
+ requestBuilder.set(CaptureRequest.CONTROL_AF_MODE,
+ CameraMetadata.CONTROL_AF_MODE_CONTINUOUS_PICTURE);
+ requestBuilder.set(
+ CaptureRequest.CONTROL_AF_TRIGGER, CameraMetadata.CONTROL_AF_TRIGGER_IDLE);
+ } else if (mFocusMode == AndroidMeteringMode.FIXED) {
+ requestBuilder.set(
+ CaptureRequest.CONTROL_AF_MODE, CameraMetadata.CONTROL_AF_MODE_OFF);
+ requestBuilder.set(
+ CaptureRequest.CONTROL_AF_TRIGGER, CameraMetadata.CONTROL_AF_TRIGGER_IDLE);
+ requestBuilder.set(CaptureRequest.LENS_FOCUS_DISTANCE, 1 / mCurrentFocusDistance);
+ }
- // |mFocusMode| indicates if we're in auto/continuous, single-shot or manual mode.
- // AndroidMeteringMode.SINGLE_SHOT is dealt with independently since it needs to be
- // triggered by a capture.
- if (mFocusMode == AndroidMeteringMode.CONTINUOUS) {
- requestBuilder.set(CaptureRequest.CONTROL_AF_MODE,
- CameraMetadata.CONTROL_AF_MODE_CONTINUOUS_PICTURE);
- requestBuilder.set(
- CaptureRequest.CONTROL_AF_TRIGGER, CameraMetadata.CONTROL_AF_TRIGGER_IDLE);
- } else if (mFocusMode == AndroidMeteringMode.FIXED) {
- requestBuilder.set(CaptureRequest.CONTROL_AF_MODE, CameraMetadata.CONTROL_AF_MODE_OFF);
- requestBuilder.set(
- CaptureRequest.CONTROL_AF_TRIGGER, CameraMetadata.CONTROL_AF_TRIGGER_IDLE);
- requestBuilder.set(CaptureRequest.LENS_FOCUS_DISTANCE, 1 / mCurrentFocusDistance);
- }
+ // |mExposureMode|, |mFillLightMode| and |mTorch| interact to configure the AE and Flash
+ // modes. In a nutshell, FLASH_MODE is only effective if the auto-exposure is ON/OFF,
+ // otherwise the auto-exposure related flash control (ON_{AUTO,ALWAYS}_FLASH{_REDEYE)
+ // takes priority. |mTorch| mode overrides any previous |mFillLightMode| flash control.
+ if (mExposureMode == AndroidMeteringMode.NONE
+ || mExposureMode == AndroidMeteringMode.FIXED) {
+ requestBuilder.set(
+ CaptureRequest.CONTROL_AE_MODE, CameraMetadata.CONTROL_AE_MODE_OFF);
+
+ // We need to configure by hand the exposure time when AE mode is off. Set it to
+ // the last known exposure interval if known, otherwise set it to the middle of the
+ // allowed range. Further tuning will be done via |mIso| and
+ // |mExposureCompensation|. mLastExposureTimeNs and range are in nanoseconds (from
+ // Android platform), but spec expects exposureTime to be in 100 microsecond units.
+ // https://w3c.github.io/mediacapture-image/#exposure-time
+ if (mLastExposureTimeNs != 0) {
+ requestBuilder.set(CaptureRequest.SENSOR_EXPOSURE_TIME,
+ mLastExposureTimeNs / kNanosecondsPer100Microsecond);
+ } else {
+ Range<Long> range = cameraCharacteristics.get(
+ CameraCharacteristics.SENSOR_INFO_EXPOSURE_TIME_RANGE);
+ requestBuilder.set(CaptureRequest.SENSOR_EXPOSURE_TIME,
+ (range.getLower() + (range.getUpper() + range.getLower()) / 2)
+ / kNanosecondsPer100Microsecond);
+ }
- // |mExposureMode|, |mFillLightMode| and |mTorch| interact to configure the AE and Flash
- // modes. In a nutshell, FLASH_MODE is only effective if the auto-exposure is ON/OFF,
- // otherwise the auto-exposure related flash control (ON_{AUTO,ALWAYS}_FLASH{_REDEYE) takes
- // priority. |mTorch| mode overrides any previous |mFillLightMode| flash control.
- if (mExposureMode == AndroidMeteringMode.NONE
- || mExposureMode == AndroidMeteringMode.FIXED) {
- requestBuilder.set(CaptureRequest.CONTROL_AE_MODE, CameraMetadata.CONTROL_AE_MODE_OFF);
-
- // We need to configure by hand the exposure time when AE mode is off. Set it to the
- // last known exposure interval if known, otherwise set it to the middle of the allowed
- // range. Further tuning will be done via |mIso| and |mExposureCompensation|.
- // mLastExposureTimeNs and range are in nanoseconds (from Android platform), but spec
- // expects exposureTime to be in 100 microsecond units.
- // https://w3c.github.io/mediacapture-image/#exposure-time
- if (mLastExposureTimeNs != 0) {
- requestBuilder.set(CaptureRequest.SENSOR_EXPOSURE_TIME,
- mLastExposureTimeNs / kNanosecondsPer100Microsecond);
} else {
- Range<Long> range = cameraCharacteristics.get(
- CameraCharacteristics.SENSOR_INFO_EXPOSURE_TIME_RANGE);
- requestBuilder.set(CaptureRequest.SENSOR_EXPOSURE_TIME,
- (range.getLower() + (range.getUpper() + range.getLower()) / 2)
- / kNanosecondsPer100Microsecond);
+ requestBuilder.set(CaptureRequest.CONTROL_MODE, CaptureRequest.CONTROL_MODE_AUTO);
+ requestBuilder.set(
+ CaptureRequest.CONTROL_AE_MODE, CameraMetadata.CONTROL_AE_MODE_ON);
+ if (!shouldSkipSettingAeTargetFpsRange()) {
+ requestBuilder.set(CaptureRequest.CONTROL_AE_TARGET_FPS_RANGE, mAeFpsRange);
+ }
}
- } else {
- requestBuilder.set(CaptureRequest.CONTROL_MODE, CaptureRequest.CONTROL_MODE_AUTO);
- requestBuilder.set(CaptureRequest.CONTROL_AE_MODE, CameraMetadata.CONTROL_AE_MODE_ON);
- if (!shouldSkipSettingAeTargetFpsRange()) {
- requestBuilder.set(CaptureRequest.CONTROL_AE_TARGET_FPS_RANGE, mAeFpsRange);
+ if (mTorch) {
+ requestBuilder.set(CaptureRequest.CONTROL_AE_MODE,
+ mExposureMode == AndroidMeteringMode.CONTINUOUS
+ ? CameraMetadata.CONTROL_AE_MODE_ON
+ : CameraMetadata.CONTROL_AE_MODE_OFF);
+ requestBuilder.set(CaptureRequest.FLASH_MODE, CameraMetadata.FLASH_MODE_TORCH);
+ } else {
+ switch (mFillLightMode) {
+ case AndroidFillLightMode.OFF:
+ requestBuilder.set(
+ CaptureRequest.FLASH_MODE, CameraMetadata.FLASH_MODE_OFF);
+ break;
+ case AndroidFillLightMode.AUTO:
+ // Setting the AE to CONTROL_AE_MODE_ON_AUTO_FLASH[_REDEYE] overrides
+ // FLASH_MODE.
+ requestBuilder.set(CaptureRequest.CONTROL_AE_MODE,
+ mRedEyeReduction
+ ? CameraMetadata.CONTROL_AE_MODE_ON_AUTO_FLASH_REDEYE
+ : CameraMetadata.CONTROL_AE_MODE_ON_AUTO_FLASH);
+ break;
+ case AndroidFillLightMode.FLASH:
+ // Setting the AE to CONTROL_AE_MODE_ON_ALWAYS_FLASH overrides FLASH_MODE.
+ requestBuilder.set(CaptureRequest.CONTROL_AE_MODE,
+ CameraMetadata.CONTROL_AE_MODE_ON_ALWAYS_FLASH);
+ requestBuilder.set(
+ CaptureRequest.FLASH_MODE, CameraMetadata.FLASH_MODE_SINGLE);
+ break;
+ default:
+ }
+ requestBuilder.set(CaptureRequest.CONTROL_AE_PRECAPTURE_TRIGGER,
+ CaptureRequest.CONTROL_AE_PRECAPTURE_TRIGGER_IDLE);
}
- }
- if (mTorch) {
- requestBuilder.set(CaptureRequest.CONTROL_AE_MODE,
- mExposureMode == AndroidMeteringMode.CONTINUOUS
- ? CameraMetadata.CONTROL_AE_MODE_ON
- : CameraMetadata.CONTROL_AE_MODE_OFF);
- requestBuilder.set(CaptureRequest.FLASH_MODE, CameraMetadata.FLASH_MODE_TORCH);
- } else {
- switch (mFillLightMode) {
- case AndroidFillLightMode.OFF:
- requestBuilder.set(CaptureRequest.FLASH_MODE, CameraMetadata.FLASH_MODE_OFF);
- break;
- case AndroidFillLightMode.AUTO:
- // Setting the AE to CONTROL_AE_MODE_ON_AUTO_FLASH[_REDEYE] overrides
- // FLASH_MODE.
- requestBuilder.set(CaptureRequest.CONTROL_AE_MODE,
- mRedEyeReduction ? CameraMetadata.CONTROL_AE_MODE_ON_AUTO_FLASH_REDEYE
- : CameraMetadata.CONTROL_AE_MODE_ON_AUTO_FLASH);
- break;
- case AndroidFillLightMode.FLASH:
- // Setting the AE to CONTROL_AE_MODE_ON_ALWAYS_FLASH overrides FLASH_MODE.
- requestBuilder.set(CaptureRequest.CONTROL_AE_MODE,
- CameraMetadata.CONTROL_AE_MODE_ON_ALWAYS_FLASH);
- requestBuilder.set(CaptureRequest.FLASH_MODE, CameraMetadata.FLASH_MODE_SINGLE);
- break;
- default:
+ requestBuilder.set(
+ CaptureRequest.CONTROL_AE_EXPOSURE_COMPENSATION, mExposureCompensation);
+
+ // White Balance mode AndroidMeteringMode.SINGLE_SHOT is not supported.
+ if (mWhiteBalanceMode == AndroidMeteringMode.CONTINUOUS) {
+ requestBuilder.set(CaptureRequest.CONTROL_AWB_LOCK, false);
+ requestBuilder.set(
+ CaptureRequest.CONTROL_AWB_MODE, CameraMetadata.CONTROL_AWB_MODE_AUTO);
+ // TODO(mcasas): support different luminant color temperatures, e.g. DAYLIGHT,
+ // SHADE. https://crbug.com/518807
+ } else if (mWhiteBalanceMode == AndroidMeteringMode.NONE) {
+ requestBuilder.set(CaptureRequest.CONTROL_AWB_LOCK, false);
+ requestBuilder.set(
+ CaptureRequest.CONTROL_AWB_MODE, CameraMetadata.CONTROL_AWB_MODE_OFF);
+ } else if (mWhiteBalanceMode == AndroidMeteringMode.FIXED) {
+ requestBuilder.set(CaptureRequest.CONTROL_AWB_LOCK, true);
+ }
+ if (mColorTemperature > 0) {
+ final int colorSetting = getClosestWhiteBalance(mColorTemperature,
+ cameraCharacteristics.get(
+ CameraCharacteristics.CONTROL_AWB_AVAILABLE_MODES));
+ Log.d(TAG, " Color temperature (%d ==> %d)", mColorTemperature, colorSetting);
+ if (colorSetting != -1) {
+ requestBuilder.set(CaptureRequest.CONTROL_AWB_MODE, colorSetting);
+ }
}
- requestBuilder.set(CaptureRequest.CONTROL_AE_PRECAPTURE_TRIGGER,
- CaptureRequest.CONTROL_AE_PRECAPTURE_TRIGGER_IDLE);
- }
-
- requestBuilder.set(CaptureRequest.CONTROL_AE_EXPOSURE_COMPENSATION, mExposureCompensation);
- // White Balance mode AndroidMeteringMode.SINGLE_SHOT is not supported.
- if (mWhiteBalanceMode == AndroidMeteringMode.CONTINUOUS) {
- requestBuilder.set(CaptureRequest.CONTROL_AWB_LOCK, false);
- requestBuilder.set(
- CaptureRequest.CONTROL_AWB_MODE, CameraMetadata.CONTROL_AWB_MODE_AUTO);
- // TODO(mcasas): support different luminant color temperatures, e.g. DAYLIGHT, SHADE.
- // https://crbug.com/518807
- } else if (mWhiteBalanceMode == AndroidMeteringMode.NONE) {
- requestBuilder.set(CaptureRequest.CONTROL_AWB_LOCK, false);
- requestBuilder.set(
- CaptureRequest.CONTROL_AWB_MODE, CameraMetadata.CONTROL_AWB_MODE_OFF);
- } else if (mWhiteBalanceMode == AndroidMeteringMode.FIXED) {
- requestBuilder.set(CaptureRequest.CONTROL_AWB_LOCK, true);
- }
- if (mColorTemperature > 0) {
- final int colorSetting = getClosestWhiteBalance(mColorTemperature,
- cameraCharacteristics.get(CameraCharacteristics.CONTROL_AWB_AVAILABLE_MODES));
- Log.d(TAG, " Color temperature (%d ==> %d)", mColorTemperature, colorSetting);
- if (colorSetting != -1) {
- requestBuilder.set(CaptureRequest.CONTROL_AWB_MODE, colorSetting);
+ if (mAreaOfInterest != null) {
+ MeteringRectangle[] array = {mAreaOfInterest};
+ Log.d(TAG, "Area of interest %s", mAreaOfInterest.toString());
+ requestBuilder.set(CaptureRequest.CONTROL_AF_REGIONS, array);
+ requestBuilder.set(CaptureRequest.CONTROL_AE_REGIONS, array);
+ requestBuilder.set(CaptureRequest.CONTROL_AWB_REGIONS, array);
}
- }
- if (mAreaOfInterest != null) {
- MeteringRectangle[] array = {mAreaOfInterest};
- Log.d(TAG, "Area of interest %s", mAreaOfInterest.toString());
- requestBuilder.set(CaptureRequest.CONTROL_AF_REGIONS, array);
- requestBuilder.set(CaptureRequest.CONTROL_AE_REGIONS, array);
- requestBuilder.set(CaptureRequest.CONTROL_AWB_REGIONS, array);
- }
+ if (!mCropRect.isEmpty()) {
+ requestBuilder.set(CaptureRequest.SCALER_CROP_REGION, mCropRect);
+ }
- if (!mCropRect.isEmpty()) {
- requestBuilder.set(CaptureRequest.SCALER_CROP_REGION, mCropRect);
+ if (mIso > 0) requestBuilder.set(CaptureRequest.SENSOR_SENSITIVITY, mIso);
}
-
- if (mIso > 0) requestBuilder.set(CaptureRequest.SENSOR_SENSITIVITY, mIso);
}
private void changeCameraStateAndNotify(CameraState state) {
@@ -1431,6 +1460,8 @@ public class VideoCaptureCamera2 extends VideoCapture {
final CrStateListener stateListener = new CrStateListener();
try {
+ TraceEvent.instant("VideoCaptureCamera2.java",
+ "VideoCaptureCamera2.startCaptureMaybeAsync calling manager.openCamera");
manager.openCamera(Integer.toString(mId), stateListener, mCameraThreadHandler);
} catch (CameraAccessException | IllegalArgumentException | SecurityException ex) {
Log.e(TAG, "allocate: manager.openCamera: ", ex);
@@ -1443,25 +1474,28 @@ public class VideoCaptureCamera2 extends VideoCapture {
@Override
public boolean stopCaptureAndBlockUntilStopped() {
nativeDCheckCurrentlyOnIncomingTaskRunner(mNativeVideoCaptureDeviceAndroid);
-
- // With Camera2 API, the capture is started asynchronously, which will cause problem if
- // stopCapture comes too quickly. Without stopping the previous capture properly, the next
- // startCapture will fail and make Chrome no-responding. So wait camera to be STARTED.
- synchronized (mCameraStateLock) {
- while (mCameraState != CameraState.STARTED && mCameraState != CameraState.STOPPED) {
- try {
- mCameraStateLock.wait();
- } catch (InterruptedException ex) {
- Log.e(TAG, "CaptureStartedEvent: ", ex);
+ try (TraceEvent trace_event =
+ TraceEvent.scoped("VideoCaptureCamera2.stopCaptureAndBlockUntilStopped")) {
+ // With Camera2 API, the capture is started asynchronously, which will cause problem if
+ // stopCapture comes too quickly. Without stopping the previous capture properly, the
+ // next startCapture will fail and make Chrome no-responding. So wait camera to be
+ // STARTED.
+ synchronized (mCameraStateLock) {
+ while (mCameraState != CameraState.STARTED && mCameraState != CameraState.STOPPED) {
+ try {
+ mCameraStateLock.wait();
+ } catch (InterruptedException ex) {
+ Log.e(TAG, "CaptureStartedEvent: ", ex);
+ }
}
+ if (mCameraState == CameraState.STOPPED) return true;
}
- if (mCameraState == CameraState.STOPPED) return true;
- }
- mCameraThreadHandler.post(new StopCaptureTask());
- mWaitForDeviceClosedConditionVariable.block();
+ mCameraThreadHandler.post(new StopCaptureTask());
+ mWaitForDeviceClosedConditionVariable.block();
- return true;
+ return true;
+ }
}
@Override
@@ -1487,6 +1521,8 @@ public class VideoCaptureCamera2 extends VideoCapture {
@Override
public void takePhotoAsync(long callbackId) {
nativeDCheckCurrentlyOnIncomingTaskRunner(mNativeVideoCaptureDeviceAndroid);
+ TraceEvent.instant("VideoCaptureCamera2.java", "takePhotoAsync");
+
mCameraThreadHandler.post(new TakePhotoTask(callbackId));
}
diff --git a/chromium/media/capture/video/android/video_capture_device_android.cc b/chromium/media/capture/video/android/video_capture_device_android.cc
index 31f1c0869e3..b871d462873 100644
--- a/chromium/media/capture/video/android/video_capture_device_android.cc
+++ b/chromium/media/capture/video/android/video_capture_device_android.cc
@@ -205,11 +205,18 @@ void VideoCaptureDeviceAndroid::StopAndDeAllocate() {
void VideoCaptureDeviceAndroid::TakePhoto(TakePhotoCallback callback) {
DCHECK(main_task_runner_->BelongsToCurrentThread());
+ TRACE_EVENT_INSTANT0(TRACE_DISABLED_BY_DEFAULT("video_and_image_capture"),
+ "VideoCaptureDeviceAndroid::TakePhoto",
+ TRACE_EVENT_SCOPE_PROCESS);
{
base::AutoLock lock(lock_);
if (state_ != kConfigured)
return;
if (!got_first_frame_) { // We have to wait until we get the first frame.
+ TRACE_EVENT_INSTANT0(TRACE_DISABLED_BY_DEFAULT("video_and_image_capture"),
+ "VideoCaptureDeviceAndroid::TakePhoto enqueuing to "
+ "wait for first frame",
+ TRACE_EVENT_SCOPE_PROCESS);
photo_requests_queue_.push_back(
base::Bind(&VideoCaptureDeviceAndroid::DoTakePhoto,
weak_ptr_factory_.GetWeakPtr(), base::Passed(&callback)));
@@ -500,6 +507,9 @@ void VideoCaptureDeviceAndroid::OnPhotoTaken(
jlong callback_id,
const base::android::JavaParamRef<jbyteArray>& data) {
DCHECK(callback_id);
+ TRACE_EVENT_INSTANT0(TRACE_DISABLED_BY_DEFAULT("video_and_image_capture"),
+ "VideoCaptureDeviceAndroid::OnPhotoTaken",
+ TRACE_EVENT_SCOPE_PROCESS);
base::AutoLock lock(photo_callbacks_lock_);
@@ -612,6 +622,9 @@ void VideoCaptureDeviceAndroid::SetErrorState(media::VideoCaptureError error,
void VideoCaptureDeviceAndroid::DoTakePhoto(TakePhotoCallback callback) {
DCHECK(main_task_runner_->BelongsToCurrentThread());
+ TRACE_EVENT_INSTANT0(TRACE_DISABLED_BY_DEFAULT("video_and_image_capture"),
+ "VideoCaptureDeviceAndroid::DoTakePhoto",
+ TRACE_EVENT_SCOPE_PROCESS);
#if DCHECK_IS_ON()
{
base::AutoLock lock(lock_);
diff --git a/chromium/media/capture/video/chromeos/OWNERS b/chromium/media/capture/video/chromeos/OWNERS
index c1d5fd534d6..12ac58213b3 100644
--- a/chromium/media/capture/video/chromeos/OWNERS
+++ b/chromium/media/capture/video/chromeos/OWNERS
@@ -1,5 +1,4 @@
jcliang@chromium.org
-wuchengli@chromium.org
posciak@chromium.org
shik@chromium.org
hywu@chromium.org
diff --git a/chromium/media/capture/video/chromeos/camera_hal_dispatcher_impl.cc b/chromium/media/capture/video/chromeos/camera_hal_dispatcher_impl.cc
index 202e420c57d..66d8ce3ddb8 100644
--- a/chromium/media/capture/video/chromeos/camera_hal_dispatcher_impl.cc
+++ b/chromium/media/capture/video/chromeos/camera_hal_dispatcher_impl.cc
@@ -16,6 +16,7 @@
#include "base/posix/eintr_wrapper.h"
#include "base/rand_util.h"
#include "base/single_thread_task_runner.h"
+#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/synchronization/waitable_event.h"
#include "base/trace_event/trace_event.h"
@@ -62,7 +63,7 @@ bool WaitForSocketReadable(int raw_socket_fd, int raw_cancel_fd) {
{raw_socket_fd, POLLIN, 0}, {raw_cancel_fd, POLLIN, 0},
};
- if (HANDLE_EINTR(poll(fds, arraysize(fds), -1)) <= 0) {
+ if (HANDLE_EINTR(poll(fds, base::size(fds), -1)) <= 0) {
PLOG(ERROR) << "poll()";
return false;
}
diff --git a/chromium/media/capture/video/chromeos/stream_buffer_manager.cc b/chromium/media/capture/video/chromeos/stream_buffer_manager.cc
index 5fd8aaa1a52..f67ce8028ba 100644
--- a/chromium/media/capture/video/chromeos/stream_buffer_manager.cc
+++ b/chromium/media/capture/video/chromeos/stream_buffer_manager.cc
@@ -728,12 +728,13 @@ void StreamBufferManager::SubmitCaptureResultIfComplete(
CaptureResult& pending_result = pending_results_[frame_number];
if (!stream_context_[stream_type]->capture_results_with_buffer.count(
frame_number) ||
- pending_result.partial_metadata_received.size() < partial_result_count_ ||
+ *pending_result.partial_metadata_received.rbegin() <
+ partial_result_count_ ||
pending_result.reference_time == base::TimeTicks()) {
// We can only submit the result buffer of |frame_number| for |stream_type|
// when:
// 1. The result buffer for |stream_type| is received, and
- // 2. All the result metadata are received, and
+ // 2. The received partial result id equals the partial result count, and
// 3. The shutter time is received.
return;
}
diff --git a/chromium/media/capture/video/fake_video_capture_device.cc b/chromium/media/capture/video/fake_video_capture_device.cc
index 6788e97dee4..678c291c251 100644
--- a/chromium/media/capture/video/fake_video_capture_device.cc
+++ b/chromium/media/capture/video/fake_video_capture_device.cc
@@ -22,6 +22,7 @@
#include "media/capture/mojom/image_capture_types.h"
#include "third_party/skia/include/core/SkBitmap.h"
#include "third_party/skia/include/core/SkCanvas.h"
+#include "third_party/skia/include/core/SkFont.h"
#include "third_party/skia/include/core/SkMatrix.h"
#include "third_party/skia/include/core/SkPaint.h"
#include "ui/gfx/codec/jpeg_codec.h"
@@ -326,6 +327,8 @@ void PacmanFramePainter::DrawPacman(base::TimeDelta elapsed_time,
bitmap.setPixels(target_buffer);
SkPaint paint;
paint.setStyle(SkPaint::kFill_Style);
+ SkFont font;
+ font.setEdging(SkFont::Edging::kAlias);
SkCanvas canvas(bitmap);
const SkScalar unscaled_zoom = fake_device_state_->zoom / 100.f;
@@ -362,7 +365,8 @@ void PacmanFramePainter::DrawPacman(base::TimeDelta elapsed_time,
base::StringPrintf("%d:%02d:%02d:%03d %d", hours, minutes, seconds,
milliseconds, frame_count);
canvas.scale(3, 3);
- canvas.drawText(time_string.data(), time_string.length(), 30, 20, paint);
+ canvas.drawSimpleText(time_string.data(), time_string.length(),
+ kUTF8_SkTextEncoding, 30, 20, font, paint);
if (pixel_format_ == Format::Y16) {
// Use 8 bit bitmap rendered to first half of the buffer as high byte values
diff --git a/chromium/media/capture/video/linux/camera_config_chromeos_unittest.cc b/chromium/media/capture/video/linux/camera_config_chromeos_unittest.cc
index 4610803b2b3..a5f236f0493 100644
--- a/chromium/media/capture/video/linux/camera_config_chromeos_unittest.cc
+++ b/chromium/media/capture/video/linux/camera_config_chromeos_unittest.cc
@@ -20,12 +20,11 @@ const char kConfigFileContent[] =
}
TEST(CameraConfigChromeOSTest, ParseSuccessfully) {
- const char file_name[] = "fake_camera_characteristics.conf";
- base::WriteFile(base::FilePath(file_name), kConfigFileContent,
- sizeof(kConfigFileContent));
+ base::FilePath conf_path;
+ ASSERT_TRUE(base::CreateTemporaryFile(&conf_path));
+ base::WriteFile(conf_path, kConfigFileContent, sizeof(kConfigFileContent));
- std::string file_name_str(file_name);
- CameraConfigChromeOS camera_config(file_name_str);
+ CameraConfigChromeOS camera_config(conf_path.value());
EXPECT_EQ(VideoFacingMode::MEDIA_VIDEO_FACING_ENVIRONMENT,
camera_config.GetCameraFacing(std::string("/dev/video2"),
std::string("04f2:b53a")));
diff --git a/chromium/media/capture/video/linux/v4l2_capture_device.h b/chromium/media/capture/video/linux/v4l2_capture_device.h
index 2c95357c9a2..1c38be7c96d 100644
--- a/chromium/media/capture/video/linux/v4l2_capture_device.h
+++ b/chromium/media/capture/video/linux/v4l2_capture_device.h
@@ -17,7 +17,7 @@ namespace media {
// Interface for abstracting out the V4L2 API. This allows using a mock or fake
// implementation in testing.
class CAPTURE_EXPORT V4L2CaptureDevice
- : public base::RefCounted<V4L2CaptureDevice> {
+ : public base::RefCountedThreadSafe<V4L2CaptureDevice> {
public:
virtual int open(const char* device_name, int flags) = 0;
virtual int close(int fd) = 0;
@@ -36,7 +36,7 @@ class CAPTURE_EXPORT V4L2CaptureDevice
virtual ~V4L2CaptureDevice() {}
private:
- friend class base::RefCounted<V4L2CaptureDevice>;
+ friend class base::RefCountedThreadSafe<V4L2CaptureDevice>;
};
} // namespace media
diff --git a/chromium/media/capture/video/mac/video_capture_device_factory_mac.mm b/chromium/media/capture/video/mac/video_capture_device_factory_mac.mm
index b9a79a7364f..30055787fdf 100644
--- a/chromium/media/capture/video/mac/video_capture_device_factory_mac.mm
+++ b/chromium/media/capture/video/mac/video_capture_device_factory_mac.mm
@@ -11,8 +11,8 @@
#include "base/bind.h"
#include "base/location.h"
-#include "base/macros.h"
#include "base/single_thread_task_runner.h"
+#include "base/stl_util.h"
#include "base/strings/string_util.h"
#include "base/task_runner_util.h"
#import "media/capture/video/mac/video_capture_device_avfoundation_mac.h"
@@ -45,7 +45,7 @@ static bool IsDeviceBlacklisted(
const VideoCaptureDeviceDescriptor& descriptor) {
bool is_device_blacklisted = false;
for (size_t i = 0;
- !is_device_blacklisted && i < arraysize(kBlacklistedCamerasIdSignature);
+ !is_device_blacklisted && i < base::size(kBlacklistedCamerasIdSignature);
++i) {
is_device_blacklisted =
base::EndsWith(descriptor.device_id, kBlacklistedCamerasIdSignature[i],
diff --git a/chromium/media/capture/video/scoped_video_capture_jpeg_decoder.cc b/chromium/media/capture/video/scoped_video_capture_jpeg_decoder.cc
new file mode 100644
index 00000000000..43590846179
--- /dev/null
+++ b/chromium/media/capture/video/scoped_video_capture_jpeg_decoder.cc
@@ -0,0 +1,40 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/capture/video/scoped_video_capture_jpeg_decoder.h"
+
+namespace media {
+
+ScopedVideoCaptureJpegDecoder::ScopedVideoCaptureJpegDecoder(
+ std::unique_ptr<VideoCaptureJpegDecoder> decoder,
+ scoped_refptr<base::SequencedTaskRunner> task_runner)
+ : decoder_(std::move(decoder)), task_runner_(std::move(task_runner)) {}
+
+ScopedVideoCaptureJpegDecoder::~ScopedVideoCaptureJpegDecoder() {
+ task_runner_->DeleteSoon(FROM_HERE, std::move(decoder_));
+}
+
+// Implementation of VideoCaptureJpegDecoder:
+void ScopedVideoCaptureJpegDecoder::Initialize() {
+ decoder_->Initialize();
+}
+
+VideoCaptureJpegDecoder::STATUS ScopedVideoCaptureJpegDecoder::GetStatus()
+ const {
+ return decoder_->GetStatus();
+}
+
+void ScopedVideoCaptureJpegDecoder::DecodeCapturedData(
+ const uint8_t* data,
+ size_t in_buffer_size,
+ const media::VideoCaptureFormat& frame_format,
+ base::TimeTicks reference_time,
+ base::TimeDelta timestamp,
+ media::VideoCaptureDevice::Client::Buffer out_buffer) {
+ decoder_->DecodeCapturedData(data, in_buffer_size, frame_format,
+ reference_time, timestamp,
+ std::move(out_buffer));
+}
+
+} // namespace media
diff --git a/chromium/media/capture/video/scoped_video_capture_jpeg_decoder.h b/chromium/media/capture/video/scoped_video_capture_jpeg_decoder.h
new file mode 100644
index 00000000000..76b3436f222
--- /dev/null
+++ b/chromium/media/capture/video/scoped_video_capture_jpeg_decoder.h
@@ -0,0 +1,45 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAPTURE_VIDEO_SCOPED_VIDEO_CAPTURE_JPEG_DECODER_H_
+#define MEDIA_CAPTURE_VIDEO_SCOPED_VIDEO_CAPTURE_JPEG_DECODER_H_
+
+#include <memory>
+
+#include "base/sequenced_task_runner.h"
+#include "media/capture/capture_export.h"
+#include "media/capture/video/video_capture_jpeg_decoder.h"
+
+namespace media {
+
+// Decorator for media::VideoCaptureJpegDecoder that destroys the decorated
+// instance on a given task runner.
+class CAPTURE_EXPORT ScopedVideoCaptureJpegDecoder
+ : public VideoCaptureJpegDecoder {
+ public:
+ ScopedVideoCaptureJpegDecoder(
+ std::unique_ptr<VideoCaptureJpegDecoder> decoder,
+ scoped_refptr<base::SequencedTaskRunner> task_runner);
+
+ ~ScopedVideoCaptureJpegDecoder() override;
+
+ // Implementation of VideoCaptureJpegDecoder:
+ void Initialize() override;
+ STATUS GetStatus() const override;
+ void DecodeCapturedData(
+ const uint8_t* data,
+ size_t in_buffer_size,
+ const media::VideoCaptureFormat& frame_format,
+ base::TimeTicks reference_time,
+ base::TimeDelta timestamp,
+ media::VideoCaptureDevice::Client::Buffer out_buffer) override;
+
+ private:
+ std::unique_ptr<VideoCaptureJpegDecoder> decoder_;
+ scoped_refptr<base::SequencedTaskRunner> task_runner_;
+};
+
+} // namespace media
+
+#endif // MEDIA_CAPTURE_VIDEO_SCOPED_VIDEO_CAPTURE_JPEG_DECODER_H_
diff --git a/chromium/media/capture/video/video_capture_device.cc b/chromium/media/capture/video/video_capture_device.cc
index 5c45c4e5738..4085a0c505f 100644
--- a/chromium/media/capture/video/video_capture_device.cc
+++ b/chromium/media/capture/video/video_capture_device.cc
@@ -6,7 +6,7 @@
#include "base/command_line.h"
#include "base/i18n/timezone.h"
-#include "base/macros.h"
+#include "base/stl_util.h"
#include "base/strings/string_util.h"
#include "build/build_config.h"
#include "media/base/media_switches.h"
@@ -55,7 +55,7 @@ PowerLineFrequency VideoCaptureDevice::GetPowerLineFrequencyForLocation() {
"KN", "KR", "KY", "MS", "MX", "NI", "PA", "PE", "PF", "PH", "PR",
"PW", "SA", "SR", "SV", "TT", "TW", "UM", "US", "VG", "VI", "VE"};
const char** countries_using_60Hz_end =
- countries_using_60Hz + arraysize(countries_using_60Hz);
+ countries_using_60Hz + base::size(countries_using_60Hz);
if (std::find(countries_using_60Hz, countries_using_60Hz_end,
current_country) == countries_using_60Hz_end) {
return PowerLineFrequency::FREQUENCY_50HZ;
diff --git a/chromium/media/capture/video/video_capture_device_client.cc b/chromium/media/capture/video/video_capture_device_client.cc
index 044d8318093..6a401ff3b3e 100644
--- a/chromium/media/capture/video/video_capture_device_client.cc
+++ b/chromium/media/capture/video/video_capture_device_client.cc
@@ -113,10 +113,6 @@ VideoCaptureDeviceClient::VideoCaptureDeviceClient(
}
VideoCaptureDeviceClient::~VideoCaptureDeviceClient() {
- // This should be on the platform auxiliary thread since
- // |external_jpeg_decoder_| need to be destructed on the same thread as
- // OnIncomingCapturedData.
-
for (int buffer_id : buffer_ids_known_by_receiver_)
receiver_->OnBufferRetired(buffer_id);
}
@@ -142,7 +138,8 @@ void VideoCaptureDeviceClient::OnIncomingCapturedData(
base::TimeDelta timestamp,
int frame_feedback_id) {
DFAKE_SCOPED_RECURSIVE_LOCK(call_from_producer_);
- TRACE_EVENT0("media", "VideoCaptureDeviceClient::OnIncomingCapturedData");
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("video_and_image_capture"),
+ "VideoCaptureDeviceClient::OnIncomingCapturedData");
if (last_captured_pixel_format_ != format.pixel_format) {
OnLog("Pixel format: " + VideoPixelFormatToString(format.pixel_format));
diff --git a/chromium/media/capture/video/video_capture_device_unittest.cc b/chromium/media/capture/video/video_capture_device_unittest.cc
index 1846908a4a2..098fe984542 100644
--- a/chromium/media/capture/video/video_capture_device_unittest.cc
+++ b/chromium/media/capture/video/video_capture_device_unittest.cc
@@ -52,6 +52,7 @@
#include "media/capture/video/chromeos/camera_buffer_factory.h"
#include "media/capture/video/chromeos/camera_hal_dispatcher_impl.h"
#include "media/capture/video/chromeos/local_gpu_memory_buffer_manager.h"
+#include "media/capture/video/chromeos/public/cros_features.h"
#include "media/capture/video/chromeos/video_capture_device_chromeos_halv3.h"
#include "media/capture/video/chromeos/video_capture_device_factory_chromeos.h"
#endif
@@ -62,9 +63,12 @@
DISABLED_UsingRealWebcam_AllocateBadSize
// We will always get YUYV from the Mac AVFoundation implementations.
#define MAYBE_UsingRealWebcam_CaptureMjpeg DISABLED_UsingRealWebcam_CaptureMjpeg
-#define MAYBE_UsingRealWebcam_TakePhoto UsingRealWebcam_TakePhoto
-#define MAYBE_UsingRealWebcam_GetPhotoState UsingRealWebcam_GetPhotoState
-#define MAYBE_UsingRealWebcam_CaptureWithSize UsingRealWebcam_CaptureWithSize
+// TODO(crbug.com/923874).
+#define MAYBE_UsingRealWebcam_TakePhoto DISABLED_UsingRealWebcam_TakePhoto
+ // TODO(crbug.com/923874).
+#define MAYBE_UsingRealWebcam_GetPhotoState DISABLED_UsingRealWebcam_GetPhotoState
+ // TODO(crbug.com/923874).
+#define MAYBE_UsingRealWebcam_CaptureWithSize DISABLED_UsingRealWebcam_CaptureWithSize
#define MAYBE_UsingRealWebcam_CheckPhotoCallbackRelease \
UsingRealWebcam_CheckPhotoCallbackRelease
#elif defined(OS_WIN)
@@ -657,6 +661,13 @@ WRAPPED_TEST_P(VideoCaptureDeviceTest, MAYBE_UsingRealWebcam_CaptureMjpeg) {
base::Unretained(this)));
}
void VideoCaptureDeviceTest::RunCaptureMjpegTestCase() {
+#if defined(OS_CHROMEOS)
+ if (media::ShouldUseCrosCameraService()) {
+ VLOG(1)
+ << "Skipped on Chrome OS device where HAL v3 camera service is used";
+ return;
+ }
+#endif
std::unique_ptr<VideoCaptureDeviceDescriptor> device_descriptor =
GetFirstDeviceDescriptorSupportingPixelFormat(PIXEL_FORMAT_MJPEG);
ASSERT_TRUE(device_descriptor);
diff --git a/chromium/media/capture/video/video_capture_jpeg_decoder_impl.cc b/chromium/media/capture/video/video_capture_jpeg_decoder_impl.cc
index b429824b684..bb20fca8789 100644
--- a/chromium/media/capture/video/video_capture_jpeg_decoder_impl.cc
+++ b/chromium/media/capture/video/video_capture_jpeg_decoder_impl.cc
@@ -25,27 +25,7 @@ VideoCaptureJpegDecoderImpl::VideoCaptureJpegDecoderImpl(
weak_ptr_factory_(this) {}
VideoCaptureJpegDecoderImpl::~VideoCaptureJpegDecoderImpl() {
- // |this| was set as |decoder_|'s client. |decoder_| has to be deleted on
- // |decoder_task_runner_| before this destructor returns to ensure that it
- // doesn't call back into its client.
-
- if (!decoder_)
- return;
-
- if (decoder_task_runner_->RunsTasksInCurrentSequence()) {
- decoder_.reset();
- return;
- }
-
- base::WaitableEvent event(base::WaitableEvent::ResetPolicy::MANUAL,
- base::WaitableEvent::InitialState::NOT_SIGNALED);
- // base::Unretained is safe because |this| will be valid until |event|
- // is signaled.
- decoder_task_runner_->PostTask(
- FROM_HERE,
- base::BindOnce(&VideoCaptureJpegDecoderImpl::DestroyDecoderOnIOThread,
- base::Unretained(this), &event));
- event.Wait();
+ DCHECK(decoder_task_runner_->RunsTasksInCurrentSequence());
}
void VideoCaptureJpegDecoderImpl::Initialize() {
@@ -61,8 +41,7 @@ void VideoCaptureJpegDecoderImpl::Initialize() {
weak_ptr_factory_.GetWeakPtr()));
}
-VideoCaptureJpegDecoderImpl::STATUS VideoCaptureJpegDecoderImpl::GetStatus()
- const {
+VideoCaptureJpegDecoder::STATUS VideoCaptureJpegDecoderImpl::GetStatus() const {
base::AutoLock lock(lock_);
return decoder_status_;
}
diff --git a/chromium/media/capture/video/video_capture_jpeg_decoder_impl.h b/chromium/media/capture/video/video_capture_jpeg_decoder_impl.h
index a1bfcee8b75..a8d099ac208 100644
--- a/chromium/media/capture/video/video_capture_jpeg_decoder_impl.h
+++ b/chromium/media/capture/video/video_capture_jpeg_decoder_impl.h
@@ -21,10 +21,6 @@
#include "media/capture/video/video_capture_jpeg_decoder.h"
#include "media/mojo/clients/mojo_jpeg_decode_accelerator.h"
-namespace base {
-class WaitableEvent;
-}
-
namespace media {
// Implementation of media::VideoCaptureJpegDecoder that delegates to a
@@ -34,13 +30,12 @@ namespace media {
// is invoked. Until |decode_done_cb_| is invoked, subsequent calls to
// DecodeCapturedData() are ignored.
// The given |decoder_task_runner| must allow blocking on |lock_|.
+// Instances must be destroyed on |decoder_task_runner|, but the
+// media::VideoCaptureJpegDecoder methods may be called from any thread.
class CAPTURE_EXPORT VideoCaptureJpegDecoderImpl
- : public media::VideoCaptureJpegDecoder,
- public media::JpegDecodeAccelerator::Client {
+ : public VideoCaptureJpegDecoder,
+ public JpegDecodeAccelerator::Client {
public:
- // |decode_done_cb| is called on the IO thread when decode succeeds. This can
- // be on any thread. |decode_done_cb| is never called after
- // VideoCaptureGpuJpegDecoder is destroyed.
VideoCaptureJpegDecoderImpl(
MojoJpegDecodeAcceleratorFactoryCB jpeg_decoder_factory,
scoped_refptr<base::SequencedTaskRunner> decoder_task_runner,
@@ -60,7 +55,7 @@ class CAPTURE_EXPORT VideoCaptureJpegDecoderImpl
media::VideoCaptureDevice::Client::Buffer out_buffer) override;
// JpegDecodeAccelerator::Client implementation.
- // These will be called on IO thread.
+ // These will be called on |decoder_task_runner|.
void VideoFrameReady(int32_t buffer_id) override;
void NotifyError(int32_t buffer_id,
media::JpegDecodeAccelerator::Error error) override;
diff --git a/chromium/media/capture/video/win/sink_input_pin_win.cc b/chromium/media/capture/video/win/sink_input_pin_win.cc
index fa597f8a19e..ed40946a997 100644
--- a/chromium/media/capture/video/win/sink_input_pin_win.cc
+++ b/chromium/media/capture/video/win/sink_input_pin_win.cc
@@ -12,7 +12,7 @@
#include <stdint.h>
#include "base/logging.h"
-#include "base/macros.h"
+#include "base/stl_util.h"
#include "media/base/timestamp_constants.h"
namespace media {
@@ -113,7 +113,7 @@ bool SinkInputPin::IsMediaTypeValid(const AM_MEDIA_TYPE* media_type) {
#ifndef NDEBUG
WCHAR guid_str[128];
- StringFromGUID2(sub_type, guid_str, arraysize(guid_str));
+ StringFromGUID2(sub_type, guid_str, base::size(guid_str));
DVLOG(2) << __func__ << " unsupported media type: " << guid_str;
#endif
return false;
diff --git a/chromium/media/capture/video/win/video_capture_device_factory_win.cc b/chromium/media/capture/video/win/video_capture_device_factory_win.cc
index 62628bf0a02..3a87b241609 100644
--- a/chromium/media/capture/video/win/video_capture_device_factory_win.cc
+++ b/chromium/media/capture/video/win/video_capture_device_factory_win.cc
@@ -15,7 +15,6 @@
#include "base/command_line.h"
#include "base/feature_list.h"
-#include "base/macros.h"
#include "base/metrics/histogram_macros.h"
#include "base/single_thread_task_runner.h"
#include "base/stl_util.h"
@@ -75,13 +74,19 @@ const char* const kBlacklistedCameraNames[] = {
// The following software WebCams cause crashes.
"IP Camera [JPEG/MJPEG]", "CyberLink Webcam Splitter", "EpocCam",
};
-static_assert(arraysize(kBlacklistedCameraNames) == BLACKLISTED_CAMERA_MAX + 1,
+static_assert(base::size(kBlacklistedCameraNames) == BLACKLISTED_CAMERA_MAX + 1,
"kBlacklistedCameraNames should be same size as "
"BlacklistedCameraNames enum");
const char* const kModelIdsBlacklistedForMediaFoundation[] = {
// Devices using Empia 2860 or 2820 chips, see https://crbug.com/849636.
- "eb1a:2860", "eb1a:2820", "1ce6:2820"};
+ "eb1a:2860", "eb1a:2820", "1ce6:2820",
+ // Elgato HD60 Pro
+ "12ab:0380",
+ // Sensoray 2253
+ "1943:2253",
+ // Dell E5440
+ "0c45:64d0", "0c45:64d2"};
const std::pair<VideoCaptureApi, std::vector<std::pair<GUID, GUID>>>
kMfAttributes[] = {{VideoCaptureApi::WIN_MEDIA_FOUNDATION,
@@ -113,7 +118,7 @@ bool LoadMediaFoundationDlls() {
for (const wchar_t* kMfDLL : kMfDLLs) {
wchar_t path[MAX_PATH] = {0};
- ExpandEnvironmentStringsW(kMfDLL, path, arraysize(path));
+ ExpandEnvironmentStringsW(kMfDLL, path, base::size(path));
if (!LoadLibraryExW(path, NULL, LOAD_WITH_ALTERED_SEARCH_PATH))
return false;
}
@@ -146,7 +151,7 @@ bool CreateVideoCaptureDeviceMediaFoundation(const Descriptor& descriptor,
IMFMediaSource** source) {
ComPtr<IMFAttributes> attributes;
static_assert(
- arraysize(kMfAttributes) == 2,
+ base::size(kMfAttributes) == 2,
"Implementation here asumes that kMfAttributes has size of two.");
DCHECK_EQ(kMfAttributes[0].first, VideoCaptureApi::WIN_MEDIA_FOUNDATION);
const auto& attributes_data =
@@ -168,8 +173,8 @@ bool CreateVideoCaptureDeviceMediaFoundation(const Descriptor& descriptor,
bool IsDeviceBlackListed(const std::string& name) {
DCHECK_EQ(BLACKLISTED_CAMERA_MAX + 1,
- static_cast<int>(arraysize(kBlacklistedCameraNames)));
- for (size_t i = 0; i < arraysize(kBlacklistedCameraNames); ++i) {
+ static_cast<int>(base::size(kBlacklistedCameraNames)));
+ for (size_t i = 0; i < base::size(kBlacklistedCameraNames); ++i) {
if (base::StartsWith(name, kBlacklistedCameraNames[i],
base::CompareCase::INSENSITIVE_ASCII)) {
DVLOG(1) << "Enumerated blacklisted device: " << name;
diff --git a/chromium/media/capture/video/win/video_capture_device_win.cc b/chromium/media/capture/video/win/video_capture_device_win.cc
index 24e1eaf7793..ee71ff53031 100644
--- a/chromium/media/capture/video/win/video_capture_device_win.cc
+++ b/chromium/media/capture/video/win/video_capture_device_win.cc
@@ -13,7 +13,7 @@
#include <utility>
#include "base/feature_list.h"
-#include "base/macros.h"
+#include "base/stl_util.h"
#include "base/strings/sys_string_conversions.h"
#include "base/win/scoped_co_mem.h"
#include "base/win/scoped_variant.h"
@@ -340,7 +340,7 @@ VideoPixelFormat VideoCaptureDeviceWin::TranslateMediaSubtypeToPixelFormat(
}
#ifndef NDEBUG
WCHAR guid_str[128];
- StringFromGUID2(sub_type, guid_str, arraysize(guid_str));
+ StringFromGUID2(sub_type, guid_str, base::size(guid_str));
DVLOG(2) << "Device (also) supports an unknown media type " << guid_str;
#endif
return PIXEL_FORMAT_UNKNOWN;
diff --git a/chromium/media/capture/video_capture_types.cc b/chromium/media/capture/video_capture_types.cc
index 323ef60c9f5..07a7b78c416 100644
--- a/chromium/media/capture/video_capture_types.cc
+++ b/chromium/media/capture/video_capture_types.cc
@@ -5,7 +5,7 @@
#include "media/capture/video_capture_types.h"
#include "base/logging.h"
-#include "base/macros.h"
+#include "base/stl_util.h"
#include "base/strings/stringprintf.h"
#include "media/base/limits.h"
#include "media/base/video_frame.h"
@@ -60,11 +60,11 @@ bool VideoCaptureFormat::ComparePixelFormatPreference(
const VideoPixelFormat& rhs) {
auto* format_lhs = std::find(
kSupportedCapturePixelFormats,
- kSupportedCapturePixelFormats + arraysize(kSupportedCapturePixelFormats),
+ kSupportedCapturePixelFormats + base::size(kSupportedCapturePixelFormats),
lhs);
auto* format_rhs = std::find(
kSupportedCapturePixelFormats,
- kSupportedCapturePixelFormats + arraysize(kSupportedCapturePixelFormats),
+ kSupportedCapturePixelFormats + base::size(kSupportedCapturePixelFormats),
rhs);
return format_lhs < format_rhs;
}
diff --git a/chromium/media/cast/logging/log_deserializer.cc b/chromium/media/cast/logging/log_deserializer.cc
index b34a91502d1..b006df4951f 100644
--- a/chromium/media/cast/logging/log_deserializer.cc
+++ b/chromium/media/cast/logging/log_deserializer.cc
@@ -30,7 +30,7 @@ namespace {
const int kMaxUncompressedBytes = 60 * 1000 * 1000;
void MergePacketEvent(const AggregatedPacketEvent& from,
- linked_ptr<AggregatedPacketEvent> to) {
+ AggregatedPacketEvent* to) {
for (int i = 0; i < from.base_packet_event_size(); i++) {
const BasePacketEvent& from_base_event = from.base_packet_event(i);
bool merged = false;
@@ -55,7 +55,7 @@ void MergePacketEvent(const AggregatedPacketEvent& from,
}
void MergeFrameEvent(const AggregatedFrameEvent& from,
- linked_ptr<AggregatedFrameEvent> to) {
+ AggregatedFrameEvent* to) {
to->mutable_event_type()->MergeFrom(from.event_type());
to->mutable_event_timestamp_ms()->MergeFrom(from.event_timestamp_ms());
if (!to->has_encoded_frame_size() && from.has_encoded_frame_size())
@@ -80,7 +80,7 @@ bool PopulateDeserializedLog(base::BigEndianReader* reader,
if (!reader->ReadU16(&proto_size))
return false;
- linked_ptr<AggregatedFrameEvent> frame_event(new AggregatedFrameEvent);
+ auto frame_event = std::make_unique<AggregatedFrameEvent>();
if (!frame_event->ParseFromArray(reader->ptr(), proto_size))
return false;
if (!reader->Skip(proto_size))
@@ -97,11 +97,11 @@ bool PopulateDeserializedLog(base::BigEndianReader* reader,
auto it = frame_event_map.find(relative_rtp_timestamp);
if (it == frame_event_map.end()) {
frame_event_map.insert(
- std::make_pair(relative_rtp_timestamp, frame_event));
+ std::make_pair(relative_rtp_timestamp, std::move(frame_event)));
} else {
// Events for the same frame might have been split into more than one
// proto. Merge them.
- MergeFrameEvent(*frame_event, it->second);
+ MergeFrameEvent(*frame_event, it->second.get());
}
}
@@ -113,7 +113,7 @@ bool PopulateDeserializedLog(base::BigEndianReader* reader,
if (!reader->ReadU16(&proto_size))
return false;
- linked_ptr<AggregatedPacketEvent> packet_event(new AggregatedPacketEvent);
+ auto packet_event = std::make_unique<AggregatedPacketEvent>();
if (!packet_event->ParseFromArray(reader->ptr(), proto_size))
return false;
if (!reader->Skip(proto_size))
@@ -127,11 +127,11 @@ bool PopulateDeserializedLog(base::BigEndianReader* reader,
auto it = packet_event_map.find(relative_rtp_timestamp);
if (it == packet_event_map.end()) {
packet_event_map.insert(
- std::make_pair(relative_rtp_timestamp, packet_event));
+ std::make_pair(relative_rtp_timestamp, std::move(packet_event)));
} else {
// Events for the same frame might have been split into more than one
// proto. Merge them.
- MergePacketEvent(*packet_event, it->second);
+ MergePacketEvent(*packet_event, it->second.get());
}
}
diff --git a/chromium/media/cast/logging/log_deserializer.h b/chromium/media/cast/logging/log_deserializer.h
index c9d4c724972..2c69116e414 100644
--- a/chromium/media/cast/logging/log_deserializer.h
+++ b/chromium/media/cast/logging/log_deserializer.h
@@ -6,21 +6,21 @@
#define MEDIA_CAST_LOGGING_LOG_DESERIALIZER_H_
#include <map>
+#include <memory>
#include <string>
-#include "base/memory/linked_ptr.h"
#include "media/cast/logging/logging_defines.h"
#include "media/cast/logging/proto/raw_events.pb.h"
namespace media {
namespace cast {
-typedef std::map<RtpTimeTicks,
- linked_ptr<media::cast::proto::AggregatedFrameEvent>>
- FrameEventMap;
-typedef std::map<RtpTimeTicks,
- linked_ptr<media::cast::proto::AggregatedPacketEvent>>
- PacketEventMap;
+using FrameEventMap =
+ std::map<RtpTimeTicks,
+ std::unique_ptr<media::cast::proto::AggregatedFrameEvent>>;
+using PacketEventMap =
+ std::map<RtpTimeTicks,
+ std::unique_ptr<media::cast::proto::AggregatedPacketEvent>>;
// Represents deserialized raw event logs for a particular stream.
struct DeserializedLog {
diff --git a/chromium/media/cast/logging/raw_event_subscriber_bundle.cc b/chromium/media/cast/logging/raw_event_subscriber_bundle.cc
index a72de45fc35..5f814c9bc0d 100644
--- a/chromium/media/cast/logging/raw_event_subscriber_bundle.cc
+++ b/chromium/media/cast/logging/raw_event_subscriber_bundle.cc
@@ -60,8 +60,8 @@ void RawEventSubscriberBundle::AddEventSubscribers(bool is_audio) {
subscribers_.insert(std::make_pair(
is_audio,
- make_linked_ptr(new RawEventSubscriberBundleForStream(
- cast_environment_, is_audio, receiver_offset_estimator_.get()))));
+ std::make_unique<RawEventSubscriberBundleForStream>(
+ cast_environment_, is_audio, receiver_offset_estimator_.get())));
}
void RawEventSubscriberBundle::RemoveEventSubscribers(bool is_audio) {
@@ -79,15 +79,15 @@ void RawEventSubscriberBundle::RemoveEventSubscribers(bool is_audio) {
EncodingEventSubscriber*
RawEventSubscriberBundle::GetEncodingEventSubscriber(bool is_audio) {
auto it = subscribers_.find(is_audio);
- return it == subscribers_.end() ?
- NULL : it->second->GetEncodingEventSubscriber();
+ return it == subscribers_.end() ? nullptr
+ : it->second->GetEncodingEventSubscriber();
}
StatsEventSubscriber*
RawEventSubscriberBundle::GetStatsEventSubscriber(bool is_audio) {
auto it = subscribers_.find(is_audio);
- return it == subscribers_.end() ?
- NULL : it->second->GetStatsEventSubscriber();
+ return it == subscribers_.end() ? nullptr
+ : it->second->GetStatsEventSubscriber();
}
} // namespace cast
diff --git a/chromium/media/cast/logging/raw_event_subscriber_bundle.h b/chromium/media/cast/logging/raw_event_subscriber_bundle.h
index 4df3065cb99..bb9b9719d76 100644
--- a/chromium/media/cast/logging/raw_event_subscriber_bundle.h
+++ b/chromium/media/cast/logging/raw_event_subscriber_bundle.h
@@ -65,13 +65,12 @@ class RawEventSubscriberBundle {
StatsEventSubscriber* GetStatsEventSubscriber(bool is_audio);
private:
+ const scoped_refptr<CastEnvironment> cast_environment_;
// Map from (is_audio) -> RawEventSubscriberBundleForStream.
// TODO(imcheng): This works because we only have 1 audio and 1 video stream.
// This needs to scale better.
- typedef std::map<bool, linked_ptr<RawEventSubscriberBundleForStream> >
- SubscribersMapByStream;
- const scoped_refptr<CastEnvironment> cast_environment_;
- SubscribersMapByStream subscribers_;
+ std::map<bool, std::unique_ptr<RawEventSubscriberBundleForStream>>
+ subscribers_;
std::unique_ptr<ReceiverTimeOffsetEstimator> receiver_offset_estimator_;
DISALLOW_COPY_AND_ASSIGN(RawEventSubscriberBundle);
diff --git a/chromium/media/cast/logging/serialize_deserialize_test.cc b/chromium/media/cast/logging/serialize_deserialize_test.cc
index abab1fbe807..9d78162fb31 100644
--- a/chromium/media/cast/logging/serialize_deserialize_test.cc
+++ b/chromium/media/cast/logging/serialize_deserialize_test.cc
@@ -9,7 +9,7 @@
#include <memory>
-#include "base/macros.h"
+#include "base/stl_util.h"
#include "media/cast/logging/log_deserializer.h"
#include "media/cast/logging/log_serializer.h"
#include "media/cast/logging/logging_defines.h"
@@ -64,23 +64,24 @@ class SerializeDeserializeTest : public ::testing::Test {
for (int i = 0; i < metadata_.num_frame_events(); i++) {
auto frame_event = std::make_unique<AggregatedFrameEvent>();
frame_event->set_relative_rtp_timestamp(i * 90);
- for (uint32_t event_index = 0; event_index < arraysize(kVideoFrameEvents);
- ++event_index) {
+ for (uint32_t event_index = 0;
+ event_index < base::size(kVideoFrameEvents); ++event_index) {
frame_event->add_event_type(
ToProtoEventType(kVideoFrameEvents[event_index]));
frame_event->add_event_timestamp_ms(event_time_ms);
event_time_ms += 1024;
}
- frame_event->set_width(kWidth[i % arraysize(kWidth)]);
- frame_event->set_height(kHeight[i % arraysize(kHeight)]);
+ frame_event->set_width(kWidth[i % base::size(kWidth)]);
+ frame_event->set_height(kHeight[i % base::size(kHeight)]);
frame_event->set_encoded_frame_size(
- kEncodedFrameSize[i % arraysize(kEncodedFrameSize)]);
- frame_event->set_delay_millis(kDelayMillis[i % arraysize(kDelayMillis)]);
- frame_event->set_encoder_cpu_percent_utilized(kEncoderCPUPercentUtilized[
- i % arraysize(kEncoderCPUPercentUtilized)]);
+ kEncodedFrameSize[i % base::size(kEncodedFrameSize)]);
+ frame_event->set_delay_millis(kDelayMillis[i % base::size(kDelayMillis)]);
+ frame_event->set_encoder_cpu_percent_utilized(
+ kEncoderCPUPercentUtilized[i %
+ base::size(kEncoderCPUPercentUtilized)]);
frame_event->set_idealized_bitrate_percent_utilized(
- kIdealizedBitratePercentUtilized[
- i % arraysize(kIdealizedBitratePercentUtilized)]);
+ kIdealizedBitratePercentUtilized
+ [i % base::size(kIdealizedBitratePercentUtilized)]);
frame_event_list_.push_back(std::move(frame_event));
}
@@ -95,7 +96,7 @@ class SerializeDeserializeTest : public ::testing::Test {
base_event->set_packet_id(packet_id);
packet_id++;
for (uint32_t event_index = 0;
- event_index < arraysize(kVideoPacketEvents); ++event_index) {
+ event_index < base::size(kVideoPacketEvents); ++event_index) {
base_event->add_event_type(
ToProtoEventType(kVideoPacketEvents[event_index]));
base_event->add_event_timestamp_ms(event_time_ms);
diff --git a/chromium/media/cast/logging/stats_event_subscriber.cc b/chromium/media/cast/logging/stats_event_subscriber.cc
index 73f7f45170c..985c69137c7 100644
--- a/chromium/media/cast/logging/stats_event_subscriber.cc
+++ b/chromium/media/cast/logging/stats_event_subscriber.cc
@@ -332,30 +332,22 @@ const int kLargeMaxLatencyBucketMs = 1200;
const int kLargeBucketWidthMs = 50;
void StatsEventSubscriber::InitHistograms() {
- histograms_[E2E_LATENCY_MS_HISTO].reset(
- new SimpleHistogram(0, kLargeMaxLatencyBucketMs,
- kLargeBucketWidthMs));
- histograms_[QUEUEING_LATENCY_MS_HISTO].reset(
- new SimpleHistogram(0, kDefaultMaxLatencyBucketMs,
- kDefaultBucketWidthMs));
- histograms_[NETWORK_LATENCY_MS_HISTO].reset(
- new SimpleHistogram(0, kDefaultMaxLatencyBucketMs,
- kDefaultBucketWidthMs));
- histograms_[PACKET_LATENCY_MS_HISTO].reset(
- new SimpleHistogram(0, kDefaultMaxLatencyBucketMs,
- kDefaultBucketWidthMs));
- histograms_[FRAME_LATENCY_MS_HISTO].reset(
- new SimpleHistogram(0, kDefaultMaxLatencyBucketMs,
- kDefaultBucketWidthMs));
- histograms_[LATE_FRAME_MS_HISTO].reset(
- new SimpleHistogram(0, kDefaultMaxLatencyBucketMs,
- kDefaultBucketWidthMs));
- histograms_[CAPTURE_LATENCY_MS_HISTO].reset(
- new SimpleHistogram(0, kSmallMaxLatencyBucketMs,
- kSmallBucketWidthMs));
- histograms_[ENCODE_TIME_MS_HISTO].reset(
- new SimpleHistogram(0, kSmallMaxLatencyBucketMs,
- kSmallBucketWidthMs));
+ histograms_[E2E_LATENCY_MS_HISTO] = std::make_unique<SimpleHistogram>(
+ 0, kLargeMaxLatencyBucketMs, kLargeBucketWidthMs);
+ histograms_[QUEUEING_LATENCY_MS_HISTO] = std::make_unique<SimpleHistogram>(
+ 0, kDefaultMaxLatencyBucketMs, kDefaultBucketWidthMs);
+ histograms_[NETWORK_LATENCY_MS_HISTO] = std::make_unique<SimpleHistogram>(
+ 0, kDefaultMaxLatencyBucketMs, kDefaultBucketWidthMs);
+ histograms_[PACKET_LATENCY_MS_HISTO] = std::make_unique<SimpleHistogram>(
+ 0, kDefaultMaxLatencyBucketMs, kDefaultBucketWidthMs);
+ histograms_[FRAME_LATENCY_MS_HISTO] = std::make_unique<SimpleHistogram>(
+ 0, kDefaultMaxLatencyBucketMs, kDefaultBucketWidthMs);
+ histograms_[LATE_FRAME_MS_HISTO] = std::make_unique<SimpleHistogram>(
+ 0, kDefaultMaxLatencyBucketMs, kDefaultBucketWidthMs);
+ histograms_[CAPTURE_LATENCY_MS_HISTO] = std::make_unique<SimpleHistogram>(
+ 0, kSmallMaxLatencyBucketMs, kSmallBucketWidthMs);
+ histograms_[ENCODE_TIME_MS_HISTO] = std::make_unique<SimpleHistogram>(
+ 0, kSmallMaxLatencyBucketMs, kSmallBucketWidthMs);
}
void StatsEventSubscriber::GetStatsInternal(StatsMap* stats_map) const {
diff --git a/chromium/media/cast/logging/stats_event_subscriber.h b/chromium/media/cast/logging/stats_event_subscriber.h
index 4313436162b..7b9e505c6c6 100644
--- a/chromium/media/cast/logging/stats_event_subscriber.h
+++ b/chromium/media/cast/logging/stats_event_subscriber.h
@@ -12,7 +12,6 @@
#include "base/gtest_prod_util.h"
#include "base/macros.h"
-#include "base/memory/linked_ptr.h"
#include "base/threading/thread_checker.h"
#include "base/time/tick_clock.h"
#include "media/cast/logging/logging_defines.h"
@@ -186,7 +185,7 @@ class StatsEventSubscriber : public RawEventSubscriber {
};
typedef std::map<CastStat, double> StatsMap;
- typedef std::map<CastStat, linked_ptr<SimpleHistogram> > HistogramMap;
+ typedef std::map<CastStat, std::unique_ptr<SimpleHistogram>> HistogramMap;
typedef std::map<RtpTimeTicks, FrameInfo> FrameInfoMap;
typedef std::map<std::pair<RtpTimeTicks, uint16_t>,
std::pair<base::TimeTicks, CastLoggingEvent>>
diff --git a/chromium/media/cast/receiver/audio_decoder_unittest.cc b/chromium/media/cast/receiver/audio_decoder_unittest.cc
index ced088fd41d..16884b31055 100644
--- a/chromium/media/cast/receiver/audio_decoder_unittest.cc
+++ b/chromium/media/cast/receiver/audio_decoder_unittest.cc
@@ -7,7 +7,7 @@
#include "base/bind.h"
#include "base/bind_helpers.h"
-#include "base/macros.h"
+#include "base/stl_util.h"
#include "base/synchronization/condition_variable.h"
#include "base/synchronization/lock.h"
#include "base/sys_byteorder.h"
@@ -213,7 +213,7 @@ TEST_P(AudioDecoderTest, DecodesFramesWithVaryingDuration) {
const int kFrameDurationMs[] = { 5, 10, 20, 40, 60 };
const int kNumFrames = 10;
- for (size_t i = 0; i < arraysize(kFrameDurationMs); ++i)
+ for (size_t i = 0; i < base::size(kFrameDurationMs); ++i)
for (int j = 0; j < kNumFrames; ++j)
FeedMoreAudio(base::TimeDelta::FromMilliseconds(kFrameDurationMs[i]), 0);
WaitForAllAudioToBeDecoded();
diff --git a/chromium/media/cast/sender/audio_encoder_unittest.cc b/chromium/media/cast/sender/audio_encoder_unittest.cc
index 6a46f397705..585ed713dda 100644
--- a/chromium/media/cast/sender/audio_encoder_unittest.cc
+++ b/chromium/media/cast/sender/audio_encoder_unittest.cc
@@ -13,7 +13,7 @@
#include "base/bind.h"
#include "base/bind_helpers.h"
-#include "base/macros.h"
+#include "base/stl_util.h"
#include "build/build_config.h"
#include "media/base/audio_bus.h"
#include "media/base/fake_single_thread_task_runner.h"
@@ -229,26 +229,26 @@ INSTANTIATE_TEST_CASE_P(
AudioEncoderTestScenarios,
AudioEncoderTest,
::testing::Values(
- TestScenario(kOneCall_3Millis, arraysize(kOneCall_3Millis)),
- TestScenario(kOneCall_10Millis, arraysize(kOneCall_10Millis)),
- TestScenario(kOneCall_13Millis, arraysize(kOneCall_13Millis)),
- TestScenario(kOneCall_20Millis, arraysize(kOneCall_20Millis)),
- TestScenario(kTwoCalls_3Millis, arraysize(kTwoCalls_3Millis)),
- TestScenario(kTwoCalls_10Millis, arraysize(kTwoCalls_10Millis)),
- TestScenario(kTwoCalls_Mixed1, arraysize(kTwoCalls_Mixed1)),
- TestScenario(kTwoCalls_Mixed2, arraysize(kTwoCalls_Mixed2)),
- TestScenario(kTwoCalls_Mixed3, arraysize(kTwoCalls_Mixed3)),
- TestScenario(kTwoCalls_Mixed4, arraysize(kTwoCalls_Mixed4)),
- TestScenario(kManyCalls_3Millis, arraysize(kManyCalls_3Millis)),
- TestScenario(kManyCalls_10Millis, arraysize(kManyCalls_10Millis)),
- TestScenario(kManyCalls_Mixed1, arraysize(kManyCalls_Mixed1)),
- TestScenario(kManyCalls_Mixed2, arraysize(kManyCalls_Mixed2)),
- TestScenario(kManyCalls_Mixed3, arraysize(kManyCalls_Mixed3)),
- TestScenario(kManyCalls_Mixed4, arraysize(kManyCalls_Mixed4)),
- TestScenario(kManyCalls_Mixed5, arraysize(kManyCalls_Mixed5)),
- TestScenario(kOneBigUnderrun, arraysize(kOneBigUnderrun)),
- TestScenario(kTwoBigUnderruns, arraysize(kTwoBigUnderruns)),
- TestScenario(kMixedUnderruns, arraysize(kMixedUnderruns))));
+ TestScenario(kOneCall_3Millis, base::size(kOneCall_3Millis)),
+ TestScenario(kOneCall_10Millis, base::size(kOneCall_10Millis)),
+ TestScenario(kOneCall_13Millis, base::size(kOneCall_13Millis)),
+ TestScenario(kOneCall_20Millis, base::size(kOneCall_20Millis)),
+ TestScenario(kTwoCalls_3Millis, base::size(kTwoCalls_3Millis)),
+ TestScenario(kTwoCalls_10Millis, base::size(kTwoCalls_10Millis)),
+ TestScenario(kTwoCalls_Mixed1, base::size(kTwoCalls_Mixed1)),
+ TestScenario(kTwoCalls_Mixed2, base::size(kTwoCalls_Mixed2)),
+ TestScenario(kTwoCalls_Mixed3, base::size(kTwoCalls_Mixed3)),
+ TestScenario(kTwoCalls_Mixed4, base::size(kTwoCalls_Mixed4)),
+ TestScenario(kManyCalls_3Millis, base::size(kManyCalls_3Millis)),
+ TestScenario(kManyCalls_10Millis, base::size(kManyCalls_10Millis)),
+ TestScenario(kManyCalls_Mixed1, base::size(kManyCalls_Mixed1)),
+ TestScenario(kManyCalls_Mixed2, base::size(kManyCalls_Mixed2)),
+ TestScenario(kManyCalls_Mixed3, base::size(kManyCalls_Mixed3)),
+ TestScenario(kManyCalls_Mixed4, base::size(kManyCalls_Mixed4)),
+ TestScenario(kManyCalls_Mixed5, base::size(kManyCalls_Mixed5)),
+ TestScenario(kOneBigUnderrun, base::size(kOneBigUnderrun)),
+ TestScenario(kTwoBigUnderruns, base::size(kTwoBigUnderruns)),
+ TestScenario(kMixedUnderruns, base::size(kMixedUnderruns))));
} // namespace cast
} // namespace media
diff --git a/chromium/media/cast/sender/external_video_encoder.cc b/chromium/media/cast/sender/external_video_encoder.cc
index 5e11390dccf..30e8ff9621f 100644
--- a/chromium/media/cast/sender/external_video_encoder.cc
+++ b/chromium/media/cast/sender/external_video_encoder.cc
@@ -10,9 +10,9 @@
#include "base/bind.h"
#include "base/command_line.h"
#include "base/logging.h"
-#include "base/macros.h"
#include "base/memory/shared_memory.h"
#include "base/metrics/histogram_macros.h"
+#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_util.h"
#include "build/build_config.h"
@@ -848,7 +848,7 @@ double QuantizerEstimator::EstimateForKeyFrame(const VideoFrame& frame) {
// histogram and return it.
const int num_samples = (size.width() - 1) * rows_in_subset;
return ToQuantizerEstimate(ComputeEntropyFromHistogram(
- histogram, arraysize(histogram), num_samples));
+ histogram, base::size(histogram), num_samples));
}
double QuantizerEstimator::EstimateForDeltaFrame(const VideoFrame& frame) {
@@ -893,7 +893,7 @@ double QuantizerEstimator::EstimateForDeltaFrame(const VideoFrame& frame) {
// histogram and return it.
const int num_samples = size.width() * rows_in_subset;
return ToQuantizerEstimate(ComputeEntropyFromHistogram(
- histogram, arraysize(histogram), num_samples));
+ histogram, base::size(histogram), num_samples));
}
// static
diff --git a/chromium/media/cast/sender/h264_vt_encoder.cc b/chromium/media/cast/sender/h264_vt_encoder.cc
index bca0e5272da..e4dd235fed3 100644
--- a/chromium/media/cast/sender/h264_vt_encoder.cc
+++ b/chromium/media/cast/sender/h264_vt_encoder.cc
@@ -14,8 +14,8 @@
#include "base/bind_helpers.h"
#include "base/location.h"
#include "base/logging.h"
-#include "base/macros.h"
#include "base/power_monitor/power_monitor.h"
+#include "base/stl_util.h"
#include "base/synchronization/lock.h"
#include "build/build_config.h"
#include "media/base/mac/video_frame_mac.h"
@@ -241,14 +241,14 @@ void H264VideoToolboxEncoder::ResetCompressionSession() {
CFTypeRef buffer_attributes_keys[] = {kCVPixelBufferPixelFormatTypeKey,
kCVBufferPropagatedAttachmentsKey};
CFTypeRef buffer_attributes_values[] = {
- video_toolbox::ArrayWithIntegers(format, arraysize(format)).release(),
+ video_toolbox::ArrayWithIntegers(format, base::size(format)).release(),
video_toolbox::DictionaryWithKeysAndValues(
- attachments_keys, attachments_values, arraysize(attachments_keys))
+ attachments_keys, attachments_values, base::size(attachments_keys))
.release()};
const base::ScopedCFTypeRef<CFDictionaryRef> buffer_attributes =
video_toolbox::DictionaryWithKeysAndValues(
buffer_attributes_keys, buffer_attributes_values,
- arraysize(buffer_attributes_keys));
+ base::size(buffer_attributes_keys));
for (auto* v : buffer_attributes_values)
CFRelease(v);
diff --git a/chromium/media/cast/sender/h264_vt_encoder_unittest.cc b/chromium/media/cast/sender/h264_vt_encoder_unittest.cc
index f5681b0c4f4..d8f13c609df 100644
--- a/chromium/media/cast/sender/h264_vt_encoder_unittest.cc
+++ b/chromium/media/cast/sender/h264_vt_encoder_unittest.cc
@@ -19,7 +19,6 @@
#include "media/base/cdm_context.h"
#include "media/base/decoder_buffer.h"
#include "media/base/media.h"
-#include "media/base/media_log.h"
#include "media/base/media_switches.h"
#include "media/base/media_util.h"
#include "media/cast/common/rtp_time.h"
@@ -166,7 +165,7 @@ class EndToEndFrameChecker
friend class base::RefCountedThreadSafe<EndToEndFrameChecker>;
virtual ~EndToEndFrameChecker() {}
- MediaLog media_log_;
+ NullMediaLog media_log_;
FFmpegVideoDecoder decoder_;
base::queue<scoped_refptr<VideoFrame>> expectations_;
int count_frames_checked_;
diff --git a/chromium/media/cdm/aes_decryptor.cc b/chromium/media/cdm/aes_decryptor.cc
index e102cd71f3a..ff30013216e 100644
--- a/chromium/media/cdm/aes_decryptor.cc
+++ b/chromium/media/cdm/aes_decryptor.cc
@@ -453,8 +453,8 @@ CdmContext* AesDecryptor::GetCdmContext() {
return this;
}
-std::unique_ptr<CallbackRegistration> AesDecryptor::RegisterNewKeyCB(
- base::RepeatingClosure new_key_cb) {
+std::unique_ptr<CallbackRegistration> AesDecryptor::RegisterEventCB(
+ EventCB event_cb) {
NOTIMPLEMENTED();
return nullptr;
}
@@ -553,6 +553,10 @@ void AesDecryptor::DeinitializeDecoder(StreamType stream_type) {
// nothing to be done here.
}
+bool AesDecryptor::CanAlwaysDecrypt() {
+ return true;
+}
+
bool AesDecryptor::CreateSession(const std::string& session_id,
CdmSessionType session_type) {
auto it = open_sessions_.find(session_id);
diff --git a/chromium/media/cdm/aes_decryptor.h b/chromium/media/cdm/aes_decryptor.h
index 0bc5b3deef6..94d08fd597c 100644
--- a/chromium/media/cdm/aes_decryptor.h
+++ b/chromium/media/cdm/aes_decryptor.h
@@ -62,8 +62,8 @@ class MEDIA_EXPORT AesDecryptor : public ContentDecryptionModule,
CdmContext* GetCdmContext() override;
// CdmContext implementation.
- std::unique_ptr<CallbackRegistration> RegisterNewKeyCB(
- base::RepeatingClosure new_key_cb) override;
+ std::unique_ptr<CallbackRegistration> RegisterEventCB(
+ EventCB event_cb) override;
Decryptor* GetDecryptor() override;
int GetCdmId() const override;
@@ -84,6 +84,7 @@ class MEDIA_EXPORT AesDecryptor : public ContentDecryptionModule,
const VideoDecodeCB& video_decode_cb) override;
void ResetDecoder(StreamType stream_type) override;
void DeinitializeDecoder(StreamType stream_type) override;
+ bool CanAlwaysDecrypt() override;
private:
// Testing classes that needs to manipulate internal states for testing.
diff --git a/chromium/media/cdm/aes_decryptor_unittest.cc b/chromium/media/cdm/aes_decryptor_unittest.cc
index efeaee1f715..a32323b72e0 100644
--- a/chromium/media/cdm/aes_decryptor_unittest.cc
+++ b/chromium/media/cdm/aes_decryptor_unittest.cc
@@ -13,8 +13,8 @@
#include "base/bind.h"
#include "base/debug/leak_annotations.h"
#include "base/json/json_reader.h"
-#include "base/macros.h"
#include "base/run_loop.h"
+#include "base/stl_util.h"
#include "base/test/scoped_feature_list.h"
#include "base/test/scoped_task_environment.h"
#include "base/values.h"
@@ -237,15 +237,15 @@ class AesDecryptorTest : public testing::TestWithParam<TestType> {
base::Unretained(this))),
original_data_(kOriginalData, kOriginalData + kOriginalDataSize),
encrypted_data_(kEncryptedData,
- kEncryptedData + arraysize(kEncryptedData)),
+ kEncryptedData + base::size(kEncryptedData)),
subsample_encrypted_data_(
kSubsampleEncryptedData,
- kSubsampleEncryptedData + arraysize(kSubsampleEncryptedData)),
- key_id_(kKeyId, kKeyId + arraysize(kKeyId)),
- iv_(kIv, kIv + arraysize(kIv)),
+ kSubsampleEncryptedData + base::size(kSubsampleEncryptedData)),
+ key_id_(kKeyId, kKeyId + base::size(kKeyId)),
+ iv_(kIv, kIv + base::size(kIv)),
normal_subsample_entries_(
kSubsampleEntriesNormal,
- kSubsampleEntriesNormal + arraysize(kSubsampleEntriesNormal)) {}
+ kSubsampleEntriesNormal + base::size(kSubsampleEntriesNormal)) {}
protected:
void SetUp() override {
@@ -591,7 +591,7 @@ TEST_P(AesDecryptorTest, CreateSessionWithCencInitData) {
EXPECT_CALL(cdm_client_, OnSessionMessage(NotEmpty(), _, IsJSONDictionary()));
cdm_->CreateSessionAndGenerateRequest(
CdmSessionType::kTemporary, EmeInitDataType::CENC,
- std::vector<uint8_t>(init_data, init_data + arraysize(init_data)),
+ std::vector<uint8_t>(init_data, init_data + base::size(init_data)),
CreateSessionPromise(RESOLVED));
}
@@ -602,7 +602,7 @@ TEST_P(AesDecryptorTest, CreateSessionWithKeyIdsInitData) {
EXPECT_CALL(cdm_client_, OnSessionMessage(NotEmpty(), _, IsJSONDictionary()));
cdm_->CreateSessionAndGenerateRequest(
CdmSessionType::kTemporary, EmeInitDataType::KEYIDS,
- std::vector<uint8_t>(init_data, init_data + arraysize(init_data) - 1),
+ std::vector<uint8_t>(init_data, init_data + base::size(init_data) - 1),
CreateSessionPromise(RESOLVED));
}
@@ -672,14 +672,14 @@ TEST_P(AesDecryptorTest, MultipleKeysAndFrames) {
// The second key is also available.
encrypted_buffer = CreateEncryptedBuffer(
std::vector<uint8_t>(kEncryptedData2,
- kEncryptedData2 + arraysize(kEncryptedData2)),
- std::vector<uint8_t>(kKeyId2, kKeyId2 + arraysize(kKeyId2)),
- std::vector<uint8_t>(kIv2, kIv2 + arraysize(kIv2)),
+ kEncryptedData2 + base::size(kEncryptedData2)),
+ std::vector<uint8_t>(kKeyId2, kKeyId2 + base::size(kKeyId2)),
+ std::vector<uint8_t>(kIv2, kIv2 + base::size(kIv2)),
no_subsample_entries_);
ASSERT_NO_FATAL_FAILURE(DecryptAndExpect(
encrypted_buffer,
std::vector<uint8_t>(kOriginalData2,
- kOriginalData2 + arraysize(kOriginalData2) - 1),
+ kOriginalData2 + base::size(kOriginalData2) - 1),
SUCCESS));
}
@@ -741,7 +741,7 @@ TEST_P(AesDecryptorTest, SubsampleWrongSize) {
std::vector<SubsampleEntry> subsample_entries_wrong_size(
kSubsampleEntriesWrongSize,
- kSubsampleEntriesWrongSize + arraysize(kSubsampleEntriesWrongSize));
+ kSubsampleEntriesWrongSize + base::size(kSubsampleEntriesWrongSize));
scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
subsample_encrypted_data_, key_id_, iv_, subsample_entries_wrong_size);
@@ -755,7 +755,7 @@ TEST_P(AesDecryptorTest, SubsampleInvalidTotalSize) {
std::vector<SubsampleEntry> subsample_entries_invalid_total_size(
kSubsampleEntriesInvalidTotalSize,
kSubsampleEntriesInvalidTotalSize +
- arraysize(kSubsampleEntriesInvalidTotalSize));
+ base::size(kSubsampleEntriesInvalidTotalSize));
scoped_refptr<DecoderBuffer> encrypted_buffer =
CreateEncryptedBuffer(subsample_encrypted_data_, key_id_, iv_,
@@ -770,7 +770,7 @@ TEST_P(AesDecryptorTest, SubsampleClearBytesOnly) {
std::vector<SubsampleEntry> clear_only_subsample_entries(
kSubsampleEntriesClearOnly,
- kSubsampleEntriesClearOnly + arraysize(kSubsampleEntriesClearOnly));
+ kSubsampleEntriesClearOnly + base::size(kSubsampleEntriesClearOnly));
scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
original_data_, key_id_, iv_, clear_only_subsample_entries);
@@ -784,7 +784,7 @@ TEST_P(AesDecryptorTest, SubsampleCypherBytesOnly) {
std::vector<SubsampleEntry> cypher_only_subsample_entries(
kSubsampleEntriesCypherOnly,
- kSubsampleEntriesCypherOnly + arraysize(kSubsampleEntriesCypherOnly));
+ kSubsampleEntriesCypherOnly + base::size(kSubsampleEntriesCypherOnly));
scoped_refptr<DecoderBuffer> encrypted_buffer = CreateEncryptedBuffer(
encrypted_data_, key_id_, iv_, cypher_only_subsample_entries);
@@ -1031,8 +1031,8 @@ TEST_P(AesDecryptorTest, JWKKey) {
}
TEST_P(AesDecryptorTest, GetKeyIds) {
- std::vector<uint8_t> key_id1(kKeyId, kKeyId + arraysize(kKeyId));
- std::vector<uint8_t> key_id2(kKeyId2, kKeyId2 + arraysize(kKeyId2));
+ std::vector<uint8_t> key_id1(kKeyId, kKeyId + base::size(kKeyId));
+ std::vector<uint8_t> key_id2(kKeyId2, kKeyId2 + base::size(kKeyId2));
std::string session_id = CreateSession(key_id_);
EXPECT_FALSE(KeysInfoContains(key_id1));
@@ -1050,7 +1050,7 @@ TEST_P(AesDecryptorTest, GetKeyIds) {
}
TEST_P(AesDecryptorTest, NoKeysChangeForSameKey) {
- std::vector<uint8_t> key_id(kKeyId, kKeyId + arraysize(kKeyId));
+ std::vector<uint8_t> key_id(kKeyId, kKeyId + base::size(kKeyId));
std::string session_id = CreateSession(key_id_);
EXPECT_FALSE(KeysInfoContains(key_id));
@@ -1069,7 +1069,7 @@ TEST_P(AesDecryptorTest, NoKeysChangeForSameKey) {
}
TEST_P(AesDecryptorTest, RandomSessionIDs) {
- std::vector<uint8_t> key_id(kKeyId, kKeyId + arraysize(kKeyId));
+ std::vector<uint8_t> key_id(kKeyId, kKeyId + base::size(kKeyId));
const size_t kNumIterations = 25;
std::set<std::string> seen_sessions;
diff --git a/chromium/media/cdm/cdm_adapter.cc b/chromium/media/cdm/cdm_adapter.cc
index 6e5469cadb8..0903ff106fd 100644
--- a/chromium/media/cdm/cdm_adapter.cc
+++ b/chromium/media/cdm/cdm_adapter.cc
@@ -386,8 +386,8 @@ CdmContext* CdmAdapter::GetCdmContext() {
return this;
}
-std::unique_ptr<CallbackRegistration> CdmAdapter::RegisterNewKeyCB(
- base::RepeatingClosure new_key_cb) {
+std::unique_ptr<CallbackRegistration> CdmAdapter::RegisterEventCB(
+ EventCB event_cb) {
NOTIMPLEMENTED();
return nullptr;
}
@@ -654,7 +654,10 @@ cdm::Buffer* CdmAdapter::Allocate(uint32_t capacity) {
}
void CdmAdapter::SetTimer(int64_t delay_ms, void* context) {
- DCHECK(task_runner_->BelongsToCurrentThread());
+ // TODO(crbug.com/887761): Use CHECKs for bug investigation. Change back to
+ // DCHECK after it's completed.
+ CHECK(task_runner_);
+ CHECK(task_runner_->BelongsToCurrentThread());
auto delay = base::TimeDelta::FromMilliseconds(delay_ms);
DVLOG(3) << __func__ << ": delay = " << delay << ", context = " << context;
diff --git a/chromium/media/cdm/cdm_adapter.h b/chromium/media/cdm/cdm_adapter.h
index d77296d34bf..43691d31af6 100644
--- a/chromium/media/cdm/cdm_adapter.h
+++ b/chromium/media/cdm/cdm_adapter.h
@@ -93,8 +93,7 @@ class MEDIA_EXPORT CdmAdapter : public ContentDecryptionModule,
CdmContext* GetCdmContext() final;
// CdmContext implementation.
- std::unique_ptr<CallbackRegistration> RegisterNewKeyCB(
- base::RepeatingClosure new_key_cb) final;
+ std::unique_ptr<CallbackRegistration> RegisterEventCB(EventCB event_cb) final;
Decryptor* GetDecryptor() final;
int GetCdmId() const final;
diff --git a/chromium/media/cdm/cdm_adapter_unittest.cc b/chromium/media/cdm/cdm_adapter_unittest.cc
index 4ecc77cfec7..771161fcb72 100644
--- a/chromium/media/cdm/cdm_adapter_unittest.cc
+++ b/chromium/media/cdm/cdm_adapter_unittest.cc
@@ -10,8 +10,8 @@
#include "base/bind.h"
#include "base/command_line.h"
#include "base/logging.h"
-#include "base/macros.h"
#include "base/run_loop.h"
+#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/test/scoped_task_environment.h"
#include "media/base/cdm_callback_promise.h"
@@ -392,7 +392,7 @@ TEST_P(CdmAdapterTestWithClearKeyCdm, BadLibraryPath) {
TEST_P(CdmAdapterTestWithClearKeyCdm, CreateWebmSession) {
InitializeAndExpect(SUCCESS);
- std::vector<uint8_t> key_id(kKeyId, kKeyId + arraysize(kKeyId));
+ std::vector<uint8_t> key_id(kKeyId, kKeyId + base::size(kKeyId));
CreateSessionAndExpect(EmeInitDataType::WEBM, key_id, SUCCESS);
}
@@ -401,7 +401,7 @@ TEST_P(CdmAdapterTestWithClearKeyCdm, CreateKeyIdsSession) {
// Don't include the trailing /0 from the string in the data passed in.
std::vector<uint8_t> key_id(kKeyIdAsJWK,
- kKeyIdAsJWK + arraysize(kKeyIdAsJWK) - 1);
+ kKeyIdAsJWK + base::size(kKeyIdAsJWK) - 1);
CreateSessionAndExpect(EmeInitDataType::KEYIDS, key_id, SUCCESS);
}
@@ -409,7 +409,7 @@ TEST_P(CdmAdapterTestWithClearKeyCdm, CreateCencSession) {
InitializeAndExpect(SUCCESS);
std::vector<uint8_t> key_id(kKeyIdAsPssh,
- kKeyIdAsPssh + arraysize(kKeyIdAsPssh));
+ kKeyIdAsPssh + base::size(kKeyIdAsPssh));
CreateSessionAndExpect(EmeInitDataType::CENC, key_id, SUCCESS);
}
@@ -417,7 +417,7 @@ TEST_P(CdmAdapterTestWithClearKeyCdm, CreateSessionWithBadData) {
InitializeAndExpect(SUCCESS);
// Use |kKeyId| but specify KEYIDS format.
- std::vector<uint8_t> key_id(kKeyId, kKeyId + arraysize(kKeyId));
+ std::vector<uint8_t> key_id(kKeyId, kKeyId + base::size(kKeyId));
CreateSessionAndExpect(EmeInitDataType::KEYIDS, key_id, FAILURE);
}
@@ -425,14 +425,14 @@ TEST_P(CdmAdapterTestWithClearKeyCdm, LoadSession) {
InitializeAndExpect(SUCCESS);
// LoadSession() is not supported by AesDecryptor.
- std::vector<uint8_t> key_id(kKeyId, kKeyId + arraysize(kKeyId));
+ std::vector<uint8_t> key_id(kKeyId, kKeyId + base::size(kKeyId));
CreateSessionAndExpect(EmeInitDataType::KEYIDS, key_id, FAILURE);
}
TEST_P(CdmAdapterTestWithClearKeyCdm, UpdateSession) {
InitializeAndExpect(SUCCESS);
- std::vector<uint8_t> key_id(kKeyId, kKeyId + arraysize(kKeyId));
+ std::vector<uint8_t> key_id(kKeyId, kKeyId + base::size(kKeyId));
CreateSessionAndExpect(EmeInitDataType::WEBM, key_id, SUCCESS);
UpdateSessionAndExpect(SessionId(), kKeyAsJWK, SUCCESS, true);
@@ -441,7 +441,7 @@ TEST_P(CdmAdapterTestWithClearKeyCdm, UpdateSession) {
TEST_P(CdmAdapterTestWithClearKeyCdm, UpdateSessionWithBadData) {
InitializeAndExpect(SUCCESS);
- std::vector<uint8_t> key_id(kKeyId, kKeyId + arraysize(kKeyId));
+ std::vector<uint8_t> key_id(kKeyId, kKeyId + base::size(kKeyId));
CreateSessionAndExpect(EmeInitDataType::WEBM, key_id, SUCCESS);
UpdateSessionAndExpect(SessionId(), "random data", FAILURE, true);
diff --git a/chromium/media/cdm/cenc_decryptor_unittest.cc b/chromium/media/cdm/cenc_decryptor_unittest.cc
index 3a8c383a540..adab4d8ca68 100644
--- a/chromium/media/cdm/cenc_decryptor_unittest.cc
+++ b/chromium/media/cdm/cenc_decryptor_unittest.cc
@@ -11,6 +11,7 @@
#include <vector>
#include "base/containers/span.h"
+#include "base/stl_util.h"
#include "base/time/time.h"
#include "crypto/encryptor.h"
#include "crypto/symmetric_key.h"
@@ -25,11 +26,11 @@ namespace {
// Keys and IVs have to be 128 bits.
const uint8_t kKey[] = {0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b,
0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13};
-static_assert(arraysize(kKey) * 8 == 128, "kKey must be 128 bits");
+static_assert(base::size(kKey) * 8 == 128, "kKey must be 128 bits");
const uint8_t kIv[] = {0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
-static_assert(arraysize(kIv) * 8 == 128, "kIv must be 128 bits");
+static_assert(base::size(kIv) * 8 == 128, "kIv must be 128 bits");
const uint8_t kOneBlock[] = {'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p'};
@@ -65,11 +66,11 @@ class CencDecryptorTest : public testing::Test {
CencDecryptorTest()
: key_(crypto::SymmetricKey::Import(
crypto::SymmetricKey::AES,
- std::string(kKey, kKey + arraysize(kKey)))),
- iv_(kIv, kIv + arraysize(kIv)),
- one_block_(kOneBlock, kOneBlock + arraysize(kOneBlock)),
+ std::string(kKey, kKey + base::size(kKey)))),
+ iv_(kIv, kIv + base::size(kIv)),
+ one_block_(kOneBlock, kOneBlock + base::size(kOneBlock)),
partial_block_(kPartialBlock,
- kPartialBlock + arraysize(kPartialBlock)) {}
+ kPartialBlock + base::size(kPartialBlock)) {}
// Excrypt |original| using AES-CTR encryption with |key| and |iv|.
std::vector<uint8_t> Encrypt(const std::vector<uint8_t>& original,
@@ -207,7 +208,7 @@ TEST_F(CencDecryptorTest, InvalidIv) {
TEST_F(CencDecryptorTest, InvalidKey) {
std::unique_ptr<crypto::SymmetricKey> bad_key = crypto::SymmetricKey::Import(
- crypto::SymmetricKey::AES, std::string(arraysize(kKey), 'b'));
+ crypto::SymmetricKey::AES, std::string(base::size(kKey), 'b'));
auto encrypted_block = Encrypt(one_block_, *key_, iv_);
std::vector<SubsampleEntry> subsamples = {
diff --git a/chromium/media/cdm/cenc_utils.cc b/chromium/media/cdm/cenc_utils.cc
index f885061731c..19583549caa 100644
--- a/chromium/media/cdm/cenc_utils.cc
+++ b/chromium/media/cdm/cenc_utils.cc
@@ -6,7 +6,8 @@
#include <memory>
-#include "base/macros.h"
+#include "base/stl_util.h"
+#include "media/base/media_util.h"
#include "media/formats/mp4/box_definitions.h"
#include "media/formats/mp4/box_reader.h"
@@ -33,7 +34,7 @@ static bool ReadAllPsshBoxes(
DCHECK(!input.empty());
// TODO(wolenetz): Questionable MediaLog usage, http://crbug.com/712310
- MediaLog media_log;
+ NullMediaLog media_log;
// Verify that |input| contains only 'pssh' boxes.
// ReadAllChildrenAndCheckFourCC() is templated, so it checks that each
@@ -94,7 +95,7 @@ bool GetKeyIdsForCommonSystemId(const std::vector<uint8_t>& pssh_boxes,
KeyIdList result;
std::vector<uint8_t> common_system_id(
kCencCommonSystemId,
- kCencCommonSystemId + arraysize(kCencCommonSystemId));
+ kCencCommonSystemId + base::size(kCencCommonSystemId));
for (const auto& child : children) {
if (child.system_id == common_system_id) {
key_ids->assign(child.key_ids.begin(), child.key_ids.end());
diff --git a/chromium/media/cdm/cenc_utils_unittest.cc b/chromium/media/cdm/cenc_utils_unittest.cc
index 1b038ff4457..d068c11b473 100644
--- a/chromium/media/cdm/cenc_utils_unittest.cc
+++ b/chromium/media/cdm/cenc_utils_unittest.cc
@@ -10,7 +10,7 @@
#include <limits>
#include "base/logging.h"
-#include "base/macros.h"
+#include "base/stl_util.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace media {
@@ -39,13 +39,13 @@ const uint8_t kCommonSystemSystemId[] = {
class CencUtilsTest : public testing::Test {
public:
CencUtilsTest()
- : key1_(kKey1Data, kKey1Data + arraysize(kKey1Data)),
- key2_(kKey2Data, kKey2Data + arraysize(kKey2Data)),
- key3_(kKey3Data, kKey3Data + arraysize(kKey3Data)),
- key4_(kKey4Data, kKey4Data + arraysize(kKey4Data)),
+ : key1_(kKey1Data, kKey1Data + base::size(kKey1Data)),
+ key2_(kKey2Data, kKey2Data + base::size(kKey2Data)),
+ key3_(kKey3Data, kKey3Data + base::size(kKey3Data)),
+ key4_(kKey4Data, kKey4Data + base::size(kKey4Data)),
common_system_system_id_(
kCommonSystemSystemId,
- kCommonSystemSystemId + arraysize(kCommonSystemSystemId)) {}
+ kCommonSystemSystemId + base::size(kCommonSystemSystemId)) {}
protected:
// Initialize the start of the 'pssh' box (up to key_count)
@@ -337,9 +337,9 @@ TEST_F(CencUtilsTest, LongSize) {
KeyIdList key_ids;
EXPECT_TRUE(
- ValidatePsshInput(std::vector<uint8_t>(data, data + arraysize(data))));
+ ValidatePsshInput(std::vector<uint8_t>(data, data + base::size(data))));
EXPECT_TRUE(GetKeyIdsForCommonSystemId(
- std::vector<uint8_t>(data, data + arraysize(data)), &key_ids));
+ std::vector<uint8_t>(data, data + base::size(data)), &key_ids));
EXPECT_EQ(2u, key_ids.size());
}
@@ -361,9 +361,9 @@ TEST_F(CencUtilsTest, SizeIsZero) {
KeyIdList key_ids;
EXPECT_TRUE(
- ValidatePsshInput(std::vector<uint8_t>(data, data + arraysize(data))));
+ ValidatePsshInput(std::vector<uint8_t>(data, data + base::size(data))));
EXPECT_TRUE(GetKeyIdsForCommonSystemId(
- std::vector<uint8_t>(data, data + arraysize(data)), &key_ids));
+ std::vector<uint8_t>(data, data + base::size(data)), &key_ids));
EXPECT_EQ(2u, key_ids.size());
}
@@ -388,9 +388,9 @@ TEST_F(CencUtilsTest, HugeSize) {
// These calls fail as the box size is huge (0xffffffffffffffff) and there
// is not enough bytes in |data|.
EXPECT_FALSE(
- ValidatePsshInput(std::vector<uint8_t>(data, data + arraysize(data))));
+ ValidatePsshInput(std::vector<uint8_t>(data, data + base::size(data))));
EXPECT_FALSE(GetKeyIdsForCommonSystemId(
- std::vector<uint8_t>(data, data + arraysize(data)), &key_ids));
+ std::vector<uint8_t>(data, data + base::size(data)), &key_ids));
}
TEST_F(CencUtilsTest, GetPsshData_Version0) {
@@ -401,7 +401,7 @@ TEST_F(CencUtilsTest, GetPsshData_Version0) {
EXPECT_TRUE(GetPsshData(box, CommonSystemSystemId(), &pssh_data));
EXPECT_EQ(0u, pssh_data.size());
- std::vector<uint8_t> data(data_bytes, data_bytes + arraysize(data_bytes));
+ std::vector<uint8_t> data(data_bytes, data_bytes + base::size(data_bytes));
AppendData(box, data);
EXPECT_TRUE(GetPsshData(box, CommonSystemSystemId(), &pssh_data));
EXPECT_EQ(data, pssh_data);
@@ -415,7 +415,7 @@ TEST_F(CencUtilsTest, GetPsshData_Version1NoKeys) {
EXPECT_TRUE(GetPsshData(box, CommonSystemSystemId(), &pssh_data));
EXPECT_EQ(0u, pssh_data.size());
- std::vector<uint8_t> data(data_bytes, data_bytes + arraysize(data_bytes));
+ std::vector<uint8_t> data(data_bytes, data_bytes + base::size(data_bytes));
AppendData(box, data);
EXPECT_TRUE(GetPsshData(box, CommonSystemSystemId(), &pssh_data));
EXPECT_EQ(data, pssh_data);
@@ -429,7 +429,7 @@ TEST_F(CencUtilsTest, GetPsshData_Version1WithKeys) {
EXPECT_TRUE(GetPsshData(box, CommonSystemSystemId(), &pssh_data));
EXPECT_EQ(0u, pssh_data.size());
- std::vector<uint8_t> data(data_bytes, data_bytes + arraysize(data_bytes));
+ std::vector<uint8_t> data(data_bytes, data_bytes + base::size(data_bytes));
AppendData(box, data);
EXPECT_TRUE(GetPsshData(box, CommonSystemSystemId(), &pssh_data));
EXPECT_EQ(data, pssh_data);
@@ -488,7 +488,7 @@ TEST_F(CencUtilsTest, GetPsshData_Version1ThenVersion2) {
TEST_F(CencUtilsTest, GetPsshData_DifferentSystemID) {
std::vector<uint8_t> unknown_system_id(kKey1Data,
- kKey1Data + arraysize(kKey1Data));
+ kKey1Data + base::size(kKey1Data));
std::vector<uint8_t> pssh_data;
std::vector<uint8_t> box = MakePSSHBox(1, Key1());
@@ -501,7 +501,7 @@ TEST_F(CencUtilsTest, GetPsshData_MissingData) {
std::vector<uint8_t> pssh_data;
std::vector<uint8_t> box = MakePSSHBox(1, Key1());
- std::vector<uint8_t> data(data_bytes, data_bytes + arraysize(data_bytes));
+ std::vector<uint8_t> data(data_bytes, data_bytes + base::size(data_bytes));
AppendData(box, data);
EXPECT_TRUE(GetPsshData(box, CommonSystemSystemId(), &pssh_data));
@@ -517,11 +517,13 @@ TEST_F(CencUtilsTest, GetPsshData_MultiplePssh) {
std::vector<uint8_t> pssh_data;
std::vector<uint8_t> box1 = MakePSSHBox(1, Key1());
- std::vector<uint8_t> data1(data1_bytes, data1_bytes + arraysize(data1_bytes));
+ std::vector<uint8_t> data1(data1_bytes,
+ data1_bytes + base::size(data1_bytes));
AppendData(box1, data1);
std::vector<uint8_t> box2 = MakePSSHBox(0);
- std::vector<uint8_t> data2(data2_bytes, data2_bytes + arraysize(data2_bytes));
+ std::vector<uint8_t> data2(data2_bytes,
+ data2_bytes + base::size(data2_bytes));
AppendData(box2, data2);
box1.insert(box1.end(), box2.begin(), box2.end());
@@ -536,7 +538,7 @@ TEST_F(CencUtilsTest, NonPsshData) {
0x00, 0x00, 0x00, 0x08, // size = 8
'p', 's', 's', 'g'
};
- std::vector<uint8_t> non_pssh_box(data, data + arraysize(data));
+ std::vector<uint8_t> non_pssh_box(data, data + base::size(data));
EXPECT_FALSE(ValidatePsshInput(non_pssh_box));
// Make a valid 'pssh' box.
diff --git a/chromium/media/cdm/json_web_key_unittest.cc b/chromium/media/cdm/json_web_key_unittest.cc
index f0c4de25a29..3c4ed4f28f3 100644
--- a/chromium/media/cdm/json_web_key_unittest.cc
+++ b/chromium/media/cdm/json_web_key_unittest.cc
@@ -9,7 +9,7 @@
#include "base/base64.h"
#include "base/logging.h"
-#include "base/macros.h"
+#include "base/stl_util.h"
#include "media/base/content_decryption_module.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -93,35 +93,35 @@ TEST_F(JSONWebKeyTest, GenerateJWKSet) {
0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10};
EXPECT_EQ("{\"keys\":[{\"k\":\"AQI\",\"kid\":\"AQI\",\"kty\":\"oct\"}]}",
- GenerateJWKSet(data1, arraysize(data1), data1, arraysize(data1)));
+ GenerateJWKSet(data1, base::size(data1), data1, base::size(data1)));
EXPECT_EQ(
"{\"keys\":[{\"k\":\"AQIDBA\",\"kid\":\"AQIDBA\",\"kty\":\"oct\"}]}",
- GenerateJWKSet(data2, arraysize(data2), data2, arraysize(data2)));
+ GenerateJWKSet(data2, base::size(data2), data2, base::size(data2)));
EXPECT_EQ("{\"keys\":[{\"k\":\"AQI\",\"kid\":\"AQIDBA\",\"kty\":\"oct\"}]}",
- GenerateJWKSet(data1, arraysize(data1), data2, arraysize(data2)));
+ GenerateJWKSet(data1, base::size(data1), data2, base::size(data2)));
EXPECT_EQ("{\"keys\":[{\"k\":\"AQIDBA\",\"kid\":\"AQI\",\"kty\":\"oct\"}]}",
- GenerateJWKSet(data2, arraysize(data2), data1, arraysize(data1)));
+ GenerateJWKSet(data2, base::size(data2), data1, base::size(data1)));
EXPECT_EQ(
"{\"keys\":[{\"k\":\"AQIDBAUGBwgJCgsMDQ4PEA\",\"kid\":"
"\"AQIDBAUGBwgJCgsMDQ4PEA\",\"kty\":\"oct\"}]}",
- GenerateJWKSet(data3, arraysize(data3), data3, arraysize(data3)));
+ GenerateJWKSet(data3, base::size(data3), data3, base::size(data3)));
KeyIdAndKeyPairs keys;
keys.push_back(
- MakeKeyIdAndKeyPair(data1, arraysize(data1), data1, arraysize(data1)));
+ MakeKeyIdAndKeyPair(data1, base::size(data1), data1, base::size(data1)));
EXPECT_EQ(
"{\"keys\":[{\"k\":\"AQI\",\"kid\":\"AQI\",\"kty\":\"oct\"}],\"type\":"
"\"temporary\"}",
GenerateJWKSet(keys, CdmSessionType::kTemporary));
keys.push_back(
- MakeKeyIdAndKeyPair(data2, arraysize(data2), data2, arraysize(data2)));
+ MakeKeyIdAndKeyPair(data2, base::size(data2), data2, base::size(data2)));
EXPECT_EQ(
"{\"keys\":[{\"k\":\"AQI\",\"kid\":\"AQI\",\"kty\":\"oct\"},{\"k\":"
"\"AQIDBA\",\"kid\":\"AQIDBA\",\"kty\":\"oct\"}],\"type\":\"persistent-"
"license\"}",
GenerateJWKSet(keys, CdmSessionType::kPersistentLicense));
keys.push_back(
- MakeKeyIdAndKeyPair(data3, arraysize(data3), data3, arraysize(data3)));
+ MakeKeyIdAndKeyPair(data3, base::size(data3), data3, base::size(data3)));
EXPECT_EQ(
"{\"keys\":[{\"k\":\"AQI\",\"kid\":\"AQI\",\"kty\":\"oct\"},{\"k\":"
"\"AQIDBA\",\"kid\":\"AQIDBA\",\"kty\":\"oct\"},{\"k\":"
@@ -426,17 +426,17 @@ TEST_F(JSONWebKeyTest, CreateLicense) {
const uint8_t data3[] = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10};
- CreateLicenseAndExpect(data1, arraysize(data1), CdmSessionType::kTemporary,
+ CreateLicenseAndExpect(data1, base::size(data1), CdmSessionType::kTemporary,
"{\"kids\":[\"AQI\"],\"type\":\"temporary\"}");
CreateLicenseAndExpect(
- data1, arraysize(data1), CdmSessionType::kPersistentLicense,
+ data1, base::size(data1), CdmSessionType::kPersistentLicense,
"{\"kids\":[\"AQI\"],\"type\":\"persistent-license\"}");
CreateLicenseAndExpect(
- data1, arraysize(data1), CdmSessionType::kPersistentUsageRecord,
+ data1, base::size(data1), CdmSessionType::kPersistentUsageRecord,
"{\"kids\":[\"AQI\"],\"type\":\"persistent-usage-record\"}");
- CreateLicenseAndExpect(data2, arraysize(data2), CdmSessionType::kTemporary,
+ CreateLicenseAndExpect(data2, base::size(data2), CdmSessionType::kTemporary,
"{\"kids\":[\"AQIDBA\"],\"type\":\"temporary\"}");
- CreateLicenseAndExpect(data3, arraysize(data3),
+ CreateLicenseAndExpect(data3, base::size(data3),
CdmSessionType::kPersistentLicense,
"{\"kids\":[\"AQIDBAUGBwgJCgsMDQ4PEA\"],\"type\":"
"\"persistent-license\"}");
@@ -448,21 +448,14 @@ TEST_F(JSONWebKeyTest, ExtractLicense) {
const uint8_t data3[] = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10};
+ ExtractKeyFromLicenseAndExpect("{\"kids\":[\"AQI\"],\"type\":\"temporary\"}",
+ true, data1, base::size(data1));
ExtractKeyFromLicenseAndExpect(
- "{\"kids\":[\"AQI\"],\"type\":\"temporary\"}",
- true,
- data1,
- arraysize(data1));
+ "{\"kids\":[\"AQIDBA\"],\"type\":\"temporary\"}", true, data2,
+ base::size(data2));
ExtractKeyFromLicenseAndExpect(
- "{\"kids\":[\"AQIDBA\"],\"type\":\"temporary\"}",
- true,
- data2,
- arraysize(data2));
- ExtractKeyFromLicenseAndExpect(
- "{\"kids\":[\"AQIDBAUGBwgJCgsMDQ4PEA\"],\"type\":\"persistent\"}",
- true,
- data3,
- arraysize(data3));
+ "{\"kids\":[\"AQIDBAUGBwgJCgsMDQ4PEA\"],\"type\":\"persistent\"}", true,
+ data3, base::size(data3));
// Try some incorrect JSON.
ExtractKeyFromLicenseAndExpect("", false, NULL, 0);
@@ -492,7 +485,7 @@ TEST_F(JSONWebKeyTest, Base64UrlEncoding) {
// and is padded with = when converted to base64.
std::string encoded_text;
base::Base64Encode(
- std::string(reinterpret_cast<const char*>(&data1[0]), arraysize(data1)),
+ std::string(reinterpret_cast<const char*>(&data1[0]), base::size(data1)),
&encoded_text);
EXPECT_EQ(encoded_text, "+/37/fv9+w==");
EXPECT_NE(encoded_text.find('+'), std::string::npos);
@@ -503,12 +496,12 @@ TEST_F(JSONWebKeyTest, Base64UrlEncoding) {
EXPECT_EQ(encoded_text.find('-'), std::string::npos);
EXPECT_EQ(encoded_text.find('_'), std::string::npos);
- CreateLicenseAndExpect(data1, arraysize(data1), CdmSessionType::kTemporary,
+ CreateLicenseAndExpect(data1, base::size(data1), CdmSessionType::kTemporary,
"{\"kids\":[\"-_37_fv9-w\"],\"type\":\"temporary\"}");
ExtractKeyFromLicenseAndExpect(
"{\"kids\":[\"-_37_fv9-w\"],\"type\":\"temporary\"}", true, data1,
- arraysize(data1));
+ base::size(data1));
}
TEST_F(JSONWebKeyTest, MultipleKeys) {
@@ -519,9 +512,9 @@ TEST_F(JSONWebKeyTest, MultipleKeys) {
std::vector<uint8_t> result;
KeyIdList key_ids;
- key_ids.push_back(std::vector<uint8_t>(data1, data1 + arraysize(data1)));
- key_ids.push_back(std::vector<uint8_t>(data2, data2 + arraysize(data2)));
- key_ids.push_back(std::vector<uint8_t>(data3, data3 + arraysize(data3)));
+ key_ids.push_back(std::vector<uint8_t>(data1, data1 + base::size(data1)));
+ key_ids.push_back(std::vector<uint8_t>(data2, data2 + base::size(data2)));
+ key_ids.push_back(std::vector<uint8_t>(data3, data3 + base::size(data3)));
CreateLicenseRequest(key_ids, CdmSessionType::kTemporary, &result);
std::string s(result.begin(), result.end());
EXPECT_EQ(
@@ -543,16 +536,16 @@ TEST_F(JSONWebKeyTest, ExtractKeyIds) {
&error_message));
EXPECT_EQ(1u, key_ids.size());
EXPECT_EQ(0u, error_message.length());
- VerifyKeyId(key_ids[0], data1, arraysize(data1));
+ VerifyKeyId(key_ids[0], data1, base::size(data1));
EXPECT_TRUE(ExtractKeyIdsFromKeyIdsInitData(
"{\"kids\":[\"AQI\",\"AQIDBA\",\"AQIDBAUGBwgJCgsMDQ4PEA\"]}", &key_ids,
&error_message));
EXPECT_EQ(3u, key_ids.size());
EXPECT_EQ(0u, error_message.length());
- VerifyKeyId(key_ids[0], data1, arraysize(data1));
- VerifyKeyId(key_ids[1], data2, arraysize(data2));
- VerifyKeyId(key_ids[2], data3, arraysize(data3));
+ VerifyKeyId(key_ids[0], data1, base::size(data1));
+ VerifyKeyId(key_ids[1], data2, base::size(data2));
+ VerifyKeyId(key_ids[2], data3, base::size(data3));
// Expect failure when non-ascii.
EXPECT_FALSE(ExtractKeyIdsFromKeyIdsInitData(
@@ -617,19 +610,19 @@ TEST_F(JSONWebKeyTest, CreateInitData) {
KeyIdList key_ids;
std::string error_message;
- key_ids.push_back(std::vector<uint8_t>(data1, data1 + arraysize(data1)));
+ key_ids.push_back(std::vector<uint8_t>(data1, data1 + base::size(data1)));
std::vector<uint8_t> init_data1;
CreateKeyIdsInitData(key_ids, &init_data1);
std::string result1(init_data1.begin(), init_data1.end());
EXPECT_EQ(result1, "{\"kids\":[\"AQI\"]}");
- key_ids.push_back(std::vector<uint8_t>(data2, data2 + arraysize(data2)));
+ key_ids.push_back(std::vector<uint8_t>(data2, data2 + base::size(data2)));
std::vector<uint8_t> init_data2;
CreateKeyIdsInitData(key_ids, &init_data2);
std::string result2(init_data2.begin(), init_data2.end());
EXPECT_EQ(result2, "{\"kids\":[\"AQI\",\"AQIDBA\"]}");
- key_ids.push_back(std::vector<uint8_t>(data3, data3 + arraysize(data3)));
+ key_ids.push_back(std::vector<uint8_t>(data3, data3 + base::size(data3)));
std::vector<uint8_t> init_data3;
CreateKeyIdsInitData(key_ids, &init_data3);
std::string result3(init_data3.begin(), init_data3.end());
diff --git a/chromium/media/cdm/library_cdm/clear_key_cdm/cdm_file_io_test.cc b/chromium/media/cdm/library_cdm/clear_key_cdm/cdm_file_io_test.cc
index 75d4c46af34..477dc31c022 100644
--- a/chromium/media/cdm/library_cdm/clear_key_cdm/cdm_file_io_test.cc
+++ b/chromium/media/cdm/library_cdm/clear_key_cdm/cdm_file_io_test.cc
@@ -9,7 +9,7 @@
#include "base/bind.h"
#include "base/callback_helpers.h"
#include "base/logging.h"
-#include "base/macros.h"
+#include "base/stl_util.h"
namespace media {
@@ -17,13 +17,13 @@ namespace media {
const uint8_t kData[] = {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f};
-const uint32_t kDataSize = arraysize(kData);
+const uint32_t kDataSize = base::size(kData);
const uint8_t kBigData[] = {
0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa,
0xbb, 0xcc, 0xdd, 0xee, 0xff, 0x00, 0x11, 0x22, 0x33, 0x44, 0x55,
0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff, 0x00};
-const uint32_t kBigDataSize = arraysize(kBigData);
+const uint32_t kBigDataSize = base::size(kBigData);
// Must be > kReadSize in cdm_file_io_impl.cc.
const uint32_t kLargeDataSize = 20 * 1024 + 7;
diff --git a/chromium/media/cdm/library_cdm/clear_key_cdm/cdm_video_decoder.cc b/chromium/media/cdm/library_cdm/clear_key_cdm/cdm_video_decoder.cc
index 32a82d63663..8189991105c 100644
--- a/chromium/media/cdm/library_cdm/clear_key_cdm/cdm_video_decoder.cc
+++ b/chromium/media/cdm/library_cdm/clear_key_cdm/cdm_video_decoder.cc
@@ -182,7 +182,7 @@ class VideoDecoderAdapter : public CdmVideoDecoder {
weak_factory_.GetWeakPtr(), run_loop.QuitClosure()),
base::BindRepeating(&VideoDecoderAdapter::OnVideoFrameReady,
weak_factory_.GetWeakPtr()),
- /* waiting_for_decryption_key_cb = */ base::DoNothing());
+ /* waiting_cb = */ base::DoNothing());
run_loop.Run();
auto result = last_init_result_.value();
diff --git a/chromium/media/device_monitors/system_message_window_win.cc b/chromium/media/device_monitors/system_message_window_win.cc
index c90040b127e..b7bba16a79f 100644
--- a/chromium/media/device_monitors/system_message_window_win.cc
+++ b/chromium/media/device_monitors/system_message_window_win.cc
@@ -8,7 +8,7 @@
#include <stddef.h>
#include "base/logging.h"
-#include "base/macros.h"
+#include "base/stl_util.h"
#include "base/system/system_monitor.h"
#include "base/win/wrapped_window_proc.h"
#include "media/audio/win/core_audio_util_win.h"
@@ -43,7 +43,7 @@ class SystemMessageWindowWin::DeviceNotifications {
filter.dbcc_size = sizeof(filter);
filter.dbcc_devicetype = DBT_DEVTYP_DEVICEINTERFACE;
bool core_audio_support = media::CoreAudioUtil::IsSupported();
- for (size_t i = 0; i < arraysize(kDeviceCategoryMap); ++i) {
+ for (size_t i = 0; i < base::size(kDeviceCategoryMap); ++i) {
// If CoreAudio is supported, AudioDeviceListenerWin will
// take care of monitoring audio devices.
if (core_audio_support &&
@@ -61,7 +61,7 @@ class SystemMessageWindowWin::DeviceNotifications {
}
void Unregister() {
- for (size_t i = 0; i < arraysize(notifications_); ++i) {
+ for (size_t i = 0; i < base::size(notifications_); ++i) {
if (notifications_[i]) {
UnregisterDeviceNotification(notifications_[i]);
notifications_[i] = NULL;
@@ -70,7 +70,7 @@ class SystemMessageWindowWin::DeviceNotifications {
}
private:
- HDEVNOTIFY notifications_[arraysize(kDeviceCategoryMap)];
+ HDEVNOTIFY notifications_[base::size(kDeviceCategoryMap)];
DISALLOW_IMPLICIT_CONSTRUCTORS(DeviceNotifications);
};
diff --git a/chromium/media/ffmpeg/ffmpeg_common.cc b/chromium/media/ffmpeg/ffmpeg_common.cc
index d3c799c5115..a38d6a7e867 100644
--- a/chromium/media/ffmpeg/ffmpeg_common.cc
+++ b/chromium/media/ffmpeg/ffmpeg_common.cc
@@ -730,6 +730,9 @@ VideoPixelFormat AVPixelFormatToVideoPixelFormat(AVPixelFormat pixel_format) {
case AV_PIX_FMT_YUV444P12LE:
return PIXEL_FORMAT_YUV444P12;
+ case AV_PIX_FMT_P016LE:
+ return PIXEL_FORMAT_P016LE;
+
default:
DVLOG(1) << "Unsupported AVPixelFormat: " << pixel_format;
}
@@ -764,6 +767,8 @@ AVPixelFormat VideoPixelFormatToAVPixelFormat(VideoPixelFormat video_format) {
return AV_PIX_FMT_YUV444P10LE;
case PIXEL_FORMAT_YUV444P12:
return AV_PIX_FMT_YUV444P12LE;
+ case PIXEL_FORMAT_P016LE:
+ return AV_PIX_FMT_P016LE;
default:
DVLOG(1) << "Unsupported Format: " << video_format;
diff --git a/chromium/media/ffmpeg/ffmpeg_common_unittest.cc b/chromium/media/ffmpeg/ffmpeg_common_unittest.cc
index 4f66dd131fb..d73fba3df0e 100644
--- a/chromium/media/ffmpeg/ffmpeg_common_unittest.cc
+++ b/chromium/media/ffmpeg/ffmpeg_common_unittest.cc
@@ -10,7 +10,7 @@
#include "base/bind.h"
#include "base/files/memory_mapped_file.h"
#include "base/logging.h"
-#include "base/macros.h"
+#include "base/stl_util.h"
#include "media/base/audio_decoder_config.h"
#include "media/base/media.h"
#include "media/base/media_util.h"
@@ -53,7 +53,7 @@ void TestConfigConvertExtraData(
// Valid combination: extra_data = non-nullptr && size > 0.
codec_parameters->extradata = &kExtraData[0];
- codec_parameters->extradata_size = arraysize(kExtraData);
+ codec_parameters->extradata_size = base::size(kExtraData);
EXPECT_TRUE(converter_fn.Run(stream, decoder_config));
EXPECT_EQ(static_cast<size_t>(codec_parameters->extradata_size),
decoder_config->extra_data().size());
@@ -196,7 +196,7 @@ TEST_F(FFmpegCommonTest, TimeBaseConversions) {
{1, 2, 1, 500000, 1}, {1, 3, 1, 333333, 1}, {1, 3, 2, 666667, 2},
};
- for (size_t i = 0; i < arraysize(test_data); ++i) {
+ for (size_t i = 0; i < base::size(test_data); ++i) {
SCOPED_TRACE(i);
AVRational time_base;
diff --git a/chromium/media/filters/BUILD.gn b/chromium/media/filters/BUILD.gn
index ffe1e64198f..aa3932a7470 100644
--- a/chromium/media/filters/BUILD.gn
+++ b/chromium/media/filters/BUILD.gn
@@ -35,6 +35,8 @@ jumbo_source_set("filters") {
"decrypting_audio_decoder.h",
"decrypting_demuxer_stream.cc",
"decrypting_demuxer_stream.h",
+ "decrypting_media_resource.cc",
+ "decrypting_media_resource.h",
"decrypting_video_decoder.cc",
"decrypting_video_decoder.h",
"file_data_source.cc",
@@ -266,6 +268,7 @@ source_set("unit_tests") {
"decoder_selector_unittest.cc",
"decrypting_audio_decoder_unittest.cc",
"decrypting_demuxer_stream_unittest.cc",
+ "decrypting_media_resource_unittest.cc",
"decrypting_video_decoder_unittest.cc",
"fake_video_decoder.cc",
"fake_video_decoder.h",
diff --git a/chromium/media/filters/android/media_codec_audio_decoder.cc b/chromium/media/filters/android/media_codec_audio_decoder.cc
index 375330bf686..8f08efad141 100644
--- a/chromium/media/filters/android/media_codec_audio_decoder.cc
+++ b/chromium/media/filters/android/media_codec_audio_decoder.cc
@@ -60,14 +60,15 @@ std::string MediaCodecAudioDecoder::GetDisplayName() const {
return "MediaCodecAudioDecoder";
}
-void MediaCodecAudioDecoder::Initialize(
- const AudioDecoderConfig& config,
- CdmContext* cdm_context,
- const InitCB& init_cb,
- const OutputCB& output_cb,
- const WaitingForDecryptionKeyCB& /* waiting_for_decryption_key_cb */) {
+void MediaCodecAudioDecoder::Initialize(const AudioDecoderConfig& config,
+ CdmContext* cdm_context,
+ const InitCB& init_cb,
+ const OutputCB& output_cb,
+ const WaitingCB& waiting_cb) {
DVLOG(1) << __func__ << ": " << config.AsHumanReadableString();
DCHECK_NE(state_, STATE_WAITING_FOR_MEDIA_CRYPTO);
+ DCHECK(output_cb);
+ DCHECK(waiting_cb);
// Initialization and reinitialization should not be called during pending
// decode.
@@ -104,7 +105,11 @@ void MediaCodecAudioDecoder::Initialize(
}
config_ = config;
+
+ // TODO(xhwang): Check whether BindToCurrentLoop is needed here.
output_cb_ = BindToCurrentLoop(output_cb);
+ waiting_cb_ = BindToCurrentLoop(waiting_cb);
+
SetInitialConfiguration();
if (config_.is_encrypted() && !media_crypto_) {
@@ -466,6 +471,11 @@ bool MediaCodecAudioDecoder::OnDecodedFrame(
return true;
}
+void MediaCodecAudioDecoder::OnWaiting(WaitingReason reason) {
+ DVLOG(2) << __func__;
+ waiting_cb_.Run(reason);
+}
+
bool MediaCodecAudioDecoder::OnOutputFormatChanged() {
DVLOG(2) << __func__;
MediaCodecBridge* media_codec = codec_loop_->GetCodec();
diff --git a/chromium/media/filters/android/media_codec_audio_decoder.h b/chromium/media/filters/android/media_codec_audio_decoder.h
index 39e37e33087..923b3f79ec2 100644
--- a/chromium/media/filters/android/media_codec_audio_decoder.h
+++ b/chromium/media/filters/android/media_codec_audio_decoder.h
@@ -84,12 +84,11 @@ class MEDIA_EXPORT MediaCodecAudioDecoder : public AudioDecoder,
// AudioDecoder implementation.
std::string GetDisplayName() const override;
- void Initialize(
- const AudioDecoderConfig& config,
- CdmContext* cdm_context,
- const InitCB& init_cb,
- const OutputCB& output_cb,
- const WaitingForDecryptionKeyCB& waiting_for_decryption_key_cb) override;
+ void Initialize(const AudioDecoderConfig& config,
+ CdmContext* cdm_context,
+ const InitCB& init_cb,
+ const OutputCB& output_cb,
+ const WaitingCB& waiting_cb) override;
void Decode(scoped_refptr<DecoderBuffer> buffer,
const DecodeCB& decode_cb) override;
void Reset(const base::Closure& closure) override;
@@ -101,6 +100,7 @@ class MEDIA_EXPORT MediaCodecAudioDecoder : public AudioDecoder,
void OnInputDataQueued(bool) override;
bool OnDecodedEos(const MediaCodecLoop::OutputBuffer& out) override;
bool OnDecodedFrame(const MediaCodecLoop::OutputBuffer& out) override;
+ void OnWaiting(WaitingReason reason) override;
bool OnOutputFormatChanged() override;
void OnCodecLoopError() override;
@@ -188,6 +188,8 @@ class MEDIA_EXPORT MediaCodecAudioDecoder : public AudioDecoder,
// Callback that delivers output frames.
OutputCB output_cb_;
+ WaitingCB waiting_cb_;
+
std::unique_ptr<MediaCodecLoop> codec_loop_;
std::unique_ptr<AudioTimestampHelper> timestamp_helper_;
diff --git a/chromium/media/filters/aom_video_decoder.cc b/chromium/media/filters/aom_video_decoder.cc
index ab54354d5be..14185a8ae90 100644
--- a/chromium/media/filters/aom_video_decoder.cc
+++ b/chromium/media/filters/aom_video_decoder.cc
@@ -136,13 +136,12 @@ std::string AomVideoDecoder::GetDisplayName() const {
return "AomVideoDecoder";
}
-void AomVideoDecoder::Initialize(
- const VideoDecoderConfig& config,
- bool /* low_delay */,
- CdmContext* /* cdm_context */,
- const InitCB& init_cb,
- const OutputCB& output_cb,
- const WaitingForDecryptionKeyCB& /* waiting_for_decryption_key_cb */) {
+void AomVideoDecoder::Initialize(const VideoDecoderConfig& config,
+ bool /* low_delay */,
+ CdmContext* /* cdm_context */,
+ const InitCB& init_cb,
+ const OutputCB& output_cb,
+ const WaitingCB& /* waiting_cb */) {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
DCHECK(config.IsValidConfig());
diff --git a/chromium/media/filters/aom_video_decoder.h b/chromium/media/filters/aom_video_decoder.h
index 750e7acff42..52f0b36ac3e 100644
--- a/chromium/media/filters/aom_video_decoder.h
+++ b/chromium/media/filters/aom_video_decoder.h
@@ -28,13 +28,12 @@ class MEDIA_EXPORT AomVideoDecoder : public VideoDecoder {
// VideoDecoder implementation.
std::string GetDisplayName() const override;
- void Initialize(
- const VideoDecoderConfig& config,
- bool low_delay,
- CdmContext* cdm_context,
- const InitCB& init_cb,
- const OutputCB& output_cb,
- const WaitingForDecryptionKeyCB& waiting_for_decryption_key_cb) override;
+ void Initialize(const VideoDecoderConfig& config,
+ bool low_delay,
+ CdmContext* cdm_context,
+ const InitCB& init_cb,
+ const OutputCB& output_cb,
+ const WaitingCB& waiting_cb) override;
void Decode(scoped_refptr<DecoderBuffer> buffer,
const DecodeCB& decode_cb) override;
void Reset(const base::Closure& reset_cb) override;
diff --git a/chromium/media/filters/audio_decoder_stream_unittest.cc b/chromium/media/filters/audio_decoder_stream_unittest.cc
index c89abf57fcd..f71d5832273 100644
--- a/chromium/media/filters/audio_decoder_stream_unittest.cc
+++ b/chromium/media/filters/audio_decoder_stream_unittest.cc
@@ -11,7 +11,7 @@
#include "base/test/scoped_task_environment.h"
#include "base/threading/sequenced_task_runner_handle.h"
#include "media/base/gmock_callback_support.h"
-#include "media/base/media_log.h"
+#include "media/base/media_util.h"
#include "media/base/mock_filters.h"
#include "media/filters/decoder_stream.h"
#include "testing/gmock/include/gmock/gmock.h"
@@ -111,7 +111,7 @@ class AudioDecoderStreamTest : public testing::Test {
}
base::test::ScopedTaskEnvironment task_environment_;
- MediaLog media_log_;
+ NullMediaLog media_log_;
testing::NiceMock<MockDemuxerStream> demuxer_stream_{DemuxerStream::AUDIO};
AudioDecoderStream audio_decoder_stream_;
diff --git a/chromium/media/filters/audio_decoder_unittest.cc b/chromium/media/filters/audio_decoder_unittest.cc
index 9b373fd78ed..514efa28b34 100644
--- a/chromium/media/filters/audio_decoder_unittest.cc
+++ b/chromium/media/filters/audio_decoder_unittest.cc
@@ -247,7 +247,7 @@ class AudioDecoderTest
decoder_->Initialize(
config, nullptr, NewExpectedBoolCB(success),
base::Bind(&AudioDecoderTest::OnDecoderOutput, base::Unretained(this)),
- base::NullCallback());
+ base::DoNothing());
base::RunLoop().RunUntilIdle();
}
@@ -394,7 +394,7 @@ class AudioDecoderTest
base::MessageLoop message_loop_;
- MediaLog media_log_;
+ NullMediaLog media_log_;
scoped_refptr<DecoderBuffer> data_;
std::unique_ptr<InMemoryUrlProtocol> protocol_;
std::unique_ptr<AudioFileReader> reader_;
diff --git a/chromium/media/filters/audio_renderer_algorithm_unittest.cc b/chromium/media/filters/audio_renderer_algorithm_unittest.cc
index e496653df46..7be7a9895e4 100644
--- a/chromium/media/filters/audio_renderer_algorithm_unittest.cc
+++ b/chromium/media/filters/audio_renderer_algorithm_unittest.cc
@@ -20,7 +20,7 @@
#include "base/bind.h"
#include "base/callback.h"
-#include "base/macros.h"
+#include "base/stl_util.h"
#include "media/base/audio_buffer.h"
#include "media/base/audio_bus.h"
#include "media/base/channel_layout.h"
@@ -691,7 +691,7 @@ TEST_F(AudioRendererAlgorithmTest, FillBufferOffset) {
// filled appropriately at normal, above normal, and below normal.
const int kHalfSize = kFrameSize / 2;
const float kAudibleRates[] = {1.0f, 2.0f, 0.5f, 5.0f, 0.25f};
- for (size_t i = 0; i < arraysize(kAudibleRates); ++i) {
+ for (size_t i = 0; i < base::size(kAudibleRates); ++i) {
SCOPED_TRACE(kAudibleRates[i]);
bus->Zero();
diff --git a/chromium/media/filters/audio_timestamp_validator_unittest.cc b/chromium/media/filters/audio_timestamp_validator_unittest.cc
index b2b84afa3f8..663bd1d6860 100644
--- a/chromium/media/filters/audio_timestamp_validator_unittest.cc
+++ b/chromium/media/filters/audio_timestamp_validator_unittest.cc
@@ -6,6 +6,7 @@
#include <tuple>
+#include "base/stl_util.h"
#include "base/time/time.h"
#include "media/base/audio_decoder_config.h"
#include "media/base/media_util.h"
@@ -87,7 +88,7 @@ TEST_P(AudioTimestampValidatorTest, WarnForEraticTimes) {
// Ping-pong between two random offsets to prevent validator from
// stabilizing timestamp pattern.
base::TimeDelta randomOffset =
- kRandomOffsets[i % arraysize(kRandomOffsets)];
+ kRandomOffsets[i % base::size(kRandomOffsets)];
encoded_buffer->set_timestamp(i * kBufferDuration + randomOffset);
if (i == 0) {
@@ -254,4 +255,4 @@ INSTANTIATE_TEST_CASE_P(
::testing::Values(base::TimeDelta(), // front discard
base::TimeDelta::FromMilliseconds(65))));
-} // namespace media \ No newline at end of file
+} // namespace media
diff --git a/chromium/media/filters/blocking_url_protocol.cc b/chromium/media/filters/blocking_url_protocol.cc
index 62b66732e94..93414f88ce3 100644
--- a/chromium/media/filters/blocking_url_protocol.cc
+++ b/chromium/media/filters/blocking_url_protocol.cc
@@ -7,7 +7,7 @@
#include <stddef.h>
#include "base/bind.h"
-#include "base/macros.h"
+#include "base/stl_util.h"
#include "base/threading/thread_restrictions.h"
#include "media/base/data_source.h"
#include "media/ffmpeg/ffmpeg_common.h"
@@ -64,7 +64,7 @@ int BlockingUrlProtocol::Read(int size, uint8_t* data) {
size_t index;
{
base::ScopedAllowBaseSyncPrimitives allow_base_sync_primitives;
- index = base::WaitableEvent::WaitMany(events, arraysize(events));
+ index = base::WaitableEvent::WaitMany(events, base::size(events));
}
if (events[index] == &aborted_)
diff --git a/chromium/media/filters/chunk_demuxer.cc b/chromium/media/filters/chunk_demuxer.cc
index 15ec60e28dc..39cc4a12160 100644
--- a/chromium/media/filters/chunk_demuxer.cc
+++ b/chromium/media/filters/chunk_demuxer.cc
@@ -154,6 +154,9 @@ void ChunkDemuxerStream::Seek(TimeDelta time) {
}
bool ChunkDemuxerStream::Append(const StreamParser::BufferQueue& buffers) {
+ if (append_observer_cb_)
+ append_observer_cb_.Run(&buffers);
+
if (buffers.empty())
return false;
@@ -258,6 +261,10 @@ void ChunkDemuxerStream::OnStartOfCodedFrameGroup(DecodeTimestamp start_dts,
DVLOG(2) << "ChunkDemuxerStream::OnStartOfCodedFrameGroup(dts "
<< start_dts.InSecondsF() << ", pts " << start_pts.InSecondsF()
<< ")";
+
+ if (group_start_observer_cb_)
+ group_start_observer_cb_.Run(start_dts, start_pts);
+
base::AutoLock auto_lock(lock_);
SBSTREAM_OP(OnStartOfCodedFrameGroup(start_dts, start_pts));
}
@@ -1123,6 +1130,10 @@ void ChunkDemuxer::MarkEndOfStream(PipelineStatus status) {
return;
if (state_ == INITIALIZING) {
+ MEDIA_LOG(ERROR, media_log_)
+ << "MediaSource endOfStream before demuxer initialization completes "
+ "(before HAVE_METADATA) is treated as an error. This may also occur "
+ "as consequence of other MediaSource errors before HAVE_METADATA.";
ReportError_Locked(DEMUXER_ERROR_COULD_NOT_OPEN);
return;
}
diff --git a/chromium/media/filters/chunk_demuxer.h b/chromium/media/filters/chunk_demuxer.h
index 5d085b1cf71..5cd48c0f67a 100644
--- a/chromium/media/filters/chunk_demuxer.h
+++ b/chromium/media/filters/chunk_demuxer.h
@@ -153,6 +153,20 @@ class MEDIA_EXPORT ChunkDemuxerStream : public DemuxerStream {
MediaTrack::Id media_track_id() const { return media_track_id_; }
+ // Allows tests to verify invocations of Append().
+ using AppendObserverCB = base::RepeatingCallback<void(const BufferQueue*)>;
+ void set_append_observer_for_testing(AppendObserverCB append_observer_cb) {
+ append_observer_cb_ = std::move(append_observer_cb);
+ }
+
+ // Allows tests to verify invocations of OnStartOfCodedFrameGroup().
+ using GroupStartObserverCB =
+ base::RepeatingCallback<void(DecodeTimestamp, base::TimeDelta)>;
+ void set_group_start_observer_for_testing(
+ GroupStartObserverCB group_start_observer_cb) {
+ group_start_observer_cb_ = std::move(group_start_observer_cb);
+ }
+
private:
enum State {
UNINITIALIZED,
@@ -181,6 +195,9 @@ class MEDIA_EXPORT ChunkDemuxerStream : public DemuxerStream {
const MediaTrack::Id media_track_id_;
+ AppendObserverCB append_observer_cb_;
+ GroupStartObserverCB group_start_observer_cb_;
+
mutable base::Lock lock_;
State state_ GUARDED_BY(lock_);
ReadCB read_cb_ GUARDED_BY(lock_);
diff --git a/chromium/media/filters/chunk_demuxer_unittest.cc b/chromium/media/filters/chunk_demuxer_unittest.cc
index 5ac43680153..2f11a8ec0ee 100644
--- a/chromium/media/filters/chunk_demuxer_unittest.cc
+++ b/chromium/media/filters/chunk_demuxer_unittest.cc
@@ -14,8 +14,8 @@
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/command_line.h"
-#include "base/macros.h"
#include "base/run_loop.h"
+#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
#include "base/strings/string_util.h"
@@ -734,7 +734,7 @@ class ChunkDemuxerTest : public ::testing::TestWithParam<BufferingApi> {
std::vector<uint8_t>(
kEncryptedMediaInitData,
kEncryptedMediaInitData +
- arraysize(kEncryptedMediaInitData))))
+ base::size(kEncryptedMediaInitData))))
.Times(Exactly(need_key_count));
}
@@ -1705,6 +1705,7 @@ TEST_P(ChunkDemuxerTest, EOSDuringInit) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(&host_,
NewExpectedStatusCB(DEMUXER_ERROR_COULD_NOT_OPEN));
+ EXPECT_MEDIA_LOG(EosBeforeHaveMetadata());
MarkEndOfStream(PIPELINE_OK);
}
@@ -1716,7 +1717,10 @@ TEST_P(ChunkDemuxerTest, EndOfStreamWithNoAppend) {
ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
CheckExpectedRanges("{ }");
+
+ EXPECT_MEDIA_LOG(EosBeforeHaveMetadata());
MarkEndOfStream(PIPELINE_OK);
+
ShutdownDemuxer();
CheckExpectedRanges("{ }");
demuxer_->RemoveId(kSourceId);
@@ -2875,7 +2879,7 @@ TEST_P(ChunkDemuxerTest, CodecIDsThatAreNotRFC6381Compliant) {
&host_, base::BindRepeating(&ChunkDemuxerTest::DemuxerInitialized,
base::Unretained(this)));
- for (size_t i = 0; i < arraysize(codec_ids); ++i) {
+ for (size_t i = 0; i < base::size(codec_ids); ++i) {
#if BUILDFLAG(USE_PROPRIETARY_CODECS)
expected = ChunkDemuxer::kOk;
#else
@@ -3391,10 +3395,10 @@ TEST_P(ChunkDemuxerTest, WebMIsParsingMediaSegmentDetection) {
true, true, true, true, false,
};
- static_assert(arraysize(kBuffer) == arraysize(kExpectedReturnValues),
- "test arrays out of sync");
- static_assert(arraysize(kBuffer) == sizeof(kBuffer),
- "there should be one byte per index");
+ static_assert(base::size(kBuffer) == base::size(kExpectedReturnValues),
+ "test arrays out of sync");
+ static_assert(base::size(kBuffer) == sizeof(kBuffer),
+ "there should be one byte per index");
ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
EXPECT_MEDIA_LOG(WebMSimpleBlockDurationEstimated(23)).Times(2);
diff --git a/chromium/media/filters/decoder_selector.cc b/chromium/media/filters/decoder_selector.cc
index 7fa7dce805d..38709671a3b 100644
--- a/chromium/media/filters/decoder_selector.cc
+++ b/chromium/media/filters/decoder_selector.cc
@@ -46,11 +46,10 @@ DecoderSelector<StreamType>::~DecoderSelector() {
}
template <DemuxerStream::Type StreamType>
-void DecoderSelector<StreamType>::Initialize(
- StreamTraits* traits,
- DemuxerStream* stream,
- CdmContext* cdm_context,
- base::RepeatingClosure waiting_for_decryption_key_cb) {
+void DecoderSelector<StreamType>::Initialize(StreamTraits* traits,
+ DemuxerStream* stream,
+ CdmContext* cdm_context,
+ WaitingCB waiting_cb) {
DVLOG(2) << __func__;
DCHECK(traits);
DCHECK(stream);
@@ -58,7 +57,7 @@ void DecoderSelector<StreamType>::Initialize(
traits_ = traits;
stream_ = stream;
cdm_context_ = cdm_context;
- waiting_for_decryption_key_cb_ = std::move(waiting_for_decryption_key_cb);
+ waiting_cb_ = std::move(waiting_cb);
}
template <DemuxerStream::Type StreamType>
@@ -157,7 +156,7 @@ void DecoderSelector<StreamType>::InitializeDecoder() {
decoder_.get(), config_, is_live, cdm_context_,
base::BindRepeating(&DecoderSelector<StreamType>::OnDecoderInitializeDone,
weak_this_factory_.GetWeakPtr()),
- output_cb_, waiting_for_decryption_key_cb_);
+ output_cb_, waiting_cb_);
}
template <DemuxerStream::Type StreamType>
@@ -196,7 +195,7 @@ void DecoderSelector<StreamType>::InitializeDecryptingDemuxerStream() {
"DecryptingDemuxerStream");
decrypting_demuxer_stream_ = std::make_unique<DecryptingDemuxerStream>(
- task_runner_, media_log_, waiting_for_decryption_key_cb_);
+ task_runner_, media_log_, waiting_cb_);
decrypting_demuxer_stream_->Initialize(
stream_, cdm_context_,
diff --git a/chromium/media/filters/decoder_selector.h b/chromium/media/filters/decoder_selector.h
index 4ec0ca2bc7e..06ca575fcce 100644
--- a/chromium/media/filters/decoder_selector.h
+++ b/chromium/media/filters/decoder_selector.h
@@ -15,6 +15,7 @@
#include "base/time/time.h"
#include "media/base/demuxer_stream.h"
#include "media/base/pipeline_status.h"
+#include "media/base/waiting.h"
#include "media/filters/decoder_stream_traits.h"
namespace base {
@@ -67,7 +68,7 @@ class MEDIA_EXPORT DecoderSelector {
void Initialize(StreamTraits* traits,
DemuxerStream* stream,
CdmContext* cdm_context,
- base::RepeatingClosure waiting_for_decryption_key_cb);
+ WaitingCB waiting_cb);
// Selects and initializes a decoder, which will be returned via
// |select_decoder_cb| posted to |task_runner|. Subsequent calls to
@@ -105,7 +106,7 @@ class MEDIA_EXPORT DecoderSelector {
StreamTraits* traits_ = nullptr;
DemuxerStream* stream_ = nullptr;
CdmContext* cdm_context_ = nullptr;
- base::RepeatingClosure waiting_for_decryption_key_cb_;
+ WaitingCB waiting_cb_;
// Overall decoder selection state.
DecoderConfig config_;
diff --git a/chromium/media/filters/decoder_selector_unittest.cc b/chromium/media/filters/decoder_selector_unittest.cc
index 2f2e3cc2251..cb16c8ffcee 100644
--- a/chromium/media/filters/decoder_selector_unittest.cc
+++ b/chromium/media/filters/decoder_selector_unittest.cc
@@ -13,6 +13,7 @@
#include "build/build_config.h"
#include "media/base/demuxer_stream.h"
#include "media/base/gmock_callback_support.h"
+#include "media/base/media_util.h"
#include "media/base/mock_filters.h"
#include "media/base/test_helpers.h"
#include "media/filters/decoder_selector.h"
@@ -92,8 +93,7 @@ class AudioDecoderSelectorTestParam {
.WillRepeatedly(
[capability](const AudioDecoderConfig& config, CdmContext*,
const AudioDecoder::InitCB& init_cb,
- const AudioDecoder::OutputCB&,
- const AudioDecoder::WaitingForDecryptionKeyCB&) {
+ const AudioDecoder::OutputCB&, const WaitingCB&) {
init_cb.Run(IsConfigSupported(capability, config.is_encrypted()));
});
}
@@ -129,8 +129,7 @@ class VideoDecoderSelectorTestParam {
.WillRepeatedly(
[capability](const VideoDecoderConfig& config, bool low_delay,
CdmContext*, const VideoDecoder::InitCB& init_cb,
- const VideoDecoder::OutputCB&,
- const VideoDecoder::WaitingForDecryptionKeyCB&) {
+ const VideoDecoder::OutputCB&, const WaitingCB&) {
init_cb.Run(IsConfigSupported(capability, config.is_encrypted()));
});
}
@@ -162,7 +161,7 @@ class DecoderSelectorTest : public ::testing::Test {
: traits_(TypeParam::CreateStreamTraits(&media_log_)),
demuxer_stream_(TypeParam::kStreamType) {}
- void OnWaitingForDecryptionKey() { NOTREACHED(); }
+ void OnWaiting(WaitingReason reason) { NOTREACHED(); }
void OnOutput(const scoped_refptr<Output>& output) { NOTREACHED(); }
MOCK_METHOD2_T(OnDecoderSelected,
@@ -249,8 +248,7 @@ class DecoderSelectorTest : public ::testing::Test {
&media_log_);
decoder_selector_->Initialize(
traits_.get(), &demuxer_stream_, cdm_context_.get(),
- base::BindRepeating(&Self::OnWaitingForDecryptionKey,
- base::Unretained(this)));
+ base::BindRepeating(&Self::OnWaiting, base::Unretained(this)));
}
void UseClearDecoderConfig() {
@@ -291,7 +289,7 @@ class DecoderSelectorTest : public ::testing::Test {
void RunUntilIdle() { scoped_task_environment_.RunUntilIdle(); }
base::test::ScopedTaskEnvironment scoped_task_environment_;
- MediaLog media_log_;
+ NullMediaLog media_log_;
std::unique_ptr<StreamTraits> traits_;
StrictMock<MockDemuxerStream> demuxer_stream_;
diff --git a/chromium/media/filters/decoder_stream.cc b/chromium/media/filters/decoder_stream.cc
index 062ad30a4f6..53e85a5335f 100644
--- a/chromium/media/filters/decoder_stream.cc
+++ b/chromium/media/filters/decoder_stream.cc
@@ -147,12 +147,11 @@ std::string DecoderStream<StreamType>::GetStreamTypeString() {
}
template <DemuxerStream::Type StreamType>
-void DecoderStream<StreamType>::Initialize(
- DemuxerStream* stream,
- InitCB init_cb,
- CdmContext* cdm_context,
- StatisticsCB statistics_cb,
- base::RepeatingClosure waiting_for_decryption_key_cb) {
+void DecoderStream<StreamType>::Initialize(DemuxerStream* stream,
+ InitCB init_cb,
+ CdmContext* cdm_context,
+ StatisticsCB statistics_cb,
+ WaitingCB waiting_cb) {
FUNCTION_DVLOG(1);
DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK_EQ(state_, STATE_UNINITIALIZED);
@@ -163,11 +162,13 @@ void DecoderStream<StreamType>::Initialize(
init_cb_ = std::move(init_cb);
cdm_context_ = cdm_context;
statistics_cb_ = std::move(statistics_cb);
- waiting_for_decryption_key_cb_ = waiting_for_decryption_key_cb;
+
+ // Make a copy here since it's also passed to |decoder_selector_| below.
+ waiting_cb_ = waiting_cb;
traits_->OnStreamReset(stream_);
decoder_selector_.Initialize(traits_.get(), stream, cdm_context,
- std::move(waiting_for_decryption_key_cb));
+ std::move(waiting_cb));
state_ = STATE_INITIALIZING;
SelectDecoder();
@@ -813,7 +814,7 @@ void DecoderStream<StreamType>::ReinitializeDecoder() {
weak_factory_.GetWeakPtr()),
base::BindRepeating(&DecoderStream<StreamType>::OnDecodeOutputReady,
fallback_weak_factory_.GetWeakPtr()),
- waiting_for_decryption_key_cb_);
+ waiting_cb_);
}
template <DemuxerStream::Type StreamType>
diff --git a/chromium/media/filters/decoder_stream.h b/chromium/media/filters/decoder_stream.h
index bedd4343114..f2f128b9790 100644
--- a/chromium/media/filters/decoder_stream.h
+++ b/chromium/media/filters/decoder_stream.h
@@ -22,6 +22,7 @@
#include "media/base/moving_average.h"
#include "media/base/pipeline_status.h"
#include "media/base/timestamp_constants.h"
+#include "media/base/waiting.h"
#include "media/filters/decoder_selector.h"
#include "media/filters/decoder_stream_traits.h"
@@ -78,7 +79,7 @@ class MEDIA_EXPORT DecoderStream {
InitCB init_cb,
CdmContext* cdm_context,
StatisticsCB statistics_cb,
- base::RepeatingClosure waiting_for_decryption_key_cb);
+ WaitingCB waiting_cb);
// Reads a decoded Output and returns it via the |read_cb|. Note that
// |read_cb| is always called asynchronously. This method should only be
@@ -227,7 +228,7 @@ class MEDIA_EXPORT DecoderStream {
StatisticsCB statistics_cb_;
InitCB init_cb_;
- base::RepeatingClosure waiting_for_decryption_key_cb_;
+ WaitingCB waiting_cb_;
ReadCB read_cb_;
base::OnceClosure reset_cb_;
diff --git a/chromium/media/filters/decoder_stream_traits.cc b/chromium/media/filters/decoder_stream_traits.cc
index 2120f04164d..cfb6f5f09a2 100644
--- a/chromium/media/filters/decoder_stream_traits.cc
+++ b/chromium/media/filters/decoder_stream_traits.cc
@@ -63,8 +63,7 @@ void DecoderStreamTraits<DemuxerStream::AUDIO>::InitializeDecoder(
CdmContext* cdm_context,
const InitCB& init_cb,
const OutputCB& output_cb,
- const DecoderType::WaitingForDecryptionKeyCB&
- waiting_for_decryption_key_cb) {
+ const WaitingCB& waiting_cb) {
DCHECK(config.IsValidConfig());
if (config_.IsValidConfig() && !config_.Matches(config))
@@ -72,8 +71,7 @@ void DecoderStreamTraits<DemuxerStream::AUDIO>::InitializeDecoder(
config_ = config;
stats_.audio_decoder_name = decoder->GetDisplayName();
- decoder->Initialize(config, cdm_context, init_cb, output_cb,
- waiting_for_decryption_key_cb);
+ decoder->Initialize(config, cdm_context, init_cb, output_cb, waiting_cb);
}
void DecoderStreamTraits<DemuxerStream::AUDIO>::OnStreamReset(
@@ -157,13 +155,12 @@ void DecoderStreamTraits<DemuxerStream::VIDEO>::InitializeDecoder(
CdmContext* cdm_context,
const InitCB& init_cb,
const OutputCB& output_cb,
- const DecoderType::WaitingForDecryptionKeyCB&
- waiting_for_decryption_key_cb) {
+ const WaitingCB& waiting_cb) {
DCHECK(config.IsValidConfig());
stats_.video_decoder_name = decoder->GetDisplayName();
DVLOG(2) << stats_.video_decoder_name;
decoder->Initialize(config, low_delay, cdm_context, init_cb, output_cb,
- waiting_for_decryption_key_cb);
+ waiting_cb);
}
void DecoderStreamTraits<DemuxerStream::VIDEO>::OnStreamReset(
diff --git a/chromium/media/filters/decoder_stream_traits.h b/chromium/media/filters/decoder_stream_traits.h
index 62c173218b7..db867e5d05b 100644
--- a/chromium/media/filters/decoder_stream_traits.h
+++ b/chromium/media/filters/decoder_stream_traits.h
@@ -38,7 +38,6 @@ class MEDIA_EXPORT DecoderStreamTraits<DemuxerStream::AUDIO> {
using DecoderConfigType = AudioDecoderConfig;
using InitCB = AudioDecoder::InitCB;
using OutputCB = AudioDecoder::OutputCB;
- using WaitingForDecryptionKeyCB = AudioDecoder::WaitingForDecryptionKeyCB;
static std::string ToString();
static bool NeedsBitstreamConversion(DecoderType* decoder);
@@ -47,14 +46,13 @@ class MEDIA_EXPORT DecoderStreamTraits<DemuxerStream::AUDIO> {
DecoderStreamTraits(MediaLog* media_log, ChannelLayout initial_hw_layout);
void ReportStatistics(const StatisticsCB& statistics_cb, int bytes_decoded);
- void InitializeDecoder(
- DecoderType* decoder,
- const DecoderConfigType& config,
- bool low_delay,
- CdmContext* cdm_context,
- const InitCB& init_cb,
- const OutputCB& output_cb,
- const WaitingForDecryptionKeyCB& waiting_for_decryption_key_cb);
+ void InitializeDecoder(DecoderType* decoder,
+ const DecoderConfigType& config,
+ bool low_delay,
+ CdmContext* cdm_context,
+ const InitCB& init_cb,
+ const OutputCB& output_cb,
+ const WaitingCB& waiting_cb);
DecoderConfigType GetDecoderConfig(DemuxerStream* stream);
void OnDecode(const DecoderBuffer& buffer);
PostDecodeAction OnDecodeDone(const scoped_refptr<OutputType>& buffer);
@@ -83,7 +81,6 @@ class MEDIA_EXPORT DecoderStreamTraits<DemuxerStream::VIDEO> {
using DecoderConfigType = VideoDecoderConfig;
using InitCB = VideoDecoder::InitCB;
using OutputCB = VideoDecoder::OutputCB;
- using WaitingForDecryptionKeyCB = VideoDecoder::WaitingForDecryptionKeyCB;
static std::string ToString();
static bool NeedsBitstreamConversion(DecoderType* decoder);
@@ -93,14 +90,13 @@ class MEDIA_EXPORT DecoderStreamTraits<DemuxerStream::VIDEO> {
DecoderConfigType GetDecoderConfig(DemuxerStream* stream);
void ReportStatistics(const StatisticsCB& statistics_cb, int bytes_decoded);
- void InitializeDecoder(
- DecoderType* decoder,
- const DecoderConfigType& config,
- bool low_delay,
- CdmContext* cdm_context,
- const InitCB& init_cb,
- const OutputCB& output_cb,
- const WaitingForDecryptionKeyCB& waiting_for_decryption_key_cb);
+ void InitializeDecoder(DecoderType* decoder,
+ const DecoderConfigType& config,
+ bool low_delay,
+ CdmContext* cdm_context,
+ const InitCB& init_cb,
+ const OutputCB& output_cb,
+ const WaitingCB& waiting_cb);
void OnDecode(const DecoderBuffer& buffer);
PostDecodeAction OnDecodeDone(const scoped_refptr<OutputType>& buffer);
void OnStreamReset(DemuxerStream* stream);
diff --git a/chromium/media/filters/decrypting_audio_decoder.cc b/chromium/media/filters/decrypting_audio_decoder.cc
index cf8f4685dfb..ed41d539c67 100644
--- a/chromium/media/filters/decrypting_audio_decoder.cc
+++ b/chromium/media/filters/decrypting_audio_decoder.cc
@@ -37,24 +37,17 @@ static inline bool IsOutOfSync(const base::TimeDelta& timestamp_1,
DecryptingAudioDecoder::DecryptingAudioDecoder(
const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
MediaLog* media_log)
- : task_runner_(task_runner),
- media_log_(media_log),
- state_(kUninitialized),
- decryptor_(NULL),
- key_added_while_decode_pending_(false),
- support_clear_content_(false),
- weak_factory_(this) {}
+ : task_runner_(task_runner), media_log_(media_log), weak_factory_(this) {}
std::string DecryptingAudioDecoder::GetDisplayName() const {
return "DecryptingAudioDecoder";
}
-void DecryptingAudioDecoder::Initialize(
- const AudioDecoderConfig& config,
- CdmContext* cdm_context,
- const InitCB& init_cb,
- const OutputCB& output_cb,
- const WaitingForDecryptionKeyCB& waiting_for_decryption_key_cb) {
+void DecryptingAudioDecoder::Initialize(const AudioDecoderConfig& config,
+ CdmContext* cdm_context,
+ const InitCB& init_cb,
+ const OutputCB& output_cb,
+ const WaitingCB& waiting_cb) {
DVLOG(2) << "Initialize()";
DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK(!decode_cb_);
@@ -80,8 +73,8 @@ void DecryptingAudioDecoder::Initialize(
weak_this_ = weak_factory_.GetWeakPtr();
output_cb_ = BindToCurrentLoop(output_cb);
- DCHECK(waiting_for_decryption_key_cb);
- waiting_for_decryption_key_cb_ = waiting_for_decryption_key_cb;
+ DCHECK(waiting_cb);
+ waiting_cb_ = waiting_cb;
// TODO(xhwang): We should be able to DCHECK config.IsValidConfig().
if (!config.IsValidConfig()) {
@@ -294,7 +287,7 @@ void DecryptingAudioDecoder::DeliverFrame(
}
state_ = kWaitingForKey;
- waiting_for_decryption_key_cb_.Run();
+ waiting_cb_.Run(WaitingReason::kNoDecryptionKey);
return;
}
diff --git a/chromium/media/filters/decrypting_audio_decoder.h b/chromium/media/filters/decrypting_audio_decoder.h
index de6ec91979b..95f8d234295 100644
--- a/chromium/media/filters/decrypting_audio_decoder.h
+++ b/chromium/media/filters/decrypting_audio_decoder.h
@@ -41,12 +41,11 @@ class MEDIA_EXPORT DecryptingAudioDecoder : public AudioDecoder {
// AudioDecoder implementation.
std::string GetDisplayName() const override;
- void Initialize(
- const AudioDecoderConfig& config,
- CdmContext* cdm_context,
- const InitCB& init_cb,
- const OutputCB& output_cb,
- const WaitingForDecryptionKeyCB& waiting_for_decryption_key_cb) override;
+ void Initialize(const AudioDecoderConfig& config,
+ CdmContext* cdm_context,
+ const InitCB& init_cb,
+ const OutputCB& output_cb,
+ const WaitingCB& waiting_cb) override;
void Decode(scoped_refptr<DecoderBuffer> buffer,
const DecodeCB& decode_cb) override;
void Reset(const base::Closure& closure) override;
@@ -89,22 +88,22 @@ class MEDIA_EXPORT DecryptingAudioDecoder : public AudioDecoder {
// Sets timestamps for |frames| and then passes them to |output_cb_|.
void ProcessDecodedFrames(const Decryptor::AudioFrames& frames);
- scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
+ // Set in constructor.
+ scoped_refptr<base::SingleThreadTaskRunner> const task_runner_;
+ MediaLog* const media_log_;
- MediaLog* media_log_;
-
- State state_;
+ State state_ = kUninitialized;
InitCB init_cb_;
OutputCB output_cb_;
DecodeCB decode_cb_;
base::Closure reset_cb_;
- base::Closure waiting_for_decryption_key_cb_;
+ WaitingCB waiting_cb_;
// The current decoder configuration.
AudioDecoderConfig config_;
- Decryptor* decryptor_;
+ Decryptor* decryptor_ = nullptr;
// The buffer that needs decrypting/decoding.
scoped_refptr<media::DecoderBuffer> pending_buffer_to_decode_;
@@ -114,13 +113,13 @@ class MEDIA_EXPORT DecryptingAudioDecoder : public AudioDecoder {
// If this variable is true and kNoKey is returned then we need to try
// decrypting/decoding again in case the newly added key is the correct
// decryption key.
- bool key_added_while_decode_pending_;
+ bool key_added_while_decode_pending_ = false;
std::unique_ptr<AudioTimestampHelper> timestamp_helper_;
// Once Initialized() with encrypted content support, if the stream changes to
// clear content, we want to ensure this decoder remains used.
- bool support_clear_content_;
+ bool support_clear_content_ = false;
base::WeakPtr<DecryptingAudioDecoder> weak_this_;
base::WeakPtrFactory<DecryptingAudioDecoder> weak_factory_;
diff --git a/chromium/media/filters/decrypting_audio_decoder_unittest.cc b/chromium/media/filters/decrypting_audio_decoder_unittest.cc
index 39d394a4faa..069f110c8d8 100644
--- a/chromium/media/filters/decrypting_audio_decoder_unittest.cc
+++ b/chromium/media/filters/decrypting_audio_decoder_unittest.cc
@@ -9,9 +9,9 @@
#include "base/bind.h"
#include "base/callback_helpers.h"
-#include "base/macros.h"
#include "base/message_loop/message_loop.h"
#include "base/run_loop.h"
+#include "base/stl_util.h"
#include "media/base/audio_buffer.h"
#include "media/base/decoder_buffer.h"
#include "media/base/decrypt_config.h"
@@ -46,8 +46,8 @@ static scoped_refptr<DecoderBuffer> CreateFakeEncryptedBuffer() {
scoped_refptr<DecoderBuffer> buffer(new DecoderBuffer(buffer_size));
buffer->set_decrypt_config(DecryptConfig::CreateCencConfig(
std::string(reinterpret_cast<const char*>(kFakeKeyId),
- arraysize(kFakeKeyId)),
- std::string(reinterpret_cast<const char*>(kFakeIv), arraysize(kFakeIv)),
+ base::size(kFakeKeyId)),
+ std::string(reinterpret_cast<const char*>(kFakeIv), base::size(kFakeIv)),
std::vector<SubsampleEntry>()));
return buffer;
}
@@ -80,12 +80,11 @@ class DecryptingAudioDecoderTest : public testing::Test {
kNoTimestamp);
decoded_frame_list_.push_back(decoded_frame_);
- decoder_->Initialize(
- config, cdm_context_.get(), NewExpectedBoolCB(success),
- base::Bind(&DecryptingAudioDecoderTest::FrameReady,
- base::Unretained(this)),
- base::Bind(&DecryptingAudioDecoderTest::OnWaitingForDecryptionKey,
- base::Unretained(this)));
+ decoder_->Initialize(config, cdm_context_.get(), NewExpectedBoolCB(success),
+ base::Bind(&DecryptingAudioDecoderTest::FrameReady,
+ base::Unretained(this)),
+ base::Bind(&DecryptingAudioDecoderTest::OnWaiting,
+ base::Unretained(this)));
base::RunLoop().RunUntilIdle();
}
@@ -119,12 +118,12 @@ class DecryptingAudioDecoderTest : public testing::Test {
.WillOnce(RunCallback<1>(true));
EXPECT_CALL(*decryptor_, RegisterNewKeyCB(Decryptor::kAudio, _))
.WillOnce(SaveArg<1>(&key_added_cb_));
- decoder_->Initialize(
- new_config, cdm_context_.get(), NewExpectedBoolCB(true),
- base::Bind(&DecryptingAudioDecoderTest::FrameReady,
- base::Unretained(this)),
- base::Bind(&DecryptingAudioDecoderTest::OnWaitingForDecryptionKey,
- base::Unretained(this)));
+ decoder_->Initialize(new_config, cdm_context_.get(),
+ NewExpectedBoolCB(true),
+ base::Bind(&DecryptingAudioDecoderTest::FrameReady,
+ base::Unretained(this)),
+ base::Bind(&DecryptingAudioDecoderTest::OnWaiting,
+ base::Unretained(this)));
}
// Decode |buffer| and expect DecodeDone to get called with |status|.
@@ -195,7 +194,7 @@ class DecryptingAudioDecoderTest : public testing::Test {
EXPECT_CALL(*decryptor_, DecryptAndDecodeAudio(encrypted_buffer_, _))
.WillRepeatedly(
RunCallback<1>(Decryptor::kNoKey, Decryptor::AudioFrames()));
- EXPECT_CALL(*this, OnWaitingForDecryptionKey());
+ EXPECT_CALL(*this, OnWaiting(WaitingReason::kNoDecryptionKey));
decoder_->Decode(encrypted_buffer_,
base::Bind(&DecryptingAudioDecoderTest::DecodeDone,
base::Unretained(this)));
@@ -241,10 +240,10 @@ class DecryptingAudioDecoderTest : public testing::Test {
MOCK_METHOD1(FrameReady, void(const scoped_refptr<AudioBuffer>&));
MOCK_METHOD1(DecodeDone, void(DecodeStatus));
- MOCK_METHOD0(OnWaitingForDecryptionKey, void(void));
+ MOCK_METHOD1(OnWaiting, void(WaitingReason));
base::MessageLoop message_loop_;
- MediaLog media_log_;
+ NullMediaLog media_log_;
std::unique_ptr<DecryptingAudioDecoder> decoder_;
std::unique_ptr<StrictMock<MockCdmContext>> cdm_context_;
std::unique_ptr<StrictMock<MockDecryptor>> decryptor_;
diff --git a/chromium/media/filters/decrypting_demuxer_stream.cc b/chromium/media/filters/decrypting_demuxer_stream.cc
index 64697669aeb..76780080fef 100644
--- a/chromium/media/filters/decrypting_demuxer_stream.cc
+++ b/chromium/media/filters/decrypting_demuxer_stream.cc
@@ -28,11 +28,11 @@ static bool IsStreamValid(DemuxerStream* stream) {
DecryptingDemuxerStream::DecryptingDemuxerStream(
const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
MediaLog* media_log,
- const base::Closure& waiting_for_decryption_key_cb)
+ const WaitingCB& waiting_cb)
: task_runner_(task_runner),
media_log_(media_log),
state_(kUninitialized),
- waiting_for_decryption_key_cb_(waiting_for_decryption_key_cb),
+ waiting_cb_(waiting_cb),
demuxer_stream_(NULL),
decryptor_(NULL),
key_added_while_decrypt_pending_(false),
@@ -305,7 +305,7 @@ void DecryptingDemuxerStream::DeliverBuffer(
TRACE_EVENT_ASYNC_BEGIN0(
"media", "DecryptingDemuxerStream::WaitingForDecryptionKey", this);
- waiting_for_decryption_key_cb_.Run();
+ waiting_cb_.Run(WaitingReason::kNoDecryptionKey);
return;
}
diff --git a/chromium/media/filters/decrypting_demuxer_stream.h b/chromium/media/filters/decrypting_demuxer_stream.h
index 7a8e03fc116..64cc5519912 100644
--- a/chromium/media/filters/decrypting_demuxer_stream.h
+++ b/chromium/media/filters/decrypting_demuxer_stream.h
@@ -15,6 +15,7 @@
#include "media/base/demuxer_stream.h"
#include "media/base/pipeline_status.h"
#include "media/base/video_decoder_config.h"
+#include "media/base/waiting.h"
namespace base {
class SingleThreadTaskRunner;
@@ -34,7 +35,7 @@ class MEDIA_EXPORT DecryptingDemuxerStream : public DemuxerStream {
DecryptingDemuxerStream(
const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
MediaLog* media_log,
- const base::Closure& waiting_for_decryption_key_cb);
+ const WaitingCB& waiting_cb);
// Cancels all pending operations immediately and fires all pending callbacks.
~DecryptingDemuxerStream() override;
@@ -112,7 +113,7 @@ class MEDIA_EXPORT DecryptingDemuxerStream : public DemuxerStream {
PipelineStatusCB init_cb_;
ReadCB read_cb_;
base::Closure reset_cb_;
- base::Closure waiting_for_decryption_key_cb_;
+ WaitingCB waiting_cb_;
// Pointer to the input demuxer stream that will feed us encrypted buffers.
DemuxerStream* demuxer_stream_;
diff --git a/chromium/media/filters/decrypting_demuxer_stream_unittest.cc b/chromium/media/filters/decrypting_demuxer_stream_unittest.cc
index ab7d6a14486..ab865a44500 100644
--- a/chromium/media/filters/decrypting_demuxer_stream_unittest.cc
+++ b/chromium/media/filters/decrypting_demuxer_stream_unittest.cc
@@ -9,9 +9,9 @@
#include "base/bind.h"
#include "base/callback_helpers.h"
-#include "base/macros.h"
#include "base/message_loop/message_loop.h"
#include "base/run_loop.h"
+#include "base/stl_util.h"
#include "media/base/decoder_buffer.h"
#include "media/base/decrypt_config.h"
#include "media/base/gmock_callback_support.h"
@@ -24,8 +24,8 @@
using ::testing::_;
using ::testing::HasSubstr;
-using ::testing::IsNull;
using ::testing::InSequence;
+using ::testing::IsNull;
using ::testing::Return;
using ::testing::SaveArg;
using ::testing::StrictMock;
@@ -44,11 +44,11 @@ static scoped_refptr<DecoderBuffer> CreateFakeEncryptedStreamBuffer(
std::string iv = is_clear
? std::string()
: std::string(reinterpret_cast<const char*>(kFakeIv),
- arraysize(kFakeIv));
+ base::size(kFakeIv));
if (!is_clear) {
buffer->set_decrypt_config(DecryptConfig::CreateCencConfig(
std::string(reinterpret_cast<const char*>(kFakeKeyId),
- arraysize(kFakeKeyId)),
+ base::size(kFakeKeyId)),
iv, {}));
}
return buffer;
@@ -70,7 +70,7 @@ class DecryptingDemuxerStreamTest : public testing::Test {
: demuxer_stream_(new DecryptingDemuxerStream(
message_loop_.task_runner(),
&media_log_,
- base::Bind(&DecryptingDemuxerStreamTest::OnWaitingForDecryptionKey,
+ base::Bind(&DecryptingDemuxerStreamTest::OnWaiting,
base::Unretained(this)))),
cdm_context_(new StrictMock<MockCdmContext>()),
decryptor_(new StrictMock<MockDecryptor>()),
@@ -232,7 +232,7 @@ class DecryptingDemuxerStreamTest : public testing::Test {
.WillRepeatedly(
RunCallback<2>(Decryptor::kNoKey, scoped_refptr<DecoderBuffer>()));
EXPECT_MEDIA_LOG(HasSubstr("DecryptingDemuxerStream: no key for key ID"));
- EXPECT_CALL(*this, OnWaitingForDecryptionKey());
+ EXPECT_CALL(*this, OnWaiting(WaitingReason::kNoDecryptionKey));
demuxer_stream_->Read(base::Bind(&DecryptingDemuxerStreamTest::BufferReady,
base::Unretained(this)));
base::RunLoop().RunUntilIdle();
@@ -261,7 +261,7 @@ class DecryptingDemuxerStreamTest : public testing::Test {
MOCK_METHOD2(BufferReady,
void(DemuxerStream::Status, scoped_refptr<DecoderBuffer>));
- MOCK_METHOD0(OnWaitingForDecryptionKey, void(void));
+ MOCK_METHOD1(OnWaiting, void(WaitingReason));
base::MessageLoop message_loop_;
StrictMock<MockMediaLog> media_log_;
diff --git a/chromium/media/filters/decrypting_media_resource.cc b/chromium/media/filters/decrypting_media_resource.cc
new file mode 100644
index 00000000000..d004db77984
--- /dev/null
+++ b/chromium/media/filters/decrypting_media_resource.cc
@@ -0,0 +1,104 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/filters/decrypting_media_resource.h"
+
+#include <memory>
+#include <utility>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/logging.h"
+#include "media/base/cdm_context.h"
+#include "media/base/demuxer_stream.h"
+#include "media/base/media_log.h"
+#include "media/base/pipeline_status.h"
+#include "media/filters/decrypting_demuxer_stream.h"
+
+namespace media {
+
+DecryptingMediaResource::DecryptingMediaResource(
+ MediaResource* media_resource,
+ CdmContext* cdm_context,
+ MediaLog* media_log,
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner)
+ : media_resource_(media_resource),
+ cdm_context_(cdm_context),
+ media_log_(media_log),
+ task_runner_(task_runner),
+ weak_factory_(this) {
+ DCHECK(media_resource);
+ DCHECK_EQ(MediaResource::STREAM, media_resource->GetType());
+ DCHECK(cdm_context_);
+ DCHECK(cdm_context_->GetDecryptor());
+ DCHECK(cdm_context_->GetDecryptor()->CanAlwaysDecrypt());
+ DCHECK(media_log_);
+ DCHECK(task_runner->BelongsToCurrentThread());
+}
+
+DecryptingMediaResource::~DecryptingMediaResource() = default;
+
+MediaResource::Type DecryptingMediaResource::GetType() const {
+ DCHECK_EQ(MediaResource::STREAM, media_resource_->GetType());
+ return MediaResource::STREAM;
+}
+
+std::vector<DemuxerStream*> DecryptingMediaResource::GetAllStreams() {
+ if (streams_.size())
+ return streams_;
+
+ return media_resource_->GetAllStreams();
+}
+
+void DecryptingMediaResource::Initialize(InitCB init_cb, WaitingCB waiting_cb) {
+ DCHECK(init_cb);
+
+ auto streams = media_resource_->GetAllStreams();
+
+ // Save the callback so that we can invoke it when the
+ // DecryptingDemuxerStreams have finished initialization.
+ init_cb_ = std::move(init_cb);
+ num_dds_pending_init_ = streams.size();
+
+ for (auto* stream : streams) {
+ auto decrypting_demuxer_stream = std::make_unique<DecryptingDemuxerStream>(
+ task_runner_, media_log_, waiting_cb);
+
+ // DecryptingDemuxerStream always invokes the callback asynchronously so
+ // that we have no reentrancy issues. "All public APIs and callbacks are
+ // trampolined to the |task_runner_|."
+ decrypting_demuxer_stream->Initialize(
+ stream, cdm_context_,
+ base::BindRepeating(
+ &DecryptingMediaResource::OnDecryptingDemuxerInitialized,
+ weak_factory_.GetWeakPtr()));
+
+ streams_.push_back(decrypting_demuxer_stream.get());
+ owned_streams_.push_back(std::move(decrypting_demuxer_stream));
+ }
+}
+
+int DecryptingMediaResource::DecryptingDemuxerStreamCountForTesting() const {
+ return owned_streams_.size();
+}
+
+void DecryptingMediaResource::OnDecryptingDemuxerInitialized(
+ PipelineStatus status) {
+ DVLOG(2) << __func__ << ": DecryptingDemuxerStream initialization ended "
+ << "with the status: " << MediaLog::PipelineStatusToString(status);
+
+ // Decrement the count of DecryptingDemuxerStreams that need to be
+ // initialized.
+ --num_dds_pending_init_;
+
+ if (!init_cb_)
+ return;
+
+ if (status != PIPELINE_OK)
+ std::move(init_cb_).Run(false);
+ else if (num_dds_pending_init_ == 0)
+ std::move(init_cb_).Run(true);
+}
+
+} // namespace media
diff --git a/chromium/media/filters/decrypting_media_resource.h b/chromium/media/filters/decrypting_media_resource.h
new file mode 100644
index 00000000000..6207d221c99
--- /dev/null
+++ b/chromium/media/filters/decrypting_media_resource.h
@@ -0,0 +1,83 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_FILTERS_DECRYPTING_MEDIA_RESOURCE_H_
+#define MEDIA_FILTERS_DECRYPTING_MEDIA_RESOURCE_H_
+
+#include <vector>
+
+#include "base/callback.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/weak_ptr.h"
+#include "base/single_thread_task_runner.h"
+#include "media/base/media_resource.h"
+#include "media/base/pipeline.h"
+
+namespace base {
+class SingleThreadTaskRunner;
+} // namespace base
+
+namespace media {
+
+class CdmContext;
+class DemuxerStream;
+class DecryptingDemuxerStream;
+
+// DecryptingMediaResource is a wrapper for a MediaResource implementation that
+// provides decryption. It should only be created when:
+// - The |media_resource| has type MediaResource::STREAM, and
+// - The |cdm_context| has a Decryptor that always supports decrypt-only.
+// Internally DecryptingDemuxerStreams will be created for all streams in
+// |media_resource| and decrypt them into clear streams. These clear streams are
+// then passed downstream to the rest of the media pipeline, which should no
+// longer need to worry about decryption.
+class MEDIA_EXPORT DecryptingMediaResource : public MediaResource {
+ public:
+ using InitCB = base::OnceCallback<void(bool success)>;
+
+ DecryptingMediaResource(
+ MediaResource* media_resource,
+ CdmContext* cdm_context,
+ MediaLog* media_log,
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner);
+ ~DecryptingMediaResource() override;
+
+ // MediaResource implementation:
+ MediaResource::Type GetType() const override;
+ std::vector<DemuxerStream*> GetAllStreams() override;
+
+ void Initialize(InitCB init_cb, WaitingCB waiting_cb);
+
+ // Returns the number of DecryptingDemuxerStreams that were created.
+ virtual int DecryptingDemuxerStreamCountForTesting() const;
+
+ private:
+ void OnDecryptingDemuxerInitialized(PipelineStatus status);
+
+ MediaResource* const media_resource_;
+ CdmContext* const cdm_context_;
+ MediaLog* const media_log_;
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
+
+ // Number of DecryptingDemuxerStreams that have yet to be initialized.
+ int num_dds_pending_init_ = 0;
+
+ // |streams_| is the set of streams that this implementation does not own and
+ // will be returned when GetAllStreams() is invoked. |owned_streams_| is the
+ // set of DecryptingDemuxerStreams that we have created and own (i.e.
+ // responsible for destructing).
+ std::vector<DemuxerStream*> streams_;
+ std::vector<std::unique_ptr<DecryptingDemuxerStream>> owned_streams_;
+
+ // Called when the final DecryptingDemuxerStream has been initialized *or*
+ // if one of the DecryptingDemuxerStreams failed to initialize correctly.
+ InitCB init_cb_;
+ base::WeakPtrFactory<DecryptingMediaResource> weak_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(DecryptingMediaResource);
+};
+
+} // namespace media
+
+#endif // MEDIA_FILTERS_DECRYPTING_MEDIA_RESOURCE_H_
diff --git a/chromium/media/filters/decrypting_media_resource_unittest.cc b/chromium/media/filters/decrypting_media_resource_unittest.cc
new file mode 100644
index 00000000000..567de639f77
--- /dev/null
+++ b/chromium/media/filters/decrypting_media_resource_unittest.cc
@@ -0,0 +1,228 @@
+// Copyright (c) 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/test/mock_callback.h"
+#include "base/test/scoped_task_environment.h"
+#include "media/base/decoder_buffer.h"
+#include "media/base/decrypt_config.h"
+#include "media/base/decryptor.h"
+#include "media/base/demuxer_stream.h"
+#include "media/base/gmock_callback_support.h"
+#include "media/base/media_util.h"
+#include "media/base/mock_filters.h"
+#include "media/base/pipeline_status.h"
+#include "media/base/test_helpers.h"
+#include "media/filters/decrypting_demuxer_stream.h"
+#include "media/filters/decrypting_media_resource.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+using ::testing::_;
+using ::testing::AnyNumber;
+using ::testing::Invoke;
+using ::testing::Return;
+using ::testing::StrictMock;
+
+namespace media {
+
+static constexpr int kFakeBufferSize = 16;
+static constexpr uint8_t kFakeKeyId[] = {0x4b, 0x65, 0x79, 0x20, 0x49, 0x44};
+static constexpr uint8_t kFakeIv[DecryptConfig::kDecryptionKeySize] = {0};
+
+// Use an anonymous namespace here to prevent the actions from being defined
+// multiple times across multiple test files. Sadly we can't use static for them.
+namespace {
+
+ACTION_P(ReturnBuffer, buffer) {
+ arg0.Run(buffer.get() ? DemuxerStream::kOk : DemuxerStream::kAborted, buffer);
+}
+
+} // namespace
+
+class DecryptingMediaResourceTest : public testing::Test {
+ public:
+ DecryptingMediaResourceTest() {
+ encrypted_buffer_ =
+ scoped_refptr<DecoderBuffer>(new DecoderBuffer(kFakeBufferSize));
+ encrypted_buffer_->set_decrypt_config(DecryptConfig::CreateCencConfig(
+ std::string(reinterpret_cast<const char*>(kFakeKeyId),
+ base::size(kFakeKeyId)),
+ std::string(reinterpret_cast<const char*>(kFakeIv),
+ base::size(kFakeIv)),
+ {}));
+
+ EXPECT_CALL(cdm_context_, GetDecryptor())
+ .WillRepeatedly(Return(&decryptor_));
+ EXPECT_CALL(decryptor_, CanAlwaysDecrypt()).WillRepeatedly(Return(true));
+ EXPECT_CALL(decryptor_, CancelDecrypt(_)).Times(AnyNumber());
+ EXPECT_CALL(decryptor_, RegisterNewKeyCB(_, _)).Times(AnyNumber());
+ EXPECT_CALL(demuxer_, GetAllStreams())
+ .WillRepeatedly(
+ Invoke(this, &DecryptingMediaResourceTest::GetAllStreams));
+
+ decrypting_media_resource_ = std::make_unique<DecryptingMediaResource>(
+ &demuxer_, &cdm_context_, &null_media_log_,
+ scoped_task_environment_.GetMainThreadTaskRunner());
+ }
+
+ ~DecryptingMediaResourceTest() {
+ // Ensure that the DecryptingMediaResource is destructed before other
+ // objects that it internally references but does not own.
+ decrypting_media_resource_.reset();
+ }
+
+ bool HasEncryptedStream() {
+ for (auto* stream : decrypting_media_resource_->GetAllStreams()) {
+ if ((stream->type() == DemuxerStream::AUDIO &&
+ stream->audio_decoder_config().is_encrypted()) ||
+ (stream->type() == DemuxerStream::VIDEO &&
+ stream->video_decoder_config().is_encrypted()))
+ return true;
+ }
+
+ return false;
+ }
+
+ void AddStream(DemuxerStream::Type type, bool encrypted) {
+ streams_.push_back(CreateMockDemuxerStream(type, encrypted));
+ }
+
+ std::vector<DemuxerStream*> GetAllStreams() {
+ std::vector<DemuxerStream*> streams;
+
+ for (auto& stream : streams_) {
+ streams.push_back(stream.get());
+ }
+
+ return streams;
+ }
+
+ MOCK_METHOD2(BufferReady,
+ void(DemuxerStream::Status, scoped_refptr<DecoderBuffer>));
+
+ protected:
+ base::test::ScopedTaskEnvironment scoped_task_environment_;
+ base::MockCallback<DecryptingMediaResource::InitCB>
+ decrypting_media_resource_init_cb_;
+ base::MockCallback<WaitingCB> waiting_cb_;
+ NullMediaLog null_media_log_;
+ StrictMock<MockDecryptor> decryptor_;
+ StrictMock<MockDemuxer> demuxer_;
+ StrictMock<MockCdmContext> cdm_context_;
+ std::unique_ptr<DecryptingMediaResource> decrypting_media_resource_;
+ std::vector<std::unique_ptr<StrictMock<MockDemuxerStream>>> streams_;
+
+ // Constant buffer to be returned by the input demuxer streams and
+ // |decryptor_|.
+ scoped_refptr<DecoderBuffer> encrypted_buffer_;
+};
+
+TEST_F(DecryptingMediaResourceTest, ClearStreams) {
+ AddStream(DemuxerStream::AUDIO, /* encrypted = */ false);
+ AddStream(DemuxerStream::VIDEO, /* encrypted = */ false);
+
+ EXPECT_CALL(decrypting_media_resource_init_cb_, Run(true));
+
+ decrypting_media_resource_->Initialize(
+ decrypting_media_resource_init_cb_.Get(), waiting_cb_.Get());
+ scoped_task_environment_.RunUntilIdle();
+
+ EXPECT_EQ(
+ decrypting_media_resource_->DecryptingDemuxerStreamCountForTesting(), 2);
+ EXPECT_FALSE(HasEncryptedStream());
+}
+
+TEST_F(DecryptingMediaResourceTest, EncryptedStreams) {
+ AddStream(DemuxerStream::AUDIO, /* encrypted = */ true);
+ AddStream(DemuxerStream::VIDEO, /* encrypted = */ true);
+
+ EXPECT_CALL(decrypting_media_resource_init_cb_, Run(true));
+
+ decrypting_media_resource_->Initialize(
+ decrypting_media_resource_init_cb_.Get(), waiting_cb_.Get());
+ scoped_task_environment_.RunUntilIdle();
+
+  // When using an AesDecryptor we preemptively wrap our streams with a
+  // DecryptingDemuxerStream, regardless of encryption. With this in mind, we
+  // should have two DecryptingDemuxerStreams.
+ EXPECT_EQ(
+ decrypting_media_resource_->DecryptingDemuxerStreamCountForTesting(), 2);
+
+ // All of the streams that we get from our DecryptingMediaResource, NOT the
+ // internal MediaResource implementation, should be clear.
+ EXPECT_FALSE(HasEncryptedStream());
+}
+
+TEST_F(DecryptingMediaResourceTest, MixedStreams) {
+ AddStream(DemuxerStream::AUDIO, /* encrypted = */ false);
+ AddStream(DemuxerStream::VIDEO, /* encrypted = */ true);
+
+ EXPECT_CALL(decrypting_media_resource_init_cb_, Run(true));
+
+ decrypting_media_resource_->Initialize(
+ decrypting_media_resource_init_cb_.Get(), waiting_cb_.Get());
+ scoped_task_environment_.RunUntilIdle();
+
+ EXPECT_EQ(
+ decrypting_media_resource_->DecryptingDemuxerStreamCountForTesting(), 2);
+ EXPECT_FALSE(HasEncryptedStream());
+}
+
+TEST_F(DecryptingMediaResourceTest,
+ OneDecryptingDemuxerStreamFailsInitialization) {
+ AddStream(DemuxerStream::AUDIO, /* encrypted = */ false);
+ AddStream(DemuxerStream::VIDEO, /* encrypted = */ true);
+
+ // The first DecryptingDemuxerStream will fail to initialize, causing the
+ // callback to be run with a value of false. The second
+ // DecryptingDemuxerStream will succeed but never invoke the callback.
+ EXPECT_CALL(cdm_context_, GetDecryptor())
+ .WillOnce(Return(nullptr))
+ .WillRepeatedly(Return(&decryptor_));
+ EXPECT_CALL(decrypting_media_resource_init_cb_, Run(false));
+
+ decrypting_media_resource_->Initialize(
+ decrypting_media_resource_init_cb_.Get(), waiting_cb_.Get());
+ scoped_task_environment_.RunUntilIdle();
+}
+
+TEST_F(DecryptingMediaResourceTest,
+ BothDecryptingDemuxerStreamsFailInitialization) {
+ AddStream(DemuxerStream::AUDIO, /* encrypted = */ false);
+ AddStream(DemuxerStream::VIDEO, /* encrypted = */ true);
+
+ // Both DecryptingDemuxerStreams will fail to initialize but the callback
+ // should still only be invoked a single time.
+ EXPECT_CALL(cdm_context_, GetDecryptor()).WillRepeatedly(Return(nullptr));
+ EXPECT_CALL(decrypting_media_resource_init_cb_, Run(false));
+
+ decrypting_media_resource_->Initialize(
+ decrypting_media_resource_init_cb_.Get(), waiting_cb_.Get());
+ scoped_task_environment_.RunUntilIdle();
+}
+
+TEST_F(DecryptingMediaResourceTest, WaitingCallback) {
+ AddStream(DemuxerStream::VIDEO, /* encrypted = */ true);
+
+ EXPECT_CALL(*streams_.front(), Read(_))
+ .WillRepeatedly(ReturnBuffer(encrypted_buffer_));
+ EXPECT_CALL(decryptor_, Decrypt(_, encrypted_buffer_, _))
+ .WillRepeatedly(
+ RunCallback<2>(Decryptor::kNoKey, scoped_refptr<DecoderBuffer>()));
+ EXPECT_CALL(decrypting_media_resource_init_cb_, Run(true));
+ EXPECT_CALL(waiting_cb_, Run(WaitingReason::kNoDecryptionKey));
+
+ decrypting_media_resource_->Initialize(
+ decrypting_media_resource_init_cb_.Get(), waiting_cb_.Get());
+ decrypting_media_resource_->GetAllStreams().front()->Read(base::BindRepeating(
+ &DecryptingMediaResourceTest::BufferReady, base::Unretained(this)));
+ scoped_task_environment_.RunUntilIdle();
+}
+
+} // namespace media
diff --git a/chromium/media/filters/decrypting_video_decoder.cc b/chromium/media/filters/decrypting_video_decoder.cc
index 5302ef07de4..1c0a3a975cf 100644
--- a/chromium/media/filters/decrypting_video_decoder.cc
+++ b/chromium/media/filters/decrypting_video_decoder.cc
@@ -24,25 +24,18 @@ const char DecryptingVideoDecoder::kDecoderName[] = "DecryptingVideoDecoder";
DecryptingVideoDecoder::DecryptingVideoDecoder(
const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
MediaLog* media_log)
- : task_runner_(task_runner),
- media_log_(media_log),
- state_(kUninitialized),
- decryptor_(NULL),
- key_added_while_decode_pending_(false),
- support_clear_content_(false),
- weak_factory_(this) {}
+ : task_runner_(task_runner), media_log_(media_log), weak_factory_(this) {}
std::string DecryptingVideoDecoder::GetDisplayName() const {
return kDecoderName;
}
-void DecryptingVideoDecoder::Initialize(
- const VideoDecoderConfig& config,
- bool /* low_delay */,
- CdmContext* cdm_context,
- const InitCB& init_cb,
- const OutputCB& output_cb,
- const WaitingForDecryptionKeyCB& waiting_for_decryption_key_cb) {
+void DecryptingVideoDecoder::Initialize(const VideoDecoderConfig& config,
+ bool /* low_delay */,
+ CdmContext* cdm_context,
+ const InitCB& init_cb,
+ const OutputCB& output_cb,
+ const WaitingCB& waiting_cb) {
DVLOG(2) << __func__ << ": " << config.AsHumanReadableString();
DCHECK(task_runner_->BelongsToCurrentThread());
@@ -74,8 +67,8 @@ void DecryptingVideoDecoder::Initialize(
weak_this_ = weak_factory_.GetWeakPtr();
config_ = config;
- DCHECK(waiting_for_decryption_key_cb);
- waiting_for_decryption_key_cb_ = waiting_for_decryption_key_cb;
+ DCHECK(waiting_cb);
+ waiting_cb_ = waiting_cb;
if (state_ == kUninitialized) {
if (!cdm_context->GetDecryptor()) {
@@ -283,7 +276,7 @@ void DecryptingVideoDecoder::DeliverFrame(
TRACE_EVENT_ASYNC_BEGIN0(
"media", "DecryptingVideoDecoder::WaitingForDecryptionKey", this);
state_ = kWaitingForKey;
- waiting_for_decryption_key_cb_.Run();
+ waiting_cb_.Run(WaitingReason::kNoDecryptionKey);
return;
}
diff --git a/chromium/media/filters/decrypting_video_decoder.h b/chromium/media/filters/decrypting_video_decoder.h
index d0dd80a16ac..03cec553195 100644
--- a/chromium/media/filters/decrypting_video_decoder.h
+++ b/chromium/media/filters/decrypting_video_decoder.h
@@ -38,13 +38,12 @@ class MEDIA_EXPORT DecryptingVideoDecoder : public VideoDecoder {
// VideoDecoder implementation.
std::string GetDisplayName() const override;
- void Initialize(
- const VideoDecoderConfig& config,
- bool low_delay,
- CdmContext* cdm_context,
- const InitCB& init_cb,
- const OutputCB& output_cb,
- const WaitingForDecryptionKeyCB& waiting_for_decryption_key_cb) override;
+ void Initialize(const VideoDecoderConfig& config,
+ bool low_delay,
+ CdmContext* cdm_context,
+ const InitCB& init_cb,
+ const OutputCB& output_cb,
+ const WaitingCB& waiting_cb) override;
void Decode(scoped_refptr<DecoderBuffer> buffer,
const DecodeCB& decode_cb) override;
void Reset(const base::Closure& closure) override;
@@ -85,21 +84,21 @@ class MEDIA_EXPORT DecryptingVideoDecoder : public VideoDecoder {
void CompletePendingDecode(Decryptor::Status status);
void CompleteWaitingForDecryptionKey();
- scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
+ // Set in constructor.
+ scoped_refptr<base::SingleThreadTaskRunner> const task_runner_;
+ MediaLog* const media_log_;
- MediaLog* media_log_;
-
- State state_;
+ State state_ = kUninitialized;
InitCB init_cb_;
OutputCB output_cb_;
DecodeCB decode_cb_;
base::Closure reset_cb_;
- base::Closure waiting_for_decryption_key_cb_;
+ WaitingCB waiting_cb_;
VideoDecoderConfig config_;
- Decryptor* decryptor_;
+ Decryptor* decryptor_ = nullptr;
// The buffer that needs decrypting/decoding.
scoped_refptr<media::DecoderBuffer> pending_buffer_to_decode_;
@@ -109,11 +108,11 @@ class MEDIA_EXPORT DecryptingVideoDecoder : public VideoDecoder {
// If this variable is true and kNoKey is returned then we need to try
// decrypting/decoding again in case the newly added key is the correct
// decryption key.
- bool key_added_while_decode_pending_;
+ bool key_added_while_decode_pending_ = false;
// Once Initialized() with encrypted content support, if the stream changes to
// clear content, we want to ensure this decoder remains used.
- bool support_clear_content_;
+ bool support_clear_content_ = false;
base::WeakPtr<DecryptingVideoDecoder> weak_this_;
base::WeakPtrFactory<DecryptingVideoDecoder> weak_factory_;
diff --git a/chromium/media/filters/decrypting_video_decoder_unittest.cc b/chromium/media/filters/decrypting_video_decoder_unittest.cc
index aac407704c6..4515c82d139 100644
--- a/chromium/media/filters/decrypting_video_decoder_unittest.cc
+++ b/chromium/media/filters/decrypting_video_decoder_unittest.cc
@@ -9,12 +9,13 @@
#include "base/bind.h"
#include "base/callback_helpers.h"
-#include "base/macros.h"
#include "base/message_loop/message_loop.h"
#include "base/run_loop.h"
+#include "base/stl_util.h"
#include "media/base/decoder_buffer.h"
#include "media/base/decrypt_config.h"
#include "media/base/gmock_callback_support.h"
+#include "media/base/media_util.h"
#include "media/base/mock_filters.h"
#include "media/base/test_helpers.h"
#include "media/base/video_frame.h"
@@ -39,8 +40,8 @@ static scoped_refptr<DecoderBuffer> CreateFakeEncryptedBuffer() {
scoped_refptr<DecoderBuffer> buffer(new DecoderBuffer(buffer_size));
buffer->set_decrypt_config(DecryptConfig::CreateCencConfig(
std::string(reinterpret_cast<const char*>(kFakeKeyId),
- arraysize(kFakeKeyId)),
- std::string(reinterpret_cast<const char*>(kFakeIv), arraysize(kFakeIv)),
+ base::size(kFakeKeyId)),
+ std::string(reinterpret_cast<const char*>(kFakeIv), base::size(kFakeIv)),
{}));
return buffer;
}
@@ -73,12 +74,12 @@ class DecryptingVideoDecoderTest : public testing::Test {
// can succeed or fail.
void InitializeAndExpectResult(const VideoDecoderConfig& config,
bool success) {
- decoder_->Initialize(
- config, false, cdm_context_.get(), NewExpectedBoolCB(success),
- base::Bind(&DecryptingVideoDecoderTest::FrameReady,
- base::Unretained(this)),
- base::Bind(&DecryptingVideoDecoderTest::OnWaitingForDecryptionKey,
- base::Unretained(this)));
+ decoder_->Initialize(config, false, cdm_context_.get(),
+ NewExpectedBoolCB(success),
+ base::Bind(&DecryptingVideoDecoderTest::FrameReady,
+ base::Unretained(this)),
+ base::Bind(&DecryptingVideoDecoderTest::OnWaiting,
+ base::Unretained(this)));
base::RunLoop().RunUntilIdle();
}
@@ -171,7 +172,7 @@ class DecryptingVideoDecoderTest : public testing::Test {
void EnterWaitingForKeyState() {
EXPECT_CALL(*decryptor_, DecryptAndDecodeVideo(_, _))
.WillRepeatedly(RunCallback<1>(Decryptor::kNoKey, null_video_frame_));
- EXPECT_CALL(*this, OnWaitingForDecryptionKey());
+ EXPECT_CALL(*this, OnWaiting(WaitingReason::kNoDecryptionKey));
decoder_->Decode(encrypted_buffer_,
base::Bind(&DecryptingVideoDecoderTest::DecodeDone,
base::Unretained(this)));
@@ -216,10 +217,10 @@ class DecryptingVideoDecoderTest : public testing::Test {
MOCK_METHOD1(FrameReady, void(const scoped_refptr<VideoFrame>&));
MOCK_METHOD1(DecodeDone, void(DecodeStatus));
- MOCK_METHOD0(OnWaitingForDecryptionKey, void(void));
+ MOCK_METHOD1(OnWaiting, void(WaitingReason));
base::MessageLoop message_loop_;
- MediaLog media_log_;
+ NullMediaLog media_log_;
std::unique_ptr<DecryptingVideoDecoder> decoder_;
std::unique_ptr<StrictMock<MockCdmContext>> cdm_context_;
std::unique_ptr<StrictMock<MockDecryptor>> decryptor_;
diff --git a/chromium/media/filters/demuxer_perftest.cc b/chromium/media/filters/demuxer_perftest.cc
index a89b8d95e0a..aa108624df6 100644
--- a/chromium/media/filters/demuxer_perftest.cc
+++ b/chromium/media/filters/demuxer_perftest.cc
@@ -16,8 +16,8 @@
#include "base/time/time.h"
#include "build/build_config.h"
#include "media/base/media.h"
-#include "media/base/media_log.h"
#include "media/base/media_tracks.h"
+#include "media/base/media_util.h"
#include "media/base/test_data_util.h"
#include "media/base/timestamp_constants.h"
#include "media/filters/ffmpeg_demuxer.h"
@@ -173,7 +173,7 @@ int StreamReader::GetNextStreamIndexToRead() {
static void RunDemuxerBenchmark(const std::string& filename) {
base::FilePath file_path(GetTestDataFilePath(filename));
base::TimeDelta total_time;
- MediaLog media_log_;
+ NullMediaLog media_log_;
for (int i = 0; i < kBenchmarkIterations; ++i) {
// Setup.
base::test::ScopedTaskEnvironment scoped_task_environment_;
diff --git a/chromium/media/filters/fake_video_decoder.cc b/chromium/media/filters/fake_video_decoder.cc
index 2e0eaaefcf6..9081bb77849 100644
--- a/chromium/media/filters/fake_video_decoder.cc
+++ b/chromium/media/filters/fake_video_decoder.cc
@@ -56,13 +56,12 @@ std::string FakeVideoDecoder::GetDisplayName() const {
return decoder_name_;
}
-void FakeVideoDecoder::Initialize(
- const VideoDecoderConfig& config,
- bool low_delay,
- CdmContext* cdm_context,
- const InitCB& init_cb,
- const OutputCB& output_cb,
- const WaitingForDecryptionKeyCB& waiting_for_decryption_key_cb) {
+void FakeVideoDecoder::Initialize(const VideoDecoderConfig& config,
+ bool low_delay,
+ CdmContext* cdm_context,
+ const InitCB& init_cb,
+ const OutputCB& output_cb,
+ const WaitingCB& waiting_cb) {
DVLOG(1) << decoder_name_ << ": " << __func__;
DCHECK(thread_checker_.CalledOnValidThread());
DCHECK(config.IsValidConfig());
diff --git a/chromium/media/filters/fake_video_decoder.h b/chromium/media/filters/fake_video_decoder.h
index e5ae86e599a..c8fe39ac762 100644
--- a/chromium/media/filters/fake_video_decoder.h
+++ b/chromium/media/filters/fake_video_decoder.h
@@ -45,13 +45,12 @@ class FakeVideoDecoder : public VideoDecoder {
// VideoDecoder implementation.
std::string GetDisplayName() const override;
- void Initialize(
- const VideoDecoderConfig& config,
- bool low_delay,
- CdmContext* cdm_context,
- const InitCB& init_cb,
- const OutputCB& output_cb,
- const WaitingForDecryptionKeyCB& waiting_for_decryption_key_cb) override;
+ void Initialize(const VideoDecoderConfig& config,
+ bool low_delay,
+ CdmContext* cdm_context,
+ const InitCB& init_cb,
+ const OutputCB& output_cb,
+ const WaitingCB& waiting_cb) override;
void Decode(scoped_refptr<DecoderBuffer> buffer,
const DecodeCB& decode_cb) override;
void Reset(const base::Closure& closure) override;
diff --git a/chromium/media/filters/ffmpeg_audio_decoder.cc b/chromium/media/filters/ffmpeg_audio_decoder.cc
index 37728b0569f..64d5976ed37 100644
--- a/chromium/media/filters/ffmpeg_audio_decoder.cc
+++ b/chromium/media/filters/ffmpeg_audio_decoder.cc
@@ -64,12 +64,11 @@ std::string FFmpegAudioDecoder::GetDisplayName() const {
return "FFmpegAudioDecoder";
}
-void FFmpegAudioDecoder::Initialize(
- const AudioDecoderConfig& config,
- CdmContext* /* cdm_context */,
- const InitCB& init_cb,
- const OutputCB& output_cb,
- const WaitingForDecryptionKeyCB& /* waiting_for_decryption_key_cb */) {
+void FFmpegAudioDecoder::Initialize(const AudioDecoderConfig& config,
+ CdmContext* /* cdm_context */,
+ const InitCB& init_cb,
+ const OutputCB& output_cb,
+ const WaitingCB& /* waiting_cb */) {
DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK(config.IsValidConfig());
diff --git a/chromium/media/filters/ffmpeg_audio_decoder.h b/chromium/media/filters/ffmpeg_audio_decoder.h
index 44e1c23037d..923e8cd1ec3 100644
--- a/chromium/media/filters/ffmpeg_audio_decoder.h
+++ b/chromium/media/filters/ffmpeg_audio_decoder.h
@@ -40,12 +40,11 @@ class MEDIA_EXPORT FFmpegAudioDecoder : public AudioDecoder {
// AudioDecoder implementation.
std::string GetDisplayName() const override;
- void Initialize(
- const AudioDecoderConfig& config,
- CdmContext* cdm_context,
- const InitCB& init_cb,
- const OutputCB& output_cb,
- const WaitingForDecryptionKeyCB& waiting_for_decryption_key_cb) override;
+ void Initialize(const AudioDecoderConfig& config,
+ CdmContext* cdm_context,
+ const InitCB& init_cb,
+ const OutputCB& output_cb,
+ const WaitingCB& waiting_cb) override;
void Decode(scoped_refptr<DecoderBuffer> buffer,
const DecodeCB& decode_cb) override;
void Reset(const base::Closure& closure) override;
diff --git a/chromium/media/filters/ffmpeg_demuxer.cc b/chromium/media/filters/ffmpeg_demuxer.cc
index f10ea024173..64c02a94e00 100644
--- a/chromium/media/filters/ffmpeg_demuxer.cc
+++ b/chromium/media/filters/ffmpeg_demuxer.cc
@@ -220,6 +220,24 @@ static void SetTimeProperty(MediaLogEvent* event,
event->params.SetDouble(key, value.InSecondsF());
}
+static int ReadFrameAndDiscardEmpty(AVFormatContext* context,
+ AVPacket* packet) {
+ // Skip empty packets in a tight loop to avoid timing out fuzzers.
+ int result;
+ bool drop_packet;
+ do {
+ result = av_read_frame(context, packet);
+ drop_packet = (!packet->data || !packet->size) && result >= 0;
+ if (drop_packet) {
+ av_packet_unref(packet);
+ DLOG(WARNING) << "Dropping empty packet, size: " << packet->size
+ << ", data: " << static_cast<void*>(packet->data);
+ }
+ } while (drop_packet);
+
+ return result;
+}
+
std::unique_ptr<FFmpegDemuxerStream> FFmpegDemuxerStream::Create(
FFmpegDemuxer* demuxer,
AVStream* stream,
@@ -391,8 +409,8 @@ void FFmpegDemuxerStream::EnqueuePacket(ScopedAVPacket packet) {
<< " < " << last_packet_pos_ << ")";
return;
}
- if (packet->pos == last_packet_pos_ && packet_dts <= last_packet_dts_) {
- DCHECK_NE(last_packet_dts_, AV_NOPTS_VALUE);
+ if (last_packet_dts_ != AV_NOPTS_VALUE && packet->pos == last_packet_pos_ &&
+ packet_dts <= last_packet_dts_) {
DVLOG(3) << "Dropped packet with out of order display timestamp ("
<< packet_dts << " < " << last_packet_dts_ << ")";
return;
@@ -420,7 +438,7 @@ void FFmpegDemuxerStream::EnqueuePacket(ScopedAVPacket packet) {
// Convert the packet if there is a bitstream filter.
if (bitstream_converter_ &&
!bitstream_converter_->ConvertPacket(packet.get())) {
- MEDIA_LOG(ERROR, media_log_) << "Format conversion failed.";
+ DVLOG(1) << "Format conversion failed.";
}
#endif
@@ -1817,9 +1835,10 @@ void FFmpegDemuxer::ReadFrameIfNeeded() {
pending_read_ = true;
base::PostTaskAndReplyWithResult(
blocking_task_runner_.get(), FROM_HERE,
- base::Bind(&av_read_frame, glue_->format_context(), packet_ptr),
- base::Bind(&FFmpegDemuxer::OnReadFrameDone, weak_factory_.GetWeakPtr(),
- base::Passed(&packet)));
+ base::BindOnce(&ReadFrameAndDiscardEmpty, glue_->format_context(),
+ packet_ptr),
+ base::BindOnce(&FFmpegDemuxer::OnReadFrameDone,
+ weak_factory_.GetWeakPtr(), std::move(packet)));
}
void FFmpegDemuxer::OnReadFrameDone(ScopedAVPacket packet, int result) {
@@ -1870,11 +1889,11 @@ void FFmpegDemuxer::OnReadFrameDone(ScopedAVPacket packet, int result) {
// giving us a bad stream index. See http://crbug.com/698549 for example.
if (packet->stream_index >= 0 &&
static_cast<size_t>(packet->stream_index) < streams_.size()) {
- // Drop empty packets since they're ignored on the decoder side anyways.
- if (!packet->data || !packet->size) {
- DLOG(WARNING) << "Dropping empty packet, size: " << packet->size
- << ", data: " << static_cast<void*>(packet->data);
- } else if (auto& demuxer_stream = streams_[packet->stream_index]) {
+ // This is ensured by ReadFrameAndDiscardEmpty.
+ DCHECK(packet->data);
+ DCHECK(packet->size);
+
+ if (auto& demuxer_stream = streams_[packet->stream_index]) {
if (demuxer_stream->IsEnabled())
demuxer_stream->EnqueuePacket(std::move(packet));
diff --git a/chromium/media/filters/ffmpeg_demuxer_unittest.cc b/chromium/media/filters/ffmpeg_demuxer_unittest.cc
index 4ff8ead41f1..c60ac81ab45 100644
--- a/chromium/media/filters/ffmpeg_demuxer_unittest.cc
+++ b/chromium/media/filters/ffmpeg_demuxer_unittest.cc
@@ -12,18 +12,18 @@
#include "base/files/file_path.h"
#include "base/location.h"
#include "base/logging.h"
-#include "base/macros.h"
#include "base/path_service.h"
#include "base/run_loop.h"
#include "base/single_thread_task_runner.h"
+#include "base/stl_util.h"
#include "base/test/mock_callback.h"
#include "base/test/scoped_task_environment.h"
#include "base/threading/thread.h"
#include "base/threading/thread_task_runner_handle.h"
#include "build/build_config.h"
#include "media/base/decrypt_config.h"
-#include "media/base/media_log.h"
#include "media/base/media_tracks.h"
+#include "media/base/media_util.h"
#include "media/base/mock_demuxer_host.h"
#include "media/base/mock_media_log.h"
#include "media/base/test_helpers.h"
@@ -269,7 +269,7 @@ class FFmpegDemuxerTest : public testing::Test {
// using StrictMock<MockMediaLog> for all FFmpegDemuxerTests. See
// https://crbug.com/749178.
StrictMock<MockMediaLog> media_log_;
- MediaLog dummy_media_log_;
+ NullMediaLog dummy_media_log_;
std::unique_ptr<FileDataSource> data_source_;
std::unique_ptr<FFmpegDemuxer> demuxer_;
@@ -445,12 +445,12 @@ TEST_F(FFmpegDemuxerTest, Initialize_Multitrack) {
#endif
TEST_F(FFmpegDemuxerTest, Initialize_Encrypted) {
- EXPECT_CALL(*this,
- OnEncryptedMediaInitData(
- EmeInitDataType::WEBM,
- std::vector<uint8_t>(kEncryptedMediaInitData,
- kEncryptedMediaInitData +
- arraysize(kEncryptedMediaInitData))))
+ EXPECT_CALL(
+ *this, OnEncryptedMediaInitData(
+ EmeInitDataType::WEBM,
+ std::vector<uint8_t>(kEncryptedMediaInitData,
+ kEncryptedMediaInitData +
+ base::size(kEncryptedMediaInitData))))
.Times(Exactly(2));
CreateDemuxer("bear-320x240-av_enc-av.webm");
@@ -770,7 +770,7 @@ TEST_F(FFmpegDemuxerTest, Read_AudioNegativeStartTimeAndOpusDiscard_Sync) {
// Run the test twice with a seek in between.
for (int i = 0; i < 2; ++i) {
- for (size_t j = 0; j < arraysize(kTestExpectations); ++j) {
+ for (size_t j = 0; j < base::size(kTestExpectations); ++j) {
audio->Read(NewReadCB(FROM_HERE, kTestExpectations[j][0],
kTestExpectations[j][1], true));
base::RunLoop().Run();
@@ -844,7 +844,7 @@ TEST_F(FFmpegDemuxerTest,
FROM_HERE, 408, 0, base::TimeDelta::FromMicroseconds(6500), true));
base::RunLoop().Run();
- for (size_t j = 0; j < arraysize(kTestExpectations); ++j) {
+ for (size_t j = 0; j < base::size(kTestExpectations); ++j) {
audio->Read(NewReadCB(FROM_HERE, kTestExpectations[j][0],
kTestExpectations[j][1], true));
base::RunLoop().Run();
@@ -1255,7 +1255,7 @@ TEST_F(FFmpegDemuxerTest, IsValidAnnexB) {
"bear-1280x720-av_with-aud-nalus_frag.mp4"
};
- for (size_t i = 0; i < arraysize(files); ++i) {
+ for (size_t i = 0; i < base::size(files); ++i) {
DVLOG(1) << "Testing " << files[i];
CreateDemuxer(files[i]);
InitializeDemuxer();
@@ -1549,7 +1549,7 @@ TEST_F(FFmpegDemuxerTest, UTCDateToTime_Invalid) {
"2012-11-1012:34:56",
};
- for (size_t i = 0; i < arraysize(invalid_date_strings); ++i) {
+ for (size_t i = 0; i < base::size(invalid_date_strings); ++i) {
const char* date_string = invalid_date_strings[i];
base::Time result;
EXPECT_FALSE(base::Time::FromUTCString(date_string, &result))
diff --git a/chromium/media/filters/ffmpeg_glue.cc b/chromium/media/filters/ffmpeg_glue.cc
index 233ec6e0f26..bd498dcedbe 100644
--- a/chromium/media/filters/ffmpeg_glue.cc
+++ b/chromium/media/filters/ffmpeg_glue.cc
@@ -63,6 +63,13 @@ static int64_t AVIOSeekOperation(void* opaque, int64_t offset, int whence) {
return new_offset;
}
+static void LogContainer(bool is_local_file,
+ container_names::MediaContainerName container) {
+ base::UmaHistogramSparse("Media.DetectedContainer", container);
+ if (is_local_file)
+ base::UmaHistogramSparse("Media.DetectedContainer.Local", container);
+}
+
FFmpegGlue::FFmpegGlue(FFmpegURLProtocol* protocol) {
// Initialize an AVIOContext using our custom read and seek operations. Don't
// keep pointers to the buffer since FFmpeg may reallocate it on the fly. It
@@ -125,9 +132,7 @@ bool FFmpegGlue::OpenContext(bool is_local_file) {
return false;
container_ = container_names::DetermineContainer(buffer.data(), num_read);
- base::UmaHistogramSparse("Media.DetectedContainer", container_);
- if (is_local_file)
- base::UmaHistogramSparse("Media.DetectedContainer.Local", container_);
+ LogContainer(is_local_file, container_);
detected_hls_ =
container_ == container_names::MediaContainerName::CONTAINER_HLS;
@@ -157,7 +162,7 @@ bool FFmpegGlue::OpenContext(bool is_local_file) {
container_ = container_names::CONTAINER_AVI;
DCHECK_NE(container_, container_names::CONTAINER_UNKNOWN);
- base::UmaHistogramSparse("Media.DetectedContainer", container_);
+ LogContainer(is_local_file, container_);
return true;
}
diff --git a/chromium/media/filters/ffmpeg_h264_to_annex_b_bitstream_converter.cc b/chromium/media/filters/ffmpeg_h264_to_annex_b_bitstream_converter.cc
index a8d56d280e6..64f8c53440d 100644
--- a/chromium/media/filters/ffmpeg_h264_to_annex_b_bitstream_converter.cc
+++ b/chromium/media/filters/ffmpeg_h264_to_annex_b_bitstream_converter.cc
@@ -25,20 +25,25 @@ FFmpegH264ToAnnexBBitstreamConverter::~FFmpegH264ToAnnexBBitstreamConverter() =
bool FFmpegH264ToAnnexBBitstreamConverter::ConvertPacket(AVPacket* packet) {
std::unique_ptr<mp4::AVCDecoderConfigurationRecord> avc_config;
- if (packet == NULL || !packet->data)
+ if (packet == NULL || !packet->data) {
+ DVLOG(2) << __func__ << ": Null or empty packet";
return false;
+ }
// Calculate the needed output buffer size.
if (!configuration_processed_) {
if (!stream_codec_parameters_->extradata ||
- stream_codec_parameters_->extradata_size <= 0)
+ stream_codec_parameters_->extradata_size <= 0) {
+ DVLOG(2) << __func__ << ": Empty extra data";
return false;
+ }
avc_config.reset(new mp4::AVCDecoderConfigurationRecord());
if (!converter_.ParseConfiguration(stream_codec_parameters_->extradata,
stream_codec_parameters_->extradata_size,
avc_config.get())) {
+ DVLOG(2) << __func__ << ": ParseConfiguration() failure";
return false;
}
}
@@ -46,13 +51,17 @@ bool FFmpegH264ToAnnexBBitstreamConverter::ConvertPacket(AVPacket* packet) {
uint32_t output_packet_size = converter_.CalculateNeededOutputBufferSize(
packet->data, packet->size, avc_config.get());
- if (output_packet_size == 0)
+ if (output_packet_size == 0) {
+ DVLOG(2) << __func__ << ": zero |output_packet_size|";
return false; // Invalid input packet.
+ }
// Allocate new packet for the output.
AVPacket dest_packet;
- if (av_new_packet(&dest_packet, output_packet_size) != 0)
- return false; // Memory allocation failure.
+ if (av_new_packet(&dest_packet, output_packet_size) != 0) {
+ DVLOG(2) << __func__ << ": Memory allocation failure";
+ return false;
+ }
// This is a bit tricky: since the interface does not allow us to replace
// the pointer of the old packet with a new one, we will initially copy the
@@ -66,6 +75,7 @@ bool FFmpegH264ToAnnexBBitstreamConverter::ConvertPacket(AVPacket* packet) {
packet->data, packet->size,
avc_config.get(),
dest_packet.data, &io_size)) {
+ DVLOG(2) << __func__ << ": ConvertNalUnitStreamToByteStream() failure";
return false;
}
diff --git a/chromium/media/filters/ffmpeg_video_decoder.cc b/chromium/media/filters/ffmpeg_video_decoder.cc
index aa3bf04fd49..14ee37261fd 100644
--- a/chromium/media/filters/ffmpeg_video_decoder.cc
+++ b/chromium/media/filters/ffmpeg_video_decoder.cc
@@ -205,13 +205,12 @@ std::string FFmpegVideoDecoder::GetDisplayName() const {
return "FFmpegVideoDecoder";
}
-void FFmpegVideoDecoder::Initialize(
- const VideoDecoderConfig& config,
- bool low_delay,
- CdmContext* /* cdm_context */,
- const InitCB& init_cb,
- const OutputCB& output_cb,
- const WaitingForDecryptionKeyCB& /* waiting_for_decryption_key_cb */) {
+void FFmpegVideoDecoder::Initialize(const VideoDecoderConfig& config,
+ bool low_delay,
+ CdmContext* /* cdm_context */,
+ const InitCB& init_cb,
+ const OutputCB& output_cb,
+ const WaitingCB& /* waiting_cb */) {
DVLOG(1) << __func__ << ": " << config.AsHumanReadableString();
DCHECK(thread_checker_.CalledOnValidThread());
DCHECK(config.IsValidConfig());
diff --git a/chromium/media/filters/ffmpeg_video_decoder.h b/chromium/media/filters/ffmpeg_video_decoder.h
index 33bb6fa5ae1..c16141b2d72 100644
--- a/chromium/media/filters/ffmpeg_video_decoder.h
+++ b/chromium/media/filters/ffmpeg_video_decoder.h
@@ -39,13 +39,12 @@ class MEDIA_EXPORT FFmpegVideoDecoder : public VideoDecoder {
// VideoDecoder implementation.
std::string GetDisplayName() const override;
- void Initialize(
- const VideoDecoderConfig& config,
- bool low_delay,
- CdmContext* cdm_context,
- const InitCB& init_cb,
- const OutputCB& output_cb,
- const WaitingForDecryptionKeyCB& waiting_for_decryption_key_cb) override;
+ void Initialize(const VideoDecoderConfig& config,
+ bool low_delay,
+ CdmContext* cdm_context,
+ const InitCB& init_cb,
+ const OutputCB& output_cb,
+ const WaitingCB& waiting_cb) override;
void Decode(scoped_refptr<DecoderBuffer> buffer,
const DecodeCB& decode_cb) override;
void Reset(const base::Closure& closure) override;
diff --git a/chromium/media/filters/frame_processor.cc b/chromium/media/filters/frame_processor.cc
index cbbe3ff231f..0c3d1132ed4 100644
--- a/chromium/media/filters/frame_processor.cc
+++ b/chromium/media/filters/frame_processor.cc
@@ -33,7 +33,8 @@ class MseTrackBuffer {
public:
MseTrackBuffer(ChunkDemuxerStream* stream,
MediaLog* media_log,
- const SourceBufferParseWarningCB& parse_warning_cb);
+ const SourceBufferParseWarningCB& parse_warning_cb,
+ ChunkDemuxerStream::RangeApi range_api);
~MseTrackBuffer();
// Get/set |last_decode_timestamp_|.
@@ -96,8 +97,11 @@ class MseTrackBuffer {
// monotonically increasing.
void SetHighestPresentationTimestampIfIncreased(base::TimeDelta timestamp);
- // Adds |frame| to the end of |processed_frames_|.
- void EnqueueProcessedFrame(scoped_refptr<StreamParserBuffer> frame);
+ // Adds |frame| to the end of |processed_frames_|. In some BufferingByPts
+ // SAP-Type-2 conditions, may also flush any previously enqueued frames, which
+ // can fail. Returns the result of such flushing, or true if no flushing was
+ // done.
+ bool EnqueueProcessedFrame(scoped_refptr<StreamParserBuffer> frame);
// Appends |processed_frames_|, if not empty, to |stream_| and clears
// |processed_frames_|. Returns false if append failed, true otherwise.
@@ -144,6 +148,15 @@ class MseTrackBuffer {
// coded frame group needs to be signalled.
base::TimeDelta last_keyframe_presentation_timestamp_;
+ // These are used to determine if more incremental flushing is needed to
+ // correctly buffer a SAP-Type-2 non-keyframe when buffering by PTS. They are
+ // updated (if necessary) in FlushProcessedFrames() and
+ // NotifyStartOfCodedFrameGroup(), and they are consulted (if necessary) in
+ // EnqueueProcessedFrame().
+ base::TimeDelta last_signalled_group_start_pts_;
+ bool have_flushed_since_last_group_start_;
+ ChunkDemuxerStream::RangeApi range_api_;
+
// The coded frame duration of the last coded frame appended in the current
// coded frame group. Initially kNoTimestamp, meaning "unset".
base::TimeDelta last_frame_duration_;
@@ -184,11 +197,15 @@ class MseTrackBuffer {
MseTrackBuffer::MseTrackBuffer(
ChunkDemuxerStream* stream,
MediaLog* media_log,
- const SourceBufferParseWarningCB& parse_warning_cb)
+ const SourceBufferParseWarningCB& parse_warning_cb,
+ ChunkDemuxerStream::RangeApi range_api)
: last_decode_timestamp_(kNoDecodeTimestamp()),
last_processed_decode_timestamp_(DecodeTimestamp()),
pending_group_start_pts_(kNoTimestamp),
last_keyframe_presentation_timestamp_(kNoTimestamp),
+ last_signalled_group_start_pts_(kNoTimestamp),
+ have_flushed_since_last_group_start_(false),
+ range_api_(range_api),
last_frame_duration_(kNoTimestamp),
highest_presentation_timestamp_(kNoTimestamp),
needs_random_access_point_(true),
@@ -225,7 +242,7 @@ void MseTrackBuffer::SetHighestPresentationTimestampIfIncreased(
}
}
-void MseTrackBuffer::EnqueueProcessedFrame(
+bool MseTrackBuffer::EnqueueProcessedFrame(
scoped_refptr<StreamParserBuffer> frame) {
if (frame->is_key_frame()) {
last_keyframe_presentation_timestamp_ = frame->timestamp();
@@ -259,6 +276,42 @@ void MseTrackBuffer::EnqueueProcessedFrame(
<< ") that depends on it. This type of random access point is not "
"well supported by MSE; buffered range reporting may be less "
"precise.";
+
+ // SAP-Type-2 GOPs (when buffering ByPts), by definition, contain at
+ // least one non-keyframe with PTS prior to the keyframe's PTS, with DTS
+ // continuous from keyframe forward to at least that non-keyframe. If
+ // such a non-keyframe overlaps the end of a previously buffered GOP
+ // sufficiently (such that, say, some previous GOP's non-keyframes
+ // depending on the overlapped non-keyframe(s) must be dropped), then a
+ // gap might need to result. But if we attempt to buffer the new GOP's
+ // keyframe through at least that first non-keyframe that does such
+ // overlapping all at once, the buffering mechanism doesn't expect such
+ // a discontinuity could occur (failing assumptions in places like
+ // SourceBufferRangeByPts).
+ //
+ // To prevent such failure, we can first flush what's previously been
+ // enqueued (if anything), but do this conservatively to not flush
+ // unnecessarily: we suppress such a flush if this nonkeyframe's PTS is
+ // still higher than the last coded frame group start time signalled for
+ // this track and no flush has yet occurred for this track since then, or
+ // if there has been a flush since then but this nonkeyframe's PTS is no
+ // lower than the PTS of the first frame pending flush currently.
+ if (range_api_ == ChunkDemuxerStream::RangeApi::kNewByPts &&
+ !processed_frames_.empty()) {
+ DCHECK(kNoTimestamp != last_signalled_group_start_pts_);
+
+ if (!have_flushed_since_last_group_start_) {
+ if (frame->timestamp() < last_signalled_group_start_pts_) {
+ if (!FlushProcessedFrames())
+ return false;
+ }
+ } else {
+ if (frame->timestamp() < processed_frames_.front()->timestamp()) {
+ if (!FlushProcessedFrames())
+ return false;
+ }
+ }
+ }
}
}
@@ -267,6 +320,7 @@ void MseTrackBuffer::EnqueueProcessedFrame(
pending_group_start_pts_ = kNoTimestamp;
last_processed_decode_timestamp_ = frame->GetDecodeTimestamp();
processed_frames_.emplace_back(std::move(frame));
+ return true;
}
bool MseTrackBuffer::FlushProcessedFrames() {
@@ -275,6 +329,7 @@ bool MseTrackBuffer::FlushProcessedFrames() {
bool result = stream_->Append(processed_frames_);
processed_frames_.clear();
+ have_flushed_since_last_group_start_ = true;
DVLOG_IF(3, !result) << __func__
<< "(): Failure appending processed frames to stream";
@@ -287,6 +342,8 @@ void MseTrackBuffer::NotifyStartOfCodedFrameGroup(DecodeTimestamp start_dts,
last_keyframe_presentation_timestamp_ = kNoTimestamp;
last_processed_decode_timestamp_ = start_dts;
pending_group_start_pts_ = start_pts;
+ have_flushed_since_last_group_start_ = false;
+ last_signalled_group_start_pts_ = start_pts;
stream_->OnStartOfCodedFrameGroup(start_dts, start_pts);
}
@@ -430,8 +487,8 @@ bool FrameProcessor::AddTrack(StreamParser::TrackId id,
return false;
}
- track_buffers_[id] =
- std::make_unique<MseTrackBuffer>(stream, media_log_, parse_warning_cb_);
+ track_buffers_[id] = std::make_unique<MseTrackBuffer>(
+ stream, media_log_, parse_warning_cb_, range_api_);
return true;
}
@@ -956,13 +1013,14 @@ bool FrameProcessor::ProcessFrame(scoped_refptr<StreamParserBuffer> frame,
// When buffering by PTS intervals and an otherwise continuous coded frame
// group (by DTS, and with non-decreasing keyframe PTS) contains a
- // keyframe with PTS in the future, signal a new coded frame group with
+ // keyframe with PTS in the future significantly far enough that it may be
+ // outside of buffering fudge room, signal a new coded frame group with
// start time set to the previous highest frame end time in the coded
// frame group for this track. This lets the stream coalesce a potential
// gap, and also pass internal buffer adjacency checks.
signal_new_cfg |=
track_buffer->highest_presentation_timestamp() != kNoTimestamp &&
- track_buffer->highest_presentation_timestamp() <
+ track_buffer->highest_presentation_timestamp() + frame->duration() <
presentation_timestamp;
}
@@ -993,14 +1051,15 @@ bool FrameProcessor::ProcessFrame(scoped_refptr<StreamParserBuffer> frame,
}
}
- DVLOG(3) << __func__ << ": Sending processed frame to stream, "
+ DVLOG(3) << __func__ << ": Enqueueing processed frame "
<< "PTS=" << presentation_timestamp.InMicroseconds()
<< "us, DTS=" << decode_timestamp.InMicroseconds() << "us";
// Steps 11-16: Note, we optimize by appending groups of contiguous
// processed frames for each track buffer at end of ProcessFrames() or prior
// to signalling coded frame group starts.
- track_buffer->EnqueueProcessedFrame(std::move(frame));
+ if (!track_buffer->EnqueueProcessedFrame(std::move(frame)))
+ return false;
// 17. Set last decode timestamp for track buffer to decode timestamp.
track_buffer->set_last_decode_timestamp(decode_timestamp);
diff --git a/chromium/media/filters/frame_processor_unittest.cc b/chromium/media/filters/frame_processor_unittest.cc
index 222a3fd8ba6..4ad4a7159bb 100644
--- a/chromium/media/filters/frame_processor_unittest.cc
+++ b/chromium/media/filters/frame_processor_unittest.cc
@@ -28,6 +28,7 @@
#include "media/filters/frame_processor.h"
#include "testing/gtest/include/gtest/gtest.h"
+using ::testing::_;
using ::testing::InSequence;
using ::testing::StrictMock;
using ::testing::Values;
@@ -79,6 +80,14 @@ class FrameProcessorTestCallbackHelper {
ASSERT_NE(kInfiniteDuration, new_duration);
}
+ MOCK_METHOD2(OnAppend,
+ void(const DemuxerStream::Type type,
+ const BufferQueue* buffers));
+ MOCK_METHOD3(OnGroupStart,
+ void(const DemuxerStream::Type type,
+ DecodeTimestamp start_dts,
+ base::TimeDelta start_pts));
+
private:
DISALLOW_COPY_AND_ASSIGN(FrameProcessorTestCallbackHelper);
};
@@ -107,7 +116,8 @@ class FrameProcessorTest
enum StreamFlags {
HAS_AUDIO = 1 << 0,
- HAS_VIDEO = 1 << 1
+ HAS_VIDEO = 1 << 1,
+ OBSERVE_APPENDS_AND_GROUP_STARTS = 1 << 2
};
void AddTestTracks(int stream_flags) {
@@ -115,14 +125,17 @@ class FrameProcessorTest
const bool has_video = (stream_flags & HAS_VIDEO) != 0;
ASSERT_TRUE(has_audio || has_video);
+ const bool setup_observers =
+ (stream_flags & OBSERVE_APPENDS_AND_GROUP_STARTS) != 0;
+
if (has_audio) {
- CreateAndConfigureStream(DemuxerStream::AUDIO);
+ CreateAndConfigureStream(DemuxerStream::AUDIO, setup_observers);
ASSERT_TRUE(audio_);
EXPECT_TRUE(frame_processor_->AddTrack(audio_id_, audio_.get()));
SeekStream(audio_.get(), Milliseconds(0));
}
if (has_video) {
- CreateAndConfigureStream(DemuxerStream::VIDEO);
+ CreateAndConfigureStream(DemuxerStream::VIDEO, setup_observers);
ASSERT_TRUE(video_);
EXPECT_TRUE(frame_processor_->AddTrack(video_id_, video_.get()));
SeekStream(video_.get(), Milliseconds(0));
@@ -341,8 +354,11 @@ class FrameProcessorTest
last_read_buffer_ = buffer;
}
- void CreateAndConfigureStream(DemuxerStream::Type type) {
+ void CreateAndConfigureStream(DemuxerStream::Type type,
+ bool setup_observers) {
// TODO(wolenetz/dalecurtis): Also test with splicing disabled?
+
+ ChunkDemuxerStream* stream;
switch (type) {
case DemuxerStream::AUDIO: {
ASSERT_FALSE(audio_);
@@ -354,6 +370,8 @@ class FrameProcessorTest
frame_processor_->OnPossibleAudioConfigUpdate(decoder_config);
ASSERT_TRUE(
audio_->UpdateAudioConfig(decoder_config, false, &media_log_));
+
+ stream = audio_.get();
break;
}
case DemuxerStream::VIDEO: {
@@ -362,6 +380,7 @@ class FrameProcessorTest
new ChunkDemuxerStream(DemuxerStream::VIDEO, "2", range_api_));
ASSERT_TRUE(video_->UpdateVideoConfig(TestVideoConfig::Normal(), false,
&media_log_));
+ stream = video_.get();
break;
}
// TODO(wolenetz): Test text coded frame processing.
@@ -370,6 +389,15 @@ class FrameProcessorTest
ASSERT_FALSE(true);
}
}
+
+ if (setup_observers) {
+ stream->set_append_observer_for_testing(
+ base::BindRepeating(&FrameProcessorTestCallbackHelper::OnAppend,
+ base::Unretained(&callbacks_), type));
+ stream->set_group_start_observer_for_testing(
+ base::BindRepeating(&FrameProcessorTestCallbackHelper::OnGroupStart,
+ base::Unretained(&callbacks_), type));
+ }
}
DISALLOW_COPY_AND_ASSIGN(FrameProcessorTest);
@@ -1335,7 +1363,7 @@ TEST_P(FrameProcessorTest,
}
InSequence s;
- AddTestTracks(HAS_VIDEO);
+ AddTestTracks(HAS_VIDEO | OBSERVE_APPENDS_AND_GROUP_STARTS);
frame_processor_->SetSequenceMode(use_sequence_mode_);
// Make the sequence mode buffering appear just like segments mode to simplify
@@ -1343,32 +1371,41 @@ TEST_P(FrameProcessorTest,
if (use_sequence_mode_)
SetTimestampOffset(Milliseconds(1060));
+ // Note that the PTS of GOP non-keyframes earlier than the keyframe doesn't
+ // modify the GOP start of the buffered range here. This may change if we
+ // decide to improve spec for SAP Type 2 GOPs that begin a coded frame group.
+ EXPECT_CALL(callbacks_, OnGroupStart(DemuxerStream::VIDEO, DecodeTimestamp(),
+ Milliseconds(1060)));
EXPECT_CALL(callbacks_,
OnParseWarning(
SourceBufferParseWarning::kKeyframeTimeGreaterThanDependant));
EXPECT_MEDIA_LOG(KeyframeTimeGreaterThanDependant("1.06", "1"));
+ EXPECT_CALL(callbacks_, OnAppend(DemuxerStream::VIDEO, _));
+ EXPECT_CALL(callbacks_, OnAppend(DemuxerStream::VIDEO, _));
EXPECT_CALL(callbacks_, PossibleDurationIncrease(Milliseconds(1070)));
EXPECT_TRUE(ProcessFrames(
"", "1060|0K 1000|10 1050|20 1010|30 1040|40 1020|50 1030|60"));
EXPECT_EQ(Milliseconds(0), timestamp_offset_);
-
- // Note that the PTS of GOP non-keyframes earlier than the keyframe doesn't
- // modify the GOP start of the buffered range here. This may change if we
- // decide to improve spec for SAP Type 2 GOPs that begin a coded frame group.
CheckExpectedRangesByTimestamp(video_.get(), "{ [1060,1070) }");
// Process just the keyframe of the next SAP Type 2 GOP in decode continuity
// with the previous one.
+ // Note that this second GOP is buffered continuous with the first because
+ // there is no decode discontinuity detected. This results in inclusion of
+ // the significant PTS jump forward in the same continuous range.
+ EXPECT_CALL(
+ callbacks_,
+ OnGroupStart(DemuxerStream::VIDEO,
+ DecodeTimestamp::FromPresentationTime(Milliseconds(60)),
+ Milliseconds(1070)));
+ EXPECT_CALL(callbacks_, OnAppend(DemuxerStream::VIDEO, _));
EXPECT_CALL(callbacks_, PossibleDurationIncrease(Milliseconds(1140)));
EXPECT_TRUE(ProcessFrames("", "1130|70K"));
EXPECT_EQ(Milliseconds(0), timestamp_offset_);
-
- // Note that the second GOP is buffered continuous with the first because
- // there was no decode discontinuity detected. This results in inclusion of
- // the significant PTS jump forward in the same continuous range.
CheckExpectedRangesByTimestamp(video_.get(), "{ [1060,1140) }");
// Process the remainder of the second GOP.
+ EXPECT_CALL(callbacks_, OnAppend(DemuxerStream::VIDEO, _));
EXPECT_CALL(callbacks_, PossibleDurationIncrease(Milliseconds(1140)));
EXPECT_TRUE(
ProcessFrames("", "1070|80 1120|90 1080|100 1110|110 1090|120 1100|130"));
@@ -1397,7 +1434,7 @@ TEST_P(FrameProcessorTest,
}
InSequence s;
- AddTestTracks(HAS_VIDEO);
+ AddTestTracks(HAS_VIDEO | OBSERVE_APPENDS_AND_GROUP_STARTS);
frame_processor_->SetSequenceMode(use_sequence_mode_);
// Make the sequence mode buffering appear just like segments mode to simplify
@@ -1405,10 +1442,19 @@ TEST_P(FrameProcessorTest,
if (use_sequence_mode_)
SetTimestampOffset(Milliseconds(100));
+ EXPECT_CALL(callbacks_, OnGroupStart(DemuxerStream::VIDEO, DecodeTimestamp(),
+ Milliseconds(100)));
+ EXPECT_CALL(callbacks_, OnAppend(DemuxerStream::VIDEO, _));
EXPECT_CALL(callbacks_, PossibleDurationIncrease(Milliseconds(140)));
EXPECT_TRUE(ProcessFrames("", "100|0K 110|10 120|20 130|30"));
EXPECT_EQ(Milliseconds(0), timestamp_offset_);
+ EXPECT_CALL(
+ callbacks_,
+ OnGroupStart(DemuxerStream::VIDEO,
+ DecodeTimestamp::FromPresentationTime(Milliseconds(30)),
+ Milliseconds(125)));
+ EXPECT_CALL(callbacks_, OnAppend(DemuxerStream::VIDEO, _));
EXPECT_CALL(callbacks_, PossibleDurationIncrease(Milliseconds(165)));
EXPECT_TRUE(ProcessFrames("", "125|40K 135|50 145|60 155|70"));
EXPECT_EQ(Milliseconds(0), timestamp_offset_);
@@ -1429,7 +1475,7 @@ TEST_P(FrameProcessorTest,
}
InSequence s;
- AddTestTracks(HAS_VIDEO);
+ AddTestTracks(HAS_VIDEO | OBSERVE_APPENDS_AND_GROUP_STARTS);
frame_processor_->SetSequenceMode(use_sequence_mode_);
// Make the sequence mode buffering appear just like segments mode to simplify
@@ -1437,10 +1483,19 @@ TEST_P(FrameProcessorTest,
if (use_sequence_mode_)
SetTimestampOffset(Milliseconds(100));
+ EXPECT_CALL(callbacks_, OnGroupStart(DemuxerStream::VIDEO, DecodeTimestamp(),
+ Milliseconds(100)));
+ EXPECT_CALL(callbacks_, OnAppend(DemuxerStream::VIDEO, _));
EXPECT_CALL(callbacks_, PossibleDurationIncrease(Milliseconds(140)));
EXPECT_TRUE(ProcessFrames("", "100|0K 110|10 120|20K 130|30"));
EXPECT_EQ(Milliseconds(0), timestamp_offset_);
+ EXPECT_CALL(
+ callbacks_,
+ OnGroupStart(DemuxerStream::VIDEO,
+ DecodeTimestamp::FromPresentationTime(Milliseconds(30)),
+ Milliseconds(115)));
+ EXPECT_CALL(callbacks_, OnAppend(DemuxerStream::VIDEO, _));
// TODO(wolenetz): Duration shouldn't be allowed to possibly increase to 140ms
// here. See https://crbug.com/763620.
EXPECT_CALL(callbacks_, PossibleDurationIncrease(Milliseconds(140)));
@@ -1463,7 +1518,7 @@ TEST_P(FrameProcessorTest,
}
InSequence s;
- AddTestTracks(HAS_VIDEO);
+ AddTestTracks(HAS_VIDEO | OBSERVE_APPENDS_AND_GROUP_STARTS);
frame_processor_->SetSequenceMode(use_sequence_mode_);
// Make the sequence mode buffering appear just like segments mode to simplify
@@ -1471,14 +1526,23 @@ TEST_P(FrameProcessorTest,
if (use_sequence_mode_)
SetTimestampOffset(Milliseconds(120));
+ EXPECT_CALL(callbacks_, OnGroupStart(DemuxerStream::VIDEO, DecodeTimestamp(),
+ Milliseconds(120)));
EXPECT_CALL(callbacks_,
OnParseWarning(
SourceBufferParseWarning::kKeyframeTimeGreaterThanDependant));
EXPECT_MEDIA_LOG(KeyframeTimeGreaterThanDependant("0.12", "0.1"));
+ EXPECT_CALL(callbacks_, OnAppend(DemuxerStream::VIDEO, _));
+ EXPECT_CALL(callbacks_, OnAppend(DemuxerStream::VIDEO, _));
EXPECT_CALL(callbacks_, PossibleDurationIncrease(Milliseconds(140)));
EXPECT_TRUE(ProcessFrames("", "120|0K 100|10 130|20 110|30"));
EXPECT_EQ(Milliseconds(0), timestamp_offset_);
+ // Note, we *don't* expect another OnGroupStart during the next ProcessFrames,
+ // since the next GOP's keyframe PTS is after the first GOP and close enough
+ // to be assured adjacent.
+ EXPECT_CALL(callbacks_, OnAppend(DemuxerStream::VIDEO, _));
+ EXPECT_CALL(callbacks_, OnAppend(DemuxerStream::VIDEO, _));
EXPECT_CALL(callbacks_, PossibleDurationIncrease(Milliseconds(165)));
EXPECT_TRUE(ProcessFrames("", "145|40K 125|50 155|60 135|70"));
EXPECT_EQ(Milliseconds(0), timestamp_offset_);
@@ -1503,7 +1567,7 @@ TEST_P(FrameProcessorTest,
}
InSequence s;
- AddTestTracks(HAS_VIDEO);
+ AddTestTracks(HAS_VIDEO | OBSERVE_APPENDS_AND_GROUP_STARTS);
frame_processor_->SetSequenceMode(use_sequence_mode_);
// Make the sequence mode buffering appear just like segments mode to simplify
@@ -1511,15 +1575,35 @@ TEST_P(FrameProcessorTest,
if (use_sequence_mode_)
SetTimestampOffset(Milliseconds(120));
+ EXPECT_CALL(callbacks_, OnGroupStart(DemuxerStream::VIDEO, DecodeTimestamp(),
+ Milliseconds(120)));
EXPECT_CALL(callbacks_,
OnParseWarning(
SourceBufferParseWarning::kKeyframeTimeGreaterThanDependant));
EXPECT_MEDIA_LOG(KeyframeTimeGreaterThanDependant("0.12", "0.1"));
+ EXPECT_CALL(callbacks_, OnAppend(DemuxerStream::VIDEO, _));
+ EXPECT_CALL(callbacks_, OnAppend(DemuxerStream::VIDEO, _));
+ // There is a second GOP that is SAP-Type-2 within this first ProcessFrames,
+ // with PTS jumping forward far enough to trigger group start signalling and a
+ // flush.
+ EXPECT_CALL(
+ callbacks_,
+ OnGroupStart(DemuxerStream::VIDEO,
+ DecodeTimestamp::FromPresentationTime(Milliseconds(30)),
+ Milliseconds(140)));
+ EXPECT_CALL(callbacks_, OnAppend(DemuxerStream::VIDEO, _));
EXPECT_CALL(callbacks_, PossibleDurationIncrease(Milliseconds(180)));
EXPECT_TRUE(ProcessFrames(
"", "120|0K 100|10 130|20 110|30 160|40K 140|50 170|60 150|70"));
EXPECT_EQ(Milliseconds(0), timestamp_offset_);
+ EXPECT_CALL(
+ callbacks_,
+ OnGroupStart(DemuxerStream::VIDEO,
+ DecodeTimestamp::FromPresentationTime(Milliseconds(70)),
+ Milliseconds(155)));
+ EXPECT_CALL(callbacks_, OnAppend(DemuxerStream::VIDEO, _));
+ EXPECT_CALL(callbacks_, OnAppend(DemuxerStream::VIDEO, _));
// TODO(wolenetz): Duration shouldn't be allowed to possibly increase to 180ms
// here. See https://crbug.com/763620.
EXPECT_CALL(callbacks_, PossibleDurationIncrease(Milliseconds(180)));
@@ -1535,6 +1619,204 @@ TEST_P(FrameProcessorTest,
}
TEST_P(FrameProcessorTest,
+ BufferingByPts_ContinuousDts_NewSap2GopEndOverlapsLastGop_3_GopByGop) {
+ // API user might craft a continuous-in-DTS-with-previous-append GOP that has
+ // PTS interval overlapping the previous append, using SAP Type 2 GOPs. Tests
+ // SAP-Type 2 GOPs, where newly appended GOP overlaps enough nonkeyframes of
+ // the previous GOP such that dropped decode dependencies might cause problems
+ // if the first nonkeyframe with PTS prior to the GOP's keyframe PTS is
+ // flushed at the same time as its keyframe, but the second GOP's keyframe PTS
+ // is close enough to the end of the first GOP's presentation interval to not
+ // signal a new coded frame group start.
+ if (range_api_ == ChunkDemuxerStream::RangeApi::kLegacyByDts) {
+ DVLOG(1) << "Skipping kLegacyByDts versions of this test";
+ return;
+ }
+
+ InSequence s;
+ AddTestTracks(HAS_VIDEO | OBSERVE_APPENDS_AND_GROUP_STARTS);
+ frame_processor_->SetSequenceMode(use_sequence_mode_);
+
+ // Make the sequence mode buffering appear just like segments mode to simplify
+ // this test case.
+ if (use_sequence_mode_)
+ SetTimestampOffset(Milliseconds(500));
+
+ EXPECT_CALL(callbacks_, OnGroupStart(DemuxerStream::VIDEO, DecodeTimestamp(),
+ Milliseconds(500)));
+ EXPECT_CALL(callbacks_, OnAppend(DemuxerStream::VIDEO, _));
+ EXPECT_CALL(callbacks_, PossibleDurationIncrease(Milliseconds(530)));
+ EXPECT_TRUE(ProcessFrames("", "500|0K 520|10 510|20"));
+ CheckExpectedRangesByTimestamp(video_.get(), "{ [500,530) }");
+
+ EXPECT_CALL(callbacks_,
+ OnParseWarning(
+ SourceBufferParseWarning::kKeyframeTimeGreaterThanDependant));
+ EXPECT_MEDIA_LOG(KeyframeTimeGreaterThanDependant("0.54", "0.52"));
+ EXPECT_CALL(callbacks_, OnAppend(DemuxerStream::VIDEO, _));
+ EXPECT_CALL(callbacks_, OnAppend(DemuxerStream::VIDEO, _));
+ EXPECT_CALL(callbacks_, PossibleDurationIncrease(Milliseconds(550)));
+ EXPECT_TRUE(ProcessFrames("", "540|30K 520|40 530|50"));
+
+ CheckExpectedRangesByTimestamp(video_.get(), "{ [500,550) }");
+ SeekStream(video_.get(), Milliseconds(500));
+ CheckReadsThenReadStalls(video_.get(), "500 520 510 540 520 530");
+}
+
+TEST_P(
+ FrameProcessorTest,
+ BufferingByPts_ContinuousDts_NewSap2GopEndOverlapsLastGop_3_FrameByFrame) {
+ // Tests that the buffered range results match the previous GopByGop test if
+ // each frame of the second GOP is explicitly appended by the app
+ // one-at-a-time.
+ if (range_api_ == ChunkDemuxerStream::RangeApi::kLegacyByDts) {
+ DVLOG(1) << "Skipping kLegacyByDts versions of this test";
+ return;
+ }
+
+ InSequence s;
+ AddTestTracks(HAS_VIDEO | OBSERVE_APPENDS_AND_GROUP_STARTS);
+ frame_processor_->SetSequenceMode(use_sequence_mode_);
+
+ // Make the sequence mode buffering appear just like segments mode to simplify
+ // this test case.
+ if (use_sequence_mode_)
+ SetTimestampOffset(Milliseconds(500));
+
+ EXPECT_CALL(callbacks_, OnGroupStart(DemuxerStream::VIDEO, DecodeTimestamp(),
+ Milliseconds(500)));
+ EXPECT_CALL(callbacks_, OnAppend(DemuxerStream::VIDEO, _));
+ EXPECT_CALL(callbacks_, PossibleDurationIncrease(Milliseconds(530)));
+ EXPECT_TRUE(ProcessFrames("", "500|0K 520|10 510|20"));
+ CheckExpectedRangesByTimestamp(video_.get(), "{ [500,530) }");
+
+ EXPECT_CALL(callbacks_, OnAppend(DemuxerStream::VIDEO, _));
+ EXPECT_CALL(callbacks_, PossibleDurationIncrease(Milliseconds(550)));
+ EXPECT_TRUE(ProcessFrames("", "540|30K"));
+
+ EXPECT_CALL(callbacks_,
+ OnParseWarning(
+ SourceBufferParseWarning::kKeyframeTimeGreaterThanDependant));
+ EXPECT_MEDIA_LOG(KeyframeTimeGreaterThanDependant("0.54", "0.52"));
+ EXPECT_CALL(callbacks_, OnAppend(DemuxerStream::VIDEO, _));
+ EXPECT_CALL(callbacks_, PossibleDurationIncrease(Milliseconds(550)));
+ EXPECT_TRUE(ProcessFrames("", "520|40"));
+
+ EXPECT_CALL(callbacks_, OnAppend(DemuxerStream::VIDEO, _));
+ EXPECT_CALL(callbacks_, PossibleDurationIncrease(Milliseconds(550)));
+ EXPECT_TRUE(ProcessFrames("", "530|50"));
+
+ CheckExpectedRangesByTimestamp(video_.get(), "{ [500,550) }");
+ SeekStream(video_.get(), Milliseconds(500));
+ CheckReadsThenReadStalls(video_.get(), "500 520 510 540 520 530");
+}
+
+TEST_P(FrameProcessorTest,
+ BufferingByPts_ContinuousDts_NewSap2GopEndOverlapsLastGop_4_GopByGop) {
+ // API user might craft a continuous-in-DTS-with-previous-append GOP that has
+ // PTS interval overlapping the previous append, using SAP Type 2 GOPs. Tests
+ // SAP-Type 2 GOPs, where newly appended GOP overlaps enough nonkeyframes of
+ // the previous GOP such that dropped decode dependencies might cause problems
+ // if the first nonkeyframe with PTS prior to the GOP's keyframe PTS is
+ // flushed at the same time as its keyframe.
+ if (range_api_ == ChunkDemuxerStream::RangeApi::kLegacyByDts) {
+ DVLOG(1) << "Skipping kLegacyByDts versions of this test";
+ return;
+ }
+
+ InSequence s;
+ AddTestTracks(HAS_VIDEO | OBSERVE_APPENDS_AND_GROUP_STARTS);
+ frame_processor_->SetSequenceMode(use_sequence_mode_);
+
+ // Make the sequence mode buffering appear just like segments mode to simplify
+ // this test case.
+ if (use_sequence_mode_)
+ SetTimestampOffset(Milliseconds(500));
+
+ EXPECT_CALL(callbacks_, OnGroupStart(DemuxerStream::VIDEO, DecodeTimestamp(),
+ Milliseconds(500)));
+ EXPECT_CALL(callbacks_, OnAppend(DemuxerStream::VIDEO, _));
+ EXPECT_CALL(callbacks_, PossibleDurationIncrease(Milliseconds(530)));
+ EXPECT_TRUE(ProcessFrames("", "500|0K 520|10 510|20"));
+ CheckExpectedRangesByTimestamp(video_.get(), "{ [500,530) }");
+
+ EXPECT_CALL(
+ callbacks_,
+ OnGroupStart(DemuxerStream::VIDEO,
+ DecodeTimestamp::FromPresentationTime(Milliseconds(20)),
+ Milliseconds(530)));
+ EXPECT_CALL(callbacks_,
+ OnParseWarning(
+ SourceBufferParseWarning::kKeyframeTimeGreaterThanDependant));
+ EXPECT_MEDIA_LOG(KeyframeTimeGreaterThanDependant("0.55", "0.52"));
+ EXPECT_CALL(callbacks_, OnAppend(DemuxerStream::VIDEO, _));
+ EXPECT_CALL(callbacks_, OnAppend(DemuxerStream::VIDEO, _));
+ EXPECT_CALL(callbacks_, PossibleDurationIncrease(Milliseconds(560)));
+ EXPECT_TRUE(ProcessFrames("", "550|30K 520|40 530|50 540|60"));
+
+ CheckExpectedRangesByTimestamp(video_.get(), "{ [500,560) }");
+ SeekStream(video_.get(), Milliseconds(500));
+ CheckReadsThenReadStalls(video_.get(), "500 520 510 550 520 530 540");
+}
+
+TEST_P(
+ FrameProcessorTest,
+ BufferingByPts_ContinuousDts_NewSap2GopEndOverlapsLastGop_4_FrameByFrame) {
+ // Tests that the buffered range results match the previous GopByGop test if
+ // each frame of the second GOP is explicitly appended by the app
+ // one-at-a-time.
+ if (range_api_ == ChunkDemuxerStream::RangeApi::kLegacyByDts) {
+ DVLOG(1) << "Skipping kLegacyByDts versions of this test";
+ return;
+ }
+
+ InSequence s;
+ AddTestTracks(HAS_VIDEO | OBSERVE_APPENDS_AND_GROUP_STARTS);
+ frame_processor_->SetSequenceMode(use_sequence_mode_);
+
+ // Make the sequence mode buffering appear just like segments mode to simplify
+ // this test case.
+ if (use_sequence_mode_)
+ SetTimestampOffset(Milliseconds(500));
+
+ EXPECT_CALL(callbacks_, OnGroupStart(DemuxerStream::VIDEO, DecodeTimestamp(),
+ Milliseconds(500)));
+ EXPECT_CALL(callbacks_, OnAppend(DemuxerStream::VIDEO, _));
+ EXPECT_CALL(callbacks_, PossibleDurationIncrease(Milliseconds(530)));
+ EXPECT_TRUE(ProcessFrames("", "500|0K 520|10 510|20"));
+ CheckExpectedRangesByTimestamp(video_.get(), "{ [500,530) }");
+
+ EXPECT_CALL(
+ callbacks_,
+ OnGroupStart(DemuxerStream::VIDEO,
+ DecodeTimestamp::FromPresentationTime(Milliseconds(20)),
+ Milliseconds(530)));
+ EXPECT_CALL(callbacks_, OnAppend(DemuxerStream::VIDEO, _));
+ EXPECT_CALL(callbacks_, PossibleDurationIncrease(Milliseconds(560)));
+ EXPECT_TRUE(ProcessFrames("", "550|30K"));
+
+ EXPECT_CALL(callbacks_,
+ OnParseWarning(
+ SourceBufferParseWarning::kKeyframeTimeGreaterThanDependant));
+ EXPECT_MEDIA_LOG(KeyframeTimeGreaterThanDependant("0.55", "0.52"));
+ EXPECT_CALL(callbacks_, OnAppend(DemuxerStream::VIDEO, _));
+ EXPECT_CALL(callbacks_, PossibleDurationIncrease(Milliseconds(560)));
+ EXPECT_TRUE(ProcessFrames("", "520|40"));
+
+ EXPECT_CALL(callbacks_, OnAppend(DemuxerStream::VIDEO, _));
+ EXPECT_CALL(callbacks_, PossibleDurationIncrease(Milliseconds(560)));
+ EXPECT_TRUE(ProcessFrames("", "530|50"));
+
+ EXPECT_CALL(callbacks_, OnAppend(DemuxerStream::VIDEO, _));
+ EXPECT_CALL(callbacks_, PossibleDurationIncrease(Milliseconds(560)));
+ EXPECT_TRUE(ProcessFrames("", "540|60"));
+
+ CheckExpectedRangesByTimestamp(video_.get(), "{ [500,560) }");
+ SeekStream(video_.get(), Milliseconds(500));
+ CheckReadsThenReadStalls(video_.get(), "500 520 510 550 520 530 540");
+}
+
+TEST_P(FrameProcessorTest,
BufferingByPts_ContinuousDts_GopKeyframePtsOrder_2_1_3) {
// White-box test, demonstrating expected behavior for a specially crafted
// sequence that "should" be unusual, but gracefully handled:
@@ -1553,7 +1835,7 @@ TEST_P(FrameProcessorTest,
}
InSequence s;
- AddTestTracks(HAS_VIDEO);
+ AddTestTracks(HAS_VIDEO | OBSERVE_APPENDS_AND_GROUP_STARTS);
frame_processor_->SetSequenceMode(use_sequence_mode_);
// Make the sequence mode buffering appear just like segments mode to simplify
@@ -1561,11 +1843,20 @@ TEST_P(FrameProcessorTest,
if (use_sequence_mode_)
SetTimestampOffset(Milliseconds(200));
+ EXPECT_CALL(callbacks_, OnGroupStart(DemuxerStream::VIDEO, DecodeTimestamp(),
+ Milliseconds(200)));
+ EXPECT_CALL(callbacks_, OnAppend(DemuxerStream::VIDEO, _));
EXPECT_CALL(callbacks_, PossibleDurationIncrease(Milliseconds(240)));
EXPECT_TRUE(ProcessFrames("", "200|0K 210|10 220|20 230|30"));
EXPECT_EQ(Milliseconds(0), timestamp_offset_);
CheckExpectedRangesByTimestamp(video_.get(), "{ [200,240) }");
+ EXPECT_CALL(
+ callbacks_,
+ OnGroupStart(DemuxerStream::VIDEO,
+ DecodeTimestamp::FromPresentationTime(Milliseconds(30)),
+ Milliseconds(100)));
+ EXPECT_CALL(callbacks_, OnAppend(DemuxerStream::VIDEO, _));
// TODO(wolenetz): Duration shouldn't be allowed to possibly increase to 240ms
// here. See https://crbug.com/763620.
EXPECT_CALL(callbacks_, PossibleDurationIncrease(Milliseconds(240)));
@@ -1573,6 +1864,12 @@ TEST_P(FrameProcessorTest,
EXPECT_EQ(Milliseconds(0), timestamp_offset_);
CheckExpectedRangesByTimestamp(video_.get(), "{ [100,140) [200,240) }");
+ EXPECT_CALL(
+ callbacks_,
+ OnGroupStart(DemuxerStream::VIDEO,
+ DecodeTimestamp::FromPresentationTime(Milliseconds(70)),
+ Milliseconds(140)));
+ EXPECT_CALL(callbacks_, OnAppend(DemuxerStream::VIDEO, _));
EXPECT_CALL(callbacks_, PossibleDurationIncrease(Milliseconds(260)));
EXPECT_TRUE(ProcessFrames("", "240|80K 250|90"));
EXPECT_EQ(Milliseconds(0), timestamp_offset_);
@@ -1592,7 +1889,7 @@ TEST_P(FrameProcessorTest, ContinuousPts_DiscontinuousDts_AcrossGops) {
// the append sequence is required to have monotonically increasing DTS (even
// across GOPs).
InSequence s;
- AddTestTracks(HAS_VIDEO);
+ AddTestTracks(HAS_VIDEO | OBSERVE_APPENDS_AND_GROUP_STARTS);
frame_processor_->SetSequenceMode(use_sequence_mode_);
// Make the sequence mode buffering appear just like segments mode to simplify
@@ -1600,14 +1897,25 @@ TEST_P(FrameProcessorTest, ContinuousPts_DiscontinuousDts_AcrossGops) {
if (use_sequence_mode_)
SetTimestampOffset(Milliseconds(200));
+ EXPECT_CALL(
+ callbacks_,
+ OnGroupStart(DemuxerStream::VIDEO,
+ DecodeTimestamp::FromPresentationTime(Milliseconds(200)),
+ Milliseconds(200)));
+ EXPECT_CALL(callbacks_, OnAppend(DemuxerStream::VIDEO, _));
EXPECT_CALL(callbacks_, PossibleDurationIncrease(Milliseconds(240)));
EXPECT_TRUE(ProcessFrames("", "200K 210 220 230"));
EXPECT_EQ(Milliseconds(0), timestamp_offset_);
CheckExpectedRangesByTimestamp(video_.get(), "{ [200,240) }");
+ EXPECT_CALL(
+ callbacks_,
+ OnGroupStart(DemuxerStream::VIDEO,
+ DecodeTimestamp::FromPresentationTime(Milliseconds(225)),
+ Milliseconds(240)));
+ EXPECT_CALL(callbacks_, OnAppend(DemuxerStream::VIDEO, _));
// Note that duration is reported based on PTS regardless of buffering model.
EXPECT_CALL(callbacks_, PossibleDurationIncrease(Milliseconds(280)));
-
// Append a second GOP whose first DTS is below the last DTS of the first GOP,
// but whose PTS interval is continuous with the end of the first GOP.
EXPECT_TRUE(ProcessFrames("", "240|225K 250|235 260|245 270|255"));
@@ -1625,6 +1933,108 @@ TEST_P(FrameProcessorTest, ContinuousPts_DiscontinuousDts_AcrossGops) {
}
}
+TEST_P(FrameProcessorTest, OnlyKeyframes_ContinuousDts_ContinousPts_1) {
+ // Verifies that precisely one group start and one stream append occurs for a
+ // single continuous set of frames.
+ InSequence s;
+ AddTestTracks(HAS_AUDIO | OBSERVE_APPENDS_AND_GROUP_STARTS);
+ if (use_sequence_mode_)
+ frame_processor_->SetSequenceMode(true);
+
+ // Default test frame duration is 10 milliseconds.
+
+ EXPECT_CALL(callbacks_, OnGroupStart(DemuxerStream::AUDIO, DecodeTimestamp(),
+ base::TimeDelta()));
+ EXPECT_CALL(callbacks_, OnAppend(DemuxerStream::AUDIO, _));
+ EXPECT_CALL(callbacks_, PossibleDurationIncrease(Milliseconds(40)));
+ EXPECT_TRUE(ProcessFrames("0K 10K 20K 30K", ""));
+ EXPECT_EQ(Milliseconds(0), timestamp_offset_);
+
+ CheckExpectedRangesByTimestamp(audio_.get(), "{ [0,40) }");
+ CheckReadsThenReadStalls(audio_.get(), "0 10 20 30");
+}
+
+TEST_P(FrameProcessorTest, OnlyKeyframes_ContinuousDts_ContinuousPts_2) {
+ // Verifies that precisely one group start and one stream append occurs while
+ // processing a single continuous set of frames that uses fudge room to just
+ // barely remain adjacent.
+ InSequence s;
+ AddTestTracks(HAS_AUDIO | OBSERVE_APPENDS_AND_GROUP_STARTS);
+ if (use_sequence_mode_)
+ frame_processor_->SetSequenceMode(true);
+
+ frame_duration_ = Milliseconds(5);
+
+ EXPECT_CALL(callbacks_, OnGroupStart(DemuxerStream::AUDIO, DecodeTimestamp(),
+ base::TimeDelta()));
+ EXPECT_CALL(callbacks_, OnAppend(DemuxerStream::AUDIO, _));
+ EXPECT_CALL(callbacks_, PossibleDurationIncrease(Milliseconds(35)));
+ EXPECT_TRUE(ProcessFrames("0K 10K 20K 30K", ""));
+ EXPECT_EQ(Milliseconds(0), timestamp_offset_);
+
+ CheckExpectedRangesByTimestamp(audio_.get(), "{ [0,35) }");
+ CheckReadsThenReadStalls(audio_.get(), "0 10 20 30");
+}
+
+TEST_P(FrameProcessorTest,
+ OnlyKeyframes_ContinuousDts_DiscontinuousPtsJustBeyondFudgeRoom) {
+ // Verifies that, in ByPts, multiple group starts and distinct appends occur
+ // when processing a single DTS-continuous set of frames with PTS deltas that
+ // just barely exceed the adjacency assumption in FrameProcessor.
+ // Verifies that, in ByDts, precisely one group start and one stream append
+ // occur.
+ InSequence s;
+ AddTestTracks(HAS_AUDIO | OBSERVE_APPENDS_AND_GROUP_STARTS);
+ if (use_sequence_mode_)
+ frame_processor_->SetSequenceMode(true);
+
+ frame_duration_ = base::TimeDelta::FromMicroseconds(4999);
+
+ EXPECT_CALL(callbacks_, OnGroupStart(DemuxerStream::AUDIO, DecodeTimestamp(),
+ base::TimeDelta()));
+ EXPECT_CALL(callbacks_, OnAppend(DemuxerStream::AUDIO, _));
+ if (range_api_ == ChunkDemuxerStream::RangeApi::kNewByPts) {
+ // Frame "10|5K" following "0K" triggers start of new group and eventual
+ // append.
+ EXPECT_CALL(callbacks_, OnGroupStart(DemuxerStream::AUDIO,
+ DecodeTimestamp(), frame_duration_));
+ EXPECT_CALL(callbacks_, OnAppend(DemuxerStream::AUDIO, _));
+
+ // Frame "20|10K" following "10|5K" triggers start of new group and eventual
+ // append.
+ EXPECT_CALL(
+ callbacks_,
+ OnGroupStart(DemuxerStream::AUDIO,
+ DecodeTimestamp::FromPresentationTime(Milliseconds(5)),
+ Milliseconds(10) + frame_duration_));
+ EXPECT_CALL(callbacks_, OnAppend(DemuxerStream::AUDIO, _));
+
+ // Frame "30|15K" following "20|10K" triggers start of new group and
+ // eventual append.
+ EXPECT_CALL(
+ callbacks_,
+ OnGroupStart(DemuxerStream::AUDIO,
+ DecodeTimestamp::FromPresentationTime(Milliseconds(10)),
+ Milliseconds(20) + frame_duration_));
+ EXPECT_CALL(callbacks_, OnAppend(DemuxerStream::AUDIO, _));
+ }
+ EXPECT_CALL(callbacks_, PossibleDurationIncrease(
+ base::TimeDelta::FromMicroseconds(34999)));
+ EXPECT_TRUE(ProcessFrames("0K 10|5K 20|10K 30|15K", ""));
+ EXPECT_EQ(Milliseconds(0), timestamp_offset_);
+
+ if (range_api_ == ChunkDemuxerStream::RangeApi::kNewByPts) {
+ // Note that the ByPts result is still buffered continuous since DTS was
+ // continuous and PTS was monotonically increasing (such that each group
+ // start was signalled by FrameProcessor to be continuous with the end of
+ // the previous group, if any.)
+ CheckExpectedRangesByTimestamp(audio_.get(), "{ [0,34) }");
+ } else {
+ CheckExpectedRangesByTimestamp(audio_.get(), "{ [0,19) }");
+ }
+ CheckReadsThenReadStalls(audio_.get(), "0 10 20 30");
+}
+
INSTANTIATE_TEST_CASE_P(SequenceModeLegacyByDts,
FrameProcessorTest,
Values(FrameProcessorTestParams(
diff --git a/chromium/media/filters/fuchsia/fuchsia_video_decoder.cc b/chromium/media/filters/fuchsia/fuchsia_video_decoder.cc
index 5e9b952433e..ef261f93cd4 100644
--- a/chromium/media/filters/fuchsia/fuchsia_video_decoder.cc
+++ b/chromium/media/filters/fuchsia/fuchsia_video_decoder.cc
@@ -7,13 +7,16 @@
#include <fuchsia/mediacodec/cpp/fidl.h>
#include <zircon/rights.h>
+#include "base/bind.h"
#include "base/callback_helpers.h"
#include "base/fuchsia/component_context.h"
#include "base/fuchsia/fuchsia_logging.h"
+#include "base/location.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
+#include "base/threading/sequenced_task_runner_handle.h"
#include "media/base/bind_to_current_loop.h"
#include "media/base/video_decoder.h"
#include "media/base/video_decoder_config.h"
@@ -265,18 +268,17 @@ class OutputBuffer : public base::RefCountedThreadSafe<OutputBuffer> {
class FuchsiaVideoDecoder : public VideoDecoder {
public:
- FuchsiaVideoDecoder();
+ explicit FuchsiaVideoDecoder(bool enable_sw_decoding);
~FuchsiaVideoDecoder() override;
// VideoDecoder implementation.
std::string GetDisplayName() const override;
- void Initialize(
- const VideoDecoderConfig& config,
- bool low_delay,
- CdmContext* cdm_context,
- const InitCB& init_cb,
- const OutputCB& output_cb,
- const WaitingForDecryptionKeyCB& waiting_for_decryption_key_cb) override;
+ void Initialize(const VideoDecoderConfig& config,
+ bool low_delay,
+ CdmContext* cdm_context,
+ const InitCB& init_cb,
+ const OutputCB& output_cb,
+ const WaitingCB& waiting_cb) override;
void Decode(scoped_refptr<DecoderBuffer> buffer,
const DecodeCB& decode_cb) override;
void Reset(const base::Closure& closure) override;
@@ -316,6 +318,8 @@ class FuchsiaVideoDecoder : public VideoDecoder {
uint64_t buffer_lifetime_ordinal,
uint32_t packet_index);
+ const bool enable_sw_decoding_;
+
OutputCB output_cb_;
// Aspect ratio specified in container, or 1.0 if it's not specified. This
@@ -350,7 +354,8 @@ class FuchsiaVideoDecoder : public VideoDecoder {
DISALLOW_COPY_AND_ASSIGN(FuchsiaVideoDecoder);
};
-FuchsiaVideoDecoder::FuchsiaVideoDecoder() : weak_factory_(this) {
+FuchsiaVideoDecoder::FuchsiaVideoDecoder(bool enable_sw_decoding)
+ : enable_sw_decoding_(enable_sw_decoding), weak_factory_(this) {
weak_this_ = weak_factory_.GetWeakPtr();
}
@@ -360,13 +365,12 @@ std::string FuchsiaVideoDecoder::GetDisplayName() const {
return "FuchsiaVideoDecoder";
}
-void FuchsiaVideoDecoder::Initialize(
- const VideoDecoderConfig& config,
- bool low_delay,
- CdmContext* cdm_context,
- const InitCB& init_cb,
- const OutputCB& output_cb,
- const WaitingForDecryptionKeyCB& waiting_for_decryption_key_cb) {
+void FuchsiaVideoDecoder::Initialize(const VideoDecoderConfig& config,
+ bool low_delay,
+ CdmContext* cdm_context,
+ const InitCB& init_cb,
+ const OutputCB& output_cb,
+ const WaitingCB& waiting_cb) {
output_cb_ = output_cb;
container_pixel_aspect_ratio_ = config.GetPixelAspectRatio();
@@ -398,7 +402,7 @@ void FuchsiaVideoDecoder::Initialize(
}
codec_params.promise_separate_access_units_on_input = true;
- codec_params.require_hw = true;
+ codec_params.require_hw = !enable_sw_decoding_;
auto codec_factory =
base::fuchsia::ComponentContext::GetDefault()
@@ -436,7 +440,10 @@ void FuchsiaVideoDecoder::Decode(scoped_refptr<DecoderBuffer> buffer,
GetMaxDecodeRequests());
if (!codec_) {
- decode_cb.Run(DecodeStatus::DECODE_ERROR);
+ // Post the callback to the current sequence as DecoderStream doesn't expect
+ // Decode() to complete synchronously.
+ base::SequencedTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, base::BindOnce(decode_cb, DecodeStatus::DECODE_ERROR));
return;
}
@@ -572,6 +579,22 @@ void FuchsiaVideoDecoder::OnOutputPacket(
DCHECK(layout);
break;
+ case libyuv::FOURCC_YV12:
+ layout = VideoFrameLayout::CreateWithPlanes(
+ PIXEL_FORMAT_YV12, coded_size,
+ std::vector<VideoFrameLayout::Plane>{
+ VideoFrameLayout::Plane(output_format_.primary_line_stride_bytes,
+ output_format_.primary_start_offset),
+ VideoFrameLayout::Plane(
+ output_format_.secondary_line_stride_bytes,
+ output_format_.secondary_start_offset),
+ VideoFrameLayout::Plane(
+ output_format_.secondary_line_stride_bytes,
+ output_format_.tertiary_start_offset),
+ });
+ DCHECK(layout);
+ break;
+
default:
LOG(ERROR) << "unknown fourcc: "
<< std::string(reinterpret_cast<char*>(&output_format_.fourcc),
@@ -738,6 +761,7 @@ void FuchsiaVideoDecoder::PumpInput() {
fuchsia::mediacodec::CodecPacket packet;
packet.header.buffer_lifetime_ordinal = input_buffer_lifetime_ordinal_;
packet.header.packet_index = input_buffer - input_buffers_.begin();
+ packet.buffer_index = packet.header.packet_index;
packet.has_timestamp_ish = true;
packet.timestamp_ish =
pending_decodes_.front().buffer().timestamp().InNanoseconds();
@@ -807,7 +831,12 @@ void FuchsiaVideoDecoder::OnFrameDestroyed(scoped_refptr<OutputBuffer> buffer,
}
std::unique_ptr<VideoDecoder> CreateFuchsiaVideoDecoder() {
- return std::make_unique<FuchsiaVideoDecoder>();
+ return std::make_unique<FuchsiaVideoDecoder>(/*enable_sw_decoding=*/false);
+}
+
+std::unique_ptr<VideoDecoder> CreateFuchsiaVideoDecoderForTests(
+ bool enable_sw_decoding) {
+ return std::make_unique<FuchsiaVideoDecoder>(enable_sw_decoding);
}
} // namespace media
diff --git a/chromium/media/filters/fuchsia/fuchsia_video_decoder.h b/chromium/media/filters/fuchsia/fuchsia_video_decoder.h
index d9c37d05c99..52f164ee129 100644
--- a/chromium/media/filters/fuchsia/fuchsia_video_decoder.h
+++ b/chromium/media/filters/fuchsia/fuchsia_video_decoder.h
@@ -13,9 +13,16 @@ namespace media {
class VideoDecoder;
-// Creates VideoDecoder that uses fuchsia.mediacodec API.
+// Creates VideoDecoder that uses fuchsia.mediacodec API. The returned
+// VideoDecoder instance will only try to use hardware video codecs.
MEDIA_EXPORT std::unique_ptr<VideoDecoder> CreateFuchsiaVideoDecoder();
+// Same as above, but also allows enabling software codecs. This is useful for
+// FuchsiaVideoDecoder tests that run on systems that don't have hardware
+// decoder support.
+MEDIA_EXPORT std::unique_ptr<VideoDecoder> CreateFuchsiaVideoDecoderForTests(
+ bool enable_sw_decoding);
+
} // namespace media
#endif // MEDIA_FILTERS_FUCHSIA_FUCHSIA_VIDEO_DECODER_H_
diff --git a/chromium/media/filters/fuchsia/fuchsia_video_decoder_unittest.cc b/chromium/media/filters/fuchsia/fuchsia_video_decoder_unittest.cc
index c8ec6dd01a6..4e6adad56b4 100644
--- a/chromium/media/filters/fuchsia/fuchsia_video_decoder_unittest.cc
+++ b/chromium/media/filters/fuchsia/fuchsia_video_decoder_unittest.cc
@@ -5,6 +5,7 @@
#include "media/filters/fuchsia/fuchsia_video_decoder.h"
#include "base/bind.h"
+#include "base/bind_helpers.h"
#include "base/message_loop/message_loop.h"
#include "media/base/test_data_util.h"
#include "media/base/test_helpers.h"
@@ -16,7 +17,9 @@ namespace media {
class FuchsiaVideoDecoderTest : public testing::Test {
public:
- FuchsiaVideoDecoderTest() { decoder_ = CreateFuchsiaVideoDecoder(); }
+ FuchsiaVideoDecoderTest() {
+ decoder_ = CreateFuchsiaVideoDecoderForTests(/*enable_sw_decoding=*/true);
+ }
~FuchsiaVideoDecoderTest() override = default;
bool Initialize(VideoDecoderConfig config) WARN_UNUSED_RESULT {
@@ -32,7 +35,7 @@ class FuchsiaVideoDecoderTest : public testing::Test {
&init_cb_result, &run_loop),
base::BindRepeating(&FuchsiaVideoDecoderTest::OnVideoFrame,
base::Unretained(this)),
- VideoDecoder::WaitingForDecryptionKeyCB());
+ base::NullCallback());
run_loop.Run();
return init_cb_result;
@@ -82,11 +85,9 @@ class FuchsiaVideoDecoderTest : public testing::Test {
base::RepeatingClosure on_frame_;
};
-// All tests are disabled because they currently depend on HW decoder that
-// doesn't work on test bots.
-TEST_F(FuchsiaVideoDecoderTest, DISABLED_CreateAndDestroy) {}
+TEST_F(FuchsiaVideoDecoderTest, CreateAndDestroy) {}
-TEST_F(FuchsiaVideoDecoderTest, DISABLED_CreateInitDestroy) {
+TEST_F(FuchsiaVideoDecoderTest, CreateInitDestroy) {
EXPECT_TRUE(Initialize(TestVideoConfig::NormalH264()));
}
@@ -100,7 +101,7 @@ TEST_F(FuchsiaVideoDecoderTest, DISABLED_VP9) {
EXPECT_EQ(num_output_frames_, 1);
}
-TEST_F(FuchsiaVideoDecoderTest, DISABLED_H264) {
+TEST_F(FuchsiaVideoDecoderTest, H264) {
ASSERT_TRUE(Initialize(TestVideoConfig::NormalH264()));
ASSERT_TRUE(ReadAndDecodeFrame("h264-320x180-frame-0") == DecodeStatus::OK);
@@ -113,4 +114,4 @@ TEST_F(FuchsiaVideoDecoderTest, DISABLED_H264) {
EXPECT_EQ(num_output_frames_, 4);
}
-} // namespace media \ No newline at end of file
+} // namespace media
diff --git a/chromium/media/filters/gpu_video_decoder.cc b/chromium/media/filters/gpu_video_decoder.cc
index b9155ef826e..71789da8ff4 100644
--- a/chromium/media/filters/gpu_video_decoder.cc
+++ b/chromium/media/filters/gpu_video_decoder.cc
@@ -6,6 +6,7 @@
#include <algorithm>
#include <array>
+#include <cinttypes>
#include <utility>
#include "base/bind.h"
@@ -17,9 +18,13 @@
#include "base/metrics/histogram_macros.h"
#include "base/single_thread_task_runner.h"
#include "base/stl_util.h"
+#include "base/strings/stringprintf.h"
#include "base/task_runner_util.h"
#include "base/threading/thread_task_runner_handle.h"
+#include "base/trace_event/memory_dump_manager.h"
+#include "base/trace_event/process_memory_dump.h"
#include "build/build_config.h"
+#include "gpu/command_buffer/client/context_support.h"
#include "gpu/command_buffer/common/mailbox_holder.h"
#include "media/base/bind_to_current_loop.h"
#include "media/base/cdm_context.h"
@@ -31,6 +36,7 @@
#include "media/base/video_util.h"
#include "media/media_buildflags.h"
#include "media/video/gpu_video_accelerator_factories.h"
+#include "media/video/trace_util.h"
#include "third_party/skia/include/core/SkBitmap.h"
#if defined(OS_ANDROID) && BUILDFLAG(USE_PROPRIETARY_CODECS)
@@ -91,6 +97,8 @@ GpuVideoDecoder::GpuVideoDecoder(
bitstream_buffer_id_of_last_gc_(0),
weak_factory_(this) {
DCHECK(factories_);
+ base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
+ this, "media::GpuVideoDecoder", base::ThreadTaskRunnerHandle::Get());
}
void GpuVideoDecoder::Reset(const base::Closure& closure) {
@@ -152,13 +160,12 @@ std::string GpuVideoDecoder::GetDisplayName() const {
return kDecoderName;
}
-void GpuVideoDecoder::Initialize(
- const VideoDecoderConfig& config,
- bool /* low_delay */,
- CdmContext* cdm_context,
- const InitCB& init_cb,
- const OutputCB& output_cb,
- const WaitingForDecryptionKeyCB& /* waiting_for_decryption_key_cb */) {
+void GpuVideoDecoder::Initialize(const VideoDecoderConfig& config,
+ bool /* low_delay */,
+ CdmContext* cdm_context,
+ const InitCB& init_cb,
+ const OutputCB& output_cb,
+ const WaitingCB& /* waiting_cb */) {
DVLOG(3) << "Initialize()";
DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
DCHECK(config.IsValidConfig());
@@ -815,6 +822,9 @@ GpuVideoDecoder::~GpuVideoDecoder() {
DVLOG(3) << __func__;
DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
+ base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider(
+ this);
+
if (vda_)
DestroyVDA();
DCHECK(assigned_picture_buffers_.empty());
@@ -890,6 +900,44 @@ void GpuVideoDecoder::NotifyError(media::VideoDecodeAccelerator::Error error) {
DestroyVDA();
}
+bool GpuVideoDecoder::OnMemoryDump(
+ const base::trace_event::MemoryDumpArgs& args,
+ base::trace_event::ProcessMemoryDump* pmd) {
+ using base::trace_event::MemoryAllocatorDump;
+ DCheckGpuVideoAcceleratorFactoriesTaskRunnerIsCurrent();
+ if (assigned_picture_buffers_.empty())
+ return false;
+
+ if (!factories_)
+ return false;
+ auto* context_support = factories_->GetMediaContextProviderContextSupport();
+ if (!context_support)
+ return false;
+ const uint64_t context_group_tracing_id =
+ context_support->ShareGroupTracingGUID();
+
+ for (const auto& picture_buffer : assigned_picture_buffers_) {
+ PictureBuffer::TextureIds texture_ids =
+ picture_buffer.second.client_texture_ids();
+
+ for (uint32_t id : texture_ids) {
+ const auto dump_name = base::StringPrintf(
+ "gpu/video_decoding/context_group_0x%" PRIx64 "/texture_0x%" PRIX32,
+ context_group_tracing_id, id);
+ MemoryAllocatorDump* dump = pmd->CreateAllocatorDump(dump_name);
+ dump->AddScalar(
+ MemoryAllocatorDump::kNameSize, MemoryAllocatorDump::kUnitsBytes,
+ static_cast<uint64_t>(picture_buffer.second.size().GetArea() * 4));
+
+ const auto client_guid =
+ GetGLTextureClientGUIDForTracing(context_group_tracing_id, id);
+ pmd->CreateSharedGlobalAllocatorDump(client_guid);
+ pmd->AddOwnershipEdge(dump->guid(), client_guid, 2 /* importance */);
+ }
+ }
+ return true;
+}
+
bool GpuVideoDecoder::IsProfileSupported(
const VideoDecodeAccelerator::Capabilities& capabilities,
VideoCodecProfile profile,
diff --git a/chromium/media/filters/gpu_video_decoder.h b/chromium/media/filters/gpu_video_decoder.h
index 74a4563ada6..1f2ae7f0e2f 100644
--- a/chromium/media/filters/gpu_video_decoder.h
+++ b/chromium/media/filters/gpu_video_decoder.h
@@ -17,6 +17,7 @@
#include "base/containers/flat_set.h"
#include "base/macros.h"
#include "base/memory/weak_ptr.h"
+#include "base/trace_event/memory_dump_provider.h"
#include "gpu/command_buffer/common/sync_token.h"
#include "media/base/overlay_info.h"
#include "media/base/pipeline_status.h"
@@ -45,7 +46,8 @@ class MediaLog;
// GetMessageLoop().
class MEDIA_EXPORT GpuVideoDecoder
: public VideoDecoder,
- public VideoDecodeAccelerator::Client {
+ public VideoDecodeAccelerator::Client,
+ public base::trace_event::MemoryDumpProvider {
public:
GpuVideoDecoder(GpuVideoAcceleratorFactories* factories,
const RequestOverlayInfoCB& request_overlay_info_cb,
@@ -56,13 +58,12 @@ class MEDIA_EXPORT GpuVideoDecoder
// VideoDecoder implementation.
std::string GetDisplayName() const override;
bool IsPlatformDecoder() const override;
- void Initialize(
- const VideoDecoderConfig& config,
- bool low_delay,
- CdmContext* cdm_context,
- const InitCB& init_cb,
- const OutputCB& output_cb,
- const WaitingForDecryptionKeyCB& waiting_for_decryption_key_cb) override;
+ void Initialize(const VideoDecoderConfig& config,
+ bool low_delay,
+ CdmContext* cdm_context,
+ const InitCB& init_cb,
+ const OutputCB& output_cb,
+ const WaitingCB& waiting_cb) override;
void Decode(scoped_refptr<DecoderBuffer> buffer,
const DecodeCB& decode_cb) override;
void Reset(const base::Closure& closure) override;
@@ -84,6 +85,10 @@ class MEDIA_EXPORT GpuVideoDecoder
void NotifyResetDone() override;
void NotifyError(media::VideoDecodeAccelerator::Error error) override;
+ // base::trace_event::MemoryDumpProvider implementation.
+ bool OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
+ base::trace_event::ProcessMemoryDump* pmd) override;
+
static const char kDecoderName[];
private:
diff --git a/chromium/media/filters/ivf_parser.cc b/chromium/media/filters/ivf_parser.cc
index b6160fb533b..8361088b040 100644
--- a/chromium/media/filters/ivf_parser.cc
+++ b/chromium/media/filters/ivf_parser.cc
@@ -2,9 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "media/filters/ivf_parser.h"
+
#include "base/logging.h"
+#include "base/numerics/safe_conversions.h"
#include "base/sys_byteorder.h"
-#include "media/filters/ivf_parser.h"
namespace media {
@@ -34,6 +36,7 @@ bool IvfParser::Initialize(const uint8_t* stream,
DCHECK(file_header);
ptr_ = stream;
end_ = stream + size;
+ CHECK_GE(end_, ptr_);
if (size < sizeof(IvfFileHeader)) {
DLOG(ERROR) << "EOF before file header";
@@ -65,8 +68,9 @@ bool IvfParser::ParseNextFrame(IvfFrameHeader* frame_header,
const uint8_t** payload) {
DCHECK(ptr_);
DCHECK(payload);
+ CHECK_GE(end_, ptr_);
- if (end_ < ptr_ + sizeof(IvfFrameHeader)) {
+ if (base::checked_cast<size_t>(end_ - ptr_) < sizeof(IvfFrameHeader)) {
DLOG_IF(ERROR, ptr_ != end_) << "Incomplete frame header";
return false;
}
@@ -75,7 +79,7 @@ bool IvfParser::ParseNextFrame(IvfFrameHeader* frame_header,
frame_header->ByteSwap();
ptr_ += sizeof(IvfFrameHeader);
- if (end_ < ptr_ + frame_header->frame_size) {
+ if (base::checked_cast<uint32_t>(end_ - ptr_) < frame_header->frame_size) {
DLOG(ERROR) << "Not enough frame data";
return false;
}
diff --git a/chromium/media/filters/jpeg_parser.cc b/chromium/media/filters/jpeg_parser.cc
index f17f16b25d5..009d449d036 100644
--- a/chromium/media/filters/jpeg_parser.cc
+++ b/chromium/media/filters/jpeg_parser.cc
@@ -6,7 +6,7 @@
#include "base/big_endian.h"
#include "base/logging.h"
-#include "base/macros.h"
+#include "base/stl_util.h"
using base::BigEndianReader;
@@ -149,7 +149,7 @@ static bool ParseSOF(const char* buffer,
return false;
}
if (!InRange(frame_header->num_components, 1,
- arraysize(frame_header->components))) {
+ base::size(frame_header->components))) {
DLOG(ERROR) << "num_components="
<< static_cast<int>(frame_header->num_components)
<< " is not supported";
@@ -268,7 +268,7 @@ static bool ParseDHT(const char* buffer,
size_t count = 0;
if (!reader.ReadBytes(&table->code_length, sizeof(table->code_length)))
return false;
- for (size_t i = 0; i < arraysize(table->code_length); i++)
+ for (size_t i = 0; i < base::size(table->code_length); i++)
count += table->code_length[i];
if (!InRange(count, 0, sizeof(table->code_value))) {
diff --git a/chromium/media/filters/memory_data_source.cc b/chromium/media/filters/memory_data_source.cc
index 2237adf4e13..edf3ab18173 100644
--- a/chromium/media/filters/memory_data_source.cc
+++ b/chromium/media/filters/memory_data_source.cc
@@ -10,6 +10,11 @@
namespace media {
+MemoryDataSource::MemoryDataSource(std::string data)
+ : data_string_(std::move(data)),
+ data_(reinterpret_cast<const uint8_t*>(data_string_.data())),
+ size_(data_string_.size()) {}
+
MemoryDataSource::MemoryDataSource(const uint8_t* data, size_t size)
: data_(data), size_(size) {}
diff --git a/chromium/media/filters/memory_data_source.h b/chromium/media/filters/memory_data_source.h
index b10f553cb96..b2a6c9feb15 100644
--- a/chromium/media/filters/memory_data_source.h
+++ b/chromium/media/filters/memory_data_source.h
@@ -19,6 +19,10 @@ class MEDIA_EXPORT MemoryDataSource : public DataSource {
// Construct MemoryDataSource with |data| and |size|. The data is guaranteed
// to be valid during the lifetime of MemoryDataSource.
MemoryDataSource(const uint8_t* data, size_t size);
+
+ // Similar to the above, but takes ownership of the std::string.
+ explicit MemoryDataSource(std::string data);
+
~MemoryDataSource() final;
// Implementation of DataSource.
@@ -33,8 +37,9 @@ class MEDIA_EXPORT MemoryDataSource : public DataSource {
void SetBitrate(int bitrate) final;
private:
+ const std::string data_string_;
const uint8_t* data_ = nullptr;
- size_t size_ = 0;
+ const size_t size_ = 0;
bool is_stopped_ = false;
diff --git a/chromium/media/filters/offloading_video_decoder.cc b/chromium/media/filters/offloading_video_decoder.cc
index da6f30fff1f..51da9b5483d 100644
--- a/chromium/media/filters/offloading_video_decoder.cc
+++ b/chromium/media/filters/offloading_video_decoder.cc
@@ -79,13 +79,12 @@ std::string OffloadingVideoDecoder::GetDisplayName() const {
return helper_->decoder()->GetDisplayName();
}
-void OffloadingVideoDecoder::Initialize(
- const VideoDecoderConfig& config,
- bool low_delay,
- CdmContext* cdm_context,
- const InitCB& init_cb,
- const OutputCB& output_cb,
- const WaitingForDecryptionKeyCB& waiting_for_decryption_key_cb) {
+void OffloadingVideoDecoder::Initialize(const VideoDecoderConfig& config,
+ bool low_delay,
+ CdmContext* cdm_context,
+ const InitCB& init_cb,
+ const OutputCB& output_cb,
+ const WaitingCB& waiting_cb) {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
DCHECK(config.IsValidConfig());
@@ -109,8 +108,7 @@ void OffloadingVideoDecoder::Initialize(
// possible for this class to be destroyed during Initialize().
base::BindOnce(&OffloadingVideoDecoder::Initialize,
weak_factory_.GetWeakPtr(), config, low_delay,
- cdm_context, init_cb, output_cb,
- waiting_for_decryption_key_cb));
+ cdm_context, init_cb, output_cb, waiting_cb));
return;
}
@@ -132,8 +130,7 @@ void OffloadingVideoDecoder::Initialize(
if (disable_offloading) {
offload_task_runner_ = nullptr;
helper_->decoder()->Initialize(config, low_delay, cdm_context,
- bound_init_cb, bound_output_cb,
- waiting_for_decryption_key_cb);
+ bound_init_cb, bound_output_cb, waiting_cb);
return;
}
@@ -146,8 +143,7 @@ void OffloadingVideoDecoder::Initialize(
FROM_HERE,
base::BindOnce(&OffloadableVideoDecoder::Initialize,
base::Unretained(helper_->decoder()), config, low_delay,
- cdm_context, bound_init_cb, bound_output_cb,
- waiting_for_decryption_key_cb));
+ cdm_context, bound_init_cb, bound_output_cb, waiting_cb));
}
void OffloadingVideoDecoder::Decode(scoped_refptr<DecoderBuffer> buffer,
diff --git a/chromium/media/filters/offloading_video_decoder.h b/chromium/media/filters/offloading_video_decoder.h
index 351fe7eb24f..2f02d734381 100644
--- a/chromium/media/filters/offloading_video_decoder.h
+++ b/chromium/media/filters/offloading_video_decoder.h
@@ -79,13 +79,12 @@ class MEDIA_EXPORT OffloadingVideoDecoder : public VideoDecoder {
// VideoDecoder implementation.
std::string GetDisplayName() const override;
- void Initialize(
- const VideoDecoderConfig& config,
- bool low_delay,
- CdmContext* cdm_context,
- const InitCB& init_cb,
- const OutputCB& output_cb,
- const WaitingForDecryptionKeyCB& waiting_for_decryption_key_cb) override;
+ void Initialize(const VideoDecoderConfig& config,
+ bool low_delay,
+ CdmContext* cdm_context,
+ const InitCB& init_cb,
+ const OutputCB& output_cb,
+ const WaitingCB& waiting_cb) override;
void Decode(scoped_refptr<DecoderBuffer> buffer,
const DecodeCB& decode_cb) override;
void Reset(const base::Closure& reset_cb) override;
diff --git a/chromium/media/filters/offloading_video_decoder_unittest.cc b/chromium/media/filters/offloading_video_decoder_unittest.cc
index 2dc14e44992..a79f524df18 100644
--- a/chromium/media/filters/offloading_video_decoder_unittest.cc
+++ b/chromium/media/filters/offloading_video_decoder_unittest.cc
@@ -36,14 +36,13 @@ class MockOffloadableVideoDecoder : public OffloadableVideoDecoder {
std::string GetDisplayName() const override {
return "MockOffloadableVideoDecoder";
}
- MOCK_METHOD6(
- Initialize,
- void(const VideoDecoderConfig& config,
- bool low_delay,
- CdmContext* cdm_context,
- const InitCB& init_cb,
- const OutputCB& output_cb,
- const WaitingForDecryptionKeyCB& waiting_for_decryption_key_cb));
+ MOCK_METHOD6(Initialize,
+ void(const VideoDecoderConfig& config,
+ bool low_delay,
+ CdmContext* cdm_context,
+ const InitCB& init_cb,
+ const OutputCB& output_cb,
+ const WaitingCB& waiting_cb));
MOCK_METHOD2(Decode,
void(scoped_refptr<DecoderBuffer> buffer, const DecodeCB&));
MOCK_METHOD1(Reset, void(const base::Closure&));
diff --git a/chromium/media/filters/pipeline_controller.cc b/chromium/media/filters/pipeline_controller.cc
index 4627b84bc91..bd05ebdc78a 100644
--- a/chromium/media/filters/pipeline_controller.cc
+++ b/chromium/media/filters/pipeline_controller.cc
@@ -107,6 +107,38 @@ void PipelineController::Resume() {
}
}
+void PipelineController::OnDecoderStateLost() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ // Note: |time_updated| and |pending_seeked_cb_| are both false.
+ pending_seek_except_start_ = true;
+
+ // If we are already seeking or resuming, or if there's already a seek
+ // pending,elide the seek. This is okay for decoder state lost since it just
+ // needs one seek to recover (the decoder is reset and the next decode starts
+ // from a key frame).
+ //
+ // Note on potential race condition: When the seek is elided, it's possible
+ // that the decoder state loss happens before or after the previous seek
+ // (decoder Reset()):
+ // 1. Decoder state loss happens before Decoder::Reset() during the previous
+ // seek. In this case we are fine since we just need a Reset().
+ // 2. Decoder state loss happens after Decoder::Reset() during a previous
+ // seek:
+ // 2.1 If state loss happens before any Decode() we are still fine, since the
+ // decoder is in a clean state.
+ // 2.2 If state loss happens after a Decode(), then here we should not be in
+ // the SEEKING state.
+ if (state_ == State::SEEKING || state_ == State::RESUMING || pending_seek_)
+ return;
+
+ // Force a seek to the current time.
+ pending_seek_time_ = pipeline_->GetMediaTime();
+ pending_seek_ = true;
+
+ Dispatch();
+}
+
bool PipelineController::IsStable() {
DCHECK(thread_checker_.CalledOnValidThread());
return state_ == State::PLAYING;
diff --git a/chromium/media/filters/pipeline_controller.h b/chromium/media/filters/pipeline_controller.h
index 92a420a079f..48213049933 100644
--- a/chromium/media/filters/pipeline_controller.h
+++ b/chromium/media/filters/pipeline_controller.h
@@ -102,6 +102,10 @@ class MEDIA_EXPORT PipelineController {
// been suspended.
void Resume();
+ // Called when a decoder in the pipeline lost its state. This requires a seek
+ // so that the decoder can start from a new key frame.
+ void OnDecoderStateLost();
+
// Returns true if the current state is stable. This means that |state_| is
// PLAYING and there are no pending operations. Requests are processed
// immediately when the state is stable, otherwise they are queued.
@@ -193,7 +197,8 @@ class MEDIA_EXPORT PipelineController {
// issued at the next stable state.
bool pending_seeked_cb_ = false;
- // Indicates that a seek has occurred from an explicit call to Seek().
+ // Indicates that a seek has occurred from an explicit call to Seek() or
+ // OnDecoderStateLost().
bool pending_seek_except_start_ = false;
// Indicates that time has been changed by a seek, which will be reported at
diff --git a/chromium/media/filters/pipeline_controller_unittest.cc b/chromium/media/filters/pipeline_controller_unittest.cc
index 36aabf5c3ef..bbf3e237a58 100644
--- a/chromium/media/filters/pipeline_controller_unittest.cc
+++ b/chromium/media/filters/pipeline_controller_unittest.cc
@@ -147,7 +147,7 @@ class PipelineControllerTest : public ::testing::Test, public Pipeline::Client {
void OnDurationChange() override {}
void OnAddTextTrack(const TextTrackConfig& config,
const AddTextTrackDoneCB& done_cb) override {}
- void OnWaitingForDecryptionKey() override {}
+ void OnWaiting(WaitingReason reason) override {}
void OnVideoNaturalSizeChange(const gfx::Size& size) override {}
void OnAudioConfigChange(const AudioDecoderConfig& config) override {}
void OnVideoConfigChange(const VideoDecoderConfig& config) override {}
@@ -155,6 +155,7 @@ class PipelineControllerTest : public ::testing::Test, public Pipeline::Client {
void OnVideoAverageKeyframeDistanceUpdate() override {}
void OnAudioDecoderChange(const std::string& name) override {}
void OnVideoDecoderChange(const std::string& name) override {}
+ void OnRemotePlayStateChange(MediaStatus::State state) override {}
base::MessageLoop message_loop_;
@@ -253,6 +254,40 @@ TEST_F(PipelineControllerTest, Seek) {
EXPECT_TRUE(pipeline_controller_.IsStable());
}
+// Makes sure OnDecoderStateLost() triggers a seek to the current media time.
+TEST_F(PipelineControllerTest, DecoderStateLost) {
+ Complete(StartPipeline());
+
+ constexpr auto kCurrentMediaTime = base::TimeDelta::FromSeconds(7);
+ EXPECT_CALL(*pipeline_, GetMediaTime())
+ .WillRepeatedly(Return(kCurrentMediaTime));
+
+ EXPECT_CALL(demuxer_, StartWaitingForSeek(kCurrentMediaTime));
+ EXPECT_CALL(*pipeline_, Seek(kCurrentMediaTime, _));
+
+ pipeline_controller_.OnDecoderStateLost();
+ base::RunLoop().RunUntilIdle();
+}
+
+// Makes sure OnDecoderStateLost() does not trigger a seek during pending seek.
+TEST_F(PipelineControllerTest, DecoderStateLost_DuringPendingSeek) {
+ Complete(StartPipeline());
+
+ // Create a pending seek.
+ base::TimeDelta kSeekTime = base::TimeDelta::FromSeconds(5);
+ EXPECT_CALL(demuxer_, StartWaitingForSeek(kSeekTime));
+ PipelineStatusCB seek_cb = SeekPipeline(kSeekTime);
+ base::RunLoop().RunUntilIdle();
+ Mock::VerifyAndClear(&demuxer_);
+
+ // OnDecoderStateLost() should not trigger another seek.
+ EXPECT_CALL(*pipeline_, GetMediaTime()).Times(0);
+ pipeline_controller_.OnDecoderStateLost();
+ base::RunLoop().RunUntilIdle();
+
+ Complete(seek_cb);
+}
+
TEST_F(PipelineControllerTest, SuspendResumeTime) {
Complete(StartPipeline());
Complete(SuspendPipeline());
diff --git a/chromium/media/filters/stream_parser_factory.cc b/chromium/media/filters/stream_parser_factory.cc
index bf2199b6560..a00e356231b 100644
--- a/chromium/media/filters/stream_parser_factory.cc
+++ b/chromium/media/filters/stream_parser_factory.cc
@@ -7,8 +7,8 @@
#include <stddef.h>
#include "base/command_line.h"
-#include "base/macros.h"
#include "base/metrics/histogram_macros.h"
+#include "base/stl_util.h"
#include "base/strings/pattern.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
@@ -16,6 +16,7 @@
#include "build/build_config.h"
#include "media/base/media.h"
#include "media/base/media_switches.h"
+#include "media/base/media_util.h"
#include "media/base/video_codecs.h"
#include "media/formats/mp4/mp4_stream_parser.h"
#include "media/formats/mpeg/adts_stream_parser.h"
@@ -446,7 +447,7 @@ static bool CheckTypeAndCodecs(
std::vector<CodecInfo::HistogramTag>* audio_codecs,
std::vector<CodecInfo::HistogramTag>* video_codecs) {
// Search for the SupportedTypeInfo for |type|.
- for (size_t i = 0; i < arraysize(kSupportedTypeInfo); ++i) {
+ for (size_t i = 0; i < base::size(kSupportedTypeInfo); ++i) {
const SupportedTypeInfo& type_info = kSupportedTypeInfo[i];
if (type == type_info.type) {
if (codecs.empty()) {
@@ -503,7 +504,7 @@ bool StreamParserFactory::IsTypeSupported(
const std::string& type,
const std::vector<std::string>& codecs) {
// TODO(wolenetz): Questionable MediaLog usage, http://crbug.com/712310
- MediaLog media_log;
+ NullMediaLog media_log;
return CheckTypeAndCodecs(type, codecs, &media_log, nullptr, nullptr,
nullptr);
}
diff --git a/chromium/media/filters/video_decoder_stream_unittest.cc b/chromium/media/filters/video_decoder_stream_unittest.cc
index f1175f7df56..3dc02e3ebc6 100644
--- a/chromium/media/filters/video_decoder_stream_unittest.cc
+++ b/chromium/media/filters/video_decoder_stream_unittest.cc
@@ -249,7 +249,7 @@ class VideoDecoderStreamTest
decoder_ = static_cast<FakeVideoDecoder*>(decoder);
}
- MOCK_METHOD0(OnWaitingForDecryptionKey, void(void));
+ MOCK_METHOD1(OnWaiting, void(WaitingReason));
void OnStatistics(const PipelineStatistics& statistics) {
num_decoded_bytes_unreported_ -= statistics.video_bytes_decoded;
@@ -275,7 +275,7 @@ class VideoDecoderStreamTest
cdm_context_.get(),
base::BindRepeating(&VideoDecoderStreamTest::OnStatistics,
base::Unretained(this)),
- base::BindRepeating(&VideoDecoderStreamTest::OnWaitingForDecryptionKey,
+ base::BindRepeating(&VideoDecoderStreamTest::OnWaiting,
base::Unretained(this)));
base::RunLoop().RunUntilIdle();
}
@@ -374,7 +374,7 @@ class VideoDecoderStreamTest
case DECRYPTOR_NO_KEY:
if (GetParam().is_encrypted && GetParam().has_decryptor) {
EXPECT_MEDIA_LOG(HasSubstr("no key for key ID"));
- EXPECT_CALL(*this, OnWaitingForDecryptionKey());
+ EXPECT_CALL(*this, OnWaiting(WaitingReason::kNoDecryptionKey));
has_no_key_ = true;
}
ReadOneFrame();
diff --git a/chromium/media/filters/video_renderer_algorithm_unittest.cc b/chromium/media/filters/video_renderer_algorithm_unittest.cc
index ee84f83d2d0..6ae205cf6e1 100644
--- a/chromium/media/filters/video_renderer_algorithm_unittest.cc
+++ b/chromium/media/filters/video_renderer_algorithm_unittest.cc
@@ -10,12 +10,12 @@
#include "base/bind.h"
#include "base/bind_helpers.h"
-#include "base/macros.h"
#include "base/memory/ref_counted.h"
+#include "base/stl_util.h"
#include "base/strings/stringprintf.h"
#include "base/test/simple_test_tick_clock.h"
#include "build/build_config.h"
-#include "media/base/media_log.h"
+#include "media/base/media_util.h"
#include "media/base/timestamp_constants.h"
#include "media/base/video_frame_pool.h"
#include "media/base/wall_clock_time_source.h"
@@ -333,7 +333,7 @@ class VideoRendererAlgorithmTest : public testing::Test {
}
protected:
- MediaLog media_log_;
+ NullMediaLog media_log_;
VideoFramePool frame_pool_;
std::unique_ptr<base::SimpleTestTickClock> tick_clock_;
WallClockTimeSource time_source_;
@@ -1389,10 +1389,10 @@ TEST_F(VideoRendererAlgorithmTest, VariablePlaybackRateCadence) {
TickGenerator display_tg(tick_clock_->NowTicks(), 60);
const double kTestRates[] = {1.0, 2, 0.215, 0.5, 1.0, 3.15};
- const bool kTestRateHasCadence[arraysize(kTestRates)] = {true, true, true,
- true, true, false};
+ const bool kTestRateHasCadence[base::size(kTestRates)] = {true, true, true,
+ true, true, false};
- for (size_t i = 0; i < arraysize(kTestRates); ++i) {
+ for (size_t i = 0; i < base::size(kTestRates); ++i) {
const double playback_rate = kTestRates[i];
SCOPED_TRACE(base::StringPrintf("Playback Rate: %.03f", playback_rate));
time_source_.SetPlaybackRate(playback_rate);
@@ -1423,11 +1423,11 @@ TEST_F(VideoRendererAlgorithmTest, UglyTimestampsHaveCadence) {
// Run throught ~1.6 seconds worth of frames.
bool cadence_detected = false;
base::TimeDelta timestamp;
- for (size_t i = 0; i < arraysize(kBadTimestampsMs) * 2; ++i) {
+ for (size_t i = 0; i < base::size(kBadTimestampsMs) * 2; ++i) {
while (EffectiveFramesQueued() < 3) {
algorithm_.EnqueueFrame(CreateFrame(timestamp));
timestamp += base::TimeDelta::FromMilliseconds(
- kBadTimestampsMs[i % arraysize(kBadTimestampsMs)]);
+ kBadTimestampsMs[i % base::size(kBadTimestampsMs)]);
}
size_t frames_dropped = 0;
@@ -1458,11 +1458,11 @@ TEST_F(VideoRendererAlgorithmTest, VariableFrameRateNoCadence) {
bool cadence_detected = false;
bool cadence_turned_off = false;
base::TimeDelta timestamp;
- for (size_t i = 0; i < arraysize(kBadTimestampsMs);) {
+ for (size_t i = 0; i < base::size(kBadTimestampsMs);) {
while (EffectiveFramesQueued() < 3) {
algorithm_.EnqueueFrame(CreateFrame(timestamp));
timestamp += base::TimeDelta::FromMilliseconds(
- kBadTimestampsMs[i % arraysize(kBadTimestampsMs)]);
+ kBadTimestampsMs[i % base::size(kBadTimestampsMs)]);
++i;
}
diff --git a/chromium/media/filters/vp9_bool_decoder.cc b/chromium/media/filters/vp9_bool_decoder.cc
index 7700b9844c6..28d4e53ed5f 100644
--- a/chromium/media/filters/vp9_bool_decoder.cc
+++ b/chromium/media/filters/vp9_bool_decoder.cc
@@ -7,6 +7,7 @@
#include <algorithm>
#include "base/logging.h"
+#include "base/stl_util.h"
#include "media/base/bit_reader.h"
namespace media {
@@ -111,7 +112,7 @@ bool Vp9BoolDecoder::ReadBool(int prob) {
// Need to fill |count| bits next time in order to make |bool_range_| >=
// 128.
- DCHECK_LT(bool_range_, arraysize(kCountToShiftTo128));
+ DCHECK_LT(bool_range_, base::size(kCountToShiftTo128));
DCHECK_GT(bool_range_, 0u);
int count = kCountToShiftTo128[bool_range_];
bool_range_ <<= count;
diff --git a/chromium/media/filters/vp9_compressed_header_parser.cc b/chromium/media/filters/vp9_compressed_header_parser.cc
index 3fc72fd8261..4ce08432215 100644
--- a/chromium/media/filters/vp9_compressed_header_parser.cc
+++ b/chromium/media/filters/vp9_compressed_header_parser.cc
@@ -5,6 +5,7 @@
#include "media/filters/vp9_compressed_header_parser.h"
#include "base/logging.h"
+#include "base/stl_util.h"
namespace media {
@@ -47,7 +48,7 @@ Vp9Prob InvRemapProb(uint8_t delta_prob, uint8_t prob) {
uint8_t v = delta_prob;
DCHECK_GE(m, 1);
DCHECK_LE(m, kVp9MaxProb);
- DCHECK_LT(v, arraysize(inv_map_table));
+ DCHECK_LT(v, base::size(inv_map_table));
v = inv_map_table[v];
m--;
if ((m << 1) <= kVp9MaxProb) {
diff --git a/chromium/media/filters/vp9_parser.cc b/chromium/media/filters/vp9_parser.cc
index bff60077cf5..2e200836063 100644
--- a/chromium/media/filters/vp9_parser.cc
+++ b/chromium/media/filters/vp9_parser.cc
@@ -16,8 +16,8 @@
#include "base/bind.h"
#include "base/containers/circular_deque.h"
#include "base/logging.h"
-#include "base/macros.h"
#include "base/numerics/safe_conversions.h"
+#include "base/stl_util.h"
#include "base/sys_byteorder.h"
#include "media/filters/vp9_compressed_header_parser.h"
#include "media/filters/vp9_uncompressed_header_parser.h"
@@ -137,11 +137,13 @@ const int16_t kAcQLookup[][kQIndexRange] = {
};
// clang-format on
-static_assert(arraysize(kDcQLookup[0]) == arraysize(kAcQLookup[0]),
+static_assert(base::size(kDcQLookup[0]) == base::size(kAcQLookup[0]),
"quantizer lookup arrays of incorrect size");
-size_t ClampQ(size_t q) {
- return std::min(q, kQIndexRange - 1);
+size_t ClampQ(int64_t q) {
+ return q < 0 ? 0
+ : base::checked_cast<size_t>(
+ std::min(q, static_cast<int64_t>(kQIndexRange - 1)));
}
int ClampLf(int lf) {
@@ -485,27 +487,27 @@ void Vp9Parser::Context::Reset() {
}
void Vp9Parser::Context::MarkFrameContextForUpdate(size_t frame_context_idx) {
- DCHECK_LT(frame_context_idx, arraysize(frame_context_managers_));
+ DCHECK_LT(frame_context_idx, base::size(frame_context_managers_));
frame_context_managers_[frame_context_idx].SetNeedsClientUpdate();
}
void Vp9Parser::Context::UpdateFrameContext(
size_t frame_context_idx,
const Vp9FrameContext& frame_context) {
- DCHECK_LT(frame_context_idx, arraysize(frame_context_managers_));
+ DCHECK_LT(frame_context_idx, base::size(frame_context_managers_));
frame_context_managers_[frame_context_idx].Update(frame_context);
}
const Vp9Parser::ReferenceSlot& Vp9Parser::Context::GetRefSlot(
size_t ref_type) const {
- DCHECK_LT(ref_type, arraysize(ref_slots_));
+ DCHECK_LT(ref_type, base::size(ref_slots_));
return ref_slots_[ref_type];
}
void Vp9Parser::Context::UpdateRefSlot(
size_t ref_type,
const Vp9Parser::ReferenceSlot& ref_slot) {
- DCHECK_LT(ref_type, arraysize(ref_slots_));
+ DCHECK_LT(ref_type, base::size(ref_slots_));
ref_slots_[ref_type] = ref_slot;
}
@@ -652,10 +654,11 @@ Vp9Parser::Result Vp9Parser::ParseNextFrame(
frame_info = frames_.front();
frames_.pop_front();
- if (frame_info.decrypt_config) {
- *frame_decrypt_config = frame_info.decrypt_config->Clone();
- } else {
- *frame_decrypt_config = nullptr;
+ if (frame_decrypt_config) {
+ if (frame_info.decrypt_config)
+ *frame_decrypt_config = frame_info.decrypt_config->Clone();
+ else
+ *frame_decrypt_config = nullptr;
}
if (ParseUncompressedHeader(frame_info, fhdr, &result))
@@ -680,7 +683,7 @@ Vp9Parser::Result Vp9Parser::ParseNextFrame(
Vp9Parser::ContextRefreshCallback Vp9Parser::GetContextRefreshCb(
size_t frame_context_idx) {
- DCHECK_LT(frame_context_idx, arraysize(context_.frame_context_managers_));
+ DCHECK_LT(frame_context_idx, base::size(context_.frame_context_managers_));
auto& frame_context_manager =
context_.frame_context_managers_[frame_context_idx];
@@ -814,20 +817,19 @@ base::circular_deque<Vp9Parser::FrameInfo> Vp9Parser::ParseSuperframe() {
}
// 8.6.1 Dequantization functions
-size_t Vp9Parser::GetQIndex(const Vp9QuantizationParams& quant,
- size_t segid) const {
+int64_t Vp9Parser::GetQIndex(const Vp9QuantizationParams& quant,
+ size_t segid) const {
const Vp9SegmentationParams& segmentation = context_.segmentation();
if (segmentation.FeatureEnabled(segid,
Vp9SegmentationParams::SEG_LVL_ALT_Q)) {
int16_t feature_data =
segmentation.FeatureData(segid, Vp9SegmentationParams::SEG_LVL_ALT_Q);
- size_t q_index = segmentation.abs_or_delta_update
- ? feature_data
- : quant.base_q_idx + feature_data;
+ int64_t q_index = segmentation.abs_or_delta_update
+ ? feature_data
+ : quant.base_q_idx + feature_data;
return ClampQ(q_index);
}
-
return quant.base_q_idx;
}
@@ -845,7 +847,7 @@ bool Vp9Parser::SetupSegmentationDequant() {
if (segmentation.enabled) {
for (size_t i = 0; i < Vp9SegmentationParams::kNumSegments; ++i) {
- const size_t q_index = GetQIndex(quant, i);
+ const int64_t q_index = GetQIndex(quant, i);
segmentation.y_dequant[i][0] =
kDcQLookup[bit_depth_index][ClampQ(q_index + quant.delta_q_y_dc)];
segmentation.y_dequant[i][1] =
@@ -856,7 +858,7 @@ bool Vp9Parser::SetupSegmentationDequant() {
kAcQLookup[bit_depth_index][ClampQ(q_index + quant.delta_q_uv_ac)];
}
} else {
- const size_t q_index = quant.base_q_idx;
+ const int64_t q_index = quant.base_q_idx;
segmentation.y_dequant[0][0] =
kDcQLookup[bit_depth_index][ClampQ(q_index + quant.delta_q_y_dc)];
segmentation.y_dequant[0][1] = kAcQLookup[bit_depth_index][ClampQ(q_index)];
diff --git a/chromium/media/filters/vp9_parser.h b/chromium/media/filters/vp9_parser.h
index 0e503661d36..95e98f9770d 100644
--- a/chromium/media/filters/vp9_parser.h
+++ b/chromium/media/filters/vp9_parser.h
@@ -440,7 +440,7 @@ class MEDIA_EXPORT Vp9Parser {
// current frame.
bool ParseCompressedHeader(const FrameInfo& frame_info, Result* result);
- size_t GetQIndex(const Vp9QuantizationParams& quant, size_t segid) const;
+ int64_t GetQIndex(const Vp9QuantizationParams& quant, size_t segid) const;
// Returns true if the setup succeeded.
bool SetupSegmentationDequant();
void SetupLoopFilter();
diff --git a/chromium/media/filters/vp9_parser_encrypted_fuzzertest.cc b/chromium/media/filters/vp9_parser_encrypted_fuzzertest.cc
new file mode 100644
index 00000000000..0b7aa55486b
--- /dev/null
+++ b/chromium/media/filters/vp9_parser_encrypted_fuzzertest.cc
@@ -0,0 +1,70 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/numerics/safe_conversions.h"
+#include "base/test/fuzzed_data_provider.h"
+
+#include "media/base/decrypt_config.h"
+#include "media/base/subsample_entry.h"
+#include "media/filters/ivf_parser.h"
+#include "media/filters/vp9_parser.h"
+
+struct Environment {
+ Environment() {
+ // Disable noisy logging as per "libFuzzer in Chrome" documentation:
+ // testing/libfuzzer/getting_started.md#Disable-noisy-error-message-logging.
+ logging::SetMinLogLevel(logging::LOG_FATAL);
+ }
+};
+
+Environment* env = new Environment();
+
+// Entry point for LibFuzzer.
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+ base::FuzzedDataProvider data_provider(data, size);
+ std::string key_id = data_provider.ConsumeBytesAsString(4);
+ std::string iv = data_provider.ConsumeBytesAsString(16);
+
+ media::Vp9Parser vp9_parser(data_provider.ConsumeBool());
+
+ uint8_t subsample_count_upper_bound = 8;
+ uint8_t subsamples_count =
+ data_provider.ConsumeIntegral<uint8_t>() % subsample_count_upper_bound;
+ std::vector<media::SubsampleEntry> subsamples;
+ for (uint8_t entry = 0; entry < subsamples_count; ++entry) {
+ if (data_provider.remaining_bytes() >= 2 * sizeof(uint32_t)) {
+ uint32_t clear = data_provider.ConsumeIntegral<uint32_t>();
+ uint32_t cipher = data_provider.ConsumeIntegral<uint32_t>();
+ cipher &= 0xFFFFFFF0; // make sure cipher is a multiple of 16.
+ subsamples.push_back(media::SubsampleEntry(clear, cipher));
+ }
+ }
+
+ const uint8_t* ivf_payload = nullptr;
+ media::IvfParser ivf_parser;
+ media::IvfFileHeader ivf_file_header;
+ media::IvfFrameHeader ivf_frame_header;
+
+ if (!ivf_parser.Initialize(data, size, &ivf_file_header))
+ return 0;
+
+ // Parse until the end of stream/unsupported stream/error in stream is found.
+ while (ivf_parser.ParseNextFrame(&ivf_frame_header, &ivf_payload)) {
+ media::Vp9FrameHeader vp9_frame_header;
+ vp9_parser.SetStream(
+ ivf_payload, ivf_frame_header.frame_size,
+ media::DecryptConfig::CreateCencConfig(key_id, iv, subsamples));
+ // TODO(kcwu): further fuzzing the case of Vp9Parser::kAwaitingRefresh.
+ std::unique_ptr<media::DecryptConfig> null_config;
+ while (vp9_parser.ParseNextFrame(&vp9_frame_header, &null_config) ==
+ media::Vp9Parser::kOk) {
+ // Repeat until all frames processed.
+ }
+ }
+
+ return 0;
+}
diff --git a/chromium/media/filters/vp9_uncompressed_header_parser.cc b/chromium/media/filters/vp9_uncompressed_header_parser.cc
index 0118e819511..d69fa929bcd 100644
--- a/chromium/media/filters/vp9_uncompressed_header_parser.cc
+++ b/chromium/media/filters/vp9_uncompressed_header_parser.cc
@@ -4,6 +4,8 @@
#include "media/filters/vp9_uncompressed_header_parser.h"
+#include <type_traits>
+
#include "base/logging.h"
namespace media {
@@ -990,7 +992,7 @@ bool Vp9UncompressedHeaderParser::Parse(const uint8_t* stream,
} else {
fhdr->refresh_frame_flags = reader_.ReadLiteral(8);
- static_assert(arraysize(fhdr->ref_frame_sign_bias) >=
+ static_assert(std::extent<decltype(fhdr->ref_frame_sign_bias)>() >=
Vp9RefType::VP9_FRAME_LAST + kVp9NumRefsPerFrame,
"ref_frame_sign_bias is not big enough");
for (size_t i = 0; i < kVp9NumRefsPerFrame; i++) {
diff --git a/chromium/media/filters/vpx_video_decoder.cc b/chromium/media/filters/vpx_video_decoder.cc
index 34077331a8f..9c53fb4bcf3 100644
--- a/chromium/media/filters/vpx_video_decoder.cc
+++ b/chromium/media/filters/vpx_video_decoder.cc
@@ -112,13 +112,12 @@ std::string VpxVideoDecoder::GetDisplayName() const {
return "VpxVideoDecoder";
}
-void VpxVideoDecoder::Initialize(
- const VideoDecoderConfig& config,
- bool /* low_delay */,
- CdmContext* /* cdm_context */,
- const InitCB& init_cb,
- const OutputCB& output_cb,
- const WaitingForDecryptionKeyCB& /* waiting_for_decryption_key_cb */) {
+void VpxVideoDecoder::Initialize(const VideoDecoderConfig& config,
+ bool /* low_delay */,
+ CdmContext* /* cdm_context */,
+ const InitCB& init_cb,
+ const OutputCB& output_cb,
+ const WaitingCB& /* waiting_cb */) {
DVLOG(1) << __func__ << ": " << config.AsHumanReadableString();
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(config.IsValidConfig());
diff --git a/chromium/media/filters/vpx_video_decoder.h b/chromium/media/filters/vpx_video_decoder.h
index 1d9cca2529c..1ec1ca81ae6 100644
--- a/chromium/media/filters/vpx_video_decoder.h
+++ b/chromium/media/filters/vpx_video_decoder.h
@@ -43,13 +43,12 @@ class MEDIA_EXPORT VpxVideoDecoder : public OffloadableVideoDecoder {
// VideoDecoder implementation.
std::string GetDisplayName() const override;
- void Initialize(
- const VideoDecoderConfig& config,
- bool low_delay,
- CdmContext* cdm_context,
- const InitCB& init_cb,
- const OutputCB& output_cb,
- const WaitingForDecryptionKeyCB& waiting_for_decryption_key_cb) override;
+ void Initialize(const VideoDecoderConfig& config,
+ bool low_delay,
+ CdmContext* cdm_context,
+ const InitCB& init_cb,
+ const OutputCB& output_cb,
+ const WaitingCB& waiting_cb) override;
void Decode(scoped_refptr<DecoderBuffer> buffer,
const DecodeCB& decode_cb) override;
void Reset(const base::Closure& reset_cb) override;
diff --git a/chromium/media/formats/ac3/ac3_util.cc b/chromium/media/formats/ac3/ac3_util.cc
index 2c28262da9e..a9ffade2773 100644
--- a/chromium/media/formats/ac3/ac3_util.cc
+++ b/chromium/media/formats/ac3/ac3_util.cc
@@ -5,7 +5,7 @@
#include "media/formats/ac3/ac3_util.h"
#include "base/logging.h"
-#include "base/macros.h"
+#include "base/stl_util.h"
#include "media/base/bit_reader.h"
namespace media {
@@ -121,9 +121,9 @@ int GetAc3SyncFrameSampleCount() {
// Returns the size in bytes of the given AC3 synchronization frame.
int ParseAc3SyncFrameSize(Ac3Header& header) {
- if (header.sample_rate_code() >= arraysize(kSampleRate) ||
+ if (header.sample_rate_code() >= base::size(kSampleRate) ||
header.ac3_frame_size_code() >=
- arraysize(kSyncFrameSizeInWordsFor44kHz)) {
+ base::size(kSyncFrameSizeInWordsFor44kHz)) {
DVLOG(2) << __func__ << " Invalid frame header."
<< " fscod:" << header.sample_rate_code()
<< " frmsizecod:" << header.ac3_frame_size_code();
diff --git a/chromium/media/formats/common/stream_parser_test_base.h b/chromium/media/formats/common/stream_parser_test_base.h
index 2cc97c63c4f..fc47ccc5343 100644
--- a/chromium/media/formats/common/stream_parser_test_base.h
+++ b/chromium/media/formats/common/stream_parser_test_base.h
@@ -12,7 +12,7 @@
#include "base/macros.h"
#include "media/base/audio_decoder_config.h"
-#include "media/base/media_log.h"
+#include "media/base/media_util.h"
#include "media/base/stream_parser.h"
#include "media/base/stream_parser_buffer.h"
#include "media/base/text_track_config.h"
@@ -66,7 +66,7 @@ class StreamParserTestBase {
void OnNewSegment();
void OnEndOfSegment();
- MediaLog media_log_;
+ NullMediaLog media_log_;
std::unique_ptr<StreamParser> parser_;
std::stringstream results_stream_;
AudioDecoderConfig last_audio_config_;
diff --git a/chromium/media/formats/mp2t/es_adapter_video_unittest.cc b/chromium/media/formats/mp2t/es_adapter_video_unittest.cc
index 7a6b97aeb14..3c078a2f03a 100644
--- a/chromium/media/formats/mp2t/es_adapter_video_unittest.cc
+++ b/chromium/media/formats/mp2t/es_adapter_video_unittest.cc
@@ -11,7 +11,7 @@
#include "base/bind.h"
#include "base/logging.h"
-#include "base/macros.h"
+#include "base/stl_util.h"
#include "base/strings/string_util.h"
#include "base/time/time.h"
#include "media/base/media_util.h"
@@ -45,9 +45,9 @@ BufferQueue GenerateFakeBuffers(const int* frame_pts_ms,
BufferQueue buffers(frame_count);
for (size_t k = 0; k < frame_count; k++) {
- buffers[k] = StreamParserBuffer::CopyFrom(
- dummy_buffer, arraysize(dummy_buffer),
- is_key_frame[k], DemuxerStream::VIDEO, 0);
+ buffers[k] =
+ StreamParserBuffer::CopyFrom(dummy_buffer, base::size(dummy_buffer),
+ is_key_frame[k], DemuxerStream::VIDEO, 0);
if (frame_pts_ms[k] < 0) {
buffers[k]->set_timestamp(kNoTimestamp);
} else {
@@ -120,7 +120,7 @@ TEST_F(EsAdapterVideoTest, FrameDurationSimpleGop) {
true, false, false, false,
false, false, false, false };
BufferQueue buffer_queue =
- GenerateFakeBuffers(pts_ms, is_key_frame, arraysize(pts_ms));
+ GenerateFakeBuffers(pts_ms, is_key_frame, base::size(pts_ms));
EXPECT_EQ("(1,Y) (2,N) (3,N) (4,N) (5,N) (6,N) (7,N) (7,N)",
RunAdapterTest(buffer_queue));
@@ -133,7 +133,7 @@ TEST_F(EsAdapterVideoTest, FrameDurationComplexGop) {
true, false, false, false, false,
false, false, false, false, false };
BufferQueue buffer_queue =
- GenerateFakeBuffers(pts_ms, is_key_frame, arraysize(pts_ms));
+ GenerateFakeBuffers(pts_ms, is_key_frame, base::size(pts_ms));
EXPECT_EQ("(30,Y) (30,N) (30,N) (30,N) (30,N) "
"(30,N) (30,N) (30,N) (30,N) (30,N)",
@@ -144,7 +144,7 @@ TEST_F(EsAdapterVideoTest, LeadingNonKeyFrames) {
int pts_ms[] = {30, 40, 50, 120, 150, 180};
bool is_key_frame[] = {false, false, false, true, false, false};
BufferQueue buffer_queue =
- GenerateFakeBuffers(pts_ms, is_key_frame, arraysize(pts_ms));
+ GenerateFakeBuffers(pts_ms, is_key_frame, base::size(pts_ms));
EXPECT_EQ("(30,Y) (30,Y) (30,Y) (30,Y) (30,N) (30,N)",
RunAdapterTest(buffer_queue));
@@ -154,7 +154,7 @@ TEST_F(EsAdapterVideoTest, LeadingKeyFrameWithNoTimestamp) {
int pts_ms[] = {-1, 40, 50, 120, 150, 180};
bool is_key_frame[] = {true, false, false, true, false, false};
BufferQueue buffer_queue =
- GenerateFakeBuffers(pts_ms, is_key_frame, arraysize(pts_ms));
+ GenerateFakeBuffers(pts_ms, is_key_frame, base::size(pts_ms));
EXPECT_EQ("(40,Y) (40,Y) (30,Y) (30,N) (30,N)",
RunAdapterTest(buffer_queue));
@@ -164,7 +164,7 @@ TEST_F(EsAdapterVideoTest, LeadingFramesWithNoTimestamp) {
int pts_ms[] = {-1, -1, 50, 120, 150, 180};
bool is_key_frame[] = {false, false, false, true, false, false};
BufferQueue buffer_queue =
- GenerateFakeBuffers(pts_ms, is_key_frame, arraysize(pts_ms));
+ GenerateFakeBuffers(pts_ms, is_key_frame, base::size(pts_ms));
EXPECT_EQ("(70,Y) (30,Y) (30,N) (30,N)",
RunAdapterTest(buffer_queue));
diff --git a/chromium/media/formats/mp2t/es_parser_mpeg1audio_unittest.cc b/chromium/media/formats/mp2t/es_parser_mpeg1audio_unittest.cc
index a7edeaf522f..d3e4dd8a899 100644
--- a/chromium/media/formats/mp2t/es_parser_mpeg1audio_unittest.cc
+++ b/chromium/media/formats/mp2t/es_parser_mpeg1audio_unittest.cc
@@ -8,7 +8,7 @@
#include "base/logging.h"
#include "base/macros.h"
#include "base/time/time.h"
-#include "media/base/media_log.h"
+#include "media/base/media_util.h"
#include "media/base/stream_parser_buffer.h"
#include "media/formats/mp2t/es_parser_mpeg1audio.h"
#include "media/formats/mp2t/es_parser_test_base.h"
@@ -28,7 +28,7 @@ class EsParserMpeg1AudioTest : public EsParserTestBase,
bool Process(const std::vector<Packet>& pes_packets, bool force_timing);
private:
- MediaLog media_log_;
+ NullMediaLog media_log_;
DISALLOW_COPY_AND_ASSIGN(EsParserMpeg1AudioTest);
};
diff --git a/chromium/media/formats/mp2t/mp2t_stream_parser_unittest.cc b/chromium/media/formats/mp2t/mp2t_stream_parser_unittest.cc
index f3189e9e176..bd1590adc08 100644
--- a/chromium/media/formats/mp2t/mp2t_stream_parser_unittest.cc
+++ b/chromium/media/formats/mp2t/mp2t_stream_parser_unittest.cc
@@ -20,9 +20,9 @@
#include "media/base/audio_decoder_config.h"
#include "media/base/decoder_buffer.h"
#include "media/base/encryption_pattern.h"
-#include "media/base/media_log.h"
#include "media/base/media_track.h"
#include "media/base/media_tracks.h"
+#include "media/base/media_util.h"
#include "media/base/stream_parser_buffer.h"
#include "media/base/test_data_util.h"
#include "media/base/text_track_config.h"
@@ -179,7 +179,7 @@ class Mp2tStreamParserTest : public testing::Test {
}
protected:
- MediaLog media_log_;
+ NullMediaLog media_log_;
std::unique_ptr<Mp2tStreamParser> parser_;
int segment_count_;
int config_count_;
diff --git a/chromium/media/formats/mp2t/timestamp_unroller_unittest.cc b/chromium/media/formats/mp2t/timestamp_unroller_unittest.cc
index 952ab1c4106..8fd0e3fd12e 100644
--- a/chromium/media/formats/mp2t/timestamp_unroller_unittest.cc
+++ b/chromium/media/formats/mp2t/timestamp_unroller_unittest.cc
@@ -7,7 +7,7 @@
#include <vector>
#include "base/logging.h"
-#include "base/macros.h"
+#include "base/stl_util.h"
#include "base/test/perf_test_suite.h"
#include "media/formats/mp2t/timestamp_unroller.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -52,7 +52,7 @@ TEST(TimestampUnrollerTest, SingleStream) {
};
std::vector<int64_t> timestamps_vector(timestamps,
- timestamps + arraysize(timestamps));
+ timestamps + base::size(timestamps));
RunUnrollTest(timestamps_vector);
}
diff --git a/chromium/media/formats/mp4/avc.cc b/chromium/media/formats/mp4/avc.cc
index 31f4df2112b..5a0bb690bef 100644
--- a/chromium/media/formats/mp4/avc.cc
+++ b/chromium/media/formats/mp4/avc.cc
@@ -134,10 +134,21 @@ bool AVC::InsertParamSetsAnnexB(const AVCDecoderConfigurationRecord& avc_config,
RCHECK(AVC::ConvertConfigToAnnexB(avc_config, &param_sets));
if (subsamples && !subsamples->empty()) {
- int subsample_index = FindSubsampleIndex(*buffer, subsamples,
- &(*config_insert_point));
- // Update the size of the subsample where SPS/PPS is to be inserted.
- (*subsamples)[subsample_index].clear_bytes += param_sets.size();
+ if (config_insert_point != buffer->end()) {
+ int subsample_index =
+ FindSubsampleIndex(*buffer, subsamples, &(*config_insert_point));
+ // Update the size of the subsample where SPS/PPS is to be inserted.
+ (*subsamples)[subsample_index].clear_bytes += param_sets.size();
+ } else {
+ int subsample_index = (*subsamples).size() - 1;
+ if ((*subsamples)[subsample_index].cypher_bytes == 0) {
+ // Extend the last clear range to include the inserted data.
+ (*subsamples)[subsample_index].clear_bytes += param_sets.size();
+ } else {
+ // Append a new subsample to cover the inserted data.
+ (*subsamples).emplace_back(param_sets.size(), 0);
+ }
+ }
}
buffer->insert(config_insert_point,
diff --git a/chromium/media/formats/mp4/avc_unittest.cc b/chromium/media/formats/mp4/avc_unittest.cc
index 944ca1b7599..3fed74fb2df 100644
--- a/chromium/media/formats/mp4/avc_unittest.cc
+++ b/chromium/media/formats/mp4/avc_unittest.cc
@@ -8,8 +8,8 @@
#include <ostream>
-#include "base/macros.h"
#include "base/optional.h"
+#include "base/stl_util.h"
#include "base/strings/string_split.h"
#include "base/strings/string_util.h"
#include "media/base/decrypt_config.h"
@@ -436,7 +436,7 @@ TEST_F(AVCConversionTest, ValidAnnexBConstructs) {
{"SDC I", false},
};
- for (size_t i = 0; i < arraysize(test_cases); ++i) {
+ for (size_t i = 0; i < base::size(test_cases); ++i) {
std::vector<uint8_t> buf;
std::vector<SubsampleEntry> subsamples;
StringToAnnexB(test_cases[i].case_string, &buf, NULL);
@@ -484,7 +484,7 @@ TEST_F(AVCConversionTest, InvalidAnnexBConstructs) {
BitstreamConverter::AnalysisResult expected;
expected.is_conformant = false;
- for (size_t i = 0; i < arraysize(test_cases); ++i) {
+ for (size_t i = 0; i < base::size(test_cases); ++i) {
std::vector<uint8_t> buf;
std::vector<SubsampleEntry> subsamples;
StringToAnnexB(test_cases[i].case_string, &buf, NULL);
@@ -532,7 +532,7 @@ TEST_F(AVCConversionTest, InsertParamSetsAnnexB) {
expected.is_conformant = true;
expected.is_keyframe = true;
- for (size_t i = 0; i < arraysize(test_cases); ++i) {
+ for (size_t i = 0; i < base::size(test_cases); ++i) {
std::vector<uint8_t> buf;
std::vector<SubsampleEntry> subsamples;
diff --git a/chromium/media/formats/mp4/box_definitions.cc b/chromium/media/formats/mp4/box_definitions.cc
index 9c4a22021d0..ebe972f0b99 100644
--- a/chromium/media/formats/mp4/box_definitions.cc
+++ b/chromium/media/formats/mp4/box_definitions.cc
@@ -14,6 +14,7 @@
#include "base/strings/string_number_conversions.h"
#include "build/build_config.h"
#include "media/base/media_switches.h"
+#include "media/base/media_util.h"
#include "media/base/video_types.h"
#include "media/base/video_util.h"
#include "media/formats/common/opus_constants.h"
@@ -610,7 +611,7 @@ bool AVCDecoderConfigurationRecord::Parse(BoxReader* reader) {
bool AVCDecoderConfigurationRecord::Parse(const uint8_t* data, int data_size) {
BufferReader reader(data, data_size);
// TODO(wolenetz): Questionable MediaLog usage, http://crbug.com/712310
- MediaLog media_log;
+ NullMediaLog media_log;
return ParseInternal(&reader, &media_log);
}
diff --git a/chromium/media/formats/mp4/box_reader.h b/chromium/media/formats/mp4/box_reader.h
index 2da377323fd..9ef8ae86c6c 100644
--- a/chromium/media/formats/mp4/box_reader.h
+++ b/chromium/media/formats/mp4/box_reader.h
@@ -59,7 +59,7 @@ class MEDIA_EXPORT BufferReader {
count <= buf_size_ - pos_;
}
- // Read a value from the stream, perfoming endian correction, and advance the
+ // Read a value from the stream, performing endian correction, and advance the
// stream pointer.
bool Read1(uint8_t* v) WARN_UNUSED_RESULT;
bool Read2(uint16_t* v) WARN_UNUSED_RESULT;
diff --git a/chromium/media/formats/mp4/dolby_vision.cc b/chromium/media/formats/mp4/dolby_vision.cc
index fd32c85fffd..eac78312da1 100644
--- a/chromium/media/formats/mp4/dolby_vision.cc
+++ b/chromium/media/formats/mp4/dolby_vision.cc
@@ -5,6 +5,7 @@
#include "media/formats/mp4/dolby_vision.h"
#include "base/logging.h"
+#include "media/base/media_util.h"
#include "media/base/video_codecs.h"
#include "media/formats/mp4/box_definitions.h"
#include "media/formats/mp4/box_reader.h"
@@ -35,7 +36,7 @@ bool DolbyVisionConfiguration::Parse(BoxReader* reader) {
bool DolbyVisionConfiguration::ParseForTesting(const uint8_t* data,
int data_size) {
BufferReader reader(data, data_size);
- MediaLog media_log;
+ NullMediaLog media_log;
return ParseInternal(&reader, &media_log);
}
diff --git a/chromium/media/formats/mp4/hevc.cc b/chromium/media/formats/mp4/hevc.cc
index b2f606db1fd..c45f556c1ba 100644
--- a/chromium/media/formats/mp4/hevc.cc
+++ b/chromium/media/formats/mp4/hevc.cc
@@ -11,6 +11,7 @@
#include "base/logging.h"
#include "media/base/decrypt_config.h"
+#include "media/base/media_util.h"
#include "media/formats/mp4/avc.h"
#include "media/formats/mp4/box_definitions.h"
#include "media/formats/mp4/box_reader.h"
@@ -49,7 +50,7 @@ bool HEVCDecoderConfigurationRecord::Parse(BoxReader* reader) {
bool HEVCDecoderConfigurationRecord::Parse(const uint8_t* data, int data_size) {
BufferReader reader(data, data_size);
// TODO(wolenetz): Questionable MediaLog usage, http://crbug.com/712310
- MediaLog media_log;
+ NullMediaLog media_log;
return ParseInternal(&reader, &media_log);
}
diff --git a/chromium/media/formats/mp4/sample_to_group_iterator_unittest.cc b/chromium/media/formats/mp4/sample_to_group_iterator_unittest.cc
index 2c3e2fbefc1..92426e56320 100644
--- a/chromium/media/formats/mp4/sample_to_group_iterator_unittest.cc
+++ b/chromium/media/formats/mp4/sample_to_group_iterator_unittest.cc
@@ -9,7 +9,7 @@
#include <memory>
-#include "base/macros.h"
+#include "base/stl_util.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace media {
@@ -24,7 +24,7 @@ class SampleToGroupIteratorTest : public testing::Test {
public:
SampleToGroupIteratorTest() {
// Build sample group description index table from kSampleToGroupTable.
- for (size_t i = 0; i < arraysize(kCompactSampleToGroupTable); ++i) {
+ for (size_t i = 0; i < base::size(kCompactSampleToGroupTable); ++i) {
for (uint32_t j = 0; j < kCompactSampleToGroupTable[i].sample_count;
++j) {
sample_to_group_table_.push_back(
@@ -34,7 +34,7 @@ class SampleToGroupIteratorTest : public testing::Test {
sample_to_group_.entries.assign(
kCompactSampleToGroupTable,
- kCompactSampleToGroupTable + arraysize(kCompactSampleToGroupTable));
+ kCompactSampleToGroupTable + base::size(kCompactSampleToGroupTable));
sample_to_group_iterator_.reset(
new SampleToGroupIterator(sample_to_group_));
}
diff --git a/chromium/media/formats/mp4/track_run_iterator.cc b/chromium/media/formats/mp4/track_run_iterator.cc
index ffd711396c6..e3b059f67ca 100644
--- a/chromium/media/formats/mp4/track_run_iterator.cc
+++ b/chromium/media/formats/mp4/track_run_iterator.cc
@@ -9,9 +9,9 @@
#include <limits>
#include <memory>
-#include "base/macros.h"
#include "base/numerics/checked_math.h"
#include "base/numerics/safe_conversions.h"
+#include "base/stl_util.h"
#include "media/base/demuxer_memory_limit.h"
#include "media/base/encryption_scheme.h"
#include "media/base/media_util.h"
@@ -721,7 +721,7 @@ std::unique_ptr<DecryptConfig> TrackRunIterator::GetDecryptConfig() {
if (ApplyConstantIv(sample_idx, &sample_encryption_entry)) {
std::string iv(reinterpret_cast<const char*>(
sample_encryption_entry.initialization_vector),
- arraysize(sample_encryption_entry.initialization_vector));
+ base::size(sample_encryption_entry.initialization_vector));
switch (run_itr_->encryption_scheme.mode()) {
case EncryptionScheme::CIPHER_MODE_UNENCRYPTED:
return nullptr;
@@ -744,7 +744,7 @@ std::unique_ptr<DecryptConfig> TrackRunIterator::GetDecryptConfig() {
run_itr_->sample_encryption_entries[sample_idx];
std::string iv(reinterpret_cast<const char*>(
sample_encryption_entry.initialization_vector),
- arraysize(sample_encryption_entry.initialization_vector));
+ base::size(sample_encryption_entry.initialization_vector));
size_t total_size = 0;
if (!sample_encryption_entry.subsamples.empty() &&
diff --git a/chromium/media/formats/mp4/track_run_iterator_unittest.cc b/chromium/media/formats/mp4/track_run_iterator_unittest.cc
index 03f68b491e8..c06c1f47858 100644
--- a/chromium/media/formats/mp4/track_run_iterator_unittest.cc
+++ b/chromium/media/formats/mp4/track_run_iterator_unittest.cc
@@ -10,7 +10,7 @@
#include <memory>
#include "base/logging.h"
-#include "base/macros.h"
+#include "base/stl_util.h"
#include "base/strings/string_split.h"
#include "media/base/mock_media_log.h"
#include "media/formats/mp4/box_definitions.h"
@@ -344,7 +344,7 @@ class TrackRunIteratorTest : public testing::Test {
sinf->info.track_encryption.is_encrypted = true;
sinf->info.track_encryption.default_iv_size = 8;
sinf->info.track_encryption.default_kid.assign(kKeyId,
- kKeyId + arraysize(kKeyId));
+ kKeyId + base::size(kKeyId));
}
// Add SampleGroupDescription Box to track level sample table and to
@@ -361,7 +361,7 @@ class TrackRunIteratorTest : public testing::Test {
track_cenc_group.entries[0].iv_size = 8;
track_cenc_group.entries[0].key_id.assign(
kTrackCencSampleGroupKeyId,
- kTrackCencSampleGroupKeyId + arraysize(kTrackCencSampleGroupKeyId));
+ kTrackCencSampleGroupKeyId + base::size(kTrackCencSampleGroupKeyId));
frag->sample_group_description.grouping_type = FOURCC_SEIG;
frag->sample_group_description.entries.resize(3);
@@ -372,11 +372,11 @@ class TrackRunIteratorTest : public testing::Test {
frag->sample_group_description.entries[1].key_id.assign(
kFragmentCencSampleGroupKeyId,
kFragmentCencSampleGroupKeyId +
- arraysize(kFragmentCencSampleGroupKeyId));
+ base::size(kFragmentCencSampleGroupKeyId));
frag->sample_group_description.entries[2].is_encrypted = true;
frag->sample_group_description.entries[2].iv_size = 16;
frag->sample_group_description.entries[2].key_id.assign(
- kKeyId, kKeyId + arraysize(kKeyId));
+ kKeyId, kKeyId + base::size(kKeyId));
frag->sample_to_group.grouping_type = FOURCC_SEIG;
frag->sample_to_group.entries.assign(sample_to_group_entries,
@@ -400,12 +400,12 @@ class TrackRunIteratorTest : public testing::Test {
frag->sample_encryption.sample_encryption_data.assign(
kSampleEncryptionDataWithSubsamples,
kSampleEncryptionDataWithSubsamples +
- arraysize(kSampleEncryptionDataWithSubsamples));
+ base::size(kSampleEncryptionDataWithSubsamples));
} else {
frag->sample_encryption.sample_encryption_data.assign(
kSampleEncryptionDataWithoutSubsamples,
kSampleEncryptionDataWithoutSubsamples +
- arraysize(kSampleEncryptionDataWithoutSubsamples));
+ base::size(kSampleEncryptionDataWithoutSubsamples));
}
// Update sample sizes and aux info header.
@@ -438,7 +438,7 @@ class TrackRunIteratorTest : public testing::Test {
sinf->info.track_encryption.default_constant_iv_size = 16;
memcpy(sinf->info.track_encryption.default_constant_iv, kIv3, 16);
sinf->info.track_encryption.default_kid.assign(kKeyId,
- kKeyId + arraysize(kKeyId));
+ kKeyId + base::size(kKeyId));
}
void AddConstantIvsToCencSampleGroup(Track* track, TrackFragment* frag) {
@@ -467,7 +467,7 @@ class TrackRunIteratorTest : public testing::Test {
frag->sample_encryption.sample_encryption_data.assign(
kSampleEncryptionDataWithSubsamplesAndConstantIv,
kSampleEncryptionDataWithSubsamplesAndConstantIv +
- arraysize(kSampleEncryptionDataWithSubsamplesAndConstantIv));
+ base::size(kSampleEncryptionDataWithSubsamplesAndConstantIv));
// Update sample sizes and aux info header.
frag->runs.resize(1);
@@ -715,14 +715,14 @@ TEST_F(TrackRunIteratorTest,
EXPECT_EQ(iter_->GetMaxClearOffset(), moof.tracks[1].runs[0].data_offset);
std::unique_ptr<DecryptConfig> config = iter_->GetDecryptConfig();
EXPECT_EQ(
- std::string(reinterpret_cast<const char*>(kKeyId), arraysize(kKeyId)),
+ std::string(reinterpret_cast<const char*>(kKeyId), base::size(kKeyId)),
config->key_id());
- EXPECT_EQ(std::string(reinterpret_cast<const char*>(kIv1), arraysize(kIv1)),
+ EXPECT_EQ(std::string(reinterpret_cast<const char*>(kIv1), base::size(kIv1)),
config->iv());
EXPECT_EQ(config->subsamples().size(), 0u);
iter_->AdvanceSample();
config = iter_->GetDecryptConfig();
- EXPECT_EQ(std::string(reinterpret_cast<const char*>(kIv2), arraysize(kIv2)),
+ EXPECT_EQ(std::string(reinterpret_cast<const char*>(kIv2), base::size(kIv2)),
config->iv());
EXPECT_EQ(config->subsamples().size(), 0u);
}
@@ -743,7 +743,7 @@ TEST_F(TrackRunIteratorTest,
// With Iv size 16 bytes.
{1, SampleToGroupEntry::kFragmentGroupDescriptionIndexBase + 3}};
AddCencSampleGroup(&moov_.tracks[1], &moof.tracks[1], kSampleToGroupTable,
- arraysize(kSampleToGroupTable));
+ base::size(kSampleToGroupTable));
ASSERT_TRUE(iter_->Init(moof));
// The run for track 2 will be the second, which is parsed according to
@@ -758,14 +758,14 @@ TEST_F(TrackRunIteratorTest,
EXPECT_EQ(iter_->sample_offset(), 200);
EXPECT_EQ(iter_->GetMaxClearOffset(), moof.tracks[1].runs[0].data_offset);
std::unique_ptr<DecryptConfig> config = iter_->GetDecryptConfig();
- EXPECT_EQ(std::string(reinterpret_cast<const char*>(kIv1), arraysize(kIv1)),
+ EXPECT_EQ(std::string(reinterpret_cast<const char*>(kIv1), base::size(kIv1)),
config->iv());
EXPECT_EQ(config->subsamples().size(), 1u);
EXPECT_EQ(config->subsamples()[0].clear_bytes, 1u);
EXPECT_EQ(config->subsamples()[0].cypher_bytes, 2u);
iter_->AdvanceSample();
config = iter_->GetDecryptConfig();
- EXPECT_EQ(std::string(reinterpret_cast<const char*>(kIv3), arraysize(kIv3)),
+ EXPECT_EQ(std::string(reinterpret_cast<const char*>(kIv3), base::size(kIv3)),
config->iv());
EXPECT_EQ(config->subsamples().size(), 2u);
EXPECT_EQ(config->subsamples()[0].clear_bytes, 1u);
@@ -788,21 +788,22 @@ TEST_F(TrackRunIteratorTest, DecryptConfigTestWithAuxInfo) {
EXPECT_EQ(iter_->track_id(), 2u);
EXPECT_TRUE(iter_->is_encrypted());
ASSERT_TRUE(iter_->AuxInfoNeedsToBeCached());
- EXPECT_EQ(static_cast<uint32_t>(iter_->aux_info_size()), arraysize(kAuxInfo));
+ EXPECT_EQ(static_cast<uint32_t>(iter_->aux_info_size()),
+ base::size(kAuxInfo));
EXPECT_EQ(iter_->aux_info_offset(), 50);
EXPECT_EQ(iter_->GetMaxClearOffset(), 50);
EXPECT_FALSE(iter_->CacheAuxInfo(NULL, 0));
EXPECT_FALSE(iter_->CacheAuxInfo(kAuxInfo, 3));
EXPECT_TRUE(iter_->AuxInfoNeedsToBeCached());
- EXPECT_TRUE(iter_->CacheAuxInfo(kAuxInfo, arraysize(kAuxInfo)));
+ EXPECT_TRUE(iter_->CacheAuxInfo(kAuxInfo, base::size(kAuxInfo)));
EXPECT_FALSE(iter_->AuxInfoNeedsToBeCached());
EXPECT_EQ(iter_->sample_offset(), 200);
EXPECT_EQ(iter_->GetMaxClearOffset(), moof.tracks[0].runs[0].data_offset);
std::unique_ptr<DecryptConfig> config = iter_->GetDecryptConfig();
EXPECT_EQ(
- std::string(reinterpret_cast<const char*>(kKeyId), arraysize(kKeyId)),
+ std::string(reinterpret_cast<const char*>(kKeyId), base::size(kKeyId)),
config->key_id());
- EXPECT_EQ(std::string(reinterpret_cast<const char*>(kIv1), arraysize(kIv1)),
+ EXPECT_EQ(std::string(reinterpret_cast<const char*>(kIv1), base::size(kIv1)),
config->iv());
EXPECT_TRUE(config->subsamples().empty());
iter_->AdvanceSample();
@@ -821,14 +822,15 @@ TEST_F(TrackRunIteratorTest, CencSampleGroupTest) {
// Associated with the first entry in SampleGroupDescription Box.
{1, SampleToGroupEntry::kFragmentGroupDescriptionIndexBase + 1}};
AddCencSampleGroup(&moov_.tracks[0], &moof.tracks[0], kSampleToGroupTable,
- arraysize(kSampleToGroupTable));
+ base::size(kSampleToGroupTable));
iter_.reset(new TrackRunIterator(&moov_, &media_log_));
ASSERT_TRUE(InitMoofWithArbitraryAuxInfo(&moof));
std::string cenc_sample_group_key_id(
kFragmentCencSampleGroupKeyId,
- kFragmentCencSampleGroupKeyId + arraysize(kFragmentCencSampleGroupKeyId));
+ kFragmentCencSampleGroupKeyId +
+ base::size(kFragmentCencSampleGroupKeyId));
// The first sample is encrypted and the second sample is unencrypted.
EXPECT_TRUE(iter_->is_encrypted());
EXPECT_EQ(cenc_sample_group_key_id, iter_->GetDecryptConfig()->key_id());
@@ -852,18 +854,19 @@ TEST_F(TrackRunIteratorTest, CencSampleGroupWithTrackEncryptionBoxTest) {
// Associated with the 1st entry in track SampleGroupDescription Box.
{2, 1}};
AddCencSampleGroup(&moov_.tracks[0], &moof.tracks[0], kSampleToGroupTable,
- arraysize(kSampleToGroupTable));
+ base::size(kSampleToGroupTable));
iter_.reset(new TrackRunIterator(&moov_, &media_log_));
ASSERT_TRUE(InitMoofWithArbitraryAuxInfo(&moof));
- std::string track_encryption_key_id(kKeyId, kKeyId + arraysize(kKeyId));
+ std::string track_encryption_key_id(kKeyId, kKeyId + base::size(kKeyId));
std::string track_cenc_sample_group_key_id(
kTrackCencSampleGroupKeyId,
- kTrackCencSampleGroupKeyId + arraysize(kTrackCencSampleGroupKeyId));
+ kTrackCencSampleGroupKeyId + base::size(kTrackCencSampleGroupKeyId));
std::string fragment_cenc_sample_group_key_id(
kFragmentCencSampleGroupKeyId,
- kFragmentCencSampleGroupKeyId + arraysize(kFragmentCencSampleGroupKeyId));
+ kFragmentCencSampleGroupKeyId +
+ base::size(kFragmentCencSampleGroupKeyId));
for (size_t i = 0; i < kSampleToGroupTable[0].sample_count; ++i) {
EXPECT_TRUE(iter_->is_encrypted());
@@ -911,18 +914,18 @@ TEST_F(TrackRunIteratorTest, SharedAuxInfoTest) {
ASSERT_TRUE(iter_->Init(moof));
EXPECT_EQ(iter_->track_id(), 1u);
EXPECT_EQ(iter_->aux_info_offset(), 50);
- EXPECT_TRUE(iter_->CacheAuxInfo(kAuxInfo, arraysize(kAuxInfo)));
+ EXPECT_TRUE(iter_->CacheAuxInfo(kAuxInfo, base::size(kAuxInfo)));
std::unique_ptr<DecryptConfig> config = iter_->GetDecryptConfig();
- ASSERT_EQ(arraysize(kIv1), config->iv().size());
+ ASSERT_EQ(base::size(kIv1), config->iv().size());
EXPECT_TRUE(!memcmp(kIv1, config->iv().data(), config->iv().size()));
iter_->AdvanceSample();
EXPECT_EQ(iter_->GetMaxClearOffset(), 50);
iter_->AdvanceRun();
EXPECT_EQ(iter_->GetMaxClearOffset(), 50);
EXPECT_EQ(iter_->aux_info_offset(), 50);
- EXPECT_TRUE(iter_->CacheAuxInfo(kAuxInfo, arraysize(kAuxInfo)));
+ EXPECT_TRUE(iter_->CacheAuxInfo(kAuxInfo, base::size(kAuxInfo)));
EXPECT_EQ(iter_->GetMaxClearOffset(), 200);
- ASSERT_EQ(arraysize(kIv1), config->iv().size());
+ ASSERT_EQ(base::size(kIv1), config->iv().size());
EXPECT_TRUE(!memcmp(kIv1, config->iv().data(), config->iv().size()));
iter_->AdvanceSample();
EXPECT_EQ(iter_->GetMaxClearOffset(), 201);
@@ -957,13 +960,13 @@ TEST_F(TrackRunIteratorTest, UnexpectedOrderingTest) {
EXPECT_EQ(iter_->track_id(), 2u);
EXPECT_EQ(iter_->aux_info_offset(), 50);
EXPECT_EQ(iter_->sample_offset(), 200);
- EXPECT_TRUE(iter_->CacheAuxInfo(kAuxInfo, arraysize(kAuxInfo)));
+ EXPECT_TRUE(iter_->CacheAuxInfo(kAuxInfo, base::size(kAuxInfo)));
EXPECT_EQ(iter_->GetMaxClearOffset(), 100);
iter_->AdvanceRun();
EXPECT_EQ(iter_->track_id(), 1u);
EXPECT_EQ(iter_->aux_info_offset(), 20000);
EXPECT_EQ(iter_->sample_offset(), 100);
- EXPECT_TRUE(iter_->CacheAuxInfo(kAuxInfo, arraysize(kAuxInfo)));
+ EXPECT_TRUE(iter_->CacheAuxInfo(kAuxInfo, base::size(kAuxInfo)));
EXPECT_EQ(iter_->GetMaxClearOffset(), 100);
iter_->AdvanceSample();
EXPECT_EQ(iter_->GetMaxClearOffset(), 101);
@@ -972,7 +975,7 @@ TEST_F(TrackRunIteratorTest, UnexpectedOrderingTest) {
EXPECT_EQ(iter_->aux_info_offset(), 201);
EXPECT_EQ(iter_->sample_offset(), 10000);
EXPECT_EQ(iter_->GetMaxClearOffset(), 201);
- EXPECT_TRUE(iter_->CacheAuxInfo(kAuxInfo, arraysize(kAuxInfo)));
+ EXPECT_TRUE(iter_->CacheAuxInfo(kAuxInfo, base::size(kAuxInfo)));
EXPECT_EQ(iter_->GetMaxClearOffset(), 10000);
}
@@ -1031,17 +1034,17 @@ TEST_F(TrackRunIteratorTest, DecryptConfigTestWithConstantIvNoAuxInfo) {
EXPECT_EQ(iter_->sample_offset(), 200);
std::unique_ptr<DecryptConfig> config = iter_->GetDecryptConfig();
EXPECT_EQ(
- std::string(reinterpret_cast<const char*>(kKeyId), arraysize(kKeyId)),
+ std::string(reinterpret_cast<const char*>(kKeyId), base::size(kKeyId)),
config->key_id());
- EXPECT_EQ(std::string(reinterpret_cast<const char*>(kIv3), arraysize(kIv3)),
+ EXPECT_EQ(std::string(reinterpret_cast<const char*>(kIv3), base::size(kIv3)),
config->iv());
EXPECT_TRUE(config->subsamples().empty());
iter_->AdvanceSample();
config = iter_->GetDecryptConfig();
EXPECT_EQ(
- std::string(reinterpret_cast<const char*>(kKeyId), arraysize(kKeyId)),
+ std::string(reinterpret_cast<const char*>(kKeyId), base::size(kKeyId)),
config->key_id());
- EXPECT_EQ(std::string(reinterpret_cast<const char*>(kIv3), arraysize(kIv3)),
+ EXPECT_EQ(std::string(reinterpret_cast<const char*>(kIv3), base::size(kIv3)),
config->iv());
EXPECT_TRUE(config->subsamples().empty());
}
@@ -1063,7 +1066,7 @@ TEST_F(TrackRunIteratorTest, DecryptConfigTestWithSampleGroupsAndConstantIv) {
// Associated with the 1st entry in track SampleGroupDescription Box.
{1, 1}};
AddCencSampleGroup(&moov_.tracks[1], &moof.tracks[1], kSampleToGroupTable,
- arraysize(kSampleToGroupTable));
+ base::size(kSampleToGroupTable));
AddConstantIvsToCencSampleGroup(&moov_.tracks[1], &moof.tracks[1]);
iter_.reset(new TrackRunIterator(&moov_, &media_log_));
ASSERT_TRUE(iter_->Init(moof));
@@ -1071,9 +1074,9 @@ TEST_F(TrackRunIteratorTest, DecryptConfigTestWithSampleGroupsAndConstantIv) {
// The run for track 2 will be the second.
iter_->AdvanceRun();
- std::string track_encryption_iv(kIv3, kIv3 + arraysize(kIv3));
- std::string track_cenc_sample_group_iv(kIv4, kIv4 + arraysize(kIv4));
- std::string fragment_cenc_sample_group_iv(kIv5, kIv5 + arraysize(kIv5));
+ std::string track_encryption_iv(kIv3, kIv3 + base::size(kIv3));
+ std::string track_cenc_sample_group_iv(kIv4, kIv4 + base::size(kIv4));
+ std::string fragment_cenc_sample_group_iv(kIv5, kIv5 + base::size(kIv5));
for (size_t i = 0; i < kSampleToGroupTable[0].sample_count; ++i) {
EXPECT_TRUE(iter_->is_encrypted());
diff --git a/chromium/media/formats/mpeg/adts_constants.cc b/chromium/media/formats/mpeg/adts_constants.cc
index bc898908fa3..d868fccb5c9 100644
--- a/chromium/media/formats/mpeg/adts_constants.cc
+++ b/chromium/media/formats/mpeg/adts_constants.cc
@@ -4,7 +4,7 @@
#include "media/formats/mpeg/adts_constants.h"
-#include "base/macros.h"
+#include "base/stl_util.h"
namespace media {
@@ -13,7 +13,7 @@ namespace media {
const int kADTSFrequencyTable[] = {96000, 88200, 64000, 48000, 44100,
32000, 24000, 22050, 16000, 12000,
11025, 8000, 7350};
-const size_t kADTSFrequencyTableSize = arraysize(kADTSFrequencyTable);
+const size_t kADTSFrequencyTableSize = base::size(kADTSFrequencyTable);
// The following conversion table is extracted from ISO 14496 Part 3 -
// Table 1.17 - Channel Configuration.
@@ -22,6 +22,6 @@ const media::ChannelLayout kADTSChannelLayoutTable[] = {
media::CHANNEL_LAYOUT_STEREO, media::CHANNEL_LAYOUT_SURROUND,
media::CHANNEL_LAYOUT_4_0, media::CHANNEL_LAYOUT_5_0_BACK,
media::CHANNEL_LAYOUT_5_1_BACK, media::CHANNEL_LAYOUT_7_1};
-const size_t kADTSChannelLayoutTableSize = arraysize(kADTSChannelLayoutTable);
+const size_t kADTSChannelLayoutTableSize = base::size(kADTSChannelLayoutTable);
} // namespace media
diff --git a/chromium/media/formats/webm/webm_cluster_parser.cc b/chromium/media/formats/webm/webm_cluster_parser.cc
index f4aa78c76cf..297906cca65 100644
--- a/chromium/media/formats/webm/webm_cluster_parser.cc
+++ b/chromium/media/formats/webm/webm_cluster_parser.cc
@@ -9,8 +9,8 @@
#include <vector>
#include "base/logging.h"
-#include "base/macros.h"
#include "base/numerics/checked_math.h"
+#include "base/stl_util.h"
#include "base/sys_byteorder.h"
#include "media/base/decrypt_config.h"
#include "media/base/timestamp_constants.h"
@@ -243,7 +243,7 @@ base::TimeDelta WebMClusterParser::ReadOpusDuration(const uint8_t* data,
int opusConfig = (data[0] & kTocConfigMask) >> 3;
CHECK_GE(opusConfig, 0);
- CHECK_LT(opusConfig, static_cast<int>(arraysize(kOpusFrameDurationsMu)));
+ CHECK_LT(opusConfig, static_cast<int>(base::size(kOpusFrameDurationsMu)));
DCHECK_GT(frame_count, 0);
base::TimeDelta duration = base::TimeDelta::FromMicroseconds(
diff --git a/chromium/media/formats/webm/webm_cluster_parser_unittest.cc b/chromium/media/formats/webm/webm_cluster_parser_unittest.cc
index 7ce98a52461..ddacc0d5b27 100644
--- a/chromium/media/formats/webm/webm_cluster_parser_unittest.cc
+++ b/chromium/media/formats/webm/webm_cluster_parser_unittest.cc
@@ -15,7 +15,7 @@
#include "base/bind.h"
#include "base/logging.h"
-#include "base/macros.h"
+#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "media/base/audio_decoder_config.h"
#include "media/base/decrypt_config.h"
@@ -412,8 +412,9 @@ TEST_F(WebMClusterParserTest, HeldBackBufferHoldsBackAllTracks) {
9, // Cluster end emits all buffers and 3rd video's duration is estimated
};
- ASSERT_EQ(arraysize(kBlockInfo), arraysize(kExpectedBuffersOnPartialCluster));
- int block_count = arraysize(kBlockInfo);
+ ASSERT_EQ(base::size(kBlockInfo),
+ base::size(kExpectedBuffersOnPartialCluster));
+ int block_count = base::size(kBlockInfo);
// Iteratively create a cluster containing the first N+1 blocks and parse all
// but the last byte of the cluster (except when N==|block_count|, just parse
@@ -459,7 +460,7 @@ TEST_F(WebMClusterParserTest, HeldBackBufferHoldsBackAllTracks) {
TEST_F(WebMClusterParserTest, Reset) {
InSequence s;
- int block_count = arraysize(kDefaultBlockInfo);
+ int block_count = base::size(kDefaultBlockInfo);
std::unique_ptr<Cluster> cluster(
CreateCluster(0, kDefaultBlockInfo, block_count));
@@ -479,7 +480,7 @@ TEST_F(WebMClusterParserTest, Reset) {
}
TEST_F(WebMClusterParserTest, ParseClusterWithSingleCall) {
- int block_count = arraysize(kDefaultBlockInfo);
+ int block_count = base::size(kDefaultBlockInfo);
std::unique_ptr<Cluster> cluster(
CreateCluster(0, kDefaultBlockInfo, block_count));
@@ -489,7 +490,7 @@ TEST_F(WebMClusterParserTest, ParseClusterWithSingleCall) {
}
TEST_F(WebMClusterParserTest, ParseClusterWithMultipleCalls) {
- int block_count = arraysize(kDefaultBlockInfo);
+ int block_count = base::size(kDefaultBlockInfo);
std::unique_ptr<Cluster> cluster(
CreateCluster(0, kDefaultBlockInfo, block_count));
@@ -535,7 +536,7 @@ TEST_F(WebMClusterParserTest, ParseBlockGroup) {
{kAudioTrackNum, 0, 23, false, NULL, 0, true},
{kVideoTrackNum, 33, 34, false, NULL, 0, true},
};
- int block_count = arraysize(kBlockInfo);
+ int block_count = base::size(kBlockInfo);
const uint8_t kClusterData[] = {
0x1F, 0x43, 0xB6, 0x75, 0x9B, // Cluster(size=27)
@@ -564,7 +565,7 @@ TEST_F(WebMClusterParserTest, ParseSimpleBlockAndBlockGroupMixture) {
{kAudioTrackNum, 46, 23, false, NULL, 0, false},
{kVideoTrackNum, 67, 33, false, NULL, 0, false},
};
- int block_count = arraysize(kBlockInfo);
+ int block_count = base::size(kBlockInfo);
std::unique_ptr<Cluster> cluster(CreateCluster(0, kBlockInfo, block_count));
int result = parser_->Parse(cluster->data(), cluster->size());
@@ -586,7 +587,7 @@ TEST_F(WebMClusterParserTest, IgnoredTracks) {
{kAudioTrackNum, 46, 23, true, NULL, 0, false},
{kVideoTrackNum, 67, 34, true, NULL, 0, false},
};
- int input_block_count = arraysize(kInputBlockInfo);
+ int input_block_count = base::size(kInputBlockInfo);
const BlockInfo kOutputBlockInfo[] = {
{kAudioTrackNum, 0, 23, true, NULL, 0, false},
@@ -595,7 +596,7 @@ TEST_F(WebMClusterParserTest, IgnoredTracks) {
{kAudioTrackNum, 46, 23, true, NULL, 0, false},
{kVideoTrackNum, 67, 34, true, NULL, 0, false},
};
- int output_block_count = arraysize(kOutputBlockInfo);
+ int output_block_count = base::size(kOutputBlockInfo);
std::unique_ptr<Cluster> cluster(
CreateCluster(0, kInputBlockInfo, input_block_count));
@@ -626,7 +627,7 @@ TEST_F(WebMClusterParserTest, ParseTextTracks) {
{kTextTrackNum, 55, 44, false, NULL, 0, true},
{kVideoTrackNum, 67, 34, true, NULL, 0, false},
};
- int input_block_count = arraysize(kInputBlockInfo);
+ int input_block_count = base::size(kInputBlockInfo);
std::unique_ptr<Cluster> cluster(
CreateCluster(0, kInputBlockInfo, input_block_count));
@@ -651,7 +652,7 @@ TEST_F(WebMClusterParserTest, TextTracksSimpleBlock) {
const BlockInfo kInputBlockInfo[] = {
{ kTextTrackNum, 33, 42, true },
};
- int input_block_count = arraysize(kInputBlockInfo);
+ int input_block_count = base::size(kInputBlockInfo);
std::unique_ptr<Cluster> cluster(
CreateCluster(0, kInputBlockInfo, input_block_count));
@@ -687,7 +688,7 @@ TEST_F(WebMClusterParserTest, ParseMultipleTextTracks) {
{kVideoTrackNum, 67, 34, true, NULL, 0, false},
{kSubtitleTextTrackNum, 67, 33, false, NULL, 0, false},
};
- int input_block_count = arraysize(kInputBlockInfo);
+ int input_block_count = base::size(kInputBlockInfo);
std::unique_ptr<Cluster> cluster(
CreateCluster(0, kInputBlockInfo, input_block_count));
@@ -771,7 +772,7 @@ TEST_F(WebMClusterParserTest, ParseInvalidTextBlockGroupWithoutDuration) {
const BlockInfo kBlockInfo[] = {
{kTextTrackNum, 33, -42, false, NULL, 0, false},
};
- int block_count = arraysize(kBlockInfo);
+ int block_count = base::size(kBlockInfo);
std::unique_ptr<Cluster> cluster(CreateCluster(0, kBlockInfo, block_count));
int result = parser_->Parse(cluster->data(), cluster->size());
EXPECT_LT(result, 0);
@@ -801,7 +802,7 @@ TEST_F(WebMClusterParserTest, ParseWithDefaultDurationsSimpleBlocks) {
false},
};
- int block_count = arraysize(kBlockInfo);
+ int block_count = base::size(kBlockInfo);
std::unique_ptr<Cluster> cluster(CreateCluster(0, kBlockInfo, block_count));
// Send slightly less than the full cluster so all but the last block is
@@ -841,7 +842,7 @@ TEST_F(WebMClusterParserTest, ParseWithoutAnyDurationsSimpleBlocks) {
{kVideoTrackNum, 100, kExpectedVideoEstimationInMs, true, NULL, 0, false},
};
- int block_count1 = arraysize(kBlockInfo1);
+ int block_count1 = base::size(kBlockInfo1);
std::unique_ptr<Cluster> cluster1(
CreateCluster(0, kBlockInfo1, block_count1));
@@ -879,7 +880,7 @@ TEST_F(WebMClusterParserTest, ParseWithoutAnyDurationsSimpleBlocks) {
{kVideoTrackNum, 201, kExpectedVideoEstimationInMs, true, NULL, 0, false},
};
- int block_count2 = arraysize(kBlockInfo2);
+ int block_count2 = base::size(kBlockInfo2);
std::unique_ptr<Cluster> cluster2(
CreateCluster(0, kBlockInfo2, block_count2));
EXPECT_MEDIA_LOG(
@@ -913,7 +914,7 @@ TEST_F(WebMClusterParserTest, ParseWithoutAnyDurationsBlockGroups) {
false},
};
- int block_count1 = arraysize(kBlockInfo1);
+ int block_count1 = base::size(kBlockInfo1);
std::unique_ptr<Cluster> cluster1(
CreateCluster(0, kBlockInfo1, block_count1));
@@ -951,7 +952,7 @@ TEST_F(WebMClusterParserTest, ParseWithoutAnyDurationsBlockGroups) {
false},
};
- int block_count2 = arraysize(kBlockInfo2);
+ int block_count2 = base::size(kBlockInfo2);
std::unique_ptr<Cluster> cluster2(
CreateCluster(0, kBlockInfo2, block_count2));
EXPECT_MEDIA_LOG(
@@ -989,7 +990,7 @@ TEST_F(WebMClusterParserTest,
false},
};
- int block_count = arraysize(kBlockInfo);
+ int block_count = base::size(kBlockInfo);
std::unique_ptr<Cluster> cluster(CreateCluster(0, kBlockInfo, block_count));
// Send slightly less than the full cluster so all but the last block is
@@ -1024,7 +1025,7 @@ TEST_F(WebMClusterParserTest,
},
};
- int block_count = arraysize(kBlockInfo);
+ int block_count = base::size(kBlockInfo);
std::unique_ptr<Cluster> cluster(CreateCluster(0, kBlockInfo, block_count));
EXPECT_MEDIA_LOG(WebMSimpleBlockDurationEstimated(
WebMClusterParser::kDefaultAudioBufferDurationInMs));
@@ -1044,7 +1045,7 @@ TEST_F(WebMClusterParserTest,
{ kVideoTrackNum, 0, kTestVideoFrameDefaultDurationInMs, true },
};
- int block_count = arraysize(kBlockInfo);
+ int block_count = base::size(kBlockInfo);
std::unique_ptr<Cluster> cluster(CreateCluster(0, kBlockInfo, block_count));
int result = parser_->Parse(cluster->data(), cluster->size());
EXPECT_EQ(cluster->size(), result);
@@ -1067,7 +1068,7 @@ TEST_F(WebMClusterParserTest, ReadOpusDurationsSimpleBlockAtEndOfCluster) {
packet_ptr->data(),
packet_ptr->size()}};
- int block_count = arraysize(kBlockInfo);
+ int block_count = base::size(kBlockInfo);
std::unique_ptr<Cluster> cluster(CreateCluster(0, kBlockInfo, block_count));
int duration_ms = packet_ptr->duration_ms(); // Casts from double.
if (duration_ms > 120) {
@@ -1113,7 +1114,7 @@ TEST_F(WebMClusterParserTest, PreferOpusDurationsOverBlockDurations) {
packet_ptr->data(),
packet_ptr->size()}};
- int block_count = arraysize(block_infos);
+ int block_count = base::size(block_infos);
std::unique_ptr<Cluster> cluster(
CreateCluster(0, block_infos, block_count));
int result = parser_->Parse(cluster->data(), cluster->size());
@@ -1147,14 +1148,13 @@ TEST_F(WebMClusterParserTest, DontReadEncodedDurationWhenEncrypted) {
std::string(), kCodecOpus));
// Single Block with BlockDuration and encrypted data.
- const BlockInfo kBlockInfo[] = {{kAudioTrackNum,
- 0,
+ const BlockInfo kBlockInfo[] = {{kAudioTrackNum, 0,
kTestAudioFrameDefaultDurationInMs,
false, // Not a SimpleBlock
kEncryptedFrame, // Encrypted frame data
- arraysize(kEncryptedFrame)}};
+ base::size(kEncryptedFrame)}};
- int block_count = arraysize(kBlockInfo);
+ int block_count = base::size(kBlockInfo);
std::unique_ptr<Cluster> cluster(CreateCluster(0, kBlockInfo, block_count));
int result = parser_->Parse(cluster->data(), cluster->size());
EXPECT_EQ(cluster->size(), result);
diff --git a/chromium/media/formats/webm/webm_content_encodings_client.cc b/chromium/media/formats/webm/webm_content_encodings_client.cc
index 35749ebb445..6e58adf0343 100644
--- a/chromium/media/formats/webm/webm_content_encodings_client.cc
+++ b/chromium/media/formats/webm/webm_content_encodings_client.cc
@@ -52,8 +52,7 @@ WebMParserClient* WebMContentEncodingsClient::OnListStart(int id) {
return this;
}
- // This should not happen if WebMListParser is working properly.
- DCHECK(false);
+ MEDIA_LOG(ERROR, media_log_) << "Unsupported element " << id;
return NULL;
}
@@ -130,8 +129,7 @@ bool WebMContentEncodingsClient::OnListEnd(int id) {
return true;
}
- // This should not happen if WebMListParser is working properly.
- DCHECK(false);
+ MEDIA_LOG(ERROR, media_log_) << "Unsupported element " << id;
return false;
}
@@ -240,8 +238,7 @@ bool WebMContentEncodingsClient::OnUInt(int id, int64_t val) {
return true;
}
- // This should not happen if WebMListParser is working properly.
- DCHECK(false);
+ MEDIA_LOG(ERROR, media_log_) << "Unsupported element " << id;
return false;
}
@@ -252,20 +249,24 @@ bool WebMContentEncodingsClient::OnBinary(int id,
int size) {
DCHECK(cur_content_encoding_.get());
DCHECK(data);
- DCHECK_GT(size, 0);
- if (id == kWebMIdContentEncKeyID) {
- if (!cur_content_encoding_->encryption_key_id().empty()) {
- MEDIA_LOG(ERROR, media_log_) << "Unexpected multiple ContentEncKeyID";
- return false;
- }
- cur_content_encoding_->SetEncryptionKeyId(data, size);
- return true;
+ if (id != kWebMIdContentEncKeyID) {
+ MEDIA_LOG(ERROR, media_log_) << "Unsupported element " << id;
+ return false;
}
- // This should not happen if WebMListParser is working properly.
- DCHECK(false);
- return false;
+ if (!cur_content_encoding_->encryption_key_id().empty()) {
+ MEDIA_LOG(ERROR, media_log_) << "Unexpected multiple ContentEncKeyID";
+ return false;
+ }
+
+ if (size <= 0) {
+ MEDIA_LOG(ERROR, media_log_) << "Invalid ContentEncKeyID size: " << size;
+ return false;
+ }
+
+ cur_content_encoding_->SetEncryptionKeyId(data, size);
+ return true;
}
} // namespace media
diff --git a/chromium/media/formats/webm/webm_parser.cc b/chromium/media/formats/webm/webm_parser.cc
index fe6b72f3cb2..d17781cdc48 100644
--- a/chromium/media/formats/webm/webm_parser.cc
+++ b/chromium/media/formats/webm/webm_parser.cc
@@ -7,9 +7,9 @@
// This file contains code to parse WebM file elements. It was created
// from information in the Matroska spec.
// http://www.matroska.org/technical/specs/index.html
-// This file contains code for encrypted WebM. Current WebM
-// encrypted request for comments specification is here
-// http://wiki.webmproject.org/encryption/webm-encryption-rfc
+//
+// WebM Container Guidelines is at https://www.webmproject.org/docs/container/
+// WebM Encryption spec is at: https://www.webmproject.org/docs/webm-encryption/
#include <stddef.h>
@@ -17,8 +17,8 @@
#include <limits>
#include "base/logging.h"
-#include "base/macros.h"
#include "base/numerics/safe_conversions.h"
+#include "base/stl_util.h"
#include "media/formats/webm/webm_constants.h"
namespace media {
@@ -50,6 +50,11 @@ struct ListElementInfo {
// appear in the list, a parsing error is signalled. Some elements are
// marked as SKIP because they are valid, but we don't care about them
// right now.
+//
+// TODO(xhwang): There are many Matroska elements listed here which are not
+// supported by WebM. Since this is a WebM parser, maybe we should not list them
+// here so that the parsing clients doesn't need to handle them.
+
static const ElementIdInfo kEBMLHeaderIds[] = {
{UINT, kWebMIdEBMLVersion},
{UINT, kWebMIdEBMLReadVersion},
@@ -388,7 +393,7 @@ static const ElementIdInfo kSimpleTagIds[] = {
};
#define LIST_ELEMENT_INFO(id, level, id_info) \
- { (id), (level), (id_info), arraysize(id_info) }
+ { (id), (level), (id_info), base::size(id_info) }
static const ListElementInfo kListElementInfo[] = {
LIST_ELEMENT_INFO(kWebMIdCluster, 1, kClusterIds),
@@ -562,7 +567,7 @@ static ElementType FindIdType(int id,
// Finds ListElementInfo for a specific ID.
static const ListElementInfo* FindListInfo(int id) {
- for (size_t i = 0; i < arraysize(kListElementInfo); ++i) {
+ for (size_t i = 0; i < base::size(kListElementInfo); ++i) {
if (id == kListElementInfo[i].id_)
return &kListElementInfo[i];
}
@@ -987,7 +992,7 @@ bool WebMListParser::OnListEnd() {
bool WebMListParser::IsSiblingOrAncestor(int id_a, int id_b) const {
if (id_a == kWebMIdCluster) {
// kWebMIdCluster siblings.
- for (size_t i = 0; i < arraysize(kSegmentIds); i++) {
+ for (size_t i = 0; i < base::size(kSegmentIds); i++) {
if (kSegmentIds[i].id_ == id_b)
return true;
}
diff --git a/chromium/media/formats/webm/webm_parser_unittest.cc b/chromium/media/formats/webm/webm_parser_unittest.cc
index 11f3380417b..ae1f63f0843 100644
--- a/chromium/media/formats/webm/webm_parser_unittest.cc
+++ b/chromium/media/formats/webm/webm_parser_unittest.cc
@@ -9,7 +9,7 @@
#include <memory>
-#include "base/macros.h"
+#include "base/stl_util.h"
#include "media/formats/webm/cluster_builder.h"
#include "media/formats/webm/webm_constants.h"
#include "testing/gmock/include/gmock/gmock.h"
@@ -346,7 +346,7 @@ TEST_F(WebMParserTest, ReservedIds) {
const uint8_t* kBuffers[] = {k1ByteReservedId, k2ByteReservedId,
k3ByteReservedId, k4ByteReservedId};
- for (size_t i = 0; i < arraysize(kBuffers); i++) {
+ for (size_t i = 0; i < base::size(kBuffers); i++) {
int id;
int64_t element_size;
int buffer_size = 2 + i;
@@ -374,7 +374,7 @@ TEST_F(WebMParserTest, ReservedSizes) {
k5ByteReservedSize, k6ByteReservedSize,
k7ByteReservedSize, k8ByteReservedSize};
- for (size_t i = 0; i < arraysize(kBuffers); i++) {
+ for (size_t i = 0; i < base::size(kBuffers); i++) {
int id;
int64_t element_size;
int buffer_size = 2 + i;
diff --git a/chromium/media/gpu/BUILD.gn b/chromium/media/gpu/BUILD.gn
index e939892698c..8cde0b7c5f1 100644
--- a/chromium/media/gpu/BUILD.gn
+++ b/chromium/media/gpu/BUILD.gn
@@ -8,6 +8,7 @@ import("//build/config/ui.gni")
import("//media/gpu/args.gni")
import("//media/media_options.gni")
import("//testing/test.gni")
+import("//tools/generate_stubs/rules.gni")
buildflag_header("buildflags") {
header = "buildflags.h"
@@ -24,38 +25,13 @@ if (is_mac) {
}
if (is_chromeos && use_v4lplugin) {
- action("libv4l2_generate_stubs") {
+ generate_stubs("libv4l2_stubs") {
extra_header = "v4l2/v4l2_stub_header.fragment"
-
- script = "../../tools/generate_stubs/generate_stubs.py"
- sources = [
- "v4l2/v4l2.sig",
- ]
- inputs = [
- extra_header,
- ]
- stubs_filename_root = "v4l2_stubs"
-
- outputs = [
- "$target_gen_dir/v4l2/$stubs_filename_root.cc",
- "$target_gen_dir/v4l2/$stubs_filename_root.h",
- ]
- args = [
- "-i",
- rebase_path("$target_gen_dir/v4l2", root_build_dir),
- "-o",
- rebase_path("$target_gen_dir/v4l2", root_build_dir),
- "-t",
- "posix_stubs",
- "-e",
- rebase_path(extra_header, root_build_dir),
- "-s",
- stubs_filename_root,
- "-p",
- "media/gpu/v4l2",
+ sigs = [ "v4l2/v4l2.sig" ]
+ output_name = "v4l2/v4l2_stubs"
+ deps = [
+ "//base",
]
-
- args += rebase_path(sources, root_build_dir)
}
}
@@ -107,7 +83,14 @@ component("gpu") {
"gpu_video_decode_accelerator_factory.h",
"gpu_video_encode_accelerator_factory.cc",
"gpu_video_encode_accelerator_factory.h",
+ "image_processor.cc",
"image_processor.h",
+ "image_processor_factory.cc",
+ "image_processor_factory.h",
+ "libyuv_image_processor.cc",
+ "libyuv_image_processor.h",
+ "platform_video_frame.cc",
+ "platform_video_frame.h",
]
public_deps = [
@@ -200,6 +183,8 @@ component("gpu") {
]
libs += [ "android" ]
deps += [
+ "//gpu/ipc/common:android_image_reader_utils",
+
# TODO(crbug.com/789435): This can be removed once CdmManager is removed.
"//gpu/ipc/common:ipc_common_sources",
"//media/mojo:buildflags",
@@ -216,17 +201,20 @@ component("gpu") {
}
if (use_v4lplugin) {
- sources += get_target_outputs(":libv4l2_generate_stubs")
- deps += [ ":libv4l2_generate_stubs" ]
+ deps += [ ":libv4l2_stubs" ]
}
if (use_v4l2_codec) {
- deps += [ "//ui/ozone" ]
sources += [
"v4l2/generic_v4l2_device.cc",
"v4l2/generic_v4l2_device.h",
+ "v4l2/v4l2_decode_surface.cc",
+ "v4l2/v4l2_decode_surface.h",
+ "v4l2/v4l2_decode_surface_handler.h",
"v4l2/v4l2_device.cc",
"v4l2/v4l2_device.h",
+ "v4l2/v4l2_h264_accelerator.cc",
+ "v4l2/v4l2_h264_accelerator.h",
"v4l2/v4l2_image_processor.cc",
"v4l2/v4l2_image_processor.h",
"v4l2/v4l2_jpeg_decode_accelerator.cc",
@@ -239,7 +227,12 @@ component("gpu") {
"v4l2/v4l2_video_decode_accelerator.h",
"v4l2/v4l2_video_encode_accelerator.cc",
"v4l2/v4l2_video_encode_accelerator.h",
+ "v4l2/v4l2_vp8_accelerator.cc",
+ "v4l2/v4l2_vp8_accelerator.h",
+ "v4l2/v4l2_vp9_accelerator.cc",
+ "v4l2/v4l2_vp9_accelerator.h",
]
+
libs = [
"EGL",
"GLESv2",
@@ -258,7 +251,6 @@ component("gpu") {
if (is_win) {
sources += [
- "windows/d3d11_create_device_cb.h",
"windows/d3d11_h264_accelerator.cc",
"windows/d3d11_h264_accelerator.h",
"windows/d3d11_picture_buffer.cc",
@@ -287,7 +279,7 @@ component("gpu") {
"//build/config/compiler:no_size_t_to_int_warning",
"//third_party/khronos:khronos_headers",
]
- public_deps += [ "//media/base/win" ]
+ public_deps += [ "//media/base/win:media_foundation_util" ]
deps += [
"//third_party/angle:includes",
"//ui/display",
@@ -317,6 +309,10 @@ component("gpu") {
]
}
}
+
+ if (use_ozone) {
+ deps += [ "//ui/ozone" ]
+ }
}
source_set("common") {
@@ -365,10 +361,6 @@ source_set("common") {
# TODO(watk): Run this on bots. http://crbug.com/461437
if (is_win || is_android || use_v4l2_codec || use_vaapi) {
test("video_decode_accelerator_unittest") {
- sources = [
- "test/video_accelerator_unittest_helpers.h",
- ]
-
data = [
"//media/test/data/",
]
@@ -394,9 +386,12 @@ if (is_win || is_android || use_v4l2_codec || use_vaapi) {
configs += [ "//third_party/khronos:khronos_headers" ]
if (is_win || is_chromeos || use_v4l2_codec) {
- sources += [ "video_decode_accelerator_unittest.cc" ]
+ sources = [
+ "video_decode_accelerator_unittest.cc",
+ ]
deps += [
- "test:helpers",
+ "test:decode_helpers",
+ "test:frame_validator",
"//mojo/core/embedder",
"//ui/display",
"//ui/display/types",
@@ -481,7 +476,7 @@ source_set("android_video_decode_accelerator_unittests") {
if (use_v4l2_codec || use_vaapi || is_mac || is_win) {
test("video_encode_accelerator_unittest") {
deps = [
- "test:helpers",
+ "test:encode_helpers",
"//base",
"//base/test:test_support",
"//media:test_support",
@@ -511,6 +506,7 @@ if (use_v4l2_codec || use_vaapi || is_mac || is_win) {
if (use_v4l2_codec || use_vaapi) {
test("jpeg_encode_accelerator_unittest") {
deps = [
+ "test:helpers",
"//base",
"//base/test:test_support",
"//media:test_support",
@@ -529,7 +525,6 @@ if (use_v4l2_codec || use_vaapi) {
configs += [ "//third_party/libyuv:libyuv_config" ]
sources = [
"jpeg_encode_accelerator_unittest.cc",
- "test/video_accelerator_unittest_helpers.h",
]
if (use_x11) {
deps += [ "//ui/gfx/x" ]
@@ -543,6 +538,7 @@ if (use_v4l2_codec || use_vaapi) {
if (is_chromeos || is_linux) {
test("jpeg_decode_accelerator_unittest") {
deps = [
+ "test:helpers",
"//base",
"//media:test_support",
"//media/gpu",
@@ -560,7 +556,6 @@ if (is_chromeos || is_linux) {
configs += [ "//third_party/libyuv:libyuv_config" ]
sources = [
"jpeg_decode_accelerator_unittest.cc",
- "test/video_accelerator_unittest_helpers.h",
]
data = [
"//media/test/data/peach_pi-1280x720.jpg",
@@ -626,8 +621,6 @@ source_set("unit_tests") {
sources += [
"windows/d3d11_cdm_proxy_unittest.cc",
"windows/d3d11_decryptor_unittest.cc",
- "windows/d3d11_mocks.cc",
- "windows/d3d11_mocks.h",
"windows/d3d11_video_decoder_unittest.cc",
]
libs = [ "dxguid.lib" ]
@@ -645,6 +638,7 @@ if (is_chromeos) {
]
deps = [
":buildflags",
+ "test:frame_validator",
"test:video_player",
"//base/test:test_support",
"//media:test_support",
@@ -656,3 +650,18 @@ if (is_chromeos) {
}
}
}
+
+test("image_processor_test") {
+ sources = [
+ "image_processor_test.cc",
+ ]
+ deps = [
+ ":buildflags",
+ ":gpu",
+ "test:image_processor",
+ "//base/test:test_support",
+ "//media:test_support",
+ "//mojo/core/embedder",
+ "//testing/gtest",
+ ]
+}
diff --git a/chromium/media/gpu/OWNERS b/chromium/media/gpu/OWNERS
index a81c64a5528..1ae3f8c0918 100644
--- a/chromium/media/gpu/OWNERS
+++ b/chromium/media/gpu/OWNERS
@@ -1,11 +1,11 @@
dalecurtis@chromium.org
dcastagna@chromium.org
hiroh@chromium.org
+acourbot@chromium.org
jcliang@chromium.org
kcwu@chromium.org
posciak@chromium.org
sandersd@chromium.org
-wuchengli@chromium.org
# For Android media gpu files.
per-file *android*=liberato@chromium.org
diff --git a/chromium/media/gpu/accelerated_video_decoder.h b/chromium/media/gpu/accelerated_video_decoder.h
index ee9dc93ee26..fb7acd82a36 100644
--- a/chromium/media/gpu/accelerated_video_decoder.h
+++ b/chromium/media/gpu/accelerated_video_decoder.h
@@ -66,11 +66,13 @@ class MEDIA_GPU_EXPORT AcceleratedVideoDecoder {
// we need a new set of them, or when an error occurs.
virtual DecodeResult Decode() WARN_UNUSED_RESULT = 0;
- // Return dimensions/required number of output surfaces that client should
- // be ready to provide for the decoder to function properly.
- // To be used after Decode() returns kAllocateNewSurfaces.
+ // Return dimensions/required number of pictures that client should be ready
+ // to provide for the decoder to function properly (of which up to
+ // GetNumReferenceFrames() might be needed for internal decoding). To be used
+ // after Decode() returns kAllocateNewSurfaces.
virtual gfx::Size GetPicSize() const = 0;
virtual size_t GetRequiredNumOfPictures() const = 0;
+ virtual size_t GetNumReferenceFrames() const = 0;
// About 3 secs for 30 fps video. When the new sized keyframe is missed, the
// decoder cannot decode the frame. The number of frames are skipped until
diff --git a/chromium/media/gpu/android/android_video_surface_chooser.h b/chromium/media/gpu/android/android_video_surface_chooser.h
index f02c4c7dac6..c5254c6e8a4 100644
--- a/chromium/media/gpu/android/android_video_surface_chooser.h
+++ b/chromium/media/gpu/android/android_video_surface_chooser.h
@@ -57,6 +57,9 @@ class MEDIA_GPU_EXPORT AndroidVideoSurfaceChooser {
// SurfaceControl where the TextureOwner can be promoted to an overlay
// dynamically by the compositor.
bool always_use_texture_owner = false;
+
+ // Is the video persistent (PIP)?
+ bool is_persistent_video = false;
};
// Notify the client that |overlay| is ready for use. The client may get
diff --git a/chromium/media/gpu/android/android_video_surface_chooser_impl.cc b/chromium/media/gpu/android/android_video_surface_chooser_impl.cc
index b0843a7e616..e76784096f6 100644
--- a/chromium/media/gpu/android/android_video_surface_chooser_impl.cc
+++ b/chromium/media/gpu/android/android_video_surface_chooser_impl.cc
@@ -113,6 +113,11 @@ void AndroidVideoSurfaceChooserImpl::Choose() {
if (!current_state_.is_compositor_promotable)
new_overlay_state = kUsingTextureOwner;
+ // If we're PIP'd, then don't use an overlay unless it is required. It isn't
+ // positioned exactly right in some cases (crbug.com/917984).
+ if (current_state_.is_persistent_video)
+ new_overlay_state = kUsingTextureOwner;
+
// If we're expecting a relayout, then don't transition to overlay if we're
// not already in one. We don't want to transition out, though. This lets us
// delay entering on a fullscreen transition until blink relayout is complete.
diff --git a/chromium/media/gpu/android/android_video_surface_chooser_impl_unittest.cc b/chromium/media/gpu/android/android_video_surface_chooser_impl_unittest.cc
index c5ac5a76489..1abd2538b78 100644
--- a/chromium/media/gpu/android/android_video_surface_chooser_impl_unittest.cc
+++ b/chromium/media/gpu/android/android_video_surface_chooser_impl_unittest.cc
@@ -67,7 +67,12 @@ enum class IsSecure { No, Yes };
enum class IsCCPromotable { No, Yes };
enum class IsExpectingRelayout { No, Yes };
enum class PromoteAggressively { No, Yes };
-enum class IsVideoRotated { No, Yes };
+// Since gtest only supports ten args, combine some uncommon ones.
+enum class MiscFlags { None, Rotated, Persistent };
+
+// Allow any misc flag values.
+#define AnyMisc \
+ Values(MiscFlags::None, MiscFlags::Rotated, MiscFlags::Persistent)
using TestParams = std::tuple<ShouldUseOverlay,
ShouldBePowerEfficient,
@@ -78,7 +83,7 @@ using TestParams = std::tuple<ShouldUseOverlay,
IsCCPromotable,
IsExpectingRelayout,
PromoteAggressively,
- IsVideoRotated>;
+ MiscFlags>;
// Useful macro for instantiating tests.
#define Either(x) Values(x::No, x::Yes)
@@ -88,6 +93,8 @@ using TestParams = std::tuple<ShouldUseOverlay,
// c++14 can remove |n|, and std::get() by type.
#define IsYes(type, n) (::testing::get<n>(GetParam()) == type::Yes)
#define IsIgnored(type, n) (::testing::get<n>(GetParam()) == type::Ignored)
+// |v| is the value to check for equality.
+#define IsEqual(type, n, v) (::testing::get<n>(GetParam()) == type::v)
} // namespace
@@ -402,7 +409,8 @@ TEST_P(AndroidVideoSurfaceChooserImplTest, OverlayIsUsedOrNotBasedOnState) {
chooser_state_.is_expecting_relayout = IsYes(IsExpectingRelayout, 7);
chooser_state_.promote_aggressively = IsYes(PromoteAggressively, 8);
chooser_state_.video_rotation =
- IsYes(IsVideoRotated, 9) ? VIDEO_ROTATION_90 : VIDEO_ROTATION_0;
+ IsEqual(MiscFlags, 9, Rotated) ? VIDEO_ROTATION_90 : VIDEO_ROTATION_0;
+ chooser_state_.is_persistent_video = IsEqual(MiscFlags, 9, Persistent);
MockAndroidOverlay* overlay = overlay_.get();
@@ -439,7 +447,7 @@ INSTANTIATE_TEST_CASE_P(NoFullscreenUsesTextureOwner,
Either(IsCCPromotable),
Either(IsExpectingRelayout),
Values(PromoteAggressively::No),
- Either(IsVideoRotated)));
+ AnyMisc));
INSTANTIATE_TEST_CASE_P(FullscreenUsesOverlay,
AndroidVideoSurfaceChooserImplTest,
@@ -452,7 +460,7 @@ INSTANTIATE_TEST_CASE_P(FullscreenUsesOverlay,
Values(IsCCPromotable::Yes),
Values(IsExpectingRelayout::No),
Either(PromoteAggressively),
- Values(IsVideoRotated::No)));
+ Values(MiscFlags::None)));
INSTANTIATE_TEST_CASE_P(RequiredUsesOverlay,
AndroidVideoSurfaceChooserImplTest,
@@ -465,7 +473,8 @@ INSTANTIATE_TEST_CASE_P(RequiredUsesOverlay,
Either(IsCCPromotable),
Either(IsExpectingRelayout),
Either(PromoteAggressively),
- Values(IsVideoRotated::No)));
+ Values(MiscFlags::None,
+ MiscFlags::Persistent)));
// Secure textures should use an overlay if the compositor will promote them.
// We don't care about relayout, since it's transient; either behavior is okay
@@ -481,7 +490,7 @@ INSTANTIATE_TEST_CASE_P(SecureUsesOverlayIfPromotable,
Values(IsCCPromotable::Yes),
Values(IsExpectingRelayout::No),
Either(PromoteAggressively),
- Values(IsVideoRotated::No)));
+ Values(MiscFlags::None)));
// For all dynamic cases, we shouldn't use an overlay if the compositor won't
// promote it, unless it's marked as required. This includes secure surfaces,
@@ -499,7 +508,7 @@ INSTANTIATE_TEST_CASE_P(NotCCPromotableNotRequiredUsesTextureOwner,
Values(IsCCPromotable::No),
Either(IsExpectingRelayout),
Either(PromoteAggressively),
- Either(IsVideoRotated)));
+ AnyMisc));
// If we're expecting a relayout, then we should never use an overlay unless
// it's required.
@@ -514,7 +523,7 @@ INSTANTIATE_TEST_CASE_P(InsecureExpectingRelayoutUsesTextureOwner,
Either(IsCCPromotable),
Values(IsExpectingRelayout::Yes),
Either(PromoteAggressively),
- Either(IsVideoRotated)));
+ AnyMisc));
// "is_fullscreen" should be enough to trigger an overlay pre-M.
INSTANTIATE_TEST_CASE_P(NotDynamicInFullscreenUsesOverlay,
@@ -528,7 +537,8 @@ INSTANTIATE_TEST_CASE_P(NotDynamicInFullscreenUsesOverlay,
Either(IsCCPromotable),
Either(IsExpectingRelayout),
Either(PromoteAggressively),
- Values(IsVideoRotated::No)));
+ Values(MiscFlags::None,
+ MiscFlags::Persistent)));
// "is_secure" should be enough to trigger an overlay pre-M.
INSTANTIATE_TEST_CASE_P(NotDynamicSecureUsesOverlay,
@@ -542,7 +552,8 @@ INSTANTIATE_TEST_CASE_P(NotDynamicSecureUsesOverlay,
Either(IsCCPromotable),
Either(IsExpectingRelayout),
Either(PromoteAggressively),
- Values(IsVideoRotated::No)));
+ Values(MiscFlags::None,
+ MiscFlags::Persistent)));
// "is_required" should be enough to trigger an overlay pre-M.
INSTANTIATE_TEST_CASE_P(NotDynamicRequiredUsesOverlay,
@@ -556,7 +567,8 @@ INSTANTIATE_TEST_CASE_P(NotDynamicRequiredUsesOverlay,
Either(IsCCPromotable),
Either(IsExpectingRelayout),
Either(PromoteAggressively),
- Values(IsVideoRotated::No)));
+ Values(MiscFlags::None,
+ MiscFlags::Persistent)));
// If we're promoting aggressively, then we should request power efficient.
INSTANTIATE_TEST_CASE_P(AggressiveOverlayIsPowerEfficient,
@@ -570,7 +582,7 @@ INSTANTIATE_TEST_CASE_P(AggressiveOverlayIsPowerEfficient,
Values(IsCCPromotable::Yes),
Values(IsExpectingRelayout::No),
Values(PromoteAggressively::Yes),
- Values(IsVideoRotated::No)));
+ Values(MiscFlags::None)));
// Rotated video is unsupported for overlays in all cases.
INSTANTIATE_TEST_CASE_P(IsVideoRotatedUsesTextureOwner,
@@ -584,6 +596,20 @@ INSTANTIATE_TEST_CASE_P(IsVideoRotatedUsesTextureOwner,
Either(IsCCPromotable),
Either(IsExpectingRelayout),
Either(PromoteAggressively),
- Values(IsVideoRotated::Yes)));
+ Values(MiscFlags::Rotated)));
+
+// Persistent, non-required video should not use an overlay.
+INSTANTIATE_TEST_CASE_P(FullscreenPersistentVideoUsesSurfaceTexture,
+ AndroidVideoSurfaceChooserImplTest,
+ Combine(Values(ShouldUseOverlay::No),
+ Values(ShouldBePowerEfficient::Ignored),
+ Values(AllowDynamic::Yes),
+ Values(IsRequired::No),
+ Values(IsFullscreen::Yes),
+ Either(IsSecure),
+ Values(IsCCPromotable::Yes),
+ Values(IsExpectingRelayout::No),
+ Either(PromoteAggressively),
+ Values(MiscFlags::Persistent)));
} // namespace media
diff --git a/chromium/media/gpu/android/codec_image.cc b/chromium/media/gpu/android/codec_image.cc
index 5842a6560cc..6554f973a05 100644
--- a/chromium/media/gpu/android/codec_image.cc
+++ b/chromium/media/gpu/android/codec_image.cc
@@ -8,6 +8,7 @@
#include <memory>
+#include "base/android/scoped_hardware_buffer_fence_sync.h"
#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
#include "gpu/command_buffer/service/texture_manager.h"
#include "ui/gl/gl_context.h"
@@ -237,7 +238,7 @@ void CodecImage::ReleaseCodecBuffer() {
phase_ = Phase::kInvalidated;
}
-std::unique_ptr<gl::GLImage::ScopedHardwareBuffer>
+std::unique_ptr<base::android::ScopedHardwareBufferFenceSync>
CodecImage::GetAHardwareBuffer() {
DCHECK(texture_owner_);
diff --git a/chromium/media/gpu/android/codec_image.h b/chromium/media/gpu/android/codec_image.h
index 5835c4fab5a..b99deb73544 100644
--- a/chromium/media/gpu/android/codec_image.h
+++ b/chromium/media/gpu/android/codec_image.h
@@ -17,6 +17,12 @@
#include "media/gpu/android/surface_texture_gl_owner.h"
#include "media/gpu/media_gpu_export.h"
+namespace base {
+namespace android {
+class ScopedHardwareBufferFenceSync;
+} // namespace android
+} // namespace base
+
namespace media {
// A GLImage that renders MediaCodec buffers to a TextureOwner or overlay
@@ -54,7 +60,8 @@ class MEDIA_GPU_EXPORT CodecImage : public gpu::gles2::GLStreamTextureImage {
void OnMemoryDump(base::trace_event::ProcessMemoryDump* pmd,
uint64_t process_tracing_id,
const std::string& dump_name) override;
- std::unique_ptr<ScopedHardwareBuffer> GetAHardwareBuffer() override;
+ std::unique_ptr<base::android::ScopedHardwareBufferFenceSync>
+ GetAHardwareBuffer() override;
// gpu::gles2::GLStreamTextureMatrix implementation
void GetTextureMatrix(float xform[16]) override;
void NotifyPromotionHint(bool promotion_hint,
diff --git a/chromium/media/gpu/android/image_reader_gl_owner.cc b/chromium/media/gpu/android/image_reader_gl_owner.cc
index 94f02eacb0b..1df7765ba72 100644
--- a/chromium/media/gpu/android/image_reader_gl_owner.cc
+++ b/chromium/media/gpu/android/image_reader_gl_owner.cc
@@ -9,6 +9,7 @@
#include <stdint.h>
#include "base/android/jni_android.h"
+#include "base/android/scoped_hardware_buffer_fence_sync.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/metrics/histogram_functions.h"
@@ -48,14 +49,14 @@ struct FrameAvailableEvent_ImageReader
};
class ImageReaderGLOwner::ScopedHardwareBufferImpl
- : public gl::GLImage::ScopedHardwareBuffer {
+ : public base::android::ScopedHardwareBufferFenceSync {
public:
ScopedHardwareBufferImpl(scoped_refptr<ImageReaderGLOwner> texture_owner,
AImage* image,
base::android::ScopedHardwareBufferHandle handle,
base::ScopedFD fence_fd)
- : gl::GLImage::ScopedHardwareBuffer(std::move(handle),
- std::move(fence_fd)),
+ : base::android::ScopedHardwareBufferFenceSync(std::move(handle),
+ std::move(fence_fd)),
texture_owner_(std::move(texture_owner)),
image_(image) {}
~ScopedHardwareBufferImpl() override {
@@ -276,7 +277,7 @@ bool ImageReaderGLOwner::MaybeDeleteCurrentImage() {
return gpu::DeleteAImageAsync(current_image_, &loader_);
}
-std::unique_ptr<gl::GLImage::ScopedHardwareBuffer>
+std::unique_ptr<base::android::ScopedHardwareBufferFenceSync>
ImageReaderGLOwner::GetAHardwareBuffer() {
if (!current_image_)
return nullptr;
diff --git a/chromium/media/gpu/android/image_reader_gl_owner.h b/chromium/media/gpu/android/image_reader_gl_owner.h
index 897624e197e..ce53effc594 100644
--- a/chromium/media/gpu/android/image_reader_gl_owner.h
+++ b/chromium/media/gpu/android/image_reader_gl_owner.h
@@ -13,6 +13,12 @@
#include "ui/gl/gl_fence_egl.h"
#include "ui/gl/gl_image_ahardwarebuffer.h"
+namespace base {
+namespace android {
+class ScopedHardwareBufferFenceSync;
+} // namespace android
+} // namespace base
+
namespace media {
struct FrameAvailableEvent_ImageReader;
@@ -35,8 +41,8 @@ class MEDIA_GPU_EXPORT ImageReaderGLOwner : public TextureOwner {
void IgnorePendingRelease() override;
bool IsExpectingFrameAvailable() override;
void WaitForFrameAvailable() override;
- std::unique_ptr<gl::GLImage::ScopedHardwareBuffer> GetAHardwareBuffer()
- override;
+ std::unique_ptr<base::android::ScopedHardwareBufferFenceSync>
+ GetAHardwareBuffer() override;
protected:
void OnTextureDestroyed(gpu::gles2::AbstractTexture*) override;
diff --git a/chromium/media/gpu/android/media_codec_video_decoder.cc b/chromium/media/gpu/android/media_codec_video_decoder.cc
index 9e7f1ed8fdf..91f1b6cb7e8 100644
--- a/chromium/media/gpu/android/media_codec_video_decoder.cc
+++ b/chromium/media/gpu/android/media_codec_video_decoder.cc
@@ -186,13 +186,15 @@ void MediaCodecVideoDecoder::Destroy() {
StartDrainingCodec(DrainType::kForDestroy);
}
-void MediaCodecVideoDecoder::Initialize(
- const VideoDecoderConfig& config,
- bool low_delay,
- CdmContext* cdm_context,
- const InitCB& init_cb,
- const OutputCB& output_cb,
- const WaitingForDecryptionKeyCB& /* waiting_for_decryption_key_cb */) {
+void MediaCodecVideoDecoder::Initialize(const VideoDecoderConfig& config,
+ bool low_delay,
+ CdmContext* cdm_context,
+ const InitCB& init_cb,
+ const OutputCB& output_cb,
+ const WaitingCB& waiting_cb) {
+ DCHECK(output_cb);
+ DCHECK(waiting_cb);
+
const bool first_init = !decoder_config_.IsValidConfig();
DVLOG(1) << (first_init ? "Initializing" : "Reinitializing")
<< " MCVD with config: " << config.AsHumanReadableString()
@@ -215,6 +217,7 @@ void MediaCodecVideoDecoder::Initialize(
surface_chooser_helper_.SetVideoRotation(decoder_config_.video_rotation());
output_cb_ = output_cb;
+ waiting_cb_ = waiting_cb;
#if BUILDFLAG(USE_PROPRIETARY_CODECS)
if (config.codec() == kCodecH264)
@@ -375,6 +378,8 @@ void MediaCodecVideoDecoder::OnOverlayInfoChanged(
bool overlay_changed = !overlay_info_.RefersToSameOverlayAs(overlay_info);
overlay_info_ = overlay_info;
surface_chooser_helper_.SetIsFullscreen(overlay_info_.is_fullscreen);
+ surface_chooser_helper_.SetIsPersistentVideo(
+ overlay_info_.is_persistent_video);
surface_chooser_helper_.UpdateChooserState(
overlay_changed ? base::make_optional(CreateOverlayFactoryCb())
: base::nullopt);
@@ -661,6 +666,7 @@ bool MediaCodecVideoDecoder::QueueInput() {
case CodecWrapper::QueueStatus::kNoKey:
// Retry when a key is added.
waiting_for_key_ = true;
+ waiting_cb_.Run(WaitingReason::kNoDecryptionKey);
return false;
case CodecWrapper::QueueStatus::kError:
EnterTerminalState(State::kError);
diff --git a/chromium/media/gpu/android/media_codec_video_decoder.h b/chromium/media/gpu/android/media_codec_video_decoder.h
index 38cf6227919..39dd102a3da 100644
--- a/chromium/media/gpu/android/media_codec_video_decoder.h
+++ b/chromium/media/gpu/android/media_codec_video_decoder.h
@@ -66,13 +66,12 @@ class MEDIA_GPU_EXPORT MediaCodecVideoDecoder : public VideoDecoder,
// VideoDecoder implementation:
std::string GetDisplayName() const override;
- void Initialize(
- const VideoDecoderConfig& config,
- bool low_delay,
- CdmContext* cdm_context,
- const InitCB& init_cb,
- const OutputCB& output_cb,
- const WaitingForDecryptionKeyCB& waiting_for_decryption_key_cb) override;
+ void Initialize(const VideoDecoderConfig& config,
+ bool low_delay,
+ CdmContext* cdm_context,
+ const InitCB& init_cb,
+ const OutputCB& output_cb,
+ const WaitingCB& waiting_cb) override;
void Decode(scoped_refptr<DecoderBuffer> buffer,
const DecodeCB& decode_cb) override;
void Reset(const base::Closure& closure) override;
@@ -226,9 +225,10 @@ class MEDIA_GPU_EXPORT MediaCodecVideoDecoder : public VideoDecoder,
// The EOS decode cb for an EOS currently being processed by the codec. Called
// when the EOS is output.
- VideoDecoder::DecodeCB eos_decode_cb_;
+ DecodeCB eos_decode_cb_;
- VideoDecoder::OutputCB output_cb_;
+ OutputCB output_cb_;
+ WaitingCB waiting_cb_;
VideoDecoderConfig decoder_config_;
// Codec specific data (SPS and PPS for H264). Some MediaCodecs initialize
diff --git a/chromium/media/gpu/android/media_codec_video_decoder_unittest.cc b/chromium/media/gpu/android/media_codec_video_decoder_unittest.cc
index 013ecf51a61..1f09e9c6d7b 100644
--- a/chromium/media/gpu/android/media_codec_video_decoder_unittest.cc
+++ b/chromium/media/gpu/android/media_codec_video_decoder_unittest.cc
@@ -183,7 +183,7 @@ class MediaCodecVideoDecoderTest : public testing::TestWithParam<VideoCodec> {
auto init_cb = [](bool* result_out, bool result) { *result_out = result; };
mcvd_->Initialize(config, false, cdm_.get(), base::Bind(init_cb, &result),
base::BindRepeating(&OutputCb, &most_recent_frame_),
- base::NullCallback());
+ base::DoNothing());
base::RunLoop().RunUntilIdle();
// If there is a CDM available, then we expect that MCVD will be waiting
diff --git a/chromium/media/gpu/android/mock_texture_owner.h b/chromium/media/gpu/android/mock_texture_owner.h
index 71d7e61b351..25815abeffc 100644
--- a/chromium/media/gpu/android/mock_texture_owner.h
+++ b/chromium/media/gpu/android/mock_texture_owner.h
@@ -5,6 +5,9 @@
#ifndef MEDIA_GPU_ANDROID_MOCK_TEXTURE_OWNER_H_
#define MEDIA_GPU_ANDROID_MOCK_TEXTURE_OWNER_H_
+#include <memory>
+
+#include "base/android/scoped_hardware_buffer_fence_sync.h"
#include "media/gpu/android/texture_owner.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -34,8 +37,8 @@ class MockTextureOwner : public TextureOwner {
MOCK_METHOD0(WaitForFrameAvailable, void());
MOCK_METHOD1(OnTextureDestroyed, void(gpu::gles2::AbstractTexture*));
- std::unique_ptr<gl::GLImage::ScopedHardwareBuffer> GetAHardwareBuffer()
- override {
+ std::unique_ptr<base::android::ScopedHardwareBufferFenceSync>
+ GetAHardwareBuffer() override {
get_a_hardware_buffer_count++;
return nullptr;
}
diff --git a/chromium/media/gpu/android/surface_chooser_helper.cc b/chromium/media/gpu/android/surface_chooser_helper.cc
index 9093491a579..2c858d090fb 100644
--- a/chromium/media/gpu/android/surface_chooser_helper.cc
+++ b/chromium/media/gpu/android/surface_chooser_helper.cc
@@ -95,6 +95,10 @@ void SurfaceChooserHelper::SetVideoRotation(VideoRotation video_rotation) {
surface_chooser_state_.video_rotation = video_rotation;
}
+void SurfaceChooserHelper::SetIsPersistentVideo(bool is_persistent_video) {
+ surface_chooser_state_.is_persistent_video = is_persistent_video;
+}
+
void SurfaceChooserHelper::UpdateChooserState(
base::Optional<AndroidOverlayFactoryCB> new_factory) {
surface_chooser_->UpdateState(std::move(new_factory), surface_chooser_state_);
diff --git a/chromium/media/gpu/android/surface_chooser_helper.h b/chromium/media/gpu/android/surface_chooser_helper.h
index 33423dec9c9..5d77f8adb08 100644
--- a/chromium/media/gpu/android/surface_chooser_helper.h
+++ b/chromium/media/gpu/android/surface_chooser_helper.h
@@ -81,6 +81,9 @@ class MEDIA_GPU_EXPORT SurfaceChooserHelper {
// Notify us about the default rotation for the video.
void SetVideoRotation(VideoRotation video_rotation);
+ // Notify us about PIP state.
+ void SetIsPersistentVideo(bool is_persistent_video);
+
// Update the chooser state using the given factory.
void UpdateChooserState(base::Optional<AndroidOverlayFactoryCB> new_factory);
diff --git a/chromium/media/gpu/android/surface_chooser_helper_unittest.cc b/chromium/media/gpu/android/surface_chooser_helper_unittest.cc
index de6790b21ff..f9f4506fc85 100644
--- a/chromium/media/gpu/android/surface_chooser_helper_unittest.cc
+++ b/chromium/media/gpu/android/surface_chooser_helper_unittest.cc
@@ -86,6 +86,16 @@ TEST_F(SurfaceChooserHelperTest, SetVideoRotation) {
ASSERT_EQ(chooser_->current_state_.video_rotation, VIDEO_ROTATION_90);
}
+TEST_F(SurfaceChooserHelperTest, SetIsPersistentVideo) {
+ helper_->SetIsPersistentVideo(true);
+ UpdateChooserState();
+ ASSERT_TRUE(chooser_->current_state_.is_persistent_video);
+
+ helper_->SetIsPersistentVideo(false);
+ UpdateChooserState();
+ ASSERT_FALSE(chooser_->current_state_.is_persistent_video);
+}
+
TEST_F(SurfaceChooserHelperTest, SetIsOverlayRequired) {
// The default helper was created without |is_required|, so verify that.
UpdateChooserState();
diff --git a/chromium/media/gpu/android/surface_texture_gl_owner.cc b/chromium/media/gpu/android/surface_texture_gl_owner.cc
index c6a25dd05a4..472bce4c450 100644
--- a/chromium/media/gpu/android/surface_texture_gl_owner.cc
+++ b/chromium/media/gpu/android/surface_texture_gl_owner.cc
@@ -4,6 +4,9 @@
#include "media/gpu/android/surface_texture_gl_owner.h"
+#include <memory>
+
+#include "base/android/scoped_hardware_buffer_fence_sync.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/metrics/histogram_macros.h"
@@ -143,7 +146,7 @@ void SurfaceTextureGLOwner::WaitForFrameAvailable() {
}
}
-std::unique_ptr<gl::GLImage::ScopedHardwareBuffer>
+std::unique_ptr<base::android::ScopedHardwareBufferFenceSync>
SurfaceTextureGLOwner::GetAHardwareBuffer() {
NOTREACHED() << "Don't use AHardwareBuffers with SurfaceTextureGLOwner";
return nullptr;
diff --git a/chromium/media/gpu/android/surface_texture_gl_owner.h b/chromium/media/gpu/android/surface_texture_gl_owner.h
index a96ed6df96a..91f0ad6fae7 100644
--- a/chromium/media/gpu/android/surface_texture_gl_owner.h
+++ b/chromium/media/gpu/android/surface_texture_gl_owner.h
@@ -12,6 +12,12 @@
#include "media/gpu/media_gpu_export.h"
#include "ui/gl/android/surface_texture.h"
+namespace base {
+namespace android {
+class ScopedHardwareBufferFenceSync;
+} // namespace android
+} // namespace base
+
namespace media {
struct FrameAvailableEvent;
@@ -34,8 +40,8 @@ class MEDIA_GPU_EXPORT SurfaceTextureGLOwner : public TextureOwner {
void IgnorePendingRelease() override;
bool IsExpectingFrameAvailable() override;
void WaitForFrameAvailable() override;
- std::unique_ptr<gl::GLImage::ScopedHardwareBuffer> GetAHardwareBuffer()
- override;
+ std::unique_ptr<base::android::ScopedHardwareBufferFenceSync>
+ GetAHardwareBuffer() override;
protected:
void OnTextureDestroyed(gpu::gles2::AbstractTexture*) override;
@@ -47,8 +53,7 @@ class MEDIA_GPU_EXPORT SurfaceTextureGLOwner : public TextureOwner {
~SurfaceTextureGLOwner() override;
scoped_refptr<gl::SurfaceTexture> surface_texture_;
- GLuint texture_id_;
- // The context and surface that were used to create |texture_id_|.
+ // The context and surface that were used to create |surface_texture_|.
scoped_refptr<gl::GLContext> context_;
scoped_refptr<gl::GLSurface> surface_;
// When SetReleaseTimeToNow() was last called. i.e., when the last
diff --git a/chromium/media/gpu/android/texture_owner.h b/chromium/media/gpu/android/texture_owner.h
index 0dcba7b7931..f2a16d851e7 100644
--- a/chromium/media/gpu/android/texture_owner.h
+++ b/chromium/media/gpu/android/texture_owner.h
@@ -17,6 +17,12 @@
#include "ui/gl/gl_image.h"
#include "ui/gl/gl_surface.h"
+namespace base {
+namespace android {
+class ScopedHardwareBufferFenceSync;
+} // namespace android
+} // namespace base
+
namespace gpu {
class DecoderContext;
namespace gles2 {
@@ -87,7 +93,7 @@ class MEDIA_GPU_EXPORT TextureOwner
// Retrieves the AHardwareBuffer from the latest available image data.
// Note that the object must be used and destroyed on the same thread the
// TextureOwner is bound to.
- virtual std::unique_ptr<gl::GLImage::ScopedHardwareBuffer>
+ virtual std::unique_ptr<base::android::ScopedHardwareBufferFenceSync>
GetAHardwareBuffer() = 0;
protected:
diff --git a/chromium/media/gpu/command_buffer_helper.cc b/chromium/media/gpu/command_buffer_helper.cc
index 857373ac53f..cdd3e2b1bda 100644
--- a/chromium/media/gpu/command_buffer_helper.cc
+++ b/chromium/media/gpu/command_buffer_helper.cc
@@ -48,25 +48,18 @@ class CommandBufferHelperImpl
return decoder_helper_->GetGLContext();
}
- bool MakeContextCurrent() override {
- DVLOG(2) << __func__;
+ bool HasStub() override {
+ DVLOG(4) << __func__;
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
- return decoder_helper_ && decoder_helper_->MakeContextCurrent();
+ return stub_;
}
- bool IsContextCurrent() const override {
+ bool MakeContextCurrent() override {
DVLOG(2) << __func__;
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
- if (!stub_)
- return false;
-
- gl::GLContext* context = stub_->decoder_context()->GetGLContext();
- if (!context)
- return false;
-
- return context->IsCurrent(nullptr);
+ return decoder_helper_ && decoder_helper_->MakeContextCurrent();
}
GLuint CreateTexture(GLenum target,
diff --git a/chromium/media/gpu/command_buffer_helper.h b/chromium/media/gpu/command_buffer_helper.h
index d0fa4b12627..13744e53e8f 100644
--- a/chromium/media/gpu/command_buffer_helper.h
+++ b/chromium/media/gpu/command_buffer_helper.h
@@ -47,14 +47,12 @@ class MEDIA_GPU_EXPORT CommandBufferHelper
// EGLImages. New clients should use more specialized accessors instead.
virtual gl::GLContext* GetGLContext() = 0;
+ // Checks whether the stub has been destroyed.
+ virtual bool HasStub() = 0;
+
// Makes the GL context current.
virtual bool MakeContextCurrent() = 0;
- // Returns whether or not the the context is current in the
- // GLContext::IsCurrent(nullptr) sense. Note that this is not necessarily the
- // same for virtual contexts as "Did somebody run MakeContextCurrent?".
- virtual bool IsContextCurrent() const = 0;
-
// Creates a texture and returns its |service_id|.
//
// See glTexImage2D() for argument definitions.
@@ -117,7 +115,7 @@ class MEDIA_GPU_EXPORT CommandBufferHelper
virtual void WaitForSyncToken(gpu::SyncToken sync_token,
base::OnceClosure done_cb) = 0;
- // Set the callback to be called when our stub is destroyed. This callback
+ // Set the callback to be called when our stub is destroyed. This callback
// may not change the current context.
virtual void SetWillDestroyStubCB(WillDestroyStubCB will_destroy_stub_cb) = 0;
diff --git a/chromium/media/gpu/fake_command_buffer_helper.cc b/chromium/media/gpu/fake_command_buffer_helper.cc
index c0ff03c9f5b..6349d1b1565 100644
--- a/chromium/media/gpu/fake_command_buffer_helper.cc
+++ b/chromium/media/gpu/fake_command_buffer_helper.cc
@@ -64,6 +64,10 @@ gl::GLContext* FakeCommandBufferHelper::GetGLContext() {
return nullptr;
}
+bool FakeCommandBufferHelper::HasStub() {
+ return has_stub_;
+}
+
bool FakeCommandBufferHelper::MakeContextCurrent() {
DVLOG(3) << __func__;
DCHECK(task_runner_->BelongsToCurrentThread());
@@ -71,10 +75,6 @@ bool FakeCommandBufferHelper::MakeContextCurrent() {
return is_context_current_;
}
-bool FakeCommandBufferHelper::IsContextCurrent() const {
- return is_context_current_;
-}
-
GLuint FakeCommandBufferHelper::CreateTexture(GLenum target,
GLenum internal_format,
GLsizei width,
diff --git a/chromium/media/gpu/fake_command_buffer_helper.h b/chromium/media/gpu/fake_command_buffer_helper.h
index 02cf0db80c5..e99f114f004 100644
--- a/chromium/media/gpu/fake_command_buffer_helper.h
+++ b/chromium/media/gpu/fake_command_buffer_helper.h
@@ -39,8 +39,8 @@ class FakeCommandBufferHelper : public CommandBufferHelper {
// CommandBufferHelper implementation.
gl::GLContext* GetGLContext() override;
+ bool HasStub() override;
bool MakeContextCurrent() override;
- bool IsContextCurrent() const override;
GLuint CreateTexture(GLenum target,
GLenum internal_format,
GLsizei width,
diff --git a/chromium/media/gpu/fake_jpeg_decode_accelerator.cc b/chromium/media/gpu/fake_jpeg_decode_accelerator.cc
index 6f6b353726e..b12b52f40ae 100644
--- a/chromium/media/gpu/fake_jpeg_decode_accelerator.cc
+++ b/chromium/media/gpu/fake_jpeg_decode_accelerator.cc
@@ -45,8 +45,6 @@ void FakeJpegDecodeAccelerator::Decode(
new WritableUnalignedMapping(bitstream_buffer.handle(),
bitstream_buffer.size(),
bitstream_buffer.offset()));
- // The handle is no longer needed.
- bitstream_buffer.handle().Close();
if (!src_shm->IsValid()) {
DLOG(ERROR) << "Unable to map shared memory in FakeJpegDecodeAccelerator";
NotifyError(bitstream_buffer.id(), JpegDecodeAccelerator::UNREADABLE_INPUT);
diff --git a/chromium/media/gpu/h264_decoder.cc b/chromium/media/gpu/h264_decoder.cc
index 9effde45aa0..0fa017971f5 100644
--- a/chromium/media/gpu/h264_decoder.cc
+++ b/chromium/media/gpu/h264_decoder.cc
@@ -8,7 +8,6 @@
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/callback_helpers.h"
-#include "base/macros.h"
#include "base/numerics/safe_conversions.h"
#include "base/optional.h"
#include "base/stl_util.h"
@@ -27,12 +26,6 @@ H264Decoder::H264Accelerator::Status H264Decoder::H264Accelerator::SetStream(
return H264Decoder::H264Accelerator::Status::kNotSupported;
}
-H264Decoder::H264Accelerator::Status
-H264Decoder::H264Accelerator::ParseSliceHeader(const H264NALU& slice_nalu,
- H264SliceHeader* slice_header) {
- return H264Decoder::H264Accelerator::Status::kNotSupported;
-}
-
H264Decoder::H264Decoder(std::unique_ptr<H264Accelerator> accelerator,
const VideoColorSpace& container_color_space)
: state_(kNeedStreamMetadata),
@@ -727,7 +720,7 @@ H264Decoder::H264Accelerator::Status H264Decoder::StartNewFrame(
bool H264Decoder::HandleMemoryManagementOps(scoped_refptr<H264Picture> pic) {
// 8.2.5.4
- for (size_t i = 0; i < arraysize(pic->ref_pic_marking); ++i) {
+ for (size_t i = 0; i < base::size(pic->ref_pic_marking); ++i) {
// Code below does not support interlaced stream (per-field pictures).
H264DecRefPicMarking* ref_pic_marking = &pic->ref_pic_marking[i];
scoped_refptr<H264Picture> to_mark;
@@ -1320,32 +1313,12 @@ H264Decoder::DecodeResult H264Decoder::Decode() {
// the call that failed previously. If it succeeds (it may not if no
// additional key has been provided, for example), then the remaining
// steps will be executed.
-
if (!curr_slice_hdr_) {
curr_slice_hdr_.reset(new H264SliceHeader());
- // If the accelerator handles the slice header, let it handle it.
- // If not, use the parser.
- H264Accelerator::Status result = accelerator_->ParseSliceHeader(
- *curr_nalu_, curr_slice_hdr_.get());
- switch (result) {
- case H264Accelerator::Status::kOk:
- break;
- case H264Accelerator::Status::kTryAgain:
- DVLOG(1) << "ParseSliceHeader() needs to try again";
- // reset |curr_slice_hdr_| so ParseSliceHeader() is tried again.
- curr_slice_hdr_.reset();
- return H264Decoder::kTryAgain;
- case H264Accelerator::Status::kNotSupported:
- // Let the parser try to handle it.
- par_res =
- parser_.ParseSliceHeader(*curr_nalu_, curr_slice_hdr_.get());
- if (par_res == H264Parser::kOk)
- break;
- FALLTHROUGH;
- case H264Accelerator::Status::kFail:
- SET_ERROR_AND_RETURN();
- }
-
+ par_res =
+ parser_.ParseSliceHeader(*curr_nalu_, curr_slice_hdr_.get());
+ if (par_res != H264Parser::kOk)
+ SET_ERROR_AND_RETURN();
state_ = kTryPreprocessCurrentSlice;
}
@@ -1445,7 +1418,17 @@ gfx::Size H264Decoder::GetPicSize() const {
}
size_t H264Decoder::GetRequiredNumOfPictures() const {
- return dpb_.max_num_pics() + kPicsInPipeline;
+ constexpr size_t kPicsInPipeline = limits::kMaxVideoFrames + 1;
+ return GetNumReferenceFrames() + kPicsInPipeline;
+}
+
+size_t H264Decoder::GetNumReferenceFrames() const {
+ // Use the maximum number of pictures in the Decoded Picture Buffer plus one
+ // for the one being currently egressed.
+ // Another +1 is experimentally needed for high-to-high resolution changes.
+ // TODO(mcasas): Figure out why +2 instead of +1, see crbug.com/909926 and
+ // http://crrev.com/c/1363807/9/media/gpu/h264_decoder.cc#1449.
+ return dpb_.max_num_pics() + 2;
}
// static
diff --git a/chromium/media/gpu/h264_decoder.h b/chromium/media/gpu/h264_decoder.h
index a7b9e4be16f..66bb08917ed 100644
--- a/chromium/media/gpu/h264_decoder.h
+++ b/chromium/media/gpu/h264_decoder.h
@@ -54,8 +54,8 @@ class MEDIA_GPU_EXPORT H264Decoder : public AcceleratedVideoDecoder {
// operation later, once the data has been provided.
kTryAgain,
- // Operation is not supported. Used by SetStream() and ParseSliceHeader()
- // to indicate that the Accelerator can not handle this operation.
+ // Operation is not supported. Used by SetStream() to indicate that the
+ // Accelerator can not handle this operation.
kNotSupported,
};
@@ -142,14 +142,6 @@ class MEDIA_GPU_EXPORT H264Decoder : public AcceleratedVideoDecoder {
virtual Status SetStream(base::span<const uint8_t> stream,
const DecryptConfig* decrypt_config);
- // Parse a slice header, returning it in |*slice_header|. |slice_nalu| must
- // be a slice NALU. On success, this populates |*slice_header|. If the
- // Accelerator doesn't handle this slice header, then it should return
- // kNotSupported. This method has a default implementation that returns
- // kNotSupported.
- virtual Status ParseSliceHeader(const H264NALU& slice_nalu,
- H264SliceHeader* slice_header);
-
private:
DISALLOW_COPY_AND_ASSIGN(H264Accelerator);
};
@@ -168,6 +160,7 @@ class MEDIA_GPU_EXPORT H264Decoder : public AcceleratedVideoDecoder {
DecodeResult Decode() override WARN_UNUSED_RESULT;
gfx::Size GetPicSize() const override;
size_t GetRequiredNumOfPictures() const override;
+ size_t GetNumReferenceFrames() const override;
// Return true if we need to start a new picture.
static bool IsNewPrimaryCodedPicture(const H264Picture* curr_pic,
@@ -182,17 +175,6 @@ class MEDIA_GPU_EXPORT H264Decoder : public AcceleratedVideoDecoder {
H264Picture* pic);
private:
- // We need to keep at most kDPBMaxSize pictures in DPB for
- // reference/to display later and an additional one for the one currently
- // being decoded. We also ask for some additional ones since VDA needs
- // to accumulate a few ready-to-output pictures before it actually starts
- // displaying and giving them back. +2 instead of +1 because of subjective
- // smoothness improvement during testing.
- enum {
- kPicsInPipeline = limits::kMaxVideoFrames + 2,
- kMaxNumReqPictures = H264DPB::kDPBMaxSize + kPicsInPipeline,
- };
-
// Internal state of the decoder.
enum State {
// After initialization, need an SPS.
diff --git a/chromium/media/gpu/h264_decoder_unittest.cc b/chromium/media/gpu/h264_decoder_unittest.cc
index bd0ca1ee12a..f48003fb177 100644
--- a/chromium/media/gpu/h264_decoder_unittest.cc
+++ b/chromium/media/gpu/h264_decoder_unittest.cc
@@ -129,9 +129,6 @@ class MockH264Accelerator : public H264Decoder::H264Accelerator {
MOCK_METHOD2(SetStream,
Status(base::span<const uint8_t> stream,
const DecryptConfig* decrypt_config));
- MOCK_METHOD2(ParseSliceHeader,
- Status(const H264NALU& slice_nalu,
- H264SliceHeader* slice_header));
void Reset() override {}
};
@@ -182,9 +179,6 @@ void H264DecoderTest::SetUp() {
ON_CALL(*accelerator_, SetStream(_, _))
.WillByDefault(
Return(H264Decoder::H264Accelerator::Status::kNotSupported));
- ON_CALL(*accelerator_, ParseSliceHeader(_, _))
- .WillByDefault(
- Return(H264Decoder::H264Accelerator::Status::kNotSupported));
}
void H264DecoderTest::SetInputFrameFiles(
@@ -592,7 +586,6 @@ TEST_F(H264DecoderTest, SetStreamRetry) {
{
InSequence sequence;
- EXPECT_CALL(*accelerator_, ParseSliceHeader(_, _));
EXPECT_CALL(*accelerator_, CreateH264Picture());
EXPECT_CALL(*accelerator_, SubmitFrameMetadata(_, _, _, _, _, _, _));
EXPECT_CALL(*accelerator_, SubmitSlice(_, _, _, _, _, _, _, _));
@@ -607,36 +600,5 @@ TEST_F(H264DecoderTest, SetStreamRetry) {
ASSERT_TRUE(decoder_->Flush());
}
-TEST_F(H264DecoderTest, ParseSliceHeaderRetry) {
- SetInputFrameFiles({kBaselineFrame0});
- ASSERT_EQ(AcceleratedVideoDecoder::kAllocateNewSurfaces, Decode());
- EXPECT_EQ(gfx::Size(320, 192), decoder_->GetPicSize());
- EXPECT_LE(9u, decoder_->GetRequiredNumOfPictures());
-
- EXPECT_CALL(*accelerator_, ParseSliceHeader(_, _))
- .WillOnce(Return(H264Decoder::H264Accelerator::Status::kTryAgain));
- ASSERT_EQ(AcceleratedVideoDecoder::kTryAgain, Decode());
-
- H264SliceHeader slice_header = {};
- {
- InSequence sequence;
-
- EXPECT_CALL(*accelerator_, ParseSliceHeader(_, _))
- .WillOnce(ComputeSliceHeader(&slice_header));
- EXPECT_CALL(*accelerator_, CreateH264Picture());
- EXPECT_CALL(*accelerator_, SubmitFrameMetadata(_, _, _, _, _, _, _));
- EXPECT_CALL(*accelerator_, SubmitSlice(_, SliceHeaderMatches(&slice_header),
- _, _, _, _, _, _));
- }
- ASSERT_EQ(AcceleratedVideoDecoder::kRanOutOfStreamData, Decode());
-
- {
- InSequence sequence;
- EXPECT_CALL(*accelerator_, SubmitDecode(WithPoc(0)));
- EXPECT_CALL(*accelerator_, OutputPicture(WithPoc(0)));
- }
- ASSERT_TRUE(decoder_->Flush());
-}
-
} // namespace
} // namespace media
diff --git a/chromium/media/gpu/image_processor.cc b/chromium/media/gpu/image_processor.cc
new file mode 100644
index 00000000000..9cf0ab583d0
--- /dev/null
+++ b/chromium/media/gpu/image_processor.cc
@@ -0,0 +1,50 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/gpu/image_processor.h"
+
+#include "media/base/bind_to_current_loop.h"
+
+namespace media {
+
+ImageProcessor::PortConfig::PortConfig(
+ const VideoFrameLayout& layout,
+ const gfx::Size& visible_size,
+ const std::vector<VideoFrame::StorageType>& preferred_storage_types)
+ : layout(layout),
+ visible_size(visible_size),
+ preferred_storage_types(preferred_storage_types) {}
+
+ImageProcessor::PortConfig::~PortConfig() {}
+
+ImageProcessor::ImageProcessor(const VideoFrameLayout& input_layout,
+ VideoFrame::StorageType input_storage_type,
+ const VideoFrameLayout& output_layout,
+ VideoFrame::StorageType output_storage_type,
+ OutputMode output_mode)
+ : input_layout_(input_layout),
+ input_storage_type_(input_storage_type),
+ output_layout_(output_layout),
+ output_storage_type_(output_storage_type),
+ output_mode_(output_mode) {}
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+bool ImageProcessor::Process(scoped_refptr<VideoFrame> frame,
+ int output_buffer_index,
+ std::vector<base::ScopedFD> output_dmabuf_fds,
+ FrameReadyCB cb) {
+ return ProcessInternal(std::move(frame), output_buffer_index,
+ std::move(output_dmabuf_fds),
+ BindToCurrentLoop(std::move(cb)));
+}
+#endif
+
+bool ImageProcessor::Process(scoped_refptr<VideoFrame> input_frame,
+ scoped_refptr<VideoFrame> output_frame,
+ FrameReadyCB cb) {
+ return ProcessInternal(std::move(input_frame), std::move(output_frame),
+ BindToCurrentLoop(std::move(cb)));
+}
+
+} // namespace media
diff --git a/chromium/media/gpu/image_processor.h b/chromium/media/gpu/image_processor.h
index aa477e525ff..ebff2531ce1 100644
--- a/chromium/media/gpu/image_processor.h
+++ b/chromium/media/gpu/image_processor.h
@@ -9,8 +9,12 @@
#include "base/callback_forward.h"
#include "base/files/scoped_file.h"
+#include "base/macros.h"
#include "base/memory/ref_counted.h"
+#include "build/build_config.h"
#include "media/base/video_frame.h"
+#include "media/base/video_frame_layout.h"
+#include "media/gpu/media_gpu_export.h"
#include "ui/gfx/geometry/size.h"
namespace media {
@@ -21,37 +25,77 @@ namespace media {
// in a format different from what the rest of the pipeline expects.
//
// This class exposes the interface that an image processor should implement.
-class ImageProcessor {
+// The threading model of ImageProcessor:
+// There are two threads, "client thread" and "processor thread".
+// "client thread" is the thread that creates the ImageProcessor.
+// Process(), Reset() and callbacks (i.e. FrameReadyCB and ErrorCB) must be run
+// on client thread.
+// ImageProcessor should have its owned thread, "processor thread", so that
+// Process() doesn't block client thread. The callbacks can be called on
+// processor thread. ImageProcessor's client must guarantee the callback finally
+// posts them to and run on the thread that creates ImageProcessor.
+class MEDIA_GPU_EXPORT ImageProcessor {
public:
// OutputMode is used as intermediate stage. The ultimate goal is to make
// ImageProcessor's clients all use IMPORT output mode.
- // TODO(907767): Remove this once ImageProcessor always works as IMPORT mode
- // for output.
+ // TODO(crbug.com/907767): Remove this once ImageProcessor always works as
+ // IMPORT mode for output.
enum class OutputMode {
ALLOCATE,
IMPORT
};
- // Returns input allocated size required by the processor to be fed with.
- virtual gfx::Size input_allocated_size() const = 0;
+ // Encapsulates ImageProcessor input / output configurations.
+ struct MEDIA_GPU_EXPORT PortConfig {
+ PortConfig() = delete;
+ PortConfig(
+ const VideoFrameLayout& layout,
+ const gfx::Size& visible_size,
+ const std::vector<VideoFrame::StorageType>& preferred_storage_types);
+ ~PortConfig();
+
+ const VideoFrameLayout layout;
+ const gfx::Size visible_size;
+ const std::vector<VideoFrame::StorageType> preferred_storage_types;
+ };
+
+ // Callback to be used to return a processed image to the client.
+ // FrameReadyCB is guaranteed to be executed on the "client thread".
+ // Process() is responsible for making sure this invariant is
+ // respected by using media::BindToCurrentLoop().
+ using FrameReadyCB = base::OnceCallback<void(scoped_refptr<VideoFrame>)>;
+
+ // Callback to be used to notify client when ImageProcess encounters error.
+ // It should be assigned in subclass' factory method. ErrorCB is guaranteed to
+ // be executed on the "client thread". Implementations are responsible for
+ // making sure this invariant is respected, by using
+ // media::BindToCurrentLoop() where appropriate.
+ using ErrorCB = base::RepeatingClosure;
+
+ virtual ~ImageProcessor() = default;
+
+ // Returns input layout of the processor.
+ const VideoFrameLayout& input_layout() const { return input_layout_; }
- // Returns output allocated size required by the processor.
- virtual gfx::Size output_allocated_size() const = 0;
+ // Returns output layout of the processor.
+ const VideoFrameLayout& output_layout() const { return output_layout_; }
// Returns input storage type.
- virtual VideoFrame::StorageType input_storage_type() const = 0;
+ VideoFrame::StorageType input_storage_type() const {
+ return input_storage_type_;
+ }
// Returns output storage type.
- virtual VideoFrame::StorageType output_storage_type() const = 0;
+ VideoFrame::StorageType output_storage_type() const {
+ return output_storage_type_;
+ }
// Returns output mode.
- virtual OutputMode output_mode() const = 0;
-
- // Callback to be used to return the index of a processed image to the
- // client. After the client is done with the frame, call Process with the
- // index to return the output buffer to the image processor.
- using FrameReadyCB = base::OnceCallback<void(scoped_refptr<VideoFrame>)>;
+ // TODO(crbug.com/907767): Remove it once ImageProcessor always works as
+ // IMPORT mode for output.
+ OutputMode output_mode() const { return output_mode_; }
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
// Called by client to process |frame|. The resulting processed frame will be
// stored in |output_buffer_index| output buffer and notified via |cb|. The
// processor will drop all its references to |frame| after it finishes
@@ -59,16 +103,69 @@ class ImageProcessor {
// should pass non-empty |output_dmabuf_fds| and the processed frame will be
// stored in those buffers. If the number of |output_dmabuf_fds| is not
// expected, this function will return false.
- virtual bool Process(scoped_refptr<VideoFrame> frame,
- int output_buffer_index,
- std::vector<base::ScopedFD> output_dmabuf_fds,
- FrameReadyCB cb) = 0;
+ // Process() must be called on "client thread". This should not be blocking
+ // function.
+ //
+ // Note: because base::ScopedFD is defined under OS_POXIS or OS_FUCHSIA, we
+ // define this function under the same build config. It doesn't affect its
+ // current users as they are all under the same build config.
+ // TODO(crbug.com/907767): Remove this once ImageProcessor always works as
+ // IMPORT mode for output.
+ bool Process(scoped_refptr<VideoFrame> frame,
+ int output_buffer_index,
+ std::vector<base::ScopedFD> output_dmabuf_fds,
+ FrameReadyCB cb);
+#endif
+
+ // Called by client to process |input_frame| and store in |output_frame|. This
+ // can only be used when output mode is IMPORT. The processor will drop all
+ // its references to |input_frame| and |output_frame| after it finishes
+ // accessing it.
+ // Process() must be called on "client thread". This should not be blocking
+ // function.
+ bool Process(scoped_refptr<VideoFrame> input_frame,
+ scoped_refptr<VideoFrame> output_frame,
+ FrameReadyCB cb);
// Reset all processing frames. After this method returns, no more callbacks
// will be invoked. ImageProcessor is ready to process more frames.
+ // Reset() must be called on "client thread".
virtual bool Reset() = 0;
- virtual ~ImageProcessor() = default;
+ protected:
+ ImageProcessor(const VideoFrameLayout& input_layout,
+ VideoFrame::StorageType input_storage_type,
+ const VideoFrameLayout& output_layout,
+ VideoFrame::StorageType output_storage_type,
+ OutputMode output_mode);
+
+ // Stores input frame's layout and storage type.
+ const VideoFrameLayout input_layout_;
+ const VideoFrame::StorageType input_storage_type_;
+
+ // Stores output frame's layout, storage type and output mode.
+ // TODO(crbug.com/907767): Remove |output_mode_| once ImageProcessor always
+ // works as IMPORT mode for output.
+ const VideoFrameLayout output_layout_;
+ const VideoFrame::StorageType output_storage_type_;
+ const OutputMode output_mode_;
+
+ private:
+ // Each ImageProcessor shall implement ProcessInternal() as Process().
+ // ProcessInternal() is called inside of Process() with
+ // media::BindToCurrentLoop() on |cb| to guarantee |cb| will be executed on
+ // "client thread".
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+ virtual bool ProcessInternal(scoped_refptr<VideoFrame> frame,
+ int output_buffer_index,
+ std::vector<base::ScopedFD> output_dmabuf_fds,
+ FrameReadyCB cb) = 0;
+#endif
+ virtual bool ProcessInternal(scoped_refptr<VideoFrame> input_frame,
+ scoped_refptr<VideoFrame> output_frame,
+ FrameReadyCB cb) = 0;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ImageProcessor);
};
} // namespace media
diff --git a/chromium/media/gpu/image_processor_factory.cc b/chromium/media/gpu/image_processor_factory.cc
new file mode 100644
index 00000000000..ba259c217f1
--- /dev/null
+++ b/chromium/media/gpu/image_processor_factory.cc
@@ -0,0 +1,46 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/gpu/image_processor_factory.h"
+
+#include <stddef.h>
+
+#include "base/callback.h"
+#include "media/gpu/buildflags.h"
+#include "media/gpu/libyuv_image_processor.h"
+
+#if BUILDFLAG(USE_V4L2_CODEC)
+#include "media/gpu/v4l2/v4l2_device.h"
+#include "media/gpu/v4l2/v4l2_image_processor.h"
+#endif // BUILDFLAG(USE_V4L2_CODEC)
+
+namespace media {
+
+// static
+std::unique_ptr<ImageProcessor> ImageProcessorFactory::Create(
+ const ImageProcessor::PortConfig& input_config,
+ const ImageProcessor::PortConfig& output_config,
+ const std::vector<ImageProcessor::OutputMode>& preferred_output_modes,
+ size_t num_buffers,
+ ImageProcessor::ErrorCB error_cb) {
+ std::unique_ptr<ImageProcessor> image_processor;
+#if BUILDFLAG(USE_V4L2_CODEC)
+ for (auto output_mode : preferred_output_modes) {
+ image_processor = V4L2ImageProcessor::Create(
+ V4L2Device::Create(), input_config, output_config, output_mode,
+ num_buffers, error_cb);
+ if (image_processor)
+ return image_processor;
+ }
+#endif // BUILDFLAG(USE_V4L2_CODEC)
+ for (auto output_mode : preferred_output_modes) {
+ image_processor = LibYUVImageProcessor::Create(input_config, output_config,
+ output_mode, error_cb);
+ if (image_processor)
+ return image_processor;
+ }
+ return nullptr;
+}
+
+} // namespace media
diff --git a/chromium/media/gpu/image_processor_factory.h b/chromium/media/gpu/image_processor_factory.h
new file mode 100644
index 00000000000..31f165dab8f
--- /dev/null
+++ b/chromium/media/gpu/image_processor_factory.h
@@ -0,0 +1,56 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_GPU_IMAGE_PROCESSOR_FACTORY_H_
+#define MEDIA_GPU_IMAGE_PROCESSOR_FACTORY_H_
+
+#include <memory>
+#include <vector>
+
+#include "base/callback_forward.h"
+#include "base/macros.h"
+#include "media/gpu/image_processor.h"
+#include "media/gpu/media_gpu_export.h"
+
+namespace media {
+
+class MEDIA_GPU_EXPORT ImageProcessorFactory {
+ public:
+ // Factory method to create ImageProcessor.
+ // Given input and output PortConfig, it tries to find out the most suitable
+ // ImageProcessor to be used for the current platform.
+ //
+ // For |preferred_output_modes|, it tries instantiate an ImageProcessor class
+ // with an output mode in sequence. With ImageProcessor subclass and output
+ // mode selected, the subclass' factory method will pick the first supported
+ // input and output storage type from input and output PortConfig,
+ // respectively. It has an assumption that the selection of output mode is
+ // independent from the selection of output storage type.
+ //
+ // TODO(crbug.com/907767): Remove |preferred_output_modes| once ImageProcessor
+ // only accepts IMPORT output mode.
+ //
+ // Args:
+ // input_config: input PortConfig.
+ // output_config: output PortConfig.
+ // preferred_output_modes: list of preferred output modes.
+ // num_buffers: number of input and output buffers.
+ // error_cb: Callback when error occurs.
+ //
+ // Returns:
+ // Most suitable ImageProcessor instance. nullptr if no ImageProcessor
+ // is available for given parameters on current platform.
+ static std::unique_ptr<ImageProcessor> Create(
+ const ImageProcessor::PortConfig& input_config,
+ const ImageProcessor::PortConfig& output_config,
+ const std::vector<ImageProcessor::OutputMode>& preferred_output_modes,
+ size_t num_buffers,
+ ImageProcessor::ErrorCB error_cb);
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ImageProcessorFactory);
+};
+
+} // namespace media
+
+#endif // MEDIA_GPU_IMAGE_PROCESSOR_FACTORY_H_
diff --git a/chromium/media/gpu/image_processor_test.cc b/chromium/media/gpu/image_processor_test.cc
new file mode 100644
index 00000000000..84803739270
--- /dev/null
+++ b/chromium/media/gpu/image_processor_test.cc
@@ -0,0 +1,129 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <string>
+#include <tuple>
+
+#include "base/files/file_path.h"
+#include "base/md5.h"
+#include "base/test/launcher/unit_test_launcher.h"
+#include "base/test/test_suite.h"
+#include "build/build_config.h"
+#include "media/base/video_frame.h"
+#include "media/base/video_types.h"
+#include "media/gpu/image_processor.h"
+#include "media/gpu/test/image_processor/image_processor_client.h"
+#include "media/gpu/test/video_image_info.h"
+#include "mojo/core/embedder/embedder.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gfx/geometry/size.h"
+
+namespace media {
+namespace {
+
+// I420 formatted 320x192 video frame. (bear)
+// TODO(crbug.com/917951): Dynamically load this info from json file.
+constexpr test::VideoImageInfo kI420Image(
+ FILE_PATH_LITERAL("bear_320x192.i420.yuv"),
+ "962820755c74b28f9385fd67219cc04a",
+ PIXEL_FORMAT_I420,
+ gfx::Size(320, 192));
+
+// NV12 formatted 320x192 video frame. (bear)
+// TODO(crbug.com/917951): Dynamically load this info from json file.
+constexpr test::VideoImageInfo kNV12Image(
+ FILE_PATH_LITERAL("bear_320x192.i420.nv12.yuv"),
+ "ce21986434743d3671056719136d46ff",
+ PIXEL_FORMAT_NV12,
+ gfx::Size(320, 192));
+
+class ImageProcessorSimpleParamTest
+ : public ::testing::Test,
+ public ::testing::WithParamInterface<
+ std::tuple<test::VideoImageInfo, test::VideoImageInfo>> {
+ public:
+ // TODO(crbug.com/917951): Initialize Ozone once.
+ void SetUp() override {}
+ void TearDown() override {}
+
+ std::unique_ptr<test::ImageProcessorClient> CreateImageProcessorClient(
+ const test::VideoImageInfo& input_image_info,
+ const test::VideoImageInfo& output_image_info) {
+ // TODO(crbug.com/917951): Pass VideoFrameProcessor.
+ auto input_config_layout = input_image_info.VideoFrameLayout();
+ auto output_config_layout = output_image_info.VideoFrameLayout();
+ LOG_ASSERT(input_config_layout);
+ LOG_ASSERT(output_config_layout);
+ ImageProcessor::PortConfig input_config(*input_config_layout,
+ input_image_info.visible_size,
+ {VideoFrame::STORAGE_OWNED_MEMORY});
+ ImageProcessor::PortConfig output_config(
+ *output_config_layout, output_image_info.visible_size,
+ {VideoFrame::STORAGE_OWNED_MEMORY});
+ // TODO(crbug.com/917951): Select more appropriate number of buffers.
+ constexpr size_t kNumBuffers = 1;
+ auto ip_client = test::ImageProcessorClient::Create(
+ input_config, output_config, kNumBuffers, true);
+ LOG_ASSERT(ip_client) << "Failed to create ImageProcessorClient";
+ return ip_client;
+ }
+};
+
+TEST_P(ImageProcessorSimpleParamTest, ConvertOneTimeFromMemToMem) {
+ test::VideoImageInfo input_image_info = std::get<0>(GetParam());
+ test::VideoImageInfo output_image_info = std::get<1>(GetParam());
+ auto ip_client =
+ CreateImageProcessorClient(input_image_info, output_image_info);
+
+ ip_client->Process(input_image_info, output_image_info);
+ EXPECT_TRUE(ip_client->WaitUntilNumImageProcessed(1u));
+ EXPECT_EQ(ip_client->GetErrorCount(), 0u);
+ EXPECT_EQ(ip_client->GetNumOfProcessedImages(), 1u);
+
+ // TODO(crbug.com/917951): Replace this checker with VideoFrameProcessor
+ // interface and get results by ImageProcessorClient function like
+ // ImageProcessorClient::GetProcessResults().
+ const auto output_frames = ip_client->GetProcessedImages();
+ ASSERT_EQ(output_frames.size(), 1u);
+ auto processed_frame = output_frames[0];
+ ASSERT_TRUE(processed_frame->IsMappable());
+ base::MD5Context context;
+ base::MD5Init(&context);
+ VideoFrame::HashFrameForTesting(&context, processed_frame);
+ base::MD5Digest digest;
+ base::MD5Final(&digest, &context);
+ std::string expected_md5 = output_image_info.md5sum;
+ std::string computed_md5 = MD5DigestToBase16(digest);
+ EXPECT_EQ(expected_md5, computed_md5);
+};
+
+// I420->NV12
+INSTANTIATE_TEST_CASE_P(ConvertI420ToNV12,
+ ImageProcessorSimpleParamTest,
+ ::testing::Values(std::make_tuple(kI420Image,
+ kNV12Image)));
+
+#if defined(OS_CHROMEOS)
+// TODO(hiroh): Add more tests.
+// MEM->DMABUF (V4L2VideoEncodeAccelerator),
+// DMABUF->DMABUF (GpuArcVideoEncodeAccelerator),
+#endif
+
+} // namespace
+} // namespace media
+
+int main(int argc, char** argv) {
+ testing::InitGoogleTest(&argc, argv);
+ base::CommandLine::Init(argc, argv);
+ // Using shared memory requires mojo to be initialized (crbug.com/849207).
+ mojo::core::Init();
+ base::ShadowingAtExitManager at_exit_manager;
+
+ // Needed to enable DVLOG through --vmodule.
+ logging::LoggingSettings settings;
+ settings.logging_dest = logging::LOG_TO_SYSTEM_DEBUG_LOG;
+ LOG_ASSERT(logging::InitLogging(settings));
+
+ return RUN_ALL_TESTS();
+}
diff --git a/chromium/media/gpu/ipc/service/picture_buffer_manager.cc b/chromium/media/gpu/ipc/service/picture_buffer_manager.cc
index f7002d018f7..9cd5b1e1664 100644
--- a/chromium/media/gpu/ipc/service/picture_buffer_manager.cc
+++ b/chromium/media/gpu/ipc/service/picture_buffer_manager.cc
@@ -50,21 +50,23 @@ class PictureBufferManagerImpl : public PictureBufferManager {
base::AutoLock lock(picture_buffers_lock_);
- // If there are no assigned picture buffers, predict that the VDA will
- // request some.
- if (picture_buffers_.empty())
- return true;
-
- // Predict that the VDA can output a picture if at least one picture buffer
- // is not in use as an output.
+ // If at least one picture buffer is not in use, predict that the VDA can
+ // use it to output another picture.
+ bool has_assigned_picture_buffer = false;
for (const auto& it : picture_buffers_) {
- const auto& state = it.second.state;
- if (std::find(state.begin(), state.end(), PictureBufferState::OUTPUT) ==
- state.end())
- return true;
+ if (!it.second.dismissed) {
+ // Note: If a picture buffer is waiting for SyncToken release, that
+ // release is already in some command buffer (or the wait is invalid).
+ // The wait will complete without further interaction from the client.
+ if (it.second.output_count == 0)
+ return true;
+ has_assigned_picture_buffer = true;
+ }
}
- return false;
+ // If there are no assigned picture buffers, predict that the VDA will
+ // request some.
+ return !has_assigned_picture_buffer;
}
std::vector<PictureBuffer> CreatePictureBuffers(
@@ -89,7 +91,6 @@ class PictureBufferManagerImpl : public PictureBufferManager {
std::vector<PictureBuffer> picture_buffers;
for (uint32_t i = 0; i < count; i++) {
- PictureBuffer::TextureIds service_ids;
PictureBufferData picture_data = {pixel_format, texture_size};
for (uint32_t j = 0; j < planes; j++) {
@@ -98,7 +99,7 @@ class PictureBufferManagerImpl : public PictureBufferManager {
texture_target, GL_RGBA, texture_size.width(),
texture_size.height(), GL_RGBA, GL_UNSIGNED_BYTE);
DCHECK(service_id);
- service_ids.push_back(service_id);
+ picture_data.service_ids.push_back(service_id);
// The texture is not cleared yet, but it will be before the VDA outputs
// it. Rather than requiring output to happen on the GPU thread, mark
@@ -124,11 +125,9 @@ class PictureBufferManagerImpl : public PictureBufferManager {
//
// TODO(sandersd): Refactor the bind image callback to use service IDs so
// that we can get rid of the client IDs altogether.
- picture_buffers.emplace_back(picture_buffer_id, texture_size, service_ids,
- service_ids, texture_target, pixel_format);
-
- // Record the textures used by the picture buffer.
- picture_buffer_textures_[picture_buffer_id] = std::move(service_ids);
+ picture_buffers.emplace_back(
+ picture_buffer_id, texture_size, picture_data.service_ids,
+ picture_data.service_ids, texture_target, pixel_format);
}
return picture_buffers;
}
@@ -138,26 +137,20 @@ class PictureBufferManagerImpl : public PictureBufferManager {
base::AutoLock lock(picture_buffers_lock_);
- // Check the state of the picture buffer.
const auto& it = picture_buffers_.find(picture_buffer_id);
- if (it == picture_buffers_.end()) {
+ if (it == picture_buffers_.end() || it->second.dismissed) {
DVLOG(1) << "Unknown picture buffer " << picture_buffer_id;
return false;
}
- bool is_available = it->second.IsAvailable();
-
- // Destroy the picture buffer data.
- picture_buffers_.erase(it);
+ it->second.dismissed = true;
- // If the picture was not bound to any VideoFrame, we can destroy its
- // textures immediately.
- if (is_available) {
+ // If the picture buffer is not in use, it should be destroyed immediately.
+ if (!it->second.IsInUse()) {
gpu_task_runner_->PostTask(
FROM_HERE,
- base::BindOnce(
- &PictureBufferManagerImpl::DestroyPictureBufferTextures, this,
- picture_buffer_id));
+ base::BindOnce(&PictureBufferManagerImpl::DestroyPictureBuffer, this,
+ picture_buffer_id));
}
return true;
@@ -169,8 +162,11 @@ class PictureBufferManagerImpl : public PictureBufferManager {
std::vector<int32_t> assigned_picture_buffer_ids;
{
base::AutoLock lock(picture_buffers_lock_);
- for (const auto& it : picture_buffers_)
- assigned_picture_buffer_ids.push_back(it.first);
+
+ for (const auto& it : picture_buffers_) {
+ if (!it.second.dismissed)
+ assigned_picture_buffer_ids.push_back(it.first);
+ }
}
for (int32_t picture_buffer_id : assigned_picture_buffer_ids)
@@ -192,13 +188,13 @@ class PictureBufferManagerImpl : public PictureBufferManager {
// Verify that the picture buffer is available.
const auto& it = picture_buffers_.find(picture_buffer_id);
- if (it == picture_buffers_.end()) {
+ if (it == picture_buffers_.end() || it->second.dismissed) {
DVLOG(1) << "Unknown picture buffer " << picture_buffer_id;
return nullptr;
}
- PictureBufferData& picture_buffer_data = it->second;
// Ensure that the picture buffer is large enough.
+ PictureBufferData& picture_buffer_data = it->second;
if (!gfx::Rect(picture_buffer_data.texture_size).Contains(visible_rect)) {
DLOG(WARNING) << "visible_rect " << visible_rect.ToString()
<< " exceeds coded_size "
@@ -209,8 +205,8 @@ class PictureBufferManagerImpl : public PictureBufferManager {
natural_size = GetNaturalSize(visible_rect, pixel_aspect_ratio);
}
- // Mark the picture as an output.
- picture_buffer_data.state.push_back(PictureBufferState::OUTPUT);
+ // Record the output.
+ picture_buffer_data.output_count++;
// Create and return a VideoFrame for the picture buffer.
scoped_refptr<VideoFrame> frame = VideoFrame::WrapNativeTextures(
@@ -232,7 +228,10 @@ class PictureBufferManagerImpl : public PictureBufferManager {
}
private:
- ~PictureBufferManagerImpl() override { DVLOG(1) << __func__; }
+ ~PictureBufferManagerImpl() override {
+ DVLOG(1) << __func__;
+ DCHECK(picture_buffers_.empty() || !command_buffer_helper_->HasStub());
+ }
void OnVideoFrameDestroyed(int32_t picture_buffer_id,
const gpu::SyncToken& sync_token) {
@@ -240,16 +239,13 @@ class PictureBufferManagerImpl : public PictureBufferManager {
base::AutoLock lock(picture_buffers_lock_);
- // If the picture buffer is still assigned, mark it as unreleased.
+ // Record the picture buffer as waiting for SyncToken release (even if it
+ // has been dismissed already).
const auto& it = picture_buffers_.find(picture_buffer_id);
- if (it != picture_buffers_.end()) {
- auto& state = it->second.state;
- auto state_it =
- std::find(state.begin(), state.end(), PictureBufferState::OUTPUT);
- if (state_it != state.end())
- state.erase(state_it);
- state.push_back(PictureBufferState::WAITING_FOR_SYNCTOKEN);
- }
+ DCHECK(it != picture_buffers_.end());
+ DCHECK_GT(it->second.output_count, 0);
+ it->second.output_count--;
+ it->second.waiting_for_synctoken_count++;
// Wait for the SyncToken release.
gpu_task_runner_->PostTask(
@@ -266,44 +262,55 @@ class PictureBufferManagerImpl : public PictureBufferManager {
DCHECK(gpu_task_runner_);
DCHECK(gpu_task_runner_->BelongsToCurrentThread());
- // If the picture buffer is still assigned, mark it as available.
+ // Remove the pending wait.
bool is_assigned = false;
+ bool is_in_use = true;
{
base::AutoLock lock(picture_buffers_lock_);
const auto& it = picture_buffers_.find(picture_buffer_id);
- if (it != picture_buffers_.end()) {
- auto& state = it->second.state;
- auto state_it = std::find(state.begin(), state.end(),
- PictureBufferState::WAITING_FOR_SYNCTOKEN);
- if (state_it != state.end())
- state.erase(state_it);
- is_assigned = true;
- }
+ DCHECK(it != picture_buffers_.end());
+
+ DCHECK_GT(it->second.waiting_for_synctoken_count, 0);
+ it->second.waiting_for_synctoken_count--;
+
+ is_assigned = !it->second.dismissed;
+ is_in_use = it->second.IsInUse();
}
// If the picture buffer is still assigned, it is ready to be reused.
- // Otherwise it has been dismissed and we can now delete its textures.
- // Neither of these operations should be done while holding the lock.
+ // Otherwise it should be destroyed if it is no longer in use. Neither of
+ // these operations should be done while holding the lock.
if (is_assigned) {
+ // The callback is called even if the picture buffer is still in use; the
+ // client is expected to wait for all copies of a picture buffer to be
+ // returned before reusing any textures.
reuse_picture_buffer_cb_.Run(picture_buffer_id);
- } else {
- DestroyPictureBufferTextures(picture_buffer_id);
+ } else if (!is_in_use) {
+ DestroyPictureBuffer(picture_buffer_id);
}
}
- void DestroyPictureBufferTextures(int32_t picture_buffer_id) {
+ void DestroyPictureBuffer(int32_t picture_buffer_id) {
DVLOG(3) << __func__ << "(" << picture_buffer_id << ")";
DCHECK(gpu_task_runner_);
DCHECK(gpu_task_runner_->BelongsToCurrentThread());
+ std::vector<GLuint> service_ids;
+ {
+ base::AutoLock lock(picture_buffers_lock_);
+ const auto& it = picture_buffers_.find(picture_buffer_id);
+ DCHECK(it != picture_buffers_.end());
+ DCHECK(it->second.dismissed);
+ DCHECK(!it->second.IsInUse());
+ service_ids = std::move(it->second.service_ids);
+ picture_buffers_.erase(it);
+ }
+
if (!command_buffer_helper_->MakeContextCurrent())
return;
- const auto& it = picture_buffer_textures_.find(picture_buffer_id);
- DCHECK(it != picture_buffer_textures_.end());
- for (GLuint service_id : it->second)
+ for (GLuint service_id : service_ids)
command_buffer_helper_->DestroyTexture(service_id);
- picture_buffer_textures_.erase(it);
}
ReusePictureBufferCB reuse_picture_buffer_cb_;
@@ -312,30 +319,28 @@ class PictureBufferManagerImpl : public PictureBufferManager {
scoped_refptr<CommandBufferHelper> command_buffer_helper_;
int32_t picture_buffer_id_ = 0;
- // Includes picture puffers that have been dismissed if their textures have
- // not been deleted yet.
- std::map<int32_t, std::vector<GLuint>> picture_buffer_textures_;
- base::Lock picture_buffers_lock_;
- enum class PictureBufferState {
- // Output by the VDA, still bound to a VideoFrame.
- OUTPUT,
- // Waiting on a SyncToken before being reused.
- WAITING_FOR_SYNCTOKEN,
- };
struct PictureBufferData {
VideoPixelFormat pixel_format;
gfx::Size texture_size;
- // The picture buffer might be sent from VDA multiple times. Therefore we
- // use vector to track the status. The state is empty when the picture
- // buffer is not bound to any VideoFrame.
- std::vector<PictureBufferState> state;
+ std::vector<GLuint> service_ids;
gpu::MailboxHolder mailbox_holders[VideoFrame::kMaxPlanes];
-
- // Available for use by the VDA.
- bool IsAvailable() const { return state.empty(); }
+ bool dismissed = false;
+
+ // The same picture buffer can be output from the VDA multiple times
+ // concurrently, so the state is tracked using counts.
+ // |output_count|: Number of VideoFrames this picture buffer is bound to.
+ // |waiting_for_synctoken_count|: Number of returned frames that are
+ // waiting for SyncToken release.
+ int output_count = 0;
+ int waiting_for_synctoken_count = 0;
+
+ bool IsInUse() const {
+ return output_count > 0 || waiting_for_synctoken_count > 0;
+ }
};
- // Pictures buffers that are assigned to the VDA.
+
+ base::Lock picture_buffers_lock_;
std::map<int32_t, PictureBufferData> picture_buffers_
GUARDED_BY(picture_buffers_lock_);
diff --git a/chromium/media/gpu/ipc/service/picture_buffer_manager_unittest.cc b/chromium/media/gpu/ipc/service/picture_buffer_manager_unittest.cc
index df513739024..ccd950bd2e4 100644
--- a/chromium/media/gpu/ipc/service/picture_buffer_manager_unittest.cc
+++ b/chromium/media/gpu/ipc/service/picture_buffer_manager_unittest.cc
@@ -128,9 +128,9 @@ TEST_F(PictureBufferManagerImplTest, ReusePictureBuffer) {
Initialize();
PictureBuffer pb = CreateARGBPictureBuffer();
scoped_refptr<VideoFrame> frame = CreateVideoFrame(pb.id());
+ gpu::SyncToken sync_token = GenerateSyncToken(frame);
// Dropping the frame does not immediately trigger reuse.
- gpu::SyncToken sync_token = GenerateSyncToken(frame);
frame = nullptr;
environment_.RunUntilIdle();
@@ -140,28 +140,28 @@ TEST_F(PictureBufferManagerImplTest, ReusePictureBuffer) {
environment_.RunUntilIdle();
}
-TEST_F(PictureBufferManagerImplTest, ReusePictureBuffer_MultipleTime) {
- constexpr size_t kFrameNum = 3;
+TEST_F(PictureBufferManagerImplTest, ReusePictureBuffer_MultipleOutputs) {
+ constexpr size_t kOutputCountPerPictureBuffer = 3;
+
Initialize();
PictureBuffer pb = CreateARGBPictureBuffer();
std::vector<scoped_refptr<VideoFrame>> frames;
- for (size_t i = 0; i < kFrameNum; ++i) {
- frames.push_back(CreateVideoFrame(pb.id()));
- }
-
- // Dropping the frame does not immediately trigger reuse.
std::vector<gpu::SyncToken> sync_tokens;
- for (auto& frame : frames) {
+ for (size_t i = 0; i < kOutputCountPerPictureBuffer; i++) {
+ scoped_refptr<VideoFrame> frame = CreateVideoFrame(pb.id());
+ frames.push_back(frame);
sync_tokens.push_back(GenerateSyncToken(frame));
}
+
+ // Dropping the frames does not immediately trigger reuse.
frames.clear();
environment_.RunUntilIdle();
- // Completing the SyncToken wait does.
- EXPECT_CALL(reuse_cb_, Run(pb.id())).Times(kFrameNum);
- for (auto& sync_token : sync_tokens) {
+ // Completing the SyncToken waits does. (Clients are expected to wait for the
+ // output count to reach zero before actually reusing the picture buffer.)
+ EXPECT_CALL(reuse_cb_, Run(pb.id())).Times(kOutputCountPerPictureBuffer);
+ for (const auto& sync_token : sync_tokens)
cbh_->ReleaseSyncToken(sync_token);
- }
environment_.RunUntilIdle();
}
@@ -179,6 +179,7 @@ TEST_F(PictureBufferManagerImplTest, DismissPictureBuffer_Output) {
Initialize();
PictureBuffer pb = CreateARGBPictureBuffer();
scoped_refptr<VideoFrame> frame = CreateVideoFrame(pb.id());
+ gpu::SyncToken sync_token = GenerateSyncToken(frame);
pbm_->DismissPictureBuffer(pb.id());
// Allocated textures should not be deleted while the VideoFrame exists.
@@ -186,18 +187,52 @@ TEST_F(PictureBufferManagerImplTest, DismissPictureBuffer_Output) {
EXPECT_TRUE(cbh_->HasTexture(pb.client_texture_ids()[0]));
// Or after it has been returned.
- gpu::SyncToken sync_token = GenerateSyncToken(frame);
frame = nullptr;
environment_.RunUntilIdle();
EXPECT_TRUE(cbh_->HasTexture(pb.client_texture_ids()[0]));
- // Until the SyncToken has been waited for. (Reuse callback should not be
- // called for a dismissed picture buffer.)
+ // The textures should be deleted once the wait has completed. The reuse
+ // callback should not be called for a dismissed picture buffer.
cbh_->ReleaseSyncToken(sync_token);
environment_.RunUntilIdle();
EXPECT_FALSE(cbh_->HasTexture(pb.client_texture_ids()[0]));
}
+TEST_F(PictureBufferManagerImplTest, DismissPictureBuffer_MultipleOutputs) {
+ constexpr size_t kOutputCountPerPictureBuffer = 3;
+
+ Initialize();
+ PictureBuffer pb = CreateARGBPictureBuffer();
+ std::vector<scoped_refptr<VideoFrame>> frames;
+ std::vector<gpu::SyncToken> sync_tokens;
+ for (size_t i = 0; i < kOutputCountPerPictureBuffer; i++) {
+ scoped_refptr<VideoFrame> frame = CreateVideoFrame(pb.id());
+ frames.push_back(frame);
+ sync_tokens.push_back(GenerateSyncToken(frame));
+ }
+ pbm_->DismissPictureBuffer(pb.id());
+
+ // Allocated textures should not be deleted while the VideoFrames exist.
+ environment_.RunUntilIdle();
+ EXPECT_TRUE(cbh_->HasTexture(pb.client_texture_ids()[0]));
+
+ // Or after they have been returned.
+ frames.clear();
+ environment_.RunUntilIdle();
+ EXPECT_TRUE(cbh_->HasTexture(pb.client_texture_ids()[0]));
+
+ // The textures should be deleted only once all of the waits have completed.
+ for (size_t i = 0; i < kOutputCountPerPictureBuffer; i++) {
+ cbh_->ReleaseSyncToken(sync_tokens[i]);
+ environment_.RunUntilIdle();
+ if (i < kOutputCountPerPictureBuffer - 1) {
+ EXPECT_TRUE(cbh_->HasTexture(pb.client_texture_ids()[0]));
+ } else {
+ EXPECT_FALSE(cbh_->HasTexture(pb.client_texture_ids()[0]));
+ }
+ }
+}
+
TEST_F(PictureBufferManagerImplTest, CanReadWithoutStalling) {
// Works before Initialize().
EXPECT_TRUE(pbm_->CanReadWithoutStalling());
diff --git a/chromium/media/gpu/ipc/service/vda_video_decoder.cc b/chromium/media/gpu/ipc/service/vda_video_decoder.cc
index e8824ab1b46..82ad4598f03 100644
--- a/chromium/media/gpu/ipc/service/vda_video_decoder.cc
+++ b/chromium/media/gpu/ipc/service/vda_video_decoder.cc
@@ -207,13 +207,12 @@ std::string VdaVideoDecoder::GetDisplayName() const {
return "VdaVideoDecoder";
}
-void VdaVideoDecoder::Initialize(
- const VideoDecoderConfig& config,
- bool low_delay,
- CdmContext* cdm_context,
- const InitCB& init_cb,
- const OutputCB& output_cb,
- const WaitingForDecryptionKeyCB& waiting_for_decryption_key_cb) {
+void VdaVideoDecoder::Initialize(const VideoDecoderConfig& config,
+ bool low_delay,
+ CdmContext* cdm_context,
+ const InitCB& init_cb,
+ const OutputCB& output_cb,
+ const WaitingCB& waiting_cb) {
DVLOG(1) << __func__ << "(" << config.AsHumanReadableString() << ")";
DCHECK(parent_task_runner_->BelongsToCurrentThread());
DCHECK(config.IsValidConfig());
@@ -285,7 +284,7 @@ void VdaVideoDecoder::InitializeOnGpuThread() {
DCHECK(gpu_task_runner_->BelongsToCurrentThread());
DCHECK(!vda_);
- // Set up |command_buffer_helper_|.
+ // Set up |command_buffer_helper|.
scoped_refptr<CommandBufferHelper> command_buffer_helper =
std::move(create_command_buffer_helper_cb_).Run();
if (!command_buffer_helper) {
diff --git a/chromium/media/gpu/ipc/service/vda_video_decoder.h b/chromium/media/gpu/ipc/service/vda_video_decoder.h
index 062d600ffe1..e9e9b5d6038 100644
--- a/chromium/media/gpu/ipc/service/vda_video_decoder.h
+++ b/chromium/media/gpu/ipc/service/vda_video_decoder.h
@@ -98,13 +98,12 @@ class VdaVideoDecoder : public VideoDecoder,
// media::VideoDecoder implementation.
std::string GetDisplayName() const override;
- void Initialize(
- const VideoDecoderConfig& config,
- bool low_delay,
- CdmContext* cdm_context,
- const InitCB& init_cb,
- const OutputCB& output_cb,
- const WaitingForDecryptionKeyCB& waiting_for_decryption_key_cb) override;
+ void Initialize(const VideoDecoderConfig& config,
+ bool low_delay,
+ CdmContext* cdm_context,
+ const InitCB& init_cb,
+ const OutputCB& output_cb,
+ const WaitingCB& waiting_cb) override;
void Decode(scoped_refptr<DecoderBuffer> buffer,
const DecodeCB& decode_cb) override;
void Reset(const base::RepeatingClosure& reset_cb) override;
@@ -154,7 +153,6 @@ class VdaVideoDecoder : public VideoDecoder,
gfx::Size texture_size,
GLenum texture_target);
void ReusePictureBuffer(int32_t picture_buffer_id);
- void AddEventOnParentThread(std::unique_ptr<MediaLogEvent> event);
// Error handling.
void EnterErrorState();
@@ -191,9 +189,11 @@ class VdaVideoDecoder : public VideoDecoder,
//
// Shared state.
//
+
// Only read on GPU thread during initialization, which is mutually exclusive
// with writes on the parent thread.
VideoDecoderConfig config_;
+
// Only written on the GPU thread during initialization, which is mutually
// exclusive with reads on the parent thread.
std::unique_ptr<VideoDecodeAccelerator> vda_;
diff --git a/chromium/media/gpu/ipc/service/vda_video_decoder_unittest.cc b/chromium/media/gpu/ipc/service/vda_video_decoder_unittest.cc
index 35a110db49c..859e6e195fb 100644
--- a/chromium/media/gpu/ipc/service/vda_video_decoder_unittest.cc
+++ b/chromium/media/gpu/ipc/service/vda_video_decoder_unittest.cc
@@ -6,10 +6,10 @@
#include <stdint.h>
-#include "base/macros.h"
#include "base/memory/ptr_util.h"
#include "base/memory/scoped_refptr.h"
#include "base/single_thread_task_runner.h"
+#include "base/stl_util.h"
#include "base/test/mock_callback.h"
#include "base/test/scoped_task_environment.h"
#include "base/threading/thread.h"
@@ -43,7 +43,7 @@ namespace media {
namespace {
constexpr uint8_t kData[] = "foo";
-constexpr size_t kDataSize = arraysize(kData);
+constexpr size_t kDataSize = base::size(kData);
scoped_refptr<DecoderBuffer> CreateDecoderBuffer(base::TimeDelta timestamp) {
scoped_refptr<DecoderBuffer> buffer =
@@ -311,9 +311,7 @@ class VdaVideoDecoderTest : public testing::TestWithParam<bool> {
testing::NiceMock<MockMediaLog> media_log_;
testing::StrictMock<base::MockCallback<VideoDecoder::InitCB>> init_cb_;
testing::StrictMock<base::MockCallback<VideoDecoder::OutputCB>> output_cb_;
- testing::StrictMock<
- base::MockCallback<VideoDecoder::WaitingForDecryptionKeyCB>>
- waiting_cb_;
+ testing::StrictMock<base::MockCallback<WaitingCB>> waiting_cb_;
testing::StrictMock<base::MockCallback<VideoDecoder::DecodeCB>> decode_cb_;
testing::StrictMock<base::MockCallback<base::RepeatingClosure>> reset_cb_;
diff --git a/chromium/media/gpu/jpeg_decode_accelerator_unittest.cc b/chromium/media/gpu/jpeg_decode_accelerator_unittest.cc
index a5d652122c5..51285a92e3c 100644
--- a/chromium/media/gpu/jpeg_decode_accelerator_unittest.cc
+++ b/chromium/media/gpu/jpeg_decode_accelerator_unittest.cc
@@ -2,10 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// This has to be included first.
-// See http://code.google.com/p/googletest/issues/detail?id=371
-#include "testing/gtest/include/gtest/gtest.h"
-
#include <stddef.h>
#include <stdint.h>
#include <string.h>
@@ -34,6 +30,7 @@
#include "media/gpu/test/video_accelerator_unittest_helpers.h"
#include "media/video/jpeg_decode_accelerator.h"
#include "mojo/core/embedder/embedder.h"
+#include "testing/gtest/include/gtest/gtest.h"
#include "third_party/libyuv/include/libyuv.h"
#include "ui/gfx/codec/jpeg_codec.h"
#include "ui/gfx/codec/png_codec.h"
@@ -161,20 +158,17 @@ class JpegDecodeAcceleratorTestEnvironment : public ::testing::Environment {
void SetUp() override;
- // Creates and returns a FilePath for the pathless |name|. The current folder
- // is used if |name| exists in it. If not the file will be treated as relative
- // to the test data path. This is either a custom test data path provided by
- // --test_data_path, or the default test data path (//media/test/data).
- base::FilePath GetOriginalOrTestDataFilePath(const std::string& name) {
- LOG_ASSERT(std::find_if(name.begin(), name.end(),
- base::FilePath::IsSeparator) == name.end())
- << name << " should be just a file name and not have a path";
- const base::FilePath original_file_path = base::FilePath(name);
+ // Resolve the specified file path. The file path can be either an absolute
+ // path, relative to the current directory, or relative to the test data path.
+ // This is either a custom test data path provided by --test_data_path, or the
+ // default test data path (//media/test/data).
+ base::FilePath GetOriginalOrTestDataFilePath(const std::string& file_path) {
+ const base::FilePath original_file_path = base::FilePath(file_path);
if (base::PathExists(original_file_path))
return original_file_path;
if (test_data_path_)
return base::FilePath(test_data_path_).Append(original_file_path);
- return GetTestDataFilePath(name);
+ return GetTestDataFilePath(file_path);
}
// Used for InputSizeChange test case. The image size should be smaller than
@@ -246,9 +240,10 @@ enum ClientState {
class JpegClient : public JpegDecodeAccelerator::Client {
public:
// JpegClient takes ownership of |note|.
- JpegClient(const std::vector<ParsedJpegImage*>& test_image_files,
- std::unique_ptr<ClientStateNotification<ClientState>> note,
- bool is_skip);
+ JpegClient(
+ const std::vector<ParsedJpegImage*>& test_image_files,
+ std::unique_ptr<media::test::ClientStateNotification<ClientState>> note,
+ bool is_skip);
~JpegClient() override;
void CreateJpegDecoder();
void StartDecode(int32_t bitstream_buffer_id, bool do_prepare_memory = true);
@@ -261,7 +256,9 @@ class JpegClient : public JpegDecodeAccelerator::Client {
JpegDecodeAccelerator::Error error) override;
// Accessors.
- ClientStateNotification<ClientState>* note() const { return note_.get(); }
+ media::test::ClientStateNotification<ClientState>* note() const {
+ return note_.get();
+ }
private:
FRIEND_TEST_ALL_PREFIXES(JpegClientTest, GetMeanAbsoluteDifference);
@@ -284,7 +281,7 @@ class JpegClient : public JpegDecodeAccelerator::Client {
ClientState state_;
// Used to notify another thread about the state. JpegClient owns this.
- std::unique_ptr<ClientStateNotification<ClientState>> note_;
+ std::unique_ptr<media::test::ClientStateNotification<ClientState>> note_;
// Skip JDA decode result. Used for testing performance.
bool is_skip_;
@@ -311,7 +308,7 @@ class JpegClient : public JpegDecodeAccelerator::Client {
JpegClient::JpegClient(
const std::vector<ParsedJpegImage*>& test_image_files,
- std::unique_ptr<ClientStateNotification<ClientState>> note,
+ std::unique_ptr<media::test::ClientStateNotification<ClientState>> note,
bool is_skip)
: test_image_files_(test_image_files),
state_(CS_CREATED),
@@ -588,7 +585,8 @@ void JpegDecodeAcceleratorTest::TestDecode(
for (size_t i = 0; i < num_concurrent_decoders; i++) {
auto client = std::make_unique<JpegClient>(
- images, std::make_unique<ClientStateNotification<ClientState>>(),
+ images,
+ std::make_unique<media::test::ClientStateNotification<ClientState>>(),
false /* is_skip */);
scoped_clients.emplace_back(
new ScopedJpegClient(decoder_thread.task_runner(), std::move(client)));
@@ -624,7 +622,8 @@ void JpegDecodeAcceleratorTest::PerfDecodeByJDA(
ASSERT_TRUE(decoder_thread.Start());
auto client = std::make_unique<JpegClient>(
- images, std::make_unique<ClientStateNotification<ClientState>>(),
+ images,
+ std::make_unique<media::test::ClientStateNotification<ClientState>>(),
true /* is_skip */);
auto scoped_client = std::make_unique<ScopedJpegClient>(
decoder_thread.task_runner(), std::move(client));
@@ -658,7 +657,8 @@ void JpegDecodeAcceleratorTest::PerfDecodeBySW(
LOG_ASSERT(images.size() == 1);
std::unique_ptr<JpegClient> client = std::make_unique<JpegClient>(
- images, std::make_unique<ClientStateNotification<ClientState>>(),
+ images,
+ std::make_unique<media::test::ClientStateNotification<ClientState>>(),
true /* is_skip */);
const int32_t bitstream_buffer_id = 0;
diff --git a/chromium/media/gpu/jpeg_encode_accelerator_unittest.cc b/chromium/media/gpu/jpeg_encode_accelerator_unittest.cc
index c0721c974dd..920e5c2efed 100644
--- a/chromium/media/gpu/jpeg_encode_accelerator_unittest.cc
+++ b/chromium/media/gpu/jpeg_encode_accelerator_unittest.cc
@@ -19,6 +19,7 @@
#include "base/strings/string_split.h"
#include "base/strings/stringprintf.h"
#include "base/test/scoped_task_environment.h"
+#include "base/test/test_timeouts.h"
#include "base/threading/thread.h"
#include "base/threading/thread_task_runner_handle.h"
#include "build/build_config.h"
@@ -131,6 +132,11 @@ class JpegEncodeAcceleratorTestEnvironment : public ::testing::Environment {
};
void JpegEncodeAcceleratorTestEnvironment::SetUp() {
+ // Since base::test::ScopedTaskEnvironment will call
+ // TestTimeouts::action_max_timeout(), TestTimeouts::Initialize() needs to be
+ // called in advance.
+ TestTimeouts::Initialize();
+
if (!log_path_.empty()) {
log_file_.reset(new base::File(
log_path_, base::File::FLAG_CREATE_ALWAYS | base::File::FLAG_WRITE));
@@ -236,7 +242,7 @@ class JpegClient : public JpegEncodeAccelerator::Client {
public:
JpegClient(const std::vector<TestImage*>& test_aligned_images,
const std::vector<TestImage*>& test_images,
- ClientStateNotification<ClientState>* note);
+ media::test::ClientStateNotification<ClientState>* note);
~JpegClient() override;
void CreateJpegEncoder();
void DestroyJpegEncoder();
@@ -287,7 +293,7 @@ class JpegClient : public JpegEncodeAccelerator::Client {
// Used to notify another thread about the state. JpegClient does not own
// this.
- ClientStateNotification<ClientState>* note_;
+ media::test::ClientStateNotification<ClientState>* note_;
// Output buffer prepared for JpegEncodeAccelerator.
std::unique_ptr<BitstreamBuffer> encoded_buffer_;
@@ -304,7 +310,7 @@ class JpegClient : public JpegEncodeAccelerator::Client {
JpegClient::JpegClient(const std::vector<TestImage*>& test_aligned_images,
const std::vector<TestImage*>& test_images,
- ClientStateNotification<ClientState>* note)
+ media::test::ClientStateNotification<ClientState>* note)
: test_aligned_images_(test_aligned_images),
test_images_(test_images),
state_(ClientState::CREATED),
@@ -590,11 +596,14 @@ void JpegEncodeAcceleratorTest::TestEncode(size_t num_concurrent_encoders) {
base::Thread encoder_thread("EncoderThread");
ASSERT_TRUE(encoder_thread.Start());
- std::vector<std::unique_ptr<ClientStateNotification<ClientState>>> notes;
+ std::vector<
+ std::unique_ptr<media::test::ClientStateNotification<ClientState>>>
+ notes;
std::vector<std::unique_ptr<JpegClient>> clients;
for (size_t i = 0; i < num_concurrent_encoders; i++) {
- notes.push_back(std::make_unique<ClientStateNotification<ClientState>>());
+ notes.push_back(
+ std::make_unique<media::test::ClientStateNotification<ClientState>>());
clients.push_back(std::make_unique<JpegClient>(
test_aligned_images_, test_images_, notes.back().get()));
encoder_thread.task_runner()->PostTask(
diff --git a/chromium/media/gpu/libyuv_image_processor.cc b/chromium/media/gpu/libyuv_image_processor.cc
new file mode 100644
index 00000000000..96547496c1a
--- /dev/null
+++ b/chromium/media/gpu/libyuv_image_processor.cc
@@ -0,0 +1,192 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/gpu/libyuv_image_processor.h"
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/memory/ptr_util.h"
+#include "media/base/bind_to_current_loop.h"
+#include "media/gpu/macros.h"
+#include "third_party/libyuv/include/libyuv/convert_from.h"
+
+namespace media {
+
+LibYUVImageProcessor::LibYUVImageProcessor(
+ const VideoFrameLayout& input_layout,
+ const gfx::Size& input_visible_size,
+ VideoFrame::StorageType input_storage_type,
+ const VideoFrameLayout& output_layout,
+ const gfx::Size& output_visible_size,
+ VideoFrame::StorageType output_storage_type,
+ OutputMode output_mode,
+ ErrorCB error_cb)
+ : ImageProcessor(input_layout,
+ input_storage_type,
+ output_layout,
+ output_storage_type,
+ output_mode),
+ input_visible_rect_(input_visible_size),
+ output_visible_rect_(output_visible_size),
+ error_cb_(error_cb),
+ process_thread_("LibYUVImageProcessorThread") {}
+
+LibYUVImageProcessor::~LibYUVImageProcessor() {
+ DCHECK_CALLED_ON_VALID_THREAD(client_thread_checker_);
+ Reset();
+
+ process_thread_.Stop();
+}
+
+// static
+std::unique_ptr<LibYUVImageProcessor> LibYUVImageProcessor::Create(
+ const ImageProcessor::PortConfig& input_config,
+ const ImageProcessor::PortConfig& output_config,
+ const ImageProcessor::OutputMode output_mode,
+ ErrorCB error_cb) {
+ VLOGF(2);
+
+ if (!IsFormatSupported(input_config.layout.format(),
+ output_config.layout.format())) {
+ VLOGF(2) << "Conversion from " << input_config.layout.format() << " to "
+ << output_config.layout.format() << " is not supported";
+ return nullptr;
+ }
+
+ // LibYUVImageProcessor supports only memory-based video frame for input.
+ VideoFrame::StorageType input_storage_type = VideoFrame::STORAGE_UNKNOWN;
+ for (auto input_type : input_config.preferred_storage_types) {
+ if (VideoFrame::IsStorageTypeMappable(input_type)) {
+ input_storage_type = input_type;
+ break;
+ }
+ }
+ if (input_storage_type == VideoFrame::STORAGE_UNKNOWN) {
+ VLOGF(2) << "Unsupported input storage type";
+ return nullptr;
+ }
+
+ // LibYUVImageProcessor supports only memory-based video frame for output.
+ VideoFrame::StorageType output_storage_type = VideoFrame::STORAGE_UNKNOWN;
+ for (auto output_type : output_config.preferred_storage_types) {
+ if (VideoFrame::IsStorageTypeMappable(output_type)) {
+ output_storage_type = output_type;
+ break;
+ }
+ }
+ if (output_storage_type == VideoFrame::STORAGE_UNKNOWN) {
+ VLOGF(2) << "Unsupported output storage type";
+ return nullptr;
+ }
+
+ if (output_mode != OutputMode::IMPORT) {
+ VLOGF(2) << "Only support OutputMode::IMPORT";
+ return nullptr;
+ }
+
+ auto processor = base::WrapUnique(new LibYUVImageProcessor(
+ input_config.layout, input_config.visible_size, input_storage_type,
+ output_config.layout, output_config.visible_size, output_storage_type,
+ output_mode, media::BindToCurrentLoop(std::move(error_cb))));
+ if (!processor->process_thread_.Start()) {
+ VLOGF(1) << "Failed to start processing thread";
+ return nullptr;
+ }
+
+ VLOGF(2) << "LibYUVImageProcessor created for converting from "
+ << input_config.layout << " to " << output_config.layout;
+ return processor;
+}
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+bool LibYUVImageProcessor::ProcessInternal(
+ scoped_refptr<VideoFrame> frame,
+ int output_buffer_index,
+ std::vector<base::ScopedFD> output_dmabuf_fds,
+ FrameReadyCB cb) {
+ DCHECK_CALLED_ON_VALID_THREAD(client_thread_checker_);
+ NOTIMPLEMENTED();
+ return false;
+}
+#endif
+
+bool LibYUVImageProcessor::ProcessInternal(
+ scoped_refptr<VideoFrame> input_frame,
+ scoped_refptr<VideoFrame> output_frame,
+ FrameReadyCB cb) {
+ DCHECK_CALLED_ON_VALID_THREAD(client_thread_checker_);
+ DVLOGF(4);
+ DCHECK_EQ(input_frame->layout().format(), input_layout_.format());
+ DCHECK(input_frame->layout().coded_size() == input_layout_.coded_size());
+ DCHECK_EQ(output_frame->layout().format(), output_layout_.format());
+ DCHECK(output_frame->layout().coded_size() == output_layout_.coded_size());
+ DCHECK(VideoFrame::IsStorageTypeMappable(input_frame->storage_type()));
+ DCHECK(VideoFrame::IsStorageTypeMappable(output_frame->storage_type()));
+
+ // Since process_thread_ is owned by this class. base::Unretained(this) and
+ // the raw pointer of that task runner are safe.
+ process_task_tracker_.PostTask(
+ process_thread_.task_runner().get(), FROM_HERE,
+ base::BindOnce(&LibYUVImageProcessor::ProcessTask, base::Unretained(this),
+ std::move(input_frame), std::move(output_frame),
+ std::move(cb)));
+ return true;
+}
+
+void LibYUVImageProcessor::ProcessTask(scoped_refptr<VideoFrame> input_frame,
+ scoped_refptr<VideoFrame> output_frame,
+ FrameReadyCB cb) {
+ DCHECK(process_thread_.task_runner()->BelongsToCurrentThread());
+ DVLOGF(4);
+
+ int result = libyuv::I420ToNV12(input_frame->data(VideoFrame::kYPlane),
+ input_frame->stride(VideoFrame::kYPlane),
+ input_frame->data(VideoFrame::kUPlane),
+ input_frame->stride(VideoFrame::kUPlane),
+ input_frame->data(VideoFrame::kVPlane),
+ input_frame->stride(VideoFrame::kVPlane),
+ output_frame->data(VideoFrame::kYPlane),
+ output_frame->stride(VideoFrame::kYPlane),
+ output_frame->data(VideoFrame::kUVPlane),
+ output_frame->stride(VideoFrame::kUVPlane),
+ output_frame->visible_rect().width(),
+ output_frame->visible_rect().height());
+ if (result != 0) {
+ VLOGF(1) << "libyuv::I420ToNV12 returns non-zero code: " << result;
+ NotifyError();
+ return;
+ }
+ std::move(cb).Run(std::move(output_frame));
+}
+
+bool LibYUVImageProcessor::Reset() {
+ DCHECK_CALLED_ON_VALID_THREAD(client_thread_checker_);
+
+ process_task_tracker_.TryCancelAll();
+ return true;
+}
+
+void LibYUVImageProcessor::NotifyError() {
+ VLOGF(1);
+ error_cb_.Run();
+}
+
+// static
+bool LibYUVImageProcessor::IsFormatSupported(VideoPixelFormat input_format,
+ VideoPixelFormat output_format) {
+ if (input_format == PIXEL_FORMAT_I420) {
+ if (output_format == PIXEL_FORMAT_NV12) {
+ return true;
+ } else {
+ VLOGF(2) << "Unsupported output format: " << output_format
+ << " for converting input format: " << input_format;
+ return false;
+ }
+ } else {
+ VLOGF(2) << "Unsupported input format: " << input_format;
+ return false;
+ }
+}
+
+} // namespace media
diff --git a/chromium/media/gpu/libyuv_image_processor.h b/chromium/media/gpu/libyuv_image_processor.h
new file mode 100644
index 00000000000..386463dba74
--- /dev/null
+++ b/chromium/media/gpu/libyuv_image_processor.h
@@ -0,0 +1,103 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_GPU_LIBYUV_IMAGE_PROCESSOR_H_
+#define MEDIA_GPU_LIBYUV_IMAGE_PROCESSOR_H_
+
+#include <atomic>
+#include <memory>
+#include <vector>
+
+#include "base/callback_forward.h"
+#include "base/macros.h"
+#include "base/memory/scoped_refptr.h"
+#include "base/task/cancelable_task_tracker.h"
+#include "base/threading/thread.h"
+#include "base/threading/thread_checker.h"
+#include "build/build_config.h"
+#include "media/base/video_frame.h"
+#include "media/base/video_frame_layout.h"
+#include "media/base/video_types.h"
+#include "media/gpu/image_processor.h"
+#include "media/gpu/media_gpu_export.h"
+#include "ui/gfx/geometry/rect.h"
+#include "ui/gfx/geometry/size.h"
+
+namespace media {
+
+// A software image processor which uses libyuv to perform format conversion.
+// It expects input VideoFrame is mapped into CPU space, and output VideoFrame
+// is allocated in user space.
+class MEDIA_GPU_EXPORT LibYUVImageProcessor : public ImageProcessor {
+ public:
+ // ImageProcessor override
+ ~LibYUVImageProcessor() override;
+ bool Reset() override;
+
+ // Factory method to create LibYUVImageProcessor to convert video format
+ // specified in input_config and output_config. Provided |error_cb| will be
+ // posted to the same thread Create() is called if an error occurs after
+ // initialization.
+ // Returns nullptr if it fails to create LibYUVImageProcessor.
+ static std::unique_ptr<LibYUVImageProcessor> Create(
+ const ImageProcessor::PortConfig& input_config,
+ const ImageProcessor::PortConfig& output_config,
+ const ImageProcessor::OutputMode output_mode,
+ ErrorCB error_cb);
+
+ private:
+ LibYUVImageProcessor(const VideoFrameLayout& input_layout,
+ const gfx::Size& input_visible_size,
+ VideoFrame::StorageType input_storage_type,
+ const VideoFrameLayout& output_layout,
+ const gfx::Size& output_visible_size,
+ VideoFrame::StorageType output_storage_type,
+ OutputMode output_mode,
+ ErrorCB error_cb);
+
+ // ImageProcessor override
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+ bool ProcessInternal(scoped_refptr<VideoFrame> frame,
+ int output_buffer_index,
+ std::vector<base::ScopedFD> output_dmabuf_fds,
+ FrameReadyCB cb) override;
+#endif
+ bool ProcessInternal(scoped_refptr<VideoFrame> input_frame,
+ scoped_refptr<VideoFrame> output_frame,
+ FrameReadyCB cb) override;
+
+ void ProcessTask(scoped_refptr<VideoFrame> input_frame,
+ scoped_refptr<VideoFrame> output_frame,
+ FrameReadyCB cb);
+
+ void NotifyError();
+
+ static bool IsFormatSupported(VideoPixelFormat input_format,
+ VideoPixelFormat output_format);
+
+ const gfx::Rect input_visible_rect_;
+ const gfx::Rect output_visible_rect_;
+
+ // Error callback to the client.
+ ErrorCB error_cb_;
+
+ // Thread to process frame format conversion.
+ base::Thread process_thread_;
+
+ // CancelableTaskTracker for ProcessTask().
+ // Because ProcessTask is posted from |client_task_runner_|'s thread to
+ // another sequence, |process_thread_|, it is unsafe to cancel the posted task
+ // from |client_task_runner_|'s thread using CancelableCallback and WeakPtr
+ // binding. CancelableTaskTracker is designed to deal with this scenario.
+ base::CancelableTaskTracker process_task_tracker_;
+
+ // Checker for the thread that creates this LibYUVImageProcessor.
+ THREAD_CHECKER(client_thread_checker_);
+
+ DISALLOW_COPY_AND_ASSIGN(LibYUVImageProcessor);
+};
+
+} // namespace media
+
+#endif // MEDIA_GPU_LIBYUV_IMAGE_PROCESSOR_H_
diff --git a/chromium/media/gpu/platform_video_frame.cc b/chromium/media/gpu/platform_video_frame.cc
new file mode 100644
index 00000000000..ea89585476e
--- /dev/null
+++ b/chromium/media/gpu/platform_video_frame.cc
@@ -0,0 +1,109 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/gpu/platform_video_frame.h"
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/files/scoped_file.h"
+#include "media/base/video_frame_layout.h"
+#include "media/gpu/format_utils.h"
+
+#if defined(USE_OZONE)
+#include "ui/gfx/native_pixmap.h"
+#include "ui/ozone/public/ozone_platform.h"
+#include "ui/ozone/public/surface_factory_ozone.h"
+#endif
+
+namespace media {
+
+namespace {
+
+#if defined(OS_CHROMEOS)
+scoped_refptr<VideoFrame> CreateVideoFrameOzone(VideoPixelFormat pixel_format,
+ const gfx::Size& coded_size,
+ const gfx::Rect& visible_rect,
+ const gfx::Size& natural_size,
+ gfx::BufferUsage buffer_usage,
+ base::TimeDelta timestamp) {
+ ui::OzonePlatform* platform = ui::OzonePlatform::GetInstance();
+ DCHECK(platform);
+ ui::SurfaceFactoryOzone* factory = platform->GetSurfaceFactoryOzone();
+ DCHECK(factory);
+
+ gfx::BufferFormat buffer_format =
+ VideoPixelFormatToGfxBufferFormat(pixel_format);
+ auto pixmap = factory->CreateNativePixmap(
+ gfx::kNullAcceleratedWidget, coded_size, buffer_format, buffer_usage);
+
+ const size_t num_planes = VideoFrame::NumPlanes(pixel_format);
+ std::vector<VideoFrameLayout::Plane> planes(num_planes);
+ for (size_t i = 0; i < num_planes; ++i) {
+ planes[i].stride = pixmap->GetDmaBufPitch(i);
+ planes[i].offset = pixmap->GetDmaBufOffset(i);
+ planes[i].modifier = pixmap->GetDmaBufModifier(i);
+ }
+
+ const size_t num_fds = pixmap->GetDmaBufFdCount();
+ std::vector<size_t> buffer_sizes(num_fds, 0u);
+  // If the number of fds is less than the number of planes, the buffer for
+  // plane #i (i >= the number of fds) is the last buffer.
+ for (size_t i = 0; i < num_planes; ++i) {
+ size_t buffer_size =
+ planes[i].offset +
+ planes[i].stride *
+ VideoFrame::Rows(i, pixel_format, coded_size.height());
+ if (i < num_fds) {
+ buffer_sizes[i] = buffer_size;
+ } else {
+ buffer_sizes.back() = std::max(buffer_sizes.back(), buffer_size);
+ }
+ }
+ auto layout = VideoFrameLayout::CreateWithPlanes(
+ pixel_format, coded_size, std::move(planes), std::move(buffer_sizes));
+ if (!layout)
+ return nullptr;
+
+ std::vector<base::ScopedFD> dmabuf_fds;
+ for (size_t i = 0; i < num_fds; ++i) {
+ int duped_fd = HANDLE_EINTR(dup(pixmap->GetDmaBufFd(i)));
+ if (duped_fd == -1) {
+ DLOG(ERROR) << "Failed duplicating dmabuf fd";
+ return nullptr;
+ }
+ dmabuf_fds.emplace_back(duped_fd);
+ }
+
+ auto frame = VideoFrame::WrapExternalDmabufs(
+ *layout, visible_rect, visible_rect.size(), std::move(dmabuf_fds),
+ timestamp);
+ if (!frame)
+ return nullptr;
+
+  // The created |pixmap| must be owned by |frame|.
+ frame->AddDestructionObserver(
+ base::BindOnce(base::DoNothing::Once<scoped_refptr<gfx::NativePixmap>>(),
+ std::move(pixmap)));
+ return frame;
+}
+#endif // defined(OS_CHROMEOS)
+
+} // namespace
+
+scoped_refptr<VideoFrame> CreatePlatformVideoFrame(
+ VideoPixelFormat pixel_format,
+ const gfx::Size& coded_size,
+ const gfx::Rect& visible_rect,
+ const gfx::Size& natural_size,
+ gfx::BufferUsage buffer_usage,
+ base::TimeDelta timestamp) {
+#if defined(OS_CHROMEOS)
+ return CreateVideoFrameOzone(pixel_format, coded_size, visible_rect,
+ natural_size, buffer_usage, timestamp);
+#endif // defined(OS_CHROMEOS)
+ NOTREACHED();
+ return nullptr;
+}
+
+} // namespace media
diff --git a/chromium/media/gpu/platform_video_frame.h b/chromium/media/gpu/platform_video_frame.h
new file mode 100644
index 00000000000..cddf0e2482b
--- /dev/null
+++ b/chromium/media/gpu/platform_video_frame.h
@@ -0,0 +1,26 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_GPU_PLATFORM_VIDEO_FRAME_H_
+#define MEDIA_GPU_PLATFORM_VIDEO_FRAME_H_
+
+#include "media/base/video_frame.h"
+#include "media/gpu/media_gpu_export.h"
+#include "ui/gfx/buffer_types.h"
+
+namespace media {
+
+// Create platform dependent media::VideoFrame. |buffer_usage| is passed to
+// CreateNativePixmap(). See //media/base/video_frame.h for other parameters.
+MEDIA_GPU_EXPORT scoped_refptr<VideoFrame> CreatePlatformVideoFrame(
+ VideoPixelFormat pixel_format,
+ const gfx::Size& coded_size,
+ const gfx::Rect& visible_rect,
+ const gfx::Size& natural_size,
+ gfx::BufferUsage buffer_usage,
+ base::TimeDelta timestamp);
+
+} // namespace media
+
+#endif // MEDIA_GPU_PLATFORM_VIDEO_FRAME_H_
diff --git a/chromium/media/gpu/test/BUILD.gn b/chromium/media/gpu/test/BUILD.gn
index 7fc9435a54d..8b5b8d4a959 100644
--- a/chromium/media/gpu/test/BUILD.gn
+++ b/chromium/media/gpu/test/BUILD.gn
@@ -5,25 +5,41 @@
import("//build/config/ui.gni")
import("//media/gpu/args.gni")
-# TODO(dstaessens@) Split up in encode/decode/render/common helpers
source_set("helpers") {
testonly = true
+ sources = [
+ "video_accelerator_unittest_helpers.h",
+ ]
+ deps = [
+ "//base:base",
+ ]
+}
+source_set("render_helpers") {
+ testonly = true
sources = [
"rendering_helper.cc",
"rendering_helper.h",
"texture_ref.cc",
"texture_ref.h",
- "video_accelerator_unittest_helpers.h",
- "video_decode_accelerator_unittest_helpers.cc",
- "video_decode_accelerator_unittest_helpers.h",
- "video_encode_accelerator_unittest_helpers.cc",
- "video_encode_accelerator_unittest_helpers.h",
+ "video_frame_helpers.cc",
+ "video_frame_helpers.h",
+ ]
+ deps = [
+ "//media/gpu",
+ "//ui/gl/init:init",
+ ]
+ if (use_ozone) {
+ deps += [ "//ui/ozone" ]
+ }
+}
+
+source_set("frame_mapper") {
+ testonly = true
+ sources = [
"video_frame_mapper.h",
"video_frame_mapper_factory.cc",
"video_frame_mapper_factory.h",
- "video_frame_validator.cc",
- "video_frame_validator.h",
]
if (is_chromeos) {
sources += [
@@ -37,22 +53,68 @@ source_set("helpers") {
]
}
}
+
deps = [
"//media/gpu",
- "//testing/gtest",
+ ]
+}
+
+source_set("frame_validator") {
+ testonly = true
+ sources = [
+ "video_frame_validator.cc",
+ "video_frame_validator.h",
+ ]
+ public_deps = [
+ ":frame_mapper",
+ ]
+ deps = [
+ ":decode_helpers",
+ ":helpers",
+ "//media/gpu",
"//third_party/libyuv",
- "//ui/gl/init:init",
+ ]
+}
+
+source_set("decode_helpers") {
+ testonly = true
+ sources = [
+ "video_decode_accelerator_unittest_helpers.cc",
+ "video_decode_accelerator_unittest_helpers.h",
+ ]
+ public_deps = [
+ ":helpers",
+ ":render_helpers",
+ ]
+ deps = [
+ "//media/gpu",
+ "//testing/gtest",
]
if (use_ozone) {
deps += [ "//ui/ozone" ]
}
}
+source_set("encode_helpers") {
+ testonly = true
+ sources = [
+ "video_encode_accelerator_unittest_helpers.cc",
+ "video_encode_accelerator_unittest_helpers.h",
+ ]
+ public_deps = [
+ ":helpers",
+ ":render_helpers",
+ ]
+ deps = [
+ ":frame_mapper",
+ "//media/gpu",
+ ]
+}
+
# TODO(dstaessens@) Make this work on other platforms too.
if (is_chromeos) {
static_library("video_player") {
testonly = true
-
sources = [
"video_player/frame_renderer.h",
"video_player/frame_renderer_dummy.cc",
@@ -66,18 +128,33 @@ if (is_chromeos) {
"video_player/video_player.cc",
"video_player/video_player.h",
]
-
data = [
"//media/test/data/",
]
-
deps = [
- ":helpers",
+ ":decode_helpers",
"//media/gpu",
]
-
if (use_ozone) {
deps += [ "//ui/ozone" ]
}
}
}
+
+static_library("image_processor") {
+ testonly = true
+ sources = [
+ "image_processor/image_processor_client.cc",
+ "image_processor/image_processor_client.h",
+ "video_image_info.h",
+ ]
+ deps = [
+ "//media:test_support",
+ "//media/gpu",
+ "//testing/gtest",
+ "//third_party/libyuv",
+ ]
+ data = [
+ "//media/test/data/",
+ ]
+}
diff --git a/chromium/media/gpu/v4l2/generic_v4l2_device.cc b/chromium/media/gpu/v4l2/generic_v4l2_device.cc
index a1294ae0b3c..d59dc6ced7a 100644
--- a/chromium/media/gpu/v4l2/generic_v4l2_device.cc
+++ b/chromium/media/gpu/v4l2/generic_v4l2_device.cc
@@ -19,8 +19,8 @@
#include <memory>
#include "base/files/scoped_file.h"
-#include "base/macros.h"
#include "base/posix/eintr_wrapper.h"
+#include "base/stl_util.h"
#include "base/strings/stringprintf.h"
#include "base/trace_event/trace_event.h"
#include "build/build_config.h"
@@ -208,9 +208,9 @@ bool GenericV4L2Device::CanCreateEGLImageFrom(uint32_t v4l2_pixfmt) {
return std::find(
kEGLImageDrmFmtsSupported,
- kEGLImageDrmFmtsSupported + arraysize(kEGLImageDrmFmtsSupported),
+ kEGLImageDrmFmtsSupported + base::size(kEGLImageDrmFmtsSupported),
V4L2PixFmtToDrmFormat(v4l2_pixfmt)) !=
- kEGLImageDrmFmtsSupported + arraysize(kEGLImageDrmFmtsSupported);
+ kEGLImageDrmFmtsSupported + base::size(kEGLImageDrmFmtsSupported);
}
EGLImageKHR GenericV4L2Device::CreateEGLImage(
@@ -332,19 +332,15 @@ scoped_refptr<gl::GLImage> GenericV4L2Device::CreateGLImage(
}
gfx::BufferFormat buffer_format = gfx::BufferFormat::BGRA_8888;
- unsigned internal_format = GL_BGRA_EXT;
switch (fourcc) {
case DRM_FORMAT_ARGB8888:
buffer_format = gfx::BufferFormat::BGRA_8888;
- internal_format = GL_BGRA_EXT;
break;
case DRM_FORMAT_NV12:
buffer_format = gfx::BufferFormat::YUV_420_BIPLANAR;
- internal_format = GL_RGB_YCBCR_420V_CHROMIUM;
break;
case DRM_FORMAT_YVU420:
buffer_format = gfx::BufferFormat::YVU_420;
- internal_format = GL_RGB_YCRCB_420_CHROMIUM;
break;
default:
NOTREACHED();
@@ -358,9 +354,9 @@ scoped_refptr<gl::GLImage> GenericV4L2Device::CreateGLImage(
DCHECK(pixmap);
- scoped_refptr<gl::GLImageNativePixmap> image(
- new gl::GLImageNativePixmap(size, internal_format));
- bool ret = image->Initialize(pixmap.get(), buffer_format);
+ auto image =
+ base::MakeRefCounted<gl::GLImageNativePixmap>(size, buffer_format);
+ bool ret = image->Initialize(pixmap.get());
DCHECK(ret);
return image;
}
@@ -379,11 +375,11 @@ GLenum GenericV4L2Device::GetTextureTarget() {
return GL_TEXTURE_EXTERNAL_OES;
}
-uint32_t GenericV4L2Device::PreferredInputFormat(Type type) {
+std::vector<uint32_t> GenericV4L2Device::PreferredInputFormat(Type type) {
if (type == Type::kEncoder)
- return V4L2_PIX_FMT_NV12M;
+ return {V4L2_PIX_FMT_NV12M, V4L2_PIX_FMT_NV12};
- return 0;
+ return {};
}
std::vector<uint32_t> GenericV4L2Device::GetSupportedImageProcessorPixelformats(
diff --git a/chromium/media/gpu/v4l2/generic_v4l2_device.h b/chromium/media/gpu/v4l2/generic_v4l2_device.h
index 3738bb30dd6..fa2baec43e7 100644
--- a/chromium/media/gpu/v4l2/generic_v4l2_device.h
+++ b/chromium/media/gpu/v4l2/generic_v4l2_device.h
@@ -12,6 +12,7 @@
#include <stdint.h>
#include <map>
+#include <vector>
#include "base/files/scoped_file.h"
#include "base/macros.h"
@@ -60,7 +61,7 @@ class GenericV4L2Device : public V4L2Device {
EGLBoolean DestroyEGLImage(EGLDisplay egl_display,
EGLImageKHR egl_image) override;
GLenum GetTextureTarget() override;
- uint32_t PreferredInputFormat(Type type) override;
+ std::vector<uint32_t> PreferredInputFormat(Type type) override;
std::vector<uint32_t> GetSupportedImageProcessorPixelformats(
v4l2_buf_type buf_type) override;
diff --git a/chromium/media/gpu/v4l2/tegra_v4l2_device.cc b/chromium/media/gpu/v4l2/tegra_v4l2_device.cc
index 7ed27f1eea4..2e0a85c28d3 100644
--- a/chromium/media/gpu/v4l2/tegra_v4l2_device.cc
+++ b/chromium/media/gpu/v4l2/tegra_v4l2_device.cc
@@ -256,11 +256,12 @@ GLenum TegraV4L2Device::GetTextureTarget() {
return GL_TEXTURE_2D;
}
-uint32_t TegraV4L2Device::PreferredInputFormat(Type type) {
- if (type == Type::kEncoder)
- return V4L2_PIX_FMT_YUV420M;
+std::vector<uint32_t> TegraV4L2Device::PreferredInputFormat(Type type) {
+ if (type == Type::kEncoder) {
+ return {V4L2_PIX_FMT_YUV420M};
+ }
- return 0;
+ return {};
}
std::vector<uint32_t> TegraV4L2Device::GetSupportedImageProcessorPixelformats(
diff --git a/chromium/media/gpu/v4l2/tegra_v4l2_device.h b/chromium/media/gpu/v4l2/tegra_v4l2_device.h
index 2c473dfa78b..bcea9520cc0 100644
--- a/chromium/media/gpu/v4l2/tegra_v4l2_device.h
+++ b/chromium/media/gpu/v4l2/tegra_v4l2_device.h
@@ -11,6 +11,8 @@
#include <stddef.h>
#include <stdint.h>
+#include <vector>
+
#include "base/macros.h"
#include "media/gpu/v4l2/v4l2_device.h"
#include "ui/gl/gl_bindings.h"
@@ -56,7 +58,7 @@ class TegraV4L2Device : public V4L2Device {
EGLBoolean DestroyEGLImage(EGLDisplay egl_display,
EGLImageKHR egl_image) override;
GLenum GetTextureTarget() override;
- uint32_t PreferredInputFormat(Type type) override;
+ std::vector<uint32_t> PreferredInputFormat(Type type) override;
std::vector<uint32_t> GetSupportedImageProcessorPixelformats(
v4l2_buf_type buf_type) override;
diff --git a/chromium/media/gpu/v4l2/v4l2_decode_surface.cc b/chromium/media/gpu/v4l2/v4l2_decode_surface.cc
new file mode 100644
index 00000000000..26070c3657a
--- /dev/null
+++ b/chromium/media/gpu/v4l2/v4l2_decode_surface.cc
@@ -0,0 +1,93 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/gpu/v4l2/v4l2_decode_surface.h"
+#include <linux/videodev2.h>
+
+#include "base/logging.h"
+#include "base/strings/stringprintf.h"
+#include "media/gpu/macros.h"
+
+namespace media {
+
+V4L2DecodeSurface::V4L2DecodeSurface(int input_record,
+ int output_record,
+ ReleaseCB release_cb)
+ : input_record_(input_record),
+ output_record_(output_record),
+ decoded_(false),
+ release_cb_(std::move(release_cb)) {}
+
+V4L2DecodeSurface::~V4L2DecodeSurface() {
+ DVLOGF(5) << "Releasing output record id=" << output_record_;
+ if (release_cb_)
+ std::move(release_cb_).Run(output_record_);
+}
+
+void V4L2DecodeSurface::SetDecoded() {
+ DCHECK(!decoded_);
+ decoded_ = true;
+
+ // We can now drop references to all reference surfaces for this surface
+ // as we are done with decoding.
+ reference_surfaces_.clear();
+
+ // And finally execute and drop the decode done callback, if set.
+ if (done_cb_)
+ std::move(done_cb_).Run();
+}
+
+void V4L2DecodeSurface::SetVisibleRect(const gfx::Rect& visible_rect) {
+ visible_rect_ = visible_rect;
+}
+
+void V4L2DecodeSurface::SetReferenceSurfaces(
+ std::vector<scoped_refptr<V4L2DecodeSurface>> ref_surfaces) {
+ DCHECK(reference_surfaces_.empty());
+ reference_surfaces_ = std::move(ref_surfaces);
+}
+
+void V4L2DecodeSurface::SetDecodeDoneCallback(base::OnceClosure done_cb) {
+ DCHECK(!done_cb_);
+ done_cb_ = std::move(done_cb);
+}
+
+std::string V4L2DecodeSurface::ToString() const {
+ std::string out;
+ base::StringAppendF(&out, "Buffer %d -> %d. ", input_record_, output_record_);
+ base::StringAppendF(&out, "Reference surfaces:");
+ for (const auto& ref : reference_surfaces_) {
+ DCHECK_NE(ref->output_record(), output_record_);
+ base::StringAppendF(&out, " %d", ref->output_record());
+ }
+ return out;
+}
+
+void V4L2ConfigStoreDecodeSurface::PrepareSetCtrls(
+ struct v4l2_ext_controls* ctrls) const {
+ DCHECK_NE(ctrls, nullptr);
+ DCHECK_GT(config_store_, 0u);
+
+ ctrls->config_store = config_store_;
+}
+
+void V4L2ConfigStoreDecodeSurface::PrepareQueueBuffer(
+ struct v4l2_buffer* buffer) const {
+ DCHECK_NE(buffer, nullptr);
+ DCHECK_GT(config_store_, 0u);
+
+ buffer->config_store = config_store_;
+}
+
+uint64_t V4L2ConfigStoreDecodeSurface::GetReferenceID() const {
+  // Config store uses the output buffer ID as reference.
+ return output_record();
+}
+
+bool V4L2ConfigStoreDecodeSurface::Submit() const {
+ // There is nothing extra to submit when using the config store
+ return true;
+}
+
+} // namespace media
diff --git a/chromium/media/gpu/v4l2/v4l2_decode_surface.h b/chromium/media/gpu/v4l2/v4l2_decode_surface.h
new file mode 100644
index 00000000000..2a620356827
--- /dev/null
+++ b/chromium/media/gpu/v4l2/v4l2_decode_surface.h
@@ -0,0 +1,122 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_GPU_V4L2_V4L2_DECODE_SURFACE_H_
+#define MEDIA_GPU_V4L2_V4L2_DECODE_SURFACE_H_
+
+#include <vector>
+
+#include "base/callback.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "ui/gfx/geometry/rect.h"
+
+struct v4l2_ext_controls;
+struct v4l2_buffer;
+
+namespace media {
+
+// A V4L2-specific decode surface generated by V4L2DecodeSurfaceHandler.
+// It is used to store common picture metadata (e.g. visible_rect) and
+// platform-specific metadata (e.g. {input,output}_record).
+class V4L2DecodeSurface : public base::RefCounted<V4L2DecodeSurface> {
+ public:
+ // Callback function that releases the according output record.
+ // |output_record_| will be passed to the callback function as argument.
+ using ReleaseCB = base::OnceCallback<void(int)>;
+
+ // V4L2DecodeSurfaceHandler maintains a list of InputRecords, which records
+ // the status and metadata of input buffers.
+ // |input_record| is the index of the input record that corresponds to this
+ // V4L2DecodeSurface instance.
+ // |output_record|, similar to |input_record|, is the index of output record
+ // that corresponds to this instance.
+ // |release_cb| is the callback function that will be called when the instance
+ // is destroyed.
+ V4L2DecodeSurface(int input_record, int output_record, ReleaseCB release_cb);
+
+ // Mark the surface as decoded. This will also release all surfaces used for
+ // reference, as they are not needed anymore and execute the done callback,
+ // if not null.
+ void SetDecoded();
+ void SetVisibleRect(const gfx::Rect& visible_rect);
+ // Take references to each reference surface and keep them until the
+ // target surface is decoded.
+ void SetReferenceSurfaces(
+ std::vector<scoped_refptr<V4L2DecodeSurface>> ref_surfaces);
+ // If provided via this method, |done_cb| callback will be executed after
+ // decoding into this surface is finished. The callback is reset afterwards,
+ // so it needs to be set again before each decode operation.
+ void SetDecodeDoneCallback(base::OnceClosure done_cb);
+
+ // Update the passed v4l2_ext_controls structure to add the request or
+ // config store information.
+ virtual void PrepareSetCtrls(struct v4l2_ext_controls* ctrls) const = 0;
+ // Update the passed v4l2_buffer structure to add the request or
+ // config store information.
+ virtual void PrepareQueueBuffer(struct v4l2_buffer* buffer) const = 0;
+ // Return the ID to use in order to reference this frame.
+ virtual uint64_t GetReferenceID() const = 0;
+ // Submit the request corresponding to this surface once all controls have
+ // been set and all buffers queued.
+ virtual bool Submit() const = 0;
+
+ bool decoded() const { return decoded_; }
+ int input_record() const { return input_record_; }
+ int output_record() const { return output_record_; }
+ gfx::Rect visible_rect() const { return visible_rect_; }
+
+ std::string ToString() const;
+
+ protected:
+ virtual ~V4L2DecodeSurface();
+ friend class base::RefCounted<V4L2DecodeSurface>;
+
+ private:
+ // The index of the corresponding input record.
+ const int input_record_;
+ // The index of the corresponding output record.
+ const int output_record_;
+ // The visible size of the buffer.
+ gfx::Rect visible_rect_;
+
+ // Indicate whether the surface is decoded or not.
+ bool decoded_;
+ // Callback function which is called when the instance is destroyed.
+ ReleaseCB release_cb_;
+ // Callback function which is called after the surface has been decoded.
+ base::OnceClosure done_cb_;
+
+ // The decoded surfaces of the reference frames, which is kept until the
+ // surface has been decoded.
+ std::vector<scoped_refptr<V4L2DecodeSurface>> reference_surfaces_;
+
+ DISALLOW_COPY_AND_ASSIGN(V4L2DecodeSurface);
+};
+
+// An implementation of V4L2DecodeSurface that uses the config store to
+// associate controls/buffers to frames.
+class V4L2ConfigStoreDecodeSurface : public V4L2DecodeSurface {
+ public:
+ V4L2ConfigStoreDecodeSurface(int input_record,
+ int output_record,
+ ReleaseCB release_cb)
+ : V4L2DecodeSurface(input_record, output_record, std::move(release_cb)),
+ config_store_(input_record + 1) {}
+
+ void PrepareSetCtrls(struct v4l2_ext_controls* ctrls) const override;
+ void PrepareQueueBuffer(struct v4l2_buffer* buffer) const override;
+ uint64_t GetReferenceID() const override;
+ bool Submit() const override;
+
+ private:
+ ~V4L2ConfigStoreDecodeSurface() override = default;
+
+ // The configuration store of the input buffer.
+ uint32_t config_store_;
+};
+
+} // namespace media
+
+#endif // MEDIA_GPU_V4L2_V4L2_DECODE_SURFACE_H_
diff --git a/chromium/media/gpu/v4l2/v4l2_decode_surface_handler.h b/chromium/media/gpu/v4l2/v4l2_decode_surface_handler.h
new file mode 100644
index 00000000000..05f88d363db
--- /dev/null
+++ b/chromium/media/gpu/v4l2/v4l2_decode_surface_handler.h
@@ -0,0 +1,38 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_GPU_V4L2_V4L2_DECODE_SURFACE_HANDLER_H_
+#define MEDIA_GPU_V4L2_V4L2_DECODE_SURFACE_HANDLER_H_
+
+#include <linux/videodev2.h>
+
+#include "media/gpu/decode_surface_handler.h"
+#include "media/gpu/v4l2/v4l2_decode_surface.h"
+
+namespace media {
+
+class V4L2DecodeSurfaceHandler
+ : public DecodeSurfaceHandler<V4L2DecodeSurface> {
+ public:
+ V4L2DecodeSurfaceHandler() = default;
+ ~V4L2DecodeSurfaceHandler() override = default;
+
+  // Append slice data in |data| of size |size| to the pending hardware
+  // input buffer associated with |dec_surface|. This buffer will be
+  // submitted for decode on the next DecodeSurface(). Return true on success.
+ virtual bool SubmitSlice(const scoped_refptr<V4L2DecodeSurface>& dec_surface,
+ const uint8_t* data,
+ size_t size) = 0;
+
+ // Decode the surface |dec_surface|.
+ virtual void DecodeSurface(
+ const scoped_refptr<V4L2DecodeSurface>& dec_surface) = 0;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(V4L2DecodeSurfaceHandler);
+};
+
+} // namespace media
+
+#endif // MEDIA_GPU_V4L2_V4L2_DECODE_SURFACE_HANDLER_H_
diff --git a/chromium/media/gpu/v4l2/v4l2_device.cc b/chromium/media/gpu/v4l2/v4l2_device.cc
index c0d849ad19c..faa1af6bd2d 100644
--- a/chromium/media/gpu/v4l2/v4l2_device.cc
+++ b/chromium/media/gpu/v4l2/v4l2_device.cc
@@ -40,6 +40,7 @@ class V4L2Buffer {
~V4L2Buffer();
void* GetPlaneMapping(const size_t plane);
+ size_t GetMemoryUsage() const;
const struct v4l2_buffer* v4l2_buffer() const { return &v4l2_buffer_; }
private:
@@ -145,6 +146,14 @@ void* V4L2Buffer::GetPlaneMapping(const size_t plane) {
return p;
}
+size_t V4L2Buffer::GetMemoryUsage() const {
+ size_t usage = 0;
+ for (size_t i = 0; i < v4l2_buffer_.length; i++) {
+ usage += v4l2_buffer_.m.planes[i].length;
+ }
+ return usage;
+}
+
// Module-private class that let users query/write V4L2 buffer information.
// It also makes some private V4L2Queue methods available to this module only.
class V4L2BufferQueueProxy {
@@ -622,6 +631,19 @@ bool V4L2Queue::DeallocateBuffers() {
return true;
}
+size_t V4L2Queue::GetMemoryUsage() const {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ size_t usage = 0;
+ for (const auto& buf : buffers_) {
+ usage += buf->GetMemoryUsage();
+ }
+ return usage;
+}
+
+v4l2_memory V4L2Queue::GetMemoryType() const {
+ return memory_;
+}
+
V4L2WritableBufferRef V4L2Queue::GetFreeBuffer() {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
auto iter = free_buffers_.begin();
@@ -1104,7 +1126,7 @@ int32_t V4L2Device::H264LevelIdcToV4L2H264Level(uint8_t level_idc) {
}
// static
-gfx::Size V4L2Device::CodedSizeFromV4L2Format(struct v4l2_format format) {
+gfx::Size V4L2Device::AllocatedSizeFromV4L2Format(struct v4l2_format format) {
gfx::Size coded_size;
gfx::Size visible_size;
VideoPixelFormat frame_format = PIXEL_FORMAT_UNKNOWN;
@@ -1166,13 +1188,6 @@ gfx::Size V4L2Device::CodedSizeFromV4L2Format(struct v4l2_format format) {
int coded_height = sizeimage * 8 / coded_width / total_bpp;
coded_size.SetSize(coded_width, coded_height);
- // It's possible the driver gave us a slightly larger sizeimage than what
- // would be calculated from coded size. This is technically not allowed, but
- // some drivers (Exynos) like to have some additional alignment that is not a
- // multiple of bytesperline. The best thing we can do is to compensate by
- // aligning to next full row.
- if (sizeimage > VideoFrame::AllocationSize(frame_format, coded_size))
- coded_size.SetSize(coded_width, coded_height + 1);
DVLOGF(3) << "coded_size=" << coded_size.ToString();
// Sanity checks. Calculated coded size has to contain given visible size
@@ -1184,6 +1199,22 @@ gfx::Size V4L2Device::CodedSizeFromV4L2Format(struct v4l2_format format) {
}
// static
+std::string V4L2Device::V4L2MemoryToString(const v4l2_memory memory) {
+ switch (memory) {
+ case V4L2_MEMORY_MMAP:
+ return "V4L2_MEMORY_MMAP";
+ case V4L2_MEMORY_USERPTR:
+ return "V4L2_MEMORY_USERPTR";
+ case V4L2_MEMORY_DMABUF:
+ return "V4L2_MEMORY_DMABUF";
+ case V4L2_MEMORY_OVERLAY:
+ return "V4L2_MEMORY_OVERLAY";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+// static
std::string V4L2Device::V4L2FormatToString(const struct v4l2_format& format) {
std::ostringstream s;
s << "v4l2_format type: " << format.type;
@@ -1216,6 +1247,43 @@ std::string V4L2Device::V4L2FormatToString(const struct v4l2_format& format) {
}
// static
+std::string V4L2Device::V4L2BufferToString(const struct v4l2_buffer& buffer) {
+ std::ostringstream s;
+ s << "v4l2_buffer type: " << buffer.type << ", memory: " << buffer.memory
+ << ", index: " << buffer.index << " bytesused: " << buffer.bytesused
+ << ", length: " << buffer.length;
+ if (buffer.type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ buffer.type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ // single-planar
+ if (buffer.memory == V4L2_MEMORY_MMAP) {
+ s << ", m.offset: " << buffer.m.offset;
+ } else if (buffer.memory == V4L2_MEMORY_USERPTR) {
+ s << ", m.userptr: " << buffer.m.userptr;
+ } else if (buffer.memory == V4L2_MEMORY_DMABUF) {
+ s << ", m.fd: " << buffer.m.fd;
+ }
+ } else if (V4L2_TYPE_IS_MULTIPLANAR(buffer.type)) {
+ for (size_t i = 0; i < buffer.length; ++i) {
+ const struct v4l2_plane& plane = buffer.m.planes[i];
+ s << ", m.planes[" << i << "](bytesused: " << plane.bytesused
+ << ", length: " << plane.length
+ << ", data_offset: " << plane.data_offset;
+ if (buffer.memory == V4L2_MEMORY_MMAP) {
+ s << ", m.mem_offset: " << plane.m.mem_offset;
+ } else if (buffer.memory == V4L2_MEMORY_USERPTR) {
+ s << ", m.userptr: " << plane.m.userptr;
+ } else if (buffer.memory == V4L2_MEMORY_DMABUF) {
+ s << ", m.fd: " << plane.m.fd;
+ }
+ s << ")";
+ }
+ } else {
+ s << " unsupported yet.";
+ }
+ return s.str();
+}
+
+// static
base::Optional<VideoFrameLayout> V4L2Device::V4L2FormatToVideoFrameLayout(
const struct v4l2_format& format) {
if (!V4L2_TYPE_IS_MULTIPLANAR(format.type)) {
diff --git a/chromium/media/gpu/v4l2/v4l2_device.h b/chromium/media/gpu/v4l2/v4l2_device.h
index 5b8b3e5035b..3e7b27b79c2 100644
--- a/chromium/media/gpu/v4l2/v4l2_device.h
+++ b/chromium/media/gpu/v4l2/v4l2_device.h
@@ -204,6 +204,13 @@ class MEDIA_GPU_EXPORT V4L2Queue
// released, or this call will fail.
bool DeallocateBuffers();
+ // Returns the memory usage of v4l2 buffers owned by this V4L2Queue which are
+ // mapped in user space memory.
+ size_t GetMemoryUsage() const;
+
+ // Returns |memory_|, memory type of last buffers allocated by this V4L2Queue.
+ v4l2_memory GetMemoryType() const;
+
// Return a unique pointer to a free buffer for the caller to prepare and
// submit, or an empty pointer if no buffer is currently free.
//
@@ -300,16 +307,22 @@ class MEDIA_GPU_EXPORT V4L2Device
uint32_t pix_fmt,
bool is_encoder);
static uint32_t V4L2PixFmtToDrmFormat(uint32_t format);
- // Convert format requirements requested by a V4L2 device to gfx::Size.
- static gfx::Size CodedSizeFromV4L2Format(struct v4l2_format format);
+ // Calculates the largest plane's allocation size requested by a V4L2 device.
+ static gfx::Size AllocatedSizeFromV4L2Format(struct v4l2_format format);
// Convert required H264 profile and level to V4L2 enums.
static int32_t VideoCodecProfileToV4L2H264Profile(VideoCodecProfile profile);
static int32_t H264LevelIdcToV4L2H264Level(uint8_t level_idc);
+ // Converts v4l2_memory to a string.
+ static std::string V4L2MemoryToString(const v4l2_memory memory);
+
// Composes human readable string of v4l2_format.
static std::string V4L2FormatToString(const struct v4l2_format& format);
+ // Composes human readable string of v4l2_buffer.
+ static std::string V4L2BufferToString(const struct v4l2_buffer& buffer);
+
// Composes VideoFrameLayout based on v4l2_format.
// If error occurs, it returns base::nullopt.
static base::Optional<VideoFrameLayout> V4L2FormatToVideoFrameLayout(
@@ -415,8 +428,8 @@ class MEDIA_GPU_EXPORT V4L2Device
// Returns the supported texture target for the V4L2Device.
virtual GLenum GetTextureTarget() = 0;
- // Returns the preferred V4L2 input format for |type| or 0 if none.
- virtual uint32_t PreferredInputFormat(Type type) = 0;
+ // Returns the preferred V4L2 input formats for |type| or empty if none.
+ virtual std::vector<uint32_t> PreferredInputFormat(Type type) = 0;
// NOTE: The below methods to query capabilities have a side effect of
// closing the previously-open device, if any, and should not be called after
diff --git a/chromium/media/gpu/v4l2/v4l2_device_unittest.cc b/chromium/media/gpu/v4l2/v4l2_device_unittest.cc
index b770a313a79..df0f2455283 100644
--- a/chromium/media/gpu/v4l2/v4l2_device_unittest.cc
+++ b/chromium/media/gpu/v4l2/v4l2_device_unittest.cc
@@ -9,6 +9,7 @@
#include <vector>
#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gfx/native_pixmap_handle.h"
namespace {
@@ -77,17 +78,20 @@ TEST(V4L2DeviceTest, V4L2FormatToVideoFrameLayoutNV12) {
ASSERT_TRUE(layout.has_value());
EXPECT_EQ(PIXEL_FORMAT_NV12, layout->format());
EXPECT_EQ(gfx::Size(300, 180), layout->coded_size());
+ constexpr uint64_t kNoModifier = gfx::NativePixmapPlane::kNoModifier;
std::vector<VideoFrameLayout::Plane> expected_planes(
- {{320, 0u}, {320, 57600u}});
+ {{320, 0u, kNoModifier}, {320, 57600u, kNoModifier}});
EXPECT_EQ(expected_planes, layout->planes());
EXPECT_EQ(std::vector<size_t>({86400u}), layout->buffer_sizes());
EXPECT_EQ(86400u, layout->GetTotalBufferSize());
std::ostringstream ostream;
ostream << *layout;
+ const std::string kNoModifierStr = std::to_string(kNoModifier);
EXPECT_EQ(ostream.str(),
"VideoFrameLayout(format: PIXEL_FORMAT_NV12, coded_size: 300x180, "
- "planes (stride, offset): [(320, 0), (320, 57600)], "
- "buffer_sizes: [86400])");
+ "planes (stride, offset, modifier): [(320, 0, " +
+ kNoModifierStr + "), (320, 57600, " + kNoModifierStr +
+ ")], buffer_sizes: [86400])");
}
// Test V4L2FormatToVideoFrameLayout with YUV420 pixelformat, which has one
@@ -99,17 +103,23 @@ TEST(V4L2DeviceTest, V4L2FormatToVideoFrameLayoutYUV420) {
ASSERT_TRUE(layout.has_value());
EXPECT_EQ(PIXEL_FORMAT_I420, layout->format());
EXPECT_EQ(gfx::Size(300, 180), layout->coded_size());
+ constexpr uint64_t kNoModifier = gfx::NativePixmapPlane::kNoModifier;
std::vector<VideoFrameLayout::Plane> expected_planes(
- {{320, 0u}, {160, 57600u}, {160, 72000}});
+ {{320, 0u, kNoModifier},
+ {160, 57600u, kNoModifier},
+ {160, 72000u, kNoModifier}});
EXPECT_EQ(expected_planes, layout->planes());
EXPECT_EQ(std::vector<size_t>({86400u}), layout->buffer_sizes());
EXPECT_EQ(86400u, layout->GetTotalBufferSize());
std::ostringstream ostream;
ostream << *layout;
+ const std::string kNoModifierStr = std::to_string(kNoModifier);
EXPECT_EQ(ostream.str(),
"VideoFrameLayout(format: PIXEL_FORMAT_I420, coded_size: 300x180, "
- "planes (stride, offset): [(320, 0), (160, 57600), (160, 72000)], "
- "buffer_sizes: [86400])");
+ "planes (stride, offset, modifier): [(320, 0, " +
+ kNoModifierStr + "), (160, 57600, " + kNoModifierStr +
+ "), (160, 72000, " + kNoModifierStr +
+ ")], buffer_sizes: [86400])");
}
// Test V4L2FormatToVideoFrameLayout with single planar v4l2_format.
diff --git a/chromium/media/gpu/v4l2/v4l2_h264_accelerator.cc b/chromium/media/gpu/v4l2/v4l2_h264_accelerator.cc
new file mode 100644
index 00000000000..5fab11f50ea
--- /dev/null
+++ b/chromium/media/gpu/v4l2/v4l2_h264_accelerator.cc
@@ -0,0 +1,468 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/gpu/v4l2/v4l2_h264_accelerator.h"
+
+#include <type_traits>
+
+#include "base/logging.h"
+#include "base/stl_util.h"
+#include "media/gpu/macros.h"
+#include "media/gpu/v4l2/v4l2_decode_surface.h"
+#include "media/gpu/v4l2/v4l2_decode_surface_handler.h"
+#include "media/gpu/v4l2/v4l2_device.h"
+
+namespace media {
+
+class V4L2H264Picture : public H264Picture {
+ public:
+ explicit V4L2H264Picture(const scoped_refptr<V4L2DecodeSurface>& dec_surface)
+ : dec_surface_(dec_surface) {}
+
+ V4L2H264Picture* AsV4L2H264Picture() override { return this; }
+ scoped_refptr<V4L2DecodeSurface> dec_surface() { return dec_surface_; }
+
+ private:
+ ~V4L2H264Picture() override {}
+
+ scoped_refptr<V4L2DecodeSurface> dec_surface_;
+
+ DISALLOW_COPY_AND_ASSIGN(V4L2H264Picture);
+};
+
+V4L2H264Accelerator::V4L2H264Accelerator(
+ V4L2DecodeSurfaceHandler* surface_handler,
+ V4L2Device* device)
+ : num_slices_(0), surface_handler_(surface_handler), device_(device) {
+ DCHECK(surface_handler_);
+}
+
+V4L2H264Accelerator::~V4L2H264Accelerator() {}
+
+scoped_refptr<H264Picture> V4L2H264Accelerator::CreateH264Picture() {
+ scoped_refptr<V4L2DecodeSurface> dec_surface =
+ surface_handler_->CreateSurface();
+ if (!dec_surface)
+ return nullptr;
+
+ return new V4L2H264Picture(dec_surface);
+}
+
+void V4L2H264Accelerator::H264PictureListToDPBIndicesList(
+ const H264Picture::Vector& src_pic_list,
+ uint8_t dst_list[kDPBIndicesListSize]) {
+ size_t i;
+ for (i = 0; i < src_pic_list.size() && i < kDPBIndicesListSize; ++i) {
+ const scoped_refptr<H264Picture>& pic = src_pic_list[i];
+ dst_list[i] = pic ? pic->dpb_position : VIDEO_MAX_FRAME;
+ }
+
+ while (i < kDPBIndicesListSize)
+ dst_list[i++] = VIDEO_MAX_FRAME;
+}
+
+void V4L2H264Accelerator::H264DPBToV4L2DPB(
+ const H264DPB& dpb,
+ std::vector<scoped_refptr<V4L2DecodeSurface>>* ref_surfaces) {
+ memset(v4l2_decode_param_.dpb, 0, sizeof(v4l2_decode_param_.dpb));
+ size_t i = 0;
+ for (const auto& pic : dpb) {
+ if (i >= base::size(v4l2_decode_param_.dpb)) {
+ VLOGF(1) << "Invalid DPB size";
+ break;
+ }
+
+ int index = VIDEO_MAX_FRAME;
+ if (!pic->nonexisting) {
+ scoped_refptr<V4L2DecodeSurface> dec_surface =
+ H264PictureToV4L2DecodeSurface(pic);
+ index = dec_surface->GetReferenceID();
+ ref_surfaces->push_back(dec_surface);
+ }
+
+ struct v4l2_h264_dpb_entry& entry = v4l2_decode_param_.dpb[i++];
+ entry.buf_index = index;
+ entry.frame_num = pic->frame_num;
+ entry.pic_num = pic->pic_num;
+ entry.top_field_order_cnt = pic->top_field_order_cnt;
+ entry.bottom_field_order_cnt = pic->bottom_field_order_cnt;
+ entry.flags = (pic->ref ? V4L2_H264_DPB_ENTRY_FLAG_ACTIVE : 0) |
+ (pic->long_term ? V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM : 0);
+ }
+}
+
+H264Decoder::H264Accelerator::Status V4L2H264Accelerator::SubmitFrameMetadata(
+ const H264SPS* sps,
+ const H264PPS* pps,
+ const H264DPB& dpb,
+ const H264Picture::Vector& ref_pic_listp0,
+ const H264Picture::Vector& ref_pic_listb0,
+ const H264Picture::Vector& ref_pic_listb1,
+ const scoped_refptr<H264Picture>& pic) {
+ struct v4l2_ext_control ctrl;
+ std::vector<struct v4l2_ext_control> ctrls;
+
+ struct v4l2_ctrl_h264_sps v4l2_sps;
+ memset(&v4l2_sps, 0, sizeof(v4l2_sps));
+ v4l2_sps.constraint_set_flags =
+ (sps->constraint_set0_flag ? V4L2_H264_SPS_CONSTRAINT_SET0_FLAG : 0) |
+ (sps->constraint_set1_flag ? V4L2_H264_SPS_CONSTRAINT_SET1_FLAG : 0) |
+ (sps->constraint_set2_flag ? V4L2_H264_SPS_CONSTRAINT_SET2_FLAG : 0) |
+ (sps->constraint_set3_flag ? V4L2_H264_SPS_CONSTRAINT_SET3_FLAG : 0) |
+ (sps->constraint_set4_flag ? V4L2_H264_SPS_CONSTRAINT_SET4_FLAG : 0) |
+ (sps->constraint_set5_flag ? V4L2_H264_SPS_CONSTRAINT_SET5_FLAG : 0);
+#define SPS_TO_V4L2SPS(a) v4l2_sps.a = sps->a
+ SPS_TO_V4L2SPS(profile_idc);
+ SPS_TO_V4L2SPS(level_idc);
+ SPS_TO_V4L2SPS(seq_parameter_set_id);
+ SPS_TO_V4L2SPS(chroma_format_idc);
+ SPS_TO_V4L2SPS(bit_depth_luma_minus8);
+ SPS_TO_V4L2SPS(bit_depth_chroma_minus8);
+ SPS_TO_V4L2SPS(log2_max_frame_num_minus4);
+ SPS_TO_V4L2SPS(pic_order_cnt_type);
+ SPS_TO_V4L2SPS(log2_max_pic_order_cnt_lsb_minus4);
+ SPS_TO_V4L2SPS(offset_for_non_ref_pic);
+ SPS_TO_V4L2SPS(offset_for_top_to_bottom_field);
+ SPS_TO_V4L2SPS(num_ref_frames_in_pic_order_cnt_cycle);
+
+ static_assert(std::extent<decltype(v4l2_sps.offset_for_ref_frame)>() ==
+ std::extent<decltype(sps->offset_for_ref_frame)>(),
+ "offset_for_ref_frame arrays must be same size");
+ for (size_t i = 0; i < base::size(v4l2_sps.offset_for_ref_frame); ++i)
+ v4l2_sps.offset_for_ref_frame[i] = sps->offset_for_ref_frame[i];
+ SPS_TO_V4L2SPS(max_num_ref_frames);
+ SPS_TO_V4L2SPS(pic_width_in_mbs_minus1);
+ SPS_TO_V4L2SPS(pic_height_in_map_units_minus1);
+#undef SPS_TO_V4L2SPS
+
+#define SET_V4L2_SPS_FLAG_IF(cond, flag) \
+ v4l2_sps.flags |= ((sps->cond) ? (flag) : 0)
+ SET_V4L2_SPS_FLAG_IF(separate_colour_plane_flag,
+ V4L2_H264_SPS_FLAG_SEPARATE_COLOUR_PLANE);
+ SET_V4L2_SPS_FLAG_IF(qpprime_y_zero_transform_bypass_flag,
+ V4L2_H264_SPS_FLAG_QPPRIME_Y_ZERO_TRANSFORM_BYPASS);
+ SET_V4L2_SPS_FLAG_IF(delta_pic_order_always_zero_flag,
+ V4L2_H264_SPS_FLAG_DELTA_PIC_ORDER_ALWAYS_ZERO);
+ SET_V4L2_SPS_FLAG_IF(gaps_in_frame_num_value_allowed_flag,
+ V4L2_H264_SPS_FLAG_GAPS_IN_FRAME_NUM_VALUE_ALLOWED);
+ SET_V4L2_SPS_FLAG_IF(frame_mbs_only_flag, V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY);
+ SET_V4L2_SPS_FLAG_IF(mb_adaptive_frame_field_flag,
+ V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD);
+ SET_V4L2_SPS_FLAG_IF(direct_8x8_inference_flag,
+ V4L2_H264_SPS_FLAG_DIRECT_8X8_INFERENCE);
+#undef SET_V4L2_SPS_FLAG_IF
+ memset(&ctrl, 0, sizeof(ctrl));
+ ctrl.id = V4L2_CID_MPEG_VIDEO_H264_SPS;
+ ctrl.size = sizeof(v4l2_sps);
+ ctrl.p_h264_sps = &v4l2_sps;
+ ctrls.push_back(ctrl);
+
+ struct v4l2_ctrl_h264_pps v4l2_pps;
+ memset(&v4l2_pps, 0, sizeof(v4l2_pps));
+#define PPS_TO_V4L2PPS(a) v4l2_pps.a = pps->a
+ PPS_TO_V4L2PPS(pic_parameter_set_id);
+ PPS_TO_V4L2PPS(seq_parameter_set_id);
+ PPS_TO_V4L2PPS(num_slice_groups_minus1);
+ PPS_TO_V4L2PPS(num_ref_idx_l0_default_active_minus1);
+ PPS_TO_V4L2PPS(num_ref_idx_l1_default_active_minus1);
+ PPS_TO_V4L2PPS(weighted_bipred_idc);
+ PPS_TO_V4L2PPS(pic_init_qp_minus26);
+ PPS_TO_V4L2PPS(pic_init_qs_minus26);
+ PPS_TO_V4L2PPS(chroma_qp_index_offset);
+ PPS_TO_V4L2PPS(second_chroma_qp_index_offset);
+#undef PPS_TO_V4L2PPS
+
+#define SET_V4L2_PPS_FLAG_IF(cond, flag) \
+ v4l2_pps.flags |= ((pps->cond) ? (flag) : 0)
+ SET_V4L2_PPS_FLAG_IF(entropy_coding_mode_flag,
+ V4L2_H264_PPS_FLAG_ENTROPY_CODING_MODE);
+ SET_V4L2_PPS_FLAG_IF(
+ bottom_field_pic_order_in_frame_present_flag,
+ V4L2_H264_PPS_FLAG_BOTTOM_FIELD_PIC_ORDER_IN_FRAME_PRESENT);
+ SET_V4L2_PPS_FLAG_IF(weighted_pred_flag, V4L2_H264_PPS_FLAG_WEIGHTED_PRED);
+ SET_V4L2_PPS_FLAG_IF(deblocking_filter_control_present_flag,
+ V4L2_H264_PPS_FLAG_DEBLOCKING_FILTER_CONTROL_PRESENT);
+ SET_V4L2_PPS_FLAG_IF(constrained_intra_pred_flag,
+ V4L2_H264_PPS_FLAG_CONSTRAINED_INTRA_PRED);
+ SET_V4L2_PPS_FLAG_IF(redundant_pic_cnt_present_flag,
+ V4L2_H264_PPS_FLAG_REDUNDANT_PIC_CNT_PRESENT);
+ SET_V4L2_PPS_FLAG_IF(transform_8x8_mode_flag,
+ V4L2_H264_PPS_FLAG_TRANSFORM_8X8_MODE);
+ SET_V4L2_PPS_FLAG_IF(pic_scaling_matrix_present_flag,
+ V4L2_H264_PPS_FLAG_PIC_SCALING_MATRIX_PRESENT);
+#undef SET_V4L2_PPS_FLAG_IF
+ memset(&ctrl, 0, sizeof(ctrl));
+ ctrl.id = V4L2_CID_MPEG_VIDEO_H264_PPS;
+ ctrl.size = sizeof(v4l2_pps);
+ ctrl.p_h264_pps = &v4l2_pps;
+ ctrls.push_back(ctrl);
+
+ struct v4l2_ctrl_h264_scaling_matrix v4l2_scaling_matrix;
+ memset(&v4l2_scaling_matrix, 0, sizeof(v4l2_scaling_matrix));
+
+ static_assert(
+ std::extent<decltype(v4l2_scaling_matrix.scaling_list_4x4)>() <=
+ std::extent<decltype(pps->scaling_list4x4)>() &&
+ std::extent<decltype(v4l2_scaling_matrix.scaling_list_4x4[0])>() <=
+ std::extent<decltype(pps->scaling_list4x4[0])>() &&
+ std::extent<decltype(v4l2_scaling_matrix.scaling_list_8x8)>() <=
+ std::extent<decltype(pps->scaling_list8x8)>() &&
+ std::extent<decltype(v4l2_scaling_matrix.scaling_list_8x8[0])>() <=
+ std::extent<decltype(pps->scaling_list8x8[0])>(),
+ "scaling_lists must be of correct size");
+ static_assert(
+ std::extent<decltype(v4l2_scaling_matrix.scaling_list_4x4)>() <=
+ std::extent<decltype(sps->scaling_list4x4)>() &&
+ std::extent<decltype(v4l2_scaling_matrix.scaling_list_4x4[0])>() <=
+ std::extent<decltype(sps->scaling_list4x4[0])>() &&
+ std::extent<decltype(v4l2_scaling_matrix.scaling_list_8x8)>() <=
+ std::extent<decltype(sps->scaling_list8x8)>() &&
+ std::extent<decltype(v4l2_scaling_matrix.scaling_list_8x8[0])>() <=
+ std::extent<decltype(sps->scaling_list8x8[0])>(),
+ "scaling_lists must be of correct size");
+
+ const auto* scaling_list4x4 = &sps->scaling_list4x4[0];
+ const auto* scaling_list8x8 = &sps->scaling_list8x8[0];
+ if (pps->pic_scaling_matrix_present_flag) {
+ scaling_list4x4 = &pps->scaling_list4x4[0];
+ scaling_list8x8 = &pps->scaling_list8x8[0];
+ }
+
+ for (size_t i = 0; i < base::size(v4l2_scaling_matrix.scaling_list_4x4);
+ ++i) {
+ for (size_t j = 0; j < base::size(v4l2_scaling_matrix.scaling_list_4x4[i]);
+ ++j) {
+ v4l2_scaling_matrix.scaling_list_4x4[i][j] = scaling_list4x4[i][j];
+ }
+ }
+ for (size_t i = 0; i < base::size(v4l2_scaling_matrix.scaling_list_8x8);
+ ++i) {
+ for (size_t j = 0; j < base::size(v4l2_scaling_matrix.scaling_list_8x8[i]);
+ ++j) {
+ v4l2_scaling_matrix.scaling_list_8x8[i][j] = scaling_list8x8[i][j];
+ }
+ }
+
+ memset(&ctrl, 0, sizeof(ctrl));
+ ctrl.id = V4L2_CID_MPEG_VIDEO_H264_SCALING_MATRIX;
+ ctrl.size = sizeof(v4l2_scaling_matrix);
+ ctrl.p_h264_scal_mtrx = &v4l2_scaling_matrix;
+ ctrls.push_back(ctrl);
+
+ scoped_refptr<V4L2DecodeSurface> dec_surface =
+ H264PictureToV4L2DecodeSurface(pic);
+
+ struct v4l2_ext_controls ext_ctrls;
+ memset(&ext_ctrls, 0, sizeof(ext_ctrls));
+ ext_ctrls.count = ctrls.size();
+ ext_ctrls.controls = &ctrls[0];
+ dec_surface->PrepareSetCtrls(&ext_ctrls);
+ if (device_->Ioctl(VIDIOC_S_EXT_CTRLS, &ext_ctrls) != 0) {
+ VPLOGF(1) << "ioctl() failed: VIDIOC_S_EXT_CTRLS";
+ return Status::kFail;
+ }
+
+ H264PictureListToDPBIndicesList(ref_pic_listp0,
+ v4l2_decode_param_.ref_pic_list_p0);
+ H264PictureListToDPBIndicesList(ref_pic_listb0,
+ v4l2_decode_param_.ref_pic_list_b0);
+ H264PictureListToDPBIndicesList(ref_pic_listb1,
+ v4l2_decode_param_.ref_pic_list_b1);
+
+ std::vector<scoped_refptr<V4L2DecodeSurface>> ref_surfaces;
+ H264DPBToV4L2DPB(dpb, &ref_surfaces);
+ dec_surface->SetReferenceSurfaces(ref_surfaces);
+
+ return Status::kOk;
+}
+
+H264Decoder::H264Accelerator::Status V4L2H264Accelerator::SubmitSlice(
+ const H264PPS* pps,
+ const H264SliceHeader* slice_hdr,
+ const H264Picture::Vector& ref_pic_list0,
+ const H264Picture::Vector& ref_pic_list1,
+ const scoped_refptr<H264Picture>& pic,
+ const uint8_t* data,
+ size_t size,
+ const std::vector<SubsampleEntry>& subsamples) {
+ if (num_slices_ == kMaxSlices) {
+ VLOGF(1) << "Over limit of supported slices per frame";
+ return Status::kFail;
+ }
+
+ struct v4l2_ctrl_h264_slice_param& v4l2_slice_param =
+ v4l2_slice_params_[num_slices_++];
+ memset(&v4l2_slice_param, 0, sizeof(v4l2_slice_param));
+
+ v4l2_slice_param.size = size;
+#define SHDR_TO_V4L2SPARM(a) v4l2_slice_param.a = slice_hdr->a
+ SHDR_TO_V4L2SPARM(header_bit_size);
+ SHDR_TO_V4L2SPARM(first_mb_in_slice);
+ SHDR_TO_V4L2SPARM(slice_type);
+ SHDR_TO_V4L2SPARM(pic_parameter_set_id);
+ SHDR_TO_V4L2SPARM(colour_plane_id);
+ SHDR_TO_V4L2SPARM(frame_num);
+ SHDR_TO_V4L2SPARM(idr_pic_id);
+ SHDR_TO_V4L2SPARM(pic_order_cnt_lsb);
+ SHDR_TO_V4L2SPARM(delta_pic_order_cnt_bottom);
+ SHDR_TO_V4L2SPARM(delta_pic_order_cnt0);
+ SHDR_TO_V4L2SPARM(delta_pic_order_cnt1);
+ SHDR_TO_V4L2SPARM(redundant_pic_cnt);
+ SHDR_TO_V4L2SPARM(dec_ref_pic_marking_bit_size);
+ SHDR_TO_V4L2SPARM(cabac_init_idc);
+ SHDR_TO_V4L2SPARM(slice_qp_delta);
+ SHDR_TO_V4L2SPARM(slice_qs_delta);
+ SHDR_TO_V4L2SPARM(disable_deblocking_filter_idc);
+ SHDR_TO_V4L2SPARM(slice_alpha_c0_offset_div2);
+ SHDR_TO_V4L2SPARM(slice_beta_offset_div2);
+ SHDR_TO_V4L2SPARM(num_ref_idx_l0_active_minus1);
+ SHDR_TO_V4L2SPARM(num_ref_idx_l1_active_minus1);
+ SHDR_TO_V4L2SPARM(pic_order_cnt_bit_size);
+#undef SHDR_TO_V4L2SPARM
+
+#define SET_V4L2_SPARM_FLAG_IF(cond, flag) \
+ v4l2_slice_param.flags |= ((slice_hdr->cond) ? (flag) : 0)
+ SET_V4L2_SPARM_FLAG_IF(field_pic_flag, V4L2_SLICE_FLAG_FIELD_PIC);
+ SET_V4L2_SPARM_FLAG_IF(bottom_field_flag, V4L2_SLICE_FLAG_BOTTOM_FIELD);
+ SET_V4L2_SPARM_FLAG_IF(direct_spatial_mv_pred_flag,
+ V4L2_SLICE_FLAG_DIRECT_SPATIAL_MV_PRED);
+ SET_V4L2_SPARM_FLAG_IF(sp_for_switch_flag, V4L2_SLICE_FLAG_SP_FOR_SWITCH);
+#undef SET_V4L2_SPARM_FLAG_IF
+
+ struct v4l2_h264_pred_weight_table* pred_weight_table =
+ &v4l2_slice_param.pred_weight_table;
+
+ if (((slice_hdr->IsPSlice() || slice_hdr->IsSPSlice()) &&
+ pps->weighted_pred_flag) ||
+ (slice_hdr->IsBSlice() && pps->weighted_bipred_idc == 1)) {
+ pred_weight_table->luma_log2_weight_denom =
+ slice_hdr->luma_log2_weight_denom;
+ pred_weight_table->chroma_log2_weight_denom =
+ slice_hdr->chroma_log2_weight_denom;
+
+ struct v4l2_h264_weight_factors* factorsl0 =
+ &pred_weight_table->weight_factors[0];
+
+ for (int i = 0; i < 32; ++i) {
+ factorsl0->luma_weight[i] =
+ slice_hdr->pred_weight_table_l0.luma_weight[i];
+ factorsl0->luma_offset[i] =
+ slice_hdr->pred_weight_table_l0.luma_offset[i];
+
+ for (int j = 0; j < 2; ++j) {
+ factorsl0->chroma_weight[i][j] =
+ slice_hdr->pred_weight_table_l0.chroma_weight[i][j];
+ factorsl0->chroma_offset[i][j] =
+ slice_hdr->pred_weight_table_l0.chroma_offset[i][j];
+ }
+ }
+
+ if (slice_hdr->IsBSlice()) {
+ struct v4l2_h264_weight_factors* factorsl1 =
+ &pred_weight_table->weight_factors[1];
+
+ for (int i = 0; i < 32; ++i) {
+ factorsl1->luma_weight[i] =
+ slice_hdr->pred_weight_table_l1.luma_weight[i];
+ factorsl1->luma_offset[i] =
+ slice_hdr->pred_weight_table_l1.luma_offset[i];
+
+ for (int j = 0; j < 2; ++j) {
+ factorsl1->chroma_weight[i][j] =
+ slice_hdr->pred_weight_table_l1.chroma_weight[i][j];
+ factorsl1->chroma_offset[i][j] =
+ slice_hdr->pred_weight_table_l1.chroma_offset[i][j];
+ }
+ }
+ }
+ }
+
+ H264PictureListToDPBIndicesList(ref_pic_list0,
+ v4l2_slice_param.ref_pic_list0);
+ H264PictureListToDPBIndicesList(ref_pic_list1,
+ v4l2_slice_param.ref_pic_list1);
+
+ scoped_refptr<V4L2DecodeSurface> dec_surface =
+ H264PictureToV4L2DecodeSurface(pic);
+
+ v4l2_decode_param_.nal_ref_idc = slice_hdr->nal_ref_idc;
+
+ // TODO(posciak): Don't add start code back here, but have it passed from
+ // the parser.
+ size_t data_copy_size = size + 3;
+ std::unique_ptr<uint8_t[]> data_copy(new uint8_t[data_copy_size]);
+ memset(data_copy.get(), 0, data_copy_size);
+ data_copy[2] = 0x01;
+ memcpy(data_copy.get() + 3, data, size);
+ return surface_handler_->SubmitSlice(dec_surface, data_copy.get(),
+ data_copy_size)
+ ? Status::kOk
+ : Status::kFail;
+}
+
+H264Decoder::H264Accelerator::Status V4L2H264Accelerator::SubmitDecode(
+ const scoped_refptr<H264Picture>& pic) {
+ scoped_refptr<V4L2DecodeSurface> dec_surface =
+ H264PictureToV4L2DecodeSurface(pic);
+
+ v4l2_decode_param_.num_slices = num_slices_;
+ v4l2_decode_param_.idr_pic_flag = pic->idr;
+ v4l2_decode_param_.top_field_order_cnt = pic->top_field_order_cnt;
+ v4l2_decode_param_.bottom_field_order_cnt = pic->bottom_field_order_cnt;
+
+ struct v4l2_ext_control ctrl;
+ std::vector<struct v4l2_ext_control> ctrls;
+
+ memset(&ctrl, 0, sizeof(ctrl));
+ ctrl.id = V4L2_CID_MPEG_VIDEO_H264_SLICE_PARAM;
+ ctrl.size = sizeof(v4l2_slice_params_);
+ ctrl.p_h264_slice_param = v4l2_slice_params_;
+ ctrls.push_back(ctrl);
+
+ memset(&ctrl, 0, sizeof(ctrl));
+ ctrl.id = V4L2_CID_MPEG_VIDEO_H264_DECODE_PARAM;
+ ctrl.size = sizeof(v4l2_decode_param_);
+ ctrl.p_h264_decode_param = &v4l2_decode_param_;
+ ctrls.push_back(ctrl);
+
+ struct v4l2_ext_controls ext_ctrls;
+ memset(&ext_ctrls, 0, sizeof(ext_ctrls));
+ ext_ctrls.count = ctrls.size();
+ ext_ctrls.controls = &ctrls[0];
+ dec_surface->PrepareSetCtrls(&ext_ctrls);
+ if (device_->Ioctl(VIDIOC_S_EXT_CTRLS, &ext_ctrls) != 0) {
+ VPLOGF(1) << "ioctl() failed: VIDIOC_S_EXT_CTRLS";
+ return Status::kFail;
+ }
+
+ Reset();
+
+ DVLOGF(4) << "Submitting decode for surface: " << dec_surface->ToString();
+ surface_handler_->DecodeSurface(dec_surface);
+ return Status::kOk;
+}
+
+bool V4L2H264Accelerator::OutputPicture(const scoped_refptr<H264Picture>& pic) {
+ // TODO(crbug.com/647725): Insert correct color space.
+ surface_handler_->SurfaceReady(H264PictureToV4L2DecodeSurface(pic),
+ pic->bitstream_id(), pic->visible_rect(),
+ VideoColorSpace());
+ return true;
+}
+
+void V4L2H264Accelerator::Reset() {
+ num_slices_ = 0;
+ memset(&v4l2_decode_param_, 0, sizeof(v4l2_decode_param_));
+ memset(&v4l2_slice_params_, 0, sizeof(v4l2_slice_params_));
+}
+
+scoped_refptr<V4L2DecodeSurface>
+V4L2H264Accelerator::H264PictureToV4L2DecodeSurface(
+ const scoped_refptr<H264Picture>& pic) {
+ V4L2H264Picture* v4l2_pic = pic->AsV4L2H264Picture();
+ CHECK(v4l2_pic);
+ return v4l2_pic->dec_surface();
+}
+
+} // namespace media
diff --git a/chromium/media/gpu/v4l2/v4l2_h264_accelerator.h b/chromium/media/gpu/v4l2/v4l2_h264_accelerator.h
new file mode 100644
index 00000000000..2a6c179c535
--- /dev/null
+++ b/chromium/media/gpu/v4l2/v4l2_h264_accelerator.h
@@ -0,0 +1,78 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_GPU_V4L2_V4L2_H264_ACCELERATOR_H_
+#define MEDIA_GPU_V4L2_V4L2_H264_ACCELERATOR_H_
+
+#include <linux/videodev2.h>
+
+#include <vector>
+
+#include "base/macros.h"
+#include "base/memory/scoped_refptr.h"
+#include "media/gpu/h264_decoder.h"
+#include "media/gpu/h264_dpb.h"
+
+namespace media {
+
+class V4L2Device;
+class V4L2DecodeSurface;
+class V4L2DecodeSurfaceHandler;
+
+class V4L2H264Accelerator : public H264Decoder::H264Accelerator {
+ public:
+ using Status = H264Decoder::H264Accelerator::Status;
+
+ explicit V4L2H264Accelerator(V4L2DecodeSurfaceHandler* surface_handler,
+ V4L2Device* device);
+ ~V4L2H264Accelerator() override;
+
+ // H264Decoder::H264Accelerator implementation.
+ scoped_refptr<H264Picture> CreateH264Picture() override;
+ Status SubmitFrameMetadata(const H264SPS* sps,
+ const H264PPS* pps,
+ const H264DPB& dpb,
+ const H264Picture::Vector& ref_pic_listp0,
+ const H264Picture::Vector& ref_pic_listb0,
+ const H264Picture::Vector& ref_pic_listb1,
+ const scoped_refptr<H264Picture>& pic) override;
+ Status SubmitSlice(const H264PPS* pps,
+ const H264SliceHeader* slice_hdr,
+ const H264Picture::Vector& ref_pic_list0,
+ const H264Picture::Vector& ref_pic_list1,
+ const scoped_refptr<H264Picture>& pic,
+ const uint8_t* data,
+ size_t size,
+ const std::vector<SubsampleEntry>& subsamples) override;
+ Status SubmitDecode(const scoped_refptr<H264Picture>& pic) override;
+ bool OutputPicture(const scoped_refptr<H264Picture>& pic) override;
+ void Reset() override;
+
+ private:
+ // Max size of reference list.
+ static constexpr size_t kDPBIndicesListSize = 32;
+ // TODO(posciak): This should be queried from hardware once supported.
+ static constexpr size_t kMaxSlices = 16;
+
+ void H264PictureListToDPBIndicesList(const H264Picture::Vector& src_pic_list,
+ uint8_t dst_list[kDPBIndicesListSize]);
+ void H264DPBToV4L2DPB(
+ const H264DPB& dpb,
+ std::vector<scoped_refptr<V4L2DecodeSurface>>* ref_surfaces);
+ scoped_refptr<V4L2DecodeSurface> H264PictureToV4L2DecodeSurface(
+ const scoped_refptr<H264Picture>& pic);
+
+ size_t num_slices_;
+ V4L2DecodeSurfaceHandler* const surface_handler_;
+ V4L2Device* const device_;
+
+ struct v4l2_ctrl_h264_slice_param v4l2_slice_params_[kMaxSlices];
+ struct v4l2_ctrl_h264_decode_param v4l2_decode_param_;
+
+ DISALLOW_COPY_AND_ASSIGN(V4L2H264Accelerator);
+};
+
+} // namespace media
+
+#endif // MEDIA_GPU_V4L2_V4L2_H264_ACCELERATOR_H_
diff --git a/chromium/media/gpu/v4l2/v4l2_image_processor.cc b/chromium/media/gpu/v4l2/v4l2_image_processor.cc
index c0a58353902..0b408d968ee 100644
--- a/chromium/media/gpu/v4l2/v4l2_image_processor.cc
+++ b/chromium/media/gpu/v4l2/v4l2_image_processor.cc
@@ -4,7 +4,6 @@
#include <errno.h>
#include <fcntl.h>
-#include <linux/videodev2.h>
#include <poll.h>
#include <string.h>
#include <sys/eventfd.h>
@@ -16,8 +15,7 @@
#include "base/callback.h"
#include "base/memory/ptr_util.h"
#include "base/numerics/safe_conversions.h"
-#include "base/single_thread_task_runner.h"
-#include "base/threading/thread_task_runner_handle.h"
+#include "media/base/bind_to_current_loop.h"
#include "media/base/scopedfd_helper.h"
#include "media/base/video_types.h"
#include "media/gpu/macros.h"
@@ -74,17 +72,16 @@ V4L2ImageProcessor::V4L2ImageProcessor(
gfx::Size input_visible_size,
gfx::Size output_visible_size,
size_t num_buffers,
- const base::Closure& error_cb)
- : input_layout_(input_layout),
+ ErrorCB error_cb)
+ : ImageProcessor(input_layout,
+ input_storage_type,
+ output_layout,
+ output_storage_type,
+ output_mode),
input_visible_size_(input_visible_size),
input_memory_type_(input_memory_type),
- input_storage_type_(input_storage_type),
- output_layout_(output_layout),
output_visible_size_(output_visible_size),
output_memory_type_(output_memory_type),
- output_storage_type_(output_storage_type),
- output_mode_(output_mode),
- child_task_runner_(base::ThreadTaskRunnerHandle::Get()),
device_(device),
device_thread_("V4L2ImageProcessorThread"),
device_poll_thread_("V4L2ImageProcessorDevicePollThread"),
@@ -93,13 +90,10 @@ V4L2ImageProcessor::V4L2ImageProcessor(
output_streamon_(false),
output_buffer_queued_count_(0),
num_buffers_(num_buffers),
- error_cb_(error_cb),
- weak_this_factory_(this) {
- weak_this_ = weak_this_factory_.GetWeakPtr();
-}
+ error_cb_(error_cb) {}
V4L2ImageProcessor::~V4L2ImageProcessor() {
- DCHECK(child_task_runner_->BelongsToCurrentThread());
+ DCHECK_CALLED_ON_VALID_THREAD(client_thread_checker_);
Destroy();
@@ -112,15 +106,6 @@ V4L2ImageProcessor::~V4L2ImageProcessor() {
void V4L2ImageProcessor::NotifyError() {
VLOGF(1);
- DCHECK(!child_task_runner_->BelongsToCurrentThread());
- child_task_runner_->PostTask(
- FROM_HERE, base::BindOnce(&V4L2ImageProcessor::NotifyErrorOnChildThread,
- weak_this_, error_cb_));
-}
-
-void V4L2ImageProcessor::NotifyErrorOnChildThread(
- const base::Closure& error_cb) {
- DCHECK(child_task_runner_->BelongsToCurrentThread());
error_cb_.Run();
}
@@ -144,20 +129,44 @@ v4l2_memory InputStorageTypeToV4L2Memory(VideoFrame::StorageType storage_type) {
// static
std::unique_ptr<V4L2ImageProcessor> V4L2ImageProcessor::Create(
- scoped_refptr<V4L2Device> device,
- VideoFrame::StorageType input_storage_type,
- VideoFrame::StorageType output_storage_type,
- OutputMode output_mode,
- const VideoFrameLayout& input_layout,
- const VideoFrameLayout& output_layout,
- gfx::Size input_visible_size,
- gfx::Size output_visible_size,
- size_t num_buffers,
- const base::Closure& error_cb) {
+ scoped_refptr<V4L2Device> device,
+ const ImageProcessor::PortConfig& input_config,
+ const ImageProcessor::PortConfig& output_config,
+ const ImageProcessor::OutputMode output_mode,
+ size_t num_buffers,
+ ErrorCB error_cb) {
VLOGF(2);
DCHECK_GT(num_buffers, 0u);
if (!device) {
- VLOGF(1) << "Failed creating V4L2Device";
+ VLOGF(2) << "Failed creating V4L2Device";
+ return nullptr;
+ }
+
+ // V4L2ImageProcessor supports either DmaBuf-backed or memory-based video
+ // frame for input.
+ VideoFrame::StorageType input_storage_type = VideoFrame::STORAGE_UNKNOWN;
+ for (auto input_type : input_config.preferred_storage_types) {
+ if (input_type == VideoFrame::STORAGE_DMABUFS ||
+ VideoFrame::IsStorageTypeMappable(input_type)) {
+ input_storage_type = input_type;
+ break;
+ }
+ }
+ if (input_storage_type == VideoFrame::STORAGE_UNKNOWN) {
+ VLOGF(2) << "Unsupported input storage type";
+ return nullptr;
+ }
+
+ // V4L2ImageProcessor only supports DmaBuf-backed video frame for output.
+ VideoFrame::StorageType output_storage_type = VideoFrame::STORAGE_UNKNOWN;
+ for (auto output_type : output_config.preferred_storage_types) {
+ if (output_type == VideoFrame::STORAGE_DMABUFS) {
+ output_storage_type = output_type;
+ break;
+ }
+ }
+ if (output_storage_type == VideoFrame::STORAGE_UNKNOWN) {
+ VLOGF(2) << "Unsupported output storage type";
return nullptr;
}
@@ -168,12 +177,6 @@ std::unique_ptr<V4L2ImageProcessor> V4L2ImageProcessor::Create(
return nullptr;
}
- // Note that for v4l2 IP, output storage type must be STORAGE_DMABUFS.
- // And output_memory_type depends on its output mode.
- if (output_storage_type != VideoFrame::STORAGE_DMABUFS) {
- VLOGF(1) << "Unsupported output storage type: " << output_storage_type;
- return nullptr;
- }
const v4l2_memory output_memory_type =
output_mode == ImageProcessor::OutputMode::ALLOCATE ? V4L2_MEMORY_MMAP
: V4L2_MEMORY_DMABUF;
@@ -182,6 +185,8 @@ std::unique_ptr<V4L2ImageProcessor> V4L2ImageProcessor::Create(
VLOGF(1) << "V4L2ImageProcessor not supported in this platform";
return nullptr;
}
+
+ const VideoFrameLayout& input_layout = input_config.layout;
const uint32_t input_format_fourcc =
V4L2Device::VideoFrameLayoutToV4L2PixFmt(input_layout);
if (!input_format_fourcc) {
@@ -217,14 +222,15 @@ std::unique_ptr<V4L2ImageProcessor> V4L2ImageProcessor::Create(
DCHECK_LE(negotiated_input_layout->num_buffers(),
static_cast<size_t>(VIDEO_MAX_PLANES));
if (!gfx::Rect(negotiated_input_layout->coded_size())
- .Contains(gfx::Rect(input_visible_size))) {
+ .Contains(gfx::Rect(input_config.visible_size))) {
VLOGF(1) << "Negotiated input allocated size: "
<< negotiated_input_layout->coded_size().ToString()
<< " should contain visible size: "
- << input_visible_size.ToString();
+ << input_config.visible_size.ToString();
return nullptr;
}
+ const VideoFrameLayout& output_layout = output_config.layout;
const uint32_t output_format_fourcc =
V4L2Device::VideoFrameLayoutToV4L2PixFmt(output_layout);
if (!output_format_fourcc) {
@@ -238,6 +244,11 @@ std::unique_ptr<V4L2ImageProcessor> V4L2ImageProcessor::Create(
format.fmt.pix_mp.width = output_layout.coded_size().width();
format.fmt.pix_mp.height = output_layout.coded_size().height();
format.fmt.pix_mp.pixelformat = output_format_fourcc;
+ for (size_t i = 0; i < output_layout.num_buffers(); ++i) {
+ format.fmt.pix_mp.plane_fmt[i].sizeimage = output_layout.buffer_sizes()[i];
+ format.fmt.pix_mp.plane_fmt[i].bytesperline =
+ output_layout.planes()[i].stride;
+ }
if (device->Ioctl(VIDIOC_S_FMT, &format) != 0 ||
format.fmt.pix_mp.pixelformat != output_format_fourcc) {
VLOGF(1) << "Failed to negotiate output format";
@@ -252,20 +263,20 @@ std::unique_ptr<V4L2ImageProcessor> V4L2ImageProcessor::Create(
DCHECK_LE(negotiated_output_layout->num_buffers(),
static_cast<size_t>(VIDEO_MAX_PLANES));
if (!gfx::Rect(negotiated_output_layout->coded_size())
- .Contains(gfx::Rect(output_layout.coded_size()))) {
+ .Contains(gfx::Rect(output_layout.coded_size()))) {
VLOGF(1) << "Negotiated output allocated size: "
<< negotiated_output_layout->coded_size().ToString()
<< " should contain original output allocated size: "
<< output_layout.coded_size().ToString();
return nullptr;
-
}
auto processor = base::WrapUnique(new V4L2ImageProcessor(
std::move(device), input_storage_type, output_storage_type,
input_memory_type, output_memory_type, output_mode,
- *negotiated_input_layout, *negotiated_output_layout, input_visible_size,
- output_visible_size, num_buffers, std::move(error_cb)));
+ *negotiated_input_layout, *negotiated_output_layout,
+ input_config.visible_size, output_config.visible_size, num_buffers,
+ media::BindToCurrentLoop(std::move(error_cb))));
if (!processor->Initialize()) {
VLOGF(1) << "Failed to initialize V4L2ImageProcessor";
return nullptr;
@@ -357,36 +368,17 @@ bool V4L2ImageProcessor::TryOutputFormat(uint32_t input_pixelformat,
return false;
*num_planes = format.fmt.pix_mp.num_planes;
- *size = V4L2Device::CodedSizeFromV4L2Format(format);
+ *size = V4L2Device::AllocatedSizeFromV4L2Format(format);
VLOGF(2) << "adjusted output coded size=" << size->ToString()
<< ", num_planes=" << *num_planes;
return true;
}
-gfx::Size V4L2ImageProcessor::input_allocated_size() const {
- return input_layout_.coded_size();
-}
-
-gfx::Size V4L2ImageProcessor::output_allocated_size() const {
- return output_layout_.coded_size();
-}
-
-VideoFrame::StorageType V4L2ImageProcessor::input_storage_type() const {
- return input_storage_type_;
-}
-
-VideoFrame::StorageType V4L2ImageProcessor::output_storage_type() const {
- return output_storage_type_;
-}
-
-ImageProcessor::OutputMode V4L2ImageProcessor::output_mode() const {
- return output_mode_;
-}
-
-bool V4L2ImageProcessor::Process(scoped_refptr<VideoFrame> frame,
- int output_buffer_index,
- std::vector<base::ScopedFD> output_dmabuf_fds,
- FrameReadyCB cb) {
+bool V4L2ImageProcessor::ProcessInternal(
+ scoped_refptr<VideoFrame> frame,
+ int output_buffer_index,
+ std::vector<base::ScopedFD> output_dmabuf_fds,
+ FrameReadyCB cb) {
DVLOGF(4) << "ts=" << frame->timestamp().InMilliseconds();
switch (output_memory_type_) {
@@ -427,12 +419,22 @@ bool V4L2ImageProcessor::Process(scoped_refptr<VideoFrame> frame,
if (!job_record->output_frame)
return false;
- device_thread_.task_runner()->PostTask(
- FROM_HERE, base::BindOnce(&V4L2ImageProcessor::ProcessTask,
- base::Unretained(this), std::move(job_record)));
+ // Since device_thread_ is owned by this class. base::Unretained(this) and the
+ // raw pointer of that task runner are safe.
+ process_task_tracker_.PostTask(
+ device_thread_.task_runner().get(), FROM_HERE,
+ base::BindOnce(&V4L2ImageProcessor::ProcessTask, base::Unretained(this),
+ std::move(job_record)));
return true;
}
+bool V4L2ImageProcessor::ProcessInternal(scoped_refptr<VideoFrame> input_frame,
+ scoped_refptr<VideoFrame> output_frame,
+ FrameReadyCB cb) {
+ NOTIMPLEMENTED();
+ return false;
+}
+
void V4L2ImageProcessor::ProcessTask(std::unique_ptr<JobRecord> job_record) {
DVLOGF(4) << "Reusing output buffer, index="
<< job_record->output_buffer_index;
@@ -445,34 +447,21 @@ void V4L2ImageProcessor::ProcessTask(std::unique_ptr<JobRecord> job_record) {
bool V4L2ImageProcessor::Reset() {
VLOGF(2);
- DCHECK(child_task_runner_->BelongsToCurrentThread());
+ DCHECK_CALLED_ON_VALID_THREAD(client_thread_checker_);
DCHECK(device_thread_.IsRunning());
- weak_this_factory_.InvalidateWeakPtrs();
- device_thread_.task_runner()->PostTask(
- FROM_HERE,
- base::Bind(&V4L2ImageProcessor::StopDevicePoll, base::Unretained(this)));
- device_thread_.Stop();
-
- weak_this_ = weak_this_factory_.GetWeakPtr();
- if (!device_thread_.Start()) {
- VLOGF(1) << "device thread failed to start";
- return false;
- }
- device_thread_.task_runner()->PostTask(
- FROM_HERE,
- base::Bind(&V4L2ImageProcessor::StartDevicePoll, base::Unretained(this)));
+ process_task_tracker_.TryCancelAll();
return true;
}
void V4L2ImageProcessor::Destroy() {
VLOGF(2);
- DCHECK(child_task_runner_->BelongsToCurrentThread());
-
- weak_this_factory_.InvalidateWeakPtrs();
+ DCHECK_CALLED_ON_VALID_THREAD(client_thread_checker_);
// If the device thread is running, destroy using posted task.
if (device_thread_.IsRunning()) {
+ process_task_tracker_.TryCancelAll();
+
device_thread_.task_runner()->PostTask(
FROM_HERE, base::BindOnce(&V4L2ImageProcessor::StopDevicePoll,
base::Unretained(this)));
@@ -486,7 +475,8 @@ void V4L2ImageProcessor::Destroy() {
bool V4L2ImageProcessor::CreateInputBuffers() {
VLOGF(2);
- DCHECK(child_task_runner_->BelongsToCurrentThread());
+ DCHECK_CALLED_ON_VALID_THREAD(client_thread_checker_);
+
DCHECK(!input_streamon_);
struct v4l2_control control;
@@ -553,7 +543,7 @@ bool V4L2ImageProcessor::CreateInputBuffers() {
bool V4L2ImageProcessor::CreateOutputBuffers() {
VLOGF(2);
- DCHECK(child_task_runner_->BelongsToCurrentThread());
+ DCHECK_CALLED_ON_VALID_THREAD(client_thread_checker_);
DCHECK(!output_streamon_);
struct v4l2_rect visible_rect;
@@ -610,7 +600,7 @@ bool V4L2ImageProcessor::CreateOutputBuffers() {
void V4L2ImageProcessor::DestroyInputBuffers() {
VLOGF(2);
- DCHECK(child_task_runner_->BelongsToCurrentThread());
+ DCHECK_CALLED_ON_VALID_THREAD(client_thread_checker_);
DCHECK(!input_streamon_);
struct v4l2_requestbuffers reqbufs;
@@ -626,7 +616,7 @@ void V4L2ImageProcessor::DestroyInputBuffers() {
void V4L2ImageProcessor::DestroyOutputBuffers() {
VLOGF(2);
- DCHECK(child_task_runner_->BelongsToCurrentThread());
+ DCHECK_CALLED_ON_VALID_THREAD(client_thread_checker_);
DCHECK(!output_streamon_);
output_buffer_map_.clear();
@@ -802,10 +792,7 @@ void V4L2ImageProcessor::Dequeue() {
DVLOGF(4) << "Processing finished, returning frame, index=" << dqbuf.index;
- child_task_runner_->PostTask(
- FROM_HERE, base::BindOnce(&V4L2ImageProcessor::FrameReady, weak_this_,
- std::move(job_record->ready_cb),
- job_record->output_frame));
+ std::move(job_record->ready_cb).Run(std::move(job_record->output_frame));
}
}
@@ -844,7 +831,7 @@ bool V4L2ImageProcessor::EnqueueInputRecord() {
for (size_t i = 0; i < input_layout_.num_buffers(); ++i) {
qbuf.m.planes[i].bytesused =
VideoFrame::PlaneSize(input_record.frame->format(), i,
- input_allocated_size())
+ input_layout_.coded_size())
.GetArea();
qbuf.m.planes[i].length = qbuf.m.planes[i].bytesused;
switch (input_memory_type_) {
@@ -860,6 +847,7 @@ bool V4L2ImageProcessor::EnqueueInputRecord() {
return false;
}
}
+ DVLOGF(4) << "Calling VIDIOC_QBUF: " << V4L2Device::V4L2BufferToString(qbuf);
IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QBUF, &qbuf);
input_record.at_device = true;
@@ -900,6 +888,8 @@ bool V4L2ImageProcessor::EnqueueOutputRecord(const JobRecord* job_record) {
}
qbuf.m.planes = qbuf_planes;
qbuf.length = output_layout_.num_buffers();
+
+ DVLOGF(4) << "Calling VIDIOC_QBUF: " << V4L2Device::V4L2BufferToString(qbuf);
IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QBUF, &qbuf);
output_record.at_device = true;
output_buffer_queued_count_++;
@@ -975,10 +965,4 @@ void V4L2ImageProcessor::StopDevicePoll() {
output_buffer_queued_count_ = 0;
}
-void V4L2ImageProcessor::FrameReady(FrameReadyCB cb,
- scoped_refptr<VideoFrame> frame) {
- DCHECK(child_task_runner_->BelongsToCurrentThread());
- std::move(cb).Run(frame);
-}
-
} // namespace media
diff --git a/chromium/media/gpu/v4l2/v4l2_image_processor.h b/chromium/media/gpu/v4l2/v4l2_image_processor.h
index 9550dda5419..8d9936b88a7 100644
--- a/chromium/media/gpu/v4l2/v4l2_image_processor.h
+++ b/chromium/media/gpu/v4l2/v4l2_image_processor.h
@@ -13,16 +13,20 @@
#include <linux/videodev2.h>
+#include "base/callback_forward.h"
#include "base/containers/queue.h"
+#include "base/files/scoped_file.h"
#include "base/macros.h"
-#include "base/memory/ref_counted.h"
-#include "base/memory/weak_ptr.h"
+#include "base/memory/scoped_refptr.h"
+#include "base/task/cancelable_task_tracker.h"
#include "base/threading/thread.h"
+#include "base/threading/thread_checker.h"
#include "media/base/video_frame.h"
#include "media/base/video_frame_layout.h"
#include "media/gpu/image_processor.h"
#include "media/gpu/media_gpu_export.h"
#include "media/gpu/v4l2/v4l2_device.h"
+#include "ui/gfx/geometry/size.h"
namespace media {
@@ -33,18 +37,8 @@ class MEDIA_GPU_EXPORT V4L2ImageProcessor : public ImageProcessor {
public:
// ImageProcessor implementation.
~V4L2ImageProcessor() override;
- gfx::Size input_allocated_size() const override;
- gfx::Size output_allocated_size() const override;
- VideoFrame::StorageType input_storage_type() const override;
- VideoFrame::StorageType output_storage_type() const override;
- OutputMode output_mode() const override;
- bool Process(scoped_refptr<VideoFrame> frame,
- int output_buffer_index,
- std::vector<base::ScopedFD> output_dmabuf_fds,
- FrameReadyCB cb) override;
bool Reset() override;
-
// Returns true if image processing is supported on this platform.
static bool IsSupported();
@@ -64,23 +58,21 @@ class MEDIA_GPU_EXPORT V4L2ImageProcessor : public ImageProcessor {
size_t* num_planes);
// Factory method to create V4L2ImageProcessor to convert from
- // input_format to output_format. Caller shall provide input and output
- // storage type as well as output mode. The number of input buffers and output
+ // input_config to output_config. The number of input buffers and output
// buffers will be |num_buffers|. Provided |error_cb| will be posted to the
- // child thread if an error occurs after initialization. Returns nullptr if
- // V4L2ImageProcessor fails to create.
+ // same thread Create() is called if an error occurs after initialization.
+ // Returns nullptr if V4L2ImageProcessor fails to create.
// Note: output_mode will be removed once all its clients use import mode.
+ // TODO(crbug.com/917798): remove |device| parameter once
+ // V4L2VideoDecodeAccelerator no longer creates and uses
+ // |image_processor_device_| before V4L2ImageProcessor is created.
static std::unique_ptr<V4L2ImageProcessor> Create(
scoped_refptr<V4L2Device> device,
- VideoFrame::StorageType input_storage_type,
- VideoFrame::StorageType output_storage_type,
- OutputMode output_mode,
- const VideoFrameLayout& input_layout,
- const VideoFrameLayout& output_layout,
- gfx::Size input_visible_size,
- gfx::Size output_visible_size,
+ const ImageProcessor::PortConfig& input_config,
+ const ImageProcessor::PortConfig& output_config,
+ const ImageProcessor::OutputMode output_mode,
size_t num_buffers,
- const base::Closure& error_cb);
+ ErrorCB error_cb);
private:
// Record for input buffers.
@@ -130,7 +122,7 @@ class MEDIA_GPU_EXPORT V4L2ImageProcessor : public ImageProcessor {
gfx::Size input_visible_size,
gfx::Size output_visible_size,
size_t num_buffers,
- const base::Closure& error_cb);
+ ErrorCB error_cb);
bool Initialize();
void EnqueueInput();
@@ -144,7 +136,15 @@ class MEDIA_GPU_EXPORT V4L2ImageProcessor : public ImageProcessor {
void DestroyOutputBuffers();
void NotifyError();
- void NotifyErrorOnChildThread(const base::Closure& error_cb);
+
+ // ImageProcessor implementation.
+ bool ProcessInternal(scoped_refptr<VideoFrame> frame,
+ int output_buffer_index,
+ std::vector<base::ScopedFD> output_dmabuf_fds,
+ FrameReadyCB cb) override;
+ bool ProcessInternal(scoped_refptr<VideoFrame> input_frame,
+ scoped_refptr<VideoFrame> output_frame,
+ FrameReadyCB cb) override;
void ProcessTask(std::unique_ptr<JobRecord> job_record);
void ServiceDeviceTask();
@@ -156,28 +156,17 @@ class MEDIA_GPU_EXPORT V4L2ImageProcessor : public ImageProcessor {
// Ran on device_poll_thread_ to wait for device events.
void DevicePollTask(bool poll_device);
- // A processed frame is ready.
- void FrameReady(FrameReadyCB cb, scoped_refptr<VideoFrame> frame);
-
// Stop all processing and clean up. After this method returns no more
// callbacks will be invoked.
void Destroy();
- // Stores input frame's format, coded_size, buffer and plane layout.
- const VideoFrameLayout input_layout_;
+ // Stores input frame's visible size and v4l2_memory type.
const gfx::Size input_visible_size_;
const v4l2_memory input_memory_type_;
- const VideoFrame::StorageType input_storage_type_;
- // Stores input frame's format, coded_size, buffer and plane layout.
- const VideoFrameLayout output_layout_;
+ // Stores output frame's visible size and v4l2_memory type.
const gfx::Size output_visible_size_;
const v4l2_memory output_memory_type_;
- const VideoFrame::StorageType output_storage_type_;
- const OutputMode output_mode_;
-
- // Our original calling task runner for the child thread.
- const scoped_refptr<base::SingleThreadTaskRunner> child_task_runner_;
// V4L2 device in use.
scoped_refptr<V4L2Device> device_;
@@ -187,6 +176,13 @@ class MEDIA_GPU_EXPORT V4L2ImageProcessor : public ImageProcessor {
// Thread used to poll the V4L2 for events only.
base::Thread device_poll_thread_;
+ // CancelableTaskTracker for ProcessTask().
+ // Because ProcessTask is posted from |client_task_runner_|'s thread to
+ // another sequence, |device_thread_|, it is unsafe to cancel the posted tasks
+ // from |client_task_runner_|'s thread using CancelableCallback and WeakPtr
+ // binding. CancelableTaskTracker is designed to deal with this scenario.
+ base::CancelableTaskTracker process_task_tracker_;
+
// All the below members are to be accessed from device_thread_ only
// (if it's running).
base::queue<std::unique_ptr<JobRecord>> input_queue_;
@@ -211,18 +207,10 @@ class MEDIA_GPU_EXPORT V4L2ImageProcessor : public ImageProcessor {
const size_t num_buffers_;
// Error callback to the client.
- base::Closure error_cb_;
-
- // WeakPtr<> pointing to |this| for use in posting tasks from the device
- // worker threads back to the child thread. Because the worker threads
- // are members of this class, any task running on those threads is guaranteed
- // that this object is still alive. As a result, tasks posted from the child
- // thread to the device thread should use base::Unretained(this),
- // and tasks posted the other way should use |weak_this_|.
- base::WeakPtr<V4L2ImageProcessor> weak_this_;
-
- // Weak factory for producing weak pointers on the child thread.
- base::WeakPtrFactory<V4L2ImageProcessor> weak_this_factory_;
+ ErrorCB error_cb_;
+
+ // Checker for the thread that creates this V4L2ImageProcessor.
+ THREAD_CHECKER(client_thread_checker_);
DISALLOW_COPY_AND_ASSIGN(V4L2ImageProcessor);
};
diff --git a/chromium/media/gpu/v4l2/v4l2_jpeg_decode_accelerator.cc b/chromium/media/gpu/v4l2/v4l2_jpeg_decode_accelerator.cc
index 0b69d1498f4..984fd38069c 100644
--- a/chromium/media/gpu/v4l2/v4l2_jpeg_decode_accelerator.cc
+++ b/chromium/media/gpu/v4l2/v4l2_jpeg_decode_accelerator.cc
@@ -14,6 +14,7 @@
#include "base/big_endian.h"
#include "base/bind.h"
#include "base/numerics/safe_conversions.h"
+#include "base/stl_util.h"
#include "base/threading/thread_task_runner_handle.h"
#include "media/filters/jpeg_parser.h"
#include "media/gpu/macros.h"
@@ -292,7 +293,7 @@ void V4L2JpegDecodeAccelerator::DecodeTask(
PostNotifyError(job_record->bitstream_buffer_id, UNREADABLE_INPUT);
return;
}
- input_jobs_.push(make_linked_ptr(job_record.release()));
+ input_jobs_.push(std::move(job_record));
ServiceDeviceTask(false);
}
@@ -310,7 +311,7 @@ bool V4L2JpegDecodeAccelerator::ShouldRecreateInputBuffers() {
if (input_jobs_.empty())
return false;
- linked_ptr<JobRecord> job_record = input_jobs_.front();
+ JobRecord* job_record = input_jobs_.front().get();
// Check input buffer size is enough
return (input_buffer_map_.empty() ||
(job_record->shm.size() + sizeof(kDefaultDhtSeg)) >
@@ -354,7 +355,7 @@ bool V4L2JpegDecodeAccelerator::CreateInputBuffers() {
DCHECK(decoder_task_runner_->BelongsToCurrentThread());
DCHECK(!input_streamon_);
DCHECK(!input_jobs_.empty());
- linked_ptr<JobRecord> job_record = input_jobs_.front();
+ JobRecord* job_record = input_jobs_.front().get();
// The input image may miss huffman table. We didn't parse the image before,
// so we create more to avoid the situation of not enough memory.
// Reserve twice size to avoid recreating input buffer frequently.
@@ -389,7 +390,7 @@ bool V4L2JpegDecodeAccelerator::CreateInputBuffers() {
buffer.index = i;
buffer.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
buffer.m.planes = planes;
- buffer.length = arraysize(planes);
+ buffer.length = base::size(planes);
buffer.memory = V4L2_MEMORY_MMAP;
IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QUERYBUF, &buffer);
if (buffer.length != kMaxInputPlanes) {
@@ -417,7 +418,7 @@ bool V4L2JpegDecodeAccelerator::CreateOutputBuffers() {
DCHECK(decoder_task_runner_->BelongsToCurrentThread());
DCHECK(!output_streamon_);
DCHECK(!running_jobs_.empty());
- linked_ptr<JobRecord> job_record = running_jobs_.front();
+ JobRecord* job_record = running_jobs_.front().get();
size_t frame_size = VideoFrame::AllocationSize(
PIXEL_FORMAT_I420, job_record->out_frame->coded_size());
@@ -466,7 +467,7 @@ bool V4L2JpegDecodeAccelerator::CreateOutputBuffers() {
buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
buffer.memory = V4L2_MEMORY_MMAP;
buffer.m.planes = planes;
- buffer.length = arraysize(planes);
+ buffer.length = base::size(planes);
IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QUERYBUF, &buffer);
if (output_buffer_num_planes_ != buffer.length) {
@@ -759,7 +760,7 @@ void V4L2JpegDecodeAccelerator::Dequeue() {
memset(planes, 0, sizeof(planes));
dqbuf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
dqbuf.memory = V4L2_MEMORY_MMAP;
- dqbuf.length = arraysize(planes);
+ dqbuf.length = base::size(planes);
dqbuf.m.planes = planes;
if (device_->Ioctl(VIDIOC_DQBUF, &dqbuf) != 0) {
if (errno == EAGAIN) {
@@ -796,7 +797,7 @@ void V4L2JpegDecodeAccelerator::Dequeue() {
// USERPTR. Also, client doesn't need to consider the buffer alignment and
// JpegDecodeAccelerator API will be simpler.
dqbuf.memory = V4L2_MEMORY_MMAP;
- dqbuf.length = arraysize(planes);
+ dqbuf.length = base::size(planes);
dqbuf.m.planes = planes;
if (device_->Ioctl(VIDIOC_DQBUF, &dqbuf) != 0) {
if (errno == EAGAIN) {
@@ -813,7 +814,7 @@ void V4L2JpegDecodeAccelerator::Dequeue() {
free_output_buffers_.push_back(dqbuf.index);
// Jobs are always processed in FIFO order.
- linked_ptr<JobRecord> job_record = running_jobs_.front();
+ std::unique_ptr<JobRecord> job_record = std::move(running_jobs_.front());
running_jobs_.pop();
if (dqbuf.flags & V4L2_BUF_FLAG_ERROR) {
@@ -923,7 +924,7 @@ bool V4L2JpegDecodeAccelerator::EnqueueInputRecord() {
DCHECK(!free_input_buffers_.empty());
// Enqueue an input (VIDEO_OUTPUT) buffer for an input video frame.
- linked_ptr<JobRecord> job_record = input_jobs_.front();
+ std::unique_ptr<JobRecord> job_record = std::move(input_jobs_.front());
input_jobs_.pop();
const int index = free_input_buffers_.back();
BufferRecord& input_record = input_buffer_map_[index];
@@ -943,13 +944,13 @@ bool V4L2JpegDecodeAccelerator::EnqueueInputRecord() {
qbuf.index = index;
qbuf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
qbuf.memory = V4L2_MEMORY_MMAP;
- qbuf.length = arraysize(planes);
+ qbuf.length = base::size(planes);
// There is only one plane for V4L2_PIX_FMT_JPEG.
planes[0].bytesused = input_record.length[0];
qbuf.m.planes = planes;
IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QBUF, &qbuf);
input_record.at_device = true;
- running_jobs_.push(job_record);
+ running_jobs_.push(std::move(job_record));
free_input_buffers_.pop_back();
DVLOGF(3) << "enqueued frame id=" << job_record->bitstream_buffer_id
@@ -972,7 +973,7 @@ bool V4L2JpegDecodeAccelerator::EnqueueOutputRecord() {
qbuf.index = index;
qbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
qbuf.memory = V4L2_MEMORY_MMAP;
- qbuf.length = arraysize(planes);
+ qbuf.length = base::size(planes);
qbuf.m.planes = planes;
IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QBUF, &qbuf);
output_record.at_device = true;
diff --git a/chromium/media/gpu/v4l2/v4l2_jpeg_decode_accelerator.h b/chromium/media/gpu/v4l2/v4l2_jpeg_decode_accelerator.h
index f68c213c6b3..9fae6993c8d 100644
--- a/chromium/media/gpu/v4l2/v4l2_jpeg_decode_accelerator.h
+++ b/chromium/media/gpu/v4l2/v4l2_jpeg_decode_accelerator.h
@@ -13,7 +13,6 @@
#include "base/containers/queue.h"
#include "base/macros.h"
-#include "base/memory/linked_ptr.h"
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
#include "base/single_thread_task_runner.h"
@@ -163,8 +162,8 @@ class MEDIA_GPU_EXPORT V4L2JpegDecodeAccelerator
// All the below members except |weak_factory_| are accessed from
// |decoder_thread_| only (if it's running).
- base::queue<linked_ptr<JobRecord>> input_jobs_;
- base::queue<linked_ptr<JobRecord>> running_jobs_;
+ base::queue<std::unique_ptr<JobRecord>> input_jobs_;
+ base::queue<std::unique_ptr<JobRecord>> running_jobs_;
// Input queue state.
bool input_streamon_;
diff --git a/chromium/media/gpu/v4l2/v4l2_jpeg_encode_accelerator.h b/chromium/media/gpu/v4l2/v4l2_jpeg_encode_accelerator.h
index 6daf769c841..48b9d1f434a 100644
--- a/chromium/media/gpu/v4l2/v4l2_jpeg_encode_accelerator.h
+++ b/chromium/media/gpu/v4l2/v4l2_jpeg_encode_accelerator.h
@@ -13,7 +13,6 @@
#include "base/containers/queue.h"
#include "base/macros.h"
-#include "base/memory/linked_ptr.h"
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
#include "base/single_thread_task_runner.h"
diff --git a/chromium/media/gpu/v4l2/v4l2_slice_video_decode_accelerator.cc b/chromium/media/gpu/v4l2/v4l2_slice_video_decode_accelerator.cc
index 25bb7ba7e0c..a566ca2647a 100644
--- a/chromium/media/gpu/v4l2/v4l2_slice_video_decode_accelerator.cc
+++ b/chromium/media/gpu/v4l2/v4l2_slice_video_decode_accelerator.cc
@@ -20,18 +20,23 @@
#include "base/callback.h"
#include "base/callback_helpers.h"
#include "base/command_line.h"
-#include "base/macros.h"
#include "base/memory/ptr_util.h"
#include "base/numerics/safe_conversions.h"
#include "base/single_thread_task_runner.h"
+#include "base/stl_util.h"
#include "base/strings/stringprintf.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/time/time.h"
+#include "base/trace_event/memory_dump_manager.h"
#include "media/base/bind_to_current_loop.h"
#include "media/base/media_switches.h"
#include "media/base/unaligned_shared_memory.h"
#include "media/base/video_types.h"
#include "media/gpu/macros.h"
+#include "media/gpu/v4l2/v4l2_decode_surface.h"
+#include "media/gpu/v4l2/v4l2_h264_accelerator.h"
+#include "media/gpu/v4l2/v4l2_vp8_accelerator.h"
+#include "media/gpu/v4l2/v4l2_vp9_accelerator.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_image.h"
#include "ui/gl/scoped_binders.h"
@@ -69,105 +74,6 @@ const uint32_t V4L2SliceVideoDecodeAccelerator::supported_input_fourccs_[] = {
V4L2_PIX_FMT_H264_SLICE, V4L2_PIX_FMT_VP8_FRAME, V4L2_PIX_FMT_VP9_FRAME,
};
-class V4L2DecodeSurface : public base::RefCounted<V4L2DecodeSurface> {
- public:
- using ReleaseCB = base::Callback<void(int)>;
-
- V4L2DecodeSurface(int input_record,
- int output_record,
- const ReleaseCB& release_cb);
-
- // Mark the surface as decoded. This will also release all references, as
- // they are not needed anymore and execute the done callback, if not null.
- void SetDecoded();
- bool decoded() const { return decoded_; }
-
- int input_record() const { return input_record_; }
- int output_record() const { return output_record_; }
- uint32_t config_store() const { return config_store_; }
- gfx::Rect visible_rect() const { return visible_rect_; }
-
- void set_visible_rect(const gfx::Rect& visible_rect) {
- visible_rect_ = visible_rect;
- }
-
- // Take references to each reference surface and keep them until the
- // target surface is decoded.
- void SetReferenceSurfaces(
- const std::vector<scoped_refptr<V4L2DecodeSurface>>& ref_surfaces);
-
- // If provided via this method, |done_cb| callback will be executed after
- // decoding into this surface is finished. The callback is reset afterwards,
- // so it needs to be set again before each decode operation.
- void SetDecodeDoneCallback(const base::Closure& done_cb) {
- DCHECK(!done_cb_);
- done_cb_ = done_cb;
- }
-
- std::string ToString() const;
-
- private:
- friend class base::RefCounted<V4L2DecodeSurface>;
- ~V4L2DecodeSurface();
-
- int input_record_;
- int output_record_;
- uint32_t config_store_;
- gfx::Rect visible_rect_;
-
- bool decoded_;
- ReleaseCB release_cb_;
- base::Closure done_cb_;
-
- std::vector<scoped_refptr<V4L2DecodeSurface>> reference_surfaces_;
-
- DISALLOW_COPY_AND_ASSIGN(V4L2DecodeSurface);
-};
-
-V4L2DecodeSurface::V4L2DecodeSurface(int input_record,
- int output_record,
- const ReleaseCB& release_cb)
- : input_record_(input_record),
- output_record_(output_record),
- config_store_(input_record + 1),
- decoded_(false),
- release_cb_(release_cb) {}
-
-V4L2DecodeSurface::~V4L2DecodeSurface() {
- DVLOGF(5) << "Releasing output record id=" << output_record_;
- release_cb_.Run(output_record_);
-}
-
-void V4L2DecodeSurface::SetReferenceSurfaces(
- const std::vector<scoped_refptr<V4L2DecodeSurface>>& ref_surfaces) {
- DCHECK(reference_surfaces_.empty());
- reference_surfaces_ = ref_surfaces;
-}
-
-void V4L2DecodeSurface::SetDecoded() {
- DCHECK(!decoded_);
- decoded_ = true;
-
- // We can now drop references to all reference surfaces for this surface
- // as we are done with decoding.
- reference_surfaces_.clear();
-
- // And finally execute and drop the decode done callback, if set.
- if (done_cb_)
- std::move(done_cb_).Run();
-}
-
-std::string V4L2DecodeSurface::ToString() const {
- std::string out;
- base::StringAppendF(&out, "Buffer %d -> %d. ", input_record_, output_record_);
- base::StringAppendF(&out, "Reference surfaces:");
- for (const auto& ref : reference_surfaces_) {
- DCHECK_NE(ref->output_record(), output_record_);
- base::StringAppendF(&out, " %d", ref->output_record());
- }
- return out;
-}
-
V4L2SliceVideoDecodeAccelerator::InputRecord::InputRecord()
: input_id(-1),
address(nullptr),
@@ -195,6 +101,7 @@ struct V4L2SliceVideoDecodeAccelerator::BitstreamBufferRef {
scoped_refptr<DecoderBuffer> buffer,
int32_t input_id);
~BitstreamBufferRef();
+
const base::WeakPtr<VideoDecodeAccelerator::Client> client;
const scoped_refptr<base::SingleThreadTaskRunner> client_task_runner;
scoped_refptr<DecoderBuffer> buffer;
@@ -230,192 +137,6 @@ V4L2SliceVideoDecodeAccelerator::PictureRecord::PictureRecord(
V4L2SliceVideoDecodeAccelerator::PictureRecord::~PictureRecord() {}
-class V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator
- : public H264Decoder::H264Accelerator {
- public:
- using Status = H264Decoder::H264Accelerator::Status;
-
- explicit V4L2H264Accelerator(V4L2SliceVideoDecodeAccelerator* v4l2_dec);
- ~V4L2H264Accelerator() override;
-
- // H264Decoder::H264Accelerator implementation.
- scoped_refptr<H264Picture> CreateH264Picture() override;
-
- Status SubmitFrameMetadata(const H264SPS* sps,
- const H264PPS* pps,
- const H264DPB& dpb,
- const H264Picture::Vector& ref_pic_listp0,
- const H264Picture::Vector& ref_pic_listb0,
- const H264Picture::Vector& ref_pic_listb1,
- const scoped_refptr<H264Picture>& pic) override;
-
- Status SubmitSlice(const H264PPS* pps,
- const H264SliceHeader* slice_hdr,
- const H264Picture::Vector& ref_pic_list0,
- const H264Picture::Vector& ref_pic_list1,
- const scoped_refptr<H264Picture>& pic,
- const uint8_t* data,
- size_t size,
- const std::vector<SubsampleEntry>& subsamples) override;
-
- Status SubmitDecode(const scoped_refptr<H264Picture>& pic) override;
- bool OutputPicture(const scoped_refptr<H264Picture>& pic) override;
-
- void Reset() override;
-
- private:
- // Max size of reference list.
- static const size_t kDPBIndicesListSize = 32;
- void H264PictureListToDPBIndicesList(const H264Picture::Vector& src_pic_list,
- uint8_t dst_list[kDPBIndicesListSize]);
-
- void H264DPBToV4L2DPB(
- const H264DPB& dpb,
- std::vector<scoped_refptr<V4L2DecodeSurface>>* ref_surfaces);
-
- scoped_refptr<V4L2DecodeSurface> H264PictureToV4L2DecodeSurface(
- const scoped_refptr<H264Picture>& pic);
-
- size_t num_slices_;
- V4L2SliceVideoDecodeAccelerator* v4l2_dec_;
-
- // TODO(posciak): This should be queried from hardware once supported.
- static const size_t kMaxSlices = 16;
- struct v4l2_ctrl_h264_slice_param v4l2_slice_params_[kMaxSlices];
- struct v4l2_ctrl_h264_decode_param v4l2_decode_param_;
-
- DISALLOW_COPY_AND_ASSIGN(V4L2H264Accelerator);
-};
-
-class V4L2SliceVideoDecodeAccelerator::V4L2VP8Accelerator
- : public VP8Decoder::VP8Accelerator {
- public:
- explicit V4L2VP8Accelerator(V4L2SliceVideoDecodeAccelerator* v4l2_dec);
- ~V4L2VP8Accelerator() override;
-
- // VP8Decoder::VP8Accelerator implementation.
- scoped_refptr<VP8Picture> CreateVP8Picture() override;
-
- bool SubmitDecode(scoped_refptr<VP8Picture> pic,
- const Vp8ReferenceFrameVector& reference_frames) override;
-
- bool OutputPicture(const scoped_refptr<VP8Picture>& pic) override;
-
- private:
- scoped_refptr<V4L2DecodeSurface> VP8PictureToV4L2DecodeSurface(
- const scoped_refptr<VP8Picture>& pic);
-
- V4L2SliceVideoDecodeAccelerator* v4l2_dec_;
-
- DISALLOW_COPY_AND_ASSIGN(V4L2VP8Accelerator);
-};
-
-class V4L2SliceVideoDecodeAccelerator::V4L2VP9Accelerator
- : public VP9Decoder::VP9Accelerator {
- public:
- explicit V4L2VP9Accelerator(V4L2SliceVideoDecodeAccelerator* v4l2_dec);
- ~V4L2VP9Accelerator() override;
-
- // VP9Decoder::VP9Accelerator implementation.
- scoped_refptr<VP9Picture> CreateVP9Picture() override;
-
- bool SubmitDecode(const scoped_refptr<VP9Picture>& pic,
- const Vp9SegmentationParams& segm_params,
- const Vp9LoopFilterParams& lf_params,
- const std::vector<scoped_refptr<VP9Picture>>& ref_pictures,
- const base::Closure& done_cb) override;
-
- bool OutputPicture(const scoped_refptr<VP9Picture>& pic) override;
-
- bool GetFrameContext(const scoped_refptr<VP9Picture>& pic,
- Vp9FrameContext* frame_ctx) override;
-
- bool IsFrameContextRequired() const override {
- return device_needs_frame_context_;
- }
-
- private:
- scoped_refptr<V4L2DecodeSurface> VP9PictureToV4L2DecodeSurface(
- const scoped_refptr<VP9Picture>& pic);
-
- bool device_needs_frame_context_;
-
- V4L2SliceVideoDecodeAccelerator* v4l2_dec_;
-
- DISALLOW_COPY_AND_ASSIGN(V4L2VP9Accelerator);
-};
-
-// Codec-specific subclasses of software decoder picture classes.
-// This allows us to keep decoders oblivious of our implementation details.
-class V4L2H264Picture : public H264Picture {
- public:
- explicit V4L2H264Picture(const scoped_refptr<V4L2DecodeSurface>& dec_surface);
-
- V4L2H264Picture* AsV4L2H264Picture() override { return this; }
- scoped_refptr<V4L2DecodeSurface> dec_surface() { return dec_surface_; }
-
- private:
- ~V4L2H264Picture() override;
-
- scoped_refptr<V4L2DecodeSurface> dec_surface_;
-
- DISALLOW_COPY_AND_ASSIGN(V4L2H264Picture);
-};
-
-V4L2H264Picture::V4L2H264Picture(
- const scoped_refptr<V4L2DecodeSurface>& dec_surface)
- : dec_surface_(dec_surface) {}
-
-V4L2H264Picture::~V4L2H264Picture() {}
-
-class V4L2VP8Picture : public VP8Picture {
- public:
- explicit V4L2VP8Picture(const scoped_refptr<V4L2DecodeSurface>& dec_surface);
-
- V4L2VP8Picture* AsV4L2VP8Picture() override { return this; }
- scoped_refptr<V4L2DecodeSurface> dec_surface() { return dec_surface_; }
-
- private:
- ~V4L2VP8Picture() override;
-
- scoped_refptr<V4L2DecodeSurface> dec_surface_;
-
- DISALLOW_COPY_AND_ASSIGN(V4L2VP8Picture);
-};
-
-V4L2VP8Picture::V4L2VP8Picture(
- const scoped_refptr<V4L2DecodeSurface>& dec_surface)
- : dec_surface_(dec_surface) {}
-
-V4L2VP8Picture::~V4L2VP8Picture() {}
-
-class V4L2VP9Picture : public VP9Picture {
- public:
- explicit V4L2VP9Picture(const scoped_refptr<V4L2DecodeSurface>& dec_surface);
-
- V4L2VP9Picture* AsV4L2VP9Picture() override { return this; }
- scoped_refptr<V4L2DecodeSurface> dec_surface() { return dec_surface_; }
-
- private:
- ~V4L2VP9Picture() override;
-
- scoped_refptr<VP9Picture> CreateDuplicate() override;
-
- scoped_refptr<V4L2DecodeSurface> dec_surface_;
-
- DISALLOW_COPY_AND_ASSIGN(V4L2VP9Picture);
-};
-
-V4L2VP9Picture::V4L2VP9Picture(
- const scoped_refptr<V4L2DecodeSurface>& dec_surface)
- : dec_surface_(dec_surface) {}
-
-V4L2VP9Picture::~V4L2VP9Picture() {}
-
-scoped_refptr<VP9Picture> V4L2VP9Picture::CreateDuplicate() {
- return new V4L2VP9Picture(dec_surface_);
-}
-
V4L2SliceVideoDecodeAccelerator::V4L2SliceVideoDecodeAccelerator(
const scoped_refptr<V4L2Device>& device,
EGLDisplay egl_display,
@@ -539,14 +260,16 @@ bool V4L2SliceVideoDecodeAccelerator::Initialize(const Config& config,
}
if (video_profile_ >= H264PROFILE_MIN && video_profile_ <= H264PROFILE_MAX) {
- decoder_.reset(
- new H264Decoder(std::make_unique<V4L2H264Accelerator>(this)));
+ decoder_.reset(new H264Decoder(
+ std::make_unique<V4L2H264Accelerator>(this, device_.get())));
} else if (video_profile_ >= VP8PROFILE_MIN &&
video_profile_ <= VP8PROFILE_MAX) {
- decoder_.reset(new VP8Decoder(std::make_unique<V4L2VP8Accelerator>(this)));
+ decoder_.reset(new VP8Decoder(
+ std::make_unique<V4L2VP8Accelerator>(this, device_.get())));
} else if (video_profile_ >= VP9PROFILE_MIN &&
video_profile_ <= VP9PROFILE_MAX) {
- decoder_.reset(new VP9Decoder(std::make_unique<V4L2VP9Accelerator>(this)));
+ decoder_.reset(new VP9Decoder(
+ std::make_unique<V4L2VP9Accelerator>(this, device_.get())));
} else {
NOTREACHED() << "Unsupported profile " << GetProfileName(video_profile_);
return false;
@@ -570,6 +293,9 @@ bool V4L2SliceVideoDecodeAccelerator::Initialize(const Config& config,
return false;
}
decoder_thread_task_runner_ = decoder_thread_.task_runner();
+ base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
+ this, "media::V4l2SliceVideoDecodeAccelerator",
+ decoder_thread_task_runner_);
state_ = kInitialized;
output_mode_ = config.output_mode;
@@ -631,7 +357,7 @@ void V4L2SliceVideoDecodeAccelerator::DestroyTask() {
decoder_current_bitstream_buffer_.reset();
while (!decoder_input_queue_.empty())
- decoder_input_queue_.pop();
+ decoder_input_queue_.pop_front();
// Stop streaming and the device_poll_thread_.
StopDevicePoll(false);
@@ -639,6 +365,9 @@ void V4L2SliceVideoDecodeAccelerator::DestroyTask() {
DestroyInputBuffers();
DestroyOutputs(false);
+ base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider(
+ this);
+
DCHECK(surfaces_at_device_.empty());
DCHECK(surfaces_at_display_.empty());
DCHECK(decoder_display_queue_.empty());
@@ -936,8 +665,7 @@ void V4L2SliceVideoDecodeAccelerator::Enqueue(
const int old_inputs_queued = input_buffer_queued_count_;
const int old_outputs_queued = output_buffer_queued_count_;
- if (!EnqueueInputRecord(dec_surface->input_record(),
- dec_surface->config_store())) {
+ if (!EnqueueInputRecord(dec_surface.get())) {
VLOGF(1) << "Failed queueing an input buffer";
NOTIFY_ERROR(PLATFORM_FAILURE);
return;
@@ -1114,11 +842,11 @@ void V4L2SliceVideoDecodeAccelerator::ReuseOutputBuffer(int index) {
}
bool V4L2SliceVideoDecodeAccelerator::EnqueueInputRecord(
- int index,
- uint32_t config_store) {
+ const V4L2DecodeSurface* dec_surface) {
DVLOGF(4);
+ DCHECK_NE(dec_surface, nullptr);
+ const int index = dec_surface->input_record();
DCHECK_LT(index, static_cast<int>(input_buffer_map_.size()));
- DCHECK_GT(config_store, 0u);
// Enqueue an input (VIDEO_OUTPUT) buffer for an input video frame.
InputRecord& input_record = input_buffer_map_[index];
@@ -1133,7 +861,7 @@ bool V4L2SliceVideoDecodeAccelerator::EnqueueInputRecord(
qbuf.m.planes = qbuf_planes;
qbuf.m.planes[0].bytesused = input_record.bytes_used;
qbuf.length = input_planes_count_;
- qbuf.config_store = config_store;
+ dec_surface->PrepareQueueBuffer(&qbuf);
IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QBUF, &qbuf);
input_record.at_device = true;
input_buffer_queued_count_++;
@@ -1350,7 +1078,7 @@ void V4L2SliceVideoDecodeAccelerator::DecodeTask(
if (!bitstream_record->buffer)
return;
- decoder_input_queue_.push(std::move(bitstream_record));
+ decoder_input_queue_.push_back(std::move(bitstream_record));
ScheduleDecodeBufferTaskIfNeeded();
}
@@ -1363,7 +1091,7 @@ bool V4L2SliceVideoDecodeAccelerator::TrySetNewBistreamBuffer() {
return false;
decoder_current_bitstream_buffer_ = std::move(decoder_input_queue_.front());
- decoder_input_queue_.pop();
+ decoder_input_queue_.pop_front();
if (decoder_current_bitstream_buffer_->input_id == kFlushBufferId) {
// This is a buffer we queued for ourselves to trigger flush at this time.
@@ -1648,15 +1376,14 @@ void V4L2SliceVideoDecodeAccelerator::AssignPictureBuffersTask(
// the client, or by ourselves, if we are allocating.
output_record.at_client = true;
if (output_mode_ == Config::OutputMode::ALLOCATE) {
- std::vector<base::ScopedFD> dmabuf_fds = device_->GetDmabufsForV4L2Buffer(
- i, output_planes_count_, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
- if (dmabuf_fds.empty()) {
+ std::vector<base::ScopedFD> passed_dmabuf_fds =
+ device_->GetDmabufsForV4L2Buffer(i, output_planes_count_,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (passed_dmabuf_fds.empty()) {
NOTIFY_ERROR(PLATFORM_FAILURE);
return;
}
- auto passed_dmabuf_fds(base::WrapUnique(
- new std::vector<base::ScopedFD>(std::move(dmabuf_fds))));
ImportBufferForPictureTask(output_record.picture_id,
std::move(passed_dmabuf_fds));
} // else we'll get triggered via ImportBufferForPicture() from client.
@@ -1678,7 +1405,7 @@ void V4L2SliceVideoDecodeAccelerator::AssignPictureBuffersTask(
void V4L2SliceVideoDecodeAccelerator::CreateGLImageFor(
size_t buffer_index,
int32_t picture_buffer_id,
- std::unique_ptr<std::vector<base::ScopedFD>> passed_dmabuf_fds,
+ std::vector<base::ScopedFD> passed_dmabuf_fds,
GLuint client_texture_id,
GLuint texture_id,
const gfx::Size& size,
@@ -1699,7 +1426,7 @@ void V4L2SliceVideoDecodeAccelerator::CreateGLImageFor(
}
scoped_refptr<gl::GLImage> gl_image =
- device_->CreateGLImage(size, fourcc, *passed_dmabuf_fds);
+ device_->CreateGLImage(size, fourcc, passed_dmabuf_fds);
if (!gl_image) {
VLOGF(1) << "Could not create GLImage,"
<< " index=" << buffer_index << " texture_id=" << texture_id;
@@ -1715,13 +1442,13 @@ void V4L2SliceVideoDecodeAccelerator::CreateGLImageFor(
FROM_HERE,
base::BindOnce(&V4L2SliceVideoDecodeAccelerator::AssignDmaBufs,
base::Unretained(this), buffer_index, picture_buffer_id,
- base::Passed(&passed_dmabuf_fds)));
+ std::move(passed_dmabuf_fds)));
}
void V4L2SliceVideoDecodeAccelerator::AssignDmaBufs(
size_t buffer_index,
int32_t picture_buffer_id,
- std::unique_ptr<std::vector<base::ScopedFD>> passed_dmabuf_fds) {
+ std::vector<base::ScopedFD> passed_dmabuf_fds) {
DVLOGF(3) << "index=" << buffer_index;
DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
@@ -1748,7 +1475,7 @@ void V4L2SliceVideoDecodeAccelerator::AssignDmaBufs(
if (output_mode_ == Config::OutputMode::IMPORT) {
DCHECK(output_record.dmabuf_fds.empty());
- output_record.dmabuf_fds = std::move(*passed_dmabuf_fds);
+ output_record.dmabuf_fds = std::move(passed_dmabuf_fds);
}
DCHECK_EQ(std::count(free_output_buffers_.begin(), free_output_buffers_.end(),
@@ -1765,11 +1492,11 @@ void V4L2SliceVideoDecodeAccelerator::ImportBufferForPicture(
DVLOGF(3) << "picture_buffer_id=" << picture_buffer_id;
DCHECK(child_task_runner_->BelongsToCurrentThread());
- auto passed_dmabuf_fds(base::WrapUnique(new std::vector<base::ScopedFD>()));
+ std::vector<base::ScopedFD> passed_dmabuf_fds;
#if defined(USE_OZONE)
for (const auto& fd : gpu_memory_buffer_handle.native_pixmap_handle.fds) {
DCHECK_NE(fd.fd, -1);
- passed_dmabuf_fds->push_back(base::ScopedFD(fd.fd));
+ passed_dmabuf_fds.push_back(base::ScopedFD(fd.fd));
}
#endif
@@ -1789,14 +1516,15 @@ void V4L2SliceVideoDecodeAccelerator::ImportBufferForPicture(
decoder_thread_task_runner_->PostTask(
FROM_HERE,
- base::Bind(&V4L2SliceVideoDecodeAccelerator::ImportBufferForPictureTask,
- base::Unretained(this), picture_buffer_id,
- base::Passed(&passed_dmabuf_fds)));
+ base::BindOnce(
+ &V4L2SliceVideoDecodeAccelerator::ImportBufferForPictureTask,
+ base::Unretained(this), picture_buffer_id,
+ std::move(passed_dmabuf_fds)));
}
void V4L2SliceVideoDecodeAccelerator::ImportBufferForPictureTask(
int32_t picture_buffer_id,
- std::unique_ptr<std::vector<base::ScopedFD>> passed_dmabuf_fds) {
+ std::vector<base::ScopedFD> passed_dmabuf_fds) {
DVLOGF(3) << "picture_buffer_id=" << picture_buffer_id;
DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
@@ -1834,14 +1562,14 @@ void V4L2SliceVideoDecodeAccelerator::ImportBufferForPictureTask(
if (iter->texture_id != 0) {
child_task_runner_->PostTask(
FROM_HERE,
- base::Bind(&V4L2SliceVideoDecodeAccelerator::CreateGLImageFor,
- weak_this_, index, picture_buffer_id,
- base::Passed(&passed_dmabuf_fds), iter->client_texture_id,
- iter->texture_id, coded_size_, output_format_fourcc_));
+ base::BindOnce(&V4L2SliceVideoDecodeAccelerator::CreateGLImageFor,
+ weak_this_, index, picture_buffer_id,
+ std::move(passed_dmabuf_fds), iter->client_texture_id,
+ iter->texture_id, coded_size_, output_format_fourcc_));
} else {
// No need for a GLImage, start using this buffer now.
- DCHECK_EQ(output_planes_count_, passed_dmabuf_fds->size());
- iter->dmabuf_fds.swap(*passed_dmabuf_fds);
+ DCHECK_EQ(output_planes_count_, passed_dmabuf_fds.size());
+ iter->dmabuf_fds = std::move(passed_dmabuf_fds);
free_output_buffers_.push_back(index);
ScheduleDecodeBufferTaskIfNeeded();
}
@@ -1935,7 +1663,7 @@ void V4L2SliceVideoDecodeAccelerator::FlushTask() {
return;
// Queue an empty buffer which - when reached - will trigger flush sequence.
- decoder_input_queue_.push(std::make_unique<BitstreamBufferRef>(
+ decoder_input_queue_.push_back(std::make_unique<BitstreamBufferRef>(
decode_client_, decode_task_runner_, nullptr, kFlushBufferId));
ScheduleDecodeBufferTaskIfNeeded();
@@ -2026,7 +1754,7 @@ void V4L2SliceVideoDecodeAccelerator::ResetTask() {
// Drop all remaining inputs.
decoder_current_bitstream_buffer_.reset();
while (!decoder_input_queue_.empty())
- decoder_input_queue_.pop();
+ decoder_input_queue_.pop_front();
decoder_resetting_ = true;
NewEventPending();
@@ -2097,376 +1825,13 @@ void V4L2SliceVideoDecodeAccelerator::SetErrorState(Error error) {
state_ = kError;
}
-V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::V4L2H264Accelerator(
- V4L2SliceVideoDecodeAccelerator* v4l2_dec)
- : num_slices_(0), v4l2_dec_(v4l2_dec) {
- DCHECK(v4l2_dec_);
-}
-
-V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::~V4L2H264Accelerator() {}
-
-scoped_refptr<H264Picture>
-V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::CreateH264Picture() {
- scoped_refptr<V4L2DecodeSurface> dec_surface = v4l2_dec_->CreateSurface();
- if (!dec_surface)
- return nullptr;
-
- return new V4L2H264Picture(dec_surface);
-}
-
-void V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::
- H264PictureListToDPBIndicesList(const H264Picture::Vector& src_pic_list,
- uint8_t dst_list[kDPBIndicesListSize]) {
- size_t i;
- for (i = 0; i < src_pic_list.size() && i < kDPBIndicesListSize; ++i) {
- const scoped_refptr<H264Picture>& pic = src_pic_list[i];
- dst_list[i] = pic ? pic->dpb_position : VIDEO_MAX_FRAME;
- }
-
- while (i < kDPBIndicesListSize)
- dst_list[i++] = VIDEO_MAX_FRAME;
-}
-
-void V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::H264DPBToV4L2DPB(
- const H264DPB& dpb,
- std::vector<scoped_refptr<V4L2DecodeSurface>>* ref_surfaces) {
- memset(v4l2_decode_param_.dpb, 0, sizeof(v4l2_decode_param_.dpb));
- size_t i = 0;
- for (const auto& pic : dpb) {
- if (i >= arraysize(v4l2_decode_param_.dpb)) {
- VLOGF(1) << "Invalid DPB size";
- break;
- }
-
- int index = VIDEO_MAX_FRAME;
- if (!pic->nonexisting) {
- scoped_refptr<V4L2DecodeSurface> dec_surface =
- H264PictureToV4L2DecodeSurface(pic);
- index = dec_surface->output_record();
- ref_surfaces->push_back(dec_surface);
- }
-
- struct v4l2_h264_dpb_entry& entry = v4l2_decode_param_.dpb[i++];
- entry.buf_index = index;
- entry.frame_num = pic->frame_num;
- entry.pic_num = pic->pic_num;
- entry.top_field_order_cnt = pic->top_field_order_cnt;
- entry.bottom_field_order_cnt = pic->bottom_field_order_cnt;
- entry.flags = (pic->ref ? V4L2_H264_DPB_ENTRY_FLAG_ACTIVE : 0) |
- (pic->long_term ? V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM : 0);
- }
-}
-
-H264Decoder::H264Accelerator::Status
-V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::SubmitFrameMetadata(
- const H264SPS* sps,
- const H264PPS* pps,
- const H264DPB& dpb,
- const H264Picture::Vector& ref_pic_listp0,
- const H264Picture::Vector& ref_pic_listb0,
- const H264Picture::Vector& ref_pic_listb1,
- const scoped_refptr<H264Picture>& pic) {
- struct v4l2_ext_control ctrl;
- std::vector<struct v4l2_ext_control> ctrls;
-
- struct v4l2_ctrl_h264_sps v4l2_sps;
- memset(&v4l2_sps, 0, sizeof(v4l2_sps));
- v4l2_sps.constraint_set_flags =
- (sps->constraint_set0_flag ? V4L2_H264_SPS_CONSTRAINT_SET0_FLAG : 0) |
- (sps->constraint_set1_flag ? V4L2_H264_SPS_CONSTRAINT_SET1_FLAG : 0) |
- (sps->constraint_set2_flag ? V4L2_H264_SPS_CONSTRAINT_SET2_FLAG : 0) |
- (sps->constraint_set3_flag ? V4L2_H264_SPS_CONSTRAINT_SET3_FLAG : 0) |
- (sps->constraint_set4_flag ? V4L2_H264_SPS_CONSTRAINT_SET4_FLAG : 0) |
- (sps->constraint_set5_flag ? V4L2_H264_SPS_CONSTRAINT_SET5_FLAG : 0);
-#define SPS_TO_V4L2SPS(a) v4l2_sps.a = sps->a
- SPS_TO_V4L2SPS(profile_idc);
- SPS_TO_V4L2SPS(level_idc);
- SPS_TO_V4L2SPS(seq_parameter_set_id);
- SPS_TO_V4L2SPS(chroma_format_idc);
- SPS_TO_V4L2SPS(bit_depth_luma_minus8);
- SPS_TO_V4L2SPS(bit_depth_chroma_minus8);
- SPS_TO_V4L2SPS(log2_max_frame_num_minus4);
- SPS_TO_V4L2SPS(pic_order_cnt_type);
- SPS_TO_V4L2SPS(log2_max_pic_order_cnt_lsb_minus4);
- SPS_TO_V4L2SPS(offset_for_non_ref_pic);
- SPS_TO_V4L2SPS(offset_for_top_to_bottom_field);
- SPS_TO_V4L2SPS(num_ref_frames_in_pic_order_cnt_cycle);
-
- static_assert(arraysize(v4l2_sps.offset_for_ref_frame) ==
- arraysize(sps->offset_for_ref_frame),
- "offset_for_ref_frame arrays must be same size");
- for (size_t i = 0; i < arraysize(v4l2_sps.offset_for_ref_frame); ++i)
- v4l2_sps.offset_for_ref_frame[i] = sps->offset_for_ref_frame[i];
- SPS_TO_V4L2SPS(max_num_ref_frames);
- SPS_TO_V4L2SPS(pic_width_in_mbs_minus1);
- SPS_TO_V4L2SPS(pic_height_in_map_units_minus1);
-#undef SPS_TO_V4L2SPS
-
-#define SET_V4L2_SPS_FLAG_IF(cond, flag) \
- v4l2_sps.flags |= ((sps->cond) ? (flag) : 0)
- SET_V4L2_SPS_FLAG_IF(separate_colour_plane_flag,
- V4L2_H264_SPS_FLAG_SEPARATE_COLOUR_PLANE);
- SET_V4L2_SPS_FLAG_IF(qpprime_y_zero_transform_bypass_flag,
- V4L2_H264_SPS_FLAG_QPPRIME_Y_ZERO_TRANSFORM_BYPASS);
- SET_V4L2_SPS_FLAG_IF(delta_pic_order_always_zero_flag,
- V4L2_H264_SPS_FLAG_DELTA_PIC_ORDER_ALWAYS_ZERO);
- SET_V4L2_SPS_FLAG_IF(gaps_in_frame_num_value_allowed_flag,
- V4L2_H264_SPS_FLAG_GAPS_IN_FRAME_NUM_VALUE_ALLOWED);
- SET_V4L2_SPS_FLAG_IF(frame_mbs_only_flag, V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY);
- SET_V4L2_SPS_FLAG_IF(mb_adaptive_frame_field_flag,
- V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD);
- SET_V4L2_SPS_FLAG_IF(direct_8x8_inference_flag,
- V4L2_H264_SPS_FLAG_DIRECT_8X8_INFERENCE);
-#undef SET_V4L2_SPS_FLAG_IF
- memset(&ctrl, 0, sizeof(ctrl));
- ctrl.id = V4L2_CID_MPEG_VIDEO_H264_SPS;
- ctrl.size = sizeof(v4l2_sps);
- ctrl.p_h264_sps = &v4l2_sps;
- ctrls.push_back(ctrl);
-
- struct v4l2_ctrl_h264_pps v4l2_pps;
- memset(&v4l2_pps, 0, sizeof(v4l2_pps));
-#define PPS_TO_V4L2PPS(a) v4l2_pps.a = pps->a
- PPS_TO_V4L2PPS(pic_parameter_set_id);
- PPS_TO_V4L2PPS(seq_parameter_set_id);
- PPS_TO_V4L2PPS(num_slice_groups_minus1);
- PPS_TO_V4L2PPS(num_ref_idx_l0_default_active_minus1);
- PPS_TO_V4L2PPS(num_ref_idx_l1_default_active_minus1);
- PPS_TO_V4L2PPS(weighted_bipred_idc);
- PPS_TO_V4L2PPS(pic_init_qp_minus26);
- PPS_TO_V4L2PPS(pic_init_qs_minus26);
- PPS_TO_V4L2PPS(chroma_qp_index_offset);
- PPS_TO_V4L2PPS(second_chroma_qp_index_offset);
-#undef PPS_TO_V4L2PPS
-
-#define SET_V4L2_PPS_FLAG_IF(cond, flag) \
- v4l2_pps.flags |= ((pps->cond) ? (flag) : 0)
- SET_V4L2_PPS_FLAG_IF(entropy_coding_mode_flag,
- V4L2_H264_PPS_FLAG_ENTROPY_CODING_MODE);
- SET_V4L2_PPS_FLAG_IF(
- bottom_field_pic_order_in_frame_present_flag,
- V4L2_H264_PPS_FLAG_BOTTOM_FIELD_PIC_ORDER_IN_FRAME_PRESENT);
- SET_V4L2_PPS_FLAG_IF(weighted_pred_flag, V4L2_H264_PPS_FLAG_WEIGHTED_PRED);
- SET_V4L2_PPS_FLAG_IF(deblocking_filter_control_present_flag,
- V4L2_H264_PPS_FLAG_DEBLOCKING_FILTER_CONTROL_PRESENT);
- SET_V4L2_PPS_FLAG_IF(constrained_intra_pred_flag,
- V4L2_H264_PPS_FLAG_CONSTRAINED_INTRA_PRED);
- SET_V4L2_PPS_FLAG_IF(redundant_pic_cnt_present_flag,
- V4L2_H264_PPS_FLAG_REDUNDANT_PIC_CNT_PRESENT);
- SET_V4L2_PPS_FLAG_IF(transform_8x8_mode_flag,
- V4L2_H264_PPS_FLAG_TRANSFORM_8X8_MODE);
- SET_V4L2_PPS_FLAG_IF(pic_scaling_matrix_present_flag,
- V4L2_H264_PPS_FLAG_PIC_SCALING_MATRIX_PRESENT);
-#undef SET_V4L2_PPS_FLAG_IF
- memset(&ctrl, 0, sizeof(ctrl));
- ctrl.id = V4L2_CID_MPEG_VIDEO_H264_PPS;
- ctrl.size = sizeof(v4l2_pps);
- ctrl.p_h264_pps = &v4l2_pps;
- ctrls.push_back(ctrl);
-
- struct v4l2_ctrl_h264_scaling_matrix v4l2_scaling_matrix;
- memset(&v4l2_scaling_matrix, 0, sizeof(v4l2_scaling_matrix));
-
- static_assert(arraysize(v4l2_scaling_matrix.scaling_list_4x4) <=
- arraysize(pps->scaling_list4x4) &&
- arraysize(v4l2_scaling_matrix.scaling_list_4x4[0]) <=
- arraysize(pps->scaling_list4x4[0]) &&
- arraysize(v4l2_scaling_matrix.scaling_list_8x8) <=
- arraysize(pps->scaling_list8x8) &&
- arraysize(v4l2_scaling_matrix.scaling_list_8x8[0]) <=
- arraysize(pps->scaling_list8x8[0]),
- "scaling_lists must be of correct size");
- static_assert(arraysize(v4l2_scaling_matrix.scaling_list_4x4) <=
- arraysize(sps->scaling_list4x4) &&
- arraysize(v4l2_scaling_matrix.scaling_list_4x4[0]) <=
- arraysize(sps->scaling_list4x4[0]) &&
- arraysize(v4l2_scaling_matrix.scaling_list_8x8) <=
- arraysize(sps->scaling_list8x8) &&
- arraysize(v4l2_scaling_matrix.scaling_list_8x8[0]) <=
- arraysize(sps->scaling_list8x8[0]),
- "scaling_lists must be of correct size");
-
- const auto* scaling_list4x4 = &sps->scaling_list4x4[0];
- const auto* scaling_list8x8 = &sps->scaling_list8x8[0];
- if (pps->pic_scaling_matrix_present_flag) {
- scaling_list4x4 = &pps->scaling_list4x4[0];
- scaling_list8x8 = &pps->scaling_list8x8[0];
- }
-
- for (size_t i = 0; i < arraysize(v4l2_scaling_matrix.scaling_list_4x4); ++i) {
- for (size_t j = 0; j < arraysize(v4l2_scaling_matrix.scaling_list_4x4[i]);
- ++j) {
- v4l2_scaling_matrix.scaling_list_4x4[i][j] = scaling_list4x4[i][j];
- }
- }
- for (size_t i = 0; i < arraysize(v4l2_scaling_matrix.scaling_list_8x8); ++i) {
- for (size_t j = 0; j < arraysize(v4l2_scaling_matrix.scaling_list_8x8[i]);
- ++j) {
- v4l2_scaling_matrix.scaling_list_8x8[i][j] = scaling_list8x8[i][j];
- }
- }
-
- memset(&ctrl, 0, sizeof(ctrl));
- ctrl.id = V4L2_CID_MPEG_VIDEO_H264_SCALING_MATRIX;
- ctrl.size = sizeof(v4l2_scaling_matrix);
- ctrl.p_h264_scal_mtrx = &v4l2_scaling_matrix;
- ctrls.push_back(ctrl);
-
- scoped_refptr<V4L2DecodeSurface> dec_surface =
- H264PictureToV4L2DecodeSurface(pic);
-
- struct v4l2_ext_controls ext_ctrls;
- memset(&ext_ctrls, 0, sizeof(ext_ctrls));
- ext_ctrls.count = ctrls.size();
- ext_ctrls.controls = &ctrls[0];
- ext_ctrls.config_store = dec_surface->config_store();
- v4l2_dec_->SubmitExtControls(&ext_ctrls);
-
- H264PictureListToDPBIndicesList(ref_pic_listp0,
- v4l2_decode_param_.ref_pic_list_p0);
- H264PictureListToDPBIndicesList(ref_pic_listb0,
- v4l2_decode_param_.ref_pic_list_b0);
- H264PictureListToDPBIndicesList(ref_pic_listb1,
- v4l2_decode_param_.ref_pic_list_b1);
-
- std::vector<scoped_refptr<V4L2DecodeSurface>> ref_surfaces;
- H264DPBToV4L2DPB(dpb, &ref_surfaces);
- dec_surface->SetReferenceSurfaces(ref_surfaces);
-
- return Status::kOk;
-}
-
-H264Decoder::H264Accelerator::Status
-V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::SubmitSlice(
- const H264PPS* pps,
- const H264SliceHeader* slice_hdr,
- const H264Picture::Vector& ref_pic_list0,
- const H264Picture::Vector& ref_pic_list1,
- const scoped_refptr<H264Picture>& pic,
+bool V4L2SliceVideoDecodeAccelerator::SubmitSlice(
+ const scoped_refptr<V4L2DecodeSurface>& dec_surface,
const uint8_t* data,
- size_t size,
- const std::vector<SubsampleEntry>& subsamples) {
- if (num_slices_ == kMaxSlices) {
- VLOGF(1) << "Over limit of supported slices per frame";
- return Status::kFail;
- }
-
- struct v4l2_ctrl_h264_slice_param& v4l2_slice_param =
- v4l2_slice_params_[num_slices_++];
- memset(&v4l2_slice_param, 0, sizeof(v4l2_slice_param));
-
- v4l2_slice_param.size = size;
-#define SHDR_TO_V4L2SPARM(a) v4l2_slice_param.a = slice_hdr->a
- SHDR_TO_V4L2SPARM(header_bit_size);
- SHDR_TO_V4L2SPARM(first_mb_in_slice);
- SHDR_TO_V4L2SPARM(slice_type);
- SHDR_TO_V4L2SPARM(pic_parameter_set_id);
- SHDR_TO_V4L2SPARM(colour_plane_id);
- SHDR_TO_V4L2SPARM(frame_num);
- SHDR_TO_V4L2SPARM(idr_pic_id);
- SHDR_TO_V4L2SPARM(pic_order_cnt_lsb);
- SHDR_TO_V4L2SPARM(delta_pic_order_cnt_bottom);
- SHDR_TO_V4L2SPARM(delta_pic_order_cnt0);
- SHDR_TO_V4L2SPARM(delta_pic_order_cnt1);
- SHDR_TO_V4L2SPARM(redundant_pic_cnt);
- SHDR_TO_V4L2SPARM(dec_ref_pic_marking_bit_size);
- SHDR_TO_V4L2SPARM(cabac_init_idc);
- SHDR_TO_V4L2SPARM(slice_qp_delta);
- SHDR_TO_V4L2SPARM(slice_qs_delta);
- SHDR_TO_V4L2SPARM(disable_deblocking_filter_idc);
- SHDR_TO_V4L2SPARM(slice_alpha_c0_offset_div2);
- SHDR_TO_V4L2SPARM(slice_beta_offset_div2);
- SHDR_TO_V4L2SPARM(num_ref_idx_l0_active_minus1);
- SHDR_TO_V4L2SPARM(num_ref_idx_l1_active_minus1);
- SHDR_TO_V4L2SPARM(pic_order_cnt_bit_size);
-#undef SHDR_TO_V4L2SPARM
-
-#define SET_V4L2_SPARM_FLAG_IF(cond, flag) \
- v4l2_slice_param.flags |= ((slice_hdr->cond) ? (flag) : 0)
- SET_V4L2_SPARM_FLAG_IF(field_pic_flag, V4L2_SLICE_FLAG_FIELD_PIC);
- SET_V4L2_SPARM_FLAG_IF(bottom_field_flag, V4L2_SLICE_FLAG_BOTTOM_FIELD);
- SET_V4L2_SPARM_FLAG_IF(direct_spatial_mv_pred_flag,
- V4L2_SLICE_FLAG_DIRECT_SPATIAL_MV_PRED);
- SET_V4L2_SPARM_FLAG_IF(sp_for_switch_flag, V4L2_SLICE_FLAG_SP_FOR_SWITCH);
-#undef SET_V4L2_SPARM_FLAG_IF
-
- struct v4l2_h264_pred_weight_table* pred_weight_table =
- &v4l2_slice_param.pred_weight_table;
-
- if (((slice_hdr->IsPSlice() || slice_hdr->IsSPSlice()) &&
- pps->weighted_pred_flag) ||
- (slice_hdr->IsBSlice() && pps->weighted_bipred_idc == 1)) {
- pred_weight_table->luma_log2_weight_denom =
- slice_hdr->luma_log2_weight_denom;
- pred_weight_table->chroma_log2_weight_denom =
- slice_hdr->chroma_log2_weight_denom;
-
- struct v4l2_h264_weight_factors* factorsl0 =
- &pred_weight_table->weight_factors[0];
-
- for (int i = 0; i < 32; ++i) {
- factorsl0->luma_weight[i] =
- slice_hdr->pred_weight_table_l0.luma_weight[i];
- factorsl0->luma_offset[i] =
- slice_hdr->pred_weight_table_l0.luma_offset[i];
-
- for (int j = 0; j < 2; ++j) {
- factorsl0->chroma_weight[i][j] =
- slice_hdr->pred_weight_table_l0.chroma_weight[i][j];
- factorsl0->chroma_offset[i][j] =
- slice_hdr->pred_weight_table_l0.chroma_offset[i][j];
- }
- }
-
- if (slice_hdr->IsBSlice()) {
- struct v4l2_h264_weight_factors* factorsl1 =
- &pred_weight_table->weight_factors[1];
-
- for (int i = 0; i < 32; ++i) {
- factorsl1->luma_weight[i] =
- slice_hdr->pred_weight_table_l1.luma_weight[i];
- factorsl1->luma_offset[i] =
- slice_hdr->pred_weight_table_l1.luma_offset[i];
-
- for (int j = 0; j < 2; ++j) {
- factorsl1->chroma_weight[i][j] =
- slice_hdr->pred_weight_table_l1.chroma_weight[i][j];
- factorsl1->chroma_offset[i][j] =
- slice_hdr->pred_weight_table_l1.chroma_offset[i][j];
- }
- }
- }
- }
-
- H264PictureListToDPBIndicesList(ref_pic_list0,
- v4l2_slice_param.ref_pic_list0);
- H264PictureListToDPBIndicesList(ref_pic_list1,
- v4l2_slice_param.ref_pic_list1);
-
- scoped_refptr<V4L2DecodeSurface> dec_surface =
- H264PictureToV4L2DecodeSurface(pic);
-
- v4l2_decode_param_.nal_ref_idc = slice_hdr->nal_ref_idc;
-
- // TODO(posciak): Don't add start code back here, but have it passed from
- // the parser.
- size_t data_copy_size = size + 3;
- std::unique_ptr<uint8_t[]> data_copy(new uint8_t[data_copy_size]);
- memset(data_copy.get(), 0, data_copy_size);
- data_copy[2] = 0x01;
- memcpy(data_copy.get() + 3, data, size);
- return v4l2_dec_->SubmitSlice(dec_surface->input_record(), data_copy.get(),
- data_copy_size)
- ? Status::kOk
- : Status::kFail;
-}
-
-bool V4L2SliceVideoDecodeAccelerator::SubmitSlice(int index,
- const uint8_t* data,
- size_t size) {
+ size_t size) {
DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
- InputRecord& input_record = input_buffer_map_[index];
+ InputRecord& input_record = input_buffer_map_[dec_surface->input_record()];
if (input_record.bytes_used + size > input_record.length) {
VLOGF(1) << "Input buffer too small";
@@ -2480,677 +1845,19 @@ bool V4L2SliceVideoDecodeAccelerator::SubmitSlice(int index,
return true;
}
-bool V4L2SliceVideoDecodeAccelerator::SubmitExtControls(
- struct v4l2_ext_controls* ext_ctrls) {
- DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
- DCHECK_GT(ext_ctrls->config_store, 0u);
- IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_S_EXT_CTRLS, ext_ctrls);
- return true;
-}
-
-bool V4L2SliceVideoDecodeAccelerator::GetExtControls(
- struct v4l2_ext_controls* ext_ctrls) {
+void V4L2SliceVideoDecodeAccelerator::DecodeSurface(
+ const scoped_refptr<V4L2DecodeSurface>& dec_surface) {
DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
- DCHECK_GT(ext_ctrls->config_store, 0u);
- IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_G_EXT_CTRLS, ext_ctrls);
- return true;
-}
-
-bool V4L2SliceVideoDecodeAccelerator::IsCtrlExposed(uint32_t ctrl_id) {
- struct v4l2_queryctrl query_ctrl;
- memset(&query_ctrl, 0, sizeof(query_ctrl));
- query_ctrl.id = ctrl_id;
-
- return (device_->Ioctl(VIDIOC_QUERYCTRL, &query_ctrl) == 0);
-}
-
-H264Decoder::H264Accelerator::Status
-V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::SubmitDecode(
- const scoped_refptr<H264Picture>& pic) {
- scoped_refptr<V4L2DecodeSurface> dec_surface =
- H264PictureToV4L2DecodeSurface(pic);
-
- v4l2_decode_param_.num_slices = num_slices_;
- v4l2_decode_param_.idr_pic_flag = pic->idr;
- v4l2_decode_param_.top_field_order_cnt = pic->top_field_order_cnt;
- v4l2_decode_param_.bottom_field_order_cnt = pic->bottom_field_order_cnt;
-
- struct v4l2_ext_control ctrl;
- std::vector<struct v4l2_ext_control> ctrls;
-
- memset(&ctrl, 0, sizeof(ctrl));
- ctrl.id = V4L2_CID_MPEG_VIDEO_H264_SLICE_PARAM;
- ctrl.size = sizeof(v4l2_slice_params_);
- ctrl.p_h264_slice_param = v4l2_slice_params_;
- ctrls.push_back(ctrl);
-
- memset(&ctrl, 0, sizeof(ctrl));
- ctrl.id = V4L2_CID_MPEG_VIDEO_H264_DECODE_PARAM;
- ctrl.size = sizeof(v4l2_decode_param_);
- ctrl.p_h264_decode_param = &v4l2_decode_param_;
- ctrls.push_back(ctrl);
-
- struct v4l2_ext_controls ext_ctrls;
- memset(&ext_ctrls, 0, sizeof(ext_ctrls));
- ext_ctrls.count = ctrls.size();
- ext_ctrls.controls = &ctrls[0];
- ext_ctrls.config_store = dec_surface->config_store();
- if (!v4l2_dec_->SubmitExtControls(&ext_ctrls))
- return Status::kFail;
-
- Reset();
-
- DVLOGF(4) << "Submitting decode for surface: " << dec_surface->ToString();
- v4l2_dec_->Enqueue(dec_surface);
- return Status::kOk;
-}
-
-bool V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::OutputPicture(
- const scoped_refptr<H264Picture>& pic) {
- // TODO(crbug.com/647725): Insert correct color space.
- v4l2_dec_->SurfaceReady(H264PictureToV4L2DecodeSurface(pic),
- pic->bitstream_id(), pic->visible_rect(),
- VideoColorSpace());
- return true;
-}
-
-void V4L2SliceVideoDecodeAccelerator::V4L2H264Accelerator::Reset() {
- num_slices_ = 0;
- memset(&v4l2_decode_param_, 0, sizeof(v4l2_decode_param_));
- memset(&v4l2_slice_params_, 0, sizeof(v4l2_slice_params_));
-}
-
-scoped_refptr<V4L2DecodeSurface> V4L2SliceVideoDecodeAccelerator::
- V4L2H264Accelerator::H264PictureToV4L2DecodeSurface(
- const scoped_refptr<H264Picture>& pic) {
- V4L2H264Picture* v4l2_pic = pic->AsV4L2H264Picture();
- CHECK(v4l2_pic);
- return v4l2_pic->dec_surface();
-}
-
-V4L2SliceVideoDecodeAccelerator::V4L2VP8Accelerator::V4L2VP8Accelerator(
- V4L2SliceVideoDecodeAccelerator* v4l2_dec)
- : v4l2_dec_(v4l2_dec) {
- DCHECK(v4l2_dec_);
-}
-
-V4L2SliceVideoDecodeAccelerator::V4L2VP8Accelerator::~V4L2VP8Accelerator() {}
-scoped_refptr<VP8Picture>
-V4L2SliceVideoDecodeAccelerator::V4L2VP8Accelerator::CreateVP8Picture() {
- scoped_refptr<V4L2DecodeSurface> dec_surface = v4l2_dec_->CreateSurface();
- if (!dec_surface)
- return nullptr;
-
- return new V4L2VP8Picture(dec_surface);
-}
-
-static void FillV4L2SegmentationHeader(
- const Vp8SegmentationHeader& vp8_sgmnt_hdr,
- struct v4l2_vp8_sgmnt_hdr* v4l2_sgmnt_hdr) {
-#define SET_V4L2_SGMNT_HDR_FLAG_IF(cond, flag) \
- v4l2_sgmnt_hdr->flags |= ((vp8_sgmnt_hdr.cond) ? (flag) : 0)
- SET_V4L2_SGMNT_HDR_FLAG_IF(segmentation_enabled,
- V4L2_VP8_SEGMNT_HDR_FLAG_ENABLED);
- SET_V4L2_SGMNT_HDR_FLAG_IF(update_mb_segmentation_map,
- V4L2_VP8_SEGMNT_HDR_FLAG_UPDATE_MAP);
- SET_V4L2_SGMNT_HDR_FLAG_IF(update_segment_feature_data,
- V4L2_VP8_SEGMNT_HDR_FLAG_UPDATE_FEATURE_DATA);
-#undef SET_V4L2_SPARM_FLAG_IF
- v4l2_sgmnt_hdr->segment_feature_mode = vp8_sgmnt_hdr.segment_feature_mode;
-
- SafeArrayMemcpy(v4l2_sgmnt_hdr->quant_update,
- vp8_sgmnt_hdr.quantizer_update_value);
- SafeArrayMemcpy(v4l2_sgmnt_hdr->lf_update, vp8_sgmnt_hdr.lf_update_value);
- SafeArrayMemcpy(v4l2_sgmnt_hdr->segment_probs, vp8_sgmnt_hdr.segment_prob);
-}
-
-static void FillV4L2LoopfilterHeader(
- const Vp8LoopFilterHeader& vp8_loopfilter_hdr,
- struct v4l2_vp8_loopfilter_hdr* v4l2_lf_hdr) {
-#define SET_V4L2_LF_HDR_FLAG_IF(cond, flag) \
- v4l2_lf_hdr->flags |= ((vp8_loopfilter_hdr.cond) ? (flag) : 0)
- SET_V4L2_LF_HDR_FLAG_IF(loop_filter_adj_enable, V4L2_VP8_LF_HDR_ADJ_ENABLE);
- SET_V4L2_LF_HDR_FLAG_IF(mode_ref_lf_delta_update,
- V4L2_VP8_LF_HDR_DELTA_UPDATE);
-#undef SET_V4L2_SGMNT_HDR_FLAG_IF
-
-#define LF_HDR_TO_V4L2_LF_HDR(a) v4l2_lf_hdr->a = vp8_loopfilter_hdr.a;
- LF_HDR_TO_V4L2_LF_HDR(type);
- LF_HDR_TO_V4L2_LF_HDR(level);
- LF_HDR_TO_V4L2_LF_HDR(sharpness_level);
-#undef LF_HDR_TO_V4L2_LF_HDR
-
- SafeArrayMemcpy(v4l2_lf_hdr->ref_frm_delta_magnitude,
- vp8_loopfilter_hdr.ref_frame_delta);
- SafeArrayMemcpy(v4l2_lf_hdr->mb_mode_delta_magnitude,
- vp8_loopfilter_hdr.mb_mode_delta);
-}
-
-static void FillV4L2QuantizationHeader(
- const Vp8QuantizationHeader& vp8_quant_hdr,
- struct v4l2_vp8_quantization_hdr* v4l2_quant_hdr) {
- v4l2_quant_hdr->y_ac_qi = vp8_quant_hdr.y_ac_qi;
- v4l2_quant_hdr->y_dc_delta = vp8_quant_hdr.y_dc_delta;
- v4l2_quant_hdr->y2_dc_delta = vp8_quant_hdr.y2_dc_delta;
- v4l2_quant_hdr->y2_ac_delta = vp8_quant_hdr.y2_ac_delta;
- v4l2_quant_hdr->uv_dc_delta = vp8_quant_hdr.uv_dc_delta;
- v4l2_quant_hdr->uv_ac_delta = vp8_quant_hdr.uv_ac_delta;
-}
-
-static void FillV4L2Vp8EntropyHeader(
- const Vp8EntropyHeader& vp8_entropy_hdr,
- struct v4l2_vp8_entropy_hdr* v4l2_entropy_hdr) {
- SafeArrayMemcpy(v4l2_entropy_hdr->coeff_probs, vp8_entropy_hdr.coeff_probs);
- SafeArrayMemcpy(v4l2_entropy_hdr->y_mode_probs, vp8_entropy_hdr.y_mode_probs);
- SafeArrayMemcpy(v4l2_entropy_hdr->uv_mode_probs,
- vp8_entropy_hdr.uv_mode_probs);
- SafeArrayMemcpy(v4l2_entropy_hdr->mv_probs, vp8_entropy_hdr.mv_probs);
-}
-
-bool V4L2SliceVideoDecodeAccelerator::V4L2VP8Accelerator::SubmitDecode(
- scoped_refptr<VP8Picture> pic,
- const Vp8ReferenceFrameVector& reference_frames) {
- struct v4l2_ctrl_vp8_frame_hdr v4l2_frame_hdr;
- memset(&v4l2_frame_hdr, 0, sizeof(v4l2_frame_hdr));
-
- const auto& frame_hdr = pic->frame_hdr;
- v4l2_frame_hdr.key_frame = frame_hdr->frame_type;
-#define FHDR_TO_V4L2_FHDR(a) v4l2_frame_hdr.a = frame_hdr->a
- FHDR_TO_V4L2_FHDR(version);
- FHDR_TO_V4L2_FHDR(width);
- FHDR_TO_V4L2_FHDR(horizontal_scale);
- FHDR_TO_V4L2_FHDR(height);
- FHDR_TO_V4L2_FHDR(vertical_scale);
- FHDR_TO_V4L2_FHDR(sign_bias_golden);
- FHDR_TO_V4L2_FHDR(sign_bias_alternate);
- FHDR_TO_V4L2_FHDR(prob_skip_false);
- FHDR_TO_V4L2_FHDR(prob_intra);
- FHDR_TO_V4L2_FHDR(prob_last);
- FHDR_TO_V4L2_FHDR(prob_gf);
- FHDR_TO_V4L2_FHDR(bool_dec_range);
- FHDR_TO_V4L2_FHDR(bool_dec_value);
- FHDR_TO_V4L2_FHDR(bool_dec_count);
-#undef FHDR_TO_V4L2_FHDR
-
-#define SET_V4L2_FRM_HDR_FLAG_IF(cond, flag) \
- v4l2_frame_hdr.flags |= ((frame_hdr->cond) ? (flag) : 0)
- SET_V4L2_FRM_HDR_FLAG_IF(is_experimental,
- V4L2_VP8_FRAME_HDR_FLAG_EXPERIMENTAL);
- SET_V4L2_FRM_HDR_FLAG_IF(show_frame, V4L2_VP8_FRAME_HDR_FLAG_SHOW_FRAME);
- SET_V4L2_FRM_HDR_FLAG_IF(mb_no_skip_coeff,
- V4L2_VP8_FRAME_HDR_FLAG_MB_NO_SKIP_COEFF);
-#undef SET_V4L2_FRM_HDR_FLAG_IF
-
- FillV4L2SegmentationHeader(frame_hdr->segmentation_hdr,
- &v4l2_frame_hdr.sgmnt_hdr);
-
- FillV4L2LoopfilterHeader(frame_hdr->loopfilter_hdr, &v4l2_frame_hdr.lf_hdr);
-
- FillV4L2QuantizationHeader(frame_hdr->quantization_hdr,
- &v4l2_frame_hdr.quant_hdr);
-
- FillV4L2Vp8EntropyHeader(frame_hdr->entropy_hdr, &v4l2_frame_hdr.entropy_hdr);
-
- v4l2_frame_hdr.first_part_size =
- base::checked_cast<__u32>(frame_hdr->first_part_size);
- v4l2_frame_hdr.first_part_offset =
- base::checked_cast<__u32>(frame_hdr->first_part_offset);
- v4l2_frame_hdr.macroblock_bit_offset =
- base::checked_cast<__u32>(frame_hdr->macroblock_bit_offset);
- v4l2_frame_hdr.num_dct_parts = frame_hdr->num_of_dct_partitions;
-
- static_assert(arraysize(v4l2_frame_hdr.dct_part_sizes) ==
- arraysize(frame_hdr->dct_partition_sizes),
- "DCT partition size arrays must have equal number of elements");
- for (size_t i = 0; i < frame_hdr->num_of_dct_partitions &&
- i < arraysize(v4l2_frame_hdr.dct_part_sizes);
- ++i)
- v4l2_frame_hdr.dct_part_sizes[i] = frame_hdr->dct_partition_sizes[i];
-
- scoped_refptr<V4L2DecodeSurface> dec_surface =
- VP8PictureToV4L2DecodeSurface(pic);
- std::vector<scoped_refptr<V4L2DecodeSurface>> ref_surfaces;
-
- const auto last_frame = reference_frames.GetFrame(Vp8RefType::VP8_FRAME_LAST);
- if (last_frame) {
- scoped_refptr<V4L2DecodeSurface> last_frame_surface =
- VP8PictureToV4L2DecodeSurface(last_frame);
- v4l2_frame_hdr.last_frame = last_frame_surface->output_record();
- ref_surfaces.push_back(last_frame_surface);
- } else {
- v4l2_frame_hdr.last_frame = VIDEO_MAX_FRAME;
- }
-
- const auto golden_frame =
- reference_frames.GetFrame(Vp8RefType::VP8_FRAME_GOLDEN);
- if (golden_frame) {
- scoped_refptr<V4L2DecodeSurface> golden_frame_surface =
- VP8PictureToV4L2DecodeSurface(golden_frame);
- v4l2_frame_hdr.golden_frame = golden_frame_surface->output_record();
- ref_surfaces.push_back(golden_frame_surface);
- } else {
- v4l2_frame_hdr.golden_frame = VIDEO_MAX_FRAME;
- }
-
- const auto alt_frame =
- reference_frames.GetFrame(Vp8RefType::VP8_FRAME_ALTREF);
- if (alt_frame) {
- scoped_refptr<V4L2DecodeSurface> alt_frame_surface =
- VP8PictureToV4L2DecodeSurface(alt_frame);
- v4l2_frame_hdr.alt_frame = alt_frame_surface->output_record();
- ref_surfaces.push_back(alt_frame_surface);
- } else {
- v4l2_frame_hdr.alt_frame = VIDEO_MAX_FRAME;
- }
-
- struct v4l2_ext_control ctrl;
- memset(&ctrl, 0, sizeof(ctrl));
- ctrl.id = V4L2_CID_MPEG_VIDEO_VP8_FRAME_HDR;
- ctrl.size = sizeof(v4l2_frame_hdr);
- ctrl.p_vp8_frame_hdr = &v4l2_frame_hdr;
-
- struct v4l2_ext_controls ext_ctrls;
- memset(&ext_ctrls, 0, sizeof(ext_ctrls));
- ext_ctrls.count = 1;
- ext_ctrls.controls = &ctrl;
- ext_ctrls.config_store = dec_surface->config_store();
-
- if (!v4l2_dec_->SubmitExtControls(&ext_ctrls))
- return false;
-
- dec_surface->SetReferenceSurfaces(ref_surfaces);
-
- if (!v4l2_dec_->SubmitSlice(dec_surface->input_record(), frame_hdr->data,
- frame_hdr->frame_size))
- return false;
-
- DVLOGF(4) << "Submitting decode for surface: " << dec_surface->ToString();
- v4l2_dec_->Enqueue(dec_surface);
- return true;
-}
-
-bool V4L2SliceVideoDecodeAccelerator::V4L2VP8Accelerator::OutputPicture(
- const scoped_refptr<VP8Picture>& pic) {
- // TODO(crbug.com/647725): Insert correct color space.
- v4l2_dec_->SurfaceReady(VP8PictureToV4L2DecodeSurface(pic),
- pic->bitstream_id(), pic->visible_rect(),
- VideoColorSpace());
- return true;
-}
-
-scoped_refptr<V4L2DecodeSurface> V4L2SliceVideoDecodeAccelerator::
- V4L2VP8Accelerator::VP8PictureToV4L2DecodeSurface(
- const scoped_refptr<VP8Picture>& pic) {
- V4L2VP8Picture* v4l2_pic = pic->AsV4L2VP8Picture();
- CHECK(v4l2_pic);
- return v4l2_pic->dec_surface();
-}
+ DVLOGF(3) << "Submitting decode for surface: " << dec_surface->ToString();
+ Enqueue(dec_surface);
-V4L2SliceVideoDecodeAccelerator::V4L2VP9Accelerator::V4L2VP9Accelerator(
- V4L2SliceVideoDecodeAccelerator* v4l2_dec)
- : v4l2_dec_(v4l2_dec) {
- DCHECK(v4l2_dec_);
-
- device_needs_frame_context_ =
- v4l2_dec_->IsCtrlExposed(V4L2_CID_MPEG_VIDEO_VP9_ENTROPY);
- DVLOG_IF(1, device_needs_frame_context_)
- << "Device requires frame context parsing";
-}
-
-V4L2SliceVideoDecodeAccelerator::V4L2VP9Accelerator::~V4L2VP9Accelerator() {}
-
-scoped_refptr<VP9Picture>
-V4L2SliceVideoDecodeAccelerator::V4L2VP9Accelerator::CreateVP9Picture() {
- scoped_refptr<V4L2DecodeSurface> dec_surface = v4l2_dec_->CreateSurface();
- if (!dec_surface)
- return nullptr;
-
- return new V4L2VP9Picture(dec_surface);
-}
-
-static void FillV4L2VP9LoopFilterParams(
- const Vp9LoopFilterParams& vp9_lf_params,
- struct v4l2_vp9_loop_filter_params* v4l2_lf_params) {
-#define SET_LF_PARAMS_FLAG_IF(cond, flag) \
- v4l2_lf_params->flags |= ((vp9_lf_params.cond) ? (flag) : 0)
- SET_LF_PARAMS_FLAG_IF(delta_enabled, V4L2_VP9_LOOP_FLTR_FLAG_DELTA_ENABLED);
- SET_LF_PARAMS_FLAG_IF(delta_update, V4L2_VP9_LOOP_FLTR_FLAG_DELTA_UPDATE);
-#undef SET_LF_PARAMS_FLAG_IF
-
- v4l2_lf_params->level = vp9_lf_params.level;
- v4l2_lf_params->sharpness = vp9_lf_params.sharpness;
-
- SafeArrayMemcpy(v4l2_lf_params->deltas, vp9_lf_params.ref_deltas);
- SafeArrayMemcpy(v4l2_lf_params->mode_deltas, vp9_lf_params.mode_deltas);
- SafeArrayMemcpy(v4l2_lf_params->lvl_lookup, vp9_lf_params.lvl);
-}
-
-static void FillV4L2VP9QuantizationParams(
- const Vp9QuantizationParams& vp9_quant_params,
- struct v4l2_vp9_quantization_params* v4l2_q_params) {
-#define SET_Q_PARAMS_FLAG_IF(cond, flag) \
- v4l2_q_params->flags |= ((vp9_quant_params.cond) ? (flag) : 0)
- SET_Q_PARAMS_FLAG_IF(IsLossless(), V4L2_VP9_QUANT_PARAMS_FLAG_LOSSLESS);
-#undef SET_Q_PARAMS_FLAG_IF
-
-#define Q_PARAMS_TO_V4L2_Q_PARAMS(a) v4l2_q_params->a = vp9_quant_params.a
- Q_PARAMS_TO_V4L2_Q_PARAMS(base_q_idx);
- Q_PARAMS_TO_V4L2_Q_PARAMS(delta_q_y_dc);
- Q_PARAMS_TO_V4L2_Q_PARAMS(delta_q_uv_dc);
- Q_PARAMS_TO_V4L2_Q_PARAMS(delta_q_uv_ac);
-#undef Q_PARAMS_TO_V4L2_Q_PARAMS
-}
-
-static void FillV4L2VP9SegmentationParams(
- const Vp9SegmentationParams& vp9_segm_params,
- struct v4l2_vp9_segmentation_params* v4l2_segm_params) {
-#define SET_SEG_PARAMS_FLAG_IF(cond, flag) \
- v4l2_segm_params->flags |= ((vp9_segm_params.cond) ? (flag) : 0)
- SET_SEG_PARAMS_FLAG_IF(enabled, V4L2_VP9_SGMNT_PARAM_FLAG_ENABLED);
- SET_SEG_PARAMS_FLAG_IF(update_map, V4L2_VP9_SGMNT_PARAM_FLAG_UPDATE_MAP);
- SET_SEG_PARAMS_FLAG_IF(temporal_update,
- V4L2_VP9_SGMNT_PARAM_FLAG_TEMPORAL_UPDATE);
- SET_SEG_PARAMS_FLAG_IF(update_data, V4L2_VP9_SGMNT_PARAM_FLAG_UPDATE_DATA);
- SET_SEG_PARAMS_FLAG_IF(abs_or_delta_update,
- V4L2_VP9_SGMNT_PARAM_FLAG_ABS_OR_DELTA_UPDATE);
-#undef SET_SEG_PARAMS_FLAG_IF
-
- SafeArrayMemcpy(v4l2_segm_params->tree_probs, vp9_segm_params.tree_probs);
- SafeArrayMemcpy(v4l2_segm_params->pred_probs, vp9_segm_params.pred_probs);
- SafeArrayMemcpy(v4l2_segm_params->feature_data, vp9_segm_params.feature_data);
-
- static_assert(arraysize(v4l2_segm_params->feature_enabled) ==
- arraysize(vp9_segm_params.feature_enabled) &&
- arraysize(v4l2_segm_params->feature_enabled[0]) ==
- arraysize(vp9_segm_params.feature_enabled[0]),
- "feature_enabled arrays must be of same size");
- for (size_t i = 0; i < arraysize(v4l2_segm_params->feature_enabled); ++i) {
- for (size_t j = 0; j < arraysize(v4l2_segm_params->feature_enabled[i]);
- ++j) {
- v4l2_segm_params->feature_enabled[i][j] =
- vp9_segm_params.feature_enabled[i][j];
- }
+ if (!dec_surface->Submit()) {
+ VLOGF(1) << "Error while submitting frame for decoding!";
+ NOTIFY_ERROR(PLATFORM_FAILURE);
}
}
-static void FillV4L2Vp9EntropyContext(
- const Vp9FrameContext& vp9_frame_ctx,
- struct v4l2_vp9_entropy_ctx* v4l2_entropy_ctx) {
-#define ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(a) \
- SafeArrayMemcpy(v4l2_entropy_ctx->a, vp9_frame_ctx.a)
- ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(tx_probs_8x8);
- ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(tx_probs_16x16);
- ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(tx_probs_32x32);
-
- ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(coef_probs);
- ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(skip_prob);
- ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(inter_mode_probs);
- ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(interp_filter_probs);
- ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(is_inter_prob);
-
- ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(comp_mode_prob);
- ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(single_ref_prob);
- ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(comp_ref_prob);
-
- ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(y_mode_probs);
- ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(uv_mode_probs);
-
- ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(partition_probs);
-
- ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(mv_joint_probs);
- ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(mv_sign_prob);
- ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(mv_class_probs);
- ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(mv_class0_bit_prob);
- ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(mv_bits_prob);
- ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(mv_class0_fr_probs);
- ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(mv_fr_probs);
- ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(mv_class0_hp_prob);
- ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(mv_hp_prob);
-#undef ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR
-}
-
-bool V4L2SliceVideoDecodeAccelerator::V4L2VP9Accelerator::SubmitDecode(
- const scoped_refptr<VP9Picture>& pic,
- const Vp9SegmentationParams& segm_params,
- const Vp9LoopFilterParams& lf_params,
- const std::vector<scoped_refptr<VP9Picture>>& ref_pictures,
- const base::Closure& done_cb) {
- const Vp9FrameHeader* frame_hdr = pic->frame_hdr.get();
- DCHECK(frame_hdr);
-
- struct v4l2_ctrl_vp9_frame_hdr v4l2_frame_hdr;
- memset(&v4l2_frame_hdr, 0, sizeof(v4l2_frame_hdr));
-
-#define FHDR_TO_V4L2_FHDR(a) v4l2_frame_hdr.a = frame_hdr->a
- FHDR_TO_V4L2_FHDR(profile);
- FHDR_TO_V4L2_FHDR(frame_type);
-
- FHDR_TO_V4L2_FHDR(bit_depth);
- FHDR_TO_V4L2_FHDR(color_range);
- FHDR_TO_V4L2_FHDR(subsampling_x);
- FHDR_TO_V4L2_FHDR(subsampling_y);
-
- FHDR_TO_V4L2_FHDR(frame_width);
- FHDR_TO_V4L2_FHDR(frame_height);
- FHDR_TO_V4L2_FHDR(render_width);
- FHDR_TO_V4L2_FHDR(render_height);
-
- FHDR_TO_V4L2_FHDR(reset_frame_context);
-
- FHDR_TO_V4L2_FHDR(interpolation_filter);
- FHDR_TO_V4L2_FHDR(frame_context_idx);
-
- FHDR_TO_V4L2_FHDR(tile_cols_log2);
- FHDR_TO_V4L2_FHDR(tile_rows_log2);
-
- FHDR_TO_V4L2_FHDR(header_size_in_bytes);
-#undef FHDR_TO_V4L2_FHDR
- v4l2_frame_hdr.color_space = static_cast<uint8_t>(frame_hdr->color_space);
-
- FillV4L2VP9QuantizationParams(frame_hdr->quant_params,
- &v4l2_frame_hdr.quant_params);
-
-#define SET_V4L2_FRM_HDR_FLAG_IF(cond, flag) \
- v4l2_frame_hdr.flags |= ((frame_hdr->cond) ? (flag) : 0)
- SET_V4L2_FRM_HDR_FLAG_IF(show_frame, V4L2_VP9_FRAME_HDR_FLAG_SHOW_FRAME);
- SET_V4L2_FRM_HDR_FLAG_IF(error_resilient_mode,
- V4L2_VP9_FRAME_HDR_FLAG_ERR_RES);
- SET_V4L2_FRM_HDR_FLAG_IF(intra_only, V4L2_VP9_FRAME_HDR_FLAG_FRAME_INTRA);
- SET_V4L2_FRM_HDR_FLAG_IF(allow_high_precision_mv,
- V4L2_VP9_FRAME_HDR_ALLOW_HIGH_PREC_MV);
- SET_V4L2_FRM_HDR_FLAG_IF(refresh_frame_context,
- V4L2_VP9_FRAME_HDR_REFRESH_FRAME_CTX);
- SET_V4L2_FRM_HDR_FLAG_IF(frame_parallel_decoding_mode,
- V4L2_VP9_FRAME_HDR_PARALLEL_DEC_MODE);
-#undef SET_V4L2_FRM_HDR_FLAG_IF
-
- FillV4L2VP9LoopFilterParams(lf_params, &v4l2_frame_hdr.lf_params);
- FillV4L2VP9SegmentationParams(segm_params, &v4l2_frame_hdr.sgmnt_params);
-
- std::vector<struct v4l2_ext_control> ctrls;
-
- struct v4l2_ext_control ctrl;
- memset(&ctrl, 0, sizeof(ctrl));
- ctrl.id = V4L2_CID_MPEG_VIDEO_VP9_FRAME_HDR;
- ctrl.size = sizeof(v4l2_frame_hdr);
- ctrl.p_vp9_frame_hdr = &v4l2_frame_hdr;
- ctrls.push_back(ctrl);
-
- struct v4l2_ctrl_vp9_decode_param v4l2_decode_param;
- memset(&v4l2_decode_param, 0, sizeof(v4l2_decode_param));
- DCHECK_EQ(ref_pictures.size(), arraysize(v4l2_decode_param.ref_frames));
-
- std::vector<scoped_refptr<V4L2DecodeSurface>> ref_surfaces;
- for (size_t i = 0; i < ref_pictures.size(); ++i) {
- if (ref_pictures[i]) {
- scoped_refptr<V4L2DecodeSurface> ref_surface =
- VP9PictureToV4L2DecodeSurface(ref_pictures[i]);
-
- v4l2_decode_param.ref_frames[i] = ref_surface->output_record();
- ref_surfaces.push_back(ref_surface);
- } else {
- v4l2_decode_param.ref_frames[i] = VIDEO_MAX_FRAME;
- }
- }
-
- static_assert(arraysize(v4l2_decode_param.active_ref_frames) ==
- arraysize(frame_hdr->ref_frame_idx),
- "active reference frame array sizes mismatch");
-
- for (size_t i = 0; i < arraysize(frame_hdr->ref_frame_idx); ++i) {
- uint8_t idx = frame_hdr->ref_frame_idx[i];
- if (idx >= ref_pictures.size())
- return false;
-
- struct v4l2_vp9_reference_frame* v4l2_ref_frame =
- &v4l2_decode_param.active_ref_frames[i];
-
- scoped_refptr<VP9Picture> ref_pic = ref_pictures[idx];
- if (ref_pic) {
- scoped_refptr<V4L2DecodeSurface> ref_surface =
- VP9PictureToV4L2DecodeSurface(ref_pic);
- v4l2_ref_frame->buf_index = ref_surface->output_record();
-#define REF_TO_V4L2_REF(a) v4l2_ref_frame->a = ref_pic->frame_hdr->a
- REF_TO_V4L2_REF(frame_width);
- REF_TO_V4L2_REF(frame_height);
- REF_TO_V4L2_REF(bit_depth);
- REF_TO_V4L2_REF(subsampling_x);
- REF_TO_V4L2_REF(subsampling_y);
-#undef REF_TO_V4L2_REF
- } else {
- v4l2_ref_frame->buf_index = VIDEO_MAX_FRAME;
- }
- }
-
- memset(&ctrl, 0, sizeof(ctrl));
- ctrl.id = V4L2_CID_MPEG_VIDEO_VP9_DECODE_PARAM;
- ctrl.size = sizeof(v4l2_decode_param);
- ctrl.p_vp9_decode_param = &v4l2_decode_param;
- ctrls.push_back(ctrl);
-
- // Defined outside of the if() clause below as it must remain valid until
- // the call to SubmitExtControls().
- struct v4l2_ctrl_vp9_entropy v4l2_entropy;
- if (device_needs_frame_context_) {
- memset(&v4l2_entropy, 0, sizeof(v4l2_entropy));
- FillV4L2Vp9EntropyContext(frame_hdr->initial_frame_context,
- &v4l2_entropy.initial_entropy_ctx);
- FillV4L2Vp9EntropyContext(frame_hdr->frame_context,
- &v4l2_entropy.current_entropy_ctx);
- v4l2_entropy.tx_mode = frame_hdr->compressed_header.tx_mode;
- v4l2_entropy.reference_mode = frame_hdr->compressed_header.reference_mode;
-
- memset(&ctrl, 0, sizeof(ctrl));
- ctrl.id = V4L2_CID_MPEG_VIDEO_VP9_ENTROPY;
- ctrl.size = sizeof(v4l2_entropy);
- ctrl.p_vp9_entropy = &v4l2_entropy;
- ctrls.push_back(ctrl);
- }
-
- scoped_refptr<V4L2DecodeSurface> dec_surface =
- VP9PictureToV4L2DecodeSurface(pic);
-
- struct v4l2_ext_controls ext_ctrls;
- memset(&ext_ctrls, 0, sizeof(ext_ctrls));
- ext_ctrls.count = ctrls.size();
- ext_ctrls.controls = &ctrls[0];
- ext_ctrls.config_store = dec_surface->config_store();
- if (!v4l2_dec_->SubmitExtControls(&ext_ctrls))
- return false;
-
- dec_surface->SetReferenceSurfaces(ref_surfaces);
- dec_surface->SetDecodeDoneCallback(done_cb);
-
- if (!v4l2_dec_->SubmitSlice(dec_surface->input_record(), frame_hdr->data,
- frame_hdr->frame_size))
- return false;
-
- DVLOGF(4) << "Submitting decode for surface: " << dec_surface->ToString();
- v4l2_dec_->Enqueue(dec_surface);
- return true;
-}
-
-bool V4L2SliceVideoDecodeAccelerator::V4L2VP9Accelerator::OutputPicture(
- const scoped_refptr<VP9Picture>& pic) {
- // TODO(crbug.com/647725): Insert correct color space.
- v4l2_dec_->SurfaceReady(VP9PictureToV4L2DecodeSurface(pic),
- pic->bitstream_id(), pic->visible_rect(),
- VideoColorSpace());
- return true;
-}
-
-static void FillVp9FrameContext(struct v4l2_vp9_entropy_ctx& v4l2_entropy_ctx,
- Vp9FrameContext* vp9_frame_ctx) {
-#define ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(a) \
- SafeArrayMemcpy(vp9_frame_ctx->a, v4l2_entropy_ctx.a)
- ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(tx_probs_8x8);
- ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(tx_probs_16x16);
- ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(tx_probs_32x32);
-
- ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(coef_probs);
- ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(skip_prob);
- ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(inter_mode_probs);
- ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(interp_filter_probs);
- ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(is_inter_prob);
-
- ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(comp_mode_prob);
- ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(single_ref_prob);
- ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(comp_ref_prob);
-
- ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(y_mode_probs);
- ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(uv_mode_probs);
-
- ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(partition_probs);
-
- ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(mv_joint_probs);
- ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(mv_sign_prob);
- ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(mv_class_probs);
- ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(mv_class0_bit_prob);
- ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(mv_bits_prob);
- ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(mv_class0_fr_probs);
- ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(mv_fr_probs);
- ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(mv_class0_hp_prob);
- ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(mv_hp_prob);
-#undef ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX
-}
-
-bool V4L2SliceVideoDecodeAccelerator::V4L2VP9Accelerator::GetFrameContext(
- const scoped_refptr<VP9Picture>& pic,
- Vp9FrameContext* frame_ctx) {
- struct v4l2_ctrl_vp9_entropy v4l2_entropy;
- memset(&v4l2_entropy, 0, sizeof(v4l2_entropy));
-
- struct v4l2_ext_control ctrl;
- memset(&ctrl, 0, sizeof(ctrl));
- ctrl.id = V4L2_CID_MPEG_VIDEO_VP9_ENTROPY;
- ctrl.size = sizeof(v4l2_entropy);
- ctrl.p_vp9_entropy = &v4l2_entropy;
-
- scoped_refptr<V4L2DecodeSurface> dec_surface =
- VP9PictureToV4L2DecodeSurface(pic);
-
- struct v4l2_ext_controls ext_ctrls;
- memset(&ext_ctrls, 0, sizeof(ext_ctrls));
- ext_ctrls.count = 1;
- ext_ctrls.controls = &ctrl;
- ext_ctrls.config_store = dec_surface->config_store();
-
- if (!v4l2_dec_->GetExtControls(&ext_ctrls))
- return false;
-
- FillVp9FrameContext(v4l2_entropy.current_entropy_ctx, frame_ctx);
- return true;
-}
-
-scoped_refptr<V4L2DecodeSurface> V4L2SliceVideoDecodeAccelerator::
- V4L2VP9Accelerator::VP9PictureToV4L2DecodeSurface(
- const scoped_refptr<VP9Picture>& pic) {
- V4L2VP9Picture* v4l2_pic = pic->AsV4L2VP9Picture();
- CHECK(v4l2_pic);
- return v4l2_pic->dec_surface();
-}
-
void V4L2SliceVideoDecodeAccelerator::SurfaceReady(
const scoped_refptr<V4L2DecodeSurface>& dec_surface,
int32_t bitstream_id,
@@ -3159,7 +1866,7 @@ void V4L2SliceVideoDecodeAccelerator::SurfaceReady(
DVLOGF(4);
DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
- dec_surface->set_visible_rect(visible_rect);
+ dec_surface->SetVisibleRect(visible_rect);
decoder_display_queue_.push(std::make_pair(bitstream_id, dec_surface));
TryOutputSurfaces();
}
@@ -3239,10 +1946,11 @@ V4L2SliceVideoDecodeAccelerator::CreateSurface() {
DCHECK(decoder_current_bitstream_buffer_ != nullptr);
input_record.input_id = decoder_current_bitstream_buffer_->input_id;
- scoped_refptr<V4L2DecodeSurface> dec_surface = new V4L2DecodeSurface(
- input, output,
- base::Bind(&V4L2SliceVideoDecodeAccelerator::ReuseOutputBuffer,
- base::Unretained(this)));
+ scoped_refptr<V4L2DecodeSurface> dec_surface =
+ new V4L2ConfigStoreDecodeSurface(
+ input, output,
+ base::Bind(&V4L2SliceVideoDecodeAccelerator::ReuseOutputBuffer,
+ base::Unretained(this)));
DVLOGF(4) << "Created surface " << input << " -> " << output;
return dec_surface;
@@ -3320,8 +2028,79 @@ V4L2SliceVideoDecodeAccelerator::GetSupportedProfiles() {
if (!device)
return SupportedProfiles();
- return device->GetSupportedDecodeProfiles(arraysize(supported_input_fourccs_),
- supported_input_fourccs_);
+ return device->GetSupportedDecodeProfiles(
+ base::size(supported_input_fourccs_), supported_input_fourccs_);
+}
+
+// base::trace_event::MemoryDumpProvider implementation.
+bool V4L2SliceVideoDecodeAccelerator::OnMemoryDump(
+ const base::trace_event::MemoryDumpArgs& args,
+ base::trace_event::ProcessMemoryDump* pmd) {
+ // OnMemoryDump() must be performed on |decoder_thread_|.
+ DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+
+ // VIDIOC_OUTPUT queue's memory usage.
+ const size_t input_queue_buffers_count = input_buffer_map_.size();
+ size_t input_queue_memory_usage = 0;
+ std::string input_queue_buffers_memory_type =
+ V4L2Device::V4L2MemoryToString(V4L2_MEMORY_MMAP);
+ for (const auto& input_record : input_buffer_map_) {
+ input_queue_memory_usage += input_record.length;
+ }
+
+ // VIDIOC_CAPTURE queue's memory usage.
+ const size_t output_queue_buffers_count = output_buffer_map_.size();
+ size_t output_queue_memory_usage = 0;
+ std::string output_queue_buffers_memory_type =
+ output_mode_ == Config::OutputMode::ALLOCATE
+ ? V4L2Device::V4L2MemoryToString(V4L2_MEMORY_MMAP)
+ : V4L2Device::V4L2MemoryToString(V4L2_MEMORY_DMABUF);
+ if (output_mode_ == Config::OutputMode::ALLOCATE) {
+ // Call QUERY_BUF here because the length of buffers on VIDIOC_CATURE queue
+ // are not recorded nowhere in V4L2VideoDecodeAccelerator.
+ for (uint32_t index = 0; index < output_buffer_map_.size(); ++index) {
+ struct v4l2_buffer v4l2_buffer = {};
+ struct v4l2_plane v4l2_planes[VIDEO_MAX_PLANES];
+ DCHECK_LT(output_planes_count_, base::size(v4l2_planes));
+ v4l2_buffer.m.planes = v4l2_planes;
+ v4l2_buffer.length =
+ std::min(output_planes_count_, base::size(v4l2_planes));
+ v4l2_buffer.index = index;
+ v4l2_buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ v4l2_buffer.memory = V4L2_MEMORY_MMAP;
+ IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QUERYBUF, &v4l2_buffer);
+ for (size_t i = 0; i < output_planes_count_; ++i)
+ output_queue_memory_usage += v4l2_buffer.m.planes[i].length;
+ }
+ }
+
+ const size_t total_usage =
+ input_queue_memory_usage + output_queue_memory_usage;
+
+ using ::base::trace_event::MemoryAllocatorDump;
+
+ auto dump_name = base::StringPrintf("gpu/v4l2/slice_decoder/0x%" PRIxPTR,
+ reinterpret_cast<uintptr_t>(this));
+
+ MemoryAllocatorDump* dump = pmd->CreateAllocatorDump(dump_name);
+ dump->AddScalar(MemoryAllocatorDump::kNameSize,
+ MemoryAllocatorDump::kUnitsBytes,
+ static_cast<uint64_t>(total_usage));
+ dump->AddScalar("input_queue_memory_usage", MemoryAllocatorDump::kUnitsBytes,
+ static_cast<uint64_t>(input_queue_memory_usage));
+ dump->AddScalar("input_queue_buffers_count",
+ MemoryAllocatorDump::kUnitsObjects,
+ static_cast<uint64_t>(input_queue_buffers_count));
+ dump->AddString("input_queue_buffers_memory_type", "",
+ input_queue_buffers_memory_type);
+ dump->AddScalar("output_queue_memory_usage", MemoryAllocatorDump::kUnitsBytes,
+ static_cast<uint64_t>(output_queue_memory_usage));
+ dump->AddScalar("output_queue_buffers_count",
+ MemoryAllocatorDump::kUnitsObjects,
+ static_cast<uint64_t>(output_queue_buffers_count));
+ dump->AddString("output_queue_buffers_memory_type", "",
+ output_queue_buffers_memory_type);
+ return true;
}
} // namespace media
diff --git a/chromium/media/gpu/v4l2/v4l2_slice_video_decode_accelerator.h b/chromium/media/gpu/v4l2/v4l2_slice_video_decode_accelerator.h
index d672268371b..ccb1f6eaab0 100644
--- a/chromium/media/gpu/v4l2/v4l2_slice_video_decode_accelerator.h
+++ b/chromium/media/gpu/v4l2/v4l2_slice_video_decode_accelerator.h
@@ -19,10 +19,11 @@
#include "base/memory/weak_ptr.h"
#include "base/synchronization/waitable_event.h"
#include "base/threading/thread.h"
+#include "base/trace_event/memory_dump_provider.h"
#include "media/gpu/decode_surface_handler.h"
#include "media/gpu/gpu_video_decode_accelerator_helpers.h"
-#include "media/gpu/h264_decoder.h"
#include "media/gpu/media_gpu_export.h"
+#include "media/gpu/v4l2/v4l2_decode_surface_handler.h"
#include "media/gpu/v4l2/v4l2_device.h"
#include "media/gpu/vp8_decoder.h"
#include "media/gpu/vp9_decoder.h"
@@ -39,7 +40,8 @@ class V4L2DecodeSurface;
// the input stream and managing decoder state across frames.
class MEDIA_GPU_EXPORT V4L2SliceVideoDecodeAccelerator
: public VideoDecodeAccelerator,
- public DecodeSurfaceHandler<V4L2DecodeSurface> {
+ public V4L2DecodeSurfaceHandler,
+ public base::trace_event::MemoryDumpProvider {
public:
V4L2SliceVideoDecodeAccelerator(
const scoped_refptr<V4L2Device>& device,
@@ -69,11 +71,11 @@ class MEDIA_GPU_EXPORT V4L2SliceVideoDecodeAccelerator
static VideoDecodeAccelerator::SupportedProfiles GetSupportedProfiles();
- private:
- class V4L2H264Accelerator;
- class V4L2VP8Accelerator;
- class V4L2VP9Accelerator;
+ // base::trace_event::MemoryDumpProvider implementation.
+ bool OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
+ base::trace_event::ProcessMemoryDump* pmd) override;
+ private:
// Record for input buffers.
struct InputRecord {
InputRecord();
@@ -134,7 +136,7 @@ class MEDIA_GPU_EXPORT V4L2SliceVideoDecodeAccelerator
//
// Below methods are used by accelerator implementations.
//
- // DecodeSurfaceHandler implementation.
+ // V4L2DecodeSurfaceHandler implementation.
scoped_refptr<V4L2DecodeSurface> CreateSurface() override;
// SurfaceReady() uses |decoder_display_queue_| to guarantee that decoding
// of |dec_surface| happens in order.
@@ -142,21 +144,11 @@ class MEDIA_GPU_EXPORT V4L2SliceVideoDecodeAccelerator
int32_t bitstream_id,
const gfx::Rect& visible_rect,
const VideoColorSpace& /* color_space */) override;
-
- // Append slice data in |data| of size |size| to pending hardware
- // input buffer with |index|. This buffer will be submitted for decode
- // on the next DecodeSurface(). Return true on success.
- bool SubmitSlice(int index, const uint8_t* data, size_t size);
-
- // Submit controls in |ext_ctrls| to hardware. Return true on success.
- bool SubmitExtControls(struct v4l2_ext_controls* ext_ctrls);
-
- // Gets current control values for controls in |ext_ctrls| from the driver.
- // Return true on success.
- bool GetExtControls(struct v4l2_ext_controls* ext_ctrls);
-
- // Return true if the driver exposes V4L2 control |ctrl_id|, false otherwise.
- bool IsCtrlExposed(uint32_t ctrl_id);
+ bool SubmitSlice(const scoped_refptr<V4L2DecodeSurface>& dec_surface,
+ const uint8_t* data,
+ size_t size) override;
+ void DecodeSurface(
+ const scoped_refptr<V4L2DecodeSurface>& dec_surface) override;
//
// Internal methods of this class.
@@ -174,7 +166,7 @@ class MEDIA_GPU_EXPORT V4L2SliceVideoDecodeAccelerator
void Dequeue();
// V4L2 QBUF helpers.
- bool EnqueueInputRecord(int index, uint32_t config_store);
+ bool EnqueueInputRecord(const V4L2DecodeSurface* dec_surface);
bool EnqueueOutputRecord(int index);
// Set input and output formats in hardware.
@@ -272,37 +264,26 @@ class MEDIA_GPU_EXPORT V4L2SliceVideoDecodeAccelerator
// file descriptors.
void ImportBufferForPictureTask(
int32_t picture_buffer_id,
- // TODO(posciak): (https://crbug.com/561749) we should normally be able to
- // pass the vector by itself via std::move, but it's not possible to do
- // this if this method is used as a callback.
- std::unique_ptr<std::vector<base::ScopedFD>> passed_dmabuf_fds);
+ std::vector<base::ScopedFD> passed_dmabuf_fds);
// Create a GLImage for the buffer associated with V4L2 |buffer_index| and
// for |picture_buffer_id|, backed by dmabuf file descriptors in
// |passed_dmabuf_fds|, taking ownership of them.
// The GLImage will be associated |client_texture_id| in gles2 decoder.
- void CreateGLImageFor(
- size_t buffer_index,
- int32_t picture_buffer_id,
- // TODO(posciak): (https://crbug.com/561749) we should normally be able to
- // pass the vector by itself via std::move, but it's not possible to do
- // this if this method is used as a callback.
- std::unique_ptr<std::vector<base::ScopedFD>> passed_dmabuf_fds,
- GLuint client_texture_id,
- GLuint texture_id,
- const gfx::Size& size,
- uint32_t fourcc);
+ void CreateGLImageFor(size_t buffer_index,
+ int32_t picture_buffer_id,
+ std::vector<base::ScopedFD> passed_dmabuf_fds,
+ GLuint client_texture_id,
+ GLuint texture_id,
+ const gfx::Size& size,
+ uint32_t fourcc);
// Take the dmabuf |passed_dmabuf_fds|, for |picture_buffer_id|, and use it
// for OutputRecord at |buffer_index|. The buffer is backed by
// |passed_dmabuf_fds|, and the OutputRecord takes ownership of them.
- void AssignDmaBufs(
- size_t buffer_index,
- int32_t picture_buffer_id,
- // TODO(posciak): (https://crbug.com/561749) we should normally be able to
- // pass the vector by itself via std::move, but it's not possible to do
- // this if this method is used as a callback.
- std::unique_ptr<std::vector<base::ScopedFD>> passed_dmabuf_fds);
+ void AssignDmaBufs(size_t buffer_index,
+ int32_t picture_buffer_id,
+ std::vector<base::ScopedFD> passed_dmabuf_fds);
// Performed on decoder_thread_ as a consequence of poll() on decoder_thread_
// returning an event.
@@ -418,8 +399,11 @@ class MEDIA_GPU_EXPORT V4L2SliceVideoDecodeAccelerator
gfx::Size coded_size_;
struct BitstreamBufferRef;
- // Input queue of stream buffers coming from the client.
- base::queue<std::unique_ptr<BitstreamBufferRef>> decoder_input_queue_;
+ // Input queue of stream buffers coming from the client. Although the elements
+ // in |decoder_input_queue_| is push()/pop() in queue order, this needs to be
+ // base::circular_deque because we need to do random access in OnMemoryDump().
+ base::circular_deque<std::unique_ptr<BitstreamBufferRef>>
+ decoder_input_queue_;
// BitstreamBuffer currently being processed.
std::unique_ptr<BitstreamBufferRef> decoder_current_bitstream_buffer_;
diff --git a/chromium/media/gpu/v4l2/v4l2_video_decode_accelerator.cc b/chromium/media/gpu/v4l2/v4l2_video_decode_accelerator.cc
index 0a42d8f57fa..9561839f7fe 100644
--- a/chromium/media/gpu/v4l2/v4l2_video_decode_accelerator.cc
+++ b/chromium/media/gpu/v4l2/v4l2_video_decode_accelerator.cc
@@ -19,8 +19,11 @@
#include "base/numerics/safe_conversions.h"
#include "base/posix/eintr_wrapper.h"
#include "base/single_thread_task_runner.h"
+#include "base/stl_util.h"
+#include "base/strings/stringprintf.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/time/time.h"
+#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/trace_event.h"
#include "build/build_config.h"
#include "media/base/media_switches.h"
@@ -28,6 +31,7 @@
#include "media/base/unaligned_shared_memory.h"
#include "media/base/video_frame_layout.h"
#include "media/base/video_types.h"
+#include "media/gpu/image_processor_factory.h"
#include "media/gpu/macros.h"
#include "media/gpu/v4l2/v4l2_image_processor.h"
#include "media/video/h264_parser.h"
@@ -89,6 +93,7 @@ struct V4L2VideoDecodeAccelerator::BitstreamBufferRef {
scoped_refptr<DecoderBuffer> buffer,
int32_t input_id);
~BitstreamBufferRef();
+
const base::WeakPtr<Client> client;
const scoped_refptr<base::SingleThreadTaskRunner> client_task_runner;
scoped_refptr<DecoderBuffer> buffer;
@@ -274,6 +279,9 @@ bool V4L2VideoDecodeAccelerator::Initialize(const Config& config,
decoder_state_ = kInitialized;
+ base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
+ this, "media::V4l2VideoDecodeAccelerator", decoder_thread_.task_runner());
+
// InitializeTask will NOTIFY_ERROR on failure.
decoder_thread_.task_runner()->PostTask(
FROM_HERE, base::BindOnce(&V4L2VideoDecodeAccelerator::InitializeTask,
@@ -784,8 +792,8 @@ V4L2VideoDecodeAccelerator::GetSupportedProfiles() {
if (!device)
return SupportedProfiles();
- return device->GetSupportedDecodeProfiles(arraysize(supported_input_fourccs_),
- supported_input_fourccs_);
+ return device->GetSupportedDecodeProfiles(
+ base::size(supported_input_fourccs_), supported_input_fourccs_);
}
void V4L2VideoDecodeAccelerator::DecodeTask(scoped_refptr<DecoderBuffer> buffer,
@@ -818,7 +826,7 @@ void V4L2VideoDecodeAccelerator::DecodeTask(scoped_refptr<DecoderBuffer> buffer,
return;
}
- decoder_input_queue_.push(std::move(bitstream_record));
+ decoder_input_queue_.push_back(std::move(bitstream_record));
decoder_decode_buffer_tasks_scheduled_++;
DecodeBufferTask();
}
@@ -852,7 +860,7 @@ void V4L2VideoDecodeAccelerator::DecodeBufferTask() {
// Setup to use the next buffer.
decoder_current_bitstream_buffer_ = std::move(decoder_input_queue_.front());
- decoder_input_queue_.pop();
+ decoder_input_queue_.pop_front();
const auto& buffer = decoder_current_bitstream_buffer_->buffer;
if (buffer) {
DVLOGF(4) << "reading input_id="
@@ -1637,7 +1645,7 @@ void V4L2VideoDecodeAccelerator::FlushTask() {
DCHECK(!decoder_flushing_);
// Queue up an empty buffer -- this triggers the flush.
- decoder_input_queue_.push(std::make_unique<BitstreamBufferRef>(
+ decoder_input_queue_.push_back(std::make_unique<BitstreamBufferRef>(
decode_client_, decode_task_runner_, nullptr, kFlushBufferId));
decoder_flushing_ = true;
SendPictureReady(); // Send all pending PictureReady.
@@ -1752,7 +1760,7 @@ void V4L2VideoDecodeAccelerator::ResetTask() {
}
decoder_current_bitstream_buffer_.reset();
while (!decoder_input_queue_.empty())
- decoder_input_queue_.pop();
+ decoder_input_queue_.pop_front();
current_input_buffer_ = V4L2WritableBufferRef();
@@ -1866,13 +1874,22 @@ void V4L2VideoDecodeAccelerator::DestroyTask() {
decoder_decode_buffer_tasks_scheduled_ = 0;
decoder_frames_at_client_ = 0;
while (!decoder_input_queue_.empty())
- decoder_input_queue_.pop();
+ decoder_input_queue_.pop_front();
decoder_flushing_ = false;
image_processor_ = nullptr;
DestroyInputBuffers();
DestroyOutputBuffers();
+
+ if (decoder_thread_.IsRunning()) {
+ DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+ // DestroyTask can be executed on not only decoder_thread but also child
+ // thread. When decoder thread is Stop(), |this| is not registered in
+ // MemoryDumpManager. So
+ base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider(
+ this);
+ }
}
bool V4L2VideoDecodeAccelerator::StartDevicePoll() {
@@ -2377,6 +2394,7 @@ bool V4L2VideoDecodeAccelerator::ResetImageProcessor() {
bool V4L2VideoDecodeAccelerator::CreateImageProcessor() {
VLOGF(2);
+ DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
DCHECK(!image_processor_);
const ImageProcessor::OutputMode image_processor_output_mode =
(output_mode_ == Config::OutputMode::ALLOCATE
@@ -2405,28 +2423,34 @@ bool V4L2VideoDecodeAccelerator::CreateImageProcessor() {
return false;
}
- // Unretained is safe because |this| owns image processor and there will be
- // no callbacks after processor destroys.
+ // Unretained(this) is safe for ErrorCB because |decoder_thread_| is owned by
+ // this V4L2VideoDecodeAccelerator and |this| must be valid when ErrorCB is
+ // executed.
+ // TODO(crbug.com/917798): Use ImageProcessorFactory::Create() once we remove
+ // |image_processor_device_| from V4L2VideoDecodeAccelerator.
image_processor_ = V4L2ImageProcessor::Create(
- image_processor_device_, VideoFrame::STORAGE_DMABUFS,
- VideoFrame::STORAGE_DMABUFS, image_processor_output_mode, *input_layout,
- *output_layout, visible_size_, visible_size_, output_buffer_map_.size(),
- base::Bind(&V4L2VideoDecodeAccelerator::ImageProcessorError,
- base::Unretained(this)));
+ image_processor_device_,
+ ImageProcessor::PortConfig(*input_layout, visible_size_,
+ {VideoFrame::STORAGE_DMABUFS}),
+ ImageProcessor::PortConfig(*output_layout, visible_size_,
+ {VideoFrame::STORAGE_DMABUFS}),
+ image_processor_output_mode, output_buffer_map_.size(),
+ base::BindRepeating(&V4L2VideoDecodeAccelerator::ImageProcessorError,
+ base::Unretained(this)));
if (!image_processor_) {
VLOGF(1) << "Initialize image processor failed";
NOTIFY_ERROR(PLATFORM_FAILURE);
return false;
}
- VLOGF(2) << "image_processor_->output_allocated_size()="
- << image_processor_->output_allocated_size().ToString();
- DCHECK(image_processor_->output_allocated_size() == egl_image_size_);
- if (image_processor_->input_allocated_size() != coded_size_) {
+ VLOGF(2) << "image_processor_->output_layout().coded_size()="
+ << image_processor_->output_layout().coded_size().ToString();
+ DCHECK(image_processor_->output_layout().coded_size() == egl_image_size_);
+ if (image_processor_->input_layout().coded_size() != coded_size_) {
VLOGF(1) << "Image processor should be able to take the output coded "
<< "size of decoder " << coded_size_.ToString()
<< " without adjusting to "
- << image_processor_->input_allocated_size().ToString();
+ << image_processor_->input_layout().coded_size().ToString();
NOTIFY_ERROR(PLATFORM_FAILURE);
return false;
}
@@ -2464,8 +2488,9 @@ bool V4L2VideoDecodeAccelerator::ProcessFrame(int32_t bitstream_buffer_id,
if (output_fds.empty())
return false;
}
- // Unretained is safe because |this| owns image processor and there will
- // be no callbacks after processor destroys.
+ // Unretained(this) is safe for FrameReadyCB because |decoder_thread_| is
+ // owned by this V4L2VideoDecodeAccelerator and |this| must be valid when
+ // FrameReadyCB is executed.
image_processor_->Process(
input_frame, output_buffer_index, std::move(output_fds),
base::BindOnce(&V4L2VideoDecodeAccelerator::FrameProcessed,
@@ -2658,8 +2683,27 @@ void V4L2VideoDecodeAccelerator::FrameProcessed(
DVLOGF(4) << "output_buffer_index=" << output_buffer_index
<< ", bitstream_buffer_id=" << bitstream_buffer_id;
DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
- DCHECK(!image_processor_bitstream_buffer_ids_.empty());
- DCHECK(image_processor_bitstream_buffer_ids_.front() == bitstream_buffer_id);
+ // TODO(crbug.com/921825): Remove this workaround once reset callback is
+ // implemented.
+ if (image_processor_bitstream_buffer_ids_.empty() ||
+ image_processor_bitstream_buffer_ids_.front() != bitstream_buffer_id ||
+ output_buffer_map_.empty()) {
+ // This can happen if image processor is reset.
+ // V4L2VideoDecodeAccelerator::Reset() makes
+ // |image_processor_bitstream_buffer_ids| empty.
+ // During ImageProcessor::Reset(), some FrameProcessed() can have been
+ // posted to |decoder_thread|. |bitsream_buffer_id| is pushed to
+ // |image_processor_bitstream_buffer_ids_| in ProcessFrame(). Although we
+ // are not sure a new bitstream buffer id is pushed after Reset() and before
+ // FrameProcessed(), We should skip the case of mismatch of bitstream buffer
+ // id for safety.
+ // For |output_buffer_map_|, it is cleared in Destroy(). Destroy() destroys
+ // ImageProcessor which may call FrameProcessed() in parallel similar to
+ // Reset() case.
+ DVLOGF(4) << "Ignore processed frame for bitstream_buffer_id="
+ << bitstream_buffer_id;
+ return;
+ }
DCHECK_GE(output_buffer_index, 0);
DCHECK_LT(output_buffer_index, static_cast<int>(output_buffer_map_.size()));
@@ -2696,8 +2740,67 @@ void V4L2VideoDecodeAccelerator::FrameProcessed(
}
void V4L2VideoDecodeAccelerator::ImageProcessorError() {
+ DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
VLOGF(1) << "Image processor error";
NOTIFY_ERROR(PLATFORM_FAILURE);
}
+bool V4L2VideoDecodeAccelerator::OnMemoryDump(
+ const base::trace_event::MemoryDumpArgs& args,
+ base::trace_event::ProcessMemoryDump* pmd) {
+ // OnMemoryDump() must be performed on |decoder_thread_|.
+ DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+
+ // |input_queue| and |output_queue| are owned by |decoder_thread_|.
+ size_t input_queue_buffers_count = 0;
+ size_t input_queue_memory_usage = 0;
+ std::string input_queue_buffers_memory_type;
+ if (input_queue_) {
+ input_queue_buffers_count = input_queue_->AllocatedBuffersCount();
+ input_queue_buffers_memory_type =
+ V4L2Device::V4L2MemoryToString(input_queue_->GetMemoryType());
+ if (output_queue_->GetMemoryType() == V4L2_MEMORY_MMAP)
+ input_queue_memory_usage = input_queue_->GetMemoryUsage();
+ }
+
+ size_t output_queue_buffers_count = 0;
+ size_t output_queue_memory_usage = 0;
+ std::string output_queue_buffers_memory_type;
+ if (output_queue_) {
+ output_queue_buffers_count = output_queue_->AllocatedBuffersCount();
+ output_queue_buffers_memory_type =
+ V4L2Device::V4L2MemoryToString(output_queue_->GetMemoryType());
+ if (output_queue_->GetMemoryType() == V4L2_MEMORY_MMAP)
+ output_queue_memory_usage = output_queue_->GetMemoryUsage();
+ }
+
+ const size_t total_usage =
+ input_queue_memory_usage + output_queue_memory_usage;
+
+ using ::base::trace_event::MemoryAllocatorDump;
+
+ auto dump_name = base::StringPrintf("gpu/v4l2/decoder/0x%" PRIxPTR,
+ reinterpret_cast<uintptr_t>(this));
+ MemoryAllocatorDump* dump = pmd->CreateAllocatorDump(dump_name);
+ dump->AddScalar(MemoryAllocatorDump::kNameSize,
+ MemoryAllocatorDump::kUnitsBytes,
+ static_cast<uint64_t>(total_usage));
+ dump->AddScalar("input_queue_memory_usage", MemoryAllocatorDump::kUnitsBytes,
+ static_cast<uint64_t>(input_queue_memory_usage));
+ dump->AddScalar("input_queue_buffers_count",
+ MemoryAllocatorDump::kUnitsObjects,
+ static_cast<uint64_t>(input_queue_buffers_count));
+ dump->AddString("input_queue_buffers_memory_type", "",
+ input_queue_buffers_memory_type);
+ dump->AddScalar("output_queue_memory_usage", MemoryAllocatorDump::kUnitsBytes,
+ static_cast<uint64_t>(output_queue_memory_usage));
+ dump->AddScalar("output_queue_buffers_count",
+ MemoryAllocatorDump::kUnitsObjects,
+ static_cast<uint64_t>(output_queue_buffers_count));
+ dump->AddString("output_queue_buffers_memory_type", "",
+ output_queue_buffers_memory_type);
+
+ return true;
+}
+
} // namespace media
diff --git a/chromium/media/gpu/v4l2/v4l2_video_decode_accelerator.h b/chromium/media/gpu/v4l2/v4l2_video_decode_accelerator.h
index 526394fb80b..032a84d4ce2 100644
--- a/chromium/media/gpu/v4l2/v4l2_video_decode_accelerator.h
+++ b/chromium/media/gpu/v4l2/v4l2_video_decode_accelerator.h
@@ -22,6 +22,7 @@
#include "base/memory/ref_counted.h"
#include "base/synchronization/waitable_event.h"
#include "base/threading/thread.h"
+#include "base/trace_event/memory_dump_provider.h"
#include "media/base/limits.h"
#include "media/base/video_decoder_config.h"
#include "media/gpu/gpu_video_decode_accelerator_helpers.h"
@@ -91,7 +92,8 @@ class H264Parser;
// buffrers. We cannot drop any frame during resolution change. So V4L2VDA
// should destroy output buffers after image processor returns all the frames.
class MEDIA_GPU_EXPORT V4L2VideoDecodeAccelerator
- : public VideoDecodeAccelerator {
+ : public VideoDecodeAccelerator,
+ public base::trace_event::MemoryDumpProvider {
public:
V4L2VideoDecodeAccelerator(
EGLDisplay egl_display,
@@ -122,6 +124,10 @@ class MEDIA_GPU_EXPORT V4L2VideoDecodeAccelerator
static VideoDecodeAccelerator::SupportedProfiles GetSupportedProfiles();
+ // base::trace_event::MemoryDumpProvider implementation.
+ bool OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
+ base::trace_event::ProcessMemoryDump* pmd) override;
+
private:
// These are rather subjectively tuned.
enum {
@@ -483,8 +489,11 @@ class MEDIA_GPU_EXPORT V4L2VideoDecodeAccelerator
// Got a reset request while we were performing resolution change or waiting
// picture buffers.
bool reset_pending_;
- // Input queue for decoder_thread_: BitstreamBuffers in.
- base::queue<std::unique_ptr<BitstreamBufferRef>> decoder_input_queue_;
+ // Input queue for decoder_thread_: BitstreamBuffers in. Although the elements
+ // in |decoder_input_queue_| is push()/pop() in queue order, this needs to be
+ // base::circular_deque because we need to do random access in OnMemoryDump().
+ base::circular_deque<std::unique_ptr<BitstreamBufferRef>>
+ decoder_input_queue_;
// For H264 decode, hardware requires that we send it frame-sized chunks.
// We'll need to parse the stream.
std::unique_ptr<H264Parser> decoder_h264_parser_;
diff --git a/chromium/media/gpu/v4l2/v4l2_video_encode_accelerator.cc b/chromium/media/gpu/v4l2/v4l2_video_encode_accelerator.cc
index 12cfb27669a..f7713719a3a 100644
--- a/chromium/media/gpu/v4l2/v4l2_video_encode_accelerator.cc
+++ b/chromium/media/gpu/v4l2/v4l2_video_encode_accelerator.cc
@@ -12,13 +12,14 @@
#include <sys/ioctl.h>
#include <sys/mman.h>
+#include <numeric>
#include <utility>
#include "base/callback.h"
#include "base/command_line.h"
-#include "base/macros.h"
#include "base/numerics/safe_conversions.h"
#include "base/single_thread_task_runner.h"
+#include "base/stl_util.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/trace_event.h"
#include "media/base/bind_to_current_loop.h"
@@ -28,8 +29,8 @@
#include "media/base/video_frame_layout.h"
#include "media/base/video_types.h"
#include "media/gpu/gpu_video_encode_accelerator_helpers.h"
+#include "media/gpu/image_processor_factory.h"
#include "media/gpu/macros.h"
-#include "media/gpu/v4l2/v4l2_image_processor.h"
#include "media/video/h264_parser.h"
#define NOTIFY_ERROR(x) \
@@ -124,8 +125,6 @@ V4L2VideoEncodeAccelerator::V4L2VideoEncodeAccelerator(
const scoped_refptr<V4L2Device>& device)
: child_task_runner_(base::ThreadTaskRunnerHandle::Get()),
output_buffer_byte_size_(0),
- device_input_format_(PIXEL_FORMAT_UNKNOWN),
- input_planes_count_(0),
output_format_fourcc_(0),
encoder_state_(kUninitialized),
device_(device),
@@ -198,14 +197,10 @@ bool V4L2VideoEncodeAccelerator::Initialize(const Config& config,
return false;
}
- if (config.input_format != device_input_format_) {
- VLOGF(2) << "Input format not supported by the HW, will try to convert to "
- << VideoPixelFormatToString(device_input_format_);
-
- if (!V4L2ImageProcessor::IsSupported()) {
- VLOGF(1) << "Image processor not available";
- return false;
- }
+ if (config.input_format != device_input_layout_->format()) {
+ VLOGF(2) << "Input format: " << config.input_format << " is not supported "
+ << "by the HW. Will try to convert to "
+ << device_input_layout_->format();
// It is necessary to set strides and buffers even with dummy values,
// because VideoFrameLayout::num_buffers() specifies v4l2 pix format
@@ -220,32 +215,33 @@ bool V4L2VideoEncodeAccelerator::Initialize(const Config& config,
VLOGF(1) << "Invalid image processor input layout";
return false;
}
- auto output_layout = VideoFrameLayout::CreateWithStrides(
- device_input_format_, input_allocated_size_,
- std::vector<int32_t>(
- VideoFrameLayout::NumPlanes(device_input_format_)) /* strides */,
- std::vector<size_t>(
- VideoFrameLayout::NumPlanes(device_input_format_)) /* buffers */);
- if (!output_layout) {
- VLOGF(1) << "Invalid image processor output layout";
- return false;
- }
- // Convert from |config.input_format| to |device_input_format_|, keeping the
- // size at |visible_size_| and requiring the output buffers to be of at
- // least |input_allocated_size_|. Unretained is safe because |this| owns
- // image processor and there will be no callbacks after processor destroys.
+ // Convert from |config.input_format| to |device_input_layout_->format()|,
+ // keeping the size at |visible_size_| and requiring the output buffers to
+ // be of at least |device_input_layout_->coded_size()|.
+ // Unretained(this) is safe in creating ErrorCB because
+ // V4L2VideoEncodeAccelerator instance outlives |image_processor_| and
+ // ImageProcessor invalidates posted ErrorCB when its Reset() or destructor
+ // is called.
// |input_storage_type| can be STORAGE_SHMEM and STORAGE_MOJO_SHARED_BUFFER.
// However, it doesn't matter VideoFrame::STORAGE_OWNED_MEMORY is specified
// for |input_storage_type| here, as long as VideoFrame on Process()'s data
// can be accessed by VideoFrame::data().
- image_processor_ = V4L2ImageProcessor::Create(
- V4L2Device::Create(), VideoFrame::STORAGE_OWNED_MEMORY,
- VideoFrame::STORAGE_DMABUFS, ImageProcessor::OutputMode::ALLOCATE,
- *input_layout, *output_layout, visible_size_, visible_size_,
+ image_processor_ = ImageProcessorFactory::Create(
+ ImageProcessor::PortConfig(*input_layout, visible_size_,
+ {VideoFrame::STORAGE_OWNED_MEMORY}),
+ ImageProcessor::PortConfig(
+ *device_input_layout_, visible_size_,
+ {VideoFrame::STORAGE_DMABUFS, VideoFrame::STORAGE_OWNED_MEMORY}),
+ // Try OutputMode::ALLOCATE first because we want v4l2IP chooses
+ // ALLOCATE mode. For libyuvIP, it accepts only IMPORT.
+ {ImageProcessor::OutputMode::ALLOCATE,
+ ImageProcessor::OutputMode::IMPORT},
kImageProcBufferCount,
- base::Bind(&V4L2VideoEncodeAccelerator::ImageProcessorError,
- base::Unretained(this)));
+ // We have to bind |weak_this| for ImageProcessorError, because child
+ // thread is outlive this V4L2VideoEncodeAccelerator.
+ base::BindRepeating(&V4L2VideoEncodeAccelerator::ImageProcessorError,
+ weak_this_));
if (!image_processor_) {
VLOGF(1) << "Failed initializing image processor";
return false;
@@ -255,40 +251,35 @@ bool V4L2VideoEncodeAccelerator::Initialize(const Config& config,
// Output coded height of processor can be larger but not smaller than the
// input coded height of encoder. For example, suppose input size of encoder
// is 320x193. It is OK if the output of processor is 320x208.
- if (image_processor_->output_allocated_size().width() !=
- input_allocated_size_.width() ||
- image_processor_->output_allocated_size().height() <
- input_allocated_size_.height()) {
+ if (image_processor_->output_layout().coded_size().width() !=
+ device_input_layout_->coded_size().width() ||
+ image_processor_->output_layout().coded_size().height() <
+ device_input_layout_->coded_size().height()) {
VLOGF(1) << "Invalid image processor output coded size "
- << image_processor_->output_allocated_size().ToString()
+ << image_processor_->output_layout().coded_size().ToString()
<< ", encode input coded size is "
- << input_allocated_size_.ToString();
+ << device_input_layout_->coded_size().ToString();
return false;
}
- for (int i = 0; i < kImageProcBufferCount; i++)
- free_image_processor_output_buffers_.push_back(i);
+ // Initialize |free_image_processor_output_buffer_indices_|.
+ free_image_processor_output_buffer_indices_.resize(kImageProcBufferCount);
+ std::iota(free_image_processor_output_buffer_indices_.begin(),
+ free_image_processor_output_buffer_indices_.end(), 0);
+
+ if (!AllocateImageProcessorOutputBuffers())
+ return false;
}
+ if (!InitInputMemoryType(config))
+ return false;
+
if (!InitControls(config))
return false;
if (!CreateOutputBuffers())
return false;
- if (!image_processor_) {
- switch (config.storage_type.value_or(Config::StorageType::kShmem)) {
- case Config::StorageType::kShmem:
- input_memory_type_ = V4L2_MEMORY_USERPTR;
- break;
- case Config::StorageType::kDmabuf:
- input_memory_type_ = V4L2_MEMORY_DMABUF;
- break;
- }
- } else {
- input_memory_type_ = V4L2_MEMORY_DMABUF;
- }
-
if (!encoder_thread_.Start()) {
VLOGF(1) << "encoder thread failed to start";
return false;
@@ -304,13 +295,69 @@ bool V4L2VideoEncodeAccelerator::Initialize(const Config& config,
FROM_HERE,
base::Bind(&Client::RequireBitstreamBuffers, client_, kInputBufferCount,
image_processor_.get()
- ? image_processor_->input_allocated_size()
+ ? image_processor_->input_layout().coded_size()
: input_allocated_size_,
output_buffer_byte_size_));
return true;
}
+bool V4L2VideoEncodeAccelerator::AllocateImageProcessorOutputBuffers() {
+ DCHECK(image_processor_);
+ // Allocate VideoFrames for image processor output if its mode is IMPORT.
+ if (image_processor_->output_mode() != ImageProcessor::OutputMode::IMPORT) {
+ return true;
+ }
+
+ image_processor_output_buffers_.resize(kImageProcBufferCount);
+ const auto output_storage_type = image_processor_->output_storage_type();
+ for (size_t i = 0; i < kImageProcBufferCount; i++) {
+ switch (output_storage_type) {
+ case VideoFrame::STORAGE_OWNED_MEMORY:
+ image_processor_output_buffers_[i] = VideoFrame::CreateFrameWithLayout(
+ *device_input_layout_, gfx::Rect(visible_size_), visible_size_,
+ base::TimeDelta(), true);
+ if (!image_processor_output_buffers_[i]) {
+ VLOG(1) << "Failed to create VideoFrame";
+ return false;
+ }
+ break;
+ // TODO(crbug.com/910590): Support VideoFrame::STORAGE_DMABUFS.
+ default:
+ VLOGF(1) << "Unsupported output storage type of image processor: "
+ << output_storage_type;
+ return false;
+ }
+ }
+ return true;
+}
+
+bool V4L2VideoEncodeAccelerator::InitInputMemoryType(const Config& config) {
+ if (image_processor_) {
+ const auto storage_type = image_processor_->output_storage_type();
+ if (storage_type == VideoFrame::STORAGE_DMABUFS) {
+ input_memory_type_ = V4L2_MEMORY_DMABUF;
+ } else if (VideoFrame::IsStorageTypeMappable(storage_type)) {
+ input_memory_type_ = V4L2_MEMORY_USERPTR;
+ } else {
+ VLOGF(1) << "Unsupported image processor's output StorageType: "
+ << storage_type;
+ return false;
+ }
+ } else {
+ switch (config.storage_type.value_or(Config::StorageType::kShmem)) {
+ case Config::StorageType::kShmem:
+ input_memory_type_ = V4L2_MEMORY_USERPTR;
+ break;
+ case Config::StorageType::kDmabuf:
+ input_memory_type_ = V4L2_MEMORY_DMABUF;
+ break;
+ }
+ }
+ return true;
+}
+
void V4L2VideoEncodeAccelerator::ImageProcessorError() {
+ DCHECK(child_task_runner_->BelongsToCurrentThread());
VLOGF(1) << "Image processor error";
NOTIFY_ERROR(kPlatformFailureError);
}
@@ -321,17 +368,41 @@ void V4L2VideoEncodeAccelerator::Encode(const scoped_refptr<VideoFrame>& frame,
DCHECK(child_task_runner_->BelongsToCurrentThread());
if (image_processor_) {
- if (free_image_processor_output_buffers_.size() > 0) {
- int output_buffer_index = free_image_processor_output_buffers_.back();
- free_image_processor_output_buffers_.pop_back();
- // Unretained is safe because |this| owns image processor and there will
- // be no callbacks after processor destroys.
- if (!image_processor_->Process(
- frame, output_buffer_index, std::vector<base::ScopedFD>(),
- base::BindOnce(&V4L2VideoEncodeAccelerator::FrameProcessed,
- base::Unretained(this), force_keyframe,
- frame->timestamp(), output_buffer_index))) {
- NOTIFY_ERROR(kPlatformFailureError);
+ if (!free_image_processor_output_buffer_indices_.empty()) {
+ // Create a VideoFrame by wrapping an instance from
+ // |image_processor_output_buffers_|. The new VideoFrame has its own life
+ // cycle but shares underlying payload from the VideoFrame being wrapped.
+ // When the VideoEncodeAccelerator finish processing ImageProcessor's
+ // output frame, the frame is no longer referenced, hence trigger
+ // destruction observer to recycle the frame.
+ const size_t output_buffer_index =
+ free_image_processor_output_buffer_indices_.back();
+ free_image_processor_output_buffer_indices_.pop_back();
+ if (image_processor_->output_mode() ==
+ ImageProcessor::OutputMode::IMPORT) {
+ const auto& buf = image_processor_output_buffers_[output_buffer_index];
+ auto output_frame = VideoFrame::WrapVideoFrame(
+ buf, buf->format(), buf->visible_rect(), buf->natural_size());
+
+ // We have to bind |weak_this| for FrameProcessed, because child
+ // thread is outlive this V4L2VideoEncodeAccelerator.
+ if (!image_processor_->Process(
+ frame, std::move(output_frame),
+ base::BindOnce(&V4L2VideoEncodeAccelerator::FrameProcessed,
+ weak_this_, force_keyframe, frame->timestamp(),
+ output_buffer_index))) {
+ NOTIFY_ERROR(kPlatformFailureError);
+ }
+ } else {
+ // We have to bind |weak_this| for FrameProcessed, because child
+ // thread is outlive this V4L2VideoEncodeAccelerator.
+ if (!image_processor_->Process(
+ frame, output_buffer_index, std::vector<base::ScopedFD>(),
+ base::BindOnce(&V4L2VideoEncodeAccelerator::FrameProcessed,
+ weak_this_, force_keyframe, frame->timestamp(),
+ output_buffer_index))) {
+ NOTIFY_ERROR(kPlatformFailureError);
+ }
}
} else {
image_processor_input_queue_.emplace(frame, force_keyframe);
@@ -430,7 +501,7 @@ void V4L2VideoEncodeAccelerator::FlushTask(FlushCallback flush_callback) {
if (flush_callback_ || encoder_state_ != kEncoding) {
VLOGF(1) << "Flush failed: there is a pending flush, "
- << "or VEA is not in kEncoding state";
+ << "or VideoEncodeAccelerator is not in kEncoding state";
NOTIFY_ERROR(kIllegalStateError);
child_task_runner_->PostTask(
FROM_HERE, base::BindOnce(std::move(flush_callback), false));
@@ -463,6 +534,8 @@ void V4L2VideoEncodeAccelerator::FrameProcessed(
DVLOGF(4) << "force_keyframe=" << force_keyframe
<< ", output_buffer_index=" << output_buffer_index;
DCHECK_GE(output_buffer_index, 0);
+ DCHECK(encoder_thread_.IsRunning());
+ DCHECK(!weak_this_.WasInvalidated());
frame->AddDestructionObserver(BindToCurrentLoop(
base::Bind(&V4L2VideoEncodeAccelerator::ReuseImageProcessorOutputBuffer,
@@ -477,7 +550,7 @@ void V4L2VideoEncodeAccelerator::ReuseImageProcessorOutputBuffer(
int output_buffer_index) {
DCHECK(child_task_runner_->BelongsToCurrentThread());
DVLOGF(4) << "output_buffer_index=" << output_buffer_index;
- free_image_processor_output_buffers_.push_back(output_buffer_index);
+ free_image_processor_output_buffer_indices_.push_back(output_buffer_index);
if (!image_processor_input_queue_.empty()) {
InputFrameInfo frame_info = image_processor_input_queue_.front();
image_processor_input_queue_.pop();
@@ -734,7 +807,7 @@ void V4L2VideoEncodeAccelerator::Dequeue() {
dqbuf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
dqbuf.memory = input_memory_type_;
dqbuf.m.planes = planes;
- dqbuf.length = input_planes_count_;
+ dqbuf.length = device_input_layout_->num_buffers();
if (device_->Ioctl(VIDIOC_DQBUF, &dqbuf) != 0) {
if (errno == EAGAIN) {
// EAGAIN if we're just out of buffers to dequeue.
@@ -853,16 +926,27 @@ bool V4L2VideoEncodeAccelerator::EnqueueInputRecord() {
qbuf.timestamp.tv_usec =
frame->timestamp().InMicroseconds() -
frame->timestamp().InSeconds() * base::Time::kMicrosecondsPerSecond;
- DCHECK_EQ(device_input_format_, frame->format());
-
- for (size_t i = 0; i < input_planes_count_; ++i) {
- qbuf.m.planes[i].bytesused = base::checked_cast<__u32>(
- VideoFrame::PlaneSize(frame->format(), i, input_allocated_size_)
- .GetArea());
+ DCHECK_EQ(device_input_layout_->format(), frame->format());
+
+ for (size_t i = 0; i < device_input_layout_->num_buffers(); ++i) {
+ // Single-buffer input format may have multiple color planes, so bytesused
+ // of the single buffer should be sum of each color planes' size.
+ if (device_input_layout_->num_buffers() == 1) {
+ qbuf.m.planes[i].bytesused = VideoFrame::AllocationSize(
+ frame->format(), device_input_layout_->coded_size());
+ } else {
+ DCHECK_EQ(device_input_layout_->num_buffers(),
+ VideoFrame::NumPlanes(device_input_layout_->format()));
+ qbuf.m.planes[i].bytesused = base::checked_cast<__u32>(
+ VideoFrame::PlaneSize(frame->format(), i,
+ device_input_layout_->coded_size())
+ .GetArea());
+ }
switch (input_memory_type_) {
case V4L2_MEMORY_USERPTR:
- qbuf.m.planes[i].length = qbuf.m.planes[i].bytesused;
+ // Use buffer_size VideoEncodeAccelerator HW requested by S_FMT.
+ qbuf.m.planes[i].length = device_input_layout_->buffer_sizes()[i];
qbuf.m.planes[i].m.userptr =
reinterpret_cast<unsigned long>(frame->data(i));
DCHECK(qbuf.m.planes[i].m.userptr);
@@ -871,7 +955,7 @@ bool V4L2VideoEncodeAccelerator::EnqueueInputRecord() {
case V4L2_MEMORY_DMABUF: {
const auto& fds = frame->DmabufFds();
const auto& planes = frame->layout().planes();
- DCHECK_EQ(input_planes_count_, planes.size());
+ DCHECK_EQ(device_input_layout_->num_buffers(), planes.size());
qbuf.m.planes[i].m.fd =
(i < fds.size()) ? fds[i].get() : fds.back().get();
// TODO(crbug.com/901264): The way to pass an offset within a DMA-buf is
@@ -882,7 +966,8 @@ bool V4L2VideoEncodeAccelerator::EnqueueInputRecord() {
qbuf.m.planes[i].bytesused += qbuf.m.planes[i].data_offset;
// Workaround: filling length should not be needed. This is a bug of
// videobuf2 library.
- qbuf.m.planes[i].length = qbuf.m.planes[i].bytesused;
+ qbuf.m.planes[i].length = device_input_layout_->buffer_sizes()[i] +
+ qbuf.m.planes[i].data_offset;
DCHECK_NE(qbuf.m.planes[i].m.fd, -1);
break;
}
@@ -893,8 +978,9 @@ bool V4L2VideoEncodeAccelerator::EnqueueInputRecord() {
}
qbuf.memory = input_memory_type_;
- qbuf.length = input_planes_count_;
+ qbuf.length = device_input_layout_->num_buffers();
+ DVLOGF(4) << "Calling VIDIOC_QBUF: " << V4L2Device::V4L2BufferToString(qbuf);
IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QBUF, &qbuf);
input_record.at_device = true;
input_record.frame = frame;
@@ -1121,16 +1207,15 @@ bool V4L2VideoEncodeAccelerator::NegotiateInputFormat(
DCHECK(!input_streamon_);
DCHECK(!output_streamon_);
- device_input_format_ = PIXEL_FORMAT_UNKNOWN;
- input_planes_count_ = 0;
-
- const std::vector<uint32_t> pix_fmt_candidates = {
- // First see if the device can use the provided format directly.
- // V4L2 VEA only supports multi plane input pixel format.
- V4L2Device::VideoPixelFormatToV4L2PixFmt(input_format, false),
- // Second try preferred input format.
- device_->PreferredInputFormat(V4L2Device::Type::kEncoder),
- };
+ // First see if the device can use the provided format directly.
+ std::vector<uint32_t> pix_fmt_candidates = {
+ V4L2Device::VideoPixelFormatToV4L2PixFmt(input_format, false)};
+ // Second try preferred input formats for both single-planar and
+ // multi-planar.
+ for (auto preferred_format :
+ device_->PreferredInputFormat(V4L2Device::Type::kEncoder)) {
+ pix_fmt_candidates.push_back(preferred_format);
+ }
for (const auto pix_fmt : pix_fmt_candidates) {
auto trying_format = V4L2Device::V4L2PixFmtToVideoPixelFormat(pix_fmt);
@@ -1139,7 +1224,6 @@ bool V4L2VideoEncodeAccelerator::NegotiateInputFormat(
DCHECK_LE(planes_count, static_cast<size_t>(VIDEO_MAX_PLANES));
VLOGF(2) << "Trying S_FMT with " << FourccToString(pix_fmt) << " ("
<< trying_format << ").";
-
struct v4l2_format format;
memset(&format, 0, sizeof(format));
format.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
@@ -1149,24 +1233,27 @@ bool V4L2VideoEncodeAccelerator::NegotiateInputFormat(
format.fmt.pix_mp.num_planes = planes_count;
if (device_->Ioctl(VIDIOC_S_FMT, &format) == 0 &&
format.fmt.pix_mp.pixelformat == pix_fmt) {
- VLOGF(2) << "Success: S_FMT with" << FourccToString(pix_fmt);
- // Take device-adjusted sizes for allocated size. If the size is adjusted
- // down, it means the input is too big and the hardware does not support
- // it.
- auto adjusted_size = V4L2Device::CodedSizeFromV4L2Format(format);
- if (!gfx::Rect(adjusted_size).Contains(gfx::Rect(visible_size_))) {
- VLOGF(1) << "Input size too big " << visible_size_.ToString()
- << ", adjusted to " << adjusted_size.ToString();
+ VLOGF(2) << "Success: S_FMT with " << FourccToString(pix_fmt);
+ device_input_layout_ = V4L2Device::V4L2FormatToVideoFrameLayout(format);
+ if (!device_input_layout_) {
+ VLOGF(1) << "Invalid device_input_layout_";
return false;
}
-
- device_input_format_ = trying_format;
- input_planes_count_ = planes_count;
- input_allocated_size_ = adjusted_size;
+ VLOG(2) << "Negotiated device_input_layout_: " << *device_input_layout_;
+ if (!gfx::Rect(device_input_layout_->coded_size())
+ .Contains(gfx::Rect(visible_size_))) {
+ VLOGF(1) << "Input size " << visible_size_.ToString()
+ << " exceeds encoder capability. Size encoder can handle: "
+ << device_input_layout_->coded_size().ToString();
+ return false;
+ }
+ // TODO(crbug.com/914700): Remove this once
+ // Client::RequireBitstreamBuffers uses input's VideoFrameLayout to
+ // allocate input buffer.
+ input_allocated_size_ = V4L2Device::AllocatedSizeFromV4L2Format(format);
return true;
}
}
-
return false;
}
@@ -1403,7 +1490,7 @@ bool V4L2VideoEncodeAccelerator::CreateOutputBuffers() {
buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
buffer.memory = V4L2_MEMORY_MMAP;
buffer.m.planes = planes;
- buffer.length = arraysize(planes);
+ buffer.length = base::size(planes);
IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QUERYBUF, &buffer);
void* address = device_->Mmap(NULL,
buffer.m.planes[0].length,
diff --git a/chromium/media/gpu/v4l2/v4l2_video_encode_accelerator.h b/chromium/media/gpu/v4l2/v4l2_video_encode_accelerator.h
index abd367119a4..7e2d30cc062 100644
--- a/chromium/media/gpu/v4l2/v4l2_video_encode_accelerator.h
+++ b/chromium/media/gpu/v4l2/v4l2_video_encode_accelerator.h
@@ -15,7 +15,9 @@
#include "base/containers/queue.h"
#include "base/files/scoped_file.h"
#include "base/macros.h"
+#include "base/memory/scoped_refptr.h"
#include "base/memory/weak_ptr.h"
+#include "base/optional.h"
#include "base/threading/thread.h"
#include "base/time/time.h"
#include "media/gpu/image_processor.h"
@@ -211,6 +213,10 @@ class MEDIA_GPU_EXPORT V4L2VideoEncodeAccelerator
// false otherwise.
bool IsCtrlExposed(uint32_t ctrl_id);
+ // Allocates video frames for image processor's output buffers.
+ // Returns false if there's something wrong.
+ bool AllocateImageProcessorOutputBuffers();
+
// Recycle output buffer of image processor with |output_buffer_index|.
void ReuseImageProcessorOutputBuffer(int output_buffer_index);
@@ -222,17 +228,22 @@ class MEDIA_GPU_EXPORT V4L2VideoEncodeAccelerator
size_t bitstream_size,
std::unique_ptr<BitstreamBufferRef> buffer_ref);
+ // Initializes input_memory_type_.
+ bool InitInputMemoryType(const Config& config);
+
// Our original calling task runner for the child thread.
const scoped_refptr<base::SingleThreadTaskRunner> child_task_runner_;
gfx::Size visible_size_;
- // Input allocated size required by the device.
+ // Layout of device accepted input VideoFrame.
+ base::Optional<VideoFrameLayout> device_input_layout_;
+ // Input allocated size calculated by
+ // V4L2Device::AllocatedSizeFromV4L2Format().
+ // TODO(crbug.com/914700): Remove this once Client::RequireBitstreamBuffers
+ // uses input's VideoFrameLayout to allocate input buffer.
gfx::Size input_allocated_size_;
- size_t output_buffer_byte_size_;
- // Formats for input frames and the output stream.
- VideoPixelFormat device_input_format_;
- size_t input_planes_count_;
+ size_t output_buffer_byte_size_;
uint32_t output_format_fourcc_;
//
@@ -296,14 +307,18 @@ class MEDIA_GPU_EXPORT V4L2VideoEncodeAccelerator
// Image processor, if one is in use.
std::unique_ptr<ImageProcessor> image_processor_;
+ // Video frames for image processor output / VideoEncodeAccelerator input.
+ // Only accessed on child thread.
+ std::vector<scoped_refptr<VideoFrame>> image_processor_output_buffers_;
// Indexes of free image processor output buffers. Only accessed on child
// thread.
- std::vector<int> free_image_processor_output_buffers_;
+ std::vector<size_t> free_image_processor_output_buffer_indices_;
// Video frames ready to be processed. Only accessed on child thread.
base::queue<InputFrameInfo> image_processor_input_queue_;
- // This thread services tasks posted from the VEA API entry points by the
- // child thread and device service callbacks posted from the device thread.
+ // This thread services tasks posted from the VideoEncodeAccelerator API entry
+ // points by the child thread and device service callbacks posted from the
+ // device thread.
base::Thread encoder_thread_;
// The device polling thread handles notifications of V4L2 device changes.
diff --git a/chromium/media/gpu/v4l2/v4l2_vp8_accelerator.cc b/chromium/media/gpu/v4l2/v4l2_vp8_accelerator.cc
new file mode 100644
index 00000000000..314bc49278e
--- /dev/null
+++ b/chromium/media/gpu/v4l2/v4l2_vp8_accelerator.cc
@@ -0,0 +1,259 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/gpu/v4l2/v4l2_vp8_accelerator.h"
+
+#include <type_traits>
+
+#include <linux/videodev2.h>
+
+#include "base/logging.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/stl_util.h"
+#include "media/filters/vp8_parser.h"
+#include "media/gpu/macros.h"
+#include "media/gpu/v4l2/v4l2_decode_surface.h"
+#include "media/gpu/v4l2/v4l2_decode_surface_handler.h"
+#include "media/gpu/v4l2/v4l2_device.h"
+#include "media/gpu/vp8_picture.h"
+
+namespace media {
+namespace {
+
+void FillV4L2SegmentationHeader(const Vp8SegmentationHeader& vp8_sgmnt_hdr,
+ struct v4l2_vp8_sgmnt_hdr* v4l2_sgmnt_hdr) {
+#define SET_V4L2_SGMNT_HDR_FLAG_IF(cond, flag) \
+ v4l2_sgmnt_hdr->flags |= ((vp8_sgmnt_hdr.cond) ? (flag) : 0)
+ SET_V4L2_SGMNT_HDR_FLAG_IF(segmentation_enabled,
+ V4L2_VP8_SEGMNT_HDR_FLAG_ENABLED);
+ SET_V4L2_SGMNT_HDR_FLAG_IF(update_mb_segmentation_map,
+ V4L2_VP8_SEGMNT_HDR_FLAG_UPDATE_MAP);
+ SET_V4L2_SGMNT_HDR_FLAG_IF(update_segment_feature_data,
+ V4L2_VP8_SEGMNT_HDR_FLAG_UPDATE_FEATURE_DATA);
+#undef SET_V4L2_SPARM_FLAG_IF
+ v4l2_sgmnt_hdr->segment_feature_mode = vp8_sgmnt_hdr.segment_feature_mode;
+
+ SafeArrayMemcpy(v4l2_sgmnt_hdr->quant_update,
+ vp8_sgmnt_hdr.quantizer_update_value);
+ SafeArrayMemcpy(v4l2_sgmnt_hdr->lf_update, vp8_sgmnt_hdr.lf_update_value);
+ SafeArrayMemcpy(v4l2_sgmnt_hdr->segment_probs, vp8_sgmnt_hdr.segment_prob);
+}
+
+void FillV4L2LoopfilterHeader(const Vp8LoopFilterHeader& vp8_loopfilter_hdr,
+ struct v4l2_vp8_loopfilter_hdr* v4l2_lf_hdr) {
+#define SET_V4L2_LF_HDR_FLAG_IF(cond, flag) \
+ v4l2_lf_hdr->flags |= ((vp8_loopfilter_hdr.cond) ? (flag) : 0)
+ SET_V4L2_LF_HDR_FLAG_IF(loop_filter_adj_enable, V4L2_VP8_LF_HDR_ADJ_ENABLE);
+ SET_V4L2_LF_HDR_FLAG_IF(mode_ref_lf_delta_update,
+ V4L2_VP8_LF_HDR_DELTA_UPDATE);
+#undef SET_V4L2_SGMNT_HDR_FLAG_IF
+
+#define LF_HDR_TO_V4L2_LF_HDR(a) v4l2_lf_hdr->a = vp8_loopfilter_hdr.a;
+ LF_HDR_TO_V4L2_LF_HDR(type);
+ LF_HDR_TO_V4L2_LF_HDR(level);
+ LF_HDR_TO_V4L2_LF_HDR(sharpness_level);
+#undef LF_HDR_TO_V4L2_LF_HDR
+
+ SafeArrayMemcpy(v4l2_lf_hdr->ref_frm_delta_magnitude,
+ vp8_loopfilter_hdr.ref_frame_delta);
+ SafeArrayMemcpy(v4l2_lf_hdr->mb_mode_delta_magnitude,
+ vp8_loopfilter_hdr.mb_mode_delta);
+}
+
+void FillV4L2QuantizationHeader(
+ const Vp8QuantizationHeader& vp8_quant_hdr,
+ struct v4l2_vp8_quantization_hdr* v4l2_quant_hdr) {
+ v4l2_quant_hdr->y_ac_qi = vp8_quant_hdr.y_ac_qi;
+ v4l2_quant_hdr->y_dc_delta = vp8_quant_hdr.y_dc_delta;
+ v4l2_quant_hdr->y2_dc_delta = vp8_quant_hdr.y2_dc_delta;
+ v4l2_quant_hdr->y2_ac_delta = vp8_quant_hdr.y2_ac_delta;
+ v4l2_quant_hdr->uv_dc_delta = vp8_quant_hdr.uv_dc_delta;
+ v4l2_quant_hdr->uv_ac_delta = vp8_quant_hdr.uv_ac_delta;
+}
+
+void FillV4L2Vp8EntropyHeader(const Vp8EntropyHeader& vp8_entropy_hdr,
+ struct v4l2_vp8_entropy_hdr* v4l2_entropy_hdr) {
+ SafeArrayMemcpy(v4l2_entropy_hdr->coeff_probs, vp8_entropy_hdr.coeff_probs);
+ SafeArrayMemcpy(v4l2_entropy_hdr->y_mode_probs, vp8_entropy_hdr.y_mode_probs);
+ SafeArrayMemcpy(v4l2_entropy_hdr->uv_mode_probs,
+ vp8_entropy_hdr.uv_mode_probs);
+ SafeArrayMemcpy(v4l2_entropy_hdr->mv_probs, vp8_entropy_hdr.mv_probs);
+}
+
+} // namespace
+
+class V4L2VP8Picture : public VP8Picture {
+ public:
+ explicit V4L2VP8Picture(const scoped_refptr<V4L2DecodeSurface>& dec_surface)
+ : dec_surface_(dec_surface) {}
+
+ V4L2VP8Picture* AsV4L2VP8Picture() override { return this; }
+ scoped_refptr<V4L2DecodeSurface> dec_surface() { return dec_surface_; }
+
+ private:
+ ~V4L2VP8Picture() override {}
+
+ scoped_refptr<V4L2DecodeSurface> dec_surface_;
+
+ DISALLOW_COPY_AND_ASSIGN(V4L2VP8Picture);
+};
+
+V4L2VP8Accelerator::V4L2VP8Accelerator(
+ V4L2DecodeSurfaceHandler* surface_handler,
+ V4L2Device* device)
+ : surface_handler_(surface_handler), device_(device) {
+ DCHECK(surface_handler_);
+}
+
+V4L2VP8Accelerator::~V4L2VP8Accelerator() {}
+
+scoped_refptr<VP8Picture> V4L2VP8Accelerator::CreateVP8Picture() {
+ scoped_refptr<V4L2DecodeSurface> dec_surface =
+ surface_handler_->CreateSurface();
+ if (!dec_surface)
+ return nullptr;
+
+ return new V4L2VP8Picture(dec_surface);
+}
+
+bool V4L2VP8Accelerator::SubmitDecode(
+ scoped_refptr<VP8Picture> pic,
+ const Vp8ReferenceFrameVector& reference_frames) {
+ struct v4l2_ctrl_vp8_frame_hdr v4l2_frame_hdr;
+ memset(&v4l2_frame_hdr, 0, sizeof(v4l2_frame_hdr));
+
+ const auto& frame_hdr = pic->frame_hdr;
+ v4l2_frame_hdr.key_frame = frame_hdr->frame_type;
+#define FHDR_TO_V4L2_FHDR(a) v4l2_frame_hdr.a = frame_hdr->a
+ FHDR_TO_V4L2_FHDR(version);
+ FHDR_TO_V4L2_FHDR(width);
+ FHDR_TO_V4L2_FHDR(horizontal_scale);
+ FHDR_TO_V4L2_FHDR(height);
+ FHDR_TO_V4L2_FHDR(vertical_scale);
+ FHDR_TO_V4L2_FHDR(sign_bias_golden);
+ FHDR_TO_V4L2_FHDR(sign_bias_alternate);
+ FHDR_TO_V4L2_FHDR(prob_skip_false);
+ FHDR_TO_V4L2_FHDR(prob_intra);
+ FHDR_TO_V4L2_FHDR(prob_last);
+ FHDR_TO_V4L2_FHDR(prob_gf);
+ FHDR_TO_V4L2_FHDR(bool_dec_range);
+ FHDR_TO_V4L2_FHDR(bool_dec_value);
+ FHDR_TO_V4L2_FHDR(bool_dec_count);
+#undef FHDR_TO_V4L2_FHDR
+
+#define SET_V4L2_FRM_HDR_FLAG_IF(cond, flag) \
+ v4l2_frame_hdr.flags |= ((frame_hdr->cond) ? (flag) : 0)
+ SET_V4L2_FRM_HDR_FLAG_IF(is_experimental,
+ V4L2_VP8_FRAME_HDR_FLAG_EXPERIMENTAL);
+ SET_V4L2_FRM_HDR_FLAG_IF(show_frame, V4L2_VP8_FRAME_HDR_FLAG_SHOW_FRAME);
+ SET_V4L2_FRM_HDR_FLAG_IF(mb_no_skip_coeff,
+ V4L2_VP8_FRAME_HDR_FLAG_MB_NO_SKIP_COEFF);
+#undef SET_V4L2_FRM_HDR_FLAG_IF
+
+ FillV4L2SegmentationHeader(frame_hdr->segmentation_hdr,
+ &v4l2_frame_hdr.sgmnt_hdr);
+
+ FillV4L2LoopfilterHeader(frame_hdr->loopfilter_hdr, &v4l2_frame_hdr.lf_hdr);
+
+ FillV4L2QuantizationHeader(frame_hdr->quantization_hdr,
+ &v4l2_frame_hdr.quant_hdr);
+
+ FillV4L2Vp8EntropyHeader(frame_hdr->entropy_hdr, &v4l2_frame_hdr.entropy_hdr);
+
+ v4l2_frame_hdr.first_part_size =
+ base::checked_cast<__u32>(frame_hdr->first_part_size);
+ v4l2_frame_hdr.first_part_offset =
+ base::checked_cast<__u32>(frame_hdr->first_part_offset);
+ v4l2_frame_hdr.macroblock_bit_offset =
+ base::checked_cast<__u32>(frame_hdr->macroblock_bit_offset);
+ v4l2_frame_hdr.num_dct_parts = frame_hdr->num_of_dct_partitions;
+
+ static_assert(std::extent<decltype(v4l2_frame_hdr.dct_part_sizes)>() ==
+ std::extent<decltype(frame_hdr->dct_partition_sizes)>(),
+ "DCT partition size arrays must have equal number of elements");
+ for (size_t i = 0; i < frame_hdr->num_of_dct_partitions &&
+ i < base::size(v4l2_frame_hdr.dct_part_sizes);
+ ++i)
+ v4l2_frame_hdr.dct_part_sizes[i] = frame_hdr->dct_partition_sizes[i];
+
+ scoped_refptr<V4L2DecodeSurface> dec_surface =
+ VP8PictureToV4L2DecodeSurface(pic);
+ std::vector<scoped_refptr<V4L2DecodeSurface>> ref_surfaces;
+
+ const auto last_frame = reference_frames.GetFrame(Vp8RefType::VP8_FRAME_LAST);
+ if (last_frame) {
+ scoped_refptr<V4L2DecodeSurface> last_frame_surface =
+ VP8PictureToV4L2DecodeSurface(last_frame);
+ v4l2_frame_hdr.last_frame = last_frame_surface->GetReferenceID();
+ ref_surfaces.push_back(last_frame_surface);
+ } else {
+ v4l2_frame_hdr.last_frame = VIDEO_MAX_FRAME;
+ }
+
+ const auto golden_frame =
+ reference_frames.GetFrame(Vp8RefType::VP8_FRAME_GOLDEN);
+ if (golden_frame) {
+ scoped_refptr<V4L2DecodeSurface> golden_frame_surface =
+ VP8PictureToV4L2DecodeSurface(golden_frame);
+ v4l2_frame_hdr.golden_frame = golden_frame_surface->GetReferenceID();
+ ref_surfaces.push_back(golden_frame_surface);
+ } else {
+ v4l2_frame_hdr.golden_frame = VIDEO_MAX_FRAME;
+ }
+
+ const auto alt_frame =
+ reference_frames.GetFrame(Vp8RefType::VP8_FRAME_ALTREF);
+ if (alt_frame) {
+ scoped_refptr<V4L2DecodeSurface> alt_frame_surface =
+ VP8PictureToV4L2DecodeSurface(alt_frame);
+ v4l2_frame_hdr.alt_frame = alt_frame_surface->GetReferenceID();
+ ref_surfaces.push_back(alt_frame_surface);
+ } else {
+ v4l2_frame_hdr.alt_frame = VIDEO_MAX_FRAME;
+ }
+
+ struct v4l2_ext_control ctrl;
+ memset(&ctrl, 0, sizeof(ctrl));
+ ctrl.id = V4L2_CID_MPEG_VIDEO_VP8_FRAME_HDR;
+ ctrl.size = sizeof(v4l2_frame_hdr);
+ ctrl.p_vp8_frame_hdr = &v4l2_frame_hdr;
+
+ struct v4l2_ext_controls ext_ctrls;
+ memset(&ext_ctrls, 0, sizeof(ext_ctrls));
+ ext_ctrls.count = 1;
+ ext_ctrls.controls = &ctrl;
+ dec_surface->PrepareSetCtrls(&ext_ctrls);
+ if (device_->Ioctl(VIDIOC_S_EXT_CTRLS, &ext_ctrls) != 0) {
+ VPLOGF(1) << "ioctl() failed: VIDIOC_S_EXT_CTRLS";
+ return false;
+ }
+
+ dec_surface->SetReferenceSurfaces(ref_surfaces);
+
+ if (!surface_handler_->SubmitSlice(dec_surface, frame_hdr->data,
+ frame_hdr->frame_size))
+ return false;
+
+ DVLOGF(4) << "Submitting decode for surface: " << dec_surface->ToString();
+ surface_handler_->DecodeSurface(dec_surface);
+ return true;
+}
+
+bool V4L2VP8Accelerator::OutputPicture(const scoped_refptr<VP8Picture>& pic) {
+ // TODO(crbug.com/647725): Insert correct color space.
+ surface_handler_->SurfaceReady(VP8PictureToV4L2DecodeSurface(pic),
+ pic->bitstream_id(), pic->visible_rect(),
+ VideoColorSpace());
+ return true;
+}
+
+scoped_refptr<V4L2DecodeSurface>
+V4L2VP8Accelerator::VP8PictureToV4L2DecodeSurface(
+ const scoped_refptr<VP8Picture>& pic) {
+ V4L2VP8Picture* v4l2_pic = pic->AsV4L2VP8Picture();
+ CHECK(v4l2_pic);
+ return v4l2_pic->dec_surface();
+}
+
+} // namespace media
diff --git a/chromium/media/gpu/v4l2/v4l2_vp8_accelerator.h b/chromium/media/gpu/v4l2/v4l2_vp8_accelerator.h
new file mode 100644
index 00000000000..edff830523c
--- /dev/null
+++ b/chromium/media/gpu/v4l2/v4l2_vp8_accelerator.h
@@ -0,0 +1,44 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_GPU_V4L2_V4L2_VP8_ACCELERATOR_H_
+#define MEDIA_GPU_V4L2_V4L2_VP8_ACCELERATOR_H_
+
+#include <vector>
+
+#include "base/macros.h"
+#include "base/memory/scoped_refptr.h"
+#include "media/gpu/vp8_decoder.h"
+
+namespace media {
+
+class V4L2Device;
+class V4L2DecodeSurface;
+class V4L2DecodeSurfaceHandler;
+
+class V4L2VP8Accelerator : public VP8Decoder::VP8Accelerator {
+ public:
+ explicit V4L2VP8Accelerator(V4L2DecodeSurfaceHandler* surface_handler,
+ V4L2Device* device);
+ ~V4L2VP8Accelerator() override;
+
+ // VP8Decoder::VP8Accelerator implementation.
+ scoped_refptr<VP8Picture> CreateVP8Picture() override;
+ bool SubmitDecode(scoped_refptr<VP8Picture> pic,
+ const Vp8ReferenceFrameVector& reference_frames) override;
+ bool OutputPicture(const scoped_refptr<VP8Picture>& pic) override;
+
+ private:
+ scoped_refptr<V4L2DecodeSurface> VP8PictureToV4L2DecodeSurface(
+ const scoped_refptr<VP8Picture>& pic);
+
+ V4L2DecodeSurfaceHandler* const surface_handler_;
+ V4L2Device* const device_;
+
+ DISALLOW_COPY_AND_ASSIGN(V4L2VP8Accelerator);
+};
+
+} // namespace media
+
+#endif // MEDIA_GPU_V4L2_V4L2_VP8_ACCELERATOR_H_
diff --git a/chromium/media/gpu/v4l2/v4l2_vp9_accelerator.cc b/chromium/media/gpu/v4l2/v4l2_vp9_accelerator.cc
new file mode 100644
index 00000000000..4b7794ae37b
--- /dev/null
+++ b/chromium/media/gpu/v4l2/v4l2_vp9_accelerator.cc
@@ -0,0 +1,419 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/gpu/v4l2/v4l2_vp9_accelerator.h"
+
+#include <type_traits>
+
+#include <linux/videodev2.h>
+#include <string.h>
+
+#include "base/logging.h"
+#include "base/stl_util.h"
+#include "media/gpu/macros.h"
+#include "media/gpu/v4l2/v4l2_decode_surface.h"
+#include "media/gpu/v4l2/v4l2_decode_surface_handler.h"
+#include "media/gpu/v4l2/v4l2_device.h"
+#include "media/gpu/vp9_picture.h"
+
+namespace media {
+namespace {
+
+void FillV4L2VP9LoopFilterParams(
+ const Vp9LoopFilterParams& vp9_lf_params,
+ struct v4l2_vp9_loop_filter_params* v4l2_lf_params) {
+#define SET_LF_PARAMS_FLAG_IF(cond, flag) \
+ v4l2_lf_params->flags |= ((vp9_lf_params.cond) ? (flag) : 0)
+ SET_LF_PARAMS_FLAG_IF(delta_enabled, V4L2_VP9_LOOP_FLTR_FLAG_DELTA_ENABLED);
+ SET_LF_PARAMS_FLAG_IF(delta_update, V4L2_VP9_LOOP_FLTR_FLAG_DELTA_UPDATE);
+#undef SET_LF_PARAMS_FLAG_IF
+
+ v4l2_lf_params->level = vp9_lf_params.level;
+ v4l2_lf_params->sharpness = vp9_lf_params.sharpness;
+
+ SafeArrayMemcpy(v4l2_lf_params->deltas, vp9_lf_params.ref_deltas);
+ SafeArrayMemcpy(v4l2_lf_params->mode_deltas, vp9_lf_params.mode_deltas);
+ SafeArrayMemcpy(v4l2_lf_params->lvl_lookup, vp9_lf_params.lvl);
+}
+
+void FillV4L2VP9QuantizationParams(
+ const Vp9QuantizationParams& vp9_quant_params,
+ struct v4l2_vp9_quantization_params* v4l2_q_params) {
+#define SET_Q_PARAMS_FLAG_IF(cond, flag) \
+ v4l2_q_params->flags |= ((vp9_quant_params.cond) ? (flag) : 0)
+ SET_Q_PARAMS_FLAG_IF(IsLossless(), V4L2_VP9_QUANT_PARAMS_FLAG_LOSSLESS);
+#undef SET_Q_PARAMS_FLAG_IF
+
+#define Q_PARAMS_TO_V4L2_Q_PARAMS(a) v4l2_q_params->a = vp9_quant_params.a
+ Q_PARAMS_TO_V4L2_Q_PARAMS(base_q_idx);
+ Q_PARAMS_TO_V4L2_Q_PARAMS(delta_q_y_dc);
+ Q_PARAMS_TO_V4L2_Q_PARAMS(delta_q_uv_dc);
+ Q_PARAMS_TO_V4L2_Q_PARAMS(delta_q_uv_ac);
+#undef Q_PARAMS_TO_V4L2_Q_PARAMS
+}
+
+void FillV4L2VP9SegmentationParams(
+ const Vp9SegmentationParams& vp9_segm_params,
+ struct v4l2_vp9_segmentation_params* v4l2_segm_params) {
+#define SET_SEG_PARAMS_FLAG_IF(cond, flag) \
+ v4l2_segm_params->flags |= ((vp9_segm_params.cond) ? (flag) : 0)
+ SET_SEG_PARAMS_FLAG_IF(enabled, V4L2_VP9_SGMNT_PARAM_FLAG_ENABLED);
+ SET_SEG_PARAMS_FLAG_IF(update_map, V4L2_VP9_SGMNT_PARAM_FLAG_UPDATE_MAP);
+ SET_SEG_PARAMS_FLAG_IF(temporal_update,
+ V4L2_VP9_SGMNT_PARAM_FLAG_TEMPORAL_UPDATE);
+ SET_SEG_PARAMS_FLAG_IF(update_data, V4L2_VP9_SGMNT_PARAM_FLAG_UPDATE_DATA);
+ SET_SEG_PARAMS_FLAG_IF(abs_or_delta_update,
+ V4L2_VP9_SGMNT_PARAM_FLAG_ABS_OR_DELTA_UPDATE);
+#undef SET_SEG_PARAMS_FLAG_IF
+
+ SafeArrayMemcpy(v4l2_segm_params->tree_probs, vp9_segm_params.tree_probs);
+ SafeArrayMemcpy(v4l2_segm_params->pred_probs, vp9_segm_params.pred_probs);
+ SafeArrayMemcpy(v4l2_segm_params->feature_data, vp9_segm_params.feature_data);
+
+ static_assert(
+ std::extent<decltype(v4l2_segm_params->feature_enabled)>() ==
+ std::extent<decltype(vp9_segm_params.feature_enabled)>() &&
+ std::extent<decltype(v4l2_segm_params->feature_enabled[0])>() ==
+ std::extent<decltype(vp9_segm_params.feature_enabled[0])>(),
+ "feature_enabled arrays must be of same size");
+ for (size_t i = 0; i < base::size(v4l2_segm_params->feature_enabled); ++i) {
+ for (size_t j = 0; j < base::size(v4l2_segm_params->feature_enabled[i]);
+ ++j) {
+ v4l2_segm_params->feature_enabled[i][j] =
+ vp9_segm_params.feature_enabled[i][j];
+ }
+ }
+}
+
+void FillV4L2Vp9EntropyContext(const Vp9FrameContext& vp9_frame_ctx,
+ struct v4l2_vp9_entropy_ctx* v4l2_entropy_ctx) {
+#define ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(a) \
+ SafeArrayMemcpy(v4l2_entropy_ctx->a, vp9_frame_ctx.a)
+ ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(tx_probs_8x8);
+ ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(tx_probs_16x16);
+ ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(tx_probs_32x32);
+
+ ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(coef_probs);
+ ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(skip_prob);
+ ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(inter_mode_probs);
+ ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(interp_filter_probs);
+ ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(is_inter_prob);
+
+ ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(comp_mode_prob);
+ ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(single_ref_prob);
+ ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(comp_ref_prob);
+
+ ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(y_mode_probs);
+ ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(uv_mode_probs);
+
+ ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(partition_probs);
+
+ ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(mv_joint_probs);
+ ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(mv_sign_prob);
+ ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(mv_class_probs);
+ ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(mv_class0_bit_prob);
+ ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(mv_bits_prob);
+ ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(mv_class0_fr_probs);
+ ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(mv_fr_probs);
+ ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(mv_class0_hp_prob);
+ ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR(mv_hp_prob);
+#undef ARRAY_MEMCPY_CHECKED_FRM_CTX_TO_V4L2_ENTR
+}
+
+void FillVp9FrameContext(struct v4l2_vp9_entropy_ctx& v4l2_entropy_ctx,
+ Vp9FrameContext* vp9_frame_ctx) {
+#define ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(a) \
+ SafeArrayMemcpy(vp9_frame_ctx->a, v4l2_entropy_ctx.a)
+ ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(tx_probs_8x8);
+ ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(tx_probs_16x16);
+ ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(tx_probs_32x32);
+
+ ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(coef_probs);
+ ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(skip_prob);
+ ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(inter_mode_probs);
+ ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(interp_filter_probs);
+ ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(is_inter_prob);
+
+ ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(comp_mode_prob);
+ ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(single_ref_prob);
+ ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(comp_ref_prob);
+
+ ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(y_mode_probs);
+ ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(uv_mode_probs);
+
+ ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(partition_probs);
+
+ ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(mv_joint_probs);
+ ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(mv_sign_prob);
+ ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(mv_class_probs);
+ ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(mv_class0_bit_prob);
+ ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(mv_bits_prob);
+ ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(mv_class0_fr_probs);
+ ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(mv_fr_probs);
+ ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(mv_class0_hp_prob);
+ ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX(mv_hp_prob);
+#undef ARRAY_MEMCPY_CHECKED_V4L2_ENTR_TO_FRM_CTX
+}
+
+} // namespace
+
+class V4L2VP9Picture : public VP9Picture {
+ public:
+ explicit V4L2VP9Picture(const scoped_refptr<V4L2DecodeSurface>& dec_surface)
+ : dec_surface_(dec_surface) {}
+
+ V4L2VP9Picture* AsV4L2VP9Picture() override { return this; }
+ scoped_refptr<V4L2DecodeSurface> dec_surface() { return dec_surface_; }
+
+ private:
+ ~V4L2VP9Picture() override {}
+
+ scoped_refptr<VP9Picture> CreateDuplicate() override {
+ return new V4L2VP9Picture(dec_surface_);
+ }
+
+ scoped_refptr<V4L2DecodeSurface> dec_surface_;
+
+ DISALLOW_COPY_AND_ASSIGN(V4L2VP9Picture);
+};
+
+V4L2VP9Accelerator::V4L2VP9Accelerator(
+ V4L2DecodeSurfaceHandler* surface_handler,
+ V4L2Device* device)
+ : surface_handler_(surface_handler), device_(device) {
+ DCHECK(surface_handler_);
+
+ struct v4l2_queryctrl query_ctrl;
+ memset(&query_ctrl, 0, sizeof(query_ctrl));
+ query_ctrl.id = V4L2_CID_MPEG_VIDEO_VP9_ENTROPY;
+ device_needs_frame_context_ =
+ (device_->Ioctl(VIDIOC_QUERYCTRL, &query_ctrl) == 0);
+
+ DVLOG_IF(1, device_needs_frame_context_)
+ << "Device requires frame context parsing";
+}
+
+V4L2VP9Accelerator::~V4L2VP9Accelerator() {}
+
+scoped_refptr<VP9Picture> V4L2VP9Accelerator::CreateVP9Picture() {
+ scoped_refptr<V4L2DecodeSurface> dec_surface =
+ surface_handler_->CreateSurface();
+ if (!dec_surface)
+ return nullptr;
+
+ return new V4L2VP9Picture(dec_surface);
+}
+
+bool V4L2VP9Accelerator::SubmitDecode(
+ const scoped_refptr<VP9Picture>& pic,
+ const Vp9SegmentationParams& segm_params,
+ const Vp9LoopFilterParams& lf_params,
+ const std::vector<scoped_refptr<VP9Picture>>& ref_pictures,
+ const base::Closure& done_cb) {
+ const Vp9FrameHeader* frame_hdr = pic->frame_hdr.get();
+ DCHECK(frame_hdr);
+
+ struct v4l2_ctrl_vp9_frame_hdr v4l2_frame_hdr;
+ memset(&v4l2_frame_hdr, 0, sizeof(v4l2_frame_hdr));
+
+#define FHDR_TO_V4L2_FHDR(a) v4l2_frame_hdr.a = frame_hdr->a
+ FHDR_TO_V4L2_FHDR(profile);
+ FHDR_TO_V4L2_FHDR(frame_type);
+
+ FHDR_TO_V4L2_FHDR(bit_depth);
+ FHDR_TO_V4L2_FHDR(color_range);
+ FHDR_TO_V4L2_FHDR(subsampling_x);
+ FHDR_TO_V4L2_FHDR(subsampling_y);
+
+ FHDR_TO_V4L2_FHDR(frame_width);
+ FHDR_TO_V4L2_FHDR(frame_height);
+ FHDR_TO_V4L2_FHDR(render_width);
+ FHDR_TO_V4L2_FHDR(render_height);
+
+ FHDR_TO_V4L2_FHDR(reset_frame_context);
+
+ FHDR_TO_V4L2_FHDR(interpolation_filter);
+ FHDR_TO_V4L2_FHDR(frame_context_idx);
+
+ FHDR_TO_V4L2_FHDR(tile_cols_log2);
+ FHDR_TO_V4L2_FHDR(tile_rows_log2);
+
+ FHDR_TO_V4L2_FHDR(header_size_in_bytes);
+#undef FHDR_TO_V4L2_FHDR
+ v4l2_frame_hdr.color_space = static_cast<uint8_t>(frame_hdr->color_space);
+
+ FillV4L2VP9QuantizationParams(frame_hdr->quant_params,
+ &v4l2_frame_hdr.quant_params);
+
+#define SET_V4L2_FRM_HDR_FLAG_IF(cond, flag) \
+ v4l2_frame_hdr.flags |= ((frame_hdr->cond) ? (flag) : 0)
+ SET_V4L2_FRM_HDR_FLAG_IF(show_frame, V4L2_VP9_FRAME_HDR_FLAG_SHOW_FRAME);
+ SET_V4L2_FRM_HDR_FLAG_IF(error_resilient_mode,
+ V4L2_VP9_FRAME_HDR_FLAG_ERR_RES);
+ SET_V4L2_FRM_HDR_FLAG_IF(intra_only, V4L2_VP9_FRAME_HDR_FLAG_FRAME_INTRA);
+ SET_V4L2_FRM_HDR_FLAG_IF(allow_high_precision_mv,
+ V4L2_VP9_FRAME_HDR_ALLOW_HIGH_PREC_MV);
+ SET_V4L2_FRM_HDR_FLAG_IF(refresh_frame_context,
+ V4L2_VP9_FRAME_HDR_REFRESH_FRAME_CTX);
+ SET_V4L2_FRM_HDR_FLAG_IF(frame_parallel_decoding_mode,
+ V4L2_VP9_FRAME_HDR_PARALLEL_DEC_MODE);
+#undef SET_V4L2_FRM_HDR_FLAG_IF
+
+ FillV4L2VP9LoopFilterParams(lf_params, &v4l2_frame_hdr.lf_params);
+ FillV4L2VP9SegmentationParams(segm_params, &v4l2_frame_hdr.sgmnt_params);
+
+ std::vector<struct v4l2_ext_control> ctrls;
+
+ struct v4l2_ext_control ctrl;
+ memset(&ctrl, 0, sizeof(ctrl));
+ ctrl.id = V4L2_CID_MPEG_VIDEO_VP9_FRAME_HDR;
+ ctrl.size = sizeof(v4l2_frame_hdr);
+ ctrl.p_vp9_frame_hdr = &v4l2_frame_hdr;
+ ctrls.push_back(ctrl);
+
+ struct v4l2_ctrl_vp9_decode_param v4l2_decode_param;
+ memset(&v4l2_decode_param, 0, sizeof(v4l2_decode_param));
+ DCHECK_EQ(ref_pictures.size(), base::size(v4l2_decode_param.ref_frames));
+
+ std::vector<scoped_refptr<V4L2DecodeSurface>> ref_surfaces;
+ for (size_t i = 0; i < ref_pictures.size(); ++i) {
+ if (ref_pictures[i]) {
+ scoped_refptr<V4L2DecodeSurface> ref_surface =
+ VP9PictureToV4L2DecodeSurface(ref_pictures[i]);
+
+ v4l2_decode_param.ref_frames[i] = ref_surface->GetReferenceID();
+ ref_surfaces.push_back(ref_surface);
+ } else {
+ v4l2_decode_param.ref_frames[i] = VIDEO_MAX_FRAME;
+ }
+ }
+
+ static_assert(std::extent<decltype(v4l2_decode_param.active_ref_frames)>() ==
+ std::extent<decltype(frame_hdr->ref_frame_idx)>(),
+ "active reference frame array sizes mismatch");
+
+ for (size_t i = 0; i < base::size(frame_hdr->ref_frame_idx); ++i) {
+ uint8_t idx = frame_hdr->ref_frame_idx[i];
+ if (idx >= ref_pictures.size())
+ return false;
+
+ struct v4l2_vp9_reference_frame* v4l2_ref_frame =
+ &v4l2_decode_param.active_ref_frames[i];
+
+ scoped_refptr<VP9Picture> ref_pic = ref_pictures[idx];
+ if (ref_pic) {
+ scoped_refptr<V4L2DecodeSurface> ref_surface =
+ VP9PictureToV4L2DecodeSurface(ref_pic);
+ v4l2_ref_frame->buf_index = ref_surface->GetReferenceID();
+#define REF_TO_V4L2_REF(a) v4l2_ref_frame->a = ref_pic->frame_hdr->a
+ REF_TO_V4L2_REF(frame_width);
+ REF_TO_V4L2_REF(frame_height);
+ REF_TO_V4L2_REF(bit_depth);
+ REF_TO_V4L2_REF(subsampling_x);
+ REF_TO_V4L2_REF(subsampling_y);
+#undef REF_TO_V4L2_REF
+ } else {
+ v4l2_ref_frame->buf_index = VIDEO_MAX_FRAME;
+ }
+ }
+
+ memset(&ctrl, 0, sizeof(ctrl));
+ ctrl.id = V4L2_CID_MPEG_VIDEO_VP9_DECODE_PARAM;
+ ctrl.size = sizeof(v4l2_decode_param);
+ ctrl.p_vp9_decode_param = &v4l2_decode_param;
+ ctrls.push_back(ctrl);
+
+ // Defined outside of the if() clause below as it must remain valid until
+ // the call to SubmitExtControls().
+ struct v4l2_ctrl_vp9_entropy v4l2_entropy;
+ if (device_needs_frame_context_) {
+ memset(&v4l2_entropy, 0, sizeof(v4l2_entropy));
+ FillV4L2Vp9EntropyContext(frame_hdr->initial_frame_context,
+ &v4l2_entropy.initial_entropy_ctx);
+ FillV4L2Vp9EntropyContext(frame_hdr->frame_context,
+ &v4l2_entropy.current_entropy_ctx);
+ v4l2_entropy.tx_mode = frame_hdr->compressed_header.tx_mode;
+ v4l2_entropy.reference_mode = frame_hdr->compressed_header.reference_mode;
+
+ memset(&ctrl, 0, sizeof(ctrl));
+ ctrl.id = V4L2_CID_MPEG_VIDEO_VP9_ENTROPY;
+ ctrl.size = sizeof(v4l2_entropy);
+ ctrl.p_vp9_entropy = &v4l2_entropy;
+ ctrls.push_back(ctrl);
+ }
+
+ scoped_refptr<V4L2DecodeSurface> dec_surface =
+ VP9PictureToV4L2DecodeSurface(pic);
+
+ struct v4l2_ext_controls ext_ctrls;
+ memset(&ext_ctrls, 0, sizeof(ext_ctrls));
+ ext_ctrls.count = ctrls.size();
+ ext_ctrls.controls = &ctrls[0];
+ dec_surface->PrepareSetCtrls(&ext_ctrls);
+ if (device_->Ioctl(VIDIOC_S_EXT_CTRLS, &ext_ctrls) != 0) {
+ VPLOGF(1) << "ioctl() failed: VIDIOC_S_EXT_CTRLS";
+ return false;
+ }
+
+ dec_surface->SetReferenceSurfaces(ref_surfaces);
+ dec_surface->SetDecodeDoneCallback(done_cb);
+
+ if (!surface_handler_->SubmitSlice(dec_surface, frame_hdr->data,
+ frame_hdr->frame_size))
+ return false;
+
+ DVLOGF(4) << "Submitting decode for surface: " << dec_surface->ToString();
+ surface_handler_->DecodeSurface(dec_surface);
+ return true;
+}
+
+bool V4L2VP9Accelerator::OutputPicture(const scoped_refptr<VP9Picture>& pic) {
+ // TODO(crbug.com/647725): Insert correct color space.
+ surface_handler_->SurfaceReady(VP9PictureToV4L2DecodeSurface(pic),
+ pic->bitstream_id(), pic->visible_rect(),
+ VideoColorSpace());
+ return true;
+}
+
+bool V4L2VP9Accelerator::GetFrameContext(const scoped_refptr<VP9Picture>& pic,
+ Vp9FrameContext* frame_ctx) {
+ struct v4l2_ctrl_vp9_entropy v4l2_entropy;
+ memset(&v4l2_entropy, 0, sizeof(v4l2_entropy));
+
+ struct v4l2_ext_control ctrl;
+ memset(&ctrl, 0, sizeof(ctrl));
+ ctrl.id = V4L2_CID_MPEG_VIDEO_VP9_ENTROPY;
+ ctrl.size = sizeof(v4l2_entropy);
+ ctrl.p_vp9_entropy = &v4l2_entropy;
+
+ scoped_refptr<V4L2DecodeSurface> dec_surface =
+ VP9PictureToV4L2DecodeSurface(pic);
+
+ struct v4l2_ext_controls ext_ctrls;
+ memset(&ext_ctrls, 0, sizeof(ext_ctrls));
+ ext_ctrls.count = 1;
+ ext_ctrls.controls = &ctrl;
+ dec_surface->PrepareSetCtrls(&ext_ctrls);
+ if (device_->Ioctl(VIDIOC_G_EXT_CTRLS, &ext_ctrls) != 0) {
+ VPLOGF(1) << "ioctl() failed: VIDIOC_G_EXT_CTRLS";
+ return false;
+ }
+
+ FillVp9FrameContext(v4l2_entropy.current_entropy_ctx, frame_ctx);
+ return true;
+}
+
+bool V4L2VP9Accelerator::IsFrameContextRequired() const {
+ return device_needs_frame_context_;
+}
+
+scoped_refptr<V4L2DecodeSurface>
+V4L2VP9Accelerator::VP9PictureToV4L2DecodeSurface(
+ const scoped_refptr<VP9Picture>& pic) {
+ V4L2VP9Picture* v4l2_pic = pic->AsV4L2VP9Picture();
+ CHECK(v4l2_pic);
+ return v4l2_pic->dec_surface();
+}
+
+} // namespace media
diff --git a/chromium/media/gpu/v4l2/v4l2_vp9_accelerator.h b/chromium/media/gpu/v4l2/v4l2_vp9_accelerator.h
new file mode 100644
index 00000000000..b2b52f462a7
--- /dev/null
+++ b/chromium/media/gpu/v4l2/v4l2_vp9_accelerator.h
@@ -0,0 +1,58 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_GPU_V4L2_V4L2_VP9_ACCELERATOR_H_
+#define MEDIA_GPU_V4L2_V4L2_VP9_ACCELERATOR_H_
+
+#include <vector>
+
+#include "base/callback.h"
+#include "base/macros.h"
+#include "base/memory/scoped_refptr.h"
+#include "media/filters/vp9_parser.h"
+#include "media/gpu/vp9_decoder.h"
+
+namespace media {
+
+class V4L2DecodeSurface;
+class V4L2DecodeSurfaceHandler;
+class V4L2Device;
+
+class V4L2VP9Accelerator : public VP9Decoder::VP9Accelerator {
+ public:
+ explicit V4L2VP9Accelerator(V4L2DecodeSurfaceHandler* surface_handler,
+ V4L2Device* device);
+ ~V4L2VP9Accelerator() override;
+
+ // VP9Decoder::VP9Accelerator implementation.
+ scoped_refptr<VP9Picture> CreateVP9Picture() override;
+
+ bool SubmitDecode(const scoped_refptr<VP9Picture>& pic,
+ const Vp9SegmentationParams& segm_params,
+ const Vp9LoopFilterParams& lf_params,
+ const std::vector<scoped_refptr<VP9Picture>>& ref_pictures,
+ const base::Closure& done_cb) override;
+
+ bool OutputPicture(const scoped_refptr<VP9Picture>& pic) override;
+
+ bool GetFrameContext(const scoped_refptr<VP9Picture>& pic,
+ Vp9FrameContext* frame_ctx) override;
+
+ bool IsFrameContextRequired() const override;
+
+ private:
+ scoped_refptr<V4L2DecodeSurface> VP9PictureToV4L2DecodeSurface(
+ const scoped_refptr<VP9Picture>& pic);
+
+ bool device_needs_frame_context_;
+
+ V4L2DecodeSurfaceHandler* const surface_handler_;
+ V4L2Device* const device_;
+
+ DISALLOW_COPY_AND_ASSIGN(V4L2VP9Accelerator);
+};
+
+} // namespace media
+
+#endif // MEDIA_GPU_V4L2_V4L2_VP9_ACCELERATOR_H_
diff --git a/chromium/media/gpu/vaapi/BUILD.gn b/chromium/media/gpu/vaapi/BUILD.gn
index df1051ebdd9..cf471b7feb8 100644
--- a/chromium/media/gpu/vaapi/BUILD.gn
+++ b/chromium/media/gpu/vaapi/BUILD.gn
@@ -5,49 +5,25 @@
import("//build/config/features.gni")
import("//build/config/ui.gni")
import("//media/gpu/args.gni")
+import("//tools/generate_stubs/rules.gni")
import("//ui/gl/features.gni")
import("//ui/ozone/ozone.gni")
assert(use_vaapi)
-action("libva_generate_stubs") {
+generate_stubs("libva_stubs") {
extra_header = "va_stub_header.fragment"
-
- script = "//tools/generate_stubs/generate_stubs.py"
- sources = [
- "va.sigs",
- ]
- inputs = [
- extra_header,
- ]
+ sigs = [ "va.sigs" ]
if (use_x11) {
- sources += [ "va_x11.sigs" ]
+ sigs += [ "va_x11.sigs" ]
}
if (is_linux) {
- sources += [ "va_drm.sigs" ]
+ sigs += [ "va_drm.sigs" ]
}
- stubs_filename_root = "va_stubs"
-
- outputs = [
- "$target_gen_dir/$stubs_filename_root.cc",
- "$target_gen_dir/$stubs_filename_root.h",
- ]
- args = [
- "-i",
- rebase_path("$target_gen_dir", root_build_dir),
- "-o",
- rebase_path("$target_gen_dir", root_build_dir),
- "-t",
- "posix_stubs",
- "-e",
- rebase_path(extra_header, root_build_dir),
- "-s",
- stubs_filename_root,
- "-p",
- "media/gpu/vaapi",
+ output_name = "va_stubs"
+ deps = [
+ "//base",
]
-
- args += rebase_path(sources, root_build_dir)
}
source_set("vaapi") {
@@ -88,18 +64,21 @@ source_set("vaapi") {
"vp8_encoder.cc",
"vp8_encoder.h",
]
- sources += get_target_outputs(":libva_generate_stubs")
configs += [ "//third_party/libyuv:libyuv_config" ]
deps = [
- ":libva_generate_stubs",
+ ":libva_stubs",
"//gpu/ipc/service",
"//media",
"//media/gpu:common",
"//third_party/libyuv",
]
+ if (is_linux) {
+ configs += [ "//build/config/linux/libva" ]
+ }
+
if (use_x11) {
configs += [ "//build/config/linux:x11" ]
deps += [ "//ui/gfx/x" ]
diff --git a/chromium/media/gpu/vaapi/vaapi_h264_accelerator.cc b/chromium/media/gpu/vaapi/vaapi_h264_accelerator.cc
index 7702731aa08..2775fdde266 100644
--- a/chromium/media/gpu/vaapi/vaapi_h264_accelerator.cc
+++ b/chromium/media/gpu/vaapi/vaapi_h264_accelerator.cc
@@ -6,6 +6,7 @@
#include <va/va.h>
+#include "base/stl_util.h"
#include "media/gpu/decode_surface_handler.h"
#include "media/gpu/h264_dpb.h"
#include "media/gpu/macros.h"
@@ -138,7 +139,7 @@ Status VaapiH264Accelerator::SubmitFrameMetadata(
// And fill it with picture info from DPB.
FillVARefFramesFromDPB(dpb, pic_param.ReferenceFrames,
- arraysize(pic_param.ReferenceFrames));
+ base::size(pic_param.ReferenceFrames));
pic_param.num_ref_frames = sps->max_num_ref_frames;
@@ -255,23 +256,23 @@ Status VaapiH264Accelerator::SubmitSlice(
}
}
- static_assert(
- arraysize(slice_param.RefPicList0) == arraysize(slice_param.RefPicList1),
- "Invalid RefPicList sizes");
+ static_assert(base::size(slice_param.RefPicList0) ==
+ base::size(slice_param.RefPicList1),
+ "Invalid RefPicList sizes");
- for (size_t i = 0; i < arraysize(slice_param.RefPicList0); ++i) {
+ for (size_t i = 0; i < base::size(slice_param.RefPicList0); ++i) {
InitVAPicture(&slice_param.RefPicList0[i]);
InitVAPicture(&slice_param.RefPicList1[i]);
}
for (size_t i = 0;
- i < ref_pic_list0.size() && i < arraysize(slice_param.RefPicList0);
+ i < ref_pic_list0.size() && i < base::size(slice_param.RefPicList0);
++i) {
if (ref_pic_list0[i])
FillVAPicture(&slice_param.RefPicList0[i], ref_pic_list0[i]);
}
for (size_t i = 0;
- i < ref_pic_list1.size() && i < arraysize(slice_param.RefPicList1);
+ i < ref_pic_list1.size() && i < base::size(slice_param.RefPicList1);
++i) {
if (ref_pic_list1[i])
FillVAPicture(&slice_param.RefPicList1[i], ref_pic_list1[i]);
diff --git a/chromium/media/gpu/vaapi/vaapi_jpeg_decode_accelerator.cc b/chromium/media/gpu/vaapi/vaapi_jpeg_decode_accelerator.cc
index e12f045c7eb..ed242eedbb2 100644
--- a/chromium/media/gpu/vaapi/vaapi_jpeg_decode_accelerator.cc
+++ b/chromium/media/gpu/vaapi/vaapi_jpeg_decode_accelerator.cc
@@ -180,7 +180,7 @@ static void FillIQMatrix(const JpegQuantizationTable* q_table,
VAIQMatrixBufferJPEGBaseline* iq_matrix) {
memset(iq_matrix, 0, sizeof(*iq_matrix));
static_assert(kJpegMaxQuantizationTableNum ==
- base::size(decltype(iq_matrix->load_quantiser_table){}),
+ std::extent<decltype(iq_matrix->load_quantiser_table)>(),
"max number of quantization table mismatched");
static_assert(
sizeof(iq_matrix->quantiser_table[0]) == sizeof(q_table[0].value),
@@ -212,7 +212,7 @@ static void FillHuffmanTable(const JpegHuffmanTable* dc_table,
}
static_assert(kJpegMaxHuffmanTableNumBaseline ==
- base::size(decltype(huffman_table->load_huffman_table){}),
+ std::extent<decltype(huffman_table->load_huffman_table)>(),
"max number of huffman table mismatched");
static_assert(sizeof(huffman_table->huffman_table[0].num_dc_codes) ==
sizeof(dc_table[0].code_length),
@@ -414,6 +414,7 @@ bool VaapiJpegDecodeAccelerator::OutputPicture(
default:
VLOGF(1) << "Can't convert image to I420: unsupported format 0x"
<< std::hex << va_image_format.fourcc;
+ return false;
}
task_runner_->PostTask(
diff --git a/chromium/media/gpu/vaapi/vaapi_jpeg_decode_accelerator_unittest.cc b/chromium/media/gpu/vaapi/vaapi_jpeg_decode_accelerator_unittest.cc
index a65b7a70b7b..e29ee63121f 100644
--- a/chromium/media/gpu/vaapi/vaapi_jpeg_decode_accelerator_unittest.cc
+++ b/chromium/media/gpu/vaapi/vaapi_jpeg_decode_accelerator_unittest.cc
@@ -99,8 +99,14 @@ class VaapiJpegDecodeAcceleratorTest : public ::testing::Test {
const JpegParseResult& parse_result,
VASurfaceID va_surface) const;
- base::Lock* GetVaapiWrapperLock() const { return wrapper_->va_lock_; }
- VADisplay GetVaapiWrapperVaDisplay() const { return wrapper_->va_display_; }
+ base::Lock* GetVaapiWrapperLock() const LOCK_RETURNED(wrapper_->va_lock_) {
+ return wrapper_->va_lock_;
+ }
+
+ VADisplay GetVaapiWrapperVaDisplay() const
+ EXCLUSIVE_LOCKS_REQUIRED(wrapper_->va_lock_) {
+ return wrapper_->va_display_;
+ }
protected:
scoped_refptr<VaapiWrapper> wrapper_;
diff --git a/chromium/media/gpu/vaapi/vaapi_jpeg_encode_accelerator.cc b/chromium/media/gpu/vaapi/vaapi_jpeg_encode_accelerator.cc
index 7b1794fe56c..6153aed322e 100644
--- a/chromium/media/gpu/vaapi/vaapi_jpeg_encode_accelerator.cc
+++ b/chromium/media/gpu/vaapi/vaapi_jpeg_encode_accelerator.cc
@@ -143,12 +143,12 @@ void VaapiJpegEncodeAccelerator::Encoder::EncodeTask(
size_t max_coded_buffer_size =
VaapiJpegEncoder::GetMaxCodedBufferSize(input_size);
if (max_coded_buffer_size > cached_output_buffer_size_) {
- vaapi_wrapper_->DestroyCodedBuffers();
+ vaapi_wrapper_->DestroyVABuffers();
cached_output_buffer_size_ = 0;
VABufferID output_buffer_id;
- if (!vaapi_wrapper_->CreateCodedBuffer(max_coded_buffer_size,
- &output_buffer_id)) {
+ if (!vaapi_wrapper_->CreateVABuffer(max_coded_buffer_size,
+ &output_buffer_id)) {
VLOGF(1) << "Failed to create VA buffer for encoding output";
notify_error_cb_.Run(buffer_id, PLATFORM_FAILURE);
return;
@@ -179,10 +179,10 @@ void VaapiJpegEncodeAccelerator::Encoder::EncodeTask(
return;
}
- // Get the encoded output. DownloadFromCodedBuffer() is a blocking call. It
+ // Get the encoded output. DownloadFromVABuffer() is a blocking call. It
// would wait until encoding is finished.
size_t encoded_size = 0;
- if (!vaapi_wrapper_->DownloadFromCodedBuffer(
+ if (!vaapi_wrapper_->DownloadFromVABuffer(
cached_output_buffer_id_, va_surface_id_,
static_cast<uint8_t*>(request->output_shm->memory()),
request->output_shm->size(), &encoded_size)) {
diff --git a/chromium/media/gpu/vaapi/vaapi_jpeg_encoder.cc b/chromium/media/gpu/vaapi/vaapi_jpeg_encoder.cc
index 63b635e223e..e4bd4f1738f 100644
--- a/chromium/media/gpu/vaapi/vaapi_jpeg_encoder.cc
+++ b/chromium/media/gpu/vaapi/vaapi_jpeg_encoder.cc
@@ -4,13 +4,15 @@
#include "media/gpu/vaapi/vaapi_jpeg_encoder.h"
+#include <array>
+#include <type_traits>
+
#include <stddef.h>
#include <string.h>
-#include <array>
#include "base/logging.h"
-#include "base/macros.h"
#include "base/numerics/safe_conversions.h"
+#include "base/stl_util.h"
#include "media/filters/jpeg_parser.h"
#include "media/gpu/macros.h"
#include "media/gpu/vaapi/vaapi_wrapper.h"
@@ -50,37 +52,37 @@ void FillQMatrix(VAQMatrixBufferJPEG* q_matrix) {
// responsible for scaling the quantization tables based on picture
// parameter quality.
const JpegQuantizationTable& luminance = kDefaultQuantTable[0];
- static_assert(
- arraysize(luminance.value) == arraysize(q_matrix->lum_quantiser_matrix),
- "Luminance quantization table size mismatch.");
- static_assert(arraysize(kZigZag8x8) == arraysize(luminance.value),
+ static_assert(std::extent<decltype(luminance.value)>() ==
+ std::extent<decltype(q_matrix->lum_quantiser_matrix)>(),
+ "Luminance quantization table size mismatch.");
+ static_assert(base::size(kZigZag8x8) == base::size(luminance.value),
"Luminance quantization table size mismatch.");
q_matrix->load_lum_quantiser_matrix = 1;
- for (size_t i = 0; i < arraysize(kZigZag8x8); i++) {
+ for (size_t i = 0; i < base::size(kZigZag8x8); i++) {
q_matrix->lum_quantiser_matrix[i] = luminance.value[kZigZag8x8[i]];
}
const JpegQuantizationTable& chrominance = kDefaultQuantTable[1];
- static_assert(arraysize(chrominance.value) ==
- arraysize(q_matrix->chroma_quantiser_matrix),
+ static_assert(std::extent<decltype(chrominance.value)>() ==
+ std::extent<decltype(q_matrix->chroma_quantiser_matrix)>(),
"Chrominance quantization table size mismatch.");
- static_assert(arraysize(kZigZag8x8) == arraysize(chrominance.value),
+ static_assert(base::size(kZigZag8x8) == base::size(chrominance.value),
"Chrominance quantization table size mismatch.");
q_matrix->load_chroma_quantiser_matrix = 1;
- for (size_t i = 0; i < arraysize(kZigZag8x8); i++) {
+ for (size_t i = 0; i < base::size(kZigZag8x8); i++) {
q_matrix->chroma_quantiser_matrix[i] = chrominance.value[kZigZag8x8[i]];
}
}
void FillHuffmanTableParameters(
VAHuffmanTableBufferJPEGBaseline* huff_table_param) {
- static_assert(arraysize(kDefaultDcTable) == arraysize(kDefaultAcTable),
+ static_assert(base::size(kDefaultDcTable) == base::size(kDefaultAcTable),
"DC table and AC table size mismatch.");
- static_assert(
- arraysize(kDefaultDcTable) == arraysize(huff_table_param->huffman_table),
- "DC table and destination table size mismatch.");
+ static_assert(base::size(kDefaultDcTable) ==
+ std::extent<decltype(huff_table_param->huffman_table)>(),
+ "DC table and destination table size mismatch.");
- for (size_t i = 0; i < arraysize(kDefaultDcTable); ++i) {
+ for (size_t i = 0; i < base::size(kDefaultDcTable); ++i) {
const JpegHuffmanTable& dcTable = kDefaultDcTable[i];
const JpegHuffmanTable& acTable = kDefaultAcTable[i];
huff_table_param->load_huffman_table[i] = true;
@@ -92,9 +94,10 @@ void FillHuffmanTableParameters(
// so it has different size than
// |huff_table_param->huffman_table[i].dc_values|. Therefore we can't use
// SafeArrayMemcpy() here.
- static_assert(arraysize(huff_table_param->huffman_table[i].dc_values) <=
- arraysize(dcTable.code_value),
- "DC table code value array too small.");
+ static_assert(
+ std::extent<decltype(huff_table_param->huffman_table[i].dc_values)>() <=
+ std::extent<decltype(dcTable.code_value)>(),
+ "DC table code value array too small.");
memcpy(huff_table_param->huffman_table[i].dc_values, &dcTable.code_value[0],
sizeof(huff_table_param->huffman_table[i].dc_values));
diff --git a/chromium/media/gpu/vaapi/vaapi_picture_native_pixmap.cc b/chromium/media/gpu/vaapi/vaapi_picture_native_pixmap.cc
index 1e52ecd3f73..82bf1967906 100644
--- a/chromium/media/gpu/vaapi/vaapi_picture_native_pixmap.cc
+++ b/chromium/media/gpu/vaapi/vaapi_picture_native_pixmap.cc
@@ -50,28 +50,6 @@ VASurfaceID VaapiPictureNativePixmap::va_surface_id() const {
return va_surface_->id();
}
-unsigned VaapiPictureNativePixmap::BufferFormatToInternalFormat(
- gfx::BufferFormat format) const {
- switch (format) {
- case gfx::BufferFormat::BGRX_8888:
- case gfx::BufferFormat::RGBX_8888:
- return GL_RGB;
-
- case gfx::BufferFormat::BGRA_8888:
- return GL_BGRA_EXT;
-
- case gfx::BufferFormat::YVU_420:
- return GL_RGB_YCRCB_420_CHROMIUM;
-
- case gfx::BufferFormat::YUV_420_BIPLANAR:
- return GL_RGB_YCBCR_420V_CHROMIUM;
-
- default:
- NOTREACHED() << gfx::BufferFormatToString(format);
- return GL_BGRA_EXT;
- }
-}
-
// static
gfx::GpuMemoryBufferHandle
VaapiPictureNativePixmap::CreateGpuMemoryBufferHandleFromVideoFrame(
diff --git a/chromium/media/gpu/vaapi/vaapi_picture_native_pixmap.h b/chromium/media/gpu/vaapi/vaapi_picture_native_pixmap.h
index ff6cf0bb911..8d70ec8a5ff 100644
--- a/chromium/media/gpu/vaapi/vaapi_picture_native_pixmap.h
+++ b/chromium/media/gpu/vaapi/vaapi_picture_native_pixmap.h
@@ -49,8 +49,6 @@ class VaapiPictureNativePixmap : public VaapiPicture {
bool AllowOverlay() const override;
VASurfaceID va_surface_id() const override;
- unsigned BufferFormatToInternalFormat(gfx::BufferFormat format) const;
-
protected:
// Ozone buffer, the storage of the EGLImage and the VASurface.
scoped_refptr<gfx::NativePixmap> pixmap_;
diff --git a/chromium/media/gpu/vaapi/vaapi_picture_native_pixmap_egl.cc b/chromium/media/gpu/vaapi/vaapi_picture_native_pixmap_egl.cc
index 312d998f2c1..598a0647961 100644
--- a/chromium/media/gpu/vaapi/vaapi_picture_native_pixmap_egl.cc
+++ b/chromium/media/gpu/vaapi/vaapi_picture_native_pixmap_egl.cc
@@ -5,6 +5,7 @@
#include "media/gpu/vaapi/vaapi_picture_native_pixmap_egl.h"
#include "base/file_descriptor_posix.h"
+#include "gpu/command_buffer/common/gpu_memory_buffer_support.h"
#include "media/gpu/vaapi/va_surface.h"
#include "media/gpu/vaapi/vaapi_wrapper.h"
#include "ui/gfx/linux/native_pixmap_dmabuf.h"
@@ -76,9 +77,7 @@ bool VaapiPictureNativePixmapEgl::Allocate(gfx::BufferFormat format) {
if (make_context_current_cb_ && !make_context_current_cb_.Run())
return false;
- scoped_refptr<gl::GLImageNativePixmap> image(
- new gl::GLImageNativePixmap(size_, BufferFormatToInternalFormat(format)));
-
+ auto image = base::MakeRefCounted<gl::GLImageNativePixmap>(size_, format);
// Create an EGLImage from a gl texture
if (!image->InitializeFromTexture(texture_id_)) {
DLOG(ERROR) << "Failed to initialize eglimage from texture id: "
diff --git a/chromium/media/gpu/vaapi/vaapi_picture_native_pixmap_ozone.cc b/chromium/media/gpu/vaapi/vaapi_picture_native_pixmap_ozone.cc
index 9cd691f5f92..ba4b18c0097 100644
--- a/chromium/media/gpu/vaapi/vaapi_picture_native_pixmap_ozone.cc
+++ b/chromium/media/gpu/vaapi/vaapi_picture_native_pixmap_ozone.cc
@@ -71,9 +71,8 @@ bool VaapiPictureNativePixmapOzone::Initialize() {
const gfx::BufferFormat format = pixmap_->GetBufferFormat();
- scoped_refptr<gl::GLImageNativePixmap> image(
- new gl::GLImageNativePixmap(size_, BufferFormatToInternalFormat(format)));
- if (!image->Initialize(pixmap_.get(), format)) {
+ auto image = base::MakeRefCounted<gl::GLImageNativePixmap>(size_, format);
+ if (!image->Initialize(pixmap_.get())) {
LOG(ERROR) << "Failed to create GLImage";
return false;
}
diff --git a/chromium/media/gpu/vaapi/vaapi_utils.cc b/chromium/media/gpu/vaapi/vaapi_utils.cc
index ff4a78617c3..1bd831b886d 100644
--- a/chromium/media/gpu/vaapi/vaapi_utils.cc
+++ b/chromium/media/gpu/vaapi/vaapi_utils.cc
@@ -4,6 +4,8 @@
#include "media/gpu/vaapi/vaapi_utils.h"
+#include <type_traits>
+
#include <va/va.h>
#include "base/logging.h"
@@ -200,8 +202,8 @@ bool FillVP8DataStructuresAndPassToVaapiWrapper(
CheckedMemcpy(pic_param.mb_segment_tree_probs, sgmnt_hdr.segment_prob);
- static_assert(base::size(decltype(sgmnt_hdr.lf_update_value){}) ==
- base::size(decltype(pic_param.loop_filter_level){}),
+ static_assert(std::extent<decltype(sgmnt_hdr.lf_update_value)>() ==
+ std::extent<decltype(pic_param.loop_filter_level)>(),
"loop filter level arrays mismatch");
for (size_t i = 0; i < base::size(sgmnt_hdr.lf_update_value); ++i) {
int lf_level = lf_hdr.level;
@@ -220,14 +222,14 @@ bool FillVP8DataStructuresAndPassToVaapiWrapper(
}
static_assert(
- base::size(decltype(lf_hdr.ref_frame_delta){}) ==
- base::size(decltype(pic_param.loop_filter_deltas_ref_frame){}),
+ std::extent<decltype(lf_hdr.ref_frame_delta)>() ==
+ std::extent<decltype(pic_param.loop_filter_deltas_ref_frame)>(),
"loop filter deltas arrays size mismatch");
- static_assert(base::size(decltype(lf_hdr.mb_mode_delta){}) ==
- base::size(decltype(pic_param.loop_filter_deltas_mode){}),
+ static_assert(std::extent<decltype(lf_hdr.mb_mode_delta)>() ==
+ std::extent<decltype(pic_param.loop_filter_deltas_mode)>(),
"loop filter deltas arrays size mismatch");
- static_assert(base::size(decltype(lf_hdr.ref_frame_delta){}) ==
- base::size(decltype(lf_hdr.mb_mode_delta){}),
+ static_assert(std::extent<decltype(lf_hdr.ref_frame_delta)>() ==
+ std::extent<decltype(lf_hdr.mb_mode_delta)>(),
"loop filter deltas arrays size mismatch");
for (size_t i = 0; i < base::size(lf_hdr.ref_frame_delta); ++i) {
pic_param.loop_filter_deltas_ref_frame[i] = lf_hdr.ref_frame_delta[i];
diff --git a/chromium/media/gpu/vaapi/vaapi_video_decode_accelerator.cc b/chromium/media/gpu/vaapi/vaapi_video_decode_accelerator.cc
index d4f7c07d81d..9d7842d6b9d 100644
--- a/chromium/media/gpu/vaapi/vaapi_video_decode_accelerator.cc
+++ b/chromium/media/gpu/vaapi/vaapi_video_decode_accelerator.cc
@@ -16,10 +16,13 @@
#include "base/logging.h"
#include "base/macros.h"
#include "base/metrics/histogram_macros.h"
+#include "base/numerics/ranges.h"
#include "base/stl_util.h"
#include "base/strings/string_util.h"
#include "base/synchronization/waitable_event.h"
#include "base/threading/thread_task_runner_handle.h"
+#include "base/trace_event/memory_dump_manager.h"
+#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_event.h"
#include "gpu/ipc/service/gpu_channel.h"
#include "media/base/bind_to_current_loop.h"
@@ -45,6 +48,7 @@ namespace {
// UMA errors that the VaapiVideoDecodeAccelerator class reports.
enum VAVDADecoderFailure {
VAAPI_ERROR = 0,
+ VAAPI_VPP_ERROR = 1,
VAVDA_DECODER_FAILURES_MAX,
};
@@ -70,8 +74,8 @@ void CloseGpuMemoryBufferHandle(const gfx::GpuMemoryBufferHandle& handle) {
}
#endif
-// Returns true if the CPU is an Intel Kaby Lake or later.
-// cpu platform id's are referenced from the following file in kernel source
+// Returns true if the CPU is an Intel Kaby/Gemini/Sky Lake or later.
+// CPU platform IDs are referenced from the following file in kernel source
// arch/x86/include/asm/intel-family.h
bool IsKabyLakeOrLater() {
constexpr int kPentiumAndLaterFamily = 0x06;
@@ -82,7 +86,6 @@ bool IsKabyLakeOrLater() {
cpuid.model() >= kFirstKabyLakeModelId;
return is_kaby_lake_or_later;
}
-
bool IsGeminiLakeOrLater() {
constexpr int kPentiumAndLaterFamily = 0x06;
constexpr int kGeminiLakeModelId = 0x7A;
@@ -92,6 +95,33 @@ bool IsGeminiLakeOrLater() {
cpuid.model() >= kGeminiLakeModelId;
return is_geminilake_or_later;
}
+bool IsSkyLakeOrLater() {
+ constexpr int kPentiumAndLaterFamily = 0x06;
+ constexpr int kFirstSkyLakeModelId = 0x4E;
+ static base::CPU cpuid;
+ static bool is_sky_lake_or_later = cpuid.family() == kPentiumAndLaterFamily &&
+ cpuid.model() >= kFirstSkyLakeModelId;
+ return is_sky_lake_or_later;
+}
+
+// Decides if the current platform and |profile| may decode using the client's
+// PictureBuffers, or engage the Vpp to adapt VaApi's and the client's format.
+bool ShouldDecodeOnclientPictureBuffers(
+ VideoDecodeAccelerator::Config::OutputMode output_mode,
+ VideoCodecProfile profile) {
+ return output_mode == VideoDecodeAccelerator::Config::OutputMode::ALLOCATE &&
+ (IsKabyLakeOrLater() || IsGeminiLakeOrLater()) &&
+ profile == VP9PROFILE_PROFILE0;
+}
+
+// Decides if the current platform and |output_mode| may use a reduced number
+// of buffer allocations. See https://crbug.com/920510 for more information.
+bool ShouldUseReducedNumberOfAllocations(
+ VideoDecodeAccelerator::Config::OutputMode output_mode) {
+ return output_mode == VideoDecodeAccelerator::Config::OutputMode::ALLOCATE &&
+ IsSkyLakeOrLater();
+}
+
} // namespace
#define RETURN_AND_NOTIFY_ON_FAILURE(result, log, error_code, ret) \
@@ -160,12 +190,14 @@ VaapiVideoDecodeAccelerator::VaapiVideoDecodeAccelerator(
vaapi_picture_factory_(new VaapiPictureFactory()),
surfaces_available_(&lock_),
decode_using_client_picture_buffers_(false),
+ use_reduced_number_of_allocations_(false),
task_runner_(base::ThreadTaskRunnerHandle::Get()),
decoder_thread_("VaapiDecoderThread"),
- num_frames_at_client_(0),
finish_flush_pending_(false),
awaiting_va_surfaces_recycle_(false),
requested_num_pics_(0),
+ requested_num_reference_frames_(0),
+ previously_requested_num_reference_frames_(0),
profile_(VIDEO_CODEC_PROFILE_UNKNOWN),
make_context_current_cb_(make_context_current_cb),
bind_image_cb_(bind_image_cb),
@@ -173,10 +205,15 @@ VaapiVideoDecodeAccelerator::VaapiVideoDecodeAccelerator(
weak_this_ = weak_this_factory_.GetWeakPtr();
va_surface_release_cb_ = BindToCurrentLoop(
base::Bind(&VaapiVideoDecodeAccelerator::RecycleVASurfaceID, weak_this_));
+ base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
+ this, "media::VaapiVideoDecodeAccelerator",
+ base::ThreadTaskRunnerHandle::Get());
}
VaapiVideoDecodeAccelerator::~VaapiVideoDecodeAccelerator() {
DCHECK(task_runner_->BelongsToCurrentThread());
+ base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider(
+ this);
}
bool VaapiVideoDecodeAccelerator::Initialize(const Config& config,
@@ -223,13 +260,19 @@ bool VaapiVideoDecodeAccelerator::Initialize(const Config& config,
VLOGF(1) << "Unsupported profile " << GetProfileName(profile);
return false;
}
- profile_ = profile;
CHECK(decoder_thread_.Start());
decoder_thread_task_runner_ = decoder_thread_.task_runner();
state_ = kIdle;
+ profile_ = profile;
output_mode_ = config.output_mode;
+ decode_using_client_picture_buffers_ =
+ ShouldDecodeOnclientPictureBuffers(output_mode_, profile_);
+ use_reduced_number_of_allocations_ =
+ !decode_using_client_picture_buffers_ &&
+ ShouldUseReducedNumberOfAllocations(output_mode_);
+ previously_requested_num_reference_frames_ = 0;
return true;
}
@@ -273,17 +316,24 @@ void VaapiVideoDecodeAccelerator::OutputPicture(
"Failed putting surface into pixmap",
PLATFORM_FAILURE, );
}
- // Notify the client a picture is ready to be displayed.
- ++num_frames_at_client_;
- TRACE_COUNTER1("media,gpu", "Vaapi frames at client", num_frames_at_client_);
+
+ {
+ base::AutoLock auto_lock(lock_);
+ TRACE_COUNTER_ID2("media,gpu", "Vaapi frames at client", this, "used",
+ pictures_.size() - available_picture_buffers_.size(),
+ "available", available_picture_buffers_.size());
+ }
+
DVLOGF(4) << "Notifying output picture id " << output_id << " for input "
<< input_id
<< " is ready. visible rect: " << visible_rect.ToString();
- if (client_) {
- client_->PictureReady(Picture(output_id, input_id, visible_rect,
- picture_color_space.ToGfxColorSpace(),
- picture->AllowOverlay()));
- }
+ if (!client_)
+ return;
+
+ // Notify the |client_| a picture is ready to be consumed.
+ client_->PictureReady(Picture(output_id, input_id, visible_rect,
+ picture_color_space.ToGfxColorSpace(),
+ picture->AllowOverlay()));
}
void VaapiVideoDecodeAccelerator::TryOutputPicture() {
@@ -293,8 +343,11 @@ void VaapiVideoDecodeAccelerator::TryOutputPicture() {
if (!client_)
return;
- if (pending_output_cbs_.empty() || available_picture_buffers_.empty())
- return;
+ {
+ base::AutoLock auto_lock(lock_);
+ if (pending_output_cbs_.empty() || available_picture_buffers_.empty())
+ return;
+ }
auto output_cb = std::move(pending_output_cbs_.front());
pending_output_cbs_.pop();
@@ -448,7 +501,8 @@ void VaapiVideoDecodeAccelerator::DecodeTask() {
FROM_HERE,
base::Bind(&VaapiVideoDecodeAccelerator::InitiateSurfaceSetChange,
weak_this_, decoder_->GetRequiredNumOfPictures(),
- decoder_->GetPicSize()));
+ decoder_->GetPicSize(),
+ decoder_->GetNumReferenceFrames()));
// We'll get rescheduled once ProvidePictureBuffers() finishes.
return;
@@ -485,23 +539,37 @@ void VaapiVideoDecodeAccelerator::DecodeTask() {
}
}
-void VaapiVideoDecodeAccelerator::InitiateSurfaceSetChange(size_t num_pics,
- gfx::Size size) {
+void VaapiVideoDecodeAccelerator::InitiateSurfaceSetChange(
+ size_t num_pics,
+ gfx::Size size,
+ size_t num_reference_frames) {
DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK(!awaiting_va_surfaces_recycle_);
+ DCHECK_GT(num_pics, num_reference_frames);
// At this point decoder has stopped running and has already posted onto our
// loop any remaining output request callbacks, which executed before we got
- // here. Some of them might have been pended though, because we might not
- // have had enough TFPictures to output surfaces to. Initiate a wait cycle,
+ // here. Some of them might have been pended though, because we might not have
+ // had enough PictureBuffers to output surfaces to. Initiate a wait cycle,
// which will wait for client to return enough PictureBuffers to us, so that
// we can finish all pending output callbacks, releasing associated surfaces.
- VLOGF(2) << "Initiating surface set change";
awaiting_va_surfaces_recycle_ = true;
- requested_num_pics_ = num_pics;
requested_pic_size_ = size;
+ // If we can |use_reduced_number_of_allocations_|, split the requested
+ // |num_pics| between VA reference frames and client PictureBuffers proper.
+ if (use_reduced_number_of_allocations_)
+ requested_num_reference_frames_ = num_reference_frames;
+ else
+ requested_num_reference_frames_ = 0;
+
+ requested_num_pics_ = num_pics - requested_num_reference_frames_;
+
+ VLOGF(2) << " |requested_num_pics_| = " << requested_num_pics_
+ << "; |requested_num_reference_frames_| = "
+ << requested_num_reference_frames_;
+
TryFinishSurfaceSetChange();
}
@@ -511,14 +579,20 @@ void VaapiVideoDecodeAccelerator::TryFinishSurfaceSetChange() {
if (!awaiting_va_surfaces_recycle_)
return;
+ base::AutoLock auto_lock(lock_);
+ const size_t expected_max_available_va_surfaces =
+ use_reduced_number_of_allocations_
+ ? previously_requested_num_reference_frames_
+ : pictures_.size();
+
if (!pending_output_cbs_.empty() ||
- pictures_.size() != available_va_surfaces_.size()) {
- // Either:
- // 1. Not all pending pending output callbacks have been executed yet.
- // Wait for the client to return enough pictures and retry later.
- // 2. The above happened and all surface release callbacks have been posted
- // as the result, but not all have executed yet. Post ourselves after them
- // to let them release surfaces.
+ expected_max_available_va_surfaces != available_va_surfaces_.size()) {
+ // If we're here the stream resolution has changed; we need to wait until:
+ // - all |pending_output_cbs_| have been executed
+ // - all VASurfaces are back to |available_va_surfaces_|; we can't use
+ // |requested_num_reference_frames_| for comparison, since it might have
+ // changed in the previous call to InitiateSurfaceSetChange(), so we use
+ // |previously_requested_num_reference_frames_| instead.
DVLOGF(2) << "Awaiting pending output/surface release callbacks to finish";
task_runner_->PostTask(
FROM_HERE,
@@ -527,6 +601,8 @@ void VaapiVideoDecodeAccelerator::TryFinishSurfaceSetChange() {
return;
}
+ previously_requested_num_reference_frames_ = requested_num_reference_frames_;
+
// All surfaces released, destroy them and dismiss all PictureBuffers.
awaiting_va_surfaces_recycle_ = false;
available_va_surfaces_.clear();
@@ -543,13 +619,14 @@ void VaapiVideoDecodeAccelerator::TryFinishSurfaceSetChange() {
VLOGF(2) << "Requesting " << requested_num_pics_
<< " pictures of size: " << requested_pic_size_.ToString();
- VideoPixelFormat format = GfxBufferFormatToVideoPixelFormat(
+ const VideoPixelFormat format = GfxBufferFormatToVideoPixelFormat(
vaapi_picture_factory_->GetBufferFormat());
task_runner_->PostTask(
FROM_HERE,
base::BindOnce(&Client::ProvidePictureBuffers, client_,
requested_num_pics_, format, 1, requested_pic_size_,
vaapi_picture_factory_->GetGLTextureTarget()));
+ // |client_| may respond via AssignPictureBuffers().
}
void VaapiVideoDecodeAccelerator::Decode(
@@ -577,17 +654,6 @@ void VaapiVideoDecodeAccelerator::Decode(scoped_refptr<DecoderBuffer> buffer,
QueueInputBuffer(std::move(buffer), bitstream_id);
}
-void VaapiVideoDecodeAccelerator::RecycleVASurfaceID(
- VASurfaceID va_surface_id) {
- DCHECK(task_runner_->BelongsToCurrentThread());
- base::AutoLock auto_lock(lock_);
-
- available_va_surfaces_.push_back(va_surface_id);
- surfaces_available_.Signal();
-
- TryOutputPicture();
-}
-
void VaapiVideoDecodeAccelerator::AssignPictureBuffers(
const std::vector<PictureBuffer>& buffers) {
DCHECK(task_runner_->BelongsToCurrentThread());
@@ -605,11 +671,29 @@ void VaapiVideoDecodeAccelerator::AssignPictureBuffers(
const unsigned int va_format = GetVaFormatForVideoCodecProfile(profile_);
std::vector<VASurfaceID> va_surface_ids;
+ // If we can't |decode_using_client_picture_buffers_|, we have to allocate a
+ // |vpp_vaapi_wrapper_| for VaapiPicture to DownloadFromSurface() the VA's
+ // internal decoded frame.
+ if (!decode_using_client_picture_buffers_ && !vpp_vaapi_wrapper_) {
+ vpp_vaapi_wrapper_ = VaapiWrapper::Create(
+ VaapiWrapper::kVideoProcess, VAProfileNone,
+ base::BindRepeating(&ReportToUMA, VAAPI_VPP_ERROR));
+ if (!vpp_vaapi_wrapper_) {
+ VLOGF(1) << "Failed initializing VppVaapiWrapper";
+ NotifyError(PLATFORM_FAILURE);
+ }
+ }
+
for (size_t i = 0; i < buffers.size(); ++i) {
DCHECK(requested_pic_size_ == buffers[i].size());
- std::unique_ptr<VaapiPicture> picture(vaapi_picture_factory_->Create(
- vaapi_wrapper_, make_context_current_cb_, bind_image_cb_, buffers[i]));
+ // If |decode_using_client_picture_buffers_| is false, this |picture| is
+ // only used as a copy destination. Therefore, the VaapiWrapper used and
+ // owned by |picture| is |vpp_vaapi_wrapper_|.
+ std::unique_ptr<VaapiPicture> picture = vaapi_picture_factory_->Create(
+ decode_using_client_picture_buffers_ ? vaapi_wrapper_
+ : vpp_vaapi_wrapper_,
+ make_context_current_cb_, bind_image_cb_, buffers[i]);
RETURN_AND_NOTIFY_ON_FAILURE(picture, "Failed creating a VaapiPicture",
PLATFORM_FAILURE, );
@@ -630,28 +714,28 @@ void VaapiVideoDecodeAccelerator::AssignPictureBuffers(
surfaces_available_.Signal();
}
- decode_using_client_picture_buffers_ =
- !va_surface_ids.empty() &&
- (IsKabyLakeOrLater() || IsGeminiLakeOrLater()) &&
- profile_ == VP9PROFILE_PROFILE0;
-
- // If we have some |va_surface_ids|, use them for decode, otherwise ask
- // |vaapi_wrapper_| to allocate them for us.
+ // If |decode_using_client_picture_buffers_|, we use |va_surface_ids| for
+ // decode, otherwise ask |vaapi_wrapper_| to allocate them for us.
if (decode_using_client_picture_buffers_) {
+ DCHECK(!va_surface_ids.empty());
RETURN_AND_NOTIFY_ON_FAILURE(
vaapi_wrapper_->CreateContext(va_format, requested_pic_size_),
"Failed creating VA Context", PLATFORM_FAILURE, );
+ DCHECK_EQ(va_surface_ids.size(), buffers.size());
} else {
+ const size_t requested_num_surfaces = use_reduced_number_of_allocations_
+ ? requested_num_reference_frames_
+ : pictures_.size();
+ CHECK_NE(requested_num_surfaces, 0u);
va_surface_ids.clear();
- RETURN_AND_NOTIFY_ON_FAILURE(
- vaapi_wrapper_->CreateContextAndSurfaces(
- va_format, requested_pic_size_, buffers.size(), &va_surface_ids),
- "Failed creating VA Surfaces", PLATFORM_FAILURE, );
+ RETURN_AND_NOTIFY_ON_FAILURE(vaapi_wrapper_->CreateContextAndSurfaces(
+ va_format, requested_pic_size_,
+ requested_num_surfaces, &va_surface_ids),
+ "Failed creating VA Surfaces",
+ PLATFORM_FAILURE, );
}
- DCHECK_EQ(va_surface_ids.size(), buffers.size());
- for (const auto id : va_surface_ids)
- available_va_surfaces_.push_back(id);
+ available_va_surfaces_.assign(va_surface_ids.begin(), va_surface_ids.end());
// Resume DecodeTask if it is still in decoding state.
if (state_ == kDecoding) {
@@ -676,27 +760,30 @@ void VaapiVideoDecodeAccelerator::ImportBufferForPicture(
return;
}
- if (!pictures_.count(picture_buffer_id)) {
- CloseGpuMemoryBufferHandle(gpu_memory_buffer_handle);
-
- // It's possible that we've already posted a DismissPictureBuffer for this
- // picture, but it has not yet executed when this ImportBufferForPicture
- // was posted to us by the client. In that case just ignore this (we've
- // already dismissed it and accounted for that).
- DVLOGF(3) << "got picture id=" << picture_buffer_id
- << " not in use (anymore?).";
- return;
- }
+ {
+ base::AutoLock auto_lock(lock_);
+ if (!pictures_.count(picture_buffer_id)) {
+ CloseGpuMemoryBufferHandle(gpu_memory_buffer_handle);
+
+ // It's possible that we've already posted a DismissPictureBuffer for this
+ // picture, but it has not yet executed when this ImportBufferForPicture
+ // was posted to us by the client. In that case just ignore this (we've
+ // already dismissed it and accounted for that).
+ DVLOGF(3) << "got picture id=" << picture_buffer_id
+ << " not in use (anymore?).";
+ return;
+ }
- VaapiPicture* picture = pictures_[picture_buffer_id].get();
- if (!picture->ImportGpuMemoryBufferHandle(
- VideoPixelFormatToGfxBufferFormat(pixel_format),
- gpu_memory_buffer_handle)) {
- // ImportGpuMemoryBufferHandle will close the handles even on failure, so
- // we don't need to do this ourselves.
- VLOGF(1) << "Failed to import GpuMemoryBufferHandle";
- NotifyError(PLATFORM_FAILURE);
- return;
+ VaapiPicture* picture = pictures_[picture_buffer_id].get();
+ if (!picture->ImportGpuMemoryBufferHandle(
+ VideoPixelFormatToGfxBufferFormat(pixel_format),
+ gpu_memory_buffer_handle)) {
+ // ImportGpuMemoryBufferHandle will close the handles even on failure, so
+ // we don't need to do this ourselves.
+ VLOGF(1) << "Failed to import GpuMemoryBufferHandle";
+ NotifyError(PLATFORM_FAILURE);
+ return;
+ }
}
ReusePictureBuffer(picture_buffer_id);
@@ -710,23 +797,25 @@ void VaapiVideoDecodeAccelerator::ReusePictureBuffer(
TRACE_EVENT1("media,gpu", "VAVDA::ReusePictureBuffer", "Picture id",
picture_buffer_id);
- if (!pictures_.count(picture_buffer_id)) {
- // It's possible that we've already posted a DismissPictureBuffer for this
- // picture, but it has not yet executed when this ReusePictureBuffer
- // was posted to us by the client. In that case just ignore this (we've
- // already dismissed it and accounted for that).
- DVLOGF(3) << "got picture id=" << picture_buffer_id
- << " not in use (anymore?).";
- return;
- }
-
- --num_frames_at_client_;
- TRACE_COUNTER1("media,gpu", "Vaapi frames at client", num_frames_at_client_);
-
{
base::AutoLock auto_lock(lock_);
+
+ if (!pictures_.count(picture_buffer_id)) {
+ // It's possible that we've already posted a DismissPictureBuffer for this
+ // picture, but it has not yet executed when this ReusePictureBuffer
+ // was posted to us by the client. In that case just ignore this (we've
+ // already dismissed it and accounted for that).
+ DVLOGF(3) << "got picture id=" << picture_buffer_id
+ << " not in use (anymore?).";
+ return;
+ }
+
available_picture_buffers_.push_back(picture_buffer_id);
+ TRACE_COUNTER_ID2("media,gpu", "Vaapi frames at client", this, "used",
+ pictures_.size() - available_picture_buffers_.size(),
+ "available", available_picture_buffers_.size());
}
+
TryOutputPicture();
}
@@ -938,10 +1027,10 @@ void VaapiVideoDecodeAccelerator::SurfaceReady(
if (state_ == kResetting || state_ == kDestroying)
return;
}
-
pending_output_cbs_.push(
- base::Bind(&VaapiVideoDecodeAccelerator::OutputPicture, weak_this_,
- dec_surface, bitstream_id, visible_rect, color_space));
+ base::BindOnce(&VaapiVideoDecodeAccelerator::OutputPicture, weak_this_,
+ dec_surface, bitstream_id, visible_rect, color_space));
+
TryOutputPicture();
}
@@ -956,6 +1045,14 @@ scoped_refptr<VASurface> VaapiVideoDecodeAccelerator::CreateSurface() {
if (!decode_using_client_picture_buffers_) {
const VASurfaceID id = available_va_surfaces_.front();
available_va_surfaces_.pop_front();
+
+ TRACE_COUNTER_ID2(
+ "media,gpu", "Vaapi VASurfaceIDs", this, "used",
+ (use_reduced_number_of_allocations_ ? requested_num_reference_frames_
+ : pictures_.size()) -
+ available_va_surfaces_.size(),
+ "available", available_va_surfaces_.size());
+
return new VASurface(id, requested_pic_size_,
vaapi_wrapper_->va_surface_format(),
va_surface_release_cb_);
@@ -981,6 +1078,64 @@ scoped_refptr<VASurface> VaapiVideoDecodeAccelerator::CreateSurface() {
return nullptr;
}
+void VaapiVideoDecodeAccelerator::RecycleVASurfaceID(
+ VASurfaceID va_surface_id) {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+
+ {
+ base::AutoLock auto_lock(lock_);
+ available_va_surfaces_.push_back(va_surface_id);
+ if (!decode_using_client_picture_buffers_) {
+ TRACE_COUNTER_ID2(
+ "media,gpu", "Vaapi VASurfaceIDs", this, "used",
+ (use_reduced_number_of_allocations_ ? requested_num_reference_frames_
+ : pictures_.size()) -
+ available_va_surfaces_.size(),
+ "available", available_va_surfaces_.size());
+ }
+ surfaces_available_.Signal();
+ }
+
+ TryOutputPicture();
+}
+bool VaapiVideoDecodeAccelerator::OnMemoryDump(
+ const base::trace_event::MemoryDumpArgs& args,
+ base::trace_event::ProcessMemoryDump* pmd) {
+ using base::trace_event::MemoryAllocatorDump;
+ base::AutoLock auto_lock(lock_);
+ if (decode_using_client_picture_buffers_ || !requested_num_reference_frames_)
+ return false;
+
+ auto dump_name = base::StringPrintf("gpu/vaapi/decoder/0x%" PRIxPTR,
+ reinterpret_cast<uintptr_t>(this));
+ MemoryAllocatorDump* dump = pmd->CreateAllocatorDump(dump_name);
+
+ constexpr float kNumBytesPerPixelYUV420 = 12.0 / 8;
+ constexpr float kNumBytesPerPixelYUV420_10bpp = 2 * kNumBytesPerPixelYUV420;
+ unsigned int va_surface_format = GetVaFormatForVideoCodecProfile(profile_);
+ DCHECK(va_surface_format == VA_RT_FORMAT_YUV420 ||
+ va_surface_format == VA_RT_FORMAT_YUV420_10BPP);
+ const float va_surface_bytes_per_pixel =
+ va_surface_format == VA_RT_FORMAT_YUV420 ? kNumBytesPerPixelYUV420
+ : kNumBytesPerPixelYUV420_10bpp;
+ // Report |requested_num_surfaces| and the associated memory size. The
+ // calculated size is an estimation since we don't know the internal VA
+ // strides, texture compression, headers, etc, but is a good lower boundary.
+ const size_t requested_num_surfaces = use_reduced_number_of_allocations_
+ ? requested_num_reference_frames_
+ : pictures_.size();
+ dump->AddScalar(MemoryAllocatorDump::kNameSize,
+ MemoryAllocatorDump::kUnitsBytes,
+ static_cast<uint64_t>(requested_num_surfaces *
+ requested_pic_size_.GetArea() *
+ va_surface_bytes_per_pixel));
+ dump->AddScalar(MemoryAllocatorDump::kNameObjectCount,
+ MemoryAllocatorDump::kUnitsObjects,
+ static_cast<uint64_t>(requested_num_surfaces));
+
+ return true;
+}
+
// static
VideoDecodeAccelerator::SupportedProfiles
VaapiVideoDecodeAccelerator::GetSupportedProfiles() {
diff --git a/chromium/media/gpu/vaapi/vaapi_video_decode_accelerator.h b/chromium/media/gpu/vaapi/vaapi_video_decode_accelerator.h
index 3e1e84b416a..10c9da07fd2 100644
--- a/chromium/media/gpu/vaapi/vaapi_video_decode_accelerator.h
+++ b/chromium/media/gpu/vaapi/vaapi_video_decode_accelerator.h
@@ -25,7 +25,9 @@
#include "base/single_thread_task_runner.h"
#include "base/synchronization/condition_variable.h"
#include "base/synchronization/lock.h"
+#include "base/thread_annotations.h"
#include "base/threading/thread.h"
+#include "base/trace_event/memory_dump_provider.h"
#include "media/base/bitstream_buffer.h"
#include "media/gpu/decode_surface_handler.h"
#include "media/gpu/gpu_video_decode_accelerator_helpers.h"
@@ -54,7 +56,8 @@ class VaapiPicture;
// can assume |*this| is still alive. See |weak_this_| below for more details.
class MEDIA_GPU_EXPORT VaapiVideoDecodeAccelerator
: public VideoDecodeAccelerator,
- public DecodeSurfaceHandler<VASurface> {
+ public DecodeSurfaceHandler<VASurface>,
+ public base::trace_event::MemoryDumpProvider {
public:
VaapiVideoDecodeAccelerator(
const MakeGLContextCurrentCallback& make_context_current_cb,
@@ -92,6 +95,10 @@ class MEDIA_GPU_EXPORT VaapiVideoDecodeAccelerator
const gfx::Rect& visible_rect,
const VideoColorSpace& color_space) override;
+ // base::trace_event::MemoryDumpProvider implementation.
+ bool OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
+ base::trace_event::ProcessMemoryDump* pmd) override;
+
private:
friend class VaapiVideoDecodeAcceleratorTest;
@@ -109,16 +116,16 @@ class MEDIA_GPU_EXPORT VaapiVideoDecodeAccelerator
// |decoder_|. This method will sleep if no |input_buffers_| are available.
// Returns true if a new buffer has been set up, false if an early exit has
// been requested (due to initiated reset/flush/destroy).
- bool GetCurrInputBuffer_Locked();
+ bool GetCurrInputBuffer_Locked() EXCLUSIVE_LOCKS_REQUIRED(lock_);
// Signals the client that |curr_input_buffer_| has been read and can be
// returned. Will also release the mapping.
- void ReturnCurrInputBuffer_Locked();
+ void ReturnCurrInputBuffer_Locked() EXCLUSIVE_LOCKS_REQUIRED(lock_);
// Waits for more surfaces to become available. Returns true once they do or
// false if an early exit has been requested (due to an initiated
// reset/flush/destroy).
- bool WaitForSurfaces_Locked();
+ bool WaitForSurfaces_Locked() EXCLUSIVE_LOCKS_REQUIRED(lock_);
// Continue decoding given input buffers and sleep waiting for input/output
// as needed. Will exit if a new set of surfaces or reset/flush/destroy
@@ -167,13 +174,16 @@ class MEDIA_GPU_EXPORT VaapiVideoDecodeAccelerator
void TryOutputPicture();
// Called when a VASurface is no longer in use by the decoder or is not being
- // synced/waiting to be synced to a picture. Returns it to available surfaces
- // pool.
+ // synced/waiting to be synced to a picture. Returns it to the
+ // |available_va_surfaces_|
void RecycleVASurfaceID(VASurfaceID va_surface_id);
- // Initiate wait cycle for surfaces to be released before we release them
- // and allocate new ones, as requested by the decoder.
- void InitiateSurfaceSetChange(size_t num_pics, gfx::Size size);
+ // Request a new set of |num_pics| PictureBuffers to be allocated by
+ // |client_|. Up to |num_reference_frames| out of |num_pics_| might be needed
+ // by |decoder_|.
+ void InitiateSurfaceSetChange(size_t num_pics,
+ gfx::Size size,
+ size_t num_reference_frames);
// Check if the surfaces have been released or post ourselves for later.
void TryFinishSurfaceSetChange();
@@ -192,39 +202,45 @@ class MEDIA_GPU_EXPORT VaapiVideoDecodeAccelerator
kDestroying,
};
- // |lock_| protects |input_buffers_|, |curr_input_buffer_|, |state_| and
- // |available_picture_buffers_|.
base::Lock lock_;
- State state_;
+ State state_ GUARDED_BY(lock_);
+ // Only used on |task_runner_|.
Config::OutputMode output_mode_;
// Queue of available InputBuffers.
- base::queue<std::unique_ptr<InputBuffer>> input_buffers_;
+ base::queue<std::unique_ptr<InputBuffer>> input_buffers_ GUARDED_BY(lock_);
// Signalled when input buffers are queued onto |input_buffers_| queue.
base::ConditionVariable input_ready_;
- // Current input buffer at decoder.
+ // Current input buffer at decoder. Only used on |decoder_thread_task_runner_|
std::unique_ptr<InputBuffer> curr_input_buffer_;
- // List of PictureBuffer ids available to be sent to |client_| via
- // OutputPicture() (|client_| returns them via ReusePictureBuffer()).
- std::list<int32_t> available_picture_buffers_;
-
+ // Only used on |task_runner_|.
std::unique_ptr<VaapiPictureFactory> vaapi_picture_factory_;
- // Constructed in Initialize() when the codec information is received.
+ // The following variables are constructed/initialized in Initialize() when
+ // the codec information is received. |vaapi_wrapper_| is thread safe.
scoped_refptr<VaapiWrapper> vaapi_wrapper_;
+ // Only used on |decoder_thread_task_runner_|.
std::unique_ptr<AcceleratedVideoDecoder> decoder_;
+ // VaapiWrapper for VPP (Video Post Processing). This is used for copying
+ // from a decoded surface to a surface bound to client's PictureBuffer.
+ scoped_refptr<VaapiWrapper> vpp_vaapi_wrapper_;
+
// All allocated VaapiPictures, regardless of their current state. Pictures
// are allocated at AssignPictureBuffers() and are kept until dtor or
// TryFinishSurfaceSetChange(). Comes after |vaapi_wrapper_| to ensure all
// pictures are destroyed before this is destroyed.
- base::small_map<std::map<int32_t, std::unique_ptr<VaapiPicture>>> pictures_;
+ base::small_map<std::map<int32_t, std::unique_ptr<VaapiPicture>>> pictures_
+ GUARDED_BY(lock_);
+ // List of PictureBuffer ids available to be sent to |client_| via
+ // OutputPicture() (|client_| returns them via ReusePictureBuffer()).
+ std::list<int32_t> available_picture_buffers_ GUARDED_BY(lock_);
// VASurfaceIDs no longer in use that can be passed back to |decoder_| for
// reuse, once it requests them.
- std::list<VASurfaceID> available_va_surfaces_;
+ std::list<VASurfaceID> available_va_surfaces_ GUARDED_BY(lock_);
// Signalled when output surfaces are queued into |available_va_surfaces_|.
base::ConditionVariable surfaces_available_;
@@ -236,14 +252,16 @@ class MEDIA_GPU_EXPORT VaapiVideoDecodeAccelerator
// If we don't have any available |pictures_| at the time when the decoder
// requests output, we'll store the request in this queue for later and run it
// once the client gives us more textures via ReusePictureBuffer().
+ // Only used on |task_runner_|.
base::queue<base::OnceClosure> pending_output_cbs_;
+ // TODO(crbug.com/912295): Enable these two for IMPORT |output_mode_| as well.
// Under some circumstances, we can pass to libva our own VASurfaceIDs to
- // decode onto, which skips one copy.
+ // decode onto, which skips one copy. see https://crbug.com/822346.
bool decode_using_client_picture_buffers_;
-
- // ChildThread's task runner.
- const scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
+ // When |decode_using_client_picture_buffers_| is false and under certain
+ // conditions, we can reduce the number of necessary allocated buffers.
+ bool use_reduced_number_of_allocations_;
// WeakPtr<> pointing to |this| for use in posting tasks from the decoder
// thread back to the ChildThread. Because the decoder thread is a member of
@@ -253,34 +271,39 @@ class MEDIA_GPU_EXPORT VaapiVideoDecodeAccelerator
// decoder thread to the ChildThread should use |weak_this_|.
base::WeakPtr<VaapiVideoDecodeAccelerator> weak_this_;
- // Callback used when creating VASurface objects.
+ // Callback used when creating VASurface objects. Only used on |task_runner_|.
VASurface::ReleaseCB va_surface_release_cb_;
- // To expose client callbacks from VideoDecodeAccelerator.
- // NOTE: all calls to these objects *MUST* be executed on task_runner_.
+ // To expose client callbacks from VideoDecodeAccelerator. Used only on
+ // |task_runner_|.
std::unique_ptr<base::WeakPtrFactory<Client>> client_ptr_factory_;
base::WeakPtr<Client> client_;
+ // ChildThread's task runner.
+ const scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
+
base::Thread decoder_thread_;
// Use this to post tasks to |decoder_thread_| instead of
// |decoder_thread_.task_runner()| because the latter will be NULL once
// |decoder_thread_.Stop()| returns.
scoped_refptr<base::SingleThreadTaskRunner> decoder_thread_task_runner_;
- int num_frames_at_client_;
-
- // Whether we are waiting for any pending_output_cbs_ to be run before
- // NotifyingFlushDone.
+ // Whether we are waiting for any |pending_output_cbs_| to be run before
+ // NotifyingFlushDone. Only used on |task_runner_|.
bool finish_flush_pending_;
// Decoder requested a new surface set and we are waiting for all the surfaces
- // to be returned before we can free them.
+ // to be returned before we can free them. Only used on |task_runner_|.
bool awaiting_va_surfaces_recycle_;
- // Last requested number/resolution of output picture buffers and their
- // format.
+ // Last requested number/resolution of output PictureBuffers.
size_t requested_num_pics_;
gfx::Size requested_pic_size_;
+ // Max number of reference frames needed by |decoder_|. Only used on
+ // |task_runner_| and when |use_reduced_number_of_allocations_| is true.
+ size_t requested_num_reference_frames_;
+ size_t previously_requested_num_reference_frames_;
+
VideoCodecProfile profile_;
// Callback to make GL context current.
diff --git a/chromium/media/gpu/vaapi/vaapi_video_decode_accelerator_unittest.cc b/chromium/media/gpu/vaapi/vaapi_video_decode_accelerator_unittest.cc
index 0a2311dd3e6..cdd47320824 100644
--- a/chromium/media/gpu/vaapi/vaapi_video_decode_accelerator_unittest.cc
+++ b/chromium/media/gpu/vaapi/vaapi_video_decode_accelerator_unittest.cc
@@ -32,12 +32,15 @@ ACTION_P(RunClosure, closure) {
closure.Run();
}
-constexpr VideoCodecProfile kCodecProfiles[] = {H264PROFILE_MIN, VP8PROFILE_MIN,
- VP9PROFILE_MIN};
+struct TestParams {
+ VideoCodecProfile video_codec;
+ bool decode_using_client_picture_buffers;
+};
+
constexpr int32_t kBitstreamId = 123;
constexpr size_t kInputSize = 256;
-constexpr size_t kNumPictures = 2;
+constexpr size_t kNumPictures = 4;
const gfx::Size kPictureSize(64, 48);
constexpr size_t kNewNumPictures = 3;
@@ -58,6 +61,7 @@ class MockAcceleratedVideoDecoder : public AcceleratedVideoDecoder {
MOCK_METHOD0(Decode, DecodeResult());
MOCK_CONST_METHOD0(GetPicSize, gfx::Size());
MOCK_CONST_METHOD0(GetRequiredNumOfPictures, size_t());
+ MOCK_CONST_METHOD0(GetNumReferenceFrames, size_t());
};
class MockVaapiWrapper : public VaapiWrapper {
@@ -66,6 +70,7 @@ class MockVaapiWrapper : public VaapiWrapper {
MOCK_METHOD4(
CreateContextAndSurfaces,
bool(unsigned int, const gfx::Size&, size_t, std::vector<VASurfaceID>*));
+ MOCK_METHOD2(CreateContext, bool(unsigned int, const gfx::Size&));
MOCK_METHOD0(DestroyContextAndSurfaces, void());
private:
@@ -104,6 +109,10 @@ class MockVaapiPicture : public VaapiPicture {
return true;
}
bool AllowOverlay() const override { return false; }
+ VASurfaceID va_surface_id() const override {
+ // Return any number different from VA_INVALID_ID and VaapiPicture specific.
+ return static_cast<VASurfaceID>(texture_id_);
+ }
};
class MockVaapiPictureFactory : public VaapiPictureFactory {
@@ -127,7 +136,7 @@ class MockVaapiPictureFactory : public VaapiPictureFactory {
}
};
-class VaapiVideoDecodeAcceleratorTest : public TestWithParam<VideoCodecProfile>,
+class VaapiVideoDecodeAcceleratorTest : public TestWithParam<TestParams>,
public VideoDecodeAccelerator::Client {
public:
VaapiVideoDecodeAcceleratorTest()
@@ -140,19 +149,32 @@ class VaapiVideoDecodeAcceleratorTest : public TestWithParam<VideoCodecProfile>,
mock_decoder_(new MockAcceleratedVideoDecoder),
mock_vaapi_picture_factory_(new MockVaapiPictureFactory()),
mock_vaapi_wrapper_(new MockVaapiWrapper()),
+ mock_vpp_vaapi_wrapper_(new MockVaapiWrapper()),
weak_ptr_factory_(this) {
decoder_thread_.Start();
// Don't want to go through a vda_->Initialize() because it binds too many
- // items of the environment. Instead, just start the decoder thread.
+ // items of the environment. Instead, do all the necessary steps here.
+
vda_.decoder_thread_task_runner_ = decoder_thread_.task_runner();
// Plug in all the mocks and ourselves as the |client_|.
vda_.decoder_.reset(mock_decoder_);
vda_.client_ = weak_ptr_factory_.GetWeakPtr();
vda_.vaapi_wrapper_ = mock_vaapi_wrapper_;
+ vda_.vpp_vaapi_wrapper_ = mock_vpp_vaapi_wrapper_;
vda_.vaapi_picture_factory_.reset(mock_vaapi_picture_factory_);
+ // TODO(crbug.com/917999): add IMPORT mode to test variations.
+ vda_.output_mode_ = VideoDecodeAccelerator::Config::OutputMode::ALLOCATE;
+
+ vda_.decode_using_client_picture_buffers_ =
+ GetParam().decode_using_client_picture_buffers;
+ vda_.use_reduced_number_of_allocations_ =
+ !vda_.decode_using_client_picture_buffers_ &&
+ vda_.output_mode_ ==
+ VideoDecodeAccelerator::Config::OutputMode::ALLOCATE;
+
vda_.state_ = VaapiVideoDecodeAccelerator::kIdle;
}
~VaapiVideoDecodeAcceleratorTest() {}
@@ -163,6 +185,7 @@ class VaapiVideoDecodeAcceleratorTest : public TestWithParam<VideoCodecProfile>,
}
void SetVdaStateToUnitialized() {
+ base::AutoLock auto_lock(vda_.lock_);
vda_.state_ = VaapiVideoDecodeAccelerator::kUninitialized;
}
@@ -205,6 +228,9 @@ class VaapiVideoDecodeAcceleratorTest : public TestWithParam<VideoCodecProfile>,
EXPECT_CALL(*mock_decoder_, GetRequiredNumOfPictures())
.WillOnce(Return(num_pictures));
EXPECT_CALL(*mock_decoder_, GetPicSize()).WillOnce(Return(picture_size));
+ const size_t kNumReferenceFrames = num_pictures / 2;
+ EXPECT_CALL(*mock_decoder_, GetNumReferenceFrames())
+ .WillOnce(Return(kNumReferenceFrames));
EXPECT_CALL(*mock_vaapi_wrapper_, DestroyContextAndSurfaces());
if (expect_dismiss_picture_buffers) {
@@ -212,8 +238,14 @@ class VaapiVideoDecodeAcceleratorTest : public TestWithParam<VideoCodecProfile>,
.Times(num_picture_buffers_to_dismiss);
}
+ const size_t expected_num_picture_buffers_requested =
+ vda_.use_reduced_number_of_allocations_
+ ? num_pictures - kNumReferenceFrames
+ : num_pictures;
+
EXPECT_CALL(*this,
- ProvidePictureBuffers(num_pictures, _, 1, picture_size, _))
+ ProvidePictureBuffers(expected_num_picture_buffers_requested, _,
+ 1, picture_size, _))
.WillOnce(RunClosure(quit_closure));
base::SharedMemoryHandle handle;
@@ -237,17 +269,30 @@ class VaapiVideoDecodeAcceleratorTest : public TestWithParam<VideoCodecProfile>,
base::RunLoop run_loop;
base::Closure quit_closure = run_loop.QuitClosure();
- EXPECT_CALL(*mock_vaapi_wrapper_,
- CreateContextAndSurfaces(_, picture_size, num_pictures, _))
- .WillOnce(
- DoAll(WithArg<3>(Invoke(
- [num_pictures](std::vector<VASurfaceID>* va_surface_ids) {
- va_surface_ids->resize(num_pictures);
- })),
- Return(true)));
- EXPECT_CALL(*mock_vaapi_picture_factory_,
- MockCreateVaapiPicture(mock_vaapi_wrapper_.get(), picture_size))
- .Times(num_pictures);
+ // |decode_using_client_picture_buffers| determines the concrete method for
+ // creation of context, surfaces and VaapiPictures.
+ if (GetParam().decode_using_client_picture_buffers) {
+ EXPECT_CALL(*mock_vaapi_wrapper_, CreateContext(_, picture_size))
+ .WillOnce(Return(true));
+ EXPECT_CALL(
+ *mock_vaapi_picture_factory_,
+ MockCreateVaapiPicture(mock_vaapi_wrapper_.get(), picture_size))
+ .Times(num_pictures);
+ } else {
+ const size_t kNumReferenceFrames = num_pictures / 2;
+ EXPECT_CALL(
+ *mock_vaapi_wrapper_,
+ CreateContextAndSurfaces(_, picture_size, kNumReferenceFrames, _))
+ .WillOnce(DoAll(
+ WithArg<3>(Invoke([kNumReferenceFrames](
+ std::vector<VASurfaceID>* va_surface_ids) {
+ va_surface_ids->resize(kNumReferenceFrames);
+ })),
+ Return(true)));
+ EXPECT_CALL(*mock_vaapi_picture_factory_,
+ MockCreateVaapiPicture(_, picture_size))
+ .Times(num_pictures);
+ }
::testing::InSequence s;
EXPECT_CALL(*mock_decoder_, Decode())
@@ -314,6 +359,7 @@ class VaapiVideoDecodeAcceleratorTest : public TestWithParam<VideoCodecProfile>,
MockVaapiPictureFactory* mock_vaapi_picture_factory_;
scoped_refptr<MockVaapiWrapper> mock_vaapi_wrapper_;
+ scoped_refptr<MockVaapiWrapper> mock_vpp_vaapi_wrapper_;
std::unique_ptr<base::SharedMemory> in_shm_;
@@ -412,8 +458,14 @@ TEST_P(VaapiVideoDecodeAcceleratorTest,
ResetSequence();
}
+constexpr TestParams kTestCases[] = {
+ {H264PROFILE_MIN, false /* decode_using_client_picture_buffers */},
+ {VP8PROFILE_MIN, false /* decode_using_client_picture_buffers */},
+ {VP9PROFILE_MIN, false /* decode_using_client_picture_buffers */},
+ {VP9PROFILE_MIN, true /* decode_using_client_picture_buffers */}};
+
INSTANTIATE_TEST_CASE_P(/* No prefix. */,
VaapiVideoDecodeAcceleratorTest,
- ValuesIn(kCodecProfiles));
+ ValuesIn(kTestCases));
} // namespace media
diff --git a/chromium/media/gpu/vaapi/vaapi_video_encode_accelerator.cc b/chromium/media/gpu/vaapi/vaapi_video_encode_accelerator.cc
index 284736fdfdb..3c9e63623c7 100644
--- a/chromium/media/gpu/vaapi/vaapi_video_encode_accelerator.cc
+++ b/chromium/media/gpu/vaapi/vaapi_video_encode_accelerator.cc
@@ -8,6 +8,7 @@
#include <algorithm>
#include <memory>
+#include <type_traits>
#include <utility>
#include <va/va.h>
@@ -446,7 +447,7 @@ void VaapiVideoEncodeAccelerator::ReturnBitstreamBuffer(
uint8_t* target_data = static_cast<uint8_t*>(buffer->shm->memory());
size_t data_size = 0;
- if (!vaapi_wrapper_->DownloadAndDestroyCodedBuffer(
+ if (!vaapi_wrapper_->DownloadAndDestroyVABuffer(
encode_job->coded_buffer_id(), encode_job->input_surface()->id(),
target_data, buffer->shm->size(), &data_size)) {
NOTIFY_ERROR(kPlatformFailureError, "Failed downloading coded buffer");
@@ -497,8 +498,8 @@ scoped_refptr<VaapiEncodeJob> VaapiVideoEncodeAccelerator::CreateEncodeJob(
}
VABufferID coded_buffer_id;
- if (!vaapi_wrapper_->CreateCodedBuffer(output_buffer_byte_size_,
- &coded_buffer_id)) {
+ if (!vaapi_wrapper_->CreateVABuffer(output_buffer_byte_size_,
+ &coded_buffer_id)) {
NOTIFY_ERROR(kPlatformFailureError, "Failed creating coded buffer");
return nullptr;
}
@@ -1090,16 +1091,17 @@ bool VaapiVideoEncodeAccelerator::VP8Accelerator::SubmitFrameParameters(
if (frame_header->IsKeyframe())
pic_param.pic_flags.bits.forced_lf_adjustment = true;
- static_assert(
- arraysize(pic_param.loop_filter_level) ==
- arraysize(pic_param.ref_lf_delta) &&
- arraysize(pic_param.ref_lf_delta) ==
- arraysize(pic_param.mode_lf_delta) &&
- arraysize(pic_param.ref_lf_delta) ==
- arraysize(frame_header->loopfilter_hdr.ref_frame_delta) &&
- arraysize(pic_param.mode_lf_delta) ==
- arraysize(frame_header->loopfilter_hdr.mb_mode_delta),
- "Invalid loop filter array sizes");
+ static_assert(std::extent<decltype(pic_param.loop_filter_level)>() ==
+ std::extent<decltype(pic_param.ref_lf_delta)>() &&
+ std::extent<decltype(pic_param.ref_lf_delta)>() ==
+ std::extent<decltype(pic_param.mode_lf_delta)>() &&
+ std::extent<decltype(pic_param.ref_lf_delta)>() ==
+ std::extent<decltype(
+ frame_header->loopfilter_hdr.ref_frame_delta)>() &&
+ std::extent<decltype(pic_param.mode_lf_delta)>() ==
+ std::extent<decltype(
+ frame_header->loopfilter_hdr.mb_mode_delta)>(),
+ "Invalid loop filter array sizes");
for (size_t i = 0; i < base::size(pic_param.loop_filter_level); ++i) {
pic_param.loop_filter_level[i] = frame_header->loopfilter_hdr.level;
diff --git a/chromium/media/gpu/vaapi/vaapi_vp9_accelerator.cc b/chromium/media/gpu/vaapi/vaapi_vp9_accelerator.cc
index 69b994f1b18..d46aab8be36 100644
--- a/chromium/media/gpu/vaapi/vaapi_vp9_accelerator.cc
+++ b/chromium/media/gpu/vaapi/vaapi_vp9_accelerator.cc
@@ -4,6 +4,9 @@
#include "media/gpu/vaapi/vaapi_vp9_accelerator.h"
+#include <type_traits>
+
+#include "base/stl_util.h"
#include "media/gpu/decode_surface_handler.h"
#include "media/gpu/macros.h"
#include "media/gpu/vaapi/vaapi_common.h"
@@ -55,8 +58,8 @@ bool VaapiVP9Accelerator::SubmitDecode(
pic_param.frame_height =
base::checked_cast<uint16_t>(frame_hdr->frame_height);
- CHECK_EQ(ref_pictures.size(), arraysize(pic_param.reference_frames));
- for (size_t i = 0; i < arraysize(pic_param.reference_frames); ++i) {
+ CHECK_EQ(ref_pictures.size(), base::size(pic_param.reference_frames));
+ for (size_t i = 0; i < base::size(pic_param.reference_frames); ++i) {
if (ref_pictures[i]) {
pic_param.reference_frames[i] =
ref_pictures[i]->AsVaapiVP9Picture()->GetVASurfaceID();
@@ -119,10 +122,11 @@ bool VaapiVP9Accelerator::SubmitDecode(
slice_param.slice_data_offset = 0;
slice_param.slice_data_flag = VA_SLICE_DATA_FLAG_ALL;
- static_assert(arraysize(Vp9SegmentationParams::feature_enabled) ==
- arraysize(slice_param.seg_param),
- "seg_param array of incorrect size");
- for (size_t i = 0; i < arraysize(slice_param.seg_param); ++i) {
+ static_assert(
+ std::extent<decltype(Vp9SegmentationParams::feature_enabled)>() ==
+ std::extent<decltype(slice_param.seg_param)>(),
+ "seg_param array of incorrect size");
+ for (size_t i = 0; i < base::size(slice_param.seg_param); ++i) {
VASegmentParameterVP9& seg_param = slice_param.seg_param[i];
#define SEG_TO_SP_SF(a, b) seg_param.segment_flags.fields.a = b
SEG_TO_SP_SF(
diff --git a/chromium/media/gpu/vaapi/vaapi_wrapper.cc b/chromium/media/gpu/vaapi/vaapi_wrapper.cc
index b4156423f7b..93d7c98b80e 100644
--- a/chromium/media/gpu/vaapi/vaapi_wrapper.cc
+++ b/chromium/media/gpu/vaapi/vaapi_wrapper.cc
@@ -4,6 +4,8 @@
#include "media/gpu/vaapi/vaapi_wrapper.h"
+#include <type_traits>
+
#include <dlfcn.h>
#include <string.h>
@@ -195,7 +197,6 @@ class VADisplayState {
// Initialize static data before sandbox is enabled.
static void PreSandboxInitialization();
- // |va_lock_| must be held on entry.
bool Initialize();
void Deinitialize(VAStatus* status);
@@ -212,10 +213,9 @@ class VADisplayState {
~VADisplayState() = default;
// Implementation of Initialize() called only once.
- bool InitializeOnce();
+ bool InitializeOnce() EXCLUSIVE_LOCKS_REQUIRED(va_lock_);
- // Protected by |va_lock_|.
- int refcount_;
+ int refcount_ GUARDED_BY(va_lock_);
// Libva is not thread safe, so we have to do locking for it ourselves.
// This lock is to be taken for the duration of all VA-API calls and for
@@ -257,7 +257,7 @@ VADisplayState::VADisplayState()
: refcount_(0), va_display_(nullptr), va_initialized_(false) {}
bool VADisplayState::Initialize() {
- va_lock_.AssertAcquired();
+ base::AutoLock auto_lock(va_lock_);
if (!IsVaInitialized() ||
#if defined(USE_X11)
@@ -278,6 +278,9 @@ bool VADisplayState::Initialize() {
}
bool VADisplayState::InitializeOnce() {
+ static_assert(VA_MAJOR_VERSION >= 1 && VA_MINOR_VERSION >= 1,
+ "Requires VA-API >= 1.1.0");
+
switch (gl::GetGLImplementation()) {
case gl::kGLImplementationEGLGLES2:
va_display_ = vaGetDisplayDRM(drm_fd_.get());
@@ -333,17 +336,25 @@ bool VADisplayState::InitializeOnce() {
DVLOG(1) << "VAAPI version: " << major_version << "." << minor_version << " "
<< va_vendor_string_;
- if (major_version != VA_MAJOR_VERSION || minor_version != VA_MINOR_VERSION) {
- LOG(ERROR) << "This build of Chromium requires VA-API version "
- << VA_MAJOR_VERSION << "." << VA_MINOR_VERSION
- << ", system version: " << major_version << "." << minor_version;
+ // The VAAPI version is determined from what is loaded on the system by
+ // calling vaInitialize(). Since the libva is now ABI-compatible, relax the
+ // version check which helps in upgrading the libva, without breaking any
+ // existing functionality. Make sure the system version is not older than
+ // the version with which the chromium is built since libva is only
+ // guaranteed to be backward (and not forward) compatible.
+ if (VA_MAJOR_VERSION > major_version ||
+ (VA_MAJOR_VERSION == major_version && VA_MINOR_VERSION > minor_version)) {
+ LOG(ERROR) << "The system version " << major_version << "." << minor_version
+ << " should be greater than or equal to "
+ << VA_MAJOR_VERSION << "." << VA_MINOR_VERSION;
return false;
}
return true;
}
void VADisplayState::Deinitialize(VAStatus* status) {
- va_lock_.AssertAcquired();
+ base::AutoLock auto_lock(va_lock_);
+
if (--refcount_ > 0)
return;
@@ -363,6 +374,10 @@ static std::vector<VAConfigAttrib> GetRequiredAttribs(
VAProfile profile) {
std::vector<VAConfigAttrib> required_attribs;
+ // No attribute for kVideoProcess.
+ if (mode == VaapiWrapper::kVideoProcess)
+ return required_attribs;
+
// VAConfigAttribRTFormat is common to both encode and decode |mode|s.
if (profile == VAProfileVP9Profile2 || profile == VAProfileVP9Profile3) {
required_attribs.push_back(
@@ -398,6 +413,8 @@ static VAEntrypoint GetVaEntryPoint(VaapiWrapper::CodecMode mode,
return VAEntrypointEncPicture;
else
return VAEntrypointEncSlice;
+ case VaapiWrapper::kVideoProcess:
+ return VAEntrypointVideoProc;
case VaapiWrapper::kCodecModeMax:
NOTREACHED();
return VAEntrypointVLD;
@@ -432,29 +449,30 @@ class VASupportedProfiles {
std::vector<ProfileInfo> GetSupportedProfileInfosForCodecModeInternal(
VaapiWrapper::CodecMode mode) const;
- // |va_lock_| must be held on entry in the following _Locked methods.
-
// Checks if |va_profile| supports |entrypoint| or not.
bool IsEntrypointSupported_Locked(VAProfile va_profile,
- VAEntrypoint entrypoint) const;
+ VAEntrypoint entrypoint) const
+ EXCLUSIVE_LOCKS_REQUIRED(va_lock_);
// Returns true if |va_profile| for |entrypoint| with |required_attribs| is
// supported.
bool AreAttribsSupported_Locked(
VAProfile va_profile,
VAEntrypoint entrypoint,
- const std::vector<VAConfigAttrib>& required_attribs) const;
+ const std::vector<VAConfigAttrib>& required_attribs) const
+ EXCLUSIVE_LOCKS_REQUIRED(va_lock_);
// Gets maximum resolution for |va_profile| and |entrypoint| with
// |required_attribs|. If return value is true, |resolution| is the maximum
// resolution.
bool GetMaxResolution_Locked(VAProfile va_profile,
VAEntrypoint entrypoint,
std::vector<VAConfigAttrib>& required_attribs,
- gfx::Size* resolution) const;
+ gfx::Size* resolution) const
+ EXCLUSIVE_LOCKS_REQUIRED(va_lock_);
std::vector<ProfileInfo> supported_profiles_[VaapiWrapper::kCodecModeMax];
// Pointer to VADisplayState's members |va_lock_| and its |va_display_|.
base::Lock* va_lock_;
- VADisplay va_display_;
+ VADisplay va_display_ GUARDED_BY(va_lock_);
const base::Closure report_error_to_uma_cb_;
@@ -483,30 +501,35 @@ bool VASupportedProfiles::IsProfileSupported(VaapiWrapper::CodecMode mode,
}
VASupportedProfiles::VASupportedProfiles()
- : va_display_(nullptr), report_error_to_uma_cb_(base::DoNothing()) {
+ : va_lock_(VADisplayState::Get()->va_lock()),
+ va_display_(nullptr),
+ report_error_to_uma_cb_(base::DoNothing()) {
VADisplayState* display_state = VADisplayState::Get();
- va_lock_ = display_state->va_lock();
- static_assert(arraysize(supported_profiles_) == VaapiWrapper::kCodecModeMax,
+ static_assert(std::extent<decltype(supported_profiles_)>() ==
+ VaapiWrapper::kCodecModeMax,
"The array size of supported profile is incorrect.");
+
+ if (!display_state->Initialize())
+ return;
+
{
base::AutoLock auto_lock(*va_lock_);
- if (!display_state->Initialize())
- return;
+ va_display_ = display_state->va_display();
}
- va_display_ = display_state->va_display();
DCHECK(va_display_) << "VADisplayState hasn't been properly Initialize()d";
for (size_t i = 0; i < VaapiWrapper::kCodecModeMax; ++i) {
supported_profiles_[i] = GetSupportedProfileInfosForCodecModeInternal(
static_cast<VaapiWrapper::CodecMode>(i));
}
+ VAStatus va_res = VA_STATUS_SUCCESS;
+ display_state->Deinitialize(&va_res);
+ VA_LOG_ON_ERROR(va_res, "VADisplayState::Deinitialize failed");
+
{
base::AutoLock auto_lock(*va_lock_);
- VAStatus va_res = VA_STATUS_SUCCESS;
- display_state->Deinitialize(&va_res);
- VA_LOG_ON_ERROR(va_res, "VADisplayState::Deinitialize failed");
va_display_ = nullptr;
}
}
@@ -514,6 +537,9 @@ VASupportedProfiles::VASupportedProfiles()
std::vector<VASupportedProfiles::ProfileInfo>
VASupportedProfiles::GetSupportedProfileInfosForCodecModeInternal(
VaapiWrapper::CodecMode mode) const {
+ if (mode == VaapiWrapper::kVideoProcess)
+ return {ProfileInfo{VAProfileNone, gfx::Size()}};
+
std::vector<ProfileInfo> supported_profile_infos;
std::vector<VAProfile> va_profiles;
if (!GetSupportedVAProfiles(&va_profiles))
@@ -677,7 +703,7 @@ bool VASupportedProfiles::GetMaxResolution_Locked(
VAProfile ProfileToVAProfile(VideoCodecProfile profile,
VaapiWrapper::CodecMode mode) {
VAProfile va_profile = VAProfileNone;
- for (size_t i = 0; i < arraysize(kProfileMap); ++i) {
+ for (size_t i = 0; i < base::size(kProfileMap); ++i) {
if (kProfileMap[i].profile == profile) {
va_profile = kProfileMap[i].va_profile;
break;
@@ -719,9 +745,12 @@ class VASupportedImageFormats {
VASupportedImageFormats();
~VASupportedImageFormats() = default;
- // Initialize the list of supported image formats. The VA display should be
- // locked upon calling this function.
- bool InitSupportedImageFormats();
+ // Initialize the list of supported image formats.
+ bool InitSupportedImageFormats_Locked() EXCLUSIVE_LOCKS_REQUIRED(va_lock_);
+
+ // Pointer to VADisplayState's members |va_lock_| and its |va_display_|.
+ base::Lock* va_lock_;
+ VADisplay va_display_ GUARDED_BY(va_lock_);
std::vector<VAImageFormat> supported_formats_;
const base::RepeatingClosure report_error_to_uma_cb_;
@@ -745,33 +774,31 @@ bool VASupportedImageFormats::IsImageFormatSupported(
}
VASupportedImageFormats::VASupportedImageFormats()
- : report_error_to_uma_cb_(base::DoNothing()) {
+ : va_lock_(VADisplayState::Get()->va_lock()),
+ report_error_to_uma_cb_(base::DoNothing()) {
VADisplayState* display_state = VADisplayState::Get();
- base::Lock* va_lock = display_state->va_lock();
- base::AutoLock auto_lock(*va_lock);
-
if (!display_state->Initialize())
return;
- VADisplay va_display = display_state->va_display();
- DCHECK(va_display) << "VADisplayState hasn't been properly initialized";
+ {
+ base::AutoLock auto_lock(*va_lock_);
+ va_display_ = display_state->va_display();
+ DCHECK(va_display_) << "VADisplayState hasn't been properly initialized";
- if (!InitSupportedImageFormats())
- LOG(ERROR) << "Failed to get supported image formats";
+ if (!InitSupportedImageFormats_Locked())
+ LOG(ERROR) << "Failed to get supported image formats";
+ }
VAStatus va_res = VA_STATUS_SUCCESS;
display_state->Deinitialize(&va_res);
VA_LOG_ON_ERROR(va_res, "VADisplayState::Deinitialize failed");
}
-bool VASupportedImageFormats::InitSupportedImageFormats() {
- VADisplayState* display_state = VADisplayState::Get();
- display_state->va_lock()->AssertAcquired();
- VADisplay va_display = display_state->va_display();
- DCHECK(va_display) << "VADisplayState hasn't been properly initialized";
+bool VASupportedImageFormats::InitSupportedImageFormats_Locked() {
+ va_lock_->AssertAcquired();
// Query the driver for the max number of image formats and allocate space.
- const int max_image_formats = vaMaxNumImageFormats(va_display);
+ const int max_image_formats = vaMaxNumImageFormats(va_display_);
if (max_image_formats < 0) {
LOG(ERROR) << "vaMaxNumImageFormats returned: " << max_image_formats;
return false;
@@ -780,8 +807,8 @@ bool VASupportedImageFormats::InitSupportedImageFormats() {
// Query the driver for the list of supported image formats.
int num_image_formats;
- VAStatus va_res = vaQueryImageFormats(va_display, supported_formats_.data(),
- &num_image_formats);
+ const VAStatus va_res = vaQueryImageFormats(
+ va_display_, supported_formats_.data(), &num_image_formats);
VA_SUCCESS_OR_RETURN(va_res, "vaQueryImageFormats failed", false);
if (num_image_formats < 0 || num_image_formats > max_image_formats) {
LOG(ERROR) << "vaQueryImageFormats returned: " << num_image_formats;
@@ -831,7 +858,7 @@ VaapiWrapper::GetSupportedEncodeProfiles() {
const std::vector<VASupportedProfiles::ProfileInfo>& encode_profile_infos =
VASupportedProfiles::Get().GetSupportedProfileInfosForCodecMode(kEncode);
- for (size_t i = 0; i < arraysize(kProfileMap); ++i) {
+ for (size_t i = 0; i < base::size(kProfileMap); ++i) {
VAProfile va_profile = ProfileToVAProfile(kProfileMap[i].profile, kEncode);
if (va_profile == VAProfileNone)
continue;
@@ -857,7 +884,7 @@ VaapiWrapper::GetSupportedDecodeProfiles() {
const std::vector<VASupportedProfiles::ProfileInfo>& decode_profile_infos =
VASupportedProfiles::Get().GetSupportedProfileInfosForCodecMode(kDecode);
- for (size_t i = 0; i < arraysize(kProfileMap); ++i) {
+ for (size_t i = 0; i < base::size(kProfileMap); ++i) {
VAProfile va_profile = ProfileToVAProfile(kProfileMap[i].profile, kDecode);
if (va_profile == VAProfileNone)
continue;
@@ -1226,23 +1253,23 @@ bool VaapiWrapper::UploadVideoFrameToSurface(
return ret == 0;
}
-bool VaapiWrapper::CreateCodedBuffer(size_t size, VABufferID* buffer_id) {
+bool VaapiWrapper::CreateVABuffer(size_t size, VABufferID* buffer_id) {
base::AutoLock auto_lock(*va_lock_);
VAStatus va_res =
vaCreateBuffer(va_display_, va_context_id_, VAEncCodedBufferType, size, 1,
NULL, buffer_id);
VA_SUCCESS_OR_RETURN(va_res, "Failed to create a coded buffer", false);
- const auto is_new_entry = coded_buffers_.insert(*buffer_id).second;
+ const auto is_new_entry = va_buffers_.insert(*buffer_id).second;
DCHECK(is_new_entry);
return true;
}
-bool VaapiWrapper::DownloadFromCodedBuffer(VABufferID buffer_id,
- VASurfaceID sync_surface_id,
- uint8_t* target_ptr,
- size_t target_size,
- size_t* coded_data_size) {
+bool VaapiWrapper::DownloadFromVABuffer(VABufferID buffer_id,
+ VASurfaceID sync_surface_id,
+ uint8_t* target_ptr,
+ size_t target_size,
+ size_t* coded_data_size) {
DCHECK(target_ptr);
base::AutoLock auto_lock(*va_lock_);
@@ -1282,47 +1309,42 @@ bool VaapiWrapper::DownloadFromCodedBuffer(VABufferID buffer_id,
return buffer_segment == nullptr;
}
-bool VaapiWrapper::DownloadAndDestroyCodedBuffer(VABufferID buffer_id,
- VASurfaceID sync_surface_id,
- uint8_t* target_ptr,
- size_t target_size,
- size_t* coded_data_size) {
- bool result = DownloadFromCodedBuffer(buffer_id, sync_surface_id, target_ptr,
- target_size, coded_data_size);
+bool VaapiWrapper::DownloadAndDestroyVABuffer(VABufferID buffer_id,
+ VASurfaceID sync_surface_id,
+ uint8_t* target_ptr,
+ size_t target_size,
+ size_t* coded_data_size) {
+ bool result = DownloadFromVABuffer(buffer_id, sync_surface_id, target_ptr,
+ target_size, coded_data_size);
+ base::AutoLock auto_lock(*va_lock_);
VAStatus va_res = vaDestroyBuffer(va_display_, buffer_id);
VA_LOG_ON_ERROR(va_res, "vaDestroyBuffer failed");
- const auto was_found = coded_buffers_.erase(buffer_id);
+ const auto was_found = va_buffers_.erase(buffer_id);
DCHECK(was_found);
return result;
}
-void VaapiWrapper::DestroyCodedBuffers() {
+void VaapiWrapper::DestroyVABuffers() {
base::AutoLock auto_lock(*va_lock_);
- for (std::set<VABufferID>::const_iterator iter = coded_buffers_.begin();
- iter != coded_buffers_.end(); ++iter) {
- VAStatus va_res = vaDestroyBuffer(va_display_, *iter);
+ for (auto it = va_buffers_.begin(); it != va_buffers_.end(); ++it) {
+ VAStatus va_res = vaDestroyBuffer(va_display_, *it);
VA_LOG_ON_ERROR(va_res, "vaDestroyBuffer failed");
}
- coded_buffers_.clear();
+ va_buffers_.clear();
}
bool VaapiWrapper::BlitSurface(
const scoped_refptr<VASurface>& va_surface_src,
const scoped_refptr<VASurface>& va_surface_dest) {
base::AutoLock auto_lock(*va_lock_);
-
- // Initialize the post processing engine if not already done.
- if (va_vpp_buffer_id_ == VA_INVALID_ID) {
- if (!InitializeVpp_Locked())
- return false;
- }
-
+ DCHECK_EQ(va_buffers_.size(), 1u);
+ VABufferID buffer_id = *va_buffers_.begin();
{
- ScopedVABufferMapping mapping(va_lock_, va_display_, va_vpp_buffer_id_);
+ ScopedVABufferMapping mapping(va_lock_, va_display_, buffer_id);
if (!mapping.IsValid())
return false;
auto* pipeline_param =
@@ -1353,14 +1375,14 @@ bool VaapiWrapper::BlitSurface(
}
VA_SUCCESS_OR_RETURN(
- vaBeginPicture(va_display_, va_vpp_context_id_, va_surface_dest->id()),
+ vaBeginPicture(va_display_, va_context_id_, va_surface_dest->id()),
"Couldn't begin picture", false);
VA_SUCCESS_OR_RETURN(
- vaRenderPicture(va_display_, va_vpp_context_id_, &va_vpp_buffer_id_, 1),
+ vaRenderPicture(va_display_, va_context_id_, &buffer_id, 1),
"Couldn't render picture", false);
- VA_SUCCESS_OR_RETURN(vaEndPicture(va_display_, va_vpp_context_id_),
+ VA_SUCCESS_OR_RETURN(vaEndPicture(va_display_, va_context_id_),
"Couldn't end picture", false);
return true;
@@ -1404,65 +1426,83 @@ void VaapiWrapper::PreSandboxInitialization() {
}
VaapiWrapper::VaapiWrapper()
- : va_surface_format_(0),
+ : va_lock_(VADisplayState::Get()->va_lock()),
+ va_surface_format_(0),
va_display_(NULL),
va_config_id_(VA_INVALID_ID),
- va_context_id_(VA_INVALID_ID),
- va_vpp_config_id_(VA_INVALID_ID),
- va_vpp_context_id_(VA_INVALID_ID),
- va_vpp_buffer_id_(VA_INVALID_ID) {
- va_lock_ = VADisplayState::Get()->va_lock();
-}
+ va_context_id_(VA_INVALID_ID) {}
VaapiWrapper::~VaapiWrapper() {
DestroyPendingBuffers();
- DestroyCodedBuffers();
+ DestroyVABuffers();
DestroyContextAndSurfaces();
- DeinitializeVpp();
Deinitialize();
}
bool VaapiWrapper::Initialize(CodecMode mode, VAProfile va_profile) {
- TryToSetVADisplayAttributeToLocalGPU();
+ if (mode != kVideoProcess)
+ TryToSetVADisplayAttributeToLocalGPU();
VAEntrypoint entrypoint = GetVaEntryPoint(mode, va_profile);
std::vector<VAConfigAttrib> required_attribs =
GetRequiredAttribs(mode, va_profile);
+
base::AutoLock auto_lock(*va_lock_);
+
VAStatus va_res =
- vaCreateConfig(va_display_, va_profile, entrypoint, &required_attribs[0],
+ vaCreateConfig(va_display_, va_profile, entrypoint,
+ required_attribs.empty() ? nullptr : &required_attribs[0],
required_attribs.size(), &va_config_id_);
VA_SUCCESS_OR_RETURN(va_res, "vaCreateConfig failed", false);
+ if (mode != kVideoProcess)
+ return true;
+
+ // Creates context and buffer here in the case of kVideoProcess.
+ constexpr size_t kIrrelevantWidth = 0;
+ constexpr size_t kIrrelevantHeight = 0;
+ va_res = vaCreateContext(va_display_, va_config_id_, kIrrelevantWidth,
+ kIrrelevantHeight, 0, NULL, 0, &va_context_id_);
+ VA_SUCCESS_OR_RETURN(va_res, "Couldn't create context", false);
+
+ VABufferID buffer_id;
+ va_res = vaCreateBuffer(
+ va_display_, va_context_id_, VAProcPipelineParameterBufferType,
+ sizeof(VAProcPipelineParameterBuffer), 1, NULL, &buffer_id);
+ VA_SUCCESS_OR_RETURN(va_res, "Couldn't create buffer", false);
+ DCHECK_NE(buffer_id, VA_INVALID_ID);
+ va_buffers_.emplace(buffer_id);
+
return true;
}
void VaapiWrapper::Deinitialize() {
- base::AutoLock auto_lock(*va_lock_);
-
- if (va_config_id_ != VA_INVALID_ID) {
- VAStatus va_res = vaDestroyConfig(va_display_, va_config_id_);
- VA_LOG_ON_ERROR(va_res, "vaDestroyConfig failed");
+ {
+ base::AutoLock auto_lock(*va_lock_);
+ if (va_config_id_ != VA_INVALID_ID) {
+ VAStatus va_res = vaDestroyConfig(va_display_, va_config_id_);
+ VA_LOG_ON_ERROR(va_res, "vaDestroyConfig failed");
+ }
+ va_config_id_ = VA_INVALID_ID;
+ va_display_ = nullptr;
}
VAStatus va_res = VA_STATUS_SUCCESS;
VADisplayState::Get()->Deinitialize(&va_res);
VA_LOG_ON_ERROR(va_res, "vaTerminate failed");
-
- va_config_id_ = VA_INVALID_ID;
- va_display_ = NULL;
}
bool VaapiWrapper::VaInitialize(const base::Closure& report_error_to_uma_cb) {
report_error_to_uma_cb_ = report_error_to_uma_cb;
+
+ if (!VADisplayState::Get()->Initialize())
+ return false;
+
{
base::AutoLock auto_lock(*va_lock_);
- if (!VADisplayState::Get()->Initialize())
- return false;
+ va_display_ = VADisplayState::Get()->va_display();
+ DCHECK(va_display_) << "VADisplayState hasn't been properly Initialize()d";
}
-
- va_display_ = VADisplayState::Get()->va_display();
- DCHECK(va_display_) << "VADisplayState hasn't been properly Initialize()d";
return true;
}
@@ -1473,46 +1513,6 @@ void VaapiWrapper::DestroySurface(VASurfaceID va_surface_id) {
VA_LOG_ON_ERROR(va_res, "vaDestroySurfaces on surface failed");
}
-bool VaapiWrapper::InitializeVpp_Locked() {
- va_lock_->AssertAcquired();
-
- VA_SUCCESS_OR_RETURN(
- vaCreateConfig(va_display_, VAProfileNone, VAEntrypointVideoProc, NULL, 0,
- &va_vpp_config_id_),
- "Couldn't create config", false);
-
- // The size of the picture for the context is irrelevant in the case
- // of the VPP, just passing 1x1.
- VA_SUCCESS_OR_RETURN(vaCreateContext(va_display_, va_vpp_config_id_, 1, 1, 0,
- NULL, 0, &va_vpp_context_id_),
- "Couldn't create context", false);
-
- VA_SUCCESS_OR_RETURN(vaCreateBuffer(va_display_, va_vpp_context_id_,
- VAProcPipelineParameterBufferType,
- sizeof(VAProcPipelineParameterBuffer), 1,
- NULL, &va_vpp_buffer_id_),
- "Couldn't create buffer", false);
-
- return true;
-}
-
-void VaapiWrapper::DeinitializeVpp() {
- base::AutoLock auto_lock(*va_lock_);
-
- if (va_vpp_buffer_id_ != VA_INVALID_ID) {
- vaDestroyBuffer(va_display_, va_vpp_buffer_id_);
- va_vpp_buffer_id_ = VA_INVALID_ID;
- }
- if (va_vpp_context_id_ != VA_INVALID_ID) {
- vaDestroyContext(va_display_, va_vpp_context_id_);
- va_vpp_context_id_ = VA_INVALID_ID;
- }
- if (va_vpp_config_id_ != VA_INVALID_ID) {
- vaDestroyConfig(va_display_, va_vpp_config_id_);
- va_vpp_config_id_ = VA_INVALID_ID;
- }
-}
-
bool VaapiWrapper::Execute(VASurfaceID va_surface_id) {
TRACE_EVENT0("media,gpu", "VaapiWrapper::Execute");
base::AutoLock auto_lock(*va_lock_);
diff --git a/chromium/media/gpu/vaapi/vaapi_wrapper.h b/chromium/media/gpu/vaapi/vaapi_wrapper.h
index dc1fdef7e5c..07bf1a77ef0 100644
--- a/chromium/media/gpu/vaapi/vaapi_wrapper.h
+++ b/chromium/media/gpu/vaapi/vaapi_wrapper.h
@@ -22,6 +22,7 @@
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/synchronization/lock.h"
+#include "base/thread_annotations.h"
#include "media/base/video_decoder_config.h"
#include "media/base/video_frame.h"
#include "media/gpu/media_gpu_export.h"
@@ -53,13 +54,13 @@ class ScopedVAImage;
// It is also responsible for managing and freeing VABuffers (not VASurfaces),
// which are used to queue parameters and slice data to the HW codec,
// as well as underlying memory for VASurfaces themselves.
-// TODO(crbug.com/909547): Use GUARDED_BY in VaapiWrapper.
class MEDIA_GPU_EXPORT VaapiWrapper
: public base::RefCountedThreadSafe<VaapiWrapper> {
public:
enum CodecMode {
kDecode,
kEncode,
+ kVideoProcess,
kCodecModeMax,
};
@@ -109,7 +110,7 @@ class MEDIA_GPU_EXPORT VaapiWrapper
// Creates a VA Context associated with |format| and |size|, and sets
// |va_context_id_|. The |va_context_id_| will be destroyed by
// DestroyContextAndSurfaces().
- bool CreateContext(unsigned int va_format, const gfx::Size& size);
+ virtual bool CreateContext(unsigned int va_format, const gfx::Size& size);
// Frees all memory allocated in CreateContextAndSurfaces() and destroys
// |va_context_id_|.
@@ -179,7 +180,7 @@ class MEDIA_GPU_EXPORT VaapiWrapper
VASurfaceID va_surface_id);
// Create a buffer of |size| bytes to be used as encode output.
- bool CreateCodedBuffer(size_t size, VABufferID* buffer_id);
+ bool CreateVABuffer(size_t size, VABufferID* buffer_id);
// Download the contents of the buffer with given |buffer_id| into a buffer of
// size |target_size|, pointed to by |target_ptr|. The number of bytes
@@ -187,22 +188,22 @@ class MEDIA_GPU_EXPORT VaapiWrapper
// be used as a sync point, i.e. it will have to become idle before starting
// the download. |sync_surface_id| should be the source surface passed
// to the encode job.
- bool DownloadFromCodedBuffer(VABufferID buffer_id,
- VASurfaceID sync_surface_id,
- uint8_t* target_ptr,
- size_t target_size,
- size_t* coded_data_size);
+ bool DownloadFromVABuffer(VABufferID buffer_id,
+ VASurfaceID sync_surface_id,
+ uint8_t* target_ptr,
+ size_t target_size,
+ size_t* coded_data_size);
- // See DownloadFromCodedBuffer() for details. After downloading, it deletes
+ // See DownloadFromVABuffer() for details. After downloading, it deletes
// the VA buffer with |buffer_id|.
- bool DownloadAndDestroyCodedBuffer(VABufferID buffer_id,
- VASurfaceID sync_surface_id,
- uint8_t* target_ptr,
- size_t target_size,
- size_t* coded_data_size);
+ bool DownloadAndDestroyVABuffer(VABufferID buffer_id,
+ VASurfaceID sync_surface_id,
+ uint8_t* target_ptr,
+ size_t target_size,
+ size_t* coded_data_size);
- // Destroy all previously-allocated (and not yet destroyed) coded buffers.
- void DestroyCodedBuffers();
+ // Destroy all previously-allocated (and not yet destroyed) buffers.
+ void DestroyVABuffers();
// Blits a VASurface |va_surface_src| into another VASurface
// |va_surface_dest| applying pixel format conversion and scaling
@@ -231,13 +232,6 @@ class MEDIA_GPU_EXPORT VaapiWrapper
// Destroys a |va_surface_id|.
void DestroySurface(VASurfaceID va_surface_id);
- // Initialize the video post processing context with the |size| of
- // the input pictures to be processed.
- bool InitializeVpp_Locked();
-
- // Deinitialize the video post processing context.
- void DeinitializeVpp();
-
// Execute pending job in hardware and destroy pending buffers. Return false
// if vaapi driver refuses to accept parameter or slice buffers submitted
// by client, or if execution fails in hardware.
@@ -258,7 +252,7 @@ class MEDIA_GPU_EXPORT VaapiWrapper
// VA handles.
// All valid after successful Initialize() and until Deinitialize().
- VADisplay va_display_;
+ VADisplay va_display_ GUARDED_BY(va_lock_);
VAConfigID va_config_id_;
// Created in CreateContext() or CreateContextAndSurfaces() and valid until
// DestroyContextAndSurfaces().
@@ -268,20 +262,13 @@ class MEDIA_GPU_EXPORT VaapiWrapper
std::vector<VABufferID> pending_slice_bufs_;
std::vector<VABufferID> pending_va_bufs_;
- // Bitstream buffers for encode.
- std::set<VABufferID> coded_buffers_;
+ // Buffers for kEncode or kVideoProcess.
+ std::set<VABufferID> va_buffers_;
// Called to report codec errors to UMA. Errors to clients are reported via
// return values from public methods.
base::Closure report_error_to_uma_cb_;
- // VPP (Video Post Processing) context, this is used to convert
- // pictures used by the decoder to RGBA pictures usable by GL or the
- // display hardware.
- VAConfigID va_vpp_config_id_;
- VAContextID va_vpp_context_id_;
- VABufferID va_vpp_buffer_id_;
-
DISALLOW_COPY_AND_ASSIGN(VaapiWrapper);
};
diff --git a/chromium/media/gpu/vaapi/vp8_encoder.cc b/chromium/media/gpu/vaapi/vp8_encoder.cc
index 054e7f4431d..53c196b7e3d 100644
--- a/chromium/media/gpu/vaapi/vp8_encoder.cc
+++ b/chromium/media/gpu/vaapi/vp8_encoder.cc
@@ -155,12 +155,8 @@ void VP8Encoder::InitializeFrameHeader() {
}
void VP8Encoder::UpdateFrameHeader(bool keyframe) {
- current_frame_hdr_.frame_type =
- keyframe ? Vp8FrameHeader::KEYFRAME : Vp8FrameHeader::INTERFRAME;
-}
-
-void VP8Encoder::UpdateReferenceFrames(scoped_refptr<VP8Picture> picture) {
- if (current_frame_hdr_.IsKeyframe()) {
+ if (keyframe) {
+ current_frame_hdr_.frame_type = Vp8FrameHeader::KEYFRAME;
current_frame_hdr_.refresh_last = true;
current_frame_hdr_.refresh_golden_frame = true;
current_frame_hdr_.refresh_alternate_frame = true;
@@ -169,6 +165,7 @@ void VP8Encoder::UpdateReferenceFrames(scoped_refptr<VP8Picture> picture) {
current_frame_hdr_.copy_buffer_to_alternate =
Vp8FrameHeader::NO_ALT_REFRESH;
} else {
+ current_frame_hdr_.frame_type = Vp8FrameHeader::INTERFRAME;
// TODO(sprang): Add temporal layer support.
current_frame_hdr_.refresh_last = true;
current_frame_hdr_.refresh_golden_frame = false;
@@ -178,7 +175,9 @@ void VP8Encoder::UpdateReferenceFrames(scoped_refptr<VP8Picture> picture) {
current_frame_hdr_.copy_buffer_to_alternate =
Vp8FrameHeader::COPY_GOLDEN_TO_ALT;
}
+}
+void VP8Encoder::UpdateReferenceFrames(scoped_refptr<VP8Picture> picture) {
reference_frames_.Refresh(picture);
}
diff --git a/chromium/media/gpu/video_decode_accelerator_tests.cc b/chromium/media/gpu/video_decode_accelerator_tests.cc
index 5a7e8a28e65..b5e3d2d67b4 100644
--- a/chromium/media/gpu/video_decode_accelerator_tests.cc
+++ b/chromium/media/gpu/video_decode_accelerator_tests.cc
@@ -5,12 +5,14 @@
#include "base/at_exit.h"
#include "base/command_line.h"
#include "base/test/scoped_task_environment.h"
-
+#include "base/test/test_timeouts.h"
#include "media/base/test_data_util.h"
#include "media/gpu/buildflags.h"
+#include "media/gpu/test/video_frame_validator.h"
#include "media/gpu/test/video_player/frame_renderer_dummy.h"
#include "media/gpu/test/video_player/video.h"
#include "media/gpu/test/video_player/video_collection.h"
+#include "media/gpu/test/video_player/video_decoder_client.h"
#include "media/gpu/test/video_player/video_player.h"
#include "mojo/core/embedder/embedder.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -23,19 +25,21 @@ namespace media {
namespace test {
namespace {
-// Test environment for video decode tests.
+// Test environment for video decode tests. Performs setup and teardown once for
+// the entire test run.
class VideoDecoderTestEnvironment : public ::testing::Environment {
public:
- VideoDecoderTestEnvironment() {}
+ explicit VideoDecoderTestEnvironment(const Video* video) : video_(video) {}
virtual ~VideoDecoderTestEnvironment() {}
- // Setup up the video decode test environment, only called once.
+ // Set up the video decode test environment, only called once.
void SetUp() override;
// Tear down the video decode test environment, only called once.
void TearDown() override;
std::unique_ptr<base::test::ScopedTaskEnvironment> task_environment_;
std::unique_ptr<FrameRendererDummy> dummy_frame_renderer_;
+ const Video* const video_;
// An exit manager is required to run callbacks on shutdown.
base::AtExitManager at_exit_manager;
@@ -45,20 +49,21 @@ void VideoDecoderTestEnvironment::SetUp() {
// Setting up a task environment will create a task runner for the current
// thread and allow posting tasks to other threads. This is required for the
// test video player to function correctly.
+ TestTimeouts::Initialize();
task_environment_ = std::make_unique<base::test::ScopedTaskEnvironment>(
base::test::ScopedTaskEnvironment::MainThreadType::UI);
// Set the default test data path.
media::test::Video::SetTestDataPath(media::GetTestDataPath());
- dummy_frame_renderer_ = FrameRendererDummy::Create();
- ASSERT_NE(dummy_frame_renderer_, nullptr);
-
// Perform all static initialization that is required when running video
// decoders in a test environment.
#if BUILDFLAG(USE_VAAPI)
media::VaapiWrapper::PreSandboxInitialization();
#endif
+
+ dummy_frame_renderer_ = FrameRendererDummy::Create();
+ ASSERT_NE(dummy_frame_renderer_, nullptr);
}
void VideoDecoderTestEnvironment::TearDown() {
@@ -68,20 +73,186 @@ void VideoDecoderTestEnvironment::TearDown() {
media::test::VideoDecoderTestEnvironment* g_env;
+// Video decode test class. Performs setup and teardown for each single test.
+class VideoDecoderTest : public ::testing::Test {
+ public:
+ std::unique_ptr<VideoPlayer> CreateVideoPlayer(
+ const Video* video,
+ const VideoDecoderClientConfig& config = VideoDecoderClientConfig()) {
+ frame_validator_ =
+ media::test::VideoFrameValidator::Create(video->FrameChecksums());
+ return VideoPlayer::Create(video, g_env->dummy_frame_renderer_.get(),
+ {frame_validator_.get()}, config);
+ }
+
+ protected:
+ std::unique_ptr<VideoFrameValidator> frame_validator_;
+};
+
} // namespace
-// TODO(dstaessens@)
-// * Fetch the expected number of frames from the video file's metadata.
-TEST(VideoDecodeAcceleratorTest, BasicPlayTest) {
- const Video* video = &kDefaultTestVideoCollection[0];
- auto tvp = VideoPlayer::Create(video, g_env->dummy_frame_renderer_.get());
- ASSERT_NE(tvp, nullptr);
+// Play video from start to end. Wait for the kFlushDone event at the end of the
+// stream, that notifies us all frames have been decoded.
+TEST_F(VideoDecoderTest, FlushAtEndOfStream) {
+ auto tvp = CreateVideoPlayer(g_env->video_);
+
+ tvp->Play();
+ EXPECT_TRUE(tvp->WaitForFlushDone());
+
+ EXPECT_EQ(tvp->GetFlushDoneCount(), 1u);
+ EXPECT_EQ(tvp->GetFrameDecodedCount(), g_env->video_->NumFrames());
+ EXPECT_TRUE(frame_validator_->WaitUntilValidated());
+}
+
+// Flush the decoder immediately after initialization.
+TEST_F(VideoDecoderTest, FlushAfterInitialize) {
+ auto tvp = CreateVideoPlayer(g_env->video_);
+
+ tvp->Flush();
+ EXPECT_TRUE(tvp->WaitForFlushDone());
+ tvp->Play();
+ EXPECT_TRUE(tvp->WaitForFlushDone());
+
+ EXPECT_EQ(tvp->GetFlushDoneCount(), 2u);
+ EXPECT_EQ(tvp->GetFrameDecodedCount(), g_env->video_->NumFrames());
+ EXPECT_TRUE(frame_validator_->WaitUntilValidated());
+}
+
+// Flush the decoder immediately after doing a mid-stream reset, without waiting
+// for a kResetDone event.
+TEST_F(VideoDecoderTest, FlushBeforeResetDone) {
+ auto tvp = CreateVideoPlayer(g_env->video_);
+
+ tvp->Play();
+ EXPECT_TRUE(tvp->WaitForFrameDecoded(g_env->video_->NumFrames() / 2));
+ tvp->Reset();
+ tvp->Flush();
+ EXPECT_TRUE(tvp->WaitForResetDone());
+ EXPECT_TRUE(tvp->WaitForFlushDone());
+
+ // As flush doesn't cancel reset, we should have received a single kResetDone
+ // and kFlushDone event. We didn't decode the entire video, but more frames
+ // might be decoded by the time we called reset, so we can only check whether
+ // the decoded frame count is <= the total number of frames.
+ EXPECT_EQ(tvp->GetResetDoneCount(), 1u);
+ EXPECT_EQ(tvp->GetFlushDoneCount(), 1u);
+ EXPECT_LE(tvp->GetFrameDecodedCount(), g_env->video_->NumFrames());
+ EXPECT_TRUE(frame_validator_->WaitUntilValidated());
+}
+
+// Reset the decoder immediately after initialization.
+TEST_F(VideoDecoderTest, ResetAfterInitialize) {
+ auto tvp = CreateVideoPlayer(g_env->video_);
+
+ tvp->Reset();
+ EXPECT_TRUE(tvp->WaitForResetDone());
+ tvp->Play();
+ EXPECT_TRUE(tvp->WaitForFlushDone());
+
+ EXPECT_EQ(tvp->GetResetDoneCount(), 1u);
+ EXPECT_EQ(tvp->GetFlushDoneCount(), 1u);
+ EXPECT_EQ(tvp->GetFrameDecodedCount(), g_env->video_->NumFrames());
+ EXPECT_TRUE(frame_validator_->WaitUntilValidated());
+}
+
+// Reset the decoder when the middle of the stream is reached.
+TEST_F(VideoDecoderTest, ResetMidStream) {
+ auto tvp = CreateVideoPlayer(g_env->video_);
+
+ tvp->Play();
+ EXPECT_TRUE(tvp->WaitForFrameDecoded(g_env->video_->NumFrames() / 2));
+ tvp->Reset();
+ EXPECT_TRUE(tvp->WaitForResetDone());
+ size_t numFramesDecoded = tvp->GetFrameDecodedCount();
+ tvp->Play();
+ EXPECT_TRUE(tvp->WaitForFlushDone());
+
+ EXPECT_EQ(tvp->GetResetDoneCount(), 1u);
+ EXPECT_EQ(tvp->GetFlushDoneCount(), 1u);
+ EXPECT_EQ(tvp->GetFrameDecodedCount(),
+ numFramesDecoded + g_env->video_->NumFrames());
+ EXPECT_TRUE(frame_validator_->WaitUntilValidated());
+}
+
+// Reset the decoder when the end of the stream is reached.
+TEST_F(VideoDecoderTest, ResetEndOfStream) {
+ auto tvp = CreateVideoPlayer(g_env->video_);
+
+ tvp->Play();
+ EXPECT_TRUE(tvp->WaitForFlushDone());
+ EXPECT_EQ(tvp->GetFrameDecodedCount(), g_env->video_->NumFrames());
+ tvp->Reset();
+ EXPECT_TRUE(tvp->WaitForResetDone());
+ tvp->Play();
+ EXPECT_TRUE(tvp->WaitForFlushDone());
+
+ EXPECT_EQ(tvp->GetResetDoneCount(), 1u);
+ EXPECT_EQ(tvp->GetFlushDoneCount(), 2u);
+ EXPECT_EQ(tvp->GetFrameDecodedCount(), g_env->video_->NumFrames() * 2);
+ EXPECT_TRUE(frame_validator_->WaitUntilValidated());
+}
+
+// Reset the decoder immediately when the end-of-stream flush starts, without
+// waiting for a kFlushDone event.
+TEST_F(VideoDecoderTest, ResetBeforeFlushDone) {
+ auto tvp = CreateVideoPlayer(g_env->video_);
+
+ // Reset when a kFlushing event is received.
+ tvp->Play();
+ EXPECT_TRUE(tvp->WaitForFlushDone());
+ tvp->Reset();
+ EXPECT_TRUE(tvp->WaitForResetDone());
+
+ // Reset will cause the decoder to drop everything it's doing, including the
+ // ongoing flush operation. However the flush might have been completed
+ // already by the time reset is called. So depending on the timing of the
+ // calls we should see 0 or 1 flushes, and the last few video frames might
+ // have been dropped.
+ EXPECT_LE(tvp->GetFlushDoneCount(), 1u);
+ EXPECT_EQ(tvp->GetResetDoneCount(), 1u);
+ EXPECT_LE(tvp->GetFrameDecodedCount(), g_env->video_->NumFrames());
+ EXPECT_TRUE(frame_validator_->WaitUntilValidated());
+}
+
+// Play video from start to end. Multiple buffer decodes will be queued in the
+// decoder, without waiting for the result of the previous decode requests.
+TEST_F(VideoDecoderTest, FlushAtEndOfStream_MultipleOutstandingDecodes) {
+ VideoDecoderClientConfig config;
+ config.max_outstanding_decode_requests = 5;
+ auto tvp = CreateVideoPlayer(g_env->video_, config);
+
+ tvp->Play();
+ EXPECT_TRUE(tvp->WaitForFlushDone());
+
+ EXPECT_EQ(tvp->GetFlushDoneCount(), 1u);
+ EXPECT_EQ(tvp->GetFrameDecodedCount(), g_env->video_->NumFrames());
+ EXPECT_TRUE(frame_validator_->WaitUntilValidated());
+}
+
+// Reset the decoder immediately when encountering the first config info in a
+// H.264 video stream. After resetting the video is played until the end.
+TEST_F(VideoDecoderTest, ResetAfterFirstConfigInfo) {
+ // This test is only relevant for H.264 video streams.
+ if (g_env->video_->Profile() < H264PROFILE_MIN ||
+ g_env->video_->Profile() > H264PROFILE_MAX)
+ GTEST_SKIP();
+
+ auto tvp = CreateVideoPlayer(g_env->video_);
+ tvp->PlayUntil(VideoPlayerEvent::kConfigInfo);
+ EXPECT_TRUE(tvp->WaitForEvent(VideoPlayerEvent::kConfigInfo));
+ tvp->Reset();
+ EXPECT_TRUE(tvp->WaitForResetDone());
+ size_t numFramesDecoded = tvp->GetFrameDecodedCount();
tvp->Play();
- EXPECT_TRUE(tvp->WaitForEvent(VideoPlayerEvent::kFlushDone));
+ EXPECT_TRUE(tvp->WaitForFlushDone());
- EXPECT_EQ(tvp->GetEventCount(VideoPlayerEvent::kFlushDone), 1u);
- EXPECT_EQ(tvp->GetEventCount(VideoPlayerEvent::kFrameDecoded), 250u);
+ EXPECT_EQ(tvp->GetResetDoneCount(), 1u);
+ EXPECT_EQ(tvp->GetFlushDoneCount(), 1u);
+ EXPECT_EQ(tvp->GetFrameDecodedCount(),
+ numFramesDecoded + g_env->video_->NumFrames());
+ EXPECT_GE(tvp->GetEventCount(VideoPlayerEvent::kConfigInfo), 1u);
+ EXPECT_EQ(0u, frame_validator_->GetMismatchedFramesCount());
}
} // namespace test
@@ -99,9 +270,12 @@ int main(int argc, char** argv) {
settings.logging_dest = logging::LOG_TO_SYSTEM_DEBUG_LOG;
LOG_ASSERT(logging::InitLogging(settings));
+ // Set up our test environment
+ const media::test::Video* video =
+ &media::test::kDefaultTestVideoCollection[0];
media::test::g_env = static_cast<media::test::VideoDecoderTestEnvironment*>(
testing::AddGlobalTestEnvironment(
- new media::test::VideoDecoderTestEnvironment()));
+ new media::test::VideoDecoderTestEnvironment(video)));
return RUN_ALL_TESTS();
}
diff --git a/chromium/media/gpu/video_decode_accelerator_unittest.cc b/chromium/media/gpu/video_decode_accelerator_unittest.cc
index bc624e48d8a..096c1d7ef08 100644
--- a/chromium/media/gpu/video_decode_accelerator_unittest.cc
+++ b/chromium/media/gpu/video_decode_accelerator_unittest.cc
@@ -88,7 +88,6 @@
#endif // defined(OS_CHROMEOS)
namespace media {
-
namespace {
// Values optionally filled in from flags; see main() below.
@@ -293,7 +292,7 @@ class GLRenderingVDAClient
std::string encoded_data,
RenderingHelper* rendering_helper,
std::unique_ptr<media::test::VideoFrameValidator> video_frame_validator,
- ClientStateNotification<ClientState>* note);
+ media::test::ClientStateNotification<ClientState>* note);
~GLRenderingVDAClient() override;
void CreateAndStartDecoder();
@@ -353,7 +352,7 @@ class GLRenderingVDAClient
gfx::Size frame_size_;
size_t outstanding_decodes_;
int next_bitstream_buffer_id_;
- ClientStateNotification<ClientState>* const note_;
+ media::test::ClientStateNotification<ClientState>* const note_;
std::unique_ptr<VideoDecodeAccelerator> decoder_;
base::WeakPtr<VideoDecodeAccelerator> weak_vda_;
std::unique_ptr<base::WeakPtrFactory<VideoDecodeAccelerator>>
@@ -414,7 +413,7 @@ GLRenderingVDAClient::GLRenderingVDAClient(
std::string encoded_data,
RenderingHelper* rendering_helper,
std::unique_ptr<media::test::VideoFrameValidator> video_frame_validator,
- ClientStateNotification<ClientState>* note)
+ media::test::ClientStateNotification<ClientState>* note)
: config_(std::move(config)),
rendering_helper_(rendering_helper),
frame_size_(config_.frame_size),
@@ -604,10 +603,11 @@ void GLRenderingVDAClient::PictureReady(const Picture& picture) {
picture.picture_buffer_id()));
pending_textures_.insert(*texture_it);
if (video_frame_validator_) {
- auto video_frame = texture_it->second->CreateVideoFrame(visible_rect);
+ auto video_frame = texture_it->second->ExportVideoFrame(visible_rect);
ASSERT_NE(video_frame.get(), nullptr);
- video_frame_validator_->EvaluateVideoFrame(std::move(video_frame),
- frame_index_);
+ video_frame_validator_->ProcessVideoFrame(std::move(video_frame),
+ frame_index_);
+ video_frame_validator_->WaitUntilValidated();
frame_index_++;
}
rendering_helper_->ConsumeVideoFrame(config_.window_id,
@@ -927,11 +927,13 @@ class VideoDecodeAcceleratorTest : public ::testing::Test {
TestFilesVector* test_video_files);
void InitializeRenderingHelper(const RenderingHelperParams& helper_params);
- void CreateAndStartDecoder(GLRenderingVDAClient* client,
- ClientStateNotification<ClientState>* note);
+ void CreateAndStartDecoder(
+ GLRenderingVDAClient* client,
+ media::test::ClientStateNotification<ClientState>* note);
// Wait until decode finishes and return the last state.
- ClientState WaitUntilDecodeFinish(ClientStateNotification<ClientState>* note);
+ ClientState WaitUntilDecodeFinish(
+ media::test::ClientStateNotification<ClientState>* note);
void WaitUntilIdle();
void OutputLogFile(const base::FilePath::CharType* log_path,
@@ -946,8 +948,8 @@ class VideoDecodeAcceleratorTest : public ::testing::Test {
static void Delete(T item) {
// |item| is cleared when the scope of this function is left.
}
- using NotesVector =
- std::vector<std::unique_ptr<ClientStateNotification<ClientState>>>;
+ using NotesVector = std::vector<
+ std::unique_ptr<media::test::ClientStateNotification<ClientState>>>;
using ClientsVector = std::vector<std::unique_ptr<GLRenderingVDAClient>>;
NotesVector notes_;
@@ -1074,7 +1076,7 @@ void VideoDecodeAcceleratorTest::InitializeRenderingHelper(
void VideoDecodeAcceleratorTest::CreateAndStartDecoder(
GLRenderingVDAClient* client,
- ClientStateNotification<ClientState>* note) {
+ media::test::ClientStateNotification<ClientState>* note) {
g_env->GetRenderingTaskRunner()->PostTask(
FROM_HERE, base::BindOnce(&GLRenderingVDAClient::CreateAndStartDecoder,
base::Unretained(client)));
@@ -1082,7 +1084,7 @@ void VideoDecodeAcceleratorTest::CreateAndStartDecoder(
}
ClientState VideoDecodeAcceleratorTest::WaitUntilDecodeFinish(
- ClientStateNotification<ClientState>* note) {
+ media::test::ClientStateNotification<ClientState>* note) {
ClientState state = CS_DESTROYED;
for (int i = 0; i < CS_MAX; i++) {
state = note->Wait();
@@ -1132,7 +1134,7 @@ class VideoDecodeAcceleratorParamTest
// Wait for |note| to report a state and if it's not |expected_state| then
// assert |client| has deleted its decoder.
static void AssertWaitForStateOrDeleted(
- ClientStateNotification<ClientState>* note,
+ media::test::ClientStateNotification<ClientState>* note,
GLRenderingVDAClient* client,
ClientState expected_state) {
// Skip waiting state if decoder of |client| is already deleted.
@@ -1220,8 +1222,8 @@ TEST_P(VideoDecodeAcceleratorParamTest, MAYBE_TestSimpleDecode) {
for (size_t index = 0; index < num_concurrent_decoders; ++index) {
TestVideoFile* video_file =
test_video_files_[index % test_video_files_.size()].get();
- std::unique_ptr<ClientStateNotification<ClientState>> note =
- std::make_unique<ClientStateNotification<ClientState>>();
+ std::unique_ptr<media::test::ClientStateNotification<ClientState>> note =
+ std::make_unique<media::test::ClientStateNotification<ClientState>>();
notes_[index] = std::move(note);
size_t delay_reuse_after_frame_num = std::numeric_limits<size_t>::max();
@@ -1274,7 +1276,7 @@ TEST_P(VideoDecodeAcceleratorParamTest, MAYBE_TestSimpleDecode) {
// Only check performance & correctness later if we play through only once.
bool skip_performance_and_correctness_checks = num_play_throughs > 1;
for (size_t i = 0; i < num_concurrent_decoders; ++i) {
- ClientStateNotification<ClientState>* note = notes_[i].get();
+ media::test::ClientStateNotification<ClientState>* note = notes_[i].get();
ClientState state = note->Wait();
EXPECT_TRUE(delete_decoder_state != CS_DECODER_SET ||
state == CS_DESTROYED);
@@ -1597,7 +1599,8 @@ WRAPPED_INSTANTIATE_TEST_CASE_P(
// Measure the median of the decode time when VDA::Decode is called 30 times per
// second.
TEST_F(VideoDecodeAcceleratorTest, TestDecodeTimeMedian) {
- notes_.push_back(std::make_unique<ClientStateNotification<ClientState>>());
+ notes_.push_back(
+ std::make_unique<media::test::ClientStateNotification<ClientState>>());
const TestVideoFile* video_file = test_video_files_[0].get();
GLRenderingVDAClient::Config config;
@@ -1632,7 +1635,8 @@ TEST_F(VideoDecodeAcceleratorTest, TestDecodeTimeMedian) {
// is not considered as a failure because the input may be unsupported or
// corrupted videos.
TEST_F(VideoDecodeAcceleratorTest, NoCrash) {
- notes_.push_back(std::make_unique<ClientStateNotification<ClientState>>());
+ notes_.push_back(
+ std::make_unique<media::test::ClientStateNotification<ClientState>>());
const TestVideoFile* video_file = test_video_files_[0].get();
GLRenderingVDAClient::Config config;
@@ -1662,7 +1666,8 @@ TEST_F(VideoDecodeAcceleratorTest, DISABLED_GenMD5) {
g_test_import = true;
ASSERT_EQ(test_video_files_.size(), 1u);
- notes_.push_back(std::make_unique<ClientStateNotification<ClientState>>());
+ notes_.push_back(
+ std::make_unique<media::test::ClientStateNotification<ClientState>>());
const TestVideoFile* video_file = test_video_files_[0].get();
GLRenderingVDAClient::Config config;
config.frame_size = gfx::Size(video_file->width, video_file->height);
@@ -1696,16 +1701,21 @@ class VDATestSuite : public base::TestSuite {
public:
VDATestSuite(int argc, char** argv) : base::TestSuite(argc, argv) {}
- int Run() {
+ private:
+ void Initialize() override {
+ base::TestSuite::Initialize();
+
#if defined(OS_WIN) || defined(OS_CHROMEOS)
// For windows the decoding thread initializes the media foundation decoder
// which uses COM. We need the thread to be a UI thread.
// On Ozone, the backend initializes the event system using a UI
// thread.
- base::test::ScopedTaskEnvironment scoped_task_environment(
- base::test::ScopedTaskEnvironment::MainThreadType::UI);
+ scoped_task_environment_ =
+ std::make_unique<base::test::ScopedTaskEnvironment>(
+ base::test::ScopedTaskEnvironment::MainThreadType::UI);
#else
- base::test::ScopedTaskEnvironment scoped_task_environment;
+ scoped_task_environment_ =
+ std::make_unique<base::test::ScopedTaskEnvironment>();
#endif // OS_WIN || OS_CHROMEOS
media::g_env =
@@ -1725,8 +1735,14 @@ class VDATestSuite : public base::TestSuite {
#elif defined(OS_WIN)
media::DXVAVideoDecodeAccelerator::PreSandboxInitialization();
#endif
- return base::TestSuite::Run();
}
+
+ void Shutdown() override {
+ scoped_task_environment_.reset();
+ base::TestSuite::Shutdown();
+ }
+
+ std::unique_ptr<base::test::ScopedTaskEnvironment> scoped_task_environment_;
};
} // namespace
diff --git a/chromium/media/gpu/video_encode_accelerator_unittest.cc b/chromium/media/gpu/video_encode_accelerator_unittest.cc
index 1dc8cd1fe55..5ef5169184e 100644
--- a/chromium/media/gpu/video_encode_accelerator_unittest.cc
+++ b/chromium/media/gpu/video_encode_accelerator_unittest.cc
@@ -42,7 +42,6 @@
#include "media/base/bitstream_buffer.h"
#include "media/base/cdm_context.h"
#include "media/base/decoder_buffer.h"
-#include "media/base/media_log.h"
#include "media/base/media_util.h"
#include "media/base/test_data_util.h"
#include "media/base/video_decoder.h"
@@ -500,7 +499,8 @@ class VideoEncodeAcceleratorTestEnvironment : public ::testing::Environment {
bool run_at_fps,
bool needs_encode_latency,
bool verify_all_output)
- : test_stream_data_(std::move(data)),
+ : rendering_thread_("GLRenderingVEAClientThread"),
+ test_stream_data_(std::move(data)),
log_path_(log_path),
frame_stats_path_(frame_stats_path),
run_at_fps_(run_at_fps),
@@ -515,21 +515,44 @@ class VideoEncodeAcceleratorTestEnvironment : public ::testing::Environment {
}
ParseAndReadTestStreamData(*test_stream_data_, &test_streams_);
- if (g_native_input) {
#if defined(USE_OZONE)
- // If |g_native_input| is true, Ozone needs to be initialized so that
- // DmaBufs is able to be created through Ozone DRM.
- ui::OzonePlatform::InitParams params;
- params.single_process = false;
- ui::OzonePlatform::InitializeForUI(params);
+ // Initialize Ozone so that DMABuf can be created through Ozone DRM.
+ ui::OzonePlatform::InitParams params;
+ params.single_process = false;
+ ui::OzonePlatform::InitializeForUI(params);
+
+ base::Thread::Options options;
+ options.message_loop_type = base::MessageLoop::TYPE_UI;
+ ASSERT_TRUE(rendering_thread_.StartWithOptions(options));
+ base::WaitableEvent done(base::WaitableEvent::ResetPolicy::AUTOMATIC,
+ base::WaitableEvent::InitialState::NOT_SIGNALED);
+ rendering_thread_.task_runner()->PostTask(
+ FROM_HERE,
+ base::BindOnce(&VideoEncodeAcceleratorTestEnvironment::SetupOzone,
+ &done));
+ done.Wait();
+
+ // To create dmabuf through gbm, Ozone needs to be set up.
+ gpu_helper.reset(new ui::OzoneGpuTestHelper());
+ gpu_helper->Initialize(base::ThreadTaskRunnerHandle::Get());
+
+#else
+ ASSERT_TRUE(rendering_thread_.Start());
#endif
- }
}
virtual void TearDown() {
log_file_.reset();
+
+ rendering_thread_.Stop();
}
+ scoped_refptr<base::SingleThreadTaskRunner> GetRenderingTaskRunner() const {
+ return rendering_thread_.task_runner();
+ }
+
+ void FlushRenderingThread() { rendering_thread_.FlushForTesting(); }
+
// Log one entry of machine-readable data to file and LOG(INFO).
// The log has one data entry per line in the format of "<key>: <value>".
// Note that Chrome OS video_VEAPerf autotest parses the output key and value
@@ -560,6 +583,7 @@ class VideoEncodeAcceleratorTestEnvironment : public ::testing::Environment {
std::vector<std::unique_ptr<TestStream>> test_streams_;
private:
+ base::Thread rendering_thread_;
std::unique_ptr<base::FilePath::StringType> test_stream_data_;
base::FilePath log_path_;
base::FilePath frame_stats_path_;
@@ -567,6 +591,20 @@ class VideoEncodeAcceleratorTestEnvironment : public ::testing::Environment {
bool run_at_fps_;
bool needs_encode_latency_;
bool verify_all_output_;
+
+#if defined(USE_OZONE)
+ std::unique_ptr<ui::OzoneGpuTestHelper> gpu_helper;
+
+ static void SetupOzone(base::WaitableEvent* done) {
+ base::CommandLine* cmd_line = base::CommandLine::ForCurrentProcess();
+ cmd_line->AppendSwitchASCII(switches::kUseGL, gl::kGLImplementationEGLName);
+ ui::OzonePlatform::InitParams params;
+ params.single_process = true;
+ ui::OzonePlatform::InitializeForGPU(params);
+ ui::OzonePlatform::GetInstance()->AfterSandboxEntry();
+ done->Signal();
+ }
+#endif
};
enum ClientState {
@@ -809,7 +847,7 @@ class VideoFrameQualityValidator
FrameStats CompareFrames(const VideoFrame& original_frame,
const VideoFrame& output_frame);
- MediaLog media_log_;
+ NullMediaLog media_log_;
const VideoCodecProfile profile_;
const VideoPixelFormat pixel_format_;
const bool verify_quality_;
@@ -1185,7 +1223,8 @@ class VEAClientBase : public VideoEncodeAccelerator::Client {
}
protected:
- VEAClientBase(ClientStateNotification<ClientState>* note)
+ explicit VEAClientBase(
+ media::test::ClientStateNotification<ClientState>* note)
: note_(note), next_output_buffer_id_(0) {}
bool has_encoder() { return encoder_.get(); }
@@ -1196,7 +1235,7 @@ class VEAClientBase : public VideoEncodeAccelerator::Client {
// Used to notify another thread about the state. VEAClientBase does not own
// this.
- ClientStateNotification<ClientState>* note_;
+ media::test::ClientStateNotification<ClientState>* note_;
// All methods of this class should be run on the same thread.
base::ThreadChecker thread_checker_;
@@ -1208,7 +1247,7 @@ class VEAClientBase : public VideoEncodeAccelerator::Client {
class VEAClient : public VEAClientBase {
public:
VEAClient(TestStream* test_stream,
- ClientStateNotification<ClientState>* note,
+ media::test::ClientStateNotification<ClientState>* note,
bool save_to_file,
unsigned int keyframe_period,
bool force_bitrate,
@@ -1418,7 +1457,7 @@ class VEAClient : public VEAClientBase {
};
VEAClient::VEAClient(TestStream* test_stream,
- ClientStateNotification<ClientState>* note,
+ media::test::ClientStateNotification<ClientState>* note,
bool save_to_file,
unsigned int keyframe_period,
bool force_bitrate,
@@ -2121,7 +2160,7 @@ class SimpleVEAClientBase : public VEAClientBase {
size_t output_buffer_size) override;
protected:
- SimpleVEAClientBase(ClientStateNotification<ClientState>* note,
+ SimpleVEAClientBase(media::test::ClientStateNotification<ClientState>* note,
const int width,
const int height);
@@ -2137,7 +2176,7 @@ class SimpleVEAClientBase : public VEAClientBase {
};
SimpleVEAClientBase::SimpleVEAClientBase(
- ClientStateNotification<ClientState>* note,
+ media::test::ClientStateNotification<ClientState>* note,
const int width,
const int height)
: VEAClientBase(note),
@@ -2214,7 +2253,8 @@ void SimpleVEAClientBase::FeedEncoderWithOutput(base::SharedMemory* shm,
// frame before getting any input.
class VEANoInputClient : public SimpleVEAClientBase {
public:
- explicit VEANoInputClient(ClientStateNotification<ClientState>* note);
+ explicit VEANoInputClient(
+ media::test::ClientStateNotification<ClientState>* note);
void DestroyEncoder();
// VideoDecodeAccelerator::Client implementation.
@@ -2231,7 +2271,8 @@ class VEANoInputClient : public SimpleVEAClientBase {
std::unique_ptr<base::OneShotTimer> timer_;
};
-VEANoInputClient::VEANoInputClient(ClientStateNotification<ClientState>* note)
+VEANoInputClient::VEANoInputClient(
+ media::test::ClientStateNotification<ClientState>* note)
: SimpleVEAClientBase(note, 320, 240) {}
void VEANoInputClient::DestroyEncoder() {
@@ -2269,7 +2310,7 @@ void VEANoInputClient::BitstreamBufferReady(
class VEACacheLineUnalignedInputClient : public SimpleVEAClientBase {
public:
explicit VEACacheLineUnalignedInputClient(
- ClientStateNotification<ClientState>* note);
+ media::test::ClientStateNotification<ClientState>* note);
// VideoDecodeAccelerator::Client implementation.
void RequireBitstreamBuffers(unsigned int input_count,
@@ -2285,7 +2326,7 @@ class VEACacheLineUnalignedInputClient : public SimpleVEAClientBase {
};
VEACacheLineUnalignedInputClient::VEACacheLineUnalignedInputClient(
- ClientStateNotification<ClientState>* note)
+ media::test::ClientStateNotification<ClientState>* note)
: SimpleVEAClientBase(note, 368, 368) {
} // 368 is divisible by 16 but not 32
@@ -2340,38 +2381,6 @@ void VEACacheLineUnalignedInputClient::FeedEncoderWithOneInput(
encoder_->Encode(video_frame, false);
}
-#if defined(USE_OZONE)
-void SetupOzone(base::WaitableEvent* done) {
- base::CommandLine* cmd_line = base::CommandLine::ForCurrentProcess();
- cmd_line->AppendSwitchASCII(switches::kUseGL, gl::kGLImplementationEGLName);
- ui::OzonePlatform::InitParams params;
- params.single_process = true;
- ui::OzonePlatform::InitializeForGPU(params);
- ui::OzonePlatform::GetInstance()->AfterSandboxEntry();
- done->Signal();
-}
-#endif
-
-void StartVEAThread(base::Thread* vea_client_thread) {
- if (g_native_input) {
-#if defined(USE_OZONE)
- // If |g_native_input_| is true, we create DmaBufs through Ozone DRM on
- // Chrome OS. For initializing Ozone DRM, some additional setups are
- // required. Otherwise, a thread should be started with a default settings.
- base::Thread::Options options;
- options.message_loop_type = base::MessageLoop::TYPE_UI;
- ASSERT_TRUE(vea_client_thread->StartWithOptions(options));
- base::WaitableEvent done(base::WaitableEvent::ResetPolicy::AUTOMATIC,
- base::WaitableEvent::InitialState::NOT_SIGNALED);
- vea_client_thread->task_runner()->PostTask(
- FROM_HERE, base::BindOnce(&SetupOzone, &done));
- done.Wait();
-#endif
- } else {
- ASSERT_TRUE(vea_client_thread->Start());
- }
-}
-
// Test parameters:
// - Number of concurrent encoders. The value takes effect when there is only
// one input stream; otherwise, one encoder per input stream will be
@@ -2401,19 +2410,10 @@ TEST_P(VideoEncodeAcceleratorTest, TestSimpleEncode) {
std::get<7>(GetParam()) || g_env->verify_all_output();
const bool verify_output_timestamp = std::get<8>(GetParam());
- std::vector<std::unique_ptr<ClientStateNotification<ClientState>>> notes;
+ std::vector<
+ std::unique_ptr<media::test::ClientStateNotification<ClientState>>>
+ notes;
std::vector<std::unique_ptr<VEAClient>> clients;
- base::Thread vea_client_thread("EncoderClientThread");
- StartVEAThread(&vea_client_thread);
-
-#if defined(USE_OZONE)
- std::unique_ptr<ui::OzoneGpuTestHelper> gpu_helper;
- if (g_native_input) {
- // To create dmabuf through gbm, Ozone needs to be set up.
- gpu_helper.reset(new ui::OzoneGpuTestHelper());
- gpu_helper->Initialize(base::ThreadTaskRunnerHandle::Get());
- }
-#endif
if (g_env->test_streams_.size() > 1)
num_concurrent_encoders = g_env->test_streams_.size();
@@ -2426,14 +2426,15 @@ TEST_P(VideoEncodeAcceleratorTest, TestSimpleEncode) {
(save_to_file &&
!g_env->test_streams_[test_stream_index]->out_filename.empty());
- notes.push_back(std::make_unique<ClientStateNotification<ClientState>>());
+ notes.push_back(
+ std::make_unique<media::test::ClientStateNotification<ClientState>>());
clients.push_back(std::make_unique<VEAClient>(
g_env->test_streams_[test_stream_index].get(), notes.back().get(),
encoder_save_to_file, keyframe_period, force_bitrate, test_perf,
mid_stream_bitrate_switch, mid_stream_framerate_switch, verify_output,
verify_output_timestamp));
- vea_client_thread.task_runner()->PostTask(
+ g_env->GetRenderingTaskRunner()->PostTask(
FROM_HERE, base::BindOnce(&VEAClient::CreateEncoder,
base::Unretained(clients.back().get())));
}
@@ -2460,13 +2461,12 @@ TEST_P(VideoEncodeAcceleratorTest, TestSimpleEncode) {
}
for (size_t i = 0; i < num_concurrent_encoders; ++i) {
- vea_client_thread.task_runner()->PostTask(
+ g_env->GetRenderingTaskRunner()->PostTask(
FROM_HERE, base::BindOnce(&VEAClient::DestroyEncoder,
base::Unretained(clients[i].get())));
}
- // This ensures all tasks have finished.
- vea_client_thread.Stop();
+ g_env->FlushRenderingThread();
}
// Test parameters:
@@ -2478,8 +2478,8 @@ class VideoEncodeAcceleratorSimpleTest : public ::testing::TestWithParam<int> {
template <class TestClient>
void SimpleTestFunc() {
- std::unique_ptr<ClientStateNotification<ClientState>> note(
- new ClientStateNotification<ClientState>());
+ std::unique_ptr<media::test::ClientStateNotification<ClientState>> note(
+ new media::test::ClientStateNotification<ClientState>());
std::unique_ptr<TestClient> client(new TestClient(note.get()));
base::Thread vea_client_thread("EncoderClientThread");
ASSERT_TRUE(vea_client_thread.Start());
@@ -2636,12 +2636,17 @@ class VEATestSuite : public base::TestSuite {
public:
VEATestSuite(int argc, char** argv) : base::TestSuite(argc, argv) {}
- int Run() {
+ private:
+ void Initialize() override {
+ base::TestSuite::Initialize();
+
#if defined(OS_CHROMEOS)
- base::test::ScopedTaskEnvironment scoped_task_environment(
- base::test::ScopedTaskEnvironment::MainThreadType::UI);
+ scoped_task_environment_ =
+ std::make_unique<base::test::ScopedTaskEnvironment>(
+ base::test::ScopedTaskEnvironment::MainThreadType::UI);
#else
- base::test::ScopedTaskEnvironment scoped_task_environment;
+ scoped_task_environment_ =
+ std::make_unique<base::test::ScopedTaskEnvironment>();
#endif
media::g_env =
reinterpret_cast<media::VideoEncodeAcceleratorTestEnvironment*>(
@@ -2657,8 +2662,15 @@ class VEATestSuite : public base::TestSuite {
#elif defined(OS_WIN)
media::MediaFoundationVideoEncodeAccelerator::PreSandboxInitialization();
#endif
- return base::TestSuite::Run();
}
+
+ void Shutdown() override {
+ scoped_task_environment_.reset();
+ base::TestSuite::Shutdown();
+ }
+
+ private:
+ std::unique_ptr<base::test::ScopedTaskEnvironment> scoped_task_environment_;
};
} // namespace
diff --git a/chromium/media/gpu/vp8_decoder.cc b/chromium/media/gpu/vp8_decoder.cc
index 33a181d0d4e..c86f7182560 100644
--- a/chromium/media/gpu/vp8_decoder.cc
+++ b/chromium/media/gpu/vp8_decoder.cc
@@ -7,6 +7,10 @@
namespace media {
+namespace {
+constexpr size_t kVP8NumFramesActive = 4;
+};
+
VP8Decoder::VP8Accelerator::VP8Accelerator() {}
VP8Decoder::VP8Accelerator::~VP8Accelerator() {}
@@ -165,9 +169,14 @@ gfx::Size VP8Decoder::GetPicSize() const {
}
size_t VP8Decoder::GetRequiredNumOfPictures() const {
- const size_t kVP8NumFramesActive = 4;
- const size_t kPicsInPipeline = limits::kMaxVideoFrames + 2;
+ constexpr size_t kPicsInPipeline = limits::kMaxVideoFrames + 1;
return kVP8NumFramesActive + kPicsInPipeline;
}
+size_t VP8Decoder::GetNumReferenceFrames() const {
+ // Maximum number of reference frames needed plus one for the one being
+ // currently egressed.
+ return kVP8NumFramesActive + 1;
+}
+
} // namespace media
diff --git a/chromium/media/gpu/vp8_decoder.h b/chromium/media/gpu/vp8_decoder.h
index f08a8126cf1..8ed398b89f6 100644
--- a/chromium/media/gpu/vp8_decoder.h
+++ b/chromium/media/gpu/vp8_decoder.h
@@ -72,6 +72,7 @@ class MEDIA_GPU_EXPORT VP8Decoder : public AcceleratedVideoDecoder {
DecodeResult Decode() override WARN_UNUSED_RESULT;
gfx::Size GetPicSize() const override;
size_t GetRequiredNumOfPictures() const override;
+ size_t GetNumReferenceFrames() const override;
private:
bool DecodeAndOutputCurrentFrame(scoped_refptr<VP8Picture> pic);
diff --git a/chromium/media/gpu/vp9_decoder.cc b/chromium/media/gpu/vp9_decoder.cc
index a16c6a9ea30..a8d2f2ac2b6 100644
--- a/chromium/media/gpu/vp9_decoder.cc
+++ b/chromium/media/gpu/vp9_decoder.cc
@@ -261,9 +261,14 @@ gfx::Size VP9Decoder::GetPicSize() const {
}
size_t VP9Decoder::GetRequiredNumOfPictures() const {
- // kMaxVideoFrames to keep higher level media pipeline populated, +2 for the
- // pictures being parsed and decoded currently.
- return limits::kMaxVideoFrames + kVp9NumRefFrames + 2;
+ constexpr size_t kPicsInPipeline = limits::kMaxVideoFrames + 1;
+ return kPicsInPipeline + GetNumReferenceFrames();
+}
+
+size_t VP9Decoder::GetNumReferenceFrames() const {
+ // Maximum number of reference frames needed plus one for the one being
+ // currently egressed.
+ return kVp9NumRefFrames + 1;
}
} // namespace media
diff --git a/chromium/media/gpu/vp9_decoder.h b/chromium/media/gpu/vp9_decoder.h
index 3bf200618d8..b4422f0031e 100644
--- a/chromium/media/gpu/vp9_decoder.h
+++ b/chromium/media/gpu/vp9_decoder.h
@@ -106,6 +106,7 @@ class MEDIA_GPU_EXPORT VP9Decoder : public AcceleratedVideoDecoder {
DecodeResult Decode() override WARN_UNUSED_RESULT;
gfx::Size GetPicSize() const override;
size_t GetRequiredNumOfPictures() const override;
+ size_t GetNumReferenceFrames() const override;
private:
// Update ref_frames_ based on the information in current frame header.
diff --git a/chromium/media/gpu/vt_video_decode_accelerator_mac.cc b/chromium/media/gpu/vt_video_decode_accelerator_mac.cc
index 95f17030ffc..e09762f19c9 100644
--- a/chromium/media/gpu/vt_video_decode_accelerator_mac.cc
+++ b/chromium/media/gpu/vt_video_decode_accelerator_mac.cc
@@ -17,7 +17,6 @@
#include "base/bind.h"
#include "base/logging.h"
#include "base/mac/mac_logging.h"
-#include "base/macros.h"
#include "base/metrics/histogram_macros.h"
#include "base/stl_util.h"
#include "base/strings/stringprintf.h"
@@ -210,8 +209,8 @@ bool InitializeVideoToolboxInternal() {
0x3d, 0xa1, 0x00, 0x00, 0x03, 0x00, 0x01, 0x00,
0x00, 0x03, 0x00, 0x30, 0x8f, 0x16, 0x2d, 0x9a};
const uint8_t pps_normal[] = {0x68, 0xe9, 0x7b, 0xcb};
- if (!CreateVideoToolboxSession(sps_normal, arraysize(sps_normal), pps_normal,
- arraysize(pps_normal), true)) {
+ if (!CreateVideoToolboxSession(sps_normal, base::size(sps_normal), pps_normal,
+ base::size(pps_normal), true)) {
DVLOG(1) << "Hardware decoding with VideoToolbox is not supported";
return false;
}
@@ -222,8 +221,8 @@ bool InitializeVideoToolboxInternal() {
0x22, 0x10, 0x00, 0x00, 0x3e, 0x90, 0x00, 0x0e,
0xa6, 0x08, 0xf1, 0x22, 0x59, 0xa0};
const uint8_t pps_small[] = {0x68, 0xe9, 0x79, 0x72, 0xc0};
- if (!CreateVideoToolboxSession(sps_small, arraysize(sps_small), pps_small,
- arraysize(pps_small), false)) {
+ if (!CreateVideoToolboxSession(sps_small, base::size(sps_small), pps_small,
+ base::size(pps_small), false)) {
DLOG(WARNING) << "Software decoding with VideoToolbox is not supported";
return false;
}
@@ -314,7 +313,7 @@ gfx::ColorSpace GetImageBufferColorSpace(CVImageBufferRef image_buffer) {
},
};
if (!GetImageBufferProperty(image_buffer, kCVImageBufferColorPrimariesKey,
- primaries, arraysize(primaries), &primary_id)) {
+ primaries, base::size(primaries), &primary_id)) {
DLOG(ERROR) << "Filed to find CVImageBufferRef primaries.";
}
@@ -339,7 +338,7 @@ gfx::ColorSpace GetImageBufferColorSpace(CVImageBufferRef image_buffer) {
},
};
if (!GetImageBufferProperty(image_buffer, kCVImageBufferTransferFunctionKey,
- transfers, arraysize(transfers), &transfer_id)) {
+ transfers, base::size(transfers), &transfer_id)) {
DLOG(ERROR) << "Filed to find CVImageBufferRef transfer.";
}
@@ -382,7 +381,7 @@ gfx::ColorSpace GetImageBufferColorSpace(CVImageBufferRef image_buffer) {
gfx::ColorSpace::MatrixID::SMPTE240M,
}};
if (!GetImageBufferProperty(image_buffer, kCVImageBufferYCbCrMatrixKey,
- matrices, arraysize(matrices), &matrix_id)) {
+ matrices, base::size(matrices), &matrix_id)) {
DLOG(ERROR) << "Filed to find CVImageBufferRef YUV matrix.";
}
@@ -408,7 +407,7 @@ bool InitializeVideoToolbox() {
VTVideoDecodeAccelerator::Task::Task(TaskType type) : type(type) {}
-VTVideoDecodeAccelerator::Task::Task(const Task& other) = default;
+VTVideoDecodeAccelerator::Task::Task(Task&& other) = default;
VTVideoDecodeAccelerator::Task::~Task() {}
@@ -426,8 +425,8 @@ VTVideoDecodeAccelerator::PictureInfo::PictureInfo(uint32_t client_texture_id,
VTVideoDecodeAccelerator::PictureInfo::~PictureInfo() {}
bool VTVideoDecodeAccelerator::FrameOrder::operator()(
- const linked_ptr<Frame>& lhs,
- const linked_ptr<Frame>& rhs) const {
+ const std::unique_ptr<Frame>& lhs,
+ const std::unique_ptr<Frame>& rhs) const {
// TODO(sandersd): When it is provided, use the bitstream timestamp.
if (lhs->pic_order_cnt != rhs->pic_order_cnt)
return lhs->pic_order_cnt > rhs->pic_order_cnt;
@@ -1037,9 +1036,9 @@ void VTVideoDecodeAccelerator::DecodeDone(Frame* frame) {
}
Task task(TASK_FRAME);
- task.frame = pending_frames_[bitstream_id];
+ task.frame = std::move(pending_frames_[bitstream_id]);
pending_frames_.erase(bitstream_id);
- task_queue_.push(task);
+ task_queue_.push(std::move(task));
ProcessWorkQueues();
}
@@ -1092,7 +1091,7 @@ void VTVideoDecodeAccelerator::Decode(scoped_refptr<DecoderBuffer> buffer,
assigned_bitstream_ids_.insert(bitstream_id);
Frame* frame = new Frame(bitstream_id);
- pending_frames_[bitstream_id] = make_linked_ptr(frame);
+ pending_frames_[bitstream_id] = base::WrapUnique(frame);
decoder_thread_.task_runner()->PostTask(
FROM_HERE,
base::BindOnce(&VTVideoDecodeAccelerator::DecodeTask,
@@ -1186,7 +1185,7 @@ bool VTVideoDecodeAccelerator::ProcessTaskQueue() {
if (task_queue_.empty())
return false;
- const Task& task = task_queue_.front();
+ Task& task = task_queue_.front();
switch (task.type) {
case TASK_FRAME: {
bool reorder_queue_has_space =
@@ -1199,7 +1198,7 @@ bool VTVideoDecodeAccelerator::ProcessTaskQueue() {
DVLOG(2) << "Decode(" << task.frame->bitstream_id << ") complete";
assigned_bitstream_ids_.erase(task.frame->bitstream_id);
client_->NotifyEndOfBitstreamBuffer(task.frame->bitstream_id);
- reorder_queue_.push(task.frame);
+ reorder_queue_.push(std::move(task.frame));
task_queue_.pop();
return true;
}
@@ -1243,7 +1242,7 @@ bool VTVideoDecodeAccelerator::ProcessReorderQueue() {
if (reorder_queue_.empty())
return false;
- // If the next task is a flush (because there is a pending flush or becuase
+ // If the next task is a flush (because there is a pending flush or because
// the next frame is an IDR), then we don't need a full reorder buffer to send
// the next frame.
bool flushing =
diff --git a/chromium/media/gpu/vt_video_decode_accelerator_mac.h b/chromium/media/gpu/vt_video_decode_accelerator_mac.h
index aa3953b6509..3f01a8b0d75 100644
--- a/chromium/media/gpu/vt_video_decode_accelerator_mac.h
+++ b/chromium/media/gpu/vt_video_decode_accelerator_mac.h
@@ -15,7 +15,6 @@
#include "base/containers/queue.h"
#include "base/mac/scoped_cftyperef.h"
#include "base/macros.h"
-#include "base/memory/linked_ptr.h"
#include "base/memory/weak_ptr.h"
#include "base/threading/thread.h"
#include "base/threading/thread_checker.h"
@@ -121,11 +120,11 @@ class VTVideoDecodeAccelerator : public VideoDecodeAccelerator,
struct Task {
Task(TaskType type);
- Task(const Task& other);
+ Task(Task&& other);
~Task();
TaskType type;
- linked_ptr<Frame> frame;
+ std::unique_ptr<Frame> frame;
};
struct PictureInfo {
@@ -145,8 +144,8 @@ class VTVideoDecodeAccelerator : public VideoDecodeAccelerator,
};
struct FrameOrder {
- bool operator()(const linked_ptr<Frame>& lhs,
- const linked_ptr<Frame>& rhs) const;
+ bool operator()(const std::unique_ptr<Frame>& lhs,
+ const std::unique_ptr<Frame>& rhs) const;
};
//
@@ -210,8 +209,8 @@ class VTVideoDecodeAccelerator : public VideoDecodeAccelerator,
base::queue<Task> task_queue_;
// Queue of decoded frames in presentation order.
- std::priority_queue<linked_ptr<Frame>,
- std::vector<linked_ptr<Frame>>,
+ std::priority_queue<std::unique_ptr<Frame>,
+ std::vector<std::unique_ptr<Frame>>,
FrameOrder>
reorder_queue_;
@@ -220,7 +219,7 @@ class VTVideoDecodeAccelerator : public VideoDecodeAccelerator,
// Frames that have not yet been decoded, keyed by bitstream ID; maintains
// ownership of Frame objects while they flow through VideoToolbox.
- std::map<int32_t, linked_ptr<Frame>> pending_frames_;
+ std::map<int32_t, std::unique_ptr<Frame>> pending_frames_;
// Set of assigned bitstream IDs, so that Destroy() can release them all.
std::set<int32_t> assigned_bitstream_ids_;
diff --git a/chromium/media/gpu/windows/d3d11_cdm_proxy.cc b/chromium/media/gpu/windows/d3d11_cdm_proxy.cc
index dbfd86109ae..b188fc5478e 100644
--- a/chromium/media/gpu/windows/d3d11_cdm_proxy.cc
+++ b/chromium/media/gpu/windows/d3d11_cdm_proxy.cc
@@ -11,6 +11,7 @@
#include "base/logging.h"
#include "base/power_monitor/power_monitor.h"
#include "base/power_monitor/power_observer.h"
+#include "base/stl_util.h"
#include "base/synchronization/waitable_event.h"
#include "base/win/object_watcher.h"
#include "media/base/callback_registry.h"
@@ -203,24 +204,27 @@ class D3D11CdmContext : public CdmContext {
CdmProxy::KeyType key_type,
const std::vector<uint8_t>& key_blob) {
cdm_proxy_context_.SetKey(crypto_session, key_id, key_type, key_blob);
- new_key_callbacks_.Notify();
+ event_callbacks_.Notify(Event::kHasAdditionalUsableKey);
}
void RemoveKey(ID3D11CryptoSession* crypto_session,
const std::vector<uint8_t>& key_id) {
cdm_proxy_context_.RemoveKey(crypto_session, key_id);
}
- // Removes all keys from the context.
- void RemoveAllKeys() { cdm_proxy_context_.RemoveAllKeys(); }
+ // Notifies of hardware reset.
+ void OnHardwareReset() {
+ cdm_proxy_context_.RemoveAllKeys();
+ event_callbacks_.Notify(Event::kHardwareContextLost);
+ }
base::WeakPtr<D3D11CdmContext> GetWeakPtr() {
return weak_factory_.GetWeakPtr();
}
// CdmContext implementation.
- std::unique_ptr<CallbackRegistration> RegisterNewKeyCB(
- base::RepeatingClosure new_key_cb) override {
- return new_key_callbacks_.Register(std::move(new_key_cb));
+ std::unique_ptr<CallbackRegistration> RegisterEventCB(
+ EventCB event_cb) override {
+ return event_callbacks_.Register(std::move(event_cb));
}
CdmProxyContext* GetCdmProxyContext() override { return &cdm_proxy_context_; }
@@ -236,7 +240,7 @@ class D3D11CdmContext : public CdmContext {
std::unique_ptr<D3D11Decryptor> decryptor_;
- ClosureRegistry new_key_callbacks_;
+ CallbackRegistry<EventCB::RunType> event_callbacks_;
base::WeakPtrFactory<D3D11CdmContext> weak_factory_;
@@ -260,11 +264,14 @@ base::WeakPtr<CdmContext> D3D11CdmProxy::GetCdmContext() {
}
void D3D11CdmProxy::Initialize(Client* client, InitializeCB init_cb) {
+ DCHECK(client);
+
auto failed = [this, &init_cb]() {
// The value doesn't matter as it shouldn't be used on a failure.
const uint32_t kFailedCryptoSessionId = 0xFF;
std::move(init_cb).Run(Status::kFail, protocol_, kFailedCryptoSessionId);
};
+
if (initialized_) {
failed();
NOTREACHED() << "CdmProxy should not be initialized more than once.";
@@ -279,7 +286,7 @@ void D3D11CdmProxy::Initialize(Client* client, InitializeCB init_cb) {
nullptr, // No adapter.
D3D_DRIVER_TYPE_HARDWARE, nullptr, // No software rasterizer.
0, // flags, none.
- feature_levels, arraysize(feature_levels), D3D11_SDK_VERSION,
+ feature_levels, base::size(feature_levels), D3D11_SDK_VERSION,
device_.GetAddressOf(), nullptr, device_context_.GetAddressOf());
if (FAILED(hresult)) {
DLOG(ERROR) << "Failed to create the D3D11Device:" << hresult;
@@ -288,7 +295,7 @@ void D3D11CdmProxy::Initialize(Client* client, InitializeCB init_cb) {
}
// TODO(rkuroiwa): This should be registered iff
- // D3D11_CONTENT_PROTECTION_CAPS_HARDWARE_TEARDOWN is set in the capabilties.
+ // D3D11_CONTENT_PROTECTION_CAPS_HARDWARE_TEARDOWN is set in the capabilities.
hardware_event_watcher_ = HardwareEventWatcher::Create(
device_, base::BindRepeating(
&D3D11CdmProxy::NotifyHardwareContentProtectionTeardown,
@@ -308,7 +315,7 @@ void D3D11CdmProxy::Initialize(Client* client, InitializeCB init_cb) {
}
if (!CanDoHardwareProtectedKeyExchange(video_device_, crypto_type_)) {
- DLOG(ERROR) << "Cannot do hardware proteted key exhange.";
+ DLOG(ERROR) << "Cannot do hardware protected key exchange.";
failed();
return;
}
@@ -534,9 +541,8 @@ void D3D11CdmProxy::SetCreateDeviceCallbackForTesting(
}
void D3D11CdmProxy::NotifyHardwareContentProtectionTeardown() {
- cdm_context_->RemoveAllKeys();
- if (client_)
- client_->NotifyHardwareReset();
+ cdm_context_->OnHardwareReset();
+ client_->NotifyHardwareReset();
}
D3D11CdmProxy::HardwareEventWatcher::~HardwareEventWatcher() {
diff --git a/chromium/media/gpu/windows/d3d11_cdm_proxy.h b/chromium/media/gpu/windows/d3d11_cdm_proxy.h
index 0651f03953b..1fb77103f95 100644
--- a/chromium/media/gpu/windows/d3d11_cdm_proxy.h
+++ b/chromium/media/gpu/windows/d3d11_cdm_proxy.h
@@ -16,8 +16,8 @@
#include "base/callback.h"
#include "base/memory/weak_ptr.h"
+#include "media/base/win/d3d11_create_device_cb.h"
#include "media/gpu/media_gpu_export.h"
-#include "media/gpu/windows/d3d11_create_device_cb.h"
namespace media {
diff --git a/chromium/media/gpu/windows/d3d11_cdm_proxy_unittest.cc b/chromium/media/gpu/windows/d3d11_cdm_proxy_unittest.cc
index cbb6e2f8a6a..ff24d6b98ec 100644
--- a/chromium/media/gpu/windows/d3d11_cdm_proxy_unittest.cc
+++ b/chromium/media/gpu/windows/d3d11_cdm_proxy_unittest.cc
@@ -12,18 +12,25 @@
#include "base/power_monitor/power_monitor.h"
#include "base/power_monitor/power_monitor_source.h"
#include "base/run_loop.h"
+#include "base/test/mock_callback.h"
#include "base/test/scoped_task_environment.h"
+#include "media/base/callback_registry.h"
+#include "media/base/win/d3d11_mocks.h"
#include "media/cdm/cdm_proxy_context.h"
-#include "media/gpu/windows/d3d11_mocks.h"
#include "testing/gtest/include/gtest/gtest.h"
+using Microsoft::WRL::ComPtr;
+
using ::testing::_;
using ::testing::AllOf;
using ::testing::AtLeast;
using ::testing::DoAll;
using ::testing::Invoke;
+using ::testing::InvokeWithoutArgs;
using ::testing::Lt;
+using ::testing::Mock;
using ::testing::Ne;
+using ::testing::NiceMock;
using ::testing::Pointee;
using ::testing::Return;
using ::testing::SaveArg;
@@ -84,7 +91,10 @@ class D3D11CdmProxyTest : public ::testing::Test {
std::map<CdmProxy::Function, uint32_t> function_id_map;
function_id_map[kTestFunction] = kTestFunctionId;
- auto mock_power_monitor_source = std::make_unique<MockPowerMonitorSource>();
+ // Use NiceMock because we don't care about base::PowerMonitorSource events
+ // other than calling Suspend() directly.
+ auto mock_power_monitor_source =
+ std::make_unique<NiceMock<MockPowerMonitorSource>>();
mock_power_monitor_source_ = mock_power_monitor_source.get();
power_monitor_ = std::make_unique<base::PowerMonitor>(
std::move(mock_power_monitor_source));
@@ -100,7 +110,7 @@ class D3D11CdmProxyTest : public ::testing::Test {
video_context_mock_ = CreateD3D11Mock<D3D11VideoContextMock>();
video_context1_mock_ = CreateD3D11Mock<D3D11VideoContext1Mock>();
dxgi_device_ = CreateD3D11Mock<DXGIDevice2Mock>();
- dxgi_adapter_ = CreateD3D11Mock<DXGIAdapter3Mock>();
+ dxgi_adapter_ = CreateD3D11Mock<NiceMock<DXGIAdapter3Mock>>();
// These flags are a reasonable subset of flags to get HARDWARE protected
// playback.
@@ -132,62 +142,49 @@ class D3D11CdmProxyTest : public ::testing::Test {
ON_CALL(create_device_mock_,
Create(_, D3D_DRIVER_TYPE_HARDWARE, _, _, _, _, _, _, _, _))
.WillByDefault(
- DoAll(AddRefAndSetArgPointee<7>(device_mock_.Get()),
- AddRefAndSetArgPointee<9>(device_context_mock_.Get()),
- Return(S_OK)));
+ DoAll(SetComPointee<7>(device_mock_.Get()),
+ SetComPointeeAndReturnOk<9>(device_context_mock_.Get())));
- ON_CALL(*device_mock_.Get(), QueryInterface(IID_ID3D11VideoDevice, _))
- .WillByDefault(DoAll(
- AddRefAndSetArgPointee<1>(video_device_mock_.Get()), Return(S_OK)));
+ COM_ON_CALL(device_mock_, QueryInterface(IID_ID3D11VideoDevice, _))
+ .WillByDefault(SetComPointeeAndReturnOk<1>(video_device_mock_.Get()));
- ON_CALL(*device_mock_.Get(), QueryInterface(IID_ID3D11VideoDevice1, _))
- .WillByDefault(
- DoAll(AddRefAndSetArgPointee<1>(video_device1_mock_.Get()),
- Return(S_OK)));
+ COM_ON_CALL(device_mock_, QueryInterface(IID_ID3D11VideoDevice1, _))
+ .WillByDefault(SetComPointeeAndReturnOk<1>(video_device1_mock_.Get()));
- ON_CALL(*device_mock_.Get(), QueryInterface(IID_IDXGIDevice2, _))
- .WillByDefault(
- DoAll(AddRefAndSetArgPointee<1>(dxgi_device_.Get()), Return(S_OK)));
+ COM_ON_CALL(device_mock_, QueryInterface(IID_IDXGIDevice2, _))
+ .WillByDefault(SetComPointeeAndReturnOk<1>(dxgi_device_.Get()));
- ON_CALL(*dxgi_device_.Get(), GetParent(IID_IDXGIAdapter3, _))
- .WillByDefault(DoAll(AddRefAndSetArgPointee<1>(dxgi_adapter_.Get()),
- Return(S_OK)));
+ COM_ON_CALL(dxgi_device_, GetParent(IID_IDXGIAdapter3, _))
+ .WillByDefault(SetComPointeeAndReturnOk<1>(dxgi_adapter_.Get()));
- ON_CALL(*dxgi_adapter_.Get(),
- RegisterHardwareContentProtectionTeardownStatusEvent(_, _))
+ COM_ON_CALL(dxgi_adapter_,
+ RegisterHardwareContentProtectionTeardownStatusEvent(_, _))
.WillByDefault(DoAll(SaveArg<0>(&teardown_event_), Return(S_OK)));
- ON_CALL(*device_context_mock_.Get(),
- QueryInterface(IID_ID3D11VideoContext, _))
- .WillByDefault(
- DoAll(AddRefAndSetArgPointee<1>(video_context_mock_.Get()),
- Return(S_OK)));
+ COM_ON_CALL(device_context_mock_, QueryInterface(IID_ID3D11VideoContext, _))
+ .WillByDefault(SetComPointeeAndReturnOk<1>(video_context_mock_.Get()));
- ON_CALL(*device_context_mock_.Get(),
- QueryInterface(IID_ID3D11VideoContext1, _))
- .WillByDefault(
- DoAll(AddRefAndSetArgPointee<1>(video_context1_mock_.Get()),
- Return(S_OK)));
+ COM_ON_CALL(device_context_mock_,
+ QueryInterface(IID_ID3D11VideoContext1, _))
+ .WillByDefault(SetComPointeeAndReturnOk<1>(video_context1_mock_.Get()));
- ON_CALL(*video_device_mock_.Get(),
- CreateCryptoSession(Pointee(CRYPTO_TYPE_GUID), _,
- Pointee(D3D11_KEY_EXCHANGE_HW_PROTECTION), _))
- .WillByDefault(
- DoAll(AddRefAndSetArgPointee<3>(crypto_session_mock_.Get()),
- Return(S_OK)));
+ COM_ON_CALL(
+ video_device_mock_,
+ CreateCryptoSession(Pointee(CRYPTO_TYPE_GUID), _,
+ Pointee(D3D11_KEY_EXCHANGE_HW_PROTECTION), _))
+ .WillByDefault(SetComPointeeAndReturnOk<3>(crypto_session_mock_.Get()));
- ON_CALL(
- *video_device1_mock_.Get(),
- GetCryptoSessionPrivateDataSize(Pointee(CRYPTO_TYPE_GUID), _, _, _, _))
+ COM_ON_CALL(video_device1_mock_, GetCryptoSessionPrivateDataSize(
+ Pointee(CRYPTO_TYPE_GUID), _, _, _, _))
.WillByDefault(DoAll(SetArgPointee<3>(kPrivateInputSize),
SetArgPointee<4>(kPrivateOutputSize),
Return(S_OK)));
- ON_CALL(*video_device_mock_.Get(), GetContentProtectionCaps(_, _, _))
+ COM_ON_CALL(video_device_mock_, GetContentProtectionCaps(_, _, _))
.WillByDefault(
DoAll(SetArgPointee<2>(content_protection_caps_), Return(S_OK)));
- ON_CALL(*video_device_mock_.Get(), CheckCryptoKeyExchange(_, _, Lt(1u), _))
+ COM_ON_CALL(video_device_mock_, CheckCryptoKeyExchange(_, _, Lt(1u), _))
.WillByDefault(DoAll(SetArgPointee<3>(D3D11_KEY_EXCHANGE_HW_PROTECTION),
Return(S_OK)));
}
@@ -196,74 +193,49 @@ class D3D11CdmProxyTest : public ::testing::Test {
// access to the mocks later.
void Initialize(CdmProxy::Client* client, CdmProxy::InitializeCB callback) {
EXPECT_CALL(create_device_mock_,
- Create(_, D3D_DRIVER_TYPE_HARDWARE, _, _, _, _, _, _, _, _))
- .WillOnce(DoAll(AddRefAndSetArgPointee<7>(device_mock_.Get()),
- AddRefAndSetArgPointee<9>(device_context_mock_.Get()),
- Return(S_OK)));
-
- EXPECT_CALL(*device_mock_.Get(), QueryInterface(IID_ID3D11VideoDevice, _))
- .Times(AtLeast(1))
- .WillRepeatedly(DoAll(
- AddRefAndSetArgPointee<1>(video_device_mock_.Get()), Return(S_OK)));
-
- EXPECT_CALL(*device_mock_.Get(), QueryInterface(IID_IDXGIDevice2, _))
- .Times(AtLeast(1))
- .WillRepeatedly(
- DoAll(AddRefAndSetArgPointee<1>(dxgi_device_.Get()), Return(S_OK)));
-
- EXPECT_CALL(*dxgi_device_.Get(), GetParent(IID_IDXGIAdapter3, _))
- .Times(AtLeast(1))
- .WillRepeatedly(DoAll(AddRefAndSetArgPointee<1>(dxgi_adapter_.Get()),
- Return(S_OK)));
-
- EXPECT_CALL(*dxgi_adapter_.Get(),
- RegisterHardwareContentProtectionTeardownStatusEvent(_, _))
- .Times(AtLeast(1))
- .WillRepeatedly(DoAll(SaveArg<0>(&teardown_event_), Return(S_OK)));
-
- EXPECT_CALL(*device_mock_.Get(), QueryInterface(IID_ID3D11VideoDevice1, _))
- .Times(AtLeast(1))
- .WillRepeatedly(
- DoAll(AddRefAndSetArgPointee<1>(video_device1_mock_.Get()),
- Return(S_OK)));
-
- EXPECT_CALL(*device_context_mock_.Get(),
- QueryInterface(IID_ID3D11VideoContext, _))
- .Times(AtLeast(1))
- .WillRepeatedly(
- DoAll(AddRefAndSetArgPointee<1>(video_context_mock_.Get()),
- Return(S_OK)));
-
- EXPECT_CALL(*device_context_mock_.Get(),
- QueryInterface(IID_ID3D11VideoContext1, _))
- .Times(AtLeast(1))
- .WillRepeatedly(
- DoAll(AddRefAndSetArgPointee<1>(video_context1_mock_.Get()),
- Return(S_OK)));
-
- EXPECT_CALL(
- *video_device_mock_.Get(),
+ Create(_, D3D_DRIVER_TYPE_HARDWARE, _, _, _, _, _, _, _, _));
+ COM_EXPECT_CALL(device_mock_, QueryInterface(IID_ID3D11VideoDevice, _))
+ .Times(AtLeast(1));
+ COM_EXPECT_CALL(device_mock_, QueryInterface(IID_IDXGIDevice2, _))
+ .Times(AtLeast(1));
+ COM_EXPECT_CALL(dxgi_device_, GetParent(IID_IDXGIAdapter3, _))
+ .Times(AtLeast(1));
+ COM_EXPECT_CALL(dxgi_adapter_,
+ RegisterHardwareContentProtectionTeardownStatusEvent(_, _))
+ .Times(AtLeast(1));
+ COM_EXPECT_CALL(device_mock_, QueryInterface(IID_ID3D11VideoDevice1, _))
+ .Times(AtLeast(1));
+ COM_EXPECT_CALL(device_context_mock_,
+ QueryInterface(IID_ID3D11VideoContext, _))
+ .Times(AtLeast(1));
+ COM_EXPECT_CALL(device_context_mock_,
+ QueryInterface(IID_ID3D11VideoContext1, _))
+ .Times(AtLeast(1));
+ COM_EXPECT_CALL(
+ video_device_mock_,
CreateCryptoSession(Pointee(CRYPTO_TYPE_GUID), _,
- Pointee(D3D11_KEY_EXCHANGE_HW_PROTECTION), _))
- .WillOnce(DoAll(AddRefAndSetArgPointee<3>(crypto_session_mock_.Get()),
- Return(S_OK)));
+ Pointee(D3D11_KEY_EXCHANGE_HW_PROTECTION), _));
+ COM_EXPECT_CALL(
+ video_device1_mock_,
+ GetCryptoSessionPrivateDataSize(Pointee(CRYPTO_TYPE_GUID), _, _, _, _));
- EXPECT_CALL(
- *video_device1_mock_.Get(),
- GetCryptoSessionPrivateDataSize(Pointee(CRYPTO_TYPE_GUID), _, _, _, _))
- .WillOnce(DoAll(SetArgPointee<3>(kPrivateInputSize),
- SetArgPointee<4>(kPrivateOutputSize), Return(S_OK)));
+ COM_EXPECT_CALL(video_device_mock_, GetContentProtectionCaps(_, _, _));
+
+ COM_EXPECT_CALL(video_device_mock_,
+ CheckCryptoKeyExchange(_, _, Lt(1u), _));
proxy_->Initialize(client, std::move(callback));
- ::testing::Mock::VerifyAndClearExpectations(device_mock_.Get());
- ::testing::Mock::VerifyAndClearExpectations(video_device_mock_.Get());
- ::testing::Mock::VerifyAndClearExpectations(video_device1_mock_.Get());
- ::testing::Mock::VerifyAndClearExpectations(crypto_session_mock_.Get());
- ::testing::Mock::VerifyAndClearExpectations(device_context_mock_.Get());
- ::testing::Mock::VerifyAndClearExpectations(video_context_mock_.Get());
- ::testing::Mock::VerifyAndClearExpectations(video_context1_mock_.Get());
+
+ Mock::VerifyAndClearExpectations(device_mock_.Get());
+ Mock::VerifyAndClearExpectations(video_device_mock_.Get());
+ Mock::VerifyAndClearExpectations(video_device1_mock_.Get());
+ Mock::VerifyAndClearExpectations(crypto_session_mock_.Get());
+ Mock::VerifyAndClearExpectations(device_context_mock_.Get());
+ Mock::VerifyAndClearExpectations(video_context_mock_.Get());
+ Mock::VerifyAndClearExpectations(video_context1_mock_.Get());
}
+ MockProxyClient client_;
std::unique_ptr<D3D11CdmProxy> proxy_;
std::unique_ptr<base::PowerMonitor> power_monitor_;
// Owned by power_monitor_. Use this to simulate a power-suspend.
@@ -272,15 +244,15 @@ class D3D11CdmProxyTest : public ::testing::Test {
D3D11CreateDeviceMock create_device_mock_;
CallbackMock callback_mock_;
- Microsoft::WRL::ComPtr<D3D11DeviceMock> device_mock_;
- Microsoft::WRL::ComPtr<D3D11VideoDeviceMock> video_device_mock_;
- Microsoft::WRL::ComPtr<D3D11VideoDevice1Mock> video_device1_mock_;
- Microsoft::WRL::ComPtr<D3D11CryptoSessionMock> crypto_session_mock_;
- Microsoft::WRL::ComPtr<D3D11DeviceContextMock> device_context_mock_;
- Microsoft::WRL::ComPtr<D3D11VideoContextMock> video_context_mock_;
- Microsoft::WRL::ComPtr<D3D11VideoContext1Mock> video_context1_mock_;
- Microsoft::WRL::ComPtr<DXGIDevice2Mock> dxgi_device_;
- Microsoft::WRL::ComPtr<DXGIAdapter3Mock> dxgi_adapter_;
+ ComPtr<D3D11DeviceMock> device_mock_;
+ ComPtr<D3D11VideoDeviceMock> video_device_mock_;
+ ComPtr<D3D11VideoDevice1Mock> video_device1_mock_;
+ ComPtr<D3D11CryptoSessionMock> crypto_session_mock_;
+ ComPtr<D3D11DeviceContextMock> device_context_mock_;
+ ComPtr<D3D11VideoContextMock> video_context_mock_;
+ ComPtr<D3D11VideoContext1Mock> video_context1_mock_;
+ ComPtr<DXGIDevice2Mock> dxgi_device_;
+ ComPtr<NiceMock<DXGIAdapter3Mock>> dxgi_adapter_;
D3D11_VIDEO_CONTENT_PROTECTION_CAPS content_protection_caps_ = {};
@@ -303,7 +275,7 @@ TEST_F(D3D11CdmProxyTest, FailedToCreateDevice) {
.WillOnce(Return(E_FAIL));
EXPECT_CALL(callback_mock_,
InitializeCallback(CdmProxy::Status::kFail, _, _));
- proxy_->Initialize(nullptr,
+ proxy_->Initialize(&client_,
base::BindOnce(&CallbackMock::InitializeCallback,
base::Unretained(&callback_mock_)));
}
@@ -312,39 +284,43 @@ TEST_F(D3D11CdmProxyTest, FailedToCreateDevice) {
TEST_F(D3D11CdmProxyTest, Initialize) {
EXPECT_CALL(callback_mock_, InitializeCallback(CdmProxy::Status::kOk, _, _));
ASSERT_NO_FATAL_FAILURE(
- Initialize(nullptr, base::BindOnce(&CallbackMock::InitializeCallback,
- base::Unretained(&callback_mock_))));
+ Initialize(&client_, base::BindOnce(&CallbackMock::InitializeCallback,
+ base::Unretained(&callback_mock_))));
}
// Hardware content protection teardown is notified to the proxy.
// Verify that the client is notified.
TEST_F(D3D11CdmProxyTest, HardwareContentProtectionTeardown) {
base::RunLoop run_loop;
- MockProxyClient client;
- EXPECT_CALL(client, NotifyHardwareReset()).WillOnce(Invoke([&run_loop]() {
- run_loop.Quit();
- }));
+
+ EXPECT_CALL(client_, NotifyHardwareReset());
+
+ base::MockCallback<CdmContext::EventCB> event_cb;
+ auto callback_registration =
+ proxy_->GetCdmContext()->RegisterEventCB(event_cb.Get());
+ EXPECT_CALL(event_cb, Run(CdmContext::Event::kHardwareContextLost))
+ .WillOnce(InvokeWithoutArgs([&run_loop]() { run_loop.Quit(); }));
EXPECT_CALL(callback_mock_, InitializeCallback(CdmProxy::Status::kOk, _, _));
ASSERT_NO_FATAL_FAILURE(
- Initialize(&client, base::BindOnce(&CallbackMock::InitializeCallback,
- base::Unretained(&callback_mock_))));
+ Initialize(&client_, base::BindOnce(&CallbackMock::InitializeCallback,
+ base::Unretained(&callback_mock_))));
SetEvent(teardown_event_);
run_loop.Run();
}
// Verify that failing to register to hardware content protection teardown
-// status event results in initializaion failure.
+// status event results in initialization failure.
TEST_F(D3D11CdmProxyTest, FailedToRegisterForContentProtectionTeardown) {
EXPECT_CALL(callback_mock_,
InitializeCallback(CdmProxy::Status::kFail, _, _));
- EXPECT_CALL(*dxgi_adapter_.Get(),
- RegisterHardwareContentProtectionTeardownStatusEvent(_, _))
+ COM_EXPECT_CALL(dxgi_adapter_,
+ RegisterHardwareContentProtectionTeardownStatusEvent(_, _))
.Times(AtLeast(1))
.WillRepeatedly(Return(E_FAIL));
- proxy_->Initialize(nullptr,
+ proxy_->Initialize(&client_,
base::BindOnce(&CallbackMock::InitializeCallback,
base::Unretained(&callback_mock_)));
}
@@ -352,15 +328,15 @@ TEST_F(D3D11CdmProxyTest, FailedToRegisterForContentProtectionTeardown) {
// Verify that the client is notified on power suspend.
TEST_F(D3D11CdmProxyTest, PowerSuspend) {
base::RunLoop run_loop;
- MockProxyClient client;
- EXPECT_CALL(client, NotifyHardwareReset()).WillOnce(Invoke([&run_loop]() {
+
+ EXPECT_CALL(client_, NotifyHardwareReset()).WillOnce(Invoke([&run_loop]() {
run_loop.Quit();
}));
EXPECT_CALL(callback_mock_, InitializeCallback(CdmProxy::Status::kOk, _, _));
ASSERT_NO_FATAL_FAILURE(
- Initialize(&client, base::BindOnce(&CallbackMock::InitializeCallback,
- base::Unretained(&callback_mock_))));
+ Initialize(&client_, base::BindOnce(&CallbackMock::InitializeCallback,
+ base::Unretained(&callback_mock_))));
mock_power_monitor_source_->Suspend();
run_loop.Run();
}
@@ -371,7 +347,7 @@ TEST_F(D3D11CdmProxyTest, NoPowerMonitor) {
EXPECT_CALL(callback_mock_,
InitializeCallback(CdmProxy::Status::kFail, _, _));
- proxy_->Initialize(nullptr,
+ proxy_->Initialize(&client_,
base::BindOnce(&CallbackMock::InitializeCallback,
base::Unretained(&callback_mock_)));
}
@@ -382,12 +358,11 @@ TEST_F(D3D11CdmProxyTest, NoHwKeyExchange) {
InitializeCallback(CdmProxy::Status::kFail, _, _));
// GUID is set to non-D3D11_KEY_EXCHANGE_HW_PROTECTION, which means no HW key
// exchange.
- EXPECT_CALL(*video_device_mock_.Get(),
- CheckCryptoKeyExchange(_, _, Lt(1u), _))
+ COM_EXPECT_CALL(video_device_mock_, CheckCryptoKeyExchange(_, _, Lt(1u), _))
.WillOnce(
DoAll(SetArgPointee<3>(D3D11_CRYPTO_TYPE_AES128_CTR), Return(S_OK)));
- proxy_->Initialize(nullptr,
+ proxy_->Initialize(&client_,
base::BindOnce(&CallbackMock::InitializeCallback,
base::Unretained(&callback_mock_)));
}
@@ -410,9 +385,9 @@ TEST_F(D3D11CdmProxyTest, ProcessInvalidCryptoSessionID) {
EXPECT_CALL(callback_mock_, InitializeCallback(CdmProxy::Status::kOk, _, _))
.WillOnce(SaveArg<2>(&crypto_session_id));
ASSERT_NO_FATAL_FAILURE(
- Initialize(nullptr, base::BindOnce(&CallbackMock::InitializeCallback,
- base::Unretained(&callback_mock_))));
- ::testing::Mock::VerifyAndClearExpectations(&callback_mock_);
+ Initialize(&client_, base::BindOnce(&CallbackMock::InitializeCallback,
+ base::Unretained(&callback_mock_))));
+ Mock::VerifyAndClearExpectations(&callback_mock_);
// The size nor value here matter, so making non empty non zero vector.
const std::vector<uint8_t> kAnyInput(16, 0xFF);
@@ -503,9 +478,9 @@ TEST_F(D3D11CdmProxyTest, Process) {
InitializeCallback(CdmProxy::Status::kOk, kTestProtocol, _))
.WillOnce(SaveArg<2>(&crypto_session_id));
ASSERT_NO_FATAL_FAILURE(
- Initialize(nullptr, base::BindOnce(&CallbackMock::InitializeCallback,
- base::Unretained(&callback_mock_))));
- ::testing::Mock::VerifyAndClearExpectations(&callback_mock_);
+ Initialize(&client_, base::BindOnce(&CallbackMock::InitializeCallback,
+ base::Unretained(&callback_mock_))));
+ Mock::VerifyAndClearExpectations(&callback_mock_);
// The size nor value here matter, so making non empty non zero vector.
const std::vector<uint8_t> kAnyInput(16, 0xFF);
@@ -555,11 +530,11 @@ TEST_F(D3D11CdmProxyTest, Process) {
test_output_data.size());
};
- EXPECT_CALL(*video_context_mock_.Get(),
- NegotiateCryptoSessionKeyExchange(
- _, sizeof(expected_key_exchange_data),
- MatchesKeyExchangeStructure(&expected_key_exchange_data,
- input_structure_size)))
+ COM_EXPECT_CALL(video_context_mock_,
+ NegotiateCryptoSessionKeyExchange(
+ _, sizeof(expected_key_exchange_data),
+ MatchesKeyExchangeStructure(&expected_key_exchange_data,
+ input_structure_size)))
.WillOnce(DoAll(WithArgs<2>(Invoke(set_test_output_data)), Return(S_OK)));
proxy_->Process(kTestFunction, crypto_session_id, kAnyInput,
@@ -585,27 +560,25 @@ TEST_F(D3D11CdmProxyTest, CreateMediaCryptoSessionNoExtraData) {
InitializeCallback(CdmProxy::Status::kOk, kTestProtocol, _))
.WillOnce(SaveArg<2>(&crypto_session_id_from_initialize));
ASSERT_NO_FATAL_FAILURE(
- Initialize(nullptr, base::BindOnce(&CallbackMock::InitializeCallback,
- base::Unretained(&callback_mock_))));
- ::testing::Mock::VerifyAndClearExpectations(&callback_mock_);
+ Initialize(&client_, base::BindOnce(&CallbackMock::InitializeCallback,
+ base::Unretained(&callback_mock_))));
+ Mock::VerifyAndClearExpectations(&callback_mock_);
// Expect a new crypto session.
EXPECT_CALL(callback_mock_, CreateMediaCryptoSessionCallback(
CdmProxy::Status::kOk,
Ne(crypto_session_id_from_initialize), _));
auto media_crypto_session_mock = CreateD3D11Mock<D3D11CryptoSessionMock>();
- EXPECT_CALL(*video_device_mock_.Get(),
- CreateCryptoSession(Pointee(CRYPTO_TYPE_GUID), _,
- Pointee(CRYPTO_TYPE_GUID), _))
- .WillOnce(
- DoAll(AddRefAndSetArgPointee<3>(media_crypto_session_mock.Get()),
- Return(S_OK)));
+ COM_EXPECT_CALL(video_device_mock_,
+ CreateCryptoSession(Pointee(CRYPTO_TYPE_GUID), _,
+ Pointee(CRYPTO_TYPE_GUID), _))
+ .WillOnce(SetComPointeeAndReturnOk<3>(media_crypto_session_mock.Get()));
- EXPECT_CALL(*video_context1_mock_.Get(), GetDataForNewHardwareKey(_, _, _, _))
+ COM_EXPECT_CALL(video_context1_mock_, GetDataForNewHardwareKey(_, _, _, _))
.Times(0);
- EXPECT_CALL(*video_context1_mock_.Get(),
- CheckCryptoSessionStatus(media_crypto_session_mock.Get(), _))
+ COM_EXPECT_CALL(video_context1_mock_,
+ CheckCryptoSessionStatus(media_crypto_session_mock.Get(), _))
.WillOnce(DoAll(SetArgPointee<1>(D3D11_CRYPTO_SESSION_STATUS_OK),
Return(S_OK)));
proxy_->CreateMediaCryptoSession(
@@ -634,9 +607,9 @@ TEST_F(D3D11CdmProxyTest, CreateMediaCryptoSessionWithExtraData) {
InitializeCallback(CdmProxy::Status::kOk, kTestProtocol, _))
.WillOnce(SaveArg<2>(&crypto_session_id_from_initialize));
ASSERT_NO_FATAL_FAILURE(
- Initialize(nullptr, base::BindOnce(&CallbackMock::InitializeCallback,
- base::Unretained(&callback_mock_))));
- ::testing::Mock::VerifyAndClearExpectations(&callback_mock_);
+ Initialize(&client_, base::BindOnce(&CallbackMock::InitializeCallback,
+ base::Unretained(&callback_mock_))));
+ Mock::VerifyAndClearExpectations(&callback_mock_);
// Expect a new crypto session.
EXPECT_CALL(callback_mock_, CreateMediaCryptoSessionCallback(
@@ -644,23 +617,21 @@ TEST_F(D3D11CdmProxyTest, CreateMediaCryptoSessionWithExtraData) {
Ne(crypto_session_id_from_initialize), _));
auto media_crypto_session_mock = CreateD3D11Mock<D3D11CryptoSessionMock>();
- EXPECT_CALL(*video_device_mock_.Get(),
- CreateCryptoSession(Pointee(CRYPTO_TYPE_GUID), _,
- Pointee(CRYPTO_TYPE_GUID), _))
- .WillOnce(
- DoAll(AddRefAndSetArgPointee<3>(media_crypto_session_mock.Get()),
- Return(S_OK)));
+ COM_EXPECT_CALL(video_device_mock_,
+ CreateCryptoSession(Pointee(CRYPTO_TYPE_GUID), _,
+ Pointee(CRYPTO_TYPE_GUID), _))
+ .WillOnce(SetComPointeeAndReturnOk<3>(media_crypto_session_mock.Get()));
// The size nor value here matter, so making non empty non zero vector.
const std::vector<uint8_t> kAnyInput(16, 0xFF);
const uint64_t kAnyOutputData = 23298u;
- EXPECT_CALL(*video_context1_mock_.Get(),
- GetDataForNewHardwareKey(media_crypto_session_mock.Get(),
- kAnyInput.size(),
- CastedToUint8Are(kAnyInput), _))
+ COM_EXPECT_CALL(video_context1_mock_,
+ GetDataForNewHardwareKey(media_crypto_session_mock.Get(),
+ kAnyInput.size(),
+ CastedToUint8Are(kAnyInput), _))
.WillOnce(DoAll(SetArgPointee<3>(kAnyOutputData), Return(S_OK)));
- EXPECT_CALL(*video_context1_mock_.Get(),
- CheckCryptoSessionStatus(media_crypto_session_mock.Get(), _))
+ COM_EXPECT_CALL(video_context1_mock_,
+ CheckCryptoSessionStatus(media_crypto_session_mock.Get(), _))
.WillOnce(DoAll(SetArgPointee<1>(D3D11_CRYPTO_SESSION_STATUS_OK),
Return(S_OK)));
proxy_->CreateMediaCryptoSession(
@@ -697,13 +668,11 @@ TEST_F(D3D11CdmProxyTest, GetD3D11DecryptContextNoKeyForKeyType) {
InitializeCallback(CdmProxy::Status::kOk, kTestProtocol, _))
.WillOnce(SaveArg<2>(&crypto_session_id_from_initialize));
ASSERT_NO_FATAL_FAILURE(
- Initialize(nullptr, base::BindOnce(&CallbackMock::InitializeCallback,
- base::Unretained(&callback_mock_))));
- ::testing::Mock::VerifyAndClearExpectations(&callback_mock_);
+ Initialize(&client_, base::BindOnce(&CallbackMock::InitializeCallback,
+ base::Unretained(&callback_mock_))));
+ Mock::VerifyAndClearExpectations(&callback_mock_);
- const std::vector<uint8_t> kAnyBlob = {
- 0x01, 0x4f, 0x83,
- };
+ const std::vector<uint8_t> kAnyBlob = {0x01, 0x4f, 0x83};
EXPECT_CALL(callback_mock_, SetKeyCallback(CdmProxy::Status::kOk));
proxy_->SetKey(crypto_session_id_from_initialize, kAnyBlob,
@@ -719,7 +688,7 @@ TEST_F(D3D11CdmProxyTest, GetD3D11DecryptContextNoKeyForKeyType) {
EXPECT_FALSE(decrypt_context);
}
-// Verifies that keys are set and is acccessible with a getter.
+// Verifies that keys are set and is accessible with a getter.
TEST_F(D3D11CdmProxyTest, SetKeyAndGetDecryptContext) {
base::WeakPtr<CdmContext> context = proxy_->GetCdmContext();
ASSERT_TRUE(context);
@@ -730,9 +699,9 @@ TEST_F(D3D11CdmProxyTest, SetKeyAndGetDecryptContext) {
InitializeCallback(CdmProxy::Status::kOk, kTestProtocol, _))
.WillOnce(SaveArg<2>(&crypto_session_id_from_initialize));
ASSERT_NO_FATAL_FAILURE(
- Initialize(nullptr, base::BindOnce(&CallbackMock::InitializeCallback,
- base::Unretained(&callback_mock_))));
- ::testing::Mock::VerifyAndClearExpectations(&callback_mock_);
+ Initialize(&client_, base::BindOnce(&CallbackMock::InitializeCallback,
+ base::Unretained(&callback_mock_))));
+ Mock::VerifyAndClearExpectations(&callback_mock_);
std::vector<uint8_t> kKeyId = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
@@ -742,12 +711,20 @@ TEST_F(D3D11CdmProxyTest, SetKeyAndGetDecryptContext) {
0xab, 0x01, 0x20, 0xd3, 0xee, 0x05, 0x99, 0x87,
0xff, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x7F,
};
+
+ base::MockCallback<CdmContext::EventCB> event_cb;
+ auto callback_registration = context->RegisterEventCB(event_cb.Get());
+ EXPECT_CALL(event_cb, Run(CdmContext::Event::kHasAdditionalUsableKey));
+
EXPECT_CALL(callback_mock_, SetKeyCallback(CdmProxy::Status::kOk));
proxy_->SetKey(crypto_session_id_from_initialize, kKeyId, kTestKeyType,
kKeyBlob,
base::BindOnce(&CallbackMock::SetKeyCallback,
base::Unretained(&callback_mock_)));
+ // |event_cb| is posted. Run the loop to make sure it's fired.
+ base::RunLoop().RunUntilIdle();
+
std::string key_id_str(kKeyId.begin(), kKeyId.end());
auto decrypt_context =
proxy_context->GetD3D11DecryptContext(kTestKeyType, key_id_str);
@@ -766,8 +743,8 @@ TEST_F(D3D11CdmProxyTest, SetKeyAndGetDecryptContext) {
// teardown..
TEST_F(D3D11CdmProxyTest, ClearKeysAfterHardwareContentProtectionTeardown) {
base::RunLoop run_loop;
- MockProxyClient client;
- EXPECT_CALL(client, NotifyHardwareReset()).WillOnce(Invoke([&run_loop]() {
+
+ EXPECT_CALL(client_, NotifyHardwareReset()).WillOnce(Invoke([&run_loop]() {
run_loop.Quit();
}));
@@ -780,9 +757,9 @@ TEST_F(D3D11CdmProxyTest, ClearKeysAfterHardwareContentProtectionTeardown) {
InitializeCallback(CdmProxy::Status::kOk, kTestProtocol, _))
.WillOnce(SaveArg<2>(&crypto_session_id_from_initialize));
ASSERT_NO_FATAL_FAILURE(
- Initialize(&client, base::BindOnce(&CallbackMock::InitializeCallback,
- base::Unretained(&callback_mock_))));
- ::testing::Mock::VerifyAndClearExpectations(&callback_mock_);
+ Initialize(&client_, base::BindOnce(&CallbackMock::InitializeCallback,
+ base::Unretained(&callback_mock_))));
+ Mock::VerifyAndClearExpectations(&callback_mock_);
std::vector<uint8_t> kKeyId = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
@@ -818,9 +795,9 @@ TEST_F(D3D11CdmProxyTest, RemoveKey) {
InitializeCallback(CdmProxy::Status::kOk, kTestProtocol, _))
.WillOnce(SaveArg<2>(&crypto_session_id_from_initialize));
ASSERT_NO_FATAL_FAILURE(
- Initialize(nullptr, base::BindOnce(&CallbackMock::InitializeCallback,
- base::Unretained(&callback_mock_))));
- ::testing::Mock::VerifyAndClearExpectations(&callback_mock_);
+ Initialize(&client_, base::BindOnce(&CallbackMock::InitializeCallback,
+ base::Unretained(&callback_mock_))));
+ Mock::VerifyAndClearExpectations(&callback_mock_);
std::vector<uint8_t> kKeyId = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
diff --git a/chromium/media/gpu/windows/d3d11_decryptor.h b/chromium/media/gpu/windows/d3d11_decryptor.h
index 2eb3ebff904..b8a1a761eb2 100644
--- a/chromium/media/gpu/windows/d3d11_decryptor.h
+++ b/chromium/media/gpu/windows/d3d11_decryptor.h
@@ -11,9 +11,9 @@
#include "base/macros.h"
#include "base/memory/weak_ptr.h"
#include "media/base/decryptor.h"
+#include "media/base/win/d3d11_create_device_cb.h"
#include "media/cdm/cdm_proxy_context.h"
#include "media/gpu/media_gpu_export.h"
-#include "media/gpu/windows/d3d11_create_device_cb.h"
namespace media {
diff --git a/chromium/media/gpu/windows/d3d11_decryptor_unittest.cc b/chromium/media/gpu/windows/d3d11_decryptor_unittest.cc
index c5b7a4a512e..f00dbec95e7 100644
--- a/chromium/media/gpu/windows/d3d11_decryptor_unittest.cc
+++ b/chromium/media/gpu/windows/d3d11_decryptor_unittest.cc
@@ -13,8 +13,8 @@
#include "base/stl_util.h"
#include "media/base/decoder_buffer.h"
#include "media/base/subsample_entry.h"
+#include "media/base/win/d3d11_mocks.h"
#include "media/cdm/cdm_proxy_context.h"
-#include "media/gpu/windows/d3d11_mocks.h"
using ::testing::_;
using ::testing::AtLeast;
@@ -169,51 +169,46 @@ class D3D11DecryptorTest : public ::testing::Test {
// crypto session.
EXPECT_CALL(*crypto_session_mock, GetDevice(_))
.Times(AtLeast(1))
- .WillRepeatedly(AddRefAndSetArgPointee<0>(device_mock_.Get()));
+ .WillRepeatedly(SetComPointee<0>(device_mock_.Get()));
// The other components accessible (directly or indirectly) from the device.
- EXPECT_CALL(*device_mock_.Get(), GetImmediateContext(_))
+ COM_EXPECT_CALL(device_mock_, GetImmediateContext(_))
.Times(AtLeast(1))
- .WillRepeatedly(AddRefAndSetArgPointee<0>(device_context_mock_.Get()));
- EXPECT_CALL(*device_context_mock_.Get(),
- QueryInterface(IID_ID3D11VideoContext, _))
+ .WillRepeatedly(SetComPointee<0>(device_context_mock_.Get()));
+ COM_EXPECT_CALL(device_context_mock_,
+ QueryInterface(IID_ID3D11VideoContext, _))
.Times(AtLeast(1))
- .WillRepeatedly(
- DoAll(AddRefAndSetArgPointee<1>(video_context_mock_.Get()),
- Return(S_OK)));
+ .WillRepeatedly(SetComPointeeAndReturnOk<1>(video_context_mock_.Get()));
EXPECT_CALL(mock_proxy_,
GetD3D11DecryptContext(CdmProxy::KeyType::kDecryptOnly, kKeyId))
.WillOnce(Return(*decrypt_context));
// These return big enough size.
- ON_CALL(*staging_buffer1_.Get(), GetDesc(_))
+ COM_ON_CALL(staging_buffer1_, GetDesc(_))
.WillByDefault(SetBufferDescSize(20000));
- ON_CALL(*staging_buffer2_.Get(), GetDesc(_))
+ COM_ON_CALL(staging_buffer2_, GetDesc(_))
.WillByDefault(SetBufferDescSize(20000));
- ON_CALL(*gpu_buffer_.Get(), GetDesc(_))
+ COM_ON_CALL(gpu_buffer_, GetDesc(_))
.WillByDefault(SetBufferDescSize(20000));
// It should be requesting for 2 staging buffers one for writing the data to
// a GPU buffer and one for reading from the a GPU buffer.
- EXPECT_CALL(*device_mock_.Get(),
- CreateBuffer(BufferDescHas(D3D11_USAGE_STAGING, 0u,
- D3D11_CPU_ACCESS_READ |
- D3D11_CPU_ACCESS_WRITE),
- nullptr, _))
- .WillOnce(DoAll(AddRefAndSetArgPointee<2>(staging_buffer1_.Get()),
- Return(S_OK)))
- .WillOnce(DoAll(AddRefAndSetArgPointee<2>(staging_buffer2_.Get()),
- Return(S_OK)));
+ COM_EXPECT_CALL(device_mock_,
+ CreateBuffer(BufferDescHas(D3D11_USAGE_STAGING, 0u,
+ D3D11_CPU_ACCESS_READ |
+ D3D11_CPU_ACCESS_WRITE),
+ nullptr, _))
+ .WillOnce(SetComPointeeAndReturnOk<2>(staging_buffer1_.Get()))
+ .WillOnce(SetComPointeeAndReturnOk<2>(staging_buffer2_.Get()));
// It should be requesting a GPU only accessible buffer to the decrypted
// output.
- EXPECT_CALL(*device_mock_.Get(),
- CreateBuffer(BufferDescHas(D3D11_USAGE_DEFAULT,
- D3D11_BIND_RENDER_TARGET, 0u),
- nullptr, _))
- .WillOnce(
- DoAll(AddRefAndSetArgPointee<2>(gpu_buffer_.Get()), Return(S_OK)));
+ COM_EXPECT_CALL(device_mock_,
+ CreateBuffer(BufferDescHas(D3D11_USAGE_DEFAULT,
+ D3D11_BIND_RENDER_TARGET, 0u),
+ nullptr, _))
+ .WillOnce(SetComPointeeAndReturnOk<2>(gpu_buffer_.Get()));
}
// |input| is the input to the Decrypt() function, the subsample information
@@ -237,22 +232,22 @@ class D3D11DecryptorTest : public ::testing::Test {
// It should be requesting for a memory mapped buffer, from the staging
// buffer, to pass the encrypted data to the GPU.
- EXPECT_CALL(*device_context_mock_.Get(),
- Map(staging_buffer1_.Get(), 0, D3D11_MAP_WRITE, _, _))
+ COM_EXPECT_CALL(device_context_mock_,
+ Map(staging_buffer1_.Get(), 0, D3D11_MAP_WRITE, _, _))
.WillOnce(
DoAll(SetArgPointee<4>(staging_buffer1_subresource), Return(S_OK)));
- EXPECT_CALL(*device_context_mock_.Get(), Unmap(staging_buffer1_.Get(), 0));
+ COM_EXPECT_CALL(device_context_mock_, Unmap(staging_buffer1_.Get(), 0));
- EXPECT_CALL(
- *video_context_mock_.Get(),
+ COM_EXPECT_CALL(
+ video_context_mock_,
DecryptionBlt(
crypto_session_mock,
reinterpret_cast<ID3D11Texture2D*>(staging_buffer1_.Get()),
reinterpret_cast<ID3D11Texture2D*>(gpu_buffer_.Get()),
NumEncryptedBytesAtBeginningGreaterOrEq(encrypted_input.size()),
sizeof(kAnyKeyBlob), kAnyKeyBlob, _, _));
- EXPECT_CALL(*device_context_mock_.Get(),
- CopyResource(staging_buffer2_.Get(), gpu_buffer_.Get()));
+ COM_EXPECT_CALL(device_context_mock_,
+ CopyResource(staging_buffer2_.Get(), gpu_buffer_.Get()));
D3D11_MAPPED_SUBRESOURCE staging_buffer2_subresource = {};
@@ -266,11 +261,11 @@ class D3D11DecryptorTest : public ::testing::Test {
// Tt should be requesting for a memory mapped buffer, from the staging
// buffer, to read the decrypted data out from the GPU buffer.
- EXPECT_CALL(*device_context_mock_.Get(),
- Map(staging_buffer2_.Get(), 0, D3D11_MAP_READ, _, _))
+ COM_EXPECT_CALL(device_context_mock_,
+ Map(staging_buffer2_.Get(), 0, D3D11_MAP_READ, _, _))
.WillOnce(
DoAll(SetArgPointee<4>(staging_buffer2_subresource), Return(S_OK)));
- EXPECT_CALL(*device_context_mock_.Get(), Unmap(staging_buffer2_.Get(), 0));
+ COM_EXPECT_CALL(device_context_mock_, Unmap(staging_buffer2_.Get(), 0));
CallbackMock callbacks;
EXPECT_CALL(callbacks,
@@ -504,16 +499,16 @@ TEST_F(D3D11DecryptorTest, ReuseBuffers) {
Mock::VerifyAndClearExpectations(device_context_mock_.Get());
Mock::VerifyAndClearExpectations(&mock_proxy_);
- EXPECT_CALL(*crypto_session_mock.Get(), GetDevice(_))
+ COM_EXPECT_CALL(crypto_session_mock, GetDevice(_))
.Times(AtLeast(1))
- .WillRepeatedly(AddRefAndSetArgPointee<0>(device_mock_.Get()));
+ .WillRepeatedly(SetComPointee<0>(device_mock_.Get()));
EXPECT_CALL(mock_proxy_,
GetD3D11DecryptContext(CdmProxy::KeyType::kDecryptOnly, kKeyId))
.WillOnce(Return(decrypt_context));
// Buffers should not be (re)initialized on the next call to decrypt because
// it's the same device as the previous call.
- EXPECT_CALL(*device_mock_.Get(), CreateBuffer(_, _, _)).Times(0);
+ COM_EXPECT_CALL(device_mock_, CreateBuffer(_, _, _)).Times(0);
// This calls Decrypt() so that the expectations above are triggered.
ExpectSuccessfulDecryption(crypto_session_mock.Get(), kAnyInput, kAnyInput,
diff --git a/chromium/media/gpu/windows/d3d11_h264_accelerator.cc b/chromium/media/gpu/windows/d3d11_h264_accelerator.cc
index ecc7a52482a..edbfb902edf 100644
--- a/chromium/media/gpu/windows/d3d11_h264_accelerator.cc
+++ b/chromium/media/gpu/windows/d3d11_h264_accelerator.cc
@@ -73,7 +73,12 @@ D3D11H264Accelerator::D3D11H264Accelerator(
cdm_proxy_context_(cdm_proxy_context),
video_decoder_(video_decoder),
video_device_(video_device),
- video_context_(std::move(video_context)) {}
+ video_context_(std::move(video_context)) {
+ DCHECK(client);
+ DCHECK(media_log_);
+ // |cdm_proxy_context_| is non-null for encrypted content but can be null for
+ // clear content.
+}
D3D11H264Accelerator::~D3D11H264Accelerator() {}
@@ -100,6 +105,7 @@ Status D3D11H264Accelerator::SubmitFrameMetadata(
// D3D11_VIDEO_DECODER_BEGIN_FRAME_CRYPTO_SESSION is a pointer (to a GUID).
base::Optional<CdmProxyContext::D3D11DecryptContext> decrypt_context;
if (is_encrypted) {
+ DCHECK(cdm_proxy_context_) << "No CdmProxyContext but picture is encrypted";
decrypt_context = cdm_proxy_context_->GetD3D11DecryptContext(
CdmProxy::KeyType::kDecryptAndDecode, pic->decrypt_config()->key_id());
if (!decrypt_context) {
@@ -559,10 +565,8 @@ void D3D11H264Accelerator::RecordFailure(const std::string& reason,
hr_string = ": " + logging::SystemErrorCodeToString(hr);
DLOG(ERROR) << reason << hr_string;
- if (media_log_) {
- media_log_->AddEvent(media_log_->CreateStringEvent(
- MediaLogEvent::MEDIA_ERROR_LOG_ENTRY, "error", hr_string + reason));
- }
+ media_log_->AddEvent(media_log_->CreateStringEvent(
+ MediaLogEvent::MEDIA_ERROR_LOG_ENTRY, "error", hr_string + reason));
}
} // namespace media
diff --git a/chromium/media/gpu/windows/d3d11_video_decoder.cc b/chromium/media/gpu/windows/d3d11_video_decoder.cc
index 35b890e960e..082136cf1ad 100644
--- a/chromium/media/gpu/windows/d3d11_video_decoder.cc
+++ b/chromium/media/gpu/windows/d3d11_video_decoder.cc
@@ -12,6 +12,7 @@
#include "base/feature_list.h"
#include "base/memory/ref_counted_delete_on_sequence.h"
#include "base/metrics/histogram_macros.h"
+#include "gpu/config/gpu_finch_features.h"
#include "media/base/bind_to_current_loop.h"
#include "media/base/cdm_context.h"
#include "media/base/decoder_buffer.h"
@@ -41,11 +42,6 @@ bool IsH264(const VideoDecoderConfig& config) {
return INRANGE(config.profile(), H264);
}
-bool IsUnsupportedVP9Profile(const VideoDecoderConfig& config) {
- return config.profile() == VP9PROFILE_PROFILE1 ||
- config.profile() == VP9PROFILE_PROFILE3;
-}
-
#undef INRANGE
// Holder class, so that we don't keep creating CommandBufferHelpers every time
@@ -88,7 +84,9 @@ std::unique_ptr<VideoDecoder> D3D11VideoDecoder::Create(
std::unique_ptr<MediaLog> media_log,
const gpu::GpuPreferences& gpu_preferences,
const gpu::GpuDriverBugWorkarounds& gpu_workarounds,
- base::RepeatingCallback<gpu::CommandBufferStub*()> get_stub_cb) {
+ base::RepeatingCallback<gpu::CommandBufferStub*()> get_stub_cb,
+ D3D11VideoDecoder::GetD3D11DeviceCB get_d3d11_device_cb,
+ SupportedConfigs supported_configs) {
// We create |impl_| on the wrong thread, but we never use it here.
// Note that the output callback will hop to our thread, post the video
// frame, and along with a callback that will hop back to the impl thread
@@ -105,7 +103,8 @@ std::unique_ptr<VideoDecoder> D3D11VideoDecoder::Create(
gpu_preferences, gpu_workarounds,
std::make_unique<D3D11VideoDecoderImpl>(
std::move(cloned_media_log), get_helper_cb),
- get_helper_cb));
+ get_helper_cb, std::move(get_d3d11_device_cb),
+ std::move(supported_configs)));
}
D3D11VideoDecoder::D3D11VideoDecoder(
@@ -114,16 +113,20 @@ D3D11VideoDecoder::D3D11VideoDecoder(
const gpu::GpuPreferences& gpu_preferences,
const gpu::GpuDriverBugWorkarounds& gpu_workarounds,
std::unique_ptr<D3D11VideoDecoderImpl> impl,
- base::RepeatingCallback<scoped_refptr<CommandBufferHelper>()> get_helper_cb)
+ base::RepeatingCallback<scoped_refptr<CommandBufferHelper>()> get_helper_cb,
+ GetD3D11DeviceCB get_d3d11_device_cb,
+ SupportedConfigs supported_configs)
: media_log_(std::move(media_log)),
impl_(std::move(impl)),
impl_task_runner_(std::move(gpu_task_runner)),
gpu_preferences_(gpu_preferences),
gpu_workarounds_(gpu_workarounds),
- create_device_func_(base::BindRepeating(D3D11CreateDevice)),
+ get_d3d11_device_cb_(std::move(get_d3d11_device_cb)),
get_helper_cb_(std::move(get_helper_cb)),
+ supported_configs_(std::move(supported_configs)),
weak_factory_(this) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(media_log_);
impl_weak_ = impl_->GetWeakPtr();
}
@@ -197,8 +200,7 @@ bool D3D11VideoDecoder::DeviceHasDecoderID(GUID decoder_guid) {
}
GUID D3D11VideoDecoder::GetD3D11DecoderGUID(const VideoDecoderConfig& config) {
- if (IsVP9(config) && base::FeatureList::IsEnabled(kD3D11VP9Decoder))
- // TODO(tmathmeyer) set up a finch experiment.
+ if (IsVP9(config))
return D3D11_DECODER_PROFILE_VP9_VLD_PROFILE0;
if (IsH264(config))
@@ -207,40 +209,56 @@ GUID D3D11VideoDecoder::GetD3D11DecoderGUID(const VideoDecoderConfig& config) {
return {};
}
-void D3D11VideoDecoder::Initialize(
- const VideoDecoderConfig& config,
- bool low_delay,
- CdmContext* cdm_context,
- const InitCB& init_cb,
- const OutputCB& output_cb,
- const WaitingForDecryptionKeyCB& waiting_for_decryption_key_cb) {
+void D3D11VideoDecoder::Initialize(const VideoDecoderConfig& config,
+ bool low_delay,
+ CdmContext* cdm_context,
+ const InitCB& init_cb,
+ const OutputCB& output_cb,
+ const WaitingCB& waiting_cb) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(output_cb);
+ DCHECK(waiting_cb);
- if (!IsPotentiallySupported(config)) {
- DVLOG(3) << "D3D11 video decoder not supported for the config.";
- init_cb.Run(false);
- return;
- }
+ state_ = State::kInitializing;
+ config_ = config;
init_cb_ = init_cb;
output_cb_ = output_cb;
- is_encrypted_ = config.is_encrypted();
-
- D3D11VideoDecoderImpl::InitCB cb = base::BindOnce(
- &D3D11VideoDecoder::OnGpuInitComplete, weak_factory_.GetWeakPtr());
+ waiting_cb_ = waiting_cb;
+
+ // Verify that |config| matches one of the supported configurations. This
+ // helps us skip configs that are supported by the VDA but not us, since
+ // GpuMojoMediaClient merges them. This is not hacky, even in the tiniest
+ // little bit, nope. Definitely not. Convinced?
+ bool is_supported = false;
+ for (const auto& supported_config : supported_configs_) {
+ if (supported_config.Matches(config)) {
+ is_supported = true;
+ break;
+ }
+ }
- D3D11VideoDecoderImpl::ReturnPictureBufferCB return_picture_buffer_cb =
- base::BindRepeating(&D3D11VideoDecoder::ReceivePictureBufferFromClient,
- weak_factory_.GetWeakPtr());
+ if (!is_supported) {
+ NotifyError("D3D11VideoDecoder does not support this config");
+ return;
+ }
// Initialize the video decoder.
- // Use the ANGLE device, rather than create our own. It would be nice if we
- // could use our own device, and run on the mojo thread, but texture sharing
- // seems to be difficult.
- // TODO(liberato): take |device_| as input.
+ // Note that we assume that this is the ANGLE device, since we don't implement
+ // texture sharing properly. That also implies that this is the GPU main
+ // thread, since we use non-threadsafe properties of the device (e.g., we get
+ // the immediate context).
+ //
+ // Also note that we don't technically have a guarantee that the ANGLE device
+ // will use the most recent version of D3D11; it might be configured to use
+ // D3D9. In practice, though, it seems to use 11.1 if it's available, unless
+ // it's been specifically configured via switch to avoid d3d11.
+ //
// TODO(liberato): On re-init, we can probably re-use the device.
- device_ = gl::QueryD3D11DeviceObjectFromANGLE();
+ device_ = get_d3d11_device_cb_.Run();
+ usable_feature_level_ = device_->GetFeatureLevel();
+
if (!device_) {
// This happens if, for example, if chrome is configured to use
// D3D9 for ANGLE.
@@ -262,7 +280,7 @@ void D3D11VideoDecoder::Initialize(
GUID decoder_guid = GetD3D11DecoderGUID(config);
if (!DeviceHasDecoderID(decoder_guid)) {
- NotifyError("Did not find a supported profile");
+ NotifyError("D3D11: Did not find a supported profile");
return;
}
@@ -273,7 +291,10 @@ void D3D11VideoDecoder::Initialize(
NotifyError("Failed to query ID3D11Multithread");
return;
}
- multi_threaded->SetMultithreadProtected(TRUE);
+ // TODO(liberato): This is a hack, since the unittest returns
+ // success without providing |multi_threaded|.
+ if (multi_threaded)
+ multi_threaded->SetMultithreadProtected(TRUE);
D3D11_VIDEO_DECODER_DESC desc = {};
desc.Guid = decoder_guid;
@@ -295,7 +316,21 @@ void D3D11VideoDecoder::Initialize(
NotifyError("Failed to get decoder config");
return;
}
- if (dec_config.ConfigBitstreamRaw == 2) {
+
+ if (config.is_encrypted() && dec_config.guidConfigBitstreamEncryption !=
+ D3D11_DECODER_ENCRYPTION_HW_CENC) {
+ // For encrypted media, it has to use HW CENC decoder config.
+ continue;
+ }
+
+ if (IsVP9(config) && dec_config.ConfigBitstreamRaw == 1) {
+ // DXVA VP9 specification mentions ConfigBitstreamRaw "shall be 1".
+ found = true;
+ break;
+ }
+
+ if (IsH264(config) && dec_config.ConfigBitstreamRaw == 2) {
+ // ConfigBitstreamRaw == 2 means the decoder uses DXVA_Slice_H264_Short.
found = true;
break;
}
@@ -305,9 +340,6 @@ void D3D11VideoDecoder::Initialize(
return;
}
- if (is_encrypted_)
- dec_config.guidConfigBitstreamEncryption = D3D11_DECODER_ENCRYPTION_HW_CENC;
-
memcpy(&decoder_guid_, &decoder_guid, sizeof decoder_guid_);
Microsoft::WRL::ComPtr<ID3D11VideoDecoder> video_decoder;
@@ -325,7 +357,7 @@ void D3D11VideoDecoder::Initialize(
#endif
// Ensure that if we are encrypted, that we have a CDM.
- if (is_encrypted_ && !proxy_context) {
+ if (config_.is_encrypted() && !proxy_context) {
NotifyError("Video stream is encrypted, but no cdm was found");
return;
}
@@ -337,20 +369,34 @@ void D3D11VideoDecoder::Initialize(
return;
}
+ // At this point, playback is supported so add a line in the media log to help
+ // us figure that out.
+ media_log_->AddEvent(
+ media_log_->CreateStringEvent(MediaLogEvent::MEDIA_INFO_LOG_ENTRY, "info",
+ "Video is supported by D3D11VideoDecoder"));
+
// |cdm_context| could be null for clear playback.
// TODO(liberato): On re-init, should this still happen?
if (cdm_context) {
new_key_callback_registration_ =
- cdm_context->RegisterNewKeyCB(base::BindRepeating(
- &D3D11VideoDecoder::NotifyNewKey, weak_factory_.GetWeakPtr()));
+ cdm_context->RegisterEventCB(base::BindRepeating(
+ &D3D11VideoDecoder::OnCdmContextEvent, weak_factory_.GetWeakPtr()));
}
+ auto impl_init_cb = base::BindOnce(&D3D11VideoDecoder::OnGpuInitComplete,
+ weak_factory_.GetWeakPtr());
+
+ auto get_picture_buffer_cb =
+ base::BindRepeating(&D3D11VideoDecoder::ReceivePictureBufferFromClient,
+ weak_factory_.GetWeakPtr());
+
// Initialize the gpu side. We wait until everything else is initialized,
// since we allow it to call us back re-entrantly to reduce latency. Note
// that if we're not on the same thread, then we should probably post the
// call earlier, since re-entrancy won't be an issue.
if (impl_task_runner_->RunsTasksInCurrentSequence()) {
- impl_->Initialize(std::move(cb), std::move(return_picture_buffer_cb));
+ impl_->Initialize(std::move(impl_init_cb),
+ std::move(get_picture_buffer_cb));
return;
}
@@ -361,8 +407,8 @@ void D3D11VideoDecoder::Initialize(
impl_task_runner_->PostTask(
FROM_HERE,
base::BindOnce(&D3D11VideoDecoderImpl::Initialize, impl_weak_,
- BindToCurrentLoop(std::move(cb)),
- BindToCurrentLoop(std::move(return_picture_buffer_cb))));
+ BindToCurrentLoop(std::move(impl_init_cb)),
+ BindToCurrentLoop(std::move(get_picture_buffer_cb))));
}
void D3D11VideoDecoder::ReceivePictureBufferFromClient(
@@ -383,9 +429,12 @@ void D3D11VideoDecoder::OnGpuInitComplete(bool success) {
if (!init_cb_) {
// We already failed, so just do nothing.
+ DCHECK_EQ(state_, State::kError);
return;
}
+ DCHECK_EQ(state_, State::kInitializing);
+
if (!success) {
NotifyError("Gpu init failed");
return;
@@ -406,6 +455,7 @@ void D3D11VideoDecoder::Decode(scoped_refptr<DecoderBuffer> buffer,
}
input_buffer_queue_.push_back(std::make_pair(std::move(buffer), decode_cb));
+
// Post, since we're not supposed to call back before this returns. It
// probably doesn't matter since we're in the gpu process anyway.
base::ThreadTaskRunnerHandle::Get()->PostTask(
@@ -416,8 +466,11 @@ void D3D11VideoDecoder::Decode(scoped_refptr<DecoderBuffer> buffer,
void D3D11VideoDecoder::DoDecode() {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
- if (state_ != State::kRunning)
+ if (state_ != State::kRunning) {
+ DVLOG(2) << __func__ << ": Do nothing in " << static_cast<int>(state_)
+ << " state.";
return;
+ }
if (!current_buffer_) {
if (input_buffer_queue_.empty()) {
@@ -471,7 +524,8 @@ void D3D11VideoDecoder::DoDecode() {
CreatePictureBuffers();
} else if (result == media::AcceleratedVideoDecoder::kTryAgain) {
state_ = State::kWaitingForNewKey;
- // Note that another DoDecode() task would be posted in NotifyNewKey().
+ waiting_cb_.Run(WaitingReason::kNoDecryptionKey);
+ // Another DoDecode() task would be posted in OnCdmContextEvent().
return;
} else {
LOG(ERROR) << "VDA Error " << result;
@@ -487,6 +541,7 @@ void D3D11VideoDecoder::DoDecode() {
void D3D11VideoDecoder::Reset(const base::RepeatingClosure& closure) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK_NE(state_, State::kInitializing);
current_buffer_ = nullptr;
if (current_decode_cb_)
@@ -498,6 +553,22 @@ void D3D11VideoDecoder::Reset(const base::RepeatingClosure& closure) {
// TODO(liberato): how do we signal an error?
accelerated_video_decoder_->Reset();
+
+ if (state_ == State::kWaitingForReset && config_.is_encrypted()) {
+ // On a hardware context loss event, a new swap chain has to be created (in
+ // the compositor). By clearing the picture buffers, next DoDecode() call
+ // will create a new texture. This makes the compositor to create a new swap
+ // chain.
+ // More detailed explanation at crbug.com/858286
+ picture_buffers_.clear();
+ }
+
+ // Transition out of kWaitingForNewKey since the new buffer could be clear or
+ // have a different key ID. Transition out of kWaitingForReset since reset
+ // just happened.
+ if (state_ == State::kWaitingForNewKey || state_ == State::kWaitingForReset)
+ state_ = State::kRunning;
+
closure.Run();
}
@@ -543,7 +614,14 @@ void D3D11VideoDecoder::CreatePictureBuffers() {
texture_desc.Usage = D3D11_USAGE_DEFAULT;
texture_desc.BindFlags = D3D11_BIND_DECODER | D3D11_BIND_SHADER_RESOURCE;
texture_desc.MiscFlags = D3D11_RESOURCE_MISC_SHARED;
- if (is_encrypted_)
+ if (base::FeatureList::IsEnabled(
+ features::kDirectCompositionUseNV12DecodeSwapChain)) {
+ // Decode swap chains do not support shared resources.
+ // TODO(sunnyps): Find a workaround for when the decoder moves to its own
+ // thread and D3D device. See https://crbug.com/911847
+ texture_desc.MiscFlags = 0;
+ }
+ if (config_.is_encrypted())
texture_desc.MiscFlags |= D3D11_RESOURCE_MISC_HW_PROTECTED;
Microsoft::WRL::ComPtr<ID3D11Texture2D> out_texture;
@@ -592,11 +670,14 @@ void D3D11VideoDecoder::OutputResult(const CodecPicture* picture,
picture_buffer->set_in_client_use(true);
// Note: The pixel format doesn't matter.
- gfx::Rect visible_rect(picture->visible_rect());
- // TODO(liberato): Pixel aspect ratio should come from the VideoDecoderConfig
- // (except when it should come from the SPS).
- // https://crbug.com/837337
- double pixel_aspect_ratio = 1.0;
+ gfx::Rect visible_rect = picture->visible_rect();
+ if (visible_rect.IsEmpty())
+ visible_rect = config_.visible_rect();
+
+ // TODO(https://crbug.com/843150): Use aspect ratio from decoder (SPS) if
+ // stream metadata doesn't overrride it.
+ double pixel_aspect_ratio = config_.GetPixelAspectRatio();
+
base::TimeDelta timestamp = picture_buffer->timestamp_;
scoped_refptr<VideoFrame> frame = VideoFrame::WrapNativeTextures(
PIXEL_FORMAT_NV12, picture_buffer->mailbox_holders(),
@@ -613,7 +694,7 @@ void D3D11VideoDecoder::OutputResult(const CodecPicture* picture,
// that ALLOW_OVERLAY is required for encrypted video path.
frame->metadata()->SetBoolean(VideoFrameMetadata::ALLOW_OVERLAY, true);
- if (is_encrypted_) {
+ if (config_.is_encrypted()) {
frame->metadata()->SetBoolean(VideoFrameMetadata::PROTECTED_VIDEO, true);
frame->metadata()->SetBoolean(VideoFrameMetadata::HW_PROTECTED, true);
}
@@ -622,30 +703,42 @@ void D3D11VideoDecoder::OutputResult(const CodecPicture* picture,
output_cb_.Run(frame);
}
-void D3D11VideoDecoder::NotifyNewKey() {
+void D3D11VideoDecoder::OnCdmContextEvent(CdmContext::Event event) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DVLOG(1) << __func__ << ": event = " << static_cast<int>(event);
- if (state_ != State::kWaitingForNewKey) {
- // Note that this method may be called before DoDecode() because the key
- // acquisition stack may be running independently of the media decoding
- // stack. So if this isn't in kWaitingForNewKey state no "resuming" is
- // required therefore no special action taken here.
+ if (state_ == State::kInitializing || state_ == State::kError) {
+ DVLOG(1) << "Do nothing in " << static_cast<int>(state_) << " state.";
return;
}
- state_ = State::kRunning;
- base::ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE,
- base::BindOnce(&D3D11VideoDecoder::DoDecode, weak_factory_.GetWeakPtr()));
+ switch (event) {
+ case CdmContext::Event::kHasAdditionalUsableKey:
+ // Note that this event may happen before DoDecode() because the key
+ // acquisition stack runs independently of the media decoding stack.
+ // So if this isn't in kWaitingForNewKey state no "resuming" is
+ // required therefore no special action taken here.
+ if (state_ != State::kWaitingForNewKey)
+ return;
+
+ state_ = State::kRunning;
+ base::ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, base::BindOnce(&D3D11VideoDecoder::DoDecode,
+ weak_factory_.GetWeakPtr()));
+ return;
+
+ case CdmContext::Event::kHardwareContextLost:
+ state_ = State::kWaitingForReset;
+ waiting_cb_.Run(WaitingReason::kDecoderStateLost);
+ return;
+ }
}
void D3D11VideoDecoder::NotifyError(const char* reason) {
state_ = State::kError;
DLOG(ERROR) << reason;
- if (media_log_) {
- media_log_->AddEvent(media_log_->CreateStringEvent(
- MediaLogEvent::MEDIA_ERROR_LOG_ENTRY, "error", reason));
- }
+ media_log_->AddEvent(media_log_->CreateStringEvent(
+ MediaLogEvent::MEDIA_ERROR_LOG_ENTRY, "error", reason));
if (init_cb_)
std::move(init_cb_).Run(false);
@@ -658,124 +751,88 @@ void D3D11VideoDecoder::NotifyError(const char* reason) {
input_buffer_queue_.clear();
}
-void D3D11VideoDecoder::SetCreateDeviceCallbackForTesting(
- D3D11CreateDeviceCB callback) {
- create_device_func_ = std::move(callback);
-}
-
-void D3D11VideoDecoder::SetWasSupportedReason(
- D3D11VideoNotSupportedReason enum_value) {
- DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
-
- UMA_HISTOGRAM_ENUMERATION("Media.D3D11.WasVideoSupported", enum_value);
-
- const char* reason = nullptr;
- switch (enum_value) {
- case D3D11VideoNotSupportedReason::kVideoIsSupported:
- reason = "Playback is supported by D3D11VideoDecoder";
- break;
- case D3D11VideoNotSupportedReason::kInsufficientD3D11FeatureLevel:
- reason = "Insufficient D3D11 feature level";
- break;
- case D3D11VideoNotSupportedReason::kProfileNotSupported:
- reason = "Video profile is not supported by D3D11VideoDecoder";
- break;
- case D3D11VideoNotSupportedReason::kCodecNotSupported:
- reason = "H264 is required for D3D11VideoDecoder";
- break;
- case D3D11VideoNotSupportedReason::kZeroCopyNv12Required:
- reason = "Must allow zero-copy NV12 for D3D11VideoDecoder";
- break;
- case D3D11VideoNotSupportedReason::kZeroCopyVideoRequired:
- reason = "Must allow zero-copy video for D3D11VideoDecoder";
- break;
- case D3D11VideoNotSupportedReason::kEncryptedMedia:
- reason = "Encrypted media is not enabled for D3D11VideoDecoder";
- break;
- }
-
- DVLOG(2) << reason;
- if (media_log_) {
- media_log_->AddEvent(media_log_->CreateStringEvent(
- MediaLogEvent::MEDIA_INFO_LOG_ENTRY, "info", reason));
- }
-}
-
-bool D3D11VideoDecoder::IsPotentiallySupported(
- const VideoDecoderConfig& config) {
- DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
- // TODO(liberato): All of this could be moved into MojoVideoDecoder, so that
- // it could run on the client side and save the IPC hop.
+// static
+std::vector<SupportedVideoDecoderConfig>
+D3D11VideoDecoder::GetSupportedVideoDecoderConfigs(
+ const gpu::GpuPreferences& gpu_preferences,
+ const gpu::GpuDriverBugWorkarounds& gpu_workarounds,
+ GetD3D11DeviceCB get_d3d11_device_cb) {
+ const std::string uma_name("Media.D3D11.WasVideoSupported");
// Must allow zero-copy of nv12 textures.
- if (!gpu_preferences_.enable_zero_copy_dxgi_video) {
- SetWasSupportedReason(D3D11VideoNotSupportedReason::kZeroCopyNv12Required);
- return false;
- }
-
- if (gpu_workarounds_.disable_dxgi_zero_copy_video) {
- SetWasSupportedReason(D3D11VideoNotSupportedReason::kZeroCopyVideoRequired);
- return false;
- }
-
- if (config.profile() == H264PROFILE_HIGH10PROFILE) {
- // H264 HIGH10 is never supported.
- SetWasSupportedReason(D3D11VideoNotSupportedReason::kProfileNotSupported);
- return false;
- }
-
- if (IsUnsupportedVP9Profile(config)) {
- SetWasSupportedReason(D3D11VideoNotSupportedReason::kProfileNotSupported);
- return false;
- }
-
- bool encrypted_stream = config.is_encrypted();
-
- if (encrypted_stream && !base::FeatureList::IsEnabled(kD3D11EncryptedMedia)) {
- SetWasSupportedReason(D3D11VideoNotSupportedReason::kEncryptedMedia);
- return false;
- }
-
- // Converts one of chromium's VideoCodecProfile options to a dxguid value.
- // If this GUID comes back empty then the profile is not supported.
- GUID decoder_GUID = GetD3D11DecoderGUID(config);
-
- // If we got the empty guid, fail.
- GUID empty_guid = {};
- if (decoder_GUID == empty_guid) {
- SetWasSupportedReason(D3D11VideoNotSupportedReason::kCodecNotSupported);
- return false;
- }
-
- // TODO(liberato): It would be nice to QueryD3D11DeviceObjectFromANGLE, but
- // we don't know what thread we're on.
- D3D_FEATURE_LEVEL levels[] = {
- D3D_FEATURE_LEVEL_11_1, // We need 11.1 for encrypted playback,
- D3D_FEATURE_LEVEL_11_0, // but make sure we have at least 11.0 for clear.
- };
-
- // This is also the most expensive check, so make sure it is last.
- HRESULT hr = create_device_func_.Run(
- nullptr, D3D_DRIVER_TYPE_HARDWARE, nullptr, 0, levels, ARRAYSIZE(levels),
- D3D11_SDK_VERSION, nullptr, &usable_feature_level_, nullptr);
-
- if (FAILED(hr)) {
- SetWasSupportedReason(
- D3D11VideoNotSupportedReason::kInsufficientD3D11FeatureLevel);
- return false;
- }
-
- if (encrypted_stream && usable_feature_level_ == D3D_FEATURE_LEVEL_11_0) {
- SetWasSupportedReason(
- D3D11VideoNotSupportedReason::kInsufficientD3D11FeatureLevel);
- return false;
- }
-
- // TODO(liberato): dxva checks IsHDR() in the target colorspace, but we don't
- // have the target colorspace. It's commented as being for vpx, though, so
- // we skip it here for now.
- SetWasSupportedReason(D3D11VideoNotSupportedReason::kVideoIsSupported);
- return true;
+ if (!gpu_preferences.enable_zero_copy_dxgi_video) {
+ UMA_HISTOGRAM_ENUMERATION(uma_name,
+ NotSupportedReason::kZeroCopyNv12Required);
+ return {};
+ }
+
+ if (gpu_workarounds.disable_dxgi_zero_copy_video) {
+ UMA_HISTOGRAM_ENUMERATION(uma_name,
+ NotSupportedReason::kZeroCopyVideoRequired);
+ return {};
+ }
+
+ // Remember that this might query the angle device, so this won't work if
+ // we're not on the GPU main thread. Also remember that devices are thread
+ // safe (contexts are not), so we could use the angle device from any thread
+ // as long as we're not calling into possible not-thread-safe things to get
+ // it. I.e., if this cached it, then it'd be fine. It's up to our caller
+ // to guarantee that, though.
+ //
+ // Note also that, currently, we are called from the GPU main thread only.
+ auto d3d11_device = get_d3d11_device_cb.Run();
+ if (!d3d11_device) {
+ UMA_HISTOGRAM_ENUMERATION(uma_name,
+ NotSupportedReason::kCouldNotGetD3D11Device);
+ return {};
+ }
+
+ D3D_FEATURE_LEVEL usable_feature_level = d3d11_device->GetFeatureLevel();
+
+ const bool allow_encrypted =
+ (usable_feature_level > D3D_FEATURE_LEVEL_11_0) &&
+ base::FeatureList::IsEnabled(kHardwareSecureDecryption);
+
+ std::vector<SupportedVideoDecoderConfig> configs;
+
+ // Now check specific configs.
+ // For now, just return something that matches everything, since that's
+ // effectively what the workaround in mojo_video_decoder does. Eventually, we
+ // should check resolutions and guids from the device we just created for both
+ // portrait and landscape orientations.
+ const gfx::Size min_resolution(64, 64);
+  const gfx::Size max_resolution(8192, 8192);  // Portrait or landscape 8k
+
+ // Push H264 configs, except HIGH10.
+ configs.push_back(SupportedVideoDecoderConfig(
+ H264PROFILE_MIN, // profile_min
+ static_cast<VideoCodecProfile>(H264PROFILE_HIGH10PROFILE -
+ 1), // profile_max
+ min_resolution, // coded_size_min
+ max_resolution, // coded_size_max
+ allow_encrypted, // allow_encrypted
+ false)); // require_encrypted
+ configs.push_back(SupportedVideoDecoderConfig(
+ static_cast<VideoCodecProfile>(H264PROFILE_HIGH10PROFILE +
+ 1), // profile_min
+ H264PROFILE_MAX, // profile_max
+ min_resolution, // coded_size_min
+ max_resolution, // coded_size_max
+ allow_encrypted, // allow_encrypted
+ false)); // require_encrypted
+
+ configs.push_back(
+ SupportedVideoDecoderConfig(VP9PROFILE_PROFILE0, // profile_min
+ VP9PROFILE_PROFILE0, // profile_max
+ min_resolution, // coded_size_min
+ max_resolution, // coded_size_max
+ allow_encrypted, // allow_encrypted
+ false)); // require_encrypted
+
+ // TODO(liberato): Should we separate out h264, vp9, and encrypted?
+ UMA_HISTOGRAM_ENUMERATION(uma_name, NotSupportedReason::kVideoIsSupported);
+
+ return configs;
}
} // namespace media
diff --git a/chromium/media/gpu/windows/d3d11_video_decoder.h b/chromium/media/gpu/windows/d3d11_video_decoder.h
index a80f2a499a4..83ebea722d7 100644
--- a/chromium/media/gpu/windows/d3d11_video_decoder.h
+++ b/chromium/media/gpu/windows/d3d11_video_decoder.h
@@ -6,6 +6,7 @@
#define MEDIA_GPU_D3D11_VIDEO_DECODER_H_
#include <string>
+#include <vector>
#include "base/memory/ptr_util.h"
#include "base/memory/ref_counted.h"
@@ -17,12 +18,13 @@
#include "gpu/config/gpu_preferences.h"
#include "media/base/callback_registry.h"
#include "media/base/video_decoder.h"
+#include "media/base/win/d3d11_create_device_cb.h"
#include "media/gpu/command_buffer_helper.h"
#include "media/gpu/media_gpu_export.h"
-#include "media/gpu/windows/d3d11_create_device_cb.h"
#include "media/gpu/windows/d3d11_h264_accelerator.h"
#include "media/gpu/windows/d3d11_video_decoder_client.h"
#include "media/gpu/windows/d3d11_vp9_accelerator.h"
+#include "media/video/supported_video_decoder_config.h"
namespace gpu {
class CommandBufferStub;
@@ -41,23 +43,33 @@ class MediaLog;
class MEDIA_GPU_EXPORT D3D11VideoDecoder : public VideoDecoder,
public D3D11VideoDecoderClient {
public:
+ // Callback to get a D3D11 device.
+ using GetD3D11DeviceCB =
+ base::RepeatingCallback<Microsoft::WRL::ComPtr<ID3D11Device>()>;
+
+ // List of configs that we'll check against when initializing. This is only
+ // needed since GpuMojoMediaClient merges our supported configs with the VDA
+ // supported configs.
+ using SupportedConfigs = std::vector<SupportedVideoDecoderConfig>;
+
// |helper| must be called from |gpu_task_runner|.
static std::unique_ptr<VideoDecoder> Create(
scoped_refptr<base::SingleThreadTaskRunner> gpu_task_runner,
std::unique_ptr<MediaLog> media_log,
const gpu::GpuPreferences& gpu_preferences,
const gpu::GpuDriverBugWorkarounds& gpu_workarounds,
- base::RepeatingCallback<gpu::CommandBufferStub*()> get_stub_cb);
+ base::RepeatingCallback<gpu::CommandBufferStub*()> get_stub_cb,
+ GetD3D11DeviceCB get_d3d11_device_cb,
+ SupportedConfigs supported_configs);
// VideoDecoder implementation:
std::string GetDisplayName() const override;
- void Initialize(
- const VideoDecoderConfig& config,
- bool low_delay,
- CdmContext* cdm_context,
- const InitCB& init_cb,
- const OutputCB& output_cb,
- const WaitingForDecryptionKeyCB& waiting_for_decryption_key_cb) override;
+ void Initialize(const VideoDecoderConfig& config,
+ bool low_delay,
+ CdmContext* cdm_context,
+ const InitCB& init_cb,
+ const OutputCB& output_cb,
+ const WaitingCB& waiting_cb) override;
void Decode(scoped_refptr<DecoderBuffer> buffer,
const DecodeCB& decode_cb) override;
void Reset(const base::RepeatingClosure& closure) override;
@@ -70,12 +82,12 @@ class MEDIA_GPU_EXPORT D3D11VideoDecoder : public VideoDecoder,
void OutputResult(const CodecPicture* picture,
D3D11PictureBuffer* picture_buffer) override;
- // Return false |config| definitely isn't going to work, so that we can fail
- // init without bothering with a thread hop.
- bool IsPotentiallySupported(const VideoDecoderConfig& config);
-
- // Override how we create D3D11 devices, to inject mocks.
- void SetCreateDeviceCallbackForTesting(D3D11CreateDeviceCB callback);
+ // Return the set of video decoder configs that we support.
+ static std::vector<SupportedVideoDecoderConfig>
+ GetSupportedVideoDecoderConfigs(
+ const gpu::GpuPreferences& gpu_preferences,
+ const gpu::GpuDriverBugWorkarounds& gpu_workarounds,
+ GetD3D11DeviceCB get_d3d11_device_cb);
protected:
// Owners should call Destroy(). This is automatic via
@@ -93,7 +105,9 @@ class MEDIA_GPU_EXPORT D3D11VideoDecoder : public VideoDecoder,
const gpu::GpuDriverBugWorkarounds& gpu_workarounds,
std::unique_ptr<D3D11VideoDecoderImpl> impl,
base::RepeatingCallback<scoped_refptr<CommandBufferHelper>()>
- get_helper_cb);
+ get_helper_cb,
+ GetD3D11DeviceCB get_d3d11_device_cb,
+ SupportedConfigs supported_configs);
// Receive |buffer|, that is now unused by the client.
void ReceivePictureBufferFromClient(scoped_refptr<D3D11PictureBuffer> buffer);
@@ -117,11 +131,11 @@ class MEDIA_GPU_EXPORT D3D11VideoDecoder : public VideoDecoder,
GUID GetD3D11DecoderGUID(const VideoDecoderConfig& config);
// Create new PictureBuffers. Currently, this completes synchronously, but
- // really should have an async interface since it must do some work on the gpu
- // main thread.
+ // really should have an async interface since it must do some work on the
+ // gpu main thread.
void CreatePictureBuffers();
- enum class D3D11VideoNotSupportedReason {
+ enum class NotSupportedReason {
kVideoIsSupported = 0,
// D3D11 version 11.1 required.
@@ -142,10 +156,13 @@ class MEDIA_GPU_EXPORT D3D11VideoDecoder : public VideoDecoder,
// The media was encrypted.
kEncryptedMedia = 6,
+ // Call to get the D3D11 device failed.
+ kCouldNotGetD3D11Device = 7,
+
// For UMA. Must be the last entry. It should be initialized to the
// numerically largest value above; if you add more entries, then please
// update this to the last one.
- kMaxValue = kEncryptedMedia
+ kMaxValue = kCouldNotGetD3D11Device
};
std::unique_ptr<MediaLog> media_log_;
@@ -153,25 +170,30 @@ class MEDIA_GPU_EXPORT D3D11VideoDecoder : public VideoDecoder,
enum class State {
// Initializing resources required to create a codec.
kInitializing,
+
// Initialization has completed and we're running. This is the only state
// in which |codec_| might be non-null. If |codec_| is null, a codec
// creation is pending.
kRunning,
+
// The decoder cannot make progress because it doesn't have the key to
// decrypt the buffer. Waiting for a new key to be available.
// This should only be transitioned from kRunning, and should only
- // transition to kRunning.
+ // transition to kRunning or kWaitingForReset.
kWaitingForNewKey,
+
+ // The decoder cannot make progress because it's waiting for a Reset(). This
+ // could happen as a result of CdmContext hardware context loss. This should
+ // only be transitioned from kRunning or kWaitingForNewKey, and should only
+ // transition to kRunning.
+ kWaitingForReset,
+
// A fatal error occurred. A terminal state.
kError,
};
- // Record a UMA about why IsPotentiallySupported returned false, or that it
- // returned true. Also will add a MediaLog entry, etc.
- void SetWasSupportedReason(D3D11VideoNotSupportedReason enum_value);
-
- // Callback to notify that new usable key is available.
- void NotifyNewKey();
+ // Callback to notify that new CdmContext event is available.
+ void OnCdmContextEvent(CdmContext::Event event);
// Enter the kError state. This will fail any pending |init_cb_| and / or
// pending decode as well.
@@ -191,16 +213,21 @@ class MEDIA_GPU_EXPORT D3D11VideoDecoder : public VideoDecoder,
gpu::GpuDriverBugWorkarounds gpu_workarounds_;
// During init, these will be set.
+ VideoDecoderConfig config_;
InitCB init_cb_;
OutputCB output_cb_;
- bool is_encrypted_ = false;
+ WaitingCB waiting_cb_;
- D3D11CreateDeviceCB create_device_func_;
+ // Right now, this is used both for the video decoder and for display. In
+ // the future, this should only be for the video decoder. We should use
+ // the ANGLE device for display (plus texture sharing, if needed).
+ GetD3D11DeviceCB get_d3d11_device_cb_;
Microsoft::WRL::ComPtr<ID3D11Device> device_;
Microsoft::WRL::ComPtr<ID3D11DeviceContext> device_context_;
Microsoft::WRL::ComPtr<ID3D11VideoDevice> video_device_;
+ // D3D11 version on this device.
D3D_FEATURE_LEVEL usable_feature_level_;
std::unique_ptr<AcceleratedVideoDecoder> accelerated_video_decoder_;
@@ -216,8 +243,8 @@ class MEDIA_GPU_EXPORT D3D11VideoDecoder : public VideoDecoder,
// Callback registration to keep the new key callback registered.
std::unique_ptr<CallbackRegistration> new_key_callback_registration_;
- // Must be called on the gpu main thread. So, don't call it from here, since
- // we don't know what thread we're on.
+ // Must be called on the gpu main thread. So, don't call it from here,
+ // since we don't know what thread we're on.
base::RepeatingCallback<gpu::CommandBufferStub*()> get_stub_cb_;
// It would be nice to unique_ptr these, but we give a ref to the VideoFrame
@@ -226,13 +253,15 @@ class MEDIA_GPU_EXPORT D3D11VideoDecoder : public VideoDecoder,
State state_ = State::kInitializing;
- // Callback to get a command buffer helper. Must be called from the gpu main
- // thread only.
+ // Callback to get a command buffer helper. Must be called from the gpu
+ // main thread only.
base::RepeatingCallback<scoped_refptr<CommandBufferHelper>()> get_helper_cb_;
// Entire class should be single-sequence.
SEQUENCE_CHECKER(sequence_checker_);
+ SupportedConfigs supported_configs_;
+
base::WeakPtrFactory<D3D11VideoDecoder> weak_factory_;
DISALLOW_COPY_AND_ASSIGN(D3D11VideoDecoder);
diff --git a/chromium/media/gpu/windows/d3d11_video_decoder_unittest.cc b/chromium/media/gpu/windows/d3d11_video_decoder_unittest.cc
index 338bedf5268..b48e167466e 100644
--- a/chromium/media/gpu/windows/d3d11_video_decoder_unittest.cc
+++ b/chromium/media/gpu/windows/d3d11_video_decoder_unittest.cc
@@ -10,6 +10,7 @@
#include "base/bind.h"
#include "base/bind_helpers.h"
+#include "base/optional.h"
#include "base/run_loop.h"
#include "base/single_thread_task_runner.h"
#include "base/test/scoped_feature_list.h"
@@ -18,16 +19,19 @@
#include "media/base/decoder_buffer.h"
#include "media/base/media_log.h"
#include "media/base/media_switches.h"
+#include "media/base/media_util.h"
#include "media/base/test_helpers.h"
-#include "media/gpu/windows/d3d11_mocks.h"
+#include "media/base/win/d3d11_mocks.h"
#include "media/gpu/windows/d3d11_video_decoder_impl.h"
#include "testing/gtest/include/gtest/gtest.h"
using ::testing::_;
using ::testing::DoAll;
+using ::testing::Eq;
using ::testing::NiceMock;
using ::testing::Return;
using ::testing::SaveArg;
+using ::testing::SetArgPointee;
namespace media {
@@ -53,15 +57,106 @@ class D3D11VideoDecoderTest : public ::testing::Test {
gpu_preferences_.enable_zero_copy_dxgi_video = true;
gpu_preferences_.use_passthrough_cmd_decoder = false;
gpu_workarounds_.disable_dxgi_zero_copy_video = false;
+
+ // Create a mock D3D11 device that supports 11.0. Note that if you change
+ // this, then you probably also want VideoDevice1 and friends, below.
+ mock_d3d11_device_ = CreateD3D11Mock<D3D11DeviceMock>();
+ ON_CALL(*mock_d3d11_device_.Get(), GetFeatureLevel)
+ .WillByDefault(Return(D3D_FEATURE_LEVEL_11_0));
+
+ mock_d3d11_device_context_ = CreateD3D11Mock<D3D11DeviceContextMock>();
+ ON_CALL(*mock_d3d11_device_.Get(), GetImmediateContext(_))
+ .WillByDefault(SetComPointee<0>(mock_d3d11_device_context_.Get()));
+
+ // Set up an D3D11VideoDevice rather than ...Device1, since Initialize uses
+ // Device for checking decoder GUIDs.
+ // TODO(liberato): Try to use Device1 more often.
+ mock_d3d11_video_device_ = CreateD3D11Mock<D3D11VideoDeviceMock>();
+ ON_CALL(*mock_d3d11_device_.Get(), QueryInterface(IID_ID3D11VideoDevice, _))
+ .WillByDefault(DoAll(SetComPointee<1>(mock_d3d11_video_device_.Get()),
+ Return(S_OK)));
+
+ EnableDecoder(D3D11_DECODER_PROFILE_H264_VLD_NOFGT);
+
+ mock_d3d11_video_decoder_ = CreateD3D11Mock<D3D11VideoDecoderMock>();
+ ON_CALL(*mock_d3d11_video_device_.Get(), CreateVideoDecoder(_, _, _))
+ .WillByDefault(
+ SetComPointeeAndReturnOk<2>(mock_d3d11_video_decoder_.Get()));
+
+ mock_d3d11_video_context_ = CreateD3D11Mock<D3D11VideoContextMock>();
+ ON_CALL(*mock_d3d11_device_context_.Get(),
+ QueryInterface(IID_ID3D11VideoContext, _))
+ .WillByDefault(
+ SetComPointeeAndReturnOk<1>(mock_d3d11_video_context_.Get()));
+ }
+
+ // Enable a decoder for the given GUID. Only one decoder may be enabled at a
+ // time. GUIDs are things like D3D11_DECODER_PROFILE_H264_VLD_NOFGT or
+ // D3D11_DECODER_PROFILE_VP9_VLD_PROFILE0, etc.
+ void EnableDecoder(GUID decoder_profile) {
+ ON_CALL(*mock_d3d11_video_device_.Get(), GetVideoDecoderProfileCount())
+ .WillByDefault(Return(1));
+
+ // Note that we don't check if the guid in the config actually matches
+ // |decoder_profile|. Perhaps we should.
+ ON_CALL(*mock_d3d11_video_device_.Get(), GetVideoDecoderProfile(0, _))
+ .WillByDefault(DoAll(SetArgPointee<1>(decoder_profile), Return(S_OK)));
+
+ ON_CALL(*mock_d3d11_video_device_.Get(), GetVideoDecoderConfigCount(_, _))
+ .WillByDefault(DoAll(
+ Invoke(this, &D3D11VideoDecoderTest::GetVideoDecoderConfigCount),
+ Return(S_OK)));
+
+ video_decoder_config_.ConfigBitstreamRaw =
+ decoder_profile == D3D11_DECODER_PROFILE_H264_VLD_NOFGT ? 2 : 1;
+
+ ON_CALL(*mock_d3d11_video_device_.Get(), GetVideoDecoderConfig(_, 0, _))
+ .WillByDefault(
+ DoAll(SetArgPointee<2>(video_decoder_config_), Return(S_OK)));
}
+ void GetVideoDecoderConfigCount(const D3D11_VIDEO_DECODER_DESC* desc,
+ UINT* config_count_out) {
+ last_video_decoder_desc_ = *desc;
+ *config_count_out = 1;
+ }
+
+ // Most recently provided video decoder desc.
+ base::Optional<D3D11_VIDEO_DECODER_DESC> last_video_decoder_desc_;
+ D3D11_VIDEO_DECODER_CONFIG video_decoder_config_;
+
void TearDown() override {
decoder_.reset();
// Run the gpu thread runner to tear down |impl_|.
base::RunLoop().RunUntilIdle();
}
- void CreateDecoder() {
+ void EnableFeature(const base::Feature& feature) {
+ scoped_feature_list_.emplace();
+ scoped_feature_list_->InitAndEnableFeature(feature);
+ }
+
+ void DisableFeature(const base::Feature& feature) {
+ scoped_feature_list_.emplace();
+ scoped_feature_list_->InitAndDisableFeature(feature);
+ }
+
+ // If provided, |supported_configs| is the list of configs that will be
+ // checked before init can succeed. If one is provided, then we'll
+ // use it. Otherwise, we'll use the list that's autodetected by the
+ // decoder based on the current device mock.
+ void CreateDecoder(
+ base::Optional<D3D11VideoDecoder::SupportedConfigs> supported_configs =
+ base::Optional<D3D11VideoDecoder::SupportedConfigs>()) {
+ auto get_device_cb = base::BindRepeating(
+ [](Microsoft::WRL::ComPtr<ID3D11Device> device) { return device; },
+ mock_d3d11_device_);
+
+ // Autodetect the supported configs, unless it's being overridden.
+ if (!supported_configs) {
+ supported_configs = D3D11VideoDecoder::GetSupportedVideoDecoderConfigs(
+ gpu_preferences_, gpu_workarounds_, get_device_cb);
+ }
std::unique_ptr<MockD3D11VideoDecoderImpl> impl =
std::make_unique<NiceMock<MockD3D11VideoDecoderImpl>>();
impl_ = impl.get();
@@ -72,17 +167,10 @@ class D3D11VideoDecoderTest : public ::testing::Test {
// deleter works. The dtor is protected.
decoder_ = base::WrapUnique<VideoDecoder>(
d3d11_decoder_raw_ = new D3D11VideoDecoder(
- gpu_task_runner_, nullptr /* MediaLog */, gpu_preferences_,
- gpu_workarounds_, std::move(impl),
- base::RepeatingCallback<scoped_refptr<CommandBufferHelper>()>()));
- d3d11_decoder_raw_->SetCreateDeviceCallbackForTesting(
- base::BindRepeating(&D3D11CreateDeviceMock::Create,
- base::Unretained(&create_device_mock_)));
-
- // Configure CreateDevice to succeed by default.
- ON_CALL(create_device_mock_,
- Create(_, D3D_DRIVER_TYPE_HARDWARE, _, _, _, _, _, _, _, _))
- .WillByDefault(Return(S_OK));
+ gpu_task_runner_, std::make_unique<NullMediaLog>(),
+ gpu_preferences_, gpu_workarounds_, std::move(impl),
+ base::RepeatingCallback<scoped_refptr<CommandBufferHelper>()>(),
+ get_device_cb, *supported_configs));
}
enum InitExpectation {
@@ -104,10 +192,12 @@ class D3D11VideoDecoderTest : public ::testing::Test {
decoder_->Initialize(config, low_delay, cdm_context,
base::BindRepeating(&D3D11VideoDecoderTest::MockInitCB,
base::Unretained(this)),
- VideoDecoder::OutputCB(), base::NullCallback());
+ base::DoNothing(), base::DoNothing());
base::RunLoop().RunUntilIdle();
}
+ MOCK_METHOD1(MockInitCB, void(bool));
+
base::test::ScopedTaskEnvironment env_;
scoped_refptr<base::SingleThreadTaskRunner> gpu_task_runner_;
@@ -117,75 +207,69 @@ class D3D11VideoDecoderTest : public ::testing::Test {
gpu::GpuPreferences gpu_preferences_;
gpu::GpuDriverBugWorkarounds gpu_workarounds_;
MockD3D11VideoDecoderImpl* impl_ = nullptr;
- D3D11CreateDeviceMock create_device_mock_;
- MOCK_METHOD1(MockInitCB, void(bool));
+ Microsoft::WRL::ComPtr<D3D11DeviceMock> mock_d3d11_device_;
+ Microsoft::WRL::ComPtr<D3D11DeviceContextMock> mock_d3d11_device_context_;
+ Microsoft::WRL::ComPtr<D3D11VideoDeviceMock> mock_d3d11_video_device_;
+ Microsoft::WRL::ComPtr<D3D11VideoDecoderMock> mock_d3d11_video_decoder_;
+ Microsoft::WRL::ComPtr<D3D11VideoContextMock> mock_d3d11_video_context_;
+
+ base::Optional<base::test::ScopedFeatureList> scoped_feature_list_;
};
-TEST_F(D3D11VideoDecoderTest, RequiresD3D11_0) {
- D3D_FEATURE_LEVEL feature_levels[100];
- int num_levels = 0;
+TEST_F(D3D11VideoDecoderTest, SupportsVP9Profile0WithDecoderEnabled) {
+ VideoDecoderConfig configuration =
+ TestVideoConfig::NormalCodecProfile(kCodecVP9, VP9PROFILE_PROFILE0);
+ EnableDecoder(D3D11_DECODER_PROFILE_VP9_VLD_PROFILE0);
CreateDecoder();
-
- // Fail to create the D3D11 device, but record the results.
- D3D11CreateDeviceCB create_device_cb = base::BindRepeating(
- [](D3D_FEATURE_LEVEL* feature_levels_out, int* num_levels_out,
- IDXGIAdapter*, D3D_DRIVER_TYPE, HMODULE, UINT,
- const D3D_FEATURE_LEVEL* feature_levels, UINT num_levels, UINT,
- ID3D11Device**, D3D_FEATURE_LEVEL*, ID3D11DeviceContext**) -> HRESULT {
- memcpy(feature_levels_out, feature_levels,
- num_levels * sizeof(feature_levels_out[0]));
- *num_levels_out = num_levels;
- return E_NOTIMPL;
- },
- feature_levels, &num_levels);
- d3d11_decoder_raw_->SetCreateDeviceCallbackForTesting(
- std::move(create_device_cb));
- InitializeDecoder(
- TestVideoConfig::NormalCodecProfile(kCodecH264, H264PROFILE_MAIN),
- kExpectFailure);
-
- // Verify that it requests exactly 11.1, and nothing earlier.
- // Later is okay.
- bool min_is_d3d11_0 = false;
- for (int i = 0; i < num_levels; i++) {
- min_is_d3d11_0 |= feature_levels[i] == D3D_FEATURE_LEVEL_11_0;
- ASSERT_TRUE(feature_levels[i] >= D3D_FEATURE_LEVEL_11_0);
- }
- ASSERT_TRUE(min_is_d3d11_0);
+ InitializeDecoder(configuration, kExpectSuccess);
}
-TEST_F(D3D11VideoDecoderTest, OnlySupportsVP9WithFlagEnabled) {
- CreateDecoder();
-
+TEST_F(D3D11VideoDecoderTest, DoesNotSupportVP9WithoutDecoderEnabled) {
VideoDecoderConfig configuration =
TestVideoConfig::NormalCodecProfile(kCodecVP9, VP9PROFILE_PROFILE0);
- {
- base::test::ScopedFeatureList scoped_feature_list;
- scoped_feature_list.InitWithFeatures({}, {kD3D11VP9Decoder});
- EXPECT_FALSE(d3d11_decoder_raw_->IsPotentiallySupported(configuration));
- }
-
- {
- base::test::ScopedFeatureList scoped_feature_list;
- scoped_feature_list.InitWithFeatures({kD3D11VP9Decoder}, {});
- EXPECT_TRUE(d3d11_decoder_raw_->IsPotentiallySupported(configuration));
- }
+ // Enable a non-VP9 decoder.
+ EnableDecoder(D3D11_DECODER_PROFILE_H264_VLD_NOFGT); // Paranoia, not VP9.
+ CreateDecoder();
+ InitializeDecoder(configuration, kExpectFailure);
}
-TEST_F(D3D11VideoDecoderTest, OnlySupportsH264NonHIGH10Profile) {
+TEST_F(D3D11VideoDecoderTest, DoesNotSupportsH264HIGH10Profile) {
CreateDecoder();
VideoDecoderConfig high10 = TestVideoConfig::NormalCodecProfile(
kCodecH264, H264PROFILE_HIGH10PROFILE);
+ InitializeDecoder(high10, kExpectFailure);
+}
+
+TEST_F(D3D11VideoDecoderTest, SupportsH264WithAutodetectedConfig) {
+ CreateDecoder();
+
+ VideoDecoderConfig normal =
+ TestVideoConfig::NormalCodecProfile(kCodecH264, H264PROFILE_MAIN);
+
+ InitializeDecoder(normal, kExpectSuccess);
+ // TODO(liberato): Check |last_video_decoder_desc_| for sanity.
+}
+
+TEST_F(D3D11VideoDecoderTest, DoesNotSupportH264IfNoSupportedConfig) {
+ // This is identical to SupportsH264, except that we initialize with an empty
+  // list of supported configs.  This should match nothing.  Assuming that
+  // SupportsH264WithAutodetectedConfig passes, this checks that the
+  // supported-config check works.
+ // For whatever reason, Optional<SupportedConfigs>({}) results in one that
+ // doesn't have a value, rather than one that has an empty vector.
+ base::Optional<D3D11VideoDecoder::SupportedConfigs> empty_configs;
+ empty_configs.emplace(std::vector<SupportedVideoDecoderConfig>());
+ CreateDecoder(empty_configs);
+
VideoDecoderConfig normal =
TestVideoConfig::NormalCodecProfile(kCodecH264, H264PROFILE_MAIN);
- EXPECT_FALSE(d3d11_decoder_raw_->IsPotentiallySupported(high10));
- EXPECT_TRUE(d3d11_decoder_raw_->IsPotentiallySupported(normal));
+ InitializeDecoder(normal, kExpectFailure);
}
TEST_F(D3D11VideoDecoderTest, RequiresZeroCopyPreference) {
@@ -209,23 +293,34 @@ TEST_F(D3D11VideoDecoderTest, DoesNotSupportEncryptionWithoutFlag) {
VideoDecoderConfig encrypted_config =
TestVideoConfig::NormalCodecProfile(kCodecH264, H264PROFILE_MAIN);
encrypted_config.SetIsEncrypted(true);
- {
- base::test::ScopedFeatureList scoped_feature_list;
- scoped_feature_list.InitWithFeatures({}, {kD3D11EncryptedMedia});
- EXPECT_FALSE(d3d11_decoder_raw_->IsPotentiallySupported(encrypted_config));
- }
+
+ DisableFeature(kHardwareSecureDecryption);
+ InitializeDecoder(encrypted_config, kExpectFailure);
}
-TEST_F(D3D11VideoDecoderTest, SupportsEncryptionWithFlag) {
+TEST_F(D3D11VideoDecoderTest, DoesNotSupportEncryptionWithFlagOn11_0) {
CreateDecoder();
VideoDecoderConfig encrypted_config =
- TestVideoConfig::NormalCodecProfile(kCodecH264, H264PROFILE_MAIN);
+ TestVideoConfig::NormalEncrypted(kCodecH264, H264PROFILE_MAIN);
+ // TODO(liberato): Provide a CdmContext, so that this test is identical to the
+ // 11.1 version, except for the D3D11 version.
+
+ EnableFeature(kHardwareSecureDecryption);
+ InitializeDecoder(encrypted_config, kExpectFailure);
+}
+
+TEST_F(D3D11VideoDecoderTest, DISABLED_SupportsEncryptionWithFlagOn11_1) {
+ // This test fails, probably because we don't provide a CdmContext.
+ CreateDecoder();
+ VideoDecoderConfig encrypted_config =
+ TestVideoConfig::NormalEncrypted(kCodecH264, H264PROFILE_MAIN);
encrypted_config.SetIsEncrypted(true);
- {
- base::test::ScopedFeatureList scoped_feature_list;
- scoped_feature_list.InitWithFeatures({kD3D11EncryptedMedia}, {});
- EXPECT_TRUE(d3d11_decoder_raw_->IsPotentiallySupported(encrypted_config));
- }
+ ON_CALL(*mock_d3d11_device_.Get(), GetFeatureLevel)
+ .WillByDefault(Return(D3D_FEATURE_LEVEL_11_1));
+ EnableFeature(kHardwareSecureDecryption);
+ InitializeDecoder(encrypted_config, kExpectSuccess);
}
+// TODO(xhwang): Add tests to cover kWaitingForNewKey and kWaitingForReset.
+
} // namespace media
diff --git a/chromium/media/gpu/windows/d3d11_vp9_accelerator.cc b/chromium/media/gpu/windows/d3d11_vp9_accelerator.cc
index be087ab2253..74797fa1aa5 100644
--- a/chromium/media/gpu/windows/d3d11_vp9_accelerator.cc
+++ b/chromium/media/gpu/windows/d3d11_vp9_accelerator.cc
@@ -47,7 +47,12 @@ D3D11VP9Accelerator::D3D11VP9Accelerator(
status_feedback_(0),
video_decoder_(std::move(video_decoder)),
video_device_(std::move(video_device)),
- video_context_(std::move(video_context)) {}
+ video_context_(std::move(video_context)) {
+ DCHECK(client);
+ DCHECK(media_log_);
+ // |cdm_proxy_context_| is non-null for encrypted content but can be null for
+ // clear content.
+}
D3D11VP9Accelerator::~D3D11VP9Accelerator() {}
@@ -81,6 +86,7 @@ bool D3D11VP9Accelerator::BeginFrame(D3D11VP9Picture* pic) {
base::Optional<CdmProxyContext::D3D11DecryptContext> decrypt_context;
std::unique_ptr<D3D11_VIDEO_DECODER_BEGIN_FRAME_CRYPTO_SESSION> content_key;
if (const DecryptConfig* config = pic->decrypt_config()) {
+ DCHECK(cdm_proxy_context_) << "No CdmProxyContext but picture is encrypted";
decrypt_context = cdm_proxy_context_->GetD3D11DecryptContext(
CdmProxy::KeyType::kDecryptAndDecode, config->key_id());
if (!decrypt_context) {
@@ -125,6 +131,7 @@ void D3D11VP9Accelerator::CopyFrameParams(
COPY_PARAM(frame_parallel_decoding_mode);
COPY_PARAM(intra_only);
COPY_PARAM(frame_context_idx);
+ COPY_PARAM(reset_frame_context);
COPY_PARAM(allow_high_precision_mv);
COPY_PARAM(refresh_frame_context);
COPY_PARAM(frame_parallel_decoding_mode);
@@ -137,8 +144,8 @@ void D3D11VP9Accelerator::CopyFrameParams(
pic_params->CurrPic.Index7Bits = pic->level();
pic_params->frame_type = !pic->frame_hdr->IsKeyframe();
- pic_params->subsampling_x = pic->frame_hdr->subsampling_x == 1;
- pic_params->subsampling_y = pic->frame_hdr->subsampling_y == 1;
+ pic_params->subsampling_x = pic->frame_hdr->subsampling_x;
+ pic_params->subsampling_y = pic->frame_hdr->subsampling_y;
SET_PARAM(width, frame_width);
SET_PARAM(height, frame_height);
@@ -260,7 +267,8 @@ void D3D11VP9Accelerator::CopyHeaderSizeAndID(
pic_params->first_partition_size =
static_cast<USHORT>(pic->frame_hdr->header_size_in_bytes);
- pic_params->StatusReportFeedbackNumber = status_feedback_++;
+ // StatusReportFeedbackNumber "should not be equal to 0".
+ pic_params->StatusReportFeedbackNumber = ++status_feedback_;
}
bool D3D11VP9Accelerator::SubmitDecoderBuffer(
diff --git a/chromium/media/gpu/windows/dxva_picture_buffer_win.cc b/chromium/media/gpu/windows/dxva_picture_buffer_win.cc
index 6ee71c3ccbc..7f647a583fe 100644
--- a/chromium/media/gpu/windows/dxva_picture_buffer_win.cc
+++ b/chromium/media/gpu/windows/dxva_picture_buffer_win.cc
@@ -105,48 +105,46 @@ enum {
};
// static
-linked_ptr<DXVAPictureBuffer> DXVAPictureBuffer::Create(
+std::unique_ptr<DXVAPictureBuffer> DXVAPictureBuffer::Create(
const DXVAVideoDecodeAccelerator& decoder,
const PictureBuffer& buffer,
EGLConfig egl_config) {
switch (decoder.GetPictureBufferMechanism()) {
case DXVAVideoDecodeAccelerator::PictureBufferMechanism::BIND: {
- linked_ptr<EGLStreamPictureBuffer> picture_buffer(
- new EGLStreamPictureBuffer(buffer));
+ auto picture_buffer = std::make_unique<EGLStreamPictureBuffer>(buffer);
if (!picture_buffer->Initialize())
- return linked_ptr<DXVAPictureBuffer>(nullptr);
+ return nullptr;
return picture_buffer;
}
case DXVAVideoDecodeAccelerator::PictureBufferMechanism::
DELAYED_COPY_TO_NV12: {
- linked_ptr<EGLStreamDelayedCopyPictureBuffer> picture_buffer(
- new EGLStreamDelayedCopyPictureBuffer(buffer));
+ auto picture_buffer =
+ std::make_unique<EGLStreamDelayedCopyPictureBuffer>(buffer);
if (!picture_buffer->Initialize(decoder))
- return linked_ptr<DXVAPictureBuffer>(nullptr);
+ return nullptr;
return picture_buffer;
}
case DXVAVideoDecodeAccelerator::PictureBufferMechanism::COPY_TO_NV12: {
- linked_ptr<EGLStreamCopyPictureBuffer> picture_buffer(
- new EGLStreamCopyPictureBuffer(buffer));
+ auto picture_buffer =
+ std::make_unique<EGLStreamCopyPictureBuffer>(buffer);
if (!picture_buffer->Initialize(decoder))
- return linked_ptr<DXVAPictureBuffer>(nullptr);
+ return nullptr;
return picture_buffer;
}
case DXVAVideoDecodeAccelerator::PictureBufferMechanism::COPY_TO_RGB: {
- linked_ptr<PbufferPictureBuffer> picture_buffer(
- new PbufferPictureBuffer(buffer));
+ auto picture_buffer = std::make_unique<PbufferPictureBuffer>(buffer);
if (!picture_buffer->Initialize(decoder, egl_config))
- return linked_ptr<DXVAPictureBuffer>(nullptr);
+ return nullptr;
return picture_buffer;
}
}
NOTREACHED();
- return linked_ptr<DXVAPictureBuffer>(nullptr);
+ return nullptr;
}
DXVAPictureBuffer::~DXVAPictureBuffer() {}
diff --git a/chromium/media/gpu/windows/dxva_picture_buffer_win.h b/chromium/media/gpu/windows/dxva_picture_buffer_win.h
index 9212d3e173a..d38e6a128e3 100644
--- a/chromium/media/gpu/windows/dxva_picture_buffer_win.h
+++ b/chromium/media/gpu/windows/dxva_picture_buffer_win.h
@@ -12,7 +12,6 @@
#include <memory>
-#include "base/memory/linked_ptr.h"
#include "media/video/picture.h"
#include "third_party/angle/include/EGL/egl.h"
#include "third_party/angle/include/EGL/eglext.h"
@@ -30,7 +29,7 @@ class DXVAVideoDecodeAccelerator;
class DXVAPictureBuffer {
public:
enum State { UNUSED, BOUND, COPYING, IN_CLIENT, WAITING_TO_REUSE };
- static linked_ptr<DXVAPictureBuffer> Create(
+ static std::unique_ptr<DXVAPictureBuffer> Create(
const DXVAVideoDecodeAccelerator& decoder,
const PictureBuffer& buffer,
EGLConfig egl_config);
diff --git a/chromium/media/gpu/windows/dxva_video_decode_accelerator_win.cc b/chromium/media/gpu/windows/dxva_video_decode_accelerator_win.cc
index 7553d766dbc..b42ad520235 100644
--- a/chromium/media/gpu/windows/dxva_video_decode_accelerator_win.cc
+++ b/chromium/media/gpu/windows/dxva_video_decode_accelerator_win.cc
@@ -32,7 +32,6 @@
#include "base/files/file_path.h"
#include "base/location.h"
#include "base/logging.h"
-#include "base/macros.h"
#include "base/memory/shared_memory.h"
#include "base/metrics/histogram_macros.h"
#include "base/path_service.h"
@@ -270,27 +269,27 @@ bool IsLegacyGPU(ID3D11Device* device) {
bool IsResolutionSupportedForDevice(const gfx::Size& resolution_to_test,
const GUID& decoder_guid,
ID3D11VideoDevice* video_device) {
- D3D11_VIDEO_DECODER_DESC desc = {};
- desc.Guid = decoder_guid;
- desc.SampleWidth = resolution_to_test.width();
- desc.SampleHeight = resolution_to_test.height();
- desc.OutputFormat = DXGI_FORMAT_NV12;
- UINT config_count = 0;
- HRESULT hr = video_device->GetVideoDecoderConfigCount(&desc, &config_count);
- if (FAILED(hr) || config_count == 0)
- return false;
-
- D3D11_VIDEO_DECODER_CONFIG config = {};
- hr = video_device->GetVideoDecoderConfig(&desc, 0, &config);
- UMA_HISTOGRAM_BOOLEAN("Media.DXVAVDA.GetDecoderConfigStatus", SUCCEEDED(hr));
- if (FAILED(hr))
- return false;
-
- Microsoft::WRL::ComPtr<ID3D11VideoDecoder> video_decoder;
- hr = video_device->CreateVideoDecoder(&desc, &config,
- video_decoder.GetAddressOf());
- UMA_HISTOGRAM_BOOLEAN("Media.DXVAVDA.CreateDecoderStatus", !!video_decoder);
- return !!video_decoder;
+ D3D11_VIDEO_DECODER_DESC desc = {
+ decoder_guid, // Guid
+ resolution_to_test.width(), // SampleWidth
+ resolution_to_test.height(), // SampleHeight
+ DXGI_FORMAT_NV12 // OutputFormat
+ };
+
+ // We've chosen the least expensive test for identifying if a given resolution
+ // is supported. Actually creating the VideoDecoder instance only fails ~0.4%
+ // of the time and the outcome is that we will offer support and then
+ // immediately fall back to software; e.g., playback still works. Since these
+ // calls can take hundreds of milliseconds to complete and are often executed
+ // during startup, this seems a reasonable trade-off.
+ //
+ // See the deprecated histograms Media.DXVAVDA.GetDecoderConfigStatus which
+ // succeeds 100% of the time and Media.DXVAVDA.CreateDecoderStatus which
+ // only succeeds 99.6% of the time (in a 28 day aggregation).
+ UINT config_count;
+ return SUCCEEDED(
+ video_device->GetVideoDecoderConfigCount(&desc, &config_count)) &&
+ config_count > 0;
}
// Returns a tuple of (LandscapeMax, PortraitMax). If landscape maximum can not
@@ -840,6 +839,8 @@ bool DXVAVideoDecodeAccelerator::Initialize(const Config& config,
RETURN_ON_FAILURE(InitDecoder(config.profile), "Failed to initialize decoder",
false);
+ // Record this after we see if it works.
+ UMA_HISTOGRAM_BOOLEAN("Media.DXVAVDA.UseD3D11", use_dx11_);
RETURN_ON_FAILURE(GetStreamsInfoAndBufferReqs(),
"Failed to get input/output stream info.", false);
@@ -1052,7 +1053,7 @@ bool DXVAVideoDecodeAccelerator::CreateDX11DevManager() {
flags |= D3D11_CREATE_DEVICE_DEBUG;
hr = D3D11CreateDevice(NULL, D3D_DRIVER_TYPE_HARDWARE, NULL, flags,
- feature_levels, arraysize(feature_levels),
+ feature_levels, base::size(feature_levels),
D3D11_SDK_VERSION, d3d11_device_.GetAddressOf(),
&feature_level_out,
d3d11_device_context_.GetAddressOf());
@@ -1066,7 +1067,7 @@ bool DXVAVideoDecodeAccelerator::CreateDX11DevManager() {
#endif
if (!d3d11_device_context_) {
hr = D3D11CreateDevice(NULL, D3D_DRIVER_TYPE_HARDWARE, NULL, flags,
- feature_levels, arraysize(feature_levels),
+ feature_levels, base::size(feature_levels),
D3D11_SDK_VERSION, d3d11_device_.GetAddressOf(),
&feature_level_out,
d3d11_device_context_.GetAddressOf());
@@ -1187,7 +1188,7 @@ void DXVAVideoDecodeAccelerator::AssignPictureBuffers(
// Copy the picture buffers provided by the client to the available list,
// and mark these buffers as available for use.
for (size_t buffer_index = 0; buffer_index < buffers.size(); ++buffer_index) {
- linked_ptr<DXVAPictureBuffer> picture_buffer =
+ std::unique_ptr<DXVAPictureBuffer> picture_buffer =
DXVAPictureBuffer::Create(*this, buffers[buffer_index], egl_config_);
RETURN_AND_NOTIFY_ON_FAILURE(picture_buffer.get(),
"Failed to allocate picture buffer",
@@ -1202,10 +1203,10 @@ void DXVAVideoDecodeAccelerator::AssignPictureBuffers(
}
}
- bool inserted =
- output_picture_buffers_
- .insert(std::make_pair(buffers[buffer_index].id(), picture_buffer))
- .second;
+ bool inserted = output_picture_buffers_
+ .insert(std::make_pair(buffers[buffer_index].id(),
+ std::move(picture_buffer)))
+ .second;
DCHECK(inserted);
}
@@ -1475,7 +1476,8 @@ DXVAVideoDecodeAccelerator::GetSupportedProfiles(
max_vpx_resolutions.first, video_device.Get(),
{D3D11_DECODER_PROFILE_VP9_VLD_PROFILE0},
{gfx::Size(4096, 2160), gfx::Size(4096, 2304),
- gfx::Size(7680, 4320)});
+ gfx::Size(7680, 4320), gfx::Size(8192, 4320),
+ gfx::Size(8192, 8192)});
}
}
}
@@ -2481,7 +2483,7 @@ void DXVAVideoDecodeAccelerator::DismissStaleBuffers(bool force) {
} else {
// Move to |stale_output_picture_buffers_| for deferred deletion.
stale_output_picture_buffers_.insert(
- std::make_pair(index->first, index->second));
+ std::make_pair(index->first, std::move(index->second)));
}
}
@@ -2977,10 +2979,8 @@ bool DXVAVideoDecodeAccelerator::InitializeID3D11VideoProcessor(
PictureBufferMechanism::DELAYED_COPY_TO_NV12) {
// If we're copying NV12 textures, make sure we set the same
// color space on input and output.
- D3D11_VIDEO_PROCESSOR_COLOR_SPACE d3d11_color_space = {0};
- d3d11_color_space.RGB_Range = 1;
- d3d11_color_space.Nominal_Range = D3D11_VIDEO_PROCESSOR_NOMINAL_RANGE_0_255;
-
+ const auto d3d11_color_space =
+ gfx::ColorSpaceWin::GetD3D11ColorSpace(color_space);
video_context_->VideoProcessorSetOutputColorSpace(d3d11_processor_.Get(),
&d3d11_color_space);
diff --git a/chromium/media/gpu/windows/dxva_video_decode_accelerator_win.h b/chromium/media/gpu/windows/dxva_video_decode_accelerator_win.h
index 82ebec483c7..c5d45ad88de 100644
--- a/chromium/media/gpu/windows/dxva_video_decode_accelerator_win.h
+++ b/chromium/media/gpu/windows/dxva_video_decode_accelerator_win.h
@@ -26,7 +26,6 @@
#include "base/compiler_specific.h"
#include "base/macros.h"
-#include "base/memory/linked_ptr.h"
#include "base/memory/weak_ptr.h"
#include "base/synchronization/lock.h"
#include "base/threading/thread.h"
@@ -257,7 +256,7 @@ class MEDIA_GPU_EXPORT DXVAVideoDecodeAccelerator
// Handles mid stream resolution changes.
void HandleResolutionChanged(int width, int height);
- typedef std::map<int32_t, linked_ptr<DXVAPictureBuffer>> OutputBuffers;
+ using OutputBuffers = std::map<int32_t, std::unique_ptr<DXVAPictureBuffer>>;
// Tells the client to dismiss the stale picture buffers passed in.
void DismissStaleBuffers(bool force);
diff --git a/chromium/media/learning/common/BUILD.gn b/chromium/media/learning/common/BUILD.gn
index aa10784ea12..c8dba87ed9f 100644
--- a/chromium/media/learning/common/BUILD.gn
+++ b/chromium/media/learning/common/BUILD.gn
@@ -12,17 +12,20 @@ component("common") {
"//media/learning/mojo/public/mojom:mojom",
"//media/learning/mojo:*",
"//media/learning/common:unit_tests",
+
+ # Actual client code
+ "//media/capabilities",
]
defines = [ "IS_LEARNING_COMMON_IMPL" ]
sources = [
+ "labelled_example.cc",
+ "labelled_example.h",
"learning_session.cc",
"learning_session.h",
"learning_task.cc",
"learning_task.h",
- "training_example.cc",
- "training_example.h",
"value.cc",
"value.h",
]
@@ -35,7 +38,7 @@ component("common") {
source_set("unit_tests") {
testonly = true
sources = [
- "training_example_unittest.cc",
+ "labelled_example_unittest.cc",
"value_unittest.cc",
]
diff --git a/chromium/media/learning/common/labelled_example.cc b/chromium/media/learning/common/labelled_example.cc
new file mode 100644
index 00000000000..6c8cb7d345d
--- /dev/null
+++ b/chromium/media/learning/common/labelled_example.cc
@@ -0,0 +1,94 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/learning/common/labelled_example.h"
+
+#include "base/containers/flat_set.h"
+
+namespace media {
+namespace learning {
+
+LabelledExample::LabelledExample() = default;
+
+LabelledExample::LabelledExample(std::initializer_list<FeatureValue> init_list,
+ TargetValue target)
+ : features(init_list), target_value(target) {}
+
+LabelledExample::LabelledExample(const LabelledExample& rhs) = default;
+
+LabelledExample::LabelledExample(LabelledExample&& rhs) noexcept = default;
+
+LabelledExample::~LabelledExample() = default;
+
+std::ostream& operator<<(std::ostream& out, const LabelledExample& example) {
+ out << example.features << " => " << example.target_value;
+
+ return out;
+}
+
+std::ostream& operator<<(std::ostream& out, const FeatureVector& features) {
+ for (const auto& feature : features)
+ out << " " << feature;
+
+ return out;
+}
+
+bool LabelledExample::operator==(const LabelledExample& rhs) const {
+ // Do not check weight.
+ return target_value == rhs.target_value && features == rhs.features;
+}
+
+bool LabelledExample::operator!=(const LabelledExample& rhs) const {
+ // Do not check weight.
+ return !((*this) == rhs);
+}
+
+bool LabelledExample::operator<(const LabelledExample& rhs) const {
+ // Impose a somewhat arbitrary ordering.
+ // Do not check weight.
+ if (target_value != rhs.target_value)
+ return target_value < rhs.target_value;
+
+ // Note that we could short-circuit this if the feature vector lengths are
+ // unequal, since we don't particularly care how they compare as long as it's
+ // stable. In particular, we don't have any notion of a "prefix".
+ return features < rhs.features;
+}
+
+LabelledExample& LabelledExample::operator=(const LabelledExample& rhs) =
+ default;
+
+LabelledExample& LabelledExample::operator=(LabelledExample&& rhs) = default;
+
+TrainingData::TrainingData() = default;
+
+TrainingData::TrainingData(const TrainingData& rhs) = default;
+
+TrainingData::TrainingData(TrainingData&& rhs) = default;
+
+TrainingData::~TrainingData() = default;
+
+TrainingData& TrainingData::operator=(TrainingData&& rhs) = default;
+
+TrainingData TrainingData::DeDuplicate() const {
+ // flat_set has non-const iterators, while std::set does not. const_cast is
+ // not allowed by chromium style outside of getters, so flat_set it is.
+ base::flat_set<LabelledExample> example_set;
+ for (auto& example : examples_) {
+ auto iter = example_set.find(example);
+ if (iter != example_set.end())
+ iter->weight += example.weight;
+ else
+ example_set.insert(example);
+ }
+
+ TrainingData deduplicated_data;
+ for (auto& example : example_set)
+ deduplicated_data.push_back(example);
+
+ return deduplicated_data;
+}
+
+} // namespace learning
+} // namespace media
diff --git a/chromium/media/learning/common/labelled_example.h b/chromium/media/learning/common/labelled_example.h
new file mode 100644
index 00000000000..ee89586f5eb
--- /dev/null
+++ b/chromium/media/learning/common/labelled_example.h
@@ -0,0 +1,118 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_LEARNING_COMMON_LABELLED_EXAMPLE_H_
+#define MEDIA_LEARNING_COMMON_LABELLED_EXAMPLE_H_
+
+#include <initializer_list>
+#include <ostream>
+#include <vector>
+
+#include "base/component_export.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "media/learning/common/value.h"
+
+namespace media {
+namespace learning {
+
+// Vector of features, for training or prediction.
+// To interpret the features, one probably needs to check a LearningTask. It
+// provides a description for each index. For example, [0]=="height",
+// [1]=="url", etc.
+using FeatureVector = std::vector<FeatureValue>;
+
+using WeightType = size_t;
+
+// One training example == group of feature values, plus the desired target.
+struct COMPONENT_EXPORT(LEARNING_COMMON) LabelledExample {
+ LabelledExample();
+ LabelledExample(std::initializer_list<FeatureValue> init_list,
+ TargetValue target);
+ LabelledExample(const LabelledExample& rhs);
+ LabelledExample(LabelledExample&& rhs) noexcept;
+ ~LabelledExample();
+
+ // Comparisons ignore weight, because it's convenient.
+ bool operator==(const LabelledExample& rhs) const;
+ bool operator!=(const LabelledExample& rhs) const;
+ bool operator<(const LabelledExample& rhs) const;
+
+ LabelledExample& operator=(const LabelledExample& rhs);
+ LabelledExample& operator=(LabelledExample&& rhs);
+
+ // Observed feature values.
+ // Note that to interpret these values, you probably need to have the
+ // LearningTask that they're supposed to be used with.
+ FeatureVector features;
+
+ // Observed output value, when given |features| as input.
+ TargetValue target_value;
+
+ WeightType weight = 1u;
+
+ // Copy / assignment is allowed.
+};
+
+// TODO(liberato): This should probably move to impl/ .
+class COMPONENT_EXPORT(LEARNING_COMMON) TrainingData {
+ public:
+ using ExampleVector = std::vector<LabelledExample>;
+ using const_iterator = ExampleVector::const_iterator;
+
+ TrainingData();
+ TrainingData(const TrainingData& rhs);
+ TrainingData(TrainingData&& rhs);
+
+ TrainingData& operator=(TrainingData&& rhs);
+
+ ~TrainingData();
+
+ // Add |example| with weight |weight|.
+ void push_back(const LabelledExample& example) {
+ DCHECK_GT(example.weight, 0u);
+ examples_.push_back(example);
+ total_weight_ += example.weight;
+ }
+
+ bool empty() const { return !total_weight_; }
+
+ size_t size() const { return examples_.size(); }
+
+ // Returns the number of instances, taking into account their weight. For
+ // example, if one adds an example with weight 2, then this will return two
+ // more than it did before.
+ WeightType total_weight() const { return total_weight_; }
+
+ const_iterator begin() const { return examples_.begin(); }
+ const_iterator end() const { return examples_.end(); }
+
+ bool is_unweighted() const { return examples_.size() == total_weight_; }
+
+ // Provide the |i|-th example, over [0, size()).
+ const LabelledExample& operator[](size_t i) const { return examples_[i]; }
+ LabelledExample& operator[](size_t i) { return examples_[i]; }
+
+ // Return a copy of this data with duplicate entries merged. Example weights
+ // will be summed.
+ TrainingData DeDuplicate() const;
+
+ private:
+ ExampleVector examples_;
+
+ WeightType total_weight_ = 0u;
+
+ // Copy / assignment is allowed.
+};
+
+COMPONENT_EXPORT(LEARNING_COMMON)
+std::ostream& operator<<(std::ostream& out, const LabelledExample& example);
+
+COMPONENT_EXPORT(LEARNING_COMMON)
+std::ostream& operator<<(std::ostream& out, const FeatureVector& features);
+
+} // namespace learning
+} // namespace media
+
+#endif // MEDIA_LEARNING_COMMON_LABELLED_EXAMPLE_H_
diff --git a/chromium/media/learning/common/labelled_example_unittest.cc b/chromium/media/learning/common/labelled_example_unittest.cc
new file mode 100644
index 00000000000..1c9e77f19f5
--- /dev/null
+++ b/chromium/media/learning/common/labelled_example_unittest.cc
@@ -0,0 +1,233 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/learning/common/labelled_example.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+namespace learning {
+
+class LearnerLabelledExampleTest : public testing::Test {};
+
+TEST_F(LearnerLabelledExampleTest, InitListWorks) {
+ const int kFeature1 = 123;
+ const int kFeature2 = 456;
+ std::vector<FeatureValue> features = {FeatureValue(kFeature1),
+ FeatureValue(kFeature2)};
+ TargetValue target(789);
+ LabelledExample example({FeatureValue(kFeature1), FeatureValue(kFeature2)},
+ target);
+
+ EXPECT_EQ(example.features, features);
+ EXPECT_EQ(example.target_value, target);
+}
+
+TEST_F(LearnerLabelledExampleTest, CopyConstructionWorks) {
+ LabelledExample example_1({FeatureValue(123), FeatureValue(456)},
+ TargetValue(789));
+ LabelledExample example_2(example_1);
+
+ EXPECT_EQ(example_1, example_2);
+}
+
+TEST_F(LearnerLabelledExampleTest, MoveConstructionWorks) {
+ LabelledExample example_1({FeatureValue(123), FeatureValue(456)},
+ TargetValue(789));
+
+ LabelledExample example_1_copy(example_1);
+ LabelledExample example_1_move(std::move(example_1));
+
+ EXPECT_EQ(example_1_copy, example_1_move);
+ EXPECT_NE(example_1_copy, example_1);
+}
+
+TEST_F(LearnerLabelledExampleTest, EqualExamplesCompareAsEqual) {
+ const int kFeature1 = 123;
+ const int kFeature2 = 456;
+ TargetValue target(789);
+ LabelledExample example_1({FeatureValue(kFeature1), FeatureValue(kFeature2)},
+ target);
+ LabelledExample example_2({FeatureValue(kFeature1), FeatureValue(kFeature2)},
+ target);
+ // Verify both that == and != work.
+ EXPECT_EQ(example_1, example_2);
+ EXPECT_FALSE(example_1 != example_2);
+ // Also insist that equal examples are not less.
+ EXPECT_FALSE(example_1 < example_2);
+ EXPECT_FALSE(example_2 < example_1);
+}
+
+TEST_F(LearnerLabelledExampleTest, UnequalFeaturesCompareAsUnequal) {
+ const int kFeature1 = 123;
+ const int kFeature2 = 456;
+ TargetValue target(789);
+ LabelledExample example_1({FeatureValue(kFeature1), FeatureValue(kFeature1)},
+ target);
+ LabelledExample example_2({FeatureValue(kFeature2), FeatureValue(kFeature2)},
+ target);
+ EXPECT_TRUE(example_1 != example_2);
+ EXPECT_FALSE(example_1 == example_2);
+ // We don't care which way is <, but we do care that one is less than the
+ // other but not both.
+ EXPECT_NE((example_1 < example_2), (example_2 < example_1));
+}
+
+TEST_F(LearnerLabelledExampleTest, WeightDoesntChangeExampleEquality) {
+ const int kFeature1 = 123;
+ TargetValue target(789);
+ LabelledExample example_1({FeatureValue(kFeature1)}, target);
+ LabelledExample example_2 = example_1;
+
+ // Set the weights to be unequal. This should not affect the comparison.
+ example_1.weight = 10u;
+ example_2.weight = 20u;
+
+ // Verify both that == and != ignore weights.
+ EXPECT_EQ(example_1, example_2);
+ EXPECT_FALSE(example_1 != example_2);
+ // Also insist that equal examples are not less.
+ EXPECT_FALSE(example_1 < example_2);
+ EXPECT_FALSE(example_2 < example_1);
+}
+
+TEST_F(LearnerLabelledExampleTest, ExampleAssignmentCopiesWeights) {
+ // While comparisons ignore weights, copy / assign should not.
+ const int kFeature1 = 123;
+ TargetValue target(789);
+ LabelledExample example_1({FeatureValue(kFeature1)}, target);
+ example_1.weight = 10u;
+
+ // Copy-assignment.
+ LabelledExample example_2;
+ example_2 = example_1;
+ EXPECT_EQ(example_1, example_2);
+ EXPECT_EQ(example_1.weight, example_2.weight);
+
+ // Copy-construction.
+ LabelledExample example_3(example_1);
+ EXPECT_EQ(example_1, example_3);
+ EXPECT_EQ(example_1.weight, example_3.weight);
+
+ // Move-assignment.
+ LabelledExample example_4;
+ example_4 = std::move(example_2);
+ EXPECT_EQ(example_1, example_4);
+ EXPECT_EQ(example_1.weight, example_4.weight);
+
+ // Move-construction.
+ LabelledExample example_5(std::move(example_3));
+ EXPECT_EQ(example_1, example_5);
+ EXPECT_EQ(example_1.weight, example_5.weight);
+}
+
+TEST_F(LearnerLabelledExampleTest, UnequalTargetsCompareAsUnequal) {
+ const int kFeature1 = 123;
+ const int kFeature2 = 456;
+ LabelledExample example_1({FeatureValue(kFeature1), FeatureValue(kFeature1)},
+ TargetValue(789));
+ LabelledExample example_2({FeatureValue(kFeature2), FeatureValue(kFeature2)},
+ TargetValue(987));
+ EXPECT_TRUE(example_1 != example_2);
+ EXPECT_FALSE(example_1 == example_2);
+ // Exactly one should be less than the other, but we don't care which one.
+ EXPECT_TRUE((example_1 < example_2) ^ (example_2 < example_1));
+}
+
+TEST_F(LearnerLabelledExampleTest, OrderingIsTransitive) {
+ // Verify that ordering is transitive. We don't particularly care what the
+ // ordering is, otherwise.
+
+ const FeatureValue kFeature1(123);
+ const FeatureValue kFeature2(456);
+ const FeatureValue kTarget1(789);
+ const FeatureValue kTarget2(987);
+ std::vector<LabelledExample> examples;
+ examples.push_back(LabelledExample({kFeature1}, kTarget1));
+ examples.push_back(LabelledExample({kFeature1}, kTarget2));
+ examples.push_back(LabelledExample({kFeature2}, kTarget1));
+ examples.push_back(LabelledExample({kFeature2}, kTarget2));
+ examples.push_back(LabelledExample({kFeature1, kFeature2}, kTarget1));
+ examples.push_back(LabelledExample({kFeature1, kFeature2}, kTarget2));
+ examples.push_back(LabelledExample({kFeature2, kFeature1}, kTarget1));
+ examples.push_back(LabelledExample({kFeature2, kFeature1}, kTarget2));
+
+ // Sort, and make sure that it ends up totally ordered.
+ std::sort(examples.begin(), examples.end());
+ for (auto outer = examples.begin(); outer != examples.end(); outer++) {
+ for (auto inner = outer + 1; inner != examples.end(); inner++) {
+ EXPECT_TRUE(*outer < *inner);
+ EXPECT_FALSE(*inner < *outer);
+ }
+ }
+}
+
+TEST_F(LearnerLabelledExampleTest, UnweightedTrainingDataPushBack) {
+ // Test that pushing examples from unweighted storage into TrainingData works.
+ TrainingData training_data;
+ EXPECT_EQ(training_data.total_weight(), 0u);
+ EXPECT_TRUE(training_data.empty());
+
+ LabelledExample example({FeatureValue(123)}, TargetValue(789));
+ training_data.push_back(example);
+ EXPECT_EQ(training_data.total_weight(), 1u);
+ EXPECT_FALSE(training_data.empty());
+ EXPECT_TRUE(training_data.is_unweighted());
+ EXPECT_EQ(training_data[0], example);
+}
+
+TEST_F(LearnerLabelledExampleTest, WeightedTrainingDataPushBack) {
+ // Test that pushing examples from weighted storage into TrainingData works.
+ TrainingData training_data;
+ EXPECT_EQ(training_data.total_weight(), 0u);
+ EXPECT_TRUE(training_data.empty());
+
+ LabelledExample example({FeatureValue(123)}, TargetValue(789));
+ const WeightType weight(10);
+ example.weight = weight;
+ training_data.push_back(example);
+ training_data.push_back(example);
+
+ EXPECT_EQ(training_data.total_weight(), weight * 2);
+ EXPECT_FALSE(training_data.empty());
+ EXPECT_FALSE(training_data.is_unweighted());
+ EXPECT_EQ(training_data[0], example);
+}
+
+TEST_F(LearnerLabelledExampleTest, TrainingDataDeDuplicate) {
+ // Make sure that TrainingData::DeDuplicate works properly.
+
+ const WeightType weight_0_a(100);
+ const WeightType weight_0_b(200);
+ const WeightType weight_1(500);
+ LabelledExample example_0({FeatureValue(123)}, TargetValue(789));
+ LabelledExample example_1({FeatureValue(456)}, TargetValue(789));
+
+ TrainingData training_data;
+ example_0.weight = weight_0_a;
+ training_data.push_back(example_0);
+ example_1.weight = weight_1;
+ training_data.push_back(example_1);
+ example_0.weight = weight_0_b;
+ training_data.push_back(example_0);
+
+ EXPECT_EQ(training_data.total_weight(), weight_0_a + weight_0_b + weight_1);
+ EXPECT_EQ(training_data.size(), 3u);
+ EXPECT_EQ(training_data[0].weight, weight_0_a);
+ EXPECT_EQ(training_data[1].weight, weight_1);
+ EXPECT_EQ(training_data[2].weight, weight_0_b);
+
+ TrainingData dedup = training_data.DeDuplicate();
+ EXPECT_EQ(dedup.total_weight(), weight_0_a + weight_0_b + weight_1);
+ EXPECT_EQ(dedup.size(), 2u);
+ // We don't care which order they're in, so find the index of |example_0|.
+ size_t idx_0 = (dedup[0] == example_0) ? 0 : 1;
+ EXPECT_EQ(dedup[idx_0], example_0);
+ EXPECT_EQ(dedup[idx_0].weight, weight_0_a + weight_0_b);
+ EXPECT_EQ(dedup[1u - idx_0], example_1);
+ EXPECT_EQ(dedup[1u - idx_0].weight, weight_1);
+}
+
+} // namespace learning
+} // namespace media
diff --git a/chromium/media/learning/common/learning_session.h b/chromium/media/learning/common/learning_session.h
index fdca843442e..22db890c2c4 100644
--- a/chromium/media/learning/common/learning_session.h
+++ b/chromium/media/learning/common/learning_session.h
@@ -9,8 +9,8 @@
#include "base/component_export.h"
#include "base/macros.h"
+#include "media/learning/common/labelled_example.h"
#include "media/learning/common/learning_task.h"
-#include "media/learning/common/training_example.h"
namespace media {
namespace learning {
@@ -24,7 +24,7 @@ class COMPONENT_EXPORT(LEARNING_COMMON) LearningSession {
// Add an observed example |example| to the learning task |task_name|.
// TODO(liberato): Consider making this an enum to match mojo.
virtual void AddExample(const std::string& task_name,
- const TrainingExample& example) = 0;
+ const LabelledExample& example) = 0;
// TODO(liberato): Add prediction API.
diff --git a/chromium/media/learning/common/learning_task.cc b/chromium/media/learning/common/learning_task.cc
index acc339b288a..07cc4079005 100644
--- a/chromium/media/learning/common/learning_task.cc
+++ b/chromium/media/learning/common/learning_task.cc
@@ -8,7 +8,19 @@ namespace media {
namespace learning {
LearningTask::LearningTask() = default;
+
+LearningTask::LearningTask(
+ const std::string& name,
+ Model model,
+ std::initializer_list<ValueDescription> feature_init_list,
+ ValueDescription target_description)
+ : name(name),
+ model(model),
+ feature_descriptions(std::move(feature_init_list)),
+ target_description(target_description) {}
+
LearningTask::LearningTask(const LearningTask&) = default;
+
LearningTask::~LearningTask() = default;
} // namespace learning
diff --git a/chromium/media/learning/common/learning_task.h b/chromium/media/learning/common/learning_task.h
index 51e7ff91146..0d9054c59a8 100644
--- a/chromium/media/learning/common/learning_task.h
+++ b/chromium/media/learning/common/learning_task.h
@@ -23,16 +23,11 @@ namespace learning {
// registering tasks.
struct COMPONENT_EXPORT(LEARNING_COMMON) LearningTask {
// Not all models support all feature / target descriptions. For example,
- // NaiveBayes requires kUnordered features. Similarly, kLogLinear doesn't
+ // NaiveBayes requires kUnordered features. Similarly, LogLinear wouldn't
// support kUnordered features or targets. kRandomForest might support more
// combination of orderings and types.
- //
- // Also note that not all of these are implemented yet.
enum class Model {
- kMostCommonTarget,
- kNaiveBayes,
- kRandomForest,
- kLogLinear,
+ kExtraTrees,
};
enum class Ordering {
@@ -81,7 +76,7 @@ struct COMPONENT_EXPORT(LEARNING_COMMON) LearningTask {
// Unique name for this learner.
std::string name;
- Model model = Model::kMostCommonTarget;
+ Model model = Model::kExtraTrees;
std::vector<ValueDescription> feature_descriptions;
@@ -91,6 +86,48 @@ struct COMPONENT_EXPORT(LEARNING_COMMON) LearningTask {
// TODO(liberato): add training parameters, like smoothing constants. It's
// okay if some of these are model-specific.
+ // TODO(liberato): switch to base::DictionaryValue?
+
+ // Maximum data set size until we start replacing examples.
+ size_t max_data_set_size = 100u;
+
+ // Fraction of examples that must be new before the task controller will train
+ // a new model. Note that this is a fraction of the number of examples that
+ // we currently have, which might be less than |max_data_set_size|.
+ double min_new_data_fraction = 0.1;
+
+ // If set, then we'll record a confusion matrix hackily to UMA using this as
+ // the histogram name.
+ std::string uma_hacky_confusion_matrix;
+
+ // RandomTree parameters
+
+ // How RandomTree handles unknown feature values.
+ enum class RTUnknownValueHandling {
+ // Return an empty distribution as the prediction.
+ kEmptyDistribution,
+
+ // Return the sum of the traversal of all splits.
+ kUseAllSplits,
+ };
+ RTUnknownValueHandling rt_unknown_value_handling =
+ RTUnknownValueHandling::kUseAllSplits;
+
+ // RandomForest parameters
+
+ // Number of trees in the random forest.
+ size_t rf_number_of_trees = 100;
+
+ // Reporting parameters
+
+ // This is a hack for the initial media capabilities investigation. It
+ // represents the threshold that we'll use to decide if a prediction would be
+ // T / F. We should not do this -- instead we should report the distribution
+ // average for the prediction and the observation via UKM.
+ //
+ // In particular, if the percentage of dropped frames is greater than this,
+ // then report "false" (not smooth), else we report true.
+ double smoothness_threshold = 0.1;
};
} // namespace learning
diff --git a/chromium/media/learning/common/training_example.cc b/chromium/media/learning/common/training_example.cc
deleted file mode 100644
index e181b97caf5..00000000000
--- a/chromium/media/learning/common/training_example.cc
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2018 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/learning/common/training_example.h"
-
-namespace media {
-namespace learning {
-
-TrainingExample::TrainingExample() = default;
-
-TrainingExample::TrainingExample(std::initializer_list<FeatureValue> init_list,
- TargetValue target)
- : features(init_list), target_value(target) {}
-
-TrainingExample::TrainingExample(const TrainingExample& rhs)
- : features(rhs.features), target_value(rhs.target_value) {}
-
-TrainingExample::TrainingExample(TrainingExample&& rhs) noexcept
- : features(std::move(rhs.features)),
- target_value(std::move(rhs.target_value)) {}
-
-TrainingExample::~TrainingExample() = default;
-
-std::ostream& operator<<(std::ostream& out, const TrainingExample& example) {
- for (const auto& feature : example.features)
- out << " " << feature;
-
- out << " => " << example.target_value;
-
- return out;
-}
-
-bool TrainingExample::operator==(const TrainingExample& rhs) const {
- return target_value == rhs.target_value && features == rhs.features;
-}
-
-bool TrainingExample::operator!=(const TrainingExample& rhs) const {
- return !((*this) == rhs);
-}
-
-TrainingExample& TrainingExample::operator=(const TrainingExample& rhs) =
- default;
-
-TrainingExample& TrainingExample::operator=(TrainingExample&& rhs) = default;
-
-TrainingDataStorage::TrainingDataStorage() = default;
-
-TrainingDataStorage::~TrainingDataStorage() = default;
-
-TrainingData::TrainingData(scoped_refptr<TrainingDataStorage> backing_storage)
- : backing_storage_(std::move(backing_storage)) {}
-
-TrainingData::TrainingData(scoped_refptr<TrainingDataStorage> backing_storage,
- TrainingDataStorage::const_iterator begin,
- TrainingDataStorage::const_iterator end)
- : backing_storage_(std::move(backing_storage)) {
- for (; begin != end; begin++)
- examples_.push_back(&(*begin));
-}
-
-TrainingData::TrainingData(const TrainingData& rhs) = default;
-
-TrainingData::TrainingData(TrainingData&& rhs) = default;
-
-TrainingData::~TrainingData() = default;
-
-} // namespace learning
-} // namespace media
diff --git a/chromium/media/learning/common/training_example.h b/chromium/media/learning/common/training_example.h
deleted file mode 100644
index d7a7a07bcbf..00000000000
--- a/chromium/media/learning/common/training_example.h
+++ /dev/null
@@ -1,142 +0,0 @@
-// Copyright 2018 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_LEARNING_COMMON_TRAINING_EXAMPLE_H_
-#define MEDIA_LEARNING_COMMON_TRAINING_EXAMPLE_H_
-
-#include <initializer_list>
-#include <ostream>
-#include <vector>
-
-#include "base/component_export.h"
-#include "base/macros.h"
-#include "base/memory/ref_counted.h"
-#include "media/learning/common/value.h"
-
-namespace media {
-namespace learning {
-
-// Vector of features, for training or prediction.
-// To interpret the features, one probably needs to check a LearningTask. It
-// provides a description for each index. For example, [0]=="height",
-// [1]=="url", etc.
-using FeatureVector = std::vector<FeatureValue>;
-
-// One training example == group of feature values, plus the desired target.
-struct COMPONENT_EXPORT(LEARNING_COMMON) TrainingExample {
- TrainingExample();
- TrainingExample(std::initializer_list<FeatureValue> init_list,
- TargetValue target);
- TrainingExample(const TrainingExample& rhs);
- TrainingExample(TrainingExample&& rhs) noexcept;
- ~TrainingExample();
-
- bool operator==(const TrainingExample& rhs) const;
- bool operator!=(const TrainingExample& rhs) const;
-
- TrainingExample& operator=(const TrainingExample& rhs);
- TrainingExample& operator=(TrainingExample&& rhs);
-
- // Observed feature values.
- // Note that to interpret these values, you probably need to have the
- // LearningTask that they're supposed to be used with.
- FeatureVector features;
-
- // Observed output value, when given |features| as input.
- TargetValue target_value;
-
- // Copy / assignment is allowed.
-};
-
-// Collection of training examples. We use a vector since we allow duplicates.
-class COMPONENT_EXPORT(LEARNING_COMMON) TrainingDataStorage
- : public base::RefCountedThreadSafe<TrainingDataStorage> {
- public:
- using StorageVector = std::vector<TrainingExample>;
- using const_iterator = StorageVector::const_iterator;
-
- TrainingDataStorage();
-
- StorageVector::const_iterator begin() const { return examples_.begin(); }
- StorageVector::const_iterator end() const { return examples_.end(); }
-
- void push_back(const TrainingExample& example) {
- examples_.push_back(example);
- }
-
- // Returns true if and only if |example| is included in our data. Note that
- // this checks that the pointer itself is included, so that one might tell if
- // an example is backed by this storage or not. It does not care if there is
- // an example in our storage that would TrainingExample::operator==(*example).
- bool contains(const TrainingExample* example) const {
- return (example >= examples_.data()) &&
- (example < examples_.data() + examples_.size());
- }
-
- private:
- friend class base::RefCountedThreadSafe<TrainingDataStorage>;
-
- ~TrainingDataStorage();
-
- std::vector<TrainingExample> examples_;
-
- DISALLOW_COPY_AND_ASSIGN(TrainingDataStorage);
-};
-
-// Collection of pointers to training data. References would be more convenient
-// but they're not allowed.
-class COMPONENT_EXPORT(LEARNING_COMMON) TrainingData {
- public:
- using ExampleVector = std::vector<const TrainingExample*>;
- using const_iterator = ExampleVector::const_iterator;
-
- // Construct an empty set of examples, with |backing_storage| as the allowed
- // underlying storage.
- TrainingData(scoped_refptr<TrainingDataStorage> backing_storage);
-
- // Construct a list of examples from |begin| to excluding |end|.
- TrainingData(scoped_refptr<TrainingDataStorage> backing_storage,
- TrainingDataStorage::const_iterator begin,
- TrainingDataStorage::const_iterator end);
-
- TrainingData(const TrainingData& rhs);
- TrainingData(TrainingData&& rhs);
-
- ~TrainingData();
-
- void push_back(const TrainingExample* example) {
- DCHECK(backing_storage_);
- DCHECK(backing_storage_->contains(example));
- examples_.push_back(example);
- }
-
- bool empty() const { return examples_.empty(); }
-
- size_t size() const { return examples_.size(); }
-
- const_iterator begin() const { return examples_.begin(); }
- const_iterator end() const { return examples_.end(); }
-
- const TrainingExample* operator[](size_t i) const { return examples_[i]; }
-
- scoped_refptr<TrainingDataStorage> storage() const {
- return backing_storage_;
- }
-
- private:
- // It would be nice if we insisted that
- scoped_refptr<TrainingDataStorage> backing_storage_;
-
- ExampleVector examples_;
-
- // Copy / assignment is allowed.
-};
-
-COMPONENT_EXPORT(LEARNING_COMMON)
-std::ostream& operator<<(std::ostream& out, const TrainingExample& example);
-
-} // namespace learning
-} // namespace media
-
-#endif // MEDIA_LEARNING_COMMON_TRAINING_EXAMPLE_H_
diff --git a/chromium/media/learning/common/training_example_unittest.cc b/chromium/media/learning/common/training_example_unittest.cc
deleted file mode 100644
index 5c104ef265d..00000000000
--- a/chromium/media/learning/common/training_example_unittest.cc
+++ /dev/null
@@ -1,134 +0,0 @@
-// Copyright 2018 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/learning/common/training_example.h"
-
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace media {
-namespace learning {
-
-class LearnerTrainingExampleTest : public testing::Test {};
-
-TEST_F(LearnerTrainingExampleTest, InitListWorks) {
- const int kFeature1 = 123;
- const int kFeature2 = 456;
- std::vector<FeatureValue> features = {FeatureValue(kFeature1),
- FeatureValue(kFeature2)};
- TargetValue target(789);
- TrainingExample example({FeatureValue(kFeature1), FeatureValue(kFeature2)},
- target);
-
- EXPECT_EQ(example.features, features);
- EXPECT_EQ(example.target_value, target);
-}
-
-TEST_F(LearnerTrainingExampleTest, CopyConstructionWorks) {
- TrainingExample example_1({FeatureValue(123), FeatureValue(456)},
- TargetValue(789));
- TrainingExample example_2(example_1);
-
- EXPECT_EQ(example_1, example_2);
-}
-
-TEST_F(LearnerTrainingExampleTest, MoveConstructionWorks) {
- TrainingExample example_1({FeatureValue(123), FeatureValue(456)},
- TargetValue(789));
-
- TrainingExample example_1_copy(example_1);
- TrainingExample example_1_move(std::move(example_1));
-
- EXPECT_EQ(example_1_copy, example_1_move);
- EXPECT_NE(example_1_copy, example_1);
-}
-
-TEST_F(LearnerTrainingExampleTest, EqualExamplesCompareAsEqual) {
- const int kFeature1 = 123;
- const int kFeature2 = 456;
- TargetValue target(789);
- TrainingExample example_1({FeatureValue(kFeature1), FeatureValue(kFeature2)},
- target);
- TrainingExample example_2({FeatureValue(kFeature1), FeatureValue(kFeature2)},
- target);
- // Verify both that == and != work.
- EXPECT_EQ(example_1, example_2);
- EXPECT_FALSE(example_1 != example_2);
-}
-
-TEST_F(LearnerTrainingExampleTest, UnequalFeaturesCompareAsUnequal) {
- const int kFeature1 = 123;
- const int kFeature2 = 456;
- TargetValue target(789);
- TrainingExample example_1({FeatureValue(kFeature1), FeatureValue(kFeature1)},
- target);
- TrainingExample example_2({FeatureValue(kFeature2), FeatureValue(kFeature2)},
- target);
- EXPECT_NE(example_1, example_2);
- EXPECT_FALSE(example_1 == example_2);
-}
-
-TEST_F(LearnerTrainingExampleTest, UnequalTargetsCompareAsUnequal) {
- const int kFeature1 = 123;
- const int kFeature2 = 456;
- TrainingExample example_1({FeatureValue(kFeature1), FeatureValue(kFeature1)},
- TargetValue(789));
- TrainingExample example_2({FeatureValue(kFeature2), FeatureValue(kFeature2)},
- TargetValue(987));
- EXPECT_NE(example_1, example_2);
- EXPECT_FALSE(example_1 == example_2);
-}
-
-TEST_F(LearnerTrainingExampleTest, StoragePushBack) {
- TrainingExample example({FeatureValue(123)}, TargetValue(789));
- scoped_refptr<TrainingDataStorage> storage =
- base::MakeRefCounted<TrainingDataStorage>();
- EXPECT_EQ(storage->begin(), storage->end());
- storage->push_back(example);
- EXPECT_NE(storage->begin(), storage->end());
- EXPECT_EQ(++storage->begin(), storage->end());
- EXPECT_EQ(*storage->begin(), example);
-}
-
-TEST_F(LearnerTrainingExampleTest, StorageCheckWorks) {
- // Verify that TrainingDataStorage can tell if an example is in its storage.
- TrainingExample example({FeatureValue(123)}, TargetValue(789));
- scoped_refptr<TrainingDataStorage> storage =
- base::MakeRefCounted<TrainingDataStorage>();
- storage->push_back(example);
-
- EXPECT_TRUE(storage->contains(&(*storage->begin())));
- EXPECT_FALSE(storage->contains(&example));
-}
-
-TEST_F(LearnerTrainingExampleTest, TrainingDataPushBack) {
- TrainingExample example({FeatureValue(123)}, TargetValue(789));
- scoped_refptr<TrainingDataStorage> storage =
- base::MakeRefCounted<TrainingDataStorage>();
- storage->push_back(example);
-
- TrainingData training_data(storage);
- EXPECT_EQ(training_data.size(), 0u);
- EXPECT_TRUE(training_data.empty());
- training_data.push_back(&(*storage->begin()));
- EXPECT_EQ(training_data.size(), 1u);
- EXPECT_FALSE(training_data.empty());
- EXPECT_EQ(*training_data.begin(), &(*storage->begin()));
- EXPECT_EQ(training_data[0], &(*storage->begin()));
-}
-
-TEST_F(LearnerTrainingExampleTest, TrainingDataConstructWithRange) {
- TrainingExample example({FeatureValue(123)}, TargetValue(789));
- scoped_refptr<TrainingDataStorage> storage =
- base::MakeRefCounted<TrainingDataStorage>();
- storage->push_back(example);
-
- TrainingData training_data(storage, storage->begin(), storage->end());
- EXPECT_EQ(training_data.size(), 1u);
- EXPECT_FALSE(training_data.empty());
- EXPECT_EQ(*training_data.begin(), &(*storage->begin()));
- EXPECT_EQ(training_data[0], &(*storage->begin()));
-}
-
-} // namespace learning
-} // namespace media
diff --git a/chromium/media/learning/common/value.cc b/chromium/media/learning/common/value.cc
index ff80a8476ac..9c9395c25d4 100644
--- a/chromium/media/learning/common/value.cc
+++ b/chromium/media/learning/common/value.cc
@@ -11,8 +11,6 @@ namespace learning {
Value::Value() = default;
-Value::Value(int x) : value_(x) {}
-
Value::Value(const char* x) {
// std::hash would be nice, but it can (and does) change values between
// different instances of the class. In other words, Value("foo") !=
@@ -37,6 +35,10 @@ bool Value::operator<(const Value& rhs) const {
return value_ < rhs.value_;
}
+bool Value::operator>(const Value& rhs) const {
+ return value_ > rhs.value_;
+}
+
std::ostream& operator<<(std::ostream& out, const Value& value) {
return out << value.value_;
}
diff --git a/chromium/media/learning/common/value.h b/chromium/media/learning/common/value.h
index c468deb29c5..0e64da961f3 100644
--- a/chromium/media/learning/common/value.h
+++ b/chromium/media/learning/common/value.h
@@ -8,6 +8,7 @@
#include <cstdint>
#include <ostream>
#include <string>
+#include <type_traits>
#include "base/component_export.h"
@@ -25,7 +26,14 @@ namespace learning {
class COMPONENT_EXPORT(LEARNING_COMMON) Value {
public:
Value();
- explicit Value(int x);
+ template <typename T>
+ explicit Value(T x) : value_(x) {
+ // We want to rule out mostly pointers, since they wouldn't make much sense.
+ // Note that the implicit cast would likely fail anyway.
+ static_assert(std::is_arithmetic<T>::value || std::is_enum<T>::value,
+ "media::learning::Value works only with arithmetic types");
+ }
+
explicit Value(const char* x);
explicit Value(const std::string& x);
@@ -34,11 +42,12 @@ class COMPONENT_EXPORT(LEARNING_COMMON) Value {
bool operator==(const Value& rhs) const;
bool operator!=(const Value& rhs) const;
bool operator<(const Value& rhs) const;
+ bool operator>(const Value& rhs) const;
- int64_t value() const { return value_; }
+ double value() const { return value_; }
private:
- int64_t value_ = 0;
+ double value_ = 0;
friend COMPONENT_EXPORT(LEARNING_COMMON) std::ostream& operator<<(
std::ostream& out,
diff --git a/chromium/media/learning/common/value_unittest.cc b/chromium/media/learning/common/value_unittest.cc
index f4c9e0a975d..691e4ed72e3 100644
--- a/chromium/media/learning/common/value_unittest.cc
+++ b/chromium/media/learning/common/value_unittest.cc
@@ -42,6 +42,19 @@ TEST_F(LearnerValueTest, IntsCompareCorrectly) {
EXPECT_TRUE(v1 != v3);
EXPECT_TRUE(v1 < v3);
EXPECT_FALSE(v3 < v1);
+ EXPECT_FALSE(v3 < v3);
+ EXPECT_FALSE(v1 < v1);
+ EXPECT_TRUE(v3 > v1);
+ EXPECT_FALSE(v1 > v3);
+ EXPECT_FALSE(v1 > v1);
+ EXPECT_FALSE(v3 > v3);
+}
+
+TEST_F(LearnerValueTest, VariousTypesWork) {
+ EXPECT_EQ(Value(1.2).value(), 1.2);
+ EXPECT_EQ(Value(10).value(), 10);
+ EXPECT_EQ(Value(static_cast<int64_t>(-10)).value(), -10);
+ EXPECT_EQ(Value(static_cast<uint64_t>(10)).value(), 10);
}
} // namespace learning
diff --git a/chromium/media/learning/impl/BUILD.gn b/chromium/media/learning/impl/BUILD.gn
index 94e238d6d16..aae0dddeb53 100644
--- a/chromium/media/learning/impl/BUILD.gn
+++ b/chromium/media/learning/impl/BUILD.gn
@@ -4,26 +4,44 @@
component("impl") {
output_name = "learning_impl"
- visibility = [ "//media/learning/impl:unit_tests" ]
+ visibility = [
+ "//media/learning/impl:unit_tests",
+
+ # Actual clients.
+ "//media/capabilities",
+ ]
sources = [
+ "distribution_reporter.cc",
+ "distribution_reporter.h",
+ "extra_trees_trainer.cc",
+ "extra_trees_trainer.h",
"learning_session_impl.cc",
"learning_session_impl.h",
"learning_task_controller.h",
"learning_task_controller_impl.cc",
"learning_task_controller_impl.h",
+ "lookup_table_trainer.cc",
+ "lookup_table_trainer.h",
"model.h",
+ "one_hot.cc",
+ "one_hot.h",
+ "random_number_generator.cc",
+ "random_number_generator.h",
"random_tree_trainer.cc",
"random_tree_trainer.h",
"target_distribution.cc",
"target_distribution.h",
"training_algorithm.h",
+ "voting_ensemble.cc",
+ "voting_ensemble.h",
]
defines = [ "IS_LEARNING_IMPL_IMPL" ]
deps = [
"//base",
+ "//services/metrics/public/cpp:metrics_cpp",
]
public_deps = [
@@ -35,9 +53,19 @@ source_set("unit_tests") {
testonly = true
sources = [
+ "distribution_reporter_unittest.cc",
+ "extra_trees_trainer_unittest.cc",
+ "fisher_iris_dataset.cc",
+ "fisher_iris_dataset.h",
"learning_session_impl_unittest.cc",
+ "learning_task_controller_impl_unittest.cc",
+ "lookup_table_trainer_unittest.cc",
+ "one_hot_unittest.cc",
+ "random_number_generator_unittest.cc",
"random_tree_trainer_unittest.cc",
"target_distribution_unittest.cc",
+ "test_random_number_generator.cc",
+ "test_random_number_generator.h",
]
deps = [
diff --git a/chromium/media/learning/impl/distribution_reporter.cc b/chromium/media/learning/impl/distribution_reporter.cc
new file mode 100644
index 00000000000..b611f92736f
--- /dev/null
+++ b/chromium/media/learning/impl/distribution_reporter.cc
@@ -0,0 +1,78 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/learning/impl/distribution_reporter.h"
+
+#include "base/bind.h"
+#include "base/metrics/histogram_functions.h"
+
+namespace media {
+namespace learning {
+
+// Low order bit is "observed", second bit is "predicted".
+enum class ConfusionMatrix {
+ TrueNegative = 0, // predicted == observed == false
+ FalseNegative = 1, // predicted == false, observed == true
+ FalsePositive = 2, // predicted == true, observed == false
+ TruePositive = 3, // predicted == observed == true
+ kMaxValue = TruePositive
+};
+
+// TODO(liberato): Currently, this implementation is a hack to collect some
+// sanity-checking data for local learning with MediaCapabilities. We assume
+// that the prediction is the "percentage of dropped frames".
+//
+// Please see https://chromium-review.googlesource.com/c/chromium/src/+/1385107
+// for an actual UKM-based implementation.
+class RegressionReporter : public DistributionReporter {
+ public:
+ RegressionReporter(const LearningTask& task) : DistributionReporter(task) {}
+
+ void OnPrediction(TargetDistribution observed,
+ TargetDistribution predicted) override {
+ DCHECK_EQ(task().target_description.ordering,
+ LearningTask::Ordering::kNumeric);
+ DCHECK(!task().uma_hacky_confusion_matrix.empty());
+
+ // As a complete hack, record accuracy with a fixed threshold. The average
+ // is the observed / predicted percentage of dropped frames.
+ bool observed_smooth = observed.Average() <= task().smoothness_threshold;
+ bool predicted_smooth = predicted.Average() <= task().smoothness_threshold;
+ DVLOG(2) << "Learning: " << task().name
+ << ": predicted: " << predicted_smooth << " ("
+ << predicted.Average() << ") observed: " << observed_smooth << " ("
+ << observed.Average() << ")";
+
+ // Convert to a bucket from which we can get the confusion matrix.
+ ConfusionMatrix uma_bucket = static_cast<ConfusionMatrix>(
+ (observed_smooth ? 1 : 0) | (predicted_smooth ? 2 : 0));
+ base::UmaHistogramEnumeration(task().uma_hacky_confusion_matrix,
+ uma_bucket);
+ }
+};
+
+std::unique_ptr<DistributionReporter> DistributionReporter::Create(
+ const LearningTask& task) {
+ // Hacky reporting is the only thing we know how to report.
+ if (task.uma_hacky_confusion_matrix.empty())
+ return nullptr;
+
+ if (task.target_description.ordering == LearningTask::Ordering::kNumeric)
+ return std::make_unique<RegressionReporter>(task);
+ return nullptr;
+}
+
+DistributionReporter::DistributionReporter(const LearningTask& task)
+ : task_(task), weak_factory_(this) {}
+
+DistributionReporter::~DistributionReporter() = default;
+
+Model::PredictionCB DistributionReporter::GetPredictionCallback(
+ TargetDistribution observed) {
+ return base::BindOnce(&DistributionReporter::OnPrediction,
+ weak_factory_.GetWeakPtr(), observed);
+}
+
+} // namespace learning
+} // namespace media
diff --git a/chromium/media/learning/impl/distribution_reporter.h b/chromium/media/learning/impl/distribution_reporter.h
new file mode 100644
index 00000000000..78b22e65c93
--- /dev/null
+++ b/chromium/media/learning/impl/distribution_reporter.h
@@ -0,0 +1,54 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_LEARNING_IMPL_DISTRIBUTION_REPORTER_H_
+#define MEDIA_LEARNING_IMPL_DISTRIBUTION_REPORTER_H_
+
+#include "base/callback.h"
+#include "base/component_export.h"
+#include "base/macros.h"
+#include "base/memory/weak_ptr.h"
+#include "media/learning/common/learning_task.h"
+#include "media/learning/impl/model.h"
+#include "media/learning/impl/target_distribution.h"
+
+namespace media {
+namespace learning {
+
+// Helper class to report on predicted distrubutions vs target distributions.
+// Use DistributionReporter::Create() to create one that's appropriate for a
+// specific learning task.
+class COMPONENT_EXPORT(LEARNING_IMPL) DistributionReporter {
+ public:
+ // Create a DistributionReporter that's suitable for |task|.
+ static std::unique_ptr<DistributionReporter> Create(const LearningTask& task);
+
+ virtual ~DistributionReporter();
+
+ // Returns a prediction CB that will be compared to |observed|. |observed| is
+ // the total number of counts that we observed.
+ virtual Model::PredictionCB GetPredictionCallback(
+ TargetDistribution observed);
+
+ protected:
+ DistributionReporter(const LearningTask& task);
+
+ const LearningTask& task() const { return task_; }
+
+ // Implemented by subclasses to report a prediction.
+ virtual void OnPrediction(TargetDistribution observed,
+ TargetDistribution predicted) = 0;
+
+ private:
+ LearningTask task_;
+
+ base::WeakPtrFactory<DistributionReporter> weak_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(DistributionReporter);
+};
+
+} // namespace learning
+} // namespace media
+
+#endif // MEDIA_LEARNING_IMPL_DISTRIBUTION_REPORTER_H_
diff --git a/chromium/media/learning/impl/distribution_reporter_unittest.cc b/chromium/media/learning/impl/distribution_reporter_unittest.cc
new file mode 100644
index 00000000000..3815d5c4842
--- /dev/null
+++ b/chromium/media/learning/impl/distribution_reporter_unittest.cc
@@ -0,0 +1,71 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <memory>
+#include <vector>
+
+#include "base/bind.h"
+#include "base/test/scoped_task_environment.h"
+#include "media/learning/common/learning_task.h"
+#include "media/learning/impl/distribution_reporter.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+namespace learning {
+
+class DistributionReporterTest : public testing::Test {
+ public:
+ base::test::ScopedTaskEnvironment scoped_task_environment_;
+
+ LearningTask task_;
+
+ std::unique_ptr<DistributionReporter> reporter_;
+};
+
+TEST_F(DistributionReporterTest, DistributionReporterDoesNotCrash) {
+ // Make sure that we request some sort of reporting.
+ task_.target_description.ordering = LearningTask::Ordering::kNumeric;
+ task_.uma_hacky_confusion_matrix = "test";
+ reporter_ = DistributionReporter::Create(task_);
+ EXPECT_NE(reporter_, nullptr);
+
+ const TargetValue Zero(0);
+ const TargetValue One(1);
+
+ TargetDistribution observed;
+ // Observe an average of 2 / 3.
+ observed[Zero] = 100;
+ observed[One] = 200;
+ auto cb = reporter_->GetPredictionCallback(observed);
+
+ TargetDistribution predicted;
+ // Predict an average of 5 / 9.
+ predicted[Zero] = 40;
+ predicted[One] = 50;
+ std::move(cb).Run(predicted);
+
+ // TODO(liberato): When we switch to ukm, use a TestUkmRecorder to make sure
+ // that it fills in the right stuff.
+ // https://chromium-review.googlesource.com/c/chromium/src/+/1385107 .
+}
+
+TEST_F(DistributionReporterTest, DistributionReporterNeedsUmaName) {
+ // Make sure that we don't get a reporter if we don't request any reporting.
+ task_.target_description.ordering = LearningTask::Ordering::kNumeric;
+ task_.uma_hacky_confusion_matrix = "";
+ reporter_ = DistributionReporter::Create(task_);
+ EXPECT_EQ(reporter_, nullptr);
+}
+
+TEST_F(DistributionReporterTest,
+ DistributionReporterHackyConfusionMatrixNeedsRegression) {
+ // Hacky confusion matrix reporting only works with regression.
+ task_.target_description.ordering = LearningTask::Ordering::kUnordered;
+ task_.uma_hacky_confusion_matrix = "test";
+ reporter_ = DistributionReporter::Create(task_);
+ EXPECT_EQ(reporter_, nullptr);
+}
+
+} // namespace learning
+} // namespace media
diff --git a/chromium/media/learning/impl/extra_trees_trainer.cc b/chromium/media/learning/impl/extra_trees_trainer.cc
new file mode 100644
index 00000000000..4e16aae7908
--- /dev/null
+++ b/chromium/media/learning/impl/extra_trees_trainer.cc
@@ -0,0 +1,69 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/learning/impl/extra_trees_trainer.h"
+
+#include <set>
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "media/learning/impl/voting_ensemble.h"
+
+namespace media {
+namespace learning {
+
+ExtraTreesTrainer::ExtraTreesTrainer() = default;
+
+ExtraTreesTrainer::~ExtraTreesTrainer() = default;
+
+void ExtraTreesTrainer::Train(const LearningTask& task,
+ const TrainingData& training_data,
+ TrainedModelCB model_cb) {
+ // Make sure that there is no training in progress.
+ DCHECK_EQ(trees_.size(), 0u);
+ DCHECK_EQ(converter_.get(), nullptr);
+
+ task_ = task;
+ trees_.reserve(task.rf_number_of_trees);
+
+ // Instantiate our tree trainer if we haven't already. We do this now only
+ // so that we can send it our rng, mostly for tests.
+ // TODO(liberato): We should always take the rng in the ctor, rather than
+ // via SetRngForTesting. Then we can do this earlier.
+ if (!tree_trainer_)
+ tree_trainer_ = std::make_unique<RandomTreeTrainer>(rng());
+
+ // RandomTree requires one-hot vectors to properly choose split points the way
+ // that ExtraTrees require.
+ // TODO(liberato): Modify it not to need this. It's slow.
+ converter_ = std::make_unique<OneHotConverter>(task, training_data);
+ converted_training_data_ = converter_->Convert(training_data);
+
+ // Start training. Send in nullptr to start the process.
+ OnRandomTreeModel(std::move(model_cb), nullptr);
+}
+
+void ExtraTreesTrainer::OnRandomTreeModel(TrainedModelCB model_cb,
+ std::unique_ptr<Model> model) {
+ // Allow a null Model to make it easy to start training.
+ if (model)
+ trees_.push_back(std::move(model));
+
+ // If this is the last tree, then return the finished model.
+ if (trees_.size() == task_.rf_number_of_trees) {
+ std::move(model_cb).Run(std::make_unique<ConvertingModel>(
+ std::move(converter_),
+ std::make_unique<VotingEnsemble>(std::move(trees_))));
+ return;
+ }
+
+ // Train the next tree.
+ auto cb = base::BindOnce(&ExtraTreesTrainer::OnRandomTreeModel, AsWeakPtr(),
+ std::move(model_cb));
+ tree_trainer_->Train(converter_->converted_task(), converted_training_data_,
+ std::move(cb));
+}
+
+} // namespace learning
+} // namespace media
diff --git a/chromium/media/learning/impl/extra_trees_trainer.h b/chromium/media/learning/impl/extra_trees_trainer.h
new file mode 100644
index 00000000000..45784f2e3f5
--- /dev/null
+++ b/chromium/media/learning/impl/extra_trees_trainer.h
@@ -0,0 +1,61 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_LEARNING_IMPL_EXTRA_TREES_TRAINER_H_
+#define MEDIA_LEARNING_IMPL_EXTRA_TREES_TRAINER_H_
+
+#include <memory>
+#include <vector>
+
+#include "base/component_export.h"
+#include "base/macros.h"
+#include "base/memory/weak_ptr.h"
+#include "media/learning/common/learning_task.h"
+#include "media/learning/impl/one_hot.h"
+#include "media/learning/impl/random_number_generator.h"
+#include "media/learning/impl/random_tree_trainer.h"
+#include "media/learning/impl/training_algorithm.h"
+
+namespace media {
+namespace learning {
+
+// Bagged forest of extremely randomized trees.
+//
+// These are an ensemble of trees. Each tree is constructed from the full
+// training set. The trees are constructed by selecting a random subset of
+// features at each node. For each feature, a uniformly random split point is
+// chosen. The feature with the best randomly chosen split point is used.
+//
+// These will automatically convert nominal values to one-hot vectors.
+class COMPONENT_EXPORT(LEARNING_IMPL) ExtraTreesTrainer
+ : public TrainingAlgorithm,
+ public HasRandomNumberGenerator,
+ public base::SupportsWeakPtr<ExtraTreesTrainer> {
+ public:
+ ExtraTreesTrainer();
+ ~ExtraTreesTrainer() override;
+
+ // TrainingAlgorithm
+ void Train(const LearningTask& task,
+ const TrainingData& training_data,
+ TrainedModelCB model_cb) override;
+
+ private:
+ void OnRandomTreeModel(TrainedModelCB model_cb, std::unique_ptr<Model> model);
+
+ std::unique_ptr<TrainingAlgorithm> tree_trainer_;
+
+ // In-flight training.
+ LearningTask task_;
+ std::vector<std::unique_ptr<Model>> trees_;
+ std::unique_ptr<OneHotConverter> converter_;
+ TrainingData converted_training_data_;
+
+ DISALLOW_COPY_AND_ASSIGN(ExtraTreesTrainer);
+};
+
+} // namespace learning
+} // namespace media
+
+#endif // MEDIA_LEARNING_IMPL_EXTRA_TREES_TRAINER_H_
diff --git a/chromium/media/learning/impl/extra_trees_trainer_unittest.cc b/chromium/media/learning/impl/extra_trees_trainer_unittest.cc
new file mode 100644
index 00000000000..46482f855c2
--- /dev/null
+++ b/chromium/media/learning/impl/extra_trees_trainer_unittest.cc
@@ -0,0 +1,211 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/learning/impl/extra_trees_trainer.h"
+
+#include "base/memory/ref_counted.h"
+#include "base/test/scoped_task_environment.h"
+#include "media/learning/impl/fisher_iris_dataset.h"
+#include "media/learning/impl/test_random_number_generator.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+namespace learning {
+
+// Fixture for ExtraTreesTrainer tests, parameterized on the feature ordering
+// (nominal vs. numeric) so every test runs under both.
+class ExtraTreesTest : public testing::TestWithParam<LearningTask::Ordering> {
+ public:
+ ExtraTreesTest() : rng_(0), ordering_(GetParam()) {
+ trainer_.SetRandomNumberGeneratorForTesting(&rng_);
+ }
+
+ // Set up |task_| to have |n| features with the given ordering.
+ void SetupFeatures(size_t n) {
+ for (size_t i = 0; i < n; i++) {
+ LearningTask::ValueDescription desc;
+ desc.ordering = ordering_;
+ task_.feature_descriptions.push_back(desc);
+ }
+ }
+
+ // Trains a model on |data| and returns it, pumping the task environment
+ // until the TrainedModelCB has run.
+ // NOTE(review): |task| is ignored; trainer_.Train() is called with the
+ // member |task_| instead. All current callers pass |task_| anyway, but the
+ // parameter is misleading — confirm intent.
+ std::unique_ptr<Model> Train(const LearningTask& task,
+ const TrainingData& data) {
+ std::unique_ptr<Model> model;
+ trainer_.Train(
+ task_, data,
+ base::BindOnce(
+ [](std::unique_ptr<Model>* model_out,
+ std::unique_ptr<Model> model) { *model_out = std::move(model); },
+ &model));
+ scoped_task_environment_.RunUntilIdle();
+ return model;
+ }
+
+ base::test::ScopedTaskEnvironment scoped_task_environment_;
+
+ // Deterministic RNG, seeded with 0 so tree construction is reproducible.
+ TestRandomNumberGenerator rng_;
+ ExtraTreesTrainer trainer_;
+ LearningTask task_;
+ // Feature ordering.
+ LearningTask::Ordering ordering_;
+};
+
+// Training on an empty data set should still produce a model, and that model
+// should predict an empty distribution.
+TEST_P(ExtraTreesTest, EmptyTrainingDataWorks) {
+ TrainingData empty;
+ auto model = Train(task_, empty);
+ EXPECT_NE(model.get(), nullptr);
+ EXPECT_EQ(model->PredictDistribution(FeatureVector()), TargetDistribution());
+}
+
+// Sanity-check: the forest should fit the classic iris data set nearly
+// perfectly when evaluated on its own training set.
+TEST_P(ExtraTreesTest, FisherIrisDataset) {
+ SetupFeatures(4);
+ FisherIrisDataset iris;
+ TrainingData training_data = iris.GetTrainingData();
+ auto model = Train(task_, training_data);
+
+ // Verify predictions on the training set, just for sanity.
+ size_t num_correct = 0;
+ for (const LabelledExample& example : training_data) {
+ TargetDistribution distribution =
+ model->PredictDistribution(example.features);
+ TargetValue predicted_value;
+ // Count the example only if there is a unique most-likely class and it
+ // matches the label; weight each example by its training weight.
+ if (distribution.FindSingularMax(&predicted_value) &&
+ predicted_value == example.target_value) {
+ num_correct += example.weight;
+ }
+ }
+
+ // Expect very high accuracy. We should get ~100%.
+ // NOTE(review): C-style cast; the style guide prefers static_cast<double>.
+ double train_accuracy = ((double)num_correct) / training_data.total_weight();
+ EXPECT_GT(train_accuracy, 0.95);
+}
+
+TEST_P(ExtraTreesTest, WeightedTrainingSetIsSupported) {
+ // Create a training set with unseparable data, but give one of them a large
+ // weight. See if that one wins.
+ SetupFeatures(1);
+ // Identical features, conflicting targets: only the weights can decide.
+ LabelledExample example_1({FeatureValue(123)}, TargetValue(1));
+ LabelledExample example_2({FeatureValue(123)}, TargetValue(2));
+ const size_t weight = 100;
+ TrainingData training_data;
+ example_1.weight = weight;
+ training_data.push_back(example_1);
+ // Push many |example_2|'s, which will win without the weights.
+ training_data.push_back(example_2);
+ training_data.push_back(example_2);
+ training_data.push_back(example_2);
+ training_data.push_back(example_2);
+
+ // Create a weighed set with |weight| for each example's weight.
+ EXPECT_FALSE(training_data.is_unweighted());
+ auto model = Train(task_, training_data);
+
+ // The singular max should be example_1.
+ TargetDistribution distribution =
+ model->PredictDistribution(example_1.features);
+ TargetValue predicted_value;
+ EXPECT_TRUE(distribution.FindSingularMax(&predicted_value));
+ EXPECT_EQ(predicted_value, example_1.target_value);
+}
+
+// With a numeric target, predictions should average out near the heavily
+// weighted target value for each distinct feature vector.
+TEST_P(ExtraTreesTest, RegressionWorks) {
+ // Create a training set with unseparable data, but give one of them a large
+ // weight. See if that one wins.
+ SetupFeatures(2);
+ // Two pairs of examples; within each pair the features are identical but
+ // the targets differ, and one member carries weight 100.
+ LabelledExample example_1({FeatureValue(1), FeatureValue(123)},
+ TargetValue(1));
+ LabelledExample example_1_a({FeatureValue(1), FeatureValue(123)},
+ TargetValue(5));
+ LabelledExample example_2({FeatureValue(1), FeatureValue(456)},
+ TargetValue(20));
+ LabelledExample example_2_a({FeatureValue(1), FeatureValue(456)},
+ TargetValue(25));
+ TrainingData training_data;
+ example_1.weight = 100;
+ training_data.push_back(example_1);
+ training_data.push_back(example_1_a);
+ example_2.weight = 100;
+ training_data.push_back(example_2);
+ training_data.push_back(example_2_a);
+
+ // Switch the target to numeric so the trainer runs in regression mode.
+ task_.target_description.ordering = LearningTask::Ordering::kNumeric;
+
+ // Create a weighed set with |weight| for each example's weight.
+ auto model = Train(task_, training_data);
+
+ // Make sure that the results are in the right range.
+ TargetDistribution distribution =
+ model->PredictDistribution(example_1.features);
+ EXPECT_GT(distribution.Average(), example_1.target_value.value() * 0.95);
+ EXPECT_LT(distribution.Average(), example_1.target_value.value() * 1.05);
+ distribution = model->PredictDistribution(example_2.features);
+ EXPECT_GT(distribution.Average(), example_2.target_value.value() * 0.95);
+ EXPECT_LT(distribution.Average(), example_2.target_value.value() * 1.05);
+}
+
+TEST_P(ExtraTreesTest, RegressionVsBinaryClassification) {
+ // Create a binary classification task and a regression task that are roughly
+ // the same. Verify that the results are the same, too. In particular, for
+ // each set of features, we choose a regression target |pct| between 0 and
+ // 100. For the corresponding binary classification problem, we add |pct|
+ // true instances, and 100-|pct| false instances. The predicted averages
+ // should be roughly the same.
+ SetupFeatures(3);
+ TrainingData c_data, r_data;
+
+ std::set<LabelledExample> r_examples;
+ // Enumerate all 64 combinations of three 2-bit features.
+ for (size_t i = 0; i < 4 * 4 * 4; i++) {
+ FeatureValue f1(i & 3);
+ FeatureValue f2((i >> 2) & 3);
+ FeatureValue f3((i >> 4) & 3);
+ // NOTE(review): |frac| is declared int, so the 1.0* promotion is
+ // discarded by truncation: the sum is in [0,9], so |frac| is 0 for every
+ // combination except f1=f2=f3=3 (where it is 1). The comments above
+ // describe a fraction in [0,1]; this was likely meant to be double —
+ // confirm intent.
+ int frac = (1.0 * (f1.value() + f2.value() + f3.value())) / 9;
+ LabelledExample e({f1, f2, f3}, TargetValue(0));
+
+ // TODO(liberato): Consider adding noise, and verifying that the model
+ // predictions are roughly the same as each other, rather than the same as
+ // the currently noise-free target.
+
+ // Push some number of false and some number of true instances that is in
+ // the right ratio for |frac|.
+ const int total_examples = 100;
+ const int positive_examples = total_examples * frac;
+ e.weight = total_examples - positive_examples;
+ if (e.weight > 0)
+ c_data.push_back(e);
+ e.target_value = TargetValue(1.0);
+ e.weight = positive_examples;
+ if (e.weight > 0)
+ c_data.push_back(e);
+
+ // For the regression data, add an example with |frac| directly. Also save
+ // it so that we can look up the right answer below.
+ LabelledExample r_example(LabelledExample({f1, f2, f3}, TargetValue(frac)));
+ r_examples.insert(r_example);
+ r_data.push_back(r_example);
+ }
+
+ // Train a model on the binary classification task and the regression task.
+ auto c_model = Train(task_, c_data);
+ task_.target_description.ordering = LearningTask::Ordering::kNumeric;
+ auto r_model = Train(task_, r_data);
+
+ // Verify that, for all feature combinations, the models roughly agree. Since
+ // the data is separable, it probably should be exact.
+ for (auto& r_example : r_examples) {
+ const FeatureVector& fv = r_example.features;
+ TargetDistribution c_dist = c_model->PredictDistribution(fv);
+ EXPECT_LE(c_dist.Average(), r_example.target_value.value() * 1.05);
+ EXPECT_GE(c_dist.Average(), r_example.target_value.value() * 0.95);
+ TargetDistribution r_dist = r_model->PredictDistribution(fv);
+ EXPECT_LE(r_dist.Average(), r_example.target_value.value() * 1.05);
+ EXPECT_GE(r_dist.Average(), r_example.target_value.value() * 0.95);
+ }
+}
+
+// Run every test above once with nominal features and once with numeric ones.
+INSTANTIATE_TEST_CASE_P(ExtraTreesTest,
+ ExtraTreesTest,
+ testing::ValuesIn({LearningTask::Ordering::kUnordered,
+ LearningTask::Ordering::kNumeric}));
+
+} // namespace learning
+} // namespace media
diff --git a/chromium/media/learning/impl/fisher_iris_dataset.cc b/chromium/media/learning/impl/fisher_iris_dataset.cc
new file mode 100644
index 00000000000..36859787c03
--- /dev/null
+++ b/chromium/media/learning/impl/fisher_iris_dataset.cc
@@ -0,0 +1,198 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/learning/impl/fisher_iris_dataset.h"
+
+#include <vector>
+
+namespace {
+// One labelled iris example. The four measurements are stored as integer
+// tenths (value * 10, truncated to int) so they fit integral FeatureValues;
+// the class name becomes the target value.
+struct IrisExample : public media::learning::LabelledExample {
+ IrisExample(float sepal_length,
+ float sepal_width,
+ float petal_length,
+ float petal_width,
+ const char* class_name) {
+ target_value = media::learning::TargetValue(class_name);
+ features.push_back(
+ media::learning::FeatureValue(static_cast<int>(sepal_length * 10)));
+ features.push_back(
+ media::learning::FeatureValue(static_cast<int>(sepal_width * 10)));
+ features.push_back(
+ media::learning::FeatureValue(static_cast<int>(petal_length * 10)));
+ features.push_back(
+ media::learning::FeatureValue(static_cast<int>(petal_width * 10)));
+ }
+};
+
+std::vector<IrisExample> iris_examples({
+ {5.1, 3.5, 1.4, 0.2, "Iris-setosa"},
+ {4.9, 3.0, 1.4, 0.2, "Iris-setosa"},
+ {4.7, 3.2, 1.3, 0.2, "Iris-setosa"},
+ {4.6, 3.1, 1.5, 0.2, "Iris-setosa"},
+ {5.0, 3.6, 1.4, 0.2, "Iris-setosa"},
+ {5.4, 3.9, 1.7, 0.4, "Iris-setosa"},
+ {4.6, 3.4, 1.4, 0.3, "Iris-setosa"},
+ {5.0, 3.4, 1.5, 0.2, "Iris-setosa"},
+ {4.4, 2.9, 1.4, 0.2, "Iris-setosa"},
+ {4.9, 3.1, 1.5, 0.1, "Iris-setosa"},
+ {5.4, 3.7, 1.5, 0.2, "Iris-setosa"},
+ {4.8, 3.4, 1.6, 0.2, "Iris-setosa"},
+ {4.8, 3.0, 1.4, 0.1, "Iris-setosa"},
+ {4.3, 3.0, 1.1, 0.1, "Iris-setosa"},
+ {5.8, 4.0, 1.2, 0.2, "Iris-setosa"},
+ {5.7, 4.4, 1.5, 0.4, "Iris-setosa"},
+ {5.4, 3.9, 1.3, 0.4, "Iris-setosa"},
+ {5.1, 3.5, 1.4, 0.3, "Iris-setosa"},
+ {5.7, 3.8, 1.7, 0.3, "Iris-setosa"},
+ {5.1, 3.8, 1.5, 0.3, "Iris-setosa"},
+ {5.4, 3.4, 1.7, 0.2, "Iris-setosa"},
+ {5.1, 3.7, 1.5, 0.4, "Iris-setosa"},
+ {4.6, 3.6, 1.0, 0.2, "Iris-setosa"},
+ {5.1, 3.3, 1.7, 0.5, "Iris-setosa"},
+ {4.8, 3.4, 1.9, 0.2, "Iris-setosa"},
+ {5.0, 3.0, 1.6, 0.2, "Iris-setosa"},
+ {5.0, 3.4, 1.6, 0.4, "Iris-setosa"},
+ {5.2, 3.5, 1.5, 0.2, "Iris-setosa"},
+ {5.2, 3.4, 1.4, 0.2, "Iris-setosa"},
+ {4.7, 3.2, 1.6, 0.2, "Iris-setosa"},
+ {4.8, 3.1, 1.6, 0.2, "Iris-setosa"},
+ {5.4, 3.4, 1.5, 0.4, "Iris-setosa"},
+ {5.2, 4.1, 1.5, 0.1, "Iris-setosa"},
+ {5.5, 4.2, 1.4, 0.2, "Iris-setosa"},
+ {4.9, 3.1, 1.5, 0.1, "Iris-setosa"},
+ {5.0, 3.2, 1.2, 0.2, "Iris-setosa"},
+ {5.5, 3.5, 1.3, 0.2, "Iris-setosa"},
+ {4.9, 3.1, 1.5, 0.1, "Iris-setosa"},
+ {4.4, 3.0, 1.3, 0.2, "Iris-setosa"},
+ {5.1, 3.4, 1.5, 0.2, "Iris-setosa"},
+ {5.0, 3.5, 1.3, 0.3, "Iris-setosa"},
+ {4.5, 2.3, 1.3, 0.3, "Iris-setosa"},
+ {4.4, 3.2, 1.3, 0.2, "Iris-setosa"},
+ {5.0, 3.5, 1.6, 0.6, "Iris-setosa"},
+ {5.1, 3.8, 1.9, 0.4, "Iris-setosa"},
+ {4.8, 3.0, 1.4, 0.3, "Iris-setosa"},
+ {5.1, 3.8, 1.6, 0.2, "Iris-setosa"},
+ {4.6, 3.2, 1.4, 0.2, "Iris-setosa"},
+ {5.3, 3.7, 1.5, 0.2, "Iris-setosa"},
+ {5.0, 3.3, 1.4, 0.2, "Iris-setosa"},
+ {7.0, 3.2, 4.7, 1.4, "Iris-versicolor"},
+ {6.4, 3.2, 4.5, 1.5, "Iris-versicolor"},
+ {6.9, 3.1, 4.9, 1.5, "Iris-versicolor"},
+ {5.5, 2.3, 4.0, 1.3, "Iris-versicolor"},
+ {6.5, 2.8, 4.6, 1.5, "Iris-versicolor"},
+ {5.7, 2.8, 4.5, 1.3, "Iris-versicolor"},
+ {6.3, 3.3, 4.7, 1.6, "Iris-versicolor"},
+ {4.9, 2.4, 3.3, 1.0, "Iris-versicolor"},
+ {6.6, 2.9, 4.6, 1.3, "Iris-versicolor"},
+ {5.2, 2.7, 3.9, 1.4, "Iris-versicolor"},
+ {5.0, 2.0, 3.5, 1.0, "Iris-versicolor"},
+ {5.9, 3.0, 4.2, 1.5, "Iris-versicolor"},
+ {6.0, 2.2, 4.0, 1.0, "Iris-versicolor"},
+ {6.1, 2.9, 4.7, 1.4, "Iris-versicolor"},
+ {5.6, 2.9, 3.6, 1.3, "Iris-versicolor"},
+ {6.7, 3.1, 4.4, 1.4, "Iris-versicolor"},
+ {5.6, 3.0, 4.5, 1.5, "Iris-versicolor"},
+ {5.8, 2.7, 4.1, 1.0, "Iris-versicolor"},
+ {6.2, 2.2, 4.5, 1.5, "Iris-versicolor"},
+ {5.6, 2.5, 3.9, 1.1, "Iris-versicolor"},
+ {5.9, 3.2, 4.8, 1.8, "Iris-versicolor"},
+ {6.1, 2.8, 4.0, 1.3, "Iris-versicolor"},
+ {6.3, 2.5, 4.9, 1.5, "Iris-versicolor"},
+ {6.1, 2.8, 4.7, 1.2, "Iris-versicolor"},
+ {6.4, 2.9, 4.3, 1.3, "Iris-versicolor"},
+ {6.6, 3.0, 4.4, 1.4, "Iris-versicolor"},
+ {6.8, 2.8, 4.8, 1.4, "Iris-versicolor"},
+ {6.7, 3.0, 5.0, 1.7, "Iris-versicolor"},
+ {6.0, 2.9, 4.5, 1.5, "Iris-versicolor"},
+ {5.7, 2.6, 3.5, 1.0, "Iris-versicolor"},
+ {5.5, 2.4, 3.8, 1.1, "Iris-versicolor"},
+ {5.5, 2.4, 3.7, 1.0, "Iris-versicolor"},
+ {5.8, 2.7, 3.9, 1.2, "Iris-versicolor"},
+ {6.0, 2.7, 5.1, 1.6, "Iris-versicolor"},
+ {5.4, 3.0, 4.5, 1.5, "Iris-versicolor"},
+ {6.0, 3.4, 4.5, 1.6, "Iris-versicolor"},
+ {6.7, 3.1, 4.7, 1.5, "Iris-versicolor"},
+ {6.3, 2.3, 4.4, 1.3, "Iris-versicolor"},
+ {5.6, 3.0, 4.1, 1.3, "Iris-versicolor"},
+ {5.5, 2.5, 4.0, 1.3, "Iris-versicolor"},
+ {5.5, 2.6, 4.4, 1.2, "Iris-versicolor"},
+ {6.1, 3.0, 4.6, 1.4, "Iris-versicolor"},
+ {5.8, 2.6, 4.0, 1.2, "Iris-versicolor"},
+ {5.0, 2.3, 3.3, 1.0, "Iris-versicolor"},
+ {5.6, 2.7, 4.2, 1.3, "Iris-versicolor"},
+ {5.7, 3.0, 4.2, 1.2, "Iris-versicolor"},
+ {5.7, 2.9, 4.2, 1.3, "Iris-versicolor"},
+ {6.2, 2.9, 4.3, 1.3, "Iris-versicolor"},
+ {5.1, 2.5, 3.0, 1.1, "Iris-versicolor"},
+ {5.7, 2.8, 4.1, 1.3, "Iris-versicolor"},
+ {6.3, 3.3, 6.0, 2.5, "Iris-virginica"},
+ {5.8, 2.7, 5.1, 1.9, "Iris-virginica"},
+ {7.1, 3.0, 5.9, 2.1, "Iris-virginica"},
+ {6.3, 2.9, 5.6, 1.8, "Iris-virginica"},
+ {6.5, 3.0, 5.8, 2.2, "Iris-virginica"},
+ {7.6, 3.0, 6.6, 2.1, "Iris-virginica"},
+ {4.9, 2.5, 4.5, 1.7, "Iris-virginica"},
+ {7.3, 2.9, 6.3, 1.8, "Iris-virginica"},
+ {6.7, 2.5, 5.8, 1.8, "Iris-virginica"},
+ {7.2, 3.6, 6.1, 2.5, "Iris-virginica"},
+ {6.5, 3.2, 5.1, 2.0, "Iris-virginica"},
+ {6.4, 2.7, 5.3, 1.9, "Iris-virginica"},
+ {6.8, 3.0, 5.5, 2.1, "Iris-virginica"},
+ {5.7, 2.5, 5.0, 2.0, "Iris-virginica"},
+ {5.8, 2.8, 5.1, 2.4, "Iris-virginica"},
+ {6.4, 3.2, 5.3, 2.3, "Iris-virginica"},
+ {6.5, 3.0, 5.5, 1.8, "Iris-virginica"},
+ {7.7, 3.8, 6.7, 2.2, "Iris-virginica"},
+ {7.7, 2.6, 6.9, 2.3, "Iris-virginica"},
+ {6.0, 2.2, 5.0, 1.5, "Iris-virginica"},
+ {6.9, 3.2, 5.7, 2.3, "Iris-virginica"},
+ {5.6, 2.8, 4.9, 2.0, "Iris-virginica"},
+ {7.7, 2.8, 6.7, 2.0, "Iris-virginica"},
+ {6.3, 2.7, 4.9, 1.8, "Iris-virginica"},
+ {6.7, 3.3, 5.7, 2.1, "Iris-virginica"},
+ {7.2, 3.2, 6.0, 1.8, "Iris-virginica"},
+ {6.2, 2.8, 4.8, 1.8, "Iris-virginica"},
+ {6.1, 3.0, 4.9, 1.8, "Iris-virginica"},
+ {6.4, 2.8, 5.6, 2.1, "Iris-virginica"},
+ {7.2, 3.0, 5.8, 1.6, "Iris-virginica"},
+ {7.4, 2.8, 6.1, 1.9, "Iris-virginica"},
+ {7.9, 3.8, 6.4, 2.0, "Iris-virginica"},
+ {6.4, 2.8, 5.6, 2.2, "Iris-virginica"},
+ {6.3, 2.8, 5.1, 1.5, "Iris-virginica"},
+ {6.1, 2.6, 5.6, 1.4, "Iris-virginica"},
+ {7.7, 3.0, 6.1, 2.3, "Iris-virginica"},
+ {6.3, 3.4, 5.6, 2.4, "Iris-virginica"},
+ {6.4, 3.1, 5.5, 1.8, "Iris-virginica"},
+ {6.0, 3.0, 4.8, 1.8, "Iris-virginica"},
+ {6.9, 3.1, 5.4, 2.1, "Iris-virginica"},
+ {6.7, 3.1, 5.6, 2.4, "Iris-virginica"},
+ {6.9, 3.1, 5.1, 2.3, "Iris-virginica"},
+ {5.8, 2.7, 5.1, 1.9, "Iris-virginica"},
+ {6.8, 3.2, 5.9, 2.3, "Iris-virginica"},
+ {6.7, 3.3, 5.7, 2.5, "Iris-virginica"},
+ {6.7, 3.0, 5.2, 2.3, "Iris-virginica"},
+ {6.3, 2.5, 5.0, 1.9, "Iris-virginica"},
+ {6.5, 3.0, 5.2, 2.0, "Iris-virginica"},
+ {6.2, 3.4, 5.4, 2.3, "Iris-virginica"},
+ {5.9, 3.0, 5.1, 1.8, "Iris-virginica"},
+});
+
+} // namespace
+
+namespace media {
+namespace learning {
+
+// Copies the file-static |iris_examples| table into |training_data_| once,
+// at construction.
+FisherIrisDataset::FisherIrisDataset() {
+ for (auto& example : iris_examples)
+ training_data_.push_back(example);
+}
+
+FisherIrisDataset::~FisherIrisDataset() = default;
+
+// Returns the cached training data; valid as long as |this| is alive.
+const TrainingData& FisherIrisDataset::GetTrainingData() const {
+ return training_data_;
+}
+
+} // namespace learning
+} // namespace media
diff --git a/chromium/media/learning/impl/fisher_iris_dataset.h b/chromium/media/learning/impl/fisher_iris_dataset.h
new file mode 100644
index 00000000000..09d7c8132d7
--- /dev/null
+++ b/chromium/media/learning/impl/fisher_iris_dataset.h
@@ -0,0 +1,39 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_LEARNING_IMPL_FISHER_IRIS_DATASET_H_
+#define MEDIA_LEARNING_IMPL_FISHER_IRIS_DATASET_H_
+
+#include <vector>
+
+#include "base/memory/ref_counted.h"
+#include "media/learning/common/labelled_example.h"
+
+namespace media {
+namespace learning {
+
+// Classic machine learning dataset.
+//
+// @misc{Dua:2017 ,
+// author = "Dheeru, Dua and Karra Taniskidou, Efi",
+// year = "2017",
+// title = "{UCI} Machine Learning Repository",
+// url = "http://archive.ics.uci.edu/ml",
+// institution = "University of California, Irvine, "
+// "School of Information and Computer Sciences" }
+class FisherIrisDataset {
+ public:
+ FisherIrisDataset();
+ ~FisherIrisDataset();
+
+ // Returns the iris examples as training data. The reference stays valid
+ // for the lifetime of this object.
+ const TrainingData& GetTrainingData() const;
+
+ private:
+ TrainingData training_data_;
+};
+
+} // namespace learning
+} // namespace media
+
+#endif // MEDIA_LEARNING_IMPL_FISHER_IRIS_DATASET_H_
diff --git a/chromium/media/learning/impl/learning_session_impl.cc b/chromium/media/learning/impl/learning_session_impl.cc
index 73dbcf57211..aee1e93cada 100644
--- a/chromium/media/learning/impl/learning_session_impl.cc
+++ b/chromium/media/learning/impl/learning_session_impl.cc
@@ -6,6 +6,7 @@
#include "base/bind.h"
#include "base/logging.h"
+#include "media/learning/impl/distribution_reporter.h"
#include "media/learning/impl/learning_task_controller_impl.h"
namespace media {
@@ -15,7 +16,8 @@ LearningSessionImpl::LearningSessionImpl()
: controller_factory_(
base::BindRepeating([](const LearningTask& task)
-> std::unique_ptr<LearningTaskController> {
- return std::make_unique<LearningTaskControllerImpl>(task);
+ return std::make_unique<LearningTaskControllerImpl>(
+ task, DistributionReporter::Create(task));
})) {}
LearningSessionImpl::~LearningSessionImpl() = default;
@@ -26,7 +28,7 @@ void LearningSessionImpl::SetTaskControllerFactoryCBForTesting(
}
void LearningSessionImpl::AddExample(const std::string& task_name,
- const TrainingExample& example) {
+ const LabelledExample& example) {
auto iter = task_map_.find(task_name);
if (iter != task_map_.end())
iter->second->AddExample(example);
diff --git a/chromium/media/learning/impl/learning_session_impl.h b/chromium/media/learning/impl/learning_session_impl.h
index da8e5ffefb1..80c50fa8595 100644
--- a/chromium/media/learning/impl/learning_session_impl.h
+++ b/chromium/media/learning/impl/learning_session_impl.h
@@ -30,7 +30,7 @@ class COMPONENT_EXPORT(LEARNING_IMPL) LearningSessionImpl
// LearningSession
void AddExample(const std::string& task_name,
- const TrainingExample& example) override;
+ const LabelledExample& example) override;
// Registers |task|, so that calls to AddExample with |task.name| will work.
// This will create a new controller for the task.
diff --git a/chromium/media/learning/impl/learning_session_impl_unittest.cc b/chromium/media/learning/impl/learning_session_impl_unittest.cc
index 10d497995a8..96dae4f14b1 100644
--- a/chromium/media/learning/impl/learning_session_impl_unittest.cc
+++ b/chromium/media/learning/impl/learning_session_impl_unittest.cc
@@ -19,11 +19,11 @@ class LearningSessionImplTest : public testing::Test {
public:
FakeLearningTaskController(const LearningTask& task) {}
- void AddExample(const TrainingExample& example) override {
+ void AddExample(const LabelledExample& example) override {
example_ = example;
}
- TrainingExample example_;
+ LabelledExample example_;
};
using ControllerVector = std::vector<FakeLearningTaskController*>;
@@ -63,11 +63,11 @@ TEST_F(LearningSessionImplTest, ExamplesAreForwardedToCorrectTask) {
session_->RegisterTask(task_0_);
session_->RegisterTask(task_1_);
- TrainingExample example_0({FeatureValue(123), FeatureValue(456)},
+ LabelledExample example_0({FeatureValue(123), FeatureValue(456)},
TargetValue(1234));
session_->AddExample(task_0_.name, example_0);
- TrainingExample example_1({FeatureValue(321), FeatureValue(654)},
+ LabelledExample example_1({FeatureValue(321), FeatureValue(654)},
TargetValue(4321));
session_->AddExample(task_1_.name, example_1);
EXPECT_EQ(task_controllers_[0]->example_, example_0);
diff --git a/chromium/media/learning/impl/learning_task_controller.h b/chromium/media/learning/impl/learning_task_controller.h
index 2d3d7a39a6d..a62f8fc75a6 100644
--- a/chromium/media/learning/impl/learning_task_controller.h
+++ b/chromium/media/learning/impl/learning_task_controller.h
@@ -8,8 +8,8 @@
#include "base/callback.h"
#include "base/component_export.h"
#include "base/macros.h"
+#include "media/learning/common/labelled_example.h"
#include "media/learning/common/learning_task.h"
-#include "media/learning/common/training_example.h"
namespace media {
namespace learning {
@@ -29,7 +29,7 @@ class COMPONENT_EXPORT(LEARNING_IMPL) LearningTaskController {
virtual ~LearningTaskController() = default;
// Receive an example for this task.
- virtual void AddExample(const TrainingExample& example) = 0;
+ virtual void AddExample(const LabelledExample& example) = 0;
private:
DISALLOW_COPY_AND_ASSIGN(LearningTaskController);
diff --git a/chromium/media/learning/impl/learning_task_controller_impl.cc b/chromium/media/learning/impl/learning_task_controller_impl.cc
index c8cb70485da..e78428101f6 100644
--- a/chromium/media/learning/impl/learning_task_controller_impl.cc
+++ b/chromium/media/learning/impl/learning_task_controller_impl.cc
@@ -7,16 +7,82 @@
#include <memory>
#include "base/bind.h"
+#include "media/learning/impl/extra_trees_trainer.h"
+#include "media/learning/impl/random_tree_trainer.h"
namespace media {
namespace learning {
LearningTaskControllerImpl::LearningTaskControllerImpl(
- const LearningTask& task) {}
+ const LearningTask& task,
+ std::unique_ptr<DistributionReporter> reporter)
+ : task_(task),
+ training_data_(std::make_unique<TrainingData>()),
+ reporter_(std::move(reporter)) {
+ switch (task_.model) {
+ case LearningTask::Model::kExtraTrees:
+ trainer_ = std::make_unique<ExtraTreesTrainer>();
+ break;
+ }
+}
+
LearningTaskControllerImpl::~LearningTaskControllerImpl() = default;
-void LearningTaskControllerImpl::AddExample(const TrainingExample& example) {
- // TODO: do something.
+// Records |example|, reports prediction accuracy against the current model
+// (if any), and kicks off retraining once enough new data has accumulated.
+void LearningTaskControllerImpl::AddExample(const LabelledExample& example) {
+ if (training_data_->size() >= task_.max_data_set_size) {
+ // Replace a random example. We don't necessarily want to replace the
+ // oldest, since we don't necessarily want to enforce an ad-hoc recency
+ // constraint here. That's a different issue.
+ (*training_data_)[rng()->Generate(training_data_->size())] = example;
+ } else {
+ training_data_->push_back(example);
+ }
+ // Either way, we have one more example that we haven't used for training yet.
+ num_untrained_examples_++;
+
+ // Once we have a model, see if we'd get |example| correct.
+ if (model_ && reporter_) {
+ TargetDistribution predicted =
+ model_->PredictDistribution(example.features);
+
+ // The observed distribution is just this one example's target value.
+ TargetDistribution observed;
+ observed += example.target_value;
+ reporter_->GetPredictionCallback(observed).Run(predicted);
+ }
+
+ // Can't train more than one model concurrently.
+ if (training_is_in_progress_)
+ return;
+
+ // Train every time we get enough new examples. Note that this works even if
+ // we are replacing old examples rather than adding new ones.
+ // NOTE(review): C-style cast; style guide prefers static_cast<double>.
+ double frac = ((double)num_untrained_examples_) / training_data_->size();
+ if (frac < task_.min_new_data_fraction)
+ return;
+
+ num_untrained_examples_ = 0;
+
+ // TODO(liberato): don't do this if one is in-flight.
+ // NOTE(review): the TODO above appears to be addressed already by the
+ // |training_is_in_progress_| early-return — confirm and remove.
+ TrainedModelCB model_cb =
+ base::BindOnce(&LearningTaskControllerImpl::OnModelTrained, AsWeakPtr());
+ training_is_in_progress_ = true;
+ // Note that this copies the training data, so it's okay if we add more
+ // examples to our copy before this returns.
+ // TODO(liberato): Post to a background task runner, and bind |model_cb| to
+ // the current one. Be careful about ownership if we invalidate |trainer_|
+ // on this thread. Be sure to post destruction to that sequence.
+ trainer_->Train(task_, *training_data_, std::move(model_cb));
+}
+
+// Installs the freshly trained model and allows the next training run.
+void LearningTaskControllerImpl::OnModelTrained(std::unique_ptr<Model> model) {
+ DCHECK(training_is_in_progress_);
+ training_is_in_progress_ = false;
+ model_ = std::move(model);
+}
+
+// Test hook: replaces the training algorithm chosen by the constructor.
+void LearningTaskControllerImpl::SetTrainerForTesting(
+ std::unique_ptr<TrainingAlgorithm> trainer) {
+ trainer_ = std::move(trainer);
 }
} // namespace learning
diff --git a/chromium/media/learning/impl/learning_task_controller_impl.h b/chromium/media/learning/impl/learning_task_controller_impl.h
index 76612e28c73..b2d4a6acdaf 100644
--- a/chromium/media/learning/impl/learning_task_controller_impl.h
+++ b/chromium/media/learning/impl/learning_task_controller_impl.h
@@ -7,20 +7,60 @@
#include <memory>
+#include "base/callback.h"
#include "base/component_export.h"
+#include "base/memory/weak_ptr.h"
+#include "media/learning/impl/distribution_reporter.h"
#include "media/learning/impl/learning_task_controller.h"
+#include "media/learning/impl/random_number_generator.h"
+#include "media/learning/impl/training_algorithm.h"
namespace media {
namespace learning {
+class LearningTaskControllerImplTest;
+
class COMPONENT_EXPORT(LEARNING_IMPL) LearningTaskControllerImpl
- : public LearningTaskController {
+ : public LearningTaskController,
+ public HasRandomNumberGenerator,
+ public base::SupportsWeakPtr<LearningTaskControllerImpl> {
public:
- explicit LearningTaskControllerImpl(const LearningTask& task);
+ LearningTaskControllerImpl(
+ const LearningTask& task,
+ std::unique_ptr<DistributionReporter> reporter = nullptr);
~LearningTaskControllerImpl() override;
// LearningTaskController
- void AddExample(const TrainingExample& example) override;
+ void AddExample(const LabelledExample& example) override;
+
+ private:
+ // Called by |training_cb_| when the model is trained.
+ void OnModelTrained(std::unique_ptr<Model> model);
+
+ void SetTrainerForTesting(std::unique_ptr<TrainingAlgorithm> trainer);
+
+ LearningTask task_;
+
+ // Current batch of examples.
+ std::unique_ptr<TrainingData> training_data_;
+
+ // Most recently trained model, or null.
+ std::unique_ptr<Model> model_;
+
+ // We don't want to have multiple models in flight.
+ bool training_is_in_progress_ = false;
+
+ // Number of examples in |training_data_| that haven't been used for training.
+ // This helps us decide when to train a new model.
+ int num_untrained_examples_ = 0;
+
+ // Training algorithm that we'll use.
+ std::unique_ptr<TrainingAlgorithm> trainer_;
+
+ // Optional reporter for training accuracy.
+ std::unique_ptr<DistributionReporter> reporter_;
+
+ friend class LearningTaskControllerImplTest;
};
} // namespace learning
diff --git a/chromium/media/learning/impl/learning_task_controller_impl_unittest.cc b/chromium/media/learning/impl/learning_task_controller_impl_unittest.cc
new file mode 100644
index 00000000000..32cfa214fe5
--- /dev/null
+++ b/chromium/media/learning/impl/learning_task_controller_impl_unittest.cc
@@ -0,0 +1,143 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/learning/impl/learning_task_controller_impl.h"
+
+#include "base/bind.h"
+#include "media/learning/impl/distribution_reporter.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+namespace learning {
+
+// Fixture providing fake reporter / model / trainer so the controller's
+// record-report-retrain loop can be observed synchronously.
+class LearningTaskControllerImplTest : public testing::Test {
+ public:
+ // Reporter that just counts predictions and how many matched exactly.
+ class FakeDistributionReporter : public DistributionReporter {
+ public:
+ FakeDistributionReporter(const LearningTask& task)
+ : DistributionReporter(task) {}
+
+ protected:
+ void OnPrediction(TargetDistribution observed,
+ TargetDistribution predicted) override {
+ num_reported_++;
+ if (observed == predicted)
+ num_correct_++;
+ }
+
+ public:
+ int num_reported_ = 0;
+ int num_correct_ = 0;
+ };
+
+ // Model that always predicts a constant.
+ class FakeModel : public Model {
+ public:
+ FakeModel(TargetValue target) : target_(target) {}
+
+ // Model
+ TargetDistribution PredictDistribution(
+ const FeatureVector& features) override {
+ TargetDistribution dist;
+ dist += target_;
+ return dist;
+ }
+
+ private:
+ // The value we predict.
+ TargetValue target_;
+ };
+
+ // Trainer that produces a FakeModel synchronously and counts invocations.
+ class FakeTrainer : public TrainingAlgorithm {
+ public:
+ // |num_models| is where we'll record how many models we've trained.
+ // |target_value| is the prediction that our trained model will make.
+ FakeTrainer(int* num_models, TargetValue target_value)
+ : num_models_(num_models), target_value_(target_value) {}
+ ~FakeTrainer() override {}
+
+ void Train(const LearningTask& task,
+ const TrainingData& training_data,
+ TrainedModelCB model_cb) override {
+ (*num_models_)++;
+ std::move(model_cb).Run(std::make_unique<FakeModel>(target_value_));
+ }
+
+ private:
+ int* num_models_ = nullptr;
+ TargetValue target_value_;
+ };
+
+ LearningTaskControllerImplTest()
+ : predicted_target_(123), not_predicted_target_(456) {
+ // Don't require too many training examples per report.
+ task_.max_data_set_size = 20;
+ task_.min_new_data_fraction = 0.1;
+
+ std::unique_ptr<FakeDistributionReporter> reporter =
+ std::make_unique<FakeDistributionReporter>(task_);
+ reporter_raw_ = reporter.get();
+
+ controller_ = std::make_unique<LearningTaskControllerImpl>(
+ task_, std::move(reporter));
+ controller_->SetTrainerForTesting(
+ std::make_unique<FakeTrainer>(&num_models_, predicted_target_));
+ }
+
+ // Number of models that we trained.
+ int num_models_ = 0;
+
+ // Two distinct targets.
+ const TargetValue predicted_target_;
+ const TargetValue not_predicted_target_;
+
+ // Unowned; the reporter itself is owned by |controller_|.
+ FakeDistributionReporter* reporter_raw_ = nullptr;
+
+ LearningTask task_;
+ std::unique_ptr<LearningTaskControllerImpl> controller_;
+};
+
+// Exercises the retraining cadence (every example at first, then every
+// min_new_data_fraction of the set) and the accuracy reporting.
+TEST_F(LearningTaskControllerImplTest, AddingExamplesTrainsModelAndReports) {
+ LabelledExample example;
+
+ // Up to the first 1/training_fraction examples should train on each example.
+ // Make each of the examples agree on |predicted_target_|.
+ example.target_value = predicted_target_;
+ int count = static_cast<int>(1.0 / task_.min_new_data_fraction);
+ for (int i = 0; i < count; i++) {
+ controller_->AddExample(example);
+ EXPECT_EQ(num_models_, i + 1);
+ // All examples except the first should be reported as correct. For the
+ // first, there's no model to test against.
+ EXPECT_EQ(reporter_raw_->num_reported_, i);
+ EXPECT_EQ(reporter_raw_->num_correct_, i);
+ }
+ // The next |count| should train every other one.
+ for (int i = 0; i < count; i++) {
+ controller_->AddExample(example);
+ EXPECT_EQ(num_models_, count + (i + 1) / 2);
+ }
+
+ // The next |count| should be the same, since we've reached the max training
+ // set size.
+ for (int i = 0; i < count; i++) {
+ controller_->AddExample(example);
+ EXPECT_EQ(num_models_, count + count / 2 + (i + 1) / 2);
+ }
+
+ // We should have reported results for each except the first. All of them
+ // should be correct, since there's only one target so far.
+ EXPECT_EQ(reporter_raw_->num_reported_, count * 3 - 1);
+ EXPECT_EQ(reporter_raw_->num_correct_, count * 3 - 1);
+
+ // Adding a value that doesn't match should report one more attempt, with an
+ // incorrect prediction.
+ example.target_value = not_predicted_target_;
+ controller_->AddExample(example);
+ EXPECT_EQ(reporter_raw_->num_reported_, count * 3);
+ EXPECT_EQ(reporter_raw_->num_correct_, count * 3 - 1); // Unchanged.
+}
+
+} // namespace learning
+} // namespace media
diff --git a/chromium/media/learning/impl/lookup_table_trainer.cc b/chromium/media/learning/impl/lookup_table_trainer.cc
new file mode 100644
index 00000000000..4c698c71022
--- /dev/null
+++ b/chromium/media/learning/impl/lookup_table_trainer.cc
@@ -0,0 +1,50 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/learning/impl/lookup_table_trainer.h"
+
+#include <map>
+
+#include "base/logging.h"
+
+namespace media {
+namespace learning {
+
+// Model that memorizes the training data: examples are bucketed by their
+// exact feature vector, and each bucket accumulates the target distribution
+// of the examples that landed in it.
+class LookupTable : public Model {
+ public:
+  // Build one bucket per distinct feature vector in |training_data|.
+  // explicit: a single-argument constructor must not allow an implicit
+  // TrainingData -> LookupTable conversion.
+  explicit LookupTable(const TrainingData& training_data) {
+    for (auto& example : training_data)
+      buckets_[example.features] += example;
+  }
+
+  // Model
+  TargetDistribution PredictDistribution(
+      const FeatureVector& instance) override {
+    auto iter = buckets_.find(instance);
+    // A feature vector never seen during training gets an empty
+    // distribution, i.e. "no prediction".
+    if (iter == buckets_.end())
+      return TargetDistribution();
+
+    return iter->second;
+  }
+
+ private:
+  // [exact feature vector] => distribution of targets seen for it.
+  std::map<FeatureVector, TargetDistribution> buckets_;
+};
+
+LookupTableTrainer::LookupTableTrainer() = default;
+
+LookupTableTrainer::~LookupTableTrainer() = default;
+
+// Trains synchronously: |model_cb| is run before Train() returns, on the
+// caller's sequence.  |task| is currently unused; the table memorizes the
+// training data unconditionally.
+void LookupTableTrainer::Train(const LearningTask& task,
+                               const TrainingData& training_data,
+                               TrainedModelCB model_cb) {
+  std::unique_ptr<LookupTable> lookup_table =
+      std::make_unique<LookupTable>(training_data);
+
+  // TODO(liberato): post?
+  std::move(model_cb).Run(std::move(lookup_table));
+}
+
+} // namespace learning
+} // namespace media
diff --git a/chromium/media/learning/impl/lookup_table_trainer.h b/chromium/media/learning/impl/lookup_table_trainer.h
new file mode 100644
index 00000000000..5417c84823d
--- /dev/null
+++ b/chromium/media/learning/impl/lookup_table_trainer.h
@@ -0,0 +1,37 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_LEARNING_IMPL_LOOKUP_TABLE_TRAINER_H_
+#define MEDIA_LEARNING_IMPL_LOOKUP_TABLE_TRAINER_H_
+
+#include <memory>
+#include <vector>
+
+#include "base/component_export.h"
+#include "base/macros.h"
+#include "media/learning/common/learning_task.h"
+#include "media/learning/impl/training_algorithm.h"
+
+namespace media {
+namespace learning {
+
+// Trains a lookup table model.
+// Trains a lookup table model: the model memorizes the training data and
+// predicts the stored target distribution for exactly-matching feature
+// vectors (and an empty distribution otherwise).
+class COMPONENT_EXPORT(LEARNING_IMPL) LookupTableTrainer
+    : public TrainingAlgorithm {
+ public:
+  LookupTableTrainer();
+  ~LookupTableTrainer() override;
+
+  // TrainingAlgorithm
+  void Train(const LearningTask& task,
+             const TrainingData& training_data,
+             TrainedModelCB model_cb) override;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(LookupTableTrainer);
+};
+
+} // namespace learning
+} // namespace media
+
+#endif // MEDIA_LEARNING_IMPL_LOOKUP_TABLE_TRAINER_H_
diff --git a/chromium/media/learning/impl/lookup_table_trainer_unittest.cc b/chromium/media/learning/impl/lookup_table_trainer_unittest.cc
new file mode 100644
index 00000000000..323d69d471e
--- /dev/null
+++ b/chromium/media/learning/impl/lookup_table_trainer_unittest.cc
@@ -0,0 +1,176 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/learning/impl/lookup_table_trainer.h"
+
+#include "base/bind.h"
+#include "base/run_loop.h"
+#include "base/test/scoped_task_environment.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+namespace learning {
+
+class LookupTableTrainerTest : public testing::Test {
+ public:
+  // Trains a model on |data| for |task| and returns it.  The trainer may post
+  // work, so pump the task environment until the callback has fired.
+  std::unique_ptr<Model> Train(const LearningTask& task,
+                               const TrainingData& data) {
+    std::unique_ptr<Model> model;
+    // Use the |task| argument rather than silently ignoring it in favor of
+    // the |task_| member, so callers can train with a customized task.
+    trainer_.Train(
+        task, data,
+        base::BindOnce(
+            [](std::unique_ptr<Model>* model_out,
+               std::unique_ptr<Model> model) { *model_out = std::move(model); },
+            &model));
+    scoped_task_environment_.RunUntilIdle();
+    return model;
+  }
+
+  base::test::ScopedTaskEnvironment scoped_task_environment_;
+
+  LookupTableTrainer trainer_;
+  LearningTask task_;
+};
+
+TEST_F(LookupTableTrainerTest, EmptyTrainingDataWorks) {
+  TrainingData empty;
+  std::unique_ptr<Model> model = Train(task_, empty);
+  EXPECT_NE(model.get(), nullptr);
+  // With no training data, every prediction is the empty distribution.
+  EXPECT_EQ(model->PredictDistribution(FeatureVector()), TargetDistribution());
+}
+
+TEST_F(LookupTableTrainerTest, UniformTrainingDataWorks) {
+  LabelledExample example({FeatureValue(123), FeatureValue(456)},
+                          TargetValue(789));
+  TrainingData training_data;
+  const size_t n_examples = 10;
+  for (size_t i = 0; i < n_examples; i++)
+    training_data.push_back(example);
+  std::unique_ptr<Model> model = Train(task_, training_data);
+
+  // The table should produce a distribution for one value (our target), which
+  // has |n_examples| counts.
+  TargetDistribution distribution =
+      model->PredictDistribution(example.features);
+  EXPECT_EQ(distribution.size(), 1u);
+  EXPECT_EQ(distribution[example.target_value], n_examples);
+}
+
+TEST_F(LookupTableTrainerTest, SimpleSeparableTrainingData) {
+ LabelledExample example_1({FeatureValue(123)}, TargetValue(1));
+ LabelledExample example_2({FeatureValue(456)}, TargetValue(2));
+ TrainingData training_data;
+ training_data.push_back(example_1);
+ training_data.push_back(example_2);
+ std::unique_ptr<Model> model = Train(task_, training_data);
+
+ // Each value should have a distribution with one target value with one count.
+ TargetDistribution distribution =
+ model->PredictDistribution(example_1.features);
+ EXPECT_NE(model.get(), nullptr);
+ EXPECT_EQ(distribution.size(), 1u);
+ EXPECT_EQ(distribution[example_1.target_value], 1u);
+
+ distribution = model->PredictDistribution(example_2.features);
+ EXPECT_EQ(distribution.size(), 1u);
+ EXPECT_EQ(distribution[example_2.target_value], 1u);
+}
+
+TEST_F(LookupTableTrainerTest, ComplexSeparableTrainingData) {
+ // Build a four-feature training set that's completely separable, but one
+ // needs all four features to do it.
+ TrainingData training_data;
+ for (int f1 = 0; f1 < 2; f1++) {
+ for (int f2 = 0; f2 < 2; f2++) {
+ for (int f3 = 0; f3 < 2; f3++) {
+ for (int f4 = 0; f4 < 2; f4++) {
+ // Add two copies of each example.
+ training_data.push_back(
+ LabelledExample({FeatureValue(f1), FeatureValue(f2),
+ FeatureValue(f3), FeatureValue(f4)},
+ TargetValue(f1 * 1 + f2 * 2 + f3 * 4 + f4 * 8)));
+ training_data.push_back(
+ LabelledExample({FeatureValue(f1), FeatureValue(f2),
+ FeatureValue(f3), FeatureValue(f4)},
+ TargetValue(f1 * 1 + f2 * 2 + f3 * 4 + f4 * 8)));
+ }
+ }
+ }
+ }
+
+ std::unique_ptr<Model> model = Train(task_, training_data);
+ EXPECT_NE(model.get(), nullptr);
+
+ // Each example should have a distribution that selects the right value.
+ for (const auto& example : training_data) {
+ TargetDistribution distribution =
+ model->PredictDistribution(example.features);
+ TargetValue singular_max;
+ EXPECT_TRUE(distribution.FindSingularMax(&singular_max));
+ EXPECT_EQ(singular_max, example.target_value);
+ }
+}
+
+TEST_F(LookupTableTrainerTest, UnseparableTrainingData) {
+ LabelledExample example_1({FeatureValue(123)}, TargetValue(1));
+ LabelledExample example_2({FeatureValue(123)}, TargetValue(2));
+ TrainingData training_data;
+ training_data.push_back(example_1);
+ training_data.push_back(example_2);
+ std::unique_ptr<Model> model = Train(task_, training_data);
+ EXPECT_NE(model.get(), nullptr);
+
+ // Each value should have a distribution with two targets with one count each.
+ TargetDistribution distribution =
+ model->PredictDistribution(example_1.features);
+ EXPECT_EQ(distribution.size(), 2u);
+ EXPECT_EQ(distribution[example_1.target_value], 1u);
+ EXPECT_EQ(distribution[example_2.target_value], 1u);
+
+ distribution = model->PredictDistribution(example_2.features);
+ EXPECT_EQ(distribution.size(), 2u);
+ EXPECT_EQ(distribution[example_1.target_value], 1u);
+ EXPECT_EQ(distribution[example_2.target_value], 1u);
+}
+
+TEST_F(LookupTableTrainerTest, UnknownFeatureValueHandling) {
+ // Verify how a previously unseen feature value is handled.
+ LabelledExample example_1({FeatureValue(123)}, TargetValue(1));
+ LabelledExample example_2({FeatureValue(456)}, TargetValue(2));
+ TrainingData training_data;
+ training_data.push_back(example_1);
+ training_data.push_back(example_2);
+
+ std::unique_ptr<Model> model = Train(task_, training_data);
+ TargetDistribution distribution =
+ model->PredictDistribution(FeatureVector({FeatureValue(789)}));
+ // OOV data should return an empty distribution (nominal).
+ EXPECT_EQ(distribution.size(), 0u);
+}
+
+TEST_F(LookupTableTrainerTest, RegressionWithWeightedExamplesWorks) {
+ // Verify that regression results are sane.
+ LabelledExample example_1({FeatureValue(123)}, TargetValue(1));
+ example_1.weight = 50;
+ LabelledExample example_2({FeatureValue(123)}, TargetValue(2));
+ example_2.weight = 200;
+ TrainingData training_data;
+ training_data.push_back(example_1);
+ training_data.push_back(example_2);
+
+ std::unique_ptr<Model> model = Train(task_, training_data);
+ TargetDistribution distribution =
+ model->PredictDistribution(FeatureVector({FeatureValue(123)}));
+ double avg = distribution.Average();
+ const double expected =
+ static_cast<double>(
+ ((example_1.target_value.value() * example_1.weight) +
+ (example_2.target_value.value() * example_2.weight))) /
+ (example_1.weight + example_2.weight);
+ EXPECT_GT(avg, expected * 0.99);
+ EXPECT_LT(avg, expected * 1.01);
+}
+
+} // namespace learning
+} // namespace media
diff --git a/chromium/media/learning/impl/model.h b/chromium/media/learning/impl/model.h
index 9139116b8aa..0950b6c9713 100644
--- a/chromium/media/learning/impl/model.h
+++ b/chromium/media/learning/impl/model.h
@@ -6,7 +6,7 @@
#define MEDIA_LEARNING_IMPL_MODEL_H_
#include "base/component_export.h"
-#include "media/learning/common/training_example.h"
+#include "media/learning/common/labelled_example.h"
#include "media/learning/impl/model.h"
#include "media/learning/impl/target_distribution.h"
@@ -18,10 +18,15 @@ namespace learning {
// can support it.
class COMPONENT_EXPORT(LEARNING_IMPL) Model {
public:
+ // Callback for asynchronous predictions.
+ using PredictionCB = base::OnceCallback<void(TargetDistribution predicted)>;
+
virtual ~Model() = default;
virtual TargetDistribution PredictDistribution(
const FeatureVector& instance) = 0;
+
+ // TODO(liberato): Consider adding an async prediction helper.
};
} // namespace learning
diff --git a/chromium/media/learning/impl/one_hot.cc b/chromium/media/learning/impl/one_hot.cc
new file mode 100644
index 00000000000..b8dab81e142
--- /dev/null
+++ b/chromium/media/learning/impl/one_hot.cc
@@ -0,0 +1,121 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/learning/impl/one_hot.h"
+
+#include <set>
+
+namespace media {
+namespace learning {
+
+OneHotConverter::OneHotConverter(const LearningTask& task,
+                                 const TrainingData& training_data)
+    : converted_task_(task) {
+  // Rebuild the feature descriptions from scratch; each nominal feature
+  // expands into one numeric feature per distinct value seen in
+  // |training_data|.
+  converted_task_.feature_descriptions.clear();
+
+  // One (possibly unset) converter slot per original feature.
+  converters_.resize(task.feature_descriptions.size());
+
+  for (size_t i = 0; i < task.feature_descriptions.size(); i++) {
+    const LearningTask::ValueDescription& feature =
+        task.feature_descriptions[i];
+
+    // If this is already a numeric feature, then we will copy it since
+    // converters_[i] will be unset.
+    if (feature.ordering == LearningTask::Ordering::kNumeric) {
+      converted_task_.feature_descriptions.push_back(feature);
+      continue;
+    }
+
+    ProcessOneFeature(i, feature, training_data);
+  }
+}
+
+OneHotConverter::~OneHotConverter() = default;
+
+// Returns a copy of |training_data| with every example's features re-encoded
+// into the one-hot feature space.  Targets and weights are unchanged.
+TrainingData OneHotConverter::Convert(const TrainingData& training_data) const {
+  TrainingData converted_training_data;
+  for (auto& example : training_data) {
+    // Copy the example (target, weight), then replace its features.
+    LabelledExample converted_example(example);
+    converted_example.features = Convert(example.features);
+    converted_training_data.push_back(converted_example);
+  }
+
+  return converted_training_data;
+}
+
+// Re-encodes |feature_vector| into the one-hot feature space.  Numeric
+// features are copied through; each nominal feature becomes a run of 0/1
+// features with at most one 1.
+FeatureVector OneHotConverter::Convert(
+    const FeatureVector& feature_vector) const {
+  FeatureVector converted_feature_vector;
+  converted_feature_vector.reserve(converted_task_.feature_descriptions.size());
+  for (size_t i = 0; i < converters_.size(); i++) {
+    auto& converter = converters_[i];
+    if (!converter) {
+      // There's no conversion needed for this feature, since it was numeric.
+      converted_feature_vector.push_back(feature_vector[i]);
+      continue;
+    }
+
+    // Convert this feature to a one-hot vector.
+    const size_t vector_size = converter->size();
+
+    // Start with a zero-hot vector.  Is that a thing?
+    for (size_t v = 0; v < vector_size; v++)
+      converted_feature_vector.push_back(FeatureValue(0));
+
+    // Set the appropriate entry to 1, if any.  Otherwise, this is a
+    // previously unseen value and all of them should be zero.
+    auto iter = converter->find(feature_vector[i]);
+    if (iter != converter->end())
+      converted_feature_vector[iter->second] = FeatureValue(1);
+  }
+
+  return converted_feature_vector;
+}
+
+// Builds the value -> one-hot-index map for original feature |index| and
+// appends one numeric feature description per distinct value to
+// |converted_task_|.
+void OneHotConverter::ProcessOneFeature(
+    size_t index,
+    const LearningTask::ValueDescription& original_description,
+    const TrainingData& training_data) {
+  // Collect all the distinct values for |index|.
+  std::set<Value> values;
+  for (auto& example : training_data) {
+    // |index| must be a valid position, i.e. size() > index.  DCHECK_GE would
+    // also accept size() == index, which would read out of bounds below.
+    DCHECK_GT(example.features.size(), index);
+    values.insert(example.features[index]);
+  }
+
+  // We let the set's ordering be the one-hot value.  It doesn't really matter
+  // as long as we don't change it once we pick it.
+  ValueVectorIndexMap value_map;
+  // Vector index that should be set to one for each distinct value.  This will
+  // start at the next feature in the adjusted task.
+  size_t next_vector_index = converted_task_.feature_descriptions.size();
+
+  // Add one feature for each value, and construct a map from value to the
+  // feature index that should be 1 when the feature takes that value.
+  for (auto& value : values) {
+    LearningTask::ValueDescription adjusted_description = original_description;
+    adjusted_description.ordering = LearningTask::Ordering::kNumeric;
+    converted_task_.feature_descriptions.push_back(adjusted_description);
+    // |value| will be converted into a 1 in the |next_vector_index|-th
+    // feature.
+    value_map[value] = next_vector_index++;
+  }
+
+  // Record |values| for the |index|-th original feature.
+  converters_[index] = std::move(value_map);
+}
+
+ConvertingModel::ConvertingModel(std::unique_ptr<OneHotConverter> converter,
+                                 std::unique_ptr<Model> model)
+    : converter_(std::move(converter)), model_(std::move(model)) {}
+ConvertingModel::~ConvertingModel() = default;
+
+// Re-encodes |instance| with the one-hot converter, then delegates the
+// prediction to the wrapped model.
+TargetDistribution ConvertingModel::PredictDistribution(
+    const FeatureVector& instance) {
+  FeatureVector converted_instance = converter_->Convert(instance);
+  return model_->PredictDistribution(converted_instance);
+}
+
+} // namespace learning
+} // namespace media
diff --git a/chromium/media/learning/impl/one_hot.h b/chromium/media/learning/impl/one_hot.h
new file mode 100644
index 00000000000..0a3f479b721
--- /dev/null
+++ b/chromium/media/learning/impl/one_hot.h
@@ -0,0 +1,83 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_LEARNING_IMPL_ONE_HOT_H_
+#define MEDIA_LEARNING_IMPL_ONE_HOT_H_
+
+#include <map>
+#include <memory>
+#include <vector>
+
+#include "base/component_export.h"
+#include "base/macros.h"
+#include "base/optional.h"
+#include "media/learning/common/labelled_example.h"
+#include "media/learning/common/learning_task.h"
+#include "media/learning/common/value.h"
+#include "media/learning/impl/model.h"
+
+namespace media {
+namespace learning {
+
+// Converter class that memorizes a mapping from nominal features to numeric
+// features with a one-hot encoding.
+// Converter class that memorizes a mapping from nominal features to numeric
+// features with a one-hot encoding.
+class COMPONENT_EXPORT(LEARNING_IMPL) OneHotConverter {
+ public:
+  // Build a one-hot converter for all nominal features in |task|, using the
+  // values found in |training_data|.
+  OneHotConverter(const LearningTask& task, const TrainingData& training_data);
+  ~OneHotConverter();
+
+  // Return the adjusted LearningTask, in which every feature is numeric
+  // (nominal features have been expanded into one-hot numeric features).
+  const LearningTask& converted_task() const { return converted_task_; }
+
+  // Convert |training_data| into the one-hot feature space.
+  TrainingData Convert(const TrainingData& training_data) const;
+
+  // Convert |feature_vector| to match the one-hot model.
+  FeatureVector Convert(const FeatureVector& feature_vector) const;
+
+ private:
+  // Build a converter for original feature |index|.
+  void ProcessOneFeature(
+      size_t index,
+      const LearningTask::ValueDescription& original_description,
+      const TrainingData& training_data);
+
+  // Learning task with the feature descriptions adjusted for the one-hot model.
+  LearningTask converted_task_;
+
+  // [value] == vector index that should be 1 in the one-hot vector.
+  using ValueVectorIndexMap = std::map<Value, size_t>;
+
+  // [original task feature index] = optional converter for it.  If the feature
+  // was kNumeric to begin with, then there will be no converter.
+  std::vector<base::Optional<ValueVectorIndexMap>> converters_;
+
+  DISALLOW_COPY_AND_ASSIGN(OneHotConverter);
+};
+
+// Model that uses |Converter| to convert instances before sending them to the
+// underlying model.
+// Model that uses a OneHotConverter to convert instances before sending them
+// to the underlying model.  Owns both the converter and the wrapped model.
+class COMPONENT_EXPORT(LEARNING_IMPL) ConvertingModel : public Model {
+ public:
+  ConvertingModel(std::unique_ptr<OneHotConverter> converter,
+                  std::unique_ptr<Model> model);
+  ~ConvertingModel() override;
+
+  // Model
+  TargetDistribution PredictDistribution(
+      const FeatureVector& instance) override;
+
+ private:
+  std::unique_ptr<OneHotConverter> converter_;
+  std::unique_ptr<Model> model_;
+
+  DISALLOW_COPY_AND_ASSIGN(ConvertingModel);
+};
+
+} // namespace learning
+} // namespace media
+
+#endif // MEDIA_LEARNING_IMPL_ONE_HOT_H_
diff --git a/chromium/media/learning/impl/one_hot_unittest.cc b/chromium/media/learning/impl/one_hot_unittest.cc
new file mode 100644
index 00000000000..53d884cfdea
--- /dev/null
+++ b/chromium/media/learning/impl/one_hot_unittest.cc
@@ -0,0 +1,118 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/learning/impl/one_hot.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+namespace learning {
+
+class OneHotTest : public testing::Test {
+ public:
+ OneHotTest() {}
+};
+
+TEST_F(OneHotTest, EmptyLearningTaskWorks) {
+ LearningTask empty_task("EmptyTask", LearningTask::Model::kExtraTrees, {},
+ LearningTask::ValueDescription({"target"}));
+ TrainingData empty_training_data;
+ OneHotConverter one_hot(empty_task, empty_training_data);
+ EXPECT_EQ(one_hot.converted_task().feature_descriptions.size(), 0u);
+}
+
+TEST_F(OneHotTest, SimpleConversionWorks) {
+ LearningTask task("SimpleTask", LearningTask::Model::kExtraTrees,
+ {{"feature1", LearningTask::Ordering::kUnordered}},
+ LearningTask::ValueDescription({"target"}));
+ TrainingData training_data;
+ training_data.push_back({{FeatureValue("abc")}, TargetValue(0)});
+ training_data.push_back({{FeatureValue("def")}, TargetValue(1)});
+ training_data.push_back({{FeatureValue("ghi")}, TargetValue(2)});
+ // Push a duplicate as the last one.
+ training_data.push_back({{FeatureValue("def")}, TargetValue(3)});
+ OneHotConverter one_hot(task, training_data);
+ // There should be one feature for each distinct value in features[0].
+ const size_t adjusted_feature_size = 3u;
+ EXPECT_EQ(one_hot.converted_task().feature_descriptions.size(),
+ adjusted_feature_size);
+ EXPECT_EQ(one_hot.converted_task().feature_descriptions[0].ordering,
+ LearningTask::Ordering::kNumeric);
+ EXPECT_EQ(one_hot.converted_task().feature_descriptions[1].ordering,
+ LearningTask::Ordering::kNumeric);
+ EXPECT_EQ(one_hot.converted_task().feature_descriptions[2].ordering,
+ LearningTask::Ordering::kNumeric);
+
+ TrainingData converted_training_data = one_hot.Convert(training_data);
+ EXPECT_EQ(converted_training_data.size(), training_data.size());
+ // Exactly one feature should be 1.
+ for (size_t i = 0; i < converted_training_data.size(); i++) {
+ EXPECT_EQ(converted_training_data[i].features[0].value() +
+ converted_training_data[i].features[1].value() +
+ converted_training_data[i].features[2].value(),
+ 1);
+ }
+
+ // Each of the first three training examples should have distinct vectors.
+ for (size_t f = 0; f < adjusted_feature_size; f++) {
+ int num_ones = 0;
+ // 3u is the number of distinct examples. [3] is a duplicate.
+ for (size_t i = 0; i < 3u; i++)
+ num_ones += converted_training_data[i].features[f].value();
+ EXPECT_EQ(num_ones, 1);
+ }
+
+ // The features of examples 1 and 3 should be the same.
+ for (size_t f = 0; f < adjusted_feature_size; f++) {
+ EXPECT_EQ(converted_training_data[1].features[f],
+ converted_training_data[3].features[f]);
+ }
+
+ // Converting each feature vector should result in the same one as before.
+ for (size_t f = 0; f < adjusted_feature_size; f++) {
+ FeatureVector converted_feature_vector =
+ one_hot.Convert(training_data[f].features);
+ EXPECT_EQ(converted_feature_vector, converted_training_data[f].features);
+ }
+}
+
+TEST_F(OneHotTest, NumericsAreNotConverted) {
+ LearningTask task("SimpleTask", LearningTask::Model::kExtraTrees,
+ {{"feature1", LearningTask::Ordering::kNumeric}},
+ LearningTask::ValueDescription({"target"}));
+ OneHotConverter one_hot(task, TrainingData());
+ EXPECT_EQ(one_hot.converted_task().feature_descriptions.size(), 1u);
+ EXPECT_EQ(one_hot.converted_task().feature_descriptions[0].ordering,
+ LearningTask::Ordering::kNumeric);
+
+ TrainingData training_data;
+ training_data.push_back({{FeatureValue(5)}, TargetValue(0)});
+ TrainingData converted_training_data = one_hot.Convert(training_data);
+ EXPECT_EQ(converted_training_data[0], training_data[0]);
+
+ FeatureVector converted_feature_vector =
+ one_hot.Convert(training_data[0].features);
+ EXPECT_EQ(converted_feature_vector, training_data[0].features);
+}
+
+TEST_F(OneHotTest, UnknownValuesAreZeroHot) {
+ LearningTask task("SimpleTask", LearningTask::Model::kExtraTrees,
+ {{"feature1", LearningTask::Ordering::kUnordered}},
+ LearningTask::ValueDescription({"target"}));
+ TrainingData training_data;
+ training_data.push_back({{FeatureValue("abc")}, TargetValue(0)});
+ training_data.push_back({{FeatureValue("def")}, TargetValue(1)});
+ training_data.push_back({{FeatureValue("ghi")}, TargetValue(2)});
+ OneHotConverter one_hot(task, training_data);
+
+ // Send in an unknown value, and see if it becomes {0, 0, 0}.
+ FeatureVector converted_feature_vector =
+ one_hot.Convert(FeatureVector({FeatureValue("jkl")}));
+ EXPECT_EQ(converted_feature_vector.size(), 3u);
+ for (size_t i = 0; i < converted_feature_vector.size(); i++)
+ EXPECT_EQ(converted_feature_vector[i], FeatureValue(0));
+}
+
+} // namespace learning
+} // namespace media
diff --git a/chromium/media/learning/impl/random_number_generator.cc b/chromium/media/learning/impl/random_number_generator.cc
new file mode 100644
index 00000000000..fb503bf2d52
--- /dev/null
+++ b/chromium/media/learning/impl/random_number_generator.cc
@@ -0,0 +1,60 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/learning/impl/random_number_generator.h"
+
+#include <limits>
+
+#include "base/logging.h"
+#include "base/rand_util.h"
+
+namespace media {
+
+class BaseRandomNumberGenerator : public RandomNumberGenerator {
+ public:
+ uint64_t Generate() override { return base::RandUint64(); }
+
+ protected:
+ ~BaseRandomNumberGenerator() override = default;
+};
+
+// static
+RandomNumberGenerator* RandomNumberGenerator::Default() {
+  // Function-local static initialization is guaranteed thread-safe in C++11,
+  // which resolves the locking concern a bare check-then-set on a global
+  // pointer would have.  The instance is intentionally leaked, since it must
+  // outlive all callers.
+  static BaseRandomNumberGenerator* rng = new BaseRandomNumberGenerator();
+  return rng;
+}
+
+uint64_t RandomNumberGenerator::Generate(uint64_t range) {
+  // A |range| of zero would divide by zero below; there is no valid result.
+  DCHECK_GT(range, 0u);
+
+  // Don't just % Generate(), since that wouldn't be uniform anymore.  Instead,
+  // reject samples from the incomplete final interval at the top of the
+  // uint64_t range.  This is copied from base/rand_util.cc .
+  uint64_t max_acceptable_value =
+      (std::numeric_limits<uint64_t>::max() / range) * range - 1;
+
+  uint64_t value;
+  do {
+    value = Generate();
+  } while (value > max_acceptable_value);
+
+  return value % range;
+}
+
+double RandomNumberGenerator::GenerateDouble(double range) {
+ return base::BitsToOpenEndedUnitInterval(Generate()) * range;
+}
+
+// If |rng| is null, fall back to the shared process-wide default generator so
+// that rng() is never null.
+HasRandomNumberGenerator::HasRandomNumberGenerator(RandomNumberGenerator* rng)
+    : rng_(rng ? rng : RandomNumberGenerator::Default()) {}
+
+HasRandomNumberGenerator::~HasRandomNumberGenerator() = default;
+
+void HasRandomNumberGenerator::SetRandomNumberGeneratorForTesting(
+    RandomNumberGenerator* rng) {
+  rng_ = rng;
+}
+
+} // namespace media
diff --git a/chromium/media/learning/impl/random_number_generator.h b/chromium/media/learning/impl/random_number_generator.h
new file mode 100644
index 00000000000..e4ee413dad9
--- /dev/null
+++ b/chromium/media/learning/impl/random_number_generator.h
@@ -0,0 +1,62 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_LEARNING_IMPL_RANDOM_NUMBER_GENERATOR_H_
+#define MEDIA_LEARNING_IMPL_RANDOM_NUMBER_GENERATOR_H_
+
+#include <cstdint>
+#include <memory>
+
+#include "base/component_export.h"
+#include "base/macros.h"
+
+namespace media {
+
+// Class to encapsulate a random number generator with an implementation for
+// tests that provides repeatable, platform-independent sequences.
+// Class to encapsulate a random number generator with an implementation for
+// tests that provides repeatable, platform-independent sequences.
+class COMPONENT_EXPORT(LEARNING_IMPL) RandomNumberGenerator {
+ public:
+  RandomNumberGenerator() = default;
+  virtual ~RandomNumberGenerator() = default;
+
+  // Return a shared random generator that will return unpredictable values in
+  // the //base/rand_util.h sense.  See TestRandomNumberGenerator if you'd
+  // like one that's more predictable for tests.
+  static RandomNumberGenerator* Default();
+
+  // Taken from rand_util.h
+  // Returns a random number in range [0, UINT64_MAX]. Thread-safe.
+  virtual uint64_t Generate() = 0;
+
+  // Returns a random number in range [0, range). Thread-safe.
+  uint64_t Generate(uint64_t range);
+
+  // Returns a floating point number in the range [0, range). Thread-safe.
+  // This isn't an overload of Generate() to be sure that one isn't surprised by
+  // the result.
+  double GenerateDouble(double range);
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(RandomNumberGenerator);
+};
+
+// Handy mix-in class if you want to support rng injection.
+class COMPONENT_EXPORT(LEARNING_IMPL) HasRandomNumberGenerator {
+ public:
+ // If |rng| is null, then we'll create a new one as a convenience.
+ explicit HasRandomNumberGenerator(RandomNumberGenerator* rng = nullptr);
+ ~HasRandomNumberGenerator();
+
+ void SetRandomNumberGeneratorForTesting(RandomNumberGenerator* rng);
+
+ protected:
+ RandomNumberGenerator* rng() const { return rng_; }
+
+ private:
+ RandomNumberGenerator* rng_ = nullptr;
+};
+
+} // namespace media
+
+#endif // MEDIA_LEARNING_IMPL_RANDOM_NUMBER_GENERATOR_H_
diff --git a/chromium/media/learning/impl/random_number_generator_unittest.cc b/chromium/media/learning/impl/random_number_generator_unittest.cc
new file mode 100644
index 00000000000..880d7dad92c
--- /dev/null
+++ b/chromium/media/learning/impl/random_number_generator_unittest.cc
@@ -0,0 +1,102 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "media/learning/impl/test_random_number_generator.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+class RandomNumberGeneratorTest : public testing::Test {
+ public:
+ RandomNumberGeneratorTest() : rng_(0) {}
+
+ void GenerateAndVerify(base::RepeatingCallback<int64_t()> generate,
+ int64_t lower_inclusive,
+ int64_t upper_inclusive) {
+ const size_t n = 10000;
+ std::map<int64_t, size_t> counts;
+ for (size_t i = 0; i < n; i++)
+ counts[generate.Run()]++;
+ // Verify that it's uniform over |lower_inclusive| and
+ // |upper_inclusive|, at least approximately.
+ size_t min_counts = counts[lower_inclusive];
+ size_t max_counts = min_counts;
+ size_t total_counts = min_counts;
+ for (int64_t i = lower_inclusive + 1; i <= upper_inclusive; i++) {
+ size_t c = counts[i];
+ if (c < min_counts)
+ min_counts = c;
+
+ if (c > max_counts)
+ max_counts = c;
+
+ total_counts += c;
+ }
+
+ // See if the min and the max are too far from the expected counts.
+ // Note that this will catch only egregious problems. Also note that
+ // random variation might actually exceed these limits fairly often.
+ // It's only because the test rng has no variation that we know it
+ // won't happen. However, there might be reasonable implementation
+ // changes that trip these tests (deterministically!); manual
+ // verification of the result is needed in those cases.
+ //
+ // These just catch things like "rng range is off by one", etc.
+ size_t expected_counts = n / (upper_inclusive - lower_inclusive + 1);
+ EXPECT_LT(max_counts, expected_counts * 1.05);
+ EXPECT_GT(max_counts, expected_counts * 0.95);
+ EXPECT_LT(min_counts, expected_counts * 1.05);
+ EXPECT_GT(min_counts, expected_counts * 0.95);
+
+ // Verify that the counts between the limits accounts for all of them.
+ // Otherwise, some rng values were out of range.
+ EXPECT_EQ(total_counts, n);
+ }
+
+ // We use TestRandomNumberGenerator, since we really want to test the base
+ // class method implementations with a predictable random source.
+ TestRandomNumberGenerator rng_;
+};
+
+TEST_F(RandomNumberGeneratorTest, ExclusiveUpTo) {
+ // Try Generate with something that's not a divisor of max_int, to try to
+ // catch any bias. I.e., an implementation like "rng % range" should fail
+ // this test.
+ //
+ // Unfortunately, it won't.
+ //
+ // With uint64_t random values, it's unlikely that we would ever notice such a
+ // problem. For example, a range of size three would just remove ~three from
+ // the upper range of the rng, and it's unlikely that we'd ever pick the three
+ // highest values anyway. If, instead, we make |range| really big, then we're
+ // not going to sample enough points to notice the deviation from uniform.
+ //
+ // However, we still look for issues like "off by one".
+ const uint64_t range = 5;
+ GenerateAndVerify(base::BindRepeating(
+ [](RandomNumberGenerator* rng, uint64_t range) {
+ return static_cast<int64_t>(rng->Generate(range));
+ },
+ &rng_, range),
+ 0, range - 1);
+}
+
+TEST_F(RandomNumberGeneratorTest, DoublesStayInRange) {
+ const double limit = 1000.5;
+ int num_non_integer = 0;
+ for (int i = 0; i < 1000; i++) {
+ double v = rng_.GenerateDouble(limit);
+ EXPECT_GE(v, 0.);
+ EXPECT_LT(v, limit);
+ // Also count how many non-integers we get.
+ num_non_integer += (v != static_cast<int>(v));
+ }
+
+ // Expect a lot of non-integers.
+ EXPECT_GE(num_non_integer, 900);
+}
+
+} // namespace media
diff --git a/chromium/media/learning/impl/random_tree_trainer.cc b/chromium/media/learning/impl/random_tree_trainer.cc
index ea908458a80..3f5713d1e22 100644
--- a/chromium/media/learning/impl/random_tree_trainer.cc
+++ b/chromium/media/learning/impl/random_tree_trainer.cc
@@ -8,18 +8,12 @@
#include "base/bind.h"
#include "base/logging.h"
+#include "base/optional.h"
+#include "base/threading/sequenced_task_runner_handle.h"
namespace media {
namespace learning {
-// static
-TrainingAlgorithmCB RandomTreeTrainer::GetTrainingAlgorithmCB() {
- return base::BindRepeating(
- [](TrainingData training_data, TrainedModelCB model_cb) {
- std::move(model_cb).Run(RandomTreeTrainer().Train(training_data));
- });
-}
-
RandomTreeTrainer::Split::Split() = default;
RandomTreeTrainer::Split::Split(int index) : split_index(index) {}
@@ -31,28 +25,68 @@ RandomTreeTrainer::Split::~Split() = default;
RandomTreeTrainer::Split& RandomTreeTrainer::Split::operator=(Split&& rhs) =
default;
-RandomTreeTrainer::Split::BranchInfo::BranchInfo(
- scoped_refptr<TrainingDataStorage> storage)
- : training_data(std::move(storage)) {}
+RandomTreeTrainer::Split::BranchInfo::BranchInfo() = default;
RandomTreeTrainer::Split::BranchInfo::BranchInfo(BranchInfo&& rhs) = default;
RandomTreeTrainer::Split::BranchInfo::~BranchInfo() = default;
struct InteriorNode : public Model {
- InteriorNode(int split_index) : split_index_(split_index) {}
+ InteriorNode(const LearningTask& task,
+ int split_index,
+ FeatureValue split_point)
+ : split_index_(split_index),
+ rt_unknown_value_handling_(task.rt_unknown_value_handling),
+ ordering_(task.feature_descriptions[split_index].ordering),
+ split_point_(split_point) {}
// Model
TargetDistribution PredictDistribution(
const FeatureVector& features) override {
- auto iter = children_.find(features[split_index_]);
- // If we've never seen this feature value, then make no prediction.
- if (iter == children_.end())
- return TargetDistribution();
+ // Figure out what feature value we should use for the split.
+ FeatureValue f;
+ switch (ordering_) {
+ case LearningTask::Ordering::kUnordered:
+ // Use the nominal value directly.
+ f = features[split_index_];
+ break;
+ case LearningTask::Ordering::kNumeric:
+ // Use 0 for "<=" and 1 for ">".
+ f = FeatureValue(features[split_index_] > split_point_);
+ break;
+ }
+
+ auto iter = children_.find(f);
+
+ // If we've never seen this feature value, then average all our branches.
+ // This is an attempt to mimic one-hot encoding, where we'll take the zero
+ // branch but it depends on the tree structure which of the one-hot values
+ // we're choosing.
+ if (iter == children_.end()) {
+ switch (rt_unknown_value_handling_) {
+ case LearningTask::RTUnknownValueHandling::kEmptyDistribution:
+ return TargetDistribution();
+ case LearningTask::RTUnknownValueHandling::kUseAllSplits:
+ return PredictDistributionWithMissingValues(features);
+ }
+ }
return iter->second->PredictDistribution(features);
}
+ TargetDistribution PredictDistributionWithMissingValues(
+ const FeatureVector& features) {
+ TargetDistribution total;
+ for (auto& child_pair : children_) {
+ TargetDistribution predicted =
+ child_pair.second->PredictDistribution(features);
+ // TODO(liberato): Normalize? Weight?
+ total += predicted;
+ }
+
+ return total;
+ }
+
// Add |child| has the node for feature value |v|.
void AddChild(FeatureValue v, std::unique_ptr<Model> child) {
DCHECK_EQ(children_.count(v), 0u);
@@ -63,12 +97,28 @@ struct InteriorNode : public Model {
// Feature value that we split on.
int split_index_ = -1;
base::flat_map<FeatureValue, std::unique_ptr<Model>> children_;
+
+ // How we handle unknown values.
+ LearningTask::RTUnknownValueHandling rt_unknown_value_handling_;
+
+ // How is our feature value ordered?
+ LearningTask::Ordering ordering_;
+
+ // For kNumeric features, this is the split point.
+ FeatureValue split_point_;
};
struct LeafNode : public Model {
- LeafNode(const TrainingData& training_data) {
- for (const TrainingExample* example : training_data)
- distribution_ += example->target_value;
+ LeafNode(const TrainingData& training_data,
+ const std::vector<size_t> training_idx,
+ LearningTask::Ordering ordering) {
+ for (size_t idx : training_idx)
+ distribution_ += training_data[idx];
+
+ // Note that we don't treat numeric targets any differently. We want to
+ // weight the leaf by the number of examples, so replacing it with an
+ // average would just introduce rounding errors. One might as well take the
+ // average of the final distribution.
}
// TreeNode
@@ -80,38 +130,125 @@ struct LeafNode : public Model {
TargetDistribution distribution_;
};
-RandomTreeTrainer::RandomTreeTrainer() = default;
+RandomTreeTrainer::RandomTreeTrainer(RandomNumberGenerator* rng)
+ : HasRandomNumberGenerator(rng) {}
+
+RandomTreeTrainer::~RandomTreeTrainer() {}
+
+void RandomTreeTrainer::Train(const LearningTask& task,
+ const TrainingData& training_data,
+ TrainedModelCB model_cb) {
+ // Start with all the training data.
+ std::vector<size_t> training_idx;
+ training_idx.reserve(training_data.size());
+ for (size_t idx = 0; idx < training_data.size(); idx++)
+ training_idx.push_back(idx);
-RandomTreeTrainer::~RandomTreeTrainer() = default;
+ // It's a little odd that we don't post training. Perhaps we should.
+ auto model = Train(task, training_data, training_idx);
+ base::SequencedTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, base::BindOnce(std::move(model_cb), std::move(model)));
+}
std::unique_ptr<Model> RandomTreeTrainer::Train(
- const TrainingData& training_data) {
- if (training_data.empty())
- return std::make_unique<InteriorNode>(-1);
+ const LearningTask& task,
+ const TrainingData& training_data,
+ const std::vector<size_t>& training_idx) {
+ if (training_data.empty()) {
+ return std::make_unique<LeafNode>(training_data, std::vector<size_t>(),
+ LearningTask::Ordering::kUnordered);
+ }
+
+ DCHECK_EQ(task.feature_descriptions.size(), training_data[0].features.size());
- return Build(training_data, FeatureSet());
+ // Start with all features unused.
+ FeatureSet unused_set;
+ for (size_t idx = 0; idx < task.feature_descriptions.size(); idx++)
+ unused_set.insert(idx);
+
+ return Build(task, training_data, training_idx, unused_set);
}
std::unique_ptr<Model> RandomTreeTrainer::Build(
+ const LearningTask& task,
const TrainingData& training_data,
- const FeatureSet& used_set) {
- DCHECK(training_data.size());
+ const std::vector<size_t>& training_idx,
+ const FeatureSet& unused_set) {
+ DCHECK_GT(training_idx.size(), 0u);
+
+ // TODO: enforce a minimum number of samples. ExtraTrees uses 2 for
+ // classification, and 5 for regression.
+
+ // Remove any constant attributes in |training_data| from |unused_set|. Also
+ // check if our training data has a constant target value. For both features
+ // and the target value, if the Optional has a value then it's the singular
+ // value that we've found so far. If we find a second one, then we'll clear
+ // the Optional.
+ base::Optional<TargetValue> target_value(
+ training_data[training_idx[0]].target_value);
+ std::vector<base::Optional<FeatureValue>> feature_values;
+ feature_values.resize(training_data[0].features.size());
+ for (size_t feature_idx : unused_set) {
+ feature_values[feature_idx] =
+ training_data[training_idx[0]].features[feature_idx];
+ }
+ for (size_t idx : training_idx) {
+ const LabelledExample& example = training_data[idx];
+ // Record this target value to see if there is more than one. We skip the
+ // insertion if we've already determined that it's not constant.
+ if (target_value && target_value != example.target_value)
+ target_value.reset();
+
+ // For all features in |unused_set|, see if it's a constant in our subset of
+ // the training data.
+ for (size_t feature_idx : unused_set) {
+ auto& value = feature_values[feature_idx];
+ if (value && *value != example.features[feature_idx])
+ value.reset();
+ }
+ }
- // TODO(liberato): Does it help if we refuse to split without an info gain?
- Split best_potential_split;
+ // Is the output constant in |training_data|? If so, then generate a leaf.
+ // If we're not normalizing leaves, then this matters since this training data
+ // might be split across multiple leaves.
+ if (target_value) {
+ return std::make_unique<LeafNode>(training_data, training_idx,
+ task.target_description.ordering);
+ }
+
+ // Remove any constant features from the unused set, so that we don't try to
+ // split on them. It would work, but it would be trivially useless. We also
+ // don't want to use one of our potential splits on it.
+ FeatureSet new_unused_set = unused_set;
+ for (size_t feature_idx : unused_set) {
+ auto& value = feature_values[feature_idx];
+ if (value)
+ new_unused_set.erase(feature_idx);
+ }
// Select the feature subset to consider at this leaf.
- // TODO(liberato): subset.
- FeatureSet feature_candidates;
- for (size_t i = 0; i < training_data[0]->features.size(); i++) {
- if (used_set.find(i) != used_set.end())
- continue;
- feature_candidates.insert(i);
+ FeatureSet feature_candidates = new_unused_set;
+ // TODO(liberato): Let our caller override this.
+ const size_t features_per_split =
+ std::max(static_cast<int>(sqrt(feature_candidates.size())), 3);
+ // Note that it's okay if there are fewer features left; we'll select all of
+ // them instead.
+ while (feature_candidates.size() > features_per_split) {
+ // Remove a random feature.
+ size_t which = rng()->Generate(feature_candidates.size());
+ auto iter = feature_candidates.begin();
+ for (; which; which--, iter++)
+ ;
+ feature_candidates.erase(iter);
}
+ // TODO(liberato): Does it help if we refuse to split without an info gain?
+ Split best_potential_split;
+
// Find the best split among the candidates that we have.
for (int i : feature_candidates) {
- Split potential_split = ConstructSplit(training_data, i);
+ Split potential_split =
+ ConstructSplit(task, training_data, training_idx, i);
if (potential_split.nats_remaining < best_potential_split.nats_remaining) {
best_potential_split = std::move(potential_split);
}
@@ -122,65 +259,189 @@ std::unique_ptr<Model> RandomTreeTrainer::Build(
// but all had the same value). Either way, we should end up with a leaf.
if (best_potential_split.branch_infos.size() < 2) {
// Stop when there is no more tree.
- return std::make_unique<LeafNode>(training_data);
+ return std::make_unique<LeafNode>(training_data, training_idx,
+ task.target_description.ordering);
}
// Build an interior node
- std::unique_ptr<InteriorNode> node =
- std::make_unique<InteriorNode>(best_potential_split.split_index);
-
- // Don't let the subtree use this feature.
- FeatureSet new_used_set(used_set);
- new_used_set.insert(best_potential_split.split_index);
+ std::unique_ptr<InteriorNode> node = std::make_unique<InteriorNode>(
+ task, best_potential_split.split_index, best_potential_split.split_point);
+
+ // Don't let the subtree use this feature if this is nominal split, since
+ // there's nothing left to split. For numeric splits, we might want to split
+ // it further. Note that if there is only one branch for this split, then
+ // we returned a leaf anyway.
+ if (task.feature_descriptions[best_potential_split.split_index].ordering ==
+ LearningTask::Ordering::kUnordered) {
+ DCHECK(new_unused_set.find(best_potential_split.split_index) !=
+ new_unused_set.end());
+ new_unused_set.erase(best_potential_split.split_index);
+ }
for (auto& branch_iter : best_potential_split.branch_infos) {
node->AddChild(branch_iter.first,
- Build(branch_iter.second.training_data, new_used_set));
+ Build(task, training_data, branch_iter.second.training_idx,
+ new_unused_set));
}
return node;
}
RandomTreeTrainer::Split RandomTreeTrainer::ConstructSplit(
+ const LearningTask& task,
const TrainingData& training_data,
- int index) {
+ const std::vector<size_t>& training_idx,
+ int split_index) {
// We should not be given a training set of size 0, since there's no need to
// check an empty split.
- DCHECK_GT(training_data.size(), 0u);
+ DCHECK_GT(training_idx.size(), 0u);
- Split split(index);
+ Split split(split_index);
+ base::Optional<FeatureValue> split_point;
+
+ // TODO(liberato): Consider removing nominal feature support and RF. That
+ // would make this code somewhat simpler.
+
+ // For a numeric split, find the split point. Otherwise, we'll split on every
+ // nominal value that this feature has in |training_data|.
+ if (task.feature_descriptions[split_index].ordering ==
+ LearningTask::Ordering::kNumeric) {
+ split_point =
+ FindNumericSplitPoint(split.split_index, training_data, training_idx);
+ split.split_point = *split_point;
+ }
// Find the split's feature values and construct the training set for each.
// I think we want to iterate on the underlying vector, and look up the int in
// the training data directly.
- for (const TrainingExample* example : training_data) {
- // Get the value of the |index|-th feature for
- FeatureValue v_i = example->features[split.split_index];
+ double total_weight = 0.;
+ for (size_t idx : training_idx) {
+ const LabelledExample& example = training_data[idx];
+ total_weight += example.weight;
+
+ // Get the value of the |index|-th feature for |example|.
+ FeatureValue v_i = example.features[split.split_index];
+
+ // Figure out what value this example would use for splitting. For nominal,
+ // it's just |v_i|. For numeric, it's whether |v_i| is <= the split point
+ // or not (0 for <=, 1 for >).
+ FeatureValue split_feature;
+ if (split_point)
+ split_feature = FeatureValue(v_i > *split_point);
+ else
+ split_feature = v_i;
// Add |v_i| to the right training set. Remember that emplace will do
// nothing if the key already exists.
- auto result = split.branch_infos.emplace(
- v_i, Split::BranchInfo(training_data.storage()));
+ auto result =
+ split.branch_infos.emplace(split_feature, Split::BranchInfo());
auto iter = result.first;
Split::BranchInfo& branch_info = iter->second;
- branch_info.training_data.push_back(example);
- branch_info.class_counts[example->target_value]++;
+ branch_info.training_idx.push_back(idx);
+ branch_info.target_distribution += example;
}
+ // Figure out how good / bad this split is.
+ switch (task.target_description.ordering) {
+ case LearningTask::Ordering::kUnordered:
+ ComputeNominalSplitScore(&split, total_weight);
+ break;
+ case LearningTask::Ordering::kNumeric:
+ ComputeNumericSplitScore(&split, total_weight);
+ break;
+ }
+
+ return split;
+}
+
+void RandomTreeTrainer::ComputeNominalSplitScore(Split* split,
+ double total_weight) {
// Compute the nats given that we're at this node.
- split.nats_remaining = 0;
- for (auto& info_iter : split.branch_infos) {
+ split->nats_remaining = 0;
+ for (auto& info_iter : split->branch_infos) {
Split::BranchInfo& branch_info = info_iter.second;
- const int total_counts = branch_info.training_data.size();
- for (auto& iter : branch_info.class_counts) {
- double p = ((double)iter.second) / total_counts;
- split.nats_remaining -= p * log(p);
+ const double total_counts = branch_info.target_distribution.total_counts();
+ // |p_branch| is the probability of following this branch.
+ const double p_branch = total_counts / total_weight;
+ for (auto& iter : branch_info.target_distribution) {
+ double p = iter.second / total_counts;
+ // p*log(p) is the expected nats if the answer is |iter|. We multiply
+ // that by the probability of being in this bucket at all.
+ split->nats_remaining -= (p * log(p)) * p_branch;
}
}
+}
- return split;
+void RandomTreeTrainer::ComputeNumericSplitScore(Split* split,
+ double total_weight) {
+ // Compute the nats given that we're at this node.
+ split->nats_remaining = 0;
+ for (auto& info_iter : split->branch_infos) {
+ Split::BranchInfo& branch_info = info_iter.second;
+
+ const double total_counts = branch_info.target_distribution.total_counts();
+ // |p_branch| is the probability of following this branch.
+ const double p_branch = total_counts / total_weight;
+
+ // Compute the average at this node. Note that we have no idea if the leaf
+ // node would actually use an average, but really it should match. It would
+ // be really nice if we could compute the value (or TargetDistribution) as
+ // part of computing the split, and have somebody just hand that target
+ // distribution to the leaf if it ends up as one.
+ double average = branch_info.target_distribution.Average();
+
+ for (auto& iter : branch_info.target_distribution) {
+ // Compute the squared error for all |iter.second| counts that each have a
+ // value of |iter.first|, when this leaf approximates them as |average|.
+ double sq_err = (iter.first.value() - average) *
+ (iter.first.value() - average) * iter.second;
+ split->nats_remaining += sq_err * p_branch;
+ }
+ }
+}
+
+FeatureValue RandomTreeTrainer::FindNumericSplitPoint(
+ size_t split_index,
+ const TrainingData& training_data,
+ const std::vector<size_t>& training_idx) {
+ // We should not be given a training set of size 0, since there's no need to
+ // check an empty split.
+ DCHECK_GT(training_idx.size(), 0u);
+
+ // We should either (a) choose the single best split point given all our
+ // training data (i.e., choosing between the splits that are equally between
+ // adjacent feature values), or (b) choose the best split point by drawing
+ // uniformly over the range that contains our feature values. (a) is
+ // appropriate with RandomForest, while (b) is appropriate with ExtraTrees.
+ FeatureValue v_min = training_data[training_idx[0]].features[split_index];
+ FeatureValue v_max = training_data[training_idx[0]].features[split_index];
+ for (size_t idx : training_idx) {
+ const LabelledExample& example = training_data[idx];
+ // Get the value of the |split_index|-th feature for
+ FeatureValue v_i = example.features[split_index];
+ if (v_i < v_min)
+ v_min = v_i;
+
+ if (v_i > v_max)
+ v_max = v_i;
+ }
+
+ FeatureValue v_split;
+ if (v_max == v_min) {
+ // Pick |v_split| to return a trivial split, so that this ends up as a
+ // leaf node anyway.
+ v_split = v_max;
+ } else {
+ // Choose a random split point. Note that we want to end up with two
+ // buckets, so we don't have a trivial split. By picking [v_min, v_max),
+ // |v_min| will always be in one bucket and |v_max| will always not be.
+ v_split = FeatureValue(
+ rng()->GenerateDouble(v_max.value() - v_min.value()) + v_min.value());
+ }
+
+ return v_split;
}
} // namespace learning
diff --git a/chromium/media/learning/impl/random_tree_trainer.h b/chromium/media/learning/impl/random_tree_trainer.h
index 57bd6d44f05..0c764bc1ccc 100644
--- a/chromium/media/learning/impl/random_tree_trainer.h
+++ b/chromium/media/learning/impl/random_tree_trainer.h
@@ -13,6 +13,8 @@
#include "base/component_export.h"
#include "base/containers/flat_map.h"
#include "base/macros.h"
+#include "media/learning/common/learning_task.h"
+#include "media/learning/impl/random_number_generator.h"
#include "media/learning/impl/training_algorithm.h"
namespace media {
@@ -40,6 +42,22 @@ namespace learning {
// target values that ended up in each group. The index with the best score is
// chosen for the split.
//
+// For nominal features, we split the feature into all of its nominal values.
+// This is somewhat nonstandard; one would normally convert to one-hot numeric
+// features first. See OneHotConverter if you'd like to do this.
+//
+// For numeric features, we choose a split point uniformly at random between its
+// min and max values in the training data. We do this because it's suitable
+// for extra trees. RandomForest trees want to select the best split point for
+// each feature, rather than uniformly. Either way, of course, we choose the
+// best split among the (feature, split point) pairs we're considering.
+//
+// Also note that for one-hot features, these are the same thing. So, this
+// implementation is suitable for extra trees with numeric (possibly one hot)
+// features, or RF with one-hot nominal features. Note that non-one-hot nominal
+// features probably work fine with RF too. Numeric, non-binary features don't
+// work with RF, unless one changes the split point selection.
+//
// The training algorithm then recurses to build child nodes. One child node is
// created for each observed value of the |i|-th feature in the training set.
// The child node is trained using the subset of the training set that shares
@@ -56,17 +74,25 @@ namespace learning {
// TODO(liberato): Right now, it not-so-randomly selects from the entire set.
// TODO(liberato): consider PRF or other simplified approximations.
// TODO(liberato): separate Model and TrainingAlgorithm. This is the latter.
-class COMPONENT_EXPORT(LEARNING_IMPL) RandomTreeTrainer {
+class COMPONENT_EXPORT(LEARNING_IMPL) RandomTreeTrainer
+ : public TrainingAlgorithm,
+ public HasRandomNumberGenerator {
public:
- RandomTreeTrainer();
- ~RandomTreeTrainer();
+ explicit RandomTreeTrainer(RandomNumberGenerator* rng = nullptr);
+ ~RandomTreeTrainer() override;
- // Return a callback that can be used to train a random tree.
- static TrainingAlgorithmCB GetTrainingAlgorithmCB();
-
- std::unique_ptr<Model> Train(const TrainingData& examples);
+ // Train on all examples. Calls |model_cb| with the trained model, which
+ // won't happen before this returns.
+ void Train(const LearningTask& task,
+ const TrainingData& examples,
+ TrainedModelCB model_cb) override;
private:
+ // Train on the subset |training_idx|.
+ std::unique_ptr<Model> Train(const LearningTask& task,
+ const TrainingData& examples,
+ const std::vector<size_t>& training_idx);
+
// Set of feature indices.
using FeatureSet = std::set<int>;
@@ -83,6 +109,9 @@ class COMPONENT_EXPORT(LEARNING_IMPL) RandomTreeTrainer {
// Feature index to split on.
size_t split_index = 0;
+ // For numeric splits, branch 0 is <= |split_point|, and 1 is > .
+ FeatureValue split_point;
+
// Expected nats needed to compute the class, given that we're at this
// node in the tree.
// "nat" == entropy measured with natural log rather than base-2.
@@ -90,7 +119,7 @@ class COMPONENT_EXPORT(LEARNING_IMPL) RandomTreeTrainer {
// Per-branch (i.e. per-child node) information about this split.
struct BranchInfo {
- explicit BranchInfo(scoped_refptr<TrainingDataStorage> storage);
+ explicit BranchInfo();
BranchInfo(const BranchInfo& rhs) = delete;
BranchInfo(BranchInfo&& rhs);
~BranchInfo();
@@ -98,14 +127,15 @@ class COMPONENT_EXPORT(LEARNING_IMPL) RandomTreeTrainer {
BranchInfo& operator=(const BranchInfo& rhs) = delete;
BranchInfo& operator=(BranchInfo&& rhs) = delete;
- // Training set for this branch of the split.
- TrainingData training_data;
+ // Training set for this branch of the split. |training_idx| holds the
+ // indices that we're using out of our training data.
+ std::vector<size_t> training_idx;
// Number of occurances of each target value in |training_data| along this
// branch of the split.
// This is a flat_map since we're likely to have a very small (e.g.,
// "true / "false") number of targets.
- base::flat_map<TargetValue, int> class_counts;
+ TargetDistribution target_distribution;
};
// [feature value at this split] = info about which examples take this
@@ -117,11 +147,28 @@ class COMPONENT_EXPORT(LEARNING_IMPL) RandomTreeTrainer {
// Build this node from |training_data|. |used_set| is the set of features
// that we already used higher in the tree.
- std::unique_ptr<Model> Build(const TrainingData& training_data,
+ std::unique_ptr<Model> Build(const LearningTask& task,
+ const TrainingData& training_data,
+ const std::vector<size_t>& training_idx,
const FeatureSet& used_set);
// Compute and return a split of |training_data| on the |index|-th feature.
- Split ConstructSplit(const TrainingData& training_data, int index);
+ Split ConstructSplit(const LearningTask& task,
+ const TrainingData& training_data,
+ const std::vector<size_t>& training_idx,
+ int index);
+
+ // Fill in |nats_remaining| for |split| for a nominal target. |total_weight|
+ // is the total weight of all instances coming into this split.
+ void ComputeNominalSplitScore(Split* split, double total_weight);
+
+ // Fill in |nats_remaining| for |split| for a numeric target.
+ void ComputeNumericSplitScore(Split* split, double total_weight);
+
+ // Compute the split point for |training_data| for a numeric feature.
+ FeatureValue FindNumericSplitPoint(size_t index,
+ const TrainingData& training_data,
+ const std::vector<size_t>& training_idx);
DISALLOW_COPY_AND_ASSIGN(RandomTreeTrainer);
};
diff --git a/chromium/media/learning/impl/random_tree_trainer_unittest.cc b/chromium/media/learning/impl/random_tree_trainer_unittest.cc
index 5c976bc3151..8edfe956420 100644
--- a/chromium/media/learning/impl/random_tree_trainer_unittest.cc
+++ b/chromium/media/learning/impl/random_tree_trainer_unittest.cc
@@ -7,36 +7,66 @@
#include "base/bind.h"
#include "base/run_loop.h"
#include "base/test/scoped_task_environment.h"
+#include "media/learning/impl/test_random_number_generator.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace media {
namespace learning {
-class RandomTreeTest : public testing::Test {
+class RandomTreeTest : public testing::TestWithParam<LearningTask::Ordering> {
public:
- RandomTreeTest() : storage_(base::MakeRefCounted<TrainingDataStorage>()) {}
+ RandomTreeTest()
+ : rng_(0),
+ trainer_(&rng_),
+ ordering_(GetParam()) {}
+
+ // Set up |task_| to have |n| features with the given ordering.
+ void SetupFeatures(size_t n) {
+ for (size_t i = 0; i < n; i++) {
+ LearningTask::ValueDescription desc;
+ desc.ordering = ordering_;
+ task_.feature_descriptions.push_back(desc);
+ }
+ }
+
+ std::unique_ptr<Model> Train(const LearningTask& task,
+ const TrainingData& data) {
+ std::unique_ptr<Model> model;
+ trainer_.Train(
+ task_, data,
+ base::BindOnce(
+ [](std::unique_ptr<Model>* model_out,
+ std::unique_ptr<Model> model) { *model_out = std::move(model); },
+ &model));
+ scoped_task_environment_.RunUntilIdle();
+ return model;
+ }
base::test::ScopedTaskEnvironment scoped_task_environment_;
+ TestRandomNumberGenerator rng_;
RandomTreeTrainer trainer_;
- scoped_refptr<TrainingDataStorage> storage_;
+ LearningTask task_;
+ // Feature ordering.
+ LearningTask::Ordering ordering_;
};
-TEST_F(RandomTreeTest, EmptyTrainingDataWorks) {
- TrainingData empty(storage_);
- std::unique_ptr<Model> model = trainer_.Train(empty);
+TEST_P(RandomTreeTest, EmptyTrainingDataWorks) {
+ TrainingData empty;
+ std::unique_ptr<Model> model = Train(task_, empty);
EXPECT_NE(model.get(), nullptr);
EXPECT_EQ(model->PredictDistribution(FeatureVector()), TargetDistribution());
}
-TEST_F(RandomTreeTest, UniformTrainingDataWorks) {
- TrainingExample example({FeatureValue(123), FeatureValue(456)},
+TEST_P(RandomTreeTest, UniformTrainingDataWorks) {
+ SetupFeatures(2);
+ LabelledExample example({FeatureValue(123), FeatureValue(456)},
TargetValue(789));
- const int n_examples = 10;
- for (int i = 0; i < n_examples; i++)
- storage_->push_back(example);
- TrainingData training_data(storage_, storage_->begin(), storage_->end());
- std::unique_ptr<Model> model = trainer_.Train(training_data);
+ TrainingData training_data;
+ const size_t n_examples = 10;
+ for (size_t i = 0; i < n_examples; i++)
+ training_data.push_back(example);
+ std::unique_ptr<Model> model = Train(task_, training_data);
// The tree should produce a distribution for one value (our target), which
// has |n_examples| counts.
@@ -46,110 +76,161 @@ TEST_F(RandomTreeTest, UniformTrainingDataWorks) {
EXPECT_EQ(distribution[example.target_value], n_examples);
}
-TEST_F(RandomTreeTest, UniformTrainingDataWorksWithCallback) {
- TrainingExample example({FeatureValue(123), FeatureValue(456)},
- TargetValue(789));
- const int n_examples = 10;
- for (int i = 0; i < n_examples; i++)
- storage_->push_back(example);
- TrainingData training_data(storage_, storage_->begin(), storage_->end());
-
- // Construct a TrainedModelCB that will store the model locally.
- std::unique_ptr<Model> model;
- TrainedModelCB model_cb = base::BindOnce(
- [](std::unique_ptr<Model>* model_out, std::unique_ptr<Model> model) {
- *model_out = std::move(model);
- },
- &model);
-
- // Run the trainer.
- RandomTreeTrainer::GetTrainingAlgorithmCB().Run(training_data,
- std::move(model_cb));
- base::RunLoop().RunUntilIdle();
-
- TargetDistribution distribution =
- model->PredictDistribution(example.features);
- EXPECT_EQ(distribution.size(), 1u);
- EXPECT_EQ(distribution[example.target_value], n_examples);
-}
-
-TEST_F(RandomTreeTest, SimpleSeparableTrainingData) {
- TrainingExample example_1({FeatureValue(123)}, TargetValue(1));
- TrainingExample example_2({FeatureValue(456)}, TargetValue(2));
- storage_->push_back(example_1);
- storage_->push_back(example_2);
- TrainingData training_data(storage_, storage_->begin(), storage_->end());
- std::unique_ptr<Model> model = trainer_.Train(training_data);
+TEST_P(RandomTreeTest, SimpleSeparableTrainingData) {
+ SetupFeatures(1);
+ TrainingData training_data;
+ LabelledExample example_1({FeatureValue(123)}, TargetValue(1));
+ LabelledExample example_2({FeatureValue(456)}, TargetValue(2));
+ training_data.push_back(example_1);
+ training_data.push_back(example_2);
+ std::unique_ptr<Model> model = Train(task_, training_data);
// Each value should have a distribution with one target value with one count.
TargetDistribution distribution =
model->PredictDistribution(example_1.features);
EXPECT_NE(model.get(), nullptr);
EXPECT_EQ(distribution.size(), 1u);
- EXPECT_EQ(distribution[example_1.target_value], 1);
+ EXPECT_EQ(distribution[example_1.target_value], 1u);
distribution = model->PredictDistribution(example_2.features);
EXPECT_EQ(distribution.size(), 1u);
- EXPECT_EQ(distribution[example_2.target_value], 1);
+ EXPECT_EQ(distribution[example_2.target_value], 1u);
}
-TEST_F(RandomTreeTest, ComplexSeparableTrainingData) {
+TEST_P(RandomTreeTest, ComplexSeparableTrainingData) {
+ // Building a random tree with numeric splits isn't terribly likely to work,
+ // so just skip it. Entirely randomized splits are just too random. The
+ // RandomForest unittests will test them as part of an ensemble.
+ if (ordering_ == LearningTask::Ordering::kNumeric)
+ return;
+
+ SetupFeatures(4);
// Build a four-feature training set that's completely separable, but one
// needs all four features to do it.
+ TrainingData training_data;
for (int f1 = 0; f1 < 2; f1++) {
for (int f2 = 0; f2 < 2; f2++) {
for (int f3 = 0; f3 < 2; f3++) {
for (int f4 = 0; f4 < 2; f4++) {
- storage_->push_back(
- TrainingExample({FeatureValue(f1), FeatureValue(f2),
- FeatureValue(f3), FeatureValue(f4)},
- TargetValue(f1 * 1 + f2 * 2 + f3 * 4 + f4 * 8)));
+ LabelledExample example(
+ {FeatureValue(f1), FeatureValue(f2), FeatureValue(f3),
+ FeatureValue(f4)},
+ TargetValue(f1 * 1 + f2 * 2 + f3 * 4 + f4 * 8));
+ // Add two copies of each example.
+ training_data.push_back(example);
+ training_data.push_back(example);
}
}
}
}
- // Add two copies of each example. Note that we do this after fully
- // constructing |training_data_storage|, since it may realloc.
- TrainingData training_data(storage_);
- for (auto& example : *storage_) {
- training_data.push_back(&example);
- training_data.push_back(&example);
- }
-
- std::unique_ptr<Model> model = trainer_.Train(training_data);
+ std::unique_ptr<Model> model = Train(task_, training_data);
EXPECT_NE(model.get(), nullptr);
- // Each example should have a distribution by itself, with two counts.
- for (const TrainingExample* example : training_data) {
+ // Each example should have a distribution that selects the right value.
+ for (const LabelledExample& example : training_data) {
TargetDistribution distribution =
- model->PredictDistribution(example->features);
- EXPECT_EQ(distribution.size(), 1u);
- EXPECT_EQ(distribution[example->target_value], 2);
+ model->PredictDistribution(example.features);
+ TargetValue singular_max;
+ EXPECT_TRUE(distribution.FindSingularMax(&singular_max));
+ EXPECT_EQ(singular_max, example.target_value);
}
}
-TEST_F(RandomTreeTest, UnseparableTrainingData) {
- TrainingExample example_1({FeatureValue(123)}, TargetValue(1));
- TrainingExample example_2({FeatureValue(123)}, TargetValue(2));
- storage_->push_back(example_1);
- storage_->push_back(example_2);
- TrainingData training_data(storage_, storage_->begin(), storage_->end());
- std::unique_ptr<Model> model = trainer_.Train(training_data);
+TEST_P(RandomTreeTest, UnseparableTrainingData) {
+ SetupFeatures(1);
+ TrainingData training_data;
+ LabelledExample example_1({FeatureValue(123)}, TargetValue(1));
+ LabelledExample example_2({FeatureValue(123)}, TargetValue(2));
+ training_data.push_back(example_1);
+ training_data.push_back(example_2);
+ std::unique_ptr<Model> model = Train(task_, training_data);
EXPECT_NE(model.get(), nullptr);
// Each value should have a distribution with two targets with one count each.
TargetDistribution distribution =
model->PredictDistribution(example_1.features);
EXPECT_EQ(distribution.size(), 2u);
- EXPECT_EQ(distribution[example_1.target_value], 1);
- EXPECT_EQ(distribution[example_2.target_value], 1);
+ EXPECT_EQ(distribution[example_1.target_value], 1u);
+ EXPECT_EQ(distribution[example_2.target_value], 1u);
distribution = model->PredictDistribution(example_2.features);
EXPECT_EQ(distribution.size(), 2u);
- EXPECT_EQ(distribution[example_1.target_value], 1);
- EXPECT_EQ(distribution[example_2.target_value], 1);
+ EXPECT_EQ(distribution[example_1.target_value], 1u);
+ EXPECT_EQ(distribution[example_2.target_value], 1u);
}
+TEST_P(RandomTreeTest, UnknownFeatureValueHandling) {
+ // Verify how a previously unseen feature value is handled.
+ SetupFeatures(1);
+ TrainingData training_data;
+ LabelledExample example_1({FeatureValue(123)}, TargetValue(1));
+ LabelledExample example_2({FeatureValue(456)}, TargetValue(2));
+ training_data.push_back(example_1);
+ training_data.push_back(example_2);
+
+ task_.rt_unknown_value_handling =
+ LearningTask::RTUnknownValueHandling::kEmptyDistribution;
+ std::unique_ptr<Model> model = Train(task_, training_data);
+ TargetDistribution distribution =
+ model->PredictDistribution(FeatureVector({FeatureValue(789)}));
+ if (ordering_ == LearningTask::Ordering::kUnordered) {
+ // OOV data should return an empty distribution (nominal).
+ EXPECT_EQ(distribution.size(), 0u);
+ } else {
+ // OOV data should end up in the |example_2| bucket, since the feature is
+ // numerically higher.
+ EXPECT_EQ(distribution.size(), 1u);
+ EXPECT_EQ(distribution[example_2.target_value], 1u);
+ }
+
+ task_.rt_unknown_value_handling =
+ LearningTask::RTUnknownValueHandling::kUseAllSplits;
+ model = Train(task_, training_data);
+ distribution = model->PredictDistribution(FeatureVector({FeatureValue(789)}));
+ if (ordering_ == LearningTask::Ordering::kUnordered) {
+ // OOV data should return with the sum of all splits.
+ EXPECT_EQ(distribution.size(), 2u);
+ EXPECT_EQ(distribution[example_1.target_value], 1u);
+ EXPECT_EQ(distribution[example_2.target_value], 1u);
+ } else {
+ // The unknown feature is numerically higher than |example_2|, so we
+ // expect it to fall into that bucket.
+ EXPECT_EQ(distribution.size(), 1u);
+ EXPECT_EQ(distribution[example_2.target_value], 1u);
+ }
+}
+
+TEST_P(RandomTreeTest, NumericFeaturesSplitMultipleTimes) {
+ // Verify that numeric features can be split more than once in the tree.
+ // This should also pass for nominal features, though it's less interesting.
+ SetupFeatures(1);
+ TrainingData training_data;
+ const int feature_mult = 10;
+ for (size_t i = 0; i < 4; i++) {
+ LabelledExample example({FeatureValue(i * feature_mult)}, TargetValue(i));
+ training_data.push_back(example);
+ }
+
+ task_.rt_unknown_value_handling =
+ LearningTask::RTUnknownValueHandling::kEmptyDistribution;
+ std::unique_ptr<Model> model = Train(task_, training_data);
+ for (size_t i = 0; i < 4; i++) {
+ // Get a prediction for the |i|-th feature value.
+ TargetDistribution distribution = model->PredictDistribution(
+ FeatureVector({FeatureValue(i * feature_mult)}));
+ // The distribution should have one count that should be correct. If
+ // the feature isn't split four times, then some feature value will have too
+ // many or too few counts.
+ EXPECT_EQ(distribution.total_counts(), 1u);
+ EXPECT_EQ(distribution[TargetValue(i)], 1u);
+ }
+}
+
+INSTANTIATE_TEST_CASE_P(RandomTreeTest,
+ RandomTreeTest,
+ testing::ValuesIn({LearningTask::Ordering::kUnordered,
+ LearningTask::Ordering::kNumeric}));
+
} // namespace learning
} // namespace media
diff --git a/chromium/media/learning/impl/target_distribution.cc b/chromium/media/learning/impl/target_distribution.cc
index bb8b2c77a2b..2fe271733bc 100644
--- a/chromium/media/learning/impl/target_distribution.cc
+++ b/chromium/media/learning/impl/target_distribution.cc
@@ -4,6 +4,8 @@
#include "media/learning/impl/target_distribution.h"
+#include <sstream>
+
namespace media {
namespace learning {
@@ -38,7 +40,13 @@ TargetDistribution& TargetDistribution::operator+=(const TargetValue& rhs) {
return *this;
}
-int TargetDistribution::operator[](const TargetValue& value) const {
+TargetDistribution& TargetDistribution::operator+=(
+ const LabelledExample& example) {
+ counts_[example.target_value] += example.weight;
+ return *this;
+}
+
+size_t TargetDistribution::operator[](const TargetValue& value) const {
auto iter = counts_.find(value);
if (iter == counts_.end())
return 0;
@@ -46,16 +54,16 @@ int TargetDistribution::operator[](const TargetValue& value) const {
return iter->second;
}
-int& TargetDistribution::operator[](const TargetValue& value) {
+size_t& TargetDistribution::operator[](const TargetValue& value) {
return counts_[value];
}
bool TargetDistribution::FindSingularMax(TargetValue* value_out,
- int* counts_out) const {
+ size_t* counts_out) const {
if (!counts_.size())
return false;
- int unused_counts;
+ size_t unused_counts;
if (!counts_out)
counts_out = &unused_counts;
@@ -77,5 +85,34 @@ bool TargetDistribution::FindSingularMax(TargetValue* value_out,
return singular_max;
}
+double TargetDistribution::Average() const {
+ double total_value = 0.;
+ size_t total_counts = 0;
+ for (auto& iter : counts_) {
+ total_value += iter.first.value() * iter.second;
+ total_counts += iter.second;
+ }
+
+ if (!total_counts)
+ return 0.;
+
+ return total_value / total_counts;
+}
+
+std::string TargetDistribution::ToString() const {
+ std::ostringstream ss;
+ ss << "[";
+ for (auto& entry : counts_)
+ ss << " " << entry.first << ":" << entry.second;
+ ss << " ]";
+
+ return ss.str();
+}
+
+std::ostream& operator<<(std::ostream& out,
+ const media::learning::TargetDistribution& dist) {
+ return out << dist.ToString();
+}
+
} // namespace learning
} // namespace media
diff --git a/chromium/media/learning/impl/target_distribution.h b/chromium/media/learning/impl/target_distribution.h
index 99ca6d40e02..3f4a9cd7274 100644
--- a/chromium/media/learning/impl/target_distribution.h
+++ b/chromium/media/learning/impl/target_distribution.h
@@ -5,9 +5,13 @@
#ifndef MEDIA_LEARNING_IMPL_TARGET_DISTRIBUTION_H_
#define MEDIA_LEARNING_IMPL_TARGET_DISTRIBUTION_H_
+#include <ostream>
+#include <string>
+
#include "base/component_export.h"
#include "base/containers/flat_map.h"
#include "base/macros.h"
+#include "media/learning/common/labelled_example.h"
#include "media/learning/common/value.h"
namespace media {
@@ -15,6 +19,11 @@ namespace learning {
// TargetDistribution of target values.
class COMPONENT_EXPORT(LEARNING_IMPL) TargetDistribution {
+ private:
+ // We use a flat_map since this will often have only one or two TargetValues,
+ // such as "true" or "false".
+ using DistributionMap = base::flat_map<TargetValue, size_t>;
+
public:
TargetDistribution();
TargetDistribution(const TargetDistribution& rhs);
@@ -32,18 +41,25 @@ class COMPONENT_EXPORT(LEARNING_IMPL) TargetDistribution {
// Increment |rhs| by one.
TargetDistribution& operator+=(const TargetValue& rhs);
+ // Increment the distribution by |example|'s target value and weight.
+ TargetDistribution& operator+=(const LabelledExample& example);
+
// Return the number of counts for |value|.
- int operator[](const TargetValue& value) const;
- int& operator[](const TargetValue& value);
+ size_t operator[](const TargetValue& value) const;
+ size_t& operator[](const TargetValue& value);
// Return the total counts in the map.
- int total_counts() const {
- size_t total = 0u;
+ size_t total_counts() const {
+ size_t total = 0.;
for (auto& entry : counts_)
total += entry.second;
return total;
}
+ DistributionMap::const_iterator begin() const { return counts_.begin(); }
+
+ DistributionMap::const_iterator end() const { return counts_.end(); }
+
// Return the number of buckets in the distribution.
// TODO(liberato): Do we want this?
size_t size() const { return counts_.size(); }
@@ -51,21 +67,27 @@ class COMPONENT_EXPORT(LEARNING_IMPL) TargetDistribution {
// Find the singular value with the highest counts, and copy it into
// |value_out| and (optionally) |counts_out|. Returns true if there is a
// singular maximum, else returns false with the out params undefined.
- bool FindSingularMax(TargetValue* value_out, int* counts_out = nullptr) const;
+ bool FindSingularMax(TargetValue* value_out,
+ size_t* counts_out = nullptr) const;
- private:
- // We use a flat_map since this will often have only one or two TargetValues,
- // such as "true" or "false".
- using distribution_map_t = base::flat_map<TargetValue, int>;
+ // Return the average value of the entries in this distribution. Of course,
+ // this only makes sense if the TargetValues can be interpreted as numeric.
+ double Average() const;
- const distribution_map_t& counts() const { return counts_; }
+ std::string ToString() const;
+
+ private:
+ const DistributionMap& counts() const { return counts_; }
// [value] == counts
- distribution_map_t counts_;
+ DistributionMap counts_;
// Allow copy and assign.
};
+COMPONENT_EXPORT(LEARNING_IMPL)
+std::ostream& operator<<(std::ostream& out, const TargetDistribution& dist);
+
} // namespace learning
} // namespace media
diff --git a/chromium/media/learning/impl/target_distribution_unittest.cc b/chromium/media/learning/impl/target_distribution_unittest.cc
index 8ff2b50d0b7..1c7564aa5e3 100644
--- a/chromium/media/learning/impl/target_distribution_unittest.cc
+++ b/chromium/media/learning/impl/target_distribution_unittest.cc
@@ -16,16 +16,16 @@ class TargetDistributionTest : public testing::Test {
TargetDistribution distribution_;
TargetValue value_1;
- const int counts_1 = 100;
+ const size_t counts_1 = 100;
TargetValue value_2;
- const int counts_2 = 10;
+ const size_t counts_2 = 10;
TargetValue value_3;
};
TEST_F(TargetDistributionTest, EmptyTargetDistributionHasZeroCounts) {
- EXPECT_EQ(distribution_.total_counts(), 0);
+ EXPECT_EQ(distribution_.total_counts(), 0u);
}
TEST_F(TargetDistributionTest, AddingCountsWorks) {
@@ -33,8 +33,8 @@ TEST_F(TargetDistributionTest, AddingCountsWorks) {
EXPECT_EQ(distribution_.total_counts(), counts_1);
EXPECT_EQ(distribution_[value_1], counts_1);
distribution_[value_1] += counts_1;
- EXPECT_EQ(distribution_.total_counts(), counts_1 * 2);
- EXPECT_EQ(distribution_[value_1], counts_1 * 2);
+ EXPECT_EQ(distribution_.total_counts(), counts_1 * 2u);
+ EXPECT_EQ(distribution_[value_1], counts_1 * 2u);
}
TEST_F(TargetDistributionTest, MultipleValuesAreSeparate) {
@@ -47,19 +47,19 @@ TEST_F(TargetDistributionTest, MultipleValuesAreSeparate) {
TEST_F(TargetDistributionTest, AddingTargetValues) {
distribution_ += value_1;
- EXPECT_EQ(distribution_.total_counts(), 1);
- EXPECT_EQ(distribution_[value_1], 1);
- EXPECT_EQ(distribution_[value_2], 0);
+ EXPECT_EQ(distribution_.total_counts(), 1u);
+ EXPECT_EQ(distribution_[value_1], 1u);
+ EXPECT_EQ(distribution_[value_2], 0u);
distribution_ += value_1;
- EXPECT_EQ(distribution_.total_counts(), 2);
- EXPECT_EQ(distribution_[value_1], 2);
- EXPECT_EQ(distribution_[value_2], 0);
+ EXPECT_EQ(distribution_.total_counts(), 2u);
+ EXPECT_EQ(distribution_[value_1], 2u);
+ EXPECT_EQ(distribution_[value_2], 0u);
distribution_ += value_2;
- EXPECT_EQ(distribution_.total_counts(), 3);
- EXPECT_EQ(distribution_[value_1], 2);
- EXPECT_EQ(distribution_[value_2], 1);
+ EXPECT_EQ(distribution_.total_counts(), 3u);
+ EXPECT_EQ(distribution_[value_1], 2u);
+ EXPECT_EQ(distribution_[value_2], 1u);
}
TEST_F(TargetDistributionTest, AddingTargetDistributions) {
@@ -81,7 +81,7 @@ TEST_F(TargetDistributionTest, FindSingularMaxFindsTheSingularMax) {
ASSERT_TRUE(counts_1 > counts_2);
TargetValue max_value(0);
- int max_counts = 0;
+ size_t max_counts = 0;
EXPECT_TRUE(distribution_.FindSingularMax(&max_value, &max_counts));
EXPECT_EQ(max_value, value_1);
EXPECT_EQ(max_counts, counts_1);
@@ -95,7 +95,7 @@ TEST_F(TargetDistributionTest,
ASSERT_TRUE(counts_1 > counts_2);
TargetValue max_value(0);
- int max_counts = 0;
+ size_t max_counts = 0;
EXPECT_TRUE(distribution_.FindSingularMax(&max_value, &max_counts));
EXPECT_EQ(max_value, value_2);
EXPECT_EQ(max_counts, counts_1);
@@ -106,7 +106,7 @@ TEST_F(TargetDistributionTest, FindSingularMaxReturnsFalsForNonSingularMax) {
distribution_[value_2] = counts_1;
TargetValue max_value(0);
- int max_counts = 0;
+ size_t max_counts = 0;
EXPECT_FALSE(distribution_.FindSingularMax(&max_value, &max_counts));
}
@@ -118,7 +118,7 @@ TEST_F(TargetDistributionTest, FindSingularMaxIgnoresNonSingularNonMax) {
ASSERT_TRUE(counts_1 > counts_2);
TargetValue max_value(0);
- int max_counts = 0;
+ size_t max_counts = 0;
EXPECT_TRUE(distribution_.FindSingularMax(&max_value, &max_counts));
EXPECT_EQ(max_value, value_1);
EXPECT_EQ(max_counts, counts_1);
@@ -148,5 +148,17 @@ TEST_F(TargetDistributionTest, UnequalDistributionsCompareAsNotEqual) {
EXPECT_FALSE(distribution_ == distribution_2);
}
+TEST_F(TargetDistributionTest, WeightedLabelledExamplesCountCorrectly) {
+ LabelledExample example = {{}, value_1};
+ example.weight = counts_1;
+ distribution_ += example;
+
+ TargetDistribution distribution_2;
+ for (size_t i = 0; i < counts_1; i++)
+ distribution_2 += value_1;
+
+ EXPECT_EQ(distribution_, distribution_2);
+}
+
} // namespace learning
} // namespace media
diff --git a/chromium/media/learning/impl/test_random_number_generator.cc b/chromium/media/learning/impl/test_random_number_generator.cc
new file mode 100644
index 00000000000..d4650af4a2a
--- /dev/null
+++ b/chromium/media/learning/impl/test_random_number_generator.cc
@@ -0,0 +1,26 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/learning/impl/test_random_number_generator.h"
+
+namespace media {
+
+TestRandomNumberGenerator::TestRandomNumberGenerator(uint32_t seed) {
+ seed_ = seed & 0x7fffffff; // make this a non-negative number
+ if (seed_ == 0 || seed_ == M) {
+ seed_ = 1;
+ }
+}
+
+TestRandomNumberGenerator::~TestRandomNumberGenerator() = default;
+
+uint64_t TestRandomNumberGenerator::Generate() {
+ static const uint64_t A = 16807; // bits 14, 8, 7, 5, 2, 1, 0
+ uint64_t result = seed_ = static_cast<int32_t>((seed_ * A) % M);
+ result <<= 32;
+ result |= seed_ = static_cast<int32_t>((seed_ * A) % M);
+ return result;
+}
+
+} // namespace media
diff --git a/chromium/media/learning/impl/test_random_number_generator.h b/chromium/media/learning/impl/test_random_number_generator.h
new file mode 100644
index 00000000000..e4f931b48d4
--- /dev/null
+++ b/chromium/media/learning/impl/test_random_number_generator.h
@@ -0,0 +1,28 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_LEARNING_IMPL_TEST_RANDOM_NUMBER_GENERATOR_H_
+#define MEDIA_LEARNING_IMPL_TEST_RANDOM_NUMBER_GENERATOR_H_
+
+#include "media/learning/impl/random_number_generator.h"
+
+namespace media {
+
+// RandomGenerator implementation that provides repeatable (given a seed)
+// sequences of numbers that are also platform agnostic.
+class TestRandomNumberGenerator : public RandomNumberGenerator {
+ public:
+ explicit TestRandomNumberGenerator(uint32_t seed);
+ ~TestRandomNumberGenerator() override;
+
+ // RandomGenerator
+ uint64_t Generate() override;
+
+ static const uint64_t M = 2147483647L; // 2^31-1
+ int32_t seed_;
+};
+
+} // namespace media
+
+#endif // MEDIA_LEARNING_IMPL_TEST_RANDOM_NUMBER_GENERATOR_H_
diff --git a/chromium/media/learning/impl/training_algorithm.h b/chromium/media/learning/impl/training_algorithm.h
index a0394b04ebf..a5fea2ca347 100644
--- a/chromium/media/learning/impl/training_algorithm.h
+++ b/chromium/media/learning/impl/training_algorithm.h
@@ -8,7 +8,7 @@
#include <memory>
#include "base/callback.h"
-#include "media/learning/common/training_example.h"
+#include "media/learning/common/labelled_example.h"
#include "media/learning/impl/model.h"
namespace media {
@@ -17,12 +17,19 @@ namespace learning {
// Returns a trained model.
using TrainedModelCB = base::OnceCallback<void(std::unique_ptr<Model>)>;
-// A TrainingAlgorithm takes as input training examples, and produces as output
-// a trained model that can be used for prediction.
-// Train a model with on |examples| and return it via |model_cb|.
-using TrainingAlgorithmCB =
- base::RepeatingCallback<void(TrainingData examples,
- TrainedModelCB model_cb)>;
+// Base class for training algorithms.
+class TrainingAlgorithm {
+ public:
+ TrainingAlgorithm() = default;
+ virtual ~TrainingAlgorithm() = default;
+
+ virtual void Train(const LearningTask& task,
+ const TrainingData& training_data,
+ TrainedModelCB model_cb) = 0;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(TrainingAlgorithm);
+};
} // namespace learning
} // namespace media
diff --git a/chromium/media/learning/impl/voting_ensemble.cc b/chromium/media/learning/impl/voting_ensemble.cc
new file mode 100644
index 00000000000..667e739975d
--- /dev/null
+++ b/chromium/media/learning/impl/voting_ensemble.cc
@@ -0,0 +1,26 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/learning/impl/voting_ensemble.h"
+
+namespace media {
+namespace learning {
+
+VotingEnsemble::VotingEnsemble(std::vector<std::unique_ptr<Model>> models)
+ : models_(std::move(models)) {}
+
+VotingEnsemble::~VotingEnsemble() = default;
+
+TargetDistribution VotingEnsemble::PredictDistribution(
+ const FeatureVector& instance) {
+ TargetDistribution distribution;
+
+ for (auto iter = models_.begin(); iter != models_.end(); iter++)
+ distribution += (*iter)->PredictDistribution(instance);
+
+ return distribution;
+}
+
+} // namespace learning
+} // namespace media
diff --git a/chromium/media/learning/impl/voting_ensemble.h b/chromium/media/learning/impl/voting_ensemble.h
new file mode 100644
index 00000000000..2b4bf11a3fa
--- /dev/null
+++ b/chromium/media/learning/impl/voting_ensemble.h
@@ -0,0 +1,38 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_LEARNING_IMPL_VOTING_ENSEMBLE_H_
+#define MEDIA_LEARNING_IMPL_VOTING_ENSEMBLE_H_
+
+#include <memory>
+#include <vector>
+
+#include "base/component_export.h"
+#include "base/macros.h"
+#include "media/learning/impl/model.h"
+
+namespace media {
+namespace learning {
+
+// Ensemble classifier. Takes multiple models and returns an aggregate of the
+// individual predictions.
+class COMPONENT_EXPORT(LEARNING_IMPL) VotingEnsemble : public Model {
+ public:
+ VotingEnsemble(std::vector<std::unique_ptr<Model>> models);
+ ~VotingEnsemble() override;
+
+ // Model
+ TargetDistribution PredictDistribution(
+ const FeatureVector& instance) override;
+
+ private:
+ std::vector<std::unique_ptr<Model>> models_;
+
+ DISALLOW_COPY_AND_ASSIGN(VotingEnsemble);
+};
+
+} // namespace learning
+} // namespace media
+
+#endif // MEDIA_LEARNING_IMPL_VOTING_ENSEMBLE_H_
diff --git a/chromium/media/learning/mojo/mojo_learning_session_impl.cc b/chromium/media/learning/mojo/mojo_learning_session_impl.cc
index 1523f454977..3cfb9789ff6 100644
--- a/chromium/media/learning/mojo/mojo_learning_session_impl.cc
+++ b/chromium/media/learning/mojo/mojo_learning_session_impl.cc
@@ -20,7 +20,7 @@ void MojoLearningSessionImpl::Bind(mojom::LearningSessionRequest request) {
}
void MojoLearningSessionImpl::AddExample(mojom::LearningTaskType task_type,
- const TrainingExample& example) {
+ const LabelledExample& example) {
// TODO(liberato): Convert |task_type| into a task name.
std::string task_name("no_task");
diff --git a/chromium/media/learning/mojo/mojo_learning_session_impl.h b/chromium/media/learning/mojo/mojo_learning_session_impl.h
index 3146fb38943..83d0eb72018 100644
--- a/chromium/media/learning/mojo/mojo_learning_session_impl.h
+++ b/chromium/media/learning/mojo/mojo_learning_session_impl.h
@@ -29,7 +29,7 @@ class COMPONENT_EXPORT(MEDIA_LEARNING_MOJO) MojoLearningSessionImpl
// mojom::LearningSession
void AddExample(mojom::LearningTaskType task_type,
- const TrainingExample& example) override;
+ const LabelledExample& example) override;
protected:
explicit MojoLearningSessionImpl(
diff --git a/chromium/media/learning/mojo/mojo_learning_session_impl_unittest.cc b/chromium/media/learning/mojo/mojo_learning_session_impl_unittest.cc
index efefaa1c6b6..ba75f871b86 100644
--- a/chromium/media/learning/mojo/mojo_learning_session_impl_unittest.cc
+++ b/chromium/media/learning/mojo/mojo_learning_session_impl_unittest.cc
@@ -18,13 +18,13 @@ class MojoLearningSessionImplTest : public ::testing::Test {
class FakeLearningSession : public ::media::learning::LearningSession {
public:
void AddExample(const std::string& task_name,
- const TrainingExample& example) override {
+ const LabelledExample& example) override {
most_recent_task_name_ = task_name;
most_recent_example_ = example;
}
std::string most_recent_task_name_;
- TrainingExample most_recent_example_;
+ LabelledExample most_recent_example_;
};
public:
@@ -50,8 +50,8 @@ class MojoLearningSessionImplTest : public ::testing::Test {
};
TEST_F(MojoLearningSessionImplTest, FeaturesAndTargetValueAreCopied) {
- mojom::TrainingExamplePtr example_ptr = mojom::TrainingExample::New();
- const TrainingExample example = {{Value(123), Value(456), Value(890)},
+ mojom::LabelledExamplePtr example_ptr = mojom::LabelledExample::New();
+ const LabelledExample example = {{Value(123), Value(456), Value(890)},
TargetValue(1234)};
learning_session_impl_->AddExample(task_type_, example);
diff --git a/chromium/media/learning/mojo/public/cpp/learning_mojom_traits.cc b/chromium/media/learning/mojo/public/cpp/learning_mojom_traits.cc
index 8abf5b27659..aa308d9de9f 100644
--- a/chromium/media/learning/mojo/public/cpp/learning_mojom_traits.cc
+++ b/chromium/media/learning/mojo/public/cpp/learning_mojom_traits.cc
@@ -7,10 +7,10 @@
namespace mojo {
// static
-bool StructTraits<media::learning::mojom::TrainingExampleDataView,
- media::learning::TrainingExample>::
- Read(media::learning::mojom::TrainingExampleDataView data,
- media::learning::TrainingExample* out_example) {
+bool StructTraits<media::learning::mojom::LabelledExampleDataView,
+ media::learning::LabelledExample>::
+ Read(media::learning::mojom::LabelledExampleDataView data,
+ media::learning::LabelledExample* out_example) {
out_example->features.clear();
if (!data.ReadFeatures(&out_example->features))
return false;
diff --git a/chromium/media/learning/mojo/public/cpp/learning_mojom_traits.h b/chromium/media/learning/mojo/public/cpp/learning_mojom_traits.h
index 963d58a2443..932a5cb7d4a 100644
--- a/chromium/media/learning/mojo/public/cpp/learning_mojom_traits.h
+++ b/chromium/media/learning/mojo/public/cpp/learning_mojom_traits.h
@@ -14,20 +14,20 @@
namespace mojo {
template <>
-class StructTraits<media::learning::mojom::TrainingExampleDataView,
- media::learning::TrainingExample> {
+class StructTraits<media::learning::mojom::LabelledExampleDataView,
+ media::learning::LabelledExample> {
public:
static const std::vector<media::learning::FeatureValue>& features(
- const media::learning::TrainingExample& e) {
+ const media::learning::LabelledExample& e) {
return e.features;
}
static media::learning::TargetValue target_value(
- const media::learning::TrainingExample& e) {
+ const media::learning::LabelledExample& e) {
return e.target_value;
}
- static bool Read(media::learning::mojom::TrainingExampleDataView data,
- media::learning::TrainingExample* out_example);
+ static bool Read(media::learning::mojom::LabelledExampleDataView data,
+ media::learning::LabelledExample* out_example);
};
template <>
diff --git a/chromium/media/learning/mojo/public/cpp/mojo_learning_session.cc b/chromium/media/learning/mojo/public/cpp/mojo_learning_session.cc
index c3192447c69..c67f642842a 100644
--- a/chromium/media/learning/mojo/public/cpp/mojo_learning_session.cc
+++ b/chromium/media/learning/mojo/public/cpp/mojo_learning_session.cc
@@ -15,7 +15,7 @@ MojoLearningSession::MojoLearningSession(mojom::LearningSessionPtr session_ptr)
MojoLearningSession::~MojoLearningSession() = default;
void MojoLearningSession::AddExample(const std::string& task_name,
- const TrainingExample& example) {
+ const LabelledExample& example) {
// TODO(liberato): Convert from |task_name| to a task type.
session_ptr_->AddExample(mojom::LearningTaskType::kPlaceHolderTask, example);
}
diff --git a/chromium/media/learning/mojo/public/cpp/mojo_learning_session.h b/chromium/media/learning/mojo/public/cpp/mojo_learning_session.h
index 8d9026e60eb..0e8af2b6aca 100644
--- a/chromium/media/learning/mojo/public/cpp/mojo_learning_session.h
+++ b/chromium/media/learning/mojo/public/cpp/mojo_learning_session.h
@@ -22,7 +22,7 @@ class COMPONENT_EXPORT(MEDIA_LEARNING_MOJO) MojoLearningSession
// LearningSession
void AddExample(const std::string& task_name,
- const TrainingExample& example) override;
+ const LabelledExample& example) override;
private:
mojom::LearningSessionPtr session_ptr_;
diff --git a/chromium/media/learning/mojo/public/cpp/mojo_learning_session_unittest.cc b/chromium/media/learning/mojo/public/cpp/mojo_learning_session_unittest.cc
index e52ff92ad35..37cecb4db21 100644
--- a/chromium/media/learning/mojo/public/cpp/mojo_learning_session_unittest.cc
+++ b/chromium/media/learning/mojo/public/cpp/mojo_learning_session_unittest.cc
@@ -22,13 +22,13 @@ class MojoLearningSessionTest : public ::testing::Test {
class FakeMojoLearningSessionImpl : public mojom::LearningSession {
public:
void AddExample(mojom::LearningTaskType task_type,
- const TrainingExample& example) override {
+ const LabelledExample& example) override {
task_type_ = std::move(task_type);
example_ = example;
}
mojom::LearningTaskType task_type_;
- TrainingExample example_;
+ LabelledExample example_;
};
public:
@@ -57,7 +57,7 @@ class MojoLearningSessionTest : public ::testing::Test {
};
TEST_F(MojoLearningSessionTest, ExampleIsCopied) {
- TrainingExample example({FeatureValue(123), FeatureValue(456)},
+ LabelledExample example({FeatureValue(123), FeatureValue(456)},
TargetValue(1234));
learning_session_->AddExample("unused task id", example);
learning_session_binding_.FlushForTesting();
diff --git a/chromium/media/learning/mojo/public/mojom/learning_session.mojom b/chromium/media/learning/mojo/public/mojom/learning_session.mojom
index e2f79a243ae..f7a2b1d7b3f 100644
--- a/chromium/media/learning/mojo/public/mojom/learning_session.mojom
+++ b/chromium/media/learning/mojo/public/mojom/learning_session.mojom
@@ -15,5 +15,5 @@ enum LearningTaskType {
// media/learning/public/learning_session.h
interface LearningSession {
// Add |example| to |task_type|.
- AddExample(LearningTaskType task_type, TrainingExample example);
+ AddExample(LearningTaskType task_type, LabelledExample example);
};
diff --git a/chromium/media/learning/mojo/public/mojom/learning_types.mojom b/chromium/media/learning/mojo/public/mojom/learning_types.mojom
index e83ceece2b5..9a51bd970c5 100644
--- a/chromium/media/learning/mojo/public/mojom/learning_types.mojom
+++ b/chromium/media/learning/mojo/public/mojom/learning_types.mojom
@@ -14,8 +14,8 @@ struct TargetValue {
int64 value;
};
-// learning::TrainingExample (common/training_example.h)
-struct TrainingExample {
+// learning::LabelledExample (common/training_example.h)
+struct LabelledExample {
array<FeatureValue> features;
TargetValue target_value;
};
diff --git a/chromium/media/learning/mojo/public/mojom/learning_types.typemap b/chromium/media/learning/mojo/public/mojom/learning_types.typemap
index ea59e602495..4e6a27b67dd 100644
--- a/chromium/media/learning/mojo/public/mojom/learning_types.typemap
+++ b/chromium/media/learning/mojo/public/mojom/learning_types.typemap
@@ -1,6 +1,6 @@
mojom = "//media/learning/mojo/public/mojom/learning_types.mojom"
public_headers = [
- "//media/learning/common/training_example.h",
+ "//media/learning/common/labelled_example.h",
"//media/learning/common/value.h",
]
traits_headers = [ "//media/learning/mojo/public/cpp/learning_mojom_traits.h" ]
@@ -12,7 +12,7 @@ public_deps = [
"//media/learning/common",
]
type_mappings = [
- "media.learning.mojom.TrainingExample=media::learning::TrainingExample",
+ "media.learning.mojom.LabelledExample=media::learning::LabelledExample",
"media.learning.mojom.FeatureValue=media::learning::FeatureValue",
"media.learning.mojom.TargetValue=media::learning::TargetValue",
]
diff --git a/chromium/media/media_options.gni b/chromium/media/media_options.gni
index 46eaa581816..a31937713ac 100644
--- a/chromium/media/media_options.gni
+++ b/chromium/media/media_options.gni
@@ -5,6 +5,7 @@
import("//build/config/chrome_build.gni")
import("//build/config/chromecast_build.gni")
import("//build/config/features.gni")
+import("//media/gpu/args.gni")
import("//testing/libfuzzer/fuzzer_test.gni")
# Do not expand this list without double-checking with OWNERS, this is a list of
@@ -76,6 +77,9 @@ declare_args() {
# Enable HLS with SAMPLE-AES decryption.
enable_hls_sample_aes = proprietary_codecs && is_chromecast
+ # Enable logging override, e.g. enable DVLOGs at build time.
+ enable_logging_override = is_chromecast
+
# If true, use cast CMA backend instead of default chromium media pipeline.
# TODO(sanfin): Remove this flag when all builds enable CMA.
is_cast_using_cma_backend = true
@@ -129,8 +133,9 @@ declare_args() {
# |mojo_media_services|). When enabled, selected mojo paths will be enabled in
# the media pipeline and corresponding services will hosted in the selected
# remote process (e.g. "utility" process, see |mojo_media_host|).
- enable_mojo_media = is_android || is_chromecast || is_chromeos || is_mac ||
- is_win || enable_library_cdms
+ enable_mojo_media =
+ is_android || is_chromecast || is_chromeos || is_mac || is_win ||
+ enable_library_cdms || (is_desktop_linux && use_vaapi)
# Enable the TestMojoMediaClient to be used in mojo MediaService. This is for
# testing only and will override the default platform MojoMediaClient, if any.
@@ -200,7 +205,8 @@ if (enable_mojo_media) {
]
_default_mojo_media_host = "gpu"
}
- } else if (is_chromeos || is_mac || is_win) {
+ } else if (is_chromeos || is_mac || is_win ||
+ (is_desktop_linux && use_vaapi)) {
_default_mojo_media_services = [ "video_decoder" ]
_default_mojo_media_host = "gpu"
}
diff --git a/chromium/media/midi/java/src/org/chromium/midi/MidiInputPortAndroid.java b/chromium/media/midi/java/src/org/chromium/midi/MidiInputPortAndroid.java
index 2ca80b63102..f866c51e24d 100644
--- a/chromium/media/midi/java/src/org/chromium/midi/MidiInputPortAndroid.java
+++ b/chromium/media/midi/java/src/org/chromium/midi/MidiInputPortAndroid.java
@@ -69,7 +69,12 @@ class MidiInputPortAndroid {
mPort.connect(new MidiReceiver() {
@Override
public void onSend(byte[] bs, int offset, int count, long timestamp) {
- nativeOnData(mNativeReceiverPointer, bs, offset, count, timestamp);
+ synchronized (MidiInputPortAndroid.this) {
+ if (mPort == null) {
+ return;
+ }
+ nativeOnData(mNativeReceiverPointer, bs, offset, count, timestamp);
+ }
}
});
return true;
@@ -79,7 +84,7 @@ class MidiInputPortAndroid {
* Closes the port.
*/
@CalledByNative
- void close() {
+ synchronized void close() {
if (mPort == null) {
return;
}
diff --git a/chromium/media/midi/java/src/org/chromium/midi/MidiManagerAndroid.java b/chromium/media/midi/java/src/org/chromium/midi/MidiManagerAndroid.java
index bd073e0e8f3..e0a4cbbdaf1 100644
--- a/chromium/media/midi/java/src/org/chromium/midi/MidiManagerAndroid.java
+++ b/chromium/media/midi/java/src/org/chromium/midi/MidiManagerAndroid.java
@@ -55,6 +55,14 @@ class MidiManagerAndroid {
private final long mNativeManagerPointer;
/**
+ * True if this object is stopped.
+ * This is needed because MidiManagerAndroid functions are called from the IO thread but
+ * callbacks are called on the UI thread (because the IO thread doesn't have a Looper). We need
+ * to protect each native function call with a synchronized block that also checks this flag.
+ */
+ private boolean mStopped;
+
+ /**
* Checks if Android MIDI is supported on the device.
*/
@CalledByNative
@@ -94,7 +102,12 @@ class MidiManagerAndroid {
mHandler.post(new Runnable() {
@Override
public void run() {
- nativeOnInitializationFailed(mNativeManagerPointer);
+ synchronized (MidiManagerAndroid.this) {
+ if (mStopped) {
+ return;
+ }
+ nativeOnInitializationFailed(mNativeManagerPointer);
+ }
}
});
return;
@@ -119,15 +132,28 @@ class MidiManagerAndroid {
mHandler.post(new Runnable() {
@Override
public void run() {
- if (mPendingDevices.isEmpty() && !mIsInitialized) {
- nativeOnInitialized(
- mNativeManagerPointer, mDevices.toArray(new MidiDeviceAndroid[0]));
- mIsInitialized = true;
+ synchronized (MidiManagerAndroid.this) {
+ if (mStopped) {
+ return;
+ }
+ if (mPendingDevices.isEmpty() && !mIsInitialized) {
+ nativeOnInitialized(
+ mNativeManagerPointer, mDevices.toArray(new MidiDeviceAndroid[0]));
+ mIsInitialized = true;
+ }
}
}
});
}
+ /**
+ * Marks this object as stopped.
+ */
+ @CalledByNative
+ synchronized void stop() {
+ mStopped = true;
+ }
+
private void openDevice(final MidiDeviceInfo info) {
mManager.openDevice(info, new MidiManager.OnDeviceOpenedListener() {
@Override
@@ -152,7 +178,10 @@ class MidiManagerAndroid {
* Called when a midi device is detached.
* @param info the detached device information.
*/
- private void onDeviceRemoved(MidiDeviceInfo info) {
+ private synchronized void onDeviceRemoved(MidiDeviceInfo info) {
+ if (mStopped) {
+ return;
+ }
for (MidiDeviceAndroid device : mDevices) {
if (device.isOpen() && device.getInfo().getId() == info.getId()) {
device.close();
@@ -161,7 +190,10 @@ class MidiManagerAndroid {
}
}
- private void onDeviceOpened(MidiDevice device, MidiDeviceInfo info) {
+ private synchronized void onDeviceOpened(MidiDevice device, MidiDeviceInfo info) {
+ if (mStopped) {
+ return;
+ }
mPendingDevices.remove(info);
if (device != null) {
MidiDeviceAndroid xdevice = new MidiDeviceAndroid(device);
diff --git a/chromium/media/midi/message_util_unittest.cc b/chromium/media/midi/message_util_unittest.cc
index 91450ff43af..b1434363659 100644
--- a/chromium/media/midi/message_util_unittest.cc
+++ b/chromium/media/midi/message_util_unittest.cc
@@ -6,7 +6,7 @@
#include <stdint.h>
-#include "base/macros.h"
+#include "base/stl_util.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace midi {
@@ -51,10 +51,11 @@ void PushToVector(const T (&data)[N], std::vector<T>* buffer) {
TEST(MidiMessageUtilTest, GetMessageLength) {
// Check basic functionarity
- EXPECT_EQ(arraysize(kNoteOn), GetMessageLength(kNoteOn[0]));
- EXPECT_EQ(arraysize(kChannelPressure), GetMessageLength(kChannelPressure[0]));
- EXPECT_EQ(arraysize(kTimingClock), GetMessageLength(kTimingClock[0]));
- EXPECT_EQ(arraysize(kSystemCommonMessageTuneRequest),
+ EXPECT_EQ(base::size(kNoteOn), GetMessageLength(kNoteOn[0]));
+ EXPECT_EQ(base::size(kChannelPressure),
+ GetMessageLength(kChannelPressure[0]));
+ EXPECT_EQ(base::size(kTimingClock), GetMessageLength(kTimingClock[0]));
+ EXPECT_EQ(base::size(kSystemCommonMessageTuneRequest),
GetMessageLength(kSystemCommonMessageTuneRequest[0]));
// SysEx message should be mapped to 0-length
diff --git a/chromium/media/midi/midi_manager.cc b/chromium/media/midi/midi_manager.cc
index 6de6c7763db..00299281200 100644
--- a/chromium/media/midi/midi_manager.cc
+++ b/chromium/media/midi/midi_manager.cc
@@ -64,6 +64,8 @@ MidiManager::MidiManager(MidiService* service) : service_(service) {
MidiManager::~MidiManager() {
base::AutoLock auto_lock(lock_);
+ DCHECK(pending_clients_.empty() && clients_.empty());
+
if (session_thread_runner_) {
DCHECK(session_thread_runner_->BelongsToCurrentThread());
session_thread_runner_ = nullptr;
@@ -79,12 +81,6 @@ MidiManager::~MidiManager() {
: (data_received_ ? SendReceiveUsage::RECEIVED
: SendReceiveUsage::NO_USE),
static_cast<Sample>(SendReceiveUsage::MAX) + 1);
-
- // Detach all clients so that they do not call MidiManager methods any more.
- for (auto* client : pending_clients_)
- client->Detach();
- for (auto* client : clients_)
- client->Detach();
}
#if !defined(OS_MACOSX) && !defined(OS_WIN) && \
@@ -182,6 +178,16 @@ void MidiManager::DispatchSendMidiData(MidiManagerClient* client,
NOTREACHED();
}
+void MidiManager::EndAllSessions() {
+ base::AutoLock lock(lock_);
+ for (auto* client : pending_clients_)
+ client->Detach();
+ for (auto* client : clients_)
+ client->Detach();
+ pending_clients_.clear();
+ clients_.clear();
+}
+
void MidiManager::StartInitialization() {
CompleteInitialization(Result::NOT_SUPPORTED);
}
@@ -286,6 +292,7 @@ size_t MidiManager::GetClientCountForTesting() {
}
size_t MidiManager::GetPendingClientCountForTesting() {
+ base::AutoLock auto_lock(lock_);
return pending_clients_.size();
}
diff --git a/chromium/media/midi/midi_manager.h b/chromium/media/midi/midi_manager.h
index fab53ef0690..facb5f3b2d0 100644
--- a/chromium/media/midi/midi_manager.h
+++ b/chromium/media/midi/midi_manager.h
@@ -118,6 +118,10 @@ class MIDI_EXPORT MidiManager {
const std::vector<uint8_t>& data,
base::TimeTicks timestamp);
+ // This method ends all sessions by detaching and removing all registered
+ // clients. This method can be called from any thread.
+ void EndAllSessions();
+
protected:
friend class MidiManagerUsb;
@@ -179,7 +183,7 @@ class MIDI_EXPORT MidiManager {
mojom::Result result_ = mojom::Result::NOT_INITIALIZED;
// Keeps track of all clients who are waiting for CompleteStartSession().
- std::set<MidiManagerClient*> pending_clients_;
+ std::set<MidiManagerClient*> pending_clients_ GUARDED_BY(lock_);
// Keeps track of all clients who wish to receive MIDI data.
std::set<MidiManagerClient*> clients_ GUARDED_BY(lock_);
diff --git a/chromium/media/midi/midi_manager_alsa.cc b/chromium/media/midi/midi_manager_alsa.cc
index 85dc3142277..b13df63d200 100644
--- a/chromium/media/midi/midi_manager_alsa.cc
+++ b/chromium/media/midi/midi_manager_alsa.cc
@@ -16,10 +16,10 @@
#include "base/bind.h"
#include "base/json/json_string_value_serializer.h"
#include "base/logging.h"
-#include "base/macros.h"
#include "base/posix/eintr_wrapper.h"
#include "base/posix/safe_strerror.h"
#include "base/single_thread_task_runner.h"
+#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/stringprintf.h"
#include "base/time/time.h"
@@ -847,7 +847,7 @@ void MidiManagerAlsa::EventLoop() {
pfd[1].fd = device::udev_monitor_get_fd(udev_monitor_.get());
pfd[1].events = POLLIN;
- int err = HANDLE_EINTR(poll(pfd, arraysize(pfd), -1));
+ int err = HANDLE_EINTR(poll(pfd, base::size(pfd), -1));
if (err < 0) {
VLOG(1) << "poll fails: " << base::safe_strerror(errno);
loop_again = false;
diff --git a/chromium/media/midi/midi_manager_alsa.h b/chromium/media/midi/midi_manager_alsa.h
index 92fb8309096..e293f24eb8f 100644
--- a/chromium/media/midi/midi_manager_alsa.h
+++ b/chromium/media/midi/midi_manager_alsa.h
@@ -10,10 +10,10 @@
#include <map>
#include <memory>
+#include <unordered_map>
#include <utility>
#include <vector>
-#include "base/containers/hash_tables.h"
#include "base/gtest_prod_util.h"
#include "base/macros.h"
#include "base/synchronization/lock.h"
@@ -362,8 +362,8 @@ class MIDI_EXPORT MidiManagerAlsa final : public MidiManager {
};
};
- using SourceMap = base::hash_map<int, uint32_t>;
- using OutPortMap = base::hash_map<uint32_t, int>;
+ using SourceMap = std::unordered_map<int, uint32_t>;
+ using OutPortMap = std::unordered_map<uint32_t, int>;
using ScopedSndSeqPtr = std::unique_ptr<snd_seq_t, SndSeqDeleter>;
using ScopedSndMidiEventPtr =
std::unique_ptr<snd_midi_event_t, SndMidiEventDeleter>;
diff --git a/chromium/media/midi/midi_manager_android.cc b/chromium/media/midi/midi_manager_android.cc
index c49e7755b52..dee877e07cc 100644
--- a/chromium/media/midi/midi_manager_android.cc
+++ b/chromium/media/midi/midi_manager_android.cc
@@ -50,6 +50,8 @@ MidiManagerAndroid::MidiManagerAndroid(MidiService* service)
: MidiManager(service) {}
MidiManagerAndroid::~MidiManagerAndroid() {
+ JNIEnv* env = base::android::AttachCurrentThread();
+ Java_MidiManagerAndroid_stop(env, raw_manager_);
bool result = service()->task_service()->UnbindInstance();
CHECK(result);
}
diff --git a/chromium/media/midi/midi_manager_android.h b/chromium/media/midi/midi_manager_android.h
index f63fde6fdfc..7fd29e2ffe8 100644
--- a/chromium/media/midi/midi_manager_android.h
+++ b/chromium/media/midi/midi_manager_android.h
@@ -10,10 +10,10 @@
#include <stdint.h>
#include <memory>
+#include <unordered_map>
#include <vector>
#include "base/android/scoped_java_ref.h"
-#include "base/containers/hash_tables.h"
#include "base/synchronization/lock.h"
#include "base/time/time.h"
#include "media/midi/midi_input_port_android.h"
@@ -79,11 +79,11 @@ class MidiManagerAndroid final : public MidiManager,
std::vector<MidiInputPortAndroid*> all_input_ports_;
// A dictionary from a port to its index.
// input_port_to_index_[all_input_ports_[i]] == i for each valid |i|.
- base::hash_map<MidiInputPortAndroid*, size_t> input_port_to_index_;
+ std::unordered_map<MidiInputPortAndroid*, size_t> input_port_to_index_;
// Ditto for output ports.
std::vector<MidiOutputPortAndroid*> all_output_ports_;
- base::hash_map<MidiOutputPortAndroid*, size_t> output_port_to_index_;
+ std::unordered_map<MidiOutputPortAndroid*, size_t> output_port_to_index_;
base::android::ScopedJavaGlobalRef<jobject> raw_manager_;
};
diff --git a/chromium/media/midi/midi_manager_usb.h b/chromium/media/midi/midi_manager_usb.h
index 90aa2d54473..49e6b17baf5 100644
--- a/chromium/media/midi/midi_manager_usb.h
+++ b/chromium/media/midi/midi_manager_usb.h
@@ -9,11 +9,12 @@
#include <stdint.h>
#include <memory>
+#include <unordered_map>
#include <utility>
#include <vector>
#include "base/compiler_specific.h"
-#include "base/containers/hash_tables.h"
+#include "base/hash.h"
#include "base/macros.h"
#include "base/synchronization/lock.h"
#include "base/time/time.h"
@@ -87,7 +88,10 @@ class USB_MIDI_EXPORT MidiManagerUsb : public MidiManager,
std::unique_ptr<UsbMidiInputStream> input_stream_;
// A map from <endpoint_number, cable_number> to the index of input jacks.
- base::hash_map<std::pair<int, int>, size_t> input_jack_dictionary_;
+ std::unordered_map<std::pair<int, int>,
+ size_t,
+ base::IntPairHash<std::pair<int, int>>>
+ input_jack_dictionary_;
DISALLOW_COPY_AND_ASSIGN(MidiManagerUsb);
};
diff --git a/chromium/media/midi/midi_manager_usb_unittest.cc b/chromium/media/midi/midi_manager_usb_unittest.cc
index f7fe4a6ea54..7161757bf97 100644
--- a/chromium/media/midi/midi_manager_usb_unittest.cc
+++ b/chromium/media/midi/midi_manager_usb_unittest.cc
@@ -10,9 +10,9 @@
#include <string>
#include <utility>
-#include "base/macros.h"
#include "base/message_loop/message_loop.h"
#include "base/run_loop.h"
+#include "base/stl_util.h"
#include "base/strings/stringprintf.h"
#include "base/time/time.h"
#include "media/midi/midi_service.h"
@@ -553,7 +553,7 @@ TEST_F(MidiManagerUsbTest, Receive) {
RunCallbackUntilCallbackInvoked(true, &devices);
EXPECT_EQ(Result::OK, GetInitializationResult());
- manager()->ReceiveUsbMidiData(device_raw, 2, data, arraysize(data),
+ manager()->ReceiveUsbMidiData(device_raw, 2, data, base::size(data),
base::TimeTicks());
Finalize();
diff --git a/chromium/media/midi/midi_manager_win.cc b/chromium/media/midi/midi_manager_win.cc
index e21a853b167..902399f011b 100644
--- a/chromium/media/midi/midi_manager_win.cc
+++ b/chromium/media/midi/midi_manager_win.cc
@@ -18,6 +18,7 @@
#include "base/callback.h"
#include "base/logging.h"
#include "base/single_thread_task_runner.h"
+#include "base/stl_util.h"
#include "base/strings/string16.h"
#include "base/strings/stringprintf.h"
#include "base/strings/utf_string_conversions.h"
@@ -643,7 +644,7 @@ MidiManagerWin::PortManager::HandleMidiInCallback(HMIDIIN hmi,
static_cast<uint8_t>((param1 >> 16) & 0xff);
const uint8_t kData[] = {status_byte, first_data_byte, second_data_byte};
const size_t len = GetMessageLength(status_byte);
- DCHECK_LE(len, arraysize(kData));
+ DCHECK_LE(len, base::size(kData));
std::vector<uint8_t> data;
data.assign(kData, kData + len);
manager->PostReplyTask(base::BindOnce(
diff --git a/chromium/media/midi/midi_service.cc b/chromium/media/midi/midi_service.cc
index a602337cc2a..e624e82c54b 100644
--- a/chromium/media/midi/midi_service.cc
+++ b/chromium/media/midi/midi_service.cc
@@ -42,6 +42,7 @@ MidiService::~MidiService() {
void MidiService::Shutdown() {
base::AutoLock lock(lock_);
if (manager_) {
+ manager_->EndAllSessions();
DCHECK(manager_destructor_runner_);
manager_destructor_runner_->DeleteSoon(FROM_HERE, std::move(manager_));
manager_destructor_runner_ = nullptr;
diff --git a/chromium/media/midi/usb_midi_descriptor_parser_unittest.cc b/chromium/media/midi/usb_midi_descriptor_parser_unittest.cc
index ac66be160f1..1f24a87b88c 100644
--- a/chromium/media/midi/usb_midi_descriptor_parser_unittest.cc
+++ b/chromium/media/midi/usb_midi_descriptor_parser_unittest.cc
@@ -6,7 +6,7 @@
#include <stdint.h>
-#include "base/macros.h"
+#include "base/stl_util.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace midi {
@@ -24,7 +24,7 @@ TEST(UsbMidiDescriptorParserTest, InvalidSize) {
UsbMidiDescriptorParser parser;
std::vector<UsbMidiJack> jacks;
uint8_t data[] = {0x04};
- EXPECT_FALSE(parser.Parse(nullptr, data, arraysize(data), &jacks));
+ EXPECT_FALSE(parser.Parse(nullptr, data, base::size(data), &jacks));
EXPECT_TRUE(jacks.empty());
}
@@ -37,7 +37,7 @@ TEST(UsbMidiDescriptorParserTest, NonExistingJackIsAssociated) {
0x09, 0x04, 0x01, 0x00, 0x02, 0x01, 0x03, 0x00, 0x00, 0x07, 0x24,
0x01, 0x00, 0x01, 0x07, 0x00, 0x05, 0x25, 0x01, 0x01, 0x01,
};
- EXPECT_FALSE(parser.Parse(nullptr, data, arraysize(data), &jacks));
+ EXPECT_FALSE(parser.Parse(nullptr, data, base::size(data), &jacks));
EXPECT_TRUE(jacks.empty());
}
@@ -51,7 +51,7 @@ TEST(UsbMidiDescriptorParserTest,
0x09, 0x04, 0x01, 0x00, 0x02, 0x01, 0x02, 0x00, 0x00, 0x07, 0x24,
0x01, 0x00, 0x01, 0x07, 0x00, 0x05, 0x25, 0x01, 0x01, 0x01,
};
- EXPECT_TRUE(parser.Parse(nullptr, data, arraysize(data), &jacks));
+ EXPECT_TRUE(parser.Parse(nullptr, data, base::size(data), &jacks));
EXPECT_TRUE(jacks.empty());
}
@@ -73,7 +73,7 @@ TEST(UsbMidiDescriptorParserTest, Parse) {
0x03, 0x09, 0x05, 0x82, 0x02, 0x20, 0x00, 0x00, 0x00, 0x00, 0x05, 0x25,
0x01, 0x01, 0x07,
};
- EXPECT_TRUE(parser.Parse(nullptr, data, arraysize(data), &jacks));
+ EXPECT_TRUE(parser.Parse(nullptr, data, base::size(data), &jacks));
ASSERT_EQ(3u, jacks.size());
EXPECT_EQ(2u, jacks[0].jack_id);
@@ -108,7 +108,7 @@ TEST(UsbMidiDescriptorParserTest, ParseDeviceInfo) {
0x12, 0x01, 0x10, 0x01, 0x00, 0x00, 0x00, 0x08, 0x01,
0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0x00, 0x0a,
};
- EXPECT_TRUE(parser.ParseDeviceInfo(data, arraysize(data), &info));
+ EXPECT_TRUE(parser.ParseDeviceInfo(data, base::size(data), &info));
EXPECT_EQ(0x2301, info.vendor_id);
EXPECT_EQ(0x6745, info.product_id);
diff --git a/chromium/media/midi/usb_midi_device_factory_android.cc b/chromium/media/midi/usb_midi_device_factory_android.cc
index 9c9eb391465..1d1e90c7d8a 100644
--- a/chromium/media/midi/usb_midi_device_factory_android.cc
+++ b/chromium/media/midi/usb_midi_device_factory_android.cc
@@ -8,7 +8,6 @@
#include <memory>
#include "base/bind.h"
-#include "base/containers/hash_tables.h"
#include "base/synchronization/lock.h"
#include "jni/UsbMidiDeviceFactoryAndroid_jni.h"
#include "media/midi/usb_midi_device_android.h"
diff --git a/chromium/media/midi/usb_midi_input_stream.h b/chromium/media/midi/usb_midi_input_stream.h
index 6d7a5f74769..830af0dc022 100644
--- a/chromium/media/midi/usb_midi_input_stream.h
+++ b/chromium/media/midi/usb_midi_input_stream.h
@@ -11,7 +11,6 @@
#include <map>
#include <vector>
-#include "base/containers/hash_tables.h"
#include "base/macros.h"
#include "base/time/time.h"
#include "media/midi/usb_midi_export.h"
diff --git a/chromium/media/midi/usb_midi_input_stream_unittest.cc b/chromium/media/midi/usb_midi_input_stream_unittest.cc
index a88489ba761..ff0f3b84467 100644
--- a/chromium/media/midi/usb_midi_input_stream_unittest.cc
+++ b/chromium/media/midi/usb_midi_input_stream_unittest.cc
@@ -11,7 +11,7 @@
#include <string>
#include <vector>
-#include "base/macros.h"
+#include "base/stl_util.h"
#include "base/strings/stringprintf.h"
#include "base/time/time.h"
#include "media/midi/usb_midi_device.h"
@@ -96,7 +96,7 @@ TEST_F(UsbMidiInputStreamTest, UnknownMessage) {
0x40, 0xff, 0xff, 0xff, 0x41, 0xff, 0xff, 0xff,
};
- stream_->OnReceivedData(&device1_, 7, data, arraysize(data), TimeTicks());
+ stream_->OnReceivedData(&device1_, 7, data, base::size(data), TimeTicks());
EXPECT_EQ("", delegate_.received_data());
}
@@ -105,7 +105,7 @@ TEST_F(UsbMidiInputStreamTest, SystemCommonMessage) {
0x45, 0xf8, 0x00, 0x00, 0x42, 0xf3, 0x22, 0x00, 0x43, 0xf2, 0x33, 0x44,
};
- stream_->OnReceivedData(&device1_, 7, data, arraysize(data), TimeTicks());
+ stream_->OnReceivedData(&device1_, 7, data, base::size(data), TimeTicks());
EXPECT_EQ("0xf8 \n"
"0xf3 0x22 \n"
"0xf2 0x33 0x44 \n", delegate_.received_data());
@@ -117,7 +117,7 @@ TEST_F(UsbMidiInputStreamTest, SystemExclusiveMessage) {
0x46, 0xf0, 0xf7, 0x00, 0x47, 0xf0, 0x33, 0xf7,
};
- stream_->OnReceivedData(&device1_, 7, data, arraysize(data), TimeTicks());
+ stream_->OnReceivedData(&device1_, 7, data, base::size(data), TimeTicks());
EXPECT_EQ("0xf0 0x11 0x22 \n"
"0xf7 \n"
"0xf0 0xf7 \n"
@@ -131,7 +131,7 @@ TEST_F(UsbMidiInputStreamTest, ChannelMessage) {
0x4d, 0xd0, 0xaa, 0x00, 0x4e, 0xe0, 0xbb, 0xcc,
};
- stream_->OnReceivedData(&device1_, 7, data, arraysize(data), TimeTicks());
+ stream_->OnReceivedData(&device1_, 7, data, base::size(data), TimeTicks());
EXPECT_EQ("0x80 0x11 0x22 \n"
"0x90 0x33 0x44 \n"
"0xa0 0x55 0x66 \n"
@@ -146,7 +146,7 @@ TEST_F(UsbMidiInputStreamTest, SingleByteMessage) {
0x4f, 0xf8, 0x00, 0x00,
};
- stream_->OnReceivedData(&device1_, 7, data, arraysize(data), TimeTicks());
+ stream_->OnReceivedData(&device1_, 7, data, base::size(data), TimeTicks());
EXPECT_EQ("0xf8 \n", delegate_.received_data());
}
@@ -155,14 +155,14 @@ TEST_F(UsbMidiInputStreamTest, DispatchForMultipleCables) {
0x4f, 0xf8, 0x00, 0x00, 0x5f, 0xfa, 0x00, 0x00, 0x6f, 0xfb, 0x00, 0x00,
};
- stream_->OnReceivedData(&device1_, 7, data, arraysize(data), TimeTicks());
+ stream_->OnReceivedData(&device1_, 7, data, base::size(data), TimeTicks());
EXPECT_EQ("0xf8 \n0xfa \n", delegate_.received_data());
}
TEST_F(UsbMidiInputStreamTest, DispatchForDevice2) {
uint8_t data[] = {0x4f, 0xf8, 0x00, 0x00};
- stream_->OnReceivedData(&device2_, 7, data, arraysize(data), TimeTicks());
+ stream_->OnReceivedData(&device2_, 7, data, base::size(data), TimeTicks());
EXPECT_EQ("0xf8 \n", delegate_.received_data());
}
diff --git a/chromium/media/midi/usb_midi_output_stream.cc b/chromium/media/midi/usb_midi_output_stream.cc
index 6ec1194ed14..44def01cf82 100644
--- a/chromium/media/midi/usb_midi_output_stream.cc
+++ b/chromium/media/midi/usb_midi_output_stream.cc
@@ -5,7 +5,7 @@
#include "media/midi/usb_midi_output_stream.h"
#include "base/logging.h"
-#include "base/macros.h"
+#include "base/stl_util.h"
#include "media/midi/message_util.h"
#include "media/midi/usb_midi_device.h"
@@ -83,9 +83,8 @@ bool UsbMidiOutputStream::PushSysExMessage(const std::vector<uint8_t>& data,
// We can't find the end-of-message mark in the three bytes.
*current = index;
data_to_send->push_back((jack_.cable_number << 4) | 0x4);
- data_to_send->insert(data_to_send->end(),
- message,
- message + arraysize(message));
+ data_to_send->insert(data_to_send->end(), message,
+ message + base::size(message));
is_sending_sysex_ = true;
return true;
}
@@ -102,9 +101,8 @@ bool UsbMidiOutputStream::PushSysExMessage(const std::vector<uint8_t>& data,
uint8_t code_index = static_cast<uint8_t>(message_size) + 0x4;
DCHECK(code_index == 0x5 || code_index == 0x6 || code_index == 0x7);
data_to_send->push_back((jack_.cable_number << 4) | code_index);
- data_to_send->insert(data_to_send->end(),
- message,
- message + arraysize(message));
+ data_to_send->insert(data_to_send->end(), message,
+ message + base::size(message));
*current = index + 1;
is_sending_sysex_ = false;
return true;
diff --git a/chromium/media/mojo/clients/mojo_audio_decoder.cc b/chromium/media/mojo/clients/mojo_audio_decoder.cc
index 158b3f5038d..c10d8bcd14f 100644
--- a/chromium/media/mojo/clients/mojo_audio_decoder.cc
+++ b/chromium/media/mojo/clients/mojo_audio_decoder.cc
@@ -42,12 +42,11 @@ bool MojoAudioDecoder::IsPlatformDecoder() const {
return true;
}
-void MojoAudioDecoder::Initialize(
- const AudioDecoderConfig& config,
- CdmContext* cdm_context,
- const InitCB& init_cb,
- const OutputCB& output_cb,
- const WaitingForDecryptionKeyCB& /* waiting_for_decryption_key_cb */) {
+void MojoAudioDecoder::Initialize(const AudioDecoderConfig& config,
+ CdmContext* cdm_context,
+ const InitCB& init_cb,
+ const OutputCB& output_cb,
+ const WaitingCB& waiting_cb) {
DVLOG(1) << __func__;
DCHECK(task_runner_->BelongsToCurrentThread());
@@ -74,6 +73,7 @@ void MojoAudioDecoder::Initialize(
init_cb_ = init_cb;
output_cb_ = output_cb;
+ waiting_cb_ = waiting_cb;
// Using base::Unretained(this) is safe because |this| owns |remote_decoder_|,
// and the callback won't be dispatched if |remote_decoder_| is destroyed.
@@ -161,6 +161,13 @@ void MojoAudioDecoder::OnBufferDecoded(mojom::AudioBufferPtr buffer) {
output_cb_.Run(buffer.To<scoped_refptr<AudioBuffer>>());
}
+void MojoAudioDecoder::OnWaiting(WaitingReason reason) {
+ DVLOG(1) << __func__;
+ DCHECK(task_runner_->BelongsToCurrentThread());
+
+ waiting_cb_.Run(reason);
+}
+
void MojoAudioDecoder::OnConnectionError() {
DVLOG(1) << __func__;
DCHECK(task_runner_->BelongsToCurrentThread());
diff --git a/chromium/media/mojo/clients/mojo_audio_decoder.h b/chromium/media/mojo/clients/mojo_audio_decoder.h
index e3f5b1e1e21..2c77d69afdc 100644
--- a/chromium/media/mojo/clients/mojo_audio_decoder.h
+++ b/chromium/media/mojo/clients/mojo_audio_decoder.h
@@ -33,12 +33,11 @@ class MojoAudioDecoder : public AudioDecoder, public mojom::AudioDecoderClient {
// AudioDecoder implementation.
std::string GetDisplayName() const final;
bool IsPlatformDecoder() const final;
- void Initialize(
- const AudioDecoderConfig& config,
- CdmContext* cdm_context,
- const InitCB& init_cb,
- const OutputCB& output_cb,
- const WaitingForDecryptionKeyCB& waiting_for_decryption_key_cb) final;
+ void Initialize(const AudioDecoderConfig& config,
+ CdmContext* cdm_context,
+ const InitCB& init_cb,
+ const OutputCB& output_cb,
+ const WaitingCB& waiting_cb) final;
void Decode(scoped_refptr<DecoderBuffer> buffer,
const DecodeCB& decode_cb) final;
void Reset(const base::Closure& closure) final;
@@ -46,6 +45,7 @@ class MojoAudioDecoder : public AudioDecoder, public mojom::AudioDecoderClient {
// AudioDecoderClient implementation.
void OnBufferDecoded(mojom::AudioBufferPtr buffer) final;
+ void OnWaiting(WaitingReason reason) final;
void set_writer_capacity_for_testing(uint32_t capacity) {
writer_capacity_ = capacity;
@@ -83,11 +83,11 @@ class MojoAudioDecoder : public AudioDecoder, public mojom::AudioDecoderClient {
// Binding for AudioDecoderClient, bound to the |task_runner_|.
mojo::AssociatedBinding<AudioDecoderClient> client_binding_;
- // We call the following callbacks to pass the information to the pipeline.
- // |output_cb_| is permanent while other three are called only once,
- // |decode_cb_| and |reset_cb_| are replaced by every by Decode() and Reset().
InitCB init_cb_;
OutputCB output_cb_;
+ WaitingCB waiting_cb_;
+
+ // |decode_cb_| and |reset_cb_| are replaced on every Decode() and Reset().
DecodeCB decode_cb_;
base::Closure reset_cb_;
diff --git a/chromium/media/mojo/clients/mojo_audio_decoder_unittest.cc b/chromium/media/mojo/clients/mojo_audio_decoder_unittest.cc
index 44319b12fd1..aa7b71d94fa 100644
--- a/chromium/media/mojo/clients/mojo_audio_decoder_unittest.cc
+++ b/chromium/media/mojo/clients/mojo_audio_decoder_unittest.cc
@@ -18,6 +18,7 @@
#include "media/base/media_util.h"
#include "media/base/mock_filters.h"
#include "media/base/test_helpers.h"
+#include "media/base/waiting.h"
#include "media/mojo/clients/mojo_audio_decoder.h"
#include "media/mojo/interfaces/audio_decoder.mojom.h"
#include "media/mojo/services/mojo_audio_decoder_service.h"
@@ -78,6 +79,7 @@ class MojoAudioDecoderTest : public ::testing::Test {
// Completion callbacks.
MOCK_METHOD1(OnInitialized, void(bool));
MOCK_METHOD1(OnOutput, void(const scoped_refptr<AudioBuffer>&));
+ MOCK_METHOD1(OnWaiting, void(WaitingReason));
MOCK_METHOD1(OnDecoded, void(DecodeStatus));
MOCK_METHOD0(OnReset, void());
@@ -109,7 +111,8 @@ class MojoAudioDecoderTest : public ::testing::Test {
mock_audio_decoder_ = mock_audio_decoder.get();
EXPECT_CALL(*mock_audio_decoder_, Initialize(_, _, _, _, _))
- .WillRepeatedly(DoAll(SaveArg<3>(&output_cb_), RunCallback<2>(true)));
+ .WillRepeatedly(DoAll(SaveArg<3>(&output_cb_), SaveArg<4>(&waiting_cb_),
+ RunCallback<2>(true)));
EXPECT_CALL(*mock_audio_decoder_, Decode(_, _))
.WillRepeatedly(
DoAll(InvokeWithoutArgs(this, &MojoAudioDecoderTest::ReturnOutput),
@@ -141,7 +144,7 @@ class MojoAudioDecoderTest : public ::testing::Test {
base::Bind(&MojoAudioDecoderTest::OnInitialized,
base::Unretained(this)),
base::Bind(&MojoAudioDecoderTest::OnOutput, base::Unretained(this)),
- base::NullCallback());
+ base::Bind(&MojoAudioDecoderTest::OnWaiting, base::Unretained(this)));
RunLoop();
}
@@ -178,6 +181,8 @@ class MojoAudioDecoderTest : public ::testing::Test {
}
}
+ void WaitForKey() { waiting_cb_.Run(WaitingReason::kNoDecryptionKey); }
+
void DecodeMultipleTimes(int num_of_decodes) {
num_of_decodes_ = num_of_decodes;
KeepDecodingOrQuit();
@@ -218,6 +223,7 @@ class MojoAudioDecoderTest : public ::testing::Test {
std::unique_ptr<MojoAudioDecoder> mojo_audio_decoder_;
MojoCdmServiceContext mojo_cdm_service_context_;
AudioDecoder::OutputCB output_cb_;
+ WaitingCB waiting_cb_;
AudioTimestampHelper input_timestamp_helper_;
// The thread where the service runs. This provides test coverage in an
@@ -275,6 +281,19 @@ TEST_F(MojoAudioDecoderTest, Reset_DuringDecode_ChunkedWrite) {
DecodeAndReset();
}
+TEST_F(MojoAudioDecoderTest, WaitingForKey) {
+ Initialize();
+ EXPECT_CALL(*mock_audio_decoder_, Decode(_, _))
+ .WillOnce(
+ DoAll(InvokeWithoutArgs(this, &MojoAudioDecoderTest::WaitForKey),
+ RunCallback<1>(DecodeStatus::OK)));
+ EXPECT_CALL(*this, OnWaiting(WaitingReason::kNoDecryptionKey)).Times(1);
+ EXPECT_CALL(*this, OnDecoded(DecodeStatus::OK))
+ .WillOnce(InvokeWithoutArgs(this, &MojoAudioDecoderTest::QuitLoop));
+ Decode();
+ RunLoop();
+}
+
// TODO(xhwang): Add more tests.
} // namespace media
diff --git a/chromium/media/mojo/clients/mojo_cdm.cc b/chromium/media/mojo/clients/mojo_cdm.cc
index 8654081d5c1..cbe9ed3bdec 100644
--- a/chromium/media/mojo/clients/mojo_cdm.cc
+++ b/chromium/media/mojo/clients/mojo_cdm.cc
@@ -352,7 +352,7 @@ void MojoCdm::OnSessionClosed(const std::string& session_id) {
void MojoCdm::OnSessionKeysChange(
const std::string& session_id,
bool has_additional_usable_key,
- std::vector<mojom::CdmKeyInformationPtr> keys_info) {
+ std::vector<std::unique_ptr<CdmKeyInformation>> keys_info) {
DVLOG(2) << __func__;
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
@@ -368,13 +368,8 @@ void MojoCdm::OnSessionKeysChange(
}
}
- CdmKeysInfo key_data;
- key_data.reserve(keys_info.size());
- for (size_t i = 0; i < keys_info.size(); ++i) {
- key_data.push_back(keys_info[i].To<std::unique_ptr<CdmKeyInformation>>());
- }
session_keys_change_cb_.Run(session_id, has_additional_usable_key,
- std::move(key_data));
+ std::move(keys_info));
}
void MojoCdm::OnSessionExpirationUpdate(const std::string& session_id,
diff --git a/chromium/media/mojo/clients/mojo_cdm.h b/chromium/media/mojo/clients/mojo_cdm.h
index a5fd3cf8c42..2bd16b2a5cb 100644
--- a/chromium/media/mojo/clients/mojo_cdm.h
+++ b/chromium/media/mojo/clients/mojo_cdm.h
@@ -114,7 +114,7 @@ class MojoCdm : public ContentDecryptionModule,
void OnSessionKeysChange(
const std::string& session_id,
bool has_additional_usable_key,
- std::vector<mojom::CdmKeyInformationPtr> keys_info) final;
+ std::vector<std::unique_ptr<CdmKeyInformation>> keys_info) final;
void OnSessionExpirationUpdate(const std::string& session_id,
double new_expiry_time_sec) final;
diff --git a/chromium/media/mojo/clients/mojo_cdm_factory.cc b/chromium/media/mojo/clients/mojo_cdm_factory.cc
index 4250576b071..d1103f5ac3d 100644
--- a/chromium/media/mojo/clients/mojo_cdm_factory.cc
+++ b/chromium/media/mojo/clients/mojo_cdm_factory.cc
@@ -44,12 +44,11 @@ void MojoCdmFactory::Create(
return;
}
-// When MojoRenderer is used, the real Renderer is running in a remote process,
-// which cannot use an AesDecryptor running locally. In this case, always
-// create the MojoCdm, giving the remote CDM a chance to handle |key_system|.
-// Note: We should not run AesDecryptor in the browser process except for
-// testing. See http://crbug.com/441957
-#if !BUILDFLAG(ENABLE_MOJO_RENDERER)
+ // If AesDecryptor can be used, always use it here in the local process.
+ // Note: We should not run AesDecryptor in the browser process except for
+ // testing. See http://crbug.com/441957.
+ // Note: Previously, MojoRenderer did not work with local CDMs; this has
+ // been solved by using DecryptingRenderer. See http://crbug.com/913775.
if (CanUseAesDecryptor(key_system)) {
scoped_refptr<ContentDecryptionModule> cdm(
new AesDecryptor(session_message_cb, session_closed_cb,
@@ -58,7 +57,6 @@ void MojoCdmFactory::Create(
FROM_HERE, base::BindOnce(cdm_created_cb, cdm, ""));
return;
}
-#endif
mojom::ContentDecryptionModulePtr cdm_ptr;
interface_factory_->CreateCdm(key_system, mojo::MakeRequest(&cdm_ptr));
diff --git a/chromium/media/mojo/clients/mojo_cdm_unittest.cc b/chromium/media/mojo/clients/mojo_cdm_unittest.cc
index 36e7a20376f..72348ac5438 100644
--- a/chromium/media/mojo/clients/mojo_cdm_unittest.cc
+++ b/chromium/media/mojo/clients/mojo_cdm_unittest.cc
@@ -7,9 +7,9 @@
#include <memory>
#include "base/bind.h"
-#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/run_loop.h"
+#include "base/stl_util.h"
#include "base/test/test_message_loop.h"
#include "base/time/time.h"
#include "media/base/cdm_config.h"
@@ -163,7 +163,7 @@ class MojoCdmTest : public ::testing::Test {
// order to verify that the data is passed properly.
const CdmSessionType session_type = CdmSessionType::kTemporary;
const EmeInitDataType data_type = EmeInitDataType::WEBM;
- const std::vector<uint8_t> key_id(kKeyId, kKeyId + arraysize(kKeyId));
+ const std::vector<uint8_t> key_id(kKeyId, kKeyId + base::size(kKeyId));
std::string created_session_id;
if (expected_result == CONNECTION_ERROR_BEFORE) {
diff --git a/chromium/media/mojo/clients/mojo_renderer.cc b/chromium/media/mojo/clients/mojo_renderer.cc
index ef9b82c3448..08a4ee0a307 100644
--- a/chromium/media/mojo/clients/mojo_renderer.cc
+++ b/chromium/media/mojo/clients/mojo_renderer.cc
@@ -244,9 +244,14 @@ void MojoRenderer::OnEnded() {
void MojoRenderer::InitiateScopedSurfaceRequest(
const ReceiveSurfaceRequestTokenCB& receive_request_token_cb) {
+ DCHECK(remote_renderer_.is_bound());
DVLOG(1) << __func__;
- remote_renderer_->InitiateScopedSurfaceRequest(receive_request_token_cb);
+ if (encountered_error_) {
+ receive_request_token_cb.Run(base::UnguessableToken::Null());
+ } else {
+ remote_renderer_->InitiateScopedSurfaceRequest(receive_request_token_cb);
+ }
}
void MojoRenderer::OnError() {
@@ -277,6 +282,11 @@ void MojoRenderer::OnDurationChange(base::TimeDelta duration) {
client_->OnDurationChange(duration);
}
+void MojoRenderer::OnRemotePlayStateChange(media::MediaStatus::State state) {
+ DVLOG(2) << __func__ << ": state [" << static_cast<int>(state) << "]";
+ client_->OnRemotePlayStateChange(state);
+}
+
void MojoRenderer::OnVideoOpacityChange(bool opaque) {
DVLOG(2) << __func__ << ": " << opaque;
DCHECK(task_runner_->BelongsToCurrentThread());
@@ -305,10 +315,10 @@ void MojoRenderer::OnStatisticsUpdate(const PipelineStatistics& stats) {
client_->OnStatisticsUpdate(stats);
}
-void MojoRenderer::OnWaitingForDecryptionKey() {
+void MojoRenderer::OnWaiting(WaitingReason reason) {
DVLOG(1) << __func__;
DCHECK(task_runner_->BelongsToCurrentThread());
- client_->OnWaitingForDecryptionKey();
+ client_->OnWaiting(reason);
}
void MojoRenderer::OnConnectionError() {
diff --git a/chromium/media/mojo/clients/mojo_renderer.h b/chromium/media/mojo/clients/mojo_renderer.h
index 90a950c814c..4232b229239 100644
--- a/chromium/media/mojo/clients/mojo_renderer.h
+++ b/chromium/media/mojo/clients/mojo_renderer.h
@@ -82,9 +82,10 @@ class MojoRenderer : public Renderer, public mojom::RendererClient {
void OnVideoConfigChange(const VideoDecoderConfig& config) override;
void OnVideoNaturalSizeChange(const gfx::Size& size) override;
void OnVideoOpacityChange(bool opaque) override;
- void OnWaitingForDecryptionKey() override;
+ void OnWaiting(WaitingReason reason) override;
void OnStatisticsUpdate(const PipelineStatistics& stats) override;
void OnDurationChange(base::TimeDelta duration) override;
+ void OnRemotePlayStateChange(media::MediaStatus::State state) override;
// Binds |remote_renderer_| to the mojo message pipe. Can be called multiple
// times. If an error occurs during connection, OnConnectionError will be
diff --git a/chromium/media/mojo/clients/mojo_renderer_factory.cc b/chromium/media/mojo/clients/mojo_renderer_factory.cc
index 55be954298f..8107f206949 100644
--- a/chromium/media/mojo/clients/mojo_renderer_factory.cc
+++ b/chromium/media/mojo/clients/mojo_renderer_factory.cc
@@ -8,6 +8,7 @@
#include "base/single_thread_task_runner.h"
#include "media/mojo/clients/mojo_renderer.h"
+#include "media/renderers/decrypting_renderer.h"
#include "media/renderers/video_overlay_factory.h"
#include "mojo/public/cpp/bindings/interface_request.h"
#include "services/service_manager/public/cpp/connect.h"
@@ -48,9 +49,12 @@ std::unique_ptr<Renderer> MojoRendererFactory::CreateRenderer(
std::make_unique<VideoOverlayFactory>(get_gpu_factories_cb_.Run());
}
- return std::unique_ptr<Renderer>(
- new MojoRenderer(media_task_runner, std::move(overlay_factory),
- video_renderer_sink, GetRendererPtr()));
+ // MediaPlayerRendererClientFactory depends on |this| always returning a MR,
+ // since it uses a static_cast to use some MojoRenderer specific interfaces.
+ // Therefore, |this| should never return anything else than a MojoRenderer.
+ return std::make_unique<MojoRenderer>(media_task_runner,
+ std::move(overlay_factory),
+ video_renderer_sink, GetRendererPtr());
}
mojom::RendererPtr MojoRendererFactory::GetRendererPtr() {
diff --git a/chromium/media/mojo/clients/mojo_renderer_factory.h b/chromium/media/mojo/clients/mojo_renderer_factory.h
index 5096df53eec..91d39769e20 100644
--- a/chromium/media/mojo/clients/mojo_renderer_factory.h
+++ b/chromium/media/mojo/clients/mojo_renderer_factory.h
@@ -21,6 +21,18 @@ namespace media {
class GpuVideoAcceleratorFactories;
// The default factory class for creating MojoRenderer.
+//
+// The MojoRenderer should be thought of as a pure communication layer between
+// media::Pipeline and a media::Renderer in a different process.
+//
+// Implementors of new media::Renderer types are encouraged to create small
+// wrapper factories that use MRF, rather than creating derived MojoRenderer
+// types, or extending MRF. See DecryptingRendererFactory and
+// MediaPlayerRendererClientFactory for examples of small wrappers around MRF.
+//
+// NOTE: MediaPlayerRendererClientFactory uses MojoRenderer specific methods,
+// and uses a static_cast<MojoRenderer*> internally. |this| should
+// never return anything but a MojoRenderer. See crbug.com/919494.
class MojoRendererFactory : public RendererFactory {
public:
using GetGpuFactoriesCB = base::Callback<GpuVideoAcceleratorFactories*()>;
diff --git a/chromium/media/mojo/clients/mojo_video_decoder.cc b/chromium/media/mojo/clients/mojo_video_decoder.cc
index 55f78de5cb6..d9021e22baa 100644
--- a/chromium/media/mojo/clients/mojo_video_decoder.cc
+++ b/chromium/media/mojo/clients/mojo_video_decoder.cc
@@ -12,6 +12,7 @@
#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/scoped_refptr.h"
+#include "base/metrics/histogram_macros.h"
#include "base/single_thread_task_runner.h"
#include "base/unguessable_token.h"
#include "build/build_config.h"
@@ -29,6 +30,37 @@
#include "mojo/public/cpp/bindings/interface_request.h"
namespace media {
+namespace {
+
+void ReportMojoVideoDecoderInitializeStatusToUMAAndRunCB(
+ const VideoDecoder::InitCB& init_cb,
+ bool success) {
+ // Send the same histogram as GpuVideoDecoder to avoid breaking the existing
+ // tests.
+ // TODO(crbug.com/902968): Remove it after deprecating GpuVideoDecoder.
+ PipelineStatus status = success ? PIPELINE_OK : DECODER_ERROR_NOT_SUPPORTED;
+ UMA_HISTOGRAM_ENUMERATION("Media.GpuVideoDecoderInitializeStatus", status,
+ PIPELINE_STATUS_MAX + 1);
+
+ init_cb.Run(success);
+}
+
+void ReportMojoVideoDecoderErrorStatusToUMAAndRunCB(
+ const VideoDecoder::DecodeCB& decode_cb,
+ DecodeStatus status) {
+ // Send the same histogram as GpuVideoDecoder to avoid breaking the existing
+ // tests.
+ // TODO(crbug.com/902968): Remove it after deprecating GpuVideoDecoder.
+ if (status == DecodeStatus::DECODE_ERROR) {
+ UMA_HISTOGRAM_ENUMERATION("Media.GpuVideoDecoderError",
+ media::VideoDecodeAccelerator::PLATFORM_FAILURE,
+ media::VideoDecodeAccelerator::ERROR_MAX + 1);
+ }
+
+ decode_cb.Run(status);
+}
+
+} // namespace
// Provides a thread-safe channel for VideoFrame destruction events.
class MojoVideoFrameHandleReleaser
@@ -111,24 +143,23 @@ std::string MojoVideoDecoder::GetDisplayName() const {
return "MojoVideoDecoder";
}
-void MojoVideoDecoder::Initialize(
- const VideoDecoderConfig& config,
- bool low_delay,
- CdmContext* cdm_context,
- const InitCB& init_cb,
- const OutputCB& output_cb,
- const WaitingForDecryptionKeyCB& /* waiting_for_decryption_key_cb */) {
+void MojoVideoDecoder::Initialize(const VideoDecoderConfig& config,
+ bool low_delay,
+ CdmContext* cdm_context,
+ const InitCB& init_cb,
+ const OutputCB& output_cb,
+ const WaitingCB& waiting_cb) {
DVLOG(1) << __func__;
DCHECK(task_runner_->BelongsToCurrentThread());
+ InitCB bound_init_cb =
+ base::Bind(&ReportMojoVideoDecoderInitializeStatusToUMAAndRunCB, init_cb);
+
// Fail immediately if we know that the remote side cannot support |config|.
if (gpu_factories_ && !gpu_factories_->IsDecoderConfigSupported(config)) {
- // TODO(liberato): Remove bypass once D3D11VideoDecoder provides
- // SupportedVideoDecoderConfigs.
- if (!base::FeatureList::IsEnabled(kD3D11VideoDecoder)) {
- task_runner_->PostTask(FROM_HERE, base::BindRepeating(init_cb, false));
- return;
- }
+ task_runner_->PostTask(FROM_HERE,
+ base::BindRepeating(bound_init_cb, false));
+ return;
}
int cdm_id =
@@ -141,7 +172,7 @@ void MojoVideoDecoder::Initialize(
// is passed for reinitialization.
if (config.is_encrypted() && CdmContext::kInvalidCdmId == cdm_id) {
DVLOG(1) << __func__ << ": Invalid CdmContext.";
- task_runner_->PostTask(FROM_HERE, base::BindOnce(init_cb, false));
+ task_runner_->PostTask(FROM_HERE, base::BindOnce(bound_init_cb, false));
return;
}
@@ -149,13 +180,16 @@ void MojoVideoDecoder::Initialize(
BindRemoteDecoder();
if (has_connection_error_) {
- task_runner_->PostTask(FROM_HERE, base::BindRepeating(init_cb, false));
+ task_runner_->PostTask(FROM_HERE,
+ base::BindRepeating(bound_init_cb, false));
return;
}
initialized_ = false;
- init_cb_ = init_cb;
+ init_cb_ = bound_init_cb;
output_cb_ = output_cb;
+ waiting_cb_ = waiting_cb;
+
remote_decoder_->Initialize(
config, low_delay, cdm_id,
base::Bind(&MojoVideoDecoder::OnInitializeDone, base::Unretained(this)));
@@ -177,22 +211,25 @@ void MojoVideoDecoder::Decode(scoped_refptr<DecoderBuffer> buffer,
DVLOG(3) << __func__ << ": " << buffer->AsHumanReadableString();
DCHECK(task_runner_->BelongsToCurrentThread());
+ DecodeCB bound_decode_cb =
+ base::Bind(&ReportMojoVideoDecoderErrorStatusToUMAAndRunCB, decode_cb);
+
if (has_connection_error_) {
- task_runner_->PostTask(FROM_HERE,
- base::Bind(decode_cb, DecodeStatus::DECODE_ERROR));
+ task_runner_->PostTask(
+ FROM_HERE, base::Bind(bound_decode_cb, DecodeStatus::DECODE_ERROR));
return;
}
mojom::DecoderBufferPtr mojo_buffer =
mojo_decoder_buffer_writer_->WriteDecoderBuffer(std::move(buffer));
if (!mojo_buffer) {
- task_runner_->PostTask(FROM_HERE,
- base::Bind(decode_cb, DecodeStatus::DECODE_ERROR));
+ task_runner_->PostTask(
+ FROM_HERE, base::Bind(bound_decode_cb, DecodeStatus::DECODE_ERROR));
return;
}
uint64_t decode_id = decode_counter_++;
- pending_decodes_[decode_id] = decode_cb;
+ pending_decodes_[decode_id] = bound_decode_cb;
remote_decoder_->Decode(std::move(mojo_buffer),
base::Bind(&MojoVideoDecoder::OnDecodeDone,
base::Unretained(this), decode_id));
@@ -322,9 +359,18 @@ void MojoVideoDecoder::BindRemoteDecoder() {
std::move(command_buffer_id), target_color_space_);
}
+void MojoVideoDecoder::OnWaiting(WaitingReason reason) {
+ DVLOG(2) << __func__;
+ DCHECK(task_runner_->BelongsToCurrentThread());
+
+ waiting_cb_.Run(reason);
+}
+
void MojoVideoDecoder::RequestOverlayInfo(bool restart_for_transitions) {
DVLOG(2) << __func__;
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK(request_overlay_info_cb_);
+
overlay_info_requested_ = true;
request_overlay_info_cb_.Run(
restart_for_transitions,
@@ -334,6 +380,8 @@ void MojoVideoDecoder::RequestOverlayInfo(bool restart_for_transitions) {
void MojoVideoDecoder::OnOverlayInfoChanged(const OverlayInfo& overlay_info) {
DVLOG(2) << __func__;
+ DCHECK(task_runner_->BelongsToCurrentThread());
+
if (has_connection_error_)
return;
remote_decoder_->OnOverlayInfoChanged(overlay_info);
diff --git a/chromium/media/mojo/clients/mojo_video_decoder.h b/chromium/media/mojo/clients/mojo_video_decoder.h
index aa6cea8ff35..a3db308a875 100644
--- a/chromium/media/mojo/clients/mojo_video_decoder.h
+++ b/chromium/media/mojo/clients/mojo_video_decoder.h
@@ -45,13 +45,12 @@ class MojoVideoDecoder final : public VideoDecoder,
// VideoDecoder implementation.
std::string GetDisplayName() const final;
bool IsPlatformDecoder() const final;
- void Initialize(
- const VideoDecoderConfig& config,
- bool low_delay,
- CdmContext* cdm_context,
- const InitCB& init_cb,
- const OutputCB& output_cb,
- const WaitingForDecryptionKeyCB& waiting_for_decryption_key_cb) final;
+ void Initialize(const VideoDecoderConfig& config,
+ bool low_delay,
+ CdmContext* cdm_context,
+ const InitCB& init_cb,
+ const OutputCB& output_cb,
+ const WaitingCB& waiting_cb) final;
void Decode(scoped_refptr<DecoderBuffer> buffer,
const DecodeCB& decode_cb) final;
void Reset(const base::Closure& closure) final;
@@ -64,6 +63,7 @@ class MojoVideoDecoder final : public VideoDecoder,
const scoped_refptr<VideoFrame>& frame,
bool can_read_without_stalling,
const base::Optional<base::UnguessableToken>& release_token) final;
+ void OnWaiting(WaitingReason reason) final;
void RequestOverlayInfo(bool restart_for_transitions) final;
void set_writer_capacity_for_testing(uint32_t capacity) {
@@ -99,6 +99,7 @@ class MojoVideoDecoder final : public VideoDecoder,
InitCB init_cb_;
OutputCB output_cb_;
+ WaitingCB waiting_cb_;
uint64_t decode_counter_ = 0;
std::map<uint64_t, DecodeCB> pending_decodes_;
base::Closure reset_cb_;
diff --git a/chromium/media/mojo/common/media_type_converters.cc b/chromium/media/mojo/common/media_type_converters.cc
index 3841de42cbf..ea1e77d9106 100644
--- a/chromium/media/mojo/common/media_type_converters.cc
+++ b/chromium/media/mojo/common/media_type_converters.cc
@@ -11,7 +11,6 @@
#include "base/logging.h"
#include "base/numerics/safe_conversions.h"
#include "media/base/audio_buffer.h"
-#include "media/base/cdm_key_information.h"
#include "media/base/decoder_buffer.h"
#include "media/base/decrypt_config.h"
#include "media/base/subsample_entry.h"
@@ -119,27 +118,6 @@ TypeConverter<scoped_refptr<media::DecoderBuffer>,
}
// static
-media::mojom::CdmKeyInformationPtr TypeConverter<
- media::mojom::CdmKeyInformationPtr,
- media::CdmKeyInformation>::Convert(const media::CdmKeyInformation& input) {
- media::mojom::CdmKeyInformationPtr info(
- media::mojom::CdmKeyInformation::New());
- info->key_id = input.key_id;
- info->status = input.status;
- info->system_code = input.system_code;
- return info;
-}
-
-// static
-std::unique_ptr<media::CdmKeyInformation>
-TypeConverter<std::unique_ptr<media::CdmKeyInformation>,
- media::mojom::CdmKeyInformationPtr>::
- Convert(const media::mojom::CdmKeyInformationPtr& input) {
- return std::make_unique<media::CdmKeyInformation>(
- input->key_id, input->status, input->system_code);
-}
-
-// static
media::mojom::AudioBufferPtr
TypeConverter<media::mojom::AudioBufferPtr, scoped_refptr<media::AudioBuffer>>::
Convert(const scoped_refptr<media::AudioBuffer>& input) {
diff --git a/chromium/media/mojo/common/media_type_converters.h b/chromium/media/mojo/common/media_type_converters.h
index 3a8d219c0b0..95bd75d972a 100644
--- a/chromium/media/mojo/common/media_type_converters.h
+++ b/chromium/media/mojo/common/media_type_converters.h
@@ -16,7 +16,6 @@ namespace media {
class AudioBuffer;
class DecoderBuffer;
class DecryptConfig;
-struct CdmKeyInformation;
}
// These are specializations of mojo::TypeConverter and have to be in the mojo
@@ -48,19 +47,6 @@ struct TypeConverter<scoped_refptr<media::DecoderBuffer>,
};
template <>
-struct TypeConverter<media::mojom::CdmKeyInformationPtr,
- media::CdmKeyInformation> {
- static media::mojom::CdmKeyInformationPtr Convert(
- const media::CdmKeyInformation& input);
-};
-template <>
-struct TypeConverter<std::unique_ptr<media::CdmKeyInformation>,
- media::mojom::CdmKeyInformationPtr> {
- static std::unique_ptr<media::CdmKeyInformation> Convert(
- const media::mojom::CdmKeyInformationPtr& input);
-};
-
-template <>
struct TypeConverter<media::mojom::AudioBufferPtr,
scoped_refptr<media::AudioBuffer>> {
static media::mojom::AudioBufferPtr Convert(
diff --git a/chromium/media/mojo/common/media_type_converters_unittest.cc b/chromium/media/mojo/common/media_type_converters_unittest.cc
index 336b549a7ac..1074c3bfd11 100644
--- a/chromium/media/mojo/common/media_type_converters_unittest.cc
+++ b/chromium/media/mojo/common/media_type_converters_unittest.cc
@@ -9,7 +9,7 @@
#include <string.h>
#include <memory>
-#include "base/macros.h"
+#include "base/stl_util.h"
#include "media/base/audio_buffer.h"
#include "media/base/audio_decoder_config.h"
#include "media/base/decoder_buffer.h"
@@ -61,8 +61,8 @@ void CompareAudioBuffers(SampleFormat sample_format,
TEST(MediaTypeConvertersTest, ConvertDecoderBuffer_Normal) {
const uint8_t kData[] = "hello, world";
const uint8_t kSideData[] = "sideshow bob";
- const size_t kDataSize = arraysize(kData);
- const size_t kSideDataSize = arraysize(kSideData);
+ const size_t kDataSize = base::size(kData);
+ const size_t kSideDataSize = base::size(kSideData);
// Original.
scoped_refptr<DecoderBuffer> buffer(DecoderBuffer::CopyFrom(
@@ -108,7 +108,7 @@ TEST(MediaTypeConvertersTest, ConvertDecoderBuffer_EOS) {
TEST(MediaTypeConvertersTest, ConvertDecoderBuffer_KeyFrame) {
const uint8_t kData[] = "hello, world";
- const size_t kDataSize = arraysize(kData);
+ const size_t kDataSize = base::size(kData);
// Original.
scoped_refptr<DecoderBuffer> buffer(DecoderBuffer::CopyFrom(
@@ -129,7 +129,7 @@ TEST(MediaTypeConvertersTest, ConvertDecoderBuffer_KeyFrame) {
TEST(MediaTypeConvertersTest, ConvertDecoderBuffer_CencEncryptedBuffer) {
const uint8_t kData[] = "hello, world";
- const size_t kDataSize = arraysize(kData);
+ const size_t kDataSize = base::size(kData);
const char kKeyId[] = "00112233445566778899aabbccddeeff";
const char kIv[] = "0123456789abcdef";
@@ -165,7 +165,7 @@ TEST(MediaTypeConvertersTest, ConvertDecoderBuffer_CencEncryptedBuffer) {
TEST(MediaTypeConvertersTest, ConvertDecoderBuffer_CbcsEncryptedBuffer) {
const uint8_t kData[] = "hello, world";
- const size_t kDataSize = arraysize(kData);
+ const size_t kDataSize = base::size(kData);
const char kKeyId[] = "00112233445566778899aabbccddeeff";
const char kIv[] = "0123456789abcdef";
diff --git a/chromium/media/mojo/common/mojo_decoder_buffer_converter_unittest.cc b/chromium/media/mojo/common/mojo_decoder_buffer_converter_unittest.cc
index 2a798e8e490..afd0a87a447 100644
--- a/chromium/media/mojo/common/mojo_decoder_buffer_converter_unittest.cc
+++ b/chromium/media/mojo/common/mojo_decoder_buffer_converter_unittest.cc
@@ -8,9 +8,9 @@
#include <memory>
-#include "base/macros.h"
#include "base/message_loop/message_loop.h"
#include "base/run_loop.h"
+#include "base/stl_util.h"
#include "base/test/mock_callback.h"
#include "media/base/decoder_buffer.h"
#include "media/base/decrypt_config.h"
@@ -62,8 +62,8 @@ TEST(MojoDecoderBufferConverterTest, ConvertDecoderBuffer_Normal) {
base::MessageLoop message_loop;
const uint8_t kData[] = "hello, world";
const uint8_t kSideData[] = "sideshow bob";
- const size_t kDataSize = arraysize(kData);
- const size_t kSideDataSize = arraysize(kSideData);
+ const size_t kDataSize = base::size(kData);
+ const size_t kSideDataSize = base::size(kSideData);
scoped_refptr<DecoderBuffer> buffer(DecoderBuffer::CopyFrom(
reinterpret_cast<const uint8_t*>(&kData), kDataSize,
@@ -99,7 +99,7 @@ TEST(MojoDecoderBufferConverterTest, ConvertDecoderBuffer_ZeroByteBuffer) {
TEST(MojoDecoderBufferConverterTest, ConvertDecoderBuffer_KeyFrame) {
base::MessageLoop message_loop;
const uint8_t kData[] = "hello, world";
- const size_t kDataSize = arraysize(kData);
+ const size_t kDataSize = base::size(kData);
scoped_refptr<DecoderBuffer> buffer(DecoderBuffer::CopyFrom(
reinterpret_cast<const uint8_t*>(&kData), kDataSize));
@@ -113,7 +113,7 @@ TEST(MojoDecoderBufferConverterTest, ConvertDecoderBuffer_KeyFrame) {
TEST(MojoDecoderBufferConverterTest, ConvertDecoderBuffer_EncryptedBuffer) {
base::MessageLoop message_loop;
const uint8_t kData[] = "hello, world";
- const size_t kDataSize = arraysize(kData);
+ const size_t kDataSize = base::size(kData);
const char kKeyId[] = "00112233445566778899aabbccddeeff";
const char kIv[] = "0123456789abcdef";
@@ -152,7 +152,7 @@ TEST(MojoDecoderBufferConverterTest, ConvertDecoderBuffer_EncryptedBuffer) {
TEST(MojoDecoderBufferConverterTest, Chunked) {
base::MessageLoop message_loop;
const uint8_t kData[] = "Lorem ipsum dolor sit amet, consectetur cras amet";
- const size_t kDataSize = arraysize(kData);
+ const size_t kDataSize = base::size(kData);
scoped_refptr<DecoderBuffer> buffer =
DecoderBuffer::CopyFrom(kData, kDataSize);
@@ -165,7 +165,7 @@ TEST(MojoDecoderBufferConverterTest, Chunked) {
TEST(MojoDecoderBufferConverterTest, WriterSidePipeError) {
base::MessageLoop message_loop;
const uint8_t kData[] = "Lorem ipsum dolor sit amet, consectetur cras amet";
- const size_t kDataSize = arraysize(kData);
+ const size_t kDataSize = base::size(kData);
scoped_refptr<DecoderBuffer> media_buffer =
DecoderBuffer::CopyFrom(kData, kDataSize);
@@ -200,7 +200,7 @@ TEST(MojoDecoderBufferConverterTest, ConcurrentDecoderBuffers) {
// Three buffers: normal, EOS, normal.
const uint8_t kData[] = "Hello, world";
- const size_t kDataSize = arraysize(kData);
+ const size_t kDataSize = base::size(kData);
scoped_refptr<DecoderBuffer> media_buffer1 =
DecoderBuffer::CopyFrom(kData, kDataSize);
scoped_refptr<DecoderBuffer> media_buffer2(DecoderBuffer::CreateEOSBuffer());
@@ -254,7 +254,7 @@ TEST(MojoDecoderBufferConverterTest, FlushAfterRead) {
base::RunLoop run_loop;
const uint8_t kData[] = "Lorem ipsum dolor sit amet, consectetur cras amet";
- const size_t kDataSize = arraysize(kData);
+ const size_t kDataSize = base::size(kData);
scoped_refptr<DecoderBuffer> media_buffer =
DecoderBuffer::CopyFrom(kData, kDataSize);
@@ -275,7 +275,7 @@ TEST(MojoDecoderBufferConverterTest, FlushBeforeRead) {
base::RunLoop run_loop;
const uint8_t kData[] = "Lorem ipsum dolor sit amet, consectetur cras amet";
- const size_t kDataSize = arraysize(kData);
+ const size_t kDataSize = base::size(kData);
scoped_refptr<DecoderBuffer> media_buffer =
DecoderBuffer::CopyFrom(kData, kDataSize);
@@ -303,7 +303,7 @@ TEST(MojoDecoderBufferConverterTest, FlushBeforeChunkedRead) {
base::RunLoop run_loop;
const uint8_t kData[] = "Lorem ipsum dolor sit amet, consectetur cras amet";
- const size_t kDataSize = arraysize(kData);
+ const size_t kDataSize = base::size(kData);
scoped_refptr<DecoderBuffer> media_buffer =
DecoderBuffer::CopyFrom(kData, kDataSize);
@@ -332,7 +332,7 @@ TEST(MojoDecoderBufferConverterTest, FlushDuringChunkedRead) {
base::RunLoop run_loop;
const uint8_t kData[] = "Lorem ipsum dolor sit amet, consectetur cras amet";
- const size_t kDataSize = arraysize(kData);
+ const size_t kDataSize = base::size(kData);
scoped_refptr<DecoderBuffer> media_buffer =
DecoderBuffer::CopyFrom(kData, kDataSize);
@@ -368,7 +368,7 @@ TEST(MojoDecoderBufferConverterTest, FlushDuringConcurrentReads) {
// Three buffers: normal, EOS, normal.
const uint8_t kData[] = "Hello, world";
- const size_t kDataSize = arraysize(kData);
+ const size_t kDataSize = base::size(kData);
auto media_buffer1 = DecoderBuffer::CopyFrom(kData, kDataSize);
auto media_buffer2 = DecoderBuffer::CreateEOSBuffer();
auto media_buffer3 = DecoderBuffer::CopyFrom(kData, kDataSize);
diff --git a/chromium/media/mojo/interfaces/BUILD.gn b/chromium/media/mojo/interfaces/BUILD.gn
index fe46891f41a..ec8b4bbc78a 100644
--- a/chromium/media/mojo/interfaces/BUILD.gn
+++ b/chromium/media/mojo/interfaces/BUILD.gn
@@ -67,6 +67,15 @@ mojom("interfaces") {
# remove this dependency.
public_deps += [ "//sandbox/mac/mojom" ]
}
+
+ # Windows component builds require this to avoid link errors related to URL
+ # classes. Enabling this for other builds would result in ODR violations.
+ # TODO(crbug.com/921170): Remove this once the issue is resolved.
+ if (is_win && is_component_build) {
+ export_class_attribute_blink = "PLATFORM_EXPORT"
+ export_define_blink = "BLINK_PLATFORM_IMPLEMENTATION=1"
+ export_header_blink = "third_party/blink/public/platform/web_common.h"
+ }
}
mojom("constants") {
@@ -116,6 +125,7 @@ source_set("unit_tests") {
sources = [
"audio_decoder_config_struct_traits_unittest.cc",
+ "cdm_key_information_mojom_traits_unittest.cc",
"encryption_scheme_struct_traits_unittest.cc",
"video_decoder_config_struct_traits_unittest.cc",
"video_frame_struct_traits_unittest.cc",
diff --git a/chromium/media/mojo/interfaces/audio_decoder.mojom b/chromium/media/mojo/interfaces/audio_decoder.mojom
index 4ee086b0df2..683624023c6 100644
--- a/chromium/media/mojo/interfaces/audio_decoder.mojom
+++ b/chromium/media/mojo/interfaces/audio_decoder.mojom
@@ -42,4 +42,8 @@ interface AudioDecoder {
interface AudioDecoderClient {
// Sends the decoded audio buffer back to the proxy.
OnBufferDecoded(AudioBuffer buffer);
+
+ // Called when the remote decoder is waiting because of |reason|, e.g. waiting
+ // for decryption key.
+ OnWaiting(WaitingReason reason);
};
diff --git a/chromium/media/mojo/interfaces/audio_decoder_config_struct_traits_unittest.cc b/chromium/media/mojo/interfaces/audio_decoder_config_struct_traits_unittest.cc
index 00c81d51101..6bf0f8e1d4f 100644
--- a/chromium/media/mojo/interfaces/audio_decoder_config_struct_traits_unittest.cc
+++ b/chromium/media/mojo/interfaces/audio_decoder_config_struct_traits_unittest.cc
@@ -6,7 +6,7 @@
#include <utility>
-#include "base/macros.h"
+#include "base/stl_util.h"
#include "media/base/audio_decoder_config.h"
#include "media/base/media_util.h"
#include "mojo/public/cpp/base/time_mojom_traits.h"
@@ -17,7 +17,7 @@ namespace media {
TEST(AudioDecoderConfigStructTraitsTest, ConvertAudioDecoderConfig_Normal) {
const uint8_t kExtraData[] = "input extra data";
const std::vector<uint8_t> kExtraDataVector(
- &kExtraData[0], &kExtraData[0] + arraysize(kExtraData));
+ &kExtraData[0], &kExtraData[0] + base::size(kExtraData));
AudioDecoderConfig input;
input.Initialize(kCodecAAC, kSampleFormatU8, CHANNEL_LAYOUT_SURROUND, 48000,
diff --git a/chromium/media/mojo/interfaces/cdm_key_information.typemap b/chromium/media/mojo/interfaces/cdm_key_information.typemap
new file mode 100644
index 00000000000..89c293033a6
--- /dev/null
+++ b/chromium/media/mojo/interfaces/cdm_key_information.typemap
@@ -0,0 +1,23 @@
+# Copyright 2018 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+mojom = "//media/mojo/interfaces/content_decryption_module.mojom"
+
+public_headers = [ "//media/base/cdm_key_information.h" ]
+
+traits_headers =
+ [ "//media/mojo/interfaces/cdm_key_information_mojom_traits.h" ]
+
+sources = [
+ "//media/mojo/interfaces/cdm_key_information_mojom_traits.cc",
+]
+
+public_deps = [
+ "//media",
+]
+
+type_mappings = [
+ "media.mojom.CdmKeyInformation=std::unique_ptr<media::CdmKeyInformation>[move_only]",
+ "media.mojom.CdmKeyStatus=media::CdmKeyInformation::KeyStatus",
+]
diff --git a/chromium/media/mojo/interfaces/cdm_key_information_mojom_traits.cc b/chromium/media/mojo/interfaces/cdm_key_information_mojom_traits.cc
new file mode 100644
index 00000000000..7a3ce049495
--- /dev/null
+++ b/chromium/media/mojo/interfaces/cdm_key_information_mojom_traits.cc
@@ -0,0 +1,85 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/mojo/interfaces/cdm_key_information_mojom_traits.h"
+
+#include "base/logging.h"
+
+namespace mojo {
+
+using MojomKeyStatus = media::mojom::CdmKeyStatus;
+using NativeKeyStatus = media::CdmKeyInformation::KeyStatus;
+
+// static
+MojomKeyStatus EnumTraits<MojomKeyStatus, NativeKeyStatus>::ToMojom(
+ NativeKeyStatus error) {
+ switch (error) {
+ case NativeKeyStatus::USABLE:
+ return MojomKeyStatus::USABLE;
+ case NativeKeyStatus::INTERNAL_ERROR:
+ return MojomKeyStatus::INTERNAL_ERROR;
+ case NativeKeyStatus::EXPIRED:
+ return MojomKeyStatus::EXPIRED;
+ case NativeKeyStatus::OUTPUT_RESTRICTED:
+ return MojomKeyStatus::OUTPUT_RESTRICTED;
+ case NativeKeyStatus::OUTPUT_DOWNSCALED:
+ return MojomKeyStatus::OUTPUT_DOWNSCALED;
+ case NativeKeyStatus::KEY_STATUS_PENDING:
+ return MojomKeyStatus::KEY_STATUS_PENDING;
+ case NativeKeyStatus::RELEASED:
+ return MojomKeyStatus::RELEASED;
+ }
+ NOTREACHED();
+ return MojomKeyStatus::INTERNAL_ERROR;
+}
+
+// static
+bool EnumTraits<MojomKeyStatus, NativeKeyStatus>::FromMojom(
+ MojomKeyStatus error,
+ NativeKeyStatus* out) {
+ switch (error) {
+ case MojomKeyStatus::USABLE:
+ *out = NativeKeyStatus::USABLE;
+ return true;
+ case MojomKeyStatus::INTERNAL_ERROR:
+ *out = NativeKeyStatus::INTERNAL_ERROR;
+ return true;
+ case MojomKeyStatus::EXPIRED:
+ *out = NativeKeyStatus::EXPIRED;
+ return true;
+ case MojomKeyStatus::OUTPUT_RESTRICTED:
+ *out = NativeKeyStatus::OUTPUT_RESTRICTED;
+ return true;
+ case MojomKeyStatus::OUTPUT_DOWNSCALED:
+ *out = NativeKeyStatus::OUTPUT_DOWNSCALED;
+ return true;
+ case MojomKeyStatus::KEY_STATUS_PENDING:
+ *out = NativeKeyStatus::KEY_STATUS_PENDING;
+ return true;
+ case MojomKeyStatus::RELEASED:
+ *out = NativeKeyStatus::RELEASED;
+ return true;
+ }
+ NOTREACHED();
+ return false;
+}
+
+// static
+bool StructTraits<media::mojom::CdmKeyInformationDataView,
+ std::unique_ptr<media::CdmKeyInformation>>::
+ Read(media::mojom::CdmKeyInformationDataView input,
+ std::unique_ptr<media::CdmKeyInformation>* output) {
+ mojo::ArrayDataView<uint8_t> key_id;
+ input.GetKeyIdDataView(&key_id);
+
+ NativeKeyStatus status;
+ if (!input.ReadStatus(&status))
+ return false;
+
+ *output = std::make_unique<media::CdmKeyInformation>(
+ key_id.data(), key_id.size(), status, input.system_code());
+ return true;
+}
+
+} // namespace mojo \ No newline at end of file
diff --git a/chromium/media/mojo/interfaces/cdm_key_information_mojom_traits.h b/chromium/media/mojo/interfaces/cdm_key_information_mojom_traits.h
new file mode 100644
index 00000000000..eb047224e6f
--- /dev/null
+++ b/chromium/media/mojo/interfaces/cdm_key_information_mojom_traits.h
@@ -0,0 +1,47 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_MOJO_INTERFACES_CDM_KEY_INFORMATION_MOJOM_TRAITS_H_
+#define MEDIA_MOJO_INTERFACES_CDM_KEY_INFORMATION_MOJOM_TRAITS_H_
+
+#include "media/base/cdm_key_information.h"
+#include "media/mojo/interfaces/content_decryption_module.mojom.h"
+
+namespace mojo {
+
+template <>
+struct EnumTraits<media::mojom::CdmKeyStatus,
+ media::CdmKeyInformation::KeyStatus> {
+ static media::mojom::CdmKeyStatus ToMojom(
+ media::CdmKeyInformation::KeyStatus key_status);
+
+ static bool FromMojom(media::mojom::CdmKeyStatus input,
+ media::CdmKeyInformation::KeyStatus* out);
+};
+
+template <>
+struct StructTraits<media::mojom::CdmKeyInformationDataView,
+ std::unique_ptr<media::CdmKeyInformation>> {
+ static const std::vector<uint8_t>& key_id(
+ const std::unique_ptr<media::CdmKeyInformation>& input) {
+ return input->key_id;
+ }
+
+ static media::CdmKeyInformation::KeyStatus status(
+ const std::unique_ptr<media::CdmKeyInformation>& input) {
+ return input->status;
+ }
+
+ static uint32_t system_code(
+ const std::unique_ptr<media::CdmKeyInformation>& input) {
+ return input->system_code;
+ }
+
+ static bool Read(media::mojom::CdmKeyInformationDataView input,
+ std::unique_ptr<media::CdmKeyInformation>* output);
+};
+
+} // namespace mojo
+
+#endif // MEDIA_MOJO_INTERFACES_CDM_KEY_INFORMATION_MOJOM_TRAITS_H_
diff --git a/chromium/media/mojo/interfaces/cdm_key_information_mojom_traits_unittest.cc b/chromium/media/mojo/interfaces/cdm_key_information_mojom_traits_unittest.cc
new file mode 100644
index 00000000000..916b4c713e9
--- /dev/null
+++ b/chromium/media/mojo/interfaces/cdm_key_information_mojom_traits_unittest.cc
@@ -0,0 +1,26 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/mojo/interfaces/cdm_key_information_mojom_traits.h"
+
+#include "media/base/cdm_key_information.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+TEST(CdmKeyInformationStructTraitsTest, ConvertCdmKeyInformation) {
+ auto input = std::make_unique<CdmKeyInformation>(
+ "key_id", CdmKeyInformation::KeyStatus::USABLE, 23);
+ std::vector<uint8_t> data =
+ media::mojom::CdmKeyInformation::Serialize(&input);
+
+ std::unique_ptr<CdmKeyInformation> output;
+ EXPECT_TRUE(
+ media::mojom::CdmKeyInformation::Deserialize(std::move(data), &output));
+ EXPECT_EQ(input->key_id, output->key_id);
+ EXPECT_EQ(input->status, output->status);
+ EXPECT_EQ(input->system_code, output->system_code);
+}
+
+} // namespace media
diff --git a/chromium/media/mojo/interfaces/content_decryption_module.mojom b/chromium/media/mojo/interfaces/content_decryption_module.mojom
index 612932f9d72..61994ad3125 100644
--- a/chromium/media/mojo/interfaces/content_decryption_module.mojom
+++ b/chromium/media/mojo/interfaces/content_decryption_module.mojom
@@ -21,8 +21,15 @@ enum CdmSessionType;
enum CdmMessageType;
// See media::CdmKeyInformation::KeyStatus
-[Native]
-enum CdmKeyStatus;
+enum CdmKeyStatus {
+ USABLE,
+ INTERNAL_ERROR,
+ EXPIRED,
+ OUTPUT_RESTRICTED,
+ OUTPUT_DOWNSCALED,
+ KEY_STATUS_PENDING,
+ RELEASED,
+};
// See media::HdcpVersion
[Native]
diff --git a/chromium/media/mojo/interfaces/content_decryption_module.typemap b/chromium/media/mojo/interfaces/content_decryption_module.typemap
index 60c33b07fd5..45160097114 100644
--- a/chromium/media/mojo/interfaces/content_decryption_module.typemap
+++ b/chromium/media/mojo/interfaces/content_decryption_module.typemap
@@ -21,7 +21,6 @@ deps = [
type_mappings = [
"media.mojom.CdmConfig=media::CdmConfig",
- "media.mojom.CdmKeyStatus=media::CdmKeyInformation::KeyStatus",
"media.mojom.CdmPromiseResult.Exception=media::CdmPromise::Exception",
"media.mojom.CdmSessionType=media::CdmSessionType",
"media.mojom.CdmMessageType=media::CdmMessageType",
diff --git a/chromium/media/mojo/interfaces/media_types.mojom b/chromium/media/mojo/interfaces/media_types.mojom
index 1bfac91c47c..a1fa1c649c5 100644
--- a/chromium/media/mojo/interfaces/media_types.mojom
+++ b/chromium/media/mojo/interfaces/media_types.mojom
@@ -62,6 +62,10 @@ enum VideoPixelFormat;
[Native]
enum VideoRotation;
+// See media/base/waiting.h for descriptions.
+[Native]
+enum WaitingReason;
+
// See media/base/watch_time_keys.h for descriptions.
[Native]
enum WatchTimeKey;
@@ -74,6 +78,10 @@ enum EncryptionMode;
[Native]
enum MediaContainerName;
+// See media/base/media_status.h for description.
+[Native]
+enum MediaStatusState;
+
// This defines a mojo transport format for media::EncryptionPattern
// See media/base/encryption_pattern.h for description.
struct EncryptionPattern {
diff --git a/chromium/media/mojo/interfaces/media_types.typemap b/chromium/media/mojo/interfaces/media_types.typemap
index b6ac3c15acf..ec45153b222 100644
--- a/chromium/media/mojo/interfaces/media_types.typemap
+++ b/chromium/media/mojo/interfaces/media_types.typemap
@@ -15,6 +15,7 @@ public_headers = [
"//media/base/encryption_scheme.h",
"//media/base/hdr_metadata.h",
"//media/base/media_log_event.h",
+ "//media/base/media_status.h",
"//media/base/output_device_info.h",
"//media/base/pipeline_status.h",
"//media/base/sample_format.h",
@@ -22,6 +23,7 @@ public_headers = [
"//media/base/video_codecs.h",
"//media/base/video_rotation.h",
"//media/base/video_types.h",
+ "//media/base/waiting.h",
"//media/base/watch_time_keys.h",
]
@@ -53,6 +55,8 @@ type_mappings = [
"media.mojom.VideoCodecProfile=media::VideoCodecProfile",
"media.mojom.VideoPixelFormat=media::VideoPixelFormat",
"media.mojom.VideoRotation=media::VideoRotation",
+ "media.mojom.WaitingReason=media::WaitingReason",
"media.mojom.WatchTimeKey=media::WatchTimeKey",
"media.mojom.EncryptionPattern=media::EncryptionPattern",
+ "media.mojom.MediaStatusState=media::MediaStatus::State",
]
diff --git a/chromium/media/mojo/interfaces/renderer.mojom b/chromium/media/mojo/interfaces/renderer.mojom
index 9f17e04c30e..3b601e18571 100644
--- a/chromium/media/mojo/interfaces/renderer.mojom
+++ b/chromium/media/mojo/interfaces/renderer.mojom
@@ -87,11 +87,17 @@ interface RendererClient {
// media_types.mojom.
OnStatisticsUpdate(PipelineStatistics stats);
- // Called when the remote renderering service is waiting on the decryption
- // key.
- OnWaitingForDecryptionKey();
+ // Called when the remote renderering service is waiting for |reason|,
+ // e.g. waiting for decryption key.
+ OnWaiting(WaitingReason reason);
// Executed the first time the metadata is updated, and whenever the duration
// changes.
OnDurationChange(mojo_base.mojom.TimeDelta duration);
+
+ // Executed whenever a renderer receives notification of a status change that
+ // was not originated by its owner.
+ // Only used with the FlingingRenderer (when external devices play/pause the
+ // video playing remotely).
+ OnRemotePlayStateChange(MediaStatusState state);
};
diff --git a/chromium/media/mojo/interfaces/supported_video_decoder_config_struct_traits.cc b/chromium/media/mojo/interfaces/supported_video_decoder_config_struct_traits.cc
new file mode 100644
index 00000000000..812d86a63cc
--- /dev/null
+++ b/chromium/media/mojo/interfaces/supported_video_decoder_config_struct_traits.cc
@@ -0,0 +1,32 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/mojo/interfaces/supported_video_decoder_config_struct_traits.h"
+
+namespace mojo {
+
+// static
+bool StructTraits<media::mojom::SupportedVideoDecoderConfigDataView,
+ media::SupportedVideoDecoderConfig>::
+ Read(media::mojom::SupportedVideoDecoderConfigDataView input,
+ media::SupportedVideoDecoderConfig* output) {
+ if (!input.ReadProfileMin(&output->profile_min))
+ return false;
+
+ if (!input.ReadProfileMax(&output->profile_max))
+ return false;
+
+ if (!input.ReadCodedSizeMin(&output->coded_size_min))
+ return false;
+
+ if (!input.ReadCodedSizeMax(&output->coded_size_max))
+ return false;
+
+ output->allow_encrypted = input.allow_encrypted();
+ output->require_encrypted = input.require_encrypted();
+
+ return true;
+}
+
+} // namespace mojo
diff --git a/chromium/media/mojo/interfaces/supported_video_decoder_config_struct_traits.h b/chromium/media/mojo/interfaces/supported_video_decoder_config_struct_traits.h
new file mode 100644
index 00000000000..875615759f4
--- /dev/null
+++ b/chromium/media/mojo/interfaces/supported_video_decoder_config_struct_traits.h
@@ -0,0 +1,54 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_MOJO_INTERFACES_SUPPORTED_VIDEO_DECODER_CONFIG_STRUCT_TRAITS_H_
+#define MEDIA_MOJO_INTERFACES_SUPPORTED_VIDEO_DECODER_CONFIG_STRUCT_TRAITS_H_
+
+#include "media/base/ipc/media_param_traits.h"
+#include "media/mojo/interfaces/media_types.mojom.h"
+#include "media/mojo/interfaces/video_decoder.mojom.h"
+#include "media/video/supported_video_decoder_config.h"
+#include "ui/gfx/geometry/mojo/geometry_struct_traits.h"
+
+namespace mojo {
+
+template <>
+struct StructTraits<media::mojom::SupportedVideoDecoderConfigDataView,
+ media::SupportedVideoDecoderConfig> {
+ static media::VideoCodecProfile profile_min(
+ const media::SupportedVideoDecoderConfig& input) {
+ return input.profile_min;
+ }
+
+ static media::VideoCodecProfile profile_max(
+ const media::SupportedVideoDecoderConfig& input) {
+ return input.profile_max;
+ }
+
+ static const gfx::Size& coded_size_min(
+ const media::SupportedVideoDecoderConfig& input) {
+ return input.coded_size_min;
+ }
+
+ static const gfx::Size& coded_size_max(
+ const media::SupportedVideoDecoderConfig& input) {
+ return input.coded_size_max;
+ }
+
+ static bool allow_encrypted(const media::SupportedVideoDecoderConfig& input) {
+ return input.allow_encrypted;
+ }
+
+ static bool require_encrypted(
+ const media::SupportedVideoDecoderConfig& input) {
+ return input.require_encrypted;
+ }
+
+ static bool Read(media::mojom::SupportedVideoDecoderConfigDataView input,
+ media::SupportedVideoDecoderConfig* output);
+};
+
+} // namespace mojo
+
+#endif // MEDIA_MOJO_INTERFACES_SUPPORTED_VIDEO_DECODER_CONFIG_STRUCT_TRAITS_H_
diff --git a/chromium/media/mojo/interfaces/typemaps.gni b/chromium/media/mojo/interfaces/typemaps.gni
index 46aa55d4de1..d8cb918056e 100644
--- a/chromium/media/mojo/interfaces/typemaps.gni
+++ b/chromium/media/mojo/interfaces/typemaps.gni
@@ -5,6 +5,7 @@
typemaps = [
"//media/mojo/interfaces/audio_decoder_config.typemap",
"//media/mojo/interfaces/audio_parameters.typemap",
+ "//media/mojo/interfaces/cdm_key_information.typemap",
"//media/mojo/interfaces/cdm_proxy.typemap",
"//media/mojo/interfaces/content_decryption_module.typemap",
"//media/mojo/interfaces/decryptor.typemap",
diff --git a/chromium/media/mojo/interfaces/video_decoder.mojom b/chromium/media/mojo/interfaces/video_decoder.mojom
index 1e3ff82ca3d..3b70b99bbcc 100644
--- a/chromium/media/mojo/interfaces/video_decoder.mojom
+++ b/chromium/media/mojo/interfaces/video_decoder.mojom
@@ -151,6 +151,10 @@ interface VideoDecoderClient {
bool can_read_without_stalling,
mojo_base.mojom.UnguessableToken? release_token);
+ // Called when the remote decoder is waiting because of |reason|, e.g. waiting
+ // for decryption key.
+ OnWaiting(WaitingReason reason);
+
// Request to be notified when the current OverlayInfo changes. This results
// in at least one call to OnOverlayInfoChanged() for the initial OverlayInfo.
// |restart_for_transitions| sets whether the decoder should be restarted on
diff --git a/chromium/media/mojo/interfaces/video_decoder.typemap b/chromium/media/mojo/interfaces/video_decoder.typemap
index 8b58cdeb769..bcc9160c402 100644
--- a/chromium/media/mojo/interfaces/video_decoder.typemap
+++ b/chromium/media/mojo/interfaces/video_decoder.typemap
@@ -4,12 +4,26 @@
mojom = "//media/mojo/interfaces/video_decoder.mojom"
-public_headers = [ "//media/base/overlay_info.h" ]
+public_headers = [
+ "//media/base/overlay_info.h",
+ "//media/video/supported_video_decoder_config.h",
+]
-traits_headers = [ "//media/base/ipc/media_param_traits_macros.h" ]
+traits_headers = [
+ "//media/base/ipc/media_param_traits_macros.h",
+ "//media/mojo/interfaces/supported_video_decoder_config_struct_traits.h",
+]
+
+sources = [
+ "supported_video_decoder_config_struct_traits.cc",
+ "supported_video_decoder_config_struct_traits.h",
+]
deps = [
"//media/gpu/ipc/common",
]
-type_mappings = [ "media.mojom.OverlayInfo=media::OverlayInfo" ]
+type_mappings = [
+ "media.mojom.OverlayInfo=media::OverlayInfo",
+ "media.mojom.SupportedVideoDecoderConfig=media::SupportedVideoDecoderConfig",
+]
diff --git a/chromium/media/mojo/interfaces/video_decoder_config_struct_traits_unittest.cc b/chromium/media/mojo/interfaces/video_decoder_config_struct_traits_unittest.cc
index fde934c5f9f..d028eb5d10b 100644
--- a/chromium/media/mojo/interfaces/video_decoder_config_struct_traits_unittest.cc
+++ b/chromium/media/mojo/interfaces/video_decoder_config_struct_traits_unittest.cc
@@ -6,7 +6,7 @@
#include <utility>
-#include "base/macros.h"
+#include "base/stl_util.h"
#include "media/base/media_util.h"
#include "media/base/video_decoder_config.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -24,7 +24,7 @@ static const gfx::Size kNaturalSize(320, 240);
TEST(VideoDecoderConfigStructTraitsTest, ConvertVideoDecoderConfig_Normal) {
const uint8_t kExtraData[] = "config extra data";
const std::vector<uint8_t> kExtraDataVector(
- &kExtraData[0], &kExtraData[0] + arraysize(kExtraData));
+ &kExtraData[0], &kExtraData[0] + base::size(kExtraData));
VideoDecoderConfig input(kCodecVP8, VP8PROFILE_ANY, PIXEL_FORMAT_I420,
VideoColorSpace(), VIDEO_ROTATION_0, kCodedSize,
kVisibleRect, kNaturalSize, kExtraDataVector,
diff --git a/chromium/media/mojo/services/BUILD.gn b/chromium/media/mojo/services/BUILD.gn
index fb4d010f8df..4e6b4a3481a 100644
--- a/chromium/media/mojo/services/BUILD.gn
+++ b/chromium/media/mojo/services/BUILD.gn
@@ -5,7 +5,7 @@
import("//build/config/jumbo.gni")
import("//media/media_options.gni")
import("//services/catalog/public/tools/catalog.gni")
-import("//services/service_manager/public/cpp/service.gni")
+import("//services/service_manager/public/cpp/service_executable.gni")
import("//services/service_manager/public/service_manifest.gni")
import("//testing/test.gni")
@@ -205,7 +205,7 @@ source_set("unit_tests") {
# service out-of-process. While CdmService is tested as a packaged service,
# which runs the service in-process.
-service("media") {
+service_executable("media") {
testonly = true
sources = [
diff --git a/chromium/media/mojo/services/gpu_mojo_media_client.cc b/chromium/media/mojo/services/gpu_mojo_media_client.cc
index 75f5e611c72..302affc739a 100644
--- a/chromium/media/mojo/services/gpu_mojo_media_client.cc
+++ b/chromium/media/mojo/services/gpu_mojo_media_client.cc
@@ -9,7 +9,6 @@
#include "base/bind.h"
#include "base/feature_list.h"
#include "base/memory/ptr_util.h"
-#include "build/build_config.h"
#include "gpu/ipc/service/gpu_channel.h"
#include "media/base/audio_decoder.h"
#include "media/base/cdm_factory.h"
@@ -22,6 +21,7 @@
#include "media/gpu/ipc/service/media_gpu_channel_manager.h"
#include "media/gpu/ipc/service/vda_video_decoder.h"
#include "media/mojo/interfaces/video_decoder.mojom.h"
+#include "media/video/supported_video_decoder_config.h"
#include "media/video/video_decode_accelerator.h"
#if defined(OS_ANDROID)
@@ -41,6 +41,7 @@
#if defined(OS_WIN)
#include "media/gpu/windows/d3d11_video_decoder.h"
+#include "ui/gl/gl_angle_util_win.h"
#endif // defined(OS_WIN)
#if defined(OS_ANDROID)
@@ -54,7 +55,7 @@ namespace media {
namespace {
#if defined(OS_ANDROID) || defined(OS_CHROMEOS) || defined(OS_MACOSX) || \
- defined(OS_WIN)
+ defined(OS_WIN) || defined(OS_LINUX)
gpu::CommandBufferStub* GetCommandBufferStub(
base::WeakPtr<MediaGpuChannelManager> media_gpu_channel_manager,
base::UnguessableToken channel_token,
@@ -71,6 +72,15 @@ gpu::CommandBufferStub* GetCommandBufferStub(
}
#endif
+#if defined(OS_WIN)
+// Return a callback to get the D3D11 device for D3D11VideoDecoder. Since it
+// only supports the ANGLE device right now, that's what we return.
+D3D11VideoDecoder::GetD3D11DeviceCB GetD3D11DeviceCallback() {
+ return base::BindRepeating(
+ []() { return gl::QueryD3D11DeviceObjectFromANGLE(); });
+}
+#endif
+
} // namespace
GpuMojoMediaClient::GpuMojoMediaClient(
@@ -102,7 +112,7 @@ std::unique_ptr<AudioDecoder> GpuMojoMediaClient::CreateAudioDecoder(
#endif // defined(OS_ANDROID)
}
-std::vector<mojom::SupportedVideoDecoderConfigPtr>
+std::vector<SupportedVideoDecoderConfig>
GpuMojoMediaClient::GetSupportedVideoDecoderConfigs() {
// TODO(liberato): Implement for D3D11VideoDecoder and MediaCodecVideoDecoder.
VideoDecodeAccelerator::Capabilities capabilities =
@@ -113,9 +123,22 @@ GpuMojoMediaClient::GetSupportedVideoDecoderConfigs() {
capabilities.flags &
VideoDecodeAccelerator::Capabilities::SUPPORTS_ENCRYPTED_STREAMS;
- std::vector<mojom::SupportedVideoDecoderConfigPtr> supported_configs;
+ std::vector<SupportedVideoDecoderConfig> supported_configs;
+
+#if defined(OS_ANDROID)
+ // TODO(liberato): Add MCVD.
+#elif defined(OS_WIN)
+ if (!d3d11_supported_configs_) {
+ d3d11_supported_configs_ =
+ D3D11VideoDecoder::GetSupportedVideoDecoderConfigs(
+ gpu_preferences_, gpu_workarounds_, GetD3D11DeviceCallback());
+ }
+ supported_configs = *d3d11_supported_configs_;
+#endif
+
+ // Merge the VDA supported profiles.
for (const auto& supported_profile : capabilities.supported_profiles) {
- supported_configs.push_back(mojom::SupportedVideoDecoderConfig::New(
+ supported_configs.push_back(SupportedVideoDecoderConfig(
supported_profile.profile, // profile_min
supported_profile.profile, // profile_max
supported_profile.min_resolution, // coded_size_min
@@ -148,7 +171,8 @@ std::unique_ptr<VideoDecoder> GpuMojoMediaClient::CreateVideoDecoder(
android_overlay_factory_cb_, std::move(request_overlay_info_cb),
std::make_unique<VideoFrameFactoryImpl>(gpu_task_runner_,
std::move(get_stub_cb)));
-#elif defined(OS_CHROMEOS) || defined(OS_MACOSX) || defined(OS_WIN)
+#elif defined(OS_CHROMEOS) || defined(OS_MACOSX) || defined(OS_WIN) || \
+ defined(OS_LINUX)
std::unique_ptr<VideoDecoder> vda_video_decoder = VdaVideoDecoder::Create(
task_runner, gpu_task_runner_, media_log->Clone(), target_color_space,
gpu_preferences_, gpu_workarounds_,
@@ -157,13 +181,18 @@ std::unique_ptr<VideoDecoder> GpuMojoMediaClient::CreateVideoDecoder(
command_buffer_id->route_id));
#if defined(OS_WIN)
if (base::FeatureList::IsEnabled(kD3D11VideoDecoder)) {
+ // If nothing has cached the configs yet, then do so now.
+ if (!d3d11_supported_configs_)
+ GetSupportedVideoDecoderConfigs();
+
std::unique_ptr<VideoDecoder> d3d11_video_decoder =
D3D11VideoDecoder::Create(
gpu_task_runner_, media_log->Clone(), gpu_preferences_,
gpu_workarounds_,
base::BindRepeating(
&GetCommandBufferStub, media_gpu_channel_manager_,
- command_buffer_id->channel_token, command_buffer_id->route_id));
+ command_buffer_id->channel_token, command_buffer_id->route_id),
+ GetD3D11DeviceCallback(), *d3d11_supported_configs_);
return base::WrapUnique<VideoDecoder>(new FallbackVideoDecoder(
std::move(d3d11_video_decoder), std::move(vda_video_decoder)));
}
diff --git a/chromium/media/mojo/services/gpu_mojo_media_client.h b/chromium/media/mojo/services/gpu_mojo_media_client.h
index 9981dc15dd9..943b3ab994f 100644
--- a/chromium/media/mojo/services/gpu_mojo_media_client.h
+++ b/chromium/media/mojo/services/gpu_mojo_media_client.h
@@ -10,7 +10,9 @@
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
+#include "base/optional.h"
#include "base/single_thread_task_runner.h"
+#include "build/build_config.h"
#include "gpu/config/gpu_driver_bug_workarounds.h"
#include "gpu/config/gpu_feature_info.h"
#include "gpu/config/gpu_preferences.h"
@@ -39,8 +41,8 @@ class GpuMojoMediaClient : public MojoMediaClient {
~GpuMojoMediaClient() final;
// MojoMediaClient implementation.
- std::vector<mojom::SupportedVideoDecoderConfigPtr>
- GetSupportedVideoDecoderConfigs() final;
+ std::vector<SupportedVideoDecoderConfig> GetSupportedVideoDecoderConfigs()
+ final;
void Initialize(service_manager::Connector* connector) final;
std::unique_ptr<AudioDecoder> CreateAudioDecoder(
scoped_refptr<base::SingleThreadTaskRunner> task_runner) final;
@@ -64,6 +66,10 @@ class GpuMojoMediaClient : public MojoMediaClient {
base::WeakPtr<MediaGpuChannelManager> media_gpu_channel_manager_;
AndroidOverlayMojoFactoryCB android_overlay_factory_cb_;
CdmProxyFactoryCB cdm_proxy_factory_cb_;
+#if defined(OS_WIN)
+ base::Optional<std::vector<SupportedVideoDecoderConfig>>
+ d3d11_supported_configs_;
+#endif // defined(OS_WIN)
DISALLOW_COPY_AND_ASSIGN(GpuMojoMediaClient);
};
diff --git a/chromium/media/mojo/services/interface_factory_impl.cc b/chromium/media/mojo/services/interface_factory_impl.cc
index a2a103e30f0..311ca5ae264 100644
--- a/chromium/media/mojo/services/interface_factory_impl.cc
+++ b/chromium/media/mojo/services/interface_factory_impl.cc
@@ -10,7 +10,6 @@
#include "base/logging.h"
#include "base/single_thread_task_runner.h"
#include "base/threading/thread_task_runner_handle.h"
-#include "media/base/media_log.h"
#include "media/mojo/services/mojo_decryptor_service.h"
#include "media/mojo/services/mojo_media_client.h"
#include "mojo/public/cpp/bindings/strong_binding.h"
@@ -43,13 +42,9 @@ namespace media {
InterfaceFactoryImpl::InterfaceFactoryImpl(
service_manager::mojom::InterfaceProviderPtr interfaces,
- MediaLog* media_log,
std::unique_ptr<service_manager::ServiceKeepaliveRef> keepalive_ref,
MojoMediaClient* mojo_media_client)
:
-#if BUILDFLAG(ENABLE_MOJO_RENDERER)
- media_log_(media_log),
-#endif
#if BUILDFLAG(ENABLE_MOJO_CDM)
interfaces_(std::move(interfaces)),
#endif
@@ -116,7 +111,7 @@ void InterfaceFactoryImpl::CreateRenderer(
// audio device ID. See interface_factory.mojom.
const std::string& audio_device_id = type_specific_id;
auto renderer = mojo_media_client_->CreateRenderer(
- interfaces_.get(), base::ThreadTaskRunnerHandle::Get(), media_log_,
+ interfaces_.get(), base::ThreadTaskRunnerHandle::Get(), &media_log_,
audio_device_id);
if (!renderer) {
DLOG(ERROR) << "Renderer creation failed.";
diff --git a/chromium/media/mojo/services/interface_factory_impl.h b/chromium/media/mojo/services/interface_factory_impl.h
index 1de15a9f231..9e3d0533a08 100644
--- a/chromium/media/mojo/services/interface_factory_impl.h
+++ b/chromium/media/mojo/services/interface_factory_impl.h
@@ -8,6 +8,7 @@
#include <memory>
#include "base/macros.h"
+#include "media/base/media_util.h"
#include "media/mojo/buildflags.h"
#include "media/mojo/interfaces/interface_factory.mojom.h"
#include "media/mojo/services/deferred_destroy_strong_binding_set.h"
@@ -19,14 +20,12 @@
namespace media {
class CdmFactory;
-class MediaLog;
class MojoMediaClient;
class InterfaceFactoryImpl : public DeferredDestroy<mojom::InterfaceFactory> {
public:
InterfaceFactoryImpl(
service_manager::mojom::InterfaceProviderPtr interfaces,
- MediaLog* media_log,
std::unique_ptr<service_manager::ServiceKeepaliveRef> keepalive_ref,
MojoMediaClient* mojo_media_client);
~InterfaceFactoryImpl() final;
@@ -72,7 +71,8 @@ class InterfaceFactoryImpl : public DeferredDestroy<mojom::InterfaceFactory> {
#endif // BUILDFLAG(ENABLE_MOJO_VIDEO_DECODER)
#if BUILDFLAG(ENABLE_MOJO_RENDERER)
- MediaLog* media_log_;
+ // TODO(xhwang): Use MojoMediaLog for Renderer.
+ NullMediaLog media_log_;
mojo::StrongBindingSet<mojom::Renderer> renderer_bindings_;
#endif // BUILDFLAG(ENABLE_MOJO_RENDERER)
diff --git a/chromium/media/mojo/services/main.cc b/chromium/media/mojo/services/main.cc
index 1ffad7ebbf8..b8d4318dd7c 100644
--- a/chromium/media/mojo/services/main.cc
+++ b/chromium/media/mojo/services/main.cc
@@ -2,25 +2,18 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "services/service_manager/public/c/main.h"
#include "base/logging.h"
#include "base/message_loop/message_loop.h"
-#include "base/run_loop.h"
#include "media/mojo/services/media_service_factory.h"
+#include "services/service_manager/public/cpp/service_executable/service_main.h"
#include "services/service_manager/public/mojom/service.mojom.h"
-MojoResult ServiceMain(MojoHandle service_request_handle) {
+void ServiceMain(service_manager::mojom::ServiceRequest request) {
logging::LoggingSettings settings;
settings.logging_dest = logging::LOG_TO_SYSTEM_DEBUG_LOG;
logging::InitLogging(settings);
base::MessageLoop message_loop;
- base::RunLoop run_loop;
- std::unique_ptr<service_manager::Service> service =
- media::CreateMediaServiceForTesting(
- service_manager::mojom::ServiceRequest(mojo::ScopedMessagePipeHandle(
- mojo::MessagePipeHandle(service_request_handle))));
- service->set_termination_closure(run_loop.QuitClosure());
- run_loop.Run();
- return MOJO_RESULT_OK;
+ media::CreateMediaServiceForTesting(std::move(request))
+ ->RunUntilTermination();
}
diff --git a/chromium/media/mojo/services/media_service.cc b/chromium/media/mojo/services/media_service.cc
index 59799f99232..b272103ce7c 100644
--- a/chromium/media/mojo/services/media_service.cc
+++ b/chromium/media/mojo/services/media_service.cc
@@ -57,9 +57,9 @@ void MediaService::CreateInterfaceFactory(
return;
interface_factory_bindings_.AddBinding(
- std::make_unique<InterfaceFactoryImpl>(
- std::move(host_interfaces), &media_log_, keepalive_.CreateRef(),
- mojo_media_client_.get()),
+ std::make_unique<InterfaceFactoryImpl>(std::move(host_interfaces),
+ keepalive_.CreateRef(),
+ mojo_media_client_.get()),
std::move(request));
}
diff --git a/chromium/media/mojo/services/media_service.h b/chromium/media/mojo/services/media_service.h
index 6a0b93f3486..98972d56841 100644
--- a/chromium/media/mojo/services/media_service.h
+++ b/chromium/media/mojo/services/media_service.h
@@ -9,7 +9,6 @@
#include "base/macros.h"
#include "build/build_config.h"
-#include "media/base/media_log.h"
#include "media/mojo/interfaces/interface_factory.mojom.h"
#include "media/mojo/interfaces/media_service.mojom.h"
#include "media/mojo/services/deferred_destroy_strong_binding_set.h"
@@ -46,7 +45,6 @@ class MEDIA_MOJO_EXPORT MediaService : public service_manager::Service,
mojom::InterfaceFactoryRequest request,
service_manager::mojom::InterfaceProviderPtr host_interfaces) final;
- MediaLog media_log_;
service_manager::ServiceBinding service_binding_;
service_manager::ServiceKeepalive keepalive_;
@@ -58,8 +56,6 @@ class MEDIA_MOJO_EXPORT MediaService : public service_manager::Service,
// |mojo_media_client_| must be destructed before |ref_factory_|.
std::unique_ptr<MojoMediaClient> mojo_media_client_;
- // Note: Since |&media_log_| is passed to bindings, the bindings must be
- // destructed first.
DeferredDestroyStrongBindingSet<mojom::InterfaceFactory>
interface_factory_bindings_;
diff --git a/chromium/media/mojo/services/media_service_unittest.cc b/chromium/media/mojo/services/media_service_unittest.cc
index d3c51ce93af..b48c9f888db 100644
--- a/chromium/media/mojo/services/media_service_unittest.cc
+++ b/chromium/media/mojo/services/media_service_unittest.cc
@@ -106,8 +106,9 @@ class MockRendererClient : public mojom::RendererClient {
MOCK_METHOD1(OnVideoNaturalSizeChange, void(const gfx::Size& size));
MOCK_METHOD1(OnStatisticsUpdate,
void(const media::PipelineStatistics& stats));
- MOCK_METHOD0(OnWaitingForDecryptionKey, void());
+ MOCK_METHOD1(OnWaiting, void(WaitingReason));
MOCK_METHOD1(OnDurationChange, void(base::TimeDelta duration));
+ MOCK_METHOD1(OnRemotePlayStateChange, void(MediaStatus::State state));
private:
DISALLOW_COPY_AND_ASSIGN(MockRendererClient);
diff --git a/chromium/media/mojo/services/mojo_audio_decoder_service.cc b/chromium/media/mojo/services/mojo_audio_decoder_service.cc
index 15cae7c515f..eba8b656e92 100644
--- a/chromium/media/mojo/services/mojo_audio_decoder_service.cc
+++ b/chromium/media/mojo/services/mojo_audio_decoder_service.cc
@@ -57,7 +57,7 @@ void MojoAudioDecoderService::Initialize(const AudioDecoderConfig& config,
base::Bind(&MojoAudioDecoderService::OnInitialized, weak_this_,
base::Passed(&callback)),
base::Bind(&MojoAudioDecoderService::OnAudioBufferReady, weak_this_),
- base::NullCallback());
+ base::Bind(&MojoAudioDecoderService::OnWaiting, weak_this_));
}
void MojoAudioDecoderService::SetDataSource(
@@ -140,4 +140,9 @@ void MojoAudioDecoderService::OnAudioBufferReady(
client_->OnBufferDecoded(mojom::AudioBuffer::From(audio_buffer));
}
+void MojoAudioDecoderService::OnWaiting(WaitingReason reason) {
+ DVLOG(1) << __func__;
+ client_->OnWaiting(reason);
+}
+
} // namespace media
diff --git a/chromium/media/mojo/services/mojo_audio_decoder_service.h b/chromium/media/mojo/services/mojo_audio_decoder_service.h
index 58fce42840e..2cb52f44e4e 100644
--- a/chromium/media/mojo/services/mojo_audio_decoder_service.h
+++ b/chromium/media/mojo/services/mojo_audio_decoder_service.h
@@ -59,6 +59,10 @@ class MEDIA_MOJO_EXPORT MojoAudioDecoderService : public mojom::AudioDecoder {
// Called by |decoder_| for each decoded buffer.
void OnAudioBufferReady(const scoped_refptr<AudioBuffer>& audio_buffer);
+ // Called by |decoder_| when it's waiting because of |reason|, e.g. waiting
+ // for decryption key.
+ void OnWaiting(WaitingReason reason);
+
std::unique_ptr<MojoDecoderBufferReader> mojo_decoder_buffer_reader_;
// A helper object required to get CDM from CDM id.
diff --git a/chromium/media/mojo/services/mojo_cdm_service.cc b/chromium/media/mojo/services/mojo_cdm_service.cc
index 9790fccd9e2..1ccfd2f05a7 100644
--- a/chromium/media/mojo/services/mojo_cdm_service.cc
+++ b/chromium/media/mojo/services/mojo_cdm_service.cc
@@ -209,11 +209,8 @@ void MojoCdmService::OnSessionKeysChange(const std::string& session_id,
DVLOG(2) << __func__
<< " has_additional_usable_key = " << has_additional_usable_key;
- std::vector<mojom::CdmKeyInformationPtr> keys_data;
- for (auto& key : keys_info)
- keys_data.push_back(mojom::CdmKeyInformation::From(*(key.get())));
client_->OnSessionKeysChange(session_id, has_additional_usable_key,
- std::move(keys_data));
+ std::move(keys_info));
}
void MojoCdmService::OnSessionExpirationUpdate(const std::string& session_id,
diff --git a/chromium/media/mojo/services/mojo_cdm_service_context.cc b/chromium/media/mojo/services/mojo_cdm_service_context.cc
index 394e33bf36a..198241baf62 100644
--- a/chromium/media/mojo/services/mojo_cdm_service_context.cc
+++ b/chromium/media/mojo/services/mojo_cdm_service_context.cc
@@ -39,10 +39,10 @@ class CdmProxyContextRef : public CdmContextRef, public CdmContext {
private:
// CdmContext implementation.
- std::unique_ptr<CallbackRegistration> RegisterNewKeyCB(
- base::RepeatingClosure new_key_cb) final {
+ std::unique_ptr<CallbackRegistration> RegisterEventCB(
+ EventCB event_cb) final {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
- return cdm_context_ ? cdm_context_->RegisterNewKeyCB(std::move(new_key_cb))
+ return cdm_context_ ? cdm_context_->RegisterEventCB(std::move(event_cb))
: nullptr;
}
diff --git a/chromium/media/mojo/services/mojo_media_client.cc b/chromium/media/mojo/services/mojo_media_client.cc
index ef4a342e285..1abaa147cbd 100644
--- a/chromium/media/mojo/services/mojo_media_client.cc
+++ b/chromium/media/mojo/services/mojo_media_client.cc
@@ -28,9 +28,9 @@ std::unique_ptr<AudioDecoder> MojoMediaClient::CreateAudioDecoder(
return nullptr;
}
-std::vector<mojom::SupportedVideoDecoderConfigPtr>
+std::vector<SupportedVideoDecoderConfig>
MojoMediaClient::GetSupportedVideoDecoderConfigs() {
- return std::vector<mojom::SupportedVideoDecoderConfigPtr>();
+ return {};
}
std::unique_ptr<VideoDecoder> MojoMediaClient::CreateVideoDecoder(
diff --git a/chromium/media/mojo/services/mojo_media_client.h b/chromium/media/mojo/services/mojo_media_client.h
index a3adfb19e6c..acf246b0c48 100644
--- a/chromium/media/mojo/services/mojo_media_client.h
+++ b/chromium/media/mojo/services/mojo_media_client.h
@@ -15,6 +15,7 @@
#include "media/media_buildflags.h"
#include "media/mojo/interfaces/video_decoder.mojom.h"
#include "media/mojo/services/media_mojo_export.h"
+#include "media/video/supported_video_decoder_config.h"
namespace base {
class SingleThreadTaskRunner;
@@ -55,7 +56,7 @@ class MEDIA_MOJO_EXPORT MojoMediaClient {
virtual std::unique_ptr<AudioDecoder> CreateAudioDecoder(
scoped_refptr<base::SingleThreadTaskRunner> task_runner);
- virtual std::vector<mojom::SupportedVideoDecoderConfigPtr>
+ virtual std::vector<SupportedVideoDecoderConfig>
GetSupportedVideoDecoderConfigs();
virtual std::unique_ptr<VideoDecoder> CreateVideoDecoder(
diff --git a/chromium/media/mojo/services/mojo_renderer_service.cc b/chromium/media/mojo/services/mojo_renderer_service.cc
index 8f04efc2714..7afa721f531 100644
--- a/chromium/media/mojo/services/mojo_renderer_service.cc
+++ b/chromium/media/mojo/services/mojo_renderer_service.cc
@@ -169,9 +169,9 @@ void MojoRendererService::OnBufferingStateChange(BufferingState state) {
client_->OnBufferingStateChange(state);
}
-void MojoRendererService::OnWaitingForDecryptionKey() {
+void MojoRendererService::OnWaiting(WaitingReason reason) {
DVLOG(1) << __func__;
- client_->OnWaitingForDecryptionKey();
+ client_->OnWaiting(reason);
}
void MojoRendererService::OnAudioConfigChange(
@@ -195,6 +195,10 @@ void MojoRendererService::OnDurationChange(base::TimeDelta duration) {
client_->OnDurationChange(duration);
}
+void MojoRendererService::OnRemotePlayStateChange(MediaStatus::State state) {
+ client_->OnRemotePlayStateChange(state);
+}
+
void MojoRendererService::OnVideoOpacityChange(bool opaque) {
DVLOG(2) << __func__ << "(" << opaque << ")";
client_->OnVideoOpacityChange(opaque);
diff --git a/chromium/media/mojo/services/mojo_renderer_service.h b/chromium/media/mojo/services/mojo_renderer_service.h
index 49a192a55d3..e18812796fc 100644
--- a/chromium/media/mojo/services/mojo_renderer_service.h
+++ b/chromium/media/mojo/services/mojo_renderer_service.h
@@ -86,12 +86,13 @@ class MEDIA_MOJO_EXPORT MojoRendererService : public mojom::Renderer,
void OnEnded() final;
void OnStatisticsUpdate(const PipelineStatistics& stats) final;
void OnBufferingStateChange(BufferingState state) final;
- void OnWaitingForDecryptionKey() final;
+ void OnWaiting(WaitingReason reason) final;
void OnAudioConfigChange(const AudioDecoderConfig& config) final;
void OnVideoConfigChange(const VideoDecoderConfig& config) final;
void OnVideoNaturalSizeChange(const gfx::Size& size) final;
void OnVideoOpacityChange(bool opaque) final;
void OnDurationChange(base::TimeDelta duration) final;
+ void OnRemotePlayStateChange(MediaStatus::State state) final;
// Called when the MediaResourceShim is ready to go (has a config,
// pipe handle, etc) and can be handed off to a renderer for use.
diff --git a/chromium/media/mojo/services/mojo_video_decoder_service.cc b/chromium/media/mojo/services/mojo_video_decoder_service.cc
index e7a115931fb..4ec54f1a0fb 100644
--- a/chromium/media/mojo/services/mojo_video_decoder_service.cc
+++ b/chromium/media/mojo/services/mojo_video_decoder_service.cc
@@ -207,13 +207,12 @@ void MojoVideoDecoderService::Initialize(const VideoDecoderConfig& config,
DCHECK(cdm_context);
}
+ using Self = MojoVideoDecoderService;
decoder_->Initialize(
config, low_delay, cdm_context,
- base::BindRepeating(&MojoVideoDecoderService::OnDecoderInitialized,
- weak_this_),
- base::BindRepeating(&MojoVideoDecoderService::OnDecoderOutput,
- weak_this_),
- base::NullCallback());
+ base::BindRepeating(&Self::OnDecoderInitialized, weak_this_),
+ base::BindRepeating(&Self::OnDecoderOutput, weak_this_),
+ base::BindRepeating(&Self::OnDecoderWaiting, weak_this_));
}
void MojoVideoDecoderService::Decode(mojom::DecoderBufferPtr buffer,
@@ -358,6 +357,14 @@ void MojoVideoDecoderService::OnDecoderOutput(
std::move(release_token));
}
+void MojoVideoDecoderService::OnDecoderWaiting(WaitingReason reason) {
+ DVLOG(3) << __func__;
+ DCHECK(client_);
+ TRACE_EVENT1("media", "MojoVideoDecoderService::OnDecoderWaiting", "reason",
+ static_cast<int>(reason));
+ client_->OnWaiting(reason);
+}
+
void MojoVideoDecoderService::OnOverlayInfoChanged(
const OverlayInfo& overlay_info) {
DVLOG(2) << __func__;
diff --git a/chromium/media/mojo/services/mojo_video_decoder_service.h b/chromium/media/mojo/services/mojo_video_decoder_service.h
index 72f52386504..bb33a0fd936 100644
--- a/chromium/media/mojo/services/mojo_video_decoder_service.h
+++ b/chromium/media/mojo/services/mojo_video_decoder_service.h
@@ -76,6 +76,8 @@ class MEDIA_MOJO_EXPORT MojoVideoDecoderService final
void OnDecoderReset();
void OnDecoderOutput(const scoped_refptr<VideoFrame>& frame);
+ void OnDecoderWaiting(WaitingReason reason);
+
void OnDecoderRequestedOverlayInfo(
bool restart_for_transitions,
const ProvideOverlayInfoCB& provide_overlay_info_cb);
diff --git a/chromium/media/mojo/services/video_decode_perf_history.cc b/chromium/media/mojo/services/video_decode_perf_history.cc
index d99fcd6e73b..1863430018e 100644
--- a/chromium/media/mojo/services/video_decode_perf_history.cc
+++ b/chromium/media/mojo/services/video_decode_perf_history.cc
@@ -13,6 +13,7 @@
#include "media/base/bind_to_current_loop.h"
#include "media/base/media_switches.h"
#include "media/base/video_codecs.h"
+#include "media/capabilities/learning_helper.h"
#include "mojo/public/cpp/bindings/strong_binding.h"
#include "services/metrics/public/cpp/ukm_builders.h"
#include "services/metrics/public/cpp/ukm_recorder.h"
@@ -42,6 +43,11 @@ VideoDecodePerfHistory::VideoDecodePerfHistory(
weak_ptr_factory_(this) {
DVLOG(2) << __func__;
DCHECK(db_);
+
+ // If the local learning experiment is enabled, then also create
+ // |learning_helper_| to send data to it.
+ if (base::FeatureList::IsEnabled(kMediaLearningExperiment))
+ learning_helper_ = std::make_unique<LearningHelper>();
}
VideoDecodePerfHistory::~VideoDecodePerfHistory() {
@@ -233,6 +239,9 @@ void VideoDecodePerfHistory::SavePerfRecord(ukm::SourceId source_id,
targets.frames_decoded, targets.frames_dropped,
targets.frames_power_efficient);
+ if (learning_helper_)
+ learning_helper_->AppendStats(video_key, new_stats);
+
// Get past perf info and report UKM metrics before saving this record.
db_->GetDecodeStats(
video_key,
@@ -341,6 +350,11 @@ void VideoDecodePerfHistory::ClearHistory(base::OnceClosure clear_done_cb) {
DVLOG(2) << __func__;
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ // If we have a learning helper, then replace it. This will erase any data
+ // that it currently has.
+ if (learning_helper_)
+ learning_helper_ = std::make_unique<LearningHelper>();
+
if (db_init_status_ == FAILED) {
DVLOG(3) << __func__ << " Can't clear history - No DB!";
std::move(clear_done_cb).Run();
diff --git a/chromium/media/mojo/services/video_decode_perf_history.h b/chromium/media/mojo/services/video_decode_perf_history.h
index bed28bb99f9..2af7c96f2ff 100644
--- a/chromium/media/mojo/services/video_decode_perf_history.h
+++ b/chromium/media/mojo/services/video_decode_perf_history.h
@@ -24,6 +24,8 @@
namespace media {
+class LearningHelper;
+
// This class saves and retrieves video decode performance statistics on behalf
// of the MediaCapabilities API. It also helps to grade the accuracy of the API
// by comparing its history-based assessment of smoothness/power-efficiency to
@@ -180,6 +182,9 @@ class MEDIA_MOJO_EXPORT VideoDecodePerfHistory
// service.
mojo::BindingSet<mojom::VideoDecodePerfHistory> bindings_;
+ // Optional helper for local learning.
+ std::unique_ptr<LearningHelper> learning_helper_;
+
// Ensures all access to class members come on the same sequence.
SEQUENCE_CHECKER(sequence_checker_);
diff --git a/chromium/media/muxers/webm_muxer_fuzzertest.cc b/chromium/media/muxers/webm_muxer_fuzzertest.cc
index d92e7862cbc..eff598f5ced 100644
--- a/chromium/media/muxers/webm_muxer_fuzzertest.cc
+++ b/chromium/media/muxers/webm_muxer_fuzzertest.cc
@@ -12,6 +12,7 @@
#include "base/logging.h"
#include "base/message_loop/message_loop.h"
#include "base/run_loop.h"
+#include "base/stl_util.h"
#include "base/strings/string_piece.h"
#include "media/base/audio_parameters.h"
#include "media/base/video_frame.h"
@@ -54,9 +55,9 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
for (const auto& input_type : kVideoAudioInputTypes) {
const auto video_codec = static_cast<VideoCodec>(
- kSupportedVideoCodecs[rng() % arraysize(kSupportedVideoCodecs)]);
+ kSupportedVideoCodecs[rng() % base::size(kSupportedVideoCodecs)]);
const auto audio_codec = static_cast<AudioCodec>(
- kSupportedAudioCodecs[rng() % arraysize(kSupportedAudioCodecs)]);
+ kSupportedAudioCodecs[rng() % base::size(kSupportedAudioCodecs)]);
WebmMuxer muxer(video_codec, audio_codec, input_type.has_video,
input_type.has_audio, base::Bind(&OnWriteCallback));
base::RunLoop run_loop;
@@ -83,7 +84,7 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
const ChannelLayout layout = rng() % 2 ? media::CHANNEL_LAYOUT_STEREO
: media::CHANNEL_LAYOUT_MONO;
const int sample_rate =
- kSampleRatesInKHz[rng() % arraysize(kSampleRatesInKHz)];
+ kSampleRatesInKHz[rng() % base::size(kSampleRatesInKHz)];
const AudioParameters params(
media::AudioParameters::AUDIO_PCM_LOW_LATENCY, layout, sample_rate,
diff --git a/chromium/media/remoting/courier_renderer.cc b/chromium/media/remoting/courier_renderer.cc
index 1c88c0ea1dd..7c4a4be8412 100644
--- a/chromium/media/remoting/courier_renderer.cc
+++ b/chromium/media/remoting/courier_renderer.cc
@@ -21,6 +21,7 @@
#include "media/base/media_resource.h"
#include "media/base/renderer_client.h"
#include "media/base/video_renderer_sink.h"
+#include "media/base/waiting.h"
#include "media/remoting/demuxer_stream_adapter.h"
#include "media/remoting/proto_enum_utils.h"
#include "media/remoting/proto_utils.h"
@@ -421,7 +422,7 @@ void CourierRenderer::OnReceivedRpc(std::unique_ptr<pb::RpcMessage> message) {
break;
case pb::RpcMessage::RPC_RC_ONWAITINGFORDECRYPTIONKEY:
VLOG(2) << __func__ << ": Received RPC_RC_ONWAITINGFORDECRYPTIONKEY.";
- client_->OnWaitingForDecryptionKey();
+ client_->OnWaiting(WaitingReason::kNoDecryptionKey);
break;
case pb::RpcMessage::RPC_RC_ONDURATIONCHANGE:
OnDurationChange(std::move(message));
diff --git a/chromium/media/remoting/courier_renderer_unittest.cc b/chromium/media/remoting/courier_renderer_unittest.cc
index a11a1dc4ae9..1577138d2b5 100644
--- a/chromium/media/remoting/courier_renderer_unittest.cc
+++ b/chromium/media/remoting/courier_renderer_unittest.cc
@@ -88,10 +88,11 @@ class RendererClientImpl final : public RendererClient {
MOCK_METHOD1(OnBufferingStateChange, void(BufferingState state));
MOCK_METHOD1(OnAudioConfigChange, void(const AudioDecoderConfig& config));
MOCK_METHOD1(OnVideoConfigChange, void(const VideoDecoderConfig& config));
- void OnWaitingForDecryptionKey() override {}
+ void OnWaiting(WaitingReason reason) override {}
MOCK_METHOD1(OnVideoNaturalSizeChange, void(const gfx::Size& size));
MOCK_METHOD1(OnVideoOpacityChange, void(bool opaque));
MOCK_METHOD1(OnDurationChange, void(base::TimeDelta duration));
+ MOCK_METHOD1(OnRemotePlayStateChange, void(MediaStatus::State state));
void DelegateOnStatisticsUpdate(const PipelineStatistics& stats) {
stats_ = stats;
diff --git a/chromium/media/remoting/media_remoting_rpc.proto b/chromium/media/remoting/media_remoting_rpc.proto
index 9bf902d2ba2..d51b2b0d759 100644
--- a/chromium/media/remoting/media_remoting_rpc.proto
+++ b/chromium/media/remoting/media_remoting_rpc.proto
@@ -223,6 +223,7 @@ message VideoDecoderConfig {
PIXEL_FORMAT_Y16 = 26;
PIXEL_FORMAT_ABGR = 27;
PIXEL_FORMAT_XBGR = 28;
+ PIXEL_FORMAT_P016LE = 29;
};
// Proto version of media::ColorSpace.
diff --git a/chromium/media/remoting/proto_enum_utils.cc b/chromium/media/remoting/proto_enum_utils.cc
index 044a9f55670..9f41d7f992c 100644
--- a/chromium/media/remoting/proto_enum_utils.cc
+++ b/chromium/media/remoting/proto_enum_utils.cc
@@ -365,6 +365,7 @@ base::Optional<VideoPixelFormat> ToMediaVideoPixelFormat(
CASE_RETURN_OTHER(PIXEL_FORMAT_Y16);
CASE_RETURN_OTHER(PIXEL_FORMAT_ABGR);
CASE_RETURN_OTHER(PIXEL_FORMAT_XBGR);
+ CASE_RETURN_OTHER(PIXEL_FORMAT_P016LE);
}
return base::nullopt; // Not a 'default' to ensure compile-time checks.
}
@@ -402,6 +403,7 @@ base::Optional<pb::VideoDecoderConfig::Format> ToProtoVideoDecoderConfigFormat(
CASE_RETURN_OTHER(PIXEL_FORMAT_Y16);
CASE_RETURN_OTHER(PIXEL_FORMAT_ABGR);
CASE_RETURN_OTHER(PIXEL_FORMAT_XBGR);
+ CASE_RETURN_OTHER(PIXEL_FORMAT_P016LE);
}
return base::nullopt; // Not a 'default' to ensure compile-time checks.
}
diff --git a/chromium/media/remoting/proto_utils.cc b/chromium/media/remoting/proto_utils.cc
index 5b63745e358..ae23fe79b1a 100644
--- a/chromium/media/remoting/proto_utils.cc
+++ b/chromium/media/remoting/proto_utils.cc
@@ -184,11 +184,10 @@ scoped_refptr<DecoderBuffer> ByteArrayToDecoderBuffer(const uint8_t* data,
pb::DecoderBuffer segment;
uint32_t buffer_size = 0;
if (reader.ReadU8(&payload_version) && payload_version == 0 &&
- reader.ReadU16(&proto_size) &&
- static_cast<int>(proto_size) < reader.remaining() &&
+ reader.ReadU16(&proto_size) && proto_size < reader.remaining() &&
segment.ParseFromArray(reader.ptr(), proto_size) &&
reader.Skip(proto_size) && reader.ReadU32(&buffer_size) &&
- static_cast<int64_t>(buffer_size) <= reader.remaining()) {
+ buffer_size <= reader.remaining()) {
// Deserialize proto buffer. It passes the pre allocated DecoderBuffer into
// the function because the proto buffer may overwrite DecoderBuffer since
// it may be EOS buffer.
diff --git a/chromium/media/remoting/receiver.cc b/chromium/media/remoting/receiver.cc
index ace73dc7c6b..6d6134eb9dc 100644
--- a/chromium/media/remoting/receiver.cc
+++ b/chromium/media/remoting/receiver.cc
@@ -264,7 +264,8 @@ void Receiver::OnBufferingStateChange(BufferingState state) {
rpc_broker_->SendMessageToRemote(std::move(rpc));
}
-void Receiver::OnWaitingForDecryptionKey() {
+// TODO: Passes |reason| over.
+void Receiver::OnWaiting(WaitingReason reason) {
DVLOG(3) << __func__ << ": Issues RPC_RC_ONWAITINGFORDECRYPTIONKEY message.";
std::unique_ptr<pb::RpcMessage> rpc(new pb::RpcMessage());
rpc->set_handle(remote_handle_);
@@ -325,5 +326,10 @@ void Receiver::OnDurationChange(base::TimeDelta duration) {
rpc_broker_->SendMessageToRemote(std::move(rpc));
}
+void Receiver::OnRemotePlayStateChange(MediaStatus::State state) {
+ // Only used with the FlingingRenderer.
+ NOTREACHED();
+}
+
} // namespace remoting
} // namespace media
diff --git a/chromium/media/remoting/receiver.h b/chromium/media/remoting/receiver.h
index 0abd1e586b6..ca44ab994ca 100644
--- a/chromium/media/remoting/receiver.h
+++ b/chromium/media/remoting/receiver.h
@@ -35,12 +35,13 @@ class Receiver final : public RendererClient {
void OnEnded() override;
void OnStatisticsUpdate(const PipelineStatistics& stats) override;
void OnBufferingStateChange(BufferingState state) override;
- void OnWaitingForDecryptionKey() override;
+ void OnWaiting(WaitingReason reason) override;
void OnAudioConfigChange(const AudioDecoderConfig& config) override;
void OnVideoConfigChange(const VideoDecoderConfig& config) override;
void OnVideoNaturalSizeChange(const gfx::Size& size) override;
void OnVideoOpacityChange(bool opaque) override;
void OnDurationChange(base::TimeDelta duration) override;
+ void OnRemotePlayStateChange(MediaStatus::State state) override;
void OnReceivedRpc(std::unique_ptr<pb::RpcMessage> message);
void OnReceivedBuffer(DemuxerStream::Type type,
diff --git a/chromium/media/remoting/renderer_controller.cc b/chromium/media/remoting/renderer_controller.cc
index 62ed9e93f75..025fc56f8d3 100644
--- a/chromium/media/remoting/renderer_controller.cc
+++ b/chromium/media/remoting/renderer_controller.cc
@@ -117,12 +117,7 @@ RendererController::RendererController(
RendererController::~RendererController() {
DCHECK(thread_checker_.CalledOnValidThread());
-
- CancelDelayedStart();
- if (remote_rendering_started_) {
- metrics_recorder_.WillStopSession(MEDIA_ELEMENT_DESTROYED);
- remoter_->Stop(mojom::RemotingStopReason::UNEXPECTED_FAILURE);
- }
+ SetClient(nullptr);
}
void RendererController::OnSinkAvailable(
@@ -150,9 +145,8 @@ void RendererController::OnStarted() {
DCHECK(thread_checker_.CalledOnValidThread());
VLOG(1) << "Remoting started successively.";
- if (remote_rendering_started_) {
+ if (remote_rendering_started_ && client_) {
metrics_recorder_.DidStartSession();
- DCHECK(client_);
client_->SwitchToRemoteRenderer(sink_metadata_.friendly_name);
}
}
@@ -305,9 +299,6 @@ void RendererController::OnDataSourceInitialized(
}
void RendererController::UpdateRemotePlaybackAvailabilityMonitoringState() {
- if (!client_)
- return;
-
// Currently RemotePlayback-initated media remoting only supports URL flinging
// thus the source is supported when the URL is either http or https, video and
// audio codecs are supported by the remote playback device; HLS is playable by
@@ -327,7 +318,8 @@ void RendererController::UpdateRemotePlaybackAvailabilityMonitoringState() {
url_after_redirects_.SchemeIs("https")) &&
is_media_supported;
- client_->UpdateRemotePlaybackCompatibility(is_source_supported);
+ if (client_)
+ client_->UpdateRemotePlaybackCompatibility(is_source_supported);
}
bool RendererController::IsVideoCodecSupported() const {
@@ -408,10 +400,8 @@ void RendererController::OnPaused() {
bool RendererController::CanBeRemoting() const {
DCHECK(thread_checker_.CalledOnValidThread());
- if (!client_) {
- DCHECK(!remote_rendering_started_);
+ if (!client_)
return false; // No way to switch to the remoting renderer.
- }
if (permanently_disable_remoting_)
return false;
@@ -448,10 +438,9 @@ void RendererController::UpdateAndMaybeSwitch(StartTrigger start_trigger,
(is_dominant_content_ && !encountered_renderer_fatal_error_);
if ((remote_rendering_started_ ||
- delayed_start_stability_timer_.IsRunning()) == should_be_remoting)
+ delayed_start_stability_timer_.IsRunning()) == should_be_remoting) {
return;
-
- DCHECK(client_);
+ }
// Only switch to remoting when media is playing. Since the renderer is
// created when video starts loading/playing, receiver will display a black
@@ -470,7 +459,8 @@ void RendererController::UpdateAndMaybeSwitch(StartTrigger start_trigger,
remote_rendering_started_ = false;
DCHECK_NE(UNKNOWN_STOP_TRIGGER, stop_trigger);
metrics_recorder_.WillStopSession(stop_trigger);
- client_->SwitchToLocalRenderer(GetSwitchReason(stop_trigger));
+ if (client_)
+ client_->SwitchToLocalRenderer(GetSwitchReason(stop_trigger));
VLOG(2) << "Request to stop remoting: stop_trigger=" << stop_trigger;
remoter_->Stop(mojom::RemotingStopReason::LOCAL_PLAYBACK);
}
@@ -480,6 +470,8 @@ void RendererController::WaitForStabilityBeforeStart(
StartTrigger start_trigger) {
DCHECK(!delayed_start_stability_timer_.IsRunning());
DCHECK(!remote_rendering_started_);
+ DCHECK(client_);
+
delayed_start_stability_timer_.Start(
FROM_HERE, kDelayedStart,
base::BindRepeating(&RendererController::OnDelayedStartTimerFired,
@@ -497,6 +489,7 @@ void RendererController::OnDelayedStartTimerFired(
base::TimeTicks delayed_start_time) {
DCHECK(is_dominant_content_);
DCHECK(!remote_rendering_started_);
+ DCHECK(client_); // This task is canceled otherwise.
base::TimeDelta elapsed = clock_->NowTicks() - delayed_start_time;
DCHECK(!elapsed.is_zero());
@@ -516,7 +509,6 @@ void RendererController::OnDelayedStartTimerFired(
}
}
- DCHECK(client_);
remote_rendering_started_ = true;
DCHECK_NE(UNKNOWN_START_TRIGGER, start_trigger);
metrics_recorder_.WillStartSession(start_trigger);
@@ -539,10 +531,18 @@ void RendererController::OnRendererFatalError(StopTrigger stop_trigger) {
void RendererController::SetClient(MediaObserverClient* client) {
DCHECK(thread_checker_.CalledOnValidThread());
- DCHECK(client);
- DCHECK(!client_);
client_ = client;
+ if (!client_) {
+ CancelDelayedStart();
+ if (remote_rendering_started_) {
+ metrics_recorder_.WillStopSession(MEDIA_ELEMENT_DESTROYED);
+ remoter_->Stop(mojom::RemotingStopReason::UNEXPECTED_FAILURE);
+ remote_rendering_started_ = false;
+ }
+ return;
+ }
+
client_->ActivateViewportIntersectionMonitoring(CanBeRemoting());
}
diff --git a/chromium/media/remoting/renderer_controller_unittest.cc b/chromium/media/remoting/renderer_controller_unittest.cc
index 2628b0d5b27..950719538ce 100644
--- a/chromium/media/remoting/renderer_controller_unittest.cc
+++ b/chromium/media/remoting/renderer_controller_unittest.cc
@@ -396,5 +396,15 @@ TEST_F(RendererControllerTest, StartFailed) {
ExpectInLocalRendering();
}
+TEST_F(RendererControllerTest, SetClientNullptr) {
+ controller_ = FakeRemoterFactory::CreateController(true);
+ InitializeControllerAndBecomeDominant(DefaultMetadata(VideoCodec::kCodecVP8),
+ GetDefaultSinkMetadata(true));
+ RunUntilIdle();
+ controller_->SetClient(nullptr);
+ RunUntilIdle();
+ ExpectInLocalRendering();
+}
+
} // namespace remoting
} // namespace media
diff --git a/chromium/media/renderers/BUILD.gn b/chromium/media/renderers/BUILD.gn
index 56af800cd2d..8c8bd609db3 100644
--- a/chromium/media/renderers/BUILD.gn
+++ b/chromium/media/renderers/BUILD.gn
@@ -13,6 +13,10 @@ source_set("renderers") {
sources = [
"audio_renderer_impl.cc",
"audio_renderer_impl.h",
+ "decrypting_renderer.cc",
+ "decrypting_renderer.h",
+ "decrypting_renderer_factory.cc",
+ "decrypting_renderer_factory.h",
"default_decoder_factory.cc",
"default_decoder_factory.h",
"default_renderer_factory.cc",
@@ -64,6 +68,7 @@ source_set("unit_tests") {
testonly = true
sources = [
"audio_renderer_impl_unittest.cc",
+ "decrypting_renderer_unittest.cc",
"paint_canvas_video_renderer_unittest.cc",
"renderer_impl_unittest.cc",
"video_renderer_impl_unittest.cc",
diff --git a/chromium/media/renderers/audio_renderer_impl.cc b/chromium/media/renderers/audio_renderer_impl.cc
index 287361281bd..167f91818d9 100644
--- a/chromium/media/renderers/audio_renderer_impl.cc
+++ b/chromium/media/renderers/audio_renderer_impl.cc
@@ -580,7 +580,7 @@ void AudioRendererImpl::OnDeviceInfoReceived(
cdm_context,
base::BindRepeating(&AudioRendererImpl::OnStatisticsUpdate,
weak_factory_.GetWeakPtr()),
- base::BindRepeating(&AudioRendererImpl::OnWaitingForDecryptionKey,
+ base::BindRepeating(&AudioRendererImpl::OnWaiting,
weak_factory_.GetWeakPtr()));
}
@@ -667,9 +667,9 @@ void AudioRendererImpl::OnBufferingStateChange(BufferingState state) {
client_->OnBufferingStateChange(state);
}
-void AudioRendererImpl::OnWaitingForDecryptionKey() {
+void AudioRendererImpl::OnWaiting(WaitingReason reason) {
DCHECK(task_runner_->BelongsToCurrentThread());
- client_->OnWaitingForDecryptionKey();
+ client_->OnWaiting(reason);
}
void AudioRendererImpl::SetVolume(float volume) {
diff --git a/chromium/media/renderers/audio_renderer_impl.h b/chromium/media/renderers/audio_renderer_impl.h
index ddd7db18c39..8a522bb2a8a 100644
--- a/chromium/media/renderers/audio_renderer_impl.h
+++ b/chromium/media/renderers/audio_renderer_impl.h
@@ -196,7 +196,7 @@ class MEDIA_EXPORT AudioRendererImpl
void OnPlaybackEnded();
void OnStatisticsUpdate(const PipelineStatistics& stats);
void OnBufferingStateChange(BufferingState state);
- void OnWaitingForDecryptionKey();
+ void OnWaiting(WaitingReason reason);
// Generally called by the AudioDecoderStream when a config change occurs. May
// also be called internally with an empty config to reset config-based state.
diff --git a/chromium/media/renderers/audio_renderer_impl_unittest.cc b/chromium/media/renderers/audio_renderer_impl_unittest.cc
index ad4f11e63f8..e637a84278c 100644
--- a/chromium/media/renderers/audio_renderer_impl_unittest.cc
+++ b/chromium/media/renderers/audio_renderer_impl_unittest.cc
@@ -57,12 +57,8 @@ class MockMediaClient : public MediaClient {
std::vector<std::unique_ptr<KeySystemProperties>>* key_systems) override {
}
bool IsKeySystemsUpdateNeeded() override { return false; }
- bool IsSupportedAudioConfig(const AudioConfig& config) override {
- return true;
- }
- bool IsSupportedVideoConfig(const VideoConfig& config) override {
- return true;
- }
+ bool IsSupportedAudioType(const AudioType& type) override { return true; }
+ bool IsSupportedVideoType(const VideoType& type) override { return true; }
bool IsSupportedBitstreamAudioCodec(AudioCodec codec) override {
return true;
}
@@ -192,16 +188,17 @@ class AudioRendererImplTest : public ::testing::Test, public RendererClient {
last_statistics_.audio_memory_usage += stats.audio_memory_usage;
}
MOCK_METHOD1(OnBufferingStateChange, void(BufferingState));
- MOCK_METHOD0(OnWaitingForDecryptionKey, void(void));
+ MOCK_METHOD1(OnWaiting, void(WaitingReason));
MOCK_METHOD1(OnAudioConfigChange, void(const AudioDecoderConfig&));
MOCK_METHOD1(OnVideoConfigChange, void(const VideoDecoderConfig&));
MOCK_METHOD1(OnVideoNaturalSizeChange, void(const gfx::Size&));
MOCK_METHOD1(OnVideoOpacityChange, void(bool));
MOCK_METHOD1(OnDurationChange, void(base::TimeDelta));
+ MOCK_METHOD1(OnRemotePlayStateChange, void(MediaStatus::State state));
void InitializeRenderer(DemuxerStream* demuxer_stream,
const PipelineStatusCB& pipeline_status_cb) {
- EXPECT_CALL(*this, OnWaitingForDecryptionKey()).Times(0);
+ EXPECT_CALL(*this, OnWaiting(_)).Times(0);
EXPECT_CALL(*this, OnVideoNaturalSizeChange(_)).Times(0);
EXPECT_CALL(*this, OnVideoOpacityChange(_)).Times(0);
EXPECT_CALL(*this, OnVideoConfigChange(_)).Times(0);
@@ -511,7 +508,7 @@ class AudioRendererImplTest : public ::testing::Test, public RendererClient {
AudioParameters hardware_params_;
base::test::ScopedTaskEnvironment task_environment_;
const scoped_refptr<base::SingleThreadTaskRunner> main_thread_task_runner_;
- MediaLog media_log_;
+ NullMediaLog media_log_;
std::unique_ptr<AudioRendererImpl> renderer_;
scoped_refptr<FakeAudioRendererSink> sink_;
base::SimpleTestTickClock tick_clock_;
diff --git a/chromium/media/renderers/decrypting_renderer.cc b/chromium/media/renderers/decrypting_renderer.cc
new file mode 100644
index 00000000000..509cd939ea6
--- /dev/null
+++ b/chromium/media/renderers/decrypting_renderer.cc
@@ -0,0 +1,197 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/renderers/decrypting_renderer.h"
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "media/base/demuxer_stream.h"
+#include "media/base/media_log.h"
+#include "media/base/media_resource.h"
+#include "media/base/renderer_client.h"
+#include "media/filters/decrypting_demuxer_stream.h"
+#include "media/filters/decrypting_media_resource.h"
+
+namespace media {
+
+DecryptingRenderer::DecryptingRenderer(
+ std::unique_ptr<Renderer> renderer,
+ MediaLog* media_log,
+ const scoped_refptr<base::SingleThreadTaskRunner> media_task_runner)
+ : renderer_(std::move(renderer)),
+ media_log_(media_log),
+ media_task_runner_(media_task_runner),
+ client_(nullptr),
+ media_resource_(nullptr),
+ decrypting_media_resource_(nullptr),
+ weak_factory_(this) {
+ DCHECK(renderer_);
+}
+
+DecryptingRenderer::~DecryptingRenderer() {}
+
+// The behavior of Initialize():
+//
+// Streams CdmContext Action
+// ---------------------------------------------------------------------
+// Clear nullptr InitializeRenderer()
+// Clear AesDecryptor CreateAndInitializeDecryptingMediaResource()
+// Clear Other InitializeRenderer()
+// Encrypted nullptr Wait
+// Encrypted AesDecryptor CreateAndInitializeDecryptingMediaResource()
+// Encrypted Other InitializeRenderer()
+void DecryptingRenderer::Initialize(MediaResource* media_resource,
+ RendererClient* client,
+ const PipelineStatusCB& init_cb) {
+ DCHECK(media_task_runner_->BelongsToCurrentThread());
+ DCHECK(media_resource);
+ DCHECK(client);
+
+ // Using |this| with a MediaResource::Type::URL will result in a crash.
+ DCHECK_EQ(media_resource->GetType(), MediaResource::Type::STREAM);
+
+ media_resource_ = media_resource;
+ client_ = client;
+ init_cb_ = std::move(init_cb);
+
+ bool has_encrypted_stream = HasEncryptedStream();
+
+ // If we do not have a valid |cdm_context_| and there are encrypted streams we
+ // need to wait.
+ if (!cdm_context_ && has_encrypted_stream) {
+ waiting_for_cdm_ = true;
+ return;
+ }
+
+ if (cdm_context_ && cdm_context_->GetDecryptor() &&
+ cdm_context_->GetDecryptor()->CanAlwaysDecrypt()) {
+ CreateAndInitializeDecryptingMediaResource();
+ return;
+ }
+
+ InitializeRenderer(true);
+}
+
+void DecryptingRenderer::SetCdm(CdmContext* cdm_context,
+ const CdmAttachedCB& cdm_attached_cb) {
+ DCHECK(media_task_runner_->BelongsToCurrentThread());
+
+ if (cdm_context_) {
+ DVLOG(1) << "Switching CDM not supported.";
+ cdm_attached_cb.Run(false);
+ return;
+ }
+
+ cdm_context_ = cdm_context;
+
+ // If we are using an AesDecryptor all decryption will be handled by the
+ // DecryptingMediaResource instead of the renderer implementation.
+ if (cdm_context_->GetDecryptor() &&
+ cdm_context_->GetDecryptor()->CanAlwaysDecrypt()) {
+ // If Initialize() was invoked prior to this function then
+ // |waiting_for_cdm_| will be true (if we reached this branch). In this
+ // scenario we want to initialize the DecryptingMediaResource here.
+ if (waiting_for_cdm_)
+ CreateAndInitializeDecryptingMediaResource();
+ cdm_attached_cb.Run(true);
+ return;
+ }
+
+ renderer_->SetCdm(cdm_context_, cdm_attached_cb);
+
+ // We only want to initialize the renderer if we were waiting for the
+ // CdmContext, otherwise it will already have been initialized.
+ if (waiting_for_cdm_)
+ InitializeRenderer(true);
+}
+
+void DecryptingRenderer::Flush(const base::Closure& flush_cb) {
+ renderer_->Flush(flush_cb);
+}
+
+void DecryptingRenderer::StartPlayingFrom(base::TimeDelta time) {
+ renderer_->StartPlayingFrom(time);
+}
+
+void DecryptingRenderer::SetPlaybackRate(double playback_rate) {
+ renderer_->SetPlaybackRate(playback_rate);
+}
+
+void DecryptingRenderer::SetVolume(float volume) {
+ renderer_->SetVolume(volume);
+}
+
+base::TimeDelta DecryptingRenderer::GetMediaTime() {
+ return renderer_->GetMediaTime();
+}
+
+void DecryptingRenderer::OnSelectedVideoTracksChanged(
+ const std::vector<DemuxerStream*>& enabled_tracks,
+ base::OnceClosure change_completed_cb) {
+ renderer_->OnSelectedVideoTracksChanged(enabled_tracks,
+ std::move(change_completed_cb));
+}
+
+void DecryptingRenderer::OnEnabledAudioTracksChanged(
+ const std::vector<DemuxerStream*>& enabled_tracks,
+ base::OnceClosure change_completed_cb) {
+ renderer_->OnEnabledAudioTracksChanged(enabled_tracks,
+ std::move(change_completed_cb));
+}
+
+void DecryptingRenderer::CreateAndInitializeDecryptingMediaResource() {
+ DCHECK(media_task_runner_->BelongsToCurrentThread());
+ DCHECK(init_cb_);
+
+ decrypting_media_resource_ = std::make_unique<DecryptingMediaResource>(
+ media_resource_, cdm_context_, media_log_, media_task_runner_);
+ decrypting_media_resource_->Initialize(
+ base::BindOnce(&DecryptingRenderer::InitializeRenderer,
+ weak_factory_.GetWeakPtr()),
+ base::BindRepeating(&DecryptingRenderer::OnWaiting,
+ weak_factory_.GetWeakPtr()));
+}
+
+void DecryptingRenderer::InitializeRenderer(bool success) {
+ DCHECK(media_task_runner_->BelongsToCurrentThread());
+
+ if (!success) {
+ std::move(init_cb_).Run(PIPELINE_ERROR_INITIALIZATION_FAILED);
+ return;
+ }
+
+ // |decrypting_media_resource_| when |cdm_context_| is null and there are no
+ // encrypted streams.
+ MediaResource* const maybe_decrypting_media_resource =
+ decrypting_media_resource_ ? decrypting_media_resource_.get()
+ : media_resource_;
+ renderer_->Initialize(maybe_decrypting_media_resource, client_,
+ std::move(init_cb_));
+}
+
+bool DecryptingRenderer::HasEncryptedStream() {
+ DCHECK(media_task_runner_->BelongsToCurrentThread());
+
+ for (auto* stream : media_resource_->GetAllStreams()) {
+ if ((stream->type() == DemuxerStream::AUDIO &&
+ stream->audio_decoder_config().is_encrypted()) ||
+ (stream->type() == DemuxerStream::VIDEO &&
+ stream->video_decoder_config().is_encrypted())) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+bool DecryptingRenderer::HasDecryptingMediaResourceForTesting() const {
+ return decrypting_media_resource_ != nullptr;
+}
+
+void DecryptingRenderer::OnWaiting(WaitingReason reason) {
+ DCHECK(media_task_runner_->BelongsToCurrentThread());
+ client_->OnWaiting(reason);
+}
+
+} // namespace media
diff --git a/chromium/media/renderers/decrypting_renderer.h b/chromium/media/renderers/decrypting_renderer.h
new file mode 100644
index 00000000000..582eaf93c8b
--- /dev/null
+++ b/chromium/media/renderers/decrypting_renderer.h
@@ -0,0 +1,95 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_RENDERERS_DECRYPTING_RENDERER_H_
+#define MEDIA_RENDERERS_DECRYPTING_RENDERER_H_
+
+#include <memory>
+
+#include "base/callback.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/weak_ptr.h"
+#include "base/single_thread_task_runner.h"
+#include "media/base/pipeline.h"
+#include "media/base/renderer.h"
+
+namespace media {
+
+class CdmContext;
+class DemuxerStream;
+class MediaLog;
+class MediaResource;
+class DecryptingMediaResource;
+class RendererClient;
+
+// DecryptingRenderer is used as a wrapper around a Renderer
+// implementation that decrypts streams when an AesDecryptor is available. In
+// this case only clear streams are passed on to the internally owned renderer
+// implementation.
+//
+// All methods are pass-through except Initialize() and SetCdm().
+//
+// The caller must guarantee that DecryptingRenderer will never be initialized
+// with a |media_resource| of type MediaResource::Type::URL.
+class MEDIA_EXPORT DecryptingRenderer : public Renderer {
+ public:
+ DecryptingRenderer(
+ std::unique_ptr<Renderer> renderer,
+ MediaLog* media_log,
+ const scoped_refptr<base::SingleThreadTaskRunner> media_task_runner);
+ ~DecryptingRenderer() override;
+
+ // Renderer implementation:
+ void Initialize(MediaResource* media_resource,
+ RendererClient* client,
+ const PipelineStatusCB& init_cb) override;
+ void SetCdm(CdmContext* cdm_context,
+ const CdmAttachedCB& cdm_attached_cb) override;
+
+ void Flush(const base::Closure& flush_cb) override;
+ void StartPlayingFrom(base::TimeDelta time) override;
+ void SetPlaybackRate(double playback_rate) override;
+ void SetVolume(float volume) override;
+ base::TimeDelta GetMediaTime() override;
+ void OnSelectedVideoTracksChanged(
+ const std::vector<DemuxerStream*>& enabled_tracks,
+ base::OnceClosure change_completed_cb) override;
+ void OnEnabledAudioTracksChanged(
+ const std::vector<DemuxerStream*>& enabled_tracks,
+ base::OnceClosure change_completed_cb) override;
+
+ bool HasDecryptingMediaResourceForTesting() const;
+
+ private:
+ friend class DecryptingRendererTest;
+
+ // Cannot be called before Initialize() has been called.
+ void CreateAndInitializeDecryptingMediaResource();
+
+ // Invoked as a callback after |decrypting_media_resource_| has been
+ // initialized.
+ void InitializeRenderer(bool success);
+ bool HasEncryptedStream();
+ void OnWaiting(WaitingReason reason);
+
+ const std::unique_ptr<Renderer> renderer_;
+ MediaLog* const media_log_;
+ const scoped_refptr<base::SingleThreadTaskRunner> media_task_runner_;
+
+ bool waiting_for_cdm_ = false;
+ CdmContext* cdm_context_ = nullptr;
+ RendererClient* client_;
+ MediaResource* media_resource_;
+ PipelineStatusCB init_cb_;
+
+ std::unique_ptr<DecryptingMediaResource> decrypting_media_resource_;
+
+ base::WeakPtrFactory<DecryptingRenderer> weak_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(DecryptingRenderer);
+};
+
+} // namespace media
+
+#endif // MEDIA_RENDERERS_DECRYPTING_RENDERER_H_
diff --git a/chromium/media/renderers/decrypting_renderer_factory.cc b/chromium/media/renderers/decrypting_renderer_factory.cc
new file mode 100644
index 00000000000..42171d4e813
--- /dev/null
+++ b/chromium/media/renderers/decrypting_renderer_factory.cc
@@ -0,0 +1,34 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/renderers/decrypting_renderer_factory.h"
+
+#include "media/base/media_log.h"
+#include "media/renderers/decrypting_renderer.h"
+
+namespace media {
+
+DecryptingRendererFactory::DecryptingRendererFactory(
+ media::MediaLog* media_log,
+ std::unique_ptr<media::RendererFactory> renderer_factory)
+ : media_log_(media_log), renderer_factory_(std::move(renderer_factory)) {}
+
+DecryptingRendererFactory::~DecryptingRendererFactory() = default;
+
+std::unique_ptr<Renderer> DecryptingRendererFactory::CreateRenderer(
+ const scoped_refptr<base::SingleThreadTaskRunner>& media_task_runner,
+ const scoped_refptr<base::TaskRunner>& worker_task_runner,
+ AudioRendererSink* audio_renderer_sink,
+ VideoRendererSink* video_renderer_sink,
+ const RequestOverlayInfoCB& request_overlay_info_cb,
+ const gfx::ColorSpace& target_color_space) {
+ std::unique_ptr<media::Renderer> renderer = renderer_factory_->CreateRenderer(
+ media_task_runner, worker_task_runner, audio_renderer_sink,
+ video_renderer_sink, request_overlay_info_cb, target_color_space);
+
+ return std::make_unique<DecryptingRenderer>(std::move(renderer), media_log_,
+ media_task_runner);
+}
+
+} // namespace media
diff --git a/chromium/media/renderers/decrypting_renderer_factory.h b/chromium/media/renderers/decrypting_renderer_factory.h
new file mode 100644
index 00000000000..3f115c827dc
--- /dev/null
+++ b/chromium/media/renderers/decrypting_renderer_factory.h
@@ -0,0 +1,50 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_RENDERERS_DECRYPTING_RENDERER_FACTORY_H_
+#define MEDIA_RENDERERS_DECRYPTING_RENDERER_FACTORY_H_
+
+#include "base/callback.h"
+#include "base/macros.h"
+#include "media/base/media_export.h"
+#include "media/base/renderer_factory.h"
+
+namespace media {
+
+class MediaLog;
+
+// Simple RendererFactory wrapper class. It wraps any Renderer created by the
+// underlying factory, and returns it as a DecryptingRenderer.
+//
+// See DecryptingRenderer for more information.
+//
+// The caller must guarantee that the returned DecryptingRenderer will never
+// be initialized with a |media_resource| of type MediaResource::Type::URL.
+class MEDIA_EXPORT DecryptingRendererFactory : public RendererFactory {
+ public:
+ DecryptingRendererFactory(
+ MediaLog* media_log,
+ std::unique_ptr<media::RendererFactory> renderer_factory);
+ ~DecryptingRendererFactory() final;
+
+ // RendererFactory implementation.
+ std::unique_ptr<Renderer> CreateRenderer(
+ const scoped_refptr<base::SingleThreadTaskRunner>& media_task_runner,
+ const scoped_refptr<base::TaskRunner>& worker_task_runner,
+ AudioRendererSink* audio_renderer_sink,
+ VideoRendererSink* video_renderer_sink,
+ const RequestOverlayInfoCB& request_overlay_info_cb,
+ const gfx::ColorSpace& target_color_space) final;
+
+ private:
+ MediaLog* media_log_;
+
+ std::unique_ptr<media::RendererFactory> renderer_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(DecryptingRendererFactory);
+};
+
+} // namespace media
+
+#endif // MEDIA_RENDERERS_DECRYPTING_RENDERER_FACTORY_H_
diff --git a/chromium/media/renderers/decrypting_renderer_unittest.cc b/chromium/media/renderers/decrypting_renderer_unittest.cc
new file mode 100644
index 00000000000..69634d89842
--- /dev/null
+++ b/chromium/media/renderers/decrypting_renderer_unittest.cc
@@ -0,0 +1,268 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/renderers/decrypting_renderer.h"
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/single_thread_task_runner.h"
+#include "base/test/mock_callback.h"
+#include "base/test/scoped_task_environment.h"
+#include "media/base/demuxer_stream.h"
+#include "media/base/gmock_callback_support.h"
+#include "media/base/media_util.h"
+#include "media/base/mock_filters.h"
+#include "media/base/test_helpers.h"
+#include "media/filters/decrypting_media_resource.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+using ::testing::_;
+using ::testing::AnyNumber;
+using ::testing::Invoke;
+using ::testing::Return;
+using ::testing::ReturnPointee;
+using ::testing::StrictMock;
+
+namespace media {
+
+class CdmContext;
+class DemuxerStream;
+class MediaLog;
+
+class DecryptingRendererTest : public testing::Test {
+ public:
+ DecryptingRendererTest() {
+ auto renderer = std::make_unique<StrictMock<MockRenderer>>();
+ renderer_ = renderer.get();
+ decrypting_renderer_ = std::make_unique<DecryptingRenderer>(
+ std::move(renderer), &null_media_log_,
+ scoped_task_environment_.GetMainThreadTaskRunner());
+
+ EXPECT_CALL(cdm_context_, GetDecryptor())
+ .WillRepeatedly(Return(&decryptor_));
+ EXPECT_CALL(decryptor_, CanAlwaysDecrypt())
+ .WillRepeatedly(ReturnPointee(&use_aes_decryptor_));
+ EXPECT_CALL(decryptor_, CancelDecrypt(_)).Times(AnyNumber());
+ EXPECT_CALL(decryptor_, RegisterNewKeyCB(_, _)).Times(AnyNumber());
+ EXPECT_CALL(media_resource_, GetAllStreams())
+ .WillRepeatedly(Invoke(this, &DecryptingRendererTest::GetAllStreams));
+ EXPECT_CALL(media_resource_, GetType())
+ .WillRepeatedly(Return(MediaResource::STREAM));
+ }
+
+ ~DecryptingRendererTest() override {
+ // Ensure that the DecryptingRenderer is destructed before other objects
+ // that it internally references but does not own.
+ decrypting_renderer_.reset();
+ }
+
+ void AddStream(DemuxerStream::Type type, bool encrypted) {
+ streams_.push_back(CreateMockDemuxerStream(type, encrypted));
+ }
+
+ void UseAesDecryptor(bool use_aes_decryptor) {
+ use_aes_decryptor_ = use_aes_decryptor;
+ }
+
+ std::vector<DemuxerStream*> GetAllStreams() {
+ std::vector<DemuxerStream*> streams;
+
+ for (auto& stream : streams_) {
+ streams.push_back(stream.get());
+ }
+
+ return streams;
+ }
+
+ protected:
+ // Invoking InitializeRenderer(false) will cause the initialization of the
+ // DecryptingRenderer to halt and an error will be propagated to the media
+ // pipeline.
+ void InitializeDecryptingRendererWithFalse() {
+ decrypting_renderer_->InitializeRenderer(false);
+ }
+
+ bool use_aes_decryptor_ = false;
+ base::test::ScopedTaskEnvironment scoped_task_environment_;
+ base::MockCallback<CdmAttachedCB> set_cdm_cb_;
+ base::MockCallback<PipelineStatusCB> renderer_init_cb_;
+ NullMediaLog null_media_log_;
+ StrictMock<MockCdmContext> cdm_context_;
+ StrictMock<MockDecryptor> decryptor_;
+ StrictMock<MockMediaResource> media_resource_;
+ StrictMock<MockRendererClient> renderer_client_;
+ StrictMock<MockRenderer>* renderer_;
+ std::unique_ptr<DecryptingRenderer> decrypting_renderer_;
+ std::vector<std::unique_ptr<StrictMock<MockDemuxerStream>>> streams_;
+};
+
+TEST_F(DecryptingRendererTest, ClearStreams_NoCdm) {
+ AddStream(DemuxerStream::AUDIO, /* encrypted = */ false);
+ AddStream(DemuxerStream::VIDEO, /* encrypted = */ false);
+
+ EXPECT_CALL(*renderer_, Initialize(_, _, _))
+ .WillOnce(RunCallback<2>(PIPELINE_OK));
+ EXPECT_CALL(renderer_init_cb_, Run(PIPELINE_OK));
+
+ decrypting_renderer_->Initialize(&media_resource_, &renderer_client_,
+ renderer_init_cb_.Get());
+ scoped_task_environment_.RunUntilIdle();
+
+ EXPECT_FALSE(decrypting_renderer_->HasDecryptingMediaResourceForTesting());
+}
+
+TEST_F(DecryptingRendererTest, ClearStreams_AesDecryptor) {
+ AddStream(DemuxerStream::AUDIO, /* encrypted = */ false);
+ AddStream(DemuxerStream::VIDEO, /* encrypted = */ false);
+ UseAesDecryptor(true);
+
+ EXPECT_CALL(*renderer_, Initialize(_, _, _))
+ .WillOnce(RunCallback<2>(PIPELINE_OK));
+ EXPECT_CALL(set_cdm_cb_, Run(true));
+ EXPECT_CALL(renderer_init_cb_, Run(PIPELINE_OK));
+
+ decrypting_renderer_->SetCdm(&cdm_context_, set_cdm_cb_.Get());
+ decrypting_renderer_->Initialize(&media_resource_, &renderer_client_,
+ renderer_init_cb_.Get());
+ scoped_task_environment_.RunUntilIdle();
+
+ EXPECT_TRUE(decrypting_renderer_->HasDecryptingMediaResourceForTesting());
+}
+
+TEST_F(DecryptingRendererTest, ClearStreams_OtherCdm) {
+ AddStream(DemuxerStream::AUDIO, /* encrypted = */ false);
+ AddStream(DemuxerStream::VIDEO, /* encrypted = */ false);
+
+ EXPECT_CALL(*renderer_, Initialize(_, _, _))
+ .WillOnce(RunCallback<2>(PIPELINE_OK));
+ EXPECT_CALL(*renderer_, SetCdm(_, _)).WillOnce(RunCallback<1>(true));
+ EXPECT_CALL(renderer_init_cb_, Run(PIPELINE_OK));
+ EXPECT_CALL(set_cdm_cb_, Run(true));
+
+ decrypting_renderer_->Initialize(&media_resource_, &renderer_client_,
+ renderer_init_cb_.Get());
+ decrypting_renderer_->SetCdm(&cdm_context_, set_cdm_cb_.Get());
+ scoped_task_environment_.RunUntilIdle();
+
+ EXPECT_FALSE(decrypting_renderer_->HasDecryptingMediaResourceForTesting());
+}
+
+TEST_F(DecryptingRendererTest, EncryptedStreams_NoCdm) {
+ AddStream(DemuxerStream::AUDIO, /* encrypted = */ true);
+ AddStream(DemuxerStream::VIDEO, /* encrypted = */ true);
+
+ decrypting_renderer_->Initialize(&media_resource_, &renderer_client_,
+ renderer_init_cb_.Get());
+ scoped_task_environment_.RunUntilIdle();
+
+ EXPECT_FALSE(decrypting_renderer_->HasDecryptingMediaResourceForTesting());
+}
+
+TEST_F(DecryptingRendererTest, EncryptedStreams_AesDecryptor) {
+ AddStream(DemuxerStream::AUDIO, /* encrypted = */ true);
+ AddStream(DemuxerStream::VIDEO, /* encrypted = */ true);
+ UseAesDecryptor(true);
+
+ EXPECT_CALL(*renderer_, Initialize(_, _, _))
+ .WillOnce(RunCallback<2>(PIPELINE_OK));
+ EXPECT_CALL(renderer_init_cb_, Run(PIPELINE_OK));
+ EXPECT_CALL(set_cdm_cb_, Run(true));
+
+ decrypting_renderer_->Initialize(&media_resource_, &renderer_client_,
+ renderer_init_cb_.Get());
+ decrypting_renderer_->SetCdm(&cdm_context_, set_cdm_cb_.Get());
+ scoped_task_environment_.RunUntilIdle();
+
+ EXPECT_TRUE(decrypting_renderer_->HasDecryptingMediaResourceForTesting());
+}
+
+TEST_F(DecryptingRendererTest, EncryptedStreams_OtherCdm) {
+ AddStream(DemuxerStream::AUDIO, /* encrypted = */ true);
+ AddStream(DemuxerStream::VIDEO, /* encrypted = */ true);
+
+ EXPECT_CALL(*renderer_, Initialize(_, _, _))
+ .WillOnce(RunCallback<2>(PIPELINE_OK));
+ EXPECT_CALL(*renderer_, SetCdm(_, _)).WillOnce(RunCallback<1>(true));
+ EXPECT_CALL(renderer_init_cb_, Run(PIPELINE_OK));
+ EXPECT_CALL(set_cdm_cb_, Run(true));
+
+ decrypting_renderer_->Initialize(&media_resource_, &renderer_client_,
+ renderer_init_cb_.Get());
+ decrypting_renderer_->SetCdm(&cdm_context_, set_cdm_cb_.Get());
+ scoped_task_environment_.RunUntilIdle();
+
+ EXPECT_FALSE(decrypting_renderer_->HasDecryptingMediaResourceForTesting());
+}
+
+TEST_F(DecryptingRendererTest, EncryptedStreams_AesDecryptor_CdmSetBeforeInit) {
+ AddStream(DemuxerStream::AUDIO, /* encrypted = */ true);
+ AddStream(DemuxerStream::VIDEO, /* encrypted = */ true);
+ UseAesDecryptor(true);
+
+ EXPECT_CALL(*renderer_, Initialize(_, _, _))
+ .WillOnce(RunCallback<2>(PIPELINE_OK));
+ EXPECT_CALL(renderer_init_cb_, Run(PIPELINE_OK));
+ EXPECT_CALL(set_cdm_cb_, Run(true));
+
+ decrypting_renderer_->SetCdm(&cdm_context_, set_cdm_cb_.Get());
+ decrypting_renderer_->Initialize(&media_resource_, &renderer_client_,
+ renderer_init_cb_.Get());
+ scoped_task_environment_.RunUntilIdle();
+
+ EXPECT_TRUE(decrypting_renderer_->HasDecryptingMediaResourceForTesting());
+}
+
+TEST_F(DecryptingRendererTest, EncryptedStreams_OtherCdm_CdmSetBeforeInit) {
+ AddStream(DemuxerStream::AUDIO, /* encrypted = */ true);
+ AddStream(DemuxerStream::VIDEO, /* encrypted = */ true);
+
+ EXPECT_CALL(*renderer_, Initialize(_, _, _))
+ .WillOnce(RunCallback<2>(PIPELINE_OK));
+ EXPECT_CALL(*renderer_, SetCdm(_, _)).WillOnce(RunCallback<1>(true));
+ EXPECT_CALL(renderer_init_cb_, Run(PIPELINE_OK));
+ EXPECT_CALL(set_cdm_cb_, Run(true));
+
+ decrypting_renderer_->SetCdm(&cdm_context_, set_cdm_cb_.Get());
+ decrypting_renderer_->Initialize(&media_resource_, &renderer_client_,
+ renderer_init_cb_.Get());
+ scoped_task_environment_.RunUntilIdle();
+
+ EXPECT_FALSE(decrypting_renderer_->HasDecryptingMediaResourceForTesting());
+}
+
+TEST_F(DecryptingRendererTest, EncryptedAndClearStream_OtherCdm) {
+ AddStream(DemuxerStream::AUDIO, /* encrypted = */ false);
+ AddStream(DemuxerStream::VIDEO, /* encrypted = */ true);
+
+ EXPECT_CALL(*renderer_, Initialize(_, _, _))
+ .WillOnce(RunCallback<2>(PIPELINE_OK));
+ EXPECT_CALL(*renderer_, SetCdm(_, _)).WillOnce(RunCallback<1>(true));
+ EXPECT_CALL(renderer_init_cb_, Run(PIPELINE_OK));
+ EXPECT_CALL(set_cdm_cb_, Run(true));
+
+ decrypting_renderer_->Initialize(&media_resource_, &renderer_client_,
+ renderer_init_cb_.Get());
+ decrypting_renderer_->SetCdm(&cdm_context_, set_cdm_cb_.Get());
+ scoped_task_environment_.RunUntilIdle();
+
+ EXPECT_FALSE(decrypting_renderer_->HasDecryptingMediaResourceForTesting());
+}
+
+TEST_F(DecryptingRendererTest, DecryptingMediaResourceInitFails) {
+ AddStream(DemuxerStream::AUDIO, /* encrypted = */ false);
+ AddStream(DemuxerStream::VIDEO, /* encrypted = */ true);
+ UseAesDecryptor(true);
+
+ EXPECT_CALL(renderer_init_cb_, Run(PIPELINE_ERROR_INITIALIZATION_FAILED));
+
+ decrypting_renderer_->Initialize(&media_resource_, &renderer_client_,
+ renderer_init_cb_.Get());
+ scoped_task_environment_.RunUntilIdle();
+
+ // Cause a PIPELINE_ERROR_INITIALIZATION_FAILED error to be passed as a
+ // parameter to the initialization callback.
+ InitializeDecryptingRendererWithFalse();
+}
+
+} // namespace media
diff --git a/chromium/media/renderers/default_decoder_factory.cc b/chromium/media/renderers/default_decoder_factory.cc
index 1229941e4de..b0272277af5 100644
--- a/chromium/media/renderers/default_decoder_factory.cc
+++ b/chromium/media/renderers/default_decoder_factory.cc
@@ -97,14 +97,13 @@ void DefaultDecoderFactory::CreateVideoDecoders(
// factories, require that their message loops are identical.
DCHECK_EQ(gpu_factories->GetTaskRunner(), task_runner);
- if (external_decoder_factory_) {
+ // MojoVideoDecoder replaces any VDA for this platform when it's enabled.
+ if (external_decoder_factory_ &&
+ base::FeatureList::IsEnabled(media::kMojoVideoDecoder)) {
external_decoder_factory_->CreateVideoDecoders(
task_runner, gpu_factories, media_log, request_overlay_info_cb,
target_color_space, video_decoders);
- }
-
- // MojoVideoDecoder replaces any VDA for this platform when it's enabled.
- if (!base::FeatureList::IsEnabled(media::kMojoVideoDecoder)) {
+ } else {
video_decoders->push_back(std::make_unique<GpuVideoDecoder>(
gpu_factories, request_overlay_info_cb, target_color_space,
media_log));
diff --git a/chromium/media/renderers/paint_canvas_video_renderer.cc b/chromium/media/renderers/paint_canvas_video_renderer.cc
index 93de42e1bc7..dc35e298c35 100644
--- a/chromium/media/renderers/paint_canvas_video_renderer.cc
+++ b/chromium/media/renderers/paint_canvas_video_renderer.cc
@@ -342,6 +342,12 @@ class VideoImageGenerator : public cc::PaintImageGenerator {
bool QueryYUVA8(SkYUVASizeInfo* sizeInfo,
SkYUVAIndex indices[SkYUVAIndex::kIndexCount],
SkYUVColorSpace* color_space) const override {
+ // Temporarily disabling this path to avoid creating YUV ImageData in
+ // GpuImageDecodeCache.
+ // TODO(crbug.com/921636): Restore the code below once YUV rendering support
+ // is added for VideoImageGenerator.
+ return false;
+#if 0
if (!media::IsYuvPlanar(frame_->format()) ||
// TODO(rileya): Skia currently doesn't support YUVA conversion. Remove
// this case once it does. As-is we will fall back on the pure-software
@@ -376,6 +382,7 @@ class VideoImageGenerator : public cc::PaintImageGenerator {
indices[SkYUVAIndex::kA_Index] = {-1, SkColorChannel::kR};
return true;
+#endif
}
bool GetYUVA8Planes(const SkYUVASizeInfo& sizeInfo,
@@ -936,6 +943,7 @@ void PaintCanvasVideoRenderer::ConvertVideoFrameToRGBPixels(
case PIXEL_FORMAT_MT21:
case PIXEL_FORMAT_ABGR:
case PIXEL_FORMAT_XBGR:
+ case PIXEL_FORMAT_P016LE:
case PIXEL_FORMAT_UNKNOWN:
NOTREACHED() << "Only YUV formats and Y16 are supported, got: "
<< media::VideoPixelFormatToString(video_frame->format());
diff --git a/chromium/media/renderers/renderer_impl.cc b/chromium/media/renderers/renderer_impl.cc
index 63e3f74e73c..4f49abbaad9 100644
--- a/chromium/media/renderers/renderer_impl.cc
+++ b/chromium/media/renderers/renderer_impl.cc
@@ -48,8 +48,8 @@ class RendererImpl::RendererClientInternal final : public RendererClient {
void OnBufferingStateChange(BufferingState state) override {
renderer_->OnBufferingStateChange(type_, state);
}
- void OnWaitingForDecryptionKey() override {
- renderer_->OnWaitingForDecryptionKey();
+ void OnWaiting(WaitingReason reason) override {
+ renderer_->OnWaiting(reason);
}
void OnAudioConfigChange(const AudioDecoderConfig& config) override {
renderer_->OnAudioConfigChange(config);
@@ -72,6 +72,10 @@ class RendererImpl::RendererClientInternal final : public RendererClient {
// the DemuxerHost interface.
NOTREACHED();
}
+ void OnRemotePlayStateChange(MediaStatus::State state) override {
+ // Only used with FlingingRenderer.
+ NOTREACHED();
+ }
private:
DemuxerStream::Type type_;
@@ -909,9 +913,9 @@ void RendererImpl::OnError(PipelineStatus error) {
FinishFlush();
}
-void RendererImpl::OnWaitingForDecryptionKey() {
+void RendererImpl::OnWaiting(WaitingReason reason) {
DCHECK(task_runner_->BelongsToCurrentThread());
- client_->OnWaitingForDecryptionKey();
+ client_->OnWaiting(reason);
}
void RendererImpl::OnAudioConfigChange(const AudioDecoderConfig& config) {
diff --git a/chromium/media/renderers/renderer_impl.h b/chromium/media/renderers/renderer_impl.h
index 18f63848ccd..1a4d76fa26e 100644
--- a/chromium/media/renderers/renderer_impl.h
+++ b/chromium/media/renderers/renderer_impl.h
@@ -25,6 +25,7 @@
#include "media/base/pipeline_status.h"
#include "media/base/renderer.h"
#include "media/base/video_decoder_config.h"
+#include "media/base/waiting.h"
#include "ui/gfx/geometry/size.h"
namespace base {
@@ -189,7 +190,7 @@ class MEDIA_EXPORT RendererImpl : public Renderer {
// Callback executed when a runtime error happens.
void OnError(PipelineStatus error);
- void OnWaitingForDecryptionKey();
+ void OnWaiting(WaitingReason reason);
void OnVideoNaturalSizeChange(const gfx::Size& size);
void OnAudioConfigChange(const AudioDecoderConfig& config);
void OnVideoConfigChange(const VideoDecoderConfig& config);
diff --git a/chromium/media/renderers/renderer_impl_unittest.cc b/chromium/media/renderers/renderer_impl_unittest.cc
index 5ac6881f1cf..cadf70731ab 100644
--- a/chromium/media/renderers/renderer_impl_unittest.cc
+++ b/chromium/media/renderers/renderer_impl_unittest.cc
@@ -126,7 +126,7 @@ class RendererImplTest : public ::testing::Test {
void InitializeAndExpect(PipelineStatus start_status) {
EXPECT_CALL(callbacks_, OnInitialize(start_status))
.WillOnce(SaveArg<0>(&initialization_status_));
- EXPECT_CALL(callbacks_, OnWaitingForDecryptionKey()).Times(0);
+ EXPECT_CALL(callbacks_, OnWaiting(_)).Times(0);
if (start_status == PIPELINE_OK && audio_stream_) {
EXPECT_CALL(*audio_renderer_, GetTimeSource())
diff --git a/chromium/media/renderers/video_renderer_impl.cc b/chromium/media/renderers/video_renderer_impl.cc
index bb59b11cd2a..2ff838bfebd 100644
--- a/chromium/media/renderers/video_renderer_impl.cc
+++ b/chromium/media/renderers/video_renderer_impl.cc
@@ -261,7 +261,7 @@ void VideoRendererImpl::Initialize(
cdm_context,
base::BindRepeating(&VideoRendererImpl::OnStatisticsUpdate,
weak_factory_.GetWeakPtr()),
- base::BindRepeating(&VideoRendererImpl::OnWaitingForDecryptionKey,
+ base::BindRepeating(&VideoRendererImpl::OnWaiting,
weak_factory_.GetWeakPtr()));
}
@@ -387,9 +387,9 @@ void VideoRendererImpl::OnBufferingStateChange(BufferingState state) {
client_->OnBufferingStateChange(state);
}
-void VideoRendererImpl::OnWaitingForDecryptionKey() {
+void VideoRendererImpl::OnWaiting(WaitingReason reason) {
DCHECK(task_runner_->BelongsToCurrentThread());
- client_->OnWaitingForDecryptionKey();
+ client_->OnWaiting(reason);
}
void VideoRendererImpl::OnConfigChange(const VideoDecoderConfig& config) {
diff --git a/chromium/media/renderers/video_renderer_impl.h b/chromium/media/renderers/video_renderer_impl.h
index 0eb6391fecf..88815e72647 100644
--- a/chromium/media/renderers/video_renderer_impl.h
+++ b/chromium/media/renderers/video_renderer_impl.h
@@ -97,7 +97,7 @@ class MEDIA_EXPORT VideoRendererImpl
void OnPlaybackEnded();
void OnStatisticsUpdate(const PipelineStatistics& stats);
void OnBufferingStateChange(BufferingState state);
- void OnWaitingForDecryptionKey();
+ void OnWaiting(WaitingReason reason);
// Called by the VideoDecoderStream when a config change occurs. Will notify
// RenderClient of the new config.
diff --git a/chromium/media/renderers/video_renderer_impl_unittest.cc b/chromium/media/renderers/video_renderer_impl_unittest.cc
index 27a1ec34f18..d13692ed9ac 100644
--- a/chromium/media/renderers/video_renderer_impl_unittest.cc
+++ b/chromium/media/renderers/video_renderer_impl_unittest.cc
@@ -28,6 +28,7 @@
#include "media/base/gmock_callback_support.h"
#include "media/base/limits.h"
#include "media/base/media_switches.h"
+#include "media/base/media_util.h"
#include "media/base/mock_filters.h"
#include "media/base/null_video_sink.h"
#include "media/base/test_helpers.h"
@@ -133,7 +134,7 @@ class VideoRendererImplTest : public testing::Test {
bool expect_success) {
if (low_delay)
demuxer_stream->set_liveness(DemuxerStream::LIVENESS_LIVE);
- EXPECT_CALL(mock_cb_, OnWaitingForDecryptionKey()).Times(0);
+ EXPECT_CALL(mock_cb_, OnWaiting(_)).Times(0);
EXPECT_CALL(mock_cb_, OnAudioConfigChange(_)).Times(0);
EXPECT_CALL(mock_cb_, OnStatisticsUpdate(_)).Times(AnyNumber());
renderer_->Initialize(
@@ -445,7 +446,7 @@ class VideoRendererImplTest : public testing::Test {
protected:
base::test::ScopedTaskEnvironment task_environment_;
- MediaLog media_log_;
+ NullMediaLog media_log_;
// Fixture members.
std::unique_ptr<VideoRendererImpl> renderer_;
diff --git a/chromium/media/renderers/video_resource_updater.cc b/chromium/media/renderers/video_resource_updater.cc
index c07333dcc7d..25dc5fdf2e3 100644
--- a/chromium/media/renderers/video_resource_updater.cc
+++ b/chromium/media/renderers/video_resource_updater.cc
@@ -25,7 +25,6 @@
#include "components/viz/client/client_resource_provider.h"
#include "components/viz/client/shared_bitmap_reporter.h"
#include "components/viz/common/gpu/context_provider.h"
-#include "components/viz/common/gpu/texture_allocation.h"
#include "components/viz/common/quads/render_pass.h"
#include "components/viz/common/quads/stream_video_draw_quad.h"
#include "components/viz/common/quads/texture_draw_quad.h"
@@ -35,6 +34,9 @@
#include "gpu/GLES2/gl2extchromium.h"
#include "gpu/command_buffer/client/context_support.h"
#include "gpu/command_buffer/client/gles2_interface.h"
+#include "gpu/command_buffer/client/shared_image_interface.h"
+#include "gpu/command_buffer/common/shared_image_trace_utils.h"
+#include "gpu/command_buffer/common/shared_image_usage.h"
#include "media/base/video_frame.h"
#include "media/renderers/paint_canvas_video_renderer.h"
#include "media/video/half_float_maker.h"
@@ -114,6 +116,7 @@ VideoFrameResourceType ExternalResourceTypeForHardwarePlanes(
case PIXEL_FORMAT_Y16:
case PIXEL_FORMAT_ABGR:
case PIXEL_FORMAT_XBGR:
+ case PIXEL_FORMAT_P016LE:
case PIXEL_FORMAT_UNKNOWN:
break;
}
@@ -299,31 +302,82 @@ class VideoResourceUpdater::SoftwarePlaneResource
class VideoResourceUpdater::HardwarePlaneResource
: public VideoResourceUpdater::PlaneResource {
public:
+ // Provides a RAII scope to access the HardwarePlaneResource as a texture on a
+ // GL context. This will wait on the sync token and provide the shared image
+ // access scope.
+ class ScopedTexture {
+ public:
+ ScopedTexture(gpu::gles2::GLES2Interface* gl,
+ HardwarePlaneResource* resource)
+ : gl_(gl) {
+ texture_id_ = gl_->CreateAndTexStorage2DSharedImageCHROMIUM(
+ resource->mailbox().name);
+ gl_->BeginSharedImageAccessDirectCHROMIUM(
+ texture_id_, GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM);
+ }
+
+ ~ScopedTexture() {
+ gl_->EndSharedImageAccessDirectCHROMIUM(texture_id_);
+ gl_->DeleteTextures(1, &texture_id_);
+ }
+
+ GLuint texture_id() const { return texture_id_; }
+
+ private:
+ gpu::gles2::GLES2Interface* gl_;
+ GLuint texture_id_;
+ };
+
HardwarePlaneResource(uint32_t plane_resource_id,
const gfx::Size& size,
viz::ResourceFormat format,
- viz::ContextProvider* context_provider,
- viz::TextureAllocation allocation)
+ const gfx::ColorSpace& color_space,
+ bool use_gpu_memory_buffer_resources,
+ viz::ContextProvider* context_provider)
: PlaneResource(plane_resource_id, size, format, /*is_software=*/false),
- context_provider_(context_provider),
- allocation_(std::move(allocation)) {
+ context_provider_(context_provider) {
DCHECK(context_provider_);
- context_provider_->ContextGL()->ProduceTextureDirectCHROMIUM(
- allocation_.texture_id, mailbox_.name);
+ const gpu::Capabilities& caps = context_provider_->ContextCapabilities();
+ overlay_candidate_ = use_gpu_memory_buffer_resources &&
+ caps.texture_storage_image &&
+ IsGpuMemoryBufferFormatSupported(format);
+ uint32_t shared_image_usage =
+ gpu::SHARED_IMAGE_USAGE_GLES2 | gpu::SHARED_IMAGE_USAGE_DISPLAY;
+ if (overlay_candidate_) {
+ shared_image_usage |= gpu::SHARED_IMAGE_USAGE_SCANOUT;
+ texture_target_ = gpu::GetBufferTextureTarget(gfx::BufferUsage::SCANOUT,
+ BufferFormat(format), caps);
+ }
+ auto* sii = context_provider_->SharedImageInterface();
+ DCHECK(sii);
+ auto* gl = context_provider_->ContextGL();
+ DCHECK(gl);
+
+ mailbox_ =
+ sii->CreateSharedImage(format, size, color_space, shared_image_usage);
+ gl->WaitSyncTokenCHROMIUM(sii->GenUnverifiedSyncToken().GetConstData());
}
+
~HardwarePlaneResource() override {
- context_provider_->ContextGL()->DeleteTextures(1, &allocation_.texture_id);
+ auto* sii = context_provider_->SharedImageInterface();
+ DCHECK(sii);
+ auto* gl = context_provider_->ContextGL();
+ DCHECK(gl);
+ gpu::SyncToken sync_token;
+ gl->GenUnverifiedSyncTokenCHROMIUM(sync_token.GetData());
+ sii->DestroySharedImage(sync_token, mailbox_);
}
const gpu::Mailbox& mailbox() const { return mailbox_; }
- GLuint texture_id() const { return allocation_.texture_id; }
- GLenum texture_target() const { return allocation_.texture_target; }
- bool overlay_candidate() const { return allocation_.overlay_candidate; }
+
+ GLenum texture_target() const { return texture_target_; }
+ bool overlay_candidate() const { return overlay_candidate_; }
private:
viz::ContextProvider* const context_provider_;
gpu::Mailbox mailbox_;
- const viz::TextureAllocation allocation_;
+ GLenum texture_target_ = GL_TEXTURE_2D;
+ bool overlay_candidate_ = false;
DISALLOW_COPY_AND_ASSIGN(HardwarePlaneResource);
};
@@ -628,22 +682,9 @@ VideoResourceUpdater::PlaneResource* VideoResourceUpdater::AllocateResource(
all_resources_.push_back(std::make_unique<SoftwarePlaneResource>(
plane_resource_id, plane_size, shared_bitmap_reporter_));
} else {
- // Video textures get composited into the display frame, the GPU doesn't
- // draw to them directly.
- constexpr bool kForFrameBufferAttachment = false;
-
- viz::TextureAllocation alloc = viz::TextureAllocation::MakeTextureId(
- context_provider_->ContextGL(),
- context_provider_->ContextCapabilities(), format,
- use_gpu_memory_buffer_resources_, kForFrameBufferAttachment);
- viz::TextureAllocation::AllocateStorage(
- context_provider_->ContextGL(),
- context_provider_->ContextCapabilities(), format, plane_size, alloc,
- color_space);
-
all_resources_.push_back(std::make_unique<HardwarePlaneResource>(
- plane_resource_id, plane_size, format, context_provider_,
- std::move(alloc)));
+ plane_resource_id, plane_size, format, color_space,
+ use_gpu_memory_buffer_resources_, context_provider_));
}
return all_resources_.back().get();
}
@@ -673,12 +714,17 @@ void VideoResourceUpdater::CopyHardwarePlane(
gpu::gles2::GLES2Interface* gl = context_provider_->ContextGL();
gl->WaitSyncTokenCHROMIUM(mailbox_holder.sync_token.GetConstData());
- uint32_t src_texture_id =
+ // TODO(piman): convert to CreateAndTexStorage2DSharedImageCHROMIUM once
+ // VideoFrame is all converted to SharedImage.
+ GLuint src_texture_id =
gl->CreateAndConsumeTextureCHROMIUM(mailbox_holder.mailbox.name);
- gl->CopySubTextureCHROMIUM(
- src_texture_id, 0, GL_TEXTURE_2D, hardware_resource->texture_id(), 0, 0,
- 0, 0, 0, output_plane_resource_size.width(),
- output_plane_resource_size.height(), false, false, false);
+ {
+ HardwarePlaneResource::ScopedTexture scope(gl, hardware_resource);
+ gl->CopySubTextureCHROMIUM(
+ src_texture_id, 0, GL_TEXTURE_2D, scope.texture_id(), 0, 0, 0, 0, 0,
+ output_plane_resource_size.width(), output_plane_resource_size.height(),
+ false, false, false);
+ }
gl->DeleteTextures(1, &src_texture_id);
// Pass an empty sync token to force generation of a new sync token.
@@ -899,13 +945,17 @@ VideoFrameExternalResources VideoResourceUpdater::CreateForSoftwarePlanes(
// Copy pixels into texture.
auto* gl = context_provider_->ContextGL();
- gl->BindTexture(hardware_resource->texture_target(),
- hardware_resource->texture_id());
+
const gfx::Size& plane_size = hardware_resource->resource_size();
- gl->TexSubImage2D(
- hardware_resource->texture_target(), 0, 0, 0, plane_size.width(),
- plane_size.height(), GLDataFormat(viz::ResourceFormat::RGBA_8888),
- GLDataType(viz::ResourceFormat::RGBA_8888), upload_pixels_.get());
+ {
+ HardwarePlaneResource::ScopedTexture scope(gl, hardware_resource);
+ gl->BindTexture(hardware_resource->texture_target(),
+ scope.texture_id());
+ gl->TexSubImage2D(
+ hardware_resource->texture_target(), 0, 0, 0, plane_size.width(),
+ plane_size.height(), GLDataFormat(viz::ResourceFormat::RGBA_8888),
+ GLDataType(viz::ResourceFormat::RGBA_8888), upload_pixels_.get());
+ }
}
plane_resource->SetUniqueId(video_frame->unique_id(), 0);
}
@@ -1054,13 +1104,16 @@ VideoFrameExternalResources VideoResourceUpdater::CreateForSoftwarePlanes(
// Copy pixels into texture. TexSubImage2D() is applicable because
// |yuv_resource_format| is LUMINANCE_F16, R16_EXT, LUMINANCE_8 or RED_8.
auto* gl = context_provider_->ContextGL();
- gl->BindTexture(plane_resource->texture_target(),
- plane_resource->texture_id());
DCHECK(GLSupportsFormat(plane_resource_format));
- gl->TexSubImage2D(
- plane_resource->texture_target(), 0, 0, 0, resource_size_pixels.width(),
- resource_size_pixels.height(), GLDataFormat(plane_resource_format),
- GLDataType(plane_resource_format), pixels);
+ {
+ HardwarePlaneResource::ScopedTexture scope(gl, plane_resource);
+ gl->BindTexture(plane_resource->texture_target(), scope.texture_id());
+ gl->TexSubImage2D(plane_resource->texture_target(), 0, 0, 0,
+ resource_size_pixels.width(),
+ resource_size_pixels.height(),
+ GLDataFormat(plane_resource_format),
+ GLDataType(plane_resource_format), pixels);
+ }
plane_resource->SetUniqueId(video_frame->unique_id(), i);
}
@@ -1154,9 +1207,7 @@ bool VideoResourceUpdater::OnMemoryDump(
pmd->CreateSharedMemoryOwnershipEdge(dump->guid(), shm_guid, kImportance);
} else {
base::trace_event::MemoryAllocatorDumpGuid guid =
- gl::GetGLTextureClientGUIDForTracing(
- context_provider_->ContextSupport()->ShareGroupTracingGUID(),
- resource->AsHardware()->texture_id());
+ gpu::GetSharedImageGUIDForTracing(resource->AsHardware()->mailbox());
pmd->CreateSharedGlobalAllocatorDump(guid);
pmd->AddOwnershipEdge(dump->guid(), guid, kImportance);
}
diff --git a/chromium/media/renderers/video_resource_updater_unittest.cc b/chromium/media/renderers/video_resource_updater_unittest.cc
index 02743bd083a..ff8b4266cfc 100644
--- a/chromium/media/renderers/video_resource_updater_unittest.cc
+++ b/chromium/media/renderers/video_resource_updater_unittest.cc
@@ -58,41 +58,19 @@ class UploadCounterGLES2Interface : public viz::TestGLES2Interface {
++upload_count_;
}
- void TexStorage2DEXT(GLenum target,
- GLint levels,
- GLuint internalformat,
- GLint width,
- GLint height) override {}
-
- void GenTextures(GLsizei n, GLuint* textures) override {
- created_texture_count_ += n;
- viz::TestGLES2Interface::GenTextures(n, textures);
- }
-
- void DeleteTextures(GLsizei n, const GLuint* textures) override {
- created_texture_count_ -= n;
- viz::TestGLES2Interface::DeleteTextures(n, textures);
- }
-
int UploadCount() { return upload_count_; }
void ResetUploadCount() { upload_count_ = 0; }
- int TextureCreationCount() { return created_texture_count_; }
- void ResetTextureCreationCount() { created_texture_count_ = 0; }
-
private:
int upload_count_;
- int created_texture_count_;
};
class VideoResourceUpdaterTest : public testing::Test {
protected:
VideoResourceUpdaterTest() {
- std::unique_ptr<UploadCounterGLES2Interface> gl(
- new UploadCounterGLES2Interface());
+ auto gl = std::make_unique<UploadCounterGLES2Interface>();
gl_ = gl.get();
- gl_->set_support_texture_storage(true);
context_provider_ = viz::TestContextProvider::Create(std::move(gl));
context_provider_->BindToCurrentThread();
@@ -256,6 +234,10 @@ class VideoResourceUpdaterTest : public testing::Test {
return video_frame;
}
+ size_t GetSharedImageCount() {
+ return context_provider_->SharedImageInterface()->shared_image_count();
+ }
+
static const gpu::SyncToken kMailboxSyncToken;
// VideoResourceUpdater registers as a MemoryDumpProvider, which requires
@@ -560,7 +542,7 @@ TEST_F(VideoResourceUpdaterTest, CreateForHardwarePlanes_StreamTexture) {
// Note that |use_stream_video_draw_quad| is true for this test.
std::unique_ptr<VideoResourceUpdater> updater =
CreateUpdaterForHardware(true);
- gl_->ResetTextureCreationCount();
+ EXPECT_EQ(0u, GetSharedImageCount());
scoped_refptr<media::VideoFrame> video_frame =
CreateTestStreamTextureHardwareVideoFrame(false);
@@ -571,11 +553,10 @@ TEST_F(VideoResourceUpdaterTest, CreateForHardwarePlanes_StreamTexture) {
EXPECT_EQ((GLenum)GL_TEXTURE_EXTERNAL_OES,
resources.resources[0].mailbox_holder.texture_target);
EXPECT_EQ(1u, resources.release_callbacks.size());
- EXPECT_EQ(0, gl_->TextureCreationCount());
+ EXPECT_EQ(0u, GetSharedImageCount());
// A copied stream texture should return an RGBA resource in a new
// GL_TEXTURE_2D texture.
- gl_->ResetTextureCreationCount();
video_frame = CreateTestStreamTextureHardwareVideoFrame(true);
resources = updater->CreateExternalResourcesFromVideoFrame(video_frame);
EXPECT_EQ(VideoFrameResourceType::RGBA_PREMULTIPLIED, resources.type);
@@ -583,12 +564,12 @@ TEST_F(VideoResourceUpdaterTest, CreateForHardwarePlanes_StreamTexture) {
EXPECT_EQ((GLenum)GL_TEXTURE_2D,
resources.resources[0].mailbox_holder.texture_target);
EXPECT_EQ(1u, resources.release_callbacks.size());
- EXPECT_EQ(1, gl_->TextureCreationCount());
+ EXPECT_EQ(1u, GetSharedImageCount());
}
TEST_F(VideoResourceUpdaterTest, CreateForHardwarePlanes_TextureQuad) {
std::unique_ptr<VideoResourceUpdater> updater = CreateUpdaterForHardware();
- gl_->ResetTextureCreationCount();
+ EXPECT_EQ(0u, GetSharedImageCount());
scoped_refptr<media::VideoFrame> video_frame =
CreateTestStreamTextureHardwareVideoFrame(false);
@@ -599,7 +580,7 @@ TEST_F(VideoResourceUpdaterTest, CreateForHardwarePlanes_TextureQuad) {
EXPECT_EQ((GLenum)GL_TEXTURE_EXTERNAL_OES,
resources.resources[0].mailbox_holder.texture_target);
EXPECT_EQ(1u, resources.release_callbacks.size());
- EXPECT_EQ(0, gl_->TextureCreationCount());
+ EXPECT_EQ(0u, GetSharedImageCount());
}
// Passthrough the sync token returned by the compositor if we don't have an
@@ -695,7 +676,7 @@ TEST_F(VideoResourceUpdaterTest, GenerateSyncTokenOnTextureCopy) {
// of the underlying buffer, that is YUV_420_BIPLANAR.
TEST_F(VideoResourceUpdaterTest, CreateForHardwarePlanes_SingleNV12) {
std::unique_ptr<VideoResourceUpdater> updater = CreateUpdaterForHardware();
- gl_->ResetTextureCreationCount();
+ EXPECT_EQ(0u, GetSharedImageCount());
scoped_refptr<media::VideoFrame> video_frame = CreateTestHardwareVideoFrame(
media::PIXEL_FORMAT_NV12, GL_TEXTURE_EXTERNAL_OES);
@@ -716,12 +697,12 @@ TEST_F(VideoResourceUpdaterTest, CreateForHardwarePlanes_SingleNV12) {
resources.resources[0].mailbox_holder.texture_target);
EXPECT_EQ(viz::YUV_420_BIPLANAR, resources.resources[0].format);
- EXPECT_EQ(0, gl_->TextureCreationCount());
+ EXPECT_EQ(0u, GetSharedImageCount());
}
TEST_F(VideoResourceUpdaterTest, CreateForHardwarePlanes_DualNV12) {
std::unique_ptr<VideoResourceUpdater> updater = CreateUpdaterForHardware();
- gl_->ResetTextureCreationCount();
+ EXPECT_EQ(0u, GetSharedImageCount());
scoped_refptr<media::VideoFrame> video_frame =
CreateTestYuvHardwareVideoFrame(media::PIXEL_FORMAT_NV12, 2,
GL_TEXTURE_EXTERNAL_OES);
@@ -744,7 +725,7 @@ TEST_F(VideoResourceUpdaterTest, CreateForHardwarePlanes_DualNV12) {
EXPECT_EQ((GLenum)GL_TEXTURE_RECTANGLE_ARB,
resources.resources[0].mailbox_holder.texture_target);
EXPECT_EQ(viz::RGBA_8888, resources.resources[0].format);
- EXPECT_EQ(0, gl_->TextureCreationCount());
+ EXPECT_EQ(0u, GetSharedImageCount());
}
} // namespace
diff --git a/chromium/media/test/BUILD.gn b/chromium/media/test/BUILD.gn
index 00f24331503..5bd9de49805 100644
--- a/chromium/media/test/BUILD.gn
+++ b/chromium/media/test/BUILD.gn
@@ -210,6 +210,7 @@ foreach(variant, pipeline_integration_fuzzer_variants) {
deps = [
":pipeline_integration_test_base",
"//base",
+ "//base/test:test_support",
"//media",
# TODO(dalecurtis): Required since the gmock header is included in the
@@ -220,11 +221,6 @@ foreach(variant, pipeline_integration_fuzzer_variants) {
"//ui/gfx:test_support",
]
- libfuzzer_options = [
- # This is done to suppress tons of log messages generated by gmock asserts.
- "close_fd_mask=1",
- ]
-
defines = [ "PIPELINE_FUZZER_VARIANT=${variant}" ]
seed_corpus = "//media/test/data/"
diff --git a/chromium/media/video/BUILD.gn b/chromium/media/video/BUILD.gn
index 23e5ace1461..f453174afad 100644
--- a/chromium/media/video/BUILD.gn
+++ b/chromium/media/video/BUILD.gn
@@ -35,6 +35,10 @@ source_set("video") {
"jpeg_encode_accelerator.h",
"picture.cc",
"picture.h",
+ "supported_video_decoder_config.cc",
+ "supported_video_decoder_config.h",
+ "trace_util.cc",
+ "trace_util.h",
"video_decode_accelerator.cc",
"video_decode_accelerator.h",
"video_encode_accelerator.cc",
@@ -104,6 +108,7 @@ source_set("unit_tests") {
"h264_parser_unittest.cc",
"h264_poc_unittest.cc",
"half_float_maker_unittest.cc",
+ "supported_video_decoder_config_unittest.cc",
]
if (enable_hevc_demuxing) {
sources += [ "h265_parser_unittest.cc" ]
diff --git a/chromium/media/video/gpu_memory_buffer_video_frame_pool.cc b/chromium/media/video/gpu_memory_buffer_video_frame_pool.cc
index 86b0d69a4b1..5711bcff9b4 100644
--- a/chromium/media/video/gpu_memory_buffer_video_frame_pool.cc
+++ b/chromium/media/video/gpu_memory_buffer_video_frame_pool.cc
@@ -682,6 +682,7 @@ void GpuMemoryBufferVideoFramePool::PoolImpl::CreateHardwareFrame(
case PIXEL_FORMAT_Y16:
case PIXEL_FORMAT_ABGR:
case PIXEL_FORMAT_XBGR:
+ case PIXEL_FORMAT_P016LE:
case PIXEL_FORMAT_UNKNOWN:
if (is_software_backed_video_frame) {
UMA_HISTOGRAM_ENUMERATION(
diff --git a/chromium/media/video/gpu_video_accelerator_factories.h b/chromium/media/video/gpu_video_accelerator_factories.h
index 25e0dbbad2d..a4328d372b4 100644
--- a/chromium/media/video/gpu_video_accelerator_factories.h
+++ b/chromium/media/video/gpu_video_accelerator_factories.h
@@ -35,6 +35,7 @@ class Size;
}
namespace gpu {
+class ContextSupport;
struct SyncToken;
}
@@ -80,6 +81,8 @@ class MEDIA_EXPORT GpuVideoAcceleratorFactories {
// Return true if |config| is potentially supported by a decoder created with
// CreateVideoDecoder().
+ //
+ // May be called on any thread.
virtual bool IsDecoderConfigSupported(const VideoDecoderConfig& config) = 0;
virtual std::unique_ptr<media::VideoDecoder> CreateVideoDecoder(
@@ -154,6 +157,7 @@ class MEDIA_EXPORT GpuVideoAcceleratorFactories {
virtual scoped_refptr<ws::ContextProviderCommandBuffer>
GetMediaContextProvider() = 0;
+ virtual gpu::ContextSupport* GetMediaContextProviderContextSupport() = 0;
// Sets the current pipeline rendering color space.
virtual void SetRenderingColorSpace(const gfx::ColorSpace& color_space) = 0;
diff --git a/chromium/media/video/h264_parser.cc b/chromium/media/video/h264_parser.cc
index f786ee3391e..75d71967da9 100644
--- a/chromium/media/video/h264_parser.cc
+++ b/chromium/media/video/h264_parser.cc
@@ -8,8 +8,8 @@
#include <memory>
#include "base/logging.h"
-#include "base/macros.h"
#include "base/numerics/safe_math.h"
+#include "base/stl_util.h"
#include "media/base/subsample_entry.h"
#include "ui/gfx/geometry/rect.h"
#include "ui/gfx/geometry/size.h"
@@ -295,7 +295,7 @@ static const int kTableSarWidth[] = {0, 1, 12, 10, 16, 40, 24, 20, 32,
80, 18, 15, 64, 160, 4, 3, 2};
static const int kTableSarHeight[] = {0, 1, 11, 11, 11, 33, 11, 11, 11,
33, 11, 11, 33, 99, 3, 2, 1};
-static_assert(arraysize(kTableSarWidth) == arraysize(kTableSarHeight),
+static_assert(base::size(kTableSarWidth) == base::size(kTableSarHeight),
"sar tables must have the same size");
H264Parser::H264Parser() {
@@ -809,7 +809,7 @@ H264Parser::Result H264Parser::ParseSPSScalingLists(H264SPS* sps) {
READ_BOOL_OR_RETURN(&seq_scaling_list_present_flag);
if (seq_scaling_list_present_flag) {
- res = ParseScalingList(arraysize(sps->scaling_list4x4[i]),
+ res = ParseScalingList(base::size(sps->scaling_list4x4[i]),
sps->scaling_list4x4[i], &use_default);
if (res != kOk)
return res;
@@ -828,7 +828,7 @@ H264Parser::Result H264Parser::ParseSPSScalingLists(H264SPS* sps) {
READ_BOOL_OR_RETURN(&seq_scaling_list_present_flag);
if (seq_scaling_list_present_flag) {
- res = ParseScalingList(arraysize(sps->scaling_list8x8[i]),
+ res = ParseScalingList(base::size(sps->scaling_list8x8[i]),
sps->scaling_list8x8[i], &use_default);
if (res != kOk)
return res;
@@ -856,7 +856,7 @@ H264Parser::Result H264Parser::ParsePPSScalingLists(const H264SPS& sps,
READ_BOOL_OR_RETURN(&pic_scaling_list_present_flag);
if (pic_scaling_list_present_flag) {
- res = ParseScalingList(arraysize(pps->scaling_list4x4[i]),
+ res = ParseScalingList(base::size(pps->scaling_list4x4[i]),
pps->scaling_list4x4[i], &use_default);
if (res != kOk)
return res;
@@ -882,7 +882,7 @@ H264Parser::Result H264Parser::ParsePPSScalingLists(const H264SPS& sps,
READ_BOOL_OR_RETURN(&pic_scaling_list_present_flag);
if (pic_scaling_list_present_flag) {
- res = ParseScalingList(arraysize(pps->scaling_list8x8[i]),
+ res = ParseScalingList(base::size(pps->scaling_list8x8[i]),
pps->scaling_list8x8[i], &use_default);
if (res != kOk)
return res;
@@ -939,7 +939,7 @@ H264Parser::Result H264Parser::ParseVUIParameters(H264SPS* sps) {
READ_BITS_OR_RETURN(16, &sps->sar_width);
READ_BITS_OR_RETURN(16, &sps->sar_height);
} else {
- const int max_aspect_ratio_idc = arraysize(kTableSarWidth) - 1;
+ const int max_aspect_ratio_idc = base::size(kTableSarWidth) - 1;
IN_RANGE_OR_RETURN(aspect_ratio_idc, 0, max_aspect_ratio_idc);
sps->sar_width = kTableSarWidth[aspect_ratio_idc];
sps->sar_height = kTableSarHeight[aspect_ratio_idc];
@@ -1373,7 +1373,7 @@ H264Parser::Result H264Parser::ParseDecRefPicMarking(H264SliceHeader* shdr) {
H264DecRefPicMarking* marking;
if (shdr->adaptive_ref_pic_marking_mode_flag) {
size_t i;
- for (i = 0; i < arraysize(shdr->ref_pic_marking); ++i) {
+ for (i = 0; i < base::size(shdr->ref_pic_marking); ++i) {
marking = &shdr->ref_pic_marking[i];
READ_UE_OR_RETURN(&marking->memory_mgmnt_control_operation);
@@ -1398,7 +1398,7 @@ H264Parser::Result H264Parser::ParseDecRefPicMarking(H264SliceHeader* shdr) {
return kInvalidStream;
}
- if (i == arraysize(shdr->ref_pic_marking)) {
+ if (i == base::size(shdr->ref_pic_marking)) {
DVLOG(1) << "Ran out of dec ref pic marking fields";
return kUnsupportedStream;
}
diff --git a/chromium/media/video/h264_poc.cc b/chromium/media/video/h264_poc.cc
index 8f6c619fc07..59c0ae7b651 100644
--- a/chromium/media/video/h264_poc.cc
+++ b/chromium/media/video/h264_poc.cc
@@ -7,7 +7,7 @@
#include <algorithm>
#include "base/logging.h"
-#include "base/macros.h"
+#include "base/stl_util.h"
#include "media/video/h264_parser.h"
#include "media/video/h264_poc.h"
@@ -25,7 +25,7 @@ bool HasMMCO5(const media::H264SliceHeader& slice_hdr) {
return false;
}
- for (size_t i = 0; i < arraysize(slice_hdr.ref_pic_marking); i++) {
+ for (size_t i = 0; i < base::size(slice_hdr.ref_pic_marking); i++) {
int32_t op = slice_hdr.ref_pic_marking[i].memory_mgmnt_control_operation;
if (op == 5)
return true;
diff --git a/chromium/media/video/mock_gpu_video_accelerator_factories.h b/chromium/media/video/mock_gpu_video_accelerator_factories.h
index b16cd0c7318..121c95d7f72 100644
--- a/chromium/media/video/mock_gpu_video_accelerator_factories.h
+++ b/chromium/media/video/mock_gpu_video_accelerator_factories.h
@@ -67,6 +67,7 @@ class MockGpuVideoAcceleratorFactories : public GpuVideoAcceleratorFactories {
VideoEncodeAccelerator::SupportedProfiles());
MOCK_METHOD0(GetMediaContextProvider,
scoped_refptr<ws::ContextProviderCommandBuffer>());
+ MOCK_METHOD0(GetMediaContextProviderContextSupport, gpu::ContextSupport*());
MOCK_METHOD1(SetRenderingColorSpace, void(const gfx::ColorSpace&));
std::unique_ptr<gfx::GpuMemoryBuffer> CreateGpuMemoryBuffer(
diff --git a/chromium/media/video/supported_video_decoder_config.cc b/chromium/media/video/supported_video_decoder_config.cc
new file mode 100644
index 00000000000..7da92f98381
--- /dev/null
+++ b/chromium/media/video/supported_video_decoder_config.cc
@@ -0,0 +1,53 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/video/supported_video_decoder_config.h"
+
+namespace media {
+
+SupportedVideoDecoderConfig::SupportedVideoDecoderConfig() = default;
+
+SupportedVideoDecoderConfig::SupportedVideoDecoderConfig(
+ VideoCodecProfile profile_min,
+ VideoCodecProfile profile_max,
+ const gfx::Size& coded_size_min,
+ const gfx::Size& coded_size_max,
+ bool allow_encrypted,
+ bool require_encrypted)
+ : profile_min(profile_min),
+ profile_max(profile_max),
+ coded_size_min(coded_size_min),
+ coded_size_max(coded_size_max),
+ allow_encrypted(allow_encrypted),
+ require_encrypted(require_encrypted) {}
+
+SupportedVideoDecoderConfig::~SupportedVideoDecoderConfig() = default;
+
+bool SupportedVideoDecoderConfig::Matches(
+ const VideoDecoderConfig& config) const {
+ if (config.profile() < profile_min || config.profile() > profile_max)
+ return false;
+
+ if (config.is_encrypted()) {
+ if (!allow_encrypted)
+ return false;
+ } else {
+ if (require_encrypted)
+ return false;
+ }
+
+ if (config.coded_size().width() < coded_size_min.width())
+ return false;
+ if (config.coded_size().height() < coded_size_min.height())
+ return false;
+
+ if (config.coded_size().width() > coded_size_max.width())
+ return false;
+ if (config.coded_size().height() > coded_size_max.height())
+ return false;
+
+ return true;
+}
+
+} // namespace media
diff --git a/chromium/media/video/supported_video_decoder_config.h b/chromium/media/video/supported_video_decoder_config.h
new file mode 100644
index 00000000000..c0791af58fe
--- /dev/null
+++ b/chromium/media/video/supported_video_decoder_config.h
@@ -0,0 +1,54 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_VIDEO_SUPPORTED_VIDEO_DECODER_CONFIG_H_
+#define MEDIA_VIDEO_SUPPORTED_VIDEO_DECODER_CONFIG_H_
+
+#include "base/macros.h"
+#include "media/base/media_export.h"
+#include "media/base/video_codecs.h"
+#include "media/base/video_decoder_config.h"
+#include "ui/gfx/geometry/size.h"
+
+namespace media {
+
+// Specification of a range of configurations that are supported by a video
+// decoder. Also provides the ability to check if a VideoDecoderConfig matches
+// the supported range.
+struct MEDIA_EXPORT SupportedVideoDecoderConfig {
+ SupportedVideoDecoderConfig();
+ SupportedVideoDecoderConfig(VideoCodecProfile profile_min,
+ VideoCodecProfile profile_max,
+ const gfx::Size& coded_size_min,
+ const gfx::Size& coded_size_max,
+ bool allow_encrypted,
+ bool require_encrypted);
+ ~SupportedVideoDecoderConfig();
+
+ // Returns true if and only if |config| is a supported config.
+ bool Matches(const VideoDecoderConfig& config) const;
+
+ // Range of VideoCodecProfiles to match, inclusive.
+ VideoCodecProfile profile_min = VIDEO_CODEC_PROFILE_UNKNOWN;
+ VideoCodecProfile profile_max = VIDEO_CODEC_PROFILE_UNKNOWN;
+
+ // Coded size range, inclusive.
+ gfx::Size coded_size_min;
+ gfx::Size coded_size_max;
+
+ // TODO(liberato): consider switching these to "allow_clear" and
+ // "allow_encrypted", so that they're orthogonal.
+
+ // If true, then this will match encrypted configs.
+ bool allow_encrypted = true;
+
+ // If true, then unencrypted configs will not match.
+ bool require_encrypted = false;
+
+ // Allow copy and assignment.
+};
+
+} // namespace media
+
+#endif // MEDIA_VIDEO_SUPPORTED_VIDEO_DECODER_CONFIG_H_
diff --git a/chromium/media/video/supported_video_decoder_config_unittest.cc b/chromium/media/video/supported_video_decoder_config_unittest.cc
new file mode 100644
index 00000000000..c2e3593698b
--- /dev/null
+++ b/chromium/media/video/supported_video_decoder_config_unittest.cc
@@ -0,0 +1,104 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/video/supported_video_decoder_config.h"
+#include "media/base/test_helpers.h"
+#include "media/base/video_codecs.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+class SupportedVideoDecoderConfigTest : public ::testing::Test {
+ public:
+ SupportedVideoDecoderConfigTest()
+ : decoder_config_(
+ TestVideoConfig::NormalCodecProfile(kCodecH264,
+ H264PROFILE_EXTENDED)) {
+ supported_config_.profile_min = H264PROFILE_MIN;
+ supported_config_.profile_max = H264PROFILE_MAX;
+ supported_config_.coded_size_min = gfx::Size(10, 20);
+ supported_config_.coded_size_max = gfx::Size(10000, 20000);
+ supported_config_.allow_encrypted = true;
+ supported_config_.require_encrypted = false;
+ }
+
+ SupportedVideoDecoderConfig supported_config_;
+
+ // Decoder config that matches |supported_config_|.
+ VideoDecoderConfig decoder_config_;
+};
+
+TEST_F(SupportedVideoDecoderConfigTest, ConstructionWithArgs) {
+ SupportedVideoDecoderConfig config2(
+ supported_config_.profile_min, supported_config_.profile_max,
+ supported_config_.coded_size_min, supported_config_.coded_size_max,
+ supported_config_.allow_encrypted, supported_config_.require_encrypted);
+ EXPECT_EQ(supported_config_.profile_min, config2.profile_min);
+ EXPECT_EQ(supported_config_.profile_max, config2.profile_max);
+ EXPECT_EQ(supported_config_.coded_size_min, config2.coded_size_min);
+ EXPECT_EQ(supported_config_.coded_size_max, config2.coded_size_max);
+ EXPECT_EQ(supported_config_.allow_encrypted, config2.allow_encrypted);
+ EXPECT_EQ(supported_config_.require_encrypted, config2.require_encrypted);
+}
+
+TEST_F(SupportedVideoDecoderConfigTest, MatchingConfigMatches) {
+ EXPECT_TRUE(supported_config_.Matches(decoder_config_));
+
+ // Since |supported_config_| allows encrypted, this should also succeed.
+ decoder_config_.SetIsEncrypted(true);
+ EXPECT_TRUE(supported_config_.Matches(decoder_config_));
+}
+
+TEST_F(SupportedVideoDecoderConfigTest, LowerProfileMismatches) {
+ // Raise |profile_min| above |decoder_config_|.
+ supported_config_.profile_min = H264PROFILE_HIGH;
+ EXPECT_FALSE(supported_config_.Matches(decoder_config_));
+}
+
+TEST_F(SupportedVideoDecoderConfigTest, HigherProfileMismatches) {
+ // Lower |profile_max| below |decoder_config_|.
+ supported_config_.profile_max = H264PROFILE_MAIN;
+ EXPECT_FALSE(supported_config_.Matches(decoder_config_));
+}
+
+TEST_F(SupportedVideoDecoderConfigTest, SmallerMinWidthMismatches) {
+ supported_config_.coded_size_min =
+ gfx::Size(decoder_config_.coded_size().width() + 1, 0);
+ EXPECT_FALSE(supported_config_.Matches(decoder_config_));
+}
+
+TEST_F(SupportedVideoDecoderConfigTest, SmallerMinHeightMismatches) {
+ supported_config_.coded_size_min =
+ gfx::Size(0, decoder_config_.coded_size().height() + 1);
+ EXPECT_FALSE(supported_config_.Matches(decoder_config_));
+}
+
+TEST_F(SupportedVideoDecoderConfigTest, LargerMaxWidthMismatches) {
+ supported_config_.coded_size_max =
+ gfx::Size(decoder_config_.coded_size().width() - 1, 10000);
+ EXPECT_FALSE(supported_config_.Matches(decoder_config_));
+}
+
+TEST_F(SupportedVideoDecoderConfigTest, LargerMaxHeightMismatches) {
+ supported_config_.coded_size_max =
+ gfx::Size(10000, decoder_config_.coded_size().height() - 1);
+ EXPECT_FALSE(supported_config_.Matches(decoder_config_));
+}
+
+TEST_F(SupportedVideoDecoderConfigTest, RequiredEncryptionMismatches) {
+ supported_config_.require_encrypted = true;
+ EXPECT_FALSE(supported_config_.Matches(decoder_config_));
+
+ // The encrypted version should succeed.
+ decoder_config_.SetIsEncrypted(true);
+ EXPECT_TRUE(supported_config_.Matches(decoder_config_));
+}
+
+TEST_F(SupportedVideoDecoderConfigTest, AllowedEncryptionMismatches) {
+ supported_config_.allow_encrypted = false;
+ decoder_config_.SetIsEncrypted(true);
+ EXPECT_FALSE(supported_config_.Matches(decoder_config_));
+}
+
+} // namespace media
diff --git a/chromium/media/video/trace_util.cc b/chromium/media/video/trace_util.cc
new file mode 100644
index 00000000000..7557ff6df17
--- /dev/null
+++ b/chromium/media/video/trace_util.cc
@@ -0,0 +1,19 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/video/trace_util.h"
+
+#include "base/trace_event/memory_allocator_dump_guid.h"
+#include "ui/gl/trace_util.h"
+
+namespace media {
+
+base::trace_event::MemoryAllocatorDumpGuid GetGLTextureClientGUIDForTracing(
+ uint64_t context_group_tracing_id,
+ uint32_t texture_id) {
+ return gl::GetGLTextureClientGUIDForTracing(context_group_tracing_id,
+ texture_id);
+}
+
+} // namespace media
diff --git a/chromium/media/video/trace_util.h b/chromium/media/video/trace_util.h
new file mode 100644
index 00000000000..83892509641
--- /dev/null
+++ b/chromium/media/video/trace_util.h
@@ -0,0 +1,24 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_VIDEO_TRACE_UTIL_H_
+#define MEDIA_VIDEO_TRACE_UTIL_H_
+
+#include <stdint.h>
+
+namespace base {
+namespace trace_event {
+class MemoryAllocatorDumpGuid;
+}
+} // namespace base
+
+namespace media {
+
+base::trace_event::MemoryAllocatorDumpGuid GetGLTextureClientGUIDForTracing(
+ uint64_t context_group_tracing_id,
+ uint32_t texture_id);
+
+} // namespace media
+
+#endif // MEDIA_VIDEO_TRACE_UTIL_H_
diff --git a/chromium/media/webrtc/audio_processor.cc b/chromium/media/webrtc/audio_processor.cc
index 448bd03b54a..3eaaea90f30 100644
--- a/chromium/media/webrtc/audio_processor.cc
+++ b/chromium/media/webrtc/audio_processor.cc
@@ -244,6 +244,8 @@ void AudioProcessor::InitializeAPM() {
settings_.automatic_gain_control ==
AutomaticGainControlType::kHybridExperimental;
ap_config.Set<webrtc::ExperimentalAgc>(experimental_agc);
+ } else {
+ ap_config.Set<webrtc::ExperimentalAgc>(new webrtc::ExperimentalAgc(false));
}
// Noise suppression setup part 1.
@@ -261,18 +263,6 @@ void AudioProcessor::InitializeAPM() {
DCHECK_EQ(err, 0);
}
- // Typing detection setup.
- if (settings_.typing_detection) {
- typing_detector_ = std::make_unique<webrtc::TypingDetection>();
- int err = audio_processing_->voice_detection()->Enable(true);
- err |= audio_processing_->voice_detection()->set_likelihood(
- webrtc::VoiceDetection::kVeryLowLikelihood);
- DCHECK_EQ(err, 0);
-
- // Configure the update period to 1s (100 * 10ms) in the typing detector.
- typing_detector_->SetParameters(0, 0, 0, 0, 0, 100);
- }
-
// AGC setup part 2.
if (settings_.automatic_gain_control != AutomaticGainControlType::kDisabled) {
int err = audio_processing_->gain_control()->set_mode(
@@ -280,15 +270,18 @@ void AudioProcessor::InitializeAPM() {
err |= audio_processing_->gain_control()->Enable(true);
DCHECK_EQ(err, 0);
}
- if (settings_.automatic_gain_control == AutomaticGainControlType::kDefault) {
- int err = audio_processing_->gain_control()->set_mode(
- webrtc::GainControl::kAdaptiveAnalog);
- err |= audio_processing_->gain_control()->Enable(true);
- DCHECK_EQ(err, 0);
- }
webrtc::AudioProcessing::Config apm_config = audio_processing_->GetConfig();
+ // Typing detection setup.
+ if (settings_.typing_detection) {
+ typing_detector_ = std::make_unique<webrtc::TypingDetection>();
+ // Configure the update period to 1s (100 * 10ms) in the typing detector.
+ typing_detector_->SetParameters(0, 0, 0, 0, 0, 100);
+
+ apm_config.voice_detection.enabled = true;
+ }
+
// AEC setup part 2.
apm_config.echo_canceller.enabled =
settings_.echo_cancellation == EchoCancellationType::kAec2 ||
@@ -370,10 +363,12 @@ void AudioProcessor::FeedDataToAPM(const AudioBus& source) {
void AudioProcessor::UpdateTypingDetected(bool key_pressed) {
if (typing_detector_) {
- webrtc::VoiceDetection* vad = audio_processing_->voice_detection();
- DCHECK(vad->is_enabled());
- typing_detected_ =
- typing_detector_->Process(key_pressed, vad->stream_has_voice());
+ // Ignore remote tracks to avoid unnecessary stats computation.
+ auto voice_detected =
+ audio_processing_->GetStatistics(false /* has_remote_tracks */)
+ .voice_detected;
+ DCHECK(voice_detected.has_value());
+ typing_detected_ = typing_detector_->Process(key_pressed, *voice_detected);
}
}
diff --git a/chromium/media/webrtc/audio_processor_controls.h b/chromium/media/webrtc/audio_processor_controls.h
index 4b3a28b6ecc..79d0cf9d087 100644
--- a/chromium/media/webrtc/audio_processor_controls.h
+++ b/chromium/media/webrtc/audio_processor_controls.h
@@ -6,7 +6,7 @@
#define MEDIA_WEBRTC_AUDIO_PROCESSOR_CONTROLS_H_
#include "base/callback.h"
-#include "third_party/webrtc/api/mediastreaminterface.h"
+#include "third_party/webrtc/api/media_stream_interface.h"
namespace media {
diff --git a/chromium/media/webrtc/audio_processor_unittest.cc b/chromium/media/webrtc/audio_processor_unittest.cc
index 4fb99640119..7213b082b19 100644
--- a/chromium/media/webrtc/audio_processor_unittest.cc
+++ b/chromium/media/webrtc/audio_processor_unittest.cc
@@ -132,6 +132,7 @@ class WebRtcAudioProcessorTest : public ::testing::Test {
EXPECT_TRUE(ap_config.echo_canceller.enabled);
EXPECT_FALSE(ap_config.echo_canceller.mobile_mode);
EXPECT_TRUE(ap_config.high_pass_filter.enabled);
+ EXPECT_TRUE(ap_config.voice_detection.enabled);
EXPECT_TRUE(audio_processing->noise_suppression()->is_enabled());
EXPECT_TRUE(audio_processing->noise_suppression()->level() ==
@@ -139,9 +140,6 @@ class WebRtcAudioProcessorTest : public ::testing::Test {
EXPECT_TRUE(audio_processing->gain_control()->is_enabled());
EXPECT_TRUE(audio_processing->gain_control()->mode() ==
webrtc::GainControl::kAdaptiveAnalog);
- EXPECT_TRUE(audio_processing->voice_detection()->is_enabled());
- EXPECT_TRUE(audio_processing->voice_detection()->likelihood() ==
- webrtc::VoiceDetection::kVeryLowLikelihood);
}
AudioProcessingSettings GetEnabledAudioProcessingSettings() const {