summaryrefslogtreecommitdiff
path: root/chromium/media
diff options
context:
space:
mode:
authorAllan Sandfeld Jensen <allan.jensen@theqtcompany.com>2015-10-13 13:24:50 +0200
committerAllan Sandfeld Jensen <allan.jensen@theqtcompany.com>2015-10-14 10:57:25 +0000
commitaf3d4809763ef308f08ced947a73b624729ac7ea (patch)
tree4402b911e30383f6c6dace1e8cf3b8e85355db3a /chromium/media
parent0e8ff63a407fe323e215bb1a2c423c09a4747c8a (diff)
downloadqtwebengine-chromium-af3d4809763ef308f08ced947a73b624729ac7ea.tar.gz
BASELINE: Update Chromium to 47.0.2526.14
Also adding in sources needed for spellchecking. Change-Id: Idd44170fa1616f26315188970a8d5ba7d472b18a Reviewed-by: Michael Brüning <michael.bruning@theqtcompany.com>
Diffstat (limited to 'chromium/media')
-rw-r--r--chromium/media/BUILD.gn247
-rw-r--r--chromium/media/DEPS4
-rw-r--r--chromium/media/OWNERS2
-rw-r--r--chromium/media/audio/BUILD.gn20
-rw-r--r--chromium/media/audio/agc_audio_stream.h2
-rw-r--r--chromium/media/audio/alsa/audio_manager_alsa.cc12
-rw-r--r--chromium/media/audio/android/audio_android_unittest.cc66
-rw-r--r--chromium/media/audio/android/audio_manager_android.cc27
-rw-r--r--chromium/media/audio/android/audio_manager_android.h3
-rw-r--r--chromium/media/audio/audio_device_thread.cc11
-rw-r--r--chromium/media/audio/audio_input_controller.cc63
-rw-r--r--chromium/media/audio/audio_input_controller.h29
-rw-r--r--chromium/media/audio/audio_input_device.cc38
-rw-r--r--chromium/media/audio/audio_input_volume_unittest.cc170
-rw-r--r--chromium/media/audio/audio_input_writer.h24
-rw-r--r--chromium/media/audio/audio_manager.cc53
-rw-r--r--chromium/media/audio/audio_manager.h16
-rw-r--r--chromium/media/audio/audio_manager_base.cc22
-rw-r--r--chromium/media/audio/audio_manager_base.h57
-rw-r--r--chromium/media/audio/audio_manager_unittest.cc6
-rw-r--r--chromium/media/audio/audio_output_controller.cc2
-rw-r--r--chromium/media/audio/audio_output_controller.h2
-rw-r--r--chromium/media/audio/audio_output_controller_unittest.cc2
-rw-r--r--chromium/media/audio/audio_output_device.cc230
-rw-r--r--chromium/media/audio/audio_output_device.h70
-rw-r--r--chromium/media/audio/audio_output_device_unittest.cc131
-rw-r--r--chromium/media/audio/audio_output_dispatcher_impl.h2
-rw-r--r--chromium/media/audio/audio_output_ipc.h52
-rw-r--r--chromium/media/audio/audio_output_resampler.cc7
-rw-r--r--chromium/media/audio/audio_output_stream_sink.cc7
-rw-r--r--chromium/media/audio/audio_output_stream_sink.h4
-rw-r--r--chromium/media/audio/audio_parameters.cc81
-rw-r--r--chromium/media/audio/audio_parameters.h108
-rw-r--r--chromium/media/audio/audio_parameters_unittest.cc23
-rw-r--r--chromium/media/audio/clockless_audio_sink.cc45
-rw-r--r--chromium/media/audio/clockless_audio_sink.h12
-rw-r--r--chromium/media/audio/cras/audio_manager_cras.cc169
-rw-r--r--chromium/media/audio/cras/audio_manager_cras.h8
-rw-r--r--chromium/media/audio/fake_audio_manager.cc5
-rw-r--r--chromium/media/audio/mac/audio_manager_mac.cc9
-rw-r--r--chromium/media/audio/mac/audio_manager_mac.h2
-rw-r--r--chromium/media/audio/null_audio_sink.cc7
-rw-r--r--chromium/media/audio/null_audio_sink.h5
-rw-r--r--chromium/media/audio/openbsd/audio_manager_openbsd.cc160
-rw-r--r--chromium/media/audio/openbsd/audio_manager_openbsd.h55
-rw-r--r--chromium/media/audio/point.cc61
-rw-r--r--chromium/media/audio/point.h31
-rw-r--r--chromium/media/audio/point_unittest.cc41
-rw-r--r--chromium/media/audio/pulse/audio_manager_pulse.cc13
-rw-r--r--chromium/media/audio/pulse/pulse_output.cc7
-rw-r--r--chromium/media/audio/pulse/pulse_util.cc6
-rw-r--r--chromium/media/audio/pulse/pulse_util.h1
-rw-r--r--chromium/media/audio/sounds/sounds_manager.cc47
-rw-r--r--chromium/media/audio/sounds/sounds_manager.h4
-rw-r--r--chromium/media/audio/sounds/sounds_manager_unittest.cc45
-rw-r--r--chromium/media/audio/virtual_audio_input_stream.h1
-rw-r--r--chromium/media/audio/win/audio_device_listener_win.cc80
-rw-r--r--chromium/media/audio/win/audio_device_listener_win.h17
-rw-r--r--chromium/media/audio/win/audio_device_listener_win_unittest.cc25
-rw-r--r--chromium/media/audio/win/audio_low_latency_input_win.cc92
-rw-r--r--chromium/media/audio/win/audio_low_latency_input_win.h5
-rw-r--r--chromium/media/audio/win/audio_low_latency_input_win_unittest.cc16
-rw-r--r--chromium/media/audio/win/audio_low_latency_output_win.cc142
-rw-r--r--chromium/media/audio/win/audio_low_latency_output_win.h8
-rw-r--r--chromium/media/audio/win/audio_low_latency_output_win_unittest.cc3
-rw-r--r--chromium/media/audio/win/audio_manager_win.cc34
-rw-r--r--chromium/media/audio/win/core_audio_util_win.cc63
-rw-r--r--chromium/media/audio/win/core_audio_util_win.h3
-rw-r--r--chromium/media/audio/win/core_audio_util_win_unittest.cc5
-rw-r--r--chromium/media/audio_unittests.isolate1
-rw-r--r--chromium/media/base/BUILD.gn20
-rw-r--r--chromium/media/base/android/BUILD.gn9
-rw-r--r--chromium/media/base/android/access_unit_queue.cc28
-rw-r--r--chromium/media/base/android/access_unit_queue.h11
-rw-r--r--chromium/media/base/android/audio_decoder_job.cc24
-rw-r--r--chromium/media/base/android/audio_decoder_job.h6
-rw-r--r--chromium/media/base/android/browser_cdm_factory_android.cc12
-rw-r--r--chromium/media/base/android/browser_cdm_factory_android.h2
-rw-r--r--chromium/media/base/android/demuxer_stream_player_params.cc51
-rw-r--r--chromium/media/base/android/demuxer_stream_player_params.h3
-rw-r--r--chromium/media/base/android/media_codec_audio_decoder.cc156
-rw-r--r--chromium/media/base/android/media_codec_audio_decoder.h16
-rw-r--r--chromium/media/base/android/media_codec_bridge.cc95
-rw-r--r--chromium/media/base/android/media_codec_bridge.h22
-rw-r--r--chromium/media/base/android/media_codec_decoder.cc569
-rw-r--r--chromium/media/base/android/media_codec_decoder.h241
-rw-r--r--chromium/media/base/android/media_codec_decoder_unittest.cc269
-rw-r--r--chromium/media/base/android/media_codec_player.cc998
-rw-r--r--chromium/media/base/android/media_codec_player.h240
-rw-r--r--chromium/media/base/android/media_codec_player_unittest.cc1935
-rw-r--r--chromium/media/base/android/media_codec_video_decoder.cc213
-rw-r--r--chromium/media/base/android/media_codec_video_decoder.h41
-rw-r--r--chromium/media/base/android/media_decoder_job.cc45
-rw-r--r--chromium/media/base/android/media_decoder_job.h26
-rw-r--r--chromium/media/base/android/media_drm_bridge.cc262
-rw-r--r--chromium/media/base/android/media_drm_bridge.h76
-rw-r--r--chromium/media/base/android/media_drm_bridge_unittest.cc6
-rw-r--r--chromium/media/base/android/media_player_android.cc12
-rw-r--r--chromium/media/base/android/media_player_android.h25
-rw-r--r--chromium/media/base/android/media_player_bridge.cc74
-rw-r--r--chromium/media/base/android/media_player_bridge.h30
-rw-r--r--chromium/media/base/android/media_player_bridge_unittest.cc109
-rw-r--r--chromium/media/base/android/media_player_manager.h11
-rw-r--r--chromium/media/base/android/media_source_player.cc72
-rw-r--r--chromium/media/base/android/media_source_player.h28
-rw-r--r--chromium/media/base/android/media_source_player_unittest.cc183
-rw-r--r--chromium/media/base/android/media_statistics.cc119
-rw-r--r--chromium/media/base/android/media_statistics.h88
-rw-r--r--chromium/media/base/android/media_task_runner.cc43
-rw-r--r--chromium/media/base/android/media_task_runner.h23
-rw-r--r--chromium/media/base/android/test_data_factory.cc114
-rw-r--r--chromium/media/base/android/test_data_factory.h41
-rw-r--r--chromium/media/base/android/test_statistics.h49
-rw-r--r--chromium/media/base/android/video_decoder_job.cc10
-rw-r--r--chromium/media/base/android/video_decoder_job.h13
-rw-r--r--chromium/media/base/android/webaudio_media_codec_bridge.cc7
-rw-r--r--chromium/media/base/android/webaudio_media_codec_bridge.h4
-rw-r--r--chromium/media/base/audio_buffer.cc2
-rw-r--r--chromium/media/base/audio_buffer_converter.cc4
-rw-r--r--chromium/media/base/audio_buffer_converter_unittest.cc11
-rw-r--r--chromium/media/base/audio_buffer_queue.cc1
-rw-r--r--chromium/media/base/audio_buffer_queue_unittest.cc2
-rw-r--r--chromium/media/base/audio_buffer_unittest.cc9
-rw-r--r--chromium/media/base/audio_capturer_source.h2
-rw-r--r--chromium/media/base/audio_converter_unittest.cc12
-rw-r--r--chromium/media/base/audio_decoder_config.cc22
-rw-r--r--chromium/media/base/audio_decoder_config.h11
-rw-r--r--chromium/media/base/audio_discard_helper.cc7
-rw-r--r--chromium/media/base/audio_discard_helper.h2
-rw-r--r--chromium/media/base/audio_discard_helper_unittest.cc2
-rw-r--r--chromium/media/base/audio_hardware_config.cc19
-rw-r--r--chromium/media/base/audio_hardware_config.h2
-rw-r--r--chromium/media/base/audio_renderer_mixer.cc10
-rw-r--r--chromium/media/base/audio_renderer_mixer.h8
-rw-r--r--chromium/media/base/audio_renderer_mixer_input.cc73
-rw-r--r--chromium/media/base/audio_renderer_mixer_input.h38
-rw-r--r--chromium/media/base/audio_renderer_mixer_input_unittest.cc137
-rw-r--r--chromium/media/base/audio_renderer_mixer_unittest.cc32
-rw-r--r--chromium/media/base/audio_renderer_sink.h26
-rw-r--r--chromium/media/base/audio_splicer.cc10
-rw-r--r--chromium/media/base/audio_splicer.h2
-rw-r--r--chromium/media/base/audio_splicer_unittest.cc2
-rw-r--r--chromium/media/base/audio_timestamp_helper.cc2
-rw-r--r--chromium/media/base/audio_timestamp_helper_unittest.cc2
-rw-r--r--chromium/media/base/bind_to_current_loop.h1
-rw-r--r--chromium/media/base/bitstream_buffer.h25
-rw-r--r--chromium/media/base/browser_cdm.cc5
-rw-r--r--chromium/media/base/browser_cdm.h12
-rw-r--r--chromium/media/base/browser_cdm_factory.cc4
-rw-r--r--chromium/media/base/browser_cdm_factory.h4
-rw-r--r--chromium/media/base/buffers.h45
-rw-r--r--chromium/media/base/cdm_config.h2
-rw-r--r--chromium/media/base/cdm_key_information.h5
-rw-r--r--chromium/media/base/cdm_promise_adapter.cc4
-rw-r--r--chromium/media/base/cdm_promise_adapter.h2
-rw-r--r--chromium/media/base/channel_mixer_unittest.cc23
-rw-r--r--chromium/media/base/decoder_buffer.h5
-rw-r--r--chromium/media/base/decoder_buffer_queue.cc4
-rw-r--r--chromium/media/base/decoder_buffer_queue_unittest.cc2
-rw-r--r--chromium/media/base/demuxer_perftest.cc1
-rw-r--r--chromium/media/base/fake_audio_renderer_sink.cc12
-rw-r--r--chromium/media/base/fake_audio_renderer_sink.h7
-rw-r--r--chromium/media/base/fake_demuxer_stream.cc9
-rw-r--r--chromium/media/base/fake_media_resources.cc21
-rw-r--r--chromium/media/base/fake_media_resources.h15
-rw-r--r--chromium/media/base/fake_output_device.cc36
-rw-r--r--chromium/media/base/fake_output_device.h34
-rw-r--r--chromium/media/base/key_systems.cc39
-rw-r--r--chromium/media/base/key_systems.h5
-rw-r--r--chromium/media/base/key_systems_unittest.cc4
-rw-r--r--chromium/media/base/mac/video_frame_mac.cc6
-rw-r--r--chromium/media/base/mac/video_frame_mac_unittests.cc22
-rw-r--r--chromium/media/base/media.cc4
-rw-r--r--chromium/media/base/media_log.cc10
-rw-r--r--chromium/media/base/media_log.h19
-rw-r--r--chromium/media/base/media_permission.h2
-rw-r--r--chromium/media/base/media_resources.cc29
-rw-r--r--chromium/media/base/media_resources.h52
-rw-r--r--chromium/media/base/media_switches.cc9
-rw-r--r--chromium/media/base/media_switches.h4
-rw-r--r--chromium/media/base/mime_util.cc145
-rw-r--r--chromium/media/base/mock_audio_renderer_sink.cc11
-rw-r--r--chromium/media/base/mock_audio_renderer_sink.h11
-rw-r--r--chromium/media/base/mock_media_log.cc13
-rw-r--r--chromium/media/base/mock_media_log.h45
-rw-r--r--chromium/media/base/null_video_sink_unittest.cc2
-rw-r--r--chromium/media/base/output_device.h67
-rw-r--r--chromium/media/base/renderer_factory.h2
-rw-r--r--chromium/media/base/run_all_unittests.cc8
-rw-r--r--chromium/media/base/seekable_buffer.cc1
-rw-r--r--chromium/media/base/seekable_buffer.h3
-rw-r--r--chromium/media/base/seekable_buffer_unittest.cc1
-rw-r--r--chromium/media/base/stream_parser.cc1
-rw-r--r--chromium/media/base/stream_parser.h2
-rw-r--r--chromium/media/base/stream_parser_buffer.cc20
-rw-r--r--chromium/media/base/stream_parser_buffer.h2
-rw-r--r--chromium/media/base/test_helpers.cc11
-rw-r--r--chromium/media/base/test_helpers.h5
-rw-r--r--chromium/media/base/time_delta_interpolator.cc2
-rw-r--r--chromium/media/base/timestamp_constants.h25
-rw-r--r--chromium/media/base/video_capture_types.cc101
-rw-r--r--chromium/media/base/video_capture_types.h51
-rw-r--r--chromium/media/base/video_capturer_source.cc7
-rw-r--r--chromium/media/base/video_capturer_source.h40
-rw-r--r--chromium/media/base/video_codecs.h64
-rw-r--r--chromium/media/base/video_decoder_config.cc119
-rw-r--r--chromium/media/base/video_decoder_config.h95
-rw-r--r--chromium/media/base/video_frame.cc479
-rw-r--r--chromium/media/base/video_frame.h105
-rw-r--r--chromium/media/base/video_frame_metadata.h11
-rw-r--r--chromium/media/base/video_frame_pool.cc13
-rw-r--r--chromium/media/base/video_frame_pool.h6
-rw-r--r--chromium/media/base/video_frame_pool_unittest.cc22
-rw-r--r--chromium/media/base/video_frame_unittest.cc105
-rw-r--r--chromium/media/base/video_types.cc75
-rw-r--r--chromium/media/base/video_types.h74
-rw-r--r--chromium/media/base/video_util.cc12
-rw-r--r--chromium/media/base/video_util_unittest.cc39
-rw-r--r--chromium/media/base/yuv_convert_perftest.cc4
-rw-r--r--chromium/media/base/yuv_convert_unittest.cc5
-rw-r--r--chromium/media/blink/BUILD.gn88
-rw-r--r--chromium/media/blink/DEPS1
-rw-r--r--chromium/media/blink/buffered_data_source.cc3
-rw-r--r--chromium/media/blink/buffered_data_source_host_impl.cc2
-rw-r--r--chromium/media/blink/buffered_resource_loader.h34
-rw-r--r--chromium/media/blink/cache_util.cc3
-rw-r--r--chromium/media/blink/cdm_session_adapter.cc37
-rw-r--r--chromium/media/blink/cdm_session_adapter.h7
-rw-r--r--chromium/media/blink/encrypted_media_player_support.cc14
-rw-r--r--chromium/media/blink/encrypted_media_player_support.h6
-rw-r--r--chromium/media/blink/key_system_config_selector.cc14
-rw-r--r--chromium/media/blink/media_blink.gyp37
-rw-r--r--chromium/media/blink/media_blink_unittests.isolate63
-rw-r--r--chromium/media/blink/new_session_cdm_result_promise.cc31
-rw-r--r--chromium/media/blink/new_session_cdm_result_promise.h21
-rw-r--r--chromium/media/blink/run_all_unittests.cc55
-rw-r--r--chromium/media/blink/skcanvas_video_renderer.cc503
-rw-r--r--chromium/media/blink/skcanvas_video_renderer.h53
-rw-r--r--chromium/media/blink/skcanvas_video_renderer_unittest.cc110
-rw-r--r--chromium/media/blink/video_frame_compositor.cc31
-rw-r--r--chromium/media/blink/video_frame_compositor_unittest.cc10
-rw-r--r--chromium/media/blink/webaudiosourceprovider_impl.cc11
-rw-r--r--chromium/media/blink/webaudiosourceprovider_impl.h12
-rw-r--r--chromium/media/blink/webaudiosourceprovider_impl_unittest.cc2
-rw-r--r--chromium/media/blink/webcontentdecryptionmodule_impl.cc3
-rw-r--r--chromium/media/blink/webcontentdecryptionmodule_impl.h8
-rw-r--r--chromium/media/blink/webcontentdecryptionmoduleaccess_impl.h8
-rw-r--r--chromium/media/blink/webcontentdecryptionmodulesession_impl.cc44
-rw-r--r--chromium/media/blink/webcontentdecryptionmodulesession_impl.h34
-rw-r--r--chromium/media/blink/webencryptedmediaclient_impl.cc2
-rw-r--r--chromium/media/blink/webencryptedmediaclient_impl.h6
-rw-r--r--chromium/media/blink/webinbandtexttrack_impl.h14
-rw-r--r--chromium/media/blink/webmediaplayer_impl.cc95
-rw-r--r--chromium/media/blink/webmediaplayer_impl.h120
-rw-r--r--chromium/media/blink/webmediaplayer_params.cc6
-rw-r--r--chromium/media/blink/webmediaplayer_params.h7
-rw-r--r--chromium/media/blink/webmediaplayer_util.cc52
-rw-r--r--chromium/media/blink/webmediaplayer_util.h11
-rw-r--r--chromium/media/blink/webmediasource_impl.cc18
-rw-r--r--chromium/media/blink/webmediasource_impl.h21
-rw-r--r--chromium/media/blink/websourcebuffer_impl.cc21
-rw-r--r--chromium/media/blink/websourcebuffer_impl.h26
-rw-r--r--chromium/media/capture/content/OWNERS (renamed from chromium/media/capture/OWNERS)0
-rw-r--r--chromium/media/capture/content/README5
-rw-r--r--chromium/media/capture/content/animated_content_sampler.cc (renamed from chromium/media/capture/animated_content_sampler.cc)32
-rw-r--r--chromium/media/capture/content/animated_content_sampler.h (renamed from chromium/media/capture/animated_content_sampler.h)0
-rw-r--r--chromium/media/capture/content/animated_content_sampler_unittest.cc (renamed from chromium/media/capture/animated_content_sampler_unittest.cc)253
-rw-r--r--chromium/media/capture/content/capture_resolution_chooser.cc (renamed from chromium/media/capture/capture_resolution_chooser.cc)23
-rw-r--r--chromium/media/capture/content/capture_resolution_chooser.h (renamed from chromium/media/capture/capture_resolution_chooser.h)9
-rw-r--r--chromium/media/capture/content/capture_resolution_chooser_unittest.cc (renamed from chromium/media/capture/capture_resolution_chooser_unittest.cc)93
-rw-r--r--chromium/media/capture/content/feedback_signal_accumulator.cc (renamed from chromium/media/capture/feedback_signal_accumulator.cc)2
-rw-r--r--chromium/media/capture/content/feedback_signal_accumulator.h (renamed from chromium/media/capture/feedback_signal_accumulator.h)6
-rw-r--r--chromium/media/capture/content/feedback_signal_accumulator_unittest.cc (renamed from chromium/media/capture/feedback_signal_accumulator_unittest.cc)2
-rw-r--r--chromium/media/capture/content/screen_capture_device_core.cc (renamed from chromium/media/capture/screen_capture_device_core.cc)28
-rw-r--r--chromium/media/capture/content/screen_capture_device_core.h (renamed from chromium/media/capture/screen_capture_device_core.h)14
-rw-r--r--chromium/media/capture/content/smooth_event_sampler.cc (renamed from chromium/media/capture/smooth_event_sampler.cc)20
-rw-r--r--chromium/media/capture/content/smooth_event_sampler.h (renamed from chromium/media/capture/smooth_event_sampler.h)0
-rw-r--r--chromium/media/capture/content/smooth_event_sampler_unittest.cc704
-rw-r--r--chromium/media/capture/content/thread_safe_capture_oracle.cc (renamed from chromium/media/capture/thread_safe_capture_oracle.cc)87
-rw-r--r--chromium/media/capture/content/thread_safe_capture_oracle.h (renamed from chromium/media/capture/thread_safe_capture_oracle.h)19
-rw-r--r--chromium/media/capture/content/video_capture_oracle.cc (renamed from chromium/media/capture/video_capture_oracle.cc)96
-rw-r--r--chromium/media/capture/content/video_capture_oracle.h (renamed from chromium/media/capture/video_capture_oracle.h)11
-rw-r--r--chromium/media/capture/content/video_capture_oracle_unittest.cc (renamed from chromium/media/capture/video_capture_oracle_unittest.cc)250
-rw-r--r--chromium/media/capture/smooth_event_sampler_unittest.cc488
-rw-r--r--chromium/media/capture/video/OWNERS (renamed from chromium/media/video/capture/OWNERS)1
-rw-r--r--chromium/media/capture/video/android/video_capture_device_android.cc (renamed from chromium/media/video/capture/android/video_capture_device_android.cc)25
-rw-r--r--chromium/media/capture/video/android/video_capture_device_android.h (renamed from chromium/media/video/capture/android/video_capture_device_android.h)17
-rw-r--r--chromium/media/capture/video/android/video_capture_device_factory_android.cc (renamed from chromium/media/video/capture/android/video_capture_device_factory_android.cc)27
-rw-r--r--chromium/media/capture/video/android/video_capture_device_factory_android.h (renamed from chromium/media/video/capture/android/video_capture_device_factory_android.h)12
-rw-r--r--chromium/media/capture/video/fake_video_capture_device.cc (renamed from chromium/media/video/capture/fake_video_capture_device.cc)153
-rw-r--r--chromium/media/capture/video/fake_video_capture_device.h (renamed from chromium/media/video/capture/fake_video_capture_device.h)24
-rw-r--r--chromium/media/capture/video/fake_video_capture_device_factory.cc (renamed from chromium/media/video/capture/fake_video_capture_device_factory.cc)41
-rw-r--r--chromium/media/capture/video/fake_video_capture_device_factory.h (renamed from chromium/media/video/capture/fake_video_capture_device_factory.h)6
-rw-r--r--chromium/media/capture/video/fake_video_capture_device_unittest.cc (renamed from chromium/media/video/capture/fake_video_capture_device_unittest.cc)56
-rw-r--r--chromium/media/capture/video/file_video_capture_device.cc386
-rw-r--r--chromium/media/capture/video/file_video_capture_device.h (renamed from chromium/media/video/capture/file_video_capture_device.h)46
-rw-r--r--chromium/media/capture/video/file_video_capture_device_factory.cc (renamed from chromium/media/video/capture/file_video_capture_device_factory.cc)32
-rw-r--r--chromium/media/capture/video/file_video_capture_device_factory.h (renamed from chromium/media/video/capture/file_video_capture_device_factory.h)6
-rw-r--r--chromium/media/capture/video/linux/OWNERS1
-rw-r--r--chromium/media/capture/video/linux/v4l2_capture_delegate.cc (renamed from chromium/media/video/capture/linux/v4l2_capture_delegate.cc)55
-rw-r--r--chromium/media/capture/video/linux/v4l2_capture_delegate.h (renamed from chromium/media/video/capture/linux/v4l2_capture_delegate.h)11
-rw-r--r--chromium/media/capture/video/linux/v4l2_capture_delegate_multi_plane.cc (renamed from chromium/media/video/capture/linux/v4l2_capture_delegate_multi_plane.cc)19
-rw-r--r--chromium/media/capture/video/linux/v4l2_capture_delegate_multi_plane.h (renamed from chromium/media/video/capture/linux/v4l2_capture_delegate_multi_plane.h)2
-rw-r--r--chromium/media/capture/video/linux/v4l2_capture_delegate_single_plane.cc (renamed from chromium/media/video/capture/linux/v4l2_capture_delegate_single_plane.cc)9
-rw-r--r--chromium/media/capture/video/linux/v4l2_capture_delegate_single_plane.h (renamed from chromium/media/video/capture/linux/v4l2_capture_delegate_single_plane.h)4
-rw-r--r--chromium/media/capture/video/linux/video_capture_device_chromeos.cc (renamed from chromium/media/video/capture/linux/video_capture_device_chromeos.cc)7
-rw-r--r--chromium/media/capture/video/linux/video_capture_device_chromeos.h (renamed from chromium/media/video/capture/linux/video_capture_device_chromeos.h)2
-rw-r--r--chromium/media/capture/video/linux/video_capture_device_factory_linux.cc (renamed from chromium/media/video/capture/linux/video_capture_device_factory_linux.cc)28
-rw-r--r--chromium/media/capture/video/linux/video_capture_device_factory_linux.h (renamed from chromium/media/video/capture/linux/video_capture_device_factory_linux.h)2
-rw-r--r--chromium/media/capture/video/linux/video_capture_device_linux.cc (renamed from chromium/media/video/capture/linux/video_capture_device_linux.cc)25
-rw-r--r--chromium/media/capture/video/linux/video_capture_device_linux.h (renamed from chromium/media/video/capture/linux/video_capture_device_linux.h)7
-rw-r--r--chromium/media/capture/video/mac/DEPS (renamed from chromium/media/video/capture/mac/DEPS)0
-rw-r--r--chromium/media/capture/video/mac/platform_video_capturing_mac.h (renamed from chromium/media/video/capture/mac/platform_video_capturing_mac.h)2
-rw-r--r--chromium/media/capture/video/mac/video_capture_device_avfoundation_mac.h (renamed from chromium/media/video/capture/mac/video_capture_device_avfoundation_mac.h)4
-rw-r--r--chromium/media/capture/video/mac/video_capture_device_avfoundation_mac.mm (renamed from chromium/media/video/capture/mac/video_capture_device_avfoundation_mac.mm)97
-rw-r--r--chromium/media/capture/video/mac/video_capture_device_decklink_mac.h (renamed from chromium/media/video/capture/mac/video_capture_device_decklink_mac.h)2
-rw-r--r--chromium/media/capture/video/mac/video_capture_device_decklink_mac.mm (renamed from chromium/media/video/capture/mac/video_capture_device_decklink_mac.mm)64
-rw-r--r--chromium/media/capture/video/mac/video_capture_device_factory_mac.h (renamed from chromium/media/video/capture/mac/video_capture_device_factory_mac.h)6
-rw-r--r--chromium/media/capture/video/mac/video_capture_device_factory_mac.mm (renamed from chromium/media/video/capture/mac/video_capture_device_factory_mac.mm)38
-rw-r--r--chromium/media/capture/video/mac/video_capture_device_factory_mac_unittest.mm (renamed from chromium/media/video/capture/mac/video_capture_device_factory_mac_unittest.mm)6
-rw-r--r--chromium/media/capture/video/mac/video_capture_device_mac.h (renamed from chromium/media/video/capture/mac/video_capture_device_mac.h)9
-rw-r--r--chromium/media/capture/video/mac/video_capture_device_mac.mm (renamed from chromium/media/video/capture/mac/video_capture_device_mac.mm)160
-rw-r--r--chromium/media/capture/video/mac/video_capture_device_qtkit_mac.h (renamed from chromium/media/video/capture/mac/video_capture_device_qtkit_mac.h)10
-rw-r--r--chromium/media/capture/video/mac/video_capture_device_qtkit_mac.mm (renamed from chromium/media/video/capture/mac/video_capture_device_qtkit_mac.mm)165
-rw-r--r--chromium/media/capture/video/video_capture_device.cc (renamed from chromium/media/video/capture/video_capture_device.cc)66
-rw-r--r--chromium/media/capture/video/video_capture_device.h (renamed from chromium/media/video/capture/video_capture_device.h)73
-rw-r--r--chromium/media/capture/video/video_capture_device_factory.cc (renamed from chromium/media/video/capture/video_capture_device_factory.cc)20
-rw-r--r--chromium/media/capture/video/video_capture_device_factory.h (renamed from chromium/media/video/capture/video_capture_device_factory.h)2
-rw-r--r--chromium/media/capture/video/video_capture_device_info.cc (renamed from chromium/media/video/capture/video_capture_device_info.cc)12
-rw-r--r--chromium/media/capture/video/video_capture_device_info.h (renamed from chromium/media/video/capture/video_capture_device_info.h)2
-rw-r--r--chromium/media/capture/video/video_capture_device_unittest.cc (renamed from chromium/media/video/capture/video_capture_device_unittest.cc)58
-rw-r--r--chromium/media/capture/video/win/capability_list_win.cc (renamed from chromium/media/video/capture/win/capability_list_win.cc)10
-rw-r--r--chromium/media/capture/video/win/capability_list_win.h (renamed from chromium/media/video/capture/win/capability_list_win.h)8
-rw-r--r--chromium/media/capture/video/win/filter_base_win.cc (renamed from chromium/media/video/capture/win/filter_base_win.cc)18
-rw-r--r--chromium/media/capture/video/win/filter_base_win.h (renamed from chromium/media/video/capture/win/filter_base_win.h)4
-rw-r--r--chromium/media/capture/video/win/pin_base_win.cc (renamed from chromium/media/video/capture/win/pin_base_win.cc)23
-rw-r--r--chromium/media/capture/video/win/pin_base_win.h (renamed from chromium/media/video/capture/win/pin_base_win.h)7
-rw-r--r--chromium/media/capture/video/win/sink_filter_observer_win.h (renamed from chromium/media/video/capture/win/sink_filter_observer_win.h)4
-rw-r--r--chromium/media/capture/video/win/sink_filter_win.cc (renamed from chromium/media/video/capture/win/sink_filter_win.cc)28
-rw-r--r--chromium/media/capture/video/win/sink_filter_win.h (renamed from chromium/media/video/capture/win/sink_filter_win.h)12
-rw-r--r--chromium/media/capture/video/win/sink_input_pin_win.cc (renamed from chromium/media/video/capture/win/sink_input_pin_win.cc)59
-rw-r--r--chromium/media/capture/video/win/sink_input_pin_win.h (renamed from chromium/media/video/capture/win/sink_input_pin_win.h)10
-rw-r--r--chromium/media/capture/video/win/video_capture_device_factory_win.cc (renamed from chromium/media/video/capture/win/video_capture_device_factory_win.cc)94
-rw-r--r--chromium/media/capture/video/win/video_capture_device_factory_win.h (renamed from chromium/media/video/capture/win/video_capture_device_factory_win.h)6
-rw-r--r--chromium/media/capture/video/win/video_capture_device_mf_win.cc (renamed from chromium/media/video/capture/win/video_capture_device_mf_win.cc)40
-rw-r--r--chromium/media/capture/video/win/video_capture_device_mf_win.h (renamed from chromium/media/video/capture/win/video_capture_device_mf_win.h)7
-rw-r--r--chromium/media/capture/video/win/video_capture_device_win.cc (renamed from chromium/media/video/capture/win/video_capture_device_win.cc)103
-rw-r--r--chromium/media/capture/video/win/video_capture_device_win.h (renamed from chromium/media/video/capture/win/video_capture_device_win.h)38
-rw-r--r--chromium/media/capture/webm_muxer.cc128
-rw-r--r--chromium/media/capture/webm_muxer.h93
-rw-r--r--chromium/media/capture/webm_muxer_unittest.cc118
-rw-r--r--chromium/media/cast/BUILD.gn27
-rw-r--r--chromium/media/cast/cast.gyp12
-rw-r--r--chromium/media/cast/cast_config.cc4
-rw-r--r--chromium/media/cast/cast_defines.h3
-rw-r--r--chromium/media/cast/cast_testing.gypi12
-rw-r--r--chromium/media/cast/cast_unittests.isolate1
-rw-r--r--chromium/media/cast/common/transport_encryption_handler.cc4
-rw-r--r--chromium/media/cast/common/transport_encryption_handler.h2
-rw-r--r--chromium/media/cast/net/cast_transport_sender_impl.cc2
-rw-r--r--chromium/media/cast/net/cast_transport_sender_impl.h2
-rw-r--r--chromium/media/cast/net/rtcp/rtcp_unittest.cc2
-rw-r--r--chromium/media/cast/receiver/video_decoder.cc13
-rw-r--r--chromium/media/cast/receiver/video_decoder_unittest.cc9
-rw-r--r--chromium/media/cast/sender/audio_encoder.cc11
-rw-r--r--chromium/media/cast/sender/congestion_control.cc233
-rw-r--r--chromium/media/cast/sender/congestion_control.h22
-rw-r--r--chromium/media/cast/sender/congestion_control_unittest.cc31
-rw-r--r--chromium/media/cast/sender/external_video_encoder.cc271
-rw-r--r--chromium/media/cast/sender/external_video_encoder.h55
-rw-r--r--chromium/media/cast/sender/external_video_encoder_unittest.cc81
-rw-r--r--chromium/media/cast/sender/frame_sender.cc29
-rw-r--r--chromium/media/cast/sender/h264_vt_encoder_unittest.cc7
-rw-r--r--chromium/media/cast/sender/performance_metrics_overlay.cc6
-rw-r--r--chromium/media/cast/sender/video_encoder_unittest.cc4
-rw-r--r--chromium/media/cast/sender/video_sender.cc84
-rw-r--r--chromium/media/cast/sender/video_sender.h9
-rw-r--r--chromium/media/cast/sender/video_sender_unittest.cc72
-rw-r--r--chromium/media/cast/sender/vp8_encoder.cc2
-rw-r--r--chromium/media/cast/sender/vp8_encoder.h2
-rw-r--r--chromium/media/cast/sender/vp8_quantizer_parser.cc209
-rw-r--r--chromium/media/cast/sender/vp8_quantizer_parser.h20
-rw-r--r--chromium/media/cast/sender/vp8_quantizer_parser_unittest.cc147
-rw-r--r--chromium/media/cdm/json_web_key.cc4
-rw-r--r--chromium/media/cdm/player_tracker_impl.cc6
-rw-r--r--chromium/media/cdm/ppapi/BUILD.gn11
-rw-r--r--chromium/media/cdm/ppapi/api/codereview.settings9
-rw-r--r--chromium/media/cdm/ppapi/api/content_decryption_module.h5
-rw-r--r--chromium/media/cdm/ppapi/cdm_adapter.cc6
-rw-r--r--chromium/media/cdm/ppapi/cdm_adapter.gni98
-rw-r--r--chromium/media/cdm/ppapi/external_clear_key/clear_key_cdm.cc6
-rw-r--r--chromium/media/cdm/ppapi/external_clear_key/ffmpeg_cdm_audio_decoder.cc2
-rw-r--r--chromium/media/cdm/ppapi/external_clear_key/ffmpeg_cdm_video_decoder.cc23
-rw-r--r--chromium/media/cdm/ppapi/external_clear_key/libvpx_cdm_video_decoder.cc5
-rw-r--r--chromium/media/cdm/stub/stub_cdm.cc12
-rw-r--r--chromium/media/ffmpeg/ffmpeg_common.cc179
-rw-r--r--chromium/media/ffmpeg/ffmpeg_common.h34
-rw-r--r--chromium/media/ffmpeg/ffmpeg_common_unittest.cc409
-rw-r--r--chromium/media/ffmpeg/ffmpeg_regression_tests.cc21
-rw-r--r--chromium/media/filters/audio_clock.cc88
-rw-r--r--chromium/media/filters/audio_clock.h21
-rw-r--r--chromium/media/filters/audio_clock_unittest.cc23
-rw-r--r--chromium/media/filters/audio_decoder_unittest.cc19
-rw-r--r--chromium/media/filters/audio_renderer_algorithm.cc28
-rw-r--r--chromium/media/filters/audio_renderer_algorithm.h3
-rw-r--r--chromium/media/filters/audio_renderer_algorithm_unittest.cc4
-rw-r--r--chromium/media/filters/chunk_demuxer.cc261
-rw-r--r--chromium/media/filters/chunk_demuxer.h54
-rw-r--r--chromium/media/filters/chunk_demuxer_unittest.cc330
-rw-r--r--chromium/media/filters/decoder_stream.cc4
-rw-r--r--chromium/media/filters/decoder_stream.h1
-rw-r--r--chromium/media/filters/decrypting_audio_decoder.cc2
-rw-r--r--chromium/media/filters/decrypting_audio_decoder_unittest.cc4
-rw-r--r--chromium/media/filters/decrypting_demuxer_stream.cc19
-rw-r--r--chromium/media/filters/ffmpeg_aac_bitstream_converter.cc2
-rw-r--r--chromium/media/filters/ffmpeg_aac_bitstream_converter_unittest.cc2
-rw-r--r--chromium/media/filters/ffmpeg_audio_decoder.cc22
-rw-r--r--chromium/media/filters/ffmpeg_audio_decoder.h4
-rw-r--r--chromium/media/filters/ffmpeg_demuxer.cc244
-rw-r--r--chromium/media/filters/ffmpeg_demuxer.h20
-rw-r--r--chromium/media/filters/ffmpeg_demuxer_unittest.cc136
-rw-r--r--chromium/media/filters/ffmpeg_glue.cc16
-rw-r--r--chromium/media/filters/ffmpeg_glue_unittest.cc8
-rw-r--r--chromium/media/filters/ffmpeg_h265_to_annex_b_bitstream_converter.cc93
-rw-r--r--chromium/media/filters/ffmpeg_h265_to_annex_b_bitstream_converter.h49
-rw-r--r--chromium/media/filters/ffmpeg_video_decoder.cc47
-rw-r--r--chromium/media/filters/ffmpeg_video_decoder.h2
-rw-r--r--chromium/media/filters/ffmpeg_video_decoder_unittest.cc29
-rw-r--r--chromium/media/filters/file_data_source.cc7
-rw-r--r--chromium/media/filters/file_data_source.h3
-rw-r--r--chromium/media/filters/frame_processor.cc93
-rw-r--r--chromium/media/filters/frame_processor.h13
-rw-r--r--chromium/media/filters/frame_processor_unittest.cc48
-rw-r--r--chromium/media/filters/gpu_video_decoder.cc43
-rw-r--r--chromium/media/filters/h264_parser.cc7
-rw-r--r--chromium/media/filters/h264_parser.h12
-rw-r--r--chromium/media/filters/h265_parser.cc159
-rw-r--r--chromium/media/filters/h265_parser.h151
-rw-r--r--chromium/media/filters/h265_parser_unittest.cc43
-rw-r--r--chromium/media/filters/ivf_parser.cc89
-rw-r--r--chromium/media/filters/ivf_parser.h86
-rw-r--r--chromium/media/filters/ivf_parser_unittest.cc56
-rw-r--r--chromium/media/filters/jpeg_parser.cc161
-rw-r--r--chromium/media/filters/jpeg_parser.h50
-rw-r--r--chromium/media/filters/jpeg_parser_unittest.cc1
-rw-r--r--chromium/media/filters/opus_audio_decoder.cc28
-rw-r--r--chromium/media/filters/opus_audio_decoder.h5
-rw-r--r--chromium/media/filters/source_buffer_platform.cc4
-rw-r--r--chromium/media/filters/source_buffer_platform.h5
-rw-r--r--chromium/media/filters/source_buffer_platform_lowmem.cc4
-rw-r--r--chromium/media/filters/source_buffer_range.cc51
-rw-r--r--chromium/media/filters/source_buffer_range.h15
-rw-r--r--chromium/media/filters/source_buffer_stream.cc437
-rw-r--r--chromium/media/filters/source_buffer_stream.h84
-rw-r--r--chromium/media/filters/source_buffer_stream_unittest.cc482
-rw-r--r--chromium/media/filters/stream_parser_factory.cc87
-rw-r--r--chromium/media/filters/stream_parser_factory.h11
-rw-r--r--chromium/media/filters/video_cadence_estimator.cc200
-rw-r--r--chromium/media/filters/video_cadence_estimator.h78
-rw-r--r--chromium/media/filters/video_cadence_estimator_unittest.cc64
-rw-r--r--chromium/media/filters/video_frame_stream_unittest.cc1
-rw-r--r--chromium/media/filters/video_renderer_algorithm_unittest.cc51
-rw-r--r--chromium/media/filters/vp8_parser_unittest.cc42
-rw-r--r--chromium/media/filters/vp9_parser.cc689
-rw-r--r--chromium/media/filters/vp9_parser.h290
-rw-r--r--chromium/media/filters/vp9_parser_unittest.cc159
-rw-r--r--chromium/media/filters/vp9_raw_bits_reader.cc55
-rw-r--r--chromium/media/filters/vp9_raw_bits_reader.h62
-rw-r--r--chromium/media/filters/vp9_raw_bits_reader_unittest.cc66
-rw-r--r--chromium/media/filters/vpx_video_decoder.cc115
-rw-r--r--chromium/media/formats/common/stream_parser_test_base.cc2
-rw-r--r--chromium/media/formats/mp2t/es_adapter_video.cc2
-rw-r--r--chromium/media/formats/mp2t/es_adapter_video_unittest.cc12
-rw-r--r--chromium/media/formats/mp2t/es_parser.cc1
-rw-r--r--chromium/media/formats/mp2t/es_parser_adts.cc3
-rw-r--r--chromium/media/formats/mp2t/es_parser_adts_unittest.cc9
-rw-r--r--chromium/media/formats/mp2t/es_parser_h264.cc11
-rw-r--r--chromium/media/formats/mp2t/es_parser_mpeg1audio.cc16
-rw-r--r--chromium/media/formats/mp2t/es_parser_mpeg1audio.h4
-rw-r--r--chromium/media/formats/mp2t/es_parser_mpeg1audio_unittest.cc6
-rw-r--r--chromium/media/formats/mp2t/es_parser_test_base.cc2
-rw-r--r--chromium/media/formats/mp2t/mp2t_stream_parser.cc40
-rw-r--r--chromium/media/formats/mp2t/mp2t_stream_parser.h19
-rw-r--r--chromium/media/formats/mp2t/mp2t_stream_parser_unittest.cc17
-rw-r--r--chromium/media/formats/mp2t/ts_section_pes.cc2
-rw-r--r--chromium/media/formats/mp4/aac.cc56
-rw-r--r--chromium/media/formats/mp4/aac.h3
-rw-r--r--chromium/media/formats/mp4/aac_unittest.cc168
-rw-r--r--chromium/media/formats/mp4/avc.cc71
-rw-r--r--chromium/media/formats/mp4/avc.h24
-rw-r--r--chromium/media/formats/mp4/avc_unittest.cc56
-rw-r--r--chromium/media/formats/mp4/bitstream_converter.cc14
-rw-r--r--chromium/media/formats/mp4/bitstream_converter.h46
-rw-r--r--chromium/media/formats/mp4/box_definitions.cc178
-rw-r--r--chromium/media/formats/mp4/box_definitions.h47
-rw-r--r--chromium/media/formats/mp4/box_reader.cc29
-rw-r--r--chromium/media/formats/mp4/box_reader.h19
-rw-r--r--chromium/media/formats/mp4/box_reader_unittest.cc52
-rw-r--r--chromium/media/formats/mp4/fourccs.h5
-rw-r--r--chromium/media/formats/mp4/hevc.cc237
-rw-r--r--chromium/media/formats/mp4/hevc.h106
-rw-r--r--chromium/media/formats/mp4/mp4_stream_parser.cc103
-rw-r--r--chromium/media/formats/mp4/mp4_stream_parser.h22
-rw-r--r--chromium/media/formats/mp4/mp4_stream_parser_unittest.cc59
-rw-r--r--chromium/media/formats/mp4/track_run_iterator.cc163
-rw-r--r--chromium/media/formats/mp4/track_run_iterator.h7
-rw-r--r--chromium/media/formats/mp4/track_run_iterator_unittest.cc179
-rw-r--r--chromium/media/formats/mpeg/adts_stream_parser.cc2
-rw-r--r--chromium/media/formats/mpeg/mpeg1_audio_stream_parser.cc28
-rw-r--r--chromium/media/formats/mpeg/mpeg1_audio_stream_parser.h7
-rw-r--r--chromium/media/formats/mpeg/mpeg_audio_stream_parser_base.cc14
-rw-r--r--chromium/media/formats/mpeg/mpeg_audio_stream_parser_base.h6
-rw-r--r--chromium/media/formats/webm/cluster_builder.h1
-rw-r--r--chromium/media/formats/webm/webm_audio_client.cc21
-rw-r--r--chromium/media/formats/webm/webm_audio_client.h4
-rw-r--r--chromium/media/formats/webm/webm_cluster_parser.cc94
-rw-r--r--chromium/media/formats/webm/webm_cluster_parser.h32
-rw-r--r--chromium/media/formats/webm/webm_cluster_parser_unittest.cc406
-rw-r--r--chromium/media/formats/webm/webm_content_encodings_client.cc54
-rw-r--r--chromium/media/formats/webm/webm_content_encodings_client.h4
-rw-r--r--chromium/media/formats/webm/webm_content_encodings_client_unittest.cc48
-rw-r--r--chromium/media/formats/webm/webm_stream_parser.cc23
-rw-r--r--chromium/media/formats/webm/webm_stream_parser.h5
-rw-r--r--chromium/media/formats/webm/webm_tracks_parser.cc62
-rw-r--r--chromium/media/formats/webm/webm_tracks_parser.h5
-rw-r--r--chromium/media/formats/webm/webm_tracks_parser_unittest.cc75
-rw-r--r--chromium/media/formats/webm/webm_video_client.cc27
-rw-r--r--chromium/media/formats/webm/webm_video_client.h4
-rw-r--r--chromium/media/media.gyp335
-rw-r--r--chromium/media/media_cdm.gypi24
-rw-r--r--chromium/media/media_nacl.gyp25
-rw-r--r--chromium/media/media_options.gni27
-rw-r--r--chromium/media/media_unittests.isolate1
-rw-r--r--chromium/media/media_variables.gypi23
-rw-r--r--chromium/media/midi/BUILD.gn25
-rw-r--r--chromium/media/midi/midi.gyp13
-rw-r--r--chromium/media/midi/midi_device_android.cc68
-rw-r--r--chromium/media/midi/midi_device_android.h52
-rw-r--r--chromium/media/midi/midi_input_port_android.cc59
-rw-r--r--chromium/media/midi/midi_input_port_android.h51
-rw-r--r--chromium/media/midi/midi_manager.cc49
-rw-r--r--chromium/media/midi/midi_manager.h3
-rw-r--r--chromium/media/midi/midi_manager_android.cc152
-rw-r--r--chromium/media/midi/midi_manager_android.h82
-rw-r--r--chromium/media/midi/midi_manager_usb.cc2
-rw-r--r--chromium/media/midi/midi_manager_usb_unittest.cc36
-rw-r--r--chromium/media/midi/midi_manager_win.cc14
-rw-r--r--chromium/media/midi/midi_output_port_android.cc46
-rw-r--r--chromium/media/midi/midi_output_port_android.h36
-rw-r--r--chromium/media/midi/midi_switches.cc14
-rw-r--r--chromium/media/midi/midi_switches.h21
-rw-r--r--chromium/media/midi/midi_unittests.isolate1
-rw-r--r--chromium/media/midi/usb_midi_device_factory_android.h1
-rw-r--r--chromium/media/mojo/interfaces/BUILD.gn3
-rw-r--r--chromium/media/mojo/interfaces/content_decryption_module.mojom6
-rw-r--r--chromium/media/mojo/interfaces/decryptor.mojom10
-rw-r--r--chromium/media/mojo/interfaces/demuxer_stream.mojom6
-rw-r--r--chromium/media/mojo/interfaces/media_types.mojom39
-rw-r--r--chromium/media/mojo/interfaces/renderer.mojom (renamed from chromium/media/mojo/interfaces/media_renderer.mojom)16
-rw-r--r--chromium/media/mojo/interfaces/service_factory.mojom18
-rw-r--r--chromium/media/mojo/services/BUILD.gn6
-rw-r--r--chromium/media/mojo/services/demuxer_stream_provider_shim.cc4
-rw-r--r--chromium/media/mojo/services/demuxer_stream_provider_shim.h11
-rw-r--r--chromium/media/mojo/services/media_apptest.cc87
-rw-r--r--chromium/media/mojo/services/media_type_converters.cc241
-rw-r--r--chromium/media/mojo/services/media_type_converters.h78
-rw-r--r--chromium/media/mojo/services/media_type_converters_unittest.cc103
-rw-r--r--chromium/media/mojo/services/mojo_cdm.cc31
-rw-r--r--chromium/media/mojo/services/mojo_cdm.h25
-rw-r--r--chromium/media/mojo/services/mojo_cdm_factory.cc14
-rw-r--r--chromium/media/mojo/services/mojo_cdm_factory.h12
-rw-r--r--chromium/media/mojo/services/mojo_cdm_promise.cc11
-rw-r--r--chromium/media/mojo/services/mojo_cdm_promise.h2
-rw-r--r--chromium/media/mojo/services/mojo_cdm_service.cc53
-rw-r--r--chromium/media/mojo/services/mojo_cdm_service.h72
-rw-r--r--chromium/media/mojo/services/mojo_cdm_service_context.cc8
-rw-r--r--chromium/media/mojo/services/mojo_cdm_service_context.h12
-rw-r--r--chromium/media/mojo/services/mojo_demuxer_stream_adapter.cc28
-rw-r--r--chromium/media/mojo/services/mojo_demuxer_stream_adapter.h28
-rw-r--r--chromium/media/mojo/services/mojo_demuxer_stream_impl.cc35
-rw-r--r--chromium/media/mojo/services/mojo_demuxer_stream_impl.h16
-rw-r--r--chromium/media/mojo/services/mojo_media_application.cc43
-rw-r--r--chromium/media/mojo/services/mojo_media_application.h27
-rw-r--r--chromium/media/mojo/services/mojo_renderer_factory.cc18
-rw-r--r--chromium/media/mojo/services/mojo_renderer_factory.h15
-rw-r--r--chromium/media/mojo/services/mojo_renderer_impl.cc52
-rw-r--r--chromium/media/mojo/services/mojo_renderer_impl.h27
-rw-r--r--chromium/media/mojo/services/mojo_renderer_service.cc45
-rw-r--r--chromium/media/mojo/services/mojo_renderer_service.h35
-rw-r--r--chromium/media/mojo/services/service_factory_impl.cc62
-rw-r--r--chromium/media/mojo/services/service_factory_impl.h58
-rw-r--r--chromium/media/renderers/audio_renderer_impl.cc15
-rw-r--r--chromium/media/renderers/audio_renderer_impl.h1
-rw-r--r--chromium/media/renderers/default_renderer_factory.cc17
-rw-r--r--chromium/media/renderers/default_renderer_factory.h1
-rw-r--r--chromium/media/renderers/gpu_video_accelerator_factories.h10
-rw-r--r--chromium/media/renderers/mock_gpu_video_accelerator_factories.cc54
-rw-r--r--chromium/media/renderers/mock_gpu_video_accelerator_factories.h23
-rw-r--r--chromium/media/renderers/video_renderer_impl.cc347
-rw-r--r--chromium/media/renderers/video_renderer_impl.h98
-rw-r--r--chromium/media/renderers/video_renderer_impl_unittest.cc182
-rw-r--r--chromium/media/shared_memory_support.gypi2
-rw-r--r--chromium/media/video/capture/file_video_capture_device.cc264
-rw-r--r--chromium/media/video/fake_video_encode_accelerator.cc11
-rw-r--r--chromium/media/video/fake_video_encode_accelerator.h2
-rw-r--r--chromium/media/video/gpu_memory_buffer_video_frame_pool.cc609
-rw-r--r--chromium/media/video/gpu_memory_buffer_video_frame_pool.h24
-rw-r--r--chromium/media/video/gpu_memory_buffer_video_frame_pool_unittest.cc203
-rw-r--r--chromium/media/video/jpeg_decode_accelerator.h4
-rw-r--r--chromium/media/video/video_decode_accelerator.h6
-rw-r--r--chromium/media/video/video_encode_accelerator.h2
611 files changed, 23100 insertions, 10249 deletions
diff --git a/chromium/media/BUILD.gn b/chromium/media/BUILD.gn
index aa0b3ca6070..68c1e5f2423 100644
--- a/chromium/media/BUILD.gn
+++ b/chromium/media/BUILD.gn
@@ -32,6 +32,9 @@ config("media_config") {
if (use_cras) {
defines += [ "USE_CRAS" ]
}
+ if (proprietary_codecs && enable_hevc_demuxing) {
+ defines += [ "ENABLE_HEVC_DEMUXING" ]
+ }
}
config("media_implementation") {
@@ -88,20 +91,74 @@ component("media") {
sources = [
"blink/skcanvas_video_renderer.cc",
"blink/skcanvas_video_renderer.h",
- "capture/animated_content_sampler.cc",
- "capture/animated_content_sampler.h",
- "capture/capture_resolution_chooser.cc",
- "capture/capture_resolution_chooser.h",
- "capture/feedback_signal_accumulator.cc",
- "capture/feedback_signal_accumulator.h",
- "capture/screen_capture_device_core.cc",
- "capture/screen_capture_device_core.h",
- "capture/smooth_event_sampler.cc",
- "capture/smooth_event_sampler.h",
- "capture/thread_safe_capture_oracle.cc",
- "capture/thread_safe_capture_oracle.h",
- "capture/video_capture_oracle.cc",
- "capture/video_capture_oracle.h",
+ "capture/content/animated_content_sampler.cc",
+ "capture/content/animated_content_sampler.h",
+ "capture/content/capture_resolution_chooser.cc",
+ "capture/content/capture_resolution_chooser.h",
+ "capture/content/feedback_signal_accumulator.cc",
+ "capture/content/feedback_signal_accumulator.h",
+ "capture/content/screen_capture_device_core.cc",
+ "capture/content/screen_capture_device_core.h",
+ "capture/content/smooth_event_sampler.cc",
+ "capture/content/smooth_event_sampler.h",
+ "capture/content/thread_safe_capture_oracle.cc",
+ "capture/content/thread_safe_capture_oracle.h",
+ "capture/content/video_capture_oracle.cc",
+ "capture/content/video_capture_oracle.h",
+ "capture/video/fake_video_capture_device.cc",
+ "capture/video/fake_video_capture_device.h",
+ "capture/video/fake_video_capture_device_factory.cc",
+ "capture/video/fake_video_capture_device_factory.h",
+ "capture/video/file_video_capture_device.cc",
+ "capture/video/file_video_capture_device.h",
+ "capture/video/file_video_capture_device_factory.cc",
+ "capture/video/file_video_capture_device_factory.h",
+ "capture/video/linux/v4l2_capture_delegate.cc",
+ "capture/video/linux/v4l2_capture_delegate.h",
+ "capture/video/linux/v4l2_capture_delegate_multi_plane.cc",
+ "capture/video/linux/v4l2_capture_delegate_multi_plane.h",
+ "capture/video/linux/v4l2_capture_delegate_single_plane.cc",
+ "capture/video/linux/v4l2_capture_delegate_single_plane.h",
+ "capture/video/linux/video_capture_device_chromeos.cc",
+ "capture/video/linux/video_capture_device_chromeos.h",
+ "capture/video/linux/video_capture_device_factory_linux.cc",
+ "capture/video/linux/video_capture_device_factory_linux.h",
+ "capture/video/linux/video_capture_device_linux.cc",
+ "capture/video/linux/video_capture_device_linux.h",
+ "capture/video/mac/platform_video_capturing_mac.h",
+ "capture/video/mac/video_capture_device_avfoundation_mac.h",
+ "capture/video/mac/video_capture_device_avfoundation_mac.mm",
+ "capture/video/mac/video_capture_device_decklink_mac.h",
+ "capture/video/mac/video_capture_device_decklink_mac.mm",
+ "capture/video/mac/video_capture_device_factory_mac.h",
+ "capture/video/mac/video_capture_device_factory_mac.mm",
+ "capture/video/mac/video_capture_device_mac.h",
+ "capture/video/mac/video_capture_device_mac.mm",
+ "capture/video/mac/video_capture_device_qtkit_mac.h",
+ "capture/video/mac/video_capture_device_qtkit_mac.mm",
+ "capture/video/video_capture_device.cc",
+ "capture/video/video_capture_device.h",
+ "capture/video/video_capture_device_factory.cc",
+ "capture/video/video_capture_device_factory.h",
+ "capture/video/video_capture_device_info.cc",
+ "capture/video/video_capture_device_info.h",
+ "capture/video/win/capability_list_win.cc",
+ "capture/video/win/capability_list_win.h",
+ "capture/video/win/filter_base_win.cc",
+ "capture/video/win/filter_base_win.h",
+ "capture/video/win/pin_base_win.cc",
+ "capture/video/win/pin_base_win.h",
+ "capture/video/win/sink_filter_observer_win.h",
+ "capture/video/win/sink_filter_win.cc",
+ "capture/video/win/sink_filter_win.h",
+ "capture/video/win/sink_input_pin_win.cc",
+ "capture/video/win/sink_input_pin_win.h",
+ "capture/video/win/video_capture_device_factory_win.cc",
+ "capture/video/win/video_capture_device_factory_win.h",
+ "capture/video/win/video_capture_device_mf_win.cc",
+ "capture/video/win/video_capture_device_mf_win.h",
+ "capture/video/win/video_capture_device_win.cc",
+ "capture/video/win/video_capture_device_win.h",
"cdm/aes_decryptor.cc",
"cdm/aes_decryptor.h",
"cdm/default_cdm_factory.cc",
@@ -113,7 +170,7 @@ component("media") {
"cdm/player_tracker_impl.cc",
"cdm/player_tracker_impl.h",
"cdm/proxy_decryptor.cc",
- "cdm/proxy_decyrptor.h",
+ "cdm/proxy_decryptor.h",
"ffmpeg/ffmpeg_deleters.h",
"filters/audio_clock.cc",
"filters/audio_clock.h",
@@ -146,10 +203,10 @@ component("media") {
"filters/h264_bit_reader.h",
"filters/h264_parser.cc",
"filters/h264_parser.h",
+ "filters/ivf_parser.cc",
+ "filters/ivf_parser.h",
"filters/jpeg_parser.cc",
"filters/jpeg_parser.h",
- "filters/source_buffer_platform.cc",
- "filters/source_buffer_platform.h",
"filters/source_buffer_range.cc",
"filters/source_buffer_range.h",
"filters/source_buffer_stream.cc",
@@ -164,6 +221,10 @@ component("media") {
"filters/vp8_bool_decoder.h",
"filters/vp8_parser.cc",
"filters/vp8_parser.h",
+ "filters/vp9_parser.cc",
+ "filters/vp9_parser.h",
+ "filters/vp9_raw_bits_reader.cc",
+ "filters/vp9_raw_bits_reader.h",
"filters/webvtt_util.h",
"filters/wsola_internals.cc",
"filters/wsola_internals.h",
@@ -202,60 +263,6 @@ component("media") {
"renderers/renderer_impl.h",
"renderers/video_renderer_impl.cc",
"renderers/video_renderer_impl.h",
- "video/capture/fake_video_capture_device.cc",
- "video/capture/fake_video_capture_device.h",
- "video/capture/fake_video_capture_device_factory.cc",
- "video/capture/fake_video_capture_device_factory.h",
- "video/capture/file_video_capture_device.cc",
- "video/capture/file_video_capture_device.h",
- "video/capture/file_video_capture_device_factory.cc",
- "video/capture/file_video_capture_device_factory.h",
- "video/capture/linux/v4l2_capture_delegate.cc",
- "video/capture/linux/v4l2_capture_delegate.h",
- "video/capture/linux/v4l2_capture_delegate_multi_plane.cc",
- "video/capture/linux/v4l2_capture_delegate_multi_plane.h",
- "video/capture/linux/v4l2_capture_delegate_single_plane.cc",
- "video/capture/linux/v4l2_capture_delegate_single_plane.h",
- "video/capture/linux/video_capture_device_chromeos.cc",
- "video/capture/linux/video_capture_device_chromeos.h",
- "video/capture/linux/video_capture_device_factory_linux.cc",
- "video/capture/linux/video_capture_device_factory_linux.h",
- "video/capture/linux/video_capture_device_linux.cc",
- "video/capture/linux/video_capture_device_linux.h",
- "video/capture/mac/platform_video_capturing_mac.h",
- "video/capture/mac/video_capture_device_avfoundation_mac.h",
- "video/capture/mac/video_capture_device_avfoundation_mac.mm",
- "video/capture/mac/video_capture_device_decklink_mac.h",
- "video/capture/mac/video_capture_device_decklink_mac.mm",
- "video/capture/mac/video_capture_device_factory_mac.h",
- "video/capture/mac/video_capture_device_factory_mac.mm",
- "video/capture/mac/video_capture_device_mac.h",
- "video/capture/mac/video_capture_device_mac.mm",
- "video/capture/mac/video_capture_device_qtkit_mac.h",
- "video/capture/mac/video_capture_device_qtkit_mac.mm",
- "video/capture/video_capture_device.cc",
- "video/capture/video_capture_device.h",
- "video/capture/video_capture_device_factory.cc",
- "video/capture/video_capture_device_factory.h",
- "video/capture/video_capture_device_info.cc",
- "video/capture/video_capture_device_info.h",
- "video/capture/win/capability_list_win.cc",
- "video/capture/win/capability_list_win.h",
- "video/capture/win/filter_base_win.cc",
- "video/capture/win/filter_base_win.h",
- "video/capture/win/pin_base_win.cc",
- "video/capture/win/pin_base_win.h",
- "video/capture/win/sink_filter_observer_win.h",
- "video/capture/win/sink_filter_win.cc",
- "video/capture/win/sink_filter_win.h",
- "video/capture/win/sink_input_pin_win.cc",
- "video/capture/win/sink_input_pin_win.h",
- "video/capture/win/video_capture_device_factory_win.cc",
- "video/capture/win/video_capture_device_factory_win.h",
- "video/capture/win/video_capture_device_mf_win.cc",
- "video/capture/win/video_capture_device_mf_win.h",
- "video/capture/win/video_capture_device_win.cc",
- "video/capture/win/video_capture_device_win.h",
"video/fake_video_encode_accelerator.cc",
"video/fake_video_encode_accelerator.h",
"video/gpu_memory_buffer_video_frame_pool.cc",
@@ -275,6 +282,7 @@ component("media") {
configs += [
":media_config",
":media_implementation",
+ "//build/config:precompiled_headers",
# TODO(wolenetz): Fix size_t to int trunctaion in win64.
# See http://crbug.com/171009
@@ -320,6 +328,21 @@ component("media") {
}
}
+ if (proprietary_codecs && enable_hevc_demuxing) {
+ sources += [
+ "filters/h265_parser.cc",
+ "filters/h265_parser.h",
+ "formats/mp4/hevc.cc",
+ "formats/mp4/hevc.h",
+ ]
+ if (media_use_ffmpeg) {
+ sources += [
+ "filters/ffmpeg_h265_to_annex_b_bitstream_converter.cc",
+ "filters/ffmpeg_h265_to_annex_b_bitstream_converter.h",
+ ]
+ }
+ }
+
if (current_cpu == "arm" && arm_use_neon) {
defines += [ "USE_NEON" ]
}
@@ -329,15 +352,23 @@ component("media") {
"filters/vpx_video_decoder.cc",
"filters/vpx_video_decoder.h",
]
- deps += [ "//third_party/libvpx" ]
+ deps += [ "//third_party/libvpx_new" ]
+ }
+
+ if (media_use_libwebm) {
+ sources += [
+ "capture/webm_muxer.cc",
+ "capture/webm_muxer.h",
+ ]
+ deps += [ "//third_party/libwebm" ]
}
if (is_android) {
sources += [
- "video/capture/android/video_capture_device_android.cc",
- "video/capture/android/video_capture_device_android.h",
- "video/capture/android/video_capture_device_factory_android.cc",
- "video/capture/android/video_capture_device_factory_android.h",
+ "capture/video/android/video_capture_device_android.cc",
+ "capture/video/android/video_capture_device_android.h",
+ "capture/video/android/video_capture_device_factory_android.cc",
+ "capture/video/android/video_capture_device_factory_android.h",
]
deps += [
"//media/base/android",
@@ -390,8 +421,8 @@ component("media") {
if (is_openbsd) {
sources -= [
- "video/capture/linux/v4l2_capture_delegate_multi_plane.cc",
- "video/capture/linux/v4l2_capture_delegate_multi_plane.h",
+ "capture/video/linux/v4l2_capture_delegate_multi_plane.cc",
+ "capture/video/linux/v4l2_capture_delegate_multi_plane.h",
]
}
@@ -445,6 +476,8 @@ component("media") {
"formats/mp4/aac.h",
"formats/mp4/avc.cc",
"formats/mp4/avc.h",
+ "formats/mp4/bitstream_converter.cc",
+ "formats/mp4/bitstream_converter.h",
"formats/mp4/box_definitions.cc",
"formats/mp4/box_definitions.h",
"formats/mp4/box_reader.cc",
@@ -470,6 +503,18 @@ component("media") {
]
}
+ if (use_low_memory_buffer) {
+ sources += [
+ "filters/source_buffer_platform.h",
+ "filters/source_buffer_platform_lowmem.cc",
+ ]
+ } else {
+ sources += [
+ "filters/source_buffer_platform.cc",
+ "filters/source_buffer_platform.h",
+ ]
+ }
+
public_deps = [
"//media/base",
"//media/audio",
@@ -515,14 +560,25 @@ if (is_ios) {
}
}
+# TODO(GYP): Delete this after we've converted everything to GN.
+# The _run targets exist only for compatibility w/ GYP.
+group("media_unittests_run") {
+ testonly = true
+ deps = [
+ ":media_unittests",
+ ]
+}
+
test("media_unittests") {
sources = [
"blink/skcanvas_video_renderer_unittest.cc",
- "capture/animated_content_sampler_unittest.cc",
- "capture/capture_resolution_chooser_unittest.cc",
- "capture/feedback_signal_accumulator_unittest.cc",
- "capture/smooth_event_sampler_unittest.cc",
- "capture/video_capture_oracle_unittest.cc",
+ "capture/content/animated_content_sampler_unittest.cc",
+ "capture/content/capture_resolution_chooser_unittest.cc",
+ "capture/content/feedback_signal_accumulator_unittest.cc",
+ "capture/content/smooth_event_sampler_unittest.cc",
+ "capture/content/video_capture_oracle_unittest.cc",
+ "capture/video/fake_video_capture_device_unittest.cc",
+ "capture/video/video_capture_device_unittest.cc",
"cdm/aes_decryptor_unittest.cc",
"cdm/json_web_key_unittest.cc",
"filters/audio_clock_unittest.cc",
@@ -539,6 +595,7 @@ test("media_unittests") {
"filters/frame_processor_unittest.cc",
"filters/h264_bit_reader_unittest.cc",
"filters/h264_parser_unittest.cc",
+ "filters/ivf_parser_unittest.cc",
"filters/jpeg_parser_unittest.cc",
"filters/source_buffer_stream_unittest.cc",
"filters/video_cadence_estimator_unittest.cc",
@@ -547,6 +604,8 @@ test("media_unittests") {
"filters/video_renderer_algorithm_unittest.cc",
"filters/vp8_bool_decoder_unittest.cc",
"filters/vp8_parser_unittest.cc",
+ "filters/vp9_parser_unittest.cc",
+ "filters/vp9_raw_bits_reader_unittest.cc",
"formats/common/offset_byte_queue_unittest.cc",
"formats/webm/cluster_builder.cc",
"formats/webm/cluster_builder.h",
@@ -562,11 +621,14 @@ test("media_unittests") {
"renderers/audio_renderer_impl_unittest.cc",
"renderers/renderer_impl_unittest.cc",
"renderers/video_renderer_impl_unittest.cc",
- "video/capture/fake_video_capture_device_unittest.cc",
- "video/capture/video_capture_device_unittest.cc",
+ "video/gpu_memory_buffer_video_frame_pool_unittest.cc",
"video/h264_poc_unittest.cc",
]
+ data = [
+ "test/data/",
+ ]
+
# TODO(wolenetz): Fix size_t to int trunctaion in win64.
# See http://crbug.com/171009
configs += [ "//build/config/compiler:no_size_t_to_int_warning" ]
@@ -576,6 +638,8 @@ test("media_unittests") {
":test_support",
"//base/allocator",
"//base/test:test_support",
+ "//gpu/command_buffer/common",
+ "//gpu:test_support",
"//media/audio:unittests",
"//media/audio:test_support",
"//media/base:unittests",
@@ -611,6 +675,11 @@ test("media_unittests") {
]
}
+ if (media_use_libwebm) {
+ sources += [ "capture/webm_muxer_unittest.cc" ]
+ deps += [ "//third_party/libwebm" ]
+ }
+
if (current_cpu != "arm" && is_chromeos) {
sources += [ "filters/h264_bitstream_buffer_unittest.cc" ]
}
@@ -645,6 +714,9 @@ test("media_unittests") {
"filters/ffmpeg_h264_to_annex_b_bitstream_converter_unittest.cc",
]
}
+ if (enable_hevc_demuxing) {
+ sources += [ "filters/h265_parser_unittest.cc" ]
+ }
}
if (is_mac || is_ios) {
@@ -653,7 +725,7 @@ test("media_unittests") {
if (is_mac) {
sources +=
- [ "video/capture/mac/video_capture_device_factory_mac_unittest.mm" ]
+ [ "capture/video/mac/video_capture_device_factory_mac_unittest.mm" ]
}
# include_dirs += [
@@ -731,6 +803,8 @@ component("shared_memory_support") {
sources = [
"audio/audio_parameters.cc",
"audio/audio_parameters.h",
+ "audio/point.cc",
+ "audio/point.h",
"base/audio_bus.cc",
"base/audio_bus.h",
"base/channel_layout.cc",
@@ -746,6 +820,7 @@ component("shared_memory_support") {
]
deps = [
"//base",
+ "//ui/gfx/geometry",
]
}
diff --git a/chromium/media/DEPS b/chromium/media/DEPS
index 6d3642e55b7..c3a4f49fc87 100644
--- a/chromium/media/DEPS
+++ b/chromium/media/DEPS
@@ -1,5 +1,6 @@
# Do NOT add net/ or ui/base without a great reason, they're huge!
include_rules = [
+ "+chromeos/audio",
"+crypto",
"+device/udev_linux",
"+device/usb",
@@ -7,7 +8,8 @@ include_rules = [
"+jni",
"+skia/ext",
"+third_party/ffmpeg",
- "+third_party/libvpx",
+ "+third_party/libwebm",
+ "+third_party/libvpx_new",
"+third_party/libyuv",
"+third_party/opus",
"+third_party/skia",
diff --git a/chromium/media/OWNERS b/chromium/media/OWNERS
index c992388c5cf..e65ac6fb6f6 100644
--- a/chromium/media/OWNERS
+++ b/chromium/media/OWNERS
@@ -3,7 +3,7 @@
# - cast
# - midi
# - ozone
-# - video/capture
+# - capture/{content,video}
# Instead prefer the OWNERS in the subdirectory as they will be more familiar,
# and to load balance. Only use OWNERS in this file for these subdirectories
# when doing refactorings and general cleanups.
diff --git a/chromium/media/audio/BUILD.gn b/chromium/media/audio/BUILD.gn
index 68e66c3ad20..28e424efcaa 100644
--- a/chromium/media/audio/BUILD.gn
+++ b/chromium/media/audio/BUILD.gn
@@ -44,6 +44,12 @@ if (!link_pulseaudio) {
}
}
+config("platform_config") {
+ if (use_alsa) {
+ defines = [ "USE_ALSA" ]
+ }
+}
+
source_set("audio") {
visibility = [ "//media/*" ]
sources = [
@@ -116,6 +122,7 @@ source_set("audio") {
deps = []
libs = []
configs += [
+ ":platform_config",
"//media:media_config",
"//media:media_implementation",
]
@@ -178,20 +185,12 @@ source_set("audio") {
deps += [ "//media/base/android:media_jni_headers" ]
}
- if (is_openbsd) {
- sources += [
- "openbsd/audio_manager_openbsd.cc",
- "openbsd/audio_manager_openbsd.h",
- ]
- }
-
if (is_linux) {
sources += [ "linux/audio_manager_linux.cc" ]
}
if (use_alsa) {
libs += [ "asound" ]
- defines = [ "USE_ALSA" ]
sources += [
"alsa/alsa_input.cc",
"alsa/alsa_input.h",
@@ -220,6 +219,7 @@ source_set("audio") {
packages = [ "libcras" ]
}
configs += [ ":libcras" ]
+ deps += [ "//chromeos:chromeos" ]
}
if (use_pulseaudio) {
@@ -262,6 +262,7 @@ source_set("test_support") {
"test_audio_input_controller_factory.cc",
"test_audio_input_controller_factory.h",
]
+ configs += [ ":platform_config" ]
deps = [
"//testing/gmock",
]
@@ -294,6 +295,7 @@ source_set("unittests") {
]
configs += [
+ ":platform_config",
"//build/config/compiler:no_size_t_to_int_warning",
"//media:media_config",
]
@@ -301,8 +303,6 @@ source_set("unittests") {
if (is_android) {
sources += [ "android/audio_android_unittest.cc" ]
deps += [ "//ui/gl" ]
- } else {
- sources += [ "audio_input_volume_unittest.cc" ]
}
if (is_mac) {
diff --git a/chromium/media/audio/agc_audio_stream.h b/chromium/media/audio/agc_audio_stream.h
index a2958ce391a..b0117e1962c 100644
--- a/chromium/media/audio/agc_audio_stream.h
+++ b/chromium/media/audio/agc_audio_stream.h
@@ -180,7 +180,7 @@ class MEDIA_EXPORT AgcAudioStream : public AudioInterface {
// Repeating timer which cancels itself when it goes out of scope.
// Used to check the microphone volume periodically.
- base::RepeatingTimer<AgcAudioStream<AudioInterface> > timer_;
+ base::RepeatingTimer timer_;
// True when automatic gain control is enabled, false otherwise.
bool agc_is_enabled_;
diff --git a/chromium/media/audio/alsa/audio_manager_alsa.cc b/chromium/media/audio/alsa/audio_manager_alsa.cc
index e44c8c85afe..29b6cb292e5 100644
--- a/chromium/media/audio/alsa/audio_manager_alsa.cc
+++ b/chromium/media/audio/alsa/audio_manager_alsa.cc
@@ -57,6 +57,7 @@ void AudioManagerAlsa::ShowLinuxAudioInputSettings() {
break;
case base::nix::DESKTOP_ENVIRONMENT_KDE3:
case base::nix::DESKTOP_ENVIRONMENT_KDE4:
+ case base::nix::DESKTOP_ENVIRONMENT_KDE5:
command_line.SetProgram(base::FilePath("kmix"));
break;
case base::nix::DESKTOP_ENVIRONMENT_UNITY:
@@ -163,9 +164,9 @@ void AudioManagerAlsa::GetAlsaDevicesInfo(
// still empty. Note, pulse has exclusively opened the default
// device, so we must open the device via the "default" moniker.
if (device_names->empty()) {
- device_names->push_front(media::AudioDeviceName(
- AudioManagerBase::kDefaultDeviceName,
- AudioManagerBase::kDefaultDeviceId));
+ device_names->push_front(
+ media::AudioDeviceName(AudioManager::GetDefaultDeviceName(),
+ AudioManagerBase::kDefaultDeviceId));
}
// Get the unique device name for the device.
@@ -327,9 +328,8 @@ AudioParameters AudioManagerAlsa::GetPreferredOutputStreamParameters(
if (user_buffer_size)
buffer_size = user_buffer_size;
- return AudioParameters(
- AudioParameters::AUDIO_PCM_LOW_LATENCY, channel_layout,
- sample_rate, bits_per_sample, buffer_size, AudioParameters::NO_EFFECTS);
+ return AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY, channel_layout,
+ sample_rate, bits_per_sample, buffer_size);
}
AudioOutputStream* AudioManagerAlsa::MakeOutputStream(
diff --git a/chromium/media/audio/android/audio_android_unittest.cc b/chromium/media/audio/android/audio_android_unittest.cc
index 64b3b4ba613..a6f94217c21 100644
--- a/chromium/media/audio/android/audio_android_unittest.cc
+++ b/chromium/media/audio/android/audio_android_unittest.cc
@@ -35,6 +35,7 @@ using ::testing::NotNull;
using ::testing::Return;
namespace media {
+namespace {
ACTION_P3(CheckCountAndPostQuitTask, count, limit, loop) {
if (++*count >= limit) {
@@ -42,17 +43,17 @@ ACTION_P3(CheckCountAndPostQuitTask, count, limit, loop) {
}
}
-static const char kSpeechFile_16b_s_48k[] = "speech_16b_stereo_48kHz.raw";
-static const char kSpeechFile_16b_m_48k[] = "speech_16b_mono_48kHz.raw";
-static const char kSpeechFile_16b_s_44k[] = "speech_16b_stereo_44kHz.raw";
-static const char kSpeechFile_16b_m_44k[] = "speech_16b_mono_44kHz.raw";
+const char kSpeechFile_16b_s_48k[] = "speech_16b_stereo_48kHz.raw";
+const char kSpeechFile_16b_m_48k[] = "speech_16b_mono_48kHz.raw";
+const char kSpeechFile_16b_s_44k[] = "speech_16b_stereo_44kHz.raw";
+const char kSpeechFile_16b_m_44k[] = "speech_16b_mono_44kHz.raw";
-static const float kCallbackTestTimeMs = 2000.0;
-static const int kBitsPerSample = 16;
-static const int kBytesPerSample = kBitsPerSample / 8;
+const float kCallbackTestTimeMs = 2000.0;
+const int kBitsPerSample = 16;
+const int kBytesPerSample = kBitsPerSample / 8;
// Converts AudioParameters::Format enumerator to readable string.
-static std::string FormatToString(AudioParameters::Format format) {
+std::string FormatToString(AudioParameters::Format format) {
switch (format) {
case AudioParameters::AUDIO_PCM_LINEAR:
return std::string("AUDIO_PCM_LINEAR");
@@ -67,7 +68,7 @@ static std::string FormatToString(AudioParameters::Format format) {
// Converts ChannelLayout enumerator to readable string. Does not include
// multi-channel cases since these layouts are not supported on Android.
-static std::string LayoutToString(ChannelLayout channel_layout) {
+std::string LayoutToString(ChannelLayout channel_layout) {
switch (channel_layout) {
case CHANNEL_LAYOUT_NONE:
return std::string("CHANNEL_LAYOUT_NONE");
@@ -81,7 +82,7 @@ static std::string LayoutToString(ChannelLayout channel_layout) {
}
}
-static double ExpectedTimeBetweenCallbacks(AudioParameters params) {
+double ExpectedTimeBetweenCallbacks(AudioParameters params) {
return (base::TimeDelta::FromMicroseconds(
params.frames_per_buffer() * base::Time::kMicrosecondsPerSecond /
static_cast<double>(params.sample_rate()))).InMillisecondsF();
@@ -89,7 +90,7 @@ static double ExpectedTimeBetweenCallbacks(AudioParameters params) {
// Helper method which verifies that the device list starts with a valid
// default device name followed by non-default device names.
-static void CheckDeviceNames(const AudioDeviceNames& device_names) {
+void CheckDeviceNames(const AudioDeviceNames& device_names) {
DVLOG(2) << "Got " << device_names.size() << " audio devices.";
if (device_names.empty()) {
// Log a warning so we can see the status on the build bots. No need to
@@ -102,8 +103,7 @@ static void CheckDeviceNames(const AudioDeviceNames& device_names) {
AudioDeviceNames::const_iterator it = device_names.begin();
// The first device in the list should always be the default device.
- EXPECT_EQ(std::string(AudioManagerBase::kDefaultDeviceName),
- it->device_name);
+ EXPECT_EQ(AudioManager::GetDefaultDeviceName(), it->device_name);
EXPECT_EQ(std::string(AudioManagerBase::kDefaultDeviceId), it->unique_id);
++it;
@@ -114,20 +114,20 @@ static void CheckDeviceNames(const AudioDeviceNames& device_names) {
EXPECT_FALSE(it->unique_id.empty());
DVLOG(2) << "Device ID(" << it->unique_id
<< "), label: " << it->device_name;
- EXPECT_NE(std::string(AudioManagerBase::kDefaultDeviceName),
- it->device_name);
- EXPECT_NE(std::string(AudioManagerBase::kDefaultDeviceId),
- it->unique_id);
+ EXPECT_NE(AudioManager::GetDefaultDeviceName(), it->device_name);
+ EXPECT_NE(std::string(AudioManagerBase::kDefaultDeviceId), it->unique_id);
++it;
}
}
// We clear the data bus to ensure that the test does not cause noise.
-static int RealOnMoreData(AudioBus* dest, uint32 total_bytes_delay) {
+int RealOnMoreData(AudioBus* dest, uint32 total_bytes_delay) {
dest->Zero();
return dest->frames();
}
+} // namespace
+
std::ostream& operator<<(std::ostream& os, const AudioParameters& params) {
using namespace std;
os << endl << "format: " << FormatToString(params.format()) << endl
@@ -589,16 +589,11 @@ class AudioAndroidInputTest : public AudioAndroidOutputTest,
AudioParameters GetInputStreamParameters() {
GetDefaultInputStreamParametersOnAudioThread();
+ AudioParameters params = audio_input_parameters();
// Override the platform effects setting to use the AudioRecord or OpenSLES
// path as requested.
- int effects = GetParam() ? AudioParameters::ECHO_CANCELLER :
- AudioParameters::NO_EFFECTS;
- AudioParameters params(audio_input_parameters().format(),
- audio_input_parameters().channel_layout(),
- audio_input_parameters().sample_rate(),
- audio_input_parameters().bits_per_sample(),
- audio_input_parameters().frames_per_buffer(),
- effects);
+ params.set_effects(GetParam() ? AudioParameters::ECHO_CANCELLER
+ : AudioParameters::NO_EFFECTS);
return params;
}
@@ -796,13 +791,8 @@ TEST_P(AudioAndroidInputTest, DISABLED_StartInputStreamCallbacks) {
// a 10ms buffer size instead of the default size.
TEST_P(AudioAndroidInputTest,
DISABLED_StartInputStreamCallbacksNonDefaultParameters) {
- AudioParameters native_params = GetInputStreamParameters();
- AudioParameters params(native_params.format(),
- native_params.channel_layout(),
- native_params.sample_rate(),
- native_params.bits_per_sample(),
- native_params.sample_rate() / 100,
- native_params.effects());
+ AudioParameters params = GetInputStreamParameters();
+ params.set_frames_per_buffer(params.sample_rate() / 100);
StartInputStreamCallbacks(params);
}
@@ -933,14 +923,8 @@ TEST_P(AudioAndroidInputTest,
// parameters by selecting 10ms as buffer size. This will also ensure that
// the output stream will be a mono stream since mono is default for input
// audio on Android.
- AudioParameters io_params(default_input_params.format(),
- default_input_params.channel_layout(),
- ChannelLayoutToChannelCount(
- default_input_params.channel_layout()),
- default_input_params.sample_rate(),
- default_input_params.bits_per_sample(),
- default_input_params.sample_rate() / 100,
- default_input_params.effects());
+ AudioParameters io_params = default_input_params;
+  io_params.set_frames_per_buffer(io_params.sample_rate() / 100);
DVLOG(1) << io_params;
// Create input and output streams using the common audio parameters.
diff --git a/chromium/media/audio/android/audio_manager_android.cc b/chromium/media/audio/android/audio_manager_android.cc
index 0590ffcc144..01379b8a97c 100644
--- a/chromium/media/audio/android/audio_manager_android.cc
+++ b/chromium/media/audio/android/audio_manager_android.cc
@@ -28,19 +28,21 @@ using base::android::ConvertUTF8ToJavaString;
using base::android::ScopedJavaLocalRef;
namespace media {
+namespace {
-static void AddDefaultDevice(AudioDeviceNames* device_names) {
+void AddDefaultDevice(AudioDeviceNames* device_names) {
DCHECK(device_names->empty());
- device_names->push_front(
- AudioDeviceName(AudioManagerBase::kDefaultDeviceName,
- AudioManagerBase::kDefaultDeviceId));
+ device_names->push_front(AudioDeviceName(AudioManager::GetDefaultDeviceName(),
+ AudioManagerBase::kDefaultDeviceId));
}
// Maximum number of output streams that can be open simultaneously.
-static const int kMaxOutputStreams = 10;
+const int kMaxOutputStreams = 10;
-static const int kDefaultInputBufferSize = 1024;
-static const int kDefaultOutputBufferSize = 2048;
+const int kDefaultInputBufferSize = 1024;
+const int kDefaultOutputBufferSize = 2048;
+
+} // namespace
AudioManager* CreateAudioManager(AudioLogFactory* audio_log_factory) {
return new AudioManagerAndroid(audio_log_factory);
@@ -140,9 +142,9 @@ AudioParameters AudioManagerAndroid::GetInputStreamParameters(
if (user_buffer_size)
buffer_size = user_buffer_size;
- AudioParameters params(
- AudioParameters::AUDIO_PCM_LOW_LATENCY, channel_layout,
- GetNativeOutputSampleRate(), 16, buffer_size, effects);
+ AudioParameters params(AudioParameters::AUDIO_PCM_LOW_LATENCY, channel_layout,
+ GetNativeOutputSampleRate(), 16, buffer_size);
+ params.set_effects(effects);
return params;
}
@@ -306,9 +308,8 @@ AudioParameters AudioManagerAndroid::GetPreferredOutputStreamParameters(
if (user_buffer_size)
buffer_size = user_buffer_size;
- return AudioParameters(
- AudioParameters::AUDIO_PCM_LOW_LATENCY, channel_layout,
- sample_rate, bits_per_sample, buffer_size, AudioParameters::NO_EFFECTS);
+ return AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY, channel_layout,
+ sample_rate, bits_per_sample, buffer_size);
}
bool AudioManagerAndroid::HasNoAudioInputStreams() {
diff --git a/chromium/media/audio/android/audio_manager_android.h b/chromium/media/audio/android/audio_manager_android.h
index c64e6ee267d..87745b2a06b 100644
--- a/chromium/media/audio/android/audio_manager_android.h
+++ b/chromium/media/audio/android/audio_manager_android.h
@@ -8,7 +8,6 @@
#include <set>
#include "base/android/jni_android.h"
-#include "base/gtest_prod_util.h"
#include "base/synchronization/lock.h"
#include "base/synchronization/waitable_event.h"
#include "media/audio/audio_manager_base.h"
@@ -20,7 +19,7 @@ class OpenSLESOutputStream;
// Android implemention of AudioManager.
class MEDIA_EXPORT AudioManagerAndroid : public AudioManagerBase {
public:
- AudioManagerAndroid(AudioLogFactory* audio_log_factory);
+ explicit AudioManagerAndroid(AudioLogFactory* audio_log_factory);
// Implementation of AudioManager.
bool HasAudioOutputDevices() override;
diff --git a/chromium/media/audio/audio_device_thread.cc b/chromium/media/audio/audio_device_thread.cc
index a0f283ee563..94472939794 100644
--- a/chromium/media/audio/audio_device_thread.cc
+++ b/chromium/media/audio/audio_device_thread.cc
@@ -183,8 +183,15 @@ void AudioDeviceThread::Thread::Run() {
callback_->Process(pending_data);
}
- // Let the other end know which buffer we just filled. The buffer index is
- // used to ensure the other end is getting the buffer it expects. For more
+ // The usage of |synchronized_buffers_| differs between input and output
+ // cases.
+ // Input:
+ // Let the other end know that we have read data, so that it can verify
+ // it doesn't overwrite any data before read. The |buffer_index| value is
+ // not used. For more details, see AudioInputSyncWriter::Write().
+ // Output:
+ // Let the other end know which buffer we just filled. The |buffer_index| is
+ // used to ensure the other end is getting the buffer it expects. For more
// details on how this works see AudioSyncReader::WaitUntilDataIsReady().
if (synchronized_buffers_) {
++buffer_index;
diff --git a/chromium/media/audio/audio_input_controller.cc b/chromium/media/audio/audio_input_controller.cc
index 323de961581..54eda2f0616 100644
--- a/chromium/media/audio/audio_input_controller.cc
+++ b/chromium/media/audio/audio_input_controller.cc
@@ -12,6 +12,7 @@
#include "base/thread_task_runner_handle.h"
#include "base/threading/thread_restrictions.h"
#include "base/time/time.h"
+#include "media/audio/audio_input_writer.h"
#include "media/base/user_input_monitor.h"
using base::TimeDelta;
@@ -134,7 +135,8 @@ AudioInputController::AudioInputController(EventHandler* handler,
log_silence_state_(false),
silence_state_(SILENCE_STATE_NO_MEASUREMENT),
#endif
- prev_key_down_count_(0) {
+ prev_key_down_count_(0),
+ input_writer_(nullptr) {
DCHECK(creator_task_runner_.get());
}
@@ -432,6 +434,8 @@ void AudioInputController::DoClose() {
log_silence_state_ = false;
#endif
+ input_writer_ = nullptr;
+
state_ = CLOSED;
}
@@ -505,6 +509,21 @@ void AudioInputController::OnData(AudioInputStream* stream,
const AudioBus* source,
uint32 hardware_delay_bytes,
double volume) {
+ // |input_writer_| should only be accessed on the audio thread, but as a means
+ // to avoid copying data and posting on the audio thread, we just check for
+ // non-null here.
+ if (input_writer_) {
+ scoped_ptr<AudioBus> source_copy =
+ AudioBus::Create(source->channels(), source->frames());
+ source->CopyTo(source_copy.get());
+ task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(
+ &AudioInputController::WriteInputDataForDebugging,
+ this,
+ base::Passed(&source_copy)));
+ }
+
// Mark data as active to ensure that the periodic calls to
// DoCheckForNoData() does not report an error to the event handler.
SetDataIsActive(true);
@@ -526,8 +545,7 @@ void AudioInputController::OnData(AudioInputStream* stream,
// Use SharedMemory and SyncSocket if the client has created a SyncWriter.
// Used by all low-latency clients except WebSpeech.
if (SharedMemoryAndSyncSocketMode()) {
- sync_writer_->Write(source, volume, key_pressed);
- sync_writer_->UpdateRecordedBytes(hardware_delay_bytes);
+ sync_writer_->Write(source, volume, key_pressed, hardware_delay_bytes);
#if defined(AUDIO_POWER_MONITORING)
// Only do power-level measurements if DoCreate() has been called. It will
@@ -624,6 +642,26 @@ void AudioInputController::OnError(AudioInputStream* stream) {
&AudioInputController::DoReportError, this));
}
+void AudioInputController::EnableDebugRecording(
+ AudioInputWriter* input_writer) {
+ task_runner_->PostTask(FROM_HERE, base::Bind(
+ &AudioInputController::DoEnableDebugRecording,
+ this,
+ input_writer));
+}
+
+void AudioInputController::DisableDebugRecording(
+ const base::Closure& callback) {
+ DCHECK(creator_task_runner_->BelongsToCurrentThread());
+ DCHECK(!callback.is_null());
+
+ task_runner_->PostTaskAndReply(
+ FROM_HERE,
+ base::Bind(&AudioInputController::DoDisableDebugRecording,
+ this),
+ callback);
+}
+
void AudioInputController::DoStopCloseAndClearStream() {
DCHECK(task_runner_->BelongsToCurrentThread());
@@ -676,4 +714,23 @@ void AudioInputController::LogSilenceState(SilenceState value) {
}
#endif
+void AudioInputController::DoEnableDebugRecording(
+ AudioInputWriter* input_writer) {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK(!input_writer_);
+ input_writer_ = input_writer;
+}
+
+void AudioInputController::DoDisableDebugRecording() {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ input_writer_ = nullptr;
+}
+
+void AudioInputController::WriteInputDataForDebugging(
+ scoped_ptr<AudioBus> data) {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ if (input_writer_)
+ input_writer_->Write(data.Pass());
+}
+
} // namespace media
diff --git a/chromium/media/audio/audio_input_controller.h b/chromium/media/audio/audio_input_controller.h
index 5bfe6bd1e8c..58e04640aaa 100644
--- a/chromium/media/audio/audio_input_controller.h
+++ b/chromium/media/audio/audio_input_controller.h
@@ -8,6 +8,7 @@
#include <string>
#include "base/atomicops.h"
#include "base/callback.h"
+#include "base/files/file.h"
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
#include "base/synchronization/lock.h"
@@ -79,6 +80,7 @@ namespace media {
#define AUDIO_POWER_MONITORING
#endif
+class AudioInputWriter;
class UserInputMonitor;
class MEDIA_EXPORT AudioInputController
@@ -127,18 +129,15 @@ class MEDIA_EXPORT AudioInputController
// A synchronous writer interface used by AudioInputController for
// synchronous writing.
- class SyncWriter {
+ class MEDIA_EXPORT SyncWriter {
public:
virtual ~SyncWriter() {}
- // Notify the synchronous writer about the number of bytes in the
- // soundcard which has been recorded.
- virtual void UpdateRecordedBytes(uint32 bytes) = 0;
-
// Write certain amount of data from |data|.
virtual void Write(const AudioBus* data,
double volume,
- bool key_pressed) = 0;
+ bool key_pressed,
+ uint32 hardware_delay_bytes) = 0;
// Close this synchronous writer.
virtual void Close() = 0;
@@ -232,6 +231,13 @@ class MEDIA_EXPORT AudioInputController
bool SharedMemoryAndSyncSocketMode() const { return sync_writer_ != NULL; }
+ // Enable debug recording of audio input.
+ void EnableDebugRecording(AudioInputWriter* input_writer);
+
+  // Disable debug recording of audio input. Must be called before owner of
+ // |input_writer| deletes it.
+ void DisableDebugRecording(const base::Closure& callback);
+
protected:
friend class base::RefCountedThreadSafe<AudioInputController>;
@@ -306,6 +312,14 @@ class MEDIA_EXPORT AudioInputController
void LogSilenceState(SilenceState value);
#endif
+ // Enable and disable debug recording of audio input. Called on the audio
+ // thread.
+ void DoEnableDebugRecording(AudioInputWriter* input_writer);
+ void DoDisableDebugRecording();
+
+ // Called on the audio thread.
+ void WriteInputDataForDebugging(scoped_ptr<AudioBus> data);
+
// Gives access to the task runner of the creating thread.
scoped_refptr<base::SingleThreadTaskRunner> creator_task_runner_;
@@ -369,6 +383,9 @@ class MEDIA_EXPORT AudioInputController
// Time when a low-latency stream is created.
base::TimeTicks low_latency_create_time_;
+ // Used for audio debug recordings. Accessed on audio thread.
+ AudioInputWriter* input_writer_;
+
DISALLOW_COPY_AND_ASSIGN(AudioInputController);
};
diff --git a/chromium/media/audio/audio_input_device.cc b/chromium/media/audio/audio_input_device.cc
index 9f46b55a1b9..02fa7a7ad68 100644
--- a/chromium/media/audio/audio_input_device.cc
+++ b/chromium/media/audio/audio_input_device.cc
@@ -6,6 +6,7 @@
#include "base/bind.h"
#include "base/memory/scoped_vector.h"
+#include "base/strings/stringprintf.h"
#include "base/threading/thread_restrictions.h"
#include "base/time/time.h"
#include "media/audio/audio_manager_base.h"
@@ -39,6 +40,7 @@ class AudioInputDevice::AudioThreadCallback
private:
int current_segment_id_;
+ uint32 last_buffer_id_;
ScopedVector<media::AudioBus> audio_buses_;
CaptureCallback* capture_callback_;
@@ -140,7 +142,7 @@ void AudioInputDevice::OnStreamCreated(
audio_callback_.reset(new AudioInputDevice::AudioThreadCallback(
audio_parameters_, handle, length, total_segments, callback_));
audio_thread_.Start(
- audio_callback_.get(), socket_handle, "AudioInputDevice", false);
+ audio_callback_.get(), socket_handle, "AudioInputDevice", true);
state_ = RECORDING;
ipc_->RecordStream();
@@ -176,7 +178,8 @@ void AudioInputDevice::OnStateChanged(
// object. Possibly require calling Initialize again or provide
// a callback object via Start() and clear it in Stop().
if (!audio_thread_.IsStopped())
- callback_->OnCaptureError();
+ callback_->OnCaptureError(
+ "AudioInputDevice::OnStateChanged - audio thread still running");
break;
default:
NOTREACHED();
@@ -272,6 +275,7 @@ AudioInputDevice::AudioThreadCallback::AudioThreadCallback(
: AudioDeviceThread::Callback(audio_parameters, memory, memory_length,
total_segments),
current_segment_id_(0),
+ last_buffer_id_(UINT32_MAX),
capture_callback_(capture_callback) {
}
@@ -300,21 +304,39 @@ void AudioInputDevice::AudioThreadCallback::Process(uint32 pending_data) {
uint8* ptr = static_cast<uint8*>(shared_memory_.memory());
ptr += current_segment_id_ * segment_length_;
AudioInputBuffer* buffer = reinterpret_cast<AudioInputBuffer*>(ptr);
+
// Usually this will be equal but in the case of low sample rate (e.g. 8kHz,
// the buffer may be bigger (on mac at least)).
DCHECK_GE(buffer->params.size,
segment_length_ - sizeof(AudioInputBufferParameters));
- double volume = buffer->params.volume;
- bool key_pressed = buffer->params.key_pressed;
+
+ // Verify correct sequence.
+ if (buffer->params.id != last_buffer_id_ + 1) {
+ std::string message = base::StringPrintf(
+ "Incorrect buffer sequence. Expected = %u. Actual = %u.",
+ last_buffer_id_ + 1, buffer->params.id);
+ LOG(ERROR) << message;
+ capture_callback_->OnCaptureError(message);
+ }
+ if (current_segment_id_ != static_cast<int>(pending_data)) {
+ std::string message = base::StringPrintf(
+ "Segment id not matching. Remote = %u. Local = %d.",
+ pending_data, current_segment_id_);
+ LOG(ERROR) << message;
+ capture_callback_->OnCaptureError(message);
+ }
+ last_buffer_id_ = buffer->params.id;
// Use pre-allocated audio bus wrapping existing block of shared memory.
media::AudioBus* audio_bus = audio_buses_[current_segment_id_];
- // Deliver captured data to the client in floating point format
- // and update the audio-delay measurement.
- int audio_delay_milliseconds = pending_data / bytes_per_ms_;
+ // Deliver captured data to the client in floating point format and update
+ // the audio delay measurement.
capture_callback_->Capture(
- audio_bus, audio_delay_milliseconds, volume, key_pressed);
+ audio_bus,
+ buffer->params.hardware_delay_bytes / bytes_per_ms_, // Delay in ms
+ buffer->params.volume,
+ buffer->params.key_pressed);
if (++current_segment_id_ >= total_segments_)
current_segment_id_ = 0;
diff --git a/chromium/media/audio/audio_input_volume_unittest.cc b/chromium/media/audio/audio_input_volume_unittest.cc
deleted file mode 100644
index 3fa1f40b907..00000000000
--- a/chromium/media/audio/audio_input_volume_unittest.cc
+++ /dev/null
@@ -1,170 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <cmath>
-
-#include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
-#include "media/audio/audio_io.h"
-#include "media/audio/audio_manager_base.h"
-#include "media/audio/audio_unittest_util.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-#if defined(OS_WIN)
-#include "base/win/scoped_com_initializer.h"
-#include "media/audio/win/core_audio_util_win.h"
-#endif
-
-namespace media {
-
-double GetVolumeAfterSetVolumeOnLinux(AudioInputStream* ais,
- double target_volume) {
- // SetVolume() is asynchronous on Linux, we need to keep trying until
- // the SetVolume() operation is done.
- static const int kTimesToRun = 10;
- double volume = 0.0;
- for (int i = 0; i < kTimesToRun; ++i) {
- volume = ais->GetVolume();
- if (volume == target_volume)
- break;
-
- // Sleep 100ms to wait for the operation.
- base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(100));
- }
-
- return volume;
-}
-
-class AudioInputVolumeTest : public ::testing::Test {
- protected:
- AudioInputVolumeTest() : audio_manager_(AudioManager::CreateForTesting()) {}
-
- bool HasCoreAudioAndInputDevices() {
-#if defined(OS_WIN)
- // TODO(henrika): add support for volume control on Windows XP as well.
- if (!CoreAudioUtil::IsSupported())
- return false;
-#endif
- return audio_manager_->HasAudioInputDevices();
- }
-
- // Helper method which checks if the stream has volume support.
- bool HasDeviceVolumeControl(AudioInputStream* stream) {
- if (!stream)
- return false;
-
- return (stream->GetMaxVolume() != 0.0);
- }
-
- AudioInputStream* CreateAndOpenStream(const std::string& device_id) {
- const AudioParameters& params =
- audio_manager_->GetInputStreamParameters(device_id);
- AudioInputStream* ais = audio_manager_->MakeAudioInputStream(
- params, device_id);
- EXPECT_TRUE(NULL != ais);
-
-#if defined(OS_LINUX) || defined(OS_OPENBSD)
- // Some linux devices do not support our settings, we may fail to open
- // those devices.
- if (!ais->Open()) {
- // Default device should always be able to be opened.
- EXPECT_TRUE(AudioManagerBase::kDefaultDeviceId != device_id);
- ais->Close();
- ais = NULL;
- }
-#elif defined(OS_WIN) || defined(OS_MACOSX)
- EXPECT_TRUE(ais->Open());
-#endif
-
- return ais;
- }
-
- scoped_ptr<AudioManager> audio_manager_;
-};
-
-#if defined(OS_LINUX) && !defined(OS_CHROMEOS)
-// Currently failing on linux ARM bot: http://crbug/238490
-// Also flaky on x86_64: http://crbug/236936
-#define MAYBE_InputVolumeTest DISABLED_InputVolumeTest
-#else
-#define MAYBE_InputVolumeTest InputVolumeTest
-#endif
-
-TEST_F(AudioInputVolumeTest, MAYBE_InputVolumeTest) {
- ABORT_AUDIO_TEST_IF_NOT(HasCoreAudioAndInputDevices());
-
- // Retrieve a list of all available input devices.
- AudioDeviceNames device_names;
- audio_manager_->GetAudioInputDeviceNames(&device_names);
- if (device_names.empty()) {
- LOG(WARNING) << "Could not find any available input device";
- return;
- }
-
- // Scan all available input devices and repeat the same test for all of them.
- for (AudioDeviceNames::const_iterator it = device_names.begin();
- it != device_names.end();
- ++it) {
- AudioInputStream* ais = CreateAndOpenStream(it->unique_id);
- if (!ais) {
- DLOG(WARNING) << "Failed to open stream for device " << it->unique_id;
- continue;
- }
-
- if (!HasDeviceVolumeControl(ais)) {
- DLOG(WARNING) << "Device: " << it->unique_id
- << ", does not have volume control.";
- ais->Close();
- continue;
- }
-
- double max_volume = ais->GetMaxVolume();
- EXPECT_GT(max_volume, 0.0);
-
- // Store the current input-device volume level.
- double original_volume = ais->GetVolume();
- EXPECT_GE(original_volume, 0.0);
-#if defined(OS_WIN) || defined(OS_MACOSX)
- // Note that |original_volume| can be higher than |max_volume| on Linux.
- EXPECT_LE(original_volume, max_volume);
-#endif
-
- // Set the volume to the maxiumum level..
- ais->SetVolume(max_volume);
- double current_volume = ais->GetVolume();
- EXPECT_EQ(max_volume, current_volume);
-
- // Set the volume to the minimum level (=0).
- double new_volume = 0.0;
- ais->SetVolume(new_volume);
-#if defined(OS_LINUX)
- current_volume = GetVolumeAfterSetVolumeOnLinux(ais, new_volume);
-#else
- current_volume = ais->GetVolume();
-#endif
- EXPECT_EQ(new_volume, current_volume);
-
- // Set the volume to the mid level (50% of max).
- // Verify that the absolute error is small enough.
- new_volume = max_volume / 2;
- ais->SetVolume(new_volume);
-#if defined(OS_LINUX)
- current_volume = GetVolumeAfterSetVolumeOnLinux(ais, new_volume);
-#else
- current_volume = ais->GetVolume();
-#endif
- EXPECT_LT(current_volume, max_volume);
- EXPECT_GT(current_volume, 0);
- EXPECT_NEAR(current_volume, new_volume, 0.25 * max_volume);
-
- // Restores the volume to the original value.
- ais->SetVolume(original_volume);
- current_volume = ais->GetVolume();
- EXPECT_EQ(original_volume, current_volume);
-
- ais->Close();
- }
-}
-
-} // namespace media
diff --git a/chromium/media/audio/audio_input_writer.h b/chromium/media/audio/audio_input_writer.h
new file mode 100644
index 00000000000..700570b0318
--- /dev/null
+++ b/chromium/media/audio/audio_input_writer.h
@@ -0,0 +1,24 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_AUDIO_INPUT_WRITER_H_
+#define MEDIA_AUDIO_AUDIO_INPUT_WRITER_H_
+
+namespace media {
+
+class AudioBus;
+
+// A writer interface used by AudioInputController for writing audio data to
+// file for debugging purposes.
+class AudioInputWriter {
+ public:
+ virtual ~AudioInputWriter() {}
+
+ // Write |data| to file.
+ virtual void Write(scoped_ptr<AudioBus> data) = 0;
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_AUDIO_INPUT_WRITER_H_
diff --git a/chromium/media/audio/audio_manager.cc b/chromium/media/audio/audio_manager.cc
index 0c2aeb87892..ebdfc4e2d91 100644
--- a/chromium/media/audio/audio_manager.cc
+++ b/chromium/media/audio/audio_manager.cc
@@ -15,6 +15,7 @@
#include "build/build_config.h"
#include "media/audio/audio_manager_factory.h"
#include "media/audio/fake_audio_log_factory.h"
+#include "media/base/media_resources.h"
#include "media/base/media_switches.h"
#if defined(OS_WIN)
@@ -25,14 +26,14 @@ namespace media {
namespace {
// The singleton instance of AudioManager. This is set when Create() is called.
-AudioManager* g_last_created = NULL;
+AudioManager* g_last_created = nullptr;
// The singleton instance of AudioManagerFactory. This is only set if
// SetFactory() is called. If it is set when Create() is called, its
// CreateInstance() function is used to set |g_last_created|. Otherwise, the
// linked implementation of media::CreateAudioManager is used to set
// |g_last_created|.
-AudioManagerFactory* g_audio_manager_factory = NULL;
+AudioManagerFactory* g_audio_manager_factory = nullptr;
// Maximum number of failed pings to the audio thread allowed. A crash will be
// issued once this count is reached. We require at least two pings before
@@ -156,6 +157,16 @@ class AudioManagerHelper : public base::PowerObserver {
}
#endif
+#if defined(OS_LINUX)
+ void set_app_name(const std::string& app_name) {
+ app_name_ = app_name;
+ }
+
+ const std::string& app_name() const {
+ return app_name_;
+ }
+#endif
+
private:
FakeAudioLogFactory fake_log_factory_;
@@ -173,13 +184,18 @@ class AudioManagerHelper : public base::PowerObserver {
scoped_ptr<base::win::ScopedCOMInitializer> com_initializer_for_testing_;
#endif
+#if defined(OS_LINUX)
+ std::string app_name_;
+#endif
+
DISALLOW_COPY_AND_ASSIGN(AudioManagerHelper);
};
-static bool g_hang_monitor_enabled = false;
+bool g_hang_monitor_enabled = false;
-static base::LazyInstance<AudioManagerHelper>::Leaky g_helper =
+base::LazyInstance<AudioManagerHelper>::Leaky g_helper =
LAZY_INSTANCE_INITIALIZER;
+
} // namespace
// Forward declaration of the platform specific AudioManager factory function.
@@ -189,7 +205,7 @@ AudioManager::AudioManager() {}
AudioManager::~AudioManager() {
CHECK(!g_last_created || g_last_created == this);
- g_last_created = NULL;
+ g_last_created = nullptr;
}
// static
@@ -251,9 +267,36 @@ void AudioManager::EnableHangMonitor() {
#endif
}
+#if defined(OS_LINUX)
+// static
+void AudioManager::SetGlobalAppName(const std::string& app_name) {
+ g_helper.Pointer()->set_app_name(app_name);
+}
+
+// static
+const std::string& AudioManager::GetGlobalAppName() {
+ return g_helper.Pointer()->app_name();
+}
+#endif
+
// static
AudioManager* AudioManager::Get() {
return g_last_created;
}
+// static
+std::string AudioManager::GetDefaultDeviceName() {
+ return GetLocalizedStringUTF8(DEFAULT_AUDIO_DEVICE_NAME);
+}
+
+// static
+std::string AudioManager::GetCommunicationsDeviceName() {
+#if defined(OS_WIN)
+ return GetLocalizedStringUTF8(COMMUNICATIONS_AUDIO_DEVICE_NAME);
+#else
+ NOTREACHED();
+ return "";
+#endif
+}
+
} // namespace media
diff --git a/chromium/media/audio/audio_manager.h b/chromium/media/audio/audio_manager.h
index 1391742fe5f..4da250df2d7 100644
--- a/chromium/media/audio/audio_manager.h
+++ b/chromium/media/audio/audio_manager.h
@@ -61,6 +61,15 @@ class MEDIA_EXPORT AudioManager {
// called previously to start the hang monitor. Does nothing on OSX.
static void EnableHangMonitor();
+#if defined(OS_LINUX)
+ // Sets the name of the audio source as seen by external apps. Only actually
+ // used with PulseAudio as of this writing.
+ static void SetGlobalAppName(const std::string& app_name);
+
+ // Returns the app name or an empty string if it is not set.
+ static const std::string& GetGlobalAppName();
+#endif
+
// Should only be used for testing. Resets a previously-set
// AudioManagerFactory. The instance of AudioManager is not affected.
static void ResetFactoryForTesting();
@@ -70,6 +79,13 @@ class MEDIA_EXPORT AudioManager {
// like src/chrome.
static AudioManager* Get();
+ // Returns the localized name of the generic "default" device.
+ static std::string GetDefaultDeviceName();
+
+ // Returns the localized name of the generic default communications device.
+ // This device is not supported on all platforms.
+ static std::string GetCommunicationsDeviceName();
+
// Returns true if the OS reports existence of audio devices. This does not
// guarantee that the existing devices support all formats and sample rates.
virtual bool HasAudioOutputDevices() = 0;
diff --git a/chromium/media/audio/audio_manager_base.cc b/chromium/media/audio/audio_manager_base.cc
index d5106f6acb1..8c1dda29f70 100644
--- a/chromium/media/audio/audio_manager_base.cc
+++ b/chromium/media/audio/audio_manager_base.cc
@@ -19,21 +19,24 @@
#include "media/base/media_switches.h"
namespace media {
+namespace {
-static const int kStreamCloseDelaySeconds = 5;
+const int kStreamCloseDelaySeconds = 5;
// Default maximum number of output streams that can be open simultaneously
// for all platforms.
-static const int kDefaultMaxOutputStreams = 16;
+const int kDefaultMaxOutputStreams = 16;
// Default maximum number of input streams that can be open simultaneously
// for all platforms.
-static const int kDefaultMaxInputStreams = 16;
+const int kDefaultMaxInputStreams = 16;
-static const int kMaxInputChannels = 3;
+const int kMaxInputChannels = 3;
+
+} // namespace
-const char AudioManagerBase::kDefaultDeviceName[] = "Default";
const char AudioManagerBase::kDefaultDeviceId[] = "default";
+const char AudioManagerBase::kCommunicationsDeviceId[] = "communications";
const char AudioManagerBase::kLoopbackInputDeviceId[] = "loopback";
struct AudioManagerBase::DispatcherParams {
@@ -261,10 +264,11 @@ AudioOutputStream* AudioManagerBase::MakeAudioOutputStreamProxy(
<< output_params.frames_per_buffer();
// Tell the AudioManager to create a fake output device.
- output_params = AudioParameters(
- AudioParameters::AUDIO_FAKE, params.channel_layout(),
- params.sample_rate(), params.bits_per_sample(),
- params.frames_per_buffer());
+ output_params = params;
+ output_params.set_format(AudioParameters::AUDIO_FAKE);
+ } else if (params.effects() != output_params.effects()) {
+ // Turn off effects that weren't requested.
+ output_params.set_effects(params.effects() & output_params.effects());
}
}
diff --git a/chromium/media/audio/audio_manager_base.h b/chromium/media/audio/audio_manager_base.h
index 0ac7020435e..cd9471316ff 100644
--- a/chromium/media/audio/audio_manager_base.h
+++ b/chromium/media/audio/audio_manager_base.h
@@ -28,14 +28,15 @@ class AudioOutputDispatcher;
// AudioManagerBase provides AudioManager functions common for all platforms.
class MEDIA_EXPORT AudioManagerBase : public AudioManager {
public:
- // TODO(sergeyu): The constants below belong to AudioManager interface, not
- // to the base implementation.
-
- // Name of the generic "default" device.
- static const char kDefaultDeviceName[];
- // Unique Id of the generic "default" device.
+ // TODO(ajm): Move these strings to AudioManager.
+ // Unique Id of the generic "default" device. Associated with the localized
+ // name returned from GetDefaultDeviceName().
static const char kDefaultDeviceId[];
+ // Unique Id of the generic default communications device. Associated with
+ // the localized name returned from GetCommunicationsDeviceName().
+ static const char kCommunicationsDeviceId[];
+
// Input device ID used to capture the default system playback stream. When
// this device ID is passed to MakeAudioInputStream() the returned
// AudioInputStream will be capturing audio currently being played on the
@@ -48,28 +49,39 @@ class MEDIA_EXPORT AudioManagerBase : public AudioManager {
~AudioManagerBase() override;
+ // AudioManager:
scoped_refptr<base::SingleThreadTaskRunner> GetTaskRunner() override;
scoped_refptr<base::SingleThreadTaskRunner> GetWorkerTaskRunner() override;
-
base::string16 GetAudioInputDeviceModel() override;
-
void ShowAudioInputSettings() override;
-
void GetAudioInputDeviceNames(AudioDeviceNames* device_names) override;
-
void GetAudioOutputDeviceNames(AudioDeviceNames* device_names) override;
-
AudioOutputStream* MakeAudioOutputStream(
const AudioParameters& params,
const std::string& device_id) override;
-
AudioInputStream* MakeAudioInputStream(const AudioParameters& params,
const std::string& device_id) override;
-
AudioOutputStream* MakeAudioOutputStreamProxy(
const AudioParameters& params,
const std::string& device_id) override;
+ // Listeners will be notified on the GetTaskRunner() task runner.
+ void AddOutputDeviceChangeListener(AudioDeviceListener* listener) override;
+ void RemoveOutputDeviceChangeListener(AudioDeviceListener* listener) override;
+
+ AudioParameters GetDefaultOutputStreamParameters() override;
+ AudioParameters GetOutputStreamParameters(
+ const std::string& device_id) override;
+ AudioParameters GetInputStreamParameters(
+ const std::string& device_id) override;
+ std::string GetAssociatedOutputDeviceID(
+ const std::string& input_device_id) override;
+ scoped_ptr<AudioLog> CreateAudioLog(
+ AudioLogFactory::AudioComponent component) override;
+ void SetHasKeyboardMic() override;
+
+ // AudioManagerBase:
+
// Called internally by the audio stream when it has been closed.
virtual void ReleaseOutputStream(AudioOutputStream* stream);
virtual void ReleaseInputStream(AudioInputStream* stream);
@@ -93,25 +105,6 @@ class MEDIA_EXPORT AudioManagerBase : public AudioManager {
virtual AudioInputStream* MakeLowLatencyInputStream(
const AudioParameters& params, const std::string& device_id) = 0;
- // Listeners will be notified on the GetTaskRunner() task runner.
- void AddOutputDeviceChangeListener(AudioDeviceListener* listener) override;
- void RemoveOutputDeviceChangeListener(AudioDeviceListener* listener) override;
-
- AudioParameters GetDefaultOutputStreamParameters() override;
- AudioParameters GetOutputStreamParameters(
- const std::string& device_id) override;
-
- AudioParameters GetInputStreamParameters(
- const std::string& device_id) override;
-
- std::string GetAssociatedOutputDeviceID(
- const std::string& input_device_id) override;
-
- scoped_ptr<AudioLog> CreateAudioLog(
- AudioLogFactory::AudioComponent component) override;
-
- void SetHasKeyboardMic() override;
-
// Get number of input or output streams.
int input_stream_count() const { return num_input_streams_; }
int output_stream_count() const { return num_output_streams_; }
diff --git a/chromium/media/audio/audio_manager_unittest.cc b/chromium/media/audio/audio_manager_unittest.cc
index 48bb51cee5c..9fb0182de0e 100644
--- a/chromium/media/audio/audio_manager_unittest.cc
+++ b/chromium/media/audio/audio_manager_unittest.cc
@@ -81,8 +81,7 @@ class AudioManagerTest : public ::testing::Test {
AudioDeviceNames::const_iterator it = device_names.begin();
// The first device in the list should always be the default device.
- EXPECT_EQ(std::string(AudioManagerBase::kDefaultDeviceName),
- it->device_name);
+ EXPECT_EQ(AudioManager::GetDefaultDeviceName(), it->device_name);
EXPECT_EQ(std::string(AudioManagerBase::kDefaultDeviceId), it->unique_id);
++it;
@@ -93,8 +92,7 @@ class AudioManagerTest : public ::testing::Test {
EXPECT_FALSE(it->unique_id.empty());
DVLOG(2) << "Device ID(" << it->unique_id
<< "), label: " << it->device_name;
- EXPECT_NE(std::string(AudioManagerBase::kDefaultDeviceName),
- it->device_name);
+ EXPECT_NE(AudioManager::GetDefaultDeviceName(), it->device_name);
EXPECT_NE(std::string(AudioManagerBase::kDefaultDeviceId),
it->unique_id);
++it;
diff --git a/chromium/media/audio/audio_output_controller.cc b/chromium/media/audio/audio_output_controller.cc
index 8af0aabb246..59f541e7ac3 100644
--- a/chromium/media/audio/audio_output_controller.cc
+++ b/chromium/media/audio/audio_output_controller.cc
@@ -176,7 +176,7 @@ void AudioOutputController::DoPlay() {
// Timer self-manages its lifetime and WedgeCheck() will only record the UMA
// statistic if state is still kPlaying. Additional Start() calls will
// invalidate the previous timer.
- wedge_timer_.reset(new base::OneShotTimer<AudioOutputController>());
+ wedge_timer_.reset(new base::OneShotTimer());
wedge_timer_->Start(
FROM_HERE, TimeDelta::FromSeconds(5), this,
&AudioOutputController::WedgeCheck);
diff --git a/chromium/media/audio/audio_output_controller.h b/chromium/media/audio/audio_output_controller.h
index 298c56576dc..f1711d5a607 100644
--- a/chromium/media/audio/audio_output_controller.h
+++ b/chromium/media/audio/audio_output_controller.h
@@ -248,7 +248,7 @@ class MEDIA_EXPORT AudioOutputController
// Flags when we've asked for a stream to start but it never did.
base::AtomicRefCount on_more_io_data_called_;
- scoped_ptr<base::OneShotTimer<AudioOutputController> > wedge_timer_;
+ scoped_ptr<base::OneShotTimer> wedge_timer_;
DISALLOW_COPY_AND_ASSIGN(AudioOutputController);
};
diff --git a/chromium/media/audio/audio_output_controller_unittest.cc b/chromium/media/audio/audio_output_controller_unittest.cc
index 3e9229da9ba..dd7cb8075d3 100644
--- a/chromium/media/audio/audio_output_controller_unittest.cc
+++ b/chromium/media/audio/audio_output_controller_unittest.cc
@@ -217,7 +217,7 @@ class AudioOutputControllerTest : public testing::Test {
.WillOnce(SignalEvent(&play_event_));
}
- controller_->SwitchOutputDevice(AudioManagerBase::kDefaultDeviceName,
+ controller_->SwitchOutputDevice(AudioManager::GetDefaultDeviceName(),
base::Bind(&base::DoNothing));
}
diff --git a/chromium/media/audio/audio_output_device.cc b/chromium/media/audio/audio_output_device.cc
index 3213400ff34..8452bd65d8e 100644
--- a/chromium/media/audio/audio_output_device.cc
+++ b/chromium/media/audio/audio_output_device.cc
@@ -4,8 +4,6 @@
#include "media/audio/audio_output_device.h"
-#include <string>
-
#include "base/callback_helpers.h"
#include "base/threading/thread_restrictions.h"
#include "base/time/time.h"
@@ -41,38 +39,42 @@ class AudioOutputDevice::AudioThreadCallback
AudioOutputDevice::AudioOutputDevice(
scoped_ptr<AudioOutputIPC> ipc,
- const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner)
+ const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner,
+ int session_id,
+ const std::string& device_id,
+ const url::Origin& security_origin)
: ScopedTaskRunnerObserver(io_task_runner),
callback_(NULL),
ipc_(ipc.Pass()),
state_(IDLE),
+ start_on_authorized_(false),
play_on_start_(true),
- session_id_(-1),
+ session_id_(session_id),
+ device_id_(device_id),
+ security_origin_(security_origin),
stopping_hack_(false),
- current_switch_request_id_(0) {
+ switch_output_device_on_start_(false),
+ did_receive_auth_(true, false),
+ device_status_(OUTPUT_DEVICE_STATUS_ERROR_INTERNAL) {
CHECK(ipc_);
// The correctness of the code depends on the relative values assigned in the
// State enum.
static_assert(IPC_CLOSED < IDLE, "invalid enum value assignment 0");
- static_assert(IDLE < CREATING_STREAM, "invalid enum value assignment 1");
- static_assert(CREATING_STREAM < PAUSED, "invalid enum value assignment 2");
- static_assert(PAUSED < PLAYING, "invalid enum value assignment 3");
+ static_assert(IDLE < AUTHORIZING, "invalid enum value assignment 1");
+ static_assert(AUTHORIZING < AUTHORIZED, "invalid enum value assignment 2");
+ static_assert(AUTHORIZED < CREATING_STREAM,
+ "invalid enum value assignment 3");
+ static_assert(CREATING_STREAM < PAUSED, "invalid enum value assignment 4");
+ static_assert(PAUSED < PLAYING, "invalid enum value assignment 5");
}
-void AudioOutputDevice::InitializeWithSessionId(const AudioParameters& params,
- RenderCallback* callback,
- int session_id) {
- DCHECK(!callback_) << "Calling InitializeWithSessionId() twice?";
+void AudioOutputDevice::Initialize(const AudioParameters& params,
+ RenderCallback* callback) {
+ DCHECK(!callback_) << "Calling Initialize() twice?";
DCHECK(params.IsValid());
audio_parameters_ = params;
callback_ = callback;
- session_id_ = session_id;
-}
-
-void AudioOutputDevice::Initialize(const AudioParameters& params,
- RenderCallback* callback) {
- InitializeWithSessionId(params, callback, 0);
}
AudioOutputDevice::~AudioOutputDevice() {
@@ -84,11 +86,18 @@ AudioOutputDevice::~AudioOutputDevice() {
// its bound parameters in the correct thread instead of implicitly releasing
// them in the thread where this destructor runs.
if (!current_switch_callback_.is_null()) {
- base::ResetAndReturn(&current_switch_callback_).Run(
- SWITCH_OUTPUT_DEVICE_RESULT_ERROR_OBSOLETE);
+ base::ResetAndReturn(&current_switch_callback_)
+ .Run(OUTPUT_DEVICE_STATUS_ERROR_INTERNAL);
}
}
+void AudioOutputDevice::RequestDeviceAuthorization() {
+ task_runner()->PostTask(
+ FROM_HERE,
+ base::Bind(&AudioOutputDevice::RequestDeviceAuthorizationOnIOThread,
+ this));
+}
+
void AudioOutputDevice::Start() {
DCHECK(callback_) << "Initialize hasn't been called";
task_runner()->PostTask(FROM_HERE,
@@ -129,21 +138,73 @@ bool AudioOutputDevice::SetVolume(double volume) {
return true;
}
+OutputDevice* AudioOutputDevice::GetOutputDevice() {
+ return this;
+}
+
void AudioOutputDevice::SwitchOutputDevice(
const std::string& device_id,
- const GURL& security_origin,
+ const url::Origin& security_origin,
const SwitchOutputDeviceCB& callback) {
- DVLOG(1) << __FUNCTION__ << "(" << device_id << ")";
task_runner()->PostTask(
FROM_HERE, base::Bind(&AudioOutputDevice::SwitchOutputDeviceOnIOThread,
this, device_id, security_origin, callback));
}
+AudioParameters AudioOutputDevice::GetOutputParameters() {
+ CHECK(!task_runner()->BelongsToCurrentThread());
+ did_receive_auth_.Wait();
+ return output_params_;
+}
+
+OutputDeviceStatus AudioOutputDevice::GetDeviceStatus() {
+ CHECK(!task_runner()->BelongsToCurrentThread());
+ did_receive_auth_.Wait();
+ return device_status_;
+}
+
+void AudioOutputDevice::RequestDeviceAuthorizationOnIOThread() {
+ DCHECK(task_runner()->BelongsToCurrentThread());
+ DCHECK_EQ(state_, IDLE);
+ state_ = AUTHORIZING;
+ ipc_->RequestDeviceAuthorization(this, session_id_, device_id_,
+ security_origin_);
+}
+
void AudioOutputDevice::CreateStreamOnIOThread(const AudioParameters& params) {
DCHECK(task_runner()->BelongsToCurrentThread());
- if (state_ == IDLE) {
- state_ = CREATING_STREAM;
- ipc_->CreateStream(this, params, session_id_);
+ switch (state_) {
+ case IPC_CLOSED:
+ if (callback_)
+ callback_->OnRenderError();
+ break;
+
+ case IDLE:
+ if (did_receive_auth_.IsSignaled() && device_id_.empty() &&
+ security_origin_.unique()) {
+ state_ = CREATING_STREAM;
+ ipc_->CreateStream(this, params);
+ } else {
+ RequestDeviceAuthorizationOnIOThread();
+ start_on_authorized_ = true;
+ }
+ break;
+
+ case AUTHORIZING:
+ start_on_authorized_ = true;
+ break;
+
+ case AUTHORIZED:
+ state_ = CREATING_STREAM;
+ ipc_->CreateStream(this, params);
+ start_on_authorized_ = false;
+ break;
+
+ case CREATING_STREAM:
+ case PAUSED:
+ case PLAYING:
+ NOTREACHED();
+ break;
}
}
@@ -175,10 +236,11 @@ void AudioOutputDevice::ShutDownOnIOThread() {
DCHECK(task_runner()->BelongsToCurrentThread());
// Close the stream, if we haven't already.
- if (state_ >= CREATING_STREAM) {
+ if (state_ >= AUTHORIZING) {
ipc_->CloseStream();
state_ = IDLE;
}
+ start_on_authorized_ = false;
// We can run into an issue where ShutDownOnIOThread is called right after
// OnStreamCreated is called in cases where Start/Stop are called before we
@@ -203,16 +265,24 @@ void AudioOutputDevice::SetVolumeOnIOThread(double volume) {
void AudioOutputDevice::SwitchOutputDeviceOnIOThread(
const std::string& device_id,
- const GURL& security_origin,
+ const url::Origin& security_origin,
const SwitchOutputDeviceCB& callback) {
DCHECK(task_runner()->BelongsToCurrentThread());
- DVLOG(1) << __FUNCTION__ << "(" << device_id << "," << security_origin << ")";
+
+ // Do not allow concurrent SwitchOutputDevice requests
+ if (!current_switch_callback_.is_null()) {
+ callback.Run(OUTPUT_DEVICE_STATUS_ERROR_INTERNAL);
+ return;
+ }
+
+ current_switch_callback_ = callback;
+ current_switch_device_id_ = device_id;
+ current_switch_security_origin_ = security_origin;
if (state_ >= CREATING_STREAM) {
- SetCurrentSwitchRequest(callback);
- ipc_->SwitchOutputDevice(device_id, security_origin,
- current_switch_request_id_);
+ ipc_->SwitchOutputDevice(current_switch_device_id_,
+ current_switch_security_origin_);
} else {
- callback.Run(SWITCH_OUTPUT_DEVICE_RESULT_ERROR_NOT_SUPPORTED);
+ switch_output_device_on_start_ = true;
}
}
@@ -246,6 +316,41 @@ void AudioOutputDevice::OnStateChanged(AudioOutputIPCDelegateState state) {
}
}
+void AudioOutputDevice::OnDeviceAuthorized(
+ OutputDeviceStatus device_status,
+ const media::AudioParameters& output_params) {
+ DCHECK(task_runner()->BelongsToCurrentThread());
+ DCHECK_EQ(state_, AUTHORIZING);
+
+ // It may happen that a second authorization is received as a result to a
+ // call to Start() after Stop(). If the status for the second authorization
+ // differs from the first, it will not be reflected in |device_status_|
+ // to avoid a race.
+ // This scenario is unlikely. If it occurs, the new value will be
+ // different from OUTPUT_DEVICE_STATUS_OK, so the AudioOutputDevice
+ // will enter the IPC_CLOSED state anyway, which is the safe thing to do.
+ // This is preferable to holding a lock.
+ if (!did_receive_auth_.IsSignaled())
+ device_status_ = device_status;
+
+ if (device_status == OUTPUT_DEVICE_STATUS_OK) {
+ state_ = AUTHORIZED;
+ if (!did_receive_auth_.IsSignaled()) {
+ output_params_ = output_params;
+ did_receive_auth_.Signal();
+ }
+ if (start_on_authorized_)
+ CreateStreamOnIOThread(audio_parameters_);
+ } else {
+ // Closing IPC forces a Signal(), so no clients are locked waiting
+ // indefinitely after this method returns.
+ ipc_->CloseStream();
+ OnIPCClosed();
+ if (callback_)
+ callback_->OnRenderError();
+ }
+}
+
void AudioOutputDevice::OnStreamCreated(
base::SharedMemoryHandle handle,
base::SyncSocket::Handle socket_handle,
@@ -274,46 +379,38 @@ void AudioOutputDevice::OnStreamCreated(
// delete as they see fit. AudioOutputDevice should internally use WeakPtr
// to handle teardown and thread hopping. See http://crbug.com/151051 for
// details.
- base::AutoLock auto_lock(audio_thread_lock_);
- if (stopping_hack_)
- return;
+ {
+ base::AutoLock auto_lock(audio_thread_lock_);
+ if (stopping_hack_)
+ return;
+
+ DCHECK(audio_thread_.IsStopped());
+ audio_callback_.reset(new AudioOutputDevice::AudioThreadCallback(
+ audio_parameters_, handle, length, callback_));
+ audio_thread_.Start(audio_callback_.get(), socket_handle,
+ "AudioOutputDevice", true);
+ state_ = PAUSED;
- DCHECK(audio_thread_.IsStopped());
- audio_callback_.reset(new AudioOutputDevice::AudioThreadCallback(
- audio_parameters_, handle, length, callback_));
- audio_thread_.Start(
- audio_callback_.get(), socket_handle, "AudioOutputDevice", true);
- state_ = PAUSED;
-
- // We handle the case where Play() and/or Pause() may have been called
- // multiple times before OnStreamCreated() gets called.
- if (play_on_start_)
- PlayOnIOThread();
-}
+ // We handle the case where Play() and/or Pause() may have been called
+ // multiple times before OnStreamCreated() gets called.
+ if (play_on_start_)
+ PlayOnIOThread();
+ }
-void AudioOutputDevice::SetCurrentSwitchRequest(
- const SwitchOutputDeviceCB& callback) {
- DCHECK(task_runner()->BelongsToCurrentThread());
- DVLOG(1) << __FUNCTION__;
- // If there is a previous unresolved request, resolve it as obsolete
- if (!current_switch_callback_.is_null()) {
- base::ResetAndReturn(&current_switch_callback_).Run(
- SWITCH_OUTPUT_DEVICE_RESULT_ERROR_OBSOLETE);
+ if (switch_output_device_on_start_) {
+ ipc_->SwitchOutputDevice(current_switch_device_id_,
+ current_switch_security_origin_);
}
- current_switch_callback_ = callback;
- current_switch_request_id_++;
}
-void AudioOutputDevice::OnOutputDeviceSwitched(
- int request_id,
- SwitchOutputDeviceResult result) {
+void AudioOutputDevice::OnOutputDeviceSwitched(OutputDeviceStatus result) {
DCHECK(task_runner()->BelongsToCurrentThread());
- DCHECK(request_id <= current_switch_request_id_);
- DVLOG(1) << __FUNCTION__
- << "(" << request_id << ", " << result << ")";
- if (request_id != current_switch_request_id_) {
- return;
+ if (result == OUTPUT_DEVICE_STATUS_OK) {
+ session_id_ = 0; // Output device is no longer attached to an input device
+ device_id_ = current_switch_device_id_;
+ security_origin_ = current_switch_security_origin_;
}
+ DCHECK(!current_switch_callback_.is_null());
base::ResetAndReturn(&current_switch_callback_).Run(result);
}
@@ -321,6 +418,9 @@ void AudioOutputDevice::OnIPCClosed() {
DCHECK(task_runner()->BelongsToCurrentThread());
state_ = IPC_CLOSED;
ipc_.reset();
+
+ // Signal to unblock any blocked threads waiting for parameters
+ did_receive_auth_.Signal();
}
void AudioOutputDevice::WillDestroyCurrentMessageLoop() {
diff --git a/chromium/media/audio/audio_output_device.h b/chromium/media/audio/audio_output_device.h
index dc28352958e..a3618c82b80 100644
--- a/chromium/media/audio/audio_output_device.h
+++ b/chromium/media/audio/audio_output_device.h
@@ -22,16 +22,22 @@
// State sequences.
//
// Task [IO thread] IPC [IO thread]
+// RequestDeviceAuthorization -> RequestDeviceAuthorizationOnIOThread ------>
+// RequestDeviceAuthorization ->
+// <- OnDeviceAuthorized <- AudioMsg_NotifyDeviceAuthorized <-
//
// Start -> CreateStreamOnIOThread -----> CreateStream ------>
// <- OnStreamCreated <- AudioMsg_NotifyStreamCreated <-
// ---> PlayOnIOThread -----------> PlayStream -------->
//
-// Optionally Play() / Pause() sequences may occur:
+// Optionally Play() / Pause() / SwitchOutputDevice() sequences may occur:
// Play -> PlayOnIOThread --------------> PlayStream --------->
// Pause -> PauseOnIOThread ------------> PauseStream -------->
-// (note that Play() / Pause() sequences before OnStreamCreated are
-// deferred until OnStreamCreated, with the last valid state being used)
+// SwitchOutputDevice -> SwitchOutputDeviceOnIOThread -> SwitchOutputDevice ->
+// <- OnOutputDeviceSwitched <- AudioMsg_NotifyOutputDeviceSwitched <-
+// (note that Play() / Pause() / SwitchOutputDevice() sequences before
+// OnStreamCreated are deferred until OnStreamCreated, with the last valid
+// state being used)
//
// AudioOutputDevice::Render => audio transport on audio thread =>
// |
@@ -64,30 +70,33 @@
#include "base/bind.h"
#include "base/memory/scoped_ptr.h"
#include "base/memory/shared_memory.h"
+#include "base/synchronization/waitable_event.h"
#include "media/audio/audio_device_thread.h"
#include "media/audio/audio_output_ipc.h"
#include "media/audio/audio_parameters.h"
#include "media/audio/scoped_task_runner_observer.h"
#include "media/base/audio_renderer_sink.h"
#include "media/base/media_export.h"
+#include "media/base/output_device.h"
namespace media {
class MEDIA_EXPORT AudioOutputDevice
: NON_EXPORTED_BASE(public AudioRendererSink),
NON_EXPORTED_BASE(public AudioOutputIPCDelegate),
+ NON_EXPORTED_BASE(public OutputDevice),
NON_EXPORTED_BASE(public ScopedTaskRunnerObserver) {
public:
// NOTE: Clients must call Initialize() before using.
AudioOutputDevice(
scoped_ptr<AudioOutputIPC> ipc,
- const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner);
+ const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner,
+ int session_id,
+ const std::string& device_id,
+ const url::Origin& security_origin);
- // Initialize the stream using |session_id|, which is used for the browser
- // to select the correct input device.
- void InitializeWithSessionId(const AudioParameters& params,
- RenderCallback* callback,
- int session_id);
+ // Request authorization to use the device specified in the constructor.
+ void RequestDeviceAuthorization();
// AudioRendererSink implementation.
void Initialize(const AudioParameters& params,
@@ -97,18 +106,24 @@ class MEDIA_EXPORT AudioOutputDevice
void Play() override;
void Pause() override;
bool SetVolume(double volume) override;
+ OutputDevice* GetOutputDevice() override;
+
+ // OutputDevice implementation
void SwitchOutputDevice(const std::string& device_id,
- const GURL& security_origin,
+ const url::Origin& security_origin,
const SwitchOutputDeviceCB& callback) override;
+ AudioParameters GetOutputParameters() override;
+ OutputDeviceStatus GetDeviceStatus() override;
// Methods called on IO thread ----------------------------------------------
// AudioOutputIPCDelegate methods.
void OnStateChanged(AudioOutputIPCDelegateState state) override;
+ void OnDeviceAuthorized(OutputDeviceStatus device_status,
+ const media::AudioParameters& output_params) override;
void OnStreamCreated(base::SharedMemoryHandle handle,
base::SyncSocket::Handle socket_handle,
int length) override;
- void OnOutputDeviceSwitched(int request_id,
- SwitchOutputDeviceResult result) override;
+ void OnOutputDeviceSwitched(OutputDeviceStatus result) override;
void OnIPCClosed() override;
protected:
@@ -120,10 +135,12 @@ class MEDIA_EXPORT AudioOutputDevice
private:
// Note: The ordering of members in this enum is critical to correct behavior!
enum State {
- IPC_CLOSED, // No more IPCs can take place.
- IDLE, // Not started.
+ IPC_CLOSED, // No more IPCs can take place.
+ IDLE, // Not started.
+ AUTHORIZING, // Sent device authorization request, waiting for reply.
+ AUTHORIZED, // Successful device authorization received.
CREATING_STREAM, // Waiting for OnStreamCreated() to be called back.
- PAUSED, // Paused. OnStreamCreated() has been called. Can Play()/Stop().
+ PAUSED, // Paused. OnStreamCreated() has been called. Can Play()/Stop().
PLAYING, // Playing back. Can Pause()/Stop().
};
@@ -131,20 +148,24 @@ class MEDIA_EXPORT AudioOutputDevice
// The following methods are tasks posted on the IO thread that need to
// be executed on that thread. They use AudioOutputIPC to send IPC messages
// upon state changes.
+ void RequestDeviceAuthorizationOnIOThread();
void CreateStreamOnIOThread(const AudioParameters& params);
void PlayOnIOThread();
void PauseOnIOThread();
void ShutDownOnIOThread();
void SetVolumeOnIOThread(double volume);
void SwitchOutputDeviceOnIOThread(const std::string& device_id,
- const GURL& security_origin,
+ const url::Origin& security_origin,
const SwitchOutputDeviceCB& callback);
// base::MessageLoop::DestructionObserver implementation for the IO loop.
// If the IO loop dies before we do, we shut down the audio thread from here.
void WillDestroyCurrentMessageLoop() override;
- void SetCurrentSwitchRequest(const SwitchOutputDeviceCB& callback);
+ void SetCurrentSwitchRequest(const SwitchOutputDeviceCB& callback,
+ const std::string& device_id,
+ const url::Origin& security_origin);
+ void SetDeviceStatus(OutputDeviceStatus status);
AudioParameters audio_parameters_;
@@ -159,6 +180,9 @@ class MEDIA_EXPORT AudioOutputDevice
// State enum above.
State state_;
+ // State of Start() calls before OnDeviceAuthorized() is called.
+ bool start_on_authorized_;
+
// State of Play() / Pause() calls before OnStreamCreated() is called.
bool play_on_start_;
@@ -166,6 +190,10 @@ class MEDIA_EXPORT AudioOutputDevice
// Only used by Unified IO.
int session_id_;
+ // ID of hardware output device to be used (provided session_id_ is zero)
+ std::string device_id_;
+ url::Origin security_origin_;
+
// Our audio thread callback class. See source file for details.
class AudioThreadCallback;
@@ -183,8 +211,14 @@ class MEDIA_EXPORT AudioOutputDevice
// the callback via Start(). See http://crbug.com/151051 for details.
bool stopping_hack_;
- int current_switch_request_id_;
SwitchOutputDeviceCB current_switch_callback_;
+ std::string current_switch_device_id_;
+ url::Origin current_switch_security_origin_;
+ bool switch_output_device_on_start_;
+
+ base::WaitableEvent did_receive_auth_;
+ media::AudioParameters output_params_;
+ OutputDeviceStatus device_status_;
DISALLOW_COPY_AND_ASSIGN(AudioOutputDevice);
};
diff --git a/chromium/media/audio/audio_output_device_unittest.cc b/chromium/media/audio/audio_output_device_unittest.cc
index 3b3ee512f5e..15ed4c3f156 100644
--- a/chromium/media/audio/audio_output_device_unittest.cc
+++ b/chromium/media/audio/audio_output_device_unittest.cc
@@ -35,6 +35,11 @@ namespace media {
namespace {
+const std::string kDefaultDeviceId;
+const std::string kNonDefaultDeviceId("valid-nondefault-device-id");
+const std::string kUnauthorizedDeviceId("unauthorized-device-id");
+const url::Origin kDefaultSecurityOrigin;
+
class MockRenderCallback : public AudioRendererSink::RenderCallback {
public:
MockRenderCallback() {}
@@ -49,22 +54,26 @@ class MockAudioOutputIPC : public AudioOutputIPC {
MockAudioOutputIPC() {}
virtual ~MockAudioOutputIPC() {}
- MOCK_METHOD3(CreateStream, void(AudioOutputIPCDelegate* delegate,
- const AudioParameters& params,
- int session_id));
+ MOCK_METHOD4(RequestDeviceAuthorization,
+ void(AudioOutputIPCDelegate* delegate,
+ int session_id,
+ const std::string& device_id,
+ const url::Origin& security_origin));
+ MOCK_METHOD2(CreateStream,
+ void(AudioOutputIPCDelegate* delegate,
+ const AudioParameters& params));
MOCK_METHOD0(PlayStream, void());
MOCK_METHOD0(PauseStream, void());
MOCK_METHOD0(CloseStream, void());
MOCK_METHOD1(SetVolume, void(double volume));
- MOCK_METHOD3(SwitchOutputDevice,
+ MOCK_METHOD2(SwitchOutputDevice,
void(const std::string& device_id,
- const GURL& security_origin,
- int request_id));
+ const url::Origin& security_origin));
};
class MockSwitchOutputDeviceCallback {
public:
- MOCK_METHOD1(Callback, void(media::SwitchOutputDeviceResult result));
+ MOCK_METHOD1(Callback, void(OutputDeviceStatus result));
};
ACTION_P2(SendPendingBytes, socket, pending_bytes) {
@@ -86,12 +95,14 @@ class AudioOutputDeviceTest
AudioOutputDeviceTest();
~AudioOutputDeviceTest();
+ void ReceiveAuthorization(OutputDeviceStatus device_status);
void StartAudioDevice();
void CreateStream();
void ExpectRenderCallback();
void WaitUntilRenderCallback();
void StopAudioDevice();
void SwitchOutputDevice();
+ void SetDevice(const std::string& device_id);
protected:
// Used to clean up TLS pointers that the test(s) will initialize.
@@ -103,10 +114,11 @@ class AudioOutputDeviceTest
MockAudioOutputIPC* audio_output_ipc_; // owned by audio_device_
scoped_refptr<AudioOutputDevice> audio_device_;
MockSwitchOutputDeviceCallback switch_output_device_callback_;
+ OutputDeviceStatus device_status_;
private:
int CalculateMemorySize();
- void SwitchOutputDeviceCallback(SwitchOutputDeviceResult result);
+ void SwitchOutputDeviceCallback(OutputDeviceStatus result);
SharedMemory shared_memory_;
CancelableSyncSocket browser_socket_;
@@ -120,31 +132,54 @@ int AudioOutputDeviceTest::CalculateMemorySize() {
return AudioBus::CalculateMemorySize(default_audio_parameters_);
}
-AudioOutputDeviceTest::AudioOutputDeviceTest() {
- default_audio_parameters_.Reset(
- AudioParameters::AUDIO_PCM_LINEAR,
- CHANNEL_LAYOUT_STEREO, 2, 48000, 16, 1024);
+AudioOutputDeviceTest::AudioOutputDeviceTest()
+ : device_status_(OUTPUT_DEVICE_STATUS_ERROR_INTERNAL) {
+ default_audio_parameters_.Reset(AudioParameters::AUDIO_PCM_LINEAR,
+ CHANNEL_LAYOUT_STEREO, 48000, 16, 1024);
+ SetDevice(kDefaultDeviceId);
+}
+
+AudioOutputDeviceTest::~AudioOutputDeviceTest() {
+ audio_device_ = NULL;
+}
+void AudioOutputDeviceTest::SetDevice(const std::string& device_id) {
audio_output_ipc_ = new MockAudioOutputIPC();
audio_device_ = new AudioOutputDevice(
- scoped_ptr<AudioOutputIPC>(audio_output_ipc_),
- io_loop_.task_runner());
+ scoped_ptr<AudioOutputIPC>(audio_output_ipc_), io_loop_.task_runner(), 0,
+ device_id, kDefaultSecurityOrigin);
+ EXPECT_CALL(*audio_output_ipc_,
+ RequestDeviceAuthorization(audio_device_.get(), 0, device_id, _));
+ audio_device_->RequestDeviceAuthorization();
+ io_loop_.RunUntilIdle();
+
+ // Simulate response from browser
+ OutputDeviceStatus device_status =
+ (device_id == kUnauthorizedDeviceId)
+ ? OUTPUT_DEVICE_STATUS_ERROR_NOT_AUTHORIZED
+ : OUTPUT_DEVICE_STATUS_OK;
+ ReceiveAuthorization(device_status);
audio_device_->Initialize(default_audio_parameters_,
&callback_);
-
- io_loop_.RunUntilIdle();
}
-AudioOutputDeviceTest::~AudioOutputDeviceTest() {
- audio_device_ = NULL;
+void AudioOutputDeviceTest::ReceiveAuthorization(OutputDeviceStatus status) {
+ device_status_ = status;
+ if (device_status_ != OUTPUT_DEVICE_STATUS_OK)
+ EXPECT_CALL(*audio_output_ipc_, CloseStream());
+
+ audio_device_->OnDeviceAuthorized(device_status_, default_audio_parameters_);
+ io_loop_.RunUntilIdle();
}
void AudioOutputDeviceTest::StartAudioDevice() {
- audio_device_->Start();
-
- EXPECT_CALL(*audio_output_ipc_, CreateStream(audio_device_.get(), _, 0));
+ if (device_status_ == OUTPUT_DEVICE_STATUS_OK)
+ EXPECT_CALL(*audio_output_ipc_, CreateStream(audio_device_.get(), _));
+ else
+ EXPECT_CALL(callback_, OnRenderError());
+ audio_device_->Start();
io_loop_.RunUntilIdle();
}
@@ -205,38 +240,33 @@ void AudioOutputDeviceTest::WaitUntilRenderCallback() {
}
void AudioOutputDeviceTest::StopAudioDevice() {
- audio_device_->Stop();
-
- EXPECT_CALL(*audio_output_ipc_, CloseStream());
+ if (device_status_ == OUTPUT_DEVICE_STATUS_OK)
+ EXPECT_CALL(*audio_output_ipc_, CloseStream());
+ audio_device_->Stop();
io_loop_.RunUntilIdle();
}
void AudioOutputDeviceTest::SwitchOutputDevice() {
- const GURL security_origin("http://localhost");
- const std::string device_id;
- const int request_id = 1;
-
// Switch the output device and check that the IPC message is sent
- EXPECT_CALL(*audio_output_ipc_,
- SwitchOutputDevice(device_id, security_origin, request_id));
+ EXPECT_CALL(*audio_output_ipc_, SwitchOutputDevice(kNonDefaultDeviceId, _));
audio_device_->SwitchOutputDevice(
- device_id, security_origin,
+ kNonDefaultDeviceId, url::Origin(),
base::Bind(&MockSwitchOutputDeviceCallback::Callback,
base::Unretained(&switch_output_device_callback_)));
io_loop_.RunUntilIdle();
// Simulate the reception of a successful response from the browser
EXPECT_CALL(switch_output_device_callback_,
- Callback(SWITCH_OUTPUT_DEVICE_RESULT_SUCCESS));
- audio_device_->OnOutputDeviceSwitched(request_id,
- SWITCH_OUTPUT_DEVICE_RESULT_SUCCESS);
+ Callback(OUTPUT_DEVICE_STATUS_OK));
+ audio_device_->OnOutputDeviceSwitched(OUTPUT_DEVICE_STATUS_OK);
io_loop_.RunUntilIdle();
}
TEST_P(AudioOutputDeviceTest, Initialize) {
// Tests that the object can be constructed, initialized and destructed
- // without having ever been started/stopped.
+ // without having ever been started.
+ StopAudioDevice();
}
// Calls Start() followed by an immediate Stop() and check for the basic message
@@ -284,6 +314,37 @@ TEST_P(AudioOutputDeviceTest, SwitchOutputDevice) {
StopAudioDevice();
}
+// Full test with output only with nondefault device.
+TEST_P(AudioOutputDeviceTest, NonDefaultCreateStream) {
+ SetDevice(kNonDefaultDeviceId);
+ StartAudioDevice();
+ ExpectRenderCallback();
+ CreateStream();
+ WaitUntilRenderCallback();
+ StopAudioDevice();
+}
+
+// Multiple start/stop with nondefault device
+TEST_P(AudioOutputDeviceTest, NonDefaultStartStopStartStop) {
+ SetDevice(kNonDefaultDeviceId);
+ StartAudioDevice();
+ StopAudioDevice();
+
+ EXPECT_CALL(*audio_output_ipc_,
+ RequestDeviceAuthorization(audio_device_.get(), 0, _, _));
+ StartAudioDevice();
+ // Simulate reply from browser
+ ReceiveAuthorization(OUTPUT_DEVICE_STATUS_OK);
+
+ StopAudioDevice();
+}
+
+TEST_P(AudioOutputDeviceTest, UnauthorizedDevice) {
+ SetDevice(kUnauthorizedDeviceId);
+ StartAudioDevice();
+ StopAudioDevice();
+}
+
INSTANTIATE_TEST_CASE_P(Render, AudioOutputDeviceTest, Values(false));
} // namespace media.
diff --git a/chromium/media/audio/audio_output_dispatcher_impl.h b/chromium/media/audio/audio_output_dispatcher_impl.h
index d27178458fe..ab290c5d2d1 100644
--- a/chromium/media/audio/audio_output_dispatcher_impl.h
+++ b/chromium/media/audio/audio_output_dispatcher_impl.h
@@ -83,7 +83,7 @@ class MEDIA_EXPORT AudioOutputDispatcherImpl : public AudioOutputDispatcher {
// When streams are stopped they're added to |idle_streams_|, if no stream is
// reused before |close_delay_| elapses |close_timer_| will run
// CloseIdleStreams().
- base::DelayTimer<AudioOutputDispatcherImpl> close_timer_;
+ base::DelayTimer close_timer_;
typedef std::map<AudioOutputProxy*, AudioOutputStream*> AudioStreamMap;
AudioStreamMap proxy_to_physical_map_;
diff --git a/chromium/media/audio/audio_output_ipc.h b/chromium/media/audio/audio_output_ipc.h
index 21155084464..fa9c08e81a4 100644
--- a/chromium/media/audio/audio_output_ipc.h
+++ b/chromium/media/audio/audio_output_ipc.h
@@ -11,21 +11,11 @@
#include "base/sync_socket.h"
#include "media/audio/audio_parameters.h"
#include "media/base/media_export.h"
-#include "url/gurl.h"
+#include "media/base/output_device.h"
+#include "url/origin.h"
namespace media {
-// Result of an audio output device switch operation
-enum SwitchOutputDeviceResult {
- SWITCH_OUTPUT_DEVICE_RESULT_SUCCESS = 0,
- SWITCH_OUTPUT_DEVICE_RESULT_ERROR_NOT_FOUND,
- SWITCH_OUTPUT_DEVICE_RESULT_ERROR_NOT_AUTHORIZED,
- SWITCH_OUTPUT_DEVICE_RESULT_ERROR_OBSOLETE,
- SWITCH_OUTPUT_DEVICE_RESULT_ERROR_NOT_SUPPORTED,
- SWITCH_OUTPUT_DEVICE_RESULT_LAST =
- SWITCH_OUTPUT_DEVICE_RESULT_ERROR_NOT_SUPPORTED,
-};
-
// Current status of the audio output stream in the browser process. Browser
// sends information about the current playback state and error to the
// renderer process using this type.
@@ -41,10 +31,15 @@ enum AudioOutputIPCDelegateState {
// has been created. Implemented by AudioOutputDevice.
class MEDIA_EXPORT AudioOutputIPCDelegate {
public:
-
// Called when state of an audio stream has changed.
virtual void OnStateChanged(AudioOutputIPCDelegateState state) = 0;
+ // Called when an authorization request for an output device has been
+ // completed
+ virtual void OnDeviceAuthorized(
+ OutputDeviceStatus device_status,
+ const media::AudioParameters& output_params) = 0;
+
// Called when an audio stream has been created.
// The shared memory |handle| points to a memory section that's used to
// transfer audio buffers from the AudioOutputIPCDelegate back to the
@@ -58,8 +53,7 @@ class MEDIA_EXPORT AudioOutputIPCDelegate {
int length) = 0;
// Called when an attempt to switch the output device has been completed
- virtual void OnOutputDeviceSwitched(int request_id,
- SwitchOutputDeviceResult result) = 0;
+ virtual void OnOutputDeviceSwitched(OutputDeviceStatus result) = 0;
// Called when the AudioOutputIPC object is going away and/or when the IPC
// channel has been closed and no more ipc requests can be made.
@@ -79,15 +73,28 @@ class MEDIA_EXPORT AudioOutputIPC {
public:
virtual ~AudioOutputIPC();
+ // Sends a request to authorize the use of a specific audio output device
+ // in the peer process.
+ // If |session_id| is nonzero, the browser selects the output device
+ // associated with an opened input device indicated by |session_id|. If no
+ // such device is found, the browser attempts to select the device indicated
+ // by |device_id|. If |device_id| is the empty string, the default
+ // audio output device will be selected.
+ // Once the authorization process is complete, the implementation will
+ // notify |delegate| by calling OnDeviceAuthorized().
+ virtual void RequestDeviceAuthorization(
+ AudioOutputIPCDelegate* delegate,
+ int session_id,
+ const std::string& device_id,
+ const url::Origin& security_origin) = 0;
+
// Sends a request to create an AudioOutputController object in the peer
// process and configures it to use the specified audio |params| including
- // number of synchronized input channels.|session_id| is used by the browser
- // to select the correct input device if the input channel in |params| is
- // valid, otherwise it will be ignored. Once the stream has been created,
- // the implementation will notify |delegate| by calling OnStreamCreated().
+ // number of synchronized input channels.
+ // Once the stream has been created, the implementation will notify
+ // |delegate| by calling OnStreamCreated().
virtual void CreateStream(AudioOutputIPCDelegate* delegate,
- const AudioParameters& params,
- int session_id) = 0;
+ const AudioParameters& params) = 0;
// Starts playing the stream. This should generate a call to
// AudioOutputController::Play().
@@ -106,8 +113,7 @@ class MEDIA_EXPORT AudioOutputIPC {
// Switches the output device of the audio stream.
virtual void SwitchOutputDevice(const std::string& device_id,
- const GURL& security_origin,
- int request_id) = 0;
+ const url::Origin& security_origin) = 0;
};
} // namespace media
diff --git a/chromium/media/audio/audio_output_resampler.cc b/chromium/media/audio/audio_output_resampler.cc
index 6fb970b9f2b..d85768b7b6a 100644
--- a/chromium/media/audio/audio_output_resampler.cc
+++ b/chromium/media/audio/audio_output_resampler.cc
@@ -241,10 +241,9 @@ bool AudioOutputResampler::OpenStream() {
<< "back to fake audio output.";
// Finally fall back to a fake audio output device.
- output_params_.Reset(
- AudioParameters::AUDIO_FAKE, params_.channel_layout(),
- params_.channels(), params_.sample_rate(),
- params_.bits_per_sample(), params_.frames_per_buffer());
+ output_params_ = params_;
+ output_params_.set_format(AudioParameters::AUDIO_FAKE);
+
Initialize();
if (dispatcher_->OpenStream()) {
streams_opened_ = true;
diff --git a/chromium/media/audio/audio_output_stream_sink.cc b/chromium/media/audio/audio_output_stream_sink.cc
index e290e0f5bd9..cd08fdd3eb2 100644
--- a/chromium/media/audio/audio_output_stream_sink.cc
+++ b/chromium/media/audio/audio_output_stream_sink.cc
@@ -59,11 +59,8 @@ bool AudioOutputStreamSink::SetVolume(double volume) {
return true;
}
-void AudioOutputStreamSink::SwitchOutputDevice(
- const std::string& device_id,
- const GURL& security_origin,
- const SwitchOutputDeviceCB& callback) {
- callback.Run(SWITCH_OUTPUT_DEVICE_RESULT_ERROR_NOT_SUPPORTED);
+OutputDevice* AudioOutputStreamSink::GetOutputDevice() {
+ return nullptr;
}
int AudioOutputStreamSink::OnMoreData(AudioBus* dest,
diff --git a/chromium/media/audio/audio_output_stream_sink.h b/chromium/media/audio/audio_output_stream_sink.h
index 9d99fb72145..6ab5dc1d052 100644
--- a/chromium/media/audio/audio_output_stream_sink.h
+++ b/chromium/media/audio/audio_output_stream_sink.h
@@ -36,9 +36,7 @@ class MEDIA_EXPORT AudioOutputStreamSink
void Pause() override;
void Play() override;
bool SetVolume(double volume) override;
- void SwitchOutputDevice(const std::string& device_id,
- const GURL& security_origin,
- const SwitchOutputDeviceCB& callback) override;
+ OutputDevice* GetOutputDevice() override;
// AudioSourceCallback implementation.
int OnMoreData(AudioBus* dest, uint32 total_bytes_delay) override;
diff --git a/chromium/media/audio/audio_parameters.cc b/chromium/media/audio/audio_parameters.cc
index 872413f8083..14c32147a4d 100644
--- a/chromium/media/audio/audio_parameters.cc
+++ b/chromium/media/audio/audio_parameters.cc
@@ -10,61 +10,35 @@
namespace media {
AudioParameters::AudioParameters()
- : format_(AUDIO_PCM_LINEAR),
- channel_layout_(CHANNEL_LAYOUT_NONE),
- sample_rate_(0),
- bits_per_sample_(0),
- frames_per_buffer_(0),
- channels_(0),
- effects_(NO_EFFECTS) {
+ : AudioParameters(AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_NONE, 0, 0, 0) {}
+
+AudioParameters::AudioParameters(Format format,
+ ChannelLayout channel_layout,
+ int sample_rate,
+ int bits_per_sample,
+ int frames_per_buffer) {
+ Reset(format, channel_layout, sample_rate, bits_per_sample,
+ frames_per_buffer);
}
-AudioParameters::AudioParameters(Format format, ChannelLayout channel_layout,
- int sample_rate, int bits_per_sample,
- int frames_per_buffer)
- : format_(format),
- channel_layout_(channel_layout),
- sample_rate_(sample_rate),
- bits_per_sample_(bits_per_sample),
- frames_per_buffer_(frames_per_buffer),
- channels_(ChannelLayoutToChannelCount(channel_layout)),
- effects_(NO_EFFECTS) {
-}
-
-AudioParameters::AudioParameters(Format format, ChannelLayout channel_layout,
- int sample_rate, int bits_per_sample,
- int frames_per_buffer, int effects)
- : format_(format),
- channel_layout_(channel_layout),
- sample_rate_(sample_rate),
- bits_per_sample_(bits_per_sample),
- frames_per_buffer_(frames_per_buffer),
- channels_(ChannelLayoutToChannelCount(channel_layout)),
- effects_(effects) {
-}
+AudioParameters::~AudioParameters() {}
-AudioParameters::AudioParameters(Format format, ChannelLayout channel_layout,
- int channels, int sample_rate,
- int bits_per_sample, int frames_per_buffer,
- int effects)
- : format_(format),
- channel_layout_(channel_layout),
- sample_rate_(sample_rate),
- bits_per_sample_(bits_per_sample),
- frames_per_buffer_(frames_per_buffer),
- channels_(channels),
- effects_(effects) {
-}
+AudioParameters::AudioParameters(const AudioParameters&) = default;
+AudioParameters& AudioParameters::operator=(const AudioParameters&) = default;
-void AudioParameters::Reset(Format format, ChannelLayout channel_layout,
- int channels, int sample_rate,
- int bits_per_sample, int frames_per_buffer) {
+void AudioParameters::Reset(Format format,
+ ChannelLayout channel_layout,
+ int sample_rate,
+ int bits_per_sample,
+ int frames_per_buffer) {
format_ = format;
channel_layout_ = channel_layout;
- channels_ = channels;
+ channels_ = ChannelLayoutToChannelCount(channel_layout);
sample_rate_ = sample_rate;
bits_per_sample_ = bits_per_sample;
frames_per_buffer_ = frames_per_buffer;
+ effects_ = NO_EFFECTS;
+ mic_positions_.clear();
}
bool AudioParameters::IsValid() const {
@@ -82,12 +56,12 @@ bool AudioParameters::IsValid() const {
std::string AudioParameters::AsHumanReadableString() const {
std::ostringstream s;
- s << "format: " << format()
- << " channels: " << channels()
- << " channel_layout: " << channel_layout()
- << " sample_rate: " << sample_rate()
+ s << "format: " << format() << " channel_layout: " << channel_layout()
+ << " channels: " << channels() << " sample_rate: " << sample_rate()
<< " bits_per_sample: " << bits_per_sample()
- << " frames_per_buffer: " << frames_per_buffer();
+ << " frames_per_buffer: " << frames_per_buffer()
+ << " effects: " << effects()
+ << " mic_positions: " << PointsToString(mic_positions_);
return s.str();
}
@@ -110,13 +84,12 @@ base::TimeDelta AudioParameters::GetBufferDuration() const {
}
bool AudioParameters::Equals(const AudioParameters& other) const {
- return format_ == other.format() &&
- sample_rate_ == other.sample_rate() &&
+ return format_ == other.format() && sample_rate_ == other.sample_rate() &&
channel_layout_ == other.channel_layout() &&
channels_ == other.channels() &&
bits_per_sample_ == other.bits_per_sample() &&
frames_per_buffer_ == other.frames_per_buffer() &&
- effects_ == other.effects();
+ effects_ == other.effects() && mic_positions_ == other.mic_positions_;
}
} // namespace media
diff --git a/chromium/media/audio/audio_parameters.h b/chromium/media/audio/audio_parameters.h
index 3820d8cc3c9..61ca8121f9e 100644
--- a/chromium/media/audio/audio_parameters.h
+++ b/chromium/media/audio/audio_parameters.h
@@ -5,24 +5,48 @@
#ifndef MEDIA_AUDIO_AUDIO_PARAMETERS_H_
#define MEDIA_AUDIO_AUDIO_PARAMETERS_H_
+#include <stdint.h>
#include <string>
#include "base/basictypes.h"
+#include "base/compiler_specific.h"
#include "base/time/time.h"
+#include "media/audio/point.h"
+#include "media/base/audio_bus.h"
#include "media/base/channel_layout.h"
#include "media/base/media_export.h"
namespace media {
-struct MEDIA_EXPORT AudioInputBufferParameters {
+// Use a struct-in-struct approach to ensure that we can calculate the required
+// size as sizeof(AudioInputBufferParameters) + #(bytes in audio buffer) without
+// using packing. Also align AudioInputBufferParameters instead of in
+// AudioInputBuffer to be able to calculate size like so. Use a macro for the
+// alignment value that's the same as AudioBus::kChannelAlignment, since MSVC
+// doesn't accept the latter to be used.
+#if defined(OS_WIN)
+#pragma warning(push)
+#pragma warning(disable: 4324) // Disable warning for added padding.
+#endif
+#define PARAMETERS_ALIGNMENT 16
+COMPILE_ASSERT(AudioBus::kChannelAlignment == PARAMETERS_ALIGNMENT,
+ AudioInputBufferParameters_alignment_not_same_as_AudioBus);
+struct MEDIA_EXPORT ALIGNAS(PARAMETERS_ALIGNMENT) AudioInputBufferParameters {
double volume;
uint32 size;
+ uint32_t hardware_delay_bytes;
+ uint32_t id;
bool key_pressed;
};
+#undef PARAMETERS_ALIGNMENT
+#if defined(OS_WIN)
+#pragma warning(pop)
+#endif
+
+COMPILE_ASSERT(
+ sizeof(AudioInputBufferParameters) % AudioBus::kChannelAlignment == 0,
+ AudioInputBufferParameters_not_aligned);
-// Use a struct-in-struct approach to ensure that we can calculate the required
-// size as sizeof(AudioInputBufferParameters) + #(bytes in audio buffer) without
-// using packing.
struct MEDIA_EXPORT AudioInputBuffer {
AudioInputBufferParameters params;
int8 audio[1];
@@ -57,18 +81,19 @@ class MEDIA_EXPORT AudioParameters {
};
AudioParameters();
- AudioParameters(Format format, ChannelLayout channel_layout,
- int sample_rate, int bits_per_sample,
+ AudioParameters(Format format,
+ ChannelLayout channel_layout,
+ int sample_rate,
+ int bits_per_sample,
int frames_per_buffer);
- AudioParameters(Format format, ChannelLayout channel_layout,
- int sample_rate, int bits_per_sample,
- int frames_per_buffer, int effects);
- AudioParameters(Format format, ChannelLayout channel_layout,
- int channels, int sample_rate, int bits_per_sample,
- int frames_per_buffer, int effects);
-
- void Reset(Format format, ChannelLayout channel_layout,
- int channels, int sample_rate, int bits_per_sample,
+
+ ~AudioParameters();
+
+ // Re-initializes all members.
+ void Reset(Format format,
+ ChannelLayout channel_layout,
+ int sample_rate,
+ int bits_per_sample,
int frames_per_buffer);
// Checks that all values are in the expected range. All limits are specified
@@ -95,26 +120,67 @@ class MEDIA_EXPORT AudioParameters {
// Comparison with other AudioParams.
bool Equals(const AudioParameters& other) const;
+ void set_format(Format format) { format_ = format; }
Format format() const { return format_; }
+
+ // A setter for channel_layout_ is intentionally excluded.
ChannelLayout channel_layout() const { return channel_layout_; }
+
+ // The number of channels is usually computed from channel_layout_. Setting
+ // this explictly is only required with CHANNEL_LAYOUT_DISCRETE.
+ void set_channels_for_discrete(int channels) {
+ DCHECK(channel_layout_ == CHANNEL_LAYOUT_DISCRETE ||
+ channels == ChannelLayoutToChannelCount(channel_layout_));
+ channels_ = channels;
+ }
+ int channels() const { return channels_; }
+
+ void set_sample_rate(int sample_rate) { sample_rate_ = sample_rate; }
int sample_rate() const { return sample_rate_; }
+
+ void set_bits_per_sample(int bits_per_sample) {
+ bits_per_sample_ = bits_per_sample;
+ }
int bits_per_sample() const { return bits_per_sample_; }
+
+ void set_frames_per_buffer(int frames_per_buffer) {
+ frames_per_buffer_ = frames_per_buffer;
+ }
int frames_per_buffer() const { return frames_per_buffer_; }
- int channels() const { return channels_; }
+
+ void set_effects(int effects) { effects_ = effects; }
int effects() const { return effects_; }
+ void set_mic_positions(const std::vector<Point>& mic_positions) {
+ mic_positions_ = mic_positions;
+ }
+ const std::vector<Point>& mic_positions() const { return mic_positions_; }
+
+ AudioParameters(const AudioParameters&);
+ AudioParameters& operator=(const AudioParameters&);
+
private:
- // These members are mutable to support entire struct assignment. They should
- // not be mutated individually.
Format format_; // Format of the stream.
ChannelLayout channel_layout_; // Order of surround sound channels.
+ int channels_; // Number of channels. Value set based on
+ // |channel_layout|.
int sample_rate_; // Sampling frequency/rate.
int bits_per_sample_; // Number of bits per sample.
int frames_per_buffer_; // Number of frames in a buffer.
-
- int channels_; // Number of channels. Value set based on
- // |channel_layout|.
int effects_; // Bitmask using PlatformEffectsMask.
+
+ // Microphone positions using Cartesian coordinates:
+ // x: the horizontal dimension, with positive to the right from the camera's
+ // perspective.
+ // y: the depth dimension, with positive forward from the camera's
+ // perspective.
+ // z: the vertical dimension, with positive upwards.
+ //
+ // Usually, the center of the microphone array will be treated as the origin
+ // (often the position of the camera).
+ //
+ // An empty vector indicates unknown positions.
+ std::vector<Point> mic_positions_;
};
// Comparison is useful when AudioParameters is used with std structures.
diff --git a/chromium/media/audio/audio_parameters_unittest.cc b/chromium/media/audio/audio_parameters_unittest.cc
index 92677eb6c6e..71d23ec35d9 100644
--- a/chromium/media/audio/audio_parameters_unittest.cc
+++ b/chromium/media/audio/audio_parameters_unittest.cc
@@ -16,6 +16,9 @@ TEST(AudioParameters, Constructor_Default) {
ChannelLayout expected_channel_layout = CHANNEL_LAYOUT_NONE;
int expected_rate = 0;
int expected_samples = 0;
+ AudioParameters::PlatformEffectsMask expected_effects =
+ AudioParameters::NO_EFFECTS;
+ std::vector<Point> expected_mic_positions;
AudioParameters params;
@@ -25,6 +28,8 @@ TEST(AudioParameters, Constructor_Default) {
EXPECT_EQ(expected_channel_layout, params.channel_layout());
EXPECT_EQ(expected_rate, params.sample_rate());
EXPECT_EQ(expected_samples, params.frames_per_buffer());
+ EXPECT_EQ(expected_effects, params.effects());
+ EXPECT_EQ(expected_mic_positions, params.mic_positions());
}
TEST(AudioParameters, Constructor_ParameterValues) {
@@ -194,7 +199,8 @@ TEST(AudioParameters, Compare) {
for (size_t i = 0; i < arraysize(values); ++i) {
for (size_t j = 0; j < arraysize(values); ++j) {
- SCOPED_TRACE("i=" + base::IntToString(i) + " j=" + base::IntToString(j));
+ SCOPED_TRACE("i=" + base::SizeTToString(i) + " j=" +
+ base::SizeTToString(j));
EXPECT_EQ(i < j, values[i] < values[j]);
}
@@ -205,20 +211,11 @@ TEST(AudioParameters, Compare) {
TEST(AudioParameters, Constructor_ValidChannelCounts) {
int expected_channels = 8;
- ChannelLayout expected_layout = CHANNEL_LAYOUT_5_1;
+ ChannelLayout expected_layout = CHANNEL_LAYOUT_DISCRETE;
AudioParameters params(AudioParameters::AUDIO_PCM_LOW_LATENCY,
- expected_layout, expected_channels, 44100, 16, 880,
- AudioParameters::NO_EFFECTS);
-
- EXPECT_EQ(expected_channels, params.channels());
- EXPECT_EQ(expected_layout, params.channel_layout());
- EXPECT_FALSE(params.IsValid());
-
- expected_layout = CHANNEL_LAYOUT_DISCRETE;
- params.Reset(AudioParameters::AUDIO_PCM_LOW_LATENCY, expected_layout,
- expected_channels, 44100, 16, 880);
-
+ expected_layout, 44100, 16, 880);
+ params.set_channels_for_discrete(expected_channels);
EXPECT_EQ(expected_channels, params.channels());
EXPECT_EQ(expected_layout, params.channel_layout());
EXPECT_TRUE(params.IsValid());
diff --git a/chromium/media/audio/clockless_audio_sink.cc b/chromium/media/audio/clockless_audio_sink.cc
index 60c839c124b..940ab2ea69b 100644
--- a/chromium/media/audio/clockless_audio_sink.cc
+++ b/chromium/media/audio/clockless_audio_sink.cc
@@ -8,6 +8,7 @@
#include "base/location.h"
#include "base/single_thread_task_runner.h"
#include "base/threading/simple_thread.h"
+#include "media/base/audio_hash.h"
namespace media {
@@ -15,11 +16,15 @@ namespace media {
// thread, running as fast as it can read the data.
class ClocklessAudioSinkThread : public base::DelegateSimpleThread::Delegate {
public:
- explicit ClocklessAudioSinkThread(const AudioParameters& params,
- AudioRendererSink::RenderCallback* callback)
+ ClocklessAudioSinkThread(const AudioParameters& params,
+ AudioRendererSink::RenderCallback* callback,
+ bool hashing)
: callback_(callback),
audio_bus_(AudioBus::Create(params)),
- stop_event_(new base::WaitableEvent(false, false)) {}
+ stop_event_(new base::WaitableEvent(false, false)) {
+ if (hashing)
+ audio_hash_.reset(new AudioHash());
+ }
void Start() {
stop_event_->Reset();
@@ -34,13 +39,21 @@ class ClocklessAudioSinkThread : public base::DelegateSimpleThread::Delegate {
return playback_time_;
}
+ std::string GetAudioHash() {
+ DCHECK(audio_hash_);
+ return audio_hash_->ToString();
+ }
+
private:
// Call Render() repeatedly, keeping track of the rendering time.
void Run() override {
base::TimeTicks start;
while (!stop_event_->IsSignaled()) {
- int frames_received = callback_->Render(audio_bus_.get(), 0);
- if (frames_received <= 0) {
+ const int frames_received = callback_->Render(audio_bus_.get(), 0);
+ DCHECK_GE(frames_received, 0);
+ if (audio_hash_)
+ audio_hash_->Update(audio_bus_.get(), frames_received);
+ if (!frames_received) {
// No data received, so let other threads run to provide data.
base::PlatformThread::YieldCurrentThread();
} else if (start.is_null()) {
@@ -58,18 +71,18 @@ class ClocklessAudioSinkThread : public base::DelegateSimpleThread::Delegate {
scoped_ptr<base::WaitableEvent> stop_event_;
scoped_ptr<base::DelegateSimpleThread> thread_;
base::TimeDelta playback_time_;
+ scoped_ptr<AudioHash> audio_hash_;
};
ClocklessAudioSink::ClocklessAudioSink()
- : initialized_(false),
- playing_(false) {}
+ : initialized_(false), playing_(false), hashing_(false) {}
ClocklessAudioSink::~ClocklessAudioSink() {}
void ClocklessAudioSink::Initialize(const AudioParameters& params,
RenderCallback* callback) {
DCHECK(!initialized_);
- thread_.reset(new ClocklessAudioSinkThread(params, callback));
+ thread_.reset(new ClocklessAudioSinkThread(params, callback, hashing_));
initialized_ = true;
}
@@ -108,11 +121,17 @@ bool ClocklessAudioSink::SetVolume(double volume) {
return volume == 0.0;
}
-void ClocklessAudioSink::SwitchOutputDevice(
- const std::string& device_id,
- const GURL& security_origin,
- const SwitchOutputDeviceCB& callback) {
- callback.Run(SWITCH_OUTPUT_DEVICE_RESULT_ERROR_NOT_SUPPORTED);
+OutputDevice* ClocklessAudioSink::GetOutputDevice() {
+ return nullptr;
+}
+
+void ClocklessAudioSink::StartAudioHashForTesting() {
+ DCHECK(!initialized_);
+ hashing_ = true;
+}
+
+std::string ClocklessAudioSink::GetAudioHashForTesting() {
+ return thread_ && hashing_ ? thread_->GetAudioHash() : std::string();
}
} // namespace media
diff --git a/chromium/media/audio/clockless_audio_sink.h b/chromium/media/audio/clockless_audio_sink.h
index 2251b007c56..96745a51ed7 100644
--- a/chromium/media/audio/clockless_audio_sink.h
+++ b/chromium/media/audio/clockless_audio_sink.h
@@ -18,6 +18,7 @@ class SingleThreadTaskRunner;
namespace media {
class AudioBus;
class ClocklessAudioSinkThread;
+class OutputDevice;
// Implementation of an AudioRendererSink that consumes the audio as fast as
// possible. This class does not support multiple Play()/Pause() events.
@@ -34,13 +35,17 @@ class MEDIA_EXPORT ClocklessAudioSink
void Pause() override;
void Play() override;
bool SetVolume(double volume) override;
- void SwitchOutputDevice(const std::string& device_id,
- const GURL& security_origin,
- const SwitchOutputDeviceCB& callback) override;
+ OutputDevice* GetOutputDevice() override;
// Returns the time taken to consume all the audio.
base::TimeDelta render_time() { return playback_time_; }
+ // Enables audio frame hashing. Must be called prior to Initialize().
+ void StartAudioHashForTesting();
+
+ // Returns the hash of all audio frames seen since construction.
+ std::string GetAudioHashForTesting();
+
protected:
~ClocklessAudioSink() override;
@@ -48,6 +53,7 @@ class MEDIA_EXPORT ClocklessAudioSink
scoped_ptr<ClocklessAudioSinkThread> thread_;
bool initialized_;
bool playing_;
+ bool hashing_;
// Time taken in last set of Render() calls.
base::TimeDelta playback_time_;
diff --git a/chromium/media/audio/cras/audio_manager_cras.cc b/chromium/media/audio/cras/audio_manager_cras.cc
index befd3df6dfc..e872ada6c32 100644
--- a/chromium/media/audio/cras/audio_manager_cras.cc
+++ b/chromium/media/audio/cras/audio_manager_cras.cc
@@ -9,11 +9,16 @@
#include "base/command_line.h"
#include "base/environment.h"
#include "base/logging.h"
+#include "base/metrics/field_trial.h"
+#include "base/metrics/histogram.h"
#include "base/nix/xdg_util.h"
#include "base/stl_util.h"
+#include "chromeos/audio/audio_device.h"
+#include "chromeos/audio/cras_audio_handler.h"
#include "media/audio/cras/cras_input.h"
#include "media/audio/cras/cras_unified.h"
#include "media/base/channel_layout.h"
+#include "media/base/media_resources.h"
// cras_util.h headers pull in min/max macros...
// TODO(dgreid): Fix headers such that these aren't imported.
@@ -21,40 +26,118 @@
#undef max
namespace media {
-
-static void AddDefaultDevice(AudioDeviceNames* device_names) {
- DCHECK(device_names->empty());
-
- // Cras will route audio from a proper physical device automatically.
- device_names->push_back(
- AudioDeviceName(AudioManagerBase::kDefaultDeviceName,
- AudioManagerBase::kDefaultDeviceId));
-}
+namespace {
// Maximum number of output streams that can be open simultaneously.
-static const int kMaxOutputStreams = 50;
+const int kMaxOutputStreams = 50;
// Default sample rate for input and output streams.
-static const int kDefaultSampleRate = 48000;
+const int kDefaultSampleRate = 48000;
// Define bounds for the output buffer size.
-static const int kMinimumOutputBufferSize = 512;
-static const int kMaximumOutputBufferSize = 8192;
+const int kMinimumOutputBufferSize = 512;
+const int kMaximumOutputBufferSize = 8192;
// Default input buffer size.
-static const int kDefaultInputBufferSize = 1024;
+const int kDefaultInputBufferSize = 1024;
+
+const char kBeamformingOnDeviceId[] = "default-beamforming-on";
+const char kBeamformingOffDeviceId[] = "default-beamforming-off";
+
+enum CrosBeamformingDeviceState {
+ BEAMFORMING_DEFAULT_ENABLED = 0,
+ BEAMFORMING_USER_ENABLED,
+ BEAMFORMING_DEFAULT_DISABLED,
+ BEAMFORMING_USER_DISABLED,
+ BEAMFORMING_STATE_MAX = BEAMFORMING_USER_DISABLED
+};
+
+void RecordBeamformingDeviceState(CrosBeamformingDeviceState state) {
+ UMA_HISTOGRAM_ENUMERATION("Media.CrosBeamformingDeviceState", state,
+ BEAMFORMING_STATE_MAX + 1);
+}
+
+bool IsBeamformingDefaultEnabled() {
+ return base::FieldTrialList::FindFullName("ChromebookBeamforming") ==
+ "Enabled";
+}
+
+void AddDefaultDevice(AudioDeviceNames* device_names) {
+ DCHECK(device_names->empty());
+
+ // Cras will route audio from a proper physical device automatically.
+ device_names->push_back(AudioDeviceName(AudioManager::GetDefaultDeviceName(),
+ AudioManagerBase::kDefaultDeviceId));
+}
+
+// Returns a mic positions string if the machine has a beamforming capable
+// internal mic and otherwise an empty string.
+std::string MicPositions() {
+ // Get the list of devices from CRAS. An internal mic with a non-empty
+ // positions field indicates the machine has a beamforming capable mic array.
+ chromeos::AudioDeviceList devices;
+ chromeos::CrasAudioHandler::Get()->GetAudioDevices(&devices);
+ for (const auto& device : devices) {
+ if (device.type == chromeos::AUDIO_TYPE_INTERNAL_MIC) {
+ // There should be only one internal mic device.
+ return device.mic_positions;
+ }
+ }
+ return "";
+}
+
+} // namespace
+
+// Adds the beamforming on and off devices to |device_names|.
+void AudioManagerCras::AddBeamformingDevices(AudioDeviceNames* device_names) {
+ DCHECK(device_names->empty());
+ const std::string beamforming_on_name =
+ GetLocalizedStringUTF8(BEAMFORMING_ON_DEFAULT_AUDIO_INPUT_DEVICE_NAME);
+ const std::string beamforming_off_name =
+ GetLocalizedStringUTF8(BEAMFORMING_OFF_DEFAULT_AUDIO_INPUT_DEVICE_NAME);
+
+ if (IsBeamformingDefaultEnabled()) {
+ // The first device in the list is expected to have a "default" device ID.
+ // Web apps may depend on this behavior.
+ beamforming_on_device_id_ = AudioManagerBase::kDefaultDeviceId;
+ beamforming_off_device_id_ = kBeamformingOffDeviceId;
+
+ // Users in the experiment will have the "beamforming on" device appear
+ // first in the list. This causes it to be selected by default.
+ device_names->push_back(
+ AudioDeviceName(beamforming_on_name, beamforming_on_device_id_));
+ device_names->push_back(
+ AudioDeviceName(beamforming_off_name, beamforming_off_device_id_));
+ } else {
+ beamforming_off_device_id_ = AudioManagerBase::kDefaultDeviceId;
+ beamforming_on_device_id_ = kBeamformingOnDeviceId;
+
+ device_names->push_back(
+ AudioDeviceName(beamforming_off_name, beamforming_off_device_id_));
+ device_names->push_back(
+ AudioDeviceName(beamforming_on_name, beamforming_on_device_id_));
+ }
+}
bool AudioManagerCras::HasAudioOutputDevices() {
return true;
}
bool AudioManagerCras::HasAudioInputDevices() {
- return true;
+ chromeos::AudioDeviceList devices;
+ chromeos::CrasAudioHandler::Get()->GetAudioDevices(&devices);
+ for (size_t i = 0; i < devices.size(); ++i) {
+ if (devices[i].is_input && devices[i].is_for_simple_usage())
+ return true;
+ }
+ return false;
}
AudioManagerCras::AudioManagerCras(AudioLogFactory* audio_log_factory)
: AudioManagerBase(audio_log_factory),
- has_keyboard_mic_(false) {
+ has_keyboard_mic_(false),
+ beamforming_on_device_id_(nullptr),
+ beamforming_off_device_id_(nullptr) {
SetMaxOutputStreamsAllowed(kMaxOutputStreams);
}
@@ -68,7 +151,16 @@ void AudioManagerCras::ShowAudioInputSettings() {
void AudioManagerCras::GetAudioInputDeviceNames(
AudioDeviceNames* device_names) {
- AddDefaultDevice(device_names);
+ DCHECK(device_names->empty());
+
+ mic_positions_ = ParsePointsFromString(MicPositions());
+ // At least two mic positions indicates we have a beamforming capable mic
+ // array. Add the virtual beamforming device to the list. When this device is
+ // queried through GetInputStreamParameters, provide the cached mic positions.
+ if (mic_positions_.size() > 1)
+ AddBeamformingDevices(device_names);
+ else
+ AddDefaultDevice(device_names);
}
void AudioManagerCras::GetAudioOutputDeviceNames(
@@ -83,15 +175,41 @@ AudioParameters AudioManagerCras::GetInputStreamParameters(
int user_buffer_size = GetUserBufferSize();
int buffer_size = user_buffer_size ?
user_buffer_size : kDefaultInputBufferSize;
- AudioParameters::PlatformEffectsMask effects =
- has_keyboard_mic_ ? AudioParameters::KEYBOARD_MIC
- : AudioParameters::NO_EFFECTS;
// TODO(hshi): Fine-tune audio parameters based on |device_id|. The optimal
// parameters for the loopback stream may differ from the default.
- return AudioParameters(
- AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_STEREO,
- kDefaultSampleRate, 16, buffer_size, effects);
+ AudioParameters params(AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ CHANNEL_LAYOUT_STEREO, kDefaultSampleRate, 16,
+ buffer_size);
+ if (has_keyboard_mic_)
+ params.set_effects(AudioParameters::KEYBOARD_MIC);
+
+ if (mic_positions_.size() > 1) {
+ // We have the mic_positions_ check here because one of the beamforming
+ // devices will have been assigned the "default" ID, which could otherwise
+ // be confused with the ID in the non-beamforming-capable-device case.
+ DCHECK(beamforming_on_device_id_);
+ DCHECK(beamforming_off_device_id_);
+
+ if (device_id == beamforming_on_device_id_) {
+ params.set_mic_positions(mic_positions_);
+
+ // Record a UMA metric based on the state of the experiment and the
+ // selected device. This will tell us i) how common it is for users to
+ // manually adjust the beamforming device and ii) how contaminated our
+ // metric experiment buckets are.
+ if (IsBeamformingDefaultEnabled())
+ RecordBeamformingDeviceState(BEAMFORMING_DEFAULT_ENABLED);
+ else
+ RecordBeamformingDeviceState(BEAMFORMING_USER_ENABLED);
+ } else if (device_id == beamforming_off_device_id_) {
+ if (!IsBeamformingDefaultEnabled())
+ RecordBeamformingDeviceState(BEAMFORMING_DEFAULT_DISABLED);
+ else
+ RecordBeamformingDeviceState(BEAMFORMING_USER_DISABLED);
+ }
+ }
+ return params;
}
void AudioManagerCras::SetHasKeyboardMic() {
@@ -148,9 +266,8 @@ AudioParameters AudioManagerCras::GetPreferredOutputStreamParameters(
if (user_buffer_size)
buffer_size = user_buffer_size;
- return AudioParameters(
- AudioParameters::AUDIO_PCM_LOW_LATENCY, channel_layout,
- sample_rate, bits_per_sample, buffer_size, AudioParameters::NO_EFFECTS);
+ return AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY, channel_layout,
+ sample_rate, bits_per_sample, buffer_size);
}
AudioOutputStream* AudioManagerCras::MakeOutputStream(
diff --git a/chromium/media/audio/cras/audio_manager_cras.h b/chromium/media/audio/cras/audio_manager_cras.h
index 4c8f992ee05..fbe01027150 100644
--- a/chromium/media/audio/cras/audio_manager_cras.h
+++ b/chromium/media/audio/cras/audio_manager_cras.h
@@ -59,8 +59,16 @@ class MEDIA_EXPORT AudioManagerCras : public AudioManagerBase {
AudioInputStream* MakeInputStream(const AudioParameters& params,
const std::string& device_id);
+ void AddBeamformingDevices(AudioDeviceNames* device_names);
+
bool has_keyboard_mic_;
+ // Stores the mic positions field from the device.
+ std::vector<Point> mic_positions_;
+
+ const char* beamforming_on_device_id_;
+ const char* beamforming_off_device_id_;
+
DISALLOW_COPY_AND_ASSIGN(AudioManagerCras);
};
diff --git a/chromium/media/audio/fake_audio_manager.cc b/chromium/media/audio/fake_audio_manager.cc
index a181a2f2631..3d7640144a9 100644
--- a/chromium/media/audio/fake_audio_manager.cc
+++ b/chromium/media/audio/fake_audio_manager.cc
@@ -65,9 +65,8 @@ AudioParameters FakeAudioManager::GetPreferredOutputStreamParameters(
buffer_size = std::min(input_params.frames_per_buffer(), buffer_size);
}
- return AudioParameters(
- AudioParameters::AUDIO_PCM_LOW_LATENCY, channel_layout,
- sample_rate, bits_per_sample, buffer_size, AudioParameters::NO_EFFECTS);
+ return AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY, channel_layout,
+ sample_rate, bits_per_sample, buffer_size);
}
AudioParameters FakeAudioManager::GetInputStreamParameters(
diff --git a/chromium/media/audio/mac/audio_manager_mac.cc b/chromium/media/audio/mac/audio_manager_mac.cc
index 42651ffbea8..f2f73dab059 100644
--- a/chromium/media/audio/mac/audio_manager_mac.cc
+++ b/chromium/media/audio/mac/audio_manager_mac.cc
@@ -152,7 +152,7 @@ static void GetAudioDeviceInfo(bool is_input,
// on the top of the list for all platforms. There is no duplicate
// counting here since the default device has been abstracted out before.
media::AudioDeviceName name;
- name.device_name = AudioManagerBase::kDefaultDeviceName;
+ name.device_name = AudioManager::GetDefaultDeviceName();
name.unique_id = AudioManagerBase::kDefaultDeviceId;
device_names->push_front(name);
}
@@ -672,9 +672,10 @@ AudioParameters AudioManagerMac::GetPreferredOutputStreamParameters(
channel_layout = CHANNEL_LAYOUT_DISCRETE;
}
- return AudioParameters(
- AudioParameters::AUDIO_PCM_LOW_LATENCY, channel_layout, output_channels,
- hardware_sample_rate, 16, buffer_size, AudioParameters::NO_EFFECTS);
+ AudioParameters params(AudioParameters::AUDIO_PCM_LOW_LATENCY, channel_layout,
+ hardware_sample_rate, 16, buffer_size);
+ params.set_channels_for_discrete(output_channels);
+ return params;
}
void AudioManagerMac::InitializeOnAudioThread() {
diff --git a/chromium/media/audio/mac/audio_manager_mac.h b/chromium/media/audio/mac/audio_manager_mac.h
index 4bfb6b17335..c15dd2e77be 100644
--- a/chromium/media/audio/mac/audio_manager_mac.h
+++ b/chromium/media/audio/mac/audio_manager_mac.h
@@ -76,7 +76,7 @@ class MEDIA_EXPORT AudioManagerMac : public AudioManagerBase {
// Streams should consult ShouldDeferStreamStart() and if true check the value
// again after |kStartDelayInSecsForPowerEvents| has elapsed. If false, the
// stream may be started immediately.
- enum { kStartDelayInSecsForPowerEvents = 1 };
+ enum { kStartDelayInSecsForPowerEvents = 2 };
bool ShouldDeferStreamStart();
// Changes the buffer size for |device_id| if there are no active input or
diff --git a/chromium/media/audio/null_audio_sink.cc b/chromium/media/audio/null_audio_sink.cc
index fc5e900d705..ba257180a6c 100644
--- a/chromium/media/audio/null_audio_sink.cc
+++ b/chromium/media/audio/null_audio_sink.cc
@@ -71,11 +71,8 @@ bool NullAudioSink::SetVolume(double volume) {
return volume == 0.0;
}
-void NullAudioSink::SwitchOutputDevice(const std::string& device_id,
- const GURL& security_origin,
- const SwitchOutputDeviceCB& callback) {
- DCHECK(task_runner_->BelongsToCurrentThread());
- callback.Run(SWITCH_OUTPUT_DEVICE_RESULT_ERROR_NOT_SUPPORTED);
+OutputDevice* NullAudioSink::GetOutputDevice() {
+ return nullptr;
}
void NullAudioSink::CallRender() {
diff --git a/chromium/media/audio/null_audio_sink.h b/chromium/media/audio/null_audio_sink.h
index d8d5f0401ee..85c7731acfc 100644
--- a/chromium/media/audio/null_audio_sink.h
+++ b/chromium/media/audio/null_audio_sink.h
@@ -18,6 +18,7 @@ namespace media {
class AudioBus;
class AudioHash;
class FakeAudioWorker;
+class OutputDevice;
class MEDIA_EXPORT NullAudioSink
: NON_EXPORTED_BASE(public AudioRendererSink) {
@@ -32,9 +33,7 @@ class MEDIA_EXPORT NullAudioSink
void Pause() override;
void Play() override;
bool SetVolume(double volume) override;
- void SwitchOutputDevice(const std::string& device_id,
- const GURL& security_origin,
- const SwitchOutputDeviceCB& callback) override;
+ OutputDevice* GetOutputDevice() override;
// Enables audio frame hashing. Must be called prior to Initialize().
void StartAudioHashForTesting();
diff --git a/chromium/media/audio/openbsd/audio_manager_openbsd.cc b/chromium/media/audio/openbsd/audio_manager_openbsd.cc
deleted file mode 100644
index 618fb0caab8..00000000000
--- a/chromium/media/audio/openbsd/audio_manager_openbsd.cc
+++ /dev/null
@@ -1,160 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/openbsd/audio_manager_openbsd.h"
-
-#include <fcntl.h>
-
-#include "base/command_line.h"
-#include "base/file_path.h"
-#include "base/stl_util.h"
-#include "media/audio/audio_output_dispatcher.h"
-#include "media/audio/audio_parameters.h"
-#include "media/audio/pulse/pulse_output.h"
-#include "media/audio/pulse/pulse_stubs.h"
-#include "media/base/channel_layout.h"
-#include "media/base/limits.h"
-#include "media/base/media_switches.h"
-
-using media_audio_pulse::kModulePulse;
-using media_audio_pulse::InitializeStubs;
-using media_audio_pulse::StubPathMap;
-
-namespace media {
-
-// Maximum number of output streams that can be open simultaneously.
-static const int kMaxOutputStreams = 50;
-
-// Default sample rate for input and output streams.
-static const int kDefaultSampleRate = 48000;
-
-static const base::FilePath::CharType kPulseLib[] =
- FILE_PATH_LITERAL("libpulse.so.0");
-
-// Implementation of AudioManager.
-static bool HasAudioHardware() {
- int fd;
- const char *file;
-
- if ((file = getenv("AUDIOCTLDEVICE")) == 0 || *file == '\0')
- file = "/dev/audioctl";
-
- if ((fd = open(file, O_RDONLY)) < 0)
- return false;
-
- close(fd);
- return true;
-}
-
-bool AudioManagerOpenBSD::HasAudioOutputDevices() {
- return HasAudioHardware();
-}
-
-bool AudioManagerOpenBSD::HasAudioInputDevices() {
- return HasAudioHardware();
-}
-
-AudioParameters AudioManagerOpenBSD::GetInputStreamParameters(
- const std::string& device_id) {
- static const int kDefaultInputBufferSize = 1024;
-
- int user_buffer_size = GetUserBufferSize();
- int buffer_size = user_buffer_size ?
- user_buffer_size : kDefaultInputBufferSize;
-
- return AudioParameters(
- AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_STEREO,
- kDefaultSampleRate, 16, buffer_size);
-}
-
-AudioManagerOpenBSD::AudioManagerOpenBSD(AudioLogFactory* audio_log_factory)
- : AudioManagerBase(audio_log_factory),
- pulse_library_is_initialized_(false) {
- SetMaxOutputStreamsAllowed(kMaxOutputStreams);
- StubPathMap paths;
-
- // Check if the pulse library is avialbale.
- paths[kModulePulse].push_back(kPulseLib);
- if (!InitializeStubs(paths)) {
- DLOG(WARNING) << "Failed on loading the Pulse library and symbols";
- return;
- }
-
- pulse_library_is_initialized_ = true;
-}
-
-AudioManagerOpenBSD::~AudioManagerOpenBSD() {
- Shutdown();
-}
-
-AudioOutputStream* AudioManagerOpenBSD::MakeLinearOutputStream(
- const AudioParameters& params) {
- DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format);
- return MakeOutputStream(params);
-}
-
-AudioOutputStream* AudioManagerOpenBSD::MakeLowLatencyOutputStream(
- const AudioParameters& params,
- const std::string& device_id) {
- DLOG_IF(ERROR, !device_id.empty()) << "Not implemented!";
- DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format);
- return MakeOutputStream(params);
-}
-
-AudioInputStream* AudioManagerOpenBSD::MakeLinearInputStream(
- const AudioParameters& params, const std::string& device_id) {
- DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format);
- NOTIMPLEMENTED();
- return NULL;
-}
-
-AudioInputStream* AudioManagerOpenBSD::MakeLowLatencyInputStream(
- const AudioParameters& params, const std::string& device_id) {
- DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format);
- NOTIMPLEMENTED();
- return NULL;
-}
-
-AudioParameters AudioManagerOpenBSD::GetPreferredOutputStreamParameters(
- const std::string& output_device_id,
- const AudioParameters& input_params) {
- // TODO(tommi): Support |output_device_id|.
- DLOG_IF(ERROR, !output_device_id.empty()) << "Not implemented!";
- static const int kDefaultOutputBufferSize = 512;
-
- ChannelLayout channel_layout = CHANNEL_LAYOUT_STEREO;
- int sample_rate = kDefaultSampleRate;
- int buffer_size = kDefaultOutputBufferSize;
- int bits_per_sample = 16;
- if (input_params.IsValid()) {
- sample_rate = input_params.sample_rate();
- bits_per_sample = input_params.bits_per_sample();
- channel_layout = input_params.channel_layout();
- buffer_size = std::min(buffer_size, input_params.frames_per_buffer());
- }
-
- int user_buffer_size = GetUserBufferSize();
- if (user_buffer_size)
- buffer_size = user_buffer_size;
-
- return AudioParameters(
- AudioParameters::AUDIO_PCM_LOW_LATENCY, channel_layout,
- sample_rate, bits_per_sample, buffer_size, AudioParameters::NO_EFFECTS);
-}
-
-AudioOutputStream* AudioManagerOpenBSD::MakeOutputStream(
- const AudioParameters& params) {
- if (pulse_library_is_initialized_)
- return new PulseAudioOutputStream(params, this);
-
- return NULL;
-}
-
-// TODO(xians): Merge AudioManagerOpenBSD with AudioManagerPulse;
-// static
-AudioManager* CreateAudioManager(AudioLogFactory* audio_log_factory) {
- return new AudioManagerOpenBSD(audio_log_factory);
-}
-
-} // namespace media
diff --git a/chromium/media/audio/openbsd/audio_manager_openbsd.h b/chromium/media/audio/openbsd/audio_manager_openbsd.h
deleted file mode 100644
index 3326952bb04..00000000000
--- a/chromium/media/audio/openbsd/audio_manager_openbsd.h
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_OPENBSD_AUDIO_MANAGER_OPENBSD_H_
-#define MEDIA_AUDIO_OPENBSD_AUDIO_MANAGER_OPENBSD_H_
-
-#include <set>
-
-#include "base/compiler_specific.h"
-#include "media/audio/audio_manager_base.h"
-
-namespace media {
-
-class MEDIA_EXPORT AudioManagerOpenBSD : public AudioManagerBase {
- public:
- AudioManagerOpenBSD(AudioLogFactory* audio_log_factory);
-
- // Implementation of AudioManager.
- bool HasAudioOutputDevices() override;
- bool HasAudioInputDevices() override;
- AudioParameters GetInputStreamParameters(
- const std::string& device_id) override;
-
- // Implementation of AudioManagerBase.
- AudioOutputStream* MakeLinearOutputStream(
- const AudioParameters& params) override;
- AudioOutputStream* MakeLowLatencyOutputStream(
- const AudioParameters& params,
- const std::string& device_id) override;
- AudioInputStream* MakeLinearInputStream(
- const AudioParameters& params, const std::string& device_id) override;
- AudioInputStream* MakeLowLatencyInputStream(
- const AudioParameters& params, const std::string& device_id) override;
-
- protected:
- ~AudioManagerOpenBSD() override;
-
- AudioParameters GetPreferredOutputStreamParameters(
- const std::string& output_device_id,
- const AudioParameters& input_params) override;
-
- private:
- // Called by MakeLinearOutputStream and MakeLowLatencyOutputStream.
- AudioOutputStream* MakeOutputStream(const AudioParameters& params);
-
- // Flag to indicate whether the pulse library has been initialized or not.
- bool pulse_library_is_initialized_;
-
- DISALLOW_COPY_AND_ASSIGN(AudioManagerOpenBSD);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_OPENBSD_AUDIO_MANAGER_OPENBSD_H_
diff --git a/chromium/media/audio/point.cc b/chromium/media/audio/point.cc
new file mode 100644
index 00000000000..3246089fd13
--- /dev/null
+++ b/chromium/media/audio/point.cc
@@ -0,0 +1,61 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/point.h"
+
+#include "base/logging.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_split.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+
+namespace media {
+
+std::string PointsToString(const std::vector<Point>& points) {
+ std::string points_string;
+ if (!points.empty()) {
+ for (size_t i = 0; i < points.size() - 1; ++i) {
+ points_string.append(points[i].ToString());
+ points_string.append(", ");
+ }
+ points_string.append(points.back().ToString());
+ }
+ return points_string;
+}
+
+std::vector<Point> ParsePointsFromString(const std::string& points_string) {
+ std::vector<Point> points;
+ if (points_string.empty())
+ return points;
+
+ const auto& tokens =
+ base::SplitString(points_string, base::kWhitespaceASCII,
+ base::KEEP_WHITESPACE, base::SPLIT_WANT_NONEMPTY);
+ if (tokens.size() < 3 || tokens.size() % 3 != 0) {
+ LOG(ERROR) << "Malformed points string: " << points_string;
+ return points;
+ }
+
+ std::vector<float> float_tokens;
+ float_tokens.reserve(tokens.size());
+ for (const auto& token : tokens) {
+ double float_token;
+ if (!base::StringToDouble(token, &float_token)) {
+ LOG(ERROR) << "Unable to convert token=" << token
+ << " to double from points string: " << points_string;
+ return points;
+ }
+ float_tokens.push_back(float_token);
+ }
+
+ points.reserve(float_tokens.size() / 3);
+ for (size_t i = 0; i < float_tokens.size(); i += 3) {
+ points.push_back(
+ Point(float_tokens[i + 0], float_tokens[i + 1], float_tokens[i + 2]));
+ }
+
+ return points;
+}
+
+} // namespace media
diff --git a/chromium/media/audio/point.h b/chromium/media/audio/point.h
new file mode 100644
index 00000000000..d215a599119
--- /dev/null
+++ b/chromium/media/audio/point.h
@@ -0,0 +1,31 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_POINT_H_
+#define MEDIA_AUDIO_POINT_H_
+
+#include <string>
+#include <vector>
+
+#include "media/base/media_export.h"
+#include "ui/gfx/geometry/point3_f.h"
+
+namespace media {
+
+using Point = gfx::Point3F;
+
+// Returns a vector of points parsed from a whitespace-separated string
+// formatted as: "x1 y1 z1 ... zn yn zn" for n points.
+//
+// Returns an empty vector if |points_string| is empty or isn't parseable.
+MEDIA_EXPORT std::vector<Point> ParsePointsFromString(
+ const std::string& points_string);
+
+// Returns |points| as a human-readable string. (Not necessarily in the format
+// required by ParsePointsFromString).
+MEDIA_EXPORT std::string PointsToString(const std::vector<Point>& points);
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_POINT_H_
diff --git a/chromium/media/audio/point_unittest.cc b/chromium/media/audio/point_unittest.cc
new file mode 100644
index 00000000000..98aec649306
--- /dev/null
+++ b/chromium/media/audio/point_unittest.cc
@@ -0,0 +1,41 @@
+// Copyright (c) 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <cmath>
+
+#include "media/audio/point.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+namespace {
+
+TEST(PointTest, PointsToString) {
+ std::vector<Point> points(1, Point(1, 0, 0.01f));
+ points.push_back(Point(0, 2, 0.02f));
+ EXPECT_EQ("1.000000,0.000000,0.010000, 0.000000,2.000000,0.020000",
+ PointsToString(points));
+
+ EXPECT_EQ("", PointsToString(std::vector<Point>()));
+}
+
+TEST(PointTest, ParsePointString) {
+ const std::vector<Point> expected_empty;
+ EXPECT_EQ(expected_empty, ParsePointsFromString(""));
+ EXPECT_EQ(expected_empty, ParsePointsFromString("0 0 a"));
+ EXPECT_EQ(expected_empty, ParsePointsFromString("1 2"));
+ EXPECT_EQ(expected_empty, ParsePointsFromString("1 2 3 4"));
+
+ {
+ std::vector<Point> expected(1, Point(-0.02f, 0, 0));
+ expected.push_back(Point(0.02f, 0, 0));
+ EXPECT_EQ(expected, ParsePointsFromString("-0.02 0 0 0.02 0 0"));
+ }
+ {
+ std::vector<Point> expected(1, Point(1, 2, 3));
+ EXPECT_EQ(expected, ParsePointsFromString("1 2 3"));
+ }
+}
+
+} // namespace
+} // namespace media
diff --git a/chromium/media/audio/pulse/audio_manager_pulse.cc b/chromium/media/audio/pulse/audio_manager_pulse.cc
index c752a9e28bd..50a322f81e7 100644
--- a/chromium/media/audio/pulse/audio_manager_pulse.cc
+++ b/chromium/media/audio/pulse/audio_manager_pulse.cc
@@ -113,7 +113,7 @@ void AudioManagerPulse::GetAudioDeviceNames(
// Prepend the default device if the list is not empty.
if (!device_names->empty()) {
device_names->push_front(
- AudioDeviceName(AudioManagerBase::kDefaultDeviceName,
+ AudioDeviceName(AudioManager::GetDefaultDeviceName(),
AudioManagerBase::kDefaultDeviceId));
}
}
@@ -135,9 +135,9 @@ AudioParameters AudioManagerPulse::GetInputStreamParameters(
user_buffer_size : kDefaultInputBufferSize;
// TODO(xians): add support for querying native channel layout for pulse.
- return AudioParameters(
- AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_STEREO,
- GetNativeSampleRate(), 16, buffer_size);
+ return AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ CHANNEL_LAYOUT_STEREO, GetNativeSampleRate(), 16,
+ buffer_size);
}
AudioOutputStream* AudioManagerPulse::MakeLinearOutputStream(
@@ -189,9 +189,8 @@ AudioParameters AudioManagerPulse::GetPreferredOutputStreamParameters(
if (user_buffer_size)
buffer_size = user_buffer_size;
- return AudioParameters(
- AudioParameters::AUDIO_PCM_LOW_LATENCY, channel_layout,
- sample_rate, bits_per_sample, buffer_size, AudioParameters::NO_EFFECTS);
+ return AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY, channel_layout,
+ sample_rate, bits_per_sample, buffer_size);
}
AudioOutputStream* AudioManagerPulse::MakeOutputStream(
diff --git a/chromium/media/audio/pulse/pulse_output.cc b/chromium/media/audio/pulse/pulse_output.cc
index 5168f356a3c..0ba728d9751 100644
--- a/chromium/media/audio/pulse/pulse_output.cc
+++ b/chromium/media/audio/pulse/pulse_output.cc
@@ -62,9 +62,10 @@ PulseAudioOutputStream::~PulseAudioOutputStream() {
bool PulseAudioOutputStream::Open() {
DCHECK(thread_checker_.CalledOnValidThread());
- return pulse::CreateOutputStream(&pa_mainloop_, &pa_context_, &pa_stream_,
- params_, device_id_, &StreamNotifyCallback,
- &StreamRequestCallback, this);
+ return pulse::CreateOutputStream(
+ &pa_mainloop_, &pa_context_, &pa_stream_, params_, device_id_,
+ AudioManager::GetGlobalAppName(), &StreamNotifyCallback,
+ &StreamRequestCallback, this);
}
void PulseAudioOutputStream::Reset() {
diff --git a/chromium/media/audio/pulse/pulse_util.cc b/chromium/media/audio/pulse/pulse_util.cc
index ee37c03829b..72217079948 100644
--- a/chromium/media/audio/pulse/pulse_util.cc
+++ b/chromium/media/audio/pulse/pulse_util.cc
@@ -156,7 +156,7 @@ int GetHardwareLatencyInBytes(pa_stream* stream,
DLOG(ERROR) << message; \
return false; \
} \
-} while(0)
+} while (0)
bool CreateInputStream(pa_threaded_mainloop* mainloop,
pa_context* context,
@@ -234,6 +234,7 @@ bool CreateOutputStream(pa_threaded_mainloop** mainloop,
pa_stream** stream,
const AudioParameters& params,
const std::string& device_id,
+ const std::string& app_name,
pa_stream_notify_cb_t stream_callback,
pa_stream_request_cb_t write_callback,
void* user_data) {
@@ -244,7 +245,8 @@ bool CreateOutputStream(pa_threaded_mainloop** mainloop,
RETURN_ON_FAILURE(*mainloop, "Failed to create PulseAudio main loop.");
pa_mainloop_api* pa_mainloop_api = pa_threaded_mainloop_get_api(*mainloop);
- *context = pa_context_new(pa_mainloop_api, "Chromium");
+ *context = pa_context_new(pa_mainloop_api,
+ app_name.empty() ? "Chromium" : app_name.c_str());
RETURN_ON_FAILURE(*context, "Failed to create PulseAudio context.");
// A state callback must be set before calling pa_threaded_mainloop_lock() or
diff --git a/chromium/media/audio/pulse/pulse_util.h b/chromium/media/audio/pulse/pulse_util.h
index 791d6ade83a..94c4aa4a446 100644
--- a/chromium/media/audio/pulse/pulse_util.h
+++ b/chromium/media/audio/pulse/pulse_util.h
@@ -70,6 +70,7 @@ bool CreateOutputStream(pa_threaded_mainloop** mainloop,
pa_stream** stream,
const AudioParameters& params,
const std::string& device_id,
+ const std::string& app_name,
pa_stream_notify_cb_t stream_callback,
pa_stream_request_cb_t write_callback,
void* user_data);
diff --git a/chromium/media/audio/sounds/sounds_manager.cc b/chromium/media/audio/sounds/sounds_manager.cc
index 72a7a3b15c1..17f0dd87436 100644
--- a/chromium/media/audio/sounds/sounds_manager.cc
+++ b/chromium/media/audio/sounds/sounds_manager.cc
@@ -29,9 +29,12 @@ class SoundsManagerImpl : public SoundsManager {
// SoundsManager implementation:
bool Initialize(SoundKey key, const base::StringPiece& data) override;
bool Play(SoundKey key) override;
+ bool Stop(SoundKey key) override;
base::TimeDelta GetDuration(SoundKey key) override;
private:
+ linked_ptr<AudioStreamHandler> GetHandler(SoundKey key);
+
base::hash_map<SoundKey, linked_ptr<AudioStreamHandler> > handlers_;
scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
@@ -46,36 +49,56 @@ SoundsManagerImpl::~SoundsManagerImpl() { DCHECK(CalledOnValidThread()); }
bool SoundsManagerImpl::Initialize(SoundKey key,
const base::StringPiece& data) {
- if (handlers_.find(key) != handlers_.end() && handlers_[key]->IsInitialized())
+ linked_ptr<AudioStreamHandler> current_handler = GetHandler(key);
+ if (current_handler.get() && current_handler->IsInitialized())
return true;
- linked_ptr<AudioStreamHandler> handler(new AudioStreamHandler(data));
- if (!handler->IsInitialized()) {
+ linked_ptr<AudioStreamHandler> new_handler(new AudioStreamHandler(data));
+ if (!new_handler->IsInitialized()) {
LOG(WARNING) << "Can't initialize AudioStreamHandler for key=" << key;
return false;
}
- handlers_[key] = handler;
+ handlers_[key] = new_handler;
return true;
}
bool SoundsManagerImpl::Play(SoundKey key) {
DCHECK(CalledOnValidThread());
- if (handlers_.find(key) == handlers_.end() ||
- !handlers_[key]->IsInitialized()) {
+ linked_ptr<AudioStreamHandler> handler = GetHandler(key);
+ if (!handler.get())
return false;
- }
- return handlers_[key]->Play();
+ if (!handler->IsInitialized())
+ return false;
+ return handler->Play();
+}
+
+bool SoundsManagerImpl::Stop(SoundKey key) {
+ DCHECK(CalledOnValidThread());
+ linked_ptr<AudioStreamHandler> handler = GetHandler(key);
+ if (!handler.get())
+ return false;
+ if (!handler->IsInitialized())
+ return false;
+ handler->Stop();
+ return true;
}
base::TimeDelta SoundsManagerImpl::GetDuration(SoundKey key) {
DCHECK(CalledOnValidThread());
- if (handlers_.find(key) == handlers_.end() ||
- !handlers_[key]->IsInitialized()) {
+ linked_ptr<AudioStreamHandler> handler = GetHandler(key);
+ if (!handler.get())
+ return base::TimeDelta();
+ if (!handler->IsInitialized())
return base::TimeDelta();
- }
- const WavAudioHandler& wav_audio = handlers_[key]->wav_audio_handler();
+ const WavAudioHandler& wav_audio = handler->wav_audio_handler();
return wav_audio.GetDuration();
}
+linked_ptr<AudioStreamHandler> SoundsManagerImpl::GetHandler(SoundKey key) {
+ auto key_handler_pair_iter = handlers_.find(key);
+ return key_handler_pair_iter == handlers_.end() ?
+ linked_ptr<AudioStreamHandler>() : key_handler_pair_iter->second;
+}
+
} // namespace
SoundsManager::SoundsManager() {}
diff --git a/chromium/media/audio/sounds/sounds_manager.h b/chromium/media/audio/sounds/sounds_manager.h
index 71184da3522..7db164fa3be 100644
--- a/chromium/media/audio/sounds/sounds_manager.h
+++ b/chromium/media/audio/sounds/sounds_manager.h
@@ -42,6 +42,10 @@ class MEDIA_EXPORT SoundsManager : public base::NonThreadSafe {
// was not properly initialized.
virtual bool Play(SoundKey key) = 0;
+ // Stops playing sound identified by |key|, returns false if SoundsManager
+ // was not properly initialized.
+ virtual bool Stop(SoundKey key) = 0;
+
// Returns duration of the sound identified by |key|. If SoundsManager
// was not properly initialized or |key| was not registered, this
// method returns an empty value.
diff --git a/chromium/media/audio/sounds/sounds_manager_unittest.cc b/chromium/media/audio/sounds/sounds_manager_unittest.cc
index 9741e6ef029..018fb5c1f37 100644
--- a/chromium/media/audio/sounds/sounds_manager_unittest.cc
+++ b/chromium/media/audio/sounds/sounds_manager_unittest.cc
@@ -11,6 +11,7 @@
#include "base/run_loop.h"
#include "base/strings/string_piece.h"
#include "media/audio/audio_manager.h"
+#include "media/audio/simple_sources.h"
#include "media/audio/sounds/audio_stream_handler.h"
#include "media/audio/sounds/sounds_manager.h"
#include "media/audio/sounds/test_data.h"
@@ -37,6 +38,11 @@ class SoundsManagerTest : public testing::Test {
AudioStreamHandler::SetObserverForTesting(observer);
}
+ void SetAudioSourceForTesting(
+ AudioOutputStream::AudioSourceCallback* source) {
+ AudioStreamHandler::SetAudioSourceForTesting(source);
+ }
+
private:
scoped_ptr<AudioManager> audio_manager_;
@@ -66,4 +72,43 @@ TEST_F(SoundsManagerTest, Play) {
SetObserverForTesting(NULL);
}
+TEST_F(SoundsManagerTest, Stop) {
+ ASSERT_TRUE(SoundsManager::Get());
+
+ base::RunLoop run_loop;
+ TestObserver observer(run_loop.QuitClosure());
+
+ SetObserverForTesting(&observer);
+
+ ASSERT_TRUE(SoundsManager::Get()->Initialize(
+ kTestAudioKey,
+ base::StringPiece(kTestAudioData, arraysize(kTestAudioData))));
+
+ // This overrides the wav data set by kTestAudioData and results in
+ // a never-ending sine wave being played.
+ const int kChannels = 1;
+ const double kFreq = 200;
+ const double kSampleFreq = 44100;
+ SineWaveAudioSource sine_source(kChannels, kFreq, kSampleFreq);
+ SetAudioSourceForTesting(&sine_source);
+
+ ASSERT_EQ(0, observer.num_play_requests());
+ ASSERT_EQ(0, observer.num_stop_requests());
+
+ ASSERT_TRUE(SoundsManager::Get()->Play(kTestAudioKey));
+ ASSERT_TRUE(SoundsManager::Get()->Stop(kTestAudioKey));
+ run_loop.Run();
+
+ ASSERT_EQ(1, observer.num_play_requests());
+ ASSERT_EQ(1, observer.num_stop_requests());
+
+ SetObserverForTesting(NULL);
+}
+
+TEST_F(SoundsManagerTest, Uninitialized) {
+ ASSERT_TRUE(SoundsManager::Get());
+ ASSERT_FALSE(SoundsManager::Get()->Play(kTestAudioKey));
+ ASSERT_FALSE(SoundsManager::Get()->Stop(kTestAudioKey));
+}
+
} // namespace media
diff --git a/chromium/media/audio/virtual_audio_input_stream.h b/chromium/media/audio/virtual_audio_input_stream.h
index d64ef359e96..1ab0da4b347 100644
--- a/chromium/media/audio/virtual_audio_input_stream.h
+++ b/chromium/media/audio/virtual_audio_input_stream.h
@@ -8,7 +8,6 @@
#include <map>
#include <set>
-#include "base/gtest_prod_util.h"
#include "base/memory/scoped_ptr.h"
#include "base/synchronization/lock.h"
#include "base/threading/thread_checker.h"
diff --git a/chromium/media/audio/win/audio_device_listener_win.cc b/chromium/media/audio/win/audio_device_listener_win.cc
index 624c0ccd7ab..505007bae37 100644
--- a/chromium/media/audio/win/audio_device_listener_win.cc
+++ b/chromium/media/audio/win/audio_device_listener_win.cc
@@ -9,6 +9,7 @@
#include "base/logging.h"
#include "base/strings/utf_string_conversions.h"
#include "base/system_monitor/system_monitor.h"
+#include "base/time/default_tick_clock.h"
#include "base/win/scoped_co_mem.h"
#include "base/win/windows_version.h"
#include "media/audio/win/core_audio_util_win.h"
@@ -30,29 +31,8 @@ static std::string RoleToString(ERole role) {
}
}
-static std::string GetDeviceId(EDataFlow flow,
- ERole role) {
- ScopedComPtr<IMMDevice> device =
- CoreAudioUtil::CreateDefaultDevice(flow, role);
- if (!device.get()) {
- // Most probable reason for ending up here is that all audio devices are
- // disabled or unplugged.
- DVLOG(1) << "CoreAudioUtil::CreateDefaultDevice failed. No device?";
- return std::string();
- }
-
- AudioDeviceName device_name;
- HRESULT hr = CoreAudioUtil::GetDeviceName(device.get(), &device_name);
- if (FAILED(hr)) {
- DVLOG(1) << "Failed to retrieve the device id: " << std::hex << hr;
- return std::string();
- }
-
- return device_name.unique_id;
-}
-
AudioDeviceListenerWin::AudioDeviceListenerWin(const base::Closure& listener_cb)
- : listener_cb_(listener_cb) {
+ : listener_cb_(listener_cb), tick_clock_(new base::DefaultTickClock()) {
CHECK(CoreAudioUtil::IsSupported());
ScopedComPtr<IMMDeviceEnumerator> device_enumerator(
@@ -68,13 +48,6 @@ AudioDeviceListenerWin::AudioDeviceListenerWin(const base::Closure& listener_cb)
}
device_enumerator_ = device_enumerator;
-
- default_render_device_id_ = GetDeviceId(eRender, eConsole);
- default_capture_device_id_ = GetDeviceId(eCapture, eConsole);
- default_communications_render_device_id_ =
- GetDeviceId(eRender, eCommunications);
- default_communications_capture_device_id_ =
- GetDeviceId(eCapture, eCommunications);
}
AudioDeviceListenerWin::~AudioDeviceListenerWin() {
@@ -140,41 +113,36 @@ STDMETHODIMP AudioDeviceListenerWin::OnDefaultDeviceChanged(
return S_OK;
}
- // Grab a pointer to the appropriate ID member.
- // Note that there are three "?:"'s here to select the right ID.
- std::string* current_device_id =
- flow == eRender ? (
- role == eConsole ?
- &default_render_device_id_ :
- &default_communications_render_device_id_
- ) : (
- role == eConsole ?
- &default_capture_device_id_ :
- &default_communications_capture_device_id_
- );
-
// If no device is now available, |new_default_device_id| will be NULL.
std::string new_device_id;
if (new_default_device_id)
new_device_id = base::WideToUTF8(new_default_device_id);
+ // Only output device changes should be forwarded. Do not attempt to filter
+ // changes based on device id since some devices may not change their device
+ // id and instead trigger some internal flow change: http://crbug.com/506712
+ //
+ // We rate limit device changes to avoid a single device change causing
+ // back-to-back changes for eCommunications and eConsole; this is worth doing as
+ // it provides a substantially faster resumption of playback.
+ bool did_run_listener_cb = false;
+ const base::TimeTicks now = tick_clock_->NowTicks();
+ if (flow == eRender &&
+ now - last_device_change_time_ >
+ base::TimeDelta::FromMilliseconds(kDeviceChangeLimitMs)) {
+ last_device_change_time_ = now;
+ listener_cb_.Run();
+ did_run_listener_cb = true;
+ }
+
DVLOG(1) << "OnDefaultDeviceChanged() "
<< "new_default_device: "
- << (new_default_device_id ?
- CoreAudioUtil::GetFriendlyName(new_device_id) : "No device")
+ << (new_default_device_id
+ ? CoreAudioUtil::GetFriendlyName(new_device_id)
+ : "no device")
<< ", flow: " << FlowToString(flow)
- << ", role: " << RoleToString(role);
-
- // Only fire a state change event if the device has actually changed.
- // TODO(dalecurtis): This still seems to fire an extra event on my machine for
- // an unplug event (probably others too); e.g., we get two transitions to a
- // new default device id.
- if (new_device_id.compare(*current_device_id) == 0)
- return S_OK;
-
- // Store the new id in the member variable (that current_device_id points to).
- *current_device_id = new_device_id;
- listener_cb_.Run();
+ << ", role: " << RoleToString(role)
+ << ", notified manager: " << (did_run_listener_cb ? "Yes" : "No");
return S_OK;
}
diff --git a/chromium/media/audio/win/audio_device_listener_win.h b/chromium/media/audio/win/audio_device_listener_win.h
index 9c2ac4824a6..053afa64380 100644
--- a/chromium/media/audio/win/audio_device_listener_win.h
+++ b/chromium/media/audio/win/audio_device_listener_win.h
@@ -11,11 +11,16 @@
#include "base/basictypes.h"
#include "base/callback.h"
#include "base/threading/thread_checker.h"
+#include "base/time/time.h"
#include "base/win/scoped_comptr.h"
#include "media/base/media_export.h"
using base::win::ScopedComPtr;
+namespace base {
+class TickClock;
+}
+
namespace media {
// IMMNotificationClient implementation for listening for default device changes
@@ -35,6 +40,9 @@ class MEDIA_EXPORT AudioDeviceListenerWin : public IMMNotificationClient {
private:
friend class AudioDeviceListenerWinTest;
+ // Minimum allowed time between device change notifications.
+ static const int kDeviceChangeLimitMs = 250;
+
// IMMNotificationClient implementation.
STDMETHOD_(ULONG, AddRef)() override;
STDMETHOD_(ULONG, Release)() override;
@@ -50,14 +58,15 @@ class MEDIA_EXPORT AudioDeviceListenerWin : public IMMNotificationClient {
base::Closure listener_cb_;
ScopedComPtr<IMMDeviceEnumerator> device_enumerator_;
- std::string default_render_device_id_;
- std::string default_capture_device_id_;
- std::string default_communications_render_device_id_;
- std::string default_communications_capture_device_id_;
+
+ // Used to rate limit device change events.
+ base::TimeTicks last_device_change_time_;
// AudioDeviceListenerWin must be constructed and destructed on one thread.
base::ThreadChecker thread_checker_;
+ scoped_ptr<base::TickClock> tick_clock_;
+
DISALLOW_COPY_AND_ASSIGN(AudioDeviceListenerWin);
};
diff --git a/chromium/media/audio/win/audio_device_listener_win_unittest.cc b/chromium/media/audio/win/audio_device_listener_win_unittest.cc
index 052b1bb8c39..4b78d934f10 100644
--- a/chromium/media/audio/win/audio_device_listener_win_unittest.cc
+++ b/chromium/media/audio/win/audio_device_listener_win_unittest.cc
@@ -8,6 +8,7 @@
#include "base/bind_helpers.h"
#include "base/memory/scoped_ptr.h"
#include "base/strings/utf_string_conversions.h"
+#include "base/test/simple_test_tick_clock.h"
#include "base/win/scoped_com_initializer.h"
#include "media/audio/audio_manager.h"
#include "media/audio/audio_unittest_util.h"
@@ -20,7 +21,6 @@ using base::win::ScopedCOMInitializer;
namespace media {
-static const char kNoDevice[] = "";
static const char kFirstTestDevice[] = "test_device_0";
static const char kSecondTestDevice[] = "test_device_1";
@@ -36,6 +36,15 @@ class AudioDeviceListenerWinTest : public testing::Test {
output_device_listener_.reset(new AudioDeviceListenerWin(base::Bind(
&AudioDeviceListenerWinTest::OnDeviceChange, base::Unretained(this))));
+
+ tick_clock_ = new base::SimpleTestTickClock();
+ tick_clock_->Advance(base::TimeDelta::FromSeconds(12345));
+ output_device_listener_->tick_clock_.reset(tick_clock_);
+ }
+
+ void AdvanceLastDeviceChangeTime() {
+ tick_clock_->Advance(base::TimeDelta::FromMilliseconds(
+ AudioDeviceListenerWin::kDeviceChangeLimitMs + 1));
}
// Simulate a device change where no output devices are available.
@@ -51,15 +60,13 @@ class AudioDeviceListenerWinTest : public testing::Test {
base::ASCIIToUTF16(new_device_id).c_str()) == S_OK;
}
- void SetOutputDeviceId(std::string new_device_id) {
- output_device_listener_->default_render_device_id_ = new_device_id;
- }
MOCK_METHOD0(OnDeviceChange, void());
private:
ScopedCOMInitializer com_init_;
scoped_ptr<AudioDeviceListenerWin> output_device_listener_;
+ base::SimpleTestTickClock* tick_clock_;
DISALLOW_COPY_AND_ASSIGN(AudioDeviceListenerWinTest);
};
@@ -68,16 +75,15 @@ class AudioDeviceListenerWinTest : public testing::Test {
TEST_F(AudioDeviceListenerWinTest, OutputDeviceChange) {
ABORT_AUDIO_TEST_IF_NOT(CoreAudioUtil::IsSupported());
- SetOutputDeviceId(kNoDevice);
EXPECT_CALL(*this, OnDeviceChange()).Times(1);
ASSERT_TRUE(SimulateDefaultOutputDeviceChange(kFirstTestDevice));
testing::Mock::VerifyAndClear(this);
+ AdvanceLastDeviceChangeTime();
EXPECT_CALL(*this, OnDeviceChange()).Times(1);
ASSERT_TRUE(SimulateDefaultOutputDeviceChange(kSecondTestDevice));
- // The second device event should be ignored since the device id has not
- // changed.
+ // The second device event should be ignored since it occurs too soon.
ASSERT_TRUE(SimulateDefaultOutputDeviceChange(kSecondTestDevice));
}
@@ -86,15 +92,16 @@ TEST_F(AudioDeviceListenerWinTest, OutputDeviceChange) {
TEST_F(AudioDeviceListenerWinTest, NullOutputDeviceChange) {
ABORT_AUDIO_TEST_IF_NOT(CoreAudioUtil::IsSupported());
- SetOutputDeviceId(kNoDevice);
- EXPECT_CALL(*this, OnDeviceChange()).Times(0);
+ EXPECT_CALL(*this, OnDeviceChange()).Times(1);
ASSERT_TRUE(SimulateNullDefaultOutputDeviceChange());
testing::Mock::VerifyAndClear(this);
+ AdvanceLastDeviceChangeTime();
EXPECT_CALL(*this, OnDeviceChange()).Times(1);
ASSERT_TRUE(SimulateDefaultOutputDeviceChange(kFirstTestDevice));
testing::Mock::VerifyAndClear(this);
+ AdvanceLastDeviceChangeTime();
EXPECT_CALL(*this, OnDeviceChange()).Times(1);
ASSERT_TRUE(SimulateNullDefaultOutputDeviceChange());
}
diff --git a/chromium/media/audio/win/audio_low_latency_input_win.cc b/chromium/media/audio/win/audio_low_latency_input_win.cc
index 66792fcd985..2939522df50 100644
--- a/chromium/media/audio/win/audio_low_latency_input_win.cc
+++ b/chromium/media/audio/win/audio_low_latency_input_win.cc
@@ -7,6 +7,7 @@
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/strings/utf_string_conversions.h"
+#include "base/trace_event/trace_event.h"
#include "media/audio/win/audio_manager_win.h"
#include "media/audio/win/avrt_wrapper_win.h"
#include "media/audio/win/core_audio_util_win.h"
@@ -28,7 +29,6 @@ WASAPIAudioInputStream::WASAPIAudioInputStream(AudioManagerWin* manager,
packet_size_frames_(0),
packet_size_bytes_(0),
endpoint_buffer_size_frames_(0),
- effects_(params.effects()),
device_id_(device_id),
perf_count_to_100ns_units_(0.0),
ms_to_frame_count_(0.0),
@@ -293,13 +293,16 @@ void WASAPIAudioInputStream::Run() {
2 * packet_size_frames_ * frame_size_);
scoped_ptr<uint8[]> capture_buffer(new uint8[capture_buffer_size]);
- LARGE_INTEGER now_count;
+ LARGE_INTEGER now_count = {};
bool recording = true;
bool error = false;
double volume = GetVolume();
HANDLE wait_array[2] =
{ stop_capture_event_.Get(), audio_samples_ready_event_.Get() };
+ base::win::ScopedComPtr<IAudioClock> audio_clock;
+ audio_client_->GetService(__uuidof(IAudioClock), audio_clock.ReceiveVoid());
+
while (recording && !error) {
HRESULT hr = S_FALSE;
@@ -315,6 +318,7 @@ void WASAPIAudioInputStream::Run() {
break;
case WAIT_OBJECT_0 + 1:
{
+ TRACE_EVENT0("audio", "WASAPIAudioInputStream::Run_0");
// |audio_samples_ready_event_| has been set.
BYTE* data_ptr = NULL;
UINT32 num_frames_to_read = 0;
@@ -335,6 +339,16 @@ void WASAPIAudioInputStream::Run() {
continue;
}
+ if (audio_clock) {
+ // The reported timestamp from GetBuffer is not as reliable as the
+ // clock from the client. We've seen timestamps reported for
+ // USB audio devices be off by several days. Furthermore we've
+ // seen them jump back in time every 2 seconds or so.
+ audio_clock->GetPosition(
+ &device_position, &first_audio_frame_timestamp);
+ }
+
+
if (num_frames_to_read != 0) {
size_t pos = buffer_frame_index * frame_size_;
size_t num_bytes = num_frames_to_read * frame_size_;
@@ -359,7 +373,9 @@ void WASAPIAudioInputStream::Run() {
// first audio frame in the packet and B is the extra delay
// contained in any stored data. Unit is in audio frames.
QueryPerformanceCounter(&now_count);
- double audio_delay_frames =
+ // first_audio_frame_timestamp will be 0 if we didn't get a timestamp.
+ double audio_delay_frames = first_audio_frame_timestamp == 0 ?
+ num_frames_to_read :
((perf_count_to_100ns_units_ * now_count.QuadPart -
first_audio_frame_timestamp) / 10000.0) * ms_to_frame_count_ +
buffer_frame_index - num_frames_to_read;
@@ -387,12 +403,21 @@ void WASAPIAudioInputStream::Run() {
// using the current packet size. The stored section will be used
// either in the next while-loop iteration or in the next
// capture event.
+ // TODO(tommi): If this data will be used in the next capture
+ // event, we will report incorrect delay estimates because
+ // we'll use the one for the captured data that time around
+ // (i.e. in the future).
memmove(&capture_buffer[0],
&capture_buffer[packet_size_bytes_],
(buffer_frame_index - packet_size_frames_) * frame_size_);
+ DCHECK_GE(buffer_frame_index, packet_size_frames_);
buffer_frame_index -= packet_size_frames_;
- delay_frames -= packet_size_frames_;
+ if (delay_frames > packet_size_frames_) {
+ delay_frames -= packet_size_frames_;
+ } else {
+ delay_frames = 0;
+ }
}
}
break;
@@ -433,47 +458,22 @@ HRESULT WASAPIAudioInputStream::SetCaptureDevice() {
// Retrieve the IMMDevice by using the specified role or the specified
// unique endpoint device-identification string.
- if (effects_ & AudioParameters::DUCKING) {
- // Ducking has been requested and it is only supported for the default
- // communication device. So, let's open up the communication device and
- // see if the ID of that device matches the requested ID.
- // We consider a kDefaultDeviceId as well as an explicit device id match,
- // to be valid matches.
+ if (device_id_ == AudioManagerBase::kDefaultDeviceId) {
+ // Retrieve the default capture audio endpoint for the specified role.
+ // Note that, in Windows Vista, the MMDevice API supports device roles
+ // but the system-supplied user interface programs do not.
+ hr = enumerator->GetDefaultAudioEndpoint(eCapture, eConsole,
+ endpoint_device_.Receive());
+ } else if (device_id_ == AudioManagerBase::kCommunicationsDeviceId) {
hr = enumerator->GetDefaultAudioEndpoint(eCapture, eCommunications,
endpoint_device_.Receive());
- if (endpoint_device_.get() &&
- device_id_ != AudioManagerBase::kDefaultDeviceId) {
- base::win::ScopedCoMem<WCHAR> communications_id;
- endpoint_device_->GetId(&communications_id);
- if (device_id_ !=
- base::WideToUTF8(static_cast<WCHAR*>(communications_id))) {
- DLOG(WARNING) << "Ducking has been requested for a non-default device."
- "Not supported.";
- // We can't honor the requested effect flag, so turn it off and
- // continue. We'll check this flag later to see if we've actually
- // opened up the communications device, so it's important that it
- // reflects the active state.
- effects_ &= ~AudioParameters::DUCKING;
- endpoint_device_.Release(); // Fall back on code below.
- }
- }
- }
-
- if (!endpoint_device_.get()) {
- if (device_id_ == AudioManagerBase::kDefaultDeviceId) {
- // Retrieve the default capture audio endpoint for the specified role.
- // Note that, in Windows Vista, the MMDevice API supports device roles
- // but the system-supplied user interface programs do not.
- hr = enumerator->GetDefaultAudioEndpoint(eCapture, eConsole,
- endpoint_device_.Receive());
- } else if (device_id_ == AudioManagerBase::kLoopbackInputDeviceId) {
- // Capture the default playback stream.
- hr = enumerator->GetDefaultAudioEndpoint(eRender, eConsole,
- endpoint_device_.Receive());
- } else {
- hr = enumerator->GetDevice(base::UTF8ToUTF16(device_id_).c_str(),
- endpoint_device_.Receive());
- }
+ } else if (device_id_ == AudioManagerBase::kLoopbackInputDeviceId) {
+ // Capture the default playback stream.
+ hr = enumerator->GetDefaultAudioEndpoint(eRender, eConsole,
+ endpoint_device_.Receive());
+ } else {
+ hr = enumerator->GetDevice(base::UTF8ToUTF16(device_id_).c_str(),
+ endpoint_device_.Receive());
}
if (FAILED(hr))
@@ -571,8 +571,7 @@ HRESULT WASAPIAudioInputStream::InitializeAudioEngine() {
if (device_id_ == AudioManagerBase::kLoopbackInputDeviceId) {
flags = AUDCLNT_STREAMFLAGS_LOOPBACK | AUDCLNT_STREAMFLAGS_NOPERSIST;
} else {
- flags =
- AUDCLNT_STREAMFLAGS_EVENTCALLBACK | AUDCLNT_STREAMFLAGS_NOPERSIST;
+ flags = AUDCLNT_STREAMFLAGS_EVENTCALLBACK | AUDCLNT_STREAMFLAGS_NOPERSIST;
}
// Initialize the audio stream between the client and the device.
@@ -587,7 +586,8 @@ HRESULT WASAPIAudioInputStream::InitializeAudioEngine() {
0, // hnsBufferDuration
0,
&format_,
- (effects_ & AudioParameters::DUCKING) ? &kCommunicationsSessionId : NULL);
+ device_id_ == AudioManagerBase::kCommunicationsDeviceId ?
+ &kCommunicationsSessionId : nullptr);
if (FAILED(hr))
return hr;
diff --git a/chromium/media/audio/win/audio_low_latency_input_win.h b/chromium/media/audio/win/audio_low_latency_input_win.h
index f88b8dd9e85..f88c614a777 100644
--- a/chromium/media/audio/win/audio_low_latency_input_win.h
+++ b/chromium/media/audio/win/audio_low_latency_input_win.h
@@ -147,11 +147,6 @@ class MEDIA_EXPORT WASAPIAudioInputStream
// Length of the audio endpoint buffer.
uint32 endpoint_buffer_size_frames_;
- // A copy of the supplied AudioParameter's |effects|. If ducking was
- // specified (desired device=communications) but we ended up not being
- // able to open the communications device, this flag will be cleared.
- int effects_;
-
// Contains the unique name of the selected endpoint device.
// Note that AudioManagerBase::kDefaultDeviceId represents the default
// device role and is not a valid ID as such.
diff --git a/chromium/media/audio/win/audio_low_latency_input_win_unittest.cc b/chromium/media/audio/win/audio_low_latency_input_win_unittest.cc
index 902b03f15f3..5b8f46e551c 100644
--- a/chromium/media/audio/win/audio_low_latency_input_win_unittest.cc
+++ b/chromium/media/audio/win/audio_low_latency_input_win_unittest.cc
@@ -65,7 +65,8 @@ class FakeAudioInputCallback : public AudioInputStream::AudioInputCallback {
const AudioBus* src,
uint32 hardware_delay_bytes,
double volume) override {
- EXPECT_NE(hardware_delay_bytes, 0u);
+ EXPECT_GE(hardware_delay_bytes, 0u);
+ EXPECT_LT(hardware_delay_bytes, 0xFFFFu); // Arbitrarily picked.
num_received_audio_frames_ += src->frames();
data_event_.Signal();
}
@@ -193,11 +194,10 @@ class AudioInputStreamWrapper {
private:
AudioInputStream* CreateInputStream() {
+ AudioParameters params = default_params_;
+ params.set_frames_per_buffer(frames_per_buffer_);
AudioInputStream* ais = audio_man_->MakeAudioInputStream(
- AudioParameters(format(), default_params_.channel_layout(),
- sample_rate(), bits_per_sample(), frames_per_buffer_,
- default_params_.effects()),
- AudioManagerBase::kDefaultDeviceId);
+ params, AudioManagerBase::kDefaultDeviceId);
EXPECT_TRUE(ais);
return ais;
}
@@ -366,7 +366,7 @@ TEST(WinAudioInputTest, WASAPIAudioInputStreamTestPacketSizes) {
// We use 10ms packets and will run the test until ten packets are received.
// All should contain valid packets of the same size and a valid delay
// estimate.
- EXPECT_CALL(sink, OnData(ais.get(), NotNull(), Gt(bytes_per_packet), _))
+ EXPECT_CALL(sink, OnData(ais.get(), NotNull(), _, _))
.Times(AtLeast(10))
.WillRepeatedly(CheckCountAndPostQuitTask(&count, 10, &loop));
ais->Start(&sink);
@@ -386,7 +386,7 @@ TEST(WinAudioInputTest, WASAPIAudioInputStreamTestPacketSizes) {
bytes_per_packet = aisw.channels() * aisw.frames_per_buffer() *
(aisw.bits_per_sample() / 8);
- EXPECT_CALL(sink, OnData(ais.get(), NotNull(), Gt(bytes_per_packet), _))
+ EXPECT_CALL(sink, OnData(ais.get(), NotNull(), _, _))
.Times(AtLeast(10))
.WillRepeatedly(CheckCountAndPostQuitTask(&count, 10, &loop));
ais->Start(&sink);
@@ -402,7 +402,7 @@ TEST(WinAudioInputTest, WASAPIAudioInputStreamTestPacketSizes) {
bytes_per_packet = aisw.channels() * aisw.frames_per_buffer() *
(aisw.bits_per_sample() / 8);
- EXPECT_CALL(sink, OnData(ais.get(), NotNull(), Gt(bytes_per_packet), _))
+ EXPECT_CALL(sink, OnData(ais.get(), NotNull(), _, _))
.Times(AtLeast(10))
.WillRepeatedly(CheckCountAndPostQuitTask(&count, 10, &loop));
ais->Start(&sink);
diff --git a/chromium/media/audio/win/audio_low_latency_output_win.cc b/chromium/media/audio/win/audio_low_latency_output_win.cc
index 494d1b1f9fe..829d18fdbab 100644
--- a/chromium/media/audio/win/audio_low_latency_output_win.cc
+++ b/chromium/media/audio/win/audio_low_latency_output_win.cc
@@ -72,6 +72,12 @@ WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager,
audio_bus_(AudioBus::Create(params)) {
DCHECK(manager_);
+ // The empty string is used to indicate a default device and the
+ // |device_role_| member controls whether that's the default or default
+ // communications device.
+ DCHECK_NE(device_id_, AudioManagerBase::kDefaultDeviceId);
+ DCHECK_NE(device_id_, AudioManagerBase::kCommunicationsDeviceId);
+
DVLOG(1) << "WASAPIAudioOutputStream::WASAPIAudioOutputStream()";
DVLOG_IF(1, share_mode_ == AUDCLNT_SHAREMODE_EXCLUSIVE)
<< "Core Audio (WASAPI) EXCLUSIVE MODE is enabled.";
@@ -140,8 +146,7 @@ bool WASAPIAudioOutputStream::Open() {
// Create an IAudioClient interface for the default rendering IMMDevice.
ScopedComPtr<IAudioClient> audio_client;
- if (device_id_.empty() ||
- CoreAudioUtil::DeviceIsDefault(eRender, device_role_, device_id_)) {
+ if (device_id_.empty()) {
audio_client = CoreAudioUtil::CreateDefaultClient(eRender, device_role_);
communications_device = (device_role_ == eCommunications);
} else {
@@ -467,69 +472,88 @@ bool WASAPIAudioOutputStream::RenderAudioFromSource(UINT64 device_frequency) {
}
// Check if there is enough available space to fit the packet size
- // specified by the client, wait until a future callback.
+ // specified by the client. If not, wait until a future callback.
if (num_available_frames < packet_size_frames_)
return true;
- // Grab all available space in the rendering endpoint buffer
- // into which the client can write a data packet.
- hr = audio_render_client_->GetBuffer(packet_size_frames_,
- &audio_data);
- if (FAILED(hr)) {
- DLOG(ERROR) << "Failed to use rendering audio buffer: "
- << std::hex << hr;
- return false;
- }
+ // Derive the number of packets we need to get from the client to fill up the
+ // available area in the endpoint buffer. Well-behaved (> Vista) clients and
+ // exclusive mode streams should generally have a |num_packets| value of 1.
+ //
+ // Vista clients are not able to maintain reliable callbacks, so the endpoint
+ // buffer may exhaust itself such that back-to-back callbacks are occasionally
+ // necessary to avoid glitches. In such cases we have no choice but to issue
+ // back-to-back reads and pray that the browser side has enough data cached or
+ // that the renderer can fulfill the read before we glitch anyway.
+ //
+ // API documentation does not guarantee that even on Win7+ clients we won't
+ // need to fill more than a period size worth of buffers; but in practice this
+ // appears to be infrequent.
+ //
+ // See http://crbug.com/524947.
+ const size_t num_packets = num_available_frames / packet_size_frames_;
+ for (size_t n = 0; n < num_packets; ++n) {
+ // Grab all available space in the rendering endpoint buffer
+ // into which the client can write a data packet.
+ hr = audio_render_client_->GetBuffer(packet_size_frames_,
+ &audio_data);
+ if (FAILED(hr)) {
+ DLOG(ERROR) << "Failed to use rendering audio buffer: "
+ << std::hex << hr;
+ return false;
+ }
+
+ // Derive the audio delay which corresponds to the delay between
+ // a render event and the time when the first audio sample in a
+ // packet is played out through the speaker. This delay value
+ // can typically be utilized by an acoustic echo-control (AEC)
+ // unit at the render side.
+ UINT64 position = 0;
+ uint32 audio_delay_bytes = 0;
+ hr = audio_clock_->GetPosition(&position, NULL);
+ if (SUCCEEDED(hr)) {
+ // Stream position of the sample that is currently playing
+ // through the speaker.
+ double pos_sample_playing_frames = format_.Format.nSamplesPerSec *
+ (static_cast<double>(position) / device_frequency);
+
+ // Stream position of the last sample written to the endpoint
+ // buffer. Note that, the packet we are about to receive in
+ // the upcoming callback is also included.
+ size_t pos_last_sample_written_frames =
+ num_written_frames_ + packet_size_frames_;
+
+ // Derive the actual delay value which will be fed to the
+ // render client using the OnMoreData() callback.
+ audio_delay_bytes = (pos_last_sample_written_frames -
+ pos_sample_playing_frames) * format_.Format.nBlockAlign;
+ }
- // Derive the audio delay which corresponds to the delay between
- // a render event and the time when the first audio sample in a
- // packet is played out through the speaker. This delay value
- // can typically be utilized by an acoustic echo-control (AEC)
- // unit at the render side.
- UINT64 position = 0;
- uint32 audio_delay_bytes = 0;
- hr = audio_clock_->GetPosition(&position, NULL);
- if (SUCCEEDED(hr)) {
- // Stream position of the sample that is currently playing
- // through the speaker.
- double pos_sample_playing_frames = format_.Format.nSamplesPerSec *
- (static_cast<double>(position) / device_frequency);
-
- // Stream position of the last sample written to the endpoint
- // buffer. Note that, the packet we are about to receive in
- // the upcoming callback is also included.
- size_t pos_last_sample_written_frames =
- num_written_frames_ + packet_size_frames_;
-
- // Derive the actual delay value which will be fed to the
- // render client using the OnMoreData() callback.
- audio_delay_bytes = (pos_last_sample_written_frames -
- pos_sample_playing_frames) * format_.Format.nBlockAlign;
+ // Read a data packet from the registered client source and
+ // deliver a delay estimate in the same callback to the client.
+
+ int frames_filled = source_->OnMoreData(
+ audio_bus_.get(), audio_delay_bytes);
+ uint32 num_filled_bytes = frames_filled * format_.Format.nBlockAlign;
+ DCHECK_LE(num_filled_bytes, packet_size_bytes_);
+
+ // Note: If this ever changes to output raw float the data must be
+ // clipped and sanitized since it may come from an untrusted
+ // source such as NaCl.
+ const int bytes_per_sample = format_.Format.wBitsPerSample >> 3;
+ audio_bus_->Scale(volume_);
+ audio_bus_->ToInterleaved(
+ frames_filled, bytes_per_sample, audio_data);
+
+ // Release the buffer space acquired in the GetBuffer() call.
+ // Render silence if we were not able to fill up the buffer totally.
+ DWORD flags = (num_filled_bytes < packet_size_bytes_) ?
+ AUDCLNT_BUFFERFLAGS_SILENT : 0;
+ audio_render_client_->ReleaseBuffer(packet_size_frames_, flags);
+
+ num_written_frames_ += packet_size_frames_;
}
- // Read a data packet from the registered client source and
- // deliver a delay estimate in the same callback to the client.
-
- int frames_filled = source_->OnMoreData(
- audio_bus_.get(), audio_delay_bytes);
- uint32 num_filled_bytes = frames_filled * format_.Format.nBlockAlign;
- DCHECK_LE(num_filled_bytes, packet_size_bytes_);
-
- // Note: If this ever changes to output raw float the data must be
- // clipped and sanitized since it may come from an untrusted
- // source such as NaCl.
- const int bytes_per_sample = format_.Format.wBitsPerSample >> 3;
- audio_bus_->Scale(volume_);
- audio_bus_->ToInterleaved(
- frames_filled, bytes_per_sample, audio_data);
-
- // Release the buffer space acquired in the GetBuffer() call.
- // Render silence if we were not able to fill up the buffer totally.
- DWORD flags = (num_filled_bytes < packet_size_bytes_) ?
- AUDCLNT_BUFFERFLAGS_SILENT : 0;
- audio_render_client_->ReleaseBuffer(packet_size_frames_, flags);
-
- num_written_frames_ += packet_size_frames_;
return true;
}
diff --git a/chromium/media/audio/win/audio_low_latency_output_win.h b/chromium/media/audio/win/audio_low_latency_output_win.h
index 9d612b33b25..a62e470ad3f 100644
--- a/chromium/media/audio/win/audio_low_latency_output_win.h
+++ b/chromium/media/audio/win/audio_low_latency_output_win.h
@@ -173,10 +173,10 @@ class MEDIA_EXPORT WASAPIAudioOutputStream :
void StopThread();
// Contains the thread ID of the creating thread.
- base::PlatformThreadId creating_thread_id_;
+ const base::PlatformThreadId creating_thread_id_;
// Our creator, the audio manager needs to be notified when we close.
- AudioManagerWin* manager_;
+ AudioManagerWin* const manager_;
// Rendering is driven by this thread (which has no message loop).
// All OnMoreData() callbacks will be called from this thread.
@@ -208,12 +208,12 @@ class MEDIA_EXPORT WASAPIAudioOutputStream :
const std::string device_id_;
// Defines the role that the system has assigned to an audio endpoint device.
- ERole device_role_;
+ const ERole device_role_;
// The sharing mode for the connection.
// Valid values are AUDCLNT_SHAREMODE_SHARED and AUDCLNT_SHAREMODE_EXCLUSIVE
// where AUDCLNT_SHAREMODE_SHARED is the default.
- AUDCLNT_SHAREMODE share_mode_;
+ const AUDCLNT_SHAREMODE share_mode_;
// Counts the number of audio frames written to the endpoint buffer.
UINT64 num_written_frames_;
diff --git a/chromium/media/audio/win/audio_low_latency_output_win_unittest.cc b/chromium/media/audio/win/audio_low_latency_output_win_unittest.cc
index 348543ce932..fae02bd3e03 100644
--- a/chromium/media/audio/win/audio_low_latency_output_win_unittest.cc
+++ b/chromium/media/audio/win/audio_low_latency_output_win_unittest.cc
@@ -16,6 +16,7 @@
#include "base/win/scoped_com_initializer.h"
#include "media/audio/audio_io.h"
#include "media/audio/audio_manager.h"
+#include "media/audio/audio_manager_base.h"
#include "media/audio/audio_unittest_util.h"
#include "media/audio/mock_audio_source_callback.h"
#include "media/audio/win/audio_low_latency_output_win.h"
@@ -164,7 +165,7 @@ class AudioOutputStreamWrapper {
bits_per_sample_(kBitsPerSample) {
AudioParameters preferred_params;
EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetPreferredAudioParameters(
- eRender, eConsole, &preferred_params)));
+ AudioManagerBase::kDefaultDeviceId, true, &preferred_params)));
channel_layout_ = preferred_params.channel_layout();
sample_rate_ = preferred_params.sample_rate();
samples_per_packet_ = preferred_params.frames_per_buffer();
diff --git a/chromium/media/audio/win/audio_manager_win.cc b/chromium/media/audio/win/audio_manager_win.cc
index 7da916d3f07..70e6c1bf7c3 100644
--- a/chromium/media/audio/win/audio_manager_win.cc
+++ b/chromium/media/audio/win/audio_manager_win.cc
@@ -283,10 +283,15 @@ void AudioManagerWin::GetAudioDeviceNamesImpl(
GetOutputDeviceNamesWin(device_names);
}
- // Always add default device parameters as first element.
if (!device_names->empty()) {
AudioDeviceName name;
- name.device_name = AudioManagerBase::kDefaultDeviceName;
+ if (enumeration_type() == kMMDeviceEnumeration) {
+ name.device_name = AudioManager::GetCommunicationsDeviceName();
+ name.unique_id = AudioManagerBase::kCommunicationsDeviceId;
+ device_names->push_front(name);
+ }
+ // Always add default device parameters as first element.
+ name.device_name = AudioManager::GetDefaultDeviceName();
name.unique_id = AudioManagerBase::kDefaultDeviceId;
device_names->push_front(name);
}
@@ -312,17 +317,14 @@ AudioParameters AudioManagerWin::GetInputStreamParameters(
if (FAILED(hr) || !parameters.IsValid()) {
// Windows Wave implementation is being used.
- parameters = AudioParameters(
- AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO, 48000, 16,
- kFallbackBufferSize, AudioParameters::NO_EFFECTS);
+ parameters =
+ AudioParameters(AudioParameters::AUDIO_PCM_LINEAR,
+ CHANNEL_LAYOUT_STEREO, 48000, 16, kFallbackBufferSize);
}
int user_buffer_size = GetUserBufferSize();
- if (user_buffer_size) {
- parameters.Reset(parameters.format(), parameters.channel_layout(),
- parameters.channels(), parameters.sample_rate(),
- parameters.bits_per_sample(), user_buffer_size);
- }
+ if (user_buffer_size)
+ parameters.set_frames_per_buffer(user_buffer_size);
return parameters;
}
@@ -377,11 +379,12 @@ AudioOutputStream* AudioManagerWin::MakeLowLatencyOutputStream(
// Pass an empty string to indicate that we want the default device
// since we consistently only check for an empty string in
// WASAPIAudioOutputStream.
+ bool communications = device_id == AudioManagerBase::kCommunicationsDeviceId;
return new WASAPIAudioOutputStream(this,
- device_id == AudioManagerBase::kDefaultDeviceId ?
+ communications || device_id == AudioManagerBase::kDefaultDeviceId ?
std::string() : device_id,
params,
- params.effects() & AudioParameters::DUCKING ? eCommunications : eConsole);
+ communications ? eCommunications : eConsole);
}
// Factory for the implementations of AudioInputStream for AUDIO_PCM_LINEAR
@@ -511,9 +514,10 @@ AudioParameters AudioManagerWin::GetPreferredOutputStreamParameters(
if (user_buffer_size)
buffer_size = user_buffer_size;
- return AudioParameters(
- AudioParameters::AUDIO_PCM_LOW_LATENCY, channel_layout,
- sample_rate, bits_per_sample, buffer_size, effects);
+ AudioParameters params(AudioParameters::AUDIO_PCM_LOW_LATENCY, channel_layout,
+ sample_rate, bits_per_sample, buffer_size);
+ params.set_effects(effects);
+ return params;
}
AudioInputStream* AudioManagerWin::CreatePCMWaveInAudioInputStream(
diff --git a/chromium/media/audio/win/core_audio_util_win.cc b/chromium/media/audio/win/core_audio_util_win.cc
index ce1793e0c28..1ce01911eee 100644
--- a/chromium/media/audio/win/core_audio_util_win.cc
+++ b/chromium/media/audio/win/core_audio_util_win.cc
@@ -423,7 +423,26 @@ std::string CoreAudioUtil::GetAudioControllerID(IMMDevice* device,
std::string CoreAudioUtil::GetMatchingOutputDeviceID(
const std::string& input_device_id) {
- ScopedComPtr<IMMDevice> input_device(CreateDevice(input_device_id));
+ // Special handling for the default communications device.
+ // We always treat the configured communications devices, as a pair.
+ // If we didn't do that and the user has e.g. configured a mic of a headset
+ // as the default comms input device and a different device (not the speakers
+ // of the headset) as the default comms output device, then we would otherwise
+ // here pick the headset as the matched output device. That's technically
+ // correct, but the user experience would be that any audio played out to
+ // the matched device, would get ducked since it's not the default comms
+ // device. So here, we go with the user's configuration.
+ if (input_device_id == AudioManagerBase::kCommunicationsDeviceId)
+ return AudioManagerBase::kCommunicationsDeviceId;
+
+ ScopedComPtr<IMMDevice> input_device;
+ if (input_device_id.empty() ||
+ input_device_id == AudioManagerBase::kDefaultDeviceId) {
+ input_device = CreateDefaultDevice(eCapture, eConsole);
+ } else {
+ input_device = CreateDevice(input_device_id);
+ }
+
if (!input_device.get())
return std::string();
@@ -711,31 +730,6 @@ HRESULT CoreAudioUtil::GetPreferredAudioParameters(
return hr;
}
-HRESULT CoreAudioUtil::GetPreferredAudioParameters(
- EDataFlow data_flow, ERole role, AudioParameters* params) {
- DCHECK(IsSupported());
- ScopedComPtr<IAudioClient> client(CreateDefaultClient(data_flow, role));
- if (!client.get()) {
- // Map NULL-pointer to new error code which can be different from the
- // actual error code. The exact value is not important here.
- return AUDCLNT_E_ENDPOINT_CREATE_FAILED;
- }
-
- HRESULT hr = GetPreferredAudioParameters(client.get(), params);
- if (FAILED(hr))
- return hr;
-
- if (role == eCommunications) {
- // Raise the 'DUCKING' flag for default communication devices.
- *params = AudioParameters(params->format(), params->channel_layout(),
- params->channels(), params->sample_rate(), params->bits_per_sample(),
- params->frames_per_buffer(),
- params->effects() | AudioParameters::DUCKING);
- }
-
- return hr;
-}
-
HRESULT CoreAudioUtil::GetPreferredAudioParameters(const std::string& device_id,
bool is_output_device,
AudioParameters* params) {
@@ -748,6 +742,9 @@ HRESULT CoreAudioUtil::GetPreferredAudioParameters(const std::string& device_id,
} else if (device_id == AudioManagerBase::kLoopbackInputDeviceId) {
DCHECK(!is_output_device);
device = CoreAudioUtil::CreateDefaultDevice(eRender, eConsole);
+ } else if (device_id == AudioManagerBase::kCommunicationsDeviceId) {
+ device = CoreAudioUtil::CreateDefaultDevice(
+ is_output_device ? eRender : eCapture, eCommunications);
} else {
device = CreateDevice(device_id);
}
@@ -775,23 +772,11 @@ HRESULT CoreAudioUtil::GetPreferredAudioParameters(const std::string& device_id,
// TODO(dalecurtis): Old code rewrote != 1 channels to stereo, do we still
// need to do the same thing?
if (params->channels() != 1) {
- params->Reset(params->format(), CHANNEL_LAYOUT_STEREO, 2,
+ params->Reset(params->format(), CHANNEL_LAYOUT_STEREO,
params->sample_rate(), params->bits_per_sample(),
params->frames_per_buffer());
}
- ScopedComPtr<IMMDevice> communications_device(
- CreateDefaultDevice(eCapture, eCommunications));
- if (communications_device &&
- GetDeviceID(communications_device.get()) == GetDeviceID(device.get())) {
- // Raise the 'DUCKING' flag for default communication devices.
- *params =
- AudioParameters(params->format(), params->channel_layout(),
- params->channels(), params->sample_rate(),
- params->bits_per_sample(), params->frames_per_buffer(),
- params->effects() | AudioParameters::DUCKING);
- }
-
return hr;
}
diff --git a/chromium/media/audio/win/core_audio_util_win.h b/chromium/media/audio/win/core_audio_util_win.h
index d91d18c43f8..9110272c891 100644
--- a/chromium/media/audio/win/core_audio_util_win.h
+++ b/chromium/media/audio/win/core_audio_util_win.h
@@ -174,9 +174,6 @@ class MEDIA_EXPORT CoreAudioUtil {
// there are no preferred settings for an exclusive mode stream.
static HRESULT GetPreferredAudioParameters(IAudioClient* client,
AudioParameters* params);
- static HRESULT GetPreferredAudioParameters(EDataFlow data_flow,
- ERole role,
- AudioParameters* params);
static HRESULT GetPreferredAudioParameters(const std::string& device_id,
bool is_output_device,
AudioParameters* params);
diff --git a/chromium/media/audio/win/core_audio_util_win_unittest.cc b/chromium/media/audio/win/core_audio_util_win_unittest.cc
index f337849eafb..e1351e4d481 100644
--- a/chromium/media/audio/win/core_audio_util_win_unittest.cc
+++ b/chromium/media/audio/win/core_audio_util_win_unittest.cc
@@ -8,6 +8,7 @@
#include "base/win/scoped_co_mem.h"
#include "base/win/scoped_com_initializer.h"
#include "base/win/scoped_handle.h"
+#include "media/audio/audio_manager_base.h"
#include "media/audio/audio_unittest_util.h"
#include "media/audio/win/core_audio_util_win.h"
#include "testing/gmock/include/gmock/gmock.h"
@@ -255,8 +256,8 @@ TEST_F(CoreAudioUtilWinTest, IsChannelLayoutSupported) {
// means that it is possible to initialize a shared mode stream with the
// particular channel layout.
AudioParameters mix_params;
- HRESULT hr = CoreAudioUtil::GetPreferredAudioParameters(eRender, eConsole,
- &mix_params);
+ HRESULT hr = CoreAudioUtil::GetPreferredAudioParameters(
+ AudioManagerBase::kDefaultDeviceId, true, &mix_params);
EXPECT_TRUE(SUCCEEDED(hr));
EXPECT_TRUE(mix_params.IsValid());
EXPECT_TRUE(CoreAudioUtil::IsChannelLayoutSupported(
diff --git a/chromium/media/audio_unittests.isolate b/chromium/media/audio_unittests.isolate
index d91c6360b48..8baf2df6a3b 100644
--- a/chromium/media/audio_unittests.isolate
+++ b/chromium/media/audio_unittests.isolate
@@ -48,7 +48,6 @@
'variables': {
'files': [
'../testing/test_env.py',
- '<(PRODUCT_DIR)/audio_unittests<(EXECUTABLE_SUFFIX)',
],
},
}],
diff --git a/chromium/media/base/BUILD.gn b/chromium/media/base/BUILD.gn
index 098b6e5563d..3565553d752 100644
--- a/chromium/media/base/BUILD.gn
+++ b/chromium/media/base/BUILD.gn
@@ -58,7 +58,6 @@ source_set("base") {
"bit_reader_core.h",
"bitstream_buffer.h",
"buffering_state.h",
- "buffers.h",
"byte_queue.cc",
"byte_queue.h",
"cdm_callback_promise.cc",
@@ -80,7 +79,6 @@ source_set("base") {
"channel_mixer.h",
"channel_mixing_matrix.cc",
"channel_mixing_matrix.h",
- "clock.h",
"data_buffer.cc",
"data_buffer.h",
"data_source.cc",
@@ -120,6 +118,8 @@ source_set("base") {
"media_log_event.h",
"media_permission.cc",
"media_permission.h",
+ "media_resources.cc",
+ "media_resources.h",
"media_switches.cc",
"media_switches.h",
"mime_util.cc",
@@ -130,6 +130,7 @@ source_set("base") {
"multi_channel_resampler.h",
"null_video_sink.cc",
"null_video_sink.h",
+ "output_device.h",
"pipeline.cc",
"pipeline.h",
"pipeline_status.h",
@@ -171,6 +172,7 @@ source_set("base") {
"time_delta_interpolator.cc",
"time_delta_interpolator.h",
"time_source.h",
+ "timestamp_constants.h",
"user_input_monitor.cc",
"user_input_monitor.h",
"video_capture_types.cc",
@@ -190,6 +192,8 @@ source_set("base") {
"video_renderer.cc",
"video_renderer.h",
"video_rotation.h",
+ "video_types.cc",
+ "video_types.h",
"video_util.cc",
"video_util.h",
"wall_clock_time_source.cc",
@@ -207,6 +211,9 @@ source_set("base") {
]
libs = []
configs += [
+ # This target uses the ALLOCATOR_SHIM define.
+ "//base/allocator:allocator_shim_define",
+ "//build/config:precompiled_headers",
"//media:media_config",
"//media:media_implementation",
]
@@ -307,6 +314,8 @@ if (is_ios) {
"video_frame.h",
"video_frame_metadata.cc",
"video_frame_metadata.h",
+ "video_types.cc",
+ "video_types.h",
"video_util.cc",
"video_util.h",
"yuv_convert.cc",
@@ -329,6 +338,10 @@ source_set("test_support") {
"fake_audio_renderer_sink.h",
"fake_demuxer_stream.cc",
"fake_demuxer_stream.h",
+ "fake_media_resources.cc",
+ "fake_media_resources.h",
+ "fake_output_device.cc",
+ "fake_output_device.h",
"fake_text_track_stream.cc",
"fake_text_track_stream.h",
"gmock_callback_support.h",
@@ -338,6 +351,8 @@ source_set("test_support") {
"mock_demuxer_host.h",
"mock_filters.cc",
"mock_filters.h",
+ "mock_media_log.cc",
+ "mock_media_log.h",
"test_data_util.cc",
"test_data_util.h",
"test_helpers.cc",
@@ -394,6 +409,7 @@ source_set("unittests") {
"stream_parser_unittest.cc",
"text_ranges_unittest.cc",
"text_renderer_unittest.cc",
+ "time_delta_interpolator_unittest.cc",
"user_input_monitor_unittest.cc",
"vector_math_testing.h",
"vector_math_unittest.cc",
diff --git a/chromium/media/base/android/BUILD.gn b/chromium/media/base/android/BUILD.gn
index 7c87a7f50d6..78108a6509e 100644
--- a/chromium/media/base/android/BUILD.gn
+++ b/chromium/media/base/android/BUILD.gn
@@ -52,6 +52,10 @@ source_set("android") {
"media_resource_getter.h",
"media_source_player.cc",
"media_source_player.h",
+ "media_statistics.cc",
+ "media_statistics.h",
+ "media_task_runner.cc",
+ "media_task_runner.h",
"media_url_interceptor.h",
"video_decoder_job.cc",
"video_decoder_job.h",
@@ -83,6 +87,7 @@ source_set("unittests") {
"media_source_player_unittest.cc",
"test_data_factory.cc",
"test_data_factory.h",
+ "test_statistics.h",
]
deps = [
":android",
@@ -117,8 +122,8 @@ generate_jni("video_capture_jni_headers") {
java_cpp_enum("media_java_enums_srcjar") {
sources = [
- "//media/video/capture/android/video_capture_device_android.h",
- "//media/video/capture/video_capture_device.h",
+ "//media/capture/video/android/video_capture_device_android.h",
+ "//media/capture/video/video_capture_device.h",
]
outputs = [
"org/chromium/media/AndroidImageFormat.java",
diff --git a/chromium/media/base/android/access_unit_queue.cc b/chromium/media/base/android/access_unit_queue.cc
index e1973c0fa6c..151e4aa885f 100644
--- a/chromium/media/base/android/access_unit_queue.cc
+++ b/chromium/media/base/android/access_unit_queue.cc
@@ -39,7 +39,7 @@ void AccessUnitQueue::PushBack(const DemuxerData& data) {
for (size_t i = 0; i < data.access_units.size(); ++i) {
const AccessUnit& unit = data.access_units[i];
- // EOS must be the last unit in the chunk
+ // EOS must be the last unit in the chunk.
if (unit.is_end_of_stream) {
DCHECK(i == data.access_units.size() - 1);
}
@@ -164,7 +164,8 @@ AccessUnitQueue::Info AccessUnitQueue::GetInfo() const {
Info info;
base::AutoLock lock(lock_);
- info.length = GetUnconsumedAccessUnitLength();
+ GetUnconsumedAccessUnitLength(&info.length, &info.data_length);
+
info.has_eos = has_eos_;
info.front_unit = nullptr;
info.configs = nullptr;
@@ -186,14 +187,25 @@ void AccessUnitQueue::SetHistorySizeForTesting(size_t history_chunks_amount) {
history_chunks_amount_ = history_chunks_amount;
}
-int AccessUnitQueue::GetUnconsumedAccessUnitLength() const {
- int result = 0;
+void AccessUnitQueue::GetUnconsumedAccessUnitLength(int* total_length,
+ int* data_length) const {
+ *total_length = *data_length = 0;
+
DataChunkQueue::const_iterator chunk;
- for (chunk = current_chunk_; chunk != chunks_.end(); ++chunk)
- result += (*chunk)->access_units.size();
+ for (chunk = current_chunk_; chunk != chunks_.end(); ++chunk) {
+ size_t chunk_size = (*chunk)->access_units.size();
+ *total_length += chunk_size;
+ *data_length += chunk_size;
+
+ // Do not count configuration changes for |data_length|.
+ if (!(*chunk)->demuxer_configs.empty()) {
+ DCHECK((*chunk)->demuxer_configs.size() == 1);
+ --(*data_length);
+ }
+ }
- result -= index_in_chunk_;
- return result;
+ *total_length -= index_in_chunk_;
+ *data_length -= index_in_chunk_;
}
} // namespace media
diff --git a/chromium/media/base/android/access_unit_queue.h b/chromium/media/base/android/access_unit_queue.h
index 9fae42000f4..d6ea33d74a9 100644
--- a/chromium/media/base/android/access_unit_queue.h
+++ b/chromium/media/base/android/access_unit_queue.h
@@ -40,6 +40,9 @@ class AccessUnitQueue {
// Number of access units in the queue.
int length;
+ // Number of access units in the queue excluding config units.
+ int data_length;
+
// Whether End Of Stream has been added to the queue. Cleared by Flush().
bool has_eos;
@@ -75,9 +78,11 @@ class AccessUnitQueue {
void SetHistorySizeForTesting(size_t number_of_history_chunks);
private:
- // Returns the amount of access units between the current one and the end,
- // incuding current. Logically these are units that have not been consumed.
- int GetUnconsumedAccessUnitLength() const;
+ // Returns the total number of access units (total_length) and the number of
+ // units excluding configiration change requests (data_length). The number is
+ // calculated between the current one and the end, incuding the current.
+ // Logically these are units that have not been consumed.
+ void GetUnconsumedAccessUnitLength(int* total_length, int* data_length) const;
// The queue of data chunks. It owns the chunks.
typedef std::list<DemuxerData*> DataChunkQueue;
diff --git a/chromium/media/base/android/audio_decoder_job.cc b/chromium/media/base/android/audio_decoder_job.cc
index 4e6036c952b..4ef622a7e80 100644
--- a/chromium/media/base/android/audio_decoder_job.cc
+++ b/chromium/media/base/android/audio_decoder_job.cc
@@ -9,6 +9,7 @@
#include "base/threading/thread.h"
#include "media/base/android/media_codec_bridge.h"
#include "media/base/audio_timestamp_helper.h"
+#include "media/base/timestamp_constants.h"
namespace {
@@ -95,29 +96,46 @@ void AudioDecoderJob::ResetTimestampHelper() {
void AudioDecoderJob::ReleaseOutputBuffer(
int output_buffer_index,
+ size_t offset,
size_t size,
bool render_output,
+ bool /* is_late_frame */,
base::TimeDelta current_presentation_timestamp,
const ReleaseOutputCompletionCallback& callback) {
render_output = render_output && (size != 0u);
+ bool is_audio_underrun = false;
if (render_output) {
int64 head_position = (static_cast<AudioCodecBridge*>(
media_codec_bridge_.get()))->PlayOutputBuffer(
- output_buffer_index, size);
+ output_buffer_index, size, offset);
+
+ base::TimeTicks current_time = base::TimeTicks::Now();
+
size_t new_frames_count = size / bytes_per_frame_;
frame_count_ += new_frames_count;
audio_timestamp_helper_->AddFrames(new_frames_count);
int64 frames_to_play = frame_count_ - head_position;
DCHECK_GE(frames_to_play, 0);
+
+ const base::TimeDelta last_buffered =
+ audio_timestamp_helper_->GetTimestamp();
+
current_presentation_timestamp =
- audio_timestamp_helper_->GetTimestamp() -
+ last_buffered -
audio_timestamp_helper_->GetFrameDuration(frames_to_play);
+
+ // Potential audio underrun is considered a late frame for UMA.
+ is_audio_underrun = !next_frame_time_limit_.is_null() &&
+ next_frame_time_limit_ < current_time;
+
+ next_frame_time_limit_ =
+ current_time + (last_buffered - current_presentation_timestamp);
} else {
current_presentation_timestamp = kNoTimestamp();
}
media_codec_bridge_->ReleaseOutputBuffer(output_buffer_index, false);
- callback.Run(current_presentation_timestamp,
+ callback.Run(is_audio_underrun, current_presentation_timestamp,
audio_timestamp_helper_->GetTimestamp());
}
diff --git a/chromium/media/base/android/audio_decoder_job.h b/chromium/media/base/android/audio_decoder_job.h
index 0a7523fa778..f302a59942d 100644
--- a/chromium/media/base/android/audio_decoder_job.h
+++ b/chromium/media/base/android/audio_decoder_job.h
@@ -34,7 +34,6 @@ class AudioDecoderJob : public MediaDecoderJob {
// Sets the volume of the audio output.
void SetVolume(double volume);
- double volume() const { return volume_; }
// Sets the base timestamp for |audio_timestamp_helper_|.
void SetBaseTimestamp(base::TimeDelta base_timestamp);
@@ -43,8 +42,10 @@ class AudioDecoderJob : public MediaDecoderJob {
// MediaDecoderJob implementation.
void ReleaseOutputBuffer(
int output_buffer_index,
+ size_t offset,
size_t size,
bool render_output,
+ bool is_late_frame,
base::TimeDelta current_presentation_timestamp,
const ReleaseOutputCompletionCallback& callback) override;
bool ComputeTimeToRender() const override;
@@ -79,6 +80,9 @@ class AudioDecoderJob : public MediaDecoderJob {
// Object to calculate the current audio timestamp for A/V sync.
scoped_ptr<AudioTimestampHelper> audio_timestamp_helper_;
+ // The time limit for the next frame to avoid underrun.
+ base::TimeTicks next_frame_time_limit_;
+
DISALLOW_COPY_AND_ASSIGN(AudioDecoderJob);
};
diff --git a/chromium/media/base/android/browser_cdm_factory_android.cc b/chromium/media/base/android/browser_cdm_factory_android.cc
index 82f11d1385f..7e6d9197a22 100644
--- a/chromium/media/base/android/browser_cdm_factory_android.cc
+++ b/chromium/media/base/android/browser_cdm_factory_android.cc
@@ -13,7 +13,7 @@
namespace media {
-scoped_ptr<BrowserCdm> BrowserCdmFactoryAndroid::CreateBrowserCdm(
+ScopedBrowserCdmPtr BrowserCdmFactoryAndroid::CreateBrowserCdm(
const std::string& key_system,
bool use_hw_secure_codecs,
const SessionMessageCB& session_message_cb,
@@ -23,16 +23,16 @@ scoped_ptr<BrowserCdm> BrowserCdmFactoryAndroid::CreateBrowserCdm(
const SessionExpirationUpdateCB& session_expiration_update_cb) {
if (!MediaDrmBridge::IsKeySystemSupported(key_system)) {
NOTREACHED() << "Unsupported key system: " << key_system;
- return scoped_ptr<BrowserCdm>();
+ return ScopedBrowserCdmPtr();
}
- scoped_ptr<MediaDrmBridge> cdm(
+ ScopedMediaDrmBridgePtr cdm(
MediaDrmBridge::Create(key_system, session_message_cb, session_closed_cb,
legacy_session_error_cb, session_keys_change_cb,
session_expiration_update_cb));
if (!cdm) {
NOTREACHED() << "MediaDrmBridge cannot be created for " << key_system;
- return scoped_ptr<BrowserCdm>();
+ return ScopedBrowserCdmPtr();
}
if (key_system == kWidevineKeySystem) {
@@ -41,7 +41,7 @@ scoped_ptr<BrowserCdm> BrowserCdmFactoryAndroid::CreateBrowserCdm(
: MediaDrmBridge::SECURITY_LEVEL_3;
if (!cdm->SetSecurityLevel(security_level)) {
DVLOG(1) << "failed to set security level " << security_level;
- return scoped_ptr<BrowserCdm>();
+ return ScopedBrowserCdmPtr();
}
} else {
// Assume other key systems require hardware-secure codecs and thus do not
@@ -50,7 +50,7 @@ scoped_ptr<BrowserCdm> BrowserCdmFactoryAndroid::CreateBrowserCdm(
NOTREACHED()
<< key_system
<< " may require use_video_overlay_for_embedded_encrypted_video";
- return scoped_ptr<BrowserCdm>();
+ return ScopedBrowserCdmPtr();
}
}
diff --git a/chromium/media/base/android/browser_cdm_factory_android.h b/chromium/media/base/android/browser_cdm_factory_android.h
index 71b7970cfea..df6ca19228f 100644
--- a/chromium/media/base/android/browser_cdm_factory_android.h
+++ b/chromium/media/base/android/browser_cdm_factory_android.h
@@ -16,7 +16,7 @@ class MEDIA_EXPORT BrowserCdmFactoryAndroid : public BrowserCdmFactory {
BrowserCdmFactoryAndroid() {}
~BrowserCdmFactoryAndroid() final {};
- scoped_ptr<BrowserCdm> CreateBrowserCdm(
+ ScopedBrowserCdmPtr CreateBrowserCdm(
const std::string& key_system,
bool use_hw_secure_codecs,
const SessionMessageCB& session_message_cb,
diff --git a/chromium/media/base/android/demuxer_stream_player_params.cc b/chromium/media/base/android/demuxer_stream_player_params.cc
index ae656f8911c..6e86a7feeb5 100644
--- a/chromium/media/base/android/demuxer_stream_player_params.cc
+++ b/chromium/media/base/android/demuxer_stream_player_params.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "media/base/android/demuxer_stream_player_params.h"
+#include <iomanip>
namespace media {
@@ -28,6 +29,23 @@ DemuxerData::~DemuxerData() {}
namespace {
+const char* AsString(DemuxerStream::Type stream_type) {
+ switch (stream_type) {
+ case DemuxerStream::UNKNOWN:
+ return "UNKNOWN";
+ case DemuxerStream::AUDIO:
+ return "AUDIO";
+ case DemuxerStream::VIDEO:
+ return "VIDEO";
+ case DemuxerStream::TEXT:
+ return "TEXT";
+ case DemuxerStream::NUM_TYPES:
+ return "NUM_TYPES";
+ }
+ NOTREACHED();
+ return nullptr; // crash early
+}
+
#undef RETURN_STRING
#define RETURN_STRING(x) \
case x: \
@@ -59,6 +77,7 @@ const char* AsString(VideoCodec codec) {
switch (codec) {
RETURN_STRING(kUnknownVideoCodec);
RETURN_STRING(kCodecH264);
+ RETURN_STRING(kCodecHEVC);
RETURN_STRING(kCodecVC1);
RETURN_STRING(kCodecMPEG2);
RETURN_STRING(kCodecMPEG4);
@@ -70,14 +89,33 @@ const char* AsString(VideoCodec codec) {
return nullptr; // crash early
}
+const char* AsString(DemuxerStream::Status status) {
+ switch (status) {
+ case DemuxerStream::kOk:
+ return "kOk";
+ case DemuxerStream::kAborted:
+ return "kAborted";
+ case DemuxerStream::kConfigChanged:
+ return "kConfigChanged";
+ }
+ NOTREACHED();
+ return nullptr; // crash early
+}
+
#undef RETURN_STRING
} // namespace (anonymous)
} // namespace media
+std::ostream& operator<<(std::ostream& os, media::DemuxerStream::Type type) {
+ os << media::AsString(type);
+ return os;
+}
+
std::ostream& operator<<(std::ostream& os, const media::AccessUnit& au) {
- os << "status:" << au.status << (au.is_end_of_stream ? " EOS" : "")
+ os << "status:" << media::AsString(au.status)
+ << (au.is_end_of_stream ? " EOS" : "")
<< (au.is_key_frame ? " KEY_FRAME" : "") << " pts:" << au.timestamp
<< " size:" << au.data.size();
return os;
@@ -96,7 +134,16 @@ std::ostream& operator<<(std::ostream& os, const media::DemuxerConfigs& conf) {
os << " audio:" << media::AsString(conf.audio_codec)
<< " channels:" << conf.audio_channels
<< " rate:" << conf.audio_sampling_rate
- << (conf.is_audio_encrypted ? " encrypted" : "");
+ << (conf.is_audio_encrypted ? " encrypted" : "")
+ << " delay (ns):" << conf.audio_codec_delay_ns
+ << " preroll (ns):" << conf.audio_seek_preroll_ns;
+
+ if (!conf.audio_extra_data.empty()) {
+ os << " extra:{" << std::hex;
+ for (uint8 byte : conf.audio_extra_data)
+ os << " " << std::setfill('0') << std::setw(2) << (int)byte;
+ os << "}" << std::dec;
+ }
}
if (conf.video_codec != media::kUnknownVideoCodec) {
diff --git a/chromium/media/base/android/demuxer_stream_player_params.h b/chromium/media/base/android/demuxer_stream_player_params.h
index e5e96f37a69..33d8976a2ca 100644
--- a/chromium/media/base/android/demuxer_stream_player_params.h
+++ b/chromium/media/base/android/demuxer_stream_player_params.h
@@ -69,6 +69,9 @@ struct MEDIA_EXPORT DemuxerData {
// For logging
MEDIA_EXPORT
+std::ostream& operator<<(std::ostream& os, media::DemuxerStream::Type type);
+
+MEDIA_EXPORT
std::ostream& operator<<(std::ostream& os, const media::AccessUnit& au);
MEDIA_EXPORT
diff --git a/chromium/media/base/android/media_codec_audio_decoder.cc b/chromium/media/base/android/media_codec_audio_decoder.cc
index 6bef03de322..4b8a0768b82 100644
--- a/chromium/media/base/android/media_codec_audio_decoder.cc
+++ b/chromium/media/base/android/media_codec_audio_decoder.cc
@@ -7,6 +7,7 @@
#include "base/bind.h"
#include "base/logging.h"
#include "media/base/android/media_codec_bridge.h"
+#include "media/base/android/media_statistics.h"
#include "media/base/audio_timestamp_helper.h"
#include "media/base/demuxer_stream.h"
@@ -21,17 +22,23 @@ namespace media {
MediaCodecAudioDecoder::MediaCodecAudioDecoder(
const scoped_refptr<base::SingleThreadTaskRunner>& media_task_runner,
+ FrameStatistics* frame_statistics,
const base::Closure& request_data_cb,
const base::Closure& starvation_cb,
+ const base::Closure& decoder_drained_cb,
const base::Closure& stop_done_cb,
+ const base::Closure& waiting_for_decryption_key_cb,
const base::Closure& error_cb,
const SetTimeCallback& update_current_time_cb)
- : MediaCodecDecoder(media_task_runner,
+ : MediaCodecDecoder("AudioDecoder",
+ media_task_runner,
+ frame_statistics,
request_data_cb,
starvation_cb,
+ decoder_drained_cb,
stop_done_cb,
- error_cb,
- "AudioDecoder"),
+ waiting_for_decryption_key_cb,
+ error_cb),
volume_(-1.0),
bytes_per_frame_(0),
output_sampling_rate_(0),
@@ -40,6 +47,7 @@ MediaCodecAudioDecoder::MediaCodecAudioDecoder(
}
MediaCodecAudioDecoder::~MediaCodecAudioDecoder() {
+ DCHECK(media_task_runner_->BelongsToCurrentThread());
DVLOG(1) << "AudioDecoder::~AudioDecoder()";
ReleaseDecoderResources();
}
@@ -55,8 +63,6 @@ bool MediaCodecAudioDecoder::HasStream() const {
}
void MediaCodecAudioDecoder::SetDemuxerConfigs(const DemuxerConfigs& configs) {
- DCHECK(media_task_runner_->BelongsToCurrentThread());
-
DVLOG(1) << class_name() << "::" << __FUNCTION__ << " " << configs;
configs_ = configs;
@@ -64,6 +70,21 @@ void MediaCodecAudioDecoder::SetDemuxerConfigs(const DemuxerConfigs& configs) {
output_sampling_rate_ = configs.audio_sampling_rate;
}
+bool MediaCodecAudioDecoder::IsContentEncrypted() const {
+ // Make sure SetDemuxerConfigs() as been called.
+ DCHECK(configs_.audio_codec != kUnknownAudioCodec);
+ return configs_.is_audio_encrypted;
+}
+
+void MediaCodecAudioDecoder::ReleaseDecoderResources() {
+ DCHECK(media_task_runner_->BelongsToCurrentThread());
+ DVLOG(1) << class_name() << "::" << __FUNCTION__;
+
+ DoEmergencyStop();
+
+ ReleaseMediaCodec();
+}
+
void MediaCodecAudioDecoder::Flush() {
DCHECK(media_task_runner_->BelongsToCurrentThread());
@@ -81,7 +102,8 @@ void MediaCodecAudioDecoder::SetVolume(double volume) {
}
void MediaCodecAudioDecoder::SetBaseTimestamp(base::TimeDelta base_timestamp) {
- DCHECK(media_task_runner_->BelongsToCurrentThread());
+ // Called from Media thread and Decoder thread. When called from Media thread
+ // Decoder thread should not be running.
DVLOG(1) << __FUNCTION__ << " " << base_timestamp;
@@ -91,25 +113,35 @@ void MediaCodecAudioDecoder::SetBaseTimestamp(base::TimeDelta base_timestamp) {
}
bool MediaCodecAudioDecoder::IsCodecReconfigureNeeded(
- const DemuxerConfigs& curr,
const DemuxerConfigs& next) const {
- return curr.audio_codec != next.audio_codec ||
- curr.audio_channels != next.audio_channels ||
- curr.audio_sampling_rate != next.audio_sampling_rate ||
- next.is_audio_encrypted != next.is_audio_encrypted ||
- curr.audio_extra_data.size() != next.audio_extra_data.size() ||
- !std::equal(curr.audio_extra_data.begin(), curr.audio_extra_data.end(),
+ if (always_reconfigure_for_tests_)
+ return true;
+
+ return configs_.audio_codec != next.audio_codec ||
+ configs_.audio_channels != next.audio_channels ||
+ configs_.audio_sampling_rate != next.audio_sampling_rate ||
+ configs_.is_audio_encrypted != next.is_audio_encrypted ||
+ configs_.audio_extra_data.size() != next.audio_extra_data.size() ||
+ !std::equal(configs_.audio_extra_data.begin(),
+ configs_.audio_extra_data.end(),
next.audio_extra_data.begin());
}
-MediaCodecDecoder::ConfigStatus MediaCodecAudioDecoder::ConfigureInternal() {
+MediaCodecDecoder::ConfigStatus MediaCodecAudioDecoder::ConfigureInternal(
+ jobject media_crypto) {
DCHECK(media_task_runner_->BelongsToCurrentThread());
DVLOG(1) << class_name() << "::" << __FUNCTION__;
+ if (configs_.audio_codec == kUnknownAudioCodec) {
+ DVLOG(0) << class_name() << "::" << __FUNCTION__
+ << " configuration parameters are required";
+ return kConfigFailure;
+ }
+
media_codec_bridge_.reset(AudioCodecBridge::Create(configs_.audio_codec));
if (!media_codec_bridge_)
- return CONFIG_FAILURE;
+ return kConfigFailure;
if (!(static_cast<AudioCodecBridge*>(media_codec_bridge_.get()))
->Start(
@@ -121,14 +153,15 @@ MediaCodecDecoder::ConfigStatus MediaCodecAudioDecoder::ConfigureInternal() {
configs_.audio_codec_delay_ns,
configs_.audio_seek_preroll_ns,
true,
- GetMediaCrypto().obj())) {
- DVLOG(1) << class_name() << "::" << __FUNCTION__ << " failed";
+ media_crypto)) {
+ DVLOG(0) << class_name() << "::" << __FUNCTION__
+ << " failed: cannot start audio codec";
media_codec_bridge_.reset();
- return CONFIG_FAILURE;
+ return kConfigFailure;
}
- DVLOG(1) << class_name() << "::" << __FUNCTION__ << " succeeded";
+ DVLOG(0) << class_name() << "::" << __FUNCTION__ << " succeeded";
SetVolumeInternal();
@@ -136,7 +169,10 @@ MediaCodecDecoder::ConfigStatus MediaCodecAudioDecoder::ConfigureInternal() {
frame_count_ = 0;
ResetTimestampHelper();
- return CONFIG_OK;
+ if (!codec_created_for_tests_cb_.is_null())
+ media_task_runner_->PostTask(FROM_HERE, codec_created_for_tests_cb_);
+
+ return kConfigOk;
}
void MediaCodecAudioDecoder::OnOutputFormatChanged() {
@@ -151,38 +187,80 @@ void MediaCodecAudioDecoder::OnOutputFormatChanged() {
}
void MediaCodecAudioDecoder::Render(int buffer_index,
+ size_t offset,
size_t size,
- bool render_output,
+ RenderMode render_mode,
base::TimeDelta pts,
bool eos_encountered) {
DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
- DVLOG(2) << class_name() << "::" << __FUNCTION__ << " pts:" << pts;
+ DVLOG(2) << class_name() << "::" << __FUNCTION__ << " pts:" << pts << " "
+ << AsString(render_mode);
+
+ const bool do_play = (render_mode != kRenderSkip);
+
+ if (do_play) {
+ AudioCodecBridge* audio_codec =
+ static_cast<AudioCodecBridge*>(media_codec_bridge_.get());
+
+ DCHECK(audio_codec);
- render_output = render_output && (size != 0u);
+ const bool postpone = (render_mode == kRenderAfterPreroll);
- if (render_output) {
int64 head_position =
- (static_cast<AudioCodecBridge*>(media_codec_bridge_.get()))
- ->PlayOutputBuffer(buffer_index, size);
+ audio_codec->PlayOutputBuffer(buffer_index, size, offset, postpone);
+
+ base::TimeTicks current_time = base::TimeTicks::Now();
+
+ frame_statistics_->IncrementFrameCount();
+
+ // Reset the base timestamp if we have not started playing.
+ // SetBaseTimestamp() must be called before AddFrames() since it resets the
+ // internal frame count.
+ if (postpone && !frame_count_)
+ SetBaseTimestamp(pts);
size_t new_frames_count = size / bytes_per_frame_;
frame_count_ += new_frames_count;
audio_timestamp_helper_->AddFrames(new_frames_count);
- int64 frames_to_play = frame_count_ - head_position;
- DCHECK_GE(frames_to_play, 0);
-
- base::TimeDelta last_buffered = audio_timestamp_helper_->GetTimestamp();
- base::TimeDelta now_playing =
- last_buffered -
- audio_timestamp_helper_->GetFrameDuration(frames_to_play);
-
- DVLOG(2) << class_name() << "::" << __FUNCTION__ << " pts:" << pts
- << " will play: [" << now_playing << "," << last_buffered << "]";
- media_task_runner_->PostTask(
- FROM_HERE,
- base::Bind(update_current_time_cb_, now_playing, last_buffered));
+ if (postpone) {
+ DVLOG(2) << class_name() << "::" << __FUNCTION__ << " pts:" << pts
+ << " POSTPONE";
+
+ // Let the player adjust the start time.
+ media_task_runner_->PostTask(
+ FROM_HERE, base::Bind(update_current_time_cb_, pts, pts, true));
+ } else {
+ int64 frames_to_play = frame_count_ - head_position;
+
+ DCHECK_GE(frames_to_play, 0) << class_name() << "::" << __FUNCTION__
+ << " pts:" << pts
+ << " frame_count_:" << frame_count_
+ << " head_position:" << head_position;
+
+ base::TimeDelta last_buffered = audio_timestamp_helper_->GetTimestamp();
+ base::TimeDelta now_playing =
+ last_buffered -
+ audio_timestamp_helper_->GetFrameDuration(frames_to_play);
+
+ DVLOG(2) << class_name() << "::" << __FUNCTION__ << " pts:" << pts
+ << " will play: [" << now_playing << "," << last_buffered << "]";
+
+ // Statistics
+ if (!next_frame_time_limit_.is_null() &&
+ next_frame_time_limit_ < current_time) {
+ DVLOG(2) << class_name() << "::" << __FUNCTION__ << " LATE FRAME delay:"
+ << current_time - next_frame_time_limit_;
+ frame_statistics_->IncrementLateFrameCount();
+ }
+
+ next_frame_time_limit_ = current_time + (last_buffered - now_playing);
+
+ media_task_runner_->PostTask(
+ FROM_HERE, base::Bind(update_current_time_cb_, now_playing,
+ last_buffered, false));
+ }
}
media_codec_bridge_->ReleaseOutputBuffer(buffer_index, false);
diff --git a/chromium/media/base/android/media_codec_audio_decoder.h b/chromium/media/base/android/media_codec_audio_decoder.h
index 53bb664f601..3143d560fb0 100644
--- a/chromium/media/base/android/media_codec_audio_decoder.h
+++ b/chromium/media/base/android/media_codec_audio_decoder.h
@@ -19,9 +19,12 @@ class MediaCodecAudioDecoder : public MediaCodecDecoder {
// Called for each rendered frame.
MediaCodecAudioDecoder(
const scoped_refptr<base::SingleThreadTaskRunner>& media_runner,
+ FrameStatistics* frame_statistics,
const base::Closure& request_data_cb,
const base::Closure& starvation_cb,
+ const base::Closure& decoder_drained_cb,
const base::Closure& stop_done_cb,
+ const base::Closure& waiting_for_decryption_key_cb,
const base::Closure& error_cb,
const SetTimeCallback& update_current_time_cb);
~MediaCodecAudioDecoder() override;
@@ -30,6 +33,8 @@ class MediaCodecAudioDecoder : public MediaCodecDecoder {
bool HasStream() const override;
void SetDemuxerConfigs(const DemuxerConfigs& configs) override;
+ bool IsContentEncrypted() const override;
+ void ReleaseDecoderResources() override;
void Flush() override;
// Sets the volume of the audio output.
@@ -39,13 +44,13 @@ class MediaCodecAudioDecoder : public MediaCodecDecoder {
void SetBaseTimestamp(base::TimeDelta base_timestamp);
protected:
- bool IsCodecReconfigureNeeded(const DemuxerConfigs& curr,
- const DemuxerConfigs& next) const override;
- ConfigStatus ConfigureInternal() override;
+ bool IsCodecReconfigureNeeded(const DemuxerConfigs& next) const override;
+ ConfigStatus ConfigureInternal(jobject media_crypto) override;
void OnOutputFormatChanged() override;
void Render(int buffer_index,
+ size_t offset,
size_t size,
- bool render_output,
+ RenderMode render_mode,
base::TimeDelta pts,
bool eos_encountered) override;
@@ -83,6 +88,9 @@ class MediaCodecAudioDecoder : public MediaCodecDecoder {
// Reports current playback time to the callee.
SetTimeCallback update_current_time_cb_;
+ // The time limit for the next frame to avoid underrun.
+ base::TimeTicks next_frame_time_limit_;
+
DISALLOW_COPY_AND_ASSIGN(MediaCodecAudioDecoder);
};
diff --git a/chromium/media/base/android/media_codec_bridge.cc b/chromium/media/base/android/media_codec_bridge.cc
index 3d22752159a..51df6d4ad2f 100644
--- a/chromium/media/base/android/media_codec_bridge.cc
+++ b/chromium/media/base/android/media_codec_bridge.cc
@@ -4,6 +4,7 @@
#include "media/base/android/media_codec_bridge.h"
+#include <algorithm>
#include "base/android/build_info.h"
#include "base/android/jni_android.h"
@@ -25,6 +26,14 @@ using base::android::ConvertUTF8ToJavaString;
using base::android::JavaIntArrayToIntVector;
using base::android::ScopedJavaLocalRef;
+#define RETURN_ON_ERROR(condition) \
+ do { \
+ if (!(condition)) { \
+ LOG(ERROR) << "Unable to parse AAC header: " #condition; \
+ return false; \
+ } \
+ } while (0)
+
namespace media {
enum {
@@ -52,6 +61,8 @@ static const std::string VideoCodecToAndroidMimeType(const VideoCodec& codec) {
switch (codec) {
case kCodecH264:
return "video/avc";
+ case kCodecHEVC:
+ return "video/hevc";
case kCodecVP8:
return "video/x-vnd.on2.vp8";
case kCodecVP9:
@@ -65,6 +76,8 @@ static const std::string CodecTypeToAndroidMimeType(const std::string& codec) {
// TODO(xhwang): Shall we handle more detailed strings like "mp4a.40.2"?
if (codec == "avc1")
return "video/avc";
+ if (codec == "hvc1")
+ return "video/hevc";
if (codec == "mp4a")
return "audio/mp4a-latm";
if (codec == "vp8" || codec == "vp8.0")
@@ -84,6 +97,8 @@ static const std::string AndroidMimeTypeToCodecType(const std::string& mime) {
return "mp4v";
if (mime == "video/avc")
return "avc1";
+ if (mime == "video/hevc")
+ return "hvc1";
if (mime == "video/x-vnd.on2.vp8")
return "vp8";
if (mime == "video/x-vnd.on2.vp9")
@@ -246,10 +261,14 @@ bool MediaCodecBridge::IsKnownUnaccelerated(const std::string& mime_type,
// devices while HW decoder video freezes and distortions are
// investigated - http://crbug.com/446974.
if (codec_name.length() > 0) {
- return (base::StartsWithASCII(codec_name, "OMX.google.", true) ||
- base::StartsWithASCII(codec_name, "OMX.SEC.", true) ||
- base::StartsWithASCII(codec_name, "OMX.MTK.", true) ||
- base::StartsWithASCII(codec_name, "OMX.Exynos.", true));
+ return (base::StartsWith(codec_name, "OMX.google.",
+ base::CompareCase::SENSITIVE) ||
+ base::StartsWith(codec_name, "OMX.SEC.",
+ base::CompareCase::SENSITIVE) ||
+ base::StartsWith(codec_name, "OMX.MTK.",
+ base::CompareCase::SENSITIVE) ||
+ base::StartsWith(codec_name, "OMX.Exynos.",
+ base::CompareCase::SENSITIVE));
}
return true;
}
@@ -488,19 +507,25 @@ bool MediaCodecBridge::CopyFromOutputBuffer(int index,
size_t offset,
void* dst,
int dst_size) {
- JNIEnv* env = AttachCurrentThread();
- ScopedJavaLocalRef<jobject> j_buffer(
- Java_MediaCodecBridge_getOutputBuffer(env, j_media_codec_.obj(), index));
- void* src_data =
- reinterpret_cast<uint8*>(env->GetDirectBufferAddress(j_buffer.obj())) +
- offset;
- int src_capacity = env->GetDirectBufferCapacity(j_buffer.obj()) - offset;
+ void* src_data = nullptr;
+ int src_capacity = GetOutputBufferAddress(index, offset, &src_data);
if (src_capacity < dst_size)
return false;
memcpy(dst, src_data, dst_size);
return true;
}
+int MediaCodecBridge::GetOutputBufferAddress(int index,
+ size_t offset,
+ void** addr) {
+ JNIEnv* env = AttachCurrentThread();
+ ScopedJavaLocalRef<jobject> j_buffer(
+ Java_MediaCodecBridge_getOutputBuffer(env, j_media_codec_.obj(), index));
+ *addr = reinterpret_cast<uint8*>(
+ env->GetDirectBufferAddress(j_buffer.obj())) + offset;
+ return env->GetDirectBufferCapacity(j_buffer.obj()) - offset;
+}
+
bool MediaCodecBridge::FillInputBuffer(int index,
const uint8* data,
size_t size) {
@@ -625,18 +650,19 @@ bool AudioCodecBridge::ConfigureMediaFormat(jobject j_format,
uint8 profile = 0;
uint8 frequency_index = 0;
uint8 channel_config = 0;
- if (!reader.ReadBits(5, &profile) ||
- !reader.ReadBits(4, &frequency_index)) {
- LOG(ERROR) << "Unable to parse AAC header";
- return false;
- }
- if (0xf == frequency_index && !reader.SkipBits(24)) {
- LOG(ERROR) << "Unable to parse AAC header";
- return false;
- }
- if (!reader.ReadBits(4, &channel_config)) {
- LOG(ERROR) << "Unable to parse AAC header";
- return false;
+ RETURN_ON_ERROR(reader.ReadBits(5, &profile));
+ RETURN_ON_ERROR(reader.ReadBits(4, &frequency_index));
+
+ if (0xf == frequency_index)
+ RETURN_ON_ERROR(reader.SkipBits(24));
+ RETURN_ON_ERROR(reader.ReadBits(4, &channel_config));
+
+ if (profile == 5 || profile == 29) {
+ // Read extension config.
+ RETURN_ON_ERROR(reader.ReadBits(4, &frequency_index));
+ if (frequency_index == 0xf)
+ RETURN_ON_ERROR(reader.SkipBits(24));
+ RETURN_ON_ERROR(reader.ReadBits(5, &profile));
}
if (profile < 1 || profile > 4 || frequency_index == 0xf ||
@@ -693,18 +719,23 @@ bool AudioCodecBridge::ConfigureMediaFormat(jobject j_format,
return true;
}
-int64 AudioCodecBridge::PlayOutputBuffer(int index, size_t size) {
+int64 AudioCodecBridge::PlayOutputBuffer(int index,
+ size_t size,
+ size_t offset,
+ bool postpone) {
DCHECK_LE(0, index);
int numBytes = base::checked_cast<int>(size);
+
+ void* buffer = nullptr;
+ int capacity = GetOutputBufferAddress(index, offset, &buffer);
+ numBytes = std::min(capacity, numBytes);
+ CHECK_GE(numBytes, 0);
+
JNIEnv* env = AttachCurrentThread();
- ScopedJavaLocalRef<jobject> buf =
- Java_MediaCodecBridge_getOutputBuffer(env, media_codec(), index);
- uint8* buffer = static_cast<uint8*>(env->GetDirectBufferAddress(buf.obj()));
-
- ScopedJavaLocalRef<jbyteArray> byte_array =
- base::android::ToJavaByteArray(env, buffer, numBytes);
- return Java_MediaCodecBridge_playOutputBuffer(
- env, media_codec(), byte_array.obj());
+ ScopedJavaLocalRef<jbyteArray> byte_array = base::android::ToJavaByteArray(
+ env, static_cast<uint8*>(buffer), numBytes);
+ return Java_MediaCodecBridge_playOutputBuffer(env, media_codec(),
+ byte_array.obj(), postpone);
}
void AudioCodecBridge::SetVolume(double volume) {
diff --git a/chromium/media/base/android/media_codec_bridge.h b/chromium/media/base/android/media_codec_bridge.h
index c5eef442e55..99ac6e8e999 100644
--- a/chromium/media/base/android/media_codec_bridge.h
+++ b/chromium/media/base/android/media_codec_bridge.h
@@ -6,6 +6,7 @@
#define MEDIA_BASE_ANDROID_MEDIA_CODEC_BRIDGE_H_
#include <jni.h>
+#include <set>
#include <string>
#include "base/android/scoped_java_ref.h"
@@ -201,6 +202,11 @@ class MEDIA_EXPORT MediaCodecBridge {
// started.
bool StartInternal() WARN_UNUSED_RESULT;
+ // Called to get the buffer address given the output buffer index and offset.
+ // This function returns the size of the output and |addr| is the pointer to
+ // the address to read.
+ int GetOutputBufferAddress(int index, size_t offset, void** addr);
+
jobject media_codec() { return j_media_codec_.obj(); }
MediaCodecDirection direction_;
@@ -232,10 +238,18 @@ class AudioCodecBridge : public MediaCodecBridge {
int64 codec_delay_ns, int64 seek_preroll_ns,
bool play_audio, jobject media_crypto) WARN_UNUSED_RESULT;
- // Play the output buffer. This call must be called after
- // DequeueOutputBuffer() and before ReleaseOutputBuffer. Returns the playback
- // head position expressed in frames.
- int64 PlayOutputBuffer(int index, size_t size);
+ // Plays the output buffer right away or save for later playback if |postpone|
+ // is set to true. This call must be called after DequeueOutputBuffer() and
+ // before ReleaseOutputBuffer. The data is extracted from the output buffers
+ // using |index|, |size| and |offset|. Returns the playback head position
+ // expressed in frames.
+ // When |postpone| is set to true, the next PlayOutputBuffer() should have
+ // postpone == false, and it will play two buffers: the postponed one and
+ // the one identified by |index|.
+ int64 PlayOutputBuffer(int index,
+ size_t size,
+ size_t offset,
+ bool postpone = false);
// Set the volume of the audio output.
void SetVolume(double volume);
diff --git a/chromium/media/base/android/media_codec_decoder.cc b/chromium/media/base/android/media_codec_decoder.cc
index 8652d5bf61c..c7b9e2a5085 100644
--- a/chromium/media/base/android/media_codec_decoder.cc
+++ b/chromium/media/base/android/media_codec_decoder.cc
@@ -32,24 +32,38 @@ const int kOutputBufferTimeout = 20;
}
MediaCodecDecoder::MediaCodecDecoder(
+ const char* decoder_thread_name,
const scoped_refptr<base::SingleThreadTaskRunner>& media_task_runner,
+ FrameStatistics* frame_statistics,
const base::Closure& external_request_data_cb,
const base::Closure& starvation_cb,
+ const base::Closure& decoder_drained_cb,
const base::Closure& stop_done_cb,
- const base::Closure& error_cb,
- const char* decoder_thread_name)
- : media_task_runner_(media_task_runner),
- decoder_thread_(decoder_thread_name),
+ const base::Closure& waiting_for_decryption_key_cb,
+ const base::Closure& error_cb)
+ : decoder_thread_(decoder_thread_name),
+ media_task_runner_(media_task_runner),
+ frame_statistics_(frame_statistics),
+ needs_reconfigure_(false),
+ drain_decoder_(false),
+ always_reconfigure_for_tests_(false),
external_request_data_cb_(external_request_data_cb),
starvation_cb_(starvation_cb),
+ decoder_drained_cb_(decoder_drained_cb),
stop_done_cb_(stop_done_cb),
+ waiting_for_decryption_key_cb_(waiting_for_decryption_key_cb),
error_cb_(error_cb),
state_(kStopped),
+ is_prepared_(false),
eos_enqueued_(false),
+ missing_key_reported_(false),
completed_(false),
last_frame_posted_(false),
is_data_request_in_progress_(false),
is_incoming_data_invalid_(false),
+#ifndef NDEBUG
+ verify_next_frame_is_key_(false),
+#endif
weak_factory_(this) {
DCHECK(media_task_runner_->BelongsToCurrentThread());
@@ -57,33 +71,18 @@ MediaCodecDecoder::MediaCodecDecoder(
internal_error_cb_ =
base::Bind(&MediaCodecDecoder::OnCodecError, weak_factory_.GetWeakPtr());
+ internal_preroll_done_cb_ =
+ base::Bind(&MediaCodecDecoder::OnPrerollDone, weak_factory_.GetWeakPtr());
request_data_cb_ =
base::Bind(&MediaCodecDecoder::RequestData, weak_factory_.GetWeakPtr());
}
-MediaCodecDecoder::~MediaCodecDecoder() {
- DCHECK(media_task_runner_->BelongsToCurrentThread());
-
- DVLOG(1) << "Decoder::~Decoder()";
-
- // NB: ReleaseDecoderResources() is virtual
- ReleaseDecoderResources();
-}
+MediaCodecDecoder::~MediaCodecDecoder() {}
const char* MediaCodecDecoder::class_name() const {
return "Decoder";
}
-void MediaCodecDecoder::ReleaseDecoderResources() {
- DCHECK(media_task_runner_->BelongsToCurrentThread());
-
- DVLOG(1) << class_name() << "::" << __FUNCTION__;
-
- decoder_thread_.Stop(); // synchronous
- state_ = kStopped;
- media_codec_bridge_.reset();
-}
-
void MediaCodecDecoder::Flush() {
DCHECK(media_task_runner_->BelongsToCurrentThread());
@@ -97,9 +96,23 @@ void MediaCodecDecoder::Flush() {
is_incoming_data_invalid_ = true;
eos_enqueued_ = false;
+ missing_key_reported_ = false;
completed_ = false;
+ drain_decoder_ = false;
au_queue_.Flush();
+ // |is_prepared_| is set on the decoder thread, it shouldn't be running now.
+ DCHECK(!decoder_thread_.IsRunning());
+ is_prepared_ = false;
+
+#ifndef NDEBUG
+ // We check and reset |verify_next_frame_is_key_| on Decoder thread.
+ // We have just DCHECKed that decoder thread is not running.
+
+ // For video the first frame after flush must be key frame.
+ verify_next_frame_is_key_ = true;
+#endif
+
if (media_codec_bridge_) {
// MediaCodecBridge::Reset() performs MediaCodecBridge.flush()
MediaCodecStatus flush_status = media_codec_bridge_->Reset();
@@ -116,14 +129,34 @@ void MediaCodecDecoder::ReleaseMediaCodec() {
DVLOG(1) << class_name() << "::" << __FUNCTION__;
+ DCHECK(!decoder_thread_.IsRunning());
+
media_codec_bridge_.reset();
+
+ // |is_prepared_| is set on the decoder thread, it shouldn't be running now.
+ is_prepared_ = false;
}
bool MediaCodecDecoder::IsPrefetchingOrPlaying() const {
DCHECK(media_task_runner_->BelongsToCurrentThread());
+ // Whether decoder needs to be stopped.
base::AutoLock lock(state_lock_);
- return state_ == kPrefetching || state_ == kRunning;
+ switch (state_) {
+ case kPrefetching:
+ case kPrefetched:
+ case kPrerolling:
+ case kPrerolled:
+ case kRunning:
+ return true;
+ case kStopped:
+ case kStopping:
+ case kInEmergencyStop:
+ case kError:
+ return false;
+ }
+ NOTREACHED();
+ return false;
}
bool MediaCodecDecoder::IsStopped() const {
@@ -138,14 +171,26 @@ bool MediaCodecDecoder::IsCompleted() const {
return completed_;
}
-base::android::ScopedJavaLocalRef<jobject> MediaCodecDecoder::GetMediaCrypto() {
- base::android::ScopedJavaLocalRef<jobject> media_crypto;
+bool MediaCodecDecoder::NotCompletedAndNeedsPreroll() const {
+ DCHECK(media_task_runner_->BelongsToCurrentThread());
+
+ return HasStream() && !completed_ &&
+ (!is_prepared_ || preroll_timestamp_ != base::TimeDelta());
+}
+
+void MediaCodecDecoder::SetPrerollTimestamp(base::TimeDelta preroll_timestamp) {
+ DCHECK(media_task_runner_->BelongsToCurrentThread());
+ DVLOG(1) << class_name() << "::" << __FUNCTION__ << ": " << preroll_timestamp;
+
+ preroll_timestamp_ = preroll_timestamp;
+}
+
+void MediaCodecDecoder::SetNeedsReconfigure() {
+ DCHECK(media_task_runner_->BelongsToCurrentThread());
+
+ DVLOG(1) << class_name() << "::" << __FUNCTION__;
- // TODO(timav): implement DRM.
- // drm_bridge_ is not implemented
- // if (drm_bridge_)
- // media_crypto = drm_bridge_->GetMediaCrypto();
- return media_crypto;
+ needs_reconfigure_ = true;
}
void MediaCodecDecoder::Prefetch(const base::Closure& prefetch_done_cb) {
@@ -161,43 +206,63 @@ void MediaCodecDecoder::Prefetch(const base::Closure& prefetch_done_cb) {
PrefetchNextChunk();
}
-MediaCodecDecoder::ConfigStatus MediaCodecDecoder::Configure() {
+MediaCodecDecoder::ConfigStatus MediaCodecDecoder::Configure(
+ jobject media_crypto) {
DCHECK(media_task_runner_->BelongsToCurrentThread());
DVLOG(1) << class_name() << "::" << __FUNCTION__;
if (GetState() == kError) {
DVLOG(0) << class_name() << "::" << __FUNCTION__ << ": wrong state kError";
- return CONFIG_FAILURE;
+ return kConfigFailure;
}
- // Here I assume that OnDemuxerConfigsAvailable won't come
- // in the middle of demuxer data.
+ if (needs_reconfigure_) {
+ DVLOG(1) << class_name() << "::" << __FUNCTION__
+ << ": needs reconfigure, deleting MediaCodec";
+ needs_reconfigure_ = false;
+ ReleaseMediaCodec();
+ }
if (media_codec_bridge_) {
DVLOG(1) << class_name() << "::" << __FUNCTION__
<< ": reconfiguration is not required, ignoring";
- return CONFIG_OK;
+ return kConfigOk;
}
- return ConfigureInternal();
+ // Read all |kConfigChanged| units preceding the data one.
+ AccessUnitQueue::Info au_info = au_queue_.GetInfo();
+ while (au_info.configs) {
+ SetDemuxerConfigs(*au_info.configs);
+ au_queue_.Advance();
+ au_info = au_queue_.GetInfo();
+ }
+
+ MediaCodecDecoder::ConfigStatus result = ConfigureInternal(media_crypto);
+
+#ifndef NDEBUG
+ // We check and reset |verify_next_frame_is_key_| on Decoder thread.
+ // This DCHECK ensures we won't need to lock this variable.
+ DCHECK(!decoder_thread_.IsRunning());
+
+ // For video the first frame after reconfiguration must be key frame.
+ if (result == kConfigOk)
+ verify_next_frame_is_key_ = true;
+#endif
+
+ return result;
}
-bool MediaCodecDecoder::Start(base::TimeDelta current_time) {
+bool MediaCodecDecoder::Preroll(const base::Closure& preroll_done_cb) {
DCHECK(media_task_runner_->BelongsToCurrentThread());
DVLOG(1) << class_name() << "::" << __FUNCTION__
- << " current_time:" << current_time;
+ << " preroll_timestamp:" << preroll_timestamp_;
DecoderState state = GetState();
- if (state == kRunning) {
- DVLOG(1) << class_name() << "::" << __FUNCTION__ << ": already started";
- return true; // already started
- }
-
if (state != kPrefetched) {
DVLOG(0) << class_name() << "::" << __FUNCTION__ << ": wrong state "
- << AsString(state) << " ignoring";
+ << AsString(state) << ", ignoring";
return false;
}
@@ -209,19 +274,64 @@ bool MediaCodecDecoder::Start(base::TimeDelta current_time) {
DCHECK(!decoder_thread_.IsRunning());
+ preroll_done_cb_ = preroll_done_cb;
+
// We only synchronize video stream.
- // When audio is present, the |current_time| is audio time.
- SynchronizePTSWithTime(current_time);
+ DissociatePTSFromTime(); // associaton will happen after preroll is done.
last_frame_posted_ = false;
// Start the decoder thread
if (!decoder_thread_.Start()) {
- DVLOG(1) << class_name() << "::" << __FUNCTION__
+ DVLOG(0) << class_name() << "::" << __FUNCTION__
<< ": cannot start decoder thread";
return false;
}
+ SetState(kPrerolling);
+
+ decoder_thread_.task_runner()->PostTask(
+ FROM_HERE,
+ base::Bind(&MediaCodecDecoder::ProcessNextFrame, base::Unretained(this)));
+
+ return true;
+}
+
+bool MediaCodecDecoder::Start(base::TimeDelta start_timestamp) {
+ DCHECK(media_task_runner_->BelongsToCurrentThread());
+
+ DVLOG(1) << class_name() << "::" << __FUNCTION__
+ << " start_timestamp:" << start_timestamp;
+
+ DecoderState state = GetState();
+
+ if (state != kPrefetched && state != kPrerolled) {
+ DVLOG(0) << class_name() << "::" << __FUNCTION__ << ": wrong state "
+ << AsString(state) << ", ignoring";
+ return false;
+ }
+
+ if (!media_codec_bridge_) {
+ DVLOG(0) << class_name() << "::" << __FUNCTION__
+ << ": not configured, ignoring";
+ return false;
+ }
+
+ // We only synchronize video stream.
+ AssociateCurrentTimeWithPTS(start_timestamp);
+
+ DCHECK(preroll_timestamp_ == base::TimeDelta());
+
+ // Start the decoder thread
+ if (!decoder_thread_.IsRunning()) {
+ last_frame_posted_ = false;
+ if (!decoder_thread_.Start()) {
+ DVLOG(1) << class_name() << "::" << __FUNCTION__
+ << ": cannot start decoder thread";
+ return false;
+ }
+ }
+
SetState(kRunning);
decoder_thread_.task_runner()->PostTask(
@@ -242,12 +352,8 @@ void MediaCodecDecoder::SyncStop() {
return;
}
- // After this method returns, decoder thread will not be running.
-
- decoder_thread_.Stop(); // synchronous
- state_ = kStopped;
+ DoEmergencyStop();
- // Shall we move |delayed_buffers_| from VideoDecoder to Decoder class?
ReleaseDelayedBuffers();
}
@@ -265,12 +371,20 @@ void MediaCodecDecoder::RequestToStop() {
case kRunning:
SetState(kStopping);
break;
+ case kPrerolling:
+ case kPrerolled:
+ DCHECK(decoder_thread_.IsRunning());
+ // Synchronous stop.
+ decoder_thread_.Stop();
+ SetState(kStopped);
+ media_task_runner_->PostTask(FROM_HERE, stop_done_cb_);
+ break;
case kStopping:
- break; // ignore
case kStopped:
+ break; // ignore
case kPrefetching:
case kPrefetched:
- // There is nothing to wait for, we can sent nofigication right away.
+ // There is nothing to wait for, we can sent notification right away.
DCHECK(!decoder_thread_.IsRunning());
SetState(kStopped);
media_task_runner_->PostTask(FROM_HERE, stop_done_cb_);
@@ -281,48 +395,120 @@ void MediaCodecDecoder::RequestToStop() {
}
}
-void MediaCodecDecoder::OnLastFrameRendered(bool completed) {
+void MediaCodecDecoder::OnLastFrameRendered(bool eos_encountered) {
DCHECK(media_task_runner_->BelongsToCurrentThread());
DVLOG(1) << class_name() << "::" << __FUNCTION__
- << " completed:" << completed;
+ << " eos_encountered:" << eos_encountered;
decoder_thread_.Stop(); // synchronous
- state_ = kStopped;
- completed_ = completed;
+
+ SetState(kStopped);
+ completed_ = (eos_encountered && !drain_decoder_);
+
+ missing_key_reported_ = false;
+
+ // If the stream is completed during preroll we need to report it since
+ // another stream might be running and the player waits for two callbacks.
+ if (completed_ && !preroll_done_cb_.is_null()) {
+ preroll_timestamp_ = base::TimeDelta();
+ media_task_runner_->PostTask(FROM_HERE,
+ base::ResetAndReturn(&preroll_done_cb_));
+ }
+
+ if (eos_encountered && drain_decoder_) {
+ drain_decoder_ = false;
+ eos_enqueued_ = false;
+ ReleaseMediaCodec();
+ media_task_runner_->PostTask(FROM_HERE, decoder_drained_cb_);
+ }
media_task_runner_->PostTask(FROM_HERE, stop_done_cb_);
}
+void MediaCodecDecoder::OnPrerollDone() {
+ DCHECK(media_task_runner_->BelongsToCurrentThread());
+
+ DVLOG(1) << class_name() << "::" << __FUNCTION__
+ << " state:" << AsString(GetState());
+
+ preroll_timestamp_ = base::TimeDelta();
+
+ // The state might be kStopping (?)
+ if (GetState() == kPrerolling)
+ SetState(kPrerolled);
+
+ if (!preroll_done_cb_.is_null())
+ base::ResetAndReturn(&preroll_done_cb_).Run();
+}
+
void MediaCodecDecoder::OnDemuxerDataAvailable(const DemuxerData& data) {
DCHECK(media_task_runner_->BelongsToCurrentThread());
+ // If |data| contains an aborted data, the last AU will have kAborted status.
+ bool aborted_data =
+ !data.access_units.empty() &&
+ data.access_units.back().status == DemuxerStream::kAborted;
+
+#ifndef NDEBUG
const char* explain_if_skipped =
- is_incoming_data_invalid_ ? " skipped as invalid" : "";
+ is_incoming_data_invalid_ ? " skipped as invalid"
+ : (aborted_data ? " skipped as aborted" : "");
- DVLOG(2) << class_name() << "::" << __FUNCTION__ << explain_if_skipped
- << " #AUs:" << data.access_units.size()
- << " #Configs:" << data.demuxer_configs.size();
-#if !defined(NDEBUG)
for (const auto& unit : data.access_units)
DVLOG(2) << class_name() << "::" << __FUNCTION__ << explain_if_skipped
<< " au: " << unit;
+ for (const auto& configs : data.demuxer_configs)
+ DVLOG(2) << class_name() << "::" << __FUNCTION__ << " configs: " << configs;
#endif
- if (!is_incoming_data_invalid_)
+ if (!is_incoming_data_invalid_ && !aborted_data)
au_queue_.PushBack(data);
is_incoming_data_invalid_ = false;
is_data_request_in_progress_ = false;
- if (state_ == kPrefetching)
+ // Do not request data if we got kAborted. There is no point to request the
+ // data after kAborted and before the OnDemuxerSeekDone.
+ if (GetState() == kPrefetching && !aborted_data)
PrefetchNextChunk();
}
+bool MediaCodecDecoder::IsPrerollingForTests() const {
+ // UI task runner.
+ return GetState() == kPrerolling;
+}
+
+void MediaCodecDecoder::SetAlwaysReconfigureForTests() {
+ // UI task runner.
+ always_reconfigure_for_tests_ = true;
+}
+
+void MediaCodecDecoder::SetCodecCreatedCallbackForTests(base::Closure cb) {
+ // UI task runner.
+ codec_created_for_tests_cb_ = cb;
+}
+
int MediaCodecDecoder::NumDelayedRenderTasks() const {
return 0;
}
+void MediaCodecDecoder::DoEmergencyStop() {
+ DCHECK(media_task_runner_->BelongsToCurrentThread());
+ DVLOG(1) << class_name() << "::" << __FUNCTION__;
+
+ // After this method returns, decoder thread will not be running.
+
+ // Set [kInEmergencyStop| state to block already posted ProcessNextFrame().
+ SetState(kInEmergencyStop);
+
+ decoder_thread_.Stop(); // synchronous
+
+ SetState(kStopped);
+
+ missing_key_reported_ = false;
+}
+
void MediaCodecDecoder::CheckLastFrame(bool eos_encountered,
bool has_delayed_tasks) {
DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
@@ -340,6 +526,14 @@ void MediaCodecDecoder::CheckLastFrame(bool eos_encountered,
void MediaCodecDecoder::OnCodecError() {
DCHECK(media_task_runner_->BelongsToCurrentThread());
+ // Ignore codec errors from the moment surface is changed till the
+ // |media_codec_bridge_| is deleted.
+ if (needs_reconfigure_) {
+ DVLOG(1) << class_name() << "::" << __FUNCTION__
+ << ": needs reconfigure, ignoring";
+ return;
+ }
+
SetState(kError);
error_cb_.Run();
}
@@ -361,7 +555,8 @@ void MediaCodecDecoder::PrefetchNextChunk() {
AccessUnitQueue::Info au_info = au_queue_.GetInfo();
- if (eos_enqueued_ || au_info.length >= kPrefetchLimit || au_info.has_eos) {
+ if (eos_enqueued_ || au_info.data_length >= kPrefetchLimit ||
+ au_info.has_eos) {
// We are done prefetching
SetState(kPrefetched);
DVLOG(1) << class_name() << "::" << __FUNCTION__ << " posting PrefetchDone";
@@ -380,15 +575,14 @@ void MediaCodecDecoder::ProcessNextFrame() {
DecoderState state = GetState();
- if (state != kRunning && state != kStopping) {
- DVLOG(1) << class_name() << "::" << __FUNCTION__ << ": not running";
+ if (state != kPrerolling && state != kRunning && state != kStopping) {
+ DVLOG(1) << class_name() << "::" << __FUNCTION__
+ << ": state: " << AsString(state) << " stopping frame processing";
return;
}
if (state == kStopping) {
if (NumDelayedRenderTasks() == 0 && !last_frame_posted_) {
- DVLOG(1) << class_name() << "::" << __FUNCTION__
- << ": kStopping, posting OnLastFrameRendered";
media_task_runner_->PostTask(
FROM_HERE, base::Bind(&MediaCodecDecoder::OnLastFrameRendered,
weak_factory_.GetWeakPtr(), false));
@@ -397,23 +591,17 @@ void MediaCodecDecoder::ProcessNextFrame() {
// We can stop processing, the |au_queue_| and MediaCodec queues can freeze.
// We only need to let finish the delayed rendering tasks.
+ DVLOG(1) << class_name() << "::" << __FUNCTION__ << " kStopping, returning";
return;
}
- DCHECK(state == kRunning);
+ DCHECK(state == kPrerolling || state == kRunning);
if (!EnqueueInputBuffer())
return;
- bool eos_encountered = false;
- if (!DepleteOutputBufferQueue(&eos_encountered))
- return;
-
- if (eos_encountered) {
- DVLOG(1) << class_name() << "::" << __FUNCTION__
- << " EOS dequeued, stopping frame processing";
+ if (!DepleteOutputBufferQueue())
return;
- }
// We need a small delay if we want to stop this thread by
// decoder_thread_.Stop() reliably.
@@ -435,7 +623,13 @@ bool MediaCodecDecoder::EnqueueInputBuffer() {
if (eos_enqueued_) {
DVLOG(1) << class_name() << "::" << __FUNCTION__
- << ": eos_enqueued, returning";
+ << ": EOS enqueued, returning";
+ return true; // Nothing to do
+ }
+
+ if (missing_key_reported_) {
+ DVLOG(1) << class_name() << "::" << __FUNCTION__
+ << ": NO KEY reported, returning";
return true; // Nothing to do
}
@@ -447,29 +641,31 @@ bool MediaCodecDecoder::EnqueueInputBuffer() {
return true; // Nothing to do
}
- // Get the next frame from the queue and the queue info
+ // Get the next frame from the queue. As we go, request more data and
+ // consume |kConfigChanged| units.
- AccessUnitQueue::Info au_info = au_queue_.GetInfo();
+ // |drain_decoder_| can be already set here if we could not dequeue the input
+ // buffer for it right away.
- // Request the data from Demuxer
- if (au_info.length <= kPlaybackLowLimit && !au_info.has_eos)
- media_task_runner_->PostTask(FROM_HERE, request_data_cb_);
+ AccessUnitQueue::Info au_info;
+ if (!drain_decoder_) {
+ au_info = AdvanceAccessUnitQueue(&drain_decoder_);
+ if (!au_info.length) {
+ // Report starvation and return, Start() will be called again later.
+ DVLOG(1) << class_name() << "::" << __FUNCTION__
+ << ": starvation detected";
+ media_task_runner_->PostTask(FROM_HERE, starvation_cb_);
+ return true;
+ }
- // Get the next frame from the queue
+ DCHECK(au_info.front_unit);
- if (!au_info.length) {
- // Report starvation and return, Start() will be called again later.
- DVLOG(1) << class_name() << "::" << __FUNCTION__ << ": starvation detected";
- media_task_runner_->PostTask(FROM_HERE, starvation_cb_);
- return true;
- }
-
- if (au_info.configs) {
- DVLOG(1) << class_name() << "::" << __FUNCTION__
- << ": received new configs, not implemented";
- // post an error for now?
- media_task_runner_->PostTask(FROM_HERE, internal_error_cb_);
- return false;
+#ifndef NDEBUG
+ if (verify_next_frame_is_key_) {
+ verify_next_frame_is_key_ = false;
+ VerifyUnitIsKeyFrame(au_info.front_unit);
+ }
+#endif
}
// Dequeue input buffer
@@ -490,6 +686,9 @@ bool MediaCodecDecoder::EnqueueInputBuffer() {
return false;
case MEDIA_CODEC_DEQUEUE_INPUT_AGAIN_LATER:
+ DVLOG(2)
+ << class_name() << "::" << __FUNCTION__
+ << ": DequeueInputBuffer returned MediaCodec.INFO_TRY_AGAIN_LATER.";
return true;
default:
@@ -501,25 +700,65 @@ bool MediaCodecDecoder::EnqueueInputBuffer() {
DCHECK_GE(index, 0);
const AccessUnit* unit = au_info.front_unit;
- DCHECK(unit);
- if (unit->is_end_of_stream) {
+ if (drain_decoder_ || unit->is_end_of_stream) {
DVLOG(1) << class_name() << "::" << __FUNCTION__ << ": QueueEOS";
media_codec_bridge_->QueueEOS(index);
eos_enqueued_ = true;
return true;
}
- DVLOG(2) << class_name() << ":: QueueInputBuffer pts:" << unit->timestamp;
+ DCHECK(unit);
+ DCHECK(!unit->data.empty());
+
+ if (unit->key_id.empty() || unit->iv.empty()) {
+ DVLOG(2) << class_name() << "::" << __FUNCTION__
+ << ": QueueInputBuffer pts:" << unit->timestamp;
+
+ status = media_codec_bridge_->QueueInputBuffer(
+ index, &unit->data[0], unit->data.size(), unit->timestamp);
+ } else {
+ DVLOG(2) << class_name() << "::" << __FUNCTION__
+ << ": QueueSecureInputBuffer pts:" << unit->timestamp
+ << " key_id size:" << unit->key_id.size()
+ << " iv size:" << unit->iv.size()
+ << " subsamples size:" << unit->subsamples.size();
+
+ status = media_codec_bridge_->QueueSecureInputBuffer(
+ index, &unit->data[0], unit->data.size(),
+ reinterpret_cast<const uint8_t*>(&unit->key_id[0]), unit->key_id.size(),
+ reinterpret_cast<const uint8_t*>(&unit->iv[0]), unit->iv.size(),
+ unit->subsamples.empty() ? nullptr : &unit->subsamples[0],
+ unit->subsamples.size(), unit->timestamp);
+ }
- status = media_codec_bridge_->QueueInputBuffer(
- index, &unit->data[0], unit->data.size(), unit->timestamp);
+ switch (status) {
+ case MEDIA_CODEC_OK:
+ break;
- if (status == MEDIA_CODEC_ERROR) {
- DVLOG(0) << class_name() << "::" << __FUNCTION__
- << ": MEDIA_CODEC_ERROR: QueueInputBuffer failed";
- media_task_runner_->PostTask(FROM_HERE, internal_error_cb_);
- return false;
+ case MEDIA_CODEC_ERROR:
+ DVLOG(0) << class_name() << "::" << __FUNCTION__
+ << ": MEDIA_CODEC_ERROR: QueueInputBuffer failed";
+ media_task_runner_->PostTask(FROM_HERE, internal_error_cb_);
+ return false;
+
+ case MEDIA_CODEC_NO_KEY:
+ DVLOG(1) << class_name() << "::" << __FUNCTION__
+ << ": MEDIA_CODEC_NO_KEY";
+ media_task_runner_->PostTask(FROM_HERE, waiting_for_decryption_key_cb_);
+
+ // In response to the |waiting_for_decryption_key_cb_| the player will
+ // request to stop decoder. We need to keep running to properly perform
+ // the stop, but prevent enqueuing the same frame over and over again so
+ // we won't generate more |waiting_for_decryption_key_cb_|.
+ missing_key_reported_ = true;
+ return true;
+
+ default:
+ NOTREACHED() << class_name() << "::" << __FUNCTION__
+ << ": unexpected error code " << status;
+ media_task_runner_->PostTask(FROM_HERE, internal_error_cb_);
+ return false;
}
// Have successfully queued input buffer, go to next access unit.
@@ -527,8 +766,53 @@ bool MediaCodecDecoder::EnqueueInputBuffer() {
return true;
}
+AccessUnitQueue::Info MediaCodecDecoder::AdvanceAccessUnitQueue(
+ bool* drain_decoder) {
+ DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
+ DVLOG(2) << class_name() << "::" << __FUNCTION__;
+
+ // Retrieve access units from the |au_queue_| in a loop until we either get
+ // a non-config front unit or until the queue is empty.
+
+ DCHECK(drain_decoder != nullptr);
+
+ AccessUnitQueue::Info au_info;
+
+ do {
+ // Get current frame
+ au_info = au_queue_.GetInfo();
+
+ // Request the data from Demuxer
+ if (au_info.data_length <= kPlaybackLowLimit && !au_info.has_eos)
+ media_task_runner_->PostTask(FROM_HERE, request_data_cb_);
+
+ if (!au_info.length)
+ break; // Starvation
+
+ if (au_info.configs) {
+ DVLOG(1) << class_name() << "::" << __FUNCTION__ << ": received configs "
+ << (*au_info.configs);
+
+ // Compare the new and current configs.
+ if (IsCodecReconfigureNeeded(*au_info.configs)) {
+ DVLOG(1) << class_name() << "::" << __FUNCTION__
+ << ": reconfiguration and decoder drain required";
+ *drain_decoder = true;
+ }
+
+ // Replace the current configs.
+ SetDemuxerConfigs(*au_info.configs);
+
+ // Move to the next frame
+ au_queue_.Advance();
+ }
+ } while (au_info.configs);
+
+ return au_info;
+}
+
// Returns false if there was MediaCodec error.
-bool MediaCodecDecoder::DepleteOutputBufferQueue(bool* eos_encountered) {
+bool MediaCodecDecoder::DepleteOutputBufferQueue() {
DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
DVLOG(2) << class_name() << "::" << __FUNCTION__;
@@ -538,6 +822,9 @@ bool MediaCodecDecoder::DepleteOutputBufferQueue(bool* eos_encountered) {
size_t size = 0;
base::TimeDelta pts;
MediaCodecStatus status;
+ bool eos_encountered = false;
+
+ RenderMode render_mode;
base::TimeDelta timeout =
base::TimeDelta::FromMilliseconds(kOutputBufferTimeout);
@@ -547,7 +834,8 @@ bool MediaCodecDecoder::DepleteOutputBufferQueue(bool* eos_encountered) {
// MEDIA_CODEC_OUTPUT_BUFFERS_CHANGED or MEDIA_CODEC_OUTPUT_FORMAT_CHANGED.
do {
status = media_codec_bridge_->DequeueOutputBuffer(
- timeout, &buffer_index, &offset, &size, &pts, eos_encountered, nullptr);
+ timeout, &buffer_index, &offset, &size, &pts, &eos_encountered,
+ nullptr);
// Reset the timeout to 0 for the subsequent DequeueOutputBuffer() calls
// to quickly break the loop after we got all currently available buffers.
@@ -556,6 +844,8 @@ bool MediaCodecDecoder::DepleteOutputBufferQueue(bool* eos_encountered) {
switch (status) {
case MEDIA_CODEC_OUTPUT_BUFFERS_CHANGED:
// Output buffers are replaced in MediaCodecBridge, nothing to do.
+ DVLOG(2) << class_name() << "::" << __FUNCTION__
+ << " MEDIA_CODEC_OUTPUT_BUFFERS_CHANGED";
break;
case MEDIA_CODEC_OUTPUT_FORMAT_CHANGED:
@@ -565,12 +855,32 @@ bool MediaCodecDecoder::DepleteOutputBufferQueue(bool* eos_encountered) {
break;
case MEDIA_CODEC_OK:
- // We got the decoded frame
- Render(buffer_index, size, true, pts, *eos_encountered);
+ // We got the decoded frame.
+
+ is_prepared_ = true;
+
+ if (pts < preroll_timestamp_)
+ render_mode = kRenderSkip;
+ else if (GetState() == kPrerolling)
+ render_mode = kRenderAfterPreroll;
+ else
+ render_mode = kRenderNow;
+
+ Render(buffer_index, offset, size, render_mode, pts, eos_encountered);
+
+ if (render_mode == kRenderAfterPreroll) {
+ DVLOG(1) << class_name() << "::" << __FUNCTION__ << " pts " << pts
+ << " >= preroll timestamp " << preroll_timestamp_
+ << " preroll done, stopping frame processing";
+ media_task_runner_->PostTask(FROM_HERE, internal_preroll_done_cb_);
+ return false;
+ }
break;
case MEDIA_CODEC_DEQUEUE_OUTPUT_AGAIN_LATER:
// Nothing to do.
+ DVLOG(2) << class_name() << "::" << __FUNCTION__
+ << " MEDIA_CODEC_DEQUEUE_OUTPUT_AGAIN_LATER";
break;
case MEDIA_CODEC_ERROR:
@@ -585,9 +895,21 @@ bool MediaCodecDecoder::DepleteOutputBufferQueue(bool* eos_encountered) {
}
} while (status != MEDIA_CODEC_DEQUEUE_OUTPUT_AGAIN_LATER &&
- status != MEDIA_CODEC_ERROR && !*eos_encountered);
+ status != MEDIA_CODEC_ERROR && !eos_encountered);
+
+ if (eos_encountered) {
+ DVLOG(1) << class_name() << "::" << __FUNCTION__
+ << " EOS dequeued, stopping frame processing";
+ return false;
+ }
- return status != MEDIA_CODEC_ERROR;
+ if (status == MEDIA_CODEC_ERROR) {
+ DVLOG(0) << class_name() << "::" << __FUNCTION__
+ << " MediaCodec error, stopping frame processing";
+ return false;
+ }
+
+ return true;
}
MediaCodecDecoder::DecoderState MediaCodecDecoder::GetState() const {
@@ -596,7 +918,7 @@ MediaCodecDecoder::DecoderState MediaCodecDecoder::GetState() const {
}
void MediaCodecDecoder::SetState(DecoderState state) {
- DVLOG(1) << class_name() << "::" << __FUNCTION__ << " " << state;
+ DVLOG(1) << class_name() << "::" << __FUNCTION__ << " " << AsString(state);
base::AutoLock lock(state_lock_);
state_ = state;
@@ -607,17 +929,28 @@ void MediaCodecDecoder::SetState(DecoderState state) {
case x: \
return #x;
+const char* MediaCodecDecoder::AsString(RenderMode render_mode) {
+ switch (render_mode) {
+ RETURN_STRING(kRenderSkip);
+ RETURN_STRING(kRenderAfterPreroll);
+ RETURN_STRING(kRenderNow);
+ }
+ return nullptr; // crash early
+}
+
const char* MediaCodecDecoder::AsString(DecoderState state) {
switch (state) {
RETURN_STRING(kStopped);
RETURN_STRING(kPrefetching);
RETURN_STRING(kPrefetched);
+ RETURN_STRING(kPrerolling);
+ RETURN_STRING(kPrerolled);
RETURN_STRING(kRunning);
RETURN_STRING(kStopping);
+ RETURN_STRING(kInEmergencyStop);
RETURN_STRING(kError);
- default:
- return "Unknown DecoderState";
}
+ return nullptr; // crash early
}
#undef RETURN_STRING
diff --git a/chromium/media/base/android/media_codec_decoder.h b/chromium/media/base/android/media_codec_decoder.h
index 189ebc3c582..8385d60573a 100644
--- a/chromium/media/base/android/media_codec_decoder.h
+++ b/chromium/media/base/android/media_codec_decoder.h
@@ -19,6 +19,7 @@
namespace media {
+struct FrameStatistics;
class MediaCodecBridge;
// The decoder for MediaCodecPlayer.
@@ -44,27 +45,81 @@ class MediaCodecBridge;
// v | Error recovery:
// [ Prefetched ] |
// | | (any state including Error)
-// | Start | |
+// | Configure and Start | |
// v | | ReleaseDecoderResources
// [ Running ] | v
-// | | [ Stopped ]
-// | RequestToStop |
-// v |
-// [ Stopping ] -------------------
-//
+// | | [ InEmergencyStop ]
+// | RequestToStop | |
+// v | |(decoder thread stopped)
+// [ Stopping ] ------------------- v
+// [ Stopped ]
//
// [ Stopped ] --------------------
// ^ |
// | Flush |
// ---------------------------
+// (any state except Error)
+// |
+// | SyncStop
+// v
+// [ InEmergencyStop ]
+// |
+// |(decoder thread stopped)
+// v
+// [ Stopped ]
+
+// Here is the workflow that is expected to be maintained by a caller, which is
+// MediaCodecPlayer currently.
+//
+// [ Stopped ]
+// |
+// | Prefetch
+// v
+// [ Prefetching ]
+// |
+// | (Enough data received)
+// v
+// [ Prefetched ]
+// |
+// | <---------- SetDemuxerConfigs (*)
+// |
+// | <---------- SetVideoSurface (**)
+// |
+// | Configure --------------------------------------------+
+// | |
+// v v
+// ( Config Succeeded ) ( Key frame required )
+// | |
+// | Start |
+// v |
+// [ Running ] ------------------------------+ |
+// | | |
+// | | |
+// | RequestToStop | SyncStop | SyncStop
+// | | |
+// [ Stopping ] | |
+// | | |
+// | ( Last frame rendered ) | |
+// | | |
+// | | |
+// v | |
+// [ Stopped ] <-----------------------------+-----------------+
+//
+//
+// (*) Demuxer configs is a precondition to Configure(), but MediaCodecPlayer
+// has stricter requirements and they are set before Prefetch().
+//
+// (**) VideoSurface is a precondition to video decoder Configure(), can be set
+// any time before Configure().
+
class MediaCodecDecoder {
public:
// The result of MediaCodec configuration, used by MediaCodecPlayer.
enum ConfigStatus {
- CONFIG_FAILURE = 0,
- CONFIG_OK,
- CONFIG_KEY_FRAME_REQUIRED,
+ kConfigFailure = 0,
+ kConfigOk,
+ kConfigKeyFrameRequired,
};
// The decoder reports current playback time to the MediaCodecPlayer.
@@ -74,35 +129,51 @@ class MediaCodecDecoder {
// playback the subsequent intervals overlap.
// For video both values are PTS of the corresponding frame, i.e. the interval
// has zero width.
- typedef base::Callback<void(base::TimeDelta, base::TimeDelta)>
+ // The third parameter means "postpone", it is set to true if the actual
+ // rendering will start in a later point in time. This only happens with
+ // audio after preroll. The MediaCodecPlayer might decide to update the
+ // current time but not pass it to the upper layer.
+ typedef base::Callback<void(base::TimeDelta, base::TimeDelta, bool)>
SetTimeCallback;
// MediaCodecDecoder constructor.
// Parameters:
+ // decoder_thread_name:
+ // The thread name to be passed to decoder thread constructor.
// media_task_runner:
// A task runner for the controlling thread. All public methods should be
// called on this thread, and callbacks are delivered on this thread.
// The MediaCodecPlayer uses a dedicated (Media) thread for this.
+ // frame_statistics:
+ // A pointer to FrameStatistics object which gathers playback quality
+ // related data.
// external_request_data_cb:
// Called periodically as the amount of internally stored data decreases.
// The receiver should call OnDemuxerDataAvailable() with more data.
// starvation_cb:
// Called when starvation is detected. The decoder state does not change.
// The player is supposed to stop and then prefetch the decoder.
+ // decoder_drained_cb:
+ // Called when decoder is drained for reconfiguration.
// stop_done_cb:
// Called when async stop request is completed.
+ // waiting_for_decryption_key_cb:
+ // Will be executed whenever the key needed to decrypt the stream is not
+ // available.
// error_cb:
// Called when a MediaCodec error occurred. If this happens, a player has
// to either call ReleaseDecoderResources() or destroy the decoder object.
- // decoder_thread_name:
- // The thread name to be passed to decoder thread constructor.
MediaCodecDecoder(
+ const char* decoder_thread_name,
const scoped_refptr<base::SingleThreadTaskRunner>& media_task_runner,
+ FrameStatistics* frame_statistics,
const base::Closure& external_request_data_cb,
const base::Closure& starvation_cb,
+ const base::Closure& decoder_drained_cb,
const base::Closure& stop_done_cb,
- const base::Closure& error_cb,
- const char* decoder_thread_name);
+ const base::Closure& waiting_for_decryption_key_cb,
+ const base::Closure& error_cb);
+
virtual ~MediaCodecDecoder();
virtual const char* class_name() const;
@@ -116,32 +187,45 @@ class MediaCodecDecoder {
// Stores configuration for the use of upcoming Configure()
virtual void SetDemuxerConfigs(const DemuxerConfigs& configs) = 0;
+ // Returns true if the DemuxerConfigs announce that content is encrypted and
+ // that MediaCrypto is required for configuration.
+ virtual bool IsContentEncrypted() const = 0;
+
// Stops decoder thread, releases the MediaCodecBridge and other resources.
- virtual void ReleaseDecoderResources();
+ virtual void ReleaseDecoderResources() = 0;
- // Flushes the MediaCodec and resets the AccessUnitQueue.
- // Decoder thread should not be running.
+ // Flushes the MediaCodec, after that resets the AccessUnitQueue and blocks
+ // the input. Decoder thread should not be running.
virtual void Flush();
- // Releases MediaCodecBridge.
- void ReleaseMediaCodec();
+ // Releases MediaCodecBridge and any related buffers or references.
+ virtual void ReleaseMediaCodec();
// Returns corresponding conditions.
bool IsPrefetchingOrPlaying() const;
bool IsStopped() const;
bool IsCompleted() const;
+ bool NotCompletedAndNeedsPreroll() const;
+
+ // Forces reconfiguraton on the next Configure().
+ void SetNeedsReconfigure();
- base::android::ScopedJavaLocalRef<jobject> GetMediaCrypto();
+ // Sets preroll timestamp and requests preroll.
+ void SetPrerollTimestamp(base::TimeDelta preroll_ts);
// Starts prefetching: accumulates enough data in AccessUnitQueue.
// Decoder thread is not running.
void Prefetch(const base::Closure& prefetch_done_cb);
// Configures MediaCodec.
- ConfigStatus Configure();
+ ConfigStatus Configure(jobject media_crypto);
+
+ // Starts the decoder for prerolling. This method starts the decoder thread.
+ bool Preroll(const base::Closure& preroll_done_cb);
- // Starts the decoder thread and resumes the playback.
- bool Start(base::TimeDelta current_time);
+ // Starts the decoder after preroll is not needed, starting decoder thread
+ // if it has not started yet.
+ bool Start(base::TimeDelta start_timestamp);
// Stops the playback process synchronously. This method stops the decoder
// thread synchronously, and then releases all MediaCodec buffers.
@@ -153,24 +237,46 @@ class MediaCodecDecoder {
void RequestToStop();
// Notification posted when asynchronous stop is done or playback completed.
- void OnLastFrameRendered(bool completed);
+ void OnLastFrameRendered(bool eos_encountered);
+
+ // Notification posted when last prerolled frame has been returned to codec.
+ void OnPrerollDone();
// Puts the incoming data into AccessUnitQueue.
void OnDemuxerDataAvailable(const DemuxerData& data);
+ // For testing only.
+
+ // Returns true if the decoder is in kPrerolling state.
+ bool IsPrerollingForTests() const;
+
+ // Drains decoder and reconfigures for each |kConfigChanged|.
+ void SetAlwaysReconfigureForTests();
+
+ // Sets the notification to be called when MediaCodec is created.
+ void SetCodecCreatedCallbackForTests(base::Closure cb);
+
protected:
+ enum RenderMode {
+ kRenderSkip = 0,
+ kRenderAfterPreroll,
+ kRenderNow,
+ };
+
// Returns true if the new DemuxerConfigs requires MediaCodec
// reconfiguration.
- virtual bool IsCodecReconfigureNeeded(const DemuxerConfigs& curr,
- const DemuxerConfigs& next) const = 0;
+ virtual bool IsCodecReconfigureNeeded(const DemuxerConfigs& next) const = 0;
// Does the part of MediaCodecBridge configuration that is specific
// to audio or video.
- virtual ConfigStatus ConfigureInternal() = 0;
+ virtual ConfigStatus ConfigureInternal(jobject media_crypto) = 0;
// Associates PTS with device time so we can calculate delays.
// We use delays for video decoder only.
- virtual void SynchronizePTSWithTime(base::TimeDelta current_time) {}
+ virtual void AssociateCurrentTimeWithPTS(base::TimeDelta current_time) {}
+
+ // Invalidate delay calculation. We use delays for video decoder only.
+ virtual void DissociatePTSFromTime() {}
// Processes the change of the output format, varies by stream.
virtual void OnOutputFormatChanged() = 0;
@@ -178,45 +284,76 @@ class MediaCodecDecoder {
// Renders the decoded frame and releases output buffer, or posts
// a delayed task to do it at a later time,
virtual void Render(int buffer_index,
+ size_t offset,
size_t size,
- bool render_output,
+ RenderMode render_mode,
base::TimeDelta pts,
bool eos_encountered) = 0;
// Returns the number of delayed task (we might have them for video).
virtual int NumDelayedRenderTasks() const;
- // Releases output buffers that are dequeued and not released yet
- // because their rendering is delayed (video).
+ // Releases output buffers that are dequeued and not released yet (video).
virtual void ReleaseDelayedBuffers() {}
+#ifndef NDEBUG
+ // For video, checks that access unit is the key frame or stand-alone EOS.
+ virtual void VerifyUnitIsKeyFrame(const AccessUnit* unit) const {}
+#endif
+
// Helper methods.
+ // Synchroniously stop decoder thread.
+ void DoEmergencyStop();
+
+ // Returns true if we are in the process of sync stop.
+ bool InEmergencyStop() const { return GetState() == kInEmergencyStop; }
+
// Notifies the decoder if the frame is the last one.
void CheckLastFrame(bool eos_encountered, bool has_delayed_tasks);
+ const char* AsString(RenderMode render_mode);
+
// Protected data.
+ // We call MediaCodecBridge on this thread for both input and output buffers.
+ base::Thread decoder_thread_;
+
// Object for posting tasks on Media thread.
scoped_refptr<base::SingleThreadTaskRunner> media_task_runner_;
+ // Statistics for UMA.
+ FrameStatistics* frame_statistics_;
+
// Controls Android MediaCodec
scoped_ptr<MediaCodecBridge> media_codec_bridge_;
- // We call MediaCodecBridge on this thread for both
- // input and output buffers.
- base::Thread decoder_thread_;
-
// The queue of access units.
AccessUnitQueue au_queue_;
+ // Flag forces reconfiguration even if |media_codec_bridge_| exists. Currently
+ // is set by video decoder when the video surface changes.
+ bool needs_reconfigure_;
+
+ // Flag forces to drain decoder in the process of dynamic reconfiguration.
+ bool drain_decoder_;
+
+ // For tests only. Forces to always reconfigure for |kConfigChanged| unit.
+ bool always_reconfigure_for_tests_;
+
+ // For tests only. Callback to be callned when MediaCodec is created.
+ base::Closure codec_created_for_tests_cb_;
+
private:
enum DecoderState {
kStopped = 0,
kPrefetching,
kPrefetched,
+ kPrerolling,
+ kPrerolled,
kRunning,
kStopping,
+ kInEmergencyStop,
kError,
};
@@ -239,10 +376,19 @@ class MediaCodecDecoder {
// Returns false if there was MediaCodec error.
bool EnqueueInputBuffer();
+ // Helper method for EnqueueInputBuffer.
+ // Gets the next data frame from the queue, requesting more data and saving
+ // configuration changes on the way. Sets |drain_decoder| to true of any of
+ // the configuration changes requires draining the decoder. Returns the Info
+ // pointing to the current data unit ot empty Info if it got past the end of
+ // the queue.
+ AccessUnitQueue::Info AdvanceAccessUnitQueue(bool* drain_decoder);
+
// Helper method for ProcessNextFrame.
// Pulls all currently available output frames and renders them.
- // Returns false if there was MediaCodec error.
- bool DepleteOutputBufferQueue(bool* eos_encountered);
+ // Returns true if we need to continue decoding process, i.e post next
+ // ProcessNextFrame method, and false if we need to stop decoding.
+ bool DepleteOutputBufferQueue();
DecoderState GetState() const;
void SetState(DecoderState state);
@@ -256,7 +402,10 @@ class MediaCodecDecoder {
// These notifications are called on corresponding conditions.
base::Closure prefetch_done_cb_;
base::Closure starvation_cb_;
+ base::Closure preroll_done_cb_;
+ base::Closure decoder_drained_cb_;
base::Closure stop_done_cb_;
+ base::Closure waiting_for_decryption_key_cb_;
base::Closure error_cb_;
// Data request callback that is posted by decoder internally.
@@ -265,13 +414,26 @@ class MediaCodecDecoder {
// Callback used to post OnCodecError method.
base::Closure internal_error_cb_;
+ // Callback for posting OnPrerollDone method.
+ base::Closure internal_preroll_done_cb_;
+
// Internal state.
DecoderState state_;
mutable base::Lock state_lock_;
+ // Preroll timestamp is set if we need preroll and cleared after we done it.
+ base::TimeDelta preroll_timestamp_;
+
+ // Set to true when MediaCodec internal buffers are filled up.
+ bool is_prepared_;
+
// Flag is set when the EOS is enqueued into MediaCodec. Reset by Flush.
bool eos_enqueued_;
+ // Flag is set when NO_KEY error is received from QueueSecureInputBuffer.
+ // Reset after we stop.
+ bool missing_key_reported_;
+
// Flag is set when the EOS is received in MediaCodec output. Reset by Flush.
bool completed_;
@@ -284,6 +446,11 @@ class MediaCodecDecoder {
// Indicates whether the incoming data should be ignored.
bool is_incoming_data_invalid_;
+#ifndef NDEBUG
+ // When set, we check that the following video frame is the key frame.
+ bool verify_next_frame_is_key_;
+#endif
+
// NOTE: Weak pointers must be invalidated before all other member variables.
base::WeakPtrFactory<MediaCodecDecoder> weak_factory_;
diff --git a/chromium/media/base/android/media_codec_decoder_unittest.cc b/chromium/media/base/android/media_codec_decoder_unittest.cc
index 7f8d81337dc..322407ba56f 100644
--- a/chromium/media/base/android/media_codec_decoder_unittest.cc
+++ b/chromium/media/base/android/media_codec_decoder_unittest.cc
@@ -9,7 +9,10 @@
#include "media/base/android/media_codec_audio_decoder.h"
#include "media/base/android/media_codec_bridge.h"
#include "media/base/android/media_codec_video_decoder.h"
+#include "media/base/android/media_statistics.h"
#include "media/base/android/test_data_factory.h"
+#include "media/base/android/test_statistics.h"
+#include "media/base/timestamp_constants.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "ui/gl/android/surface_texture.h"
@@ -27,43 +30,65 @@ namespace media {
namespace {
const base::TimeDelta kDefaultTimeout = base::TimeDelta::FromMilliseconds(200);
-const base::TimeDelta kAudioFramePeriod = base::TimeDelta::FromMilliseconds(20);
+const base::TimeDelta kAudioFramePeriod =
+ base::TimeDelta::FromSecondsD(1024.0 / 44100); // 1024 samples @ 44100 Hz
const base::TimeDelta kVideoFramePeriod = base::TimeDelta::FromMilliseconds(20);
+// A helper function to calculate the expected number of frames.
+int GetFrameCount(base::TimeDelta duration, base::TimeDelta frame_period) {
+ // A chunk has 4 access units. The last unit timestamp must exceed the
+ // duration. Last chunk has 3 regular access units and one stand-alone EOS
+ // unit that we do not count.
+
+ // Number of time intervals to exceed duration.
+ int num_intervals = duration / frame_period + 1.0;
+
+ // To cover these intervals we need one extra unit at the beginning.
+ int num_units = num_intervals + 1;
+
+ // Number of 4-unit chunks that hold these units:
+ int num_chunks = (num_units + 3) / 4;
+
+ // Altogether these chunks hold 4*num_chunks units, but we do not count
+ // the last EOS as a frame.
+ return 4 * num_chunks - 1;
+}
+
class AudioFactory : public TestDataFactory {
public:
- AudioFactory(const base::TimeDelta& duration);
+ AudioFactory(base::TimeDelta duration);
DemuxerConfigs GetConfigs() const override;
protected:
- void ModifyAccessUnit(int index_in_chunk, AccessUnit* unit) override;
+ void ModifyChunk(DemuxerData* chunk) override;
};
class VideoFactory : public TestDataFactory {
public:
- VideoFactory(const base::TimeDelta& duration);
+ VideoFactory(base::TimeDelta duration);
DemuxerConfigs GetConfigs() const override;
protected:
- void ModifyAccessUnit(int index_in_chunk, AccessUnit* unit) override;
+ void ModifyChunk(DemuxerData* chunk) override;
};
-AudioFactory::AudioFactory(const base::TimeDelta& duration)
- : TestDataFactory("vorbis-packet-%d", duration, kAudioFramePeriod) {
+AudioFactory::AudioFactory(base::TimeDelta duration)
+ : TestDataFactory("aac-44100-packet-%d", duration, kAudioFramePeriod) {
}
DemuxerConfigs AudioFactory::GetConfigs() const {
- return TestDataFactory::CreateAudioConfigs(kCodecVorbis, duration_);
+ return TestDataFactory::CreateAudioConfigs(kCodecAAC, duration_);
}
-void AudioFactory::ModifyAccessUnit(int index_in_chunk, AccessUnit* unit) {
- // Vorbis needs 4 extra bytes padding on Android to decode properly. Check
- // NuMediaExtractor.cpp in Android source code.
- uint8 padding[4] = {0xff, 0xff, 0xff, 0xff};
- unit->data.insert(unit->data.end(), padding, padding + 4);
+void AudioFactory::ModifyChunk(DemuxerData* chunk) {
+ DCHECK(chunk);
+ for (AccessUnit& unit : chunk->access_units) {
+ if (!unit.data.empty())
+ unit.is_key_frame = true;
+ }
}
-VideoFactory::VideoFactory(const base::TimeDelta& duration)
+VideoFactory::VideoFactory(base::TimeDelta duration)
: TestDataFactory("h264-320x180-frame-%d", duration, kVideoFramePeriod) {
}
@@ -72,7 +97,7 @@ DemuxerConfigs VideoFactory::GetConfigs() const {
gfx::Size(320, 180));
}
-void VideoFactory::ModifyAccessUnit(int index_in_chunk, AccessUnit* unit) {
+void VideoFactory::ModifyChunk(DemuxerData* chunk) {
// The frames are taken from High profile and some are B-frames.
// The first 4 frames appear in the file in the following order:
//
@@ -82,42 +107,32 @@ void VideoFactory::ModifyAccessUnit(int index_in_chunk, AccessUnit* unit) {
//
// I keep the last PTS to be 3 for simplicity.
- // Swap pts for second and third frames.
- if (index_in_chunk == 1) // second frame
- unit->timestamp += frame_period_;
- if (index_in_chunk == 2) // third frame
- unit->timestamp -= frame_period_;
-
- if (index_in_chunk == 0)
- unit->is_key_frame = true;
-}
-
-// Class that computes statistics: number of calls, minimum and maximum values.
-// It is used for PTS statistics to verify that playback did actually happen.
+ // If the chunk contains EOS, it should not break the presentation order.
+ // For instance, the following chunk is ok:
+ //
+ // Frames: I P B EOS
+ // Decoding order: 0 1 2 -
+ // Presentation order: 0 2 1 -
+ //
+ // while this one might cause decoder to block:
+ //
+ // Frames: I P EOS
+ // Decoding order: 0 1 -
+ // Presentation order: 0 2 - <------- might wait for the B frame forever
+ //
+ // With current base class implementation that always has EOS at the 4th
+ // place we are covered (http://crbug.com/526755)
-template <typename T>
-class Minimax {
- public:
- Minimax() : num_values_(0) {}
- ~Minimax() {}
-
- void AddValue(const T& value) {
- ++num_values_;
- if (value < min_)
- min_ = value;
- else if (max_ < value)
- max_ = value;
- }
+ DCHECK(chunk);
+ DCHECK(chunk->access_units.size() == 4);
- const T& min() const { return min_; }
- const T& max() const { return max_; }
- int num_values() const { return num_values_; }
+ // Swap pts for second and third frames. Make first frame a key frame.
+ base::TimeDelta tmp = chunk->access_units[1].timestamp;
+ chunk->access_units[1].timestamp = chunk->access_units[2].timestamp;
+ chunk->access_units[2].timestamp = tmp;
- private:
- T min_;
- T max_;
- int num_values_;
-};
+ chunk->access_units[0].is_key_frame = true;
+}
} // namespace (anonymous)
@@ -167,11 +182,20 @@ class MediaCodecDecoderTest : public testing::Test {
// Decoder callbacks.
void OnDataRequested();
void OnStarvation() { is_starved_ = true; }
+ void OnDecoderDrained() {}
void OnStopDone() { is_stopped_ = true; }
- void OnError() {}
+ void OnKeyRequired() {}
+ void OnError() { DVLOG(0) << "MediaCodecDecoderTest::" << __FUNCTION__; }
void OnUpdateCurrentTime(base::TimeDelta now_playing,
- base::TimeDelta last_buffered) {
- pts_stat_.AddValue(now_playing);
+ base::TimeDelta last_buffered,
+ bool postpone) {
+ // Add the |last_buffered| value for PTS. For video it is the same as
+ // |now_playing| and is equal to PTS, for audio |last_buffered| should
+ // exceed PTS.
+ if (postpone)
+ return;
+
+ pts_stat_.AddValue(last_buffered);
if (stop_request_time_ != kNoTimestamp() &&
now_playing >= stop_request_time_) {
@@ -180,12 +204,16 @@ class MediaCodecDecoderTest : public testing::Test {
}
}
- void OnVideoSizeChanged(const gfx::Size& video_size) {}
+ void OnVideoSizeChanged(const gfx::Size& video_size) {
+ video_size_ = video_size;
+ }
+
void OnVideoCodecCreated() {}
scoped_ptr<MediaCodecDecoder> decoder_;
scoped_ptr<TestDataFactory> data_factory_;
Minimax<base::TimeDelta> pts_stat_;
+ gfx::Size video_size_;
private:
bool is_timeout_expired() const { return is_timeout_expired_; }
@@ -200,6 +228,7 @@ class MediaCodecDecoderTest : public testing::Test {
base::TimeDelta stop_request_time_;
scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
+ FrameStatistics frame_statistics_;
DataAvailableCallback data_available_cb_;
scoped_refptr<gfx::SurfaceTexture> surface_texture_;
@@ -243,10 +272,14 @@ bool MediaCodecDecoderTest::WaitForCondition(const Predicate& condition,
void MediaCodecDecoderTest::CreateAudioDecoder() {
decoder_ = scoped_ptr<MediaCodecDecoder>(new MediaCodecAudioDecoder(
- task_runner_, base::Bind(&MediaCodecDecoderTest::OnDataRequested,
- base::Unretained(this)),
+ task_runner_, &frame_statistics_,
+ base::Bind(&MediaCodecDecoderTest::OnDataRequested,
+ base::Unretained(this)),
base::Bind(&MediaCodecDecoderTest::OnStarvation, base::Unretained(this)),
+ base::Bind(&MediaCodecDecoderTest::OnDecoderDrained,
+ base::Unretained(this)),
base::Bind(&MediaCodecDecoderTest::OnStopDone, base::Unretained(this)),
+ base::Bind(&MediaCodecDecoderTest::OnKeyRequired, base::Unretained(this)),
base::Bind(&MediaCodecDecoderTest::OnError, base::Unretained(this)),
base::Bind(&MediaCodecDecoderTest::OnUpdateCurrentTime,
base::Unretained(this))));
@@ -257,16 +290,18 @@ void MediaCodecDecoderTest::CreateAudioDecoder() {
void MediaCodecDecoderTest::CreateVideoDecoder() {
decoder_ = scoped_ptr<MediaCodecDecoder>(new MediaCodecVideoDecoder(
- task_runner_, base::Bind(&MediaCodecDecoderTest::OnDataRequested,
- base::Unretained(this)),
+ task_runner_, &frame_statistics_,
+ base::Bind(&MediaCodecDecoderTest::OnDataRequested,
+ base::Unretained(this)),
base::Bind(&MediaCodecDecoderTest::OnStarvation, base::Unretained(this)),
+ base::Bind(&MediaCodecDecoderTest::OnDecoderDrained,
+ base::Unretained(this)),
base::Bind(&MediaCodecDecoderTest::OnStopDone, base::Unretained(this)),
+ base::Bind(&MediaCodecDecoderTest::OnKeyRequired, base::Unretained(this)),
base::Bind(&MediaCodecDecoderTest::OnError, base::Unretained(this)),
base::Bind(&MediaCodecDecoderTest::OnUpdateCurrentTime,
base::Unretained(this)),
base::Bind(&MediaCodecDecoderTest::OnVideoSizeChanged,
- base::Unretained(this)),
- base::Bind(&MediaCodecDecoderTest::OnVideoCodecCreated,
base::Unretained(this))));
data_available_cb_ = base::Bind(&MediaCodecDecoder::OnDemuxerDataAvailable,
@@ -292,7 +327,7 @@ void MediaCodecDecoderTest::SetVideoSurface() {
ASSERT_NE(nullptr, decoder_.get());
MediaCodecVideoDecoder* video_decoder =
static_cast<MediaCodecVideoDecoder*>(decoder_.get());
- video_decoder->SetPendingSurface(surface.Pass());
+ video_decoder->SetVideoSurface(surface.Pass());
}
TEST_F(MediaCodecDecoderTest, AudioPrefetch) {
@@ -327,7 +362,7 @@ TEST_F(MediaCodecDecoderTest, AudioConfigureNoParams) {
CreateAudioDecoder();
// Cannot configure without config parameters.
- EXPECT_EQ(MediaCodecDecoder::CONFIG_FAILURE, decoder_->Configure());
+ EXPECT_EQ(MediaCodecDecoder::kConfigFailure, decoder_->Configure(nullptr));
}
TEST_F(MediaCodecDecoderTest, AudioConfigureValidParams) {
@@ -339,7 +374,7 @@ TEST_F(MediaCodecDecoderTest, AudioConfigureValidParams) {
scoped_ptr<AudioFactory> factory(new AudioFactory(duration));
decoder_->SetDemuxerConfigs(factory->GetConfigs());
- EXPECT_EQ(MediaCodecDecoder::CONFIG_OK, decoder_->Configure());
+ EXPECT_EQ(MediaCodecDecoder::kConfigOk, decoder_->Configure(nullptr));
}
TEST_F(MediaCodecDecoderTest, VideoConfigureNoParams) {
@@ -347,8 +382,22 @@ TEST_F(MediaCodecDecoderTest, VideoConfigureNoParams) {
CreateVideoDecoder();
+ // decoder_->Configure() searches back for the key frame.
+ // We have to prefetch decoder.
+
+ base::TimeDelta duration = base::TimeDelta::FromMilliseconds(500);
+ SetDataFactory(scoped_ptr<VideoFactory>(new VideoFactory(duration)));
+
+ decoder_->Prefetch(base::Bind(&MediaCodecDecoderTest::SetPrefetched,
+ base::Unretained(this), true));
+
+ EXPECT_TRUE(WaitForCondition(base::Bind(&MediaCodecDecoderTest::is_prefetched,
+ base::Unretained(this))));
+
+ SetVideoSurface();
+
// Cannot configure without config parameters.
- EXPECT_EQ(MediaCodecDecoder::CONFIG_FAILURE, decoder_->Configure());
+ EXPECT_EQ(MediaCodecDecoder::kConfigFailure, decoder_->Configure(nullptr));
}
TEST_F(MediaCodecDecoderTest, VideoConfigureNoSurface) {
@@ -372,7 +421,7 @@ TEST_F(MediaCodecDecoderTest, VideoConfigureNoSurface) {
// Surface is not set, Configure() should fail.
- EXPECT_EQ(MediaCodecDecoder::CONFIG_FAILURE, decoder_->Configure());
+ EXPECT_EQ(MediaCodecDecoder::kConfigFailure, decoder_->Configure(nullptr));
}
TEST_F(MediaCodecDecoderTest, VideoConfigureInvalidSurface) {
@@ -404,9 +453,9 @@ TEST_F(MediaCodecDecoderTest, VideoConfigureInvalidSurface) {
MediaCodecVideoDecoder* video_decoder =
static_cast<MediaCodecVideoDecoder*>(decoder_.get());
- video_decoder->SetPendingSurface(surface.Pass());
+ video_decoder->SetVideoSurface(surface.Pass());
- EXPECT_EQ(MediaCodecDecoder::CONFIG_FAILURE, decoder_->Configure());
+ EXPECT_EQ(MediaCodecDecoder::kConfigFailure, decoder_->Configure(nullptr));
}
TEST_F(MediaCodecDecoderTest, VideoConfigureValidParams) {
@@ -432,7 +481,7 @@ TEST_F(MediaCodecDecoderTest, VideoConfigureValidParams) {
// Now we can expect Configure() to succeed.
- EXPECT_EQ(MediaCodecDecoder::CONFIG_OK, decoder_->Configure());
+ EXPECT_EQ(MediaCodecDecoder::kConfigOk, decoder_->Configure(nullptr));
}
TEST_F(MediaCodecDecoderTest, AudioStartWithoutConfigure) {
@@ -460,13 +509,16 @@ TEST_F(MediaCodecDecoderTest, AudioStartWithoutConfigure) {
EXPECT_FALSE(decoder_->Start(base::TimeDelta::FromMilliseconds(0)));
}
+// http://crbug.com/518900
TEST_F(MediaCodecDecoderTest, AudioPlayTillCompletion) {
SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+ DVLOG(0) << "AudioPlayTillCompletion started";
+
CreateAudioDecoder();
base::TimeDelta duration = base::TimeDelta::FromMilliseconds(500);
- base::TimeDelta timeout = base::TimeDelta::FromMilliseconds(600);
+ base::TimeDelta timeout = base::TimeDelta::FromMilliseconds(1500);
SetDataFactory(scoped_ptr<AudioFactory>(new AudioFactory(duration)));
@@ -479,7 +531,7 @@ TEST_F(MediaCodecDecoderTest, AudioPlayTillCompletion) {
decoder_->SetDemuxerConfigs(GetConfigs());
- EXPECT_EQ(MediaCodecDecoder::CONFIG_OK, decoder_->Configure());
+ EXPECT_EQ(MediaCodecDecoder::kConfigOk, decoder_->Configure(nullptr));
EXPECT_TRUE(decoder_->Start(base::TimeDelta::FromMilliseconds(0)));
@@ -490,9 +542,14 @@ TEST_F(MediaCodecDecoderTest, AudioPlayTillCompletion) {
EXPECT_TRUE(decoder_->IsStopped());
EXPECT_TRUE(decoder_->IsCompleted());
- // It is hard to properly estimate minimum and maximum values because
- // reported times are different from PTS.
- EXPECT_EQ(25, pts_stat_.num_values());
+ // Last buffered timestamp should be no less than PTS.
+ // The number of hits in pts_stat_ depends on the preroll implementation.
+ // We might not report the time for the first buffer after preroll that
+ // is written to the audio track. pts_stat_.num_values() is either 21 or 22.
+ EXPECT_LE(21, pts_stat_.num_values());
+ EXPECT_LE(data_factory_->last_pts(), pts_stat_.max());
+
+ DVLOG(0) << "AudioPlayTillCompletion stopping";
}
TEST_F(MediaCodecDecoderTest, VideoPlayTillCompletion) {
@@ -518,7 +575,7 @@ TEST_F(MediaCodecDecoderTest, VideoPlayTillCompletion) {
SetVideoSurface();
- EXPECT_EQ(MediaCodecDecoder::CONFIG_OK, decoder_->Configure());
+ EXPECT_EQ(MediaCodecDecoder::kConfigOk, decoder_->Configure(nullptr));
EXPECT_TRUE(decoder_->Start(base::TimeDelta::FromMilliseconds(0)));
@@ -529,7 +586,8 @@ TEST_F(MediaCodecDecoderTest, VideoPlayTillCompletion) {
EXPECT_TRUE(decoder_->IsStopped());
EXPECT_TRUE(decoder_->IsCompleted());
- EXPECT_EQ(26, pts_stat_.num_values());
+ int expected_video_frames = GetFrameCount(duration, kVideoFramePeriod);
+ EXPECT_EQ(expected_video_frames, pts_stat_.num_values());
EXPECT_EQ(data_factory_->last_pts(), pts_stat_.max());
}
@@ -555,7 +613,7 @@ TEST_F(MediaCodecDecoderTest, VideoStopAndResume) {
SetVideoSurface();
- EXPECT_EQ(MediaCodecDecoder::CONFIG_OK, decoder_->Configure());
+ EXPECT_EQ(MediaCodecDecoder::kConfigOk, decoder_->Configure(nullptr));
SetStopRequestAtTime(stop_request_time);
@@ -597,11 +655,13 @@ TEST_F(MediaCodecDecoderTest, VideoStopAndResume) {
EXPECT_TRUE(decoder_->IsCompleted());
// We should not skip frames in this process.
- EXPECT_EQ(26, pts_stat_.num_values());
+ int expected_video_frames = GetFrameCount(duration, kVideoFramePeriod);
+ EXPECT_EQ(expected_video_frames, pts_stat_.num_values());
EXPECT_EQ(data_factory_->last_pts(), pts_stat_.max());
}
-TEST_F(MediaCodecDecoderTest, AudioStarvationAndStop) {
+// http://crbug.com/518900
+TEST_F(MediaCodecDecoderTest, DISABLED_AudioStarvationAndStop) {
SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
CreateAudioDecoder();
@@ -623,7 +683,7 @@ TEST_F(MediaCodecDecoderTest, AudioStarvationAndStop) {
// Configure.
decoder_->SetDemuxerConfigs(GetConfigs());
- EXPECT_EQ(MediaCodecDecoder::CONFIG_OK, decoder_->Configure());
+ EXPECT_EQ(MediaCodecDecoder::kConfigOk, decoder_->Configure(nullptr));
// Start.
EXPECT_TRUE(decoder_->Start(base::TimeDelta::FromMilliseconds(0)));
@@ -648,4 +708,59 @@ TEST_F(MediaCodecDecoderTest, AudioStarvationAndStop) {
EXPECT_FALSE(decoder_->IsCompleted());
}
+TEST_F(MediaCodecDecoderTest, VideoFirstUnitIsReconfig) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // Test that the kConfigChanged unit that comes before the first data unit
+ // gets processed, i.e. is not lost.
+
+ CreateVideoDecoder();
+
+ base::TimeDelta duration = base::TimeDelta::FromMilliseconds(200);
+ base::TimeDelta timeout = base::TimeDelta::FromMilliseconds(1000);
+ SetDataFactory(scoped_ptr<VideoFactory>(new VideoFactory(duration)));
+
+ // Ask factory to produce initial configuration unit. The configuration will
+ // be factory.GetConfigs().
+ data_factory_->RequestInitialConfigs();
+
+ // Create an alternative configuration (we just alter video size).
+ DemuxerConfigs alt_configs = data_factory_->GetConfigs();
+ alt_configs.video_size = gfx::Size(100, 100);
+
+ // Pass the alternative configuration to decoder.
+ decoder_->SetDemuxerConfigs(alt_configs);
+
+ // Prefetch.
+ decoder_->Prefetch(base::Bind(&MediaCodecDecoderTest::SetPrefetched,
+ base::Unretained(this), true));
+
+ EXPECT_TRUE(WaitForCondition(base::Bind(&MediaCodecDecoderTest::is_prefetched,
+ base::Unretained(this))));
+
+ // Current implementation reports the new video size after
+ // SetDemuxerConfigs(), verify that it is alt size.
+ EXPECT_EQ(alt_configs.video_size, video_size_);
+
+ SetVideoSurface();
+
+ // Configure.
+ EXPECT_EQ(MediaCodecDecoder::kConfigOk, decoder_->Configure(nullptr));
+
+ // Start.
+ EXPECT_TRUE(decoder_->Start(base::TimeDelta::FromMilliseconds(0)));
+
+ // Wait for completion.
+ EXPECT_TRUE(WaitForCondition(
+ base::Bind(&MediaCodecDecoderTest::is_stopped, base::Unretained(this)),
+ timeout));
+
+ EXPECT_TRUE(decoder_->IsStopped());
+ EXPECT_TRUE(decoder_->IsCompleted());
+ EXPECT_EQ(data_factory_->last_pts(), pts_stat_.max());
+
+ // Check that the reported video size is the one from the in-stream configs.
+ EXPECT_EQ(data_factory_->GetConfigs().video_size, video_size_);
+}
+
} // namespace media
diff --git a/chromium/media/base/android/media_codec_player.cc b/chromium/media/base/android/media_codec_player.cc
index aa05fdf5829..4c297f01b87 100644
--- a/chromium/media/base/android/media_codec_player.cc
+++ b/chromium/media/base/android/media_codec_player.cc
@@ -12,8 +12,11 @@
#include "base/threading/thread.h"
#include "media/base/android/media_codec_audio_decoder.h"
#include "media/base/android/media_codec_video_decoder.h"
+#include "media/base/android/media_drm_bridge.h"
#include "media/base/android/media_player_manager.h"
-#include "media/base/buffers.h"
+#include "media/base/android/media_task_runner.h"
+#include "media/base/bind_to_current_loop.h"
+#include "media/base/timestamp_constants.h"
#define RUN_ON_MEDIA_THREAD(METHOD, ...) \
do { \
@@ -28,48 +31,41 @@
namespace media {
-class MediaThread : public base::Thread {
- public:
- MediaThread() : base::Thread("BrowserMediaThread") {
- Start();
- }
-};
-
-// Create media thread
-base::LazyInstance<MediaThread>::Leaky
- g_media_thread = LAZY_INSTANCE_INITIALIZER;
-
-
-scoped_refptr<base::SingleThreadTaskRunner> GetMediaTaskRunner() {
- return g_media_thread.Pointer()->task_runner();
-}
-
// MediaCodecPlayer implementation.
MediaCodecPlayer::MediaCodecPlayer(
int player_id,
base::WeakPtr<MediaPlayerManager> manager,
- const RequestMediaResourcesCB& request_media_resources_cb,
+ const OnDecoderResourcesReleasedCB& on_decoder_resources_released_cb,
scoped_ptr<DemuxerAndroid> demuxer,
const GURL& frame_url)
: MediaPlayerAndroid(player_id,
manager.get(),
- request_media_resources_cb,
+ on_decoder_resources_released_cb,
frame_url),
ui_task_runner_(base::ThreadTaskRunnerHandle::Get()),
demuxer_(demuxer.Pass()),
- state_(STATE_PAUSED),
+ state_(kStatePaused),
interpolator_(&default_tick_clock_),
pending_start_(false),
+ pending_seek_(kNoTimestamp()),
+ drm_bridge_(nullptr),
+ cdm_registration_id_(0),
+ key_is_required_(false),
+ key_is_added_(false),
media_weak_factory_(this) {
DCHECK(ui_task_runner_->BelongsToCurrentThread());
DVLOG(1) << "MediaCodecPlayer::MediaCodecPlayer: player_id:" << player_id;
- request_resources_cb_ = base::Bind(request_media_resources_cb_, player_id);
-
completion_cb_ =
base::Bind(&MediaPlayerManager::OnPlaybackComplete, manager, player_id);
+ waiting_for_decryption_key_cb_ = base::Bind(
+ &MediaPlayerManager::OnWaitingForDecryptionKey, manager, player_id);
+ seek_done_cb_ =
+ base::Bind(&MediaPlayerManager::OnSeekComplete, manager, player_id);
+ error_cb_ = base::Bind(&MediaPlayerManager::OnError, manager, player_id);
+
attach_listener_cb_ = base::Bind(&MediaPlayerAndroid::AttachListener,
WeakPtrForUIThread(), nullptr);
detach_listener_cb_ =
@@ -90,6 +86,23 @@ MediaCodecPlayer::~MediaCodecPlayer()
{
DVLOG(1) << "MediaCodecPlayer::~MediaCodecPlayer";
DCHECK(GetMediaTaskRunner()->BelongsToCurrentThread());
+
+ // Currently the unit tests wait for the MediaCodecPlayer destruction by
+ // watching the demuxer, which is destroyed as one of the member variables.
+ // Release the codecs here, before any member variable is destroyed to make
+ // the unit tests happy.
+
+ if (video_decoder_)
+ video_decoder_->ReleaseDecoderResources();
+ if (audio_decoder_)
+ audio_decoder_->ReleaseDecoderResources();
+
+ media_stat_->StopAndReport(GetInterpolatedTime());
+
+ if (drm_bridge_) {
+ DCHECK(cdm_registration_id_);
+ drm_bridge_->UnregisterPlayer(cdm_registration_id_);
+ }
}
void MediaCodecPlayer::Initialize() {
@@ -127,16 +140,70 @@ void MediaCodecPlayer::SetVideoSurface(gfx::ScopedJavaSurface surface) {
DVLOG(1) << __FUNCTION__ << (surface.IsEmpty() ? " empty" : " non-empty");
- // I assume that if video decoder already has the surface,
- // there will be two calls:
- // (1) SetVideoSurface(0)
- // (2) SetVideoSurface(new_surface)
- video_decoder_->SetPendingSurface(surface.Pass());
+ // Save the empty-ness before we pass the surface to the decoder.
+ bool surface_is_empty = surface.IsEmpty();
+
+ // Apparently RemoveVideoSurface() can be called several times in a row,
+ // ignore the second and subsequent calls.
+ if (surface_is_empty && !video_decoder_->HasVideoSurface()) {
+ DVLOG(1) << __FUNCTION__ << ": surface already removed, ignoring";
+ return;
+ }
+
+ // Do not set unprotected surface if we know that we need a protected one.
+ // Empty surface means the surface removal and we always allow for it.
+ if (!surface_is_empty && video_decoder_->IsProtectedSurfaceRequired() &&
+ !surface.is_protected()) {
+ DVLOG(0) << __FUNCTION__ << ": surface is not protected, ignoring";
+ return;
+ }
- if (video_decoder_->HasPendingSurface() &&
- state_ == STATE_WAITING_FOR_SURFACE) {
- SetState(STATE_PLAYING);
- StartPlaybackDecoders();
+ video_decoder_->SetVideoSurface(surface.Pass());
+
+ if (surface_is_empty) {
+ // Remove video surface.
+ switch (state_) {
+ case kStatePlaying:
+ if (VideoFinished())
+ break;
+
+ DVLOG(1) << __FUNCTION__ << ": stopping and restarting";
+ // Stop decoders as quickly as possible.
+ StopDecoders(); // synchronous stop
+
+ // Prefetch or wait for initial configuration.
+ if (HasAudio() || HasVideo()) {
+ SetState(kStatePrefetching);
+ StartPrefetchDecoders();
+ } else {
+ SetState(kStateWaitingForConfig);
+ }
+ break;
+
+ default:
+ break; // ignore
+ }
+ } else {
+ // Replace video surface.
+ switch (state_) {
+ case kStateWaitingForSurface:
+ SetState(kStatePlaying);
+ StartPlaybackOrBrowserSeek();
+ break;
+
+ case kStatePlaying:
+ if (VideoFinished())
+ break;
+
+ DVLOG(1) << __FUNCTION__ << ": requesting to stop and restart";
+ SetState(kStateStopping);
+ RequestToStopDecoders();
+ SetPendingStart(true);
+ break;
+
+ default:
+ break; // ignore
+ }
}
}
@@ -146,19 +213,30 @@ void MediaCodecPlayer::Start() {
DVLOG(1) << __FUNCTION__;
switch (state_) {
- case STATE_PAUSED:
+ case kStatePaused:
+ // Request play permission or wait for initial configuration.
if (HasAudio() || HasVideo()) {
- SetState(STATE_PREFETCHING);
- StartPrefetchDecoders();
+ SetState(kStateWaitingForPermission);
+ RequestPlayPermission();
} else {
- SetState(STATE_WAITING_FOR_CONFIG);
+ SetState(kStateWaitingForConfig);
}
break;
- case STATE_STOPPING:
+ case kStateStopping:
+ case kStateWaitingForSeek:
SetPendingStart(true);
break;
+ case kStateWaitingForConfig:
+ case kStateWaitingForPermission:
+ case kStatePrefetching:
+ case kStatePlaying:
+ case kStateWaitingForSurface:
+ case kStateWaitingForKey:
+ case kStateWaitingForMediaCrypto:
+ case kStateError:
+ break; // Ignore
default:
- // Ignore
+ NOTREACHED();
break;
}
}
@@ -168,21 +246,29 @@ void MediaCodecPlayer::Pause(bool is_media_related_action) {
DVLOG(1) << __FUNCTION__;
+ SetPendingStart(false);
+
switch (state_) {
- case STATE_PREFETCHING:
- SetState(STATE_PAUSED);
- StopDecoders();
- break;
- case STATE_WAITING_FOR_SURFACE:
- SetState(STATE_PAUSED);
+ case kStateWaitingForConfig:
+ case kStateWaitingForPermission:
+ case kStatePrefetching:
+ case kStateWaitingForSurface:
+ case kStateWaitingForKey:
+ case kStateWaitingForMediaCrypto:
+ SetState(kStatePaused);
StopDecoders();
break;
- case STATE_PLAYING:
- SetState(STATE_STOPPING);
+ case kStatePlaying:
+ SetState(kStateStopping);
RequestToStopDecoders();
break;
+ case kStatePaused:
+ case kStateStopping:
+ case kStateWaitingForSeek:
+ case kStateError:
+ break; // Ignore
default:
- // Ignore
+ NOTREACHED();
break;
}
}
@@ -191,16 +277,77 @@ void MediaCodecPlayer::SeekTo(base::TimeDelta timestamp) {
RUN_ON_MEDIA_THREAD(SeekTo, timestamp);
DVLOG(1) << __FUNCTION__ << " " << timestamp;
- NOTIMPLEMENTED();
+
+ switch (state_) {
+ case kStatePaused:
+ SetState(kStateWaitingForSeek);
+ RequestDemuxerSeek(timestamp);
+ break;
+ case kStateWaitingForConfig:
+ case kStateWaitingForPermission:
+ case kStatePrefetching:
+ case kStateWaitingForSurface:
+ case kStateWaitingForKey:
+ case kStateWaitingForMediaCrypto:
+ SetState(kStateWaitingForSeek);
+ StopDecoders();
+ SetPendingStart(true);
+ RequestDemuxerSeek(timestamp);
+ break;
+ case kStatePlaying:
+ SetState(kStateStopping);
+ RequestToStopDecoders();
+ SetPendingStart(true);
+ SetPendingSeek(timestamp);
+ break;
+ case kStateStopping:
+ SetPendingSeek(timestamp);
+ break;
+ case kStateWaitingForSeek:
+ SetPendingSeek(timestamp);
+ break;
+ case kStateError:
+ break; // ignore
+ default:
+ NOTREACHED();
+ break;
+ }
}
void MediaCodecPlayer::Release() {
+ // TODO(qinmin): the callback should be posted onto the UI thread when
+ // Release() finishes on media thread. However, the BrowserMediaPlayerManager
+ // could be gone in that case, which cause the MediaThrottler unable to
+ // track the active players. We should pass
+ // MediaThrottler::OnDecodeRequestFinished() to this class in the ctor, but
+ // also need a way for BrowserMediaPlayerManager to track active players.
+ if (ui_task_runner_->BelongsToCurrentThread())
+ on_decoder_resources_released_cb_.Run(player_id());
+
RUN_ON_MEDIA_THREAD(Release);
DVLOG(1) << __FUNCTION__;
- SetState(STATE_PAUSED);
+ // Stop decoding threads and delete MediaCodecs, but keep IPC between browser
+ // and renderer processes going. Seek should work across and after Release().
+
ReleaseDecoderResources();
+
+ SetPendingStart(false);
+
+ if (state_ != kStateWaitingForSeek)
+ SetState(kStatePaused);
+
+ // Clear encryption key related flags.
+ key_is_required_ = false;
+ key_is_added_ = false;
+
+ base::TimeDelta pending_seek_time = GetPendingSeek();
+ if (pending_seek_time != kNoTimestamp()) {
+ SetPendingSeek(kNoTimestamp());
+ SetState(kStateWaitingForSeek);
+ RequestDemuxerSeek(pending_seek_time);
+ }
}
void MediaCodecPlayer::SetVolume(double volume) {
@@ -232,7 +379,10 @@ base::TimeDelta MediaCodecPlayer::GetDuration() {
bool MediaCodecPlayer::IsPlaying() {
DCHECK(ui_task_runner_->BelongsToCurrentThread());
- return state_ == STATE_PLAYING;
+
+ // TODO(timav): Use another variable since |state_| should only be accessed on
+ // Media thread.
+ return state_ == kStatePlaying || state_ == kStateStopping;
}
bool MediaCodecPlayer::CanPause() {
@@ -261,8 +411,39 @@ bool MediaCodecPlayer::IsPlayerReady() {
}
void MediaCodecPlayer::SetCdm(BrowserCdm* cdm) {
- DCHECK(ui_task_runner_->BelongsToCurrentThread());
- NOTIMPLEMENTED();
+ RUN_ON_MEDIA_THREAD(SetCdm, cdm);
+
+ DVLOG(1) << __FUNCTION__;
+
+ // Currently we don't support DRM change during the middle of playback, even
+ // if the player is paused. There is no current plan to support it, see
+ // http://crbug.com/253792.
+ if (state_ != kStatePaused || GetInterpolatedTime() > base::TimeDelta()) {
+ VLOG(0) << "Setting DRM bridge after playback has started is not supported";
+ return;
+ }
+
+ if (drm_bridge_) {
+ NOTREACHED() << "Currently we do not support resetting CDM.";
+ return;
+ }
+
+ DCHECK(cdm);
+ drm_bridge_ = static_cast<MediaDrmBridge*>(cdm);
+
+ DCHECK(drm_bridge_);
+
+ cdm_registration_id_ = drm_bridge_->RegisterPlayer(
+ base::Bind(&MediaCodecPlayer::OnKeyAdded, media_weak_this_),
+ base::Bind(&MediaCodecPlayer::OnCdmUnset, media_weak_this_));
+
+ MediaDrmBridge::MediaCryptoReadyCB cb = BindToCurrentLoop(
+ base::Bind(&MediaCodecPlayer::OnMediaCryptoReady, media_weak_this_));
+
+ // Post back to UI thread.
+ ui_task_runner_->PostTask(FROM_HERE,
+ base::Bind(&MediaDrmBridge::SetMediaCryptoReadyCB,
+ drm_bridge_->WeakPtrForUIThread(), cb));
}
// Callbacks from Demuxer.
@@ -304,7 +485,67 @@ void MediaCodecPlayer::OnDemuxerSeekDone(
DVLOG(1) << __FUNCTION__ << " actual_time:" << actual_browser_seek_time;
- NOTIMPLEMENTED();
+ DCHECK(seek_info_.get());
+ DCHECK(seek_info_->seek_time != kNoTimestamp());
+
+ // A browser seek must not jump into the past. Ideally, it seeks to the
+ // requested time, but it might jump into the future.
+ DCHECK(!seek_info_->is_browser_seek ||
+ seek_info_->seek_time <= actual_browser_seek_time);
+
+ // Restrict the current time to be equal to seek_time
+ // for the next StartPlaybackDecoders() call.
+
+ base::TimeDelta seek_time = seek_info_->is_browser_seek
+ ? actual_browser_seek_time
+ : seek_info_->seek_time;
+
+ interpolator_.SetBounds(seek_time, seek_time);
+
+ audio_decoder_->SetBaseTimestamp(seek_time);
+
+ audio_decoder_->SetPrerollTimestamp(seek_time);
+ video_decoder_->SetPrerollTimestamp(seek_time);
+
+ // The Flush() might set the state to kStateError.
+ if (state_ == kStateError) {
+ // Notify the Renderer.
+ if (!seek_info_->is_browser_seek)
+ ui_task_runner_->PostTask(FROM_HERE,
+ base::Bind(seek_done_cb_, seek_time));
+
+ seek_info_.reset();
+ return;
+ }
+
+ DCHECK_EQ(kStateWaitingForSeek, state_);
+
+ base::TimeDelta pending_seek_time = GetPendingSeek();
+ if (pending_seek_time != kNoTimestamp()) {
+ // Keep kStateWaitingForSeek
+ SetPendingSeek(kNoTimestamp());
+ RequestDemuxerSeek(pending_seek_time);
+ return;
+ }
+
+ if (HasPendingStart()) {
+ SetPendingStart(false);
+ // Request play permission or wait for initial configuration.
+ if (HasAudio() || HasVideo()) {
+ SetState(kStateWaitingForPermission);
+ RequestPlayPermission();
+ } else {
+ SetState(kStateWaitingForConfig);
+ }
+ } else {
+ SetState(kStatePaused);
+ }
+
+ // Notify the Renderer.
+ if (!seek_info_->is_browser_seek)
+ ui_task_runner_->PostTask(FROM_HERE, base::Bind(seek_done_cb_, seek_time));
+
+ seek_info_.reset();
}
void MediaCodecPlayer::OnDemuxerDurationChanged(
@@ -315,8 +556,58 @@ void MediaCodecPlayer::OnDemuxerDurationChanged(
duration_ = duration;
}
+void MediaCodecPlayer::SetDecodersTimeCallbackForTests(
+ DecodersTimeCallback cb) {
+ DCHECK(ui_task_runner_->BelongsToCurrentThread());
+ decoders_time_cb_ = cb;
+}
+
+void MediaCodecPlayer::SetCodecCreatedCallbackForTests(
+ CodecCreatedCallback cb) {
+ DCHECK(ui_task_runner_->BelongsToCurrentThread());
+ DCHECK(audio_decoder_ && video_decoder_);
+
+ audio_decoder_->SetCodecCreatedCallbackForTests(
+ base::Bind(cb, DemuxerStream::AUDIO));
+ video_decoder_->SetCodecCreatedCallbackForTests(
+ base::Bind(cb, DemuxerStream::VIDEO));
+}
+
+void MediaCodecPlayer::SetAlwaysReconfigureForTests(DemuxerStream::Type type) {
+ DCHECK(ui_task_runner_->BelongsToCurrentThread());
+ DCHECK(audio_decoder_ && video_decoder_);
+
+ if (type == DemuxerStream::AUDIO)
+ audio_decoder_->SetAlwaysReconfigureForTests();
+ else if (type == DemuxerStream::VIDEO)
+ video_decoder_->SetAlwaysReconfigureForTests();
+}
+
+bool MediaCodecPlayer::IsPrerollingForTests(DemuxerStream::Type type) const {
+ DCHECK(ui_task_runner_->BelongsToCurrentThread());
+ DCHECK(audio_decoder_ && video_decoder_);
+
+ if (type == DemuxerStream::AUDIO)
+ return audio_decoder_->IsPrerollingForTests();
+ else if (type == DemuxerStream::VIDEO)
+ return video_decoder_->IsPrerollingForTests();
+ else
+ return false;
+}
+
// Events from Player, called on UI thread
+void MediaCodecPlayer::RequestPermissionAndPostResult(
+ base::TimeDelta duration) {
+ DCHECK(ui_task_runner_->BelongsToCurrentThread());
+ DVLOG(1) << __FUNCTION__ << " duration:" << duration;
+
+ bool granted = manager()->RequestPlay(player_id(), duration);
+ GetMediaTaskRunner()->PostTask(
+ FROM_HERE, base::Bind(&MediaCodecPlayer::OnPermissionDecided,
+ media_weak_this_, granted));
+}
+
void MediaCodecPlayer::OnMediaMetadataChanged(base::TimeDelta duration,
const gfx::Size& video_size) {
DCHECK(ui_task_runner_->BelongsToCurrentThread());
@@ -340,6 +631,35 @@ void MediaCodecPlayer::OnTimeUpdate(base::TimeDelta current_timestamp,
manager()->OnTimeUpdate(player_id(), current_timestamp, current_time_ticks);
}
+// Event from manager, called on Media thread
+
+void MediaCodecPlayer::OnPermissionDecided(bool granted) {
+ DCHECK(GetMediaTaskRunner()->BelongsToCurrentThread());
+
+ DVLOG(1) << __FUNCTION__ << ": " << (granted ? "granted" : "denied");
+
+ switch (state_) {
+ case kStateWaitingForPermission:
+ if (granted) {
+ SetState(kStatePrefetching);
+ StartPrefetchDecoders();
+ } else {
+ SetState(kStatePaused);
+ StopDecoders();
+ }
+ break;
+
+ case kStatePaused:
+ case kStateWaitingForSeek:
+ case kStateError:
+ break; // ignore
+
+ default:
+ NOTREACHED() << __FUNCTION__ << ": wrong state " << AsString(state_);
+ break;
+ }
+}
+
// Events from Decoders, called on Media thread
void MediaCodecPlayer::RequestDemuxerData(DemuxerStream::Type stream_type) {
@@ -361,59 +681,143 @@ void MediaCodecPlayer::RequestDemuxerData(DemuxerStream::Type stream_type) {
void MediaCodecPlayer::OnPrefetchDone() {
DCHECK(GetMediaTaskRunner()->BelongsToCurrentThread());
- DVLOG(1) << __FUNCTION__;
- if (state_ != STATE_PREFETCHING)
+ if (state_ != kStatePrefetching) {
+ DVLOG(1) << __FUNCTION__ << " wrong state " << AsString(state_)
+ << " ignoring";
return; // Ignore
+ }
+
+ DVLOG(1) << __FUNCTION__;
if (!HasAudio() && !HasVideo()) {
// No configuration at all after prefetching.
// This is an error, initial configuration is expected
// before the first data chunk.
- GetMediaTaskRunner()->PostTask(FROM_HERE, error_cb_);
+ DCHECK(!internal_error_cb_.is_null());
+ GetMediaTaskRunner()->PostTask(FROM_HERE, internal_error_cb_);
+ return;
+ }
+
+ if (HasVideo() && !video_decoder_->HasVideoSurface()) {
+ SetState(kStateWaitingForSurface);
return;
}
- if (HasVideo() && !HasPendingSurface()) {
- SetState(STATE_WAITING_FOR_SURFACE);
+ if (key_is_required_ && !key_is_added_) {
+ SetState(kStateWaitingForKey);
+ ui_task_runner_->PostTask(FROM_HERE, waiting_for_decryption_key_cb_);
return;
}
- SetState(STATE_PLAYING);
- StartPlaybackDecoders();
+ SetState(kStatePlaying);
+ StartPlaybackOrBrowserSeek();
}
-void MediaCodecPlayer::OnStopDone() {
+void MediaCodecPlayer::OnPrerollDone() {
DCHECK(GetMediaTaskRunner()->BelongsToCurrentThread());
+
+ if (state_ != kStatePlaying) {
+ DVLOG(1) << __FUNCTION__ << ": in state " << AsString(state_)
+ << ", ignoring";
+ return;
+ }
+
DVLOG(1) << __FUNCTION__;
- if (!(audio_decoder_->IsStopped() && video_decoder_->IsStopped()))
+ StartStatus status = StartDecoders();
+ if (status != kStartOk)
+ GetMediaTaskRunner()->PostTask(FROM_HERE, internal_error_cb_);
+}
+
+void MediaCodecPlayer::OnDecoderDrained(DemuxerStream::Type type) {
+ DCHECK(GetMediaTaskRunner()->BelongsToCurrentThread());
+ DVLOG(1) << __FUNCTION__ << " " << type;
+
+ // We expect that OnStopDone() comes next.
+
+ DCHECK(type == DemuxerStream::AUDIO || type == DemuxerStream::VIDEO);
+
+ // DCHECK(state_ == kStatePlaying || state_ == kStateStopping)
+ // << __FUNCTION__ << " illegal state: " << AsString(state_);
+ //
+ // With simultaneous reconfiguration of audio and video streams the state
+ // can be kStatePrefetching as well:
+ // OnLastFrameRendered VIDEO (VIDEO decoder is stopped)
+ // OnLastFrameRendered AUDIO (AUDIO decoder is stopped)
+ // OnDecoderDrained VIDEO (kStatePlaying -> kStateStopping)
+ // OnStopDone VIDEO (kStateStopping -> kStatePrefetching)
+ // OnDecoderDrained AUDIO
+ // OnStopDone AUDIO
+ //
+ // TODO(timav): combine OnDecoderDrained() and OnStopDone() ?
+
+ switch (state_) {
+ case kStatePlaying:
+ SetState(kStateStopping);
+ SetPendingStart(true);
+
+ if (type == DemuxerStream::AUDIO && !VideoFinished()) {
+ DVLOG(1) << __FUNCTION__ << " requesting to stop video";
+ video_decoder_->RequestToStop();
+ } else if (type == DemuxerStream::VIDEO && !AudioFinished()) {
+ DVLOG(1) << __FUNCTION__ << " requesting to stop audio";
+ audio_decoder_->RequestToStop();
+ }
+ break;
+
+ default:
+ break;
+ }
+}
+
+void MediaCodecPlayer::OnStopDone(DemuxerStream::Type type) {
+ DCHECK(GetMediaTaskRunner()->BelongsToCurrentThread());
+ DVLOG(1) << __FUNCTION__ << " " << type
+ << " interpolated time:" << GetInterpolatedTime();
+
+ if (!(audio_decoder_->IsStopped() && video_decoder_->IsStopped())) {
+ DVLOG(1) << __FUNCTION__ << " both audio and video has to be stopped"
+ << ", ignoring";
return; // Wait until other stream is stopped
+ }
// At this point decoder threads should not be running
if (interpolator_.interpolating())
interpolator_.StopInterpolating();
+ base::TimeDelta seek_time;
switch (state_) {
- case STATE_STOPPING:
- if (HasPendingStart()) {
+ case kStateStopping: {
+ base::TimeDelta seek_time = GetPendingSeek();
+ if (seek_time != kNoTimestamp()) {
+ SetState(kStateWaitingForSeek);
+ SetPendingSeek(kNoTimestamp());
+ RequestDemuxerSeek(seek_time);
+ } else if (HasPendingStart()) {
SetPendingStart(false);
- SetState(STATE_PREFETCHING);
- StartPrefetchDecoders();
+ SetState(kStateWaitingForPermission);
+ RequestPlayPermission();
} else {
- SetState(STATE_PAUSED);
+ SetState(kStatePaused);
}
- break;
- case STATE_PLAYING:
+ } break;
+ case kStatePlaying:
// Unexpected stop means completion
- SetState(STATE_PAUSED);
+ SetState(kStatePaused);
break;
default:
- DVLOG(0) << __FUNCTION__ << " illegal state: " << AsString(state_);
- NOTREACHED();
- break;
+ // DVLOG(0) << __FUNCTION__ << " illegal state: " << AsString(state_);
+ // NOTREACHED();
+ // Ignore! There can be a race condition: audio posts OnStopDone,
+ // then video posts, then first OnStopDone arrives at which point
+ // both streams are already stopped, then second OnStopDone arrives. When
+ // the second one arrives, the state is not kStateStopping any more.
+ return;
}
+ media_stat_->StopAndReport(GetInterpolatedTime());
+
// DetachListener to UI thread
ui_task_runner_->PostTask(FROM_HERE, detach_listener_cb_);
@@ -421,46 +825,86 @@ void MediaCodecPlayer::OnStopDone() {
ui_task_runner_->PostTask(FROM_HERE, completion_cb_);
}
+void MediaCodecPlayer::OnMissingKeyReported(DemuxerStream::Type type) {
+ DCHECK(GetMediaTaskRunner()->BelongsToCurrentThread());
+ DVLOG(1) << __FUNCTION__ << " " << type;
+
+ // Request stop and restart to pick up the key.
+ key_is_required_ = true;
+
+ if (state_ == kStatePlaying) {
+ SetState(kStateStopping);
+ RequestToStopDecoders();
+ SetPendingStart(true);
+ }
+}
+
void MediaCodecPlayer::OnError() {
DCHECK(GetMediaTaskRunner()->BelongsToCurrentThread());
DVLOG(1) << __FUNCTION__;
- // STATE_ERROR blocks all events
- SetState(STATE_ERROR);
+ // kStateError blocks all events
+ SetState(kStateError);
ReleaseDecoderResources();
+
+ ui_task_runner_->PostTask(FROM_HERE,
+ base::Bind(error_cb_, MEDIA_ERROR_DECODE));
}
void MediaCodecPlayer::OnStarvation(DemuxerStream::Type type) {
DCHECK(GetMediaTaskRunner()->BelongsToCurrentThread());
DVLOG(1) << __FUNCTION__ << " stream type:" << type;
- if (state_ != STATE_PLAYING)
+ if (state_ != kStatePlaying)
return; // Ignore
- SetState(STATE_STOPPING);
+ SetState(kStateStopping);
RequestToStopDecoders();
SetPendingStart(true);
+
+ media_stat_->AddStarvation();
}
void MediaCodecPlayer::OnTimeIntervalUpdate(DemuxerStream::Type type,
base::TimeDelta now_playing,
- base::TimeDelta last_buffered) {
+ base::TimeDelta last_buffered,
+ bool postpone) {
DCHECK(GetMediaTaskRunner()->BelongsToCurrentThread());
- interpolator_.SetBounds(now_playing, last_buffered);
+ DVLOG(2) << __FUNCTION__ << ": stream type:" << type << " [" << now_playing
+ << "," << last_buffered << "]" << (postpone ? " postpone" : "");
+
+ // For testing only: report time interval as we receive it from decoders
+ // as an indication of what is being rendered. Do not post this callback
+ // for postponed frames: although the PTS is correct, the tests also care
+ // about the wall clock time this callback arrives and deduce the rendering
+ // moment from it.
+ if (!decoders_time_cb_.is_null() && !postpone) {
+ ui_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(decoders_time_cb_, type, now_playing, last_buffered));
+ }
- // Post to UI thread
- ui_task_runner_->PostTask(FROM_HERE,
- base::Bind(time_update_cb_, GetInterpolatedTime(),
- base::TimeTicks::Now()));
-}
+ // I assume that audio stream cannot be added after we get configs by
+ // OnDemuxerConfigsAvailable(), but that audio can finish early.
-void MediaCodecPlayer::OnVideoCodecCreated() {
- DCHECK(GetMediaTaskRunner()->BelongsToCurrentThread());
+ if (type == DemuxerStream::VIDEO) {
+ // Ignore video PTS if there is audio stream or if it's behind current
+ // time as set by audio stream.
+ if (!AudioFinished() ||
+ (HasAudio() && now_playing < interpolator_.GetInterpolatedTime()))
+ return;
+ }
- // This callback requests resources by releasing other players.
- ui_task_runner_->PostTask(FROM_HERE, request_resources_cb_);
+ interpolator_.SetBounds(now_playing, last_buffered);
+
+ // Post to UI thread
+ if (!postpone) {
+ ui_task_runner_->PostTask(FROM_HERE,
+ base::Bind(time_update_cb_, GetInterpolatedTime(),
+ base::TimeTicks::Now()));
+ }
}
void MediaCodecPlayer::OnVideoResolutionChanged(const gfx::Size& size) {
@@ -473,24 +917,86 @@ void MediaCodecPlayer::OnVideoResolutionChanged(const gfx::Size& size) {
FROM_HERE, base::Bind(metadata_changed_cb_, kNoTimestamp(), size));
}
-// State machine operations, called on Media thread
+// Callbacks from MediaDrmBridge.
-void MediaCodecPlayer::SetState(PlayerState new_state) {
+void MediaCodecPlayer::OnMediaCryptoReady(
+ MediaDrmBridge::JavaObjectPtr media_crypto,
+ bool needs_protected_surface) {
DCHECK(GetMediaTaskRunner()->BelongsToCurrentThread());
+ DVLOG(1) << __FUNCTION__ << " protected surface is "
+ << (needs_protected_surface ? "required" : "not required");
- DVLOG(1) << "SetState:" << AsString(state_) << " -> " << AsString(new_state);
- state_ = new_state;
+ // We use the parameters that come with this callback every time we call
+ // Configure(). This is possible only if the MediaCrypto object remains valid
+ // and the surface requirement does not change until new SetCdm() is called.
+
+ DCHECK(media_crypto);
+ DCHECK(!media_crypto->is_null());
+
+ media_crypto_ = media_crypto.Pass();
+
+ if (audio_decoder_) {
+ audio_decoder_->SetNeedsReconfigure();
+ }
+
+ if (video_decoder_) {
+ video_decoder_->SetNeedsReconfigure();
+ video_decoder_->SetProtectedSurfaceRequired(needs_protected_surface);
+ }
+
+ if (state_ == kStateWaitingForMediaCrypto) {
+ // Resume start sequence (configure, etc.)
+ SetState(kStatePlaying);
+ StartPlaybackOrBrowserSeek();
+ }
+
+ DVLOG(1) << __FUNCTION__ << " end";
}
-void MediaCodecPlayer::SetPendingSurface(gfx::ScopedJavaSurface surface) {
+void MediaCodecPlayer::OnKeyAdded() {
DCHECK(GetMediaTaskRunner()->BelongsToCurrentThread());
DVLOG(1) << __FUNCTION__;
- video_decoder_->SetPendingSurface(surface.Pass());
+ key_is_added_ = true;
+
+ if (state_ == kStateWaitingForKey) {
+ SetState(kStatePlaying);
+ StartPlaybackOrBrowserSeek();
+ }
+}
+
+void MediaCodecPlayer::OnCdmUnset() {
+ DCHECK(GetMediaTaskRunner()->BelongsToCurrentThread());
+ DVLOG(1) << __FUNCTION__;
+
+ // This comment is copied from MediaSourcePlayer::OnCdmUnset().
+ // TODO(xhwang): Currently this is only called during teardown. Support full
+ // detachment of CDM during playback. This will be needed when we start to
+ // support setMediaKeys(0) (see http://crbug.com/330324), or when we release
+ // MediaDrm when the video is paused, or when the device goes to sleep (see
+ // http://crbug.com/272421).
+
+ if (audio_decoder_) {
+ audio_decoder_->SetNeedsReconfigure();
+ }
+
+ if (video_decoder_) {
+ video_decoder_->SetProtectedSurfaceRequired(false);
+ video_decoder_->SetNeedsReconfigure();
+ }
+
+ cdm_registration_id_ = 0;
+ drm_bridge_ = nullptr;
+ media_crypto_.reset();
}
-bool MediaCodecPlayer::HasPendingSurface() {
- return video_decoder_->HasPendingSurface();
+// State machine operations, called on Media thread
+
+void MediaCodecPlayer::SetState(PlayerState new_state) {
+ DCHECK(GetMediaTaskRunner()->BelongsToCurrentThread());
+
+ DVLOG(1) << "SetState:" << AsString(state_) << " -> " << AsString(new_state);
+ state_ = new_state;
}
void MediaCodecPlayer::SetPendingStart(bool need_to_start) {
@@ -499,17 +1005,28 @@ void MediaCodecPlayer::SetPendingStart(bool need_to_start) {
pending_start_ = need_to_start;
}
-bool MediaCodecPlayer::HasPendingStart() {
+bool MediaCodecPlayer::HasPendingStart() const {
DCHECK(GetMediaTaskRunner()->BelongsToCurrentThread());
return pending_start_;
}
-bool MediaCodecPlayer::HasAudio() {
+void MediaCodecPlayer::SetPendingSeek(base::TimeDelta timestamp) {
+ DCHECK(GetMediaTaskRunner()->BelongsToCurrentThread());
+ DVLOG(1) << __FUNCTION__ << ": " << timestamp;
+ pending_seek_ = timestamp;
+}
+
+base::TimeDelta MediaCodecPlayer::GetPendingSeek() const {
+ DCHECK(GetMediaTaskRunner()->BelongsToCurrentThread());
+ return pending_seek_;
+}
+
+bool MediaCodecPlayer::HasAudio() const {
DCHECK(GetMediaTaskRunner()->BelongsToCurrentThread());
return audio_decoder_->HasStream();
}
-bool MediaCodecPlayer::HasVideo() {
+bool MediaCodecPlayer::HasVideo() const {
DCHECK(GetMediaTaskRunner()->BelongsToCurrentThread());
return video_decoder_->HasStream();
}
@@ -531,12 +1048,24 @@ void MediaCodecPlayer::SetDemuxerConfigs(const DemuxerConfigs& configs) {
if (configs.video_codec != kUnknownVideoCodec)
video_decoder_->SetDemuxerConfigs(configs);
- if (state_ == STATE_WAITING_FOR_CONFIG) {
- SetState(STATE_PREFETCHING);
- StartPrefetchDecoders();
+ if (state_ == kStateWaitingForConfig) {
+ SetState(kStateWaitingForPermission);
+ RequestPlayPermission();
}
}
+void MediaCodecPlayer::RequestPlayPermission() {
+ DCHECK(GetMediaTaskRunner()->BelongsToCurrentThread());
+ DVLOG(1) << __FUNCTION__;
+
+ // Check that we have received demuxer config hence we know the duration.
+ DCHECK(HasAudio() || HasVideo());
+
+ ui_task_runner_->PostTask(
+ FROM_HERE, base::Bind(&MediaPlayerAndroid::RequestPermissionAndPostResult,
+ WeakPtrForUIThread(), duration_));
+}
+
void MediaCodecPlayer::StartPrefetchDecoders() {
DCHECK(GetMediaTaskRunner()->BelongsToCurrentThread());
DVLOG(1) << __FUNCTION__;
@@ -565,66 +1094,192 @@ void MediaCodecPlayer::StartPrefetchDecoders() {
video_decoder_->Prefetch(prefetch_cb);
}
-void MediaCodecPlayer::StartPlaybackDecoders() {
+void MediaCodecPlayer::StartPlaybackOrBrowserSeek() {
+ DCHECK(GetMediaTaskRunner()->BelongsToCurrentThread());
+ DVLOG(1) << __FUNCTION__;
+
+ // TODO(timav): consider replacing this method with posting a
+ // browser seek task (i.e. generate an event) from StartPlaybackDecoders().
+
+ // Clear encryption key related flags.
+ key_is_required_ = false;
+ key_is_added_ = false;
+
+ StartStatus status = StartPlaybackDecoders();
+
+ switch (status) {
+ case kStartBrowserSeekRequired:
+ // Browser seek
+ SetState(kStateWaitingForSeek);
+ SetPendingStart(true);
+ StopDecoders();
+ RequestDemuxerSeek(GetInterpolatedTime(), true);
+ break;
+ case kStartCryptoRequired:
+ SetState(kStateWaitingForMediaCrypto);
+ break;
+ case kStartFailed:
+ GetMediaTaskRunner()->PostTask(FROM_HERE, internal_error_cb_);
+ break;
+ case kStartOk:
+ break;
+ }
+}
+
+MediaCodecPlayer::StartStatus MediaCodecPlayer::StartPlaybackDecoders() {
DCHECK(GetMediaTaskRunner()->BelongsToCurrentThread());
DVLOG(1) << __FUNCTION__;
- // Configure all streams before the start since
- // we may discover that browser seek is required.
+ // Configure all streams before the start since we may discover that browser
+ // seek is required.
+ MediaCodecPlayer::StartStatus status = ConfigureDecoders();
+ if (status != kStartOk)
+ return status;
+
+ bool preroll_required = false;
+ status = MaybePrerollDecoders(&preroll_required);
+ if (preroll_required)
+ return status;
+
+ return StartDecoders();
+}
+
+MediaCodecPlayer::StartStatus MediaCodecPlayer::ConfigureDecoders() {
+ DCHECK(GetMediaTaskRunner()->BelongsToCurrentThread());
+ DVLOG(1) << __FUNCTION__;
- bool do_audio = !AudioFinished();
- bool do_video = !VideoFinished();
+ const bool do_audio = !AudioFinished();
+ const bool do_video = !VideoFinished();
- // If there is nothing to play, the state machine should determine
- // this at the prefetch state and never call this method.
+ // If there is nothing to play, the state machine should determine this at the
+ // prefetch state and never call this method.
DCHECK(do_audio || do_video);
- if (do_audio) {
- MediaCodecDecoder::ConfigStatus status = audio_decoder_->Configure();
- if (status != MediaCodecDecoder::CONFIG_OK) {
- GetMediaTaskRunner()->PostTask(FROM_HERE, error_cb_);
- return;
+ const bool need_audio_crypto =
+ do_audio && audio_decoder_->IsContentEncrypted();
+ const bool need_video_crypto =
+ do_video && video_decoder_->IsContentEncrypted();
+
+ // Do we need to create a local ref from the global ref?
+ jobject media_crypto = media_crypto_ ? media_crypto_->obj() : nullptr;
+
+ if (need_audio_crypto || need_video_crypto) {
+ DVLOG(1) << (need_audio_crypto ? " audio" : "")
+ << (need_video_crypto ? " video" : "") << " need(s) encryption";
+ if (!media_crypto) {
+ DVLOG(1) << __FUNCTION__ << ": MediaCrypto is not found, returning";
+ return kStartCryptoRequired;
}
}
- if (do_video) {
- MediaCodecDecoder::ConfigStatus status = video_decoder_->Configure();
- if (status != MediaCodecDecoder::CONFIG_OK) {
- GetMediaTaskRunner()->PostTask(FROM_HERE, error_cb_);
- return;
- }
+ // Start with video: if browser seek is required it would not make sense to
+ // configure audio.
+
+ MediaCodecDecoder::ConfigStatus status = MediaCodecDecoder::kConfigOk;
+ if (do_video)
+ status = video_decoder_->Configure(media_crypto);
+
+ if (status == MediaCodecDecoder::kConfigOk && do_audio)
+ status = audio_decoder_->Configure(media_crypto);
+
+ switch (status) {
+ case MediaCodecDecoder::kConfigOk:
+ break;
+ case MediaCodecDecoder::kConfigKeyFrameRequired:
+ return kStartBrowserSeekRequired;
+ case MediaCodecDecoder::kConfigFailure:
+ return kStartFailed;
+ }
+ return kStartOk;
+}
+
+MediaCodecPlayer::StartStatus MediaCodecPlayer::MaybePrerollDecoders(
+ bool* preroll_required) {
+ DCHECK(GetMediaTaskRunner()->BelongsToCurrentThread());
+
+ DVLOG(1) << __FUNCTION__ << " current_time:" << GetInterpolatedTime();
+
+ // If requested, preroll is always done in the beginning of the playback,
+ // after prefetch. The request might not happen at all though, in which case
+ // we won't have prerolling phase. We need the prerolling when we (re)create
+ // the decoder, because its configuration and initialization (getting input,
+ // but not making output) can take time, and after the seek because there
+ // could be some data to be skipped and there is again initialization after
+ // the flush.
+
+ *preroll_required = false;
+
+ int count = 0;
+ const bool do_audio = audio_decoder_->NotCompletedAndNeedsPreroll();
+ if (do_audio)
+ ++count;
+
+ const bool do_video = video_decoder_->NotCompletedAndNeedsPreroll();
+ if (do_video)
+ ++count;
+
+ if (count == 0) {
+ DVLOG(1) << __FUNCTION__ << ": preroll is not required, skipping";
+ return kStartOk;
}
- // At this point decoder threads should not be running.
+ *preroll_required = true;
+
+ DCHECK(count > 0);
+ DCHECK(do_audio || do_video);
+
+ DVLOG(1) << __FUNCTION__ << ": preroll for " << count << " stream(s)";
+
+ base::Closure preroll_cb = base::BarrierClosure(
+ count, base::Bind(&MediaCodecPlayer::OnPrerollDone, media_weak_this_));
+
+ if (do_audio && !audio_decoder_->Preroll(preroll_cb))
+ return kStartFailed;
+
+ if (do_video && !video_decoder_->Preroll(preroll_cb))
+ return kStartFailed;
+
+ return kStartOk;
+}
+
+MediaCodecPlayer::StartStatus MediaCodecPlayer::StartDecoders() {
+ DCHECK(GetMediaTaskRunner()->BelongsToCurrentThread());
+
if (!interpolator_.interpolating())
interpolator_.StartInterpolating();
base::TimeDelta current_time = GetInterpolatedTime();
- if (do_audio) {
- if (!audio_decoder_->Start(current_time)) {
- GetMediaTaskRunner()->PostTask(FROM_HERE, error_cb_);
- return;
- }
+ DVLOG(1) << __FUNCTION__ << " current_time:" << current_time;
+
+ // At this point decoder threads are either not running at all or their
+ // message pumps are in the idle state after the preroll is done.
+ media_stat_->Start(current_time);
+
+ if (!AudioFinished()) {
+ if (!audio_decoder_->Start(current_time))
+ return kStartFailed;
// Attach listener on UI thread
ui_task_runner_->PostTask(FROM_HERE, attach_listener_cb_);
}
- if (do_video) {
- if (!video_decoder_->Start(current_time)) {
- GetMediaTaskRunner()->PostTask(FROM_HERE, error_cb_);
- return;
- }
+ if (!VideoFinished()) {
+ if (!video_decoder_->Start(current_time))
+ return kStartFailed;
}
+
+ return kStartOk;
}
void MediaCodecPlayer::StopDecoders() {
DCHECK(GetMediaTaskRunner()->BelongsToCurrentThread());
DVLOG(1) << __FUNCTION__;
- audio_decoder_->SyncStop();
video_decoder_->SyncStop();
+ audio_decoder_->SyncStop();
+
+ media_stat_->StopAndReport(GetInterpolatedTime());
}
void MediaCodecPlayer::RequestToStopDecoders() {
@@ -641,7 +1296,8 @@ void MediaCodecPlayer::RequestToStopDecoders() {
if (!do_audio && !do_video) {
GetMediaTaskRunner()->PostTask(
- FROM_HERE, base::Bind(&MediaCodecPlayer::OnStopDone, media_weak_this_));
+ FROM_HERE, base::Bind(&MediaCodecPlayer::OnStopDone, media_weak_this_,
+ DemuxerStream::UNKNOWN));
return;
}
@@ -651,6 +1307,23 @@ void MediaCodecPlayer::RequestToStopDecoders() {
video_decoder_->RequestToStop();
}
+void MediaCodecPlayer::RequestDemuxerSeek(base::TimeDelta seek_time,
+ bool is_browser_seek) {
+ DCHECK(GetMediaTaskRunner()->BelongsToCurrentThread());
+ DVLOG(1) << __FUNCTION__ << " " << seek_time
+ << (is_browser_seek ? " BROWSER_SEEK" : "");
+
+ // Flush decoders before requesting demuxer.
+ audio_decoder_->Flush();
+ video_decoder_->Flush();
+
+ // Save active seek data. Logically it is attached to kStateWaitingForSeek.
+ DCHECK_EQ(kStateWaitingForSeek, state_);
+ seek_info_.reset(new SeekInfo(seek_time, is_browser_seek));
+
+ demuxer_->RequestDemuxerSeek(seek_time, is_browser_seek);
+}
+
void MediaCodecPlayer::ReleaseDecoderResources() {
DCHECK(GetMediaTaskRunner()->BelongsToCurrentThread());
DVLOG(1) << __FUNCTION__;
@@ -664,39 +1337,58 @@ void MediaCodecPlayer::ReleaseDecoderResources() {
// At this point decoder threads should not be running
if (interpolator_.interpolating())
interpolator_.StopInterpolating();
+
+ media_stat_->StopAndReport(GetInterpolatedTime());
}
void MediaCodecPlayer::CreateDecoders() {
DCHECK(GetMediaTaskRunner()->BelongsToCurrentThread());
DVLOG(1) << __FUNCTION__;
- error_cb_ = base::Bind(&MediaCodecPlayer::OnError, media_weak_this_);
+ internal_error_cb_ = base::Bind(&MediaCodecPlayer::OnError, media_weak_this_);
+
+ media_stat_.reset(new MediaStatistics());
audio_decoder_.reset(new MediaCodecAudioDecoder(
- GetMediaTaskRunner(), base::Bind(&MediaCodecPlayer::RequestDemuxerData,
- media_weak_this_, DemuxerStream::AUDIO),
+ GetMediaTaskRunner(), &media_stat_->audio_frame_stats(),
+ base::Bind(&MediaCodecPlayer::RequestDemuxerData, media_weak_this_,
+ DemuxerStream::AUDIO),
base::Bind(&MediaCodecPlayer::OnStarvation, media_weak_this_,
DemuxerStream::AUDIO),
- base::Bind(&MediaCodecPlayer::OnStopDone, media_weak_this_), error_cb_,
+ base::Bind(&MediaCodecPlayer::OnDecoderDrained, media_weak_this_,
+ DemuxerStream::AUDIO),
+ base::Bind(&MediaCodecPlayer::OnStopDone, media_weak_this_,
+ DemuxerStream::AUDIO),
+ base::Bind(&MediaCodecPlayer::OnMissingKeyReported, media_weak_this_,
+ DemuxerStream::AUDIO),
+ internal_error_cb_,
base::Bind(&MediaCodecPlayer::OnTimeIntervalUpdate, media_weak_this_,
DemuxerStream::AUDIO)));
video_decoder_.reset(new MediaCodecVideoDecoder(
- GetMediaTaskRunner(), base::Bind(&MediaCodecPlayer::RequestDemuxerData,
- media_weak_this_, DemuxerStream::VIDEO),
+ GetMediaTaskRunner(), &media_stat_->video_frame_stats(),
+ base::Bind(&MediaCodecPlayer::RequestDemuxerData, media_weak_this_,
+ DemuxerStream::VIDEO),
base::Bind(&MediaCodecPlayer::OnStarvation, media_weak_this_,
DemuxerStream::VIDEO),
- base::Bind(&MediaCodecPlayer::OnStopDone, media_weak_this_), error_cb_,
- MediaCodecDecoder::SetTimeCallback(), // null callback
- base::Bind(&MediaCodecPlayer::OnVideoResolutionChanged, media_weak_this_),
- base::Bind(&MediaCodecPlayer::OnVideoCodecCreated, media_weak_this_)));
+ base::Bind(&MediaCodecPlayer::OnDecoderDrained, media_weak_this_,
+ DemuxerStream::VIDEO),
+ base::Bind(&MediaCodecPlayer::OnStopDone, media_weak_this_,
+ DemuxerStream::VIDEO),
+ base::Bind(&MediaCodecPlayer::OnMissingKeyReported, media_weak_this_,
+ DemuxerStream::VIDEO),
+ internal_error_cb_,
+ base::Bind(&MediaCodecPlayer::OnTimeIntervalUpdate, media_weak_this_,
+ DemuxerStream::VIDEO),
+ base::Bind(&MediaCodecPlayer::OnVideoResolutionChanged,
+ media_weak_this_)));
}
-bool MediaCodecPlayer::AudioFinished() {
+bool MediaCodecPlayer::AudioFinished() const {
return audio_decoder_->IsCompleted() || !audio_decoder_->HasStream();
}
-bool MediaCodecPlayer::VideoFinished() {
+bool MediaCodecPlayer::VideoFinished() const {
return video_decoder_->IsCompleted() || !video_decoder_->HasStream();
}
@@ -714,13 +1406,17 @@ base::TimeDelta MediaCodecPlayer::GetInterpolatedTime() {
const char* MediaCodecPlayer::AsString(PlayerState state) {
switch (state) {
- RETURN_STRING(STATE_PAUSED);
- RETURN_STRING(STATE_WAITING_FOR_CONFIG);
- RETURN_STRING(STATE_PREFETCHING);
- RETURN_STRING(STATE_PLAYING);
- RETURN_STRING(STATE_STOPPING);
- RETURN_STRING(STATE_WAITING_FOR_SURFACE);
- RETURN_STRING(STATE_ERROR);
+ RETURN_STRING(kStatePaused);
+ RETURN_STRING(kStateWaitingForConfig);
+ RETURN_STRING(kStateWaitingForPermission);
+ RETURN_STRING(kStatePrefetching);
+ RETURN_STRING(kStatePlaying);
+ RETURN_STRING(kStateStopping);
+ RETURN_STRING(kStateWaitingForSurface);
+ RETURN_STRING(kStateWaitingForKey);
+ RETURN_STRING(kStateWaitingForMediaCrypto);
+ RETURN_STRING(kStateWaitingForSeek);
+ RETURN_STRING(kStateError);
}
return nullptr; // crash early
}
diff --git a/chromium/media/base/android/media_codec_player.h b/chromium/media/base/android/media_codec_player.h
index f9f95703bd9..376c2e4f944 100644
--- a/chromium/media/base/android/media_codec_player.h
+++ b/chromium/media/base/android/media_codec_player.h
@@ -11,7 +11,9 @@
#include "base/threading/thread.h"
#include "base/time/default_tick_clock.h"
#include "media/base/android/demuxer_android.h"
+#include "media/base/android/media_drm_bridge.h"
#include "media/base/android/media_player_android.h"
+#include "media/base/android/media_statistics.h"
#include "media/base/demuxer_stream.h"
#include "media/base/media_export.h"
#include "media/base/time_delta_interpolator.h"
@@ -31,18 +33,22 @@
// | <------------------[ WaitingForConfig ] [ Error ]
// |
// |
-// |
-// v
-// [ Prefetching ] -------------------
-// | |
-// | v
-// | <-----------------[ WaitingForSurface ]
-// v
-// [ Playing ]
-// |
-// |
-// v
-// [ Stopping ]
+// | <----------------------------------------------
+// | |
+// [ Waiting for permission ] |
+// | |
+// | |
+// v |
+// [ Prefetching ] ------------------- |
+// | | |
+// | v |
+// | <-----------------[ WaitingForSurface ] |
+// v |
+// [ Playing ] |
+// | |
+// | |
+// v |
+// [ Stopping ] --------------------------------> [ WaitingForSeek ]
// Events and actions for pause/resume workflow.
@@ -54,17 +60,27 @@
// | ^ | /
// | | | /
// | Pause: | | Start w/config: /
-// | | | dec.Prefetch /
+// | Permission denied: | | requestPermission /
// | | | /
// | | | /
// | | | /
// | | | / DemuxerConfigs:
-// | | | / dec.Prefetch
+// | | | / requestPermission
// | | | /
// | | | /
// | | v /
// | /
-// | ------------------> [ Prefetching ] <--------/ [ Waiting ]
+// | ------------------> [ Waiting for ] <--------/
+// | | [ permission ]
+// | | |
+// | | |
+// | | | Permission granted:
+// | | | dec.Prefetch
+// | | |
+// | | |
+// | | v
+// | |
+// | | [ Prefetching ] [ Waiting ]
// | | [ ] --------------> [ for surface ]
// | | | PrefetchDone, /
// | | | no surface: /
@@ -89,15 +105,62 @@
// | |
// ------------------------- [ Stopping ]
+
+// Events and actions for seek workflow.
+// -------------------------------------
+//
+// Seek: -- --
+// demuxer.RequestSeek | |
+// [ Paused ] -----------------------> | |
+// [ ] <----------------------- | |--
+// SeekDone: | | |
+// | | |
+// | | |
+// | | |
+// | | | Start:
+// | | | SetPendingStart
+// Seek: dec.Stop | | |
+// SetPendingStart | | |
+// demuxer.RequestSeek | | |
+// [ Waiting for ] -----------------------> | | |
+// [ permission ] <---------------------- | | | Pause:
+// SeekDone | | | RemovePendingStart
+// | w/pending start: | | |
+// | requestPermission | Waiting | |
+// | | for | | Seek:
+// | | seek | | SetPendingSeek
+// | | | |
+// | Seek: dec.Stop | | |
+// v SetPendingStart | | |
+// demuxer.RequestSeek | | |
+// [ Prefetching ] ----------------------> | | |
+// | | |
+// | | | |
+// | PrefetchDone: dec.Start | | |
+// | | | | SeekDone
+// v | | | w/pending seek:
+// | | | demuxer.RequestSeek
+// [ Playing ] | | |
+// | | |
+// | | |<-
+// | Seek: SetPendingStart | |
+// | SetPendingSeek | |
+// | dec.RequestToStop | |
+// | | |
+// | | |
+// v | |
+// | |
+// [ Stopping ] -----------------------> | |
+// StopDone -- --
+// w/pending seek:
+// demuxer.RequestSeek
+
namespace media {
class BrowserCdm;
class MediaCodecAudioDecoder;
class MediaCodecVideoDecoder;
-// Returns the task runner for the media thread
-MEDIA_EXPORT scoped_refptr<base::SingleThreadTaskRunner> GetMediaTaskRunner();
-
class MEDIA_EXPORT MediaCodecPlayer : public MediaPlayerAndroid,
public DemuxerAndroidClient {
public:
@@ -108,13 +171,27 @@ class MEDIA_EXPORT MediaCodecPlayer : public MediaPlayerAndroid,
typedef base::Callback<void(base::TimeDelta, base::TimeTicks)>
TimeUpdateCallback;
+ typedef base::Callback<void(const base::TimeDelta& current_timestamp)>
+ SeekDoneCallback;
+
+ typedef base::Callback<void(int)> ErrorCallback;
+
+ // For testing only.
+ typedef base::Callback<void(DemuxerStream::Type,
+ base::TimeDelta,
+ base::TimeDelta)> DecodersTimeCallback;
+
+ // For testing only.
+ typedef base::Callback<void(DemuxerStream::Type)> CodecCreatedCallback;
+
// Constructs a player with the given ID and demuxer. |manager| must outlive
// the lifetime of this object.
- MediaCodecPlayer(int player_id,
- base::WeakPtr<MediaPlayerManager> manager,
- const RequestMediaResourcesCB& request_media_resources_cb,
- scoped_ptr<DemuxerAndroid> demuxer,
- const GURL& frame_url);
+ MediaCodecPlayer(
+ int player_id,
+ base::WeakPtr<MediaPlayerManager> manager,
+ const OnDecoderResourcesReleasedCB& on_decoder_resources_released_cb,
+ scoped_ptr<DemuxerAndroid> demuxer,
+ const GURL& frame_url);
~MediaCodecPlayer() override;
// A helper method that performs the media thread part of initialization.
@@ -145,16 +222,33 @@ class MEDIA_EXPORT MediaCodecPlayer : public MediaPlayerAndroid,
void OnDemuxerSeekDone(base::TimeDelta actual_browser_seek_time) override;
void OnDemuxerDurationChanged(base::TimeDelta duration) override;
+ // For testing only.
+ void SetDecodersTimeCallbackForTests(DecodersTimeCallback cb);
+ void SetCodecCreatedCallbackForTests(CodecCreatedCallback cb);
+ void SetAlwaysReconfigureForTests(DemuxerStream::Type type);
+ bool IsPrerollingForTests(DemuxerStream::Type type) const;
+
private:
// The state machine states.
enum PlayerState {
- STATE_PAUSED,
- STATE_WAITING_FOR_CONFIG,
- STATE_PREFETCHING,
- STATE_PLAYING,
- STATE_STOPPING,
- STATE_WAITING_FOR_SURFACE,
- STATE_ERROR,
+ kStatePaused,
+ kStateWaitingForConfig,
+ kStateWaitingForPermission,
+ kStatePrefetching,
+ kStatePlaying,
+ kStateStopping,
+ kStateWaitingForSurface,
+ kStateWaitingForKey,
+ kStateWaitingForMediaCrypto,
+ kStateWaitingForSeek,
+ kStateError,
+ };
+
+ enum StartStatus {
+ kStartOk = 0,
+ kStartBrowserSeekRequired,
+ kStartCryptoRequired,
+ kStartFailed,
};
// Cached values for the manager.
@@ -163,7 +257,21 @@ class MEDIA_EXPORT MediaCodecPlayer : public MediaPlayerAndroid,
gfx::Size video_size;
};
+ // Information about current seek in progress.
+ struct SeekInfo {
+ const base::TimeDelta seek_time;
+ const bool is_browser_seek;
+ SeekInfo(base::TimeDelta time, bool browser_seek)
+ : seek_time(time), is_browser_seek(browser_seek) {}
+ };
+
// MediaPlayerAndroid implementation.
+
+ // This method requests playback permission from the manager on UI thread,
+  // passing total duration as an argument. The duration must be known by the
+ // time of the call. The method posts the result to the media thread.
+ void RequestPermissionAndPostResult(base::TimeDelta duration) override;
+
// This method caches the data and calls manager's OnMediaMetadataChanged().
void OnMediaMetadataChanged(base::TimeDelta duration,
const gfx::Size& video_size) override;
@@ -172,39 +280,58 @@ class MEDIA_EXPORT MediaCodecPlayer : public MediaPlayerAndroid,
void OnTimeUpdate(base::TimeDelta current_timestamp,
base::TimeTicks current_time_ticks) override;
+ // Callback from manager
+ void OnPermissionDecided(bool granted);
+
// Callbacks from decoders
void RequestDemuxerData(DemuxerStream::Type stream_type);
void OnPrefetchDone();
- void OnStopDone();
+ void OnPrerollDone();
+ void OnDecoderDrained(DemuxerStream::Type type);
+ void OnStopDone(DemuxerStream::Type type);
+ void OnMissingKeyReported(DemuxerStream::Type type);
void OnError();
void OnStarvation(DemuxerStream::Type stream_type);
void OnTimeIntervalUpdate(DemuxerStream::Type stream_type,
base::TimeDelta now_playing,
- base::TimeDelta last_buffered);
+ base::TimeDelta last_buffered,
+ bool postpone);
// Callbacks from video decoder
- void OnVideoCodecCreated();
void OnVideoResolutionChanged(const gfx::Size& size);
+ // Callbacks from CDM
+ void OnMediaCryptoReady(MediaDrmBridge::JavaObjectPtr media_crypto,
+ bool needs_protected_surface);
+ void OnKeyAdded();
+ void OnCdmUnset();
+
// Operations called from the state machine.
void SetState(PlayerState new_state);
- void SetPendingSurface(gfx::ScopedJavaSurface surface);
- bool HasPendingSurface();
void SetPendingStart(bool need_to_start);
- bool HasPendingStart();
- bool HasVideo();
- bool HasAudio();
+ bool HasPendingStart() const;
+ void SetPendingSeek(base::TimeDelta timestamp);
+ base::TimeDelta GetPendingSeek() const;
+ bool HasVideo() const;
+ bool HasAudio() const;
void SetDemuxerConfigs(const DemuxerConfigs& configs);
+ void RequestPlayPermission();
void StartPrefetchDecoders();
- void StartPlaybackDecoders();
+ void StartPlaybackOrBrowserSeek();
+ StartStatus StartPlaybackDecoders();
+ StartStatus ConfigureDecoders();
+ StartStatus MaybePrerollDecoders(bool* preroll_required);
+ StartStatus StartDecoders();
void StopDecoders();
void RequestToStopDecoders();
+ void RequestDemuxerSeek(base::TimeDelta seek_time,
+ bool is_browser_seek = false);
void ReleaseDecoderResources();
// Helper methods.
void CreateDecoders();
- bool AudioFinished();
- bool VideoFinished();
+ bool AudioFinished() const;
+ bool VideoFinished() const;
base::TimeDelta GetInterpolatedTime();
static const char* AsString(PlayerState state);
@@ -223,9 +350,11 @@ class MEDIA_EXPORT MediaCodecPlayer : public MediaPlayerAndroid,
PlayerState state_;
// Notification callbacks, they call MediaPlayerManager.
- base::Closure request_resources_cb_;
TimeUpdateCallback time_update_cb_;
base::Closure completion_cb_;
+ base::Closure waiting_for_decryption_key_cb_;
+ SeekDoneCallback seek_done_cb_;
+ ErrorCallback error_cb_;
// A callback that updates metadata cache and calls the manager.
MetadataChangedCallback metadata_changed_cb_;
@@ -237,7 +366,7 @@ class MEDIA_EXPORT MediaCodecPlayer : public MediaPlayerAndroid,
// Error callback is posted by decoders or by this class itself if we cannot
// configure or start decoder.
- base::Closure error_cb_;
+ base::Closure internal_error_cb_;
// Total duration reported by demuxer.
base::TimeDelta duration_;
@@ -252,6 +381,10 @@ class MEDIA_EXPORT MediaCodecPlayer : public MediaPlayerAndroid,
// Pending data to be picked up by the upcoming state.
gfx::ScopedJavaSurface pending_surface_;
bool pending_start_;
+ base::TimeDelta pending_seek_;
+
+ // Data associated with a seek in progress.
+ scoped_ptr<SeekInfo> seek_info_;
// Configuration data for the manager, accessed on the UI thread.
MediaMetadata metadata_cache_;
@@ -259,6 +392,27 @@ class MEDIA_EXPORT MediaCodecPlayer : public MediaPlayerAndroid,
// Cached current time, accessed on UI thread.
base::TimeDelta current_time_cache_;
+ // For testing only.
+ DecodersTimeCallback decoders_time_cb_;
+
+ // DRM
+ MediaDrmBridge::JavaObjectPtr media_crypto_;
+
+ MediaDrmBridge* drm_bridge_;
+ int cdm_registration_id_;
+
+ // The flag is set when the player receives the error from decoder that the
+ // decoder needs a new decryption key. Cleared on starting the playback.
+ bool key_is_required_;
+
+ // The flag is set after the new encryption key is added to MediaDrm. Cleared
+ // on starting the playback.
+ bool key_is_added_;
+
+ // Gathers and reports playback quality statistics to UMA.
+ // Use pointer to enable replacement of this object for tests.
+ scoped_ptr<MediaStatistics> media_stat_;
+
base::WeakPtr<MediaCodecPlayer> media_weak_this_;
// NOTE: Weak pointers must be invalidated before all other member variables.
base::WeakPtrFactory<MediaCodecPlayer> media_weak_factory_;
diff --git a/chromium/media/base/android/media_codec_player_unittest.cc b/chromium/media/base/android/media_codec_player_unittest.cc
index 2eb77a5e3ce..46b95fcb31b 100644
--- a/chromium/media/base/android/media_codec_player_unittest.cc
+++ b/chromium/media/base/android/media_codec_player_unittest.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <stdint.h>
+
#include "base/bind.h"
#include "base/logging.h"
#include "base/timer/timer.h"
@@ -9,8 +11,12 @@
#include "media/base/android/media_codec_bridge.h"
#include "media/base/android/media_codec_player.h"
#include "media/base/android/media_player_manager.h"
+#include "media/base/android/media_task_runner.h"
#include "media/base/android/test_data_factory.h"
+#include "media/base/android/test_statistics.h"
+#include "media/base/timestamp_constants.h"
#include "testing/gtest/include/gtest/gtest.h"
+#include "ui/gl/android/surface_texture.h"
namespace media {
@@ -36,21 +42,74 @@ namespace media {
namespace {
const base::TimeDelta kDefaultTimeout = base::TimeDelta::FromMilliseconds(200);
-const base::TimeDelta kAudioFramePeriod = base::TimeDelta::FromMilliseconds(20);
+const base::TimeDelta kAudioFramePeriod =
+ base::TimeDelta::FromSecondsD(1024.0 / 44100); // 1024 samples @ 44100 Hz
+const base::TimeDelta kVideoFramePeriod = base::TimeDelta::FromMilliseconds(20);
+
+enum Flags {
+ kAlwaysReconfigAudio = 0x1,
+ kAlwaysReconfigVideo = 0x2,
+};
+
+// The predicate that always returns false, used for WaitForDelay implementation
+bool AlwaysFalse() {
+ return false;
+}
+
+// The method used to compare two time values of type T in expectations.
+// Type T requires that a difference of type base::TimeDelta is defined.
+template <typename T>
+bool AlmostEqual(T a, T b, double tolerance_ms) {
+ return (a - b).magnitude().InMilliseconds() <= tolerance_ms;
+}
+
+// A helper function to calculate the expected number of frames.
+int GetFrameCount(base::TimeDelta duration,
+ base::TimeDelta frame_period,
+ int num_reconfigs) {
+ // A chunk has 4 access units. The last unit timestamp must exceed the
+ // duration. Last chunk has 3 regular access units and one stand-alone EOS
+ // unit that we do not count.
+
+ // Number of time intervals to exceed duration.
+ int num_intervals = duration / frame_period + 1.0;
+
+  // To cover these intervals we need one extra unit at the beginning and one
+ // for each reconfiguration.
+ int num_units = num_intervals + 1 + num_reconfigs;
+
+ // Number of 4-unit chunks that hold these units:
+ int num_chunks = (num_units + 3) / 4;
+
+ // Altogether these chunks hold 4*num_chunks units, but we do not count
+ // reconfiguration units and last EOS as frames.
+ return 4 * num_chunks - 1 - num_reconfigs;
+}
// Mock of MediaPlayerManager for testing purpose.
class MockMediaPlayerManager : public MediaPlayerManager {
public:
MockMediaPlayerManager()
- : playback_completed_(false), weak_ptr_factory_(this) {}
+ : playback_allowed_(true),
+ playback_completed_(false),
+ num_seeks_completed_(0),
+ num_audio_codecs_created_(0),
+ num_video_codecs_created_(0),
+ weak_ptr_factory_(this) {}
~MockMediaPlayerManager() override {}
MediaResourceGetter* GetMediaResourceGetter() override { return nullptr; }
MediaUrlInterceptor* GetMediaUrlInterceptor() override { return nullptr; }
+
+ // Regular time update callback, reports current playback time to
+ // MediaPlayerManager.
void OnTimeUpdate(int player_id,
base::TimeDelta current_timestamp,
- base::TimeTicks current_time_ticks) override {}
+ base::TimeTicks current_time_ticks) override {
+ pts_stat_.AddValue(current_timestamp);
+ }
+
void OnMediaMetadataChanged(int player_id,
base::TimeDelta duration,
int width,
@@ -65,27 +124,70 @@ class MockMediaPlayerManager : public MediaPlayerManager {
void OnPlaybackComplete(int player_id) override {
playback_completed_ = true;
}
+
void OnMediaInterrupted(int player_id) override {}
void OnBufferingUpdate(int player_id, int percentage) override {}
void OnSeekComplete(int player_id,
- const base::TimeDelta& current_time) override {}
+ const base::TimeDelta& current_time) override {
+ ++num_seeks_completed_;
+ }
void OnError(int player_id, int error) override {}
void OnVideoSizeChanged(int player_id, int width, int height) override {}
- void OnAudibleStateChanged(int player_id, bool is_audible_now) override {}
void OnWaitingForDecryptionKey(int player_id) override {}
MediaPlayerAndroid* GetFullscreenPlayer() override { return nullptr; }
MediaPlayerAndroid* GetPlayer(int player_id) override { return nullptr; }
- bool RequestPlay(int player_id) override { return true; }
+ bool RequestPlay(int player_id, base::TimeDelta duration) override {
+ return playback_allowed_;
+ }
void OnMediaResourcesRequested(int player_id) {}
+ // Time update callback that reports the internal progress of the stream.
+ // Implementation dependent, used for testing only.
+ void OnDecodersTimeUpdate(DemuxerStream::Type stream_type,
+ base::TimeDelta now_playing,
+ base::TimeDelta last_buffered) {
+ render_stat_[stream_type].AddValue(
+ PTSTime(now_playing, base::TimeTicks::Now()));
+ }
+
+ // Notification called on MediaCodec creation.
+ // Implementation dependent, used for testing only.
+ void OnMediaCodecCreated(DemuxerStream::Type stream_type) {
+ if (stream_type == DemuxerStream::AUDIO)
+ ++num_audio_codecs_created_;
+ else if (stream_type == DemuxerStream::VIDEO)
+ ++num_video_codecs_created_;
+ }
+
+ // First frame information
+ base::TimeDelta FirstFramePTS(DemuxerStream::Type stream_type) const {
+ return render_stat_[stream_type].min().pts;
+ }
+ base::TimeTicks FirstFrameTime(DemuxerStream::Type stream_type) const {
+ return render_stat_[stream_type].min().time;
+ }
+
base::WeakPtr<MockMediaPlayerManager> GetWeakPtr() {
return weak_ptr_factory_.GetWeakPtr();
}
+ void SetPlaybackAllowed(bool value) { playback_allowed_ = value; }
+
// Conditions to wait for.
bool IsMetadataChanged() const { return media_metadata_.modified; }
bool IsPlaybackCompleted() const { return playback_completed_; }
+ bool IsPlaybackStarted() const { return pts_stat_.num_values() > 0; }
+ bool IsPlaybackBeyondPosition(const base::TimeDelta& pts) const {
+ return pts_stat_.max() > pts;
+ }
+ bool IsSeekCompleted() const { return num_seeks_completed_ > 0; }
+ bool HasFirstFrame(DemuxerStream::Type stream_type) const {
+ return render_stat_[stream_type].num_values() != 0;
+ }
+
+ int num_audio_codecs_created() const { return num_audio_codecs_created_; }
+ int num_video_codecs_created() const { return num_video_codecs_created_; }
struct MediaMetadata {
base::TimeDelta duration;
@@ -96,8 +198,25 @@ class MockMediaPlayerManager : public MediaPlayerManager {
};
MediaMetadata media_metadata_;
+ struct PTSTime {
+ base::TimeDelta pts;
+ base::TimeTicks time;
+
+ PTSTime() : pts(), time() {}
+ PTSTime(base::TimeDelta p, base::TimeTicks t) : pts(p), time(t) {}
+ bool is_null() const { return time.is_null(); }
+ bool operator<(const PTSTime& rhs) const { return time < rhs.time; }
+ };
+ Minimax<PTSTime> render_stat_[DemuxerStream::NUM_TYPES];
+
+ Minimax<base::TimeDelta> pts_stat_;
+
private:
+ bool playback_allowed_;
bool playback_completed_;
+ int num_seeks_completed_;
+ int num_audio_codecs_created_;
+ int num_video_codecs_created_;
base::WeakPtrFactory<MockMediaPlayerManager> weak_ptr_factory_;
@@ -109,7 +228,7 @@ class MockMediaPlayerManager : public MediaPlayerManager {
DemuxerConfigs CreateAudioVideoConfigs(const base::TimeDelta& duration,
const gfx::Size& video_size) {
DemuxerConfigs configs =
- TestDataFactory::CreateAudioConfigs(kCodecVorbis, duration);
+ TestDataFactory::CreateAudioConfigs(kCodecAAC, duration);
configs.video_codec = kCodecVP8;
configs.video_size = video_size;
configs.is_video_encrypted = false;
@@ -124,6 +243,7 @@ DemuxerConfigs CreateAudioVideoConfigs(const TestDataFactory* audio,
result.video_codec = vconf.video_codec;
result.video_size = vconf.video_size;
result.is_video_encrypted = vconf.is_video_encrypted;
+ result.duration = std::max(result.duration, vconf.duration);
return result;
}
@@ -131,45 +251,132 @@ DemuxerConfigs CreateAudioVideoConfigs(const TestDataFactory* audio,
class AudioFactory : public TestDataFactory {
public:
- AudioFactory(const base::TimeDelta& duration)
- : TestDataFactory("vorbis-packet-%d", duration, kAudioFramePeriod) {}
+ AudioFactory(base::TimeDelta duration)
+ : TestDataFactory("aac-44100-packet-%d", duration, kAudioFramePeriod) {}
DemuxerConfigs GetConfigs() const override {
- return TestDataFactory::CreateAudioConfigs(kCodecVorbis, duration_);
+ return TestDataFactory::CreateAudioConfigs(kCodecAAC, duration_);
}
protected:
- void ModifyAccessUnit(int index_in_chunk, AccessUnit* unit) override {
- // Vorbis needs 4 extra bytes padding on Android to decode properly.
- // Check NuMediaExtractor.cpp in Android source code.
- uint8 padding[4] = {0xff, 0xff, 0xff, 0xff};
- unit->data.insert(unit->data.end(), padding, padding + 4);
+ void ModifyChunk(DemuxerData* chunk) override {
+ DCHECK(chunk);
+ for (AccessUnit& unit : chunk->access_units) {
+ if (!unit.data.empty())
+ unit.is_key_frame = true;
+ }
}
};
+// VideoFactory creates a video stream from demuxer.
+
+class VideoFactory : public TestDataFactory {
+ public:
+ VideoFactory(base::TimeDelta duration)
+ : TestDataFactory("h264-320x180-frame-%d", duration, kVideoFramePeriod),
+ key_frame_requested_(true) {}
+
+ DemuxerConfigs GetConfigs() const override {
+ return TestDataFactory::CreateVideoConfigs(kCodecH264, duration_,
+ gfx::Size(320, 180));
+ }
+
+ void RequestKeyFrame() { key_frame_requested_ = true; }
+
+ protected:
+ void ModifyChunk(DemuxerData* chunk) override {
+ // The frames are taken from High profile and some are B-frames.
+ // The first 4 frames appear in the file in the following order:
+ //
+ // Frames: I P B P
+ // Decoding order: 0 1 2 3
+ // Presentation order: 0 2 1 4(3)
+ //
+ // I keep the last PTS to be 3 for simplicity.
+
+ // If the chunk contains EOS, it should not break the presentation order.
+ // For instance, the following chunk is ok:
+ //
+ // Frames: I P B EOS
+ // Decoding order: 0 1 2 -
+ // Presentation order: 0 2 1 -
+ //
+ // while this might cause decoder to block:
+ //
+ // Frames: I P EOS
+ // Decoding order: 0 1 -
+ // Presentation order: 0 2 - <------- might wait for the B frame forever
+ //
+ // With current base class implementation that always has EOS at the 4th
+ // place we are covered (http://crbug.com/526755)
+
+ DCHECK(chunk);
+ DCHECK(chunk->access_units.size() == 4);
+
+ // Swap pts for second and third frames.
+ base::TimeDelta tmp = chunk->access_units[1].timestamp;
+ chunk->access_units[1].timestamp = chunk->access_units[2].timestamp;
+ chunk->access_units[2].timestamp = tmp;
+
+ // Make first frame a key frame.
+ if (key_frame_requested_) {
+ chunk->access_units[0].is_key_frame = true;
+ key_frame_requested_ = false;
+ }
+ }
+
+ private:
+ bool key_frame_requested_;
+};
+
// Mock of DemuxerAndroid for testing purpose.
class MockDemuxerAndroid : public DemuxerAndroid {
public:
- MockDemuxerAndroid() : client_(nullptr) {}
- ~MockDemuxerAndroid() override {}
+ MockDemuxerAndroid(base::MessageLoop* ui_message_loop);
+ ~MockDemuxerAndroid() override;
// DemuxerAndroid implementation
void Initialize(DemuxerAndroidClient* client) override;
void RequestDemuxerData(DemuxerStream::Type type) override;
- void RequestDemuxerSeek(const base::TimeDelta& time_to_seek,
- bool is_browser_seek) override {}
+ void RequestDemuxerSeek(const base::TimeDelta& seek_request,
+ bool is_browser_seek) override;
+
+ // Helper methods that enable using a weak pointer when posting to the player.
+ void OnDemuxerDataAvailable(const DemuxerData& chunk);
+ void OnDemuxerSeekDone(base::TimeDelta reported_seek_time);
+
+ // Sets the callback that is fired when demuxer is deleted (deletion
+ // happens on the Media thread).
+ void SetDemuxerDeletedCallback(base::Closure cb) { demuxer_deleted_cb_ = cb; }
// Sets the audio data factory.
- void SetAudioFactory(scoped_ptr<TestDataFactory> factory) {
+ void SetAudioFactory(scoped_ptr<AudioFactory> factory) {
audio_factory_ = factory.Pass();
}
// Sets the video data factory.
- void SetVideoFactory(scoped_ptr<TestDataFactory> factory) {
+ void SetVideoFactory(scoped_ptr<VideoFactory> factory) {
video_factory_ = factory.Pass();
}
+ // Accessors for data factories.
+ AudioFactory* audio_factory() const { return audio_factory_.get(); }
+ VideoFactory* video_factory() const { return video_factory_.get(); }
+
+ // Set the preroll interval after seek for audio stream.
+ void SetAudioPrerollInterval(base::TimeDelta value) {
+ audio_preroll_interval_ = value;
+ }
+
+ // Set the preroll interval after seek for video stream.
+ void SetVideoPrerollInterval(base::TimeDelta value) {
+ video_preroll_interval_ = value;
+ }
+
+ // Sets the delay in OnDemuxerSeekDone response.
+ void SetSeekDoneDelay(base::TimeDelta delay) { seek_done_delay_ = delay; }
+
// Post DemuxerConfigs to the client (i.e. the player) on correct thread.
void PostConfigs(const DemuxerConfigs& configs);
@@ -179,16 +386,47 @@ class MockDemuxerAndroid : public DemuxerAndroid {
// Conditions to wait for.
bool IsInitialized() const { return client_; }
bool HasPendingConfigs() const { return pending_configs_; }
+ bool ReceivedSeekRequest() const { return num_seeks_ > 0; }
+ bool ReceivedBrowserSeekRequest() const { return num_browser_seeks_ > 0; }
private:
+ base::MessageLoop* ui_message_loop_;
DemuxerAndroidClient* client_;
+
scoped_ptr<DemuxerConfigs> pending_configs_;
- scoped_ptr<TestDataFactory> audio_factory_;
- scoped_ptr<TestDataFactory> video_factory_;
+ scoped_ptr<AudioFactory> audio_factory_;
+ scoped_ptr<VideoFactory> video_factory_;
+
+ base::TimeDelta audio_preroll_interval_;
+ base::TimeDelta video_preroll_interval_;
+ base::TimeDelta seek_done_delay_;
+
+ int num_seeks_;
+ int num_browser_seeks_;
+
+ base::Closure demuxer_deleted_cb_;
+
+ // NOTE: WeakPtrFactory must be the last data member to be destroyed first.
+ base::WeakPtrFactory<MockDemuxerAndroid> weak_factory_;
DISALLOW_COPY_AND_ASSIGN(MockDemuxerAndroid);
};
+MockDemuxerAndroid::MockDemuxerAndroid(base::MessageLoop* ui_message_loop)
+ : ui_message_loop_(ui_message_loop),
+ client_(nullptr),
+ num_seeks_(0),
+ num_browser_seeks_(0),
+ weak_factory_(this) {}
+
+MockDemuxerAndroid::~MockDemuxerAndroid() {
+ DVLOG(1) << "MockDemuxerAndroid::" << __FUNCTION__;
+ DCHECK(GetMediaTaskRunner()->BelongsToCurrentThread());
+
+ if (!demuxer_deleted_cb_.is_null())
+ ui_message_loop_->PostTask(FROM_HERE, demuxer_deleted_cb_);
+}
+
void MockDemuxerAndroid::Initialize(DemuxerAndroidClient* client) {
DVLOG(1) << "MockDemuxerAndroid::" << __FUNCTION__;
DCHECK(GetMediaTaskRunner()->BelongsToCurrentThread());
@@ -199,32 +437,80 @@ void MockDemuxerAndroid::Initialize(DemuxerAndroidClient* client) {
}
void MockDemuxerAndroid::RequestDemuxerData(DemuxerStream::Type type) {
+ DCHECK(GetMediaTaskRunner()->BelongsToCurrentThread());
+
DemuxerData chunk;
base::TimeDelta delay;
bool created = false;
if (type == DemuxerStream::AUDIO && audio_factory_)
created = audio_factory_->CreateChunk(&chunk, &delay);
- else if (type == DemuxerStream::VIDEO && audio_factory_)
+ else if (type == DemuxerStream::VIDEO && video_factory_)
created = video_factory_->CreateChunk(&chunk, &delay);
if (!created)
return;
+ // Request key frame after |kConfigChanged|
+ if (type == DemuxerStream::VIDEO && !chunk.demuxer_configs.empty())
+ video_factory_->RequestKeyFrame();
+
chunk.type = type;
- // Post to Media thread.
- DCHECK(client_);
+ // Post to the Media thread. Use the weak pointer to prevent the data arrival
+ // after the player has been deleted.
GetMediaTaskRunner()->PostDelayedTask(
- FROM_HERE, base::Bind(&DemuxerAndroidClient::OnDemuxerDataAvailable,
- base::Unretained(client_), chunk),
+ FROM_HERE, base::Bind(&MockDemuxerAndroid::OnDemuxerDataAvailable,
+ weak_factory_.GetWeakPtr(), chunk),
delay);
}
+void MockDemuxerAndroid::RequestDemuxerSeek(const base::TimeDelta& seek_request,
+ bool is_browser_seek) {
+ // Tell data factories to start next chunk with the new timestamp.
+ if (audio_factory_) {
+ base::TimeDelta time_to_seek =
+ std::max(base::TimeDelta(), seek_request - audio_preroll_interval_);
+ audio_factory_->SeekTo(time_to_seek);
+ }
+ if (video_factory_) {
+ base::TimeDelta time_to_seek =
+ std::max(base::TimeDelta(), seek_request - video_preroll_interval_);
+ video_factory_->SeekTo(time_to_seek);
+ video_factory_->RequestKeyFrame();
+ }
+
+ ++num_seeks_;
+ if (is_browser_seek)
+ ++num_browser_seeks_;
+
+ // Post OnDemuxerSeekDone() to the player.
+ DCHECK(client_);
+ base::TimeDelta reported_seek_time =
+ is_browser_seek ? seek_request : kNoTimestamp();
+ GetMediaTaskRunner()->PostDelayedTask(
+ FROM_HERE, base::Bind(&MockDemuxerAndroid::OnDemuxerSeekDone,
+ weak_factory_.GetWeakPtr(), reported_seek_time),
+ seek_done_delay_);
+}
+
+void MockDemuxerAndroid::OnDemuxerDataAvailable(const DemuxerData& chunk) {
+ DCHECK(GetMediaTaskRunner()->BelongsToCurrentThread());
+ DCHECK(client_);
+ client_->OnDemuxerDataAvailable(chunk);
+}
+
+void MockDemuxerAndroid::OnDemuxerSeekDone(base::TimeDelta reported_seek_time) {
+ DCHECK(GetMediaTaskRunner()->BelongsToCurrentThread());
+ DCHECK(client_);
+ client_->OnDemuxerSeekDone(reported_seek_time);
+}
+
void MockDemuxerAndroid::PostConfigs(const DemuxerConfigs& configs) {
- DVLOG(1) << "MockDemuxerAndroid::" << __FUNCTION__;
RUN_ON_MEDIA_THREAD(MockDemuxerAndroid, PostConfigs, configs);
+ DVLOG(1) << "MockDemuxerAndroid::" << __FUNCTION__;
+
DCHECK(GetMediaTaskRunner()->BelongsToCurrentThread());
if (client_)
@@ -253,21 +539,65 @@ void MockDemuxerAndroid::PostInternalConfigs() {
class MediaCodecPlayerTest : public testing::Test {
public:
MediaCodecPlayerTest();
- ~MediaCodecPlayerTest() override;
+
+ // Conditions to wait for.
+ bool IsPaused() const { return !(player_ && player_->IsPlaying()); }
protected:
typedef base::Callback<bool()> Predicate;
+ void TearDown() override;
+
void CreatePlayer();
+ void SetVideoSurface();
+ void SetVideoSurfaceB();
+ void RemoveVideoSurface();
// Waits for condition to become true or for timeout to expire.
// Returns true if the condition becomes true.
bool WaitForCondition(const Predicate& condition,
const base::TimeDelta& timeout = kDefaultTimeout);
+ // Waits for timeout to expire.
+ void WaitForDelay(const base::TimeDelta& timeout);
+
+ // Waits till playback position as determined by maximal reported pts
+ // reaches the given value or for timeout to expire. Returns true if the
+ // playback has passed the given position.
+ bool WaitForPlaybackBeyondPosition(
+ const base::TimeDelta& pts,
+ const base::TimeDelta& timeout = kDefaultTimeout);
+
+ // Helper method that starts video only stream. Waits till it actually
+ // started.
+ bool StartVideoPlayback(base::TimeDelta duration, const char* test_name);
+
+ // Helper method that starts audio and video streams.
+ bool StartAVPlayback(scoped_ptr<AudioFactory> audio_factory,
+ scoped_ptr<VideoFactory> video_factory,
+ uint32_t flags,
+ const char* test_name);
+
+ // Helper method that starts audio and video streams with preroll.
+ // The preroll is achieved by setting significant video preroll interval
+ // so video will have to catch up with audio. To make room for this interval
+ // the Start() command is preceded by SeekTo().
+ bool StartAVSeekAndPreroll(scoped_ptr<AudioFactory> audio_factory,
+ scoped_ptr<VideoFactory> video_factory,
+ base::TimeDelta seek_position,
+ uint32_t flags,
+ const char* test_name);
+
+ // Callback sent when demuxer is being deleted.
+ void OnDemuxerDeleted() { demuxer_ = nullptr; }
+
+ bool IsDemuxerDeleted() const { return !demuxer_; }
+
base::MessageLoop message_loop_;
MockMediaPlayerManager manager_;
MockDemuxerAndroid* demuxer_; // owned by player_
+ scoped_refptr<gfx::SurfaceTexture> surface_texture_a_;
+ scoped_refptr<gfx::SurfaceTexture> surface_texture_b_;
MediaCodecPlayer* player_; // raw pointer due to DeleteOnCorrectThread()
private:
@@ -280,7 +610,34 @@ class MediaCodecPlayerTest : public testing::Test {
};
MediaCodecPlayerTest::MediaCodecPlayerTest()
- : demuxer_(new MockDemuxerAndroid()), player_(nullptr) {
+ : demuxer_(new MockDemuxerAndroid(&message_loop_)),
+ player_(nullptr),
+ is_timeout_expired_(false) {}
+
+void MediaCodecPlayerTest::TearDown() {
+ DVLOG(1) << __FUNCTION__;
+
+ // Wait till the player is destroyed on the Media thread.
+
+ if (player_) {
+ // The player deletes the demuxer on the Media thread. The demuxer's
+ // destructor sends a notification to the UI thread. When this notification
+ // arrives we can conclude that player started destroying its member
+ // variables. By that time the media codecs should have been released.
+
+ DCHECK(demuxer_);
+ demuxer_->SetDemuxerDeletedCallback(base::Bind(
+ &MediaCodecPlayerTest::OnDemuxerDeleted, base::Unretained(this)));
+
+ player_->DeleteOnCorrectThread();
+
+ EXPECT_TRUE(
+ WaitForCondition(base::Bind(&MediaCodecPlayerTest::IsDemuxerDeleted,
+ base::Unretained(this)),
+ base::TimeDelta::FromMilliseconds(500)));
+
+ player_ = nullptr;
+ }
}
void MediaCodecPlayerTest::CreatePlayer() {
@@ -295,9 +652,25 @@ void MediaCodecPlayerTest::CreatePlayer() {
DCHECK(player_);
}
-MediaCodecPlayerTest::~MediaCodecPlayerTest() {
- if (player_)
- player_->DeleteOnCorrectThread();
+void MediaCodecPlayerTest::SetVideoSurface() {
+ surface_texture_a_ = gfx::SurfaceTexture::Create(0);
+ gfx::ScopedJavaSurface surface(surface_texture_a_.get());
+
+ ASSERT_NE(nullptr, player_);
+ player_->SetVideoSurface(surface.Pass());
+}
+
+void MediaCodecPlayerTest::SetVideoSurfaceB() {
+ surface_texture_b_ = gfx::SurfaceTexture::Create(1);
+ gfx::ScopedJavaSurface surface(surface_texture_b_.get());
+
+ ASSERT_NE(nullptr, player_);
+ player_->SetVideoSurface(surface.Pass());
+}
+
+void MediaCodecPlayerTest::RemoveVideoSurface() {
+ player_->SetVideoSurface(gfx::ScopedJavaSurface());
+ surface_texture_a_ = NULL;
}
bool MediaCodecPlayerTest::WaitForCondition(const Predicate& condition,
@@ -324,6 +697,181 @@ bool MediaCodecPlayerTest::WaitForCondition(const Predicate& condition,
return false;
}
+void MediaCodecPlayerTest::WaitForDelay(const base::TimeDelta& timeout) {
+ WaitForCondition(base::Bind(&AlwaysFalse), timeout);
+}
+
+bool MediaCodecPlayerTest::WaitForPlaybackBeyondPosition(
+ const base::TimeDelta& pts,
+ const base::TimeDelta& timeout) {
+ return WaitForCondition(
+ base::Bind(&MockMediaPlayerManager::IsPlaybackBeyondPosition,
+ base::Unretained(&manager_), pts),
+ timeout);
+}
+
+bool MediaCodecPlayerTest::StartVideoPlayback(base::TimeDelta duration,
+ const char* test_name) {
+ const base::TimeDelta start_timeout = base::TimeDelta::FromMilliseconds(800);
+
+ demuxer_->SetVideoFactory(
+ scoped_ptr<VideoFactory>(new VideoFactory(duration)));
+
+ CreatePlayer();
+
+ // Wait till the player is initialized on media thread.
+ EXPECT_TRUE(WaitForCondition(base::Bind(&MockDemuxerAndroid::IsInitialized,
+ base::Unretained(demuxer_))));
+
+ if (!demuxer_->IsInitialized()) {
+ DVLOG(0) << test_name << ": demuxer is not initialized";
+ return false;
+ }
+
+ SetVideoSurface();
+
+ // Post configuration after the player has been initialized.
+ demuxer_->PostInternalConfigs();
+
+ // Start the player.
+ EXPECT_FALSE(manager_.IsPlaybackStarted());
+ player_->Start();
+
+ // Wait for playback to start.
+ EXPECT_TRUE(
+ WaitForCondition(base::Bind(&MockMediaPlayerManager::IsPlaybackStarted,
+ base::Unretained(&manager_)),
+ start_timeout));
+
+ if (!manager_.IsPlaybackStarted()) {
+ DVLOG(0) << test_name << ": playback did not start";
+ return false;
+ }
+
+ return true;
+}
+
+bool MediaCodecPlayerTest::StartAVPlayback(
+ scoped_ptr<AudioFactory> audio_factory,
+ scoped_ptr<VideoFactory> video_factory,
+ uint32_t flags,
+ const char* test_name) {
+ demuxer_->SetAudioFactory(audio_factory.Pass());
+ demuxer_->SetVideoFactory(video_factory.Pass());
+
+ CreatePlayer();
+ SetVideoSurface();
+
+ // Wait till the player is initialized on media thread.
+ EXPECT_TRUE(WaitForCondition(base::Bind(&MockDemuxerAndroid::IsInitialized,
+ base::Unretained(demuxer_))));
+
+ if (!demuxer_->IsInitialized()) {
+ DVLOG(0) << test_name << ": demuxer is not initialized";
+ return false;
+ }
+
+ // Ask decoders to always reconfigure after the player has been initialized.
+ if (flags & kAlwaysReconfigAudio)
+ player_->SetAlwaysReconfigureForTests(DemuxerStream::AUDIO);
+ if (flags & kAlwaysReconfigVideo)
+ player_->SetAlwaysReconfigureForTests(DemuxerStream::VIDEO);
+
+ // Set a testing callback to receive PTS from decoders.
+ player_->SetDecodersTimeCallbackForTests(
+ base::Bind(&MockMediaPlayerManager::OnDecodersTimeUpdate,
+ base::Unretained(&manager_)));
+
+ // Set a testing callback to receive MediaCodec creation events from decoders.
+ player_->SetCodecCreatedCallbackForTests(
+ base::Bind(&MockMediaPlayerManager::OnMediaCodecCreated,
+ base::Unretained(&manager_)));
+
+ // Post configuration after the player has been initialized.
+ demuxer_->PostInternalConfigs();
+
+ // Start and wait for playback.
+ player_->Start();
+
+ // Wait till we start to play.
+ base::TimeDelta start_timeout = base::TimeDelta::FromMilliseconds(2000);
+
+ EXPECT_TRUE(
+ WaitForCondition(base::Bind(&MockMediaPlayerManager::IsPlaybackStarted,
+ base::Unretained(&manager_)),
+ start_timeout));
+
+ if (!manager_.IsPlaybackStarted()) {
+ DVLOG(0) << test_name << ": playback did not start";
+ return false;
+ }
+
+ return true;
+}
+
+bool MediaCodecPlayerTest::StartAVSeekAndPreroll(
+ scoped_ptr<AudioFactory> audio_factory,
+ scoped_ptr<VideoFactory> video_factory,
+ base::TimeDelta seek_position,
+ uint32_t flags,
+ const char* test_name) {
+ // Initialize A/V playback
+
+ demuxer_->SetAudioFactory(audio_factory.Pass());
+ demuxer_->SetVideoFactory(video_factory.Pass());
+
+ CreatePlayer();
+ SetVideoSurface();
+
+ // Wait till the player is initialized on media thread.
+ EXPECT_TRUE(WaitForCondition(base::Bind(&MockDemuxerAndroid::IsInitialized,
+ base::Unretained(demuxer_))));
+
+ if (!demuxer_->IsInitialized()) {
+ DVLOG(0) << test_name << ": demuxer is not initialized";
+ return false;
+ }
+
+ // Ask decoders to always reconfigure after the player has been initialized.
+ if (flags & kAlwaysReconfigAudio)
+ player_->SetAlwaysReconfigureForTests(DemuxerStream::AUDIO);
+ if (flags & kAlwaysReconfigVideo)
+ player_->SetAlwaysReconfigureForTests(DemuxerStream::VIDEO);
+
+ // Set a testing callback to receive PTS from decoders.
+ player_->SetDecodersTimeCallbackForTests(
+ base::Bind(&MockMediaPlayerManager::OnDecodersTimeUpdate,
+ base::Unretained(&manager_)));
+
+ // Set a testing callback to receive MediaCodec creation events from decoders.
+ player_->SetCodecCreatedCallbackForTests(
+ base::Bind(&MockMediaPlayerManager::OnMediaCodecCreated,
+ base::Unretained(&manager_)));
+
+ // Post configuration after the player has been initialized.
+ demuxer_->PostInternalConfigs();
+
+ // Issue SeekTo().
+ player_->SeekTo(seek_position);
+
+ // Start the playback.
+ player_->Start();
+
+ // Wait till preroll starts.
+ base::TimeDelta start_timeout = base::TimeDelta::FromMilliseconds(2000);
+ EXPECT_TRUE(WaitForCondition(
+ base::Bind(&MediaCodecPlayer::IsPrerollingForTests,
+ base::Unretained(player_), DemuxerStream::VIDEO),
+ start_timeout));
+
+ if (!player_->IsPrerollingForTests(DemuxerStream::VIDEO)) {
+ DVLOG(0) << test_name << ": preroll did not happen for video";
+ return false;
+ }
+
+ return true;
+}
+
TEST_F(MediaCodecPlayerTest, SetAudioConfigsBeforePlayerCreation) {
// Post configuration when there is no player yet.
EXPECT_EQ(nullptr, player_);
@@ -331,7 +879,7 @@ TEST_F(MediaCodecPlayerTest, SetAudioConfigsBeforePlayerCreation) {
base::TimeDelta duration = base::TimeDelta::FromSeconds(10);
demuxer_->PostConfigs(
- TestDataFactory::CreateAudioConfigs(kCodecVorbis, duration));
+ TestDataFactory::CreateAudioConfigs(kCodecAAC, duration));
// Wait until the configuration gets to the media thread.
EXPECT_TRUE(WaitForCondition(base::Bind(
@@ -360,7 +908,7 @@ TEST_F(MediaCodecPlayerTest, SetAudioConfigsAfterPlayerCreation) {
// Post configuration after the player has been initialized.
base::TimeDelta duration = base::TimeDelta::FromSeconds(10);
demuxer_->PostConfigs(
- TestDataFactory::CreateAudioConfigs(kCodecVorbis, duration));
+ TestDataFactory::CreateAudioConfigs(kCodecAAC, duration));
// Configuration should propagate through the player and to the manager.
EXPECT_TRUE(
@@ -393,11 +941,11 @@ TEST_F(MediaCodecPlayerTest, SetAudioVideoConfigsAfterPlayerCreation) {
EXPECT_EQ(240, manager_.media_metadata_.height);
}
-TEST_F(MediaCodecPlayerTest, PlayAudioTillCompletion) {
+TEST_F(MediaCodecPlayerTest, AudioPlayTillCompletion) {
SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
base::TimeDelta duration = base::TimeDelta::FromMilliseconds(1000);
- base::TimeDelta timeout = base::TimeDelta::FromMilliseconds(1100);
+ base::TimeDelta timeout = base::TimeDelta::FromMilliseconds(2000);
demuxer_->SetAudioFactory(
scoped_ptr<AudioFactory>(new AudioFactory(duration)));
@@ -419,6 +967,1317 @@ TEST_F(MediaCodecPlayerTest, PlayAudioTillCompletion) {
WaitForCondition(base::Bind(&MockMediaPlayerManager::IsPlaybackCompleted,
base::Unretained(&manager_)),
timeout));
+
+  // Current timestamp reflects "now playing" time. It might come with a delay
+ // relative to the frame's PTS. Allow for 100 ms delay here.
+ base::TimeDelta audio_pts_delay = base::TimeDelta::FromMilliseconds(100);
+ EXPECT_LT(duration - audio_pts_delay, manager_.pts_stat_.max());
+}
+
+TEST_F(MediaCodecPlayerTest, AudioNoPermission) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ base::TimeDelta duration = base::TimeDelta::FromMilliseconds(1000);
+ base::TimeDelta start_timeout = base::TimeDelta::FromMilliseconds(800);
+
+ manager_.SetPlaybackAllowed(false);
+
+ demuxer_->SetAudioFactory(
+ scoped_ptr<AudioFactory>(new AudioFactory(duration)));
+
+ CreatePlayer();
+
+ // Wait till the player is initialized on media thread.
+ EXPECT_TRUE(WaitForCondition(base::Bind(&MockDemuxerAndroid::IsInitialized,
+ base::Unretained(demuxer_))));
+
+ // Post configuration after the player has been initialized.
+ demuxer_->PostInternalConfigs();
+
+ EXPECT_FALSE(manager_.IsPlaybackCompleted());
+
+ player_->Start();
+
+ // Playback should not start.
+ EXPECT_FALSE(
+ WaitForCondition(base::Bind(&MockMediaPlayerManager::IsPlaybackStarted,
+ base::Unretained(&manager_)),
+ start_timeout));
+}
+
+TEST_F(MediaCodecPlayerTest, VideoPlayTillCompletion) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ base::TimeDelta duration = base::TimeDelta::FromMilliseconds(500);
+ base::TimeDelta timeout = base::TimeDelta::FromMilliseconds(2000);
+
+ ASSERT_TRUE(StartVideoPlayback(duration, "VideoPlayTillCompletion"));
+
+ // Wait till completion.
+ EXPECT_TRUE(
+ WaitForCondition(base::Bind(&MockMediaPlayerManager::IsPlaybackCompleted,
+ base::Unretained(&manager_)),
+ timeout));
+
+ EXPECT_LE(duration, manager_.pts_stat_.max());
+}
+
+TEST_F(MediaCodecPlayerTest, VideoNoPermission) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ base::TimeDelta duration = base::TimeDelta::FromMilliseconds(500);
+ const base::TimeDelta start_timeout = base::TimeDelta::FromMilliseconds(800);
+
+ manager_.SetPlaybackAllowed(false);
+
+ demuxer_->SetVideoFactory(
+ scoped_ptr<VideoFactory>(new VideoFactory(duration)));
+
+ CreatePlayer();
+
+ // Wait till the player is initialized on media thread.
+ EXPECT_TRUE(WaitForCondition(base::Bind(&MockDemuxerAndroid::IsInitialized,
+ base::Unretained(demuxer_))));
+
+ SetVideoSurface();
+
+ // Post configuration after the player has been initialized.
+ demuxer_->PostInternalConfigs();
+
+ // Start the player.
+ EXPECT_FALSE(manager_.IsPlaybackStarted());
+ player_->Start();
+
+ // Playback should not start.
+ EXPECT_FALSE(
+ WaitForCondition(base::Bind(&MockMediaPlayerManager::IsPlaybackStarted,
+ base::Unretained(&manager_)),
+ start_timeout));
+}
+
+// http://crbug.com/518900
+TEST_F(MediaCodecPlayerTest, AudioSeekAfterStop) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // Play for 300 ms, then Pause, then Seek to beginning. The playback should
+ // start from the beginning.
+
+ base::TimeDelta duration = base::TimeDelta::FromMilliseconds(2000);
+
+ demuxer_->SetAudioFactory(
+ scoped_ptr<AudioFactory>(new AudioFactory(duration)));
+
+ CreatePlayer();
+
+ // Post configuration.
+ demuxer_->PostInternalConfigs();
+
+ // Start the player.
+ player_->Start();
+
+ // Wait for playback to start.
+ EXPECT_TRUE(
+ WaitForCondition(base::Bind(&MockMediaPlayerManager::IsPlaybackStarted,
+ base::Unretained(&manager_))));
+
+ // Wait for 300 ms and stop. The 300 ms interval takes into account potential
+ // audio delay: audio takes time reconfiguring after the first several packets
+ // get written to the audio track.
+ WaitForDelay(base::TimeDelta::FromMilliseconds(300));
+
+ player_->Pause(true);
+
+ // Make sure we played at least 100 ms.
+ EXPECT_LT(base::TimeDelta::FromMilliseconds(100), manager_.pts_stat_.max());
+
+ // Wait till the Pause is completed.
+ EXPECT_TRUE(WaitForCondition(
+ base::Bind(&MediaCodecPlayerTest::IsPaused, base::Unretained(this))));
+
+ // Clear statistics.
+ manager_.pts_stat_.Clear();
+
+ // Now we can seek to the beginning and start the playback.
+ player_->SeekTo(base::TimeDelta());
+
+ player_->Start();
+
+ // Wait for playback to start.
+ EXPECT_TRUE(
+ WaitForCondition(base::Bind(&MockMediaPlayerManager::IsPlaybackStarted,
+ base::Unretained(&manager_))));
+
+  // Make sure we started from the beginning.
+ EXPECT_GT(base::TimeDelta::FromMilliseconds(40), manager_.pts_stat_.min());
+
+ // The player should have reported the seek completion to the manager.
+ EXPECT_TRUE(WaitForCondition(base::Bind(
+ &MockMediaPlayerManager::IsSeekCompleted, base::Unretained(&manager_))));
+}
+
+TEST_F(MediaCodecPlayerTest, AudioSeekThenPlay) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // Issue Seek command immediately followed by Start. The playback should
+ // start at the seek position.
+
+ base::TimeDelta duration = base::TimeDelta::FromMilliseconds(2000);
+ base::TimeDelta seek_position = base::TimeDelta::FromMilliseconds(500);
+
+ demuxer_->SetAudioFactory(
+ scoped_ptr<AudioFactory>(new AudioFactory(duration)));
+
+ CreatePlayer();
+
+ // Post configuration.
+ demuxer_->PostInternalConfigs();
+
+ // Seek and immediately start.
+ player_->SeekTo(seek_position);
+ player_->Start();
+
+ // Wait for playback to start.
+ EXPECT_TRUE(
+ WaitForCondition(base::Bind(&MockMediaPlayerManager::IsPlaybackStarted,
+ base::Unretained(&manager_))));
+
+ // The playback should start at |seek_position|
+ EXPECT_TRUE(AlmostEqual(seek_position, manager_.pts_stat_.min(), 25));
+
+ // The player should have reported the seek completion to the manager.
+ EXPECT_TRUE(WaitForCondition(base::Bind(
+ &MockMediaPlayerManager::IsSeekCompleted, base::Unretained(&manager_))));
+}
+
+TEST_F(MediaCodecPlayerTest, AudioSeekThenPlayThenConfig) {
+  SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+  // Issue Seek command immediately followed by Start but without prior demuxer
+  // configuration. Start should wait for configuration. After it has been
+  // posted the playback should start at the seek position.
+
+  base::TimeDelta duration = base::TimeDelta::FromMilliseconds(2000);
+  base::TimeDelta seek_position = base::TimeDelta::FromMilliseconds(500);
+
+  demuxer_->SetAudioFactory(
+      scoped_ptr<AudioFactory>(new AudioFactory(duration)));
+
+  CreatePlayer();
+
+  // Seek and immediately start.
+  player_->SeekTo(seek_position);
+  player_->Start();
+
+  // Make sure the player is waiting (for the demuxer configuration).
+  WaitForDelay(base::TimeDelta::FromMilliseconds(200));
+  EXPECT_FALSE(player_->IsPlaying());
+
+  // Post configuration.
+  demuxer_->PostInternalConfigs();
+
+  // Wait for playback to start.
+  EXPECT_TRUE(
+      WaitForCondition(base::Bind(&MockMediaPlayerManager::IsPlaybackStarted,
+                                  base::Unretained(&manager_))));
+
+  // The playback should start at |seek_position|.
+  EXPECT_TRUE(AlmostEqual(seek_position, manager_.pts_stat_.min(), 25));
+
+  // The player should have reported the seek completion to the manager.
+  EXPECT_TRUE(WaitForCondition(base::Bind(
+      &MockMediaPlayerManager::IsSeekCompleted, base::Unretained(&manager_))));
+}
+
+// http://crbug.com/518900
+TEST_F(MediaCodecPlayerTest, AudioSeekWhilePlaying) {
+  SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+  // Play for 300 ms, then issue several Seek commands in a row.
+  // The playback should continue at the last seek position.
+
+  // To test this condition without analyzing the reported time details
+  // and without introducing dependency on implementation I make a long (10s)
+  // duration and test that the playback resumes after a big time jump (5s) in
+  // a short period of time (200 ms).
+  base::TimeDelta duration = base::TimeDelta::FromSeconds(10);
+
+  demuxer_->SetAudioFactory(
+      scoped_ptr<AudioFactory>(new AudioFactory(duration)));
+
+  CreatePlayer();
+
+  // Post configuration.
+  demuxer_->PostInternalConfigs();
+
+  // Start the player.
+  player_->Start();
+
+  // Wait for playback to start.
+  EXPECT_TRUE(
+      WaitForCondition(base::Bind(&MockMediaPlayerManager::IsPlaybackStarted,
+                                  base::Unretained(&manager_))));
+
+  // Wait for 300 ms.
+  WaitForDelay(base::TimeDelta::FromMilliseconds(300));
+
+  // Make sure we played at least 100 ms.
+  EXPECT_LT(base::TimeDelta::FromMilliseconds(100), manager_.pts_stat_.max());
+
+  // Seek forward several times.
+  player_->SeekTo(base::TimeDelta::FromSeconds(3));
+  player_->SeekTo(base::TimeDelta::FromSeconds(4));
+  player_->SeekTo(base::TimeDelta::FromSeconds(5));
+
+  // Make sure that we reached the last timestamp within default timeout,
+  // i.e. 200 ms.
+  EXPECT_TRUE(WaitForPlaybackBeyondPosition(base::TimeDelta::FromSeconds(5)));
+  EXPECT_TRUE(player_->IsPlaying());
+
+  // The player should have reported the seek completion to the manager.
+  EXPECT_TRUE(WaitForCondition(base::Bind(
+      &MockMediaPlayerManager::IsSeekCompleted, base::Unretained(&manager_))));
+}
+
+TEST_F(MediaCodecPlayerTest, VideoReplaceSurface) {
+  SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+  // Test that setting a new surface while playing does not interrupt playback
+  // beyond the expected browser seek.
+
+  base::TimeDelta duration = base::TimeDelta::FromMilliseconds(1000);
+  base::TimeDelta timeout = base::TimeDelta::FromMilliseconds(1500);
+
+  ASSERT_TRUE(StartVideoPlayback(duration, "VideoReplaceSurface"));
+
+  // Wait for some time and check statistics.
+  WaitForDelay(base::TimeDelta::FromMilliseconds(200));
+
+  // Make sure we played at least 100 ms.
+  EXPECT_LT(base::TimeDelta::FromMilliseconds(100), manager_.pts_stat_.max());
+
+  // Set new video surface without removing the old one.
+  SetVideoSurfaceB();
+
+  // We should receive a browser seek request.
+  EXPECT_TRUE(WaitForCondition(
+      base::Bind(&MockDemuxerAndroid::ReceivedBrowserSeekRequest,
+                 base::Unretained(demuxer_))));
+
+  // Playback should continue with a new surface. Wait till completion.
+  EXPECT_TRUE(
+      WaitForCondition(base::Bind(&MockMediaPlayerManager::IsPlaybackCompleted,
+                                  base::Unretained(&manager_)),
+                       timeout));
+  EXPECT_LE(duration, manager_.pts_stat_.max());
+}
+
+TEST_F(MediaCodecPlayerTest, VideoRemoveAndSetSurface) {
+  SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+  // Test that playback pauses when the surface is removed and resumes (via a
+  // browser seek) when a new surface is set.
+
+  base::TimeDelta duration = base::TimeDelta::FromMilliseconds(1000);
+
+  ASSERT_TRUE(StartVideoPlayback(duration, "VideoRemoveAndSetSurface"));
+
+  // Wait for some time and check statistics.
+  WaitForDelay(base::TimeDelta::FromMilliseconds(200));
+
+  // Make sure we played at least 100 ms.
+  EXPECT_LT(base::TimeDelta::FromMilliseconds(100), manager_.pts_stat_.max());
+
+  // Remove video surface.
+  RemoveVideoSurface();
+
+  // We should be stuck waiting for the new surface.
+  WaitForDelay(base::TimeDelta::FromMilliseconds(200));
+  EXPECT_FALSE(player_->IsPlaying());
+
+  // Save last PTS and clear statistics.
+  base::TimeDelta max_pts_before_removal = manager_.pts_stat_.max();
+  manager_.pts_stat_.Clear();
+
+  // After clearing statistics we are ready to wait for IsPlaybackStarted again.
+  EXPECT_FALSE(manager_.IsPlaybackStarted());
+
+  // Extra RemoveVideoSurface() should not change anything.
+  RemoveVideoSurface();
+
+  // Set another video surface.
+  SetVideoSurfaceB();
+
+  // We should receive a browser seek request.
+  EXPECT_TRUE(WaitForCondition(
+      base::Bind(&MockDemuxerAndroid::ReceivedBrowserSeekRequest,
+                 base::Unretained(demuxer_))));
+
+  // Playback should continue with a new surface. Wait till it starts again.
+  base::TimeDelta reconfigure_timeout = base::TimeDelta::FromMilliseconds(800);
+  EXPECT_TRUE(
+      WaitForCondition(base::Bind(&MockMediaPlayerManager::IsPlaybackStarted,
+                                  base::Unretained(&manager_)),
+                       reconfigure_timeout));
+
+  // Timestamps should not go back.
+  EXPECT_LE(max_pts_before_removal, manager_.pts_stat_.max());
+}
+
+// http://crbug.com/518900
+TEST_F(MediaCodecPlayerTest, VideoReleaseAndStart) {
+  SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+  base::TimeDelta duration = base::TimeDelta::FromMilliseconds(1000);
+
+  ASSERT_TRUE(StartVideoPlayback(duration, "VideoReleaseAndStart"));
+
+  // Wait for some time and check statistics.
+  WaitForDelay(base::TimeDelta::FromMilliseconds(200));
+
+  // Make sure we played at least 100 ms.
+  EXPECT_LT(base::TimeDelta::FromMilliseconds(100), manager_.pts_stat_.max());
+
+  // When the user presses the Tasks button Chrome calls Pause() and Release().
+  player_->Pause(true);
+  player_->Release();
+
+  // Make sure we are not playing any more.
+  WaitForDelay(base::TimeDelta::FromMilliseconds(200));
+  EXPECT_FALSE(player_->IsPlaying());
+
+  // Save last PTS and clear statistics.
+  base::TimeDelta max_pts_before_backgrounding = manager_.pts_stat_.max();
+  manager_.pts_stat_.Clear();
+
+  // After clearing statistics we are ready to wait for IsPlaybackStarted again.
+  EXPECT_FALSE(manager_.IsPlaybackStarted());
+
+  // Restart.
+  SetVideoSurface();
+  player_->Start();
+
+  // We should receive a browser seek request.
+  EXPECT_TRUE(WaitForCondition(
+      base::Bind(&MockDemuxerAndroid::ReceivedBrowserSeekRequest,
+                 base::Unretained(demuxer_))));
+
+  // Wait for playback to start again.
+  base::TimeDelta reconfigure_timeout = base::TimeDelta::FromMilliseconds(800);
+  EXPECT_TRUE(
+      WaitForCondition(base::Bind(&MockMediaPlayerManager::IsPlaybackStarted,
+                                  base::Unretained(&manager_)),
+                       reconfigure_timeout));
+
+  // Timestamps should not go back.
+  EXPECT_LE(max_pts_before_backgrounding, manager_.pts_stat_.max());
+}
+
+TEST_F(MediaCodecPlayerTest, VideoSeekAndRelease) {
+  SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+  base::TimeDelta duration = base::TimeDelta::FromMilliseconds(2000);
+  base::TimeDelta seek_position = base::TimeDelta::FromMilliseconds(1000);
+
+  ASSERT_TRUE(StartVideoPlayback(duration, "VideoSeekAndRelease"));
+
+  // Wait for some time and check statistics.
+  WaitForDelay(base::TimeDelta::FromMilliseconds(200));
+
+  // Make sure we played at least 100 ms.
+  EXPECT_LT(base::TimeDelta::FromMilliseconds(100), manager_.pts_stat_.max());
+
+  // Issue SeekTo() immediately followed by Release().
+  player_->SeekTo(seek_position);
+  player_->Release();
+
+  // Make sure we are not playing any more.
+  WaitForDelay(base::TimeDelta::FromMilliseconds(400));
+  EXPECT_FALSE(player_->IsPlaying());
+
+  // The Release() should not cancel the SeekTo() and we should have received
+  // the seek request by this time.
+  EXPECT_TRUE(demuxer_->ReceivedSeekRequest());
+
+  // The player should have reported the seek completion to the manager.
+  EXPECT_TRUE(WaitForCondition(base::Bind(
+      &MockMediaPlayerManager::IsSeekCompleted, base::Unretained(&manager_))));
+
+  // Clear statistics.
+  manager_.pts_stat_.Clear();
+
+  // After clearing statistics we are ready to wait for IsPlaybackStarted again.
+  EXPECT_FALSE(manager_.IsPlaybackStarted());
+
+  // Restart.
+  SetVideoSurface();
+  player_->Start();
+
+  // Wait for playback to start again.
+  base::TimeDelta reconfigure_timeout = base::TimeDelta::FromMilliseconds(800);
+  EXPECT_TRUE(
+      WaitForCondition(base::Bind(&MockMediaPlayerManager::IsPlaybackStarted,
+                                  base::Unretained(&manager_)),
+                       reconfigure_timeout));
+
+  // Timestamps should start at the new seek position.
+  EXPECT_LE(seek_position, manager_.pts_stat_.min());
+}
+
+TEST_F(MediaCodecPlayerTest, VideoReleaseWhileWaitingForSeek) {
+  SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+  base::TimeDelta duration = base::TimeDelta::FromMilliseconds(2000);
+  base::TimeDelta seek_position = base::TimeDelta::FromMilliseconds(1000);
+
+  ASSERT_TRUE(StartVideoPlayback(duration, "VideoReleaseWhileWaitingForSeek"));
+
+  // Wait for some time and check statistics.
+  WaitForDelay(base::TimeDelta::FromMilliseconds(200));
+
+  // Make sure we played at least 100 ms.
+  EXPECT_LT(base::TimeDelta::FromMilliseconds(100), manager_.pts_stat_.max());
+
+  // Set artificial delay in the OnDemuxerSeekDone response so we can
+  // issue commands while the player is in the STATE_WAITING_FOR_SEEK.
+  demuxer_->SetSeekDoneDelay(base::TimeDelta::FromMilliseconds(100));
+
+  // Issue SeekTo().
+  player_->SeekTo(seek_position);
+
+  // Wait for the seek request to demuxer.
+  EXPECT_TRUE(WaitForCondition(base::Bind(
+      &MockDemuxerAndroid::ReceivedSeekRequest, base::Unretained(demuxer_))));
+
+  // The player is supposed to be in STATE_WAITING_FOR_SEEK. Issue Release().
+  player_->Release();
+
+  // Make sure we are not playing any more.
+  WaitForDelay(base::TimeDelta::FromMilliseconds(400));
+  EXPECT_FALSE(player_->IsPlaying());
+
+  // Clear statistics.
+  manager_.pts_stat_.Clear();
+
+  // After clearing statistics we are ready to wait for IsPlaybackStarted again.
+  EXPECT_FALSE(manager_.IsPlaybackStarted());
+
+  // Restart.
+  SetVideoSurface();
+  player_->Start();
+
+  // Wait for playback to start again.
+  base::TimeDelta reconfigure_timeout = base::TimeDelta::FromMilliseconds(1000);
+  EXPECT_TRUE(
+      WaitForCondition(base::Bind(&MockMediaPlayerManager::IsPlaybackStarted,
+                                  base::Unretained(&manager_)),
+                       reconfigure_timeout));
+
+  // Timestamps should start at the new seek position.
+  EXPECT_LE(seek_position, manager_.pts_stat_.min());
+
+  // The player should have reported the seek completion to the manager.
+  EXPECT_TRUE(WaitForCondition(base::Bind(
+      &MockMediaPlayerManager::IsSeekCompleted, base::Unretained(&manager_))));
+}
+
+TEST_F(MediaCodecPlayerTest, VideoPrerollAfterSeek) {
+  SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+  // A simple test for preroll for video stream only. After the seek is done
+  // the data factory generates the frames with pts before the seek time, and
+  // they should not be rendered. We deduce which frame is rendered by looking
+  // at the reported time progress.
+
+  base::TimeDelta duration = base::TimeDelta::FromMilliseconds(600);
+  base::TimeDelta seek_position = base::TimeDelta::FromMilliseconds(500);
+  base::TimeDelta start_timeout = base::TimeDelta::FromMilliseconds(800);
+
+  // Tell demuxer to make the first frame 100ms earlier than the seek request.
+  demuxer_->SetVideoPrerollInterval(base::TimeDelta::FromMilliseconds(100));
+
+  demuxer_->SetVideoFactory(
+      scoped_ptr<VideoFactory>(new VideoFactory(duration)));
+
+  CreatePlayer();
+  SetVideoSurface();
+
+  // Wait till the player is initialized on media thread.
+  EXPECT_TRUE(WaitForCondition(base::Bind(&MockDemuxerAndroid::IsInitialized,
+                                          base::Unretained(demuxer_))));
+  if (!demuxer_->IsInitialized()) {
+    DVLOG(0) << "VideoPrerollAfterSeek: demuxer is not initialized";
+    return;
+  }
+
+  // Post configuration after the player has been initialized.
+  demuxer_->PostInternalConfigs();
+
+  // Issue SeekTo().
+  player_->SeekTo(seek_position);
+
+  // Start the playback and make sure it is started.
+  player_->Start();
+
+  EXPECT_TRUE(
+      WaitForCondition(base::Bind(&MockMediaPlayerManager::IsPlaybackStarted,
+                                  base::Unretained(&manager_)),
+                       start_timeout));
+
+  // Wait for completion.
+  EXPECT_TRUE(
+      WaitForCondition(base::Bind(&MockMediaPlayerManager::IsPlaybackCompleted,
+                                  base::Unretained(&manager_))));
+
+  // The first pts should be equal to the seek position even if video frames
+  // started 100 ms earlier than the seek request.
+  EXPECT_EQ(seek_position, manager_.pts_stat_.min());
+
+  EXPECT_EQ(6, manager_.pts_stat_.num_values());
+}
+
+TEST_F(MediaCodecPlayerTest, AVPrerollAudioWaitsForVideo) {
+  SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+  // Test that during prerolling neither audio nor video plays and that both
+  // resume simultaneously after preroll is finished. In other words, test
+  // that preroll works.
+  // We put the video into the long preroll and intercept the time when first
+  // rendering happens in each stream. The moment of rendering is approximated
+  // with a decoder PTS that is delivered by a test-only callback.
+
+  base::TimeDelta duration = base::TimeDelta::FromMilliseconds(2000);
+
+  // Set significant preroll interval. 500 ms means 25 frames, at 10 ms
+  // per frame it would take 250 ms to preroll.
+  base::TimeDelta seek_position = base::TimeDelta::FromMilliseconds(1000);
+  base::TimeDelta preroll_intvl = base::TimeDelta::FromMilliseconds(500);
+  base::TimeDelta preroll_timeout = base::TimeDelta::FromMilliseconds(1000);
+
+  scoped_ptr<AudioFactory> audio_factory(new AudioFactory(duration));
+  scoped_ptr<VideoFactory> video_factory(new VideoFactory(duration));
+
+  demuxer_->SetVideoPrerollInterval(preroll_intvl);
+
+  ASSERT_TRUE(StartAVSeekAndPreroll(audio_factory.Pass(), video_factory.Pass(),
+                                    seek_position, 0,
+                                    "AVPrerollAudioWaitsForVideo"));
+
+  // Wait till preroll finishes and the real playback starts.
+  EXPECT_TRUE(
+      WaitForCondition(base::Bind(&MockMediaPlayerManager::IsPlaybackStarted,
+                                  base::Unretained(&manager_)),
+                       preroll_timeout));
+
+  // Ensure that the first audio and video pts are close to each other and are
+  // reported at the close moments in time.
+
+  EXPECT_TRUE(manager_.HasFirstFrame(DemuxerStream::AUDIO));
+  EXPECT_TRUE(WaitForCondition(
+      base::Bind(&MockMediaPlayerManager::HasFirstFrame,
+                 base::Unretained(&manager_), DemuxerStream::VIDEO)));
+
+  EXPECT_TRUE(AlmostEqual(manager_.FirstFramePTS(DemuxerStream::AUDIO),
+                          manager_.FirstFramePTS(DemuxerStream::VIDEO), 25));
+
+  EXPECT_TRUE(AlmostEqual(manager_.FirstFrameTime(DemuxerStream::AUDIO),
+                          manager_.FirstFrameTime(DemuxerStream::VIDEO), 50));
+
+  // The playback should start at |seek_position|.
+  EXPECT_TRUE(AlmostEqual(seek_position, manager_.pts_stat_.min(), 25));
+}
+
+TEST_F(MediaCodecPlayerTest, AVPrerollReleaseAndRestart) {
+  SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+  // Test that player will resume prerolling if prerolling is interrupted by
+  // Release() and Start().
+
+  base::TimeDelta duration = base::TimeDelta::FromMilliseconds(2000);
+
+  // Set significant preroll interval. 500 ms means 25 frames, at 10 ms
+  // per frame it would take 250 ms to preroll.
+  base::TimeDelta seek_position = base::TimeDelta::FromMilliseconds(1000);
+  base::TimeDelta preroll_intvl = base::TimeDelta::FromMilliseconds(500);
+
+  base::TimeDelta start_timeout = base::TimeDelta::FromMilliseconds(800);
+  base::TimeDelta preroll_timeout = base::TimeDelta::FromMilliseconds(1000);
+
+  scoped_ptr<AudioFactory> audio_factory(new AudioFactory(duration));
+  scoped_ptr<VideoFactory> video_factory(new VideoFactory(duration));
+
+  demuxer_->SetVideoPrerollInterval(preroll_intvl);
+
+  ASSERT_TRUE(StartAVSeekAndPreroll(audio_factory.Pass(), video_factory.Pass(),
+                                    seek_position, 0,
+                                    "AVPrerollReleaseAndRestart"));
+
+  // Issue Release().
+  player_->Release();
+
+  // Make sure we have not been playing.
+  WaitForDelay(base::TimeDelta::FromMilliseconds(400));
+
+  EXPECT_FALSE(manager_.HasFirstFrame(DemuxerStream::AUDIO));
+  EXPECT_FALSE(manager_.HasFirstFrame(DemuxerStream::VIDEO));
+
+  EXPECT_FALSE(player_->IsPrerollingForTests(DemuxerStream::AUDIO));
+  EXPECT_FALSE(player_->IsPrerollingForTests(DemuxerStream::VIDEO));
+  EXPECT_EQ(0, manager_.pts_stat_.num_values());
+
+  // Restart. Release() removed the video surface, we need to set it again.
+  SetVideoSurface();
+  player_->Start();
+
+  // The playback should pass through prerolling phase.
+  EXPECT_TRUE(WaitForCondition(
+      base::Bind(&MediaCodecPlayer::IsPrerollingForTests,
+                 base::Unretained(player_), DemuxerStream::VIDEO),
+      start_timeout));
+
+  // Wait till preroll finishes and the real playback starts.
+  EXPECT_TRUE(
+      WaitForCondition(base::Bind(&MockMediaPlayerManager::IsPlaybackStarted,
+                                  base::Unretained(&manager_)),
+                       preroll_timeout));
+
+  // Ensure that the first audio and video pts are close to each other and are
+  // reported at the close moments in time.
+
+  EXPECT_TRUE(manager_.HasFirstFrame(DemuxerStream::AUDIO));
+  EXPECT_TRUE(WaitForCondition(
+      base::Bind(&MockMediaPlayerManager::HasFirstFrame,
+                 base::Unretained(&manager_), DemuxerStream::VIDEO)));
+
+  // Release() might discard the first audio frame, hence the wider tolerance.
+  EXPECT_TRUE(AlmostEqual(manager_.FirstFramePTS(DemuxerStream::AUDIO),
+                          manager_.FirstFramePTS(DemuxerStream::VIDEO), 50));
+
+  EXPECT_TRUE(AlmostEqual(manager_.FirstFrameTime(DemuxerStream::AUDIO),
+                          manager_.FirstFrameTime(DemuxerStream::VIDEO), 50));
+
+  // The playback should start at |seek_position|, but Release() might discard
+  // the first audio frame.
+  EXPECT_TRUE(AlmostEqual(seek_position, manager_.pts_stat_.min(), 50));
+}
+
+TEST_F(MediaCodecPlayerTest, AVPrerollStopAndRestart) {
+  SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+  // Test that if Pause() happens during the preroll phase,
+  // we continue to do preroll after restart.
+
+  base::TimeDelta duration = base::TimeDelta::FromMilliseconds(1200);
+
+  // Set significant preroll interval. 500 ms means 25 frames, at 10 ms
+  // per frame it would take 250 ms to preroll.
+  base::TimeDelta seek_position = base::TimeDelta::FromMilliseconds(1000);
+  base::TimeDelta preroll_intvl = base::TimeDelta::FromMilliseconds(500);
+
+  base::TimeDelta start_timeout = base::TimeDelta::FromMilliseconds(800);
+  base::TimeDelta preroll_timeout = base::TimeDelta::FromMilliseconds(1000);
+
+  scoped_ptr<AudioFactory> audio_factory(new AudioFactory(duration));
+  scoped_ptr<VideoFactory> video_factory(new VideoFactory(duration));
+
+  demuxer_->SetVideoPrerollInterval(preroll_intvl);
+
+  ASSERT_TRUE(StartAVSeekAndPreroll(audio_factory.Pass(), video_factory.Pass(),
+                                    seek_position, 0,
+                                    "AVPrerollStopAndRestart"));
+
+  // Video stream should be prerolling. Request to stop.
+  EXPECT_FALSE(IsPaused());
+  player_->Pause(true);
+
+  EXPECT_TRUE(WaitForCondition(
+      base::Bind(&MediaCodecPlayerTest::IsPaused, base::Unretained(this))));
+
+  // Test that we have not been playing.
+  EXPECT_FALSE(manager_.HasFirstFrame(DemuxerStream::AUDIO));
+  EXPECT_FALSE(manager_.HasFirstFrame(DemuxerStream::VIDEO));
+
+  EXPECT_FALSE(player_->IsPrerollingForTests(DemuxerStream::AUDIO));
+  EXPECT_FALSE(player_->IsPrerollingForTests(DemuxerStream::VIDEO));
+  EXPECT_EQ(0, manager_.pts_stat_.num_values());
+
+  // Restart.
+  player_->Start();
+
+  // There should be preroll after the start.
+  EXPECT_TRUE(WaitForCondition(
+      base::Bind(&MediaCodecPlayer::IsPrerollingForTests,
+                 base::Unretained(player_), DemuxerStream::VIDEO),
+      start_timeout));
+
+  // Wait for a short period of time, so that preroll is still ongoing,
+  // and pause again.
+  WaitForDelay(base::TimeDelta::FromMilliseconds(100));
+
+  EXPECT_FALSE(IsPaused());
+  player_->Pause(true);
+
+  EXPECT_TRUE(WaitForCondition(
+      base::Bind(&MediaCodecPlayerTest::IsPaused, base::Unretained(this))));
+
+  // Check that we still haven't started rendering.
+  EXPECT_FALSE(manager_.HasFirstFrame(DemuxerStream::AUDIO));
+  EXPECT_FALSE(manager_.HasFirstFrame(DemuxerStream::VIDEO));
+
+  EXPECT_FALSE(player_->IsPrerollingForTests(DemuxerStream::AUDIO));
+  EXPECT_FALSE(player_->IsPrerollingForTests(DemuxerStream::VIDEO));
+  EXPECT_EQ(0, manager_.pts_stat_.num_values());
+
+  // Restart again.
+  player_->Start();
+
+  // Wait till we start to play.
+  EXPECT_TRUE(
+      WaitForCondition(base::Bind(&MockMediaPlayerManager::IsPlaybackStarted,
+                                  base::Unretained(&manager_)),
+                       preroll_timeout));
+
+  // Check that we did prerolling, i.e. audio did wait for video.
+  EXPECT_TRUE(manager_.HasFirstFrame(DemuxerStream::AUDIO));
+  EXPECT_TRUE(WaitForCondition(
+      base::Bind(&MockMediaPlayerManager::HasFirstFrame,
+                 base::Unretained(&manager_), DemuxerStream::VIDEO)));
+
+  EXPECT_TRUE(AlmostEqual(manager_.FirstFramePTS(DemuxerStream::AUDIO),
+                          manager_.FirstFramePTS(DemuxerStream::VIDEO), 25));
+
+  EXPECT_TRUE(AlmostEqual(manager_.FirstFrameTime(DemuxerStream::AUDIO),
+                          manager_.FirstFrameTime(DemuxerStream::VIDEO), 50));
+
+  // The playback should start at |seek_position|.
+  EXPECT_TRUE(AlmostEqual(seek_position, manager_.pts_stat_.min(), 25));
+}
+
+TEST_F(MediaCodecPlayerTest, AVPrerollVideoEndsWhilePrerolling) {
+  SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+  // Test that when one stream ends in the preroll phase and the other does
+  // not, the preroll finishes and playback continues after it.
+
+  // http://crbug.com/526755
+  // TODO(timav): remove these logs after verifying that the bug is fixed.
+  DVLOG(0) << "AVPrerollVideoEndsWhilePrerolling: begin";
+
+  base::TimeDelta audio_duration = base::TimeDelta::FromMilliseconds(1100);
+  base::TimeDelta video_duration = base::TimeDelta::FromMilliseconds(900);
+  base::TimeDelta seek_position = base::TimeDelta::FromMilliseconds(1000);
+  base::TimeDelta video_preroll_intvl = base::TimeDelta::FromMilliseconds(200);
+
+  base::TimeDelta start_timeout = base::TimeDelta::FromMilliseconds(800);
+  base::TimeDelta preroll_timeout = base::TimeDelta::FromMilliseconds(400);
+
+  demuxer_->SetVideoPrerollInterval(video_preroll_intvl);
+
+  demuxer_->SetAudioFactory(
+      scoped_ptr<AudioFactory>(new AudioFactory(audio_duration)));
+  demuxer_->SetVideoFactory(
+      scoped_ptr<VideoFactory>(new VideoFactory(video_duration)));
+
+  CreatePlayer();
+  SetVideoSurface();
+
+  // Set special testing callback to receive PTS from decoders.
+  player_->SetDecodersTimeCallbackForTests(
+      base::Bind(&MockMediaPlayerManager::OnDecodersTimeUpdate,
+                 base::Unretained(&manager_)));
+
+  // Wait till the player is initialized on media thread.
+  EXPECT_TRUE(WaitForCondition(base::Bind(&MockDemuxerAndroid::IsInitialized,
+                                          base::Unretained(demuxer_))));
+
+  if (!demuxer_->IsInitialized()) {
+    DVLOG(0) << "AVPrerollVideoEndsWhilePrerolling: demuxer is not initialized";
+    return;
+  }
+
+  // Post configuration after the player has been initialized.
+  demuxer_->PostInternalConfigs();
+
+  // Issue SeekTo().
+  player_->SeekTo(seek_position);
+
+  // Start the playback.
+  player_->Start();
+
+  // The video decoder should start prerolling.
+  // Wait till preroll starts.
+  EXPECT_TRUE(WaitForCondition(
+      base::Bind(&MediaCodecPlayer::IsPrerollingForTests,
+                 base::Unretained(player_), DemuxerStream::VIDEO),
+      start_timeout));
+
+  // Wait for playback to start.
+  bool playback_started =
+      WaitForCondition(base::Bind(&MockMediaPlayerManager::IsPlaybackStarted,
+                                  base::Unretained(&manager_)),
+                       preroll_timeout);
+
+  // http://crbug.com/526755
+  if (!playback_started) {
+    DVLOG(0) << "AVPrerollVideoEndsWhilePrerolling: playback did not start for "
+             << preroll_timeout;
+  }
+  ASSERT_TRUE(playback_started);
+
+  EXPECT_TRUE(manager_.HasFirstFrame(DemuxerStream::AUDIO));
+
+  // Play till completion.
+  EXPECT_TRUE(
+      WaitForCondition(base::Bind(&MockMediaPlayerManager::IsPlaybackCompleted,
+                                  base::Unretained(&manager_))));
+
+  // There should not be any video frames.
+  EXPECT_FALSE(manager_.HasFirstFrame(DemuxerStream::VIDEO));
+
+  // http://crbug.com/526755
+  DVLOG(0) << "AVPrerollVideoEndsWhilePrerolling: end";
+}
+
+TEST_F(MediaCodecPlayerTest, VideoConfigChangeWhilePlaying) {
+  SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+  // Test that video only playback continues after video config change.
+
+  // Initialize video playback.
+  base::TimeDelta duration = base::TimeDelta::FromMilliseconds(1200);
+  base::TimeDelta config_change_position =
+      base::TimeDelta::FromMilliseconds(1000);
+
+  base::TimeDelta start_timeout = base::TimeDelta::FromMilliseconds(2000);
+  base::TimeDelta completion_timeout = base::TimeDelta::FromMilliseconds(3000);
+
+  demuxer_->SetVideoFactory(
+      scoped_ptr<VideoFactory>(new VideoFactory(duration)));
+
+  demuxer_->video_factory()->RequestConfigChange(config_change_position);
+
+  CreatePlayer();
+  SetVideoSurface();
+
+  // Wait till the player is initialized on media thread.
+  EXPECT_TRUE(WaitForCondition(base::Bind(&MockDemuxerAndroid::IsInitialized,
+                                          base::Unretained(demuxer_))));
+
+  if (!demuxer_->IsInitialized()) {
+    DVLOG(0) << "VideoConfigChangeWhilePlaying: demuxer is not initialized";
+    return;
+  }
+
+  // Ask decoders to always reconfigure after the player has been initialized.
+  player_->SetAlwaysReconfigureForTests(DemuxerStream::VIDEO);
+
+  // Set a testing callback to receive PTS from decoders.
+  player_->SetDecodersTimeCallbackForTests(
+      base::Bind(&MockMediaPlayerManager::OnDecodersTimeUpdate,
+                 base::Unretained(&manager_)));
+
+  // Set a testing callback to receive MediaCodec creation events from decoders.
+  player_->SetCodecCreatedCallbackForTests(
+      base::Bind(&MockMediaPlayerManager::OnMediaCodecCreated,
+                 base::Unretained(&manager_)));
+
+  // Post configuration after the player has been initialized.
+  demuxer_->PostInternalConfigs();
+
+  // Start and wait for playback.
+  player_->Start();
+
+  // Wait till we start to play.
+  EXPECT_TRUE(
+      WaitForCondition(base::Bind(&MockMediaPlayerManager::IsPlaybackStarted,
+                                  base::Unretained(&manager_)),
+                       start_timeout));
+
+  // Wait till completion.
+  EXPECT_TRUE(
+      WaitForCondition(base::Bind(&MockMediaPlayerManager::IsPlaybackCompleted,
+                                  base::Unretained(&manager_)),
+                       completion_timeout));
+
+  // The video codec should be recreated upon config changes.
+  EXPECT_EQ(2, manager_.num_video_codecs_created());
+
+  // Check that we did not miss video frames.
+  int expected_video_frames = GetFrameCount(duration, kVideoFramePeriod, 1);
+  EXPECT_EQ(expected_video_frames,
+            manager_.render_stat_[DemuxerStream::VIDEO].num_values());
+}
+
+TEST_F(MediaCodecPlayerTest, AVVideoConfigChangeWhilePlaying) {
+  SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+  // Test that A/V playback continues after video config change.
+
+  // Initialize A/V playback.
+  base::TimeDelta duration = base::TimeDelta::FromMilliseconds(1200);
+  base::TimeDelta config_change_position =
+      base::TimeDelta::FromMilliseconds(1000);
+
+  base::TimeDelta completion_timeout = base::TimeDelta::FromMilliseconds(3000);
+
+  scoped_ptr<AudioFactory> audio_factory(new AudioFactory(duration));
+  scoped_ptr<VideoFactory> video_factory(new VideoFactory(duration));
+
+  video_factory->RequestConfigChange(config_change_position);
+
+  ASSERT_TRUE(StartAVPlayback(audio_factory.Pass(), video_factory.Pass(),
+                              kAlwaysReconfigVideo,
+                              "AVVideoConfigChangeWhilePlaying"));
+
+  // Wait till completion.
+  EXPECT_TRUE(
+      WaitForCondition(base::Bind(&MockMediaPlayerManager::IsPlaybackCompleted,
+                                  base::Unretained(&manager_)),
+                       completion_timeout));
+
+  // The audio codec should be kept.
+  EXPECT_EQ(1, manager_.num_audio_codecs_created());
+
+  // The video codec should be recreated upon config changes.
+  EXPECT_EQ(2, manager_.num_video_codecs_created());
+
+  // Check that we did not miss video frames.
+  int expected_video_frames = GetFrameCount(duration, kVideoFramePeriod, 1);
+  EXPECT_EQ(expected_video_frames,
+            manager_.render_stat_[DemuxerStream::VIDEO].num_values());
+
+  // Check that we did not miss audio frames. We expect one postponed frame
+  // that is not reported.
+  // For Nexus 4 KitKat the AAC decoder seems to swallow the first frame
+  // but reports the last pts twice, maybe it just shifts the reported PTS.
+  int expected_audio_frames = GetFrameCount(duration, kAudioFramePeriod, 0) - 1;
+  EXPECT_EQ(expected_audio_frames,
+            manager_.render_stat_[DemuxerStream::AUDIO].num_values());
+}
+
+TEST_F(MediaCodecPlayerTest, AVAudioConfigChangeWhilePlaying) {
+  SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+  // Test that A/V playback continues after audio config change.
+
+  base::TimeDelta duration = base::TimeDelta::FromMilliseconds(1200);
+  base::TimeDelta config_change_position =
+      base::TimeDelta::FromMilliseconds(1000);
+
+  base::TimeDelta completion_timeout = base::TimeDelta::FromMilliseconds(3000);
+
+  scoped_ptr<AudioFactory> audio_factory(new AudioFactory(duration));
+  scoped_ptr<VideoFactory> video_factory(new VideoFactory(duration));
+
+  audio_factory->RequestConfigChange(config_change_position);
+
+  ASSERT_TRUE(StartAVPlayback(audio_factory.Pass(), video_factory.Pass(),
+                              kAlwaysReconfigAudio,
+                              "AVAudioConfigChangeWhilePlaying"));
+
+  // Wait till completion.
+  EXPECT_TRUE(
+      WaitForCondition(base::Bind(&MockMediaPlayerManager::IsPlaybackCompleted,
+                                  base::Unretained(&manager_)),
+                       completion_timeout));
+
+  // The audio codec should be recreated upon config changes.
+  EXPECT_EQ(2, manager_.num_audio_codecs_created());
+
+  // The video codec should be kept.
+  EXPECT_EQ(1, manager_.num_video_codecs_created());
+
+  // Check that we did not miss video frames.
+  int expected_video_frames = GetFrameCount(duration, kVideoFramePeriod, 0);
+  EXPECT_EQ(expected_video_frames,
+            manager_.render_stat_[DemuxerStream::VIDEO].num_values());
+
+  // Check that we did not miss audio frames. We expect two postponed frames
+  // that are not reported.
+  int expected_audio_frames = GetFrameCount(duration, kAudioFramePeriod, 1) - 2;
+  EXPECT_EQ(expected_audio_frames,
+            manager_.render_stat_[DemuxerStream::AUDIO].num_values());
+}
+
+TEST_F(MediaCodecPlayerTest, AVSimultaneousConfigChange_1) {
+  SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+  // Test that the playback continues if audio and video config changes happen
+  // at the same time.
+
+  base::TimeDelta duration = base::TimeDelta::FromMilliseconds(1200);
+  base::TimeDelta config_change_audio = base::TimeDelta::FromMilliseconds(1000);
+  base::TimeDelta config_change_video = base::TimeDelta::FromMilliseconds(1000);
+
+  base::TimeDelta completion_timeout = base::TimeDelta::FromMilliseconds(3000);
+
+  scoped_ptr<AudioFactory> audio_factory(new AudioFactory(duration));
+  scoped_ptr<VideoFactory> video_factory(new VideoFactory(duration));
+
+  audio_factory->RequestConfigChange(config_change_audio);
+  video_factory->RequestConfigChange(config_change_video);
+
+  ASSERT_TRUE(StartAVPlayback(audio_factory.Pass(), video_factory.Pass(),
+                              kAlwaysReconfigAudio | kAlwaysReconfigVideo,
+                              "AVSimultaneousConfigChange_1"));
+
+  // Wait till completion.
+  EXPECT_TRUE(
+      WaitForCondition(base::Bind(&MockMediaPlayerManager::IsPlaybackCompleted,
+                                  base::Unretained(&manager_)),
+                       completion_timeout));
+
+  // The audio codec should be recreated upon config changes.
+  EXPECT_EQ(2, manager_.num_audio_codecs_created());
+
+  // The video codec should be recreated upon config changes.
+  EXPECT_EQ(2, manager_.num_video_codecs_created());
+
+  // Check that we did not miss video frames.
+  int expected_video_frames = GetFrameCount(duration, kVideoFramePeriod, 1);
+  EXPECT_EQ(expected_video_frames,
+            manager_.render_stat_[DemuxerStream::VIDEO].num_values());
+
+  // Check that we did not miss audio frames. We expect two postponed frames
+  // that are not reported.
+  int expected_audio_frames = GetFrameCount(duration, kAudioFramePeriod, 1) - 2;
+  EXPECT_EQ(expected_audio_frames,
+            manager_.render_stat_[DemuxerStream::AUDIO].num_values());
+}
+
+TEST_F(MediaCodecPlayerTest, AVSimultaneousConfigChange_2) {
+  SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+  // Test that the playback continues if audio and video config changes happen
+  // at the same time. Move the audio change moment slightly to make it drain
+  // after the video one.
+
+  base::TimeDelta duration = base::TimeDelta::FromMilliseconds(1200);
+  base::TimeDelta config_change_audio = base::TimeDelta::FromMilliseconds(1020);
+  base::TimeDelta config_change_video = base::TimeDelta::FromMilliseconds(1000);
+
+  base::TimeDelta completion_timeout = base::TimeDelta::FromMilliseconds(3000);
+
+  scoped_ptr<AudioFactory> audio_factory(new AudioFactory(duration));
+  scoped_ptr<VideoFactory> video_factory(new VideoFactory(duration));
+
+  audio_factory->RequestConfigChange(config_change_audio);
+  video_factory->RequestConfigChange(config_change_video);
+
+  ASSERT_TRUE(StartAVPlayback(audio_factory.Pass(), video_factory.Pass(),
+                              kAlwaysReconfigAudio | kAlwaysReconfigVideo,
+                              "AVSimultaneousConfigChange_2"));
+
+  // Wait till completion.
+  EXPECT_TRUE(
+      WaitForCondition(base::Bind(&MockMediaPlayerManager::IsPlaybackCompleted,
+                                  base::Unretained(&manager_)),
+                       completion_timeout));
+
+  // The audio codec should be recreated upon config changes.
+  EXPECT_EQ(2, manager_.num_audio_codecs_created());
+
+  // The video codec should be recreated upon config changes.
+  EXPECT_EQ(2, manager_.num_video_codecs_created());
+
+  // Check that we did not miss video frames.
+  int expected_video_frames = GetFrameCount(duration, kVideoFramePeriod, 1);
+  EXPECT_EQ(expected_video_frames,
+            manager_.render_stat_[DemuxerStream::VIDEO].num_values());
+
+  // Check that we did not miss audio frames. We expect two postponed frames
+  // that are not reported.
+  int expected_audio_frames = GetFrameCount(duration, kAudioFramePeriod, 1) - 2;
+  EXPECT_EQ(expected_audio_frames,
+            manager_.render_stat_[DemuxerStream::AUDIO].num_values());
+}
+
+TEST_F(MediaCodecPlayerTest, AVAudioEndsAcrossVideoConfigChange) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // Test that audio can end while video config change processing.
+
+ base::TimeDelta audio_duration = base::TimeDelta::FromMilliseconds(1000);
+ base::TimeDelta video_duration = base::TimeDelta::FromMilliseconds(1200);
+ base::TimeDelta config_change_video = base::TimeDelta::FromMilliseconds(1000);
+
+ base::TimeDelta completion_timeout = base::TimeDelta::FromMilliseconds(3000);
+
+ scoped_ptr<AudioFactory> audio_factory(new AudioFactory(audio_duration));
+ scoped_ptr<VideoFactory> video_factory(new VideoFactory(video_duration));
+
+ video_factory->RequestConfigChange(config_change_video);
+
+ ASSERT_TRUE(StartAVPlayback(audio_factory.Pass(), video_factory.Pass(),
+ kAlwaysReconfigVideo,
+ "AVAudioEndsAcrossVideoConfigChange"));
+
+ // Wait till completion
+ EXPECT_TRUE(
+ WaitForCondition(base::Bind(&MockMediaPlayerManager::IsPlaybackCompleted,
+ base::Unretained(&manager_)),
+ completion_timeout));
+
+ // The audio codec should not be recreated.
+ EXPECT_EQ(1, manager_.num_audio_codecs_created());
+
+ // The video codec should be recreated upon config changes.
+ EXPECT_EQ(2, manager_.num_video_codecs_created());
+
+ // Check that we did not miss video frames.
+ int expected_video_frames =
+ GetFrameCount(video_duration, kVideoFramePeriod, 1);
+ EXPECT_EQ(expected_video_frames,
+ manager_.render_stat_[DemuxerStream::VIDEO].num_values());
+
+ // Check the last video frame timestamp. The maximum render pts may differ
+ // from |video_duration| because of the testing artefact: if the last video
+ // chunk is incomplete if will have different last pts due to B-frames
+ // rearrangements.
+ EXPECT_LE(video_duration,
+ manager_.render_stat_[DemuxerStream::VIDEO].max().pts);
+
+ // Check that the playback time reported by the player goes past
+ // the audio time and corresponds to video after the audio ended.
+ EXPECT_EQ(video_duration, manager_.pts_stat_.max());
+}
+
+TEST_F(MediaCodecPlayerTest, AVVideoEndsAcrossAudioConfigChange) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // Test that video can end while audio config change processing.
+ base::TimeDelta audio_duration = base::TimeDelta::FromMilliseconds(1200);
+ base::TimeDelta video_duration = base::TimeDelta::FromMilliseconds(1000);
+ base::TimeDelta config_change_audio = base::TimeDelta::FromMilliseconds(1000);
+
+ base::TimeDelta completion_timeout = base::TimeDelta::FromMilliseconds(3000);
+
+ scoped_ptr<AudioFactory> audio_factory(new AudioFactory(audio_duration));
+ scoped_ptr<VideoFactory> video_factory(new VideoFactory(video_duration));
+
+ audio_factory->RequestConfigChange(config_change_audio);
+
+ ASSERT_TRUE(StartAVPlayback(audio_factory.Pass(), video_factory.Pass(),
+ kAlwaysReconfigAudio,
+ "AVVideoEndsAcrossAudioConfigChange"));
+
+ // Wait till completion
+ EXPECT_TRUE(
+ WaitForCondition(base::Bind(&MockMediaPlayerManager::IsPlaybackCompleted,
+ base::Unretained(&manager_)),
+ completion_timeout));
+
+ // The audio codec should be recreated upon config changes.
+ EXPECT_EQ(2, manager_.num_audio_codecs_created());
+
+ // The video codec should not be recreated.
+ EXPECT_EQ(1, manager_.num_video_codecs_created());
+
+ // Check that we did not miss audio frames. We expect two postponed frames
+ // that are not reported.
+ int expected_audio_frames =
+ GetFrameCount(audio_duration, kAudioFramePeriod, 1) - 2;
+ EXPECT_EQ(expected_audio_frames,
+ manager_.render_stat_[DemuxerStream::AUDIO].num_values());
+}
+
+TEST_F(MediaCodecPlayerTest, AVPrerollAcrossVideoConfigChange) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // Test that preroll continues if interrupted by video config change.
+
+ base::TimeDelta duration = base::TimeDelta::FromMilliseconds(1200);
+ base::TimeDelta seek_position = base::TimeDelta::FromMilliseconds(1000);
+ base::TimeDelta config_change_position =
+ base::TimeDelta::FromMilliseconds(800);
+ base::TimeDelta video_preroll_intvl = base::TimeDelta::FromMilliseconds(500);
+ base::TimeDelta preroll_timeout = base::TimeDelta::FromMilliseconds(3000);
+
+ demuxer_->SetVideoPrerollInterval(video_preroll_intvl);
+
+ scoped_ptr<AudioFactory> audio_factory(new AudioFactory(duration));
+
+ scoped_ptr<VideoFactory> video_factory(new VideoFactory(duration));
+ video_factory->RequestConfigChange(config_change_position);
+
+ ASSERT_TRUE(StartAVSeekAndPreroll(audio_factory.Pass(), video_factory.Pass(),
+ seek_position, kAlwaysReconfigVideo,
+ "AVPrerollAcrossVideoConfigChange"));
+
+ // Wait till preroll finishes and the real playback starts.
+ EXPECT_TRUE(
+ WaitForCondition(base::Bind(&MockMediaPlayerManager::IsPlaybackStarted,
+ base::Unretained(&manager_)),
+ preroll_timeout));
+
+ // The presense of config change should not affect preroll behavior:
+
+ // Ensure that the first audio and video pts are close to each other and are
+ // reported at the close moments in time.
+
+ EXPECT_TRUE(manager_.HasFirstFrame(DemuxerStream::AUDIO));
+ EXPECT_TRUE(WaitForCondition(
+ base::Bind(&MockMediaPlayerManager::HasFirstFrame,
+ base::Unretained(&manager_), DemuxerStream::VIDEO)));
+
+ EXPECT_TRUE(AlmostEqual(manager_.FirstFramePTS(DemuxerStream::AUDIO),
+ manager_.FirstFramePTS(DemuxerStream::VIDEO), 25));
+
+ EXPECT_TRUE(AlmostEqual(manager_.FirstFrameTime(DemuxerStream::AUDIO),
+ manager_.FirstFrameTime(DemuxerStream::VIDEO), 50));
+
+ // The playback should start at |seek_position|
+ EXPECT_TRUE(AlmostEqual(seek_position, manager_.pts_stat_.min(), 25));
+}
+
+TEST_F(MediaCodecPlayerTest, AVPrerollAcrossAudioConfigChange) {
+ SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
+
+ // Test that preroll continues if interrupted by video config change.
+
+ base::TimeDelta duration = base::TimeDelta::FromMilliseconds(1200);
+ base::TimeDelta seek_position = base::TimeDelta::FromMilliseconds(1000);
+ base::TimeDelta config_change_position =
+ base::TimeDelta::FromMilliseconds(800);
+ base::TimeDelta audio_preroll_intvl = base::TimeDelta::FromMilliseconds(400);
+ base::TimeDelta preroll_timeout = base::TimeDelta::FromMilliseconds(3000);
+
+ demuxer_->SetAudioPrerollInterval(audio_preroll_intvl);
+
+ scoped_ptr<AudioFactory> audio_factory(new AudioFactory(duration));
+ audio_factory->RequestConfigChange(config_change_position);
+
+ scoped_ptr<VideoFactory> video_factory(new VideoFactory(duration));
+
+ ASSERT_TRUE(StartAVSeekAndPreroll(audio_factory.Pass(), video_factory.Pass(),
+ seek_position, kAlwaysReconfigAudio,
+ "AVPrerollAcrossAudioConfigChange"));
+
+ // Wait till preroll finishes and the real playback starts.
+ EXPECT_TRUE(
+ WaitForCondition(base::Bind(&MockMediaPlayerManager::IsPlaybackStarted,
+ base::Unretained(&manager_)),
+ preroll_timeout));
+
+ // The presense of config change should not affect preroll behavior:
+
+ // Ensure that the first audio and video pts are close to each other and are
+ // reported at the close moments in time.
+
+ EXPECT_TRUE(manager_.HasFirstFrame(DemuxerStream::AUDIO));
+ EXPECT_TRUE(WaitForCondition(
+ base::Bind(&MockMediaPlayerManager::HasFirstFrame,
+ base::Unretained(&manager_), DemuxerStream::VIDEO)));
+
+ // Wait for some more video
+ WaitForDelay(base::TimeDelta::FromMilliseconds(100));
+
+ EXPECT_TRUE(AlmostEqual(manager_.FirstFramePTS(DemuxerStream::AUDIO),
+ manager_.FirstFramePTS(DemuxerStream::VIDEO), 25));
+
+ // Because for video preroll the first frame after preroll renders during the
+ // preroll stage (and not after the preroll is done) we cannot guarantee the
+ // proper video timimg in this test.
+ // TODO(timav): maybe we should not call the testing callback for
+ // kRenderAfterPreroll for video (for audio we already do not call).
+ // EXPECT_TRUE(AlmostEqual(manager_.FirstFrameTime(DemuxerStream::AUDIO),
+ // manager_.FirstFrameTime(DemuxerStream::VIDEO), 50));
+
+ // The playback should start at |seek_position|
+ EXPECT_TRUE(AlmostEqual(seek_position, manager_.pts_stat_.min(), 25));
}
} // namespace media
diff --git a/chromium/media/base/android/media_codec_video_decoder.cc b/chromium/media/base/android/media_codec_video_decoder.cc
index 2f8c11e2670..36940870a07 100644
--- a/chromium/media/base/android/media_codec_video_decoder.cc
+++ b/chromium/media/base/android/media_codec_video_decoder.cc
@@ -7,8 +7,9 @@
#include "base/bind.h"
#include "base/logging.h"
#include "media/base/android/media_codec_bridge.h"
-#include "media/base/buffers.h"
+#include "media/base/android/media_statistics.h"
#include "media/base/demuxer_stream.h"
+#include "media/base/timestamp_constants.h"
namespace media {
@@ -18,25 +19,31 @@ const int kDelayForStandAloneEOS = 2; // milliseconds
MediaCodecVideoDecoder::MediaCodecVideoDecoder(
const scoped_refptr<base::SingleThreadTaskRunner>& media_task_runner,
+ FrameStatistics* frame_statistics,
const base::Closure& request_data_cb,
const base::Closure& starvation_cb,
+ const base::Closure& decoder_drained_cb,
const base::Closure& stop_done_cb,
+ const base::Closure& waiting_for_decryption_key_cb,
const base::Closure& error_cb,
const SetTimeCallback& update_current_time_cb,
- const VideoSizeChangedCallback& video_size_changed_cb,
- const base::Closure& codec_created_cb)
- : MediaCodecDecoder(media_task_runner,
+ const VideoSizeChangedCallback& video_size_changed_cb)
+ : MediaCodecDecoder("VideoDecoder",
+ media_task_runner,
+ frame_statistics,
request_data_cb,
starvation_cb,
+ decoder_drained_cb,
stop_done_cb,
- error_cb,
- "VideoDecoder"),
+ waiting_for_decryption_key_cb,
+ error_cb),
+ is_protected_surface_required_(false),
update_current_time_cb_(update_current_time_cb),
- video_size_changed_cb_(video_size_changed_cb),
- codec_created_cb_(codec_created_cb) {
+ video_size_changed_cb_(video_size_changed_cb) {
}
MediaCodecVideoDecoder::~MediaCodecVideoDecoder() {
+ DCHECK(media_task_runner_->BelongsToCurrentThread());
DVLOG(1) << "VideoDecoder::~VideoDecoder()";
ReleaseDecoderResources();
}
@@ -52,8 +59,6 @@ bool MediaCodecVideoDecoder::HasStream() const {
}
void MediaCodecVideoDecoder::SetDemuxerConfigs(const DemuxerConfigs& configs) {
- DCHECK(media_task_runner_->BelongsToCurrentThread());
-
DVLOG(1) << class_name() << "::" << __FUNCTION__ << " " << configs;
configs_ = configs;
@@ -65,46 +70,74 @@ void MediaCodecVideoDecoder::SetDemuxerConfigs(const DemuxerConfigs& configs) {
}
}
+bool MediaCodecVideoDecoder::IsContentEncrypted() const {
+ // Make sure SetDemuxerConfigs() as been called.
+ DCHECK(configs_.video_codec != kUnknownVideoCodec);
+ return configs_.is_video_encrypted;
+}
+
void MediaCodecVideoDecoder::ReleaseDecoderResources() {
DCHECK(media_task_runner_->BelongsToCurrentThread());
-
DVLOG(1) << class_name() << "::" << __FUNCTION__;
- MediaCodecDecoder::ReleaseDecoderResources();
+ DoEmergencyStop();
+
+ ReleaseMediaCodec();
+
surface_ = gfx::ScopedJavaSurface();
+}
+
+void MediaCodecVideoDecoder::ReleaseMediaCodec() {
+ DCHECK(media_task_runner_->BelongsToCurrentThread());
+
+ MediaCodecDecoder::ReleaseMediaCodec();
delayed_buffers_.clear();
}
-void MediaCodecVideoDecoder::SetPendingSurface(gfx::ScopedJavaSurface surface) {
+void MediaCodecVideoDecoder::SetVideoSurface(gfx::ScopedJavaSurface surface) {
DCHECK(media_task_runner_->BelongsToCurrentThread());
+ DVLOG(1) << class_name() << "::" << __FUNCTION__
+ << (surface.IsEmpty() ? " empty" : " non-empty");
+
surface_ = surface.Pass();
- if (surface_.IsEmpty()) {
- // Synchronously stop decoder thread and release MediaCodec
- ReleaseDecoderResources();
- }
+ needs_reconfigure_ = true;
}
-bool MediaCodecVideoDecoder::HasPendingSurface() const {
+bool MediaCodecVideoDecoder::HasVideoSurface() const {
DCHECK(media_task_runner_->BelongsToCurrentThread());
return !surface_.IsEmpty();
}
+void MediaCodecVideoDecoder::SetProtectedSurfaceRequired(bool value) {
+ DCHECK(media_task_runner_->BelongsToCurrentThread());
+
+ is_protected_surface_required_ = value;
+}
+
+bool MediaCodecVideoDecoder::IsProtectedSurfaceRequired() const {
+ DCHECK(media_task_runner_->BelongsToCurrentThread());
+
+ return is_protected_surface_required_;
+}
+
bool MediaCodecVideoDecoder::IsCodecReconfigureNeeded(
- const DemuxerConfigs& curr,
const DemuxerConfigs& next) const {
- if (curr.video_codec != next.video_codec ||
- curr.is_video_encrypted != next.is_video_encrypted) {
+ if (always_reconfigure_for_tests_)
+ return true;
+
+ if (configs_.video_codec != next.video_codec ||
+ configs_.is_video_encrypted != next.is_video_encrypted) {
return true;
}
// Only size changes below this point
- if (curr.video_size.width() == next.video_size.width() &&
- curr.video_size.height() == next.video_size.height()) {
- return false; // i.e. curr == next
+ if (configs_.video_size.width() == next.video_size.width() &&
+ configs_.video_size.height() == next.video_size.height()) {
+ return false; // i.e. configs_ == next
}
return !static_cast<VideoCodecBridge*>(media_codec_bridge_.get())
@@ -112,7 +145,8 @@ bool MediaCodecVideoDecoder::IsCodecReconfigureNeeded(
next.video_size.height());
}
-MediaCodecDecoder::ConfigStatus MediaCodecVideoDecoder::ConfigureInternal() {
+MediaCodecDecoder::ConfigStatus MediaCodecVideoDecoder::ConfigureInternal(
+ jobject media_crypto) {
DCHECK(media_task_runner_->BelongsToCurrentThread());
DVLOG(1) << class_name() << "::" << __FUNCTION__;
@@ -120,52 +154,57 @@ MediaCodecDecoder::ConfigStatus MediaCodecVideoDecoder::ConfigureInternal() {
// If we cannot find a key frame in cache, the browser seek is needed.
if (!au_queue_.RewindToLastKeyFrame()) {
DVLOG(1) << class_name() << "::" << __FUNCTION__ << " key frame required";
-
- // The processing of CONFIG_KEY_FRAME_REQUIRED is not implemented yet,
- // return error for now.
- // TODO(timav): Replace this with the following line together with
- // implementing the browser seek:
- // return CONFIG_KEY_FRAME_REQUIRED;
- return CONFIG_FAILURE;
+ return kConfigKeyFrameRequired;
}
- // TODO(timav): implement DRM.
- // bool is_secure = is_content_encrypted() && drm_bridge() &&
- // drm_bridge()->IsProtectedSurfaceRequired();
-
- bool is_secure = false; // DRM is not implemented
+ if (configs_.video_codec == kUnknownVideoCodec) {
+ DVLOG(0) << class_name() << "::" << __FUNCTION__
+ << ": configuration parameters are required";
+ return kConfigFailure;
+ }
if (surface_.IsEmpty()) {
- DVLOG(0) << class_name() << "::" << __FUNCTION__ << " surface required";
- return CONFIG_FAILURE;
+ DVLOG(0) << class_name() << "::" << __FUNCTION__ << ": surface is required";
+ return kConfigFailure;
}
+ bool is_secure = IsContentEncrypted() && is_protected_surface_required_;
+
media_codec_bridge_.reset(VideoCodecBridge::CreateDecoder(
configs_.video_codec,
is_secure,
configs_.video_size,
surface_.j_surface().obj(),
- GetMediaCrypto().obj()));
+ media_crypto));
if (!media_codec_bridge_) {
- DVLOG(1) << class_name() << "::" << __FUNCTION__ << " failed";
- return CONFIG_FAILURE;
+ DVLOG(0) << class_name() << "::" << __FUNCTION__
+ << " failed: cannot create video codec";
+ return kConfigFailure;
}
- DVLOG(1) << class_name() << "::" << __FUNCTION__ << " succeeded";
+ DVLOG(0) << class_name() << "::" << __FUNCTION__ << " succeeded";
- media_task_runner_->PostTask(FROM_HERE, codec_created_cb_);
+ if (!codec_created_for_tests_cb_.is_null())
+ media_task_runner_->PostTask(FROM_HERE, codec_created_for_tests_cb_);
- return CONFIG_OK;
+ return kConfigOk;
}
-void MediaCodecVideoDecoder::SynchronizePTSWithTime(
- base::TimeDelta current_time) {
+void MediaCodecVideoDecoder::AssociateCurrentTimeWithPTS(base::TimeDelta pts) {
DCHECK(media_task_runner_->BelongsToCurrentThread());
+ DVLOG(1) << class_name() << "::" << __FUNCTION__ << " pts:" << pts;
+
start_time_ticks_ = base::TimeTicks::Now();
- start_pts_ = current_time;
- last_seen_pts_ = current_time;
+ start_pts_ = pts;
+ last_seen_pts_ = pts;
+}
+
+void MediaCodecVideoDecoder::DissociatePTSFromTime() {
+ DCHECK(media_task_runner_->BelongsToCurrentThread());
+
+ start_pts_ = last_seen_pts_ = kNoTimestamp();
}
void MediaCodecVideoDecoder::OnOutputFormatChanged() {
@@ -184,15 +223,15 @@ void MediaCodecVideoDecoder::OnOutputFormatChanged() {
}
void MediaCodecVideoDecoder::Render(int buffer_index,
+ size_t offset,
size_t size,
- bool render_output,
+ RenderMode render_mode,
base::TimeDelta pts,
bool eos_encountered) {
DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
DVLOG(2) << class_name() << "::" << __FUNCTION__ << " pts:" << pts
- << " index:" << buffer_index << " size:" << size
- << (eos_encountered ? " EOS" : "");
+ << (eos_encountered ? " EOS " : " ") << AsString(render_mode);
// Normally EOS comes as a separate access unit that does not have data,
// the corresponding |size| will be 0.
@@ -206,27 +245,60 @@ void MediaCodecVideoDecoder::Render(int buffer_index,
last_seen_pts_ = pts;
}
- if (!render_output) {
- ReleaseOutputBuffer(buffer_index, pts, size, false, eos_encountered);
- return;
+ // Do not update time for stand-alone EOS.
+ const bool update_time = !(eos_encountered && size == 0u);
+
+ // For video we simplify the preroll operation and render the first frame
+ // after preroll during the preroll phase, i.e. without waiting for audio
+ // stream to finish prerolling.
+ switch (render_mode) {
+ case kRenderSkip:
+ ReleaseOutputBuffer(buffer_index, pts, false, false, eos_encountered);
+ return;
+ case kRenderAfterPreroll:
+ // We get here in the preroll phase. Render now as explained above.
+ // |start_pts_| is not set yet, thus we cannot calculate |time_to_render|.
+ ReleaseOutputBuffer(buffer_index, pts, (size > 0), update_time,
+ eos_encountered);
+ return;
+ case kRenderNow:
+ break;
}
+ DCHECK_EQ(kRenderNow, render_mode);
+ DCHECK_NE(kNoTimestamp(), start_pts_); // start_pts_ must be set
+
base::TimeDelta time_to_render =
pts - (base::TimeTicks::Now() - start_time_ticks_ + start_pts_);
+ DVLOG(2) << class_name() << "::" << __FUNCTION__ << " pts:" << pts
+ << " ticks delta:" << (base::TimeTicks::Now() - start_time_ticks_)
+ << " time_to_render:" << time_to_render;
+
+ const bool render = (size > 0);
+
+ if (render)
+ frame_statistics_->IncrementFrameCount();
+
if (time_to_render < base::TimeDelta()) {
+ if (render) {
+ DVLOG(2) << class_name() << "::" << __FUNCTION__
+ << " LATE FRAME delay:" << (-1) * time_to_render;
+
+ frame_statistics_->IncrementLateFrameCount();
+ }
+
// Skip late frames
- ReleaseOutputBuffer(buffer_index, pts, size, false, eos_encountered);
+ ReleaseOutputBuffer(buffer_index, pts, false, update_time, eos_encountered);
return;
}
delayed_buffers_.insert(buffer_index);
- bool do_render = size > 0;
decoder_thread_.task_runner()->PostDelayedTask(
FROM_HERE, base::Bind(&MediaCodecVideoDecoder::ReleaseOutputBuffer,
- base::Unretained(this), buffer_index, pts,
- size, do_render, eos_encountered),
+ base::Unretained(this), buffer_index, pts, render,
+ update_time, eos_encountered),
time_to_render);
}
@@ -241,18 +313,33 @@ void MediaCodecVideoDecoder::ReleaseDelayedBuffers() {
// Called when there is no decoder thread
for (int index : delayed_buffers_)
media_codec_bridge_->ReleaseOutputBuffer(index, false);
+
delayed_buffers_.clear();
}
+#ifndef NDEBUG
+void MediaCodecVideoDecoder::VerifyUnitIsKeyFrame(
+ const AccessUnit* unit) const {
+ // The first video frame in a sequence must be a key frame or stand-alone EOS.
+ DCHECK(unit);
+ bool stand_alone_eos = unit->is_end_of_stream && unit->data.empty();
+ DCHECK(stand_alone_eos || unit->is_key_frame);
+}
+#endif
+
void MediaCodecVideoDecoder::ReleaseOutputBuffer(int buffer_index,
base::TimeDelta pts,
- size_t size,
bool render,
+ bool update_time,
bool eos_encountered) {
DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
DVLOG(2) << class_name() << "::" << __FUNCTION__ << " pts:" << pts;
+ // Do not render if we are in emergency stop, there might be no surface.
+ if (InEmergencyStop())
+ render = false;
+
media_codec_bridge_->ReleaseOutputBuffer(buffer_index, render);
delayed_buffers_.erase(buffer_index);
@@ -261,9 +348,9 @@ void MediaCodecVideoDecoder::ReleaseOutputBuffer(int buffer_index,
// |update_current_time_cb_| might be null if there is audio stream.
// Do not update current time for stand-alone EOS frames.
- if (!update_current_time_cb_.is_null() && !(eos_encountered && !size)) {
- media_task_runner_->PostTask(FROM_HERE,
- base::Bind(update_current_time_cb_, pts, pts));
+ if (!update_current_time_cb_.is_null() && update_time) {
+ media_task_runner_->PostTask(
+ FROM_HERE, base::Bind(update_current_time_cb_, pts, pts, false));
}
}
diff --git a/chromium/media/base/android/media_codec_video_decoder.h b/chromium/media/base/android/media_codec_video_decoder.h
index 1bec99ef768..072a61cd739 100644
--- a/chromium/media/base/android/media_codec_video_decoder.h
+++ b/chromium/media/base/android/media_codec_video_decoder.h
@@ -28,50 +28,65 @@ class MediaCodecVideoDecoder : public MediaCodecDecoder {
// decoder can use them.
MediaCodecVideoDecoder(
const scoped_refptr<base::SingleThreadTaskRunner>& media_runner,
+ FrameStatistics* frame_statistics,
const base::Closure& request_data_cb,
const base::Closure& starvation_cb,
+ const base::Closure& drained_requested_cb,
const base::Closure& stop_done_cb,
+ const base::Closure& waiting_for_decryption_key_cb,
const base::Closure& error_cb,
const SetTimeCallback& update_current_time_cb,
- const VideoSizeChangedCallback& video_size_changed_cb,
- const base::Closure& codec_created_cb);
+ const VideoSizeChangedCallback& video_size_changed_cb);
~MediaCodecVideoDecoder() override;
const char* class_name() const override;
bool HasStream() const override;
void SetDemuxerConfigs(const DemuxerConfigs& configs) override;
+ bool IsContentEncrypted() const override;
void ReleaseDecoderResources() override;
+ void ReleaseMediaCodec() override;
// Stores the video surface to use with upcoming Configure()
- void SetPendingSurface(gfx::ScopedJavaSurface surface);
+ void SetVideoSurface(gfx::ScopedJavaSurface surface);
// Returns true if there is a video surface to use.
- bool HasPendingSurface() const;
+ bool HasVideoSurface() const;
+
+ // Sets whether protected surface is needed for the currently used DRM.
+ void SetProtectedSurfaceRequired(bool value);
+
+ // Returns true if protected surface is needed.
+ bool IsProtectedSurfaceRequired() const;
protected:
- bool IsCodecReconfigureNeeded(const DemuxerConfigs& curr,
- const DemuxerConfigs& next) const override;
- ConfigStatus ConfigureInternal() override;
- void SynchronizePTSWithTime(base::TimeDelta current_time) override;
+ bool IsCodecReconfigureNeeded(const DemuxerConfigs& next) const override;
+ ConfigStatus ConfigureInternal(jobject media_crypto) override;
+ void AssociateCurrentTimeWithPTS(base::TimeDelta pts) override;
+ void DissociatePTSFromTime() override;
void OnOutputFormatChanged() override;
void Render(int buffer_index,
+ size_t offset,
size_t size,
- bool render_output,
+ RenderMode render_mode,
base::TimeDelta pts,
bool eos_encountered) override;
int NumDelayedRenderTasks() const override;
void ReleaseDelayedBuffers() override;
+#ifndef NDEBUG
+ void VerifyUnitIsKeyFrame(const AccessUnit* unit) const override;
+#endif
+
private:
// A helper method that releases output buffers and does
// post-release checks. Might be called by Render() or posted
// for later execution.
void ReleaseOutputBuffer(int buffer_index,
base::TimeDelta pts,
- size_t size,
bool render,
+ bool update_time,
bool eos_encountered);
// Data.
@@ -82,15 +97,15 @@ class MediaCodecVideoDecoder : public MediaCodecDecoder {
// Video surface that we render to.
gfx::ScopedJavaSurface surface_;
+ // Flags that indicates whether we need protected surface.
+ bool is_protected_surface_required_;
+
// Reports current playback time to the callee.
SetTimeCallback update_current_time_cb_;
// Informs the callee that video size is changed.
VideoSizeChangedCallback video_size_changed_cb_;
- // Informs the callee that the MediaCodec is created.
- base::Closure codec_created_cb_;
-
// Current video size to be sent with |video_size_changed_cb_|.
gfx::Size video_size_;
diff --git a/chromium/media/base/android/media_decoder_job.cc b/chromium/media/base/android/media_decoder_job.cc
index 005634928ff..3bf5279db91 100644
--- a/chromium/media/base/android/media_decoder_job.cc
+++ b/chromium/media/base/android/media_decoder_job.cc
@@ -11,7 +11,7 @@
#include "base/trace_event/trace_event.h"
#include "media/base/android/media_drm_bridge.h"
#include "media/base/bind_to_current_loop.h"
-#include "media/base/buffers.h"
+#include "media/base/timestamp_constants.h"
namespace media {
@@ -88,7 +88,7 @@ void MediaDecoderJob::OnDataReceived(const DemuxerData& data) {
if (stop_decode_pending_) {
DCHECK(is_decoding());
- OnDecodeCompleted(MEDIA_CODEC_ABORT, kNoTimestamp(), kNoTimestamp());
+ OnDecodeCompleted(MEDIA_CODEC_ABORT, false, kNoTimestamp(), kNoTimestamp());
return;
}
@@ -361,7 +361,7 @@ void MediaDecoderJob::DecodeCurrentAccessUnit(
// MEDIA_CODEC_OUTPUT_FORMAT_CHANGED status will come later.
ui_task_runner_->PostTask(FROM_HERE, base::Bind(
&MediaDecoderJob::OnDecodeCompleted, base::Unretained(this),
- MEDIA_CODEC_OK, kNoTimestamp(), kNoTimestamp()));
+ MEDIA_CODEC_OK, false, kNoTimestamp(), kNoTimestamp()));
return;
}
// Start draining the decoder so that all the remaining frames are
@@ -394,9 +394,10 @@ void MediaDecoderJob::DecodeInternal(
DVLOG(1) << "DecodeInternal needs flush.";
input_eos_encountered_ = false;
output_eos_encountered_ = false;
+ input_buf_index_ = -1;
MediaCodecStatus reset_status = media_codec_bridge_->Reset();
if (MEDIA_CODEC_OK != reset_status) {
- callback.Run(reset_status, kNoTimestamp(), kNoTimestamp());
+ callback.Run(reset_status, false, kNoTimestamp(), kNoTimestamp());
return;
}
}
@@ -408,7 +409,7 @@ void MediaDecoderJob::DecodeInternal(
// For aborted access unit, just skip it and inform the player.
if (unit.status == DemuxerStream::kAborted) {
- callback.Run(MEDIA_CODEC_ABORT, kNoTimestamp(), kNoTimestamp());
+ callback.Run(MEDIA_CODEC_ABORT, false, kNoTimestamp(), kNoTimestamp());
return;
}
@@ -416,7 +417,7 @@ void MediaDecoderJob::DecodeInternal(
if (unit.is_end_of_stream || unit.data.empty()) {
input_eos_encountered_ = true;
output_eos_encountered_ = true;
- callback.Run(MEDIA_CODEC_OUTPUT_END_OF_STREAM, kNoTimestamp(),
+ callback.Run(MEDIA_CODEC_OUTPUT_END_OF_STREAM, false, kNoTimestamp(),
kNoTimestamp());
return;
}
@@ -429,8 +430,12 @@ void MediaDecoderJob::DecodeInternal(
input_status = QueueInputBuffer(unit);
if (input_status == MEDIA_CODEC_INPUT_END_OF_STREAM) {
input_eos_encountered_ = true;
+ } else if (input_status == MEDIA_CODEC_DEQUEUE_INPUT_AGAIN_LATER) {
+ // In some cases, all buffers must be released to codec before format
+ // change can be resolved. Context: b/21786703
+ DVLOG(1) << "dequeueInputBuffer gave AGAIN_LATER, dequeue output buffers";
} else if (input_status != MEDIA_CODEC_OK) {
- callback.Run(input_status, kNoTimestamp(), kNoTimestamp());
+ callback.Run(input_status, false, kNoTimestamp(), kNoTimestamp());
return;
}
}
@@ -466,7 +471,7 @@ void MediaDecoderJob::DecodeInternal(
status != MEDIA_CODEC_DEQUEUE_OUTPUT_AGAIN_LATER);
if (status != MEDIA_CODEC_OK) {
- callback.Run(status, kNoTimestamp(), kNoTimestamp());
+ callback.Run(status, false, kNoTimestamp(), kNoTimestamp());
return;
}
@@ -489,12 +494,10 @@ void MediaDecoderJob::DecodeInternal(
decoder_task_runner_->PostDelayedTask(
FROM_HERE,
base::Bind(&MediaDecoderJob::ReleaseOutputBuffer,
- base::Unretained(this),
- buffer_index,
- size,
+ base::Unretained(this), buffer_index, offset, size,
render_output,
- presentation_timestamp,
- base::Bind(callback, status)),
+ false, // this is not a late frame
+ presentation_timestamp, base::Bind(callback, status)),
time_to_render);
return;
}
@@ -512,14 +515,19 @@ void MediaDecoderJob::DecodeInternal(
} else {
presentation_timestamp = kNoTimestamp();
}
+
ReleaseOutputCompletionCallback completion_callback = base::Bind(
callback, status);
- ReleaseOutputBuffer(buffer_index, size, render_output, presentation_timestamp,
- completion_callback);
+
+ const bool is_late_frame = (time_to_render < base::TimeDelta());
+ ReleaseOutputBuffer(buffer_index, offset, size, render_output, is_late_frame,
+ presentation_timestamp, completion_callback);
}
void MediaDecoderJob::OnDecodeCompleted(
- MediaCodecStatus status, base::TimeDelta current_presentation_timestamp,
+ MediaCodecStatus status,
+ bool is_late_frame,
+ base::TimeDelta current_presentation_timestamp,
base::TimeDelta max_presentation_timestamp) {
DCHECK(ui_task_runner_->BelongsToCurrentThread());
@@ -581,8 +589,9 @@ void MediaDecoderJob::OnDecodeCompleted(
}
stop_decode_pending_ = false;
- base::ResetAndReturn(&decode_cb_).Run(
- status, current_presentation_timestamp, max_presentation_timestamp);
+ base::ResetAndReturn(&decode_cb_)
+ .Run(status, is_late_frame, current_presentation_timestamp,
+ max_presentation_timestamp);
}
const AccessUnit& MediaDecoderJob::CurrentAccessUnit() const {
diff --git a/chromium/media/base/android/media_decoder_job.h b/chromium/media/base/android/media_decoder_job.h
index 329512f2c6b..752ac8820fe 100644
--- a/chromium/media/base/android/media_decoder_job.h
+++ b/chromium/media/base/android/media_decoder_job.h
@@ -39,17 +39,20 @@ class MediaDecoderJob {
};
// Callback when a decoder job finishes its work. Args: whether decode
- // finished successfully, current presentation time, max presentation time.
+ // finished successfully, a flag whether the frame is late for statistics,
+ // cacurrent presentation time, max presentation time.
// If the current presentation time is equal to kNoTimestamp(), the decoder
// job skipped rendering of the decoded output and the callback target should
- // ignore the timestamps provided.
- typedef base::Callback<void(MediaCodecStatus, base::TimeDelta,
+ // ignore the timestamps provided. The late frame flag has no meaning in this
+ // case.
+ typedef base::Callback<void(MediaCodecStatus, bool, base::TimeDelta,
base::TimeDelta)> DecoderCallback;
// Callback when a decoder job finishes releasing the output buffer.
- // Args: current presentation time, max presentation time.
+ // Args: whether the frame is a late frame, current presentation time, max
+ // presentation time.
// If the current presentation time is equal to kNoTimestamp(), the callback
- // target should ignore the timestamps provided.
- typedef base::Callback<void(base::TimeDelta, base::TimeDelta)>
+ // target should ignore the timestamps provided and whether it is late.
+ typedef base::Callback<void(bool, base::TimeDelta, base::TimeDelta)>
ReleaseOutputCompletionCallback;
virtual ~MediaDecoderJob();
@@ -108,9 +111,6 @@ class MediaDecoderJob {
bool prerolling() const { return prerolling_; }
- // Returns true if this object has data to decode.
- bool HasData() const;
-
protected:
// Creates a new MediaDecoderJob instance.
// |decoder_task_runner| - Thread on which the decoder task will run.
@@ -124,10 +124,14 @@ class MediaDecoderJob {
// Release the output buffer at index |output_buffer_index| and render it if
// |render_output| is true. Upon completion, |callback| will be called.
+ // |is_late_frame| can be passed with the |callback| if the implementation
+ // does not calculate it itself.
virtual void ReleaseOutputBuffer(
int output_buffer_index,
+ size_t offset,
size_t size,
bool render_output,
+ bool is_late_frame,
base::TimeDelta current_presentation_timestamp,
const ReleaseOutputCompletionCallback& callback) = 0;
@@ -165,6 +169,9 @@ class MediaDecoderJob {
// Queues an access unit into |media_codec_bridge_|'s input buffer.
MediaCodecStatus QueueInputBuffer(const AccessUnit& unit);
+ // Returns true if this object has data to decode.
+ bool HasData() const;
+
// Initiates a request for more data.
// |done_cb| is called when more data is available in |received_data_|.
void RequestData(const base::Closure& done_cb);
@@ -195,6 +202,7 @@ class MediaDecoderJob {
// Completes any pending job destruction or any pending decode stop. If
// destruction was not pending, passes its arguments to |decode_cb_|.
void OnDecodeCompleted(MediaCodecStatus status,
+ bool is_late_frame,
base::TimeDelta current_presentation_timestamp,
base::TimeDelta max_presentation_timestamp);
diff --git a/chromium/media/base/android/media_drm_bridge.cc b/chromium/media/base/android/media_drm_bridge.cc
index 4200635ef5e..e7186787b08 100644
--- a/chromium/media/base/android/media_drm_bridge.cc
+++ b/chromium/media/base/android/media_drm_bridge.cc
@@ -9,6 +9,7 @@
#include "base/android/build_info.h"
#include "base/android/jni_array.h"
#include "base/android/jni_string.h"
+#include "base/bind.h"
#include "base/callback_helpers.h"
#include "base/containers/hash_tables.h"
#include "base/lazy_instance.h"
@@ -16,6 +17,7 @@
#include "base/logging.h"
#include "base/single_thread_task_runner.h"
#include "base/stl_util.h"
+#include "base/strings/string_number_conversions.h"
#include "base/strings/string_util.h"
#include "base/sys_byteorder.h"
#include "base/sys_info.h"
@@ -23,6 +25,7 @@
#include "jni/MediaDrmBridge_jni.h"
#include "media/base/android/media_client_android.h"
#include "media/base/android/media_drm_bridge_delegate.h"
+#include "media/base/android/media_task_runner.h"
#include "media/base/cdm_key_information.h"
#include "widevine_cdm_version.h" // In SHARED_INTERMEDIATE_DIR.
@@ -31,16 +34,30 @@ using base::android::AttachCurrentThread;
using base::android::ConvertUTF8ToJavaString;
using base::android::ConvertJavaStringToUTF8;
using base::android::JavaByteArrayToByteVector;
+using base::android::ScopedJavaGlobalRef;
using base::android::ScopedJavaLocalRef;
namespace media {
namespace {
-// DrmBridge supports session expiration event but doesn't provide detailed
-// status for each key ID, which is required by the EME spec. Use a dummy key ID
-// here to report session expiration info.
-const char kDummyKeyId[] = "Dummy Key Id";
+// These must be in sync with Android MediaDrm REQUEST_TYPE_XXX constants!
+// https://developer.android.com/reference/android/media/MediaDrm.KeyRequest.html
+enum class RequestType {
+ REQUEST_TYPE_INITIAL = 0,
+ REQUEST_TYPE_RENEWAL = 1,
+ REQUEST_TYPE_RELEASE = 2,
+};
+
+// These must be in sync with Android MediaDrm KEY_STATUS_XXX constants:
+// https://developer.android.com/reference/android/media/MediaDrm.KeyStatus.html
+enum class KeyStatus {
+ KEY_STATUS_USABLE = 0,
+ KEY_STATUS_EXPIRED = 1,
+ KEY_STATUS_OUTPUT_NOT_ALLOWED = 2,
+ KEY_STATUS_PENDING = 3,
+ KEY_STATUS_INTERNAL_ERROR = 4,
+};
// Returns string session ID from jbyteArray (byte[] in Java).
std::string GetSessionId(JNIEnv* env, jbyteArray j_session_id) {
@@ -72,6 +89,39 @@ std::string ConvertInitDataType(media::EmeInitDataType init_data_type) {
}
}
+MediaKeys::MessageType GetMessageType(RequestType request_type) {
+ switch (request_type) {
+ case RequestType::REQUEST_TYPE_INITIAL:
+ return MediaKeys::LICENSE_REQUEST;
+ case RequestType::REQUEST_TYPE_RENEWAL:
+ return MediaKeys::LICENSE_RENEWAL;
+ case RequestType::REQUEST_TYPE_RELEASE:
+ return MediaKeys::LICENSE_RELEASE;
+ }
+
+ NOTREACHED();
+ return MediaKeys::LICENSE_REQUEST;
+}
+
+CdmKeyInformation::KeyStatus ConvertKeyStatus(KeyStatus key_status) {
+ switch (key_status) {
+ case KeyStatus::KEY_STATUS_USABLE:
+ return CdmKeyInformation::USABLE;
+ case KeyStatus::KEY_STATUS_EXPIRED:
+ return CdmKeyInformation::EXPIRED;
+ case KeyStatus::KEY_STATUS_OUTPUT_NOT_ALLOWED:
+ return CdmKeyInformation::OUTPUT_RESTRICTED;
+ case KeyStatus::KEY_STATUS_PENDING:
+ // TODO(xhwang): This should probably be renamed to "PENDING".
+ return CdmKeyInformation::KEY_STATUS_PENDING;
+ case KeyStatus::KEY_STATUS_INTERNAL_ERROR:
+ return CdmKeyInformation::INTERNAL_ERROR;
+ }
+
+ NOTREACHED();
+ return CdmKeyInformation::INTERNAL_ERROR;
+}
+
class KeySystemManager {
public:
KeySystemManager();
@@ -207,6 +257,7 @@ bool MediaDrmBridge::IsKeySystemSupportedWithType(
return IsKeySystemSupportedWithTypeImpl(key_system, container_mime_type);
}
+// static
bool MediaDrmBridge::RegisterMediaDrmBridge(JNIEnv* env) {
return RegisterNativesImpl(env);
}
@@ -216,12 +267,21 @@ MediaDrmBridge::MediaDrmBridge(
const SessionMessageCB& session_message_cb,
const SessionClosedCB& session_closed_cb,
const LegacySessionErrorCB& legacy_session_error_cb,
- const SessionKeysChangeCB& session_keys_change_cb)
+ const SessionKeysChangeCB& session_keys_change_cb,
+ const SessionExpirationUpdateCB& session_expiration_update_cb)
: scheme_uuid_(scheme_uuid),
session_message_cb_(session_message_cb),
session_closed_cb_(session_closed_cb),
legacy_session_error_cb_(legacy_session_error_cb),
- session_keys_change_cb_(session_keys_change_cb) {
+ session_keys_change_cb_(session_keys_change_cb),
+ session_expiration_update_cb_(session_expiration_update_cb),
+ cdm_promise_adapter_(new CdmPromiseAdapter()),
+ ui_task_runner_(base::ThreadTaskRunnerHandle::Get()),
+ use_media_thread_(UseMediaThreadForMediaPlayback()),
+ media_weak_factory_(this),
+ ui_weak_factory_(this) {
+ DVLOG(1) << __FUNCTION__;
+
JNIEnv* env = AttachCurrentThread();
CHECK(env);
@@ -231,23 +291,46 @@ MediaDrmBridge::MediaDrmBridge(
env, j_scheme_uuid.obj(), reinterpret_cast<intptr_t>(this)));
}
-MediaDrmBridge::~MediaDrmBridge() {
+void MediaDrmBridge::DeleteOnCorrectThread() {
+ DCHECK(ui_task_runner_->BelongsToCurrentThread());
+ DVLOG(1) << __FUNCTION__;
+
JNIEnv* env = AttachCurrentThread();
- player_tracker_.NotifyCdmUnset();
if (!j_media_drm_.is_null())
Java_MediaDrmBridge_destroy(env, j_media_drm_.obj());
+
+ // After the call to Java_MediaDrmBridge_destroy() Java won't call native
+ // methods anymore, this is ensured by MediaDrmBridge.java.
+
+ // CdmPromiseAdapter must be destroyed on the UI thread.
+ cdm_promise_adapter_.reset();
+
+ // Post deletion onto Media thread if we use it.
+ if (use_media_thread_) {
+ ui_weak_factory_.InvalidateWeakPtrs();
+ GetMediaTaskRunner()->DeleteSoon(FROM_HERE, this);
+ } else {
+ delete this;
+ }
+}
+
+MediaDrmBridge::~MediaDrmBridge() {
+ DVLOG(1) << __FUNCTION__;
+
+ DCHECK(!use_media_thread_ || GetMediaTaskRunner()->BelongsToCurrentThread());
+
+ player_tracker_.NotifyCdmUnset();
}
// static
-// TODO(xhwang): Enable SessionExpirationUpdateCB when it is supported.
-scoped_ptr<MediaDrmBridge> MediaDrmBridge::Create(
+ScopedMediaDrmBridgePtr MediaDrmBridge::Create(
const std::string& key_system,
const SessionMessageCB& session_message_cb,
const SessionClosedCB& session_closed_cb,
const LegacySessionErrorCB& legacy_session_error_cb,
const SessionKeysChangeCB& session_keys_change_cb,
- const SessionExpirationUpdateCB& /* session_expiration_update_cb */) {
- scoped_ptr<MediaDrmBridge> media_drm_bridge;
+ const SessionExpirationUpdateCB& session_expiration_update_cb) {
+ scoped_ptr<MediaDrmBridge, BrowserCdmDeleter> media_drm_bridge;
if (!IsAvailable())
return media_drm_bridge.Pass();
@@ -257,7 +340,8 @@ scoped_ptr<MediaDrmBridge> MediaDrmBridge::Create(
media_drm_bridge.reset(
new MediaDrmBridge(scheme_uuid, session_message_cb, session_closed_cb,
- legacy_session_error_cb, session_keys_change_cb));
+ legacy_session_error_cb, session_keys_change_cb,
+ session_expiration_update_cb));
if (media_drm_bridge->j_media_drm_.is_null())
media_drm_bridge.reset();
@@ -266,8 +350,8 @@ scoped_ptr<MediaDrmBridge> MediaDrmBridge::Create(
}
// static
-scoped_ptr<MediaDrmBridge> MediaDrmBridge::CreateWithoutSessionSupport(
- const std::string& key_system) {
+ScopedMediaDrmBridgePtr
+MediaDrmBridge::CreateWithoutSessionSupport(const std::string& key_system) {
return MediaDrmBridge::Create(
key_system, SessionMessageCB(), SessionClosedCB(), LegacySessionErrorCB(),
SessionKeysChangeCB(), SessionExpirationUpdateCB());
@@ -357,7 +441,7 @@ void MediaDrmBridge::CreateSessionAndGenerateRequest(
ScopedJavaLocalRef<jstring> j_mime =
ConvertUTF8ToJavaString(env, ConvertInitDataType(init_data_type));
- uint32_t promise_id = cdm_promise_adapter_.SavePromise(promise.Pass());
+ uint32_t promise_id = cdm_promise_adapter_->SavePromise(promise.Pass());
Java_MediaDrmBridge_createSessionFromNative(env, j_media_drm_.obj(),
j_init_data.obj(), j_mime.obj(),
j_optional_parameters.obj(),
@@ -384,7 +468,7 @@ void MediaDrmBridge::UpdateSession(
ScopedJavaLocalRef<jbyteArray> j_session_id = base::android::ToJavaByteArray(
env, reinterpret_cast<const uint8_t*>(session_id.data()),
session_id.size());
- uint32_t promise_id = cdm_promise_adapter_.SavePromise(promise.Pass());
+ uint32_t promise_id = cdm_promise_adapter_->SavePromise(promise.Pass());
Java_MediaDrmBridge_updateSession(env, j_media_drm_.obj(), j_session_id.obj(),
j_response.obj(), promise_id);
}
@@ -396,7 +480,7 @@ void MediaDrmBridge::CloseSession(const std::string& session_id,
ScopedJavaLocalRef<jbyteArray> j_session_id = base::android::ToJavaByteArray(
env, reinterpret_cast<const uint8_t*>(session_id.data()),
session_id.size());
- uint32_t promise_id = cdm_promise_adapter_.SavePromise(promise.Pass());
+ uint32_t promise_id = cdm_promise_adapter_->SavePromise(promise.Pass());
Java_MediaDrmBridge_closeSession(env, j_media_drm_.obj(), j_session_id.obj(),
promise_id);
}
@@ -415,47 +499,75 @@ CdmContext* MediaDrmBridge::GetCdmContext() {
int MediaDrmBridge::RegisterPlayer(const base::Closure& new_key_cb,
const base::Closure& cdm_unset_cb) {
+ DCHECK(!use_media_thread_ || GetMediaTaskRunner()->BelongsToCurrentThread());
return player_tracker_.RegisterPlayer(new_key_cb, cdm_unset_cb);
}
void MediaDrmBridge::UnregisterPlayer(int registration_id) {
+ DCHECK(!use_media_thread_ || GetMediaTaskRunner()->BelongsToCurrentThread());
player_tracker_.UnregisterPlayer(registration_id);
}
-void MediaDrmBridge::SetMediaCryptoReadyCB(const base::Closure& closure) {
- if (closure.is_null()) {
+void MediaDrmBridge::SetMediaCryptoReadyCB(
+ const MediaCryptoReadyCB& media_crypto_ready_cb) {
+ DCHECK(ui_task_runner_->BelongsToCurrentThread());
+ DVLOG(1) << __FUNCTION__;
+
+ if (media_crypto_ready_cb.is_null()) {
media_crypto_ready_cb_.Reset();
return;
}
DCHECK(media_crypto_ready_cb_.is_null());
+ // |media_crypto_ready_cb| is already bound to the correct thread
+ // (either UI or Media).
if (!GetMediaCrypto().is_null()) {
- base::ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, closure);
+ NotifyMediaCryptoReady(media_crypto_ready_cb);
return;
}
- media_crypto_ready_cb_ = closure;
+ media_crypto_ready_cb_ = media_crypto_ready_cb;
+}
+
+void MediaDrmBridge::OnMediaCryptoReady(JNIEnv* env, jobject j_media_drm) {
+ DCHECK(ui_task_runner_->BelongsToCurrentThread());
+ DVLOG(1) << __FUNCTION__;
+
+ DCHECK(!GetMediaCrypto().is_null());
+
+ if (media_crypto_ready_cb_.is_null())
+ return;
+
+ NotifyMediaCryptoReady(base::ResetAndReturn(&media_crypto_ready_cb_));
}
-void MediaDrmBridge::OnMediaCryptoReady(JNIEnv* env, jobject) {
+void MediaDrmBridge::NotifyMediaCryptoReady(const MediaCryptoReadyCB& cb) {
+ DCHECK(ui_task_runner_->BelongsToCurrentThread());
+
+ DCHECK(!cb.is_null());
DCHECK(!GetMediaCrypto().is_null());
- if (!media_crypto_ready_cb_.is_null())
- base::ResetAndReturn(&media_crypto_ready_cb_).Run();
+
+ // We can use scoped_ptr to pass ScopedJavaGlobalRef with a callback.
+ scoped_ptr<ScopedJavaGlobalRef<jobject>> j_object_ptr(
+ new ScopedJavaGlobalRef<jobject>());
+ j_object_ptr->Reset(AttachCurrentThread(), GetMediaCrypto().obj());
+
+ cb.Run(j_object_ptr.Pass(), IsProtectedSurfaceRequired());
}
void MediaDrmBridge::OnPromiseResolved(JNIEnv* env,
jobject j_media_drm,
jint j_promise_id) {
- cdm_promise_adapter_.ResolvePromise(j_promise_id);
+ cdm_promise_adapter_->ResolvePromise(j_promise_id);
}
void MediaDrmBridge::OnPromiseResolvedWithSession(JNIEnv* env,
jobject j_media_drm,
jint j_promise_id,
jbyteArray j_session_id) {
- cdm_promise_adapter_.ResolvePromise(j_promise_id,
- GetSessionId(env, j_session_id));
+ cdm_promise_adapter_->ResolvePromise(j_promise_id,
+ GetSessionId(env, j_session_id));
}
void MediaDrmBridge::OnPromiseRejected(JNIEnv* env,
@@ -463,23 +575,22 @@ void MediaDrmBridge::OnPromiseRejected(JNIEnv* env,
jint j_promise_id,
jstring j_error_message) {
std::string error_message = ConvertJavaStringToUTF8(env, j_error_message);
- cdm_promise_adapter_.RejectPromise(j_promise_id, MediaKeys::UNKNOWN_ERROR, 0,
- error_message);
+ cdm_promise_adapter_->RejectPromise(j_promise_id, MediaKeys::UNKNOWN_ERROR, 0,
+ error_message);
}
void MediaDrmBridge::OnSessionMessage(JNIEnv* env,
jobject j_media_drm,
jbyteArray j_session_id,
+ jint j_message_type,
jbyteArray j_message,
jstring j_legacy_destination_url) {
std::vector<uint8> message;
JavaByteArrayToByteVector(env, j_message, &message);
GURL legacy_destination_url =
GURL(ConvertJavaStringToUTF8(env, j_legacy_destination_url));
- // Note: Message type is not supported in MediaDrm. Do our best guess here.
- media::MediaKeys::MessageType message_type =
- legacy_destination_url.is_empty() ? media::MediaKeys::LICENSE_REQUEST
- : media::MediaKeys::LICENSE_RENEWAL;
+ MediaKeys::MessageType message_type =
+ GetMessageType(static_cast<RequestType>(j_message_type));
session_message_cb_.Run(GetSessionId(env, j_session_id), message_type,
message, legacy_destination_url);
@@ -494,23 +605,82 @@ void MediaDrmBridge::OnSessionClosed(JNIEnv* env,
void MediaDrmBridge::OnSessionKeysChange(JNIEnv* env,
jobject j_media_drm,
jbyteArray j_session_id,
- bool has_additional_usable_key,
- jint j_key_status) {
+ jobjectArray j_keys_info,
+ bool has_additional_usable_key) {
if (has_additional_usable_key)
- player_tracker_.NotifyNewKey();
+ NotifyNewKeyOnCorrectThread();
- scoped_ptr<CdmKeyInformation> cdm_key_information(new CdmKeyInformation());
- cdm_key_information->key_id.assign(kDummyKeyId,
- kDummyKeyId + sizeof(kDummyKeyId));
- cdm_key_information->status =
- static_cast<CdmKeyInformation::KeyStatus>(j_key_status);
CdmKeysInfo cdm_keys_info;
- cdm_keys_info.push_back(cdm_key_information.release());
+
+ size_t size = env->GetArrayLength(j_keys_info);
+ DCHECK_GT(size, 0u);
+
+ for (size_t i = 0; i < size; ++i) {
+ ScopedJavaLocalRef<jobject> j_key_status(
+ env, env->GetObjectArrayElement(j_keys_info, i));
+
+ ScopedJavaLocalRef<jbyteArray> j_key_id =
+ Java_KeyStatus_getKeyId(env, j_key_status.obj());
+ std::vector<uint8> key_id;
+ JavaByteArrayToByteVector(env, j_key_id.obj(), &key_id);
+ DCHECK(!key_id.empty());
+
+ jint j_status_code =
+ Java_KeyStatus_getStatusCode(env, j_key_status.obj());
+ CdmKeyInformation::KeyStatus key_status =
+ ConvertKeyStatus(static_cast<KeyStatus>(j_status_code));
+
+ DVLOG(2) << __FUNCTION__ << "Key status change: "
+ << base::HexEncode(&key_id[0], key_id.size()) << ", "
+ << key_status;
+
+ // TODO(xhwang): Update CdmKeyInformation to take key_id and status in the
+ // constructor.
+ scoped_ptr<CdmKeyInformation> cdm_key_information(new CdmKeyInformation());
+ cdm_key_information->key_id = key_id;
+ cdm_key_information->status = key_status;
+ cdm_keys_info.push_back(cdm_key_information.release());
+ }
session_keys_change_cb_.Run(GetSessionId(env, j_session_id),
has_additional_usable_key, cdm_keys_info.Pass());
}
+void MediaDrmBridge::NotifyNewKeyOnCorrectThread() {
+ // Repost this method onto the Media thread if |use_media_thread_| is true.
+ if (use_media_thread_ && !GetMediaTaskRunner()->BelongsToCurrentThread()) {
+ GetMediaTaskRunner()->PostTask(
+ FROM_HERE, base::Bind(&MediaDrmBridge::NotifyNewKeyOnCorrectThread,
+ media_weak_factory_.GetWeakPtr()));
+ return;
+ }
+
+ DCHECK(!use_media_thread_ || GetMediaTaskRunner()->BelongsToCurrentThread());
+ DVLOG(1) << __FUNCTION__;
+
+ player_tracker_.NotifyNewKey();
+}
+
+// According to MediaDrm documentation [1], zero |expiry_time_ms| means the keys
+// will never expire. This will be translated into a NULL base::Time() [2],
+// which will then be mapped to a zero Java time [3]. The zero Java time is
+// passed to Blink which will then be translated to NaN [4], which is what the
+// spec uses to indicate that the license will never expire [5].
+// [1] http://developer.android.com/reference/android/media/MediaDrm.OnExpirationUpdateListener.html
+// [2] See base::Time::FromDoubleT()
+// [3] See base::Time::ToJavaTime()
+// [4] See MediaKeySession::expirationChanged()
+// [5] https://github.com/w3c/encrypted-media/issues/58
+void MediaDrmBridge::OnSessionExpirationUpdate(JNIEnv* env,
+ jobject j_media_drm,
+ jbyteArray j_session_id,
+ jlong expiry_time_ms) {
+ DVLOG(2) << __FUNCTION__ << ": " << expiry_time_ms << " ms";
+ session_expiration_update_cb_.Run(
+ GetSessionId(env, j_session_id),
+ base::Time::FromDoubleT(expiry_time_ms / 1000.0));
+}
+
void MediaDrmBridge::OnLegacySessionError(JNIEnv* env,
jobject j_media_drm,
jbyteArray j_session_id,
@@ -521,6 +691,8 @@ void MediaDrmBridge::OnLegacySessionError(JNIEnv* env,
}
ScopedJavaLocalRef<jobject> MediaDrmBridge::GetMediaCrypto() {
+ DCHECK(ui_task_runner_->BelongsToCurrentThread());
+
JNIEnv* env = AttachCurrentThread();
return Java_MediaDrmBridge_getMediaCrypto(env, j_media_drm_.obj());
}
@@ -543,6 +715,10 @@ bool MediaDrmBridge::IsProtectedSurfaceRequired() {
return true;
}
+base::WeakPtr<MediaDrmBridge> MediaDrmBridge::WeakPtrForUIThread() {
+ return ui_weak_factory_.GetWeakPtr();
+}
+
void MediaDrmBridge::ResetDeviceCredentials(
const ResetCredentialsCB& callback) {
DCHECK(reset_credentials_cb_.is_null());
diff --git a/chromium/media/base/android/media_drm_bridge.h b/chromium/media/base/android/media_drm_bridge.h
index 30842b97731..75dc5cd1f55 100644
--- a/chromium/media/base/android/media_drm_bridge.h
+++ b/chromium/media/base/android/media_drm_bridge.h
@@ -12,6 +12,7 @@
#include "base/android/scoped_java_ref.h"
#include "base/callback.h"
#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
#include "media/base/browser_cdm.h"
#include "media/base/cdm_promise_adapter.h"
#include "media/base/media_export.h"
@@ -22,8 +23,11 @@ class GURL;
namespace media {
+class MediaDrmBridge;
class MediaPlayerManager;
+using ScopedMediaDrmBridgePtr = scoped_ptr<MediaDrmBridge, BrowserCdmDeleter>;
+
// This class provides DRM services for android EME implementation.
class MEDIA_EXPORT MediaDrmBridge : public BrowserCdm {
public:
@@ -34,10 +38,21 @@ class MEDIA_EXPORT MediaDrmBridge : public BrowserCdm {
SECURITY_LEVEL_3 = 3,
};
- typedef base::Callback<void(bool)> ResetCredentialsCB;
+ using JavaObjectPtr = scoped_ptr<base::android::ScopedJavaGlobalRef<jobject>>;
+
+ using ResetCredentialsCB = base::Callback<void(bool)>;
+
+ // Notification called when MediaCrypto object is ready.
+ // Parameters:
+ // |media_crypto| - global reference to MediaCrypto object
+ // |needs_protected_surface| - true if protected surface is required.
+ using MediaCryptoReadyCB = base::Callback<void(JavaObjectPtr media_crypto,
+ bool needs_protected_surface)>;
~MediaDrmBridge() override;
+ void DeleteOnCorrectThread() override;
+
// Checks whether MediaDRM is available.
// All other static methods check IsAvailable() internally. There's no need
// to check IsAvailable() explicitly before calling them.
@@ -61,7 +76,7 @@ class MEDIA_EXPORT MediaDrmBridge : public BrowserCdm {
// Returns a MediaDrmBridge instance if |key_system| is supported, or a NULL
// pointer otherwise.
// TODO(xhwang): Is it okay not to update session expiration info?
- static scoped_ptr<MediaDrmBridge> Create(
+ static ScopedMediaDrmBridgePtr Create(
const std::string& key_system,
const SessionMessageCB& session_message_cb,
const SessionClosedCB& session_closed_cb,
@@ -72,7 +87,7 @@ class MEDIA_EXPORT MediaDrmBridge : public BrowserCdm {
// Returns a MediaDrmBridge instance if |key_system| is supported, or a NULL
// otherwise. No session callbacks are provided. This is used when we need to
// use MediaDrmBridge without creating any sessions.
- static scoped_ptr<MediaDrmBridge> CreateWithoutSessionSupport(
+ static ScopedMediaDrmBridgePtr CreateWithoutSessionSupport(
const std::string& key_system);
// Returns true if |security_level| is successfully set, or false otherwise.
@@ -114,11 +129,11 @@ class MEDIA_EXPORT MediaDrmBridge : public BrowserCdm {
// otherwise.
base::android::ScopedJavaLocalRef<jobject> GetMediaCrypto();
- // Sets callback which will be called when MediaCrypto is ready.
- // If |closure| is null, previously set callback will be cleared.
- void SetMediaCryptoReadyCB(const base::Closure& closure);
+ // Sets callback which will be called when MediaCrypto is ready. If
+ // |media_crypto_ready_cb| is null, previously set callback will be cleared.
+ void SetMediaCryptoReadyCB(const MediaCryptoReadyCB& media_crypto_ready_cb);
- // Called after a MediaCrypto object is created.
+ // Called by Java after a MediaCrypto object is created.
void OnMediaCryptoReady(JNIEnv* env, jobject j_media_drm);
// Callbacks to resolve the promise for |promise_id|.
@@ -139,22 +154,31 @@ class MEDIA_EXPORT MediaDrmBridge : public BrowserCdm {
// Session event callbacks.
// Note: Session expiration update is not supported by MediaDrm.
+ // TODO(xhwang): Remove |j_legacy_destination_url| when prefixed EME support
+ // is removed.
void OnSessionMessage(JNIEnv* env,
jobject j_media_drm,
jbyteArray j_session_id,
+ jint j_message_type,
jbyteArray j_message,
jstring j_legacy_destination_url);
void OnSessionClosed(JNIEnv* env,
jobject j_media_drm,
jbyteArray j_session_id);
- // Note: Key ID is not available in MediaDrm, so only a generic |j_key_status|
- // and |has_additional_usable_key| are returned.
void OnSessionKeysChange(JNIEnv* env,
jobject j_media_drm,
jbyteArray j_session_id,
- bool has_additional_usable_key,
- jint j_key_status);
+ jobjectArray j_keys_info,
+ bool has_additional_usable_key);
+
+ // |expiry_time_ms| is the new expiration time for the keys in the session.
+ // The time is in milliseconds, relative to the Unix epoch. A time of 0
+ // indicates that the keys never expire.
+ void OnSessionExpirationUpdate(JNIEnv* env,
+ jobject j_media_drm,
+ jbyteArray j_session_id,
+ jlong expiry_time_ms);
// Called by the CDM when an error occurred in session |j_session_id|
// unrelated to one of the MediaKeys calls that accept a |promise|.
@@ -177,18 +201,29 @@ class MEDIA_EXPORT MediaDrmBridge : public BrowserCdm {
// video playback.
bool IsProtectedSurfaceRequired();
+ // We use this pointer when we post SetMediaCryptoReadyCB onto UI thread.
+ base::WeakPtr<MediaDrmBridge> WeakPtrForUIThread();
+
private:
MediaDrmBridge(const std::vector<uint8>& scheme_uuid,
const SessionMessageCB& session_message_cb,
const SessionClosedCB& session_closed_cb,
const LegacySessionErrorCB& legacy_session_error_cb,
- const SessionKeysChangeCB& session_keys_change_cb);
+ const SessionKeysChangeCB& session_keys_change_cb,
+ const SessionExpirationUpdateCB& session_expiration_update_cb);
static bool IsSecureDecoderRequired(SecurityLevel security_level);
// Get the security level of the media.
SecurityLevel GetSecurityLevel();
+ // A helper method that calls a |player_tracker_| method on correct thread.
+ void NotifyNewKeyOnCorrectThread();
+
+ // A helper method that calculates the |media_crypto_ready_cb_| arguments and
+ // run this callback.
+ void NotifyMediaCryptoReady(const MediaCryptoReadyCB& cb);
+
// UUID of the key system.
std::vector<uint8> scheme_uuid_;
@@ -200,14 +235,27 @@ class MEDIA_EXPORT MediaDrmBridge : public BrowserCdm {
SessionClosedCB session_closed_cb_;
LegacySessionErrorCB legacy_session_error_cb_;
SessionKeysChangeCB session_keys_change_cb_;
+ SessionExpirationUpdateCB session_expiration_update_cb_;
- base::Closure media_crypto_ready_cb_;
+ MediaCryptoReadyCB media_crypto_ready_cb_;
ResetCredentialsCB reset_credentials_cb_;
+ // The |player_tracker_| must be accessed by one thread only. It is accessed
+ // by the Media thread when |use_media_thread_| is true.
PlayerTrackerImpl player_tracker_;
- CdmPromiseAdapter cdm_promise_adapter_;
+ scoped_ptr<CdmPromiseAdapter> cdm_promise_adapter_;
+
+ // Object for posting tasks on UI thread.
+ scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner_;
+
+ // This flag is set when we use media thread for certain callbacks.
+ const bool use_media_thread_;
+
+ // NOTE: Weak pointers must be invalidated before all other member variables.
+ base::WeakPtrFactory<MediaDrmBridge> media_weak_factory_;
+ base::WeakPtrFactory<MediaDrmBridge> ui_weak_factory_;
DISALLOW_COPY_AND_ASSIGN(MediaDrmBridge);
};
diff --git a/chromium/media/base/android/media_drm_bridge_unittest.cc b/chromium/media/base/android/media_drm_bridge_unittest.cc
index e4c0800030a..f215a425a5e 100644
--- a/chromium/media/base/android/media_drm_bridge_unittest.cc
+++ b/chromium/media/base/android/media_drm_bridge_unittest.cc
@@ -5,6 +5,7 @@
#include "base/android/build_info.h"
#include "base/basictypes.h"
#include "base/logging.h"
+#include "base/message_loop/message_loop.h"
#include "media/base/android/media_drm_bridge.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -76,17 +77,20 @@ TEST(MediaDrmBridgeTest, IsKeySystemSupported_InvalidKeySystem) {
}
TEST(MediaDrmBridgeTest, CreateWithoutSessionSupport_Widevine) {
+ base::MessageLoop message_loop_;
EXPECT_TRUE_IF_WIDEVINE_AVAILABLE(
MediaDrmBridge::CreateWithoutSessionSupport(kWidevineKeySystem));
}
// Invalid key system is NOT supported regardless whether MediaDrm is available.
TEST(MediaDrmBridgeTest, CreateWithoutSessionSupport_InvalidKeySystem) {
+ base::MessageLoop message_loop_;
EXPECT_FALSE(MediaDrmBridge::CreateWithoutSessionSupport(kInvalidKeySystem));
}
TEST(MediaDrmBridgeTest, SetSecurityLevel_Widevine) {
- scoped_ptr<MediaDrmBridge> media_drm_bridge =
+ base::MessageLoop message_loop_;
+ scoped_ptr<MediaDrmBridge, BrowserCdmDeleter> media_drm_bridge =
MediaDrmBridge::CreateWithoutSessionSupport(kWidevineKeySystem);
EXPECT_TRUE_IF_WIDEVINE_AVAILABLE(media_drm_bridge);
if (!media_drm_bridge)
diff --git a/chromium/media/base/android/media_player_android.cc b/chromium/media/base/android/media_player_android.cc
index 99668e8122f..c08c259420a 100644
--- a/chromium/media/base/android/media_player_android.cc
+++ b/chromium/media/base/android/media_player_android.cc
@@ -16,13 +16,12 @@ namespace media {
MediaPlayerAndroid::MediaPlayerAndroid(
int player_id,
MediaPlayerManager* manager,
- const RequestMediaResourcesCB& request_media_resources_cb,
+ const OnDecoderResourcesReleasedCB& on_decoder_resources_released_cb,
const GURL& frame_url)
- : request_media_resources_cb_(request_media_resources_cb),
+ : on_decoder_resources_released_cb_(on_decoder_resources_released_cb),
player_id_(player_id),
manager_(manager),
frame_url_(frame_url),
- is_audible_(false),
weak_factory_(this) {
listener_.reset(new MediaPlayerListener(base::ThreadTaskRunnerHandle::Get(),
weak_factory_.GetWeakPtr()));
@@ -91,13 +90,6 @@ void MediaPlayerAndroid::DestroyListenerOnUIThread() {
listener_.reset();
}
-void MediaPlayerAndroid::SetAudible(bool is_audible) {
- if (is_audible_ != is_audible) {
- is_audible_ = is_audible;
- manager_->OnAudibleStateChanged(player_id(), is_audible_);
- }
-}
-
base::WeakPtr<MediaPlayerAndroid> MediaPlayerAndroid::WeakPtrForUIThread() {
return weak_factory_.GetWeakPtr();
}
diff --git a/chromium/media/base/android/media_player_android.h b/chromium/media/base/android/media_player_android.h
index e52362411cb..e24ad6c8744 100644
--- a/chromium/media/base/android/media_player_android.h
+++ b/chromium/media/base/android/media_player_android.h
@@ -37,8 +37,8 @@ class MEDIA_EXPORT MediaPlayerAndroid {
MEDIA_ERROR_INVALID_CODE,
};
- // Callback when the player needs decoding resources.
- typedef base::Callback<void(int player_id)> RequestMediaResourcesCB;
+ // Callback when the player releases decoding resources.
+ typedef base::Callback<void(int player_id)> OnDecoderResourcesReleasedCB;
// Virtual destructor.
// For most subclasses we can delete on the caller thread.
@@ -70,16 +70,20 @@ class MEDIA_EXPORT MediaPlayerAndroid {
virtual base::TimeDelta GetDuration() = 0;
virtual base::TimeDelta GetCurrentTime() = 0;
virtual bool IsPlaying() = 0;
- virtual bool IsPlayerReady() = 0;
virtual bool CanPause() = 0;
virtual bool CanSeekForward() = 0;
virtual bool CanSeekBackward() = 0;
+ virtual bool IsPlayerReady() = 0;
virtual GURL GetUrl();
virtual GURL GetFirstPartyForCookies();
// Associates the |cdm| with this player.
virtual void SetCdm(BrowserCdm* cdm);
+ // Requests playback permission from MediaPlayerManager.
+ // Overridden in MediaCodecPlayer to pass data between threads.
+ virtual void RequestPermissionAndPostResult(base::TimeDelta duration) {}
+
// Overridden in MediaCodecPlayer to pass data between threads.
virtual void OnMediaMetadataChanged(base::TimeDelta duration,
const gfx::Size& video_size) {}
@@ -99,10 +103,11 @@ class MEDIA_EXPORT MediaPlayerAndroid {
void DetachListener();
protected:
- MediaPlayerAndroid(int player_id,
- MediaPlayerManager* manager,
- const RequestMediaResourcesCB& request_media_resources_cb,
- const GURL& frame_url);
+ MediaPlayerAndroid(
+ int player_id,
+ MediaPlayerManager* manager,
+ const OnDecoderResourcesReleasedCB& on_decoder_resources_released_cb,
+ const GURL& frame_url);
// TODO(qinmin): Simplify the MediaPlayerListener class to only listen to
// media interrupt events. And have a separate child class to listen to all
@@ -120,13 +125,12 @@ class MEDIA_EXPORT MediaPlayerAndroid {
// it is still required to destroy the |listener_| related stuff
// on the UI thread.
void DestroyListenerOnUIThread();
- void SetAudible(bool is_audible);
MediaPlayerManager* manager() { return manager_; }
base::WeakPtr<MediaPlayerAndroid> WeakPtrForUIThread();
- RequestMediaResourcesCB request_media_resources_cb_;
+ OnDecoderResourcesReleasedCB on_decoder_resources_released_cb_;
private:
friend class MediaPlayerListener;
@@ -143,9 +147,6 @@ class MEDIA_EXPORT MediaPlayerAndroid {
// Listener object that listens to all the media player events.
scoped_ptr<MediaPlayerListener> listener_;
- // Maintains the audible state of the player, true if it is playing sound.
- bool is_audible_;
-
// Weak pointer passed to |listener_| for callbacks.
// NOTE: Weak pointers must be invalidated before all other member variables.
base::WeakPtrFactory<MediaPlayerAndroid> weak_factory_;
diff --git a/chromium/media/base/android/media_player_bridge.cc b/chromium/media/base/android/media_player_bridge.cc
index 5cc8b3933ed..4ec4419ee00 100644
--- a/chromium/media/base/android/media_player_bridge.cc
+++ b/chromium/media/base/android/media_player_bridge.cc
@@ -14,7 +14,7 @@
#include "media/base/android/media_player_manager.h"
#include "media/base/android/media_resource_getter.h"
#include "media/base/android/media_url_interceptor.h"
-#include "media/base/buffers.h"
+#include "media/base/timestamp_constants.h"
using base::android::ConvertUTF8ToJavaString;
using base::android::ScopedJavaLocalRef;
@@ -28,12 +28,12 @@ MediaPlayerBridge::MediaPlayerBridge(
const std::string& user_agent,
bool hide_url_log,
MediaPlayerManager* manager,
- const RequestMediaResourcesCB& request_media_resources_cb,
+ const OnDecoderResourcesReleasedCB& on_decoder_resources_released_cb,
const GURL& frame_url,
bool allow_credentials)
: MediaPlayerAndroid(player_id,
manager,
- request_media_resources_cb,
+ on_decoder_resources_released_cb,
frame_url),
prepared_(false),
pending_play_(false),
@@ -109,16 +109,16 @@ void MediaPlayerBridge::SetDuration(base::TimeDelta duration) {
}
void MediaPlayerBridge::SetVideoSurface(gfx::ScopedJavaSurface surface) {
- if (j_media_player_bridge_.is_null()) {
- if (surface.IsEmpty())
- return;
- Prepare();
- }
+ surface_ = surface.Pass();
+
+ if (j_media_player_bridge_.is_null())
+ return;
JNIEnv* env = base::android::AttachCurrentThread();
CHECK(env);
+
Java_MediaPlayerBridge_setSurface(
- env, j_media_player_bridge_.obj(), surface.j_surface().obj());
+ env, j_media_player_bridge_.obj(), surface_.j_surface().obj());
}
void MediaPlayerBridge::Prepare() {
@@ -160,7 +160,7 @@ void MediaPlayerBridge::SetDataSource(const std::string& url) {
DCHECK(j_context);
const std::string data_uri_prefix("data:");
- if (base::StartsWithASCII(url, data_uri_prefix, true)) {
+ if (base::StartsWith(url, data_uri_prefix, base::CompareCase::SENSITIVE)) {
if (!Java_MediaPlayerBridge_setDataUriDataSource(
env, j_media_player_bridge_.obj(), j_context, j_url_string.obj())) {
OnMediaError(MEDIA_ERROR_FORMAT);
@@ -181,7 +181,6 @@ void MediaPlayerBridge::SetDataSource(const std::string& url) {
}
}
- request_media_resources_cb_.Run(player_id());
if (!Java_MediaPlayerBridge_prepareAsync(env, j_media_player_bridge_.obj()))
OnMediaError(MEDIA_ERROR_FORMAT);
}
@@ -212,7 +211,6 @@ void MediaPlayerBridge::OnDidSetDataUriDataSource(JNIEnv* env, jobject obj,
return;
}
- request_media_resources_cb_.Run(player_id());
if (!Java_MediaPlayerBridge_prepareAsync(env, j_media_player_bridge_.obj()))
OnMediaError(MEDIA_ERROR_FORMAT);
}
@@ -240,6 +238,7 @@ void MediaPlayerBridge::OnAuthCredentialsRetrieved(
void MediaPlayerBridge::ExtractMediaMetadata(const std::string& url) {
if (url.empty()) {
OnMediaError(MEDIA_ERROR_FORMAT);
+ on_decoder_resources_released_cb_.Run(player_id());
return;
}
@@ -268,6 +267,7 @@ void MediaPlayerBridge::OnMediaMetadataExtracted(
}
manager()->OnMediaMetadataChanged(
player_id(), duration_, width_, height_, success);
+ on_decoder_resources_released_cb_.Run(player_id());
}
void MediaPlayerBridge::Start() {
@@ -325,10 +325,8 @@ void MediaPlayerBridge::SeekTo(base::TimeDelta timestamp) {
pending_seek_ = timestamp;
should_seek_on_prepare_ = true;
- if (j_media_player_bridge_.is_null())
- Prepare();
- else if (prepared_)
- SeekInternal(timestamp);
+ if (prepared_)
+ SeekInternal(GetCurrentTime(), timestamp);
}
base::TimeDelta MediaPlayerBridge::GetCurrentTime() {
@@ -351,11 +349,10 @@ base::TimeDelta MediaPlayerBridge::GetDuration() {
}
void MediaPlayerBridge::Release() {
+ on_decoder_resources_released_cb_.Run(player_id());
if (j_media_player_bridge_.is_null())
return;
- SetAudible(false);
-
time_update_timer_.Stop();
if (prepared_) {
pending_seek_ = GetCurrentTime();
@@ -372,22 +369,16 @@ void MediaPlayerBridge::Release() {
}
void MediaPlayerBridge::SetVolume(double volume) {
- volume_ = volume;
-
- if (j_media_player_bridge_.is_null())
+ if (j_media_player_bridge_.is_null()) {
+ volume_ = volume;
return;
+ }
JNIEnv* env = base::android::AttachCurrentThread();
CHECK(env);
- // Update the audible state if we are playing.
- jboolean is_playing = Java_MediaPlayerBridge_isPlaying(
- env, j_media_player_bridge_.obj());
- if (is_playing)
- SetAudible(volume_ > 0);
-
Java_MediaPlayerBridge_setVolume(
- env, j_media_player_bridge_.obj(), volume_);
+ env, j_media_player_bridge_.obj(), volume);
}
void MediaPlayerBridge::OnVideoSizeChanged(int width, int height) {
@@ -397,13 +388,11 @@ void MediaPlayerBridge::OnVideoSizeChanged(int width, int height) {
}
void MediaPlayerBridge::OnPlaybackComplete() {
- SetAudible(false);
time_update_timer_.Stop();
MediaPlayerAndroid::OnPlaybackComplete();
}
void MediaPlayerBridge::OnMediaInterrupted() {
- SetAudible(false);
time_update_timer_.Stop();
MediaPlayerAndroid::OnMediaInterrupted();
}
@@ -423,6 +412,9 @@ void MediaPlayerBridge::OnMediaPrepared() {
should_seek_on_prepare_ = false;
}
+ if (!surface_.IsEmpty())
+ SetVideoSurface(surface_.Pass());
+
if (pending_play_) {
StartInternal();
pending_play_ = false;
@@ -455,7 +447,7 @@ void MediaPlayerBridge::UpdateAllowedOperations() {
}
void MediaPlayerBridge::StartInternal() {
- if (!manager()->RequestPlay(player_id())) {
+ if (!manager()->RequestPlay(player_id(), duration_)) {
Pause(true);
return;
}
@@ -468,23 +460,28 @@ void MediaPlayerBridge::StartInternal() {
base::TimeDelta::FromMilliseconds(kTimeUpdateInterval),
this, &MediaPlayerBridge::OnTimeUpdateTimerFired);
}
-
- SetAudible(volume_ > 0);
}
void MediaPlayerBridge::PauseInternal() {
- SetAudible(false);
-
JNIEnv* env = base::android::AttachCurrentThread();
Java_MediaPlayerBridge_pause(env, j_media_player_bridge_.obj());
time_update_timer_.Stop();
}
void MediaPlayerBridge::PendingSeekInternal(const base::TimeDelta& time) {
- SeekInternal(time);
+ SeekInternal(GetCurrentTime(), time);
}
-void MediaPlayerBridge::SeekInternal(base::TimeDelta time) {
+bool MediaPlayerBridge::SeekInternal(base::TimeDelta current_time,
+ base::TimeDelta time) {
+ // Seeking on content like live streams may cause the media player to
+ // get stuck in an error state.
+ if (time < current_time && !CanSeekBackward())
+ return false;
+
+ if (time >= current_time && !CanSeekForward())
+ return false;
+
if (time > duration_)
time = duration_;
@@ -492,7 +489,7 @@ void MediaPlayerBridge::SeekInternal(base::TimeDelta time) {
// error state.
if (time < base::TimeDelta()) {
DCHECK_EQ(-1.0, time.InMillisecondsF());
- return;
+ return false;
}
JNIEnv* env = base::android::AttachCurrentThread();
@@ -500,6 +497,7 @@ void MediaPlayerBridge::SeekInternal(base::TimeDelta time) {
int time_msec = static_cast<int>(time.InMilliseconds());
Java_MediaPlayerBridge_seekTo(
env, j_media_player_bridge_.obj(), time_msec);
+ return true;
}
void MediaPlayerBridge::OnTimeUpdateTimerFired() {
diff --git a/chromium/media/base/android/media_player_bridge.h b/chromium/media/base/android/media_player_bridge.h
index 291aa97959d..fbc54adf737 100644
--- a/chromium/media/base/android/media_player_bridge.h
+++ b/chromium/media/base/android/media_player_bridge.h
@@ -41,15 +41,16 @@ class MEDIA_EXPORT MediaPlayerBridge : public MediaPlayerAndroid {
// |manager| to track unused resources and free them when needed.
// MediaPlayerBridge also forwards Android MediaPlayer callbacks to
// the |manager| when needed.
- MediaPlayerBridge(int player_id,
- const GURL& url,
- const GURL& first_party_for_cookies,
- const std::string& user_agent,
- bool hide_url_log,
- MediaPlayerManager* manager,
- const RequestMediaResourcesCB& request_media_resources_cb,
- const GURL& frame_url,
- bool allow_credentials);
+ MediaPlayerBridge(
+ int player_id,
+ const GURL& url,
+ const GURL& first_party_for_cookies,
+ const std::string& user_agent,
+ bool hide_url_log,
+ MediaPlayerManager* manager,
+ const OnDecoderResourcesReleasedCB& on_decoder_resources_released_cb,
+ const GURL& frame_url,
+ bool allow_credentials);
~MediaPlayerBridge() override;
// Initialize this object and extract the metadata from the media.
@@ -99,13 +100,17 @@ class MEDIA_EXPORT MediaPlayerBridge : public MediaPlayerAndroid {
virtual base::android::ScopedJavaLocalRef<jobject> GetAllowedOperations();
private:
+ friend class MediaPlayerBridgeTest;
+
// Set the data source for the media player.
void SetDataSource(const std::string& url);
// Functions that implements media player control.
void StartInternal();
void PauseInternal();
- void SeekInternal(base::TimeDelta time);
+
+ // Returns true if the Java MediaPlayerBridge's seekTo method is called
+ bool SeekInternal(base::TimeDelta current_time, base::TimeDelta time);
// Called when |time_update_timer_| fires.
void OnTimeUpdateTimerFired();
@@ -170,10 +175,13 @@ class MEDIA_EXPORT MediaPlayerBridge : public MediaPlayerAndroid {
// Cookies for |url_|.
std::string cookies_;
+ // The surface object currently owned by the player.
+ gfx::ScopedJavaSurface surface_;
+
// Java MediaPlayerBridge instance.
base::android::ScopedJavaGlobalRef<jobject> j_media_player_bridge_;
- base::RepeatingTimer<MediaPlayerBridge> time_update_timer_;
+ base::RepeatingTimer time_update_timer_;
// Volume of playback.
double volume_;
diff --git a/chromium/media/base/android/media_player_bridge_unittest.cc b/chromium/media/base/android/media_player_bridge_unittest.cc
new file mode 100644
index 00000000000..77cfb69719b
--- /dev/null
+++ b/chromium/media/base/android/media_player_bridge_unittest.cc
@@ -0,0 +1,109 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/message_loop/message_loop.h"
+#include "media/base/android/media_player_bridge.h"
+#include "media/base/android/media_player_manager.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+namespace {
+
+class MockMediaPlayerManager : public MediaPlayerManager {
+ public:
+ MOCK_METHOD0(GetMediaResourceGetter, MediaResourceGetter*());
+ MOCK_METHOD0(GetMediaUrlInterceptor, MediaUrlInterceptor*());
+ MOCK_METHOD3(OnTimeUpdate,
+ void(int player_id,
+ base::TimeDelta current_timestamp,
+ base::TimeTicks current_time_ticks));
+ MOCK_METHOD5(OnMediaMetadataChanged,
+ void(int player_id,
+ base::TimeDelta duration,
+ int width,
+ int height,
+ bool success));
+ MOCK_METHOD1(OnPlaybackComplete, void(int player_id));
+ MOCK_METHOD1(OnMediaInterrupted, void(int player_id));
+ MOCK_METHOD2(OnBufferingUpdate, void(int player_id, int percentage));
+ MOCK_METHOD2(OnSeekComplete,
+ void(int player_id, const base::TimeDelta& current_time));
+ MOCK_METHOD2(OnError, void(int player_id, int error));
+ MOCK_METHOD3(OnVideoSizeChanged, void(int player_id, int width, int height));
+ MOCK_METHOD2(OnAudibleStateChanged, void(int player_id, bool is_audible_now));
+ MOCK_METHOD1(OnWaitingForDecryptionKey, void(int player_id));
+ MOCK_METHOD0(GetFullscreenPlayer, MediaPlayerAndroid*());
+ MOCK_METHOD1(GetPlayer, MediaPlayerAndroid*(int player_id));
+ MOCK_METHOD2(RequestPlay, bool(int player_id, base::TimeDelta duration));
+
+ void OnMediaResourcesRequested(int player_id) {}
+};
+
+} // anonymous namespace
+
+class MediaPlayerBridgeTest : public testing::Test {
+ public:
+ MediaPlayerBridgeTest()
+ : bridge_(0,
+ GURL(),
+ GURL(),
+ "",
+ false,
+ &manager_,
+ base::Bind(&MockMediaPlayerManager::OnMediaResourcesRequested,
+ base::Unretained(&manager_)),
+ GURL(),
+ false) {}
+
+ void SetCanSeekForward(bool can_seek_forward) {
+ bridge_.can_seek_forward_ = can_seek_forward;
+ }
+
+ void SetCanSeekBackward(bool can_seek_backward) {
+ bridge_.can_seek_backward_ = can_seek_backward;
+ }
+
+ bool SeekInternal(const base::TimeDelta& current_time, base::TimeDelta time) {
+ return bridge_.SeekInternal(current_time, time);
+ }
+
+ private:
+ // A message loop needs to be instantiated in order for the test to run
+ // properly.
+ base::MessageLoop message_loop_;
+ MockMediaPlayerManager manager_;
+ MediaPlayerBridge bridge_;
+
+ DISALLOW_COPY_AND_ASSIGN(MediaPlayerBridgeTest);
+};
+
+TEST_F(MediaPlayerBridgeTest, PreventForwardSeekWhenItIsNotPossible) {
+ // Simulate the Java MediaPlayerBridge reporting that forward seeks are not
+ // possible
+ SetCanSeekForward(false);
+ SetCanSeekBackward(true);
+
+ // If this assertion fails, seeks will be allowed which will result in a
+ // crash because j_media_player_bridge_ cannot be properly instantiated
+ // during this test.
+ ASSERT_FALSE(
+ SeekInternal(base::TimeDelta(), base::TimeDelta::FromSeconds(10)));
+}
+
+TEST_F(MediaPlayerBridgeTest, PreventBackwardSeekWhenItIsNotPossible) {
+ // Simulate the Java MediaPlayerBridge reporting that backward seeks are not
+ // possible
+ SetCanSeekForward(true);
+ SetCanSeekBackward(false);
+
+ // If this assertion fails, seeks will be allowed which will result in a
+ // crash because j_media_player_bridge_ cannot be properly instantiated
+ // during this test.
+ ASSERT_FALSE(
+ SeekInternal(base::TimeDelta::FromSeconds(10), base::TimeDelta()));
+}
+
+} // namespace media
diff --git a/chromium/media/base/android/media_player_manager.h b/chromium/media/base/android/media_player_manager.h
index bf6ca4e0734..e9edd884e46 100644
--- a/chromium/media/base/android/media_player_manager.h
+++ b/chromium/media/base/android/media_player_manager.h
@@ -63,9 +63,6 @@ class MEDIA_EXPORT MediaPlayerManager {
// Called when video size has changed. Args: player ID, width, height.
virtual void OnVideoSizeChanged(int player_id, int width, int height) = 0;
- // Called when the player thinks it stopped or started making sound.
- virtual void OnAudibleStateChanged(int player_id, bool is_audible_now) = 0;
-
// Called when the player pauses as a new key is required to decrypt
// encrypted content.
virtual void OnWaitingForDecryptionKey(int player_id) = 0;
@@ -76,11 +73,11 @@ class MEDIA_EXPORT MediaPlayerManager {
// Returns the player with the specified id.
virtual MediaPlayerAndroid* GetPlayer(int player_id) = 0;
- // Called by the player to request to play. The manager should use this
- // opportunity to check if the current context is appropriate for a media to
- // play.
+ // Called by the player to request the playback for given duration. The
+ // manager should use this opportunity to check if the current context is
+ // appropriate for a media to play.
// Returns whether the request was granted.
- virtual bool RequestPlay(int player_id) = 0;
+ virtual bool RequestPlay(int player_id, base::TimeDelta duration) = 0;
};
} // namespace media
diff --git a/chromium/media/base/android/media_source_player.cc b/chromium/media/base/android/media_source_player.cc
index 460fbec8fb6..d4408b31396 100644
--- a/chromium/media/base/android/media_source_player.cc
+++ b/chromium/media/base/android/media_source_player.cc
@@ -18,18 +18,20 @@
#include "media/base/android/audio_decoder_job.h"
#include "media/base/android/media_player_manager.h"
#include "media/base/android/video_decoder_job.h"
+#include "media/base/bind_to_current_loop.h"
+#include "media/base/timestamp_constants.h"
namespace media {
MediaSourcePlayer::MediaSourcePlayer(
int player_id,
MediaPlayerManager* manager,
- const RequestMediaResourcesCB& request_media_resources_cb,
+ const OnDecoderResourcesReleasedCB& on_decoder_resources_released_cb,
scoped_ptr<DemuxerAndroid> demuxer,
const GURL& frame_url)
: MediaPlayerAndroid(player_id,
manager,
- request_media_resources_cb,
+ on_decoder_resources_released_cb,
frame_url),
demuxer_(demuxer.Pass()),
pending_event_(NO_EVENT_PENDING),
@@ -45,6 +47,8 @@ MediaSourcePlayer::MediaSourcePlayer(
is_waiting_for_video_decoder_(false),
prerolling_(true),
weak_factory_(this) {
+ media_stat_.reset(new MediaStatistics());
+
audio_decoder_job_.reset(new AudioDecoderJob(
base::Bind(&DemuxerAndroid::RequestDemuxerData,
base::Unretained(demuxer_.get()),
@@ -55,9 +59,9 @@ MediaSourcePlayer::MediaSourcePlayer(
base::Bind(&DemuxerAndroid::RequestDemuxerData,
base::Unretained(demuxer_.get()),
DemuxerStream::VIDEO),
- base::Bind(request_media_resources_cb_, player_id),
base::Bind(&MediaSourcePlayer::OnDemuxerConfigsChanged,
weak_factory_.GetWeakPtr())));
+
demuxer_->Initialize(this);
interpolator_.SetUpperBound(base::TimeDelta());
weak_this_ = weak_factory_.GetWeakPtr();
@@ -133,8 +137,6 @@ void MediaSourcePlayer::Pause(bool is_media_related_action) {
// MediaDecoderCallback() is called.
playing_ = false;
start_time_ticks_ = base::TimeTicks();
-
- SetAudible(false);
}
bool MediaSourcePlayer::IsPlaying() {
@@ -187,8 +189,8 @@ void MediaSourcePlayer::Release() {
decoder_starvation_callback_.Cancel();
- SetAudible(false);
DetachListener();
+ on_decoder_resources_released_cb_.Run(player_id());
}
void MediaSourcePlayer::SetVolume(double volume) {
@@ -208,7 +210,7 @@ bool MediaSourcePlayer::CanSeekBackward() {
}
bool MediaSourcePlayer::IsPlayerReady() {
- return audio_decoder_job_ || video_decoder_job_;
+ return HasAudio() || HasVideo();
}
void MediaSourcePlayer::StartInternal() {
@@ -217,7 +219,7 @@ void MediaSourcePlayer::StartInternal() {
if (pending_event_ != NO_EVENT_PENDING)
return;
- if (!manager()->RequestPlay(player_id())) {
+ if (!manager()->RequestPlay(player_id(), duration_)) {
Pause(true);
return;
}
@@ -260,9 +262,12 @@ void MediaSourcePlayer::OnDemuxerDurationChanged(base::TimeDelta duration) {
duration_ = duration;
}
-void MediaSourcePlayer::OnMediaCryptoReady() {
+void MediaSourcePlayer::OnMediaCryptoReady(
+ MediaDrmBridge::JavaObjectPtr /* media_crypto */,
+ bool /* needs_protected_surface */) {
+ // Callback parameters are ignored in this player. They are intended for
+ // MediaCodecPlayer which uses a different threading scheme.
DCHECK(!drm_bridge_->GetMediaCrypto().is_null());
- drm_bridge_->SetMediaCryptoReadyCB(base::Closure());
// Retry decoder creation if the decoders are waiting for MediaCrypto.
RetryDecoderCreation(true, true);
@@ -294,8 +299,9 @@ void MediaSourcePlayer::SetCdm(BrowserCdm* cdm) {
video_decoder_job_->SetDrmBridge(drm_bridge_);
if (drm_bridge_->GetMediaCrypto().is_null()) {
- drm_bridge_->SetMediaCryptoReadyCB(
+ MediaDrmBridge::MediaCryptoReadyCB cb = BindToCurrentLoop(
base::Bind(&MediaSourcePlayer::OnMediaCryptoReady, weak_this_));
+ drm_bridge_->SetMediaCryptoReadyCB(cb);
return;
}
@@ -431,7 +437,9 @@ void MediaSourcePlayer::ProcessPendingEvents() {
}
void MediaSourcePlayer::MediaDecoderCallback(
- bool is_audio, MediaCodecStatus status,
+ bool is_audio,
+ MediaCodecStatus status,
+ bool is_late_frame,
base::TimeDelta current_presentation_timestamp,
base::TimeDelta max_presentation_timestamp) {
DVLOG(1) << __FUNCTION__ << ": " << is_audio << ", " << status;
@@ -464,9 +472,19 @@ void MediaSourcePlayer::MediaDecoderCallback(
DVLOG(1) << __FUNCTION__ << " : decode error";
Release();
manager()->OnError(player_id(), MEDIA_ERROR_DECODE);
+ media_stat_->StopAndReport(GetCurrentTime());
return;
}
+ // Increment frame counts for UMA.
+ if (current_presentation_timestamp != kNoTimestamp()) {
+ FrameStatistics& frame_stats = is_audio ? media_stat_->audio_frame_stats()
+ : media_stat_->video_frame_stats();
+ frame_stats.IncrementFrameCount();
+ if (is_late_frame)
+ frame_stats.IncrementLateFrameCount();
+ }
+
DCHECK(!IsEventPending(PREFETCH_DONE_EVENT_PENDING));
// Let |SEEK_EVENT_PENDING| (the highest priority event outside of
@@ -474,6 +492,7 @@ void MediaSourcePlayer::MediaDecoderCallback(
// any other pending events only after handling EOS detection.
if (IsEventPending(SEEK_EVENT_PENDING)) {
ProcessPendingEvents();
+ media_stat_->StopAndReport(GetCurrentTime());
return;
}
@@ -495,8 +514,7 @@ void MediaSourcePlayer::MediaDecoderCallback(
}
if (status == MEDIA_CODEC_OUTPUT_END_OF_STREAM) {
- if (is_audio)
- SetAudible(false);
+ media_stat_->StopAndReport(GetCurrentTime());
return;
}
@@ -504,8 +522,7 @@ void MediaSourcePlayer::MediaDecoderCallback(
if (is_clock_manager)
interpolator_.StopInterpolating();
- if (is_audio)
- SetAudible(false);
+ media_stat_->StopAndReport(GetCurrentTime());
return;
}
@@ -514,11 +531,9 @@ void MediaSourcePlayer::MediaDecoderCallback(
DVLOG(2) << __FUNCTION__ << ": Key was added during decoding.";
ResumePlaybackAfterKeyAdded();
} else {
- if (is_audio)
- SetAudible(false);
-
is_waiting_for_key_ = true;
manager()->OnWaitingForDecryptionKey(player_id());
+ media_stat_->StopAndReport(GetCurrentTime());
}
return;
}
@@ -537,8 +552,7 @@ void MediaSourcePlayer::MediaDecoderCallback(
// in the middle of a seek or stop event and needs to wait for the IPCs to
// come.
if (status == MEDIA_CODEC_ABORT) {
- if (is_audio)
- SetAudible(false);
+ media_stat_->StopAndReport(GetCurrentTime());
return;
}
@@ -550,13 +564,6 @@ void MediaSourcePlayer::MediaDecoderCallback(
return;
}
- // We successfully decoded a frame and going to the next one.
- // Set the audible state.
- if (is_audio) {
- bool is_audible = !prerolling_ && audio_decoder_job_->volume() > 0;
- SetAudible(is_audible);
- }
-
if (is_clock_manager) {
// If we have a valid timestamp, start the starvation callback. Otherwise,
// reset the |start_time_ticks_| so that the next frame will not suffer
@@ -669,12 +676,7 @@ bool MediaSourcePlayer::VideoFinished() {
void MediaSourcePlayer::OnDecoderStarved() {
DVLOG(1) << __FUNCTION__;
- if (HasAudio()) {
- // If the starvation timer fired but there are no encoded frames
- // in the queue we believe the demuxer (i.e. renderer process) froze.
- if (!audio_decoder_job_->HasData())
- SetAudible(false);
- }
+ media_stat_->AddStarvation();
SetPendingEvent(PREFETCH_REQUEST_EVENT_PENDING);
ProcessPendingEvents();
@@ -742,6 +744,8 @@ void MediaSourcePlayer::OnPrefetchDone() {
if (!interpolator_.interpolating())
interpolator_.StartInterpolating();
+ media_stat_->Start(start_presentation_timestamp_);
+
if (!AudioFinished())
DecodeMoreAudio();
diff --git a/chromium/media/base/android/media_source_player.h b/chromium/media/base/android/media_source_player.h
index ab3bda3e72c..7e4d706c381 100644
--- a/chromium/media/base/android/media_source_player.h
+++ b/chromium/media/base/android/media_source_player.h
@@ -23,6 +23,7 @@
#include "media/base/android/media_decoder_job.h"
#include "media/base/android/media_drm_bridge.h"
#include "media/base/android/media_player_android.h"
+#include "media/base/android/media_statistics.h"
#include "media/base/media_export.h"
#include "media/base/time_delta_interpolator.h"
@@ -38,11 +39,12 @@ class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid,
public:
// Constructs a player with the given ID and demuxer. |manager| must outlive
// the lifetime of this object.
- MediaSourcePlayer(int player_id,
- MediaPlayerManager* manager,
- const RequestMediaResourcesCB& request_media_resources_cb,
- scoped_ptr<DemuxerAndroid> demuxer,
- const GURL& frame_url);
+ MediaSourcePlayer(
+ int player_id,
+ MediaPlayerManager* manager,
+ const OnDecoderResourcesReleasedCB& on_decoder_resources_released_cb,
+ scoped_ptr<DemuxerAndroid> demuxer,
+ const GURL& frame_url);
~MediaSourcePlayer() override;
// MediaPlayerAndroid implementation.
@@ -83,10 +85,11 @@ class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid,
void PlaybackCompleted(bool is_audio);
// Called when the decoder finishes its task.
- void MediaDecoderCallback(
- bool is_audio, MediaCodecStatus status,
- base::TimeDelta current_presentation_timestamp,
- base::TimeDelta max_presentation_timestamp);
+ void MediaDecoderCallback(bool is_audio,
+ MediaCodecStatus status,
+ bool is_late_frame,
+ base::TimeDelta current_presentation_timestamp,
+ base::TimeDelta max_presentation_timestamp);
bool IsPrerollFinished(bool is_audio) const;
@@ -94,7 +97,8 @@ class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid,
base::android::ScopedJavaLocalRef<jobject> GetMediaCrypto();
// Callback to notify that MediaCrypto is ready in |drm_bridge_|.
- void OnMediaCryptoReady();
+ void OnMediaCryptoReady(MediaDrmBridge::JavaObjectPtr media_crypto,
+ bool needs_protected_surface);
// Handle pending events if all the decoder jobs are not currently decoding.
void ProcessPendingEvents();
@@ -267,6 +271,10 @@ class MEDIA_EXPORT MediaSourcePlayer : public MediaPlayerAndroid,
// Whether audio or video decoder is in the process of prerolling.
bool prerolling_;
+ // Gathers and reports playback quality statistics to UMA.
+ // Use pointer to enable replacement of this object for tests.
+ scoped_ptr<MediaStatistics> media_stat_;
+
// Weak pointer passed to media decoder jobs for callbacks.
base::WeakPtr<MediaSourcePlayer> weak_this_;
// NOTE: Weak pointers must be invalidated before all other member variables.
diff --git a/chromium/media/base/android/media_source_player_unittest.cc b/chromium/media/base/android/media_source_player_unittest.cc
index be6db141b87..65b40ec0ecb 100644
--- a/chromium/media/base/android/media_source_player_unittest.cc
+++ b/chromium/media/base/android/media_source_player_unittest.cc
@@ -18,6 +18,7 @@
#include "media/base/bind_to_current_loop.h"
#include "media/base/decoder_buffer.h"
#include "media/base/test_data_util.h"
+#include "media/base/timestamp_constants.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "ui/gl/android/surface_texture.h"
@@ -44,11 +45,8 @@ class MockMediaPlayerManager : public MediaPlayerManager {
explicit MockMediaPlayerManager(base::MessageLoop* message_loop)
: message_loop_(message_loop),
playback_completed_(false),
- num_resources_requested_(0),
num_metadata_changes_(0),
timestamp_updated_(false),
- is_audible_(false),
- is_delay_expired_(false),
allow_play_(true) {}
~MockMediaPlayerManager() override {}
@@ -81,31 +79,20 @@ class MockMediaPlayerManager : public MediaPlayerManager {
void OnWaitingForDecryptionKey(int player_id) override {}
MediaPlayerAndroid* GetFullscreenPlayer() override { return NULL; }
MediaPlayerAndroid* GetPlayer(int player_id) override { return NULL; }
+ void OnDecorderResourcesReleased(int player_id) {}
- bool RequestPlay(int player_id) override {
+ bool RequestPlay(int player_id, base::TimeDelta duration) override {
return allow_play_;
}
- void OnAudibleStateChanged(int player_id, bool is_audible_now) override {
- is_audible_ = is_audible_now;
- }
-
bool playback_completed() const {
return playback_completed_;
}
- int num_resources_requested() const {
- return num_resources_requested_;
- }
-
int num_metadata_changes() const {
return num_metadata_changes_;
}
- void OnMediaResourcesRequested(int player_id) {
- num_resources_requested_++;
- }
-
bool timestamp_updated() const {
return timestamp_updated_;
}
@@ -114,18 +101,6 @@ class MockMediaPlayerManager : public MediaPlayerManager {
timestamp_updated_ = false;
}
- bool is_audible() const {
- return is_audible_;
- }
-
- bool is_delay_expired() const {
- return is_delay_expired_;
- }
-
- void SetDelayExpired(bool value) {
- is_delay_expired_ = value;
- }
-
void set_allow_play(bool value) {
allow_play_ = value;
}
@@ -133,16 +108,10 @@ class MockMediaPlayerManager : public MediaPlayerManager {
private:
base::MessageLoop* message_loop_;
bool playback_completed_;
- // The number of resource requests this object has seen.
- int num_resources_requested_;
// The number of metadata changes reported by the player.
int num_metadata_changes_;
// Playback timestamp was updated.
bool timestamp_updated_;
- // Audible state of the pipeline
- bool is_audible_;
- // Helper flag to ensure delay for WaitForDelay().
- bool is_delay_expired_;
// Whether the manager will allow players that request playing.
bool allow_play_;
@@ -196,7 +165,7 @@ class MediaSourcePlayerTest : public testing::Test {
: manager_(&message_loop_),
demuxer_(new MockDemuxerAndroid(&message_loop_)),
player_(0, &manager_,
- base::Bind(&MockMediaPlayerManager::OnMediaResourcesRequested,
+ base::Bind(&MockMediaPlayerManager::OnDecorderResourcesReleased,
base::Unretained(&manager_)),
scoped_ptr<DemuxerAndroid>(demuxer_),
GURL()),
@@ -566,37 +535,6 @@ class MediaSourcePlayerTest : public testing::Test {
EXPECT_LE(target_timestamp, player_.GetCurrentTime());
}
- void PlayAudioForTimeInterval(const base::TimeDelta& start_timestamp,
- const base::TimeDelta& target_timestamp ) {
-
- DemuxerData data = CreateReadFromDemuxerAckForAudio(1);
- int current_timestamp = start_timestamp.InMilliseconds();
- int stop_timestamp = target_timestamp.InMilliseconds();
- while (current_timestamp < stop_timestamp) {
- data.access_units[0].timestamp =
- base::TimeDelta::FromMilliseconds(current_timestamp);
- player_.OnDemuxerDataAvailable(data);
- current_timestamp += 30;
- WaitForAudioDecodeDone();
- }
- }
-
- void WaitForDelay(const base::TimeDelta& delay) {
- // Let the message_loop_ process events.
- // We post delayed task and RunUnitilIdle() until it signals.
-
- manager_.SetDelayExpired(false);
- message_loop_.PostDelayedTask(
- FROM_HERE,
- base::Bind(&MockMediaPlayerManager::SetDelayExpired,
- base::Unretained(&manager_),
- true),
- delay);
-
- while (!manager_.is_delay_expired())
- message_loop_.RunUntilIdle();
- }
-
DemuxerData CreateReadFromDemuxerAckWithConfigChanged(
bool is_audio,
int config_unit_index,
@@ -921,8 +859,6 @@ class MediaSourcePlayerTest : public testing::Test {
bool surface_texture_a_is_next_;
int next_texture_id_;
- bool verify_not_audible_is_called_;
-
DISALLOW_COPY_AND_ASSIGN(MediaSourcePlayerTest);
};
@@ -954,107 +890,6 @@ TEST_F(MediaSourcePlayerTest, StartAudioDecoderWithInvalidConfig) {
EXPECT_FALSE(GetMediaCodecBridge(true));
}
-// timav
-TEST_F(MediaSourcePlayerTest, AudioDecoderSetsAudibleState) {
- SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
-
- // No data arrived yet
- EXPECT_FALSE(manager_.is_audible());
-
- // Initialize decoder
- StartAudioDecoderJob();
- player_.SetVolume(1.0);
-
- // Process frames until prerolling is done.
- SeekPlayerWithAbort(true, base::TimeDelta::FromMilliseconds(100));
- EXPECT_TRUE(IsPrerolling(true));
- PrerollDecoderToTime(
- true, base::TimeDelta(), base::TimeDelta::FromMilliseconds(100), false);
- EXPECT_TRUE(IsPrerolling(false));
-
- // Send more packets
- PlayAudioForTimeInterval(base::TimeDelta::FromMilliseconds(150),
- base::TimeDelta::FromMilliseconds(220));
-
- // The player should trigger audible status
- EXPECT_TRUE(manager_.is_audible());
-
- // The player release should report a non-audible state.
- ReleasePlayer();
- EXPECT_FALSE(manager_.is_audible());
-}
-
-TEST_F(MediaSourcePlayerTest, AudioDecoderRemovesAudibleStateWhenPaused) {
- SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
-
- // No data arrived yet
- EXPECT_FALSE(manager_.is_audible());
-
- // Initialize decoder
- StartAudioDecoderJob();
- player_.SetVolume(1.0);
-
- // Process frames until prerolling is done.
- SeekPlayerWithAbort(true, base::TimeDelta::FromMilliseconds(100));
- EXPECT_TRUE(IsPrerolling(true));
- PrerollDecoderToTime(
- true, base::TimeDelta(), base::TimeDelta::FromMilliseconds(100), false);
- EXPECT_TRUE(IsPrerolling(false));
-
- // Send more packets
- PlayAudioForTimeInterval(base::TimeDelta::FromMilliseconds(150),
- base::TimeDelta::FromMilliseconds(220));
-
- // The player should trigger audible status
- EXPECT_TRUE(manager_.is_audible());
-
- // Pause the player
- player_.Pause(true);
-
- // Send more packets
- PlayAudioForTimeInterval(base::TimeDelta::FromMilliseconds(240),
- base::TimeDelta::FromMilliseconds(280));
-
- // The player should trigger audible status again
- EXPECT_FALSE(manager_.is_audible());
-
- player_.Release();
-}
-
-TEST_F(MediaSourcePlayerTest, AudioDecoderRemovesAudibleStateWhenIdle) {
- SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
-
- // No data arrived yet
- EXPECT_FALSE(manager_.is_audible());
-
- // Initialize decoder
- StartAudioDecoderJob();
- player_.SetVolume(1.0);
-
- // Process frames until prerolling is done.
- SeekPlayerWithAbort(true, base::TimeDelta::FromMilliseconds(100));
- EXPECT_TRUE(IsPrerolling(true));
- PrerollDecoderToTime(
- true, base::TimeDelta(), base::TimeDelta::FromMilliseconds(100), false);
- EXPECT_TRUE(IsPrerolling(false));
-
- // Send more packets
- PlayAudioForTimeInterval(base::TimeDelta::FromMilliseconds(150),
- base::TimeDelta::FromMilliseconds(220));
-
- // The player should trigger audible status
- EXPECT_TRUE(manager_.is_audible());
-
- // Simulate the freeze on demuxer: wait for 300 ms
- WaitForDelay(base::TimeDelta::FromMilliseconds(300));
-
- // By this time the player should have reported
- // that there is no audio.
- EXPECT_FALSE(manager_.is_audible());
-
- ReleasePlayer();
-}
-
TEST_F(MediaSourcePlayerTest, StartVideoCodecWithValidSurface) {
SKIP_TEST_IF_MEDIA_CODEC_BRIDGE_IS_NOT_AVAILABLE();
@@ -1221,8 +1056,6 @@ TEST_F(MediaSourcePlayerTest, ReleaseVideoDecoderResourcesWhileDecoding) {
// not be immediately released.
CreateNextTextureAndSetVideoSurface();
StartVideoDecoderJob();
- // No resource is requested since there is no data to decode.
- EXPECT_EQ(0, manager_.num_resources_requested());
ReleasePlayer();
player_.OnDemuxerDataAvailable(CreateReadFromDemuxerAckForVideo(false));
@@ -1232,7 +1065,6 @@ TEST_F(MediaSourcePlayerTest, ReleaseVideoDecoderResourcesWhileDecoding) {
while (!GetMediaDecoderJob(false)->is_decoding())
message_loop_.RunUntilIdle();
EXPECT_EQ(0, demuxer_->num_browser_seek_requests());
- EXPECT_EQ(1, manager_.num_resources_requested());
ReleasePlayer();
// Wait for the media codec bridge to finish decoding and be reset.
while (GetMediaDecoderJob(false)->is_decoding())
@@ -2094,10 +1926,6 @@ TEST_F(MediaSourcePlayerTest, VideoDemuxerConfigChange) {
EXPECT_TRUE(GetMediaCodecBridge(false));
EXPECT_EQ(3, demuxer_->num_data_requests());
EXPECT_EQ(0, demuxer_->num_seek_requests());
-
- // 2 codecs should have been created, one before the config change, and one
- // after it.
- EXPECT_EQ(2, manager_.num_resources_requested());
WaitForVideoDecodeDone();
}
@@ -2113,9 +1941,6 @@ TEST_F(MediaSourcePlayerTest, VideoDemuxerConfigChangeWithAdaptivePlayback) {
EXPECT_TRUE(GetMediaCodecBridge(false));
EXPECT_EQ(3, demuxer_->num_data_requests());
EXPECT_EQ(0, demuxer_->num_seek_requests());
-
- // Only 1 codec should have been created so far.
- EXPECT_EQ(1, manager_.num_resources_requested());
WaitForVideoDecodeDone();
}
diff --git a/chromium/media/base/android/media_statistics.cc b/chromium/media/base/android/media_statistics.cc
new file mode 100644
index 00000000000..19c0298e6ce
--- /dev/null
+++ b/chromium/media/base/android/media_statistics.cc
@@ -0,0 +1,119 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/android/media_statistics.h"
+
+#include "base/logging.h"
+#include "base/metrics/histogram_macros.h"
+#include "media/base/android/demuxer_stream_player_params.h"
+
+namespace media {
+
+// Minimum playback interval to report.
+const int kMinDurationInSeconds = 2;
+
+// Maximum playback interval to report.
+const int kMaxDurationInSeconds = 3600;
+
+// Number of slots in the histogram for playback interval.
+const int kNumDurationSlots = 50;
+
+// For easier reading.
+const int kOneMillion = 1000000;
+
+void FrameStatistics::IncrementLateFrameCount() {
+ // Do not collect the late frame if this is the first frame after the start.
+ // Right now we do not want to consider the late video frame which is the
+ // first after preroll, preroll may be inaccurate in this respect.
+ // The first audio frame cannot be late by definition and by not considering
+ // it we can simplify audio decoder code.
+ if (total == 1)
+ return;
+
+ ++late;
+}
+
+MediaStatistics::MediaStatistics() {}
+
+MediaStatistics::~MediaStatistics() {}
+
+void MediaStatistics::Start(base::TimeDelta current_playback_time) {
+ DVLOG(1) << __FUNCTION__;
+
+ if (start_time_ == kNoTimestamp()) {
+ Clear();
+ start_time_ = current_playback_time;
+ }
+}
+
+void MediaStatistics::StopAndReport(base::TimeDelta current_playback_time) {
+ DVLOG(1) << __FUNCTION__;
+
+ if (start_time_ == kNoTimestamp())
+ return; // skip if there was no prior Start().
+
+ base::TimeDelta duration = current_playback_time - start_time_;
+
+ // Reset start time.
+ start_time_ = kNoTimestamp();
+
+ if (duration < base::TimeDelta::FromSeconds(kMinDurationInSeconds))
+ return; // duration is too short.
+
+ if (duration > base::TimeDelta::FromSeconds(kMaxDurationInSeconds))
+ return; // duration is too long.
+
+ Report(duration);
+}
+
+void MediaStatistics::Clear() {
+ start_time_ = kNoTimestamp();
+ audio_frame_stats_.Clear();
+ video_frame_stats_.Clear();
+ num_starvations_ = 0;
+}
+
+void MediaStatistics::Report(base::TimeDelta duration) {
+ DVLOG(1) << __FUNCTION__ << " duration:" << duration
+ << " audio frames:"
+ << audio_frame_stats_.late << "/" << audio_frame_stats_.total
+ << " video frames:"
+ << video_frame_stats_.late << "/" << video_frame_stats_.total
+ << " starvations:" << num_starvations_;
+
+ // Playback duration is the time interval between the moment playback starts
+ // and the moment it is interrupted either by stopping or by seeking, changing
+ // to full screen, minimizing the browser etc. The interval is measured by
+ // media time.
+
+ UMA_HISTOGRAM_CUSTOM_TIMES(
+ "Media.MSE.PlaybackDuration", duration,
+ base::TimeDelta::FromSeconds(kMinDurationInSeconds),
+ base::TimeDelta::FromSeconds(kMaxDurationInSeconds), kNumDurationSlots);
+
+ // Number of late frames per one million frames.
+
+ if (audio_frame_stats_.total) {
+ UMA_HISTOGRAM_COUNTS(
+ "Media.MSE.LateAudioFrames",
+ kOneMillion * audio_frame_stats_.late / audio_frame_stats_.total);
+ }
+
+ if (video_frame_stats_.total) {
+ UMA_HISTOGRAM_COUNTS(
+ "Media.MSE.LateVideoFrames",
+ kOneMillion * video_frame_stats_.late / video_frame_stats_.total);
+ }
+
+ // Number of starvations per one million frames.
+
+ uint32_t total_frames = audio_frame_stats_.total ? audio_frame_stats_.total
+ : video_frame_stats_.total;
+ if (total_frames) {
+ UMA_HISTOGRAM_COUNTS("Media.MSE.Starvations",
+ kOneMillion * num_starvations_ / total_frames);
+ }
+}
+
+} // namespace media
diff --git a/chromium/media/base/android/media_statistics.h b/chromium/media/base/android/media_statistics.h
new file mode 100644
index 00000000000..5b6daabbd7d
--- /dev/null
+++ b/chromium/media/base/android/media_statistics.h
@@ -0,0 +1,88 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_ANDROID_MEDIA_STATISTICS_H_
+#define MEDIA_BASE_ANDROID_MEDIA_STATISTICS_H_
+
+#include <stdint.h>
+#include "base/time/time.h"
+#include "media/base/demuxer_stream.h"
+#include "media/base/timestamp_constants.h"
+
+namespace media {
+
+// FrameStatistics struct deals with frames of one stream, i.e. either
+// audio or video.
+struct FrameStatistics {
+ // Audio: total number of frames that have been rendered.
+ // Video: total number of frames that were supposed to be rendered. Late video
+ // frames might be skipped, but are counted here.
+ uint32_t total = 0;
+
+ // A number of late frames. Late frames are a subset of the total frames.
+ // Audio: The frame is late if it might cause an underrun, i.e. comes from
+ // decoder when audio buffer is already depleted.
+ // Video: The frame is late if it missed its presentation time as determined
+ // by PTS when it comes from decoder. The rendering policy (i.e.
+ // render or skip) does not affect this number.
+ uint32_t late = 0;
+
+ void Clear() { total = late = 0; }
+
+ // Increments |total| frame count.
+ void IncrementFrameCount() { ++total; }
+
+ // Increments |late| frame count except it is the first frame after start.
+ // For each IncrementLateFrameCount() call there should be preceding
+ // IncrementFrameCount() call.
+ void IncrementLateFrameCount();
+};
+
+// MediaStatistics class gathers and reports playback quality statistics to UMA.
+//
+// This class is not thread safe. The caller should guarantee that operations
+// on FrameStatistics objects does not happen during Start() and
+// StopAndReport(). The Start() and StopAndReport() methods need to be called
+// sequentially.
+
+class MediaStatistics {
+ public:
+ MediaStatistics();
+ ~MediaStatistics();
+
+ // Returns the frame statistics for audio frames.
+ FrameStatistics& audio_frame_stats() { return audio_frame_stats_; }
+
+ // Returns the frame statistics for video frames.
+ FrameStatistics& video_frame_stats() { return video_frame_stats_; }
+
+ // Starts gathering statistics. When called in a row only the first call will
+ // take effect.
+ void Start(base::TimeDelta current_playback_time);
+
+ // Stops gathering statistics, calculates and reports results. When called
+ // in a row only the first call will take effect.
+ void StopAndReport(base::TimeDelta current_playback_time);
+
+ // Adds starvation event. Starvation happens when the player interrupts
+ // the regular playback and asks for more data.
+ void AddStarvation() { ++num_starvations_; }
+
+ private:
+ // Resets the data to the initial state.
+ void Clear();
+
+ // Calculates relative data based on total frame numbers and reports it and
+ // the duration to UMA.
+ void Report(base::TimeDelta duration);
+
+ base::TimeDelta start_time_ = kNoTimestamp();
+ FrameStatistics audio_frame_stats_;
+ FrameStatistics video_frame_stats_;
+ uint32_t num_starvations_ = 0;
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_ANDROID_MEDIA_STATISTICS_H_
diff --git a/chromium/media/base/android/media_task_runner.cc b/chromium/media/base/android/media_task_runner.cc
new file mode 100644
index 00000000000..a52919cde30
--- /dev/null
+++ b/chromium/media/base/android/media_task_runner.cc
@@ -0,0 +1,43 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/android/media_task_runner.h"
+
+#include "base/command_line.h"
+#include "base/lazy_instance.h"
+#include "base/metrics/field_trial.h"
+#include "base/strings/string_util.h"
+#include "base/threading/thread.h"
+#include "media/base/media_switches.h"
+
+namespace media {
+
+class MediaThread : public base::Thread {
+ public:
+ MediaThread() : base::Thread("BrowserMediaThread") {
+ Start();
+ }
+};
+
+// Create media thread
+base::LazyInstance<MediaThread>::Leaky g_media_thread =
+ LAZY_INSTANCE_INITIALIZER;
+
+scoped_refptr<base::SingleThreadTaskRunner> GetMediaTaskRunner() {
+ return g_media_thread.Pointer()->task_runner();
+}
+
+bool UseMediaThreadForMediaPlayback() {
+ const std::string group_name =
+ base::FieldTrialList::FindFullName("EnableMediaThreadForMediaPlayback");
+
+ if (base::CommandLine::ForCurrentProcess()->
+ HasSwitch(switches::kEnableMediaThreadForMediaPlayback)) {
+ return true;
+ }
+
+ return base::StartsWith(group_name, "Enabled", base::CompareCase::SENSITIVE);
+}
+
+} // namespace media
diff --git a/chromium/media/base/android/media_task_runner.h b/chromium/media/base/android/media_task_runner.h
new file mode 100644
index 00000000000..b91c5472fed
--- /dev/null
+++ b/chromium/media/base/android/media_task_runner.h
@@ -0,0 +1,23 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_ANDROID_MEDIA_TASK_RUNNER_H_
+#define MEDIA_BASE_ANDROID_MEDIA_TASK_RUNNER_H_
+
+#include "base/memory/scoped_ptr.h"
+#include "base/single_thread_task_runner.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+// Returns the task runner for the media thread.
+MEDIA_EXPORT scoped_refptr<base::SingleThreadTaskRunner> GetMediaTaskRunner();
+
+// Returns true if the MediaCodecPlayer (which works on Media thread) should be
+// used. This behavior is controlled by a finch flag.
+MEDIA_EXPORT bool UseMediaThreadForMediaPlayback();
+
+} // namespace media
+
+#endif // MEDIA_BASE_ANDROID_MEDIA_TASK_RUNNER_H_
diff --git a/chromium/media/base/android/test_data_factory.cc b/chromium/media/base/android/test_data_factory.cc
index 8af93826060..e9012d8572c 100644
--- a/chromium/media/base/android/test_data_factory.cc
+++ b/chromium/media/base/android/test_data_factory.cc
@@ -4,6 +4,8 @@
#include "media/base/android/test_data_factory.h"
+#include <iterator>
+
#include "base/strings/stringprintf.h"
#include "media/base/android/demuxer_stream_player_params.h"
#include "media/base/decoder_buffer.h"
@@ -11,9 +13,8 @@
namespace media {
-DemuxerConfigs TestDataFactory::CreateAudioConfigs(
- AudioCodec audio_codec,
- const base::TimeDelta& duration) {
+DemuxerConfigs TestDataFactory::CreateAudioConfigs(AudioCodec audio_codec,
+ base::TimeDelta duration) {
DemuxerConfigs configs;
configs.audio_codec = audio_codec;
configs.audio_channels = 2;
@@ -30,10 +31,8 @@ DemuxerConfigs TestDataFactory::CreateAudioConfigs(
} break;
case kCodecAAC: {
- configs.audio_sampling_rate = 48000;
- uint8 aac_extra_data[] = {0x13, 0x10};
- configs.audio_extra_data =
- std::vector<uint8>(aac_extra_data, aac_extra_data + 2);
+ configs.audio_sampling_rate = 44100;
+ configs.audio_extra_data = {0x12, 0x10};
} break;
default:
@@ -47,7 +46,7 @@ DemuxerConfigs TestDataFactory::CreateAudioConfigs(
DemuxerConfigs TestDataFactory::CreateVideoConfigs(
VideoCodec video_codec,
- const base::TimeDelta& duration,
+ base::TimeDelta duration,
const gfx::Size& video_size) {
DemuxerConfigs configs;
configs.video_codec = video_codec;
@@ -59,11 +58,13 @@ DemuxerConfigs TestDataFactory::CreateVideoConfigs(
}
TestDataFactory::TestDataFactory(const char* file_name_template,
- const base::TimeDelta& duration,
- const base::TimeDelta& frame_period)
+ base::TimeDelta duration,
+ base::TimeDelta frame_period)
: duration_(duration),
frame_period_(frame_period),
- starvation_mode_(false) {
+ total_chunks_(0),
+ starvation_mode_(false),
+ eos_reached_(false) {
LoadPackets(file_name_template);
}
@@ -73,40 +74,84 @@ bool TestDataFactory::CreateChunk(DemuxerData* chunk, base::TimeDelta* delay) {
DCHECK(chunk);
DCHECK(delay);
+ if (eos_reached_)
+ return false;
+
*delay = base::TimeDelta();
- if (regular_pts_ > duration_)
- return false;
+ if (!total_chunks_ &&
+ HasReconfigForInterval(base::TimeDelta::FromMilliseconds(-1),
+ base::TimeDelta())) {
+ // Since the configs AU has to come last in the chunk the initial configs
+ // preceeding any other data has to be the only unit in the chunk.
+ AddConfiguration(chunk);
+ ++total_chunks_;
+ return true;
+ }
for (int i = 0; i < 4; ++i) {
chunk->access_units.push_back(AccessUnit());
AccessUnit& unit = chunk->access_units.back();
- unit.status = DemuxerStream::kOk;
+ unit.status = DemuxerStream::kOk;
unit.timestamp = regular_pts_;
+ unit.data = packet_[i];
+
regular_pts_ += frame_period_;
+ }
- if (unit.timestamp > duration_) {
- if (starvation_mode_)
- return false;
+ if (chunk->access_units.back().timestamp > duration_) {
+ eos_reached_ = true;
+ // Replace last access unit with stand-alone EOS if we exceeded duration.
+ if (!starvation_mode_) {
+ AccessUnit& unit = chunk->access_units.back();
unit.is_end_of_stream = true;
- break; // EOS units have no data.
+ unit.data.clear();
}
+ }
- unit.data = packet_[i];
-
- // Allow for modification by subclasses.
- ModifyAccessUnit(i, &unit);
+ // Allow for modification by subclasses.
+ ModifyChunk(chunk);
- // Maintain last PTS. ModifyAccessUnit() can modify unit's PTS.
- if (last_pts_ < unit.timestamp)
+ // Maintain last PTS.
+ for (const AccessUnit& unit : chunk->access_units) {
+ if (last_pts_ < unit.timestamp && !unit.data.empty())
last_pts_ = unit.timestamp;
}
+ // Replace last access unit with |kConfigChanged| if we have a config
+ // request for the chunk's interval.
+ base::TimeDelta new_chunk_begin_pts = regular_pts_;
+
+ // The interval is [first, last)
+ if (HasReconfigForInterval(chunk_begin_pts_, new_chunk_begin_pts)) {
+ eos_reached_ = false;
+ regular_pts_ -= frame_period_;
+ chunk->access_units.pop_back();
+ AddConfiguration(chunk);
+ }
+ chunk_begin_pts_ = new_chunk_begin_pts;
+
+ ++total_chunks_;
return true;
}
+void TestDataFactory::SeekTo(const base::TimeDelta& seek_time) {
+ regular_pts_ = seek_time;
+ chunk_begin_pts_ = seek_time;
+ last_pts_ = base::TimeDelta();
+ eos_reached_ = false;
+}
+
+void TestDataFactory::RequestInitialConfigs() {
+ reconfigs_.insert(base::TimeDelta::FromMilliseconds(-1));
+}
+
+void TestDataFactory::RequestConfigChange(base::TimeDelta config_position) {
+ reconfigs_.insert(config_position);
+}
+
void TestDataFactory::LoadPackets(const char* file_name_template) {
for (int i = 0; i < 4; ++i) {
scoped_refptr<DecoderBuffer> buffer =
@@ -116,4 +161,25 @@ void TestDataFactory::LoadPackets(const char* file_name_template) {
}
}
+bool TestDataFactory::HasReconfigForInterval(base::TimeDelta left,
+ base::TimeDelta right) const {
+ // |first| points to an element greater or equal to |left|.
+ PTSSet::const_iterator first = reconfigs_.lower_bound(left);
+
+ // |last| points to an element greater or equal to |right|.
+ PTSSet::const_iterator last = reconfigs_.lower_bound(right);
+
+ return std::distance(first, last);
+}
+
+void TestDataFactory::AddConfiguration(DemuxerData* chunk) {
+ DCHECK(chunk);
+ chunk->access_units.push_back(AccessUnit());
+ AccessUnit& unit = chunk->access_units.back();
+ unit.status = DemuxerStream::kConfigChanged;
+
+ DCHECK(chunk->demuxer_configs.empty());
+ chunk->demuxer_configs.push_back(GetConfigs());
+}
+
} // namespace media
diff --git a/chromium/media/base/android/test_data_factory.h b/chromium/media/base/android/test_data_factory.h
index b186d75e276..c831c24116d 100644
--- a/chromium/media/base/android/test_data_factory.h
+++ b/chromium/media/base/android/test_data_factory.h
@@ -6,6 +6,7 @@
#define MEDIA_BASE_ANDROID_TEST_DATA_FACTORY_H_
#include <stdint.h>
+#include <set>
#include <vector>
#include "base/memory/ref_counted.h"
#include "base/time/time.h"
@@ -19,9 +20,9 @@ class TestDataFactory {
public:
// These methods return corresponding demuxer configs.
static DemuxerConfigs CreateAudioConfigs(AudioCodec audio_codec,
- const base::TimeDelta& duration);
+ base::TimeDelta duration);
static DemuxerConfigs CreateVideoConfigs(VideoCodec video_codec,
- const base::TimeDelta& duration,
+ base::TimeDelta duration,
const gfx::Size& video_size);
// Constructor calls |LoadPackets| to load packets from files.
@@ -33,8 +34,8 @@ class TestDataFactory {
// unit and stops.
// frame_period: PTS increment between units.
TestDataFactory(const char* file_name_template,
- const base::TimeDelta& duration,
- const base::TimeDelta& frame_period);
+ const base::TimeDelta duration,
+ const base::TimeDelta frame_period);
virtual ~TestDataFactory();
// Returns demuxer configuration for this factory.
@@ -50,6 +51,17 @@ class TestDataFactory {
// In starvation mode we do not add EOS at the end.
void SetStarvationMode(bool value) { starvation_mode_ = value; }
+ // Resets the timestamp for the next access unit.
+ void SeekTo(const base::TimeDelta& seek_time);
+
+ // Request that a chunk containing a sole |kConfigChanged| unit is generated
+ // before the first true data chunk.
+ void RequestInitialConfigs();
+
+ void RequestConfigChange(base::TimeDelta config_position);
+
+ // Returns the maximum PTS, taking into account possible modifications
+ // by subclasses. The SeekTo() resets this value.
base::TimeDelta last_pts() const { return last_pts_; }
protected:
@@ -57,15 +69,30 @@ class TestDataFactory {
// |file_name_template|.
virtual void LoadPackets(const char* file_name_template);
- // Used to modify the generated access unit by a subclass.
- virtual void ModifyAccessUnit(int index_in_chunk, AccessUnit* unit) {}
+ // Used to modify the generated chunk by a subclass.
+ virtual void ModifyChunk(DemuxerData* chunk) {}
base::TimeDelta duration_;
base::TimeDelta frame_period_;
+
+ private:
+ typedef std::set<base::TimeDelta> PTSSet;
+
+ // |left| is included in the interval, |right| is excluded.
+ // If |left| == |right|, the interval is empty and the method returns false.
+ bool HasReconfigForInterval(base::TimeDelta left,
+ base::TimeDelta right) const;
+
+ void AddConfiguration(DemuxerData* chunk);
+
std::vector<uint8_t> packet_[4];
base::TimeDelta regular_pts_; // monotonically increasing PTS
- base::TimeDelta last_pts_; // subclass can modify PTS, maintains the last
+ base::TimeDelta chunk_begin_pts_; // beginning of chunk time interval
+ base::TimeDelta last_pts_; // subclass can modify PTS, maintain the last
+ PTSSet reconfigs_; // ConfigChange requests
+ size_t total_chunks_; // total number of chunks returned
bool starvation_mode_; // true means no EOS at the end
+ bool eos_reached_; // true if CreateChunk() returned EOS frame
};
} // namespace media
diff --git a/chromium/media/base/android/test_statistics.h b/chromium/media/base/android/test_statistics.h
new file mode 100644
index 00000000000..9c889b49943
--- /dev/null
+++ b/chromium/media/base/android/test_statistics.h
@@ -0,0 +1,49 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_ANDROID_TEST_STATISTICS_H_
+#define MEDIA_BASE_ANDROID_TEST_STATISTICS_H_
+
+namespace media {
+
+// Class that computes statistics: number of calls, minimum and maximum values.
+// It is used in tests for PTS statistics to verify that playback did actually
+// happen.
+
+template <typename T>
+class Minimax {
+ public:
+ Minimax() : num_values_(0) {}
+ ~Minimax() {}
+
+ void AddValue(const T& value) {
+ if (num_values_ == 0)
+ min_ = max_ = value;
+ else if (value < min_)
+ min_ = value;
+ else if (max_ < value)
+ max_ = value;
+
+ ++num_values_;
+ }
+
+ void Clear() {
+ min_ = T();
+ max_ = T();
+ num_values_ = 0;
+ }
+
+ const T& min() const { return min_; }
+ const T& max() const { return max_; }
+ int num_values() const { return num_values_; }
+
+ private:
+ T min_;
+ T max_;
+ int num_values_;
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_ANDROID_TEST_STATISTICS_H_
diff --git a/chromium/media/base/android/video_decoder_job.cc b/chromium/media/base/android/video_decoder_job.cc
index d49d2d2e6a0..d43f45039b1 100644
--- a/chromium/media/base/android/video_decoder_job.cc
+++ b/chromium/media/base/android/video_decoder_job.cc
@@ -27,7 +27,6 @@ base::LazyInstance<VideoDecoderThread>::Leaky
VideoDecoderJob::VideoDecoderJob(
const base::Closure& request_data_cb,
- const base::Closure& request_resources_cb,
const base::Closure& on_demuxer_config_changed_cb)
: MediaDecoderJob(g_video_decoder_thread.Pointer()->task_runner(),
request_data_cb,
@@ -36,8 +35,7 @@ VideoDecoderJob::VideoDecoderJob(
config_width_(0),
config_height_(0),
output_width_(0),
- output_height_(0),
- request_resources_cb_(request_resources_cb) {
+ output_height_(0) {
}
VideoDecoderJob::~VideoDecoderJob() {}
@@ -78,12 +76,15 @@ void VideoDecoderJob::SetDemuxerConfigs(const DemuxerConfigs& configs) {
void VideoDecoderJob::ReleaseOutputBuffer(
int output_buffer_index,
+ size_t offset,
size_t size,
bool render_output,
+ bool is_late_frame,
base::TimeDelta current_presentation_timestamp,
const ReleaseOutputCompletionCallback& callback) {
media_codec_bridge_->ReleaseOutputBuffer(output_buffer_index, render_output);
- callback.Run(current_presentation_timestamp, current_presentation_timestamp);
+ callback.Run(is_late_frame, current_presentation_timestamp,
+ current_presentation_timestamp);
}
bool VideoDecoderJob::ComputeTimeToRender() const {
@@ -140,7 +141,6 @@ MediaDecoderJob::MediaDecoderJobStatus
if (!media_codec_bridge_)
return STATUS_FAILURE;
- request_resources_cb_.Run();
return STATUS_SUCCESS;
}
diff --git a/chromium/media/base/android/video_decoder_job.h b/chromium/media/base/android/video_decoder_job.h
index 36f70d830b9..29d576136db 100644
--- a/chromium/media/base/android/video_decoder_job.h
+++ b/chromium/media/base/android/video_decoder_job.h
@@ -18,13 +18,10 @@ class VideoDecoderJob : public MediaDecoderJob {
public:
// Create a new VideoDecoderJob instance.
// |request_data_cb| - Callback used to request more data for the decoder.
- // |request_resources_cb| - Callback used to request resources.
// |on_demuxer_config_changed_cb| - Callback used to inform the caller that
// demuxer config has changed.
- VideoDecoderJob(
- const base::Closure& request_data_cb,
- const base::Closure& request_resources_cb,
- const base::Closure& on_demuxer_config_changed_cb);
+ VideoDecoderJob(const base::Closure& request_data_cb,
+ const base::Closure& on_demuxer_config_changed_cb);
~VideoDecoderJob() override;
// Passes a java surface object to the codec. Returns true if the surface
@@ -43,8 +40,10 @@ class VideoDecoderJob : public MediaDecoderJob {
// MediaDecoderJob implementation.
void ReleaseOutputBuffer(
int output_buffer_index,
+ size_t offset,
size_t size,
bool render_output,
+ bool is_late_frame,
base::TimeDelta current_presentation_timestamp,
const ReleaseOutputCompletionCallback& callback) override;
bool ComputeTimeToRender() const override;
@@ -68,10 +67,6 @@ class VideoDecoderJob : public MediaDecoderJob {
// The surface object currently owned by the player.
gfx::ScopedJavaSurface surface_;
- // Callbacks to inform the caller about decoder resources change.
- base::Closure request_resources_cb_;
- base::Closure release_resources_cb_;
-
DISALLOW_COPY_AND_ASSIGN(VideoDecoderJob);
};
diff --git a/chromium/media/base/android/webaudio_media_codec_bridge.cc b/chromium/media/base/android/webaudio_media_codec_bridge.cc
index 12861d67ce1..312e96636eb 100644
--- a/chromium/media/base/android/webaudio_media_codec_bridge.cc
+++ b/chromium/media/base/android/webaudio_media_codec_bridge.cc
@@ -30,10 +30,13 @@ namespace media {
void WebAudioMediaCodecBridge::RunWebAudioMediaCodec(
base::SharedMemoryHandle encoded_audio_handle,
base::FileDescriptor pcm_output,
- uint32_t data_size) {
- WebAudioMediaCodecBridge bridge(encoded_audio_handle, pcm_output, data_size);
+ uint32_t data_size,
+ base::Closure on_decode_finished_cb) {
+ WebAudioMediaCodecBridge bridge(
+ encoded_audio_handle, pcm_output, data_size);
bridge.DecodeInMemoryAudioFile();
+ on_decode_finished_cb.Run();
}
WebAudioMediaCodecBridge::WebAudioMediaCodecBridge(
diff --git a/chromium/media/base/android/webaudio_media_codec_bridge.h b/chromium/media/base/android/webaudio_media_codec_bridge.h
index fda612683fc..1585c4cee76 100644
--- a/chromium/media/base/android/webaudio_media_codec_bridge.h
+++ b/chromium/media/base/android/webaudio_media_codec_bridge.h
@@ -7,6 +7,7 @@
#include <jni.h>
+#include "base/callback.h"
#include "base/file_descriptor_posix.h"
#include "base/memory/shared_memory.h"
#include "media/base/media_export.h"
@@ -36,7 +37,8 @@ class MEDIA_EXPORT WebAudioMediaCodecBridge {
static void RunWebAudioMediaCodec(
base::SharedMemoryHandle encoded_audio_handle,
base::FileDescriptor pcm_output,
- uint32_t data_size);
+ uint32_t data_size,
+ base::Closure on_decode_finished_cb);
void OnChunkDecoded(JNIEnv* env,
jobject /*java object*/,
diff --git a/chromium/media/base/audio_buffer.cc b/chromium/media/base/audio_buffer.cc
index f07ed6dca2b..564cff156bb 100644
--- a/chromium/media/base/audio_buffer.cc
+++ b/chromium/media/base/audio_buffer.cc
@@ -8,8 +8,8 @@
#include "base/logging.h"
#include "media/base/audio_bus.h"
-#include "media/base/buffers.h"
#include "media/base/limits.h"
+#include "media/base/timestamp_constants.h"
namespace media {
diff --git a/chromium/media/base/audio_buffer_converter.cc b/chromium/media/base/audio_buffer_converter.cc
index 0297ff6a3d2..8c5ec1c734c 100644
--- a/chromium/media/base/audio_buffer_converter.cc
+++ b/chromium/media/base/audio_buffer_converter.cc
@@ -11,8 +11,8 @@
#include "media/base/audio_bus.h"
#include "media/base/audio_decoder_config.h"
#include "media/base/audio_timestamp_helper.h"
-#include "media/base/buffers.h"
#include "media/base/sinc_resampler.h"
+#include "media/base/timestamp_constants.h"
#include "media/base/vector_math.h"
namespace media {
@@ -141,7 +141,6 @@ void AudioBufferConverter::ResetConverter(
input_params_.Reset(
input_params_.format(),
buffer->channel_layout(),
- buffer->channel_count(),
buffer->sample_rate(),
input_params_.bits_per_sample(),
// If resampling is needed and the FIFO disabled, the AudioConverter will
@@ -150,6 +149,7 @@ void AudioBufferConverter::ResetConverter(
buffer->sample_rate() == output_params_.sample_rate()
? output_params_.frames_per_buffer()
: SincResampler::kDefaultRequestSize);
+ input_params_.set_channels_for_discrete(buffer->channel_count());
io_sample_rate_ratio_ = static_cast<double>(input_params_.sample_rate()) /
output_params_.sample_rate();
diff --git a/chromium/media/base/audio_buffer_converter_unittest.cc b/chromium/media/base/audio_buffer_converter_unittest.cc
index 7045b702974..b022523c08b 100644
--- a/chromium/media/base/audio_buffer_converter_unittest.cc
+++ b/chromium/media/base/audio_buffer_converter_unittest.cc
@@ -207,13 +207,10 @@ TEST_F(AudioBufferConverterTest, ResetThenConvert) {
}
TEST_F(AudioBufferConverterTest, DiscreteChannelLayout) {
- output_params_ = AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
- CHANNEL_LAYOUT_DISCRETE,
- 2,
- kOutSampleRate,
- 16,
- 512,
- 0);
+ output_params_ =
+ AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ CHANNEL_LAYOUT_DISCRETE, kOutSampleRate, 16, 512);
+ output_params_.set_channels_for_discrete(2);
audio_buffer_converter_.reset(new AudioBufferConverter(output_params_));
AddInput(MakeTestBuffer(kOutSampleRate, CHANNEL_LAYOUT_STEREO, 2, 512));
ConsumeAllOutput();
diff --git a/chromium/media/base/audio_buffer_queue.cc b/chromium/media/base/audio_buffer_queue.cc
index 299d44fe85f..dfa655a3be3 100644
--- a/chromium/media/base/audio_buffer_queue.cc
+++ b/chromium/media/base/audio_buffer_queue.cc
@@ -8,7 +8,6 @@
#include "base/logging.h"
#include "media/base/audio_bus.h"
-#include "media/base/buffers.h"
namespace media {
diff --git a/chromium/media/base/audio_buffer_queue_unittest.cc b/chromium/media/base/audio_buffer_queue_unittest.cc
index d883189319b..f467cda93a6 100644
--- a/chromium/media/base/audio_buffer_queue_unittest.cc
+++ b/chromium/media/base/audio_buffer_queue_unittest.cc
@@ -9,8 +9,8 @@
#include "media/base/audio_buffer.h"
#include "media/base/audio_buffer_queue.h"
#include "media/base/audio_bus.h"
-#include "media/base/buffers.h"
#include "media/base/test_helpers.h"
+#include "media/base/timestamp_constants.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace media {
diff --git a/chromium/media/base/audio_buffer_unittest.cc b/chromium/media/base/audio_buffer_unittest.cc
index 43c763e848f..b48220df528 100644
--- a/chromium/media/base/audio_buffer_unittest.cc
+++ b/chromium/media/base/audio_buffer_unittest.cc
@@ -202,13 +202,8 @@ TEST(AudioBufferTest, FrameSize) {
kTimestamp);
EXPECT_EQ(16, buffer->frame_count()); // 2 channels of 8-bit data
- buffer = AudioBuffer::CopyFrom(kSampleFormatF32,
- CHANNEL_LAYOUT_4_0,
- 4,
- kSampleRate,
- 2,
- data,
- kTimestamp);
+ buffer = AudioBuffer::CopyFrom(kSampleFormatF32, CHANNEL_LAYOUT_4_0, 4,
+ kSampleRate, 2, data, kTimestamp);
EXPECT_EQ(2, buffer->frame_count()); // now 4 channels of 32-bit data
}
diff --git a/chromium/media/base/audio_capturer_source.h b/chromium/media/base/audio_capturer_source.h
index 621c3921ab0..ae60f913850 100644
--- a/chromium/media/base/audio_capturer_source.h
+++ b/chromium/media/base/audio_capturer_source.h
@@ -30,7 +30,7 @@ class AudioCapturerSource
bool key_pressed) = 0;
// Signals an error has occurred.
- virtual void OnCaptureError() = 0;
+ virtual void OnCaptureError(const std::string& message) = 0;
protected:
virtual ~CaptureCallback() {}
diff --git a/chromium/media/base/audio_converter_unittest.cc b/chromium/media/base/audio_converter_unittest.cc
index dcc3db69b14..741027f3fd0 100644
--- a/chromium/media/base/audio_converter_unittest.cc
+++ b/chromium/media/base/audio_converter_unittest.cc
@@ -198,13 +198,13 @@ TEST(AudioConverterTest, AudioDelayAndDiscreteChannelCount) {
// Choose input and output parameters such that the transform must make
// multiple calls to fill the buffer.
AudioParameters input_parameters(AudioParameters::AUDIO_PCM_LINEAR,
- CHANNEL_LAYOUT_DISCRETE, 10, kSampleRate,
- kBitsPerChannel, kLowLatencyBufferSize,
- AudioParameters::NO_EFFECTS);
+ CHANNEL_LAYOUT_DISCRETE, kSampleRate,
+ kBitsPerChannel, kLowLatencyBufferSize);
+ input_parameters.set_channels_for_discrete(10);
AudioParameters output_parameters(AudioParameters::AUDIO_PCM_LINEAR,
- CHANNEL_LAYOUT_DISCRETE, 5, kSampleRate * 2,
- kBitsPerChannel, kHighLatencyBufferSize,
- AudioParameters::NO_EFFECTS);
+ CHANNEL_LAYOUT_DISCRETE, kSampleRate * 2,
+ kBitsPerChannel, kHighLatencyBufferSize);
+ output_parameters.set_channels_for_discrete(5);
AudioConverter converter(input_parameters, output_parameters, false);
FakeAudioRenderCallback callback(0.2);
diff --git a/chromium/media/base/audio_decoder_config.cc b/chromium/media/base/audio_decoder_config.cc
index 780bada49e9..6dc5b2f6532 100644
--- a/chromium/media/base/audio_decoder_config.cc
+++ b/chromium/media/base/audio_decoder_config.cc
@@ -5,8 +5,6 @@
#include "media/base/audio_decoder_config.h"
#include "base/logging.h"
-#include "base/metrics/histogram.h"
-#include "media/audio/sample_rates.h"
#include "media/base/limits.h"
namespace media {
@@ -30,8 +28,7 @@ AudioDecoderConfig::AudioDecoderConfig(AudioCodec codec,
size_t extra_data_size,
bool is_encrypted) {
Initialize(codec, sample_format, channel_layout, samples_per_second,
- extra_data, extra_data_size, is_encrypted, true,
- base::TimeDelta(), 0);
+ extra_data, extra_data_size, is_encrypted, base::TimeDelta(), 0);
}
void AudioDecoderConfig::Initialize(AudioCodec codec,
@@ -41,27 +38,10 @@ void AudioDecoderConfig::Initialize(AudioCodec codec,
const uint8* extra_data,
size_t extra_data_size,
bool is_encrypted,
- bool record_stats,
base::TimeDelta seek_preroll,
int codec_delay) {
CHECK((extra_data_size != 0) == (extra_data != NULL));
- if (record_stats) {
- UMA_HISTOGRAM_ENUMERATION("Media.AudioCodec", codec, kAudioCodecMax + 1);
- UMA_HISTOGRAM_ENUMERATION("Media.AudioSampleFormat", sample_format,
- kSampleFormatMax + 1);
- UMA_HISTOGRAM_ENUMERATION("Media.AudioChannelLayout", channel_layout,
- CHANNEL_LAYOUT_MAX + 1);
- AudioSampleRate asr;
- if (ToAudioSampleRate(samples_per_second, &asr)) {
- UMA_HISTOGRAM_ENUMERATION("Media.AudioSamplesPerSecond", asr,
- kAudioSampleRateMax + 1);
- } else {
- UMA_HISTOGRAM_COUNTS(
- "Media.AudioSamplesPerSecondUnexpected", samples_per_second);
- }
- }
-
codec_ = codec;
channel_layout_ = channel_layout;
samples_per_second_ = samples_per_second;
diff --git a/chromium/media/base/audio_decoder_config.h b/chromium/media/base/audio_decoder_config.h
index 9b2fe96855d..c9c85938f5a 100644
--- a/chromium/media/base/audio_decoder_config.h
+++ b/chromium/media/base/audio_decoder_config.h
@@ -65,10 +65,13 @@ class MEDIA_EXPORT AudioDecoderConfig {
~AudioDecoderConfig();
// Resets the internal state of this object. |codec_delay| is in frames.
- void Initialize(AudioCodec codec, SampleFormat sample_format,
- ChannelLayout channel_layout, int samples_per_second,
- const uint8* extra_data, size_t extra_data_size,
- bool is_encrypted, bool record_stats,
+ void Initialize(AudioCodec codec,
+ SampleFormat sample_format,
+ ChannelLayout channel_layout,
+ int samples_per_second,
+ const uint8* extra_data,
+ size_t extra_data_size,
+ bool is_encrypted,
base::TimeDelta seek_preroll,
int codec_delay);
diff --git a/chromium/media/base/audio_discard_helper.cc b/chromium/media/base/audio_discard_helper.cc
index f7c645cc20e..12c8e347d2c 100644
--- a/chromium/media/base/audio_discard_helper.cc
+++ b/chromium/media/base/audio_discard_helper.cc
@@ -104,11 +104,8 @@ bool AudioDiscardHelper::ProcessBuffers(
// If everything would be discarded, indicate a new buffer is required.
if (frames_to_discard == decoded_frames) {
- // For simplicity disallow cases where a buffer with discard padding is
- // present. Doing so allows us to avoid complexity around tracking
- // discards across buffers.
- DCHECK(current_discard_padding.first == base::TimeDelta());
- DCHECK(current_discard_padding.second == base::TimeDelta());
+ // For simplicity, we just drop any discard padding if |discard_frames_|
+ // consumes the entire buffer.
return false;
}
diff --git a/chromium/media/base/audio_discard_helper.h b/chromium/media/base/audio_discard_helper.h
index ded404fec13..7708a2f35ac 100644
--- a/chromium/media/base/audio_discard_helper.h
+++ b/chromium/media/base/audio_discard_helper.h
@@ -8,9 +8,9 @@
#include "base/memory/ref_counted.h"
#include "base/time/time.h"
#include "media/base/audio_timestamp_helper.h"
-#include "media/base/buffers.h"
#include "media/base/decoder_buffer.h"
#include "media/base/media_export.h"
+#include "media/base/timestamp_constants.h"
namespace media {
diff --git a/chromium/media/base/audio_discard_helper_unittest.cc b/chromium/media/base/audio_discard_helper_unittest.cc
index 8919530a1e8..0f9a5180986 100644
--- a/chromium/media/base/audio_discard_helper_unittest.cc
+++ b/chromium/media/base/audio_discard_helper_unittest.cc
@@ -6,9 +6,9 @@
#include "media/base/audio_buffer.h"
#include "media/base/audio_bus.h"
#include "media/base/audio_discard_helper.h"
-#include "media/base/buffers.h"
#include "media/base/decoder_buffer.h"
#include "media/base/test_helpers.h"
+#include "media/base/timestamp_constants.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace media {
diff --git a/chromium/media/base/audio_hardware_config.cc b/chromium/media/base/audio_hardware_config.cc
index d00e03f6b08..ca34f5e3a37 100644
--- a/chromium/media/base/audio_hardware_config.cc
+++ b/chromium/media/base/audio_hardware_config.cc
@@ -97,18 +97,18 @@ void AudioHardwareConfig::UpdateOutputConfig(
output_params_ = output_params;
}
-int AudioHardwareConfig::GetHighLatencyBufferSize() const {
- AutoLock auto_lock(config_lock_);
-
+// static
+int AudioHardwareConfig::GetHighLatencyBufferSize(
+ const media::AudioParameters& output_params) {
// Empirically, we consider 20ms of samples to be high latency.
- const double twenty_ms_size = 2.0 * output_params_.sample_rate() / 100;
+ const double twenty_ms_size = 2.0 * output_params.sample_rate() / 100;
#if defined(OS_WIN)
// Windows doesn't use power of two buffer sizes, so we should always round up
// to the nearest multiple of the output buffer size.
const int high_latency_buffer_size =
- std::ceil(twenty_ms_size / output_params_.frames_per_buffer()) *
- output_params_.frames_per_buffer();
+ std::ceil(twenty_ms_size / output_params.frames_per_buffer()) *
+ output_params.frames_per_buffer();
#else
// On other platforms use the nearest higher power of two buffer size. For a
// given sample rate, this works out to:
@@ -126,7 +126,12 @@ int AudioHardwareConfig::GetHighLatencyBufferSize() const {
const int high_latency_buffer_size = RoundUpToPowerOfTwo(twenty_ms_size);
#endif // defined(OS_WIN)
- return std::max(output_params_.frames_per_buffer(), high_latency_buffer_size);
+ return std::max(output_params.frames_per_buffer(), high_latency_buffer_size);
+}
+
+int AudioHardwareConfig::GetHighLatencyBufferSize() const {
+ AutoLock auto_lock(config_lock_);
+ return GetHighLatencyBufferSize(output_params_);
}
} // namespace media
diff --git a/chromium/media/base/audio_hardware_config.h b/chromium/media/base/audio_hardware_config.h
index a4baaac0979..46f3192b684 100644
--- a/chromium/media/base/audio_hardware_config.h
+++ b/chromium/media/base/audio_hardware_config.h
@@ -44,6 +44,8 @@ class MEDIA_EXPORT AudioHardwareConfig {
// For clients which don't need low latency, a larger buffer size should be
// used to save power and CPU resources.
int GetHighLatencyBufferSize() const;
+ static int GetHighLatencyBufferSize(
+ const media::AudioParameters& output_params);
private:
// Cached values; access is protected by |config_lock_|.
diff --git a/chromium/media/base/audio_renderer_mixer.cc b/chromium/media/base/audio_renderer_mixer.cc
index 5b5fe49f0d2..984798d916d 100644
--- a/chromium/media/base/audio_renderer_mixer.cc
+++ b/chromium/media/base/audio_renderer_mixer.cc
@@ -71,14 +71,10 @@ void AudioRendererMixer::RemoveErrorCallback(const base::Closure& error_cb) {
NOTREACHED();
}
-void AudioRendererMixer::SwitchOutputDevice(
- const std::string& device_id,
- const GURL& security_origin,
- const SwitchOutputDeviceCB& callback) {
- DVLOG(1) << __FUNCTION__ << "(" << device_id << ", " << security_origin
- << ")";
+OutputDevice* AudioRendererMixer::GetOutputDevice() {
+ DVLOG(1) << __FUNCTION__;
base::AutoLock auto_lock(lock_);
- audio_sink_->SwitchOutputDevice(device_id, security_origin, callback);
+ return audio_sink_->GetOutputDevice();
}
int AudioRendererMixer::Render(AudioBus* audio_bus,
diff --git a/chromium/media/base/audio_renderer_mixer.h b/chromium/media/base/audio_renderer_mixer.h
index dc9ab925b24..9bc383934d4 100644
--- a/chromium/media/base/audio_renderer_mixer.h
+++ b/chromium/media/base/audio_renderer_mixer.h
@@ -40,9 +40,11 @@ class MEDIA_EXPORT AudioRendererMixer
pause_delay_ = delay;
}
- void SwitchOutputDevice(const std::string& device_id,
- const GURL& security_origin,
- const SwitchOutputDeviceCB& callback);
+ // TODO(guidou): remove this method. The output device of a mixer should
+ // never be switched, as it may result in a discrepancy between the output
+ // parameters of the new device and the output parameters with which the
+ // mixer was initialized. See crbug.com/506507
+ OutputDevice* GetOutputDevice();
private:
// AudioRendererSink::RenderCallback implementation.
diff --git a/chromium/media/base/audio_renderer_mixer_input.cc b/chromium/media/base/audio_renderer_mixer_input.cc
index 6194f21ca94..1b097ae6a24 100644
--- a/chromium/media/base/audio_renderer_mixer_input.cc
+++ b/chromium/media/base/audio_renderer_mixer_input.cc
@@ -5,24 +5,26 @@
#include "media/base/audio_renderer_mixer_input.h"
#include "base/bind.h"
-#include "base/location.h"
-#include "base/logging.h"
#include "media/base/audio_renderer_mixer.h"
namespace media {
AudioRendererMixerInput::AudioRendererMixerInput(
- const GetMixerCB& get_mixer_cb, const RemoveMixerCB& remove_mixer_cb)
+ const GetMixerCB& get_mixer_cb,
+ const RemoveMixerCB& remove_mixer_cb,
+ const std::string& device_id,
+ const url::Origin& security_origin)
: playing_(false),
initialized_(false),
volume_(1.0f),
get_mixer_cb_(get_mixer_cb),
remove_mixer_cb_(remove_mixer_cb),
+ device_id_(device_id),
+ security_origin_(security_origin),
mixer_(NULL),
callback_(NULL),
- error_cb_(base::Bind(
- &AudioRendererMixerInput::OnRenderError, base::Unretained(this))) {
-}
+ error_cb_(base::Bind(&AudioRendererMixerInput::OnRenderError,
+ base::Unretained(this))) {}
AudioRendererMixerInput::~AudioRendererMixerInput() {
DCHECK(!playing_);
@@ -44,7 +46,11 @@ void AudioRendererMixerInput::Start() {
DCHECK(initialized_);
DCHECK(!playing_);
DCHECK(!mixer_);
- mixer_ = get_mixer_cb_.Run(params_);
+ mixer_ = get_mixer_cb_.Run(params_, device_id_, security_origin_, nullptr);
+ if (!mixer_) {
+ callback_->OnRenderError();
+ return;
+ }
// Note: OnRenderError() may be called immediately after this call returns.
mixer_->AddErrorCallback(error_cb_);
@@ -63,7 +69,7 @@ void AudioRendererMixerInput::Stop() {
// Stop() by an error event since it may outlive this ref-counted object. We
// should instead have sane ownership semantics: http://crbug.com/151051
mixer_->RemoveErrorCallback(error_cb_);
- remove_mixer_cb_.Run(params_);
+ remove_mixer_cb_.Run(params_, device_id_, security_origin_);
mixer_ = NULL;
}
}
@@ -95,17 +101,54 @@ bool AudioRendererMixerInput::SetVolume(double volume) {
return true;
}
+OutputDevice* AudioRendererMixerInput::GetOutputDevice() {
+ return this;
+}
+
void AudioRendererMixerInput::SwitchOutputDevice(
const std::string& device_id,
- const GURL& security_origin,
+ const url::Origin& security_origin,
const SwitchOutputDeviceCB& callback) {
- DVLOG(1) << __FUNCTION__
- << "(" << device_id << ", " << security_origin << ")";
- if (mixer_) {
- mixer_->SwitchOutputDevice(device_id, security_origin, callback);
- } else {
- callback.Run(SWITCH_OUTPUT_DEVICE_RESULT_ERROR_NOT_SUPPORTED);
+ if (!mixer_) {
+ callback.Run(OUTPUT_DEVICE_STATUS_ERROR_INTERNAL);
+ return;
+ }
+
+ if (device_id == device_id_) {
+ callback.Run(OUTPUT_DEVICE_STATUS_OK);
+ return;
}
+
+ OutputDeviceStatus new_mixer_status = OUTPUT_DEVICE_STATUS_ERROR_INTERNAL;
+ AudioRendererMixer* new_mixer =
+ get_mixer_cb_.Run(params_, device_id, security_origin, &new_mixer_status);
+ if (new_mixer_status != OUTPUT_DEVICE_STATUS_OK) {
+ callback.Run(new_mixer_status);
+ return;
+ }
+
+ bool was_playing = playing_;
+ Stop();
+ device_id_ = device_id;
+ security_origin_ = security_origin;
+ mixer_ = new_mixer;
+ mixer_->AddErrorCallback(error_cb_);
+
+ if (was_playing)
+ Play();
+
+ callback.Run(OUTPUT_DEVICE_STATUS_OK);
+}
+
+AudioParameters AudioRendererMixerInput::GetOutputParameters() {
+ return mixer_->GetOutputDevice()->GetOutputParameters();
+}
+
+OutputDeviceStatus AudioRendererMixerInput::GetDeviceStatus() {
+ if (!mixer_)
+ return OUTPUT_DEVICE_STATUS_ERROR_INTERNAL;
+
+ return mixer_->GetOutputDevice()->GetDeviceStatus();
}
double AudioRendererMixerInput::ProvideInput(AudioBus* audio_bus,
diff --git a/chromium/media/base/audio_renderer_mixer_input.h b/chromium/media/base/audio_renderer_mixer_input.h
index 06a0de1b037..db7e8c707c5 100644
--- a/chromium/media/base/audio_renderer_mixer_input.h
+++ b/chromium/media/base/audio_renderer_mixer_input.h
@@ -6,11 +6,11 @@
#define MEDIA_BASE_AUDIO_RENDERER_MIXER_INPUT_H_
#include <string>
-#include <vector>
#include "base/callback.h"
#include "media/base/audio_converter.h"
#include "media/base/audio_renderer_sink.h"
+#include "url/origin.h"
namespace media {
@@ -18,14 +18,23 @@ class AudioRendererMixer;
class MEDIA_EXPORT AudioRendererMixerInput
: NON_EXPORTED_BASE(public AudioRendererSink),
+ NON_EXPORTED_BASE(public OutputDevice),
public AudioConverter::InputCallback {
public:
- typedef base::Callback<AudioRendererMixer*(
- const AudioParameters& params)> GetMixerCB;
- typedef base::Callback<void(const AudioParameters& params)> RemoveMixerCB;
-
- AudioRendererMixerInput(
- const GetMixerCB& get_mixer_cb, const RemoveMixerCB& remove_mixer_cb);
+ typedef base::Callback<AudioRendererMixer*(const AudioParameters& params,
+ const std::string& device_id,
+ const url::Origin& security_origin,
+ OutputDeviceStatus* device_status)>
+ GetMixerCB;
+ typedef base::Callback<void(const AudioParameters& params,
+ const std::string& device_id,
+ const url::Origin& security_origin)>
+ RemoveMixerCB;
+
+ AudioRendererMixerInput(const GetMixerCB& get_mixer_cb,
+ const RemoveMixerCB& remove_mixer_cb,
+ const std::string& device_id,
+ const url::Origin& security_origin);
// AudioRendererSink implementation.
void Start() override;
@@ -33,12 +42,17 @@ class MEDIA_EXPORT AudioRendererMixerInput
void Play() override;
void Pause() override;
bool SetVolume(double volume) override;
- void SwitchOutputDevice(const std::string& device_id,
- const GURL& security_origin,
- const SwitchOutputDeviceCB& callback) override;
+ OutputDevice* GetOutputDevice() override;
void Initialize(const AudioParameters& params,
AudioRendererSink::RenderCallback* renderer) override;
+ // OutputDevice implementation.
+ void SwitchOutputDevice(const std::string& device_id,
+ const url::Origin& security_origin,
+ const SwitchOutputDeviceCB& callback) override;
+ AudioParameters GetOutputParameters() override;
+ OutputDeviceStatus GetDeviceStatus() override;
+
// Called by AudioRendererMixer when an error occurs.
void OnRenderError();
@@ -64,6 +78,10 @@ class MEDIA_EXPORT AudioRendererMixerInput
// AudioParameters received during Initialize().
AudioParameters params_;
+ // ID of hardware device to use
+ std::string device_id_;
+ url::Origin security_origin_;
+
// AudioRendererMixer provided through |get_mixer_cb_| during Initialize(),
// guaranteed to live (at least) until |remove_mixer_cb_| is called.
AudioRendererMixer* mixer_;
diff --git a/chromium/media/base/audio_renderer_mixer_input_unittest.cc b/chromium/media/base/audio_renderer_mixer_input_unittest.cc
index be03867027f..b660b898dc4 100644
--- a/chromium/media/base/audio_renderer_mixer_input_unittest.cc
+++ b/chromium/media/base/audio_renderer_mixer_input_unittest.cc
@@ -4,6 +4,7 @@
#include "base/bind.h"
#include "base/bind_helpers.h"
+#include "base/run_loop.h"
#include "media/base/audio_renderer_mixer.h"
#include "media/base/audio_renderer_mixer_input.h"
#include "media/base/fake_audio_render_callback.h"
@@ -17,6 +18,10 @@ static const int kBitsPerChannel = 16;
static const int kSampleRate = 48000;
static const int kBufferSize = 8192;
static const ChannelLayout kChannelLayout = CHANNEL_LAYOUT_STEREO;
+static const std::string kDefaultDeviceId;
+static const std::string kUnauthorizedDeviceId("unauthorized");
+static const std::string kNonexistentDeviceId("nonexistent");
+static const url::Origin kDefaultSecurityOrigin;
class AudioRendererMixerInputTest : public testing::Test {
public:
@@ -33,40 +38,72 @@ class AudioRendererMixerInputTest : public testing::Test {
void CreateMixerInput() {
mixer_input_ = new AudioRendererMixerInput(
- base::Bind(
- &AudioRendererMixerInputTest::GetMixer, base::Unretained(this)),
- base::Bind(
- &AudioRendererMixerInputTest::RemoveMixer, base::Unretained(this)));
+ base::Bind(&AudioRendererMixerInputTest::GetMixer,
+ base::Unretained(this)),
+ base::Bind(&AudioRendererMixerInputTest::RemoveMixer,
+ base::Unretained(this)),
+ kDefaultDeviceId, kDefaultSecurityOrigin);
}
- AudioRendererMixer* GetMixer(const AudioParameters& params) {
- if (!mixer_) {
+ AudioRendererMixer* GetMixer(const AudioParameters& params,
+ const std::string& device_id,
+ const url::Origin& security_origin,
+ OutputDeviceStatus* device_status) {
+ if (device_id == kNonexistentDeviceId) {
+ if (device_status)
+ *device_status = OUTPUT_DEVICE_STATUS_ERROR_NOT_FOUND;
+ return nullptr;
+ }
+
+ if (device_id == kUnauthorizedDeviceId) {
+ if (device_status)
+ *device_status = OUTPUT_DEVICE_STATUS_ERROR_NOT_AUTHORIZED;
+ return nullptr;
+ }
+
+ size_t idx = device_id.empty() ? 0 : 1;
+ if (!mixers_[idx]) {
scoped_refptr<MockAudioRendererSink> sink = new MockAudioRendererSink();
EXPECT_CALL(*sink.get(), Start());
EXPECT_CALL(*sink.get(), Stop());
- mixer_.reset(new AudioRendererMixer(
- audio_parameters_, audio_parameters_, sink));
+ mixers_[idx].reset(
+ new AudioRendererMixer(audio_parameters_, audio_parameters_, sink));
}
- EXPECT_CALL(*this, RemoveMixer(testing::_));
- return mixer_.get();
+ EXPECT_CALL(*this, RemoveMixer(testing::_, device_id, testing::_));
+
+ if (device_status)
+ *device_status = OUTPUT_DEVICE_STATUS_OK;
+ return mixers_[idx].get();
}
double ProvideInput() {
return mixer_input_->ProvideInput(audio_bus_.get(), base::TimeDelta());
}
- MOCK_METHOD1(RemoveMixer, void(const AudioParameters&));
+ MOCK_METHOD3(RemoveMixer,
+ void(const AudioParameters&,
+ const std::string&,
+ const url::Origin&));
+
+ MOCK_METHOD1(SwitchCallbackCalled, void(OutputDeviceStatus));
+ void SwitchCallback(base::RunLoop* loop, OutputDeviceStatus result) {
+ SwitchCallbackCalled(result);
+ loop->Quit();
+ }
+
+ AudioRendererMixer* GetInputMixer() { return mixer_input_->mixer_; }
protected:
virtual ~AudioRendererMixerInputTest() {}
AudioParameters audio_parameters_;
- scoped_ptr<AudioRendererMixer> mixer_;
+ scoped_ptr<AudioRendererMixer> mixers_[2];
scoped_refptr<AudioRendererMixerInput> mixer_input_;
scoped_ptr<FakeAudioRenderCallback> fake_callback_;
scoped_ptr<AudioBus> audio_bus_;
+ private:
DISALLOW_COPY_AND_ASSIGN(AudioRendererMixerInputTest);
};
@@ -117,4 +154,80 @@ TEST_F(AudioRendererMixerInputTest, StartAfterStop) {
mixer_input_->Stop();
}
+// Test SwitchOutputDevice().
+TEST_F(AudioRendererMixerInputTest, SwitchOutputDevice) {
+ mixer_input_->Start();
+ const std::string kDeviceId("mock-device-id");
+ EXPECT_CALL(*this, SwitchCallbackCalled(OUTPUT_DEVICE_STATUS_OK));
+ AudioRendererMixer* old_mixer = GetInputMixer();
+ EXPECT_EQ(old_mixer, mixers_[0].get());
+ base::RunLoop run_loop;
+ mixer_input_->SwitchOutputDevice(
+ kDeviceId, kDefaultSecurityOrigin,
+ base::Bind(&AudioRendererMixerInputTest::SwitchCallback,
+ base::Unretained(this), &run_loop));
+ run_loop.Run();
+ AudioRendererMixer* new_mixer = GetInputMixer();
+ EXPECT_EQ(new_mixer, mixers_[1].get());
+ EXPECT_NE(old_mixer, new_mixer);
+ mixer_input_->Stop();
+}
+
+// Test SwitchOutputDevice() to the same device as the current (default) device
+TEST_F(AudioRendererMixerInputTest, SwitchOutputDeviceToSameDevice) {
+ mixer_input_->Start();
+ EXPECT_CALL(*this, SwitchCallbackCalled(OUTPUT_DEVICE_STATUS_OK));
+ AudioRendererMixer* old_mixer = GetInputMixer();
+ base::RunLoop run_loop;
+ mixer_input_->SwitchOutputDevice(
+ kDefaultDeviceId, kDefaultSecurityOrigin,
+ base::Bind(&AudioRendererMixerInputTest::SwitchCallback,
+ base::Unretained(this), &run_loop));
+ run_loop.Run();
+ AudioRendererMixer* new_mixer = GetInputMixer();
+ EXPECT_EQ(old_mixer, new_mixer);
+ mixer_input_->Stop();
+}
+
+// Test SwitchOutputDevice() to a nonexistent device
+TEST_F(AudioRendererMixerInputTest, SwitchOutputDeviceToNonexistentDevice) {
+ mixer_input_->Start();
+ EXPECT_CALL(*this,
+ SwitchCallbackCalled(OUTPUT_DEVICE_STATUS_ERROR_NOT_FOUND));
+ base::RunLoop run_loop;
+ mixer_input_->SwitchOutputDevice(
+ kNonexistentDeviceId, kDefaultSecurityOrigin,
+ base::Bind(&AudioRendererMixerInputTest::SwitchCallback,
+ base::Unretained(this), &run_loop));
+ run_loop.Run();
+ mixer_input_->Stop();
+}
+
+// Test SwitchOutputDevice() to an unauthorized device
+TEST_F(AudioRendererMixerInputTest, SwitchOutputDeviceToUnauthorizedDevice) {
+ mixer_input_->Start();
+ EXPECT_CALL(*this,
+ SwitchCallbackCalled(OUTPUT_DEVICE_STATUS_ERROR_NOT_AUTHORIZED));
+ base::RunLoop run_loop;
+ mixer_input_->SwitchOutputDevice(
+ kUnauthorizedDeviceId, kDefaultSecurityOrigin,
+ base::Bind(&AudioRendererMixerInputTest::SwitchCallback,
+ base::Unretained(this), &run_loop));
+ run_loop.Run();
+ mixer_input_->Stop();
+}
+
+// Test that calling SwitchOutputDevice() before Start() fails.
+TEST_F(AudioRendererMixerInputTest, SwitchOutputDeviceBeforeStart) {
+ EXPECT_CALL(*this, SwitchCallbackCalled(OUTPUT_DEVICE_STATUS_ERROR_INTERNAL));
+ base::RunLoop run_loop;
+ mixer_input_->SwitchOutputDevice(
+ std::string(), url::Origin(),
+ base::Bind(&AudioRendererMixerInputTest::SwitchCallback,
+ base::Unretained(this), &run_loop));
+ run_loop.Run();
+ mixer_input_->Start();
+ mixer_input_->Stop();
+}
+
} // namespace media
diff --git a/chromium/media/base/audio_renderer_mixer_unittest.cc b/chromium/media/base/audio_renderer_mixer_unittest.cc
index 6cca1ddb967..b218fa04100 100644
--- a/chromium/media/base/audio_renderer_mixer_unittest.cc
+++ b/chromium/media/base/audio_renderer_mixer_unittest.cc
@@ -34,6 +34,10 @@ const int kLowLatencyBufferSize = 256;
// Number of full sine wave cycles for each Render() call.
const int kSineCycles = 4;
+// Default device ID
+const std::string kDefaultDeviceId;
+const url::Origin kDefaultSecurityOrigin;
+
// Tuple of <input sampling rate, output sampling rate, epsilon>.
typedef std::tr1::tuple<int, int, double> AudioRendererMixerTestData;
class AudioRendererMixerTest
@@ -67,11 +71,17 @@ class AudioRendererMixerTest
expected_callback_.reset(new FakeAudioRenderCallback(step));
}
- AudioRendererMixer* GetMixer(const AudioParameters& params) {
+ AudioRendererMixer* GetMixer(const AudioParameters& params,
+ const std::string& device_id,
+ const url::Origin& security_origin,
+ OutputDeviceStatus* device_status) {
return mixer_.get();
}
- MOCK_METHOD1(RemoveMixer, void(const AudioParameters&));
+ MOCK_METHOD3(RemoveMixer,
+ void(const AudioParameters&,
+ const std::string&,
+ const url::Origin&));
void InitializeInputs(int count) {
mixer_inputs_.reserve(count);
@@ -86,14 +96,15 @@ class AudioRendererMixerTest
for (int i = 0; i < count; ++i) {
fake_callbacks_.push_back(new FakeAudioRenderCallback(step));
mixer_inputs_.push_back(new AudioRendererMixerInput(
- base::Bind(&AudioRendererMixerTest::GetMixer,
- base::Unretained(this)),
+ base::Bind(&AudioRendererMixerTest::GetMixer, base::Unretained(this)),
base::Bind(&AudioRendererMixerTest::RemoveMixer,
- base::Unretained(this))));
+ base::Unretained(this)),
+ kDefaultDeviceId, kDefaultSecurityOrigin));
mixer_inputs_[i]->Initialize(input_parameters_, fake_callbacks_[i]);
mixer_inputs_[i]->SetVolume(1.0f);
}
- EXPECT_CALL(*this, RemoveMixer(testing::_)).Times(count);
+ EXPECT_CALL(*this, RemoveMixer(testing::_, testing::_, testing::_))
+ .Times(count);
}
bool ValidateAudioData(int index, int frames, float scale, double epsilon) {
@@ -284,6 +295,7 @@ class AudioRendererMixerTest
double epsilon_;
bool half_fill_;
+ private:
DISALLOW_COPY_AND_ASSIGN(AudioRendererMixerTest);
};
@@ -412,13 +424,13 @@ TEST_P(AudioRendererMixerBehavioralTest, OnRenderErrorPausedInput) {
// Ensure constructing an AudioRendererMixerInput, but not initializing it does
// not call RemoveMixer().
TEST_P(AudioRendererMixerBehavioralTest, NoInitialize) {
- EXPECT_CALL(*this, RemoveMixer(testing::_)).Times(0);
+ EXPECT_CALL(*this, RemoveMixer(testing::_, testing::_, testing::_)).Times(0);
scoped_refptr<AudioRendererMixerInput> audio_renderer_mixer =
new AudioRendererMixerInput(
- base::Bind(&AudioRendererMixerTest::GetMixer,
- base::Unretained(this)),
+ base::Bind(&AudioRendererMixerTest::GetMixer, base::Unretained(this)),
base::Bind(&AudioRendererMixerTest::RemoveMixer,
- base::Unretained(this)));
+ base::Unretained(this)),
+ kDefaultDeviceId, kDefaultSecurityOrigin);
}
// Ensure the physical stream is paused after a certain amount of time with no
diff --git a/chromium/media/base/audio_renderer_sink.h b/chromium/media/base/audio_renderer_sink.h
index 753135d0c26..1e091af94c5 100644
--- a/chromium/media/base/audio_renderer_sink.h
+++ b/chromium/media/base/audio_renderer_sink.h
@@ -16,6 +16,7 @@
#include "media/audio/audio_parameters.h"
#include "media/base/audio_bus.h"
#include "media/base/media_export.h"
+#include "media/base/output_device.h"
#include "url/gurl.h"
namespace base {
@@ -24,8 +25,6 @@ class SingleThreadTaskRunner;
namespace media {
-typedef base::Callback<void(SwitchOutputDeviceResult)> SwitchOutputDeviceCB;
-
// AudioRendererSink is an interface representing the end-point for
// rendered audio. An implementation is expected to
// periodically call Render() on a callback object.
@@ -54,7 +53,8 @@ class AudioRendererSink
// Starts audio playback.
virtual void Start() = 0;
- // Stops audio playback.
+ // Stops audio playback and performs cleanup. It must be called before
+ // destruction.
virtual void Stop() = 0;
// Pauses playback.
@@ -67,20 +67,12 @@ class AudioRendererSink
// Returns |true| on success.
virtual bool SetVolume(double volume) = 0;
- // Attempts to switch the audio output device.
- // Once the attempt is finished, |callback| is invoked with the
- // result of the operation passed as a parameter. The result is a value from
- // the media::SwitchOutputDeviceResult enum.
- // There is no guarantee about the thread where |callback| will
- // be invoked, so users are advised to use media::BindToCurrentLoop() to
- // ensure that |callback| runs on the correct thread.
- // Note also that copy constructors and destructors for arguments bound to
- // |callback| may run on arbitrary threads as |callback| is moved across
- // threads. It is advisable to bind arguments such that they are released by
- // |callback| when it runs in order to avoid surprises.
- virtual void SwitchOutputDevice(const std::string& device_id,
- const GURL& security_origin,
- const SwitchOutputDeviceCB& callback) = 0;
+ // Returns a pointer to the internal output device.
+ // This pointer is not to be owned by the caller and is valid only during
+ // the lifetime of the AudioRendererSink.
+ // It can be null, which means that access to the output device is not
+ // supported.
+ virtual OutputDevice* GetOutputDevice() = 0;
protected:
friend class base::RefCountedThreadSafe<AudioRendererSink>;
diff --git a/chromium/media/base/audio_splicer.cc b/chromium/media/base/audio_splicer.cc
index accff36cd30..3fd6dacbf79 100644
--- a/chromium/media/base/audio_splicer.cc
+++ b/chromium/media/base/audio_splicer.cc
@@ -99,7 +99,7 @@ class AudioStreamSanitizer {
void AddOutputBuffer(const scoped_refptr<AudioBuffer>& buffer);
AudioTimestampHelper output_timestamp_helper_;
- bool received_end_of_stream_;
+ bool received_end_of_stream_ = false;
typedef std::deque<scoped_refptr<AudioBuffer> > BufferQueue;
BufferQueue output_buffers_;
@@ -108,7 +108,7 @@ class AudioStreamSanitizer {
// To prevent log spam, counts the number of audio gap or overlaps warned in
// logs.
- int num_warning_logs_;
+ int num_warning_logs_ = 0;
DISALLOW_ASSIGN(AudioStreamSanitizer);
};
@@ -116,11 +116,7 @@ class AudioStreamSanitizer {
AudioStreamSanitizer::AudioStreamSanitizer(
int samples_per_second,
const scoped_refptr<MediaLog>& media_log)
- : output_timestamp_helper_(samples_per_second),
- received_end_of_stream_(false),
- media_log_(media_log),
- num_warning_logs_(0) {
-}
+ : output_timestamp_helper_(samples_per_second), media_log_(media_log) {}
AudioStreamSanitizer::~AudioStreamSanitizer() {}
diff --git a/chromium/media/base/audio_splicer.h b/chromium/media/base/audio_splicer.h
index 0d35f605269..db25e5316e1 100644
--- a/chromium/media/base/audio_splicer.h
+++ b/chromium/media/base/audio_splicer.h
@@ -9,8 +9,8 @@
#include "base/memory/scoped_ptr.h"
#include "base/time/time.h"
#include "media/audio/audio_parameters.h"
-#include "media/base/buffers.h"
#include "media/base/media_export.h"
+#include "media/base/timestamp_constants.h"
namespace media {
diff --git a/chromium/media/base/audio_splicer_unittest.cc b/chromium/media/base/audio_splicer_unittest.cc
index 7b226bb0759..92ab9d2fade 100644
--- a/chromium/media/base/audio_splicer_unittest.cc
+++ b/chromium/media/base/audio_splicer_unittest.cc
@@ -7,8 +7,8 @@
#include "media/base/audio_bus.h"
#include "media/base/audio_splicer.h"
#include "media/base/audio_timestamp_helper.h"
-#include "media/base/buffers.h"
#include "media/base/test_helpers.h"
+#include "media/base/timestamp_constants.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace media {
diff --git a/chromium/media/base/audio_timestamp_helper.cc b/chromium/media/base/audio_timestamp_helper.cc
index 38fde1f0bd9..ef387ff4267 100644
--- a/chromium/media/base/audio_timestamp_helper.cc
+++ b/chromium/media/base/audio_timestamp_helper.cc
@@ -5,7 +5,7 @@
#include "media/base/audio_timestamp_helper.h"
#include "base/logging.h"
-#include "media/base/buffers.h"
+#include "media/base/timestamp_constants.h"
namespace media {
diff --git a/chromium/media/base/audio_timestamp_helper_unittest.cc b/chromium/media/base/audio_timestamp_helper_unittest.cc
index a0cfa3bbfa5..e61ae4a04d1 100644
--- a/chromium/media/base/audio_timestamp_helper_unittest.cc
+++ b/chromium/media/base/audio_timestamp_helper_unittest.cc
@@ -3,7 +3,7 @@
// found in the LICENSE file.
#include "media/base/audio_timestamp_helper.h"
-#include "media/base/buffers.h"
+#include "media/base/timestamp_constants.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace media {
diff --git a/chromium/media/base/bind_to_current_loop.h b/chromium/media/base/bind_to_current_loop.h
index d34afe4094c..da568ed6ac5 100644
--- a/chromium/media/base/bind_to_current_loop.h
+++ b/chromium/media/base/bind_to_current_loop.h
@@ -7,6 +7,7 @@
#include "base/bind.h"
#include "base/location.h"
+#include "base/memory/scoped_vector.h"
#include "base/single_thread_task_runner.h"
#include "base/thread_task_runner_handle.h"
diff --git a/chromium/media/base/bitstream_buffer.h b/chromium/media/base/bitstream_buffer.h
index 3a264737f42..c015b92ab5a 100644
--- a/chromium/media/base/bitstream_buffer.h
+++ b/chromium/media/base/bitstream_buffer.h
@@ -7,6 +7,8 @@
#include "base/basictypes.h"
#include "base/memory/shared_memory.h"
+#include "base/time/time.h"
+#include "media/base/timestamp_constants.h"
namespace media {
@@ -17,18 +19,37 @@ class BitstreamBuffer {
BitstreamBuffer(int32 id, base::SharedMemoryHandle handle, size_t size)
: id_(id),
handle_(handle),
- size_(size) {
- }
+ size_(size),
+ presentation_timestamp_(kNoTimestamp()) {}
+
+ BitstreamBuffer(int32 id,
+ base::SharedMemoryHandle handle,
+ size_t size,
+ base::TimeDelta presentation_timestamp)
+ : id_(id),
+ handle_(handle),
+ size_(size),
+ presentation_timestamp_(presentation_timestamp) {}
int32 id() const { return id_; }
base::SharedMemoryHandle handle() const { return handle_; }
size_t size() const { return size_; }
+ // The timestamp is only valid if it's not equal to |media::kNoTimestamp()|.
+ base::TimeDelta presentation_timestamp() const {
+ return presentation_timestamp_;
+ }
+
private:
int32 id_;
base::SharedMemoryHandle handle_;
size_t size_;
+ // This is only set when necessary. For example, AndroidVideoDecodeAccelerator
+ // needs the timestamp because the underlying decoder may require it to
+ // determine the output order.
+ base::TimeDelta presentation_timestamp_;
+
// Allow compiler-generated copy & assign constructors.
};
diff --git a/chromium/media/base/browser_cdm.cc b/chromium/media/base/browser_cdm.cc
index 2c44e1652ad..45a2ea2daa1 100644
--- a/chromium/media/base/browser_cdm.cc
+++ b/chromium/media/base/browser_cdm.cc
@@ -12,4 +12,9 @@ BrowserCdm::BrowserCdm() {
BrowserCdm::~BrowserCdm() {
}
+// For most subclasses we can delete on the caller thread.
+void BrowserCdm::DeleteOnCorrectThread() {
+ delete this;
+}
+
} // namespace media
diff --git a/chromium/media/base/browser_cdm.h b/chromium/media/base/browser_cdm.h
index f4902d8708b..5e4cb28b4d8 100644
--- a/chromium/media/base/browser_cdm.h
+++ b/chromium/media/base/browser_cdm.h
@@ -5,6 +5,7 @@
#ifndef MEDIA_BASE_BROWSER_CDM_H_
#define MEDIA_BASE_BROWSER_CDM_H_
+#include "base/memory/scoped_ptr.h"
#include "media/base/media_export.h"
#include "media/base/media_keys.h"
#include "media/base/player_tracker.h"
@@ -16,6 +17,9 @@ class MEDIA_EXPORT BrowserCdm : public MediaKeys, public PlayerTracker {
public:
~BrowserCdm() override;
+ // Virtual destructor. For most subclasses we can delete on the caller thread.
+ virtual void DeleteOnCorrectThread();
+
protected:
BrowserCdm();
@@ -23,6 +27,14 @@ class MEDIA_EXPORT BrowserCdm : public MediaKeys, public PlayerTracker {
DISALLOW_COPY_AND_ASSIGN(BrowserCdm);
};
+struct MEDIA_EXPORT BrowserCdmDeleter {
+ inline void operator()(BrowserCdm* ptr) const {
+ ptr->DeleteOnCorrectThread();
+ }
+};
+
+using ScopedBrowserCdmPtr = scoped_ptr<BrowserCdm, BrowserCdmDeleter>;
+
} // namespace media
#endif // MEDIA_BASE_BROWSER_CDM_H_
diff --git a/chromium/media/base/browser_cdm_factory.cc b/chromium/media/base/browser_cdm_factory.cc
index 88445d890b7..73fd4c72e22 100644
--- a/chromium/media/base/browser_cdm_factory.cc
+++ b/chromium/media/base/browser_cdm_factory.cc
@@ -21,7 +21,7 @@ void SetBrowserCdmFactory(BrowserCdmFactory* factory) {
g_cdm_factory = factory;
}
-scoped_ptr<BrowserCdm> CreateBrowserCdm(
+ScopedBrowserCdmPtr CreateBrowserCdm(
const std::string& key_system,
bool use_hw_secure_codecs,
const SessionMessageCB& session_message_cb,
@@ -34,7 +34,7 @@ scoped_ptr<BrowserCdm> CreateBrowserCdm(
SetBrowserCdmFactory(new BrowserCdmFactoryAndroid);
#else
LOG(ERROR) << "Cannot create BrowserCdm: no BrowserCdmFactory available!";
- return scoped_ptr<BrowserCdm>();
+ return ScopedBrowserCdmPtr();
#endif
}
diff --git a/chromium/media/base/browser_cdm_factory.h b/chromium/media/base/browser_cdm_factory.h
index 2ccf1b4108a..99525a79b2a 100644
--- a/chromium/media/base/browser_cdm_factory.h
+++ b/chromium/media/base/browser_cdm_factory.h
@@ -19,7 +19,7 @@ class MEDIA_EXPORT BrowserCdmFactory {
BrowserCdmFactory() {}
virtual ~BrowserCdmFactory() {}
- virtual scoped_ptr<BrowserCdm> CreateBrowserCdm(
+ virtual ScopedBrowserCdmPtr CreateBrowserCdm(
const std::string& key_system,
bool use_hw_secure_codecs,
const SessionMessageCB& session_message_cb,
@@ -41,7 +41,7 @@ void SetBrowserCdmFactory(BrowserCdmFactory* factory);
// |use_hw_secure_codecs| indicates that the CDM should be configured to use
// hardware-secure codecs (for platforms that support it).
// TODO(xhwang): Add ifdef for IPC based CDM.
-scoped_ptr<BrowserCdm> MEDIA_EXPORT
+ScopedBrowserCdmPtr MEDIA_EXPORT
CreateBrowserCdm(const std::string& key_system,
bool use_hw_secure_codecs,
const SessionMessageCB& session_message_cb,
diff --git a/chromium/media/base/buffers.h b/chromium/media/base/buffers.h
deleted file mode 100644
index 5c5c47b68e1..00000000000
--- a/chromium/media/base/buffers.h
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Timestamps are derived directly from the encoded media file and are commonly
-// known as the presentation timestamp (PTS). Durations are a best-guess and
-// are usually derived from the sample/frame rate of the media file.
-//
-// Due to encoding and transmission errors, it is not guaranteed that timestamps
-// arrive in a monotonically increasing order nor that the next timestamp will
-// be equal to the previous timestamp plus the duration.
-//
-// In the ideal scenario for a 25fps movie, buffers are timestamped as followed:
-//
-// Buffer0 Buffer1 Buffer2 ... BufferN
-// Timestamp: 0us 40000us 80000us ... (N*40000)us
-// Duration*: 40000us 40000us 40000us ... 40000us
-//
-// *25fps = 0.04s per frame = 40000us per frame
-
-#ifndef MEDIA_BASE_BUFFERS_H_
-#define MEDIA_BASE_BUFFERS_H_
-
-#include "base/basictypes.h"
-#include "base/memory/ref_counted.h"
-#include "base/time/time.h"
-#include "media/base/media_export.h"
-
-namespace media {
-
-// TODO(scherkus): Move the contents of this file elsewhere.
-
-// Indicates an invalid or missing timestamp.
-MEDIA_EXPORT extern inline base::TimeDelta kNoTimestamp() {
- return base::TimeDelta::FromMicroseconds(kint64min);
-}
-
-// Represents an infinite stream duration.
-MEDIA_EXPORT extern inline base::TimeDelta kInfiniteDuration() {
- return base::TimeDelta::Max();
-}
-
-} // namespace media
-
-#endif // MEDIA_BASE_BUFFERS_H_
diff --git a/chromium/media/base/cdm_config.h b/chromium/media/base/cdm_config.h
index 6f723485f7b..6a3dd6b4557 100644
--- a/chromium/media/base/cdm_config.h
+++ b/chromium/media/base/cdm_config.h
@@ -5,8 +5,6 @@
#ifndef MEDIA_BASE_CDM_CONFIG_H_
#define MEDIA_BASE_CDM_CONFIG_H_
-#include "media/base/media_export.h"
-
namespace media {
// The runtime configuration for new CDM instances as computed by
diff --git a/chromium/media/base/cdm_key_information.h b/chromium/media/base/cdm_key_information.h
index 4f9d8e08db7..c7ee22513cb 100644
--- a/chromium/media/base/cdm_key_information.h
+++ b/chromium/media/base/cdm_key_information.h
@@ -18,10 +18,11 @@ struct MEDIA_EXPORT CdmKeyInformation {
USABLE = 0,
INTERNAL_ERROR = 1,
EXPIRED = 2,
- OUTPUT_NOT_ALLOWED = 3,
+ OUTPUT_RESTRICTED = 3,
OUTPUT_DOWNSCALED = 4,
KEY_STATUS_PENDING = 5,
- KEY_STATUS_MAX = KEY_STATUS_PENDING
+ RELEASED = 6,
+ KEY_STATUS_MAX = RELEASED
};
CdmKeyInformation();
diff --git a/chromium/media/base/cdm_promise_adapter.cc b/chromium/media/base/cdm_promise_adapter.cc
index 70993b580ec..aed28307157 100644
--- a/chromium/media/base/cdm_promise_adapter.cc
+++ b/chromium/media/base/cdm_promise_adapter.cc
@@ -13,10 +13,12 @@ CdmPromiseAdapter::CdmPromiseAdapter() : next_promise_id_(1) {
CdmPromiseAdapter::~CdmPromiseAdapter() {
DCHECK(promises_.empty());
+ DCHECK(thread_checker_.CalledOnValidThread());
Clear();
}
uint32_t CdmPromiseAdapter::SavePromise(scoped_ptr<CdmPromise> promise) {
+ DCHECK(thread_checker_.CalledOnValidThread());
uint32_t promise_id = next_promise_id_++;
promises_.add(promise_id, promise.Pass());
return promise_id;
@@ -57,12 +59,14 @@ void CdmPromiseAdapter::RejectPromise(uint32_t promise_id,
void CdmPromiseAdapter::Clear() {
// Reject all outstanding promises.
+ DCHECK(thread_checker_.CalledOnValidThread());
for (auto& promise : promises_)
promise.second->reject(MediaKeys::UNKNOWN_ERROR, 0, "Operation aborted.");
promises_.clear();
}
scoped_ptr<CdmPromise> CdmPromiseAdapter::TakePromise(uint32_t promise_id) {
+ DCHECK(thread_checker_.CalledOnValidThread());
PromiseMap::iterator it = promises_.find(promise_id);
if (it == promises_.end())
return nullptr;
diff --git a/chromium/media/base/cdm_promise_adapter.h b/chromium/media/base/cdm_promise_adapter.h
index b078c0dd0a5..b6d5a89be5b 100644
--- a/chromium/media/base/cdm_promise_adapter.h
+++ b/chromium/media/base/cdm_promise_adapter.h
@@ -9,6 +9,7 @@
#include "base/containers/scoped_ptr_hash_map.h"
#include "base/macros.h"
#include "base/memory/scoped_ptr.h"
+#include "base/threading/thread_checker.h"
#include "media/base/cdm_promise.h"
#include "media/base/media_export.h"
@@ -51,6 +52,7 @@ class MEDIA_EXPORT CdmPromiseAdapter {
uint32_t next_promise_id_;
PromiseMap promises_;
+ base::ThreadChecker thread_checker_;
DISALLOW_COPY_AND_ASSIGN(CdmPromiseAdapter);
};
diff --git a/chromium/media/base/channel_mixer_unittest.cc b/chromium/media/base/channel_mixer_unittest.cc
index 89b4eeedc44..1eb2616236c 100644
--- a/chromium/media/base/channel_mixer_unittest.cc
+++ b/chromium/media/base/channel_mixer_unittest.cc
@@ -104,26 +104,19 @@ TEST_P(ChannelMixerTest, Mixing) {
ChannelLayout input_layout = GetParam().input_layout;
int input_channels = GetParam().input_channels;
scoped_ptr<AudioBus> input_bus = AudioBus::Create(input_channels, kFrames);
- AudioParameters input_audio(AudioParameters::AUDIO_PCM_LINEAR,
- input_layout,
- input_layout == CHANNEL_LAYOUT_DISCRETE ?
- input_channels :
- ChannelLayoutToChannelCount(input_layout),
- AudioParameters::kAudioCDSampleRate, 16,
- kFrames,
- AudioParameters::NO_EFFECTS);
+ AudioParameters input_audio(AudioParameters::AUDIO_PCM_LINEAR, input_layout,
+ AudioParameters::kAudioCDSampleRate, 16, kFrames);
+ if (input_layout == CHANNEL_LAYOUT_DISCRETE)
+ input_audio.set_channels_for_discrete(input_channels);
ChannelLayout output_layout = GetParam().output_layout;
int output_channels = GetParam().output_channels;
scoped_ptr<AudioBus> output_bus = AudioBus::Create(output_channels, kFrames);
- AudioParameters output_audio(AudioParameters::AUDIO_PCM_LINEAR,
- output_layout,
- output_layout == CHANNEL_LAYOUT_DISCRETE ?
- output_channels :
- ChannelLayoutToChannelCount(output_layout),
+ AudioParameters output_audio(AudioParameters::AUDIO_PCM_LINEAR, output_layout,
AudioParameters::kAudioCDSampleRate, 16,
- kFrames,
- AudioParameters::NO_EFFECTS);
+ kFrames);
+ if (output_layout == CHANNEL_LAYOUT_DISCRETE)
+ output_audio.set_channels_for_discrete(output_channels);
const float* channel_values = GetParam().channel_values;
ASSERT_EQ(input_bus->channels(), GetParam().num_channel_values);
diff --git a/chromium/media/base/decoder_buffer.h b/chromium/media/base/decoder_buffer.h
index 665313ddcc1..85036969abe 100644
--- a/chromium/media/base/decoder_buffer.h
+++ b/chromium/media/base/decoder_buffer.h
@@ -14,9 +14,9 @@
#include "base/memory/scoped_ptr.h"
#include "base/time/time.h"
#include "build/build_config.h"
-#include "media/base/buffers.h"
#include "media/base/decrypt_config.h"
#include "media/base/media_export.h"
+#include "media/base/timestamp_constants.h"
namespace media {
@@ -96,6 +96,7 @@ class MEDIA_EXPORT DecoderBuffer
return data_.get();
}
+ // TODO(servolk): data_size should return size_t instead of int
int data_size() const {
DCHECK(!end_of_stream());
return size_;
@@ -106,6 +107,7 @@ class MEDIA_EXPORT DecoderBuffer
return side_data_.get();
}
+ // TODO(servolk): side_data_size should return size_t instead of int
int side_data_size() const {
DCHECK(!end_of_stream());
return side_data_size_;
@@ -187,6 +189,7 @@ class MEDIA_EXPORT DecoderBuffer
base::TimeDelta timestamp_;
base::TimeDelta duration_;
+ // TODO(servolk): Consider changing size_/side_data_size_ types to size_t.
int size_;
scoped_ptr<uint8, base::AlignedFreeDeleter> data_;
int side_data_size_;
diff --git a/chromium/media/base/decoder_buffer_queue.cc b/chromium/media/base/decoder_buffer_queue.cc
index 26ba9f4e69b..bdd7f4768a4 100644
--- a/chromium/media/base/decoder_buffer_queue.cc
+++ b/chromium/media/base/decoder_buffer_queue.cc
@@ -6,8 +6,8 @@
#include "base/logging.h"
#include "base/numerics/safe_conversions.h"
-#include "media/base/buffers.h"
#include "media/base/decoder_buffer.h"
+#include "media/base/timestamp_constants.h"
namespace media {
@@ -39,7 +39,7 @@ void DecoderBufferQueue::Push(const scoped_refptr<DecoderBuffer>& buffer) {
}
if (buffer->timestamp() < earliest_valid_timestamp_) {
- DVLOG(1)
+ DVLOG(2)
<< "Out of order timestamps: "
<< buffer->timestamp().InMicroseconds()
<< " vs. "
diff --git a/chromium/media/base/decoder_buffer_queue_unittest.cc b/chromium/media/base/decoder_buffer_queue_unittest.cc
index 5eb06d2152d..bc627fdeb70 100644
--- a/chromium/media/base/decoder_buffer_queue_unittest.cc
+++ b/chromium/media/base/decoder_buffer_queue_unittest.cc
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/base/buffers.h"
#include "media/base/decoder_buffer.h"
#include "media/base/decoder_buffer_queue.h"
+#include "media/base/timestamp_constants.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace media {
diff --git a/chromium/media/base/demuxer_perftest.cc b/chromium/media/base/demuxer_perftest.cc
index 90e19896651..4f740de8baf 100644
--- a/chromium/media/base/demuxer_perftest.cc
+++ b/chromium/media/base/demuxer_perftest.cc
@@ -10,6 +10,7 @@
#include "media/base/media.h"
#include "media/base/media_log.h"
#include "media/base/test_data_util.h"
+#include "media/base/timestamp_constants.h"
#include "media/filters/ffmpeg_demuxer.h"
#include "media/filters/file_data_source.h"
#include "testing/gtest/include/gtest/gtest.h"
diff --git a/chromium/media/base/fake_audio_renderer_sink.cc b/chromium/media/base/fake_audio_renderer_sink.cc
index 4c4233b5e08..d8a72629d96 100644
--- a/chromium/media/base/fake_audio_renderer_sink.cc
+++ b/chromium/media/base/fake_audio_renderer_sink.cc
@@ -8,13 +8,14 @@
#include "base/location.h"
#include "base/logging.h"
#include "base/single_thread_task_runner.h"
+#include "media/base/fake_output_device.h"
namespace media {
FakeAudioRendererSink::FakeAudioRendererSink()
: state_(kUninitialized),
- callback_(NULL) {
-}
+ callback_(NULL),
+ output_device_(new FakeOutputDevice) {}
FakeAudioRendererSink::~FakeAudioRendererSink() {
DCHECK(!callback_);
@@ -55,11 +56,8 @@ bool FakeAudioRendererSink::SetVolume(double volume) {
return true;
}
-void FakeAudioRendererSink::SwitchOutputDevice(
- const std::string& device_id,
- const GURL& security_origin,
- const SwitchOutputDeviceCB& callback) {
- callback.Run(SWITCH_OUTPUT_DEVICE_RESULT_SUCCESS);
+OutputDevice* FakeAudioRendererSink::GetOutputDevice() {
+ return output_device_.get();
}
bool FakeAudioRendererSink::Render(AudioBus* dest, int audio_delay_milliseconds,
diff --git a/chromium/media/base/fake_audio_renderer_sink.h b/chromium/media/base/fake_audio_renderer_sink.h
index 1982ba3d373..0a9bd93234a 100644
--- a/chromium/media/base/fake_audio_renderer_sink.h
+++ b/chromium/media/base/fake_audio_renderer_sink.h
@@ -12,6 +12,8 @@
namespace media {
+class FakeOutputDevice;
+
class FakeAudioRendererSink : public AudioRendererSink {
public:
enum State {
@@ -32,9 +34,7 @@ class FakeAudioRendererSink : public AudioRendererSink {
void Pause() override;
void Play() override;
bool SetVolume(double volume) override;
- void SwitchOutputDevice(const std::string& device_id,
- const GURL& security_origin,
- const SwitchOutputDeviceCB& callback) override;
+ OutputDevice* GetOutputDevice() override;
// Attempts to call Render() on the callback provided to
// Initialize() with |dest| and |audio_delay_milliseconds|.
@@ -57,6 +57,7 @@ class FakeAudioRendererSink : public AudioRendererSink {
State state_;
RenderCallback* callback_;
+ scoped_ptr<FakeOutputDevice> output_device_;
DISALLOW_COPY_AND_ASSIGN(FakeAudioRendererSink);
};
diff --git a/chromium/media/base/fake_demuxer_stream.cc b/chromium/media/base/fake_demuxer_stream.cc
index ba10ae71a05..30ff406f3fc 100644
--- a/chromium/media/base/fake_demuxer_stream.cc
+++ b/chromium/media/base/fake_demuxer_stream.cc
@@ -13,6 +13,7 @@
#include "media/base/bind_to_current_loop.h"
#include "media/base/decoder_buffer.h"
#include "media/base/test_helpers.h"
+#include "media/base/timestamp_constants.h"
#include "media/base/video_frame.h"
#include "ui/gfx/geometry/rect.h"
#include "ui/gfx/geometry/size.h"
@@ -142,10 +143,10 @@ void FakeDemuxerStream::SeekToStart() {
void FakeDemuxerStream::UpdateVideoDecoderConfig() {
const gfx::Rect kVisibleRect(kStartWidth, kStartHeight);
- video_decoder_config_.Initialize(
- kCodecVP8, VIDEO_CODEC_PROFILE_UNKNOWN, VideoFrame::YV12,
- VideoFrame::COLOR_SPACE_UNSPECIFIED, next_coded_size_, kVisibleRect,
- next_coded_size_, NULL, 0, is_encrypted_, false);
+ video_decoder_config_.Initialize(kCodecVP8, VIDEO_CODEC_PROFILE_UNKNOWN,
+ PIXEL_FORMAT_YV12, COLOR_SPACE_UNSPECIFIED,
+ next_coded_size_, kVisibleRect,
+ next_coded_size_, NULL, 0, is_encrypted_);
next_coded_size_.Enlarge(kWidthDelta, kHeightDelta);
}
diff --git a/chromium/media/base/fake_media_resources.cc b/chromium/media/base/fake_media_resources.cc
new file mode 100644
index 00000000000..67ac7262403
--- /dev/null
+++ b/chromium/media/base/fake_media_resources.cc
@@ -0,0 +1,21 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/utf_string_conversions.h"
+#include "media/base/media_resources.h"
+
+namespace media {
+
+base::string16 FakeLocalizedStringProvider(MessageId message_id) {
+ if (message_id == DEFAULT_AUDIO_DEVICE_NAME)
+ return base::ASCIIToUTF16("Default");
+
+ return base::ASCIIToUTF16("FakeString");
+}
+
+void SetUpFakeMediaResources() {
+ SetLocalizedStringProvider(FakeLocalizedStringProvider);
+}
+
+} // namespace media
diff --git a/chromium/media/base/fake_media_resources.h b/chromium/media/base/fake_media_resources.h
new file mode 100644
index 00000000000..9a374bf7332
--- /dev/null
+++ b/chromium/media/base/fake_media_resources.h
@@ -0,0 +1,15 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_FAKE_MEDIA_RESOURCES_H_
+#define MEDIA_BASE_FAKE_MEDIA_RESOURCES_H_
+
+namespace media {
+
+// Call if tests require non-empty resource strings.
+void SetUpFakeMediaResources();
+
+} // namespace media
+
+#endif // MEDIA_BASE_FAKE_MEDIA_RESOURCES_H_
diff --git a/chromium/media/base/fake_output_device.cc b/chromium/media/base/fake_output_device.cc
new file mode 100644
index 00000000000..2f7c8e70062
--- /dev/null
+++ b/chromium/media/base/fake_output_device.cc
@@ -0,0 +1,36 @@
+// Copyright (c) 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/fake_output_device.h"
+
+#include "base/callback.h"
+
+namespace media {
+
+FakeOutputDevice::FakeOutputDevice()
+ : FakeOutputDevice(OUTPUT_DEVICE_STATUS_OK) {}
+
+FakeOutputDevice::FakeOutputDevice(OutputDeviceStatus device_status)
+ : device_status_(device_status) {}
+
+FakeOutputDevice::~FakeOutputDevice() {}
+
+void FakeOutputDevice::SwitchOutputDevice(
+ const std::string& device_id,
+ const url::Origin& security_origin,
+ const SwitchOutputDeviceCB& callback) {
+ callback.Run(device_status_);
+}
+
+AudioParameters FakeOutputDevice::GetOutputParameters() {
+ return media::AudioParameters(
+ media::AudioParameters::AUDIO_FAKE, media::CHANNEL_LAYOUT_STEREO,
+ media::AudioParameters::kTelephoneSampleRate, 16, 1);
+}
+
+OutputDeviceStatus FakeOutputDevice::GetDeviceStatus() {
+ return device_status_;
+}
+
+} // namespace media
diff --git a/chromium/media/base/fake_output_device.h b/chromium/media/base/fake_output_device.h
new file mode 100644
index 00000000000..121a375d56e
--- /dev/null
+++ b/chromium/media/base/fake_output_device.h
@@ -0,0 +1,34 @@
+// Copyright (c) 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_FAKE_OUTPUT_DEVICE_H_
+#define MEDIA_BASE_FAKE_OUTPUT_DEVICE_H_
+
+#include <string>
+
+#include "media/base/output_device.h"
+
+namespace media {
+
+class FakeOutputDevice : public OutputDevice {
+ public:
+ FakeOutputDevice();
+ explicit FakeOutputDevice(OutputDeviceStatus status);
+ ~FakeOutputDevice() override;
+
+ // OutputDevice implementation.
+ void SwitchOutputDevice(const std::string& device_id,
+ const url::Origin& security_origin,
+ const SwitchOutputDeviceCB& callback) override;
+ AudioParameters GetOutputParameters() override;
+ OutputDeviceStatus GetDeviceStatus() override;
+
+ private:
+ OutputDeviceStatus device_status_;
+ DISALLOW_COPY_AND_ASSIGN(FakeOutputDevice);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_FAKE_OUTPUT_DEVICE_H_
diff --git a/chromium/media/base/key_systems.cc b/chromium/media/base/key_systems.cc
index 32fa1c7314f..b2f482d68f8 100644
--- a/chromium/media/base/key_systems.cc
+++ b/chromium/media/base/key_systems.cc
@@ -446,7 +446,7 @@ void KeySystemsImpl::AddConcreteSupportedKeySystems(
EmeFeatureSupport::ALWAYS_ENABLED);
}
- DCHECK(!IsSupportedKeySystem(info.key_system))
+ DCHECK(!IsConcreteSupportedKeySystem(info.key_system))
<< "Key system '" << info.key_system << "' already registered";
DCHECK(!parent_key_system_map_.count(info.key_system))
<< "'" << info.key_system << "' is already registered as a parent";
@@ -660,7 +660,20 @@ void KeySystemsImpl::AddCodecMask(
bool KeySystemsImpl::IsSupportedKeySystem(const std::string& key_system) const {
DCHECK(thread_checker_.CalledOnValidThread());
- return concrete_key_system_map_.count(key_system) != 0;
+
+ if (!IsConcreteSupportedKeySystem(key_system))
+ return false;
+
+ // TODO(ddorwin): Move this to where we add key systems when prefixed EME is
+ // removed (crbug.com/249976).
+ if (!IsPotentiallySupportedKeySystem(key_system)) {
+ // If you encounter this path, see the comments for the above function.
+ DLOG(ERROR) << "Unrecognized key system " << key_system
+ << ". See code comments.";
+ return false;
+ }
+
+ return true;
}
EmeConfigRule KeySystemsImpl::GetContentTypeConfigRule(
@@ -674,12 +687,14 @@ EmeConfigRule KeySystemsImpl::GetContentTypeConfigRule(
SupportedCodecs media_type_codec_mask = EME_CODEC_NONE;
switch (media_type) {
case EmeMediaType::AUDIO:
- if (!base::StartsWithASCII(container_mime_type, "audio/", true))
+ if (!base::StartsWith(container_mime_type, "audio/",
+ base::CompareCase::SENSITIVE))
return EmeConfigRule::NOT_SUPPORTED;
media_type_codec_mask = audio_codec_mask_;
break;
case EmeMediaType::VIDEO:
- if (!base::StartsWithASCII(container_mime_type, "video/", true))
+ if (!base::StartsWith(container_mime_type, "video/",
+ base::CompareCase::SENSITIVE))
return EmeConfigRule::NOT_SUPPORTED;
media_type_codec_mask = video_codec_mask_;
break;
@@ -875,22 +890,6 @@ bool PrefixedIsSupportedConcreteKeySystem(const std::string& key_system) {
key_system);
}
-bool IsSupportedKeySystem(const std::string& key_system) {
- if (!KeySystemsImpl::GetInstance()->IsSupportedKeySystem(key_system))
- return false;
-
- // TODO(ddorwin): Move this to where we add key systems when prefixed EME is
- // removed (crbug.com/249976).
- if (!IsPotentiallySupportedKeySystem(key_system)) {
- // If you encounter this path, see the comments for the above function.
- NOTREACHED() << "Unrecognized key system " << key_system
- << ". See code comments.";
- return false;
- }
-
- return true;
-}
-
bool IsSupportedKeySystemWithInitDataType(const std::string& key_system,
EmeInitDataType init_data_type) {
return KeySystemsImpl::GetInstance()->IsSupportedInitDataType(key_system,
diff --git a/chromium/media/base/key_systems.h b/chromium/media/base/key_systems.h
index ade5d65ca6b..92a4b637fae 100644
--- a/chromium/media/base/key_systems.h
+++ b/chromium/media/base/key_systems.h
@@ -82,11 +82,6 @@ MEDIA_EXPORT std::string GetUnprefixedKeySystemName(
MEDIA_EXPORT std::string GetPrefixedKeySystemName(
const std::string& key_system);
-// Use for unprefixed EME only!
-// Returns whether |key_system| is a supported key system.
-// Note: Shouldn't be used for prefixed API as the original
-MEDIA_EXPORT bool IsSupportedKeySystem(const std::string& key_system);
-
// Use for prefixed EME only!
MEDIA_EXPORT bool IsSupportedKeySystemWithInitDataType(
const std::string& key_system,
diff --git a/chromium/media/base/key_systems_unittest.cc b/chromium/media/base/key_systems_unittest.cc
index b97852d132f..12543946b0f 100644
--- a/chromium/media/base/key_systems_unittest.cc
+++ b/chromium/media/base/key_systems_unittest.cc
@@ -73,6 +73,10 @@ static bool IsSupportedKeySystemWithAudioMimeType(
EmeConfigRule::NOT_SUPPORTED);
}
+static bool IsSupportedKeySystem(const std::string& key_system) {
+ return KeySystems::GetInstance()->IsSupportedKeySystem(key_system);
+}
+
// Adds test container and codec masks.
// This function must be called after SetMediaClient() if a MediaClient will be
// provided.
diff --git a/chromium/media/base/mac/video_frame_mac.cc b/chromium/media/base/mac/video_frame_mac.cc
index 9fd290465c0..409077f1459 100644
--- a/chromium/media/base/mac/video_frame_mac.cc
+++ b/chromium/media/base/mac/video_frame_mac.cc
@@ -43,11 +43,11 @@ WrapVideoFrameInCVPixelBuffer(const VideoFrame& frame) {
// represent I420 and NV12 frames. In addition, VideoFrame does not carry
// colorimetric information, so this function assumes standard video range
// and ITU Rec 709 primaries.
- const VideoFrame::Format video_frame_format = frame.format();
+ const VideoPixelFormat video_frame_format = frame.format();
OSType cv_format;
- if (video_frame_format == VideoFrame::Format::I420) {
+ if (video_frame_format == PIXEL_FORMAT_I420) {
cv_format = kCVPixelFormatType_420YpCbCr8Planar;
- } else if (video_frame_format == VideoFrame::Format::NV12) {
+ } else if (video_frame_format == PIXEL_FORMAT_NV12) {
cv_format = CoreVideoGlue::kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
} else {
DLOG(ERROR) << " unsupported frame format: " << video_frame_format;
diff --git a/chromium/media/base/mac/video_frame_mac_unittests.cc b/chromium/media/base/mac/video_frame_mac_unittests.cc
index aa037955485..77a19178ff1 100644
--- a/chromium/media/base/mac/video_frame_mac_unittests.cc
+++ b/chromium/media/base/mac/video_frame_mac_unittests.cc
@@ -22,7 +22,7 @@ const int kHeight = 48;
const base::TimeDelta kTimestamp = base::TimeDelta::FromMicroseconds(1337);
struct FormatPair {
- VideoFrame::Format chrome;
+ VideoPixelFormat chrome;
OSType corevideo;
};
@@ -34,7 +34,7 @@ void Increment(int* i) {
TEST(VideoFrameMac, CheckBasicAttributes) {
gfx::Size size(kWidth, kHeight);
- auto frame = VideoFrame::CreateFrame(VideoFrame::I420, size, gfx::Rect(size),
+ auto frame = VideoFrame::CreateFrame(PIXEL_FORMAT_I420, size, gfx::Rect(size),
size, kTimestamp);
ASSERT_TRUE(frame.get());
@@ -42,7 +42,7 @@ TEST(VideoFrameMac, CheckBasicAttributes) {
ASSERT_TRUE(pb.get());
gfx::Size coded_size = frame->coded_size();
- VideoFrame::Format format = frame->format();
+ VideoPixelFormat format = frame->format();
EXPECT_EQ(coded_size.width(), static_cast<int>(CVPixelBufferGetWidth(pb)));
EXPECT_EQ(coded_size.height(), static_cast<int>(CVPixelBufferGetHeight(pb)));
@@ -63,11 +63,11 @@ TEST(VideoFrameMac, CheckBasicAttributes) {
TEST(VideoFrameMac, CheckFormats) {
// CreateFrame() does not support non planar YUV, e.g. NV12.
const FormatPair format_pairs[] = {
- {VideoFrame::I420, kCVPixelFormatType_420YpCbCr8Planar},
- {VideoFrame::YV12, 0},
- {VideoFrame::YV16, 0},
- {VideoFrame::YV12A, 0},
- {VideoFrame::YV24, 0},
+ {PIXEL_FORMAT_I420, kCVPixelFormatType_420YpCbCr8Planar},
+ {PIXEL_FORMAT_YV12, 0},
+ {PIXEL_FORMAT_YV16, 0},
+ {PIXEL_FORMAT_YV12A, 0},
+ {PIXEL_FORMAT_YV24, 0},
};
gfx::Size size(kWidth, kHeight);
@@ -86,7 +86,7 @@ TEST(VideoFrameMac, CheckFormats) {
TEST(VideoFrameMac, CheckLifetime) {
gfx::Size size(kWidth, kHeight);
- auto frame = VideoFrame::CreateFrame(VideoFrame::I420, size, gfx::Rect(size),
+ auto frame = VideoFrame::CreateFrame(PIXEL_FORMAT_I420, size, gfx::Rect(size),
size, kTimestamp);
ASSERT_TRUE(frame.get());
@@ -108,8 +108,8 @@ TEST(VideoFrameMac, CheckLifetime) {
TEST(VideoFrameMac, CheckWrapperFrame) {
const FormatPair format_pairs[] = {
- {VideoFrame::I420, kCVPixelFormatType_420YpCbCr8Planar},
- {VideoFrame::NV12,
+ {PIXEL_FORMAT_I420, kCVPixelFormatType_420YpCbCr8Planar},
+ {PIXEL_FORMAT_NV12,
CoreVideoGlue::kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange},
};
diff --git a/chromium/media/base/media.cc b/chromium/media/base/media.cc
index 388c6e0eb35..c3e1378ce8c 100644
--- a/chromium/media/base/media.cc
+++ b/chromium/media/base/media.cc
@@ -8,6 +8,7 @@
#include "base/lazy_instance.h"
#include "base/path_service.h"
#include "base/synchronization/lock.h"
+#include "base/trace_event/trace_event.h"
#include "build/build_config.h"
#include "media/base/yuv_convert.h"
@@ -23,6 +24,9 @@ class MediaInitializer {
friend struct base::DefaultLazyInstanceTraits<MediaInitializer>;
MediaInitializer() {
+ TRACE_EVENT_WARMUP_CATEGORY("audio");
+ TRACE_EVENT_WARMUP_CATEGORY("media");
+
// Perform initialization of libraries which require runtime CPU detection.
InitializeCPUSpecificYUVConversions();
diff --git a/chromium/media/base/media_log.cc b/chromium/media/base/media_log.cc
index f5f58de73f4..754a5f924ce 100644
--- a/chromium/media/base/media_log.cc
+++ b/chromium/media/base/media_log.cc
@@ -271,20 +271,14 @@ void MediaLog::SetTimeProperty(
AddEvent(event.Pass());
}
-LogHelper::LogHelper(MediaLog::MediaLogLevel level, const LogCB& log_cb)
- : level_(level), log_cb_(log_cb) {
-}
-
LogHelper::LogHelper(MediaLog::MediaLogLevel level,
const scoped_refptr<MediaLog>& media_log)
: level_(level), media_log_(media_log) {
+ DCHECK(media_log_.get());
}
LogHelper::~LogHelper() {
- if (!log_cb_.is_null())
- log_cb_.Run(level_, stream_.str());
- else if (media_log_)
- media_log_->AddLogEvent(level_, stream_.str());
+ media_log_->AddLogEvent(level_, stream_.str());
}
} //namespace media
diff --git a/chromium/media/base/media_log.h b/chromium/media/base/media_log.h
index 5394fff3963..1e4b9464dd7 100644
--- a/chromium/media/base/media_log.h
+++ b/chromium/media/base/media_log.h
@@ -81,15 +81,9 @@ class MEDIA_EXPORT MediaLog : public base::RefCountedThreadSafe<MediaLog> {
DISALLOW_COPY_AND_ASSIGN(MediaLog);
};
-// Indicates a string should be added to the log.
-// First parameter - The log level for the string.
-// Second parameter - The string to add to the log.
-typedef base::Callback<void(MediaLog::MediaLogLevel, const std::string&)> LogCB;
-
-// Helper class to make it easier to use LogCB or MediaLog like DVLOG().
+// Helper class to make it easier to use MediaLog like DVLOG().
class MEDIA_EXPORT LogHelper {
public:
- LogHelper(MediaLog::MediaLogLevel level, const LogCB& log_cb);
LogHelper(MediaLog::MediaLogLevel level,
const scoped_refptr<MediaLog>& media_log);
~LogHelper();
@@ -98,15 +92,14 @@ class MEDIA_EXPORT LogHelper {
private:
MediaLog::MediaLogLevel level_;
- LogCB log_cb_;
const scoped_refptr<MediaLog> media_log_;
std::stringstream stream_;
};
// Provides a stringstream to collect a log entry to pass to the provided
-// logger (LogCB or MediaLog) at the requested level.
-#define MEDIA_LOG(level, logger) \
- LogHelper((MediaLog::MEDIALOG_##level), (logger)).stream()
+// MediaLog at the requested level.
+#define MEDIA_LOG(level, media_log) \
+ LogHelper((MediaLog::MEDIALOG_##level), (media_log)).stream()
// Logs only while |count| < |max|, increments |count| for each log, and warns
// in the log if |count| has just reached |max|.
@@ -121,8 +114,8 @@ class MEDIA_EXPORT LogHelper {
// |count| < |max| and |count|++ is 0.
// TODO(wolenetz,chcunningham): Consider using a helper class instead of a macro
// to improve readability.
-#define LIMITED_MEDIA_LOG(level, logger, count, max) \
- LAZY_STREAM(MEDIA_LOG(level, logger), \
+#define LIMITED_MEDIA_LOG(level, media_log, count, max) \
+ LAZY_STREAM(MEDIA_LOG(level, media_log), \
(count) < (max) && ((count)++ || true)) \
<< (((count) == (max)) ? "(Log limit reached. Further similar entries " \
"may be suppressed): " \
diff --git a/chromium/media/base/media_permission.h b/chromium/media/base/media_permission.h
index 760ba0caa9f..b6c3e1eb826 100644
--- a/chromium/media/base/media_permission.h
+++ b/chromium/media/base/media_permission.h
@@ -20,6 +20,8 @@ class MEDIA_EXPORT MediaPermission {
enum Type {
PROTECTED_MEDIA_IDENTIFIER,
+ AUDIO_CAPTURE,
+ VIDEO_CAPTURE,
};
MediaPermission();
diff --git a/chromium/media/base/media_resources.cc b/chromium/media/base/media_resources.cc
new file mode 100644
index 00000000000..d7259e91477
--- /dev/null
+++ b/chromium/media/base/media_resources.cc
@@ -0,0 +1,29 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/media_resources.h"
+
+#include "base/logging.h"
+#include "base/strings/utf_string_conversions.h"
+
+namespace media {
+
+static LocalizedStringProvider g_localized_string_provider = nullptr;
+
+void SetLocalizedStringProvider(LocalizedStringProvider func) {
+ g_localized_string_provider = func;
+}
+
+#if !defined(OS_IOS)
+std::string GetLocalizedStringUTF8(MessageId message_id) {
+ return base::UTF16ToUTF8(GetLocalizedStringUTF16(message_id));
+}
+
+base::string16 GetLocalizedStringUTF16(MessageId message_id) {
+ return g_localized_string_provider ? g_localized_string_provider(message_id)
+ : base::string16();
+}
+#endif
+
+} // namespace media
diff --git a/chromium/media/base/media_resources.h b/chromium/media/base/media_resources.h
new file mode 100644
index 00000000000..52053f79433
--- /dev/null
+++ b/chromium/media/base/media_resources.h
@@ -0,0 +1,52 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_MEDIA_RESOURCES_H_
+#define MEDIA_BASE_MEDIA_RESOURCES_H_
+
+#include <string>
+
+#include "base/strings/string16.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+// The media layer can't access Chrome's resource bundle directly. This facility
+// allows clients to provide indirect access.
+
+// IDs that will get mapped to corresponding entries with IDS_ prefixes in
+// chrome/app/generated_resources.grd.
+enum MessageId {
+ DEFAULT_AUDIO_DEVICE_NAME,
+#if defined(OS_WIN)
+ COMMUNICATIONS_AUDIO_DEVICE_NAME,
+#endif
+#if defined(OS_CHROMEOS)
+ BEAMFORMING_ON_DEFAULT_AUDIO_INPUT_DEVICE_NAME,
+ BEAMFORMING_OFF_DEFAULT_AUDIO_INPUT_DEVICE_NAME,
+#endif
+};
+
+// Implementations are expected to convert MessageIds to generated_resources.grd
+// IDs and extract the matching string from Chrome's resource bundle (e.g.
+// through l10n_util::GetStringUTF16).
+using LocalizedStringProvider = base::string16 (*)(MessageId message_id);
+
+// Initializes the global LocalizedStringProvider function.
+MEDIA_EXPORT void SetLocalizedStringProvider(LocalizedStringProvider func);
+
+#if !defined(OS_IOS)
+// The LocalizedStringProvider has probably not been initialized on iOS. This
+// will give an early compile warning for clients attempting to use it.
+
+// Returns a resource string corresponding to |message_id|. See l10n_util.h.
+// Returns an empty string if the LocalizedStringProvider has not been
+// initialized or if the ID is unrecognized.
+std::string GetLocalizedStringUTF8(MessageId message_id);
+base::string16 GetLocalizedStringUTF16(MessageId message_id);
+#endif
+
+} // namespace media
+
+#endif // MEDIA_BASE_MEDIA_RESOURCES_H_
diff --git a/chromium/media/base/media_switches.cc b/chromium/media/base/media_switches.cc
index 0b0154628df..0da3eeae0f6 100644
--- a/chromium/media/base/media_switches.cc
+++ b/chromium/media/base/media_switches.cc
@@ -9,9 +9,6 @@ namespace switches {
// Allow users to specify a custom buffer size for debugging purpose.
const char kAudioBufferSize[] = "audio-buffer-size";
-// Disables the new vsync driven video renderering path.
-const char kDisableNewVideoRenderer[] = "disable-new-video-renderer";
-
// Set number of threads to use for video decoding.
const char kVideoThreads[] = "video-threads";
@@ -96,7 +93,7 @@ const char kEnableAudioHangMonitor[] = "enable-audio-hang-monitor";
const char kUseFakeDeviceForMediaStream[] = "use-fake-device-for-media-stream";
// Use an .y4m file to play as the webcam. See the comments in
-// media/video/capture/file_video_capture_device.h for more details.
+// media/capture/video/file_video_capture_device.h for more details.
const char kUseFileForFakeVideoCapture[] = "use-file-for-fake-video-capture";
// Play a .wav file as the microphone. Note that for WebRTC calls we'll treat
@@ -119,4 +116,8 @@ const char kRequireAudioHardwareForTesting[] =
// TODO(dalecurtis): Remove once experiments for http://crbug.com/470940 finish.
const char kVideoUnderflowThresholdMs[] = "video-underflow-threshold-ms";
+// Use the new rendering algorithm for webrtc, which is designed to improve
+// smoothness.
+const char kEnableRTCSmoothnessAlgorithm[] = "enable-rtc-smoothness-algorithm";
+
} // namespace switches
diff --git a/chromium/media/base/media_switches.h b/chromium/media/base/media_switches.h
index c8c8488d95d..3787137f672 100644
--- a/chromium/media/base/media_switches.h
+++ b/chromium/media/base/media_switches.h
@@ -14,8 +14,6 @@ namespace switches {
MEDIA_EXPORT extern const char kAudioBufferSize[];
-MEDIA_EXPORT extern const char kDisableNewVideoRenderer[];
-
MEDIA_EXPORT extern const char kVideoThreads[];
#if defined(OS_ANDROID)
@@ -59,6 +57,8 @@ MEDIA_EXPORT extern const char kRequireAudioHardwareForTesting[];
MEDIA_EXPORT extern const char kVideoUnderflowThresholdMs[];
+MEDIA_EXPORT extern const char kEnableRTCSmoothnessAlgorithm[];
+
} // namespace switches
#endif // MEDIA_BASE_MEDIA_SWITCHES_H_
diff --git a/chromium/media/base/mime_util.cc b/chromium/media/base/mime_util.cc
index 535837c1474..367953946d0 100644
--- a/chromium/media/base/mime_util.cc
+++ b/chromium/media/base/mime_util.cc
@@ -36,6 +36,7 @@ class MimeUtil {
H264_BASELINE,
H264_MAIN,
H264_HIGH,
+ HEVC_MAIN,
VP8,
VP9,
THEORA
@@ -173,6 +174,8 @@ static const char* const proprietary_media_types[] = {
"audio/mp3",
"audio/x-mp3",
"audio/mpeg",
+
+ // AAC / ADTS
"audio/aac",
#if defined(ENABLE_MPEG2TS_STREAM_PARSER)
@@ -199,6 +202,15 @@ static bool IsCodecSupportedOnAndroid(MimeUtil::Codec codec) {
case MimeUtil::VORBIS:
return true;
+ case MimeUtil::HEVC_MAIN:
+#if defined(ENABLE_HEVC_DEMUXING)
+ // HEVC/H.265 is supported in Lollipop+ (API Level 21), according to
+ // http://developer.android.com/reference/android/media/MediaFormat.html
+ return base::android::BuildInfo::GetInstance()->sdk_int() >= 21;
+#else
+ return false;
+#endif
+
case MimeUtil::MPEG2_AAC_LC:
case MimeUtil::MPEG2_AAC_MAIN:
case MimeUtil::MPEG2_AAC_SSR:
@@ -253,6 +265,11 @@ static const char kMP4VideoCodecsExpression[] =
// kUnambiguousCodecStringMap/kAmbiguousCodecStringMap should be the only
// mapping from strings to codecs. See crbug.com/461009.
"avc1.42E00A,avc1.4D400A,avc1.64000A,"
+#if defined(ENABLE_HEVC_DEMUXING)
+ // Any valid unambiguous HEVC codec id will work here, since these strings
+ // are parsed and mapped to MimeUtil::Codec enum values.
+ "hev1.1.6.L93.B0,"
+#endif
"mp4a.66,mp4a.67,mp4a.68,mp4a.69,mp4a.6B,mp4a.40.2,mp4a.40.02,mp4a.40.5,"
"mp4a.40.05,mp4a.40.29";
@@ -276,6 +293,7 @@ static const MediaFormatStrict format_codec_mappings[] = {
{"audio/mpeg", "mp3"},
{"audio/mp3", ""},
{"audio/x-mp3", ""},
+ {"audio/aac", ""},
{"audio/mp4", kMP4AudioCodecsExpression},
{"audio/x-m4a", kMP4AudioCodecsExpression},
{"video/mp4", kMP4VideoCodecsExpression},
@@ -396,8 +414,7 @@ void MimeUtil::InitializeMimeTypeMaps() {
}
bool MimeUtil::IsSupportedMediaMimeType(const std::string& mime_type) const {
- return media_map_.find(base::StringToLowerASCII(mime_type)) !=
- media_map_.end();
+ return media_map_.find(base::ToLowerASCII(mime_type)) != media_map_.end();
}
@@ -417,9 +434,13 @@ bool MimeUtil::AreSupportedMediaCodecs(
void MimeUtil::ParseCodecString(const std::string& codecs,
std::vector<std::string>* codecs_out,
bool strip) {
- std::string no_quote_codecs;
- base::TrimString(codecs, "\"", &no_quote_codecs);
- base::SplitString(no_quote_codecs, ',', codecs_out);
+ *codecs_out = base::SplitString(
+ base::TrimString(codecs, "\"", base::TRIM_ALL),
+ ",", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
+
+ // Convert empty or all-whitespace input to 0 results.
+ if (codecs_out->size() == 1 && (*codecs_out)[0].empty())
+ codecs_out->clear();
if (!strip)
return;
@@ -435,14 +456,14 @@ void MimeUtil::ParseCodecString(const std::string& codecs,
}
bool MimeUtil::IsStrictMediaMimeType(const std::string& mime_type) const {
- return strict_format_map_.find(base::StringToLowerASCII(mime_type)) !=
+ return strict_format_map_.find(base::ToLowerASCII(mime_type)) !=
strict_format_map_.end();
}
SupportsType MimeUtil::IsSupportedStrictMediaMimeType(
const std::string& mime_type,
const std::vector<std::string>& codecs) const {
- const std::string mime_type_lower_case = base::StringToLowerASCII(mime_type);
+ const std::string mime_type_lower_case = base::ToLowerASCII(mime_type);
StrictMappings::const_iterator it_strict_map =
strict_format_map_.find(mime_type_lower_case);
if (it_strict_map == strict_format_map_.end())
@@ -477,28 +498,6 @@ void MimeUtil::RemoveProprietaryMediaTypesAndCodecsForTests() {
allow_proprietary_codecs_ = false;
}
-// Returns true iff |profile_str| conforms to hex string "42y0".
-//
-// |profile_str| is the first four characters of the H.264 suffix string. From
-// ISO-14496-10 7.3.2.1, it consists of:
-// 8 bits: profile_idc; required to be 0x42 here.
-// 1 bit: constraint_set0_flag; ignored here.
-// 1 bit: constraint_set1_flag; ignored here.
-// 1 bit: constraint_set2_flag; ignored here.
-// 1 bit: constraint_set3_flag; ignored here.
-// 4 bits: reserved; required to be 0 here.
-//
-// The spec indicates other ways, not implemented here, that a |profile_str|
-// can indicate a baseline conforming decoder is sufficient for decode in Annex
-// A.2.1: "[profile_idc not necessarily 0x42] with constraint_set0_flag set and
-// in which level_idc and constraint_set3_flag represent a level less than or
-// equal to the specified level."
-static bool IsValidH264BaselineProfile(const std::string& profile_str) {
- return (profile_str.size() == 4 && profile_str[0] == '4' &&
- profile_str[1] == '2' && base::IsHexDigit(profile_str[2]) &&
- profile_str[3] == '0');
-}
-
static bool IsValidH264Level(const std::string& level_str) {
uint32 level;
if (level_str.size() != 2 || !base::HexStringToUInt(level_str, &level))
@@ -514,9 +513,9 @@ static bool IsValidH264Level(const std::string& level_str) {
}
// Handle parsing H.264 codec IDs as outlined in RFC 6381 and ISO-14496-10.
-// avc1.42y0xx, y >= 8 - H.264 Baseline
-// avc1.4D40xx - H.264 Main
-// avc1.6400xx - H.264 High
+// avc1.42x0yy - H.264 Baseline
+// avc1.4Dx0yy - H.264 Main
+// avc1.64x0yy - H.264 High
//
// avc1.xxxxxx & avc3.xxxxxx are considered ambiguous forms that are trying to
// signal H.264 Baseline. For example, the idc_level, profile_idc and
@@ -526,19 +525,32 @@ static bool IsValidH264Level(const std::string& level_str) {
static bool ParseH264CodecID(const std::string& codec_id,
MimeUtil::Codec* codec,
bool* is_ambiguous) {
- // Make sure we have avc1.xxxxxx or avc3.xxxxxx
+ // Make sure we have avc1.xxxxxx or avc3.xxxxxx , where xxxxxx are hex digits
+ if (!base::StartsWith(codec_id, "avc1.", base::CompareCase::SENSITIVE) &&
+ !base::StartsWith(codec_id, "avc3.", base::CompareCase::SENSITIVE)) {
+ return false;
+ }
if (codec_id.size() != 11 ||
- (!base::StartsWithASCII(codec_id, "avc1.", true) &&
- !base::StartsWithASCII(codec_id, "avc3.", true))) {
+ !base::IsHexDigit(codec_id[5]) || !base::IsHexDigit(codec_id[6]) ||
+ !base::IsHexDigit(codec_id[7]) || !base::IsHexDigit(codec_id[8]) ||
+ !base::IsHexDigit(codec_id[9]) || !base::IsHexDigit(codec_id[10])) {
return false;
}
- std::string profile = base::StringToUpperASCII(codec_id.substr(5, 4));
- if (IsValidH264BaselineProfile(profile)) {
+ // Validate constraint flags and reserved bits.
+ if (!base::IsHexDigit(codec_id[7]) || codec_id[8] != '0') {
+ *codec = MimeUtil::H264_BASELINE;
+ *is_ambiguous = true;
+ return true;
+ }
+
+ // Extract the profile.
+ std::string profile = base::ToUpperASCII(codec_id.substr(5, 2));
+ if (profile == "42") {
*codec = MimeUtil::H264_BASELINE;
- } else if (profile == "4D40") {
+ } else if (profile == "4D") {
*codec = MimeUtil::H264_MAIN;
- } else if (profile == "6400") {
+ } else if (profile == "64") {
*codec = MimeUtil::H264_HIGH;
} else {
*codec = MimeUtil::H264_BASELINE;
@@ -546,11 +558,47 @@ static bool ParseH264CodecID(const std::string& codec_id,
return true;
}
- *is_ambiguous =
- !IsValidH264Level(base::StringToUpperASCII(codec_id.substr(9)));
+ // Validate level.
+ *is_ambiguous = !IsValidH264Level(base::ToUpperASCII(codec_id.substr(9)));
return true;
}
+#if defined(ENABLE_HEVC_DEMUXING)
+// ISO/IEC FDIS 14496-15 standard section E.3 describes the syntax of codec ids
+// reserved for HEVC. According to that spec HEVC codec id must start with
+// either "hev1." or "hvc1.". We don't yet support full parsing of HEVC codec
+// ids, but since no other codec id starts with those string we'll just treat
+// any string starting with "hev1." or "hvc1." as valid HEVC codec ids.
+// crbug.com/482761
+static bool ParseHEVCCodecID(const std::string& codec_id,
+ MimeUtil::Codec* codec,
+ bool* is_ambiguous) {
+ if (base::StartsWith(codec_id, "hev1.", base::CompareCase::SENSITIVE) ||
+ base::StartsWith(codec_id, "hvc1.", base::CompareCase::SENSITIVE)) {
+ *codec = MimeUtil::HEVC_MAIN;
+
+ // TODO(servolk): Full HEVC codec id parsing is not implemented yet (see
+ // crbug.com/482761). So treat HEVC codec ids as ambiguous for now.
+ *is_ambiguous = true;
+
+ // TODO(servolk): Most HEVC codec ids are treated as ambiguous (see above),
+ // but we need to recognize at least one valid unambiguous HEVC codec id,
+ // which is added into kMP4VideoCodecsExpression. We need it to be
+ // unambiguous to avoid DCHECK(!is_ambiguous) in InitializeMimeTypeMaps. We
+ // also use these in unit tests (see
+ // content/browser/media/media_canplaytype_browsertest.cc).
+ // Remove this workaround after crbug.com/482761 is fixed.
+ if (codec_id == "hev1.1.6.L93.B0" || codec_id == "hvc1.1.6.L93.B0") {
+ *is_ambiguous = false;
+ }
+
+ return true;
+ }
+
+ return false;
+}
+#endif
+
bool MimeUtil::StringToCodec(const std::string& codec_id,
Codec* codec,
bool* is_ambiguous) const {
@@ -563,8 +611,13 @@ bool MimeUtil::StringToCodec(const std::string& codec_id,
}
// If |codec_id| is not in |string_to_codec_map_|, then we assume that it is
- // an H.264 codec ID because currently those are the only ones that can't be
- // stored in the |string_to_codec_map_| and require parsing.
+ // either H.264 or HEVC/H.265 codec ID because currently those are the only
+ // ones that are not added to the |string_to_codec_map_| and require parsing.
+#if defined(ENABLE_HEVC_DEMUXING)
+ if (ParseHEVCCodecID(codec_id, codec, is_ambiguous)) {
+ return true;
+ }
+#endif
return ParseH264CodecID(codec_id, codec, is_ambiguous);
}
@@ -592,6 +645,7 @@ bool MimeUtil::IsCodecProprietary(Codec codec) const {
case H264_BASELINE:
case H264_MAIN:
case H264_HIGH:
+ case HEVC_MAIN:
return true;
case PCM:
@@ -615,6 +669,11 @@ bool MimeUtil::GetDefaultCodecLowerCase(const std::string& mime_type_lower_case,
return true;
}
+ if (mime_type_lower_case == "audio/aac") {
+ *default_codec = MimeUtil::MPEG4_AAC_LC;
+ return true;
+ }
+
return false;
}
diff --git a/chromium/media/base/mock_audio_renderer_sink.cc b/chromium/media/base/mock_audio_renderer_sink.cc
index b21eb19ba13..79653b2f823 100644
--- a/chromium/media/base/mock_audio_renderer_sink.cc
+++ b/chromium/media/base/mock_audio_renderer_sink.cc
@@ -3,10 +3,15 @@
// found in the LICENSE file.
#include "media/base/mock_audio_renderer_sink.h"
+#include "media/base/fake_output_device.h"
namespace media {
+MockAudioRendererSink::MockAudioRendererSink()
+ : MockAudioRendererSink(OUTPUT_DEVICE_STATUS_OK) {}
+
+MockAudioRendererSink::MockAudioRendererSink(OutputDeviceStatus device_status)
+ : output_device_(new FakeOutputDevice(device_status)) {}
-MockAudioRendererSink::MockAudioRendererSink() {}
MockAudioRendererSink::~MockAudioRendererSink() {}
void MockAudioRendererSink::Initialize(const AudioParameters& params,
@@ -14,4 +19,8 @@ void MockAudioRendererSink::Initialize(const AudioParameters& params,
callback_ = renderer;
}
+OutputDevice* MockAudioRendererSink::GetOutputDevice() {
+ return output_device_.get();
+}
+
} // namespace media
diff --git a/chromium/media/base/mock_audio_renderer_sink.h b/chromium/media/base/mock_audio_renderer_sink.h
index b813257601a..c44ac401c74 100644
--- a/chromium/media/base/mock_audio_renderer_sink.h
+++ b/chromium/media/base/mock_audio_renderer_sink.h
@@ -13,21 +13,19 @@
namespace media {
+class FakeOutputDevice;
+
class MockAudioRendererSink : public AudioRendererSink {
public:
MockAudioRendererSink();
+ explicit MockAudioRendererSink(OutputDeviceStatus device_status);
MOCK_METHOD0(Start, void());
MOCK_METHOD0(Stop, void());
MOCK_METHOD0(Pause, void());
MOCK_METHOD0(Play, void());
MOCK_METHOD1(SetVolume, bool(double volume));
- MOCK_METHOD0(SwitchOutputDevice, void());
- void SwitchOutputDevice(const std::string&,
- const GURL& security_origin,
- const SwitchOutputDeviceCB& callback) override {
- SwitchOutputDevice();
- }
+ OutputDevice* GetOutputDevice();
void Initialize(const AudioParameters& params,
RenderCallback* renderer) override;
@@ -38,6 +36,7 @@ class MockAudioRendererSink : public AudioRendererSink {
private:
RenderCallback* callback_;
+ scoped_ptr<FakeOutputDevice> output_device_;
DISALLOW_COPY_AND_ASSIGN(MockAudioRendererSink);
};
diff --git a/chromium/media/base/mock_media_log.cc b/chromium/media/base/mock_media_log.cc
new file mode 100644
index 00000000000..66bc8e20d3c
--- /dev/null
+++ b/chromium/media/base/mock_media_log.cc
@@ -0,0 +1,13 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/mock_media_log.h"
+
+namespace media {
+
+MockMediaLog::MockMediaLog() {}
+
+MockMediaLog::~MockMediaLog() {}
+
+} // namespace media
diff --git a/chromium/media/base/mock_media_log.h b/chromium/media/base/mock_media_log.h
new file mode 100644
index 00000000000..421c2d20481
--- /dev/null
+++ b/chromium/media/base/mock_media_log.h
@@ -0,0 +1,45 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_MOCK_MEDIA_LOG_H_
+#define MEDIA_BASE_MOCK_MEDIA_LOG_H_
+
+#include <string>
+
+#include "media/base/media_log.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+// Helper macros to reduce boilerplate when verifying media log entries.
+// |outer| is the std::string searched for substring |sub|.
+#define CONTAINS_STRING(outer, sub) (std::string::npos != (outer).find(sub))
+
+// "media_log_" is expected to be a scoped_refptr<MockMediaLog>, optionally a
+// StrictMock, in scope of the usage of this macro.
+#define EXPECT_MEDIA_LOG(x) EXPECT_CALL(*media_log_, DoAddEventLogString((x)))
+
+namespace media {
+
+class MockMediaLog : public MediaLog {
+ public:
+ MockMediaLog();
+
+ MOCK_METHOD1(DoAddEventLogString, void(const std::string& event));
+
+ // Trampoline method to workaround GMOCK problems with scoped_ptr<>.
+ // Also simplifies tests to be able to string match on the log string
+ // representation on the added event.
+ void AddEvent(scoped_ptr<MediaLogEvent> event) override {
+ DoAddEventLogString(MediaEventToLogString(*event));
+ }
+
+ protected:
+ virtual ~MockMediaLog();
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MockMediaLog);
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_MOCK_MEDIA_LOG_H_
diff --git a/chromium/media/base/null_video_sink_unittest.cc b/chromium/media/base/null_video_sink_unittest.cc
index 0279695a214..5d6e150d3d9 100644
--- a/chromium/media/base/null_video_sink_unittest.cc
+++ b/chromium/media/base/null_video_sink_unittest.cc
@@ -42,7 +42,7 @@ class NullVideoSinkTest : public testing::Test,
scoped_refptr<VideoFrame> CreateFrame(base::TimeDelta timestamp) {
const gfx::Size natural_size(8, 8);
- return VideoFrame::CreateFrame(VideoFrame::YV12, natural_size,
+ return VideoFrame::CreateFrame(PIXEL_FORMAT_YV12, natural_size,
gfx::Rect(natural_size), natural_size,
timestamp);
}
diff --git a/chromium/media/base/output_device.h b/chromium/media/base/output_device.h
new file mode 100644
index 00000000000..76f88c78ed9
--- /dev/null
+++ b/chromium/media/base/output_device.h
@@ -0,0 +1,67 @@
+// Copyright (c) 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_OUTPUT_DEVICE_H_
+#define MEDIA_BASE_OUTPUT_DEVICE_H_
+
+#include <string>
+
+#include "base/callback.h"
+#include "media/audio/audio_parameters.h"
+#include "media/base/media_export.h"
+#include "url/origin.h"
+
+namespace media {
+
+// Result of an audio output device switch operation
+enum OutputDeviceStatus {
+ OUTPUT_DEVICE_STATUS_OK = 0,
+ OUTPUT_DEVICE_STATUS_ERROR_NOT_FOUND,
+ OUTPUT_DEVICE_STATUS_ERROR_NOT_AUTHORIZED,
+ OUTPUT_DEVICE_STATUS_ERROR_INTERNAL,
+ OUTPUT_DEVICE_STATUS_LAST = OUTPUT_DEVICE_STATUS_ERROR_INTERNAL,
+};
+
+typedef base::Callback<void(OutputDeviceStatus)> SwitchOutputDeviceCB;
+
+// OutputDevice is an interface that allows performing operations related
+// audio output devices.
+
+class OutputDevice {
+ public:
+ // Attempts to switch the audio output device.
+ // Once the attempt is finished, |callback| is invoked with the
+ // result of the operation passed as a parameter. The result is a value from
+ // the media::SwitchOutputDeviceResult enum.
+ // There is no guarantee about the thread where |callback| will
+ // be invoked, so users are advised to use media::BindToCurrentLoop() to
+ // ensure that |callback| runs on the correct thread.
+ // Note also that copy constructors and destructors for arguments bound to
+ // |callback| may run on arbitrary threads as |callback| is moved across
+ // threads. It is advisable to bind arguments such that they are released by
+ // |callback| when it runs in order to avoid surprises.
+ virtual void SwitchOutputDevice(const std::string& device_id,
+ const url::Origin& security_origin,
+ const SwitchOutputDeviceCB& callback) = 0;
+
+ // Returns the device's audio output parameters.
+ // The return value is undefined if the device status (as returned by
+ // GetDeviceStatus()) is different from OUTPUT_DEVICE_STATUS_OK.
+ // If the parameters are not available, this method may block until they
+ // become available.
+ // This method must never be called on the IO thread.
+ virtual AudioParameters GetOutputParameters() = 0;
+
+ // Returns the status of output device.
+ // If the status is not available, this method may block until it becomes
+ // available. Must never be called on the IO thread.
+ virtual OutputDeviceStatus GetDeviceStatus() = 0;
+
+ protected:
+ virtual ~OutputDevice() {}
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_OUTPUT_DEVICE_H_
diff --git a/chromium/media/base/renderer_factory.h b/chromium/media/base/renderer_factory.h
index aea3a53496e..4b35f01764b 100644
--- a/chromium/media/base/renderer_factory.h
+++ b/chromium/media/base/renderer_factory.h
@@ -13,6 +13,7 @@
namespace base {
class SingleThreadTaskRunner;
+class TaskRunner;
}
namespace media {
@@ -33,6 +34,7 @@ class MEDIA_EXPORT RendererFactory {
// |video_renderer_sink| to render video.
virtual scoped_ptr<Renderer> CreateRenderer(
const scoped_refptr<base::SingleThreadTaskRunner>& media_task_runner,
+ const scoped_refptr<base::TaskRunner>& worker_task_runner,
AudioRendererSink* audio_renderer_sink,
VideoRendererSink* video_renderer_sink) = 0;
diff --git a/chromium/media/base/run_all_unittests.cc b/chromium/media/base/run_all_unittests.cc
index 7c0c14aa2e6..97e799b0317 100644
--- a/chromium/media/base/run_all_unittests.cc
+++ b/chromium/media/base/run_all_unittests.cc
@@ -5,8 +5,10 @@
#include "base/bind.h"
#include "base/command_line.h"
#include "base/test/launcher/unit_test_launcher.h"
+#include "base/test/test_discardable_memory_allocator.h"
#include "base/test/test_suite.h"
#include "build/build_config.h"
+#include "media/base/fake_media_resources.h"
#include "media/base/media.h"
#include "media/base/media_switches.h"
@@ -23,6 +25,9 @@ class TestSuiteNoAtExit : public base::TestSuite {
protected:
void Initialize() override;
+
+ private:
+ base::TestDiscardableMemoryAllocator discardable_memory_allocator_;
};
void TestSuiteNoAtExit::Initialize() {
@@ -43,6 +48,9 @@ void TestSuiteNoAtExit::Initialize() {
// Run this here instead of main() to ensure an AtExitManager is already
// present.
media::InitializeMediaLibrary();
+ media::SetUpFakeMediaResources();
+
+ base::DiscardableMemoryAllocator::SetInstance(&discardable_memory_allocator_);
}
int main(int argc, char** argv) {
diff --git a/chromium/media/base/seekable_buffer.cc b/chromium/media/base/seekable_buffer.cc
index 019ae4651ca..60edb59101c 100644
--- a/chromium/media/base/seekable_buffer.cc
+++ b/chromium/media/base/seekable_buffer.cc
@@ -8,6 +8,7 @@
#include "base/logging.h"
#include "media/base/data_buffer.h"
+#include "media/base/timestamp_constants.h"
namespace media {
diff --git a/chromium/media/base/seekable_buffer.h b/chromium/media/base/seekable_buffer.h
index 41d26fea185..889c669406e 100644
--- a/chromium/media/base/seekable_buffer.h
+++ b/chromium/media/base/seekable_buffer.h
@@ -37,7 +37,8 @@
#include "base/basictypes.h"
#include "base/memory/ref_counted.h"
-#include "media/base/buffers.h"
+#include "base/time/time.h"
+#include "media/base/media_export.h"
namespace media {
diff --git a/chromium/media/base/seekable_buffer_unittest.cc b/chromium/media/base/seekable_buffer_unittest.cc
index c2844254136..cd79e54de61 100644
--- a/chromium/media/base/seekable_buffer_unittest.cc
+++ b/chromium/media/base/seekable_buffer_unittest.cc
@@ -9,6 +9,7 @@
#include "base/time/time.h"
#include "media/base/data_buffer.h"
#include "media/base/seekable_buffer.h"
+#include "media/base/timestamp_constants.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace media {
diff --git a/chromium/media/base/stream_parser.cc b/chromium/media/base/stream_parser.cc
index 2412b8219fc..f63644a7755 100644
--- a/chromium/media/base/stream_parser.cc
+++ b/chromium/media/base/stream_parser.cc
@@ -4,7 +4,6 @@
#include "media/base/stream_parser.h"
-#include "media/base/buffers.h"
#include "media/base/stream_parser_buffer.h"
namespace media {
diff --git a/chromium/media/base/stream_parser.h b/chromium/media/base/stream_parser.h
index c777a16c9b0..b072e738549 100644
--- a/chromium/media/base/stream_parser.h
+++ b/chromium/media/base/stream_parser.h
@@ -125,7 +125,7 @@ class MEDIA_EXPORT StreamParser {
const EncryptedMediaInitDataCB& encrypted_media_init_data_cb,
const NewMediaSegmentCB& new_segment_cb,
const base::Closure& end_of_segment_cb,
- const LogCB& log_cb) = 0;
+ const scoped_refptr<MediaLog>& media_log) = 0;
// Called when a seek occurs. This flushes the current parser state
// and puts the parser in a state where it can receive data for the new seek
diff --git a/chromium/media/base/stream_parser_buffer.cc b/chromium/media/base/stream_parser_buffer.cc
index 49d0885da15..a9bb77517a2 100644
--- a/chromium/media/base/stream_parser_buffer.cc
+++ b/chromium/media/base/stream_parser_buffer.cc
@@ -5,7 +5,7 @@
#include "media/base/stream_parser_buffer.h"
#include "base/logging.h"
-#include "media/base/buffers.h"
+#include "media/base/timestamp_constants.h"
namespace media {
@@ -116,6 +116,24 @@ int StreamParserBuffer::GetSpliceBufferConfigId(size_t index) const {
: GetConfigId();
}
+const char* StreamParserBuffer::GetTypeName() const {
+ switch (type()) {
+ case DemuxerStream::AUDIO:
+ return "audio";
+ case DemuxerStream::VIDEO:
+ return "video";
+ case DemuxerStream::TEXT:
+ return "text";
+ case DemuxerStream::UNKNOWN:
+ return "unknown";
+ case DemuxerStream::NUM_TYPES:
+ // Fall-through to NOTREACHED().
+ break;
+ }
+ NOTREACHED();
+ return "";
+}
+
void StreamParserBuffer::ConvertToSpliceBuffer(
const BufferQueue& pre_splice_buffers) {
DCHECK(splice_buffers_.empty());
diff --git a/chromium/media/base/stream_parser_buffer.h b/chromium/media/base/stream_parser_buffer.h
index c614828537a..38460f43ab6 100644
--- a/chromium/media/base/stream_parser_buffer.h
+++ b/chromium/media/base/stream_parser_buffer.h
@@ -11,6 +11,7 @@
#include "media/base/demuxer_stream.h"
#include "media/base/media_export.h"
#include "media/base/stream_parser.h"
+#include "media/base/timestamp_constants.h"
namespace media {
@@ -134,6 +135,7 @@ class MEDIA_EXPORT StreamParserBuffer : public DecoderBuffer {
// Gets the parser's media type associated with this buffer. Value is
// meaningless for EOS buffers.
Type type() const { return type_; }
+ const char* GetTypeName() const;
// Gets the parser's track ID associated with this buffer. Value is
// meaningless for EOS buffers.
diff --git a/chromium/media/base/test_helpers.cc b/chromium/media/base/test_helpers.cc
index f4ba9915007..5da16adb1a7 100644
--- a/chromium/media/base/test_helpers.cc
+++ b/chromium/media/base/test_helpers.cc
@@ -126,8 +126,9 @@ static VideoDecoderConfig GetTestConfig(VideoCodec codec,
gfx::Size natural_size = coded_size;
return VideoDecoderConfig(codec, VIDEO_CODEC_PROFILE_UNKNOWN,
- VideoFrame::YV12, coded_size, visible_rect, natural_size,
- NULL, 0, is_encrypted);
+ PIXEL_FORMAT_YV12, COLOR_SPACE_UNSPECIFIED,
+ coded_size, visible_rect, natural_size, NULL, 0,
+ is_encrypted);
}
static const gfx::Size kNormalSize(320, 240);
@@ -273,10 +274,4 @@ void CallbackPairChecker::RecordBCalled() {
expecting_b_ = false;
}
-void AddLogEntryForTest(MediaLog::MediaLogLevel level,
- const std::string& message) {
- DVLOG(1) << "Media log (" << MediaLog::MediaLogLevelToString(level)
- << "): " << message;
-}
-
} // namespace media
diff --git a/chromium/media/base/test_helpers.h b/chromium/media/base/test_helpers.h
index 05ba898994d..f9df1bded6f 100644
--- a/chromium/media/base/test_helpers.h
+++ b/chromium/media/base/test_helpers.h
@@ -144,11 +144,6 @@ class CallbackPairChecker {
bool expecting_b_;
};
-// Test implementation of a media log LogCB that sends media log messages to
-// DVLOG(1).
-void AddLogEntryForTest(MediaLog::MediaLogLevel level,
- const std::string& message);
-
} // namespace media
#endif // MEDIA_BASE_TEST_HELPERS_H_
diff --git a/chromium/media/base/time_delta_interpolator.cc b/chromium/media/base/time_delta_interpolator.cc
index acff37e72eb..b65bdbc5fed 100644
--- a/chromium/media/base/time_delta_interpolator.cc
+++ b/chromium/media/base/time_delta_interpolator.cc
@@ -8,7 +8,7 @@
#include "base/logging.h"
#include "base/time/tick_clock.h"
-#include "media/base/buffers.h"
+#include "media/base/timestamp_constants.h"
namespace media {
diff --git a/chromium/media/base/timestamp_constants.h b/chromium/media/base/timestamp_constants.h
new file mode 100644
index 00000000000..dfe9b993e52
--- /dev/null
+++ b/chromium/media/base/timestamp_constants.h
@@ -0,0 +1,25 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_TIMESTAMP_CONSTANTS_H_
+#define MEDIA_BASE_TIMESTAMP_CONSTANTS_H_
+
+#include "base/time/time.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+// Indicates an invalid or missing timestamp.
+MEDIA_EXPORT inline base::TimeDelta kNoTimestamp() {
+ return base::TimeDelta::FromMicroseconds(kint64min);
+}
+
+// Represents an infinite stream duration.
+MEDIA_EXPORT inline base::TimeDelta kInfiniteDuration() {
+ return base::TimeDelta::Max();
+}
+
+} // namespace media
+
+#endif // MEDIA_BASE_TIMESTAMP_CONSTANTS_H_
diff --git a/chromium/media/base/video_capture_types.cc b/chromium/media/base/video_capture_types.cc
index 1796bf2f64c..0bc6ae63951 100644
--- a/chromium/media/base/video_capture_types.cc
+++ b/chromium/media/base/video_capture_types.cc
@@ -7,9 +7,24 @@
#include "base/logging.h"
#include "base/strings/stringprintf.h"
#include "media/base/limits.h"
+#include "media/base/video_frame.h"
namespace media {
+// This list is ordered by precedence of use.
+static VideoPixelFormat const kSupportedCapturePixelFormats[] = {
+ PIXEL_FORMAT_I420,
+ PIXEL_FORMAT_YV12,
+ PIXEL_FORMAT_NV12,
+ PIXEL_FORMAT_NV21,
+ PIXEL_FORMAT_UYVY,
+ PIXEL_FORMAT_YUY2,
+ PIXEL_FORMAT_RGB24,
+ PIXEL_FORMAT_RGB32,
+ PIXEL_FORMAT_ARGB,
+ PIXEL_FORMAT_MJPEG,
+};
+
VideoCaptureFormat::VideoCaptureFormat()
: frame_rate(0.0f),
pixel_format(PIXEL_FORMAT_UNKNOWN),
@@ -47,72 +62,16 @@ bool VideoCaptureFormat::IsValid() const {
}
size_t VideoCaptureFormat::ImageAllocationSize() const {
- size_t result_frame_size = frame_size.GetArea();
- switch (pixel_format) {
- case PIXEL_FORMAT_I420:
- case PIXEL_FORMAT_YV12:
- case PIXEL_FORMAT_NV12:
- case PIXEL_FORMAT_NV21:
- result_frame_size = result_frame_size * 3 / 2;
- break;
- case PIXEL_FORMAT_UYVY:
- case PIXEL_FORMAT_YUY2:
- result_frame_size *= 2;
- break;
- case PIXEL_FORMAT_RGB24:
- result_frame_size *= 3;
- break;
- case PIXEL_FORMAT_RGB32:
- case PIXEL_FORMAT_ARGB:
- result_frame_size *= 4;
- break;
- case PIXEL_FORMAT_MJPEG:
- result_frame_size = 0;
- break;
- default: // Sizes for the rest of the formats are unknown.
- NOTREACHED() << "Unknown pixel format provided.";
- break;
- }
- return result_frame_size;
+ return VideoFrame::AllocationSize(pixel_format, frame_size);
}
//static
std::string VideoCaptureFormat::ToString(const VideoCaptureFormat& format) {
- return base::StringPrintf("(%s)@%.3ffps, pixel format: %s storage: %s.",
- format.frame_size.ToString().c_str(),
- format.frame_rate,
- PixelFormatToString(format.pixel_format).c_str(),
- PixelStorageToString(format.pixel_storage).c_str());
-}
-
-// static
-std::string VideoCaptureFormat::PixelFormatToString(VideoPixelFormat format) {
- switch (format) {
- case PIXEL_FORMAT_UNKNOWN:
- return "UNKNOWN";
- case PIXEL_FORMAT_I420:
- return "I420";
- case PIXEL_FORMAT_YUY2:
- return "YUY2";
- case PIXEL_FORMAT_UYVY:
- return "UYVY";
- case PIXEL_FORMAT_RGB24:
- return "RGB24";
- case PIXEL_FORMAT_RGB32:
- return "RGB32";
- case PIXEL_FORMAT_ARGB:
- return "ARGB";
- case PIXEL_FORMAT_MJPEG:
- return "MJPEG";
- case PIXEL_FORMAT_NV12:
- return "NV12";
- case PIXEL_FORMAT_NV21:
- return "NV21";
- case PIXEL_FORMAT_YV12:
- return "YV12";
- }
- NOTREACHED() << "Invalid VideoPixelFormat provided: " << format;
- return std::string();
+ return base::StringPrintf(
+ "(%s)@%.3ffps, pixel format: %s storage: %s.",
+ format.frame_size.ToString().c_str(), format.frame_rate,
+ VideoPixelFormatToString(format.pixel_format).c_str(),
+ PixelStorageToString(format.pixel_storage).c_str());
}
// static
@@ -131,9 +90,23 @@ std::string VideoCaptureFormat::PixelStorageToString(
return std::string();
}
+// static
+bool VideoCaptureFormat::ComparePixelFormatPreference(
+ const VideoPixelFormat& lhs,
+ const VideoPixelFormat& rhs) {
+ const auto& format_lhs = std::find(
+ kSupportedCapturePixelFormats,
+ kSupportedCapturePixelFormats + arraysize(kSupportedCapturePixelFormats),
+ lhs);
+ const auto& format_rhs = std::find(
+ kSupportedCapturePixelFormats,
+ kSupportedCapturePixelFormats + arraysize(kSupportedCapturePixelFormats),
+ rhs);
+ return format_lhs < format_rhs;
+}
+
VideoCaptureParams::VideoCaptureParams()
: resolution_change_policy(RESOLUTION_POLICY_FIXED_RESOLUTION),
- use_gpu_memory_buffers(false) {
-}
+ power_line_frequency(PowerLineFrequency::FREQUENCY_DEFAULT) {}
} // namespace media
diff --git a/chromium/media/base/video_capture_types.h b/chromium/media/base/video_capture_types.h
index 2a8c243f27f..4790ea52e1e 100644
--- a/chromium/media/base/video_capture_types.h
+++ b/chromium/media/base/video_capture_types.h
@@ -9,6 +9,7 @@
#include "build/build_config.h"
#include "media/base/media_export.h"
+#include "media/base/video_types.h"
#include "ui/gfx/geometry/size.h"
namespace media {
@@ -17,24 +18,6 @@ namespace media {
// shared with device manager.
typedef int VideoCaptureSessionId;
-// Color formats from camera. This list is sorted in order of preference.
-// TODO(mcasas): Consider if this list can be merged with media::Format.
-// TODO(mcasas): http://crbug.com/504160 Consider making this an enum class.
-enum VideoPixelFormat {
- PIXEL_FORMAT_I420,
- PIXEL_FORMAT_YV12,
- PIXEL_FORMAT_NV12,
- PIXEL_FORMAT_NV21,
- PIXEL_FORMAT_UYVY,
- PIXEL_FORMAT_YUY2,
- PIXEL_FORMAT_RGB24,
- PIXEL_FORMAT_RGB32,
- PIXEL_FORMAT_ARGB,
- PIXEL_FORMAT_MJPEG,
- PIXEL_FORMAT_UNKNOWN, // Color format not set.
- PIXEL_FORMAT_MAX = PIXEL_FORMAT_UNKNOWN,
-};
-
// Storage type for the pixels. In principle, all combinations of Storage and
// Format are possible, though some are very typical, such as texture + ARGB,
// and others are only available if the platform allows it e.g. GpuMemoryBuffer.
@@ -69,6 +52,24 @@ enum ResolutionChangePolicy {
RESOLUTION_POLICY_LAST = RESOLUTION_POLICY_ANY_WITHIN_LIMIT,
};
+// Potential values of the googPowerLineFrequency optional constraint passed to
+// getUserMedia. Note that the numeric values are currently significant, and are
+// used to map enum values to corresponding frequency values.
+// TODO(ajose): http://crbug.com/525167 Consider making this a class.
+enum class PowerLineFrequency {
+ FREQUENCY_DEFAULT = 0,
+ FREQUENCY_50HZ = 50,
+ FREQUENCY_60HZ = 60,
+ FREQUENCY_MAX = FREQUENCY_60HZ
+};
+// Assert that the int:frequency mapping is correct.
+static_assert(static_cast<int>(PowerLineFrequency::FREQUENCY_DEFAULT) == 0,
+ "static_cast<int>(FREQUENCY_DEFAULT) must equal 0.");
+static_assert(static_cast<int>(PowerLineFrequency::FREQUENCY_50HZ) == 50,
+ "static_cast<int>(FREQUENCY_DEFAULT) must equal 50.");
+static_assert(static_cast<int>(PowerLineFrequency::FREQUENCY_60HZ) == 60,
+ "static_cast<int>(FREQUENCY_DEFAULT) must equal 60.");
+
// Some drivers use rational time per frame instead of float frame rate, this
// constant k is used to convert between both: A fps -> [k/k*A] seconds/frame.
const int kFrameRatePrecision = 10000;
@@ -88,9 +89,13 @@ struct MEDIA_EXPORT VideoCaptureFormat {
VideoPixelStorage pixel_storage);
static std::string ToString(const VideoCaptureFormat& format);
- static std::string PixelFormatToString(VideoPixelFormat format);
static std::string PixelStorageToString(VideoPixelStorage storage);
+ // Compares the priority of the pixel formats. Returns true if |lhs| is the
+ // preferred pixel format in comparison with |rhs|. Returns false otherwise.
+ static bool ComparePixelFormatPreference(const VideoPixelFormat& lhs,
+ const VideoPixelFormat& rhs);
+
// Returns the required buffer size to hold an image of a given
// VideoCaptureFormat with no padding and tightly packed.
size_t ImageAllocationSize() const;
@@ -122,8 +127,8 @@ struct MEDIA_EXPORT VideoCaptureParams {
bool operator==(const VideoCaptureParams& other) const {
return requested_format == other.requested_format &&
- use_gpu_memory_buffers == other.use_gpu_memory_buffers &&
- resolution_change_policy == other.resolution_change_policy;
+ resolution_change_policy == other.resolution_change_policy &&
+ power_line_frequency == other.power_line_frequency;
}
// Requests a resolution and format at which the capture will occur.
@@ -132,8 +137,8 @@ struct MEDIA_EXPORT VideoCaptureParams {
// Policy for resolution change.
ResolutionChangePolicy resolution_change_policy;
- // Indication to the Driver to try to use GpuMemoryBuffers.
- bool use_gpu_memory_buffers;
+ // User-specified power line frequency.
+ PowerLineFrequency power_line_frequency;
};
} // namespace media
diff --git a/chromium/media/base/video_capturer_source.cc b/chromium/media/base/video_capturer_source.cc
index c1ebbe1dbff..0ec65652749 100644
--- a/chromium/media/base/video_capturer_source.cc
+++ b/chromium/media/base/video_capturer_source.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2015 The Chromium Authors. All rights reserved.
+// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -6,6 +6,11 @@
namespace media {
+// TODO(mcasas): VideoCapturerSource is implemented in other .dll(s) (e.g.
+// content) in Windows component build. The current compiler fails to generate
+// object files for this destructor if it's defined in the header file and that
+// breaks linking. Consider removing this file when the compiler+linker is able
+// to generate symbols across linking units.
VideoCapturerSource::~VideoCapturerSource() {}
} // namespace media
diff --git a/chromium/media/base/video_capturer_source.h b/chromium/media/base/video_capturer_source.h
index 774f28da02f..a99faf8e394 100644
--- a/chromium/media/base/video_capturer_source.h
+++ b/chromium/media/base/video_capturer_source.h
@@ -1,25 +1,26 @@
-// Copyright (c) 2015 The Chromium Authors. All rights reserved.
+// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURER_SOURCE_H_
-#define MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURER_SOURCE_H_
+#ifndef MEDIA_BASE_VIDEO_CAPTURER_SOURCE_H_
+#define MEDIA_BASE_VIDEO_CAPTURER_SOURCE_H_
#include <string>
#include <vector>
-#include "base/basictypes.h"
+#include "base/callback.h"
#include "base/memory/ref_counted.h"
-#include "base/single_thread_task_runner.h"
+#include "base/time/time.h"
#include "media/base/media_export.h"
#include "media/base/video_capture_types.h"
-#include "media/base/video_frame.h"
namespace media {
-// VideoCapturerSource is an interface representing the source for
-// captured video. An implementation will periodically call the frame
-// callback with new video frames.
+class VideoFrame;
+
+// VideoCapturerSource is an interface representing the source for captured
+// video. An implementation will periodically call the frame callback with new
+// video frames.
class MEDIA_EXPORT VideoCapturerSource {
public:
virtual ~VideoCapturerSource();
@@ -40,15 +41,14 @@ class MEDIA_EXPORT VideoCapturerSource {
// frame relative to the first frame generated by the corresponding source.
// Because a source can start generating frames before a subscriber is added,
// the first video frame delivered may not have timestamp equal to 0.
- typedef base::Callback<
- void(const scoped_refptr<media::VideoFrame>& video_frame,
- const base::TimeTicks& estimated_capture_time)>
- VideoCaptureDeliverFrameCB;
+ using VideoCaptureDeliverFrameCB =
+ base::Callback<void(const scoped_refptr<media::VideoFrame>& video_frame,
+ base::TimeTicks estimated_capture_time)>;
- typedef base::Callback<void(const media::VideoCaptureFormats&)>
- VideoCaptureDeviceFormatsCB;
+ using VideoCaptureDeviceFormatsCB =
+ base::Callback<void(const media::VideoCaptureFormats&)>;
- typedef base::Callback<void(bool)> RunningCallback;
+ using RunningCallback = base::Callback<void(bool)>;
// Collects the formats that can currently be used.
// |max_requested_height|, |max_requested_width|, and
@@ -61,9 +61,8 @@ class MEDIA_EXPORT VideoCapturerSource {
double max_requested_frame_rate,
const VideoCaptureDeviceFormatsCB& callback) = 0;
- // Starts capturing frames using the resolution in |params|.
- // |new_frame_callback| is triggered on |frame_callback_task_runner|
- // when a new video frame is available.
+ // Starts capturing frames using the capture |params|. |new_frame_callback| is
+ // triggered when a new video frame is available.
// If capturing is started successfully then |running_callback| will be
// called with a parameter of true. Note that some implementations may
// simply reject StartCapture (by calling running_callback with a false
@@ -75,7 +74,6 @@ class MEDIA_EXPORT VideoCapturerSource {
virtual void StartCapture(
const media::VideoCaptureParams& params,
const VideoCaptureDeliverFrameCB& new_frame_callback,
- scoped_refptr<base::SingleThreadTaskRunner> frame_callback_task_runner,
const RunningCallback& running_callback) = 0;
// Stops capturing frames and clears all callbacks including the
@@ -87,4 +85,4 @@ class MEDIA_EXPORT VideoCapturerSource {
} // namespace media
-#endif // MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURER_SOURCE_H_
+#endif // MEDIA_BASE_VIDEO_CAPTURER_SOURCE_H_
diff --git a/chromium/media/base/video_codecs.h b/chromium/media/base/video_codecs.h
new file mode 100644
index 00000000000..4053902de6f
--- /dev/null
+++ b/chromium/media/base/video_codecs.h
@@ -0,0 +1,64 @@
+// Copyright 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_VIDEO_CODECS_H_
+#define MEDIA_BASE_VIDEO_CODECS_H_
+
+namespace media {
+
+enum VideoCodec {
+ // These values are histogrammed over time; do not change their ordinal
+ // values. When deleting a codec replace it with a dummy value; when adding a
+ // codec, do so at the bottom (and update kVideoCodecMax).
+ kUnknownVideoCodec = 0,
+ kCodecH264,
+ kCodecVC1,
+ kCodecMPEG2,
+ kCodecMPEG4,
+ kCodecTheora,
+ kCodecVP8,
+ kCodecVP9,
+ kCodecHEVC,
+ // DO NOT ADD RANDOM VIDEO CODECS!
+ //
+ // The only acceptable time to add a new codec is if there is production code
+ // that uses said codec in the same CL.
+
+ kVideoCodecMax = kCodecHEVC // Must equal the last "real" codec above.
+};
+
+// Video stream profile. This *must* match PP_VideoDecoder_Profile.
+// (enforced in webkit/plugins/ppapi/ppb_video_decoder_impl.cc) and
+// gpu::VideoCodecProfile.
+enum VideoCodecProfile {
+ // Keep the values in this enum unique, as they imply format (h.264 vs. VP8,
+ // for example), and keep the values for a particular format grouped
+ // together for clarity.
+ VIDEO_CODEC_PROFILE_UNKNOWN = -1,
+ VIDEO_CODEC_PROFILE_MIN = VIDEO_CODEC_PROFILE_UNKNOWN,
+ H264PROFILE_MIN = 0,
+ H264PROFILE_BASELINE = H264PROFILE_MIN,
+ H264PROFILE_MAIN = 1,
+ H264PROFILE_EXTENDED = 2,
+ H264PROFILE_HIGH = 3,
+ H264PROFILE_HIGH10PROFILE = 4,
+ H264PROFILE_HIGH422PROFILE = 5,
+ H264PROFILE_HIGH444PREDICTIVEPROFILE = 6,
+ H264PROFILE_SCALABLEBASELINE = 7,
+ H264PROFILE_SCALABLEHIGH = 8,
+ H264PROFILE_STEREOHIGH = 9,
+ H264PROFILE_MULTIVIEWHIGH = 10,
+ H264PROFILE_MAX = H264PROFILE_MULTIVIEWHIGH,
+ VP8PROFILE_MIN = 11,
+ VP8PROFILE_ANY = VP8PROFILE_MIN,
+ VP8PROFILE_MAX = VP8PROFILE_ANY,
+ VP9PROFILE_MIN = 12,
+ VP9PROFILE_ANY = VP9PROFILE_MIN,
+ VP9PROFILE_MAX = VP9PROFILE_ANY,
+ VIDEO_CODEC_PROFILE_MAX = VP9PROFILE_MAX,
+};
+
+} // namespace media
+
+#endif // MEDIA_BASE_VIDEO_CODECS_H_
diff --git a/chromium/media/base/video_decoder_config.cc b/chromium/media/base/video_decoder_config.cc
index 8825e2d615d..f867ad19d62 100644
--- a/chromium/media/base/video_decoder_config.cc
+++ b/chromium/media/base/video_decoder_config.cc
@@ -5,88 +5,75 @@
#include "media/base/video_decoder_config.h"
#include "base/logging.h"
-#include "base/metrics/histogram.h"
+#include "media/base/video_frame.h"
namespace media {
+VideoCodec VideoCodecProfileToVideoCodec(VideoCodecProfile profile) {
+ switch (profile) {
+ case VIDEO_CODEC_PROFILE_UNKNOWN:
+ return kUnknownVideoCodec;
+ case H264PROFILE_BASELINE:
+ case H264PROFILE_MAIN:
+ case H264PROFILE_EXTENDED:
+ case H264PROFILE_HIGH:
+ case H264PROFILE_HIGH10PROFILE:
+ case H264PROFILE_HIGH422PROFILE:
+ case H264PROFILE_HIGH444PREDICTIVEPROFILE:
+ case H264PROFILE_SCALABLEBASELINE:
+ case H264PROFILE_SCALABLEHIGH:
+ case H264PROFILE_STEREOHIGH:
+ case H264PROFILE_MULTIVIEWHIGH:
+ return kCodecH264;
+ case VP8PROFILE_ANY:
+ return kCodecVP8;
+ case VP9PROFILE_ANY:
+ return kCodecVP9;
+ }
+ NOTREACHED();
+ return kUnknownVideoCodec;
+}
+
+
VideoDecoderConfig::VideoDecoderConfig()
: codec_(kUnknownVideoCodec),
profile_(VIDEO_CODEC_PROFILE_UNKNOWN),
- format_(VideoFrame::UNKNOWN),
+ format_(PIXEL_FORMAT_UNKNOWN),
is_encrypted_(false) {
}
VideoDecoderConfig::VideoDecoderConfig(VideoCodec codec,
VideoCodecProfile profile,
- VideoFrame::Format format,
+ VideoPixelFormat format,
+ ColorSpace color_space,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
const gfx::Size& natural_size,
const uint8* extra_data,
size_t extra_data_size,
bool is_encrypted) {
- Initialize(codec, profile, format, VideoFrame::COLOR_SPACE_UNSPECIFIED,
- coded_size, visible_rect, natural_size, extra_data,
- extra_data_size, is_encrypted, true);
+ Initialize(codec, profile, format, color_space, coded_size, visible_rect,
+ natural_size, extra_data, extra_data_size, is_encrypted);
}
VideoDecoderConfig::~VideoDecoderConfig() {}
-// Some videos just want to watch the world burn, with a height of 0; cap the
-// "infinite" aspect ratio resulting.
-static const int kInfiniteRatio = 99999;
-
-// Common aspect ratios (multiplied by 100 and truncated) used for histogramming
-// video sizes. These were taken on 20111103 from
-// http://wikipedia.org/wiki/Aspect_ratio_(image)#Previous_and_currently_used_aspect_ratios
-static const int kCommonAspectRatios100[] = {
- 100, 115, 133, 137, 143, 150, 155, 160, 166, 175, 177, 185, 200, 210, 220,
- 221, 235, 237, 240, 255, 259, 266, 276, 293, 400, 1200, kInfiniteRatio,
-};
-
-template<class T> // T has int width() & height() methods.
-static void UmaHistogramAspectRatio(const char* name, const T& size) {
- UMA_HISTOGRAM_CUSTOM_ENUMERATION(
- name,
- // Intentionally use integer division to truncate the result.
- size.height() ? (size.width() * 100) / size.height() : kInfiniteRatio,
- base::CustomHistogram::ArrayToCustomRanges(
- kCommonAspectRatios100, arraysize(kCommonAspectRatios100)));
-}
-
void VideoDecoderConfig::Initialize(VideoCodec codec,
VideoCodecProfile profile,
- VideoFrame::Format format,
- VideoFrame::ColorSpace color_space,
+ VideoPixelFormat format,
+ ColorSpace color_space,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
const gfx::Size& natural_size,
const uint8* extra_data,
size_t extra_data_size,
- bool is_encrypted,
- bool record_stats) {
+ bool is_encrypted) {
CHECK((extra_data_size != 0) == (extra_data != NULL));
- if (record_stats) {
- UMA_HISTOGRAM_ENUMERATION("Media.VideoCodec", codec, kVideoCodecMax + 1);
- // Drop UNKNOWN because U_H_E() uses one bucket for all values less than 1.
- if (profile >= 0) {
- UMA_HISTOGRAM_ENUMERATION("Media.VideoCodecProfile", profile,
- VIDEO_CODEC_PROFILE_MAX + 1);
- }
- UMA_HISTOGRAM_COUNTS_10000("Media.VideoCodedWidth", coded_size.width());
- UmaHistogramAspectRatio("Media.VideoCodedAspectRatio", coded_size);
- UMA_HISTOGRAM_COUNTS_10000("Media.VideoVisibleWidth", visible_rect.width());
- UmaHistogramAspectRatio("Media.VideoVisibleAspectRatio", visible_rect);
- UMA_HISTOGRAM_ENUMERATION("Media.VideoFramePixelFormat", format,
- VideoFrame::FORMAT_MAX + 1);
- UMA_HISTOGRAM_ENUMERATION("Media.VideoFrameColorSpace", color_space,
- VideoFrame::COLOR_SPACE_MAX + 1);
- }
-
codec_ = codec;
profile_ = profile;
format_ = format;
+ color_space_ = color_space;
coded_size_ = coded_size;
visible_rect_ = visible_rect;
natural_size_ = natural_size;
@@ -140,6 +127,8 @@ std::string VideoDecoderConfig::GetHumanReadableCodecName() const {
return "unknown";
case kCodecH264:
return "h264";
+ case kCodecHEVC:
+ return "hevc";
case kCodecVC1:
return "vc1";
case kCodecMPEG2:
@@ -157,42 +146,10 @@ std::string VideoDecoderConfig::GetHumanReadableCodecName() const {
return "";
}
-VideoCodec VideoDecoderConfig::codec() const {
- return codec_;
-}
-
-VideoCodecProfile VideoDecoderConfig::profile() const {
- return profile_;
-}
-
-VideoFrame::Format VideoDecoderConfig::format() const {
- return format_;
-}
-
-gfx::Size VideoDecoderConfig::coded_size() const {
- return coded_size_;
-}
-
-gfx::Rect VideoDecoderConfig::visible_rect() const {
- return visible_rect_;
-}
-
-gfx::Size VideoDecoderConfig::natural_size() const {
- return natural_size_;
-}
-
const uint8* VideoDecoderConfig::extra_data() const {
if (extra_data_.empty())
return NULL;
return &extra_data_[0];
}
-size_t VideoDecoderConfig::extra_data_size() const {
- return extra_data_.size();
-}
-
-bool VideoDecoderConfig::is_encrypted() const {
- return is_encrypted_;
-}
-
} // namespace media
diff --git a/chromium/media/base/video_decoder_config.h b/chromium/media/base/video_decoder_config.h
index 00de84e2fb5..0dae77c86a8 100644
--- a/chromium/media/base/video_decoder_config.h
+++ b/chromium/media/base/video_decoder_config.h
@@ -10,62 +10,15 @@
#include "base/basictypes.h"
#include "media/base/media_export.h"
-#include "media/base/video_frame.h"
+#include "media/base/video_codecs.h"
+#include "media/base/video_types.h"
#include "ui/gfx/geometry/rect.h"
#include "ui/gfx/geometry/size.h"
namespace media {
-enum VideoCodec {
- // These values are histogrammed over time; do not change their ordinal
- // values. When deleting a codec replace it with a dummy value; when adding a
- // codec, do so at the bottom (and update kVideoCodecMax).
- kUnknownVideoCodec = 0,
- kCodecH264,
- kCodecVC1,
- kCodecMPEG2,
- kCodecMPEG4,
- kCodecTheora,
- kCodecVP8,
- kCodecVP9,
- // DO NOT ADD RANDOM VIDEO CODECS!
- //
- // The only acceptable time to add a new codec is if there is production code
- // that uses said codec in the same CL.
-
- kVideoCodecMax = kCodecVP9 // Must equal the last "real" codec above.
-};
-
-// Video stream profile. This *must* match PP_VideoDecoder_Profile.
-// (enforced in webkit/plugins/ppapi/ppb_video_decoder_impl.cc) and
-// gpu::VideoCodecProfile.
-enum VideoCodecProfile {
- // Keep the values in this enum unique, as they imply format (h.264 vs. VP8,
- // for example), and keep the values for a particular format grouped
- // together for clarity.
- VIDEO_CODEC_PROFILE_UNKNOWN = -1,
- VIDEO_CODEC_PROFILE_MIN = VIDEO_CODEC_PROFILE_UNKNOWN,
- H264PROFILE_MIN = 0,
- H264PROFILE_BASELINE = H264PROFILE_MIN,
- H264PROFILE_MAIN = 1,
- H264PROFILE_EXTENDED = 2,
- H264PROFILE_HIGH = 3,
- H264PROFILE_HIGH10PROFILE = 4,
- H264PROFILE_HIGH422PROFILE = 5,
- H264PROFILE_HIGH444PREDICTIVEPROFILE = 6,
- H264PROFILE_SCALABLEBASELINE = 7,
- H264PROFILE_SCALABLEHIGH = 8,
- H264PROFILE_STEREOHIGH = 9,
- H264PROFILE_MULTIVIEWHIGH = 10,
- H264PROFILE_MAX = H264PROFILE_MULTIVIEWHIGH,
- VP8PROFILE_MIN = 11,
- VP8PROFILE_ANY = VP8PROFILE_MIN,
- VP8PROFILE_MAX = VP8PROFILE_ANY,
- VP9PROFILE_MIN = 12,
- VP9PROFILE_ANY = VP9PROFILE_MIN,
- VP9PROFILE_MAX = VP9PROFILE_ANY,
- VIDEO_CODEC_PROFILE_MAX = VP9PROFILE_MAX,
-};
+MEDIA_EXPORT VideoCodec
+VideoCodecProfileToVideoCodec(VideoCodecProfile profile);
class MEDIA_EXPORT VideoDecoderConfig {
public:
@@ -77,11 +30,13 @@ class MEDIA_EXPORT VideoDecoderConfig {
// |extra_data|, otherwise the memory is copied.
VideoDecoderConfig(VideoCodec codec,
VideoCodecProfile profile,
- VideoFrame::Format format,
+ VideoPixelFormat format,
+ ColorSpace color_space,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
const gfx::Size& natural_size,
- const uint8* extra_data, size_t extra_data_size,
+ const uint8* extra_data,
+ size_t extra_data_size,
bool is_encrypted);
~VideoDecoderConfig();
@@ -89,14 +44,14 @@ class MEDIA_EXPORT VideoDecoderConfig {
// Resets the internal state of this object.
void Initialize(VideoCodec codec,
VideoCodecProfile profile,
- VideoFrame::Format format,
- VideoFrame::ColorSpace color_space,
+ VideoPixelFormat format,
+ ColorSpace color_space,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
const gfx::Size& natural_size,
- const uint8* extra_data, size_t extra_data_size,
- bool is_encrypted,
- bool record_stats);
+ const uint8* extra_data,
+ size_t extra_data_size,
+ bool is_encrypted);
// Returns true if this object has appropriate configuration values, false
// otherwise.
@@ -112,38 +67,44 @@ class MEDIA_EXPORT VideoDecoderConfig {
std::string GetHumanReadableCodecName() const;
- VideoCodec codec() const;
- VideoCodecProfile profile() const;
+ VideoCodec codec() const { return codec_; }
+ VideoCodecProfile profile() const { return profile_; }
// Video format used to determine YUV buffer sizes.
- VideoFrame::Format format() const;
+ VideoPixelFormat format() const { return format_; }
+
+ // The default color space of the decoded frames. Decoders should output
+ // frames tagged with this color space unless they find a different value in
+ // the bitstream.
+ ColorSpace color_space() const { return color_space_; }
// Width and height of video frame immediately post-decode. Not all pixels
// in this region are valid.
- gfx::Size coded_size() const;
+ gfx::Size coded_size() const { return coded_size_; }
// Region of |coded_size_| that is visible.
- gfx::Rect visible_rect() const;
+ gfx::Rect visible_rect() const { return visible_rect_; }
// Final visible width and height of a video frame with aspect ratio taken
// into account.
- gfx::Size natural_size() const;
+ gfx::Size natural_size() const { return natural_size_; }
// Optional byte data required to initialize video decoders, such as H.264
// AAVC data.
const uint8* extra_data() const;
- size_t extra_data_size() const;
+ size_t extra_data_size() const { return extra_data_.size(); }
// Whether the video stream is potentially encrypted.
// Note that in a potentially encrypted video stream, individual buffers
// can be encrypted or not encrypted.
- bool is_encrypted() const;
+ bool is_encrypted() const { return is_encrypted_; }
private:
VideoCodec codec_;
VideoCodecProfile profile_;
- VideoFrame::Format format_;
+ VideoPixelFormat format_;
+ ColorSpace color_space_;
gfx::Size coded_size_;
gfx::Rect visible_rect_;
diff --git a/chromium/media/base/video_frame.cc b/chromium/media/base/video_frame.cc
index 1d36743d49a..a8736de7a76 100644
--- a/chromium/media/base/video_frame.cc
+++ b/chromium/media/base/video_frame.cc
@@ -11,7 +11,9 @@
#include "base/logging.h"
#include "base/memory/aligned_memory.h"
#include "base/strings/string_piece.h"
+#include "base/strings/stringprintf.h"
#include "media/base/limits.h"
+#include "media/base/timestamp_constants.h"
#include "media/base/video_util.h"
#include "ui/gfx/geometry/point.h"
@@ -31,8 +33,19 @@ static inline size_t RoundDown(size_t value, size_t alignment) {
return value & ~(alignment - 1);
}
+static std::string ConfigToString(const VideoPixelFormat format,
+ const VideoFrame::StorageType storage_type,
+ const gfx::Size& coded_size,
+ const gfx::Rect& visible_rect,
+ const gfx::Size& natural_size) {
+ return base::StringPrintf(
+ "format:%s coded_size:%s visible_rect:%s natural_size:%s",
+ VideoPixelFormatToString(format).c_str(), coded_size.ToString().c_str(),
+ visible_rect.ToString().c_str(), natural_size.ToString().c_str());
+}
+
// Returns true if |plane| is a valid plane index for the given |format|.
-static bool IsValidPlane(size_t plane, VideoFrame::Format format) {
+static bool IsValidPlane(size_t plane, VideoPixelFormat format) {
DCHECK_LE(VideoFrame::NumPlanes(format),
static_cast<size_t>(VideoFrame::kMaxPlanes));
return (plane < VideoFrame::NumPlanes(format));
@@ -53,8 +66,8 @@ static bool IsStorageTypeMappable(VideoFrame::StorageType storage_type) {
}
// Returns the pixel size per element for given |plane| and |format|. E.g. 2x2
-// for the U-plane in I420.
-static gfx::Size SampleSize(VideoFrame::Format format, size_t plane) {
+// for the U-plane in PIXEL_FORMAT_I420.
+static gfx::Size SampleSize(VideoPixelFormat format, size_t plane) {
DCHECK(IsValidPlane(plane, format));
switch (plane) {
@@ -65,23 +78,28 @@ static gfx::Size SampleSize(VideoFrame::Format format, size_t plane) {
case VideoFrame::kUPlane: // and VideoFrame::kUVPlane:
case VideoFrame::kVPlane:
switch (format) {
- case VideoFrame::YV24:
+ case PIXEL_FORMAT_YV24:
return gfx::Size(1, 1);
- case VideoFrame::YV16:
+ case PIXEL_FORMAT_YV16:
return gfx::Size(2, 1);
- case VideoFrame::YV12:
- case VideoFrame::I420:
- case VideoFrame::YV12A:
-#if defined(OS_MACOSX) || defined(OS_CHROMEOS)
- case VideoFrame::NV12:
-#endif
+ case PIXEL_FORMAT_YV12:
+ case PIXEL_FORMAT_I420:
+ case PIXEL_FORMAT_YV12A:
+ case PIXEL_FORMAT_NV12:
+ case PIXEL_FORMAT_NV21:
+ case PIXEL_FORMAT_MT21:
return gfx::Size(2, 2);
- case VideoFrame::UNKNOWN:
- case VideoFrame::ARGB:
- case VideoFrame::XRGB:
+ case PIXEL_FORMAT_UNKNOWN:
+ case PIXEL_FORMAT_UYVY:
+ case PIXEL_FORMAT_YUY2:
+ case PIXEL_FORMAT_ARGB:
+ case PIXEL_FORMAT_XRGB:
+ case PIXEL_FORMAT_RGB24:
+ case PIXEL_FORMAT_RGB32:
+ case PIXEL_FORMAT_MJPEG:
break;
}
}
@@ -91,7 +109,7 @@ static gfx::Size SampleSize(VideoFrame::Format format, size_t plane) {
// Return the alignment for the whole frame, calculated as the max of the
// alignment for each individual plane.
-static gfx::Size CommonAlignment(VideoFrame::Format format) {
+static gfx::Size CommonAlignment(VideoPixelFormat format) {
int max_sample_width = 0;
int max_sample_height = 0;
for (size_t plane = 0; plane < VideoFrame::NumPlanes(format); ++plane) {
@@ -103,78 +121,42 @@ static gfx::Size CommonAlignment(VideoFrame::Format format) {
}
// Returns the number of bytes per element for given |plane| and |format|.
-static int BytesPerElement(VideoFrame::Format format, size_t plane) {
+static int BytesPerElement(VideoPixelFormat format, size_t plane) {
DCHECK(IsValidPlane(plane, format));
- if (format == VideoFrame::ARGB || format == VideoFrame::XRGB)
- return 4;
-
-#if defined(OS_MACOSX) || defined(OS_CHROMEOS)
- if (format == VideoFrame::NV12 && plane == VideoFrame::kUVPlane)
- return 2;
-#endif
-
- return 1;
-}
-
-// Rounds up |coded_size| if necessary for |format|.
-static gfx::Size AdjustCodedSize(VideoFrame::Format format,
- const gfx::Size& coded_size) {
- const gfx::Size alignment = CommonAlignment(format);
- return gfx::Size(RoundUp(coded_size.width(), alignment.width()),
- RoundUp(coded_size.height(), alignment.height()));
-}
-
-// static
-std::string VideoFrame::FormatToString(Format format) {
switch (format) {
- case UNKNOWN:
- return "UNKNOWN";
- case YV12:
- return "YV12";
- case YV16:
- return "YV16";
- case I420:
- return "I420";
- case YV12A:
- return "YV12A";
- case YV24:
- return "YV24";
- case ARGB:
- return "ARGB";
- case XRGB:
- return "XRGB";
-#if defined(OS_MACOSX) || defined(OS_CHROMEOS)
- case NV12:
- return "NV12";
-#endif
- }
- NOTREACHED() << "Invalid VideoFrame format provided: " << format;
- return "";
-}
-
-// static
-bool VideoFrame::IsYuvPlanar(Format format) {
- switch (format) {
- case YV12:
- case I420:
- case YV16:
- case YV12A:
- case YV24:
-#if defined(OS_MACOSX) || defined(OS_CHROMEOS)
- case NV12:
-#endif
- return true;
-
- case UNKNOWN:
- case ARGB:
- case XRGB:
- return false;
+ case PIXEL_FORMAT_ARGB:
+ case PIXEL_FORMAT_XRGB:
+ case PIXEL_FORMAT_RGB32:
+ return 4;
+ case PIXEL_FORMAT_RGB24:
+ return 3;
+ case PIXEL_FORMAT_UYVY:
+ case PIXEL_FORMAT_YUY2:
+ return 2;
+ case PIXEL_FORMAT_NV12:
+ case PIXEL_FORMAT_NV21:
+ case PIXEL_FORMAT_MT21: {
+ static const int bytes_per_element[] = {1, 2};
+ DCHECK_LT(plane, arraysize(bytes_per_element));
+ return bytes_per_element[plane];
+ }
+ case PIXEL_FORMAT_YV12:
+ case PIXEL_FORMAT_I420:
+ case PIXEL_FORMAT_YV16:
+ case PIXEL_FORMAT_YV12A:
+ case PIXEL_FORMAT_YV24:
+ return 1;
+ case PIXEL_FORMAT_MJPEG:
+ return 0;
+ case PIXEL_FORMAT_UNKNOWN:
+ break;
}
- return false;
+ NOTREACHED();
+ return 0;
}
// static
-bool VideoFrame::IsValidConfig(Format format,
+bool VideoFrame::IsValidConfig(VideoPixelFormat format,
StorageType storage_type,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
@@ -196,82 +178,69 @@ bool VideoFrame::IsValidConfig(Format format,
if (!IsStorageTypeMappable(storage_type))
return true;
- // Check format-specific width/height requirements.
- switch (format) {
- case UNKNOWN:
- return (coded_size.IsEmpty() && visible_rect.IsEmpty() &&
- natural_size.IsEmpty());
- case YV24:
- case YV12:
- case I420:
- case YV12A:
- case YV16:
- case ARGB:
- case XRGB:
-#if defined(OS_MACOSX) || defined(OS_CHROMEOS)
- case NV12:
-#endif
- // Check that software-allocated buffer formats are aligned correctly and
- // not empty.
- const gfx::Size alignment = CommonAlignment(format);
- return RoundUp(visible_rect.right(), alignment.width()) <=
- static_cast<size_t>(coded_size.width()) &&
- RoundUp(visible_rect.bottom(), alignment.height()) <=
- static_cast<size_t>(coded_size.height()) &&
- !coded_size.IsEmpty() && !visible_rect.IsEmpty() &&
- !natural_size.IsEmpty();
+ // Make sure new formats are properly accounted for in the method.
+ static_assert(PIXEL_FORMAT_MAX == 15,
+ "Added pixel format, please review IsValidConfig()");
+
+ if (format == PIXEL_FORMAT_UNKNOWN) {
+ return coded_size.IsEmpty() && visible_rect.IsEmpty() &&
+ natural_size.IsEmpty();
}
- // TODO(mcasas): Check that storage type and underlying mailboxes/dataptr are
- // matching.
- NOTREACHED();
- return false;
+ // Check that software-allocated buffer formats are not empty.
+ return !coded_size.IsEmpty() && !visible_rect.IsEmpty() &&
+ !natural_size.IsEmpty();
}
// static
-scoped_refptr<VideoFrame> VideoFrame::CreateFrame(Format format,
+scoped_refptr<VideoFrame> VideoFrame::CreateFrame(VideoPixelFormat format,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
const gfx::Size& natural_size,
base::TimeDelta timestamp) {
- if (!IsYuvPlanar(format)) {
- NOTIMPLEMENTED();
- return nullptr;
- }
+ return CreateFrameInternal(format, coded_size, visible_rect, natural_size,
+ timestamp, false);
+}
- // Since we're creating a new YUV frame (and allocating memory for it
- // ourselves), we can pad the requested |coded_size| if necessary if the
- // request does not line up on sample boundaries.
- const gfx::Size new_coded_size = AdjustCodedSize(format, coded_size);
- DCHECK(IsValidConfig(format, STORAGE_OWNED_MEMORY, new_coded_size,
- visible_rect, natural_size));
-
- scoped_refptr<VideoFrame> frame(new VideoFrame(format, STORAGE_OWNED_MEMORY,
- new_coded_size, visible_rect,
- natural_size, timestamp));
- frame->AllocateYUV();
- return frame;
+// static
+scoped_refptr<VideoFrame> VideoFrame::CreateZeroInitializedFrame(
+ VideoPixelFormat format,
+ const gfx::Size& coded_size,
+ const gfx::Rect& visible_rect,
+ const gfx::Size& natural_size,
+ base::TimeDelta timestamp) {
+ return CreateFrameInternal(format, coded_size, visible_rect, natural_size,
+ timestamp, true);
}
// static
scoped_refptr<VideoFrame> VideoFrame::WrapNativeTexture(
- Format format,
+ VideoPixelFormat format,
const gpu::MailboxHolder& mailbox_holder,
const ReleaseMailboxCB& mailbox_holder_release_cb,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
const gfx::Size& natural_size,
base::TimeDelta timestamp) {
- if (format != ARGB) {
- DLOG(ERROR) << "Only ARGB pixel format supported, got "
- << FormatToString(format);
+ if (format != PIXEL_FORMAT_ARGB &&
+ format != PIXEL_FORMAT_UYVY &&
+ format != PIXEL_FORMAT_NV12) {
+ DLOG(ERROR) << "Unsupported pixel format, got "
+ << VideoPixelFormatToString(format);
return nullptr;
}
+ const StorageType storage = STORAGE_OPAQUE;
+ if (!IsValidConfig(format, storage, coded_size, visible_rect, natural_size)) {
+ DLOG(ERROR) << __FUNCTION__ << " Invalid config."
+ << ConfigToString(format, storage, coded_size, visible_rect,
+ natural_size);
+ return nullptr;
+ }
+
gpu::MailboxHolder mailbox_holders[kMaxPlanes];
mailbox_holders[kARGBPlane] = mailbox_holder;
- return new VideoFrame(format, STORAGE_OPAQUE, coded_size,
- visible_rect, natural_size, mailbox_holders,
- mailbox_holder_release_cb, timestamp);
+ return new VideoFrame(format, storage, coded_size, visible_rect, natural_size,
+ mailbox_holders, mailbox_holder_release_cb, timestamp);
}
// static
@@ -284,18 +253,26 @@ scoped_refptr<VideoFrame> VideoFrame::WrapYUV420NativeTextures(
const gfx::Rect& visible_rect,
const gfx::Size& natural_size,
base::TimeDelta timestamp) {
+ const StorageType storage = STORAGE_OPAQUE;
+ VideoPixelFormat format = PIXEL_FORMAT_I420;
+ if (!IsValidConfig(format, storage, coded_size, visible_rect, natural_size)) {
+ DLOG(ERROR) << __FUNCTION__ << " Invalid config."
+ << ConfigToString(format, storage, coded_size, visible_rect,
+ natural_size);
+ return nullptr;
+ }
+
gpu::MailboxHolder mailbox_holders[kMaxPlanes];
mailbox_holders[kYPlane] = y_mailbox_holder;
mailbox_holders[kUPlane] = u_mailbox_holder;
mailbox_holders[kVPlane] = v_mailbox_holder;
- return new VideoFrame(I420, STORAGE_OPAQUE, coded_size, visible_rect,
- natural_size, mailbox_holders,
- mailbox_holder_release_cb, timestamp);
+ return new VideoFrame(format, storage, coded_size, visible_rect, natural_size,
+ mailbox_holders, mailbox_holder_release_cb, timestamp);
}
// static
scoped_refptr<VideoFrame> VideoFrame::WrapExternalData(
- Format format,
+ VideoPixelFormat format,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
const gfx::Size& natural_size,
@@ -309,7 +286,7 @@ scoped_refptr<VideoFrame> VideoFrame::WrapExternalData(
// static
scoped_refptr<VideoFrame> VideoFrame::WrapExternalSharedMemory(
- Format format,
+ VideoPixelFormat format,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
const gfx::Size& natural_size,
@@ -325,7 +302,7 @@ scoped_refptr<VideoFrame> VideoFrame::WrapExternalSharedMemory(
// static
scoped_refptr<VideoFrame> VideoFrame::WrapExternalYuvData(
- Format format,
+ VideoPixelFormat format,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
const gfx::Size& natural_size,
@@ -336,13 +313,16 @@ scoped_refptr<VideoFrame> VideoFrame::WrapExternalYuvData(
uint8* u_data,
uint8* v_data,
base::TimeDelta timestamp) {
- const gfx::Size new_coded_size = AdjustCodedSize(format, coded_size);
- CHECK(IsValidConfig(format, STORAGE_UNOWNED_MEMORY, new_coded_size,
- visible_rect, natural_size));
+ const StorageType storage = STORAGE_UNOWNED_MEMORY;
+ if (!IsValidConfig(format, storage, coded_size, visible_rect, natural_size)) {
+ DLOG(ERROR) << __FUNCTION__ << " Invalid config."
+ << ConfigToString(format, storage, coded_size, visible_rect,
+ natural_size);
+ return nullptr;
+ }
- scoped_refptr<VideoFrame> frame(new VideoFrame(format, STORAGE_UNOWNED_MEMORY,
- new_coded_size, visible_rect,
- natural_size, timestamp));
+ scoped_refptr<VideoFrame> frame(new VideoFrame(
+ format, storage, coded_size, visible_rect, natural_size, timestamp));
frame->strides_[kYPlane] = y_stride;
frame->strides_[kUPlane] = u_stride;
frame->strides_[kVPlane] = v_stride;
@@ -355,25 +335,28 @@ scoped_refptr<VideoFrame> VideoFrame::WrapExternalYuvData(
#if defined(OS_LINUX)
// static
scoped_refptr<VideoFrame> VideoFrame::WrapExternalDmabufs(
- Format format,
+ VideoPixelFormat format,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
const gfx::Size& natural_size,
const std::vector<int>& dmabuf_fds,
base::TimeDelta timestamp) {
#if defined(OS_CHROMEOS)
- DCHECK_EQ(format, NV12);
+ DCHECK_EQ(format, PIXEL_FORMAT_NV12);
#endif
- if (!IsValidConfig(format, STORAGE_DMABUFS, coded_size, visible_rect,
- natural_size)) {
+ const StorageType storage = STORAGE_DMABUFS;
+ if (!IsValidConfig(format, storage, coded_size, visible_rect, natural_size)) {
+ DLOG(ERROR) << __FUNCTION__ << " Invalid config."
+ << ConfigToString(format, storage, coded_size, visible_rect,
+ natural_size);
return nullptr;
}
+
gpu::MailboxHolder mailbox_holders[kMaxPlanes];
scoped_refptr<VideoFrame> frame =
- new VideoFrame(format, STORAGE_DMABUFS, coded_size, visible_rect,
- natural_size, mailbox_holders, ReleaseMailboxCB(),
- timestamp);
+ new VideoFrame(format, storage, coded_size, visible_rect, natural_size,
+ mailbox_holders, ReleaseMailboxCB(), timestamp);
if (!frame || !frame->DuplicateFileDescriptors(dmabuf_fds))
return nullptr;
return frame;
@@ -389,33 +372,35 @@ scoped_refptr<VideoFrame> VideoFrame::WrapCVPixelBuffer(
DCHECK(CFGetTypeID(cv_pixel_buffer) == CVPixelBufferGetTypeID());
const OSType cv_format = CVPixelBufferGetPixelFormatType(cv_pixel_buffer);
- Format format;
+ VideoPixelFormat format;
// There are very few compatible CV pixel formats, so just check each.
if (cv_format == kCVPixelFormatType_420YpCbCr8Planar) {
- format = I420;
+ format = PIXEL_FORMAT_I420;
} else if (cv_format == kCVPixelFormatType_444YpCbCr8) {
- format = YV24;
+ format = PIXEL_FORMAT_YV24;
} else if (cv_format == '420v') {
// TODO(jfroy): Use kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange when the
// minimum OS X and iOS SDKs permits it.
- format = NV12;
+ format = PIXEL_FORMAT_NV12;
} else {
DLOG(ERROR) << "CVPixelBuffer format not supported: " << cv_format;
- return NULL;
+ return nullptr;
}
const gfx::Size coded_size(CVImageBufferGetEncodedSize(cv_pixel_buffer));
const gfx::Rect visible_rect(CVImageBufferGetCleanRect(cv_pixel_buffer));
const gfx::Size natural_size(CVImageBufferGetDisplaySize(cv_pixel_buffer));
+ const StorageType storage = STORAGE_UNOWNED_MEMORY;
- if (!IsValidConfig(format, STORAGE_UNOWNED_MEMORY,
- coded_size, visible_rect, natural_size)) {
- return NULL;
+ if (!IsValidConfig(format, storage, coded_size, visible_rect, natural_size)) {
+ DLOG(ERROR) << __FUNCTION__ << " Invalid config."
+ << ConfigToString(format, storage, coded_size, visible_rect,
+ natural_size);
+ return nullptr;
}
- scoped_refptr<VideoFrame> frame(new VideoFrame(format, STORAGE_UNOWNED_MEMORY,
- coded_size, visible_rect,
- natural_size, timestamp));
+ scoped_refptr<VideoFrame> frame(new VideoFrame(
+ format, storage, coded_size, visible_rect, natural_size, timestamp));
frame->cv_pixel_buffer_.reset(cv_pixel_buffer, base::scoped_policy::RETAIN);
return frame;
@@ -432,6 +417,16 @@ scoped_refptr<VideoFrame> VideoFrame::WrapVideoFrame(
CHECK(!frame->HasTextures());
DCHECK(frame->visible_rect().Contains(visible_rect));
+
+ if (!IsValidConfig(frame->format(), frame->storage_type(),
+ frame->coded_size(), visible_rect, natural_size)) {
+ DLOG(ERROR) << __FUNCTION__ << " Invalid config."
+ << ConfigToString(frame->format(), frame->storage_type(),
+ frame->coded_size(), visible_rect,
+ natural_size);
+ return nullptr;
+ }
+
scoped_refptr<VideoFrame> wrapping_frame(new VideoFrame(
frame->format(), frame->storage_type(), frame->coded_size(), visible_rect,
natural_size, frame->timestamp()));
@@ -462,7 +457,7 @@ scoped_refptr<VideoFrame> VideoFrame::WrapVideoFrame(
// static
scoped_refptr<VideoFrame> VideoFrame::CreateEOSFrame() {
scoped_refptr<VideoFrame> frame =
- new VideoFrame(UNKNOWN, STORAGE_UNKNOWN, gfx::Size(),
+ new VideoFrame(PIXEL_FORMAT_UNKNOWN, STORAGE_UNKNOWN, gfx::Size(),
gfx::Rect(), gfx::Size(), kNoTimestamp());
frame->metadata()->SetBoolean(VideoFrameMetadata::END_OF_STREAM, true);
return frame;
@@ -474,7 +469,7 @@ scoped_refptr<VideoFrame> VideoFrame::CreateColorFrame(
uint8 y, uint8 u, uint8 v,
base::TimeDelta timestamp) {
scoped_refptr<VideoFrame> frame =
- CreateFrame(YV12, size, gfx::Rect(size), size, timestamp);
+ CreateFrame(PIXEL_FORMAT_YV12, size, gfx::Rect(size), size, timestamp);
FillYUV(frame.get(), y, u, v);
return frame;
}
@@ -495,7 +490,7 @@ scoped_refptr<VideoFrame> VideoFrame::CreateTransparentFrame(
const uint8 kTransparentA = 0x00;
const base::TimeDelta kZero;
scoped_refptr<VideoFrame> frame =
- CreateFrame(YV12A, size, gfx::Rect(size), size, kZero);
+ CreateFrame(PIXEL_FORMAT_YV12A, size, gfx::Rect(size), size, kZero);
FillYUVA(frame.get(), kBlackY, kBlackUV, kBlackUV, kTransparentA);
return frame;
}
@@ -508,31 +503,43 @@ scoped_refptr<VideoFrame> VideoFrame::CreateTransparentFrame(
// static
scoped_refptr<VideoFrame> VideoFrame::CreateHoleFrame(
const gfx::Size& size) {
- DCHECK(IsValidConfig(UNKNOWN, STORAGE_HOLE, size, gfx::Rect(size), size));
+ const VideoPixelFormat format = PIXEL_FORMAT_UNKNOWN;
+ const StorageType storage = STORAGE_HOLE;
+ const gfx::Rect visible_rect = gfx::Rect(size);
+ if (!IsValidConfig(format, storage, size, visible_rect, size)) {
+ DLOG(ERROR) << __FUNCTION__ << " Invalid config."
+ << ConfigToString(format, storage, size, visible_rect, size);
+ return nullptr;
+ }
scoped_refptr<VideoFrame> frame(new VideoFrame(
- UNKNOWN, STORAGE_HOLE, size, gfx::Rect(size), size, base::TimeDelta()));
+ format, storage, size, gfx::Rect(size), size, base::TimeDelta()));
return frame;
}
#endif // defined(VIDEO_HOLE)
// static
-size_t VideoFrame::NumPlanes(Format format) {
+size_t VideoFrame::NumPlanes(VideoPixelFormat format) {
switch (format) {
- case ARGB:
- case XRGB:
+ case PIXEL_FORMAT_UYVY:
+ case PIXEL_FORMAT_YUY2:
+ case PIXEL_FORMAT_ARGB:
+ case PIXEL_FORMAT_XRGB:
+ case PIXEL_FORMAT_RGB24:
+ case PIXEL_FORMAT_RGB32:
+ case PIXEL_FORMAT_MJPEG:
return 1;
-#if defined(OS_MACOSX) || defined(OS_CHROMEOS)
- case NV12:
+ case PIXEL_FORMAT_NV12:
+ case PIXEL_FORMAT_NV21:
+ case PIXEL_FORMAT_MT21:
return 2;
-#endif
- case YV12:
- case YV16:
- case I420:
- case YV24:
+ case PIXEL_FORMAT_I420:
+ case PIXEL_FORMAT_YV12:
+ case PIXEL_FORMAT_YV16:
+ case PIXEL_FORMAT_YV24:
return 3;
- case YV12A:
+ case PIXEL_FORMAT_YV12A:
return 4;
- case UNKNOWN:
+ case PIXEL_FORMAT_UNKNOWN:
break;
}
NOTREACHED() << "Unsupported video frame format: " << format;
@@ -540,7 +547,8 @@ size_t VideoFrame::NumPlanes(Format format) {
}
// static
-size_t VideoFrame::AllocationSize(Format format, const gfx::Size& coded_size) {
+size_t VideoFrame::AllocationSize(VideoPixelFormat format,
+ const gfx::Size& coded_size) {
size_t total = 0;
for (size_t i = 0; i < NumPlanes(format); ++i)
total += PlaneSize(format, i, coded_size).GetArea();
@@ -548,14 +556,14 @@ size_t VideoFrame::AllocationSize(Format format, const gfx::Size& coded_size) {
}
// static
-gfx::Size VideoFrame::PlaneSize(Format format,
+gfx::Size VideoFrame::PlaneSize(VideoPixelFormat format,
size_t plane,
const gfx::Size& coded_size) {
DCHECK(IsValidPlane(plane, format));
int width = coded_size.width();
int height = coded_size.height();
- if (format != ARGB) {
+ if (format != PIXEL_FORMAT_ARGB) {
// Align to multiple-of-two size overall. This ensures that non-subsampled
// planes can be addressed by pixel with the same scaling as the subsampled
// planes.
@@ -571,7 +579,8 @@ gfx::Size VideoFrame::PlaneSize(Format format,
}
// static
-int VideoFrame::PlaneHorizontalBitsPerPixel(Format format, size_t plane) {
+int VideoFrame::PlaneHorizontalBitsPerPixel(VideoPixelFormat format,
+ size_t plane) {
DCHECK(IsValidPlane(plane, format));
const int bits_per_element = 8 * BytesPerElement(format, plane);
const int horiz_pixels_per_element = SampleSize(format, plane).width();
@@ -580,27 +589,27 @@ int VideoFrame::PlaneHorizontalBitsPerPixel(Format format, size_t plane) {
}
// static
-int VideoFrame::PlaneBitsPerPixel(Format format, size_t plane) {
+int VideoFrame::PlaneBitsPerPixel(VideoPixelFormat format, size_t plane) {
DCHECK(IsValidPlane(plane, format));
return PlaneHorizontalBitsPerPixel(format, plane) /
SampleSize(format, plane).height();
}
// static
-size_t VideoFrame::RowBytes(size_t plane, Format format, int width) {
+size_t VideoFrame::RowBytes(size_t plane, VideoPixelFormat format, int width) {
DCHECK(IsValidPlane(plane, format));
return BytesPerElement(format, plane) * Columns(plane, format, width);
}
// static
-size_t VideoFrame::Rows(size_t plane, Format format, int height) {
+size_t VideoFrame::Rows(size_t plane, VideoPixelFormat format, int height) {
DCHECK(IsValidPlane(plane, format));
const int sample_height = SampleSize(format, plane).height();
return RoundUp(height, sample_height) / sample_height;
}
// static
-size_t VideoFrame::Columns(size_t plane, Format format, int width) {
+size_t VideoFrame::Columns(size_t plane, VideoPixelFormat format, int width) {
DCHECK(IsValidPlane(plane, format));
const int sample_width = SampleSize(format, plane).width();
return RoundUp(width, sample_width) / sample_width;
@@ -760,7 +769,7 @@ void VideoFrame::UpdateReleaseSyncPoint(SyncPointClient* client) {
// static
scoped_refptr<VideoFrame> VideoFrame::WrapExternalStorage(
- Format format,
+ VideoPixelFormat format,
StorageType storage_type,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
@@ -772,36 +781,38 @@ scoped_refptr<VideoFrame> VideoFrame::WrapExternalStorage(
size_t data_offset) {
DCHECK(IsStorageTypeMappable(storage_type));
- const gfx::Size new_coded_size = AdjustCodedSize(format, coded_size);
- if (!IsValidConfig(format, storage_type, new_coded_size, visible_rect,
- natural_size) ||
- data_size < AllocationSize(format, new_coded_size)) {
- return NULL;
+ if (format != PIXEL_FORMAT_I420) {
+ DLOG(ERROR) << "Only PIXEL_FORMAT_I420 format supported: "
+ << VideoPixelFormatToString(format);
+ return nullptr;
+ }
+
+ if (!IsValidConfig(format, storage_type, coded_size, visible_rect,
+ natural_size)) {
+ DLOG(ERROR) << __FUNCTION__ << " Invalid config."
+ << ConfigToString(format, storage_type, coded_size,
+ visible_rect, natural_size);
+ return nullptr;
}
- DLOG_IF(ERROR, format != I420) << "Only I420 format supported: "
- << FormatToString(format);
- if (format != I420)
- return NULL;
scoped_refptr<VideoFrame> frame;
if (storage_type == STORAGE_SHMEM) {
- frame = new VideoFrame(format, storage_type, new_coded_size,
- visible_rect, natural_size, timestamp, handle,
- data_offset);
+ frame = new VideoFrame(format, storage_type, coded_size, visible_rect,
+ natural_size, timestamp, handle, data_offset);
} else {
- frame = new VideoFrame(format, storage_type, new_coded_size,
- visible_rect, natural_size, timestamp);
+ frame = new VideoFrame(format, storage_type, coded_size, visible_rect,
+ natural_size, timestamp);
}
- frame->strides_[kYPlane] = new_coded_size.width();
- frame->strides_[kUPlane] = new_coded_size.width() / 2;
- frame->strides_[kVPlane] = new_coded_size.width() / 2;
+ frame->strides_[kYPlane] = coded_size.width();
+ frame->strides_[kUPlane] = coded_size.width() / 2;
+ frame->strides_[kVPlane] = coded_size.width() / 2;
frame->data_[kYPlane] = data;
- frame->data_[kUPlane] = data + new_coded_size.GetArea();
- frame->data_[kVPlane] = data + (new_coded_size.GetArea() * 5 / 4);
+ frame->data_[kUPlane] = data + coded_size.GetArea();
+ frame->data_[kVPlane] = data + (coded_size.GetArea() * 5 / 4);
return frame;
}
-VideoFrame::VideoFrame(Format format,
+VideoFrame::VideoFrame(VideoPixelFormat format,
StorageType storage_type,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
@@ -823,7 +834,7 @@ VideoFrame::VideoFrame(Format format,
memset(&data_, 0, sizeof(data_));
}
-VideoFrame::VideoFrame(Format format,
+VideoFrame::VideoFrame(VideoPixelFormat format,
StorageType storage_type,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
@@ -842,7 +853,7 @@ VideoFrame::VideoFrame(Format format,
shared_memory_offset_ = shared_memory_offset;
}
-VideoFrame::VideoFrame(Format format,
+VideoFrame::VideoFrame(VideoPixelFormat format,
StorageType storage_type,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
@@ -876,7 +887,46 @@ VideoFrame::~VideoFrame() {
base::ResetAndReturn(&callback).Run();
}
-void VideoFrame::AllocateYUV() {
+// static
+scoped_refptr<VideoFrame> VideoFrame::CreateFrameInternal(
+ VideoPixelFormat format,
+ const gfx::Size& coded_size,
+ const gfx::Rect& visible_rect,
+ const gfx::Size& natural_size,
+ base::TimeDelta timestamp,
+ bool zero_initialize_memory) {
+ if (!IsYuvPlanar(format)) {
+ NOTIMPLEMENTED();
+ return nullptr;
+ }
+
+ // Since we're creating a new YUV frame (and allocating memory for it
+ // ourselves), we can pad the requested |coded_size| if necessary if the
+ // request does not line up on sample boundaries. See discussion at
+ // http://crrev.com/1240833003
+ const gfx::Size alignment = CommonAlignment(format);
+ const gfx::Size new_coded_size =
+ gfx::Size(RoundUp(coded_size.width(), alignment.width()),
+ RoundUp(coded_size.height(), alignment.height()));
+ DCHECK((new_coded_size.width() % alignment.width() == 0) &&
+ (new_coded_size.height() % alignment.height() == 0));
+
+ const StorageType storage = STORAGE_OWNED_MEMORY;
+ if (!IsValidConfig(format, storage, new_coded_size, visible_rect,
+ natural_size)) {
+ DLOG(ERROR) << __FUNCTION__ << " Invalid config."
+ << ConfigToString(format, storage, coded_size, visible_rect,
+ natural_size);
+ return nullptr;
+ }
+
+ scoped_refptr<VideoFrame> frame(new VideoFrame(
+ format, storage, new_coded_size, visible_rect, natural_size, timestamp));
+ frame->AllocateYUV(zero_initialize_memory);
+ return frame;
+}
+
+void VideoFrame::AllocateYUV(bool zero_initialize_memory) {
DCHECK_EQ(storage_type_, STORAGE_OWNED_MEMORY);
static_assert(0 == kYPlane, "y plane data must be index 0");
@@ -900,11 +950,10 @@ void VideoFrame::AllocateYUV() {
DCHECK(IsValidPlane(kUPlane, format_));
data_size += strides_[kUPlane] + kFrameSizePadding;
- // FFmpeg expects the initialize allocation to be zero-initialized. Failure
- // to do so can lead to unitialized value usage. See http://crbug.com/390941
uint8* data = reinterpret_cast<uint8*>(
base::AlignedAlloc(data_size, kFrameAddressAlignment));
- memset(data, 0, data_size);
+ if (zero_initialize_memory)
+ memset(data, 0, data_size);
for (size_t plane = 0; plane < NumPlanes(format_); ++plane)
data_[plane] = data + offset[plane];
diff --git a/chromium/media/base/video_frame.h b/chromium/media/base/video_frame.h
index 2588e46e99d..2135ae8306e 100644
--- a/chromium/media/base/video_frame.h
+++ b/chromium/media/base/video_frame.h
@@ -12,8 +12,8 @@
#include "base/memory/shared_memory.h"
#include "base/synchronization/lock.h"
#include "gpu/command_buffer/common/mailbox_holder.h"
-#include "media/base/buffers.h"
#include "media/base/video_frame_metadata.h"
+#include "media/base/video_types.h"
#include "ui/gfx/geometry/rect.h"
#include "ui/gfx/geometry/size.h"
@@ -43,35 +43,6 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
kAPlane = 3,
};
- // Pixel formats roughly based on FOURCC labels, see:
- // http://www.fourcc.org/rgb.php and http://www.fourcc.org/yuv.php
- // Logged to UMA, so never reuse values. Leave gaps if necessary.
- enum Format {
- UNKNOWN = 0, // Unknown or unspecified format value.
- YV12 = 1, // 12bpp YVU planar 1x1 Y, 2x2 VU samples.
- I420 = 2, // 12bpp YUV planar 1x1 Y, 2x2 UV samples, a.k.a. YU12.
- YV16 = 3, // 16bpp YVU planar 1x1 Y, 2x1 VU samples.
- YV12A = 4, // 20bpp YUVA planar 1x1 Y, 2x2 VU, 1x1 A samples.
- YV24 = 5, // 24bpp YUV planar, no subsampling.
-#if defined(OS_MACOSX) || defined(OS_CHROMEOS)
- NV12 = 6, // 12bpp with Y plane followed by a 2x2 interleaved UV plane.
-#endif
- ARGB = 7, // 32bpp ARGB, 1 plane.
- XRGB = 8, // 24bpp XRGB, 1 plane.
- // Please update UMA histogram enumeration when adding new formats here.
- FORMAT_MAX = XRGB, // Must always be equal to largest entry logged.
- };
-
- // Color space or color range used for the pixels, in general this is left
- // unspecified, meaning Rec601 (SD) is assumed.
- // Logged to UMA, so never reuse values. Leave gaps if necessary.
- enum ColorSpace {
- COLOR_SPACE_UNSPECIFIED = 0, // In general this is Rec601.
- COLOR_SPACE_JPEG = 1, // JPEG color range.
- COLOR_SPACE_HD_REC709 = 2, // Rec709 "HD" color space.
- COLOR_SPACE_MAX = COLOR_SPACE_HD_REC709,
- };
-
// Defines the pixel storage type. Differentiates between directly accessible
// |data_| and pixels that are only indirectly accessible and not via mappable
// memory.
@@ -122,17 +93,9 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
DISALLOW_COPY_AND_ASSIGN(SyncPointClient);
};
- // Returns the name of a Format as a string.
- static std::string FormatToString(Format format);
-
- // Returns true if |format| is a YUV format. This includes (multi)planar
- // and/or (partially) interleaved formats.
- static bool IsYuvPlanar(Format format);
-
// Call prior to CreateFrame to ensure validity of frame configuration. Called
// automatically by VideoDecoderConfig::IsValidConfig().
- // TODO(scherkus): VideoDecoderConfig shouldn't call this method
- static bool IsValidConfig(Format format,
+ static bool IsValidConfig(VideoPixelFormat format,
StorageType storage_type,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
@@ -143,8 +106,16 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
// caller most not make assumptions about the actual underlying size(s), but
// check the returned VideoFrame instead.
// TODO(mcasas): implement the RGB version of this factory method.
- static scoped_refptr<VideoFrame> CreateFrame(
- Format format,
+ static scoped_refptr<VideoFrame> CreateFrame(VideoPixelFormat format,
+ const gfx::Size& coded_size,
+ const gfx::Rect& visible_rect,
+ const gfx::Size& natural_size,
+ base::TimeDelta timestamp);
+
+ // Offers the same functionality as CreateFrame, and additionally zeroes out
+ // the initial allocated buffers.
+ static scoped_refptr<VideoFrame> CreateZeroInitializedFrame(
+ VideoPixelFormat format,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
const gfx::Size& natural_size,
@@ -155,7 +126,7 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
// |mailbox_holder|, and |mailbox_holder_release_cb| will be called with
// a syncpoint as the argument when the VideoFrame is to be destroyed.
static scoped_refptr<VideoFrame> WrapNativeTexture(
- Format format,
+ VideoPixelFormat format,
const gpu::MailboxHolder& mailbox_holder,
const ReleaseMailboxCB& mailbox_holder_release_cb,
const gfx::Size& coded_size,
@@ -181,7 +152,7 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
// buffer of logical dimensions |coded_size| with the appropriate bit depth
// and plane count as given by |format|. Returns NULL on failure.
static scoped_refptr<VideoFrame> WrapExternalData(
- Format format,
+ VideoPixelFormat format,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
const gfx::Size& natural_size,
@@ -191,7 +162,7 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
// Same as WrapExternalData() with SharedMemoryHandle and its offset.
static scoped_refptr<VideoFrame> WrapExternalSharedMemory(
- Format format,
+ VideoPixelFormat format,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
const gfx::Size& natural_size,
@@ -204,7 +175,7 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
// Wraps external YUV data of the given parameters with a VideoFrame.
// The returned VideoFrame does not own the data passed in.
static scoped_refptr<VideoFrame> WrapExternalYuvData(
- Format format,
+ VideoPixelFormat format,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
const gfx::Size& natural_size,
@@ -228,7 +199,7 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
// mapped via mmap() for CPU access.
// Returns NULL on failure.
static scoped_refptr<VideoFrame> WrapExternalDmabufs(
- Format format,
+ VideoPixelFormat format,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
const gfx::Size& natural_size,
@@ -280,35 +251,36 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
static scoped_refptr<VideoFrame> CreateHoleFrame(const gfx::Size& size);
#endif // defined(VIDEO_HOLE)
- static size_t NumPlanes(Format format);
+ static size_t NumPlanes(VideoPixelFormat format);
// Returns the required allocation size for a (tightly packed) frame of the
// given coded size and format.
- static size_t AllocationSize(Format format, const gfx::Size& coded_size);
+ static size_t AllocationSize(VideoPixelFormat format,
+ const gfx::Size& coded_size);
// Returns the plane gfx::Size (in bytes) for a plane of the given coded size
// and format.
- static gfx::Size PlaneSize(Format format,
+ static gfx::Size PlaneSize(VideoPixelFormat format,
size_t plane,
const gfx::Size& coded_size);
// Returns horizontal bits per pixel for given |plane| and |format|.
- static int PlaneHorizontalBitsPerPixel(Format format, size_t plane);
+ static int PlaneHorizontalBitsPerPixel(VideoPixelFormat format, size_t plane);
// Returns bits per pixel for given |plane| and |format|.
- static int PlaneBitsPerPixel(Format format, size_t plane);
+ static int PlaneBitsPerPixel(VideoPixelFormat format, size_t plane);
// Returns the number of bytes per row for the given plane, format, and width.
// The width may be aligned to format requirements.
- static size_t RowBytes(size_t plane, Format format, int width);
+ static size_t RowBytes(size_t plane, VideoPixelFormat format, int width);
// Returns the number of rows for the given plane, format, and height.
// The height may be aligned to format requirements.
- static size_t Rows(size_t plane, Format format, int height);
+ static size_t Rows(size_t plane, VideoPixelFormat format, int height);
// Returns the number of columns for the given plane, format, and width.
// The width may be aligned to format requirements.
- static size_t Columns(size_t plane, Format format, int width);
+ static size_t Columns(size_t plane, VideoPixelFormat format, int width);
// Used to keep a running hash of seen frames. Expects an initialized MD5
// context. Calls MD5Update with the context and the contents of the frame.
@@ -324,7 +296,7 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
// accessed via data(), visible_data() etc.
bool HasTextures() const;
- Format format() const { return format_; }
+ VideoPixelFormat format() const { return format_; }
StorageType storage_type() const { return storage_type_; }
const gfx::Size& coded_size() const { return coded_size_; }
@@ -400,6 +372,9 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
const VideoFrameMetadata* metadata() const { return &metadata_; }
VideoFrameMetadata* metadata() { return &metadata_; }
+ // The time span between the current frame and the first frame of the stream.
+ // This is the media timestamp, and not the reference time.
+ // See VideoFrameMetadata::REFERENCE_TIME for details.
base::TimeDelta timestamp() const { return timestamp_; }
void set_timestamp(base::TimeDelta timestamp) {
timestamp_ = timestamp;
@@ -415,7 +390,7 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
friend class base::RefCountedThreadSafe<VideoFrame>;
static scoped_refptr<VideoFrame> WrapExternalStorage(
- Format format,
+ VideoPixelFormat format,
StorageType storage_type,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
@@ -427,13 +402,13 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
size_t data_offset);
// Clients must use the static factory/wrapping methods to create a new frame.
- VideoFrame(Format format,
+ VideoFrame(VideoPixelFormat format,
StorageType storage_type,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
const gfx::Size& natural_size,
base::TimeDelta timestamp);
- VideoFrame(Format format,
+ VideoFrame(VideoPixelFormat format,
StorageType storage_type,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
@@ -441,7 +416,7 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
base::TimeDelta timestamp,
base::SharedMemoryHandle handle,
size_t shared_memory_offset);
- VideoFrame(Format format,
+ VideoFrame(VideoPixelFormat format,
StorageType storage_type,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
@@ -451,10 +426,18 @@ class MEDIA_EXPORT VideoFrame : public base::RefCountedThreadSafe<VideoFrame> {
base::TimeDelta timestamp);
virtual ~VideoFrame();
- void AllocateYUV();
+ static scoped_refptr<VideoFrame> CreateFrameInternal(
+ VideoPixelFormat format,
+ const gfx::Size& coded_size,
+ const gfx::Rect& visible_rect,
+ const gfx::Size& natural_size,
+ base::TimeDelta timestamp,
+ bool zero_initialize_memory);
+
+ void AllocateYUV(bool zero_initialize_memory);
// Frame format.
- const Format format_;
+ const VideoPixelFormat format_;
// Storage type for the different planes.
StorageType storage_type_; // TODO(mcasas): make const
diff --git a/chromium/media/base/video_frame_metadata.h b/chromium/media/base/video_frame_metadata.h
index cee4917b25e..48fe509719f 100644
--- a/chromium/media/base/video_frame_metadata.h
+++ b/chromium/media/base/video_frame_metadata.h
@@ -28,7 +28,7 @@ class MEDIA_EXPORT VideoFrameMetadata {
CAPTURE_END_TIME,
// Some VideoFrames have an indication of the color space used. Use
- // GetInteger()/SetInteger() and VideoFrame::ColorSpace enumeration.
+ // GetInteger()/SetInteger() and ColorSpace enumeration.
COLOR_SPACE,
// Indicates if the current frame is the End of its current Stream. Use
@@ -50,6 +50,15 @@ class MEDIA_EXPORT VideoFrameMetadata {
// key.
FRAME_RATE,
+ // This field represents the local time at which either: 1) the frame was
+ // generated, if it was done so locally; or 2) the targeted play-out time
+ // of the frame, if it was generated from a remote source. This value is NOT
+ // a high-resolution timestamp, and so it should not be used as a
+ // presentation time; but, instead, it should be used for buffering playback
+ // and for A/V synchronization purposes.
+ // Use Get/SetTimeTicks() for this key.
+ REFERENCE_TIME,
+
// A feedback signal that indicates the fraction of the tolerable maximum
// amount of resources that were utilized to process this frame. A producer
// can check this value after-the-fact, usually via a VideoFrame destruction
diff --git a/chromium/media/base/video_frame_pool.cc b/chromium/media/base/video_frame_pool.cc
index ac021348c14..dc9cdf370e2 100644
--- a/chromium/media/base/video_frame_pool.cc
+++ b/chromium/media/base/video_frame_pool.cc
@@ -17,10 +17,8 @@ class VideoFramePool::PoolImpl
public:
PoolImpl();
- // Returns a frame from the pool that matches the specified
- // parameters or creates a new frame if no suitable frame exists in
- // the pool. The pool is drained if no matching frame is found.
- scoped_refptr<VideoFrame> CreateFrame(VideoFrame::Format format,
+ // See VideoFramePool::CreateFrame() for usage.
+ scoped_refptr<VideoFrame> CreateFrame(VideoPixelFormat format,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
const gfx::Size& natural_size,
@@ -56,7 +54,7 @@ VideoFramePool::PoolImpl::~PoolImpl() {
}
scoped_refptr<VideoFrame> VideoFramePool::PoolImpl::CreateFrame(
- VideoFrame::Format format,
+ VideoPixelFormat format,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
const gfx::Size& natural_size,
@@ -65,7 +63,6 @@ scoped_refptr<VideoFrame> VideoFramePool::PoolImpl::CreateFrame(
DCHECK(!is_shutdown_);
scoped_refptr<VideoFrame> frame;
-
while (!frame.get() && !frames_.empty()) {
scoped_refptr<VideoFrame> pool_frame = frames_.front();
frames_.pop_front();
@@ -81,7 +78,7 @@ scoped_refptr<VideoFrame> VideoFramePool::PoolImpl::CreateFrame(
}
if (!frame.get()) {
- frame = VideoFrame::CreateFrame(
+ frame = VideoFrame::CreateZeroInitializedFrame(
format, coded_size, visible_rect, natural_size, timestamp);
}
@@ -115,7 +112,7 @@ VideoFramePool::~VideoFramePool() {
}
scoped_refptr<VideoFrame> VideoFramePool::CreateFrame(
- VideoFrame::Format format,
+ VideoPixelFormat format,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
const gfx::Size& natural_size,
diff --git a/chromium/media/base/video_frame_pool.h b/chromium/media/base/video_frame_pool.h
index 76b309196a4..d5c1690e9b5 100644
--- a/chromium/media/base/video_frame_pool.h
+++ b/chromium/media/base/video_frame_pool.h
@@ -25,8 +25,10 @@ class MEDIA_EXPORT VideoFramePool {
// Returns a frame from the pool that matches the specified
// parameters or creates a new frame if no suitable frame exists in
- // the pool.
- scoped_refptr<VideoFrame> CreateFrame(VideoFrame::Format format,
+ // the pool. The pool is drained if no matching frame is found.
+ // The buffer for the new frame will be zero initialized. Reused frames will
+ // not be zero initialized.
+ scoped_refptr<VideoFrame> CreateFrame(VideoPixelFormat format,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
const gfx::Size& natural_size,
diff --git a/chromium/media/base/video_frame_pool_unittest.cc b/chromium/media/base/video_frame_pool_unittest.cc
index 7f3694b9f6d..2d0c1202a2c 100644
--- a/chromium/media/base/video_frame_pool_unittest.cc
+++ b/chromium/media/base/video_frame_pool_unittest.cc
@@ -11,7 +11,7 @@ class VideoFramePoolTest : public ::testing::Test {
public:
VideoFramePoolTest() : pool_(new VideoFramePool()) {}
- scoped_refptr<VideoFrame> CreateFrame(VideoFrame::Format format,
+ scoped_refptr<VideoFrame> CreateFrame(VideoPixelFormat format,
int timestamp_ms) {
gfx::Size coded_size(320,240);
gfx::Rect visible_rect(coded_size);
@@ -39,21 +39,29 @@ class VideoFramePoolTest : public ::testing::Test {
scoped_ptr<VideoFramePool> pool_;
};
+TEST_F(VideoFramePoolTest, FrameInitializedAndZeroed) {
+ scoped_refptr<VideoFrame> frame = CreateFrame(PIXEL_FORMAT_YV12, 10);
+
+ // Verify that frame is initialized with zeros.
+ for (size_t i = 0; i < VideoFrame::NumPlanes(frame->format()); ++i)
+ EXPECT_EQ(0, frame->data(i)[0]);
+}
+
TEST_F(VideoFramePoolTest, SimpleFrameReuse) {
- scoped_refptr<VideoFrame> frame = CreateFrame(VideoFrame::YV12, 10);
+ scoped_refptr<VideoFrame> frame = CreateFrame(PIXEL_FORMAT_YV12, 10);
const uint8* old_y_data = frame->data(VideoFrame::kYPlane);
// Clear frame reference to return the frame to the pool.
frame = NULL;
// Verify that the next frame from the pool uses the same memory.
- scoped_refptr<VideoFrame> new_frame = CreateFrame(VideoFrame::YV12, 20);
+ scoped_refptr<VideoFrame> new_frame = CreateFrame(PIXEL_FORMAT_YV12, 20);
EXPECT_EQ(old_y_data, new_frame->data(VideoFrame::kYPlane));
}
TEST_F(VideoFramePoolTest, SimpleFormatChange) {
- scoped_refptr<VideoFrame> frame_a = CreateFrame(VideoFrame::YV12, 10);
- scoped_refptr<VideoFrame> frame_b = CreateFrame(VideoFrame::YV12, 10);
+ scoped_refptr<VideoFrame> frame_a = CreateFrame(PIXEL_FORMAT_YV12, 10);
+ scoped_refptr<VideoFrame> frame_b = CreateFrame(PIXEL_FORMAT_YV12, 10);
// Clear frame references to return the frames to the pool.
frame_a = NULL;
@@ -64,12 +72,12 @@ TEST_F(VideoFramePoolTest, SimpleFormatChange) {
// Verify that requesting a frame with a different format causes the pool
// to get drained.
- scoped_refptr<VideoFrame> new_frame = CreateFrame(VideoFrame::YV12A, 10);
+ scoped_refptr<VideoFrame> new_frame = CreateFrame(PIXEL_FORMAT_YV12A, 10);
CheckPoolSize(0u);
}
TEST_F(VideoFramePoolTest, FrameValidAfterPoolDestruction) {
- scoped_refptr<VideoFrame> frame = CreateFrame(VideoFrame::YV12, 10);
+ scoped_refptr<VideoFrame> frame = CreateFrame(PIXEL_FORMAT_YV12, 10);
// Destroy the pool.
pool_.reset();
diff --git a/chromium/media/base/video_frame_unittest.cc b/chromium/media/base/video_frame_unittest.cc
index 24b8a2b4b5d..879be7dddc3 100644
--- a/chromium/media/base/video_frame_unittest.cc
+++ b/chromium/media/base/video_frame_unittest.cc
@@ -11,7 +11,6 @@
#include "base/memory/scoped_ptr.h"
#include "base/strings/stringprintf.h"
#include "gpu/command_buffer/common/mailbox_holder.h"
-#include "media/base/buffers.h"
#include "media/base/yuv_convert.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -23,7 +22,7 @@ using base::MD5DigestToBase16;
// lines based on the |white_to_black| parameter. If 0, then the entire
// frame will be black, if 1 then the entire frame will be white.
void InitializeYV12Frame(VideoFrame* frame, double white_to_black) {
- EXPECT_EQ(VideoFrame::YV12, frame->format());
+ EXPECT_EQ(PIXEL_FORMAT_YV12, frame->format());
const int first_black_row =
static_cast<int>(frame->coded_size().height() * white_to_black);
uint8* y_plane = frame->data(VideoFrame::kYPlane);
@@ -45,7 +44,7 @@ void InitializeYV12Frame(VideoFrame* frame, double white_to_black) {
// Given a |yv12_frame| this method converts the YV12 frame to RGBA and
// makes sure that all the pixels of the RBG frame equal |expect_rgb_color|.
void ExpectFrameColor(media::VideoFrame* yv12_frame, uint32 expect_rgb_color) {
- ASSERT_EQ(VideoFrame::YV12, yv12_frame->format());
+ ASSERT_EQ(PIXEL_FORMAT_YV12, yv12_frame->format());
ASSERT_EQ(yv12_frame->stride(VideoFrame::kUPlane),
yv12_frame->stride(VideoFrame::kVPlane));
ASSERT_EQ(
@@ -87,7 +86,7 @@ void ExpectFrameColor(media::VideoFrame* yv12_frame, uint32 expect_rgb_color) {
// Fill each plane to its reported extents and verify accessors report non
// zero values. Additionally, for the first plane verify the rows and
// row_bytes values are correct.
-void ExpectFrameExtents(VideoFrame::Format format, const char* expected_hash) {
+void ExpectFrameExtents(VideoPixelFormat format, const char* expected_hash) {
const unsigned char kFillByte = 0x80;
const int kWidth = 61;
const int kHeight = 31;
@@ -125,13 +124,12 @@ TEST(VideoFrame, CreateFrame) {
// Create a YV12 Video Frame.
gfx::Size size(kWidth, kHeight);
- scoped_refptr<media::VideoFrame> frame =
- VideoFrame::CreateFrame(media::VideoFrame::YV12, size, gfx::Rect(size),
- size, kTimestamp);
+ scoped_refptr<media::VideoFrame> frame = VideoFrame::CreateFrame(
+ media::PIXEL_FORMAT_YV12, size, gfx::Rect(size), size, kTimestamp);
ASSERT_TRUE(frame.get());
// Test VideoFrame implementation.
- EXPECT_EQ(media::VideoFrame::YV12, frame->format());
+ EXPECT_EQ(media::PIXEL_FORMAT_YV12, frame->format());
{
SCOPED_TRACE("");
InitializeYV12Frame(frame.get(), 0.0f);
@@ -159,6 +157,26 @@ TEST(VideoFrame, CreateFrame) {
frame->metadata()->IsTrue(VideoFrameMetadata::END_OF_STREAM));
}
+TEST(VideoFrame, CreateZeroInitializedFrame) {
+ const int kWidth = 2;
+ const int kHeight = 2;
+ const base::TimeDelta kTimestamp = base::TimeDelta::FromMicroseconds(1337);
+
+ // Create a YV12 Video Frame.
+ gfx::Size size(kWidth, kHeight);
+ scoped_refptr<media::VideoFrame> frame =
+ VideoFrame::CreateZeroInitializedFrame(media::PIXEL_FORMAT_YV12, size,
+ gfx::Rect(size), size, kTimestamp);
+ ASSERT_TRUE(frame.get());
+ EXPECT_TRUE(frame->IsMappable());
+
+ // Verify that frame is initialized with zeros.
+ // TODO(emircan): Check all the contents when we know the exact size of the
+ // allocated buffer.
+ for (size_t i = 0; i < VideoFrame::NumPlanes(frame->format()); ++i)
+ EXPECT_EQ(0, frame->data(i)[0]);
+}
+
TEST(VideoFrame, CreateBlackFrame) {
const int kWidth = 2;
const int kHeight = 2;
@@ -176,7 +194,7 @@ TEST(VideoFrame, CreateBlackFrame) {
frame->metadata()->IsTrue(VideoFrameMetadata::END_OF_STREAM));
// Test |frame| properties.
- EXPECT_EQ(VideoFrame::YV12, frame->format());
+ EXPECT_EQ(PIXEL_FORMAT_YV12, frame->format());
EXPECT_EQ(kWidth, frame->coded_size().width());
EXPECT_EQ(kHeight, frame->coded_size().height());
@@ -237,10 +255,10 @@ TEST(VideoFrame, WrapVideoFrame) {
// Ensure each frame is properly sized and allocated. Will trigger OOB reads
// and writes as well as incorrect frame hashes otherwise.
TEST(VideoFrame, CheckFrameExtents) {
- // Each call consists of a VideoFrame::Format and the expected hash of all
+ // Each call consists of a Format and the expected hash of all
// planes if filled with kFillByte (defined in ExpectFrameExtents).
- ExpectFrameExtents(VideoFrame::YV12, "8e5d54cb23cd0edca111dd35ffb6ff05");
- ExpectFrameExtents(VideoFrame::YV16, "cce408a044b212db42a10dfec304b3ef");
+ ExpectFrameExtents(PIXEL_FORMAT_YV12, "8e5d54cb23cd0edca111dd35ffb6ff05");
+ ExpectFrameExtents(PIXEL_FORMAT_YV16, "cce408a044b212db42a10dfec304b3ef");
}
static void TextureCallback(uint32* called_sync_point,
@@ -255,14 +273,14 @@ TEST(VideoFrame, TextureNoLongerNeededCallbackIsCalled) {
{
scoped_refptr<VideoFrame> frame = VideoFrame::WrapNativeTexture(
- VideoFrame::ARGB,
+ PIXEL_FORMAT_ARGB,
gpu::MailboxHolder(gpu::Mailbox::Generate(), 5, 0 /* sync_point */),
base::Bind(&TextureCallback, &called_sync_point),
- gfx::Size(10, 10), // coded_size
- gfx::Rect(10, 10), // visible_rect
- gfx::Size(10, 10), // natural_size
- base::TimeDelta()); // timestamp
- EXPECT_EQ(VideoFrame::ARGB, frame->format());
+ gfx::Size(10, 10), // coded_size
+ gfx::Rect(10, 10), // visible_rect
+ gfx::Size(10, 10), // natural_size
+ base::TimeDelta()); // timestamp
+ EXPECT_EQ(PIXEL_FORMAT_ARGB, frame->format());
EXPECT_EQ(VideoFrame::STORAGE_OPAQUE, frame->storage_type());
EXPECT_TRUE(frame->HasTextures());
}
@@ -313,7 +331,7 @@ TEST(VideoFrame,
base::TimeDelta()); // timestamp
EXPECT_EQ(VideoFrame::STORAGE_OPAQUE, frame->storage_type());
- EXPECT_EQ(VideoFrame::I420, frame->format());
+ EXPECT_EQ(PIXEL_FORMAT_I420, frame->format());
EXPECT_EQ(3u, VideoFrame::NumPlanes(frame->format()));
EXPECT_TRUE(frame->HasTextures());
for (size_t i = 0; i < VideoFrame::NumPlanes(frame->format()); ++i) {
@@ -331,17 +349,48 @@ TEST(VideoFrame,
EXPECT_EQ(release_sync_point, called_sync_point);
}
-TEST(VideoFrame, ZeroInitialized) {
- const int kWidth = 64;
- const int kHeight = 48;
- const base::TimeDelta kTimestamp = base::TimeDelta::FromMicroseconds(1337);
+TEST(VideoFrame, IsValidConfig_OddCodedSize) {
+ // Odd sizes are valid for all formats. Odd formats may be internally rounded
+ // in VideoFrame::CreateFrame because VideoFrame owns the allocation and can
+ // pad the requested coded_size to ensure the UV sample boundaries line up
+ // with the Y plane after subsample scaling. See CreateFrame_OddWidth.
+ gfx::Size odd_size(677, 288);
+
+ // First choosing a format with sub-sampling for UV.
+ EXPECT_TRUE(VideoFrame::IsValidConfig(
+ PIXEL_FORMAT_I420, VideoFrame::STORAGE_OWNED_MEMORY, odd_size,
+ gfx::Rect(odd_size), odd_size));
+
+ // Next try a format with no sub-sampling for UV.
+ EXPECT_TRUE(VideoFrame::IsValidConfig(
+ PIXEL_FORMAT_YV24, VideoFrame::STORAGE_OWNED_MEMORY, odd_size,
+ gfx::Rect(odd_size), odd_size));
+}
- gfx::Size size(kWidth, kHeight);
- scoped_refptr<media::VideoFrame> frame = VideoFrame::CreateFrame(
- media::VideoFrame::YV12, size, gfx::Rect(size), size, kTimestamp);
+TEST(VideoFrame, CreateFrame_OddWidth) {
+ // Odd sizes are non-standard for YUV formats that subsample the UV, but they
+ // do exist in the wild and should be gracefully handled by VideoFrame in
+ // situations where VideoFrame allocates the YUV memory. See discussion in
+ // crrev.com/1240833003
+ const gfx::Size odd_size(677, 288);
+ const base::TimeDelta kTimestamp = base::TimeDelta();
- for (size_t i = 0; i < VideoFrame::NumPlanes(frame->format()); ++i)
- EXPECT_EQ(0, frame->data(i)[0]);
+ // First create a frame that sub-samples UV.
+ scoped_refptr<VideoFrame> frame = VideoFrame::CreateFrame(
+ PIXEL_FORMAT_I420, odd_size, gfx::Rect(odd_size), odd_size, kTimestamp);
+ ASSERT_TRUE(frame.get());
+ // I420 aligns UV to every 2 Y pixels. Hence, 677 should be rounded to 678
+ // which is the nearest value such that width % 2 == 0
+ EXPECT_EQ(678, frame->coded_size().width());
+
+ // Next create a frame that does not sub-sample UV.
+ frame = VideoFrame::CreateFrame(PIXEL_FORMAT_YV24, odd_size,
+ gfx::Rect(odd_size), odd_size, kTimestamp);
+ ASSERT_TRUE(frame.get());
+ // No sub-sampling for YV24 will mean odd width can remain odd since any pixel
+ // in the Y plane has a a corresponding pixel in the UV planes at the same
+ // index.
+ EXPECT_EQ(677, frame->coded_size().width());
}
TEST(VideoFrameMetadata, SetAndThenGetAllKeysForAllTypes) {
diff --git a/chromium/media/base/video_types.cc b/chromium/media/base/video_types.cc
new file mode 100644
index 00000000000..b6b041ee471
--- /dev/null
+++ b/chromium/media/base/video_types.cc
@@ -0,0 +1,75 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/video_types.h"
+
+#include "base/logging.h"
+
+namespace media {
+
+std::string VideoPixelFormatToString(VideoPixelFormat format) {
+ switch (format) {
+ case PIXEL_FORMAT_UNKNOWN:
+ return "PIXEL_FORMAT_UNKNOWN";
+ case PIXEL_FORMAT_I420:
+ return "PIXEL_FORMAT_I420";
+ case PIXEL_FORMAT_YV12:
+ return "PIXEL_FORMAT_YV12";
+ case PIXEL_FORMAT_YV16:
+ return "PIXEL_FORMAT_YV16";
+ case PIXEL_FORMAT_YV12A:
+ return "PIXEL_FORMAT_YV12A";
+ case PIXEL_FORMAT_YV24:
+ return "PIXEL_FORMAT_YV24";
+ case PIXEL_FORMAT_NV12:
+ return "PIXEL_FORMAT_NV12";
+ case PIXEL_FORMAT_NV21:
+ return "PIXEL_FORMAT_NV21";
+ case PIXEL_FORMAT_UYVY:
+ return "PIXEL_FORMAT_UYVY";
+ case PIXEL_FORMAT_YUY2:
+ return "PIXEL_FORMAT_YUY2";
+ case PIXEL_FORMAT_ARGB:
+ return "PIXEL_FORMAT_ARGB";
+ case PIXEL_FORMAT_XRGB:
+ return "PIXEL_FORMAT_XRGB";
+ case PIXEL_FORMAT_RGB24:
+ return "PIXEL_FORMAT_RGB24";
+ case PIXEL_FORMAT_RGB32:
+ return "PIXEL_FORMAT_RGB32";
+ case PIXEL_FORMAT_MJPEG:
+ return "PIXEL_FORMAT_MJPEG";
+ case PIXEL_FORMAT_MT21:
+ return "PIXEL_FORMAT_MT21";
+ }
+ NOTREACHED() << "Invalid VideoPixelFormat provided: " << format;
+ return "";
+}
+
+bool IsYuvPlanar(VideoPixelFormat format) {
+ switch (format) {
+ case PIXEL_FORMAT_YV12:
+ case PIXEL_FORMAT_I420:
+ case PIXEL_FORMAT_YV16:
+ case PIXEL_FORMAT_YV12A:
+ case PIXEL_FORMAT_YV24:
+ case PIXEL_FORMAT_NV12:
+ case PIXEL_FORMAT_NV21:
+ case PIXEL_FORMAT_MT21:
+ return true;
+
+ case PIXEL_FORMAT_UNKNOWN:
+ case PIXEL_FORMAT_UYVY:
+ case PIXEL_FORMAT_YUY2:
+ case PIXEL_FORMAT_ARGB:
+ case PIXEL_FORMAT_XRGB:
+ case PIXEL_FORMAT_RGB24:
+ case PIXEL_FORMAT_RGB32:
+ case PIXEL_FORMAT_MJPEG:
+ return false;
+ }
+ return false;
+}
+
+} // namespace media
diff --git a/chromium/media/base/video_types.h b/chromium/media/base/video_types.h
new file mode 100644
index 00000000000..b40dfb47a0a
--- /dev/null
+++ b/chromium/media/base/video_types.h
@@ -0,0 +1,74 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_VIDEO_TYPES_H_
+#define MEDIA_BASE_VIDEO_TYPES_H_
+
+#include <string>
+
+#include "build/build_config.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+// Pixel formats roughly based on FOURCC labels, see:
+// http://www.fourcc.org/rgb.php and http://www.fourcc.org/yuv.php
+// Logged to UMA, so never reuse values. Leave gaps if necessary.
+// Ordered as planar, semi-planar, YUV-packed, and RGB formats.
+enum VideoPixelFormat {
+ PIXEL_FORMAT_UNKNOWN = 0, // Unknown or unspecified format value.
+ PIXEL_FORMAT_I420 =
+ 1, // 12bpp YUV planar 1x1 Y, 2x2 UV samples, a.k.a. YU12.
+ PIXEL_FORMAT_YV12 = 2, // 12bpp YVU planar 1x1 Y, 2x2 VU samples.
+ PIXEL_FORMAT_YV16 = 3, // 16bpp YVU planar 1x1 Y, 2x1 VU samples.
+ PIXEL_FORMAT_YV12A = 4, // 20bpp YUVA planar 1x1 Y, 2x2 VU, 1x1 A samples.
+ PIXEL_FORMAT_YV24 = 5, // 24bpp YUV planar, no subsampling.
+ PIXEL_FORMAT_NV12 =
+ 6, // 12bpp with Y plane followed by a 2x2 interleaved UV plane.
+ PIXEL_FORMAT_NV21 =
+ 7, // 12bpp with Y plane followed by a 2x2 interleaved VU plane.
+ PIXEL_FORMAT_UYVY =
+ 8, // 16bpp interleaved 2x1 U, 1x1 Y, 2x1 V, 1x1 Y samples.
+ PIXEL_FORMAT_YUY2 =
+ 9, // 16bpp interleaved 1x1 Y, 2x1 U, 1x1 Y, 2x1 V samples.
+ PIXEL_FORMAT_ARGB = 10, // 32bpp ARGB, 1 plane.
+ PIXEL_FORMAT_XRGB = 11, // 24bpp XRGB, 1 plane.
+ PIXEL_FORMAT_RGB24 = 12, // 24bpp BGR, 1 plane.
+ PIXEL_FORMAT_RGB32 = 13, // 32bpp BGRA, 1 plane.
+ PIXEL_FORMAT_MJPEG = 14, // MJPEG compressed.
+ // MediaTek proprietary format. MT21 is similar to NV21 except the memory
+ // layout and pixel layout (swizzles). 12bpp with Y plane followed by a 2x2
+ // interleaved VU plane. Each image contains two buffers -- Y plane and VU
+ // plane. Two planes can be non-contiguous in memory. The starting addresses
+ // of Y plane and VU plane are 4KB alignment.
+ // Suppose image dimension is (width, height). For both Y plane and VU plane:
+ // Row pitch = ((width+15)/16) * 16.
+ // Plane size = Row pitch * (((height+31)/32)*32)
+ PIXEL_FORMAT_MT21 = 15,
+ // Please update UMA histogram enumeration when adding new formats here.
+ PIXEL_FORMAT_MAX =
+ PIXEL_FORMAT_MT21, // Must always be equal to largest entry logged.
+};
+
+// Color space or color range used for the pixels.
+// Logged to UMA, so never reuse values. Leave gaps if necessary.
+enum ColorSpace {
+ COLOR_SPACE_UNSPECIFIED = 0, // In general this is Rec601.
+ // The JPEG color space is the combination of Rec.601 and full range colors
+ // (aka pc range colors).
+ COLOR_SPACE_JPEG = 1,
+ COLOR_SPACE_HD_REC709 = 2, // Rec709 "HD" color space.
+ COLOR_SPACE_SD_REC601 = 3, // Rec601 "SD" color space.
+ COLOR_SPACE_MAX = COLOR_SPACE_SD_REC601,
+};
+
+// Returns the name of a Format as a string.
+MEDIA_EXPORT std::string VideoPixelFormatToString(VideoPixelFormat format);
+
+// Returns true if |format| is a YUV format with multiple planes.
+MEDIA_EXPORT bool IsYuvPlanar(VideoPixelFormat format);
+
+} // namespace media
+
+#endif // MEDIA_BASE_VIDEO_TYPES_H_
diff --git a/chromium/media/base/video_util.cc b/chromium/media/base/video_util.cc
index 63635d79bc0..cddbb9c3df2 100644
--- a/chromium/media/base/video_util.cc
+++ b/chromium/media/base/video_util.cc
@@ -25,12 +25,8 @@ gfx::Size GetNaturalSize(const gfx::Size& visible_size,
double aspect_ratio = aspect_ratio_numerator /
static_cast<double>(aspect_ratio_denominator);
- int width = floor(visible_size.width() * aspect_ratio + 0.5);
- int height = visible_size.height();
-
- // An even width makes things easier for YV12 and appears to be the behavior
- // expected by WebKit layout tests.
- return gfx::Size(width & ~1, height);
+ return gfx::Size(round(visible_size.width() * aspect_ratio),
+ visible_size.height());
}
void CopyPlane(size_t plane, const uint8* source, int stride, int rows,
@@ -159,8 +155,8 @@ void LetterboxYUV(VideoFrame* frame, const gfx::Rect& view_area) {
DCHECK(!(view_area.y() & 1));
DCHECK(!(view_area.width() & 1));
DCHECK(!(view_area.height() & 1));
- DCHECK(frame->format() == VideoFrame::YV12 ||
- frame->format() == VideoFrame::I420);
+ DCHECK(frame->format() == PIXEL_FORMAT_YV12 ||
+ frame->format() == PIXEL_FORMAT_I420);
LetterboxPlane(frame, VideoFrame::kYPlane, view_area, 0x00);
gfx::Rect half_view_area(view_area.x() / 2,
view_area.y() / 2,
diff --git a/chromium/media/base/video_util_unittest.cc b/chromium/media/base/video_util_unittest.cc
index ff01110f00a..a62f0cbaf0b 100644
--- a/chromium/media/base/video_util_unittest.cc
+++ b/chromium/media/base/video_util_unittest.cc
@@ -38,9 +38,8 @@ class VideoUtilTest : public testing::Test {
void CreateDestinationFrame(int width, int height) {
gfx::Size size(width, height);
- destination_frame_ =
- VideoFrame::CreateFrame(VideoFrame::YV12, size, gfx::Rect(size), size,
- base::TimeDelta());
+ destination_frame_ = VideoFrame::CreateFrame(
+ PIXEL_FORMAT_YV12, size, gfx::Rect(size), size, base::TimeDelta());
}
void CopyPlanes() {
@@ -66,6 +65,35 @@ class VideoUtilTest : public testing::Test {
DISALLOW_COPY_AND_ASSIGN(VideoUtilTest);
};
+TEST_F(VideoUtilTest, GetNaturalSize) {
+ gfx::Size visible_size(320, 240);
+
+ // Test 0 sizes.
+ EXPECT_EQ(gfx::Size(0, 0), GetNaturalSize(gfx::Size(0, 0), 1, 1));
+ EXPECT_EQ(gfx::Size(0, 1), GetNaturalSize(gfx::Size(0, 1), 1, 1));
+ EXPECT_EQ(gfx::Size(1, 0), GetNaturalSize(gfx::Size(1, 0), 1, 1));
+
+ // Test abnormal ratios.
+ EXPECT_EQ(gfx::Size(0, 0), GetNaturalSize(visible_size, 0, 0));
+ EXPECT_EQ(gfx::Size(0, 0), GetNaturalSize(visible_size, 1, 0));
+ EXPECT_EQ(gfx::Size(0, 0), GetNaturalSize(visible_size, 1, -1));
+ EXPECT_EQ(gfx::Size(0, 0), GetNaturalSize(visible_size, -1, 1));
+
+ // Test normal sizes and ratios.
+ EXPECT_EQ(gfx::Size(0, 240), GetNaturalSize(visible_size, 0, 1));
+ EXPECT_EQ(gfx::Size(320, 240), GetNaturalSize(visible_size, 1, 1));
+ EXPECT_EQ(gfx::Size(640, 240), GetNaturalSize(visible_size, 2, 1));
+ EXPECT_EQ(gfx::Size(160, 240), GetNaturalSize(visible_size, 1, 2));
+ EXPECT_EQ(gfx::Size(427, 240), GetNaturalSize(visible_size, 4, 3));
+ EXPECT_EQ(gfx::Size(240, 240), GetNaturalSize(visible_size, 3, 4));
+ EXPECT_EQ(gfx::Size(569, 240), GetNaturalSize(visible_size, 16, 9));
+ EXPECT_EQ(gfx::Size(180, 240), GetNaturalSize(visible_size, 9, 16));
+
+ // Test some random ratios.
+ EXPECT_EQ(gfx::Size(495, 240), GetNaturalSize(visible_size, 17, 11));
+ EXPECT_EQ(gfx::Size(207, 240), GetNaturalSize(visible_size, 11, 17));
+}
+
TEST_F(VideoUtilTest, CopyPlane_Exact) {
CreateSourceFrame(16, 16, 16, 8, 8);
CreateDestinationFrame(16, 16);
@@ -396,9 +424,8 @@ TEST_F(VideoUtilTest, LetterboxYUV) {
int width = 40;
int height = 30;
gfx::Size size(width, height);
- scoped_refptr<VideoFrame> frame(
- VideoFrame::CreateFrame(VideoFrame::YV12, size, gfx::Rect(size), size,
- base::TimeDelta()));
+ scoped_refptr<VideoFrame> frame(VideoFrame::CreateFrame(
+ PIXEL_FORMAT_YV12, size, gfx::Rect(size), size, base::TimeDelta()));
for (int left_margin = 0; left_margin <= 10; left_margin += 10) {
for (int right_margin = 0; right_margin <= 10; right_margin += 10) {
diff --git a/chromium/media/base/yuv_convert_perftest.cc b/chromium/media/base/yuv_convert_perftest.cc
index 7d3d64c956c..d676e4e2a9a 100644
--- a/chromium/media/base/yuv_convert_perftest.cc
+++ b/chromium/media/base/yuv_convert_perftest.cc
@@ -80,11 +80,11 @@ TEST_F(YUVConvertPerfTest, ConvertYUVToRGB32Row_SSE) {
GetLookupTable(YV12));
}
}
+ media::EmptyRegisterState();
double total_time_seconds = (base::TimeTicks::Now() - start).InSecondsF();
perf_test::PrintResult(
"yuv_convert_perftest", "", "ConvertYUVToRGB32Row_SSE",
kPerfTestIterations / total_time_seconds, "runs/s", true);
- media::EmptyRegisterState();
}
// 64-bit release + component builds on Windows are too smart and optimizes
@@ -109,11 +109,11 @@ TEST_F(YUVConvertPerfTest, ScaleYUVToRGB32Row_SSE) {
GetLookupTable(YV12));
}
}
+ media::EmptyRegisterState();
double total_time_seconds = (base::TimeTicks::Now() - start).InSecondsF();
perf_test::PrintResult(
"yuv_convert_perftest", "", "ScaleYUVToRGB32Row_SSE",
kPerfTestIterations / total_time_seconds, "runs/s", true);
- media::EmptyRegisterState();
}
TEST_F(YUVConvertPerfTest, LinearScaleYUVToRGB32Row_SSE) {
diff --git a/chromium/media/base/yuv_convert_unittest.cc b/chromium/media/base/yuv_convert_unittest.cc
index 9ab8a9cab31..fff56b98ff4 100644
--- a/chromium/media/base/yuv_convert_unittest.cc
+++ b/chromium/media/base/yuv_convert_unittest.cc
@@ -73,7 +73,8 @@ static void ReadYV16Data(scoped_ptr<uint8[]>* data) {
ReadData(FILE_PATH_LITERAL("bali_640x360_P422.yuv"), kYUV16Size, data);
}
-#if !defined(ARCH_CPU_ARM_FAMILY) && !defined(ARCH_CPU_MIPS_FAMILY)
+#if !defined(ARCH_CPU_ARM_FAMILY) && !defined(ARCH_CPU_MIPS_FAMILY) && \
+ !defined(OS_ANDROID)
static void ReadYV12AData(scoped_ptr<uint8[]>* data) {
ReadData(FILE_PATH_LITERAL("bali_640x360_P420_alpha.yuv"), kYUVA12Size, data);
}
@@ -529,6 +530,7 @@ TEST(YUVConvertTest, DownScaleYUVToRGB32WithRect) {
}
#if !defined(ARCH_CPU_ARM_FAMILY) && !defined(ARCH_CPU_MIPS_FAMILY)
+#if !defined(OS_ANDROID)
TEST(YUVConvertTest, YUVAtoARGB_MMX_MatchReference) {
// Allocate all surfaces.
scoped_ptr<uint8[]> yuv_bytes;
@@ -570,6 +572,7 @@ TEST(YUVConvertTest, YUVAtoARGB_MMX_MatchReference) {
rgb_converted_bytes_ref.get(),
kRGBSizeConverted));
}
+#endif // !defined(OS_ANDROID)
TEST(YUVConvertTest, RGB32ToYUV_SSE2_MatchReference) {
base::CPU cpu;
diff --git a/chromium/media/blink/BUILD.gn b/chromium/media/blink/BUILD.gn
index 1005bd1aa95..a805c42fca8 100644
--- a/chromium/media/blink/BUILD.gn
+++ b/chromium/media/blink/BUILD.gn
@@ -85,48 +85,56 @@ component("blink") {
}
}
-if (!is_mac) {
- # TODO(GYP): Make linking this work on the mac.
- test("media_blink_unittests") {
- deps = [
- ":blink",
- "//base",
- "//base/test:test_support",
- "//cc",
- "//cc/blink",
- "//gin",
- "//media",
- "//media:shared_memory_support",
- "//media/base:test_support",
- "//net",
- "//testing/gmock",
- "//testing/gtest",
- "//third_party/WebKit/public:blink",
- "//ui/gfx/geometry",
- "//ui/gfx:test_support",
- "//url",
- ]
+test("media_blink_unittests") {
+ deps = [
+ ":blink",
+ "//base",
+ "//base/test:test_support",
+ "//cc",
+ "//cc/blink",
+ "//components/scheduler:scheduler",
+ "//components/scheduler:test_support",
+ "//gin",
+ "//media",
+ "//media:shared_memory_support",
+ "//media/base:test_support",
+ "//net",
+ "//testing/gmock",
+ "//testing/gtest",
+ "//third_party/WebKit/public:blink",
+ "//ui/gfx/geometry",
+ "//ui/gfx:test_support",
+ "//url",
+ ]
- configs += [ "//build/config/compiler:no_size_t_to_int_warning" ]
+ configs += [ "//build/config/compiler:no_size_t_to_int_warning" ]
- sources = [
- "buffered_data_source_host_impl_unittest.cc",
- "buffered_data_source_unittest.cc",
- "buffered_resource_loader_unittest.cc",
- "cache_util_unittest.cc",
- "key_system_config_selector_unittest.cc",
- "mock_webframeclient.h",
- "mock_weburlloader.cc",
- "mock_weburlloader.h",
- "run_all_unittests.cc",
- "test_response_generator.cc",
- "test_response_generator.h",
- "video_frame_compositor_unittest.cc",
- "webaudiosourceprovider_impl_unittest.cc",
- ]
+ sources = [
+ "buffered_data_source_host_impl_unittest.cc",
+ "buffered_data_source_unittest.cc",
+ "buffered_resource_loader_unittest.cc",
+ "cache_util_unittest.cc",
+ "key_system_config_selector_unittest.cc",
+ "mock_webframeclient.h",
+ "mock_weburlloader.cc",
+ "mock_weburlloader.h",
+ "run_all_unittests.cc",
+ "test_response_generator.cc",
+ "test_response_generator.h",
+ "video_frame_compositor_unittest.cc",
+ "webaudiosourceprovider_impl_unittest.cc",
+ ]
- if (is_android) {
- deps += [ "//ui/gl" ]
- }
+ if (is_android) {
+ deps += [ "//ui/gl" ]
}
}
+
+# TODO(GYP): Delete this after we've converted everything to GN.
+# The _run targets exist only for compatibility w/ GYP.
+group("media_blink_unittests_run") {
+ testonly = true
+ deps = [
+ ":media_blink_unittests",
+ ]
+}
diff --git a/chromium/media/blink/DEPS b/chromium/media/blink/DEPS
index 9ab9dd462ab..374de6ec35e 100644
--- a/chromium/media/blink/DEPS
+++ b/chromium/media/blink/DEPS
@@ -2,6 +2,7 @@ include_rules = [
"+cc/blink/web_layer_impl.h",
"+cc/layers/video_frame_provider.h",
"+cc/layers/video_layer.h",
+ "+components/scheduler", # Only allowed in tests.
"+gin",
"+gpu/blink",
"+media",
diff --git a/chromium/media/blink/buffered_data_source.cc b/chromium/media/blink/buffered_data_source.cc
index 614fa39b7de..eb4c984995a 100644
--- a/chromium/media/blink/buffered_data_source.cc
+++ b/chromium/media/blink/buffered_data_source.cc
@@ -397,7 +397,8 @@ void BufferedDataSource::StartCallback(
loader_->range_supported());
}
- base::ResetAndReturn(&init_cb_).Run(success);
+ render_task_runner_->PostTask(
+ FROM_HERE, base::Bind(base::ResetAndReturn(&init_cb_), success));
}
void BufferedDataSource::PartialReadStartCallback(
diff --git a/chromium/media/blink/buffered_data_source_host_impl.cc b/chromium/media/blink/buffered_data_source_host_impl.cc
index 42f9822dab7..542b8567dbc 100644
--- a/chromium/media/blink/buffered_data_source_host_impl.cc
+++ b/chromium/media/blink/buffered_data_source_host_impl.cc
@@ -4,6 +4,8 @@
#include "media/blink/buffered_data_source_host_impl.h"
+#include "media/base/timestamp_constants.h"
+
namespace media {
BufferedDataSourceHostImpl::BufferedDataSourceHostImpl()
diff --git a/chromium/media/blink/buffered_resource_loader.h b/chromium/media/blink/buffered_resource_loader.h
index 6e466a37b0c..e14100b0cfa 100644
--- a/chromium/media/blink/buffered_resource_loader.h
+++ b/chromium/media/blink/buffered_resource_loader.h
@@ -87,7 +87,7 @@ class MEDIA_EXPORT BufferedResourceLoader
int bitrate,
double playback_rate,
MediaLog* media_log);
- virtual ~BufferedResourceLoader();
+ ~BufferedResourceLoader() override;
// Start the resource loading with the specified URL and range.
//
@@ -132,36 +132,36 @@ class MEDIA_EXPORT BufferedResourceLoader
bool range_supported();
// blink::WebURLLoaderClient implementation.
- virtual void willSendRequest(
+ void willSendRequest(
blink::WebURLLoader* loader,
blink::WebURLRequest& newRequest,
- const blink::WebURLResponse& redirectResponse);
- virtual void didSendData(
+ const blink::WebURLResponse& redirectResponse) override;
+ void didSendData(
blink::WebURLLoader* loader,
unsigned long long bytesSent,
- unsigned long long totalBytesToBeSent);
- virtual void didReceiveResponse(
+ unsigned long long totalBytesToBeSent) override;
+ void didReceiveResponse(
blink::WebURLLoader* loader,
- const blink::WebURLResponse& response);
- virtual void didDownloadData(
+ const blink::WebURLResponse& response) override;
+ void didDownloadData(
blink::WebURLLoader* loader,
int data_length,
- int encoded_data_length);
- virtual void didReceiveData(
+ int encoded_data_length) override;
+ void didReceiveData(
blink::WebURLLoader* loader,
const char* data,
int data_length,
- int encoded_data_length);
- virtual void didReceiveCachedMetadata(
+ int encoded_data_length) override;
+ void didReceiveCachedMetadata(
blink::WebURLLoader* loader,
- const char* data, int dataLength);
- virtual void didFinishLoading(
+ const char* data, int dataLength) override;
+ void didFinishLoading(
blink::WebURLLoader* loader,
double finishTime,
- int64_t total_encoded_data_length);
- virtual void didFail(
+ int64_t total_encoded_data_length) override;
+ void didFail(
blink::WebURLLoader* loader,
- const blink::WebURLError&);
+ const blink::WebURLError&) override;
// Returns true if the media resource has a single origin, false otherwise.
// Only valid to call after Start() has completed.
diff --git a/chromium/media/blink/cache_util.cc b/chromium/media/blink/cache_util.cc
index 65bed438fe9..4d1c9c39d43 100644
--- a/chromium/media/blink/cache_util.cc
+++ b/chromium/media/blink/cache_util.cc
@@ -47,8 +47,7 @@ uint32 GetReasonsForUncacheability(const WebURLResponse& response) {
}
std::string cache_control_header =
- response.httpHeaderField("cache-control").utf8();
- base::StringToLowerASCII(&cache_control_header);
+ base::ToLowerASCII(response.httpHeaderField("cache-control").utf8());
if (cache_control_header.find("no-cache") != std::string::npos)
reasons |= kNoCache;
if (cache_control_header.find("no-store") != std::string::npos)
diff --git a/chromium/media/blink/cdm_session_adapter.cc b/chromium/media/blink/cdm_session_adapter.cc
index 5c72b92d107..4dcfe96e51d 100644
--- a/chromium/media/blink/cdm_session_adapter.cc
+++ b/chromium/media/blink/cdm_session_adapter.cc
@@ -6,7 +6,9 @@
#include "base/bind.h"
#include "base/logging.h"
+#include "base/metrics/histogram.h"
#include "base/stl_util.h"
+#include "base/trace_event/trace_event.h"
#include "media/base/cdm_factory.h"
#include "media/base/cdm_key_information.h"
#include "media/base/cdm_promise.h"
@@ -19,9 +21,10 @@ namespace media {
const char kMediaEME[] = "Media.EME.";
const char kDot[] = ".";
+const char kTimeToCreateCdmUMAName[] = "CreateCdmTime";
-CdmSessionAdapter::CdmSessionAdapter() : weak_ptr_factory_(this) {
-}
+CdmSessionAdapter::CdmSessionAdapter()
+ : trace_id_(0), weak_ptr_factory_(this) {}
CdmSessionAdapter::~CdmSessionAdapter() {}
@@ -31,6 +34,11 @@ void CdmSessionAdapter::CreateCdm(
const GURL& security_origin,
const CdmConfig& cdm_config,
scoped_ptr<blink::WebContentDecryptionModuleResult> result) {
+ TRACE_EVENT_ASYNC_BEGIN0("media", "CdmSessionAdapter::CreateCdm",
+ ++trace_id_);
+
+ base::TimeTicks start_time = base::TimeTicks::Now();
+
// Note: WebContentDecryptionModuleImpl::Create() calls this method without
// holding a reference to the CdmSessionAdapter. Bind OnCdmCreated() with
// |this| instead of |weak_this| to prevent |this| from being destructed.
@@ -46,7 +54,8 @@ void CdmSessionAdapter::CreateCdm(
base::Bind(&CdmSessionAdapter::OnLegacySessionError, weak_this),
base::Bind(&CdmSessionAdapter::OnSessionKeysChange, weak_this),
base::Bind(&CdmSessionAdapter::OnSessionExpirationUpdate, weak_this),
- base::Bind(&CdmSessionAdapter::OnCdmCreated, this, key_system));
+ base::Bind(&CdmSessionAdapter::OnCdmCreated, this, key_system,
+ start_time));
}
void CdmSessionAdapter::SetServerCertificate(
@@ -115,14 +124,22 @@ const std::string& CdmSessionAdapter::GetKeySystem() const {
}
const std::string& CdmSessionAdapter::GetKeySystemUMAPrefix() const {
+ DCHECK(!key_system_uma_prefix_.empty());
return key_system_uma_prefix_;
}
void CdmSessionAdapter::OnCdmCreated(
const std::string& key_system,
+ base::TimeTicks start_time,
scoped_ptr<MediaKeys> cdm,
const std::string& error_message) {
DVLOG(2) << __FUNCTION__;
+ DCHECK(!cdm_);
+
+ TRACE_EVENT_ASYNC_END2("media", "CdmSessionAdapter::CreateCdm", trace_id_,
+ "success", (cdm ? "true" : "false"), "error_message",
+ error_message);
+
if (!cdm) {
cdm_created_result_->completeWithError(
blink::WebContentDecryptionModuleExceptionNotSupportedError, 0,
@@ -134,6 +151,10 @@ void CdmSessionAdapter::OnCdmCreated(
key_system_ = key_system;
key_system_uma_prefix_ =
kMediaEME + GetKeySystemNameForUMA(key_system) + kDot;
+
+ // Only report time for successful CDM creation.
+ ReportTimeToCreateCdmUMA(base::TimeTicks::Now() - start_time);
+
cdm_ = cdm.Pass();
cdm_created_result_->completeWithContentDecryptionModule(
@@ -200,4 +221,14 @@ WebContentDecryptionModuleSessionImpl* CdmSessionAdapter::GetSession(
return (session != sessions_.end()) ? session->second.get() : NULL;
}
+void CdmSessionAdapter::ReportTimeToCreateCdmUMA(base::TimeDelta time) const {
+ // Note: This leaks memory, which is expected behavior.
+ base::HistogramBase* histogram = base::Histogram::FactoryTimeGet(
+ GetKeySystemUMAPrefix() + kTimeToCreateCdmUMAName,
+ base::TimeDelta::FromMilliseconds(1), base::TimeDelta::FromSeconds(10),
+ 50, base::HistogramBase::kUmaTargetedHistogramFlag);
+
+ histogram->AddTime(time);
+}
+
} // namespace media
diff --git a/chromium/media/blink/cdm_session_adapter.h b/chromium/media/blink/cdm_session_adapter.h
index e36aefa163a..ac4219e6828 100644
--- a/chromium/media/blink/cdm_session_adapter.h
+++ b/chromium/media/blink/cdm_session_adapter.h
@@ -110,6 +110,7 @@ class CdmSessionAdapter : public base::RefCounted<CdmSessionAdapter> {
// Callback for CreateCdm().
void OnCdmCreated(const std::string& key_system,
+ base::TimeTicks start_time,
scoped_ptr<MediaKeys> cdm,
const std::string& error_message);
@@ -133,6 +134,8 @@ class CdmSessionAdapter : public base::RefCounted<CdmSessionAdapter> {
WebContentDecryptionModuleSessionImpl* GetSession(
const std::string& session_id);
+ void ReportTimeToCreateCdmUMA(base::TimeDelta cdm_creation_time) const;
+
scoped_ptr<MediaKeys> cdm_;
SessionMap sessions_;
@@ -140,6 +143,10 @@ class CdmSessionAdapter : public base::RefCounted<CdmSessionAdapter> {
std::string key_system_;
std::string key_system_uma_prefix_;
+ // A unique ID to trace CdmSessionAdapter::CreateCdm() call and the matching
+ // OnCdmCreated() call.
+ uint32 trace_id_;
+
scoped_ptr<blink::WebContentDecryptionModuleResult> cdm_created_result_;
// NOTE: Weak pointers must be invalidated before all other member variables.
diff --git a/chromium/media/blink/encrypted_media_player_support.cc b/chromium/media/blink/encrypted_media_player_support.cc
index 39ae48f113c..086c469bb6e 100644
--- a/chromium/media/blink/encrypted_media_player_support.cc
+++ b/chromium/media/blink/encrypted_media_player_support.cc
@@ -16,12 +16,12 @@
#include "media/base/key_systems.h"
#include "media/blink/webcontentdecryptionmodule_impl.h"
#include "third_party/WebKit/public/platform/WebContentDecryptionModule.h"
-#include "third_party/WebKit/public/platform/WebMediaPlayerClient.h"
+#include "third_party/WebKit/public/platform/WebMediaPlayerEncryptedMediaClient.h"
#include "third_party/WebKit/public/web/WebDocument.h"
#include "third_party/WebKit/public/web/WebLocalFrame.h"
using blink::WebMediaPlayer;
-using blink::WebMediaPlayerClient;
+using blink::WebMediaPlayerEncryptedMediaClient;
using blink::WebString;
namespace media {
@@ -38,8 +38,9 @@ static const char* kMediaEme = "Media.EME.";
// Convert a WebString to ASCII, falling back on an empty string in the case
// of a non-ASCII string.
static std::string ToASCIIOrEmpty(const WebString& string) {
- return base::IsStringASCII(string) ? base::UTF16ToASCII(string)
- : std::string();
+ return base::IsStringASCII(string)
+ ? base::UTF16ToASCII(base::StringPiece16(string))
+ : std::string();
}
// Helper functions to report media EME related stats to UMA. They follow the
@@ -115,7 +116,7 @@ static EmeInitDataType GuessInitDataType(const unsigned char* init_data,
EncryptedMediaPlayerSupport::EncryptedMediaPlayerSupport(
CdmFactory* cdm_factory,
- blink::WebMediaPlayerClient* client,
+ WebMediaPlayerEncryptedMediaClient* client,
MediaPermission* media_permission,
const CdmContextReadyCB& cdm_context_ready_cb)
: cdm_factory_(cdm_factory),
@@ -305,7 +306,8 @@ void EncryptedMediaPlayerSupport::OnKeyError(const std::string& session_id,
client_->keyError(
WebString::fromUTF8(GetPrefixedKeySystemName(current_key_system_)),
WebString::fromUTF8(session_id),
- static_cast<WebMediaPlayerClient::MediaKeyErrorCode>(error_code),
+ static_cast<WebMediaPlayerEncryptedMediaClient::MediaKeyErrorCode>(
+ error_code),
short_system_code);
}
diff --git a/chromium/media/blink/encrypted_media_player_support.h b/chromium/media/blink/encrypted_media_player_support.h
index 6ddec23d511..2d09579c61b 100644
--- a/chromium/media/blink/encrypted_media_player_support.h
+++ b/chromium/media/blink/encrypted_media_player_support.h
@@ -22,7 +22,7 @@
namespace blink {
class WebContentDecryptionModule;
class WebLocalFrame;
-class WebMediaPlayerClient;
+class WebMediaPlayerEncryptedMediaClient;
class WebString;
}
@@ -41,7 +41,7 @@ class EncryptedMediaPlayerSupport
// |cdm_context_ready_cb| is called when the CDM instance creation completes.
EncryptedMediaPlayerSupport(CdmFactory* cdm_factory,
- blink::WebMediaPlayerClient* client,
+ blink::WebMediaPlayerEncryptedMediaClient* client,
MediaPermission* media_permission,
const CdmContextReadyCB& cdm_context_ready_cb);
~EncryptedMediaPlayerSupport();
@@ -95,7 +95,7 @@ class EncryptedMediaPlayerSupport
CdmFactory* cdm_factory_;
- blink::WebMediaPlayerClient* client_;
+ blink::WebMediaPlayerEncryptedMediaClient* client_;
MediaPermission* media_permission_;
diff --git a/chromium/media/blink/key_system_config_selector.cc b/chromium/media/blink/key_system_config_selector.cc
index 29fe49288c5..a727722c5a6 100644
--- a/chromium/media/blink/key_system_config_selector.cc
+++ b/chromium/media/blink/key_system_config_selector.cc
@@ -281,7 +281,7 @@ bool KeySystemConfigSelector::IsSupportedContentType(
KeySystemConfigSelector::ConfigState* config_state) {
// TODO(sandersd): Move contentType parsing from Blink to here so that invalid
// parameters can be rejected. http://crbug.com/417561
- std::string container_lower = base::StringToLowerASCII(container_mime_type);
+ std::string container_lower = base::ToLowerASCII(container_mime_type);
// Check that |container_mime_type| is supported by Chrome.
if (!media::IsSupportedMediaMimeType(container_lower))
@@ -347,8 +347,10 @@ bool KeySystemConfigSelector::GetSupportedCapabilities(
if (!base::IsStringASCII(capability.mimeType) ||
!base::IsStringASCII(capability.codecs) ||
!IsSupportedContentType(key_system, media_type,
- base::UTF16ToASCII(capability.mimeType),
- base::UTF16ToASCII(capability.codecs),
+ base::UTF16ToASCII(
+ base::StringPiece16(capability.mimeType)),
+ base::UTF16ToASCII(
+ base::StringPiece16(capability.codecs)),
&proposed_config_state)) {
continue;
}
@@ -360,7 +362,8 @@ bool KeySystemConfigSelector::GetSupportedCapabilities(
if (!base::IsStringASCII(capability.robustness))
continue;
EmeConfigRule robustness_rule = key_systems_->GetRobustnessConfigRule(
- key_system, media_type, base::UTF16ToASCII(capability.robustness));
+ key_system, media_type, base::UTF16ToASCII(
+ base::StringPiece16(capability.robustness)));
if (!proposed_config_state.IsRuleSupported(robustness_rule))
continue;
proposed_config_state.AddRule(robustness_rule);
@@ -688,7 +691,8 @@ void KeySystemConfigSelector::SelectConfig(
return;
}
- std::string key_system_ascii = base::UTF16ToASCII(key_system);
+ std::string key_system_ascii =
+ base::UTF16ToASCII(base::StringPiece16(key_system));
if (!key_systems_->IsSupportedKeySystem(key_system_ascii)) {
not_supported_cb.Run("Unsupported keySystem");
return;
diff --git a/chromium/media/blink/media_blink.gyp b/chromium/media/blink/media_blink.gyp
index 3462a382c99..5e4a7384c63 100644
--- a/chromium/media/blink/media_blink.gyp
+++ b/chromium/media/blink/media_blink.gyp
@@ -3,6 +3,9 @@
# found in the LICENSE file.
{
+ 'includes': [
+ '../media_variables.gypi'
+ ],
'targets': [
{
# GN version: //media/blink
@@ -76,7 +79,7 @@
'websourcebuffer_impl.h',
],
'conditions': [
- ['OS=="android"', {
+ ['OS=="android" and media_use_ffmpeg==0', {
'sources!': [
'encrypted_media_player_support.cc',
'encrypted_media_player_support.h',
@@ -97,6 +100,8 @@
'../../base/base.gyp:test_support_base',
'../../cc/cc.gyp:cc',
'../../cc/blink/cc_blink.gyp:cc_blink',
+ '../../components/scheduler/scheduler.gyp:scheduler',
+ '../../components/scheduler/scheduler.gyp:scheduler_test_support',
'../../gin/gin.gyp:gin',
'../../net/net.gyp:net',
'../../testing/gmock.gyp:gmock',
@@ -123,5 +128,33 @@
'webaudiosourceprovider_impl_unittest.cc',
],
},
- ]
+ ],
+ 'conditions': [
+ ['test_isolation_mode != "noop"', {
+ 'targets': [
+ {
+ 'target_name': 'media_blink_unittests_run',
+ 'type': 'none',
+ 'dependencies': [
+ 'media_blink_unittests',
+ ],
+ 'includes': [
+ '../../build/isolate.gypi',
+ ],
+ 'sources': [
+ 'media_blink_unittests.isolate',
+ ],
+ 'conditions': [
+ ['use_x11==1',
+ {
+ 'dependencies': [
+ '../../tools/xdisplaycheck/xdisplaycheck.gyp:xdisplaycheck',
+ ],
+ }
+ ],
+ ],
+ },
+ ],
+ }],
+ ],
}
diff --git a/chromium/media/blink/media_blink_unittests.isolate b/chromium/media/blink/media_blink_unittests.isolate
new file mode 100644
index 00000000000..545ce9fd46a
--- /dev/null
+++ b/chromium/media/blink/media_blink_unittests.isolate
@@ -0,0 +1,63 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'conditions': [
+ ['use_x11==0', {
+ 'variables': {
+ 'command': [
+ '../../testing/test_env.py',
+ '<(PRODUCT_DIR)/media_blink_unittests<(EXECUTABLE_SUFFIX)',
+ '--brave-new-test-launcher',
+ '--test-launcher-bot-mode',
+ '--asan=<(asan)',
+ '--msan=<(msan)',
+ '--tsan=<(tsan)',
+ ],
+ },
+ }],
+ ['use_x11==1', {
+ 'variables': {
+ 'command': [
+ '../../testing/xvfb.py',
+ '<(PRODUCT_DIR)',
+ '<(PRODUCT_DIR)/media_blink_unittests<(EXECUTABLE_SUFFIX)',
+ '--brave-new-test-launcher',
+ '--test-launcher-bot-mode',
+ '--asan=<(asan)',
+ '--msan=<(msan)',
+ '--tsan=<(tsan)',
+ ],
+ 'files': [
+ '../../testing/xvfb.py',
+ '<(PRODUCT_DIR)/xdisplaycheck<(EXECUTABLE_SUFFIX)',
+ ],
+ },
+ }],
+ ['OS=="linux" or OS=="mac" or OS=="win"', {
+ 'variables': {
+ 'files': [
+ '../../testing/test_env.py',
+ ],
+ },
+ }],
+ ['OS=="mac" and asan==1 and fastbuild==0', {
+ 'variables': {
+ 'files': [
+ '<(PRODUCT_DIR)/media_blink_unittests.dSYM/',
+ ],
+ },
+ }],
+ ['OS=="win" and (fastbuild==0 or fastbuild==1)', {
+ 'variables': {
+ 'files': [
+ '<(PRODUCT_DIR)/media_blink_unittests.exe.pdb',
+ ],
+ },
+ }],
+ ],
+ 'includes': [
+ '../../base/base.isolate',
+ '../../gin/v8.isolate',
+ ],
+}
diff --git a/chromium/media/blink/new_session_cdm_result_promise.cc b/chromium/media/blink/new_session_cdm_result_promise.cc
index 0a07614dc0a..f7f1446a786 100644
--- a/chromium/media/blink/new_session_cdm_result_promise.cc
+++ b/chromium/media/blink/new_session_cdm_result_promise.cc
@@ -10,6 +10,22 @@
namespace media {
+static blink::WebContentDecryptionModuleResult::SessionStatus ConvertStatus(
+ SessionInitStatus status) {
+ switch (status) {
+ case SessionInitStatus::UNKNOWN_STATUS:
+ break;
+ case SessionInitStatus::NEW_SESSION:
+ return blink::WebContentDecryptionModuleResult::NewSession;
+ case SessionInitStatus::SESSION_NOT_FOUND:
+ return blink::WebContentDecryptionModuleResult::SessionNotFound;
+ case SessionInitStatus::SESSION_ALREADY_EXISTS:
+ return blink::WebContentDecryptionModuleResult::SessionAlreadyExists;
+ }
+ NOTREACHED();
+ return blink::WebContentDecryptionModuleResult::SessionNotFound;
+}
+
NewSessionCdmResultPromise::NewSessionCdmResultPromise(
const blink::WebContentDecryptionModuleResult& result,
const std::string& uma_name,
@@ -23,11 +39,20 @@ NewSessionCdmResultPromise::~NewSessionCdmResultPromise() {
}
void NewSessionCdmResultPromise::resolve(const std::string& session_id) {
+ // |new_session_created_cb_| uses a WeakPtr<> and may not do anything
+ // if the session object has been destroyed.
+ SessionInitStatus status = SessionInitStatus::UNKNOWN_STATUS;
+ new_session_created_cb_.Run(session_id, &status);
+
+ if (status == SessionInitStatus::UNKNOWN_STATUS) {
+ reject(MediaKeys::INVALID_STATE_ERROR, 0,
+ "Cannot finish session initialization");
+ return;
+ }
+
MarkPromiseSettled();
ReportCdmResultUMA(uma_name_, SUCCESS);
- blink::WebContentDecryptionModuleResult::SessionStatus status =
- new_session_created_cb_.Run(session_id);
- web_cdm_result_.completeWithSession(status);
+ web_cdm_result_.completeWithSession(ConvertStatus(status));
}
void NewSessionCdmResultPromise::reject(MediaKeys::Exception exception_code,
diff --git a/chromium/media/blink/new_session_cdm_result_promise.h b/chromium/media/blink/new_session_cdm_result_promise.h
index c4b657da122..78e70b6645b 100644
--- a/chromium/media/blink/new_session_cdm_result_promise.h
+++ b/chromium/media/blink/new_session_cdm_result_promise.h
@@ -15,8 +15,23 @@
namespace media {
-typedef base::Callback<blink::WebContentDecryptionModuleResult::SessionStatus(
- const std::string& session_id)> SessionInitializedCB;
+enum class SessionInitStatus {
+ // Error creating the session.
+ UNKNOWN_STATUS,
+
+ // New session has been initialized.
+ NEW_SESSION,
+
+ // CDM could not find the requested session.
+ SESSION_NOT_FOUND,
+
+ // CDM already has a non-closed session that matches the provided
+ // parameters.
+ SESSION_ALREADY_EXISTS
+};
+
+typedef base::Callback<void(const std::string& session_id,
+ SessionInitStatus* status)> SessionInitializedCB;
// Special class for resolving a new session promise. Resolving a new session
// promise returns the session ID (as a string), but the blink promise needs
@@ -43,7 +58,7 @@ class MEDIA_EXPORT NewSessionCdmResultPromise
// UMA name to report result to.
std::string uma_name_;
- // Called on resolve() to convert the session ID into a SessionStatus to
+ // Called on resolve() to convert the session ID into a SessionInitStatus to
// be reported to blink.
SessionInitializedCB new_session_created_cb_;
diff --git a/chromium/media/blink/run_all_unittests.cc b/chromium/media/blink/run_all_unittests.cc
index 8af78f7cdd0..d84a1af06f1 100644
--- a/chromium/media/blink/run_all_unittests.cc
+++ b/chromium/media/blink/run_all_unittests.cc
@@ -7,7 +7,16 @@
#include "base/test/launcher/unit_test_launcher.h"
#include "base/test/test_suite.h"
#include "build/build_config.h"
+#include "components/scheduler/child/scheduler_task_runner_delegate_impl.h"
+#include "components/scheduler/child/web_task_runner_impl.h"
+#include "components/scheduler/renderer/renderer_scheduler_impl.h"
+#include "components/scheduler/renderer/renderer_web_scheduler_impl.h"
+#include "components/scheduler/test/lazy_scheduler_message_loop_delegate_for_tests.h"
#include "media/base/media.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/WebKit/public/platform/WebScheduler.h"
+#include "third_party/WebKit/public/platform/WebTaskRunner.h"
+#include "third_party/WebKit/public/platform/WebThread.h"
#include "third_party/WebKit/public/web/WebKit.h"
#if defined(OS_ANDROID)
@@ -20,14 +29,52 @@
#include "gin/v8_initializer.h"
#endif
+class CurrentThreadMock : public blink::WebThread {
+ public:
+ CurrentThreadMock()
+ : task_runner_delegate_(
+ scheduler::LazySchedulerMessageLoopDelegateForTests::Create()),
+ scheduler_(
+ new scheduler::RendererSchedulerImpl(task_runner_delegate_.get())),
+ web_scheduler_(
+ new scheduler::RendererWebSchedulerImpl(scheduler_.get())),
+ web_task_runner_(
+ new scheduler::WebTaskRunnerImpl(scheduler_->DefaultTaskRunner())) {
+ }
+
+ ~CurrentThreadMock() override {
+ scheduler_->Shutdown();
+ }
+
+ blink::WebTaskRunner* taskRunner() override { return web_task_runner_.get(); }
+
+ bool isCurrentThread() const override { return true; }
+
+ blink::PlatformThreadId threadId() const override { return 17; }
+
+ blink::WebScheduler* scheduler() const override {
+ return web_scheduler_.get();
+ }
+
+ private:
+ scoped_refptr<scheduler::SchedulerTaskRunnerDelegate> task_runner_delegate_;
+ scoped_ptr<scheduler::RendererScheduler> scheduler_;
+ scoped_ptr<blink::WebScheduler> web_scheduler_;
+ scoped_ptr<blink::WebTaskRunner> web_task_runner_;
+};
+
class TestBlinkPlatformSupport : NON_EXPORTED_BASE(public blink::Platform) {
public:
- virtual ~TestBlinkPlatformSupport();
+ ~TestBlinkPlatformSupport() override;
- virtual void cryptographicallyRandomValues(unsigned char* buffer,
- size_t length) override;
- virtual const unsigned char* getTraceCategoryEnabledFlag(
+ void cryptographicallyRandomValues(unsigned char* buffer,
+ size_t length) override;
+ const unsigned char* getTraceCategoryEnabledFlag(
const char* categoryName) override;
+ blink::WebThread* currentThread() override { return &m_currentThread; }
+
+ private:
+ CurrentThreadMock m_currentThread;
};
TestBlinkPlatformSupport::~TestBlinkPlatformSupport() {}
diff --git a/chromium/media/blink/skcanvas_video_renderer.cc b/chromium/media/blink/skcanvas_video_renderer.cc
index 51168fb0886..132e0da7c6e 100644
--- a/chromium/media/blink/skcanvas_video_renderer.cc
+++ b/chromium/media/blink/skcanvas_video_renderer.cc
@@ -19,8 +19,7 @@
#include "third_party/skia/include/gpu/GrTexture.h"
#include "third_party/skia/include/gpu/GrTextureProvider.h"
#include "third_party/skia/include/gpu/SkGr.h"
-#include "third_party/skia/include/gpu/SkGrPixelRef.h"
-#include "ui/gfx/skbitmap_operations.h"
+#include "ui/gfx/geometry/rect_f.h"
// Skia internal format depends on a platform. On Android it is ABGR, on others
// it is ARGB.
@@ -28,10 +27,18 @@
SK_A32_SHIFT == 24
#define LIBYUV_I420_TO_ARGB libyuv::I420ToARGB
#define LIBYUV_I422_TO_ARGB libyuv::I422ToARGB
+#define LIBYUV_I444_TO_ARGB libyuv::I444ToARGB
+#define LIBYUV_I420ALPHA_TO_ARGB libyuv::I420AlphaToARGB
+#define LIBYUV_J420_TO_ARGB libyuv::J420ToARGB
+#define LIBYUV_H420_TO_ARGB libyuv::H420ToARGB
#elif SK_R32_SHIFT == 0 && SK_G32_SHIFT == 8 && SK_B32_SHIFT == 16 && \
SK_A32_SHIFT == 24
#define LIBYUV_I420_TO_ARGB libyuv::I420ToABGR
#define LIBYUV_I422_TO_ARGB libyuv::I422ToABGR
+#define LIBYUV_I444_TO_ARGB libyuv::I444ToABGR
+#define LIBYUV_I420ALPHA_TO_ARGB libyuv::I420AlphaToABGR
+#define LIBYUV_J420_TO_ARGB libyuv::J420ToABGR
+#define LIBYUV_H420_TO_ARGB libyuv::H420ToABGR
#else
#error Unexpected Skia ARGB_8888 layout!
#endif
@@ -40,54 +47,17 @@ namespace media {
namespace {
-// This class keeps two temporary resources; software bitmap, hardware bitmap.
-// If both bitmap are created and then only software bitmap is updated every
-// frame, hardware bitmap outlives until the media player dies. So we delete
-// a temporary resource if it is not used for 3 sec.
+// This class keeps the last image drawn.
+// We delete the temporary resource if it is not used for 3 seconds.
const int kTemporaryResourceDeletionDelay = 3; // Seconds;
-bool CheckColorSpace(const scoped_refptr<VideoFrame>& video_frame,
- VideoFrame::ColorSpace color_space) {
+bool CheckColorSpace(const VideoFrame* video_frame, ColorSpace color_space) {
int result;
return video_frame->metadata()->GetInteger(
VideoFrameMetadata::COLOR_SPACE, &result) &&
result == color_space;
}
-bool IsSkBitmapProperlySizedTexture(const SkBitmap* bitmap,
- const gfx::Size& size) {
- return bitmap->getTexture() && bitmap->width() == size.width() &&
- bitmap->height() == size.height();
-}
-
-bool AllocateSkBitmapTexture(GrContext* gr,
- SkBitmap* bitmap,
- const gfx::Size& size) {
- DCHECK(gr);
- GrTextureDesc desc;
- // Use kRGBA_8888_GrPixelConfig, not kSkia8888_GrPixelConfig, to avoid
- // RGBA to BGRA conversion.
- desc.fConfig = kRGBA_8888_GrPixelConfig;
- desc.fFlags = kRenderTarget_GrSurfaceFlag;
- desc.fSampleCnt = 0;
- desc.fOrigin = kTopLeft_GrSurfaceOrigin;
- desc.fWidth = size.width();
- desc.fHeight = size.height();
- skia::RefPtr<GrTexture> texture = skia::AdoptRef(
- gr->textureProvider()->refScratchTexture(
- desc, GrTextureProvider::kExact_ScratchTexMatch));
- if (!texture.get())
- return false;
-
- SkImageInfo info = SkImageInfo::MakeN32Premul(desc.fWidth, desc.fHeight);
- SkGrPixelRef* pixel_ref = SkNEW_ARGS(SkGrPixelRef, (info, texture.get()));
- if (!pixel_ref)
- return false;
- bitmap->setInfo(info);
- bitmap->setPixelRef(pixel_ref)->unref();
- return true;
-}
-
class SyncPointClientImpl : public VideoFrame::SyncPointClient {
public:
explicit SyncPointClientImpl(gpu::gles2::GLES2Interface* gl) : gl_(gl) {}
@@ -103,12 +73,12 @@ class SyncPointClientImpl : public VideoFrame::SyncPointClient {
DISALLOW_IMPLICIT_CONSTRUCTORS(SyncPointClientImpl);
};
-scoped_ptr<SkImage> CreateSkImageFromVideoFrameYUVTextures(
- VideoFrame* video_frame,
+skia::RefPtr<SkImage> NewSkImageFromVideoFrameYUVTextures(
+ const VideoFrame* video_frame,
const Context3D& context_3d) {
// Support only TEXTURE_YUV_420.
DCHECK(video_frame->HasTextures());
- DCHECK_EQ(media::VideoFrame::I420, video_frame->format());
+ DCHECK_EQ(media::PIXEL_FORMAT_I420, video_frame->format());
DCHECK_EQ(3u, media::VideoFrame::NumPlanes(video_frame->format()));
gpu::gles2::GLES2Interface* gl = context_3d.gl;
@@ -153,47 +123,60 @@ scoped_ptr<SkImage> CreateSkImageFromVideoFrameYUVTextures(
{uv_tex_size.width(), uv_tex_size.height()},
};
- // TODO(dcastagna): Skia currently doesn't support Rec709 YUV conversion.
- DCHECK(!CheckColorSpace(video_frame, VideoFrame::COLOR_SPACE_HD_REC709));
SkYUVColorSpace color_space = kRec601_SkYUVColorSpace;
- if (CheckColorSpace(video_frame, VideoFrame::COLOR_SPACE_JPEG))
+ if (CheckColorSpace(video_frame, media::COLOR_SPACE_JPEG))
color_space = kJPEG_SkYUVColorSpace;
+ else if (CheckColorSpace(video_frame, media::COLOR_SPACE_HD_REC709))
+ color_space = kRec709_SkYUVColorSpace;
SkImage* img = SkImage::NewFromYUVTexturesCopy(context_3d.gr_context,
color_space, handles, yuvSizes,
kTopLeft_GrSurfaceOrigin);
- DCHECK(img);
gl->DeleteTextures(3, source_textures);
- SyncPointClientImpl client(gl);
- video_frame->UpdateReleaseSyncPoint(&client);
- return make_scoped_ptr(img);
+ return skia::AdoptRef(img);
}
-bool CopyVideoFrameSingleTextureToSkBitmap(VideoFrame* video_frame,
- SkBitmap* bitmap,
- const Context3D& context_3d) {
- // Check if we could reuse existing texture based bitmap.
- // Otherwise, release existing texture based bitmap and allocate
- // a new one based on video size.
- if (!IsSkBitmapProperlySizedTexture(bitmap,
- video_frame->visible_rect().size())) {
- if (!AllocateSkBitmapTexture(context_3d.gr_context, bitmap,
- video_frame->visible_rect().size())) {
- return false;
- }
- }
+// Creates a SkImage from a |video_frame| backed by native resources.
+// The SkImage will take ownership of the underlying resource.
+skia::RefPtr<SkImage> NewSkImageFromVideoFrameNative(
+ VideoFrame* video_frame,
+ const Context3D& context_3d) {
+ DCHECK(PIXEL_FORMAT_ARGB == video_frame->format() ||
+ PIXEL_FORMAT_NV12 == video_frame->format() ||
+ PIXEL_FORMAT_UYVY == video_frame->format());
+
+ const gpu::MailboxHolder& mailbox_holder = video_frame->mailbox_holder(0);
+ DCHECK(mailbox_holder.texture_target == GL_TEXTURE_2D ||
+ mailbox_holder.texture_target == GL_TEXTURE_RECTANGLE_ARB ||
+ mailbox_holder.texture_target == GL_TEXTURE_EXTERNAL_OES)
+ << mailbox_holder.texture_target;
- unsigned texture_id =
- static_cast<unsigned>((bitmap->getTexture())->getTextureHandle());
- // If CopyVideoFrameSingleTextureToGLTexture() changes the state of the
- // |texture_id|, it's needed to invalidate the state cached in skia,
- // but currently the state isn't changed.
-
- SkCanvasVideoRenderer::CopyVideoFrameSingleTextureToGLTexture(
- context_3d.gl, video_frame, texture_id, GL_RGBA, GL_UNSIGNED_BYTE, true,
- false);
- bitmap->notifyPixelsChanged();
- return true;
+ gpu::gles2::GLES2Interface* gl = context_3d.gl;
+ unsigned source_texture = 0;
+ if (mailbox_holder.texture_target != GL_TEXTURE_2D) {
+ // TODO(dcastagna): At the moment Skia doesn't support targets different
+ // than GL_TEXTURE_2D. Avoid this copy once
+ // https://code.google.com/p/skia/issues/detail?id=3868 is addressed.
+ gl->GenTextures(1, &source_texture);
+ DCHECK(source_texture);
+ gl->BindTexture(GL_TEXTURE_2D, source_texture);
+ SkCanvasVideoRenderer::CopyVideoFrameSingleTextureToGLTexture(
+ gl, video_frame, source_texture, GL_RGBA, GL_UNSIGNED_BYTE, true,
+ false);
+ } else {
+ gl->WaitSyncPointCHROMIUM(mailbox_holder.sync_point);
+ source_texture = gl->CreateAndConsumeTextureCHROMIUM(
+ mailbox_holder.texture_target, mailbox_holder.mailbox.name);
+ }
+ GrBackendTextureDesc desc;
+ desc.fFlags = kRenderTarget_GrBackendTextureFlag;
+ desc.fOrigin = kTopLeft_GrSurfaceOrigin;
+ desc.fWidth = video_frame->coded_size().width();
+ desc.fHeight = video_frame->coded_size().height();
+ desc.fConfig = kRGBA_8888_GrPixelConfig;
+ desc.fTextureHandle = source_texture;
+ return skia::AdoptRef(
+ SkImage::NewFromAdoptedTexture(context_3d.gr_context, desc));
}
} // anonymous namespace
@@ -204,45 +187,41 @@ class VideoImageGenerator : public SkImageGenerator {
VideoImageGenerator(const scoped_refptr<VideoFrame>& frame)
: SkImageGenerator(
SkImageInfo::MakeN32Premul(frame->visible_rect().width(),
- frame->visible_rect().height()))
- , frame_(frame) {
- DCHECK(frame_.get());
+ frame->visible_rect().height())),
+ frame_(frame) {
+ DCHECK(!frame_->HasTextures());
}
~VideoImageGenerator() override {}
- void set_frame(const scoped_refptr<VideoFrame>& frame) { frame_ = frame; }
-
protected:
- Result onGetPixels(const SkImageInfo& info,
- void* pixels,
- size_t row_bytes,
- const Options&,
- SkPMColor ctable[],
- int* ctable_count) override {
- if (!frame_.get())
- return kInvalidInput;
+ bool onGetPixels(const SkImageInfo& info,
+ void* pixels,
+ size_t row_bytes,
+ SkPMColor ctable[],
+ int* ctable_count) override {
// If skia couldn't do the YUV conversion on GPU, we will on CPU.
- SkCanvasVideoRenderer::ConvertVideoFrameToRGBPixels(
- frame_, pixels, row_bytes);
- return kSuccess;
+ SkCanvasVideoRenderer::ConvertVideoFrameToRGBPixels(frame_.get(), pixels,
+ row_bytes);
+ return true;
}
bool onGetYUV8Planes(SkISize sizes[3],
void* planes[3],
size_t row_bytes[3],
SkYUVColorSpace* color_space) override {
- if (!frame_.get() || !VideoFrame::IsYuvPlanar(frame_->format()) ||
- // TODO(rileya): Skia currently doesn't support Rec709 YUV conversion,
- // or YUVA conversion. Remove this case once it does. As-is we will
- // fall back on the pure-software path in this case.
- CheckColorSpace(frame_, VideoFrame::COLOR_SPACE_HD_REC709) ||
- frame_->format() == VideoFrame::YV12A) {
+ if (!media::IsYuvPlanar(frame_->format()) ||
+ // TODO(rileya): Skia currently doesn't support YUVA conversion. Remove
+ // this case once it does. As-is we will fall back on the pure-software
+ // path in this case.
+ frame_->format() == PIXEL_FORMAT_YV12A) {
return false;
}
if (color_space) {
- if (CheckColorSpace(frame_, VideoFrame::COLOR_SPACE_JPEG))
+ if (CheckColorSpace(frame_.get(), COLOR_SPACE_JPEG))
*color_space = kJPEG_SkYUVColorSpace;
+ else if (CheckColorSpace(frame_.get(), COLOR_SPACE_HD_REC709))
+ *color_space = kRec709_SkYUVColorSpace;
else
*color_space = kRec601_SkYUVColorSpace;
}
@@ -258,7 +237,8 @@ class VideoImageGenerator : public SkImageGenerator {
}
if (row_bytes && planes) {
size_t offset;
- const int y_shift = (frame_->format() == VideoFrame::YV16) ? 0 : 1;
+ const int y_shift =
+ (frame_->format() == media::PIXEL_FORMAT_YV16) ? 0 : 1;
if (plane == VideoFrame::kYPlane) {
offset = (frame_->stride(VideoFrame::kYPlane) *
frame_->visible_rect().y()) +
@@ -301,23 +281,15 @@ class VideoImageGenerator : public SkImageGenerator {
};
SkCanvasVideoRenderer::SkCanvasVideoRenderer()
- : last_frame_timestamp_(media::kNoTimestamp()),
- frame_deleting_timer_(
+ : last_image_deleting_timer_(
FROM_HERE,
base::TimeDelta::FromSeconds(kTemporaryResourceDeletionDelay),
this,
- &SkCanvasVideoRenderer::ResetLastFrame),
- accelerated_generator_(nullptr),
- accelerated_last_frame_timestamp_(media::kNoTimestamp()),
- accelerated_frame_deleting_timer_(
- FROM_HERE,
- base::TimeDelta::FromSeconds(kTemporaryResourceDeletionDelay),
- this,
- &SkCanvasVideoRenderer::ResetAcceleratedLastFrame) {
- last_frame_.setIsVolatile(true);
-}
+ &SkCanvasVideoRenderer::ResetCache) {}
-SkCanvasVideoRenderer::~SkCanvasVideoRenderer() {}
+SkCanvasVideoRenderer::~SkCanvasVideoRenderer() {
+ ResetCache();
+}
void SkCanvasVideoRenderer::Paint(const scoped_refptr<VideoFrame>& video_frame,
SkCanvas* canvas,
@@ -339,105 +311,48 @@ void SkCanvasVideoRenderer::Paint(const scoped_refptr<VideoFrame>& video_frame,
// Paint black rectangle if there isn't a frame available or the
// frame has an unexpected format.
if (!video_frame.get() || video_frame->natural_size().IsEmpty() ||
- !(VideoFrame::IsYuvPlanar(video_frame->format()) ||
+ !(media::IsYuvPlanar(video_frame->format()) ||
video_frame->HasTextures())) {
canvas->drawRect(dest, paint);
canvas->flush();
return;
}
- SkBitmap* target_frame = nullptr;
+ gpu::gles2::GLES2Interface* gl = context_3d.gl;
- if (video_frame->HasTextures()) {
- // Draw HW Video on both SW and HW Canvas.
- // In SW Canvas case, rely on skia drawing Ganesh SkBitmap on SW SkCanvas.
- if (accelerated_last_frame_.isNull() ||
- video_frame->timestamp() != accelerated_last_frame_timestamp_) {
- DCHECK(context_3d.gl);
+ if (!last_image_ || video_frame->timestamp() != last_timestamp_) {
+ ResetCache();
+ // Generate a new image.
+ // Note: Skia will hold onto |video_frame| via |video_generator| only when
+ // |video_frame| is software.
+ // Holding |video_frame| longer than this call when using GPUVideoDecoder
+ // could cause problems since the pool of VideoFrames has a fixed size.
+ if (video_frame->HasTextures()) {
DCHECK(context_3d.gr_context);
- if (accelerated_generator_) {
- // Reset SkBitmap used in SWVideo-to-HWCanvas path.
- accelerated_last_frame_.reset();
- accelerated_generator_ = nullptr;
- }
-
+ DCHECK(gl);
if (media::VideoFrame::NumPlanes(video_frame->format()) == 1) {
- accelerated_last_image_.reset();
- if (!CopyVideoFrameSingleTextureToSkBitmap(
- video_frame.get(), &accelerated_last_frame_, context_3d)) {
- NOTREACHED();
- return;
- }
- DCHECK(video_frame->visible_rect().width() ==
- accelerated_last_frame_.width() &&
- video_frame->visible_rect().height() ==
- accelerated_last_frame_.height());
+ last_image_ =
+ NewSkImageFromVideoFrameNative(video_frame.get(), context_3d);
} else {
- accelerated_last_image_ = CreateSkImageFromVideoFrameYUVTextures(
- video_frame.get(), context_3d);
- DCHECK(accelerated_last_image_);
+ last_image_ =
+ NewSkImageFromVideoFrameYUVTextures(video_frame.get(), context_3d);
}
- accelerated_last_frame_timestamp_ = video_frame->timestamp();
+ } else {
+ auto video_generator = new VideoImageGenerator(video_frame);
+ last_image_ = skia::AdoptRef(SkImage::NewFromGenerator(video_generator));
}
- target_frame = &accelerated_last_frame_;
- accelerated_frame_deleting_timer_.Reset();
- } else if (canvas->getGrContext()) {
- if (accelerated_last_frame_.isNull() ||
- video_frame->timestamp() != accelerated_last_frame_timestamp_) {
- // Draw SW Video on HW Canvas.
- if (!accelerated_generator_ && !accelerated_last_frame_.isNull()) {
- // Reset SkBitmap used in HWVideo-to-HWCanvas path.
- accelerated_last_frame_.reset();
- }
- accelerated_generator_ = new VideoImageGenerator(video_frame);
-
- // Note: This takes ownership of |accelerated_generator_|.
- if (!SkInstallDiscardablePixelRef(accelerated_generator_,
- &accelerated_last_frame_)) {
- NOTREACHED();
- return;
- }
- DCHECK(video_frame->visible_rect().width() ==
- accelerated_last_frame_.width() &&
- video_frame->visible_rect().height() ==
- accelerated_last_frame_.height());
-
- accelerated_last_frame_timestamp_ = video_frame->timestamp();
- } else if (accelerated_generator_) {
- accelerated_generator_->set_frame(video_frame);
- }
- target_frame = &accelerated_last_frame_;
- accelerated_frame_deleting_timer_.Reset();
- } else {
- // Draw SW Video on SW Canvas.
- DCHECK(video_frame->IsMappable());
- if (last_frame_.isNull() ||
- video_frame->timestamp() != last_frame_timestamp_) {
- // Check if |bitmap| needs to be (re)allocated.
- if (last_frame_.isNull() ||
- last_frame_.width() != video_frame->visible_rect().width() ||
- last_frame_.height() != video_frame->visible_rect().height()) {
- last_frame_.allocN32Pixels(video_frame->visible_rect().width(),
- video_frame->visible_rect().height());
- last_frame_.setIsVolatile(true);
- }
- last_frame_.lockPixels();
- ConvertVideoFrameToRGBPixels(
- video_frame, last_frame_.getPixels(), last_frame_.rowBytes());
- last_frame_.notifyPixelsChanged();
- last_frame_.unlockPixels();
- last_frame_timestamp_ = video_frame->timestamp();
- }
- target_frame = &last_frame_;
- frame_deleting_timer_.Reset();
+ if (!last_image_) // Couldn't create the SkImage.
+ return;
+ last_timestamp_ = video_frame->timestamp();
}
+ last_image_deleting_timer_.Reset();
paint.setXfermodeMode(mode);
paint.setFilterQuality(kLow_SkFilterQuality);
const bool need_transform =
video_rotation != VIDEO_ROTATION_0 ||
- dest_rect.size() != video_frame->visible_rect().size() ||
+ dest_rect.size() != gfx::SizeF(video_frame->visible_rect().size()) ||
!dest_rect.origin().IsOrigin();
if (need_transform) {
canvas->save();
@@ -467,95 +382,100 @@ void SkCanvasVideoRenderer::Paint(const scoped_refptr<VideoFrame>& video_frame,
gfx::SizeF(rotated_dest_size.height(), rotated_dest_size.width());
}
canvas->scale(
- SkFloatToScalar(rotated_dest_size.width() / target_frame->width()),
- SkFloatToScalar(rotated_dest_size.height() / target_frame->height()));
- canvas->translate(-SkFloatToScalar(target_frame->width() * 0.5f),
- -SkFloatToScalar(target_frame->height() * 0.5f));
+ SkFloatToScalar(rotated_dest_size.width() / last_image_->width()),
+ SkFloatToScalar(rotated_dest_size.height() / last_image_->height()));
+ canvas->translate(-SkFloatToScalar(last_image_->width() * 0.5f),
+ -SkFloatToScalar(last_image_->height() * 0.5f));
}
- if (accelerated_last_image_) {
- canvas->drawImage(accelerated_last_image_.get(), 0, 0, &paint);
+
+ // This is a workaround for crbug.com/524717. SkBitmaps are read back before a
+ // SkPicture is sent to multiple threads while SkImages are not. The long term
+ // solution is for Skia to provide a SkPicture filter that makes a picture
+ // safe for multiple CPU raster threads (skbug.com/4321). We limit the
+ // workaround to cases where the src frame is a texture and the canvas is
+ // recording.
+ if (last_image_.get()->getTexture() &&
+ canvas->imageInfo().colorType() == kUnknown_SkColorType) {
+ SkBitmap bmp;
+ GrWrapTextureInBitmap(last_image_.get()->getTexture(),
+ last_image_.get()->width(), last_image_.get()->height(), true, &bmp);
+ // Even though the bitmap is logically immutable we do not mark it as such
+ // because doing so would defer readback until rasterization, which will be
+ // on another thread and is therefore unsafe.
+ canvas->drawBitmap(bmp, 0, 0, &paint);
} else {
- canvas->drawBitmap(*target_frame, 0, 0, &paint);
+ canvas->drawImage(last_image_.get(), 0, 0, &paint);
}
+
if (need_transform)
canvas->restore();
+ // Make sure to flush so we can remove the videoframe from the generator.
canvas->flush();
- // SkCanvas::flush() causes the generator to generate SkImage, so delete
- // |video_frame| not to be outlived.
- if (canvas->getGrContext() && accelerated_generator_)
- accelerated_generator_->set_frame(nullptr);
+
+ if (video_frame->HasTextures()) {
+ DCHECK(gl);
+ SyncPointClientImpl client(gl);
+ video_frame->UpdateReleaseSyncPoint(&client);
+ }
}
void SkCanvasVideoRenderer::Copy(const scoped_refptr<VideoFrame>& video_frame,
SkCanvas* canvas,
const Context3D& context_3d) {
- Paint(video_frame, canvas, video_frame->visible_rect(), 0xff,
+ Paint(video_frame, canvas, gfx::RectF(video_frame->visible_rect()), 0xff,
SkXfermode::kSrc_Mode, media::VIDEO_ROTATION_0, context_3d);
}
// static
void SkCanvasVideoRenderer::ConvertVideoFrameToRGBPixels(
- const scoped_refptr<VideoFrame>& video_frame,
+ const VideoFrame* video_frame,
void* rgb_pixels,
size_t row_bytes) {
if (!video_frame->IsMappable()) {
NOTREACHED() << "Cannot extract pixels from non-CPU frame formats.";
return;
}
- if (!VideoFrame::IsYuvPlanar(video_frame->format())) {
+ if (!media::IsYuvPlanar(video_frame->format())) {
NOTREACHED() << "Non YUV formats are not supported";
return;
}
-
DCHECK_EQ(video_frame->stride(VideoFrame::kUPlane),
video_frame->stride(VideoFrame::kVPlane));
- const int y_shift =
- (video_frame->format() == VideoFrame::YV16) ? 0 : 1;
- // Use the "left" and "top" of the destination rect to locate the offset
- // in Y, U and V planes.
- const size_t y_offset = (video_frame->stride(VideoFrame::kYPlane) *
- video_frame->visible_rect().y()) +
- video_frame->visible_rect().x();
- // For format YV12, there is one U, V value per 2x2 block.
- // For format YV16, there is one U, V value per 2x1 block.
- const size_t uv_offset = (video_frame->stride(VideoFrame::kUPlane) *
- (video_frame->visible_rect().y() >> y_shift)) +
- (video_frame->visible_rect().x() >> 1);
-
switch (video_frame->format()) {
- case VideoFrame::YV12:
- case VideoFrame::I420:
- if (CheckColorSpace(video_frame, VideoFrame::COLOR_SPACE_JPEG)) {
- ConvertYUVToRGB32(
- video_frame->data(VideoFrame::kYPlane) + y_offset,
- video_frame->data(VideoFrame::kUPlane) + uv_offset,
- video_frame->data(VideoFrame::kVPlane) + uv_offset,
- static_cast<uint8*>(rgb_pixels),
- video_frame->visible_rect().width(),
- video_frame->visible_rect().height(),
- video_frame->stride(VideoFrame::kYPlane),
- video_frame->stride(VideoFrame::kUPlane),
- row_bytes,
- YV12J);
- } else if (CheckColorSpace(video_frame,
- VideoFrame::COLOR_SPACE_HD_REC709)) {
- ConvertYUVToRGB32(video_frame->data(VideoFrame::kYPlane) + y_offset,
- video_frame->data(VideoFrame::kUPlane) + uv_offset,
- video_frame->data(VideoFrame::kVPlane) + uv_offset,
- static_cast<uint8*>(rgb_pixels),
- video_frame->visible_rect().width(),
- video_frame->visible_rect().height(),
- video_frame->stride(VideoFrame::kYPlane),
- video_frame->stride(VideoFrame::kUPlane), row_bytes,
- YV12HD);
+ case PIXEL_FORMAT_YV12:
+ case PIXEL_FORMAT_I420:
+ if (CheckColorSpace(video_frame, COLOR_SPACE_JPEG)) {
+ LIBYUV_J420_TO_ARGB(
+ video_frame->visible_data(VideoFrame::kYPlane),
+ video_frame->stride(VideoFrame::kYPlane),
+ video_frame->visible_data(VideoFrame::kUPlane),
+ video_frame->stride(VideoFrame::kUPlane),
+ video_frame->visible_data(VideoFrame::kVPlane),
+ video_frame->stride(VideoFrame::kVPlane),
+ static_cast<uint8*>(rgb_pixels),
+ row_bytes,
+ video_frame->visible_rect().width(),
+ video_frame->visible_rect().height());
+ } else if (CheckColorSpace(video_frame, COLOR_SPACE_HD_REC709)) {
+ LIBYUV_H420_TO_ARGB(
+ video_frame->visible_data(VideoFrame::kYPlane),
+ video_frame->stride(VideoFrame::kYPlane),
+ video_frame->visible_data(VideoFrame::kUPlane),
+ video_frame->stride(VideoFrame::kUPlane),
+ video_frame->visible_data(VideoFrame::kVPlane),
+ video_frame->stride(VideoFrame::kVPlane),
+ static_cast<uint8*>(rgb_pixels),
+ row_bytes,
+ video_frame->visible_rect().width(),
+ video_frame->visible_rect().height());
} else {
LIBYUV_I420_TO_ARGB(
- video_frame->data(VideoFrame::kYPlane) + y_offset,
+ video_frame->visible_data(VideoFrame::kYPlane),
video_frame->stride(VideoFrame::kYPlane),
- video_frame->data(VideoFrame::kUPlane) + uv_offset,
+ video_frame->visible_data(VideoFrame::kUPlane),
video_frame->stride(VideoFrame::kUPlane),
- video_frame->data(VideoFrame::kVPlane) + uv_offset,
+ video_frame->visible_data(VideoFrame::kVPlane),
video_frame->stride(VideoFrame::kVPlane),
static_cast<uint8*>(rgb_pixels),
row_bytes,
@@ -563,13 +483,13 @@ void SkCanvasVideoRenderer::ConvertVideoFrameToRGBPixels(
video_frame->visible_rect().height());
}
break;
- case VideoFrame::YV16:
+ case PIXEL_FORMAT_YV16:
LIBYUV_I422_TO_ARGB(
- video_frame->data(VideoFrame::kYPlane) + y_offset,
+ video_frame->visible_data(VideoFrame::kYPlane),
video_frame->stride(VideoFrame::kYPlane),
- video_frame->data(VideoFrame::kUPlane) + uv_offset,
+ video_frame->visible_data(VideoFrame::kUPlane),
video_frame->stride(VideoFrame::kUPlane),
- video_frame->data(VideoFrame::kVPlane) + uv_offset,
+ video_frame->visible_data(VideoFrame::kVPlane),
video_frame->stride(VideoFrame::kVPlane),
static_cast<uint8*>(rgb_pixels),
row_bytes,
@@ -577,53 +497,47 @@ void SkCanvasVideoRenderer::ConvertVideoFrameToRGBPixels(
video_frame->visible_rect().height());
break;
- case VideoFrame::YV12A:
- // Since libyuv doesn't support YUVA, fallback to media, which is not ARM
- // optimized.
- // TODO(fbarchard, mtomasz): Use libyuv, then copy the alpha channel.
- ConvertYUVAToARGB(
- video_frame->data(VideoFrame::kYPlane) + y_offset,
- video_frame->data(VideoFrame::kUPlane) + uv_offset,
- video_frame->data(VideoFrame::kVPlane) + uv_offset,
- video_frame->data(VideoFrame::kAPlane),
- static_cast<uint8*>(rgb_pixels),
- video_frame->visible_rect().width(),
- video_frame->visible_rect().height(),
+ case PIXEL_FORMAT_YV12A:
+ LIBYUV_I420ALPHA_TO_ARGB(
+ video_frame->visible_data(VideoFrame::kYPlane),
video_frame->stride(VideoFrame::kYPlane),
+ video_frame->visible_data(VideoFrame::kUPlane),
video_frame->stride(VideoFrame::kUPlane),
+ video_frame->visible_data(VideoFrame::kVPlane),
+ video_frame->stride(VideoFrame::kVPlane),
+ video_frame->visible_data(VideoFrame::kAPlane),
video_frame->stride(VideoFrame::kAPlane),
+ static_cast<uint8*>(rgb_pixels),
row_bytes,
- YV12);
+ video_frame->visible_rect().width(),
+ video_frame->visible_rect().height(),
+ 1); // 1 = enable RGB premultiplication by Alpha.
break;
- case VideoFrame::YV24:
- libyuv::I444ToARGB(
- video_frame->data(VideoFrame::kYPlane) + y_offset,
+ case PIXEL_FORMAT_YV24:
+ LIBYUV_I444_TO_ARGB(
+ video_frame->visible_data(VideoFrame::kYPlane),
video_frame->stride(VideoFrame::kYPlane),
- video_frame->data(VideoFrame::kUPlane) + uv_offset,
+ video_frame->visible_data(VideoFrame::kUPlane),
video_frame->stride(VideoFrame::kUPlane),
- video_frame->data(VideoFrame::kVPlane) + uv_offset,
+ video_frame->visible_data(VideoFrame::kVPlane),
video_frame->stride(VideoFrame::kVPlane),
static_cast<uint8*>(rgb_pixels),
row_bytes,
video_frame->visible_rect().width(),
video_frame->visible_rect().height());
-#if SK_R32_SHIFT == 0 && SK_G32_SHIFT == 8 && SK_B32_SHIFT == 16 && \
- SK_A32_SHIFT == 24
- libyuv::ARGBToABGR(static_cast<uint8*>(rgb_pixels),
- row_bytes,
- static_cast<uint8*>(rgb_pixels),
- row_bytes,
- video_frame->visible_rect().width(),
- video_frame->visible_rect().height());
-#endif
break;
-#if defined(OS_MACOSX) || defined(OS_CHROMEOS)
- case VideoFrame::NV12:
-#endif
- case VideoFrame::ARGB:
- case VideoFrame::XRGB:
- case VideoFrame::UNKNOWN:
+ case PIXEL_FORMAT_NV12:
+ case PIXEL_FORMAT_NV21:
+ case PIXEL_FORMAT_UYVY:
+ case PIXEL_FORMAT_YUY2:
+ case PIXEL_FORMAT_ARGB:
+ case PIXEL_FORMAT_XRGB:
+ case PIXEL_FORMAT_RGB24:
+ case PIXEL_FORMAT_RGB32:
+ case PIXEL_FORMAT_MJPEG:
+ case PIXEL_FORMAT_MT21:
+ case PIXEL_FORMAT_UNKNOWN:
NOTREACHED();
}
}
@@ -644,7 +558,8 @@ void SkCanvasVideoRenderer::CopyVideoFrameSingleTextureToGLTexture(
const gpu::MailboxHolder& mailbox_holder = video_frame->mailbox_holder(0);
DCHECK(mailbox_holder.texture_target == GL_TEXTURE_2D ||
mailbox_holder.texture_target == GL_TEXTURE_RECTANGLE_ARB ||
- mailbox_holder.texture_target == GL_TEXTURE_EXTERNAL_OES);
+ mailbox_holder.texture_target == GL_TEXTURE_EXTERNAL_OES)
+ << mailbox_holder.texture_target;
gl->WaitSyncPointCHROMIUM(mailbox_holder.sync_point);
uint32 source_texture = gl->CreateAndConsumeTextureCHROMIUM(
@@ -667,16 +582,10 @@ void SkCanvasVideoRenderer::CopyVideoFrameSingleTextureToGLTexture(
video_frame->UpdateReleaseSyncPoint(&client);
}
-void SkCanvasVideoRenderer::ResetLastFrame() {
- last_frame_.reset();
- last_frame_timestamp_ = media::kNoTimestamp();
-}
-
-void SkCanvasVideoRenderer::ResetAcceleratedLastFrame() {
- accelerated_last_image_.reset();
- accelerated_last_frame_.reset();
- accelerated_generator_ = nullptr;
- accelerated_last_frame_timestamp_ = media::kNoTimestamp();
+void SkCanvasVideoRenderer::ResetCache() {
+ // Clear cached values.
+ last_image_ = nullptr;
+ last_timestamp_ = kNoTimestamp();
}
} // namespace media
diff --git a/chromium/media/blink/skcanvas_video_renderer.h b/chromium/media/blink/skcanvas_video_renderer.h
index 504495321b9..0bb7b39a06e 100644
--- a/chromium/media/blink/skcanvas_video_renderer.h
+++ b/chromium/media/blink/skcanvas_video_renderer.h
@@ -9,22 +9,25 @@
#include "base/time/time.h"
#include "base/timer/timer.h"
#include "media/base/media_export.h"
+#include "media/base/timestamp_constants.h"
+#include "media/base/video_frame.h"
#include "media/base/video_rotation.h"
#include "media/filters/context_3d.h"
-#include "third_party/skia/include/core/SkBitmap.h"
+#include "skia/ext/refptr.h"
#include "third_party/skia/include/core/SkImage.h"
#include "third_party/skia/include/core/SkXfermode.h"
-#include "ui/gfx/geometry/rect.h"
class SkCanvas;
+class SkImage;
-namespace media {
+namespace gfx {
+class RectF;
+}
-class VideoFrame;
+namespace media {
class VideoImageGenerator;
-// Handles rendering of VideoFrames to SkCanvases, doing any necessary YUV
-// conversion and caching of resulting RGB bitmaps.
+// Handles rendering of VideoFrames to SkCanvases.
class MEDIA_EXPORT SkCanvasVideoRenderer {
public:
SkCanvasVideoRenderer();
@@ -32,7 +35,7 @@ class MEDIA_EXPORT SkCanvasVideoRenderer {
// Paints |video_frame| on |canvas|, scaling and rotating the result to fit
// dimensions specified by |dest_rect|.
- // If the format of |video_frame| is VideoFrame::NATIVE_TEXTURE, |context_3d|
+ // If the format of |video_frame| is PIXEL_FORMAT_NATIVE_TEXTURE, |context_3d|
// must be provided.
//
// Black will be painted on |canvas| if |video_frame| is null.
@@ -45,7 +48,7 @@ class MEDIA_EXPORT SkCanvasVideoRenderer {
const Context3D& context_3d);
// Copy |video_frame| on |canvas|.
- // If the format of |video_frame| is VideoFrame::NATIVE_TEXTURE, |context_3d|
+ // If the format of |video_frame| is PIXEL_FORMAT_NATIVE_TEXTURE, |context_3d|
// must be provided.
void Copy(const scoped_refptr<VideoFrame>& video_frame,
SkCanvas* canvas,
@@ -54,10 +57,9 @@ class MEDIA_EXPORT SkCanvasVideoRenderer {
// Convert the contents of |video_frame| to raw RGB pixels. |rgb_pixels|
// should point into a buffer large enough to hold as many 32 bit RGBA pixels
// as are in the visible_rect() area of the frame.
- static void ConvertVideoFrameToRGBPixels(
- const scoped_refptr<media::VideoFrame>& video_frame,
- void* rgb_pixels,
- size_t row_bytes);
+ static void ConvertVideoFrameToRGBPixels(const media::VideoFrame* video_frame,
+ void* rgb_pixels,
+ size_t row_bytes);
// Copy the contents of texture of |video_frame| to texture |texture|.
// |level|, |internal_format|, |type| specify target texture |texture|.
@@ -72,25 +74,14 @@ class MEDIA_EXPORT SkCanvasVideoRenderer {
bool flip_y);
private:
- void ResetLastFrame();
- void ResetAcceleratedLastFrame();
-
- // An RGB bitmap and corresponding timestamp of the previously converted
- // video frame data by software color space conversion.
- SkBitmap last_frame_;
- base::TimeDelta last_frame_timestamp_;
- // If |last_frame_| is not used for a while, it's deleted to save memory.
- base::DelayTimer<SkCanvasVideoRenderer> frame_deleting_timer_;
-
- // This is a hardware accelerated copy of the frame generated by
- // |accelerated_generator_|.
- // It's used when |canvas| parameter in Paint() is Ganesh canvas.
- // Note: all GrContext in SkCanvas instances are same.
- scoped_ptr<SkImage> accelerated_last_image_;
- SkBitmap accelerated_last_frame_;
- VideoImageGenerator* accelerated_generator_;
- base::TimeDelta accelerated_last_frame_timestamp_;
- base::DelayTimer<SkCanvasVideoRenderer> accelerated_frame_deleting_timer_;
+ void ResetCache();
+
+ // Last image used to draw to the canvas.
+ skia::RefPtr<SkImage> last_image_;
+ // Timestamp of the videoframe used to generate |last_image_|.
+ base::TimeDelta last_timestamp_ = media::kNoTimestamp();
+ // If |last_image_| is not used for a while, it's deleted to save memory.
+ base::DelayTimer last_image_deleting_timer_;
DISALLOW_COPY_AND_ASSIGN(SkCanvasVideoRenderer);
};
diff --git a/chromium/media/blink/skcanvas_video_renderer_unittest.cc b/chromium/media/blink/skcanvas_video_renderer_unittest.cc
index 3062d77164c..dec29bf7eb8 100644
--- a/chromium/media/blink/skcanvas_video_renderer_unittest.cc
+++ b/chromium/media/blink/skcanvas_video_renderer_unittest.cc
@@ -3,11 +3,17 @@
// found in the LICENSE file.
#include "base/message_loop/message_loop.h"
+#include "gpu/GLES2/gl2extchromium.h"
+#include "gpu/command_buffer/client/gles2_interface_stub.h"
+#include "media/base/timestamp_constants.h"
#include "media/base/video_frame.h"
#include "media/base/video_util.h"
#include "media/blink/skcanvas_video_renderer.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "third_party/skia/include/core/SkCanvas.h"
+#include "third_party/skia/include/gpu/GrContext.h"
+#include "third_party/skia/include/gpu/gl/GrGLInterface.h"
+#include "ui/gfx/geometry/rect_f.h"
using media::VideoFrame;
@@ -15,7 +21,7 @@ namespace media {
static const int kWidth = 320;
static const int kHeight = 240;
-static const gfx::Rect kNaturalRect(0, 0, kWidth, kHeight);
+static const gfx::RectF kNaturalRect(kWidth, kHeight);
// Helper for filling a |canvas| with a solid |color|.
void FillCanvas(SkCanvas* canvas, SkColor color) {
@@ -74,7 +80,7 @@ class SkCanvasVideoRendererTest : public testing::Test {
// Standard canvas.
SkCanvas* target_canvas() { return &target_canvas_; }
- private:
+ protected:
SkCanvasVideoRenderer renderer_;
scoped_refptr<VideoFrame> natural_frame_;
@@ -97,16 +103,16 @@ static SkBitmap AllocBitmap(int width, int height) {
SkCanvasVideoRendererTest::SkCanvasVideoRendererTest()
: natural_frame_(VideoFrame::CreateBlackFrame(gfx::Size(kWidth, kHeight))),
- larger_frame_(VideoFrame::CreateBlackFrame(
- gfx::Size(kWidth * 2, kHeight * 2))),
- smaller_frame_(VideoFrame::CreateBlackFrame(
- gfx::Size(kWidth / 2, kHeight / 2))),
- cropped_frame_(VideoFrame::CreateFrame(
- VideoFrame::YV12,
- gfx::Size(16, 16),
- gfx::Rect(6, 6, 8, 6),
- gfx::Size(8, 6),
- base::TimeDelta::FromMilliseconds(4))),
+ larger_frame_(
+ VideoFrame::CreateBlackFrame(gfx::Size(kWidth * 2, kHeight * 2))),
+ smaller_frame_(
+ VideoFrame::CreateBlackFrame(gfx::Size(kWidth / 2, kHeight / 2))),
+ cropped_frame_(
+ VideoFrame::CreateFrame(PIXEL_FORMAT_YV12,
+ gfx::Size(16, 16),
+ gfx::Rect(6, 6, 8, 6),
+ gfx::Size(8, 6),
+ base::TimeDelta::FromMilliseconds(4))),
target_canvas_(AllocBitmap(kWidth, kHeight)) {
// Give each frame a unique timestamp.
natural_frame_->set_timestamp(base::TimeDelta::FromMilliseconds(1));
@@ -312,15 +318,6 @@ TEST_F(SkCanvasVideoRendererTest, NoTimestamp) {
EXPECT_EQ(SK_ColorRED, GetColor(target_canvas()));
}
-TEST_F(SkCanvasVideoRendererTest, SameVideoFrame) {
- Paint(natural_frame(), target_canvas(), kRed);
- EXPECT_EQ(SK_ColorRED, GetColor(target_canvas()));
-
- // Slow paints can get cached, expect the old color value.
- Paint(natural_frame(), target_canvas(), kBlue);
- EXPECT_EQ(SK_ColorRED, GetColor(target_canvas()));
-}
-
TEST_F(SkCanvasVideoRendererTest, CroppedFrame) {
Paint(cropped_frame(), target_canvas(), kNone);
// Check the corners.
@@ -420,12 +417,9 @@ TEST_F(SkCanvasVideoRendererTest, Video_Translate) {
SkCanvas canvas(AllocBitmap(kWidth, kHeight));
FillCanvas(&canvas, SK_ColorMAGENTA);
- PaintRotated(cropped_frame(),
- &canvas,
- gfx::Rect(kWidth / 2, kHeight / 2, kWidth / 2, kHeight / 2),
- kNone,
- SkXfermode::kSrcOver_Mode,
- VIDEO_ROTATION_0);
+ PaintRotated(cropped_frame(), &canvas,
+ gfx::RectF(kWidth / 2, kHeight / 2, kWidth / 2, kHeight / 2),
+ kNone, SkXfermode::kSrcOver_Mode, VIDEO_ROTATION_0);
// Check the corners of quadrant 2 and 4.
EXPECT_EQ(SK_ColorMAGENTA, GetColorAt(&canvas, 0, 0));
EXPECT_EQ(SK_ColorMAGENTA, GetColorAt(&canvas, (kWidth / 2) - 1, 0));
@@ -443,12 +437,9 @@ TEST_F(SkCanvasVideoRendererTest, Video_Translate_Rotation_90) {
FillCanvas(&canvas, SK_ColorMAGENTA);
const gfx::Rect crop_rect = cropped_frame()->visible_rect();
- PaintRotated(cropped_frame(),
- &canvas,
- gfx::Rect(kWidth / 2, kHeight / 2, kWidth / 2, kHeight / 2),
- kNone,
- SkXfermode::kSrcOver_Mode,
- VIDEO_ROTATION_90);
+ PaintRotated(cropped_frame(), &canvas,
+ gfx::RectF(kWidth / 2, kHeight / 2, kWidth / 2, kHeight / 2),
+ kNone, SkXfermode::kSrcOver_Mode, VIDEO_ROTATION_90);
// Check the corners of quadrant 2 and 4.
EXPECT_EQ(SK_ColorMAGENTA, GetColorAt(&canvas, 0, 0));
EXPECT_EQ(SK_ColorMAGENTA, GetColorAt(&canvas, (kWidth / 2) - 1, 0));
@@ -465,12 +456,9 @@ TEST_F(SkCanvasVideoRendererTest, Video_Translate_Rotation_180) {
SkCanvas canvas(AllocBitmap(kWidth, kHeight));
FillCanvas(&canvas, SK_ColorMAGENTA);
- PaintRotated(cropped_frame(),
- &canvas,
- gfx::Rect(kWidth / 2, kHeight / 2, kWidth / 2, kHeight / 2),
- kNone,
- SkXfermode::kSrcOver_Mode,
- VIDEO_ROTATION_180);
+ PaintRotated(cropped_frame(), &canvas,
+ gfx::RectF(kWidth / 2, kHeight / 2, kWidth / 2, kHeight / 2),
+ kNone, SkXfermode::kSrcOver_Mode, VIDEO_ROTATION_180);
// Check the corners of quadrant 2 and 4.
EXPECT_EQ(SK_ColorMAGENTA, GetColorAt(&canvas, 0, 0));
EXPECT_EQ(SK_ColorMAGENTA, GetColorAt(&canvas, (kWidth / 2) - 1, 0));
@@ -487,12 +475,9 @@ TEST_F(SkCanvasVideoRendererTest, Video_Translate_Rotation_270) {
SkCanvas canvas(AllocBitmap(kWidth, kHeight));
FillCanvas(&canvas, SK_ColorMAGENTA);
- PaintRotated(cropped_frame(),
- &canvas,
- gfx::Rect(kWidth / 2, kHeight / 2, kWidth / 2, kHeight / 2),
- kNone,
- SkXfermode::kSrcOver_Mode,
- VIDEO_ROTATION_270);
+ PaintRotated(cropped_frame(), &canvas,
+ gfx::RectF(kWidth / 2, kHeight / 2, kWidth / 2, kHeight / 2),
+ kNone, SkXfermode::kSrcOver_Mode, VIDEO_ROTATION_270);
// Check the corners of quadrant 2 and 4.
EXPECT_EQ(SK_ColorMAGENTA, GetColorAt(&canvas, 0, 0));
EXPECT_EQ(SK_ColorMAGENTA, GetColorAt(&canvas, (kWidth / 2) - 1, 0));
@@ -505,4 +490,39 @@ TEST_F(SkCanvasVideoRendererTest, Video_Translate_Rotation_270) {
EXPECT_EQ(SK_ColorBLACK, GetColorAt(&canvas, kWidth / 2, kHeight - 1));
}
+namespace {
+class TestGLES2Interface : public gpu::gles2::GLES2InterfaceStub {
+ public:
+ void GenTextures(GLsizei n, GLuint* textures) override {
+ DCHECK_EQ(1, n);
+ *textures = 1;
+ }
+};
+void MailboxHoldersReleased(uint32 sync_point) {}
+} // namespace
+
+// Test that SkCanvasVideoRendererTest::Paint doesn't crash when GrContext is
+// abandoned.
+TEST_F(SkCanvasVideoRendererTest, ContextLost) {
+ skia::RefPtr<const GrGLInterface> null_interface =
+ skia::AdoptRef(GrGLCreateNullInterface());
+ auto gr_context = skia::AdoptRef(GrContext::Create(
+ kOpenGL_GrBackend,
+ reinterpret_cast<GrBackendContext>(null_interface.get())));
+ gr_context->abandonContext();
+
+ SkCanvas canvas(AllocBitmap(kWidth, kHeight));
+
+ TestGLES2Interface gles2;
+ Context3D context_3d(&gles2, gr_context.get());
+ gfx::Size size(kWidth, kHeight);
+ gpu::MailboxHolder mailbox(gpu::Mailbox::Generate(), GL_TEXTURE_RECTANGLE_ARB,
+ 0);
+ auto video_frame = VideoFrame::WrapNativeTexture(
+ PIXEL_FORMAT_UYVY, mailbox, base::Bind(MailboxHoldersReleased), size,
+ gfx::Rect(size), size, kNoTimestamp());
+
+ renderer_.Paint(video_frame, &canvas, kNaturalRect, 0xFF,
+ SkXfermode::kSrcOver_Mode, VIDEO_ROTATION_90, context_3d);
+}
} // namespace media
diff --git a/chromium/media/blink/video_frame_compositor.cc b/chromium/media/blink/video_frame_compositor.cc
index ce8ba2b82ca..073ec97462a 100644
--- a/chromium/media/blink/video_frame_compositor.cc
+++ b/chromium/media/blink/video_frame_compositor.cc
@@ -19,18 +19,23 @@ const int kBackgroundRenderingTimeoutMs = 250;
// Returns true if the format has no Alpha channel (hence is always opaque).
static bool IsOpaque(const scoped_refptr<VideoFrame>& frame) {
switch (frame->format()) {
- case VideoFrame::UNKNOWN:
- case VideoFrame::YV12:
- case VideoFrame::I420:
- case VideoFrame::YV16:
- case VideoFrame::YV24:
-#if defined(OS_MACOSX) || defined(OS_CHROMEOS)
- case VideoFrame::NV12:
-#endif
- case VideoFrame::XRGB:
+ case PIXEL_FORMAT_UNKNOWN:
+ case PIXEL_FORMAT_I420:
+ case PIXEL_FORMAT_YV12:
+ case PIXEL_FORMAT_YV16:
+ case PIXEL_FORMAT_YV24:
+ case PIXEL_FORMAT_NV12:
+ case PIXEL_FORMAT_NV21:
+ case PIXEL_FORMAT_UYVY:
+ case PIXEL_FORMAT_YUY2:
+ case PIXEL_FORMAT_XRGB:
+ case PIXEL_FORMAT_RGB24:
+ case PIXEL_FORMAT_MJPEG:
+ case PIXEL_FORMAT_MT21:
return true;
- case VideoFrame::YV12A:
- case VideoFrame::ARGB:
+ case PIXEL_FORMAT_YV12A:
+ case PIXEL_FORMAT_ARGB:
+ case PIXEL_FORMAT_RGB32:
break;
}
return false;
@@ -219,7 +224,9 @@ void VideoFrameCompositor::BackgroundRender() {
DCHECK(compositor_task_runner_->BelongsToCurrentThread());
const base::TimeTicks now = tick_clock_->NowTicks();
last_background_render_ = now;
- CallRender(now, now + last_interval_, true);
+ bool new_frame = CallRender(now, now + last_interval_, true);
+ if (new_frame && client_)
+ client_->DidReceiveFrame();
}
bool VideoFrameCompositor::CallRender(base::TimeTicks deadline_min,
diff --git a/chromium/media/blink/video_frame_compositor_unittest.cc b/chromium/media/blink/video_frame_compositor_unittest.cc
index 883102e8f55..a6b9ee11818 100644
--- a/chromium/media/blink/video_frame_compositor_unittest.cc
+++ b/chromium/media/blink/video_frame_compositor_unittest.cc
@@ -51,7 +51,7 @@ class VideoFrameCompositorTest : public testing::Test,
scoped_refptr<VideoFrame> CreateOpaqueFrame() {
gfx::Size size(8, 8);
- return VideoFrame::CreateFrame(VideoFrame::YV12, size, gfx::Rect(size),
+ return VideoFrame::CreateFrame(PIXEL_FORMAT_YV12, size, gfx::Rect(size),
size, base::TimeDelta());
}
@@ -220,7 +220,7 @@ TEST_F(VideoFrameCompositorTest, OpacityChanged) {
gfx::Size size(8, 8);
scoped_refptr<VideoFrame> opaque_frame = CreateOpaqueFrame();
scoped_refptr<VideoFrame> not_opaque_frame = VideoFrame::CreateFrame(
- VideoFrame::YV12A, size, gfx::Rect(size), size, base::TimeDelta());
+ PIXEL_FORMAT_YV12A, size, gfx::Rect(size), size, base::TimeDelta());
// Initial expectations.
EXPECT_FALSE(opaque());
@@ -258,7 +258,7 @@ TEST_F(VideoFrameCompositorTest, OpacityChanged) {
EXPECT_FALSE(opaque());
EXPECT_EQ(1, opacity_changed_count());
- EXPECT_FALSE(
+ EXPECT_TRUE(
compositor()->UpdateCurrentFrame(base::TimeTicks(), base::TimeTicks()));
RenderFrame();
EXPECT_FALSE(opaque());
@@ -285,9 +285,7 @@ TEST_F(VideoFrameCompositorTest, VideoRendererSinkFrameDropped) {
EXPECT_CALL(*this, Render(_, _, _)).WillRepeatedly(Return(opaque_frame));
StartVideoRendererSink();
- // The first UpdateCurrentFrame() after a background render, which starting
- // the sink does automatically, won't report a dropped frame.
- EXPECT_FALSE(
+ EXPECT_TRUE(
compositor()->UpdateCurrentFrame(base::TimeTicks(), base::TimeTicks()));
// Another call should trigger a dropped frame callback.
diff --git a/chromium/media/blink/webaudiosourceprovider_impl.cc b/chromium/media/blink/webaudiosourceprovider_impl.cc
index 8ca3d0bff57..6efda7226ad 100644
--- a/chromium/media/blink/webaudiosourceprovider_impl.cc
+++ b/chromium/media/blink/webaudiosourceprovider_impl.cc
@@ -163,16 +163,9 @@ bool WebAudioSourceProviderImpl::SetVolume(double volume) {
return true;
}
-void WebAudioSourceProviderImpl::SwitchOutputDevice(
- const std::string& device_id,
- const GURL& security_origin,
- const SwitchOutputDeviceCB& callback) {
+OutputDevice* WebAudioSourceProviderImpl::GetOutputDevice() {
base::AutoLock auto_lock(sink_lock_);
- if (!client_) {
- sink_->SwitchOutputDevice(device_id, security_origin, callback);
- } else {
- callback.Run(SWITCH_OUTPUT_DEVICE_RESULT_ERROR_NOT_SUPPORTED);
- }
+ return sink_->GetOutputDevice();
}
void WebAudioSourceProviderImpl::Initialize(
diff --git a/chromium/media/blink/webaudiosourceprovider_impl.h b/chromium/media/blink/webaudiosourceprovider_impl.h
index 1fdb9de8b13..bc406e57c06 100644
--- a/chromium/media/blink/webaudiosourceprovider_impl.h
+++ b/chromium/media/blink/webaudiosourceprovider_impl.h
@@ -38,9 +38,9 @@ class MEDIA_EXPORT WebAudioSourceProviderImpl
const scoped_refptr<AudioRendererSink>& sink);
// blink::WebAudioSourceProvider implementation.
- virtual void setClient(blink::WebAudioSourceProviderClient* client);
- virtual void provideInput(const blink::WebVector<float*>& audio_data,
- size_t number_of_frames);
+ void setClient(blink::WebAudioSourceProviderClient* client) override;
+ void provideInput(const blink::WebVector<float*>& audio_data,
+ size_t number_of_frames) override;
// AudioRendererSink implementation.
void Start() override;
@@ -48,14 +48,12 @@ class MEDIA_EXPORT WebAudioSourceProviderImpl
void Play() override;
void Pause() override;
bool SetVolume(double volume) override;
- void SwitchOutputDevice(const std::string& device_id,
- const GURL& security_origin,
- const SwitchOutputDeviceCB& callback) override;
+ OutputDevice* GetOutputDevice() override;
void Initialize(const AudioParameters& params,
RenderCallback* renderer) override;
protected:
- virtual ~WebAudioSourceProviderImpl();
+ ~WebAudioSourceProviderImpl() override;
private:
// Calls setFormat() on |client_| from the Blink renderer thread.
diff --git a/chromium/media/blink/webaudiosourceprovider_impl_unittest.cc b/chromium/media/blink/webaudiosourceprovider_impl_unittest.cc
index c1725fa5d06..c0d607053b8 100644
--- a/chromium/media/blink/webaudiosourceprovider_impl_unittest.cc
+++ b/chromium/media/blink/webaudiosourceprovider_impl_unittest.cc
@@ -162,7 +162,7 @@ TEST_F(WebAudioSourceProviderImplTest, ProvideInput) {
// Point the WebVector into memory owned by |bus1|.
blink::WebVector<float*> audio_data(static_cast<size_t>(bus1->channels()));
for (size_t i = 0; i < audio_data.size(); ++i)
- audio_data[i] = bus1->channel(i);
+ audio_data[i] = bus1->channel(static_cast<int>(i));
// Verify provideInput() works before Initialize() and returns silence.
bus1->channel(0)[0] = 1;
diff --git a/chromium/media/blink/webcontentdecryptionmodule_impl.cc b/chromium/media/blink/webcontentdecryptionmodule_impl.cc
index 1681b86b8b6..d6509dae53d 100644
--- a/chromium/media/blink/webcontentdecryptionmodule_impl.cc
+++ b/chromium/media/blink/webcontentdecryptionmodule_impl.cc
@@ -43,7 +43,8 @@ void WebContentDecryptionModuleImpl::Create(
// TODO(ddorwin): This should be a DCHECK.
std::string key_system_ascii = base::UTF16ToASCII(key_system);
- if (!media::IsSupportedKeySystem(key_system_ascii)) {
+ if (!media::KeySystems::GetInstance()->IsSupportedKeySystem(
+ key_system_ascii)) {
std::string message =
"Keysystem '" + key_system_ascii + "' is not supported.";
result->completeWithError(
diff --git a/chromium/media/blink/webcontentdecryptionmodule_impl.h b/chromium/media/blink/webcontentdecryptionmodule_impl.h
index 60ceb067be3..3567d8b2e1c 100644
--- a/chromium/media/blink/webcontentdecryptionmodule_impl.h
+++ b/chromium/media/blink/webcontentdecryptionmodule_impl.h
@@ -37,15 +37,15 @@ class MEDIA_EXPORT WebContentDecryptionModuleImpl
const CdmConfig& cdm_config,
scoped_ptr<blink::WebContentDecryptionModuleResult> result);
- virtual ~WebContentDecryptionModuleImpl();
+ ~WebContentDecryptionModuleImpl() override;
// blink::WebContentDecryptionModule implementation.
- virtual blink::WebContentDecryptionModuleSession* createSession();
+ blink::WebContentDecryptionModuleSession* createSession() override;
- virtual void setServerCertificate(
+ void setServerCertificate(
const uint8* server_certificate,
size_t server_certificate_length,
- blink::WebContentDecryptionModuleResult result);
+ blink::WebContentDecryptionModuleResult result) override;
// Returns the CdmContext associated with this CDM, which must not be nullptr.
// TODO(jrummell): Figure out lifetimes, as WMPI may still use the decryptor
diff --git a/chromium/media/blink/webcontentdecryptionmoduleaccess_impl.h b/chromium/media/blink/webcontentdecryptionmoduleaccess_impl.h
index 5f156984e19..72e3de96d63 100644
--- a/chromium/media/blink/webcontentdecryptionmoduleaccess_impl.h
+++ b/chromium/media/blink/webcontentdecryptionmoduleaccess_impl.h
@@ -26,12 +26,12 @@ class WebContentDecryptionModuleAccessImpl
const blink::WebMediaKeySystemConfiguration& configuration,
const CdmConfig& cdm_config,
const base::WeakPtr<WebEncryptedMediaClientImpl>& client);
- virtual ~WebContentDecryptionModuleAccessImpl();
+ ~WebContentDecryptionModuleAccessImpl() override;
// blink::WebContentDecryptionModuleAccess interface.
- virtual blink::WebMediaKeySystemConfiguration getConfiguration();
- virtual void createContentDecryptionModule(
- blink::WebContentDecryptionModuleResult result);
+ blink::WebMediaKeySystemConfiguration getConfiguration() override;
+ void createContentDecryptionModule(
+ blink::WebContentDecryptionModuleResult result) override;
private:
WebContentDecryptionModuleAccessImpl(
diff --git a/chromium/media/blink/webcontentdecryptionmodulesession_impl.cc b/chromium/media/blink/webcontentdecryptionmodulesession_impl.cc
index 174ef5079c1..15392311025 100644
--- a/chromium/media/blink/webcontentdecryptionmodulesession_impl.cc
+++ b/chromium/media/blink/webcontentdecryptionmodulesession_impl.cc
@@ -18,7 +18,6 @@
#include "media/base/media_keys.h"
#include "media/blink/cdm_result_promise.h"
#include "media/blink/cdm_session_adapter.h"
-#include "media/blink/new_session_cdm_result_promise.h"
#include "media/blink/webmediaplayer_util.h"
#include "media/cdm/json_web_key.h"
#include "media/cdm/key_system_names.h"
@@ -68,14 +67,16 @@ static blink::WebEncryptedMediaKeyInformation::KeyStatus convertStatus(
return blink::WebEncryptedMediaKeyInformation::KeyStatus::InternalError;
case media::CdmKeyInformation::EXPIRED:
return blink::WebEncryptedMediaKeyInformation::KeyStatus::Expired;
- case media::CdmKeyInformation::OUTPUT_NOT_ALLOWED:
+ case media::CdmKeyInformation::OUTPUT_RESTRICTED:
return blink::WebEncryptedMediaKeyInformation::KeyStatus::
- OutputNotAllowed;
+ OutputRestricted;
case media::CdmKeyInformation::OUTPUT_DOWNSCALED:
return blink::WebEncryptedMediaKeyInformation::KeyStatus::
OutputDownscaled;
case media::CdmKeyInformation::KEY_STATUS_PENDING:
return blink::WebEncryptedMediaKeyInformation::KeyStatus::StatusPending;
+ case media::CdmKeyInformation::RELEASED:
+ return blink::WebEncryptedMediaKeyInformation::KeyStatus::Released;
}
NOTREACHED();
@@ -165,7 +166,8 @@ static bool SanitizeSessionId(const blink::WebString& session_id,
if (!base::IsStringASCII(session_id))
return false;
- sanitized_session_id->assign(base::UTF16ToASCII(session_id));
+ sanitized_session_id->assign(
+ base::UTF16ToASCII(base::StringPiece16(session_id)));
if (sanitized_session_id->length() > limits::kMaxSessionIdLength)
return false;
@@ -225,6 +227,7 @@ WebContentDecryptionModuleSessionImpl::WebContentDecryptionModuleSessionImpl(
WebContentDecryptionModuleSessionImpl::
~WebContentDecryptionModuleSessionImpl() {
+ DCHECK(thread_checker_.CalledOnValidThread());
if (!session_id_.empty())
adapter_->UnregisterSession(session_id_);
}
@@ -245,6 +248,7 @@ void WebContentDecryptionModuleSessionImpl::initializeNewSession(
blink::WebContentDecryptionModuleResult result) {
DCHECK(init_data);
DCHECK(session_id_.empty());
+ DCHECK(thread_checker_.CalledOnValidThread());
// From https://w3c.github.io/encrypted-media/#generateRequest.
// 5. If the Key System implementation represented by this object's cdm
@@ -301,7 +305,7 @@ void WebContentDecryptionModuleSessionImpl::initializeNewSession(
result, adapter_->GetKeySystemUMAPrefix() + kGenerateRequestUMAName,
base::Bind(
&WebContentDecryptionModuleSessionImpl::OnSessionInitialized,
- base::Unretained(this)))));
+ weak_ptr_factory_.GetWeakPtr()))));
}
void WebContentDecryptionModuleSessionImpl::load(
@@ -309,6 +313,7 @@ void WebContentDecryptionModuleSessionImpl::load(
blink::WebContentDecryptionModuleResult result) {
DCHECK(!session_id.isEmpty());
DCHECK(session_id_.empty());
+ DCHECK(thread_checker_.CalledOnValidThread());
std::string sanitized_session_id;
if (!SanitizeSessionId(session_id, &sanitized_session_id)) {
@@ -327,7 +332,7 @@ void WebContentDecryptionModuleSessionImpl::load(
result, adapter_->GetKeySystemUMAPrefix() + kLoadSessionUMAName,
base::Bind(
&WebContentDecryptionModuleSessionImpl::OnSessionInitialized,
- base::Unretained(this)))));
+ weak_ptr_factory_.GetWeakPtr()))));
}
void WebContentDecryptionModuleSessionImpl::update(
@@ -336,6 +341,7 @@ void WebContentDecryptionModuleSessionImpl::update(
blink::WebContentDecryptionModuleResult result) {
DCHECK(response);
DCHECK(!session_id_.empty());
+ DCHECK(thread_checker_.CalledOnValidThread());
std::vector<uint8> sanitized_response;
if (!SanitizeResponse(adapter_->GetKeySystem(), response, response_length,
@@ -355,6 +361,7 @@ void WebContentDecryptionModuleSessionImpl::update(
void WebContentDecryptionModuleSessionImpl::close(
blink::WebContentDecryptionModuleResult result) {
DCHECK(!session_id_.empty());
+ DCHECK(thread_checker_.CalledOnValidThread());
adapter_->CloseSession(
session_id_,
scoped_ptr<SimpleCdmPromise>(new CdmResultPromise<>(
@@ -364,6 +371,7 @@ void WebContentDecryptionModuleSessionImpl::close(
void WebContentDecryptionModuleSessionImpl::remove(
blink::WebContentDecryptionModuleResult result) {
DCHECK(!session_id_.empty());
+ DCHECK(thread_checker_.CalledOnValidThread());
adapter_->RemoveSession(
session_id_,
scoped_ptr<SimpleCdmPromise>(new CdmResultPromise<>(
@@ -374,6 +382,7 @@ void WebContentDecryptionModuleSessionImpl::OnSessionMessage(
MediaKeys::MessageType message_type,
const std::vector<uint8>& message) {
DCHECK(client_) << "Client not set before message event";
+ DCHECK(thread_checker_.CalledOnValidThread());
client_->message(convertMessageType(message_type), vector_as_array(&message),
message.size());
}
@@ -381,6 +390,7 @@ void WebContentDecryptionModuleSessionImpl::OnSessionMessage(
void WebContentDecryptionModuleSessionImpl::OnSessionKeysChange(
bool has_additional_usable_key,
CdmKeysInfo keys_info) {
+ DCHECK(thread_checker_.CalledOnValidThread());
blink::WebVector<blink::WebEncryptedMediaKeyInformation> keys(
keys_info.size());
for (size_t i = 0; i < keys_info.size(); ++i) {
@@ -397,10 +407,12 @@ void WebContentDecryptionModuleSessionImpl::OnSessionKeysChange(
void WebContentDecryptionModuleSessionImpl::OnSessionExpirationUpdate(
const base::Time& new_expiry_time) {
+ DCHECK(thread_checker_.CalledOnValidThread());
client_->expirationChanged(new_expiry_time.ToJsTime());
}
void WebContentDecryptionModuleSessionImpl::OnSessionClosed() {
+ DCHECK(thread_checker_.CalledOnValidThread());
if (is_closed_)
return;
@@ -408,18 +420,22 @@ void WebContentDecryptionModuleSessionImpl::OnSessionClosed() {
client_->close();
}
-blink::WebContentDecryptionModuleResult::SessionStatus
-WebContentDecryptionModuleSessionImpl::OnSessionInitialized(
- const std::string& session_id) {
+void WebContentDecryptionModuleSessionImpl::OnSessionInitialized(
+ const std::string& session_id,
+ SessionInitStatus* status) {
+ DCHECK(thread_checker_.CalledOnValidThread());
// CDM will return NULL if the session to be loaded can't be found.
- if (session_id.empty())
- return blink::WebContentDecryptionModuleResult::SessionNotFound;
+ if (session_id.empty()) {
+ *status = SessionInitStatus::SESSION_NOT_FOUND;
+ return;
+ }
DCHECK(session_id_.empty()) << "Session ID may not be changed once set.";
session_id_ = session_id;
- return adapter_->RegisterSession(session_id_, weak_ptr_factory_.GetWeakPtr())
- ? blink::WebContentDecryptionModuleResult::NewSession
- : blink::WebContentDecryptionModuleResult::SessionAlreadyExists;
+ *status =
+ adapter_->RegisterSession(session_id_, weak_ptr_factory_.GetWeakPtr())
+ ? SessionInitStatus::NEW_SESSION
+ : SessionInitStatus::SESSION_ALREADY_EXISTS;
}
} // namespace media
diff --git a/chromium/media/blink/webcontentdecryptionmodulesession_impl.h b/chromium/media/blink/webcontentdecryptionmodulesession_impl.h
index 2f00ff5b368..9a635097ec2 100644
--- a/chromium/media/blink/webcontentdecryptionmodulesession_impl.h
+++ b/chromium/media/blink/webcontentdecryptionmodulesession_impl.h
@@ -12,7 +12,9 @@
#include "base/callback.h"
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
+#include "base/threading/thread_checker.h"
#include "media/base/media_keys.h"
+#include "media/blink/new_session_cdm_result_promise.h"
#include "third_party/WebKit/public/platform/WebContentDecryptionModuleSession.h"
#include "third_party/WebKit/public/platform/WebString.h"
@@ -26,25 +28,25 @@ class WebContentDecryptionModuleSessionImpl
public:
WebContentDecryptionModuleSessionImpl(
const scoped_refptr<CdmSessionAdapter>& adapter);
- virtual ~WebContentDecryptionModuleSessionImpl();
+ ~WebContentDecryptionModuleSessionImpl() override;
// blink::WebContentDecryptionModuleSession implementation.
- virtual void setClientInterface(Client* client);
- virtual blink::WebString sessionId() const;
+ void setClientInterface(Client* client) override;
+ blink::WebString sessionId() const override;
- virtual void initializeNewSession(
+ void initializeNewSession(
blink::WebEncryptedMediaInitDataType init_data_type,
const unsigned char* initData,
size_t initDataLength,
blink::WebEncryptedMediaSessionType session_type,
- blink::WebContentDecryptionModuleResult result);
- virtual void load(const blink::WebString& session_id,
- blink::WebContentDecryptionModuleResult result);
- virtual void update(const uint8* response,
- size_t response_length,
- blink::WebContentDecryptionModuleResult result);
- virtual void close(blink::WebContentDecryptionModuleResult result);
- virtual void remove(blink::WebContentDecryptionModuleResult result);
+ blink::WebContentDecryptionModuleResult result) override;
+ void load(const blink::WebString& session_id,
+ blink::WebContentDecryptionModuleResult result) override;
+ void update(const uint8* response,
+ size_t response_length,
+ blink::WebContentDecryptionModuleResult result) override;
+ void close(blink::WebContentDecryptionModuleResult result) override;
+ void remove(blink::WebContentDecryptionModuleResult result) override;
// Callbacks.
void OnSessionMessage(MediaKeys::MessageType message_type,
@@ -55,9 +57,10 @@ class WebContentDecryptionModuleSessionImpl
void OnSessionClosed();
private:
- // Called when a new session is created.
- blink::WebContentDecryptionModuleResult::SessionStatus OnSessionInitialized(
- const std::string& session_id);
+ // Called when a new session is created or loaded. |status| is set as
+ // appropriate, depending on whether the session already exists or not.
+ void OnSessionInitialized(const std::string& session_id,
+ SessionInitStatus* status);
scoped_refptr<CdmSessionAdapter> adapter_;
@@ -74,6 +77,7 @@ class WebContentDecryptionModuleSessionImpl
// closed() event.
bool is_closed_;
+ base::ThreadChecker thread_checker_;
// Since promises will live until they are fired, use a weak reference when
// creating a promise in case this class disappears before the promise
// actually fires.
diff --git a/chromium/media/blink/webencryptedmediaclient_impl.cc b/chromium/media/blink/webencryptedmediaclient_impl.cc
index fd45ad7cf32..940805b8e6d 100644
--- a/chromium/media/blink/webencryptedmediaclient_impl.cc
+++ b/chromium/media/blink/webencryptedmediaclient_impl.cc
@@ -151,7 +151,7 @@ WebEncryptedMediaClientImpl::Reporter* WebEncryptedMediaClientImpl::GetReporter(
// TODO(sandersd): Avoid doing ASCII conversion more than once.
std::string key_system_ascii;
if (base::IsStringASCII(key_system))
- key_system_ascii = base::UTF16ToASCII(key_system);
+ key_system_ascii = base::UTF16ToASCII(base::StringPiece16(key_system));
// Return a per-frame singleton so that UMA reports will be once-per-frame.
std::string uma_name = GetKeySystemNameForUMA(key_system_ascii);
diff --git a/chromium/media/blink/webencryptedmediaclient_impl.h b/chromium/media/blink/webencryptedmediaclient_impl.h
index 5bd1a744204..83197d82160 100644
--- a/chromium/media/blink/webencryptedmediaclient_impl.h
+++ b/chromium/media/blink/webencryptedmediaclient_impl.h
@@ -37,11 +37,11 @@ class MEDIA_EXPORT WebEncryptedMediaClientImpl
base::Callback<bool(void)> are_secure_codecs_supported_cb,
CdmFactory* cdm_factory,
MediaPermission* media_permission);
- virtual ~WebEncryptedMediaClientImpl();
+ ~WebEncryptedMediaClientImpl() override;
// WebEncryptedMediaClient implementation.
- virtual void requestMediaKeySystemAccess(
- blink::WebEncryptedMediaRequest request);
+ void requestMediaKeySystemAccess(
+ blink::WebEncryptedMediaRequest request) override;
// Create the CDM for |key_system| and |security_origin|. The caller owns
// the created cdm (passed back using |result|).
diff --git a/chromium/media/blink/webinbandtexttrack_impl.h b/chromium/media/blink/webinbandtexttrack_impl.h
index 9c75caec6ad..3225dac6cd9 100644
--- a/chromium/media/blink/webinbandtexttrack_impl.h
+++ b/chromium/media/blink/webinbandtexttrack_impl.h
@@ -16,16 +16,16 @@ class WebInbandTextTrackImpl : public blink::WebInbandTextTrack {
const blink::WebString& label,
const blink::WebString& language,
const blink::WebString& id);
- virtual ~WebInbandTextTrackImpl();
+ ~WebInbandTextTrackImpl() override;
- virtual void setClient(blink::WebInbandTextTrackClient* client);
- virtual blink::WebInbandTextTrackClient* client();
+ void setClient(blink::WebInbandTextTrackClient* client) override;
+ blink::WebInbandTextTrackClient* client() override;
- virtual Kind kind() const;
+ Kind kind() const override;
- virtual blink::WebString label() const;
- virtual blink::WebString language() const;
- virtual blink::WebString id() const;
+ blink::WebString label() const override;
+ blink::WebString language() const override;
+ blink::WebString id() const override;
private:
blink::WebInbandTextTrackClient* client_;
diff --git a/chromium/media/blink/webmediaplayer_impl.cc b/chromium/media/blink/webmediaplayer_impl.cc
index 1e0b82f40bd..2726552c4e1 100644
--- a/chromium/media/blink/webmediaplayer_impl.cc
+++ b/chromium/media/blink/webmediaplayer_impl.cc
@@ -27,6 +27,7 @@
#include "media/base/limits.h"
#include "media/base/media_log.h"
#include "media/base/text_renderer.h"
+#include "media/base/timestamp_constants.h"
#include "media/base/video_frame.h"
#include "media/blink/texttrack_impl.h"
#include "media/blink/webaudiosourceprovider_impl.h"
@@ -38,6 +39,8 @@
#include "media/filters/chunk_demuxer.h"
#include "media/filters/ffmpeg_demuxer.h"
#include "third_party/WebKit/public/platform/WebEncryptedMediaTypes.h"
+#include "third_party/WebKit/public/platform/WebMediaPlayerClient.h"
+#include "third_party/WebKit/public/platform/WebMediaPlayerEncryptedMediaClient.h"
#include "third_party/WebKit/public/platform/WebMediaSource.h"
#include "third_party/WebKit/public/platform/WebRect.h"
#include "third_party/WebKit/public/platform/WebSize.h"
@@ -103,6 +106,7 @@ STATIC_ASSERT_MATCHING_ENUM(UseCredentials);
WebMediaPlayerImpl::WebMediaPlayerImpl(
blink::WebLocalFrame* frame,
blink::WebMediaPlayerClient* client,
+ blink::WebMediaPlayerEncryptedMediaClient* encrypted_client,
base::WeakPtr<WebMediaPlayerDelegate> delegate,
scoped_ptr<RendererFactory> renderer_factory,
CdmFactory* cdm_factory,
@@ -113,6 +117,7 @@ WebMediaPlayerImpl::WebMediaPlayerImpl(
preload_(BufferedDataSource::AUTO),
main_task_runner_(base::ThreadTaskRunnerHandle::Get()),
media_task_runner_(params.media_task_runner()),
+ worker_task_runner_(params.worker_task_runner()),
media_log_(params.media_log()),
pipeline_(media_task_runner_, media_log_.get()),
load_type_(LoadTypeURL),
@@ -124,6 +129,7 @@ WebMediaPlayerImpl::WebMediaPlayerImpl(
pending_seek_(false),
should_notify_time_changed_(false),
client_(client),
+ encrypted_client_(encrypted_client),
delegate_(delegate),
defer_load_cb_(params.defer_load_cb()),
context_3d_cb_(params.context_3d_cb()),
@@ -139,7 +145,7 @@ WebMediaPlayerImpl::WebMediaPlayerImpl(
BIND_TO_RENDER_LOOP(&WebMediaPlayerImpl::OnNaturalSizeChanged),
BIND_TO_RENDER_LOOP(&WebMediaPlayerImpl::OnOpacityChanged))),
encrypted_media_support_(cdm_factory,
- client,
+ encrypted_client,
params.media_permission(),
base::Bind(&WebMediaPlayerImpl::SetCdm,
AsWeakPtr(),
@@ -293,18 +299,18 @@ void WebMediaPlayerImpl::seek(double seconds) {
if (ready_state_ > WebMediaPlayer::ReadyStateHaveMetadata)
SetReadyState(WebMediaPlayer::ReadyStateHaveMetadata);
- base::TimeDelta new_seek_time = ConvertSecondsToTimestamp(seconds);
+ base::TimeDelta new_seek_time = base::TimeDelta::FromSecondsD(seconds);
if (seeking_) {
if (new_seek_time == seek_time_) {
if (chunk_demuxer_) {
- if (!pending_seek_) {
- // If using media source demuxer, only suppress redundant seeks if
- // there is no pending seek. This enforces that any pending seek that
- // results in a demuxer seek is preceded by matching
- // CancelPendingSeek() and StartWaitingForSeek() calls.
- return;
- }
+ // Don't suppress any redundant in-progress MSE seek. There could have
+ // been changes to the underlying buffers after seeking the demuxer and
+ // before receiving OnPipelineSeeked() for the currently in-progress
+ // seek.
+ MEDIA_LOG(DEBUG, media_log_)
+ << "Detected MediaSource seek to same time as in-progress seek to "
+ << seek_time_ << ".";
} else {
// Suppress all redundant seeks if unrestricted by media source demuxer
// API.
@@ -324,11 +330,16 @@ void WebMediaPlayerImpl::seek(double seconds) {
media_log_->AddEvent(media_log_->CreateSeekEvent(seconds));
// Update our paused time.
- // In paused state ignore the seek operations to current time if the loading
- // is completed and generate OnPipelineBufferingStateChanged event to
- // eventually fire seeking and seeked events
+ // For non-MSE playbacks, in paused state ignore the seek operations to
+ // current time if the loading is completed and generate
+ // OnPipelineBufferingStateChanged event to eventually fire seeking and seeked
+ // events. We don't short-circuit MSE seeks in this logic because the
+ // underlying buffers around the seek time might have changed (or even been
+ // removed) since previous seek/preroll/pause action, and the pipeline might
+ // need to flush so the new buffers are decoded and rendered instead of the
+ // old ones.
if (paused_) {
- if (paused_time_ != new_seek_time) {
+ if (paused_time_ != new_seek_time || chunk_demuxer_) {
paused_time_ = new_seek_time;
} else if (old_state == ReadyStateHaveEnoughData) {
main_task_runner_->PostTask(
@@ -387,15 +398,20 @@ void WebMediaPlayerImpl::setVolume(double volume) {
}
void WebMediaPlayerImpl::setSinkId(const blink::WebString& device_id,
- WebSetSinkIdCB* web_callbacks) {
+ WebSetSinkIdCB* web_callback) {
DCHECK(main_task_runner_->BelongsToCurrentThread());
- std::string device_id_str(device_id.utf8());
- GURL security_origin(frame_->securityOrigin().toString().utf8());
- DVLOG(1) << __FUNCTION__
- << "(" << device_id_str << ", " << security_origin << ")";
- audio_source_provider_->SwitchOutputDevice(
- device_id_str, security_origin,
- ConvertToSwitchOutputDeviceCB(web_callbacks));
+ DVLOG(1) << __FUNCTION__;
+ media::SwitchOutputDeviceCB callback =
+ media::ConvertToSwitchOutputDeviceCB(web_callback);
+ OutputDevice* output_device = audio_source_provider_->GetOutputDevice();
+ if (output_device) {
+ std::string device_id_str(device_id.utf8());
+ url::Origin security_origin(
+ GURL(frame_->securityOrigin().toString().utf8()));
+ output_device->SwitchOutputDevice(device_id_str, security_origin, callback);
+ } else {
+ callback.Run(OUTPUT_DEVICE_STATUS_ERROR_INTERNAL);
+ }
}
#define STATIC_ASSERT_MATCHING_ENUM(webkit_name, chromium_name) \
@@ -562,8 +578,9 @@ void WebMediaPlayerImpl::paint(blink::WebCanvas* canvas,
if (!context_3d.gl)
return;
}
- skcanvas_video_renderer_.Paint(video_frame, canvas, gfx_rect, alpha, mode,
- pipeline_metadata_.video_rotation, context_3d);
+ skcanvas_video_renderer_.Paint(video_frame, canvas, gfx::RectF(gfx_rect),
+ alpha, mode, pipeline_metadata_.video_rotation,
+ context_3d);
}
bool WebMediaPlayerImpl::hasSingleSecurityOrigin() const {
@@ -579,7 +596,7 @@ bool WebMediaPlayerImpl::didPassCORSAccessCheck() const {
}
double WebMediaPlayerImpl::mediaTimeForTimeValue(double timeValue) const {
- return ConvertSecondsToTimestamp(timeValue).InSecondsF();
+ return base::TimeDelta::FromSecondsD(timeValue).InSecondsF();
}
unsigned WebMediaPlayerImpl::decodedFrameCount() const {
@@ -718,18 +735,18 @@ void WebMediaPlayerImpl::OnEncryptedMediaInitData(
encrypted_media_support_.SetInitDataType(init_data_type);
- client_->encrypted(ConvertToWebInitDataType(init_data_type),
- vector_as_array(&init_data),
- base::saturated_cast<unsigned int>(init_data.size()));
+ encrypted_client_->encrypted(
+ ConvertToWebInitDataType(init_data_type), vector_as_array(&init_data),
+ base::saturated_cast<unsigned int>(init_data.size()));
}
void WebMediaPlayerImpl::OnWaitingForDecryptionKey() {
- client_->didBlockPlaybackWaitingForKey();
+ encrypted_client_->didBlockPlaybackWaitingForKey();
// TODO(jrummell): didResumePlaybackBlockedForKey() should only be called
// when a key has been successfully added (e.g. OnSessionKeysChange() with
// |has_additional_usable_key| = true). http://crbug.com/461903
- client_->didResumePlaybackBlockedForKey();
+ encrypted_client_->didResumePlaybackBlockedForKey();
}
void WebMediaPlayerImpl::SetCdm(const CdmAttachedCB& cdm_attached_cb,
@@ -827,7 +844,8 @@ void WebMediaPlayerImpl::OnPipelineMetadata(
}
video_weblayer_.reset(new cc_blink::WebLayerImpl(layer));
- video_weblayer_->setOpaque(opaque_);
+ video_weblayer_->layer()->SetContentsOpaque(opaque_);
+ video_weblayer_->SetContentsOpaqueIsFixed(true);
client_->setWebLayer(video_weblayer_.get());
}
}
@@ -857,8 +875,8 @@ void WebMediaPlayerImpl::OnPipelineBufferingStateChanged(
void WebMediaPlayerImpl::OnDemuxerOpened() {
DCHECK(main_task_runner_->BelongsToCurrentThread());
- client_->mediaSourceOpened(new WebMediaSourceImpl(
- chunk_demuxer_, base::Bind(&MediaLog::AddLogEvent, media_log_)));
+ client_->mediaSourceOpened(
+ new WebMediaSourceImpl(chunk_demuxer_, media_log_));
}
void WebMediaPlayerImpl::OnAddTextTrack(
@@ -925,8 +943,7 @@ void WebMediaPlayerImpl::StartPipeline() {
chunk_demuxer_ = new ChunkDemuxer(
BIND_TO_RENDER_LOOP(&WebMediaPlayerImpl::OnDemuxerOpened),
- encrypted_media_init_data_cb,
- base::Bind(&MediaLog::AddLogEvent, media_log_), media_log_, true);
+ encrypted_media_init_data_cb, media_log_, true);
demuxer_.reset(chunk_demuxer_);
}
@@ -934,9 +951,9 @@ void WebMediaPlayerImpl::StartPipeline() {
seeking_ = true;
pipeline_.Start(
- demuxer_.get(),
- renderer_factory_->CreateRenderer(
- media_task_runner_, audio_source_provider_.get(), compositor_),
+ demuxer_.get(), renderer_factory_->CreateRenderer(
+ media_task_runner_, worker_task_runner_,
+ audio_source_provider_.get(), compositor_),
BIND_TO_RENDER_LOOP(&WebMediaPlayerImpl::OnPipelineEnded),
BIND_TO_RENDER_LOOP(&WebMediaPlayerImpl::OnPipelineError),
BIND_TO_RENDER_LOOP1(&WebMediaPlayerImpl::OnPipelineSeeked, false),
@@ -1008,8 +1025,10 @@ void WebMediaPlayerImpl::OnOpacityChanged(bool opaque) {
DCHECK_NE(ready_state_, WebMediaPlayer::ReadyStateHaveNothing);
opaque_ = opaque;
+ // Modify content opaqueness of cc::Layer directly so that
+ // SetContentsOpaqueIsFixed is ignored.
if (video_weblayer_)
- video_weblayer_->setOpaque(opaque_);
+ video_weblayer_->layer()->SetContentsOpaque(opaque_);
}
static void GetCurrentFrameAndSignal(
diff --git a/chromium/media/blink/webmediaplayer_impl.h b/chromium/media/blink/webmediaplayer_impl.h
index dd3a2daab6e..98996453237 100644
--- a/chromium/media/blink/webmediaplayer_impl.h
+++ b/chromium/media/blink/webmediaplayer_impl.h
@@ -29,16 +29,18 @@
#include "third_party/WebKit/public/platform/WebAudioSourceProvider.h"
#include "third_party/WebKit/public/platform/WebContentDecryptionModuleResult.h"
#include "third_party/WebKit/public/platform/WebMediaPlayer.h"
-#include "third_party/WebKit/public/platform/WebMediaPlayerClient.h"
#include "url/gurl.h"
namespace blink {
class WebGraphicsContext3D;
class WebLocalFrame;
+class WebMediaPlayerClient;
+class WebMediaPlayerEncryptedMediaClient;
}
namespace base {
class SingleThreadTaskRunner;
+class TaskRunner;
}
namespace cc_blink {
@@ -68,68 +70,70 @@ class MEDIA_EXPORT WebMediaPlayerImpl
// internal renderer will be created.
// TODO(xhwang): Drop the internal renderer path and always pass in a renderer
// here.
- WebMediaPlayerImpl(blink::WebLocalFrame* frame,
- blink::WebMediaPlayerClient* client,
- base::WeakPtr<WebMediaPlayerDelegate> delegate,
- scoped_ptr<RendererFactory> renderer_factory,
- CdmFactory* cdm_factory,
- const WebMediaPlayerParams& params);
- virtual ~WebMediaPlayerImpl();
-
- virtual void load(LoadType load_type,
- const blink::WebURL& url,
- CORSMode cors_mode);
+ WebMediaPlayerImpl(
+ blink::WebLocalFrame* frame,
+ blink::WebMediaPlayerClient* client,
+ blink::WebMediaPlayerEncryptedMediaClient* encrypted_client,
+ base::WeakPtr<WebMediaPlayerDelegate> delegate,
+ scoped_ptr<RendererFactory> renderer_factory,
+ CdmFactory* cdm_factory,
+ const WebMediaPlayerParams& params);
+ ~WebMediaPlayerImpl() override;
+
+ void load(LoadType load_type,
+ const blink::WebURL& url,
+ CORSMode cors_mode) override;
// Playback controls.
- virtual void play();
- virtual void pause();
- virtual bool supportsSave() const;
- virtual void seek(double seconds);
- virtual void setRate(double rate);
- virtual void setVolume(double volume);
- virtual void setSinkId(const blink::WebString& device_id,
- WebSetSinkIdCB* web_callbacks);
- virtual void setPreload(blink::WebMediaPlayer::Preload preload);
- virtual blink::WebTimeRanges buffered() const;
- virtual blink::WebTimeRanges seekable() const;
+ void play() override;
+ void pause() override;
+ bool supportsSave() const override;
+ void seek(double seconds) override;
+ void setRate(double rate) override;
+ void setVolume(double volume) override;
+ void setSinkId(const blink::WebString& device_id,
+ WebSetSinkIdCB* web_callbacks) override;
+ void setPreload(blink::WebMediaPlayer::Preload preload) override;
+ blink::WebTimeRanges buffered() const override;
+ blink::WebTimeRanges seekable() const override;
// Methods for painting.
- virtual void paint(blink::WebCanvas* canvas,
- const blink::WebRect& rect,
- unsigned char alpha,
- SkXfermode::Mode mode);
+ void paint(blink::WebCanvas* canvas,
+ const blink::WebRect& rect,
+ unsigned char alpha,
+ SkXfermode::Mode mode) override;
// True if the loaded media has a playable video/audio track.
- virtual bool hasVideo() const;
- virtual bool hasAudio() const;
+ bool hasVideo() const override;
+ bool hasAudio() const override;
// Dimensions of the video.
- virtual blink::WebSize naturalSize() const;
+ blink::WebSize naturalSize() const override;
// Getters of playback state.
- virtual bool paused() const;
- virtual bool seeking() const;
- virtual double duration() const;
+ bool paused() const override;
+ bool seeking() const override;
+ double duration() const override;
virtual double timelineOffset() const;
- virtual double currentTime() const;
+ double currentTime() const override;
// Internal states of loading and network.
// TODO(hclam): Ask the pipeline about the state rather than having reading
// them from members which would cause race conditions.
- virtual blink::WebMediaPlayer::NetworkState networkState() const;
- virtual blink::WebMediaPlayer::ReadyState readyState() const;
+ blink::WebMediaPlayer::NetworkState networkState() const override;
+ blink::WebMediaPlayer::ReadyState readyState() const override;
- virtual bool didLoadingProgress();
+ bool didLoadingProgress() override;
- virtual bool hasSingleSecurityOrigin() const;
- virtual bool didPassCORSAccessCheck() const;
+ bool hasSingleSecurityOrigin() const override;
+ bool didPassCORSAccessCheck() const override;
- virtual double mediaTimeForTimeValue(double timeValue) const;
+ double mediaTimeForTimeValue(double timeValue) const override;
- virtual unsigned decodedFrameCount() const;
- virtual unsigned droppedFrameCount() const;
- virtual unsigned audioDecodedByteCount() const;
- virtual unsigned videoDecodedByteCount() const;
+ unsigned decodedFrameCount() const override;
+ unsigned droppedFrameCount() const override;
+ unsigned audioDecodedByteCount() const override;
+ unsigned videoDecodedByteCount() const override;
bool copyVideoTextureToPlatformTexture(
blink::WebGraphicsContext3D* web_graphics_context,
@@ -139,27 +143,27 @@ class MEDIA_EXPORT WebMediaPlayerImpl
bool premultiply_alpha,
bool flip_y) override;
- virtual blink::WebAudioSourceProvider* audioSourceProvider();
+ blink::WebAudioSourceProvider* audioSourceProvider() override;
- virtual MediaKeyException generateKeyRequest(
+ MediaKeyException generateKeyRequest(
const blink::WebString& key_system,
const unsigned char* init_data,
- unsigned init_data_length);
+ unsigned init_data_length) override;
- virtual MediaKeyException addKey(const blink::WebString& key_system,
- const unsigned char* key,
- unsigned key_length,
- const unsigned char* init_data,
- unsigned init_data_length,
- const blink::WebString& session_id);
+ MediaKeyException addKey(const blink::WebString& key_system,
+ const unsigned char* key,
+ unsigned key_length,
+ const unsigned char* init_data,
+ unsigned init_data_length,
+ const blink::WebString& session_id) override;
- virtual MediaKeyException cancelKeyRequest(
+ MediaKeyException cancelKeyRequest(
const blink::WebString& key_system,
- const blink::WebString& session_id);
+ const blink::WebString& session_id) override;
- virtual void setContentDecryptionModule(
+ void setContentDecryptionModule(
blink::WebContentDecryptionModule* cdm,
- blink::WebContentDecryptionModuleResult result);
+ blink::WebContentDecryptionModuleResult result) override;
void OnPipelineSeeked(bool time_changed, PipelineStatus status);
void OnPipelineEnded();
@@ -243,6 +247,7 @@ class MEDIA_EXPORT WebMediaPlayerImpl
const scoped_refptr<base::SingleThreadTaskRunner> main_task_runner_;
scoped_refptr<base::SingleThreadTaskRunner> media_task_runner_;
+ scoped_refptr<base::TaskRunner> worker_task_runner_;
scoped_refptr<MediaLog> media_log_;
Pipeline pipeline_;
@@ -288,6 +293,7 @@ class MEDIA_EXPORT WebMediaPlayerImpl
bool should_notify_time_changed_;
blink::WebMediaPlayerClient* client_;
+ blink::WebMediaPlayerEncryptedMediaClient* encrypted_client_;
base::WeakPtr<WebMediaPlayerDelegate> delegate_;
diff --git a/chromium/media/blink/webmediaplayer_params.cc b/chromium/media/blink/webmediaplayer_params.cc
index ec602a227c9..5d88f871381 100644
--- a/chromium/media/blink/webmediaplayer_params.cc
+++ b/chromium/media/blink/webmediaplayer_params.cc
@@ -5,6 +5,7 @@
#include "media/blink/webmediaplayer_params.h"
#include "base/single_thread_task_runner.h"
+#include "base/task_runner.h"
#include "media/base/audio_renderer_sink.h"
#include "media/base/media_log.h"
@@ -15,6 +16,7 @@ WebMediaPlayerParams::WebMediaPlayerParams(
const scoped_refptr<AudioRendererSink>& audio_renderer_sink,
const scoped_refptr<MediaLog>& media_log,
const scoped_refptr<base::SingleThreadTaskRunner>& media_task_runner,
+ const scoped_refptr<base::TaskRunner>& worker_task_runner,
const scoped_refptr<base::SingleThreadTaskRunner>& compositor_task_runner,
const Context3DCB& context_3d_cb,
MediaPermission* media_permission,
@@ -23,11 +25,11 @@ WebMediaPlayerParams::WebMediaPlayerParams(
audio_renderer_sink_(audio_renderer_sink),
media_log_(media_log),
media_task_runner_(media_task_runner),
+ worker_task_runner_(worker_task_runner),
compositor_task_runner_(compositor_task_runner),
context_3d_cb_(context_3d_cb),
media_permission_(media_permission),
- initial_cdm_(initial_cdm) {
-}
+ initial_cdm_(initial_cdm) {}
WebMediaPlayerParams::~WebMediaPlayerParams() {}
diff --git a/chromium/media/blink/webmediaplayer_params.h b/chromium/media/blink/webmediaplayer_params.h
index 02ca00789e6..0811902a9ba 100644
--- a/chromium/media/blink/webmediaplayer_params.h
+++ b/chromium/media/blink/webmediaplayer_params.h
@@ -12,6 +12,7 @@
namespace base {
class SingleThreadTaskRunner;
+class TaskRunner;
}
namespace blink {
@@ -39,6 +40,7 @@ class MEDIA_EXPORT WebMediaPlayerParams {
const scoped_refptr<AudioRendererSink>& audio_renderer_sink,
const scoped_refptr<MediaLog>& media_log,
const scoped_refptr<base::SingleThreadTaskRunner>& media_task_runner,
+ const scoped_refptr<base::TaskRunner>& worker_task_runner,
const scoped_refptr<base::SingleThreadTaskRunner>& compositor_task_runner,
const Context3DCB& context_3d,
MediaPermission* media_permission,
@@ -60,6 +62,10 @@ class MEDIA_EXPORT WebMediaPlayerParams {
return media_task_runner_;
}
+ const scoped_refptr<base::TaskRunner> worker_task_runner() const {
+ return worker_task_runner_;
+ }
+
const scoped_refptr<base::SingleThreadTaskRunner>& compositor_task_runner()
const {
return compositor_task_runner_;
@@ -79,6 +85,7 @@ class MEDIA_EXPORT WebMediaPlayerParams {
scoped_refptr<AudioRendererSink> audio_renderer_sink_;
scoped_refptr<MediaLog> media_log_;
scoped_refptr<base::SingleThreadTaskRunner> media_task_runner_;
+ scoped_refptr<base::TaskRunner> worker_task_runner_;
scoped_refptr<base::SingleThreadTaskRunner> compositor_task_runner_;
Context3DCB context_3d_cb_;
diff --git a/chromium/media/blink/webmediaplayer_util.cc b/chromium/media/blink/webmediaplayer_util.cc
index fc1a07943fc..f017975c286 100644
--- a/chromium/media/blink/webmediaplayer_util.cc
+++ b/chromium/media/blink/webmediaplayer_util.cc
@@ -5,32 +5,27 @@
#include "media/blink/webmediaplayer_util.h"
#include <math.h>
+#include <string>
#include "base/metrics/histogram.h"
#include "media/base/bind_to_current_loop.h"
#include "media/base/media_client.h"
#include "media/base/media_keys.h"
-#include "third_party/WebKit/public/platform/WebMediaPlayerClient.h"
+#include "third_party/WebKit/public/platform/WebMediaPlayerEncryptedMediaClient.h"
namespace media {
// Compile asserts shared by all platforms.
-#define STATIC_ASSERT_MATCHING_ENUM(name) \
- static_assert( \
- static_cast<int>(blink::WebMediaPlayerClient::MediaKeyErrorCode ## name) == \
- static_cast<int>(MediaKeys::k ## name ## Error), \
- "mismatching enum values: " #name)
+#define STATIC_ASSERT_MATCHING_ENUM(name) \
+ static_assert(static_cast<int>(blink::WebMediaPlayerEncryptedMediaClient:: \
+ MediaKeyErrorCode##name) == \
+ static_cast<int>(MediaKeys::k##name##Error), \
+ "mismatching enum values: " #name)
STATIC_ASSERT_MATCHING_ENUM(Unknown);
STATIC_ASSERT_MATCHING_ENUM(Client);
#undef STATIC_ASSERT_MATCHING_ENUM
-base::TimeDelta ConvertSecondsToTimestamp(double seconds) {
- double microseconds = seconds * base::Time::kMicrosecondsPerSecond;
- return base::TimeDelta::FromMicroseconds(
- microseconds > 0 ? microseconds + 0.5 : ceil(microseconds - 0.5));
-}
-
blink::WebTimeRanges ConvertToWebTimeRanges(
const Ranges<base::TimeDelta>& ranges) {
blink::WebTimeRanges result(ranges.size());
@@ -132,12 +127,9 @@ void ReportMetrics(blink::WebMediaPlayer::LoadType load_type,
UMA_HISTOGRAM_ENUMERATION("Media.URLScheme", URLScheme(url),
kMaxURLScheme + 1);
- // Keep track if this is a MSE or non-MSE playback.
- // TODO(xhwang): This name is not intuitive. We should have a histogram for
- // all load types.
- UMA_HISTOGRAM_BOOLEAN(
- "Media.MSE.Playback",
- load_type == blink::WebMediaPlayer::LoadTypeMediaSource);
+ // Report load type, such as URL, MediaSource or MediaStream.
+ UMA_HISTOGRAM_ENUMERATION("Media.LoadType", load_type,
+ blink::WebMediaPlayer::LoadTypeMax + 1);
// Report the origin from where the media player is created.
if (GetMediaClient()) {
@@ -146,6 +138,11 @@ void ReportMetrics(blink::WebMediaPlayer::LoadType load_type,
}
}
+void RecordOriginOfHLSPlayback(const GURL& origin_url) {
+ if (media::GetMediaClient())
+ GetMediaClient()->RecordRapporURL("Media.OriginUrl.HLS", origin_url);
+}
+
EmeInitDataType ConvertToEmeInitDataType(
blink::WebEncryptedMediaInitDataType init_data_type) {
switch (init_data_type) {
@@ -202,7 +199,7 @@ class SetSinkIdCallback {
: web_callback_(other.web_callback_.Pass()) {}
~SetSinkIdCallback() {}
friend void RunSetSinkIdCallback(const SetSinkIdCallback& callback,
- SwitchOutputDeviceResult result);
+ OutputDeviceStatus result);
private:
// Mutable is required so that Pass() can be called in the copy
@@ -211,33 +208,28 @@ class SetSinkIdCallback {
};
void RunSetSinkIdCallback(const SetSinkIdCallback& callback,
- SwitchOutputDeviceResult result) {
+ OutputDeviceStatus result) {
DVLOG(1) << __FUNCTION__;
if (!callback.web_callback_)
return;
switch (result) {
- case SWITCH_OUTPUT_DEVICE_RESULT_SUCCESS:
+ case OUTPUT_DEVICE_STATUS_OK:
callback.web_callback_->onSuccess();
break;
- case SWITCH_OUTPUT_DEVICE_RESULT_ERROR_NOT_FOUND:
+ case OUTPUT_DEVICE_STATUS_ERROR_NOT_FOUND:
callback.web_callback_->onError(new blink::WebSetSinkIdError(
blink::WebSetSinkIdError::ErrorTypeNotFound, "Device not found"));
break;
- case SWITCH_OUTPUT_DEVICE_RESULT_ERROR_NOT_AUTHORIZED:
+ case OUTPUT_DEVICE_STATUS_ERROR_NOT_AUTHORIZED:
callback.web_callback_->onError(new blink::WebSetSinkIdError(
blink::WebSetSinkIdError::ErrorTypeSecurity,
"No permission to access device"));
break;
- case SWITCH_OUTPUT_DEVICE_RESULT_ERROR_OBSOLETE:
- callback.web_callback_->onError(new blink::WebSetSinkIdError(
- blink::WebSetSinkIdError::ErrorTypeAbort,
- "The requested operation became obsolete and was aborted"));
- break;
- case SWITCH_OUTPUT_DEVICE_RESULT_ERROR_NOT_SUPPORTED:
+ case OUTPUT_DEVICE_STATUS_ERROR_INTERNAL:
callback.web_callback_->onError(new blink::WebSetSinkIdError(
blink::WebSetSinkIdError::ErrorTypeAbort,
- "The requested operation cannot be performed and was aborted"));
+ "The requested operation could be performed and was aborted"));
break;
default:
NOTREACHED();
diff --git a/chromium/media/blink/webmediaplayer_util.h b/chromium/media/blink/webmediaplayer_util.h
index 9aaa51283b3..9182f8a9733 100644
--- a/chromium/media/blink/webmediaplayer_util.h
+++ b/chromium/media/blink/webmediaplayer_util.h
@@ -20,12 +20,6 @@
namespace media {
-// Platform independent method for converting and rounding floating point
-// seconds to an int64 timestamp.
-//
-// Refer to https://bugs.webkit.org/show_bug.cgi?id=52697 for details.
-base::TimeDelta MEDIA_EXPORT ConvertSecondsToTimestamp(double seconds);
-
blink::WebTimeRanges MEDIA_EXPORT ConvertToWebTimeRanges(
const Ranges<base::TimeDelta>& ranges);
@@ -37,13 +31,16 @@ void MEDIA_EXPORT ReportMetrics(blink::WebMediaPlayer::LoadType load_type,
const GURL& url,
const GURL& origin_url);
+// Record a RAPPOR metric for the origin of an HLS playback.
+void MEDIA_EXPORT RecordOriginOfHLSPlayback(const GURL& origin_url);
+
// Convert Initialization Data Types.
EmeInitDataType MEDIA_EXPORT
ConvertToEmeInitDataType(blink::WebEncryptedMediaInitDataType init_data_type);
blink::WebEncryptedMediaInitDataType MEDIA_EXPORT
ConvertToWebInitDataType(EmeInitDataType init_data_type);
-typedef blink::WebCallbacks<void, blink::WebSetSinkIdError> WebSetSinkIdCB;
+typedef blink::WebCallbacks<void, blink::WebSetSinkIdError*> WebSetSinkIdCB;
// Wraps a WebSetSinkIdCB into a media::SwitchOutputDeviceCB
// and binds it to the current thread
diff --git a/chromium/media/blink/webmediasource_impl.cc b/chromium/media/blink/webmediasource_impl.cc
index d67ca6d7d7f..6e899568e25 100644
--- a/chromium/media/blink/webmediasource_impl.cc
+++ b/chromium/media/blink/webmediasource_impl.cc
@@ -5,6 +5,7 @@
#include "media/blink/webmediasource_impl.h"
#include "base/guid.h"
+#include "media/base/mime_util.h"
#include "media/blink/websourcebuffer_impl.h"
#include "media/filters/chunk_demuxer.h"
#include "third_party/WebKit/public/platform/WebCString.h"
@@ -24,10 +25,9 @@ STATIC_ASSERT_MATCHING_STATUS_ENUM(AddStatusNotSupported, kNotSupported);
STATIC_ASSERT_MATCHING_STATUS_ENUM(AddStatusReachedIdLimit, kReachedIdLimit);
#undef STATIC_ASSERT_MATCHING_STATUS_ENUM
-WebMediaSourceImpl::WebMediaSourceImpl(
- ChunkDemuxer* demuxer, LogCB log_cb)
- : demuxer_(demuxer),
- log_cb_(log_cb) {
+WebMediaSourceImpl::WebMediaSourceImpl(ChunkDemuxer* demuxer,
+ const scoped_refptr<MediaLog>& media_log)
+ : demuxer_(demuxer), media_log_(media_log) {
DCHECK(demuxer_);
}
@@ -35,16 +35,16 @@ WebMediaSourceImpl::~WebMediaSourceImpl() {}
WebMediaSource::AddStatus WebMediaSourceImpl::addSourceBuffer(
const blink::WebString& type,
- const blink::WebVector<blink::WebString>& codecs,
+ const blink::WebString& codecs,
blink::WebSourceBuffer** source_buffer) {
std::string id = base::GenerateGUID();
- std::vector<std::string> new_codecs(codecs.size());
- for (size_t i = 0; i < codecs.size(); ++i)
- new_codecs[i] = codecs[i].utf8().data();
+
+ std::vector<std::string> parsed_codec_ids;
+ media::ParseCodecString(codecs.utf8().data(), &parsed_codec_ids, false);
WebMediaSource::AddStatus result =
static_cast<WebMediaSource::AddStatus>(
- demuxer_->AddId(id, type.utf8().data(), new_codecs));
+ demuxer_->AddId(id, type.utf8().data(), parsed_codec_ids));
if (result == WebMediaSource::AddStatusOk)
*source_buffer = new WebSourceBufferImpl(id, demuxer_);
diff --git a/chromium/media/blink/webmediasource_impl.h b/chromium/media/blink/webmediasource_impl.h
index ac442f02d37..9251935dc22 100644
--- a/chromium/media/blink/webmediasource_impl.h
+++ b/chromium/media/blink/webmediasource_impl.h
@@ -18,22 +18,23 @@ class ChunkDemuxer;
class MEDIA_EXPORT WebMediaSourceImpl
: NON_EXPORTED_BASE(public blink::WebMediaSource) {
public:
- WebMediaSourceImpl(ChunkDemuxer* demuxer, LogCB log_cb);
- virtual ~WebMediaSourceImpl();
+ WebMediaSourceImpl(ChunkDemuxer* demuxer,
+ const scoped_refptr<MediaLog>& media_log);
+ ~WebMediaSourceImpl() override;
// blink::WebMediaSource implementation.
- virtual AddStatus addSourceBuffer(
+ AddStatus addSourceBuffer(
const blink::WebString& type,
- const blink::WebVector<blink::WebString>& codecs,
- blink::WebSourceBuffer** source_buffer);
- virtual double duration();
- virtual void setDuration(double duration);
- virtual void markEndOfStream(EndOfStreamStatus status);
- virtual void unmarkEndOfStream();
+ const blink::WebString& codecs,
+ blink::WebSourceBuffer** source_buffer) override;
+ double duration() override;
+ void setDuration(double duration) override;
+ void markEndOfStream(EndOfStreamStatus status) override;
+ void unmarkEndOfStream() override;
private:
ChunkDemuxer* demuxer_; // Owned by WebMediaPlayerImpl.
- LogCB log_cb_;
+ scoped_refptr<MediaLog> media_log_;
DISALLOW_COPY_AND_ASSIGN(WebMediaSourceImpl);
};
diff --git a/chromium/media/blink/websourcebuffer_impl.cc b/chromium/media/blink/websourcebuffer_impl.cc
index 0e387785ae1..7e880ddde5c 100644
--- a/chromium/media/blink/websourcebuffer_impl.cc
+++ b/chromium/media/blink/websourcebuffer_impl.cc
@@ -10,6 +10,7 @@
#include "base/bind.h"
#include "base/callback.h"
#include "base/callback_helpers.h"
+#include "media/base/timestamp_constants.h"
#include "media/filters/chunk_demuxer.h"
#include "third_party/WebKit/public/platform/WebSourceBufferClient.h"
@@ -81,6 +82,14 @@ blink::WebTimeRanges WebSourceBufferImpl::buffered() {
return result;
}
+bool WebSourceBufferImpl::evictCodedFrames(double currentPlaybackTime,
+ size_t newDataSize) {
+ return demuxer_->EvictCodedFrames(
+ id_,
+ base::TimeDelta::FromSecondsD(currentPlaybackTime),
+ newDataSize);
+}
+
void WebSourceBufferImpl::append(
const unsigned char* data,
unsigned length,
@@ -101,13 +110,13 @@ void WebSourceBufferImpl::append(
*timestamp_offset = timestamp_offset_.InSecondsF();
}
-void WebSourceBufferImpl::abort() {
- demuxer_->Abort(id_,
- append_window_start_, append_window_end_,
- &timestamp_offset_);
+void WebSourceBufferImpl::resetParserState() {
+ demuxer_->ResetParserState(id_,
+ append_window_start_, append_window_end_,
+ &timestamp_offset_);
- // TODO(wolenetz): abort should be able to modify the caller timestamp offset
- // (just like WebSourceBufferImpl::append).
+ // TODO(wolenetz): resetParserState should be able to modify the caller
+ // timestamp offset (just like WebSourceBufferImpl::append).
// See http://crbug.com/370229 for further details.
}
diff --git a/chromium/media/blink/websourcebuffer_impl.h b/chromium/media/blink/websourcebuffer_impl.h
index 96e37b60954..b03ea7b594a 100644
--- a/chromium/media/blink/websourcebuffer_impl.h
+++ b/chromium/media/blink/websourcebuffer_impl.h
@@ -18,22 +18,24 @@ class ChunkDemuxer;
class WebSourceBufferImpl : public blink::WebSourceBuffer {
public:
WebSourceBufferImpl(const std::string& id, ChunkDemuxer* demuxer);
- virtual ~WebSourceBufferImpl();
+ ~WebSourceBufferImpl() override;
// blink::WebSourceBuffer implementation.
- virtual void setClient(blink::WebSourceBufferClient* client);
- virtual bool setMode(AppendMode mode);
- virtual blink::WebTimeRanges buffered();
- virtual void append(
+ void setClient(blink::WebSourceBufferClient* client) override;
+ bool setMode(AppendMode mode) override;
+ blink::WebTimeRanges buffered() override;
+ bool evictCodedFrames(double currentPlaybackTime,
+ size_t newDataSize) override;
+ void append(
const unsigned char* data,
unsigned length,
- double* timestamp_offset);
- virtual void abort();
- virtual void remove(double start, double end);
- virtual bool setTimestampOffset(double offset);
- virtual void setAppendWindowStart(double start);
- virtual void setAppendWindowEnd(double end);
- virtual void removedFromMediaSource();
+ double* timestamp_offset) override;
+ void resetParserState() override;
+ void remove(double start, double end) override;
+ bool setTimestampOffset(double offset) override;
+ void setAppendWindowStart(double start) override;
+ void setAppendWindowEnd(double end) override;
+ void removedFromMediaSource() override;
private:
// Demuxer callback handler to process an initialization segment received
diff --git a/chromium/media/capture/OWNERS b/chromium/media/capture/content/OWNERS
index 02bdb39030f..02bdb39030f 100644
--- a/chromium/media/capture/OWNERS
+++ b/chromium/media/capture/content/OWNERS
diff --git a/chromium/media/capture/content/README b/chromium/media/capture/content/README
new file mode 100644
index 00000000000..20154f01d70
--- /dev/null
+++ b/chromium/media/capture/content/README
@@ -0,0 +1,5 @@
+This folder contains code referring to content capture, or the capture of
+"already-rendered stuff." The screen capture implementations (desktop, window,
+tab, etc.) in content/ use these tools. The code in this dir is all about
+deciding how to capture content w.r.t. timing, quality, and other performance
+concerns. \ No newline at end of file
diff --git a/chromium/media/capture/animated_content_sampler.cc b/chromium/media/capture/content/animated_content_sampler.cc
index 0fe5148d51e..ae90419b1d7 100644
--- a/chromium/media/capture/animated_content_sampler.cc
+++ b/chromium/media/capture/content/animated_content_sampler.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/capture/animated_content_sampler.h"
+#include "media/capture/content/animated_content_sampler.h"
#include <algorithm>
@@ -41,19 +41,20 @@ const int kDriftCorrectionMillis = 2000;
AnimatedContentSampler::AnimatedContentSampler(
base::TimeDelta min_capture_period)
- : min_capture_period_(min_capture_period),
- sampling_state_(NOT_SAMPLING) {
+ : min_capture_period_(min_capture_period), sampling_state_(NOT_SAMPLING) {
DCHECK_GT(min_capture_period_, base::TimeDelta());
}
-AnimatedContentSampler::~AnimatedContentSampler() {}
+AnimatedContentSampler::~AnimatedContentSampler() {
+}
void AnimatedContentSampler::SetTargetSamplingPeriod(base::TimeDelta period) {
target_sampling_period_ = period;
}
void AnimatedContentSampler::ConsiderPresentationEvent(
- const gfx::Rect& damage_rect, base::TimeTicks event_time) {
+ const gfx::Rect& damage_rect,
+ base::TimeTicks event_time) {
// Analyze the current event and recent history to determine whether animating
// content is detected.
AddObservation(damage_rect, event_time);
@@ -71,9 +72,8 @@ void AnimatedContentSampler::ConsiderPresentationEvent(
// At this point, animation is being detected. Update the sampling period
// since the client may call the accessor method even if the heuristics below
// decide not to sample the current event.
- sampling_period_ = ComputeSamplingPeriod(detected_period_,
- target_sampling_period_,
- min_capture_period_);
+ sampling_period_ = ComputeSamplingPeriod(
+ detected_period_, target_sampling_period_, min_capture_period_);
// If this is the first event causing animating content to be detected,
// transition to the START_SAMPLING state.
@@ -207,13 +207,13 @@ bool AnimatedContentSampler::AnalyzeObservations(
if (last_event_time.is_null()) {
last_event_time = i->event_time;
if ((event_time - last_event_time) >=
- base::TimeDelta::FromMilliseconds(kNonAnimatingThresholdMillis)) {
+ base::TimeDelta::FromMilliseconds(kNonAnimatingThresholdMillis)) {
return false; // Content animation has recently ended.
}
} else {
const base::TimeDelta frame_duration = first_event_time - i->event_time;
if (frame_duration >=
- base::TimeDelta::FromMilliseconds(kNonAnimatingThresholdMillis)) {
+ base::TimeDelta::FromMilliseconds(kNonAnimatingThresholdMillis)) {
break; // Content not animating before this point.
}
sum_frame_durations += frame_duration;
@@ -223,7 +223,7 @@ bool AnimatedContentSampler::AnalyzeObservations(
}
if ((last_event_time - first_event_time) <
- base::TimeDelta::FromMilliseconds(kMinObservationWindowMillis)) {
+ base::TimeDelta::FromMilliseconds(kMinObservationWindowMillis)) {
return false; // Content has not animated for long enough for accuracy.
}
if (num_pixels_damaged_in_chosen <= (num_pixels_damaged_in_all * 2 / 3))
@@ -250,7 +250,7 @@ base::TimeTicks AnimatedContentSampler::ComputeNextFrameTimestamp(
const base::TimeDelta drift = ideal_timestamp - event_time;
const int64 correct_over_num_frames =
base::TimeDelta::FromMilliseconds(kDriftCorrectionMillis) /
- sampling_period_;
+ sampling_period_;
DCHECK_GT(correct_over_num_frames, 0);
return ideal_timestamp - drift / correct_over_num_frames;
@@ -258,9 +258,9 @@ base::TimeTicks AnimatedContentSampler::ComputeNextFrameTimestamp(
// static
base::TimeDelta AnimatedContentSampler::ComputeSamplingPeriod(
- base::TimeDelta animation_period,
- base::TimeDelta target_sampling_period,
- base::TimeDelta min_capture_period) {
+ base::TimeDelta animation_period,
+ base::TimeDelta target_sampling_period,
+ base::TimeDelta min_capture_period) {
// If the animation rate is unknown, return the ideal sampling period.
if (animation_period == base::TimeDelta()) {
return std::max(target_sampling_period, min_capture_period);
@@ -279,7 +279,7 @@ base::TimeDelta AnimatedContentSampler::ComputeSamplingPeriod(
const double target_fps = 1.0 / target_sampling_period.InSecondsF();
const double animation_fps = 1.0 / animation_period.InSecondsF();
if (std::abs(animation_fps / ratio - target_fps) <
- std::abs(animation_fps / (ratio + 1) - target_fps)) {
+ std::abs(animation_fps / (ratio + 1) - target_fps)) {
sampling_period = ratio * animation_period;
} else {
sampling_period = (ratio + 1) * animation_period;
diff --git a/chromium/media/capture/animated_content_sampler.h b/chromium/media/capture/content/animated_content_sampler.h
index bcc2d635cb6..bcc2d635cb6 100644
--- a/chromium/media/capture/animated_content_sampler.h
+++ b/chromium/media/capture/content/animated_content_sampler.h
diff --git a/chromium/media/capture/animated_content_sampler_unittest.cc b/chromium/media/capture/content/animated_content_sampler_unittest.cc
index 2d96018947e..bc3d595d002 100644
--- a/chromium/media/capture/animated_content_sampler_unittest.cc
+++ b/chromium/media/capture/content/animated_content_sampler_unittest.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/capture/animated_content_sampler.h"
+#include "media/capture/content/animated_content_sampler.h"
#include <cmath>
#include <utility>
@@ -34,9 +34,8 @@ class AnimatedContentSamplerTest : public ::testing::Test {
~AnimatedContentSamplerTest() override {}
void SetUp() override {
- const base::TimeDelta since_epoch =
- InitialTestTimeTicks() - base::TimeTicks::UnixEpoch();
- rand_seed_ = abs(static_cast<int>(since_epoch.InMicroseconds()));
+ rand_seed_ = static_cast<int>(
+ (InitialTestTimeTicks() - base::TimeTicks()).InMicroseconds());
sampler_.reset(new AnimatedContentSampler(GetMinCapturePeriod()));
}
@@ -46,9 +45,7 @@ class AnimatedContentSamplerTest : public ::testing::Test {
return base::TimeDelta::FromSeconds(1) / 30;
}
- AnimatedContentSampler* sampler() const {
- return sampler_.get();
- }
+ AnimatedContentSampler* sampler() const { return sampler_.get(); }
int GetRandomInRange(int begin, int end) {
const int len = end - begin;
@@ -182,10 +179,8 @@ TEST_F(AnimatedContentSamplerTest, TargetsSamplingPeriod) {
for (int content_fps = 1; content_fps <= 60; ++content_fps) {
const base::TimeDelta content_period = FpsAsPeriod(content_fps);
- const base::TimeDelta sampling_period =
- ComputeSamplingPeriod(content_period,
- target_sampling_period,
- min_capture_period);
+ const base::TimeDelta sampling_period = ComputeSamplingPeriod(
+ content_period, target_sampling_period, min_capture_period);
if (content_period >= target_sampling_period) {
ASSERT_EQ(content_period, sampling_period);
} else {
@@ -217,14 +212,12 @@ namespace {
// A test scenario for AnimatedContentSamplerParameterizedTest.
struct Scenario {
- base::TimeDelta vsync_interval; // Reflects compositor's update rate.
+ base::TimeDelta vsync_interval; // Reflects compositor's update rate.
base::TimeDelta min_capture_period; // Reflects maximum capture rate.
- base::TimeDelta content_period; // Reflects content animation rate.
+ base::TimeDelta content_period; // Reflects content animation rate.
base::TimeDelta target_sampling_period;
- Scenario(int compositor_frequency,
- int max_frame_rate,
- int content_frame_rate)
+ Scenario(int compositor_frequency, int max_frame_rate, int content_frame_rate)
: vsync_interval(FpsAsPeriod(compositor_frequency)),
min_capture_period(FpsAsPeriod(max_frame_rate)),
content_period(FpsAsPeriod(content_frame_rate)) {
@@ -249,8 +242,7 @@ struct Scenario {
::std::ostream& operator<<(::std::ostream& os, const Scenario& s) {
return os << "{ vsync_interval=" << s.vsync_interval.InMicroseconds()
<< ", min_capture_period=" << s.min_capture_period.InMicroseconds()
- << ", content_period=" << s.content_period.InMicroseconds()
- << " }";
+ << ", content_period=" << s.content_period.InMicroseconds() << " }";
}
} // namespace
@@ -277,8 +269,7 @@ class AnimatedContentSamplerParameterizedTest
base::TimeDelta ComputeExpectedSamplingPeriod() const {
return AnimatedContentSamplerTest::ComputeSamplingPeriod(
- GetParam().content_period,
- GetParam().target_sampling_period,
+ GetParam().content_period, GetParam().target_sampling_period,
GetParam().min_capture_period);
}
@@ -414,14 +405,12 @@ class AnimatedContentSamplerParameterizedTest
}
const double expected_sampling_ratio =
GetParam().content_period.InSecondsF() /
- ComputeExpectedSamplingPeriod().InSecondsF();
+ ComputeExpectedSamplingPeriod().InSecondsF();
const int total_frames = count_dropped_frames_ + count_sampled_frames_;
- EXPECT_NEAR(total_frames * expected_sampling_ratio,
- count_sampled_frames_,
+ EXPECT_NEAR(total_frames * expected_sampling_ratio, count_sampled_frames_,
1.5);
EXPECT_NEAR(total_frames * (1.0 - expected_sampling_ratio),
- count_dropped_frames_,
- 1.5);
+ count_dropped_frames_, 1.5);
}
private:
@@ -440,29 +429,18 @@ TEST_P(AnimatedContentSamplerParameterizedTest, DetectsAnimatedContent) {
// Provide random events and expect no lock-in.
RunEventSequence(
- GenerateEventSequence(begin,
- begin + base::TimeDelta::FromSeconds(5),
- false,
- true,
- &begin),
- false,
- false,
- false,
- "Provide random events and expect no lock-in.");
+ GenerateEventSequence(begin, begin + base::TimeDelta::FromSeconds(5),
+ false, true, &begin),
+ false, false, false, "Provide random events and expect no lock-in.");
if (HasFailure())
return;
// Provide content frame events with some random events mixed-in, and expect
// the sampler to lock-in.
RunEventSequence(
- GenerateEventSequence(begin,
- begin + base::TimeDelta::FromSeconds(5),
- true,
- true,
- &begin),
- false,
- true,
- false,
+ GenerateEventSequence(begin, begin + base::TimeDelta::FromSeconds(5),
+ true, true, &begin),
+ false, true, false,
"Provide content frame events with some random events mixed-in, and "
"expect the sampler to lock-in.");
if (HasFailure())
@@ -471,14 +449,9 @@ TEST_P(AnimatedContentSamplerParameterizedTest, DetectsAnimatedContent) {
// Continue providing content frame events without the random events mixed-in
// and expect the lock-in to hold.
RunEventSequence(
- GenerateEventSequence(begin,
- begin + base::TimeDelta::FromSeconds(5),
- true,
- false,
- &begin),
- true,
- true,
- false,
+ GenerateEventSequence(begin, begin + base::TimeDelta::FromSeconds(5),
+ true, false, &begin),
+ true, true, false,
"Continue providing content frame events without the random events "
"mixed-in and expect the lock-in to hold.");
if (HasFailure())
@@ -487,31 +460,21 @@ TEST_P(AnimatedContentSamplerParameterizedTest, DetectsAnimatedContent) {
// Continue providing just content frame events and expect the lock-in to
// hold. Also simulate the capture pipeline experiencing back pressure.
RunEventSequence(
- GenerateEventSequence(begin,
- begin + base::TimeDelta::FromSeconds(20),
- true,
- false,
- &begin),
- true,
- true,
- true,
+ GenerateEventSequence(begin, begin + base::TimeDelta::FromSeconds(20),
+ true, false, &begin),
+ true, true, true,
"Continue providing just content frame events and expect the lock-in to "
"hold. Also simulate the capture pipeline experiencing back pressure.");
if (HasFailure())
return;
-
// Provide a half-second of random events only, and expect the lock-in to be
// broken.
RunEventSequence(
GenerateEventSequence(begin,
begin + base::TimeDelta::FromMilliseconds(500),
- false,
- true,
- &begin),
- true,
- false,
- false,
+ false, true, &begin),
+ true, false, false,
"Provide a half-second of random events only, and expect the lock-in to "
"be broken.");
if (HasFailure())
@@ -520,14 +483,9 @@ TEST_P(AnimatedContentSamplerParameterizedTest, DetectsAnimatedContent) {
// Now, go back to providing content frame events, and expect the sampler to
// lock-in once again.
RunEventSequence(
- GenerateEventSequence(begin,
- begin + base::TimeDelta::FromSeconds(5),
- true,
- false,
- &begin),
- false,
- true,
- false,
+ GenerateEventSequence(begin, begin + base::TimeDelta::FromSeconds(5),
+ true, false, &begin),
+ false, true, false,
"Now, go back to providing content frame events, and expect the sampler "
"to lock-in once again.");
}
@@ -547,14 +505,9 @@ TEST_P(AnimatedContentSamplerParameterizedTest,
// lock-in.
base::TimeTicks begin = InitialTestTimeTicks();
RunEventSequence(
- GenerateEventSequence(begin,
- begin + base::TimeDelta::FromSeconds(5),
- true,
- false,
- &begin),
- false,
- true,
- false,
+ GenerateEventSequence(begin, begin + base::TimeDelta::FromSeconds(5),
+ true, false, &begin),
+ false, true, false,
"Start the first animation and run for a bit, and expect the sampler to "
"lock-in.");
if (HasFailure())
@@ -564,12 +517,8 @@ TEST_P(AnimatedContentSamplerParameterizedTest,
// size and frame rate, but at a different position. This will should cause
// the sampler to enter an "undetected" state since it's unclear which
// animation should be locked into.
- std::vector<Event> first_animation_events =
- GenerateEventSequence(begin,
- begin + base::TimeDelta::FromSeconds(20),
- true,
- false,
- &begin);
+ std::vector<Event> first_animation_events = GenerateEventSequence(
+ begin, begin + base::TimeDelta::FromSeconds(20), true, false, &begin);
gfx::Rect second_animation_rect(
gfx::Point(0, GetContentDamageRect().height()),
GetContentDamageRect().size());
@@ -591,14 +540,9 @@ TEST_P(AnimatedContentSamplerParameterizedTest,
// Now, run just the first animation, and expect the sampler to lock-in once
// again.
RunEventSequence(
- GenerateEventSequence(begin,
- begin + base::TimeDelta::FromSeconds(5),
- true,
- false,
- &begin),
- false,
- true,
- false,
+ GenerateEventSequence(begin, begin + base::TimeDelta::FromSeconds(5),
+ true, false, &begin),
+ false, true, false,
"Now, run just the first animation, and expect the sampler to lock-in "
"once again.");
if (HasFailure())
@@ -608,12 +552,8 @@ TEST_P(AnimatedContentSamplerParameterizedTest,
// the first animation and damage Rects with twice the area. This will should
// cause the sampler to enter an "undetected" state again. This tests that
// pixel-weighting is being accounted for in the sampler's logic.
- first_animation_events =
- GenerateEventSequence(begin,
- begin + base::TimeDelta::FromSeconds(20),
- true,
- false,
- &begin);
+ first_animation_events = GenerateEventSequence(
+ begin, begin + base::TimeDelta::FromSeconds(20), true, false, &begin);
second_animation_rect.set_width(second_animation_rect.width() * 2);
both_animations_events.clear();
bool include_second_animation_frame = true;
@@ -641,11 +581,7 @@ TEST_P(AnimatedContentSamplerParameterizedTest, FrameTimestampsAreSmooth) {
// once lock-in is continuous.
const base::TimeTicks begin = InitialTestTimeTicks();
std::vector<Event> events = GenerateEventSequence(
- begin,
- begin + base::TimeDelta::FromSeconds(20),
- true,
- false,
- nullptr);
+ begin, begin + base::TimeDelta::FromSeconds(20), true, false, nullptr);
typedef std::vector<base::TimeTicks> Timestamps;
Timestamps frame_timestamps;
for (std::vector<Event>::const_iterator i = events.begin(); i != events.end();
@@ -667,8 +603,8 @@ TEST_P(AnimatedContentSamplerParameterizedTest, FrameTimestampsAreSmooth) {
// of 30 Hz content on a 60 Hz v-sync interval should result in
// display_counts[2] == 10. Quit early if any one frame was obviously
// repeated too many times.
- const int64 max_expected_repeats_per_frame = 1 +
- ComputeExpectedSamplingPeriod() / GetParam().vsync_interval;
+ const int64 max_expected_repeats_per_frame =
+ 1 + ComputeExpectedSamplingPeriod() / GetParam().vsync_interval;
std::vector<size_t> display_counts(max_expected_repeats_per_frame + 1, 0);
base::TimeTicks last_present_time = frame_timestamps.front();
for (Timestamps::const_iterator i = frame_timestamps.begin() + 1;
@@ -721,19 +657,14 @@ TEST_P(AnimatedContentSamplerParameterizedTest, FrameTimestampsAreSmooth) {
// Tests that frame timestamps are "lightly pushed" back towards the original
// presentation event times, which tells us the AnimatedContentSampler can
// account for sources of timestamp drift and correct the drift.
-// flaky: http://crbug.com/487491
TEST_P(AnimatedContentSamplerParameterizedTest,
- DISABLED_FrameTimestampsConvergeTowardsEventTimes) {
+ FrameTimestampsConvergeTowardsEventTimes) {
const int max_drift_increment_millis = 3;
// Generate a full minute of events.
const base::TimeTicks begin = InitialTestTimeTicks();
- std::vector<Event> events =
- GenerateEventSequence(begin,
- begin + base::TimeDelta::FromMinutes(1),
- true,
- false,
- nullptr);
+ std::vector<Event> events = GenerateEventSequence(
+ begin, begin + base::TimeDelta::FromMinutes(1), true, false, nullptr);
// Modify the event sequence so that 1-3 ms of additional drift is suddenly
// present every 100 events. This is meant to simulate that, external to
@@ -762,10 +693,10 @@ TEST_P(AnimatedContentSamplerParameterizedTest,
// the last event's timestamp.
const base::TimeDelta total_error =
events.back().second - last_frame_timestamp;
- const base::TimeDelta max_acceptable_error = GetParam().min_capture_period +
+ const base::TimeDelta max_acceptable_error =
+ GetParam().min_capture_period +
base::TimeDelta::FromMilliseconds(max_drift_increment_millis);
- EXPECT_NEAR(0.0,
- total_error.InMicroseconds(),
+ EXPECT_NEAR(0.0, total_error.InMicroseconds(),
max_acceptable_error.InMicroseconds());
}
@@ -773,47 +704,47 @@ INSTANTIATE_TEST_CASE_P(
,
AnimatedContentSamplerParameterizedTest,
::testing::Values(
- // Typical frame rate content: Compositor runs at 60 Hz, capture at 30
- // Hz, and content video animates at 30, 25, or 24 Hz.
- Scenario(60, 30, 30),
- Scenario(60, 30, 25),
- Scenario(60, 30, 24),
-
- // High frame rate content that leverages the Compositor's
- // capabilities, but capture is still at 30 Hz.
- Scenario(60, 30, 60),
- Scenario(60, 30, 50),
- Scenario(60, 30, 48),
-
- // High frame rate content that leverages the Compositor's
- // capabilities, and capture is also a buttery 60 Hz.
- Scenario(60, 60, 60),
- Scenario(60, 60, 50),
- Scenario(60, 60, 48),
-
- // High frame rate content that leverages the Compositor's
- // capabilities, but the client has disabled HFR sampling.
- Scenario(60, 60, 60, 30),
- Scenario(60, 60, 50, 30),
- Scenario(60, 60, 48, 30),
-
- // On some platforms, the Compositor runs at 50 Hz.
- Scenario(50, 30, 30),
- Scenario(50, 30, 25),
- Scenario(50, 30, 24),
- Scenario(50, 30, 50),
- Scenario(50, 30, 48),
-
- // Stable, but non-standard content frame rates.
- Scenario(60, 30, 16),
- Scenario(60, 30, 20),
- Scenario(60, 30, 23),
- Scenario(60, 30, 26),
- Scenario(60, 30, 27),
- Scenario(60, 30, 28),
- Scenario(60, 30, 29),
- Scenario(60, 30, 31),
- Scenario(60, 30, 32),
- Scenario(60, 30, 33)));
+ // Typical frame rate content: Compositor runs at 60 Hz, capture at 30
+ // Hz, and content video animates at 30, 25, or 24 Hz.
+ Scenario(60, 30, 30),
+ Scenario(60, 30, 25),
+ Scenario(60, 30, 24),
+
+ // High frame rate content that leverages the Compositor's
+ // capabilities, but capture is still at 30 Hz.
+ Scenario(60, 30, 60),
+ Scenario(60, 30, 50),
+ Scenario(60, 30, 48),
+
+ // High frame rate content that leverages the Compositor's
+ // capabilities, and capture is also a buttery 60 Hz.
+ Scenario(60, 60, 60),
+ Scenario(60, 60, 50),
+ Scenario(60, 60, 48),
+
+ // High frame rate content that leverages the Compositor's
+ // capabilities, but the client has disabled HFR sampling.
+ Scenario(60, 60, 60, 30),
+ Scenario(60, 60, 50, 30),
+ Scenario(60, 60, 48, 30),
+
+ // On some platforms, the Compositor runs at 50 Hz.
+ Scenario(50, 30, 30),
+ Scenario(50, 30, 25),
+ Scenario(50, 30, 24),
+ Scenario(50, 30, 50),
+ Scenario(50, 30, 48),
+
+ // Stable, but non-standard content frame rates.
+ Scenario(60, 30, 16),
+ Scenario(60, 30, 20),
+ Scenario(60, 30, 23),
+ Scenario(60, 30, 26),
+ Scenario(60, 30, 27),
+ Scenario(60, 30, 28),
+ Scenario(60, 30, 29),
+ Scenario(60, 30, 31),
+ Scenario(60, 30, 32),
+ Scenario(60, 30, 33)));
} // namespace media
diff --git a/chromium/media/capture/capture_resolution_chooser.cc b/chromium/media/capture/content/capture_resolution_chooser.cc
index 768e129f710..3c7f3e58515 100644
--- a/chromium/media/capture/capture_resolution_chooser.cc
+++ b/chromium/media/capture/content/capture_resolution_chooser.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/capture/capture_resolution_chooser.h"
+#include "media/capture/content/capture_resolution_chooser.h"
#include <algorithm>
#include <limits>
@@ -86,8 +86,8 @@ CaptureResolutionChooser::CaptureResolutionChooser(
const gfx::Size& max_frame_size,
ResolutionChangePolicy resolution_change_policy)
: max_frame_size_(max_frame_size),
- min_frame_size_(ComputeMinimumCaptureSize(max_frame_size,
- resolution_change_policy)),
+ min_frame_size_(
+ ComputeMinimumCaptureSize(max_frame_size, resolution_change_policy)),
resolution_change_policy_(resolution_change_policy),
target_area_(std::numeric_limits<decltype(target_area_)>::max()) {
DCHECK_LT(0, max_frame_size_.width());
@@ -100,7 +100,8 @@ CaptureResolutionChooser::CaptureResolutionChooser(
RecomputeCaptureSize();
}
-CaptureResolutionChooser::~CaptureResolutionChooser() {}
+CaptureResolutionChooser::~CaptureResolutionChooser() {
+}
void CaptureResolutionChooser::SetSourceSize(const gfx::Size& source_size) {
if (source_size.IsEmpty())
@@ -114,8 +115,7 @@ void CaptureResolutionChooser::SetSourceSize(const gfx::Size& source_size) {
case RESOLUTION_POLICY_FIXED_ASPECT_RATIO:
UpdateSnappedFrameSizes(ComputeBoundedCaptureSize(
- PadToMatchAspectRatio(source_size, max_frame_size_),
- min_frame_size_,
+ PadToMatchAspectRatio(source_size, max_frame_size_), min_frame_size_,
max_frame_size_));
RecomputeCaptureSize();
break;
@@ -158,7 +158,8 @@ gfx::Size CaptureResolutionChooser::FindNearestFrameSize(int area) const {
}
gfx::Size CaptureResolutionChooser::FindLargerFrameSize(
- int area, int num_steps_up) const {
+ int area,
+ int num_steps_up) const {
DCHECK_GT(num_steps_up, 0);
const auto begin = snapped_sizes_.begin();
const auto end = snapped_sizes_.end();
@@ -174,7 +175,8 @@ gfx::Size CaptureResolutionChooser::FindLargerFrameSize(
}
gfx::Size CaptureResolutionChooser::FindSmallerFrameSize(
- int area, int num_steps_down) const {
+ int area,
+ int num_steps_down) const {
DCHECK_GT(num_steps_down, 0);
const auto begin = snapped_sizes_.begin();
const auto end = snapped_sizes_.end();
@@ -209,8 +211,7 @@ void CaptureResolutionChooser::UpdateSnappedFrameSizes(
// the prior size.
int last_area = constrained_size.GetArea();
for (int height = constrained_size.height() - kSnappedHeightStep;
- height >= min_frame_size_.height();
- height -= kSnappedHeightStep) {
+ height >= min_frame_size_.height(); height -= kSnappedHeightStep) {
const int width =
height * constrained_size.width() / constrained_size.height();
if (width < min_frame_size_.width())
@@ -231,7 +232,7 @@ void CaptureResolutionChooser::UpdateSnappedFrameSizes(
for (const gfx::Size& size : snapped_sizes_)
stringified_sizes.push_back(size.ToString());
VLOG_STREAM(1) << "Recomputed snapped frame sizes: "
- << JoinString(stringified_sizes, " <--> ");
+ << base::JoinString(stringified_sizes, " <--> ");
}
}
diff --git a/chromium/media/capture/capture_resolution_chooser.h b/chromium/media/capture/content/capture_resolution_chooser.h
index 144cfaa295c..f223fdfd93e 100644
--- a/chromium/media/capture/capture_resolution_chooser.h
+++ b/chromium/media/capture/content/capture_resolution_chooser.h
@@ -36,15 +36,12 @@ class MEDIA_EXPORT CaptureResolutionChooser {
// media::ResolutionChangePolicy determines whether the variable frame
// resolutions being computed must adhere to a fixed aspect ratio or not, or
// that there must only be a single fixed resolution.
- CaptureResolutionChooser(
- const gfx::Size& max_frame_size,
- ResolutionChangePolicy resolution_change_policy);
+ CaptureResolutionChooser(const gfx::Size& max_frame_size,
+ ResolutionChangePolicy resolution_change_policy);
~CaptureResolutionChooser();
// Returns the current capture frame resolution to use.
- gfx::Size capture_size() const {
- return capture_size_;
- }
+ gfx::Size capture_size() const { return capture_size_; }
// Updates the capture size based on a change in the resolution of the source
// content.
diff --git a/chromium/media/capture/capture_resolution_chooser_unittest.cc b/chromium/media/capture/content/capture_resolution_chooser_unittest.cc
index 8eee2a83561..4a41f0df3fb 100644
--- a/chromium/media/capture/capture_resolution_chooser_unittest.cc
+++ b/chromium/media/capture/content/capture_resolution_chooser_unittest.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/capture/capture_resolution_chooser.h"
+#include "media/capture/content/capture_resolution_chooser.h"
#include "base/location.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -31,8 +31,7 @@ void ExpectIsWithinBoundsAndSameAspectRatio(const Location& location,
EXPECT_GE(max_size.width(), size.width());
EXPECT_GE(max_size.height(), size.height());
EXPECT_NEAR(static_cast<double>(max_size.width()) / max_size.height(),
- static_cast<double>(size.width()) / size.height(),
- 0.01);
+ static_cast<double>(size.width()) / size.height(), 0.01);
}
// Test that the correct snapped frame sizes are computed for a |chooser|
@@ -41,11 +40,23 @@ void ExpectIsWithinBoundsAndSameAspectRatio(const Location& location,
void TestSnappedFrameSizes(CaptureResolutionChooser* chooser,
const gfx::Size& smallest_size) {
const int kSizes[17][2] = {
- { kMaxFrameWidth, kMaxFrameHeight },
- { 3520, 1980 }, { 3200, 1800 }, { 2880, 1620}, { 2560, 1440 },
- { 2240, 1260 }, { 1920, 1080 }, { 1760, 990 }, { 1600, 900 },
- { 1440, 810 }, { 1280, 720 }, { 1120, 630 }, { 960, 540 },
- { 800, 450 }, { 640, 360 }, { 480, 270 }, { 320, 180 },
+ {kMaxFrameWidth, kMaxFrameHeight},
+ {3520, 1980},
+ {3200, 1800},
+ {2880, 1620},
+ {2560, 1440},
+ {2240, 1260},
+ {1920, 1080},
+ {1760, 990},
+ {1600, 900},
+ {1440, 810},
+ {1280, 720},
+ {1120, 630},
+ {960, 540},
+ {800, 450},
+ {640, 360},
+ {480, 270},
+ {320, 180},
};
const gfx::Size largest_size(kMaxFrameWidth, kMaxFrameHeight);
@@ -70,11 +81,11 @@ void TestSnappedFrameSizes(CaptureResolutionChooser* chooser,
// Test the "find Nth lower size" logic.
for (size_t skips = 1; skips < 4; ++skips) {
for (size_t i = skips; i < arraysize(kSizes); ++i) {
- EXPECT_EQ(gfx::Size(kSizes[i][0], kSizes[i][1]),
- chooser->FindSmallerFrameSize(
- gfx::Size(kSizes[i - skips][0],
- kSizes[i - skips][1]).GetArea(),
- skips));
+ EXPECT_EQ(
+ gfx::Size(kSizes[i][0], kSizes[i][1]),
+ chooser->FindSmallerFrameSize(
+ gfx::Size(kSizes[i - skips][0], kSizes[i - skips][1]).GetArea(),
+ skips));
}
}
@@ -83,8 +94,7 @@ void TestSnappedFrameSizes(CaptureResolutionChooser* chooser,
for (size_t i = skips; i < arraysize(kSizes); ++i) {
EXPECT_EQ(gfx::Size(kSizes[i - skips][0], kSizes[i - skips][1]),
chooser->FindLargerFrameSize(
- gfx::Size(kSizes[i][0], kSizes[i][1]).GetArea(),
- skips));
+ gfx::Size(kSizes[i][0], kSizes[i][1]).GetArea(), skips));
}
}
@@ -215,33 +225,32 @@ TEST(CaptureResolutionChooserTest,
TEST(CaptureResolutionChooserTest,
FixedAspectRatioPolicy_CaptureSizeHasSameAspectRatio) {
- CaptureResolutionChooser chooser(
- gfx::Size(kMaxFrameWidth, kMaxFrameHeight),
- RESOLUTION_POLICY_FIXED_ASPECT_RATIO);
+ CaptureResolutionChooser chooser(gfx::Size(kMaxFrameWidth, kMaxFrameHeight),
+ RESOLUTION_POLICY_FIXED_ASPECT_RATIO);
// Starting condition.
const gfx::Size min_size(kMinFrameWidth, kMinFrameHeight);
const gfx::Size max_size(kMaxFrameWidth, kMaxFrameHeight);
- ExpectIsWithinBoundsAndSameAspectRatio(
- FROM_HERE, min_size, max_size, chooser.capture_size());
+ ExpectIsWithinBoundsAndSameAspectRatio(FROM_HERE, min_size, max_size,
+ chooser.capture_size());
// Max size in --> max size out.
chooser.SetSourceSize(gfx::Size(kMaxFrameWidth, kMaxFrameHeight));
- ExpectIsWithinBoundsAndSameAspectRatio(
- FROM_HERE, min_size, max_size, chooser.capture_size());
+ ExpectIsWithinBoundsAndSameAspectRatio(FROM_HERE, min_size, max_size,
+ chooser.capture_size());
// Various source sizes within bounds.
chooser.SetSourceSize(gfx::Size(640, 480));
- ExpectIsWithinBoundsAndSameAspectRatio(
- FROM_HERE, min_size, max_size, chooser.capture_size());
+ ExpectIsWithinBoundsAndSameAspectRatio(FROM_HERE, min_size, max_size,
+ chooser.capture_size());
chooser.SetSourceSize(gfx::Size(480, 640));
- ExpectIsWithinBoundsAndSameAspectRatio(
- FROM_HERE, min_size, max_size, chooser.capture_size());
+ ExpectIsWithinBoundsAndSameAspectRatio(FROM_HERE, min_size, max_size,
+ chooser.capture_size());
chooser.SetSourceSize(gfx::Size(640, 640));
- ExpectIsWithinBoundsAndSameAspectRatio(
- FROM_HERE, min_size, max_size, chooser.capture_size());
+ ExpectIsWithinBoundsAndSameAspectRatio(FROM_HERE, min_size, max_size,
+ chooser.capture_size());
// Bad source size results in no update.
const gfx::Size unchanged_size = chooser.capture_size();
@@ -251,30 +260,30 @@ TEST(CaptureResolutionChooserTest,
// Downscaling size (preserving aspect ratio) when source size exceeds the
// upper bounds.
chooser.SetSourceSize(gfx::Size(kMaxFrameWidth * 2, kMaxFrameHeight * 2));
- ExpectIsWithinBoundsAndSameAspectRatio(
- FROM_HERE, min_size, max_size, chooser.capture_size());
+ ExpectIsWithinBoundsAndSameAspectRatio(FROM_HERE, min_size, max_size,
+ chooser.capture_size());
chooser.SetSourceSize(gfx::Size(kMaxFrameWidth * 2, kMaxFrameHeight));
- ExpectIsWithinBoundsAndSameAspectRatio(
- FROM_HERE, min_size, max_size, chooser.capture_size());
+ ExpectIsWithinBoundsAndSameAspectRatio(FROM_HERE, min_size, max_size,
+ chooser.capture_size());
chooser.SetSourceSize(gfx::Size(kMaxFrameWidth, kMaxFrameHeight * 2));
- ExpectIsWithinBoundsAndSameAspectRatio(
- FROM_HERE, min_size, max_size, chooser.capture_size());
+ ExpectIsWithinBoundsAndSameAspectRatio(FROM_HERE, min_size, max_size,
+ chooser.capture_size());
// Upscaling size (preserving aspect ratio) when source size is under the
// lower bounds.
chooser.SetSourceSize(gfx::Size(kMinFrameWidth / 2, kMinFrameHeight / 2));
- ExpectIsWithinBoundsAndSameAspectRatio(
- FROM_HERE, min_size, max_size, chooser.capture_size());
+ ExpectIsWithinBoundsAndSameAspectRatio(FROM_HERE, min_size, max_size,
+ chooser.capture_size());
chooser.SetSourceSize(gfx::Size(kMinFrameWidth / 2, kMaxFrameHeight));
- ExpectIsWithinBoundsAndSameAspectRatio(
- FROM_HERE, min_size, max_size, chooser.capture_size());
+ ExpectIsWithinBoundsAndSameAspectRatio(FROM_HERE, min_size, max_size,
+ chooser.capture_size());
chooser.SetSourceSize(gfx::Size(kMinFrameWidth, kMinFrameHeight / 2));
- ExpectIsWithinBoundsAndSameAspectRatio(
- FROM_HERE, min_size, max_size, chooser.capture_size());
+ ExpectIsWithinBoundsAndSameAspectRatio(FROM_HERE, min_size, max_size,
+ chooser.capture_size());
// For a chooser configured with the "fixed aspect ratio" policy, the smallest
// possible computed size is the one with 180 lines of resolution and the same
@@ -288,8 +297,8 @@ TEST(CaptureResolutionChooserTest,
TEST(CaptureResolutionChooserTest,
AnyWithinLimitPolicy_CaptureSizeIsAnythingWithinLimits) {
const gfx::Size max_size(kMaxFrameWidth, kMaxFrameHeight);
- CaptureResolutionChooser chooser(
- max_size, RESOLUTION_POLICY_ANY_WITHIN_LIMIT);
+ CaptureResolutionChooser chooser(max_size,
+ RESOLUTION_POLICY_ANY_WITHIN_LIMIT);
// Starting condition.
EXPECT_EQ(max_size, chooser.capture_size());
diff --git a/chromium/media/capture/feedback_signal_accumulator.cc b/chromium/media/capture/content/feedback_signal_accumulator.cc
index 0b073f83fa4..896c23a8a0b 100644
--- a/chromium/media/capture/feedback_signal_accumulator.cc
+++ b/chromium/media/capture/content/feedback_signal_accumulator.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/capture/feedback_signal_accumulator.h"
+#include "media/capture/content/feedback_signal_accumulator.h"
#include <algorithm>
#include <cmath>
diff --git a/chromium/media/capture/feedback_signal_accumulator.h b/chromium/media/capture/content/feedback_signal_accumulator.h
index 4139f57e7fa..c6046ded0fe 100644
--- a/chromium/media/capture/feedback_signal_accumulator.h
+++ b/chromium/media/capture/content/feedback_signal_accumulator.h
@@ -54,9 +54,9 @@ class MEDIA_EXPORT FeedbackSignalAccumulator {
// accumulated average.
const base::TimeDelta half_life_;
- base::TimeTicks reset_time_; // |timestamp| passed in last call to Reset().
- double average_; // Current accumulated average.
- double update_value_; // Latest |value| accepted by Update().
+ base::TimeTicks reset_time_; // |timestamp| passed in last call to Reset().
+ double average_; // Current accumulated average.
+ double update_value_; // Latest |value| accepted by Update().
base::TimeTicks update_time_; // Latest |timestamp| accepted by Update().
double prior_average_; // Accumulated average before last call to Update().
base::TimeTicks prior_update_time_; // |timestamp| in prior call to Update().
diff --git a/chromium/media/capture/feedback_signal_accumulator_unittest.cc b/chromium/media/capture/content/feedback_signal_accumulator_unittest.cc
index 8696056ae19..9d0f925adcd 100644
--- a/chromium/media/capture/feedback_signal_accumulator_unittest.cc
+++ b/chromium/media/capture/content/feedback_signal_accumulator_unittest.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/capture/feedback_signal_accumulator.h"
+#include "media/capture/content/feedback_signal_accumulator.h"
#include "testing/gtest/include/gtest/gtest.h"
diff --git a/chromium/media/capture/screen_capture_device_core.cc b/chromium/media/capture/content/screen_capture_device_core.cc
index 431fe402ecc..d99ca5a2958 100644
--- a/chromium/media/capture/screen_capture_device_core.cc
+++ b/chromium/media/capture/content/screen_capture_device_core.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/capture/screen_capture_device_core.h"
+#include "media/capture/content/screen_capture_device_core.h"
#include "base/bind.h"
#include "base/logging.h"
@@ -16,16 +16,17 @@ namespace media {
namespace {
-void DeleteCaptureMachine(
- scoped_ptr<VideoCaptureMachine> capture_machine) {
+void DeleteCaptureMachine(scoped_ptr<VideoCaptureMachine> capture_machine) {
capture_machine.reset();
}
} // namespace
-VideoCaptureMachine::VideoCaptureMachine() {}
+VideoCaptureMachine::VideoCaptureMachine() {
+}
-VideoCaptureMachine::~VideoCaptureMachine() {}
+VideoCaptureMachine::~VideoCaptureMachine() {
+}
bool VideoCaptureMachine::IsAutoThrottlingEnabled() const {
return false;
@@ -57,8 +58,7 @@ void ScreenCaptureDeviceCore::AllocateAndStart(
client.Pass(), params, capture_machine_->IsAutoThrottlingEnabled());
capture_machine_->Start(
- oracle_proxy_,
- params,
+ oracle_proxy_, params,
base::Bind(&ScreenCaptureDeviceCore::CaptureStarted, AsWeakPtr()));
TransitionStateTo(kCapturing);
@@ -89,8 +89,7 @@ void ScreenCaptureDeviceCore::CaptureStarted(bool success) {
ScreenCaptureDeviceCore::ScreenCaptureDeviceCore(
scoped_ptr<VideoCaptureMachine> capture_machine)
- : state_(kIdle),
- capture_machine_(capture_machine.Pass()) {
+ : state_(kIdle), capture_machine_(capture_machine.Pass()) {
DCHECK(capture_machine_.get());
}
@@ -98,8 +97,8 @@ ScreenCaptureDeviceCore::~ScreenCaptureDeviceCore() {
DCHECK(thread_checker_.CalledOnValidThread());
DCHECK_NE(state_, kCapturing);
if (capture_machine_) {
- capture_machine_->Stop(base::Bind(&DeleteCaptureMachine,
- base::Passed(&capture_machine_)));
+ capture_machine_->Stop(
+ base::Bind(&DeleteCaptureMachine, base::Passed(&capture_machine_)));
}
DVLOG(1) << "ScreenCaptureDeviceCore@" << this << " destroying.";
}
@@ -109,10 +108,9 @@ void ScreenCaptureDeviceCore::TransitionStateTo(State next_state) {
#ifndef NDEBUG
static const char* kStateNames[] = {
- "Idle", "Allocated", "Capturing", "Error"
- };
- DVLOG(1) << "State change: " << kStateNames[state_]
- << " --> " << kStateNames[next_state];
+ "Idle", "Allocated", "Capturing", "Error"};
+ DVLOG(1) << "State change: " << kStateNames[state_] << " --> "
+ << kStateNames[next_state];
#endif
state_ = next_state;
diff --git a/chromium/media/capture/screen_capture_device_core.h b/chromium/media/capture/content/screen_capture_device_core.h
index 03d6625f94f..0720094f5b8 100644
--- a/chromium/media/capture/screen_capture_device_core.h
+++ b/chromium/media/capture/content/screen_capture_device_core.h
@@ -11,8 +11,8 @@
#include "base/memory/weak_ptr.h"
#include "base/threading/thread_checker.h"
#include "media/base/media_export.h"
-#include "media/capture/thread_safe_capture_oracle.h"
-#include "media/video/capture/video_capture_device.h"
+#include "media/capture/content/thread_safe_capture_oracle.h"
+#include "media/capture/video/video_capture_device.h"
namespace media {
@@ -58,8 +58,7 @@ class MEDIA_EXPORT VideoCaptureMachine {
class MEDIA_EXPORT ScreenCaptureDeviceCore
: public base::SupportsWeakPtr<ScreenCaptureDeviceCore> {
public:
- ScreenCaptureDeviceCore(
- scoped_ptr<VideoCaptureMachine> capture_machine);
+ ScreenCaptureDeviceCore(scoped_ptr<VideoCaptureMachine> capture_machine);
virtual ~ScreenCaptureDeviceCore();
// Asynchronous requests to change ScreenCaptureDeviceCore state.
@@ -69,11 +68,7 @@ class MEDIA_EXPORT ScreenCaptureDeviceCore
private:
// Flag indicating current state.
- enum State {
- kIdle,
- kCapturing,
- kError
- };
+ enum State { kIdle, kCapturing, kError };
void TransitionStateTo(State next_state);
@@ -103,7 +98,6 @@ class MEDIA_EXPORT ScreenCaptureDeviceCore
DISALLOW_COPY_AND_ASSIGN(ScreenCaptureDeviceCore);
};
-
} // namespace media
#endif // MEDIA_CAPTURE_SCREEN_CAPTURE_DEVICE_CORE_H_
diff --git a/chromium/media/capture/smooth_event_sampler.cc b/chromium/media/capture/content/smooth_event_sampler.cc
index 7064f86a5b4..79031ebc749 100644
--- a/chromium/media/capture/smooth_event_sampler.cc
+++ b/chromium/media/capture/content/smooth_event_sampler.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/capture/smooth_event_sampler.h"
+#include "media/capture/content/smooth_event_sampler.h"
#include <algorithm>
@@ -21,9 +21,9 @@ const int kOverdueDirtyThresholdMillis = 250; // 4 FPS
SmoothEventSampler::SmoothEventSampler(base::TimeDelta min_capture_period,
int redundant_capture_goal)
- : redundant_capture_goal_(redundant_capture_goal),
- overdue_sample_count_(0),
- token_bucket_(base::TimeDelta::Max()) {
+ : redundant_capture_goal_(redundant_capture_goal),
+ overdue_sample_count_(0),
+ token_bucket_(base::TimeDelta::Max()) {
SetMinCapturePeriod(min_capture_period);
}
@@ -49,8 +49,7 @@ void SmoothEventSampler::ConsiderPresentationEvent(base::TimeTicks event_time) {
if (token_bucket_ > token_bucket_capacity_)
token_bucket_ = token_bucket_capacity_;
}
- TRACE_COUNTER1("gpu.capture",
- "MirroringTokenBucketUsec",
+ TRACE_COUNTER1("gpu.capture", "MirroringTokenBucketUsec",
std::max<int64>(0, token_bucket_.InMicroseconds()));
}
current_event_ = event_time;
@@ -64,8 +63,7 @@ void SmoothEventSampler::RecordSample() {
token_bucket_ -= min_capture_period_;
if (token_bucket_ < base::TimeDelta())
token_bucket_ = base::TimeDelta();
- TRACE_COUNTER1("gpu.capture",
- "MirroringTokenBucketUsec",
+ TRACE_COUNTER1("gpu.capture", "MirroringTokenBucketUsec",
std::max<int64>(0, token_bucket_.InMicroseconds()));
if (HasUnrecordedEvent()) {
@@ -76,8 +74,8 @@ void SmoothEventSampler::RecordSample() {
}
}
-bool SmoothEventSampler::IsOverdueForSamplingAt(base::TimeTicks event_time)
- const {
+bool SmoothEventSampler::IsOverdueForSamplingAt(
+ base::TimeTicks event_time) const {
DCHECK(!event_time.is_null());
if (!HasUnrecordedEvent() && overdue_sample_count_ >= redundant_capture_goal_)
@@ -90,7 +88,7 @@ bool SmoothEventSampler::IsOverdueForSamplingAt(base::TimeTicks event_time)
// won't request a sample just yet.
base::TimeDelta dirty_interval = event_time - last_sample_;
return dirty_interval >=
- base::TimeDelta::FromMilliseconds(kOverdueDirtyThresholdMillis);
+ base::TimeDelta::FromMilliseconds(kOverdueDirtyThresholdMillis);
}
bool SmoothEventSampler::HasUnrecordedEvent() const {
diff --git a/chromium/media/capture/smooth_event_sampler.h b/chromium/media/capture/content/smooth_event_sampler.h
index c250eb486e1..c250eb486e1 100644
--- a/chromium/media/capture/smooth_event_sampler.h
+++ b/chromium/media/capture/content/smooth_event_sampler.h
diff --git a/chromium/media/capture/content/smooth_event_sampler_unittest.cc b/chromium/media/capture/content/smooth_event_sampler_unittest.cc
new file mode 100644
index 00000000000..b3234aa20d6
--- /dev/null
+++ b/chromium/media/capture/content/smooth_event_sampler_unittest.cc
@@ -0,0 +1,704 @@
+// Copyright (c) 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/capture/content/smooth_event_sampler.h"
+
+#include "base/strings/stringprintf.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+namespace {
+
+bool AddEventAndConsiderSampling(SmoothEventSampler* sampler,
+ base::TimeTicks event_time) {
+ sampler->ConsiderPresentationEvent(event_time);
+ return sampler->ShouldSample();
+}
+
+void SteadyStateSampleAndAdvance(base::TimeDelta vsync,
+ SmoothEventSampler* sampler,
+ base::TimeTicks* t) {
+ ASSERT_TRUE(AddEventAndConsiderSampling(sampler, *t));
+ ASSERT_TRUE(sampler->HasUnrecordedEvent());
+ sampler->RecordSample();
+ ASSERT_FALSE(sampler->HasUnrecordedEvent());
+ ASSERT_FALSE(sampler->IsOverdueForSamplingAt(*t));
+ *t += vsync;
+ ASSERT_FALSE(sampler->IsOverdueForSamplingAt(*t));
+}
+
+void SteadyStateNoSampleAndAdvance(base::TimeDelta vsync,
+ SmoothEventSampler* sampler,
+ base::TimeTicks* t) {
+ ASSERT_FALSE(AddEventAndConsiderSampling(sampler, *t));
+ ASSERT_TRUE(sampler->HasUnrecordedEvent());
+ ASSERT_FALSE(sampler->IsOverdueForSamplingAt(*t));
+ *t += vsync;
+ ASSERT_FALSE(sampler->IsOverdueForSamplingAt(*t));
+}
+
+base::TimeTicks InitialTestTimeTicks() {
+ return base::TimeTicks() + base::TimeDelta::FromSeconds(1);
+}
+
+void TestRedundantCaptureStrategy(base::TimeDelta capture_period,
+ int redundant_capture_goal,
+ SmoothEventSampler* sampler,
+ base::TimeTicks* t) {
+ // Before any events have been considered, we're overdue for sampling.
+ ASSERT_TRUE(sampler->IsOverdueForSamplingAt(*t));
+
+ // Consider the first event. We want to sample that.
+ ASSERT_FALSE(sampler->HasUnrecordedEvent());
+ ASSERT_TRUE(AddEventAndConsiderSampling(sampler, *t));
+ ASSERT_TRUE(sampler->HasUnrecordedEvent());
+ sampler->RecordSample();
+ ASSERT_FALSE(sampler->HasUnrecordedEvent());
+
+ // After more than 250 ms has passed without considering an event, we should
+ // repeatedly be overdue for sampling. However, once the redundant capture
+ // goal is achieved, we should no longer be overdue for sampling.
+ *t += base::TimeDelta::FromMilliseconds(250);
+ for (int i = 0; i < redundant_capture_goal; i++) {
+ SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
+ ASSERT_FALSE(sampler->HasUnrecordedEvent());
+ ASSERT_TRUE(sampler->IsOverdueForSamplingAt(*t))
+ << "Should sample until redundant capture goal is hit";
+ sampler->RecordSample();
+ *t += capture_period; // Timer fires once every capture period.
+ }
+ ASSERT_FALSE(sampler->IsOverdueForSamplingAt(*t))
+ << "Should not be overdue once redundant capture goal achieved.";
+}
+
+} // namespace
+
+// 60Hz sampled at 30Hz should produce 30Hz. In addition, this test contains
+// much more comprehensive before/after/edge-case scenarios than the others.
+TEST(SmoothEventSamplerTest, Sample60HertzAt30Hertz) {
+ const base::TimeDelta capture_period = base::TimeDelta::FromSeconds(1) / 30;
+ const int redundant_capture_goal = 200;
+ const base::TimeDelta vsync = base::TimeDelta::FromSeconds(1) / 60;
+
+ SmoothEventSampler sampler(capture_period, redundant_capture_goal);
+ base::TimeTicks t = InitialTestTimeTicks();
+
+ TestRedundantCaptureStrategy(capture_period, redundant_capture_goal, &sampler,
+ &t);
+
+ // Steady state, we should capture every other vsync, indefinitely.
+ for (int i = 0; i < 100; i++) {
+ SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
+ SteadyStateSampleAndAdvance(vsync, &sampler, &t);
+ SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
+ }
+
+ // Now pretend we're limited by backpressure in the pipeline. In this scenario
+ // case we are adding events but not sampling them.
+ for (int i = 0; i < 20; i++) {
+ SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
+ ASSERT_EQ(i >= 14, sampler.IsOverdueForSamplingAt(t));
+ ASSERT_TRUE(AddEventAndConsiderSampling(&sampler, t));
+ ASSERT_TRUE(sampler.HasUnrecordedEvent());
+ t += vsync;
+ }
+
+ // Now suppose we can sample again. We should be back in the steady state,
+ // but at a different phase.
+ ASSERT_TRUE(sampler.IsOverdueForSamplingAt(t));
+ for (int i = 0; i < 100; i++) {
+ SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
+ SteadyStateSampleAndAdvance(vsync, &sampler, &t);
+ SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
+ }
+}
+
+// 50Hz sampled at 30Hz should produce a sequence where some frames are skipped.
+TEST(SmoothEventSamplerTest, Sample50HertzAt30Hertz) {
+ const base::TimeDelta capture_period = base::TimeDelta::FromSeconds(1) / 30;
+ const int redundant_capture_goal = 2;
+ const base::TimeDelta vsync = base::TimeDelta::FromSeconds(1) / 50;
+
+ SmoothEventSampler sampler(capture_period, redundant_capture_goal);
+ base::TimeTicks t = InitialTestTimeTicks();
+
+ TestRedundantCaptureStrategy(capture_period, redundant_capture_goal, &sampler,
+ &t);
+
+ // Steady state, we should capture 1st, 2nd and 4th frames out of every five
+ // frames, indefinitely.
+ for (int i = 0; i < 100; i++) {
+ SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
+ SteadyStateSampleAndAdvance(vsync, &sampler, &t);
+ SteadyStateSampleAndAdvance(vsync, &sampler, &t);
+ SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
+ SteadyStateSampleAndAdvance(vsync, &sampler, &t);
+ SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
+ }
+
+ // Now pretend we're limited by backpressure in the pipeline. In this scenario
+ // case we are adding events but not sampling them.
+ for (int i = 0; i < 20; i++) {
+ SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
+ ASSERT_EQ(i >= 11, sampler.IsOverdueForSamplingAt(t));
+ ASSERT_TRUE(AddEventAndConsiderSampling(&sampler, t));
+ t += vsync;
+ }
+
+ // Now suppose we can sample again. We should be back in the steady state
+ // again.
+ ASSERT_TRUE(sampler.IsOverdueForSamplingAt(t));
+ for (int i = 0; i < 100; i++) {
+ SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
+ SteadyStateSampleAndAdvance(vsync, &sampler, &t);
+ SteadyStateSampleAndAdvance(vsync, &sampler, &t);
+ SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
+ SteadyStateSampleAndAdvance(vsync, &sampler, &t);
+ SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
+ }
+}
+
+// 75Hz sampled at 30Hz should produce a sequence where some frames are skipped.
+TEST(SmoothEventSamplerTest, Sample75HertzAt30Hertz) {
+ const base::TimeDelta capture_period = base::TimeDelta::FromSeconds(1) / 30;
+ const int redundant_capture_goal = 32;
+ const base::TimeDelta vsync = base::TimeDelta::FromSeconds(1) / 75;
+
+ SmoothEventSampler sampler(capture_period, redundant_capture_goal);
+ base::TimeTicks t = InitialTestTimeTicks();
+
+ TestRedundantCaptureStrategy(capture_period, redundant_capture_goal, &sampler,
+ &t);
+
+ // Steady state, we should capture 1st and 3rd frames out of every five
+ // frames, indefinitely.
+ SteadyStateSampleAndAdvance(vsync, &sampler, &t);
+ SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
+ for (int i = 0; i < 100; i++) {
+ SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
+ SteadyStateSampleAndAdvance(vsync, &sampler, &t);
+ SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
+ SteadyStateSampleAndAdvance(vsync, &sampler, &t);
+ SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
+ SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
+ }
+
+ // Now pretend we're limited by backpressure in the pipeline. In this scenario
+ // case we are adding events but not sampling them.
+ for (int i = 0; i < 20; i++) {
+ SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
+ ASSERT_EQ(i >= 16, sampler.IsOverdueForSamplingAt(t));
+ ASSERT_TRUE(AddEventAndConsiderSampling(&sampler, t));
+ t += vsync;
+ }
+
+ // Now suppose we can sample again. We capture the next frame, and not the one
+ // after that, and then we're back in the steady state again.
+ ASSERT_TRUE(sampler.IsOverdueForSamplingAt(t));
+ SteadyStateSampleAndAdvance(vsync, &sampler, &t);
+ SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
+ for (int i = 0; i < 100; i++) {
+ SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
+ SteadyStateSampleAndAdvance(vsync, &sampler, &t);
+ SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
+ SteadyStateSampleAndAdvance(vsync, &sampler, &t);
+ SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
+ SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
+ }
+}
+
+// 30Hz sampled at 30Hz should produce 30Hz.
+TEST(SmoothEventSamplerTest, Sample30HertzAt30Hertz) {
+ const base::TimeDelta capture_period = base::TimeDelta::FromSeconds(1) / 30;
+ const int redundant_capture_goal = 1;
+ const base::TimeDelta vsync = base::TimeDelta::FromSeconds(1) / 30;
+
+ SmoothEventSampler sampler(capture_period, redundant_capture_goal);
+ base::TimeTicks t = InitialTestTimeTicks();
+
+ TestRedundantCaptureStrategy(capture_period, redundant_capture_goal, &sampler,
+ &t);
+
+ // Steady state, we should capture every vsync, indefinitely.
+ for (int i = 0; i < 200; i++) {
+ SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
+ SteadyStateSampleAndAdvance(vsync, &sampler, &t);
+ }
+
+ // Now pretend we're limited by backpressure in the pipeline. In this scenario
+ // case we are adding events but not sampling them.
+ for (int i = 0; i < 10; i++) {
+ SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
+ ASSERT_EQ(i >= 7, sampler.IsOverdueForSamplingAt(t));
+ ASSERT_TRUE(AddEventAndConsiderSampling(&sampler, t));
+ t += vsync;
+ }
+
+ // Now suppose we can sample again. We should be back in the steady state.
+ ASSERT_TRUE(sampler.IsOverdueForSamplingAt(t));
+ for (int i = 0; i < 100; i++) {
+ SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
+ SteadyStateSampleAndAdvance(vsync, &sampler, &t);
+ }
+}
+
+// 24Hz sampled at 30Hz should produce 24Hz.
+TEST(SmoothEventSamplerTest, Sample24HertzAt30Hertz) {
+ const base::TimeDelta capture_period = base::TimeDelta::FromSeconds(1) / 30;
+ const int redundant_capture_goal = 333;
+ const base::TimeDelta vsync = base::TimeDelta::FromSeconds(1) / 24;
+
+ SmoothEventSampler sampler(capture_period, redundant_capture_goal);
+ base::TimeTicks t = InitialTestTimeTicks();
+
+ TestRedundantCaptureStrategy(capture_period, redundant_capture_goal, &sampler,
+ &t);
+
+ // Steady state, we should capture every vsync, indefinitely.
+ for (int i = 0; i < 200; i++) {
+ SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
+ SteadyStateSampleAndAdvance(vsync, &sampler, &t);
+ }
+
+ // Now pretend we're limited by backpressure in the pipeline. In this scenario
+ // case we are adding events but not sampling them.
+ for (int i = 0; i < 10; i++) {
+ SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
+ ASSERT_EQ(i >= 6, sampler.IsOverdueForSamplingAt(t));
+ ASSERT_TRUE(AddEventAndConsiderSampling(&sampler, t));
+ t += vsync;
+ }
+
+ // Now suppose we can sample again. We should be back in the steady state.
+ ASSERT_TRUE(sampler.IsOverdueForSamplingAt(t));
+ for (int i = 0; i < 100; i++) {
+ SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
+ SteadyStateSampleAndAdvance(vsync, &sampler, &t);
+ }
+}
+
+// Tests that changing the minimum capture period during usage results in the
+// desired behavior.
+TEST(SmoothEventSamplerTest, Sample60HertzWithVariedCapturePeriods) {
+ const base::TimeDelta vsync = base::TimeDelta::FromSeconds(1) / 60;
+ const base::TimeDelta one_to_one_period = vsync;
+ const base::TimeDelta two_to_one_period = vsync * 2;
+ const base::TimeDelta two_and_three_to_one_period =
+ base::TimeDelta::FromSeconds(1) / 24;
+ const int redundant_capture_goal = 1;
+
+ SmoothEventSampler sampler(one_to_one_period, redundant_capture_goal);
+ base::TimeTicks t = InitialTestTimeTicks();
+
+ TestRedundantCaptureStrategy(one_to_one_period, redundant_capture_goal,
+ &sampler, &t);
+
+ // With the capture rate at 60 Hz, we should capture every vsync.
+ for (int i = 0; i < 100; i++) {
+ SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
+ SteadyStateSampleAndAdvance(vsync, &sampler, &t);
+ }
+
+ // Now change to the capture rate to 30 Hz, and we should capture every other
+ // vsync.
+ sampler.SetMinCapturePeriod(two_to_one_period);
+ for (int i = 0; i < 100; i++) {
+ SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
+ SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
+ SteadyStateSampleAndAdvance(vsync, &sampler, &t);
+ }
+
+ // Now change the capture rate back to 60 Hz, and we should capture every
+ // vsync again.
+ sampler.SetMinCapturePeriod(one_to_one_period);
+ for (int i = 0; i < 100; i++) {
+ SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
+ SteadyStateSampleAndAdvance(vsync, &sampler, &t);
+ }
+
+ // Now change the capture rate to 24 Hz, and we should capture with a 2-3-2-3
+ // cadence.
+ sampler.SetMinCapturePeriod(two_and_three_to_one_period);
+ for (int i = 0; i < 100; i++) {
+ SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
+ SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
+ SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
+ SteadyStateSampleAndAdvance(vsync, &sampler, &t);
+ SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
+ SteadyStateSampleAndAdvance(vsync, &sampler, &t);
+ }
+}
+
+TEST(SmoothEventSamplerTest, DoubleDrawAtOneTimeStillDirties) {
+ const base::TimeDelta capture_period = base::TimeDelta::FromSeconds(1) / 30;
+ const base::TimeDelta overdue_period = base::TimeDelta::FromSeconds(1);
+
+ SmoothEventSampler sampler(capture_period, 1);
+ base::TimeTicks t = InitialTestTimeTicks();
+
+ ASSERT_TRUE(AddEventAndConsiderSampling(&sampler, t));
+ sampler.RecordSample();
+ ASSERT_FALSE(sampler.IsOverdueForSamplingAt(t))
+ << "Sampled last event; should not be dirty.";
+ t += overdue_period;
+
+ // Now simulate 2 events with the same clock value.
+ ASSERT_TRUE(AddEventAndConsiderSampling(&sampler, t));
+ sampler.RecordSample();
+ ASSERT_FALSE(AddEventAndConsiderSampling(&sampler, t))
+ << "Two events at same time -- expected second not to be sampled.";
+ ASSERT_TRUE(sampler.IsOverdueForSamplingAt(t + overdue_period))
+ << "Second event should dirty the capture state.";
+ sampler.RecordSample();
+ ASSERT_FALSE(sampler.IsOverdueForSamplingAt(t + overdue_period));
+}
+
+namespace {
+
+struct DataPoint {
+ bool should_capture;
+ double increment_ms;
+};
+
+void ReplayCheckingSamplerDecisions(const DataPoint* data_points,
+ size_t num_data_points,
+ SmoothEventSampler* sampler) {
+ base::TimeTicks t = InitialTestTimeTicks();
+ for (size_t i = 0; i < num_data_points; ++i) {
+ t += base::TimeDelta::FromMicroseconds(
+ static_cast<int64>(data_points[i].increment_ms * 1000));
+ ASSERT_EQ(data_points[i].should_capture,
+ AddEventAndConsiderSampling(sampler, t))
+ << "at data_points[" << i << ']';
+ if (data_points[i].should_capture)
+ sampler->RecordSample();
+ }
+}
+
+} // namespace
+
+TEST(SmoothEventSamplerTest, DrawingAt24FpsWith60HzVsyncSampledAt30Hertz) {
+ // Actual capturing of timing data: Initial instability as a 24 FPS video was
+ // started from a still screen, then clearly followed by steady-state.
+ static const DataPoint data_points[] = {{true, 1437.93},
+ {true, 150.484},
+ {true, 217.362},
+ {true, 50.161},
+ {true, 33.44},
+ {false, 0},
+ {true, 16.721},
+ {true, 66.88},
+ {true, 50.161},
+ {false, 0},
+ {false, 0},
+ {true, 50.16},
+ {true, 33.441},
+ {true, 16.72},
+ {false, 16.72},
+ {true, 117.041},
+ {true, 16.72},
+ {false, 16.72},
+ {true, 50.161},
+ {true, 50.16},
+ {true, 33.441},
+ {true, 33.44},
+ {true, 33.44},
+ {true, 16.72},
+ {false, 0},
+ {true, 50.161},
+ {false, 0},
+ {true, 33.44},
+ {true, 16.72},
+ {false, 16.721},
+ {true, 66.881},
+ {false, 0},
+ {true, 33.441},
+ {true, 16.72},
+ {true, 50.16},
+ {true, 16.72},
+ {false, 16.721},
+ {true, 50.161},
+ {true, 50.16},
+ {false, 0},
+ {true, 33.441},
+ {true, 50.337},
+ {true, 50.183},
+ {true, 16.722},
+ {true, 50.161},
+ {true, 33.441},
+ {true, 50.16},
+ {true, 33.441},
+ {true, 50.16},
+ {true, 33.441},
+ {true, 50.16},
+ {true, 33.44},
+ {true, 50.161},
+ {true, 50.16},
+ {true, 33.44},
+ {true, 33.441},
+ {true, 50.16},
+ {true, 50.161},
+ {true, 33.44},
+ {true, 33.441},
+ {true, 50.16},
+ {true, 33.44},
+ {true, 50.161},
+ {true, 33.44},
+ {true, 50.161},
+ {true, 33.44},
+ {true, 50.161},
+ {true, 33.44},
+ {true, 83.601},
+ {true, 16.72},
+ {true, 33.44},
+ {false, 0}};
+
+ SmoothEventSampler sampler(base::TimeDelta::FromSeconds(1) / 30, 3);
+ ReplayCheckingSamplerDecisions(data_points, arraysize(data_points), &sampler);
+}
+
+TEST(SmoothEventSamplerTest, DrawingAt30FpsWith60HzVsyncSampledAt30Hertz) {
+ // Actual capturing of timing data: Initial instability as a 30 FPS video was
+ // started from a still screen, then followed by steady-state. Drawing
+ // framerate from the video rendering was a bit volatile, but averaged 30 FPS.
+ static const DataPoint data_points[] = {{true, 2407.69},
+ {true, 16.733},
+ {true, 217.362},
+ {true, 33.441},
+ {true, 33.44},
+ {true, 33.44},
+ {true, 33.441},
+ {true, 33.44},
+ {true, 33.44},
+ {true, 33.441},
+ {true, 33.44},
+ {true, 33.44},
+ {true, 16.721},
+ {true, 33.44},
+ {false, 0},
+ {true, 50.161},
+ {true, 50.16},
+ {false, 0},
+ {true, 50.161},
+ {true, 33.44},
+ {true, 16.72},
+ {false, 0},
+ {false, 16.72},
+ {true, 66.881},
+ {false, 0},
+ {true, 33.44},
+ {true, 16.72},
+ {true, 50.161},
+ {false, 0},
+ {true, 33.538},
+ {true, 33.526},
+ {true, 33.447},
+ {true, 33.445},
+ {true, 33.441},
+ {true, 16.721},
+ {true, 33.44},
+ {true, 33.44},
+ {true, 50.161},
+ {true, 16.72},
+ {true, 33.44},
+ {true, 33.441},
+ {true, 33.44},
+ {false, 0},
+ {false, 16.72},
+ {true, 66.881},
+ {true, 16.72},
+ {false, 16.72},
+ {true, 50.16},
+ {true, 33.441},
+ {true, 33.44},
+ {true, 33.44},
+ {true, 33.44},
+ {true, 33.441},
+ {true, 33.44},
+ {true, 50.161},
+ {false, 0},
+ {true, 33.44},
+ {true, 33.44},
+ {true, 50.161},
+ {true, 16.72},
+ {true, 33.44},
+ {true, 33.441},
+ {false, 0},
+ {true, 66.88},
+ {true, 33.441},
+ {true, 33.44},
+ {true, 33.44},
+ {false, 0},
+ {true, 33.441},
+ {true, 33.44},
+ {true, 33.44},
+ {false, 0},
+ {true, 16.72},
+ {true, 50.161},
+ {false, 0},
+ {true, 50.16},
+ {false, 0.001},
+ {true, 16.721},
+ {true, 66.88},
+ {true, 33.44},
+ {true, 33.441},
+ {true, 33.44},
+ {true, 50.161},
+ {true, 16.72},
+ {false, 0},
+ {true, 33.44},
+ {false, 16.72},
+ {true, 66.881},
+ {true, 33.44},
+ {true, 16.72},
+ {true, 33.441},
+ {false, 16.72},
+ {true, 66.88},
+ {true, 16.721},
+ {true, 50.16},
+ {true, 33.44},
+ {true, 16.72},
+ {true, 33.441},
+ {true, 33.44},
+ {true, 33.44}};
+
+ SmoothEventSampler sampler(base::TimeDelta::FromSeconds(1) / 30, 3);
+ ReplayCheckingSamplerDecisions(data_points, arraysize(data_points), &sampler);
+}
+
+TEST(SmoothEventSamplerTest, DrawingAt60FpsWith60HzVsyncSampledAt30Hertz) {
+ // Actual capturing of timing data: WebGL Acquarium demo
+ // (http://webglsamples.googlecode.com/hg/aquarium/aquarium.html) which ran
+ // between 55-60 FPS in the steady-state.
+ static const DataPoint data_points[] = {{true, 16.72},
+ {true, 16.72},
+ {true, 4163.29},
+ {true, 50.193},
+ {true, 117.041},
+ {true, 50.161},
+ {true, 50.16},
+ {true, 33.441},
+ {true, 50.16},
+ {true, 33.44},
+ {false, 0},
+ {false, 0},
+ {true, 50.161},
+ {true, 83.601},
+ {true, 50.16},
+ {true, 16.72},
+ {true, 33.441},
+ {false, 16.72},
+ {true, 50.16},
+ {true, 16.72},
+ {false, 0.001},
+ {true, 33.441},
+ {false, 16.72},
+ {true, 16.72},
+ {true, 50.16},
+ {false, 0},
+ {true, 16.72},
+ {true, 33.441},
+ {false, 0},
+ {true, 33.44},
+ {false, 16.72},
+ {true, 16.72},
+ {true, 50.161},
+ {false, 0},
+ {true, 16.72},
+ {true, 33.44},
+ {false, 0},
+ {true, 33.44},
+ {false, 16.721},
+ {true, 16.721},
+ {true, 50.161},
+ {false, 0},
+ {true, 16.72},
+ {true, 33.441},
+ {false, 0},
+ {true, 33.44},
+ {false, 16.72},
+ {true, 33.44},
+ {false, 0},
+ {true, 16.721},
+ {true, 50.161},
+ {false, 0},
+ {true, 33.44},
+ {false, 0},
+ {true, 16.72},
+ {true, 33.441},
+ {false, 0},
+ {true, 33.44},
+ {false, 16.72},
+ {true, 16.72},
+ {true, 50.16},
+ {false, 0},
+ {true, 16.721},
+ {true, 33.44},
+ {false, 0},
+ {true, 33.44},
+ {false, 16.721},
+ {true, 16.721},
+ {true, 50.161},
+ {false, 0},
+ {true, 16.72},
+ {true, 33.44},
+ {false, 0},
+ {true, 33.441},
+ {false, 16.72},
+ {true, 16.72},
+ {true, 50.16},
+ {false, 0},
+ {true, 16.72},
+ {true, 33.441},
+ {true, 33.44},
+ {false, 0},
+ {true, 33.44},
+ {true, 33.441},
+ {false, 0},
+ {true, 33.44},
+ {true, 33.441},
+ {false, 0},
+ {true, 33.44},
+ {false, 0},
+ {true, 33.44},
+ {false, 16.72},
+ {true, 16.721},
+ {true, 50.161},
+ {false, 0},
+ {true, 16.72},
+ {true, 33.44},
+ {true, 33.441},
+ {false, 0},
+ {true, 33.44},
+ {true, 33.44},
+ {false, 0},
+ {true, 33.441},
+ {false, 16.72},
+ {true, 16.72},
+ {true, 50.16},
+ {false, 0},
+ {true, 16.72},
+ {true, 33.441},
+ {false, 0},
+ {true, 33.44},
+ {false, 16.72},
+ {true, 33.44},
+ {false, 0},
+ {true, 16.721},
+ {true, 50.161},
+ {false, 0},
+ {true, 16.72},
+ {true, 33.44},
+ {false, 0},
+ {true, 33.441},
+ {false, 16.72},
+ {true, 16.72},
+ {true, 50.16}};
+
+ SmoothEventSampler sampler(base::TimeDelta::FromSeconds(1) / 30, 3);
+ ReplayCheckingSamplerDecisions(data_points, arraysize(data_points), &sampler);
+}
+
+} // namespace media
diff --git a/chromium/media/capture/thread_safe_capture_oracle.cc b/chromium/media/capture/content/thread_safe_capture_oracle.cc
index ade36bb2935..22b1dcfc022 100644
--- a/chromium/media/capture/thread_safe_capture_oracle.cc
+++ b/chromium/media/capture/content/thread_safe_capture_oracle.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/capture/thread_safe_capture_oracle.h"
+#include "media/capture/content/thread_safe_capture_oracle.h"
#include "base/basictypes.h"
#include "base/bind.h"
@@ -34,15 +34,17 @@ ThreadSafeCaptureOracle::ThreadSafeCaptureOracle(
const VideoCaptureParams& params,
bool enable_auto_throttling)
: client_(client.Pass()),
- oracle_(base::TimeDelta::FromMicroseconds(
- static_cast<int64>(1000000.0 / params.requested_format.frame_rate +
- 0.5 /* to round to nearest int */)),
- params.requested_format.frame_size,
- params.resolution_change_policy,
- enable_auto_throttling),
- params_(params) {}
+ oracle_(base::TimeDelta::FromMicroseconds(static_cast<int64>(
+ 1000000.0 / params.requested_format.frame_rate +
+ 0.5 /* to round to nearest int */)),
+ params.requested_format.frame_size,
+ params.resolution_change_policy,
+ enable_auto_throttling),
+ params_(params) {
+}
-ThreadSafeCaptureOracle::~ThreadSafeCaptureOracle() {}
+ThreadSafeCaptureOracle::~ThreadSafeCaptureOracle() {
+}
bool ThreadSafeCaptureOracle::ObserveEventAndDecideCapture(
VideoCaptureOracle::Event event,
@@ -76,21 +78,20 @@ bool ThreadSafeCaptureOracle::ObserveEventAndDecideCapture(
// Get the current buffer pool utilization and attenuate it: The utilization
// reported to the oracle is in terms of a maximum sustainable amount (not the
// absolute maximum).
- const double attenuated_utilization = client_->GetBufferPoolUtilization() *
+ const double attenuated_utilization =
+ client_->GetBufferPoolUtilization() *
(100.0 / kTargetMaxPoolUtilizationPercent);
const char* event_name =
- (event == VideoCaptureOracle::kTimerPoll ? "poll" :
- (event == VideoCaptureOracle::kCompositorUpdate ? "gpu" :
- "unknown"));
+ (event == VideoCaptureOracle::kTimerPoll
+ ? "poll"
+ : (event == VideoCaptureOracle::kCompositorUpdate ? "gpu"
+ : "unknown"));
// Consider the various reasons not to initiate a capture.
if (should_capture && !output_buffer.get()) {
- TRACE_EVENT_INSTANT1("gpu.capture",
- "PipelineLimited",
- TRACE_EVENT_SCOPE_THREAD,
- "trigger",
- event_name);
+ TRACE_EVENT_INSTANT1("gpu.capture", "PipelineLimited",
+ TRACE_EVENT_SCOPE_THREAD, "trigger", event_name);
oracle_.RecordWillNotCapture(attenuated_utilization);
return false;
} else if (!should_capture && output_buffer.get()) {
@@ -99,41 +100,32 @@ bool ThreadSafeCaptureOracle::ObserveEventAndDecideCapture(
// capture rate limit: for example, the content is animating at 60fps but
// we're capturing at 30fps.
TRACE_EVENT_INSTANT1("gpu.capture", "FpsRateLimited",
- TRACE_EVENT_SCOPE_THREAD,
- "trigger", event_name);
+ TRACE_EVENT_SCOPE_THREAD, "trigger", event_name);
}
return false;
} else if (!should_capture && !output_buffer.get()) {
// We decided not to capture, but we wouldn't have been able to if we wanted
// to because no output buffer was available.
TRACE_EVENT_INSTANT1("gpu.capture", "NearlyPipelineLimited",
- TRACE_EVENT_SCOPE_THREAD,
- "trigger", event_name);
+ TRACE_EVENT_SCOPE_THREAD, "trigger", event_name);
return false;
}
const int frame_number = oracle_.RecordCapture(attenuated_utilization);
TRACE_EVENT_ASYNC_BEGIN2("gpu.capture", "Capture", output_buffer.get(),
- "frame_number", frame_number,
- "trigger", event_name);
+ "frame_number", frame_number, "trigger", event_name);
// Texture frames wrap a texture mailbox, which we don't have at the moment.
// We do not construct those frames.
if (params_.requested_format.pixel_storage != media::PIXEL_STORAGE_TEXTURE) {
*storage = VideoFrame::WrapExternalData(
- VideoFrame::I420,
- coded_size,
- gfx::Rect(visible_size),
- visible_size,
- static_cast<uint8*>(output_buffer->data()),
- output_buffer->size(),
- base::TimeDelta());
+ media::PIXEL_FORMAT_I420, coded_size, gfx::Rect(visible_size),
+ visible_size, static_cast<uint8*>(output_buffer->data()),
+ output_buffer->mapped_size(), base::TimeDelta());
DCHECK(*storage);
}
- *callback = base::Bind(&ThreadSafeCaptureOracle::DidCaptureFrame,
- this,
- frame_number,
- base::Passed(&output_buffer),
- capture_begin_time,
- oracle_.estimated_frame_duration());
+ *callback =
+ base::Bind(&ThreadSafeCaptureOracle::DidCaptureFrame, this, frame_number,
+ base::Passed(&output_buffer), capture_begin_time,
+ oracle_.estimated_frame_duration());
return true;
}
@@ -168,9 +160,8 @@ void ThreadSafeCaptureOracle::DidCaptureFrame(
base::TimeTicks timestamp,
bool success) {
base::AutoLock guard(lock_);
- TRACE_EVENT_ASYNC_END2("gpu.capture", "Capture", buffer.get(),
- "success", success,
- "timestamp", timestamp.ToInternalValue());
+ TRACE_EVENT_ASYNC_END2("gpu.capture", "Capture", buffer.get(), "success",
+ success, "timestamp", timestamp.ToInternalValue());
if (oracle_.CompleteCapture(frame_number, success, &timestamp)) {
TRACE_EVENT_INSTANT0("gpu.capture", "CaptureSucceeded",
@@ -181,18 +172,16 @@ void ThreadSafeCaptureOracle::DidCaptureFrame(
frame->metadata()->SetDouble(VideoFrameMetadata::FRAME_RATE,
params_.requested_format.frame_rate);
- frame->metadata()->SetTimeTicks(
- VideoFrameMetadata::CAPTURE_BEGIN_TIME, capture_begin_time);
- frame->metadata()->SetTimeTicks(
- VideoFrameMetadata::CAPTURE_END_TIME, base::TimeTicks::Now());
+ frame->metadata()->SetTimeTicks(VideoFrameMetadata::CAPTURE_BEGIN_TIME,
+ capture_begin_time);
+ frame->metadata()->SetTimeTicks(VideoFrameMetadata::CAPTURE_END_TIME,
+ base::TimeTicks::Now());
frame->metadata()->SetTimeDelta(VideoFrameMetadata::FRAME_DURATION,
estimated_frame_duration);
- frame->AddDestructionObserver(base::Bind(
- &ThreadSafeCaptureOracle::DidConsumeFrame,
- this,
- frame_number,
- frame->metadata()));
+ frame->AddDestructionObserver(
+ base::Bind(&ThreadSafeCaptureOracle::DidConsumeFrame, this,
+ frame_number, frame->metadata()));
client_->OnIncomingCapturedVideoFrame(buffer.Pass(), frame, timestamp);
}
diff --git a/chromium/media/capture/thread_safe_capture_oracle.h b/chromium/media/capture/content/thread_safe_capture_oracle.h
index c5d83dfee7b..1aa905e37f1 100644
--- a/chromium/media/capture/thread_safe_capture_oracle.h
+++ b/chromium/media/capture/content/thread_safe_capture_oracle.h
@@ -11,8 +11,8 @@
#include "base/memory/scoped_ptr.h"
#include "media/base/media_export.h"
#include "media/base/video_frame.h"
-#include "media/capture/video_capture_oracle.h"
-#include "media/video/capture/video_capture_device.h"
+#include "media/capture/content/video_capture_oracle.h"
+#include "media/capture/video/video_capture_device.h"
namespace media {
@@ -70,14 +70,13 @@ class MEDIA_EXPORT ThreadSafeCaptureOracle
virtual ~ThreadSafeCaptureOracle();
// Callback invoked on completion of all captures.
- void DidCaptureFrame(
- int frame_number,
- scoped_ptr<VideoCaptureDevice::Client::Buffer> buffer,
- base::TimeTicks capture_begin_time,
- base::TimeDelta estimated_frame_duration,
- const scoped_refptr<VideoFrame>& frame,
- base::TimeTicks timestamp,
- bool success);
+ void DidCaptureFrame(int frame_number,
+ scoped_ptr<VideoCaptureDevice::Client::Buffer> buffer,
+ base::TimeTicks capture_begin_time,
+ base::TimeDelta estimated_frame_duration,
+ const scoped_refptr<VideoFrame>& frame,
+ base::TimeTicks timestamp,
+ bool success);
// Callback invoked once all consumers have finished with a delivered video
// frame. Consumer feedback signals are scanned from the frame's |metadata|.
diff --git a/chromium/media/capture/video_capture_oracle.cc b/chromium/media/capture/content/video_capture_oracle.cc
index a536aab84aa..6127996fbb7 100644
--- a/chromium/media/capture/video_capture_oracle.cc
+++ b/chromium/media/capture/content/video_capture_oracle.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/capture/video_capture_oracle.h"
+#include "media/capture/content/video_capture_oracle.h"
#include <algorithm>
@@ -48,6 +48,13 @@ const int kMinSizeChangePeriodMicros = 3000000; // 3 seconds
// is an unexpected pause in events.
const int kMaxTimeSinceLastFeedbackUpdateMicros = 1000000; // 1 second
+// The amount of time, since the source size last changed, to allow frequent
+// increases in capture area. This allows the system a period of time to
+// quickly explore up and down to find an ideal point before being more careful
+// about capture size increases.
+const int kExplorationPeriodAfterSourceSizeChangeMicros =
+ 3 * kMinSizeChangePeriodMicros;
+
// The amount of additional time, since content animation was last detected, to
// continue being extra-careful about increasing the capture size. This is used
// to prevent breif periods of non-animating content from throwing off the
@@ -65,7 +72,7 @@ double FractionFromExpectedFrameRate(base::TimeDelta delta, int frame_rate) {
const base::TimeDelta expected_delta =
base::TimeDelta::FromSeconds(1) / frame_rate;
return (delta - expected_delta).InMillisecondsF() /
- expected_delta.InMillisecondsF();
+ expected_delta.InMillisecondsF();
}
// Returns the next-higher TimeTicks value.
@@ -83,8 +90,8 @@ bool HasSufficientRecentFeedback(const FeedbackSignalAccumulator& accumulator,
const base::TimeDelta amount_of_history =
accumulator.update_time() - accumulator.reset_time();
return (amount_of_history.InMicroseconds() >= kMinSizeChangePeriodMicros) &&
- ((now - accumulator.update_time()).InMicroseconds() <=
- kMaxTimeSinceLastFeedbackUpdateMicros);
+ ((now - accumulator.update_time()).InMicroseconds() <=
+ kMaxTimeSinceLastFeedbackUpdateMicros);
}
} // anonymous namespace
@@ -110,12 +117,15 @@ VideoCaptureOracle::VideoCaptureOracle(
<< (auto_throttling_enabled_ ? "enabled." : "disabled.");
}
-VideoCaptureOracle::~VideoCaptureOracle() {}
+VideoCaptureOracle::~VideoCaptureOracle() {
+}
void VideoCaptureOracle::SetSourceSize(const gfx::Size& source_size) {
resolution_chooser_.SetSourceSize(source_size);
// If the |resolution_chooser_| computed a new capture size, that will become
// visible via a future call to ObserveEventAndDecideCapture().
+ source_size_change_time_ = (next_frame_number_ == 0) ?
+ base::TimeTicks() : GetFrameTimestamp(next_frame_number_ - 1);
}
bool VideoCaptureOracle::ObserveEventAndDecideCapture(
@@ -262,19 +272,19 @@ bool VideoCaptureOracle::CompleteCapture(int frame_number,
const int rounded_frame_rate =
static_cast<int>(estimated_frame_rate + 0.5);
VLOG_STREAM(3) << base::StringPrintf(
- "Captured #%d: delta=%" PRId64 " usec"
+ "Captured #%d: delta=%" PRId64
+ " usec"
", now locked into {%s}, %+0.1f%% slower than %d FPS",
- frame_number,
- delta.InMicroseconds(),
+ frame_number, delta.InMicroseconds(),
content_sampler_.detected_region().ToString().c_str(),
100.0 * FractionFromExpectedFrameRate(delta, rounded_frame_rate),
rounded_frame_rate);
} else {
VLOG_STREAM(3) << base::StringPrintf(
- "Captured #%d: delta=%" PRId64 " usec"
+ "Captured #%d: delta=%" PRId64
+ " usec"
", d/30fps=%+0.1f%%, d/25fps=%+0.1f%%, d/24fps=%+0.1f%%",
- frame_number,
- delta.InMicroseconds(),
+ frame_number, delta.InMicroseconds(),
100.0 * FractionFromExpectedFrameRate(delta, 30),
100.0 * FractionFromExpectedFrameRate(delta, 25),
100.0 * FractionFromExpectedFrameRate(delta, 24));
@@ -325,9 +335,11 @@ void VideoCaptureOracle::SetFrameTimestamp(int frame_number,
}
bool VideoCaptureOracle::IsFrameInRecentHistory(int frame_number) const {
- return ((next_frame_number_ - frame_number) < kMaxFrameTimestamps &&
+ // Adding (next_frame_number_ >= 0) helps the compiler deduce that there
+ // is no possibility of overflow here.
+ return (frame_number >= 0 && next_frame_number_ >= 0 &&
frame_number <= next_frame_number_ &&
- frame_number >= 0);
+ (next_frame_number_ - frame_number) < kMaxFrameTimestamps);
}
void VideoCaptureOracle::CommitCaptureSizeAndReset(
@@ -340,10 +352,6 @@ void VideoCaptureOracle::CommitCaptureSizeAndReset(
const base::TimeTicks ignore_before_time = JustAfter(last_frame_time);
buffer_pool_utilization_.Reset(1.0, ignore_before_time);
estimated_capable_area_.Reset(capture_size_.GetArea(), ignore_before_time);
-
- // With the new capture size, erase any prior conclusion about the end-to-end
- // system being under-utilized.
- start_time_of_underutilization_ = base::TimeTicks();
}
void VideoCaptureOracle::AnalyzeAndAdjust(const base::TimeTicks analyze_time) {
@@ -381,8 +389,8 @@ int VideoCaptureOracle::AnalyzeForDecreasedArea(base::TimeTicks analyze_time) {
buffer_pool_utilization_.current() > 1.0) {
// This calculation is hand-wavy, but seems to work well in a variety of
// situations.
- buffer_capable_area = static_cast<int>(
- current_area / buffer_pool_utilization_.current());
+ buffer_capable_area =
+ static_cast<int>(current_area / buffer_pool_utilization_.current());
} else {
buffer_capable_area = current_area;
}
@@ -403,6 +411,9 @@ int VideoCaptureOracle::AnalyzeForDecreasedArea(base::TimeTicks analyze_time) {
decreased_area = std::min(
capable_area,
resolution_chooser_.FindSmallerFrameSize(current_area, 1).GetArea());
+ VLOG_IF(2, !start_time_of_underutilization_.is_null())
+ << "Contiguous period of under-utilization ends: "
+ "System is suddenly over-utilized.";
start_time_of_underutilization_ = base::TimeTicks();
VLOG(2) << "Proposing a "
<< (100.0 * (current_area - decreased_area) / current_area)
@@ -435,6 +446,9 @@ int VideoCaptureOracle::AnalyzeForIncreasedArea(base::TimeTicks analyze_time) {
const int buffer_capable_area = base::saturated_cast<int>(
current_area / buffer_pool_utilization_.current());
if (buffer_capable_area < increased_area) {
+ VLOG_IF(2, !start_time_of_underutilization_.is_null())
+ << "Contiguous period of under-utilization ends: "
+ "Buffer pool is no longer under-utilized.";
start_time_of_underutilization_ = base::TimeTicks();
return -1; // Buffer pool is not under-utilized.
}
@@ -443,6 +457,9 @@ int VideoCaptureOracle::AnalyzeForIncreasedArea(base::TimeTicks analyze_time) {
// Determine whether the consumer could handle an increase in area.
if (HasSufficientRecentFeedback(estimated_capable_area_, analyze_time)) {
if (estimated_capable_area_.current() < increased_area) {
+ VLOG_IF(2, !start_time_of_underutilization_.is_null())
+ << "Contiguous period of under-utilization ends: "
+ "Consumer is no longer under-utilized.";
start_time_of_underutilization_ = base::TimeTicks();
return -1; // Consumer is not under-utilized.
}
@@ -454,7 +471,6 @@ int VideoCaptureOracle::AnalyzeForIncreasedArea(base::TimeTicks analyze_time) {
// Consumer is providing feedback, but hasn't reported it recently. Just in
// case it's stalled, don't make things worse by increasing the capture
// area.
- start_time_of_underutilization_ = base::TimeTicks();
return -1;
}
@@ -463,26 +479,46 @@ int VideoCaptureOracle::AnalyzeForIncreasedArea(base::TimeTicks analyze_time) {
if (start_time_of_underutilization_.is_null())
start_time_of_underutilization_ = analyze_time;
+ // If the under-utilization started soon after the last source size change,
+ // permit an immediate increase in the capture area. This allows the system
+ // to quickly step-up to an ideal point.
+ if ((start_time_of_underutilization_ -
+ source_size_change_time_).InMicroseconds() <=
+ kExplorationPeriodAfterSourceSizeChangeMicros) {
+ VLOG(2) << "Proposing a "
+ << (100.0 * (increased_area - current_area) / current_area)
+ << "% increase in capture area after source size change. :-)";
+ return increased_area;
+ }
+
// While content is animating, require a "proving period" of contiguous
// under-utilization before increasing the capture area. This will mitigate
- // the risk of causing frames to be dropped when increasing the load. If
- // content is not animating, be aggressive about increasing the capture area,
- // to improve the quality of non-animating content (where frame drops are not
- // much of a concern).
+ // the risk of frames getting dropped when the data volume increases.
if ((analyze_time - last_time_animation_was_detected_).InMicroseconds() <
- kDebouncingPeriodForAnimatedContentMicros) {
+ kDebouncingPeriodForAnimatedContentMicros) {
if ((analyze_time - start_time_of_underutilization_).InMicroseconds() <
- kProvingPeriodForAnimatedContentMicros) {
- // Content is animating and the system has not been contiguously
- // under-utilizated for long enough.
+ kProvingPeriodForAnimatedContentMicros) {
+ // Content is animating but the system needs to be under-utilized for a
+ // longer period of time.
return -1;
+ } else {
+ // Content is animating and the system has been contiguously
+ // under-utilized for a good long time.
+ VLOG(2) << "Proposing a *cautious* "
+ << (100.0 * (increased_area - current_area) / current_area)
+ << "% increase in capture area while content is animating. :-)";
+ // Reset the "proving period."
+ start_time_of_underutilization_ = base::TimeTicks();
+ return increased_area;
}
}
+ // Content is not animating, so permit an immediate increase in the capture
+ // area. This allows the system to quickly improve the quality of
+ // non-animating content (frame drops are not much of a concern).
VLOG(2) << "Proposing a "
<< (100.0 * (increased_area - current_area) / current_area)
- << "% increase in capture area. :-)";
-
+ << "% increase in capture area for non-animating content. :-)";
return increased_area;
}
diff --git a/chromium/media/capture/video_capture_oracle.h b/chromium/media/capture/content/video_capture_oracle.h
index 94bdc009bce..aff97452951 100644
--- a/chromium/media/capture/video_capture_oracle.h
+++ b/chromium/media/capture/content/video_capture_oracle.h
@@ -9,10 +9,10 @@
#include "base/memory/scoped_ptr.h"
#include "base/time/time.h"
#include "media/base/media_export.h"
-#include "media/capture/animated_content_sampler.h"
-#include "media/capture/capture_resolution_chooser.h"
-#include "media/capture/feedback_signal_accumulator.h"
-#include "media/capture/smooth_event_sampler.h"
+#include "media/capture/content/animated_content_sampler.h"
+#include "media/capture/content/capture_resolution_chooser.h"
+#include "media/capture/content/feedback_signal_accumulator.h"
+#include "media/capture/content/smooth_event_sampler.h"
#include "ui/gfx/geometry/rect.h"
namespace media {
@@ -161,6 +161,9 @@ class MEDIA_EXPORT VideoCaptureOracle {
// Determines video capture frame sizes.
CaptureResolutionChooser resolution_chooser_;
+ // The timestamp of the frame just before the last call to SetSourceSize().
+ base::TimeTicks source_size_change_time_;
+
// The current capture size. |resolution_chooser_| may hold an updated value
// because the oracle prevents this size from changing too frequently. This
// avoids over-stressing consumers (e.g., when a window is being activly
diff --git a/chromium/media/capture/video_capture_oracle_unittest.cc b/chromium/media/capture/content/video_capture_oracle_unittest.cc
index ab43871566b..64fff4fb4d0 100644
--- a/chromium/media/capture/video_capture_oracle_unittest.cc
+++ b/chromium/media/capture/content/video_capture_oracle_unittest.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/capture/video_capture_oracle.h"
+#include "media/capture/content/video_capture_oracle.h"
#include "base/strings/stringprintf.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -19,10 +19,18 @@ base::TimeDelta Get30HzPeriod() {
return base::TimeDelta::FromSeconds(1) / 30;
}
+gfx::Size Get1080pSize() {
+ return gfx::Size(1920, 1080);
+}
+
gfx::Size Get720pSize() {
return gfx::Size(1280, 720);
}
+gfx::Size Get360pSize() {
+ return gfx::Size(640, 360);
+}
+
} // namespace
// Tests that VideoCaptureOracle filters out events whose timestamps are
@@ -31,33 +39,28 @@ TEST(VideoCaptureOracleTest, EnforcesEventTimeMonotonicity) {
const gfx::Rect damage_rect(Get720pSize());
const base::TimeDelta event_increment = Get30HzPeriod() * 2;
- VideoCaptureOracle oracle(Get30HzPeriod(),
- Get720pSize(),
- media::RESOLUTION_POLICY_FIXED_RESOLUTION,
- false);
+ VideoCaptureOracle oracle(Get30HzPeriod(), Get720pSize(),
+ media::RESOLUTION_POLICY_FIXED_RESOLUTION, false);
base::TimeTicks t = InitialTestTimeTicks();
for (int i = 0; i < 10; ++i) {
t += event_increment;
ASSERT_TRUE(oracle.ObserveEventAndDecideCapture(
- VideoCaptureOracle::kCompositorUpdate,
- damage_rect, t));
+ VideoCaptureOracle::kCompositorUpdate, damage_rect, t));
}
base::TimeTicks furthest_event_time = t;
for (int i = 0; i < 10; ++i) {
t -= event_increment;
ASSERT_FALSE(oracle.ObserveEventAndDecideCapture(
- VideoCaptureOracle::kCompositorUpdate,
- damage_rect, t));
+ VideoCaptureOracle::kCompositorUpdate, damage_rect, t));
}
t = furthest_event_time;
for (int i = 0; i < 10; ++i) {
t += event_increment;
ASSERT_TRUE(oracle.ObserveEventAndDecideCapture(
- VideoCaptureOracle::kCompositorUpdate,
- damage_rect, t));
+ VideoCaptureOracle::kCompositorUpdate, damage_rect, t));
}
}
@@ -68,10 +71,8 @@ TEST(VideoCaptureOracleTest, EnforcesFramesDeliveredInOrder) {
const gfx::Rect damage_rect(Get720pSize());
const base::TimeDelta event_increment = Get30HzPeriod() * 2;
- VideoCaptureOracle oracle(Get30HzPeriod(),
- Get720pSize(),
- media::RESOLUTION_POLICY_FIXED_RESOLUTION,
- false);
+ VideoCaptureOracle oracle(Get30HzPeriod(), Get720pSize(),
+ media::RESOLUTION_POLICY_FIXED_RESOLUTION, false);
// Most basic scenario: Frames delivered one at a time, with no additional
// captures in-between deliveries.
@@ -81,8 +82,7 @@ TEST(VideoCaptureOracleTest, EnforcesFramesDeliveredInOrder) {
for (int i = 0; i < 10; ++i) {
t += event_increment;
ASSERT_TRUE(oracle.ObserveEventAndDecideCapture(
- VideoCaptureOracle::kCompositorUpdate,
- damage_rect, t));
+ VideoCaptureOracle::kCompositorUpdate, damage_rect, t));
last_frame_number = oracle.RecordCapture(0.0);
ASSERT_TRUE(oracle.CompleteCapture(last_frame_number, true, &ignored));
}
@@ -93,8 +93,7 @@ TEST(VideoCaptureOracleTest, EnforcesFramesDeliveredInOrder) {
for (int j = 0; j < num_in_flight; ++j) {
t += event_increment;
ASSERT_TRUE(oracle.ObserveEventAndDecideCapture(
- VideoCaptureOracle::kCompositorUpdate,
- damage_rect, t));
+ VideoCaptureOracle::kCompositorUpdate, damage_rect, t));
last_frame_number = oracle.RecordCapture(0.0);
}
for (int j = num_in_flight - 1; j >= 0; --j) {
@@ -110,8 +109,7 @@ TEST(VideoCaptureOracleTest, EnforcesFramesDeliveredInOrder) {
for (int j = 0; j < num_in_flight; ++j) {
t += event_increment;
ASSERT_TRUE(oracle.ObserveEventAndDecideCapture(
- VideoCaptureOracle::kCompositorUpdate,
- damage_rect, t));
+ VideoCaptureOracle::kCompositorUpdate, damage_rect, t));
last_frame_number = oracle.RecordCapture(0.0);
}
ASSERT_TRUE(oracle.CompleteCapture(last_frame_number, true, &ignored));
@@ -128,8 +126,7 @@ TEST(VideoCaptureOracleTest, EnforcesFramesDeliveredInOrder) {
for (int j = 0; j < num_in_flight; ++j) {
t += event_increment;
ASSERT_TRUE(oracle.ObserveEventAndDecideCapture(
- VideoCaptureOracle::kCompositorUpdate,
- damage_rect, t));
+ VideoCaptureOracle::kCompositorUpdate, damage_rect, t));
last_frame_number = oracle.RecordCapture(0.0);
}
// Report the last frame as an out of order failure.
@@ -138,7 +135,6 @@ TEST(VideoCaptureOracleTest, EnforcesFramesDeliveredInOrder) {
ASSERT_TRUE(
oracle.CompleteCapture(last_frame_number - j, true, &ignored));
}
-
}
}
@@ -148,10 +144,8 @@ TEST(VideoCaptureOracleTest, TransitionsSmoothlyBetweenSamplers) {
const gfx::Rect animation_damage_rect(Get720pSize());
const base::TimeDelta event_increment = Get30HzPeriod() * 2;
- VideoCaptureOracle oracle(Get30HzPeriod(),
- Get720pSize(),
- media::RESOLUTION_POLICY_FIXED_RESOLUTION,
- false);
+ VideoCaptureOracle oracle(Get30HzPeriod(), Get720pSize(),
+ media::RESOLUTION_POLICY_FIXED_RESOLUTION, false);
// Run sequences of animation events and non-animation events through the
// oracle. As the oracle transitions between each sampler, make sure the
@@ -196,8 +190,8 @@ TEST(VideoCaptureOracleTest, TransitionsSmoothlyBetweenSamplers) {
// a few frames dropped, so allow a gap in the timestamps. Otherwise, the
// delta between frame timestamps should never be more than 2X the
// |event_increment|.
- const base::TimeDelta max_acceptable_delta = (i % 100) == 78 ?
- event_increment * 5 : event_increment * 2;
+ const base::TimeDelta max_acceptable_delta =
+ (i % 100) == 78 ? event_increment * 5 : event_increment * 2;
EXPECT_GE(max_acceptable_delta.InMicroseconds(), delta.InMicroseconds());
}
last_frame_timestamp = frame_timestamp;
@@ -207,15 +201,12 @@ TEST(VideoCaptureOracleTest, TransitionsSmoothlyBetweenSamplers) {
// Tests that VideoCaptureOracle prevents timer polling from initiating
// simultaneous captures.
TEST(VideoCaptureOracleTest, SamplesOnlyOneOverdueFrameAtATime) {
- const base::TimeDelta vsync_interval =
- base::TimeDelta::FromSeconds(1) / 60;
+ const base::TimeDelta vsync_interval = base::TimeDelta::FromSeconds(1) / 60;
const base::TimeDelta timer_interval = base::TimeDelta::FromMilliseconds(
VideoCaptureOracle::kMinTimerPollPeriodMillis);
- VideoCaptureOracle oracle(Get30HzPeriod(),
- Get720pSize(),
- media::RESOLUTION_POLICY_FIXED_RESOLUTION,
- false);
+ VideoCaptureOracle oracle(Get30HzPeriod(), Get720pSize(),
+ media::RESOLUTION_POLICY_FIXED_RESOLUTION, false);
// Have the oracle observe some compositor events. Simulate that each capture
// completes successfully.
@@ -261,8 +252,8 @@ TEST(VideoCaptureOracleTest, SamplesOnlyOneOverdueFrameAtATime) {
did_complete_a_capture = false;
for (int i = 0; i < 10; ++i) {
t += timer_interval;
- if (oracle.ObserveEventAndDecideCapture(
- VideoCaptureOracle::kTimerPoll, gfx::Rect(), t)) {
+ if (oracle.ObserveEventAndDecideCapture(VideoCaptureOracle::kTimerPoll,
+ gfx::Rect(), t)) {
ASSERT_TRUE(
oracle.CompleteCapture(oracle.RecordCapture(0.0), true, &ignored));
did_complete_a_capture = true;
@@ -274,8 +265,8 @@ TEST(VideoCaptureOracleTest, SamplesOnlyOneOverdueFrameAtATime) {
for (int i = 0; i <= 10; ++i) {
ASSERT_GT(10, i) << "BUG: Seems like it'll never happen!";
t += timer_interval;
- if (oracle.ObserveEventAndDecideCapture(
- VideoCaptureOracle::kTimerPoll, gfx::Rect(), t)) {
+ if (oracle.ObserveEventAndDecideCapture(VideoCaptureOracle::kTimerPoll,
+ gfx::Rect(), t)) {
break;
}
}
@@ -292,8 +283,8 @@ TEST(VideoCaptureOracleTest, SamplesOnlyOneOverdueFrameAtATime) {
for (int i = 0; i <= 10; ++i) {
ASSERT_GT(10, i) << "BUG: Seems like it'll never happen!";
t += timer_interval;
- if (oracle.ObserveEventAndDecideCapture(
- VideoCaptureOracle::kTimerPoll, gfx::Rect(), t)) {
+ if (oracle.ObserveEventAndDecideCapture(VideoCaptureOracle::kTimerPoll,
+ gfx::Rect(), t)) {
break;
}
}
@@ -303,10 +294,8 @@ TEST(VideoCaptureOracleTest, SamplesOnlyOneOverdueFrameAtATime) {
// to allow both the source content and the rest of the end-to-end system to
// stabilize.
TEST(VideoCaptureOracleTest, DoesNotRapidlyChangeCaptureSize) {
- VideoCaptureOracle oracle(Get30HzPeriod(),
- Get720pSize(),
- media::RESOLUTION_POLICY_ANY_WITHIN_LIMIT,
- true);
+ VideoCaptureOracle oracle(Get30HzPeriod(), Get720pSize(),
+ media::RESOLUTION_POLICY_ANY_WITHIN_LIMIT, true);
// Run 30 seconds of frame captures without any source size changes.
base::TimeTicks t = InitialTestTimeTicks();
@@ -317,8 +306,8 @@ TEST(VideoCaptureOracleTest, DoesNotRapidlyChangeCaptureSize) {
VideoCaptureOracle::kCompositorUpdate, gfx::Rect(), t));
ASSERT_EQ(Get720pSize(), oracle.capture_size());
base::TimeTicks ignored;
- ASSERT_TRUE(oracle.CompleteCapture(
- oracle.RecordCapture(0.0), true, &ignored));
+ ASSERT_TRUE(
+ oracle.CompleteCapture(oracle.RecordCapture(0.0), true, &ignored));
}
// Now run 30 seconds of frame captures with lots of random source size
@@ -330,9 +319,8 @@ TEST(VideoCaptureOracleTest, DoesNotRapidlyChangeCaptureSize) {
for (; t < end_t; t += event_increment) {
// Change the source size every frame to a random non-empty size.
const gfx::Size last_source_size = source_size;
- source_size.SetSize(
- ((last_source_size.width() * 11 + 12345) % 1280) + 1,
- ((last_source_size.height() * 11 + 12345) % 720) + 1);
+ source_size.SetSize(((last_source_size.width() * 11 + 12345) % 1280) + 1,
+ ((last_source_size.height() * 11 + 12345) % 720) + 1);
ASSERT_NE(last_source_size, source_size);
oracle.SetSourceSize(source_size);
@@ -346,8 +334,8 @@ TEST(VideoCaptureOracleTest, DoesNotRapidlyChangeCaptureSize) {
}
base::TimeTicks ignored;
- ASSERT_TRUE(oracle.CompleteCapture(
- oracle.RecordCapture(0.0), true, &ignored));
+ ASSERT_TRUE(
+ oracle.CompleteCapture(oracle.RecordCapture(0.0), true, &ignored));
}
}
@@ -362,14 +350,13 @@ namespace {
// feedback varies.
void RunAutoThrottleTest(bool is_content_animating,
bool with_consumer_feedback) {
- SCOPED_TRACE(::testing::Message() << "RunAutoThrottleTest("
+ SCOPED_TRACE(::testing::Message()
+ << "RunAutoThrottleTest("
<< "(is_content_animating=" << is_content_animating
<< ", with_consumer_feedback=" << with_consumer_feedback << ")");
- VideoCaptureOracle oracle(Get30HzPeriod(),
- Get720pSize(),
- media::RESOLUTION_POLICY_ANY_WITHIN_LIMIT,
- true);
+ VideoCaptureOracle oracle(Get30HzPeriod(), Get720pSize(),
+ media::RESOLUTION_POLICY_ANY_WITHIN_LIMIT, true);
// Run 10 seconds of frame captures with 90% utilization expect no capture
// size changes.
@@ -380,8 +367,7 @@ void RunAutoThrottleTest(bool is_content_animating,
for (; t < end_t; t += event_increment) {
ASSERT_TRUE(oracle.ObserveEventAndDecideCapture(
VideoCaptureOracle::kCompositorUpdate,
- is_content_animating ? gfx::Rect(Get720pSize()) : gfx::Rect(),
- t));
+ is_content_animating ? gfx::Rect(Get720pSize()) : gfx::Rect(), t));
ASSERT_EQ(Get720pSize(), oracle.capture_size());
const double utilization = 0.9;
const int frame_number =
@@ -397,17 +383,16 @@ void RunAutoThrottleTest(bool is_content_animating,
// expect the resolution to remain constant. Repeat.
for (int i = 0; i < 2; ++i) {
const gfx::Size starting_size = oracle.capture_size();
- SCOPED_TRACE(::testing::Message()
- << "Stepping down from " << starting_size.ToString()
- << ", i=" << i);
+ SCOPED_TRACE(::testing::Message() << "Stepping down from "
+ << starting_size.ToString()
+ << ", i=" << i);
gfx::Size stepped_down_size;
end_t = t + base::TimeDelta::FromSeconds(10);
for (; t < end_t; t += event_increment) {
ASSERT_TRUE(oracle.ObserveEventAndDecideCapture(
- VideoCaptureOracle::kCompositorUpdate,
- is_content_animating ? gfx::Rect(Get720pSize()) : gfx::Rect(),
- t));
+ VideoCaptureOracle::kCompositorUpdate,
+ is_content_animating ? gfx::Rect(Get720pSize()) : gfx::Rect(), t));
if (stepped_down_size.IsEmpty()) {
if (oracle.capture_size() != starting_size) {
@@ -435,25 +420,23 @@ void RunAutoThrottleTest(bool is_content_animating,
// utilization and expect the resolution to remain constant. Repeat.
for (int i = 0; i < 2; ++i) {
const gfx::Size starting_size = oracle.capture_size();
- SCOPED_TRACE(::testing::Message()
- << "Stepping up from " << starting_size.ToString()
- << ", i=" << i);
+ SCOPED_TRACE(::testing::Message() << "Stepping up from "
+ << starting_size.ToString()
+ << ", i=" << i);
gfx::Size stepped_up_size;
end_t = t + base::TimeDelta::FromSeconds(is_content_animating ? 90 : 10);
for (; t < end_t; t += event_increment) {
ASSERT_TRUE(oracle.ObserveEventAndDecideCapture(
VideoCaptureOracle::kCompositorUpdate,
- is_content_animating ? gfx::Rect(Get720pSize()) : gfx::Rect(),
- t));
+ is_content_animating ? gfx::Rect(Get720pSize()) : gfx::Rect(), t));
if (stepped_up_size.IsEmpty()) {
if (oracle.capture_size() != starting_size) {
// When content is animating, a much longer amount of time must pass
// before the capture size will step up.
- ASSERT_LT(
- base::TimeDelta::FromSeconds(is_content_animating ? 15 : 1),
- t - time_of_last_size_change);
+ ASSERT_LT(base::TimeDelta::FromSeconds(is_content_animating ? 15 : 1),
+ t - time_of_last_size_change);
time_of_last_size_change = t;
stepped_up_size = oracle.capture_size();
ASSERT_LT(starting_size.width(), stepped_up_size.width());
@@ -487,13 +470,124 @@ TEST(VideoCaptureOracleTest, AutoThrottlesBasedOnUtilizationFeedback) {
RunAutoThrottleTest(true, true);
}
+// Tests that, while content is animating, VideoCaptureOracle can make frequent
+// capture size increases only just after the source size has changed.
+// Otherwise, capture size increases should only be made cautiously, after a
+// long "proving period of under-utilization" has elapsed.
+TEST(VideoCaptureOracleTest, IncreasesFrequentlyOnlyAfterSourceSizeChange) {
+ VideoCaptureOracle oracle(Get30HzPeriod(), Get720pSize(),
+ media::RESOLUTION_POLICY_ANY_WITHIN_LIMIT, true);
+
+ // Start out the source size at 360p, so there is room to grow to the 720p
+ // maximum.
+ oracle.SetSourceSize(Get360pSize());
+
+ // Run 10 seconds of frame captures with under-utilization to represent a
+ // machine that can do more, but won't because the source size is small.
+ base::TimeTicks t = InitialTestTimeTicks();
+ const base::TimeDelta event_increment = Get30HzPeriod() * 2;
+ base::TimeTicks end_t = t + base::TimeDelta::FromSeconds(10);
+ for (; t < end_t; t += event_increment) {
+ if (!oracle.ObserveEventAndDecideCapture(
+ VideoCaptureOracle::kCompositorUpdate, gfx::Rect(Get360pSize()),
+ t)) {
+ continue;
+ }
+ ASSERT_EQ(Get360pSize(), oracle.capture_size());
+ const int frame_number = oracle.RecordCapture(0.25);
+ base::TimeTicks ignored;
+ ASSERT_TRUE(oracle.CompleteCapture(frame_number, true, &ignored));
+ }
+
+ // Now, set the source size to 720p, continuing to report under-utilization,
+ // and expect the capture size increases to reach a full 720p within 15
+ // seconds.
+ oracle.SetSourceSize(Get720pSize());
+ gfx::Size last_capture_size = oracle.capture_size();
+ end_t = t + base::TimeDelta::FromSeconds(15);
+ for (; t < end_t; t += event_increment) {
+ if (!oracle.ObserveEventAndDecideCapture(
+ VideoCaptureOracle::kCompositorUpdate, gfx::Rect(Get720pSize()),
+ t)) {
+ continue;
+ }
+ ASSERT_LE(last_capture_size.width(), oracle.capture_size().width());
+ ASSERT_LE(last_capture_size.height(), oracle.capture_size().height());
+ last_capture_size = oracle.capture_size();
+ const int frame_number = oracle.RecordCapture(0.25);
+ base::TimeTicks ignored;
+ ASSERT_TRUE(oracle.CompleteCapture(frame_number, true, &ignored));
+ }
+ ASSERT_EQ(Get720pSize(), oracle.capture_size());
+
+ // Now, change the source size again, but report over-utilization so the
+ // capture size will decrease. Once it decreases one step, report 90%
+ // utilization to achieve a steady-state.
+ oracle.SetSourceSize(Get1080pSize());
+ gfx::Size stepped_down_size;
+ end_t = t + base::TimeDelta::FromSeconds(10);
+ for (; t < end_t; t += event_increment) {
+ if (!oracle.ObserveEventAndDecideCapture(
+ VideoCaptureOracle::kCompositorUpdate, gfx::Rect(Get1080pSize()),
+ t)) {
+ continue;
+ }
+
+ if (stepped_down_size.IsEmpty()) {
+ if (oracle.capture_size() != Get720pSize()) {
+ stepped_down_size = oracle.capture_size();
+ ASSERT_GT(Get720pSize().width(), stepped_down_size.width());
+ ASSERT_GT(Get720pSize().height(), stepped_down_size.height());
+ }
+ } else {
+ ASSERT_EQ(stepped_down_size, oracle.capture_size());
+ }
+
+ const double utilization = stepped_down_size.IsEmpty() ? 1.5 : 0.9;
+ const int frame_number = oracle.RecordCapture(utilization);
+ base::TimeTicks ignored;
+ ASSERT_TRUE(oracle.CompleteCapture(frame_number, true, &ignored));
+ }
+ ASSERT_FALSE(stepped_down_size.IsEmpty());
+
+ // Now, if we report under-utilization again (without any source size change),
+ // there should be a long "proving period" before there is any increase in
+ // capture size made by the oracle.
+ const base::TimeTicks proving_period_end_time =
+ t + base::TimeDelta::FromSeconds(15);
+ gfx::Size stepped_up_size;
+ end_t = t + base::TimeDelta::FromSeconds(60);
+ for (; t < end_t; t += event_increment) {
+ if (!oracle.ObserveEventAndDecideCapture(
+ VideoCaptureOracle::kCompositorUpdate, gfx::Rect(Get1080pSize()),
+ t)) {
+ continue;
+ }
+
+ if (stepped_up_size.IsEmpty()) {
+ if (oracle.capture_size() != stepped_down_size) {
+ ASSERT_LT(proving_period_end_time, t);
+ stepped_up_size = oracle.capture_size();
+ ASSERT_LT(stepped_down_size.width(), stepped_up_size.width());
+ ASSERT_LT(stepped_down_size.height(), stepped_up_size.height());
+ }
+ } else {
+ ASSERT_EQ(stepped_up_size, oracle.capture_size());
+ }
+
+ const double utilization = stepped_up_size.IsEmpty() ? 0.25 : 0.9;
+ const int frame_number = oracle.RecordCapture(utilization);
+ base::TimeTicks ignored;
+ ASSERT_TRUE(oracle.CompleteCapture(frame_number, true, &ignored));
+ }
+ ASSERT_FALSE(stepped_up_size.IsEmpty());
+}
+
// Tests that VideoCaptureOracle does not change the capture size if
// auto-throttling is enabled when using a fixed resolution policy.
TEST(VideoCaptureOracleTest, DoesNotAutoThrottleWhenResolutionIsFixed) {
- VideoCaptureOracle oracle(Get30HzPeriod(),
- Get720pSize(),
- media::RESOLUTION_POLICY_FIXED_RESOLUTION,
- true);
+ VideoCaptureOracle oracle(Get30HzPeriod(), Get720pSize(),
+ media::RESOLUTION_POLICY_FIXED_RESOLUTION, true);
// Run 10 seconds of frame captures with 90% utilization expect no capture
// size changes.
diff --git a/chromium/media/capture/smooth_event_sampler_unittest.cc b/chromium/media/capture/smooth_event_sampler_unittest.cc
deleted file mode 100644
index 14a6823f556..00000000000
--- a/chromium/media/capture/smooth_event_sampler_unittest.cc
+++ /dev/null
@@ -1,488 +0,0 @@
-// Copyright (c) 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/capture/smooth_event_sampler.h"
-
-#include "base/strings/stringprintf.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace media {
-
-namespace {
-
-bool AddEventAndConsiderSampling(SmoothEventSampler* sampler,
- base::TimeTicks event_time) {
- sampler->ConsiderPresentationEvent(event_time);
- return sampler->ShouldSample();
-}
-
-void SteadyStateSampleAndAdvance(base::TimeDelta vsync,
- SmoothEventSampler* sampler,
- base::TimeTicks* t) {
- ASSERT_TRUE(AddEventAndConsiderSampling(sampler, *t));
- ASSERT_TRUE(sampler->HasUnrecordedEvent());
- sampler->RecordSample();
- ASSERT_FALSE(sampler->HasUnrecordedEvent());
- ASSERT_FALSE(sampler->IsOverdueForSamplingAt(*t));
- *t += vsync;
- ASSERT_FALSE(sampler->IsOverdueForSamplingAt(*t));
-}
-
-void SteadyStateNoSampleAndAdvance(base::TimeDelta vsync,
- SmoothEventSampler* sampler,
- base::TimeTicks* t) {
- ASSERT_FALSE(AddEventAndConsiderSampling(sampler, *t));
- ASSERT_TRUE(sampler->HasUnrecordedEvent());
- ASSERT_FALSE(sampler->IsOverdueForSamplingAt(*t));
- *t += vsync;
- ASSERT_FALSE(sampler->IsOverdueForSamplingAt(*t));
-}
-
-base::TimeTicks InitialTestTimeTicks() {
- return base::TimeTicks() + base::TimeDelta::FromSeconds(1);
-}
-
-void TestRedundantCaptureStrategy(base::TimeDelta capture_period,
- int redundant_capture_goal,
- SmoothEventSampler* sampler,
- base::TimeTicks* t) {
- // Before any events have been considered, we're overdue for sampling.
- ASSERT_TRUE(sampler->IsOverdueForSamplingAt(*t));
-
- // Consider the first event. We want to sample that.
- ASSERT_FALSE(sampler->HasUnrecordedEvent());
- ASSERT_TRUE(AddEventAndConsiderSampling(sampler, *t));
- ASSERT_TRUE(sampler->HasUnrecordedEvent());
- sampler->RecordSample();
- ASSERT_FALSE(sampler->HasUnrecordedEvent());
-
- // After more than 250 ms has passed without considering an event, we should
- // repeatedly be overdue for sampling. However, once the redundant capture
- // goal is achieved, we should no longer be overdue for sampling.
- *t += base::TimeDelta::FromMilliseconds(250);
- for (int i = 0; i < redundant_capture_goal; i++) {
- SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
- ASSERT_FALSE(sampler->HasUnrecordedEvent());
- ASSERT_TRUE(sampler->IsOverdueForSamplingAt(*t))
- << "Should sample until redundant capture goal is hit";
- sampler->RecordSample();
- *t += capture_period; // Timer fires once every capture period.
- }
- ASSERT_FALSE(sampler->IsOverdueForSamplingAt(*t))
- << "Should not be overdue once redundant capture goal achieved.";
-}
-
-} // namespace
-
-// 60Hz sampled at 30Hz should produce 30Hz. In addition, this test contains
-// much more comprehensive before/after/edge-case scenarios than the others.
-TEST(SmoothEventSamplerTest, Sample60HertzAt30Hertz) {
- const base::TimeDelta capture_period = base::TimeDelta::FromSeconds(1) / 30;
- const int redundant_capture_goal = 200;
- const base::TimeDelta vsync = base::TimeDelta::FromSeconds(1) / 60;
-
- SmoothEventSampler sampler(capture_period, redundant_capture_goal);
- base::TimeTicks t = InitialTestTimeTicks();
-
- TestRedundantCaptureStrategy(capture_period, redundant_capture_goal,
- &sampler, &t);
-
- // Steady state, we should capture every other vsync, indefinitely.
- for (int i = 0; i < 100; i++) {
- SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
- SteadyStateSampleAndAdvance(vsync, &sampler, &t);
- SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
- }
-
- // Now pretend we're limited by backpressure in the pipeline. In this scenario
- // case we are adding events but not sampling them.
- for (int i = 0; i < 20; i++) {
- SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
- ASSERT_EQ(i >= 14, sampler.IsOverdueForSamplingAt(t));
- ASSERT_TRUE(AddEventAndConsiderSampling(&sampler, t));
- ASSERT_TRUE(sampler.HasUnrecordedEvent());
- t += vsync;
- }
-
- // Now suppose we can sample again. We should be back in the steady state,
- // but at a different phase.
- ASSERT_TRUE(sampler.IsOverdueForSamplingAt(t));
- for (int i = 0; i < 100; i++) {
- SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
- SteadyStateSampleAndAdvance(vsync, &sampler, &t);
- SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
- }
-}
-
-// 50Hz sampled at 30Hz should produce a sequence where some frames are skipped.
-TEST(SmoothEventSamplerTest, Sample50HertzAt30Hertz) {
- const base::TimeDelta capture_period = base::TimeDelta::FromSeconds(1) / 30;
- const int redundant_capture_goal = 2;
- const base::TimeDelta vsync = base::TimeDelta::FromSeconds(1) / 50;
-
- SmoothEventSampler sampler(capture_period, redundant_capture_goal);
- base::TimeTicks t = InitialTestTimeTicks();
-
- TestRedundantCaptureStrategy(capture_period, redundant_capture_goal,
- &sampler, &t);
-
- // Steady state, we should capture 1st, 2nd and 4th frames out of every five
- // frames, indefinitely.
- for (int i = 0; i < 100; i++) {
- SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
- SteadyStateSampleAndAdvance(vsync, &sampler, &t);
- SteadyStateSampleAndAdvance(vsync, &sampler, &t);
- SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
- SteadyStateSampleAndAdvance(vsync, &sampler, &t);
- SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
- }
-
- // Now pretend we're limited by backpressure in the pipeline. In this scenario
- // case we are adding events but not sampling them.
- for (int i = 0; i < 20; i++) {
- SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
- ASSERT_EQ(i >= 11, sampler.IsOverdueForSamplingAt(t));
- ASSERT_TRUE(AddEventAndConsiderSampling(&sampler, t));
- t += vsync;
- }
-
- // Now suppose we can sample again. We should be back in the steady state
- // again.
- ASSERT_TRUE(sampler.IsOverdueForSamplingAt(t));
- for (int i = 0; i < 100; i++) {
- SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
- SteadyStateSampleAndAdvance(vsync, &sampler, &t);
- SteadyStateSampleAndAdvance(vsync, &sampler, &t);
- SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
- SteadyStateSampleAndAdvance(vsync, &sampler, &t);
- SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
- }
-}
-
-// 75Hz sampled at 30Hz should produce a sequence where some frames are skipped.
-TEST(SmoothEventSamplerTest, Sample75HertzAt30Hertz) {
- const base::TimeDelta capture_period = base::TimeDelta::FromSeconds(1) / 30;
- const int redundant_capture_goal = 32;
- const base::TimeDelta vsync = base::TimeDelta::FromSeconds(1) / 75;
-
- SmoothEventSampler sampler(capture_period, redundant_capture_goal);
- base::TimeTicks t = InitialTestTimeTicks();
-
- TestRedundantCaptureStrategy(capture_period, redundant_capture_goal,
- &sampler, &t);
-
- // Steady state, we should capture 1st and 3rd frames out of every five
- // frames, indefinitely.
- SteadyStateSampleAndAdvance(vsync, &sampler, &t);
- SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
- for (int i = 0; i < 100; i++) {
- SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
- SteadyStateSampleAndAdvance(vsync, &sampler, &t);
- SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
- SteadyStateSampleAndAdvance(vsync, &sampler, &t);
- SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
- SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
- }
-
- // Now pretend we're limited by backpressure in the pipeline. In this scenario
- // case we are adding events but not sampling them.
- for (int i = 0; i < 20; i++) {
- SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
- ASSERT_EQ(i >= 16, sampler.IsOverdueForSamplingAt(t));
- ASSERT_TRUE(AddEventAndConsiderSampling(&sampler, t));
- t += vsync;
- }
-
- // Now suppose we can sample again. We capture the next frame, and not the one
- // after that, and then we're back in the steady state again.
- ASSERT_TRUE(sampler.IsOverdueForSamplingAt(t));
- SteadyStateSampleAndAdvance(vsync, &sampler, &t);
- SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
- for (int i = 0; i < 100; i++) {
- SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
- SteadyStateSampleAndAdvance(vsync, &sampler, &t);
- SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
- SteadyStateSampleAndAdvance(vsync, &sampler, &t);
- SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
- SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
- }
-}
-
-// 30Hz sampled at 30Hz should produce 30Hz.
-TEST(SmoothEventSamplerTest, Sample30HertzAt30Hertz) {
- const base::TimeDelta capture_period = base::TimeDelta::FromSeconds(1) / 30;
- const int redundant_capture_goal = 1;
- const base::TimeDelta vsync = base::TimeDelta::FromSeconds(1) / 30;
-
- SmoothEventSampler sampler(capture_period, redundant_capture_goal);
- base::TimeTicks t = InitialTestTimeTicks();
-
- TestRedundantCaptureStrategy(capture_period, redundant_capture_goal,
- &sampler, &t);
-
- // Steady state, we should capture every vsync, indefinitely.
- for (int i = 0; i < 200; i++) {
- SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
- SteadyStateSampleAndAdvance(vsync, &sampler, &t);
- }
-
- // Now pretend we're limited by backpressure in the pipeline. In this scenario
- // case we are adding events but not sampling them.
- for (int i = 0; i < 10; i++) {
- SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
- ASSERT_EQ(i >= 7, sampler.IsOverdueForSamplingAt(t));
- ASSERT_TRUE(AddEventAndConsiderSampling(&sampler, t));
- t += vsync;
- }
-
- // Now suppose we can sample again. We should be back in the steady state.
- ASSERT_TRUE(sampler.IsOverdueForSamplingAt(t));
- for (int i = 0; i < 100; i++) {
- SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
- SteadyStateSampleAndAdvance(vsync, &sampler, &t);
- }
-}
-
-// 24Hz sampled at 30Hz should produce 24Hz.
-TEST(SmoothEventSamplerTest, Sample24HertzAt30Hertz) {
- const base::TimeDelta capture_period = base::TimeDelta::FromSeconds(1) / 30;
- const int redundant_capture_goal = 333;
- const base::TimeDelta vsync = base::TimeDelta::FromSeconds(1) / 24;
-
- SmoothEventSampler sampler(capture_period, redundant_capture_goal);
- base::TimeTicks t = InitialTestTimeTicks();
-
- TestRedundantCaptureStrategy(capture_period, redundant_capture_goal,
- &sampler, &t);
-
- // Steady state, we should capture every vsync, indefinitely.
- for (int i = 0; i < 200; i++) {
- SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
- SteadyStateSampleAndAdvance(vsync, &sampler, &t);
- }
-
- // Now pretend we're limited by backpressure in the pipeline. In this scenario
- // case we are adding events but not sampling them.
- for (int i = 0; i < 10; i++) {
- SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
- ASSERT_EQ(i >= 6, sampler.IsOverdueForSamplingAt(t));
- ASSERT_TRUE(AddEventAndConsiderSampling(&sampler, t));
- t += vsync;
- }
-
- // Now suppose we can sample again. We should be back in the steady state.
- ASSERT_TRUE(sampler.IsOverdueForSamplingAt(t));
- for (int i = 0; i < 100; i++) {
- SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
- SteadyStateSampleAndAdvance(vsync, &sampler, &t);
- }
-}
-
-// Tests that changing the minimum capture period during usage results in the
-// desired behavior.
-TEST(SmoothEventSamplerTest, Sample60HertzWithVariedCapturePeriods) {
- const base::TimeDelta vsync = base::TimeDelta::FromSeconds(1) / 60;
- const base::TimeDelta one_to_one_period = vsync;
- const base::TimeDelta two_to_one_period = vsync * 2;
- const base::TimeDelta two_and_three_to_one_period =
- base::TimeDelta::FromSeconds(1) / 24;
- const int redundant_capture_goal = 1;
-
- SmoothEventSampler sampler(one_to_one_period, redundant_capture_goal);
- base::TimeTicks t = InitialTestTimeTicks();
-
- TestRedundantCaptureStrategy(one_to_one_period, redundant_capture_goal,
- &sampler, &t);
-
- // With the capture rate at 60 Hz, we should capture every vsync.
- for (int i = 0; i < 100; i++) {
- SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
- SteadyStateSampleAndAdvance(vsync, &sampler, &t);
- }
-
- // Now change to the capture rate to 30 Hz, and we should capture every other
- // vsync.
- sampler.SetMinCapturePeriod(two_to_one_period);
- for (int i = 0; i < 100; i++) {
- SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
- SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
- SteadyStateSampleAndAdvance(vsync, &sampler, &t);
- }
-
- // Now change the capture rate back to 60 Hz, and we should capture every
- // vsync again.
- sampler.SetMinCapturePeriod(one_to_one_period);
- for (int i = 0; i < 100; i++) {
- SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
- SteadyStateSampleAndAdvance(vsync, &sampler, &t);
- }
-
- // Now change the capture rate to 24 Hz, and we should capture with a 2-3-2-3
- // cadence.
- sampler.SetMinCapturePeriod(two_and_three_to_one_period);
- for (int i = 0; i < 100; i++) {
- SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
- SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
- SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
- SteadyStateSampleAndAdvance(vsync, &sampler, &t);
- SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
- SteadyStateSampleAndAdvance(vsync, &sampler, &t);
- }
-}
-
-TEST(SmoothEventSamplerTest, DoubleDrawAtOneTimeStillDirties) {
- const base::TimeDelta capture_period = base::TimeDelta::FromSeconds(1) / 30;
- const base::TimeDelta overdue_period = base::TimeDelta::FromSeconds(1);
-
- SmoothEventSampler sampler(capture_period, 1);
- base::TimeTicks t = InitialTestTimeTicks();
-
- ASSERT_TRUE(AddEventAndConsiderSampling(&sampler, t));
- sampler.RecordSample();
- ASSERT_FALSE(sampler.IsOverdueForSamplingAt(t))
- << "Sampled last event; should not be dirty.";
- t += overdue_period;
-
- // Now simulate 2 events with the same clock value.
- ASSERT_TRUE(AddEventAndConsiderSampling(&sampler, t));
- sampler.RecordSample();
- ASSERT_FALSE(AddEventAndConsiderSampling(&sampler, t))
- << "Two events at same time -- expected second not to be sampled.";
- ASSERT_TRUE(sampler.IsOverdueForSamplingAt(t + overdue_period))
- << "Second event should dirty the capture state.";
- sampler.RecordSample();
- ASSERT_FALSE(sampler.IsOverdueForSamplingAt(t + overdue_period));
-}
-
-namespace {
-
-struct DataPoint {
- bool should_capture;
- double increment_ms;
-};
-
-void ReplayCheckingSamplerDecisions(const DataPoint* data_points,
- size_t num_data_points,
- SmoothEventSampler* sampler) {
- base::TimeTicks t = InitialTestTimeTicks();
- for (size_t i = 0; i < num_data_points; ++i) {
- t += base::TimeDelta::FromMicroseconds(
- static_cast<int64>(data_points[i].increment_ms * 1000));
- ASSERT_EQ(data_points[i].should_capture,
- AddEventAndConsiderSampling(sampler, t))
- << "at data_points[" << i << ']';
- if (data_points[i].should_capture)
- sampler->RecordSample();
- }
-}
-
-} // namespace
-
-TEST(SmoothEventSamplerTest, DrawingAt24FpsWith60HzVsyncSampledAt30Hertz) {
- // Actual capturing of timing data: Initial instability as a 24 FPS video was
- // started from a still screen, then clearly followed by steady-state.
- static const DataPoint data_points[] = {
- { true, 1437.93 }, { true, 150.484 }, { true, 217.362 }, { true, 50.161 },
- { true, 33.44 }, { false, 0 }, { true, 16.721 }, { true, 66.88 },
- { true, 50.161 }, { false, 0 }, { false, 0 }, { true, 50.16 },
- { true, 33.441 }, { true, 16.72 }, { false, 16.72 }, { true, 117.041 },
- { true, 16.72 }, { false, 16.72 }, { true, 50.161 }, { true, 50.16 },
- { true, 33.441 }, { true, 33.44 }, { true, 33.44 }, { true, 16.72 },
- { false, 0 }, { true, 50.161 }, { false, 0 }, { true, 33.44 },
- { true, 16.72 }, { false, 16.721 }, { true, 66.881 }, { false, 0 },
- { true, 33.441 }, { true, 16.72 }, { true, 50.16 }, { true, 16.72 },
- { false, 16.721 }, { true, 50.161 }, { true, 50.16 }, { false, 0 },
- { true, 33.441 }, { true, 50.337 }, { true, 50.183 }, { true, 16.722 },
- { true, 50.161 }, { true, 33.441 }, { true, 50.16 }, { true, 33.441 },
- { true, 50.16 }, { true, 33.441 }, { true, 50.16 }, { true, 33.44 },
- { true, 50.161 }, { true, 50.16 }, { true, 33.44 }, { true, 33.441 },
- { true, 50.16 }, { true, 50.161 }, { true, 33.44 }, { true, 33.441 },
- { true, 50.16 }, { true, 33.44 }, { true, 50.161 }, { true, 33.44 },
- { true, 50.161 }, { true, 33.44 }, { true, 50.161 }, { true, 33.44 },
- { true, 83.601 }, { true, 16.72 }, { true, 33.44 }, { false, 0 }
- };
-
- SmoothEventSampler sampler(base::TimeDelta::FromSeconds(1) / 30, 3);
- ReplayCheckingSamplerDecisions(data_points, arraysize(data_points), &sampler);
-}
-
-TEST(SmoothEventSamplerTest, DrawingAt30FpsWith60HzVsyncSampledAt30Hertz) {
- // Actual capturing of timing data: Initial instability as a 30 FPS video was
- // started from a still screen, then followed by steady-state. Drawing
- // framerate from the video rendering was a bit volatile, but averaged 30 FPS.
- static const DataPoint data_points[] = {
- { true, 2407.69 }, { true, 16.733 }, { true, 217.362 }, { true, 33.441 },
- { true, 33.44 }, { true, 33.44 }, { true, 33.441 }, { true, 33.44 },
- { true, 33.44 }, { true, 33.441 }, { true, 33.44 }, { true, 33.44 },
- { true, 16.721 }, { true, 33.44 }, { false, 0 }, { true, 50.161 },
- { true, 50.16 }, { false, 0 }, { true, 50.161 }, { true, 33.44 },
- { true, 16.72 }, { false, 0 }, { false, 16.72 }, { true, 66.881 },
- { false, 0 }, { true, 33.44 }, { true, 16.72 }, { true, 50.161 },
- { false, 0 }, { true, 33.538 }, { true, 33.526 }, { true, 33.447 },
- { true, 33.445 }, { true, 33.441 }, { true, 16.721 }, { true, 33.44 },
- { true, 33.44 }, { true, 50.161 }, { true, 16.72 }, { true, 33.44 },
- { true, 33.441 }, { true, 33.44 }, { false, 0 }, { false, 16.72 },
- { true, 66.881 }, { true, 16.72 }, { false, 16.72 }, { true, 50.16 },
- { true, 33.441 }, { true, 33.44 }, { true, 33.44 }, { true, 33.44 },
- { true, 33.441 }, { true, 33.44 }, { true, 50.161 }, { false, 0 },
- { true, 33.44 }, { true, 33.44 }, { true, 50.161 }, { true, 16.72 },
- { true, 33.44 }, { true, 33.441 }, { false, 0 }, { true, 66.88 },
- { true, 33.441 }, { true, 33.44 }, { true, 33.44 }, { false, 0 },
- { true, 33.441 }, { true, 33.44 }, { true, 33.44 }, { false, 0 },
- { true, 16.72 }, { true, 50.161 }, { false, 0 }, { true, 50.16 },
- { false, 0.001 }, { true, 16.721 }, { true, 66.88 }, { true, 33.44 },
- { true, 33.441 }, { true, 33.44 }, { true, 50.161 }, { true, 16.72 },
- { false, 0 }, { true, 33.44 }, { false, 16.72 }, { true, 66.881 },
- { true, 33.44 }, { true, 16.72 }, { true, 33.441 }, { false, 16.72 },
- { true, 66.88 }, { true, 16.721 }, { true, 50.16 }, { true, 33.44 },
- { true, 16.72 }, { true, 33.441 }, { true, 33.44 }, { true, 33.44 }
- };
-
- SmoothEventSampler sampler(base::TimeDelta::FromSeconds(1) / 30, 3);
- ReplayCheckingSamplerDecisions(data_points, arraysize(data_points), &sampler);
-}
-
-TEST(SmoothEventSamplerTest, DrawingAt60FpsWith60HzVsyncSampledAt30Hertz) {
- // Actual capturing of timing data: WebGL Acquarium demo
- // (http://webglsamples.googlecode.com/hg/aquarium/aquarium.html) which ran
- // between 55-60 FPS in the steady-state.
- static const DataPoint data_points[] = {
- { true, 16.72 }, { true, 16.72 }, { true, 4163.29 }, { true, 50.193 },
- { true, 117.041 }, { true, 50.161 }, { true, 50.16 }, { true, 33.441 },
- { true, 50.16 }, { true, 33.44 }, { false, 0 }, { false, 0 },
- { true, 50.161 }, { true, 83.601 }, { true, 50.16 }, { true, 16.72 },
- { true, 33.441 }, { false, 16.72 }, { true, 50.16 }, { true, 16.72 },
- { false, 0.001 }, { true, 33.441 }, { false, 16.72 }, { true, 16.72 },
- { true, 50.16 }, { false, 0 }, { true, 16.72 }, { true, 33.441 },
- { false, 0 }, { true, 33.44 }, { false, 16.72 }, { true, 16.72 },
- { true, 50.161 }, { false, 0 }, { true, 16.72 }, { true, 33.44 },
- { false, 0 }, { true, 33.44 }, { false, 16.721 }, { true, 16.721 },
- { true, 50.161 }, { false, 0 }, { true, 16.72 }, { true, 33.441 },
- { false, 0 }, { true, 33.44 }, { false, 16.72 }, { true, 33.44 },
- { false, 0 }, { true, 16.721 }, { true, 50.161 }, { false, 0 },
- { true, 33.44 }, { false, 0 }, { true, 16.72 }, { true, 33.441 },
- { false, 0 }, { true, 33.44 }, { false, 16.72 }, { true, 16.72 },
- { true, 50.16 }, { false, 0 }, { true, 16.721 }, { true, 33.44 },
- { false, 0 }, { true, 33.44 }, { false, 16.721 }, { true, 16.721 },
- { true, 50.161 }, { false, 0 }, { true, 16.72 }, { true, 33.44 },
- { false, 0 }, { true, 33.441 }, { false, 16.72 }, { true, 16.72 },
- { true, 50.16 }, { false, 0 }, { true, 16.72 }, { true, 33.441 },
- { true, 33.44 }, { false, 0 }, { true, 33.44 }, { true, 33.441 },
- { false, 0 }, { true, 33.44 }, { true, 33.441 }, { false, 0 },
- { true, 33.44 }, { false, 0 }, { true, 33.44 }, { false, 16.72 },
- { true, 16.721 }, { true, 50.161 }, { false, 0 }, { true, 16.72 },
- { true, 33.44 }, { true, 33.441 }, { false, 0 }, { true, 33.44 },
- { true, 33.44 }, { false, 0 }, { true, 33.441 }, { false, 16.72 },
- { true, 16.72 }, { true, 50.16 }, { false, 0 }, { true, 16.72 },
- { true, 33.441 }, { false, 0 }, { true, 33.44 }, { false, 16.72 },
- { true, 33.44 }, { false, 0 }, { true, 16.721 }, { true, 50.161 },
- { false, 0 }, { true, 16.72 }, { true, 33.44 }, { false, 0 },
- { true, 33.441 }, { false, 16.72 }, { true, 16.72 }, { true, 50.16 }
- };
-
- SmoothEventSampler sampler(base::TimeDelta::FromSeconds(1) / 30, 3);
- ReplayCheckingSamplerDecisions(data_points, arraysize(data_points), &sampler);
-}
-
-} // namespace media
diff --git a/chromium/media/video/capture/OWNERS b/chromium/media/capture/video/OWNERS
index 92972c65cbf..3165f0bed2c 100644
--- a/chromium/media/video/capture/OWNERS
+++ b/chromium/media/capture/video/OWNERS
@@ -1,4 +1,3 @@
mcasas@chromium.org
perkj@chromium.org
-posciak@chromium.org
tommi@chromium.org
diff --git a/chromium/media/video/capture/android/video_capture_device_android.cc b/chromium/media/capture/video/android/video_capture_device_android.cc
index 26d7fb3b3e3..69d8b056346 100644
--- a/chromium/media/video/capture/android/video_capture_device_android.cc
+++ b/chromium/media/capture/video/android/video_capture_device_android.cc
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/video/capture/android/video_capture_device_android.h"
-
+#include "media/capture/video/android/video_capture_device_android.h"
#include "base/android/jni_android.h"
+#include "base/android/jni_string.h"
#include "base/strings/string_number_conversions.h"
#include "jni/VideoCapture_jni.h"
-#include "media/video/capture/android/video_capture_device_factory_android.h"
+#include "media/capture/video/android/video_capture_device_factory_android.h"
using base::android::AttachCurrentThread;
using base::android::CheckException;
@@ -32,7 +32,8 @@ const std::string VideoCaptureDevice::Name::GetModel() const {
}
VideoCaptureDeviceAndroid::VideoCaptureDeviceAndroid(const Name& device_name)
- : state_(kIdle), got_first_frame_(false), device_name_(device_name) {}
+ : state_(kIdle), got_first_frame_(false), device_name_(device_name) {
+}
VideoCaptureDeviceAndroid::~VideoCaptureDeviceAndroid() {
StopAndDeAllocate();
@@ -63,9 +64,7 @@ void VideoCaptureDeviceAndroid::AllocateAndStart(
JNIEnv* env = AttachCurrentThread();
jboolean ret = Java_VideoCapture_allocate(
- env,
- j_capture_.obj(),
- params.requested_format.frame_size.width(),
+ env, j_capture_.obj(), params.requested_format.frame_size.width(),
params.requested_format.frame_size.height(),
params.requested_format.frame_rate);
if (!ret) {
@@ -161,10 +160,8 @@ void VideoCaptureDeviceAndroid::OnFrameAvailable(JNIEnv* env,
if (expected_next_frame_time_ <= current_time) {
expected_next_frame_time_ += frame_interval_;
- client_->OnIncomingCapturedData(reinterpret_cast<uint8*>(buffer),
- length,
- capture_format_,
- rotation,
+ client_->OnIncomingCapturedData(reinterpret_cast<uint8*>(buffer), length,
+ capture_format_, rotation,
base::TimeTicks::Now());
}
@@ -174,14 +171,12 @@ void VideoCaptureDeviceAndroid::OnFrameAvailable(JNIEnv* env,
void VideoCaptureDeviceAndroid::OnError(JNIEnv* env,
jobject obj,
jstring message) {
- const char *native_string = env->GetStringUTFChars(message, JNI_FALSE);
- SetErrorState(native_string);
- env->ReleaseStringUTFChars(message, native_string);
+ SetErrorState(base::android::ConvertJavaStringToUTF8(env, message));
}
VideoPixelFormat VideoCaptureDeviceAndroid::GetColorspace() {
JNIEnv* env = AttachCurrentThread();
- int current_capture_colorspace =
+ const int current_capture_colorspace =
Java_VideoCapture_getColorspace(env, j_capture_.obj());
switch (current_capture_colorspace) {
case ANDROID_IMAGE_FORMAT_YV12:
diff --git a/chromium/media/video/capture/android/video_capture_device_android.h b/chromium/media/capture/video/android/video_capture_device_android.h
index 832d64fbc8b..ccab5e2a352 100644
--- a/chromium/media/video/capture/android/video_capture_device_android.h
+++ b/chromium/media/capture/video/android/video_capture_device_android.h
@@ -13,7 +13,7 @@
#include "base/threading/thread.h"
#include "base/time/time.h"
#include "media/base/media_export.h"
-#include "media/video/capture/video_capture_device.h"
+#include "media/capture/video/video_capture_device.h"
namespace media {
@@ -53,21 +53,20 @@ class MEDIA_EXPORT VideoCaptureDeviceAndroid : public VideoCaptureDevice {
void StopAndDeAllocate() override;
// Implement org.chromium.media.VideoCapture.nativeOnFrameAvailable.
- void OnFrameAvailable(
- JNIEnv* env,
- jobject obj,
- jbyteArray data,
- jint length,
- jint rotation);
+ void OnFrameAvailable(JNIEnv* env,
+ jobject obj,
+ jbyteArray data,
+ jint length,
+ jint rotation);
// Implement org.chromium.media.VideoCapture.nativeOnError.
void OnError(JNIEnv* env, jobject obj, jstring message);
private:
enum InternalState {
- kIdle, // The device is opened but not in use.
+ kIdle, // The device is opened but not in use.
kCapturing, // Video is being captured.
- kError // Hit error. User needs to recover by destroying the object.
+ kError // Hit error. User needs to recover by destroying the object.
};
VideoPixelFormat GetColorspace();
diff --git a/chromium/media/video/capture/android/video_capture_device_factory_android.cc b/chromium/media/capture/video/android/video_capture_device_factory_android.cc
index cdf25524c77..cce951b0f32 100644
--- a/chromium/media/video/capture/android/video_capture_device_factory_android.cc
+++ b/chromium/media/capture/video/android/video_capture_device_factory_android.cc
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/video/capture/android/video_capture_device_factory_android.h"
+#include "media/capture/video/android/video_capture_device_factory_android.h"
#include "base/android/jni_string.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/stringprintf.h"
#include "jni/VideoCaptureFactory_jni.h"
-#include "media/video/capture/android/video_capture_device_android.h"
+#include "media/capture/video/android/video_capture_device_android.h"
using base::android::AttachCurrentThread;
using base::android::ScopedJavaLocalRef;
@@ -27,9 +27,7 @@ VideoCaptureDeviceFactoryAndroid::createVideoCaptureAndroid(
int id,
jlong nativeVideoCaptureDeviceAndroid) {
return (Java_VideoCaptureFactory_createVideoCapture(
- AttachCurrentThread(),
- base::android::GetApplicationContext(),
- id,
+ AttachCurrentThread(), base::android::GetApplicationContext(), id,
nativeVideoCaptureDeviceAndroid));
}
@@ -58,8 +56,8 @@ void VideoCaptureDeviceFactoryAndroid::GetDeviceNames(
JNIEnv* env = AttachCurrentThread();
const jobject context = base::android::GetApplicationContext();
- const int num_cameras = Java_VideoCaptureFactory_getNumberOfCameras(env,
- context);
+ const int num_cameras =
+ Java_VideoCaptureFactory_getNumberOfCameras(env, context);
DVLOG(1) << "VideoCaptureDevice::GetDeviceNames: num_cameras=" << num_cameras;
if (num_cameras <= 0)
return;
@@ -94,8 +92,8 @@ void VideoCaptureDeviceFactoryAndroid::GetDeviceSupportedFormats(
return;
JNIEnv* env = AttachCurrentThread();
base::android::ScopedJavaLocalRef<jobjectArray> collected_formats =
- Java_VideoCaptureFactory_getDeviceSupportedFormats(env,
- base::android::GetApplicationContext(), id);
+ Java_VideoCaptureFactory_getDeviceSupportedFormats(
+ env, base::android::GetApplicationContext(), id);
if (collected_formats.is_null())
return;
@@ -104,7 +102,8 @@ void VideoCaptureDeviceFactoryAndroid::GetDeviceSupportedFormats(
base::android::ScopedJavaLocalRef<jobject> format(
env, env->GetObjectArrayElement(collected_formats.obj(), i));
- VideoPixelFormat pixel_format = media::PIXEL_FORMAT_UNKNOWN;
+ VideoPixelFormat pixel_format =
+ media::PIXEL_FORMAT_UNKNOWN;
switch (media::Java_VideoCaptureFactory_getCaptureFormatPixelFormat(
env, format.obj())) {
case VideoCaptureDeviceAndroid::ANDROID_IMAGE_FORMAT_YV12:
@@ -117,10 +116,10 @@ void VideoCaptureDeviceFactoryAndroid::GetDeviceSupportedFormats(
continue;
}
VideoCaptureFormat capture_format(
- gfx::Size(media::Java_VideoCaptureFactory_getCaptureFormatWidth(env,
- format.obj()),
- media::Java_VideoCaptureFactory_getCaptureFormatHeight(env,
- format.obj())),
+ gfx::Size(media::Java_VideoCaptureFactory_getCaptureFormatWidth(
+ env, format.obj()),
+ media::Java_VideoCaptureFactory_getCaptureFormatHeight(
+ env, format.obj())),
media::Java_VideoCaptureFactory_getCaptureFormatFramerate(env,
format.obj()),
pixel_format);
diff --git a/chromium/media/video/capture/android/video_capture_device_factory_android.h b/chromium/media/capture/video/android/video_capture_device_factory_android.h
index 994139390fe..5ca432a1122 100644
--- a/chromium/media/video/capture/android/video_capture_device_factory_android.h
+++ b/chromium/media/capture/video/android/video_capture_device_factory_android.h
@@ -5,19 +5,19 @@
#ifndef MEDIA_VIDEO_CAPTURE_ANDROID_VIDEO_CAPTURE_DEVICE_FACTORY_ANDROID_H_
#define MEDIA_VIDEO_CAPTURE_ANDROID_VIDEO_CAPTURE_DEVICE_FACTORY_ANDROID_H_
-#include "media/video/capture/video_capture_device_factory.h"
+#include "media/capture/video/video_capture_device_factory.h"
#include <jni.h>
#include "base/android/scoped_java_ref.h"
-#include "media/video/capture/video_capture_device.h"
+#include "media/capture/video/video_capture_device.h"
namespace media {
// VideoCaptureDeviceFactory on Android. This class implements the static
// VideoCapture methods and the factory of VideoCaptureAndroid.
-class MEDIA_EXPORT VideoCaptureDeviceFactoryAndroid :
- public VideoCaptureDeviceFactory {
+class MEDIA_EXPORT VideoCaptureDeviceFactoryAndroid
+ : public VideoCaptureDeviceFactory {
public:
static bool RegisterVideoCaptureDeviceFactory(JNIEnv* env);
static base::android::ScopedJavaLocalRef<jobject> createVideoCaptureAndroid(
@@ -35,8 +35,8 @@ class MEDIA_EXPORT VideoCaptureDeviceFactoryAndroid :
VideoCaptureFormats* supported_formats) override;
private:
-
- DISALLOW_COPY_AND_ASSIGN(VideoCaptureDeviceFactoryAndroid);};
+ DISALLOW_COPY_AND_ASSIGN(VideoCaptureDeviceFactoryAndroid);
+};
} // namespace media
#endif // MEDIA_VIDEO_CAPTURE_ANDROID_VIDEO_CAPTURE_DEVICE_FACTORY_ANDROID_H_
diff --git a/chromium/media/video/capture/fake_video_capture_device.cc b/chromium/media/capture/video/fake_video_capture_device.cc
index 79bd43fa34d..6016c8e2010 100644
--- a/chromium/media/video/capture/fake_video_capture_device.cc
+++ b/chromium/media/capture/video/fake_video_capture_device.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/video/capture/fake_video_capture_device.h"
+#include "media/capture/video/fake_video_capture_device.h"
#include <algorithm>
@@ -26,10 +26,8 @@ void DrawPacman(bool use_argb,
// |kN32_SkColorType| stands for the appropriiate RGBA/BGRA format.
const SkColorType colorspace =
use_argb ? kN32_SkColorType : kAlpha_8_SkColorType;
- const SkImageInfo info = SkImageInfo::Make(frame_size.width(),
- frame_size.height(),
- colorspace,
- kOpaque_SkAlphaType);
+ const SkImageInfo info = SkImageInfo::Make(
+ frame_size.width(), frame_size.height(), colorspace, kOpaque_SkAlphaType);
SkBitmap bitmap;
bitmap.setInfo(info);
bitmap.setPixels(data);
@@ -39,8 +37,8 @@ void DrawPacman(bool use_argb,
// Equalize Alpha_8 that has light green background while RGBA has white.
if (use_argb) {
- const SkRect full_frame = SkRect::MakeWH(frame_size.width(),
- frame_size.height());
+ const SkRect full_frame =
+ SkRect::MakeWH(frame_size.width(), frame_size.height());
paint.setARGB(255, 0, 127, 0);
canvas.drawRect(full_frame, paint);
}
@@ -51,8 +49,7 @@ void DrawPacman(bool use_argb,
const int radius = std::min(frame_size.width(), frame_size.height()) / 4;
const SkRect rect = SkRect::MakeXYWH(frame_size.width() / 2 - radius,
frame_size.height() / 2 - radius,
- 2 * radius,
- 2 * radius);
+ 2 * radius, 2 * radius);
canvas.drawArc(rect, 0, end_angle, true, paint);
// Draw current time.
@@ -62,15 +59,17 @@ void DrawPacman(bool use_argb,
const int minutes = (elapsed_ms / 1000 / 60) % 60;
const int hours = (elapsed_ms / 1000 / 60 / 60) % 60;
- const std::string time_string = base::StringPrintf("%d:%02d:%02d:%03d %d",
- hours, minutes, seconds, milliseconds, frame_count);
+ const std::string time_string =
+ base::StringPrintf("%d:%02d:%02d:%03d %d", hours, minutes, seconds,
+ milliseconds, frame_count);
canvas.scale(3, 3);
canvas.drawText(time_string.data(), time_string.length(), 30, 20, paint);
}
-FakeVideoCaptureDevice::FakeVideoCaptureDevice(
- FakeVideoCaptureDeviceType device_type)
- : device_type_(device_type),
+FakeVideoCaptureDevice::FakeVideoCaptureDevice(BufferOwnership buffer_ownership,
+ BufferPlanarity planarity)
+ : buffer_ownership_(buffer_ownership),
+ planarity_(planarity),
frame_count_(0),
weak_factory_(this) {}
@@ -88,42 +87,47 @@ void FakeVideoCaptureDevice::AllocateAndStart(
// Incoming |params| can be none of the supported formats, so we get the
// closest thing rounded up. TODO(mcasas): Use the |params|, if they belong to
// the supported ones, when http://crbug.com/309554 is verified.
- DCHECK_EQ(params.requested_format.pixel_format, PIXEL_FORMAT_I420);
- capture_format_.pixel_format = params.requested_format.pixel_format;
capture_format_.frame_rate = 30.0;
if (params.requested_format.frame_size.width() > 1280)
- capture_format_.frame_size.SetSize(1920, 1080);
+ capture_format_.frame_size.SetSize(1920, 1080);
else if (params.requested_format.frame_size.width() > 640)
- capture_format_.frame_size.SetSize(1280, 720);
+ capture_format_.frame_size.SetSize(1280, 720);
else if (params.requested_format.frame_size.width() > 320)
capture_format_.frame_size.SetSize(640, 480);
else
capture_format_.frame_size.SetSize(320, 240);
- if (device_type_ == USING_OWN_BUFFERS ||
- device_type_ == USING_OWN_BUFFERS_TRIPLANAR) {
+ if (buffer_ownership_ == BufferOwnership::CLIENT_BUFFERS) {
+ if (planarity_ == BufferPlanarity::PACKED) {
+ capture_format_.pixel_storage = PIXEL_STORAGE_CPU;
+ capture_format_.pixel_format = PIXEL_FORMAT_ARGB;
+ DVLOG(1) << "starting with client argb buffers";
+ } else if (planarity_ == BufferPlanarity::TRIPLANAR) {
+ capture_format_.pixel_storage = PIXEL_STORAGE_GPUMEMORYBUFFER;
+ capture_format_.pixel_format = PIXEL_FORMAT_I420;
+ DVLOG(1) << "starting with gmb I420 buffers";
+ }
+ } else if (buffer_ownership_ == BufferOwnership::OWN_BUFFERS) {
capture_format_.pixel_storage = PIXEL_STORAGE_CPU;
+ capture_format_.pixel_format = PIXEL_FORMAT_I420;
+ DVLOG(1) << "starting with own I420 buffers";
+ }
+
+ if (capture_format_.pixel_format == PIXEL_FORMAT_I420) {
fake_frame_.reset(new uint8[VideoFrame::AllocationSize(
- VideoFrame::I420, capture_format_.frame_size)]);
+ PIXEL_FORMAT_I420, capture_format_.frame_size)]);
+ }
+
+ if (buffer_ownership_ == BufferOwnership::CLIENT_BUFFERS)
BeepAndScheduleNextCapture(
base::TimeTicks::Now(),
- base::Bind(&FakeVideoCaptureDevice::CaptureUsingOwnBuffers,
+ base::Bind(&FakeVideoCaptureDevice::CaptureUsingClientBuffers,
weak_factory_.GetWeakPtr()));
- } else if (device_type_ == USING_CLIENT_BUFFERS) {
- DVLOG(1) << "starting with "
- << (params.use_gpu_memory_buffers ? "GMB" : "ShMem");
+ else if (buffer_ownership_ == BufferOwnership::OWN_BUFFERS)
BeepAndScheduleNextCapture(
base::TimeTicks::Now(),
- base::Bind(
- &FakeVideoCaptureDevice::CaptureUsingClientBuffers,
- weak_factory_.GetWeakPtr(),
- params.use_gpu_memory_buffers ? PIXEL_FORMAT_ARGB
- : PIXEL_FORMAT_I420,
- params.use_gpu_memory_buffers ? PIXEL_STORAGE_GPUMEMORYBUFFER
- : PIXEL_STORAGE_CPU));
- } else {
- client_->OnError("Unknown Fake Video Capture Device type.");
- }
+ base::Bind(&FakeVideoCaptureDevice::CaptureUsingOwnBuffers,
+ weak_factory_.GetWeakPtr()));
}
void FakeVideoCaptureDevice::StopAndDeAllocate() {
@@ -137,30 +141,23 @@ void FakeVideoCaptureDevice::CaptureUsingOwnBuffers(
const size_t frame_size = capture_format_.ImageAllocationSize();
memset(fake_frame_.get(), 0, frame_size);
- DrawPacman(false /* use_argb */,
- fake_frame_.get(),
- frame_count_,
- kFakeCapturePeriodMs,
- capture_format_.frame_size);
+ DrawPacman(false /* use_argb */, fake_frame_.get(), frame_count_,
+ kFakeCapturePeriodMs, capture_format_.frame_size);
// Give the captured frame to the client.
- if (device_type_ == USING_OWN_BUFFERS) {
- client_->OnIncomingCapturedData(fake_frame_.get(),
- frame_size,
- capture_format_,
- 0 /* rotation */,
+ if (planarity_ == BufferPlanarity::PACKED) {
+ client_->OnIncomingCapturedData(fake_frame_.get(), frame_size,
+ capture_format_, 0 /* rotation */,
base::TimeTicks::Now());
- } else if (device_type_ == USING_OWN_BUFFERS_TRIPLANAR) {
+ } else if (planarity_ == BufferPlanarity::TRIPLANAR) {
client_->OnIncomingCapturedYuvData(
fake_frame_.get(),
fake_frame_.get() + capture_format_.frame_size.GetArea(),
fake_frame_.get() + capture_format_.frame_size.GetArea() * 5 / 4,
capture_format_.frame_size.width(),
capture_format_.frame_size.width() / 2,
- capture_format_.frame_size.width() / 2,
- capture_format_,
- 0 /* rotation */,
- base::TimeTicks::Now());
+ capture_format_.frame_size.width() / 2, capture_format_,
+ 0 /* rotation */, base::TimeTicks::Now());
}
BeepAndScheduleNextCapture(
expected_execution_time,
@@ -169,37 +166,51 @@ void FakeVideoCaptureDevice::CaptureUsingOwnBuffers(
}
void FakeVideoCaptureDevice::CaptureUsingClientBuffers(
- VideoPixelFormat pixel_format,
- VideoPixelStorage pixel_storage,
base::TimeTicks expected_execution_time) {
DCHECK(thread_checker_.CalledOnValidThread());
scoped_ptr<VideoCaptureDevice::Client::Buffer> capture_buffer(
- client_->ReserveOutputBuffer(capture_format_.frame_size, pixel_format,
- pixel_storage));
+ client_->ReserveOutputBuffer(capture_format_.frame_size,
+ capture_format_.pixel_format,
+ capture_format_.pixel_storage));
DLOG_IF(ERROR, !capture_buffer) << "Couldn't allocate Capture Buffer";
-
- if (capture_buffer.get()) {
- uint8_t* const data_ptr = static_cast<uint8_t*>(capture_buffer->data());
- DCHECK(data_ptr) << "Buffer has NO backing memory";
- memset(data_ptr, 0, capture_buffer->size());
-
- DrawPacman((pixel_format == media::PIXEL_FORMAT_ARGB), /* use_argb */
- data_ptr, frame_count_, kFakeCapturePeriodMs,
- capture_format_.frame_size);
-
- // Give the captured frame to the client.
- const VideoCaptureFormat format(capture_format_.frame_size,
- capture_format_.frame_rate, pixel_format,
- pixel_storage);
- client_->OnIncomingCapturedBuffer(capture_buffer.Pass(), format,
- base::TimeTicks::Now());
+ DCHECK(capture_buffer->data()) << "Buffer has NO backing memory";
+
+ if (capture_format_.pixel_storage == PIXEL_STORAGE_GPUMEMORYBUFFER &&
+ capture_format_.pixel_format == media::PIXEL_FORMAT_I420) {
+ // Since SkBitmap expects a packed & contiguous memory region for I420, we
+ // need to use |fake_frame_| to draw onto.
+ memset(fake_frame_.get(), 0, capture_format_.ImageAllocationSize());
+ DrawPacman(false /* use_argb */, fake_frame_.get(), frame_count_,
+ kFakeCapturePeriodMs, capture_format_.frame_size);
+
+ // Copy data from |fake_frame_| into the reserved planes of GpuMemoryBuffer.
+ size_t offset = 0;
+ for (size_t i = 0; i < VideoFrame::NumPlanes(PIXEL_FORMAT_I420); ++i) {
+ const size_t plane_size =
+ VideoFrame::PlaneSize(PIXEL_FORMAT_I420, i,
+ capture_format_.frame_size)
+ .GetArea();
+ memcpy(capture_buffer->data(i), fake_frame_.get() + offset, plane_size);
+ offset += plane_size;
+ }
+ } else {
+ DCHECK_EQ(capture_format_.pixel_storage, PIXEL_STORAGE_CPU);
+ DCHECK_EQ(capture_format_.pixel_format, PIXEL_FORMAT_ARGB);
+ uint8_t* data_ptr = static_cast<uint8_t*>(capture_buffer->data());
+ memset(data_ptr, 0, capture_buffer->mapped_size());
+ DrawPacman(true /* use_argb */, data_ptr, frame_count_,
+ kFakeCapturePeriodMs, capture_format_.frame_size);
}
+ // Give the captured frame to the client.
+ client_->OnIncomingCapturedBuffer(capture_buffer.Pass(), capture_format_,
+ base::TimeTicks::Now());
+
BeepAndScheduleNextCapture(
expected_execution_time,
base::Bind(&FakeVideoCaptureDevice::CaptureUsingClientBuffers,
- weak_factory_.GetWeakPtr(), pixel_format, pixel_storage));
+ weak_factory_.GetWeakPtr()));
}
void FakeVideoCaptureDevice::BeepAndScheduleNextCapture(
diff --git a/chromium/media/video/capture/fake_video_capture_device.h b/chromium/media/capture/video/fake_video_capture_device.h
index f05c01e3104..c45bfea6e09 100644
--- a/chromium/media/video/capture/fake_video_capture_device.h
+++ b/chromium/media/capture/video/fake_video_capture_device.h
@@ -16,21 +16,26 @@
#include "base/threading/thread.h"
#include "base/threading/thread_checker.h"
#include "base/time/time.h"
-#include "media/video/capture/video_capture_device.h"
+#include "media/capture/video/video_capture_device.h"
namespace media {
class MEDIA_EXPORT FakeVideoCaptureDevice : public VideoCaptureDevice {
public:
- enum FakeVideoCaptureDeviceType {
- USING_OWN_BUFFERS,
- USING_OWN_BUFFERS_TRIPLANAR,
- USING_CLIENT_BUFFERS,
+ enum class BufferOwnership {
+ OWN_BUFFERS,
+ CLIENT_BUFFERS,
+ };
+
+ enum class BufferPlanarity {
+ PACKED,
+ TRIPLANAR,
};
static int FakeCapturePeriodMs() { return kFakeCapturePeriodMs; }
- explicit FakeVideoCaptureDevice(FakeVideoCaptureDeviceType device_type);
+ FakeVideoCaptureDevice(BufferOwnership buffer_ownership,
+ BufferPlanarity planarity);
~FakeVideoCaptureDevice() override;
// VideoCaptureDevice implementation.
@@ -42,9 +47,7 @@ class MEDIA_EXPORT FakeVideoCaptureDevice : public VideoCaptureDevice {
static const int kFakeCapturePeriodMs = 50;
void CaptureUsingOwnBuffers(base::TimeTicks expected_execution_time);
- void CaptureUsingClientBuffers(VideoPixelFormat pixel_format,
- VideoPixelStorage pixel_storage,
- base::TimeTicks expected_execution_time);
+ void CaptureUsingClientBuffers(base::TimeTicks expected_execution_time);
void BeepAndScheduleNextCapture(
base::TimeTicks expected_execution_time,
const base::Callback<void(base::TimeTicks)>& next_capture);
@@ -53,7 +56,8 @@ class MEDIA_EXPORT FakeVideoCaptureDevice : public VideoCaptureDevice {
// correct thread that owns the object.
base::ThreadChecker thread_checker_;
- const FakeVideoCaptureDeviceType device_type_;
+ const BufferOwnership buffer_ownership_;
+ const BufferPlanarity planarity_;
scoped_ptr<VideoCaptureDevice::Client> client_;
// |fake_frame_| is used for capturing on Own Buffers.
diff --git a/chromium/media/video/capture/fake_video_capture_device_factory.cc b/chromium/media/capture/video/fake_video_capture_device_factory.cc
index a9d32dee2ed..d6403ee98c4 100644
--- a/chromium/media/video/capture/fake_video_capture_device_factory.cc
+++ b/chromium/media/capture/video/fake_video_capture_device_factory.cc
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/video/capture/fake_video_capture_device_factory.h"
+#include "media/capture/video/fake_video_capture_device_factory.h"
#include "base/command_line.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "media/base/media_switches.h"
-#include "media/video/capture/fake_video_capture_device.h"
+#include "media/capture/video/fake_video_capture_device.h"
namespace media {
@@ -20,22 +20,25 @@ scoped_ptr<VideoCaptureDevice> FakeVideoCaptureDeviceFactory::Create(
const VideoCaptureDevice::Name& device_name) {
DCHECK(thread_checker_.CalledOnValidThread());
- const std::string option = base::CommandLine::ForCurrentProcess()->
- GetSwitchValueASCII(switches::kUseFakeDeviceForMediaStream);
+ const std::string option =
+ base::CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
+ switches::kUseFakeDeviceForMediaStream);
- FakeVideoCaptureDevice::FakeVideoCaptureDeviceType fake_vcd_type;
- if (option.empty())
- fake_vcd_type = FakeVideoCaptureDevice::USING_OWN_BUFFERS;
- else if (base:: strcasecmp(option.c_str(), "triplanar") == 0)
- fake_vcd_type = FakeVideoCaptureDevice::USING_OWN_BUFFERS_TRIPLANAR;
- else
- fake_vcd_type = FakeVideoCaptureDevice::USING_CLIENT_BUFFERS;
+ const FakeVideoCaptureDevice::BufferOwnership fake_vcd_ownership =
+ base::StartsWith(option, "client", base::CompareCase::INSENSITIVE_ASCII)
+ ? FakeVideoCaptureDevice::BufferOwnership::CLIENT_BUFFERS
+ : FakeVideoCaptureDevice::BufferOwnership::OWN_BUFFERS;
+
+ const FakeVideoCaptureDevice::BufferPlanarity fake_vcd_planarity =
+ base::EndsWith(option, "triplanar", base::CompareCase::INSENSITIVE_ASCII)
+ ? FakeVideoCaptureDevice::BufferPlanarity::TRIPLANAR
+ : FakeVideoCaptureDevice::BufferPlanarity::PACKED;
for (int n = 0; n < number_of_devices_; ++n) {
std::string possible_id = base::StringPrintf("/dev/video%d", n);
if (device_name.id().compare(possible_id) == 0) {
return scoped_ptr<VideoCaptureDevice>(
- new FakeVideoCaptureDevice(fake_vcd_type));
+ new FakeVideoCaptureDevice(fake_vcd_ownership, fake_vcd_planarity));
}
}
return scoped_ptr<VideoCaptureDevice>();
@@ -49,15 +52,19 @@ void FakeVideoCaptureDeviceFactory::GetDeviceNames(
VideoCaptureDevice::Name name(base::StringPrintf("fake_device_%d", n),
base::StringPrintf("/dev/video%d", n)
#if defined(OS_LINUX)
- , VideoCaptureDevice::Name::V4L2_SINGLE_PLANE
+ ,
+ VideoCaptureDevice::Name::V4L2_SINGLE_PLANE
#elif defined(OS_MACOSX)
- , VideoCaptureDevice::Name::AVFOUNDATION
+ ,
+ VideoCaptureDevice::Name::AVFOUNDATION
#elif defined(OS_WIN)
- , VideoCaptureDevice::Name::DIRECT_SHOW
+ ,
+ VideoCaptureDevice::Name::DIRECT_SHOW
#elif defined(OS_ANDROID)
- , VideoCaptureDevice::Name::API2_LEGACY
+ ,
+ VideoCaptureDevice::Name::API2_LEGACY
#endif
- );
+ );
device_names->push_back(name);
}
}
diff --git a/chromium/media/video/capture/fake_video_capture_device_factory.h b/chromium/media/capture/video/fake_video_capture_device_factory.h
index 00cea5d6364..a6de31af801 100644
--- a/chromium/media/video/capture/fake_video_capture_device_factory.h
+++ b/chromium/media/capture/video/fake_video_capture_device_factory.h
@@ -7,14 +7,14 @@
#ifndef MEDIA_VIDEO_CAPTURE_FAKE_VIDEO_CAPTURE_DEVICE_FACTORY_H_
#define MEDIA_VIDEO_CAPTURE_FAKE_VIDEO_CAPTURE_DEVICE_FACTORY_H_
-#include "media/video/capture/video_capture_device_factory.h"
+#include "media/capture/video/video_capture_device_factory.h"
namespace media {
// Extension of VideoCaptureDeviceFactory to create and manipulate fake devices,
// not including file-based ones.
-class MEDIA_EXPORT FakeVideoCaptureDeviceFactory :
- public VideoCaptureDeviceFactory {
+class MEDIA_EXPORT FakeVideoCaptureDeviceFactory
+ : public VideoCaptureDeviceFactory {
public:
FakeVideoCaptureDeviceFactory();
~FakeVideoCaptureDeviceFactory() override {}
diff --git a/chromium/media/video/capture/fake_video_capture_device_unittest.cc b/chromium/media/capture/video/fake_video_capture_device_unittest.cc
index a388c0068f1..e323fe3b10c 100644
--- a/chromium/media/video/capture/fake_video_capture_device_unittest.cc
+++ b/chromium/media/capture/video/fake_video_capture_device_unittest.cc
@@ -8,9 +8,9 @@
#include "base/test/test_timeouts.h"
#include "base/threading/thread.h"
#include "media/base/video_capture_types.h"
-#include "media/video/capture/fake_video_capture_device.h"
-#include "media/video/capture/fake_video_capture_device_factory.h"
-#include "media/video/capture/video_capture_device.h"
+#include "media/capture/video/fake_video_capture_device.h"
+#include "media/capture/video/fake_video_capture_device_factory.h"
+#include "media/capture/video/video_capture_device.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -27,14 +27,17 @@ namespace {
// This class is a Client::Buffer that allocates and frees the requested |size|.
class MockBuffer : public VideoCaptureDevice::Client::Buffer {
public:
- MockBuffer(int buffer_id, size_t size)
- : id_(buffer_id), size_(size), data_(new uint8[size_]) {}
+ MockBuffer(int buffer_id, size_t mapped_size)
+ : id_(buffer_id),
+ mapped_size_(mapped_size),
+ data_(new uint8[mapped_size]) {}
~MockBuffer() override { delete[] data_; }
int id() const override { return id_; }
- size_t size() const override { return size_; }
- void* data() override { return data_; }
- ClientBuffer AsClientBuffer() override { return nullptr; }
+ gfx::Size dimensions() const override { return gfx::Size(); }
+ size_t mapped_size() const override { return mapped_size_; }
+ void* data(int plane) override { return data_; }
+ ClientBuffer AsClientBuffer(int plane) override { return nullptr; }
#if defined(OS_POSIX)
base::FileDescriptor AsPlatformFile() override {
return base::FileDescriptor();
@@ -43,7 +46,7 @@ class MockBuffer : public VideoCaptureDevice::Client::Buffer {
private:
const int id_;
- const size_t size_;
+ const size_t mapped_size_;
uint8* const data_;
};
@@ -78,9 +81,9 @@ class MockClient : public VideoCaptureDevice::Client {
scoped_ptr<Buffer> ReserveOutputBuffer(const gfx::Size& dimensions,
media::VideoPixelFormat format,
media::VideoPixelStorage storage) {
- EXPECT_TRUE((format == media::PIXEL_FORMAT_I420 &&
+ EXPECT_TRUE((format == media::PIXEL_FORMAT_ARGB &&
storage == media::PIXEL_STORAGE_CPU) ||
- (format == media::PIXEL_FORMAT_ARGB &&
+ (format == media::PIXEL_FORMAT_I420 &&
storage == media::PIXEL_STORAGE_GPUMEMORYBUFFER));
EXPECT_GT(dimensions.GetArea(), 0);
const VideoCaptureFormat frame_format(dimensions, 0.0, format);
@@ -96,7 +99,8 @@ class MockClient : public VideoCaptureDevice::Client {
scoped_ptr<Buffer> buffer,
const scoped_refptr<media::VideoFrame>& frame,
const base::TimeTicks& timestamp) {
- VideoCaptureFormat format(frame->natural_size(), 30.0, PIXEL_FORMAT_I420);
+ VideoCaptureFormat format(frame->natural_size(), 30.0,
+ PIXEL_FORMAT_I420);
frame_cb_.Run(format);
}
@@ -106,8 +110,8 @@ class MockClient : public VideoCaptureDevice::Client {
base::Callback<void(const VideoCaptureFormat&)> frame_cb_;
};
-class DeviceEnumerationListener :
- public base::RefCounted<DeviceEnumerationListener> {
+class DeviceEnumerationListener
+ : public base::RefCounted<DeviceEnumerationListener> {
public:
MOCK_METHOD1(OnEnumeratedDevicesCallbackPtr,
void(VideoCaptureDevice::Names* names));
@@ -125,9 +129,9 @@ class DeviceEnumerationListener :
} // namespace
class FakeVideoCaptureDeviceTest
- : public testing::TestWithParam<
- ::testing::tuple<FakeVideoCaptureDevice::FakeVideoCaptureDeviceType,
- bool>> {
+ : public testing::TestWithParam<::testing::tuple<
+ FakeVideoCaptureDevice::BufferOwnership,
+ FakeVideoCaptureDevice::BufferPlanarity>> {
protected:
FakeVideoCaptureDeviceTest()
: loop_(new base::MessageLoop()),
@@ -138,9 +142,7 @@ class FakeVideoCaptureDeviceTest
device_enumeration_listener_ = new DeviceEnumerationListener();
}
- void SetUp() override {
- EXPECT_CALL(*client_, OnError(_)).Times(0);
- }
+ void SetUp() override { EXPECT_CALL(*client_, OnError(_)).Times(0); }
void OnFrameCaptured(const VideoCaptureFormat& format) {
last_format_ = format;
@@ -179,15 +181,13 @@ TEST_P(FakeVideoCaptureDeviceTest, CaptureUsing) {
const scoped_ptr<VideoCaptureDevice::Names> names(EnumerateDevices());
ASSERT_FALSE(names->empty());
- scoped_ptr<VideoCaptureDevice> device(
- new FakeVideoCaptureDevice(testing::get<0>(GetParam())));
+ scoped_ptr<VideoCaptureDevice> device(new FakeVideoCaptureDevice(
+ testing::get<0>(GetParam()), testing::get<1>(GetParam())));
ASSERT_TRUE(device);
VideoCaptureParams capture_params;
capture_params.requested_format.frame_size.SetSize(640, 480);
capture_params.requested_format.frame_rate = 30;
- capture_params.requested_format.pixel_format = PIXEL_FORMAT_I420;
- capture_params.use_gpu_memory_buffers = ::testing::get<1>(GetParam());
device->AllocateAndStart(capture_params, client_.Pass());
WaitForCapturedFrame();
@@ -200,10 +200,10 @@ TEST_P(FakeVideoCaptureDeviceTest, CaptureUsing) {
INSTANTIATE_TEST_CASE_P(
,
FakeVideoCaptureDeviceTest,
- Combine(Values(FakeVideoCaptureDevice::USING_OWN_BUFFERS,
- FakeVideoCaptureDevice::USING_OWN_BUFFERS_TRIPLANAR,
- FakeVideoCaptureDevice::USING_CLIENT_BUFFERS),
- Bool()));
+ Combine(Values(FakeVideoCaptureDevice::BufferOwnership::OWN_BUFFERS,
+ FakeVideoCaptureDevice::BufferOwnership::CLIENT_BUFFERS),
+ Values(FakeVideoCaptureDevice::BufferPlanarity::PACKED,
+ FakeVideoCaptureDevice::BufferPlanarity::TRIPLANAR)));
TEST_F(FakeVideoCaptureDeviceTest, GetDeviceSupportedFormats) {
scoped_ptr<VideoCaptureDevice::Names> names(EnumerateDevices());
diff --git a/chromium/media/capture/video/file_video_capture_device.cc b/chromium/media/capture/video/file_video_capture_device.cc
new file mode 100644
index 00000000000..073c0a13254
--- /dev/null
+++ b/chromium/media/capture/video/file_video_capture_device.cc
@@ -0,0 +1,386 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/capture/video/file_video_capture_device.h"
+
+#include "base/bind.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_piece.h"
+#include "media/base/video_capture_types.h"
+#include "media/filters/jpeg_parser.h"
+
+namespace media {
+
+static const int kY4MHeaderMaxSize = 200;
+static const char kY4MSimpleFrameDelimiter[] = "FRAME";
+static const int kY4MSimpleFrameDelimiterSize = 6;
+static const float kMJpegFrameRate = 30.0f;
+
+int ParseY4MInt(const base::StringPiece& token) {
+ int temp_int;
+ CHECK(base::StringToInt(token, &temp_int)) << token;
+ return temp_int;
+}
+
+// Extract numerator and denominator out of a token that must have the aspect
+// numerator:denominator, both integer numbers.
+void ParseY4MRational(const base::StringPiece& token,
+ int* numerator, int* denominator) {
+ size_t index_divider = token.find(':');
+ CHECK_NE(index_divider, token.npos);
+ *numerator = ParseY4MInt(token.substr(0, index_divider));
+ *denominator = ParseY4MInt(token.substr(index_divider + 1, token.length()));
+ CHECK(*denominator);
+}
+
+// This function parses the ASCII string in |header| as belonging to a Y4M file,
+// returning the collected format in |video_format|. For a non-authoritative
+// explanation of the header format, check
+// http://wiki.multimedia.cx/index.php?title=YUV4MPEG2
+// Restrictions: Only interlaced I420 pixel format is supported, and pixel
+// aspect ratio is ignored.
+// Implementation notes: Y4M header should end with an ASCII 0x20 (whitespace)
+// character; however, all examples mentioned in the Y4M header description end
+// with a newline character instead. Also, some headers do _not_ specify pixel
+// format, in this case it means I420.
+// This code was inspired by third_party/libvpx_new/.../y4minput.* .
+void ParseY4MTags(const std::string& file_header,
+ media::VideoCaptureFormat* video_format) {
+ media::VideoCaptureFormat format;
+ format.pixel_format = media::PIXEL_FORMAT_I420;
+ size_t index = 0;
+ size_t blank_position = 0;
+ base::StringPiece token;
+ while ((blank_position = file_header.find_first_of("\n ", index)) !=
+ std::string::npos) {
+ // Every token is supposed to have an identifier letter and a bunch of
+ // information immediately after, which we extract into a |token| here.
+ token =
+ base::StringPiece(&file_header[index + 1], blank_position - index - 1);
+ CHECK(!token.empty());
+ switch (file_header[index]) {
+ case 'W':
+ format.frame_size.set_width(ParseY4MInt(token));
+ break;
+ case 'H':
+ format.frame_size.set_height(ParseY4MInt(token));
+ break;
+ case 'F': {
+ // If the token is "FRAME", it means we have finished with the header.
+ if (token[0] == 'R')
+ break;
+ int fps_numerator, fps_denominator;
+ ParseY4MRational(token, &fps_numerator, &fps_denominator);
+ format.frame_rate = fps_numerator / fps_denominator;
+ break;
+ }
+ case 'I':
+ // Interlacing is ignored, but we don't like mixed modes.
+ CHECK_NE(token[0], 'm');
+ break;
+ case 'A':
+ // Pixel aspect ratio ignored.
+ break;
+ case 'C':
+ CHECK(token == "420" || token == "420jpeg" || token == "420paldv")
+ << token; // Only I420 is supported, and we fudge the variants.
+ break;
+ default:
+ break;
+ }
+ // We're done if we have found a newline character right after the token.
+ if (file_header[blank_position] == '\n')
+ break;
+ index = blank_position + 1;
+ }
+ // Last video format semantic correctness check before sending it back.
+ CHECK(format.IsValid());
+ *video_format = format;
+}
+
+class VideoFileParser {
+ public:
+ explicit VideoFileParser(const base::FilePath& file_path);
+ virtual ~VideoFileParser();
+
+ // Parses file header and collects format information in |capture_format|.
+ virtual bool Initialize(media::VideoCaptureFormat* capture_format) = 0;
+
+ // Gets the start pointer of next frame and stores current frame size in
+ // |frame_size|.
+ virtual const uint8_t* GetNextFrame(int* frame_size) = 0;
+
+ protected:
+ const base::FilePath file_path_;
+ int frame_size_;
+ size_t current_byte_index_;
+ size_t first_frame_byte_index_;
+};
+
+class Y4mFileParser final : public VideoFileParser {
+ public:
+ explicit Y4mFileParser(const base::FilePath& file_path);
+
+ // VideoFileParser implementation, class methods.
+ ~Y4mFileParser() override;
+ bool Initialize(media::VideoCaptureFormat* capture_format) override;
+ const uint8_t* GetNextFrame(int* frame_size) override;
+
+ private:
+ scoped_ptr<base::File> file_;
+ scoped_ptr<uint8_t[]> video_frame_;
+
+ DISALLOW_COPY_AND_ASSIGN(Y4mFileParser);
+};
+
+class MjpegFileParser final : public VideoFileParser {
+ public:
+ explicit MjpegFileParser(const base::FilePath& file_path);
+
+ // VideoFileParser implementation, class methods.
+ ~MjpegFileParser() override;
+ bool Initialize(media::VideoCaptureFormat* capture_format) override;
+ const uint8_t* GetNextFrame(int* frame_size) override;
+
+ private:
+ scoped_ptr<base::MemoryMappedFile> mapped_file_;
+
+ DISALLOW_COPY_AND_ASSIGN(MjpegFileParser);
+};
+
+VideoFileParser::VideoFileParser(const base::FilePath& file_path)
+ : file_path_(file_path),
+ frame_size_(0),
+ current_byte_index_(0),
+ first_frame_byte_index_(0) {}
+
+VideoFileParser::~VideoFileParser() {}
+
+Y4mFileParser::Y4mFileParser(const base::FilePath& file_path)
+ : VideoFileParser(file_path) {}
+
+Y4mFileParser::~Y4mFileParser() {}
+
+bool Y4mFileParser::Initialize(media::VideoCaptureFormat* capture_format) {
+ file_.reset(new base::File(file_path_,
+ base::File::FLAG_OPEN | base::File::FLAG_READ));
+ if (!file_->IsValid()) {
+ DLOG(ERROR) << file_path_.value() << ", error: "
+ << base::File::ErrorToString(file_->error_details());
+ return false;
+ }
+
+ std::string header(kY4MHeaderMaxSize, '\0');
+ file_->Read(0, &header[0], header.size());
+ const size_t header_end = header.find(kY4MSimpleFrameDelimiter);
+ CHECK_NE(header_end, header.npos);
+
+ ParseY4MTags(header, capture_format);
+ first_frame_byte_index_ = header_end + kY4MSimpleFrameDelimiterSize;
+ current_byte_index_ = first_frame_byte_index_;
+ frame_size_ = capture_format->ImageAllocationSize();
+ return true;
+}
+
+const uint8_t* Y4mFileParser::GetNextFrame(int* frame_size) {
+ if (!video_frame_)
+ video_frame_.reset(new uint8_t[frame_size_]);
+ int result =
+ file_->Read(current_byte_index_,
+ reinterpret_cast<char*>(video_frame_.get()), frame_size_);
+
+ // If we passed EOF to base::File, it will return 0 read characters. In that
+ // case, reset the pointer and read again.
+ if (result != frame_size_) {
+ CHECK_EQ(result, 0);
+ current_byte_index_ = first_frame_byte_index_;
+ CHECK_EQ(
+ file_->Read(current_byte_index_,
+ reinterpret_cast<char*>(video_frame_.get()), frame_size_),
+ frame_size_);
+ } else {
+ current_byte_index_ += frame_size_ + kY4MSimpleFrameDelimiterSize;
+ }
+ *frame_size = frame_size_;
+ return video_frame_.get();
+}
+
+MjpegFileParser::MjpegFileParser(const base::FilePath& file_path)
+ : VideoFileParser(file_path) {}
+
+MjpegFileParser::~MjpegFileParser() {}
+
+bool MjpegFileParser::Initialize(media::VideoCaptureFormat* capture_format) {
+ mapped_file_.reset(new base::MemoryMappedFile());
+
+ if (!mapped_file_->Initialize(file_path_) || !mapped_file_->IsValid()) {
+ LOG(ERROR) << "File memory map error: " << file_path_.value();
+ return false;
+ }
+
+ JpegParseResult result;
+ if (!ParseJpegStream(mapped_file_->data(), mapped_file_->length(), &result))
+ return false;
+
+ frame_size_ = result.image_size;
+ if (frame_size_ > static_cast<int>(mapped_file_->length())) {
+ LOG(ERROR) << "File is incomplete";
+ return false;
+ }
+
+ VideoCaptureFormat format;
+ format.pixel_format = media::PIXEL_FORMAT_MJPEG;
+ format.frame_size.set_width(result.frame_header.visible_width);
+ format.frame_size.set_height(result.frame_header.visible_height);
+ format.frame_rate = kMJpegFrameRate;
+ if (!format.IsValid())
+ return false;
+ *capture_format = format;
+ return true;
+}
+
+const uint8_t* MjpegFileParser::GetNextFrame(int* frame_size) {
+ const uint8_t* buf_ptr = mapped_file_->data() + current_byte_index_;
+
+ JpegParseResult result;
+ if (!ParseJpegStream(buf_ptr, mapped_file_->length() - current_byte_index_,
+ &result)) {
+ return nullptr;
+ }
+ *frame_size = frame_size_ = result.image_size;
+ current_byte_index_ += frame_size_;
+ // Reset the pointer to play repeatedly.
+ if (current_byte_index_ >= mapped_file_->length())
+ current_byte_index_ = first_frame_byte_index_;
+ return buf_ptr;
+}
+
+// static
+bool FileVideoCaptureDevice::GetVideoCaptureFormat(
+ const base::FilePath& file_path,
+ media::VideoCaptureFormat* video_format) {
+ scoped_ptr<VideoFileParser> file_parser =
+ GetVideoFileParser(file_path, video_format);
+ return file_parser != nullptr;
+}
+
+// static
+scoped_ptr<VideoFileParser>
+FileVideoCaptureDevice::GetVideoFileParser(
+ const base::FilePath& file_path,
+ media::VideoCaptureFormat* video_format) {
+ scoped_ptr<VideoFileParser> file_parser;
+ std::string file_name(file_path.value().begin(), file_path.value().end());
+
+ if (base::EndsWith(file_name, "y4m",
+ base::CompareCase::INSENSITIVE_ASCII)) {
+ file_parser.reset(new Y4mFileParser(file_path));
+ } else if (base::EndsWith(file_name, "mjpeg",
+ base::CompareCase::INSENSITIVE_ASCII)) {
+ file_parser.reset(new MjpegFileParser(file_path));
+ } else {
+ LOG(ERROR) << "Unsupported file format.";
+ return file_parser.Pass();
+ }
+
+ if (!file_parser->Initialize(video_format)) {
+ file_parser.reset();
+ }
+ return file_parser.Pass();
+}
+
+FileVideoCaptureDevice::FileVideoCaptureDevice(const base::FilePath& file_path)
+ : capture_thread_("CaptureThread"), file_path_(file_path) {}
+
+FileVideoCaptureDevice::~FileVideoCaptureDevice() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ // Check if the thread is running.
+ // This means that the device has not been DeAllocated properly.
+ CHECK(!capture_thread_.IsRunning());
+}
+
+void FileVideoCaptureDevice::AllocateAndStart(
+ const VideoCaptureParams& params,
+ scoped_ptr<VideoCaptureDevice::Client> client) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ CHECK(!capture_thread_.IsRunning());
+
+ capture_thread_.Start();
+ capture_thread_.message_loop()->PostTask(
+ FROM_HERE,
+ base::Bind(&FileVideoCaptureDevice::OnAllocateAndStart,
+ base::Unretained(this), params, base::Passed(&client)));
+}
+
+void FileVideoCaptureDevice::StopAndDeAllocate() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ CHECK(capture_thread_.IsRunning());
+
+ capture_thread_.message_loop()->PostTask(
+ FROM_HERE, base::Bind(&FileVideoCaptureDevice::OnStopAndDeAllocate,
+ base::Unretained(this)));
+ capture_thread_.Stop();
+}
+
+void FileVideoCaptureDevice::OnAllocateAndStart(
+ const VideoCaptureParams& params,
+ scoped_ptr<VideoCaptureDevice::Client> client) {
+ DCHECK_EQ(capture_thread_.message_loop(), base::MessageLoop::current());
+
+ client_ = client.Pass();
+
+ DCHECK(!file_parser_);
+ file_parser_ = GetVideoFileParser(file_path_, &capture_format_);
+ if (!file_parser_) {
+ client_->OnError("Could not open Video file");
+ return;
+ }
+
+ DVLOG(1) << "Opened video file " << capture_format_.frame_size.ToString()
+ << ", fps: " << capture_format_.frame_rate;
+
+ capture_thread_.message_loop()->PostTask(
+ FROM_HERE, base::Bind(&FileVideoCaptureDevice::OnCaptureTask,
+ base::Unretained(this)));
+}
+
+void FileVideoCaptureDevice::OnStopAndDeAllocate() {
+ DCHECK_EQ(capture_thread_.message_loop(), base::MessageLoop::current());
+ file_parser_.reset();
+ client_.reset();
+ next_frame_time_ = base::TimeTicks();
+}
+
+void FileVideoCaptureDevice::OnCaptureTask() {
+ DCHECK_EQ(capture_thread_.message_loop(), base::MessageLoop::current());
+ if (!client_)
+ return;
+
+ // Give the captured frame to the client.
+ int frame_size = 0;
+ const uint8_t* frame_ptr = file_parser_->GetNextFrame(&frame_size);
+ DCHECK(frame_size);
+ CHECK(frame_ptr);
+ const base::TimeTicks current_time = base::TimeTicks::Now();
+ client_->OnIncomingCapturedData(frame_ptr, frame_size, capture_format_, 0,
+ current_time);
+ // Reschedule next CaptureTask.
+ const base::TimeDelta frame_interval =
+ base::TimeDelta::FromMicroseconds(1E6 / capture_format_.frame_rate);
+ if (next_frame_time_.is_null()) {
+ next_frame_time_ = current_time + frame_interval;
+ } else {
+ next_frame_time_ += frame_interval;
+ // Don't accumulate any debt if we are lagging behind - just post next frame
+ // immediately and continue as normal.
+ if (next_frame_time_ < current_time)
+ next_frame_time_ = current_time;
+ }
+ base::MessageLoop::current()->PostDelayedTask(
+ FROM_HERE, base::Bind(&FileVideoCaptureDevice::OnCaptureTask,
+ base::Unretained(this)),
+ next_frame_time_ - current_time);
+}
+
+} // namespace media
diff --git a/chromium/media/video/capture/file_video_capture_device.h b/chromium/media/capture/video/file_video_capture_device.h
index 2ea6f358b0a..76edf8d793f 100644
--- a/chromium/media/video/capture/file_video_capture_device.h
+++ b/chromium/media/capture/video/file_video_capture_device.h
@@ -8,30 +8,38 @@
#include <string>
#include "base/files/file.h"
+#include "base/files/memory_mapped_file.h"
#include "base/memory/scoped_ptr.h"
#include "base/threading/thread.h"
#include "base/threading/thread_checker.h"
-#include "media/video/capture/video_capture_device.h"
+#include "media/capture/video/video_capture_device.h"
namespace media {
+class VideoFileParser;
+
// Implementation of a VideoCaptureDevice class that reads from a file. Used for
// testing the video capture pipeline when no real hardware is available. The
-// only supported file format is YUV4MPEG2 (a.k.a. Y4M), a minimal container
-// with a series of uncompressed video only frames, see the link
-// http://wiki.multimedia.cx/index.php?title=YUV4MPEG2 for more information
-// on the file format. Several restrictions and notes apply, see the
+// supported file formats are YUV4MPEG2 (a.k.a. Y4M) and MJPEG/JPEG. YUV4MPEG2
+// is a minimal container with a series of uncompressed video only frames, see
+// the link http://wiki.multimedia.cx/index.php?title=YUV4MPEG2 for more
+// information on the file format. Several restrictions and notes apply, see the
// implementation file.
-// Example videos can be found in http://media.xiph.org/video/derf.
+// Example Y4M videos can be found in http://media.xiph.org/video/derf.
+// Example MJPEG videos can be found in media/data/test/bear.mjpeg.
+// Restrictions: Y4M videos should have .y4m file extension and MJPEG videos
+// should have .mjpeg file extension.
class MEDIA_EXPORT FileVideoCaptureDevice : public VideoCaptureDevice {
public:
- static int64 ParseFileAndExtractVideoFormat(
- base::File* file,
- media::VideoCaptureFormat* video_format);
- static base::File OpenFileForRead(const base::FilePath& file_path);
+ // Reads and parses the header of a |file_path|, returning the collected
+ // pixel format in |video_format|. Returns true on file parsed successfully,
+ // or false.
+ // Restrictions: Only trivial Y4M per-frame headers and MJPEG are supported.
+ static bool GetVideoCaptureFormat(const base::FilePath& file_path,
+ media::VideoCaptureFormat* video_format);
// Constructor of the class, with a fully qualified file path as input, which
- // represents the Y4M video file to stream repeatedly.
+ // represents the Y4M or MJPEG file to stream repeatedly.
explicit FileVideoCaptureDevice(const base::FilePath& file_path);
// VideoCaptureDevice implementation, class methods.
@@ -41,14 +49,18 @@ class MEDIA_EXPORT FileVideoCaptureDevice : public VideoCaptureDevice {
void StopAndDeAllocate() override;
private:
- // Returns size in bytes of an I420 frame, not including possible paddings,
- // defined by |capture_format_|.
- int CalculateFrameSize() const;
+ // Opens a given file |file_path| for reading, and stores collected format
+ // information in |video_format|. Returns the parsed file to the
+ // caller, who is responsible for closing it.
+ static scoped_ptr<VideoFileParser> GetVideoFileParser(
+ const base::FilePath& file_path,
+ media::VideoCaptureFormat* video_format);
// Called on the |capture_thread_|.
void OnAllocateAndStart(const VideoCaptureParams& params,
scoped_ptr<Client> client);
void OnStopAndDeAllocate();
+ const uint8_t* GetNextFrame();
void OnCaptureTask();
// |thread_checker_| is used to check that destructor, AllocateAndStart() and
@@ -61,12 +73,8 @@ class MEDIA_EXPORT FileVideoCaptureDevice : public VideoCaptureDevice {
// The following members belong to |capture_thread_|.
scoped_ptr<VideoCaptureDevice::Client> client_;
const base::FilePath file_path_;
- base::File file_;
- scoped_ptr<uint8[]> video_frame_;
+ scoped_ptr<VideoFileParser> file_parser_;
VideoCaptureFormat capture_format_;
- int frame_size_;
- int64 current_byte_index_;
- int64 first_frame_byte_index_;
// Target time for the next frame.
base::TimeTicks next_frame_time_;
diff --git a/chromium/media/video/capture/file_video_capture_device_factory.cc b/chromium/media/capture/video/file_video_capture_device_factory.cc
index d5dd112d9c4..ea36c4d70bc 100644
--- a/chromium/media/video/capture/file_video_capture_device_factory.cc
+++ b/chromium/media/capture/video/file_video_capture_device_factory.cc
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/video/capture/file_video_capture_device_factory.h"
+#include "media/capture/video/file_video_capture_device_factory.h"
#include "base/command_line.h"
#include "base/files/file_path.h"
#include "base/strings/sys_string_conversions.h"
#include "media/base/media_switches.h"
-#include "media/video/capture/file_video_capture_device.h"
+#include "media/capture/video/file_video_capture_device.h"
namespace media {
@@ -31,8 +31,8 @@ scoped_ptr<VideoCaptureDevice> FileVideoCaptureDeviceFactory::Create(
return scoped_ptr<VideoCaptureDevice>(new FileVideoCaptureDevice(
base::FilePath(base::SysUTF8ToWide(device_name.name()))));
#else
- return scoped_ptr<VideoCaptureDevice>(new FileVideoCaptureDevice(
- base::FilePath(device_name.name())));
+ return scoped_ptr<VideoCaptureDevice>(
+ new FileVideoCaptureDevice(base::FilePath(device_name.name())));
#endif
}
@@ -44,22 +44,18 @@ void FileVideoCaptureDeviceFactory::GetDeviceNames(
#if defined(OS_WIN)
device_names->push_back(VideoCaptureDevice::Name(
base::SysWideToUTF8(command_line_file_path.value()),
- kFileVideoCaptureDeviceName,
- VideoCaptureDevice::Name::DIRECT_SHOW));
+ kFileVideoCaptureDeviceName, VideoCaptureDevice::Name::DIRECT_SHOW));
#elif defined(OS_MACOSX)
device_names->push_back(VideoCaptureDevice::Name(
- command_line_file_path.value(),
- kFileVideoCaptureDeviceName,
+ command_line_file_path.value(), kFileVideoCaptureDeviceName,
VideoCaptureDevice::Name::AVFOUNDATION));
#elif defined(OS_LINUX)
device_names->push_back(VideoCaptureDevice::Name(
- command_line_file_path.value(),
- kFileVideoCaptureDeviceName,
+ command_line_file_path.value(), kFileVideoCaptureDeviceName,
VideoCaptureDevice::Name::V4L2_SINGLE_PLANE));
#else
device_names->push_back(VideoCaptureDevice::Name(
- command_line_file_path.value(),
- kFileVideoCaptureDeviceName));
+ command_line_file_path.value(), kFileVideoCaptureDeviceName));
#endif
}
@@ -67,13 +63,13 @@ void FileVideoCaptureDeviceFactory::GetDeviceSupportedFormats(
const VideoCaptureDevice::Name& device,
VideoCaptureFormats* supported_formats) {
DCHECK(thread_checker_.CalledOnValidThread());
- base::File file =
- FileVideoCaptureDevice::OpenFileForRead(GetFilePathFromCommandLine());
- if (!file.IsValid())
- return;
+
VideoCaptureFormat capture_format;
- FileVideoCaptureDevice::ParseFileAndExtractVideoFormat(&file,
- &capture_format);
+ if (!FileVideoCaptureDevice::GetVideoCaptureFormat(
+ GetFilePathFromCommandLine(), &capture_format)) {
+ return;
+ }
+
supported_formats->push_back(capture_format);
}
diff --git a/chromium/media/video/capture/file_video_capture_device_factory.h b/chromium/media/capture/video/file_video_capture_device_factory.h
index 523cd71b2cf..5335b2c6e88 100644
--- a/chromium/media/video/capture/file_video_capture_device_factory.h
+++ b/chromium/media/capture/video/file_video_capture_device_factory.h
@@ -5,15 +5,15 @@
#ifndef MEDIA_VIDEO_CAPTURE_FILE_VIDEO_CAPTURE_DEVICE_FACTORY_H_
#define MEDIA_VIDEO_CAPTURE_FILE_VIDEO_CAPTURE_DEVICE_FACTORY_H_
-#include "media/video/capture/video_capture_device_factory.h"
+#include "media/capture/video/video_capture_device_factory.h"
namespace media {
// Extension of VideoCaptureDeviceFactory to create and manipulate file-backed
// fake devices. These devices play back video-only files as video capture
// input.
-class MEDIA_EXPORT FileVideoCaptureDeviceFactory :
- public VideoCaptureDeviceFactory {
+class MEDIA_EXPORT FileVideoCaptureDeviceFactory
+ : public VideoCaptureDeviceFactory {
public:
FileVideoCaptureDeviceFactory() {}
~FileVideoCaptureDeviceFactory() override {}
diff --git a/chromium/media/capture/video/linux/OWNERS b/chromium/media/capture/video/linux/OWNERS
new file mode 100644
index 00000000000..bf72e049d0e
--- /dev/null
+++ b/chromium/media/capture/video/linux/OWNERS
@@ -0,0 +1 @@
+posciak@chromium.org
diff --git a/chromium/media/video/capture/linux/v4l2_capture_delegate.cc b/chromium/media/capture/video/linux/v4l2_capture_delegate.cc
index de9edb94f3b..88f50759ea9 100644
--- a/chromium/media/video/capture/linux/v4l2_capture_delegate.cc
+++ b/chromium/media/capture/video/linux/v4l2_capture_delegate.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/video/capture/linux/v4l2_capture_delegate.h"
+#include "media/capture/video/linux/v4l2_capture_delegate.h"
#include <poll.h>
#include <sys/fcntl.h>
@@ -14,9 +14,9 @@
#include "base/posix/eintr_wrapper.h"
#include "base/strings/stringprintf.h"
#include "media/base/bind_to_current_loop.h"
-#include "media/video/capture/linux/v4l2_capture_delegate_multi_plane.h"
-#include "media/video/capture/linux/v4l2_capture_delegate_single_plane.h"
-#include "media/video/capture/linux/video_capture_device_linux.h"
+#include "media/capture/video/linux/v4l2_capture_delegate_multi_plane.h"
+#include "media/capture/video/linux/v4l2_capture_delegate_single_plane.h"
+#include "media/capture/video/linux/video_capture_device_linux.h"
namespace media {
@@ -37,27 +37,30 @@ const int kTypicalFramerate = 30;
// V4L2 color formats supported by V4L2CaptureDelegate derived classes.
// This list is ordered by precedence of use -- but see caveats for MJPEG.
-static struct{
+static struct {
uint32_t fourcc;
VideoPixelFormat pixel_format;
size_t num_planes;
} const kSupportedFormatsAndPlanarity[] = {
- {V4L2_PIX_FMT_YUV420, PIXEL_FORMAT_I420, 1},
- {V4L2_PIX_FMT_YUYV, PIXEL_FORMAT_YUY2, 1},
- {V4L2_PIX_FMT_UYVY, PIXEL_FORMAT_UYVY, 1},
- {V4L2_PIX_FMT_RGB24, PIXEL_FORMAT_RGB24, 1},
+ {V4L2_PIX_FMT_YUV420, PIXEL_FORMAT_I420, 1},
+ {V4L2_PIX_FMT_YUYV, PIXEL_FORMAT_YUY2, 1},
+ {V4L2_PIX_FMT_UYVY, PIXEL_FORMAT_UYVY, 1},
+ {V4L2_PIX_FMT_RGB24, PIXEL_FORMAT_RGB24, 1},
#if !defined(OS_OPENBSD)
- // TODO(mcasas): add V4L2_PIX_FMT_YVU420M when available in bots.
- {V4L2_PIX_FMT_YUV420M, PIXEL_FORMAT_I420, 3},
+ // TODO(mcasas): add V4L2_PIX_FMT_YVU420M when available in bots.
+ {V4L2_PIX_FMT_YUV420M, PIXEL_FORMAT_I420, 3},
#endif
- // MJPEG is usually sitting fairly low since we don't want to have to decode.
- // However, is needed for large resolutions due to USB bandwidth limitations,
- // so GetListOfUsableFourCcs() can duplicate it on top, see that method.
- {V4L2_PIX_FMT_MJPEG, PIXEL_FORMAT_MJPEG, 1},
- // JPEG works as MJPEG on some gspca webcams from field reports, see
- // https://code.google.com/p/webrtc/issues/detail?id=529, put it as the least
- // preferred format.
- {V4L2_PIX_FMT_JPEG, PIXEL_FORMAT_MJPEG, 1},
+ // MJPEG is usually sitting fairly low since we don't want to have to
+ // decode.
+ // However, is needed for large resolutions due to USB bandwidth
+ // limitations,
+ // so GetListOfUsableFourCcs() can duplicate it on top, see that method.
+ {V4L2_PIX_FMT_MJPEG, PIXEL_FORMAT_MJPEG, 1},
+ // JPEG works as MJPEG on some gspca webcams from field reports, see
+ // https://code.google.com/p/webrtc/issues/detail?id=529, put it as the
+ // least
+ // preferred format.
+ {V4L2_PIX_FMT_JPEG, PIXEL_FORMAT_MJPEG, 1},
};
// static
@@ -99,7 +102,8 @@ VideoPixelFormat V4L2CaptureDelegate::V4l2FourCcToChromiumPixelFormat(
return fourcc_and_pixel_format.pixel_format;
}
// Not finding a pixel format is OK during device capabilities enumeration.
- // Let the caller decide if PIXEL_FORMAT_UNKNOWN is an error or not.
+ // Let the caller decide if PIXEL_FORMAT_UNKNOWN is an error or
+ // not.
DVLOG(1) << "Unsupported pixel format: " << FourccToString(v4l2_fourcc);
return PIXEL_FORMAT_UNKNOWN;
}
@@ -288,8 +292,8 @@ void V4L2CaptureDelegate::AllocateAndStart(
}
}
- if (HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_STREAMON, &capture_type_))
- < 0) {
+ if (HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_STREAMON, &capture_type_)) <
+ 0) {
SetErrorState("VIDIOC_STREAMON failed");
return;
}
@@ -304,8 +308,8 @@ void V4L2CaptureDelegate::StopAndDeAllocate() {
DCHECK(v4l2_task_runner_->BelongsToCurrentThread());
// The order is important: stop streaming, clear |buffer_pool_|,
// thus munmap()ing the v4l2_buffers, and then return them to the OS.
- if (HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_STREAMOFF, &capture_type_))
- < 0) {
+ if (HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_STREAMOFF, &capture_type_)) <
+ 0) {
SetErrorState("VIDIOC_STREAMOFF failed");
return;
}
@@ -356,8 +360,7 @@ bool V4L2CaptureDelegate::MapAndQueueBuffer(int index) {
return true;
}
-void V4L2CaptureDelegate::FillV4L2Buffer(v4l2_buffer* buffer,
- int i) const {
+void V4L2CaptureDelegate::FillV4L2Buffer(v4l2_buffer* buffer, int i) const {
memset(buffer, 0, sizeof(*buffer));
buffer->memory = V4L2_MEMORY_MMAP;
buffer->index = i;
diff --git a/chromium/media/video/capture/linux/v4l2_capture_delegate.h b/chromium/media/capture/video/linux/v4l2_capture_delegate.h
index 9d65bb24853..1bb1735944a 100644
--- a/chromium/media/video/capture/linux/v4l2_capture_delegate.h
+++ b/chromium/media/capture/video/linux/v4l2_capture_delegate.h
@@ -13,8 +13,7 @@
#include "base/files/scoped_file.h"
#include "base/memory/ref_counted.h"
-#include "base/memory/scoped_vector.h"
-#include "media/video/capture/video_capture_device.h"
+#include "media/capture/video/video_capture_device.h"
namespace media {
@@ -33,7 +32,8 @@ class V4L2CaptureDelegate
// Retrieves the #planes for a given |fourcc|, or 0 if unknown.
static size_t GetNumPlanesForFourCc(uint32_t fourcc);
// Returns the Chrome pixel format for |v4l2_fourcc| or PIXEL_FORMAT_UNKNOWN.
- static VideoPixelFormat V4l2FourCcToChromiumPixelFormat(uint32_t v4l2_fourcc);
+ static VideoPixelFormat V4l2FourCcToChromiumPixelFormat(
+ uint32_t v4l2_fourcc);
// Composes a list of usable and supported pixel formats, in order of
// preference, with MJPEG prioritised depending on |prefer_mjpeg|.
@@ -117,9 +117,8 @@ class V4L2CaptureDelegate
const v4l2_buffer& buffer) const = 0;
// Sends the captured |buffer| to the |client_|, synchronously.
- virtual void SendBuffer(
- const scoped_refptr<BufferTracker>& buffer_tracker,
- const v4l2_format& format) const = 0;
+ virtual void SendBuffer(const scoped_refptr<BufferTracker>& buffer_tracker,
+ const v4l2_format& format) const = 0;
// A few accessors for SendBuffer()'s to access private member variables.
VideoCaptureFormat capture_format() const { return capture_format_; }
diff --git a/chromium/media/video/capture/linux/v4l2_capture_delegate_multi_plane.cc b/chromium/media/capture/video/linux/v4l2_capture_delegate_multi_plane.cc
index 99f70cebdd7..6aab0de49cf 100644
--- a/chromium/media/video/capture/linux/v4l2_capture_delegate_multi_plane.cc
+++ b/chromium/media/capture/video/linux/v4l2_capture_delegate_multi_plane.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/video/capture/linux/v4l2_capture_delegate_multi_plane.h"
+#include "media/capture/video/linux/v4l2_capture_delegate_multi_plane.h"
#include <sys/mman.h>
@@ -12,9 +12,7 @@ V4L2CaptureDelegateMultiPlane::V4L2CaptureDelegateMultiPlane(
const VideoCaptureDevice::Name& device_name,
const scoped_refptr<base::SingleThreadTaskRunner>& v4l2_task_runner,
int power_line_frequency)
- : V4L2CaptureDelegate(device_name,
- v4l2_task_runner,
- power_line_frequency) {
+ : V4L2CaptureDelegate(device_name, v4l2_task_runner, power_line_frequency) {
}
V4L2CaptureDelegateMultiPlane::~V4L2CaptureDelegateMultiPlane() {
@@ -72,15 +70,10 @@ void V4L2CaptureDelegateMultiPlane::SendBuffer(
DCHECK_GE(y_stride, 1u * capture_format().frame_size.width());
DCHECK_GE(u_stride, 1u * capture_format().frame_size.width() / 2);
DCHECK_GE(v_stride, 1u * capture_format().frame_size.width() / 2);
- client()->OnIncomingCapturedYuvData(buffer_tracker->GetPlaneStart(0),
- buffer_tracker->GetPlaneStart(1),
- buffer_tracker->GetPlaneStart(2),
- y_stride,
- u_stride,
- v_stride,
- capture_format(),
- rotation(),
- base::TimeTicks::Now());
+ client()->OnIncomingCapturedYuvData(
+ buffer_tracker->GetPlaneStart(0), buffer_tracker->GetPlaneStart(1),
+ buffer_tracker->GetPlaneStart(2), y_stride, u_stride, v_stride,
+ capture_format(), rotation(), base::TimeTicks::Now());
}
bool V4L2CaptureDelegateMultiPlane::BufferTrackerMPlane::Init(
diff --git a/chromium/media/video/capture/linux/v4l2_capture_delegate_multi_plane.h b/chromium/media/capture/video/linux/v4l2_capture_delegate_multi_plane.h
index 64511ad3c4b..ee6cad31476 100644
--- a/chromium/media/video/capture/linux/v4l2_capture_delegate_multi_plane.h
+++ b/chromium/media/capture/video/linux/v4l2_capture_delegate_multi_plane.h
@@ -6,7 +6,7 @@
#define MEDIA_VIDEO_CAPTURE_LINUX_V4L2_CAPTURE_DELEGATE_MULTI_PLANE_H_
#include "base/memory/ref_counted.h"
-#include "media/video/capture/linux/v4l2_capture_delegate.h"
+#include "media/capture/video/linux/v4l2_capture_delegate.h"
#if defined(OS_OPENBSD)
#error "OpenBSD does not support MPlane capture API."
diff --git a/chromium/media/video/capture/linux/v4l2_capture_delegate_single_plane.cc b/chromium/media/capture/video/linux/v4l2_capture_delegate_single_plane.cc
index e2e7e5b0da1..722eedcdc31 100644
--- a/chromium/media/video/capture/linux/v4l2_capture_delegate_single_plane.cc
+++ b/chromium/media/capture/video/linux/v4l2_capture_delegate_single_plane.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/video/capture/linux/v4l2_capture_delegate_single_plane.h"
+#include "media/capture/video/linux/v4l2_capture_delegate_single_plane.h"
#include <sys/mman.h>
@@ -39,11 +39,8 @@ void V4L2CaptureDelegateSinglePlane::SendBuffer(
const scoped_refptr<BufferTracker>& buffer_tracker,
const v4l2_format& format) const {
client()->OnIncomingCapturedData(
- buffer_tracker->GetPlaneStart(0),
- buffer_tracker->GetPlanePayloadSize(0),
- capture_format(),
- rotation(),
- base::TimeTicks::Now());
+ buffer_tracker->GetPlaneStart(0), buffer_tracker->GetPlanePayloadSize(0),
+ capture_format(), rotation(), base::TimeTicks::Now());
}
bool V4L2CaptureDelegateSinglePlane::BufferTrackerSPlane::Init(
diff --git a/chromium/media/video/capture/linux/v4l2_capture_delegate_single_plane.h b/chromium/media/capture/video/linux/v4l2_capture_delegate_single_plane.h
index 8d4dc91f520..5124f14a508 100644
--- a/chromium/media/video/capture/linux/v4l2_capture_delegate_single_plane.h
+++ b/chromium/media/capture/video/linux/v4l2_capture_delegate_single_plane.h
@@ -6,8 +6,8 @@
#define MEDIA_VIDEO_CAPTURE_LINUX_V4L2_CAPTURE_DELEGATE_SINGLE_PLANE_H_
#include "base/memory/ref_counted.h"
-#include "media/video/capture/linux/v4l2_capture_delegate.h"
-#include "media/video/capture/video_capture_device.h"
+#include "media/capture/video/linux/v4l2_capture_delegate.h"
+#include "media/capture/video/video_capture_device.h"
namespace base {
class SingleThreadTaskRunner;
diff --git a/chromium/media/video/capture/linux/video_capture_device_chromeos.cc b/chromium/media/capture/video/linux/video_capture_device_chromeos.cc
index 5abfbffaa25..3f2d76192b5 100644
--- a/chromium/media/video/capture/linux/video_capture_device_chromeos.cc
+++ b/chromium/media/capture/video/linux/video_capture_device_chromeos.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/video/capture/linux/video_capture_device_chromeos.h"
+#include "media/capture/video/linux/video_capture_device_chromeos.h"
#include "base/bind.h"
#include "base/memory/ref_counted.h"
@@ -93,13 +93,12 @@ class VideoCaptureDeviceChromeOS::ScreenObserverDelegate
DISALLOW_IMPLICIT_CONSTRUCTORS(ScreenObserverDelegate);
};
-
VideoCaptureDeviceChromeOS::VideoCaptureDeviceChromeOS(
scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner,
const Name& device_name)
: VideoCaptureDeviceLinux(device_name),
- screen_observer_delegate_(new ScreenObserverDelegate(this,
- ui_task_runner)) {
+ screen_observer_delegate_(
+ new ScreenObserverDelegate(this, ui_task_runner)) {
}
VideoCaptureDeviceChromeOS::~VideoCaptureDeviceChromeOS() {
diff --git a/chromium/media/video/capture/linux/video_capture_device_chromeos.h b/chromium/media/capture/video/linux/video_capture_device_chromeos.h
index 50b77a9debb..832d8cee470 100644
--- a/chromium/media/video/capture/linux/video_capture_device_chromeos.h
+++ b/chromium/media/capture/video/linux/video_capture_device_chromeos.h
@@ -5,7 +5,7 @@
#ifndef MEDIA_VIDEO_CAPTURE_LINUX_VIDEO_CAPTURE_DEVICE_CHROMEOS_H_
#define MEDIA_VIDEO_CAPTURE_LINUX_VIDEO_CAPTURE_DEVICE_CHROMEOS_H_
-#include "media/video/capture/linux/video_capture_device_linux.h"
+#include "media/capture/video/linux/video_capture_device_linux.h"
namespace gfx {
class Display;
diff --git a/chromium/media/video/capture/linux/video_capture_device_factory_linux.cc b/chromium/media/capture/video/linux/video_capture_device_factory_linux.cc
index cadbf3e97ad..f7dfa53d3b8 100644
--- a/chromium/media/video/capture/linux/video_capture_device_factory_linux.cc
+++ b/chromium/media/capture/video/linux/video_capture_device_factory_linux.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/video/capture/linux/video_capture_device_factory_linux.h"
+#include "media/capture/video/linux/video_capture_device_factory_linux.h"
#include <errno.h>
#include <fcntl.h>
@@ -16,11 +16,10 @@
#include "base/files/file_enumerator.h"
#include "base/files/scoped_file.h"
#include "base/posix/eintr_wrapper.h"
-#include "base/strings/stringprintf.h"
#if defined(OS_CHROMEOS)
-#include "media/video/capture/linux/video_capture_device_chromeos.h"
+#include "media/capture/video/linux/video_capture_device_chromeos.h"
#endif
-#include "media/video/capture/linux/video_capture_device_linux.h"
+#include "media/capture/video/linux/video_capture_device_linux.h"
namespace media {
@@ -32,9 +31,8 @@ static bool HasUsableFormats(int fd, uint32 capabilities) {
int capability;
v4l2_buf_type buf_type;
} kCapabilityAndBufferTypes[] = {
- {V4L2_CAP_VIDEO_CAPTURE, V4L2_BUF_TYPE_VIDEO_CAPTURE},
- {V4L2_CAP_VIDEO_CAPTURE_MPLANE, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE}
- };
+ {V4L2_CAP_VIDEO_CAPTURE, V4L2_BUF_TYPE_VIDEO_CAPTURE},
+ {V4L2_CAP_VIDEO_CAPTURE_MPLANE, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE}};
for (const auto& capability_and_buffer_type : kCapabilityAndBufferTypes) {
v4l2_fmtdesc fmtdesc = {};
@@ -62,11 +60,13 @@ static std::list<float> GetFrameRateList(int fd,
frame_interval.pixel_format = fourcc;
frame_interval.width = width;
frame_interval.height = height;
- for (; HANDLE_EINTR(ioctl(fd, VIDIOC_ENUM_FRAMEINTERVALS,
- &frame_interval)) == 0; ++frame_interval.index) {
+ for (; HANDLE_EINTR(ioctl(fd, VIDIOC_ENUM_FRAMEINTERVALS, &frame_interval)) ==
+ 0;
+ ++frame_interval.index) {
if (frame_interval.type == V4L2_FRMIVAL_TYPE_DISCRETE) {
if (frame_interval.discrete.numerator != 0) {
- frame_rates.push_back(frame_interval.discrete.denominator /
+ frame_rates.push_back(
+ frame_interval.discrete.denominator /
static_cast<float>(frame_interval.discrete.numerator));
}
} else if (frame_interval.type == V4L2_FRMIVAL_TYPE_CONTINUOUS ||
@@ -161,8 +161,8 @@ void VideoCaptureDeviceFactoryLinux::GetDeviceNames(
DCHECK(thread_checker_.CalledOnValidThread());
DCHECK(device_names->empty());
const base::FilePath path("/dev/");
- base::FileEnumerator enumerator(
- path, false, base::FileEnumerator::FILES, "video*");
+ base::FileEnumerator enumerator(path, false, base::FileEnumerator::FILES,
+ "video*");
while (!enumerator.Next().empty()) {
const base::FileEnumerator::FileInfo info = enumerator.GetInfo();
@@ -179,12 +179,12 @@ void VideoCaptureDeviceFactoryLinux::GetDeviceNames(
v4l2_capability cap;
if ((HANDLE_EINTR(ioctl(fd.get(), VIDIOC_QUERYCAP, &cap)) == 0) &&
((cap.capabilities & V4L2_CAP_VIDEO_CAPTURE ||
- cap.capabilities & V4L2_CAP_VIDEO_CAPTURE_MPLANE) &&
+ cap.capabilities & V4L2_CAP_VIDEO_CAPTURE_MPLANE) &&
!(cap.capabilities & V4L2_CAP_VIDEO_OUTPUT) &&
!(cap.capabilities & V4L2_CAP_VIDEO_OUTPUT_MPLANE)) &&
HasUsableFormats(fd.get(), cap.capabilities)) {
device_names->push_back(VideoCaptureDevice::Name(
- base::StringPrintf("%s", cap.card), unique_id,
+ reinterpret_cast<char*>(cap.card), unique_id,
(cap.capabilities & V4L2_CAP_VIDEO_CAPTURE_MPLANE)
? VideoCaptureDevice::Name::V4L2_MULTI_PLANE
: VideoCaptureDevice::Name::V4L2_SINGLE_PLANE));
diff --git a/chromium/media/video/capture/linux/video_capture_device_factory_linux.h b/chromium/media/capture/video/linux/video_capture_device_factory_linux.h
index 8e52abbcc3d..5a9c1401032 100644
--- a/chromium/media/video/capture/linux/video_capture_device_factory_linux.h
+++ b/chromium/media/capture/video/linux/video_capture_device_factory_linux.h
@@ -7,7 +7,7 @@
#ifndef MEDIA_VIDEO_CAPTURE_LINUX_VIDEO_CAPTURE_DEVICE_FACTORY_LINUX_H_
#define MEDIA_VIDEO_CAPTURE_LINUX_VIDEO_CAPTURE_DEVICE_FACTORY_LINUX_H_
-#include "media/video/capture/video_capture_device_factory.h"
+#include "media/capture/video/video_capture_device_factory.h"
#include "media/base/video_capture_types.h"
diff --git a/chromium/media/video/capture/linux/video_capture_device_linux.cc b/chromium/media/capture/video/linux/video_capture_device_linux.cc
index 5a05223be3b..487199ea040 100644
--- a/chromium/media/video/capture/linux/video_capture_device_linux.cc
+++ b/chromium/media/capture/video/linux/video_capture_device_linux.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/video/capture/linux/video_capture_device_linux.h"
+#include "media/capture/video/linux/video_capture_device_linux.h"
#if defined(OS_OPENBSD)
#include <sys/videoio.h>
@@ -14,7 +14,7 @@
#include "base/bind.h"
#include "base/strings/stringprintf.h"
-#include "media/video/capture/linux/v4l2_capture_delegate.h"
+#include "media/capture/video/linux/v4l2_capture_delegate.h"
namespace media {
@@ -28,7 +28,7 @@ static const char kVidPathTemplate[] =
static const char kPidPathTemplate[] =
"/sys/class/video4linux/%s/device/../idProduct";
-static bool ReadIdFile(const std::string path, std::string* id) {
+static bool ReadIdFile(const std::string& path, std::string* id) {
char id_buf[kVidPidSize];
FILE* file = fopen(path.c_str(), "rb");
if (!file)
@@ -43,12 +43,12 @@ static bool ReadIdFile(const std::string path, std::string* id) {
// Translates Video4Linux pixel formats to Chromium pixel formats.
// static
-VideoPixelFormat VideoCaptureDeviceLinux::V4l2FourCcToChromiumPixelFormat(
- uint32 v4l2_fourcc) {
+VideoPixelFormat
+VideoCaptureDeviceLinux::V4l2FourCcToChromiumPixelFormat(uint32 v4l2_fourcc) {
return V4L2CaptureDelegate::V4l2FourCcToChromiumPixelFormat(v4l2_fourcc);
}
-// Gets a list of usable Four CC formats prioritised.
+// Gets a list of usable Four CC formats prioritized.
// static
std::list<uint32_t> VideoCaptureDeviceLinux::GetListOfUsableFourCCs(
bool favour_mjpeg) {
@@ -78,8 +78,7 @@ const std::string VideoCaptureDevice::Name::GetModel() const {
}
VideoCaptureDeviceLinux::VideoCaptureDeviceLinux(const Name& device_name)
- : v4l2_thread_("V4L2CaptureThread"),
- device_name_(device_name) {
+ : v4l2_thread_("V4L2CaptureThread"), device_name_(device_name) {
}
VideoCaptureDeviceLinux::~VideoCaptureDeviceLinux() {
@@ -98,7 +97,7 @@ void VideoCaptureDeviceLinux::AllocateAndStart(
v4l2_thread_.Start();
const int line_frequency =
- TranslatePowerLineFrequencyToV4L2(GetPowerLineFrequencyForLocation());
+ TranslatePowerLineFrequencyToV4L2(GetPowerLineFrequency(params));
capture_impl_ = V4L2CaptureDelegate::CreateV4L2CaptureDelegate(
device_name_, v4l2_thread_.task_runner(), line_frequency);
if (!capture_impl_) {
@@ -127,17 +126,17 @@ void VideoCaptureDeviceLinux::StopAndDeAllocate() {
void VideoCaptureDeviceLinux::SetRotation(int rotation) {
if (v4l2_thread_.IsRunning()) {
v4l2_thread_.message_loop()->PostTask(
- FROM_HERE, base::Bind(&V4L2CaptureDelegate::SetRotation,
- capture_impl_, rotation));
+ FROM_HERE,
+ base::Bind(&V4L2CaptureDelegate::SetRotation, capture_impl_, rotation));
}
}
// static
int VideoCaptureDeviceLinux::TranslatePowerLineFrequencyToV4L2(int frequency) {
switch (frequency) {
- case kPowerLine50Hz:
+ case static_cast<int>(media::PowerLineFrequency::FREQUENCY_50HZ):
return V4L2_CID_POWER_LINE_FREQUENCY_50HZ;
- case kPowerLine60Hz:
+ case static_cast<int>(media::PowerLineFrequency::FREQUENCY_60HZ):
return V4L2_CID_POWER_LINE_FREQUENCY_60HZ;
default:
// If we have no idea of the frequency, at least try and set it to AUTO.
diff --git a/chromium/media/video/capture/linux/video_capture_device_linux.h b/chromium/media/capture/video/linux/video_capture_device_linux.h
index 998d3a49563..61e2cba6c27 100644
--- a/chromium/media/video/capture/linux/video_capture_device_linux.h
+++ b/chromium/media/capture/video/linux/video_capture_device_linux.h
@@ -16,7 +16,7 @@
#include "base/files/scoped_file.h"
#include "base/threading/thread.h"
#include "media/base/video_capture_types.h"
-#include "media/video/capture/video_capture_device.h"
+#include "media/capture/video/video_capture_device.h"
namespace media {
@@ -25,7 +25,8 @@ class V4L2CaptureDelegate;
// Linux V4L2 implementation of VideoCaptureDevice.
class VideoCaptureDeviceLinux : public VideoCaptureDevice {
public:
- static VideoPixelFormat V4l2FourCcToChromiumPixelFormat(uint32 v4l2_fourcc);
+ static VideoPixelFormat V4l2FourCcToChromiumPixelFormat(
+ uint32 v4l2_fourcc);
static std::list<uint32_t> GetListOfUsableFourCCs(bool favour_mjpeg);
explicit VideoCaptureDeviceLinux(const Name& device_name);
@@ -43,7 +44,7 @@ class VideoCaptureDeviceLinux : public VideoCaptureDevice {
static int TranslatePowerLineFrequencyToV4L2(int frequency);
// Internal delegate doing the actual capture setting, buffer allocation and
- // circulacion with the V4L2 API. Created and deleted in the thread where
+ // circulation with the V4L2 API. Created and deleted in the thread where
// VideoCaptureDeviceLinux lives but otherwise operating on |v4l2_thread_|.
scoped_refptr<V4L2CaptureDelegate> capture_impl_;
diff --git a/chromium/media/video/capture/mac/DEPS b/chromium/media/capture/video/mac/DEPS
index 58a10036ee4..58a10036ee4 100644
--- a/chromium/media/video/capture/mac/DEPS
+++ b/chromium/media/capture/video/mac/DEPS
diff --git a/chromium/media/video/capture/mac/platform_video_capturing_mac.h b/chromium/media/capture/video/mac/platform_video_capturing_mac.h
index 33ad7b6e540..13cc6a2c669 100644
--- a/chromium/media/video/capture/mac/platform_video_capturing_mac.h
+++ b/chromium/media/capture/video/mac/platform_video_capturing_mac.h
@@ -13,7 +13,7 @@ class VideoCaptureDeviceMac;
// Protocol representing platform-dependent video capture on Mac, implemented
// by both QTKit and AVFoundation APIs.
-@protocol PlatformVideoCapturingMac <NSObject>
+@protocol PlatformVideoCapturingMac<NSObject>
// This method initializes the instance by calling NSObject |init| and registers
// internally a frame receiver at the same time. The frame receiver is supposed
diff --git a/chromium/media/video/capture/mac/video_capture_device_avfoundation_mac.h b/chromium/media/capture/video/mac/video_capture_device_avfoundation_mac.h
index e30c2f8bce9..539dffced2a 100644
--- a/chromium/media/video/capture/mac/video_capture_device_avfoundation_mac.h
+++ b/chromium/media/capture/video/mac/video_capture_device_avfoundation_mac.h
@@ -12,8 +12,8 @@
#include "base/threading/thread_checker.h"
#import "media/base/mac/avfoundation_glue.h"
#include "media/base/video_capture_types.h"
-#import "media/video/capture/mac/platform_video_capturing_mac.h"
-#include "media/video/capture/video_capture_device.h"
+#import "media/capture/video/mac/platform_video_capturing_mac.h"
+#include "media/capture/video/video_capture_device.h"
namespace media {
class VideoCaptureDeviceMac;
diff --git a/chromium/media/video/capture/mac/video_capture_device_avfoundation_mac.mm b/chromium/media/capture/video/mac/video_capture_device_avfoundation_mac.mm
index e3b7b27442d..deb14080cfd 100644
--- a/chromium/media/video/capture/mac/video_capture_device_avfoundation_mac.mm
+++ b/chromium/media/capture/video/mac/video_capture_device_avfoundation_mac.mm
@@ -2,13 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#import "media/video/capture/mac/video_capture_device_avfoundation_mac.h"
+#import "media/capture/video/mac/video_capture_device_avfoundation_mac.h"
#import <CoreVideo/CoreVideo.h>
#include "base/logging.h"
#include "base/mac/foundation_util.h"
-#include "media/video/capture/mac/video_capture_device_mac.h"
+#include "media/base/video_capture_types.h"
+#include "media/capture/video/mac/video_capture_device_mac.h"
#include "ui/gfx/geometry/size.h"
// Prefer MJPEG if frame width or height is larger than this.
@@ -16,7 +17,7 @@ static const int kMjpegWidthThreshold = 640;
static const int kMjpegHeightThreshold = 480;
// This function translates Mac Core Video pixel formats to Chromium pixel
-// formats. Chromium pixel formats are sorted in order of preference.
+// formats.
media::VideoPixelFormat FourCCToChromiumPixelFormat(FourCharCode code) {
switch (code) {
case kCVPixelFormatType_422YpCbCr8:
@@ -44,10 +45,9 @@ media::VideoPixelFormat FourCCToChromiumPixelFormat(FourCharCode code) {
![device isSuspended]) {
DeviceNameAndTransportType* nameAndTransportType =
[[[DeviceNameAndTransportType alloc]
- initWithName:[device localizedName]
- transportType:[device transportType]] autorelease];
- [deviceNames setObject:nameAndTransportType
- forKey:[device uniqueID]];
+ initWithName:[device localizedName]
+ transportType:[device transportType]] autorelease];
+ [deviceNames setObject:nameAndTransportType forKey:[device uniqueID]];
}
}
}
@@ -62,7 +62,7 @@ media::VideoPixelFormat FourCCToChromiumPixelFormat(FourCharCode code) {
}
+ (void)getDevice:(const media::VideoCaptureDevice::Name&)name
- supportedFormats:(media::VideoCaptureFormats*)formats{
+ supportedFormats:(media::VideoCaptureFormats*)formats {
NSArray* devices = [AVCaptureDeviceGlue devices];
CrAVCaptureDevice* device = nil;
for (device in devices) {
@@ -83,17 +83,15 @@ media::VideoPixelFormat FourCCToChromiumPixelFormat(FourCharCode code) {
[format formatDescription]);
for (CrAVFrameRateRange* frameRate in
- [format videoSupportedFrameRateRanges]) {
+ [format videoSupportedFrameRateRanges]) {
media::VideoCaptureFormat format(
gfx::Size(dimensions.width, dimensions.height),
- frameRate.maxFrameRate,
- pixelFormat);
+ frameRate.maxFrameRate, pixelFormat);
formats->push_back(format);
DVLOG(2) << name.name() << " "
<< media::VideoCaptureFormat::ToString(format);
}
}
-
}
#pragma mark Public methods
@@ -139,22 +137,24 @@ media::VideoPixelFormat FourCCToChromiumPixelFormat(FourCharCode code) {
// Look for input device with requested name.
captureDevice_ = [AVCaptureDeviceGlue deviceWithUniqueID:deviceId];
if (!captureDevice_) {
- [self sendErrorString:[NSString
- stringWithUTF8String:"Could not open video capture device."]];
+ [self
+ sendErrorString:[NSString stringWithUTF8String:
+ "Could not open video capture device."]];
return NO;
}
// Create the capture input associated with the device. Easy peasy.
NSError* error = nil;
- captureDeviceInput_ = [AVCaptureDeviceInputGlue
- deviceInputWithDevice:captureDevice_
- error:&error];
+ captureDeviceInput_ =
+ [AVCaptureDeviceInputGlue deviceInputWithDevice:captureDevice_
+ error:&error];
if (!captureDeviceInput_) {
captureDevice_ = nil;
- [self sendErrorString:[NSString
- stringWithFormat:@"Could not create video capture input (%@): %@",
- [error localizedDescription],
- [error localizedFailureReason]]];
+ [self sendErrorString:
+ [NSString stringWithFormat:
+ @"Could not create video capture input (%@): %@",
+ [error localizedDescription],
+ [error localizedFailureReason]]];
return NO;
}
[captureSession_ addInput:captureDeviceInput_];
@@ -165,15 +165,15 @@ media::VideoPixelFormat FourCCToChromiumPixelFormat(FourCharCode code) {
[[AVFoundationGlue::AVCaptureVideoDataOutputClass() alloc] init]);
if (!captureVideoDataOutput_) {
[captureSession_ removeInput:captureDeviceInput_];
- [self sendErrorString:[NSString
- stringWithUTF8String:"Could not create video data output."]];
+ [self sendErrorString:[NSString stringWithUTF8String:
+ "Could not create video data output."]];
return NO;
}
[captureVideoDataOutput_ setAlwaysDiscardsLateVideoFrames:true];
[captureVideoDataOutput_
setSampleBufferDelegate:self
queue:dispatch_get_global_queue(
- DISPATCH_QUEUE_PRIORITY_DEFAULT, 0)];
+ DISPATCH_QUEUE_PRIORITY_DEFAULT, 0)];
[captureSession_ addOutput:captureVideoDataOutput_];
return YES;
}
@@ -185,8 +185,8 @@ media::VideoPixelFormat FourCCToChromiumPixelFormat(FourCharCode code) {
// VideoCaptureDeviceMac::ReceiveFrame() is calling here, depending on the
// running state. VCDM::ReceiveFrame() calls here to change aspect ratio.
DCHECK((![captureSession_ isRunning] &&
- main_thread_checker_.CalledOnValidThread()) ||
- callback_thread_checker_.CalledOnValidThread());
+ main_thread_checker_.CalledOnValidThread()) ||
+ callback_thread_checker_.CalledOnValidThread());
frameWidth_ = width;
frameHeight_ = height;
@@ -204,9 +204,11 @@ media::VideoPixelFormat FourCCToChromiumPixelFormat(FourCharCode code) {
best_fourcc = fourcc;
break;
}
+
// Compare according to Chromium preference.
- if (FourCCToChromiumPixelFormat(fourcc) <
- FourCCToChromiumPixelFormat(best_fourcc)) {
+ if (media::VideoCaptureFormat::ComparePixelFormatPreference(
+ FourCCToChromiumPixelFormat(fourcc),
+ FourCCToChromiumPixelFormat(best_fourcc))) {
best_fourcc = fourcc;
}
}
@@ -218,9 +220,9 @@ media::VideoPixelFormat FourCCToChromiumPixelFormat(FourCharCode code) {
// yes/no and preserve aspect ratio yes/no when scaling. Currently we set
// cropping and preservation.
NSDictionary* videoSettingsDictionary = @{
- (id)kCVPixelBufferWidthKey : @(width),
- (id)kCVPixelBufferHeightKey : @(height),
- (id)kCVPixelBufferPixelFormatTypeKey : @(best_fourcc),
+ (id) kCVPixelBufferWidthKey : @(width), (id)
+ kCVPixelBufferHeightKey : @(height), (id)
+ kCVPixelBufferPixelFormatTypeKey : @(best_fourcc),
AVFoundationGlue::AVVideoScalingModeKey() :
AVFoundationGlue::AVVideoScalingModeResizeAspectFill()
};
@@ -232,18 +234,22 @@ media::VideoPixelFormat FourCCToChromiumPixelFormat(FourCharCode code) {
// http://crbug.com/328096.
// CMTimeMake accepts integer argumenst but |frameRate| is float, round it.
if ([captureConnection
- respondsToSelector:@selector(isVideoMinFrameDurationSupported)] &&
+ respondsToSelector:@selector(isVideoMinFrameDurationSupported)] &&
[captureConnection isVideoMinFrameDurationSupported]) {
- [captureConnection setVideoMinFrameDuration:
- CoreMediaGlue::CMTimeMake(media::kFrameRatePrecision,
- (int)(frameRate * media::kFrameRatePrecision))];
+ [captureConnection
+ setVideoMinFrameDuration:CoreMediaGlue::CMTimeMake(
+ media::kFrameRatePrecision,
+ (int)(frameRate *
+ media::kFrameRatePrecision))];
}
if ([captureConnection
- respondsToSelector:@selector(isVideoMaxFrameDurationSupported)] &&
+ respondsToSelector:@selector(isVideoMaxFrameDurationSupported)] &&
[captureConnection isVideoMaxFrameDurationSupported]) {
- [captureConnection setVideoMaxFrameDuration:
- CoreMediaGlue::CMTimeMake(media::kFrameRatePrecision,
- (int)(frameRate * media::kFrameRatePrecision))];
+ [captureConnection
+ setVideoMaxFrameDuration:CoreMediaGlue::CMTimeMake(
+ media::kFrameRatePrecision,
+ (int)(frameRate *
+ media::kFrameRatePrecision))];
}
return YES;
}
@@ -275,8 +281,8 @@ media::VideoPixelFormat FourCCToChromiumPixelFormat(FourCharCode code) {
// |captureOutput| is called by the capture device to deliver a new frame.
- (void)captureOutput:(CrAVCaptureOutput*)captureOutput
- didOutputSampleBuffer:(CoreMediaGlue::CMSampleBufferRef)sampleBuffer
- fromConnection:(CrAVCaptureConnection*)connection {
+didOutputSampleBuffer:(CoreMediaGlue::CMSampleBufferRef)sampleBuffer
+ fromConnection:(CrAVCaptureConnection*)connection {
// AVFoundation calls from a number of threads, depending on, at least, if
// Chrome is on foreground or background. Sample the actual thread here.
callback_thread_checker_.DetachFromThread();
@@ -289,8 +295,7 @@ media::VideoPixelFormat FourCCToChromiumPixelFormat(FourCharCode code) {
const CoreMediaGlue::CMVideoDimensions dimensions =
CoreMediaGlue::CMVideoFormatDescriptionGetDimensions(formatDescription);
const media::VideoCaptureFormat captureFormat(
- gfx::Size(dimensions.width, dimensions.height),
- frameRate_,
+ gfx::Size(dimensions.width, dimensions.height), frameRate_,
FourCCToChromiumPixelFormat(fourcc));
char* baseAddress = 0;
@@ -337,9 +342,9 @@ media::VideoPixelFormat FourCCToChromiumPixelFormat(FourCharCode code) {
NSError* error = base::mac::ObjCCast<NSError>([[errorNotification userInfo]
objectForKey:AVFoundationGlue::AVCaptureSessionErrorKey()]);
[self sendErrorString:[NSString
- stringWithFormat:@"%@: %@",
- [error localizedDescription],
- [error localizedFailureReason]]];
+ stringWithFormat:@"%@: %@",
+ [error localizedDescription],
+ [error localizedFailureReason]]];
}
- (void)sendErrorString:(NSString*)error {
diff --git a/chromium/media/video/capture/mac/video_capture_device_decklink_mac.h b/chromium/media/capture/video/mac/video_capture_device_decklink_mac.h
index 45e697d4fdb..e03765abc5b 100644
--- a/chromium/media/video/capture/mac/video_capture_device_decklink_mac.h
+++ b/chromium/media/capture/video/mac/video_capture_device_decklink_mac.h
@@ -8,7 +8,7 @@
#ifndef MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURE_DEVICE_DECKLINK_MAC_H_
#define MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURE_DEVICE_DECKLINK_MAC_H_
-#include "media/video/capture/video_capture_device.h"
+#include "media/capture/video/video_capture_device.h"
#import <Foundation/Foundation.h>
diff --git a/chromium/media/video/capture/mac/video_capture_device_decklink_mac.mm b/chromium/media/capture/video/mac/video_capture_device_decklink_mac.mm
index c494304702f..c73cb406878 100644
--- a/chromium/media/video/capture/mac/video_capture_device_decklink_mac.mm
+++ b/chromium/media/capture/video/mac/video_capture_device_decklink_mac.mm
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/video/capture/mac/video_capture_device_decklink_mac.h"
+#include "media/capture/video/mac/video_capture_device_decklink_mac.h"
#include "base/logging.h"
#include "base/memory/ref_counted.h"
@@ -26,9 +26,7 @@ class ScopedDeckLinkPtr : public scoped_refptr<T> {
return &ptr_;
}
- void** ReceiveVoid() {
- return reinterpret_cast<void**>(Receive());
- }
+ void** ReceiveVoid() { return reinterpret_cast<void**>(Receive()); }
void Release() {
if (ptr_ != NULL) {
@@ -42,9 +40,9 @@ class ScopedDeckLinkPtr : public scoped_refptr<T> {
// Implements the reference counted interface IUnknown. Has a weak reference to
// VideoCaptureDeviceDeckLinkMac for sending captured frames, error messages and
// logs.
-class DeckLinkCaptureDelegate :
- public IDeckLinkInputCallback,
- public base::RefCountedThreadSafe<DeckLinkCaptureDelegate> {
+class DeckLinkCaptureDelegate
+ : public IDeckLinkInputCallback,
+ public base::RefCountedThreadSafe<DeckLinkCaptureDelegate> {
public:
DeckLinkCaptureDelegate(const media::VideoCaptureDevice::Name& device_name,
media::VideoCaptureDeviceDeckLinkMac* frame_receiver);
@@ -121,11 +119,11 @@ static float GetDisplayModeFrameRate(
DeckLinkCaptureDelegate::DeckLinkCaptureDelegate(
const media::VideoCaptureDevice::Name& device_name,
media::VideoCaptureDeviceDeckLinkMac* frame_receiver)
- : device_name_(device_name),
- frame_receiver_(frame_receiver) {
+ : device_name_(device_name), frame_receiver_(frame_receiver) {
}
-DeckLinkCaptureDelegate::~DeckLinkCaptureDelegate() {}
+DeckLinkCaptureDelegate::~DeckLinkCaptureDelegate() {
+}
void DeckLinkCaptureDelegate::AllocateAndStart(
const media::VideoCaptureParams& params) {
@@ -150,15 +148,15 @@ void DeckLinkCaptureDelegate::AllocateAndStart(
}
ScopedDeckLinkPtr<IDeckLinkInput> decklink_input_local;
- if (decklink_local->QueryInterface(IID_IDeckLinkInput,
- decklink_input_local.ReceiveVoid()) != S_OK) {
+ if (decklink_local->QueryInterface(
+ IID_IDeckLinkInput, decklink_input_local.ReceiveVoid()) != S_OK) {
SendErrorString("Error querying input interface.");
return;
}
ScopedDeckLinkPtr<IDeckLinkDisplayModeIterator> display_mode_iter;
if (decklink_input_local->GetDisplayModeIterator(
- display_mode_iter.Receive()) != S_OK) {
+ display_mode_iter.Receive()) != S_OK) {
SendErrorString("Error creating Display Mode Iterator");
return;
}
@@ -168,10 +166,11 @@ void DeckLinkCaptureDelegate::AllocateAndStart(
float min_diff = FLT_MAX;
while (display_mode_iter->Next(display_mode.Receive()) == S_OK) {
const float diff = labs(display_mode->GetWidth() -
- params.requested_format.frame_size.width()) +
- labs(params.requested_format.frame_size.height() -
- display_mode->GetHeight()) + fabs(params.requested_format.frame_rate -
- GetDisplayModeFrameRate(display_mode));
+ params.requested_format.frame_size.width()) +
+ labs(params.requested_format.frame_size.height() -
+ display_mode->GetHeight()) +
+ fabs(params.requested_format.frame_rate -
+ GetDisplayModeFrameRate(display_mode));
if (diff < min_diff) {
chosen_display_mode = display_mode;
min_diff = diff;
@@ -222,7 +221,7 @@ void DeckLinkCaptureDelegate::StopAndDeAllocate() {
HRESULT DeckLinkCaptureDelegate::VideoInputFormatChanged(
BMDVideoInputFormatChangedEvents notification_events,
- IDeckLinkDisplayMode *new_display_mode,
+ IDeckLinkDisplayMode* new_display_mode,
BMDDetectedVideoInputFormatFlags detected_signal_flags) {
DCHECK(thread_checker_.CalledOnValidThread());
return S_OK;
@@ -235,7 +234,8 @@ HRESULT DeckLinkCaptureDelegate::VideoInputFrameArrived(
uint8* video_data = NULL;
video_frame->GetBytes(reinterpret_cast<void**>(&video_data));
- media::VideoPixelFormat pixel_format = media::PIXEL_FORMAT_UNKNOWN;
+ media::VideoPixelFormat pixel_format =
+ media::PIXEL_FORMAT_UNKNOWN;
switch (video_frame->GetPixelFormat()) {
case bmdFormat8BitYUV: // A.k.a. '2vuy';
pixel_format = media::PIXEL_FORMAT_UYVY;
@@ -255,8 +255,7 @@ HRESULT DeckLinkCaptureDelegate::VideoInputFrameArrived(
base::AutoLock lock(lock_);
if (frame_receiver_) {
frame_receiver_->OnIncomingCapturedData(
- video_data,
- video_frame->GetRowBytes() * video_frame->GetHeight(),
+ video_data, video_frame->GetRowBytes() * video_frame->GetHeight(),
capture_format,
0, // Rotation.
base::TimeTicks::Now());
@@ -314,7 +313,7 @@ namespace media {
static std::string JoinDeviceNameAndFormat(CFStringRef name,
CFStringRef format) {
return base::SysCFStringRefToUTF8(name) + " - " +
- base::SysCFStringRefToUTF8(format);
+ base::SysCFStringRefToUTF8(format);
}
// static
@@ -339,15 +338,15 @@ void VideoCaptureDeviceDeckLinkMac::EnumerateDevices(
CFStringRef device_display_name = NULL;
hr = decklink_local->GetDisplayName(&device_display_name);
DVLOG_IF(1, hr != S_OK) << "Error reading Blackmagic device display name";
- DVLOG_IF(1, hr == S_OK) << "Blackmagic device found with name: " <<
- base::SysCFStringRefToUTF8(device_display_name);
+ DVLOG_IF(1, hr == S_OK) << "Blackmagic device found with name: "
+ << base::SysCFStringRefToUTF8(device_display_name);
if (!device_model_name && !device_display_name)
continue;
ScopedDeckLinkPtr<IDeckLinkInput> decklink_input;
if (decklink_local->QueryInterface(IID_IDeckLinkInput,
- decklink_input.ReceiveVoid()) != S_OK) {
+ decklink_input.ReceiveVoid()) != S_OK) {
DLOG(ERROR) << "Error Blackmagic querying input interface.";
return;
}
@@ -392,7 +391,7 @@ void VideoCaptureDeviceDeckLinkMac::EnumerateDeviceCapabilities(
ScopedDeckLinkPtr<IDeckLinkInput> decklink_input;
if (decklink_local->QueryInterface(IID_IDeckLinkInput,
- decklink_input.ReceiveVoid()) != S_OK) {
+ decklink_input.ReceiveVoid()) != S_OK) {
DLOG(ERROR) << "Error Blackmagic querying input interface.";
return;
}
@@ -410,8 +409,9 @@ void VideoCaptureDeviceDeckLinkMac::EnumerateDeviceCapabilities(
ScopedDeckLinkPtr<IDeckLinkDisplayMode> display_mode;
while (display_mode_iter->Next(display_mode.Receive()) == S_OK) {
CFStringRef format_name = NULL;
- if (display_mode->GetName(&format_name) == S_OK && device.id() !=
- JoinDeviceNameAndFormat(device_model_name, format_name)) {
+ if (display_mode->GetName(&format_name) == S_OK &&
+ device.id() !=
+ JoinDeviceNameAndFormat(device_model_name, format_name)) {
display_mode.Release();
continue;
}
@@ -449,7 +449,7 @@ void VideoCaptureDeviceDeckLinkMac::OnIncomingCapturedData(
base::AutoLock lock(lock_);
if (client_) {
client_->OnIncomingCapturedData(data, length, frame_format, rotation,
- timestamp);
+ timestamp);
}
}
@@ -468,8 +468,8 @@ void VideoCaptureDeviceDeckLinkMac::SendLogString(const std::string& message) {
}
void VideoCaptureDeviceDeckLinkMac::AllocateAndStart(
- const VideoCaptureParams& params,
- scoped_ptr<VideoCaptureDevice::Client> client) {
+ const VideoCaptureParams& params,
+ scoped_ptr<VideoCaptureDevice::Client> client) {
DCHECK(thread_checker_.CalledOnValidThread());
client_ = client.Pass();
if (decklink_capture_delegate_.get())
@@ -481,4 +481,4 @@ void VideoCaptureDeviceDeckLinkMac::StopAndDeAllocate() {
decklink_capture_delegate_->StopAndDeAllocate();
}
-} // namespace media
+} // namespace media
diff --git a/chromium/media/video/capture/mac/video_capture_device_factory_mac.h b/chromium/media/capture/video/mac/video_capture_device_factory_mac.h
index f3a180ca0cc..07e127b940e 100644
--- a/chromium/media/video/capture/mac/video_capture_device_factory_mac.h
+++ b/chromium/media/capture/video/mac/video_capture_device_factory_mac.h
@@ -7,13 +7,13 @@
#ifndef MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURE_DEVICE_FACTORY_MAC_H_
#define MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURE_DEVICE_FACTORY_MAC_H_
-#include "media/video/capture/video_capture_device_factory.h"
+#include "media/capture/video/video_capture_device_factory.h"
namespace media {
// Extension of VideoCaptureDeviceFactory to create and manipulate Mac devices.
-class MEDIA_EXPORT VideoCaptureDeviceFactoryMac :
- public VideoCaptureDeviceFactory {
+class MEDIA_EXPORT VideoCaptureDeviceFactoryMac
+ : public VideoCaptureDeviceFactory {
public:
static bool PlatformSupportsAVFoundation();
diff --git a/chromium/media/video/capture/mac/video_capture_device_factory_mac.mm b/chromium/media/capture/video/mac/video_capture_device_factory_mac.mm
index 76e14c6b24d..3d7c795d03a 100644
--- a/chromium/media/video/capture/mac/video_capture_device_factory_mac.mm
+++ b/chromium/media/capture/video/mac/video_capture_device_factory_mac.mm
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/video/capture/mac/video_capture_device_factory_mac.h"
+#include "media/capture/video/mac/video_capture_device_factory_mac.h"
#import <IOKit/audio/IOAudioTypes.h>
@@ -12,10 +12,10 @@
#include "base/strings/string_util.h"
#include "base/task_runner_util.h"
#import "media/base/mac/avfoundation_glue.h"
-#import "media/video/capture/mac/video_capture_device_avfoundation_mac.h"
-#import "media/video/capture/mac/video_capture_device_decklink_mac.h"
-#include "media/video/capture/mac/video_capture_device_mac.h"
-#import "media/video/capture/mac/video_capture_device_qtkit_mac.h"
+#import "media/capture/video/mac/video_capture_device_avfoundation_mac.h"
+#import "media/capture/video/mac/video_capture_device_decklink_mac.h"
+#include "media/capture/video/mac/video_capture_device_mac.h"
+#import "media/capture/video/mac/video_capture_device_qtkit_mac.h"
namespace media {
@@ -29,17 +29,19 @@ const struct NameAndVid {
const int capture_width;
const int capture_height;
const float capture_frame_rate;
-} kBlacklistedCameras[] = { {"-01FDA82C8A9C", 1280, 720, 60.0f } };
+} kBlacklistedCameras[] = {{"-01FDA82C8A9C", 1280, 720, 60.0f}};
static bool IsDeviceBlacklisted(const VideoCaptureDevice::Name& name) {
bool is_device_blacklisted = false;
for(size_t i = 0;
!is_device_blacklisted && i < arraysize(kBlacklistedCameras); ++i) {
- is_device_blacklisted = base::EndsWith(name.id(),
- kBlacklistedCameras[i].unique_id_signature, false);
+ is_device_blacklisted =
+ base::EndsWith(name.id(),
+ kBlacklistedCameras[i].unique_id_signature,
+ base::CompareCase::INSENSITIVE_ASCII);
}
- DVLOG_IF(2, is_device_blacklisted) << "Blacklisted camera: " <<
- name.name() << ", id: " << name.id();
+ DVLOG_IF(2, is_device_blacklisted) << "Blacklisted camera: " << name.name()
+ << ", id: " << name.id();
return is_device_blacklisted;
}
@@ -52,7 +54,7 @@ EnumerateDevicesUsingQTKit() {
"458397 media::EnumerateDevicesUsingQTKit"));
scoped_ptr<VideoCaptureDevice::Names> device_names(
- new VideoCaptureDevice::Names());
+ new VideoCaptureDevice::Names());
NSMutableDictionary* capture_devices =
[[[NSMutableDictionary alloc] init] autorelease];
[VideoCaptureDeviceQTKit getDeviceNames:capture_devices];
@@ -90,7 +92,8 @@ VideoCaptureDeviceFactoryMac::VideoCaptureDeviceFactoryMac(
thread_checker_.DetachFromThread();
}
-VideoCaptureDeviceFactoryMac::~VideoCaptureDeviceFactoryMac() {}
+VideoCaptureDeviceFactoryMac::~VideoCaptureDeviceFactoryMac() {
+}
scoped_ptr<VideoCaptureDevice> VideoCaptureDeviceFactoryMac::Create(
const VideoCaptureDevice::Name& device_name) {
@@ -132,9 +135,9 @@ void VideoCaptureDeviceFactoryMac::GetDeviceNames(
// Transport types are defined for Audio devices and reused for video.
VideoCaptureDevice::Name::TransportType device_transport_type =
(transport_type == kIOAudioDeviceTransportTypeBuiltIn ||
- transport_type == kIOAudioDeviceTransportTypeUSB)
- ? VideoCaptureDevice::Name::USB_OR_BUILT_IN
- : VideoCaptureDevice::Name::OTHER_TRANSPORT;
+ transport_type == kIOAudioDeviceTransportTypeUSB)
+ ? VideoCaptureDevice::Name::USB_OR_BUILT_IN
+ : VideoCaptureDevice::Name::OTHER_TRANSPORT;
VideoCaptureDevice::Name name(
[[[capture_devices valueForKey:key] deviceName] UTF8String],
[key UTF8String], VideoCaptureDevice::Name::AVFOUNDATION,
@@ -161,7 +164,8 @@ void VideoCaptureDeviceFactoryMac::EnumerateDeviceNames(const base::Callback<
callback.Run(device_names.Pass());
} else {
DVLOG(1) << "Enumerating video capture devices using QTKit";
- base::PostTaskAndReplyWithResult(ui_task_runner_.get(), FROM_HERE,
+ base::PostTaskAndReplyWithResult(
+ ui_task_runner_.get(), FROM_HERE,
base::Bind(&EnumerateDevicesUsingQTKit),
base::Bind(&RunDevicesEnumeratedCallback, callback));
}
@@ -184,7 +188,7 @@ void VideoCaptureDeviceFactoryMac::GetDeviceSupportedFormats(
for (size_t i = 0; i < arraysize(kBlacklistedCameras); ++i) {
if (base::EndsWith(device.id(),
kBlacklistedCameras[i].unique_id_signature,
- false)) {
+ base::CompareCase::INSENSITIVE_ASCII)) {
supported_formats->push_back(media::VideoCaptureFormat(
gfx::Size(kBlacklistedCameras[i].capture_width,
kBlacklistedCameras[i].capture_height),
diff --git a/chromium/media/video/capture/mac/video_capture_device_factory_mac_unittest.mm b/chromium/media/capture/video/mac/video_capture_device_factory_mac_unittest.mm
index 45027bac822..6b8500d7bba 100644
--- a/chromium/media/video/capture/mac/video_capture_device_factory_mac_unittest.mm
+++ b/chromium/media/capture/video/mac/video_capture_device_factory_mac_unittest.mm
@@ -7,8 +7,8 @@
#include "base/thread_task_runner_handle.h"
#import "media/base/mac/avfoundation_glue.h"
#include "media/base/media_switches.h"
-#include "media/video/capture/mac/video_capture_device_factory_mac.h"
-#include "media/video/capture/mac/video_capture_device_mac.h"
+#include "media/capture/video/mac/video_capture_device_factory_mac.h"
+#include "media/capture/video/mac/video_capture_device_mac.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace media {
@@ -41,7 +41,7 @@ TEST_F(VideoCaptureDeviceFactoryMacTest, ListDevicesAVFoundation) {
// There should be no blacklisted devices, i.e. QTKit.
std::string device_vid;
for (VideoCaptureDevice::Names::const_iterator it = names.begin();
- it != names.end(); ++it) {
+ it != names.end(); ++it) {
EXPECT_EQ(it->capture_api_type(), VideoCaptureDevice::Name::AVFOUNDATION);
}
}
diff --git a/chromium/media/video/capture/mac/video_capture_device_mac.h b/chromium/media/capture/video/mac/video_capture_device_mac.h
index bbe8a230558..c36248eb293 100644
--- a/chromium/media/video/capture/mac/video_capture_device_mac.h
+++ b/chromium/media/capture/video/mac/video_capture_device_mac.h
@@ -19,7 +19,7 @@
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
#include "media/base/video_capture_types.h"
-#include "media/video/capture/video_capture_device.h"
+#include "media/capture/video/video_capture_device.h"
@protocol PlatformVideoCapturingMac;
@@ -83,12 +83,7 @@ class VideoCaptureDeviceMac : public VideoCaptureDevice {
bool UpdateCaptureResolution();
// Flag indicating the internal state.
- enum InternalState {
- kNotInitialized,
- kIdle,
- kCapturing,
- kError
- };
+ enum InternalState { kNotInitialized, kIdle, kCapturing, kError };
Name device_name_;
scoped_ptr<VideoCaptureDevice::Client> client_;
diff --git a/chromium/media/video/capture/mac/video_capture_device_mac.mm b/chromium/media/capture/video/mac/video_capture_device_mac.mm
index 521a11178dd..8f71b681b0d 100644
--- a/chromium/media/video/capture/mac/video_capture_device_mac.mm
+++ b/chromium/media/capture/video/mac/video_capture_device_mac.mm
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/video/capture/mac/video_capture_device_mac.h"
+#include "media/capture/video/mac/video_capture_device_mac.h"
#include <IOKit/IOCFPlugIn.h>
#include <IOKit/usb/IOUSBLib.h>
@@ -18,9 +18,9 @@
#include "base/thread_task_runner_handle.h"
#include "base/time/time.h"
#import "media/base/mac/avfoundation_glue.h"
-#import "media/video/capture/mac/platform_video_capturing_mac.h"
-#import "media/video/capture/mac/video_capture_device_avfoundation_mac.h"
-#import "media/video/capture/mac/video_capture_device_qtkit_mac.h"
+#import "media/capture/video/mac/platform_video_capturing_mac.h"
+#import "media/capture/video/mac/video_capture_device_avfoundation_mac.h"
+#import "media/capture/video/mac/video_capture_device_qtkit_mac.h"
#include "ui/gfx/geometry/size.h"
@implementation DeviceNameAndTransportType
@@ -55,14 +55,12 @@ const size_t kVidPidSize = 4;
const struct Resolution {
const int width;
const int height;
-} kQVGA = { 320, 240 },
- kVGA = { 640, 480 },
- kHD = { 1280, 720 };
+} kQVGA = {320, 240}, kVGA = {640, 480}, kHD = {1280, 720};
const struct Resolution* const kWellSupportedResolutions[] = {
- &kQVGA,
- &kVGA,
- &kHD,
+ &kQVGA,
+ &kVGA,
+ &kHD,
};
// Rescaling the image to fix the pixel aspect ratio runs the risk of making
@@ -122,12 +120,9 @@ static bool FindDeviceInterfaceInUsbDevice(
// Create a plugin, i.e. a user-side controller to manipulate USB device.
IOCFPlugInInterface** plugin;
SInt32 score; // Unused, but required for IOCreatePlugInInterfaceForService.
- kern_return_t kr =
- IOCreatePlugInInterfaceForService(usb_device,
- kIOUSBDeviceUserClientTypeID,
- kIOCFPlugInInterfaceID,
- &plugin,
- &score);
+ kern_return_t kr = IOCreatePlugInInterfaceForService(
+ usb_device, kIOUSBDeviceUserClientTypeID, kIOCFPlugInInterfaceID, &plugin,
+ &score);
if (kr != kIOReturnSuccess || !plugin) {
DLOG(ERROR) << "IOCreatePlugInInterfaceForService";
return false;
@@ -135,10 +130,9 @@ static bool FindDeviceInterfaceInUsbDevice(
base::mac::ScopedIOPluginInterface<IOCFPlugInInterface> plugin_ref(plugin);
// Fetch the Device Interface from the plugin.
- HRESULT res =
- (*plugin)->QueryInterface(plugin,
- CFUUIDGetUUIDBytes(kIOUSBDeviceInterfaceID),
- reinterpret_cast<LPVOID*>(device_interface));
+ HRESULT res = (*plugin)->QueryInterface(
+ plugin, CFUUIDGetUUIDBytes(kIOUSBDeviceInterfaceID),
+ reinterpret_cast<LPVOID*>(device_interface));
if (!SUCCEEDED(res) || !*device_interface) {
DLOG(ERROR) << "QueryInterface, couldn't create interface to USB";
return false;
@@ -156,15 +150,14 @@ static bool FindVideoControlInterfaceInDeviceInterface(
// then get the first interface in the list.
io_iterator_t interface_iterator;
IOUSBFindInterfaceRequest interface_request = {
- .bInterfaceClass = kUSBVideoInterfaceClass,
- .bInterfaceSubClass = kUSBVideoControlSubClass,
- .bInterfaceProtocol = kIOUSBFindInterfaceDontCare,
- .bAlternateSetting = kIOUSBFindInterfaceDontCare
- };
+ .bInterfaceClass = kUSBVideoInterfaceClass,
+ .bInterfaceSubClass = kUSBVideoControlSubClass,
+ .bInterfaceProtocol = kIOUSBFindInterfaceDontCare,
+ .bAlternateSetting = kIOUSBFindInterfaceDontCare};
kern_return_t kr =
- (*device_interface)->CreateInterfaceIterator(device_interface,
- &interface_request,
- &interface_iterator);
+ (*device_interface)
+ ->CreateInterfaceIterator(device_interface, &interface_request,
+ &interface_iterator);
if (kr != kIOReturnSuccess) {
DLOG(ERROR) << "Could not create an iterator to the device's interfaces.";
return false;
@@ -182,11 +175,9 @@ static bool FindVideoControlInterfaceInDeviceInterface(
// Create a user side controller (i.e. a "plugin") for the found interface.
SInt32 score;
- kr = IOCreatePlugInInterfaceForService(found_interface,
- kIOUSBInterfaceUserClientTypeID,
- kIOCFPlugInInterfaceID,
- video_control_interface,
- &score);
+ kr = IOCreatePlugInInterfaceForService(
+ found_interface, kIOUSBInterfaceUserClientTypeID, kIOCFPlugInInterfaceID,
+ video_control_interface, &score);
if (kr != kIOReturnSuccess || !*video_control_interface) {
DLOG(ERROR) << "IOCreatePlugInInterfaceForService";
return false;
@@ -202,11 +193,12 @@ static void SetAntiFlickerInVideoControlInterface(
// Create, the control interface for the found plugin, and release
// the intermediate plugin.
IOUSBInterfaceInterface** control_interface = NULL;
- HRESULT res = (*plugin_interface)->QueryInterface(
- plugin_interface,
- CFUUIDGetUUIDBytes(kIOUSBInterfaceInterfaceID),
- reinterpret_cast<LPVOID*>(&control_interface));
- if (!SUCCEEDED(res) || !control_interface ) {
+ HRESULT res =
+ (*plugin_interface)
+ ->QueryInterface(plugin_interface,
+ CFUUIDGetUUIDBytes(kIOUSBInterfaceInterfaceID),
+ reinterpret_cast<LPVOID*>(&control_interface));
+ if (!SUCCEEDED(res) || !control_interface) {
DLOG(ERROR) << "Couldn’t create control interface";
return;
}
@@ -221,23 +213,24 @@ static void SetAntiFlickerInVideoControlInterface(
IOUSBInterfaceDescriptor* cs_descriptor = NULL;
IOUSBInterfaceInterface220** interface =
reinterpret_cast<IOUSBInterfaceInterface220**>(control_interface);
- while ((descriptor = (*interface)->FindNextAssociatedDescriptor(
- interface, descriptor, kUSBAnyDesc))) {
- cs_descriptor =
- reinterpret_cast<IOUSBInterfaceDescriptor*>(descriptor);
+ while ((descriptor = (*interface)
+ ->FindNextAssociatedDescriptor(interface, descriptor,
+ kUSBAnyDesc))) {
+ cs_descriptor = reinterpret_cast<IOUSBInterfaceDescriptor*>(descriptor);
if ((descriptor->bDescriptorType == kVcCsInterface) &&
(cs_descriptor->bDescriptorSubType == kVcProcessingUnit)) {
real_unit_id = cs_descriptor->bUnitID;
break;
}
}
- DVLOG_IF(1, real_unit_id == -1) << "This USB device doesn't seem to have a "
+ DVLOG_IF(1, real_unit_id == -1)
+ << "This USB device doesn't seem to have a "
<< " VC_PROCESSING_UNIT, anti-flicker not available";
if (real_unit_id == -1)
return;
if ((*control_interface)->USBInterfaceOpen(control_interface) !=
- kIOReturnSuccess) {
+ kIOReturnSuccess) {
DLOG(ERROR) << "Unable to open control interface";
return;
}
@@ -245,13 +238,12 @@ static void SetAntiFlickerInVideoControlInterface(
// Create the control request and launch it to the device's control interface.
// Note how the wIndex needs the interface number OR'ed in the lowest bits.
IOUSBDevRequest command;
- command.bmRequestType = USBmakebmRequestType(kUSBOut,
- kUSBClass,
- kUSBInterface);
+ command.bmRequestType =
+ USBmakebmRequestType(kUSBOut, kUSBClass, kUSBInterface);
command.bRequest = kVcRequestCodeSetCur;
UInt8 interface_number;
- (*control_interface)->GetInterfaceNumber(control_interface,
- &interface_number);
+ (*control_interface)
+ ->GetInterfaceNumber(control_interface, &interface_number);
command.wIndex = (real_unit_id << 8) | interface_number;
const int selector = kPuPowerLineFrequencyControl;
command.wValue = (selector << 8);
@@ -260,12 +252,13 @@ static void SetAntiFlickerInVideoControlInterface(
int power_line_flag_value = (frequency == 50) ? k50Hz : k60Hz;
command.pData = &power_line_flag_value;
- IOReturn ret = (*control_interface)->ControlRequest(control_interface,
- 0, &command);
+ IOReturn ret =
+ (*control_interface)->ControlRequest(control_interface, 0, &command);
DLOG_IF(ERROR, ret != kIOReturnSuccess) << "Anti-flicker control request"
- << " failed (0x" << std::hex << ret << "), unit id: " << real_unit_id;
+ << " failed (0x" << std::hex << ret
+ << "), unit id: " << real_unit_id;
DVLOG_IF(1, ret == kIOReturnSuccess) << "Anti-flicker set to " << frequency
- << "Hz";
+ << "Hz";
(*control_interface)->USBInterfaceClose(control_interface);
}
@@ -281,20 +274,21 @@ static void SetAntiFlickerInUsbDevice(const int vendor_id,
if (frequency == 0)
return;
DVLOG(1) << "Setting Power Line Frequency to " << frequency << " Hz, device "
- << std::hex << vendor_id << "-" << product_id;
+ << std::hex << vendor_id << "-" << product_id;
// Compose a search dictionary with vendor and product ID.
CFMutableDictionaryRef query_dictionary =
IOServiceMatching(kIOUSBDeviceClassName);
- CFDictionarySetValue(query_dictionary, CFSTR(kUSBVendorName),
+ CFDictionarySetValue(
+ query_dictionary, CFSTR(kUSBVendorName),
CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &vendor_id));
- CFDictionarySetValue(query_dictionary, CFSTR(kUSBProductName),
+ CFDictionarySetValue(
+ query_dictionary, CFSTR(kUSBProductName),
CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &product_id));
io_iterator_t usb_iterator;
- kern_return_t kr = IOServiceGetMatchingServices(kIOMasterPortDefault,
- query_dictionary,
- &usb_iterator);
+ kern_return_t kr = IOServiceGetMatchingServices(
+ kIOMasterPortDefault, query_dictionary, &usb_iterator);
if (kr != kIOReturnSuccess) {
DLOG(ERROR) << "No devices found with specified Vendor and Product ID.";
return;
@@ -305,8 +299,8 @@ static void SetAntiFlickerInUsbDevice(const int vendor_id,
base::mac::ScopedIOObject<io_service_t> usb_device_ref(usb_device);
IOUSBDeviceInterface** device_interface = NULL;
- if (!FindDeviceInterfaceInUsbDevice(vendor_id, product_id,
- usb_device, &device_interface)) {
+ if (!FindDeviceInterfaceInUsbDevice(vendor_id, product_id, usb_device,
+ &device_interface)) {
return;
}
base::mac::ScopedIOPluginInterface<IOUSBDeviceInterface>
@@ -314,7 +308,7 @@ static void SetAntiFlickerInUsbDevice(const int vendor_id,
IOCFPlugInInterface** video_control_interface = NULL;
if (!FindVideoControlInterfaceInDeviceInterface(device_interface,
- &video_control_interface)) {
+ &video_control_interface)) {
return;
}
base::mac::ScopedIOPluginInterface<IOCFPlugInInterface>
@@ -352,7 +346,7 @@ VideoCaptureDeviceMac::VideoCaptureDeviceMac(const Name& device_name)
weak_factory_(this) {
// Avoid reconfiguring AVFoundation or blacklisted devices.
final_resolution_selected_ = AVFoundationGlue::IsAVFoundationSupported() ||
- device_name.is_blacklisted();
+ device_name.is_blacklisted();
}
VideoCaptureDeviceMac::~VideoCaptureDeviceMac() {
@@ -422,7 +416,7 @@ void VideoCaptureDeviceMac::AllocateAndStart(
if (base::HexStringToInt(base::StringPiece(vendor_id), &vendor_id_as_int) &&
base::HexStringToInt(base::StringPiece(model_id), &model_id_as_int)) {
SetAntiFlickerInUsbDevice(vendor_id_as_int, model_id_as_int,
- GetPowerLineFrequencyForLocation());
+ GetPowerLineFrequency(params));
}
}
@@ -465,12 +459,11 @@ bool VideoCaptureDeviceMac::Init(
return true;
}
-void VideoCaptureDeviceMac::ReceiveFrame(
- const uint8* video_frame,
- int video_frame_length,
- const VideoCaptureFormat& frame_format,
- int aspect_numerator,
- int aspect_denominator) {
+void VideoCaptureDeviceMac::ReceiveFrame(const uint8* video_frame,
+ int video_frame_length,
+ const VideoCaptureFormat& frame_format,
+ int aspect_numerator,
+ int aspect_denominator) {
// This method is safe to call from a device capture thread, i.e. any thread
// controlled by QTKit/AVFoundation.
if (!final_resolution_selected_) {
@@ -481,21 +474,22 @@ void VideoCaptureDeviceMac::ReceiveFrame(
// drop down to VGA.
bool change_to_vga = false;
if (frame_format.frame_size.width() <
- capture_format_.frame_size.width() ||
+ capture_format_.frame_size.width() ||
frame_format.frame_size.height() <
- capture_format_.frame_size.height()) {
+ capture_format_.frame_size.height()) {
// These are the default capture settings, not yet configured to match
// |capture_format_|.
DCHECK(frame_format.frame_rate == 0);
- DVLOG(1) << "Switching to VGA because the default resolution is " <<
- frame_format.frame_size.ToString();
+ DVLOG(1) << "Switching to VGA because the default resolution is "
+ << frame_format.frame_size.ToString();
change_to_vga = true;
}
if (capture_format_.frame_size == frame_format.frame_size &&
aspect_numerator != aspect_denominator) {
- DVLOG(1) << "Switching to VGA because HD has nonsquare pixel " <<
- "aspect ratio " << aspect_numerator << ":" << aspect_denominator;
+ DVLOG(1) << "Switching to VGA because HD has nonsquare pixel "
+ << "aspect ratio " << aspect_numerator << ":"
+ << aspect_denominator;
change_to_vga = true;
}
@@ -536,22 +530,18 @@ void VideoCaptureDeviceMac::ReceiveFrame(
capture_format_.frame_size = frame_format.frame_size;
} else if (capture_format_.frame_size != frame_format.frame_size) {
ReceiveError("Captured resolution " + frame_format.frame_size.ToString() +
- ", and expected " + capture_format_.frame_size.ToString());
+ ", and expected " + capture_format_.frame_size.ToString());
return;
}
- client_->OnIncomingCapturedData(video_frame,
- video_frame_length,
- frame_format,
- 0,
- base::TimeTicks::Now());
+ client_->OnIncomingCapturedData(video_frame, video_frame_length, frame_format,
+ 0, base::TimeTicks::Now());
}
void VideoCaptureDeviceMac::ReceiveError(const std::string& reason) {
task_runner_->PostTask(FROM_HERE,
base::Bind(&VideoCaptureDeviceMac::SetErrorState,
- weak_factory_.GetWeakPtr(),
- reason));
+ weak_factory_.GetWeakPtr(), reason));
}
void VideoCaptureDeviceMac::SetErrorState(const std::string& reason) {
@@ -576,4 +566,4 @@ bool VideoCaptureDeviceMac::UpdateCaptureResolution() {
return true;
}
-} // namespace media
+} // namespace media
diff --git a/chromium/media/video/capture/mac/video_capture_device_qtkit_mac.h b/chromium/media/capture/video/mac/video_capture_device_qtkit_mac.h
index c1af6978488..a96dd6cf091 100644
--- a/chromium/media/video/capture/mac/video_capture_device_qtkit_mac.h
+++ b/chromium/media/capture/video/mac/video_capture_device_qtkit_mac.h
@@ -12,7 +12,7 @@
#include <vector>
-#import "media/video/capture/mac/platform_video_capturing_mac.h"
+#import "media/capture/video/mac/platform_video_capturing_mac.h"
namespace media {
class VideoCaptureDeviceMac;
@@ -26,12 +26,12 @@ class VideoCaptureDeviceMac;
// Settings.
float frameRate_;
- NSLock *lock_;
- media::VideoCaptureDeviceMac *frameReceiver_;
+ NSLock* lock_;
+ media::VideoCaptureDeviceMac* frameReceiver_;
// QTKit variables.
- QTCaptureSession *captureSession_;
- QTCaptureDeviceInput *captureDeviceInput_;
+ QTCaptureSession* captureSession_;
+ QTCaptureDeviceInput* captureDeviceInput_;
// Buffer for adjusting frames which do not fit receiver
// assumptions. scoped_array<> might make more sense, if the size
diff --git a/chromium/media/video/capture/mac/video_capture_device_qtkit_mac.mm b/chromium/media/capture/video/mac/video_capture_device_qtkit_mac.mm
index a8492d595c2..1ea4c9e4498 100644
--- a/chromium/media/video/capture/mac/video_capture_device_qtkit_mac.mm
+++ b/chromium/media/capture/video/mac/video_capture_device_qtkit_mac.mm
@@ -2,16 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#import "media/video/capture/mac/video_capture_device_qtkit_mac.h"
+#import "media/capture/video/mac/video_capture_device_qtkit_mac.h"
#import <QTKit/QTKit.h>
#include "base/debug/crash_logging.h"
#include "base/logging.h"
-#include "base/mac/scoped_nsexception_enabler.h"
#include "media/base/video_capture_types.h"
-#include "media/video/capture/mac/video_capture_device_mac.h"
-#include "media/video/capture/video_capture_device.h"
+#include "media/capture/video/mac/video_capture_device_mac.h"
+#include "media/capture/video/video_capture_device.h"
#include "ui/gfx/geometry/size.h"
@implementation VideoCaptureDeviceQTKit
@@ -19,25 +18,23 @@
#pragma mark Class methods
+ (void)getDeviceNames:(NSMutableDictionary*)deviceNames {
- // Third-party drivers often throw exceptions, which are fatal in
- // Chromium (see comments in scoped_nsexception_enabler.h). The
- // following catches any exceptions and continues in an orderly
- // fashion with no devices detected.
- NSArray* captureDevices =
- base::mac::RunBlockIgnoringExceptions(^{
- return [QTCaptureDevice inputDevicesWithMediaType:QTMediaTypeVideo];
- });
+ // Third-party drivers often throw exceptions. The following catches any
+ // exceptions and continues in an orderly fashion with no devices detected.
+ NSArray* captureDevices = nil;
+ @try {
+ captureDevices =
+ [QTCaptureDevice inputDevicesWithMediaType:QTMediaTypeVideo];
+ } @catch (id exception) {
+ }
for (QTCaptureDevice* device in captureDevices) {
if ([[device attributeForKey:QTCaptureDeviceSuspendedAttribute] boolValue])
continue;
- DeviceNameAndTransportType* nameAndTransportType =
- [[[DeviceNameAndTransportType alloc]
- initWithName:[device localizedDisplayName]
- transportType:media::kIOAudioDeviceTransportTypeUnknown]
- autorelease];
- [deviceNames setObject:nameAndTransportType
- forKey:[device uniqueID]];
+ DeviceNameAndTransportType* nameAndTransportType = [[
+ [DeviceNameAndTransportType alloc]
+ initWithName:[device localizedDisplayName]
+ transportType:media::kIOAudioDeviceTransportTypeUnknown] autorelease];
+ [deviceNames setObject:nameAndTransportType forKey:[device uniqueID]];
}
}
@@ -86,43 +83,46 @@
// TODO(mcasas): Consider using [QTCaptureDevice deviceWithUniqueID] instead
// of explicitly forcing reenumeration of devices.
- NSArray *captureDevices =
+ NSArray* captureDevices =
[QTCaptureDevice inputDevicesWithMediaType:QTMediaTypeVideo];
- NSArray *captureDevicesNames =
- [captureDevices valueForKey:@"uniqueID"];
+ NSArray* captureDevicesNames = [captureDevices valueForKey:@"uniqueID"];
NSUInteger index = [captureDevicesNames indexOfObject:deviceId];
if (index == NSNotFound) {
- [self sendErrorString:[NSString
- stringWithUTF8String:"Video capture device not found."]];
+ [self sendErrorString:[NSString stringWithUTF8String:
+ "Video capture device not found."]];
return NO;
}
- QTCaptureDevice *device = [captureDevices objectAtIndex:index];
- if ([[device attributeForKey:QTCaptureDeviceSuspendedAttribute]
- boolValue]) {
- [self sendErrorString:[NSString
- stringWithUTF8String:"Cannot open suspended video capture device."]];
+ QTCaptureDevice* device = [captureDevices objectAtIndex:index];
+ if ([[device
+ attributeForKey:QTCaptureDeviceSuspendedAttribute] boolValue]) {
+ [self sendErrorString:
+ [NSString stringWithUTF8String:
+ "Cannot open suspended video capture device."]];
return NO;
}
- NSError *error;
+ NSError* error;
if (![device open:&error]) {
- [self sendErrorString:[NSString
- stringWithFormat:@"Could not open video capture device (%@): %@",
- [error localizedDescription],
- [error localizedFailureReason]]];
+ [self sendErrorString:
+ [NSString stringWithFormat:
+ @"Could not open video capture device (%@): %@",
+ [error localizedDescription],
+ [error localizedFailureReason]]];
return NO;
}
captureDeviceInput_ = [[QTCaptureDeviceInput alloc] initWithDevice:device];
captureSession_ = [[QTCaptureSession alloc] init];
- QTCaptureDecompressedVideoOutput *captureDecompressedOutput =
+ QTCaptureDecompressedVideoOutput* captureDecompressedOutput =
[[[QTCaptureDecompressedVideoOutput alloc] init] autorelease];
[captureDecompressedOutput setDelegate:self];
[captureDecompressedOutput setAutomaticallyDropsLateVideoFrames:YES];
if (![captureSession_ addOutput:captureDecompressedOutput error:&error]) {
- [self sendErrorString:[NSString
- stringWithFormat:@"Could not connect video capture output (%@): %@",
- [error localizedDescription],
- [error localizedFailureReason]]];
+ [self
+ sendErrorString:
+ [NSString stringWithFormat:
+ @"Could not connect video capture output (%@): %@",
+ [error localizedDescription],
+ [error localizedFailureReason]]];
return NO;
}
@@ -131,7 +131,7 @@
base::debug::SetCrashKeyValue("VideoCaptureDeviceQTKit", "OpenedDevice");
// Set the video pixel format to 2VUY (a.k.a UYVY, packed 4:2:2).
- NSDictionary *captureDictionary = [NSDictionary
+ NSDictionary* captureDictionary = [NSDictionary
dictionaryWithObject:
[NSNumber numberWithUnsignedInt:kCVPixelFormatType_422YpCbCr8]
forKey:(id)kCVPixelBufferPixelFormatTypeKey];
@@ -144,7 +144,8 @@
// Being here means stopping a device that never started OK in the first
// place, log it.
[self sendLogString:[NSString
- stringWithUTF8String:"No video capture device set, on removal."]];
+ stringWithUTF8String:
+ "No video capture device set, on removal."]];
return YES;
}
// Tear down input and output, stop the capture and deregister observers.
@@ -161,57 +162,61 @@
width:(int)width
frameRate:(float)frameRate {
if (!captureDeviceInput_) {
- [self sendErrorString:[NSString
- stringWithUTF8String:"No video capture device set."]];
+ [self sendErrorString:
+ [NSString stringWithUTF8String:"No video capture device set."]];
return NO;
}
if ([[captureSession_ outputs] count] != 1) {
[self sendErrorString:[NSString
- stringWithUTF8String:"Video capture capabilities already set."]];
+ stringWithUTF8String:
+ "Video capture capabilities already set."]];
return NO;
}
if (frameRate <= 0.0f) {
- [self sendErrorString:[NSString stringWithUTF8String: "Wrong frame rate."]];
+ [self sendErrorString:[NSString stringWithUTF8String:"Wrong frame rate."]];
return NO;
}
frameRate_ = frameRate;
- QTCaptureDecompressedVideoOutput *output =
+ QTCaptureDecompressedVideoOutput* output =
[[captureSession_ outputs] objectAtIndex:0];
// Set up desired output properties. The old capture dictionary is used to
// retrieve the initial pixel format, which must be maintained.
NSDictionary* videoSettingsDictionary = @{
- (id)kCVPixelBufferWidthKey : @(width),
- (id)kCVPixelBufferHeightKey : @(height),
- (id)kCVPixelBufferPixelFormatTypeKey : [[output pixelBufferAttributes]
+ (id)kCVPixelBufferWidthKey : @(width), (id)
+ kCVPixelBufferHeightKey : @(height), (id)
+ kCVPixelBufferPixelFormatTypeKey : [[output pixelBufferAttributes]
valueForKey:(id)kCVPixelBufferPixelFormatTypeKey]
};
[output setPixelBufferAttributes:videoSettingsDictionary];
- [output setMinimumVideoFrameInterval:(NSTimeInterval)1/frameRate];
+ [output setMinimumVideoFrameInterval:(NSTimeInterval)1 / frameRate];
return YES;
}
- (BOOL)startCapture {
if ([[captureSession_ outputs] count] == 0) {
// Capture properties not set.
- [self sendErrorString:[NSString
- stringWithUTF8String:"Video capture device not initialized."]];
+ [self
+ sendErrorString:[NSString stringWithUTF8String:
+ "Video capture device not initialized."]];
return NO;
}
if ([[captureSession_ inputs] count] == 0) {
- NSError *error;
+ NSError* error;
if (![captureSession_ addInput:captureDeviceInput_ error:&error]) {
- [self sendErrorString:[NSString
- stringWithFormat:@"Could not connect video capture device (%@): %@",
- [error localizedDescription],
- [error localizedFailureReason]]];
+ [self
+ sendErrorString:
+ [NSString stringWithFormat:
+ @"Could not connect video capture device (%@): %@",
+ [error localizedDescription],
+ [error localizedFailureReason]]];
return NO;
}
- NSNotificationCenter * notificationCenter =
+ NSNotificationCenter* notificationCenter =
[NSNotificationCenter defaultCenter];
[notificationCenter addObserver:self
selector:@selector(handleNotification:)
@@ -251,20 +256,20 @@
// |captureOutput| is called by the capture device to deliver a new frame.
- (void)captureOutput:(QTCaptureOutput*)captureOutput
- didOutputVideoFrame:(CVImageBufferRef)videoFrame
- withSampleBuffer:(QTSampleBuffer*)sampleBuffer
- fromConnection:(QTCaptureConnection*)connection {
+ didOutputVideoFrame:(CVImageBufferRef)videoFrame
+ withSampleBuffer:(QTSampleBuffer*)sampleBuffer
+ fromConnection:(QTCaptureConnection*)connection {
[lock_ lock];
- if(!frameReceiver_) {
+ if (!frameReceiver_) {
[lock_ unlock];
return;
}
// Lock the frame and calculate frame size.
const int kLockFlags = 0;
- if (CVPixelBufferLockBaseAddress(videoFrame, kLockFlags)
- == kCVReturnSuccess) {
- void *baseAddress = CVPixelBufferGetBaseAddress(videoFrame);
+ if (CVPixelBufferLockBaseAddress(videoFrame, kLockFlags) ==
+ kCVReturnSuccess) {
+ void* baseAddress = CVPixelBufferGetBaseAddress(videoFrame);
size_t bytesPerRow = CVPixelBufferGetBytesPerRow(videoFrame);
size_t frameWidth = CVPixelBufferGetWidth(videoFrame);
size_t frameHeight = CVPixelBufferGetHeight(videoFrame);
@@ -291,17 +296,16 @@
for (size_t y = 0; y < frameHeight; ++y) {
memcpy(adjustedAddress + y * expectedBytesPerRow,
- addressToPass + y * bytesPerRow,
- expectedBytesPerRow);
+ addressToPass + y * bytesPerRow, expectedBytesPerRow);
}
addressToPass = adjustedAddress;
frameSize = frameHeight * expectedBytesPerRow;
}
- media::VideoCaptureFormat captureFormat(gfx::Size(frameWidth, frameHeight),
- frameRate_,
- media::PIXEL_FORMAT_UYVY);
+ media::VideoCaptureFormat captureFormat(
+ gfx::Size(frameWidth, frameHeight), frameRate_,
+ media::PIXEL_FORMAT_UYVY);
// The aspect ratio dictionary is often missing, in which case we report
// a pixel aspect ratio of 0:0.
@@ -313,16 +317,16 @@
aspectRatioDict, kCVImageBufferPixelAspectRatioHorizontalSpacingKey);
CFNumberRef aspectDenominatorRef = (CFNumberRef)CFDictionaryGetValue(
aspectRatioDict, kCVImageBufferPixelAspectRatioVerticalSpacingKey);
- DCHECK(aspectNumeratorRef && aspectDenominatorRef) <<
- "Aspect Ratio dictionary missing its entries.";
+ DCHECK(aspectNumeratorRef && aspectDenominatorRef)
+ << "Aspect Ratio dictionary missing its entries.";
CFNumberGetValue(aspectNumeratorRef, kCFNumberIntType, &aspectNumerator);
- CFNumberGetValue(
- aspectDenominatorRef, kCFNumberIntType, &aspectDenominator);
+ CFNumberGetValue(aspectDenominatorRef, kCFNumberIntType,
+ &aspectDenominator);
}
// Deliver the captured video frame.
frameReceiver_->ReceiveFrame(addressToPass, frameSize, captureFormat,
- aspectNumerator, aspectDenominator);
+ aspectNumerator, aspectDenominator);
CVPixelBufferUnlockBaseAddress(videoFrame, kLockFlags);
}
@@ -330,12 +334,11 @@
}
- (void)handleNotification:(NSNotification*)errorNotification {
- NSError * error = (NSError*)[[errorNotification userInfo]
- objectForKey:QTCaptureSessionErrorKey];
- [self sendErrorString:[NSString
- stringWithFormat:@"%@: %@",
- [error localizedDescription],
- [error localizedFailureReason]]];
+ NSError* error = (NSError*)
+ [[errorNotification userInfo] objectForKey:QTCaptureSessionErrorKey];
+ [self sendErrorString:
+ [NSString stringWithFormat:@"%@: %@", [error localizedDescription],
+ [error localizedFailureReason]]];
}
- (void)sendErrorString:(NSString*)error {
diff --git a/chromium/media/video/capture/video_capture_device.cc b/chromium/media/capture/video/video_capture_device.cc
index e4744893a26..01816c22061 100644
--- a/chromium/media/video/capture/video_capture_device.cc
+++ b/chromium/media/capture/video/video_capture_device.cc
@@ -2,35 +2,43 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/video/capture/video_capture_device.h"
+#include "media/capture/video/video_capture_device.h"
+#include "base/command_line.h"
#include "base/i18n/timezone.h"
#include "base/strings/string_util.h"
+#include "media/base/media_switches.h"
namespace media {
+// TODO(msu.koo): http://crbug.com/532272, remove checking the switch in favour
+// of deferring GetModel() call to the actual VideoCaptureDevice object.
const std::string VideoCaptureDevice::Name::GetNameAndModel() const {
const std::string model_id = GetModel();
if (model_id.empty())
return device_name_;
const std::string suffix = " (" + model_id + ")";
- if (base::EndsWith(device_name_, suffix, true /* case sensitive */))
+ if (base::EndsWith(device_name_, suffix, base::CompareCase::SENSITIVE) ||
+ base::CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kUseFakeDeviceForMediaStream))
+ // Ignore |model_id| if |kUseFakeDeviceForMediaStream| flag is present.
return device_name_;
return device_name_ + suffix;
}
-VideoCaptureDevice::Name::Name() {}
+VideoCaptureDevice::Name::Name() {
+}
VideoCaptureDevice::Name::Name(const std::string& name, const std::string& id)
- : device_name_(name), unique_id_(id) {}
+ : device_name_(name), unique_id_(id) {
+}
#if defined(OS_LINUX)
VideoCaptureDevice::Name::Name(const std::string& name,
const std::string& id,
const CaptureApiType api_type)
- : device_name_(name),
- unique_id_(id),
- capture_api_class_(api_type) {}
+ : device_name_(name), unique_id_(id), capture_api_class_(api_type) {
+}
#elif defined(OS_WIN)
VideoCaptureDevice::Name::Name(const std::string& name,
const std::string& id,
@@ -38,7 +46,8 @@ VideoCaptureDevice::Name::Name(const std::string& name,
: device_name_(name),
unique_id_(id),
capture_api_class_(api_type),
- capabilities_id_(id) {}
+ capabilities_id_(id) {
+}
#elif defined(OS_MACOSX)
VideoCaptureDevice::Name::Name(const std::string& name,
const std::string& id,
@@ -47,7 +56,8 @@ VideoCaptureDevice::Name::Name(const std::string& name,
unique_id_(id),
capture_api_class_(api_type),
transport_type_(OTHER_TRANSPORT),
- is_blacklisted_(false) {}
+ is_blacklisted_(false) {
+}
VideoCaptureDevice::Name::Name(const std::string& name,
const std::string& id,
@@ -57,17 +67,18 @@ VideoCaptureDevice::Name::Name(const std::string& name,
unique_id_(id),
capture_api_class_(api_type),
transport_type_(transport_type),
- is_blacklisted_(false) {}
+ is_blacklisted_(false) {
+}
#elif defined(ANDROID)
VideoCaptureDevice::Name::Name(const std::string& name,
const std::string& id,
const CaptureApiType api_type)
- : device_name_(name),
- unique_id_(id),
- capture_api_class_(api_type) {}
+ : device_name_(name), unique_id_(id), capture_api_class_(api_type) {
+}
#endif
-VideoCaptureDevice::Name::~Name() {}
+VideoCaptureDevice::Name::~Name() {
+}
#if defined(OS_LINUX)
const char* VideoCaptureDevice::Name::GetCaptureApiTypeString() const {
@@ -83,7 +94,7 @@ const char* VideoCaptureDevice::Name::GetCaptureApiTypeString() const {
}
#elif defined(OS_WIN)
const char* VideoCaptureDevice::Name::GetCaptureApiTypeString() const {
- switch(capture_api_type()) {
+ switch (capture_api_type()) {
case MEDIA_FOUNDATION:
return "Media Foundation";
case DIRECT_SHOW:
@@ -95,7 +106,7 @@ const char* VideoCaptureDevice::Name::GetCaptureApiTypeString() const {
}
#elif defined(OS_MACOSX)
const char* VideoCaptureDevice::Name::GetCaptureApiTypeString() const {
- switch(capture_api_type()) {
+ switch (capture_api_type()) {
case AVFOUNDATION:
return "AV Foundation";
case QTKIT:
@@ -109,7 +120,7 @@ const char* VideoCaptureDevice::Name::GetCaptureApiTypeString() const {
}
#elif defined(OS_ANDROID)
const char* VideoCaptureDevice::Name::GetCaptureApiTypeString() const {
- switch(capture_api_type()) {
+ switch (capture_api_type()) {
case API1:
return "Camera API1";
case API2_LEGACY:
@@ -128,9 +139,11 @@ const char* VideoCaptureDevice::Name::GetCaptureApiTypeString() const {
}
#endif
-VideoCaptureDevice::Client::Buffer::~Buffer() {}
+VideoCaptureDevice::Client::Buffer::~Buffer() {
+}
-VideoCaptureDevice::~VideoCaptureDevice() {}
+VideoCaptureDevice::~VideoCaptureDevice() {
+}
int VideoCaptureDevice::GetPowerLineFrequencyForLocation() const {
std::string current_country = base::CountryCodeForCurrentTimezone();
@@ -147,9 +160,20 @@ int VideoCaptureDevice::GetPowerLineFrequencyForLocation() const {
countries_using_60Hz + arraysize(countries_using_60Hz);
if (std::find(countries_using_60Hz, countries_using_60Hz_end,
current_country) == countries_using_60Hz_end) {
- return kPowerLine50Hz;
+ return static_cast<int>(media::PowerLineFrequency::FREQUENCY_50HZ);
+ }
+ return static_cast<int>(media::PowerLineFrequency::FREQUENCY_60HZ);
+}
+
+int VideoCaptureDevice::GetPowerLineFrequency(
+ const VideoCaptureParams& params) const {
+ switch (params.power_line_frequency) {
+ case media::PowerLineFrequency::FREQUENCY_50HZ: // fall through
+ case media::PowerLineFrequency::FREQUENCY_60HZ:
+ return static_cast<int>(params.power_line_frequency);
+ default:
+ return GetPowerLineFrequencyForLocation();
}
- return kPowerLine60Hz;
}
} // namespace media
diff --git a/chromium/media/video/capture/video_capture_device.h b/chromium/media/capture/video/video_capture_device.h
index 739831628ce..336013368fd 100644
--- a/chromium/media/video/capture/video_capture_device.h
+++ b/chromium/media/capture/video/video_capture_device.h
@@ -52,25 +52,13 @@ class MEDIA_EXPORT VideoCaptureDevice {
};
#elif defined(OS_WIN)
// Windows targets Capture Api type: it can only be set on construction.
- enum CaptureApiType {
- MEDIA_FOUNDATION,
- DIRECT_SHOW,
- API_TYPE_UNKNOWN
- };
+ enum CaptureApiType { MEDIA_FOUNDATION, DIRECT_SHOW, API_TYPE_UNKNOWN };
#elif defined(OS_MACOSX)
// Mac targets Capture Api type: it can only be set on construction.
- enum CaptureApiType {
- AVFOUNDATION,
- QTKIT,
- DECKLINK,
- API_TYPE_UNKNOWN
- };
+ enum CaptureApiType { AVFOUNDATION, QTKIT, DECKLINK, API_TYPE_UNKNOWN };
// For AVFoundation Api, identify devices that are built-in or USB.
- enum TransportType {
- USB_OR_BUILT_IN,
- OTHER_TRANSPORT
- };
-#elif defined (OS_ANDROID)
+ enum TransportType { USB_OR_BUILT_IN, OTHER_TRANSPORT };
+#elif defined(OS_ANDROID)
// Android targets Capture Api type: it can only be set on construction.
// Automatically generated enum to interface with Java world.
//
@@ -88,7 +76,8 @@ class MEDIA_EXPORT VideoCaptureDevice {
#if defined(OS_WIN) || defined(OS_MACOSX) || defined(OS_LINUX) || \
defined(OS_ANDROID)
- Name(const std::string& name, const std::string& id,
+ Name(const std::string& name,
+ const std::string& id,
const CaptureApiType api_type);
#endif
#if defined(OS_MACOSX)
@@ -120,9 +109,7 @@ class MEDIA_EXPORT VideoCaptureDevice {
bool operator==(const Name& other) const {
return other.id() == unique_id_;
}
- bool operator<(const Name& other) const {
- return unique_id_ < other.id();
- }
+ bool operator<(const Name& other) const { return unique_id_ < other.id(); }
#if defined(OS_WIN) || defined(OS_MACOSX) || defined(OS_LINUX) || \
defined(OS_ANDROID)
@@ -134,20 +121,12 @@ class MEDIA_EXPORT VideoCaptureDevice {
#if defined(OS_WIN)
// Certain devices need an ID different from the |unique_id_| for
// capabilities retrieval.
- const std::string& capabilities_id() const {
- return capabilities_id_;
- }
- void set_capabilities_id(const std::string& id) {
- capabilities_id_ = id;
- }
+ const std::string& capabilities_id() const { return capabilities_id_; }
+ void set_capabilities_id(const std::string& id) { capabilities_id_ = id; }
#endif // if defined(OS_WIN)
#if defined(OS_MACOSX)
- TransportType transport_type() const {
- return transport_type_;
- }
- bool is_blacklisted() const {
- return is_blacklisted_;
- }
+ TransportType transport_type() const { return transport_type_; }
+ bool is_blacklisted() const { return is_blacklisted_; }
void set_is_blacklisted(bool is_blacklisted) {
is_blacklisted_ = is_blacklisted;
}
@@ -162,13 +141,14 @@ class MEDIA_EXPORT VideoCaptureDevice {
// initialized.
class CaptureApiClass {
public:
- CaptureApiClass(): capture_api_type_(API_TYPE_UNKNOWN) {}
+ CaptureApiClass() : capture_api_type_(API_TYPE_UNKNOWN) {}
CaptureApiClass(const CaptureApiType api_type)
: capture_api_type_(api_type) {}
CaptureApiType capture_api_type() const {
DCHECK_NE(capture_api_type_, API_TYPE_UNKNOWN);
return capture_api_type_;
}
+
private:
CaptureApiType capture_api_type_;
};
@@ -190,20 +170,22 @@ class MEDIA_EXPORT VideoCaptureDevice {
// Manages a list of Name entries.
typedef std::list<Name> Names;
- // Interface defining the methods that clients of VideoCapture must have. It
- // is actually two-in-one: clients may implement OnIncomingCapturedData() or
- // ReserveOutputBuffer() + OnIncomingCapturedVideoFrame(), or all of them.
- // All clients must implement OnError().
- class MEDIA_EXPORT Client {
+ // Interface defining the methods that clients of VideoCapture must have. It
+ // is actually two-in-one: clients may implement OnIncomingCapturedData() or
+ // ReserveOutputBuffer() + OnIncomingCapturedVideoFrame(), or all of them.
+ // All clients must implement OnError().
+ class MEDIA_EXPORT Client {
public:
// Memory buffer returned by Client::ReserveOutputBuffer().
class MEDIA_EXPORT Buffer {
public:
virtual ~Buffer() = 0;
virtual int id() const = 0;
- virtual size_t size() const = 0;
- virtual void* data() = 0;
- virtual ClientBuffer AsClientBuffer() = 0;
+ virtual gfx::Size dimensions() const = 0;
+ virtual size_t mapped_size() const = 0;
+ virtual void* data(int plane) = 0;
+ void* data() { return data(0); }
+ virtual ClientBuffer AsClientBuffer(int plane) = 0;
#if defined(OS_POSIX)
virtual base::FileDescriptor AsPlatformFile() = 0;
#endif
@@ -299,13 +281,14 @@ class MEDIA_EXPORT VideoCaptureDevice {
// happens first.
virtual void StopAndDeAllocate() = 0;
+ // Gets the power line frequency, either from the params if specified by the
+ // user or from the current system time zone.
+ int GetPowerLineFrequency(const VideoCaptureParams& params) const;
+
+ private:
// Gets the power line frequency from the current system time zone if this is
// defined, otherwise returns 0.
int GetPowerLineFrequencyForLocation() const;
-
- protected:
- static const int kPowerLine50Hz = 50;
- static const int kPowerLine60Hz = 60;
};
} // namespace media
diff --git a/chromium/media/video/capture/video_capture_device_factory.cc b/chromium/media/capture/video/video_capture_device_factory.cc
index c87c39a11b5..aa6be6f5ee7 100644
--- a/chromium/media/video/capture/video_capture_device_factory.cc
+++ b/chromium/media/capture/video/video_capture_device_factory.cc
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/video/capture/video_capture_device_factory.h"
+#include "media/capture/video/video_capture_device_factory.h"
#include "base/command_line.h"
#include "media/base/media_switches.h"
-#include "media/video/capture/fake_video_capture_device_factory.h"
-#include "media/video/capture/file_video_capture_device_factory.h"
+#include "media/capture/video/fake_video_capture_device_factory.h"
+#include "media/capture/video/file_video_capture_device_factory.h"
namespace media {
@@ -20,11 +20,11 @@ scoped_ptr<VideoCaptureDeviceFactory> VideoCaptureDeviceFactory::CreateFactory(
// present, otherwise use the normal, platform-dependent, device factory.
if (command_line->HasSwitch(switches::kUseFakeDeviceForMediaStream)) {
if (command_line->HasSwitch(switches::kUseFileForFakeVideoCapture)) {
- return scoped_ptr<VideoCaptureDeviceFactory>(new
- media::FileVideoCaptureDeviceFactory());
+ return scoped_ptr<VideoCaptureDeviceFactory>(
+ new media::FileVideoCaptureDeviceFactory());
} else {
- return scoped_ptr<VideoCaptureDeviceFactory>(new
- media::FakeVideoCaptureDeviceFactory());
+ return scoped_ptr<VideoCaptureDeviceFactory>(
+ new media::FakeVideoCaptureDeviceFactory());
}
} else {
// |ui_task_runner| is needed for the Linux ChromeOS factory to retrieve
@@ -38,7 +38,8 @@ VideoCaptureDeviceFactory::VideoCaptureDeviceFactory() {
thread_checker_.DetachFromThread();
}
-VideoCaptureDeviceFactory::~VideoCaptureDeviceFactory() {}
+VideoCaptureDeviceFactory::~VideoCaptureDeviceFactory() {
+}
void VideoCaptureDeviceFactory::EnumerateDeviceNames(const base::Callback<
void(scoped_ptr<media::VideoCaptureDevice::Names>)>& callback) {
@@ -50,7 +51,8 @@ void VideoCaptureDeviceFactory::EnumerateDeviceNames(const base::Callback<
callback.Run(device_names.Pass());
}
-#if !defined(OS_MACOSX) && !defined(OS_LINUX) && !defined(OS_ANDROID) && !defined(OS_WIN)
+#if !defined(OS_MACOSX) && !defined(OS_LINUX) && !defined(OS_ANDROID) && \
+ !defined(OS_WIN)
// static
VideoCaptureDeviceFactory*
VideoCaptureDeviceFactory::CreateVideoCaptureDeviceFactory(
diff --git a/chromium/media/video/capture/video_capture_device_factory.h b/chromium/media/capture/video/video_capture_device_factory.h
index 224cdba8949..1cd5d1723b7 100644
--- a/chromium/media/video/capture/video_capture_device_factory.h
+++ b/chromium/media/capture/video/video_capture_device_factory.h
@@ -6,7 +6,7 @@
#define MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURE_DEVICE_FACTORY_H_
#include "base/threading/thread_checker.h"
-#include "media/video/capture/video_capture_device.h"
+#include "media/capture/video/video_capture_device.h"
namespace media {
diff --git a/chromium/media/video/capture/video_capture_device_info.cc b/chromium/media/capture/video/video_capture_device_info.cc
index 79becf3d587..3bb4df2adac 100644
--- a/chromium/media/video/capture/video_capture_device_info.cc
+++ b/chromium/media/capture/video/video_capture_device_info.cc
@@ -2,18 +2,20 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/video/capture/video_capture_device_info.h"
+#include "media/capture/video/video_capture_device_info.h"
namespace media {
-VideoCaptureDeviceInfo::VideoCaptureDeviceInfo() {}
+VideoCaptureDeviceInfo::VideoCaptureDeviceInfo() {
+}
VideoCaptureDeviceInfo::VideoCaptureDeviceInfo(
const VideoCaptureDevice::Name& name,
const VideoCaptureFormats& supported_formats)
- : name(name),
- supported_formats(supported_formats) {}
+ : name(name), supported_formats(supported_formats) {
+}
-VideoCaptureDeviceInfo::~VideoCaptureDeviceInfo() {}
+VideoCaptureDeviceInfo::~VideoCaptureDeviceInfo() {
+}
} // namespace media
diff --git a/chromium/media/video/capture/video_capture_device_info.h b/chromium/media/capture/video/video_capture_device_info.h
index d215cf5755b..3cdb1b52655 100644
--- a/chromium/media/video/capture/video_capture_device_info.h
+++ b/chromium/media/capture/video/video_capture_device_info.h
@@ -6,7 +6,7 @@
#define MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURE_DEVICE_INFO_H_
#include "media/base/video_capture_types.h"
-#include "media/video/capture/video_capture_device.h"
+#include "media/capture/video/video_capture_device.h"
namespace media {
diff --git a/chromium/media/video/capture/video_capture_device_unittest.cc b/chromium/media/capture/video/video_capture_device_unittest.cc
index 0002929a9fc..b5d1dfcdcd9 100644
--- a/chromium/media/video/capture/video_capture_device_unittest.cc
+++ b/chromium/media/capture/video/video_capture_device_unittest.cc
@@ -12,24 +12,24 @@
#include "base/thread_task_runner_handle.h"
#include "base/threading/thread.h"
#include "media/base/video_capture_types.h"
-#include "media/video/capture/video_capture_device.h"
-#include "media/video/capture/video_capture_device_factory.h"
+#include "media/capture/video/video_capture_device.h"
+#include "media/capture/video/video_capture_device_factory.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
#if defined(OS_WIN)
#include "base/win/scoped_com_initializer.h"
-#include "media/video/capture/win/video_capture_device_factory_win.h"
+#include "media/capture/video/win/video_capture_device_factory_win.h"
#endif
#if defined(OS_MACOSX)
#include "media/base/mac/avfoundation_glue.h"
-#include "media/video/capture/mac/video_capture_device_factory_mac.h"
+#include "media/capture/video/mac/video_capture_device_factory_mac.h"
#endif
#if defined(OS_ANDROID)
#include "base/android/jni_android.h"
-#include "media/video/capture/android/video_capture_device_android.h"
+#include "media/capture/video/android/video_capture_device_android.h"
#endif
#if defined(OS_MACOSX)
@@ -61,10 +61,8 @@ using ::testing::SaveArg;
namespace media {
namespace {
-static const gfx::Size kCaptureSizes[] = {
- gfx::Size(640, 480),
- gfx::Size(1280, 720)
-};
+static const gfx::Size kCaptureSizes[] = {gfx::Size(640, 480),
+ gfx::Size(1280, 720)};
class MockClient : public VideoCaptureDevice::Client {
public:
@@ -86,7 +84,7 @@ class MockClient : public VideoCaptureDevice::Client {
explicit MockClient(base::Callback<void(const VideoCaptureFormat&)> frame_cb)
: main_thread_(base::ThreadTaskRunnerHandle::Get()),
- frame_cb_(frame_cb) {}
+ frame_cb_(frame_cb) {}
void OnIncomingCapturedData(const uint8* data,
int length,
@@ -123,8 +121,8 @@ class MockClient : public VideoCaptureDevice::Client {
base::Callback<void(const VideoCaptureFormat&)> frame_cb_;
};
-class DeviceEnumerationListener :
- public base::RefCounted<DeviceEnumerationListener> {
+class DeviceEnumerationListener
+ : public base::RefCounted<DeviceEnumerationListener> {
public:
MOCK_METHOD1(OnEnumeratedDevicesCallbackPtr,
void(VideoCaptureDevice::Names* names));
@@ -133,6 +131,7 @@ class DeviceEnumerationListener :
scoped_ptr<VideoCaptureDevice::Names> names) {
OnEnumeratedDevicesCallbackPtr(names.release());
}
+
private:
friend class base::RefCounted<DeviceEnumerationListener>;
virtual ~DeviceEnumerationListener() {}
@@ -140,8 +139,7 @@ class DeviceEnumerationListener :
} // namespace
-class VideoCaptureDeviceTest :
- public testing::TestWithParam<gfx::Size> {
+class VideoCaptureDeviceTest : public testing::TestWithParam<gfx::Size> {
protected:
typedef VideoCaptureDevice::Client Client;
@@ -163,8 +161,8 @@ class VideoCaptureDeviceTest :
#if defined(OS_MACOSX)
AVFoundationGlue::InitializeAVFoundation();
#endif
- EXPECT_CALL(*client_, OnIncomingCapturedYuvData(_,_,_,_,_,_,_,_,_))
- .Times(0);
+ EXPECT_CALL(*client_, OnIncomingCapturedYuvData(_, _, _, _, _, _, _, _, _))
+ .Times(0);
EXPECT_CALL(*client_, DoReserveOutputBuffer()).Times(0);
EXPECT_CALL(*client_, DoOnIncomingCapturedBuffer()).Times(0);
EXPECT_CALL(*client_, DoOnIncomingCapturedVideoFrame()).Times(0);
@@ -209,8 +207,7 @@ class VideoCaptureDeviceTest :
for (const auto& names_iterator : *names_) {
VideoCaptureFormats supported_formats;
video_capture_device_factory_->GetDeviceSupportedFormats(
- names_iterator,
- &supported_formats);
+ names_iterator, &supported_formats);
for (const auto& formats_iterator : supported_formats) {
if (formats_iterator.pixel_format == pixel_format) {
return scoped_ptr<VideoCaptureDevice::Name>(
@@ -218,8 +215,9 @@ class VideoCaptureDeviceTest :
}
}
}
- DVLOG_IF(1, pixel_format != PIXEL_FORMAT_MAX) << "No camera can capture the"
- << " format: " << VideoCaptureFormat::PixelFormatToString(pixel_format);
+ DVLOG_IF(1, pixel_format != PIXEL_FORMAT_MAX)
+ << "No camera can capture the"
+ << " format: " << VideoPixelFormatToString(pixel_format);
return scoped_ptr<VideoCaptureDevice::Name>();
}
@@ -228,11 +226,9 @@ class VideoCaptureDeviceTest :
VideoCaptureFormats supported_formats;
video_capture_device_factory_->GetDeviceSupportedFormats(
device, &supported_formats);
- const auto it =
- std::find_if(supported_formats.begin(), supported_formats.end(),
- [&size](VideoCaptureFormat const& f) {
- return f.frame_size == size;
- });
+ const auto it = std::find_if(
+ supported_formats.begin(), supported_formats.end(),
+ [&size](VideoCaptureFormat const& f) { return f.frame_size == size; });
if (it == supported_formats.end()) {
DVLOG(1) << "Size " << size.ToString() << " is not supported.";
return false;
@@ -267,7 +263,8 @@ TEST_F(VideoCaptureDeviceTest, MAYBE_OpenInvalidDevice) {
: VideoCaptureDevice::Name::DIRECT_SHOW;
VideoCaptureDevice::Name device_name("jibberish", "jibberish", api_type);
#elif defined(OS_MACOSX)
- VideoCaptureDevice::Name device_name("jibberish", "jibberish",
+ VideoCaptureDevice::Name device_name(
+ "jibberish", "jibberish",
VideoCaptureDeviceFactoryMac::PlatformSupportsAVFoundation()
? VideoCaptureDevice::Name::AVFOUNDATION
: VideoCaptureDevice::Name::QTKIT);
@@ -319,7 +316,8 @@ TEST_P(VideoCaptureDeviceTest, CaptureWithSize) {
VideoCaptureParams capture_params;
capture_params.requested_format.frame_size.SetSize(width, height);
capture_params.requested_format.frame_rate = 30.0f;
- capture_params.requested_format.pixel_format = PIXEL_FORMAT_I420;
+ capture_params.requested_format.pixel_format =
+ PIXEL_FORMAT_I420;
device->AllocateAndStart(capture_params, client_.Pass());
// Get captured video frames.
WaitForCapturedFrame();
@@ -352,7 +350,8 @@ TEST_F(VideoCaptureDeviceTest, MAYBE_AllocateBadSize) {
VideoCaptureParams capture_params;
capture_params.requested_format.frame_size.SetSize(637, 472);
capture_params.requested_format.frame_rate = 35;
- capture_params.requested_format.pixel_format = PIXEL_FORMAT_I420;
+ capture_params.requested_format.pixel_format =
+ PIXEL_FORMAT_I420;
device->AllocateAndStart(capture_params, client_.Pass());
WaitForCapturedFrame();
device->StopAndDeAllocate();
@@ -472,7 +471,8 @@ TEST_F(VideoCaptureDeviceTest, GetDeviceSupportedFormats) {
// GetDeviceSupportedFormats().
scoped_ptr<VideoCaptureDevice::Name> name =
GetFirstDeviceNameSupportingPixelFormat(PIXEL_FORMAT_MAX);
- // Verify no camera returned for PIXEL_FORMAT_MAX. Nothing else to test here
+ // Verify no camera returned for PIXEL_FORMAT_MAX. Nothing else
+ // to test here
// since we cannot forecast the hardware capabilities.
ASSERT_FALSE(name);
}
diff --git a/chromium/media/video/capture/win/capability_list_win.cc b/chromium/media/capture/video/win/capability_list_win.cc
index 2de2786e752..32cbb61ca73 100644
--- a/chromium/media/video/capture/win/capability_list_win.cc
+++ b/chromium/media/capture/video/win/capability_list_win.cc
@@ -2,7 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/video/capture/win/capability_list_win.h"
+#include "media/base/video_capture_types.h"
+#include "media/capture/video/win/capability_list_win.h"
#include <algorithm>
#include <functional>
@@ -36,12 +37,13 @@ static bool CompareCapability(const VideoCaptureFormat& requested,
if (diff_fps_lhs != diff_fps_rhs)
return diff_fps_lhs < diff_fps_rhs;
- return lhs.pixel_format < rhs.pixel_format;
+ return VideoCaptureFormat::ComparePixelFormatPreference(lhs.pixel_format,
+ rhs.pixel_format);
}
const CapabilityWin& GetBestMatchedCapability(
- const VideoCaptureFormat& requested,
- const CapabilityList& capabilities) {
+ const VideoCaptureFormat& requested,
+ const CapabilityList& capabilities) {
DCHECK(!capabilities.empty());
const CapabilityWin* best_match = &(*capabilities.begin());
for (const CapabilityWin& capability : capabilities) {
diff --git a/chromium/media/video/capture/win/capability_list_win.h b/chromium/media/capture/video/win/capability_list_win.h
index dc74947629b..47166c1ec40 100644
--- a/chromium/media/video/capture/win/capability_list_win.h
+++ b/chromium/media/capture/video/win/capability_list_win.h
@@ -21,12 +21,12 @@ struct CapabilityWin {
: stream_index(index), supported_format(format), info_header() {}
// Used by VideoCaptureDeviceWin.
- CapabilityWin(int index, const VideoCaptureFormat& format,
- const BITMAPINFOHEADER& info_header)
+ CapabilityWin(int index,
+ const VideoCaptureFormat& format,
+ const BITMAPINFOHEADER& info_header)
: stream_index(index),
supported_format(format),
- info_header(info_header) {
- }
+ info_header(info_header) {}
const int stream_index;
const VideoCaptureFormat supported_format;
diff --git a/chromium/media/video/capture/win/filter_base_win.cc b/chromium/media/capture/video/win/filter_base_win.cc
index d371f8be0bb..166b8600146 100644
--- a/chromium/media/video/capture/win/filter_base_win.cc
+++ b/chromium/media/capture/video/win/filter_base_win.cc
@@ -2,21 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/video/capture/win/filter_base_win.h"
+#include "media/capture/video/win/filter_base_win.h"
#pragma comment(lib, "strmiids.lib")
namespace media {
// Implement IEnumPins.
-class PinEnumerator final
- : public IEnumPins,
- public base::RefCounted<PinEnumerator> {
+class PinEnumerator final : public IEnumPins,
+ public base::RefCounted<PinEnumerator> {
public:
- explicit PinEnumerator(FilterBase* filter)
- : filter_(filter),
- index_(0) {
- }
+ explicit PinEnumerator(FilterBase* filter) : filter_(filter), index_(0) {}
// IUnknown implementation.
STDMETHOD(QueryInterface)(REFIID iid, void** object_ptr) override {
@@ -48,13 +44,13 @@ class PinEnumerator final
}
if (fetched)
- *fetched = pins_fetched;
+ *fetched = pins_fetched;
return pins_fetched == count ? S_OK : S_FALSE;
}
STDMETHOD(Skip)(ULONG count) override {
- if (filter_->NoOfPins()- index_ > count) {
+ if (filter_->NoOfPins() - index_ > count) {
index_ += count;
return S_OK;
}
@@ -109,7 +105,7 @@ STDMETHODIMP FilterBase::JoinFilterGraph(IFilterGraph* graph, LPCWSTR name) {
return S_OK;
}
-STDMETHODIMP FilterBase::QueryVendorInfo(LPWSTR *pVendorInfo) {
+STDMETHODIMP FilterBase::QueryVendorInfo(LPWSTR* pVendorInfo) {
return S_OK;
}
diff --git a/chromium/media/video/capture/win/filter_base_win.h b/chromium/media/capture/video/win/filter_base_win.h
index 25876edea41..2294742a43b 100644
--- a/chromium/media/video/capture/win/filter_base_win.h
+++ b/chromium/media/capture/video/win/filter_base_win.h
@@ -17,9 +17,7 @@
namespace media {
-class FilterBase
- : public IBaseFilter,
- public base::RefCounted<FilterBase> {
+class FilterBase : public IBaseFilter, public base::RefCounted<FilterBase> {
public:
FilterBase();
diff --git a/chromium/media/video/capture/win/pin_base_win.cc b/chromium/media/capture/video/win/pin_base_win.cc
index 734072588a3..3f521948e20 100644
--- a/chromium/media/video/capture/win/pin_base_win.cc
+++ b/chromium/media/capture/video/win/pin_base_win.cc
@@ -2,21 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/video/capture/win/pin_base_win.h"
+#include "media/capture/video/win/pin_base_win.h"
#include "base/logging.h"
namespace media {
// Implement IEnumPins.
-class TypeEnumerator final
- : public IEnumMediaTypes,
- public base::RefCounted<TypeEnumerator> {
+class TypeEnumerator final : public IEnumMediaTypes,
+ public base::RefCounted<TypeEnumerator> {
public:
- explicit TypeEnumerator(PinBase* pin)
- : pin_(pin),
- index_(0) {
- }
+ explicit TypeEnumerator(PinBase* pin) : pin_(pin), index_(0) {}
// Implement from IUnknown.
STDMETHOD(QueryInterface)(REFIID iid, void** object_ptr) override {
@@ -44,8 +40,8 @@ class TypeEnumerator final
while (types_fetched < count) {
// Allocate AM_MEDIA_TYPE that we will store the media type in.
- AM_MEDIA_TYPE* type = reinterpret_cast<AM_MEDIA_TYPE*>(CoTaskMemAlloc(
- sizeof(AM_MEDIA_TYPE)));
+ AM_MEDIA_TYPE* type = reinterpret_cast<AM_MEDIA_TYPE*>(
+ CoTaskMemAlloc(sizeof(AM_MEDIA_TYPE)));
if (!type) {
FreeAllocatedMediaTypes(types_fetched, types);
return E_OUTOFMEMORY;
@@ -54,8 +50,8 @@ class TypeEnumerator final
// Allocate a VIDEOINFOHEADER and connect it to the AM_MEDIA_TYPE.
type->cbFormat = sizeof(VIDEOINFOHEADER);
- BYTE *format = reinterpret_cast<BYTE*>(CoTaskMemAlloc(
- sizeof(VIDEOINFOHEADER)));
+ BYTE* format =
+ reinterpret_cast<BYTE*>(CoTaskMemAlloc(sizeof(VIDEOINFOHEADER)));
if (!format) {
CoTaskMemFree(type);
FreeAllocatedMediaTypes(types_fetched, types);
@@ -111,8 +107,7 @@ class TypeEnumerator final
int index_;
};
-PinBase::PinBase(IBaseFilter* owner)
- : owner_(owner) {
+PinBase::PinBase(IBaseFilter* owner) : owner_(owner) {
memset(&current_media_type_, 0, sizeof(current_media_type_));
}
diff --git a/chromium/media/video/capture/win/pin_base_win.h b/chromium/media/capture/video/win/pin_base_win.h
index f531ec27bf7..a3ac0200484 100644
--- a/chromium/media/video/capture/win/pin_base_win.h
+++ b/chromium/media/capture/video/win/pin_base_win.h
@@ -17,10 +17,9 @@
namespace media {
-class PinBase
- : public IPin,
- public IMemInputPin,
- public base::RefCounted<PinBase> {
+class PinBase : public IPin,
+ public IMemInputPin,
+ public base::RefCounted<PinBase> {
public:
explicit PinBase(IBaseFilter* owner);
diff --git a/chromium/media/video/capture/win/sink_filter_observer_win.h b/chromium/media/capture/video/win/sink_filter_observer_win.h
index 13451b47e9d..acfc70f36d8 100644
--- a/chromium/media/video/capture/win/sink_filter_observer_win.h
+++ b/chromium/media/capture/video/win/sink_filter_observer_win.h
@@ -14,7 +14,9 @@ class SinkFilterObserver {
public:
// SinkFilter will call this function with all frames delivered to it.
// buffer in only valid during this function call.
- virtual void FrameReceived(const uint8* buffer, int length) = 0;
+ virtual void FrameReceived(const uint8* buffer, int length,
+ base::TimeTicks timestamp) = 0;
+
protected:
virtual ~SinkFilterObserver();
};
diff --git a/chromium/media/video/capture/win/sink_filter_win.cc b/chromium/media/capture/video/win/sink_filter_win.cc
index 664d25b48d2..90c4ae00626 100644
--- a/chromium/media/video/capture/win/sink_filter_win.cc
+++ b/chromium/media/capture/video/win/sink_filter_win.cc
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/video/capture/win/sink_filter_win.h"
+#include "media/capture/video/win/sink_filter_win.h"
#include "base/logging.h"
-#include "media/video/capture/win/sink_input_pin_win.h"
+#include "media/capture/video/win/sink_input_pin_win.h"
namespace media {
@@ -13,21 +13,23 @@ namespace media {
// it is not defined in the DirectShow SDK.
// http://msdn.microsoft.com/en-us/library/dd757532.aspx
// 30323449-0000-0010-8000-00AA00389B71.
-GUID kMediaSubTypeI420 = {
- 0x30323449, 0x0000, 0x0010, { 0x80, 0x00, 0x00, 0xAA, 0x00, 0x38, 0x9B, 0x71}
-};
+GUID kMediaSubTypeI420 = {0x30323449,
+ 0x0000,
+ 0x0010,
+ {0x80, 0x00, 0x00, 0xAA, 0x00, 0x38, 0x9B, 0x71}};
// UYVY synonym with BT709 color components, used in HD video. This variation
// might appear in non-USB capture cards and it's implemented as a normal YUV
// pixel format with the characters HDYC encoded in the first array word.
-GUID kMediaSubTypeHDYC = {
- 0x43594448, 0x0000, 0x0010, { 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-};
+GUID kMediaSubTypeHDYC = {0x43594448,
+ 0x0000,
+ 0x0010,
+ {0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}};
-SinkFilterObserver::~SinkFilterObserver() {}
+SinkFilterObserver::~SinkFilterObserver() {
+}
-SinkFilter::SinkFilter(SinkFilterObserver* observer)
- : input_pin_(NULL) {
+SinkFilter::SinkFilter(SinkFilterObserver* observer) : input_pin_(NULL) {
input_pin_ = new SinkInputPin(this, observer);
}
@@ -37,8 +39,8 @@ void SinkFilter::SetRequestedMediaFormat(VideoPixelFormat pixel_format,
input_pin_->SetRequestedMediaFormat(pixel_format, frame_rate, info_header);
}
-const VideoCaptureFormat& SinkFilter::ResultingFormat() {
- return input_pin_->ResultingFormat();
+const VideoCaptureFormat& SinkFilter::ResultingFormat() const {
+ return input_pin_->resulting_format();
}
size_t SinkFilter::NoOfPins() {
diff --git a/chromium/media/video/capture/win/sink_filter_win.h b/chromium/media/capture/video/win/sink_filter_win.h
index df11eba6d71..ec8d6b4283c 100644
--- a/chromium/media/video/capture/win/sink_filter_win.h
+++ b/chromium/media/capture/video/win/sink_filter_win.h
@@ -12,9 +12,9 @@
#include "base/memory/ref_counted.h"
#include "media/base/video_capture_types.h"
-#include "media/video/capture/video_capture_device.h"
-#include "media/video/capture/win/filter_base_win.h"
-#include "media/video/capture/win/sink_filter_observer_win.h"
+#include "media/capture/video/video_capture_device.h"
+#include "media/capture/video/win/filter_base_win.h"
+#include "media/capture/video/win/sink_filter_observer_win.h"
namespace media {
@@ -31,8 +31,8 @@ extern GUID kMediaSubTypeHDYC;
class SinkInputPin;
-class __declspec(uuid("88cdbbdc-a73b-4afa-acbf-15d5e2ce12c3"))
- SinkFilter : public FilterBase {
+class __declspec(uuid("88cdbbdc-a73b-4afa-acbf-15d5e2ce12c3")) SinkFilter
+ : public FilterBase {
public:
explicit SinkFilter(SinkFilterObserver* observer);
@@ -41,7 +41,7 @@ class __declspec(uuid("88cdbbdc-a73b-4afa-acbf-15d5e2ce12c3"))
const BITMAPINFOHEADER& info_header);
// Returns the format that is negotiated when this
// filter is connected to a media filter.
- const VideoCaptureFormat& ResultingFormat();
+ const VideoCaptureFormat& ResultingFormat() const;
// Implement FilterBase.
size_t NoOfPins() override;
diff --git a/chromium/media/video/capture/win/sink_input_pin_win.cc b/chromium/media/capture/video/win/sink_input_pin_win.cc
index 0edd46f34f5..91baf2a83b5 100644
--- a/chromium/media/video/capture/win/sink_input_pin_win.cc
+++ b/chromium/media/capture/video/win/sink_input_pin_win.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/video/capture/win/sink_input_pin_win.h"
+#include "media/capture/video/win/sink_input_pin_win.h"
#include <cstring>
@@ -16,7 +16,6 @@ namespace media {
const REFERENCE_TIME kSecondsToReferenceTime = 10000000;
-
static DWORD GetArea(const BITMAPINFOHEADER& info_header) {
return info_header.biWidth * info_header.biHeight;
}
@@ -37,21 +36,17 @@ void SinkInputPin::SetRequestedMediaFormat(
resulting_format_.pixel_format = PIXEL_FORMAT_UNKNOWN;
}
-const VideoCaptureFormat& SinkInputPin::ResultingFormat() {
- return resulting_format_;
-}
-
bool SinkInputPin::IsMediaTypeValid(const AM_MEDIA_TYPE* media_type) {
- GUID type = media_type->majortype;
+ const GUID type = media_type->majortype;
if (type != MEDIATYPE_Video)
return false;
- GUID format_type = media_type->formattype;
+ const GUID format_type = media_type->formattype;
if (format_type != FORMAT_VideoInfo)
return false;
// Check for the sub types we support.
- GUID sub_type = media_type->subtype;
+ const GUID sub_type = media_type->subtype;
VIDEOINFOHEADER* pvi =
reinterpret_cast<VIDEOINFOHEADER*>(media_type->pbFormat);
if (pvi == NULL)
@@ -76,6 +71,12 @@ bool SinkInputPin::IsMediaTypeValid(const AM_MEDIA_TYPE* media_type) {
resulting_format_.pixel_format = PIXEL_FORMAT_YUY2;
return true;
}
+ // This format is added after http:/crbug.com/508413.
+ if (sub_type == MEDIASUBTYPE_UYVY &&
+ pvi->bmiHeader.biCompression == MAKEFOURCC('U', 'Y', 'V', 'Y')) {
+ resulting_format_.pixel_format = PIXEL_FORMAT_UYVY;
+ return true;
+ }
if (sub_type == MEDIASUBTYPE_MJPG &&
pvi->bmiHeader.biCompression == MAKEFOURCC('M', 'J', 'P', 'G')) {
resulting_format_.pixel_format = PIXEL_FORMAT_MJPEG;
@@ -91,6 +92,12 @@ bool SinkInputPin::IsMediaTypeValid(const AM_MEDIA_TYPE* media_type) {
resulting_format_.pixel_format = PIXEL_FORMAT_RGB32;
return true;
}
+
+#ifndef NDEBUG
+ WCHAR guid_str[128];
+ StringFromGUID2(sub_type, guid_str, arraysize(guid_str));
+ DVLOG(2) << __FUNCTION__ << " unsupported media type: " << guid_str;
+#endif
return false;
}
@@ -98,7 +105,7 @@ bool SinkInputPin::GetValidMediaType(int index, AM_MEDIA_TYPE* media_type) {
if (media_type->cbFormat < sizeof(VIDEOINFOHEADER))
return false;
- VIDEOINFOHEADER* pvi =
+ VIDEOINFOHEADER* const pvi =
reinterpret_cast<VIDEOINFOHEADER*>(media_type->pbFormat);
ZeroMemory(pvi, sizeof(VIDEOINFOHEADER));
@@ -106,10 +113,8 @@ bool SinkInputPin::GetValidMediaType(int index, AM_MEDIA_TYPE* media_type) {
pvi->bmiHeader.biPlanes = 1;
pvi->bmiHeader.biClrImportant = 0;
pvi->bmiHeader.biClrUsed = 0;
- if (requested_frame_rate_ > 0) {
- pvi->AvgTimePerFrame =
- kSecondsToReferenceTime / requested_frame_rate_;
- }
+ if (requested_frame_rate_ > 0)
+ pvi->AvgTimePerFrame = kSecondsToReferenceTime / requested_frame_rate_;
media_type->majortype = MEDIATYPE_Video;
media_type->formattype = FORMAT_VideoInfo;
@@ -146,6 +151,15 @@ bool SinkInputPin::GetValidMediaType(int index, AM_MEDIA_TYPE* media_type) {
break;
}
case 2: {
+ pvi->bmiHeader.biCompression = MAKEFOURCC('U', 'Y', 'V', 'Y');
+ pvi->bmiHeader.biBitCount = 16;
+ pvi->bmiHeader.biWidth = requested_info_header_.biWidth;
+ pvi->bmiHeader.biHeight = requested_info_header_.biHeight;
+ pvi->bmiHeader.biSizeImage = GetArea(requested_info_header_) * 2;
+ media_type->subtype = MEDIASUBTYPE_UYVY;
+ break;
+ }
+ case 3: {
pvi->bmiHeader.biCompression = BI_RGB;
pvi->bmiHeader.biBitCount = 24;
pvi->bmiHeader.biWidth = requested_info_header_.biWidth;
@@ -154,7 +168,7 @@ bool SinkInputPin::GetValidMediaType(int index, AM_MEDIA_TYPE* media_type) {
media_type->subtype = MEDIASUBTYPE_RGB24;
break;
}
- case 3: {
+ case 4: {
pvi->bmiHeader.biCompression = BI_RGB;
pvi->bmiHeader.biBitCount = 32;
pvi->bmiHeader.biWidth = requested_info_header_.biWidth;
@@ -184,10 +198,21 @@ HRESULT SinkInputPin::Receive(IMediaSample* sample) {
if (FAILED(sample->GetPointer(&buffer)))
return S_FALSE;
- observer_->FrameReceived(buffer, length);
+ REFERENCE_TIME start_time, end_time;
+ base::TimeTicks timestamp;
+ if (SUCCEEDED(sample->GetTime(&start_time, &end_time))) {
+ DCHECK(start_time <= end_time);
+ timestamp += base::TimeDelta::FromMicroseconds(start_time / 10);
+ } else {
+ timestamp = base::TimeTicks::Now();
+ }
+
+
+ observer_->FrameReceived(buffer, length, timestamp);
return S_OK;
}
-SinkInputPin::~SinkInputPin() {}
+SinkInputPin::~SinkInputPin() {
+}
} // namespace media
diff --git a/chromium/media/video/capture/win/sink_input_pin_win.h b/chromium/media/capture/video/win/sink_input_pin_win.h
index 869d001fd5a..2d62783beee 100644
--- a/chromium/media/video/capture/win/sink_input_pin_win.h
+++ b/chromium/media/capture/video/win/sink_input_pin_win.h
@@ -9,9 +9,9 @@
#define MEDIA_VIDEO_CAPTURE_WIN_SINK_INPUT_PIN_WIN_H_
#include "media/base/video_capture_types.h"
-#include "media/video/capture/video_capture_device.h"
-#include "media/video/capture/win/pin_base_win.h"
-#include "media/video/capture/win/sink_filter_win.h"
+#include "media/capture/video/video_capture_device.h"
+#include "media/capture/video/win/pin_base_win.h"
+#include "media/capture/video/win/sink_filter_win.h"
namespace media {
@@ -28,7 +28,9 @@ class SinkInputPin : public PinBase {
const BITMAPINFOHEADER& info_header);
// Returns the capability that is negotiated when this
// pin is connected to a media filter.
- const VideoCaptureFormat& ResultingFormat();
+ const VideoCaptureFormat& resulting_format() const {
+ return resulting_format_;
+ }
// Implement PinBase.
bool IsMediaTypeValid(const AM_MEDIA_TYPE* media_type) override;
diff --git a/chromium/media/video/capture/win/video_capture_device_factory_win.cc b/chromium/media/capture/video/win/video_capture_device_factory_win.cc
index 99b145117ef..5a626f8e561 100644
--- a/chromium/media/video/capture/win/video_capture_device_factory_win.cc
+++ b/chromium/media/capture/video/win/video_capture_device_factory_win.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/video/capture/win/video_capture_device_factory_win.h"
+#include "media/capture/video/win/video_capture_device_factory_win.h"
#include <mfapi.h>
#include <mferror.h>
@@ -18,8 +18,8 @@
#include "base/win/windows_version.h"
#include "media/base/media_switches.h"
#include "media/base/win/mf_initializer.h"
-#include "media/video/capture/win/video_capture_device_mf_win.h"
-#include "media/video/capture/win/video_capture_device_win.h"
+#include "media/capture/video/win/video_capture_device_mf_win.h"
+#include "media/capture/video/win/video_capture_device_win.h"
using base::win::ScopedCoMem;
using base::win::ScopedComPtr;
@@ -37,7 +37,7 @@ enum BlacklistedCameraNames {
BLACKLISTED_CAMERA_IP_CAMERA = 1,
BLACKLISTED_CAMERA_CYBERLINK_WEBCAM_SPLITTER = 2,
BLACKLISTED_CAMERA_EPOCCAM = 3,
- // This one must be last, and equal to the previous enumerated value.
+ // This one must be last, and equal to the previous enumerated value.
BLACKLISTED_CAMERA_MAX = BLACKLISTED_CAMERA_EPOCCAM,
};
@@ -45,21 +45,22 @@ enum BlacklistedCameraNames {
// This prefix is used case-insensitively. This list must be kept in sync with
// |BlacklistedCameraNames|.
static const char* const kBlacklistedCameraNames[] = {
- // Name of a fake DirectShow filter on computers with GTalk installed.
- "Google Camera Adapter",
- // The following software WebCams cause crashes.
- "IP Camera [JPEG/MJPEG]",
- "CyberLink Webcam Splitter",
- "EpocCam",
+ // Name of a fake DirectShow filter on computers with GTalk installed.
+ "Google Camera Adapter",
+ // The following software WebCams cause crashes.
+ "IP Camera [JPEG/MJPEG]",
+ "CyberLink Webcam Splitter",
+ "EpocCam",
};
static_assert(arraysize(kBlacklistedCameraNames) == BLACKLISTED_CAMERA_MAX + 1,
- "kBlacklistedCameraNames should be same size as BlacklistedCameraNames enum");
+ "kBlacklistedCameraNames should be same size as "
+ "BlacklistedCameraNames enum");
static bool LoadMediaFoundationDlls() {
static const wchar_t* const kMfDLLs[] = {
- L"%WINDIR%\\system32\\mf.dll",
- L"%WINDIR%\\system32\\mfplat.dll",
- L"%WINDIR%\\system32\\mfreadwrite.dll",
+ L"%WINDIR%\\system32\\mf.dll",
+ L"%WINDIR%\\system32\\mfplat.dll",
+ L"%WINDIR%\\system32\\mfreadwrite.dll",
};
for (int i = 0; i < arraysize(kMfDLLs); ++i) {
@@ -79,8 +80,10 @@ static bool PrepareVideoCaptureAttributesMediaFoundation(
if (FAILED(MFCreateAttributes(attributes, count)))
return false;
- return SUCCEEDED((*attributes)->SetGUID(MF_DEVSOURCE_ATTRIBUTE_SOURCE_TYPE,
- MF_DEVSOURCE_ATTRIBUTE_SOURCE_TYPE_VIDCAP_GUID));
+ return SUCCEEDED(
+ (*attributes)
+ ->SetGUID(MF_DEVSOURCE_ATTRIBUTE_SOURCE_TYPE,
+ MF_DEVSOURCE_ATTRIBUTE_SOURCE_TYPE_VIDCAP_GUID));
}
static bool CreateVideoCaptureDeviceMediaFoundation(const char* sym_link,
@@ -108,10 +111,11 @@ static bool IsDeviceBlackListed(const std::string& name) {
DCHECK_EQ(BLACKLISTED_CAMERA_MAX + 1,
static_cast<int>(arraysize(kBlacklistedCameraNames)));
for (size_t i = 0; i < arraysize(kBlacklistedCameraNames); ++i) {
- if (base::StartsWithASCII(name, kBlacklistedCameraNames[i], false)) {
+ if (base::StartsWith(name, kBlacklistedCameraNames[i],
+ base::CompareCase::INSENSITIVE_ASCII)) {
DVLOG(1) << "Enumerated blacklisted device: " << name;
- UMA_HISTOGRAM_ENUMERATION("Media.VideoCapture.BlacklistedDevice",
- i, BLACKLISTED_CAMERA_MAX + 1);
+ UMA_HISTOGRAM_ENUMERATION("Media.VideoCapture.BlacklistedDevice", i,
+ BLACKLISTED_CAMERA_MAX + 1);
return true;
}
}
@@ -123,8 +127,8 @@ static void GetDeviceNamesDirectShow(Names* device_names) {
DVLOG(1) << " GetDeviceNamesDirectShow";
ScopedComPtr<ICreateDevEnum> dev_enum;
- HRESULT hr = dev_enum.CreateInstance(CLSID_SystemDeviceEnum, NULL,
- CLSCTX_INPROC);
+ HRESULT hr =
+ dev_enum.CreateInstance(CLSID_SystemDeviceEnum, NULL, CLSCTX_INPROC);
if (FAILED(hr))
return;
@@ -190,10 +194,10 @@ static void GetDeviceNamesMediaFoundation(Names* device_names) {
MF_DEVSOURCE_ATTRIBUTE_SOURCE_TYPE_VIDCAP_SYMBOLIC_LINK, &id,
&id_size);
if (SUCCEEDED(hr)) {
- device_names->push_back(Name(
- base::SysWideToUTF8(std::wstring(name, name_size)),
- base::SysWideToUTF8(std::wstring(id, id_size)),
- Name::MEDIA_FOUNDATION));
+ device_names->push_back(
+ Name(base::SysWideToUTF8(std::wstring(name, name_size)),
+ base::SysWideToUTF8(std::wstring(id, id_size)),
+ Name::MEDIA_FOUNDATION));
}
}
DLOG_IF(ERROR, FAILED(hr)) << "GetAllocatedString failed: "
@@ -206,8 +210,8 @@ static void GetDeviceSupportedFormatsDirectShow(const Name& device,
VideoCaptureFormats* formats) {
DVLOG(1) << "GetDeviceSupportedFormatsDirectShow for " << device.name();
ScopedComPtr<ICreateDevEnum> dev_enum;
- HRESULT hr = dev_enum.CreateInstance(CLSID_SystemDeviceEnum, NULL,
- CLSCTX_INPROC);
+ HRESULT hr =
+ dev_enum.CreateInstance(CLSID_SystemDeviceEnum, NULL, CLSCTX_INPROC);
if (FAILED(hr))
return;
@@ -224,7 +228,6 @@ static void GetDeviceSupportedFormatsDirectShow(const Name& device,
// VFW devices are already skipped previously in GetDeviceNames() enumeration.
base::win::ScopedComPtr<IBaseFilter> capture_filter;
hr = VideoCaptureDeviceWin::GetDeviceFilter(device.capabilities_id(),
- CLSID_VideoInputDeviceCategory,
capture_filter.Receive());
if (!capture_filter.get()) {
DLOG(ERROR) << "Failed to create capture filter: "
@@ -278,12 +281,12 @@ static void GetDeviceSupportedFormatsDirectShow(const Name& device,
continue;
VIDEOINFOHEADER* h =
reinterpret_cast<VIDEOINFOHEADER*>(media_type->pbFormat);
- format.frame_size.SetSize(h->bmiHeader.biWidth,
- h->bmiHeader.biHeight);
+ format.frame_size.SetSize(h->bmiHeader.biWidth, h->bmiHeader.biHeight);
// Trust the frame rate from the VIDEOINFOHEADER.
- format.frame_rate = (h->AvgTimePerFrame > 0) ?
- kSecondsToReferenceTime / static_cast<float>(h->AvgTimePerFrame) :
- 0.0f;
+ format.frame_rate =
+ (h->AvgTimePerFrame > 0)
+ ? kSecondsToReferenceTime / static_cast<float>(h->AvgTimePerFrame)
+ : 0.0f;
formats->push_back(format);
DVLOG(1) << device.name() << " " << VideoCaptureFormat::ToString(format);
}
@@ -311,8 +314,8 @@ static void GetDeviceSupportedFormatsMediaFoundation(
DWORD stream_index = 0;
ScopedComPtr<IMFMediaType> type;
- while (SUCCEEDED(reader->GetNativeMediaType(
- kFirstVideoStream, stream_index, type.Receive()))) {
+ while (SUCCEEDED(reader->GetNativeMediaType(kFirstVideoStream, stream_index,
+ type.Receive()))) {
UINT32 width, height;
hr = MFGetAttributeSize(type.get(), MF_MT_FRAME_SIZE, &width, &height);
if (FAILED(hr)) {
@@ -331,14 +334,13 @@ static void GetDeviceSupportedFormatsMediaFoundation(
<< logging::SystemErrorCodeToString(hr);
return;
}
- capture_format.frame_rate = denominator
- ? static_cast<float>(numerator) / denominator : 0.0f;
+ capture_format.frame_rate =
+ denominator ? static_cast<float>(numerator) / denominator : 0.0f;
GUID type_guid;
hr = type->GetGUID(MF_MT_SUBTYPE, &type_guid);
if (FAILED(hr)) {
- DLOG(ERROR) << "GetGUID failed: "
- << logging::SystemErrorCodeToString(hr);
+ DLOG(ERROR) << "GetGUID failed: " << logging::SystemErrorCodeToString(hr);
return;
}
VideoCaptureDeviceMFWin::FormatFromGuid(type_guid,
@@ -374,10 +376,11 @@ VideoCaptureDeviceFactoryWin::VideoCaptureDeviceFactoryWin() {
// can also be forced if appropriate flag is set and we are in Windows 7 or
// 8 in non-Metro mode.
const base::CommandLine* cmd_line = base::CommandLine::ForCurrentProcess();
- use_media_foundation_ = (base::win::IsMetroProcess() &&
- !cmd_line->HasSwitch(switches::kForceDirectShowVideoCapture)) ||
- (base::win::GetVersion() >= base::win::VERSION_WIN7 &&
- cmd_line->HasSwitch(switches::kForceMediaFoundationVideoCapture));
+ use_media_foundation_ =
+ (base::win::IsMetroProcess() &&
+ !cmd_line->HasSwitch(switches::kForceDirectShowVideoCapture)) ||
+ (base::win::GetVersion() >= base::win::VERSION_WIN7 &&
+ cmd_line->HasSwitch(switches::kForceMediaFoundationVideoCapture));
}
scoped_ptr<VideoCaptureDevice> VideoCaptureDeviceFactoryWin::Create(
@@ -407,11 +410,10 @@ scoped_ptr<VideoCaptureDevice> VideoCaptureDeviceFactoryWin::Create(
void VideoCaptureDeviceFactoryWin::GetDeviceNames(Names* device_names) {
DCHECK(thread_checker_.CalledOnValidThread());
- if (use_media_foundation_) {
+ if (use_media_foundation_)
GetDeviceNamesMediaFoundation(device_names);
- } else {
+ else
GetDeviceNamesDirectShow(device_names);
- }
}
void VideoCaptureDeviceFactoryWin::GetDeviceSupportedFormats(
diff --git a/chromium/media/video/capture/win/video_capture_device_factory_win.h b/chromium/media/capture/video/win/video_capture_device_factory_win.h
index f76fe63db08..151292554de 100644
--- a/chromium/media/video/capture/win/video_capture_device_factory_win.h
+++ b/chromium/media/capture/video/win/video_capture_device_factory_win.h
@@ -7,14 +7,14 @@
#ifndef MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURE_DEVICE_FACTORY_WIN_H_
#define MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURE_DEVICE_FACTORY_WIN_H_
-#include "media/video/capture/video_capture_device_factory.h"
+#include "media/capture/video/video_capture_device_factory.h"
namespace media {
// Extension of VideoCaptureDeviceFactory to create and manipulate Windows
// devices, via either DirectShow or MediaFoundation APIs.
-class MEDIA_EXPORT VideoCaptureDeviceFactoryWin :
- public VideoCaptureDeviceFactory {
+class MEDIA_EXPORT VideoCaptureDeviceFactoryWin
+ : public VideoCaptureDeviceFactory {
public:
static bool PlatformSupportsMediaFoundation();
diff --git a/chromium/media/video/capture/win/video_capture_device_mf_win.cc b/chromium/media/capture/video/win/video_capture_device_mf_win.cc
index 95ade7161e4..c55cf4aa0be 100644
--- a/chromium/media/video/capture/win/video_capture_device_mf_win.cc
+++ b/chromium/media/capture/video/win/video_capture_device_mf_win.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/video/capture/win/video_capture_device_mf_win.h"
+#include "media/capture/video/win/video_capture_device_mf_win.h"
#include <mfapi.h>
#include <mferror.h>
@@ -13,7 +13,7 @@
#include "base/synchronization/waitable_event.h"
#include "base/win/scoped_co_mem.h"
#include "base/win/windows_version.h"
-#include "media/video/capture/win/capability_list_win.h"
+#include "media/capture/video/win/capability_list_win.h"
using base::win::ScopedCoMem;
using base::win::ScopedComPtr;
@@ -37,7 +37,7 @@ static bool GetFrameSize(IMFMediaType* type, gfx::Size* frame_size) {
static bool GetFrameRate(IMFMediaType* type, float* frame_rate) {
UINT32 numerator, denominator;
if (FAILED(MFGetAttributeRatio(type, MF_MT_FRAME_RATE, &numerator,
- &denominator))||
+ &denominator)) ||
!denominator) {
return false;
}
@@ -78,18 +78,14 @@ HRESULT FillCapabilities(IMFSourceReader* source,
return (hr == MF_E_NO_MORE_TYPES) ? S_OK : hr;
}
-
class MFReaderCallback final
: public base::RefCountedThreadSafe<MFReaderCallback>,
public IMFSourceReaderCallback {
public:
MFReaderCallback(VideoCaptureDeviceMFWin* observer)
- : observer_(observer), wait_event_(NULL) {
- }
+ : observer_(observer), wait_event_(NULL) {}
- void SetSignalOnFlush(base::WaitableEvent* event) {
- wait_event_ = event;
- }
+ void SetSignalOnFlush(base::WaitableEvent* event) { wait_event_ = event; }
STDMETHOD(QueryInterface)(REFIID riid, void** object) override {
if (riid != IID_IUnknown && riid != IID_IMFSourceReaderCallback)
@@ -165,13 +161,13 @@ bool VideoCaptureDeviceMFWin::FormatFromGuid(const GUID& guid,
const GUID& guid;
const VideoPixelFormat format;
} static const kFormatMap[] = {
- { MFVideoFormat_I420, PIXEL_FORMAT_I420 },
- { MFVideoFormat_YUY2, PIXEL_FORMAT_YUY2 },
- { MFVideoFormat_UYVY, PIXEL_FORMAT_UYVY },
- { MFVideoFormat_RGB24, PIXEL_FORMAT_RGB24 },
- { MFVideoFormat_ARGB32, PIXEL_FORMAT_ARGB },
- { MFVideoFormat_MJPG, PIXEL_FORMAT_MJPEG },
- { MFVideoFormat_YV12, PIXEL_FORMAT_YV12 },
+ {MFVideoFormat_I420, PIXEL_FORMAT_I420},
+ {MFVideoFormat_YUY2, PIXEL_FORMAT_YUY2},
+ {MFVideoFormat_UYVY, PIXEL_FORMAT_UYVY},
+ {MFVideoFormat_RGB24, PIXEL_FORMAT_RGB24},
+ {MFVideoFormat_ARGB32, PIXEL_FORMAT_ARGB},
+ {MFVideoFormat_MJPG, PIXEL_FORMAT_MJPEG},
+ {MFVideoFormat_YV12, PIXEL_FORMAT_YV12},
};
for (int i = 0; i < arraysize(kFormatMap); ++i) {
@@ -252,8 +248,8 @@ void VideoCaptureDeviceMFWin::AllocateAndStart(
if (SUCCEEDED(hr)) {
hr = reader_->SetCurrentMediaType(kFirstVideoStream, NULL, type.get());
if (SUCCEEDED(hr)) {
- hr = reader_->ReadSample(kFirstVideoStream, 0, NULL, NULL, NULL,
- NULL);
+ hr =
+ reader_->ReadSample(kFirstVideoStream, 0, NULL, NULL, NULL, NULL);
if (SUCCEEDED(hr)) {
capture_format_ = found_capability.supported_format;
capture_ = true;
@@ -277,8 +273,8 @@ void VideoCaptureDeviceMFWin::StopAndDeAllocate() {
if (capture_) {
capture_ = false;
callback_->SetSignalOnFlush(&flushed);
- wait = SUCCEEDED(reader_->Flush(
- static_cast<DWORD>(MF_SOURCE_READER_ALL_STREAMS)));
+ wait = SUCCEEDED(
+ reader_->Flush(static_cast<DWORD>(MF_SOURCE_READER_ALL_STREAMS)));
if (!wait) {
callback_->SetSignalOnFlush(NULL);
}
@@ -302,8 +298,8 @@ void VideoCaptureDeviceMFWin::OnIncomingCapturedData(
const base::TimeTicks& time_stamp) {
base::AutoLock lock(lock_);
if (data && client_.get()) {
- client_->OnIncomingCapturedData(
- data, length, capture_format_, rotation, time_stamp);
+ client_->OnIncomingCapturedData(data, length, capture_format_, rotation,
+ time_stamp);
}
if (capture_) {
diff --git a/chromium/media/video/capture/win/video_capture_device_mf_win.h b/chromium/media/capture/video/win/video_capture_device_mf_win.h
index eeb7edf8a4c..7894864d48b 100644
--- a/chromium/media/video/capture/win/video_capture_device_mf_win.h
+++ b/chromium/media/capture/video/win/video_capture_device_mf_win.h
@@ -18,7 +18,7 @@
#include "base/threading/non_thread_safe.h"
#include "base/win/scoped_comptr.h"
#include "media/base/media_export.h"
-#include "media/video/capture/video_capture_device.h"
+#include "media/capture/video/video_capture_device.h"
interface IMFSourceReader;
@@ -29,9 +29,8 @@ class MFReaderCallback;
const DWORD kFirstVideoStream =
static_cast<DWORD>(MF_SOURCE_READER_FIRST_VIDEO_STREAM);
-class MEDIA_EXPORT VideoCaptureDeviceMFWin
- : public base::NonThreadSafe,
- public VideoCaptureDevice {
+class MEDIA_EXPORT VideoCaptureDeviceMFWin : public base::NonThreadSafe,
+ public VideoCaptureDevice {
public:
static bool FormatFromGuid(const GUID& guid, VideoPixelFormat* format);
diff --git a/chromium/media/video/capture/win/video_capture_device_win.cc b/chromium/media/capture/video/win/video_capture_device_win.cc
index 320cba2a72f..055fc2105f1 100644
--- a/chromium/media/video/capture/win/video_capture_device_win.cc
+++ b/chromium/media/capture/video/win/video_capture_device_win.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/video/capture/win/video_capture_device_win.h"
+#include "media/capture/video/win/video_capture_device_win.h"
#include <ks.h>
#include <ksmedia.h>
@@ -13,7 +13,6 @@
#include "base/strings/sys_string_conversions.h"
#include "base/win/scoped_co_mem.h"
#include "base/win/scoped_variant.h"
-#include "media/video/capture/win/video_capture_device_mf_win.h"
using base::win::ScopedCoMem;
using base::win::ScopedComPtr;
@@ -43,17 +42,13 @@ bool PinMatchesCategory(IPin* pin, REFGUID category) {
bool PinMatchesMajorType(IPin* pin, REFGUID major_type) {
DCHECK(pin);
AM_MEDIA_TYPE connection_media_type;
- HRESULT hr = pin->ConnectionMediaType(&connection_media_type);
+ const HRESULT hr = pin->ConnectionMediaType(&connection_media_type);
return SUCCEEDED(hr) && connection_media_type.majortype == major_type;
}
// Finds and creates a DirectShow Video Capture filter matching the |device_id|.
-// |class_id| is usually CLSID_VideoInputDeviceCategory for standard DirectShow
-// devices but might also be AM_KSCATEGORY_CAPTURE or AM_KSCATEGORY_CROSSBAR, to
-// enumerate WDM capture devices or WDM crossbars, respectively.
// static
HRESULT VideoCaptureDeviceWin::GetDeviceFilter(const std::string& device_id,
- const CLSID device_class_id,
IBaseFilter** filter) {
DCHECK(filter);
@@ -64,37 +59,36 @@ HRESULT VideoCaptureDeviceWin::GetDeviceFilter(const std::string& device_id,
return hr;
ScopedComPtr<IEnumMoniker> enum_moniker;
- hr = dev_enum->CreateClassEnumerator(device_class_id, enum_moniker.Receive(),
- 0);
+ hr = dev_enum->CreateClassEnumerator(CLSID_VideoInputDeviceCategory,
+ enum_moniker.Receive(), 0);
// CreateClassEnumerator returns S_FALSE on some Windows OS
// when no camera exist. Therefore the FAILED macro can't be used.
if (hr != S_OK)
- return NULL;
+ return hr;
- ScopedComPtr<IMoniker> moniker;
ScopedComPtr<IBaseFilter> capture_filter;
- DWORD fetched = 0;
- while (enum_moniker->Next(1, moniker.Receive(), &fetched) == S_OK) {
+ for (ScopedComPtr<IMoniker> moniker;
+ enum_moniker->Next(1, moniker.Receive(), NULL) == S_OK;
+ moniker.Release()) {
ScopedComPtr<IPropertyBag> prop_bag;
hr = moniker->BindToStorage(0, 0, IID_IPropertyBag, prop_bag.ReceiveVoid());
- if (FAILED(hr)) {
- moniker.Release();
+ if (FAILED(hr))
continue;
- }
- // Find the device via DevicePath, Description or FriendlyName, whichever is
- // available first.
+ // Find |device_id| via DevicePath, Description or FriendlyName, whichever
+ // is available first and is a VT_BSTR (i.e. String) type.
static const wchar_t* kPropertyNames[] = {
L"DevicePath", L"Description", L"FriendlyName"};
ScopedVariant name;
for (const auto* property_name : kPropertyNames) {
- if (name.type() != VT_BSTR)
- prop_bag->Read(property_name, name.Receive(), 0);
+ prop_bag->Read(property_name, name.Receive(), 0);
+ if (name.type() == VT_BSTR)
+ break;
}
if (name.type() == VT_BSTR) {
- std::string device_path(base::SysWideToUTF8(V_BSTR(name.ptr())));
+ const std::string device_path(base::SysWideToUTF8(V_BSTR(name.ptr())));
if (device_path.compare(device_id) == 0) {
// We have found the requested device
hr = moniker->BindToObject(0, 0, IID_IBaseFilter,
@@ -104,7 +98,6 @@ HRESULT VideoCaptureDeviceWin::GetDeviceFilter(const std::string& device_id,
break;
}
}
- moniker.Release();
}
*filter = capture_filter.Detach();
@@ -147,12 +140,13 @@ ScopedComPtr<IPin> VideoCaptureDeviceWin::GetPin(IBaseFilter* filter,
}
// static
-VideoPixelFormat VideoCaptureDeviceWin::TranslateMediaSubtypeToPixelFormat(
+VideoPixelFormat
+VideoCaptureDeviceWin::TranslateMediaSubtypeToPixelFormat(
const GUID& sub_type) {
static struct {
const GUID& sub_type;
VideoPixelFormat format;
- } pixel_formats[] = {
+ } const kMediaSubtypeToPixelFormatCorrespondence[] = {
{kMediaSubTypeI420, PIXEL_FORMAT_I420},
{MEDIASUBTYPE_IYUV, PIXEL_FORMAT_I420},
{MEDIASUBTYPE_RGB24, PIXEL_FORMAT_RGB24},
@@ -162,9 +156,9 @@ VideoPixelFormat VideoCaptureDeviceWin::TranslateMediaSubtypeToPixelFormat(
{MEDIASUBTYPE_ARGB32, PIXEL_FORMAT_ARGB},
{kMediaSubTypeHDYC, PIXEL_FORMAT_UYVY},
};
- for (size_t i = 0; i < arraysize(pixel_formats); ++i) {
- if (sub_type == pixel_formats[i].sub_type)
- return pixel_formats[i].format;
+ for (const auto& pixel_format : kMediaSubtypeToPixelFormatCorrespondence) {
+ if (sub_type == pixel_format.sub_type)
+ return pixel_format.format;
}
#ifndef NDEBUG
WCHAR guid_str[128];
@@ -215,11 +209,12 @@ void VideoCaptureDeviceWin::ScopedMediaType::DeleteMediaType(
VideoCaptureDeviceWin::VideoCaptureDeviceWin(const Name& device_name)
: device_name_(device_name), state_(kIdle) {
- DetachFromThread();
+ // TODO(mcasas): Check that CoInitializeEx() has been called with the
+ // appropriate Apartment model, i.e., Single Threaded.
}
VideoCaptureDeviceWin::~VideoCaptureDeviceWin() {
- DCHECK(CalledOnValidThread());
+ DCHECK(thread_checker_.CalledOnValidThread());
if (media_control_.get())
media_control_->Stop();
@@ -238,11 +233,10 @@ VideoCaptureDeviceWin::~VideoCaptureDeviceWin() {
}
bool VideoCaptureDeviceWin::Init() {
- DCHECK(CalledOnValidThread());
+ DCHECK(thread_checker_.CalledOnValidThread());
HRESULT hr;
- hr = GetDeviceFilter(device_name_.id(), CLSID_VideoInputDeviceCategory,
- capture_filter_.Receive());
+ hr = GetDeviceFilter(device_name_.id(), capture_filter_.Receive());
if (!capture_filter_.get()) {
DLOG(ERROR) << "Failed to create capture filter: "
@@ -260,7 +254,7 @@ bool VideoCaptureDeviceWin::Init() {
// Create the sink filter used for receiving Captured frames.
sink_filter_ = new SinkFilter(this);
if (sink_filter_.get() == NULL) {
- DLOG(ERROR) << "Failed to create send filter";
+ DLOG(ERROR) << "Failed to create sink filter";
return false;
}
@@ -305,7 +299,7 @@ bool VideoCaptureDeviceWin::Init() {
hr = graph_builder_->AddFilter(sink_filter_.get(), NULL);
if (FAILED(hr)) {
- DLOG(ERROR) << "Failed to add the send filter to the graph: "
+ DLOG(ERROR) << "Failed to add the sink filter to the graph: "
<< logging::SystemErrorCodeToString(hr);
return false;
}
@@ -335,7 +329,7 @@ bool VideoCaptureDeviceWin::Init() {
void VideoCaptureDeviceWin::AllocateAndStart(
const VideoCaptureParams& params,
scoped_ptr<VideoCaptureDevice::Client> client) {
- DCHECK(CalledOnValidThread());
+ DCHECK(thread_checker_.CalledOnValidThread());
if (state_ != kIdle)
return;
@@ -347,8 +341,9 @@ void VideoCaptureDeviceWin::AllocateAndStart(
// Reduce the frame rate if the requested frame rate is lower
// than the capability.
- float frame_rate = std::min(found_capability.supported_format.frame_rate,
- params.requested_format.frame_rate);
+ const float frame_rate =
+ std::min(params.requested_format.frame_rate,
+ found_capability.supported_format.frame_rate);
ScopedComPtr<IAMStreamConfig> stream_config;
HRESULT hr = output_capture_pin_.QueryInterface(stream_config.Receive());
@@ -394,7 +389,7 @@ void VideoCaptureDeviceWin::AllocateAndStart(
return;
}
- SetAntiFlickerInCaptureFilter();
+ SetAntiFlickerInCaptureFilter(params);
if (media_type->subtype == kMediaSubTypeHDYC) {
// HDYC pixel format, used by the DeckLink capture card, needs an AVI
@@ -414,8 +409,7 @@ void VideoCaptureDeviceWin::AllocateAndStart(
hr = media_control_->Pause();
if (FAILED(hr)) {
SetErrorState(
- "Failed to Pause the Capture device. "
- "Is it already occupied?");
+ "Failed to pause the Capture device, is it already occupied?");
return;
}
@@ -434,7 +428,7 @@ void VideoCaptureDeviceWin::AllocateAndStart(
}
void VideoCaptureDeviceWin::StopAndDeAllocate() {
- DCHECK(CalledOnValidThread());
+ DCHECK(thread_checker_.CalledOnValidThread());
if (state_ != kCapturing)
return;
@@ -452,13 +446,16 @@ void VideoCaptureDeviceWin::StopAndDeAllocate() {
}
// Implements SinkFilterObserver::SinkFilterObserver.
-void VideoCaptureDeviceWin::FrameReceived(const uint8* buffer, int length) {
+void VideoCaptureDeviceWin::FrameReceived(
+ const uint8* buffer,
+ int length,
+ base::TimeTicks timestamp) {
client_->OnIncomingCapturedData(buffer, length, capture_format_, 0,
- base::TimeTicks::Now());
+ timestamp);
}
bool VideoCaptureDeviceWin::CreateCapabilityMap() {
- DCHECK(CalledOnValidThread());
+ DCHECK(thread_checker_.CalledOnValidThread());
ScopedComPtr<IAMStreamConfig> stream_config;
HRESULT hr = output_capture_pin_.QueryInterface(stream_config.Receive());
if (FAILED(hr)) {
@@ -540,10 +537,13 @@ bool VideoCaptureDeviceWin::CreateCapabilityMap() {
}
// Set the power line frequency removal in |capture_filter_| if available.
-void VideoCaptureDeviceWin::SetAntiFlickerInCaptureFilter() {
- const int power_line_frequency = GetPowerLineFrequencyForLocation();
- if (power_line_frequency != kPowerLine50Hz &&
- power_line_frequency != kPowerLine60Hz) {
+void VideoCaptureDeviceWin::SetAntiFlickerInCaptureFilter(
+ const VideoCaptureParams& params) {
+ const int power_line_frequency = GetPowerLineFrequency(params);
+ if (power_line_frequency !=
+ static_cast<int>(media::PowerLineFrequency::FREQUENCY_50HZ) &&
+ power_line_frequency !=
+ static_cast<int>(media::PowerLineFrequency::FREQUENCY_60HZ)) {
return;
}
ScopedComPtr<IKsPropertySet> ks_propset;
@@ -559,7 +559,10 @@ void VideoCaptureDeviceWin::SetAntiFlickerInCaptureFilter() {
data.Property.Set = PROPSETID_VIDCAP_VIDEOPROCAMP;
data.Property.Id = KSPROPERTY_VIDEOPROCAMP_POWERLINE_FREQUENCY;
data.Property.Flags = KSPROPERTY_TYPE_SET;
- data.Value = (power_line_frequency == kPowerLine50Hz) ? 1 : 2;
+ data.Value = (power_line_frequency ==
+ static_cast<int>(media::PowerLineFrequency::FREQUENCY_50HZ))
+ ? 1
+ : 2;
data.Flags = KSPROPERTY_VIDEOPROCAMP_FLAGS_MANUAL;
hr = ks_propset->Set(PROPSETID_VIDCAP_VIDEOPROCAMP,
KSPROPERTY_VIDEOPROCAMP_POWERLINE_FREQUENCY, &data,
@@ -573,7 +576,7 @@ void VideoCaptureDeviceWin::SetAntiFlickerInCaptureFilter() {
}
void VideoCaptureDeviceWin::SetErrorState(const std::string& reason) {
- DCHECK(CalledOnValidThread());
+ DCHECK(thread_checker_.CalledOnValidThread());
state_ = kError;
client_->OnError(reason);
}
diff --git a/chromium/media/video/capture/win/video_capture_device_win.h b/chromium/media/capture/video/win/video_capture_device_win.h
index 9315754f687..dec02c1e0f6 100644
--- a/chromium/media/video/capture/win/video_capture_device_win.h
+++ b/chromium/media/capture/video/win/video_capture_device_win.h
@@ -2,9 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Windows specific implementation of VideoCaptureDevice.
-// DirectShow is used for capturing. DirectShow provide its own threads
-// for capturing.
+// Windows specific implementation of VideoCaptureDevice. DirectShow is used for
+// capturing. DirectShow provide its own threads for capturing.
#ifndef MEDIA_VIDEO_CAPTURE_WIN_VIDEO_CAPTURE_DEVICE_WIN_H_
#define MEDIA_VIDEO_CAPTURE_WIN_VIDEO_CAPTURE_DEVICE_WIN_H_
@@ -16,22 +15,19 @@
#include <map>
#include <string>
-#include "base/threading/non_thread_safe.h"
-#include "base/threading/thread.h"
+#include "base/threading/thread_checker.h"
#include "base/win/scoped_comptr.h"
#include "media/base/video_capture_types.h"
-#include "media/video/capture/video_capture_device.h"
-#include "media/video/capture/win/capability_list_win.h"
-#include "media/video/capture/win/sink_filter_win.h"
-#include "media/video/capture/win/sink_input_pin_win.h"
+#include "media/capture/video/video_capture_device.h"
+#include "media/capture/video/win/capability_list_win.h"
+#include "media/capture/video/win/sink_filter_win.h"
+#include "media/capture/video/win/sink_input_pin_win.h"
namespace media {
// All the methods in the class can only be run on a COM initialized thread.
-class VideoCaptureDeviceWin
- : public base::NonThreadSafe,
- public VideoCaptureDevice,
- public SinkFilterObserver {
+class VideoCaptureDeviceWin : public VideoCaptureDevice,
+ public SinkFilterObserver {
public:
// A utility class that wraps the AM_MEDIA_TYPE type and guarantees that
// we free the structure when exiting the scope. DCHECKing is also done to
@@ -54,7 +50,6 @@ class VideoCaptureDeviceWin
};
static HRESULT GetDeviceFilter(const std::string& device_id,
- const CLSID device_class_id,
IBaseFilter** filter);
static base::win::ScopedComPtr<IPin> GetPin(IBaseFilter* filter,
PIN_DIRECTION pin_dir,
@@ -75,20 +70,21 @@ class VideoCaptureDeviceWin
private:
enum InternalState {
- kIdle, // The device driver is opened but camera is not in use.
+ kIdle, // The device driver is opened but camera is not in use.
kCapturing, // Video is being captured.
- kError // Error accessing HW functions.
- // User needs to recover by destroying the object.
+ kError // Error accessing HW functions.
+ // User needs to recover by destroying the object.
};
// Implements SinkFilterObserver.
- void FrameReceived(const uint8* buffer, int length) override;
+ void FrameReceived(const uint8* buffer, int length,
+ base::TimeTicks timestamp) override;
bool CreateCapabilityMap();
- void SetAntiFlickerInCaptureFilter();
+ void SetAntiFlickerInCaptureFilter(const VideoCaptureParams& params);
void SetErrorState(const std::string& reason);
- Name device_name_;
+ const Name device_name_;
InternalState state_;
scoped_ptr<VideoCaptureDevice::Client> client_;
@@ -107,6 +103,8 @@ class VideoCaptureDeviceWin
CapabilityList capabilities_;
VideoCaptureFormat capture_format_;
+ base::ThreadChecker thread_checker_;
+
DISALLOW_IMPLICIT_CONSTRUCTORS(VideoCaptureDeviceWin);
};
diff --git a/chromium/media/capture/webm_muxer.cc b/chromium/media/capture/webm_muxer.cc
new file mode 100644
index 00000000000..c59bbae8052
--- /dev/null
+++ b/chromium/media/capture/webm_muxer.cc
@@ -0,0 +1,128 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/capture/webm_muxer.h"
+
+#include "base/bind.h"
+#include "media/base/limits.h"
+#include "media/base/video_frame.h"
+#include "ui/gfx/geometry/size.h"
+
+namespace media {
+
+static double GetFrameRate(const scoped_refptr<VideoFrame>& video_frame) {
+ const double kZeroFrameRate = 0.0;
+ const double kDefaultFrameRate = 30.0;
+
+ double frame_rate = kDefaultFrameRate;
+ if (!video_frame->metadata()->GetDouble(
+ VideoFrameMetadata::FRAME_RATE, &frame_rate) ||
+ frame_rate <= kZeroFrameRate ||
+ frame_rate > media::limits::kMaxFramesPerSecond) {
+ frame_rate = kDefaultFrameRate;
+ }
+ return frame_rate;
+}
+
+WebmMuxer::WebmMuxer(const WriteDataCB& write_data_callback)
+ : track_index_(0),
+ write_data_callback_(write_data_callback),
+ position_(0) {
+ DCHECK(!write_data_callback_.is_null());
+ // Creation is done on a different thread than main activities.
+ thread_checker_.DetachFromThread();
+}
+
+WebmMuxer::~WebmMuxer() {
+ // No need to segment_.Finalize() since is not Seekable(), i.e. a live
+ // stream, but is a good practice.
+ DCHECK(thread_checker_.CalledOnValidThread());
+ segment_.Finalize();
+}
+
+void WebmMuxer::OnEncodedVideo(const scoped_refptr<VideoFrame>& video_frame,
+ scoped_ptr<std::string> encoded_data,
+ base::TimeTicks timestamp,
+ bool is_key_frame) {
+ DVLOG(1) << __FUNCTION__ << " - " << encoded_data->size() << "B";
+ DCHECK(thread_checker_.CalledOnValidThread());
+ if (!track_index_) {
+ // |track_index_|, cannot be zero (!), initialize WebmMuxer in that case.
+ // http://www.matroska.org/technical/specs/index.html#Tracks
+ AddVideoTrack(video_frame->visible_rect().size(),
+ GetFrameRate(video_frame));
+ first_frame_timestamp_ = timestamp;
+ }
+ segment_.AddFrame(reinterpret_cast<const uint8_t*>(encoded_data->data()),
+ encoded_data->size(),
+ track_index_,
+ (timestamp - first_frame_timestamp_).InMicroseconds() *
+ base::Time::kNanosecondsPerMicrosecond,
+ is_key_frame);
+}
+
+void WebmMuxer::AddVideoTrack(const gfx::Size& frame_size, double frame_rate) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK_EQ(track_index_, 0u) << "WebmMuxer can only be initialised once.";
+
+ segment_.Init(this);
+ segment_.set_mode(mkvmuxer::Segment::kLive);
+ segment_.OutputCues(false);
+
+ mkvmuxer::SegmentInfo* const info = segment_.GetSegmentInfo();
+ info->set_writing_app("Chrome");
+ info->set_muxing_app("Chrome");
+
+ track_index_ =
+ segment_.AddVideoTrack(frame_size.width(), frame_size.height(), 0);
+ DCHECK_GT(track_index_, 0u);
+
+ mkvmuxer::VideoTrack* const video_track =
+ reinterpret_cast<mkvmuxer::VideoTrack*>(
+ segment_.GetTrackByNumber(track_index_));
+ DCHECK(video_track);
+ video_track->set_codec_id(mkvmuxer::Tracks::kVp8CodecId);
+ DCHECK_EQ(video_track->crop_right(), 0ull);
+ DCHECK_EQ(video_track->crop_left(), 0ull);
+ DCHECK_EQ(video_track->crop_top(), 0ull);
+ DCHECK_EQ(video_track->crop_bottom(), 0ull);
+
+ video_track->set_frame_rate(frame_rate);
+ video_track->set_default_duration(base::Time::kNanosecondsPerSecond /
+ frame_rate);
+ // Segment's timestamps should be in milliseconds, DCHECK it. See
+ // http://www.webmproject.org/docs/container/#muxer-guidelines
+ DCHECK_EQ(segment_.GetSegmentInfo()->timecode_scale(), 1000000ull);
+}
+
+mkvmuxer::int32 WebmMuxer::Write(const void* buf, mkvmuxer::uint32 len) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(buf);
+ write_data_callback_.Run(base::StringPiece(reinterpret_cast<const char*>(buf),
+ len));
+ position_ += len;
+ return 0;
+}
+
+mkvmuxer::int64 WebmMuxer::Position() const {
+ return position_.ValueOrDie();
+}
+
+mkvmuxer::int32 WebmMuxer::Position(mkvmuxer::int64 position) {
+ // The stream is not Seekable() so indicate we cannot set the position.
+ return -1;
+}
+
+bool WebmMuxer::Seekable() const {
+ return false;
+}
+
+void WebmMuxer::ElementStartNotify(mkvmuxer::uint64 element_id,
+ mkvmuxer::int64 position) {
+ // This method gets pinged before items are sent to |write_data_callback_|.
+ DCHECK_GE(position, position_.ValueOrDefault(0))
+ << "Can't go back in a live WebM stream.";
+}
+
+} // namespace media
diff --git a/chromium/media/capture/webm_muxer.h b/chromium/media/capture/webm_muxer.h
new file mode 100644
index 00000000000..2d25c46a66a
--- /dev/null
+++ b/chromium/media/capture/webm_muxer.h
@@ -0,0 +1,93 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_FILTERS_LIBWEBM_MUXER_H_
+#define MEDIA_FILTERS_LIBWEBM_MUXER_H_
+
+#include <set>
+
+#include "base/callback.h"
+#include "base/numerics/safe_math.h"
+#include "base/strings/string_piece.h"
+#include "base/threading/thread_checker.h"
+#include "base/time/time.h"
+#include "media/base/media_export.h"
+#include "third_party/libwebm/source/mkvmuxer.hpp"
+
+namespace gfx {
+class Size;
+} // namespace gfx
+
+namespace media {
+
+class VideoFrame;
+
+// Adapter class to manage a WebM container [1], a simplified version of a
+// Matroska container [2], composed of an EBML header, and a single Segment
+// including at least a Track Section and a number of SimpleBlocks each
+// containing a single encoded video frame. WebM container has no Trailer.
+// Clients will push encoded VPx video frames one by one via OnEncodedVideo().
+// libwebm will eventually ping the WriteDataCB passed on contructor with the
+// wrapped encoded data.
+// WebmMuxer is designed for single thread use throughout.
+// [1] http://www.webmproject.org/docs/container/
+// [2] http://www.matroska.org/technical/specs/index.html
+// TODO(mcasas): Add support for Audio muxing.
+class MEDIA_EXPORT WebmMuxer : public NON_EXPORTED_BASE(mkvmuxer::IMkvWriter) {
+ public:
+ // Callback to be called when WebmMuxer is ready to write a chunk of data,
+ // either any file header or a SingleBlock.
+ using WriteDataCB = base::Callback<void(base::StringPiece)>;
+
+ explicit WebmMuxer(const WriteDataCB& write_data_callback);
+ ~WebmMuxer() override;
+
+ // Adds a |video_frame| with |encoded_data.data()| to WebM Segment.
+ void OnEncodedVideo(const scoped_refptr<VideoFrame>& video_frame,
+ scoped_ptr<std::string> encoded_data,
+ base::TimeTicks timestamp,
+ bool is_key_frame);
+
+ private:
+ friend class WebmMuxerTest;
+
+ // Creates and adds a new video track. Called upon receiving the first
+ // frame of a given Track, adds |frame_size| and |frame_rate| to the Segment
+ // info, although individual frames passed to OnEncodedVideo() can have any
+ // frame size.
+ void AddVideoTrack(const gfx::Size& frame_size, double frame_rate);
+
+ // IMkvWriter interface.
+ mkvmuxer::int32 Write(const void* buf, mkvmuxer::uint32 len) override;
+ mkvmuxer::int64 Position() const override;
+ mkvmuxer::int32 Position(mkvmuxer::int64 position) override;
+ bool Seekable() const override;
+ void ElementStartNotify(mkvmuxer::uint64 element_id,
+ mkvmuxer::int64 position) override;
+
+ // Used to DCHECK that we are called on the correct thread.
+ base::ThreadChecker thread_checker_;
+
+ // A caller-side identifier to interact with |segment_|, initialised upon
+ // first frame arrival by AddVideoTrack().
+ uint64_t track_index_;
+
+ // Origin of times for frame timestamps.
+ base::TimeTicks first_frame_timestamp_;
+
+ // Callback to dump written data as being called by libwebm.
+ const WriteDataCB write_data_callback_;
+
+ // Rolling counter of the position in bytes of the written goo.
+ base::CheckedNumeric<mkvmuxer::int64> position_;
+
+ // The MkvMuxer active element.
+ mkvmuxer::Segment segment_;
+
+ DISALLOW_COPY_AND_ASSIGN(WebmMuxer);
+};
+
+} // namespace media
+
+#endif // MEDIA_FILTERS_LIBWEBM_MUXER_H_
diff --git a/chromium/media/capture/webm_muxer_unittest.cc b/chromium/media/capture/webm_muxer_unittest.cc
new file mode 100644
index 00000000000..7e162fa0a67
--- /dev/null
+++ b/chromium/media/capture/webm_muxer_unittest.cc
@@ -0,0 +1,118 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/bind.h"
+#include "base/location.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "media/base/video_frame.h"
+#include "media/capture/webm_muxer.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using ::testing::_;
+using ::testing::AtLeast;
+using ::testing::Mock;
+using ::testing::WithArgs;
+
+namespace media {
+
+class WebmMuxerTest : public testing::Test {
+ public:
+ WebmMuxerTest()
+ : webm_muxer_(base::Bind(&WebmMuxerTest::WriteCallback,
+ base::Unretained(this))),
+ last_encoded_length_(0),
+ accumulated_position_(0) {
+ EXPECT_EQ(webm_muxer_.Position(), 0);
+ const mkvmuxer::int64 kRandomNewPosition = 333;
+ EXPECT_EQ(webm_muxer_.Position(kRandomNewPosition), -1);
+ EXPECT_FALSE(webm_muxer_.Seekable());
+ }
+
+ MOCK_METHOD1(WriteCallback, void(base::StringPiece));
+
+ void SaveEncodedDataLen(const base::StringPiece& encoded_data) {
+ last_encoded_length_ = encoded_data.size();
+ accumulated_position_ += encoded_data.size();
+ }
+
+ mkvmuxer::int64 GetWebmMuxerPosition() const {
+ return webm_muxer_.Position();
+ }
+
+ mkvmuxer::Segment::Mode GetWebmSegmentMode() const {
+ return webm_muxer_.segment_.mode();
+ }
+
+ mkvmuxer::int32 WebmMuxerWrite(const void* buf, mkvmuxer::uint32 len) {
+ return webm_muxer_.Write(buf, len);
+ }
+
+ WebmMuxer webm_muxer_;
+
+ size_t last_encoded_length_;
+ int64_t accumulated_position_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(WebmMuxerTest);
+};
+
+// Checks that the WriteCallback is called with appropriate params when
+// WebmMuxer::Write() method is called.
+TEST_F(WebmMuxerTest, Write) {
+ const base::StringPiece encoded_data("abcdefghijklmnopqrstuvwxyz");
+
+ EXPECT_CALL(*this, WriteCallback(encoded_data));
+ WebmMuxerWrite(encoded_data.data(), encoded_data.size());
+
+ EXPECT_EQ(GetWebmMuxerPosition(), static_cast<int64_t>(encoded_data.size()));
+}
+
+// This test sends two frames and checks that the WriteCallback is called with
+// appropriate params in both cases.
+TEST_F(WebmMuxerTest, OnEncodedVideoTwoFrames) {
+ const gfx::Size frame_size(160, 80);
+ const scoped_refptr<VideoFrame> video_frame =
+ VideoFrame::CreateBlackFrame(frame_size);
+ const std::string encoded_data("abcdefghijklmnopqrstuvwxyz");
+
+ EXPECT_CALL(*this, WriteCallback(_))
+ .Times(AtLeast(1))
+ .WillRepeatedly(WithArgs<0>(
+ Invoke(this, &WebmMuxerTest::SaveEncodedDataLen)));
+ webm_muxer_.OnEncodedVideo(video_frame,
+ make_scoped_ptr(new std::string(encoded_data)),
+ base::TimeTicks::Now(),
+ false /* keyframe */);
+
+ // First time around WriteCallback() is pinged a number of times to write the
+ // Matroska header, but at the end it dumps |encoded_data|.
+ EXPECT_EQ(last_encoded_length_, encoded_data.size());
+ EXPECT_EQ(GetWebmMuxerPosition(), accumulated_position_);
+ EXPECT_GE(GetWebmMuxerPosition(), static_cast<int64_t>(last_encoded_length_));
+ EXPECT_EQ(GetWebmSegmentMode(), mkvmuxer::Segment::kLive);
+
+ const int64_t begin_of_second_block = accumulated_position_;
+ EXPECT_CALL(*this, WriteCallback(_))
+ .Times(AtLeast(1))
+ .WillRepeatedly(WithArgs<0>(
+ Invoke(this, &WebmMuxerTest::SaveEncodedDataLen)));
+ webm_muxer_.OnEncodedVideo(video_frame,
+ make_scoped_ptr(new std::string(encoded_data)),
+ base::TimeTicks::Now(),
+ false /* keyframe */);
+
+ // The second time around the callbacks should include a SimpleBlock header,
+ // namely the track index, a timestamp and a flags byte, for a total of 6B.
+ EXPECT_EQ(last_encoded_length_, encoded_data.size());
+ EXPECT_EQ(GetWebmMuxerPosition(), accumulated_position_);
+ const uint32_t kSimpleBlockSize = 6u;
+ EXPECT_EQ(static_cast<int64_t>(begin_of_second_block + kSimpleBlockSize +
+ encoded_data.size()),
+ accumulated_position_);
+}
+
+} // namespace media
diff --git a/chromium/media/cast/BUILD.gn b/chromium/media/cast/BUILD.gn
index 79230630ba6..188cfca8b6d 100644
--- a/chromium/media/cast/BUILD.gn
+++ b/chromium/media/cast/BUILD.gn
@@ -137,6 +137,8 @@ source_set("sender") {
"sender/video_sender.h",
"sender/vp8_encoder.cc",
"sender/vp8_encoder.h",
+ "sender/vp8_quantizer_parser.cc",
+ "sender/vp8_quantizer_parser.h",
]
deps = [
@@ -144,7 +146,7 @@ source_set("sender") {
":net",
"//media",
"//media:shared_memory_support",
- "//third_party/libvpx",
+ "//third_party/libvpx_new",
"//third_party/opus",
"//ui/gfx/geometry",
]
@@ -154,7 +156,7 @@ source_set("sender") {
deps += [ "//media:media_for_cast_ios" ]
deps -= [
"//media",
- "//third_party/libvpx",
+ "//third_party/libvpx_new",
"//third_party/opus",
]
sources -= [
@@ -164,6 +166,8 @@ source_set("sender") {
"sender/video_encoder_impl.h",
"sender/vp8_encoder.cc",
"sender/vp8_encoder.h",
+ "sender/vp8_quantizer_parser.cc",
+ "sender/vp8_quantizer_parser.h",
]
}
@@ -201,7 +205,7 @@ source_set("receiver") {
":net",
"//media",
"//media:shared_memory_support",
- "//third_party/libvpx",
+ "//third_party/libvpx_new",
"//third_party/opus",
"//ui/gfx",
]
@@ -211,7 +215,7 @@ source_set("receiver") {
deps += [ "//media:media_for_cast_ios" ]
deps -= [
"//media",
- "//third_party/libvpx",
+ "//third_party/libvpx_new",
"//third_party/opus",
]
}
@@ -266,6 +270,15 @@ static_library("test_support") {
]
}
+# TODO(GYP): Delete this after we've converted everything to GN.
+# The _run targets exist only for compatibility w/ GYP.
+group("cast_unittests_run") {
+ testonly = true
+ deps = [
+ ":cast_unittests",
+ ]
+}
+
test("cast_unittests") {
sources = [
"logging/encoding_event_subscriber_unittest.cc",
@@ -276,6 +289,7 @@ test("cast_unittests") {
"logging/simple_event_subscriber_unittest.cc",
"logging/stats_event_subscriber_unittest.cc",
"net/cast_transport_sender_impl_unittest.cc",
+ "net/frame_id_wrap_helper_test.cc",
"net/mock_cast_transport_sender.cc",
"net/mock_cast_transport_sender.h",
"net/pacing/mock_paced_packet_sender.cc",
@@ -309,10 +323,12 @@ test("cast_unittests") {
"sender/audio_encoder_unittest.cc",
"sender/audio_sender_unittest.cc",
"sender/congestion_control_unittest.cc",
+ "sender/external_video_encoder_unittest.cc",
"sender/fake_video_encode_accelerator_factory.cc",
"sender/fake_video_encode_accelerator_factory.h",
"sender/video_encoder_unittest.cc",
"sender/video_sender_unittest.cc",
+ "sender/vp8_quantizer_parser_unittest.cc",
"test/end2end_unittest.cc",
"test/fake_receiver_time_offset_estimator.cc",
"test/fake_receiver_time_offset_estimator.h",
@@ -358,6 +374,7 @@ executable("generate_barcode_video") {
deps = [
":test_support",
"//base",
+ "//build/config/sanitizers:deps",
"//media",
]
}
@@ -371,6 +388,7 @@ executable("generate_timecode_audio") {
deps = [
":test_support",
"//base",
+ "//build/config/sanitizers:deps",
"//media",
]
}
@@ -384,6 +402,7 @@ executable("udp_proxy") {
deps = [
":test_support",
"//base",
+ "//build/config/sanitizers:deps",
"//net",
]
}
diff --git a/chromium/media/cast/cast.gyp b/chromium/media/cast/cast.gyp
index 71ba2f04467..b2acc85f7e0 100644
--- a/chromium/media/cast/cast.gyp
+++ b/chromium/media/cast/cast.gyp
@@ -94,7 +94,7 @@
'<(DEPTH)/media/media.gyp:media',
'<(DEPTH)/media/media.gyp:shared_memory_support',
'<(DEPTH)/third_party/opus/opus.gyp:opus',
- '<(DEPTH)/third_party/libvpx/libvpx.gyp:libvpx',
+ '<(DEPTH)/third_party/libvpx_new/libvpx.gyp:libvpx_new',
'<(DEPTH)/ui/gfx/gfx.gyp:gfx_geometry',
],
'sources': [
@@ -125,7 +125,7 @@
'dependencies!': [
'<(DEPTH)/media/media.gyp:media',
'<(DEPTH)/third_party/opus/opus.gyp:opus',
- '<(DEPTH)/third_party/libvpx/libvpx.gyp:libvpx',
+ '<(DEPTH)/third_party/libvpx_new/libvpx.gyp:libvpx_new',
],
}], # OS=="ios"
], # conditions
@@ -143,7 +143,7 @@
'<(DEPTH)/media/media.gyp:media',
'<(DEPTH)/media/media.gyp:shared_memory_support',
'<(DEPTH)/third_party/opus/opus.gyp:opus',
- '<(DEPTH)/third_party/libvpx/libvpx.gyp:libvpx',
+ '<(DEPTH)/third_party/libvpx_new/libvpx.gyp:libvpx_new',
'<(DEPTH)/ui/gfx/gfx.gyp:gfx_geometry',
], # dependencies
'sources': [
@@ -178,6 +178,8 @@
'sender/video_sender.h',
'sender/vp8_encoder.cc',
'sender/vp8_encoder.h',
+ 'sender/vp8_quantizer_parser.h',
+ 'sender/vp8_quantizer_parser.cc',
], # source
'conditions': [
# use a restricted subset of media and no software codecs on iOS
@@ -186,7 +188,7 @@
'dependencies!': [
'<(DEPTH)/media/media.gyp:media',
'<(DEPTH)/third_party/opus/opus.gyp:opus',
- '<(DEPTH)/third_party/libvpx/libvpx.gyp:libvpx',
+ '<(DEPTH)/third_party/libvpx_new/libvpx.gyp:libvpx_new',
],
'sources!': [
'sender/external_video_encoder.cc',
@@ -195,6 +197,8 @@
'sender/video_encoder_impl.h',
'sender/vp8_encoder.cc',
'sender/vp8_encoder.h',
+ 'sender/vp8_quantizer_parser.cc',
+ 'sender/vp8_quantizer_parser.h',
],
}], # OS=="ios"
# iOS and OS X encoders
diff --git a/chromium/media/cast/cast_config.cc b/chromium/media/cast/cast_config.cc
index e361f158c49..d251016b0ce 100644
--- a/chromium/media/cast/cast_config.cc
+++ b/chromium/media/cast/cast_config.cc
@@ -4,6 +4,10 @@
#include "media/cast/cast_config.h"
+namespace {
+const float kDefaultCongestionControlBackOff = 0.875f;
+} // namespace
+
namespace media {
namespace cast {
diff --git a/chromium/media/cast/cast_defines.h b/chromium/media/cast/cast_defines.h
index 9bf2f44352c..f4fb3c83b67 100644
--- a/chromium/media/cast/cast_defines.h
+++ b/chromium/media/cast/cast_defines.h
@@ -20,9 +20,8 @@
namespace media {
namespace cast {
-const int64 kDontShowTimeoutMs = 33;
-const float kDefaultCongestionControlBackOff = 0.875f;
const uint32 kVideoFrequency = 90000;
+const int kMinSampleRateForEncoding = 8000;
const uint32 kStartFrameId = UINT32_C(0xffffffff);
// This is an important system-wide constant. This limits how much history the
diff --git a/chromium/media/cast/cast_testing.gypi b/chromium/media/cast/cast_testing.gypi
index e763dc22ec0..b13bb5b0612 100644
--- a/chromium/media/cast/cast_testing.gypi
+++ b/chromium/media/cast/cast_testing.gypi
@@ -68,6 +68,7 @@
# the tools compile correctly.
'cast_tools',
'<(DEPTH)/base/base.gyp:test_support_base',
+ '<(DEPTH)/media/media.gyp:media_test_support',
'<(DEPTH)/net/net.gyp:net',
'<(DEPTH)/testing/gmock.gyp:gmock',
'<(DEPTH)/testing/gtest.gyp:gtest',
@@ -115,10 +116,12 @@
'sender/audio_encoder_unittest.cc',
'sender/audio_sender_unittest.cc',
'sender/congestion_control_unittest.cc',
+ 'sender/external_video_encoder_unittest.cc',
'sender/fake_video_encode_accelerator_factory.cc',
'sender/fake_video_encode_accelerator_factory.h',
'sender/video_encoder_unittest.cc',
'sender/video_sender_unittest.cc',
+ 'sender/vp8_quantizer_parser_unittest.cc',
'test/end2end_unittest.cc',
'test/fake_receiver_time_offset_estimator.cc',
'test/fake_receiver_time_offset_estimator.h',
@@ -377,6 +380,15 @@
'sources': [
'cast_unittests.isolate',
],
+ 'conditions': [
+ ['use_x11==1',
+ {
+ 'dependencies': [
+ '../../tools/xdisplaycheck/xdisplaycheck.gyp:xdisplaycheck',
+ ],
+ }
+ ],
+ ],
},
],
}],
diff --git a/chromium/media/cast/cast_unittests.isolate b/chromium/media/cast/cast_unittests.isolate
index bf207cd4c13..2d61c787dfc 100644
--- a/chromium/media/cast/cast_unittests.isolate
+++ b/chromium/media/cast/cast_unittests.isolate
@@ -38,7 +38,6 @@
'variables': {
'files': [
'../../testing/test_env.py',
- '<(PRODUCT_DIR)/cast_unittests<(EXECUTABLE_SUFFIX)',
],
},
}],
diff --git a/chromium/media/cast/common/transport_encryption_handler.cc b/chromium/media/cast/common/transport_encryption_handler.cc
index 54a43e8b526..360e40c63a8 100644
--- a/chromium/media/cast/common/transport_encryption_handler.cc
+++ b/chromium/media/cast/common/transport_encryption_handler.cc
@@ -41,8 +41,8 @@ TransportEncryptionHandler::TransportEncryptionHandler()
TransportEncryptionHandler::~TransportEncryptionHandler() {}
-bool TransportEncryptionHandler::Initialize(std::string aes_key,
- std::string aes_iv_mask) {
+bool TransportEncryptionHandler::Initialize(const std::string& aes_key,
+ const std::string& aes_iv_mask) {
is_activated_ = false;
if (aes_iv_mask.size() == kAesKeySize && aes_key.size() == kAesKeySize) {
iv_mask_ = aes_iv_mask;
diff --git a/chromium/media/cast/common/transport_encryption_handler.h b/chromium/media/cast/common/transport_encryption_handler.h
index d4798dc78b5..d71dc49c35e 100644
--- a/chromium/media/cast/common/transport_encryption_handler.h
+++ b/chromium/media/cast/common/transport_encryption_handler.h
@@ -26,7 +26,7 @@ class TransportEncryptionHandler : public base::NonThreadSafe {
TransportEncryptionHandler();
~TransportEncryptionHandler();
- bool Initialize(std::string aes_key, std::string aes_iv_mask);
+ bool Initialize(const std::string& aes_key, const std::string& aes_iv_mask);
bool Encrypt(uint32 frame_id,
const base::StringPiece& data,
diff --git a/chromium/media/cast/net/cast_transport_sender_impl.cc b/chromium/media/cast/net/cast_transport_sender_impl.cc
index 4def3272c74..c3afc276b33 100644
--- a/chromium/media/cast/net/cast_transport_sender_impl.cc
+++ b/chromium/media/cast/net/cast_transport_sender_impl.cc
@@ -9,7 +9,7 @@
#include "media/cast/net/cast_transport_defines.h"
#include "media/cast/net/udp_transport.h"
#include "net/base/net_errors.h"
-#include "net/base/net_util.h"
+#include "net/base/network_interfaces.h"
namespace media {
namespace cast {
diff --git a/chromium/media/cast/net/cast_transport_sender_impl.h b/chromium/media/cast/net/cast_transport_sender_impl.h
index c118ad883a9..a5538e7840c 100644
--- a/chromium/media/cast/net/cast_transport_sender_impl.h
+++ b/chromium/media/cast/net/cast_transport_sender_impl.h
@@ -42,7 +42,7 @@
#include "media/cast/net/rtcp/rtcp.h"
#include "media/cast/net/rtp/rtp_parser.h"
#include "media/cast/net/rtp/rtp_sender.h"
-#include "net/base/net_util.h"
+#include "net/base/network_interfaces.h"
namespace media {
namespace cast {
diff --git a/chromium/media/cast/net/rtcp/rtcp_unittest.cc b/chromium/media/cast/net/rtcp/rtcp_unittest.cc
index 0ee56432c4f..ce6c3b24622 100644
--- a/chromium/media/cast/net/rtcp/rtcp_unittest.cc
+++ b/chromium/media/cast/net/rtcp/rtcp_unittest.cc
@@ -213,7 +213,7 @@ TEST_F(RtcpTest, RoundTripTimesDeterminedFromReportPingPong) {
rtcp_for_sender_.current_round_trip_time());
#ifdef SENDER_PROVIDES_REPORT_BLOCK
EXPECT_EQ(expected_rtt_according_to_receiver,
- rtcp_for_receiver_.current_round_trip_time();
+ rtcp_for_receiver_.current_round_trip_time());
#endif
// In the next iteration of this loop, after the receiver gets the sender
diff --git a/chromium/media/cast/receiver/video_decoder.cc b/chromium/media/cast/receiver/video_decoder.cc
index b2a2e3e2b61..f9b24a420a2 100644
--- a/chromium/media/cast/receiver/video_decoder.cc
+++ b/chromium/media/cast/receiver/video_decoder.cc
@@ -16,8 +16,8 @@
// VPX_CODEC_DISABLE_COMPAT excludes parts of the libvpx API that provide
// backwards compatibility for legacy applications using the library.
#define VPX_CODEC_DISABLE_COMPAT 1
-#include "third_party/libvpx/source/libvpx/vpx/vp8dx.h"
-#include "third_party/libvpx/source/libvpx/vpx/vpx_decoder.h"
+#include "third_party/libvpx_new/source/libvpx/vpx/vp8dx.h"
+#include "third_party/libvpx_new/source/libvpx/vpx/vpx_decoder.h"
#include "ui/gfx/geometry/size.h"
namespace media {
@@ -140,12 +140,9 @@ class VideoDecoder::Vp8Impl : public VideoDecoder::ImplBase {
const gfx::Size frame_size(image->d_w, image->d_h);
// Note: Timestamp for the VideoFrame will be set in VideoReceiver.
- const scoped_refptr<VideoFrame> decoded_frame =
- VideoFrame::CreateFrame(VideoFrame::YV12,
- frame_size,
- gfx::Rect(frame_size),
- frame_size,
- base::TimeDelta());
+ const scoped_refptr<VideoFrame> decoded_frame = VideoFrame::CreateFrame(
+ PIXEL_FORMAT_YV12, frame_size, gfx::Rect(frame_size), frame_size,
+ base::TimeDelta());
CopyYPlane(image->planes[VPX_PLANE_Y],
image->stride[VPX_PLANE_Y],
image->d_h,
diff --git a/chromium/media/cast/receiver/video_decoder_unittest.cc b/chromium/media/cast/receiver/video_decoder_unittest.cc
index bd273c38f4d..fad2db54f82 100644
--- a/chromium/media/cast/receiver/video_decoder_unittest.cc
+++ b/chromium/media/cast/receiver/video_decoder_unittest.cc
@@ -73,12 +73,9 @@ class VideoDecoderTest : public ::testing::TestWithParam<Codec> {
void FeedMoreVideo(int num_dropped_frames) {
// Prepare a simulated EncodedFrame to feed into the VideoDecoder.
- const scoped_refptr<VideoFrame> video_frame =
- VideoFrame::CreateFrame(VideoFrame::YV12,
- next_frame_size_,
- gfx::Rect(next_frame_size_),
- next_frame_size_,
- next_frame_timestamp_);
+ const scoped_refptr<VideoFrame> video_frame = VideoFrame::CreateFrame(
+ PIXEL_FORMAT_YV12, next_frame_size_, gfx::Rect(next_frame_size_),
+ next_frame_size_, next_frame_timestamp_);
const base::TimeTicks reference_time =
base::TimeTicks::UnixEpoch() + next_frame_timestamp_;
next_frame_timestamp_ += base::TimeDelta::FromSeconds(1) / kFrameRate;
diff --git a/chromium/media/cast/sender/audio_encoder.cc b/chromium/media/cast/sender/audio_encoder.cc
index 4173ff091b2..879b476f5ac 100644
--- a/chromium/media/cast/sender/audio_encoder.cc
+++ b/chromium/media/cast/sender/audio_encoder.cc
@@ -14,6 +14,7 @@
#include "base/stl_util.h"
#include "base/sys_byteorder.h"
#include "base/time/time.h"
+#include "base/trace_event/trace_event.h"
#include "media/cast/cast_defines.h"
#if !defined(OS_IOS)
@@ -107,6 +108,10 @@ class AudioEncoder::ImplBase
DVLOG(1) << "Skipping RTP timestamp ahead to account for "
<< num_frames_missed * samples_per_frame_
<< " samples' worth of underrun.";
+ TRACE_EVENT_INSTANT2("cast.stream", "Audio Skip",
+ TRACE_EVENT_SCOPE_THREAD,
+ "frames missed", num_frames_missed,
+ "samples dropped", samples_dropped_from_buffer_);
}
}
frame_capture_time_ = recorded_time - buffer_fill_duration;
@@ -138,6 +143,9 @@ class AudioEncoder::ImplBase
audio_frame->rtp_timestamp = frame_rtp_timestamp_;
audio_frame->reference_time = frame_capture_time_;
+ TRACE_EVENT_ASYNC_BEGIN2("cast.stream", "Audio Encode", audio_frame.get(),
+ "frame_id", frame_id_,
+ "rtp_timestamp", frame_rtp_timestamp_);
if (EncodeFromFilledBuffer(&audio_frame->data)) {
// Compute deadline utilization as the real-world time elapsed divided
// by the signal duration.
@@ -145,6 +153,9 @@ class AudioEncoder::ImplBase
(base::TimeTicks::Now() - start_time).InSecondsF() /
frame_duration_.InSecondsF();
+ TRACE_EVENT_ASYNC_END1("cast.stream", "Audio Encode", audio_frame.get(),
+ "Deadline utilization",
+ audio_frame->deadline_utilization);
cast_environment_->PostTask(
CastEnvironment::MAIN,
FROM_HERE,
diff --git a/chromium/media/cast/sender/congestion_control.cc b/chromium/media/cast/sender/congestion_control.cc
index 5ede1b5886b..0b0aa254a52 100644
--- a/chromium/media/cast/sender/congestion_control.cc
+++ b/chromium/media/cast/sender/congestion_control.cc
@@ -15,7 +15,10 @@
#include "media/cast/sender/congestion_control.h"
+#include <deque>
+
#include "base/logging.h"
+#include "base/trace_event/trace_event.h"
#include "media/cast/cast_config.h"
#include "media/cast/cast_defines.h"
@@ -25,42 +28,37 @@ namespace cast {
class AdaptiveCongestionControl : public CongestionControl {
public:
AdaptiveCongestionControl(base::TickClock* clock,
- uint32 max_bitrate_configured,
- uint32 min_bitrate_configured,
+ int max_bitrate_configured,
+ int min_bitrate_configured,
double max_frame_rate);
~AdaptiveCongestionControl() final;
+ // CongestionControl implementation.
void UpdateRtt(base::TimeDelta rtt) final;
-
void UpdateTargetPlayoutDelay(base::TimeDelta delay) final;
-
- // Called when an encoded frame is sent to the transport.
void SendFrameToTransport(uint32 frame_id,
- size_t frame_size,
+ size_t frame_size_in_bits,
base::TimeTicks when) final;
-
- // Called when we receive an ACK for a frame.
void AckFrame(uint32 frame_id, base::TimeTicks when) final;
-
- // Returns the bitrate we should use for the next frame.
- uint32 GetBitrate(base::TimeTicks playout_time,
- base::TimeDelta playout_delay) final;
+ int GetBitrate(base::TimeTicks playout_time,
+ base::TimeDelta playout_delay,
+ int soft_max_bitrate) final;
private:
struct FrameStats {
FrameStats();
- // Time this frame was sent to the transport.
- base::TimeTicks sent_time;
+ // Time this frame was first enqueued for transport.
+ base::TimeTicks enqueue_time;
// Time this frame was acked.
base::TimeTicks ack_time;
// Size of encoded frame in bits.
- size_t frame_size;
+ size_t frame_size_in_bits;
};
// Calculate how much "dead air" (idle time) there is between two frames.
static base::TimeDelta DeadTime(const FrameStats& a, const FrameStats& b);
- // Get the FrameStats for a given |frame_id|.
+ // Get the FrameStats for a given |frame_id|. Never returns nullptr.
// Note: Older FrameStats will be removed automatically.
FrameStats* GetFrameStats(uint32 frame_id);
// Discard old FrameStats.
@@ -69,22 +67,20 @@ class AdaptiveCongestionControl : public CongestionControl {
// sending in the past.
double CalculateSafeBitrate();
- // For a given frame, calculate when it might be acked.
- // (Or return the time it was acked, if it was.)
- base::TimeTicks EstimatedAckTime(uint32 frame_id, double bitrate);
- // Calculate when we start sending the data for a given frame.
- // This is done by calculating when we were done sending the previous
- // frame, but obviously can't be less than |sent_time| (if known).
- base::TimeTicks EstimatedSendingTime(uint32 frame_id, double bitrate);
+ // Estimate when the transport will start sending the data for a given frame.
+ // |estimated_bitrate| is the current estimated transmit bitrate in bits per
+ // second.
+ base::TimeTicks EstimatedSendingTime(uint32 frame_id,
+ double estimated_bitrate);
base::TickClock* const clock_; // Not owned by this class.
- const uint32 max_bitrate_configured_;
- const uint32 min_bitrate_configured_;
+ const int max_bitrate_configured_;
+ const int min_bitrate_configured_;
const double max_frame_rate_;
std::deque<FrameStats> frame_stats_;
uint32 last_frame_stats_;
uint32 last_acked_frame_;
- uint32 last_encoded_frame_;
+ uint32 last_enqueued_frame_;
base::TimeDelta rtt_;
size_t history_size_;
size_t acked_bits_in_history_;
@@ -95,37 +91,33 @@ class AdaptiveCongestionControl : public CongestionControl {
class FixedCongestionControl : public CongestionControl {
public:
- FixedCongestionControl(uint32 bitrate) : bitrate_(bitrate) {}
+ explicit FixedCongestionControl(int bitrate) : bitrate_(bitrate) {}
~FixedCongestionControl() final {}
+ // CongestionControl implementation.
void UpdateRtt(base::TimeDelta rtt) final {}
-
void UpdateTargetPlayoutDelay(base::TimeDelta delay) final {}
-
- // Called when an encoded frame is sent to the transport.
void SendFrameToTransport(uint32 frame_id,
- size_t frame_size,
+ size_t frame_size_in_bits,
base::TimeTicks when) final {}
-
- // Called when we receive an ACK for a frame.
void AckFrame(uint32 frame_id, base::TimeTicks when) final {}
-
- // Returns the bitrate we should use for the next frame.
- uint32 GetBitrate(base::TimeTicks playout_time,
- base::TimeDelta playout_delay) final {
+ int GetBitrate(base::TimeTicks playout_time,
+ base::TimeDelta playout_delay,
+ int soft_max_bitrate) final {
return bitrate_;
}
private:
- uint32 bitrate_;
+ const int bitrate_;
+
DISALLOW_COPY_AND_ASSIGN(FixedCongestionControl);
};
CongestionControl* NewAdaptiveCongestionControl(
base::TickClock* clock,
- uint32 max_bitrate_configured,
- uint32 min_bitrate_configured,
+ int max_bitrate_configured,
+ int min_bitrate_configured,
double max_frame_rate) {
return new AdaptiveCongestionControl(clock,
max_bitrate_configured,
@@ -133,7 +125,7 @@ CongestionControl* NewAdaptiveCongestionControl(
max_frame_rate);
}
-CongestionControl* NewFixedCongestionControl(uint32 bitrate) {
+CongestionControl* NewFixedCongestionControl(int bitrate) {
return new FixedCongestionControl(bitrate);
}
@@ -147,13 +139,13 @@ static const double kTargetEmptyBufferFraction = 0.9;
// congestion control adapt slower.
static const size_t kHistorySize = 100;
-AdaptiveCongestionControl::FrameStats::FrameStats() : frame_size(0) {
+AdaptiveCongestionControl::FrameStats::FrameStats() : frame_size_in_bits(0) {
}
AdaptiveCongestionControl::AdaptiveCongestionControl(
base::TickClock* clock,
- uint32 max_bitrate_configured,
- uint32 min_bitrate_configured,
+ int max_bitrate_configured,
+ int min_bitrate_configured,
double max_frame_rate)
: clock_(clock),
max_bitrate_configured_(max_bitrate_configured),
@@ -161,14 +153,15 @@ AdaptiveCongestionControl::AdaptiveCongestionControl(
max_frame_rate_(max_frame_rate),
last_frame_stats_(static_cast<uint32>(-1)),
last_acked_frame_(static_cast<uint32>(-1)),
- last_encoded_frame_(static_cast<uint32>(-1)),
+ last_enqueued_frame_(static_cast<uint32>(-1)),
history_size_(kHistorySize),
acked_bits_in_history_(0) {
DCHECK_GE(max_bitrate_configured, min_bitrate_configured) << "Invalid config";
+ DCHECK_GT(min_bitrate_configured, 0);
frame_stats_.resize(2);
base::TimeTicks now = clock->NowTicks();
frame_stats_[0].ack_time = now;
- frame_stats_[0].sent_time = now;
+ frame_stats_[0].enqueue_time = now;
frame_stats_[1].ack_time = now;
DCHECK(!frame_stats_[0].ack_time.is_null());
}
@@ -194,8 +187,8 @@ void AdaptiveCongestionControl::UpdateTargetPlayoutDelay(
// Calculate how much "dead air" there is between two frames.
base::TimeDelta AdaptiveCongestionControl::DeadTime(const FrameStats& a,
const FrameStats& b) {
- if (b.sent_time > a.ack_time) {
- return b.sent_time - a.ack_time;
+ if (b.enqueue_time > a.ack_time) {
+ return b.enqueue_time - a.ack_time;
} else {
return base::TimeDelta();
}
@@ -204,7 +197,7 @@ base::TimeDelta AdaptiveCongestionControl::DeadTime(const FrameStats& a,
double AdaptiveCongestionControl::CalculateSafeBitrate() {
double transmit_time =
(GetFrameStats(last_acked_frame_)->ack_time -
- frame_stats_.front().sent_time - dead_time_in_history_).InSecondsF();
+ frame_stats_.front().enqueue_time - dead_time_in_history_).InSecondsF();
if (acked_bits_in_history_ == 0 || transmit_time <= 0.0) {
return min_bitrate_configured_;
@@ -223,9 +216,9 @@ AdaptiveCongestionControl::GetFrameStats(uint32 frame_id) {
}
PruneFrameStats();
offset += frame_stats_.size() - 1;
- if (offset < 0 || offset >= static_cast<int32>(frame_stats_.size())) {
- return NULL;
- }
+ // TODO(miu): Change the following to DCHECK once crash fix is confirmed.
+ // http://crbug.com/517145
+ CHECK(offset >= 0 && offset < static_cast<int32>(frame_stats_.size()));
return &frame_stats_[offset];
}
@@ -233,7 +226,7 @@ void AdaptiveCongestionControl::PruneFrameStats() {
while (frame_stats_.size() > history_size_) {
DCHECK_GT(frame_stats_.size(), 1UL);
DCHECK(!frame_stats_[0].ack_time.is_null());
- acked_bits_in_history_ -= frame_stats_[0].frame_size;
+ acked_bits_in_history_ -= frame_stats_[0].frame_size_in_bits;
dead_time_in_history_ -= DeadTime(frame_stats_[0], frame_stats_[1]);
DCHECK_GE(acked_bits_in_history_, 0UL);
VLOG(2) << "DT: " << dead_time_in_history_.InSecondsF();
@@ -248,90 +241,132 @@ void AdaptiveCongestionControl::AckFrame(uint32 frame_id,
while (IsNewerFrameId(frame_id, last_acked_frame_)) {
FrameStats* last_frame_stats = frame_stats;
frame_stats = GetFrameStats(last_acked_frame_ + 1);
- DCHECK(frame_stats);
- if (frame_stats->sent_time.is_null()) {
+ if (frame_stats->enqueue_time.is_null()) {
// Can't ack a frame that hasn't been sent yet.
return;
}
last_acked_frame_++;
- if (when < frame_stats->sent_time)
- when = frame_stats->sent_time;
+ if (when < frame_stats->enqueue_time)
+ when = frame_stats->enqueue_time;
frame_stats->ack_time = when;
- acked_bits_in_history_ += frame_stats->frame_size;
+ acked_bits_in_history_ += frame_stats->frame_size_in_bits;
dead_time_in_history_ += DeadTime(*last_frame_stats, *frame_stats);
}
}
void AdaptiveCongestionControl::SendFrameToTransport(uint32 frame_id,
- size_t frame_size,
+ size_t frame_size_in_bits,
base::TimeTicks when) {
- last_encoded_frame_ = frame_id;
+ last_enqueued_frame_ = frame_id;
FrameStats* frame_stats = GetFrameStats(frame_id);
- DCHECK(frame_stats);
- frame_stats->frame_size = frame_size;
- frame_stats->sent_time = when;
+ frame_stats->enqueue_time = when;
+ frame_stats->frame_size_in_bits = frame_size_in_bits;
}
-base::TimeTicks AdaptiveCongestionControl::EstimatedAckTime(uint32 frame_id,
- double bitrate) {
- FrameStats* frame_stats = GetFrameStats(frame_id);
- DCHECK(frame_stats);
- if (frame_stats->ack_time.is_null()) {
- DCHECK(frame_stats->frame_size) << "frame_id: " << frame_id;
- base::TimeTicks ret = EstimatedSendingTime(frame_id, bitrate);
- ret += base::TimeDelta::FromSecondsD(frame_stats->frame_size / bitrate);
- ret += rtt_;
- base::TimeTicks now = clock_->NowTicks();
- if (ret < now) {
- // This is a little counter-intuitive, but it seems to work.
- // Basically, when we estimate that the ACK should have already happened,
- // we figure out how long ago it should have happened and guess that the
- // ACK will happen half of that time in the future. This will cause some
- // over-estimation when acks are late, which is actually what we want.
- return now + (now - ret) / 2;
- } else {
- return ret;
+base::TimeTicks AdaptiveCongestionControl::EstimatedSendingTime(
+ uint32 frame_id,
+ double estimated_bitrate) {
+ const base::TimeTicks now = clock_->NowTicks();
+
+ // Starting with the time of the latest acknowledgement, extrapolate forward
+ // to determine an estimated sending time for |frame_id|.
+ //
+ // |estimated_sending_time| will contain the estimated sending time for each
+ // frame after the last ACK'ed frame. It is possible for multiple frames to
+ // be in-flight; and therefore it is common for the |estimated_sending_time|
+ // for those frames to be before |now|.
+ base::TimeTicks estimated_sending_time;
+ for (uint32 f = last_acked_frame_; IsNewerFrameId(frame_id, f); ++f) {
+ FrameStats* const stats = GetFrameStats(f);
+
+ // |estimated_ack_time| is the local time when the sender receives the ACK,
+ // and not the time when the ACK left the receiver.
+ base::TimeTicks estimated_ack_time = stats->ack_time;
+
+ // If |estimated_ack_time| is not null, then we already have the actual ACK
+ // time, so we'll just use it. Otherwise, we need to estimate when the ACK
+ // will arrive.
+ if (estimated_ack_time.is_null()) {
+ // Model: The |estimated_sending_time| is the time at which the first byte
+ // of the encoded frame is transmitted. Then, assume the transmission of
+ // the remaining bytes is paced such that the last byte has just left the
+ // sender at |frame_transmit_time| later. This last byte then takes
+ // ~RTT/2 amount of time to travel to the receiver. Finally, the ACK from
+ // the receiver is sent and this takes another ~RTT/2 amount of time to
+ // reach the sender.
+ const base::TimeDelta frame_transmit_time =
+ base::TimeDelta::FromSecondsD(stats->frame_size_in_bits /
+ estimated_bitrate);
+ estimated_ack_time =
+ std::max(estimated_sending_time, stats->enqueue_time) +
+ frame_transmit_time + rtt_;
+
+ if (estimated_ack_time < now) {
+ // The current frame has not yet been ACK'ed and the yet the computed
+ // |estimated_ack_time| is before |now|. This contradiction must be
+ // resolved.
+ //
+ // The solution below is a little counter-intuitive, but it seems to
+ // work. Basically, when we estimate that the ACK should have already
+ // happened, we figure out how long ago it should have happened and
+ // guess that the ACK will happen half of that time in the future. This
+ // will cause some over-estimation when acks are late, which is actually
+ // the desired behavior.
+ estimated_ack_time = now + (now - estimated_ack_time) / 2;
+ }
}
- } else {
- return frame_stats->ack_time;
+
+ // Since we [in the common case] do not wait for an ACK before we start
+ // sending the next frame, estimate the next frame's sending time as the
+ // time just after the last byte of the current frame left the sender (see
+ // Model comment above).
+ estimated_sending_time =
+ std::max(estimated_sending_time, estimated_ack_time - rtt_);
}
-}
-base::TimeTicks AdaptiveCongestionControl::EstimatedSendingTime(
- uint32 frame_id,
- double bitrate) {
- FrameStats* frame_stats = GetFrameStats(frame_id);
- DCHECK(frame_stats);
- base::TimeTicks ret = EstimatedAckTime(frame_id - 1, bitrate) - rtt_;
- if (frame_stats->sent_time.is_null()) {
- // Not sent yet, but we can't start sending it in the past.
- return std::max(ret, clock_->NowTicks());
+ FrameStats* const frame_stats = GetFrameStats(frame_id);
+ if (frame_stats->enqueue_time.is_null()) {
+ // The frame has not yet been enqueued for transport. Since it cannot be
+ // enqueued in the past, ensure the result is lower-bounded by |now|.
+ estimated_sending_time = std::max(estimated_sending_time, now);
} else {
- return std::max(ret, frame_stats->sent_time);
+ // |frame_stats->enqueue_time| is the time the frame was enqueued for
+ // transport. The frame may not actually start being sent until a
+ // point-in-time after that, because the transport is waiting for prior
+ // frames to be acknowledged.
+ estimated_sending_time =
+ std::max(estimated_sending_time, frame_stats->enqueue_time);
}
+
+ return estimated_sending_time;
}
-uint32 AdaptiveCongestionControl::GetBitrate(base::TimeTicks playout_time,
- base::TimeDelta playout_delay) {
+int AdaptiveCongestionControl::GetBitrate(base::TimeTicks playout_time,
+ base::TimeDelta playout_delay,
+ int soft_max_bitrate) {
double safe_bitrate = CalculateSafeBitrate();
// Estimate when we might start sending the next frame.
base::TimeDelta time_to_catch_up =
playout_time -
- EstimatedSendingTime(last_encoded_frame_ + 1, safe_bitrate);
+ EstimatedSendingTime(last_enqueued_frame_ + 1, safe_bitrate);
double empty_buffer_fraction =
time_to_catch_up.InSecondsF() / playout_delay.InSecondsF();
empty_buffer_fraction = std::min(empty_buffer_fraction, 1.0);
empty_buffer_fraction = std::max(empty_buffer_fraction, 0.0);
- uint32 bits_per_second = static_cast<uint32>(
+ int bits_per_second = static_cast<int>(
safe_bitrate * empty_buffer_fraction / kTargetEmptyBufferFraction);
VLOG(3) << " FBR:" << (bits_per_second / 1E6)
<< " EBF:" << empty_buffer_fraction
<< " SBR:" << (safe_bitrate / 1E6);
+ TRACE_COUNTER_ID1("cast.stream", "Empty Buffer Fraction", this,
+ empty_buffer_fraction);
+ bits_per_second = std::min(bits_per_second, soft_max_bitrate);
bits_per_second = std::max(bits_per_second, min_bitrate_configured_);
bits_per_second = std::min(bits_per_second, max_bitrate_configured_);
+
return bits_per_second;
}
diff --git a/chromium/media/cast/sender/congestion_control.h b/chromium/media/cast/sender/congestion_control.h
index 8c3d764e20f..8e171340943 100644
--- a/chromium/media/cast/sender/congestion_control.h
+++ b/chromium/media/cast/sender/congestion_control.h
@@ -5,8 +5,6 @@
#ifndef MEDIA_CAST_CONGESTION_CONTROL_CONGESTION_CONTROL_H_
#define MEDIA_CAST_CONGESTION_CONTROL_CONGESTION_CONTROL_H_
-#include <deque>
-
#include "base/basictypes.h"
#include "base/memory/scoped_ptr.h"
#include "base/time/tick_clock.h"
@@ -25,25 +23,29 @@ class CongestionControl {
// Called with an updated target playout delay value.
virtual void UpdateTargetPlayoutDelay(base::TimeDelta delay) = 0;
- // Called when an encoded frame is sent to the transport.
+ // Called when an encoded frame is enqueued for transport.
virtual void SendFrameToTransport(uint32 frame_id,
- size_t frame_size,
+ size_t frame_size_in_bits,
base::TimeTicks when) = 0;
+
// Called when we receive an ACK for a frame.
virtual void AckFrame(uint32 frame_id, base::TimeTicks when) = 0;
- // Returns the bitrate we should use for the next frame.
- virtual uint32 GetBitrate(base::TimeTicks playout_time,
- base::TimeDelta playout_delay) = 0;
+ // Returns the bitrate we should use for the next frame. |soft_max_bitrate|
+ // is a soft upper-bound applied to the computed target bitrate before the
+ // hard upper- and lower-bounds are applied.
+ virtual int GetBitrate(base::TimeTicks playout_time,
+ base::TimeDelta playout_delay,
+ int soft_max_bitrate) = 0;
};
CongestionControl* NewAdaptiveCongestionControl(
base::TickClock* clock,
- uint32 max_bitrate_configured,
- uint32 min_bitrate_configured,
+ int max_bitrate_configured,
+ int min_bitrate_configured,
double max_frame_rate);
-CongestionControl* NewFixedCongestionControl(uint32 bitrate);
+CongestionControl* NewFixedCongestionControl(int bitrate);
} // namespace cast
} // namespace media
diff --git a/chromium/media/cast/sender/congestion_control_unittest.cc b/chromium/media/cast/sender/congestion_control_unittest.cc
index bf76a35c27d..5293b58c901 100644
--- a/chromium/media/cast/sender/congestion_control_unittest.cc
+++ b/chromium/media/cast/sender/congestion_control_unittest.cc
@@ -14,8 +14,8 @@
namespace media {
namespace cast {
-static const uint32 kMaxBitrateConfigured = 5000000;
-static const uint32 kMinBitrateConfigured = 500000;
+static const int kMaxBitrateConfigured = 5000000;
+static const int kMinBitrateConfigured = 500000;
static const int64 kFrameDelayMs = 33;
static const double kMaxFrameRate = 1000.0 / kFrameDelayMs;
static const int64 kStartMillisecond = INT64_C(12345678900000);
@@ -67,6 +67,9 @@ class CongestionControlTest : public ::testing::Test {
DISALLOW_COPY_AND_ASSIGN(CongestionControlTest);
};
+// Tests that AdaptiveCongestionControl returns reasonable bitrates based on
+// estimations of network bandwidth and how much is in-flight (i.e, using the
+// "target buffer fill" model).
TEST_F(CongestionControlTest, SimpleRun) {
uint32 frame_size = 10000 * 8;
Run(500,
@@ -77,23 +80,30 @@ TEST_F(CongestionControlTest, SimpleRun) {
// Empty the buffer.
task_runner_->Sleep(base::TimeDelta::FromMilliseconds(100));
+ // Use a soft maximum bitrate limit so large it will not bound the results of
+ // the underlying computations.
+ const int soft_max_bitrate = std::numeric_limits<int>::max();
+
uint32 safe_bitrate = frame_size * 1000 / kFrameDelayMs;
uint32 bitrate = congestion_control_->GetBitrate(
testing_clock_.NowTicks() + base::TimeDelta::FromMilliseconds(300),
- base::TimeDelta::FromMilliseconds(300));
+ base::TimeDelta::FromMilliseconds(300),
+ soft_max_bitrate);
EXPECT_NEAR(
safe_bitrate / kTargetEmptyBufferFraction, bitrate, safe_bitrate * 0.05);
bitrate = congestion_control_->GetBitrate(
testing_clock_.NowTicks() + base::TimeDelta::FromMilliseconds(200),
- base::TimeDelta::FromMilliseconds(300));
+ base::TimeDelta::FromMilliseconds(300),
+ soft_max_bitrate);
EXPECT_NEAR(safe_bitrate / kTargetEmptyBufferFraction * 2 / 3,
bitrate,
safe_bitrate * 0.05);
bitrate = congestion_control_->GetBitrate(
testing_clock_.NowTicks() + base::TimeDelta::FromMilliseconds(100),
- base::TimeDelta::FromMilliseconds(300));
+ base::TimeDelta::FromMilliseconds(300),
+ soft_max_bitrate);
EXPECT_NEAR(safe_bitrate / kTargetEmptyBufferFraction * 1 / 3,
bitrate,
safe_bitrate * 0.05);
@@ -102,10 +112,11 @@ TEST_F(CongestionControlTest, SimpleRun) {
congestion_control_->SendFrameToTransport(
frame_id_++, safe_bitrate * 100 / 1000, testing_clock_.NowTicks());
- // Results should show that we have ~200ms to send
+ // Results should show that we have ~200ms to send.
bitrate = congestion_control_->GetBitrate(
testing_clock_.NowTicks() + base::TimeDelta::FromMilliseconds(300),
- base::TimeDelta::FromMilliseconds(300));
+ base::TimeDelta::FromMilliseconds(300),
+ soft_max_bitrate);
EXPECT_NEAR(safe_bitrate / kTargetEmptyBufferFraction * 2 / 3,
bitrate,
safe_bitrate * 0.05);
@@ -114,15 +125,15 @@ TEST_F(CongestionControlTest, SimpleRun) {
congestion_control_->SendFrameToTransport(
frame_id_++, safe_bitrate * 100 / 1000, testing_clock_.NowTicks());
- // Resulst should show that we have ~100ms to send
+ // Results should show that we have ~100ms to send.
bitrate = congestion_control_->GetBitrate(
testing_clock_.NowTicks() + base::TimeDelta::FromMilliseconds(300),
- base::TimeDelta::FromMilliseconds(300));
+ base::TimeDelta::FromMilliseconds(300),
+ soft_max_bitrate);
EXPECT_NEAR(safe_bitrate / kTargetEmptyBufferFraction * 1 / 3,
bitrate,
safe_bitrate * 0.05);
}
-
} // namespace cast
} // namespace media
diff --git a/chromium/media/cast/sender/external_video_encoder.cc b/chromium/media/cast/sender/external_video_encoder.cc
index 0423ea4730f..a44888e35cb 100644
--- a/chromium/media/cast/sender/external_video_encoder.cc
+++ b/chromium/media/cast/sender/external_video_encoder.cc
@@ -4,6 +4,8 @@
#include "media/cast/sender/external_video_encoder.h"
+#include <cmath>
+
#include "base/bind.h"
#include "base/logging.h"
#include "base/memory/scoped_vector.h"
@@ -11,10 +13,12 @@
#include "base/message_loop/message_loop.h"
#include "base/metrics/histogram.h"
#include "media/base/video_frame.h"
+#include "media/base/video_types.h"
#include "media/base/video_util.h"
#include "media/cast/cast_defines.h"
#include "media/cast/logging/logging_defines.h"
#include "media/cast/net/cast_transport_config.h"
+#include "media/cast/sender/vp8_quantizer_parser.h"
namespace {
@@ -37,16 +41,32 @@ namespace cast {
// Container for the associated data of a video frame being processed.
struct InProgressFrameEncode {
- const RtpTimestamp rtp_timestamp;
+ // The source content to encode.
+ const scoped_refptr<VideoFrame> video_frame;
+
+ // The reference time for this frame.
const base::TimeTicks reference_time;
+
+ // The callback to run when the result is ready.
const VideoEncoder::FrameEncodedCallback frame_encoded_callback;
- InProgressFrameEncode(RtpTimestamp rtp,
+ // The target encode bit rate.
+ const int target_bit_rate;
+
+ // The real-world encode start time. This is used to compute the encoded
+ // frame's |deadline_utilization| and so it uses the real-world clock instead
+ // of the CastEnvironment clock, the latter of which might be simulated.
+ const base::TimeTicks start_time;
+
+ InProgressFrameEncode(const scoped_refptr<VideoFrame>& v_frame,
base::TimeTicks r_time,
- VideoEncoder::FrameEncodedCallback callback)
- : rtp_timestamp(rtp),
+ VideoEncoder::FrameEncodedCallback callback,
+ int bit_rate)
+ : video_frame(v_frame),
reference_time(r_time),
- frame_encoded_callback(callback) {}
+ frame_encoded_callback(callback),
+ target_bit_rate(bit_rate),
+ start_time(base::TimeTicks::Now()) {}
};
// Owns a VideoEncoderAccelerator instance and provides the necessary adapters
@@ -72,8 +92,10 @@ class ExternalVideoEncoder::VEAClientImpl
video_encode_accelerator_(vea.Pass()),
encoder_active_(false),
next_frame_id_(0u),
- key_frame_encountered_(false) {
- }
+ key_frame_encountered_(false),
+ codec_profile_(media::VIDEO_CODEC_PROFILE_UNKNOWN),
+ vp8_key_frame_parsable_(false),
+ requested_bit_rate_(-1) {}
base::SingleThreadTaskRunner* task_runner() const {
return task_runner_.get();
@@ -85,13 +107,12 @@ class ExternalVideoEncoder::VEAClientImpl
uint32 first_frame_id) {
DCHECK(task_runner_->RunsTasksOnCurrentThread());
+ requested_bit_rate_ = start_bit_rate;
encoder_active_ = video_encode_accelerator_->Initialize(
- media::VideoFrame::I420,
- frame_size,
- codec_profile,
- start_bit_rate,
+ media::PIXEL_FORMAT_I420, frame_size, codec_profile, start_bit_rate,
this);
next_frame_id_ = first_frame_id;
+ codec_profile_ = codec_profile;
UMA_HISTOGRAM_BOOLEAN("Cast.Sender.VideoEncodeAcceleratorInitializeSuccess",
encoder_active_);
@@ -107,6 +128,7 @@ class ExternalVideoEncoder::VEAClientImpl
void SetBitRate(int bit_rate) {
DCHECK(task_runner_->RunsTasksOnCurrentThread());
+ requested_bit_rate_ = bit_rate;
video_encode_accelerator_->RequestEncodingParametersChange(bit_rate,
max_frame_rate_);
}
@@ -122,9 +144,8 @@ class ExternalVideoEncoder::VEAClientImpl
return;
in_progress_frame_encodes_.push_back(InProgressFrameEncode(
- TimeDeltaToRtpDelta(video_frame->timestamp(), kVideoFrequency),
- reference_time,
- frame_encoded_callback));
+ video_frame, reference_time, frame_encoded_callback,
+ requested_bit_rate_));
// BitstreamBufferReady will be called once the encoder is done.
video_encode_accelerator_->Encode(video_frame, key_frame_requested);
@@ -208,7 +229,8 @@ class ExternalVideoEncoder::VEAClientImpl
encoded_frame->referenced_frame_id = encoded_frame->frame_id;
else
encoded_frame->referenced_frame_id = encoded_frame->frame_id - 1;
- encoded_frame->rtp_timestamp = request.rtp_timestamp;
+ encoded_frame->rtp_timestamp = TimeDeltaToRtpDelta(
+ request.video_frame->timestamp(), kVideoFrequency);
encoded_frame->reference_time = request.reference_time;
if (!stream_header_.empty()) {
encoded_frame->data = stream_header_;
@@ -216,8 +238,69 @@ class ExternalVideoEncoder::VEAClientImpl
}
encoded_frame->data.append(
static_cast<const char*>(output_buffer->memory()), payload_size);
- // TODO(miu): Compute and populate the |deadline_utilization| and
- // |lossy_utilization| performance metrics in |encoded_frame|.
+
+ // If FRAME_DURATION metadata was provided in the source VideoFrame,
+ // compute the utilization metrics.
+ base::TimeDelta frame_duration;
+ if (request.video_frame->metadata()->GetTimeDelta(
+ media::VideoFrameMetadata::FRAME_DURATION, &frame_duration) &&
+ frame_duration > base::TimeDelta()) {
+ // Compute deadline utilization as the real-world time elapsed divided
+ // by the frame duration.
+ const base::TimeDelta processing_time =
+ base::TimeTicks::Now() - request.start_time;
+ encoded_frame->deadline_utilization =
+ processing_time.InSecondsF() / frame_duration.InSecondsF();
+
+ const double actual_bit_rate =
+ encoded_frame->data.size() * 8.0 / frame_duration.InSecondsF();
+ DCHECK_GT(request.target_bit_rate, 0);
+ const double bitrate_utilization =
+ actual_bit_rate / request.target_bit_rate;
+ double quantizer = QuantizerEstimator::NO_RESULT;
+ if (codec_profile_ == media::VP8PROFILE_ANY) {
+ // If the quantizer can be parsed from the key frame, try to parse
+ // the following delta frames as well.
+ // Otherwise, switch back to entropy estimation for the key frame
+ // and all the following delta frames.
+ if (key_frame || vp8_key_frame_parsable_) {
+ quantizer = ParseVp8HeaderQuantizer(
+ reinterpret_cast<const uint8*>(encoded_frame->data.data()),
+ encoded_frame->data.size());
+ if (quantizer < 0) {
+ LOG(ERROR) << "Unable to parse VP8 quantizer from encoded "
+ << (key_frame ? "key" : "delta")
+ << " frame, id=" << encoded_frame->frame_id;
+ if (key_frame) {
+ vp8_key_frame_parsable_ = false;
+ quantizer = quantizer_estimator_.EstimateForKeyFrame(
+ *request.video_frame);
+ } else {
+ quantizer = QuantizerEstimator::NO_RESULT;
+ }
+ } else {
+ if (key_frame) {
+ vp8_key_frame_parsable_ = true;
+ }
+ }
+ } else {
+ quantizer = quantizer_estimator_.EstimateForDeltaFrame(
+ *request.video_frame);
+ }
+ } else {
+ quantizer = (encoded_frame->dependency == EncodedFrame::KEY)
+ ? quantizer_estimator_.EstimateForKeyFrame(
+ *request.video_frame)
+ : quantizer_estimator_.EstimateForDeltaFrame(
+ *request.video_frame);
+ }
+ if (quantizer != QuantizerEstimator::NO_RESULT) {
+ encoded_frame->lossy_utilization = bitrate_utilization *
+ (quantizer / QuantizerEstimator::MAX_VP8_QUANTIZER);
+ }
+ } else {
+ quantizer_estimator_.Reset();
+ }
cast_environment_->PostTask(
CastEnvironment::MAIN,
@@ -295,6 +378,8 @@ class ExternalVideoEncoder::VEAClientImpl
uint32 next_frame_id_;
bool key_frame_encountered_;
std::string stream_header_;
+ VideoCodecProfile codec_profile_;
+ bool vp8_key_frame_parsable_;
// Shared memory buffers for output with the VideoAccelerator.
ScopedVector<base::SharedMemory> output_buffers_;
@@ -302,6 +387,12 @@ class ExternalVideoEncoder::VEAClientImpl
// FIFO list.
std::list<InProgressFrameEncode> in_progress_frame_encodes_;
+ // The requested encode bit rate for the next frame.
+ int requested_bit_rate_;
+
+ // Used to compute utilization metrics for each frame.
+ QuantizerEstimator quantizer_estimator_;
+
DISALLOW_COPY_AND_ASSIGN(VEAClientImpl);
};
@@ -471,5 +562,151 @@ scoped_ptr<VideoEncoder> SizeAdaptableExternalVideoEncoder::CreateEncoder() {
create_video_encode_memory_cb_));
}
+QuantizerEstimator::QuantizerEstimator() {}
+
+QuantizerEstimator::~QuantizerEstimator() {}
+
+void QuantizerEstimator::Reset() {
+ last_frame_pixel_buffer_.reset();
+}
+
+double QuantizerEstimator::EstimateForKeyFrame(const VideoFrame& frame) {
+ if (!CanExamineFrame(frame))
+ return NO_RESULT;
+
+ // If the size of the frame is different from the last frame, allocate a new
+ // buffer. The buffer only needs to be a fraction of the size of the entire
+ // frame, since the entropy analysis only examines a subset of each frame.
+ const gfx::Size size = frame.visible_rect().size();
+ const int rows_in_subset =
+ std::max(1, size.height() * FRAME_SAMPLING_PERCENT / 100);
+ if (last_frame_size_ != size || !last_frame_pixel_buffer_) {
+ last_frame_pixel_buffer_.reset(new uint8[size.width() * rows_in_subset]);
+ last_frame_size_ = size;
+ }
+
+ // Compute a histogram where each bucket represents the number of times two
+ // neighboring pixels were different by a specific amount. 511 buckets are
+ // needed, one for each integer in the range [-255,255].
+ int histogram[511];
+ memset(histogram, 0, sizeof(histogram));
+ const int row_skip = size.height() / rows_in_subset;
+ int y = 0;
+ for (int i = 0; i < rows_in_subset; ++i, y += row_skip) {
+ const uint8* const row_begin = frame.visible_data(VideoFrame::kYPlane) +
+ y * frame.stride(VideoFrame::kYPlane);
+ const uint8* const row_end = row_begin + size.width();
+ int left_hand_pixel_value = static_cast<int>(*row_begin);
+ for (const uint8* p = row_begin + 1; p < row_end; ++p) {
+ const int right_hand_pixel_value = static_cast<int>(*p);
+ const int difference = right_hand_pixel_value - left_hand_pixel_value;
+ const int histogram_index = difference + 255;
+ ++histogram[histogram_index];
+ left_hand_pixel_value = right_hand_pixel_value; // For next iteration.
+ }
+
+ // Copy the row of pixels into the buffer. This will be used when
+ // generating histograms for future delta frames.
+ memcpy(last_frame_pixel_buffer_.get() + i * size.width(),
+ row_begin,
+ size.width());
+ }
+
+ // Estimate a quantizer value depending on the difference data in the
+ // histogram and return it.
+ const int num_samples = (size.width() - 1) * rows_in_subset;
+ return ToQuantizerEstimate(ComputeEntropyFromHistogram(
+ histogram, arraysize(histogram), num_samples));
+}
+
+double QuantizerEstimator::EstimateForDeltaFrame(const VideoFrame& frame) {
+ if (!CanExamineFrame(frame))
+ return NO_RESULT;
+
+ // If the size of the |frame| has changed, no difference can be examined.
+ // In this case, process this frame as if it were a key frame.
+ const gfx::Size size = frame.visible_rect().size();
+ if (last_frame_size_ != size || !last_frame_pixel_buffer_)
+ return EstimateForKeyFrame(frame);
+ const int rows_in_subset =
+ std::max(1, size.height() * FRAME_SAMPLING_PERCENT / 100);
+
+ // Compute a histogram where each bucket represents the number of times the
+ // same pixel in this frame versus the last frame was different by a specific
+ // amount. 511 buckets are needed, one for each integer in the range
+ // [-255,255].
+ int histogram[511];
+ memset(histogram, 0, sizeof(histogram));
+ const int row_skip = size.height() / rows_in_subset;
+ int y = 0;
+ for (int i = 0; i < rows_in_subset; ++i, y += row_skip) {
+ const uint8* const row_begin = frame.visible_data(VideoFrame::kYPlane) +
+ y * frame.stride(VideoFrame::kYPlane);
+ const uint8* const row_end = row_begin + size.width();
+ uint8* const last_frame_row_begin =
+ last_frame_pixel_buffer_.get() + i * size.width();
+ for (const uint8* p = row_begin, *q = last_frame_row_begin; p < row_end;
+ ++p, ++q) {
+ const int difference = static_cast<int>(*p) - static_cast<int>(*q);
+ const int histogram_index = difference + 255;
+ ++histogram[histogram_index];
+ }
+
+ // Copy the row of pixels into the buffer. This will be used when
+ // generating histograms for future delta frames.
+ memcpy(last_frame_row_begin, row_begin, size.width());
+ }
+
+ // Estimate a quantizer value depending on the difference data in the
+ // histogram and return it.
+ const int num_samples = size.width() * rows_in_subset;
+ return ToQuantizerEstimate(ComputeEntropyFromHistogram(
+ histogram, arraysize(histogram), num_samples));
+}
+
+// static
+bool QuantizerEstimator::CanExamineFrame(const VideoFrame& frame) {
+ DCHECK_EQ(8, VideoFrame::PlaneHorizontalBitsPerPixel(frame.format(),
+ VideoFrame::kYPlane));
+ return media::IsYuvPlanar(frame.format()) &&
+ !frame.visible_rect().IsEmpty();
+}
+
+// static
+double QuantizerEstimator::ComputeEntropyFromHistogram(const int* histogram,
+ size_t num_buckets,
+ int num_samples) {
+ DCHECK_LT(0, num_samples);
+ double entropy = 0.0;
+ for (size_t i = 0; i < num_buckets; ++i) {
+ const double probability = static_cast<double>(histogram[i]) / num_samples;
+ if (probability > 0.0)
+ entropy = entropy - probability * log2(probability);
+ }
+ return entropy;
+}
+
+// static
+double QuantizerEstimator::ToQuantizerEstimate(double shannon_entropy) {
+ DCHECK_GE(shannon_entropy, 0.0);
+
+ // This math is based on an analysis of data produced by running a wide range
+ // of mirroring content in a Cast streaming session on a Chromebook Pixel
+ // (2013 edition). The output from the Pixel's built-in hardware encoder was
+ // compared to an identically-configured software implementation (libvpx)
+ // running alongside. Based on an analysis of the data, the following linear
+ // mapping seems to produce reasonable VP8 quantizer values from the
+ // |shannon_entropy| values.
+ //
+ // TODO(miu): Confirm whether this model and value work well on other
+ // platforms.
+ const double kEntropyAtMaxQuantizer = 7.5;
+ const double slope =
+ (MAX_VP8_QUANTIZER - MIN_VP8_QUANTIZER) / kEntropyAtMaxQuantizer;
+ const double quantizer = std::min<double>(
+ MAX_VP8_QUANTIZER, MIN_VP8_QUANTIZER + slope * shannon_entropy);
+ return quantizer;
+}
+
} // namespace cast
} // namespace media
diff --git a/chromium/media/cast/sender/external_video_encoder.h b/chromium/media/cast/sender/external_video_encoder.h
index fcd616c7f22..bea68fcd848 100644
--- a/chromium/media/cast/sender/external_video_encoder.h
+++ b/chromium/media/cast/sender/external_video_encoder.h
@@ -102,6 +102,61 @@ class SizeAdaptableExternalVideoEncoder : public SizeAdaptableVideoEncoderBase {
DISALLOW_COPY_AND_ASSIGN(SizeAdaptableExternalVideoEncoder);
};
+// A utility class for examining the sequence of frames sent to an external
+// encoder, and returning an estimate of the what the software VP8 encoder would
+// have used for a quantizer value when encoding each frame. The quantizer
+// value is related to the complexity of the content of the frame.
+class QuantizerEstimator {
+ public:
+ enum {
+ NO_RESULT = 0,
+ MIN_VP8_QUANTIZER = 4,
+ MAX_VP8_QUANTIZER = 63,
+ };
+
+ QuantizerEstimator();
+ ~QuantizerEstimator();
+
+ // Discard any state related to the processing of prior frames.
+ void Reset();
+
+ // Examine |frame| and estimate and return the quantizer value the software
+ // VP8 encoder would have used when encoding the frame, in the range
+ // [4.0,63.0]. If |frame| is not in planar YUV format, or its size is empty,
+ // this returns |NO_RESULT|.
+ double EstimateForKeyFrame(const VideoFrame& frame);
+ double EstimateForDeltaFrame(const VideoFrame& frame);
+
+ private:
+ enum {
+ // The percentage of each frame to sample. This value is based on an
+ // analysis that showed sampling 10% of the rows of a frame generated
+ // reasonably accurate results.
+ FRAME_SAMPLING_PERCENT = 10,
+ };
+
+ // Returns true if the frame is in planar YUV format.
+ static bool CanExamineFrame(const VideoFrame& frame);
+
+ // Returns a value in the range [0,log2(num_buckets)], the Shannon Entropy
+ // based on the probabilities of values falling within each of the buckets of
+ // the given |histogram|.
+ static double ComputeEntropyFromHistogram(const int* histogram,
+ size_t num_buckets,
+ int num_samples);
+
+ // Map the |shannon_entropy| to its corresponding software VP8 quantizer.
+ static double ToQuantizerEstimate(double shannon_entropy);
+
+ // A cache of a subset of rows of pixels from the last frame examined. This
+ // is used to compute the entropy of the difference between frames, which in
+ // turn is used to compute the entropy and quantizer.
+ scoped_ptr<uint8[]> last_frame_pixel_buffer_;
+ gfx::Size last_frame_size_;
+
+ DISALLOW_COPY_AND_ASSIGN(QuantizerEstimator);
+};
+
} // namespace cast
} // namespace media
diff --git a/chromium/media/cast/sender/external_video_encoder_unittest.cc b/chromium/media/cast/sender/external_video_encoder_unittest.cc
new file mode 100644
index 00000000000..78359f3408c
--- /dev/null
+++ b/chromium/media/cast/sender/external_video_encoder_unittest.cc
@@ -0,0 +1,81 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/sender/external_video_encoder.h"
+
+#include "media/base/video_frame.h"
+#include "media/base/video_types.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+namespace cast {
+
+namespace {
+
+scoped_refptr<VideoFrame> CreateFrame(const uint8* y_plane_data,
+ const gfx::Size& size) {
+ scoped_refptr<VideoFrame> result = VideoFrame::CreateFrame(PIXEL_FORMAT_I420,
+ size,
+ gfx::Rect(size),
+ size,
+ base::TimeDelta());
+ for (int y = 0, y_end = size.height(); y < y_end; ++y) {
+ memcpy(result->visible_data(VideoFrame::kYPlane) +
+ y * result->stride(VideoFrame::kYPlane),
+ y_plane_data + y * size.width(),
+ size.width());
+ }
+ return result;
+}
+
+} // namespace
+
+TEST(QuantizerEstimator, EstimatesForTrivialFrames) {
+ QuantizerEstimator qe;
+
+ const gfx::Size frame_size(320, 180);
+ const scoped_ptr<uint8[]> black_frame_data(new uint8[frame_size.GetArea()]);
+ memset(black_frame_data.get(), 0, frame_size.GetArea());
+ const scoped_refptr<VideoFrame> black_frame =
+ CreateFrame(black_frame_data.get(), frame_size);
+
+ // A solid color frame should always generate a minimum quantizer value (4.0)
+ // as a key frame. If it is provided repeatedly as delta frames, the minimum
+ // quantizer value should be repeatedly generated since there is no difference
+ // between frames.
+ EXPECT_EQ(4.0, qe.EstimateForKeyFrame(*black_frame));
+ for (int i = 0; i < 3; ++i)
+ EXPECT_EQ(4.0, qe.EstimateForDeltaFrame(*black_frame));
+
+ const scoped_ptr<uint8[]> checkerboard_frame_data(
+ new uint8[frame_size.GetArea()]);
+ for (int i = 0, end = frame_size.GetArea(); i < end; ++i)
+ checkerboard_frame_data.get()[i] = (((i % 2) == 0) ? 0 : 255);
+ const scoped_refptr<VideoFrame> checkerboard_frame =
+ CreateFrame(checkerboard_frame_data.get(), frame_size);
+
+ // Now, introduce a frame with a checkerboard pattern. Half of the pixels
+ // will have a difference of 255, and half will have zero difference.
+ // Therefore, the Shannon Entropy should be 1.0 and the resulting quantizer
+ // estimate should be ~11.9.
+ EXPECT_NEAR(11.9, qe.EstimateForDeltaFrame(*checkerboard_frame), 0.1);
+
+ // Now, introduce a series of frames with "random snow" in them. Expect this
+ // results in high quantizer estimates.
+ for (int i = 0; i < 3; ++i) {
+ int rand_seed = 0xdeadbeef + i;
+ const scoped_ptr<uint8[]> random_frame_data(
+ new uint8[frame_size.GetArea()]);
+ for (int j = 0, end = frame_size.GetArea(); j < end; ++j) {
+ rand_seed = (1103515245 * rand_seed + 12345) % (1 << 31);
+ random_frame_data.get()[j] = static_cast<uint8>(rand_seed & 0xff);
+ }
+ const scoped_refptr<VideoFrame> random_frame =
+ CreateFrame(random_frame_data.get(), frame_size);
+ EXPECT_LE(50.0, qe.EstimateForDeltaFrame(*random_frame));
+ }
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/sender/frame_sender.cc b/chromium/media/cast/sender/frame_sender.cc
index a7d9a27cb60..ec37a6d4ec4 100644
--- a/chromium/media/cast/sender/frame_sender.cc
+++ b/chromium/media/cast/sender/frame_sender.cc
@@ -260,6 +260,11 @@ void FrameSender::SendEncodedFrame(
encoded_frame->new_playout_delay_ms =
target_playout_delay_.InMilliseconds();
}
+
+ TRACE_EVENT_ASYNC_BEGIN1("cast.stream",
+ is_audio_ ? "Audio Transport" : "Video Transport",
+ frame_id,
+ "rtp_timestamp", encoded_frame->rtp_timestamp);
transport_sender_->InsertFrame(ssrc_, *encoded_frame);
}
@@ -288,6 +293,14 @@ void FrameSender::OnReceivedCastFeedback(const RtcpCastMessage& cast_feedback) {
if (cast_feedback.missing_frames_and_packets.empty()) {
OnAck(cast_feedback.ack_frame_id);
+ if (latest_acked_frame_id_ == cast_feedback.ack_frame_id) {
+ VLOG(1) << SENDER_SSRC << "Received duplicate ACK for frame "
+ << latest_acked_frame_id_;
+ TRACE_EVENT_INSTANT2(
+ "cast.stream", "Duplicate ACK", TRACE_EVENT_SCOPE_THREAD,
+ "ack_frame_id", cast_feedback.ack_frame_id,
+ "last_sent_frame_id", last_sent_frame_id_);
+ }
// We only count duplicate ACKs when we have sent newer frames.
if (latest_acked_frame_id_ == cast_feedback.ack_frame_id &&
latest_acked_frame_id_ != last_sent_frame_id_) {
@@ -297,8 +310,6 @@ void FrameSender::OnReceivedCastFeedback(const RtcpCastMessage& cast_feedback) {
}
// TODO(miu): The values "2" and "3" should be derived from configuration.
if (duplicate_ack_counter_ >= 2 && duplicate_ack_counter_ % 3 == 2) {
- VLOG(1) << SENDER_SSRC << "Received duplicate ACK for frame "
- << latest_acked_frame_id_;
ResendForKickstart();
}
} else {
@@ -323,12 +334,24 @@ void FrameSender::OnReceivedCastFeedback(const RtcpCastMessage& cast_feedback) {
VLOG(2) << SENDER_SSRC
<< "Received ACK" << (is_acked_out_of_order ? " out-of-order" : "")
<< " for frame " << cast_feedback.ack_frame_id;
- if (!is_acked_out_of_order) {
+ if (is_acked_out_of_order) {
+ TRACE_EVENT_INSTANT2(
+ "cast.stream", "ACK out of order", TRACE_EVENT_SCOPE_THREAD,
+ "ack_frame_id", cast_feedback.ack_frame_id,
+ "latest_acked_frame_id", latest_acked_frame_id_);
+ } else {
// Cancel resends of acked frames.
std::vector<uint32> cancel_sending_frames;
while (latest_acked_frame_id_ != cast_feedback.ack_frame_id) {
latest_acked_frame_id_++;
cancel_sending_frames.push_back(latest_acked_frame_id_);
+ // This is a good place to match the trace for frame ids
+ // since this ensures we not only track frame ids that are
+ // implicitly ACKed, but also handles duplicate ACKs
+ TRACE_EVENT_ASYNC_END1("cast.stream",
+ is_audio_ ? "Audio Transport" : "Video Transport",
+ cast_feedback.ack_frame_id,
+ "RTT_usecs", current_round_trip_time_.InMicroseconds());
}
transport_sender_->CancelSendingFrames(ssrc_, cancel_sending_frames);
latest_acked_frame_id_ = cast_feedback.ack_frame_id;
diff --git a/chromium/media/cast/sender/h264_vt_encoder_unittest.cc b/chromium/media/cast/sender/h264_vt_encoder_unittest.cc
index 0f3b7f360c9..7ef7eefdac4 100644
--- a/chromium/media/cast/sender/h264_vt_encoder_unittest.cc
+++ b/chromium/media/cast/sender/h264_vt_encoder_unittest.cc
@@ -252,7 +252,7 @@ class H264VideoToolboxEncoderTest : public ::testing::Test {
video_sender_config_.codec = CODEC_VIDEO_H264;
const gfx::Size size(kVideoWidth, kVideoHeight);
frame_ = media::VideoFrame::CreateFrame(
- VideoFrame::I420, size, gfx::Rect(size), size, base::TimeDelta());
+ PIXEL_FORMAT_I420, size, gfx::Rect(size), size, base::TimeDelta());
PopulateVideoFrame(frame_.get(), 123);
}
@@ -306,8 +306,9 @@ TEST_F(H264VideoToolboxEncoderTest, CheckFrameMetadataSequence) {
#if defined(USE_PROPRIETARY_CODECS)
TEST_F(H264VideoToolboxEncoderTest, CheckFramesAreDecodable) {
VideoDecoderConfig config(kCodecH264, H264PROFILE_MAIN, frame_->format(),
- frame_->coded_size(), frame_->visible_rect(),
- frame_->natural_size(), nullptr, 0, false);
+ COLOR_SPACE_UNSPECIFIED, frame_->coded_size(),
+ frame_->visible_rect(), frame_->natural_size(),
+ nullptr, 0, false);
scoped_refptr<EndToEndFrameChecker> checker(new EndToEndFrameChecker(config));
VideoEncoder::FrameEncodedCallback cb =
diff --git a/chromium/media/cast/sender/performance_metrics_overlay.cc b/chromium/media/cast/sender/performance_metrics_overlay.cc
index 5b7c7147ee0..f3086247b36 100644
--- a/chromium/media/cast/sender/performance_metrics_overlay.cc
+++ b/chromium/media/cast/sender/performance_metrics_overlay.cc
@@ -217,6 +217,12 @@ void MaybeRenderPerformanceMetricsOverlay(int target_bitrate,
return;
}
+ // Can't render to unmappable memory (DmaBuf, CVPixelBuffer).
+ if (!frame->IsMappable()) {
+ DVLOG(2) << "Cannot render overlay: frame uses unmappable memory.";
+ return;
+ }
+
// Compute the physical pixel top row for the bottom-most line of text.
const int line_height = (kCharacterHeight + kLineSpacing) * kScale;
int top = frame->visible_rect().height() - line_height;
diff --git a/chromium/media/cast/sender/video_encoder_unittest.cc b/chromium/media/cast/sender/video_encoder_unittest.cc
index a24de5ed11b..2c2d48041b2 100644
--- a/chromium/media/cast/sender/video_encoder_unittest.cc
+++ b/chromium/media/cast/sender/video_encoder_unittest.cc
@@ -154,8 +154,8 @@ class VideoEncoderTest
if (video_frame_factory_)
frame = video_frame_factory_->MaybeCreateFrame(size, timestamp);
if (!frame) {
- frame = media::VideoFrame::CreateFrame(
- VideoFrame::I420, size, gfx::Rect(size), size, timestamp);
+ frame = media::VideoFrame::CreateFrame(PIXEL_FORMAT_I420, size,
+ gfx::Rect(size), size, timestamp);
}
PopulateVideoFrame(frame.get(), 123);
return frame;
diff --git a/chromium/media/cast/sender/video_sender.cc b/chromium/media/cast/sender/video_sender.cc
index ebdde8e8ea6..a96f2fde278 100644
--- a/chromium/media/cast/sender/video_sender.cc
+++ b/chromium/media/cast/sender/video_sender.cc
@@ -5,6 +5,7 @@
#include "media/cast/sender/video_sender.h"
#include <algorithm>
+#include <cmath>
#include <cstring>
#include "base/bind.h"
@@ -160,6 +161,10 @@ void VideoSender::InsertRawVideoFrame(
last_enqueued_frame_rtp_timestamp_) ||
reference_time <= last_enqueued_frame_reference_time_)) {
VLOG(1) << "Dropping video frame: RTP or reference time did not increase.";
+ TRACE_EVENT_INSTANT2("cast.stream", "Video Frame Drop",
+ TRACE_EVENT_SCOPE_THREAD,
+ "rtp_timestamp", rtp_timestamp,
+ "reason", "time did not increase");
return;
}
@@ -189,20 +194,27 @@ void VideoSender::InsertRawVideoFrame(
// drop every subsequent frame for the rest of the session.
video_encoder_->EmitFrames();
+ TRACE_EVENT_INSTANT2("cast.stream", "Video Frame Drop",
+ TRACE_EVENT_SCOPE_THREAD,
+ "rtp_timestamp", rtp_timestamp,
+ "reason", "too much in flight");
return;
}
- uint32 bitrate = congestion_control_->GetBitrate(
- reference_time + target_playout_delay_, target_playout_delay_);
+ if (video_frame->visible_rect().IsEmpty()) {
+ VLOG(1) << "Rejecting empty video frame.";
+ return;
+ }
+
+ const int bitrate = congestion_control_->GetBitrate(
+ reference_time + target_playout_delay_, target_playout_delay_,
+ GetMaximumTargetBitrateForFrame(*video_frame));
if (bitrate != last_bitrate_) {
video_encoder_->SetBitRate(bitrate);
last_bitrate_ = bitrate;
}
- if (video_frame->visible_rect().IsEmpty()) {
- VLOG(1) << "Rejecting empty video frame.";
- return;
- }
+ TRACE_COUNTER_ID1("cast.stream", "Video Target Bitrate", this, bitrate);
MaybeRenderPerformanceMetricsOverlay(bitrate,
frames_in_encoder_ + 1,
@@ -217,12 +229,17 @@ void VideoSender::InsertRawVideoFrame(
weak_factory_.GetWeakPtr(),
video_frame,
bitrate))) {
+ TRACE_EVENT_ASYNC_BEGIN1("cast.stream", "Video Encode", video_frame.get(),
+ "rtp_timestamp", rtp_timestamp);
frames_in_encoder_++;
duration_in_encoder_ += duration_added_by_next_frame;
last_enqueued_frame_rtp_timestamp_ = rtp_timestamp;
last_enqueued_frame_reference_time_ = reference_time;
} else {
VLOG(1) << "Encoder rejected a frame. Skipping...";
+ TRACE_EVENT_INSTANT1("cast.stream", "Video Encode Reject",
+ TRACE_EVENT_SCOPE_THREAD,
+ "rtp_timestamp", rtp_timestamp);
}
}
@@ -248,6 +265,57 @@ void VideoSender::OnAck(uint32 frame_id) {
video_encoder_->LatestFrameIdToReference(frame_id);
}
+// static
+int VideoSender::GetMaximumTargetBitrateForFrame(
+ const media::VideoFrame& frame) {
+ enum {
+ // Constants used to linearly translate between lines of resolution and a
+ // maximum target bitrate. These values are based on observed quality
+ // trade-offs over a wide range of content. The math will use these values
+ // to compute a bitrate of 2 Mbps for 360 lines of resolution and 4 Mbps for
+ // 720 lines.
+ BITRATE_FOR_HIGH_RESOLUTION = 4000000,
+ BITRATE_FOR_STANDARD_RESOLUTION = 2000000,
+ HIGH_RESOLUTION_LINES = 720,
+ STANDARD_RESOLUTION_LINES = 360,
+
+ // The smallest maximum target bitrate, regardless of what the math says.
+ MAX_BITRATE_LOWER_BOUND = 1000000,
+
+ // Constants used to boost the result for high frame rate content.
+ HIGH_FRAME_RATE_THRESHOLD_USEC = 25000, // 40 FPS
+ HIGH_FRAME_RATE_BOOST_NUMERATOR = 3,
+ HIGH_FRAME_RATE_BOOST_DENOMINATOR = 2,
+ };
+
+ // Determine the approximate height of a 16:9 frame having the same area
+ // (number of pixels) as |frame|.
+ const gfx::Size& resolution = frame.visible_rect().size();
+ const int lines_of_resolution =
+ ((resolution.width() * 9) == (resolution.height() * 16)) ?
+ resolution.height() :
+ static_cast<int>(sqrt(resolution.GetArea() * 9.0 / 16.0));
+
+ // Linearly translate from |lines_of_resolution| to a maximum target bitrate.
+ int64 result = lines_of_resolution - STANDARD_RESOLUTION_LINES;
+ result *= BITRATE_FOR_HIGH_RESOLUTION - BITRATE_FOR_STANDARD_RESOLUTION;
+ result /= HIGH_RESOLUTION_LINES - STANDARD_RESOLUTION_LINES;
+ result += BITRATE_FOR_STANDARD_RESOLUTION;
+
+ // Boost the result for high frame rate content.
+ base::TimeDelta frame_duration;
+ if (frame.metadata()->GetTimeDelta(media::VideoFrameMetadata::FRAME_DURATION,
+ &frame_duration) &&
+ frame_duration > base::TimeDelta() &&
+ frame_duration.InMicroseconds() <= HIGH_FRAME_RATE_THRESHOLD_USEC) {
+ result *= HIGH_FRAME_RATE_BOOST_NUMERATOR;
+ result /= HIGH_FRAME_RATE_BOOST_DENOMINATOR;
+ }
+
+ // Return a lower-bounded result.
+ return std::max<int>(result, MAX_BITRATE_LOWER_BOUND);
+}
+
void VideoSender::OnEncodedVideoFrame(
const scoped_refptr<media::VideoFrame>& video_frame,
int encoder_bitrate,
@@ -263,6 +331,10 @@ void VideoSender::OnEncodedVideoFrame(
last_reported_deadline_utilization_ = encoded_frame->deadline_utilization;
last_reported_lossy_utilization_ = encoded_frame->lossy_utilization;
+ TRACE_EVENT_ASYNC_END2("cast.stream", "Video Encode", video_frame.get(),
+ "deadline_utilization", last_reported_deadline_utilization_,
+ "lossy_utilization", last_reported_lossy_utilization_);
+
// Report the resource utilization for processing this frame. Take the
// greater of the two utilization values and attenuate them such that the
// target utilization is reported as the maximum sustainable amount.
diff --git a/chromium/media/cast/sender/video_sender.h b/chromium/media/cast/sender/video_sender.h
index ac5d6d06dcd..826321cf5b3 100644
--- a/chromium/media/cast/sender/video_sender.h
+++ b/chromium/media/cast/sender/video_sender.h
@@ -64,6 +64,13 @@ class VideoSender : public FrameSender,
base::TimeDelta GetInFlightMediaDuration() const final;
void OnAck(uint32 frame_id) final;
+ // Return the maximum target bitrate that should be used for the given video
+ // |frame|. This will be provided to CongestionControl as a soft maximum
+ // limit, and should be interpreted as "the point above which the extra
+ // encoder CPU time + network bandwidth usage isn't warranted for the amount
+ // of further quality improvement to be gained."
+ static int GetMaximumTargetBitrateForFrame(const media::VideoFrame& frame);
+
private:
// Called by the |video_encoder_| with the next EncodedFrame to send.
void OnEncodedVideoFrame(const scoped_refptr<media::VideoFrame>& video_frame,
@@ -87,7 +94,7 @@ class VideoSender : public FrameSender,
// Remember what we set the bitrate to before, no need to set it again if
// we get the same value.
- uint32 last_bitrate_;
+ int last_bitrate_;
PlayoutDelayChangeCB playout_delay_change_cb_;
diff --git a/chromium/media/cast/sender/video_sender_unittest.cc b/chromium/media/cast/sender/video_sender_unittest.cc
index 8100b22e556..4e4996b2fee 100644
--- a/chromium/media/cast/sender/video_sender_unittest.cc
+++ b/chromium/media/cast/sender/video_sender_unittest.cc
@@ -116,7 +116,29 @@ class PeerVideoSender : public VideoSender {
transport_sender,
base::Bind(&IgnorePlayoutDelayChanges)) {}
using VideoSender::OnReceivedCastFeedback;
+ using VideoSender::GetMaximumTargetBitrateForFrame;
};
+
+// Creates a VideoFrame NOT backed by actual memory storage. The frame's
+// metadata (i.e., size and frame duration) are all that are needed to test the
+// GetMaximumTargetBitrateForFrame() logic.
+scoped_refptr<VideoFrame> CreateFakeFrame(const gfx::Size& resolution,
+ bool high_frame_rate_in_metadata) {
+ const scoped_refptr<VideoFrame> frame = VideoFrame::WrapExternalData(
+ PIXEL_FORMAT_I420,
+ resolution,
+ gfx::Rect(resolution),
+ resolution,
+ static_cast<uint8*>(nullptr) + 1,
+ resolution.GetArea() * 3 / 2,
+ base::TimeDelta());
+ const double frame_rate = high_frame_rate_in_metadata ? 60.0 : 30.0;
+ frame->metadata()->SetTimeDelta(
+ VideoFrameMetadata::FRAME_DURATION,
+ base::TimeDelta::FromSecondsD(1.0 / frame_rate));
+ return frame;
+}
+
} // namespace
class VideoSenderTest : public ::testing::Test {
@@ -198,7 +220,7 @@ class VideoSenderTest : public ::testing::Test {
gfx::Size size(kWidth, kHeight);
scoped_refptr<media::VideoFrame> video_frame =
media::VideoFrame::CreateFrame(
- VideoFrame::I420, size, gfx::Rect(size), size,
+ PIXEL_FORMAT_I420, size, gfx::Rect(size), size,
testing_clock_->NowTicks() - first_frame_timestamp_);
PopulateVideoFrame(video_frame.get(), last_pixel_value_++);
return video_frame;
@@ -210,7 +232,7 @@ class VideoSenderTest : public ::testing::Test {
gfx::Size size(kWidth, kHeight);
scoped_refptr<media::VideoFrame> video_frame =
media::VideoFrame::CreateFrame(
- VideoFrame::I420, size, gfx::Rect(size), size,
+ PIXEL_FORMAT_I420, size, gfx::Rect(size), size,
testing_clock_->NowTicks() - first_frame_timestamp_);
PopulateVideoFrameWithNoise(video_frame.get());
return video_frame;
@@ -593,5 +615,51 @@ TEST_F(VideoSenderTest, PopulatesResourceUtilizationInFrameMetadata) {
}
}
+// Tests that VideoSender::GetMaximumTargetBitrateForFrame() returns the correct
+// result for a number of frame resolution combinations.
+TEST(VideoSenderMathTest, ComputesCorrectMaximumTargetBitratesForFrames) {
+ const struct {
+ int width;
+ int height;
+ bool high_frame_rate;
+ int expected_bitrate;
+ } kTestCases[] = {
+ // Standard 16:9 resolutions, non-HFR.
+ { 16, 9, false, 1000000 },
+ { 320, 180, false, 1000000 },
+ { 640, 360, false, 2000000 },
+ { 800, 450, false, 2500000 },
+ { 1280, 720, false, 4000000 },
+ { 1920, 1080, false, 6000000 },
+ { 3840, 2160, false, 12000000 },
+
+ // Standard 16:9 resolutions, HFR.
+ { 16, 9, true, 1000000 },
+ { 320, 180, true, 1500000 },
+ { 640, 360, true, 3000000 },
+ { 800, 450, true, 3750000 },
+ { 1280, 720, true, 6000000 },
+ { 1920, 1080, true, 9000000 },
+ { 3840, 2160, true, 18000000 },
+
+ // 4:3 and oddball resolutions.
+ { 640, 480, false, 2305555 },
+ { 1024, 768, false, 3694444 },
+ { 10, 5000, false, 1000000 },
+ { 1234, 567, false, 3483333 },
+ { 16384, 16384, true, 102399999 },
+ };
+
+ for (size_t i = 0; i < arraysize(kTestCases); ++i) {
+ const gfx::Size resolution(kTestCases[i].width, kTestCases[i].height);
+ SCOPED_TRACE(::testing::Message() << "resolution=" << resolution.ToString()
+ << ", hfr=" << kTestCases[i].high_frame_rate);
+ const scoped_refptr<VideoFrame> frame =
+ CreateFakeFrame(resolution, kTestCases[i].high_frame_rate);
+ EXPECT_EQ(kTestCases[i].expected_bitrate,
+ PeerVideoSender::GetMaximumTargetBitrateForFrame(*frame));
+ }
+}
+
} // namespace cast
} // namespace media
diff --git a/chromium/media/cast/sender/vp8_encoder.cc b/chromium/media/cast/sender/vp8_encoder.cc
index 97139e8d84d..7bb2f11f5ff 100644
--- a/chromium/media/cast/sender/vp8_encoder.cc
+++ b/chromium/media/cast/sender/vp8_encoder.cc
@@ -7,7 +7,7 @@
#include "base/logging.h"
#include "media/base/video_frame.h"
#include "media/cast/cast_defines.h"
-#include "third_party/libvpx/source/libvpx/vpx/vp8cx.h"
+#include "third_party/libvpx_new/source/libvpx/vpx/vp8cx.h"
namespace media {
namespace cast {
diff --git a/chromium/media/cast/sender/vp8_encoder.h b/chromium/media/cast/sender/vp8_encoder.h
index fe2c54c8037..8888097ac41 100644
--- a/chromium/media/cast/sender/vp8_encoder.h
+++ b/chromium/media/cast/sender/vp8_encoder.h
@@ -10,7 +10,7 @@
#include "base/threading/thread_checker.h"
#include "media/cast/cast_config.h"
#include "media/cast/sender/software_video_encoder.h"
-#include "third_party/libvpx/source/libvpx/vpx/vpx_encoder.h"
+#include "third_party/libvpx_new/source/libvpx/vpx/vpx_encoder.h"
#include "ui/gfx/geometry/size.h"
namespace media {
diff --git a/chromium/media/cast/sender/vp8_quantizer_parser.cc b/chromium/media/cast/sender/vp8_quantizer_parser.cc
new file mode 100644
index 00000000000..167e7cccb35
--- /dev/null
+++ b/chromium/media/cast/sender/vp8_quantizer_parser.cc
@@ -0,0 +1,209 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/cast/sender/vp8_quantizer_parser.h"
+#include "base/logging.h"
+
+namespace media {
+namespace cast {
+
+namespace {
+// Vp8BitReader is a re-implementation of a subset of the VP8 entropy decoder.
+// It is used to decompress the VP8 bitstream for the purposes of quickly
+// parsing the VP8 frame headers. It is mostly the exact same implementation
+// found in third_party/libvpx_new/.../vp8/decoder/dboolhuff.h except that only
+// the portion of the implementation needed to parse the frame headers is
+// present. As of this writing, the implementation in libvpx could not be
+// re-used because of the way that the code is structured, and lack of the
+// necessary parts being exported.
+class Vp8BitReader {
+ public:
+ Vp8BitReader(const uint8* data, size_t size)
+ : encoded_data_(data), encoded_data_end_(data + size) {
+ Vp8DecoderReadBytes();
+ }
+ ~Vp8BitReader() {}
+
+ // Decode one bit. The output is 0 or 1.
+ unsigned int DecodeBit();
+ // Decode a value with |num_bits|. The decoding order is MSB first.
+ unsigned int DecodeValue(unsigned int num_bits);
+
+ private:
+ // Read new bytes frome the encoded data buffer until |bit_count_| > 0.
+ void Vp8DecoderReadBytes();
+
+ const uint8* encoded_data_; // Current byte to decode.
+ const uint8* const encoded_data_end_; // The end of the byte to decode.
+ // The following two variables are maintained by the decoder.
+ // General decoding rule:
+ // If |value_| is in the range of 0 to half of |range_|, output 0.
+ // Otherwise output 1.
+ // |range_| and |value_| need to be shifted when necessary to avoid underflow.
+ unsigned int range_ = 255;
+ unsigned int value_ = 0;
+ // Number of valid bits left to decode. Initializing it to -8 to let the
+ // decoder load two bytes at the beginning. The lower byte is used as
+ // a buffer byte. During the decoding, decoder needs to call
+ // Vp8DecoderReadBytes() to load new bytes when it becomes negative.
+ int bit_count_ = -8;
+
+ DISALLOW_COPY_AND_ASSIGN(Vp8BitReader);
+};
+
+// The number of bits to be left-shifted to make the variable range_ over 128.
+const uint8 vp8_shift[128] = {
+ 0, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
+
+// Mapping from the q_index(0-127) to the quantizer value(0-63).
+const uint8 vp8_quantizer_lookup[128] = {
+ 0, 1, 2, 3, 4, 5, 6, 6, 7, 8, 9, 10, 10, 11, 12, 12, 13, 13, 14,
+ 15, 16, 17, 18, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 27, 28, 28, 29, 29,
+ 30, 30, 31, 31, 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 37, 37, 38, 38, 39,
+ 39, 40, 40, 41, 41, 42, 42, 42, 43, 43, 43, 44, 44, 44, 45, 45, 45, 46, 46,
+ 46, 47, 47, 47, 48, 48, 48, 49, 49, 49, 50, 50, 50, 51, 51, 51, 52, 52, 52,
+ 53, 53, 53, 54, 54, 54, 55, 55, 55, 56, 56, 56, 57, 57, 57, 58, 58, 58, 59,
+ 59, 59, 60, 60, 60, 61, 61, 61, 62, 62, 62, 63, 63, 63};
+
+void Vp8BitReader::Vp8DecoderReadBytes() {
+ int shift = -bit_count_;
+ while ((shift >= 0) && (encoded_data_ < encoded_data_end_)) {
+ bit_count_ += 8;
+ value_ |= static_cast<unsigned int>(*encoded_data_) << shift;
+ ++encoded_data_;
+ shift -= 8;
+ }
+}
+
+unsigned int Vp8BitReader::DecodeBit() {
+ unsigned int decoded_bit = 0;
+ unsigned int split = 1 + (((range_ - 1) * 128) >> 8);
+ if (bit_count_ < 0) {
+ Vp8DecoderReadBytes();
+ }
+ DCHECK_GE(bit_count_, 0);
+ unsigned int shifted_split = split << 8;
+ if (value_ >= shifted_split) {
+ range_ -= split;
+ value_ -= shifted_split;
+ decoded_bit = 1;
+ } else {
+ range_ = split;
+ }
+ if (range_ < 128) {
+ int shift = vp8_shift[range_];
+ range_ <<= shift;
+ value_ <<= shift;
+ bit_count_ -= shift;
+ }
+ return decoded_bit;
+}
+
+unsigned int Vp8BitReader::DecodeValue(unsigned int num_bits) {
+ unsigned int decoded_value = 0;
+ for (int i = static_cast<int>(num_bits) - 1; i >= 0; i--) {
+ decoded_value |= (DecodeBit() << i);
+ }
+ return decoded_value;
+}
+
+// Parse the Segment Header part in the first partition.
+void ParseSegmentHeader(Vp8BitReader* bit_reader) {
+ const bool segmentation_enabled = (bit_reader->DecodeBit() != 0);
+ DVLOG(2) << "segmentation_enabled:" << segmentation_enabled;
+ if (segmentation_enabled) {
+ const bool update_mb_segmentation_map = (bit_reader->DecodeBit() != 0);
+ const bool update_mb_segmentation_data = (bit_reader->DecodeBit() != 0);
+ DVLOG(2) << "update_mb_segmentation_data:" << update_mb_segmentation_data;
+ if (update_mb_segmentation_data) {
+ bit_reader->DecodeBit();
+ for (int i = 0; i < 4; ++i) {
+ if (bit_reader->DecodeBit()) {
+ bit_reader->DecodeValue(7 + 1); // Parse 7 bits value + 1 sign bit.
+ }
+ }
+ for (int i = 0; i < 4; ++i) {
+ if (bit_reader->DecodeBit()) {
+ bit_reader->DecodeValue(6 + 1); // Parse 6 bits value + 1 sign bit.
+ }
+ }
+ }
+
+ if (update_mb_segmentation_map) {
+ for (int i = 0; i < 3; ++i) {
+ if (bit_reader->DecodeBit()) {
+ bit_reader->DecodeValue(8);
+ }
+ }
+ }
+ }
+}
+
+// Parse the Filter Header in the first partition.
+void ParseFilterHeader(Vp8BitReader* bit_reader) {
+ // Parse 1 bit filter_type + 6 bits loop_filter_level + 3 bits
+ // sharpness_level.
+ bit_reader->DecodeValue(1 + 6 + 3);
+ if (bit_reader->DecodeBit()) {
+ if (bit_reader->DecodeBit()) {
+ for (int i = 0; i < 4; ++i) {
+ if (bit_reader->DecodeBit()) {
+ bit_reader->DecodeValue(6 + 1); // Parse 6 bits value + 1 sign bit.
+ }
+ }
+ for (int i = 0; i < 4; ++i) {
+ if (bit_reader->DecodeBit()) {
+ bit_reader->DecodeValue(6 + 1); // Parse 6 bits value + 1 sign bit.
+ }
+ }
+ }
+ }
+}
+} // unnamed namespace
+
+int ParseVp8HeaderQuantizer(const uint8* encoded_data, size_t size) {
+ DCHECK(encoded_data);
+ if (size <= 3) {
+ return -1;
+ }
+ const bool is_key = !(encoded_data[0] & 1);
+ const unsigned int header_3bytes =
+ encoded_data[0] | (encoded_data[1] << 8) | (encoded_data[2] << 16);
+ // Parse the size of the first partition.
+ unsigned int partition_size = (header_3bytes >> 5);
+ encoded_data += 3; // Skip 3 bytes.
+ size -= 3;
+ if (is_key) {
+ if (size <= 7) {
+ return -1;
+ }
+ encoded_data += 7; // Skip 7 bytes.
+ size -= 7;
+ }
+ if (size < partition_size) {
+ return -1;
+ }
+ Vp8BitReader bit_reader(encoded_data, partition_size);
+ if (is_key) {
+ bit_reader.DecodeValue(1 + 1); // Parse two bits: color_space + clamp_type.
+ }
+ ParseSegmentHeader(&bit_reader);
+ ParseFilterHeader(&bit_reader);
+ // Parse the number of coefficient data partitions.
+ bit_reader.DecodeValue(2);
+ // Parse the base q_index.
+ uint8 q_index = static_cast<uint8>(bit_reader.DecodeValue(7));
+ if (q_index > 127) {
+ return 63;
+ }
+ return vp8_quantizer_lookup[q_index];
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cast/sender/vp8_quantizer_parser.h b/chromium/media/cast/sender/vp8_quantizer_parser.h
new file mode 100644
index 00000000000..abd0d5e0c25
--- /dev/null
+++ b/chromium/media/cast/sender/vp8_quantizer_parser.h
@@ -0,0 +1,20 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_CAST_SENDER_VP8_QUANTIZER_PARSER_H_
+#define MEDIA_CAST_SENDER_VP8_QUANTIZER_PARSER_H_
+
+#include "media/cast/cast_config.h"
+
+namespace media {
+namespace cast {
+
+// Partially parse / skip data in the header and the first partition,
+// and return the base quantizer in the range [0,63], or -1 on parse error.
+int ParseVp8HeaderQuantizer(const uint8* data, size_t size);
+
+} // namespace cast
+} // namespace media
+
+#endif // MEDIA_CAST_SENDER_VP8_QUANTIZER_PARSER_H_
diff --git a/chromium/media/cast/sender/vp8_quantizer_parser_unittest.cc b/chromium/media/cast/sender/vp8_quantizer_parser_unittest.cc
new file mode 100644
index 00000000000..f028264a92c
--- /dev/null
+++ b/chromium/media/cast/sender/vp8_quantizer_parser_unittest.cc
@@ -0,0 +1,147 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <cstdlib>
+
+#include "base/time/time.h"
+#include "media/cast/cast_config.h"
+#include "media/cast/receiver/video_decoder.h"
+#include "media/cast/sender/sender_encoded_frame.h"
+#include "media/cast/sender/vp8_encoder.h"
+#include "media/cast/sender/vp8_quantizer_parser.h"
+#include "media/cast/test/utility/default_config.h"
+#include "media/cast/test/utility/video_utility.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+namespace cast {
+
+namespace {
+const int kWidth = 32;
+const int kHeight = 32;
+const int kFrameRate = 10;
+const int kQp = 20;
+
+VideoSenderConfig GetVideoConfigForTest() {
+ VideoSenderConfig config = GetDefaultVideoSenderConfig();
+ config.codec = CODEC_VIDEO_VP8;
+ config.use_external_encoder = false;
+ config.max_frame_rate = kFrameRate;
+ config.min_qp = kQp;
+ config.max_qp = kQp;
+ return config;
+}
+} // unnamed namespace
+
+class Vp8QuantizerParserTest : public ::testing::Test {
+ public:
+ Vp8QuantizerParserTest() : video_config_(GetVideoConfigForTest()) {}
+
+ // Call vp8 software encoder to encode one randomly generated frame.
+ void EncodeOneFrame(SenderEncodedFrame* encoded_frame) {
+ const gfx::Size frame_size = gfx::Size(kWidth, kHeight);
+ const scoped_refptr<VideoFrame> video_frame = VideoFrame::CreateFrame(
+ PIXEL_FORMAT_YV12, frame_size, gfx::Rect(frame_size), frame_size,
+ next_frame_timestamp_);
+ const base::TimeTicks reference_time =
+ base::TimeTicks::UnixEpoch() + next_frame_timestamp_;
+ next_frame_timestamp_ += base::TimeDelta::FromSeconds(1) / kFrameRate;
+ PopulateVideoFrameWithNoise(video_frame.get());
+ vp8_encoder_->Encode(video_frame, reference_time, encoded_frame);
+ }
+
+ // Update the vp8 encoder with the new quantizer.
+ void UpdateQuantizer(int qp) {
+ DCHECK((qp > 3) && (qp < 64));
+ video_config_.min_qp = qp;
+ video_config_.max_qp = qp;
+ RecreateVp8Encoder();
+ }
+
+ protected:
+ void SetUp() final {
+ next_frame_timestamp_ = base::TimeDelta();
+ RecreateVp8Encoder();
+ }
+
+ private:
+ // Reconstruct a vp8 encoder with new config since the Vp8Encoder
+ // class has no interface to update the config.
+ void RecreateVp8Encoder() {
+ vp8_encoder_.reset(new Vp8Encoder(video_config_));
+ vp8_encoder_->Initialize();
+ }
+
+ base::TimeDelta next_frame_timestamp_;
+ VideoSenderConfig video_config_;
+ scoped_ptr<Vp8Encoder> vp8_encoder_;
+
+ DISALLOW_COPY_AND_ASSIGN(Vp8QuantizerParserTest);
+};
+
+// Encode 3 frames to test the cases with insufficient data input.
+TEST_F(Vp8QuantizerParserTest, InsufficientData) {
+ for (int i = 0; i < 3; ++i) {
+ scoped_ptr<SenderEncodedFrame> encoded_frame(new SenderEncodedFrame());
+ const uint8* encoded_data =
+ reinterpret_cast<const uint8*>(encoded_frame->data.data());
+ // Null input.
+ int decoded_quantizer =
+ ParseVp8HeaderQuantizer(encoded_data, encoded_frame->data.size());
+ EXPECT_EQ(-1, decoded_quantizer);
+ EncodeOneFrame(encoded_frame.get());
+ encoded_data = reinterpret_cast<const uint8*>(encoded_frame->data.data());
+ // Zero bytes should not be enough to decode the quantizer value.
+ decoded_quantizer = ParseVp8HeaderQuantizer(encoded_data, 0);
+ EXPECT_EQ(-1, decoded_quantizer);
+ // Three bytes should not be enough to decode the quantizer value..
+ decoded_quantizer = ParseVp8HeaderQuantizer(encoded_data, 3);
+ EXPECT_EQ(-1, decoded_quantizer);
+ unsigned int first_partition_size =
+ (encoded_data[0] | (encoded_data[1] << 8) | (encoded_data[2] << 16)) >>
+ 5;
+ if (encoded_frame->dependency == EncodedFrame::KEY) {
+ // Ten bytes should not be enough to decode the quanitizer value
+ // for a Key frame.
+ decoded_quantizer = ParseVp8HeaderQuantizer(encoded_data, 10);
+ EXPECT_EQ(-1, decoded_quantizer);
+ // One byte less than needed to decode the quantizer value.
+ decoded_quantizer =
+ ParseVp8HeaderQuantizer(encoded_data, 10 + first_partition_size - 1);
+ EXPECT_EQ(-1, decoded_quantizer);
+ // Minimum number of bytes to decode the quantizer value.
+ decoded_quantizer =
+ ParseVp8HeaderQuantizer(encoded_data, 10 + first_partition_size);
+ EXPECT_EQ(kQp, decoded_quantizer);
+ } else {
+ // One byte less than needed to decode the quantizer value.
+ decoded_quantizer =
+ ParseVp8HeaderQuantizer(encoded_data, 3 + first_partition_size - 1);
+ EXPECT_EQ(-1, decoded_quantizer);
+ // Minimum number of bytes to decode the quantizer value.
+ decoded_quantizer =
+ ParseVp8HeaderQuantizer(encoded_data, 3 + first_partition_size);
+ EXPECT_EQ(kQp, decoded_quantizer);
+ }
+ }
+}
+
+// Encode 3 fames for every quantizer value in the range of [4,63].
+TEST_F(Vp8QuantizerParserTest, VariedQuantizer) {
+ int decoded_quantizer = -1;
+ for (int qp = 4; qp <= 63; qp += 10) {
+ UpdateQuantizer(qp);
+ for (int i = 0; i < 3; ++i) {
+ scoped_ptr<SenderEncodedFrame> encoded_frame(new SenderEncodedFrame());
+ EncodeOneFrame(encoded_frame.get());
+ decoded_quantizer = ParseVp8HeaderQuantizer(
+ reinterpret_cast<const uint8*>(encoded_frame->data.data()),
+ encoded_frame->data.size());
+ EXPECT_EQ(qp, decoded_quantizer);
+ }
+ }
+}
+
+} // namespace cast
+} // namespace media
diff --git a/chromium/media/cdm/json_web_key.cc b/chromium/media/cdm/json_web_key.cc
index 0576f369009..5d8c5306993 100644
--- a/chromium/media/cdm/json_web_key.cc
+++ b/chromium/media/cdm/json_web_key.cc
@@ -307,7 +307,7 @@ bool ExtractKeyIdsFromKeyIdsInitData(const std::string& input,
error_message->assign("'");
error_message->append(kKeyIdsTag);
error_message->append("'[");
- error_message->append(base::UintToString(i));
+ error_message->append(base::SizeTToString(i));
error_message->append("] is not string.");
return false;
}
@@ -318,7 +318,7 @@ bool ExtractKeyIdsFromKeyIdsInitData(const std::string& input,
error_message->assign("'");
error_message->append(kKeyIdsTag);
error_message->append("'[");
- error_message->append(base::UintToString(i));
+ error_message->append(base::SizeTToString(i));
error_message->append("] is not valid base64url encoded. Value: ");
error_message->append(ShortenTo64Characters(encoded_key_id));
return false;
diff --git a/chromium/media/cdm/player_tracker_impl.cc b/chromium/media/cdm/player_tracker_impl.cc
index 8f102bd724e..e4abd61f707 100644
--- a/chromium/media/cdm/player_tracker_impl.cc
+++ b/chromium/media/cdm/player_tracker_impl.cc
@@ -19,7 +19,11 @@ PlayerTrackerImpl::PlayerCallbacks::PlayerCallbacks(
PlayerTrackerImpl::PlayerCallbacks::~PlayerCallbacks() {
}
-PlayerTrackerImpl::PlayerTrackerImpl() : next_registration_id_(1) {}
+PlayerTrackerImpl::PlayerTrackerImpl() : next_registration_id_(1) {
+ // Enable PlayerTrackerImpl to be created on another thread than it will be
+ // later exclusively used.
+ thread_checker_.DetachFromThread();
+}
PlayerTrackerImpl::~PlayerTrackerImpl() {}
diff --git a/chromium/media/cdm/ppapi/BUILD.gn b/chromium/media/cdm/ppapi/BUILD.gn
index 585e6bf00a2..1fdebd1bc75 100644
--- a/chromium/media/cdm/ppapi/BUILD.gn
+++ b/chromium/media/cdm/ppapi/BUILD.gn
@@ -30,6 +30,7 @@ shared_library("clearkeycdm") {
deps = [
"//base",
+ "//build/config/sanitizers:deps",
"//media", # For media::AudioTimestampHelper
"//media:shared_memory_support", # For media::AudioBus.
"//url",
@@ -51,11 +52,12 @@ shared_library("clearkeycdm") {
process_version("clearkeycdmadapter_resources") {
visibility = [ ":*" ]
+ template_file = chrome_version_rc_template
sources = [
"//media/clearkeycdmadapter.ver",
"external_clear_key/BRANDING",
]
- output = "$target_gen_dir/clearkeycdmadapter_version.rc"
+ output = "$target_gen_dir/clearkeycdmadapter_version.rc"
}
cdm_adapter("clearkeycdmadapter") {
@@ -66,11 +68,4 @@ cdm_adapter("clearkeycdmadapter") {
":clearkeycdmadapter_resources",
"//ppapi/cpp",
]
-
- if (is_posix && !is_mac && enable_pepper_cdms) {
- # Because clearkeycdm has type 'loadable_module', we must explicitly
- # specify this dependency.
- ldflags = [ rebase_path("$root_out_dir/libclearkeycdm.so", root_build_dir) ]
- libs = [ "rt" ]
- }
}
diff --git a/chromium/media/cdm/ppapi/api/codereview.settings b/chromium/media/cdm/ppapi/api/codereview.settings
index 5b1311c8917..9faf2326bf1 100644
--- a/chromium/media/cdm/ppapi/api/codereview.settings
+++ b/chromium/media/cdm/ppapi/api/codereview.settings
@@ -1,6 +1,9 @@
CODE_REVIEW_SERVER: codereview.chromium.org
CC_LIST: cdm-api-reviews@chromium.org, feature-media-reviews@chromium.org
-VIEW_VC: https://src.chromium.org/viewvc/chrome?view=rev&revision=
-GITCL_PREUPLOAD: http://src.chromium.org/viewvc/trunk/tools/depot_tools/git-cl-upload-hook?revision=HEAD&root=chrome
-GITCL_PREDCOMMIT: http://src.chromium.org/viewvc/trunk/tools/depot_tools/git-cl-upload-hook?revision=HEAD&root=chrome
+VIEW_VC: https://chromium.googlesource.com/chromium/cdm/+/
+STATUS: http://chromium-status.appspot.com/status
+TRY_ON_UPLOAD: False
+TRYSERVER_SVN_URL: svn://svn.chromium.org/chrome-try/try
+GITCL_PREUPLOAD: http://src.chromium.org/viewvc/chrome/trunk/tools/depot_tools/git-cl-upload-hook?revision=HEAD
+GITCL_PREDCOMMIT: http://src.chromium.org/viewvc/chrome/trunk/tools/depot_tools/git-cl-upload-hook?revision=HEAD
PROJECT: chromium_deps
diff --git a/chromium/media/cdm/ppapi/api/content_decryption_module.h b/chromium/media/cdm/ppapi/api/content_decryption_module.h
index 1e5e6f6ea95..512ca97689e 100644
--- a/chromium/media/cdm/ppapi/api/content_decryption_module.h
+++ b/chromium/media/cdm/ppapi/api/content_decryption_module.h
@@ -305,9 +305,10 @@ enum KeyStatus {
kUsable = 0,
kInternalError = 1,
kExpired = 2,
- kOutputNotAllowed = 3,
+ kOutputRestricted = 3,
kOutputDownscaled = 4,
- kStatusPending = 5
+ kStatusPending = 5,
+ kReleased = 6
};
// Used when passing arrays of key information. Does not own the referenced
diff --git a/chromium/media/cdm/ppapi/cdm_adapter.cc b/chromium/media/cdm/ppapi/cdm_adapter.cc
index 29ca3ea9849..93479e25955 100644
--- a/chromium/media/cdm/ppapi/cdm_adapter.cc
+++ b/chromium/media/cdm/ppapi/cdm_adapter.cc
@@ -296,12 +296,14 @@ PP_CdmKeyStatus CdmKeyStatusToPpKeyStatus(cdm::KeyStatus status) {
return PP_CDMKEYSTATUS_INVALID;
case cdm::kExpired:
return PP_CDMKEYSTATUS_EXPIRED;
- case cdm::kOutputNotAllowed:
- return PP_CDMKEYSTATUS_OUTPUTNOTALLOWED;
+ case cdm::kOutputRestricted:
+ return PP_CDMKEYSTATUS_OUTPUTRESTRICTED;
case cdm::kOutputDownscaled:
return PP_CDMKEYSTATUS_OUTPUTDOWNSCALED;
case cdm::kStatusPending:
return PP_CDMKEYSTATUS_STATUSPENDING;
+ case cdm::kReleased:
+ return PP_CDMKEYSTATUS_RELEASED;
}
PP_NOTREACHED();
diff --git a/chromium/media/cdm/ppapi/cdm_adapter.gni b/chromium/media/cdm/ppapi/cdm_adapter.gni
index d5152ae2b16..e326903042f 100644
--- a/chromium/media/cdm/ppapi/cdm_adapter.gni
+++ b/chromium/media/cdm/ppapi/cdm_adapter.gni
@@ -11,8 +11,13 @@ template("cdm_adapter") {
shared_library(target_name) {
# Don't filter sources list again.
set_sources_assignment_filter([])
+ cflags = []
+ sources = []
+ ldflags = []
+ libs = []
+ forward_variables_from(invoker, "*")
- sources = [
+ sources += [
"//media/cdm/ppapi/api/content_decryption_module.h",
"//media/cdm/ppapi/cdm_adapter.cc",
"//media/cdm/ppapi/cdm_adapter.h",
@@ -26,101 +31,22 @@ template("cdm_adapter") {
"//media/cdm/ppapi/linked_ptr.h",
"//media/cdm/ppapi/supported_cdm_versions.h",
]
- if (defined(invoker.sources)) {
- sources += invoker.sources
- }
-
if (is_mac) {
- ldflags = [
+ ldflags += [
# Not to strip important symbols by -Wl,-dead_strip.
"-Wl,-exported_symbol,_PPP_GetInterface",
"-Wl,-exported_symbol,_PPP_InitializeModule",
"-Wl,-exported_symbol,_PPP_ShutdownModule",
]
#TODO(GYP) Mac: 'DYLIB_INSTALL_NAME_BASE': '@loader_path',
- } else if (is_posix && !is_mac) {
- cflags = [ "-fvisibility=hidden" ]
- # Note GYP sets rpath but this is set by default on shared libraries in
- # the GN build.
+ } else if (is_posix) {
+ cflags += [ "-fvisibility=hidden" ]
+
+ # Required for clock_gettime()
+ libs += [ "rt" ]
}
# TODO(jschuh) crbug.com/167187
configs += [ "//build/config/compiler:no_size_t_to_int_warning" ]
-
- if (defined(invoker.all_dependent_configs)) {
- all_dependent_configs = invoker.all_dependent_configs
- }
- if (defined(invoker.allow_circular_includes_from)) {
- allow_circular_includes_from = invoker.allow_circular_includes_from
- }
- if (defined(invoker.cflags)) {
- cflags = invoker.cflags
- }
- if (defined(invoker.cflags_c)) {
- cflags_c = invoker.cflags_c
- }
- if (defined(invoker.cflags_cc)) {
- cflags_cc = invoker.cflags_cc
- }
- if (defined(invoker.cflags_objc)) {
- cflags_objc = invoker.cflags_objc
- }
- if (defined(invoker.cflags_objcc)) {
- cflags_objcc = invoker.cflags_objcc
- }
- if (defined(invoker.check_includes)) {
- check_includes = invoker.check_includes
- }
- if (defined(invoker.data)) {
- data = invoker.data
- }
- if (defined(invoker.data_deps)) {
- data_deps = invoker.data_deps
- }
- if (defined(invoker.datadeps)) {
- datadeps = invoker.datadeps
- }
- if (defined(invoker.defines)) {
- defines = invoker.defines
- }
- if (defined(invoker.deps)) {
- deps = invoker.deps
- }
- if (defined(invoker.forward_dependent_configs_from)) {
- forward_dependent_configs_from = invoker.forward_dependent_configs_from
- }
- if (defined(invoker.include_dirs)) {
- include_dirs = invoker.include_dirs
- }
- if (defined(invoker.ldflags)) {
- ldflags = invoker.ldflags
- }
- if (defined(invoker.lib_dirs)) {
- lib_dirs = invoker.lib_dirs
- }
- if (defined(invoker.libs)) {
- libs = invoker.libs
- }
- if (defined(invoker.output_extension)) {
- output_extension = invoker.output_extension
- }
- if (defined(invoker.output_name)) {
- output_name = invoker.output_name
- }
- if (defined(invoker.public)) {
- public = invoker.public
- }
- if (defined(invoker.public_configs)) {
- public_configs = invoker.public_configs
- }
- if (defined(invoker.public_deps)) {
- public_deps = invoker.public_deps
- }
- if (defined(invoker.testonly)) {
- testonly = invoker.testonly
- }
- if (defined(invoker.visibility)) {
- visibility = invoker.visibility
- }
}
}
diff --git a/chromium/media/cdm/ppapi/external_clear_key/clear_key_cdm.cc b/chromium/media/cdm/ppapi/external_clear_key/clear_key_cdm.cc
index a1f67079aad..4533f3959f0 100644
--- a/chromium/media/cdm/ppapi/external_clear_key/clear_key_cdm.cc
+++ b/chromium/media/cdm/ppapi/external_clear_key/clear_key_cdm.cc
@@ -183,12 +183,14 @@ cdm::KeyStatus ConvertKeyStatus(media::CdmKeyInformation::KeyStatus status) {
return cdm::kInternalError;
case media::CdmKeyInformation::KeyStatus::EXPIRED:
return cdm::kExpired;
- case media::CdmKeyInformation::KeyStatus::OUTPUT_NOT_ALLOWED:
- return cdm::kOutputNotAllowed;
+ case media::CdmKeyInformation::KeyStatus::OUTPUT_RESTRICTED:
+ return cdm::kOutputRestricted;
case media::CdmKeyInformation::KeyStatus::OUTPUT_DOWNSCALED:
return cdm::kOutputDownscaled;
case media::CdmKeyInformation::KeyStatus::KEY_STATUS_PENDING:
return cdm::kStatusPending;
+ case media::CdmKeyInformation::KeyStatus::RELEASED:
+ return cdm::kReleased;
}
NOTREACHED();
return cdm::kInternalError;
diff --git a/chromium/media/cdm/ppapi/external_clear_key/ffmpeg_cdm_audio_decoder.cc b/chromium/media/cdm/ppapi/external_clear_key/ffmpeg_cdm_audio_decoder.cc
index c35b1789c29..29b29a53d63 100644
--- a/chromium/media/cdm/ppapi/external_clear_key/ffmpeg_cdm_audio_decoder.cc
+++ b/chromium/media/cdm/ppapi/external_clear_key/ffmpeg_cdm_audio_decoder.cc
@@ -9,9 +9,9 @@
#include "base/logging.h"
#include "media/base/audio_bus.h"
#include "media/base/audio_timestamp_helper.h"
-#include "media/base/buffers.h"
#include "media/base/data_buffer.h"
#include "media/base/limits.h"
+#include "media/base/timestamp_constants.h"
#include "media/ffmpeg/ffmpeg_common.h"
// Include FFmpeg header files.
diff --git a/chromium/media/cdm/ppapi/external_clear_key/ffmpeg_cdm_video_decoder.cc b/chromium/media/cdm/ppapi/external_clear_key/ffmpeg_cdm_video_decoder.cc
index 0bf0a305017..3f1497c708f 100644
--- a/chromium/media/cdm/ppapi/external_clear_key/ffmpeg_cdm_video_decoder.cc
+++ b/chromium/media/cdm/ppapi/external_clear_key/ffmpeg_cdm_video_decoder.cc
@@ -5,7 +5,6 @@
#include "media/cdm/ppapi/external_clear_key/ffmpeg_cdm_video_decoder.h"
#include "base/logging.h"
-#include "media/base/buffers.h"
#include "media/base/limits.h"
#include "media/ffmpeg/ffmpeg_common.h"
@@ -21,26 +20,28 @@ namespace media {
static const int kDecodeThreads = 1;
-static cdm::VideoFormat PixelFormatToCdmVideoFormat(PixelFormat pixel_format) {
+static cdm::VideoFormat AVPixelFormatToCdmVideoFormat(
+ AVPixelFormat pixel_format) {
switch (pixel_format) {
- case PIX_FMT_YUV420P:
+ case AV_PIX_FMT_YUV420P:
return cdm::kYv12;
default:
- DVLOG(1) << "Unsupported PixelFormat: " << pixel_format;
+ DVLOG(1) << "Unsupported AVPixelFormat: " << pixel_format;
}
return cdm::kUnknownVideoFormat;
}
-static PixelFormat CdmVideoFormatToPixelFormat(cdm::VideoFormat video_format) {
+static AVPixelFormat CdmVideoFormatToAVPixelFormat(
+ cdm::VideoFormat video_format) {
switch (video_format) {
case cdm::kYv12:
case cdm::kI420:
- return PIX_FMT_YUV420P;
+ return AV_PIX_FMT_YUV420P;
case cdm::kUnknownVideoFormat:
default:
DVLOG(1) << "Unsupported cdm::VideoFormat: " << video_format;
}
- return PIX_FMT_NONE;
+ return AV_PIX_FMT_NONE;
}
static AVCodecID CdmVideoCodecToCodecID(
@@ -95,7 +96,7 @@ static void CdmVideoDecoderConfigToAVCodecContext(
codec_context->profile = CdmVideoCodecProfileToProfileID(config.profile);
codec_context->coded_width = config.coded_size.width;
codec_context->coded_height = config.coded_size.height;
- codec_context->pix_fmt = CdmVideoFormatToPixelFormat(config.format);
+ codec_context->pix_fmt = CdmVideoFormatToAVPixelFormat(config.format);
if (config.extra_data) {
codec_context->extradata_size = config.extra_data_size;
@@ -267,7 +268,7 @@ cdm::Status FFmpegCdmVideoDecoder::DecodeFrame(
bool FFmpegCdmVideoDecoder::CopyAvFrameTo(cdm::VideoFrame* cdm_video_frame) {
DCHECK(cdm_video_frame);
- DCHECK_EQ(av_frame_->format, PIX_FMT_YUV420P);
+ DCHECK_EQ(av_frame_->format, AV_PIX_FMT_YUV420P);
DCHECK_EQ(av_frame_->width % 2, 0);
DCHECK_EQ(av_frame_->height % 2, 0);
@@ -306,8 +307,8 @@ bool FFmpegCdmVideoDecoder::CopyAvFrameTo(cdm::VideoFrame* cdm_video_frame) {
uv_stride,
cdm_video_frame->FrameBuffer()->Data() + y_size + uv_size);
- PixelFormat format = static_cast<PixelFormat>(av_frame_->format);
- cdm_video_frame->SetFormat(PixelFormatToCdmVideoFormat(format));
+ AVPixelFormat format = static_cast<AVPixelFormat>(av_frame_->format);
+ cdm_video_frame->SetFormat(AVPixelFormatToCdmVideoFormat(format));
cdm::Size video_frame_size;
video_frame_size.width = av_frame_->width;
diff --git a/chromium/media/cdm/ppapi/external_clear_key/libvpx_cdm_video_decoder.cc b/chromium/media/cdm/ppapi/external_clear_key/libvpx_cdm_video_decoder.cc
index b5a804389fd..7aed711f88f 100644
--- a/chromium/media/cdm/ppapi/external_clear_key/libvpx_cdm_video_decoder.cc
+++ b/chromium/media/cdm/ppapi/external_clear_key/libvpx_cdm_video_decoder.cc
@@ -6,7 +6,6 @@
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
-#include "media/base/buffers.h"
#include "media/base/limits.h"
// Include libvpx header files.
@@ -15,8 +14,8 @@
#define VPX_CODEC_DISABLE_COMPAT 1
extern "C" {
// Note: vpx_decoder.h must be first or compile will fail.
-#include "third_party/libvpx/source/libvpx/vpx/vp8dx.h"
-#include "third_party/libvpx/source/libvpx/vpx/vpx_decoder.h" // NOLINT
+#include "third_party/libvpx_new/source/libvpx/vpx/vp8dx.h"
+#include "third_party/libvpx_new/source/libvpx/vpx/vpx_decoder.h" // NOLINT
}
// Enable USE_COPYPLANE_WITH_LIBVPX to use |CopyPlane()| instead of memcpy to
diff --git a/chromium/media/cdm/stub/stub_cdm.cc b/chromium/media/cdm/stub/stub_cdm.cc
index 0bfa25ebaa0..9fb15924d9c 100644
--- a/chromium/media/cdm/stub/stub_cdm.cc
+++ b/chromium/media/cdm/stub/stub_cdm.cc
@@ -5,6 +5,7 @@
#include "media/cdm/stub/stub_cdm.h"
#include "base/logging.h"
+#include "base/numerics/safe_conversions.h"
#include "base/strings/string_number_conversions.h"
// Version number for this stub. The third number represents the
@@ -61,9 +62,11 @@ void StubCdm::CreateSessionAndGenerateRequest(
// Provide a dummy message (with a trivial session ID) to enable some testing
// and be consistent with existing testing without a license server.
std::string session_id(base::UintToString(next_session_id_++));
- host_->OnResolveNewSessionPromise(promise_id, session_id.data(),
- session_id.length());
- host_->OnSessionMessage(session_id.data(), session_id.length(),
+ host_->OnResolveNewSessionPromise(
+ promise_id, session_id.data(),
+ base::checked_cast<uint32_t>(session_id.length()));
+ host_->OnSessionMessage(session_id.data(),
+ base::checked_cast<uint32_t>(session_id.length()),
cdm::kLicenseRequest, nullptr, 0, nullptr, 0);
}
@@ -156,7 +159,8 @@ void StubCdm::OnQueryOutputProtectionStatus(
void StubCdm::FailRequest(uint32 promise_id) {
std::string message("Operation not supported by stub CDM.");
host_->OnRejectPromise(promise_id, cdm::kInvalidAccessError, 0,
- message.data(), message.length());
+ message.data(),
+ base::checked_cast<uint32_t>(message.length()));
}
} // namespace media
diff --git a/chromium/media/ffmpeg/ffmpeg_common.cc b/chromium/media/ffmpeg/ffmpeg_common.cc
index 8bb1aba12d3..e6d5e912424 100644
--- a/chromium/media/ffmpeg/ffmpeg_common.cc
+++ b/chromium/media/ffmpeg/ffmpeg_common.cc
@@ -6,11 +6,12 @@
#include "base/basictypes.h"
#include "base/logging.h"
-#include "base/metrics/histogram.h"
+#include "base/sha1.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
#include "base/strings/string_util.h"
#include "media/base/decoder_buffer.h"
+#include "media/base/video_decoder_config.h"
#include "media/base/video_util.h"
namespace media {
@@ -154,6 +155,10 @@ static VideoCodec CodecIDToVideoCodec(AVCodecID codec_id) {
switch (codec_id) {
case AV_CODEC_ID_H264:
return kCodecH264;
+#if defined(ENABLE_HEVC_DEMUXING)
+ case AV_CODEC_ID_HEVC:
+ return kCodecHEVC;
+#endif
case AV_CODEC_ID_THEORA:
return kCodecTheora;
case AV_CODEC_ID_MPEG4:
@@ -168,10 +173,14 @@ static VideoCodec CodecIDToVideoCodec(AVCodecID codec_id) {
return kUnknownVideoCodec;
}
-static AVCodecID VideoCodecToCodecID(VideoCodec video_codec) {
+AVCodecID VideoCodecToCodecID(VideoCodec video_codec) {
switch (video_codec) {
case kCodecH264:
return AV_CODEC_ID_H264;
+#if defined(ENABLE_HEVC_DEMUXING)
+ case kCodecHEVC:
+ return AV_CODEC_ID_HEVC;
+#endif
case kCodecTheora:
return AV_CODEC_ID_THEORA;
case kCodecMPEG4:
@@ -276,11 +285,9 @@ static AVSampleFormat SampleFormatToAVSampleFormat(SampleFormat sample_format) {
return AV_SAMPLE_FMT_NONE;
}
-void AVCodecContextToAudioDecoderConfig(
- const AVCodecContext* codec_context,
- bool is_encrypted,
- AudioDecoderConfig* config,
- bool record_stats) {
+void AVCodecContextToAudioDecoderConfig(const AVCodecContext* codec_context,
+ bool is_encrypted,
+ AudioDecoderConfig* config) {
DCHECK_EQ(codec_context->codec_type, AVMEDIA_TYPE_AUDIO);
AudioCodec codec = CodecIDToAudioCodec(codec_context->codec_id);
@@ -319,25 +326,22 @@ void AVCodecContextToAudioDecoderConfig(
codec_context->extradata,
codec_context->extradata_size,
is_encrypted,
- record_stats,
seek_preroll,
codec_context->delay);
+
if (codec != kCodecOpus) {
DCHECK_EQ(av_get_bytes_per_sample(codec_context->sample_fmt) * 8,
config->bits_per_channel());
}
}
-void AVStreamToAudioDecoderConfig(
- const AVStream* stream,
- AudioDecoderConfig* config,
- bool record_stats) {
+void AVStreamToAudioDecoderConfig(const AVStream* stream,
+ AudioDecoderConfig* config) {
bool is_encrypted = false;
AVDictionaryEntry* key = av_dict_get(stream->metadata, "enc_key_id", NULL, 0);
if (key)
is_encrypted = true;
- return AVCodecContextToAudioDecoderConfig(
- stream->codec, is_encrypted, config, record_stats);
+ AVCodecContextToAudioDecoderConfig(stream->codec, is_encrypted, config);
}
void AudioDecoderConfigToAVCodecContext(const AudioDecoderConfig& config,
@@ -368,10 +372,8 @@ void AudioDecoderConfigToAVCodecContext(const AudioDecoderConfig& config,
}
}
-void AVStreamToVideoDecoderConfig(
- const AVStream* stream,
- VideoDecoderConfig* config,
- bool record_stats) {
+void AVStreamToVideoDecoderConfig(const AVStream* stream,
+ VideoDecoderConfig* config) {
gfx::Size coded_size(stream->codec->coded_width, stream->codec->coded_height);
// TODO(vrk): This assumes decoded frame data starts at (0, 0), which is true
@@ -394,29 +396,39 @@ void AVStreamToVideoDecoderConfig(
else
profile = ProfileIDToVideoCodecProfile(stream->codec->profile);
+ // Without the FFmpeg h264 decoder, AVFormat is unable to get the profile, so
+ // default to baseline and let the VDA fail later if it doesn't support the
+ // real profile. This is alright because if the FFmpeg h264 decoder isn't
+ // enabled, there is no fallback if the VDA fails.
+#if defined(DISABLE_FFMPEG_VIDEO_DECODERS)
+ if (codec == kCodecH264)
+ profile = H264PROFILE_BASELINE;
+#endif
+
gfx::Size natural_size = GetNaturalSize(
visible_rect.size(), aspect_ratio.num, aspect_ratio.den);
- if (record_stats) {
- // Note the PRESUBMIT_IGNORE_UMA_MAX below, this silences the PRESUBMIT.py
- // check for uma enum max usage, since we're abusing
- // UMA_HISTOGRAM_ENUMERATION to report a discrete value.
- UMA_HISTOGRAM_ENUMERATION("Media.VideoColorRange",
- stream->codec->color_range,
- AVCOL_RANGE_NB); // PRESUBMIT_IGNORE_UMA_MAX
- }
+ VideoPixelFormat format =
+ AVPixelFormatToVideoPixelFormat(stream->codec->pix_fmt);
+ // The format and coded size may be unknown if FFmpeg is compiled without
+ // video decoders.
+#if defined(DISABLE_FFMPEG_VIDEO_DECODERS)
+ if (format == PIXEL_FORMAT_UNKNOWN)
+ format = PIXEL_FORMAT_YV12;
+ if (coded_size == gfx::Size(0, 0))
+ coded_size = visible_rect.size();
+#endif
- VideoFrame::Format format = PixelFormatToVideoFormat(stream->codec->pix_fmt);
if (codec == kCodecVP9) {
// TODO(tomfinegan): libavcodec doesn't know about VP9.
- format = VideoFrame::YV12;
+ format = PIXEL_FORMAT_YV12;
coded_size = visible_rect.size();
}
// Pad out |coded_size| for subsampled YUV formats.
- if (format != VideoFrame::YV24) {
+ if (format != PIXEL_FORMAT_YV24) {
coded_size.set_width((coded_size.width() + 1) / 2 * 2);
- if (format != VideoFrame::YV16)
+ if (format != PIXEL_FORMAT_YV16)
coded_size.set_height((coded_size.height() + 1) / 2 * 2);
}
@@ -428,19 +440,22 @@ void AVStreamToVideoDecoderConfig(
AVDictionaryEntry* webm_alpha =
av_dict_get(stream->metadata, "alpha_mode", NULL, 0);
if (webm_alpha && !strcmp(webm_alpha->value, "1")) {
- format = VideoFrame::YV12A;
+ format = PIXEL_FORMAT_YV12A;
}
- config->Initialize(codec,
- profile,
- format,
- (stream->codec->colorspace == AVCOL_SPC_BT709)
- ? VideoFrame::COLOR_SPACE_HD_REC709
- : VideoFrame::COLOR_SPACE_UNSPECIFIED,
- coded_size, visible_rect, natural_size,
- stream->codec->extradata, stream->codec->extradata_size,
- is_encrypted,
- record_stats);
+ // Prefer the color space found by libavcodec if available.
+ ColorSpace color_space = AVColorSpaceToColorSpace(stream->codec->colorspace,
+ stream->codec->color_range);
+ if (color_space == COLOR_SPACE_UNSPECIFIED) {
+ // Otherwise, assume that SD video is usually Rec.601, and HD is usually
+ // Rec.709.
+ color_space = (natural_size.height() < 720) ? COLOR_SPACE_SD_REC601
+ : COLOR_SPACE_HD_REC709;
+ }
+
+ config->Initialize(codec, profile, format, color_space, coded_size,
+ visible_rect, natural_size, stream->codec->extradata,
+ stream->codec->extradata_size, is_encrypted);
}
void VideoDecoderConfigToAVCodecContext(
@@ -451,7 +466,9 @@ void VideoDecoderConfigToAVCodecContext(
codec_context->profile = VideoCodecProfileToProfileID(config.profile());
codec_context->coded_width = config.coded_size().width();
codec_context->coded_height = config.coded_size().height();
- codec_context->pix_fmt = VideoFormatToPixelFormat(config.format());
+ codec_context->pix_fmt = VideoPixelFormatToAVPixelFormat(config.format());
+ if (config.color_space() == COLOR_SPACE_JPEG)
+ codec_context->color_range = AVCOL_RANGE_JPEG;
if (config.extra_data()) {
codec_context->extradata_size = config.extra_data_size();
@@ -532,41 +549,60 @@ ChannelLayout ChannelLayoutToChromeChannelLayout(int64_t layout, int channels) {
}
}
-VideoFrame::Format PixelFormatToVideoFormat(PixelFormat pixel_format) {
+VideoPixelFormat AVPixelFormatToVideoPixelFormat(AVPixelFormat pixel_format) {
// The YUVJ alternatives are FFmpeg's (deprecated, but still in use) way to
- // specify a pixel format and full range color combination
+ // specify a pixel format and full range color combination.
switch (pixel_format) {
- case PIX_FMT_YUV422P:
- case PIX_FMT_YUVJ422P:
- return VideoFrame::YV16;
- case PIX_FMT_YUV444P:
- case PIX_FMT_YUVJ444P:
- return VideoFrame::YV24;
- case PIX_FMT_YUV420P:
- case PIX_FMT_YUVJ420P:
- return VideoFrame::YV12;
- case PIX_FMT_YUVA420P:
- return VideoFrame::YV12A;
+ case AV_PIX_FMT_YUV422P:
+ case AV_PIX_FMT_YUVJ422P:
+ return PIXEL_FORMAT_YV16;
+ case AV_PIX_FMT_YUV444P:
+ case AV_PIX_FMT_YUVJ444P:
+ return PIXEL_FORMAT_YV24;
+ case AV_PIX_FMT_YUV420P:
+ case AV_PIX_FMT_YUVJ420P:
+ return PIXEL_FORMAT_YV12;
+ case AV_PIX_FMT_YUVA420P:
+ return PIXEL_FORMAT_YV12A;
default:
- DVLOG(1) << "Unsupported PixelFormat: " << pixel_format;
+ DVLOG(1) << "Unsupported AVPixelFormat: " << pixel_format;
}
- return VideoFrame::UNKNOWN;
+ return PIXEL_FORMAT_UNKNOWN;
}
-PixelFormat VideoFormatToPixelFormat(VideoFrame::Format video_format) {
+AVPixelFormat VideoPixelFormatToAVPixelFormat(VideoPixelFormat video_format) {
switch (video_format) {
- case VideoFrame::YV16:
- return PIX_FMT_YUV422P;
- case VideoFrame::YV12:
- return PIX_FMT_YUV420P;
- case VideoFrame::YV12A:
- return PIX_FMT_YUVA420P;
- case VideoFrame::YV24:
- return PIX_FMT_YUV444P;
+ case PIXEL_FORMAT_YV16:
+ return AV_PIX_FMT_YUV422P;
+ case PIXEL_FORMAT_YV12:
+ return AV_PIX_FMT_YUV420P;
+ case PIXEL_FORMAT_YV12A:
+ return AV_PIX_FMT_YUVA420P;
+ case PIXEL_FORMAT_YV24:
+ return AV_PIX_FMT_YUV444P;
+ default:
+ DVLOG(1) << "Unsupported Format: " << video_format;
+ }
+ return AV_PIX_FMT_NONE;
+}
+
+ColorSpace AVColorSpaceToColorSpace(AVColorSpace color_space,
+ AVColorRange color_range) {
+ if (color_range == AVCOL_RANGE_JPEG)
+ return COLOR_SPACE_JPEG;
+
+ switch (color_space) {
+ case AVCOL_SPC_UNSPECIFIED:
+ break;
+ case AVCOL_SPC_BT709:
+ return COLOR_SPACE_HD_REC709;
+ case AVCOL_SPC_SMPTE170M:
+ case AVCOL_SPC_BT470BG:
+ return COLOR_SPACE_SD_REC601;
default:
- DVLOG(1) << "Unsupported VideoFrame::Format: " << video_format;
+ DVLOG(1) << "Unknown AVColorSpace: " << color_space;
}
- return PIX_FMT_NONE;
+ return COLOR_SPACE_UNSPECIFIED;
}
bool FFmpegUTCDateToTime(const char* date_utc, base::Time* out) {
@@ -609,4 +645,11 @@ bool FFmpegUTCDateToTime(const char* date_utc, base::Time* out) {
return false;
}
+int32_t HashCodecName(const char* codec_name) {
+ // Use the first 32-bits from the SHA1 hash as the identifier.
+ int32_t hash;
+ memcpy(&hash, base::SHA1HashString(codec_name).substr(0, 4).c_str(), 4);
+ return hash;
+}
+
} // namespace media
diff --git a/chromium/media/ffmpeg/ffmpeg_common.h b/chromium/media/ffmpeg/ffmpeg_common.h
index a73fddd3716..677bd761d96 100644
--- a/chromium/media/ffmpeg/ffmpeg_common.h
+++ b/chromium/media/ffmpeg/ffmpeg_common.h
@@ -13,7 +13,7 @@
#include "media/base/audio_decoder_config.h"
#include "media/base/channel_layout.h"
#include "media/base/media_export.h"
-#include "media/base/video_decoder_config.h"
+#include "media/base/video_codecs.h"
#include "media/base/video_frame.h"
#include "media/ffmpeg/ffmpeg_deleters.h"
@@ -85,18 +85,14 @@ MEDIA_EXPORT base::TimeDelta ConvertFromTimeBase(const AVRational& time_base,
MEDIA_EXPORT int64 ConvertToTimeBase(const AVRational& time_base,
const base::TimeDelta& timestamp);
-void AVStreamToAudioDecoderConfig(
- const AVStream* stream,
- AudioDecoderConfig* config,
- bool record_stats);
+void AVStreamToAudioDecoderConfig(const AVStream* stream,
+ AudioDecoderConfig* config);
void AudioDecoderConfigToAVCodecContext(
const AudioDecoderConfig& config,
AVCodecContext* codec_context);
-void AVStreamToVideoDecoderConfig(
- const AVStream* stream,
- VideoDecoderConfig* config,
- bool record_stats);
+void AVStreamToVideoDecoderConfig(const AVStream* stream,
+ VideoDecoderConfig* config);
void VideoDecoderConfigToAVCodecContext(
const VideoDecoderConfig& config,
AVCodecContext* codec_context);
@@ -104,8 +100,7 @@ void VideoDecoderConfigToAVCodecContext(
MEDIA_EXPORT void AVCodecContextToAudioDecoderConfig(
const AVCodecContext* codec_context,
bool is_encrypted,
- AudioDecoderConfig* config,
- bool record_stats);
+ AudioDecoderConfig* config);
// Converts FFmpeg's channel layout to chrome's ChannelLayout. |channels| can
// be used when FFmpeg's channel layout is not informative in order to make a
@@ -113,22 +108,31 @@ MEDIA_EXPORT void AVCodecContextToAudioDecoderConfig(
MEDIA_EXPORT ChannelLayout ChannelLayoutToChromeChannelLayout(int64_t layout,
int channels);
+MEDIA_EXPORT AVCodecID VideoCodecToCodecID(VideoCodec video_codec);
+
// Converts FFmpeg's audio sample format to Chrome's SampleFormat.
MEDIA_EXPORT SampleFormat
- AVSampleFormatToSampleFormat(AVSampleFormat sample_format);
+AVSampleFormatToSampleFormat(AVSampleFormat sample_format);
// Converts FFmpeg's pixel formats to its corresponding supported video format.
-MEDIA_EXPORT VideoFrame::Format PixelFormatToVideoFormat(
- PixelFormat pixel_format);
+MEDIA_EXPORT VideoPixelFormat
+AVPixelFormatToVideoPixelFormat(AVPixelFormat pixel_format);
// Converts video formats to its corresponding FFmpeg's pixel formats.
-PixelFormat VideoFormatToPixelFormat(VideoFrame::Format video_format);
+AVPixelFormat VideoPixelFormatToAVPixelFormat(VideoPixelFormat video_format);
+
+ColorSpace AVColorSpaceToColorSpace(AVColorSpace color_space,
+ AVColorRange color_range);
// Convert FFmpeg UTC representation (YYYY-MM-DD HH:MM:SS) to base::Time.
// Returns true and sets |*out| if |date_utc| contains a valid
// date string. Otherwise returns fals and timeline_offset is unmodified.
MEDIA_EXPORT bool FFmpegUTCDateToTime(const char* date_utc, base::Time* out);
+// Returns a 32-bit hash for the given codec name. See the VerifyUmaCodecHashes
+// unit test for more information and code for generating the histogram XML.
+MEDIA_EXPORT int32_t HashCodecName(const char* codec_name);
+
} // namespace media
#endif // MEDIA_FFMPEG_FFMPEG_COMMON_H_
diff --git a/chromium/media/ffmpeg/ffmpeg_common_unittest.cc b/chromium/media/ffmpeg/ffmpeg_common_unittest.cc
index b8cf99fa576..ca68fa7c170 100644
--- a/chromium/media/ffmpeg/ffmpeg_common_unittest.cc
+++ b/chromium/media/ffmpeg/ffmpeg_common_unittest.cc
@@ -11,380 +11,6 @@
namespace media {
-// Verify that the AV_CODEC_ID values match what is specified in histograms.xml
-// for Media.DetectedAudioCodec and Media.DetectedVideoCodec
-#define STATIC_ASSERT(test) static_assert(test, #test)
-STATIC_ASSERT(0 == AV_CODEC_ID_NONE);
-STATIC_ASSERT(1 == AV_CODEC_ID_MPEG1VIDEO);
-STATIC_ASSERT(2 == AV_CODEC_ID_MPEG2VIDEO);
-STATIC_ASSERT(3 == AV_CODEC_ID_MPEG2VIDEO_XVMC);
-STATIC_ASSERT(4 == AV_CODEC_ID_H261);
-STATIC_ASSERT(5 == AV_CODEC_ID_H263);
-STATIC_ASSERT(6 == AV_CODEC_ID_RV10);
-STATIC_ASSERT(7 == AV_CODEC_ID_RV20);
-STATIC_ASSERT(8 == AV_CODEC_ID_MJPEG);
-STATIC_ASSERT(9 == AV_CODEC_ID_MJPEGB);
-STATIC_ASSERT(10 == AV_CODEC_ID_LJPEG);
-STATIC_ASSERT(11 == AV_CODEC_ID_SP5X);
-STATIC_ASSERT(12 == AV_CODEC_ID_JPEGLS);
-STATIC_ASSERT(13 == AV_CODEC_ID_MPEG4);
-STATIC_ASSERT(14 == AV_CODEC_ID_RAWVIDEO);
-STATIC_ASSERT(15 == AV_CODEC_ID_MSMPEG4V1);
-STATIC_ASSERT(16 == AV_CODEC_ID_MSMPEG4V2);
-STATIC_ASSERT(17 == AV_CODEC_ID_MSMPEG4V3);
-STATIC_ASSERT(18 == AV_CODEC_ID_WMV1);
-STATIC_ASSERT(19 == AV_CODEC_ID_WMV2);
-STATIC_ASSERT(20 == AV_CODEC_ID_H263P);
-STATIC_ASSERT(21 == AV_CODEC_ID_H263I);
-STATIC_ASSERT(22 == AV_CODEC_ID_FLV1);
-STATIC_ASSERT(23 == AV_CODEC_ID_SVQ1);
-STATIC_ASSERT(24 == AV_CODEC_ID_SVQ3);
-STATIC_ASSERT(25 == AV_CODEC_ID_DVVIDEO);
-STATIC_ASSERT(26 == AV_CODEC_ID_HUFFYUV);
-STATIC_ASSERT(27 == AV_CODEC_ID_CYUV);
-STATIC_ASSERT(28 == AV_CODEC_ID_H264);
-STATIC_ASSERT(29 == AV_CODEC_ID_INDEO3);
-STATIC_ASSERT(30 == AV_CODEC_ID_VP3);
-STATIC_ASSERT(31 == AV_CODEC_ID_THEORA);
-STATIC_ASSERT(32 == AV_CODEC_ID_ASV1);
-STATIC_ASSERT(33 == AV_CODEC_ID_ASV2);
-STATIC_ASSERT(34 == AV_CODEC_ID_FFV1);
-STATIC_ASSERT(35 == AV_CODEC_ID_4XM);
-STATIC_ASSERT(36 == AV_CODEC_ID_VCR1);
-STATIC_ASSERT(37 == AV_CODEC_ID_CLJR);
-STATIC_ASSERT(38 == AV_CODEC_ID_MDEC);
-STATIC_ASSERT(39 == AV_CODEC_ID_ROQ);
-STATIC_ASSERT(40 == AV_CODEC_ID_INTERPLAY_VIDEO);
-STATIC_ASSERT(41 == AV_CODEC_ID_XAN_WC3);
-STATIC_ASSERT(42 == AV_CODEC_ID_XAN_WC4);
-STATIC_ASSERT(43 == AV_CODEC_ID_RPZA);
-STATIC_ASSERT(44 == AV_CODEC_ID_CINEPAK);
-STATIC_ASSERT(45 == AV_CODEC_ID_WS_VQA);
-STATIC_ASSERT(46 == AV_CODEC_ID_MSRLE);
-STATIC_ASSERT(47 == AV_CODEC_ID_MSVIDEO1);
-STATIC_ASSERT(48 == AV_CODEC_ID_IDCIN);
-STATIC_ASSERT(49 == AV_CODEC_ID_8BPS);
-STATIC_ASSERT(50 == AV_CODEC_ID_SMC);
-STATIC_ASSERT(51 == AV_CODEC_ID_FLIC);
-STATIC_ASSERT(52 == AV_CODEC_ID_TRUEMOTION1);
-STATIC_ASSERT(53 == AV_CODEC_ID_VMDVIDEO);
-STATIC_ASSERT(54 == AV_CODEC_ID_MSZH);
-STATIC_ASSERT(55 == AV_CODEC_ID_ZLIB);
-STATIC_ASSERT(56 == AV_CODEC_ID_QTRLE);
-STATIC_ASSERT(57 == AV_CODEC_ID_SNOW_DEPRECATED);
-STATIC_ASSERT(58 == AV_CODEC_ID_TSCC);
-STATIC_ASSERT(59 == AV_CODEC_ID_ULTI);
-STATIC_ASSERT(60 == AV_CODEC_ID_QDRAW);
-STATIC_ASSERT(61 == AV_CODEC_ID_VIXL);
-STATIC_ASSERT(62 == AV_CODEC_ID_QPEG);
-STATIC_ASSERT(63 == AV_CODEC_ID_PNG);
-STATIC_ASSERT(64 == AV_CODEC_ID_PPM);
-STATIC_ASSERT(65 == AV_CODEC_ID_PBM);
-STATIC_ASSERT(66 == AV_CODEC_ID_PGM);
-STATIC_ASSERT(67 == AV_CODEC_ID_PGMYUV);
-STATIC_ASSERT(68 == AV_CODEC_ID_PAM);
-STATIC_ASSERT(69 == AV_CODEC_ID_FFVHUFF);
-STATIC_ASSERT(70 == AV_CODEC_ID_RV30);
-STATIC_ASSERT(71 == AV_CODEC_ID_RV40);
-STATIC_ASSERT(72 == AV_CODEC_ID_VC1);
-STATIC_ASSERT(73 == AV_CODEC_ID_WMV3);
-STATIC_ASSERT(74 == AV_CODEC_ID_LOCO);
-STATIC_ASSERT(75 == AV_CODEC_ID_WNV1);
-STATIC_ASSERT(76 == AV_CODEC_ID_AASC);
-STATIC_ASSERT(77 == AV_CODEC_ID_INDEO2);
-STATIC_ASSERT(78 == AV_CODEC_ID_FRAPS);
-STATIC_ASSERT(79 == AV_CODEC_ID_TRUEMOTION2);
-STATIC_ASSERT(80 == AV_CODEC_ID_BMP);
-STATIC_ASSERT(81 == AV_CODEC_ID_CSCD);
-STATIC_ASSERT(82 == AV_CODEC_ID_MMVIDEO);
-STATIC_ASSERT(83 == AV_CODEC_ID_ZMBV);
-STATIC_ASSERT(84 == AV_CODEC_ID_AVS);
-STATIC_ASSERT(85 == AV_CODEC_ID_SMACKVIDEO);
-STATIC_ASSERT(86 == AV_CODEC_ID_NUV);
-STATIC_ASSERT(87 == AV_CODEC_ID_KMVC);
-STATIC_ASSERT(88 == AV_CODEC_ID_FLASHSV);
-STATIC_ASSERT(89 == AV_CODEC_ID_CAVS);
-STATIC_ASSERT(90 == AV_CODEC_ID_JPEG2000);
-STATIC_ASSERT(91 == AV_CODEC_ID_VMNC);
-STATIC_ASSERT(92 == AV_CODEC_ID_VP5);
-STATIC_ASSERT(93 == AV_CODEC_ID_VP6);
-STATIC_ASSERT(94 == AV_CODEC_ID_VP6F);
-STATIC_ASSERT(95 == AV_CODEC_ID_TARGA);
-STATIC_ASSERT(96 == AV_CODEC_ID_DSICINVIDEO);
-STATIC_ASSERT(97 == AV_CODEC_ID_TIERTEXSEQVIDEO);
-STATIC_ASSERT(98 == AV_CODEC_ID_TIFF);
-STATIC_ASSERT(99 == AV_CODEC_ID_GIF);
-STATIC_ASSERT(100 == AV_CODEC_ID_DXA);
-STATIC_ASSERT(101 == AV_CODEC_ID_DNXHD);
-STATIC_ASSERT(102 == AV_CODEC_ID_THP);
-STATIC_ASSERT(103 == AV_CODEC_ID_SGI);
-STATIC_ASSERT(104 == AV_CODEC_ID_C93);
-STATIC_ASSERT(105 == AV_CODEC_ID_BETHSOFTVID);
-STATIC_ASSERT(106 == AV_CODEC_ID_PTX);
-STATIC_ASSERT(107 == AV_CODEC_ID_TXD);
-STATIC_ASSERT(108 == AV_CODEC_ID_VP6A);
-STATIC_ASSERT(109 == AV_CODEC_ID_AMV);
-STATIC_ASSERT(110 == AV_CODEC_ID_VB);
-STATIC_ASSERT(111 == AV_CODEC_ID_PCX);
-STATIC_ASSERT(112 == AV_CODEC_ID_SUNRAST);
-STATIC_ASSERT(113 == AV_CODEC_ID_INDEO4);
-STATIC_ASSERT(114 == AV_CODEC_ID_INDEO5);
-STATIC_ASSERT(115 == AV_CODEC_ID_MIMIC);
-STATIC_ASSERT(116 == AV_CODEC_ID_RL2);
-STATIC_ASSERT(117 == AV_CODEC_ID_ESCAPE124);
-STATIC_ASSERT(118 == AV_CODEC_ID_DIRAC);
-STATIC_ASSERT(119 == AV_CODEC_ID_BFI);
-STATIC_ASSERT(120 == AV_CODEC_ID_CMV);
-STATIC_ASSERT(121 == AV_CODEC_ID_MOTIONPIXELS);
-STATIC_ASSERT(122 == AV_CODEC_ID_TGV);
-STATIC_ASSERT(123 == AV_CODEC_ID_TGQ);
-STATIC_ASSERT(124 == AV_CODEC_ID_TQI);
-STATIC_ASSERT(125 == AV_CODEC_ID_AURA);
-STATIC_ASSERT(126 == AV_CODEC_ID_AURA2);
-STATIC_ASSERT(127 == AV_CODEC_ID_V210X);
-STATIC_ASSERT(128 == AV_CODEC_ID_TMV);
-STATIC_ASSERT(129 == AV_CODEC_ID_V210);
-STATIC_ASSERT(130 == AV_CODEC_ID_DPX);
-STATIC_ASSERT(131 == AV_CODEC_ID_MAD);
-STATIC_ASSERT(132 == AV_CODEC_ID_FRWU);
-STATIC_ASSERT(133 == AV_CODEC_ID_FLASHSV2);
-STATIC_ASSERT(134 == AV_CODEC_ID_CDGRAPHICS);
-STATIC_ASSERT(135 == AV_CODEC_ID_R210);
-STATIC_ASSERT(136 == AV_CODEC_ID_ANM);
-STATIC_ASSERT(137 == AV_CODEC_ID_BINKVIDEO);
-STATIC_ASSERT(138 == AV_CODEC_ID_IFF_ILBM);
-STATIC_ASSERT(139 == AV_CODEC_ID_IFF_BYTERUN1);
-STATIC_ASSERT(140 == AV_CODEC_ID_KGV1);
-STATIC_ASSERT(141 == AV_CODEC_ID_YOP);
-STATIC_ASSERT(142 == AV_CODEC_ID_VP8);
-STATIC_ASSERT(143 == AV_CODEC_ID_PICTOR);
-STATIC_ASSERT(144 == AV_CODEC_ID_ANSI);
-STATIC_ASSERT(145 == AV_CODEC_ID_A64_MULTI);
-STATIC_ASSERT(146 == AV_CODEC_ID_A64_MULTI5);
-STATIC_ASSERT(147 == AV_CODEC_ID_R10K);
-STATIC_ASSERT(148 == AV_CODEC_ID_MXPEG);
-STATIC_ASSERT(149 == AV_CODEC_ID_LAGARITH);
-STATIC_ASSERT(150 == AV_CODEC_ID_PRORES);
-STATIC_ASSERT(151 == AV_CODEC_ID_JV);
-STATIC_ASSERT(152 == AV_CODEC_ID_DFA);
-STATIC_ASSERT(153 == AV_CODEC_ID_WMV3IMAGE);
-STATIC_ASSERT(154 == AV_CODEC_ID_VC1IMAGE);
-STATIC_ASSERT(155 == AV_CODEC_ID_UTVIDEO);
-STATIC_ASSERT(156 == AV_CODEC_ID_BMV_VIDEO);
-STATIC_ASSERT(157 == AV_CODEC_ID_VBLE);
-STATIC_ASSERT(158 == AV_CODEC_ID_DXTORY);
-STATIC_ASSERT(159 == AV_CODEC_ID_V410);
-STATIC_ASSERT(160 == AV_CODEC_ID_XWD);
-STATIC_ASSERT(161 == AV_CODEC_ID_CDXL);
-STATIC_ASSERT(162 == AV_CODEC_ID_XBM);
-STATIC_ASSERT(163 == AV_CODEC_ID_ZEROCODEC);
-STATIC_ASSERT(164 == AV_CODEC_ID_MSS1);
-STATIC_ASSERT(165 == AV_CODEC_ID_MSA1);
-STATIC_ASSERT(166 == AV_CODEC_ID_TSCC2);
-STATIC_ASSERT(167 == AV_CODEC_ID_MTS2);
-STATIC_ASSERT(168 == AV_CODEC_ID_CLLC);
-STATIC_ASSERT(169 == AV_CODEC_ID_MSS2);
-STATIC_ASSERT(170 == AV_CODEC_ID_VP9);
-STATIC_ASSERT(65536 == AV_CODEC_ID_PCM_S16LE);
-STATIC_ASSERT(65537 == AV_CODEC_ID_PCM_S16BE);
-STATIC_ASSERT(65538 == AV_CODEC_ID_PCM_U16LE);
-STATIC_ASSERT(65539 == AV_CODEC_ID_PCM_U16BE);
-STATIC_ASSERT(65540 == AV_CODEC_ID_PCM_S8);
-STATIC_ASSERT(65541 == AV_CODEC_ID_PCM_U8);
-STATIC_ASSERT(65542 == AV_CODEC_ID_PCM_MULAW);
-STATIC_ASSERT(65543 == AV_CODEC_ID_PCM_ALAW);
-STATIC_ASSERT(65544 == AV_CODEC_ID_PCM_S32LE);
-STATIC_ASSERT(65545 == AV_CODEC_ID_PCM_S32BE);
-STATIC_ASSERT(65546 == AV_CODEC_ID_PCM_U32LE);
-STATIC_ASSERT(65547 == AV_CODEC_ID_PCM_U32BE);
-STATIC_ASSERT(65548 == AV_CODEC_ID_PCM_S24LE);
-STATIC_ASSERT(65549 == AV_CODEC_ID_PCM_S24BE);
-STATIC_ASSERT(65550 == AV_CODEC_ID_PCM_U24LE);
-STATIC_ASSERT(65551 == AV_CODEC_ID_PCM_U24BE);
-STATIC_ASSERT(65552 == AV_CODEC_ID_PCM_S24DAUD);
-STATIC_ASSERT(65553 == AV_CODEC_ID_PCM_ZORK);
-STATIC_ASSERT(65554 == AV_CODEC_ID_PCM_S16LE_PLANAR);
-STATIC_ASSERT(65555 == AV_CODEC_ID_PCM_DVD);
-STATIC_ASSERT(65556 == AV_CODEC_ID_PCM_F32BE);
-STATIC_ASSERT(65557 == AV_CODEC_ID_PCM_F32LE);
-STATIC_ASSERT(65558 == AV_CODEC_ID_PCM_F64BE);
-STATIC_ASSERT(65559 == AV_CODEC_ID_PCM_F64LE);
-STATIC_ASSERT(65560 == AV_CODEC_ID_PCM_BLURAY);
-STATIC_ASSERT(65561 == AV_CODEC_ID_PCM_LXF);
-STATIC_ASSERT(65562 == AV_CODEC_ID_S302M);
-STATIC_ASSERT(65563 == AV_CODEC_ID_PCM_S8_PLANAR);
-STATIC_ASSERT(69632 == AV_CODEC_ID_ADPCM_IMA_QT);
-STATIC_ASSERT(69633 == AV_CODEC_ID_ADPCM_IMA_WAV);
-STATIC_ASSERT(69634 == AV_CODEC_ID_ADPCM_IMA_DK3);
-STATIC_ASSERT(69635 == AV_CODEC_ID_ADPCM_IMA_DK4);
-STATIC_ASSERT(69636 == AV_CODEC_ID_ADPCM_IMA_WS);
-STATIC_ASSERT(69637 == AV_CODEC_ID_ADPCM_IMA_SMJPEG);
-STATIC_ASSERT(69638 == AV_CODEC_ID_ADPCM_MS);
-STATIC_ASSERT(69639 == AV_CODEC_ID_ADPCM_4XM);
-STATIC_ASSERT(69640 == AV_CODEC_ID_ADPCM_XA);
-STATIC_ASSERT(69641 == AV_CODEC_ID_ADPCM_ADX);
-STATIC_ASSERT(69642 == AV_CODEC_ID_ADPCM_EA);
-STATIC_ASSERT(69643 == AV_CODEC_ID_ADPCM_G726);
-STATIC_ASSERT(69644 == AV_CODEC_ID_ADPCM_CT);
-STATIC_ASSERT(69645 == AV_CODEC_ID_ADPCM_SWF);
-STATIC_ASSERT(69646 == AV_CODEC_ID_ADPCM_YAMAHA);
-STATIC_ASSERT(69647 == AV_CODEC_ID_ADPCM_SBPRO_4);
-STATIC_ASSERT(69648 == AV_CODEC_ID_ADPCM_SBPRO_3);
-STATIC_ASSERT(69649 == AV_CODEC_ID_ADPCM_SBPRO_2);
-STATIC_ASSERT(69650 == AV_CODEC_ID_ADPCM_THP);
-STATIC_ASSERT(69651 == AV_CODEC_ID_ADPCM_IMA_AMV);
-STATIC_ASSERT(69652 == AV_CODEC_ID_ADPCM_EA_R1);
-STATIC_ASSERT(69653 == AV_CODEC_ID_ADPCM_EA_R3);
-STATIC_ASSERT(69654 == AV_CODEC_ID_ADPCM_EA_R2);
-STATIC_ASSERT(69655 == AV_CODEC_ID_ADPCM_IMA_EA_SEAD);
-STATIC_ASSERT(69656 == AV_CODEC_ID_ADPCM_IMA_EA_EACS);
-STATIC_ASSERT(69657 == AV_CODEC_ID_ADPCM_EA_XAS);
-STATIC_ASSERT(69658 == AV_CODEC_ID_ADPCM_EA_MAXIS_XA);
-STATIC_ASSERT(69659 == AV_CODEC_ID_ADPCM_IMA_ISS);
-STATIC_ASSERT(69660 == AV_CODEC_ID_ADPCM_G722);
-STATIC_ASSERT(69661 == AV_CODEC_ID_ADPCM_IMA_APC);
-STATIC_ASSERT(73728 == AV_CODEC_ID_AMR_NB);
-STATIC_ASSERT(73729 == AV_CODEC_ID_AMR_WB);
-STATIC_ASSERT(77824 == AV_CODEC_ID_RA_144);
-STATIC_ASSERT(77825 == AV_CODEC_ID_RA_288);
-STATIC_ASSERT(81920 == AV_CODEC_ID_ROQ_DPCM);
-STATIC_ASSERT(81921 == AV_CODEC_ID_INTERPLAY_DPCM);
-STATIC_ASSERT(81922 == AV_CODEC_ID_XAN_DPCM);
-STATIC_ASSERT(81923 == AV_CODEC_ID_SOL_DPCM);
-STATIC_ASSERT(86016 == AV_CODEC_ID_MP2);
-STATIC_ASSERT(86017 == AV_CODEC_ID_MP3);
-STATIC_ASSERT(86018 == AV_CODEC_ID_AAC);
-STATIC_ASSERT(86019 == AV_CODEC_ID_AC3);
-STATIC_ASSERT(86020 == AV_CODEC_ID_DTS);
-STATIC_ASSERT(86021 == AV_CODEC_ID_VORBIS);
-STATIC_ASSERT(86022 == AV_CODEC_ID_DVAUDIO);
-STATIC_ASSERT(86023 == AV_CODEC_ID_WMAV1);
-STATIC_ASSERT(86024 == AV_CODEC_ID_WMAV2);
-STATIC_ASSERT(86025 == AV_CODEC_ID_MACE3);
-STATIC_ASSERT(86026 == AV_CODEC_ID_MACE6);
-STATIC_ASSERT(86027 == AV_CODEC_ID_VMDAUDIO);
-STATIC_ASSERT(86028 == AV_CODEC_ID_FLAC);
-STATIC_ASSERT(86029 == AV_CODEC_ID_MP3ADU);
-STATIC_ASSERT(86030 == AV_CODEC_ID_MP3ON4);
-STATIC_ASSERT(86031 == AV_CODEC_ID_SHORTEN);
-STATIC_ASSERT(86032 == AV_CODEC_ID_ALAC);
-STATIC_ASSERT(86033 == AV_CODEC_ID_WESTWOOD_SND1);
-STATIC_ASSERT(86034 == AV_CODEC_ID_GSM);
-STATIC_ASSERT(86035 == AV_CODEC_ID_QDM2);
-STATIC_ASSERT(86036 == AV_CODEC_ID_COOK);
-STATIC_ASSERT(86037 == AV_CODEC_ID_TRUESPEECH);
-STATIC_ASSERT(86038 == AV_CODEC_ID_TTA);
-STATIC_ASSERT(86039 == AV_CODEC_ID_SMACKAUDIO);
-STATIC_ASSERT(86040 == AV_CODEC_ID_QCELP);
-STATIC_ASSERT(86041 == AV_CODEC_ID_WAVPACK);
-STATIC_ASSERT(86042 == AV_CODEC_ID_DSICINAUDIO);
-STATIC_ASSERT(86043 == AV_CODEC_ID_IMC);
-STATIC_ASSERT(86044 == AV_CODEC_ID_MUSEPACK7);
-STATIC_ASSERT(86045 == AV_CODEC_ID_MLP);
-STATIC_ASSERT(86046 == AV_CODEC_ID_GSM_MS);
-STATIC_ASSERT(86047 == AV_CODEC_ID_ATRAC3);
-STATIC_ASSERT(86048 == AV_CODEC_ID_VOXWARE);
-STATIC_ASSERT(86049 == AV_CODEC_ID_APE);
-STATIC_ASSERT(86050 == AV_CODEC_ID_NELLYMOSER);
-STATIC_ASSERT(86051 == AV_CODEC_ID_MUSEPACK8);
-STATIC_ASSERT(86052 == AV_CODEC_ID_SPEEX);
-STATIC_ASSERT(86053 == AV_CODEC_ID_WMAVOICE);
-STATIC_ASSERT(86054 == AV_CODEC_ID_WMAPRO);
-STATIC_ASSERT(86055 == AV_CODEC_ID_WMALOSSLESS);
-STATIC_ASSERT(86056 == AV_CODEC_ID_ATRAC3P);
-STATIC_ASSERT(86057 == AV_CODEC_ID_EAC3);
-STATIC_ASSERT(86058 == AV_CODEC_ID_SIPR);
-STATIC_ASSERT(86059 == AV_CODEC_ID_MP1);
-STATIC_ASSERT(86060 == AV_CODEC_ID_TWINVQ);
-STATIC_ASSERT(86061 == AV_CODEC_ID_TRUEHD);
-STATIC_ASSERT(86062 == AV_CODEC_ID_MP4ALS);
-STATIC_ASSERT(86063 == AV_CODEC_ID_ATRAC1);
-STATIC_ASSERT(86064 == AV_CODEC_ID_BINKAUDIO_RDFT);
-STATIC_ASSERT(86065 == AV_CODEC_ID_BINKAUDIO_DCT);
-STATIC_ASSERT(86066 == AV_CODEC_ID_AAC_LATM);
-STATIC_ASSERT(86067 == AV_CODEC_ID_QDMC);
-STATIC_ASSERT(86068 == AV_CODEC_ID_CELT);
-STATIC_ASSERT(86069 == AV_CODEC_ID_G723_1);
-STATIC_ASSERT(86070 == AV_CODEC_ID_G729);
-STATIC_ASSERT(86071 == AV_CODEC_ID_8SVX_EXP);
-STATIC_ASSERT(86072 == AV_CODEC_ID_8SVX_FIB);
-STATIC_ASSERT(86073 == AV_CODEC_ID_BMV_AUDIO);
-STATIC_ASSERT(86074 == AV_CODEC_ID_RALF);
-STATIC_ASSERT(86075 == AV_CODEC_ID_IAC);
-STATIC_ASSERT(86076 == AV_CODEC_ID_ILBC);
-STATIC_ASSERT(86077 == AV_CODEC_ID_OPUS_DEPRECATED);
-STATIC_ASSERT(86078 == AV_CODEC_ID_COMFORT_NOISE);
-STATIC_ASSERT(86079 == AV_CODEC_ID_TAK_DEPRECATED);
-STATIC_ASSERT(94208 == AV_CODEC_ID_DVD_SUBTITLE);
-STATIC_ASSERT(94209 == AV_CODEC_ID_DVB_SUBTITLE);
-STATIC_ASSERT(94210 == AV_CODEC_ID_TEXT);
-STATIC_ASSERT(94211 == AV_CODEC_ID_XSUB);
-STATIC_ASSERT(94212 == AV_CODEC_ID_SSA);
-STATIC_ASSERT(94213 == AV_CODEC_ID_MOV_TEXT);
-STATIC_ASSERT(94214 == AV_CODEC_ID_HDMV_PGS_SUBTITLE);
-STATIC_ASSERT(94215 == AV_CODEC_ID_DVB_TELETEXT);
-STATIC_ASSERT(94216 == AV_CODEC_ID_SRT);
-STATIC_ASSERT(98304 == AV_CODEC_ID_TTF);
-STATIC_ASSERT(102400 == AV_CODEC_ID_PROBE);
-STATIC_ASSERT(131072 == AV_CODEC_ID_MPEG2TS);
-STATIC_ASSERT(131073 == AV_CODEC_ID_MPEG4SYSTEMS);
-STATIC_ASSERT(135168 == AV_CODEC_ID_FFMETADATA);
-STATIC_ASSERT(4665933 == AV_CODEC_ID_G2M);
-STATIC_ASSERT(4801606 == AV_CODEC_ID_IDF);
-STATIC_ASSERT(5198918 == AV_CODEC_ID_OTF);
-STATIC_ASSERT(407917392 == AV_CODEC_ID_PCM_S24LE_PLANAR);
-STATIC_ASSERT(542135120 == AV_CODEC_ID_PCM_S32LE_PLANAR);
-STATIC_ASSERT(808530518 == AV_CODEC_ID_012V);
-STATIC_ASSERT(809850962 == AV_CODEC_ID_EXR);
-// AV_CODEC_ID_8SVX_RAW(944985688) is no longer supported by ffmpeg but remains
-// in histograms.xml for historical purposes.
-STATIC_ASSERT(1095123744 == AV_CODEC_ID_ADPCM_AFC);
-STATIC_ASSERT(1096176208 == AV_CODEC_ID_AVRP);
-STATIC_ASSERT(1096176238 == AV_CODEC_ID_AVRN);
-STATIC_ASSERT(1096176969 == AV_CODEC_ID_AVUI);
-STATIC_ASSERT(1096373590 == AV_CODEC_ID_AYUV);
-STATIC_ASSERT(1112557912 == AV_CODEC_ID_BRENDER_PIX);
-STATIC_ASSERT(1112823892 == AV_CODEC_ID_BINTEXT);
-STATIC_ASSERT(1129335105 == AV_CODEC_ID_CPIA);
-STATIC_ASSERT(1160852272 == AV_CODEC_ID_ESCAPE130);
-STATIC_ASSERT(1179014995 == AV_CODEC_ID_FFWAVESYNTH);
-STATIC_ASSERT(1211250229 == AV_CODEC_ID_HEVC);
-STATIC_ASSERT(1246975298 == AV_CODEC_ID_JACOSUB);
-STATIC_ASSERT(1263294017 == AV_CODEC_ID_SMPTE_KLV);
-STATIC_ASSERT(1297108018 == AV_CODEC_ID_MPL2);
-STATIC_ASSERT(1297498929 == AV_CODEC_ID_MVC1);
-STATIC_ASSERT(1297498930 == AV_CODEC_ID_MVC2);
-STATIC_ASSERT(1330333984 == AV_CODEC_ID_ADPCM_IMA_OKI);
-STATIC_ASSERT(1330664787 == AV_CODEC_ID_OPUS);
-STATIC_ASSERT(1346455105 == AV_CODEC_ID_PAF_AUDIO);
-STATIC_ASSERT(1346455126 == AV_CODEC_ID_PAF_VIDEO);
-STATIC_ASSERT(1347637264 == AV_CODEC_ID_PCM_S16BE_PLANAR);
-STATIC_ASSERT(1349012051 == AV_CODEC_ID_PJS);
-STATIC_ASSERT(1381259348 == AV_CODEC_ID_REALTEXT);
-STATIC_ASSERT(1396788553 == AV_CODEC_ID_SAMI);
-STATIC_ASSERT(1396788813 == AV_CODEC_ID_SANM);
-STATIC_ASSERT(1397180754 == AV_CODEC_ID_SGIRLE);
-STATIC_ASSERT(1397706307 == AV_CODEC_ID_SONIC);
-STATIC_ASSERT(1397706316 == AV_CODEC_ID_SONIC_LS);
-STATIC_ASSERT(1397909872 == AV_CODEC_ID_SUBRIP);
-STATIC_ASSERT(1398953521 == AV_CODEC_ID_SUBVIEWER1);
-STATIC_ASSERT(1400201814 == AV_CODEC_ID_SUBVIEWER);
-STATIC_ASSERT(1412575542 == AV_CODEC_ID_TARGA_Y216);
-STATIC_ASSERT(1446195256 == AV_CODEC_ID_V308);
-STATIC_ASSERT(1446260792 == AV_CODEC_ID_V408);
-STATIC_ASSERT(1447644481 == AV_CODEC_ID_VIMA);
-STATIC_ASSERT(1448111218 == AV_CODEC_ID_VPLAYER);
-STATIC_ASSERT(1465275476 == AV_CODEC_ID_WEBVTT);
-STATIC_ASSERT(1480739150 == AV_CODEC_ID_XBIN);
-STATIC_ASSERT(1480999235 == AV_CODEC_ID_XFACE);
-STATIC_ASSERT(1496592720 == AV_CODEC_ID_Y41P);
-STATIC_ASSERT(1498764852 == AV_CODEC_ID_YUV4);
-STATIC_ASSERT(1664495672 == AV_CODEC_ID_EIA_608);
-STATIC_ASSERT(1833195076 == AV_CODEC_ID_MICRODVD);
-STATIC_ASSERT(1936029283 == AV_CODEC_ID_EVRC);
-STATIC_ASSERT(1936944502 == AV_CODEC_ID_SMV);
-STATIC_ASSERT(1950507339 == AV_CODEC_ID_TAK);
-
class FFmpegCommonTest : public testing::Test {
public:
FFmpegCommonTest() {
@@ -405,7 +31,7 @@ TEST_F(FFmpegCommonTest, OpusAudioDecoderConfig) {
context.sample_rate = 44100;
AudioDecoderConfig decoder_config;
- AVCodecContextToAudioDecoderConfig(&context, false, &decoder_config, false);
+ AVCodecContextToAudioDecoderConfig(&context, false, &decoder_config);
EXPECT_EQ(48000, decoder_config.samples_per_second());
}
@@ -502,4 +128,37 @@ TEST_F(FFmpegCommonTest, UTCDateToTime_Invalid) {
}
}
+// Verifies there are no collisions of the codec name hashes used for UMA. Also
+// includes code for updating the histograms XML.
+TEST_F(FFmpegCommonTest, VerifyUmaCodecHashes) {
+ const AVCodecDescriptor* desc = avcodec_descriptor_next(nullptr);
+
+ std::map<int32_t, const char*> sorted_hashes;
+ while (desc) {
+ const int32_t hash = HashCodecName(desc->name);
+ // Ensure there are no collisions.
+ ASSERT_TRUE(sorted_hashes.find(hash) == sorted_hashes.end());
+ sorted_hashes[hash] = desc->name;
+
+ desc = avcodec_descriptor_next(desc);
+ }
+
+ // Add a none entry for when no codec is detected.
+ static const char kUnknownCodec[] = "none";
+ const int32_t hash = HashCodecName(kUnknownCodec);
+ ASSERT_TRUE(sorted_hashes.find(hash) == sorted_hashes.end());
+ sorted_hashes[hash] = kUnknownCodec;
+
+ // Uncomment the following lines to generate the "FFmpegCodecHashes" enum for
+ // usage in the histogram metrics file. While it regenerates *ALL* values, it
+ // should only be used to *ADD* values to histograms file. Never delete any
+ // values; diff should verify.
+#if 0
+ printf("<enum name=\"FFmpegCodecHashes\" type=\"int\">\n");
+ for (const auto& kv : sorted_hashes)
+ printf(" <int value=\"%d\" label=\"%s\"/>\n", kv.first, kv.second);
+ printf("</enum>\n");
+#endif
+}
+
} // namespace media
diff --git a/chromium/media/ffmpeg/ffmpeg_regression_tests.cc b/chromium/media/ffmpeg/ffmpeg_regression_tests.cc
index ad595eec4fc..c7043b35642 100644
--- a/chromium/media/ffmpeg/ffmpeg_regression_tests.cc
+++ b/chromium/media/ffmpeg/ffmpeg_regression_tests.cc
@@ -5,12 +5,12 @@
// Regression tests for FFmpeg. Test files can be found in the internal media
// test data directory:
//
-// svn://svn.chromium.org/chrome-internal/trunk/data/media/
+// https://chrome-internal.googlesource.com/chrome/data/media
//
// Simply add the custom_dep below to your gclient and sync:
//
// "src/media/test/data/internal":
-// "svn://svn.chromium.org/chrome-internal/trunk/data/media"
+// "https://chrome-internal.googlesource.com/chrome/data/media"
//
// Many of the files here do not cause issues outside of tooling, so you'll need
// to run this test under ASAN, TSAN, and Valgrind to ensure that all issues are
@@ -126,7 +126,10 @@ FFMPEG_TEST_CASE(Cr242786, "security/242786.webm", PIPELINE_OK, PIPELINE_OK);
// Test for out-of-bounds access with slightly corrupt file (detection logic
// thinks it's a MONO file, but actually contains STEREO audio).
FFMPEG_TEST_CASE(Cr275590, "security/275590.m4a", PIPELINE_OK, PIPELINE_OK);
-FFMPEG_TEST_CASE(Cr444522, "security/444522.mp4", PIPELINE_OK, PIPELINE_OK);
+FFMPEG_TEST_CASE(Cr444522,
+ "security/444522.mp4",
+ DEMUXER_ERROR_COULD_NOT_OPEN,
+ DEMUXER_ERROR_COULD_NOT_OPEN);
FFMPEG_TEST_CASE(Cr444539,
"security/444539.m4a",
DEMUXER_ERROR_COULD_NOT_OPEN,
@@ -135,10 +138,22 @@ FFMPEG_TEST_CASE(Cr444546,
"security/444546.mp4",
DEMUXER_ERROR_COULD_NOT_OPEN,
DEMUXER_ERROR_COULD_NOT_OPEN);
+FFMPEG_TEST_CASE(Cr447860,
+ "security/447860.webm",
+ PIPELINE_OK,
+ PIPELINE_OK);
FFMPEG_TEST_CASE(Cr449958,
"security/449958.webm",
PIPELINE_OK,
PIPELINE_ERROR_DECODE);
+FFMPEG_TEST_CASE(Cr536601,
+ "security/536601.m4a",
+ PIPELINE_OK,
+ PIPELINE_OK);
+FFMPEG_TEST_CASE(Cr532967,
+ "security/532967.webm",
+ PIPELINE_OK,
+ PIPELINE_OK);
// General MP4 test cases.
FFMPEG_TEST_CASE(MP4_0,
diff --git a/chromium/media/filters/audio_clock.cc b/chromium/media/filters/audio_clock.cc
index d4fea149e0c..29bf14e8558 100644
--- a/chromium/media/filters/audio_clock.cc
+++ b/chromium/media/filters/audio_clock.cc
@@ -7,7 +7,6 @@
#include <algorithm>
#include "base/logging.h"
-#include "media/base/buffers.h"
namespace media {
@@ -42,52 +41,23 @@ void AudioClock::WroteAudio(int frames_written,
//
// The ordering of compute -> push -> pop eliminates unnecessary memory
// reallocations in cases where |buffered_| gets emptied.
- const int64_t original_buffered_frames = total_buffered_frames_;
int64_t frames_played =
std::max(INT64_C(0), total_buffered_frames_ - delay_frames);
- front_timestamp_ += ComputeBufferedMediaTime(frames_played);
PushBufferedAudioData(frames_written, playback_rate);
PushBufferedAudioData(frames_requested - frames_written, 0.0);
PopBufferedAudioData(frames_played);
+ // Update our front and back timestamps. The back timestamp is considered the
+ // authoritative source of truth, so base the front timestamp on range of data
+ // buffered. Doing so avoids accumulation errors on the front timestamp.
back_timestamp_ += base::TimeDelta::FromMicroseconds(
frames_written * playback_rate * microseconds_per_frame_);
-
- // Ensure something crazy hasn't happened to desync the front and back values.
- DCHECK_LE(front_timestamp_.InMicroseconds(), back_timestamp_.InMicroseconds())
- << "frames_written=" << frames_written
- << ", frames_requested=" << frames_requested
- << ", delay_frames=" << delay_frames
- << ", playback_rate=" << playback_rate
- << ", frames_played=" << frames_played
- << ", original_buffered_frames=" << original_buffered_frames
- << ", total_buffered_frames_=" << total_buffered_frames_;
-
- // Update cached values.
- double scaled_frames = 0;
- double scaled_frames_at_same_rate = 0;
- bool found_silence = false;
- for (size_t i = 0; i < buffered_.size(); ++i) {
- if (buffered_[i].playback_rate == 0) {
- found_silence = true;
- continue;
- }
-
- // Any buffered silence breaks our contiguous stretch of audio data.
- if (found_silence)
- break;
-
- scaled_frames += (buffered_[i].frames * buffered_[i].playback_rate);
-
- if (i == 0)
- scaled_frames_at_same_rate = scaled_frames;
- }
-
- contiguous_audio_data_buffered_ = base::TimeDelta::FromMicroseconds(
- scaled_frames * microseconds_per_frame_);
- contiguous_audio_data_buffered_at_same_rate_ =
- base::TimeDelta::FromMicroseconds(scaled_frames_at_same_rate *
- microseconds_per_frame_);
+ // Don't let front timestamp move earlier in time, as could occur due to delay
+ // frames pushed in the first write, above.
+ front_timestamp_ = std::max(front_timestamp_,
+ back_timestamp_ - ComputeBufferedMediaDuration());
+ DCHECK_GE(front_timestamp_, start_timestamp_);
+ DCHECK_LE(front_timestamp_, back_timestamp_);
}
void AudioClock::CompensateForSuspendedWrites(base::TimeDelta elapsed,
@@ -144,6 +114,34 @@ base::TimeDelta AudioClock::TimeUntilPlayback(base::TimeDelta timestamp) const {
microseconds_per_frame_);
}
+void AudioClock::ContiguousAudioDataBufferedForTesting(
+ base::TimeDelta* total,
+ base::TimeDelta* same_rate_total) const {
+ double scaled_frames = 0;
+ double scaled_frames_at_same_rate = 0;
+ bool found_silence = false;
+ for (size_t i = 0; i < buffered_.size(); ++i) {
+ if (buffered_[i].playback_rate == 0) {
+ found_silence = true;
+ continue;
+ }
+
+ // Any buffered silence breaks our contiguous stretch of audio data.
+ if (found_silence)
+ break;
+
+ scaled_frames += (buffered_[i].frames * buffered_[i].playback_rate);
+
+ if (i == 0)
+ scaled_frames_at_same_rate = scaled_frames;
+ }
+
+ *total = base::TimeDelta::FromMicroseconds(scaled_frames *
+ microseconds_per_frame_);
+ *same_rate_total = base::TimeDelta::FromMicroseconds(
+ scaled_frames_at_same_rate * microseconds_per_frame_);
+}
+
AudioClock::AudioData::AudioData(int64_t frames, double playback_rate)
: frames(frames), playback_rate(playback_rate) {
}
@@ -178,16 +176,10 @@ void AudioClock::PopBufferedAudioData(int64_t frames) {
}
}
-base::TimeDelta AudioClock::ComputeBufferedMediaTime(int64_t frames) const {
- DCHECK_LE(frames, total_buffered_frames_);
-
+base::TimeDelta AudioClock::ComputeBufferedMediaDuration() const {
double scaled_frames = 0;
- for (size_t i = 0; i < buffered_.size() && frames > 0; ++i) {
- int64_t min_frames = std::min(buffered_[i].frames, frames);
- scaled_frames += min_frames * buffered_[i].playback_rate;
- frames -= min_frames;
- }
-
+ for (const auto& buffer : buffered_)
+ scaled_frames += buffer.frames * buffer.playback_rate;
return base::TimeDelta::FromMicroseconds(scaled_frames *
microseconds_per_frame_);
}
diff --git a/chromium/media/filters/audio_clock.h b/chromium/media/filters/audio_clock.h
index 31317c0eada..63159b59e55 100644
--- a/chromium/media/filters/audio_clock.h
+++ b/chromium/media/filters/audio_clock.h
@@ -95,18 +95,9 @@ class MEDIA_EXPORT AudioClock {
// |timestamp| must be within front_timestamp() and back_timestamp().
base::TimeDelta TimeUntilPlayback(base::TimeDelta timestamp) const;
- // Returns the amount of contiguous media time buffered at the head of the
- // audio hardware buffer. Silence introduced into the audio hardware buffer is
- // treated as a break in media time.
- base::TimeDelta contiguous_audio_data_buffered() const {
- return contiguous_audio_data_buffered_;
- }
-
- // Same as above, but also treats changes in playback rate as a break in media
- // time.
- base::TimeDelta contiguous_audio_data_buffered_at_same_rate() const {
- return contiguous_audio_data_buffered_at_same_rate_;
- }
+ void ContiguousAudioDataBufferedForTesting(
+ base::TimeDelta* total,
+ base::TimeDelta* same_rate_total) const;
private:
// Even with a ridiculously high sample rate of 256kHz, using 64 bits will
@@ -123,7 +114,7 @@ class MEDIA_EXPORT AudioClock {
// Helpers for operating on |buffered_|.
void PushBufferedAudioData(int64_t frames, double playback_rate);
void PopBufferedAudioData(int64_t frames);
- base::TimeDelta ComputeBufferedMediaTime(int64_t frames) const;
+ base::TimeDelta ComputeBufferedMediaDuration() const;
const base::TimeDelta start_timestamp_;
const double microseconds_per_frame_;
@@ -134,10 +125,6 @@ class MEDIA_EXPORT AudioClock {
base::TimeDelta front_timestamp_;
base::TimeDelta back_timestamp_;
- // Cached results of last call to WroteAudio().
- base::TimeDelta contiguous_audio_data_buffered_;
- base::TimeDelta contiguous_audio_data_buffered_at_same_rate_;
-
DISALLOW_COPY_AND_ASSIGN(AudioClock);
};
diff --git a/chromium/media/filters/audio_clock_unittest.cc b/chromium/media/filters/audio_clock_unittest.cc
index 2144bb0d670..1f61100b15a 100644
--- a/chromium/media/filters/audio_clock_unittest.cc
+++ b/chromium/media/filters/audio_clock_unittest.cc
@@ -3,7 +3,6 @@
// found in the LICENSE file.
#include "media/base/audio_timestamp_helper.h"
-#include "media/base/buffers.h"
#include "media/filters/audio_clock.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -40,16 +39,21 @@ class AudioClockTest : public testing::Test {
}
int ContiguousAudioDataBufferedInDays() {
- return clock_.contiguous_audio_data_buffered().InDays();
+ base::TimeDelta total, same_rate_total;
+ clock_.ContiguousAudioDataBufferedForTesting(&total, &same_rate_total);
+ return total.InDays();
}
int ContiguousAudioDataBufferedInMilliseconds() {
- return clock_.contiguous_audio_data_buffered().InMilliseconds();
+ base::TimeDelta total, same_rate_total;
+ clock_.ContiguousAudioDataBufferedForTesting(&total, &same_rate_total);
+ return total.InMilliseconds();
}
int ContiguousAudioDataBufferedAtSameRateInMilliseconds() {
- return clock_.contiguous_audio_data_buffered_at_same_rate()
- .InMilliseconds();
+ base::TimeDelta total, same_rate_total;
+ clock_.ContiguousAudioDataBufferedForTesting(&total, &same_rate_total);
+ return same_rate_total.InMilliseconds();
}
const int sample_rate_;
@@ -73,15 +77,6 @@ TEST_F(AudioClockTest, BackTimestampStartsAtStartTimestamp) {
EXPECT_EQ(expected, clock.back_timestamp());
}
-TEST_F(AudioClockTest, ContiguousAudioDataBufferedStartsAtZero) {
- EXPECT_EQ(base::TimeDelta(), clock_.contiguous_audio_data_buffered());
-}
-
-TEST_F(AudioClockTest, ContiguousAudioDataBufferedAtSameRateStartsAtZero) {
- EXPECT_EQ(base::TimeDelta(),
- clock_.contiguous_audio_data_buffered_at_same_rate());
-}
-
TEST_F(AudioClockTest, Playback) {
// The first time we write data we should still expect our start timestamp
// due to delay.
diff --git a/chromium/media/filters/audio_decoder_unittest.cc b/chromium/media/filters/audio_decoder_unittest.cc
index 85e1e621612..83c2ea5985d 100644
--- a/chromium/media/filters/audio_decoder_unittest.cc
+++ b/chromium/media/filters/audio_decoder_unittest.cc
@@ -18,6 +18,7 @@
#include "media/base/decoder_buffer.h"
#include "media/base/test_data_util.h"
#include "media/base/test_helpers.h"
+#include "media/base/timestamp_constants.h"
#include "media/ffmpeg/ffmpeg_common.h"
#include "media/filters/audio_file_reader.h"
#include "media/filters/ffmpeg_audio_decoder.h"
@@ -99,8 +100,8 @@ class AudioDecoderTest : public testing::TestWithParam<DecoderTestData> {
last_decode_status_(AudioDecoder::kDecodeError) {
switch (GetParam().decoder_type) {
case FFMPEG:
- decoder_.reset(new FFmpegAudioDecoder(
- message_loop_.task_runner(), LogCB()));
+ decoder_.reset(new FFmpegAudioDecoder(message_loop_.task_runner(),
+ new MediaLog()));
break;
case OPUS:
decoder_.reset(
@@ -150,8 +151,8 @@ class AudioDecoderTest : public testing::TestWithParam<DecoderTestData> {
ASSERT_TRUE(reader_->SeekForTesting(start_timestamp_));
AudioDecoderConfig config;
- AVCodecContextToAudioDecoderConfig(
- reader_->codec_context_for_testing(), false, &config, false);
+ AVCodecContextToAudioDecoderConfig(reader_->codec_context_for_testing(),
+ false, &config);
EXPECT_EQ(GetParam().codec, config.codec());
EXPECT_EQ(GetParam().samples_per_second, config.samples_per_second());
@@ -381,7 +382,6 @@ TEST_P(OpusAudioDecoderBehavioralTest, InitializeWithNoCodecDelay) {
kOpusExtraData,
arraysize(kOpusExtraData),
false,
- false,
base::TimeDelta::FromMilliseconds(80),
0);
InitializeDecoder(decoder_config);
@@ -398,7 +398,6 @@ TEST_P(OpusAudioDecoderBehavioralTest, InitializeWithBadCodecDelay) {
kOpusExtraData,
arraysize(kOpusExtraData),
false,
- false,
base::TimeDelta::FromMilliseconds(80),
// Use a different codec delay than in the extradata.
100);
@@ -417,11 +416,19 @@ TEST_P(FFmpegAudioDecoderBehavioralTest, InitializeWithBadConfig) {
InitializeDecoderWithResult(decoder_config, false);
}
+#if defined(OPUS_FIXED_POINT)
+const DecodedBufferExpectations kSfxOpusExpectations[] = {
+ {0, 13500, "-2.70,-1.41,-0.78,-1.27,-2.56,-3.73,"},
+ {13500, 20000, "5.48,5.93,6.05,5.83,5.54,5.46,"},
+ {33500, 20000, "-3.44,-3.34,-3.57,-4.11,-4.74,-5.13,"},
+};
+#else
const DecodedBufferExpectations kSfxOpusExpectations[] = {
{0, 13500, "-2.70,-1.41,-0.78,-1.27,-2.56,-3.73,"},
{13500, 20000, "5.48,5.93,6.04,5.83,5.54,5.45,"},
{33500, 20000, "-3.45,-3.35,-3.57,-4.12,-4.74,-5.14,"},
};
+#endif
const DecodedBufferExpectations kBearOpusExpectations[] = {
{500, 3500, "-0.26,0.87,1.36,0.84,-0.30,-1.22,"},
diff --git a/chromium/media/filters/audio_renderer_algorithm.cc b/chromium/media/filters/audio_renderer_algorithm.cc
index 2d2bfbfe71f..a3701245a30 100644
--- a/chromium/media/filters/audio_renderer_algorithm.cc
+++ b/chromium/media/filters/audio_renderer_algorithm.cc
@@ -61,20 +61,14 @@ static const int kWsolaSearchIntervalMs = 30;
// The maximum size in seconds for the |audio_buffer_|. Arbitrarily determined.
static const int kMaxCapacityInSeconds = 3;
-// The starting size in frames for |audio_buffer_|. Previous usage maintained a
-// queue of 16 AudioBuffers, each of 512 frames. This worked well, so we
-// maintain this number of frames.
-static const int kStartingBufferSizeInFrames = 16 * 512;
-
-static_assert(kStartingBufferSizeInFrames <
- (kMaxCapacityInSeconds * limits::kMinSampleRate),
- "max capacity smaller than starting buffer size");
+// The minimum size in ms for the |audio_buffer_|. Arbitrarily determined.
+static const int kStartingCapacityInMs = 200;
AudioRendererAlgorithm::AudioRendererAlgorithm()
: channels_(0),
samples_per_second_(0),
muted_partial_frame_(0),
- capacity_(kStartingBufferSizeInFrames),
+ capacity_(0),
output_time_(0.0),
search_block_center_offset_(0),
search_block_index_(0),
@@ -92,8 +86,9 @@ void AudioRendererAlgorithm::Initialize(const AudioParameters& params) {
channels_ = params.channels();
samples_per_second_ = params.sample_rate();
- num_candidate_blocks_ = (kWsolaSearchIntervalMs * samples_per_second_) / 1000;
- ola_window_size_ = kOlaWindowSizeMs * samples_per_second_ / 1000;
+ capacity_ = ConvertMillisecondsToFrames(kStartingCapacityInMs);
+ num_candidate_blocks_ = ConvertMillisecondsToFrames(kWsolaSearchIntervalMs);
+ ola_window_size_ = ConvertMillisecondsToFrames(kOlaWindowSizeMs);
// Make sure window size in an even number.
ola_window_size_ += ola_window_size_ & 1;
@@ -206,9 +201,9 @@ void AudioRendererAlgorithm::FlushBuffers() {
wsola_output_->Zero();
num_complete_frames_ = 0;
- // Reset |capacity_| so growth triggered by underflows doesn't penalize
- // seek time.
- capacity_ = kStartingBufferSizeInFrames;
+ // Reset |capacity_| so growth triggered by underflows doesn't penalize seek
+ // time.
+ capacity_ = ConvertMillisecondsToFrames(kStartingCapacityInMs);
}
void AudioRendererAlgorithm::EnqueueBuffer(
@@ -235,6 +230,11 @@ bool AudioRendererAlgorithm::CanPerformWsola() const {
search_block_index_ + search_block_size <= frames;
}
+int AudioRendererAlgorithm::ConvertMillisecondsToFrames(int ms) const {
+ return ms * (samples_per_second_ /
+ static_cast<double>(base::Time::kMillisecondsPerSecond));
+}
+
bool AudioRendererAlgorithm::RunOneWsolaIteration(double playback_rate) {
if (!CanPerformWsola())
return false;
diff --git a/chromium/media/filters/audio_renderer_algorithm.h b/chromium/media/filters/audio_renderer_algorithm.h
index 2005bfeb950..940e7e7b9d7 100644
--- a/chromium/media/filters/audio_renderer_algorithm.h
+++ b/chromium/media/filters/audio_renderer_algorithm.h
@@ -118,6 +118,9 @@ class MEDIA_EXPORT AudioRendererAlgorithm {
// Do we have enough data to perform one round of WSOLA?
bool CanPerformWsola() const;
+ // Converts a time in milliseconds to frames using |samples_per_second_|.
+ int ConvertMillisecondsToFrames(int ms) const;
+
// Number of channels in audio stream.
int channels_;
diff --git a/chromium/media/filters/audio_renderer_algorithm_unittest.cc b/chromium/media/filters/audio_renderer_algorithm_unittest.cc
index 003cd512da4..e92223cd2bb 100644
--- a/chromium/media/filters/audio_renderer_algorithm_unittest.cc
+++ b/chromium/media/filters/audio_renderer_algorithm_unittest.cc
@@ -17,9 +17,9 @@
#include "base/memory/scoped_ptr.h"
#include "media/base/audio_buffer.h"
#include "media/base/audio_bus.h"
-#include "media/base/buffers.h"
#include "media/base/channel_layout.h"
#include "media/base/test_helpers.h"
+#include "media/base/timestamp_constants.h"
#include "media/filters/audio_renderer_algorithm.h"
#include "media/filters/wsola_internals.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -666,6 +666,7 @@ TEST_F(AudioRendererAlgorithmTest, FillBufferOffset) {
ASSERT_EQ(kHalfSize, frames_filled);
ASSERT_TRUE(VerifyAudioData(bus.get(), 0, kHalfSize, 0));
ASSERT_FALSE(VerifyAudioData(bus.get(), kHalfSize, kHalfSize, 0));
+ FillAlgorithmQueue();
}
const float kMutedRates[] = {5.0f, 0.25f};
@@ -679,6 +680,7 @@ TEST_F(AudioRendererAlgorithmTest, FillBufferOffset) {
ASSERT_EQ(kHalfSize, frames_filled);
ASSERT_FALSE(VerifyAudioData(bus.get(), 0, kHalfSize, 0));
ASSERT_TRUE(VerifyAudioData(bus.get(), kHalfSize, kHalfSize, 0));
+ FillAlgorithmQueue();
}
}
diff --git a/chromium/media/filters/chunk_demuxer.cc b/chromium/media/filters/chunk_demuxer.cc
index 72c7ef43e5c..be8ec5700f2 100644
--- a/chromium/media/filters/chunk_demuxer.cc
+++ b/chromium/media/filters/chunk_demuxer.cc
@@ -15,6 +15,7 @@
#include "media/base/audio_decoder_config.h"
#include "media/base/bind_to_current_loop.h"
#include "media/base/stream_parser_buffer.h"
+#include "media/base/timestamp_constants.h"
#include "media/base/video_decoder_config.h"
#include "media/filters/frame_processor.h"
#include "media/filters/stream_parser_factory.h"
@@ -83,6 +84,8 @@ static Ranges<TimeDelta> ComputeIntersection(const RangesList& activeRanges,
}
// Contains state belonging to a source id.
+// TODO: SourceState needs to be moved to a separate file and covered with unit
+// tests (see crbug.com/525836)
class SourceState {
public:
// Callback signature used to create ChunkDemuxerStreams.
@@ -94,11 +97,10 @@ class SourceState {
typedef base::Callback<void(
ChunkDemuxerStream*, const TextTrackConfig&)> NewTextTrackCB;
- SourceState(
- scoped_ptr<StreamParser> stream_parser,
- scoped_ptr<FrameProcessor> frame_processor, const LogCB& log_cb,
- const CreateDemuxerStreamCB& create_demuxer_stream_cb,
- const scoped_refptr<MediaLog>& media_log);
+ SourceState(scoped_ptr<StreamParser> stream_parser,
+ scoped_ptr<FrameProcessor> frame_processor,
+ const CreateDemuxerStreamCB& create_demuxer_stream_cb,
+ const scoped_refptr<MediaLog>& media_log);
~SourceState();
@@ -124,14 +126,20 @@ class SourceState {
const InitSegmentReceivedCB& init_segment_received_cb);
// Aborts the current append sequence and resets the parser.
- void Abort(TimeDelta append_window_start,
- TimeDelta append_window_end,
- TimeDelta* timestamp_offset);
+ void ResetParserState(TimeDelta append_window_start,
+ TimeDelta append_window_end,
+ TimeDelta* timestamp_offset);
// Calls Remove(|start|, |end|, |duration|) on all
// ChunkDemuxerStreams managed by this object.
void Remove(TimeDelta start, TimeDelta end, TimeDelta duration);
+ // If the buffer is full, attempts to try to free up space, as specified in
+ // the "Coded Frame Eviction Algorithm" in the Media Source Extensions Spec.
+ // Returns false iff buffer is still full after running eviction.
+ // https://w3c.github.io/media-source/#sourcebuffer-coded-frame-eviction
+ bool EvictCodedFrames(DecodeTimestamp media_time, size_t newDataSize);
+
// Returns true if currently parsing a media segment, or false otherwise.
bool parsing_media_segment() const { return parsing_media_segment_; }
@@ -165,7 +173,7 @@ class SourceState {
// Sets the memory limit on each stream of a specific type.
// |memory_limit| is the maximum number of bytes each stream of type |type|
// is allowed to hold in its buffer.
- void SetMemoryLimits(DemuxerStream::Type type, int memory_limit);
+ void SetMemoryLimits(DemuxerStream::Type type, size_t memory_limit);
bool IsSeekWaitingForData() const;
private:
@@ -196,6 +204,12 @@ class SourceState {
void OnSourceInitDone(const StreamParser::InitParameters& params);
+ // EstimateVideoDataSize uses some heuristics to estimate the size of the
+ // video size in the chunk of muxed audio/video data without parsing it.
+ // This is used by EvictCodedFrames algorithm, which happens before Append
+ // (and therefore before parsing is performed) to prepare space for new data.
+ size_t EstimateVideoDataSize(size_t muxed_data_chunk_size) const;
+
CreateDemuxerStreamCB create_demuxer_stream_cb_;
NewTextTrackCB new_text_track_cb_;
@@ -234,7 +248,6 @@ class SourceState {
TextStreamMap text_stream_map_; // |this| owns the map's stream pointers.
scoped_ptr<FrameProcessor> frame_processor_;
- LogCB log_cb_;
scoped_refptr<MediaLog> media_log_;
StreamParser::InitCB init_cb_;
@@ -256,7 +269,6 @@ class SourceState {
SourceState::SourceState(scoped_ptr<StreamParser> stream_parser,
scoped_ptr<FrameProcessor> frame_processor,
- const LogCB& log_cb,
const CreateDemuxerStreamCB& create_demuxer_stream_cb,
const scoped_refptr<MediaLog>& media_log)
: create_demuxer_stream_cb_(create_demuxer_stream_cb),
@@ -267,7 +279,6 @@ SourceState::SourceState(scoped_ptr<StreamParser> stream_parser,
audio_(NULL),
video_(NULL),
frame_processor_(frame_processor.release()),
- log_cb_(log_cb),
media_log_(media_log),
auto_update_timestamp_offset_(false) {
DCHECK(!create_demuxer_stream_cb_.is_null());
@@ -297,7 +308,7 @@ void SourceState::Init(
new_text_track_cb_.is_null(), encrypted_media_init_data_cb,
base::Bind(&SourceState::OnNewMediaSegment, base::Unretained(this)),
base::Bind(&SourceState::OnEndOfMediaSegment, base::Unretained(this)),
- log_cb_);
+ media_log_);
}
void SourceState::SetSequenceMode(bool sequence_mode) {
@@ -333,7 +344,7 @@ bool SourceState::Append(
// append window and timestamp offset pointer. See http://crbug.com/351454.
bool result = stream_parser_->Parse(data, length);
if (!result) {
- MEDIA_LOG(ERROR, log_cb_)
+ MEDIA_LOG(ERROR, media_log_)
<< __FUNCTION__ << ": stream parsing failed."
<< " Data size=" << length
<< " append_window_start=" << append_window_start.InSecondsF()
@@ -344,9 +355,9 @@ bool SourceState::Append(
return result;
}
-void SourceState::Abort(TimeDelta append_window_start,
- TimeDelta append_window_end,
- base::TimeDelta* timestamp_offset) {
+void SourceState::ResetParserState(TimeDelta append_window_start,
+ TimeDelta append_window_end,
+ base::TimeDelta* timestamp_offset) {
DCHECK(timestamp_offset);
DCHECK(!timestamp_offset_during_append_);
timestamp_offset_during_append_ = timestamp_offset;
@@ -373,6 +384,82 @@ void SourceState::Remove(TimeDelta start, TimeDelta end, TimeDelta duration) {
}
}
+size_t SourceState::EstimateVideoDataSize(size_t muxed_data_chunk_size) const {
+ DCHECK(audio_);
+ DCHECK(video_);
+
+ size_t videoBufferedSize = video_->GetBufferedSize();
+ size_t audioBufferedSize = audio_->GetBufferedSize();
+ if (videoBufferedSize == 0 || audioBufferedSize == 0) {
+ // At this point either audio or video buffer is empty, which means buffer
+ // levels are probably low anyway and we should have enough space in the
+ // buffers for appending new data, so just take a very rough guess.
+ return muxed_data_chunk_size / 2;
+ }
+
+ // We need to estimate how much audio and video data is going to be in the
+ // newly appended data chunk to make space for the new data. And we need to do
+ // that without parsing the data (which will happen later, in the Append
+ // phase). So for now we can only rely on some heuristic here. Let's assume
+ // that the proportion of the audio/video in the new data chunk is the same as
+ // the current ratio of buffered audio/video.
+ // Longer term this should go away once we further change the MSE GC algorithm
+ // to work across all streams of a SourceBuffer (see crbug.com/520704).
+ double videoBufferedSizeF = static_cast<double>(videoBufferedSize);
+ double audioBufferedSizeF = static_cast<double>(audioBufferedSize);
+
+ double totalBufferedSizeF = videoBufferedSizeF + audioBufferedSizeF;
+ CHECK_GT(totalBufferedSizeF, 0.0);
+
+ double videoRatio = videoBufferedSizeF / totalBufferedSizeF;
+ CHECK_GE(videoRatio, 0.0);
+ CHECK_LE(videoRatio, 1.0);
+ double estimatedVideoSize = muxed_data_chunk_size * videoRatio;
+ return static_cast<size_t>(estimatedVideoSize);
+}
+
+bool SourceState::EvictCodedFrames(DecodeTimestamp media_time,
+ size_t newDataSize) {
+ bool success = true;
+
+ DVLOG(3) << __FUNCTION__ << " media_time=" << media_time.InSecondsF()
+ << " newDataSize=" << newDataSize
+ << " videoBufferedSize=" << (video_ ? video_->GetBufferedSize() : 0)
+ << " audioBufferedSize=" << (audio_ ? audio_->GetBufferedSize() : 0);
+
+ size_t newAudioSize = 0;
+ size_t newVideoSize = 0;
+ if (audio_ && video_) {
+ newVideoSize = EstimateVideoDataSize(newDataSize);
+ newAudioSize = newDataSize - newVideoSize;
+ } else if (video_) {
+ newVideoSize = newDataSize;
+ } else if (audio_) {
+ newAudioSize = newDataSize;
+ }
+
+ DVLOG(3) << __FUNCTION__ << " estimated audio/video sizes: "
+ << " newVideoSize=" << newVideoSize
+ << " newAudioSize=" << newAudioSize;
+
+ if (audio_)
+ success = audio_->EvictCodedFrames(media_time, newAudioSize) && success;
+
+ if (video_)
+ success = video_->EvictCodedFrames(media_time, newVideoSize) && success;
+
+ for (TextStreamMap::iterator itr = text_stream_map_.begin();
+ itr != text_stream_map_.end(); ++itr) {
+ success = itr->second->EvictCodedFrames(media_time, 0) && success;
+ }
+
+ DVLOG(3) << __FUNCTION__ << " result=" << success
+ << " videoBufferedSize=" << (video_ ? video_->GetBufferedSize() : 0)
+ << " audioBufferedSize=" << (audio_ ? audio_->GetBufferedSize() : 0);
+
+ return success;
+}
+
Ranges<TimeDelta> SourceState::GetBufferedRanges(TimeDelta duration,
bool ended) const {
// TODO(acolwell): When we start allowing disabled tracks we'll need to update
@@ -513,20 +600,21 @@ void SourceState::Shutdown() {
}
}
-void SourceState::SetMemoryLimits(DemuxerStream::Type type, int memory_limit) {
+void SourceState::SetMemoryLimits(DemuxerStream::Type type,
+ size_t memory_limit) {
switch (type) {
case DemuxerStream::AUDIO:
if (audio_)
- audio_->set_memory_limit(memory_limit);
+ audio_->SetStreamMemoryLimit(memory_limit);
break;
case DemuxerStream::VIDEO:
if (video_)
- video_->set_memory_limit(memory_limit);
+ video_->SetStreamMemoryLimit(memory_limit);
break;
case DemuxerStream::TEXT:
for (TextStreamMap::iterator itr = text_stream_map_.begin();
itr != text_stream_map_.end(); ++itr) {
- itr->second->set_memory_limit(memory_limit);
+ itr->second->SetStreamMemoryLimit(memory_limit);
}
break;
case DemuxerStream::UNKNOWN:
@@ -571,7 +659,7 @@ bool SourceState::OnNewConfigs(
// Signal an error if we get configuration info for stream types that weren't
// specified in AddId() or more configs after a stream is initialized.
if (allow_audio != audio_config.IsValidConfig()) {
- MEDIA_LOG(ERROR, log_cb_)
+ MEDIA_LOG(ERROR, media_log_)
<< "Initialization segment"
<< (audio_config.IsValidConfig() ? " has" : " does not have")
<< " an audio track, but the mimetype"
@@ -581,7 +669,7 @@ bool SourceState::OnNewConfigs(
}
if (allow_video != video_config.IsValidConfig()) {
- MEDIA_LOG(ERROR, log_cb_)
+ MEDIA_LOG(ERROR, media_log_)
<< "Initialization segment"
<< (video_config.IsValidConfig() ? " has" : " does not have")
<< " a video track, but the mimetype"
@@ -616,7 +704,7 @@ bool SourceState::OnNewConfigs(
}
frame_processor_->OnPossibleAudioConfigUpdate(audio_config);
- success &= audio_->UpdateAudioConfig(audio_config, log_cb_);
+ success &= audio_->UpdateAudioConfig(audio_config, media_log_);
}
if (video_config.IsValidConfig()) {
@@ -643,7 +731,7 @@ bool SourceState::OnNewConfigs(
}
}
- success &= video_->UpdateVideoConfig(video_config, log_cb_);
+ success &= video_->UpdateVideoConfig(video_config, media_log_);
}
typedef StreamParser::TextTrackConfigMap::const_iterator TextConfigItr;
@@ -654,11 +742,11 @@ bool SourceState::OnNewConfigs(
create_demuxer_stream_cb_.Run(DemuxerStream::TEXT);
if (!frame_processor_->AddTrack(itr->first, text_stream)) {
success &= false;
- MEDIA_LOG(ERROR, log_cb_) << "Failed to add text track ID "
- << itr->first << " to frame processor.";
+ MEDIA_LOG(ERROR, media_log_) << "Failed to add text track ID "
+ << itr->first << " to frame processor.";
break;
}
- text_stream->UpdateTextConfig(itr->second, log_cb_);
+ text_stream->UpdateTextConfig(itr->second, media_log_);
text_stream_map_[itr->first] = text_stream;
new_text_track_cb_.Run(text_stream, itr->second);
}
@@ -666,7 +754,8 @@ bool SourceState::OnNewConfigs(
const size_t text_count = text_stream_map_.size();
if (text_configs.size() != text_count) {
success &= false;
- MEDIA_LOG(ERROR, log_cb_) << "The number of text track configs changed.";
+ MEDIA_LOG(ERROR, media_log_)
+ << "The number of text track configs changed.";
} else if (text_count == 1) {
TextConfigItr config_itr = text_configs.begin();
TextStreamMap::iterator stream_itr = text_stream_map_.begin();
@@ -678,7 +767,7 @@ bool SourceState::OnNewConfigs(
old_config.id());
if (!new_config.Matches(old_config)) {
success &= false;
- MEDIA_LOG(ERROR, log_cb_)
+ MEDIA_LOG(ERROR, media_log_)
<< "New text track config does not match old one.";
} else {
StreamParser::TrackId old_id = stream_itr->first;
@@ -689,7 +778,7 @@ bool SourceState::OnNewConfigs(
text_stream_map_[config_itr->first] = text_stream;
} else {
success &= false;
- MEDIA_LOG(ERROR, log_cb_)
+ MEDIA_LOG(ERROR, media_log_)
<< "Error remapping single text track number";
}
}
@@ -701,7 +790,7 @@ bool SourceState::OnNewConfigs(
text_stream_map_.find(config_itr->first);
if (stream_itr == text_stream_map_.end()) {
success &= false;
- MEDIA_LOG(ERROR, log_cb_)
+ MEDIA_LOG(ERROR, media_log_)
<< "Unexpected text track configuration for track ID "
<< config_itr->first;
break;
@@ -712,9 +801,9 @@ bool SourceState::OnNewConfigs(
TextTrackConfig old_config = stream->text_track_config();
if (!new_config.Matches(old_config)) {
success &= false;
- MEDIA_LOG(ERROR, log_cb_) << "New text track config for track ID "
- << config_itr->first
- << " does not match old one.";
+ MEDIA_LOG(ERROR, media_log_) << "New text track config for track ID "
+ << config_itr->first
+ << " does not match old one.";
break;
}
}
@@ -881,6 +970,12 @@ void ChunkDemuxerStream::Remove(TimeDelta start, TimeDelta end,
stream_->Remove(start, end, duration);
}
+bool ChunkDemuxerStream::EvictCodedFrames(DecodeTimestamp media_time,
+ size_t newDataSize) {
+ base::AutoLock auto_lock(lock_);
+ return stream_->GarbageCollectIfNeeded(media_time, newDataSize);
+}
+
void ChunkDemuxerStream::OnSetDuration(TimeDelta duration) {
base::AutoLock auto_lock(lock_);
stream_->OnSetDuration(duration);
@@ -917,6 +1012,10 @@ TimeDelta ChunkDemuxerStream::GetBufferedDuration() const {
return stream_->GetBufferedDuration();
}
+size_t ChunkDemuxerStream::GetBufferedSize() const {
+ return stream_->GetBufferedSize();
+}
+
void ChunkDemuxerStream::OnNewMediaSegment(DecodeTimestamp start_timestamp) {
DVLOG(2) << "ChunkDemuxerStream::OnNewMediaSegment("
<< start_timestamp.InSecondsF() << ")";
@@ -924,8 +1023,9 @@ void ChunkDemuxerStream::OnNewMediaSegment(DecodeTimestamp start_timestamp) {
stream_->OnNewMediaSegment(start_timestamp);
}
-bool ChunkDemuxerStream::UpdateAudioConfig(const AudioDecoderConfig& config,
- const LogCB& log_cb) {
+bool ChunkDemuxerStream::UpdateAudioConfig(
+ const AudioDecoderConfig& config,
+ const scoped_refptr<MediaLog>& media_log) {
DCHECK(config.IsValidConfig());
DCHECK_EQ(type_, AUDIO);
base::AutoLock auto_lock(lock_);
@@ -942,15 +1042,16 @@ bool ChunkDemuxerStream::UpdateAudioConfig(const AudioDecoderConfig& config,
splice_frames_enabled_ && codec_supported;
stream_.reset(
- new SourceBufferStream(config, log_cb, splice_frames_enabled_));
+ new SourceBufferStream(config, media_log, splice_frames_enabled_));
return true;
}
return stream_->UpdateAudioConfig(config);
}
-bool ChunkDemuxerStream::UpdateVideoConfig(const VideoDecoderConfig& config,
- const LogCB& log_cb) {
+bool ChunkDemuxerStream::UpdateVideoConfig(
+ const VideoDecoderConfig& config,
+ const scoped_refptr<MediaLog>& media_log) {
DCHECK(config.IsValidConfig());
DCHECK_EQ(type_, VIDEO);
base::AutoLock auto_lock(lock_);
@@ -958,20 +1059,22 @@ bool ChunkDemuxerStream::UpdateVideoConfig(const VideoDecoderConfig& config,
if (!stream_) {
DCHECK_EQ(state_, UNINITIALIZED);
stream_.reset(
- new SourceBufferStream(config, log_cb, splice_frames_enabled_));
+ new SourceBufferStream(config, media_log, splice_frames_enabled_));
return true;
}
return stream_->UpdateVideoConfig(config);
}
-void ChunkDemuxerStream::UpdateTextConfig(const TextTrackConfig& config,
- const LogCB& log_cb) {
+void ChunkDemuxerStream::UpdateTextConfig(
+ const TextTrackConfig& config,
+ const scoped_refptr<MediaLog>& media_log) {
DCHECK_EQ(type_, TEXT);
base::AutoLock auto_lock(lock_);
DCHECK(!stream_);
DCHECK_EQ(state_, UNINITIALIZED);
- stream_.reset(new SourceBufferStream(config, log_cb, splice_frames_enabled_));
+ stream_.reset(
+ new SourceBufferStream(config, media_log, splice_frames_enabled_));
}
void ChunkDemuxerStream::MarkEndOfStream() {
@@ -1025,6 +1128,10 @@ TextTrackConfig ChunkDemuxerStream::text_track_config() {
return stream_->GetCurrentTextTrackConfig();
}
+void ChunkDemuxerStream::SetStreamMemoryLimit(size_t memory_limit) {
+ stream_->set_memory_limit(memory_limit);
+}
+
void ChunkDemuxerStream::SetLiveness(Liveness liveness) {
base::AutoLock auto_lock(lock_);
liveness_ = liveness;
@@ -1103,7 +1210,6 @@ void ChunkDemuxerStream::CompletePendingReadIfPossible_Locked() {
ChunkDemuxer::ChunkDemuxer(
const base::Closure& open_cb,
const EncryptedMediaInitDataCB& encrypted_media_init_data_cb,
- const LogCB& log_cb,
const scoped_refptr<MediaLog>& media_log,
bool splice_frames_enabled)
: state_(WAITING_FOR_INIT),
@@ -1112,7 +1218,6 @@ ChunkDemuxer::ChunkDemuxer(
open_cb_(open_cb),
encrypted_media_init_data_cb_(encrypted_media_init_data_cb),
enable_text_(false),
- log_cb_(log_cb),
media_log_(media_log),
duration_(kNoTimestamp()),
user_specified_duration_(-1),
@@ -1252,9 +1357,8 @@ ChunkDemuxer::Status ChunkDemuxer::AddId(const std::string& id,
bool has_audio = false;
bool has_video = false;
- scoped_ptr<media::StreamParser> stream_parser(
- StreamParserFactory::Create(type, codecs, log_cb_,
- &has_audio, &has_video));
+ scoped_ptr<media::StreamParser> stream_parser(StreamParserFactory::Create(
+ type, codecs, media_log_, &has_audio, &has_video));
if (!stream_parser)
return ChunkDemuxer::kNotSupported;
@@ -1271,14 +1375,13 @@ ChunkDemuxer::Status ChunkDemuxer::AddId(const std::string& id,
scoped_ptr<FrameProcessor> frame_processor(
new FrameProcessor(base::Bind(&ChunkDemuxer::IncreaseDurationIfNecessary,
- base::Unretained(this))));
+ base::Unretained(this)),
+ media_log_));
- scoped_ptr<SourceState> source_state(
- new SourceState(stream_parser.Pass(),
- frame_processor.Pass(), log_cb_,
- base::Bind(&ChunkDemuxer::CreateDemuxerStream,
- base::Unretained(this)),
- media_log_));
+ scoped_ptr<SourceState> source_state(new SourceState(
+ stream_parser.Pass(), frame_processor.Pass(),
+ base::Bind(&ChunkDemuxer::CreateDemuxerStream, base::Unretained(this)),
+ media_log_));
SourceState::NewTextTrackCB new_text_track_cb;
@@ -1319,6 +1422,29 @@ Ranges<TimeDelta> ChunkDemuxer::GetBufferedRanges(const std::string& id) const {
return itr->second->GetBufferedRanges(duration_, state_ == ENDED);
}
+bool ChunkDemuxer::EvictCodedFrames(const std::string& id,
+ base::TimeDelta currentMediaTime,
+ size_t newDataSize) {
+ DVLOG(1) << __FUNCTION__ << "(" << id << ")"
+ << " media_time=" << currentMediaTime.InSecondsF()
+ << " newDataSize=" << newDataSize;
+ base::AutoLock auto_lock(lock_);
+
+ // Note: The direct conversion from PTS to DTS is safe here, since we don't
+ // need to know currentTime precisely for GC. GC only needs to know which GOP
+ // currentTime points to.
+ DecodeTimestamp media_time_dts =
+ DecodeTimestamp::FromPresentationTime(currentMediaTime);
+
+ DCHECK(!id.empty());
+ SourceStateMap::const_iterator itr = source_state_map_.find(id);
+ if (itr == source_state_map_.end()) {
+ LOG(WARNING) << __FUNCTION__ << " stream " << id << " not found";
+ return false;
+ }
+ return itr->second->EvictCodedFrames(media_time_dts, newDataSize);
+}
+
void ChunkDemuxer::AppendData(
const std::string& id,
const uint8* data,
@@ -1387,19 +1513,19 @@ void ChunkDemuxer::AppendData(
host_->AddBufferedTimeRange(ranges.start(i), ranges.end(i));
}
-void ChunkDemuxer::Abort(const std::string& id,
- TimeDelta append_window_start,
- TimeDelta append_window_end,
- TimeDelta* timestamp_offset) {
- DVLOG(1) << "Abort(" << id << ")";
+void ChunkDemuxer::ResetParserState(const std::string& id,
+ TimeDelta append_window_start,
+ TimeDelta append_window_end,
+ TimeDelta* timestamp_offset) {
+ DVLOG(1) << "ResetParserState(" << id << ")";
base::AutoLock auto_lock(lock_);
DCHECK(!id.empty());
CHECK(IsValidId(id));
bool old_waiting_for_data = IsSeekWaitingForData_Locked();
- source_state_map_[id]->Abort(append_window_start,
- append_window_end,
- timestamp_offset);
- // Abort can possibly emit some buffers.
+ source_state_map_[id]->ResetParserState(append_window_start,
+ append_window_end,
+ timestamp_offset);
+ // ResetParserState can possibly emit some buffers.
// Need to check whether seeking can be completed.
if (old_waiting_for_data && !IsSeekWaitingForData_Locked() &&
!seek_cb_.is_null()) {
@@ -1587,7 +1713,8 @@ void ChunkDemuxer::Shutdown() {
base::ResetAndReturn(&seek_cb_).Run(PIPELINE_ERROR_ABORT);
}
-void ChunkDemuxer::SetMemoryLimits(DemuxerStream::Type type, int memory_limit) {
+void ChunkDemuxer::SetMemoryLimits(DemuxerStream::Type type,
+ size_t memory_limit) {
for (SourceStateMap::iterator itr = source_state_map_.begin();
itr != source_state_map_.end(); ++itr) {
itr->second->SetMemoryLimits(type, memory_limit);
@@ -1661,7 +1788,7 @@ void ChunkDemuxer::OnSourceInitDone(
if (!params.timeline_offset.is_null()) {
if (!timeline_offset_.is_null() &&
params.timeline_offset != timeline_offset_) {
- MEDIA_LOG(ERROR, log_cb_)
+ MEDIA_LOG(ERROR, media_log_)
<< "Timeline offset is not the same across all SourceBuffers.";
ReportError_Locked(DEMUXER_ERROR_COULD_NOT_OPEN);
return;
diff --git a/chromium/media/filters/chunk_demuxer.h b/chromium/media/filters/chunk_demuxer.h
index cdf2e524c81..466818378ad 100644
--- a/chromium/media/filters/chunk_demuxer.h
+++ b/chromium/media/filters/chunk_demuxer.h
@@ -11,6 +11,7 @@
#include <utility>
#include <vector>
+#include "base/basictypes.h"
#include "base/synchronization/lock.h"
#include "media/base/byte_queue.h"
#include "media/base/demuxer.h"
@@ -56,6 +57,12 @@ class MEDIA_EXPORT ChunkDemuxerStream : public DemuxerStream {
void Remove(base::TimeDelta start, base::TimeDelta end,
base::TimeDelta duration);
+ // If the buffer is full, attempts to try to free up space, as specified in
+ // the "Coded Frame Eviction Algorithm" in the Media Source Extensions Spec.
+ // Returns false iff buffer is still full after running eviction.
+ // https://w3c.github.io/media-source/#sourcebuffer-coded-frame-eviction
+ bool EvictCodedFrames(DecodeTimestamp media_time, size_t newDataSize);
+
// Signal to the stream that duration has changed to |duration|.
void OnSetDuration(base::TimeDelta duration);
@@ -66,6 +73,9 @@ class MEDIA_EXPORT ChunkDemuxerStream : public DemuxerStream {
// Returns base::TimeDelta() if the stream has no buffered data.
base::TimeDelta GetBufferedDuration() const;
+ // Returns the size of the buffered data in bytes.
+ size_t GetBufferedSize() const;
+
// Signal to the stream that buffers handed in through subsequent calls to
// Append() belong to a media segment that starts at |start_timestamp|.
void OnNewMediaSegment(DecodeTimestamp start_timestamp);
@@ -73,9 +83,12 @@ class MEDIA_EXPORT ChunkDemuxerStream : public DemuxerStream {
// Called when midstream config updates occur.
// Returns true if the new config is accepted.
// Returns false if the new config should trigger an error.
- bool UpdateAudioConfig(const AudioDecoderConfig& config, const LogCB& log_cb);
- bool UpdateVideoConfig(const VideoDecoderConfig& config, const LogCB& log_cb);
- void UpdateTextConfig(const TextTrackConfig& config, const LogCB& log_cb);
+ bool UpdateAudioConfig(const AudioDecoderConfig& config,
+ const scoped_refptr<MediaLog>& media_log);
+ bool UpdateVideoConfig(const VideoDecoderConfig& config,
+ const scoped_refptr<MediaLog>& media_log);
+ void UpdateTextConfig(const TextTrackConfig& config,
+ const scoped_refptr<MediaLog>& media_log);
void MarkEndOfStream();
void UnmarkEndOfStream();
@@ -94,9 +107,7 @@ class MEDIA_EXPORT ChunkDemuxerStream : public DemuxerStream {
TextTrackConfig text_track_config();
// Sets the memory limit, in bytes, on the SourceBufferStream.
- void set_memory_limit(int memory_limit) {
- stream_->set_memory_limit(memory_limit);
- }
+ void SetStreamMemoryLimit(size_t memory_limit);
bool supports_partial_append_window_trimming() const {
return partial_append_window_trimming_enabled_;
@@ -149,21 +160,21 @@ class MEDIA_EXPORT ChunkDemuxer : public Demuxer {
// is ready to receive media data via AppenData().
// |encrypted_media_init_data_cb| Run when the demuxer determines that an
// encryption key is needed to decrypt the content.
- // |enable_text| Process inband text tracks in the normal way when true,
- // otherwise ignore them.
- // |log_cb| Run when the demuxer needs to emit MediaLog messages.
+ // |media_log| Used to report content and engine debug messages.
// |splice_frames_enabled| Indicates that it's okay to generate splice frames
// per the MSE specification. Renderers must understand DecoderBuffer's
// splice_timestamp() field.
ChunkDemuxer(const base::Closure& open_cb,
const EncryptedMediaInitDataCB& encrypted_media_init_data_cb,
- const LogCB& log_cb,
const scoped_refptr<MediaLog>& media_log,
bool splice_frames_enabled);
~ChunkDemuxer() override;
// Demuxer implementation.
std::string GetDisplayName() const override;
+
+ // |enable_text| Process inband text tracks in the normal way when true,
+ // otherwise ignore them.
void Initialize(DemuxerHost* host,
const PipelineStatusCB& cb,
bool enable_text_tracks) override;
@@ -232,16 +243,24 @@ class MEDIA_EXPORT ChunkDemuxer : public Demuxer {
// it can accept a new segment.
// Some pending frames can be emitted during that process. These frames are
// applied |timestamp_offset|.
- void Abort(const std::string& id,
- base::TimeDelta append_window_start,
- base::TimeDelta append_window_end,
- base::TimeDelta* timestamp_offset);
+ void ResetParserState(const std::string& id,
+ base::TimeDelta append_window_start,
+ base::TimeDelta append_window_end,
+ base::TimeDelta* timestamp_offset);
// Remove buffers between |start| and |end| for the source buffer
// associated with |id|.
void Remove(const std::string& id, base::TimeDelta start,
base::TimeDelta end);
+ // If the buffer is full, attempts to try to free up space, as specified in
+ // the "Coded Frame Eviction Algorithm" in the Media Source Extensions Spec.
+ // Returns false iff buffer is still full after running eviction.
+ // https://w3c.github.io/media-source/#sourcebuffer-coded-frame-eviction
+ bool EvictCodedFrames(const std::string& id,
+ base::TimeDelta currentMediaTime,
+ size_t newDataSize);
+
// Returns the current presentation duration.
double GetDuration();
double GetDuration_Locked();
@@ -277,7 +296,7 @@ class MEDIA_EXPORT ChunkDemuxer : public Demuxer {
// Sets the memory limit on each stream of a specific type.
// |memory_limit| is the maximum number of bytes each stream of type |type|
// is allowed to hold in its buffer.
- void SetMemoryLimits(DemuxerStream::Type type, int memory_limit);
+ void SetMemoryLimits(DemuxerStream::Type type, size_t memory_limit);
// Returns the ranges representing the buffered data in the demuxer.
// TODO(wolenetz): Remove this method once MediaSourceDelegate no longer
@@ -360,9 +379,8 @@ class MEDIA_EXPORT ChunkDemuxer : public Demuxer {
base::Closure open_cb_;
EncryptedMediaInitDataCB encrypted_media_init_data_cb_;
bool enable_text_;
- // Callback used to report log messages that can help the web developer
- // figure out what is wrong with the content.
- LogCB log_cb_;
+
+ // MediaLog for reporting messages and properties to debug content and engine.
scoped_refptr<MediaLog> media_log_;
PipelineStatusCB init_cb_;
diff --git a/chromium/media/filters/chunk_demuxer_unittest.cc b/chromium/media/filters/chunk_demuxer_unittest.cc
index c99568500fc..747e512db6c 100644
--- a/chromium/media/filters/chunk_demuxer_unittest.cc
+++ b/chromium/media/filters/chunk_demuxer_unittest.cc
@@ -16,6 +16,7 @@
#include "media/base/mock_demuxer_host.h"
#include "media/base/test_data_util.h"
#include "media/base/test_helpers.h"
+#include "media/base/timestamp_constants.h"
#include "media/filters/chunk_demuxer.h"
#include "media/formats/webm/cluster_builder.h"
#include "media/formats/webm/webm_constants.h"
@@ -176,9 +177,9 @@ class ChunkDemuxerTest : public ::testing::Test {
base::Bind(&ChunkDemuxerTest::DemuxerOpened, base::Unretained(this));
Demuxer::EncryptedMediaInitDataCB encrypted_media_init_data_cb = base::Bind(
&ChunkDemuxerTest::OnEncryptedMediaInitData, base::Unretained(this));
- demuxer_.reset(new ChunkDemuxer(
- open_cb, encrypted_media_init_data_cb, base::Bind(&AddLogEntryForTest),
- scoped_refptr<MediaLog>(new MediaLog()), true));
+ demuxer_.reset(new ChunkDemuxer(open_cb, encrypted_media_init_data_cb,
+ scoped_refptr<MediaLog>(new MediaLog()),
+ true));
}
virtual ~ChunkDemuxerTest() {
@@ -439,8 +440,8 @@ class ChunkDemuxerTest : public ::testing::Test {
void ParseBlockDescriptions(int track_number,
const std::string block_descriptions,
std::vector<BlockInfo>* blocks) {
- std::vector<std::string> timestamps;
- base::SplitString(block_descriptions, ' ', &timestamps);
+ std::vector<std::string> timestamps = base::SplitString(
+ block_descriptions, " ", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
for (size_t i = 0; i < timestamps.size(); ++i) {
std::string timestamp_str = timestamps[i];
@@ -449,7 +450,7 @@ class ChunkDemuxerTest : public ::testing::Test {
block_info.flags = 0;
block_info.duration = 0;
- if (base::EndsWith(timestamp_str, "K", true)) {
+ if (base::EndsWith(timestamp_str, "K", base::CompareCase::SENSITIVE)) {
block_info.flags = kWebMFlagKeyframe;
// Remove the "K" off of the token.
timestamp_str = timestamp_str.substr(0, timestamps[i].length() - 1);
@@ -758,10 +759,8 @@ class ChunkDemuxerTest : public ::testing::Test {
return false;
// Append the whole bear1 file.
- // TODO(wolenetz/acolwell): Remove this extra SetDuration expectation once
- // the files are fixed to have the correct duration in their init segments,
- // and the CreateInitDoneCB() call, above, is fixed to used that duration.
- // See http://crbug.com/354284.
+ // Expect duration adjustment since actual duration differs slightly from
+ // duration in the init segment.
EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2746)));
AppendData(bear1->data(), bear1->data_size());
// Last audio frame has timestamp 2721 and duration 24 (estimated from max
@@ -1059,8 +1058,8 @@ class ChunkDemuxerTest : public ::testing::Test {
void CheckExpectedBuffers(DemuxerStream* stream,
const std::string& expected) {
- std::vector<std::string> timestamps;
- base::SplitString(expected, ' ', &timestamps);
+ std::vector<std::string> timestamps = base::SplitString(
+ expected, " ", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
std::stringstream ss;
for (size_t i = 0; i < timestamps.size(); ++i) {
// Initialize status to kAborted since it's possible for Read() to return
@@ -1082,7 +1081,7 @@ class ChunkDemuxerTest : public ::testing::Test {
ss << "K";
// Handle preroll buffers.
- if (base::EndsWith(timestamps[i], "P", true)) {
+ if (base::EndsWith(timestamps[i], "P", base::CompareCase::SENSITIVE)) {
ASSERT_EQ(kInfiniteDuration(), buffer->discard_padding().first);
ASSERT_EQ(base::TimeDelta(), buffer->discard_padding().second);
ss << "P";
@@ -1985,9 +1984,8 @@ TEST_F(ChunkDemuxerTest, WebMFile_AudioAndVideo) {
{kSkip, kSkip},
};
- // TODO(wolenetz/acolwell): Remove this SetDuration expectation and update the
- // ParseWebMFile() call's expected duration, below, once the file is fixed to
- // have the correct duration in the init segment. See http://crbug.com/354284.
+ // Expect duration adjustment since actual duration differs slightly from
+ // duration in the init segment.
EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2746)));
ASSERT_TRUE(ParseWebMFile("bear-320x240.webm", buffer_timestamps,
@@ -2023,9 +2021,8 @@ TEST_F(ChunkDemuxerTest, WebMFile_AudioOnly) {
{kSkip, kSkip},
};
- // TODO(wolenetz/acolwell): Remove this SetDuration expectation and update the
- // ParseWebMFile() call's expected duration, below, once the file is fixed to
- // have the correct duration in the init segment. See http://crbug.com/354284.
+ // Expect duration adjustment since actual duration differs slightly from
+ // duration in the init segment.
EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2746)));
ASSERT_TRUE(ParseWebMFile("bear-320x240-audio-only.webm", buffer_timestamps,
@@ -2043,9 +2040,8 @@ TEST_F(ChunkDemuxerTest, WebMFile_VideoOnly) {
{kSkip, kSkip},
};
- // TODO(wolenetz/acolwell): Remove this SetDuration expectation and update the
- // ParseWebMFile() call's expected duration, below, once the file is fixed to
- // have the correct duration in the init segment. See http://crbug.com/354284.
+ // Expect duration adjustment since actual duration differs slightly from
+ // duration in the init segment.
EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2736)));
ASSERT_TRUE(ParseWebMFile("bear-320x240-video-only.webm", buffer_timestamps,
@@ -3028,13 +3024,13 @@ TEST_F(ChunkDemuxerTest, IsParsingMediaSegmentMidMediaSegment) {
// Confirm we're in the middle of parsing a media segment.
ASSERT_TRUE(demuxer_->IsParsingMediaSegment(kSourceId));
- demuxer_->Abort(kSourceId,
- append_window_start_for_next_append_,
- append_window_end_for_next_append_,
- &timestamp_offset_map_[kSourceId]);
+ demuxer_->ResetParserState(kSourceId,
+ append_window_start_for_next_append_,
+ append_window_end_for_next_append_,
+ &timestamp_offset_map_[kSourceId]);
- // After Abort(), parsing should no longer be in the middle of a media
- // segment.
+ // After ResetParserState(), parsing should no longer be in the middle of a
+ // media segment.
ASSERT_FALSE(demuxer_->IsParsingMediaSegment(kSourceId));
}
@@ -3067,14 +3063,14 @@ TEST_F(ChunkDemuxerTest, EmitBuffersDuringAbort) {
// Confirm we're in the middle of parsing a media segment.
ASSERT_TRUE(demuxer_->IsParsingMediaSegment(kSourceId));
- // Abort on the Mpeg2 TS parser triggers the emission of the last video
- // buffer which is pending in the stream parser.
+ // ResetParserState on the Mpeg2 TS parser triggers the emission of the last
+ // video buffer which is pending in the stream parser.
Ranges<base::TimeDelta> range_before_abort =
demuxer_->GetBufferedRanges(kSourceId);
- demuxer_->Abort(kSourceId,
- append_window_start_for_next_append_,
- append_window_end_for_next_append_,
- &timestamp_offset_map_[kSourceId]);
+ demuxer_->ResetParserState(kSourceId,
+ append_window_start_for_next_append_,
+ append_window_end_for_next_append_,
+ &timestamp_offset_map_[kSourceId]);
Ranges<base::TimeDelta> range_after_abort =
demuxer_->GetBufferedRanges(kSourceId);
@@ -3115,12 +3111,12 @@ TEST_F(ChunkDemuxerTest, SeekCompleteDuringAbort) {
// abort.
Seek(base::TimeDelta::FromMilliseconds(4110));
- // Abort on the Mpeg2 TS parser triggers the emission of the last video
- // buffer which is pending in the stream parser.
- demuxer_->Abort(kSourceId,
- append_window_start_for_next_append_,
- append_window_end_for_next_append_,
- &timestamp_offset_map_[kSourceId]);
+ // ResetParserState on the Mpeg2 TS parser triggers the emission of the last
+ // video buffer which is pending in the stream parser.
+ demuxer_->ResetParserState(kSourceId,
+ append_window_start_for_next_append_,
+ append_window_end_for_next_append_,
+ &timestamp_offset_map_[kSourceId]);
}
#endif
@@ -3334,7 +3330,7 @@ TEST_F(ChunkDemuxerTest, SetMemoryLimitType) {
// Set different memory limits for audio and video.
demuxer_->SetMemoryLimits(DemuxerStream::AUDIO, 10 * kBlockSize);
- demuxer_->SetMemoryLimits(DemuxerStream::VIDEO, 5 * kBlockSize);
+ demuxer_->SetMemoryLimits(DemuxerStream::VIDEO, 5 * kBlockSize + 1);
base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(1000);
@@ -3342,6 +3338,10 @@ TEST_F(ChunkDemuxerTest, SetMemoryLimitType) {
AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 0, 10);
AppendSingleStreamCluster(kSourceId, kVideoTrackNum, 0, 5);
+ // We should be right at buffer limit, should pass
+ EXPECT_TRUE(demuxer_->EvictCodedFrames(
+ kSourceId, base::TimeDelta::FromMilliseconds(0), 0));
+
CheckExpectedRanges(DemuxerStream::AUDIO, "{ [0,230) }");
CheckExpectedRanges(DemuxerStream::VIDEO, "{ [0,165) }");
@@ -3354,11 +3354,135 @@ TEST_F(ChunkDemuxerTest, SetMemoryLimitType) {
AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
seek_time.InMilliseconds(), 5);
+ // We should delete first append, and be exactly at buffer limit
+ EXPECT_TRUE(demuxer_->EvictCodedFrames(kSourceId, seek_time, 0));
+
// Verify that the old data, and nothing more, has been garbage collected.
CheckExpectedRanges(DemuxerStream::AUDIO, "{ [1000,1230) }");
CheckExpectedRanges(DemuxerStream::VIDEO, "{ [1000,1165) }");
}
+TEST_F(ChunkDemuxerTest, GCDuringSeek_SingleRange_SeekForward) {
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
+ demuxer_->SetMemoryLimits(DemuxerStream::AUDIO, 10 * kBlockSize);
+ // Append some data at position 1000ms
+ AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 1000, 10);
+ CheckExpectedRanges(kSourceId, "{ [1000,1230) }");
+
+ // GC should be able to evict frames in the currently buffered range, since
+ // those frames are earlier than the seek target position.
+ base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(2000);
+ Seek(seek_time);
+ EXPECT_TRUE(demuxer_->EvictCodedFrames(kSourceId, seek_time, 5 * kBlockSize));
+
+ // Append data to complete seek operation
+ AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 2000, 5);
+ CheckExpectedRanges(kSourceId, "{ [1115,1230) [2000,2115) }");
+}
+
+TEST_F(ChunkDemuxerTest, GCDuringSeek_SingleRange_SeekBack) {
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
+ demuxer_->SetMemoryLimits(DemuxerStream::AUDIO, 10 * kBlockSize);
+ // Append some data at position 1000ms
+ AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 1000, 10);
+ CheckExpectedRanges(kSourceId, "{ [1000,1230) }");
+
+ // GC should be able to evict frames in the currently buffered range, since
+ // seek target position has no data and so we should allow some frames to be
+ // evicted to make space for the upcoming append at seek target position.
+ base::TimeDelta seek_time = base::TimeDelta();
+ Seek(seek_time);
+ EXPECT_TRUE(demuxer_->EvictCodedFrames(kSourceId, seek_time, 5 * kBlockSize));
+
+ // Append data to complete seek operation
+ AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 0, 5);
+ CheckExpectedRanges(kSourceId, "{ [0,115) [1115,1230) }");
+}
+
+TEST_F(ChunkDemuxerTest, GCDuringSeek_MultipleRanges_SeekForward) {
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
+ demuxer_->SetMemoryLimits(DemuxerStream::AUDIO, 10 * kBlockSize);
+ // Append some data at position 1000ms then at 2000ms
+ AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 1000, 5);
+ AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 2000, 5);
+ CheckExpectedRanges(kSourceId, "{ [1000,1115) [2000,2115) }");
+
+ // GC should be able to evict frames in the currently buffered ranges, since
+ // those frames are earlier than the seek target position.
+ base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(3000);
+ Seek(seek_time);
+ EXPECT_TRUE(demuxer_->EvictCodedFrames(kSourceId, seek_time, 8 * kBlockSize));
+
+ // Append data to complete seek operation
+ AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 3000, 5);
+ CheckExpectedRanges(kSourceId, "{ [2069,2115) [3000,3115) }");
+}
+
+TEST_F(ChunkDemuxerTest, GCDuringSeek_MultipleRanges_SeekInbetween1) {
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
+ demuxer_->SetMemoryLimits(DemuxerStream::AUDIO, 10 * kBlockSize);
+ // Append some data at position 1000ms then at 2000ms
+ AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 1000, 5);
+ AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 2000, 5);
+ CheckExpectedRanges(kSourceId, "{ [1000,1115) [2000,2115) }");
+
+ // GC should be able to evict all frames from the first buffered range, since
+ // those frames are earlier than the seek target position. But there's only 5
+ // blocks worth of data in the first range and seek target position has no
+ // data, so GC proceeds with trying to delete some frames from the back of
+ // buffered ranges, that doesn't yield anything, since that's the most
+ // recently appended data, so then GC starts removing data from the front of
+ // the remaining buffered range (2000ms) to ensure we free up enough space for
+ // the upcoming append and allow seek to proceed.
+ base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(1500);
+ Seek(seek_time);
+ EXPECT_TRUE(demuxer_->EvictCodedFrames(kSourceId, seek_time, 8 * kBlockSize));
+
+ // Append data to complete seek operation
+ AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 1500, 5);
+ CheckExpectedRanges(kSourceId, "{ [1500,1615) [2069,2115) }");
+}
+
+TEST_F(ChunkDemuxerTest, GCDuringSeek_MultipleRanges_SeekInbetween2) {
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
+ demuxer_->SetMemoryLimits(DemuxerStream::AUDIO, 10 * kBlockSize);
+
+ // Append some data at position 2000ms first, then at 1000ms, so that the last
+ // appended data position is in the first buffered range (that matters to the
+ // GC algorithm since it tries to preserve more recently appended data).
+ AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 2000, 5);
+ AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 1000, 5);
+ CheckExpectedRanges(kSourceId, "{ [1000,1115) [2000,2115) }");
+
+ // Now try performing garbage collection without announcing seek first, i.e.
+ // without calling Seek(), the GC algorithm should try to preserve data in the
+ // first range, since that is most recently appended data.
+ base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(2030);
+ EXPECT_TRUE(demuxer_->EvictCodedFrames(kSourceId, seek_time, 5 * kBlockSize));
+
+ AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 1500, 5);
+ CheckExpectedRanges(kSourceId, "{ [1000,1115) [1500,1615) }");
+}
+
+TEST_F(ChunkDemuxerTest, GCDuringSeek_MultipleRanges_SeekBack) {
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
+ demuxer_->SetMemoryLimits(DemuxerStream::AUDIO, 10 * kBlockSize);
+ // Append some data at position 1000ms then at 2000ms
+ AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 1000, 5);
+ AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 2000, 5);
+ CheckExpectedRanges(kSourceId, "{ [1000,1115) [2000,2115) }");
+
+ // GC should be able to evict frames in the currently buffered ranges, since
+ // those frames are earlier than the seek target position.
+ base::TimeDelta seek_time = base::TimeDelta();
+ Seek(seek_time);
+ EXPECT_TRUE(demuxer_->EvictCodedFrames(kSourceId, seek_time, 8 * kBlockSize));
+
+ // Append data to complete seek operation
+ AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 0, 5);
+ CheckExpectedRanges(kSourceId, "{ [0,115) [2069,2115) }");
+}
+
TEST_F(ChunkDemuxerTest, GCDuringSeek) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
@@ -3375,31 +3499,72 @@ TEST_F(ChunkDemuxerTest, GCDuringSeek) {
seek_time1.InMilliseconds(), 5);
CheckExpectedRanges(kSourceId, "{ [1000,1115) }");
+ // We are under memory limit, so Evict should be a no-op.
+ EXPECT_TRUE(demuxer_->EvictCodedFrames(kSourceId, seek_time1, 0));
+ CheckExpectedRanges(kSourceId, "{ [1000,1115) }");
+
// Signal that the second seek is starting.
demuxer_->StartWaitingForSeek(seek_time2);
- // Append data to satisfy the second seek. This append triggers
- // the garbage collection logic since we set the memory limit to
- // 5 blocks.
+ // Append data to satisfy the second seek.
AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
seek_time2.InMilliseconds(), 5);
+ CheckExpectedRanges(kSourceId, "{ [500,615) [1000,1115) }");
- // Verify that the buffers that cover |seek_time2| do not get
- // garbage collected.
+ // We are now over our memory usage limit. We have just seeked to |seek_time2|
+ // so data around 500ms position should be preserved, while the previous
+ // append at 1000ms should be removed.
+ EXPECT_TRUE(demuxer_->EvictCodedFrames(kSourceId, seek_time2, 0));
CheckExpectedRanges(kSourceId, "{ [500,615) }");
// Complete the seek.
demuxer_->Seek(seek_time2, NewExpectedStatusCB(PIPELINE_OK));
-
- // Append more data and make sure that the blocks for |seek_time2|
- // don't get removed.
- //
- // NOTE: The current GC algorithm tries to preserve the GOP at the
- // current position as well as the last appended GOP. This is
- // why there are 2 ranges in the expectations.
+ // Append more data and make sure that we preserve both the buffered range
+ // around |seek_time2|, because that's the current playback position,
+ // and the newly appended range, since this is the most recent append.
AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 700, 5);
- CheckExpectedRanges(kSourceId, "{ [500,592) [792,815) }");
+ EXPECT_FALSE(demuxer_->EvictCodedFrames(kSourceId, seek_time2, 0));
+ CheckExpectedRanges(kSourceId, "{ [500,615) [700,815) }");
+}
+
+TEST_F(ChunkDemuxerTest, GCKeepPlayhead) {
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
+
+ demuxer_->SetMemoryLimits(DemuxerStream::AUDIO, 5 * kBlockSize);
+
+ // Append data at the start that can be garbage collected:
+ AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 0, 10);
+ CheckExpectedRanges(kSourceId, "{ [0,230) }");
+
+ // We expect garbage collection to fail, as we don't want to spontaneously
+ // create gaps in source buffer stream. Gaps could break playback for many
+ // clients, who don't bother to check ranges after append.
+ EXPECT_FALSE(demuxer_->EvictCodedFrames(
+ kSourceId, base::TimeDelta::FromMilliseconds(0), 0));
+ CheckExpectedRanges(kSourceId, "{ [0,230) }");
+
+ // Increase media_time a bit, this will allow some data to be collected, but
+ // we are still over memory usage limit.
+ base::TimeDelta seek_time1 = base::TimeDelta::FromMilliseconds(23*2);
+ Seek(seek_time1);
+ EXPECT_FALSE(demuxer_->EvictCodedFrames(kSourceId, seek_time1, 0));
+ CheckExpectedRanges(kSourceId, "{ [46,230) }");
+
+ base::TimeDelta seek_time2 = base::TimeDelta::FromMilliseconds(23*4);
+ Seek(seek_time2);
+ EXPECT_FALSE(demuxer_->EvictCodedFrames(kSourceId, seek_time2, 0));
+ CheckExpectedRanges(kSourceId, "{ [92,230) }");
+
+ // media_time has progressed to a point where we can collect enough data to
+ // be under memory limit, so Evict should return true.
+ base::TimeDelta seek_time3 = base::TimeDelta::FromMilliseconds(23*6);
+ Seek(seek_time3);
+ EXPECT_TRUE(demuxer_->EvictCodedFrames(kSourceId, seek_time3, 0));
+ // Strictly speaking the current playback time is 23*6==138ms, so we could
+ // release data up to 138ms, but we only release as much data as necessary
+ // to bring memory usage under the limit, so we release only up to 115ms.
+ CheckExpectedRanges(kSourceId, "{ [115,230) }");
}
TEST_F(ChunkDemuxerTest, AppendWindow_Video) {
@@ -3513,10 +3678,8 @@ TEST_F(ChunkDemuxerTest, AppendWindow_AudioConfigUpdateRemovesPreroll) {
// Set the append window such that the first file is completely before the
// append window.
- // TODO(wolenetz/acolwell): Update this duration once the files are fixed to
- // have the correct duration in their init segments, and the
- // CreateInitDoneCB() call, above, is fixed to used that duration. See
- // http://crbug.com/354284.
+ // Expect duration adjustment since actual duration differs slightly from
+ // duration in the init segment.
const base::TimeDelta duration_1 = base::TimeDelta::FromMilliseconds(2746);
append_window_start_for_next_append_ = duration_1;
@@ -3754,4 +3917,55 @@ TEST_F(ChunkDemuxerTest, CuesBetweenClusters) {
CheckExpectedRanges(kSourceId, "{ [0,115) }");
}
+TEST_F(ChunkDemuxerTest, EvictCodedFramesTest) {
+ ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
+ demuxer_->SetMemoryLimits(DemuxerStream::AUDIO, 10 * kBlockSize);
+ demuxer_->SetMemoryLimits(DemuxerStream::VIDEO, 15 * kBlockSize);
+ DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
+ DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
+
+ const char* kAudioStreamInfo = "0K 40K 80K 120K 160K 200K 240K 280K";
+ const char* kVideoStreamInfo = "0K 10 20K 30 40K 50 60K 70 80K 90 100K "
+ "110 120K 130 140K";
+ // Append 8 blocks (80 bytes) of data to audio stream and 15 blocks (150
+ // bytes) to video stream.
+ AppendMuxedCluster(
+ MuxedStreamInfo(kAudioTrackNum, kAudioStreamInfo),
+ MuxedStreamInfo(kVideoTrackNum, kVideoStreamInfo));
+ CheckExpectedBuffers(audio_stream, kAudioStreamInfo);
+ CheckExpectedBuffers(video_stream, kVideoStreamInfo);
+
+ // If we want to append 80 more blocks of muxed a+v data and the current
+ // position is 0, that will fail, because EvictCodedFrames won't remove the
+ // data after the current playback position.
+ ASSERT_FALSE(demuxer_->EvictCodedFrames(kSourceId,
+ base::TimeDelta::FromMilliseconds(0),
+ 80));
+ // EvictCodedFrames has failed, so data should be unchanged.
+ Seek(base::TimeDelta::FromMilliseconds(0));
+ CheckExpectedBuffers(audio_stream, kAudioStreamInfo);
+ CheckExpectedBuffers(video_stream, kVideoStreamInfo);
+
+ // But if we pretend that playback position has moved to 120ms, that allows
+ // EvictCodedFrames to garbage-collect enough data to succeed.
+ ASSERT_TRUE(demuxer_->EvictCodedFrames(kSourceId,
+ base::TimeDelta::FromMilliseconds(120),
+ 80));
+
+ Seek(base::TimeDelta::FromMilliseconds(0));
+ // Audio stream had 8 buffers, video stream had 15. We told EvictCodedFrames
+ // that the new data size is 8 blocks muxed, i.e. 80 bytes. Given the current
+ // ratio of video to the total data size (15 : (8+15) ~= 0.65) the estimated
+ // sizes of video and audio data in the new 80 byte chunk are 52 bytes for
+ // video (80*0.65 = 52) and 28 bytes for audio (80 - 52).
+ // Given these numbers MSE GC will remove just one audio block (since current
+ // audio size is 80 bytes, new data is 28 bytes, we need to remove just one 10
+ // byte block to stay under 100 bytes memory limit after append
+ // 80 - 10 + 28 = 98).
+ // For video stream 150 + 52 = 202. Video limit is 150 bytes. We need to
+ // remove at least 6 blocks to stay under limit.
+ CheckExpectedBuffers(audio_stream, "40K 80K 120K 160K 200K 240K 280K");
+ CheckExpectedBuffers(video_stream, "60K 70 80K 90 100K 110 120K 130 140K");
+}
+
} // namespace media
diff --git a/chromium/media/filters/decoder_stream.cc b/chromium/media/filters/decoder_stream.cc
index aaefc04fac4..1236cbc038a 100644
--- a/chromium/media/filters/decoder_stream.cc
+++ b/chromium/media/filters/decoder_stream.cc
@@ -13,7 +13,9 @@
#include "media/base/bind_to_current_loop.h"
#include "media/base/decoder_buffer.h"
#include "media/base/media_log.h"
+#include "media/base/timestamp_constants.h"
#include "media/base/video_decoder.h"
+#include "media/base/video_frame.h"
#include "media/filters/decrypting_demuxer_stream.h"
namespace media {
@@ -298,7 +300,7 @@ void DecoderStream<StreamType>::Decode(
TRACE_EVENT_ASYNC_BEGIN2(
"media", GetTraceString<StreamType>(), this, "key frame",
!buffer->end_of_stream() && buffer->is_key_frame(), "timestamp (ms)",
- buffer->timestamp().InMilliseconds());
+ !buffer->end_of_stream() ? buffer->timestamp().InMilliseconds() : 0);
if (buffer->end_of_stream())
decoding_eos_ = true;
diff --git a/chromium/media/filters/decoder_stream.h b/chromium/media/filters/decoder_stream.h
index c0828b322f8..d7eba5d47a0 100644
--- a/chromium/media/filters/decoder_stream.h
+++ b/chromium/media/filters/decoder_stream.h
@@ -17,6 +17,7 @@
#include "media/base/media_export.h"
#include "media/base/media_log.h"
#include "media/base/pipeline_status.h"
+#include "media/base/timestamp_constants.h"
#include "media/filters/decoder_selector.h"
#include "media/filters/decoder_stream_traits.h"
diff --git a/chromium/media/filters/decrypting_audio_decoder.cc b/chromium/media/filters/decrypting_audio_decoder.cc
index 7d5bfc8eff1..52a85f1d25c 100644
--- a/chromium/media/filters/decrypting_audio_decoder.cc
+++ b/chromium/media/filters/decrypting_audio_decoder.cc
@@ -15,10 +15,10 @@
#include "media/base/audio_decoder_config.h"
#include "media/base/audio_timestamp_helper.h"
#include "media/base/bind_to_current_loop.h"
-#include "media/base/buffers.h"
#include "media/base/decoder_buffer.h"
#include "media/base/media_log.h"
#include "media/base/pipeline.h"
+#include "media/base/timestamp_constants.h"
namespace media {
diff --git a/chromium/media/filters/decrypting_audio_decoder_unittest.cc b/chromium/media/filters/decrypting_audio_decoder_unittest.cc
index a54eb0ce95d..d6ef2b92d7c 100644
--- a/chromium/media/filters/decrypting_audio_decoder_unittest.cc
+++ b/chromium/media/filters/decrypting_audio_decoder_unittest.cc
@@ -9,12 +9,12 @@
#include "base/callback_helpers.h"
#include "base/message_loop/message_loop.h"
#include "media/base/audio_buffer.h"
-#include "media/base/buffers.h"
#include "media/base/decoder_buffer.h"
#include "media/base/decrypt_config.h"
#include "media/base/gmock_callback_support.h"
#include "media/base/mock_filters.h"
#include "media/base/test_helpers.h"
+#include "media/base/timestamp_constants.h"
#include "media/filters/decrypting_audio_decoder.h"
#include "testing/gmock/include/gmock/gmock.h"
@@ -118,7 +118,7 @@ class DecryptingAudioDecoderTest : public testing::Test {
.WillOnce(SaveArg<1>(&key_added_cb_));
config_.Initialize(kCodecVorbis, kSampleFormatPlanarF32,
- CHANNEL_LAYOUT_STEREO, kSampleRate, NULL, 0, true, true,
+ CHANNEL_LAYOUT_STEREO, kSampleRate, NULL, 0, true,
base::TimeDelta(), 0);
InitializeAndExpectResult(config_, true);
}
diff --git a/chromium/media/filters/decrypting_demuxer_stream.cc b/chromium/media/filters/decrypting_demuxer_stream.cc
index 435a8fbdd2e..ff981224edd 100644
--- a/chromium/media/filters/decrypting_demuxer_stream.cc
+++ b/chromium/media/filters/decrypting_demuxer_stream.cc
@@ -387,7 +387,6 @@ void DecryptingDemuxerStream::InitializeDecoderConfig() {
input_audio_config.extra_data(),
input_audio_config.extra_data_size(),
false, // Output audio is not encrypted.
- false,
input_audio_config.seek_preroll(),
input_audio_config.codec_delay());
break;
@@ -396,17 +395,13 @@ void DecryptingDemuxerStream::InitializeDecoderConfig() {
case VIDEO: {
VideoDecoderConfig input_video_config =
demuxer_stream_->video_decoder_config();
- video_config_.Initialize(input_video_config.codec(),
- input_video_config.profile(),
- input_video_config.format(),
- VideoFrame::COLOR_SPACE_UNSPECIFIED,
- input_video_config.coded_size(),
- input_video_config.visible_rect(),
- input_video_config.natural_size(),
- input_video_config.extra_data(),
- input_video_config.extra_data_size(),
- false, // Output video is not encrypted.
- false);
+ video_config_.Initialize(
+ input_video_config.codec(), input_video_config.profile(),
+ input_video_config.format(), input_video_config.color_space(),
+ input_video_config.coded_size(), input_video_config.visible_rect(),
+ input_video_config.natural_size(), input_video_config.extra_data(),
+ input_video_config.extra_data_size(),
+ false); // Output video is not encrypted.
break;
}
diff --git a/chromium/media/filters/ffmpeg_aac_bitstream_converter.cc b/chromium/media/filters/ffmpeg_aac_bitstream_converter.cc
index 6a55b6af4c7..616cd1f5404 100644
--- a/chromium/media/filters/ffmpeg_aac_bitstream_converter.cc
+++ b/chromium/media/filters/ffmpeg_aac_bitstream_converter.cc
@@ -19,7 +19,7 @@ bool GenerateAdtsHeader(
int private_stream, int channel_configuration, int originality, int home,
int copyrighted_stream, int copyright_start, int frame_length,
int buffer_fullness, int number_of_frames_minus_one, uint8* hdr) {
- DCHECK_EQ(codec, CODEC_ID_AAC);
+ DCHECK_EQ(codec, AV_CODEC_ID_AAC);
memset(reinterpret_cast<void *>(hdr), 0,
FFmpegAACBitstreamConverter::kAdtsHeaderSize);
diff --git a/chromium/media/filters/ffmpeg_aac_bitstream_converter_unittest.cc b/chromium/media/filters/ffmpeg_aac_bitstream_converter_unittest.cc
index 6262a80550d..6f92f76a117 100644
--- a/chromium/media/filters/ffmpeg_aac_bitstream_converter_unittest.cc
+++ b/chromium/media/filters/ffmpeg_aac_bitstream_converter_unittest.cc
@@ -23,7 +23,7 @@ class FFmpegAACBitstreamConverterTest : public testing::Test {
// Set up reasonable aac context
memset(&test_context_, 0, sizeof(AVCodecContext));
- test_context_.codec_id = CODEC_ID_AAC;
+ test_context_.codec_id = AV_CODEC_ID_AAC;
test_context_.profile = FF_PROFILE_AAC_MAIN;
test_context_.channels = 2;
test_context_.extradata = context_header_;
diff --git a/chromium/media/filters/ffmpeg_audio_decoder.cc b/chromium/media/filters/ffmpeg_audio_decoder.cc
index 22a907b621b..7b0043a50fa 100644
--- a/chromium/media/filters/ffmpeg_audio_decoder.cc
+++ b/chromium/media/filters/ffmpeg_audio_decoder.cc
@@ -13,6 +13,7 @@
#include "media/base/bind_to_current_loop.h"
#include "media/base/decoder_buffer.h"
#include "media/base/limits.h"
+#include "media/base/timestamp_constants.h"
#include "media/ffmpeg/ffmpeg_common.h"
#include "media/filters/ffmpeg_glue.h"
@@ -126,20 +127,18 @@ static int GetAudioBuffer(struct AVCodecContext* s, AVFrame* frame, int flags) {
FFmpegAudioDecoder::FFmpegAudioDecoder(
const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
- const LogCB& log_cb)
+ const scoped_refptr<MediaLog>& media_log)
: task_runner_(task_runner),
state_(kUninitialized),
av_sample_format_(0),
- log_cb_(log_cb) {
+ media_log_(media_log) {
}
FFmpegAudioDecoder::~FFmpegAudioDecoder() {
DCHECK(task_runner_->BelongsToCurrentThread());
- if (state_ != kUninitialized) {
+ if (state_ != kUninitialized)
ReleaseFFmpegResources();
- ResetTimestampState();
- }
}
std::string FFmpegAudioDecoder::GetDisplayName() const {
@@ -262,7 +261,7 @@ bool FFmpegAudioDecoder::FFmpegDecode(
<< "This is quite possibly a bug in the audio decoder not handling "
<< "end of stream AVPackets correctly.";
- MEDIA_LOG(DEBUG, log_cb_)
+ MEDIA_LOG(DEBUG, media_log_)
<< "Dropping audio frame which failed decode with timestamp: "
<< buffer->timestamp().InMicroseconds()
<< " us, duration: " << buffer->duration().InMicroseconds()
@@ -292,9 +291,10 @@ bool FFmpegAudioDecoder::FFmpegDecode(
if (config_.codec() == kCodecAAC &&
av_frame_->sample_rate == 2 * config_.samples_per_second()) {
- MEDIA_LOG(DEBUG, log_cb_) << "Implicit HE-AAC signalling is being"
- << " used. Please use mp4a.40.5 instead of"
- << " mp4a.40.2 in the mimetype.";
+ MEDIA_LOG(DEBUG, media_log_)
+ << "Implicit HE-AAC signalling is being"
+ << " used. Please use mp4a.40.5 instead of"
+ << " mp4a.40.2 in the mimetype.";
}
// This is an unrecoverable error, so bail out.
av_frame_unref(av_frame_.get());
@@ -370,8 +370,6 @@ bool FFmpegAudioDecoder::ConfigureDecoder() {
// Success!
av_frame_.reset(av_frame_alloc());
- discard_helper_.reset(new AudioDiscardHelper(config_.samples_per_second(),
- config_.codec_delay()));
av_sample_format_ = codec_context_->sample_fmt;
if (codec_context_->channels !=
@@ -390,6 +388,8 @@ bool FFmpegAudioDecoder::ConfigureDecoder() {
}
void FFmpegAudioDecoder::ResetTimestampState() {
+ discard_helper_.reset(new AudioDiscardHelper(config_.samples_per_second(),
+ config_.codec_delay()));
discard_helper_->Reset(config_.codec_delay());
}
diff --git a/chromium/media/filters/ffmpeg_audio_decoder.h b/chromium/media/filters/ffmpeg_audio_decoder.h
index 2446ce59a48..a394e7dcfdb 100644
--- a/chromium/media/filters/ffmpeg_audio_decoder.h
+++ b/chromium/media/filters/ffmpeg_audio_decoder.h
@@ -32,7 +32,7 @@ class MEDIA_EXPORT FFmpegAudioDecoder : public AudioDecoder {
public:
FFmpegAudioDecoder(
const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
- const LogCB& log_cb);
+ const scoped_refptr<MediaLog>& media_log);
~FFmpegAudioDecoder() override;
// AudioDecoder implementation.
@@ -107,7 +107,7 @@ class MEDIA_EXPORT FFmpegAudioDecoder : public AudioDecoder {
scoped_ptr<AudioDiscardHelper> discard_helper_;
- LogCB log_cb_;
+ scoped_refptr<MediaLog> media_log_;
DISALLOW_IMPLICIT_CONSTRUCTORS(FFmpegAudioDecoder);
};
diff --git a/chromium/media/filters/ffmpeg_demuxer.cc b/chromium/media/filters/ffmpeg_demuxer.cc
index 75bf64683cc..155e98079af 100644
--- a/chromium/media/filters/ffmpeg_demuxer.cc
+++ b/chromium/media/filters/ffmpeg_demuxer.cc
@@ -10,6 +10,7 @@
#include "base/bind.h"
#include "base/callback_helpers.h"
#include "base/memory/scoped_ptr.h"
+#include "base/metrics/histogram_macros.h"
#include "base/metrics/sparse_histogram.h"
#include "base/single_thread_task_runner.h"
#include "base/strings/string_number_conversions.h"
@@ -19,10 +20,12 @@
#include "base/task_runner_util.h"
#include "base/thread_task_runner_handle.h"
#include "base/time/time.h"
+#include "media/audio/sample_rates.h"
#include "media/base/bind_to_current_loop.h"
#include "media/base/decrypt_config.h"
#include "media/base/limits.h"
#include "media/base/media_log.h"
+#include "media/base/timestamp_constants.h"
#include "media/ffmpeg/ffmpeg_common.h"
#include "media/filters/ffmpeg_aac_bitstream_converter.h"
#include "media/filters/ffmpeg_bitstream_converter.h"
@@ -31,6 +34,10 @@
#include "media/filters/webvtt_util.h"
#include "media/formats/webm/webm_crypto_helpers.h"
+#if defined(ENABLE_HEVC_DEMUXING)
+#include "media/filters/ffmpeg_h265_to_annex_b_bitstream_converter.h"
+#endif
+
namespace media {
static base::Time ExtractTimelineOffset(AVFormatContext* format_context) {
@@ -80,6 +87,91 @@ static base::TimeDelta ExtractStartTime(AVStream* stream,
return start_time;
}
+// Some videos just want to watch the world burn, with a height of 0; cap the
+// "infinite" aspect ratio resulting.
+const int kInfiniteRatio = 99999;
+
+// Common aspect ratios (multiplied by 100 and truncated) used for histogramming
+// video sizes. These were taken on 20111103 from
+// http://wikipedia.org/wiki/Aspect_ratio_(image)#Previous_and_currently_used_aspect_ratios
+const int kCommonAspectRatios100[] = {
+ 100, 115, 133, 137, 143, 150, 155, 160, 166,
+ 175, 177, 185, 200, 210, 220, 221, 235, 237,
+ 240, 255, 259, 266, 276, 293, 400, 1200, kInfiniteRatio,
+};
+
+template <class T> // T has int width() & height() methods.
+static void UmaHistogramAspectRatio(const char* name, const T& size) {
+ UMA_HISTOGRAM_CUSTOM_ENUMERATION(
+ name,
+ // Intentionally use integer division to truncate the result.
+ size.height() ? (size.width() * 100) / size.height() : kInfiniteRatio,
+ base::CustomHistogram::ArrayToCustomRanges(
+ kCommonAspectRatios100, arraysize(kCommonAspectRatios100)));
+}
+
+// Record audio decoder config UMA stats corresponding to a src= playback.
+static void RecordAudioCodecStats(const AudioDecoderConfig& audio_config) {
+ UMA_HISTOGRAM_ENUMERATION("Media.AudioCodec", audio_config.codec(),
+ kAudioCodecMax + 1);
+ UMA_HISTOGRAM_ENUMERATION("Media.AudioSampleFormat",
+ audio_config.sample_format(), kSampleFormatMax + 1);
+ UMA_HISTOGRAM_ENUMERATION("Media.AudioChannelLayout",
+ audio_config.channel_layout(),
+ CHANNEL_LAYOUT_MAX + 1);
+ AudioSampleRate asr;
+ if (ToAudioSampleRate(audio_config.samples_per_second(), &asr)) {
+ UMA_HISTOGRAM_ENUMERATION("Media.AudioSamplesPerSecond", asr,
+ kAudioSampleRateMax + 1);
+ } else {
+ UMA_HISTOGRAM_COUNTS("Media.AudioSamplesPerSecondUnexpected",
+ audio_config.samples_per_second());
+ }
+}
+
+// Record video decoder config UMA stats corresponding to a src= playback.
+static void RecordVideoCodecStats(const VideoDecoderConfig& video_config,
+ AVColorRange color_range) {
+ UMA_HISTOGRAM_ENUMERATION("Media.VideoCodec", video_config.codec(),
+ kVideoCodecMax + 1);
+
+ // Drop UNKNOWN because U_H_E() uses one bucket for all values less than 1.
+ if (video_config.profile() >= 0) {
+ UMA_HISTOGRAM_ENUMERATION("Media.VideoCodecProfile", video_config.profile(),
+ VIDEO_CODEC_PROFILE_MAX + 1);
+ }
+ UMA_HISTOGRAM_COUNTS_10000("Media.VideoCodedWidth",
+ video_config.coded_size().width());
+ UmaHistogramAspectRatio("Media.VideoCodedAspectRatio",
+ video_config.coded_size());
+ UMA_HISTOGRAM_COUNTS_10000("Media.VideoVisibleWidth",
+ video_config.visible_rect().width());
+ UmaHistogramAspectRatio("Media.VideoVisibleAspectRatio",
+ video_config.visible_rect());
+ UMA_HISTOGRAM_ENUMERATION("Media.VideoPixelFormatUnion",
+ video_config.format(), PIXEL_FORMAT_MAX + 1);
+ UMA_HISTOGRAM_ENUMERATION("Media.VideoFrameColorSpace",
+ video_config.color_space(), COLOR_SPACE_MAX + 1);
+
+ // Note the PRESUBMIT_IGNORE_UMA_MAX below, this silences the PRESUBMIT.py
+ // check for uma enum max usage, since we're abusing
+ // UMA_HISTOGRAM_ENUMERATION to report a discrete value.
+ UMA_HISTOGRAM_ENUMERATION("Media.VideoColorRange", color_range,
+ AVCOL_RANGE_NB); // PRESUBMIT_IGNORE_UMA_MAX
+}
+
+static int32_t GetCodecHash(const AVCodecContext* context) {
+ if (context->codec_descriptor)
+ return HashCodecName(context->codec_descriptor->name);
+ const AVCodecDescriptor* codec_descriptor =
+ avcodec_descriptor_get(context->codec_id);
+ if (codec_descriptor)
+ return HashCodecName(codec_descriptor->name);
+
+ // If the codec name can't be determined, return none for tracking.
+ return HashCodecName("none");
+}
+
//
// FFmpegDemuxerStream
//
@@ -94,7 +186,7 @@ FFmpegDemuxerStream::FFmpegDemuxerStream(FFmpegDemuxer* demuxer,
last_packet_timestamp_(kNoTimestamp()),
last_packet_duration_(kNoTimestamp()),
video_rotation_(VIDEO_ROTATION_0),
- fixup_negative_ogg_timestamps_(false) {
+ fixup_negative_timestamps_(false) {
DCHECK(demuxer_);
bool is_encrypted = false;
@@ -105,12 +197,12 @@ FFmpegDemuxerStream::FFmpegDemuxerStream(FFmpegDemuxer* demuxer,
switch (stream->codec->codec_type) {
case AVMEDIA_TYPE_AUDIO:
type_ = AUDIO;
- AVStreamToAudioDecoderConfig(stream, &audio_config_, true);
+ AVStreamToAudioDecoderConfig(stream, &audio_config_);
is_encrypted = audio_config_.is_encrypted();
break;
case AVMEDIA_TYPE_VIDEO:
type_ = VIDEO;
- AVStreamToVideoDecoderConfig(stream, &video_config_, true);
+ AVStreamToVideoDecoderConfig(stream, &video_config_);
is_encrypted = video_config_.is_encrypted();
rotation_entry = av_dict_get(stream->metadata, "rotate", NULL, 0);
@@ -294,10 +386,10 @@ void FFmpegDemuxerStream::EnqueuePacket(ScopedAVPacket packet) {
if (stream_timestamp != kNoTimestamp()) {
const bool is_audio = type() == AUDIO;
- // If this is an OGG file with negative timestamps don't rebase any other
- // stream types against the negative starting time.
+ // If this file has negative timestamps don't rebase any other stream types
+ // against the negative starting time.
base::TimeDelta start_time = demuxer_->start_time();
- if (fixup_negative_ogg_timestamps_ && !is_audio &&
+ if (fixup_negative_timestamps_ && !is_audio &&
start_time < base::TimeDelta()) {
start_time = base::TimeDelta();
}
@@ -310,19 +402,35 @@ void FFmpegDemuxerStream::EnqueuePacket(ScopedAVPacket packet) {
buffer->set_timestamp(stream_timestamp - start_time);
- // If enabled, mark audio packets with negative timestamps for post-decode
- // discard.
- if (fixup_negative_ogg_timestamps_ && is_audio &&
+ // If enabled, and no codec delay is present, mark audio packets with
+ // negative timestamps for post-decode discard.
+ if (fixup_negative_timestamps_ && is_audio &&
stream_timestamp < base::TimeDelta() &&
buffer->duration() != kNoTimestamp()) {
- if (stream_timestamp + buffer->duration() < base::TimeDelta()) {
- // Discard the entire packet if it's entirely before zero.
- buffer->set_discard_padding(
- std::make_pair(kInfiniteDuration(), base::TimeDelta()));
+ if (!stream_->codec->delay) {
+ DCHECK_EQ(buffer->discard_padding().first, base::TimeDelta());
+
+ if (stream_timestamp + buffer->duration() < base::TimeDelta()) {
+ DCHECK_EQ(buffer->discard_padding().second, base::TimeDelta());
+
+ // Discard the entire packet if it's entirely before zero.
+ buffer->set_discard_padding(
+ std::make_pair(kInfiniteDuration(), base::TimeDelta()));
+ } else {
+ // Only discard part of the frame if it overlaps zero.
+ buffer->set_discard_padding(std::make_pair(
+ -stream_timestamp, buffer->discard_padding().second));
+ }
} else {
- // Only discard part of the frame if it overlaps zero.
- buffer->set_discard_padding(
- std::make_pair(-stream_timestamp, base::TimeDelta()));
+ // Verify that codec delay would cover discard and that we don't need to
+ // mark the packet for post decode discard. Since timestamps may be in
+ // milliseconds and codec delay in nanosecond precision, round up to the
+ // nearest millisecond. See enable_negative_timestamp_fixups().
+ DCHECK_LE(-std::ceil(FramesToTimeDelta(
+ audio_decoder_config().codec_delay(),
+ audio_decoder_config().samples_per_second())
+ .InMillisecondsF()),
+ stream_timestamp.InMillisecondsF());
}
}
} else {
@@ -344,7 +452,7 @@ void FFmpegDemuxerStream::EnqueuePacket(ScopedAVPacket packet) {
// Fixing chained ogg is non-trivial, so for now just reuse the last good
// timestamp. The decoder will rewrite the timestamps to be sample accurate
// later. See http://crbug.com/396864.
- if (fixup_negative_ogg_timestamps_ &&
+ if (fixup_negative_timestamps_ &&
(buffer->timestamp() == kNoTimestamp() ||
buffer->timestamp() < last_packet_timestamp_)) {
buffer->set_timestamp(last_packet_timestamp_ +
@@ -456,6 +564,11 @@ void FFmpegDemuxerStream::InitBitstreamConverter() {
if (stream_->codec->codec_id == AV_CODEC_ID_H264) {
bitstream_converter_.reset(
new FFmpegH264ToAnnexBBitstreamConverter(stream_->codec));
+#if defined(ENABLE_HEVC_DEMUXING)
+ } else if (stream_->codec->codec_id == AV_CODEC_ID_HEVC) {
+ bitstream_converter_.reset(
+ new FFmpegH265ToAnnexBBitstreamConverter(stream_->codec));
+#endif
} else if (stream_->codec->codec_id == AV_CODEC_ID_AAC) {
bitstream_converter_.reset(
new FFmpegAACBitstreamConverter(stream_->codec));
@@ -628,9 +741,19 @@ void FFmpegDemuxer::Seek(base::TimeDelta time, const PipelineStatusCB& cb) {
// Additionally, to workaround limitations in how we expose seekable ranges to
// Blink (http://crbug.com/137275), we also want to clamp seeks before the
// start time to the start time.
- const base::TimeDelta seek_time =
- start_time_ < base::TimeDelta() ? time + start_time_
- : time < start_time_ ? start_time_ : time;
+ base::TimeDelta seek_time = start_time_ < base::TimeDelta()
+ ? time + start_time_
+ : time < start_time_ ? start_time_ : time;
+
+ // When seeking in an opus stream we need to ensure we deliver enough data to
+ // satisfy the seek preroll; otherwise the audio at the actual seek time will
+ // not be entirely accurate.
+ FFmpegDemuxerStream* audio_stream = GetFFmpegStream(DemuxerStream::AUDIO);
+ if (audio_stream) {
+ const AudioDecoderConfig& config = audio_stream->audio_decoder_config();
+ if (config.codec() == kCodecOpus)
+ seek_time = std::max(start_time_, seek_time - config.seek_preroll());
+ }
// Choose the seeking stream based on whether it contains the seek time, if no
// match can be found prefer the preferred stream.
@@ -882,11 +1005,12 @@ void FFmpegDemuxer::OnFindStreamInfoDone(const PipelineStatusCB& status_cb,
continue;
// Log the codec detected, whether it is supported or not.
- UMA_HISTOGRAM_SPARSE_SLOWLY("Media.DetectedAudioCodec",
- codec_context->codec_id);
+ UMA_HISTOGRAM_SPARSE_SLOWLY("Media.DetectedAudioCodecHash",
+ GetCodecHash(codec_context));
+
// Ensure the codec is supported. IsValidConfig() also checks that the
// channel layout and sample format are valid.
- AVStreamToAudioDecoderConfig(stream, &audio_config, false);
+ AVStreamToAudioDecoderConfig(stream, &audio_config);
if (!audio_config.IsValidConfig())
continue;
audio_stream = stream;
@@ -894,12 +1018,34 @@ void FFmpegDemuxer::OnFindStreamInfoDone(const PipelineStatusCB& status_cb,
if (video_stream)
continue;
+#if defined(ENABLE_HEVC_DEMUXING)
+ if (stream->codec->codec_id == AV_CODEC_ID_HEVC) {
+ // If ffmpeg is built without HEVC parser/decoder support, it will be
+ // able to demux HEVC based solely on container-provided information,
+ // but unable to get some of the parameters without parsing the stream
+ // (e.g. coded size needs to be read from SPS, pixel format is typically
+ // deduced from decoder config in hvcC box). These are not really needed
+ // when using external decoder (e.g. hardware decoder), so override them
+ // here, to make sure this translates into a valid VideoDecoderConfig.
+ if (stream->codec->coded_width == 0 &&
+ stream->codec->coded_height == 0) {
+ DCHECK(stream->codec->width > 0);
+ DCHECK(stream->codec->height > 0);
+ stream->codec->coded_width = stream->codec->width;
+ stream->codec->coded_height = stream->codec->height;
+ }
+ if (stream->codec->pix_fmt == AV_PIX_FMT_NONE) {
+ stream->codec->pix_fmt = AV_PIX_FMT_YUV420P;
+ }
+ }
+#endif
// Log the codec detected, whether it is supported or not.
- UMA_HISTOGRAM_SPARSE_SLOWLY("Media.DetectedVideoCodec",
- codec_context->codec_id);
+ UMA_HISTOGRAM_SPARSE_SLOWLY("Media.DetectedVideoCodecHash",
+ GetCodecHash(codec_context));
+
// Ensure the codec is supported. IsValidConfig() also checks that the
// frame size and visible size are valid.
- AVStreamToVideoDecoderConfig(stream, &video_config, false);
+ AVStreamToVideoDecoderConfig(stream, &video_config);
if (!video_config.IsValidConfig())
continue;
@@ -913,6 +1059,16 @@ void FFmpegDemuxer::OnFindStreamInfoDone(const PipelineStatusCB& status_cb,
}
streams_[i] = new FFmpegDemuxerStream(this, stream);
+
+ // Record audio or video src= playback UMA stats for the stream's decoder
+ // config.
+ if (codec_type == AVMEDIA_TYPE_AUDIO) {
+ RecordAudioCodecStats(streams_[i]->audio_decoder_config());
+ } else if (codec_type == AVMEDIA_TYPE_VIDEO) {
+ RecordVideoCodecStats(streams_[i]->video_decoder_config(),
+ stream->codec->color_range);
+ }
+
max_duration = std::max(max_duration, streams_[i]->duration());
const base::TimeDelta start_time =
@@ -960,17 +1116,28 @@ void FFmpegDemuxer::OnFindStreamInfoDone(const PipelineStatusCB& status_cb,
max_duration = kInfiniteDuration();
}
- // Ogg has some peculiarities around negative timestamps, so use this flag to
- // setup the FFmpegDemuxerStreams appropriately.
+ // FFmpeg represents audio data marked as before the beginning of stream as
+ // having negative timestamps. This data must be discarded after it has been
+ // decoded, not before since it is used to warmup the decoder. There are
+ // currently two known cases for this: vorbis in ogg and opus in ogg and webm.
+ //
+ // For API clarity, it was decided that the rest of the media pipeline should
+ // not be exposed to negative timestamps. Which means we need to rebase these
+ // negative timestamps and mark them for discard post decoding.
//
// Post-decode frame dropping for packets with negative timestamps is outlined
// in section A.2 in the Ogg Vorbis spec:
// http://xiph.org/vorbis/doc/Vorbis_I_spec.html
- if (strcmp(format_context->iformat->name, "ogg") == 0 && audio_stream &&
- audio_stream->codec->codec_id == AV_CODEC_ID_VORBIS) {
+ //
+ // FFmpeg's use of negative timestamps for opus pre-skip is nonstandard, but
+ // for more information on pre-skip see section 4.2 of the Ogg Opus spec:
+ // https://tools.ietf.org/html/draft-ietf-codec-oggopus-08#section-4.2
+ if (audio_stream && (audio_stream->codec->codec_id == AV_CODEC_ID_OPUS ||
+ (strcmp(format_context->iformat->name, "ogg") == 0 &&
+ audio_stream->codec->codec_id == AV_CODEC_ID_VORBIS))) {
for (size_t i = 0; i < streams_.size(); ++i) {
if (streams_[i])
- streams_[i]->enable_negative_timestamp_fixups_for_ogg();
+ streams_[i]->enable_negative_timestamp_fixups();
}
// Fixup the seeking information to avoid selecting the audio stream simply
@@ -1074,7 +1241,7 @@ void FFmpegDemuxer::OnFindStreamInfoDone(const PipelineStatusCB& status_cb,
video_codec->time_base.num,
video_codec->time_base.den));
media_log_->SetStringProperty(
- "video_format", VideoFrame::FormatToString(video_config.format()));
+ "video_format", VideoPixelFormatToString(video_config.format()));
media_log_->SetBooleanProperty("video_is_encrypted",
video_config.is_encrypted());
} else {
@@ -1204,19 +1371,6 @@ void FFmpegDemuxer::OnReadFrameDone(ScopedAVPacket packet, int result) {
packet.swap(new_packet);
}
- // Special case for opus in ogg. FFmpeg is pre-trimming the codec delay
- // from the packet timestamp. Chrome expects to handle this itself inside
- // the decoder, so shift timestamps by the delay in this case.
- // TODO(dalecurtis): Try to get fixed upstream. See http://crbug.com/328207
- if (strcmp(glue_->format_context()->iformat->name, "ogg") == 0) {
- const AVCodecContext* codec_context =
- glue_->format_context()->streams[packet->stream_index]->codec;
- if (codec_context->codec_id == AV_CODEC_ID_OPUS &&
- codec_context->delay > 0) {
- packet->pts += codec_context->delay;
- }
- }
-
FFmpegDemuxerStream* demuxer_stream = streams_[packet->stream_index];
demuxer_stream->EnqueuePacket(packet.Pass());
}
diff --git a/chromium/media/filters/ffmpeg_demuxer.h b/chromium/media/filters/ffmpeg_demuxer.h
index 98ee3b2844a..9c20e4a9049 100644
--- a/chromium/media/filters/ffmpeg_demuxer.h
+++ b/chromium/media/filters/ffmpeg_demuxer.h
@@ -27,7 +27,6 @@
#include <vector>
#include "base/callback.h"
-#include "base/gtest_prod_util.h"
#include "base/memory/scoped_vector.h"
#include "base/threading/thread.h"
#include "media/base/audio_decoder_config.h"
@@ -77,12 +76,17 @@ class FFmpegDemuxerStream : public DemuxerStream {
base::TimeDelta duration() const { return duration_; }
- // Enables fixes for ogg files with negative timestamps. For AUDIO streams,
- // all packets with negative timestamps will be marked for post-decode
- // discard. For all other stream types, if FFmpegDemuxer::start_time() is
- // negative, it will not be used to shift timestamps during EnqueuePacket().
- void enable_negative_timestamp_fixups_for_ogg() {
- fixup_negative_ogg_timestamps_ = true;
+ // Enables fixes for files with negative timestamps. Normally all timestamps
+ // are rebased against FFmpegDemuxer::start_time() whenever that value is
+ // negative. When this fix is enabled, only AUDIO stream packets will be
+ // rebased to time zero, all other stream types will use the muxed timestamp.
+ //
+ // Further, when no codec delay is present, all AUDIO packets which originally
+ // had negative timestamps will be marked for post-decode discard. When codec
+ // delay is present, it is assumed the decoder will handle discard and does
+ // not need the AUDIO packets to be marked for discard; just rebased to zero.
+ void enable_negative_timestamp_fixups() {
+ fixup_negative_timestamps_ = true;
}
// DemuxerStream implementation.
@@ -155,7 +159,7 @@ class FFmpegDemuxerStream : public DemuxerStream {
#endif
std::string encryption_key_id_;
- bool fixup_negative_ogg_timestamps_;
+ bool fixup_negative_timestamps_;
DISALLOW_COPY_AND_ASSIGN(FFmpegDemuxerStream);
};
diff --git a/chromium/media/filters/ffmpeg_demuxer_unittest.cc b/chromium/media/filters/ffmpeg_demuxer_unittest.cc
index 2201cbc49be..e75a67fdaaf 100644
--- a/chromium/media/filters/ffmpeg_demuxer_unittest.cc
+++ b/chromium/media/filters/ffmpeg_demuxer_unittest.cc
@@ -15,6 +15,7 @@
#include "media/base/media_log.h"
#include "media/base/mock_demuxer_host.h"
#include "media/base/test_helpers.h"
+#include "media/base/timestamp_constants.h"
#include "media/ffmpeg/ffmpeg_common.h"
#include "media/filters/ffmpeg_demuxer.h"
#include "media/filters/file_data_source.h"
@@ -286,7 +287,7 @@ TEST_F(FFmpegDemuxerTest, Initialize_Successful) {
const VideoDecoderConfig& video_config = stream->video_decoder_config();
EXPECT_EQ(kCodecVP8, video_config.codec());
- EXPECT_EQ(VideoFrame::YV12, video_config.format());
+ EXPECT_EQ(PIXEL_FORMAT_YV12, video_config.format());
EXPECT_EQ(320, video_config.coded_size().width());
EXPECT_EQ(240, video_config.coded_size().height());
EXPECT_EQ(0, video_config.visible_rect().x());
@@ -594,6 +595,81 @@ TEST_F(FFmpegDemuxerTest, Read_AudioNegativeStartTimeAndOggDiscard_Sync) {
}
}
+// Similar to the test above, but using an opus clip with a large amount of
+// pre-skip, which ffmpeg encodes as negative timestamps.
+TEST_F(FFmpegDemuxerTest, Read_AudioNegativeStartTimeAndOpusDiscard_Sync) {
+ CreateDemuxer("opus-trimming-video-test.webm");
+ InitializeDemuxer();
+
+ // Attempt a read from the video stream and run the message loop until done.
+ DemuxerStream* video = demuxer_->GetStream(DemuxerStream::VIDEO);
+ DemuxerStream* audio = demuxer_->GetStream(DemuxerStream::AUDIO);
+ EXPECT_EQ(audio->audio_decoder_config().codec_delay(), 65535);
+
+ // Packet size to timestamp (in microseconds) mapping for the first N packets
+ // which should be fully discarded.
+ static const int kTestExpectations[][2] = {
+ {635, 0}, {594, 120000}, {597, 240000}, {591, 360000},
+ {582, 480000}, {583, 600000}, {592, 720000}, {567, 840000},
+ {579, 960000}, {572, 1080000}, {583, 1200000}};
+
+ // Run the test twice with a seek in between.
+ for (int i = 0; i < 2; ++i) {
+ for (size_t j = 0; j < arraysize(kTestExpectations); ++j) {
+ audio->Read(NewReadCB(FROM_HERE, kTestExpectations[j][0],
+ kTestExpectations[j][1], true));
+ message_loop_.Run();
+ }
+
+ // Though the internal start time may be below zero, the exposed media time
+ // must always be greater than zero.
+ EXPECT_EQ(base::TimeDelta(), demuxer_->GetStartTime());
+
+ video->Read(NewReadCB(FROM_HERE, 16009, 0, true));
+ message_loop_.Run();
+
+ video->Read(NewReadCB(FROM_HERE, 2715, 1000, false));
+ message_loop_.Run();
+
+ video->Read(NewReadCB(FROM_HERE, 427, 33000, false));
+ message_loop_.Run();
+
+ // Seek back to the beginning and repeat the test.
+ WaitableMessageLoopEvent event;
+ demuxer_->Seek(base::TimeDelta(), event.GetPipelineStatusCB());
+ event.RunAndWaitForStatus(PIPELINE_OK);
+ }
+}
+
+// Similar to the test above, but using sfx-opus.ogg, which has a much smaller
+// amount of discard padding and no |start_time| set on the AVStream.
+TEST_F(FFmpegDemuxerTest, Read_AudioNegativeStartTimeAndOpusSfxDiscard_Sync) {
+ CreateDemuxer("sfx-opus.ogg");
+ InitializeDemuxer();
+
+ // Attempt a read from the video stream and run the message loop until done.
+ DemuxerStream* audio = demuxer_->GetStream(DemuxerStream::AUDIO);
+ EXPECT_EQ(audio->audio_decoder_config().codec_delay(), 312);
+
+ // Run the test twice with a seek in between.
+ for (int i = 0; i < 2; ++i) {
+ audio->Read(NewReadCB(FROM_HERE, 314, 0, true));
+ message_loop_.Run();
+
+ audio->Read(NewReadCB(FROM_HERE, 244, 20000, true));
+ message_loop_.Run();
+
+ // Though the internal start time may be below zero, the exposed media time
+ // must always be greater than zero.
+ EXPECT_EQ(base::TimeDelta(), demuxer_->GetStartTime());
+
+ // Seek back to the beginning and repeat the test.
+ WaitableMessageLoopEvent event;
+ demuxer_->Seek(base::TimeDelta(), event.GetPipelineStatusCB());
+ event.RunAndWaitForStatus(PIPELINE_OK);
+ }
+}
+
TEST_F(FFmpegDemuxerTest, Read_EndOfStream) {
// Verify that end of stream buffers are created.
CreateDemuxer("bear-320x240.webm");
@@ -879,6 +955,44 @@ TEST_F(FFmpegDemuxerTest, MP4_ZeroStszEntry) {
ReadUntilEndOfStream(demuxer_->GetStream(DemuxerStream::AUDIO));
}
+class Mp3SeekFFmpegDemuxerTest
+ : public FFmpegDemuxerTest,
+ public testing::WithParamInterface<const char*> {
+};
+TEST_P(Mp3SeekFFmpegDemuxerTest, TestFastSeek) {
+ // Init demxuer with given MP3 file parameter.
+ CreateDemuxer(GetParam());
+ InitializeDemuxer();
+
+ // We read a bunch of bytes when we first open the file. Reset the count
+ // here to just track the bytes read for the upcoming seek. This allows us
+ // to use a more narrow threshold for passing the test.
+ data_source_->reset_bytes_read_for_testing();
+
+ FFmpegDemuxerStream* audio = static_cast<FFmpegDemuxerStream*>(
+ demuxer_->GetStream(DemuxerStream::AUDIO));
+ ASSERT_TRUE(audio);
+
+ // Seek to near the end of the file
+ WaitableMessageLoopEvent event;
+ demuxer_->Seek(.9 * audio->duration(), event.GetPipelineStatusCB());
+ event.RunAndWaitForStatus(PIPELINE_OK);
+
+ // Verify that seeking to the end read only a small portion of the file.
+ // Slow that read sequentially up to the seek point will fail this check.
+ int64 file_size = 0;
+ ASSERT_TRUE(data_source_->GetSize(&file_size));
+ EXPECT_LT(data_source_->bytes_read_for_testing(), (file_size * .25));
+}
+
+// MP3s should seek quickly without sequentially reading up to the seek point.
+// VBR vs CBR and the presence/absence of TOC influence the seeking algorithm.
+// See http://crbug.com/530043 and FFmpeg flag AVFMT_FLAG_FAST_SEEK.
+INSTANTIATE_TEST_CASE_P(, Mp3SeekFFmpegDemuxerTest,
+ ::testing::Values("bear-audio-10s-CBR-has-TOC.mp3",
+ "bear-audio-10s-CBR-no-TOC.mp3",
+ "bear-audio-10s-VBR-has-TOC.mp3",
+ "bear-audio-10s-VBR-no-TOC.mp3"));
static void ValidateAnnexB(DemuxerStream* stream,
DemuxerStream::Status status,
@@ -980,7 +1094,7 @@ TEST_F(FFmpegDemuxerTest, NaturalSizeWithoutPASP) {
ASSERT_TRUE(stream);
const VideoDecoderConfig& video_config = stream->video_decoder_config();
- EXPECT_EQ(gfx::Size(638, 360), video_config.natural_size());
+ EXPECT_EQ(gfx::Size(639, 360), video_config.natural_size());
}
TEST_F(FFmpegDemuxerTest, NaturalSizeWithPASP) {
@@ -991,9 +1105,25 @@ TEST_F(FFmpegDemuxerTest, NaturalSizeWithPASP) {
ASSERT_TRUE(stream);
const VideoDecoderConfig& video_config = stream->video_decoder_config();
- EXPECT_EQ(gfx::Size(638, 360), video_config.natural_size());
+ EXPECT_EQ(gfx::Size(639, 360), video_config.natural_size());
}
#endif
+#if defined(ENABLE_HEVC_DEMUXING)
+TEST_F(FFmpegDemuxerTest, HEVC_in_MP4_container) {
+ CreateDemuxer("bear-hevc-frag.mp4");
+ InitializeDemuxer();
+
+ DemuxerStream* video = demuxer_->GetStream(DemuxerStream::VIDEO);
+ ASSERT_TRUE(video);
+
+ video->Read(NewReadCB(FROM_HERE, 3569, 66733, true));
+ message_loop_.Run();
+
+ video->Read(NewReadCB(FROM_HERE, 1042, 200200, false));
+ message_loop_.Run();
+}
+#endif
+
} // namespace media
diff --git a/chromium/media/filters/ffmpeg_glue.cc b/chromium/media/filters/ffmpeg_glue.cc
index 7643f2356df..31c6828b445 100644
--- a/chromium/media/filters/ffmpeg_glue.cc
+++ b/chromium/media/filters/ffmpeg_glue.cc
@@ -80,7 +80,7 @@ static int LockManagerOperation(void** lock, enum AVLockOp op) {
case AV_LOCK_DESTROY:
delete static_cast<base::Lock*>(*lock);
- *lock = NULL;
+ *lock = nullptr;
return 0;
}
return 1;
@@ -132,7 +132,7 @@ FFmpegGlue::FFmpegGlue(FFmpegURLProtocol* protocol)
format_context_ = avformat_alloc_context();
avio_context_.reset(avio_alloc_context(
static_cast<unsigned char*>(av_malloc(kBufferSize)), kBufferSize, 0,
- protocol, &AVIOReadOperation, NULL, &AVIOSeekOperation));
+ protocol, &AVIOReadOperation, nullptr, &AVIOSeekOperation));
// Ensure FFmpeg only tries to seek on resources we know to be seekable.
avio_context_->seekable =
@@ -145,6 +145,10 @@ FFmpegGlue::FFmpegGlue(FFmpegURLProtocol* protocol)
// will set the AVFMT_FLAG_CUSTOM_IO flag for us, but do so here to ensure an
// early error state doesn't cause FFmpeg to free our resources in error.
format_context_->flags |= AVFMT_FLAG_CUSTOM_IO;
+
+ // Enable fast, but inaccurate seeks for MP3.
+ format_context_->flags |= AVFMT_FLAG_FAST_SEEK;
+
format_context_->pb = avio_context_.get();
}
@@ -171,15 +175,15 @@ bool FFmpegGlue::OpenContext() {
UMA_HISTOGRAM_SPARSE_SLOWLY("Media.DetectedContainer", container);
}
- // By passing NULL for the filename (second parameter) we are telling FFmpeg
- // to use the AVIO context we setup from the AVFormatContext structure.
- return avformat_open_input(&format_context_, NULL, NULL, NULL) == 0;
+ // By passing nullptr for the filename (second parameter) we are telling
+ // FFmpeg to use the AVIO context we setup from the AVFormatContext structure.
+ return avformat_open_input(&format_context_, nullptr, nullptr, nullptr) == 0;
}
FFmpegGlue::~FFmpegGlue() {
// In the event of avformat_open_input() failure, FFmpeg may sometimes free
// our AVFormatContext behind the scenes, but leave the buffer alive. It will
- // helpfully set |format_context_| to NULL in this case.
+ // helpfully set |format_context_| to nullptr in this case.
if (!format_context_) {
av_free(avio_context_->buffer);
return;
diff --git a/chromium/media/filters/ffmpeg_glue_unittest.cc b/chromium/media/filters/ffmpeg_glue_unittest.cc
index f9cfda5a646..5b97f3636f2 100644
--- a/chromium/media/filters/ffmpeg_glue_unittest.cc
+++ b/chromium/media/filters/ffmpeg_glue_unittest.cc
@@ -235,9 +235,11 @@ TEST_F(FFmpegGlueDestructionTest, WithOpenWithOpenStreams) {
ASSERT_TRUE(glue_->OpenContext());
ASSERT_GT(glue_->format_context()->nb_streams, 0u);
- AVCodecContext* context = glue_->format_context()->streams[0]->codec;
- ASSERT_EQ(avcodec_open2(
- context, avcodec_find_decoder(context->codec_id), NULL), 0);
+ // Pick the audio stream (1) so this works when the ffmpeg video decoders are
+ // disabled.
+ AVCodecContext* context = glue_->format_context()->streams[1]->codec;
+ ASSERT_EQ(0, avcodec_open2(
+ context, avcodec_find_decoder(context->codec_id), NULL));
}
} // namespace media
diff --git a/chromium/media/filters/ffmpeg_h265_to_annex_b_bitstream_converter.cc b/chromium/media/filters/ffmpeg_h265_to_annex_b_bitstream_converter.cc
new file mode 100644
index 00000000000..a643f534758
--- /dev/null
+++ b/chromium/media/filters/ffmpeg_h265_to_annex_b_bitstream_converter.cc
@@ -0,0 +1,93 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/filters/ffmpeg_h265_to_annex_b_bitstream_converter.h"
+
+#include "base/logging.h"
+#include "media/base/decrypt_config.h"
+#include "media/ffmpeg/ffmpeg_common.h"
+#include "media/formats/mp4/avc.h"
+#include "media/formats/mp4/box_definitions.h"
+#include "media/formats/mp4/hevc.h"
+
+namespace media {
+
+FFmpegH265ToAnnexBBitstreamConverter::FFmpegH265ToAnnexBBitstreamConverter(
+ AVCodecContext* stream_codec_context)
+ : stream_codec_context_(stream_codec_context) {
+ CHECK(stream_codec_context_);
+}
+
+FFmpegH265ToAnnexBBitstreamConverter::~FFmpegH265ToAnnexBBitstreamConverter() {}
+
+bool FFmpegH265ToAnnexBBitstreamConverter::ConvertPacket(AVPacket* packet) {
+ DVLOG(3) << __FUNCTION__;
+ if (packet == NULL || !packet->data)
+ return false;
+
+ // Calculate the needed output buffer size.
+ if (!hevc_config_) {
+ if (!stream_codec_context_->extradata ||
+ stream_codec_context_->extradata_size <= 0) {
+ DVLOG(1) << "HEVCDecoderConfiguration not found, no extra codec data";
+ return false;
+ }
+
+ hevc_config_.reset(new mp4::HEVCDecoderConfigurationRecord());
+
+ if (!hevc_config_->Parse(
+ stream_codec_context_->extradata,
+ stream_codec_context_->extradata_size)) {
+ DVLOG(1) << "Parsing HEVCDecoderConfiguration failed";
+ return false;
+ }
+ }
+
+ std::vector<uint8> input_frame;
+ std::vector<SubsampleEntry> subsamples;
+ // TODO(servolk): Performance could be improved here, by reducing unnecessary
+ // data copying, but first annex b conversion code needs to be refactored to
+ // allow that (see crbug.com/455379).
+ input_frame.insert(input_frame.end(),
+ packet->data, packet->data + packet->size);
+ int nalu_size_len = hevc_config_->lengthSizeMinusOne + 1;
+ if (!mp4::AVC::ConvertFrameToAnnexB(nalu_size_len, &input_frame,
+ &subsamples)) {
+ DVLOG(1) << "AnnexB conversion failed";
+ return false;
+ }
+
+ if (packet->flags & AV_PKT_FLAG_KEY) {
+ RCHECK(mp4::HEVC::InsertParamSetsAnnexB(*hevc_config_.get(),
+ &input_frame, &subsamples));
+ DVLOG(4) << "Inserted HEVC decoder params";
+ }
+
+ uint32 output_packet_size = input_frame.size();
+
+ if (output_packet_size == 0)
+ return false; // Invalid input packet.
+
+ // Allocate new packet for the output.
+ AVPacket dest_packet;
+ if (av_new_packet(&dest_packet, output_packet_size) != 0)
+ return false; // Memory allocation failure.
+
+ // This is a bit tricky: since the interface does not allow us to replace
+ // the pointer of the old packet with a new one, we will initially copy the
+ // metadata from old packet to new bigger packet.
+ av_packet_copy_props(&dest_packet, packet);
+
+ // Proceed with the conversion of the actual in-band NAL units, leave room
+ // for configuration in the beginning.
+ memcpy(dest_packet.data, &input_frame[0], input_frame.size());
+
+ // At the end we must destroy the old packet.
+ av_free_packet(packet);
+ *packet = dest_packet; // Finally, replace the values in the input packet.
+
+ return true;
+}
+
+} // namespace media
diff --git a/chromium/media/filters/ffmpeg_h265_to_annex_b_bitstream_converter.h b/chromium/media/filters/ffmpeg_h265_to_annex_b_bitstream_converter.h
new file mode 100644
index 00000000000..5892f429bea
--- /dev/null
+++ b/chromium/media/filters/ffmpeg_h265_to_annex_b_bitstream_converter.h
@@ -0,0 +1,49 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_FILTERS_FFMPEG_H265_TO_ANNEX_B_BITSTREAM_CONVERTER_H_
+#define MEDIA_FILTERS_FFMPEG_H265_TO_ANNEX_B_BITSTREAM_CONVERTER_H_
+
+#include "base/basictypes.h"
+#include "base/memory/scoped_ptr.h"
+#include "media/base/media_export.h"
+#include "media/filters/ffmpeg_bitstream_converter.h"
+#include "media/formats/mp4/hevc.h"
+
+// Forward declarations for FFmpeg datatypes used.
+struct AVCodecContext;
+struct AVPacket;
+
+namespace media {
+
+// Bitstream converter that converts H.265 bitstream based FFmpeg packets into
+// H.265 Annex B bytestream format.
+class MEDIA_EXPORT FFmpegH265ToAnnexBBitstreamConverter
+ : public FFmpegBitstreamConverter {
+ public:
+ // The |stream_codec_context| will be used during conversion and should be the
+ // AVCodecContext for the stream sourcing these packets. A reference to
+ // |stream_codec_context| is retained, so it must outlive this class.
+ explicit FFmpegH265ToAnnexBBitstreamConverter(
+ AVCodecContext* stream_codec_context);
+
+ ~FFmpegH265ToAnnexBBitstreamConverter() override;
+
+ // FFmpegBitstreamConverter implementation.
+ bool ConvertPacket(AVPacket* packet) override;
+
+ private:
+ scoped_ptr<mp4::HEVCDecoderConfigurationRecord> hevc_config_;
+
+ // Variable to hold a pointer to memory where we can access the global
+ // data from the FFmpeg file format's global headers.
+ AVCodecContext* stream_codec_context_;
+
+ DISALLOW_COPY_AND_ASSIGN(FFmpegH265ToAnnexBBitstreamConverter);
+};
+
+} // namespace media
+
+#endif // MEDIA_FILTERS_FFMPEG_H265_TO_ANNEX_B_BITSTREAM_CONVERTER_H_
+
diff --git a/chromium/media/filters/ffmpeg_video_decoder.cc b/chromium/media/filters/ffmpeg_video_decoder.cc
index 2f2988b11ac..7688e8e6573 100644
--- a/chromium/media/filters/ffmpeg_video_decoder.cc
+++ b/chromium/media/filters/ffmpeg_video_decoder.cc
@@ -18,6 +18,7 @@
#include "media/base/limits.h"
#include "media/base/media_switches.h"
#include "media/base/pipeline.h"
+#include "media/base/timestamp_constants.h"
#include "media/base/video_frame.h"
#include "media/base/video_util.h"
#include "media/ffmpeg/ffmpeg_common.h"
@@ -65,10 +66,10 @@ static void ReleaseVideoBufferImpl(void* opaque, uint8* data) {
video_frame.swap(reinterpret_cast<VideoFrame**>(&opaque));
}
-static size_t RoundUp(size_t value, size_t alignment) {
- // Check that |alignment| is a power of 2.
- DCHECK((alignment + (alignment - 1)) == (alignment | (alignment - 1)));
- return ((value + (alignment - 1)) & ~(alignment - 1));
+// static
+bool FFmpegVideoDecoder::IsCodecSupported(VideoCodec codec) {
+ FFmpegGlue::InitializeFFmpeg();
+ return avcodec_find_decoder(VideoCodecToCodecID(codec)) != nullptr;
}
FFmpegVideoDecoder::FFmpegVideoDecoder(
@@ -84,13 +85,13 @@ int FFmpegVideoDecoder::GetVideoBuffer(struct AVCodecContext* codec_context,
// whereas |codec_context| contains the current threads's
// updated width/height/pix_fmt, which can change for adaptive
// content.
- const VideoFrame::Format format =
- PixelFormatToVideoFormat(codec_context->pix_fmt);
+ const VideoPixelFormat format =
+ AVPixelFormatToVideoPixelFormat(codec_context->pix_fmt);
- if (format == VideoFrame::UNKNOWN)
+ if (format == PIXEL_FORMAT_UNKNOWN)
return AVERROR(EINVAL);
- DCHECK(format == VideoFrame::YV12 || format == VideoFrame::YV16 ||
- format == VideoFrame::YV24);
+ DCHECK(format == PIXEL_FORMAT_YV12 || format == PIXEL_FORMAT_YV16 ||
+ format == PIXEL_FORMAT_YV24);
gfx::Size size(codec_context->width, codec_context->height);
const int ret = av_image_check_size(size.width(), size.height(), 0, NULL);
@@ -112,30 +113,30 @@ int FFmpegVideoDecoder::GetVideoBuffer(struct AVCodecContext* codec_context,
//
// When lowres is non-zero, dimensions should be divided by 2^(lowres), but
// since we don't use this, just DCHECK that it's zero.
- //
- // Always round up to a multiple of two to match VideoFrame restrictions on
- // frame alignment.
DCHECK_EQ(codec_context->lowres, 0);
- gfx::Size coded_size(
- RoundUp(std::max(size.width(), codec_context->coded_width), 2),
- RoundUp(std::max(size.height(), codec_context->coded_height), 2));
+ gfx::Size coded_size(std::max(size.width(), codec_context->coded_width),
+ std::max(size.height(), codec_context->coded_height));
if (!VideoFrame::IsValidConfig(format, VideoFrame::STORAGE_UNKNOWN,
coded_size, gfx::Rect(size), natural_size)) {
return AVERROR(EINVAL);
}
+ // FFmpeg expects the initialize allocation to be zero-initialized. Failure
+ // to do so can lead to unitialized value usage. See http://crbug.com/390941
scoped_refptr<VideoFrame> video_frame = frame_pool_.CreateFrame(
format, coded_size, gfx::Rect(size), natural_size, kNoTimestamp());
- if (codec_context->color_range == AVCOL_RANGE_JPEG)
- video_frame->metadata()->SetInteger(VideoFrameMetadata::COLOR_SPACE,
- VideoFrame::COLOR_SPACE_JPEG);
- else if (codec_context->colorspace == AVCOL_SPC_BT709) {
- video_frame->metadata()->SetInteger(VideoFrameMetadata::COLOR_SPACE,
- VideoFrame::COLOR_SPACE_HD_REC709);
- }
- for (int i = 0; i < 3; i++) {
+ // Prefer the color space from the codec context. If it's not specified (or is
+ // set to an unsupported value), fall back on the value from the config.
+ ColorSpace color_space = AVColorSpaceToColorSpace(codec_context->colorspace,
+ codec_context->color_range);
+ if (color_space == COLOR_SPACE_UNSPECIFIED)
+ color_space = config_.color_space();
+ video_frame->metadata()->SetInteger(VideoFrameMetadata::COLOR_SPACE,
+ color_space);
+
+ for (size_t i = 0; i < VideoFrame::NumPlanes(video_frame->format()); i++) {
frame->data[i] = video_frame->data(i);
frame->linesize[i] = video_frame->stride(i);
}
diff --git a/chromium/media/filters/ffmpeg_video_decoder.h b/chromium/media/filters/ffmpeg_video_decoder.h
index 6cd1a523b78..53999aae2d7 100644
--- a/chromium/media/filters/ffmpeg_video_decoder.h
+++ b/chromium/media/filters/ffmpeg_video_decoder.h
@@ -27,6 +27,8 @@ class DecoderBuffer;
class MEDIA_EXPORT FFmpegVideoDecoder : public VideoDecoder {
public:
+ static bool IsCodecSupported(VideoCodec codec);
+
explicit FFmpegVideoDecoder(
const scoped_refptr<base::SingleThreadTaskRunner>& task_runner);
~FFmpegVideoDecoder() override;
diff --git a/chromium/media/filters/ffmpeg_video_decoder_unittest.cc b/chromium/media/filters/ffmpeg_video_decoder_unittest.cc
index 8ecc83deca0..f8a954a4818 100644
--- a/chromium/media/filters/ffmpeg_video_decoder_unittest.cc
+++ b/chromium/media/filters/ffmpeg_video_decoder_unittest.cc
@@ -36,7 +36,7 @@ using ::testing::StrictMock;
namespace media {
-static const VideoFrame::Format kVideoFormat = VideoFrame::YV12;
+static const VideoPixelFormat kVideoFormat = PIXEL_FORMAT_YV12;
static const gfx::Size kCodedSize(320, 240);
static const gfx::Rect kVisibleRect(320, 240);
static const gfx::Size kNaturalSize(320, 240);
@@ -223,16 +223,16 @@ TEST_F(FFmpegVideoDecoderTest, Initialize_UnsupportedDecoder) {
TEST_F(FFmpegVideoDecoderTest, Initialize_UnsupportedPixelFormat) {
// Ensure decoder handles unsupported pixel formats without crashing.
VideoDecoderConfig config(kCodecVP8, VIDEO_CODEC_PROFILE_UNKNOWN,
- VideoFrame::UNKNOWN,
- kCodedSize, kVisibleRect, kNaturalSize,
- NULL, 0, false);
+ PIXEL_FORMAT_UNKNOWN, COLOR_SPACE_UNSPECIFIED,
+ kCodedSize, kVisibleRect, kNaturalSize, NULL, 0,
+ false);
InitializeWithConfigWithResult(config, false);
}
TEST_F(FFmpegVideoDecoderTest, Initialize_OpenDecoderFails) {
// Specify Theora w/o extra data so that avcodec_open2() fails.
VideoDecoderConfig config(kCodecTheora, VIDEO_CODEC_PROFILE_UNKNOWN,
- kVideoFormat,
+ kVideoFormat, COLOR_SPACE_UNSPECIFIED,
kCodedSize, kVisibleRect, kNaturalSize,
NULL, 0, false);
InitializeWithConfigWithResult(config, false);
@@ -243,6 +243,7 @@ TEST_F(FFmpegVideoDecoderTest, Initialize_AspectRatioNumeratorZero) {
VideoDecoderConfig config(kCodecVP8,
VP8PROFILE_ANY,
kVideoFormat,
+ COLOR_SPACE_UNSPECIFIED,
kCodedSize,
kVisibleRect,
natural_size,
@@ -257,6 +258,7 @@ TEST_F(FFmpegVideoDecoderTest, Initialize_AspectRatioDenominatorZero) {
VideoDecoderConfig config(kCodecVP8,
VP8PROFILE_ANY,
kVideoFormat,
+ COLOR_SPACE_UNSPECIFIED,
kCodedSize,
kVisibleRect,
natural_size,
@@ -271,6 +273,7 @@ TEST_F(FFmpegVideoDecoderTest, Initialize_AspectRatioNumeratorNegative) {
VideoDecoderConfig config(kCodecVP8,
VP8PROFILE_ANY,
kVideoFormat,
+ COLOR_SPACE_UNSPECIFIED,
kCodedSize,
kVisibleRect,
natural_size,
@@ -285,6 +288,7 @@ TEST_F(FFmpegVideoDecoderTest, Initialize_AspectRatioDenominatorNegative) {
VideoDecoderConfig config(kCodecVP8,
VP8PROFILE_ANY,
kVideoFormat,
+ COLOR_SPACE_UNSPECIFIED,
kCodedSize,
kVisibleRect,
natural_size,
@@ -301,6 +305,7 @@ TEST_F(FFmpegVideoDecoderTest, Initialize_AspectRatioNumeratorTooLarge) {
VideoDecoderConfig config(kCodecVP8,
VP8PROFILE_ANY,
kVideoFormat,
+ COLOR_SPACE_UNSPECIFIED,
kCodedSize,
kVisibleRect,
natural_size,
@@ -311,11 +316,14 @@ TEST_F(FFmpegVideoDecoderTest, Initialize_AspectRatioNumeratorTooLarge) {
}
TEST_F(FFmpegVideoDecoderTest, Initialize_AspectRatioDenominatorTooLarge) {
- int den = kVisibleRect.size().width() + 1;
+ // Denominator is large enough that the natural size height will be zero.
+ int den = 2 * kVisibleRect.size().width() + 1;
gfx::Size natural_size = GetNaturalSize(kVisibleRect.size(), 1, den);
+ EXPECT_EQ(0, natural_size.width());
VideoDecoderConfig config(kCodecVP8,
VP8PROFILE_ANY,
kVideoFormat,
+ COLOR_SPACE_UNSPECIFIED,
kCodedSize,
kVisibleRect,
natural_size,
@@ -392,15 +400,12 @@ TEST_F(FFmpegVideoDecoderTest, DecodeFrame_DecodeError) {
EXPECT_TRUE(output_frames_.empty());
}
-// Multi-threaded decoders have different behavior than single-threaded
-// decoders at the end of the stream. Multithreaded decoders hide errors
-// that happen on the last |codec_context_->thread_count| frames to avoid
-// prematurely signalling EOS. This test just exposes that behavior so we can
-// detect if it changes.
+// A corrupt frame followed by an EOS buffer should raise a decode error.
TEST_F(FFmpegVideoDecoderTest, DecodeFrame_DecodeErrorAtEndOfStream) {
Initialize();
- EXPECT_EQ(VideoDecoder::kOk, DecodeSingleFrame(corrupt_i_frame_buffer_));
+ EXPECT_EQ(VideoDecoder::kDecodeError,
+ DecodeSingleFrame(corrupt_i_frame_buffer_));
}
// Decode |i_frame_buffer_| and then a frame with a larger width and verify
diff --git a/chromium/media/filters/file_data_source.cc b/chromium/media/filters/file_data_source.cc
index 5aad3f93f81..76929aa28d9 100644
--- a/chromium/media/filters/file_data_source.cc
+++ b/chromium/media/filters/file_data_source.cc
@@ -12,12 +12,14 @@ namespace media {
FileDataSource::FileDataSource()
: force_read_errors_(false),
- force_streaming_(false) {
+ force_streaming_(false),
+ bytes_read_(0) {
}
FileDataSource::FileDataSource(base::File file)
: force_read_errors_(false),
- force_streaming_(false) {
+ force_streaming_(false),
+ bytes_read_(0) {
file_.Initialize(file.Pass());
}
@@ -47,6 +49,7 @@ void FileDataSource::Read(int64 position, int size, uint8* data,
int64 clamped_size = std::min(static_cast<int64>(size), file_size - position);
memcpy(data, file_.data() + position, clamped_size);
+ bytes_read_ += clamped_size;
read_cb.Run(clamped_size);
}
diff --git a/chromium/media/filters/file_data_source.h b/chromium/media/filters/file_data_source.h
index ebffee59d9e..68b8b3cdaef 100644
--- a/chromium/media/filters/file_data_source.h
+++ b/chromium/media/filters/file_data_source.h
@@ -37,12 +37,15 @@ class MEDIA_EXPORT FileDataSource : public DataSource {
// Unit test helpers. Recreate the object if you want the default behaviour.
void force_read_errors_for_testing() { force_read_errors_ = true; }
void force_streaming_for_testing() { force_streaming_ = true; }
+ uint64_t bytes_read_for_testing() { return bytes_read_; }
+ void reset_bytes_read_for_testing() { bytes_read_ = 0; }
private:
base::MemoryMappedFile file_;
bool force_read_errors_;
bool force_streaming_;
+ uint64_t bytes_read_;
DISALLOW_COPY_AND_ASSIGN(FileDataSource);
};
diff --git a/chromium/media/filters/frame_processor.cc b/chromium/media/filters/frame_processor.cc
index 6c71ea50724..80c5dab1980 100644
--- a/chromium/media/filters/frame_processor.cc
+++ b/chromium/media/filters/frame_processor.cc
@@ -7,11 +7,14 @@
#include <cstdlib>
#include "base/stl_util.h"
-#include "media/base/buffers.h"
#include "media/base/stream_parser_buffer.h"
+#include "media/base/timestamp_constants.h"
namespace media {
+const int kMaxDroppedPrerollWarnings = 10;
+const int kMaxDtsBeyondPtsWarnings = 10;
+
// Helper class to capture per-track details needed by a frame processor. Some
// of this information may be duplicated in the short-term in the associated
// ChunkDemuxerStream and SourceBufferStream for a track.
@@ -148,16 +151,18 @@ bool MseTrackBuffer::FlushProcessedFrames() {
bool result = stream_->Append(processed_frames_);
processed_frames_.clear();
+
DVLOG_IF(3, !result) << __FUNCTION__
<< "(): Failure appending processed frames to stream";
return result;
}
-FrameProcessor::FrameProcessor(const UpdateDurationCB& update_duration_cb)
- : sequence_mode_(false),
- group_start_timestamp_(kNoTimestamp()),
- update_duration_cb_(update_duration_cb) {
+FrameProcessor::FrameProcessor(const UpdateDurationCB& update_duration_cb,
+ const scoped_refptr<MediaLog>& media_log)
+ : group_start_timestamp_(kNoTimestamp()),
+ update_duration_cb_(update_duration_cb),
+ media_log_(media_log) {
DVLOG(2) << __FUNCTION__ << "()";
DCHECK(!update_duration_cb.is_null());
}
@@ -193,7 +198,7 @@ bool FrameProcessor::ProcessFrames(
base::TimeDelta* timestamp_offset) {
StreamParser::BufferQueue frames;
if (!MergeBufferQueues(audio_buffers, video_buffers, text_map, &frames)) {
- DVLOG(2) << "Parse error discovered while merging parser's buffers";
+ MEDIA_LOG(ERROR, media_log_) << "Parsed buffers not in DTS sequence";
return false;
}
@@ -243,8 +248,11 @@ bool FrameProcessor::AddTrack(StreamParser::TrackId id,
MseTrackBuffer* existing_track = FindTrack(id);
DCHECK(!existing_track);
- if (existing_track)
+ if (existing_track) {
+ MEDIA_LOG(ERROR, media_log_) << "Failure adding track with duplicate ID "
+ << id;
return false;
+ }
track_buffers_[id] = new MseTrackBuffer(stream);
return true;
@@ -254,8 +262,11 @@ bool FrameProcessor::UpdateTrack(StreamParser::TrackId old_id,
StreamParser::TrackId new_id) {
DVLOG(2) << __FUNCTION__ << "() : old_id=" << old_id << ", new_id=" << new_id;
- if (old_id == new_id || !FindTrack(old_id) || FindTrack(new_id))
+ if (old_id == new_id || !FindTrack(old_id) || FindTrack(new_id)) {
+ MEDIA_LOG(ERROR, media_log_) << "Failure updating track id from " << old_id
+ << " to " << new_id;
return false;
+ }
track_buffers_[new_id] = track_buffers_[old_id];
CHECK_EQ(1u, track_buffers_.erase(old_id));
@@ -276,6 +287,11 @@ void FrameProcessor::Reset() {
itr != track_buffers_.end(); ++itr) {
itr->second->Reset();
}
+
+ if (sequence_mode_) {
+ DCHECK(kNoTimestamp() != group_end_timestamp_);
+ group_start_timestamp_ = group_end_timestamp_;
+ }
}
void FrameProcessor::OnPossibleAudioConfigUpdate(
@@ -359,10 +375,11 @@ bool FrameProcessor::HandlePartialAppendWindowTrimming(
if (audio_preroll_buffer_.get()) {
// We only want to use the preroll buffer if it directly precedes (less
// than one sample apart) the current buffer.
- const int64 delta = std::abs((audio_preroll_buffer_->timestamp() +
- audio_preroll_buffer_->duration() -
- buffer->timestamp()).InMicroseconds());
- if (delta < sample_duration_.InMicroseconds()) {
+ const int64 delta =
+ (audio_preroll_buffer_->timestamp() +
+ audio_preroll_buffer_->duration() - buffer->timestamp())
+ .InMicroseconds();
+ if (std::abs(delta) < sample_duration_.InMicroseconds()) {
DVLOG(1) << "Attaching audio preroll buffer ["
<< audio_preroll_buffer_->timestamp().InSecondsF() << ", "
<< (audio_preroll_buffer_->timestamp() +
@@ -371,7 +388,14 @@ bool FrameProcessor::HandlePartialAppendWindowTrimming(
buffer->SetPrerollBuffer(audio_preroll_buffer_);
processed_buffer = true;
} else {
- // TODO(dalecurtis): Add a MEDIA_LOG() for when this is dropped unused.
+ LIMITED_MEDIA_LOG(DEBUG, media_log_, num_dropped_preroll_warnings_,
+ kMaxDroppedPrerollWarnings)
+ << "Partial append window trimming dropping unused audio preroll "
+ "buffer with PTS "
+ << audio_preroll_buffer_->timestamp().InMicroseconds()
+ << "us that ends too far (" << delta
+ << "us) from next buffer with PTS "
+ << buffer->timestamp().InMicroseconds() << "us";
}
audio_preroll_buffer_ = NULL;
}
@@ -451,31 +475,44 @@ bool FrameProcessor::ProcessFrame(
// Sanity check the timestamps.
if (presentation_timestamp == kNoTimestamp()) {
- DVLOG(2) << __FUNCTION__ << ": Unknown frame PTS";
+ MEDIA_LOG(ERROR, media_log_) << "Unknown PTS for " << frame->GetTypeName()
+ << " frame";
return false;
}
if (decode_timestamp == kNoDecodeTimestamp()) {
- DVLOG(2) << __FUNCTION__ << ": Unknown frame DTS";
+ MEDIA_LOG(ERROR, media_log_) << "Unknown DTS for " << frame->GetTypeName()
+ << " frame";
return false;
}
if (decode_timestamp.ToPresentationTime() > presentation_timestamp) {
// TODO(wolenetz): Determine whether DTS>PTS should really be allowed. See
// http://crbug.com/354518.
+ LIMITED_MEDIA_LOG(DEBUG, media_log_, num_dts_beyond_pts_warnings_,
+ kMaxDtsBeyondPtsWarnings)
+ << "Parsed " << frame->GetTypeName() << " frame has DTS "
+ << decode_timestamp.InMicroseconds()
+ << "us, which is after the frame's PTS "
+ << presentation_timestamp.InMicroseconds() << "us";
DVLOG(2) << __FUNCTION__ << ": WARNING: Frame DTS("
<< decode_timestamp.InSecondsF() << ") > PTS("
- << presentation_timestamp.InSecondsF() << ")";
+ << presentation_timestamp.InSecondsF()
+ << "), frame type=" << frame->GetTypeName();
}
// TODO(acolwell/wolenetz): All stream parsers must emit valid (positive)
// frame durations. For now, we allow non-negative frame duration.
// See http://crbug.com/351166.
if (frame_duration == kNoTimestamp()) {
- DVLOG(2) << __FUNCTION__ << ": Frame missing duration (kNoTimestamp())";
+ MEDIA_LOG(ERROR, media_log_)
+ << "Unknown duration for " << frame->GetTypeName() << " frame at PTS "
+ << presentation_timestamp.InMicroseconds() << "us";
return false;
}
if (frame_duration < base::TimeDelta()) {
- DVLOG(2) << __FUNCTION__ << ": Frame duration negative: "
- << frame_duration.InSecondsF();
+ MEDIA_LOG(ERROR, media_log_)
+ << "Negative duration " << frame_duration.InMicroseconds()
+ << "us for " << frame->GetTypeName() << " frame at PTS "
+ << presentation_timestamp.InMicroseconds() << "us";
return false;
}
@@ -534,9 +571,10 @@ bool FrameProcessor::ProcessFrame(
MseTrackBuffer* track_buffer = FindTrack(track_id);
if (!track_buffer) {
- DVLOG(2) << __FUNCTION__ << ": Unknown track: type=" << frame->type()
- << ", frame processor track id=" << track_id
- << ", parser track id=" << frame->track_id();
+ MEDIA_LOG(ERROR, media_log_)
+ << "Unknown track with type " << frame->GetTypeName()
+ << ", frame processor track id " << track_id
+ << ", and parser track id " << frame->track_id();
return false;
}
@@ -625,11 +663,12 @@ bool FrameProcessor::ProcessFrame(
if (decode_timestamp < DecodeTimestamp()) {
// B-frames may still result in negative DTS here after being shifted by
// |timestamp_offset_|.
- DVLOG(2) << __FUNCTION__
- << ": frame PTS=" << presentation_timestamp.InSecondsF()
- << " has negative DTS=" << decode_timestamp.InSecondsF()
- << " after applying timestampOffset, handling any discontinuity,"
- << " and filtering against append window";
+ MEDIA_LOG(ERROR, media_log_)
+ << frame->GetTypeName() << " frame with PTS "
+ << presentation_timestamp.InMicroseconds() << "us has negative DTS "
+ << decode_timestamp.InMicroseconds()
+ << "us after applying timestampOffset, handling any discontinuity, "
+ "and filtering against append window";
return false;
}
diff --git a/chromium/media/filters/frame_processor.h b/chromium/media/filters/frame_processor.h
index 0afcf0ae17f..fc6de696543 100644
--- a/chromium/media/filters/frame_processor.h
+++ b/chromium/media/filters/frame_processor.h
@@ -11,6 +11,7 @@
#include "base/callback_forward.h"
#include "base/time/time.h"
#include "media/base/media_export.h"
+#include "media/base/media_log.h"
#include "media/base/stream_parser.h"
#include "media/filters/chunk_demuxer.h"
@@ -33,7 +34,8 @@ class MEDIA_EXPORT FrameProcessor {
kVideoTrackId = -3
};
- explicit FrameProcessor(const UpdateDurationCB& update_duration_cb);
+ FrameProcessor(const UpdateDurationCB& update_duration_cb,
+ const scoped_refptr<MediaLog>& media_log);
~FrameProcessor();
// Get/set the current append mode, which if true means "sequence" and if
@@ -151,7 +153,7 @@ class MEDIA_EXPORT FrameProcessor {
// Per http://www.w3.org/TR/media-source/#widl-SourceBuffer-mode:
// Controls how a sequence of media segments are handled. This is initially
// set to false ("segments").
- bool sequence_mode_;
+ bool sequence_mode_ = false;
// Tracks the MSE coded frame processing variable of same name.
// Initially kNoTimestamp(), meaning "unset".
@@ -165,6 +167,13 @@ class MEDIA_EXPORT FrameProcessor {
UpdateDurationCB update_duration_cb_;
+ // MediaLog for reporting messages and properties to debug content and engine.
+ scoped_refptr<MediaLog> media_log_;
+
+ // Counters that limit spam to |media_log_| for frame processor warnings.
+ int num_dropped_preroll_warnings_ = 0;
+ int num_dts_beyond_pts_warnings_ = 0;
+
DISALLOW_COPY_AND_ASSIGN(FrameProcessor);
};
diff --git a/chromium/media/filters/frame_processor_unittest.cc b/chromium/media/filters/frame_processor_unittest.cc
index c6525f34484..ed4e715941e 100644
--- a/chromium/media/filters/frame_processor_unittest.cc
+++ b/chromium/media/filters/frame_processor_unittest.cc
@@ -14,6 +14,7 @@
#include "media/base/media_log.h"
#include "media/base/mock_filters.h"
#include "media/base/test_helpers.h"
+#include "media/base/timestamp_constants.h"
#include "media/filters/chunk_demuxer.h"
#include "media/filters/frame_processor.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -54,15 +55,16 @@ class FrameProcessorTestCallbackHelper {
class FrameProcessorTest : public testing::TestWithParam<bool> {
protected:
FrameProcessorTest()
- : frame_processor_(new FrameProcessor(base::Bind(
- &FrameProcessorTestCallbackHelper::OnPossibleDurationIncrease,
- base::Unretained(&callbacks_)))),
+ : frame_processor_(new FrameProcessor(
+ base::Bind(
+ &FrameProcessorTestCallbackHelper::OnPossibleDurationIncrease,
+ base::Unretained(&callbacks_)),
+ new MediaLog())),
append_window_end_(kInfiniteDuration()),
new_media_segment_(false),
audio_id_(FrameProcessor::kAudioTrackId),
video_id_(FrameProcessor::kVideoTrackId),
- frame_duration_(base::TimeDelta::FromMilliseconds(10)) {
- }
+ frame_duration_(base::TimeDelta::FromMilliseconds(10)) {}
enum StreamFlags {
HAS_AUDIO = 1 << 0,
@@ -98,21 +100,21 @@ class FrameProcessorTest : public testing::TestWithParam<bool> {
BufferQueue StringToBufferQueue(const std::string& buffers_to_append,
const TrackId track_id,
const DemuxerStream::Type type) {
- std::vector<std::string> timestamps;
- base::SplitString(buffers_to_append, ' ', &timestamps);
+ std::vector<std::string> timestamps = base::SplitString(
+ buffers_to_append, " ", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
BufferQueue buffers;
for (size_t i = 0; i < timestamps.size(); i++) {
bool is_keyframe = false;
- if (base::EndsWith(timestamps[i], "K", true)) {
+ if (base::EndsWith(timestamps[i], "K", base::CompareCase::SENSITIVE)) {
is_keyframe = true;
// Remove the "K" off of the token.
timestamps[i] = timestamps[i].substr(0, timestamps[i].length() - 1);
}
// Use custom decode timestamp if included.
- std::vector<std::string> buffer_timestamps;
- base::SplitString(timestamps[i], '|', &buffer_timestamps);
+ std::vector<std::string> buffer_timestamps = base::SplitString(
+ timestamps[i], "|", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
if (buffer_timestamps.size() == 1)
buffer_timestamps.push_back(buffer_timestamps[0]);
CHECK_EQ(2u, buffer_timestamps.size());
@@ -192,8 +194,8 @@ class FrameProcessorTest : public testing::TestWithParam<bool> {
// as timestamp_in_ms.
void CheckReadsThenReadStalls(ChunkDemuxerStream* stream,
const std::string& expected) {
- std::vector<std::string> timestamps;
- base::SplitString(expected, ' ', &timestamps);
+ std::vector<std::string> timestamps = base::SplitString(
+ expected, " ", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
std::stringstream ss;
for (size_t i = 0; i < timestamps.size(); ++i) {
int loop_count = 0;
@@ -288,15 +290,14 @@ class FrameProcessorTest : public testing::TestWithParam<bool> {
0,
false);
frame_processor_->OnPossibleAudioConfigUpdate(decoder_config);
- ASSERT_TRUE(audio_->UpdateAudioConfig(decoder_config,
- base::Bind(&AddLogEntryForTest)));
+ ASSERT_TRUE(audio_->UpdateAudioConfig(decoder_config, new MediaLog()));
break;
}
case DemuxerStream::VIDEO: {
ASSERT_FALSE(video_);
video_.reset(new ChunkDemuxerStream(DemuxerStream::VIDEO, true));
ASSERT_TRUE(video_->UpdateVideoConfig(TestVideoConfig::Normal(),
- base::Bind(&AddLogEntryForTest)));
+ new MediaLog()));
break;
}
// TODO(wolenetz): Test text coded frame processing.
@@ -739,6 +740,23 @@ TEST_P(FrameProcessorTest, PartialAppendWindowFilterNoNewMediaSegment) {
CheckReadsThenReadStalls(video_.get(), "0 10");
}
+TEST_F(FrameProcessorTest, AudioOnly_SequenceModeContinuityAcrossReset) {
+ InSequence s;
+ AddTestTracks(HAS_AUDIO);
+ new_media_segment_ = true;
+ frame_processor_->SetSequenceMode(true);
+ EXPECT_CALL(callbacks_, PossibleDurationIncrease(frame_duration_));
+ ProcessFrames("0K", "");
+ frame_processor_->Reset();
+ EXPECT_CALL(callbacks_, PossibleDurationIncrease(frame_duration_ * 2));
+ ProcessFrames("100K", "");
+
+ EXPECT_EQ(frame_duration_ * -9, timestamp_offset_);
+ EXPECT_FALSE(new_media_segment_);
+ CheckExpectedRangesByTimestamp(audio_.get(), "{ [0,20) }");
+ CheckReadsThenReadStalls(audio_.get(), "0 10:100");
+}
+
INSTANTIATE_TEST_CASE_P(SequenceMode, FrameProcessorTest, Values(true));
INSTANTIATE_TEST_CASE_P(SegmentsMode, FrameProcessorTest, Values(false));
diff --git a/chromium/media/filters/gpu_video_decoder.cc b/chromium/media/filters/gpu_video_decoder.cc
index 20a18c5a48e..7a8203335a7 100644
--- a/chromium/media/filters/gpu_video_decoder.cc
+++ b/chromium/media/filters/gpu_video_decoder.cc
@@ -241,8 +241,11 @@ void GpuVideoDecoder::Decode(const scoped_refptr<DecoderBuffer>& buffer,
}
memcpy(shm_buffer->shm->memory(), buffer->data(), size);
- BitstreamBuffer bitstream_buffer(
- next_bitstream_buffer_id_, shm_buffer->shm->handle(), size);
+ // AndroidVideoDecodeAccelerator needs the timestamp to output frames in
+ // presentation order.
+ BitstreamBuffer bitstream_buffer(next_bitstream_buffer_id_,
+ shm_buffer->shm->handle(), size,
+ buffer->timestamp());
// Mask against 30 bits, to avoid (undefined) wraparound on signed integer.
next_bitstream_buffer_id_ = (next_bitstream_buffer_id_ + 1) & 0x3FFFFFFF;
DCHECK(!ContainsKey(bitstream_buffers_in_decoder_, bitstream_buffer.id()));
@@ -372,42 +375,46 @@ void GpuVideoDecoder::PictureReady(const media::Picture& picture) {
PictureBufferMap::iterator it =
assigned_picture_buffers_.find(picture.picture_buffer_id());
if (it == assigned_picture_buffers_.end()) {
- NOTREACHED() << "Missing picture buffer: " << picture.picture_buffer_id();
+ DLOG(ERROR) << "Missing picture buffer: " << picture.picture_buffer_id();
NotifyError(VideoDecodeAccelerator::PLATFORM_FAILURE);
return;
}
const PictureBuffer& pb = it->second;
- // Validate picture rectangle from GPU. This is for sanity/security check
- // even the rectangle is not used in this class.
- if (picture.visible_rect().IsEmpty() ||
- !gfx::Rect(pb.size()).Contains(picture.visible_rect())) {
- NOTREACHED() << "Invalid picture size from VDA: "
- << picture.visible_rect().ToString() << " should fit in "
- << pb.size().ToString();
- NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
- return;
- }
-
// Update frame's timestamp.
base::TimeDelta timestamp;
- // Some of the VDAs don't support and thus don't provide us with visible
- // size in picture.size, passing coded size instead, so always drop it and
- // use config information instead.
+ // Some of the VDAs like DXVA, AVDA, and VTVDA don't support and thus don't
+ // provide us with visible size in picture.size, passing (0, 0) instead, so
+ // for those cases drop it and use config information instead.
gfx::Rect visible_rect;
gfx::Size natural_size;
GetBufferData(picture.bitstream_buffer_id(), &timestamp, &visible_rect,
&natural_size);
+
+ if (!picture.visible_rect().IsEmpty()) {
+ visible_rect = picture.visible_rect();
+ }
+ if (!gfx::Rect(pb.size()).Contains(visible_rect)) {
+ LOG(WARNING) << "Visible size " << visible_rect.ToString()
+ << " is larger than coded size " << pb.size().ToString();
+ visible_rect = gfx::Rect(pb.size());
+ }
+
DCHECK(decoder_texture_target_);
scoped_refptr<VideoFrame> frame(VideoFrame::WrapNativeTexture(
- VideoFrame::ARGB,
+ PIXEL_FORMAT_ARGB,
gpu::MailboxHolder(pb.texture_mailbox(), decoder_texture_target_,
0 /* sync_point */),
BindToCurrentLoop(base::Bind(
&GpuVideoDecoder::ReleaseMailbox, weak_factory_.GetWeakPtr(),
factories_, picture.picture_buffer_id(), pb.texture_id())),
pb.size(), visible_rect, natural_size, timestamp));
+ if (!frame) {
+ DLOG(ERROR) << "Create frame failed for: " << picture.picture_buffer_id();
+ NotifyError(VideoDecodeAccelerator::PLATFORM_FAILURE);
+ return;
+ }
if (picture.allow_overlay())
frame->metadata()->SetBoolean(VideoFrameMetadata::ALLOW_OVERLAY, true);
CHECK_GT(available_pictures_, 0);
diff --git a/chromium/media/filters/h264_parser.cc b/chromium/media/filters/h264_parser.cc
index 22d420bcc69..fe2a443e328 100644
--- a/chromium/media/filters/h264_parser.cc
+++ b/chromium/media/filters/h264_parser.cc
@@ -213,6 +213,7 @@ bool H264Parser::LocateNALU(off_t* nalu_size, off_t* start_code_size) {
off_t annexb_start_code_size = 0;
if (!FindStartCodeInClearRanges(stream_, bytes_left_,
+ encrypted_ranges_,
&nalu_start_off, &annexb_start_code_size)) {
DVLOG(4) << "Could not find start code, end of stream?";
return false;
@@ -238,6 +239,7 @@ bool H264Parser::LocateNALU(off_t* nalu_size, off_t* start_code_size) {
off_t next_start_code_size = 0;
off_t nalu_size_without_start_code = 0;
if (!FindStartCodeInClearRanges(nalu_data, max_nalu_data_size,
+ encrypted_ranges_,
&nalu_size_without_start_code,
&next_start_code_size)) {
nalu_size_without_start_code = max_nalu_data_size;
@@ -250,9 +252,10 @@ bool H264Parser::LocateNALU(off_t* nalu_size, off_t* start_code_size) {
bool H264Parser::FindStartCodeInClearRanges(
const uint8* data,
off_t data_size,
+ const Ranges<const uint8*>& encrypted_ranges,
off_t* offset,
off_t* start_code_size) {
- if (encrypted_ranges_.size() == 0)
+ if (encrypted_ranges.size() == 0)
return FindStartCode(data, data_size, offset, start_code_size);
DCHECK_GE(data_size, 0);
@@ -270,7 +273,7 @@ bool H264Parser::FindStartCodeInClearRanges(
Ranges<const uint8*> start_code_range;
start_code_range.Add(start_code, start_code_end + 1);
- if (encrypted_ranges_.IntersectionWith(start_code_range).size() > 0) {
+ if (encrypted_ranges.IntersectionWith(start_code_range).size() > 0) {
// The start code is inside an encrypted section so we need to scan
// for another start code.
*start_code_size = 0;
diff --git a/chromium/media/filters/h264_parser.h b/chromium/media/filters/h264_parser.h
index b8dde5028ca..36467ba9384 100644
--- a/chromium/media/filters/h264_parser.h
+++ b/chromium/media/filters/h264_parser.h
@@ -341,6 +341,12 @@ class MEDIA_EXPORT H264Parser {
static bool FindStartCode(const uint8* data, off_t data_size,
off_t* offset, off_t* start_code_size);
+ // Wrapper for FindStartCode() that skips over start codes that
+ // may appear inside of |encrypted_ranges_|.
+ // Returns true if a start code was found. Otherwise returns false.
+ static bool FindStartCodeInClearRanges(const uint8* data, off_t data_size,
+ const Ranges<const uint8*>& ranges,
+ off_t* offset, off_t* start_code_size);
H264Parser();
~H264Parser();
@@ -406,12 +412,6 @@ class MEDIA_EXPORT H264Parser {
// - the size in bytes of the start code is returned in |*start_code_size|.
bool LocateNALU(off_t* nalu_size, off_t* start_code_size);
- // Wrapper for FindStartCode() that skips over start codes that
- // may appear inside of |encrypted_ranges_|.
- // Returns true if a start code was found. Otherwise returns false.
- bool FindStartCodeInClearRanges(const uint8* data, off_t data_size,
- off_t* offset, off_t* start_code_size);
-
// Exp-Golomb code parsing as specified in chapter 9.1 of the spec.
// Read one unsigned exp-Golomb code from the stream and return in |*val|.
Result ReadUE(int* val);
diff --git a/chromium/media/filters/h265_parser.cc b/chromium/media/filters/h265_parser.cc
new file mode 100644
index 00000000000..30a8233c143
--- /dev/null
+++ b/chromium/media/filters/h265_parser.cc
@@ -0,0 +1,159 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/stl_util.h"
+
+#include "media/base/decrypt_config.h"
+#include "media/filters/h265_parser.h"
+
+namespace media {
+
+#define READ_BITS_OR_RETURN(num_bits, out) \
+ do { \
+ int _out; \
+ if (!br_.ReadBits(num_bits, &_out)) { \
+ DVLOG(1) \
+ << "Error in stream: unexpected EOS while trying to read " #out; \
+ return kInvalidStream; \
+ } \
+ *out = _out; \
+ } while (0)
+
+#define TRUE_OR_RETURN(a) \
+ do { \
+ if (!(a)) { \
+ DVLOG(1) << "Error in stream: invalid value, expected " << #a; \
+ return kInvalidStream; \
+ } \
+ } while (0)
+
+H265NALU::H265NALU() {
+ memset(this, 0, sizeof(*this));
+}
+
+H265Parser::H265Parser() {
+ Reset();
+}
+
+H265Parser::~H265Parser() {
+}
+
+void H265Parser::Reset() {
+ stream_ = NULL;
+ bytes_left_ = 0;
+ encrypted_ranges_.clear();
+}
+
+void H265Parser::SetStream(const uint8* stream, off_t stream_size) {
+ std::vector<SubsampleEntry> subsamples;
+ SetEncryptedStream(stream, stream_size, subsamples);
+}
+
+void H265Parser::SetEncryptedStream(
+ const uint8* stream, off_t stream_size,
+ const std::vector<SubsampleEntry>& subsamples) {
+ DCHECK(stream);
+ DCHECK_GT(stream_size, 0);
+
+ stream_ = stream;
+ bytes_left_ = stream_size;
+
+ encrypted_ranges_.clear();
+ const uint8* start = stream;
+ const uint8* stream_end = stream_ + bytes_left_;
+ for (size_t i = 0; i < subsamples.size() && start < stream_end; ++i) {
+ start += subsamples[i].clear_bytes;
+
+ const uint8* end = std::min(start + subsamples[i].cypher_bytes, stream_end);
+ encrypted_ranges_.Add(start, end);
+ start = end;
+ }
+}
+
+bool H265Parser::LocateNALU(off_t* nalu_size, off_t* start_code_size) {
+ // Find the start code of next NALU.
+ off_t nalu_start_off = 0;
+ off_t annexb_start_code_size = 0;
+
+ if (!H264Parser::FindStartCodeInClearRanges(stream_, bytes_left_,
+ encrypted_ranges_,
+ &nalu_start_off,
+ &annexb_start_code_size)) {
+ DVLOG(4) << "Could not find start code, end of stream?";
+ return false;
+ }
+
+ // Move the stream to the beginning of the NALU (pointing at the start code).
+ stream_ += nalu_start_off;
+ bytes_left_ -= nalu_start_off;
+
+ const uint8* nalu_data = stream_ + annexb_start_code_size;
+ off_t max_nalu_data_size = bytes_left_ - annexb_start_code_size;
+ if (max_nalu_data_size <= 0) {
+ DVLOG(3) << "End of stream";
+ return false;
+ }
+
+ // Find the start code of next NALU;
+ // if successful, |nalu_size_without_start_code| is the number of bytes from
+ // after previous start code to before this one;
+ // if next start code is not found, it is still a valid NALU since there
+ // are some bytes left after the first start code: all the remaining bytes
+ // belong to the current NALU.
+ off_t next_start_code_size = 0;
+ off_t nalu_size_without_start_code = 0;
+ if (!H264Parser::FindStartCodeInClearRanges(nalu_data, max_nalu_data_size,
+ encrypted_ranges_,
+ &nalu_size_without_start_code,
+ &next_start_code_size)) {
+ nalu_size_without_start_code = max_nalu_data_size;
+ }
+ *nalu_size = nalu_size_without_start_code + annexb_start_code_size;
+ *start_code_size = annexb_start_code_size;
+ return true;
+}
+
+H265Parser::Result H265Parser::AdvanceToNextNALU(H265NALU* nalu) {
+ off_t start_code_size;
+ off_t nalu_size_with_start_code;
+ if (!LocateNALU(&nalu_size_with_start_code, &start_code_size)) {
+ DVLOG(4) << "Could not find next NALU, bytes left in stream: "
+ << bytes_left_;
+ return kEOStream;
+ }
+
+ nalu->data = stream_ + start_code_size;
+ nalu->size = nalu_size_with_start_code - start_code_size;
+ DVLOG(4) << "NALU found: size=" << nalu_size_with_start_code;
+
+ // Initialize bit reader at the start of found NALU.
+ if (!br_.Initialize(nalu->data, nalu->size))
+ return kEOStream;
+
+ // Move parser state to after this NALU, so next time AdvanceToNextNALU
+ // is called, we will effectively be skipping it;
+ // other parsing functions will use the position saved
+ // in bit reader for parsing, so we don't have to remember it here.
+ stream_ += nalu_size_with_start_code;
+ bytes_left_ -= nalu_size_with_start_code;
+
+ // Read NALU header, skip the forbidden_zero_bit, but check for it.
+ int data;
+ READ_BITS_OR_RETURN(1, &data);
+ TRUE_OR_RETURN(data == 0);
+
+ READ_BITS_OR_RETURN(6, &nalu->nal_unit_type);
+ READ_BITS_OR_RETURN(6, &nalu->nuh_layer_id);
+ READ_BITS_OR_RETURN(3, &nalu->nuh_temporal_id_plus1);
+
+ DVLOG(4) << "NALU type: " << static_cast<int>(nalu->nal_unit_type)
+ << " at: " << reinterpret_cast<const void*>(nalu->data)
+ << " size: " << nalu->size;
+
+ return kOk;
+}
+
+} // namespace media
diff --git a/chromium/media/filters/h265_parser.h b/chromium/media/filters/h265_parser.h
new file mode 100644
index 00000000000..f3cf7133522
--- /dev/null
+++ b/chromium/media/filters/h265_parser.h
@@ -0,0 +1,151 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// This file contains an implementation of an H265 Annex-B video stream parser.
+
+#ifndef MEDIA_FILTERS_H265_PARSER_H_
+#define MEDIA_FILTERS_H265_PARSER_H_
+
+#include <sys/types.h>
+
+#include <map>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/macros.h"
+#include "media/base/media_export.h"
+#include "media/base/ranges.h"
+#include "media/filters/h264_bit_reader.h"
+#include "media/filters/h264_parser.h"
+
+namespace media {
+
+struct SubsampleEntry;
+
+// For explanations of each struct and its members, see H.265 specification
+// at http://www.itu.int/rec/T-REC-H.265.
+struct MEDIA_EXPORT H265NALU {
+ H265NALU();
+
+ // NAL Unit types are taken from Table 7-1 of HEVC/H265 standard
+ // http://www.itu.int/rec/T-REC-H.265-201410-I/en
+ enum Type {
+ TRAIL_N = 0,
+ TRAIL_R = 1,
+ TSA_N = 2,
+ TSA_R = 3,
+ STSA_N = 4,
+ STSA_R = 5,
+ RADL_N = 6,
+ RADL_R = 7,
+ RASL_N = 8,
+ RASL_R = 9,
+ RSV_VCL_N10 = 10,
+ RSV_VCL_R11 = 11,
+ RSV_VCL_N12 = 12,
+ RSV_VCL_R13 = 13,
+ RSV_VCL_N14 = 14,
+ RSV_VCL_R15 = 15,
+ BLA_W_LP = 16,
+ BLA_W_RADL = 17,
+ BLA_N_LP = 18,
+ IDR_W_RADL = 19,
+ IDR_N_LP = 20,
+ CRA_NUT = 21,
+ RSV_IRAP_VCL22 = 22,
+ RSV_IRAP_VCL23 = 23,
+ RSV_VCL24 = 24,
+ RSV_VCL25 = 25,
+ RSV_VCL26 = 26,
+ RSV_VCL27 = 27,
+ RSV_VCL28 = 28,
+ RSV_VCL29 = 29,
+ RSV_VCL30 = 30,
+ RSV_VCL31 = 31,
+ VPS_NUT = 32,
+ SPS_NUT = 33,
+ PPS_NUT = 34,
+ AUD_NUT = 35,
+ EOS_NUT = 36,
+ EOB_NUT = 37,
+ FD_NUT = 38,
+ PREFIX_SEI_NUT = 39,
+ SUFFIX_SEI_NUT = 40,
+ RSV_NVCL41 = 41,
+ RSV_NVCL42 = 42,
+ RSV_NVCL43 = 43,
+ RSV_NVCL44 = 44,
+ RSV_NVCL45 = 45,
+ RSV_NVCL46 = 46,
+ RSV_NVCL47 = 47,
+ };
+
+ // After (without) start code; we don't own the underlying memory
+ // and a shallow copy should be made when copying this struct.
+ const uint8* data;
+ off_t size; // From after start code to start code of next NALU (or EOS).
+
+ int nal_unit_type;
+ int nuh_layer_id;
+ int nuh_temporal_id_plus1;
+};
+
+// Class to parse an Annex-B H.265 stream.
+class MEDIA_EXPORT H265Parser {
+ public:
+ enum Result {
+ kOk,
+ kInvalidStream, // error in stream
+ kUnsupportedStream, // stream not supported by the parser
+ kEOStream, // end of stream
+ };
+
+ H265Parser();
+ ~H265Parser();
+
+ void Reset();
+ // Set current stream pointer to |stream| of |stream_size| in bytes,
+ // |stream| owned by caller.
+ // |subsamples| contains information about what parts of |stream| are
+ // encrypted.
+ void SetStream(const uint8* stream, off_t stream_size);
+ void SetEncryptedStream(const uint8* stream, off_t stream_size,
+ const std::vector<SubsampleEntry>& subsamples);
+
+ // Read the stream to find the next NALU, identify it and return
+ // that information in |*nalu|. This advances the stream to the beginning
+ // of this NALU, but not past it, so subsequent calls to NALU-specific
+ // parsing functions (ParseSPS, etc.) will parse this NALU.
+ // If the caller wishes to skip the current NALU, it can call this function
+ // again, instead of any NALU-type specific parse functions below.
+ Result AdvanceToNextNALU(H265NALU* nalu);
+
+ private:
+ // Move the stream pointer to the beginning of the next NALU,
+ // i.e. pointing at the next start code.
+ // Return true if a NALU has been found.
+ // If a NALU is found:
+ // - its size in bytes is returned in |*nalu_size| and includes
+ // the start code as well as the trailing zero bits.
+ // - the size in bytes of the start code is returned in |*start_code_size|.
+ bool LocateNALU(off_t* nalu_size, off_t* start_code_size);
+
+ // Pointer to the current NALU in the stream.
+ const uint8* stream_;
+
+ // Bytes left in the stream after the current NALU.
+ off_t bytes_left_;
+
+ H264BitReader br_;
+
+ // Ranges of encrypted bytes in the buffer passed to
+ // SetEncryptedStream().
+ Ranges<const uint8*> encrypted_ranges_;
+
+ DISALLOW_COPY_AND_ASSIGN(H265Parser);
+};
+
+} // namespace media
+
+#endif // MEDIA_FILTERS_H265_PARSER_H_
diff --git a/chromium/media/filters/h265_parser_unittest.cc b/chromium/media/filters/h265_parser_unittest.cc
new file mode 100644
index 00000000000..c590b506c8a
--- /dev/null
+++ b/chromium/media/filters/h265_parser_unittest.cc
@@ -0,0 +1,43 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/memory_mapped_file.h"
+#include "base/logging.h"
+#include "media/base/test_data_util.h"
+#include "media/filters/h265_parser.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+TEST(H265ParserTest, RawHevcStreamFileParsing) {
+ base::FilePath file_path = GetTestDataFilePath("bear.hevc");
+ // Number of NALUs in the test stream to be parsed.
+ const int num_nalus = 35;
+
+ base::MemoryMappedFile stream;
+ ASSERT_TRUE(stream.Initialize(file_path))
+ << "Couldn't open stream file: " << file_path.MaybeAsASCII();
+
+ H265Parser parser;
+ parser.SetStream(stream.data(), stream.length());
+
+ // Parse until the end of stream/unsupported stream/error in stream is found.
+ int num_parsed_nalus = 0;
+ while (true) {
+ H265NALU nalu;
+ H265Parser::Result res = parser.AdvanceToNextNALU(&nalu);
+ if (res == H265Parser::kEOStream) {
+ DVLOG(1) << "Number of successfully parsed NALUs before EOS: "
+ << num_parsed_nalus;
+ ASSERT_EQ(num_nalus, num_parsed_nalus);
+ return;
+ }
+ ASSERT_EQ(res, H265Parser::kOk);
+
+ ++num_parsed_nalus;
+ DVLOG(4) << "Found NALU " << nalu.nal_unit_type;
+ }
+}
+
+} // namespace media
diff --git a/chromium/media/filters/ivf_parser.cc b/chromium/media/filters/ivf_parser.cc
new file mode 100644
index 00000000000..b6160fb533b
--- /dev/null
+++ b/chromium/media/filters/ivf_parser.cc
@@ -0,0 +1,89 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/logging.h"
+#include "base/sys_byteorder.h"
+#include "media/filters/ivf_parser.h"
+
+namespace media {
+
+void IvfFileHeader::ByteSwap() {
+ version = base::ByteSwapToLE16(version);
+ header_size = base::ByteSwapToLE16(header_size);
+ fourcc = base::ByteSwapToLE32(fourcc);
+ width = base::ByteSwapToLE16(width);
+ height = base::ByteSwapToLE16(height);
+ timebase_denum = base::ByteSwapToLE32(timebase_denum);
+ timebase_num = base::ByteSwapToLE32(timebase_num);
+ num_frames = base::ByteSwapToLE32(num_frames);
+ unused = base::ByteSwapToLE32(unused);
+}
+
+void IvfFrameHeader::ByteSwap() {
+ frame_size = base::ByteSwapToLE32(frame_size);
+ timestamp = base::ByteSwapToLE64(timestamp);
+}
+
+IvfParser::IvfParser() : ptr_(nullptr), end_(nullptr) {}
+
+bool IvfParser::Initialize(const uint8_t* stream,
+ size_t size,
+ IvfFileHeader* file_header) {
+ DCHECK(stream);
+ DCHECK(file_header);
+ ptr_ = stream;
+ end_ = stream + size;
+
+ if (size < sizeof(IvfFileHeader)) {
+ DLOG(ERROR) << "EOF before file header";
+ return false;
+ }
+
+ memcpy(file_header, ptr_, sizeof(IvfFileHeader));
+ file_header->ByteSwap();
+
+ if (memcmp(file_header->signature, kIvfHeaderSignature,
+ sizeof(file_header->signature)) != 0) {
+ DLOG(ERROR) << "IVF signature mismatch";
+ return false;
+ }
+ DLOG_IF(WARNING, file_header->version != 0)
+ << "IVF version unknown: " << file_header->version
+ << ", the parser may not be able to parse correctly";
+ if (file_header->header_size != sizeof(IvfFileHeader)) {
+ DLOG(ERROR) << "IVF file header size mismatch";
+ return false;
+ }
+
+ ptr_ += sizeof(IvfFileHeader);
+
+ return true;
+}
+
+bool IvfParser::ParseNextFrame(IvfFrameHeader* frame_header,
+ const uint8_t** payload) {
+ DCHECK(ptr_);
+ DCHECK(payload);
+
+ if (end_ < ptr_ + sizeof(IvfFrameHeader)) {
+ DLOG_IF(ERROR, ptr_ != end_) << "Incomplete frame header";
+ return false;
+ }
+
+ memcpy(frame_header, ptr_, sizeof(IvfFrameHeader));
+ frame_header->ByteSwap();
+ ptr_ += sizeof(IvfFrameHeader);
+
+ if (end_ < ptr_ + frame_header->frame_size) {
+ DLOG(ERROR) << "Not enough frame data";
+ return false;
+ }
+
+ *payload = ptr_;
+ ptr_ += frame_header->frame_size;
+
+ return true;
+}
+
+} // namespace media
diff --git a/chromium/media/filters/ivf_parser.h b/chromium/media/filters/ivf_parser.h
new file mode 100644
index 00000000000..fe234aac3b1
--- /dev/null
+++ b/chromium/media/filters/ivf_parser.h
@@ -0,0 +1,86 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_FILTERS_IVF_PARSER_H_
+#define MEDIA_FILTERS_IVF_PARSER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/macros.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+const char kIvfHeaderSignature[] = "DKIF";
+
+#pragma pack(push, 1)
+struct MEDIA_EXPORT IvfFileHeader {
+  // Byte swap integer fields between native and (on disk) little endian.
+ void ByteSwap();
+
+ char signature[4]; // signature: 'DKIF'
+ uint16_t version; // version (should be 0)
+ uint16_t header_size; // size of header in bytes
+ uint32_t fourcc; // codec FourCC (e.g., 'VP80')
+ uint16_t width; // width in pixels
+ uint16_t height; // height in pixels
+  uint32_t timebase_denum;  // timebase denominator
+ uint32_t timebase_num; // timebase numerator. For example, if
+ // timebase_denum is 30 and timebase_num is 2, the
+ // unit of IvfFrameHeader.timestamp is 2/30
+ // seconds.
+ uint32_t num_frames; // number of frames in file
+ uint32_t unused; // unused
+};
+static_assert(
+ sizeof(IvfFileHeader) == 32,
+ "sizeof(IvfFileHeader) must be fixed since it will be used with file IO");
+
+struct MEDIA_EXPORT IvfFrameHeader {
+  // Byte swap integer fields between native and (on disk) little endian.
+ void ByteSwap();
+
+ uint32_t frame_size; // Size of frame in bytes (not including the header)
+ uint64_t timestamp; // 64-bit presentation timestamp in unit timebase,
+ // which is defined in IvfFileHeader.
+};
+static_assert(
+ sizeof(IvfFrameHeader) == 12,
+ "sizeof(IvfFrameHeader) must be fixed since it will be used with file IO");
+#pragma pack(pop)
+
+// IVF is a simple container format for video frame. It is used by libvpx to
+// transport VP8 and VP9 bitstream.
+class MEDIA_EXPORT IvfParser {
+ public:
+ IvfParser();
+
+ // Initializes the parser for IVF |stream| with size |size| and parses the
+ // file header. Returns true on success.
+ bool Initialize(const uint8_t* stream,
+ size_t size,
+ IvfFileHeader* file_header);
+
+ // Parses the next frame. Returns true if the next frame is parsed without
+ // error. |frame_header| will be filled with the frame header and |payload|
+ // will point to frame payload (inside the |stream| buffer given to
+ // Initialize.)
+ bool ParseNextFrame(IvfFrameHeader* frame_header, const uint8_t** payload);
+
+ private:
+ bool ParseFileHeader(IvfFileHeader* file_header);
+
+ // Current reading position of input stream.
+ const uint8_t* ptr_;
+
+ // The end position of input stream.
+ const uint8_t* end_;
+
+ DISALLOW_COPY_AND_ASSIGN(IvfParser);
+};
+
+} // namespace media
+
+#endif // MEDIA_FILTERS_IVF_PARSER_H_
diff --git a/chromium/media/filters/ivf_parser_unittest.cc b/chromium/media/filters/ivf_parser_unittest.cc
new file mode 100644
index 00000000000..2d89dc2f745
--- /dev/null
+++ b/chromium/media/filters/ivf_parser_unittest.cc
@@ -0,0 +1,56 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/memory_mapped_file.h"
+#include "media/base/test_data_util.h"
+#include "media/filters/ivf_parser.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+TEST(IvfParserTest, StreamFileParsing) {
+ base::FilePath file_path = GetTestDataFilePath("test-25fps.vp8");
+
+ base::MemoryMappedFile stream;
+ ASSERT_TRUE(stream.Initialize(file_path)) << "Couldn't open stream file: "
+ << file_path.MaybeAsASCII();
+
+ IvfParser parser;
+ IvfFileHeader file_header = {};
+
+ EXPECT_TRUE(parser.Initialize(stream.data(), stream.length(), &file_header));
+
+ // Check file header fields.
+ EXPECT_EQ(0, memcmp(file_header.signature, kIvfHeaderSignature,
+ sizeof(file_header.signature)));
+ EXPECT_EQ(0, file_header.version);
+ EXPECT_EQ(sizeof(IvfFileHeader), file_header.header_size);
+ EXPECT_EQ(0x30385056u, file_header.fourcc); // VP80
+ EXPECT_EQ(320u, file_header.width);
+ EXPECT_EQ(240u, file_header.height);
+ EXPECT_EQ(50u, file_header.timebase_denum);
+ EXPECT_EQ(2u, file_header.timebase_num);
+ EXPECT_EQ(250u, file_header.num_frames);
+
+ IvfFrameHeader frame_header;
+ size_t num_parsed_frames = 0;
+ const uint8_t* payload = nullptr;
+ while (parser.ParseNextFrame(&frame_header, &payload)) {
+ ++num_parsed_frames;
+ EXPECT_TRUE(payload != nullptr);
+
+ // Only check the first frame.
+ if (num_parsed_frames == 1u) {
+ EXPECT_EQ(14788u, frame_header.frame_size);
+ EXPECT_EQ(0u, frame_header.timestamp);
+ EXPECT_EQ(
+ static_cast<ptrdiff_t>(sizeof(file_header) + sizeof(frame_header)),
+ payload - stream.data());
+ }
+ }
+
+ EXPECT_EQ(file_header.num_frames, num_parsed_frames);
+}
+
+} // namespace media
diff --git a/chromium/media/filters/jpeg_parser.cc b/chromium/media/filters/jpeg_parser.cc
index 7400dc183a6..d46a7e306a4 100644
--- a/chromium/media/filters/jpeg_parser.cc
+++ b/chromium/media/filters/jpeg_parser.cc
@@ -33,30 +33,6 @@ using base::BigEndianReader;
namespace media {
-namespace {
-enum JpegMarker {
- SOF0 = 0xC0, // start of frame (baseline)
- SOF1 = 0xC1, // start of frame (extended sequential)
- SOF2 = 0xC2, // start of frame (progressive)
- SOF3 = 0xC3, // start of frame (lossless))
- SOF5 = 0xC5, // start of frame (differential, sequential)
- SOF6 = 0xC6, // start of frame (differential, progressive)
- SOF7 = 0xC7, // start of frame (differential, lossless)
- SOF9 = 0xC9, // start of frame (arithmetic coding, extended)
- SOF10 = 0xCA, // start of frame (arithmetic coding, progressive)
- SOF11 = 0xCB, // start of frame (arithmetic coding, lossless)
- SOF13 = 0xCD, // start of frame (differential, arithmetic, sequential)
- SOF14 = 0xCE, // start of frame (differential, arithmetic, progressive)
- SOF15 = 0xCF, // start of frame (differential, arithmetic, lossless)
- DHT = 0xC4, // define huffman table
- SOI = 0xD8, // start of image
- SOS = 0xDA, // start of scan
- DQT = 0xDB, // define quantization table
- DRI = 0xDD, // define restart internal
- MARKER1 = 0xFF, // jpeg marker prefix
-};
-}
-
static bool InRange(int value, int a, int b) {
return a <= value && value <= b;
}
@@ -293,6 +269,66 @@ static bool ParseSOS(const char* buffer,
return true;
}
+// |eoi_ptr| will point to the end of image (after EOI marker) after search
+// succeeds. Returns true on EOI marker found, or false.
+static bool SearchEOI(const char* buffer, size_t length, const char** eoi_ptr) {
+ DCHECK(buffer);
+ DCHECK(eoi_ptr);
+ BigEndianReader reader(buffer, length);
+ uint8_t marker2;
+
+ while (reader.remaining() > 0) {
+ const char* marker1_ptr = static_cast<const char*>(
+ memchr(reader.ptr(), JPEG_MARKER_PREFIX, reader.remaining()));
+ if (!marker1_ptr)
+ return false;
+ reader.Skip(marker1_ptr - reader.ptr() + 1);
+
+ do {
+ READ_U8_OR_RETURN_FALSE(&marker2);
+ } while (marker2 == JPEG_MARKER_PREFIX); // skip fill bytes
+
+ switch (marker2) {
+ // Compressed data escape.
+ case 0x00:
+ break;
+ // Restart
+ case JPEG_RST0:
+ case JPEG_RST1:
+ case JPEG_RST2:
+ case JPEG_RST3:
+ case JPEG_RST4:
+ case JPEG_RST5:
+ case JPEG_RST6:
+ case JPEG_RST7:
+ break;
+ case JPEG_EOI:
+ *eoi_ptr = reader.ptr();
+ return true;
+ default:
+ // Skip for other markers.
+ uint16_t size;
+ READ_U16_OR_RETURN_FALSE(&size);
+ if (size < sizeof(size)) {
+ DLOG(ERROR) << "Ill-formed JPEG. Segment size (" << size
+ << ") is smaller than size field (" << sizeof(size)
+ << ")";
+ return false;
+ }
+ size -= sizeof(size);
+
+ if (!reader.Skip(size)) {
+ DLOG(ERROR) << "Ill-formed JPEG. Remaining size ("
+ << reader.remaining()
+ << ") is smaller than header specified (" << size << ")";
+ return false;
+ }
+ break;
+ }
+ }
+ return false;
+}
+
// |result| is already initialized to 0 in ParseJpegPicture.
static bool ParseSOI(const char* buffer,
size_t length,
@@ -309,21 +345,15 @@ static bool ParseSOI(const char* buffer,
// Once reached SOS, all neccesary data are parsed.
while (!has_marker_sos) {
READ_U8_OR_RETURN_FALSE(&marker1);
- if (marker1 != MARKER1)
+ if (marker1 != JPEG_MARKER_PREFIX)
return false;
do {
READ_U8_OR_RETURN_FALSE(&marker2);
- } while (marker2 == MARKER1); // skip fill bytes
+ } while (marker2 == JPEG_MARKER_PREFIX); // skip fill bytes
uint16_t size;
READ_U16_OR_RETURN_FALSE(&size);
- if (reader.remaining() < size) {
- DLOG(ERROR) << "Ill-formed JPEG. Remaining size (" << reader.remaining()
- << ") is smaller than header specified (" << size << ")";
- return false;
- }
-
// The size includes the size field itself.
if (size < sizeof(size)) {
DLOG(ERROR) << "Ill-formed JPEG. Segment size (" << size
@@ -332,48 +362,54 @@ static bool ParseSOI(const char* buffer,
}
size -= sizeof(size);
+ if (reader.remaining() < size) {
+ DLOG(ERROR) << "Ill-formed JPEG. Remaining size (" << reader.remaining()
+ << ") is smaller than header specified (" << size << ")";
+ return false;
+ }
+
switch (marker2) {
- case SOF0:
+ case JPEG_SOF0:
if (!ParseSOF(reader.ptr(), size, &result->frame_header)) {
DLOG(ERROR) << "ParseSOF failed";
return false;
}
break;
- case SOF1:
- case SOF2:
- case SOF3:
- case SOF5:
- case SOF6:
- case SOF7:
- case SOF9:
- case SOF10:
- case SOF11:
- case SOF13:
- case SOF14:
- case SOF15:
+ case JPEG_SOF1:
+ case JPEG_SOF2:
+ case JPEG_SOF3:
+ case JPEG_SOF5:
+ case JPEG_SOF6:
+ case JPEG_SOF7:
+ case JPEG_SOF9:
+ case JPEG_SOF10:
+ case JPEG_SOF11:
+ case JPEG_SOF13:
+ case JPEG_SOF14:
+ case JPEG_SOF15:
DLOG(ERROR) << "Only SOF0 (baseline) is supported, but got SOF"
- << (marker2 - SOF0);
+ << (marker2 - JPEG_SOF0);
return false;
- case DQT:
+ case JPEG_DQT:
if (!ParseDQT(reader.ptr(), size, result->q_table)) {
DLOG(ERROR) << "ParseDQT failed";
return false;
}
has_marker_dqt = true;
break;
- case DHT:
+ case JPEG_DHT:
if (!ParseDHT(reader.ptr(), size, result->dc_table, result->ac_table)) {
DLOG(ERROR) << "ParseDHT failed";
return false;
}
break;
- case DRI:
+ case JPEG_DRI:
if (!ParseDRI(reader.ptr(), size, &result->restart_interval)) {
DLOG(ERROR) << "ParseDRI failed";
return false;
}
break;
- case SOS:
+ case JPEG_SOS:
if (!ParseSOS(reader.ptr(), size, result->frame_header,
&result->scan)) {
DLOG(ERROR) << "ParseSOS failed";
@@ -396,6 +432,8 @@ static bool ParseSOI(const char* buffer,
// Scan data follows scan header immediately.
result->data = reader.ptr();
result->data_size = reader.remaining();
+ const size_t kSoiSize = 2;
+ result->image_size = length + kSoiSize;
return true;
}
@@ -411,7 +449,7 @@ bool ParseJpegPicture(const uint8_t* buffer,
uint8_t marker1, marker2;
READ_U8_OR_RETURN_FALSE(&marker1);
READ_U8_OR_RETURN_FALSE(&marker2);
- if (marker1 != MARKER1 || marker2 != SOI) {
+ if (marker1 != JPEG_MARKER_PREFIX || marker2 != JPEG_SOI) {
DLOG(ERROR) << "Not a JPEG";
return false;
}
@@ -419,4 +457,25 @@ bool ParseJpegPicture(const uint8_t* buffer,
return ParseSOI(reader.ptr(), reader.remaining(), result);
}
+bool ParseJpegStream(const uint8_t* buffer,
+ size_t length,
+ JpegParseResult* result) {
+ DCHECK(buffer);
+ DCHECK(result);
+ if (!ParseJpegPicture(buffer, length, result))
+ return false;
+
+ BigEndianReader reader(
+ reinterpret_cast<const char*>(result->data), result->data_size);
+ const char* eoi_ptr = nullptr;
+ if (!SearchEOI(reader.ptr(), reader.remaining(), &eoi_ptr)) {
+ DLOG(ERROR) << "SearchEOI failed";
+ return false;
+ }
+ DCHECK(eoi_ptr);
+ result->data_size = eoi_ptr - result->data;
+ result->image_size = eoi_ptr - reinterpret_cast<const char*>(buffer);
+ return true;
+}
+
} // namespace media
diff --git a/chromium/media/filters/jpeg_parser.h b/chromium/media/filters/jpeg_parser.h
index f27fa582d2d..0d9e17fa544 100644
--- a/chromium/media/filters/jpeg_parser.h
+++ b/chromium/media/filters/jpeg_parser.h
@@ -11,6 +11,42 @@
namespace media {
+// It's not a full featured JPEG parser implementation. It only parses JPEG
+// baseline sequential process. For explanations of each struct and its
+// members, see JPEG specification at
+// http://www.w3.org/Graphics/JPEG/itu-t81.pdf.
+
+enum JpegMarker {
+ JPEG_SOF0 = 0xC0, // start of frame (baseline)
+ JPEG_SOF1 = 0xC1, // start of frame (extended sequential)
+ JPEG_SOF2 = 0xC2, // start of frame (progressive)
+  JPEG_SOF3 = 0xC3,   // start of frame (lossless)
+ JPEG_DHT = 0xC4, // define huffman table
+ JPEG_SOF5 = 0xC5, // start of frame (differential, sequential)
+ JPEG_SOF6 = 0xC6, // start of frame (differential, progressive)
+ JPEG_SOF7 = 0xC7, // start of frame (differential, lossless)
+ JPEG_SOF9 = 0xC9, // start of frame (arithmetic coding, extended)
+ JPEG_SOF10 = 0xCA, // start of frame (arithmetic coding, progressive)
+ JPEG_SOF11 = 0xCB, // start of frame (arithmetic coding, lossless)
+ JPEG_SOF13 = 0xCD, // start of frame (differential, arithmetic, sequential)
+ JPEG_SOF14 = 0xCE, // start of frame (differential, arithmetic, progressive)
+ JPEG_SOF15 = 0xCF, // start of frame (differential, arithmetic, lossless)
+ JPEG_RST0 = 0xD0, // restart
+ JPEG_RST1 = 0xD1, // restart
+ JPEG_RST2 = 0xD2, // restart
+ JPEG_RST3 = 0xD3, // restart
+ JPEG_RST4 = 0xD4, // restart
+ JPEG_RST5 = 0xD5, // restart
+ JPEG_RST6 = 0xD6, // restart
+ JPEG_RST7 = 0xD7, // restart
+ JPEG_SOI = 0xD8, // start of image
+ JPEG_EOI = 0xD9, // end of image
+ JPEG_SOS = 0xDA, // start of scan
+ JPEG_DQT = 0xDB, // define quantization table
+  JPEG_DRI = 0xDD,   // define restart interval
+ JPEG_MARKER_PREFIX = 0xFF, // jpeg marker prefix
+};
+
const size_t kJpegMaxHuffmanTableNumBaseline = 2;
const size_t kJpegMaxComponents = 4;
const size_t kJpegMaxQuantizationTableNum = 4;
@@ -64,20 +100,26 @@ struct JpegParseResult {
uint16_t restart_interval;
JpegScanHeader scan;
const char* data;
+ // The size of compressed data of the first image.
size_t data_size;
+ // The size of the first entire image including header.
+ size_t image_size;
};
// Parses JPEG picture in |buffer| with |length|. Returns true iff header is
// valid and JPEG baseline sequential process is present. If parsed
// successfully, |result| is the parsed result.
-// It's not a full featured JPEG parser implememtation. It only parses JPEG
-// baseline sequential process. For explanations of each struct and its
-// members, see JPEG specification at
-// http://www.w3.org/Graphics/JPEG/itu-t81.pdf.
MEDIA_EXPORT bool ParseJpegPicture(const uint8_t* buffer,
size_t length,
JpegParseResult* result);
+// Parses the first image of JPEG stream in |buffer| with |length|. Returns
+// true iff header is valid and JPEG baseline sequential process is present.
+// If parsed successfully, |result| is the parsed result.
+MEDIA_EXPORT bool ParseJpegStream(const uint8_t* buffer,
+ size_t length,
+ JpegParseResult* result);
+
} // namespace media
#endif // MEDIA_FILTERS_JPEG_PARSER_H_
diff --git a/chromium/media/filters/jpeg_parser_unittest.cc b/chromium/media/filters/jpeg_parser_unittest.cc
index f2ae889a4a3..40e30b69f17 100644
--- a/chromium/media/filters/jpeg_parser_unittest.cc
+++ b/chromium/media/filters/jpeg_parser_unittest.cc
@@ -76,6 +76,7 @@ TEST(JpegParserTest, Parsing) {
EXPECT_EQ(1, result.scan.components[2].dc_selector);
EXPECT_EQ(1, result.scan.components[2].ac_selector);
EXPECT_EQ(121150u, result.data_size);
+ EXPECT_EQ(121358u, result.image_size);
}
TEST(JpegParserTest, CodedSizeNotEqualVisibleSize) {
diff --git a/chromium/media/filters/opus_audio_decoder.cc b/chromium/media/filters/opus_audio_decoder.cc
index 181f34014da..c0fde498547 100644
--- a/chromium/media/filters/opus_audio_decoder.cc
+++ b/chromium/media/filters/opus_audio_decoder.cc
@@ -12,8 +12,8 @@
#include "media/base/audio_decoder_config.h"
#include "media/base/audio_discard_helper.h"
#include "media/base/bind_to_current_loop.h"
-#include "media/base/buffers.h"
#include "media/base/decoder_buffer.h"
+#include "media/base/timestamp_constants.h"
#include "third_party/opus/src/include/opus.h"
#include "third_party/opus/src/include/opus_multistream.h"
@@ -245,9 +245,7 @@ static bool ParseOpusExtraData(const uint8* data, int data_size,
OpusAudioDecoder::OpusAudioDecoder(
const scoped_refptr<base::SingleThreadTaskRunner>& task_runner)
- : task_runner_(task_runner),
- opus_decoder_(NULL),
- start_input_timestamp_(kNoTimestamp()) {}
+ : task_runner_(task_runner), opus_decoder_(nullptr) {}
std::string OpusAudioDecoder::GetDisplayName() const {
return "OpusAudioDecoder";
@@ -293,7 +291,6 @@ OpusAudioDecoder::~OpusAudioDecoder() {
return;
opus_multistream_decoder_ctl(opus_decoder_, OPUS_RESET_STATE);
- ResetTimestampState();
CloseDecoder();
}
@@ -319,14 +316,6 @@ void OpusAudioDecoder::DecodeBuffer(
return;
}
- // Apply the necessary codec delay.
- if (start_input_timestamp_ == kNoTimestamp())
- start_input_timestamp_ = input->timestamp();
- if (!discard_helper_->initialized() &&
- input->timestamp() == start_input_timestamp_) {
- discard_helper_->Reset(config_.codec_delay());
- }
-
scoped_refptr<AudioBuffer> output_buffer;
if (!Decode(input, &output_buffer)) {
@@ -420,22 +409,21 @@ bool OpusAudioDecoder::ConfigureDecoder() {
return false;
}
- discard_helper_.reset(
- new AudioDiscardHelper(config_.samples_per_second(), 0));
- start_input_timestamp_ = kNoTimestamp();
+ ResetTimestampState();
return true;
}
void OpusAudioDecoder::CloseDecoder() {
if (opus_decoder_) {
opus_multistream_decoder_destroy(opus_decoder_);
- opus_decoder_ = NULL;
+ opus_decoder_ = nullptr;
}
}
void OpusAudioDecoder::ResetTimestampState() {
- discard_helper_->Reset(
- discard_helper_->TimeDeltaToFrames(config_.seek_preroll()));
+ discard_helper_.reset(
+ new AudioDiscardHelper(config_.samples_per_second(), 0));
+ discard_helper_->Reset(config_.codec_delay());
}
bool OpusAudioDecoder::Decode(const scoped_refptr<DecoderBuffer>& input,
@@ -479,7 +467,7 @@ bool OpusAudioDecoder::Decode(const scoped_refptr<DecoderBuffer>& input,
// Handles discards and timestamping. Discard the buffer if more data needed.
if (!discard_helper_->ProcessBuffers(input, *output_buffer))
- *output_buffer = NULL;
+ *output_buffer = nullptr;
return true;
}
diff --git a/chromium/media/filters/opus_audio_decoder.h b/chromium/media/filters/opus_audio_decoder.h
index 3b0cd7164ae..116fcb0be88 100644
--- a/chromium/media/filters/opus_audio_decoder.h
+++ b/chromium/media/filters/opus_audio_decoder.h
@@ -56,11 +56,6 @@ class MEDIA_EXPORT OpusAudioDecoder : public AudioDecoder {
AudioDecoderConfig config_;
OutputCB output_cb_;
OpusMSDecoder* opus_decoder_;
-
- // When the input timestamp is |start_input_timestamp_| the decoder needs to
- // drop |config_.codec_delay()| frames.
- base::TimeDelta start_input_timestamp_;
-
scoped_ptr<AudioDiscardHelper> discard_helper_;
DISALLOW_IMPLICIT_CONSTRUCTORS(OpusAudioDecoder);
diff --git a/chromium/media/filters/source_buffer_platform.cc b/chromium/media/filters/source_buffer_platform.cc
index 6457e8cca0c..89761a12604 100644
--- a/chromium/media/filters/source_buffer_platform.cc
+++ b/chromium/media/filters/source_buffer_platform.cc
@@ -8,7 +8,7 @@ namespace media {
// 12MB: approximately 5 minutes of 320Kbps content.
// 150MB: approximately 5 minutes of 4Mbps content.
-const int kSourceBufferAudioMemoryLimit = 12 * 1024 * 1024;
-const int kSourceBufferVideoMemoryLimit = 150 * 1024 * 1024;
+const size_t kSourceBufferAudioMemoryLimit = 12 * 1024 * 1024;
+const size_t kSourceBufferVideoMemoryLimit = 150 * 1024 * 1024;
} // namespace media
diff --git a/chromium/media/filters/source_buffer_platform.h b/chromium/media/filters/source_buffer_platform.h
index b063f99a5e4..7feb05bff97 100644
--- a/chromium/media/filters/source_buffer_platform.h
+++ b/chromium/media/filters/source_buffer_platform.h
@@ -5,13 +5,14 @@
#ifndef MEDIA_FILTERS_SOURCE_BUFFER_PLATFORM_H_
#define MEDIA_FILTERS_SOURCE_BUFFER_PLATFORM_H_
+#include "base/basictypes.h"
#include "media/base/media_export.h"
namespace media {
// The maximum amount of data in bytes the stream will keep in memory.
-MEDIA_EXPORT extern const int kSourceBufferAudioMemoryLimit;
-MEDIA_EXPORT extern const int kSourceBufferVideoMemoryLimit;
+MEDIA_EXPORT extern const size_t kSourceBufferAudioMemoryLimit;
+MEDIA_EXPORT extern const size_t kSourceBufferVideoMemoryLimit;
} // namespace media
diff --git a/chromium/media/filters/source_buffer_platform_lowmem.cc b/chromium/media/filters/source_buffer_platform_lowmem.cc
index 79757cd81c1..90ed051b6cf 100644
--- a/chromium/media/filters/source_buffer_platform_lowmem.cc
+++ b/chromium/media/filters/source_buffer_platform_lowmem.cc
@@ -8,7 +8,7 @@ namespace media {
// 2MB: approximately 1 minute of 256Kbps content.
// 30MB: approximately 1 minute of 4Mbps content.
-const int kSourceBufferAudioMemoryLimit = 2 * 1024 * 1024;
-const int kSourceBufferVideoMemoryLimit = 30 * 1024 * 1024;
+const size_t kSourceBufferAudioMemoryLimit = 2 * 1024 * 1024;
+const size_t kSourceBufferVideoMemoryLimit = 30 * 1024 * 1024;
} // namespace media
diff --git a/chromium/media/filters/source_buffer_range.cc b/chromium/media/filters/source_buffer_range.cc
index 4fad27dbcef..866a0120452 100644
--- a/chromium/media/filters/source_buffer_range.cc
+++ b/chromium/media/filters/source_buffer_range.cc
@@ -6,6 +6,8 @@
#include <algorithm>
+#include "media/base/timestamp_constants.h"
+
namespace media {
// Comparison operators for std::upper_bound() and std::lower_bound().
@@ -56,6 +58,7 @@ void SourceBufferRange::AppendBuffersToEnd(const BufferQueue& new_buffers) {
++itr) {
DCHECK((*itr)->GetDecodeTimestamp() != kNoDecodeTimestamp());
buffers_.push_back(*itr);
+ DCHECK_GE((*itr)->data_size(), 0);
size_in_bytes_ += (*itr)->data_size();
if ((*itr)->is_key_frame()) {
@@ -225,12 +228,12 @@ bool SourceBufferRange::TruncateAt(
return TruncateAt(starting_point, removed_buffers);
}
-int SourceBufferRange::DeleteGOPFromFront(BufferQueue* deleted_buffers) {
+size_t SourceBufferRange::DeleteGOPFromFront(BufferQueue* deleted_buffers) {
DCHECK(!FirstGOPContainsNextBufferPosition());
DCHECK(deleted_buffers);
int buffers_deleted = 0;
- int total_bytes_deleted = 0;
+ size_t total_bytes_deleted = 0;
KeyframeMap::iterator front = keyframe_map_.begin();
DCHECK(front != keyframe_map_.end());
@@ -247,7 +250,9 @@ int SourceBufferRange::DeleteGOPFromFront(BufferQueue* deleted_buffers) {
// Delete buffers from the beginning of the buffered range up until (but not
// including) the next keyframe.
for (int i = 0; i < end_index; i++) {
- int bytes_deleted = buffers_.front()->data_size();
+ DCHECK_GE(buffers_.front()->data_size(), 0);
+ size_t bytes_deleted = buffers_.front()->data_size();
+ DCHECK_GE(size_in_bytes_, bytes_deleted);
size_in_bytes_ -= bytes_deleted;
total_bytes_deleted += bytes_deleted;
deleted_buffers->push_back(buffers_.front());
@@ -271,7 +276,7 @@ int SourceBufferRange::DeleteGOPFromFront(BufferQueue* deleted_buffers) {
return total_bytes_deleted;
}
-int SourceBufferRange::DeleteGOPFromBack(BufferQueue* deleted_buffers) {
+size_t SourceBufferRange::DeleteGOPFromBack(BufferQueue* deleted_buffers) {
DCHECK(!LastGOPContainsNextBufferPosition());
DCHECK(deleted_buffers);
@@ -285,9 +290,11 @@ int SourceBufferRange::DeleteGOPFromBack(BufferQueue* deleted_buffers) {
size_t goal_size = back->second - keyframe_map_index_base_;
keyframe_map_.erase(back);
- int total_bytes_deleted = 0;
+ size_t total_bytes_deleted = 0;
while (buffers_.size() != goal_size) {
- int bytes_deleted = buffers_.back()->data_size();
+ DCHECK_GE(buffers_.back()->data_size(), 0);
+ size_t bytes_deleted = buffers_.back()->data_size();
+ DCHECK_GE(size_in_bytes_, bytes_deleted);
size_in_bytes_ -= bytes_deleted;
total_bytes_deleted += bytes_deleted;
// We're removing buffers from the back, so push each removed buffer to the
@@ -300,11 +307,10 @@ int SourceBufferRange::DeleteGOPFromBack(BufferQueue* deleted_buffers) {
return total_bytes_deleted;
}
-int SourceBufferRange::GetRemovalGOP(
+size_t SourceBufferRange::GetRemovalGOP(
DecodeTimestamp start_timestamp, DecodeTimestamp end_timestamp,
- int total_bytes_to_free, DecodeTimestamp* removal_end_timestamp) {
- int bytes_to_free = total_bytes_to_free;
- int bytes_removed = 0;
+ size_t total_bytes_to_free, DecodeTimestamp* removal_end_timestamp) {
+ size_t bytes_removed = 0;
KeyframeMap::iterator gop_itr = GetFirstKeyframeAt(start_timestamp, false);
if (gop_itr == keyframe_map_.end())
@@ -321,18 +327,19 @@ int SourceBufferRange::GetRemovalGOP(
if (gop_itr_prev != keyframe_map_.begin() && --gop_itr_prev == gop_end)
gop_end = gop_itr;
- while (gop_itr != gop_end && bytes_to_free > 0) {
+ while (gop_itr != gop_end && bytes_removed < total_bytes_to_free) {
++gop_itr;
- int gop_size = 0;
+ size_t gop_size = 0;
int next_gop_index = gop_itr == keyframe_map_.end() ?
buffers_.size() : gop_itr->second - keyframe_map_index_base_;
BufferQueue::iterator next_gop_start = buffers_.begin() + next_gop_index;
- for (; buffer_itr != next_gop_start; ++buffer_itr)
+ for (; buffer_itr != next_gop_start; ++buffer_itr) {
+ DCHECK_GE((*buffer_itr)->data_size(), 0);
gop_size += (*buffer_itr)->data_size();
+ }
bytes_removed += gop_size;
- bytes_to_free -= gop_size;
}
if (bytes_removed > 0) {
*removal_end_timestamp = gop_itr == keyframe_map_.end() ?
@@ -341,6 +348,16 @@ int SourceBufferRange::GetRemovalGOP(
return bytes_removed;
}
+bool SourceBufferRange::FirstGOPEarlierThanMediaTime(
+ DecodeTimestamp media_time) const {
+ if (keyframe_map_.size() == 1u)
+ return (GetEndTimestamp() < media_time);
+
+ KeyframeMap::const_iterator second_gop = keyframe_map_.begin();
+ ++second_gop;
+ return second_gop->first <= media_time;
+}
+
bool SourceBufferRange::FirstGOPContainsNextBufferPosition() const {
if (!HasNextBufferPosition())
return false;
@@ -372,8 +389,10 @@ void SourceBufferRange::FreeBufferRange(
const BufferQueue::iterator& ending_point) {
for (BufferQueue::iterator itr = starting_point;
itr != ending_point; ++itr) {
- size_in_bytes_ -= (*itr)->data_size();
- DCHECK_GE(size_in_bytes_, 0);
+ DCHECK_GE((*itr)->data_size(), 0);
+ size_t itr_data_size = static_cast<size_t>((*itr)->data_size());
+ DCHECK_GE(size_in_bytes_, itr_data_size);
+ size_in_bytes_ -= itr_data_size;
}
buffers_.erase(starting_point, ending_point);
}
diff --git a/chromium/media/filters/source_buffer_range.h b/chromium/media/filters/source_buffer_range.h
index 0c6a8b36f34..13ba873cc20 100644
--- a/chromium/media/filters/source_buffer_range.h
+++ b/chromium/media/filters/source_buffer_range.h
@@ -7,6 +7,7 @@
#include <map>
+#include "base/basictypes.h"
#include "base/callback.h"
#include "base/memory/ref_counted.h"
#include "media/base/stream_parser_buffer.h"
@@ -111,17 +112,19 @@ class SourceBufferRange {
// Deletes a GOP from the front or back of the range and moves these
// buffers into |deleted_buffers|. Returns the number of bytes deleted from
// the range (i.e. the size in bytes of |deleted_buffers|).
- int DeleteGOPFromFront(BufferQueue* deleted_buffers);
- int DeleteGOPFromBack(BufferQueue* deleted_buffers);
+ size_t DeleteGOPFromFront(BufferQueue* deleted_buffers);
+ size_t DeleteGOPFromBack(BufferQueue* deleted_buffers);
// Gets the range of GOP to secure at least |bytes_to_free| from
// [|start_timestamp|, |end_timestamp|).
// Returns the size of the buffers to secure if the buffers of
// [|start_timestamp|, |end_removal_timestamp|) is removed.
// Will not update |end_removal_timestamp| if the returned size is 0.
- int GetRemovalGOP(
+ size_t GetRemovalGOP(
DecodeTimestamp start_timestamp, DecodeTimestamp end_timestamp,
- int bytes_to_free, DecodeTimestamp* end_removal_timestamp);
+ size_t bytes_to_free, DecodeTimestamp* end_removal_timestamp);
+
+ bool FirstGOPEarlierThanMediaTime(DecodeTimestamp media_time) const;
// Indicates whether the GOP at the beginning or end of the range contains the
// next buffer position.
@@ -201,7 +204,7 @@ class SourceBufferRange {
bool GetBuffersInRange(DecodeTimestamp start, DecodeTimestamp end,
BufferQueue* buffers);
- int size_in_bytes() const { return size_in_bytes_; }
+ size_t size_in_bytes() const { return size_in_bytes_; }
private:
typedef std::map<DecodeTimestamp, int> KeyframeMap;
@@ -284,7 +287,7 @@ class SourceBufferRange {
InterbufferDistanceCB interbuffer_distance_cb_;
// Stores the amount of memory taken up by the data in |buffers_|.
- int size_in_bytes_;
+ size_t size_in_bytes_;
DISALLOW_COPY_AND_ASSIGN(SourceBufferRange);
};
diff --git a/chromium/media/filters/source_buffer_stream.cc b/chromium/media/filters/source_buffer_stream.cc
index e812023ad25..6b98b4fb12e 100644
--- a/chromium/media/filters/source_buffer_stream.cc
+++ b/chromium/media/filters/source_buffer_stream.cc
@@ -12,15 +12,34 @@
#include "base/logging.h"
#include "base/trace_event/trace_event.h"
#include "media/base/audio_splicer.h"
+#include "media/base/timestamp_constants.h"
#include "media/filters/source_buffer_platform.h"
#include "media/filters/source_buffer_range.h"
namespace media {
+namespace {
+
+// An arbitrarily-chosen number to estimate the duration of a buffer if none is
+// set and there's not enough information to get a better estimate.
+const int kDefaultBufferDurationInMs = 125;
+
+// Limit the number of MEDIA_LOG() logs for splice buffer generation warnings
+// and successes. Though these values are high enough to possibly exhaust the
+// media internals event cache (along with other events), these logs are
+// important for debugging splice generation.
+const int kMaxSpliceGenerationWarningLogs = 50;
+const int kMaxSpliceGenerationSuccessLogs = 20;
+
+// Limit the number of MEDIA_LOG() logs for track buffer time gaps.
+const int kMaxTrackBufferGapWarningLogs = 20;
+
+// Limit the number of MEDIA_LOG() logs for MSE GC algorithm warnings.
+const int kMaxGarbageCollectAlgorithmWarningLogs = 20;
+
// Helper method that returns true if |ranges| is sorted in increasing order,
// false otherwise.
-static bool IsRangeListSorted(
- const std::list<media::SourceBufferRange*>& ranges) {
+bool IsRangeListSorted(const std::list<media::SourceBufferRange*>& ranges) {
DecodeTimestamp prev = kNoDecodeTimestamp();
for (std::list<SourceBufferRange*>::const_iterator itr =
ranges.begin(); itr != ranges.end(); ++itr) {
@@ -39,25 +58,24 @@ static bool IsRangeListSorted(
// instead of an overall maximum interbuffer delta for range discontinuity
// detection, and adjust similarly for splice frame discontinuity detection.
// See http://crbug.com/351489 and http://crbug.com/351166.
-static base::TimeDelta ComputeFudgeRoom(base::TimeDelta approximate_duration) {
+base::TimeDelta ComputeFudgeRoom(base::TimeDelta approximate_duration) {
// Because we do not know exactly when is the next timestamp, any buffer
// that starts within 2x the approximate duration of a buffer is considered
// within this range.
return 2 * approximate_duration;
}
-// An arbitrarily-chosen number to estimate the duration of a buffer if none
-// is set and there's not enough information to get a better estimate.
-static int kDefaultBufferDurationInMs = 125;
-
// The amount of time the beginning of the buffered data can differ from the
// start time in order to still be considered the start of stream.
-static base::TimeDelta kSeekToStartFudgeRoom() {
+base::TimeDelta kSeekToStartFudgeRoom() {
return base::TimeDelta::FromMilliseconds(1000);
}
// Helper method for logging, converts a range into a readable string.
-static std::string RangeToString(const SourceBufferRange& range) {
+std::string RangeToString(const SourceBufferRange& range) {
+ if (range.size_in_bytes() == 0) {
+ return "[]";
+ }
std::stringstream ss;
ss << "[" << range.GetStartTimestamp().InSecondsF()
<< ";" << range.GetEndTimestamp().InSecondsF()
@@ -66,7 +84,7 @@ static std::string RangeToString(const SourceBufferRange& range) {
}
// Helper method for logging, converts a set of ranges into a readable string.
-static std::string RangesToString(const SourceBufferStream::RangeList& ranges) {
+std::string RangesToString(const SourceBufferStream::RangeList& ranges) {
if (ranges.empty())
return "<EMPTY>";
@@ -79,8 +97,27 @@ static std::string RangesToString(const SourceBufferStream::RangeList& ranges) {
return ss.str();
}
-static SourceBufferRange::GapPolicy TypeToGapPolicy(
- SourceBufferStream::Type type) {
+std::string BufferQueueToLogString(
+ const SourceBufferStream::BufferQueue& buffers) {
+ std::stringstream result;
+ if (buffers.front()->GetDecodeTimestamp().InMicroseconds() ==
+ buffers.front()->timestamp().InMicroseconds() &&
+ buffers.back()->GetDecodeTimestamp().InMicroseconds() ==
+ buffers.back()->timestamp().InMicroseconds()) {
+ result << "dts/pts=[" << buffers.front()->timestamp().InSecondsF() << ";"
+ << buffers.back()->timestamp().InSecondsF() << "(last frame dur="
+ << buffers.back()->duration().InSecondsF() << ")]";
+ } else {
+ result << "dts=[" << buffers.front()->GetDecodeTimestamp().InSecondsF()
+ << ";" << buffers.back()->GetDecodeTimestamp().InSecondsF()
+ << "] pts=[" << buffers.front()->timestamp().InSecondsF() << ";"
+ << buffers.back()->timestamp().InSecondsF() << "(last frame dur="
+ << buffers.back()->duration().InSecondsF() << ")]";
+ }
+ return result.str();
+}
+
+SourceBufferRange::GapPolicy TypeToGapPolicy(SourceBufferStream::Type type) {
switch (type) {
case SourceBufferStream::kAudio:
case SourceBufferStream::kVideo:
@@ -93,82 +130,53 @@ static SourceBufferRange::GapPolicy TypeToGapPolicy(
return SourceBufferRange::NO_GAPS_ALLOWED;
}
+} // namespace
+
SourceBufferStream::SourceBufferStream(const AudioDecoderConfig& audio_config,
- const LogCB& log_cb,
+ const scoped_refptr<MediaLog>& media_log,
bool splice_frames_enabled)
- : log_cb_(log_cb),
- current_config_index_(0),
- append_config_index_(0),
- seek_pending_(false),
- end_of_stream_(false),
+ : media_log_(media_log),
seek_buffer_timestamp_(kNoTimestamp()),
- selected_range_(NULL),
media_segment_start_time_(kNoDecodeTimestamp()),
range_for_next_append_(ranges_.end()),
- new_media_segment_(false),
last_appended_buffer_timestamp_(kNoDecodeTimestamp()),
- last_appended_buffer_is_keyframe_(false),
last_output_buffer_timestamp_(kNoDecodeTimestamp()),
max_interbuffer_distance_(kNoTimestamp()),
memory_limit_(kSourceBufferAudioMemoryLimit),
- config_change_pending_(false),
- splice_buffers_index_(0),
- pending_buffers_complete_(false),
splice_frames_enabled_(splice_frames_enabled) {
DCHECK(audio_config.IsValidConfig());
audio_configs_.push_back(audio_config);
}
SourceBufferStream::SourceBufferStream(const VideoDecoderConfig& video_config,
- const LogCB& log_cb,
+ const scoped_refptr<MediaLog>& media_log,
bool splice_frames_enabled)
- : log_cb_(log_cb),
- current_config_index_(0),
- append_config_index_(0),
- seek_pending_(false),
- end_of_stream_(false),
+ : media_log_(media_log),
seek_buffer_timestamp_(kNoTimestamp()),
- selected_range_(NULL),
media_segment_start_time_(kNoDecodeTimestamp()),
range_for_next_append_(ranges_.end()),
- new_media_segment_(false),
last_appended_buffer_timestamp_(kNoDecodeTimestamp()),
- last_appended_buffer_is_keyframe_(false),
last_output_buffer_timestamp_(kNoDecodeTimestamp()),
max_interbuffer_distance_(kNoTimestamp()),
memory_limit_(kSourceBufferVideoMemoryLimit),
- config_change_pending_(false),
- splice_buffers_index_(0),
- pending_buffers_complete_(false),
splice_frames_enabled_(splice_frames_enabled) {
DCHECK(video_config.IsValidConfig());
video_configs_.push_back(video_config);
}
SourceBufferStream::SourceBufferStream(const TextTrackConfig& text_config,
- const LogCB& log_cb,
+ const scoped_refptr<MediaLog>& media_log,
bool splice_frames_enabled)
- : log_cb_(log_cb),
- current_config_index_(0),
- append_config_index_(0),
+ : media_log_(media_log),
text_track_config_(text_config),
- seek_pending_(false),
- end_of_stream_(false),
seek_buffer_timestamp_(kNoTimestamp()),
- selected_range_(NULL),
media_segment_start_time_(kNoDecodeTimestamp()),
range_for_next_append_(ranges_.end()),
- new_media_segment_(false),
last_appended_buffer_timestamp_(kNoDecodeTimestamp()),
- last_appended_buffer_is_keyframe_(false),
last_output_buffer_timestamp_(kNoDecodeTimestamp()),
max_interbuffer_distance_(kNoTimestamp()),
memory_limit_(kSourceBufferAudioMemoryLimit),
- config_change_pending_(false),
- splice_buffers_index_(0),
- pending_buffers_complete_(false),
- splice_frames_enabled_(splice_frames_enabled) {
-}
+ splice_frames_enabled_(splice_frames_enabled) {}
SourceBufferStream::~SourceBufferStream() {
while (!ranges_.empty()) {
@@ -213,20 +221,16 @@ bool SourceBufferStream::Append(const BufferQueue& buffers) {
DCHECK(media_segment_start_time_ <= buffers.front()->GetDecodeTimestamp());
DCHECK(!end_of_stream_);
- DVLOG(1) << __FUNCTION__ << " " << GetStreamTypeName() << ": buffers dts=["
- << buffers.front()->GetDecodeTimestamp().InSecondsF() << ";"
- << buffers.back()->GetDecodeTimestamp().InSecondsF() << "] pts=["
- << buffers.front()->timestamp().InSecondsF() << ";"
- << buffers.back()->timestamp().InSecondsF() << "(last frame dur="
- << buffers.back()->duration().InSecondsF() << ")]";
+ DVLOG(1) << __FUNCTION__ << " " << GetStreamTypeName()
+ << ": buffers " << BufferQueueToLogString(buffers);
// New media segments must begin with a keyframe.
// TODO(wolenetz): Relax this requirement. See http://crbug.com/229412.
if (new_media_segment_ && !buffers.front()->is_key_frame()) {
- MEDIA_LOG(ERROR, log_cb_) << "Media segment did not begin with key "
- "frame. Support for such segments will be "
- "available in a future version. Please see "
- "https://crbug.com/229412.";
+ MEDIA_LOG(ERROR, media_log_) << "Media segment did not begin with key "
+ "frame. Support for such segments will be "
+ "available in a future version. Please see "
+ "https://crbug.com/229412.";
return false;
}
@@ -236,7 +240,7 @@ bool SourceBufferStream::Append(const BufferQueue& buffers) {
if (media_segment_start_time_ < DecodeTimestamp() ||
buffers.front()->GetDecodeTimestamp() < DecodeTimestamp()) {
- MEDIA_LOG(ERROR, log_cb_)
+ MEDIA_LOG(ERROR, media_log_)
<< "Cannot append a media segment with negative timestamps.";
return false;
}
@@ -244,8 +248,9 @@ bool SourceBufferStream::Append(const BufferQueue& buffers) {
if (!IsNextTimestampValid(buffers.front()->GetDecodeTimestamp(),
buffers.front()->is_key_frame())) {
const DecodeTimestamp& dts = buffers.front()->GetDecodeTimestamp();
- MEDIA_LOG(ERROR, log_cb_) << "Invalid same timestamp construct detected at"
- << " time " << dts.InSecondsF();
+ MEDIA_LOG(ERROR, media_log_)
+ << "Invalid same timestamp construct detected at"
+ << " time " << dts.InSecondsF();
return false;
}
@@ -341,11 +346,13 @@ bool SourceBufferStream::Append(const BufferQueue& buffers) {
track_buffer_.insert(track_buffer_.end(), deleted_buffers.begin(),
deleted_buffers.end());
- DVLOG(3) << __FUNCTION__ << " Added " << deleted_buffers.size()
- << " deleted buffers to track buffer. TB size is now "
+ DVLOG(3) << __FUNCTION__ << " " << GetStreamTypeName() << " Added "
+ << deleted_buffers.size()
+ << " buffers to track buffer. TB size is now "
<< track_buffer_.size();
} else {
- DVLOG(3) << __FUNCTION__ << " No deleted buffers for track buffer";
+ DVLOG(3) << __FUNCTION__ << " " << GetStreamTypeName()
+ << " No deleted buffers for track buffer";
}
// Prune any extra buffers in |track_buffer_| if new keyframes
@@ -359,8 +366,6 @@ bool SourceBufferStream::Append(const BufferQueue& buffers) {
SetSelectedRangeIfNeeded(next_buffer_timestamp);
- GarbageCollectIfNeeded();
-
DVLOG(1) << __FUNCTION__ << " " << GetStreamTypeName()
<< ": done. ranges_=" << RangesToString(ranges_);
DCHECK(IsRangeListSorted(ranges_));
@@ -500,6 +505,7 @@ void SourceBufferStream::ResetSeekState() {
track_buffer_.clear();
config_change_pending_ = false;
last_output_buffer_timestamp_ = kNoDecodeTimestamp();
+ just_exhausted_track_buffer_ = false;
splice_buffers_index_ = 0;
pending_buffer_ = NULL;
pending_buffers_complete_ = false;
@@ -533,16 +539,17 @@ bool SourceBufferStream::IsMonotonicallyIncreasing(
if (prev_timestamp != kNoDecodeTimestamp()) {
if (current_timestamp < prev_timestamp) {
- MEDIA_LOG(ERROR, log_cb_) << "Buffers did not monotonically increase.";
+ MEDIA_LOG(ERROR, media_log_)
+ << "Buffers did not monotonically increase.";
return false;
}
if (current_timestamp == prev_timestamp &&
!SourceBufferRange::AllowSameTimestamp(prev_is_keyframe,
current_is_keyframe)) {
- MEDIA_LOG(ERROR, log_cb_) << "Unexpected combination of buffers with"
- << " the same timestamp detected at "
- << current_timestamp.InSecondsF();
+ MEDIA_LOG(ERROR, media_log_) << "Unexpected combination of buffers with"
+ << " the same timestamp detected at "
+ << current_timestamp.InSecondsF();
return false;
}
}
@@ -607,46 +614,133 @@ void SourceBufferStream::SetConfigIds(const BufferQueue& buffers) {
}
}
-void SourceBufferStream::GarbageCollectIfNeeded() {
+bool SourceBufferStream::GarbageCollectIfNeeded(DecodeTimestamp media_time,
+ size_t newDataSize) {
+ DCHECK(media_time != kNoDecodeTimestamp());
+ // Garbage collection should only happen before/during appending new data,
+ // which should not happen in end-of-stream state.
+ DCHECK(!end_of_stream_);
// Compute size of |ranges_|.
- int ranges_size = 0;
- for (RangeList::iterator itr = ranges_.begin(); itr != ranges_.end(); ++itr)
- ranges_size += (*itr)->size_in_bytes();
+ size_t ranges_size = GetBufferedSize();
+
+ // Sanity and overflow checks
+ if ((newDataSize > memory_limit_) ||
+ (ranges_size + newDataSize < ranges_size)) {
+ LIMITED_MEDIA_LOG(DEBUG, media_log_, num_garbage_collect_algorithm_logs_,
+ kMaxGarbageCollectAlgorithmWarningLogs)
+ << GetStreamTypeName() << " stream: "
+ << "new append of newDataSize=" << newDataSize
+ << " bytes exceeds memory_limit_=" << memory_limit_
+ << ", currently buffered ranges_size=" << ranges_size;
+ return false;
+ }
// Return if we're under or at the memory limit.
- if (ranges_size <= memory_limit_)
- return;
+ if (ranges_size + newDataSize <= memory_limit_)
+ return true;
- int bytes_to_free = ranges_size - memory_limit_;
+ size_t bytes_to_free = ranges_size + newDataSize - memory_limit_;
DVLOG(2) << __FUNCTION__ << " " << GetStreamTypeName() << ": Before GC"
- << " ranges_size=" << ranges_size
+ << " media_time=" << media_time.InSecondsF()
<< " ranges_=" << RangesToString(ranges_)
- << " memory_limit_=" << memory_limit_;
+ << " seek_pending_=" << seek_pending_
+ << " ranges_size=" << ranges_size
+ << " newDataSize=" << newDataSize
+ << " memory_limit_=" << memory_limit_
+ << " last_appended_buffer_timestamp_="
+ << last_appended_buffer_timestamp_.InSecondsF();
+
+ size_t bytes_freed = 0;
+
+ // If last appended buffer position was earlier than the current playback time
+ // then try deleting data between last append and current media_time.
+ if (last_appended_buffer_timestamp_ != kNoDecodeTimestamp() &&
+ last_appended_buffer_timestamp_ < media_time) {
+ size_t between = FreeBuffersAfterLastAppended(bytes_to_free, media_time);
+ DVLOG(3) << __FUNCTION__ << " FreeBuffersAfterLastAppended "
+ << " released " << between << " bytes"
+ << " ranges_=" << RangesToString(ranges_);
+ bytes_freed += between;
+
+ // Some players start appending data at the new seek target position before
+ // actually initiating the seek operation (i.e. they try to improve seek
+ // performance by prebuffering some data at the seek target position and
+ // initiating seek once enough data is pre-buffered). In those cases we'll
+ // see that data is being appended at some new position, but there is no
+ // pending seek reported yet. In this situation we need to try preserving
+ // the most recently appended data, i.e. data belonging to the same buffered
+ // range as the most recent append.
+ if (range_for_next_append_ != ranges_.end()) {
+ DCHECK((*range_for_next_append_)->GetStartTimestamp() <= media_time);
+ media_time = (*range_for_next_append_)->GetStartTimestamp();
+ }
+ }
- // Begin deleting after the last appended buffer.
- int bytes_freed = FreeBuffersAfterLastAppended(bytes_to_free);
+ // If there is an unsatisfied pending seek, we can safely remove all data that
+ // is earlier than seek target, then remove from the back until we reach the
+ // most recently appended GOP and then remove from the front if we still don't
+ // have enough space for the upcoming append.
+ if (bytes_freed < bytes_to_free && seek_pending_) {
+ DCHECK(!ranges_.empty());
+ // All data earlier than the seek target |media_time| can be removed safely
+ size_t front = FreeBuffers(bytes_to_free - bytes_freed, media_time, false);
+ DVLOG(3) << __FUNCTION__ << " Removed " << front << " bytes from the"
+ << " front. ranges_=" << RangesToString(ranges_);
+ bytes_freed += front;
+
+ // If removing data earlier than |media_time| didn't free up enough space,
+ // then try deleting from the back until we reach most recently appended GOP
+ if (bytes_freed < bytes_to_free) {
+ size_t back = FreeBuffers(bytes_to_free - bytes_freed, media_time, true);
+ DVLOG(3) << __FUNCTION__ << " Removed " << back << " bytes from the back"
+ << " ranges_=" << RangesToString(ranges_);
+ bytes_freed += back;
+ }
- // Begin deleting from the front.
- if (bytes_to_free - bytes_freed > 0)
- bytes_freed += FreeBuffers(bytes_to_free - bytes_freed, false);
+ // If even that wasn't enough, then try greedily deleting from the front,
+ // that should allow us to remove as much data as necessary to succeed.
+ if (bytes_freed < bytes_to_free) {
+ size_t front2 = FreeBuffers(bytes_to_free - bytes_freed,
+ ranges_.back()->GetEndTimestamp(), false);
+ DVLOG(3) << __FUNCTION__ << " Removed " << front2 << " bytes from the"
+ << " front. ranges_=" << RangesToString(ranges_);
+ bytes_freed += front2;
+ }
+ DCHECK(bytes_freed >= bytes_to_free);
+ }
- // Begin deleting from the back.
- if (bytes_to_free - bytes_freed > 0)
- bytes_freed += FreeBuffers(bytes_to_free - bytes_freed, true);
+ // Try removing data from the front of the SourceBuffer up to |media_time|
+ // position.
+ if (bytes_freed < bytes_to_free) {
+ size_t front = FreeBuffers(bytes_to_free - bytes_freed, media_time, false);
+ DVLOG(3) << __FUNCTION__ << " Removed " << front << " bytes from the"
+ << " front. ranges_=" << RangesToString(ranges_);
+ bytes_freed += front;
+ }
+
+ // Try removing data from the back of the SourceBuffer, until we reach the
+ // most recent append position.
+ if (bytes_freed < bytes_to_free) {
+ size_t back = FreeBuffers(bytes_to_free - bytes_freed, media_time, true);
+ DVLOG(3) << __FUNCTION__ << " Removed " << back << " bytes from the back."
+ << " ranges_=" << RangesToString(ranges_);
+ bytes_freed += back;
+ }
DVLOG(2) << __FUNCTION__ << " " << GetStreamTypeName() << ": After GC"
+ << " bytes_to_free=" << bytes_to_free
<< " bytes_freed=" << bytes_freed
<< " ranges_=" << RangesToString(ranges_);
+
+ return bytes_freed >= bytes_to_free;
}
-int SourceBufferStream::FreeBuffersAfterLastAppended(int total_bytes_to_free) {
- DecodeTimestamp next_buffer_timestamp = GetNextBufferTimestamp();
- if (last_appended_buffer_timestamp_ == kNoDecodeTimestamp() ||
- next_buffer_timestamp == kNoDecodeTimestamp() ||
- last_appended_buffer_timestamp_ >= next_buffer_timestamp) {
- return 0;
- }
+size_t SourceBufferStream::FreeBuffersAfterLastAppended(
+ size_t total_bytes_to_free, DecodeTimestamp media_time) {
+ DVLOG(4) << __FUNCTION__ << " last_appended_buffer_timestamp_="
+ << last_appended_buffer_timestamp_.InSecondsF()
+ << " media_time=" << media_time.InSecondsF();
DecodeTimestamp remove_range_start = last_appended_buffer_timestamp_;
if (last_appended_buffer_is_keyframe_)
@@ -656,81 +750,91 @@ int SourceBufferStream::FreeBuffersAfterLastAppended(int total_bytes_to_free) {
remove_range_start);
if (remove_range_start_keyframe != kNoDecodeTimestamp())
remove_range_start = remove_range_start_keyframe;
- if (remove_range_start >= next_buffer_timestamp)
+ if (remove_range_start >= media_time)
return 0;
DecodeTimestamp remove_range_end;
- int bytes_freed = GetRemovalRange(
- remove_range_start, next_buffer_timestamp, total_bytes_to_free,
- &remove_range_end);
+ size_t bytes_freed = GetRemovalRange(remove_range_start,
+ media_time,
+ total_bytes_to_free,
+ &remove_range_end);
if (bytes_freed > 0) {
+ DVLOG(4) << __FUNCTION__ << " removing ["
+ << remove_range_start.ToPresentationTime().InSecondsF() << ";"
+ << remove_range_end.ToPresentationTime().InSecondsF() << "]";
Remove(remove_range_start.ToPresentationTime(),
remove_range_end.ToPresentationTime(),
- next_buffer_timestamp.ToPresentationTime());
+ media_time.ToPresentationTime());
}
return bytes_freed;
}
-int SourceBufferStream::GetRemovalRange(
+size_t SourceBufferStream::GetRemovalRange(
DecodeTimestamp start_timestamp, DecodeTimestamp end_timestamp,
- int total_bytes_to_free, DecodeTimestamp* removal_end_timestamp) {
+ size_t total_bytes_to_free, DecodeTimestamp* removal_end_timestamp) {
DCHECK(start_timestamp >= DecodeTimestamp()) << start_timestamp.InSecondsF();
DCHECK(start_timestamp < end_timestamp)
<< "start " << start_timestamp.InSecondsF()
<< ", end " << end_timestamp.InSecondsF();
- int bytes_to_free = total_bytes_to_free;
- int bytes_freed = 0;
+ size_t bytes_freed = 0;
for (RangeList::iterator itr = ranges_.begin();
- itr != ranges_.end() && bytes_to_free > 0; ++itr) {
+ itr != ranges_.end() && bytes_freed < total_bytes_to_free; ++itr) {
SourceBufferRange* range = *itr;
if (range->GetStartTimestamp() >= end_timestamp)
break;
if (range->GetEndTimestamp() < start_timestamp)
continue;
- int bytes_removed = range->GetRemovalGOP(
+ size_t bytes_to_free = total_bytes_to_free - bytes_freed;
+ size_t bytes_removed = range->GetRemovalGOP(
start_timestamp, end_timestamp, bytes_to_free, removal_end_timestamp);
- bytes_to_free -= bytes_removed;
bytes_freed += bytes_removed;
}
return bytes_freed;
}
-int SourceBufferStream::FreeBuffers(int total_bytes_to_free,
- bool reverse_direction) {
+size_t SourceBufferStream::FreeBuffers(size_t total_bytes_to_free,
+ DecodeTimestamp media_time,
+ bool reverse_direction) {
TRACE_EVENT2("media", "SourceBufferStream::FreeBuffers",
"total bytes to free", total_bytes_to_free,
"reverse direction", reverse_direction);
- DCHECK_GT(total_bytes_to_free, 0);
- int bytes_to_free = total_bytes_to_free;
- int bytes_freed = 0;
+ DCHECK_GT(total_bytes_to_free, 0u);
+ size_t bytes_freed = 0;
// This range will save the last GOP appended to |range_for_next_append_|
// if the buffers surrounding it get deleted during garbage collection.
SourceBufferRange* new_range_for_append = NULL;
- while (!ranges_.empty() && bytes_to_free > 0) {
+ while (!ranges_.empty() && bytes_freed < total_bytes_to_free) {
SourceBufferRange* current_range = NULL;
BufferQueue buffers;
- int bytes_deleted = 0;
+ size_t bytes_deleted = 0;
if (reverse_direction) {
current_range = ranges_.back();
+ DVLOG(5) << "current_range=" << RangeToString(*current_range);
if (current_range->LastGOPContainsNextBufferPosition()) {
DCHECK_EQ(current_range, selected_range_);
+ DVLOG(5) << "current_range contains next read position, stopping GC";
break;
}
+ DVLOG(5) << "Deleting GOP from back: " << RangeToString(*current_range);
bytes_deleted = current_range->DeleteGOPFromBack(&buffers);
} else {
current_range = ranges_.front();
- if (current_range->FirstGOPContainsNextBufferPosition()) {
- DCHECK_EQ(current_range, selected_range_);
+ DVLOG(5) << "current_range=" << RangeToString(*current_range);
+ if (!current_range->FirstGOPEarlierThanMediaTime(media_time)) {
+ // We have removed all data up to the GOP that contains current playback
+ // position, we can't delete any further.
+ DVLOG(5) << "current_range contains playback position, stopping GC";
break;
}
+ DVLOG(4) << "Deleting GOP from front: " << RangeToString(*current_range);
bytes_deleted = current_range->DeleteGOPFromFront(&buffers);
}
@@ -739,6 +843,7 @@ int SourceBufferStream::FreeBuffers(int total_bytes_to_free,
if (end_timestamp == last_appended_buffer_timestamp_) {
DCHECK(last_appended_buffer_timestamp_ != kNoDecodeTimestamp());
DCHECK(!new_range_for_append);
+
// Create a new range containing these buffers.
new_range_for_append = new SourceBufferRange(
TypeToGapPolicy(GetType()),
@@ -747,7 +852,6 @@ int SourceBufferStream::FreeBuffers(int total_bytes_to_free,
base::Unretained(this)));
range_for_next_append_ = ranges_.end();
} else {
- bytes_to_free -= bytes_deleted;
bytes_freed += bytes_deleted;
}
@@ -758,6 +862,11 @@ int SourceBufferStream::FreeBuffers(int total_bytes_to_free,
delete current_range;
reverse_direction ? ranges_.pop_back() : ranges_.pop_front();
}
+
+ if (reverse_direction && new_range_for_append) {
+ // We don't want to delete any further, or we'll be creating gaps
+ break;
+ }
}
// Insert |new_range_for_append| into |ranges_|, if applicable.
@@ -879,6 +988,9 @@ void SourceBufferStream::PruneTrackBuffer(const DecodeTimestamp timestamp) {
track_buffer_.back()->GetDecodeTimestamp() >= timestamp) {
track_buffer_.pop_back();
}
+ DVLOG(3) << __FUNCTION__ << " " << GetStreamTypeName()
+ << " Removed all buffers with DTS >= " << timestamp.InSecondsF()
+ << ". New track buffer size:" << track_buffer_.size();
}
void SourceBufferStream::MergeWithAdjacentRangeIfNecessary(
@@ -1103,12 +1215,15 @@ SourceBufferStream::Status SourceBufferStream::GetNextBufferInternal(
DVLOG(3) << __FUNCTION__ << " Next buffer coming from track_buffer_";
*out_buffer = next_buffer;
track_buffer_.pop_front();
+ WarnIfTrackBufferExhaustionSkipsForward(*out_buffer);
last_output_buffer_timestamp_ = (*out_buffer)->GetDecodeTimestamp();
// If the track buffer becomes empty, then try to set the selected range
// based on the timestamp of this buffer being returned.
- if (track_buffer_.empty())
+ if (track_buffer_.empty()) {
+ just_exhausted_track_buffer_ = true;
SetSelectedRangeIfNeeded(last_output_buffer_timestamp_);
+ }
return kSuccess;
}
@@ -1132,10 +1247,34 @@ SourceBufferStream::Status SourceBufferStream::GetNextBufferInternal(
}
CHECK(selected_range_->GetNextBuffer(out_buffer));
+ WarnIfTrackBufferExhaustionSkipsForward(*out_buffer);
last_output_buffer_timestamp_ = (*out_buffer)->GetDecodeTimestamp();
return kSuccess;
}
+void SourceBufferStream::WarnIfTrackBufferExhaustionSkipsForward(
+ const scoped_refptr<StreamParserBuffer>& next_buffer) {
+ if (!just_exhausted_track_buffer_)
+ return;
+
+ just_exhausted_track_buffer_ = false;
+ DCHECK(next_buffer->is_key_frame());
+ DecodeTimestamp next_output_buffer_timestamp =
+ next_buffer->GetDecodeTimestamp();
+ base::TimeDelta delta =
+ next_output_buffer_timestamp - last_output_buffer_timestamp_;
+ DCHECK_GE(delta, base::TimeDelta());
+ if (delta > GetMaxInterbufferDistance()) {
+ LIMITED_MEDIA_LOG(DEBUG, media_log_, num_track_buffer_gap_warning_logs_,
+ kMaxTrackBufferGapWarningLogs)
+ << "Media append that overlapped current playback position caused time "
+ "gap in playing "
+ << GetStreamTypeName() << " stream because the next keyframe is "
+ << delta.InMilliseconds() << "ms beyond last overlapped frame. Media "
+ "may appear temporarily frozen.";
+ }
+}
+
DecodeTimestamp SourceBufferStream::GetNextBufferTimestamp() {
if (!track_buffer_.empty())
return track_buffer_.front()->GetDecodeTimestamp();
@@ -1187,8 +1326,10 @@ void SourceBufferStream::SeekAndSetSelectedRange(
}
void SourceBufferStream::SetSelectedRange(SourceBufferRange* range) {
- DVLOG(1) << __FUNCTION__ << " " << GetStreamTypeName()
- << ": " << selected_range_ << " -> " << range;
+ DVLOG(1) << __FUNCTION__ << " " << GetStreamTypeName() << ": "
+ << selected_range_ << " "
+ << (selected_range_ ? RangeToString(*selected_range_) : "")
+ << " -> " << range << " " << (range ? RangeToString(*range) : "");
if (selected_range_)
selected_range_->ResetNextBufferPosition();
DCHECK(!range || range->HasNextBufferPosition());
@@ -1212,6 +1353,13 @@ base::TimeDelta SourceBufferStream::GetBufferedDuration() const {
return ranges_.back()->GetBufferedEndTimestamp().ToPresentationTime();
}
+size_t SourceBufferStream::GetBufferedSize() const {
+ size_t ranges_size = 0;
+ for (const auto& range : ranges_)
+ ranges_size += range->size_in_bytes();
+ return ranges_size;
+}
+
void SourceBufferStream::MarkEndOfStream() {
DCHECK(!end_of_stream_);
end_of_stream_ = true;
@@ -1269,12 +1417,12 @@ bool SourceBufferStream::UpdateAudioConfig(const AudioDecoderConfig& config) {
DVLOG(3) << "UpdateAudioConfig.";
if (audio_configs_[0].codec() != config.codec()) {
- MEDIA_LOG(ERROR, log_cb_) << "Audio codec changes not allowed.";
+ MEDIA_LOG(ERROR, media_log_) << "Audio codec changes not allowed.";
return false;
}
if (audio_configs_[0].is_encrypted() != config.is_encrypted()) {
- MEDIA_LOG(ERROR, log_cb_) << "Audio encryption changes not allowed.";
+ MEDIA_LOG(ERROR, media_log_) << "Audio encryption changes not allowed.";
return false;
}
@@ -1300,12 +1448,12 @@ bool SourceBufferStream::UpdateVideoConfig(const VideoDecoderConfig& config) {
DVLOG(3) << "UpdateVideoConfig.";
if (video_configs_[0].codec() != config.codec()) {
- MEDIA_LOG(ERROR, log_cb_) << "Video codec changes not allowed.";
+ MEDIA_LOG(ERROR, media_log_) << "Video codec changes not allowed.";
return false;
}
if (video_configs_[0].is_encrypted() != config.is_encrypted()) {
- MEDIA_LOG(ERROR, log_cb_) << "Video encryption changes not allowed.";
+ MEDIA_LOG(ERROR, media_log_) << "Video encryption changes not allowed.";
return false;
}
@@ -1548,8 +1696,17 @@ void SourceBufferStream::GenerateSpliceFrame(const BufferQueue& new_buffers) {
//
// We also do not want to generate splices if the first new buffer replaces an
// existing buffer exactly.
- if (pre_splice_buffers.front()->timestamp() >= splice_timestamp)
+ if (pre_splice_buffers.front()->timestamp() >= splice_timestamp) {
+ LIMITED_MEDIA_LOG(DEBUG, media_log_, num_splice_generation_warning_logs_,
+ kMaxSpliceGenerationWarningLogs)
+ << "Skipping splice frame generation: first new buffer at "
+ << splice_timestamp.InMicroseconds()
+ << "us begins at or before existing buffer at "
+ << pre_splice_buffers.front()->timestamp().InMicroseconds() << "us.";
+ DVLOG(1) << "Skipping splice: overlapped buffers begin at or after the "
+ "first new buffer.";
return;
+ }
// If any |pre_splice_buffers| are already splices or preroll, do not generate
// a splice.
@@ -1557,12 +1714,22 @@ void SourceBufferStream::GenerateSpliceFrame(const BufferQueue& new_buffers) {
const BufferQueue& original_splice_buffers =
pre_splice_buffers[i]->splice_buffers();
if (!original_splice_buffers.empty()) {
+ LIMITED_MEDIA_LOG(DEBUG, media_log_, num_splice_generation_warning_logs_,
+ kMaxSpliceGenerationWarningLogs)
+ << "Skipping splice frame generation: overlapped buffers at "
+ << pre_splice_buffers[i]->timestamp().InMicroseconds()
+ << "us are in a previously buffered splice.";
DVLOG(1) << "Can't generate splice: overlapped buffers contain a "
"pre-existing splice.";
return;
}
if (pre_splice_buffers[i]->preroll_buffer().get()) {
+ LIMITED_MEDIA_LOG(DEBUG, media_log_, num_splice_generation_warning_logs_,
+ kMaxSpliceGenerationWarningLogs)
+ << "Skipping splice frame generation: overlapped buffers at "
+ << pre_splice_buffers[i]->timestamp().InMicroseconds()
+ << "us contain preroll.";
DVLOG(1) << "Can't generate splice: overlapped buffers contain preroll.";
return;
}
@@ -1570,7 +1737,7 @@ void SourceBufferStream::GenerateSpliceFrame(const BufferQueue& new_buffers) {
// Don't generate splice frames which represent less than a millisecond (which
// is frequently the extent of timestamp resolution for poorly encoded media)
- // or less than two frames (need at least two to crossfade).
+ // or less than two samples (need at least two to crossfade).
const base::TimeDelta splice_duration =
pre_splice_buffers.back()->timestamp() +
pre_splice_buffers.back()->duration() - splice_timestamp;
@@ -1579,15 +1746,27 @@ void SourceBufferStream::GenerateSpliceFrame(const BufferQueue& new_buffers) {
base::TimeDelta::FromSecondsD(
2.0 / audio_configs_[append_config_index_].samples_per_second()));
if (splice_duration < minimum_splice_duration) {
+ LIMITED_MEDIA_LOG(DEBUG, media_log_, num_splice_generation_warning_logs_,
+ kMaxSpliceGenerationWarningLogs)
+ << "Skipping splice frame generation: not enough samples for splicing "
+ "new buffer at "
+ << splice_timestamp.InMicroseconds() << "us. Have "
+ << splice_duration.InMicroseconds() << "us, but need "
+ << minimum_splice_duration.InMicroseconds() << "us.";
DVLOG(1) << "Can't generate splice: not enough samples for crossfade; have "
- << splice_duration.InMicroseconds() << " us, but need "
- << minimum_splice_duration.InMicroseconds() << " us.";
+ << splice_duration.InMicroseconds() << "us, but need "
+ << minimum_splice_duration.InMicroseconds() << "us.";
return;
}
DVLOG(1) << "Generating splice frame @ " << new_buffers.front()->timestamp()
<< ", splice duration: " << splice_duration.InMicroseconds()
<< " us";
+ LIMITED_MEDIA_LOG(DEBUG, media_log_, num_splice_generation_success_logs_,
+ kMaxSpliceGenerationSuccessLogs)
+ << "Generated splice of overlap duration "
+ << splice_duration.InMicroseconds() << "us into new buffer at "
+ << splice_timestamp.InMicroseconds() << "us.";
new_buffers.front()->ConvertToSpliceBuffer(pre_splice_buffers);
}
diff --git a/chromium/media/filters/source_buffer_stream.h b/chromium/media/filters/source_buffer_stream.h
index afef6f74202..deec40970ba 100644
--- a/chromium/media/filters/source_buffer_stream.h
+++ b/chromium/media/filters/source_buffer_stream.h
@@ -16,6 +16,7 @@
#include <utility>
#include <vector>
+#include "base/basictypes.h"
#include "base/memory/ref_counted.h"
#include "media/base/audio_decoder_config.h"
#include "media/base/media_export.h"
@@ -54,13 +55,13 @@ class MEDIA_EXPORT SourceBufferStream {
};
SourceBufferStream(const AudioDecoderConfig& audio_config,
- const LogCB& log_cb,
+ const scoped_refptr<MediaLog>& media_log,
bool splice_frames_enabled);
SourceBufferStream(const VideoDecoderConfig& video_config,
- const LogCB& log_cb,
+ const scoped_refptr<MediaLog>& media_log,
bool splice_frames_enabled);
SourceBufferStream(const TextTrackConfig& text_config,
- const LogCB& log_cb,
+ const scoped_refptr<MediaLog>& media_log,
bool splice_frames_enabled);
~SourceBufferStream();
@@ -89,6 +90,11 @@ class MEDIA_EXPORT SourceBufferStream {
void Remove(base::TimeDelta start, base::TimeDelta end,
base::TimeDelta duration);
+ // Frees up space if the SourceBufferStream is taking up too much memory.
+ // |media_time| is current playback position.
+ bool GarbageCollectIfNeeded(DecodeTimestamp media_time,
+ size_t newDataSize);
+
// Changes the SourceBufferStream's state so that it will start returning
// buffers starting from the closest keyframe before |timestamp|.
void Seek(base::TimeDelta timestamp);
@@ -119,6 +125,9 @@ class MEDIA_EXPORT SourceBufferStream {
// then base::TimeDelta() is returned.
base::TimeDelta GetBufferedDuration() const;
+ // Returns the size of the buffered data in bytes.
+ size_t GetBufferedSize() const;
+
// Notifies this object that end of stream has been signalled.
void MarkEndOfStream();
@@ -142,35 +151,36 @@ class MEDIA_EXPORT SourceBufferStream {
// yet.
base::TimeDelta GetMaxInterbufferDistance() const;
- void set_memory_limit(int memory_limit) {
+ void set_memory_limit(size_t memory_limit) {
memory_limit_ = memory_limit;
}
private:
friend class SourceBufferStreamTest;
- // Frees up space if the SourceBufferStream is taking up too much memory.
- void GarbageCollectIfNeeded();
-
// Attempts to delete approximately |total_bytes_to_free| amount of data
// |ranges_|, starting at the front of |ranges_| and moving linearly forward
// through the buffers. Deletes starting from the back if |reverse_direction|
- // is true. Returns the number of bytes freed.
- int FreeBuffers(int total_bytes_to_free, bool reverse_direction);
+ // is true. |media_time| is current playback position.
+ // Returns the number of bytes freed.
+ size_t FreeBuffers(size_t total_bytes_to_free,
+ DecodeTimestamp media_time,
+ bool reverse_direction);
// Attempts to delete approximately |total_bytes_to_free| amount of data from
// |ranges_|, starting after the last appended buffer before the current
- // playback position.
- int FreeBuffersAfterLastAppended(int total_bytes_to_free);
+ // playback position |media_time|.
+ size_t FreeBuffersAfterLastAppended(size_t total_bytes_to_free,
+ DecodeTimestamp media_time);
// Gets the removal range to secure |byte_to_free| from
// [|start_timestamp|, |end_timestamp|).
// Returns the size of buffers to secure if future
// Remove(|start_timestamp|, |removal_end_timestamp|, duration) is called.
// Will not update |removal_end_timestamp| if the returned size is 0.
- int GetRemovalRange(DecodeTimestamp start_timestamp,
- DecodeTimestamp end_timestamp, int byte_to_free,
- DecodeTimestamp* removal_end_timestamp);
+ size_t GetRemovalRange(DecodeTimestamp start_timestamp,
+ DecodeTimestamp end_timestamp, size_t byte_to_free,
+ DecodeTimestamp* removal_end_timestamp);
// Prepares |range_for_next_append_| so |new_buffers| can be appended.
// This involves removing buffers between the end of the previous append
@@ -318,6 +328,14 @@ class MEDIA_EXPORT SourceBufferStream {
// additional processing for splice frame or preroll buffers.
Status GetNextBufferInternal(scoped_refptr<StreamParserBuffer>* out_buffer);
+ // If the next buffer's timestamp is significantly beyond the last output
+ // buffer, and if we just exhausted |track_buffer_| on the previous read, this
+ // method logs a warning to |media_log_| that there could be perceivable
+ // delay. Apps can avoid this behavior by not overlap-appending buffers near
+ // current playback position.
+ void WarnIfTrackBufferExhaustionSkipsForward(
+ const scoped_refptr<StreamParserBuffer>& next_buffer);
+
// Called by PrepareRangesForNextAppend() before pruning overlapped buffers to
// generate a splice frame with a small portion of the overlapped buffers. If
// a splice frame is generated, the first buffer in |new_buffers| will have
@@ -328,9 +346,9 @@ class MEDIA_EXPORT SourceBufferStream {
// appropriately and returns true. Otherwise returns false.
bool SetPendingBuffer(scoped_refptr<StreamParserBuffer>* out_buffer);
- // Callback used to report log messages that can help the web developer figure
- // out what is wrong with the content.
- LogCB log_cb_;
+ // Used to report log messages that can help the web developer figure out what
+ // is wrong with the content.
+ scoped_refptr<MediaLog> media_log_;
// List of disjoint buffered ranges, ordered by start time.
RangeList ranges_;
@@ -339,12 +357,12 @@ class MEDIA_EXPORT SourceBufferStream {
// GetNextBuffer() is only allows to return buffers that have a
// config ID that matches this index. If there is a mismatch then
// it must signal that a config change is needed.
- int current_config_index_;
+ int current_config_index_ = 0;
// Indicates which decoder config to associate with new buffers
// being appended. Each new buffer appended has its config ID set
// to the value of this field.
- int append_config_index_;
+ int append_config_index_ = 0;
// Holds the audio/video configs for this stream. |current_config_index_|
// and |append_config_index_| represent indexes into one of these vectors.
@@ -356,10 +374,10 @@ class MEDIA_EXPORT SourceBufferStream {
// True if more data needs to be appended before the Seek() can complete,
// false if no Seek() has been requested or the Seek() is completed.
- bool seek_pending_;
+ bool seek_pending_ = false;
// True if the end of the stream has been signalled.
- bool end_of_stream_;
+ bool end_of_stream_ = false;
// Timestamp of the last request to Seek().
base::TimeDelta seek_buffer_timestamp_;
@@ -367,12 +385,16 @@ class MEDIA_EXPORT SourceBufferStream {
// Pointer to the seeked-to Range. This is the range from which
// GetNextBuffer() calls are fulfilled after the |track_buffer_| has been
// emptied.
- SourceBufferRange* selected_range_;
+ SourceBufferRange* selected_range_ = nullptr;
// Queue of the next buffers to be returned from calls to GetNextBuffer(). If
// |track_buffer_| is empty, return buffers from |selected_range_|.
BufferQueue track_buffer_;
+ // If there has been no intervening Seek, this will be true if the last
+ // emitted buffer emptied |track_buffer_|.
+ bool just_exhausted_track_buffer_ = false;
+
// The start time of the current media segment being appended.
DecodeTimestamp media_segment_start_time_;
@@ -380,12 +402,12 @@ class MEDIA_EXPORT SourceBufferStream {
RangeList::iterator range_for_next_append_;
// True when the next call to Append() begins a new media segment.
- bool new_media_segment_;
+ bool new_media_segment_ = false;
// The timestamp of the last buffer appended to the media segment, set to
// kNoDecodeTimestamp() if the beginning of the segment.
DecodeTimestamp last_appended_buffer_timestamp_;
- bool last_appended_buffer_is_keyframe_;
+ bool last_appended_buffer_is_keyframe_ = false;
// The decode timestamp on the last buffer returned by the most recent
// GetNextBuffer() call. Set to kNoDecodeTimestamp() if GetNextBuffer() hasn't
@@ -396,13 +418,13 @@ class MEDIA_EXPORT SourceBufferStream {
base::TimeDelta max_interbuffer_distance_;
// The maximum amount of data in bytes the stream will keep in memory.
- int memory_limit_;
+ size_t memory_limit_;
// Indicates that a kConfigChanged status has been reported by GetNextBuffer()
// and GetCurrentXXXDecoderConfig() must be called to update the current
// config. GetNextBuffer() must not be called again until
// GetCurrentXXXDecoderConfig() has been called.
- bool config_change_pending_;
+ bool config_change_pending_ = false;
// Used by HandleNextBufferWithSplice() or HandleNextBufferWithPreroll() when
// a splice frame buffer or buffer with preroll is returned from
@@ -411,14 +433,20 @@ class MEDIA_EXPORT SourceBufferStream {
// Indicates which of the splice buffers in |splice_buffer_| should be
// handled out next.
- size_t splice_buffers_index_;
+ size_t splice_buffers_index_ = 0;
// Indicates that all buffers before |pending_buffer_| have been handed out.
- bool pending_buffers_complete_;
+ bool pending_buffers_complete_ = false;
// Indicates that splice frame generation is enabled.
const bool splice_frames_enabled_;
+ // To prevent log spam, count the number of warnings and successes logged.
+ int num_splice_generation_warning_logs_ = 0;
+ int num_splice_generation_success_logs_ = 0;
+ int num_track_buffer_gap_warning_logs_ = 0;
+ int num_garbage_collect_algorithm_logs_ = 0;
+
DISALLOW_COPY_AND_ASSIGN(SourceBufferStream);
};
diff --git a/chromium/media/filters/source_buffer_stream_unittest.cc b/chromium/media/filters/source_buffer_stream_unittest.cc
index 8270bd9ea68..bf15f328351 100644
--- a/chromium/media/filters/source_buffer_stream_unittest.cc
+++ b/chromium/media/filters/source_buffer_stream_unittest.cc
@@ -15,11 +15,17 @@
#include "base/strings/string_util.h"
#include "media/base/data_buffer.h"
#include "media/base/media_log.h"
+#include "media/base/mock_media_log.h"
#include "media/base/test_helpers.h"
#include "media/base/text_track_config.h"
+#include "media/base/timestamp_constants.h"
#include "media/filters/webvtt_util.h"
#include "testing/gtest/include/gtest/gtest.h"
+using ::testing::HasSubstr;
+using ::testing::InSequence;
+using ::testing::StrictMock;
+
namespace media {
typedef StreamParser::BufferQueue BufferQueue;
@@ -30,15 +36,49 @@ static const uint8 kDataA = 0x11;
static const uint8 kDataB = 0x33;
static const int kDataSize = 1;
+// Matchers for verifying common media log entry strings.
+MATCHER(ContainsMissingKeyframeLog, "") {
+ return CONTAINS_STRING(arg,
+ "Media segment did not begin with key frame. Support "
+ "for such segments will be available in a future "
+ "version. Please see https://crbug.com/229412.");
+}
+
+MATCHER(ContainsSameTimestampAt30MillisecondsLog, "") {
+ return CONTAINS_STRING(arg,
+ "Unexpected combination of buffers with the same "
+ "timestamp detected at 0.03");
+}
+
+MATCHER_P(ContainsTrackBufferExhaustionSkipLog, skip_milliseconds, "") {
+ return CONTAINS_STRING(arg,
+ "Media append that overlapped current playback "
+ "position caused time gap in playing VIDEO stream "
+ "because the next keyframe is " +
+ base::IntToString(skip_milliseconds) +
+ "ms beyond last overlapped frame. Media may "
+ "appear temporarily frozen.");
+}
+
+MATCHER_P2(ContainsGeneratedSpliceLog,
+ duration_microseconds,
+ time_microseconds,
+ "") {
+ return CONTAINS_STRING(arg, "Generated splice of overlap duration " +
+ base::IntToString(duration_microseconds) +
+ "us into new buffer at " +
+ base::IntToString(time_microseconds) + "us.");
+}
+
class SourceBufferStreamTest : public testing::Test {
protected:
- SourceBufferStreamTest() {
+ SourceBufferStreamTest() : media_log_(new StrictMock<MockMediaLog>()) {
video_config_ = TestVideoConfig::Normal();
SetStreamInfo(kDefaultFramesPerSecond, kDefaultKeyframesPerSecond);
- stream_.reset(new SourceBufferStream(video_config_, log_cb(), true));
+ stream_.reset(new SourceBufferStream(video_config_, media_log_, true));
}
- void SetMemoryLimit(int buffers_of_data) {
+ void SetMemoryLimit(size_t buffers_of_data) {
stream_->set_memory_limit(buffers_of_data * kDataSize);
}
@@ -51,7 +91,7 @@ class SourceBufferStreamTest : public testing::Test {
void SetTextStream() {
video_config_ = TestVideoConfig::Invalid();
TextTrackConfig config(kTextSubtitles, "", "", "");
- stream_.reset(new SourceBufferStream(config, log_cb(), true));
+ stream_.reset(new SourceBufferStream(config, media_log_, true));
SetStreamInfo(2, 2);
}
@@ -64,10 +104,9 @@ class SourceBufferStreamTest : public testing::Test {
NULL,
0,
false,
- false,
base::TimeDelta(),
0);
- stream_.reset(new SourceBufferStream(audio_config_, log_cb(), true));
+ stream_.reset(new SourceBufferStream(audio_config_, media_log_, true));
// Equivalent to 2ms per frame.
SetStreamInfo(500, 500);
@@ -145,6 +184,12 @@ class SourceBufferStreamTest : public testing::Test {
stream_->Seek(base::TimeDelta::FromMilliseconds(timestamp_ms));
}
+ bool GarbageCollectWithPlaybackAtBuffer(int position, int newDataBuffers) {
+ return stream_->GarbageCollectIfNeeded(
+ DecodeTimestamp::FromPresentationTime(position * frame_duration_),
+ newDataBuffers * kDataSize);
+ }
+
void RemoveInMs(int start, int end, int duration) {
Remove(base::TimeDelta::FromMilliseconds(start),
base::TimeDelta::FromMilliseconds(end),
@@ -252,8 +297,8 @@ class SourceBufferStreamTest : public testing::Test {
}
void CheckExpectedBuffers(const std::string& expected) {
- std::vector<std::string> timestamps;
- base::SplitString(expected, ' ', &timestamps);
+ std::vector<std::string> timestamps = base::SplitString(
+ expected, " ", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
std::stringstream ss;
const SourceBufferStream::Type type = stream_->GetType();
base::TimeDelta active_splice_timestamp = kNoTimestamp();
@@ -306,7 +351,7 @@ class SourceBufferStreamTest : public testing::Test {
}
// Handle preroll buffers.
- if (base::EndsWith(timestamps[i], "P", true)) {
+ if (base::EndsWith(timestamps[i], "P", base::CompareCase::SENSITIVE)) {
ASSERT_TRUE(buffer->is_key_frame());
scoped_refptr<StreamParserBuffer> preroll_buffer;
preroll_buffer.swap(buffer);
@@ -370,13 +415,12 @@ class SourceBufferStreamTest : public testing::Test {
<< "\nActual: " << actual.AsHumanReadableString();
}
- const LogCB log_cb() { return base::Bind(&AddLogEntryForTest); }
-
base::TimeDelta frame_duration() const { return frame_duration_; }
scoped_ptr<SourceBufferStream> stream_;
VideoDecoderConfig video_config_;
AudioDecoderConfig audio_config_;
+ scoped_refptr<StrictMock<MockMediaLog>> media_log_;
private:
base::TimeDelta ConvertToFrameDuration(int frames_per_second) {
@@ -486,8 +530,8 @@ class SourceBufferStreamTest : public testing::Test {
// id to use for that and subsequent preroll appends is incremented by one.
// The config id for non-splice frame appends will not be affected.
BufferQueue StringToBufferQueue(const std::string& buffers_to_append) {
- std::vector<std::string> timestamps;
- base::SplitString(buffers_to_append, ' ', &timestamps);
+ std::vector<std::string> timestamps = base::SplitString(
+ buffers_to_append, " ", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
CHECK_GT(timestamps.size(), 0u);
@@ -502,40 +546,42 @@ class SourceBufferStreamTest : public testing::Test {
bool is_duration_estimated = false;
// Handle splice frame starts.
- if (base::StartsWithASCII(timestamps[i], "S(", true)) {
+ if (base::StartsWith(timestamps[i], "S(", base::CompareCase::SENSITIVE)) {
CHECK(!splice_frame);
splice_frame = true;
// Remove the "S(" off of the token.
timestamps[i] = timestamps[i].substr(2, timestamps[i].length());
}
- if (splice_frame && base::EndsWith(timestamps[i], ")", true)) {
+ if (splice_frame &&
+ base::EndsWith(timestamps[i], ")", base::CompareCase::SENSITIVE)) {
splice_frame = false;
last_splice_frame = true;
// Remove the ")" off of the token.
timestamps[i] = timestamps[i].substr(0, timestamps[i].length() - 1);
}
// Handle config changes within the splice frame.
- if (splice_frame && base::EndsWith(timestamps[i], "C", true)) {
+ if (splice_frame &&
+ base::EndsWith(timestamps[i], "C", base::CompareCase::SENSITIVE)) {
splice_config_id++;
CHECK(splice_config_id < stream_->audio_configs_.size() ||
splice_config_id < stream_->video_configs_.size());
// Remove the "C" off of the token.
timestamps[i] = timestamps[i].substr(0, timestamps[i].length() - 1);
}
- if (base::EndsWith(timestamps[i], "K", true)) {
+ if (base::EndsWith(timestamps[i], "K", base::CompareCase::SENSITIVE)) {
is_keyframe = true;
// Remove the "K" off of the token.
timestamps[i] = timestamps[i].substr(0, timestamps[i].length() - 1);
}
// Handle preroll buffers.
- if (base::EndsWith(timestamps[i], "P", true)) {
+ if (base::EndsWith(timestamps[i], "P", base::CompareCase::SENSITIVE)) {
is_keyframe = true;
has_preroll = true;
// Remove the "P" off of the token.
timestamps[i] = timestamps[i].substr(0, timestamps[i].length() - 1);
}
- if (base::EndsWith(timestamps[i], "E", true)) {
+ if (base::EndsWith(timestamps[i], "E", base::CompareCase::SENSITIVE)) {
is_duration_estimated = true;
// Remove the "E" off of the token.
timestamps[i] = timestamps[i].substr(0, timestamps[i].length() - 1);
@@ -549,8 +595,8 @@ class SourceBufferStreamTest : public testing::Test {
timestamps[i] = timestamps[i].substr(0, duration_pos);
}
- std::vector<std::string> buffer_timestamps;
- base::SplitString(timestamps[i], '|', &buffer_timestamps);
+ std::vector<std::string> buffer_timestamps = base::SplitString(
+ timestamps[i], "|", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
if (buffer_timestamps.size() == 1)
buffer_timestamps.push_back(buffer_timestamps[0]);
@@ -726,6 +772,8 @@ TEST_F(SourceBufferStreamTest, Append_AdjacentRanges) {
}
TEST_F(SourceBufferStreamTest, Append_DoesNotBeginWithKeyframe) {
+ EXPECT_MEDIA_LOG(ContainsMissingKeyframeLog()).Times(2);
+
// Append fails because the range doesn't begin with a keyframe.
NewSegmentAppend_ExpectFailure(3, 2);
@@ -747,6 +795,8 @@ TEST_F(SourceBufferStreamTest, Append_DoesNotBeginWithKeyframe) {
}
TEST_F(SourceBufferStreamTest, Append_DoesNotBeginWithKeyframe_Adjacent) {
+ EXPECT_MEDIA_LOG(ContainsMissingKeyframeLog());
+
// Append 8 buffers at positions 0 through 7.
NewSegmentAppend(0, 8);
@@ -1412,6 +1462,8 @@ TEST_F(SourceBufferStreamTest, End_Overlap_Selected_NoKeyframeAfterNew) {
// after: |A a a a a A| |B b b b b B|
// track: |a|
TEST_F(SourceBufferStreamTest, End_Overlap_Selected_NoKeyframeAfterNew2) {
+ EXPECT_MEDIA_LOG(ContainsTrackBufferExhaustionSkipLog(133));
+
// Append 7 buffers at positions 10 through 16.
NewSegmentAppend(10, 7, &kDataA);
@@ -1676,6 +1728,8 @@ TEST_F(SourceBufferStreamTest, Overlap_OneByOne_BetweenMediaSegments) {
// new : 0K 30 60 90 120K
// after: 0K 30 60 90 *120K* 130K
TEST_F(SourceBufferStreamTest, Overlap_OneByOne_TrackBuffer) {
+ EXPECT_MEDIA_LOG(ContainsTrackBufferExhaustionSkipLog(50));
+
NewSegmentAppendOneByOne("10K 40 70 100K 125 130D30K");
CheckExpectedRangesByTimestamp("{ [10,160) }");
@@ -1706,6 +1760,8 @@ TEST_F(SourceBufferStreamTest, Overlap_OneByOne_TrackBuffer) {
// new : 110K 130
// after: 0K 30 60 90 *110K* 130
TEST_F(SourceBufferStreamTest, Overlap_OneByOne_TrackBuffer2) {
+ EXPECT_MEDIA_LOG(ContainsTrackBufferExhaustionSkipLog(40));
+
NewSegmentAppendOneByOne("10K 40 70 100K 125 130D30K");
CheckExpectedRangesByTimestamp("{ [10,160) }");
@@ -1736,6 +1792,8 @@ TEST_F(SourceBufferStreamTest, Overlap_OneByOne_TrackBuffer2) {
// after: 0K 30 50K 80 110 140 * (waiting for keyframe)
// track: 70
TEST_F(SourceBufferStreamTest, Overlap_OneByOne_TrackBuffer3) {
+ EXPECT_MEDIA_LOG(ContainsTrackBufferExhaustionSkipLog(80));
+
NewSegmentAppendOneByOne("10K 40 70 100K 125 130D30K");
CheckExpectedRangesByTimestamp("{ [10,160) }");
@@ -1831,6 +1889,8 @@ TEST_F(SourceBufferStreamTest, Overlap_OneByOne_TrackBuffer5) {
// after: 0K 30 60 90 *120K* 130K ... 200K 230 260K 290
// track: 70
TEST_F(SourceBufferStreamTest, Overlap_OneByOne_TrackBuffer6) {
+ EXPECT_MEDIA_LOG(ContainsTrackBufferExhaustionSkipLog(50));
+
NewSegmentAppendOneByOne("10K 40 70 100K 125 130D30K");
NewSegmentAppendOneByOne("200K 230");
CheckExpectedRangesByTimestamp("{ [10,160) [200,260) }");
@@ -2359,8 +2419,8 @@ TEST_F(SourceBufferStreamTest, GarbageCollection_DeleteFront) {
for (int i = 1; i < 20; i++)
AppendBuffers(i, 1, &kDataA);
- // None of the buffers should trigger garbage collection, so all data should
- // be there as expected.
+ // GC should be a no-op, since we are just under memory limit.
+ EXPECT_TRUE(GarbageCollectWithPlaybackAtBuffer(0, 0));
CheckExpectedRanges("{ [0,19) }");
Seek(0);
CheckExpectedBuffers(0, 19, &kDataA);
@@ -2368,11 +2428,15 @@ TEST_F(SourceBufferStreamTest, GarbageCollection_DeleteFront) {
// Seek to the middle of the stream.
Seek(10);
+ // We are about to append 5 new buffers and current playback position is 10,
+ // so the GC algorithm should be able to delete some old data from the front.
+ EXPECT_TRUE(GarbageCollectWithPlaybackAtBuffer(10, 5));
+ CheckExpectedRanges("{ [5,19) }");
+
// Append 5 buffers to the end of the stream.
AppendBuffers(20, 5, &kDataA);
-
- // GC should have deleted the first 5 buffers.
CheckExpectedRanges("{ [5,24) }");
+
CheckExpectedBuffers(10, 24, &kDataA);
Seek(5);
CheckExpectedBuffers(5, 9, &kDataA);
@@ -2387,8 +2451,10 @@ TEST_F(SourceBufferStreamTest, GarbageCollection_DeleteFrontGOPsAtATime) {
// Seek to position 10.
Seek(10);
+ CheckExpectedRanges("{ [0,19) }");
// Add one buffer to put the memory over the cap.
+ EXPECT_TRUE(GarbageCollectWithPlaybackAtBuffer(10, 1));
AppendBuffers(20, 1, &kDataA);
// GC should have deleted the first 5 buffers so that the range still begins
@@ -2403,14 +2469,19 @@ TEST_F(SourceBufferStreamTest, GarbageCollection_DeleteBack) {
// Set memory limit to 5 buffers.
SetMemoryLimit(5);
- // Seek to position 0.
- Seek(0);
-
- // Append 20 buffers at positions 0 through 19.
- NewSegmentAppend(0, 20, &kDataA);
+ // Append 5 buffers at positions 15 through 19.
+ NewSegmentAppend(15, 5, &kDataA);
+ EXPECT_TRUE(GarbageCollectWithPlaybackAtBuffer(0, 0));
- // Should leave the first 5 buffers from 0 to 4 and the last GOP appended.
+ // Append 5 buffers at positions 0 through 4.
+ NewSegmentAppend(0, 5, &kDataA);
CheckExpectedRanges("{ [0,4) [15,19) }");
+
+ // Seek to position 0.
+ Seek(0);
+ EXPECT_TRUE(GarbageCollectWithPlaybackAtBuffer(0, 0));
+ // Should leave the first 5 buffers from 0 to 4.
+ CheckExpectedRanges("{ [0,4) }");
CheckExpectedBuffers(0, 4, &kDataA);
}
@@ -2423,10 +2494,16 @@ TEST_F(SourceBufferStreamTest, GarbageCollection_DeleteFrontAndBack) {
// Append 40 buffers at positions 0 through 39.
NewSegmentAppend(0, 40, &kDataA);
+ // GC will try to keep data between current playback position and last append
+ // position. This will ensure that the last append position is 19 and will
+ // allow the GC algorithm to collect data outside of the range [15,19).
+ NewSegmentAppend(15, 5, &kDataA);
+ CheckExpectedRanges("{ [0,39) }");
- // Should leave the GOP containing the seek position and the last GOP
- // appended.
- CheckExpectedRanges("{ [15,19) [35,39) }");
+ // Should leave the GOP containing the current playback position 15 and the
+ // last append position 19. GC returns false, since we are still above limit.
+ EXPECT_FALSE(GarbageCollectWithPlaybackAtBuffer(15, 0));
+ CheckExpectedRanges("{ [15,19) }");
CheckExpectedBuffers(15, 19, &kDataA);
CheckNoNextBuffer();
}
@@ -2441,43 +2518,49 @@ TEST_F(SourceBufferStreamTest, GarbageCollection_DeleteSeveralRanges) {
// Append 5 buffers at positions 20 through 24.
NewSegmentAppend(20, 5);
- // Append 5 buffers at positions 30 through 34.
- NewSegmentAppend(30, 5);
+ // Append 5 buffers at positions 40 through 44.
+ NewSegmentAppend(40, 5);
- CheckExpectedRanges("{ [0,4) [10,14) [20,24) [30,34) }");
+ CheckExpectedRanges("{ [0,4) [10,14) [20,24) [40,44) }");
- // Seek to position 21.
+ // Seek to position 20.
Seek(20);
CheckExpectedBuffers(20, 20);
// Set memory limit to 1 buffer.
SetMemoryLimit(1);
- // Append 5 buffers at positions 40 through 44. This will trigger GC.
- NewSegmentAppend(40, 5);
+ // Append 5 buffers at positions 30 through 34.
+ NewSegmentAppend(30, 5);
+
+ // We will have more than 1 buffer left, so GC will fail.
+ EXPECT_FALSE(GarbageCollectWithPlaybackAtBuffer(20, 0));
- // Should delete everything except the GOP containing the current buffer and
- // the last GOP appended.
- CheckExpectedRanges("{ [20,24) [40,44) }");
+ // Should have deleted all buffer ranges before the current buffer and after
+ // last GOP
+ CheckExpectedRanges("{ [20,24) [30,34) }");
CheckExpectedBuffers(21, 24);
CheckNoNextBuffer();
// Continue appending into the last range to make sure it didn't break.
- AppendBuffers(45, 10);
- // Should only save last GOP appended.
- CheckExpectedRanges("{ [20,24) [50,54) }");
+ AppendBuffers(35, 10);
+ EXPECT_FALSE(GarbageCollectWithPlaybackAtBuffer(20, 0));
+ // Should save everything between the read head and the last appended buffer.
+ CheckExpectedRanges("{ [20,24) [30,44) }");
// Make sure appending before and after the ranges didn't somehow break.
SetMemoryLimit(100);
NewSegmentAppend(0, 10);
- CheckExpectedRanges("{ [0,9) [20,24) [50,54) }");
+ EXPECT_TRUE(GarbageCollectWithPlaybackAtBuffer(20, 0));
+ CheckExpectedRanges("{ [0,9) [20,24) [30,44) }");
Seek(0);
CheckExpectedBuffers(0, 9);
NewSegmentAppend(90, 10);
- CheckExpectedRanges("{ [0,9) [20,24) [50,54) [90,99) }");
- Seek(50);
- CheckExpectedBuffers(50, 54);
+ EXPECT_TRUE(GarbageCollectWithPlaybackAtBuffer(0, 0));
+ CheckExpectedRanges("{ [0,9) [20,24) [30,44) [90,99) }");
+ Seek(30);
+ CheckExpectedBuffers(30, 44);
CheckNoNextBuffer();
Seek(90);
CheckExpectedBuffers(90, 99);
@@ -2494,6 +2577,8 @@ TEST_F(SourceBufferStreamTest, GarbageCollection_DeleteAfterLastAppend) {
// Append 2 GOPs starting at 490ms, 30ms apart.
NewSegmentAppend("490K 520 550 580K 610 640");
+ EXPECT_TRUE(GarbageCollectWithPlaybackAtBuffer(0, 0));
+
CheckExpectedRangesByTimestamp("{ [310,400) [490,670) }");
// Seek to the GOP at 580ms.
@@ -2503,6 +2588,9 @@ TEST_F(SourceBufferStreamTest, GarbageCollection_DeleteAfterLastAppend) {
// So the ranges before GC are "{ [100,280) [310,400) [490,670) }".
NewSegmentAppend("100K 130 160 190K 220 250K");
+ EXPECT_TRUE(stream_->GarbageCollectIfNeeded(
+ DecodeTimestamp::FromMilliseconds(580), 0));
+
// Should save the newly appended GOPs.
CheckExpectedRangesByTimestamp("{ [100,280) [580,670) }");
}
@@ -2521,6 +2609,9 @@ TEST_F(SourceBufferStreamTest, GarbageCollection_DeleteAfterLastAppendMerged) {
// range. So the range before GC is "{ [220,670) }".
NewSegmentAppend("220K 250 280 310K 340 370");
+ EXPECT_TRUE(stream_->GarbageCollectIfNeeded(
+ DecodeTimestamp::FromMilliseconds(580), 0));
+
// Should save the newly appended GOPs.
CheckExpectedRangesByTimestamp("{ [220,400) [580,670) }");
}
@@ -2532,7 +2623,13 @@ TEST_F(SourceBufferStreamTest, GarbageCollection_NoSeek) {
// Append 25 buffers at positions 0 through 24.
NewSegmentAppend(0, 25, &kDataA);
- // GC deletes the first 5 buffers to keep the memory limit within cap.
+ // If playback is still in the first GOP (starting at 0), GC should fail.
+ EXPECT_FALSE(GarbageCollectWithPlaybackAtBuffer(2, 0));
+ CheckExpectedRanges("{ [0,24) }");
+
+ // As soon as playback position moves past the first GOP, it should be removed
+ // and after removing the first GOP we are under memory limit.
+ EXPECT_TRUE(GarbageCollectWithPlaybackAtBuffer(5, 0));
CheckExpectedRanges("{ [5,24) }");
CheckNoNextBuffer();
Seek(5);
@@ -2549,7 +2646,6 @@ TEST_F(SourceBufferStreamTest, GarbageCollection_PendingSeek) {
// Seek to position 15.
Seek(15);
CheckNoNextBuffer();
-
CheckExpectedRanges("{ [0,9) [25,29) }");
// Set memory limit to 5 buffers.
@@ -2558,6 +2654,8 @@ TEST_F(SourceBufferStreamTest, GarbageCollection_PendingSeek) {
// Append 5 buffers as positions 30 to 34 to trigger GC.
AppendBuffers(30, 5, &kDataA);
+ EXPECT_TRUE(GarbageCollectWithPlaybackAtBuffer(30, 0));
+
// The current algorithm will delete from the beginning until the memory is
// under cap.
CheckExpectedRanges("{ [30,34) }");
@@ -2566,6 +2664,7 @@ TEST_F(SourceBufferStreamTest, GarbageCollection_PendingSeek) {
SetMemoryLimit(100);
// Append data to fulfill seek.
+ EXPECT_TRUE(GarbageCollectWithPlaybackAtBuffer(30, 5));
NewSegmentAppend(15, 5, &kDataA);
// Check to make sure all is well.
@@ -2584,25 +2683,32 @@ TEST_F(SourceBufferStreamTest, GarbageCollection_NeedsMoreData) {
// Advance next buffer position to 10.
Seek(0);
+ EXPECT_TRUE(GarbageCollectWithPlaybackAtBuffer(0, 0));
CheckExpectedBuffers(0, 9, &kDataA);
CheckNoNextBuffer();
// Append 20 buffers at positions 15 through 34.
NewSegmentAppend(15, 20, &kDataA);
+ CheckExpectedRanges("{ [0,9) [15,34) }");
- // GC should have saved the keyframe before the current seek position and the
- // data closest to the current seek position. It will also save the last GOP
- // appended.
- CheckExpectedRanges("{ [5,9) [15,19) [30,34) }");
+ // GC should save the keyframe before the next buffer position and the data
+ // closest to the next buffer position. It will also save all buffers from
+ // next buffer to the last GOP appended, which overflows limit and leads to
+ // failure.
+ EXPECT_FALSE(GarbageCollectWithPlaybackAtBuffer(5, 0));
+ CheckExpectedRanges("{ [5,9) [15,34) }");
// Now fulfill the seek at position 10. This will make GC delete the data
// before position 10 to keep it within cap.
NewSegmentAppend(10, 5, &kDataA);
- CheckExpectedRanges("{ [10,19) [30,34) }");
- CheckExpectedBuffers(10, 19, &kDataA);
+ EXPECT_TRUE(GarbageCollectWithPlaybackAtBuffer(10, 0));
+ CheckExpectedRanges("{ [10,24) }");
+ CheckExpectedBuffers(10, 24, &kDataA);
}
TEST_F(SourceBufferStreamTest, GarbageCollection_TrackBuffer) {
+ EXPECT_MEDIA_LOG(ContainsTrackBufferExhaustionSkipLog(99));
+
// Set memory limit to 3 buffers.
SetMemoryLimit(3);
@@ -2612,15 +2718,21 @@ TEST_F(SourceBufferStreamTest, GarbageCollection_TrackBuffer) {
// Append 18 buffers at positions 0 through 17.
NewSegmentAppend(0, 18, &kDataA);
+ EXPECT_TRUE(GarbageCollectWithPlaybackAtBuffer(15, 0));
+
// Should leave GOP containing seek position.
CheckExpectedRanges("{ [15,17) }");
- // Seek ahead to position 16.
+ // Move next buffer position to 16.
CheckExpectedBuffers(15, 15, &kDataA);
// Completely overlap the existing buffers.
NewSegmentAppend(0, 20, &kDataB);
+ // Final GOP [15,19) contains 5 buffers, which is more than memory limit of
+ // 3 buffers set at the beginning of the test, so GC will fail.
+ EXPECT_FALSE(GarbageCollectWithPlaybackAtBuffer(15, 0));
+
// Because buffers 16 and 17 are not keyframes, they are moved to the track
// buffer upon overlap. The source buffer (i.e. not the track buffer) is now
// waiting for the next keyframe.
@@ -2631,6 +2743,9 @@ TEST_F(SourceBufferStreamTest, GarbageCollection_TrackBuffer) {
// Now add a keyframe at position 20.
AppendBuffers(20, 5, &kDataB);
+ // 5 buffers in final GOP, GC will fail
+ EXPECT_FALSE(GarbageCollectWithPlaybackAtBuffer(20, 0));
+
// Should garbage collect such that there are 5 frames remaining, starting at
// the keyframe.
CheckExpectedRanges("{ [20,24) }");
@@ -2638,39 +2753,100 @@ TEST_F(SourceBufferStreamTest, GarbageCollection_TrackBuffer) {
CheckNoNextBuffer();
}
+// Test GC preserves data starting at first GOP containing playback position.
+TEST_F(SourceBufferStreamTest, GarbageCollection_SaveDataAtPlaybackPosition) {
+ // Set memory limit to 30 buffers = 1 second of data.
+ SetMemoryLimit(30);
+ // And append 300 buffers = 10 seconds of data.
+ NewSegmentAppend(0, 300, &kDataA);
+ CheckExpectedRanges("{ [0,299) }");
+
+ // Playback position at 0, all data must be preserved.
+ EXPECT_FALSE(stream_->GarbageCollectIfNeeded(
+ DecodeTimestamp::FromMilliseconds(0), 0));
+ CheckExpectedRanges("{ [0,299) }");
+
+ // Playback position at 1 sec, the first second of data [0,29) should be
+ // collected, since we are way over memory limit.
+ EXPECT_FALSE(stream_->GarbageCollectIfNeeded(
+ DecodeTimestamp::FromMilliseconds(1000), 0));
+ CheckExpectedRanges("{ [30,299) }");
+
+ // Playback position at 1.1 sec, no new data can be collected, since the
+ // playback position is still in the first GOP of buffered data.
+ EXPECT_FALSE(stream_->GarbageCollectIfNeeded(
+ DecodeTimestamp::FromMilliseconds(1100), 0));
+ CheckExpectedRanges("{ [30,299) }");
+
+ // Playback position at 5.166 sec, just at the very end of GOP corresponding
+ // to buffer range 150-155, which should be preserved.
+ EXPECT_FALSE(stream_->GarbageCollectIfNeeded(
+ DecodeTimestamp::FromMilliseconds(5166), 0));
+ CheckExpectedRanges("{ [150,299) }");
+
+ // Playback position at 5.167 sec, just past the end of GOP corresponding to
+ // buffer range 150-155, it should be garbage collected now.
+ EXPECT_FALSE(stream_->GarbageCollectIfNeeded(
+ DecodeTimestamp::FromMilliseconds(5167), 0));
+ CheckExpectedRanges("{ [155,299) }");
+
+ // Playback at 9.0 sec, we can now successfully collect all data except the
+ // last second and we are back under memory limit of 30 buffers, so GCIfNeeded
+ // should return true.
+ EXPECT_TRUE(stream_->GarbageCollectIfNeeded(
+ DecodeTimestamp::FromMilliseconds(9000), 0));
+ CheckExpectedRanges("{ [270,299) }");
+
+ // Playback at 9.999 sec, GC succeeds, since we are under memory limit even
+ // without removing any data.
+ EXPECT_TRUE(stream_->GarbageCollectIfNeeded(
+ DecodeTimestamp::FromMilliseconds(9999), 0));
+ CheckExpectedRanges("{ [270,299) }");
+
+ // Playback at 15 sec, this should never happen during regular playback in
+ // browser, since this position has no data buffered, but it should still
+ // cause no problems to GC algorithm, so test it just in case.
+ EXPECT_TRUE(stream_->GarbageCollectIfNeeded(
+ DecodeTimestamp::FromMilliseconds(15000), 0));
+ CheckExpectedRanges("{ [270,299) }");
+}
+
// Test saving the last GOP appended when this GOP is the only GOP in its range.
TEST_F(SourceBufferStreamTest, GarbageCollection_SaveAppendGOP) {
// Set memory limit to 3 and make sure the 4-byte GOP is not garbage
// collected.
SetMemoryLimit(3);
NewSegmentAppend("0K 30 60 90");
+ EXPECT_FALSE(GarbageCollectWithPlaybackAtBuffer(0, 0));
CheckExpectedRangesByTimestamp("{ [0,120) }");
// Make sure you can continue appending data to this GOP; again, GC should not
// wipe out anything.
AppendBuffers("120D30");
+ EXPECT_FALSE(GarbageCollectWithPlaybackAtBuffer(0, 0));
CheckExpectedRangesByTimestamp("{ [0,150) }");
- // Set memory limit to 100 and append a 2nd range after this without
- // triggering GC.
- SetMemoryLimit(100);
+ // Append a 2nd range after this without triggering GC.
NewSegmentAppend("200K 230 260 290K 320 350");
CheckExpectedRangesByTimestamp("{ [0,150) [200,380) }");
// Seek to 290ms.
SeekToTimestampMs(290);
- // Now set memory limit to 3 and append a GOP in a separate range after the
- // selected range. Because it is after 290ms, this tests that the GOP is saved
- // when deleting from the back.
- SetMemoryLimit(3);
+ // Now append a GOP in a separate range after the selected range and trigger
+ // GC. Because it is after 290ms, this tests that the GOP is saved when
+ // deleting from the back.
NewSegmentAppend("500K 530 560 590");
+ EXPECT_FALSE(stream_->GarbageCollectIfNeeded(
+ DecodeTimestamp::FromMilliseconds(290), 0));
- // Should save GOP with 290ms and last GOP appended.
+ // Should save GOPs between 290ms and the last GOP appended.
CheckExpectedRangesByTimestamp("{ [290,380) [500,620) }");
// Continue appending to this GOP after GC.
AppendBuffers("620D30");
+ EXPECT_FALSE(stream_->GarbageCollectIfNeeded(
+ DecodeTimestamp::FromMilliseconds(290), 0));
CheckExpectedRangesByTimestamp("{ [290,380) [500,650) }");
}
@@ -2686,34 +2862,39 @@ TEST_F(SourceBufferStreamTest, GarbageCollection_SaveAppendGOP_Middle) {
SetMemoryLimit(1);
NewSegmentAppend("80K 110 140");
- // This whole GOP should be saved, and should be able to continue appending
- // data to it.
+ // This whole GOP should be saved after GC, which will fail due to GOP being
+ // larger than 1 buffer
+ EXPECT_FALSE(stream_->GarbageCollectIfNeeded(
+ DecodeTimestamp::FromMilliseconds(80), 0));
CheckExpectedRangesByTimestamp("{ [80,170) }");
+ // We should still be able to continue appending data to GOP
AppendBuffers("170D30");
+ EXPECT_FALSE(stream_->GarbageCollectIfNeeded(
+ DecodeTimestamp::FromMilliseconds(80), 0));
CheckExpectedRangesByTimestamp("{ [80,200) }");
- // Set memory limit to 100 and append a 2nd range after this without
- // triggering GC.
- SetMemoryLimit(100);
+ // Append a 2nd range after this range, without triggering GC.
NewSegmentAppend("400K 430 460 490K 520 550 580K 610 640");
CheckExpectedRangesByTimestamp("{ [80,200) [400,670) }");
// Seek to 80ms to make the first range the selected range.
SeekToTimestampMs(80);
- // Now set memory limit to 3 and append a GOP in the middle of the second
- // range. Because it is after the selected range, this tests that the GOP is
- // saved when deleting from the back.
- SetMemoryLimit(3);
+ // Now append a GOP in the middle of the second range and trigger GC. Because
+ // it is after the selected range, this tests that the GOP is saved when
+ // deleting from the back.
NewSegmentAppend("500K 530 560 590");
+ EXPECT_FALSE(stream_->GarbageCollectIfNeeded(
+ DecodeTimestamp::FromMilliseconds(80), 0));
- // Should save the GOP containing the seek point and GOP that was last
- // appended.
- CheckExpectedRangesByTimestamp("{ [80,200) [500,620) }");
+ // Should save the GOPs between the seek point and GOP that was last appended
+ CheckExpectedRangesByTimestamp("{ [80,200) [400,620) }");
// Continue appending to this GOP after GC.
AppendBuffers("620D30");
- CheckExpectedRangesByTimestamp("{ [80,200) [500,650) }");
+ EXPECT_FALSE(stream_->GarbageCollectIfNeeded(
+ DecodeTimestamp::FromMilliseconds(80), 0));
+ CheckExpectedRangesByTimestamp("{ [80,200) [400,650) }");
}
// Test saving the last GOP appended when the GOP containing the next buffer is
@@ -2730,7 +2911,10 @@ TEST_F(SourceBufferStreamTest, GarbageCollection_SaveAppendGOP_Selected1) {
SetMemoryLimit(1);
NewSegmentAppend("0K 30 60");
- // Should save the GOP at 0ms and 90ms.
+ // GC should save the GOP at 0ms and 90ms, and will fail since GOP larger
+ // than 1 buffer
+ EXPECT_FALSE(stream_->GarbageCollectIfNeeded(
+ DecodeTimestamp::FromMilliseconds(90), 0));
CheckExpectedRangesByTimestamp("{ [0,180) }");
// Seek to 0 and check all buffers.
@@ -2743,6 +2927,8 @@ TEST_F(SourceBufferStreamTest, GarbageCollection_SaveAppendGOP_Selected1) {
NewSegmentAppend("180K 210 240");
// Should save the GOP at 90ms and the GOP at 180ms.
+ EXPECT_FALSE(stream_->GarbageCollectIfNeeded(
+ DecodeTimestamp::FromMilliseconds(90), 0));
CheckExpectedRangesByTimestamp("{ [90,270) }");
CheckExpectedBuffers("90K 120 150 180K 210 240");
CheckNoNextBuffer();
@@ -2763,22 +2949,24 @@ TEST_F(SourceBufferStreamTest, GarbageCollection_SaveAppendGOP_Selected2) {
SetMemoryLimit(1);
NewSegmentAppend("90K 120 150");
- // Should save the GOP at 90ms and the GOP at 270ms.
- CheckExpectedRangesByTimestamp("{ [90,180) [270,360) }");
+ // GC will save data in the range where the most recent append has happened
+ // [0; 180) and the range where the next read position is [270;360)
+ EXPECT_FALSE(stream_->GarbageCollectIfNeeded(
+ DecodeTimestamp::FromMilliseconds(270), 0));
+ CheckExpectedRangesByTimestamp("{ [0,180) [270,360) }");
- // Set memory limit to 100 and add 3 GOPs to the end of the selected range
- // at 360ms, 450ms, and 540ms.
- SetMemoryLimit(100);
+ // Add 3 GOPs to the end of the selected range at 360ms, 450ms, and 540ms.
NewSegmentAppend("360K 390 420 450K 480 510 540K 570 600");
- CheckExpectedRangesByTimestamp("{ [90,180) [270,630) }");
+ CheckExpectedRangesByTimestamp("{ [0,180) [270,630) }");
- // Constrain the memory limit again and overlap the GOP at 450ms to test
- // deleting from the back.
- SetMemoryLimit(1);
+ // Overlap the GOP at 450ms and garbage collect to test deleting from the
+ // back.
NewSegmentAppend("450K 480 510");
+ EXPECT_FALSE(stream_->GarbageCollectIfNeeded(
+ DecodeTimestamp::FromMilliseconds(270), 0));
- // Should save GOP at 270ms and the GOP at 450ms.
- CheckExpectedRangesByTimestamp("{ [270,360) [450,540) }");
+ // Should save GOPs from GOP at 270ms to GOP at 450ms.
+ CheckExpectedRangesByTimestamp("{ [270,540) }");
}
// Test saving the last GOP appended when it is the same as the GOP containing
@@ -2796,8 +2984,10 @@ TEST_F(SourceBufferStreamTest, GarbageCollection_SaveAppendGOP_Selected3) {
SetMemoryLimit(1);
NewSegmentAppend("0K 30");
- // Should save the newly appended GOP, which is also the next GOP that will be
- // returned from the seek request.
+ // GC should save the newly appended GOP, which is also the next GOP that
+ // will be returned from the seek request.
+ EXPECT_FALSE(stream_->GarbageCollectIfNeeded(
+ DecodeTimestamp::FromMilliseconds(0), 0));
CheckExpectedRangesByTimestamp("{ [0,60) }");
// Check the buffers in the range.
@@ -2807,8 +2997,10 @@ TEST_F(SourceBufferStreamTest, GarbageCollection_SaveAppendGOP_Selected3) {
// Continue appending to this buffer.
AppendBuffers("60 90");
- // Should still save the rest of this GOP and should be able to fulfill the
- // read.
+ // GC should still save the rest of this GOP and should be able to fulfill
+ // the read.
+ EXPECT_FALSE(stream_->GarbageCollectIfNeeded(
+ DecodeTimestamp::FromMilliseconds(0), 0));
CheckExpectedRangesByTimestamp("{ [0,120) }");
CheckExpectedBuffers("60 90");
CheckNoNextBuffer();
@@ -3352,12 +3544,16 @@ TEST_F(SourceBufferStreamTest, SameTimestamp_Video_TwoAppends) {
// Verify that a non-keyframe followed by a keyframe with the same timestamp
// is not allowed.
TEST_F(SourceBufferStreamTest, SameTimestamp_Video_Invalid_1) {
+ EXPECT_MEDIA_LOG(ContainsSameTimestampAt30MillisecondsLog());
+
Seek(0);
NewSegmentAppend("0K 30");
AppendBuffers_ExpectFailure("30K 60");
}
TEST_F(SourceBufferStreamTest, SameTimestamp_Video_Invalid_2) {
+ EXPECT_MEDIA_LOG(ContainsSameTimestampAt30MillisecondsLog());
+
Seek(0);
NewSegmentAppend_ExpectFailure("0K 30 30K 60");
}
@@ -3408,16 +3604,18 @@ TEST_F(SourceBufferStreamTest, SameTimestamp_Video_Overlap_3) {
TEST_F(SourceBufferStreamTest, SameTimestamp_Audio) {
AudioDecoderConfig config(kCodecMP3, kSampleFormatF32, CHANNEL_LAYOUT_STEREO,
44100, NULL, 0, false);
- stream_.reset(new SourceBufferStream(config, log_cb(), true));
+ stream_.reset(new SourceBufferStream(config, media_log_, true));
Seek(0);
NewSegmentAppend("0K 0K 30K 30 60 60");
CheckExpectedBuffers("0K 0K 30K 30 60 60");
}
TEST_F(SourceBufferStreamTest, SameTimestamp_Audio_Invalid_1) {
+ EXPECT_MEDIA_LOG(ContainsSameTimestampAt30MillisecondsLog());
+
AudioDecoderConfig config(kCodecMP3, kSampleFormatF32, CHANNEL_LAYOUT_STEREO,
44100, NULL, 0, false);
- stream_.reset(new SourceBufferStream(config, log_cb(), true));
+ stream_.reset(new SourceBufferStream(config, media_log_, true));
Seek(0);
NewSegmentAppend_ExpectFailure("0K 30 30K 60");
}
@@ -3894,6 +4092,8 @@ TEST_F(SourceBufferStreamTest,
}
TEST_F(SourceBufferStreamTest, Audio_SpliceFrame_Basic) {
+ EXPECT_MEDIA_LOG(ContainsGeneratedSpliceLog(3000, 11000));
+
SetAudioStream();
Seek(0);
NewSegmentAppend("0K 2K 4K 6K 8K 10K 12K");
@@ -3903,6 +4103,10 @@ TEST_F(SourceBufferStreamTest, Audio_SpliceFrame_Basic) {
}
TEST_F(SourceBufferStreamTest, Audio_SpliceFrame_NoExactSplices) {
+ EXPECT_MEDIA_LOG(
+ HasSubstr("Skipping splice frame generation: first new buffer at 10000us "
+ "begins at or before existing buffer at 10000us."));
+
SetAudioStream();
Seek(0);
NewSegmentAppend("0K 2K 4K 6K 8K 10K 12K");
@@ -3913,6 +4117,12 @@ TEST_F(SourceBufferStreamTest, Audio_SpliceFrame_NoExactSplices) {
// Do not allow splices on top of splices.
TEST_F(SourceBufferStreamTest, Audio_SpliceFrame_NoDoubleSplice) {
+ InSequence s;
+ EXPECT_MEDIA_LOG(ContainsGeneratedSpliceLog(3000, 11000));
+ EXPECT_MEDIA_LOG(
+ HasSubstr("Skipping splice frame generation: overlapped buffers at "
+ "10000us are in a previously buffered splice."));
+
SetAudioStream();
Seek(0);
NewSegmentAppend("0K 2K 4K 6K 8K 10K 12K");
@@ -3944,6 +4154,8 @@ TEST_F(SourceBufferStreamTest, Audio_SpliceFrame_NoSplice) {
}
TEST_F(SourceBufferStreamTest, Audio_SpliceFrame_CorrectMediaSegmentStartTime) {
+ EXPECT_MEDIA_LOG(ContainsGeneratedSpliceLog(5000, 1000));
+
SetAudioStream();
Seek(0);
NewSegmentAppend("0K 2K 4K");
@@ -3957,6 +4169,8 @@ TEST_F(SourceBufferStreamTest, Audio_SpliceFrame_CorrectMediaSegmentStartTime) {
}
TEST_F(SourceBufferStreamTest, Audio_SpliceFrame_ConfigChange) {
+ EXPECT_MEDIA_LOG(ContainsGeneratedSpliceLog(3000, 5000));
+
SetAudioStream();
AudioDecoderConfig new_config(kCodecVorbis,
@@ -3980,6 +4194,10 @@ TEST_F(SourceBufferStreamTest, Audio_SpliceFrame_ConfigChange) {
// Ensure splices are not created if there are not enough frames to crossfade.
TEST_F(SourceBufferStreamTest, Audio_SpliceFrame_NoTinySplices) {
+ EXPECT_MEDIA_LOG(HasSubstr(
+ "Skipping splice frame generation: not enough samples for splicing new "
+ "buffer at 1000us. Have 1000us, but need 2000us."));
+
SetAudioStream();
Seek(0);
@@ -3996,11 +4214,15 @@ TEST_F(SourceBufferStreamTest, Audio_SpliceFrame_NoTinySplices) {
}
TEST_F(SourceBufferStreamTest, Audio_SpliceFrame_NoMillisecondSplices) {
+ EXPECT_MEDIA_LOG(
+ HasSubstr("Skipping splice frame generation: not enough samples for "
+ "splicing new buffer at 1250us. Have 750us, but need 1000us."));
+
video_config_ = TestVideoConfig::Invalid();
audio_config_.Initialize(kCodecVorbis, kSampleFormatPlanarF32,
- CHANNEL_LAYOUT_STEREO, 4000, NULL, 0, false, false,
+ CHANNEL_LAYOUT_STEREO, 4000, NULL, 0, false,
base::TimeDelta(), 0);
- stream_.reset(new SourceBufferStream(audio_config_, log_cb(), true));
+ stream_.reset(new SourceBufferStream(audio_config_, media_log_, true));
// Equivalent to 0.5ms per frame.
SetStreamInfo(2000, 2000);
Seek(0);
@@ -4023,6 +4245,8 @@ TEST_F(SourceBufferStreamTest, Audio_SpliceFrame_NoMillisecondSplices) {
}
TEST_F(SourceBufferStreamTest, Audio_SpliceFrame_Preroll) {
+ EXPECT_MEDIA_LOG(ContainsGeneratedSpliceLog(3000, 11000));
+
SetAudioStream();
Seek(0);
NewSegmentAppend("0K 2K 4K 6K 8K 10K 12K");
@@ -4285,6 +4509,64 @@ TEST_F(SourceBufferStreamTest, ConfigChange_ReSeek) {
CheckVideoConfig(new_config);
}
+TEST_F(SourceBufferStreamTest, TrackBuffer_ExhaustionWithSkipForward) {
+ NewSegmentAppend("0K 10 20 30 40");
+
+ // Read the first 4 buffers, so next buffer is at time 40.
+ Seek(0);
+ CheckExpectedRangesByTimestamp("{ [0,50) }");
+ CheckExpectedBuffers("0K 10 20 30");
+
+ // Overlap-append, populating track buffer with timestamp 40 from original
+ // append. Confirm there could be a large jump in time until the next key
+ // frame after exhausting the track buffer.
+ NewSegmentAppend(
+ "31K 41 51 61 71 81 91 101 111 121 "
+ "131K 141");
+ CheckExpectedRangesByTimestamp("{ [0,151) }");
+
+ // Confirm the large jump occurs and warning log is generated.
+ // If this test is changed, update
+ // TrackBufferExhaustion_ImmediateNewTrackBuffer accordingly.
+ EXPECT_MEDIA_LOG(ContainsTrackBufferExhaustionSkipLog(91));
+
+ CheckExpectedBuffers("40 131K 141");
+ CheckNoNextBuffer();
+}
+
+TEST_F(SourceBufferStreamTest,
+ TrackBuffer_ExhaustionAndImmediateNewTrackBuffer) {
+ NewSegmentAppend("0K 10 20 30 40");
+
+ // Read the first 4 buffers, so next buffer is at time 40.
+ Seek(0);
+ CheckExpectedRangesByTimestamp("{ [0,50) }");
+ CheckExpectedBuffers("0K 10 20 30");
+
+ // Overlap-append
+ NewSegmentAppend(
+ "31K 41 51 61 71 81 91 101 111 121 "
+ "131K 141");
+ CheckExpectedRangesByTimestamp("{ [0,151) }");
+
+ // Exhaust the track buffer, but don't read any of the overlapping append yet.
+ CheckExpectedBuffers("40");
+
+ // Selected range's next buffer is now the 131K buffer from the overlapping
+ // append. (See TrackBuffer_ExhaustionWithSkipForward for that verification.)
+ // Do another overlap-append to immediately create another track buffer and
+ // verify both track buffer exhaustions skip forward and emit log warnings.
+ NewSegmentAppend("22K 32 42 52 62 72 82 92 102 112 122K 132 142 152K 162");
+ CheckExpectedRangesByTimestamp("{ [0,172) }");
+
+ InSequence s;
+ EXPECT_MEDIA_LOG(ContainsTrackBufferExhaustionSkipLog(91));
+ EXPECT_MEDIA_LOG(ContainsTrackBufferExhaustionSkipLog(11));
+
+ CheckExpectedBuffers("131K 141 152K 162");
+ CheckNoNextBuffer();
+}
+
// TODO(vrk): Add unit tests where keyframes are unaligned between streams.
// (crbug.com/133557)
diff --git a/chromium/media/filters/stream_parser_factory.cc b/chromium/media/filters/stream_parser_factory.cc
index 9ea2b0e3a2d..fc3257b139d 100644
--- a/chromium/media/filters/stream_parser_factory.cc
+++ b/chromium/media/filters/stream_parser_factory.cc
@@ -30,7 +30,8 @@
namespace media {
typedef bool (*CodecIDValidatorFunction)(
- const std::string& codecs_id, const LogCB& log_cb);
+ const std::string& codecs_id,
+ const scoped_refptr<MediaLog>& media_log);
struct CodecInfo {
enum Type {
@@ -51,7 +52,8 @@ struct CodecInfo {
HISTOGRAM_EAC3,
HISTOGRAM_MP3,
HISTOGRAM_OPUS,
- HISTOGRAM_MAX = HISTOGRAM_OPUS // Must be equal to largest logged entry.
+ HISTOGRAM_HEVC,
+ HISTOGRAM_MAX = HISTOGRAM_HEVC // Must be equal to largest logged entry.
};
const char* pattern;
@@ -62,7 +64,7 @@ struct CodecInfo {
typedef StreamParser* (*ParserFactoryFunction)(
const std::vector<std::string>& codecs,
- const LogCB& log_cb);
+ const scoped_refptr<MediaLog>& media_log);
struct SupportedTypeInfo {
const char* type;
@@ -93,9 +95,8 @@ static const CodecInfo* kAudioWebMCodecs[] = {
NULL
};
-static StreamParser* BuildWebMParser(
- const std::vector<std::string>& codecs,
- const LogCB& log_cb) {
+static StreamParser* BuildWebMParser(const std::vector<std::string>& codecs,
+ const scoped_refptr<MediaLog>& media_log) {
return new WebMStreamParser();
}
@@ -106,7 +107,7 @@ static const int kAACSBRObjectType = 5;
static const int kAACPSObjectType = 29;
static int GetMP4AudioObjectType(const std::string& codec_id,
- const LogCB& log_cb) {
+ const scoped_refptr<MediaLog>& media_log) {
// From RFC 6381 section 3.3 (ISO Base Media File Format Name Space):
// When the first element of a ['codecs' parameter value] is 'mp4a' ...,
// the second element is a hexadecimal representation of the MP4 Registration
@@ -124,21 +125,23 @@ static int GetMP4AudioObjectType(const std::string& codec_id,
return audio_object_type;
}
- MEDIA_LOG(DEBUG, log_cb) << "Malformed mimetype codec '" << codec_id << "'";
+ MEDIA_LOG(DEBUG, media_log) << "Malformed mimetype codec '" << codec_id
+ << "'";
return -1;
}
-bool ValidateMP4ACodecID(const std::string& codec_id, const LogCB& log_cb) {
- int audio_object_type = GetMP4AudioObjectType(codec_id, log_cb);
+bool ValidateMP4ACodecID(const std::string& codec_id,
+ const scoped_refptr<MediaLog>& media_log) {
+ int audio_object_type = GetMP4AudioObjectType(codec_id, media_log);
if (audio_object_type == kAACLCObjectType ||
audio_object_type == kAACSBRObjectType ||
audio_object_type == kAACPSObjectType) {
return true;
}
- MEDIA_LOG(DEBUG, log_cb) << "Unsupported audio object type "
- << audio_object_type << " in codec '" << codec_id
- << "'";
+ MEDIA_LOG(DEBUG, media_log) << "Unsupported audio object type "
+ << audio_object_type << " in codec '" << codec_id
+ << "'";
return false;
}
@@ -146,6 +149,12 @@ static const CodecInfo kH264AVC1CodecInfo = { "avc1.*", CodecInfo::VIDEO, NULL,
CodecInfo::HISTOGRAM_H264 };
static const CodecInfo kH264AVC3CodecInfo = { "avc3.*", CodecInfo::VIDEO, NULL,
CodecInfo::HISTOGRAM_H264 };
+#if defined(ENABLE_HEVC_DEMUXING)
+static const CodecInfo kHEVCHEV1CodecInfo = { "hev1.*", CodecInfo::VIDEO, NULL,
+ CodecInfo::HISTOGRAM_HEVC };
+static const CodecInfo kHEVCHVC1CodecInfo = { "hvc1.*", CodecInfo::VIDEO, NULL,
+ CodecInfo::HISTOGRAM_HEVC };
+#endif
static const CodecInfo kMPEG4AACCodecInfo = { "mp4a.40.*", CodecInfo::AUDIO,
&ValidateMP4ACodecID,
CodecInfo::HISTOGRAM_MPEG4AAC };
@@ -156,6 +165,10 @@ static const CodecInfo kMPEG2AACLCCodecInfo = { "mp4a.67", CodecInfo::AUDIO,
static const CodecInfo* kVideoMP4Codecs[] = {
&kH264AVC1CodecInfo,
&kH264AVC3CodecInfo,
+#if defined(ENABLE_HEVC_DEMUXING)
+ &kHEVCHEV1CodecInfo,
+ &kHEVCHVC1CodecInfo,
+#endif
&kMPEG4AACCodecInfo,
&kMPEG2AACLCCodecInfo,
NULL
@@ -167,8 +180,8 @@ static const CodecInfo* kAudioMP4Codecs[] = {
NULL
};
-static StreamParser* BuildMP4Parser(
- const std::vector<std::string>& codecs, const LogCB& log_cb) {
+static StreamParser* BuildMP4Parser(const std::vector<std::string>& codecs,
+ const scoped_refptr<MediaLog>& media_log) {
std::set<int> audio_object_types;
bool has_sbr = false;
@@ -177,7 +190,7 @@ static StreamParser* BuildMP4Parser(
if (base::MatchPattern(codec_id, kMPEG2AACLCCodecInfo.pattern)) {
audio_object_types.insert(mp4::kISO_13818_7_AAC_LC);
} else if (base::MatchPattern(codec_id, kMPEG4AACCodecInfo.pattern)) {
- int audio_object_type = GetMP4AudioObjectType(codec_id, log_cb);
+ int audio_object_type = GetMP4AudioObjectType(codec_id, media_log);
DCHECK_GT(audio_object_type, 0);
audio_object_types.insert(mp4::kISO_14496_3);
@@ -201,8 +214,8 @@ static const CodecInfo* kAudioMP3Codecs[] = {
NULL
};
-static StreamParser* BuildMP3Parser(
- const std::vector<std::string>& codecs, const LogCB& log_cb) {
+static StreamParser* BuildMP3Parser(const std::vector<std::string>& codecs,
+ const scoped_refptr<MediaLog>& media_log) {
return new MPEG1AudioStreamParser();
}
@@ -213,8 +226,8 @@ static const CodecInfo* kAudioADTSCodecs[] = {
NULL
};
-static StreamParser* BuildADTSParser(
- const std::vector<std::string>& codecs, const LogCB& log_cb) {
+static StreamParser* BuildADTSParser(const std::vector<std::string>& codecs,
+ const scoped_refptr<MediaLog>& media_log) {
return new ADTSStreamParser();
}
@@ -227,13 +240,13 @@ static const CodecInfo* kVideoMP2TCodecs[] = {
NULL
};
-static StreamParser* BuildMP2TParser(
- const std::vector<std::string>& codecs, const media::LogCB& log_cb) {
+static StreamParser* BuildMP2TParser(const std::vector<std::string>& codecs,
+ const scoped_refptr<MediaLog>& media_log) {
bool has_sbr = false;
for (size_t i = 0; i < codecs.size(); ++i) {
std::string codec_id = codecs[i];
if (base::MatchPattern(codec_id, kMPEG4AACCodecInfo.pattern)) {
- int audio_object_type = GetMP4AudioObjectType(codec_id, log_cb);
+ int audio_object_type = GetMP4AudioObjectType(codec_id, media_log);
if (audio_object_type == kAACSBRObjectType ||
audio_object_type == kAACPSObjectType) {
has_sbr = true;
@@ -320,11 +333,10 @@ static bool VerifyCodec(
static bool CheckTypeAndCodecs(
const std::string& type,
const std::vector<std::string>& codecs,
- const LogCB& log_cb,
+ const scoped_refptr<MediaLog>& media_log,
ParserFactoryFunction* factory_function,
std::vector<CodecInfo::HistogramTag>* audio_codecs,
std::vector<CodecInfo::HistogramTag>* video_codecs) {
-
// Search for the SupportedTypeInfo for |type|.
for (size_t i = 0; i < arraysize(kSupportedTypeInfo); ++i) {
const SupportedTypeInfo& type_info = kSupportedTypeInfo[i];
@@ -339,8 +351,8 @@ static bool CheckTypeAndCodecs(
return true;
}
- MEDIA_LOG(DEBUG, log_cb) << "A codecs parameter must be provided for '"
- << type << "'";
+ MEDIA_LOG(DEBUG, media_log)
+ << "A codecs parameter must be provided for '" << type << "'";
return false;
}
@@ -353,7 +365,7 @@ static bool CheckTypeAndCodecs(
for (int k = 0; type_info.codecs[k]; ++k) {
if (base::MatchPattern(codec_id, type_info.codecs[k]->pattern) &&
(!type_info.codecs[k]->validator ||
- type_info.codecs[k]->validator(codec_id, log_cb))) {
+ type_info.codecs[k]->validator(codec_id, media_log))) {
found_codec =
VerifyCodec(type_info.codecs[k], audio_codecs, video_codecs);
break; // Since only 1 pattern will match, no need to check others.
@@ -361,8 +373,9 @@ static bool CheckTypeAndCodecs(
}
if (!found_codec) {
- MEDIA_LOG(DEBUG, log_cb) << "Codec '" << codec_id
- << "' is not supported for '" << type << "'";
+ MEDIA_LOG(DEBUG, media_log) << "Codec '" << codec_id
+ << "' is not supported for '" << type
+ << "'";
return false;
}
}
@@ -381,13 +394,13 @@ static bool CheckTypeAndCodecs(
bool StreamParserFactory::IsTypeSupported(
const std::string& type, const std::vector<std::string>& codecs) {
- return CheckTypeAndCodecs(type, codecs, LogCB(), NULL, NULL, NULL);
+ return CheckTypeAndCodecs(type, codecs, new MediaLog(), NULL, NULL, NULL);
}
scoped_ptr<StreamParser> StreamParserFactory::Create(
const std::string& type,
const std::vector<std::string>& codecs,
- const LogCB& log_cb,
+ const scoped_refptr<MediaLog>& media_log,
bool* has_audio,
bool* has_video) {
scoped_ptr<StreamParser> stream_parser;
@@ -397,12 +410,8 @@ scoped_ptr<StreamParser> StreamParserFactory::Create(
*has_audio = false;
*has_video = false;
- if (CheckTypeAndCodecs(type,
- codecs,
- log_cb,
- &factory_function,
- &audio_codecs,
- &video_codecs)) {
+ if (CheckTypeAndCodecs(type, codecs, media_log, &factory_function,
+ &audio_codecs, &video_codecs)) {
*has_audio = !audio_codecs.empty();
*has_video = !video_codecs.empty();
@@ -419,7 +428,7 @@ scoped_ptr<StreamParser> StreamParserFactory::Create(
CodecInfo::HISTOGRAM_MAX + 1);
}
- stream_parser.reset(factory_function(codecs, log_cb));
+ stream_parser.reset(factory_function(codecs, media_log));
}
return stream_parser.Pass();
diff --git a/chromium/media/filters/stream_parser_factory.h b/chromium/media/filters/stream_parser_factory.h
index 1f9ad347d12..3bbd58ab9be 100644
--- a/chromium/media/filters/stream_parser_factory.h
+++ b/chromium/media/filters/stream_parser_factory.h
@@ -24,8 +24,8 @@ class MEDIA_EXPORT StreamParserFactory {
const std::string& type, const std::vector<std::string>& codecs);
// Creates a new StreamParser object if the specified |type| and |codecs| list
- // are supported. |log_cb| can be used to report errors if there is something
- // wrong with |type| or the codec IDs in |codecs|.
+ // are supported. |media_log| can be used to report errors if there is
+ // something wrong with |type| or the codec IDs in |codecs|.
// Returns a new StreamParser object if |type| and all codecs listed in
// |codecs| are supported.
// |has_audio| is true if an audio codec was specified.
@@ -33,8 +33,11 @@ class MEDIA_EXPORT StreamParserFactory {
// Returns NULL otherwise. The values of |has_audio| and |has_video| are
// undefined.
static scoped_ptr<StreamParser> Create(
- const std::string& type, const std::vector<std::string>& codecs,
- const LogCB& log_cb, bool* has_audio, bool* has_video);
+ const std::string& type,
+ const std::vector<std::string>& codecs,
+ const scoped_refptr<MediaLog>& media_log,
+ bool* has_audio,
+ bool* has_video);
};
} // namespace media
diff --git a/chromium/media/filters/video_cadence_estimator.cc b/chromium/media/filters/video_cadence_estimator.cc
index bad4fd056b7..fbbe836f33e 100644
--- a/chromium/media/filters/video_cadence_estimator.cc
+++ b/chromium/media/filters/video_cadence_estimator.cc
@@ -5,6 +5,7 @@
#include "media/filters/video_cadence_estimator.h"
#include <algorithm>
+#include <cmath>
#include <iterator>
#include <limits>
#include <string>
@@ -25,6 +26,42 @@ static void HistogramCadenceChangeCount(int cadence_changes) {
kCadenceChangeMax);
}
+// Construct a Cadence vector, a vector of integers satisfying the following
+// conditions:
+// 1. Size is |n|.
+// 2. Sum of entries is |k|.
+// 3. Each entry is in {|k|/|n|, |k|/|n| + 1}.
+// 4. Distribution of |k|/|n| and |k|/|n| + 1 is as even as possible.
+VideoCadenceEstimator::Cadence ConstructCadence(int k, int n) {
+ const int quotient = k / n;
+ std::vector<int> output(n, 0);
+
+ // Fill the vector entries with |quotient| or |quotient + 1|, and make sure
+ // the two values are distributed as evenly as possible.
+ int target_accumulate = 0;
+ int actual_accumulate = 0;
+ for (int i = 0; i < n; ++i) {
+ // After each loop
+ // target_accumulate = (i + 1) * k
+ // actual_accumulate = \sum_{j = 0}^i {n * V[j]} where V is output vector
+ // We want to make actual_accumulate as close to target_accumulate as
+ // possible.
+ // One exception is that in case k < n, we always want the vector to start
+ // with 1 to make sure the first frame is always rendered.
+ // (To avoid float calculation, we use scaled version of accumulated count)
+ target_accumulate += k;
+ const int target_current = target_accumulate - actual_accumulate;
+ if ((i == 0 && k < n) || target_current * 2 >= n * (quotient * 2 + 1)) {
+ output[i] = quotient + 1;
+ } else {
+ output[i] = quotient;
+ }
+ actual_accumulate += output[i] * n;
+ }
+
+ return output;
+}
+
VideoCadenceEstimator::VideoCadenceEstimator(
base::TimeDelta minimum_time_until_max_drift)
: cadence_hysteresis_threshold_(
@@ -116,109 +153,58 @@ VideoCadenceEstimator::Cadence VideoCadenceEstimator::CalculateCadence(
base::TimeDelta frame_duration,
base::TimeDelta max_acceptable_drift,
base::TimeDelta* time_until_max_drift) const {
- // See if we can find a cadence which fits the data.
- Cadence result;
- if (CalculateOneFrameCadence(render_interval, frame_duration,
- max_acceptable_drift, &result,
- time_until_max_drift)) {
- DCHECK_EQ(1u, result.size());
- } else if (CalculateFractionalCadence(render_interval, frame_duration,
- max_acceptable_drift, &result,
- time_until_max_drift)) {
- DCHECK(!result.empty());
- } else if (CalculateOneFrameCadence(render_interval, frame_duration * 2,
- max_acceptable_drift, &result,
- time_until_max_drift)) {
- // By finding cadence for double the frame duration, we're saying there
- // exist two integers a and b, where a > b and a + b = |result|; this
- // matches all patterns which regularly have half a frame per render
- // interval; i.e. 24fps in 60hz.
- DCHECK_EQ(1u, result.size());
-
- // While we may find a two pattern cadence, sometimes one extra frame
- // duration is enough to allow a match for 1-frame cadence if the
- // |time_until_max_drift| was on the edge.
- //
- // All 2-frame cadence values should be odd, so we can detect this and fall
- // back to 1-frame cadence when this occurs.
- if (result[0] & 1) {
- result[0] = std::ceil(result[0] / 2.0);
- result.push_back(result[0] - 1);
- } else {
- result[0] /= 2;
- }
- }
- return result;
-}
+ DCHECK_LT(max_acceptable_drift, minimum_time_until_max_drift_);
-bool VideoCadenceEstimator::CalculateOneFrameCadence(
- base::TimeDelta render_interval,
- base::TimeDelta frame_duration,
- base::TimeDelta max_acceptable_drift,
- Cadence* cadence,
- base::TimeDelta* time_until_max_drift) const {
- DCHECK(cadence->empty());
-
- // The perfect cadence is the number of render intervals per frame, while the
- // clamped cadence is the nearest matching integer value.
- //
- // As mentioned in the introduction, |perfect_cadence| is the ratio of the
- // frame duration to render interval length; while |clamped_cadence| is the
- // nearest integer value to |perfect_cadence|.
+ // The perfect cadence is the number of render intervals per frame.
const double perfect_cadence =
frame_duration.InSecondsF() / render_interval.InSecondsF();
- const int clamped_cadence = perfect_cadence + 0.5;
- if (!clamped_cadence)
- return false;
- // For cadence based rendering the actual frame duration is just the frame
- // duration, while the |rendered_frame_duration| is how long the frame would
- // be displayed for if we rendered it |clamped_cadence| times.
- const base::TimeDelta rendered_frame_duration =
- clamped_cadence * render_interval;
- if (!IsAcceptableCadence(rendered_frame_duration, frame_duration,
- max_acceptable_drift, time_until_max_drift)) {
- return false;
+ // We want to construct a cadence pattern to approximate the perfect cadence
+ // while ensuring error doesn't accumulate too quickly.
+ const double drift_ratio = max_acceptable_drift.InSecondsF() /
+ minimum_time_until_max_drift_.InSecondsF();
+ const double minimum_acceptable_cadence =
+ perfect_cadence / (1.0 + drift_ratio);
+ const double maximum_acceptable_cadence =
+ perfect_cadence / (1.0 - drift_ratio);
+
+ // We've arbitrarily chosen the maximum allowable cadence length as 5. It's
+ // proven sufficient to support most standard frame and render rates, while
+ // being small enough that small frame and render timing errors don't render
+ // it useless.
+ const int kMaxCadenceSize = 5;
+
+ double best_error = 0;
+ int best_n = 0;
+ int best_k = 0;
+ for (int n = 1; n <= kMaxCadenceSize; ++n) {
+ // A cadence pattern only exists if there exists an integer K such that K/N
+ // is between |minimum_acceptable_cadence| and |maximum_acceptable_cadence|.
+ // The best pattern is the one with the smallest error over time relative to
+ // the |perfect_cadence|.
+ if (std::floor(minimum_acceptable_cadence * n) <
+ std::floor(maximum_acceptable_cadence * n)) {
+ const int k = round(perfect_cadence * n);
+
+ const double error = std::fabs(1.0 - perfect_cadence * n / k);
+
+ // Prefer the shorter cadence pattern unless a longer one "significantly"
+ // reduces the error.
+ if (!best_n || error < best_error * 0.99) {
+ best_error = error;
+ best_k = k;
+ best_n = n;
+ }
+ }
}
- cadence->push_back(clamped_cadence);
- return true;
-}
-
-bool VideoCadenceEstimator::CalculateFractionalCadence(
- base::TimeDelta render_interval,
- base::TimeDelta frame_duration,
- base::TimeDelta max_acceptable_drift,
- Cadence* cadence,
- base::TimeDelta* time_until_max_drift) const {
- DCHECK(cadence->empty());
-
- // Fractional cadence allows us to see if we have a cadence which would look
- // best if we consistently drop the same frames.
- //
- // In this case, the perfect cadence is the number of frames per render
- // interval, while the clamped cadence is the nearest integer value.
- const double perfect_cadence =
- render_interval.InSecondsF() / frame_duration.InSecondsF();
- const int clamped_cadence = perfect_cadence + 0.5;
- if (!clamped_cadence)
- return false;
+ if (!best_n) return Cadence();
- // For fractional cadence, the rendered duration of each frame is just the
- // render interval. While the actual frame duration is the total duration of
- // all the frames we would end up dropping during that time.
- const base::TimeDelta actual_frame_duration =
- clamped_cadence * frame_duration;
- if (!IsAcceptableCadence(render_interval, actual_frame_duration,
- max_acceptable_drift, time_until_max_drift)) {
- return false;
- }
+ // If we've found a solution.
+ Cadence best_result = ConstructCadence(best_k, best_n);
+ *time_until_max_drift = max_acceptable_drift / best_error;
- // Fractional cadence means we render the first of |clamped_cadence| frames
- // and drop |clamped_cadence| - 1 frames.
- cadence->insert(cadence->begin(), clamped_cadence, 0);
- (*cadence)[0] = 1;
- return true;
+ return best_result;
}
std::string VideoCadenceEstimator::CadenceToString(
@@ -234,28 +220,4 @@ std::string VideoCadenceEstimator::CadenceToString(
return os.str();
}
-bool VideoCadenceEstimator::IsAcceptableCadence(
- base::TimeDelta rendered_frame_duration,
- base::TimeDelta actual_frame_duration,
- base::TimeDelta max_acceptable_drift,
- base::TimeDelta* time_until_max_drift) const {
- if (rendered_frame_duration == actual_frame_duration)
- return true;
-
- // Compute how long it'll take to exhaust the drift if frames are rendered for
- // |rendered_frame_duration| instead of |actual_frame_duration|.
- const double duration_delta =
- (rendered_frame_duration - actual_frame_duration)
- .magnitude()
- .InMicroseconds();
- const int64 frames_until_drift_exhausted =
- std::ceil(max_acceptable_drift.InMicroseconds() / duration_delta);
-
- // If the time until a frame would be repeated or dropped is greater than our
- // limit of acceptability, the cadence is acceptable.
- *time_until_max_drift =
- rendered_frame_duration * frames_until_drift_exhausted;
- return *time_until_max_drift >= minimum_time_until_max_drift_;
-}
-
} // namespace media
diff --git a/chromium/media/filters/video_cadence_estimator.h b/chromium/media/filters/video_cadence_estimator.h
index 89b2436c0df..b1a79e6a002 100644
--- a/chromium/media/filters/video_cadence_estimator.h
+++ b/chromium/media/filters/video_cadence_estimator.h
@@ -17,8 +17,8 @@ namespace media {
// durations over time.
//
// Cadence is the ideal repeating frame pattern for a group of frames; currently
-// VideoCadenceEstimator supports 1-frame ([N]), 2-frame ([3:2]), and N-frame
-// fractional ([1:0:...:0]) cadences. Details on what this means are below.
+// VideoCadenceEstimator supports N-frame ([a1:a2:..:aN]) cadences where N <= 5.
+// Details on what this means are below.
//
// The perfect cadence of a set of frames is the ratio of the frame duration to
// render interval length. I.e. for 30fps in 60Hz the cadence would be (1/30) /
@@ -38,37 +38,33 @@ namespace media {
// shortening or lengthening the actual rendered frame duration. Doing so
// ensures each frame gets an optimal amount of display time.
//
+// For N-frame cadence, the idea is similar, we just round the perfect cadence
+// to some K/N, where K is an integer, and distribute [floor(K/N), floor(K/N)+1]
+// into the cadence vector as evenly as possible. For example, 23.97fps in
+// 60Hz, the perfect cadence is 2.50313, we can round it to 2.5 = 5/2, and we
+// can then construct the cadence vector as [2:3].
+//
// The delta between the perfect cadence and the rounded cadence leads to drift
// over time of the actual VideoFrame timestamp relative to its rendered time,
// so we perform some calculations to ensure we only use a cadence when it will
// take some time to drift an undesirable amount; see CalculateCadence() for
// details on how this calculation is made.
//
-// 2-frame cadence is an extension of 1-frame cadence. Consider the case of
-// 24fps in 60Hz, which has a perfect cadence of 2.5; rounding up to a cadence
-// of 3 would cause drift to accumulate unusably fast. A better approximation
-// of this cadence would be [3:2].
-//
-// Fractional cadence is a special case of N-frame cadence which can be used
-// when the frame duration is shorter than the render interval; e.g. 120fps in
-// 60Hz. In this case, the first frame in each group of N frames is displayed
-// once, while the next N - 1 frames are dropped; i.e. the cadence is of the
-// form [1:0:..:0]. Using the previous example N = 120/60 = 2, which means the
-// cadence would be [1:0]. See CalculateFractionalCadence() for more details.
-//
// In practice this works out to the following for common setups if we use
// cadence based selection:
//
// 29.5fps in 60Hz, ~17ms max drift => exhausted in ~1 second.
// 29.9fps in 60Hz, ~17ms max drift => exhausted in ~16.4 seconds.
-// 24fps in 60Hz, ~21ms max drift => exhausted in ~0.15 seconds.
-// 25fps in 60Hz, 20ms max drift => exhausted in ~4.0 seconds.
-// 59.9fps in 60Hz, ~8.3ms max drift => exhausted in ~8.2 seconds.
+// 24fps in 59.9Hz, ~21ms max drift => exhausted in ~12.6 seconds.
+// 24.9fps in 60Hz, ~20ms max drift => exhausted in ~4.0 seconds.
+// 59.9fps in 60Hz, ~8.3ms max drift => exhausted in ~8.2 seconds.
// 24.9fps in 50Hz, ~20ms max drift => exhausted in ~20.5 seconds.
-// 120fps in 59.9Hz, ~8.3ms max drift => exhausted in ~8.2 seconds.
+// 120fps in 59.9Hz, ~8.3ms max drift => exhausted in ~8.2 seconds.
//
class MEDIA_EXPORT VideoCadenceEstimator {
public:
+ using Cadence = std::vector<int>;
+
// As mentioned in the introduction, the determination of whether to clamp to
// a given cadence is based on how long it takes before a frame would have to
// be dropped or repeated to compensate for reaching the maximum acceptable
@@ -116,54 +112,20 @@ class MEDIA_EXPORT VideoCadenceEstimator {
std::string GetCadenceForTesting() const { return CadenceToString(cadence_); }
private:
- using Cadence = std::vector<int>;
-
- // Attempts to find a 1-frame, 2-frame, or N-frame fractional cadence; returns
- // the cadence vector if cadence is found and sets |time_until_max_drift| for
- // the computed cadence.
+ // Attempts to find an N-frame cadence. Returns the cadence vector if cadence
+ // is found and sets |time_until_max_drift| for the computed cadence. If
+ // multiple cadences satisfying the max drift constraint exist, we are going
+ // to return the one with largest |time_until_max_drift|.
+ // For details on the math and algorithm, see https://goo.gl/QK0vbz
Cadence CalculateCadence(base::TimeDelta render_interval,
base::TimeDelta frame_duration,
base::TimeDelta max_acceptable_drift,
base::TimeDelta* time_until_max_drift) const;
- // Calculates the clamped cadence for the given |render_interval| and
- // |frame_duration|, then calculates how long that cadence can be used before
- // exhausting |max_acceptable_drift|. If the time until exhaustion is greater
- // than |minimum_time_until_max_drift_|, returns true and sets |cadence| to
- // the clamped cadence. If the clamped cadence is unusable, |cadence| will be
- // set to zero.
- //
- // Sets |time_until_max_drift| to the computed glitch time. Set to zero if
- // the clamped cadence is unusable.
- bool CalculateOneFrameCadence(base::TimeDelta render_interval,
- base::TimeDelta frame_duration,
- base::TimeDelta max_acceptable_drift,
- Cadence* cadence,
- base::TimeDelta* time_until_max_drift) const;
-
- // Similar to CalculateCadence() except it tries to find the ideal number of
- // frames which can fit into a |render_interval|; which means doing the same
- // calculations as CalculateCadence() but with the ratio of |render_interval|
- // to |frame_duration| instead of the other way around.
- bool CalculateFractionalCadence(base::TimeDelta render_interval,
- base::TimeDelta frame_duration,
- base::TimeDelta max_acceptable_drift,
- Cadence* cadence,
- base::TimeDelta* time_until_max_drift) const;
-
// Converts a cadence vector into a human readable string of the form
- // "[a, b, ..., z]".
+ // "[a: b: ...: z]".
std::string CadenceToString(const Cadence& cadence) const;
- // Returns true if the drift of the rendered frame duration versus its actual
- // frame duration take longer than |minimum_time_until_max_drift_| to exhaust
- // |max_acceptable_drift|. |time_until_max_drift| is set to how long it will
- // take before a glitch (frame drop or repeat occurs).
- bool IsAcceptableCadence(base::TimeDelta rendered_frame_duration,
- base::TimeDelta actual_frame_duration,
- base::TimeDelta max_acceptable_drift,
- base::TimeDelta* time_until_max_drift) const;
-
// The approximate best N-frame cadence for all frames seen thus far; updated
// by UpdateCadenceEstimate(). Empty when no cadence has been detected.
Cadence cadence_;
diff --git a/chromium/media/filters/video_cadence_estimator_unittest.cc b/chromium/media/filters/video_cadence_estimator_unittest.cc
index d96700436c6..a5e498cbdde 100644
--- a/chromium/media/filters/video_cadence_estimator_unittest.cc
+++ b/chromium/media/filters/video_cadence_estimator_unittest.cc
@@ -25,13 +25,13 @@ static base::TimeDelta Interval(double hertz) {
}
std::vector<int> CreateCadenceFromString(const std::string& cadence) {
- std::vector<std::string> tokens;
CHECK_EQ('[', cadence[0]);
CHECK_EQ(']', cadence[cadence.length() - 1]);
- base::SplitString(cadence.substr(1, cadence.length() - 2), ':', &tokens);
std::vector<int> result;
- for (const auto& token : tokens) {
+ for (const std::string& token :
+ base::SplitString(cadence.substr(1, cadence.length() - 2),
+ ":", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL)) {
int cadence_value = 0;
CHECK(base::StringToInt(token, &cadence_value)) << token;
result.push_back(cadence_value);
@@ -51,16 +51,20 @@ static void VerifyCadenceVector(VideoCadenceEstimator* estimator,
CreateCadenceFromString(expected_cadence);
estimator->Reset();
- const base::TimeDelta acceptable_drift = Interval(frame_hertz) / 2;
+ const base::TimeDelta acceptable_drift = std::max(Interval(frame_hertz) / 2,
+ Interval(render_hertz));
const bool cadence_changed = estimator->UpdateCadenceEstimate(
Interval(render_hertz), Interval(frame_hertz), acceptable_drift);
EXPECT_EQ(cadence_changed, estimator->has_cadence());
EXPECT_EQ(expected_cadence_vector.empty(), !estimator->has_cadence());
// Nothing further to test.
- if (expected_cadence_vector.empty())
+ if (expected_cadence_vector.empty() || !estimator->has_cadence())
return;
+ EXPECT_EQ(expected_cadence_vector.size(),
+ estimator->cadence_size_for_testing());
+
// Spot two cycles of the cadence.
for (size_t i = 0; i < expected_cadence_vector.size() * 2; ++i) {
ASSERT_EQ(expected_cadence_vector[i % expected_cadence_vector.size()],
@@ -75,30 +79,50 @@ TEST(VideoCadenceEstimatorTest, CadenceCalculations) {
estimator.set_cadence_hysteresis_threshold_for_testing(base::TimeDelta());
const std::string kEmptyCadence = "[]";
+ VerifyCadenceVector(&estimator, 1, NTSC(60), "[60]");
+
VerifyCadenceVector(&estimator, 24, 60, "[3:2]");
VerifyCadenceVector(&estimator, NTSC(24), 60, "[3:2]");
+ VerifyCadenceVector(&estimator, 24, NTSC(60), "[3:2]");
+
+ VerifyCadenceVector(&estimator, 25, 60, "[2:3:2:3:2]");
+ VerifyCadenceVector(&estimator, NTSC(25), 60, "[2:3:2:3:2]");
+ VerifyCadenceVector(&estimator, 25, NTSC(60), "[2:3:2:3:2]");
- VerifyCadenceVector(&estimator, 25, 60, kEmptyCadence);
- VerifyCadenceVector(&estimator, NTSC(30), 60, "[2]");
VerifyCadenceVector(&estimator, 30, 60, "[2]");
- VerifyCadenceVector(&estimator, 50, 60, kEmptyCadence);
+ VerifyCadenceVector(&estimator, NTSC(30), 60, "[2]");
+ VerifyCadenceVector(&estimator, 29.5, 60, kEmptyCadence);
+
+ VerifyCadenceVector(&estimator, 50, 60, "[1:1:2:1:1]");
+ VerifyCadenceVector(&estimator, NTSC(50), 60, "[1:1:2:1:1]");
+ VerifyCadenceVector(&estimator, 50, NTSC(60), "[1:1:2:1:1]");
+
VerifyCadenceVector(&estimator, NTSC(60), 60, "[1]");
+ VerifyCadenceVector(&estimator, 60, NTSC(60), "[1]");
+
VerifyCadenceVector(&estimator, 120, 60, "[1:0]");
+ VerifyCadenceVector(&estimator, NTSC(120), 60, "[1:0]");
+ VerifyCadenceVector(&estimator, 120, NTSC(60), "[1:0]");
+
+ // Test cases for cadence below 1.
VerifyCadenceVector(&estimator, 120, 24, "[1:0:0:0:0]");
+ VerifyCadenceVector(&estimator, 120, 48, "[1:0:0:1:0]");
+ VerifyCadenceVector(&estimator, 120, 72, "[1:0:1:0:1]");
+ VerifyCadenceVector(&estimator, 90, 60, "[1:0:1]");
// 50Hz is common in the EU.
VerifyCadenceVector(&estimator, NTSC(24), 50, kEmptyCadence);
VerifyCadenceVector(&estimator, 24, 50, kEmptyCadence);
+
VerifyCadenceVector(&estimator, NTSC(25), 50, "[2]");
VerifyCadenceVector(&estimator, 25, 50, "[2]");
- VerifyCadenceVector(&estimator, NTSC(30), 50, kEmptyCadence);
- VerifyCadenceVector(&estimator, 30, 50, kEmptyCadence);
+
+ VerifyCadenceVector(&estimator, NTSC(30), 50, "[2:1:2]");
+ VerifyCadenceVector(&estimator, 30, 50, "[2:1:2]");
+
VerifyCadenceVector(&estimator, NTSC(60), 50, kEmptyCadence);
VerifyCadenceVector(&estimator, 60, 50, kEmptyCadence);
- VerifyCadenceVector(&estimator, 25, NTSC(60), kEmptyCadence);
- VerifyCadenceVector(&estimator, 120, NTSC(60), kEmptyCadence);
- VerifyCadenceVector(&estimator, 1, NTSC(60), "[60]");
}
TEST(VideoCadenceEstimatorTest, CadenceVariesWithAcceptableDrift) {
@@ -187,18 +211,4 @@ TEST(VideoCadenceEstimatorTest, CadenceHystersisPreventsOscillation) {
EXPECT_FALSE(estimator->has_cadence());
}
-TEST(VideoCadenceEstimatorTest, TwoFrameCadenceIsActuallyOneFrame) {
- VideoCadenceEstimator estimator(
- base::TimeDelta::FromSeconds(kMinimumAcceptableTimeBetweenGlitchesSecs));
- estimator.set_cadence_hysteresis_threshold_for_testing(base::TimeDelta());
-
- const base::TimeDelta render_interval =
- base::TimeDelta::FromMicroseconds(16715);
- const base::TimeDelta frame_duration =
- base::TimeDelta::FromMicroseconds(33360);
-
- EXPECT_TRUE(estimator.UpdateCadenceEstimate(render_interval, frame_duration,
- frame_duration / 2));
-}
-
} // namespace media
diff --git a/chromium/media/filters/video_frame_stream_unittest.cc b/chromium/media/filters/video_frame_stream_unittest.cc
index b2073116a30..b0a430e487f 100644
--- a/chromium/media/filters/video_frame_stream_unittest.cc
+++ b/chromium/media/filters/video_frame_stream_unittest.cc
@@ -9,6 +9,7 @@
#include "media/base/gmock_callback_support.h"
#include "media/base/mock_filters.h"
#include "media/base/test_helpers.h"
+#include "media/base/timestamp_constants.h"
#include "media/filters/decoder_stream.h"
#include "media/filters/fake_video_decoder.h"
#include "testing/gtest/include/gtest/gtest.h"
diff --git a/chromium/media/filters/video_renderer_algorithm_unittest.cc b/chromium/media/filters/video_renderer_algorithm_unittest.cc
index 9f2a0336cf9..e8b82f2bc31 100644
--- a/chromium/media/filters/video_renderer_algorithm_unittest.cc
+++ b/chromium/media/filters/video_renderer_algorithm_unittest.cc
@@ -8,6 +8,7 @@
#include "base/bind_helpers.h"
#include "base/strings/stringprintf.h"
#include "base/test/simple_test_tick_clock.h"
+#include "media/base/timestamp_constants.h"
#include "media/base/video_frame_pool.h"
#include "media/base/wall_clock_time_source.h"
#include "media/filters/video_renderer_algorithm.h"
@@ -75,7 +76,7 @@ class VideoRendererAlgorithmTest : public testing::Test {
scoped_refptr<VideoFrame> CreateFrame(base::TimeDelta timestamp) {
const gfx::Size natural_size(8, 8);
- return frame_pool_.CreateFrame(VideoFrame::YV12, natural_size,
+ return frame_pool_.CreateFrame(PIXEL_FORMAT_YV12, natural_size,
gfx::Rect(natural_size), natural_size,
timestamp);
}
@@ -102,9 +103,28 @@ class VideoRendererAlgorithmTest : public testing::Test {
return algorithm_.cadence_estimator_.has_cadence();
}
- bool IsUsingFractionalCadence() const {
- return is_using_cadence() &&
- !algorithm_.cadence_estimator_.GetCadenceForFrame(1);
+ bool IsCadenceBelowOne() const {
+ if (!is_using_cadence())
+ return false;
+
+ size_t size = algorithm_.cadence_estimator_.cadence_size_for_testing();
+ for (size_t i = 0; i < size; ++i) {
+ if (!algorithm_.cadence_estimator_.GetCadenceForFrame(i))
+ return true;
+ }
+
+ return false;
+ }
+
+ double CadenceValue() const {
+ int num_render_intervals = 0;
+ size_t size = algorithm_.cadence_estimator_.cadence_size_for_testing();
+ for (size_t i = 0; i < size; ++i) {
+ num_render_intervals +=
+ algorithm_.cadence_estimator_.GetCadenceForFrame(i);
+ }
+
+ return (num_render_intervals + 0.0) / size;
}
size_t frames_queued() const { return algorithm_.frame_queue_.size(); }
@@ -222,16 +242,15 @@ class VideoRendererAlgorithmTest : public testing::Test {
ASSERT_NEAR(GetUsableFrameCount(deadline_max),
algorithm_.EffectiveFramesQueued(),
fresh_algorithm ? 0 : 1);
- } else if (is_using_cadence() && !IsUsingFractionalCadence()) {
+ } else if (is_using_cadence() && !IsCadenceBelowOne()) {
// If there was no glitch in the last render, the two queue sizes should
// be off by exactly one frame; i.e., the current frame doesn't count.
if (!last_render_had_glitch() && fresh_algorithm)
ASSERT_EQ(frames_queued() - 1, algorithm_.EffectiveFramesQueued());
- } else if (IsUsingFractionalCadence()) {
+ } else if (IsCadenceBelowOne()) {
// The frame estimate should be off by at most one frame.
const size_t estimated_frames_queued =
- frames_queued() /
- algorithm_.cadence_estimator_.cadence_size_for_testing();
+ std::floor(frames_queued() * CadenceValue());
ASSERT_NEAR(algorithm_.EffectiveFramesQueued(), estimated_frames_queued,
1);
}
@@ -1013,10 +1032,10 @@ TEST_F(VideoRendererAlgorithmTest, FilmCadence) {
TEST_F(VideoRendererAlgorithmTest, CadenceCalculations) {
ASSERT_EQ("[3:2]", GetCadence(24, 60));
ASSERT_EQ("[3:2]", GetCadence(NTSC(24), 60));
- ASSERT_EQ("[]", GetCadence(25, 60));
+ ASSERT_EQ("[2:3:2:3:2]", GetCadence(25, 60));
ASSERT_EQ("[2]", GetCadence(NTSC(30), 60));
ASSERT_EQ("[2]", GetCadence(30, 60));
- ASSERT_EQ("[]", GetCadence(50, 60));
+ ASSERT_EQ("[1:1:2:1:1]", GetCadence(50, 60));
ASSERT_EQ("[1]", GetCadence(NTSC(60), 60));
ASSERT_EQ("[1:0]", GetCadence(120, 60));
@@ -1025,12 +1044,12 @@ TEST_F(VideoRendererAlgorithmTest, CadenceCalculations) {
ASSERT_EQ("[]", GetCadence(24, 50));
ASSERT_EQ("[2]", GetCadence(NTSC(25), 50));
ASSERT_EQ("[2]", GetCadence(25, 50));
- ASSERT_EQ("[]", GetCadence(NTSC(30), 50));
- ASSERT_EQ("[]", GetCadence(30, 50));
+ ASSERT_EQ("[2:1:2]", GetCadence(NTSC(30), 50));
+ ASSERT_EQ("[2:1:2]", GetCadence(30, 50));
ASSERT_EQ("[]", GetCadence(NTSC(60), 50));
ASSERT_EQ("[]", GetCadence(60, 50));
- ASSERT_EQ("[]", GetCadence(25, NTSC(60)));
+ ASSERT_EQ("[2:3:2:3:2]", GetCadence(25, NTSC(60)));
ASSERT_EQ("[1:0]", GetCadence(120, NTSC(60)));
ASSERT_EQ("[60]", GetCadence(1, NTSC(60)));
}
@@ -1162,9 +1181,9 @@ TEST_F(VideoRendererAlgorithmTest, VariableFrameRateCadence) {
TickGenerator frame_tg(base::TimeTicks(), NTSC(30));
TickGenerator display_tg(tick_clock_->NowTicks(), 60);
- const double kTestRates[] = {1.0, 2, 0.215, 0.5, 1.0};
- const bool kTestRateHasCadence[arraysize(kTestRates)] = {
- true, true, false, true, true};
+ const double kTestRates[] = {1.0, 2, 0.215, 0.5, 1.0, 3.15};
+ const bool kTestRateHasCadence[arraysize(kTestRates)] = {true, true, true,
+ true, true, false};
for (size_t i = 0; i < arraysize(kTestRates); ++i) {
const double playback_rate = kTestRates[i];
diff --git a/chromium/media/filters/vp8_parser_unittest.cc b/chromium/media/filters/vp8_parser_unittest.cc
index 39a2a801032..3087ed0cfdf 100644
--- a/chromium/media/filters/vp8_parser_unittest.cc
+++ b/chromium/media/filters/vp8_parser_unittest.cc
@@ -2,12 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "base/command_line.h"
#include "base/files/memory_mapped_file.h"
#include "base/logging.h"
-#include "base/strings/string_number_conversions.h"
-#include "base/sys_byteorder.h"
#include "media/base/test_data_util.h"
+#include "media/filters/ivf_parser.h"
#include "media/filters/vp8_parser.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -15,45 +13,35 @@ namespace media {
TEST(Vp8ParserTest, StreamFileParsing) {
base::FilePath file_path = GetTestDataFilePath("test-25fps.vp8");
- // Number of frames in the test stream to be parsed.
- const int num_frames = 250;
-
base::MemoryMappedFile stream;
ASSERT_TRUE(stream.Initialize(file_path))
<< "Couldn't open stream file: " << file_path.MaybeAsASCII();
- Vp8Parser parser;
+ IvfParser ivf_parser;
+ IvfFileHeader ivf_file_header = {};
+ ASSERT_TRUE(
+ ivf_parser.Initialize(stream.data(), stream.length(), &ivf_file_header));
+ ASSERT_EQ(ivf_file_header.fourcc, 0x30385056u); // VP80
+
+ Vp8Parser vp8_parser;
+ IvfFrameHeader ivf_frame_header = {};
+ size_t num_parsed_frames = 0;
// Parse until the end of stream/unsupported stream/error in stream is found.
- int num_parsed_frames = 0;
- const uint8_t* stream_ptr = stream.data();
- size_t bytes_left = stream.length();
- // Skip IVF file header.
- const size_t kIvfStreamHeaderLen = 32;
- CHECK_GE(bytes_left, kIvfStreamHeaderLen);
- stream_ptr += kIvfStreamHeaderLen;
- bytes_left -= kIvfStreamHeaderLen;
-
- const size_t kIvfFrameHeaderLen = 12;
- while (bytes_left > kIvfFrameHeaderLen) {
+ const uint8_t* payload = nullptr;
+ while (ivf_parser.ParseNextFrame(&ivf_frame_header, &payload)) {
Vp8FrameHeader fhdr;
- uint32_t frame_size =
- base::ByteSwapToLE32(*reinterpret_cast<const uint32_t*>(stream_ptr));
- // Skip IVF frame header.
- stream_ptr += kIvfFrameHeaderLen;
- bytes_left -= kIvfFrameHeaderLen;
- ASSERT_TRUE(parser.ParseFrame(stream_ptr, frame_size, &fhdr));
+ ASSERT_TRUE(
+ vp8_parser.ParseFrame(payload, ivf_frame_header.frame_size, &fhdr));
- stream_ptr += frame_size;
- bytes_left -= frame_size;
++num_parsed_frames;
}
DVLOG(1) << "Number of successfully parsed frames before EOS: "
<< num_parsed_frames;
- EXPECT_EQ(num_frames, num_parsed_frames);
+ EXPECT_EQ(ivf_file_header.num_frames, num_parsed_frames);
}
} // namespace media
diff --git a/chromium/media/filters/vp9_parser.cc b/chromium/media/filters/vp9_parser.cc
new file mode 100644
index 00000000000..b425f198fc9
--- /dev/null
+++ b/chromium/media/filters/vp9_parser.cc
@@ -0,0 +1,689 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// This file contains an implementation of a VP9 bitstream parser.
+
+#include "media/filters/vp9_parser.h"
+
+#include "base/logging.h"
+#include "base/numerics/safe_conversions.h"
+
+namespace {
+
+const int kMaxLoopFilterLevel = 63;
+
+// Helper function for Vp9Parser::ReadTiles. Defined as get_min_log2_tile_cols
+// in spec.
+int GetMinLog2TileCols(int sb64_cols) {
+ const int kMaxTileWidthB64 = 64;
+ int min_log2 = 0;
+ while ((kMaxTileWidthB64 << min_log2) < sb64_cols)
+ min_log2++;
+ return min_log2;
+}
+
+// Helper function for Vp9Parser::ReadTiles. Defined as get_max_log2_tile_cols
+// in spec.
+int GetMaxLog2TileCols(int sb64_cols) {
+ const int kMinTileWidthB64 = 4;
+ int max_log2 = 1;
+ while ((sb64_cols >> max_log2) >= kMinTileWidthB64)
+ max_log2++;
+ return max_log2 - 1;
+}
+
+} // namespace
+
+namespace media {
+
+bool Vp9FrameHeader::IsKeyframe() const {
+ // When show_existing_frame is true, the frame header does not precede an
+ // actual frame to be decoded, so frame_type does not apply (and is not read
+ // from the stream).
+ return !show_existing_frame && frame_type == KEYFRAME;
+}
+
+Vp9Parser::FrameInfo::FrameInfo(const uint8_t* ptr, off_t size)
+ : ptr(ptr), size(size) {}
+
+Vp9Parser::Vp9Parser() {
+ Reset();
+}
+
+Vp9Parser::~Vp9Parser() {}
+
+void Vp9Parser::SetStream(const uint8_t* stream, off_t stream_size) {
+ DCHECK(stream);
+ stream_ = stream;
+ bytes_left_ = stream_size;
+ frames_.clear();
+}
+
+void Vp9Parser::Reset() {
+ stream_ = nullptr;
+ bytes_left_ = 0;
+ frames_.clear();
+
+ memset(&segmentation_, 0, sizeof(segmentation_));
+ memset(&loop_filter_, 0, sizeof(loop_filter_));
+ memset(&ref_slots_, 0, sizeof(ref_slots_));
+}
+
+uint8_t Vp9Parser::ReadProfile() {
+ uint8_t profile = 0;
+
+ // LSB first.
+ if (reader_.ReadBool())
+ profile |= 1;
+ if (reader_.ReadBool())
+ profile |= 2;
+ if (profile > 2 && reader_.ReadBool())
+ profile += 1;
+ return profile;
+}
+
+bool Vp9Parser::VerifySyncCode() {
+ const int kSyncCode = 0x498342;
+ if (reader_.ReadLiteral(8 * 3) != kSyncCode) {
+ DVLOG(1) << "Invalid frame sync code";
+ return false;
+ }
+ return true;
+}
+
+bool Vp9Parser::ReadBitDepthColorSpaceSampling(Vp9FrameHeader* fhdr) {
+ if (fhdr->profile == 2 || fhdr->profile == 3) {
+ fhdr->bit_depth = reader_.ReadBool() ? 12 : 10;
+ } else {
+ fhdr->bit_depth = 8;
+ }
+
+ fhdr->color_space = static_cast<Vp9ColorSpace>(reader_.ReadLiteral(3));
+ if (fhdr->color_space != Vp9ColorSpace::SRGB) {
+ fhdr->yuv_range = reader_.ReadBool();
+ if (fhdr->profile == 1 || fhdr->profile == 3) {
+ fhdr->subsampling_x = reader_.ReadBool() ? 1 : 0;
+ fhdr->subsampling_y = reader_.ReadBool() ? 1 : 0;
+ if (fhdr->subsampling_x == 1 && fhdr->subsampling_y == 1) {
+ DVLOG(1) << "4:2:0 color not supported in profile 1 or 3";
+ return false;
+ }
+ bool reserved = reader_.ReadBool();
+ if (reserved) {
+ DVLOG(1) << "reserved bit set";
+ return false;
+ }
+ } else {
+ fhdr->subsampling_x = fhdr->subsampling_y = 1;
+ }
+ } else {
+ if (fhdr->profile == 1 || fhdr->profile == 3) {
+ fhdr->subsampling_x = fhdr->subsampling_y = 0;
+
+ bool reserved = reader_.ReadBool();
+ if (reserved) {
+ DVLOG(1) << "reserved bit set";
+ return false;
+ }
+ } else {
+ DVLOG(1) << "4:4:4 color not supported in profile 0 or 2";
+ return false;
+ }
+ }
+
+ return true;
+}
+
+void Vp9Parser::ReadFrameSize(Vp9FrameHeader* fhdr) {
+ fhdr->width = reader_.ReadLiteral(16) + 1;
+ fhdr->height = reader_.ReadLiteral(16) + 1;
+}
+
+bool Vp9Parser::ReadFrameSizeFromRefs(Vp9FrameHeader* fhdr) {
+ for (size_t i = 0; i < kVp9NumRefsPerFrame; i++) {
+ if (reader_.ReadBool()) {
+ fhdr->width = ref_slots_[i].width;
+ fhdr->height = ref_slots_[i].height;
+
+ const int kMaxDimension = 1 << 16;
+ if (fhdr->width == 0 || fhdr->width > kMaxDimension ||
+ fhdr->height == 0 || fhdr->height > kMaxDimension) {
+ DVLOG(1) << "The size of reference frame is out of range: "
+ << ref_slots_[i].width << "," << ref_slots_[i].height;
+ return false;
+ }
+ return true;
+ }
+ }
+
+ fhdr->width = reader_.ReadLiteral(16) + 1;
+ fhdr->height = reader_.ReadLiteral(16) + 1;
+ return true;
+}
+
+void Vp9Parser::ReadDisplayFrameSize(Vp9FrameHeader* fhdr) {
+ if (reader_.ReadBool()) {
+ fhdr->display_width = reader_.ReadLiteral(16) + 1;
+ fhdr->display_height = reader_.ReadLiteral(16) + 1;
+ } else {
+ fhdr->display_width = fhdr->width;
+ fhdr->display_height = fhdr->height;
+ }
+}
+
+Vp9InterpFilter Vp9Parser::ReadInterpFilter() {
+ if (reader_.ReadBool())
+ return Vp9InterpFilter::SWICHABLE;
+
+ // The mapping table for next two bits.
+ const Vp9InterpFilter table[] = {
+ Vp9InterpFilter::EIGHTTAP_SMOOTH, Vp9InterpFilter::EIGHTTAP,
+ Vp9InterpFilter::EIGHTTAP_SHARP, Vp9InterpFilter::BILINEAR,
+ };
+ return table[reader_.ReadLiteral(2)];
+}
+
+void Vp9Parser::ReadLoopFilter() {
+ loop_filter_.filter_level = reader_.ReadLiteral(6);
+ loop_filter_.sharpness_level = reader_.ReadLiteral(3);
+ loop_filter_.mode_ref_delta_update = false;
+
+ loop_filter_.mode_ref_delta_enabled = reader_.ReadBool();
+ if (loop_filter_.mode_ref_delta_enabled) {
+ loop_filter_.mode_ref_delta_update = reader_.ReadBool();
+ if (loop_filter_.mode_ref_delta_update) {
+ for (size_t i = 0; i < Vp9LoopFilter::VP9_FRAME_MAX; i++) {
+ loop_filter_.update_ref_deltas[i] = reader_.ReadBool();
+ if (loop_filter_.update_ref_deltas[i])
+ loop_filter_.ref_deltas[i] = reader_.ReadSignedLiteral(6);
+ }
+
+ for (size_t i = 0; i < Vp9LoopFilter::kNumModeDeltas; i++) {
+ loop_filter_.update_mode_deltas[i] = reader_.ReadBool();
+ if (loop_filter_.update_mode_deltas[i])
+ loop_filter_.mode_deltas[i] = reader_.ReadLiteral(6);
+ }
+ }
+ }
+}
+
+void Vp9Parser::ReadQuantization(Vp9QuantizationParams* quants) {
+ quants->base_qindex = reader_.ReadLiteral(8);
+
+ if (reader_.ReadBool())
+ quants->y_dc_delta = reader_.ReadSignedLiteral(4);
+
+ if (reader_.ReadBool())
+ quants->uv_ac_delta = reader_.ReadSignedLiteral(4);
+
+ if (reader_.ReadBool())
+ quants->uv_dc_delta = reader_.ReadSignedLiteral(4);
+}
+
+void Vp9Parser::ReadSegmentationMap() {
+ for (size_t i = 0; i < Vp9Segmentation::kNumTreeProbs; i++) {
+ segmentation_.tree_probs[i] =
+ reader_.ReadBool() ? reader_.ReadLiteral(8) : kVp9MaxProb;
+ }
+
+ for (size_t i = 0; i < Vp9Segmentation::kNumPredictionProbs; i++)
+ segmentation_.pred_probs[i] = kVp9MaxProb;
+
+ segmentation_.temporal_update = reader_.ReadBool();
+ if (segmentation_.temporal_update) {
+ for (size_t i = 0; i < Vp9Segmentation::kNumPredictionProbs; i++) {
+ if (reader_.ReadBool())
+ segmentation_.pred_probs[i] = reader_.ReadLiteral(8);
+ }
+ }
+}
+
+// Parses per-segment feature data. Feature order matches
+// Vp9Segmentation::SegmentLevelFeature: ALT_Q (7 bits, signed),
+// ALT_LF (6 bits, signed), REF_FRAME (2 bits), SKIP (0 bits).
+void Vp9Parser::ReadSegmentationData() {
+  segmentation_.abs_delta = reader_.ReadBool();
+
+  // Magnitude bit-width and signedness for each feature, indexed by
+  // SegmentLevelFeature.
+  const int kFeatureDataBits[] = {7, 6, 2, 0};
+  const bool kFeatureDataSigned[] = {true, true, false, false};
+
+  for (size_t i = 0; i < Vp9Segmentation::kNumSegments; i++) {
+    for (size_t j = 0; j < Vp9Segmentation::SEG_LVL_MAX; j++) {
+      int8_t data = 0;
+      segmentation_.feature_enabled[i][j] = reader_.ReadBool();
+      if (segmentation_.feature_enabled[i][j]) {
+        data = reader_.ReadLiteral(kFeatureDataBits[j]);
+        // Signed features carry a separate sign bit after the
+        // magnitude.
+        if (kFeatureDataSigned[j])
+          if (reader_.ReadBool())
+            data = -data;
+      }
+      segmentation_.feature_data[i][j] = data;
+    }
+  }
+}
+
+// Parses segmentation_params() from the uncompressed header, updating
+// the persistent segmentation state. The map and data sections are
+// only present when segmentation is enabled.
+void Vp9Parser::ReadSegmentation() {
+  segmentation_.update_map = false;
+  segmentation_.update_data = false;
+
+  segmentation_.enabled = reader_.ReadBool();
+  if (segmentation_.enabled) {
+    segmentation_.update_map = reader_.ReadBool();
+    if (segmentation_.update_map)
+      ReadSegmentationMap();
+
+    segmentation_.update_data = reader_.ReadBool();
+    if (segmentation_.update_data)
+      ReadSegmentationData();
+  }
+}
+
+// Parses tile_info() from the uncompressed header.
+void Vp9Parser::ReadTiles(Vp9FrameHeader* fhdr) {
+  // Number of 64x64 superblock columns for the current frame width.
+  int sb64_cols = (fhdr->width + 63) / 64;
+
+  int min_log2_tile_cols = GetMinLog2TileCols(sb64_cols);
+  int max_log2_tile_cols = GetMaxLog2TileCols(sb64_cols);
+
+  // tile_cols_log2 is coded as a unary run of "increment" bits on top
+  // of the minimum value.
+  int max_ones = max_log2_tile_cols - min_log2_tile_cols;
+  fhdr->log2_tile_cols = min_log2_tile_cols;
+  while (max_ones-- && reader_.ReadBool())
+    fhdr->log2_tile_cols++;
+
+  // tile_rows_log2 is at most two increment bits, the second present
+  // only if the first is set (VP9 spec tile_info(); libvpx reads
+  // bit-by-bit the same way). The previous code consumed a 2-bit
+  // literal after the first bit, reading one bit too many and
+  // producing values in [-1, 2].
+  fhdr->log2_tile_rows = reader_.ReadBool() ? 1 : 0;
+  if (fhdr->log2_tile_rows)
+    fhdr->log2_tile_rows += reader_.ReadBool();
+}
+
+// Parses the uncompressed (raw-bits) portion of one frame header from
+// |stream| into |fhdr|, then refreshes the derived segmentation, loop
+// filter and reference-slot state. Returns false on a malformed or
+// truncated header. |fhdr| is zeroed by the caller (ParseNextFrame).
+bool Vp9Parser::ParseUncompressedHeader(const uint8_t* stream,
+                                        off_t frame_size,
+                                        Vp9FrameHeader* fhdr) {
+  reader_.Initialize(stream, frame_size);
+
+  fhdr->data = stream;
+  fhdr->frame_size = frame_size;
+
+  // frame_marker, always 0b10 for VP9.
+  if (reader_.ReadLiteral(2) != 0x2)
+    return false;
+
+  fhdr->profile = ReadProfile();
+  if (fhdr->profile >= kVp9MaxProfile) {
+    DVLOG(1) << "Unsupported bitstream profile";
+    return false;
+  }
+
+  // A show_existing_frame header only names a previously decoded frame
+  // to redisplay; no further header fields follow it.
+  fhdr->show_existing_frame = reader_.ReadBool();
+  if (fhdr->show_existing_frame) {
+    fhdr->frame_to_show = reader_.ReadLiteral(3);
+    fhdr->show_frame = true;
+
+    if (!reader_.IsValid()) {
+      DVLOG(1) << "parser reads beyond the end of buffer";
+      return false;
+    }
+    fhdr->uncompressed_header_size = reader_.GetBytesRead();
+    return true;
+  }
+
+  fhdr->frame_type = static_cast<Vp9FrameHeader::FrameType>(reader_.ReadBool());
+  fhdr->show_frame = reader_.ReadBool();
+  fhdr->error_resilient_mode = reader_.ReadBool();
+
+  if (fhdr->IsKeyframe()) {
+    if (!VerifySyncCode())
+      return false;
+
+    if (!ReadBitDepthColorSpaceSampling(fhdr))
+      return false;
+
+    // A keyframe refreshes every reference slot.
+    fhdr->refresh_flags = 0xff;
+
+    ReadFrameSize(fhdr);
+    ReadDisplayFrameSize(fhdr);
+  } else {
+    // intra_only stays false (caller zeroed |fhdr|) when the frame is
+    // shown.
+    if (!fhdr->show_frame)
+      fhdr->intra_only = reader_.ReadBool();
+
+    if (!fhdr->error_resilient_mode)
+      fhdr->reset_context = reader_.ReadLiteral(2);
+
+    if (fhdr->intra_only) {
+      if (!VerifySyncCode())
+        return false;
+
+      if (fhdr->profile > 0) {
+        if (!ReadBitDepthColorSpaceSampling(fhdr))
+          return false;
+      } else {
+        // Profile 0 intra-only frames are implicitly 8-bit BT.601
+        // 4:2:0.
+        fhdr->bit_depth = 8;
+        fhdr->color_space = Vp9ColorSpace::BT_601;
+        fhdr->subsampling_x = fhdr->subsampling_y = 1;
+      }
+
+      fhdr->refresh_flags = reader_.ReadLiteral(8);
+
+      ReadFrameSize(fhdr);
+      ReadDisplayFrameSize(fhdr);
+    } else {
+      fhdr->refresh_flags = reader_.ReadLiteral(8);
+
+      for (size_t i = 0; i < kVp9NumRefsPerFrame; i++) {
+        fhdr->frame_refs[i] = reader_.ReadLiteral(kVp9NumRefFramesLog2);
+        fhdr->ref_sign_biases[i] = reader_.ReadBool();
+      }
+
+      if (!ReadFrameSizeFromRefs(fhdr))
+        return false;
+      ReadDisplayFrameSize(fhdr);
+
+      fhdr->allow_high_precision_mv = reader_.ReadBool();
+      fhdr->interp_filter = ReadInterpFilter();
+    }
+  }
+
+  if (fhdr->error_resilient_mode) {
+    fhdr->frame_parallel_decoding_mode = true;
+  } else {
+    fhdr->refresh_frame_context = reader_.ReadBool();
+    fhdr->frame_parallel_decoding_mode = reader_.ReadBool();
+  }
+
+  fhdr->frame_context_idx = reader_.ReadLiteral(2);
+
+  // Keyframes and intra-only frames must not carry over state from
+  // previous frames.
+  if (fhdr->IsKeyframe() || fhdr->intra_only)
+    SetupPastIndependence();
+
+  ReadLoopFilter();
+  ReadQuantization(&fhdr->quant_params);
+  ReadSegmentation();
+
+  ReadTiles(fhdr);
+
+  fhdr->first_partition_size = reader_.ReadLiteral(16);
+  if (fhdr->first_partition_size == 0) {
+    DVLOG(1) << "invalid header size";
+    return false;
+  }
+
+  // Only now is it known whether any read above ran past the end of
+  // the buffer.
+  if (!reader_.IsValid()) {
+    DVLOG(1) << "parser reads beyond the end of buffer";
+    return false;
+  }
+  fhdr->uncompressed_header_size = reader_.GetBytesRead();
+
+  // Derive per-segment dequant values and loop filter levels from the
+  // freshly parsed parameters.
+  SetupSegmentationDequant(fhdr->quant_params);
+  SetupLoopFilter();
+
+  UpdateSlots(fhdr);
+
+  return true;
+}
+
+// Records the dimensions of every reference slot this frame refreshes,
+// so later frames can derive their size from their references.
+void Vp9Parser::UpdateSlots(const Vp9FrameHeader* fhdr) {
+  for (size_t slot = 0; slot < kVp9NumRefFrames; slot++) {
+    if (!fhdr->RefreshFlag(slot))
+      continue;
+    ref_slots_[slot].width = fhdr->width;
+    ref_slots_[slot].height = fhdr->height;
+  }
+}
+
+// Parses the next frame from the current stream buffer. On first call
+// after SetStream() this splits the buffer (which may be a superframe)
+// into individual frames, then consumes them one per call.
+Vp9Parser::Result Vp9Parser::ParseNextFrame(Vp9FrameHeader* fhdr) {
+  if (frames_.empty()) {
+    // No frames to be decoded, if there is no more stream, request more.
+    if (!stream_)
+      return kEOStream;
+
+    // New stream to be parsed, parse it and fill frames_.
+    if (!ParseSuperframe()) {
+      DVLOG(1) << "Failed parsing superframes";
+      return kInvalidStream;
+    }
+  }
+
+  DCHECK(!frames_.empty());
+  FrameInfo frame_info = frames_.front();
+  frames_.pop_front();
+
+  // Zero the header so fields that are absent from the bitstream get
+  // well-defined default values.
+  memset(fhdr, 0, sizeof(*fhdr));
+  if (!ParseUncompressedHeader(frame_info.ptr, frame_info.size, fhdr))
+    return kInvalidStream;
+
+  return kOk;
+}
+
+// Splits the current stream buffer into individual frames, filling
+// |frames_|. The buffer is either a single frame or a "superframe":
+// several frames followed by an index and a marker byte.
+bool Vp9Parser::ParseSuperframe() {
+  const uint8_t* stream = stream_;
+  off_t bytes_left = bytes_left_;
+
+  DCHECK(frames_.empty());
+
+  // Make sure we don't parse stream_ more than once.
+  stream_ = nullptr;
+  bytes_left_ = 0;
+
+  if (bytes_left < 1)
+    return false;
+
+  // If this is a superframe, the last byte in the stream will contain the
+  // superframe marker. If not, the whole buffer contains a single frame.
+  uint8_t marker = *(stream + bytes_left - 1);
+  if ((marker & 0xe0) != 0xc0) {
+    frames_.push_back(FrameInfo(stream, bytes_left));
+    return true;
+  }
+
+  DVLOG(1) << "Parsing a superframe";
+
+  // The bytes immediately before the superframe marker constitute superframe
+  // index, which stores information about sizes of each frame in it.
+  // Calculate its size and set index_ptr to the beginning of it.
+  // Marker layout: 0b110 | (bytes-per-size - 1, 2 bits) |
+  // (frame count - 1, 3 bits).
+  size_t num_frames = (marker & 0x7) + 1;
+  size_t mag = ((marker >> 3) & 0x3) + 1;
+  off_t index_size = 2 + mag * num_frames;
+
+  if (bytes_left < index_size)
+    return false;
+
+  // The index is bracketed by a copy of the marker byte at both ends.
+  const uint8_t* index_ptr = stream + bytes_left - index_size;
+  if (marker != *index_ptr)
+    return false;
+
+  ++index_ptr;
+  bytes_left -= index_size;
+
+  // Parse frame information contained in the index and add a pointer to and
+  // size of each frame to frames_.
+  for (size_t i = 0; i < num_frames; ++i) {
+    // Frame sizes are stored little-endian in |mag| bytes each.
+    uint32_t size = 0;
+    for (size_t j = 0; j < mag; ++j) {
+      size |= *index_ptr << (j * 8);
+      ++index_ptr;
+    }
+
+    if (base::checked_cast<off_t>(size) > bytes_left) {
+      DVLOG(1) << "Not enough data in the buffer for frame " << i;
+      return false;
+    }
+
+    frames_.push_back(FrameInfo(stream, size));
+    stream += size;
+    bytes_left -= size;
+
+    DVLOG(1) << "Frame " << i << ", size: " << size;
+  }
+
+  return true;
+}
+
+// Restores the loop filter reference/mode deltas to their bitstream
+// defaults, as required when past independence is set up.
+void Vp9Parser::ResetLoopfilter() {
+  loop_filter_.mode_ref_delta_enabled = true;
+  loop_filter_.mode_ref_delta_update = true;
+
+  const int8_t kDefaultRefDeltas[] = {1, 0, -1, -1};
+  static_assert(
+      arraysize(kDefaultRefDeltas) == arraysize(loop_filter_.ref_deltas),
+      "ref_deltas arrays of incorrect size");
+  for (size_t slot = 0; slot < arraysize(loop_filter_.ref_deltas); slot++)
+    loop_filter_.ref_deltas[slot] = kDefaultRefDeltas[slot];
+
+  memset(loop_filter_.mode_deltas, 0, sizeof(loop_filter_.mode_deltas));
+}
+
+// Clears all segmentation state and restores default loop filter
+// deltas. Invoked for keyframes and intra-only frames, which must not
+// depend on state carried over from previous frames.
+void Vp9Parser::SetupPastIndependence() {
+  // Value-initialization zeroes every member of the trivially-copyable
+  // struct, equivalent to the previous memset().
+  segmentation_ = Vp9Segmentation();
+  ResetLoopfilter();
+}
+
+// Quantizer index range and the DC/AC dequantization lookup tables for
+// 8-bit depth, indexed by quantizer index in [0, QINDEX_RANGE).
+const size_t QINDEX_RANGE = 256;
+// Dequantization values for DC coefficients.
+const int16_t kDcQLookup[QINDEX_RANGE] = {
+  4,       8,    8,    9,    10,   11,   12,   12,
+  13,     14,   15,   16,   17,   18,   19,   19,
+  20,     21,   22,   23,   24,   25,   26,   26,
+  27,     28,   29,   30,   31,   32,   32,   33,
+  34,     35,   36,   37,   38,   38,   39,   40,
+  41,     42,   43,   43,   44,   45,   46,   47,
+  48,     48,   49,   50,   51,   52,   53,   53,
+  54,     55,   56,   57,   57,   58,   59,   60,
+  61,     62,   62,   63,   64,   65,   66,   66,
+  67,     68,   69,   70,   70,   71,   72,   73,
+  74,     74,   75,   76,   77,   78,   78,   79,
+  80,     81,   81,   82,   83,   84,   85,   85,
+  87,     88,   90,   92,   93,   95,   96,   98,
+  99,    101,  102,  104,  105,  107,  108,  110,
+  111,   113,  114,  116,  117,  118,  120,  121,
+  123,   125,  127,  129,  131,  134,  136,  138,
+  140,   142,  144,  146,  148,  150,  152,  154,
+  156,   158,  161,  164,  166,  169,  172,  174,
+  177,   180,  182,  185,  187,  190,  192,  195,
+  199,   202,  205,  208,  211,  214,  217,  220,
+  223,   226,  230,  233,  237,  240,  243,  247,
+  250,   253,  257,  261,  265,  269,  272,  276,
+  280,   284,  288,  292,  296,  300,  304,  309,
+  313,   317,  322,  326,  330,  335,  340,  344,
+  349,   354,  359,  364,  369,  374,  379,  384,
+  389,   395,  400,  406,  411,  417,  423,  429,
+  435,   441,  447,  454,  461,  467,  475,  482,
+  489,   497,  505,  513,  522,  530,  539,  549,
+  559,   569,  579,  590,  602,  614,  626,  640,
+  654,   668,  684,  700,  717,  736,  755,  775,
+  796,   819,  843,  869,  896,  925,  955,  988,
+  1022, 1058, 1098, 1139, 1184, 1232, 1282, 1336,
+};
+
+// Dequantization values for AC coefficients.
+const int16_t kAcQLookup[QINDEX_RANGE] = {
+  4,       8,    9,   10,   11,   12,   13,   14,
+  15,     16,   17,   18,   19,   20,   21,   22,
+  23,     24,   25,   26,   27,   28,   29,   30,
+  31,     32,   33,   34,   35,   36,   37,   38,
+  39,     40,   41,   42,   43,   44,   45,   46,
+  47,     48,   49,   50,   51,   52,   53,   54,
+  55,     56,   57,   58,   59,   60,   61,   62,
+  63,     64,   65,   66,   67,   68,   69,   70,
+  71,     72,   73,   74,   75,   76,   77,   78,
+  79,     80,   81,   82,   83,   84,   85,   86,
+  87,     88,   89,   90,   91,   92,   93,   94,
+  95,     96,   97,   98,   99,  100,  101,  102,
+  104,   106,  108,  110,  112,  114,  116,  118,
+  120,   122,  124,  126,  128,  130,  132,  134,
+  136,   138,  140,  142,  144,  146,  148,  150,
+  152,   155,  158,  161,  164,  167,  170,  173,
+  176,   179,  182,  185,  188,  191,  194,  197,
+  200,   203,  207,  211,  215,  219,  223,  227,
+  231,   235,  239,  243,  247,  251,  255,  260,
+  265,   270,  275,  280,  285,  290,  295,  300,
+  305,   311,  317,  323,  329,  335,  341,  347,
+  353,   359,  366,  373,  380,  387,  394,  401,
+  408,   416,  424,  432,  440,  448,  456,  465,
+  474,   483,  492,  501,  510,  520,  530,  540,
+  550,   560,  571,  582,  593,  604,  615,  627,
+  639,   651,  663,  676,  689,  702,  715,  729,
+  743,   757,  771,  786,  801,  816,  832,  848,
+  864,   881,  898,  915,  933,  951,  969,  988,
+  1007, 1026, 1046, 1066, 1087, 1108, 1129, 1151,
+  1173, 1196, 1219, 1243, 1267, 1292, 1317, 1343,
+  1369, 1396, 1423, 1451, 1479, 1508, 1537, 1567,
+  1597, 1628, 1660, 1692, 1725, 1759, 1793, 1828,
+};
+
+// Both tables are indexed by the same clamped quantizer index.
+static_assert(arraysize(kDcQLookup) == arraysize(kAcQLookup),
+              "quantizer lookup arrays of incorrect size");
+
+#define CLAMP_Q(q) \
+  std::min(std::max(static_cast<size_t>(0), q), arraysize(kDcQLookup) - 1)
+
+// Returns the quantizer index for segment |segid|, applying the
+// segment's ALT_Q feature data (absolute, or relative to the frame
+// base index) and clamping to the valid lookup-table range.
+size_t Vp9Parser::GetQIndex(const Vp9QuantizationParams& quant,
+                            size_t segid) const {
+  if (segmentation_.FeatureEnabled(segid, Vp9Segmentation::SEG_LVL_ALT_Q)) {
+    int8_t feature_data =
+        segmentation_.FeatureData(segid, Vp9Segmentation::SEG_LVL_ALT_Q);
+    // Compute in int first: |feature_data| may be negative, and the
+    // previous code assigned the sum directly to size_t, wrapping
+    // negative values to a huge number that CLAMP_Q then clamped to
+    // the maximum index instead of zero.
+    int q_index = segmentation_.abs_delta
+                      ? feature_data
+                      : static_cast<int>(quant.base_qindex) + feature_data;
+    return CLAMP_Q(static_cast<size_t>(std::max(q_index, 0)));
+  }
+
+  return quant.base_qindex;
+}
+
+// Precomputes per-segment DC/AC dequantization values from the lookup
+// tables. Deltas are applied in int space and clamped to the table
+// range before indexing: the previous code added signed deltas to a
+// size_t index, so a negative sum wrapped around and selected the
+// maximum quantizer instead of the minimum.
+void Vp9Parser::SetupSegmentationDequant(const Vp9QuantizationParams& quant) {
+  // Clamps a possibly-negative quantizer index to a valid table index.
+  auto clamp_q = [](int q) {
+    return std::min(static_cast<size_t>(std::max(q, 0)),
+                    arraysize(kDcQLookup) - 1);
+  };
+
+  if (segmentation_.enabled) {
+    for (size_t i = 0; i < Vp9Segmentation::kNumSegments; ++i) {
+      const int q_index = static_cast<int>(GetQIndex(quant, i));
+      segmentation_.y_dequant[i][0] =
+          kDcQLookup[clamp_q(q_index + quant.y_dc_delta)];
+      segmentation_.y_dequant[i][1] = kAcQLookup[clamp_q(q_index)];
+      segmentation_.uv_dequant[i][0] =
+          kDcQLookup[clamp_q(q_index + quant.uv_dc_delta)];
+      segmentation_.uv_dequant[i][1] =
+          kAcQLookup[clamp_q(q_index + quant.uv_ac_delta)];
+    }
+  } else {
+    // With segmentation disabled only segment 0 is used.
+    const int q_index = quant.base_qindex;
+    segmentation_.y_dequant[0][0] =
+        kDcQLookup[clamp_q(q_index + quant.y_dc_delta)];
+    segmentation_.y_dequant[0][1] = kAcQLookup[clamp_q(q_index)];
+    segmentation_.uv_dequant[0][0] =
+        kDcQLookup[clamp_q(q_index + quant.uv_dc_delta)];
+    segmentation_.uv_dequant[0][1] =
+        kAcQLookup[clamp_q(q_index + quant.uv_ac_delta)];
+  }
+}
+#undef CLAMP_Q
+
+#define CLAMP_LF(l) std::min(std::max(0, l), kMaxLoopFilterLevel)
+// Precomputes the final loop filter strength for each segment,
+// reference frame type and mode, applying the segment ALT_LF delta
+// and, when enabled, the per-reference and per-mode deltas.
+void Vp9Parser::SetupLoopFilter() {
+  if (!loop_filter_.filter_level)
+    return;
+
+  // Deltas are scaled up for filter levels of 32 and above.
+  int scale = loop_filter_.filter_level < 32 ? 1 : 2;
+
+  for (size_t i = 0; i < Vp9Segmentation::kNumSegments; ++i) {
+    int level = loop_filter_.filter_level;
+
+    if (segmentation_.FeatureEnabled(i, Vp9Segmentation::SEG_LVL_ALT_LF)) {
+      int feature_data =
+          segmentation_.FeatureData(i, Vp9Segmentation::SEG_LVL_ALT_LF);
+      level = CLAMP_LF(segmentation_.abs_delta ? feature_data
+                                               : level + feature_data);
+    }
+
+    if (!loop_filter_.mode_ref_delta_enabled) {
+      // No deltas: every (frame type, mode) entry gets the segment
+      // level.
+      memset(loop_filter_.lvl[i], level, sizeof(loop_filter_.lvl[i]));
+    } else {
+      // Intra frames use only mode 0; inter frame types combine the
+      // reference delta and the mode delta.
+      loop_filter_.lvl[i][Vp9LoopFilter::VP9_FRAME_INTRA][0] = CLAMP_LF(
+          level +
+          loop_filter_.ref_deltas[Vp9LoopFilter::VP9_FRAME_INTRA] * scale);
+      loop_filter_.lvl[i][Vp9LoopFilter::VP9_FRAME_INTRA][1] = 0;
+
+      for (size_t type = Vp9LoopFilter::VP9_FRAME_LAST;
+           type < Vp9LoopFilter::VP9_FRAME_MAX; ++type) {
+        for (size_t mode = 0; mode < Vp9LoopFilter::kNumModeDeltas; ++mode) {
+          loop_filter_.lvl[i][type][mode] =
+              CLAMP_LF(level + loop_filter_.ref_deltas[type] * scale +
+                       loop_filter_.mode_deltas[mode] * scale);
+        }
+      }
+    }
+  }
+}
+#undef CLAMP_LF
+
+} // namespace media
diff --git a/chromium/media/filters/vp9_parser.h b/chromium/media/filters/vp9_parser.h
new file mode 100644
index 00000000000..5724ae91498
--- /dev/null
+++ b/chromium/media/filters/vp9_parser.h
@@ -0,0 +1,290 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// This file contains an implementation of a VP9 bitstream parser. The main
+// purpose of this parser is to support hardware decode acceleration. Some
+// accelerators, e.g. libva which implements VA-API, require the caller
+// (chrome) to feed them parsed VP9 frame header.
+//
+// See content::VP9Decoder for example usage.
+//
+#ifndef MEDIA_FILTERS_VP9_PARSER_H_
+#define MEDIA_FILTERS_VP9_PARSER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <deque>
+
+#include "base/macros.h"
+#include "media/base/media_export.h"
+#include "media/filters/vp9_raw_bits_reader.h"
+
+namespace media {
+
+// Limits from the VP9 bitstream specification.
+const int kVp9MaxProfile = 4;
+const int kVp9NumRefFramesLog2 = 3;
+// Number of reference frame slots tracked by the decoder.
+const size_t kVp9NumRefFrames = 1 << kVp9NumRefFramesLog2;
+// Default/maximum probability value.
+const uint8_t kVp9MaxProb = 255;
+// Number of references available to an inter frame.
+const size_t kVp9NumRefsPerFrame = 3;
+
+// Color spaces, in bitstream order.
+enum class Vp9ColorSpace {
+  UNKNOWN = 0,
+  BT_601 = 1,
+  BT_709 = 2,
+  SMPTE_170 = 3,
+  SMPTE_240 = 4,
+  BT_2020 = 5,
+  RESERVED = 6,
+  SRGB = 7,
+};
+
+// Interpolation filters, in bitstream order.
+// NOTE(review): "SWICHABLE" appears to be a typo for SWITCHABLE;
+// renaming would change this public enum, so it is only flagged here.
+enum Vp9InterpFilter {
+  EIGHTTAP = 0,
+  EIGHTTAP_SMOOTH = 1,
+  EIGHTTAP_SHARP = 2,
+  BILINEAR = 3,
+  SWICHABLE = 4,
+};
+
+// Persistent segmentation state. Updated in place as frame headers are
+// parsed and exposed via Vp9Parser::GetSegmentation().
+struct MEDIA_EXPORT Vp9Segmentation {
+  static const size_t kNumSegments = 8;
+  static const size_t kNumTreeProbs = kNumSegments - 1;
+  static const size_t kNumPredictionProbs = 3;
+  // Per-segment adjustable features, in bitstream order.
+  enum SegmentLevelFeature {
+    SEG_LVL_ALT_Q = 0,
+    SEG_LVL_ALT_LF = 1,
+    SEG_LVL_REF_FRAME = 2,
+    SEG_LVL_SKIP = 3,
+    SEG_LVL_MAX
+  };
+
+  bool enabled;
+
+  bool update_map;
+  uint8_t tree_probs[kNumTreeProbs];
+  bool temporal_update;
+  uint8_t pred_probs[kNumPredictionProbs];
+
+  bool update_data;
+  // If set, feature data is absolute rather than a delta to the frame
+  // value.
+  bool abs_delta;
+  bool feature_enabled[kNumSegments][SEG_LVL_MAX];
+  int8_t feature_data[kNumSegments][SEG_LVL_MAX];
+
+  // Dequantization values precomputed per segment; index 0 is DC,
+  // index 1 is AC (see Vp9Parser::SetupSegmentationDequant()).
+  int16_t y_dequant[kNumSegments][2];
+  int16_t uv_dequant[kNumSegments][2];
+
+  bool FeatureEnabled(size_t seg_id, SegmentLevelFeature feature) const {
+    return feature_enabled[seg_id][feature];
+  }
+
+  int8_t FeatureData(size_t seg_id, SegmentLevelFeature feature) const {
+    return feature_data[seg_id][feature];
+  }
+};
+
+// Persistent loop filter state, updated by each frame header and
+// exposed via Vp9Parser::GetLoopFilter().
+struct MEDIA_EXPORT Vp9LoopFilter {
+  // Reference frame categories used to index the delta arrays.
+  enum Vp9FrameType {
+    VP9_FRAME_INTRA = 0,
+    VP9_FRAME_LAST = 1,
+    VP9_FRAME_GOLDEN = 2,
+    VP9_FRAME_ALTREF = 3,
+    VP9_FRAME_MAX = 4,
+  };
+
+  static const size_t kNumModeDeltas = 2;
+
+  uint8_t filter_level;
+  uint8_t sharpness_level;
+
+  bool mode_ref_delta_enabled;
+  bool mode_ref_delta_update;
+  bool update_ref_deltas[VP9_FRAME_MAX];
+  int8_t ref_deltas[VP9_FRAME_MAX];
+  bool update_mode_deltas[kNumModeDeltas];
+  int8_t mode_deltas[kNumModeDeltas];
+
+  // Final filter strength per segment, frame type and mode, derived in
+  // Vp9Parser::SetupLoopFilter().
+  uint8_t lvl[Vp9Segmentation::kNumSegments][VP9_FRAME_MAX][kNumModeDeltas];
+};
+
+// Quantization parameters for one frame. Zeroed together with the rest
+// of Vp9FrameHeader by Vp9Parser::ParseNextFrame().
+struct MEDIA_EXPORT Vp9QuantizationParams {
+  // A frame is lossless iff all quantizer parameters are zero.
+  bool IsLossless() const {
+    return base_qindex == 0 && y_dc_delta == 0 && uv_dc_delta == 0 &&
+           uv_ac_delta == 0;
+  }
+
+  uint8_t base_qindex;
+  int8_t y_dc_delta;
+  int8_t uv_dc_delta;
+  int8_t uv_ac_delta;
+};
+
+// VP9 frame header. Members are 0-initialized by
+// Vp9Parser::ParseNextFrame() before parsing.
+struct MEDIA_EXPORT Vp9FrameHeader {
+  enum FrameType {
+    KEYFRAME = 0,
+    INTERFRAME = 1,
+  };
+
+  bool IsKeyframe() const;
+  // True if reference slot |i| is refreshed by this frame.
+  bool RefreshFlag(size_t i) const { return !!(refresh_flags & (1u << i)); }
+
+  uint8_t profile;
+
+  // If set, this header only redisplays the already-decoded frame in
+  // slot |frame_to_show|; no other fields are parsed.
+  bool show_existing_frame;
+  uint8_t frame_to_show;
+
+  FrameType frame_type;
+
+  bool show_frame;
+  bool error_resilient_mode;
+
+  uint8_t bit_depth;
+  Vp9ColorSpace color_space;
+  bool yuv_range;
+  uint8_t subsampling_x;
+  uint8_t subsampling_y;
+
+  // The range of width and height is 1..2^16.
+  uint32_t width;
+  uint32_t height;
+  uint32_t display_width;
+  uint32_t display_height;
+
+  bool intra_only;
+  uint8_t reset_context;
+  // Bitmask of reference slots refreshed by this frame (see
+  // RefreshFlag()).
+  uint8_t refresh_flags;
+  // Reference slot indices used by an inter frame, with their sign
+  // biases.
+  uint8_t frame_refs[kVp9NumRefsPerFrame];
+  bool ref_sign_biases[kVp9NumRefsPerFrame];
+  bool allow_high_precision_mv;
+  Vp9InterpFilter interp_filter;
+
+  bool refresh_frame_context;
+  bool frame_parallel_decoding_mode;
+  uint8_t frame_context_idx;
+
+  Vp9QuantizationParams quant_params;
+
+  uint8_t log2_tile_cols;
+  uint8_t log2_tile_rows;
+
+  // Pointer to the beginning of frame data. It is a responsibility of the
+  // client of the Vp9Parser to maintain validity of this data while it is
+  // being used outside of that class.
+  const uint8_t* data;
+
+  // Size of |data| in bytes.
+  size_t frame_size;
+
+  // Size of compressed header in bytes.
+  size_t first_partition_size;
+
+  // Size of uncompressed header in bytes.
+  size_t uncompressed_header_size;
+};
+
+// A parser for VP9 bitstream.
+// Typical usage: call SetStream() with a frame or superframe buffer,
+// then ParseNextFrame() repeatedly until it returns kEOStream (see
+// vp9_parser_unittest.cc for an example).
+class MEDIA_EXPORT Vp9Parser {
+ public:
+  // ParseNextFrame() return values. See documentation for ParseNextFrame().
+  enum Result {
+    kOk,
+    kInvalidStream,
+    kEOStream,
+  };
+
+  Vp9Parser();
+  ~Vp9Parser();
+
+  // Set a new stream buffer to read from, starting at |stream| and of size
+  // |stream_size| in bytes. |stream| must point to the beginning of a single
+  // frame or a single superframe, is owned by caller and must remain valid
+  // until the next call to SetStream().
+  void SetStream(const uint8_t* stream, off_t stream_size);
+
+  // Parse the next frame in the current stream buffer, filling |fhdr| with
+  // the parsed frame header and updating current segmentation and loop filter
+  // state. Return kOk if a frame has successfully been parsed, kEOStream if
+  // there is no more data in the current stream buffer, or kInvalidStream
+  // on error.
+  Result ParseNextFrame(Vp9FrameHeader* fhdr);
+
+  // Return current segmentation state.
+  const Vp9Segmentation& GetSegmentation() const { return segmentation_; }
+
+  // Return current loop filter state.
+  const Vp9LoopFilter& GetLoopFilter() const { return loop_filter_; }
+
+  // Clear parser state and return to an initialized state.
+  void Reset();
+
+ private:
+  // The parsing context to keep track of references.
+  struct ReferenceSlot {
+    uint32_t width;
+    uint32_t height;
+  };
+
+  // Splits the current stream buffer into individual frames in
+  // |frames_|.
+  bool ParseSuperframe();
+  // Uncompressed-header field readers; all consume bits via |reader_|.
+  uint8_t ReadProfile();
+  bool VerifySyncCode();
+  bool ReadBitDepthColorSpaceSampling(Vp9FrameHeader* fhdr);
+  void ReadFrameSize(Vp9FrameHeader* fhdr);
+  bool ReadFrameSizeFromRefs(Vp9FrameHeader* fhdr);
+  void ReadDisplayFrameSize(Vp9FrameHeader* fhdr);
+  Vp9InterpFilter ReadInterpFilter();
+  void ReadLoopFilter();
+  void ReadQuantization(Vp9QuantizationParams* quants);
+  void ReadSegmentationMap();
+  void ReadSegmentationData();
+  void ReadSegmentation();
+  void ReadTiles(Vp9FrameHeader* fhdr);
+  bool ParseUncompressedHeader(const uint8_t* stream,
+                               off_t frame_size,
+                               Vp9FrameHeader* fhdr);
+  void UpdateSlots(const Vp9FrameHeader* fhdr);
+
+  // Helpers that derive persistent state after a header is parsed.
+  void ResetLoopfilter();
+  void SetupPastIndependence();
+  size_t GetQIndex(const Vp9QuantizationParams& quant, size_t segid) const;
+  void SetupSegmentationDequant(const Vp9QuantizationParams& quant);
+  void SetupLoopFilter();
+
+  // Current address in the bitstream buffer.
+  const uint8_t* stream_;
+
+  // Remaining bytes in stream_.
+  off_t bytes_left_;
+
+  // Stores start pointer and size of each frame within the current superframe.
+  struct FrameInfo {
+    FrameInfo(const uint8_t* ptr, off_t size);
+
+    // Starting address of the frame.
+    const uint8_t* ptr;
+
+    // Size of the frame in bytes.
+    off_t size;
+  };
+
+  // FrameInfo for the remaining frames in the current superframe to be parsed.
+  std::deque<FrameInfo> frames_;
+
+  // Raw bits decoder for uncompressed frame header.
+  Vp9RawBitsReader reader_;
+
+  // Segmentation and loop filter state that persists across frames.
+  Vp9Segmentation segmentation_;
+  Vp9LoopFilter loop_filter_;
+
+  // The parsing context to keep track of references.
+  ReferenceSlot ref_slots_[kVp9NumRefFrames];
+
+  DISALLOW_COPY_AND_ASSIGN(Vp9Parser);
+};
+
+} // namespace media
+
+#endif // MEDIA_FILTERS_VP9_PARSER_H_
diff --git a/chromium/media/filters/vp9_parser_unittest.cc b/chromium/media/filters/vp9_parser_unittest.cc
new file mode 100644
index 00000000000..15496aef436
--- /dev/null
+++ b/chromium/media/filters/vp9_parser_unittest.cc
@@ -0,0 +1,159 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/files/memory_mapped_file.h"
+#include "base/logging.h"
+#include "media/base/test_data_util.h"
+#include "media/filters/ivf_parser.h"
+#include "media/filters/vp9_parser.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+// Test harness that memory-maps the IVF container "test-25fps.vp9"
+// and feeds its frames to a Vp9Parser.
+class Vp9ParserTest : public ::testing::Test {
+ protected:
+  void SetUp() override {
+    base::FilePath file_path = GetTestDataFilePath("test-25fps.vp9");
+
+    stream_.reset(new base::MemoryMappedFile());
+    ASSERT_TRUE(stream_->Initialize(file_path)) << "Couldn't open stream file: "
+                                                << file_path.MaybeAsASCII();
+
+    IvfFileHeader ivf_file_header;
+    ASSERT_TRUE(ivf_parser_.Initialize(stream_->data(), stream_->length(),
+                                       &ivf_file_header));
+    // Sanity-check that the container really holds VP9 data.
+    ASSERT_EQ(ivf_file_header.fourcc, 0x30395056u); // VP90
+  }
+
+  void TearDown() override { stream_.reset(); }
+
+  // Returns true when one more VP9 frame header is parsed successfully,
+  // pulling additional IVF frames as needed.
+  bool ParseNextFrame(struct Vp9FrameHeader* frame_hdr);
+
+  const Vp9Segmentation& GetSegmentation() const {
+    return vp9_parser_.GetSegmentation();
+  }
+
+  const Vp9LoopFilter& GetLoopFilter() const {
+    return vp9_parser_.GetLoopFilter();
+  }
+
+  IvfParser ivf_parser_;
+  scoped_ptr<base::MemoryMappedFile> stream_;
+
+  Vp9Parser vp9_parser_;
+};
+
+// Feeds IVF frames into the VP9 parser until it produces a frame
+// header, refilling the parser whenever it runs out of data. Returns
+// false once the IVF stream is exhausted or parsing fails.
+bool Vp9ParserTest::ParseNextFrame(Vp9FrameHeader* fhdr) {
+  for (;;) {
+    Vp9Parser::Result res = vp9_parser_.ParseNextFrame(fhdr);
+    if (res != Vp9Parser::kEOStream)
+      return res == Vp9Parser::kOk;
+
+    // The parser consumed its buffer; hand it the next IVF frame.
+    IvfFrameHeader ivf_frame_header;
+    const uint8_t* ivf_payload;
+    if (!ivf_parser_.ParseNextFrame(&ivf_frame_header, &ivf_payload))
+      return false;
+
+    vp9_parser_.SetStream(ivf_payload, ivf_frame_header.frame_size);
+  }
+}
+
+// Parses the entire test stream and checks that exactly the expected
+// number of frames is produced.
+TEST_F(Vp9ParserTest, StreamFileParsing) {
+  // Number of frames in the test stream to be parsed.
+  const int num_frames = 250;
+  int num_parsed_frames = 0;
+
+  while (num_parsed_frames < num_frames) {
+    Vp9FrameHeader fhdr;
+    if (!ParseNextFrame(&fhdr))
+      break;
+
+    ++num_parsed_frames;
+  }
+
+  DVLOG(1) << "Number of successfully parsed frames before EOS: "
+           << num_parsed_frames;
+
+  EXPECT_EQ(num_frames, num_parsed_frames);
+}
+
+// Golden-value checks for the first frame (a keyframe) of
+// test-25fps.vp9, covering header fields, loop filter, quantization
+// and segmentation state.
+TEST_F(Vp9ParserTest, VerifyFirstFrame) {
+  Vp9FrameHeader fhdr;
+
+  ASSERT_TRUE(ParseNextFrame(&fhdr));
+
+  EXPECT_EQ(0, fhdr.profile);
+  EXPECT_FALSE(fhdr.show_existing_frame);
+  EXPECT_EQ(Vp9FrameHeader::KEYFRAME, fhdr.frame_type);
+  EXPECT_TRUE(fhdr.show_frame);
+  EXPECT_FALSE(fhdr.error_resilient_mode);
+
+  EXPECT_EQ(8, fhdr.bit_depth);
+  EXPECT_EQ(Vp9ColorSpace::UNKNOWN, fhdr.color_space);
+  EXPECT_FALSE(fhdr.yuv_range);
+  EXPECT_EQ(1, fhdr.subsampling_x);
+  EXPECT_EQ(1, fhdr.subsampling_y);
+
+  EXPECT_EQ(320u, fhdr.width);
+  EXPECT_EQ(240u, fhdr.height);
+  EXPECT_EQ(320u, fhdr.display_width);
+  EXPECT_EQ(240u, fhdr.display_height);
+
+  EXPECT_TRUE(fhdr.refresh_frame_context);
+  EXPECT_TRUE(fhdr.frame_parallel_decoding_mode);
+  EXPECT_EQ(0, fhdr.frame_context_idx);
+
+  const Vp9LoopFilter& lf = GetLoopFilter();
+  EXPECT_EQ(9, lf.filter_level);
+  EXPECT_EQ(0, lf.sharpness_level);
+  EXPECT_TRUE(lf.mode_ref_delta_enabled);
+  EXPECT_TRUE(lf.mode_ref_delta_update);
+  EXPECT_TRUE(lf.update_ref_deltas[0]);
+  EXPECT_EQ(1, lf.ref_deltas[0]);
+  EXPECT_EQ(-1, lf.ref_deltas[2]);
+  EXPECT_EQ(-1, lf.ref_deltas[3]);
+
+  const Vp9QuantizationParams& qp = fhdr.quant_params;
+  EXPECT_EQ(65, qp.base_qindex);
+  EXPECT_FALSE(qp.y_dc_delta);
+  EXPECT_FALSE(qp.uv_dc_delta);
+  EXPECT_FALSE(qp.uv_ac_delta);
+  EXPECT_FALSE(qp.IsLossless());
+
+  const Vp9Segmentation& seg = GetSegmentation();
+  EXPECT_FALSE(seg.enabled);
+
+  EXPECT_EQ(0, fhdr.log2_tile_cols);
+  EXPECT_EQ(0, fhdr.log2_tile_rows);
+
+  EXPECT_EQ(120u, fhdr.first_partition_size);
+  EXPECT_EQ(18u, fhdr.uncompressed_header_size);
+}
+
+// Golden-value checks for the second frame of test-25fps.vp9, a
+// non-shown inter frame.
+TEST_F(Vp9ParserTest, VerifyInterFrame) {
+  Vp9FrameHeader fhdr;
+
+  // To verify the second frame.
+  for (int i = 0; i < 2; i++)
+    ASSERT_TRUE(ParseNextFrame(&fhdr));
+
+  EXPECT_EQ(Vp9FrameHeader::INTERFRAME, fhdr.frame_type);
+  EXPECT_FALSE(fhdr.show_frame);
+  EXPECT_FALSE(fhdr.intra_only);
+  EXPECT_FALSE(fhdr.reset_context);
+  EXPECT_TRUE(fhdr.RefreshFlag(2));
+  EXPECT_EQ(0, fhdr.frame_refs[0]);
+  EXPECT_EQ(1, fhdr.frame_refs[1]);
+  EXPECT_EQ(2, fhdr.frame_refs[2]);
+  EXPECT_TRUE(fhdr.allow_high_precision_mv);
+  EXPECT_EQ(Vp9InterpFilter::EIGHTTAP, fhdr.interp_filter);
+
+  EXPECT_EQ(48u, fhdr.first_partition_size);
+  EXPECT_EQ(11u, fhdr.uncompressed_header_size);
+}
+
+} // namespace media
diff --git a/chromium/media/filters/vp9_raw_bits_reader.cc b/chromium/media/filters/vp9_raw_bits_reader.cc
new file mode 100644
index 00000000000..4124085913e
--- /dev/null
+++ b/chromium/media/filters/vp9_raw_bits_reader.cc
@@ -0,0 +1,55 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/filters/vp9_raw_bits_reader.h"
+
+#include <limits.h>
+
+#include "base/logging.h"
+#include "media/base/bit_reader.h"
+
+namespace media {
+
+Vp9RawBitsReader::Vp9RawBitsReader() : valid_(true) {}
+
+Vp9RawBitsReader::~Vp9RawBitsReader() {}
+
+// Binds the reader to a new buffer and clears any latched error state;
+// may be called repeatedly to reuse the reader.
+void Vp9RawBitsReader::Initialize(const uint8_t* data, size_t size) {
+  DCHECK(data);
+  reader_.reset(new BitReader(data, size));
+  valid_ = true;
+}
+
+// Reads a single bit from the stream. Once a read has run past the end
+// of the buffer the reader latches into an invalid state and every
+// subsequent call returns false.
+bool Vp9RawBitsReader::ReadBool() {
+  DCHECK(reader_);
+  if (!valid_)
+    return false;
+
+  int bit = 0;
+  valid_ = reader_->ReadBits(1, &bit);
+  return valid_ && bit == 1;
+}
+
+// Reads an unsigned |bits|-bit value. Returns 0 and latches the
+// invalid state if the read would run past the end of the buffer.
+int Vp9RawBitsReader::ReadLiteral(int bits) {
+  DCHECK(reader_);
+  if (!valid_)
+    return 0;
+
+  int value = 0;
+  // Guard against shifting past the width of |value|.
+  DCHECK_LT(static_cast<size_t>(bits), sizeof(value) * 8);
+  valid_ = reader_->ReadBits(bits, &value);
+  return valid_ ? value : 0;
+}
+
+// Reads |bits| magnitude bits followed by one sign bit (VP9 su(n)
+// coding: sign follows the magnitude).
+int Vp9RawBitsReader::ReadSignedLiteral(int bits) {
+  int value = ReadLiteral(bits);
+  return ReadBool() ? -value : value;
+}
+
+// Bytes consumed so far; a partially read byte counts as a full byte.
+size_t Vp9RawBitsReader::GetBytesRead() const {
+  DCHECK(reader_);
+  return (reader_->bits_read() + 7) / 8;
+}
+
+} // namespace media
diff --git a/chromium/media/filters/vp9_raw_bits_reader.h b/chromium/media/filters/vp9_raw_bits_reader.h
new file mode 100644
index 00000000000..5cc46e25a97
--- /dev/null
+++ b/chromium/media/filters/vp9_raw_bits_reader.h
@@ -0,0 +1,62 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_FILTERS_VP9_RAW_BITS_READER_
+#define MEDIA_FILTERS_VP9_RAW_BITS_READER_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/macros.h"
+#include "base/memory/scoped_ptr.h"
+#include "media/base/media_export.h"
+
+namespace media {
+
+class BitReader;
+
+// A class to read raw bits stream. See VP9 spec, "RAW-BITS DECODING" section
+// for detail. Once any read runs past the end of the buffer the reader
+// becomes invalid and subsequent reads return 0/false; Initialize()
+// resets it.
+class MEDIA_EXPORT Vp9RawBitsReader {
+ public:
+  Vp9RawBitsReader();
+  ~Vp9RawBitsReader();
+
+  // |data| is the input buffer with |size| bytes.
+  void Initialize(const uint8_t* data, size_t size);
+
+  // Returns true if none of the reads since the last Initialize() call has
+  // gone beyond the end of available data.
+  bool IsValid() const { return valid_; }
+
+  // Returns how many bytes were read since the last Initialize() call.
+  // Partial bytes will be counted as one byte. For example, it will return 1
+  // if 3 bits were read.
+  size_t GetBytesRead() const;
+
+  // Reads one bit.
+  // If the read goes beyond the end of buffer, the return value is undefined.
+  bool ReadBool();
+
+  // Reads a literal with |bits| bits.
+  // If the read goes beyond the end of buffer, the return value is undefined.
+  int ReadLiteral(int bits);
+
+  // Reads a signed literal with |bits| bits (not including the sign bit).
+  // If the read goes beyond the end of buffer, the return value is undefined.
+  int ReadSignedLiteral(int bits);
+
+ private:
+  scoped_ptr<BitReader> reader_;
+
+  // Indicates if none of the reads since the last Initialize() call has gone
+  // beyond the end of available data.
+  bool valid_;
+
+  DISALLOW_COPY_AND_ASSIGN(Vp9RawBitsReader);
+};
+
+} // namespace media
+
+#endif // MEDIA_FILTERS_VP9_RAW_BITS_READER_
diff --git a/chromium/media/filters/vp9_raw_bits_reader_unittest.cc b/chromium/media/filters/vp9_raw_bits_reader_unittest.cc
new file mode 100644
index 00000000000..999dbe1e989
--- /dev/null
+++ b/chromium/media/filters/vp9_raw_bits_reader_unittest.cc
@@ -0,0 +1,66 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/filters/vp9_raw_bits_reader.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+// Reads the bits of 0xf1 one at a time, checking that GetBytesRead()
+// rounds a partially consumed byte up and that reading past the end
+// invalidates the reader.
+TEST(Vp9RawBitsReaderTest, ReadBool) {
+  uint8_t data[] = {0xf1};
+  Vp9RawBitsReader reader;
+  reader.Initialize(data, 1);
+
+  EXPECT_TRUE(reader.IsValid());
+  EXPECT_EQ(0u, reader.GetBytesRead());
+  EXPECT_TRUE(reader.ReadBool());
+  EXPECT_EQ(1u, reader.GetBytesRead());
+  EXPECT_TRUE(reader.ReadBool());
+  EXPECT_TRUE(reader.ReadBool());
+  EXPECT_TRUE(reader.ReadBool());
+  EXPECT_FALSE(reader.ReadBool());
+  EXPECT_FALSE(reader.ReadBool());
+  EXPECT_FALSE(reader.ReadBool());
+  EXPECT_TRUE(reader.ReadBool());
+  EXPECT_TRUE(reader.IsValid());
+
+  // The return value is undefined.
+  ignore_result(reader.ReadBool());
+  EXPECT_FALSE(reader.IsValid());
+  EXPECT_EQ(1u, reader.GetBytesRead());
+}
+
+// Reads multi-bit literals and checks end-of-buffer invalidation.
+// Suite name matches ReadBool's "Vp9RawBitsReaderTest" so all three
+// tests run under one test case; previously this test used the bare
+// class name "Vp9RawBitsReader", splitting the suite.
+TEST(Vp9RawBitsReaderTest, ReadLiteral) {
+  uint8_t data[] = {0x3d, 0x67, 0x9a};
+  Vp9RawBitsReader reader;
+  reader.Initialize(data, 3);
+
+  EXPECT_TRUE(reader.IsValid());
+  EXPECT_EQ(0x03, reader.ReadLiteral(4));
+  EXPECT_EQ(0xd679, reader.ReadLiteral(16));
+  EXPECT_TRUE(reader.IsValid());
+
+  // The return value is undefined.
+  ignore_result(reader.ReadLiteral(8));
+  EXPECT_FALSE(reader.IsValid());
+  EXPECT_EQ(3u, reader.GetBytesRead());
+}
+
+// Reads signed (magnitude-then-sign) literals and checks end-of-buffer
+// invalidation. Suite name matches "Vp9RawBitsReaderTest" used by
+// ReadBool; previously this test used the bare class name
+// "Vp9RawBitsReader", splitting the suite.
+TEST(Vp9RawBitsReaderTest, ReadSignedLiteral) {
+  uint8_t data[] = {0x3d, 0x67, 0x9a};
+  Vp9RawBitsReader reader;
+  reader.Initialize(data, 3);
+
+  EXPECT_TRUE(reader.IsValid());
+  EXPECT_EQ(-0x03, reader.ReadSignedLiteral(4));
+  EXPECT_EQ(-0x5679, reader.ReadSignedLiteral(15));
+  EXPECT_TRUE(reader.IsValid());
+
+  // The return value is undefined.
+  ignore_result(reader.ReadSignedLiteral(7));
+  EXPECT_FALSE(reader.IsValid());
+  EXPECT_EQ(3u, reader.GetBytesRead());
+}
+
+} // namespace media
diff --git a/chromium/media/filters/vpx_video_decoder.cc b/chromium/media/filters/vpx_video_decoder.cc
index 26b60fa55d7..31b3a376b22 100644
--- a/chromium/media/filters/vpx_video_decoder.cc
+++ b/chromium/media/filters/vpx_video_decoder.cc
@@ -13,17 +13,23 @@
#include "base/command_line.h"
#include "base/location.h"
#include "base/logging.h"
+#include "base/metrics/histogram_macros.h"
#include "base/single_thread_task_runner.h"
#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/sys_byteorder.h"
#include "base/sys_info.h"
+#include "base/trace_event/memory_allocator_dump.h"
+#include "base/trace_event/memory_dump_manager.h"
+#include "base/trace_event/memory_dump_provider.h"
+#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_event.h"
#include "media/base/bind_to_current_loop.h"
#include "media/base/decoder_buffer.h"
#include "media/base/limits.h"
#include "media/base/media_switches.h"
#include "media/base/pipeline.h"
+#include "media/base/timestamp_constants.h"
#include "media/base/video_util.h"
// Include libvpx header files.
@@ -31,9 +37,9 @@
// backwards compatibility for legacy applications using the library.
#define VPX_CODEC_DISABLE_COMPAT 1
extern "C" {
-#include "third_party/libvpx/source/libvpx/vpx/vpx_decoder.h"
-#include "third_party/libvpx/source/libvpx/vpx/vpx_frame_buffer.h"
-#include "third_party/libvpx/source/libvpx/vpx/vp8dx.h"
+#include "third_party/libvpx_new/source/libvpx/vpx/vp8dx.h"
+#include "third_party/libvpx_new/source/libvpx/vpx/vpx_decoder.h"
+#include "third_party/libvpx_new/source/libvpx/vpx/vpx_frame_buffer.h"
}
namespace media {
@@ -73,7 +79,8 @@ static int GetThreadCount(const VideoDecoderConfig& config) {
}
class VpxVideoDecoder::MemoryPool
- : public base::RefCountedThreadSafe<VpxVideoDecoder::MemoryPool> {
+ : public base::RefCountedThreadSafe<VpxVideoDecoder::MemoryPool>,
+ public base::trace_event::MemoryDumpProvider {
public:
MemoryPool();
@@ -97,9 +104,15 @@ class VpxVideoDecoder::MemoryPool
// to this pool.
base::Closure CreateFrameCallback(void* fb_priv_data);
+ bool OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
+ base::trace_event::ProcessMemoryDump* pmd) override;
+
+ int NumberOfFrameBuffersInUseByDecoder() const;
+ int NumberOfFrameBuffersInUseByDecoderAndVideoFrame() const;
+
private:
friend class base::RefCountedThreadSafe<VpxVideoDecoder::MemoryPool>;
- ~MemoryPool();
+ ~MemoryPool() override;
// Reference counted frame buffers used for VP9 decoding. Reference counting
// is done manually because both chromium and libvpx has to release this
@@ -120,6 +133,10 @@ class VpxVideoDecoder::MemoryPool
// Frame buffers to be used by libvpx for VP9 Decoding.
std::vector<VP9FrameBuffer*> frame_buffers_;
+ // Number of VP9FrameBuffer currently in use by the decoder.
+ int in_use_by_decoder_ = 0;
+ // Number of VP9FrameBuffer currently in use by the decoder and a video frame.
+ int in_use_by_decoder_and_video_frame_ = 0;
DISALLOW_COPY_AND_ASSIGN(MemoryPool);
};
@@ -164,6 +181,7 @@ int32 VpxVideoDecoder::MemoryPool::GetVP9FrameBuffer(
fb->data = &fb_to_use->data[0];
fb->size = fb_to_use->data.size();
++fb_to_use->ref_cnt;
+ ++memory_pool->in_use_by_decoder_;
// Set the frame buffer's private data to point at the external frame buffer.
fb->priv = static_cast<void*>(fb_to_use);
@@ -172,8 +190,16 @@ int32 VpxVideoDecoder::MemoryPool::GetVP9FrameBuffer(
int32 VpxVideoDecoder::MemoryPool::ReleaseVP9FrameBuffer(
void *user_priv, vpx_codec_frame_buffer *fb) {
+ DCHECK(user_priv);
+ DCHECK(fb);
VP9FrameBuffer* frame_buffer = static_cast<VP9FrameBuffer*>(fb->priv);
--frame_buffer->ref_cnt;
+
+ VpxVideoDecoder::MemoryPool* memory_pool =
+ static_cast<VpxVideoDecoder::MemoryPool*>(user_priv);
+ --memory_pool->in_use_by_decoder_;
+ if (frame_buffer->ref_cnt)
+ --memory_pool->in_use_by_decoder_and_video_frame_;
return 0;
}
@@ -181,14 +207,57 @@ base::Closure VpxVideoDecoder::MemoryPool::CreateFrameCallback(
void* fb_priv_data) {
VP9FrameBuffer* frame_buffer = static_cast<VP9FrameBuffer*>(fb_priv_data);
++frame_buffer->ref_cnt;
+ if (frame_buffer->ref_cnt > 1)
+ ++in_use_by_decoder_and_video_frame_;
return BindToCurrentLoop(
base::Bind(&MemoryPool::OnVideoFrameDestroyed, this,
frame_buffer));
}
+bool VpxVideoDecoder::MemoryPool::OnMemoryDump(
+ const base::trace_event::MemoryDumpArgs& args,
+ base::trace_event::ProcessMemoryDump* pmd) {
+ base::trace_event::MemoryAllocatorDump* memory_dump =
+ pmd->CreateAllocatorDump("media/vpx/memory_pool");
+ base::trace_event::MemoryAllocatorDump* used_memory_dump =
+ pmd->CreateAllocatorDump("media/vpx/memory_pool/used");
+
+ pmd->AddSuballocation(memory_dump->guid(),
+ base::trace_event::MemoryDumpManager::GetInstance()
+ ->system_allocator_pool_name());
+ size_t bytes_used = 0;
+ size_t bytes_reserved = 0;
+ for (const VP9FrameBuffer* frame_buffer : frame_buffers_) {
+ if (frame_buffer->ref_cnt) {
+ bytes_used += frame_buffer->data.size();
+ }
+ bytes_reserved += frame_buffer->data.size();
+ }
+
+ memory_dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize,
+ base::trace_event::MemoryAllocatorDump::kUnitsBytes,
+ bytes_reserved);
+ used_memory_dump->AddScalar(
+ base::trace_event::MemoryAllocatorDump::kNameSize,
+ base::trace_event::MemoryAllocatorDump::kUnitsBytes, bytes_used);
+
+ return true;
+}
+
+int VpxVideoDecoder::MemoryPool::NumberOfFrameBuffersInUseByDecoder() const {
+ return in_use_by_decoder_;
+}
+
+int VpxVideoDecoder::MemoryPool::
+ NumberOfFrameBuffersInUseByDecoderAndVideoFrame() const {
+ return in_use_by_decoder_and_video_frame_;
+}
+
void VpxVideoDecoder::MemoryPool::OnVideoFrameDestroyed(
VP9FrameBuffer* frame_buffer) {
--frame_buffer->ref_cnt;
+ if (frame_buffer->ref_cnt)
+ --in_use_by_decoder_and_video_frame_;
}
VpxVideoDecoder::VpxVideoDecoder(
@@ -256,10 +325,12 @@ bool VpxVideoDecoder::ConfigureDecoder(const VideoDecoderConfig& config) {
if (config.codec() != kCodecVP8 && config.codec() != kCodecVP9)
return false;
- // In VP8 videos, only those with alpha are handled by VpxVideoDecoder. All
- // other VP8 videos go to FFmpegVideoDecoder.
- if (config.codec() == kCodecVP8 && config.format() != VideoFrame::YV12A)
+#if !defined(DISABLE_FFMPEG_VIDEO_DECODERS)
+ // When FFmpegVideoDecoder is available it handles VP8 that doesn't have
+ // alpha, and VpxVideoDecoder will handle VP8 with alpha.
+ if (config.codec() == kCodecVP8 && config.format() != PIXEL_FORMAT_YV12A)
return false;
+#endif
CloseDecoder();
@@ -271,6 +342,8 @@ bool VpxVideoDecoder::ConfigureDecoder(const VideoDecoderConfig& config) {
// decoding.
if (config.codec() == kCodecVP9) {
memory_pool_ = new MemoryPool();
+ base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
+ memory_pool_.get(), task_runner_);
if (vpx_codec_set_frame_buffer_functions(vpx_codec_,
&MemoryPool::GetVP9FrameBuffer,
&MemoryPool::ReleaseVP9FrameBuffer,
@@ -280,7 +353,7 @@ bool VpxVideoDecoder::ConfigureDecoder(const VideoDecoderConfig& config) {
}
}
- if (config.format() == VideoFrame::YV12A) {
+ if (config.format() == PIXEL_FORMAT_YV12A) {
vpx_codec_alpha_ = InitializeVpxContext(vpx_codec_alpha_, config);
if (!vpx_codec_alpha_)
return false;
@@ -294,6 +367,8 @@ void VpxVideoDecoder::CloseDecoder() {
vpx_codec_destroy(vpx_codec_);
delete vpx_codec_;
vpx_codec_ = NULL;
+ base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider(
+ memory_pool_.get());
memory_pool_ = NULL;
}
if (vpx_codec_alpha_) {
@@ -459,19 +534,24 @@ void VpxVideoDecoder::CopyVpxImageTo(const vpx_image* vpx_image,
vpx_image->fmt == VPX_IMG_FMT_YV12 ||
vpx_image->fmt == VPX_IMG_FMT_I444);
- VideoFrame::Format codec_format = VideoFrame::YV12;
+ VideoPixelFormat codec_format = PIXEL_FORMAT_YV12;
int uv_rows = (vpx_image->d_h + 1) / 2;
- VideoFrame::ColorSpace color_space = VideoFrame::COLOR_SPACE_UNSPECIFIED;
if (vpx_image->fmt == VPX_IMG_FMT_I444) {
CHECK(!vpx_codec_alpha_);
- codec_format = VideoFrame::YV24;
+ codec_format = PIXEL_FORMAT_YV24;
uv_rows = vpx_image->d_h;
} else if (vpx_codec_alpha_) {
- codec_format = VideoFrame::YV12A;
+ codec_format = PIXEL_FORMAT_YV12A;
}
+
+ // Default to the color space from the config, but if the bistream specifies
+ // one, prefer that instead.
+ ColorSpace color_space = config_.color_space();
if (vpx_image->cs == VPX_CS_BT_709)
- color_space = VideoFrame::COLOR_SPACE_HD_REC709;
+ color_space = COLOR_SPACE_HD_REC709;
+ else if (vpx_image->cs == VPX_CS_BT_601)
+ color_space = COLOR_SPACE_SD_REC601;
// The mixed |w|/|d_h| in |coded_size| is intentional. Setting the correct
// coded width is necessary to allow coalesced memory access, which may avoid
@@ -495,6 +575,13 @@ void VpxVideoDecoder::CopyVpxImageTo(const vpx_image* vpx_image,
memory_pool_->CreateFrameCallback(vpx_image->fb_priv));
video_frame->get()->metadata()->SetInteger(VideoFrameMetadata::COLOR_SPACE,
color_space);
+
+ UMA_HISTOGRAM_COUNTS("Media.Vpx.VideoDecoderBuffersInUseByDecoder",
+ memory_pool_->NumberOfFrameBuffersInUseByDecoder());
+ UMA_HISTOGRAM_COUNTS(
+ "Media.Vpx.VideoDecoderBuffersInUseByDecoderAndVideoFrame",
+ memory_pool_->NumberOfFrameBuffersInUseByDecoderAndVideoFrame());
+
return;
}
diff --git a/chromium/media/formats/common/stream_parser_test_base.cc b/chromium/media/formats/common/stream_parser_test_base.cc
index f47a87da502..b6555b95247 100644
--- a/chromium/media/formats/common/stream_parser_test_base.cc
+++ b/chromium/media/formats/common/stream_parser_test_base.cc
@@ -38,7 +38,7 @@ StreamParserTestBase::StreamParserTestBase(
base::Bind(&StreamParserTestBase::OnKeyNeeded, base::Unretained(this)),
base::Bind(&StreamParserTestBase::OnNewSegment, base::Unretained(this)),
base::Bind(&StreamParserTestBase::OnEndOfSegment, base::Unretained(this)),
- LogCB());
+ new MediaLog());
}
StreamParserTestBase::~StreamParserTestBase() {}
diff --git a/chromium/media/formats/mp2t/es_adapter_video.cc b/chromium/media/formats/mp2t/es_adapter_video.cc
index 0208d44fb70..972f8c7f777 100644
--- a/chromium/media/formats/mp2t/es_adapter_video.cc
+++ b/chromium/media/formats/mp2t/es_adapter_video.cc
@@ -4,7 +4,7 @@
#include "media/formats/mp2t/es_adapter_video.h"
-#include "media/base/buffers.h"
+#include "media/base/timestamp_constants.h"
#include "media/base/video_decoder_config.h"
#include "media/formats/mp2t/mp2t_common.h"
diff --git a/chromium/media/formats/mp2t/es_adapter_video_unittest.cc b/chromium/media/formats/mp2t/es_adapter_video_unittest.cc
index b601f7ece1f..396b6559a65 100644
--- a/chromium/media/formats/mp2t/es_adapter_video_unittest.cc
+++ b/chromium/media/formats/mp2t/es_adapter_video_unittest.cc
@@ -11,6 +11,7 @@
#include "base/strings/string_util.h"
#include "base/time/time.h"
#include "media/base/stream_parser_buffer.h"
+#include "media/base/timestamp_constants.h"
#include "media/base/video_decoder_config.h"
#include "media/formats/mp2t/es_adapter_video.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -24,14 +25,9 @@ VideoDecoderConfig CreateFakeVideoConfig() {
gfx::Size coded_size(320, 240);
gfx::Rect visible_rect(0, 0, 320, 240);
gfx::Size natural_size(320, 240);
- return VideoDecoderConfig(
- kCodecH264,
- H264PROFILE_MAIN,
- VideoFrame::I420,
- coded_size,
- visible_rect,
- natural_size,
- NULL, 0, false);
+ return VideoDecoderConfig(kCodecH264, H264PROFILE_MAIN, PIXEL_FORMAT_I420,
+ COLOR_SPACE_UNSPECIFIED, coded_size, visible_rect,
+ natural_size, NULL, 0, false);
}
StreamParserBuffer::BufferQueue
diff --git a/chromium/media/formats/mp2t/es_parser.cc b/chromium/media/formats/mp2t/es_parser.cc
index c57b79b0b6d..cdd6e071f49 100644
--- a/chromium/media/formats/mp2t/es_parser.cc
+++ b/chromium/media/formats/mp2t/es_parser.cc
@@ -4,6 +4,7 @@
#include "media/formats/mp2t/es_parser.h"
+#include "media/base/timestamp_constants.h"
#include "media/formats/common/offset_byte_queue.h"
namespace media {
diff --git a/chromium/media/formats/mp2t/es_parser_adts.cc b/chromium/media/formats/mp2t/es_parser_adts.cc
index 8699e20ccb5..9c57de19611 100644
--- a/chromium/media/formats/mp2t/es_parser_adts.cc
+++ b/chromium/media/formats/mp2t/es_parser_adts.cc
@@ -4,15 +4,14 @@
#include "media/formats/mp2t/es_parser_adts.h"
-
#include "base/basictypes.h"
#include "base/logging.h"
#include "base/strings/string_number_conversions.h"
#include "media/base/audio_timestamp_helper.h"
#include "media/base/bit_reader.h"
-#include "media/base/buffers.h"
#include "media/base/channel_layout.h"
#include "media/base/stream_parser_buffer.h"
+#include "media/base/timestamp_constants.h"
#include "media/formats/common/offset_byte_queue.h"
#include "media/formats/mp2t/mp2t_common.h"
#include "media/formats/mpeg/adts_constants.h"
diff --git a/chromium/media/formats/mp2t/es_parser_adts_unittest.cc b/chromium/media/formats/mp2t/es_parser_adts_unittest.cc
index 966553803f6..e7ccb5cb4ef 100644
--- a/chromium/media/formats/mp2t/es_parser_adts_unittest.cc
+++ b/chromium/media/formats/mp2t/es_parser_adts_unittest.cc
@@ -7,7 +7,6 @@
#include "base/bind.h"
#include "base/logging.h"
#include "base/time/time.h"
-#include "media/base/buffers.h"
#include "media/base/stream_parser_buffer.h"
#include "media/formats/mp2t/es_parser_adts.h"
#include "media/formats/mp2t/es_parser_test_base.h"
@@ -65,6 +64,14 @@ TEST_F(EsParserAdtsTest, SinglePts) {
EXPECT_EQ(45u, buffer_count_);
}
+TEST_F(EsParserAdtsTest, AacLcAdts) {
+ LoadStream("sfx.adts");
+ std::vector<Packet> pes_packets = GenerateFixedSizePesPacket(512);
+ pes_packets.front().pts = base::TimeDelta::FromSeconds(1);
+ EXPECT_TRUE(Process(pes_packets, false));
+ EXPECT_EQ(1u, config_count_);
+ EXPECT_EQ(14u, buffer_count_);
+}
} // namespace mp2t
} // namespace media
diff --git a/chromium/media/formats/mp2t/es_parser_h264.cc b/chromium/media/formats/mp2t/es_parser_h264.cc
index 830da34fc54..cbdec71de0d 100644
--- a/chromium/media/formats/mp2t/es_parser_h264.cc
+++ b/chromium/media/formats/mp2t/es_parser_h264.cc
@@ -6,8 +6,8 @@
#include "base/logging.h"
#include "base/numerics/safe_conversions.h"
-#include "media/base/buffers.h"
#include "media/base/stream_parser_buffer.h"
+#include "media/base/timestamp_constants.h"
#include "media/base/video_frame.h"
#include "media/filters/h264_parser.h"
#include "media/formats/common/offset_byte_queue.h"
@@ -279,13 +279,8 @@ bool EsParserH264::UpdateVideoDecoderConfig(const H264SPS* sps) {
return false;
VideoDecoderConfig video_decoder_config(
- kCodecH264,
- VIDEO_CODEC_PROFILE_UNKNOWN,
- VideoFrame::YV12,
- coded_size,
- visible_rect,
- natural_size,
- NULL, 0,
+ kCodecH264, VIDEO_CODEC_PROFILE_UNKNOWN, PIXEL_FORMAT_YV12,
+ COLOR_SPACE_HD_REC709, coded_size, visible_rect, natural_size, NULL, 0,
false);
if (!video_decoder_config.Matches(last_video_decoder_config_)) {
diff --git a/chromium/media/formats/mp2t/es_parser_mpeg1audio.cc b/chromium/media/formats/mp2t/es_parser_mpeg1audio.cc
index 51176140e3a..aee4cba68c4 100644
--- a/chromium/media/formats/mp2t/es_parser_mpeg1audio.cc
+++ b/chromium/media/formats/mp2t/es_parser_mpeg1audio.cc
@@ -4,16 +4,15 @@
#include "media/formats/mp2t/es_parser_mpeg1audio.h"
-
#include "base/basictypes.h"
#include "base/bind.h"
#include "base/logging.h"
#include "base/strings/string_number_conversions.h"
#include "media/base/audio_timestamp_helper.h"
#include "media/base/bit_reader.h"
-#include "media/base/buffers.h"
#include "media/base/channel_layout.h"
#include "media/base/stream_parser_buffer.h"
+#include "media/base/timestamp_constants.h"
#include "media/formats/common/offset_byte_queue.h"
#include "media/formats/mp2t/mp2t_common.h"
#include "media/formats/mpeg/mpeg1_audio_stream_parser.h"
@@ -38,10 +37,10 @@ struct EsParserMpeg1Audio::Mpeg1AudioFrame {
EsParserMpeg1Audio::EsParserMpeg1Audio(
const NewAudioConfigCB& new_audio_config_cb,
const EmitBufferCB& emit_buffer_cb,
- const LogCB& log_cb)
- : log_cb_(log_cb),
- new_audio_config_cb_(new_audio_config_cb),
- emit_buffer_cb_(emit_buffer_cb) {
+ const scoped_refptr<MediaLog>& media_log)
+ : media_log_(media_log),
+ new_audio_config_cb_(new_audio_config_cb),
+ emit_buffer_cb_(emit_buffer_cb) {
}
EsParserMpeg1Audio::~EsParserMpeg1Audio() {
@@ -122,7 +121,7 @@ bool EsParserMpeg1Audio::LookForMpeg1AudioFrame(
int remaining_size = es_size - offset;
DCHECK_GE(remaining_size, MPEG1AudioStreamParser::kHeaderSize);
MPEG1AudioStreamParser::Header header;
- if (!MPEG1AudioStreamParser::ParseHeader(log_cb_, cur_buf, &header))
+ if (!MPEG1AudioStreamParser::ParseHeader(media_log_, cur_buf, &header))
continue;
if (remaining_size < header.frame_size) {
@@ -162,8 +161,7 @@ bool EsParserMpeg1Audio::LookForMpeg1AudioFrame(
bool EsParserMpeg1Audio::UpdateAudioConfiguration(
const uint8* mpeg1audio_header) {
MPEG1AudioStreamParser::Header header;
- if (!MPEG1AudioStreamParser::ParseHeader(log_cb_,
- mpeg1audio_header,
+ if (!MPEG1AudioStreamParser::ParseHeader(media_log_, mpeg1audio_header,
&header)) {
return false;
}
diff --git a/chromium/media/formats/mp2t/es_parser_mpeg1audio.h b/chromium/media/formats/mp2t/es_parser_mpeg1audio.h
index c10f8fdfffa..6ffb197788a 100644
--- a/chromium/media/formats/mp2t/es_parser_mpeg1audio.h
+++ b/chromium/media/formats/mp2t/es_parser_mpeg1audio.h
@@ -33,7 +33,7 @@ class MEDIA_EXPORT EsParserMpeg1Audio : public EsParser {
EsParserMpeg1Audio(const NewAudioConfigCB& new_audio_config_cb,
const EmitBufferCB& emit_buffer_cb,
- const LogCB& log_cb);
+ const scoped_refptr<MediaLog>& media_log);
~EsParserMpeg1Audio() override;
// EsParser implementation.
@@ -65,7 +65,7 @@ class MEDIA_EXPORT EsParserMpeg1Audio : public EsParser {
void SkipMpeg1AudioFrame(const Mpeg1AudioFrame& mpeg1audio_frame);
- LogCB log_cb_;
+ scoped_refptr<MediaLog> media_log_;
// Callbacks:
// - to signal a new audio configuration,
diff --git a/chromium/media/formats/mp2t/es_parser_mpeg1audio_unittest.cc b/chromium/media/formats/mp2t/es_parser_mpeg1audio_unittest.cc
index e875c03fe98..cb78950f271 100644
--- a/chromium/media/formats/mp2t/es_parser_mpeg1audio_unittest.cc
+++ b/chromium/media/formats/mp2t/es_parser_mpeg1audio_unittest.cc
@@ -7,7 +7,6 @@
#include "base/bind.h"
#include "base/logging.h"
#include "base/time/time.h"
-#include "media/base/buffers.h"
#include "media/base/media_log.h"
#include "media/base/stream_parser_buffer.h"
#include "media/formats/mp2t/es_parser_mpeg1audio.h"
@@ -40,9 +39,8 @@ bool EsParserMpeg1AudioTest::Process(
EsParserMpeg1Audio es_parser(
base::Bind(&EsParserMpeg1AudioTest::NewAudioConfig,
base::Unretained(this)),
- base::Bind(&EsParserMpeg1AudioTest::EmitBuffer,
- base::Unretained(this)),
- LogCB());
+ base::Bind(&EsParserMpeg1AudioTest::EmitBuffer, base::Unretained(this)),
+ new MediaLog());
return ProcessPesPackets(&es_parser, pes_packets, force_timing);
}
diff --git a/chromium/media/formats/mp2t/es_parser_test_base.cc b/chromium/media/formats/mp2t/es_parser_test_base.cc
index cba060fff17..afe7dd3c0ab 100644
--- a/chromium/media/formats/mp2t/es_parser_test_base.cc
+++ b/chromium/media/formats/mp2t/es_parser_test_base.cc
@@ -7,9 +7,9 @@
#include "base/files/memory_mapped_file.h"
#include "base/logging.h"
#include "base/strings/string_util.h"
-#include "media/base/buffers.h"
#include "media/base/stream_parser_buffer.h"
#include "media/base/test_data_util.h"
+#include "media/base/timestamp_constants.h"
#include "media/formats/mp2t/es_parser.h"
#include "testing/gtest/include/gtest/gtest.h"
diff --git a/chromium/media/formats/mp2t/mp2t_stream_parser.cc b/chromium/media/formats/mp2t/mp2t_stream_parser.cc
index 4d2e2d4c1aa..0075e6c141d 100644
--- a/chromium/media/formats/mp2t/mp2t_stream_parser.cc
+++ b/chromium/media/formats/mp2t/mp2t_stream_parser.cc
@@ -7,9 +7,9 @@
#include "base/bind.h"
#include "base/callback_helpers.h"
#include "base/stl_util.h"
-#include "media/base/buffers.h"
#include "media/base/stream_parser_buffer.h"
#include "media/base/text_track_config.h"
+#include "media/base/timestamp_constants.h"
#include "media/formats/mp2t/es_parser.h"
#include "media/formats/mp2t/es_parser_adts.h"
#include "media/formats/mp2t/es_parser_h264.h"
@@ -170,7 +170,7 @@ void Mp2tStreamParser::Init(
const EncryptedMediaInitDataCB& encrypted_media_init_data_cb,
const NewMediaSegmentCB& new_segment_cb,
const base::Closure& end_of_segment_cb,
- const LogCB& log_cb) {
+ const scoped_refptr<MediaLog>& media_log) {
DCHECK(!is_initialized_);
DCHECK(init_cb_.is_null());
DCHECK(!init_cb.is_null());
@@ -185,7 +185,7 @@ void Mp2tStreamParser::Init(
encrypted_media_init_data_cb_ = encrypted_media_init_data_cb;
new_segment_cb_ = new_segment_cb;
end_of_segment_cb_ = end_of_segment_cb;
- log_cb_ = log_cb;
+ media_log_ = media_log;
}
void Mp2tStreamParser::Flush() {
@@ -200,6 +200,25 @@ void Mp2tStreamParser::Flush() {
delete pid_state;
}
pids_.clear();
+
+ // Flush is invoked from SourceBuffer.abort/SourceState::ResetParserState, and
+ // MSE spec prohibits emitting new configs in ResetParserState algorithm (see
+ // https://w3c.github.io/media-source/#sourcebuffer-reset-parser-state,
+ // 3.5.2 Reset Parser State states that new frames might be processed only in
+ // PARSING_MEDIA_SEGMENT and therefore doesn't allow emitting new configs,
+ // since that might need to run "init segment received" algorithm).
+ // So before we emit remaining buffers here, we need to trim our buffer queue
+ // so that we leave only buffers with configs that were already sent.
+ for (auto buffer_queue_iter = buffer_queue_chain_.begin();
+ buffer_queue_iter != buffer_queue_chain_.end(); ++buffer_queue_iter) {
+ const BufferQueueWithConfig& queue_with_config = *buffer_queue_iter;
+ if (!queue_with_config.is_config_sent) {
+ DVLOG(LOG_LEVEL_ES) << "Flush: dropping buffers with unsent new configs.";
+ buffer_queue_chain_.erase(buffer_queue_iter, buffer_queue_chain_.end());
+ break;
+ }
+ }
+
EmitRemainingBuffers();
buffer_queue_chain_.clear();
@@ -351,15 +370,12 @@ void Mp2tStreamParser::RegisterPes(int pmt_pid,
sbr_in_mimetype_));
is_audio = true;
} else if (stream_type == kStreamTypeMpeg1Audio) {
- es_parser.reset(
- new EsParserMpeg1Audio(
- base::Bind(&Mp2tStreamParser::OnAudioConfigChanged,
- base::Unretained(this),
- pes_pid),
- base::Bind(&Mp2tStreamParser::OnEmitAudioBuffer,
- base::Unretained(this),
- pes_pid),
- log_cb_));
+ es_parser.reset(new EsParserMpeg1Audio(
+ base::Bind(&Mp2tStreamParser::OnAudioConfigChanged,
+ base::Unretained(this), pes_pid),
+ base::Bind(&Mp2tStreamParser::OnEmitAudioBuffer, base::Unretained(this),
+ pes_pid),
+ media_log_));
is_audio = true;
} else {
return;
diff --git a/chromium/media/formats/mp2t/mp2t_stream_parser.h b/chromium/media/formats/mp2t/mp2t_stream_parser.h
index ea91d841834..2bd7fa5941f 100644
--- a/chromium/media/formats/mp2t/mp2t_stream_parser.h
+++ b/chromium/media/formats/mp2t/mp2t_stream_parser.h
@@ -31,15 +31,14 @@ class MEDIA_EXPORT Mp2tStreamParser : public StreamParser {
~Mp2tStreamParser() override;
// StreamParser implementation.
- void Init(
- const InitCB& init_cb,
- const NewConfigCB& config_cb,
- const NewBuffersCB& new_buffers_cb,
- bool ignore_text_tracks,
- const EncryptedMediaInitDataCB& encrypted_media_init_data_cb,
- const NewMediaSegmentCB& new_segment_cb,
- const base::Closure& end_of_segment_cb,
- const LogCB& log_cb) override;
+ void Init(const InitCB& init_cb,
+ const NewConfigCB& config_cb,
+ const NewBuffersCB& new_buffers_cb,
+ bool ignore_text_tracks,
+ const EncryptedMediaInitDataCB& encrypted_media_init_data_cb,
+ const NewMediaSegmentCB& new_segment_cb,
+ const base::Closure& end_of_segment_cb,
+ const scoped_refptr<MediaLog>& media_log) override;
void Flush() override;
bool Parse(const uint8* buf, int size) override;
@@ -101,7 +100,7 @@ class MEDIA_EXPORT Mp2tStreamParser : public StreamParser {
EncryptedMediaInitDataCB encrypted_media_init_data_cb_;
NewMediaSegmentCB new_segment_cb_;
base::Closure end_of_segment_cb_;
- LogCB log_cb_;
+ scoped_refptr<MediaLog> media_log_;
// True when AAC SBR extension is signalled in the mimetype
// (mp4a.40.5 in the codecs parameter).
diff --git a/chromium/media/formats/mp2t/mp2t_stream_parser_unittest.cc b/chromium/media/formats/mp2t/mp2t_stream_parser_unittest.cc
index 263f117358e..6663a054814 100644
--- a/chromium/media/formats/mp2t/mp2t_stream_parser_unittest.cc
+++ b/chromium/media/formats/mp2t/mp2t_stream_parser_unittest.cc
@@ -186,20 +186,15 @@ class Mp2tStreamParserTest : public testing::Test {
void InitializeParser() {
parser_->Init(
- base::Bind(&Mp2tStreamParserTest::OnInit,
- base::Unretained(this)),
- base::Bind(&Mp2tStreamParserTest::OnNewConfig,
- base::Unretained(this)),
- base::Bind(&Mp2tStreamParserTest::OnNewBuffers,
- base::Unretained(this)),
+ base::Bind(&Mp2tStreamParserTest::OnInit, base::Unretained(this)),
+ base::Bind(&Mp2tStreamParserTest::OnNewConfig, base::Unretained(this)),
+ base::Bind(&Mp2tStreamParserTest::OnNewBuffers, base::Unretained(this)),
true,
- base::Bind(&Mp2tStreamParserTest::OnKeyNeeded,
- base::Unretained(this)),
- base::Bind(&Mp2tStreamParserTest::OnNewSegment,
- base::Unretained(this)),
+ base::Bind(&Mp2tStreamParserTest::OnKeyNeeded, base::Unretained(this)),
+ base::Bind(&Mp2tStreamParserTest::OnNewSegment, base::Unretained(this)),
base::Bind(&Mp2tStreamParserTest::OnEndOfSegment,
base::Unretained(this)),
- LogCB());
+ new MediaLog());
}
bool ParseMpeg2TsFile(const std::string& filename, int append_bytes) {
diff --git a/chromium/media/formats/mp2t/ts_section_pes.cc b/chromium/media/formats/mp2t/ts_section_pes.cc
index 6c7484809ee..fe7b4dc2f78 100644
--- a/chromium/media/formats/mp2t/ts_section_pes.cc
+++ b/chromium/media/formats/mp2t/ts_section_pes.cc
@@ -7,7 +7,7 @@
#include "base/logging.h"
#include "base/strings/string_number_conversions.h"
#include "media/base/bit_reader.h"
-#include "media/base/buffers.h"
+#include "media/base/timestamp_constants.h"
#include "media/formats/mp2t/es_parser.h"
#include "media/formats/mp2t/mp2t_common.h"
#include "media/formats/mp2t/timestamp_unroller.h"
diff --git a/chromium/media/formats/mp4/aac.cc b/chromium/media/formats/mp4/aac.cc
index bbfd4f57854..853271f3250 100644
--- a/chromium/media/formats/mp4/aac.cc
+++ b/chromium/media/formats/mp4/aac.cc
@@ -22,7 +22,8 @@ AAC::AAC()
AAC::~AAC() {
}
-bool AAC::Parse(const std::vector<uint8>& data, const LogCB& log_cb) {
+bool AAC::Parse(const std::vector<uint8>& data,
+ const scoped_refptr<MediaLog>& media_log) {
#if defined(OS_ANDROID)
codec_specific_data_ = data;
#endif
@@ -37,6 +38,11 @@ bool AAC::Parse(const std::vector<uint8>& data, const LogCB& log_cb) {
frequency_ = 0;
extension_frequency_ = 0;
+ // TODO(msu.koo): Need to update comments after checking which version of
+ // ISO 14496-3 this implementation is according to. Also need to reflect
+ // ISO 14496-3:2009 if ISO 14496-3:2005 was reflected here.
+ // https://crbug.com/532281
+
// The following code is written according to ISO 14496 Part 3 Table 1.13 -
// Syntax of AudioSpecificConfig.
@@ -57,9 +63,6 @@ bool AAC::Parse(const std::vector<uint8>& data, const LogCB& log_cb) {
RCHECK(reader.ReadBits(5, &profile_));
}
- MEDIA_LOG(INFO, log_cb) << "Audio codec: mp4a.40." << std::hex
- << static_cast<int>(profile_);
-
RCHECK(SkipDecoderGASpecificConfig(&reader));
RCHECK(SkipErrorSpecificConfig());
@@ -95,12 +98,26 @@ bool AAC::Parse(const std::vector<uint8>& data, const LogCB& log_cb) {
}
if (frequency_ == 0) {
- RCHECK(frequency_index_ < kADTSFrequencyTableSize);
+ if (frequency_index_ >= kADTSFrequencyTableSize) {
+ MEDIA_LOG(ERROR, media_log)
+ << "Sampling Frequency Index(0x"
+ << std::hex << static_cast<int>(frequency_index_)
+ << ") is not supported. Please see ISO 14496-3:2005 Table 1.16 "
+ << "for supported Sampling Frequencies.";
+ return false;
+ }
frequency_ = kADTSFrequencyTable[frequency_index_];
}
if (extension_frequency_ == 0 && extension_frequency_index != 0xff) {
- RCHECK(extension_frequency_index < kADTSFrequencyTableSize);
+ if (extension_frequency_index >= kADTSFrequencyTableSize) {
+ MEDIA_LOG(ERROR, media_log)
+ << "Extension Sampling Frequency Index(0x"
+ << std::hex << static_cast<int>(extension_frequency_index)
+ << ") is not supported. Please see ISO 14496-3:2005 Table 1.16 "
+ << "for supported Sampling Frequencies.";
+ return false;
+ }
extension_frequency_ = kADTSFrequencyTable[extension_frequency_index];
}
@@ -108,12 +125,33 @@ bool AAC::Parse(const std::vector<uint8>& data, const LogCB& log_cb) {
if (ps_present && channel_config_ == 1) {
channel_layout_ = CHANNEL_LAYOUT_STEREO;
} else {
- RCHECK(channel_config_ < kADTSChannelLayoutTableSize);
+ if (channel_config_ >= kADTSChannelLayoutTableSize) {
+ MEDIA_LOG(ERROR, media_log)
+ << "Channel Configuration("
+ << static_cast<int>(channel_config_)
+ << ") is not supported. Please see ISO 14496-3:2005 Table 1.17 "
+ << "for supported Channel Configurations.";
+ return false;
+ }
channel_layout_ = kADTSChannelLayoutTable[channel_config_];
}
+ DCHECK(channel_layout_ != CHANNEL_LAYOUT_NONE);
+
+ if (profile_ < 1 || profile_ > 4) {
+ MEDIA_LOG(ERROR, media_log)
+ << "Audio codec(mp4a.40." << static_cast<int>(profile_)
+ << ") is not supported. Please see ISO 14496-3:2005 Table 1.3 "
+ << "for Audio Profile Definitions.";
+ return false;
+ }
+
+ MEDIA_LOG(INFO, media_log)
+ << "Audio codec: mp4a.40." << static_cast<int>(profile_)
+ << ". Sampling frequency: " << frequency_ << "Hz"
+ << ". Sampling frequency(Extension): " << extension_frequency_ << "Hz"
+ << ". Channel layout: " << channel_layout_ << ".";
- return frequency_ != 0 && channel_layout_ != CHANNEL_LAYOUT_NONE &&
- profile_ >= 1 && profile_ <= 4;
+ return true;
}
int AAC::GetOutputSamplesPerSecond(bool sbr_in_mimetype) const {
diff --git a/chromium/media/formats/mp4/aac.h b/chromium/media/formats/mp4/aac.h
index 67f981e2598..e76ea36fa7f 100644
--- a/chromium/media/formats/mp4/aac.h
+++ b/chromium/media/formats/mp4/aac.h
@@ -31,7 +31,8 @@ class MEDIA_EXPORT AAC {
// The function will parse the data and get the ElementaryStreamDescriptor,
// then it will parse the ElementaryStreamDescriptor to get audio stream
// configurations.
- bool Parse(const std::vector<uint8>& data, const LogCB& log_cb);
+ bool Parse(const std::vector<uint8>& data,
+ const scoped_refptr<MediaLog>& media_log);
// Gets the output sample rate for the AAC stream.
// |sbr_in_mimetype| should be set to true if the SBR mode is
diff --git a/chromium/media/formats/mp4/aac_unittest.cc b/chromium/media/formats/mp4/aac_unittest.cc
index 9d65c31cea5..a31952a727b 100644
--- a/chromium/media/formats/mp4/aac_unittest.cc
+++ b/chromium/media/formats/mp4/aac_unittest.cc
@@ -2,20 +2,77 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/formats/mp4/aac.h"
+#include <string>
+#include "media/base/mock_media_log.h"
+#include "media/formats/mp4/aac.h"
+#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
+using ::testing::InSequence;
+using ::testing::StrictMock;
+using ::testing::AllOf;
+
namespace media {
namespace mp4 {
+MATCHER_P(AudioProfileLog, profile_string, "") {
+ return CONTAINS_STRING(arg,
+ "Audio codec: " + std::string(profile_string) + ".");
+}
+
+MATCHER_P(AudioSamplingFrequencyLog, frequency_string, "") {
+ return CONTAINS_STRING(
+ arg, "Sampling frequency: " + std::string(frequency_string) + "Hz.");
+}
+
+MATCHER_P(AudioExtensionSamplingFrequencyLog, ex_string, "") {
+ return CONTAINS_STRING(
+ arg, "Sampling frequency(Extension): " + std::string(ex_string) + "Hz.");
+}
+
+MATCHER_P(AudioChannelLayoutLog, layout_string, "") {
+ return CONTAINS_STRING(
+ arg, "Channel layout: " + std::string(layout_string) + ".");
+}
+
+MATCHER_P(UnsupportedFrequencyIndexLog, frequency_index, "") {
+ return CONTAINS_STRING(
+ arg,
+ "Sampling Frequency Index(0x" +
+ std::string(frequency_index) + ") is not supported.");
+}
+
+MATCHER_P(UnsupportedExtensionFrequencyIndexLog, frequency_index, "") {
+ return CONTAINS_STRING(
+ arg,
+ "Extension Sampling Frequency Index(0x" +
+ std::string(frequency_index) + ") is not supported.");
+}
+
+MATCHER_P(UnsupportedChannelConfigLog, channel_index, "") {
+ return CONTAINS_STRING(
+ arg,
+ "Channel Configuration(" + std::string(channel_index) +
+ ") is not supported");
+}
+
+MATCHER_P(UnsupportedAudioProfileLog, profile_string, "") {
+ return CONTAINS_STRING(
+ arg,
+ "Audio codec(" + std::string(profile_string) + ") is not supported");
+}
+
class AACTest : public testing::Test {
public:
+ AACTest() : media_log_(new StrictMock<MockMediaLog>()) {}
+
bool Parse(const std::vector<uint8>& data) {
- return aac_.Parse(data, LogCB());
+ return aac_.Parse(data, media_log_);
}
+ scoped_refptr<StrictMock<MockMediaLog>> media_log_;
AAC aac_;
};
@@ -25,6 +82,10 @@ TEST_F(AACTest, BasicProfileTest) {
data.assign(buffer, buffer + sizeof(buffer));
+ EXPECT_MEDIA_LOG(AllOf(AudioProfileLog("mp4a.40.2"),
+ AudioSamplingFrequencyLog("44100"),
+ AudioExtensionSamplingFrequencyLog("0"),
+ AudioChannelLayoutLog("3")));
EXPECT_TRUE(Parse(data));
EXPECT_EQ(aac_.GetOutputSamplesPerSecond(false), 44100);
EXPECT_EQ(aac_.GetChannelLayout(false), CHANNEL_LAYOUT_STEREO);
@@ -36,6 +97,10 @@ TEST_F(AACTest, ExtensionTest) {
data.assign(buffer, buffer + sizeof(buffer));
+ EXPECT_MEDIA_LOG(AllOf(AudioProfileLog("mp4a.40.2"),
+ AudioSamplingFrequencyLog("24000"),
+ AudioExtensionSamplingFrequencyLog("48000"),
+ AudioChannelLayoutLog("3")));
EXPECT_TRUE(Parse(data));
EXPECT_EQ(aac_.GetOutputSamplesPerSecond(false), 48000);
EXPECT_EQ(aac_.GetOutputSamplesPerSecond(true), 48000);
@@ -52,6 +117,10 @@ TEST_F(AACTest, ImplicitSBR_ChannelConfig0) {
data.assign(buffer, buffer + sizeof(buffer));
+ EXPECT_MEDIA_LOG(AllOf(AudioProfileLog("mp4a.40.2"),
+ AudioSamplingFrequencyLog("24000"),
+ AudioExtensionSamplingFrequencyLog("0"),
+ AudioChannelLayoutLog("2")));
EXPECT_TRUE(Parse(data));
// Test w/o implict SBR.
@@ -70,6 +139,10 @@ TEST_F(AACTest, ImplicitSBR_ChannelConfig1) {
data.assign(buffer, buffer + sizeof(buffer));
+ EXPECT_MEDIA_LOG(AllOf(AudioProfileLog("mp4a.40.2"),
+ AudioSamplingFrequencyLog("24000"),
+ AudioExtensionSamplingFrequencyLog("0"),
+ AudioChannelLayoutLog("3")));
EXPECT_TRUE(Parse(data));
// Test w/o implict SBR.
@@ -87,6 +160,10 @@ TEST_F(AACTest, SixChannelTest) {
data.assign(buffer, buffer + sizeof(buffer));
+ EXPECT_MEDIA_LOG(AllOf(AudioProfileLog("mp4a.40.2"),
+ AudioSamplingFrequencyLog("48000"),
+ AudioExtensionSamplingFrequencyLog("0"),
+ AudioChannelLayoutLog("12")));
EXPECT_TRUE(Parse(data));
EXPECT_EQ(aac_.GetOutputSamplesPerSecond(false), 48000);
EXPECT_EQ(aac_.GetChannelLayout(false), CHANNEL_LAYOUT_5_1_BACK);
@@ -102,17 +179,23 @@ TEST_F(AACTest, DataTooShortTest) {
}
TEST_F(AACTest, IncorrectProfileTest) {
+ InSequence s;
uint8 buffer[] = {0x0, 0x08};
std::vector<uint8> data;
data.assign(buffer, buffer + sizeof(buffer));
-
EXPECT_FALSE(Parse(data));
data[0] = 0x08;
+ EXPECT_MEDIA_LOG(AllOf(AudioProfileLog("mp4a.40.1"),
+ AudioSamplingFrequencyLog("96000"),
+ AudioExtensionSamplingFrequencyLog("0"),
+ AudioChannelLayoutLog("2")));
EXPECT_TRUE(Parse(data));
data[0] = 0x28;
+ // No media log for this profile 5, since not enough bits are in |data| to
+ // first parse profile 5's extension frequency index.
EXPECT_FALSE(Parse(data));
}
@@ -121,11 +204,14 @@ TEST_F(AACTest, IncorrectFrequencyTest) {
std::vector<uint8> data;
data.assign(buffer, buffer + sizeof(buffer));
-
EXPECT_FALSE(Parse(data));
data[0] = 0x0e;
data[1] = 0x08;
+ EXPECT_MEDIA_LOG(AllOf(AudioProfileLog("mp4a.40.1"),
+ AudioSamplingFrequencyLog("7350"),
+ AudioExtensionSamplingFrequencyLog("0"),
+ AudioChannelLayoutLog("2")));
EXPECT_TRUE(Parse(data));
}
@@ -134,10 +220,82 @@ TEST_F(AACTest, IncorrectChannelTest) {
std::vector<uint8> data;
data.assign(buffer, buffer + sizeof(buffer));
-
EXPECT_FALSE(Parse(data));
data[1] = 0x08;
+ EXPECT_MEDIA_LOG(AllOf(AudioProfileLog("mp4a.40.1"),
+ AudioSamplingFrequencyLog("7350"),
+ AudioExtensionSamplingFrequencyLog("0"),
+ AudioChannelLayoutLog("2")));
+ EXPECT_TRUE(Parse(data));
+}
+
+TEST_F(AACTest, UnsupportedProfileTest) {
+ InSequence s;
+ uint8 buffer[] = {0x3a, 0x08};
+ std::vector<uint8> data;
+
+ data.assign(buffer, buffer + sizeof(buffer));
+ EXPECT_MEDIA_LOG(UnsupportedAudioProfileLog("mp4a.40.7"));
+ EXPECT_FALSE(Parse(data));
+
+ data[0] = 0x12;
+ data[1] = 0x18;
+ EXPECT_MEDIA_LOG(AllOf(AudioProfileLog("mp4a.40.2"),
+ AudioSamplingFrequencyLog("44100"),
+ AudioExtensionSamplingFrequencyLog("0"),
+ AudioChannelLayoutLog("5")));
+ EXPECT_TRUE(Parse(data));
+}
+
+TEST_F(AACTest, UnsupportedChannelLayoutTest) {
+ InSequence s;
+ uint8 buffer[] = {0x12, 0x78};
+ std::vector<uint8> data;
+
+ data.assign(buffer, buffer + sizeof(buffer));
+ EXPECT_MEDIA_LOG(UnsupportedChannelConfigLog("15"));
+ EXPECT_FALSE(Parse(data));
+
+ data[1] = 0x18;
+ EXPECT_MEDIA_LOG(AllOf(AudioProfileLog("mp4a.40.2"),
+ AudioSamplingFrequencyLog("44100"),
+ AudioExtensionSamplingFrequencyLog("0"),
+ AudioChannelLayoutLog("5")));
+ EXPECT_TRUE(Parse(data));
+}
+
+TEST_F(AACTest, UnsupportedFrequencyIndexTest) {
+ InSequence s;
+ uint8 buffer[] = {0x17, 0x10};
+ std::vector<uint8> data;
+
+ data.assign(buffer, buffer + sizeof(buffer));
+ EXPECT_MEDIA_LOG(UnsupportedFrequencyIndexLog("e"));
+ EXPECT_FALSE(Parse(data));
+
+ data[0] = 0x13;
+ EXPECT_MEDIA_LOG(AllOf(AudioProfileLog("mp4a.40.2"),
+ AudioSamplingFrequencyLog("24000"),
+ AudioExtensionSamplingFrequencyLog("0"),
+ AudioChannelLayoutLog("3")));
+ EXPECT_TRUE(Parse(data));
+}
+
+TEST_F(AACTest, UnsupportedExFrequencyIndexTest) {
+ InSequence s;
+ uint8 buffer[] = {0x29, 0x17, 0x08, 0x0};
+ std::vector<uint8> data;
+
+ data.assign(buffer, buffer + sizeof(buffer));
+ EXPECT_MEDIA_LOG(UnsupportedExtensionFrequencyIndexLog("e"));
+ EXPECT_FALSE(Parse(data));
+
+ data[1] = 0x11;
+ EXPECT_MEDIA_LOG(AllOf(AudioProfileLog("mp4a.40.2"),
+ AudioSamplingFrequencyLog("64000"),
+ AudioExtensionSamplingFrequencyLog("64000"),
+ AudioChannelLayoutLog("3")));
EXPECT_TRUE(Parse(data));
}
diff --git a/chromium/media/formats/mp4/avc.cc b/chromium/media/formats/mp4/avc.cc
index 0368d1ba5de..7451a5fe4a2 100644
--- a/chromium/media/formats/mp4/avc.cc
+++ b/chromium/media/formats/mp4/avc.cc
@@ -22,19 +22,19 @@ static bool ConvertAVCToAnnexBInPlaceForLengthSize4(std::vector<uint8>* buf) {
const int kLengthSize = 4;
size_t pos = 0;
while (pos + kLengthSize < buf->size()) {
- uint32 nal_size = (*buf)[pos];
- nal_size = (nal_size << 8) + (*buf)[pos+1];
- nal_size = (nal_size << 8) + (*buf)[pos+2];
- nal_size = (nal_size << 8) + (*buf)[pos+3];
+ uint32 nal_length = (*buf)[pos];
+ nal_length = (nal_length << 8) + (*buf)[pos+1];
+ nal_length = (nal_length << 8) + (*buf)[pos+2];
+ nal_length = (nal_length << 8) + (*buf)[pos+3];
- if (nal_size == 0) {
- DVLOG(1) << "nal_size is 0";
+ if (nal_length == 0) {
+ DVLOG(1) << "nal_length is 0";
return false;
}
std::copy(kAnnexBStartCode, kAnnexBStartCode + kAnnexBStartCodeSize,
buf->begin() + pos);
- pos += kLengthSize + nal_size;
+ pos += kLengthSize + nal_length;
}
return pos == buf->size();
}
@@ -59,7 +59,8 @@ int AVC::FindSubsampleIndex(const std::vector<uint8>& buffer,
}
// static
-bool AVC::ConvertFrameToAnnexB(int length_size, std::vector<uint8>* buffer) {
+bool AVC::ConvertFrameToAnnexB(int length_size, std::vector<uint8>* buffer,
+ std::vector<SubsampleEntry>* subsamples) {
RCHECK(length_size == 1 || length_size == 2 || length_size == 4);
if (length_size == 4)
@@ -71,21 +72,28 @@ bool AVC::ConvertFrameToAnnexB(int length_size, std::vector<uint8>* buffer) {
size_t pos = 0;
while (pos + length_size < temp.size()) {
- int nal_size = temp[pos];
- if (length_size == 2) nal_size = (nal_size << 8) + temp[pos+1];
+ int nal_length = temp[pos];
+ if (length_size == 2) nal_length = (nal_length << 8) + temp[pos+1];
pos += length_size;
- if (nal_size == 0) {
- DVLOG(1) << "nal_size is 0";
+ if (nal_length == 0) {
+ DVLOG(1) << "nal_length is 0";
return false;
}
- RCHECK(pos + nal_size <= temp.size());
+ RCHECK(pos + nal_length <= temp.size());
buffer->insert(buffer->end(), kAnnexBStartCode,
kAnnexBStartCode + kAnnexBStartCodeSize);
+ if (subsamples && !subsamples->empty()) {
+ uint8* buffer_pos = &(*(buffer->end() - kAnnexBStartCodeSize));
+ int subsample_index = FindSubsampleIndex(*buffer, subsamples, buffer_pos);
+ // We've replaced NALU size value with an AnnexB start code.
+ int size_adjustment = kAnnexBStartCodeSize - length_size;
+ (*subsamples)[subsample_index].clear_bytes += size_adjustment;
+ }
buffer->insert(buffer->end(), temp.begin() + pos,
- temp.begin() + pos + nal_size);
- pos += nal_size;
+ temp.begin() + pos + nal_length);
+ pos += nal_length;
}
return pos == temp.size();
}
@@ -299,5 +307,38 @@ bool AVC::IsValidAnnexB(const uint8* buffer, size_t size,
return order_state >= kAfterFirstVCL;
}
+
+AVCBitstreamConverter::AVCBitstreamConverter(
+ scoped_ptr<AVCDecoderConfigurationRecord> avc_config)
+ : avc_config_(avc_config.Pass()) {
+ DCHECK(avc_config_);
+}
+
+AVCBitstreamConverter::~AVCBitstreamConverter() {
+}
+
+bool AVCBitstreamConverter::ConvertFrame(
+ std::vector<uint8>* frame_buf,
+ bool is_keyframe,
+ std::vector<SubsampleEntry>* subsamples) const {
+ // Convert the AVC NALU length fields to Annex B headers, as expected by
+ // decoding libraries. Since this may enlarge the size of the buffer, we also
+ // update the clear byte count for each subsample if encryption is used to
+ // account for the difference in size between the length prefix and Annex B
+ // start code.
+ RCHECK(AVC::ConvertFrameToAnnexB(avc_config_->length_size, frame_buf,
+ subsamples));
+
+ if (is_keyframe) {
+ // If this is a keyframe, we (re-)inject SPS and PPS headers at the start of
+ // a frame. If subsample info is present, we also update the clear byte
+ // count for that first subsample.
+ RCHECK(AVC::InsertParamSetsAnnexB(*avc_config_, frame_buf, subsamples));
+ }
+
+ DCHECK(AVC::IsValidAnnexB(*frame_buf, *subsamples));
+ return true;
+}
+
} // namespace mp4
} // namespace media
diff --git a/chromium/media/formats/mp4/avc.h b/chromium/media/formats/mp4/avc.h
index 4d8769c8f6a..a774ddd1b73 100644
--- a/chromium/media/formats/mp4/avc.h
+++ b/chromium/media/formats/mp4/avc.h
@@ -8,7 +8,9 @@
#include <vector>
#include "base/basictypes.h"
+#include "base/memory/scoped_ptr.h"
#include "media/base/media_export.h"
+#include "media/formats/mp4/bitstream_converter.h"
namespace media {
@@ -20,7 +22,9 @@ struct AVCDecoderConfigurationRecord;
class MEDIA_EXPORT AVC {
public:
- static bool ConvertFrameToAnnexB(int length_size, std::vector<uint8>* buffer);
+ static bool ConvertFrameToAnnexB(int length_size,
+ std::vector<uint8>* buffer,
+ std::vector<SubsampleEntry>* subsamples);
// Inserts the SPS & PPS data from |avc_config| into |buffer|.
// |buffer| is expected to contain AnnexB conformant data.
@@ -55,6 +59,24 @@ class MEDIA_EXPORT AVC {
const uint8* ptr);
};
+// AVCBitstreamConverter converts AVC/H.264 bitstream from MP4 container format
+// with embedded NALU lengths into AnnexB bitstream format (described in ISO/IEC
+// 14496-10) with 4-byte start codes. It also knows how to handle CENC-encrypted
+// streams and adjusts subsample data for those streams while converting.
+class AVCBitstreamConverter : public BitstreamConverter {
+ public:
+ explicit AVCBitstreamConverter(
+ scoped_ptr<AVCDecoderConfigurationRecord> avc_config);
+
+ // BitstreamConverter interface
+ bool ConvertFrame(std::vector<uint8>* frame_buf,
+ bool is_keyframe,
+ std::vector<SubsampleEntry>* subsamples) const override;
+ private:
+ ~AVCBitstreamConverter() override;
+ scoped_ptr<AVCDecoderConfigurationRecord> avc_config_;
+};
+
} // namespace mp4
} // namespace media
diff --git a/chromium/media/formats/mp4/avc_unittest.cc b/chromium/media/formats/mp4/avc_unittest.cc
index e9ec17b8bd8..19f10f3a602 100644
--- a/chromium/media/formats/mp4/avc_unittest.cc
+++ b/chromium/media/formats/mp4/avc_unittest.cc
@@ -145,8 +145,8 @@ void StringToAnnexB(const std::string& str, std::vector<uint8>* buffer,
size_t start = buffer->size();
std::vector<std::string> subsample_nalus = base::SplitString(
- subsample_specs[i], ",", base::KEEP_WHITESPACE,
- base::SPLIT_WANT_NONEMPTY);
+ subsample_specs[i], ",", base::KEEP_WHITESPACE,
+ base::SPLIT_WANT_NONEMPTY);
EXPECT_GT(subsample_nalus.size(), 0u);
for (size_t j = 0; j < subsample_nalus.size(); ++j) {
WriteStartCodeAndNALUType(buffer, subsample_nalus[j]);
@@ -227,7 +227,7 @@ TEST_P(AVCConversionTest, ParseCorrectly) {
std::vector<uint8> buf;
std::vector<SubsampleEntry> subsamples;
MakeInputForLength(GetParam(), &buf);
- EXPECT_TRUE(AVC::ConvertFrameToAnnexB(GetParam(), &buf));
+ EXPECT_TRUE(AVC::ConvertFrameToAnnexB(GetParam(), &buf, &subsamples));
EXPECT_TRUE(AVC::IsValidAnnexB(buf, subsamples));
EXPECT_EQ(buf.size(), sizeof(kExpected));
EXPECT_EQ(0, memcmp(kExpected, &buf[0], sizeof(kExpected)));
@@ -239,7 +239,7 @@ TEST_P(AVCConversionTest, NALUSizeTooLarge) {
std::vector<uint8> buf;
WriteLength(GetParam(), 10 * sizeof(kNALU1), &buf);
buf.insert(buf.end(), kNALU1, kNALU1 + sizeof(kNALU1));
- EXPECT_FALSE(AVC::ConvertFrameToAnnexB(GetParam(), &buf));
+ EXPECT_FALSE(AVC::ConvertFrameToAnnexB(GetParam(), &buf, nullptr));
}
TEST_P(AVCConversionTest, NALUSizeIsZero) {
@@ -254,26 +254,66 @@ TEST_P(AVCConversionTest, NALUSizeIsZero) {
WriteLength(GetParam(), sizeof(kNALU2), &buf);
buf.insert(buf.end(), kNALU2, kNALU2 + sizeof(kNALU2));
- EXPECT_FALSE(AVC::ConvertFrameToAnnexB(GetParam(), &buf));
+ EXPECT_FALSE(AVC::ConvertFrameToAnnexB(GetParam(), &buf, nullptr));
+}
+
+TEST_P(AVCConversionTest, SubsampleSizesUpdatedAfterAnnexBConversion) {
+ std::vector<uint8> buf;
+ std::vector<SubsampleEntry> subsamples;
+ SubsampleEntry subsample;
+
+ // Write the first subsample, consisting of only one NALU
+ WriteLength(GetParam(), sizeof(kNALU1), &buf);
+ buf.insert(buf.end(), kNALU1, kNALU1 + sizeof(kNALU1));
+
+ subsample.clear_bytes = GetParam() + sizeof(kNALU1);
+ subsample.cypher_bytes = 0;
+ subsamples.push_back(subsample);
+
+ // Write the second subsample, containing two NALUs
+ WriteLength(GetParam(), sizeof(kNALU1), &buf);
+ buf.insert(buf.end(), kNALU1, kNALU1 + sizeof(kNALU1));
+ WriteLength(GetParam(), sizeof(kNALU2), &buf);
+ buf.insert(buf.end(), kNALU2, kNALU2 + sizeof(kNALU2));
+
+ subsample.clear_bytes = 2*GetParam() + sizeof(kNALU1) + sizeof(kNALU2);
+ subsample.cypher_bytes = 0;
+ subsamples.push_back(subsample);
+
+ // Write the third subsample, containing a single one-byte NALU
+ WriteLength(GetParam(), 1, &buf);
+ buf.push_back(0);
+ subsample.clear_bytes = GetParam() + 1;
+ subsample.cypher_bytes = 0;
+ subsamples.push_back(subsample);
+
+ EXPECT_TRUE(AVC::ConvertFrameToAnnexB(GetParam(), &buf, &subsamples));
+ EXPECT_EQ(subsamples.size(), 3u);
+ EXPECT_EQ(subsamples[0].clear_bytes, 4 + sizeof(kNALU1));
+ EXPECT_EQ(subsamples[0].cypher_bytes, 0u);
+ EXPECT_EQ(subsamples[1].clear_bytes, 8 + sizeof(kNALU1) + sizeof(kNALU2));
+ EXPECT_EQ(subsamples[1].cypher_bytes, 0u);
+ EXPECT_EQ(subsamples[2].clear_bytes, 4 + 1u);
+ EXPECT_EQ(subsamples[2].cypher_bytes, 0u);
}
TEST_P(AVCConversionTest, ParsePartial) {
std::vector<uint8> buf;
MakeInputForLength(GetParam(), &buf);
buf.pop_back();
- EXPECT_FALSE(AVC::ConvertFrameToAnnexB(GetParam(), &buf));
+ EXPECT_FALSE(AVC::ConvertFrameToAnnexB(GetParam(), &buf, nullptr));
// This tests a buffer ending in the middle of a NAL length. For length size
// of one, this can't happen, so we skip that case.
if (GetParam() != 1) {
MakeInputForLength(GetParam(), &buf);
buf.erase(buf.end() - (sizeof(kNALU2) + 1), buf.end());
- EXPECT_FALSE(AVC::ConvertFrameToAnnexB(GetParam(), &buf));
+ EXPECT_FALSE(AVC::ConvertFrameToAnnexB(GetParam(), &buf, nullptr));
}
}
TEST_P(AVCConversionTest, ParseEmpty) {
std::vector<uint8> buf;
- EXPECT_TRUE(AVC::ConvertFrameToAnnexB(GetParam(), &buf));
+ EXPECT_TRUE(AVC::ConvertFrameToAnnexB(GetParam(), &buf, nullptr));
EXPECT_EQ(0u, buf.size());
}
diff --git a/chromium/media/formats/mp4/bitstream_converter.cc b/chromium/media/formats/mp4/bitstream_converter.cc
new file mode 100644
index 00000000000..fc972cb1498
--- /dev/null
+++ b/chromium/media/formats/mp4/bitstream_converter.cc
@@ -0,0 +1,14 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/formats/mp4/bitstream_converter.h"
+
+namespace media {
+namespace mp4 {
+
+BitstreamConverter::~BitstreamConverter() {
+}
+
+} // namespace mp4
+} // namespace media
diff --git a/chromium/media/formats/mp4/bitstream_converter.h b/chromium/media/formats/mp4/bitstream_converter.h
new file mode 100644
index 00000000000..b5dfdf1b3b9
--- /dev/null
+++ b/chromium/media/formats/mp4/bitstream_converter.h
@@ -0,0 +1,46 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_FORMATS_MP4_BITSTREAM_CONVERTER_H_
+#define MEDIA_FORMATS_MP4_BITSTREAM_CONVERTER_H_
+
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/memory/ref_counted.h"
+
+namespace media {
+
+struct SubsampleEntry;
+
+namespace mp4 {
+
+// BitstreamConverter provides a unified interface for performing some common
+// bitstream conversions (e.g. H.264 MP4 bitstream to Annex B, and elementary
+// AAC stream to ADTS).
+class BitstreamConverter
+ : public base::RefCountedThreadSafe<BitstreamConverter> {
+ public:
+ // Converts a single frame/buffer |frame_buf| into the output format.
+ // Returns true iff the conversion was successful.
+ // |frame_buf| is an input/output parameter, it contains input frame data and
+ // contains converted output data if conversion was successful.
+ // |is_keyframe| indicates whether it's a key frame or not.
+ // |subsamples| is an input/output parameter that contains CENC subsample
+// information. The conversion code should check |subsamples| to determine
+// whether parts of the input frame are encrypted and update it if necessary,
+ // to make sure it correctly describes the converted output frame. See
+ // SubsampleEntry definition in media/base/decrypt_config.h for more info.
+ virtual bool ConvertFrame(std::vector<uint8>* frame_buf,
+ bool is_keyframe,
+ std::vector<SubsampleEntry>* subsamples) const = 0;
+ protected:
+ friend class base::RefCountedThreadSafe<BitstreamConverter>;
+ virtual ~BitstreamConverter();
+};
+
+} // namespace mp4
+} // namespace media
+
+#endif  // MEDIA_FORMATS_MP4_BITSTREAM_CONVERTER_H_
diff --git a/chromium/media/formats/mp4/box_definitions.cc b/chromium/media/formats/mp4/box_definitions.cc
index 72809cfdffd..fc0250d13cb 100644
--- a/chromium/media/formats/mp4/box_definitions.cc
+++ b/chromium/media/formats/mp4/box_definitions.cc
@@ -5,9 +5,16 @@
#include "media/formats/mp4/box_definitions.h"
#include "base/logging.h"
+#include "media/base/video_types.h"
+#include "media/base/video_util.h"
+#include "media/formats/mp4/avc.h"
#include "media/formats/mp4/es_descriptor.h"
#include "media/formats/mp4/rcheck.h"
+#if defined(ENABLE_HEVC_DEMUXING)
+#include "media/formats/mp4/hevc.h"
+#endif
+
namespace media {
namespace mp4 {
@@ -265,8 +272,17 @@ bool TrackHeader::Parse(BoxReader* reader) {
reader->SkipBytes(36) && // matrix
reader->Read4(&width) &&
reader->Read4(&height));
- width >>= 16;
- height >>= 16;
+
+ // Round width and height to the nearest number.
+ // Note: width and height are fixed-point 16.16 values. The following code
+ // rounds a.1x to a + 1, and a.0x to a.
+ width >>= 15;
+ width += 1;
+ width >>= 1;
+ height >>= 15;
+ height += 1;
+ height >>= 1;
+
return true;
}
@@ -291,55 +307,24 @@ bool SampleDescription::Parse(BoxReader* reader) {
return true;
}
-SyncSample::SyncSample() : is_present(false) {}
-SyncSample::~SyncSample() {}
-FourCC SyncSample::BoxType() const { return FOURCC_STSS; }
-
-bool SyncSample::Parse(BoxReader* reader) {
- uint32 entry_count;
- RCHECK(reader->ReadFullBoxHeader() &&
- reader->Read4(&entry_count));
-
- is_present = true;
-
- entries.resize(entry_count);
-
- if (entry_count == 0)
- return true;
-
- for (size_t i = 0; i < entry_count; ++i)
- RCHECK(reader->Read4(&entries[i]));
-
- return true;
-}
-
-bool SyncSample::IsSyncSample(size_t k) const {
- // ISO/IEC 14496-12 Section 8.6.2.1 : If the sync sample box is not present,
- // every sample is a sync sample.
- if (!is_present)
- return true;
-
- // ISO/IEC 14496-12 Section 8.6.2.3 : If entry_count is zero, there are no
- // sync samples within the stream.
- if (entries.size() == 0u)
- return false;
-
- for (size_t i = 0; i < entries.size(); ++i) {
- if (entries[i] == k)
- return true;
- }
-
- return false;
-}
-
SampleTable::SampleTable() {}
SampleTable::~SampleTable() {}
FourCC SampleTable::BoxType() const { return FOURCC_STBL; }
bool SampleTable::Parse(BoxReader* reader) {
- return reader->ScanChildren() &&
- reader->ReadChild(&description) &&
- reader->MaybeReadChild(&sync_sample);
+ RCHECK(reader->ScanChildren() &&
+ reader->ReadChild(&description));
+ // There could be multiple SampleGroupDescription boxes with different
+ // grouping types. For common encryption, the relevant grouping type is
+ // 'seig'. Continue reading until 'seig' is found, or until running out of
+ // child boxes.
+ while (reader->HasChild(&sample_group_description)) {
+ RCHECK(reader->ReadChild(&sample_group_description));
+ if (sample_group_description.grouping_type == FOURCC_SEIG)
+ break;
+ sample_group_description.entries.clear();
+ }
+ return true;
}
EditList::EditList() {}
@@ -409,16 +394,17 @@ AVCDecoderConfigurationRecord::~AVCDecoderConfigurationRecord() {}
FourCC AVCDecoderConfigurationRecord::BoxType() const { return FOURCC_AVCC; }
bool AVCDecoderConfigurationRecord::Parse(BoxReader* reader) {
- return ParseInternal(reader, reader->log_cb());
+ return ParseInternal(reader, reader->media_log());
}
bool AVCDecoderConfigurationRecord::Parse(const uint8* data, int data_size) {
BufferReader reader(data, data_size);
- return ParseInternal(&reader, LogCB());
+ return ParseInternal(&reader, new MediaLog());
}
-bool AVCDecoderConfigurationRecord::ParseInternal(BufferReader* reader,
- const LogCB& log_cb) {
+bool AVCDecoderConfigurationRecord::ParseInternal(
+ BufferReader* reader,
+ const scoped_refptr<MediaLog>& media_log) {
RCHECK(reader->Read1(&version) && version == 1 &&
reader->Read1(&profile_indication) &&
reader->Read1(&profile_compatibility) &&
@@ -441,11 +427,11 @@ bool AVCDecoderConfigurationRecord::ParseInternal(BufferReader* reader,
reader->ReadVec(&sps_list[i], sps_length));
RCHECK(sps_list[i].size() > 4);
- if (!log_cb.is_null()) {
- MEDIA_LOG(INFO, log_cb) << "Video codec: avc1." << std::hex
- << static_cast<int>(sps_list[i][1])
- << static_cast<int>(sps_list[i][2])
- << static_cast<int>(sps_list[i][3]);
+ if (media_log.get()) {
+ MEDIA_LOG(INFO, media_log) << "Video codec: avc1." << std::hex
+ << static_cast<int>(sps_list[i][1])
+ << static_cast<int>(sps_list[i][2])
+ << static_cast<int>(sps_list[i][3]);
}
}
@@ -476,7 +462,9 @@ VideoSampleEntry::VideoSampleEntry()
: format(FOURCC_NULL),
data_reference_index(0),
width(0),
- height(0) {}
+ height(0),
+ video_codec(kUnknownVideoCodec),
+ video_codec_profile(VIDEO_CODEC_PROFILE_UNKNOWN) {}
VideoSampleEntry::~VideoSampleEntry() {}
FourCC VideoSampleEntry::BoxType() const {
@@ -485,6 +473,26 @@ FourCC VideoSampleEntry::BoxType() const {
return FOURCC_NULL;
}
+namespace {
+
+bool IsFormatValidH264(const FourCC& format,
+ const ProtectionSchemeInfo& sinf) {
+ return format == FOURCC_AVC1 || format == FOURCC_AVC3 ||
+ (format == FOURCC_ENCV && (sinf.format.format == FOURCC_AVC1 ||
+ sinf.format.format == FOURCC_AVC3));
+}
+
+#if defined(ENABLE_HEVC_DEMUXING)
+bool IsFormatValidHEVC(const FourCC& format,
+ const ProtectionSchemeInfo& sinf) {
+ return format == FOURCC_HEV1 || format == FOURCC_HVC1 ||
+ (format == FOURCC_ENCV && (sinf.format.format == FOURCC_HEV1 ||
+ sinf.format.format == FOURCC_HVC1));
+}
+#endif
+
+}
+
bool VideoSampleEntry::Parse(BoxReader* reader) {
format = reader->type();
RCHECK(reader->SkipBytes(6) &&
@@ -506,16 +514,44 @@ bool VideoSampleEntry::Parse(BoxReader* reader) {
}
}
- if (IsFormatValid())
- RCHECK(reader->ReadChild(&avcc));
+ if (IsFormatValidH264(format, sinf)) {
+ DVLOG(2) << __FUNCTION__
+ << " reading AVCDecoderConfigurationRecord (avcC)";
+ scoped_ptr<AVCDecoderConfigurationRecord> avcConfig(
+ new AVCDecoderConfigurationRecord());
+ RCHECK(reader->ReadChild(avcConfig.get()));
+ frame_bitstream_converter = make_scoped_refptr(
+ new AVCBitstreamConverter(avcConfig.Pass()));
+ video_codec = kCodecH264;
+ video_codec_profile = H264PROFILE_MAIN;
+#if defined(ENABLE_HEVC_DEMUXING)
+ } else if (IsFormatValidHEVC(format, sinf)) {
+ DVLOG(2) << __FUNCTION__
+ << " parsing HEVCDecoderConfigurationRecord (hvcC)";
+ scoped_ptr<HEVCDecoderConfigurationRecord> hevcConfig(
+ new HEVCDecoderConfigurationRecord());
+ RCHECK(reader->ReadChild(hevcConfig.get()));
+ frame_bitstream_converter = make_scoped_refptr(
+ new HEVCBitstreamConverter(hevcConfig.Pass()));
+ video_codec = kCodecHEVC;
+#endif
+ } else {
+ // Unknown/unsupported format
+ MEDIA_LOG(ERROR, reader->media_log()) << __FUNCTION__
+ << " unsupported video format "
+ << FourCCToString(format);
+ return false;
+ }
return true;
}
bool VideoSampleEntry::IsFormatValid() const {
- return format == FOURCC_AVC1 || format == FOURCC_AVC3 ||
- (format == FOURCC_ENCV && (sinf.format.format == FOURCC_AVC1 ||
- sinf.format.format == FOURCC_AVC3));
+#if defined(ENABLE_HEVC_DEMUXING)
+ if (IsFormatValidHEVC(format, sinf))
+ return true;
+#endif
+ return IsFormatValidH264(format, sinf);
}
ElementaryStreamDescriptor::ElementaryStreamDescriptor()
@@ -538,12 +574,12 @@ bool ElementaryStreamDescriptor::Parse(BoxReader* reader) {
object_type = es_desc.object_type();
if (object_type != 0x40) {
- MEDIA_LOG(INFO, reader->log_cb()) << "Audio codec: mp4a." << std::hex
- << static_cast<int>(object_type);
+ MEDIA_LOG(INFO, reader->media_log()) << "Audio codec: mp4a." << std::hex
+ << static_cast<int>(object_type);
}
if (es_desc.IsAAC(object_type))
- RCHECK(aac.Parse(es_desc.decoder_specific_info(), reader->log_cb()));
+ RCHECK(aac.Parse(es_desc.decoder_specific_info(), reader->media_log()));
return true;
}
@@ -709,7 +745,7 @@ bool Movie::Parse(BoxReader* reader) {
RCHECK(reader->ScanChildren() && reader->ReadChild(&header) &&
reader->ReadChildren(&tracks));
- RCHECK_MEDIA_LOGGED(reader->ReadChild(&extends), reader->log_cb(),
+ RCHECK_MEDIA_LOGGED(reader->ReadChild(&extends), reader->media_log(),
"Detected unfragmented MP4. Media Source Extensions "
"require ISO BMFF moov to contain mvex to indicate that "
"Movie Fragments are to be expected.");
@@ -949,13 +985,17 @@ bool TrackFragment::Parse(BoxReader* reader) {
// different grouping types. For common encryption, the relevant grouping type
// is 'seig'. Continue reading until 'seig' is found, or until running out of
// child boxes.
- while (sample_group_description.grouping_type != FOURCC_SEIG &&
- reader->HasChild(&sample_group_description)) {
+ while (reader->HasChild(&sample_group_description)) {
RCHECK(reader->ReadChild(&sample_group_description));
+ if (sample_group_description.grouping_type == FOURCC_SEIG)
+ break;
+ sample_group_description.entries.clear();
}
- while (sample_to_group.grouping_type != FOURCC_SEIG &&
- reader->HasChild(&sample_to_group)) {
+ while (reader->HasChild(&sample_to_group)) {
RCHECK(reader->ReadChild(&sample_to_group));
+ if (sample_to_group.grouping_type == FOURCC_SEIG)
+ break;
+ sample_to_group.entries.clear();
}
return true;
}
diff --git a/chromium/media/formats/mp4/box_definitions.h b/chromium/media/formats/mp4/box_definitions.h
index 2dfab63b5d1..e1f17293e0c 100644
--- a/chromium/media/formats/mp4/box_definitions.h
+++ b/chromium/media/formats/mp4/box_definitions.h
@@ -12,6 +12,7 @@
#include "base/compiler_specific.h"
#include "media/base/media_export.h"
#include "media/base/media_log.h"
+#include "media/base/video_codecs.h"
#include "media/formats/mp4/aac.h"
#include "media/formats/mp4/avc.h"
#include "media/formats/mp4/box_reader.h"
@@ -185,7 +186,8 @@ struct MEDIA_EXPORT AVCDecoderConfigurationRecord : Box {
std::vector<PPS> pps_list;
private:
- bool ParseInternal(BufferReader* reader, const LogCB& log_cb);
+ bool ParseInternal(BufferReader* reader,
+ const scoped_refptr<MediaLog>& media_log);
};
struct MEDIA_EXPORT PixelAspectRatioBox : Box {
@@ -206,10 +208,12 @@ struct MEDIA_EXPORT VideoSampleEntry : Box {
PixelAspectRatioBox pixel_aspect;
ProtectionSchemeInfo sinf;
- // Currently expected to be present regardless of format.
- AVCDecoderConfigurationRecord avcc;
+ VideoCodec video_codec;
+ VideoCodecProfile video_codec_profile;
bool IsFormatValid() const;
+
+ scoped_refptr<BitstreamConverter> frame_bitstream_converter;
};
struct MEDIA_EXPORT ElementaryStreamDescriptor : Box {
@@ -240,15 +244,20 @@ struct MEDIA_EXPORT SampleDescription : Box {
std::vector<AudioSampleEntry> audio_entries;
};
-struct MEDIA_EXPORT SyncSample : Box {
- DECLARE_BOX_METHODS(SyncSample);
+struct MEDIA_EXPORT CencSampleEncryptionInfoEntry {
+ CencSampleEncryptionInfoEntry();
+ ~CencSampleEncryptionInfoEntry();
- // Returns true if the |k|th sample is a sync sample (aka a random
- // access point). Returns false if sample |k| is not a sync sample.
- bool IsSyncSample(size_t k) const;
+ bool is_encrypted;
+ uint8 iv_size;
+ std::vector<uint8> key_id;
+};
- bool is_present;
- std::vector<uint32> entries;
+struct MEDIA_EXPORT SampleGroupDescription : Box { // 'sgpd'.
+ DECLARE_BOX_METHODS(SampleGroupDescription);
+
+ uint32 grouping_type;
+ std::vector<CencSampleEncryptionInfoEntry> entries;
};
struct MEDIA_EXPORT SampleTable : Box {
@@ -259,7 +268,7 @@ struct MEDIA_EXPORT SampleTable : Box {
// includes the 'stts', 'stsc', and 'stco' boxes, which must contain no
// samples in order to be compliant files.
SampleDescription description;
- SyncSample sync_sample;
+ SampleGroupDescription sample_group_description;
};
struct MEDIA_EXPORT MediaHeader : Box {
@@ -385,22 +394,6 @@ class MEDIA_EXPORT IndependentAndDisposableSamples : public Box {
std::vector<SampleDependsOn> sample_depends_on_;
};
-struct MEDIA_EXPORT CencSampleEncryptionInfoEntry {
- CencSampleEncryptionInfoEntry();
- ~CencSampleEncryptionInfoEntry();
-
- bool is_encrypted;
- uint8 iv_size;
- std::vector<uint8> key_id;
-};
-
-struct MEDIA_EXPORT SampleGroupDescription : Box { // 'sgpd'.
- DECLARE_BOX_METHODS(SampleGroupDescription);
-
- uint32 grouping_type;
- std::vector<CencSampleEncryptionInfoEntry> entries;
-};
-
struct MEDIA_EXPORT SampleToGroupEntry {
enum GroupDescriptionIndexBase {
kTrackGroupDescriptionIndexBase = 0,
diff --git a/chromium/media/formats/mp4/box_reader.cc b/chromium/media/formats/mp4/box_reader.cc
index 4368d8ddc22..d4683dd9d6c 100644
--- a/chromium/media/formats/mp4/box_reader.cc
+++ b/chromium/media/formats/mp4/box_reader.cc
@@ -77,10 +77,10 @@ bool BufferReader::Read4sInto8s(int64* v) {
BoxReader::BoxReader(const uint8* buf,
const int size,
- const LogCB& log_cb,
+ const scoped_refptr<MediaLog>& media_log,
bool is_EOS)
: BufferReader(buf, size),
- log_cb_(log_cb),
+ media_log_(media_log),
type_(FOURCC_NULL),
version_(0),
flags_(0),
@@ -100,14 +100,13 @@ BoxReader::~BoxReader() {
// static
BoxReader* BoxReader::ReadTopLevelBox(const uint8* buf,
const int buf_size,
- const LogCB& log_cb,
+ const scoped_refptr<MediaLog>& media_log,
bool* err) {
- scoped_ptr<BoxReader> reader(
- new BoxReader(buf, buf_size, log_cb, false));
+ scoped_ptr<BoxReader> reader(new BoxReader(buf, buf_size, media_log, false));
if (!reader->ReadHeader(err))
return NULL;
- if (!IsValidTopLevelBox(reader->type(), log_cb)) {
+ if (!IsValidTopLevelBox(reader->type(), media_log)) {
*err = true;
return NULL;
}
@@ -121,13 +120,13 @@ BoxReader* BoxReader::ReadTopLevelBox(const uint8* buf,
// static
bool BoxReader::StartTopLevelBox(const uint8* buf,
const int buf_size,
- const LogCB& log_cb,
+ const scoped_refptr<MediaLog>& media_log,
FourCC* type,
int* box_size,
bool* err) {
- BoxReader reader(buf, buf_size, log_cb, false);
+ BoxReader reader(buf, buf_size, media_log, false);
if (!reader.ReadHeader(err)) return false;
- if (!IsValidTopLevelBox(reader.type(), log_cb)) {
+ if (!IsValidTopLevelBox(reader.type(), media_log)) {
*err = true;
return false;
}
@@ -139,12 +138,12 @@ bool BoxReader::StartTopLevelBox(const uint8* buf,
// static
BoxReader* BoxReader::ReadConcatentatedBoxes(const uint8* buf,
const int buf_size) {
- return new BoxReader(buf, buf_size, LogCB(), true);
+ return new BoxReader(buf, buf_size, new MediaLog(), true);
}
// static
bool BoxReader::IsValidTopLevelBox(const FourCC& type,
- const LogCB& log_cb) {
+ const scoped_refptr<MediaLog>& media_log) {
switch (type) {
case FOURCC_FTYP:
case FOURCC_PDIN:
@@ -166,8 +165,8 @@ bool BoxReader::IsValidTopLevelBox(const FourCC& type,
return true;
default:
// Hex is used to show nonprintable characters and aid in debugging
- MEDIA_LOG(DEBUG, log_cb) << "Unrecognized top-level box type "
- << FourCCToString(type);
+ MEDIA_LOG(DEBUG, media_log) << "Unrecognized top-level box type "
+ << FourCCToString(type);
return false;
}
}
@@ -178,7 +177,7 @@ bool BoxReader::ScanChildren() {
bool err = false;
while (pos() < size()) {
- BoxReader child(&buf_[pos_], size_ - pos_, log_cb_, is_EOS_);
+ BoxReader child(&buf_[pos_], size_ - pos_, media_log_, is_EOS_);
if (!child.ReadHeader(&err)) break;
children_.insert(std::pair<FourCC, BoxReader>(child.type(), child));
@@ -237,7 +236,7 @@ bool BoxReader::ReadHeader(bool* err) {
// All the data bytes are expected to be provided.
size = size_;
} else {
- MEDIA_LOG(DEBUG, log_cb_)
+ MEDIA_LOG(DEBUG, media_log_)
<< "ISO BMFF boxes that run to EOS are not supported";
*err = true;
return false;
diff --git a/chromium/media/formats/mp4/box_reader.h b/chromium/media/formats/mp4/box_reader.h
index 345516a2b61..cf72343f1e8 100644
--- a/chromium/media/formats/mp4/box_reader.h
+++ b/chromium/media/formats/mp4/box_reader.h
@@ -23,7 +23,7 @@ class BoxReader;
struct MEDIA_EXPORT Box {
virtual ~Box();
- // Parse errors may be logged using the BoxReader's log callback.
+ // Parse errors may be logged using the BoxReader's media log.
virtual bool Parse(BoxReader* reader) = 0;
virtual FourCC BoxType() const = 0;
@@ -85,7 +85,7 @@ class MEDIA_EXPORT BoxReader : public BufferReader {
// |buf| is retained but not owned, and must outlive the BoxReader instance.
static BoxReader* ReadTopLevelBox(const uint8* buf,
const int buf_size,
- const LogCB& log_cb,
+ const scoped_refptr<MediaLog>& media_log,
bool* err);
// Read the box header from the current buffer. This function returns true if
@@ -96,7 +96,7 @@ class MEDIA_EXPORT BoxReader : public BufferReader {
// |buf| is not retained.
static bool StartTopLevelBox(const uint8* buf,
const int buf_size,
- const LogCB& log_cb,
+ const scoped_refptr<MediaLog>& media_log,
FourCC* type,
int* box_size,
bool* err) WARN_UNUSED_RESULT;
@@ -113,7 +113,7 @@ class MEDIA_EXPORT BoxReader : public BufferReader {
// otherwise. This returns true for some boxes which we do not parse.
// Helpful in debugging misaligned appends.
static bool IsValidTopLevelBox(const FourCC& type,
- const LogCB& log_cb);
+ const scoped_refptr<MediaLog>& media_log);
// Scan through all boxes within the current box, starting at the current
// buffer position. Must be called before any of the *Child functions work.
@@ -162,12 +162,15 @@ class MEDIA_EXPORT BoxReader : public BufferReader {
uint8 version() const { return version_; }
uint32 flags() const { return flags_; }
- const LogCB& log_cb() const { return log_cb_; }
+ const scoped_refptr<MediaLog>& media_log() const { return media_log_; }
private:
// Create a BoxReader from |buf|. |is_EOS| should be true if |buf| is
// complete stream (i.e. no additional data is expected to be appended).
- BoxReader(const uint8* buf, const int size, const LogCB& log_cb, bool is_EOS);
+ BoxReader(const uint8* buf,
+ const int size,
+ const scoped_refptr<MediaLog>& media_log,
+ bool is_EOS);
// Must be called immediately after init. If the return is false, this
// indicates that the box header and its contents were not available in the
@@ -185,7 +188,7 @@ class MEDIA_EXPORT BoxReader : public BufferReader {
template <typename T>
bool ReadAllChildrenInternal(std::vector<T>* children, bool check_box_type);
- LogCB log_cb_;
+ scoped_refptr<MediaLog> media_log_;
FourCC type_;
uint8 version_;
uint32 flags_;
@@ -248,7 +251,7 @@ bool BoxReader::ReadAllChildrenInternal(std::vector<T>* children,
bool err = false;
while (pos_ < size_) {
- BoxReader child_reader(&buf_[pos_], size_ - pos_, log_cb_, is_EOS_);
+ BoxReader child_reader(&buf_[pos_], size_ - pos_, media_log_, is_EOS_);
if (!child_reader.ReadHeader(&err)) break;
T child;
RCHECK(!check_box_type || child_reader.type() == child.BoxType());
diff --git a/chromium/media/formats/mp4/box_reader_unittest.cc b/chromium/media/formats/mp4/box_reader_unittest.cc
index 62284e8a52b..fb97c075ec1 100644
--- a/chromium/media/formats/mp4/box_reader_unittest.cc
+++ b/chromium/media/formats/mp4/box_reader_unittest.cc
@@ -7,10 +7,15 @@
#include "base/basictypes.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
+#include "media/base/mock_media_log.h"
#include "media/formats/mp4/box_reader.h"
#include "media/formats/mp4/rcheck.h"
+#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
+using ::testing::HasSubstr;
+using ::testing::StrictMock;
+
namespace media {
namespace mp4 {
@@ -76,17 +81,35 @@ SkipBox::SkipBox() {}
SkipBox::~SkipBox() {}
class BoxReaderTest : public testing::Test {
+ public:
+ BoxReaderTest() : media_log_(new StrictMock<MockMediaLog>()) {}
+
protected:
std::vector<uint8> GetBuf() {
return std::vector<uint8>(kSkipBox, kSkipBox + sizeof(kSkipBox));
}
+
+ void TestTopLevelBox(const uint8* data, int size, uint32 fourCC) {
+ std::vector<uint8> buf(data, data + size);
+
+ bool err;
+ scoped_ptr<BoxReader> reader(
+ BoxReader::ReadTopLevelBox(&buf[0], buf.size(), media_log_, &err));
+
+ EXPECT_FALSE(err);
+ EXPECT_TRUE(reader);
+ EXPECT_EQ(fourCC, reader->type());
+ EXPECT_EQ(reader->size(), size);
+ }
+
+ scoped_refptr<StrictMock<MockMediaLog>> media_log_;
};
TEST_F(BoxReaderTest, ExpectedOperationTest) {
std::vector<uint8> buf = GetBuf();
bool err;
scoped_ptr<BoxReader> reader(
- BoxReader::ReadTopLevelBox(&buf[0], buf.size(), LogCB(), &err));
+ BoxReader::ReadTopLevelBox(&buf[0], buf.size(), media_log_, &err));
EXPECT_FALSE(err);
EXPECT_TRUE(reader.get());
@@ -114,7 +137,7 @@ TEST_F(BoxReaderTest, OuterTooShortTest) {
// Create a soft failure by truncating the outer box.
scoped_ptr<BoxReader> r(
- BoxReader::ReadTopLevelBox(&buf[0], buf.size() - 2, LogCB(), &err));
+ BoxReader::ReadTopLevelBox(&buf[0], buf.size() - 2, media_log_, &err));
EXPECT_FALSE(err);
EXPECT_FALSE(r.get());
@@ -127,7 +150,7 @@ TEST_F(BoxReaderTest, InnerTooLongTest) {
// Make an inner box too big for its outer box.
buf[25] = 1;
scoped_ptr<BoxReader> reader(
- BoxReader::ReadTopLevelBox(&buf[0], buf.size(), LogCB(), &err));
+ BoxReader::ReadTopLevelBox(&buf[0], buf.size(), media_log_, &err));
SkipBox box;
EXPECT_FALSE(box.Parse(reader.get()));
@@ -139,8 +162,11 @@ TEST_F(BoxReaderTest, WrongFourCCTest) {
// Set an unrecognized top-level FourCC.
buf[5] = 1;
+
+ EXPECT_MEDIA_LOG(HasSubstr("Unrecognized top-level box type s\\u0001ip"));
+
scoped_ptr<BoxReader> reader(
- BoxReader::ReadTopLevelBox(&buf[0], buf.size(), LogCB(), &err));
+ BoxReader::ReadTopLevelBox(&buf[0], buf.size(), media_log_, &err));
EXPECT_FALSE(reader.get());
EXPECT_TRUE(err);
}
@@ -149,7 +175,7 @@ TEST_F(BoxReaderTest, ScanChildrenTest) {
std::vector<uint8> buf = GetBuf();
bool err;
scoped_ptr<BoxReader> reader(
- BoxReader::ReadTopLevelBox(&buf[0], buf.size(), LogCB(), &err));
+ BoxReader::ReadTopLevelBox(&buf[0], buf.size(), media_log_, &err));
EXPECT_TRUE(reader->SkipBytes(16) && reader->ScanChildren());
@@ -173,7 +199,7 @@ TEST_F(BoxReaderTest, ReadAllChildrenTest) {
buf[3] = 0x38;
bool err;
scoped_ptr<BoxReader> reader(
- BoxReader::ReadTopLevelBox(&buf[0], buf.size(), LogCB(), &err));
+ BoxReader::ReadTopLevelBox(&buf[0], buf.size(), media_log_, &err));
std::vector<PsshBox> kids;
EXPECT_TRUE(reader->SkipBytes(16) && reader->ReadAllChildren(&kids));
@@ -181,20 +207,6 @@ TEST_F(BoxReaderTest, ReadAllChildrenTest) {
EXPECT_EQ(kids[0].val, 0xdeadbeef); // Ensure order is preserved
}
-static void TestTopLevelBox(const uint8* data, int size, uint32 fourCC) {
-
- std::vector<uint8> buf(data, data + size);
-
- bool err;
- scoped_ptr<BoxReader> reader(
- BoxReader::ReadTopLevelBox(&buf[0], buf.size(), LogCB(), &err));
-
- EXPECT_FALSE(err);
- EXPECT_TRUE(reader);
- EXPECT_EQ(fourCC, reader->type());
- EXPECT_EQ(reader->size(), size);
-}
-
TEST_F(BoxReaderTest, SkippingBloc) {
static const uint8 kData[] = {
0x00, 0x00, 0x00, 0x09, 'b', 'l', 'o', 'c', 0x00
diff --git a/chromium/media/formats/mp4/fourccs.h b/chromium/media/formats/mp4/fourccs.h
index d9086fa8d74..fd977976218 100644
--- a/chromium/media/formats/mp4/fourccs.h
+++ b/chromium/media/formats/mp4/fourccs.h
@@ -31,6 +31,11 @@ enum FourCC {
FOURCC_FTYP = 0x66747970,
FOURCC_HDLR = 0x68646c72,
FOURCC_HINT = 0x68696e74,
+#if defined(ENABLE_HEVC_DEMUXING)
+ FOURCC_HEV1 = 0x68657631,
+ FOURCC_HVC1 = 0x68766331,
+ FOURCC_HVCC = 0x68766343,
+#endif
FOURCC_IODS = 0x696f6473,
FOURCC_MDAT = 0x6d646174,
FOURCC_MDHD = 0x6d646864,
diff --git a/chromium/media/formats/mp4/hevc.cc b/chromium/media/formats/mp4/hevc.cc
new file mode 100644
index 00000000000..4d6b53e6780
--- /dev/null
+++ b/chromium/media/formats/mp4/hevc.cc
@@ -0,0 +1,237 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/formats/mp4/hevc.h"
+
+#include <algorithm>
+#include <vector>
+
+#include "base/logging.h"
+#include "media/base/decrypt_config.h"
+#include "media/filters/h265_parser.h"
+#include "media/formats/mp4/avc.h"
+#include "media/formats/mp4/box_definitions.h"
+#include "media/formats/mp4/box_reader.h"
+
+namespace media {
+namespace mp4 {
+
+HEVCDecoderConfigurationRecord::HEVCDecoderConfigurationRecord()
+ : configurationVersion(0),
+ general_profile_space(0),
+ general_tier_flag(0),
+ general_profile_idc(0),
+ general_profile_compatibility_flags(0),
+ general_constraint_indicator_flags(0),
+ general_level_idc(0),
+ min_spatial_segmentation_idc(0),
+ parallelismType(0),
+ chromaFormat(0),
+ bitDepthLumaMinus8(0),
+ bitDepthChromaMinus8(0),
+ avgFrameRate(0),
+ constantFrameRate(0),
+ numTemporalLayers(0),
+ temporalIdNested(0),
+ lengthSizeMinusOne(0),
+ numOfArrays(0) {}
+
+HEVCDecoderConfigurationRecord::~HEVCDecoderConfigurationRecord() {}
+FourCC HEVCDecoderConfigurationRecord::BoxType() const { return FOURCC_HVCC; }
+
+bool HEVCDecoderConfigurationRecord::Parse(BoxReader* reader) {
+ return ParseInternal(reader, reader->media_log());
+}
+
+bool HEVCDecoderConfigurationRecord::Parse(const uint8* data, int data_size) {
+ BufferReader reader(data, data_size);
+ return ParseInternal(&reader, new MediaLog());
+}
+
+HEVCDecoderConfigurationRecord::HVCCNALArray::HVCCNALArray()
+ : first_byte(0) {}
+
+HEVCDecoderConfigurationRecord::HVCCNALArray::~HVCCNALArray() {}
+
+bool HEVCDecoderConfigurationRecord::ParseInternal(
+ BufferReader* reader,
+ const scoped_refptr<MediaLog>& media_log) {
+ uint8 profile_indication = 0;
+ uint32 general_constraint_indicator_flags_hi = 0;
+ uint16 general_constraint_indicator_flags_lo = 0;
+ uint8 misc = 0;
+ RCHECK(reader->Read1(&configurationVersion) && configurationVersion == 1 &&
+ reader->Read1(&profile_indication) &&
+ reader->Read4(&general_profile_compatibility_flags) &&
+ reader->Read4(&general_constraint_indicator_flags_hi) &&
+ reader->Read2(&general_constraint_indicator_flags_lo) &&
+ reader->Read1(&general_level_idc) &&
+ reader->Read2(&min_spatial_segmentation_idc) &&
+ reader->Read1(&parallelismType) &&
+ reader->Read1(&chromaFormat) &&
+ reader->Read1(&bitDepthLumaMinus8) &&
+ reader->Read1(&bitDepthChromaMinus8) &&
+ reader->Read2(&avgFrameRate) &&
+ reader->Read1(&misc) &&
+ reader->Read1(&numOfArrays));
+
+ general_profile_space = profile_indication >> 6;
+ general_tier_flag = (profile_indication >> 5) & 1;
+ general_profile_idc = profile_indication & 0x1f;
+
+ general_constraint_indicator_flags = general_constraint_indicator_flags_hi;
+ general_constraint_indicator_flags <<= 16;
+ general_constraint_indicator_flags |= general_constraint_indicator_flags_lo;
+
+ min_spatial_segmentation_idc &= 0xfff;
+ parallelismType &= 3;
+ chromaFormat &= 3;
+ bitDepthLumaMinus8 &= 7;
+ bitDepthChromaMinus8 &= 7;
+
+ constantFrameRate = misc >> 6;
+ numTemporalLayers = (misc >> 3) & 7;
+ temporalIdNested = (misc >> 2) & 1;
+ lengthSizeMinusOne = misc & 3;
+
+ DVLOG(2) << __FUNCTION__ << " numOfArrays=" << (int)numOfArrays;
+ arrays.resize(numOfArrays);
+ for (uint32 j = 0; j < numOfArrays; j++) {
+ RCHECK(reader->Read1(&arrays[j].first_byte));
+ uint16 numNalus = 0;
+ RCHECK(reader->Read2(&numNalus));
+ arrays[j].units.resize(numNalus);
+ for (uint32 i = 0; i < numNalus; ++i) {
+ uint16 naluLength = 0;
+ RCHECK(reader->Read2(&naluLength) &&
+ reader->ReadVec(&arrays[j].units[i], naluLength));
+ DVLOG(4) << __FUNCTION__ << " naluType="
+ << (int)(arrays[j].first_byte & 0x3f)
+ << " size=" << arrays[j].units[i].size();
+ }
+ }
+
+ if (media_log.get()) {
+ MEDIA_LOG(INFO, media_log) << "Video codec: hevc";
+ }
+
+ return true;
+}
+
+static const uint8 kAnnexBStartCode[] = {0, 0, 0, 1};
+static const int kAnnexBStartCodeSize = 4;
+
+bool HEVC::InsertParamSetsAnnexB(
+ const HEVCDecoderConfigurationRecord& hevc_config,
+ std::vector<uint8>* buffer,
+ std::vector<SubsampleEntry>* subsamples) {
+ DCHECK(HEVC::IsValidAnnexB(*buffer, *subsamples));
+
+ scoped_ptr<H265Parser> parser(new H265Parser());
+ const uint8* start = &(*buffer)[0];
+ parser->SetEncryptedStream(start, buffer->size(), *subsamples);
+
+ H265NALU nalu;
+ if (parser->AdvanceToNextNALU(&nalu) != H265Parser::kOk)
+ return false;
+
+ std::vector<uint8>::iterator config_insert_point = buffer->begin();
+
+ if (nalu.nal_unit_type == H265NALU::AUD_NUT) {
+ // Move insert point to just after the AUD.
+ config_insert_point += (nalu.data + nalu.size) - start;
+ }
+
+ // Clear |parser| and |start| since they aren't needed anymore and
+ // will hold stale pointers once the insert happens.
+ parser.reset();
+ start = NULL;
+
+ std::vector<uint8> param_sets;
+ RCHECK(HEVC::ConvertConfigToAnnexB(hevc_config, &param_sets));
+ DVLOG(4) << __FUNCTION__ << " converted hvcC to AnnexB "
+ << " size=" << param_sets.size() << " inserted at "
+ << (int)(config_insert_point - buffer->begin());
+
+ if (subsamples && !subsamples->empty()) {
+ int subsample_index = AVC::FindSubsampleIndex(*buffer, subsamples,
+ &(*config_insert_point));
+ // Update the size of the subsample where SPS/PPS is to be inserted.
+ (*subsamples)[subsample_index].clear_bytes += param_sets.size();
+ }
+
+ buffer->insert(config_insert_point,
+ param_sets.begin(), param_sets.end());
+
+ DCHECK(HEVC::IsValidAnnexB(*buffer, *subsamples));
+ return true;
+}
+
+bool HEVC::ConvertConfigToAnnexB(
+ const HEVCDecoderConfigurationRecord& hevc_config,
+ std::vector<uint8>* buffer) {
+ DCHECK(buffer->empty());
+ buffer->clear();
+
+ for (size_t j = 0; j < hevc_config.arrays.size(); j++) {
+ uint8 naluType = hevc_config.arrays[j].first_byte & 0x3f;
+ for (size_t i = 0; i < hevc_config.arrays[j].units.size(); ++i) {
+ DVLOG(3) << __FUNCTION__ << " naluType=" << (int)naluType
+ << " size=" << hevc_config.arrays[j].units[i].size();
+ buffer->insert(buffer->end(), kAnnexBStartCode,
+ kAnnexBStartCode + kAnnexBStartCodeSize);
+ buffer->insert(buffer->end(), hevc_config.arrays[j].units[i].begin(),
+ hevc_config.arrays[j].units[i].end());
+ }
+ }
+
+ return true;
+}
+
+// Verifies AnnexB NALU order according to section 7.4.2.4.4 of ISO/IEC 23008-2.
+bool HEVC::IsValidAnnexB(const std::vector<uint8>& buffer,
+ const std::vector<SubsampleEntry>& subsamples) {
+ return IsValidAnnexB(&buffer[0], buffer.size(), subsamples);
+}
+
+bool HEVC::IsValidAnnexB(const uint8* buffer, size_t size,
+ const std::vector<SubsampleEntry>& subsamples) {
+ DCHECK(buffer);
+
+ if (size == 0)
+ return true;
+
+ // TODO(servolk): Implement this, see crbug.com/527595
+ return true;
+}
+
+HEVCBitstreamConverter::HEVCBitstreamConverter(
+ scoped_ptr<HEVCDecoderConfigurationRecord> hevc_config)
+ : hevc_config_(hevc_config.Pass()) {
+ DCHECK(hevc_config_);
+}
+
+HEVCBitstreamConverter::~HEVCBitstreamConverter() {
+}
+
+bool HEVCBitstreamConverter::ConvertFrame(
+ std::vector<uint8>* frame_buf,
+ bool is_keyframe,
+ std::vector<SubsampleEntry>* subsamples) const {
+ RCHECK(AVC::ConvertFrameToAnnexB(hevc_config_->lengthSizeMinusOne + 1,
+ frame_buf, subsamples));
+
+ if (is_keyframe) {
+ // If this is a keyframe, we (re-)inject HEVC params headers at the start of
+ // a frame. If subsample info is present, we also update the clear byte
+ // count for that first subsample.
+ RCHECK(HEVC::InsertParamSetsAnnexB(*hevc_config_, frame_buf, subsamples));
+ }
+
+ DCHECK(HEVC::IsValidAnnexB(*frame_buf, *subsamples));
+ return true;
+}
+
+} // namespace mp4
+} // namespace media
diff --git a/chromium/media/formats/mp4/hevc.h b/chromium/media/formats/mp4/hevc.h
new file mode 100644
index 00000000000..06974c0dfd1
--- /dev/null
+++ b/chromium/media/formats/mp4/hevc.h
@@ -0,0 +1,106 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_FORMATS_MP4_HEVC_H_
+#define MEDIA_FORMATS_MP4_HEVC_H_
+
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/memory/scoped_ptr.h"
+#include "media/base/media_export.h"
+#include "media/formats/mp4/bitstream_converter.h"
+#include "media/formats/mp4/box_definitions.h"
+
+namespace media {
+
+struct SubsampleEntry;
+
+namespace mp4 {
+
+struct MEDIA_EXPORT HEVCDecoderConfigurationRecord : Box {
+ DECLARE_BOX_METHODS(HEVCDecoderConfigurationRecord);
+
+ // Parses HEVCDecoderConfigurationRecord data encoded in |data|.
+ // Note: This method is intended to parse data outside the MP4StreamParser
+ // context and therefore the box header is not expected to be present
+ // in |data|.
+ // Returns true if |data| was successfully parsed.
+ bool Parse(const uint8* data, int data_size);
+
+ uint8 configurationVersion;
+ uint8 general_profile_space;
+ uint8 general_tier_flag;
+ uint8 general_profile_idc;
+ uint32 general_profile_compatibility_flags;
+ uint64 general_constraint_indicator_flags;
+ uint8 general_level_idc;
+ uint16 min_spatial_segmentation_idc;
+ uint8 parallelismType;
+ uint8 chromaFormat;
+ uint8 bitDepthLumaMinus8;
+ uint8 bitDepthChromaMinus8;
+ uint16 avgFrameRate;
+ uint8 constantFrameRate;
+ uint8 numTemporalLayers;
+ uint8 temporalIdNested;
+ uint8 lengthSizeMinusOne;
+ uint8 numOfArrays;
+
+ typedef std::vector<uint8> HVCCNALUnit;
+ struct HVCCNALArray {
+ HVCCNALArray();
+ ~HVCCNALArray();
+ uint8 first_byte;
+ std::vector<HVCCNALUnit> units;
+ };
+ std::vector<HVCCNALArray> arrays;
+
+ private:
+ bool ParseInternal(BufferReader* reader,
+ const scoped_refptr<MediaLog>& media_log);
+};
+
+class MEDIA_EXPORT HEVC {
+ public:
+ static bool ConvertConfigToAnnexB(
+ const HEVCDecoderConfigurationRecord& hevc_config,
+ std::vector<uint8>* buffer);
+
+ static bool InsertParamSetsAnnexB(
+ const HEVCDecoderConfigurationRecord& hevc_config,
+ std::vector<uint8>* buffer,
+ std::vector<SubsampleEntry>* subsamples);
+
+ // Verifies that the contents of |buffer| conform to
+ // Section 7.4.2.4.4 of ISO/IEC 23008-2.
+ // |subsamples| contains the information about what parts of the buffer are
+ // encrypted and which parts are clear.
+ // Returns true if |buffer| contains conformant Annex B data
+ // TODO(servolk): Remove the std::vector version when we can use,
+ // C++11's std::vector<T>::data() method.
+ static bool IsValidAnnexB(const std::vector<uint8>& buffer,
+ const std::vector<SubsampleEntry>& subsamples);
+ static bool IsValidAnnexB(const uint8* buffer, size_t size,
+ const std::vector<SubsampleEntry>& subsamples);
+};
+
+class HEVCBitstreamConverter : public BitstreamConverter {
+ public:
+ explicit HEVCBitstreamConverter(
+ scoped_ptr<HEVCDecoderConfigurationRecord> hevc_config);
+
+ // BitstreamConverter interface
+ bool ConvertFrame(std::vector<uint8>* frame_buf,
+ bool is_keyframe,
+ std::vector<SubsampleEntry>* subsamples) const override;
+ private:
+ ~HEVCBitstreamConverter() override;
+ scoped_ptr<HEVCDecoderConfigurationRecord> hevc_config_;
+};
+
+} // namespace mp4
+} // namespace media
+
+#endif // MEDIA_FORMATS_MP4_HEVC_H_
diff --git a/chromium/media/formats/mp4/mp4_stream_parser.cc b/chromium/media/formats/mp4/mp4_stream_parser.cc
index 8298f0cedb9..18698b36c4a 100644
--- a/chromium/media/formats/mp4/mp4_stream_parser.cc
+++ b/chromium/media/formats/mp4/mp4_stream_parser.cc
@@ -10,6 +10,7 @@
#include "media/base/audio_decoder_config.h"
#include "media/base/stream_parser_buffer.h"
#include "media/base/text_track_config.h"
+#include "media/base/timestamp_constants.h"
#include "media/base/video_decoder_config.h"
#include "media/base/video_util.h"
#include "media/formats/mp4/box_definitions.h"
@@ -48,7 +49,7 @@ void MP4StreamParser::Init(
const EncryptedMediaInitDataCB& encrypted_media_init_data_cb,
const NewMediaSegmentCB& new_segment_cb,
const base::Closure& end_of_segment_cb,
- const LogCB& log_cb) {
+ const scoped_refptr<MediaLog>& media_log) {
DCHECK_EQ(state_, kWaitingForInit);
DCHECK(init_cb_.is_null());
DCHECK(!init_cb.is_null());
@@ -64,7 +65,7 @@ void MP4StreamParser::Init(
encrypted_media_init_data_cb_ = encrypted_media_init_data_cb;
new_segment_cb_ = new_segment_cb;
end_of_segment_cb_ = end_of_segment_cb;
- log_cb_ = log_cb;
+ media_log_ = media_log;
}
void MP4StreamParser::Reset() {
@@ -142,7 +143,7 @@ bool MP4StreamParser::ParseBox(bool* err) {
if (!size) return false;
scoped_ptr<BoxReader> reader(
- BoxReader::ReadTopLevelBox(buf, size, log_cb_, err));
+ BoxReader::ReadTopLevelBox(buf, size, media_log_, err));
if (reader.get() == NULL) return false;
if (reader->type() == FOURCC_MOOV) {
@@ -216,18 +217,19 @@ bool MP4StreamParser::ParseMoov(BoxReader* reader) {
if (!(entry.format == FOURCC_MP4A ||
(entry.format == FOURCC_ENCA &&
entry.sinf.format.format == FOURCC_MP4A))) {
- MEDIA_LOG(ERROR, log_cb_) << "Unsupported audio format 0x" << std::hex
- << entry.format << " in stsd box.";
+ MEDIA_LOG(ERROR, media_log_) << "Unsupported audio format 0x"
+ << std::hex << entry.format
+ << " in stsd box.";
return false;
}
uint8 audio_type = entry.esds.object_type;
DVLOG(1) << "audio_type " << std::hex << static_cast<int>(audio_type);
if (audio_object_types_.find(audio_type) == audio_object_types_.end()) {
- MEDIA_LOG(ERROR, log_cb_) << "audio object type 0x" << std::hex
- << audio_type
- << " does not match what is specified in the"
- << " mimetype.";
+ MEDIA_LOG(ERROR, media_log_)
+ << "audio object type 0x" << std::hex << audio_type
+ << " does not match what is specified in the"
+ << " mimetype.";
return false;
}
@@ -245,8 +247,8 @@ bool MP4StreamParser::ParseMoov(BoxReader* reader) {
extra_data = aac.codec_specific_data();
#endif
} else {
- MEDIA_LOG(ERROR, log_cb_) << "Unsupported audio object type 0x"
- << std::hex << audio_type << " in esds.";
+ MEDIA_LOG(ERROR, media_log_) << "Unsupported audio object type 0x"
+ << std::hex << audio_type << " in esds.";
return false;
}
@@ -267,8 +269,7 @@ bool MP4StreamParser::ParseMoov(BoxReader* reader) {
audio_config.Initialize(
codec, sample_format, channel_layout, sample_per_second,
extra_data.size() ? &extra_data[0] : NULL, extra_data.size(),
- is_audio_track_encrypted_, false, base::TimeDelta(),
- 0);
+ is_audio_track_encrypted_, base::TimeDelta(), 0);
has_audio_ = true;
audio_track_id_ = track->header.track_id;
}
@@ -279,8 +280,9 @@ bool MP4StreamParser::ParseMoov(BoxReader* reader) {
const VideoSampleEntry& entry = samp_descr.video_entries[desc_idx];
if (!entry.IsFormatValid()) {
- MEDIA_LOG(ERROR, log_cb_) << "Unsupported video format 0x" << std::hex
- << entry.format << " in stsd box.";
+ MEDIA_LOG(ERROR, media_log_) << "Unsupported video format 0x"
+ << std::hex << entry.format
+ << " in stsd box.";
return false;
}
@@ -297,20 +299,18 @@ bool MP4StreamParser::ParseMoov(BoxReader* reader) {
GetNaturalSize(visible_rect.size(), entry.pixel_aspect.h_spacing,
entry.pixel_aspect.v_spacing);
} else if (track->header.width && track->header.height) {
- // An even width makes things easier for YV12 and appears to be the
- // behavior expected by WebKit layout tests. See GetNaturalSize().
natural_size =
- gfx::Size(track->header.width & ~1, track->header.height);
+ gfx::Size(track->header.width, track->header.height);
}
is_video_track_encrypted_ = entry.sinf.info.track_encryption.is_encrypted;
DVLOG(1) << "is_video_track_encrypted_: " << is_video_track_encrypted_;
- video_config.Initialize(kCodecH264, H264PROFILE_MAIN, VideoFrame::YV12,
- VideoFrame::COLOR_SPACE_UNSPECIFIED, coded_size,
- visible_rect, natural_size,
+ video_config.Initialize(entry.video_codec, entry.video_codec_profile,
+ PIXEL_FORMAT_YV12, COLOR_SPACE_HD_REC709,
+ coded_size, visible_rect, natural_size,
// No decoder-specific buffer needed for AVC;
// SPS/PPS are embedded in the video stream
- NULL, 0, is_video_track_encrypted_, false);
+ NULL, 0, is_video_track_encrypted_);
has_video_ = true;
video_track_id_ = track->header.track_id;
}
@@ -354,7 +354,7 @@ bool MP4StreamParser::ParseMoof(BoxReader* reader) {
MovieFragment moof;
RCHECK(moof.Parse(reader));
if (!runs_)
- runs_.reset(new TrackRunIterator(moov_.get(), log_cb_));
+ runs_.reset(new TrackRunIterator(moov_.get(), media_log_));
RCHECK(runs_->Init(moof));
RCHECK(ComputeHighestEndOffset(moof));
@@ -385,36 +385,6 @@ void MP4StreamParser::OnEncryptedMediaInitData(
encrypted_media_init_data_cb_.Run(EmeInitDataType::CENC, init_data);
}
-bool MP4StreamParser::PrepareAVCBuffer(
- const AVCDecoderConfigurationRecord& avc_config,
- std::vector<uint8>* frame_buf,
- std::vector<SubsampleEntry>* subsamples) const {
- // Convert the AVC NALU length fields to Annex B headers, as expected by
- // decoding libraries. Since this may enlarge the size of the buffer, we also
- // update the clear byte count for each subsample if encryption is used to
- // account for the difference in size between the length prefix and Annex B
- // start code.
- RCHECK(AVC::ConvertFrameToAnnexB(avc_config.length_size, frame_buf));
- if (!subsamples->empty()) {
- const int nalu_size_diff = 4 - avc_config.length_size;
- size_t expected_size = runs_->sample_size() +
- subsamples->size() * nalu_size_diff;
- RCHECK(frame_buf->size() == expected_size);
- for (size_t i = 0; i < subsamples->size(); i++)
- (*subsamples)[i].clear_bytes += nalu_size_diff;
- }
-
- if (runs_->is_keyframe()) {
- // If this is a keyframe, we (re-)inject SPS and PPS headers at the start of
- // a frame. If subsample info is present, we also update the clear byte
- // count for that first subsample.
- RCHECK(AVC::InsertParamSetsAnnexB(avc_config, frame_buf, subsamples));
- }
-
- DCHECK(AVC::IsValidAnnexB(*frame_buf, *subsamples));
- return true;
-}
-
bool MP4StreamParser::PrepareAACBuffer(
const AAC& aac_config, std::vector<uint8>* frame_buf,
std::vector<SubsampleEntry>* subsamples) const {
@@ -505,9 +475,11 @@ bool MP4StreamParser::EnqueueSample(BufferQueue* audio_buffers,
std::vector<uint8> frame_buf(buf, buf + runs_->sample_size());
if (video) {
- if (!PrepareAVCBuffer(runs_->video_description().avcc,
- &frame_buf, &subsamples)) {
- MEDIA_LOG(ERROR, log_cb_) << "Failed to prepare AVC sample for decode";
+ DCHECK(runs_->video_description().frame_bitstream_converter);
+ if (!runs_->video_description().frame_bitstream_converter->ConvertFrame(
+ &frame_buf, runs_->is_keyframe(), &subsamples)) {
+ MEDIA_LOG(ERROR, media_log_)
+ << "Failed to prepare video sample for decode";
*err = true;
return false;
}
@@ -517,7 +489,7 @@ bool MP4StreamParser::EnqueueSample(BufferQueue* audio_buffers,
if (ESDescriptor::IsAAC(runs_->audio_description().esds.object_type) &&
!PrepareAACBuffer(runs_->audio_description().esds.aac,
&frame_buf, &subsamples)) {
- MEDIA_LOG(ERROR, log_cb_) << "Failed to prepare AAC sample for decode";
+ MEDIA_LOG(ERROR, media_log_) << "Failed to prepare AAC sample for decode";
*err = true;
return false;
}
@@ -546,12 +518,9 @@ bool MP4StreamParser::EnqueueSample(BufferQueue* audio_buffers,
// TODO(wolenetz/acolwell): Validate and use a common cross-parser TrackId
// type and allow multiple tracks for same media type, if applicable. See
// https://crbug.com/341581.
- //
- // NOTE: MPEG's "random access point" concept is equivalent to the
- // downstream code's "is keyframe" concept.
scoped_refptr<StreamParserBuffer> stream_buf =
StreamParserBuffer::CopyFrom(&frame_buf[0], frame_buf.size(),
- runs_->is_random_access_point(),
+ runs_->is_keyframe(),
buffer_type, 0);
if (decrypt_config)
@@ -563,7 +532,6 @@ bool MP4StreamParser::EnqueueSample(BufferQueue* audio_buffers,
DVLOG(3) << "Pushing frame: aud=" << audio
<< ", key=" << runs_->is_keyframe()
- << ", rap=" << runs_->is_random_access_point()
<< ", dur=" << runs_->duration().InMilliseconds()
<< ", dts=" << runs_->dts().InMilliseconds()
<< ", cts=" << runs_->cts().InMilliseconds()
@@ -603,13 +571,14 @@ bool MP4StreamParser::ReadAndDiscardMDATsUntil(int64 max_clear_offset) {
FourCC type;
int box_sz;
- if (!BoxReader::StartTopLevelBox(buf, size, log_cb_,
- &type, &box_sz, &err))
+ if (!BoxReader::StartTopLevelBox(buf, size, media_log_, &type, &box_sz,
+ &err))
break;
if (type != FOURCC_MDAT) {
- MEDIA_LOG(DEBUG, log_cb_) << "Unexpected box type while parsing MDATs: "
- << FourCCToString(type);
+ MEDIA_LOG(DEBUG, media_log_)
+ << "Unexpected box type while parsing MDATs: "
+ << FourCCToString(type);
}
mdat_tail_ += box_sz;
}
@@ -636,7 +605,7 @@ bool MP4StreamParser::HaveEnoughDataToEnqueueSamples() {
bool MP4StreamParser::ComputeHighestEndOffset(const MovieFragment& moof) {
highest_end_offset_ = 0;
- TrackRunIterator runs(moov_.get(), log_cb_);
+ TrackRunIterator runs(moov_.get(), media_log_);
RCHECK(runs.Init(moof));
while (runs.IsRunValid()) {
diff --git a/chromium/media/formats/mp4/mp4_stream_parser.h b/chromium/media/formats/mp4/mp4_stream_parser.h
index a5e9f62c32e..2ffe6403348 100644
--- a/chromium/media/formats/mp4/mp4_stream_parser.h
+++ b/chromium/media/formats/mp4/mp4_stream_parser.h
@@ -28,15 +28,14 @@ class MEDIA_EXPORT MP4StreamParser : public StreamParser {
MP4StreamParser(const std::set<int>& audio_object_types, bool has_sbr);
~MP4StreamParser() override;
- void Init(
- const InitCB& init_cb,
- const NewConfigCB& config_cb,
- const NewBuffersCB& new_buffers_cb,
- bool ignore_text_tracks,
- const EncryptedMediaInitDataCB& encrypted_media_init_data_cb,
- const NewMediaSegmentCB& new_segment_cb,
- const base::Closure& end_of_segment_cb,
- const LogCB& log_cb) override;
+ void Init(const InitCB& init_cb,
+ const NewConfigCB& config_cb,
+ const NewBuffersCB& new_buffers_cb,
+ bool ignore_text_tracks,
+ const EncryptedMediaInitDataCB& encrypted_media_init_data_cb,
+ const NewMediaSegmentCB& new_segment_cb,
+ const base::Closure& end_of_segment_cb,
+ const scoped_refptr<MediaLog>& media_log) override;
void Flush() override;
bool Parse(const uint8* buf, int size) override;
@@ -69,9 +68,6 @@ class MEDIA_EXPORT MP4StreamParser : public StreamParser {
void ChangeState(State new_state);
bool EmitConfigs();
- bool PrepareAVCBuffer(const AVCDecoderConfigurationRecord& avc_config,
- std::vector<uint8>* frame_buf,
- std::vector<SubsampleEntry>* subsamples) const;
bool PrepareAACBuffer(const AAC& aac_config,
std::vector<uint8>* frame_buf,
std::vector<SubsampleEntry>* subsamples) const;
@@ -99,7 +95,7 @@ class MEDIA_EXPORT MP4StreamParser : public StreamParser {
EncryptedMediaInitDataCB encrypted_media_init_data_cb_;
NewMediaSegmentCB new_segment_cb_;
base::Closure end_of_segment_cb_;
- LogCB log_cb_;
+ scoped_refptr<MediaLog> media_log_;
OffsetByteQueue queue_;
diff --git a/chromium/media/formats/mp4/mp4_stream_parser_unittest.cc b/chromium/media/formats/mp4/mp4_stream_parser_unittest.cc
index ccd91a0bd10..60e41659fba 100644
--- a/chromium/media/formats/mp4/mp4_stream_parser_unittest.cc
+++ b/chromium/media/formats/mp4/mp4_stream_parser_unittest.cc
@@ -12,23 +12,41 @@
#include "base/time/time.h"
#include "media/base/audio_decoder_config.h"
#include "media/base/decoder_buffer.h"
+#include "media/base/mock_media_log.h"
#include "media/base/stream_parser_buffer.h"
#include "media/base/test_data_util.h"
#include "media/base/text_track_config.h"
#include "media/base/video_decoder_config.h"
#include "media/formats/mp4/es_descriptor.h"
#include "media/formats/mp4/mp4_stream_parser.h"
+#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
+using ::testing::InSequence;
+using ::testing::StrictMock;
using base::TimeDelta;
namespace media {
namespace mp4 {
+// Matchers for verifying common media log entry strings.
+MATCHER_P(VideoCodecLog, codec_string, "") {
+ return CONTAINS_STRING(arg, "Video codec: " + std::string(codec_string));
+}
+
+MATCHER_P(AudioCodecLog, codec_string, "") {
+ return CONTAINS_STRING(arg, "Audio codec: " + std::string(codec_string));
+}
+
+MATCHER(AuxInfoUnavailableLog, "") {
+ return CONTAINS_STRING(arg, "Aux Info is not available.");
+}
+
class MP4StreamParserTest : public testing::Test {
public:
MP4StreamParserTest()
- : configs_received_(false),
+ : media_log_(new StrictMock<MockMediaLog>()),
+ configs_received_(false),
lower_bound_(
DecodeTimestamp::FromPresentationTime(base::TimeDelta::Max())) {
std::set<int> audio_object_types;
@@ -37,6 +55,7 @@ class MP4StreamParserTest : public testing::Test {
}
protected:
+ scoped_refptr<StrictMock<MockMediaLog>> media_log_;
scoped_ptr<MP4StreamParser> parser_;
bool configs_received_;
AudioDecoderConfig audio_decoder_config_;
@@ -149,7 +168,7 @@ class MP4StreamParserTest : public testing::Test {
base::Bind(&MP4StreamParserTest::KeyNeededF, base::Unretained(this)),
base::Bind(&MP4StreamParserTest::NewSegmentF, base::Unretained(this)),
base::Bind(&MP4StreamParserTest::EndOfSegmentF, base::Unretained(this)),
- LogCB());
+ media_log_);
}
void InitializeParser() {
@@ -172,22 +191,30 @@ class MP4StreamParserTest : public testing::Test {
TEST_F(MP4StreamParserTest, UnalignedAppend) {
// Test small, non-segment-aligned appends (small enough to exercise
// incremental append system)
+ EXPECT_MEDIA_LOG(VideoCodecLog("avc1.6401f"));
+ EXPECT_MEDIA_LOG(AudioCodecLog("mp4a.40.2"));
ParseMP4File("bear-1280x720-av_frag.mp4", 512);
}
TEST_F(MP4StreamParserTest, BytewiseAppend) {
// Ensure no incremental errors occur when parsing
+ EXPECT_MEDIA_LOG(VideoCodecLog("avc1.6401f"));
+ EXPECT_MEDIA_LOG(AudioCodecLog("mp4a.40.2"));
ParseMP4File("bear-1280x720-av_frag.mp4", 1);
}
TEST_F(MP4StreamParserTest, MultiFragmentAppend) {
// Large size ensures multiple fragments are appended in one call (size is
// larger than this particular test file)
+ EXPECT_MEDIA_LOG(VideoCodecLog("avc1.6401f"));
+ EXPECT_MEDIA_LOG(AudioCodecLog("mp4a.40.2"));
ParseMP4File("bear-1280x720-av_frag.mp4", 768432);
}
TEST_F(MP4StreamParserTest, Flush) {
// Flush while reading sample data, then start a new stream.
+ EXPECT_MEDIA_LOG(VideoCodecLog("avc1.6401f")).Times(2);
+ EXPECT_MEDIA_LOG(AudioCodecLog("mp4a.40.2")).Times(2);
InitializeParser();
scoped_refptr<DecoderBuffer> buffer =
@@ -200,6 +227,8 @@ TEST_F(MP4StreamParserTest, Flush) {
}
TEST_F(MP4StreamParserTest, Reinitialization) {
+ EXPECT_MEDIA_LOG(VideoCodecLog("avc1.6401f")).Times(2);
+ EXPECT_MEDIA_LOG(AudioCodecLog("mp4a.40.2")).Times(2);
InitializeParser();
scoped_refptr<DecoderBuffer> buffer =
@@ -213,14 +242,19 @@ TEST_F(MP4StreamParserTest, Reinitialization) {
}
TEST_F(MP4StreamParserTest, MPEG2_AAC_LC) {
+ InSequence s;
std::set<int> audio_object_types;
audio_object_types.insert(kISO_13818_7_AAC_LC);
parser_.reset(new MP4StreamParser(audio_object_types, false));
+ EXPECT_MEDIA_LOG(AudioCodecLog("mp4a.67"));
+ EXPECT_MEDIA_LOG(AudioCodecLog("mp4a.40.2"));
ParseMP4File("bear-mpeg2-aac-only_frag.mp4", 512);
}
// Test that a moov box is not always required after Flush() is called.
TEST_F(MP4StreamParserTest, NoMoovAfterFlush) {
+ EXPECT_MEDIA_LOG(VideoCodecLog("avc1.6401f"));
+ EXPECT_MEDIA_LOG(AudioCodecLog("mp4a.40.2"));
InitializeParser();
scoped_refptr<DecoderBuffer> buffer =
@@ -240,21 +274,35 @@ TEST_F(MP4StreamParserTest, NoMoovAfterFlush) {
// SampleAuxiliaryInformation{Sizes|Offsets}Box (saiz|saio) are missing.
// The parser should fail instead of crash. See http://crbug.com/361347
TEST_F(MP4StreamParserTest, MissingSampleAuxInfo) {
+ InSequence s;
+
// Encrypted test mp4 files have non-zero duration and are treated as
// recorded streams.
InitializeParserAndExpectLiveness(DemuxerStream::LIVENESS_RECORDED);
scoped_refptr<DecoderBuffer> buffer =
ReadTestDataFile("bear-1280x720-a_frag-cenc_missing-saiz-saio.mp4");
+ EXPECT_MEDIA_LOG(AudioCodecLog("mp4a.40.2")).Times(2);
+ EXPECT_MEDIA_LOG(AuxInfoUnavailableLog());
EXPECT_FALSE(AppendDataInPieces(buffer->data(), buffer->data_size(), 512));
}
// Test a file where all video samples start with an Access Unit
// Delimiter (AUD) NALU.
TEST_F(MP4StreamParserTest, VideoSamplesStartWithAUDs) {
+ EXPECT_MEDIA_LOG(VideoCodecLog("avc1.4d4028"));
ParseMP4File("bear-1280x720-av_with-aud-nalus_frag.mp4", 512);
}
+#if defined(ENABLE_HEVC_DEMUXING)
+TEST_F(MP4StreamParserTest, HEVC_in_MP4_container) {
+ InitializeParserAndExpectLiveness(DemuxerStream::LIVENESS_RECORDED);
+ scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile("bear-hevc-frag.mp4");
+ EXPECT_MEDIA_LOG(VideoCodecLog("hevc"));
+ EXPECT_TRUE(AppendDataInPieces(buffer->data(), buffer->data_size(), 512));
+}
+#endif
+
TEST_F(MP4StreamParserTest, CENC) {
// Encrypted test mp4 files have non-zero duration and are treated as
// recorded streams.
@@ -262,6 +310,7 @@ TEST_F(MP4StreamParserTest, CENC) {
scoped_refptr<DecoderBuffer> buffer =
ReadTestDataFile("bear-1280x720-v_frag-cenc.mp4");
+ EXPECT_MEDIA_LOG(VideoCodecLog("avc1.6401f"));
EXPECT_TRUE(AppendDataInPieces(buffer->data(), buffer->data_size(), 512));
}
@@ -271,8 +320,9 @@ TEST_F(MP4StreamParserTest, NaturalSizeWithoutPASP) {
scoped_refptr<DecoderBuffer> buffer =
ReadTestDataFile("bear-640x360-non_square_pixel-without_pasp.mp4");
+ EXPECT_MEDIA_LOG(VideoCodecLog("avc1.6401e"));
EXPECT_TRUE(AppendDataInPieces(buffer->data(), buffer->data_size(), 512));
- EXPECT_EQ(gfx::Size(638, 360), video_decoder_config_.natural_size());
+ EXPECT_EQ(gfx::Size(639, 360), video_decoder_config_.natural_size());
}
TEST_F(MP4StreamParserTest, NaturalSizeWithPASP) {
@@ -281,8 +331,9 @@ TEST_F(MP4StreamParserTest, NaturalSizeWithPASP) {
scoped_refptr<DecoderBuffer> buffer =
ReadTestDataFile("bear-640x360-non_square_pixel-with_pasp.mp4");
+ EXPECT_MEDIA_LOG(VideoCodecLog("avc1.6401e"));
EXPECT_TRUE(AppendDataInPieces(buffer->data(), buffer->data_size(), 512));
- EXPECT_EQ(gfx::Size(638, 360), video_decoder_config_.natural_size());
+ EXPECT_EQ(gfx::Size(639, 360), video_decoder_config_.natural_size());
}
} // namespace mp4
diff --git a/chromium/media/formats/mp4/track_run_iterator.cc b/chromium/media/formats/mp4/track_run_iterator.cc
index a216ba8ccf0..27d57256ced 100644
--- a/chromium/media/formats/mp4/track_run_iterator.cc
+++ b/chromium/media/formats/mp4/track_run_iterator.cc
@@ -5,8 +5,8 @@
#include "media/formats/mp4/track_run_iterator.h"
#include <algorithm>
+#include <iomanip>
-#include "media/base/buffers.h"
#include "media/formats/mp4/rcheck.h"
#include "media/formats/mp4/sample_to_group_iterator.h"
@@ -18,7 +18,6 @@ struct SampleInfo {
int duration;
int cts_offset;
bool is_keyframe;
- bool is_random_access_point;
uint32 cenc_group_description_index;
};
@@ -32,13 +31,14 @@ struct TrackRunInfo {
bool is_audio;
const AudioSampleEntry* audio_description;
const VideoSampleEntry* video_description;
+ const SampleGroupDescription* track_sample_encryption_group;
int64 aux_info_start_offset; // Only valid if aux_info_total_size > 0.
int aux_info_default_size;
std::vector<uint8> aux_info_sizes; // Populated if default_size == 0.
int aux_info_total_size;
- std::vector<CencSampleEncryptionInfoEntry> sample_encryption_info;
+ std::vector<CencSampleEncryptionInfoEntry> fragment_sample_encryption_info;
TrackRunInfo();
~TrackRunInfo();
@@ -81,13 +81,20 @@ DecodeTimestamp DecodeTimestampFromRational(int64 numer, int64 denom) {
}
TrackRunIterator::TrackRunIterator(const Movie* moov,
- const LogCB& log_cb)
- : moov_(moov), log_cb_(log_cb), sample_offset_(0) {
+ const scoped_refptr<MediaLog>& media_log)
+ : moov_(moov), media_log_(media_log), sample_offset_(0) {
CHECK(moov);
}
TrackRunIterator::~TrackRunIterator() {}
+static std::string HexFlags(uint32 flags) {
+ std::stringstream stream;
+ stream << std::setfill('0') << std::setw(sizeof(flags)*2) << std::hex
+ << flags;
+ return stream.str();
+}
+
static bool PopulateSampleInfo(const TrackExtends& trex,
const TrackFragmentHeader& tfhd,
const TrackFragmentRun& trun,
@@ -95,7 +102,8 @@ static bool PopulateSampleInfo(const TrackExtends& trex,
const uint32 i,
SampleInfo* sample_info,
const SampleDependsOn sdtp_sample_depends_on,
- const LogCB& log_cb) {
+ bool is_audio,
+ const scoped_refptr<MediaLog>& media_log) {
if (i < trun.sample_sizes.size()) {
sample_info->size = trun.sample_sizes[i];
} else if (tfhd.default_sample_size > 0) {
@@ -122,46 +130,80 @@ static bool PopulateSampleInfo(const TrackExtends& trex,
uint32 flags;
if (i < trun.sample_flags.size()) {
flags = trun.sample_flags[i];
+ DVLOG(4) << __FUNCTION__ << " trun sample flags " << HexFlags(flags);
} else if (tfhd.has_default_sample_flags) {
flags = tfhd.default_sample_flags;
+ DVLOG(4) << __FUNCTION__ << " tfhd sample flags " << HexFlags(flags);
} else {
flags = trex.default_sample_flags;
+ DVLOG(4) << __FUNCTION__ << " trex sample flags " << HexFlags(flags);
}
SampleDependsOn sample_depends_on =
static_cast<SampleDependsOn>((flags >> 24) & 0x3);
-
- if (sample_depends_on == kSampleDependsOnUnknown)
+ if (sample_depends_on == kSampleDependsOnUnknown) {
sample_depends_on = sdtp_sample_depends_on;
+ }
+ DVLOG(4) << __FUNCTION__ << " sample_depends_on " << sample_depends_on;
+ if (sample_depends_on == kSampleDependsOnReserved) {
+ MEDIA_LOG(ERROR, media_log) << "Reserved value used in sample dependency"
+ " info.";
+ return false;
+ }
- // ISO/IEC 14496-12 Section 8.8.3.1 : The negation of |sample_is_sync_sample|
- // provides the same information as the sync sample table [8.6.2]. When
- // |sample_is_sync_sample| is true for a sample, it is the same as if the
- // sample were not in a movie fragment and marked with an entry in the sync
- // sample table (or, if all samples are sync samples, the sync sample table
- // were absent).
+ // Per spec (ISO 14496-12:2012), the definition for a "sync sample" is
+ // equivalent to the downstream code's "is keyframe" concept. But media exists
+ // that marks non-key video frames as sync samples (http://crbug.com/507916
+ // and http://crbug.com/310712). Hence, for video we additionally check that
+ // the sample does not depend on others (FFmpeg does too, see mov_read_trun).
+ // Sample dependency is not ignored for audio because encoded audio samples
+ // can depend on other samples and still be used for random access. Generally
+ // all audio samples are expected to be sync samples, but we prefer to check
+ // the flags to catch badly muxed audio (for now anyway ;P). History of
+ // attempts to get this right discussed in http://crrev.com/1319813002
bool sample_is_sync_sample = !(flags & kSampleIsNonSyncSample);
- sample_info->is_random_access_point = sample_is_sync_sample;
-
- switch (sample_depends_on) {
- case kSampleDependsOnUnknown:
- sample_info->is_keyframe = sample_is_sync_sample;
- break;
+ bool sample_depends_on_others = sample_depends_on == kSampleDependsOnOthers;
+ sample_info->is_keyframe = sample_is_sync_sample &&
+ (!sample_depends_on_others || is_audio);
- case kSampleDependsOnOthers:
- sample_info->is_keyframe = false;
- break;
+ DVLOG(4) << __FUNCTION__ << " is_kf:" << sample_info->is_keyframe
+ << " is_sync:" << sample_is_sync_sample
+ << " deps:" << sample_depends_on_others
+ << " audio:" << is_audio;
- case kSampleDependsOnNoOther:
- sample_info->is_keyframe = true;
- break;
+ return true;
+}
- case kSampleDependsOnReserved:
- MEDIA_LOG(ERROR, log_cb) << "Reserved value used in sample dependency"
- " info.";
- return false;
+static const CencSampleEncryptionInfoEntry* GetSampleEncryptionInfoEntry(
+ const TrackRunInfo& run_info,
+ uint32 group_description_index) {
+ const std::vector<CencSampleEncryptionInfoEntry>* entries = nullptr;
+
+ // ISO-14496-12 Section 8.9.2.3 and 8.9.4 : group description index
+ // (1) ranges from 1 to the number of sample group entries in the track
+ // level SampleGroupDescription Box, or (2) takes the value 0 to
+ // indicate that this sample is a member of no group, in this case, the
+ // sample is associated with the default values specified in
+ // TrackEncryption Box, or (3) starts at 0x10001, i.e. the index value
+ // 1, with the value 1 in the top 16 bits, to reference fragment-local
+ // SampleGroupDescription Box.
+ // Case (2) is not supported here. The caller must handle it externally
+ // before invoking this function.
+ DCHECK_NE(group_description_index, 0u);
+ if (group_description_index >
+ SampleToGroupEntry::kFragmentGroupDescriptionIndexBase) {
+ group_description_index -=
+ SampleToGroupEntry::kFragmentGroupDescriptionIndexBase;
+ entries = &run_info.fragment_sample_encryption_info;
+ } else {
+ entries = &run_info.track_sample_encryption_group->entries;
}
- return true;
+
+ // |group_description_index| is 1-based.
+ DCHECK_LE(group_description_index, entries->size());
+ return (group_description_index > entries->size())
+ ? nullptr
+ : &(*entries)[group_description_index - 1];
}
// In well-structured encrypted media, each track run will be immediately
@@ -251,7 +293,10 @@ bool TrackRunIterator::Init(const MovieFragment& moof) {
tri.timescale = trak->media.header.timescale;
tri.start_dts = run_start_dts;
tri.sample_start_offset = trun.data_offset;
- tri.sample_encryption_info = traf.sample_group_description.entries;
+ tri.track_sample_encryption_group =
+ &trak->media.information.sample_table.sample_group_description;
+ tri.fragment_sample_encryption_info =
+ traf.sample_group_description.entries;
tri.is_audio = (stsd.type == kAudio);
if (tri.is_audio) {
@@ -304,10 +349,9 @@ bool TrackRunIterator::Init(const MovieFragment& moof) {
tri.samples.resize(trun.sample_count);
for (size_t k = 0; k < trun.sample_count; k++) {
- if (!PopulateSampleInfo(*trex, traf.header, trun, edit_list_offset,
- k, &tri.samples[k],
- traf.sdtp.sample_depends_on(k),
- log_cb_)) {
+ if (!PopulateSampleInfo(*trex, traf.header, trun, edit_list_offset, k,
+ &tri.samples[k], traf.sdtp.sample_depends_on(k),
+ tri.is_audio, media_log_)) {
return false;
}
@@ -320,25 +364,10 @@ bool TrackRunIterator::Init(const MovieFragment& moof) {
continue;
}
- // ISO-14496-12 Section 8.9.2.3 and 8.9.4 : group description index
- // (1) ranges from 1 to the number of sample group entries in the track
- // level SampleGroupDescription Box, or (2) takes the value 0 to
- // indicate that this sample is a member of no group, in this case, the
- // sample is associated with the default values specified in
- // TrackEncryption Box, or (3) starts at 0x10001, i.e. the index value
- // 1, with the value 1 in the top 16 bits, to reference fragment-local
- // SampleGroupDescription Box.
- // Case (1) is not supported currently. We might not need it either as
- // the same functionality can be better achieved using (2).
uint32 index = sample_to_group_itr.group_description_index();
- if (index >= SampleToGroupEntry::kFragmentGroupDescriptionIndexBase) {
- index -= SampleToGroupEntry::kFragmentGroupDescriptionIndexBase;
- RCHECK(index != 0 && index <= tri.sample_encryption_info.size());
- } else if (index != 0) {
- NOTIMPLEMENTED() << "'sgpd' box in 'moov' is not supported.";
- return false;
- }
tri.samples[k].cenc_group_description_index = index;
+ if (index != 0)
+ RCHECK(GetSampleEncryptionInfoEntry(tri, index));
is_sample_to_group_valid = sample_to_group_itr.Advance();
}
runs_.push_back(tri);
@@ -503,11 +532,6 @@ bool TrackRunIterator::is_keyframe() const {
return sample_itr_->is_keyframe;
}
-bool TrackRunIterator::is_random_access_point() const {
- DCHECK(IsSampleValid());
- return sample_itr_->is_random_access_point;
-}
-
const TrackEncryption& TrackRunIterator::track_encryption() const {
if (is_audio())
return audio_description().sinf.info.track_encryption;
@@ -519,7 +543,7 @@ scoped_ptr<DecryptConfig> TrackRunIterator::GetDecryptConfig() {
if (cenc_info_.empty()) {
DCHECK_EQ(0, aux_info_size());
- MEDIA_LOG(ERROR, log_cb_) << "Aux Info is not available.";
+ MEDIA_LOG(ERROR, media_log_) << "Aux Info is not available.";
return scoped_ptr<DecryptConfig>();
}
@@ -531,7 +555,7 @@ scoped_ptr<DecryptConfig> TrackRunIterator::GetDecryptConfig() {
if (!cenc_info.subsamples.empty() &&
(!cenc_info.GetTotalSizeOfSubsamples(&total_size) ||
total_size != static_cast<size_t>(sample_size()))) {
- MEDIA_LOG(ERROR, log_cb_) << "Incorrect CENC subsample size.";
+ MEDIA_LOG(ERROR, media_log_) << "Incorrect CENC subsample size.";
return scoped_ptr<DecryptConfig>();
}
@@ -549,33 +573,24 @@ uint32 TrackRunIterator::GetGroupDescriptionIndex(uint32 sample_index) const {
return run_itr_->samples[sample_index].cenc_group_description_index;
}
-const CencSampleEncryptionInfoEntry&
-TrackRunIterator::GetSampleEncryptionInfoEntry(
- uint32 group_description_index) const {
- DCHECK(IsRunValid());
- DCHECK_NE(group_description_index, 0u);
- DCHECK_LE(group_description_index, run_itr_->sample_encryption_info.size());
- // |group_description_index| is 1-based. Subtract by 1 to index the vector.
- return run_itr_->sample_encryption_info[group_description_index - 1];
-}
-
bool TrackRunIterator::IsSampleEncrypted(size_t sample_index) const {
uint32 index = GetGroupDescriptionIndex(sample_index);
- return (index == 0) ? track_encryption().is_encrypted
- : GetSampleEncryptionInfoEntry(index).is_encrypted;
+ return (index == 0)
+ ? track_encryption().is_encrypted
+ : GetSampleEncryptionInfoEntry(*run_itr_, index)->is_encrypted;
}
const std::vector<uint8>& TrackRunIterator::GetKeyId(
size_t sample_index) const {
uint32 index = GetGroupDescriptionIndex(sample_index);
return (index == 0) ? track_encryption().default_kid
- : GetSampleEncryptionInfoEntry(index).key_id;
+ : GetSampleEncryptionInfoEntry(*run_itr_, index)->key_id;
}
uint8 TrackRunIterator::GetIvSize(size_t sample_index) const {
uint32 index = GetGroupDescriptionIndex(sample_index);
return (index == 0) ? track_encryption().default_iv_size
- : GetSampleEncryptionInfoEntry(index).iv_size;
+ : GetSampleEncryptionInfoEntry(*run_itr_, index)->iv_size;
}
} // namespace mp4
diff --git a/chromium/media/formats/mp4/track_run_iterator.h b/chromium/media/formats/mp4/track_run_iterator.h
index b5009678b8b..fa82cc1e637 100644
--- a/chromium/media/formats/mp4/track_run_iterator.h
+++ b/chromium/media/formats/mp4/track_run_iterator.h
@@ -32,7 +32,7 @@ class MEDIA_EXPORT TrackRunIterator {
public:
// Create a new TrackRunIterator. A reference to |moov| will be retained for
// the lifetime of this object.
- TrackRunIterator(const Movie* moov, const LogCB& log_cb);
+ TrackRunIterator(const Movie* moov, const scoped_refptr<MediaLog>& media_log);
~TrackRunIterator();
// Sets up the iterator to handle all the runs from the current fragment.
@@ -80,7 +80,6 @@ class MEDIA_EXPORT TrackRunIterator {
base::TimeDelta cts() const;
base::TimeDelta duration() const;
bool is_keyframe() const;
- bool is_random_access_point() const;
// Only call when is_encrypted() is true and AuxInfoNeedsToBeCached() is
// false. Result is owned by caller.
@@ -91,8 +90,6 @@ class MEDIA_EXPORT TrackRunIterator {
const TrackEncryption& track_encryption() const;
uint32 GetGroupDescriptionIndex(uint32 sample_index) const;
- const CencSampleEncryptionInfoEntry& GetSampleEncryptionInfoEntry(
- uint32 group_description_index) const;
// Sample encryption information.
bool IsSampleEncrypted(size_t sample_index) const;
@@ -100,7 +97,7 @@ class MEDIA_EXPORT TrackRunIterator {
const std::vector<uint8>& GetKeyId(size_t sample_index) const;
const Movie* moov_;
- LogCB log_cb_;
+ scoped_refptr<MediaLog> media_log_;
std::vector<TrackRunInfo> runs_;
std::vector<TrackRunInfo>::const_iterator run_itr_;
diff --git a/chromium/media/formats/mp4/track_run_iterator_unittest.cc b/chromium/media/formats/mp4/track_run_iterator_unittest.cc
index d1d6e078eae..e220f9130bc 100644
--- a/chromium/media/formats/mp4/track_run_iterator_unittest.cc
+++ b/chromium/media/formats/mp4/track_run_iterator_unittest.cc
@@ -6,11 +6,15 @@
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/strings/string_split.h"
+#include "media/base/mock_media_log.h"
#include "media/formats/mp4/box_definitions.h"
#include "media/formats/mp4/rcheck.h"
#include "media/formats/mp4/track_run_iterator.h"
+#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
+using ::testing::StrictMock;
+
// The sum of the elements in a vector initialized with SumAscending,
// less the value of the last element.
static const int kSumAscending1 = 45;
@@ -36,23 +40,32 @@ static const uint8 kKeyId[] = {
0x65, 0x73, 0x74, 0x4b, 0x65, 0x79, 0x49, 0x44
};
-static const uint8 kCencSampleGroupKeyId[] = {
+static const uint8 kTrackCencSampleGroupKeyId[] = {
0x46, 0x72, 0x61, 0x67, 0x53, 0x61, 0x6d, 0x70,
0x6c, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x4b
};
+static const uint8 kFragmentCencSampleGroupKeyId[] = {
+ 0x6b, 0x46, 0x72, 0x61, 0x67, 0x6d, 0x65, 0x6e,
+ 0x74, 0x43, 0x65, 0x6e, 0x63, 0x53, 0x61, 0x6d
+};
+
namespace media {
namespace mp4 {
+MATCHER(ReservedValueInSampleDependencyInfo, "") {
+ return CONTAINS_STRING(arg, "Reserved value used in sample dependency info.");
+}
+
class TrackRunIteratorTest : public testing::Test {
public:
- TrackRunIteratorTest() {
+ TrackRunIteratorTest() : media_log_(new StrictMock<MockMediaLog>()) {
CreateMovie();
}
protected:
Movie moov_;
- LogCB log_cb_;
+ scoped_refptr<StrictMock<MockMediaLog>> media_log_;
scoped_ptr<TrackRunIterator> iter_;
void CreateMovie() {
@@ -70,8 +83,6 @@ class TrackRunIteratorTest : public testing::Test {
desc1.audio_entries.push_back(aud_desc);
moov_.extends.tracks[0].track_id = 1;
moov_.extends.tracks[0].default_sample_description_index = 1;
- moov_.tracks[0].media.information.sample_table.sync_sample.is_present =
- false;
moov_.tracks[1].header.track_id = 2;
moov_.tracks[1].media.header.timescale = kVideoScale;
SampleDescription& desc2 =
@@ -83,11 +94,6 @@ class TrackRunIteratorTest : public testing::Test {
desc2.video_entries.push_back(vid_desc);
moov_.extends.tracks[1].track_id = 2;
moov_.extends.tracks[1].default_sample_description_index = 1;
- SyncSample& video_sync_sample =
- moov_.tracks[1].media.information.sample_table.sync_sample;
- video_sync_sample.is_present = true;
- video_sync_sample.entries.resize(1);
- video_sync_sample.entries[0] = 0;
moov_.tracks[2].header.track_id = 3;
moov_.tracks[2].media.information.sample_table.description.type = kHint;
@@ -143,8 +149,8 @@ class TrackRunIteratorTest : public testing::Test {
// ON - SampleDependsOnOthers & IsNonSyncSample
// NS - SampleDependsOnNoOthers & IsSyncSample
// NN - SampleDependsOnNoOthers & IsNonSyncSample
- std::vector<std::string> flags_data;
- base::SplitString(sample_info, ' ', &flags_data);
+ std::vector<std::string> flags_data = base::SplitString(
+ sample_info, " ", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
if (flags_data.size() == 1u) {
// Simulates the first_sample_flags_present set scenario,
@@ -167,8 +173,6 @@ class TrackRunIteratorTest : public testing::Test {
while (iter->IsSampleValid()) {
ss << " " << (iter->is_keyframe() ? "K" : "P");
- if (iter->is_random_access_point())
- ss << "R";
iter->AdvanceSample();
}
@@ -223,11 +227,22 @@ class TrackRunIteratorTest : public testing::Test {
kKeyId + arraysize(kKeyId));
}
- // Add SampleGroupDescription Box with two entries (an unencrypted entry and
- // an encrypted entry). Populate SampleToGroup Box from input array.
- void AddCencSampleGroup(TrackFragment* frag,
+ // Add SampleGroupDescription Box to track level sample table and to
+ // fragment. Populate SampleToGroup Box from input array.
+ void AddCencSampleGroup(Track* track,
+ TrackFragment* frag,
const SampleToGroupEntry* sample_to_group_entries,
size_t num_entries) {
+ auto& track_cenc_group =
+ track->media.information.sample_table.sample_group_description;
+ track_cenc_group.grouping_type = FOURCC_SEIG;
+ track_cenc_group.entries.resize(1);
+ track_cenc_group.entries[0].is_encrypted = true;
+ track_cenc_group.entries[0].iv_size = 8;
+ track_cenc_group.entries[0].key_id.assign(
+ kTrackCencSampleGroupKeyId,
+ kTrackCencSampleGroupKeyId + arraysize(kTrackCencSampleGroupKeyId));
+
frag->sample_group_description.grouping_type = FOURCC_SEIG;
frag->sample_group_description.entries.resize(2);
frag->sample_group_description.entries[0].is_encrypted = false;
@@ -235,8 +250,9 @@ class TrackRunIteratorTest : public testing::Test {
frag->sample_group_description.entries[1].is_encrypted = true;
frag->sample_group_description.entries[1].iv_size = 8;
frag->sample_group_description.entries[1].key_id.assign(
- kCencSampleGroupKeyId,
- kCencSampleGroupKeyId + arraysize(kCencSampleGroupKeyId));
+ kFragmentCencSampleGroupKeyId,
+ kFragmentCencSampleGroupKeyId +
+ arraysize(kFragmentCencSampleGroupKeyId));
frag->sample_to_group.grouping_type = FOURCC_SEIG;
frag->sample_to_group.entries.assign(sample_to_group_entries,
@@ -276,14 +292,14 @@ class TrackRunIteratorTest : public testing::Test {
};
TEST_F(TrackRunIteratorTest, NoRunsTest) {
- iter_.reset(new TrackRunIterator(&moov_, log_cb_));
+ iter_.reset(new TrackRunIterator(&moov_, media_log_));
ASSERT_TRUE(iter_->Init(MovieFragment()));
EXPECT_FALSE(iter_->IsRunValid());
EXPECT_FALSE(iter_->IsSampleValid());
}
TEST_F(TrackRunIteratorTest, BasicOperationTest) {
- iter_.reset(new TrackRunIterator(&moov_, log_cb_));
+ iter_.reset(new TrackRunIterator(&moov_, media_log_));
MovieFragment moof = CreateFragment();
// Test that runs are sorted correctly, and that properties of the initial
@@ -340,7 +356,7 @@ TEST_F(TrackRunIteratorTest, TrackExtendsDefaultsTest) {
moov_.extends.tracks[0].default_sample_duration = 50;
moov_.extends.tracks[0].default_sample_size = 3;
moov_.extends.tracks[0].default_sample_flags = ToSampleFlags("UN");
- iter_.reset(new TrackRunIterator(&moov_, log_cb_));
+ iter_.reset(new TrackRunIterator(&moov_, media_log_));
MovieFragment moof = CreateFragment();
moof.tracks[0].header.has_default_sample_flags = false;
moof.tracks[0].header.default_sample_size = 0;
@@ -359,22 +375,23 @@ TEST_F(TrackRunIteratorTest, FirstSampleFlagTest) {
// Ensure that keyframes are flagged correctly in the face of BMFF boxes which
// explicitly specify the flags for the first sample in a run and rely on
// defaults for all subsequent samples
- iter_.reset(new TrackRunIterator(&moov_, log_cb_));
+ iter_.reset(new TrackRunIterator(&moov_, media_log_));
MovieFragment moof = CreateFragment();
moof.tracks[1].header.has_default_sample_flags = true;
moof.tracks[1].header.default_sample_flags = ToSampleFlags("UN");
SetFlagsOnSamples("US", &moof.tracks[1].runs[0]);
ASSERT_TRUE(iter_->Init(moof));
- EXPECT_EQ("1 KR KR KR KR KR KR KR KR KR KR", KeyframeAndRAPInfo(iter_.get()));
+ EXPECT_EQ("1 K K K K K K K K K K", KeyframeAndRAPInfo(iter_.get()));
iter_->AdvanceRun();
- EXPECT_EQ("2 KR P P P P P P P P P", KeyframeAndRAPInfo(iter_.get()));
+ EXPECT_EQ("2 K P P P P P P P P P", KeyframeAndRAPInfo(iter_.get()));
}
// Verify that parsing fails if a reserved value is in the sample flags.
TEST_F(TrackRunIteratorTest, SampleInfoTest_ReservedInSampleFlags) {
- iter_.reset(new TrackRunIterator(&moov_, log_cb_));
+ EXPECT_MEDIA_LOG(ReservedValueInSampleDependencyInfo());
+ iter_.reset(new TrackRunIterator(&moov_, media_log_));
MovieFragment moof = CreateFragment();
// Change the "depends on" field on one of the samples to a
// reserved value.
@@ -384,7 +401,8 @@ TEST_F(TrackRunIteratorTest, SampleInfoTest_ReservedInSampleFlags) {
// Verify that parsing fails if a reserved value is in the default sample flags.
TEST_F(TrackRunIteratorTest, SampleInfoTest_ReservedInDefaultSampleFlags) {
- iter_.reset(new TrackRunIterator(&moov_, log_cb_));
+ EXPECT_MEDIA_LOG(ReservedValueInSampleDependencyInfo());
+ iter_.reset(new TrackRunIterator(&moov_, media_log_));
MovieFragment moof = CreateFragment();
// Set the default flag to contain a reserved "depends on" value.
moof.tracks[0].header.default_sample_flags = ToSampleFlags("RN");
@@ -407,7 +425,7 @@ TEST_F(TrackRunIteratorTest, ReorderingTest) {
// (that is, 2 / kVideoTimescale) and a duration of zero (which is treated as
// infinite according to 14496-12:2012). This will cause the first 80ms of the
// media timeline - which will be empty, due to CTS biasing - to be discarded.
- iter_.reset(new TrackRunIterator(&moov_, log_cb_));
+ iter_.reset(new TrackRunIterator(&moov_, media_log_));
EditListEntry entry;
entry.segment_duration = 0;
entry.media_time = 2;
@@ -444,7 +462,7 @@ TEST_F(TrackRunIteratorTest, ReorderingTest) {
}
TEST_F(TrackRunIteratorTest, IgnoreUnknownAuxInfoTest) {
- iter_.reset(new TrackRunIterator(&moov_, log_cb_));
+ iter_.reset(new TrackRunIterator(&moov_, media_log_));
MovieFragment moof = CreateFragment();
moof.tracks[1].auxiliary_offset.offsets.push_back(50);
moof.tracks[1].auxiliary_size.default_sample_info_size = 2;
@@ -457,7 +475,7 @@ TEST_F(TrackRunIteratorTest, IgnoreUnknownAuxInfoTest) {
TEST_F(TrackRunIteratorTest, DecryptConfigTest) {
AddEncryption(&moov_.tracks[1]);
- iter_.reset(new TrackRunIterator(&moov_, log_cb_));
+ iter_.reset(new TrackRunIterator(&moov_, media_log_));
MovieFragment moof = CreateFragment();
AddAuxInfoHeaders(50, &moof.tracks[1]);
@@ -501,15 +519,15 @@ TEST_F(TrackRunIteratorTest, CencSampleGroupTest) {
{1, SampleToGroupEntry::kFragmentGroupDescriptionIndexBase + 2},
// Associated with the first entry in SampleGroupDescription Box.
{1, SampleToGroupEntry::kFragmentGroupDescriptionIndexBase + 1}};
- AddCencSampleGroup(
- &moof.tracks[0], kSampleToGroupTable, arraysize(kSampleToGroupTable));
+ AddCencSampleGroup(&moov_.tracks[0], &moof.tracks[0], kSampleToGroupTable,
+ arraysize(kSampleToGroupTable));
- iter_.reset(new TrackRunIterator(&moov_, log_cb_));
+ iter_.reset(new TrackRunIterator(&moov_, media_log_));
ASSERT_TRUE(InitMoofWithArbitraryAuxInfo(&moof));
std::string cenc_sample_group_key_id(
- kCencSampleGroupKeyId,
- kCencSampleGroupKeyId + arraysize(kCencSampleGroupKeyId));
+ kFragmentCencSampleGroupKeyId,
+ kFragmentCencSampleGroupKeyId + arraysize(kFragmentCencSampleGroupKeyId));
// The first sample is encrypted and the second sample is unencrypted.
EXPECT_TRUE(iter_->is_encrypted());
EXPECT_EQ(cenc_sample_group_key_id, iter_->GetDecryptConfig()->key_id());
@@ -524,26 +542,32 @@ TEST_F(TrackRunIteratorTest, CencSampleGroupWithTrackEncryptionBoxTest) {
MovieFragment moof = CreateFragment();
const SampleToGroupEntry kSampleToGroupTable[] = {
- // Associated with the second entry in SampleGroupDescription Box.
+ // Associated with the 2nd entry in fragment SampleGroupDescription Box.
{2, SampleToGroupEntry::kFragmentGroupDescriptionIndexBase + 2},
// Associated with the default values specified in TrackEncryption Box.
- {4, 0},
- // Associated with the first entry in SampleGroupDescription Box.
- {3, SampleToGroupEntry::kFragmentGroupDescriptionIndexBase + 1}};
- AddCencSampleGroup(
- &moof.tracks[0], kSampleToGroupTable, arraysize(kSampleToGroupTable));
-
- iter_.reset(new TrackRunIterator(&moov_, log_cb_));
+ {1, 0},
+ // Associated with the 1st entry in fragment SampleGroupDescription Box.
+ {3, SampleToGroupEntry::kFragmentGroupDescriptionIndexBase + 1},
+ // Associated with the 1st entry in track SampleGroupDescription Box.
+ {2, 1}};
+ AddCencSampleGroup(&moov_.tracks[0], &moof.tracks[0], kSampleToGroupTable,
+ arraysize(kSampleToGroupTable));
+
+ iter_.reset(new TrackRunIterator(&moov_, media_log_));
ASSERT_TRUE(InitMoofWithArbitraryAuxInfo(&moof));
std::string track_encryption_key_id(kKeyId, kKeyId + arraysize(kKeyId));
- std::string cenc_sample_group_key_id(
- kCencSampleGroupKeyId,
- kCencSampleGroupKeyId + arraysize(kCencSampleGroupKeyId));
+ std::string track_cenc_sample_group_key_id(
+ kTrackCencSampleGroupKeyId,
+ kTrackCencSampleGroupKeyId + arraysize(kTrackCencSampleGroupKeyId));
+ std::string fragment_cenc_sample_group_key_id(
+ kFragmentCencSampleGroupKeyId,
+ kFragmentCencSampleGroupKeyId + arraysize(kFragmentCencSampleGroupKeyId));
for (size_t i = 0; i < kSampleToGroupTable[0].sample_count; ++i) {
EXPECT_TRUE(iter_->is_encrypted());
- EXPECT_EQ(cenc_sample_group_key_id, iter_->GetDecryptConfig()->key_id());
+ EXPECT_EQ(fragment_cenc_sample_group_key_id,
+ iter_->GetDecryptConfig()->key_id());
iter_->AdvanceSample();
}
@@ -558,6 +582,13 @@ TEST_F(TrackRunIteratorTest, CencSampleGroupWithTrackEncryptionBoxTest) {
iter_->AdvanceSample();
}
+ for (size_t i = 0; i < kSampleToGroupTable[3].sample_count; ++i) {
+ EXPECT_TRUE(iter_->is_encrypted());
+ EXPECT_EQ(track_cenc_sample_group_key_id,
+ iter_->GetDecryptConfig()->key_id());
+ iter_->AdvanceSample();
+ }
+
// The remaining samples should be associated with the default values
// specified in TrackEncryption Box.
EXPECT_TRUE(iter_->is_encrypted());
@@ -568,7 +599,7 @@ TEST_F(TrackRunIteratorTest, CencSampleGroupWithTrackEncryptionBoxTest) {
TEST_F(TrackRunIteratorTest, SharedAuxInfoTest) {
AddEncryption(&moov_.tracks[0]);
AddEncryption(&moov_.tracks[1]);
- iter_.reset(new TrackRunIterator(&moov_, log_cb_));
+ iter_.reset(new TrackRunIterator(&moov_, media_log_));
MovieFragment moof = CreateFragment();
moof.tracks[0].runs.resize(1);
@@ -610,7 +641,7 @@ TEST_F(TrackRunIteratorTest, SharedAuxInfoTest) {
TEST_F(TrackRunIteratorTest, UnexpectedOrderingTest) {
AddEncryption(&moov_.tracks[0]);
AddEncryption(&moov_.tracks[1]);
- iter_.reset(new TrackRunIterator(&moov_, log_cb_));
+ iter_.reset(new TrackRunIterator(&moov_, media_log_));
MovieFragment moof = CreateFragment();
AddAuxInfoHeaders(20000, &moof.tracks[0]);
@@ -644,45 +675,43 @@ TEST_F(TrackRunIteratorTest, UnexpectedOrderingTest) {
EXPECT_EQ(iter_->GetMaxClearOffset(), 10000);
}
-TEST_F(TrackRunIteratorTest, MissingAndEmptyStss) {
+TEST_F(TrackRunIteratorTest, KeyFrameFlagCombinations) {
+ // Setup both audio and video tracks to each have 6 samples covering all the
+ // combinations of mp4 "sync sample" and "depends on" relationships.
MovieFragment moof = CreateFragment();
-
- // Setup track 0 to not have an stss box, which means that all samples should
- // be marked as random access points unless the kSampleIsNonSyncSample flag is
- // set in the sample flags.
- moov_.tracks[0].media.information.sample_table.sync_sample.is_present = false;
- moov_.tracks[0].media.information.sample_table.sync_sample.entries.resize(0);
moof.tracks[0].runs.resize(1);
- moof.tracks[0].runs[0].sample_count = 6;
- moof.tracks[0].runs[0].data_offset = 100;
- SetFlagsOnSamples("US UN OS ON NS NN", &moof.tracks[0].runs[0]);
-
- // Setup track 1 to have an stss box with no entries, which normally means
- // that none of the samples should be random access points. If the
- // kSampleIsNonSyncSample flag is NOT set though, the sample should be
- // considered a random access point.
- moov_.tracks[1].media.information.sample_table.sync_sample.is_present = true;
- moov_.tracks[1].media.information.sample_table.sync_sample.entries.resize(0);
moof.tracks[1].runs.resize(1);
+ moof.tracks[0].runs[0].sample_count = 6;
moof.tracks[1].runs[0].sample_count = 6;
- moof.tracks[1].runs[0].data_offset = 200;
+ SetFlagsOnSamples("US UN OS ON NS NN", &moof.tracks[0].runs[0]);
SetFlagsOnSamples("US UN OS ON NS NN", &moof.tracks[1].runs[0]);
-
- iter_.reset(new TrackRunIterator(&moov_, log_cb_));
+ iter_.reset(new TrackRunIterator(&moov_, media_log_));
ASSERT_TRUE(iter_->Init(moof));
EXPECT_TRUE(iter_->IsRunValid());
- // Verify that all samples except for the ones that have the
- // kSampleIsNonSyncSample flag set are marked as random access points.
- EXPECT_EQ("1 KR P PR P KR K", KeyframeAndRAPInfo(iter_.get()));
+ // Keyframes should be marked according to downstream's expectations that
+ // keyframes serve as points of random access for seeking.
+
+ // For audio, any sync sample should be marked as a key frame. Whether a
+ // sample "depends on" other samples is not considered. Unlike video samples,
+ // audio samples are often marked as depending on other samples but are still
+ // workable for random access. While we allow for parsing of audio samples
+ // that are non-sync samples, we generally expect all audio samples to be sync
+ // samples and downstream will log and discard any non-sync audio samples.
+ EXPECT_EQ("1 K P K P K P", KeyframeAndRAPInfo(iter_.get()));
iter_->AdvanceRun();
- // Verify that nothing is marked as a random access point.
- EXPECT_EQ("2 KR P PR P KR K", KeyframeAndRAPInfo(iter_.get()));
+ // For video, any key frame should be both a sync sample and have no known
+ // dependents. Ideally, a video sync sample should always be marked as having
+ // no dependents, but we occasionally encounter media where all samples are
+ // marked "sync" and we must rely on combining the two flags to pick out the
+ // true key frames. See http://crbug.com/310712 and http://crbug.com/507916.
+ // Realiably knowing the keyframes for video is also critical to SPS PPS
+ // insertion.
+ EXPECT_EQ("2 K P P P K P", KeyframeAndRAPInfo(iter_.get()));
}
-
} // namespace mp4
} // namespace media
diff --git a/chromium/media/formats/mpeg/adts_stream_parser.cc b/chromium/media/formats/mpeg/adts_stream_parser.cc
index ea0f5923096..a59d9ba219d 100644
--- a/chromium/media/formats/mpeg/adts_stream_parser.cc
+++ b/chromium/media/formats/mpeg/adts_stream_parser.cc
@@ -69,7 +69,7 @@ int ADTSStreamParser::ParseFrameHeader(const uint8* data,
if (sync != 0xfff || layer != 0 || frame_length < bytes_read ||
sample_rate_index >= kADTSFrequencyTableSize ||
channel_layout_index >= kADTSChannelLayoutTableSize) {
- MEDIA_LOG(DEBUG, log_cb())
+ MEDIA_LOG(DEBUG, media_log())
<< "Invalid header data :" << std::hex << " sync 0x" << sync
<< " version 0x" << version << " layer 0x" << layer
<< " sample_rate_index 0x" << sample_rate_index
diff --git a/chromium/media/formats/mpeg/mpeg1_audio_stream_parser.cc b/chromium/media/formats/mpeg/mpeg1_audio_stream_parser.cc
index 204ee732fce..67b3705b030 100644
--- a/chromium/media/formats/mpeg/mpeg1_audio_stream_parser.cc
+++ b/chromium/media/formats/mpeg/mpeg1_audio_stream_parser.cc
@@ -87,7 +87,7 @@ static const int kCodecDelay = 529;
// static
bool MPEG1AudioStreamParser::ParseHeader(
- const LogCB& log_cb,
+ const scoped_refptr<MediaLog>& media_log,
const uint8* data,
Header* header) {
BitReader reader(data, kHeaderSize);
@@ -128,7 +128,7 @@ bool MPEG1AudioStreamParser::ParseHeader(
layer == kLayerReserved ||
bitrate_index == kBitrateFree || bitrate_index == kBitrateBad ||
sample_rate_index == kSampleRateReserved) {
- MEDIA_LOG(ERROR, log_cb)
+ MEDIA_LOG(ERROR, media_log)
<< "Invalid header data :" << std::hex << " sync 0x" << sync
<< " version 0x" << version << " layer 0x" << layer
<< " bitrate_index 0x" << bitrate_index << " sample_rate_index 0x"
@@ -137,19 +137,19 @@ bool MPEG1AudioStreamParser::ParseHeader(
}
if (layer == kLayer2 && kIsAllowed[bitrate_index][channel_mode]) {
- MEDIA_LOG(ERROR, log_cb) << "Invalid (bitrate_index, channel_mode)"
- << " combination :" << std::hex
- << " bitrate_index " << bitrate_index
- << " channel_mode " << channel_mode;
+ MEDIA_LOG(ERROR, media_log) << "Invalid (bitrate_index, channel_mode)"
+ << " combination :" << std::hex
+ << " bitrate_index " << bitrate_index
+ << " channel_mode " << channel_mode;
return false;
}
int bitrate = kBitrateMap[bitrate_index][kVersionLayerMap[version][layer]];
if (bitrate == 0) {
- MEDIA_LOG(ERROR, log_cb) << "Invalid bitrate :" << std::hex << " version "
- << version << " layer " << layer
- << " bitrate_index " << bitrate_index;
+ MEDIA_LOG(ERROR, media_log) << "Invalid bitrate :" << std::hex
+ << " version " << version << " layer " << layer
+ << " bitrate_index " << bitrate_index;
return false;
}
@@ -157,9 +157,9 @@ bool MPEG1AudioStreamParser::ParseHeader(
int frame_sample_rate = kSampleRateMap[sample_rate_index][version];
if (frame_sample_rate == 0) {
- MEDIA_LOG(ERROR, log_cb) << "Invalid sample rate :" << std::hex
- << " version " << version << " sample_rate_index "
- << sample_rate_index;
+ MEDIA_LOG(ERROR, media_log) << "Invalid sample rate :" << std::hex
+ << " version " << version
+ << " sample_rate_index " << sample_rate_index;
return false;
}
header->sample_rate = frame_sample_rate;
@@ -236,7 +236,7 @@ int MPEG1AudioStreamParser::ParseFrameHeader(const uint8* data,
return 0;
Header header;
- if (!ParseHeader(log_cb(), data, &header))
+ if (!ParseHeader(media_log(), data, &header))
return -1;
*frame_size = header.frame_size;
@@ -275,7 +275,7 @@ int MPEG1AudioStreamParser::ParseFrameHeader(const uint8* data,
// Check to see if the tag contains 'Xing' or 'Info'
if (tag == 0x496e666f || tag == 0x58696e67) {
- MEDIA_LOG(DEBUG, log_cb()) << "Skipping XING header.";
+ MEDIA_LOG(DEBUG, media_log()) << "Skipping XING header.";
if (metadata_frame)
*metadata_frame = true;
return header_bytes_read + reader.bits_read() / 8;
diff --git a/chromium/media/formats/mpeg/mpeg1_audio_stream_parser.h b/chromium/media/formats/mpeg/mpeg1_audio_stream_parser.h
index b2e5086f847..3b299a105bd 100644
--- a/chromium/media/formats/mpeg/mpeg1_audio_stream_parser.h
+++ b/chromium/media/formats/mpeg/mpeg1_audio_stream_parser.h
@@ -62,10 +62,9 @@ class MEDIA_EXPORT MPEG1AudioStreamParser : public MPEGAudioStreamParserBase {
// Parses the header starting at |data|.
// Assumption: size of array |data| should be at least |kHeaderSize|.
// Returns false if the header is not valid.
- static bool ParseHeader(
- const LogCB& log_cb,
- const uint8* data,
- Header* header);
+ static bool ParseHeader(const scoped_refptr<MediaLog>& media_log,
+ const uint8* data,
+ Header* header);
MPEG1AudioStreamParser();
~MPEG1AudioStreamParser() override;
diff --git a/chromium/media/formats/mpeg/mpeg_audio_stream_parser_base.cc b/chromium/media/formats/mpeg/mpeg_audio_stream_parser_base.cc
index 14fe143c791..d9bf2b529ee 100644
--- a/chromium/media/formats/mpeg/mpeg_audio_stream_parser_base.cc
+++ b/chromium/media/formats/mpeg/mpeg_audio_stream_parser_base.cc
@@ -7,9 +7,9 @@
#include "base/bind.h"
#include "base/callback_helpers.h"
#include "base/message_loop/message_loop.h"
-#include "media/base/buffers.h"
#include "media/base/stream_parser_buffer.h"
#include "media/base/text_track_config.h"
+#include "media/base/timestamp_constants.h"
#include "media/base/video_decoder_config.h"
namespace media {
@@ -62,7 +62,7 @@ void MPEGAudioStreamParserBase::Init(
const EncryptedMediaInitDataCB& encrypted_media_init_data_cb,
const NewMediaSegmentCB& new_segment_cb,
const base::Closure& end_of_segment_cb,
- const LogCB& log_cb) {
+ const scoped_refptr<MediaLog>& media_log) {
DVLOG(1) << __FUNCTION__;
DCHECK_EQ(state_, UNINITIALIZED);
init_cb_ = init_cb;
@@ -70,7 +70,7 @@ void MPEGAudioStreamParserBase::Init(
new_buffers_cb_ = new_buffers_cb;
new_segment_cb_ = new_segment_cb;
end_of_segment_cb_ = end_of_segment_cb;
- log_cb_ = log_cb;
+ media_log_ = media_log;
ChangeState(INITIALIZED);
}
@@ -79,7 +79,8 @@ void MPEGAudioStreamParserBase::Flush() {
DVLOG(1) << __FUNCTION__;
DCHECK_NE(state_, UNINITIALIZED);
queue_.Reset();
- timestamp_helper_->SetBaseTimestamp(base::TimeDelta());
+ if (timestamp_helper_)
+ timestamp_helper_->SetBaseTimestamp(base::TimeDelta());
in_media_segment_ = false;
}
@@ -209,7 +210,6 @@ int MPEGAudioStreamParserBase::ParseFrame(const uint8* data,
NULL,
0,
false,
- false,
base::TimeDelta(),
codec_delay_);
@@ -262,7 +262,7 @@ int MPEGAudioStreamParserBase::ParseIcecastHeader(const uint8* data, int size) {
int offset = LocateEndOfHeaders(data, locate_size, 4);
if (offset < 0) {
if (locate_size == kMaxIcecastHeaderSize) {
- MEDIA_LOG(ERROR, log_cb_) << "Icecast header is too large.";
+ MEDIA_LOG(ERROR, media_log_) << "Icecast header is too large.";
return -1;
}
@@ -323,7 +323,7 @@ bool MPEGAudioStreamParserBase::ParseSyncSafeInt(BitReader* reader,
for (int i = 0; i < 4; ++i) {
uint8 tmp;
if (!reader->ReadBits(1, &tmp) || tmp != 0) {
- MEDIA_LOG(ERROR, log_cb_) << "ID3 syncsafe integer byte MSb is not 0!";
+ MEDIA_LOG(ERROR, media_log_) << "ID3 syncsafe integer byte MSb is not 0!";
return false;
}
diff --git a/chromium/media/formats/mpeg/mpeg_audio_stream_parser_base.h b/chromium/media/formats/mpeg/mpeg_audio_stream_parser_base.h
index 2443322fdd9..832199389eb 100644
--- a/chromium/media/formats/mpeg/mpeg_audio_stream_parser_base.h
+++ b/chromium/media/formats/mpeg/mpeg_audio_stream_parser_base.h
@@ -38,7 +38,7 @@ class MEDIA_EXPORT MPEGAudioStreamParserBase : public StreamParser {
const EncryptedMediaInitDataCB& encrypted_media_init_data_cb,
const NewMediaSegmentCB& new_segment_cb,
const base::Closure& end_of_segment_cb,
- const LogCB& log_cb) override;
+ const scoped_refptr<MediaLog>& media_log) override;
void Flush() override;
bool Parse(const uint8* buf, int size) override;
@@ -81,7 +81,7 @@ class MEDIA_EXPORT MPEGAudioStreamParserBase : public StreamParser {
int* sample_count,
bool* metadata_frame) const = 0;
- const LogCB& log_cb() const { return log_cb_; }
+ const scoped_refptr<MediaLog>& media_log() const { return media_log_; }
private:
enum State {
@@ -137,7 +137,7 @@ class MEDIA_EXPORT MPEGAudioStreamParserBase : public StreamParser {
NewBuffersCB new_buffers_cb_;
NewMediaSegmentCB new_segment_cb_;
base::Closure end_of_segment_cb_;
- LogCB log_cb_;
+ scoped_refptr<MediaLog> media_log_;
ByteQueue queue_;
diff --git a/chromium/media/formats/webm/cluster_builder.h b/chromium/media/formats/webm/cluster_builder.h
index ab5797cd34a..f6f60018499 100644
--- a/chromium/media/formats/webm/cluster_builder.h
+++ b/chromium/media/formats/webm/cluster_builder.h
@@ -7,7 +7,6 @@
#include "base/basictypes.h"
#include "base/memory/scoped_ptr.h"
-#include "media/base/buffers.h"
namespace media {
diff --git a/chromium/media/formats/webm/webm_audio_client.cc b/chromium/media/formats/webm/webm_audio_client.cc
index ea911ec290e..bde72332542 100644
--- a/chromium/media/formats/webm/webm_audio_client.cc
+++ b/chromium/media/formats/webm/webm_audio_client.cc
@@ -10,8 +10,8 @@
namespace media {
-WebMAudioClient::WebMAudioClient(const LogCB& log_cb)
- : log_cb_(log_cb) {
+WebMAudioClient::WebMAudioClient(const scoped_refptr<MediaLog>& media_log)
+ : media_log_(media_log) {
Reset();
}
@@ -37,7 +37,7 @@ bool WebMAudioClient::InitializeConfig(
} else if (codec_id == "A_OPUS") {
audio_codec = kCodecOpus;
} else {
- MEDIA_LOG(ERROR, log_cb_) << "Unsupported audio codec_id " << codec_id;
+ MEDIA_LOG(ERROR, media_log_) << "Unsupported audio codec_id " << codec_id;
return false;
}
@@ -51,7 +51,7 @@ bool WebMAudioClient::InitializeConfig(
ChannelLayout channel_layout = GuessChannelLayout(channels_);
if (channel_layout == CHANNEL_LAYOUT_UNSUPPORTED) {
- MEDIA_LOG(ERROR, log_cb_) << "Unsupported channel count " << channels_;
+ MEDIA_LOG(ERROR, media_log_) << "Unsupported channel count " << channels_;
return false;
}
@@ -90,7 +90,6 @@ bool WebMAudioClient::InitializeConfig(
extra_data,
extra_data_size,
is_encrypted,
- true,
base::TimeDelta::FromMicroseconds(
(seek_preroll != -1 ? seek_preroll : 0) / 1000),
codec_delay_in_frames);
@@ -100,9 +99,9 @@ bool WebMAudioClient::InitializeConfig(
bool WebMAudioClient::OnUInt(int id, int64 val) {
if (id == kWebMIdChannels) {
if (channels_ != -1) {
- MEDIA_LOG(ERROR, log_cb_) << "Multiple values for id " << std::hex << id
- << " specified. (" << channels_ << " and "
- << val << ")";
+ MEDIA_LOG(ERROR, media_log_) << "Multiple values for id " << std::hex
+ << id << " specified. (" << channels_
+ << " and " << val << ")";
return false;
}
@@ -129,9 +128,9 @@ bool WebMAudioClient::OnFloat(int id, double val) {
return false;
if (*dst != -1) {
- MEDIA_LOG(ERROR, log_cb_) << "Multiple values for id " << std::hex << id
- << " specified (" << *dst << " and " << val
- << ")";
+ MEDIA_LOG(ERROR, media_log_) << "Multiple values for id " << std::hex << id
+ << " specified (" << *dst << " and " << val
+ << ")";
return false;
}
diff --git a/chromium/media/formats/webm/webm_audio_client.h b/chromium/media/formats/webm/webm_audio_client.h
index 2e23c13eacb..67f5f01aae1 100644
--- a/chromium/media/formats/webm/webm_audio_client.h
+++ b/chromium/media/formats/webm/webm_audio_client.h
@@ -17,7 +17,7 @@ class AudioDecoderConfig;
// Helper class used to parse an Audio element inside a TrackEntry element.
class WebMAudioClient : public WebMParserClient {
public:
- explicit WebMAudioClient(const LogCB& log_cb);
+ explicit WebMAudioClient(const scoped_refptr<MediaLog>& media_log);
~WebMAudioClient() override;
// Reset this object's state so it can process a new audio track element.
@@ -41,7 +41,7 @@ class WebMAudioClient : public WebMParserClient {
bool OnUInt(int id, int64 val) override;
bool OnFloat(int id, double val) override;
- LogCB log_cb_;
+ scoped_refptr<MediaLog> media_log_;
int channels_;
double samples_per_second_;
double output_samples_per_second_;
diff --git a/chromium/media/formats/webm/webm_cluster_parser.cc b/chromium/media/formats/webm/webm_cluster_parser.cc
index a76559d241a..ee491371d3c 100644
--- a/chromium/media/formats/webm/webm_cluster_parser.cc
+++ b/chromium/media/formats/webm/webm_cluster_parser.cc
@@ -8,8 +8,8 @@
#include "base/logging.h"
#include "base/sys_byteorder.h"
-#include "media/base/buffers.h"
#include "media/base/decrypt_config.h"
+#include "media/base/timestamp_constants.h"
#include "media/filters/webvtt_util.h"
#include "media/formats/webm/webm_constants.h"
#include "media/formats/webm/webm_crypto_helpers.h"
@@ -42,32 +42,23 @@ WebMClusterParser::WebMClusterParser(
const std::string& audio_encryption_key_id,
const std::string& video_encryption_key_id,
const AudioCodec audio_codec,
- const LogCB& log_cb)
- : num_duration_errors_(0),
- timecode_multiplier_(timecode_scale / 1000.0),
+ const scoped_refptr<MediaLog>& media_log)
+ : timecode_multiplier_(timecode_scale / 1000.0),
ignored_tracks_(ignored_tracks),
audio_encryption_key_id_(audio_encryption_key_id),
video_encryption_key_id_(video_encryption_key_id),
audio_codec_(audio_codec),
parser_(kWebMIdCluster, this),
- last_block_timecode_(-1),
- block_data_size_(-1),
- block_duration_(-1),
- block_add_id_(-1),
- block_additional_data_size_(0),
- discard_padding_(-1),
- cluster_timecode_(-1),
cluster_start_time_(kNoTimestamp()),
- cluster_ended_(false),
- audio_(audio_track_num, false, audio_default_duration, log_cb),
- video_(video_track_num, true, video_default_duration, log_cb),
+ audio_(audio_track_num, false, audio_default_duration, media_log),
+ video_(video_track_num, true, video_default_duration, media_log),
ready_buffer_upper_bound_(kNoDecodeTimestamp()),
- log_cb_(log_cb) {
+ media_log_(media_log) {
for (WebMTracksParser::TextTracks::const_iterator it = text_tracks.begin();
it != text_tracks.end();
++it) {
text_track_map_.insert(std::make_pair(
- it->first, Track(it->first, false, kNoTimestamp(), log_cb_)));
+ it->first, Track(it->first, false, kNoTimestamp(), media_log_)));
}
}
@@ -188,7 +179,7 @@ base::TimeDelta WebMClusterParser::ReadOpusDuration(const uint8_t* data,
base::TimeDelta::FromMilliseconds(120);
if (size < 1) {
- LIMITED_MEDIA_LOG(DEBUG, log_cb_, num_duration_errors_,
+ LIMITED_MEDIA_LOG(DEBUG, media_log_, num_duration_errors_,
kMaxDurationErrorLogs)
<< "Invalid zero-byte Opus packet; demuxed block duration may be "
"imprecise.";
@@ -210,7 +201,7 @@ base::TimeDelta WebMClusterParser::ReadOpusDuration(const uint8_t* data,
case 3:
// Type 3 indicates an arbitrary frame count described in the next byte.
if (size < 2) {
- LIMITED_MEDIA_LOG(DEBUG, log_cb_, num_duration_errors_,
+ LIMITED_MEDIA_LOG(DEBUG, media_log_, num_duration_errors_,
kMaxDurationErrorLogs)
<< "Second byte missing from 'Code 3' Opus packet; demuxed block "
"duration may be imprecise.";
@@ -220,7 +211,7 @@ base::TimeDelta WebMClusterParser::ReadOpusDuration(const uint8_t* data,
frame_count = data[1] & kFrameCountMask;
if (frame_count == 0) {
- LIMITED_MEDIA_LOG(DEBUG, log_cb_, num_duration_errors_,
+ LIMITED_MEDIA_LOG(DEBUG, media_log_, num_duration_errors_,
kMaxDurationErrorLogs)
<< "Illegal 'Code 3' Opus packet with frame count zero; demuxed "
"block duration may be imprecise.";
@@ -229,7 +220,7 @@ base::TimeDelta WebMClusterParser::ReadOpusDuration(const uint8_t* data,
break;
default:
- LIMITED_MEDIA_LOG(DEBUG, log_cb_, num_duration_errors_,
+ LIMITED_MEDIA_LOG(DEBUG, media_log_, num_duration_errors_,
kMaxDurationErrorLogs)
<< "Unexpected Opus frame count type: " << frame_count_type << "; "
<< "demuxed block duration may be imprecise.";
@@ -248,10 +239,11 @@ base::TimeDelta WebMClusterParser::ReadOpusDuration(const uint8_t* data,
// Intentionally allowing packet to pass through for now. Decoder should
// either handle or fail gracefully. MEDIA_LOG as breadcrumbs in case
// things go sideways.
- LIMITED_MEDIA_LOG(DEBUG, log_cb_, num_duration_errors_,
+ LIMITED_MEDIA_LOG(DEBUG, media_log_, num_duration_errors_,
kMaxDurationErrorLogs)
- << "Warning, demuxed Opus packet with encoded duration: " << duration
- << ". Should be no greater than " << kPacketDurationMax;
+ << "Warning, demuxed Opus packet with encoded duration: "
+ << duration.InMilliseconds() << "ms. Should be no greater than "
+ << kPacketDurationMax.InMilliseconds() << "ms.";
}
return duration;
@@ -282,7 +274,7 @@ bool WebMClusterParser::OnListEnd(int id) {
// Make sure the BlockGroup actually had a Block.
if (block_data_size_ == -1) {
- MEDIA_LOG(ERROR, log_cb_) << "Block missing from BlockGroup.";
+ MEDIA_LOG(ERROR, media_log_) << "Block missing from BlockGroup.";
return false;
}
@@ -335,7 +327,7 @@ bool WebMClusterParser::ParseBlock(bool is_simple_block,
// Return an error if the trackNum > 127. We just aren't
// going to support large track numbers right now.
if (!(buf[0] & 0x80)) {
- MEDIA_LOG(ERROR, log_cb_) << "TrackNumber over 127 not supported";
+ MEDIA_LOG(ERROR, media_log_) << "TrackNumber over 127 not supported";
return false;
}
@@ -345,8 +337,8 @@ bool WebMClusterParser::ParseBlock(bool is_simple_block,
int lacing = (flags >> 1) & 0x3;
if (lacing) {
- MEDIA_LOG(ERROR, log_cb_) << "Lacing " << lacing
- << " is not supported yet.";
+ MEDIA_LOG(ERROR, media_log_) << "Lacing " << lacing
+ << " is not supported yet.";
return false;
}
@@ -368,8 +360,9 @@ bool WebMClusterParser::OnBinary(int id, const uint8_t* data, int size) {
case kWebMIdBlock:
if (block_data_) {
- MEDIA_LOG(ERROR, log_cb_) << "More than 1 Block in a BlockGroup is not "
- "supported.";
+ MEDIA_LOG(ERROR, media_log_)
+ << "More than 1 Block in a BlockGroup is not "
+ "supported.";
return false;
}
block_data_.reset(new uint8_t[size]);
@@ -384,8 +377,8 @@ bool WebMClusterParser::OnBinary(int id, const uint8_t* data, int size) {
// as per matroska spec. But for now we don't have a use case to
// support parsing of such files. Take a look at this again when such a
// case arises.
- MEDIA_LOG(ERROR, log_cb_) << "More than 1 BlockAdditional in a "
- "BlockGroup is not supported.";
+ MEDIA_LOG(ERROR, media_log_) << "More than 1 BlockAdditional in a "
+ "BlockGroup is not supported.";
return false;
}
// First 8 bytes of side_data in DecoderBuffer is the BlockAddID
@@ -427,20 +420,20 @@ bool WebMClusterParser::OnBlock(bool is_simple_block,
int64 discard_padding) {
DCHECK_GE(size, 0);
if (cluster_timecode_ == -1) {
- MEDIA_LOG(ERROR, log_cb_) << "Got a block before cluster timecode.";
+ MEDIA_LOG(ERROR, media_log_) << "Got a block before cluster timecode.";
return false;
}
// TODO(acolwell): Should relative negative timecode offsets be rejected? Or
// only when the absolute timecode is negative? See http://crbug.com/271794
if (timecode < 0) {
- MEDIA_LOG(ERROR, log_cb_) << "Got a block with negative timecode offset "
- << timecode;
+ MEDIA_LOG(ERROR, media_log_) << "Got a block with negative timecode offset "
+ << timecode;
return false;
}
if (last_block_timecode_ != -1 && timecode < last_block_timecode_) {
- MEDIA_LOG(ERROR, log_cb_)
+ MEDIA_LOG(ERROR, media_log_)
<< "Got a block with a timecode before the previous block.";
return false;
}
@@ -469,7 +462,7 @@ bool WebMClusterParser::OnBlock(bool is_simple_block,
track = text_track;
buffer_type = DemuxerStream::TEXT;
} else {
- MEDIA_LOG(ERROR, log_cb_) << "Unexpected track number " << track_num;
+ MEDIA_LOG(ERROR, media_log_) << "Unexpected track number " << track_num;
return false;
}
@@ -567,12 +560,11 @@ bool WebMClusterParser::OnBlock(bool is_simple_block,
const auto kWarnDurationDiff =
base::TimeDelta::FromMicroseconds(timecode_multiplier_ * 2);
if (duration_difference.magnitude() > kWarnDurationDiff) {
- LIMITED_MEDIA_LOG(DEBUG, log_cb_, num_duration_errors_,
+ LIMITED_MEDIA_LOG(DEBUG, media_log_, num_duration_errors_,
kMaxDurationErrorLogs)
- << "BlockDuration "
- << "(" << block_duration_time_delta << ") "
- << "differs significantly from encoded duration "
- << "(" << encoded_duration << ").";
+ << "BlockDuration (" << block_duration_time_delta.InMilliseconds()
+ << "ms) differs significantly from encoded duration ("
+ << encoded_duration.InMilliseconds() << "ms).";
}
}
} else if (block_duration_time_delta != kNoTimestamp()) {
@@ -594,13 +586,12 @@ bool WebMClusterParser::OnBlock(bool is_simple_block,
WebMClusterParser::Track::Track(int track_num,
bool is_video,
base::TimeDelta default_duration,
- const LogCB& log_cb)
- : num_duration_estimates_(0),
- track_num_(track_num),
+ const scoped_refptr<MediaLog>& media_log)
+ : track_num_(track_num),
is_video_(is_video),
default_duration_(default_duration),
estimated_next_frame_duration_(kNoTimestamp()),
- log_cb_(log_cb) {
+ media_log_(media_log) {
DCHECK(default_duration_ == kNoTimestamp() ||
default_duration_ > base::TimeDelta());
}
@@ -700,12 +691,13 @@ void WebMClusterParser::Track::ApplyDurationEstimateIfNeeded() {
last_added_buffer_missing_duration_->set_is_duration_estimated(true);
}
- LIMITED_MEDIA_LOG(INFO, log_cb_, num_duration_estimates_,
+ LIMITED_MEDIA_LOG(INFO, media_log_, num_duration_estimates_,
kMaxDurationEstimateLogs)
- << "Estimating WebM block duration to be " << estimated_duration << " "
- << "for the last (Simple)Block in the Cluster for this Track. Use "
- << "BlockGroups with BlockDurations at the end of each Track in a "
- << "Cluster to avoid estimation.";
+ << "Estimating WebM block duration to be "
+ << estimated_duration.InMilliseconds()
+ << "ms for the last (Simple)Block in the Cluster for this Track. Use "
+ "BlockGroups with BlockDurations at the end of each Track in a "
+ "Cluster to avoid estimation.";
DVLOG(2) << __FUNCTION__ << " new dur : ts "
<< last_added_buffer_missing_duration_->timestamp().InSecondsF()
@@ -768,7 +760,7 @@ bool WebMClusterParser::Track::QueueBuffer(
base::TimeDelta duration = buffer->duration();
if (duration < base::TimeDelta() || duration == kNoTimestamp()) {
- MEDIA_LOG(ERROR, log_cb_)
+ MEDIA_LOG(ERROR, media_log_)
<< "Invalid buffer duration: " << duration.InSecondsF();
return false;
}
diff --git a/chromium/media/formats/webm/webm_cluster_parser.h b/chromium/media/formats/webm/webm_cluster_parser.h
index 0eef9867376..e2d5b98e6de 100644
--- a/chromium/media/formats/webm/webm_cluster_parser.h
+++ b/chromium/media/formats/webm/webm_cluster_parser.h
@@ -51,7 +51,7 @@ class MEDIA_EXPORT WebMClusterParser : public WebMParserClient {
Track(int track_num,
bool is_video,
base::TimeDelta default_duration,
- const LogCB& log_cb);
+ const scoped_refptr<MediaLog>& media_log);
~Track();
int track_num() const { return track_num_; }
@@ -113,7 +113,7 @@ class MEDIA_EXPORT WebMClusterParser : public WebMParserClient {
// Counts the number of estimated durations used in this track. Used to
// prevent log spam for MEDIA_LOG()s about estimated duration.
- int num_duration_estimates_;
+ int num_duration_estimates_ = 0;
int track_num_;
bool is_video_;
@@ -143,7 +143,7 @@ class MEDIA_EXPORT WebMClusterParser : public WebMParserClient {
// splicing when these estimates are observed in SourceBufferStream.
base::TimeDelta estimated_next_frame_duration_;
- LogCB log_cb_;
+ scoped_refptr<MediaLog> media_log_;
};
typedef std::map<int, Track> TextTrackMap;
@@ -158,8 +158,8 @@ class MEDIA_EXPORT WebMClusterParser : public WebMParserClient {
const std::set<int64>& ignored_tracks,
const std::string& audio_encryption_key_id,
const std::string& video_encryption_key_id,
- const AudioCodec audio_codec_,
- const LogCB& log_cb);
+ const AudioCodec audio_codec,
+ const scoped_refptr<MediaLog>& media_log);
~WebMClusterParser() override;
// Resets the parser state so it can accept a new cluster.
@@ -269,7 +269,7 @@ class MEDIA_EXPORT WebMClusterParser : public WebMParserClient {
// Tracks the number of MEDIA_LOGs made in process of reading encoded
// duration. Useful to prevent log spam.
- int num_duration_errors_;
+ int num_duration_errors_ = 0;
double timecode_multiplier_; // Multiplier used to convert timecodes into
// microseconds.
@@ -280,23 +280,23 @@ class MEDIA_EXPORT WebMClusterParser : public WebMParserClient {
WebMListParser parser_;
- int64 last_block_timecode_;
+ int64 last_block_timecode_ = -1;
scoped_ptr<uint8_t[]> block_data_;
- int block_data_size_;
- int64 block_duration_;
- int64 block_add_id_;
+ int block_data_size_ = -1;
+ int64 block_duration_ = -1;
+ int64 block_add_id_ = -1;
scoped_ptr<uint8_t[]> block_additional_data_;
// Must be 0 if |block_additional_data_| is null. Must be > 0 if
// |block_additional_data_| is NOT null.
- int block_additional_data_size_;
+ int block_additional_data_size_ = 0;
- int64 discard_padding_;
- bool discard_padding_set_;
+ int64 discard_padding_ = -1;
+ bool discard_padding_set_ = false;
- int64 cluster_timecode_;
+ int64 cluster_timecode_ = -1;
base::TimeDelta cluster_start_time_;
- bool cluster_ended_;
+ bool cluster_ended_ = false;
Track audio_;
Track video_;
@@ -315,7 +315,7 @@ class MEDIA_EXPORT WebMClusterParser : public WebMParserClient {
// kInfiniteDuration() if no buffers are currently missing duration.
DecodeTimestamp ready_buffer_upper_bound_;
- LogCB log_cb_;
+ scoped_refptr<MediaLog> media_log_;
DISALLOW_IMPLICIT_CONSTRUCTORS(WebMClusterParser);
};
diff --git a/chromium/media/formats/webm/webm_cluster_parser_unittest.cc b/chromium/media/formats/webm/webm_cluster_parser_unittest.cc
index 4020df458bd..7581bbf8665 100644
--- a/chromium/media/formats/webm/webm_cluster_parser_unittest.cc
+++ b/chromium/media/formats/webm/webm_cluster_parser_unittest.cc
@@ -4,12 +4,16 @@
#include <algorithm>
#include <cstdlib>
+#include <string>
#include <vector>
#include "base/bind.h"
#include "base/logging.h"
+#include "base/strings/string_number_conversions.h"
#include "media/base/audio_decoder_config.h"
#include "media/base/decrypt_config.h"
+#include "media/base/mock_media_log.h"
+#include "media/base/timestamp_constants.h"
#include "media/formats/webm/cluster_builder.h"
#include "media/formats/webm/opus_packet_builder.h"
#include "media/formats/webm/webm_cluster_parser.h"
@@ -17,22 +21,54 @@
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
+using ::testing::HasSubstr;
using ::testing::InSequence;
using ::testing::Return;
+using ::testing::StrictMock;
+using ::testing::Mock;
using ::testing::_;
namespace media {
typedef WebMTracksParser::TextTracks TextTracks;
-enum {
- kTimecodeScale = 1000000, // Timecode scale for millisecond timestamps.
- kAudioTrackNum = 1,
- kVideoTrackNum = 2,
- kTextTrackNum = 3,
- kTestAudioFrameDefaultDurationInMs = 13,
- kTestVideoFrameDefaultDurationInMs = 17
-};
+// Matchers for verifying common media log entry strings.
+MATCHER_P(OpusPacketDurationTooHigh, actual_duration_ms, "") {
+ return CONTAINS_STRING(
+ arg, "Warning, demuxed Opus packet with encoded duration: " +
+ base::IntToString(actual_duration_ms) +
+ "ms. Should be no greater than 120ms.");
+}
+
+MATCHER_P(WebMSimpleBlockDurationEstimated, estimated_duration_ms, "") {
+ return CONTAINS_STRING(arg, "Estimating WebM block duration to be " +
+ base::IntToString(estimated_duration_ms) +
+ "ms for the last (Simple)Block in the "
+ "Cluster for this Track. Use BlockGroups "
+ "with BlockDurations at the end of each "
+ "Track in a Cluster to avoid estimation.");
+}
+
+MATCHER_P2(WebMBlockDurationMismatchesOpusDuration,
+ block_duration_ms,
+ opus_duration_ms,
+ "") {
+ return CONTAINS_STRING(
+ arg, "BlockDuration (" + base::IntToString(block_duration_ms) +
+ "ms) differs significantly from encoded duration (" +
+ base::IntToString(opus_duration_ms) + "ms).");
+}
+
+namespace {
+
+// Timecode scale for millisecond timestamps.
+const int kTimecodeScale = 1000000;
+
+const int kAudioTrackNum = 1;
+const int kVideoTrackNum = 2;
+const int kTextTrackNum = 3;
+const int kTestAudioFrameDefaultDurationInMs = 13;
+const int kTestVideoFrameDefaultDurationInMs = 17;
// Test duration defaults must differ from parser estimation defaults to know
// which durations parser used when emitting buffers.
@@ -63,7 +99,7 @@ struct BlockInfo {
int data_length;
};
-static const BlockInfo kDefaultBlockInfo[] = {
+const BlockInfo kDefaultBlockInfo[] = {
{kAudioTrackNum, 0, 23, true, NULL, 0},
{kAudioTrackNum, 23, 23, true, NULL, 0},
// Assumes not using DefaultDuration
@@ -74,14 +110,16 @@ static const BlockInfo kDefaultBlockInfo[] = {
{kVideoTrackNum, 100, 33, false, NULL, 0},
};
-static const uint8_t kEncryptedFrame[] = {
- 0x01, // Block is encrypted
- 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08 // IV
-};
+const uint8_t kEncryptedFrame[] = {
+ // Block is encrypted
+ 0x01,
+
+ // IV
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08};
-static scoped_ptr<Cluster> CreateCluster(int timecode,
- const BlockInfo* block_info,
- int block_count) {
+scoped_ptr<Cluster> CreateCluster(int timecode,
+ const BlockInfo* block_info,
+ int block_count) {
ClusterBuilder cb;
cb.SetClusterTimecode(0);
@@ -121,7 +159,7 @@ static scoped_ptr<Cluster> CreateCluster(int timecode,
// Creates a Cluster with one encrypted Block. |bytes_to_write| is number of
// bytes of the encrypted frame to write.
-static scoped_ptr<Cluster> CreateEncryptedCluster(int bytes_to_write) {
+scoped_ptr<Cluster> CreateEncryptedCluster(int bytes_to_write) {
CHECK_GT(bytes_to_write, 0);
CHECK_LE(bytes_to_write, static_cast<int>(sizeof(kEncryptedFrame)));
@@ -131,11 +169,11 @@ static scoped_ptr<Cluster> CreateEncryptedCluster(int bytes_to_write) {
return cb.Finish();
}
-static bool VerifyBuffers(const WebMClusterParser::BufferQueue& audio_buffers,
- const WebMClusterParser::BufferQueue& video_buffers,
- const WebMClusterParser::BufferQueue& text_buffers,
- const BlockInfo* block_info,
- int block_count) {
+bool VerifyBuffers(const WebMClusterParser::BufferQueue& audio_buffers,
+ const WebMClusterParser::BufferQueue& video_buffers,
+ const WebMClusterParser::BufferQueue& text_buffers,
+ const BlockInfo* block_info,
+ int block_count) {
int buffer_count = audio_buffers.size() + video_buffers.size() +
text_buffers.size();
if (block_count != buffer_count) {
@@ -188,9 +226,9 @@ static bool VerifyBuffers(const WebMClusterParser::BufferQueue& audio_buffers,
return true;
}
-static bool VerifyBuffers(const scoped_ptr<WebMClusterParser>& parser,
- const BlockInfo* block_info,
- int block_count) {
+bool VerifyBuffers(const scoped_ptr<WebMClusterParser>& parser,
+ const BlockInfo* block_info,
+ int block_count) {
const WebMClusterParser::TextBufferQueueMap& text_map =
parser->GetTextBuffers();
const WebMClusterParser::BufferQueue* text_buffers;
@@ -207,12 +245,11 @@ static bool VerifyBuffers(const scoped_ptr<WebMClusterParser>& parser,
block_count);
}
-static bool VerifyTextBuffers(
- const scoped_ptr<WebMClusterParser>& parser,
- const BlockInfo* block_info_ptr,
- int block_count,
- int text_track_num,
- const WebMClusterParser::BufferQueue& text_buffers) {
+bool VerifyTextBuffers(const scoped_ptr<WebMClusterParser>& parser,
+ const BlockInfo* block_info_ptr,
+ int block_count,
+ int text_track_num,
+ const WebMClusterParser::BufferQueue& text_buffers) {
const BlockInfo* const block_info_end = block_info_ptr + block_count;
typedef WebMClusterParser::BufferQueue::const_iterator TextBufferIter;
@@ -240,35 +277,27 @@ static bool VerifyTextBuffers(
return true;
}
-static void VerifyEncryptedBuffer(
- scoped_refptr<StreamParserBuffer> buffer) {
+void VerifyEncryptedBuffer(scoped_refptr<StreamParserBuffer> buffer) {
EXPECT_TRUE(buffer->decrypt_config());
EXPECT_EQ(static_cast<unsigned long>(DecryptConfig::kDecryptionKeySize),
buffer->decrypt_config()->iv().length());
}
-static void AppendToEnd(const WebMClusterParser::BufferQueue& src,
- WebMClusterParser::BufferQueue* dest) {
+void AppendToEnd(const WebMClusterParser::BufferQueue& src,
+ WebMClusterParser::BufferQueue* dest) {
for (WebMClusterParser::BufferQueue::const_iterator itr = src.begin();
itr != src.end(); ++itr) {
dest->push_back(*itr);
}
}
+} // namespace
+
class WebMClusterParserTest : public testing::Test {
public:
WebMClusterParserTest()
- : parser_(new WebMClusterParser(kTimecodeScale,
- kAudioTrackNum,
- kNoTimestamp(),
- kVideoTrackNum,
- kNoTimestamp(),
- TextTracks(),
- std::set<int64>(),
- std::string(),
- std::string(),
- kUnknownAudioCodec,
- LogCB())) {}
+ : media_log_(new StrictMock<MockMediaLog>()),
+ parser_(CreateDefaultParser()) {}
protected:
void ResetParserToHaveDefaultDurations() {
@@ -281,19 +310,63 @@ class WebMClusterParserTest : public testing::Test {
ASSERT_NE(kNoTimestamp(), default_audio_duration);
ASSERT_NE(kNoTimestamp(), default_video_duration);
- parser_.reset(new WebMClusterParser(kTimecodeScale,
- kAudioTrackNum,
- default_audio_duration,
- kVideoTrackNum,
- default_video_duration,
- TextTracks(),
- std::set<int64>(),
- std::string(),
- std::string(),
- kUnknownAudioCodec,
- LogCB()));
+ parser_.reset(CreateParserWithDefaultDurationsAndOptionalTextTracks(
+ default_audio_duration, default_video_duration));
}
+ // Helper that hard-codes some non-varying constructor parameters.
+ WebMClusterParser* CreateParserHelper(
+ base::TimeDelta audio_default_duration,
+ base::TimeDelta video_default_duration,
+ const WebMTracksParser::TextTracks& text_tracks,
+ const std::set<int64>& ignored_tracks,
+ const std::string& audio_encryption_key_id,
+ const std::string& video_encryption_key_id,
+ const AudioCodec audio_codec) {
+ return new WebMClusterParser(
+ kTimecodeScale, kAudioTrackNum, audio_default_duration, kVideoTrackNum,
+ video_default_duration, text_tracks, ignored_tracks,
+ audio_encryption_key_id, video_encryption_key_id, audio_codec,
+ media_log_);
+ }
+
+ // Create a default version of the parser for test.
+ WebMClusterParser* CreateDefaultParser() {
+ return CreateParserHelper(kNoTimestamp(), kNoTimestamp(), TextTracks(),
+ std::set<int64>(), std::string(), std::string(),
+ kUnknownAudioCodec);
+ }
+
+ // Create a parser for test with custom audio and video default durations, and
+ // optionally custom text tracks.
+ WebMClusterParser* CreateParserWithDefaultDurationsAndOptionalTextTracks(
+ base::TimeDelta audio_default_duration,
+ base::TimeDelta video_default_duration,
+ const WebMTracksParser::TextTracks& text_tracks = TextTracks()) {
+ return CreateParserHelper(audio_default_duration, video_default_duration,
+ text_tracks, std::set<int64>(), std::string(),
+ std::string(), kUnknownAudioCodec);
+ }
+
+ // Create a parser for test with custom ignored tracks.
+ WebMClusterParser* CreateParserWithIgnoredTracks(
+ std::set<int64>& ignored_tracks) {
+ return CreateParserHelper(kNoTimestamp(), kNoTimestamp(), TextTracks(),
+ ignored_tracks, std::string(), std::string(),
+ kUnknownAudioCodec);
+ }
+
+ // Create a parser for test with custom encryption key ids and audio codec.
+ WebMClusterParser* CreateParserWithKeyIdsAndAudioCodec(
+ const std::string& audio_encryption_key_id,
+ const std::string& video_encryption_key_id,
+ const AudioCodec audio_codec) {
+ return CreateParserHelper(kNoTimestamp(), kNoTimestamp(), TextTracks(),
+ std::set<int64>(), audio_encryption_key_id,
+ video_encryption_key_id, audio_codec);
+ }
+
+ scoped_refptr<StrictMock<MockMediaLog>> media_log_;
scoped_ptr<WebMClusterParser> parser_;
private:
@@ -317,17 +390,10 @@ TEST_F(WebMClusterParserTest, HeldBackBufferHoldsBackAllTracks) {
base::TimeDelta::FromMilliseconds(kTestAudioFrameDefaultDurationInMs);
ASSERT_GE(default_audio_duration, base::TimeDelta());
ASSERT_NE(kNoTimestamp(), default_audio_duration);
- parser_.reset(new WebMClusterParser(kTimecodeScale,
- kAudioTrackNum,
- default_audio_duration,
- kVideoTrackNum,
- kNoTimestamp(),
- text_tracks,
- std::set<int64>(),
- std::string(),
- std::string(),
- kUnknownAudioCodec,
- LogCB()));
+ parser_.reset(CreateParserWithDefaultDurationsAndOptionalTextTracks(
+ default_audio_duration, kNoTimestamp(), text_tracks));
+
+ const int kExpectedVideoEstimationInMs = 33;
const BlockInfo kBlockInfo[] = {
{kVideoTrackNum, 0, 33, true, NULL, 0},
@@ -336,7 +402,7 @@ TEST_F(WebMClusterParserTest, HeldBackBufferHoldsBackAllTracks) {
{kAudioTrackNum, 23, kTestAudioFrameDefaultDurationInMs, true, NULL, 0},
{kVideoTrackNum, 33, 33, true, NULL, 0},
{kAudioTrackNum, 36, kTestAudioFrameDefaultDurationInMs, true, NULL, 0},
- {kVideoTrackNum, 66, 33, true, NULL, 0},
+ {kVideoTrackNum, 66, kExpectedVideoEstimationInMs, true, NULL, 0},
{kAudioTrackNum, 70, kTestAudioFrameDefaultDurationInMs, true, NULL, 0},
{kAudioTrackNum, 83, kTestAudioFrameDefaultDurationInMs, true, NULL, 0},
};
@@ -373,6 +439,12 @@ TEST_F(WebMClusterParserTest, HeldBackBufferHoldsBackAllTracks) {
blocks_in_cluster));
// Parse all but the last byte unless we need to parse the full cluster.
bool parse_full_cluster = i == (block_count - 1);
+
+ if (parse_full_cluster) {
+ EXPECT_MEDIA_LOG(
+ WebMSimpleBlockDurationEstimated(kExpectedVideoEstimationInMs));
+ }
+
int result = parser_->Parse(cluster->data(), parse_full_cluster ?
cluster->size() : cluster->size() - 1);
if (parse_full_cluster) {
@@ -510,17 +582,7 @@ TEST_F(WebMClusterParserTest, IgnoredTracks) {
std::set<int64> ignored_tracks;
ignored_tracks.insert(kTextTrackNum);
- parser_.reset(new WebMClusterParser(kTimecodeScale,
- kAudioTrackNum,
- kNoTimestamp(),
- kVideoTrackNum,
- kNoTimestamp(),
- TextTracks(),
- ignored_tracks,
- std::string(),
- std::string(),
- kUnknownAudioCodec,
- LogCB()));
+ parser_.reset(CreateParserWithIgnoredTracks(ignored_tracks));
const BlockInfo kInputBlockInfo[] = {
{kAudioTrackNum, 0, 23, true, NULL, 0},
@@ -544,6 +606,8 @@ TEST_F(WebMClusterParserTest, IgnoredTracks) {
scoped_ptr<Cluster> cluster(
CreateCluster(0, kInputBlockInfo, input_block_count));
+ EXPECT_MEDIA_LOG(WebMSimpleBlockDurationEstimated(23));
+ EXPECT_MEDIA_LOG(WebMSimpleBlockDurationEstimated(34));
int result = parser_->Parse(cluster->data(), cluster->size());
EXPECT_EQ(cluster->size(), result);
ASSERT_TRUE(VerifyBuffers(parser_, kOutputBlockInfo, output_block_count));
@@ -556,17 +620,8 @@ TEST_F(WebMClusterParserTest, ParseTextTracks) {
TextTrackConfig(kTextSubtitles, "", "",
"")));
- parser_.reset(new WebMClusterParser(kTimecodeScale,
- kAudioTrackNum,
- kNoTimestamp(),
- kVideoTrackNum,
- kNoTimestamp(),
- text_tracks,
- std::set<int64>(),
- std::string(),
- std::string(),
- kUnknownAudioCodec,
- LogCB()));
+ parser_.reset(CreateParserWithDefaultDurationsAndOptionalTextTracks(
+ kNoTimestamp(), kNoTimestamp(), text_tracks));
const BlockInfo kInputBlockInfo[] = {
{kAudioTrackNum, 0, 23, true, NULL, 0},
@@ -582,6 +637,8 @@ TEST_F(WebMClusterParserTest, ParseTextTracks) {
scoped_ptr<Cluster> cluster(
CreateCluster(0, kInputBlockInfo, input_block_count));
+ EXPECT_MEDIA_LOG(WebMSimpleBlockDurationEstimated(23));
+ EXPECT_MEDIA_LOG(WebMSimpleBlockDurationEstimated(34));
int result = parser_->Parse(cluster->data(), cluster->size());
EXPECT_EQ(cluster->size(), result);
ASSERT_TRUE(VerifyBuffers(parser_, kInputBlockInfo, input_block_count));
@@ -594,17 +651,8 @@ TEST_F(WebMClusterParserTest, TextTracksSimpleBlock) {
TextTrackConfig(kTextSubtitles, "", "",
"")));
- parser_.reset(new WebMClusterParser(kTimecodeScale,
- kAudioTrackNum,
- kNoTimestamp(),
- kVideoTrackNum,
- kNoTimestamp(),
- text_tracks,
- std::set<int64>(),
- std::string(),
- std::string(),
- kUnknownAudioCodec,
- LogCB()));
+ parser_.reset(CreateParserWithDefaultDurationsAndOptionalTextTracks(
+ kNoTimestamp(), kNoTimestamp(), text_tracks));
const BlockInfo kInputBlockInfo[] = {
{ kTextTrackNum, 33, 42, true },
@@ -632,17 +680,8 @@ TEST_F(WebMClusterParserTest, ParseMultipleTextTracks) {
TextTrackConfig(kTextCaptions, "", "",
"")));
- parser_.reset(new WebMClusterParser(kTimecodeScale,
- kAudioTrackNum,
- kNoTimestamp(),
- kVideoTrackNum,
- kNoTimestamp(),
- text_tracks,
- std::set<int64>(),
- std::string(),
- std::string(),
- kUnknownAudioCodec,
- LogCB()));
+ parser_.reset(CreateParserWithDefaultDurationsAndOptionalTextTracks(
+ kNoTimestamp(), kNoTimestamp(), text_tracks));
const BlockInfo kInputBlockInfo[] = {
{kAudioTrackNum, 0, 23, true, NULL, 0},
@@ -659,6 +698,8 @@ TEST_F(WebMClusterParserTest, ParseMultipleTextTracks) {
scoped_ptr<Cluster> cluster(
CreateCluster(0, kInputBlockInfo, input_block_count));
+ EXPECT_MEDIA_LOG(WebMSimpleBlockDurationEstimated(23));
+ EXPECT_MEDIA_LOG(WebMSimpleBlockDurationEstimated(34));
int result = parser_->Parse(cluster->data(), cluster->size());
EXPECT_EQ(cluster->size(), result);
@@ -679,17 +720,13 @@ TEST_F(WebMClusterParserTest, ParseMultipleTextTracks) {
TEST_F(WebMClusterParserTest, ParseEncryptedBlock) {
scoped_ptr<Cluster> cluster(CreateEncryptedCluster(sizeof(kEncryptedFrame)));
- parser_.reset(new WebMClusterParser(kTimecodeScale,
- kAudioTrackNum,
- kNoTimestamp(),
- kVideoTrackNum,
- kNoTimestamp(),
- TextTracks(),
- std::set<int64>(),
- std::string(),
- "video_key_id",
- kUnknownAudioCodec,
- LogCB()));
+ parser_.reset(CreateParserWithKeyIdsAndAudioCodec(
+ std::string(), "video_key_id", kUnknownAudioCodec));
+
+ // The encrypted cluster contains just one block, video.
+ EXPECT_MEDIA_LOG(WebMSimpleBlockDurationEstimated(
+ WebMClusterParser::kDefaultVideoBufferDurationInMs));
+
int result = parser_->Parse(cluster->data(), cluster->size());
EXPECT_EQ(cluster->size(), result);
ASSERT_EQ(1UL, parser_->GetVideoBuffers().size());
@@ -701,17 +738,8 @@ TEST_F(WebMClusterParserTest, ParseBadEncryptedBlock) {
scoped_ptr<Cluster> cluster(
CreateEncryptedCluster(sizeof(kEncryptedFrame) - 1));
- parser_.reset(new WebMClusterParser(kTimecodeScale,
- kAudioTrackNum,
- kNoTimestamp(),
- kVideoTrackNum,
- kNoTimestamp(),
- TextTracks(),
- std::set<int64>(),
- std::string(),
- "video_key_id",
- kUnknownAudioCodec,
- LogCB()));
+ parser_.reset(CreateParserWithKeyIdsAndAudioCodec(
+ std::string(), "video_key_id", kUnknownAudioCodec));
int result = parser_->Parse(cluster->data(), cluster->size());
EXPECT_EQ(-1, result);
}
@@ -741,17 +769,8 @@ TEST_F(WebMClusterParserTest, ParseInvalidTextBlockGroupWithoutDuration) {
TextTrackConfig(kTextSubtitles, "", "",
"")));
- parser_.reset(new WebMClusterParser(kTimecodeScale,
- kAudioTrackNum,
- kNoTimestamp(),
- kVideoTrackNum,
- kNoTimestamp(),
- text_tracks,
- std::set<int64>(),
- std::string(),
- std::string(),
- kUnknownAudioCodec,
- LogCB()));
+ parser_.reset(CreateParserWithDefaultDurationsAndOptionalTextTracks(
+ kNoTimestamp(), kNoTimestamp(), text_tracks));
const BlockInfo kBlockInfo[] = {
{ kTextTrackNum, 33, -42, false },
@@ -808,16 +827,17 @@ TEST_F(WebMClusterParserTest, ParseWithoutAnyDurationsSimpleBlocks) {
// cluster. For video tracks we use the maximum seen so far. For audio we use
// the the minimum.
// TODO(chcunningham): Move audio over to use the maximum.
+
+ const int kExpectedAudioEstimationInMs = 22;
+ const int kExpectedVideoEstimationInMs = 34;
const BlockInfo kBlockInfo1[] = {
{kAudioTrackNum, 0, 23, true, NULL, 0},
{kAudioTrackNum, 23, 22, true, NULL, 0},
{kVideoTrackNum, 33, 33, true, NULL, 0},
{kAudioTrackNum, 45, 23, true, NULL, 0},
{kVideoTrackNum, 66, 34, true, NULL, 0},
- // Estimated from minimum audio dur
- {kAudioTrackNum, 68, 22, true, NULL, 0},
- // Estimated from maximum video dur
- {kVideoTrackNum, 100, 34, true, NULL, 0},
+ {kAudioTrackNum, 68, kExpectedAudioEstimationInMs, true, NULL, 0},
+ {kVideoTrackNum, 100, kExpectedVideoEstimationInMs, true, NULL, 0},
};
int block_count1 = arraysize(kBlockInfo1);
@@ -838,6 +858,10 @@ TEST_F(WebMClusterParserTest, ParseWithoutAnyDurationsSimpleBlocks) {
parser_->Reset();
// Now parse the full first cluster and verify all the blocks are parsed.
+ EXPECT_MEDIA_LOG(
+ WebMSimpleBlockDurationEstimated(kExpectedAudioEstimationInMs));
+ EXPECT_MEDIA_LOG(
+ WebMSimpleBlockDurationEstimated(kExpectedVideoEstimationInMs));
result = parser_->Parse(cluster1->data(), cluster1->size());
EXPECT_EQ(cluster1->size(), result);
ASSERT_TRUE(VerifyBuffers(parser_, kBlockInfo1, block_count1));
@@ -846,13 +870,17 @@ TEST_F(WebMClusterParserTest, ParseWithoutAnyDurationsSimpleBlocks) {
// each track.
const BlockInfo kBlockInfo2[] = {
// Estimate carries over across clusters
- {kAudioTrackNum, 200, 22, true, NULL, 0},
+ {kAudioTrackNum, 200, kExpectedAudioEstimationInMs, true, NULL, 0},
// Estimate carries over across clusters
- {kVideoTrackNum, 201, 34, true, NULL, 0},
+ {kVideoTrackNum, 201, kExpectedVideoEstimationInMs, true, NULL, 0},
};
int block_count2 = arraysize(kBlockInfo2);
scoped_ptr<Cluster> cluster2(CreateCluster(0, kBlockInfo2, block_count2));
+ EXPECT_MEDIA_LOG(
+ WebMSimpleBlockDurationEstimated(kExpectedAudioEstimationInMs));
+ EXPECT_MEDIA_LOG(
+ WebMSimpleBlockDurationEstimated(kExpectedVideoEstimationInMs));
result = parser_->Parse(cluster2->data(), cluster2->size());
EXPECT_EQ(cluster2->size(), result);
ASSERT_TRUE(VerifyBuffers(parser_, kBlockInfo2, block_count2));
@@ -867,16 +895,17 @@ TEST_F(WebMClusterParserTest, ParseWithoutAnyDurationsBlockGroups) {
// independently for each track in the cluster. For video tracks we use the
// maximum seen so far. For audio we use the the minimum.
// TODO(chcunningham): Move audio over to use the maximum.
+
+ const int kExpectedAudioEstimationInMs = 22;
+ const int kExpectedVideoEstimationInMs = 34;
const BlockInfo kBlockInfo1[] = {
{kAudioTrackNum, 0, -23, false, NULL, 0},
{kAudioTrackNum, 23, -22, false, NULL, 0},
{kVideoTrackNum, 33, -33, false, NULL, 0},
{kAudioTrackNum, 45, -23, false, NULL, 0},
{kVideoTrackNum, 66, -34, false, NULL, 0},
- // Estimated from minimum audio dur
- {kAudioTrackNum, 68, -22, false, NULL, 0},
- // Estimated from maximum video dur
- {kVideoTrackNum, 100, -34, false, NULL, 0},
+ {kAudioTrackNum, 68, -kExpectedAudioEstimationInMs, false, NULL, 0},
+ {kVideoTrackNum, 100, -kExpectedVideoEstimationInMs, false, NULL, 0},
};
int block_count1 = arraysize(kBlockInfo1);
@@ -897,6 +926,10 @@ TEST_F(WebMClusterParserTest, ParseWithoutAnyDurationsBlockGroups) {
parser_->Reset();
// Now parse the full first cluster and verify all the blocks are parsed.
+ EXPECT_MEDIA_LOG(
+ WebMSimpleBlockDurationEstimated(kExpectedAudioEstimationInMs));
+ EXPECT_MEDIA_LOG(
+ WebMSimpleBlockDurationEstimated(kExpectedVideoEstimationInMs));
result = parser_->Parse(cluster1->data(), cluster1->size());
EXPECT_EQ(cluster1->size(), result);
ASSERT_TRUE(VerifyBuffers(parser_, kBlockInfo1, block_count1));
@@ -904,12 +937,16 @@ TEST_F(WebMClusterParserTest, ParseWithoutAnyDurationsBlockGroups) {
// Verify that the estimated frame duration is tracked across clusters for
// each track.
const BlockInfo kBlockInfo2[] = {
- {kAudioTrackNum, 200, -22, false, NULL, 0},
- {kVideoTrackNum, 201, -34, false, NULL, 0},
+ {kAudioTrackNum, 200, -kExpectedAudioEstimationInMs, false, NULL, 0},
+ {kVideoTrackNum, 201, -kExpectedVideoEstimationInMs, false, NULL, 0},
};
int block_count2 = arraysize(kBlockInfo2);
scoped_ptr<Cluster> cluster2(CreateCluster(0, kBlockInfo2, block_count2));
+ EXPECT_MEDIA_LOG(
+ WebMSimpleBlockDurationEstimated(kExpectedAudioEstimationInMs));
+ EXPECT_MEDIA_LOG(
+ WebMSimpleBlockDurationEstimated(kExpectedVideoEstimationInMs));
result = parser_->Parse(cluster2->data(), cluster2->size());
EXPECT_EQ(cluster2->size(), result);
ASSERT_TRUE(VerifyBuffers(parser_, kBlockInfo2, block_count2));
@@ -976,6 +1013,10 @@ TEST_F(WebMClusterParserTest,
int block_count = arraysize(kBlockInfo);
scoped_ptr<Cluster> cluster(CreateCluster(0, kBlockInfo, block_count));
+ EXPECT_MEDIA_LOG(WebMSimpleBlockDurationEstimated(
+ WebMClusterParser::kDefaultAudioBufferDurationInMs));
+ EXPECT_MEDIA_LOG(WebMSimpleBlockDurationEstimated(
+ WebMClusterParser::kDefaultVideoBufferDurationInMs));
int result = parser_->Parse(cluster->data(), cluster->size());
EXPECT_EQ(cluster->size(), result);
ASSERT_TRUE(VerifyBuffers(parser_, kBlockInfo, block_count));
@@ -998,14 +1039,14 @@ TEST_F(WebMClusterParserTest,
}
TEST_F(WebMClusterParserTest, ReadOpusDurationsSimpleBlockAtEndOfCluster) {
- // Reset parser to expect Opus codec audio.
- parser_.reset(new WebMClusterParser(
- kTimecodeScale, kAudioTrackNum, kNoTimestamp(), kVideoTrackNum,
- kNoTimestamp(), TextTracks(), std::set<int64>(), std::string(),
- std::string(), kCodecOpus, LogCB()));
-
int loop_count = 0;
for (const auto* packet_ptr : BuildAllOpusPackets()) {
+ InSequence s;
+
+ // Get a new parser each iteration to prevent exceeding the media log cap.
+ parser_.reset(CreateParserWithKeyIdsAndAudioCodec(
+ std::string(), std::string(), kCodecOpus));
+
const BlockInfo kBlockInfo[] = {{kAudioTrackNum,
0,
packet_ptr->duration_ms(),
@@ -1015,9 +1056,18 @@ TEST_F(WebMClusterParserTest, ReadOpusDurationsSimpleBlockAtEndOfCluster) {
int block_count = arraysize(kBlockInfo);
scoped_ptr<Cluster> cluster(CreateCluster(0, kBlockInfo, block_count));
+ int duration_ms = packet_ptr->duration_ms(); // Casts from double.
+ if (duration_ms > 120) {
+ EXPECT_MEDIA_LOG(OpusPacketDurationTooHigh(duration_ms));
+ }
+
int result = parser_->Parse(cluster->data(), cluster->size());
EXPECT_EQ(cluster->size(), result);
ASSERT_TRUE(VerifyBuffers(parser_, kBlockInfo, block_count));
+
+ // Fail early if any iteration fails to meet the logging expectations.
+ ASSERT_TRUE(Mock::VerifyAndClearExpectations(media_log_.get()));
+
loop_count++;
}
@@ -1026,16 +1076,22 @@ TEST_F(WebMClusterParserTest, ReadOpusDurationsSimpleBlockAtEndOfCluster) {
}
TEST_F(WebMClusterParserTest, PreferOpusDurationsOverBlockDurations) {
- // Reset parser to expect Opus codec audio.
- parser_.reset(new WebMClusterParser(
- kTimecodeScale, kAudioTrackNum, kNoTimestamp(), kVideoTrackNum,
- kNoTimestamp(), TextTracks(), std::set<int64>(), std::string(),
- std::string(), kCodecOpus, LogCB()));
-
int loop_count = 0;
for (const auto* packet_ptr : BuildAllOpusPackets()) {
+ InSequence s;
+
+ // Get a new parser each iteration to prevent exceeding the media log cap.
+ parser_.reset(CreateParserWithKeyIdsAndAudioCodec(
+ std::string(), std::string(), kCodecOpus));
+
// Setting BlockDuration != Opus duration to see which one the parser uses.
int block_duration_ms = packet_ptr->duration_ms() + 10;
+ if (packet_ptr->duration_ms() > 120) {
+ EXPECT_MEDIA_LOG(OpusPacketDurationTooHigh(packet_ptr->duration_ms()));
+ }
+
+ EXPECT_MEDIA_LOG(WebMBlockDurationMismatchesOpusDuration(
+ block_duration_ms, packet_ptr->duration_ms()));
BlockInfo block_infos[] = {{kAudioTrackNum,
0,
@@ -1054,6 +1110,10 @@ TEST_F(WebMClusterParserTest, PreferOpusDurationsOverBlockDurations) {
block_infos[0].duration = packet_ptr->duration_ms();
ASSERT_TRUE(VerifyBuffers(parser_, block_infos, block_count));
+
+ // Fail early if any iteration fails to meet the logging expectations.
+ ASSERT_TRUE(Mock::VerifyAndClearExpectations(media_log_.get()));
+
loop_count++;
}
@@ -1069,10 +1129,8 @@ TEST_F(WebMClusterParserTest, DontReadEncodedDurationWhenEncrypted) {
std::string audio_encryption_id("audio_key_id");
// Reset parser to expect Opus codec audio and use audio encryption key id.
- parser_.reset(new WebMClusterParser(
- kTimecodeScale, kAudioTrackNum, kNoTimestamp(), kVideoTrackNum,
- kNoTimestamp(), TextTracks(), std::set<int64>(), audio_encryption_id,
- std::string(), kCodecOpus, LogCB()));
+ parser_.reset(CreateParserWithKeyIdsAndAudioCodec(audio_encryption_id,
+ std::string(), kCodecOpus));
// Single Block with BlockDuration and encrypted data.
const BlockInfo kBlockInfo[] = {{kAudioTrackNum,
diff --git a/chromium/media/formats/webm/webm_content_encodings_client.cc b/chromium/media/formats/webm/webm_content_encodings_client.cc
index 7265df94f6e..a9783ddf99d 100644
--- a/chromium/media/formats/webm/webm_content_encodings_client.cc
+++ b/chromium/media/formats/webm/webm_content_encodings_client.cc
@@ -10,8 +10,9 @@
namespace media {
-WebMContentEncodingsClient::WebMContentEncodingsClient(const LogCB& log_cb)
- : log_cb_(log_cb),
+WebMContentEncodingsClient::WebMContentEncodingsClient(
+ const scoped_refptr<MediaLog>& media_log)
+ : media_log_(media_log),
content_encryption_encountered_(false),
content_encodings_ready_(false) {
}
@@ -44,7 +45,7 @@ WebMParserClient* WebMContentEncodingsClient::OnListStart(int id) {
if (id == kWebMIdContentEncryption) {
DCHECK(cur_content_encoding_.get());
if (content_encryption_encountered_) {
- MEDIA_LOG(ERROR, log_cb_) << "Unexpected multiple ContentEncryption.";
+ MEDIA_LOG(ERROR, media_log_) << "Unexpected multiple ContentEncryption.";
return NULL;
}
content_encryption_encountered_ = true;
@@ -67,7 +68,7 @@ bool WebMContentEncodingsClient::OnListEnd(int id) {
if (id == kWebMIdContentEncodings) {
// ContentEncoding element is mandatory. Check this!
if (content_encodings_.empty()) {
- MEDIA_LOG(ERROR, log_cb_) << "Missing ContentEncoding.";
+ MEDIA_LOG(ERROR, media_log_) << "Missing ContentEncoding.";
return false;
}
content_encodings_ready_ = true;
@@ -85,7 +86,7 @@ bool WebMContentEncodingsClient::OnListEnd(int id) {
// Default value of encoding order is 0, which should only be used on the
// first ContentEncoding.
if (!content_encodings_.empty()) {
- MEDIA_LOG(ERROR, log_cb_) << "Missing ContentEncodingOrder.";
+ MEDIA_LOG(ERROR, media_log_) << "Missing ContentEncodingOrder.";
return false;
}
cur_content_encoding_->set_order(0);
@@ -99,15 +100,15 @@ bool WebMContentEncodingsClient::OnListEnd(int id) {
// Check for elements valid in spec but not supported for now.
if (cur_content_encoding_->type() == ContentEncoding::kTypeCompression) {
- MEDIA_LOG(ERROR, log_cb_) << "ContentCompression not supported.";
+ MEDIA_LOG(ERROR, media_log_) << "ContentCompression not supported.";
return false;
}
// Enforce mandatory elements without default values.
DCHECK(cur_content_encoding_->type() == ContentEncoding::kTypeEncryption);
if (!content_encryption_encountered_) {
- MEDIA_LOG(ERROR, log_cb_) << "ContentEncodingType is encryption but"
- << " ContentEncryption is missing.";
+ MEDIA_LOG(ERROR, media_log_) << "ContentEncodingType is encryption but"
+ << " ContentEncryption is missing.";
return false;
}
@@ -146,13 +147,14 @@ bool WebMContentEncodingsClient::OnUInt(int id, int64 val) {
if (id == kWebMIdContentEncodingOrder) {
if (cur_content_encoding_->order() != ContentEncoding::kOrderInvalid) {
- MEDIA_LOG(ERROR, log_cb_) << "Unexpected multiple ContentEncodingOrder.";
+ MEDIA_LOG(ERROR, media_log_)
+ << "Unexpected multiple ContentEncodingOrder.";
return false;
}
if (val != static_cast<int64>(content_encodings_.size())) {
// According to the spec, encoding order starts with 0 and counts upwards.
- MEDIA_LOG(ERROR, log_cb_) << "Unexpected ContentEncodingOrder.";
+ MEDIA_LOG(ERROR, media_log_) << "Unexpected ContentEncodingOrder.";
return false;
}
@@ -162,19 +164,20 @@ bool WebMContentEncodingsClient::OnUInt(int id, int64 val) {
if (id == kWebMIdContentEncodingScope) {
if (cur_content_encoding_->scope() != ContentEncoding::kScopeInvalid) {
- MEDIA_LOG(ERROR, log_cb_) << "Unexpected multiple ContentEncodingScope.";
+ MEDIA_LOG(ERROR, media_log_)
+ << "Unexpected multiple ContentEncodingScope.";
return false;
}
if (val == ContentEncoding::kScopeInvalid ||
val > ContentEncoding::kScopeMax) {
- MEDIA_LOG(ERROR, log_cb_) << "Unexpected ContentEncodingScope.";
+ MEDIA_LOG(ERROR, media_log_) << "Unexpected ContentEncodingScope.";
return false;
}
if (val & ContentEncoding::kScopeNextContentEncodingData) {
- MEDIA_LOG(ERROR, log_cb_) << "Encoded next ContentEncoding is not "
- "supported.";
+ MEDIA_LOG(ERROR, media_log_) << "Encoded next ContentEncoding is not "
+ "supported.";
return false;
}
@@ -184,18 +187,19 @@ bool WebMContentEncodingsClient::OnUInt(int id, int64 val) {
if (id == kWebMIdContentEncodingType) {
if (cur_content_encoding_->type() != ContentEncoding::kTypeInvalid) {
- MEDIA_LOG(ERROR, log_cb_) << "Unexpected multiple ContentEncodingType.";
+ MEDIA_LOG(ERROR, media_log_)
+ << "Unexpected multiple ContentEncodingType.";
return false;
}
if (val == ContentEncoding::kTypeCompression) {
- MEDIA_LOG(ERROR, log_cb_) << "ContentCompression not supported.";
+ MEDIA_LOG(ERROR, media_log_) << "ContentCompression not supported.";
return false;
}
if (val != ContentEncoding::kTypeEncryption) {
- MEDIA_LOG(ERROR, log_cb_) << "Unexpected ContentEncodingType " << val
- << ".";
+ MEDIA_LOG(ERROR, media_log_) << "Unexpected ContentEncodingType " << val
+ << ".";
return false;
}
@@ -206,13 +210,14 @@ bool WebMContentEncodingsClient::OnUInt(int id, int64 val) {
if (id == kWebMIdContentEncAlgo) {
if (cur_content_encoding_->encryption_algo() !=
ContentEncoding::kEncAlgoInvalid) {
- MEDIA_LOG(ERROR, log_cb_) << "Unexpected multiple ContentEncAlgo.";
+ MEDIA_LOG(ERROR, media_log_) << "Unexpected multiple ContentEncAlgo.";
return false;
}
if (val < ContentEncoding::kEncAlgoNotEncrypted ||
val > ContentEncoding::kEncAlgoAes) {
- MEDIA_LOG(ERROR, log_cb_) << "Unexpected ContentEncAlgo " << val << ".";
+ MEDIA_LOG(ERROR, media_log_) << "Unexpected ContentEncAlgo " << val
+ << ".";
return false;
}
@@ -224,13 +229,14 @@ bool WebMContentEncodingsClient::OnUInt(int id, int64 val) {
if (id == kWebMIdAESSettingsCipherMode) {
if (cur_content_encoding_->cipher_mode() !=
ContentEncoding::kCipherModeInvalid) {
- MEDIA_LOG(ERROR, log_cb_) << "Unexpected multiple AESSettingsCipherMode.";
+ MEDIA_LOG(ERROR, media_log_)
+ << "Unexpected multiple AESSettingsCipherMode.";
return false;
}
if (val != ContentEncoding::kCipherModeCtr) {
- MEDIA_LOG(ERROR, log_cb_) << "Unexpected AESSettingsCipherMode " << val
- << ".";
+ MEDIA_LOG(ERROR, media_log_) << "Unexpected AESSettingsCipherMode " << val
+ << ".";
return false;
}
@@ -253,7 +259,7 @@ bool WebMContentEncodingsClient::OnBinary(int id, const uint8* data, int size) {
if (id == kWebMIdContentEncKeyID) {
if (!cur_content_encoding_->encryption_key_id().empty()) {
- MEDIA_LOG(ERROR, log_cb_) << "Unexpected multiple ContentEncKeyID";
+ MEDIA_LOG(ERROR, media_log_) << "Unexpected multiple ContentEncKeyID";
return false;
}
cur_content_encoding_->SetEncryptionKeyId(data, size);
diff --git a/chromium/media/formats/webm/webm_content_encodings_client.h b/chromium/media/formats/webm/webm_content_encodings_client.h
index 4b1bfe03ec2..85f7bf329a8 100644
--- a/chromium/media/formats/webm/webm_content_encodings_client.h
+++ b/chromium/media/formats/webm/webm_content_encodings_client.h
@@ -22,7 +22,7 @@ typedef std::vector<ContentEncoding*> ContentEncodings;
// Parser for WebM ContentEncodings element.
class MEDIA_EXPORT WebMContentEncodingsClient : public WebMParserClient {
public:
- explicit WebMContentEncodingsClient(const LogCB& log_cb);
+ explicit WebMContentEncodingsClient(const scoped_refptr<MediaLog>& media_log);
~WebMContentEncodingsClient() override;
const ContentEncodings& content_encodings() const;
@@ -34,7 +34,7 @@ class MEDIA_EXPORT WebMContentEncodingsClient : public WebMParserClient {
bool OnBinary(int id, const uint8* data, int size) override;
private:
- LogCB log_cb_;
+ scoped_refptr<MediaLog> media_log_;
scoped_ptr<ContentEncoding> cur_content_encoding_;
bool content_encryption_encountered_;
ContentEncodings content_encodings_;
diff --git a/chromium/media/formats/webm/webm_content_encodings_client_unittest.cc b/chromium/media/formats/webm/webm_content_encodings_client_unittest.cc
index e124f2d883a..d31ad278b9e 100644
--- a/chromium/media/formats/webm/webm_content_encodings_client_unittest.cc
+++ b/chromium/media/formats/webm/webm_content_encodings_client_unittest.cc
@@ -2,18 +2,55 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "media/formats/webm/webm_content_encodings_client.h"
+
+#include <string>
+
#include "base/bind.h"
+#include "base/strings/string_number_conversions.h"
+#include "media/base/mock_media_log.h"
#include "media/formats/webm/webm_constants.h"
-#include "media/formats/webm/webm_content_encodings_client.h"
#include "media/formats/webm/webm_parser.h"
+#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
+using ::testing::StrictMock;
+
namespace media {
+// Matchers for verifying common media log entry strings.
+MATCHER(MissingContentEncoding, "") {
+ return CONTAINS_STRING(arg, "Missing ContentEncoding.");
+}
+
+MATCHER(UnexpectedContentEncodingOrder, "") {
+ return CONTAINS_STRING(arg, "Unexpected ContentEncodingOrder.");
+}
+
+MATCHER(UnexpectedContentEncodingScope, "") {
+ return CONTAINS_STRING(arg, "Unexpected ContentEncodingScope.");
+}
+
+MATCHER(ContentCompressionNotSupported, "") {
+ return CONTAINS_STRING(arg, "ContentCompression not supported.");
+}
+
+MATCHER(MissingContentEncryption, "") {
+ return CONTAINS_STRING(
+ arg,
+ "ContentEncodingType is encryption but ContentEncryption is missing.");
+}
+
+MATCHER_P(UnexpectedContentEncAlgo, algo, "") {
+ return CONTAINS_STRING(
+ arg, "Unexpected ContentEncAlgo " + base::IntToString(algo) + ".");
+}
+
class WebMContentEncodingsClientTest : public testing::Test {
public:
WebMContentEncodingsClientTest()
- : client_(LogCB()),
+ : media_log_(new StrictMock<MockMediaLog>()),
+ client_(media_log_),
parser_(kWebMIdContentEncodings, &client_) {}
void ParseAndExpectToFail(const uint8* buf, int size) {
@@ -22,6 +59,7 @@ class WebMContentEncodingsClientTest : public testing::Test {
}
protected:
+ scoped_refptr<StrictMock<MockMediaLog>> media_log_;
WebMContentEncodingsClient client_;
WebMListParser parser_;
};
@@ -31,6 +69,7 @@ TEST_F(WebMContentEncodingsClientTest, EmptyContentEncodings) {
0x6D, 0x80, 0x80, // ContentEncodings (size = 0)
};
int size = sizeof(kContentEncodings);
+ EXPECT_MEDIA_LOG(MissingContentEncoding());
ParseAndExpectToFail(kContentEncodings, size);
}
@@ -183,6 +222,7 @@ TEST_F(WebMContentEncodingsClientTest, InvalidContentEncodingOrder) {
0x50, 0x35, 0x80, // ContentEncryption (size = 0)
};
int size = sizeof(kContentEncodings);
+ EXPECT_MEDIA_LOG(UnexpectedContentEncodingOrder());
ParseAndExpectToFail(kContentEncodings, size);
}
@@ -195,6 +235,7 @@ TEST_F(WebMContentEncodingsClientTest, InvalidContentEncodingScope) {
0x50, 0x35, 0x80, // ContentEncryption (size = 0)
};
int size = sizeof(kContentEncodings);
+ EXPECT_MEDIA_LOG(UnexpectedContentEncodingScope());
ParseAndExpectToFail(kContentEncodings, size);
}
@@ -206,6 +247,7 @@ TEST_F(WebMContentEncodingsClientTest, InvalidContentEncodingType) {
0x50, 0x35, 0x80, // ContentEncryption (size = 0)
};
int size = sizeof(kContentEncodings);
+ EXPECT_MEDIA_LOG(ContentCompressionNotSupported());
ParseAndExpectToFail(kContentEncodings, size);
}
@@ -218,6 +260,7 @@ TEST_F(WebMContentEncodingsClientTest, MissingContentEncryption) {
// ContentEncryption missing
};
int size = sizeof(kContentEncodings);
+ EXPECT_MEDIA_LOG(MissingContentEncryption());
ParseAndExpectToFail(kContentEncodings, size);
}
@@ -232,6 +275,7 @@ TEST_F(WebMContentEncodingsClientTest, InvalidContentEncAlgo) {
0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
};
int size = sizeof(kContentEncodings);
+ EXPECT_MEDIA_LOG(UnexpectedContentEncAlgo(0xEE));
ParseAndExpectToFail(kContentEncodings, size);
}
diff --git a/chromium/media/formats/webm/webm_stream_parser.cc b/chromium/media/formats/webm/webm_stream_parser.cc
index 95a4eb01c74..b978b96b4d6 100644
--- a/chromium/media/formats/webm/webm_stream_parser.cc
+++ b/chromium/media/formats/webm/webm_stream_parser.cc
@@ -9,6 +9,7 @@
#include "base/callback.h"
#include "base/callback_helpers.h"
#include "base/logging.h"
+#include "media/base/timestamp_constants.h"
#include "media/formats/webm/webm_cluster_parser.h"
#include "media/formats/webm/webm_constants.h"
#include "media/formats/webm/webm_content_encodings.h"
@@ -33,7 +34,7 @@ void WebMStreamParser::Init(
const EncryptedMediaInitDataCB& encrypted_media_init_data_cb,
const NewMediaSegmentCB& new_segment_cb,
const base::Closure& end_of_segment_cb,
- const LogCB& log_cb) {
+ const scoped_refptr<MediaLog>& media_log) {
DCHECK_EQ(state_, kWaitingForInit);
DCHECK(init_cb_.is_null());
DCHECK(!init_cb.is_null());
@@ -51,7 +52,7 @@ void WebMStreamParser::Init(
encrypted_media_init_data_cb_ = encrypted_media_init_data_cb;
new_segment_cb_ = new_segment_cb;
end_of_segment_cb_ = end_of_segment_cb;
- log_cb_ = log_cb;
+ media_log_ = media_log;
}
void WebMStreamParser::Flush() {
@@ -154,7 +155,7 @@ int WebMStreamParser::ParseInfoAndTracks(const uint8* data, int size) {
break;
case kWebMIdCluster:
if (!cluster_parser_) {
- MEDIA_LOG(ERROR, log_cb_) << "Found Cluster element before Info.";
+ MEDIA_LOG(ERROR, media_log_) << "Found Cluster element before Info.";
return -1;
}
ChangeState(kParsingClusters);
@@ -172,7 +173,8 @@ int WebMStreamParser::ParseInfoAndTracks(const uint8* data, int size) {
// We've found the element we are looking for.
break;
default: {
- MEDIA_LOG(ERROR, log_cb_) << "Unexpected element ID 0x" << std::hex << id;
+ MEDIA_LOG(ERROR, media_log_) << "Unexpected element ID 0x" << std::hex
+ << id;
return -1;
}
}
@@ -187,7 +189,7 @@ int WebMStreamParser::ParseInfoAndTracks(const uint8* data, int size) {
cur_size -= result;
bytes_parsed += result;
- WebMTracksParser tracks_parser(log_cb_, ignore_text_tracks_);
+ WebMTracksParser tracks_parser(media_log_, ignore_text_tracks_);
result = tracks_parser.Parse(cur, cur_size);
if (result <= 0)
@@ -230,17 +232,14 @@ int WebMStreamParser::ParseInfoAndTracks(const uint8* data, int size) {
}
cluster_parser_.reset(new WebMClusterParser(
- info_parser.timecode_scale(),
- tracks_parser.audio_track_num(),
+ info_parser.timecode_scale(), tracks_parser.audio_track_num(),
tracks_parser.GetAudioDefaultDuration(timecode_scale_in_us),
tracks_parser.video_track_num(),
tracks_parser.GetVideoDefaultDuration(timecode_scale_in_us),
- tracks_parser.text_tracks(),
- tracks_parser.ignored_tracks(),
+ tracks_parser.text_tracks(), tracks_parser.ignored_tracks(),
tracks_parser.audio_encryption_key_id(),
- tracks_parser.video_encryption_key_id(),
- audio_config.codec(),
- log_cb_));
+ tracks_parser.video_encryption_key_id(), audio_config.codec(),
+ media_log_));
if (!init_cb_.is_null())
base::ResetAndReturn(&init_cb_).Run(params);
diff --git a/chromium/media/formats/webm/webm_stream_parser.h b/chromium/media/formats/webm/webm_stream_parser.h
index e808413c712..bb53c9440d1 100644
--- a/chromium/media/formats/webm/webm_stream_parser.h
+++ b/chromium/media/formats/webm/webm_stream_parser.h
@@ -8,7 +8,6 @@
#include "base/callback_forward.h"
#include "base/memory/ref_counted.h"
#include "media/base/audio_decoder_config.h"
-#include "media/base/buffers.h"
#include "media/base/byte_queue.h"
#include "media/base/stream_parser.h"
#include "media/base/video_decoder_config.h"
@@ -30,7 +29,7 @@ class WebMStreamParser : public StreamParser {
const EncryptedMediaInitDataCB& encrypted_media_init_data_cb,
const NewMediaSegmentCB& new_segment_cb,
const base::Closure& end_of_segment_cb,
- const LogCB& log_cb) override;
+ const scoped_refptr<MediaLog>& media_log) override;
void Flush() override;
bool Parse(const uint8* buf, int size) override;
@@ -75,7 +74,7 @@ class WebMStreamParser : public StreamParser {
NewMediaSegmentCB new_segment_cb_;
base::Closure end_of_segment_cb_;
- LogCB log_cb_;
+ scoped_refptr<MediaLog> media_log_;
bool unknown_segment_size_;
diff --git a/chromium/media/formats/webm/webm_tracks_parser.cc b/chromium/media/formats/webm/webm_tracks_parser.cc
index 86bb0a73103..1f2ca69afd4 100644
--- a/chromium/media/formats/webm/webm_tracks_parser.cc
+++ b/chromium/media/formats/webm/webm_tracks_parser.cc
@@ -7,7 +7,7 @@
#include "base/logging.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_util.h"
-#include "media/base/buffers.h"
+#include "media/base/timestamp_constants.h"
#include "media/formats/webm/webm_constants.h"
#include "media/formats/webm/webm_content_encodings.h"
@@ -43,7 +43,8 @@ static base::TimeDelta PrecisionCappedDefaultDuration(
return base::TimeDelta::FromMicroseconds(mult);
}
-WebMTracksParser::WebMTracksParser(const LogCB& log_cb, bool ignore_text_tracks)
+WebMTracksParser::WebMTracksParser(const scoped_refptr<MediaLog>& media_log,
+ bool ignore_text_tracks)
: track_type_(-1),
track_num_(-1),
seek_preroll_(-1),
@@ -54,9 +55,9 @@ WebMTracksParser::WebMTracksParser(const LogCB& log_cb, bool ignore_text_tracks)
video_track_num_(-1),
video_default_duration_(-1),
ignore_text_tracks_(ignore_text_tracks),
- log_cb_(log_cb),
- audio_client_(log_cb),
- video_client_(log_cb) {
+ media_log_(media_log),
+ audio_client_(media_log),
+ video_client_(media_log) {
}
WebMTracksParser::~WebMTracksParser() {}
@@ -102,7 +103,7 @@ WebMParserClient* WebMTracksParser::OnListStart(int id) {
if (id == kWebMIdContentEncodings) {
DCHECK(!track_content_encodings_client_.get());
track_content_encodings_client_.reset(
- new WebMContentEncodingsClient(log_cb_));
+ new WebMContentEncodingsClient(media_log_));
return track_content_encodings_client_->OnListStart(id);
}
@@ -136,9 +137,9 @@ bool WebMTracksParser::OnListEnd(int id) {
if (id == kWebMIdTrackEntry) {
if (track_type_ == -1 || track_num_ == -1) {
- MEDIA_LOG(ERROR, log_cb_) << "Missing TrackEntry data for "
- << " TrackType " << track_type_ << " TrackNum "
- << track_num_;
+ MEDIA_LOG(ERROR, media_log_) << "Missing TrackEntry data for "
+ << " TrackType " << track_type_
+ << " TrackNum " << track_num_;
return false;
}
@@ -146,7 +147,7 @@ bool WebMTracksParser::OnListEnd(int id) {
track_type_ != kWebMTrackTypeVideo &&
track_type_ != kWebMTrackTypeSubtitlesOrCaptions &&
track_type_ != kWebMTrackTypeDescriptionsOrMetadata) {
- MEDIA_LOG(ERROR, log_cb_) << "Unexpected TrackType " << track_type_;
+ MEDIA_LOG(ERROR, media_log_) << "Unexpected TrackType " << track_type_;
return false;
}
@@ -154,29 +155,29 @@ bool WebMTracksParser::OnListEnd(int id) {
if (track_type_ == kWebMTrackTypeSubtitlesOrCaptions) {
text_track_kind = CodecIdToTextKind(codec_id_);
if (text_track_kind == kTextNone) {
- MEDIA_LOG(ERROR, log_cb_) << "Missing TrackEntry CodecID"
- << " TrackNum " << track_num_;
+ MEDIA_LOG(ERROR, media_log_) << "Missing TrackEntry CodecID"
+ << " TrackNum " << track_num_;
return false;
}
if (text_track_kind != kTextSubtitles &&
text_track_kind != kTextCaptions) {
- MEDIA_LOG(ERROR, log_cb_) << "Wrong TrackEntry CodecID"
- << " TrackNum " << track_num_;
+ MEDIA_LOG(ERROR, media_log_) << "Wrong TrackEntry CodecID"
+ << " TrackNum " << track_num_;
return false;
}
} else if (track_type_ == kWebMTrackTypeDescriptionsOrMetadata) {
text_track_kind = CodecIdToTextKind(codec_id_);
if (text_track_kind == kTextNone) {
- MEDIA_LOG(ERROR, log_cb_) << "Missing TrackEntry CodecID"
- << " TrackNum " << track_num_;
+ MEDIA_LOG(ERROR, media_log_) << "Missing TrackEntry CodecID"
+ << " TrackNum " << track_num_;
return false;
}
if (text_track_kind != kTextDescriptions &&
text_track_kind != kTextMetadata) {
- MEDIA_LOG(ERROR, log_cb_) << "Wrong TrackEntry CodecID"
- << " TrackNum " << track_num_;
+ MEDIA_LOG(ERROR, media_log_) << "Wrong TrackEntry CodecID"
+ << " TrackNum " << track_num_;
return false;
}
}
@@ -196,8 +197,8 @@ bool WebMTracksParser::OnListEnd(int id) {
audio_encryption_key_id_ = encryption_key_id;
if (default_duration_ == 0) {
- MEDIA_LOG(ERROR, log_cb_) << "Illegal 0ns audio TrackEntry "
- "DefaultDuration";
+ MEDIA_LOG(ERROR, media_log_) << "Illegal 0ns audio TrackEntry "
+ "DefaultDuration";
return false;
}
audio_default_duration_ = default_duration_;
@@ -209,7 +210,7 @@ bool WebMTracksParser::OnListEnd(int id) {
return false;
}
} else {
- MEDIA_LOG(DEBUG, log_cb_) << "Ignoring audio track " << track_num_;
+ MEDIA_LOG(DEBUG, media_log_) << "Ignoring audio track " << track_num_;
ignored_tracks_.insert(track_num_);
}
} else if (track_type_ == kWebMTrackTypeVideo) {
@@ -218,8 +219,8 @@ bool WebMTracksParser::OnListEnd(int id) {
video_encryption_key_id_ = encryption_key_id;
if (default_duration_ == 0) {
- MEDIA_LOG(ERROR, log_cb_) << "Illegal 0ns video TrackEntry "
- "DefaultDuration";
+ MEDIA_LOG(ERROR, media_log_) << "Illegal 0ns video TrackEntry "
+ "DefaultDuration";
return false;
}
video_default_duration_ = default_duration_;
@@ -231,13 +232,13 @@ bool WebMTracksParser::OnListEnd(int id) {
return false;
}
} else {
- MEDIA_LOG(DEBUG, log_cb_) << "Ignoring video track " << track_num_;
+ MEDIA_LOG(DEBUG, media_log_) << "Ignoring video track " << track_num_;
ignored_tracks_.insert(track_num_);
}
} else if (track_type_ == kWebMTrackTypeSubtitlesOrCaptions ||
track_type_ == kWebMTrackTypeDescriptionsOrMetadata) {
if (ignore_text_tracks_) {
- MEDIA_LOG(DEBUG, log_cb_) << "Ignoring text track " << track_num_;
+ MEDIA_LOG(DEBUG, media_log_) << "Ignoring text track " << track_num_;
ignored_tracks_.insert(track_num_);
} else {
std::string track_num = base::Int64ToString(track_num_);
@@ -245,7 +246,7 @@ bool WebMTracksParser::OnListEnd(int id) {
text_track_kind, track_name_, track_language_, track_num);
}
} else {
- MEDIA_LOG(ERROR, log_cb_) << "Unexpected TrackType " << track_type_;
+ MEDIA_LOG(ERROR, media_log_) << "Unexpected TrackType " << track_type_;
return false;
}
@@ -290,8 +291,8 @@ bool WebMTracksParser::OnUInt(int id, int64 val) {
}
if (*dst != -1) {
- MEDIA_LOG(ERROR, log_cb_) << "Multiple values for id " << std::hex << id
- << " specified";
+ MEDIA_LOG(ERROR, media_log_) << "Multiple values for id " << std::hex << id
+ << " specified";
return false;
}
@@ -306,7 +307,8 @@ bool WebMTracksParser::OnFloat(int id, double val) {
bool WebMTracksParser::OnBinary(int id, const uint8* data, int size) {
if (id == kWebMIdCodecPrivate) {
if (!codec_private_.empty()) {
- MEDIA_LOG(ERROR, log_cb_) << "Multiple CodecPrivate fields in a track.";
+ MEDIA_LOG(ERROR, media_log_)
+ << "Multiple CodecPrivate fields in a track.";
return false;
}
codec_private_.assign(data, data + size);
@@ -318,7 +320,7 @@ bool WebMTracksParser::OnBinary(int id, const uint8* data, int size) {
bool WebMTracksParser::OnString(int id, const std::string& str) {
if (id == kWebMIdCodecID) {
if (!codec_id_.empty()) {
- MEDIA_LOG(ERROR, log_cb_) << "Multiple CodecID fields in a track";
+ MEDIA_LOG(ERROR, media_log_) << "Multiple CodecID fields in a track";
return false;
}
diff --git a/chromium/media/formats/webm/webm_tracks_parser.h b/chromium/media/formats/webm/webm_tracks_parser.h
index 2801a3e5322..d5513b520da 100644
--- a/chromium/media/formats/webm/webm_tracks_parser.h
+++ b/chromium/media/formats/webm/webm_tracks_parser.h
@@ -27,7 +27,8 @@ namespace media {
// Parser for WebM Tracks element.
class MEDIA_EXPORT WebMTracksParser : public WebMParserClient {
public:
- explicit WebMTracksParser(const LogCB& log_cb, bool ignore_text_tracks);
+ WebMTracksParser(const scoped_refptr<MediaLog>& media_log,
+ bool ignore_text_tracks);
~WebMTracksParser() override;
// Parses a WebM Tracks element in |buf|.
@@ -102,7 +103,7 @@ class MEDIA_EXPORT WebMTracksParser : public WebMParserClient {
std::set<int64> ignored_tracks_;
std::string audio_encryption_key_id_;
std::string video_encryption_key_id_;
- LogCB log_cb_;
+ scoped_refptr<MediaLog> media_log_;
WebMAudioClient audio_client_;
AudioDecoderConfig audio_decoder_config_;
diff --git a/chromium/media/formats/webm/webm_tracks_parser_unittest.cc b/chromium/media/formats/webm/webm_tracks_parser_unittest.cc
index 958e610f427..9c424ec3afc 100644
--- a/chromium/media/formats/webm/webm_tracks_parser_unittest.cc
+++ b/chromium/media/formats/webm/webm_tracks_parser_unittest.cc
@@ -4,14 +4,18 @@
#include "base/logging.h"
#include "media/base/channel_layout.h"
+#include "media/base/mock_media_log.h"
+#include "media/base/timestamp_constants.h"
#include "media/formats/webm/tracks_builder.h"
#include "media/formats/webm/webm_constants.h"
#include "media/formats/webm/webm_tracks_parser.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
+using ::testing::HasSubstr;
using ::testing::InSequence;
using ::testing::Return;
+using ::testing::StrictMock;
using ::testing::_;
namespace media {
@@ -20,32 +24,37 @@ static const double kDefaultTimecodeScaleInUs = 1000.0; // 1 ms resolution
class WebMTracksParserTest : public testing::Test {
public:
- WebMTracksParserTest() {}
+ WebMTracksParserTest() : media_log_(new StrictMock<MockMediaLog>()) {}
+
+ protected:
+ void VerifyTextTrackInfo(const uint8* buffer,
+ int buffer_size,
+ TextKind text_kind,
+ const std::string& name,
+ const std::string& language) {
+ scoped_ptr<WebMTracksParser> parser(
+ new WebMTracksParser(media_log_, false));
+
+ int result = parser->Parse(buffer, buffer_size);
+ EXPECT_GT(result, 0);
+ EXPECT_EQ(result, buffer_size);
+
+ const WebMTracksParser::TextTracks& text_tracks = parser->text_tracks();
+ EXPECT_EQ(text_tracks.size(), WebMTracksParser::TextTracks::size_type(1));
+
+ const WebMTracksParser::TextTracks::const_iterator itr =
+ text_tracks.begin();
+ EXPECT_EQ(itr->first, 1); // track num
+
+ const TextTrackConfig& config = itr->second;
+ EXPECT_EQ(config.kind(), text_kind);
+ EXPECT_TRUE(config.label() == name);
+ EXPECT_TRUE(config.language() == language);
+ }
+
+ scoped_refptr<StrictMock<MockMediaLog>> media_log_;
};
-static void VerifyTextTrackInfo(const uint8* buffer,
- int buffer_size,
- TextKind text_kind,
- const std::string& name,
- const std::string& language) {
- scoped_ptr<WebMTracksParser> parser(new WebMTracksParser(LogCB(), false));
-
- int result = parser->Parse(buffer, buffer_size);
- EXPECT_GT(result, 0);
- EXPECT_EQ(result, buffer_size);
-
- const WebMTracksParser::TextTracks& text_tracks = parser->text_tracks();
- EXPECT_EQ(text_tracks.size(), WebMTracksParser::TextTracks::size_type(1));
-
- const WebMTracksParser::TextTracks::const_iterator itr = text_tracks.begin();
- EXPECT_EQ(itr->first, 1); // track num
-
- const TextTrackConfig& config = itr->second;
- EXPECT_EQ(config.kind(), text_kind);
- EXPECT_TRUE(config.label() == name);
- EXPECT_TRUE(config.language() == language);
-}
-
TEST_F(WebMTracksParserTest, SubtitleNoNameNoLang) {
InSequence s;
@@ -94,7 +103,10 @@ TEST_F(WebMTracksParserTest, IgnoringTextTracks) {
tb.AddTextTrack(2, 2, kWebMCodecSubtitles, "Commentary", "fre");
const std::vector<uint8> buf = tb.Finish();
- scoped_ptr<WebMTracksParser> parser(new WebMTracksParser(LogCB(), true));
+ scoped_ptr<WebMTracksParser> parser(new WebMTracksParser(media_log_, true));
+
+ EXPECT_MEDIA_LOG(HasSubstr("Ignoring text track 1"));
+ EXPECT_MEDIA_LOG(HasSubstr("Ignoring text track 2"));
int result = parser->Parse(&buf[0], buf.size());
EXPECT_GT(result, 0);
@@ -107,7 +119,7 @@ TEST_F(WebMTracksParserTest, IgnoringTextTracks) {
EXPECT_TRUE(ignored_tracks.find(2) != ignored_tracks.end());
// Test again w/o ignoring the test tracks.
- parser.reset(new WebMTracksParser(LogCB(), false));
+ parser.reset(new WebMTracksParser(media_log_, false));
result = parser->Parse(&buf[0], buf.size());
EXPECT_GT(result, 0);
@@ -127,7 +139,7 @@ TEST_F(WebMTracksParserTest, AudioVideoDefaultDurationUnset) {
tb.AddVideoTrack(2, 2, "V_VP8", "video", "", -1, 320, 240);
const std::vector<uint8> buf = tb.Finish();
- scoped_ptr<WebMTracksParser> parser(new WebMTracksParser(LogCB(), true));
+ scoped_ptr<WebMTracksParser> parser(new WebMTracksParser(media_log_, true));
int result = parser->Parse(&buf[0], buf.size());
EXPECT_LE(0, result);
EXPECT_EQ(static_cast<int>(buf.size()), result);
@@ -156,7 +168,7 @@ TEST_F(WebMTracksParserTest, AudioVideoDefaultDurationSet) {
tb.AddVideoTrack(2, 2, "V_VP8", "video", "", 987654321, 320, 240);
const std::vector<uint8> buf = tb.Finish();
- scoped_ptr<WebMTracksParser> parser(new WebMTracksParser(LogCB(), true));
+ scoped_ptr<WebMTracksParser> parser(new WebMTracksParser(media_log_, true));
int result = parser->Parse(&buf[0], buf.size());
EXPECT_LE(0, result);
EXPECT_EQ(static_cast<int>(buf.size()), result);
@@ -178,7 +190,10 @@ TEST_F(WebMTracksParserTest, InvalidZeroDefaultDurationSet) {
tb.AddAudioTrack(1, 1, "A_VORBIS", "audio", "", 0, 2, 8000);
const std::vector<uint8> buf = tb.Finish();
- scoped_ptr<WebMTracksParser> parser(new WebMTracksParser(LogCB(), true));
+ scoped_ptr<WebMTracksParser> parser(new WebMTracksParser(media_log_, true));
+
+ EXPECT_MEDIA_LOG(HasSubstr("Illegal 0ns audio TrackEntry DefaultDuration"));
+
EXPECT_EQ(-1, parser->Parse(&buf[0], buf.size()));
}
@@ -189,7 +204,7 @@ TEST_F(WebMTracksParserTest, HighTrackUID) {
tb.AddAudioTrack(1, 1ULL << 31, "A_VORBIS", "audio", "", 40, 2, 8000);
const std::vector<uint8> buf = tb.Finish();
- scoped_ptr<WebMTracksParser> parser(new WebMTracksParser(LogCB(), true));
+ scoped_ptr<WebMTracksParser> parser(new WebMTracksParser(media_log_, true));
EXPECT_GT(parser->Parse(&buf[0], buf.size()),0);
}
diff --git a/chromium/media/formats/webm/webm_video_client.cc b/chromium/media/formats/webm/webm_video_client.cc
index 07f0bfbc17b..2d3f43b9ce7 100644
--- a/chromium/media/formats/webm/webm_video_client.cc
+++ b/chromium/media/formats/webm/webm_video_client.cc
@@ -9,8 +9,8 @@
namespace media {
-WebMVideoClient::WebMVideoClient(const LogCB& log_cb)
- : log_cb_(log_cb) {
+WebMVideoClient::WebMVideoClient(const scoped_refptr<MediaLog>& media_log)
+ : media_log_(media_log) {
Reset();
}
@@ -44,12 +44,12 @@ bool WebMVideoClient::InitializeConfig(
video_codec = kCodecVP9;
profile = VP9PROFILE_ANY;
} else {
- MEDIA_LOG(ERROR, log_cb_) << "Unsupported video codec_id " << codec_id;
+ MEDIA_LOG(ERROR, media_log_) << "Unsupported video codec_id " << codec_id;
return false;
}
- VideoFrame::Format format =
- (alpha_mode_ == 1) ? VideoFrame::YV12A : VideoFrame::YV12;
+ VideoPixelFormat format =
+ (alpha_mode_ == 1) ? PIXEL_FORMAT_YV12A : PIXEL_FORMAT_YV12;
if (pixel_width_ <= 0 || pixel_height_ <= 0)
return false;
@@ -83,8 +83,8 @@ bool WebMVideoClient::InitializeConfig(
if (display_width_ <= 0 || display_height_ <= 0)
return false;
} else {
- MEDIA_LOG(ERROR, log_cb_) << "Unsupported display unit type "
- << display_unit_;
+ MEDIA_LOG(ERROR, media_log_) << "Unsupported display unit type "
+ << display_unit_;
return false;
}
gfx::Size natural_size = gfx::Size(display_width_, display_height_);
@@ -95,10 +95,9 @@ bool WebMVideoClient::InitializeConfig(
extra_data_size = codec_private.size();
}
- config->Initialize(video_codec, profile, format,
- VideoFrame::COLOR_SPACE_UNSPECIFIED, coded_size,
- visible_rect, natural_size, extra_data, extra_data_size,
- is_encrypted, true);
+ config->Initialize(video_codec, profile, format, COLOR_SPACE_HD_REC709,
+ coded_size, visible_rect, natural_size, extra_data,
+ extra_data_size, is_encrypted);
return config->IsValidConfig();
}
@@ -141,9 +140,9 @@ bool WebMVideoClient::OnUInt(int id, int64 val) {
}
if (*dst != -1) {
- MEDIA_LOG(ERROR, log_cb_) << "Multiple values for id " << std::hex << id
- << " specified (" << *dst << " and " << val
- << ")";
+ MEDIA_LOG(ERROR, media_log_) << "Multiple values for id " << std::hex << id
+ << " specified (" << *dst << " and " << val
+ << ")";
return false;
}
diff --git a/chromium/media/formats/webm/webm_video_client.h b/chromium/media/formats/webm/webm_video_client.h
index 8e246d9a784..f2f1df4c9fd 100644
--- a/chromium/media/formats/webm/webm_video_client.h
+++ b/chromium/media/formats/webm/webm_video_client.h
@@ -17,7 +17,7 @@ class VideoDecoderConfig;
// Helper class used to parse a Video element inside a TrackEntry element.
class WebMVideoClient : public WebMParserClient {
public:
- explicit WebMVideoClient(const LogCB& log_cb);
+ explicit WebMVideoClient(const scoped_refptr<MediaLog>& media_log);
~WebMVideoClient() override;
// Reset this object's state so it can process a new video track element.
@@ -41,7 +41,7 @@ class WebMVideoClient : public WebMParserClient {
bool OnBinary(int id, const uint8* data, int size) override;
bool OnFloat(int id, double val) override;
- LogCB log_cb_;
+ scoped_refptr<MediaLog> media_log_;
int64 pixel_width_;
int64 pixel_height_;
int64 crop_bottom_;
diff --git a/chromium/media/media.gyp b/chromium/media/media.gyp
index f041c15b4c1..5a462e658df 100644
--- a/chromium/media/media.gyp
+++ b/chromium/media/media.gyp
@@ -12,14 +12,6 @@
# detection of ABI mismatches and prevents silent errors.
'linux_link_pulseaudio%': 0,
'conditions': [
- ['OS=="android" or OS=="ios"', {
- # Android and iOS don't use ffmpeg or libvpx.
- 'media_use_ffmpeg%': 0,
- 'media_use_libvpx%': 0,
- }, { # 'OS!="android" and OS!="ios"'
- 'media_use_ffmpeg%': 1,
- 'media_use_libvpx%': 1,
- }],
# Enable ALSA and Pulse for runtime selection.
['(OS=="linux" or OS=="freebsd" or OS=="solaris") and (embedded!=1 or (chromecast==1 and target_arch!="arm"))', {
# ALSA is always needed for Web MIDI even if the cras is enabled.
@@ -41,10 +33,18 @@
}, {
'use_low_memory_buffer%': 0,
}],
+ ['chromecast==1', {
+ # Enable HEVC/H265 demuxing. Actual decoding must be provided by the
+ # platform.
+ 'enable_hevc_demuxing%': 1,
+ }, {
+ 'enable_hevc_demuxing%': 0,
+ }],
],
},
'includes': [
'media_cdm.gypi',
+ 'media_variables.gypi',
],
'targets': [
{
@@ -66,6 +66,9 @@
'../url/url.gyp:url_lib',
'shared_memory_support',
],
+ 'export_dependent_settings': [
+ '../third_party/opus/opus.gyp:opus',
+ ],
'defines': [
'MEDIA_IMPLEMENTATION',
],
@@ -158,8 +161,6 @@
'audio/mac/audio_manager_mac.h',
'audio/null_audio_sink.cc',
'audio/null_audio_sink.h',
- 'audio/openbsd/audio_manager_openbsd.cc',
- 'audio/openbsd/audio_manager_openbsd.h',
'audio/pulse/audio_manager_pulse.cc',
'audio/pulse/audio_manager_pulse.h',
'audio/pulse/pulse_input.cc',
@@ -249,7 +250,6 @@
'base/bit_reader_core.h',
'base/bitstream_buffer.h',
'base/buffering_state.h',
- 'base/buffers.h',
'base/byte_queue.cc',
'base/byte_queue.h',
'base/cdm_callback_promise.cc',
@@ -260,7 +260,7 @@
'base/cdm_factory.cc',
'base/cdm_factory.h',
'base/cdm_initialized_promise.cc',
- 'base/cdm_initialized_promise.h',
+ 'base/cdm_initialized_promise.h',
'base/cdm_key_information.cc',
'base/cdm_key_information.h',
'base/cdm_promise.cc',
@@ -324,6 +324,8 @@
'base/media_log_event.h',
'base/media_permission.cc',
'base/media_permission.h',
+ 'base/media_resources.cc',
+ 'base/media_resources.h',
'base/media_switches.cc',
'base/media_switches.h',
'base/mime_util.cc',
@@ -334,6 +336,7 @@
'base/multi_channel_resampler.h',
'base/null_video_sink.cc',
'base/null_video_sink.h',
+ 'base/output_device.h',
'base/pipeline.cc',
'base/pipeline.h',
'base/pipeline_status.h',
@@ -375,6 +378,7 @@
'base/time_delta_interpolator.cc',
'base/time_delta_interpolator.h',
'base/time_source.h',
+ 'base/timestamp_constants.h',
'base/user_input_monitor.cc',
'base/user_input_monitor.h',
'base/user_input_monitor_linux.cc',
@@ -397,6 +401,8 @@
'base/video_renderer.cc',
'base/video_renderer.h',
'base/video_rotation.h',
+ 'base/video_types.cc',
+ 'base/video_types.h',
'base/video_util.cc',
'base/video_util.h',
'base/wall_clock_time_source.cc',
@@ -407,20 +413,78 @@
'base/yuv_convert.h',
'blink/skcanvas_video_renderer.cc',
'blink/skcanvas_video_renderer.h',
- 'capture/animated_content_sampler.cc',
- 'capture/animated_content_sampler.h',
- 'capture/capture_resolution_chooser.cc',
- 'capture/capture_resolution_chooser.h',
- 'capture/feedback_signal_accumulator.cc',
- 'capture/feedback_signal_accumulator.h',
- 'capture/screen_capture_device_core.cc',
- 'capture/screen_capture_device_core.h',
- 'capture/thread_safe_capture_oracle.cc',
- 'capture/thread_safe_capture_oracle.h',
- 'capture/smooth_event_sampler.cc',
- 'capture/smooth_event_sampler.h',
- 'capture/video_capture_oracle.cc',
- 'capture/video_capture_oracle.h',
+ 'capture/content/animated_content_sampler.cc',
+ 'capture/content/animated_content_sampler.h',
+ 'capture/content/capture_resolution_chooser.cc',
+ 'capture/content/capture_resolution_chooser.h',
+ 'capture/content/feedback_signal_accumulator.cc',
+ 'capture/content/feedback_signal_accumulator.h',
+ 'capture/content/screen_capture_device_core.cc',
+ 'capture/content/screen_capture_device_core.h',
+ 'capture/content/thread_safe_capture_oracle.cc',
+ 'capture/content/thread_safe_capture_oracle.h',
+ 'capture/content/smooth_event_sampler.cc',
+ 'capture/content/smooth_event_sampler.h',
+ 'capture/content/video_capture_oracle.cc',
+ 'capture/content/video_capture_oracle.h',
+ 'capture/video/android/video_capture_device_android.cc',
+ 'capture/video/android/video_capture_device_android.h',
+ 'capture/video/android/video_capture_device_factory_android.cc',
+ 'capture/video/android/video_capture_device_factory_android.h',
+ 'capture/video/fake_video_capture_device.cc',
+ 'capture/video/fake_video_capture_device.h',
+ 'capture/video/fake_video_capture_device_factory.cc',
+ 'capture/video/fake_video_capture_device_factory.h',
+ 'capture/video/file_video_capture_device.cc',
+ 'capture/video/file_video_capture_device.h',
+ 'capture/video/file_video_capture_device_factory.cc',
+ 'capture/video/file_video_capture_device_factory.h',
+ 'capture/video/linux/v4l2_capture_delegate.cc',
+ 'capture/video/linux/v4l2_capture_delegate.h',
+ 'capture/video/linux/v4l2_capture_delegate_multi_plane.cc',
+ 'capture/video/linux/v4l2_capture_delegate_multi_plane.h',
+ 'capture/video/linux/v4l2_capture_delegate_single_plane.cc',
+ 'capture/video/linux/v4l2_capture_delegate_single_plane.h',
+ 'capture/video/linux/video_capture_device_chromeos.cc',
+ 'capture/video/linux/video_capture_device_chromeos.h',
+ 'capture/video/linux/video_capture_device_factory_linux.cc',
+ 'capture/video/linux/video_capture_device_factory_linux.h',
+ 'capture/video/linux/video_capture_device_linux.cc',
+ 'capture/video/linux/video_capture_device_linux.h',
+ 'capture/video/mac/platform_video_capturing_mac.h',
+ 'capture/video/mac/video_capture_device_avfoundation_mac.h',
+ 'capture/video/mac/video_capture_device_avfoundation_mac.mm',
+ 'capture/video/mac/video_capture_device_decklink_mac.h',
+ 'capture/video/mac/video_capture_device_decklink_mac.mm',
+ 'capture/video/mac/video_capture_device_factory_mac.h',
+ 'capture/video/mac/video_capture_device_factory_mac.mm',
+ 'capture/video/mac/video_capture_device_mac.h',
+ 'capture/video/mac/video_capture_device_mac.mm',
+ 'capture/video/mac/video_capture_device_qtkit_mac.h',
+ 'capture/video/mac/video_capture_device_qtkit_mac.mm',
+ 'capture/video/video_capture_device.cc',
+ 'capture/video/video_capture_device.h',
+ 'capture/video/video_capture_device_factory.cc',
+ 'capture/video/video_capture_device_factory.h',
+ 'capture/video/video_capture_device_info.cc',
+ 'capture/video/video_capture_device_info.h',
+ 'capture/video/win/capability_list_win.cc',
+ 'capture/video/win/capability_list_win.h',
+ 'capture/video/win/filter_base_win.cc',
+ 'capture/video/win/filter_base_win.h',
+ 'capture/video/win/pin_base_win.cc',
+ 'capture/video/win/pin_base_win.h',
+ 'capture/video/win/sink_filter_observer_win.h',
+ 'capture/video/win/sink_filter_win.cc',
+ 'capture/video/win/sink_filter_win.h',
+ 'capture/video/win/sink_input_pin_win.cc',
+ 'capture/video/win/sink_input_pin_win.h',
+ 'capture/video/win/video_capture_device_factory_win.cc',
+ 'capture/video/win/video_capture_device_factory_win.h',
+ 'capture/video/win/video_capture_device_mf_win.cc',
+ 'capture/video/win/video_capture_device_mf_win.h',
+ 'capture/video/win/video_capture_device_win.cc',
+ 'capture/video/win/video_capture_device_win.h',
'cdm/aes_decryptor.cc',
'cdm/aes_decryptor.h',
'cdm/default_cdm_factory.cc',
@@ -480,6 +544,8 @@
'filters/h264_bit_reader.h',
'filters/h264_parser.cc',
'filters/h264_parser.h',
+ 'filters/ivf_parser.cc',
+ 'filters/ivf_parser.h',
'filters/in_memory_url_protocol.cc',
'filters/in_memory_url_protocol.h',
'filters/jpeg_parser.cc',
@@ -500,6 +566,10 @@
'filters/vp8_bool_decoder.h',
'filters/vp8_parser.cc',
'filters/vp8_parser.h',
+ 'filters/vp9_parser.cc',
+ 'filters/vp9_parser.h',
+ 'filters/vp9_raw_bits_reader.cc',
+ 'filters/vp9_raw_bits_reader.h',
'filters/vpx_video_decoder.cc',
'filters/vpx_video_decoder.h',
'filters/webvtt_util.h',
@@ -541,64 +611,6 @@
'renderers/renderer_impl.h',
'renderers/video_renderer_impl.cc',
'renderers/video_renderer_impl.h',
- 'video/capture/android/video_capture_device_android.cc',
- 'video/capture/android/video_capture_device_android.h',
- 'video/capture/android/video_capture_device_factory_android.cc',
- 'video/capture/android/video_capture_device_factory_android.h',
- 'video/capture/fake_video_capture_device.cc',
- 'video/capture/fake_video_capture_device.h',
- 'video/capture/fake_video_capture_device_factory.cc',
- 'video/capture/fake_video_capture_device_factory.h',
- 'video/capture/file_video_capture_device.cc',
- 'video/capture/file_video_capture_device.h',
- 'video/capture/file_video_capture_device_factory.cc',
- 'video/capture/file_video_capture_device_factory.h',
- 'video/capture/linux/v4l2_capture_delegate.cc',
- 'video/capture/linux/v4l2_capture_delegate.h',
- 'video/capture/linux/v4l2_capture_delegate_multi_plane.cc',
- 'video/capture/linux/v4l2_capture_delegate_multi_plane.h',
- 'video/capture/linux/v4l2_capture_delegate_single_plane.cc',
- 'video/capture/linux/v4l2_capture_delegate_single_plane.h',
- 'video/capture/linux/video_capture_device_chromeos.cc',
- 'video/capture/linux/video_capture_device_chromeos.h',
- 'video/capture/linux/video_capture_device_factory_linux.cc',
- 'video/capture/linux/video_capture_device_factory_linux.h',
- 'video/capture/linux/video_capture_device_linux.cc',
- 'video/capture/linux/video_capture_device_linux.h',
- 'video/capture/mac/platform_video_capturing_mac.h',
- 'video/capture/mac/video_capture_device_avfoundation_mac.h',
- 'video/capture/mac/video_capture_device_avfoundation_mac.mm',
- 'video/capture/mac/video_capture_device_decklink_mac.h',
- 'video/capture/mac/video_capture_device_decklink_mac.mm',
- 'video/capture/mac/video_capture_device_factory_mac.h',
- 'video/capture/mac/video_capture_device_factory_mac.mm',
- 'video/capture/mac/video_capture_device_mac.h',
- 'video/capture/mac/video_capture_device_mac.mm',
- 'video/capture/mac/video_capture_device_qtkit_mac.h',
- 'video/capture/mac/video_capture_device_qtkit_mac.mm',
- 'video/capture/video_capture_device.cc',
- 'video/capture/video_capture_device.h',
- 'video/capture/video_capture_device_factory.cc',
- 'video/capture/video_capture_device_factory.h',
- 'video/capture/video_capture_device_info.cc',
- 'video/capture/video_capture_device_info.h',
- 'video/capture/win/capability_list_win.cc',
- 'video/capture/win/capability_list_win.h',
- 'video/capture/win/filter_base_win.cc',
- 'video/capture/win/filter_base_win.h',
- 'video/capture/win/pin_base_win.cc',
- 'video/capture/win/pin_base_win.h',
- 'video/capture/win/sink_filter_observer_win.h',
- 'video/capture/win/sink_filter_win.cc',
- 'video/capture/win/sink_filter_win.h',
- 'video/capture/win/sink_input_pin_win.cc',
- 'video/capture/win/sink_input_pin_win.h',
- 'video/capture/win/video_capture_device_factory_win.cc',
- 'video/capture/win/video_capture_device_factory_win.h',
- 'video/capture/win/video_capture_device_mf_win.cc',
- 'video/capture/win/video_capture_device_mf_win.h',
- 'video/capture/win/video_capture_device_win.cc',
- 'video/capture/win/video_capture_device_win.h',
'video/fake_video_encode_accelerator.cc',
'video/fake_video_encode_accelerator.h',
'video/gpu_memory_buffer_video_frame_pool.cc',
@@ -672,7 +684,7 @@
}],
['media_use_libvpx==1', {
'dependencies': [
- '<(DEPTH)/third_party/libvpx/libvpx.gyp:libvpx',
+ '<(DEPTH)/third_party/libvpx_new/libvpx.gyp:libvpx_new',
],
}, { # media_use_libvpx==0
'defines': [
@@ -689,6 +701,15 @@
'filters/vpx_video_decoder.h',
],
}],
+ ['media_use_libwebm==1', {
+ 'dependencies': [
+ '<(DEPTH)/third_party/libwebm/libwebm.gyp:libwebm',
+ ],
+ 'sources': [
+ 'capture/webm_muxer.cc',
+ 'capture/webm_muxer.h',
+ ],
+ }],
['enable_browser_cdms==1', {
'sources': [
'base/browser_cdm.cc',
@@ -704,17 +725,38 @@
'player_android',
'video_capture_android_jni_headers',
],
- 'sources': [
- 'base/media.cc',
- 'base/media.h',
- ],
'sources!': [
- 'filters/opus_audio_decoder.cc',
- 'filters/opus_audio_decoder.h',
+ 'base/audio_video_metadata_extractor.cc',
+ 'base/audio_video_metadata_extractor.h',
+ 'base/media_file_checker.cc',
+ 'base/media_file_checker.h',
+ 'filters/ffmpeg_video_decoder.cc',
+ 'filters/ffmpeg_video_decoder.h',
],
'defines': [
'DISABLE_USER_INPUT_MONITOR',
],
+ 'conditions': [
+ ['media_use_ffmpeg == 1', {
+ 'defines': [
+ # On Android, FFmpeg is built without video decoders. We only
+ # support hardware video decoding.
+ 'ENABLE_MEDIA_PIPELINE_ON_ANDROID',
+ 'DISABLE_FFMPEG_VIDEO_DECODERS',
+ ],
+ 'direct_dependent_settings': {
+ 'defines': [
+ 'ENABLE_MEDIA_PIPELINE_ON_ANDROID',
+ 'DISABLE_FFMPEG_VIDEO_DECODERS',
+ ],
+ },
+ }, { # media_use_ffmpeg == 0
+ 'sources!': [
+ 'filters/opus_audio_decoder.cc',
+ 'filters/opus_audio_decoder.h',
+ ],
+ }],
+ ],
}],
# For VaapiVideoEncodeAccelerator.
['target_arch != "arm" and chromeos == 1', {
@@ -738,15 +780,10 @@
['exclude', '_alsa\\.(h|cc)$'],
],
}],
- ['OS!="openbsd"', {
- 'sources!': [
- 'audio/openbsd/audio_manager_openbsd.cc',
- 'audio/openbsd/audio_manager_openbsd.h',
- ],
- }, { # else: openbsd==1
+ ['OS=="openbsd"', {
'sources!': [
- 'video/capture/linux/v4l2_capture_delegate_multi_plane.cc',
- 'video/capture/linux/v4l2_capture_delegate_multi_plane.h',
+ 'capture/video/linux/v4l2_capture_delegate_multi_plane.cc',
+ 'capture/video/linux/v4l2_capture_delegate_multi_plane.h',
],
}],
['OS=="linux"', {
@@ -769,6 +806,9 @@
],
}],
['use_cras==1', {
+ 'dependencies': [
+ '../chromeos/chromeos.gyp:chromeos',
+ ],
'cflags': [
'<!@(<(pkg-config) --cflags libcras)',
],
@@ -1028,6 +1068,8 @@
'formats/mp4/aac.h',
'formats/mp4/avc.cc',
'formats/mp4/avc.h',
+ 'formats/mp4/bitstream_converter.cc',
+ 'formats/mp4/bitstream_converter.h',
'formats/mp4/box_definitions.cc',
'formats/mp4/box_definitions.h',
'formats/mp4/box_reader.cc',
@@ -1052,6 +1094,23 @@
'formats/mpeg/mpeg_audio_stream_parser_base.h',
],
}],
+ ['proprietary_codecs==1 and enable_hevc_demuxing==1', {
+ 'defines': [
+ 'ENABLE_HEVC_DEMUXING'
+ ],
+ 'sources': [
+ 'filters/h265_parser.cc',
+ 'filters/h265_parser.h',
+ 'formats/mp4/hevc.cc',
+ 'formats/mp4/hevc.h',
+ ],
+ }],
+ ['proprietary_codecs==1 and enable_hevc_demuxing==1 and media_use_ffmpeg==1', {
+ 'sources': [
+ 'filters/ffmpeg_h265_to_annex_b_bitstream_converter.cc',
+ 'filters/ffmpeg_h265_to_annex_b_bitstream_converter.h',
+ ],
+ }],
['target_arch=="ia32" or target_arch=="x64"', {
'dependencies': [
'media_asm',
@@ -1126,9 +1185,11 @@
'base/android/media_codec_decoder_unittest.cc',
'base/android/media_codec_player_unittest.cc',
'base/android/media_drm_bridge_unittest.cc',
+ 'base/android/media_player_bridge_unittest.cc',
'base/android/media_source_player_unittest.cc',
'base/android/test_data_factory.cc',
'base/android/test_data_factory.h',
+ 'base/android/test_statistics.h',
'base/audio_block_fifo_unittest.cc',
'base/audio_buffer_converter_unittest.cc',
'base/audio_buffer_queue_unittest.cc',
@@ -1185,11 +1246,13 @@
'base/wall_clock_time_source_unittest.cc',
'base/yuv_convert_unittest.cc',
'blink/skcanvas_video_renderer_unittest.cc',
- 'capture/animated_content_sampler_unittest.cc',
- 'capture/capture_resolution_chooser_unittest.cc',
- 'capture/feedback_signal_accumulator_unittest.cc',
- 'capture/smooth_event_sampler_unittest.cc',
- 'capture/video_capture_oracle_unittest.cc',
+ 'capture/content/animated_content_sampler_unittest.cc',
+ 'capture/content/capture_resolution_chooser_unittest.cc',
+ 'capture/content/feedback_signal_accumulator_unittest.cc',
+ 'capture/content/smooth_event_sampler_unittest.cc',
+ 'capture/content/video_capture_oracle_unittest.cc',
+ 'capture/video/fake_video_capture_device_unittest.cc',
+ 'capture/video/video_capture_device_unittest.cc',
'cdm/aes_decryptor_unittest.cc',
'cdm/json_web_key_unittest.cc',
'ffmpeg/ffmpeg_common_unittest.cc',
@@ -1214,6 +1277,7 @@
'filters/h264_bit_reader_unittest.cc',
'filters/h264_parser_unittest.cc',
'filters/in_memory_url_protocol_unittest.cc',
+ 'filters/ivf_parser_unittest.cc',
'filters/jpeg_parser_unittest.cc',
'filters/source_buffer_stream_unittest.cc',
'filters/video_cadence_estimator_unittest.cc',
@@ -1222,6 +1286,8 @@
'filters/video_renderer_algorithm_unittest.cc',
'filters/vp8_bool_decoder_unittest.cc',
'filters/vp8_parser_unittest.cc',
+ 'filters/vp9_parser_unittest.cc',
+ 'filters/vp9_raw_bits_reader_unittest.cc',
'formats/common/offset_byte_queue_unittest.cc',
'formats/webm/cluster_builder.cc',
'formats/webm/cluster_builder.h',
@@ -1239,8 +1305,6 @@
'renderers/video_renderer_impl_unittest.cc',
'test/pipeline_integration_test.cc',
'test/pipeline_integration_test_base.cc',
- 'video/capture/fake_video_capture_device_unittest.cc',
- 'video/capture/video_capture_device_unittest.cc',
'video/h264_poc_unittest.cc',
'video/gpu_memory_buffer_video_frame_pool_unittest.cc',
],
@@ -1254,29 +1318,52 @@
'USE_NEON'
],
}],
+ ['proprietary_codecs==1 and enable_hevc_demuxing==1', {
+ 'defines': [
+ 'ENABLE_HEVC_DEMUXING'
+ ],
+ 'sources': [
+ 'filters/h265_parser_unittest.cc',
+ ],
+ }],
['media_use_ffmpeg==1', {
'dependencies': [
'../third_party/ffmpeg/ffmpeg.gyp:ffmpeg',
],
- }, { # media_use_ffmpeg== 0
+ }, { # media_use_ffmpeg==0
'sources!': [
- 'base/audio_video_metadata_extractor_unittest.cc',
- 'base/container_names_unittest.cc',
- 'base/media_file_checker_unittest.cc',
'ffmpeg/ffmpeg_common_unittest.cc',
'filters/audio_decoder_unittest.cc',
- 'filters/audio_file_reader_unittest.cc',
- 'filters/blocking_url_protocol_unittest.cc',
'filters/ffmpeg_aac_bitstream_converter_unittest.cc',
'filters/ffmpeg_demuxer_unittest.cc',
'filters/ffmpeg_glue_unittest.cc',
'filters/ffmpeg_h264_to_annex_b_bitstream_converter_unittest.cc',
+ ],
+ }],
+ # Even if FFmpeg is enabled on Android we don't want these.
+ # TODO(watk): Refactor tests that could be made to run on Android.
+ ['media_use_ffmpeg==0 or OS=="android"', {
+ 'sources!': [
+ 'base/audio_video_metadata_extractor_unittest.cc',
+ 'base/container_names_unittest.cc',
+ 'base/media_file_checker_unittest.cc',
+ 'filters/audio_file_reader_unittest.cc',
+ 'filters/blocking_url_protocol_unittest.cc',
'filters/ffmpeg_video_decoder_unittest.cc',
'filters/in_memory_url_protocol_unittest.cc',
'test/pipeline_integration_test.cc',
'test/pipeline_integration_test_base.cc',
],
}],
+ ['media_use_libwebm==1', {
+ 'dependencies': [
+ '<(DEPTH)/third_party/libwebm/libwebm.gyp:libwebm',
+ ],
+ 'sources': [
+ 'capture/webm_muxer_unittest.cc',
+ ],
+ }],
+
['(os_posix==1 and OS!="mac") or (OS=="win" and component!="shared_library" and win_use_allocator_shim==1)', {
'conditions': [
['use_allocator!="none"', {
@@ -1331,7 +1418,7 @@
}],
['OS=="mac"', {
'sources': [
- 'video/capture/mac/video_capture_device_factory_mac_unittest.mm',
+ 'capture/video/mac/video_capture_device_factory_mac_unittest.mm',
]
}],
['use_x11==1', {
@@ -1410,6 +1497,7 @@
'audio/audio_parameters_unittest.cc',
'audio/audio_power_monitor_unittest.cc',
'audio/fake_audio_worker_unittest.cc',
+ 'audio/point_unittest.cc',
'audio/simple_sources_unittest.cc',
'audio/virtual_audio_input_stream_unittest.cc',
'audio/virtual_audio_output_stream_unittest.cc',
@@ -1424,10 +1512,6 @@
'sources': [
'audio/android/audio_android_unittest.cc',
],
- }, {
- 'sources': [
- 'audio/audio_input_volume_unittest.cc',
- ],
}],
['OS=="mac"', {
'sources': [
@@ -1527,6 +1611,10 @@
'base/fake_audio_renderer_sink.h',
'base/fake_demuxer_stream.cc',
'base/fake_demuxer_stream.h',
+ 'base/fake_media_resources.cc',
+ 'base/fake_media_resources.h',
+ 'base/fake_output_device.cc',
+ 'base/fake_output_device.h',
'base/fake_text_track_stream.cc',
'base/fake_text_track_stream.h',
'base/gmock_callback_support.h',
@@ -1536,6 +1624,8 @@
'base/mock_demuxer_host.h',
'base/mock_filters.cc',
'base/mock_filters.h',
+ 'base/mock_media_log.cc',
+ 'base/mock_media_log.h',
'base/test_data_util.cc',
'base/test_data_util.h',
'base/test_helpers.cc',
@@ -1555,6 +1645,7 @@
'type': '<(component)',
'dependencies': [
'../base/base.gyp:base',
+ '../ui/gfx/gfx.gyp:gfx_geometry',
],
'defines': [
'MEDIA_IMPLEMENTATION',
@@ -1774,6 +1865,10 @@
'base/android/media_resource_getter.h',
'base/android/media_source_player.cc',
'base/android/media_source_player.h',
+ 'base/android/media_statistics.cc',
+ 'base/android/media_statistics.h',
+ 'base/android/media_task_runner.cc',
+ 'base/android/media_task_runner.h',
'base/android/media_url_interceptor.h',
'base/android/video_decoder_job.cc',
'base/android/video_decoder_job.h',
@@ -1818,7 +1913,7 @@
'target_name': 'media_android_captureapitype',
'type': 'none',
'variables': {
- 'source_file': 'video/capture/video_capture_device.h',
+ 'source_file': 'capture/video/video_capture_device.h',
},
'includes': [ '../build/android/java_cpp_enum.gypi' ],
},
@@ -1827,7 +1922,7 @@
'target_name': 'media_android_imageformat',
'type': 'none',
'variables': {
- 'source_file': 'video/capture/android/video_capture_device_android.h',
+ 'source_file': 'capture/video/android/video_capture_device_android.h',
},
'includes': [ '../build/android/java_cpp_enum.gypi' ],
},
@@ -1916,6 +2011,8 @@
'base/video_frame.h',
'base/video_frame_metadata.cc',
'base/video_frame_metadata.h',
+ 'base/video_types.cc',
+ 'base/video_types.h',
'base/video_util.cc',
'base/video_util.h',
'base/yuv_convert.cc',
diff --git a/chromium/media/media_cdm.gypi b/chromium/media/media_cdm.gypi
index 1068aa1580f..0dd6a354361 100644
--- a/chromium/media/media_cdm.gypi
+++ b/chromium/media/media_cdm.gypi
@@ -3,20 +3,16 @@
# found in the LICENSE file.
{
+ 'includes': [
+ 'media_variables.gypi',
+ ],
'variables': {
- 'conditions': [
- ['OS == "android"', {
- # Android doesn't use ffmpeg.
- 'use_ffmpeg%': 0,
- }, { # 'OS != "android"'
- 'use_ffmpeg%': 1,
- }],
- ],
# Set |use_fake_video_decoder| to 1 to ignore input frames in |clearkeycdm|,
# and produce video frames filled with a solid color instead.
'use_fake_video_decoder%': 0,
- # Set |use_libvpx| to 1 to use libvpx for VP8 decoding in |clearkeycdm|.
- 'use_libvpx%': 0,
+ # Set |use_libvpx_in_clear_key_cdm| to 1 to use libvpx for VP8 decoding in
+ # |clearkeycdm|.
+ 'use_libvpx_in_clear_key_cdm%': 0,
},
'conditions': [
['enable_pepper_cdms==1', {
@@ -39,7 +35,7 @@
'cdm/ppapi/external_clear_key/fake_cdm_video_decoder.h',
],
}],
- ['use_ffmpeg == 1' , {
+ ['media_use_ffmpeg == 1' , {
'defines': ['CLEAR_KEY_CDM_USE_FFMPEG_DECODER'],
'dependencies': [
'<(DEPTH)/third_party/ffmpeg/ffmpeg.gyp:ffmpeg',
@@ -49,16 +45,16 @@
'cdm/ppapi/external_clear_key/ffmpeg_cdm_audio_decoder.h',
],
}],
- ['use_ffmpeg == 1 and use_fake_video_decoder == 0' , {
+ ['media_use_ffmpeg == 1 and use_fake_video_decoder == 0' , {
'sources': [
'cdm/ppapi/external_clear_key/ffmpeg_cdm_video_decoder.cc',
'cdm/ppapi/external_clear_key/ffmpeg_cdm_video_decoder.h',
],
}],
- ['use_libvpx == 1 and use_fake_video_decoder == 0' , {
+ ['use_libvpx_in_clear_key_cdm == 1 and use_fake_video_decoder == 0' , {
'defines': ['CLEAR_KEY_CDM_USE_LIBVPX_DECODER'],
'dependencies': [
- '<(DEPTH)/third_party/libvpx/libvpx.gyp:libvpx',
+ '<(DEPTH)/third_party/libvpx_new/libvpx.gyp:libvpx_new',
],
'sources': [
'cdm/ppapi/external_clear_key/libvpx_cdm_video_decoder.cc',
diff --git a/chromium/media/media_nacl.gyp b/chromium/media/media_nacl.gyp
index f345376f658..595ee519da6 100644
--- a/chromium/media/media_nacl.gyp
+++ b/chromium/media/media_nacl.gyp
@@ -41,31 +41,6 @@
'<@(shared_memory_support_sources)',
],
}, # end of target 'shared_memory_support_nacl'
- {
- 'target_name': 'media_yuv_nacl',
- 'type': 'none',
- 'variables': {
- 'nlib_target': 'libmedia_yuv_nacl.a',
- 'build_glibc': 0,
- 'build_newlib': 0,
- 'build_pnacl_newlib': 1,
- },
- 'sources': [
- 'base/media.cc',
- 'base/media.h',
- 'base/simd/convert_rgb_to_yuv.h',
- 'base/simd/convert_rgb_to_yuv_c.cc',
- 'base/simd/convert_yuv_to_rgb.h',
- 'base/simd/convert_yuv_to_rgb_c.cc',
- 'base/simd/filter_yuv.h',
- 'base/simd/filter_yuv_c.cc',
- 'base/yuv_convert.cc',
- 'base/yuv_convert.h',
- ],
- 'defines': [
- 'MEDIA_DISABLE_FFMPEG',
- ],
- }, # end of target 'media_yuv_nacl'
],
}],
],
diff --git a/chromium/media/media_options.gni b/chromium/media/media_options.gni
index cd7440a8db6..b6c63fb4e7f 100644
--- a/chromium/media/media_options.gni
+++ b/chromium/media/media_options.gni
@@ -2,6 +2,8 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+import("//build/config/chromecast_build.gni")
+
declare_args() {
# Allows distributions to link pulseaudio directly (DT_NEEDED) instead of
# using dlopen. This helps with automated detection of ABI mismatches and
@@ -17,10 +19,14 @@ declare_args() {
# decoding of VP9 and VP8A type content.
media_use_libvpx = true
- # Neither Android nor iOS use ffmpeg or libvpx.
+ # Enable libwebm for multiplexing video and audio for JS recording API.
+ media_use_libwebm = true
+
+  # Neither Android nor iOS uses ffmpeg, libvpx, or libwebm.
if (is_android || is_ios) {
media_use_ffmpeg = false
media_use_libvpx = false
+ media_use_libwebm = false
}
# Override to dynamically link the cras (ChromeOS audio) library.
@@ -32,18 +38,27 @@ declare_args() {
# Enables runtime selection of ALSA library for audio.
use_alsa = false
- # TODO(GYP): How to handled the "embedded" use case?
- # Original conditional: (OS=="linux" or OS=="freebsd" or OS=="solaris") and embedded!=1
- if (is_posix && !is_android && !is_mac) {
+ # Alsa should be used on non-Android, non-Mac POSIX systems, and Chromecast
+ # builds for desktop Linux.
+ if (is_posix && !is_android && !is_mac &&
+ (!is_chromecast || target_cpu != "arm")) {
use_alsa = true
if (!use_cras) {
use_pulseaudio = true
}
}
+ # Use low-memory buffers on non-Android builds of Chromecast.
+ use_low_memory_buffer = is_chromecast && !is_android
+
# Enables the MPEG2-TS stream parser for use with Media Source. Disabled by
- # default since it's not available on the normal Web Platform and costs money.
- enable_mpeg2ts_stream_parser = false
+ # default (except on Chromecast) since it's not available on the normal Web
+ # Platform and costs money.
+ enable_mpeg2ts_stream_parser = is_chromecast
+
+ # Enable HEVC/H265 demuxing. Actual decoding must be provided by the
+ # platform. Enable by default for Chromecast.
+ enable_hevc_demuxing = is_chromecast
# Experiment to enable mojo media application: http://crbug.com/431776
# Valid options are:
diff --git a/chromium/media/media_unittests.isolate b/chromium/media/media_unittests.isolate
index 7c0d4e4e04f..67c1da535c8 100644
--- a/chromium/media/media_unittests.isolate
+++ b/chromium/media/media_unittests.isolate
@@ -48,7 +48,6 @@
'variables': {
'files': [
'../testing/test_env.py',
- '<(PRODUCT_DIR)/media_unittests<(EXECUTABLE_SUFFIX)',
],
},
}],
diff --git a/chromium/media/media_variables.gypi b/chromium/media/media_variables.gypi
new file mode 100644
index 00000000000..b203ca09d67
--- /dev/null
+++ b/chromium/media/media_variables.gypi
@@ -0,0 +1,23 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Common media variables.
+{
+ 'variables': {
+ 'conditions': [
+ ['OS == "android" or OS == "ios"', {
+ # Android and iOS don't use FFmpeg, libvpx nor libwebm by default.
+ # Set media_use_ffmpeg=1 for Android builds to compile experimental
+ # support for FFmpeg and the desktop media pipeline.
+ 'media_use_ffmpeg%': 0,
+ 'media_use_libvpx%': 0,
+ 'media_use_libwebm%': 0,
+ }, {
+ 'media_use_ffmpeg%': 1,
+ 'media_use_libvpx%': 1,
+ 'media_use_libwebm%': 1,
+ }],
+ ],
+ },
+}
diff --git a/chromium/media/midi/BUILD.gn b/chromium/media/midi/BUILD.gn
index 28085cb764a..bbf0a149ab3 100644
--- a/chromium/media/midi/BUILD.gn
+++ b/chromium/media/midi/BUILD.gn
@@ -15,7 +15,7 @@ if (is_android) {
# Common configuration for targets in the media/midi directory.
config("midi_config") {
- if (use_alsa) {
+ if (use_alsa && use_udev) {
defines = [
"USE_ALSA",
"USE_UDEV",
@@ -54,6 +54,10 @@ if (is_android) {
generate_jni("midi_jni_headers") {
sources = [
+ "java/src/org/chromium/media/midi/MidiDeviceAndroid.java",
+ "java/src/org/chromium/media/midi/MidiInputPortAndroid.java",
+ "java/src/org/chromium/media/midi/MidiManagerAndroid.java",
+ "java/src/org/chromium/media/midi/MidiOutputPortAndroid.java",
"java/src/org/chromium/media/midi/UsbMidiDeviceAndroid.java",
"java/src/org/chromium/media/midi/UsbMidiDeviceFactoryAndroid.java",
]
@@ -78,6 +82,8 @@ component("midi") {
"midi_port_info.h",
"midi_scheduler.cc",
"midi_scheduler.h",
+ "midi_switches.cc",
+ "midi_switches.h",
]
configs += [ ":midi_config" ]
@@ -89,10 +95,18 @@ component("midi") {
libs = []
if (is_android) {
+ # TODO(yhirano): Consider having "android" subdirectory.
sources += [
+ "midi_device_android.cc",
+ "midi_device_android.h",
"midi_jni_registrar.cc",
"midi_jni_registrar.h",
+ "midi_input_port_android.cc",
+ "midi_input_port_android.h",
"midi_manager_android.cc",
+ "midi_manager_android.h",
+ "midi_output_port_android.cc",
+ "midi_output_port_android.h",
"usb_midi_device_android.cc",
"usb_midi_device_android.h",
"usb_midi_device_factory_android.cc",
@@ -136,6 +150,15 @@ component("midi") {
}
}
+# TODO(GYP): Delete this after we've converted everything to GN.
+# The _run targets exist only for compatibility w/ GYP.
+group("midi_unittests_run") {
+ testonly = true
+ deps = [
+ ":midi_unittests",
+ ]
+}
+
test("midi_unittests") {
sources = [
"midi_manager_unittest.cc",
diff --git a/chromium/media/midi/midi.gyp b/chromium/media/midi/midi.gyp
index c4fc9455ce9..723a9a3166b 100644
--- a/chromium/media/midi/midi.gyp
+++ b/chromium/media/midi/midi.gyp
@@ -47,6 +47,7 @@
'midi_manager.cc',
'midi_manager.h',
'midi_manager_android.cc',
+ 'midi_manager_android.h',
'midi_manager_mac.cc',
'midi_manager_mac.h',
'midi_manager_win.cc',
@@ -59,6 +60,8 @@
'midi_port_info.h',
'midi_scheduler.cc',
'midi_scheduler.h',
+ 'midi_switches.cc',
+ 'midi_switches.h',
'usb_midi_device_android.cc',
'usb_midi_device_android.h',
'usb_midi_device_factory_android.cc',
@@ -97,8 +100,14 @@
],
'sources': [
'<@(usb_midi_sources)',
+ 'midi_device_android.cc',
+ 'midi_device_android.h',
+ 'midi_input_port_android.cc',
+ 'midi_input_port_android.h',
'midi_jni_registrar.cc',
'midi_jni_registrar.h',
+ 'midi_output_port_android.cc',
+ 'midi_output_port_android.h',
],
'defines': [
'EXPORT_USB_MIDI',
@@ -207,6 +216,10 @@
'target_name': 'midi_jni_headers',
'type': 'none',
'sources': [
+ 'java/src/org/chromium/media/midi/MidiDeviceAndroid.java',
+ 'java/src/org/chromium/media/midi/MidiManagerAndroid.java',
+ 'java/src/org/chromium/media/midi/MidiInputPortAndroid.java',
+ 'java/src/org/chromium/media/midi/MidiOutputPortAndroid.java',
'java/src/org/chromium/media/midi/UsbMidiDeviceAndroid.java',
'java/src/org/chromium/media/midi/UsbMidiDeviceFactoryAndroid.java',
],
diff --git a/chromium/media/midi/midi_device_android.cc b/chromium/media/midi/midi_device_android.cc
new file mode 100644
index 00000000000..4fa6b137c5d
--- /dev/null
+++ b/chromium/media/midi/midi_device_android.cc
@@ -0,0 +1,68 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/midi/midi_device_android.h"
+
+#include <string>
+
+#include "jni/MidiDeviceAndroid_jni.h"
+#include "media/midi/midi_output_port_android.h"
+
+namespace media {
+namespace midi {
+
+MidiDeviceAndroid::MidiDeviceAndroid(JNIEnv* env,
+ jobject raw_device,
+ MidiInputPortAndroid::Delegate* delegate)
+ : raw_device_(env, raw_device) {
+ ScopedJavaLocalRef<jobjectArray> raw_input_ports =
+ Java_MidiDeviceAndroid_getInputPorts(env, raw_device);
+ jsize num_input_ports = env->GetArrayLength(raw_input_ports.obj());
+
+ for (jsize i = 0; i < num_input_ports; ++i) {
+ jobject port = env->GetObjectArrayElement(raw_input_ports.obj(), i);
+ input_ports_.push_back(new MidiInputPortAndroid(env, port, delegate));
+ }
+
+ ScopedJavaLocalRef<jobjectArray> raw_output_ports =
+ Java_MidiDeviceAndroid_getOutputPorts(env, raw_device);
+ jsize num_output_ports = env->GetArrayLength(raw_output_ports.obj());
+ for (jsize i = 0; i < num_output_ports; ++i) {
+ jobject port = env->GetObjectArrayElement(raw_output_ports.obj(), i);
+ output_ports_.push_back(new MidiOutputPortAndroid(env, port));
+ }
+}
+
+MidiDeviceAndroid::~MidiDeviceAndroid() {}
+
+std::string MidiDeviceAndroid::GetManufacturer() {
+ JNIEnv* env = base::android::AttachCurrentThread();
+ ScopedJavaLocalRef<jstring> ret =
+ Java_MidiDeviceAndroid_getManufacturer(env, raw_device_.obj());
+ return std::string(env->GetStringUTFChars(ret.obj(), nullptr),
+ env->GetStringUTFLength(ret.obj()));
+}
+
+std::string MidiDeviceAndroid::GetProductName() {
+ JNIEnv* env = base::android::AttachCurrentThread();
+ ScopedJavaLocalRef<jstring> ret =
+ Java_MidiDeviceAndroid_getProduct(env, raw_device_.obj());
+ return std::string(env->GetStringUTFChars(ret.obj(), nullptr),
+ env->GetStringUTFLength(ret.obj()));
+}
+
+std::string MidiDeviceAndroid::GetDeviceVersion() {
+ JNIEnv* env = base::android::AttachCurrentThread();
+ ScopedJavaLocalRef<jstring> ret =
+ Java_MidiDeviceAndroid_getVersion(env, raw_device_.obj());
+ return std::string(env->GetStringUTFChars(ret.obj(), nullptr),
+ env->GetStringUTFLength(ret.obj()));
+}
+
+bool MidiDeviceAndroid::Register(JNIEnv* env) {
+ return RegisterNativesImpl(env);
+}
+
+} // namespace midi
+} // namespace media
diff --git a/chromium/media/midi/midi_device_android.h b/chromium/media/midi/midi_device_android.h
new file mode 100644
index 00000000000..4b4882301c2
--- /dev/null
+++ b/chromium/media/midi/midi_device_android.h
@@ -0,0 +1,52 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_MIDI_MIDI_DEVICE_ANDROID_H_
+#define MEDIA_MIDI_MIDI_DEVICE_ANDROID_H_
+
+#include <jni.h>
+#include <string>
+
+#include "base/android/scoped_java_ref.h"
+#include "base/memory/scoped_vector.h"
+#include "media/midi/midi_input_port_android.h"
+
+namespace media {
+namespace midi {
+
+class MidiOutputPortAndroid;
+
+class MidiDeviceAndroid final {
+ public:
+ MidiDeviceAndroid(JNIEnv* env,
+ jobject raw_device,
+ MidiInputPortAndroid::Delegate* delegate);
+ ~MidiDeviceAndroid();
+
+ std::string GetManufacturer();
+ std::string GetProductName();
+ std::string GetDeviceVersion();
+
+ const ScopedVector<MidiInputPortAndroid>& input_ports() const {
+ return input_ports_;
+ }
+ const ScopedVector<MidiOutputPortAndroid>& output_ports() const {
+ return output_ports_;
+ }
+ bool HasRawDevice(JNIEnv* env, jobject raw_device) const {
+ return env->IsSameObject(raw_device_.obj(), raw_device);
+ }
+
+ static bool Register(JNIEnv* env);
+
+ private:
+ base::android::ScopedJavaGlobalRef<jobject> raw_device_;
+ ScopedVector<MidiInputPortAndroid> input_ports_;
+ ScopedVector<MidiOutputPortAndroid> output_ports_;
+};
+
+} // namespace midi
+} // namespace media
+
+#endif // MEDIA_MIDI_MIDI_DEVICE_ANDROID_H_
diff --git a/chromium/media/midi/midi_input_port_android.cc b/chromium/media/midi/midi_input_port_android.cc
new file mode 100644
index 00000000000..d749cc70f0e
--- /dev/null
+++ b/chromium/media/midi/midi_input_port_android.cc
@@ -0,0 +1,59 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/midi/midi_input_port_android.h"
+
+#include "base/android/jni_array.h"
+#include "base/time/time.h"
+#include "jni/MidiInputPortAndroid_jni.h"
+
+namespace media {
+namespace midi {
+
+MidiInputPortAndroid::MidiInputPortAndroid(JNIEnv* env,
+ jobject raw,
+ Delegate* delegate)
+ : raw_port_(env, raw), delegate_(delegate) {}
+
+MidiInputPortAndroid::~MidiInputPortAndroid() {
+ Close();
+}
+
+bool MidiInputPortAndroid::Open() {
+ JNIEnv* env = base::android::AttachCurrentThread();
+ return Java_MidiInputPortAndroid_open(env, raw_port_.obj(),
+ reinterpret_cast<jlong>(this));
+}
+
+void MidiInputPortAndroid::Close() {
+ JNIEnv* env = base::android::AttachCurrentThread();
+ Java_MidiInputPortAndroid_close(env, raw_port_.obj());
+}
+
+void MidiInputPortAndroid::OnData(JNIEnv* env,
+ jobject caller,
+ jbyteArray data,
+ jint offset,
+ jint size,
+ jlong timestamp) {
+ std::vector<uint8> bytes;
+ base::android::JavaByteArrayToByteVector(env, data, &bytes);
+
+ if (size == 0) {
+ return;
+ }
+
+  // TimeTicks' internal value is in microseconds, |timestamp| is in
+  // nanoseconds. Both are monotonic.
+ base::TimeTicks timestamp_to_pass = base::TimeTicks::FromInternalValue(
+ timestamp / base::TimeTicks::kNanosecondsPerMicrosecond);
+ delegate_->OnReceivedData(this, &bytes[offset], size, timestamp_to_pass);
+}
+
+bool MidiInputPortAndroid::Register(JNIEnv* env) {
+ return RegisterNativesImpl(env);
+}
+
+} // namespace midi
+} // namespace media
diff --git a/chromium/media/midi/midi_input_port_android.h b/chromium/media/midi/midi_input_port_android.h
new file mode 100644
index 00000000000..448c158903d
--- /dev/null
+++ b/chromium/media/midi/midi_input_port_android.h
@@ -0,0 +1,51 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_MIDI_MIDI_INPUT_PORT_ANDROID_H_
+#define MEDIA_MIDI_MIDI_INPUT_PORT_ANDROID_H_
+
+#include <jni.h>
+
+#include "base/android/scoped_java_ref.h"
+#include "base/time/time.h"
+
+namespace media {
+namespace midi {
+
+class MidiInputPortAndroid final {
+ public:
+ class Delegate {
+ public:
+ virtual ~Delegate() {}
+ virtual void OnReceivedData(MidiInputPortAndroid* port,
+ const uint8* data,
+ size_t size,
+ base::TimeTicks time) = 0;
+ };
+ MidiInputPortAndroid(JNIEnv* env, jobject raw, Delegate* delegate);
+ ~MidiInputPortAndroid();
+
+ // Returns true when the operation succeeds.
+ bool Open();
+ void Close();
+
+ // Called by the Java world.
+ void OnData(JNIEnv* env,
+ jobject caller,
+ jbyteArray data,
+ jint offset,
+ jint size,
+ jlong timestamp);
+
+ static bool Register(JNIEnv* env);
+
+ private:
+ base::android::ScopedJavaGlobalRef<jobject> raw_port_;
+ Delegate* const delegate_;
+};
+
+} // namespace midi
+} // namespace media
+
+#endif // MEDIA_MIDI_MIDI_INPUT_PORT_ANDROID_H_
diff --git a/chromium/media/midi/midi_manager.cc b/chromium/media/midi/midi_manager.cc
index d61bd7a14a3..6e8c1a266fa 100644
--- a/chromium/media/midi/midi_manager.cc
+++ b/chromium/media/midi/midi_manager.cc
@@ -12,24 +12,58 @@
namespace media {
namespace midi {
+namespace {
+
+using Sample = base::HistogramBase::Sample;
+
+// If many users have more devices, this number will be increased.
+// But the number is expected to be big enough for now.
+const Sample kMaxUmaDevices = 31;
+
+// Used to count events for usage histogram.
+enum class Usage {
+ CREATED,
+ CREATED_ON_UNSUPPORTED_PLATFORMS,
+ SESSION_STARTED,
+ SESSION_ENDED,
+ INITIALIZED,
+ INPUT_PORT_ADDED,
+ OUTPUT_PORT_ADDED,
+
+  // New items should be inserted here, and |MAX| should point to the last item.
+  MAX = OUTPUT_PORT_ADDED,
+};
+
+void ReportUsage(Usage usage) {
+ UMA_HISTOGRAM_ENUMERATION("Media.Midi.Usage",
+ static_cast<Sample>(usage),
+ static_cast<Sample>(Usage::MAX) + 1);
+}
+
+} // namespace
+
MidiManager::MidiManager()
: initialized_(false), result_(Result::NOT_INITIALIZED) {
+ ReportUsage(Usage::CREATED);
}
MidiManager::~MidiManager() {
UMA_HISTOGRAM_ENUMERATION("Media.Midi.ResultOnShutdown",
- static_cast<int>(result_),
- static_cast<int>(Result::MAX) + 1);
+ static_cast<Sample>(result_),
+ static_cast<Sample>(Result::MAX) + 1);
}
#if !defined(OS_MACOSX) && !defined(OS_WIN) && \
!(defined(USE_ALSA) && defined(USE_UDEV)) && !defined(OS_ANDROID)
MidiManager* MidiManager::Create() {
+ ReportUsage(Usage::CREATED_ON_UNSUPPORTED_PLATFORMS);
return new MidiManager;
}
#endif
void MidiManager::StartSession(MidiManagerClient* client) {
+ ReportUsage(Usage::SESSION_STARTED);
+
bool session_is_ready;
bool session_needs_initialization = false;
bool too_many_pending_clients_exist = false;
@@ -90,6 +124,8 @@ void MidiManager::StartSession(MidiManagerClient* client) {
}
void MidiManager::EndSession(MidiManagerClient* client) {
+ ReportUsage(Usage::SESSION_ENDED);
+
// At this point, |client| can be in the destruction process, and calling
// any method of |client| is dangerous.
base::AutoLock auto_lock(lock_);
@@ -129,6 +165,7 @@ void MidiManager::CompleteInitialization(Result result) {
}
void MidiManager::AddInputPort(const MidiPortInfo& info) {
+ ReportUsage(Usage::INPUT_PORT_ADDED);
base::AutoLock auto_lock(lock_);
input_ports_.push_back(info);
for (auto client : clients_)
@@ -136,6 +173,7 @@ void MidiManager::AddInputPort(const MidiPortInfo& info) {
}
void MidiManager::AddOutputPort(const MidiPortInfo& info) {
+ ReportUsage(Usage::OUTPUT_PORT_ADDED);
base::AutoLock auto_lock(lock_);
output_ports_.push_back(info);
for (auto client : clients_)
@@ -171,6 +209,13 @@ void MidiManager::ReceiveMidiData(
void MidiManager::CompleteInitializationInternal(Result result) {
TRACE_EVENT0("midi", "MidiManager::CompleteInitialization");
+ ReportUsage(Usage::INITIALIZED);
+ UMA_HISTOGRAM_ENUMERATION("Media.Midi.InputPorts",
+ static_cast<Sample>(input_ports_.size()),
+ kMaxUmaDevices + 1);
+ UMA_HISTOGRAM_ENUMERATION("Media.Midi.OutputPorts",
+ static_cast<Sample>(output_ports_.size()),
+ kMaxUmaDevices + 1);
base::AutoLock auto_lock(lock_);
DCHECK(clients_.empty());
diff --git a/chromium/media/midi/midi_manager.h b/chromium/media/midi/midi_manager.h
index 957c84d427f..83d96f2aa5a 100644
--- a/chromium/media/midi/midi_manager.h
+++ b/chromium/media/midi/midi_manager.h
@@ -151,6 +151,9 @@ class MIDI_EXPORT MidiManager {
return pending_clients_.size();
}
+ const MidiPortInfoList& input_ports() const { return input_ports_; }
+ const MidiPortInfoList& output_ports() const { return output_ports_; }
+
private:
void CompleteInitializationInternal(Result result);
void AddInitialPorts(MidiManagerClient* client);
diff --git a/chromium/media/midi/midi_manager_android.cc b/chromium/media/midi/midi_manager_android.cc
index ce3db4aa385..19140e5acc4 100644
--- a/chromium/media/midi/midi_manager_android.cc
+++ b/chromium/media/midi/midi_manager_android.cc
@@ -2,16 +2,164 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "media/midi/midi_manager_android.h"
+
+#include "base/android/build_info.h"
+#include "base/command_line.h"
#include "base/memory/scoped_ptr.h"
+#include "base/strings/stringprintf.h"
+#include "jni/MidiManagerAndroid_jni.h"
+#include "media/midi/midi_device_android.h"
#include "media/midi/midi_manager_usb.h"
+#include "media/midi/midi_output_port_android.h"
+#include "media/midi/midi_switches.h"
#include "media/midi/usb_midi_device_factory_android.h"
namespace media {
namespace midi {
MidiManager* MidiManager::Create() {
- return new MidiManagerUsb(
- scoped_ptr<UsbMidiDevice::Factory>(new UsbMidiDeviceFactoryAndroid));
+ auto sdk_version = base::android::BuildInfo::GetInstance()->sdk_int();
+ if (sdk_version <= base::android::SDK_VERSION_LOLLIPOP_MR1 ||
+ !base::CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kUseAndroidMidiApi)) {
+ return new MidiManagerUsb(
+ scoped_ptr<UsbMidiDevice::Factory>(new UsbMidiDeviceFactoryAndroid));
+ }
+
+ return new MidiManagerAndroid();
+}
+
+MidiManagerAndroid::MidiManagerAndroid() {}
+
+MidiManagerAndroid::~MidiManagerAndroid() {}
+
+void MidiManagerAndroid::StartInitialization() {
+ JNIEnv* env = base::android::AttachCurrentThread();
+
+ uintptr_t pointer = reinterpret_cast<uintptr_t>(this);
+ raw_manager_.Reset(Java_MidiManagerAndroid_create(
+ env, base::android::GetApplicationContext(), pointer));
+ scheduler_.reset(new MidiScheduler(this));
+ Java_MidiManagerAndroid_initialize(env, raw_manager_.obj());
+}
+
+void MidiManagerAndroid::DispatchSendMidiData(MidiManagerClient* client,
+ uint32 port_index,
+ const std::vector<uint8>& data,
+ double timestamp) {
+ if (port_index >= all_output_ports_.size()) {
+ // |port_index| is provided by a renderer so we can't believe that it is
+ // in the valid range.
+ return;
+ }
+ DCHECK_EQ(output_ports().size(), all_output_ports_.size());
+ if (output_ports()[port_index].state == MIDI_PORT_CONNECTED) {
+ // We treat send call as implicit open.
+ // TODO(yhirano): Implement explicit open operation from the renderer.
+ if (all_output_ports_[port_index]->Open()) {
+ SetOutputPortState(port_index, MIDI_PORT_OPENED);
+ } else {
+ // We cannot open the port. It's useless to send data to such a port.
+ return;
+ }
+ }
+
+  // all_output_ports_[port_index] is alive unless MidiManagerAndroid is
+  // deleted. The task posted to the MidiScheduler will be disposed safely
+  // on deleting the scheduler.
+ scheduler_->PostSendDataTask(
+ client, data.size(), timestamp,
+ base::Bind(&MidiOutputPortAndroid::Send,
+ base::Unretained(all_output_ports_[port_index]), data));
+}
+
+void MidiManagerAndroid::OnReceivedData(MidiInputPortAndroid* port,
+ const uint8* data,
+ size_t size,
+ base::TimeTicks timestamp) {
+ const auto i = input_port_to_index_.find(port);
+ DCHECK(input_port_to_index_.end() != i);
+ ReceiveMidiData(i->second, data, size, timestamp);
+}
+
+void MidiManagerAndroid::OnInitialized(JNIEnv* env,
+ jobject caller,
+ jobjectArray devices) {
+ jsize length = env->GetArrayLength(devices);
+
+ for (jsize i = 0; i < length; ++i) {
+ jobject raw_device = env->GetObjectArrayElement(devices, i);
+ AddDevice(make_scoped_ptr(new MidiDeviceAndroid(env, raw_device, this)));
+ }
+ CompleteInitialization(Result::OK);
+}
+
+void MidiManagerAndroid::OnAttached(JNIEnv* env,
+ jobject caller,
+ jobject raw_device) {
+ AddDevice(make_scoped_ptr(new MidiDeviceAndroid(env, raw_device, this)));
+}
+
+void MidiManagerAndroid::OnDetached(JNIEnv* env,
+ jobject caller,
+ jobject raw_device) {
+ for (const auto& device : devices_) {
+ if (device->HasRawDevice(env, raw_device)) {
+ for (const auto& port : device->input_ports()) {
+ DCHECK(input_port_to_index_.end() != input_port_to_index_.find(port));
+ size_t index = input_port_to_index_[port];
+ SetInputPortState(index, MIDI_PORT_DISCONNECTED);
+ }
+ for (const auto& port : device->output_ports()) {
+ DCHECK(output_port_to_index_.end() != output_port_to_index_.find(port));
+ size_t index = output_port_to_index_[port];
+ SetOutputPortState(index, MIDI_PORT_DISCONNECTED);
+ }
+ }
+ }
+}
+
+void MidiManagerAndroid::AddDevice(scoped_ptr<MidiDeviceAndroid> device) {
+ for (auto& port : device->input_ports()) {
+    // We implicitly open input ports here, because there is no signal
+    // from the renderer that tells us when to open.
+ // TODO(yhirano): Implement open operation in Blink.
+ MidiPortState state = port->Open() ? MIDI_PORT_OPENED : MIDI_PORT_CONNECTED;
+
+ const size_t index = all_input_ports_.size();
+ all_input_ports_.push_back(port);
+ // Port ID must be unique in a MIDI manager. This ID setting is
+ // sufficiently unique although there is no user-friendly meaning.
+ // TODO(yhirano): Use a hashed string as ID.
+ const std::string id(
+ base::StringPrintf("native:port-in-%ld", static_cast<long>(index)));
+
+ input_port_to_index_.insert(std::make_pair(port, index));
+ AddInputPort(MidiPortInfo(id, device->GetManufacturer(),
+ device->GetProductName(),
+ device->GetDeviceVersion(), state));
+ }
+ for (const auto& port : device->output_ports()) {
+ const size_t index = all_output_ports_.size();
+ all_output_ports_.push_back(port);
+
+ // Port ID must be unique in a MIDI manager. This ID setting is
+ // sufficiently unique although there is no user-friendly meaning.
+ // TODO(yhirano): Use a hashed string as ID.
+ const std::string id(
+ base::StringPrintf("native:port-out-%ld", static_cast<long>(index)));
+
+ output_port_to_index_.insert(std::make_pair(port, index));
+ AddOutputPort(
+ MidiPortInfo(id, device->GetManufacturer(), device->GetProductName(),
+ device->GetDeviceVersion(), MIDI_PORT_CONNECTED));
+ }
+ devices_.push_back(device.release());
+}
+
+bool MidiManagerAndroid::Register(JNIEnv* env) {
+ return RegisterNativesImpl(env);
}
} // namespace midi
diff --git a/chromium/media/midi/midi_manager_android.h b/chromium/media/midi/midi_manager_android.h
new file mode 100644
index 00000000000..196643ed048
--- /dev/null
+++ b/chromium/media/midi/midi_manager_android.h
@@ -0,0 +1,82 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_MIDI_MIDI_MANAGER_ANDROID_H_
+#define MEDIA_MIDI_MIDI_MANAGER_ANDROID_H_
+
+#include <jni.h>
+#include <vector>
+
+#include "base/android/scoped_java_ref.h"
+#include "base/basictypes.h"
+#include "base/containers/hash_tables.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/scoped_vector.h"
+#include "base/time/time.h"
+#include "media/midi/midi_input_port_android.h"
+#include "media/midi/midi_manager.h"
+#include "media/midi/midi_scheduler.h"
+
+namespace media {
+namespace midi {
+
+class MidiDeviceAndroid;
+class MidiOutputPortAndroid;
+
+// MidiManagerAndroid is a MidiManager subclass for Android M or newer. For
+// older android OSes, we use MidiManagerUsb.
+class MidiManagerAndroid final : public MidiManager,
+ public MidiInputPortAndroid::Delegate {
+ public:
+ MidiManagerAndroid();
+ ~MidiManagerAndroid() override;
+
+ // MidiManager implementation.
+ void StartInitialization() override;
+ void DispatchSendMidiData(MidiManagerClient* client,
+ uint32 port_index,
+ const std::vector<uint8>& data,
+ double timestamp) override;
+
+ // MidiInputPortAndroid::Delegate implementation.
+ void OnReceivedData(MidiInputPortAndroid*,
+ const uint8* data,
+ size_t size,
+ base::TimeTicks timestamp) override;
+
+ // Called from the Java world.
+ void OnInitialized(JNIEnv* env, jobject caller, jobjectArray devices);
+ void OnAttached(JNIEnv* env, jobject caller, jobject device);
+ void OnDetached(JNIEnv* env, jobject caller, jobject device);
+
+ static bool Register(JNIEnv* env);
+
+ private:
+ void AddDevice(scoped_ptr<MidiDeviceAndroid> device);
+ void AddInputPortAndroid(MidiInputPortAndroid* port,
+ MidiDeviceAndroid* device);
+ void AddOutputPortAndroid(MidiOutputPortAndroid* port,
+ MidiDeviceAndroid* device);
+
+ ScopedVector<MidiDeviceAndroid> devices_;
+ // All ports held in |devices_|. Each device has ownership of ports, but we
+ // can store pointers here because a device will keep its ports while it is
+ // alive.
+ std::vector<MidiInputPortAndroid*> all_input_ports_;
+ // A dictionary from a port to its index.
+ // input_port_to_index_[all_input_ports_[i]] == i for each valid |i|.
+ base::hash_map<MidiInputPortAndroid*, size_t> input_port_to_index_;
+
+ // Ditto for output ports.
+ std::vector<MidiOutputPortAndroid*> all_output_ports_;
+ base::hash_map<MidiOutputPortAndroid*, size_t> output_port_to_index_;
+
+ base::android::ScopedJavaGlobalRef<jobject> raw_manager_;
+ scoped_ptr<MidiScheduler> scheduler_;
+};
+
+} // namespace midi
+} // namespace media
+
+#endif // MEDIA_MIDI_MIDI_MANAGER_ANDROID_H_
diff --git a/chromium/media/midi/midi_manager_usb.cc b/chromium/media/midi/midi_manager_usb.cc
index f493b151c88..9e4778c3b39 100644
--- a/chromium/media/midi/midi_manager_usb.cc
+++ b/chromium/media/midi/midi_manager_usb.cc
@@ -137,7 +137,7 @@ bool MidiManagerUsb::AddPorts(UsbMidiDevice* device, int device_id) {
// sufficiently unique although there is no user-friendly meaning.
// TODO(yhirano): Use a hashed string as ID.
std::string id(
- base::StringPrintf("port-%d-%ld", device_id, static_cast<long>(j)));
+ base::StringPrintf("usb:port-%d-%ld", device_id, static_cast<long>(j)));
if (jacks[j].direction() == UsbMidiJack::DIRECTION_OUT) {
output_streams_.push_back(new UsbMidiOutputStream(jacks[j]));
AddOutputPort(MidiPortInfo(id, manufacturer, product_name, version,
diff --git a/chromium/media/midi/midi_manager_usb_unittest.cc b/chromium/media/midi/midi_manager_usb_unittest.cc
index 21dd2369262..e2205aa3b46 100644
--- a/chromium/media/midi/midi_manager_usb_unittest.cc
+++ b/chromium/media/midi/midi_manager_usb_unittest.cc
@@ -117,7 +117,8 @@ class FakeMidiManagerClient : public MidiManagerClient {
size_t size,
double timestamp) override {
logger_->AddLog("MidiManagerClient::ReceiveMidiData ");
- logger_->AddLog(base::StringPrintf("port_index = %d data =", port_index));
+ logger_->AddLog(
+ base::StringPrintf("usb:port_index = %d data =", port_index));
for (size_t i = 0; i < size; ++i)
logger_->AddLog(base::StringPrintf(" 0x%02x", data[i]));
logger_->AddLog("\n");
@@ -259,17 +260,17 @@ TEST_F(MidiManagerUsbTest, Initialize) {
EXPECT_EQ(Result::OK, GetInitializationResult());
ASSERT_EQ(1u, input_ports().size());
- EXPECT_EQ("port-0-2", input_ports()[0].id);
+ EXPECT_EQ("usb:port-0-2", input_ports()[0].id);
EXPECT_EQ("vendor1", input_ports()[0].manufacturer);
EXPECT_EQ("device1", input_ports()[0].name);
EXPECT_EQ("1.02", input_ports()[0].version);
ASSERT_EQ(2u, output_ports().size());
- EXPECT_EQ("port-0-0", output_ports()[0].id);
+ EXPECT_EQ("usb:port-0-0", output_ports()[0].id);
EXPECT_EQ("vendor1", output_ports()[0].manufacturer);
EXPECT_EQ("device1", output_ports()[0].name);
EXPECT_EQ("1.02", output_ports()[0].version);
- EXPECT_EQ("port-0-1", output_ports()[1].id);
+ EXPECT_EQ("usb:port-0-1", output_ports()[1].id);
EXPECT_EQ("vendor1", output_ports()[1].manufacturer);
EXPECT_EQ("device1", output_ports()[1].name);
EXPECT_EQ("1.02", output_ports()[1].version);
@@ -320,29 +321,29 @@ TEST_F(MidiManagerUsbTest, InitializeMultipleDevices) {
EXPECT_EQ(Result::OK, GetInitializationResult());
ASSERT_EQ(2u, input_ports().size());
- EXPECT_EQ("port-0-2", input_ports()[0].id);
+ EXPECT_EQ("usb:port-0-2", input_ports()[0].id);
EXPECT_EQ("vendor1", input_ports()[0].manufacturer);
EXPECT_EQ("device1", input_ports()[0].name);
EXPECT_EQ("1.02", input_ports()[0].version);
- EXPECT_EQ("port-1-2", input_ports()[1].id);
+ EXPECT_EQ("usb:port-1-2", input_ports()[1].id);
EXPECT_EQ("vendor2", input_ports()[1].manufacturer);
EXPECT_EQ("device2", input_ports()[1].name);
EXPECT_EQ("98.76", input_ports()[1].version);
ASSERT_EQ(4u, output_ports().size());
- EXPECT_EQ("port-0-0", output_ports()[0].id);
+ EXPECT_EQ("usb:port-0-0", output_ports()[0].id);
EXPECT_EQ("vendor1", output_ports()[0].manufacturer);
EXPECT_EQ("device1", output_ports()[0].name);
EXPECT_EQ("1.02", output_ports()[0].version);
- EXPECT_EQ("port-0-1", output_ports()[1].id);
+ EXPECT_EQ("usb:port-0-1", output_ports()[1].id);
EXPECT_EQ("vendor1", output_ports()[1].manufacturer);
EXPECT_EQ("device1", output_ports()[1].name);
EXPECT_EQ("1.02", output_ports()[1].version);
- EXPECT_EQ("port-1-0", output_ports()[2].id);
+ EXPECT_EQ("usb:port-1-0", output_ports()[2].id);
EXPECT_EQ("vendor2", output_ports()[2].manufacturer);
EXPECT_EQ("device2", output_ports()[2].name);
EXPECT_EQ("98.76", output_ports()[2].version);
- EXPECT_EQ("port-1-1", output_ports()[3].id);
+ EXPECT_EQ("usb:port-1-1", output_ports()[3].id);
EXPECT_EQ("vendor2", output_ports()[3].manufacturer);
EXPECT_EQ("device2", output_ports()[3].name);
EXPECT_EQ("98.76", output_ports()[3].version);
@@ -512,13 +513,14 @@ TEST_F(MidiManagerUsbTest, Receive) {
base::TimeTicks());
Finalize();
- EXPECT_EQ("UsbMidiDevice::GetDescriptors\n"
- "MidiManagerClient::ReceiveMidiData port_index = 0 "
- "data = 0x90 0x45 0x7f\n"
- "MidiManagerClient::ReceiveMidiData port_index = 0 "
- "data = 0xf0 0x00 0x01\n"
- "MidiManagerClient::ReceiveMidiData port_index = 0 data = 0xf7\n",
- logger_.TakeLog());
+ EXPECT_EQ(
+ "UsbMidiDevice::GetDescriptors\n"
+ "MidiManagerClient::ReceiveMidiData usb:port_index = 0 "
+ "data = 0x90 0x45 0x7f\n"
+ "MidiManagerClient::ReceiveMidiData usb:port_index = 0 "
+ "data = 0xf0 0x00 0x01\n"
+ "MidiManagerClient::ReceiveMidiData usb:port_index = 0 data = 0xf7\n",
+ logger_.TakeLog());
}
TEST_F(MidiManagerUsbTest, AttachDevice) {
diff --git a/chromium/media/midi/midi_manager_win.cc b/chromium/media/midi/midi_manager_win.cc
index 3f23ad9f0e4..40f2fb854e5 100644
--- a/chromium/media/midi/midi_manager_win.cc
+++ b/chromium/media/midi/midi_manager_win.cc
@@ -315,7 +315,8 @@ using PortNumberCache = base::hash_map<
std::priority_queue<uint32, std::vector<uint32>, std::greater<uint32>>,
MidiDeviceInfo::Hasher>;
-struct MidiInputDeviceState final : base::RefCounted<MidiInputDeviceState> {
+struct MidiInputDeviceState final
+ : base::RefCountedThreadSafe<MidiInputDeviceState> {
explicit MidiInputDeviceState(const MidiDeviceInfo& device_info)
: device_info(device_info),
midi_handle(kInvalidMidiInHandle),
@@ -342,11 +343,12 @@ struct MidiInputDeviceState final : base::RefCounted<MidiInputDeviceState> {
bool start_time_initialized;
private:
- friend class base::RefCounted<MidiInputDeviceState>;
+ friend class base::RefCountedThreadSafe<MidiInputDeviceState>;
~MidiInputDeviceState() {}
};
-struct MidiOutputDeviceState final : base::RefCounted<MidiOutputDeviceState> {
+struct MidiOutputDeviceState final
+ : base::RefCountedThreadSafe<MidiOutputDeviceState> {
explicit MidiOutputDeviceState(const MidiDeviceInfo& device_info)
: device_info(device_info),
midi_handle(kInvalidMidiOutHandle),
@@ -371,7 +373,7 @@ struct MidiOutputDeviceState final : base::RefCounted<MidiOutputDeviceState> {
volatile bool closed;
private:
- friend class base::RefCounted<MidiOutputDeviceState>;
+ friend class base::RefCountedThreadSafe<MidiOutputDeviceState>;
~MidiOutputDeviceState() {}
};
@@ -898,7 +900,7 @@ class MidiServiceWinImpl : public MidiServiceWin,
/////////////////////////////////////////////////////////////////////////////
void AssertOnSenderThread() {
- DCHECK_EQ(sender_thread_.thread_id(), base::PlatformThread::CurrentId());
+ DCHECK_EQ(sender_thread_.GetThreadId(), base::PlatformThread::CurrentId());
}
void SendOnSenderThread(uint32 port_number,
@@ -954,7 +956,7 @@ class MidiServiceWinImpl : public MidiServiceWin,
/////////////////////////////////////////////////////////////////////////////
void AssertOnTaskThread() {
- DCHECK_EQ(task_thread_.thread_id(), base::PlatformThread::CurrentId());
+ DCHECK_EQ(task_thread_.GetThreadId(), base::PlatformThread::CurrentId());
}
void UpdateDeviceListOnTaskThread() {
diff --git a/chromium/media/midi/midi_output_port_android.cc b/chromium/media/midi/midi_output_port_android.cc
new file mode 100644
index 00000000000..f82ad51d63b
--- /dev/null
+++ b/chromium/media/midi/midi_output_port_android.cc
@@ -0,0 +1,46 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/midi/midi_output_port_android.h"
+
+#include "base/android/jni_array.h"
+#include "jni/MidiOutputPortAndroid_jni.h"
+
+namespace media {
+namespace midi {
+
+MidiOutputPortAndroid::MidiOutputPortAndroid(JNIEnv* env, jobject raw)
+ : raw_port_(env, raw) {}
+MidiOutputPortAndroid::~MidiOutputPortAndroid() {
+ Close();
+}
+
+bool MidiOutputPortAndroid::Open() {
+ JNIEnv* env = base::android::AttachCurrentThread();
+ return Java_MidiOutputPortAndroid_open(env, raw_port_.obj());
+}
+
+void MidiOutputPortAndroid::Close() {
+ JNIEnv* env = base::android::AttachCurrentThread();
+ Java_MidiOutputPortAndroid_close(env, raw_port_.obj());
+}
+
+void MidiOutputPortAndroid::Send(const std::vector<uint8>& data) {
+ if (data.size() == 0) {
+ return;
+ }
+
+ JNIEnv* env = base::android::AttachCurrentThread();
+ ScopedJavaLocalRef<jbyteArray> data_to_pass =
+ base::android::ToJavaByteArray(env, &data[0], data.size());
+
+ Java_MidiOutputPortAndroid_send(env, raw_port_.obj(), data_to_pass.obj());
+}
+
+bool MidiOutputPortAndroid::Register(JNIEnv* env) {
+ return RegisterNativesImpl(env);
+}
+
+} // namespace midi
+} // namespace media
diff --git a/chromium/media/midi/midi_output_port_android.h b/chromium/media/midi/midi_output_port_android.h
new file mode 100644
index 00000000000..5fe4b9de184
--- /dev/null
+++ b/chromium/media/midi/midi_output_port_android.h
@@ -0,0 +1,36 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_MIDI_MIDI_OUTPUT_PORT_ANDROID_H_
+#define MEDIA_MIDI_MIDI_OUTPUT_PORT_ANDROID_H_
+
+#include <jni.h>
+#include <vector>
+
+#include "base/android/scoped_java_ref.h"
+#include "base/time/time.h"
+
+namespace media {
+namespace midi {
+
+class MidiOutputPortAndroid final {
+ public:
+ MidiOutputPortAndroid(JNIEnv* env, jobject raw);
+ ~MidiOutputPortAndroid();
+
+  // Returns true when the operation succeeds or the port is already open.
+ bool Open();
+ void Close();
+ void Send(const std::vector<uint8>& data);
+
+ static bool Register(JNIEnv* env);
+
+ private:
+ base::android::ScopedJavaGlobalRef<jobject> raw_port_;
+};
+
+} // namespace midi
+} // namespace media
+
+#endif // MEDIA_MIDI_MIDI_OUTPUT_PORT_ANDROID_H_
diff --git a/chromium/media/midi/midi_switches.cc b/chromium/media/midi/midi_switches.cc
new file mode 100644
index 00000000000..1fee12ce92d
--- /dev/null
+++ b/chromium/media/midi/midi_switches.cc
@@ -0,0 +1,14 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/midi/midi_switches.h"
+
+namespace switches {
+
+#if defined(OS_ANDROID)
+// Use Android Midi API for WebMIDI
+const char kUseAndroidMidiApi[] = "use-android-midi-api";
+#endif
+
+} // namespace switches
diff --git a/chromium/media/midi/midi_switches.h b/chromium/media/midi/midi_switches.h
new file mode 100644
index 00000000000..471d3a7be43
--- /dev/null
+++ b/chromium/media/midi/midi_switches.h
@@ -0,0 +1,21 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Defines all the "midi" command-line switches.
+
+#ifndef MEDIA_MIDI_MIDI_SWITCHES_H_
+#define MEDIA_MIDI_MIDI_SWITCHES_H_
+
+#include "build/build_config.h"
+#include "media/midi/midi_export.h"
+
+namespace switches {
+
+#if defined(OS_ANDROID)
+MIDI_EXPORT extern const char kUseAndroidMidiApi[];
+#endif
+
+} // namespace switches
+
+#endif // MEDIA_MIDI_MIDI_SWITCHES_H_
diff --git a/chromium/media/midi/midi_unittests.isolate b/chromium/media/midi/midi_unittests.isolate
index dca0d3596d1..bfbec4c7bef 100644
--- a/chromium/media/midi/midi_unittests.isolate
+++ b/chromium/media/midi/midi_unittests.isolate
@@ -41,7 +41,6 @@
'variables': {
'files': [
'../../testing/test_env.py',
- '<(PRODUCT_DIR)/midi_unittests<(EXECUTABLE_SUFFIX)',
],
},
}],
diff --git a/chromium/media/midi/usb_midi_device_factory_android.h b/chromium/media/midi/usb_midi_device_factory_android.h
index d81586a37b3..319ea629c1b 100644
--- a/chromium/media/midi/usb_midi_device_factory_android.h
+++ b/chromium/media/midi/usb_midi_device_factory_android.h
@@ -11,7 +11,6 @@
#include "base/android/scoped_java_ref.h"
#include "base/basictypes.h"
#include "base/callback.h"
-#include "base/memory/scoped_vector.h"
#include "base/memory/weak_ptr.h"
#include "media/midi/usb_midi_device.h"
#include "media/midi/usb_midi_export.h"
diff --git a/chromium/media/mojo/interfaces/BUILD.gn b/chromium/media/mojo/interfaces/BUILD.gn
index 1f2ae38da4e..1f9b19b13fc 100644
--- a/chromium/media/mojo/interfaces/BUILD.gn
+++ b/chromium/media/mojo/interfaces/BUILD.gn
@@ -9,8 +9,9 @@ mojom("interfaces") {
"content_decryption_module.mojom",
"decryptor.mojom",
"demuxer_stream.mojom",
- "media_renderer.mojom",
"media_types.mojom",
+ "renderer.mojom",
+ "service_factory.mojom",
]
if (is_chromeos) {
diff --git a/chromium/media/mojo/interfaces/content_decryption_module.mojom b/chromium/media/mojo/interfaces/content_decryption_module.mojom
index 3e217641ab2..ed4831aae9b 100644
--- a/chromium/media/mojo/interfaces/content_decryption_module.mojom
+++ b/chromium/media/mojo/interfaces/content_decryption_module.mojom
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-module mojo;
+module media.interfaces;
import "media/mojo/interfaces/decryptor.mojom";
@@ -26,7 +26,9 @@ enum CdmKeyStatus {
USABLE,
INTERNAL_ERROR,
EXPIRED,
- OUTPUT_NOT_ALLOWED
+ OUTPUT_RESTRICTED,
+ OUTPUT_DOWNSCALED,
+ KEY_STATUS_PENDING
};
// Transport layer of media::CdmConfig (see media/base/cdm_config.h).
diff --git a/chromium/media/mojo/interfaces/decryptor.mojom b/chromium/media/mojo/interfaces/decryptor.mojom
index 55091e4671c..9c5d6784251 100644
--- a/chromium/media/mojo/interfaces/decryptor.mojom
+++ b/chromium/media/mojo/interfaces/decryptor.mojom
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-module mojo;
+module media.interfaces;
import "media/mojo/interfaces/demuxer_stream.mojom";
import "media/mojo/interfaces/media_types.mojom";
@@ -26,8 +26,8 @@ interface Decryptor {
// Decrypts the |encrypted| buffer and returns the decrypt |status| and
// decrypted |buffer|.
// At most one decrypt call is allowed at any time for a |stream_type|.
- Decrypt(DemuxerStream.Type stream_type, MediaDecoderBuffer encrypted)
- => (Status status, MediaDecoderBuffer? buffer);
+ Decrypt(DemuxerStream.Type stream_type, DecoderBuffer encrypted)
+ => (Status status, DecoderBuffer? buffer);
// Cancels any pending decrypt for |stream_type| with SUCCESS.
CancelDecrypt(DemuxerStream.Type stream_type);
@@ -45,10 +45,10 @@ interface Decryptor {
// been successfully initialized.
// At most one decrypt-and-decode call is allowed at any time for a
// |stream_type|.
- DecryptAndDecodeAudio(MediaDecoderBuffer encrypted)
+ DecryptAndDecodeAudio(DecoderBuffer encrypted)
=> (Status status, array<AudioBuffer>? audio_buffers);
DecryptAndDecodeVideo(
- MediaDecoderBuffer encrypted) => (Status status, VideoFrame? video_frame);
+ DecoderBuffer encrypted) => (Status status, VideoFrame? video_frame);
// Resets the decoder for |stream_type| to a clean initialized state and
// cancels any pending decrypt-and-decode operations immediately with ERROR.
diff --git a/chromium/media/mojo/interfaces/demuxer_stream.mojom b/chromium/media/mojo/interfaces/demuxer_stream.mojom
index 24386964d3e..5b880b747c4 100644
--- a/chromium/media/mojo/interfaces/demuxer_stream.mojom
+++ b/chromium/media/mojo/interfaces/demuxer_stream.mojom
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-module mojo;
+module media.interfaces;
import "media/mojo/interfaces/media_types.mojom";
@@ -35,7 +35,7 @@ interface DemuxerStream {
AudioDecoderConfig? audio_config,
VideoDecoderConfig? video_config);
- // Requests a MediaDecoderBuffer from this stream for decoding and rendering.
+ // Requests a DecoderBuffer from this stream for decoding and rendering.
// See media::DemuxerStream::ReadCB for a general explanation of the fields.
//
// Notes on the callback:
@@ -50,7 +50,7 @@ interface DemuxerStream {
// into the DataPipe given to Initialize() once DataPipe supports framed data
// in a nicer fashion.
Read() => (Status status,
- MediaDecoderBuffer? buffer,
+ DecoderBuffer? buffer,
AudioDecoderConfig? audio_config,
VideoDecoderConfig? video_config);
};
diff --git a/chromium/media/mojo/interfaces/media_types.mojom b/chromium/media/mojo/interfaces/media_types.mojom
index ff87cec727f..a16203ed551 100644
--- a/chromium/media/mojo/interfaces/media_types.mojom
+++ b/chromium/media/mojo/interfaces/media_types.mojom
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-module mojo;
+module media.interfaces;
import "ui/mojo/geometry/geometry.mojom";
@@ -89,19 +89,35 @@ enum SampleFormat {
Max = PlanarS32,
};
-// See media/base/video_frame.h for descriptions.
-// Kept in sync with media::VideoFrame::Format via static_asserts.
+// See media/base/video_types.h for descriptions.
+// Kept in sync with media::VideoPixelFormat via static_asserts.
enum VideoFormat {
UNKNOWN = 0,
- YV12,
I420,
+ YV12,
YV16,
YV12A,
YV24,
NV12,
+ NV21,
+ UYVY,
+ YUY2,
ARGB,
XRGB,
- FORMAT_MAX = XRGB,
+ RGB24,
+ RGB32,
+ MJPEG,
+ MT21,
+ FORMAT_MAX = MT21,
+};
+
+// Kept in sync with media::ColorSpace via static_asserts.
+enum ColorSpace {
+ UNSPECIFIED = 0,
+ JPEG = 1,
+ HD_REC709 = 2,
+ SD_REC601 = 3,
+ MAX = SD_REC601,
};
// See media/base/video_decoder_config.h for descriptions.
@@ -115,7 +131,8 @@ enum VideoCodec {
Theora,
VP8,
VP9,
- Max = VP9,
+ HEVC,
+ Max = HEVC,
};
// See media/base/video_decoder_config.h for descriptions.
@@ -164,9 +181,10 @@ struct VideoDecoderConfig {
VideoCodec codec;
VideoCodecProfile profile;
VideoFormat format;
- Size coded_size;
- Rect visible_rect;
- Size natural_size;
+ ColorSpace color_space;
+ mojo.Size coded_size;
+ mojo.Rect visible_rect;
+ mojo.Size natural_size;
array<uint8>? extra_data;
bool is_encrypted;
};
@@ -187,8 +205,7 @@ struct DecryptConfig {
};
// This defines a mojo transport format for media::DecoderBuffer.
-struct MediaDecoderBuffer {
- // See media/base/buffers.h for details.
+struct DecoderBuffer {
int64 timestamp_usec;
int64 duration_usec;
diff --git a/chromium/media/mojo/interfaces/media_renderer.mojom b/chromium/media/mojo/interfaces/renderer.mojom
index cbf558f176f..422bc72e02d 100644
--- a/chromium/media/mojo/interfaces/media_renderer.mojom
+++ b/chromium/media/mojo/interfaces/renderer.mojom
@@ -2,22 +2,20 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-module mojo;
+module media.interfaces;
import "media/mojo/interfaces/demuxer_stream.mojom";
import "media/mojo/interfaces/media_types.mojom";
-interface MediaRenderer {
+interface Renderer {
// Initializes the Renderer with one or both of an audio and video stream,
- // calling back upon completion.
- // NOTE: If an error occurs, MediaRendererClient::OnError() will be called
- // before the callback is executed.
- Initialize(MediaRendererClient client,
+ // executing the callback with whether the initialization succeeded.
+ Initialize(RendererClient client,
DemuxerStream? audio,
- DemuxerStream? video) => ();
+ DemuxerStream? video) => (bool success);
// Discards any buffered data, executing callback when completed.
- // NOTE: If an error occurs, MediaRendererClient::OnError() can be called
+ // NOTE: If an error occurs, RendererClient::OnError() can be called
// before the callback is executed.
Flush() => ();
@@ -35,7 +33,7 @@ interface MediaRenderer {
SetCdm(int32 cdm_id) => (bool success);
};
-interface MediaRendererClient {
+interface RendererClient {
// Called to report media time advancement by |time_usec|.
// |time_usec| and |max_time_usec| can be used to interpolate time between
// calls to OnTimeUpdate().
diff --git a/chromium/media/mojo/interfaces/service_factory.mojom b/chromium/media/mojo/interfaces/service_factory.mojom
new file mode 100644
index 00000000000..6308792b1ee
--- /dev/null
+++ b/chromium/media/mojo/interfaces/service_factory.mojom
@@ -0,0 +1,18 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+module media.interfaces;
+
+import "media/mojo/interfaces/content_decryption_module.mojom";
+import "media/mojo/interfaces/renderer.mojom";
+
+// A factory interface to create media services. Renderers can only access
+// ContentDecryptionModules created in the same factory.
+interface ServiceFactory {
+ // Creates a ContentDecryptionModule service.
+ CreateCdm(ContentDecryptionModule& cdm);
+
+ // Creates a Renderer service.
+ CreateRenderer(Renderer& renderer);
+};
diff --git a/chromium/media/mojo/services/BUILD.gn b/chromium/media/mojo/services/BUILD.gn
index c55ce35de2c..0df256abb61 100644
--- a/chromium/media/mojo/services/BUILD.gn
+++ b/chromium/media/mojo/services/BUILD.gn
@@ -101,7 +101,7 @@ source_set("cdm_service") {
configs += [ "//build/config/compiler:no_size_t_to_int_warning" ]
}
-# MediaRenderer service; without a specific config.
+# Renderer service; without a specific config.
source_set("renderer_service_generic") {
sources = [
"demuxer_stream_provider_shim.cc",
@@ -124,7 +124,7 @@ source_set("renderer_service_generic") {
]
}
-# MediaRenderer service with a default config.
+# Renderer service with a default config.
source_set("renderer_service") {
sources = [
"default_mojo_media_client.cc",
@@ -144,6 +144,8 @@ source_set("application") {
sources = [
"mojo_media_application.cc",
"mojo_media_application.h",
+ "service_factory_impl.cc",
+ "service_factory_impl.h",
]
public_configs = [ ":enable_mojo_media_config" ]
diff --git a/chromium/media/mojo/services/demuxer_stream_provider_shim.cc b/chromium/media/mojo/services/demuxer_stream_provider_shim.cc
index c3dbc2c7b82..2c16cc52a4b 100644
--- a/chromium/media/mojo/services/demuxer_stream_provider_shim.cc
+++ b/chromium/media/mojo/services/demuxer_stream_provider_shim.cc
@@ -11,8 +11,8 @@
namespace media {
DemuxerStreamProviderShim::DemuxerStreamProviderShim(
- mojo::DemuxerStreamPtr audio,
- mojo::DemuxerStreamPtr video,
+ interfaces::DemuxerStreamPtr audio,
+ interfaces::DemuxerStreamPtr video,
const base::Closure& demuxer_ready_cb)
: demuxer_ready_cb_(demuxer_ready_cb),
streams_ready_(0),
diff --git a/chromium/media/mojo/services/demuxer_stream_provider_shim.h b/chromium/media/mojo/services/demuxer_stream_provider_shim.h
index ec60fe4c88a..9fdab66f900 100644
--- a/chromium/media/mojo/services/demuxer_stream_provider_shim.h
+++ b/chromium/media/mojo/services/demuxer_stream_provider_shim.h
@@ -13,14 +13,14 @@
namespace media {
-// DemuxerStreamProvider shim for mojo::DemuxerStreams.
+// DemuxerStreamProvider shim for interfaces::DemuxerStreams.
class DemuxerStreamProviderShim : public DemuxerStreamProvider {
public:
// Constructs the shim; at least a single audio or video stream must be
// provided. |demuxer_ready_cb| will be called once the streams have been
// initialized. Calling any method before then is an error.
- DemuxerStreamProviderShim(mojo::DemuxerStreamPtr audio,
- mojo::DemuxerStreamPtr video,
+ DemuxerStreamProviderShim(interfaces::DemuxerStreamPtr audio,
+ interfaces::DemuxerStreamPtr video,
const base::Closure& demuxer_ready_cb);
~DemuxerStreamProviderShim() override;
@@ -28,8 +28,9 @@ class DemuxerStreamProviderShim : public DemuxerStreamProvider {
DemuxerStream* GetStream(DemuxerStream::Type type) override;
private:
- // Called as each mojo::DemuxerStream becomes ready. Once all streams are
- // ready it will fire the |demuxer_ready_cb_| provided during construction.
+ // Called as each interfaces::DemuxerStream becomes ready. Once all streams
+ // are ready it will fire the |demuxer_ready_cb_| provided during
+ // construction.
void OnStreamReady();
// Stored copy the ready callback provided during construction; cleared once
diff --git a/chromium/media/mojo/services/media_apptest.cc b/chromium/media/mojo/services/media_apptest.cc
index 3741f373799..958c66299ac 100644
--- a/chromium/media/mojo/services/media_apptest.cc
+++ b/chromium/media/mojo/services/media_apptest.cc
@@ -12,7 +12,8 @@
#include "media/base/test_helpers.h"
#include "media/cdm/key_system_names.h"
#include "media/mojo/interfaces/content_decryption_module.mojom.h"
-#include "media/mojo/interfaces/media_renderer.mojom.h"
+#include "media/mojo/interfaces/renderer.mojom.h"
+#include "media/mojo/interfaces/service_factory.mojom.h"
#include "media/mojo/services/media_type_converters.h"
#include "media/mojo/services/mojo_demuxer_stream_impl.h"
#include "mojo/application/public/cpp/application_connection.h"
@@ -31,25 +32,25 @@ namespace {
const char kInvalidKeySystem[] = "invalid.key.system";
const char kSecurityOrigin[] = "http://foo.com";
-class MockMediaRendererClient : public mojo::MediaRendererClient {
+class MockRendererClient : public interfaces::RendererClient {
public:
- MockMediaRendererClient(){};
- ~MockMediaRendererClient() override{};
+ MockRendererClient(){};
+ ~MockRendererClient() override{};
- // mojo::MediaRendererClient implementation.
+ // interfaces::RendererClient implementation.
MOCK_METHOD2(OnTimeUpdate, void(int64_t time_usec, int64_t max_time_usec));
- MOCK_METHOD1(OnBufferingStateChange, void(mojo::BufferingState state));
+ MOCK_METHOD1(OnBufferingStateChange, void(interfaces::BufferingState state));
MOCK_METHOD0(OnEnded, void());
MOCK_METHOD0(OnError, void());
private:
- DISALLOW_COPY_AND_ASSIGN(MockMediaRendererClient);
+ DISALLOW_COPY_AND_ASSIGN(MockRendererClient);
};
class MediaAppTest : public mojo::test::ApplicationTestBase {
public:
MediaAppTest()
- : media_renderer_client_binding_(&media_renderer_client_),
+ : renderer_client_binding_(&renderer_client_),
video_demuxer_stream_(DemuxerStream::VIDEO) {}
~MediaAppTest() override {}
@@ -58,11 +59,13 @@ class MediaAppTest : public mojo::test::ApplicationTestBase {
mojo::URLRequestPtr request = mojo::URLRequest::New();
request->url = "mojo:media";
- mojo::ApplicationConnection* connection =
- application_impl()->ConnectToApplication(request.Pass());
+ connection_ = application_impl()->ConnectToApplication(request.Pass());
+ connection_->SetRemoteServiceProviderConnectionErrorHandler(
+ base::Bind(&MediaAppTest::ConnectionClosed, base::Unretained(this)));
- connection->ConnectToService(&cdm_);
- connection->ConnectToService(&media_renderer_);
+ connection_->ConnectToService(&service_factory_);
+ service_factory_->CreateCdm(mojo::GetProxy(&cdm_));
+ service_factory_->CreateRenderer(mojo::GetProxy(&renderer_));
run_loop_.reset(new base::RunLoop());
}
@@ -70,7 +73,7 @@ class MediaAppTest : public mojo::test::ApplicationTestBase {
// MOCK_METHOD* doesn't support move only types. Work around this by having
// an extra method.
MOCK_METHOD1(OnCdmInitializedInternal, void(bool result));
- void OnCdmInitialized(mojo::CdmPromiseResultPtr result) {
+ void OnCdmInitialized(interfaces::CdmPromiseResultPtr result) {
OnCdmInitializedInternal(result->success);
}
@@ -79,41 +82,47 @@ class MediaAppTest : public mojo::test::ApplicationTestBase {
.Times(Exactly(1))
.WillOnce(InvokeWithoutArgs(run_loop_.get(), &base::RunLoop::Quit));
cdm_->Initialize(
- key_system, kSecurityOrigin, mojo::CdmConfig::From(CdmConfig()), 1,
- base::Bind(&MediaAppTest::OnCdmInitialized, base::Unretained(this)));
+ key_system, kSecurityOrigin, interfaces::CdmConfig::From(CdmConfig()),
+ 1, base::Bind(&MediaAppTest::OnCdmInitialized, base::Unretained(this)));
}
- MOCK_METHOD0(OnRendererInitialized, void());
+ MOCK_METHOD1(OnRendererInitialized, void(bool));
- void InitializeRenderer(const VideoDecoderConfig& video_config) {
+ void InitializeRenderer(const VideoDecoderConfig& video_config,
+ bool expected_result) {
video_demuxer_stream_.set_video_decoder_config(video_config);
- mojo::DemuxerStreamPtr video_stream;
+ interfaces::DemuxerStreamPtr video_stream;
new MojoDemuxerStreamImpl(&video_demuxer_stream_, GetProxy(&video_stream));
- mojo::MediaRendererClientPtr client_ptr;
- media_renderer_client_binding_.Bind(GetProxy(&client_ptr));
+ interfaces::RendererClientPtr client_ptr;
+ renderer_client_binding_.Bind(GetProxy(&client_ptr));
- EXPECT_CALL(*this, OnRendererInitialized())
+ EXPECT_CALL(*this, OnRendererInitialized(expected_result))
.Times(Exactly(1))
- .WillOnce(Invoke(run_loop_.get(), &base::RunLoop::Quit));
- media_renderer_->Initialize(client_ptr.Pass(), nullptr, video_stream.Pass(),
- base::Bind(&MediaAppTest::OnRendererInitialized,
- base::Unretained(this)));
+ .WillOnce(InvokeWithoutArgs(run_loop_.get(), &base::RunLoop::Quit));
+ renderer_->Initialize(client_ptr.Pass(), nullptr, video_stream.Pass(),
+ base::Bind(&MediaAppTest::OnRendererInitialized,
+ base::Unretained(this)));
}
+ MOCK_METHOD0(ConnectionClosed, void());
+
protected:
scoped_ptr<base::RunLoop> run_loop_;
- mojo::ContentDecryptionModulePtr cdm_;
- mojo::MediaRendererPtr media_renderer_;
+ interfaces::ServiceFactoryPtr service_factory_;
+ interfaces::ContentDecryptionModulePtr cdm_;
+ interfaces::RendererPtr renderer_;
- StrictMock<MockMediaRendererClient> media_renderer_client_;
- mojo::Binding<mojo::MediaRendererClient> media_renderer_client_binding_;
+ StrictMock<MockRendererClient> renderer_client_;
+ mojo::Binding<interfaces::RendererClient> renderer_client_binding_;
StrictMock<MockDemuxerStream> video_demuxer_stream_;
private:
+ scoped_ptr<mojo::ApplicationConnection> connection_;
+
DISALLOW_COPY_AND_ASSIGN(MediaAppTest);
};
@@ -133,13 +142,27 @@ TEST_F(MediaAppTest, InitializeCdm_InvalidKeySystem) {
}
TEST_F(MediaAppTest, InitializeRenderer_Success) {
- InitializeRenderer(TestVideoConfig::Normal());
+ InitializeRenderer(TestVideoConfig::Normal(), true);
run_loop_->Run();
}
TEST_F(MediaAppTest, InitializeRenderer_InvalidConfig) {
- EXPECT_CALL(media_renderer_client_, OnError());
- InitializeRenderer(TestVideoConfig::Invalid());
+ InitializeRenderer(TestVideoConfig::Invalid(), false);
+ run_loop_->Run();
+}
+
+TEST_F(MediaAppTest, Lifetime) {
+ // Disconnecting CDM and Renderer services doesn't terminate the app.
+ cdm_.reset();
+ renderer_.reset();
+
+ // Disconnecting ServiceFactory service should terminate the app, which will
+ // close the connection.
+ EXPECT_CALL(*this, ConnectionClosed())
+ .Times(Exactly(1))
+ .WillOnce(Invoke(run_loop_.get(), &base::RunLoop::Quit));
+ service_factory_.reset();
+
run_loop_->Run();
}
diff --git a/chromium/media/mojo/services/media_type_converters.cc b/chromium/media/mojo/services/media_type_converters.cc
index 1d89a6f371c..6dd84fb3bca 100644
--- a/chromium/media/mojo/services/media_type_converters.cc
+++ b/chromium/media/mojo/services/media_type_converters.cc
@@ -18,16 +18,18 @@
namespace mojo {
-#define ASSERT_ENUM_EQ(media_enum, media_prefix, mojo_prefix, value) \
- static_assert(media::media_prefix##value == \
- static_cast<media::media_enum>(mojo_prefix##value), \
- "Mismatched enum: " #media_prefix #value \
+#define ASSERT_ENUM_EQ(media_enum, media_prefix, mojo_prefix, value) \
+ static_assert(media::media_prefix##value == \
+ static_cast<media::media_enum>( \
+ media::interfaces::mojo_prefix##value), \
+ "Mismatched enum: " #media_prefix #value \
" != " #mojo_prefix #value)
-#define ASSERT_ENUM_EQ_RAW(media_enum, media_enum_value, mojo_enum_value) \
- static_assert(media::media_enum_value == \
- static_cast<media::media_enum>(mojo_enum_value), \
- "Mismatched enum: " #media_enum_value " != " #mojo_enum_value)
+#define ASSERT_ENUM_EQ_RAW(media_enum, media_enum_value, mojo_enum_value) \
+ static_assert( \
+ media::media_enum_value == \
+ static_cast<media::media_enum>(media::interfaces::mojo_enum_value), \
+ "Mismatched enum: " #media_enum_value " != " #mojo_enum_value)
// BufferingState.
ASSERT_ENUM_EQ(BufferingState, BUFFERING_, BUFFERING_STATE_, HAVE_NOTHING);
@@ -127,26 +129,39 @@ ASSERT_ENUM_EQ_RAW(DemuxerStream::Status,
DemuxerStream::STATUS_CONFIG_CHANGED);
// VideoFormat.
-ASSERT_ENUM_EQ_RAW(VideoFrame::Format,
- VideoFrame::UNKNOWN,
+ASSERT_ENUM_EQ_RAW(VideoPixelFormat,
+ PIXEL_FORMAT_UNKNOWN,
VIDEO_FORMAT_UNKNOWN);
-ASSERT_ENUM_EQ_RAW(VideoFrame::Format, VideoFrame::YV12, VIDEO_FORMAT_YV12);
-ASSERT_ENUM_EQ_RAW(VideoFrame::Format, VideoFrame::I420, VIDEO_FORMAT_I420);
-ASSERT_ENUM_EQ_RAW(VideoFrame::Format, VideoFrame::YV16, VIDEO_FORMAT_YV16);
-ASSERT_ENUM_EQ_RAW(VideoFrame::Format, VideoFrame::YV12A, VIDEO_FORMAT_YV12A);
-ASSERT_ENUM_EQ_RAW(VideoFrame::Format, VideoFrame::YV24, VIDEO_FORMAT_YV24);
-#if defined(OS_MACOSX)
-ASSERT_ENUM_EQ_RAW(VideoFrame::Format, VideoFrame::NV12, VIDEO_FORMAT_NV12);
-#endif
-ASSERT_ENUM_EQ_RAW(VideoFrame::Format, VideoFrame::ARGB, VIDEO_FORMAT_ARGB);
-ASSERT_ENUM_EQ_RAW(VideoFrame::Format, VideoFrame::XRGB, VIDEO_FORMAT_XRGB);
-ASSERT_ENUM_EQ_RAW(VideoFrame::Format,
- VideoFrame::FORMAT_MAX,
- VIDEO_FORMAT_FORMAT_MAX);
+ASSERT_ENUM_EQ_RAW(VideoPixelFormat, PIXEL_FORMAT_I420, VIDEO_FORMAT_I420);
+ASSERT_ENUM_EQ_RAW(VideoPixelFormat, PIXEL_FORMAT_YV12, VIDEO_FORMAT_YV12);
+ASSERT_ENUM_EQ_RAW(VideoPixelFormat, PIXEL_FORMAT_YV16, VIDEO_FORMAT_YV16);
+ASSERT_ENUM_EQ_RAW(VideoPixelFormat, PIXEL_FORMAT_YV12A, VIDEO_FORMAT_YV12A);
+ASSERT_ENUM_EQ_RAW(VideoPixelFormat, PIXEL_FORMAT_YV24, VIDEO_FORMAT_YV24);
+ASSERT_ENUM_EQ_RAW(VideoPixelFormat, PIXEL_FORMAT_NV12, VIDEO_FORMAT_NV12);
+ASSERT_ENUM_EQ_RAW(VideoPixelFormat, PIXEL_FORMAT_NV21, VIDEO_FORMAT_NV21);
+ASSERT_ENUM_EQ_RAW(VideoPixelFormat, PIXEL_FORMAT_UYVY, VIDEO_FORMAT_UYVY);
+ASSERT_ENUM_EQ_RAW(VideoPixelFormat, PIXEL_FORMAT_YUY2, VIDEO_FORMAT_YUY2);
+ASSERT_ENUM_EQ_RAW(VideoPixelFormat, PIXEL_FORMAT_ARGB, VIDEO_FORMAT_ARGB);
+ASSERT_ENUM_EQ_RAW(VideoPixelFormat, PIXEL_FORMAT_XRGB, VIDEO_FORMAT_XRGB);
+ASSERT_ENUM_EQ_RAW(VideoPixelFormat, PIXEL_FORMAT_RGB24, VIDEO_FORMAT_RGB24);
+ASSERT_ENUM_EQ_RAW(VideoPixelFormat, PIXEL_FORMAT_RGB32, VIDEO_FORMAT_RGB32);
+ASSERT_ENUM_EQ_RAW(VideoPixelFormat, PIXEL_FORMAT_MJPEG, VIDEO_FORMAT_MJPEG);
+ASSERT_ENUM_EQ_RAW(VideoPixelFormat, PIXEL_FORMAT_MT21, VIDEO_FORMAT_MT21);
+ASSERT_ENUM_EQ_RAW(VideoPixelFormat, PIXEL_FORMAT_MAX, VIDEO_FORMAT_FORMAT_MAX);
+
+// ColorSpace.
+ASSERT_ENUM_EQ_RAW(ColorSpace,
+ COLOR_SPACE_UNSPECIFIED,
+ COLOR_SPACE_UNSPECIFIED);
+ASSERT_ENUM_EQ_RAW(ColorSpace, COLOR_SPACE_JPEG, COLOR_SPACE_JPEG);
+ASSERT_ENUM_EQ_RAW(ColorSpace, COLOR_SPACE_HD_REC709, COLOR_SPACE_HD_REC709);
+ASSERT_ENUM_EQ_RAW(ColorSpace, COLOR_SPACE_SD_REC601, COLOR_SPACE_SD_REC601);
+ASSERT_ENUM_EQ_RAW(ColorSpace, COLOR_SPACE_MAX, COLOR_SPACE_MAX);
// VideoCodec
ASSERT_ENUM_EQ_RAW(VideoCodec, kUnknownVideoCodec, VIDEO_CODEC_UNKNOWN);
ASSERT_ENUM_EQ(VideoCodec, kCodec, VIDEO_CODEC_, H264);
+ASSERT_ENUM_EQ(VideoCodec, kCodec, VIDEO_CODEC_, HEVC);
ASSERT_ENUM_EQ(VideoCodec, kCodec, VIDEO_CODEC_, VC1);
ASSERT_ENUM_EQ(VideoCodec, kCodec, VIDEO_CODEC_, MPEG2);
ASSERT_ENUM_EQ(VideoCodec, kCodec, VIDEO_CODEC_, MPEG4);
@@ -210,11 +225,11 @@ ASSERT_ENUM_EQ(VideoCodecProfile,
VIDEO_CODEC_PROFILE_MAX);
// CdmException
-#define ASSERT_CDM_EXCEPTION(value) \
- static_assert( \
- media::MediaKeys::value == \
- static_cast<media::MediaKeys::Exception>(CDM_EXCEPTION_##value), \
- "Mismatched CDM Exception")
+#define ASSERT_CDM_EXCEPTION(value) \
+ static_assert(media::MediaKeys::value == \
+ static_cast<media::MediaKeys::Exception>( \
+ media::interfaces::CDM_EXCEPTION_##value), \
+ "Mismatched CDM Exception")
ASSERT_CDM_EXCEPTION(NOT_SUPPORTED_ERROR);
ASSERT_CDM_EXCEPTION(INVALID_STATE_ERROR);
ASSERT_CDM_EXCEPTION(INVALID_ACCESS_ERROR);
@@ -224,20 +239,22 @@ ASSERT_CDM_EXCEPTION(CLIENT_ERROR);
ASSERT_CDM_EXCEPTION(OUTPUT_ERROR);
// CDM Session Type
-#define ASSERT_CDM_SESSION_TYPE(value) \
- static_assert(media::MediaKeys::value == \
- static_cast<media::MediaKeys::SessionType>( \
- ContentDecryptionModule::SESSION_TYPE_##value), \
+#define ASSERT_CDM_SESSION_TYPE(value) \
+ static_assert(media::MediaKeys::value == \
+ static_cast<media::MediaKeys::SessionType>( \
+ media::interfaces::ContentDecryptionModule:: \
+ SESSION_TYPE_##value), \
"Mismatched CDM Session Type")
ASSERT_CDM_SESSION_TYPE(TEMPORARY_SESSION);
ASSERT_CDM_SESSION_TYPE(PERSISTENT_LICENSE_SESSION);
ASSERT_CDM_SESSION_TYPE(PERSISTENT_RELEASE_MESSAGE_SESSION);
// CDM InitDataType
-#define ASSERT_CDM_INIT_DATA_TYPE(value) \
- static_assert(media::EmeInitDataType::value == \
- static_cast<media::EmeInitDataType>( \
- ContentDecryptionModule::INIT_DATA_TYPE_##value), \
+#define ASSERT_CDM_INIT_DATA_TYPE(value) \
+ static_assert(media::EmeInitDataType::value == \
+ static_cast<media::EmeInitDataType>( \
+ media::interfaces::ContentDecryptionModule:: \
+ INIT_DATA_TYPE_##value), \
"Mismatched CDM Init Data Type")
ASSERT_CDM_INIT_DATA_TYPE(UNKNOWN);
ASSERT_CDM_INIT_DATA_TYPE(WEBM);
@@ -248,28 +265,31 @@ ASSERT_CDM_INIT_DATA_TYPE(KEYIDS);
#define ASSERT_CDM_KEY_STATUS(value) \
static_assert(media::CdmKeyInformation::value == \
static_cast<media::CdmKeyInformation::KeyStatus>( \
- CDM_KEY_STATUS_##value), \
+ media::interfaces::CDM_KEY_STATUS_##value), \
"Mismatched CDM Key Status")
ASSERT_CDM_KEY_STATUS(USABLE);
ASSERT_CDM_KEY_STATUS(INTERNAL_ERROR);
ASSERT_CDM_KEY_STATUS(EXPIRED);
-ASSERT_CDM_KEY_STATUS(OUTPUT_NOT_ALLOWED);
+ASSERT_CDM_KEY_STATUS(OUTPUT_RESTRICTED);
+ASSERT_CDM_KEY_STATUS(OUTPUT_DOWNSCALED);
+ASSERT_CDM_KEY_STATUS(KEY_STATUS_PENDING);
// CDM Message Type
-#define ASSERT_CDM_MESSAGE_TYPE(value) \
- static_assert( \
- media::MediaKeys::value == static_cast<media::MediaKeys::MessageType>( \
- CDM_MESSAGE_TYPE_##value), \
- "Mismatched CDM Message Type")
+#define ASSERT_CDM_MESSAGE_TYPE(value) \
+ static_assert(media::MediaKeys::value == \
+ static_cast<media::MediaKeys::MessageType>( \
+ media::interfaces::CDM_MESSAGE_TYPE_##value), \
+ "Mismatched CDM Message Type")
ASSERT_CDM_MESSAGE_TYPE(LICENSE_REQUEST);
ASSERT_CDM_MESSAGE_TYPE(LICENSE_RENEWAL);
ASSERT_CDM_MESSAGE_TYPE(LICENSE_RELEASE);
// static
-SubsampleEntryPtr
-TypeConverter<SubsampleEntryPtr, media::SubsampleEntry>::Convert(
- const media::SubsampleEntry& input) {
- SubsampleEntryPtr mojo_subsample_entry(SubsampleEntry::New());
+media::interfaces::SubsampleEntryPtr TypeConverter<
+ media::interfaces::SubsampleEntryPtr,
+ media::SubsampleEntry>::Convert(const media::SubsampleEntry& input) {
+ media::interfaces::SubsampleEntryPtr mojo_subsample_entry(
+ media::interfaces::SubsampleEntry::New());
mojo_subsample_entry->clear_bytes = input.clear_bytes;
mojo_subsample_entry->cypher_bytes = input.cypher_bytes;
return mojo_subsample_entry.Pass();
@@ -277,38 +297,43 @@ TypeConverter<SubsampleEntryPtr, media::SubsampleEntry>::Convert(
// static
media::SubsampleEntry
-TypeConverter<media::SubsampleEntry, SubsampleEntryPtr>::Convert(
- const SubsampleEntryPtr& input) {
+TypeConverter<media::SubsampleEntry, media::interfaces::SubsampleEntryPtr>::
+ Convert(const media::interfaces::SubsampleEntryPtr& input) {
return media::SubsampleEntry(input->clear_bytes, input->cypher_bytes);
}
// static
-DecryptConfigPtr TypeConverter<DecryptConfigPtr, media::DecryptConfig>::Convert(
- const media::DecryptConfig& input) {
- DecryptConfigPtr mojo_decrypt_config(DecryptConfig::New());
+media::interfaces::DecryptConfigPtr TypeConverter<
+ media::interfaces::DecryptConfigPtr,
+ media::DecryptConfig>::Convert(const media::DecryptConfig& input) {
+ media::interfaces::DecryptConfigPtr mojo_decrypt_config(
+ media::interfaces::DecryptConfig::New());
mojo_decrypt_config->key_id = input.key_id();
mojo_decrypt_config->iv = input.iv();
mojo_decrypt_config->subsamples =
- Array<SubsampleEntryPtr>::From(input.subsamples());
+ Array<media::interfaces::SubsampleEntryPtr>::From(input.subsamples());
return mojo_decrypt_config.Pass();
}
// static
scoped_ptr<media::DecryptConfig>
-TypeConverter<scoped_ptr<media::DecryptConfig>, DecryptConfigPtr>::Convert(
- const DecryptConfigPtr& input) {
+TypeConverter<scoped_ptr<media::DecryptConfig>,
+ media::interfaces::DecryptConfigPtr>::
+ Convert(const media::interfaces::DecryptConfigPtr& input) {
return make_scoped_ptr(new media::DecryptConfig(
input->key_id, input->iv,
input->subsamples.To<std::vector<media::SubsampleEntry>>()));
}
// static
-MediaDecoderBufferPtr TypeConverter<MediaDecoderBufferPtr,
- scoped_refptr<media::DecoderBuffer> >::Convert(
- const scoped_refptr<media::DecoderBuffer>& input) {
+media::interfaces::DecoderBufferPtr
+TypeConverter<media::interfaces::DecoderBufferPtr,
+ scoped_refptr<media::DecoderBuffer>>::
+ Convert(const scoped_refptr<media::DecoderBuffer>& input) {
DCHECK(input);
- MediaDecoderBufferPtr mojo_buffer(MediaDecoderBuffer::New());
+ media::interfaces::DecoderBufferPtr mojo_buffer(
+ media::interfaces::DecoderBuffer::New());
if (input->end_of_stream())
return mojo_buffer.Pass();
@@ -329,8 +354,10 @@ MediaDecoderBufferPtr TypeConverter<MediaDecoderBufferPtr,
input->side_data() + input->side_data_size());
mojo_buffer->side_data.Swap(&side_data);
- if (input->decrypt_config())
- mojo_buffer->decrypt_config = DecryptConfig::From(*input->decrypt_config());
+ if (input->decrypt_config()) {
+ mojo_buffer->decrypt_config =
+ media::interfaces::DecryptConfig::From(*input->decrypt_config());
+ }
// TODO(dalecurtis): We intentionally do not serialize the data section of
// the DecoderBuffer here; this must instead be done by clients via their
@@ -340,9 +367,10 @@ MediaDecoderBufferPtr TypeConverter<MediaDecoderBufferPtr,
}
// static
-scoped_refptr<media::DecoderBuffer> TypeConverter<
- scoped_refptr<media::DecoderBuffer>, MediaDecoderBufferPtr>::Convert(
- const MediaDecoderBufferPtr& input) {
+scoped_refptr<media::DecoderBuffer>
+TypeConverter<scoped_refptr<media::DecoderBuffer>,
+ media::interfaces::DecoderBufferPtr>::
+ Convert(const media::interfaces::DecoderBufferPtr& input) {
if (!input->data_size)
return media::DecoderBuffer::CreateEOSBuffer();
@@ -379,15 +407,17 @@ scoped_refptr<media::DecoderBuffer> TypeConverter<
}
// static
-AudioDecoderConfigPtr
-TypeConverter<AudioDecoderConfigPtr, media::AudioDecoderConfig>::Convert(
- const media::AudioDecoderConfig& input) {
- AudioDecoderConfigPtr config(AudioDecoderConfig::New());
- config->codec = static_cast<AudioCodec>(input.codec());
+media::interfaces::AudioDecoderConfigPtr TypeConverter<
+ media::interfaces::AudioDecoderConfigPtr,
+ media::AudioDecoderConfig>::Convert(const media::AudioDecoderConfig&
+ input) {
+ media::interfaces::AudioDecoderConfigPtr config(
+ media::interfaces::AudioDecoderConfig::New());
+ config->codec = static_cast<media::interfaces::AudioCodec>(input.codec());
config->sample_format =
- static_cast<SampleFormat>(input.sample_format());
+ static_cast<media::interfaces::SampleFormat>(input.sample_format());
config->channel_layout =
- static_cast<ChannelLayout>(input.channel_layout());
+ static_cast<media::interfaces::ChannelLayout>(input.channel_layout());
config->samples_per_second = input.samples_per_second();
if (input.extra_data()) {
std::vector<uint8_t> data(input.extra_data(),
@@ -402,8 +432,9 @@ TypeConverter<AudioDecoderConfigPtr, media::AudioDecoderConfig>::Convert(
// static
media::AudioDecoderConfig
-TypeConverter<media::AudioDecoderConfig, AudioDecoderConfigPtr>::Convert(
- const AudioDecoderConfigPtr& input) {
+TypeConverter<media::AudioDecoderConfig,
+ media::interfaces::AudioDecoderConfigPtr>::
+ Convert(const media::interfaces::AudioDecoderConfigPtr& input) {
media::AudioDecoderConfig config;
config.Initialize(
static_cast<media::AudioCodec>(input->codec),
@@ -413,20 +444,24 @@ TypeConverter<media::AudioDecoderConfig, AudioDecoderConfigPtr>::Convert(
input->extra_data.size() ? &input->extra_data.front() : NULL,
input->extra_data.size(),
input->is_encrypted,
- false,
base::TimeDelta::FromMicroseconds(input->seek_preroll_usec),
input->codec_delay);
return config;
}
// static
-VideoDecoderConfigPtr
-TypeConverter<VideoDecoderConfigPtr, media::VideoDecoderConfig>::Convert(
- const media::VideoDecoderConfig& input) {
- VideoDecoderConfigPtr config(VideoDecoderConfig::New());
- config->codec = static_cast<VideoCodec>(input.codec());
- config->profile = static_cast<VideoCodecProfile>(input.profile());
- config->format = static_cast<VideoFormat>(input.format());
+media::interfaces::VideoDecoderConfigPtr TypeConverter<
+ media::interfaces::VideoDecoderConfigPtr,
+ media::VideoDecoderConfig>::Convert(const media::VideoDecoderConfig&
+ input) {
+ media::interfaces::VideoDecoderConfigPtr config(
+ media::interfaces::VideoDecoderConfig::New());
+ config->codec = static_cast<media::interfaces::VideoCodec>(input.codec());
+ config->profile =
+ static_cast<media::interfaces::VideoCodecProfile>(input.profile());
+ config->format = static_cast<media::interfaces::VideoFormat>(input.format());
+ config->color_space =
+ static_cast<media::interfaces::ColorSpace>(input.color_space());
config->coded_size = Size::From(input.coded_size());
config->visible_rect = Rect::From(input.visible_rect());
config->natural_size = Size::From(input.natural_size());
@@ -441,40 +476,40 @@ TypeConverter<VideoDecoderConfigPtr, media::VideoDecoderConfig>::Convert(
// static
media::VideoDecoderConfig
-TypeConverter<media::VideoDecoderConfig, VideoDecoderConfigPtr>::Convert(
- const VideoDecoderConfigPtr& input) {
+TypeConverter<media::VideoDecoderConfig,
+ media::interfaces::VideoDecoderConfigPtr>::
+ Convert(const media::interfaces::VideoDecoderConfigPtr& input) {
media::VideoDecoderConfig config;
config.Initialize(
static_cast<media::VideoCodec>(input->codec),
static_cast<media::VideoCodecProfile>(input->profile),
- static_cast<media::VideoFrame::Format>(input->format),
- media::VideoFrame::COLOR_SPACE_UNSPECIFIED,
- input->coded_size.To<gfx::Size>(),
- input->visible_rect.To<gfx::Rect>(),
+ static_cast<media::VideoPixelFormat>(input->format),
+ static_cast<media::ColorSpace>(input->color_space),
+ input->coded_size.To<gfx::Size>(), input->visible_rect.To<gfx::Rect>(),
input->natural_size.To<gfx::Size>(),
input->extra_data.size() ? &input->extra_data.front() : NULL,
- input->extra_data.size(),
- input->is_encrypted,
- false);
+ input->extra_data.size(), input->is_encrypted);
return config;
}
// static
-CdmKeyInformationPtr
-TypeConverter<CdmKeyInformationPtr, media::CdmKeyInformation>::Convert(
- const media::CdmKeyInformation& input) {
- CdmKeyInformationPtr info(CdmKeyInformation::New());
+media::interfaces::CdmKeyInformationPtr TypeConverter<
+ media::interfaces::CdmKeyInformationPtr,
+ media::CdmKeyInformation>::Convert(const media::CdmKeyInformation& input) {
+ media::interfaces::CdmKeyInformationPtr info(
+ media::interfaces::CdmKeyInformation::New());
std::vector<uint8_t> key_id_copy(input.key_id);
info->key_id.Swap(&key_id_copy);
- info->status = static_cast<CdmKeyStatus>(input.status);
+ info->status = static_cast<media::interfaces::CdmKeyStatus>(input.status);
info->system_code = input.system_code;
return info.Pass();
}
// static
-scoped_ptr<media::CdmKeyInformation> TypeConverter<
- scoped_ptr<media::CdmKeyInformation>,
- CdmKeyInformationPtr>::Convert(const CdmKeyInformationPtr& input) {
+scoped_ptr<media::CdmKeyInformation>
+TypeConverter<scoped_ptr<media::CdmKeyInformation>,
+ media::interfaces::CdmKeyInformationPtr>::
+ Convert(const media::interfaces::CdmKeyInformationPtr& input) {
scoped_ptr<media::CdmKeyInformation> info(new media::CdmKeyInformation());
info->key_id = input->key_id.storage();
info->status =
@@ -484,9 +519,10 @@ scoped_ptr<media::CdmKeyInformation> TypeConverter<
}
// static
-CdmConfigPtr TypeConverter<CdmConfigPtr, media::CdmConfig>::Convert(
+media::interfaces::CdmConfigPtr
+TypeConverter<media::interfaces::CdmConfigPtr, media::CdmConfig>::Convert(
const media::CdmConfig& input) {
- CdmConfigPtr config(CdmConfig::New());
+ media::interfaces::CdmConfigPtr config(media::interfaces::CdmConfig::New());
config->allow_distinctive_identifier = input.allow_distinctive_identifier;
config->allow_persistent_state = input.allow_persistent_state;
config->use_hw_secure_codecs = input.use_hw_secure_codecs;
@@ -494,8 +530,9 @@ CdmConfigPtr TypeConverter<CdmConfigPtr, media::CdmConfig>::Convert(
}
// static
-media::CdmConfig TypeConverter<media::CdmConfig, CdmConfigPtr>::Convert(
- const CdmConfigPtr& input) {
+media::CdmConfig
+TypeConverter<media::CdmConfig, media::interfaces::CdmConfigPtr>::Convert(
+ const media::interfaces::CdmConfigPtr& input) {
media::CdmConfig config;
config.allow_distinctive_identifier = input->allow_distinctive_identifier;
config.allow_persistent_state = input->allow_persistent_state;
diff --git a/chromium/media/mojo/services/media_type_converters.h b/chromium/media/mojo/services/media_type_converters.h
index 7b92854758c..0133a998631 100644
--- a/chromium/media/mojo/services/media_type_converters.h
+++ b/chromium/media/mojo/services/media_type_converters.h
@@ -9,6 +9,7 @@
#include "base/memory/scoped_ptr.h"
#include "media/mojo/interfaces/content_decryption_module.mojom.h"
#include "media/mojo/interfaces/media_types.mojom.h"
+#include "mojo/public/cpp/bindings/type_converter.h"
namespace media {
class AudioDecoderConfig;
@@ -20,76 +21,95 @@ struct CdmKeyInformation;
struct SubsampleEntry;
}
+// These are specializations of mojo::TypeConverter and have to be in the mojo
+// namespace.
namespace mojo {
template <>
-struct TypeConverter<SubsampleEntryPtr, media::SubsampleEntry> {
- static SubsampleEntryPtr Convert(const media::SubsampleEntry& input);
+struct TypeConverter<media::interfaces::SubsampleEntryPtr,
+ media::SubsampleEntry> {
+ static media::interfaces::SubsampleEntryPtr Convert(
+ const media::SubsampleEntry& input);
};
template <>
-struct TypeConverter<media::SubsampleEntry, SubsampleEntryPtr> {
- static media::SubsampleEntry Convert(const SubsampleEntryPtr& input);
+struct TypeConverter<media::SubsampleEntry,
+ media::interfaces::SubsampleEntryPtr> {
+ static media::SubsampleEntry Convert(
+ const media::interfaces::SubsampleEntryPtr& input);
};
template <>
-struct TypeConverter<DecryptConfigPtr, media::DecryptConfig> {
- static DecryptConfigPtr Convert(const media::DecryptConfig& input);
+struct TypeConverter<media::interfaces::DecryptConfigPtr,
+ media::DecryptConfig> {
+ static media::interfaces::DecryptConfigPtr Convert(
+ const media::DecryptConfig& input);
};
template <>
-struct TypeConverter<scoped_ptr<media::DecryptConfig>, DecryptConfigPtr> {
+struct TypeConverter<scoped_ptr<media::DecryptConfig>,
+ media::interfaces::DecryptConfigPtr> {
static scoped_ptr<media::DecryptConfig> Convert(
- const DecryptConfigPtr& input);
+ const media::interfaces::DecryptConfigPtr& input);
};
template <>
-struct TypeConverter<MediaDecoderBufferPtr,
+struct TypeConverter<media::interfaces::DecoderBufferPtr,
scoped_refptr<media::DecoderBuffer>> {
- static MediaDecoderBufferPtr Convert(
+ static media::interfaces::DecoderBufferPtr Convert(
const scoped_refptr<media::DecoderBuffer>& input);
};
-template<>
+template <>
struct TypeConverter<scoped_refptr<media::DecoderBuffer>,
- MediaDecoderBufferPtr> {
+ media::interfaces::DecoderBufferPtr> {
static scoped_refptr<media::DecoderBuffer> Convert(
- const MediaDecoderBufferPtr& input);
+ const media::interfaces::DecoderBufferPtr& input);
};
template <>
-struct TypeConverter<AudioDecoderConfigPtr, media::AudioDecoderConfig> {
- static AudioDecoderConfigPtr Convert(const media::AudioDecoderConfig& input);
+struct TypeConverter<media::interfaces::AudioDecoderConfigPtr,
+ media::AudioDecoderConfig> {
+ static media::interfaces::AudioDecoderConfigPtr Convert(
+ const media::AudioDecoderConfig& input);
};
template <>
-struct TypeConverter<media::AudioDecoderConfig, AudioDecoderConfigPtr> {
- static media::AudioDecoderConfig Convert(const AudioDecoderConfigPtr& input);
+struct TypeConverter<media::AudioDecoderConfig,
+ media::interfaces::AudioDecoderConfigPtr> {
+ static media::AudioDecoderConfig Convert(
+ const media::interfaces::AudioDecoderConfigPtr& input);
};
template <>
-struct TypeConverter<VideoDecoderConfigPtr, media::VideoDecoderConfig> {
- static VideoDecoderConfigPtr Convert(const media::VideoDecoderConfig& input);
+struct TypeConverter<media::interfaces::VideoDecoderConfigPtr,
+ media::VideoDecoderConfig> {
+ static media::interfaces::VideoDecoderConfigPtr Convert(
+ const media::VideoDecoderConfig& input);
};
template <>
-struct TypeConverter<media::VideoDecoderConfig, VideoDecoderConfigPtr> {
- static media::VideoDecoderConfig Convert(const VideoDecoderConfigPtr& input);
+struct TypeConverter<media::VideoDecoderConfig,
+ media::interfaces::VideoDecoderConfigPtr> {
+ static media::VideoDecoderConfig Convert(
+ const media::interfaces::VideoDecoderConfigPtr& input);
};
template <>
-struct TypeConverter<CdmKeyInformationPtr, media::CdmKeyInformation> {
- static CdmKeyInformationPtr Convert(const media::CdmKeyInformation& input);
+struct TypeConverter<media::interfaces::CdmKeyInformationPtr,
+ media::CdmKeyInformation> {
+ static media::interfaces::CdmKeyInformationPtr Convert(
+ const media::CdmKeyInformation& input);
};
template <>
struct TypeConverter<scoped_ptr<media::CdmKeyInformation>,
- CdmKeyInformationPtr> {
+ media::interfaces::CdmKeyInformationPtr> {
static scoped_ptr<media::CdmKeyInformation> Convert(
- const CdmKeyInformationPtr& input);
+ const media::interfaces::CdmKeyInformationPtr& input);
};
template <>
-struct TypeConverter<CdmConfigPtr, media::CdmConfig> {
- static CdmConfigPtr Convert(const media::CdmConfig& input);
+struct TypeConverter<media::interfaces::CdmConfigPtr, media::CdmConfig> {
+ static media::interfaces::CdmConfigPtr Convert(const media::CdmConfig& input);
};
template <>
-struct TypeConverter<media::CdmConfig, CdmConfigPtr> {
- static media::CdmConfig Convert(const CdmConfigPtr& input);
+struct TypeConverter<media::CdmConfig, media::interfaces::CdmConfigPtr> {
+ static media::CdmConfig Convert(const media::interfaces::CdmConfigPtr& input);
};
} // namespace mojo
diff --git a/chromium/media/mojo/services/media_type_converters_unittest.cc b/chromium/media/mojo/services/media_type_converters_unittest.cc
index ef96049559e..54cd6655193 100644
--- a/chromium/media/mojo/services/media_type_converters_unittest.cc
+++ b/chromium/media/mojo/services/media_type_converters_unittest.cc
@@ -9,10 +9,7 @@
#include "media/base/decoder_buffer.h"
#include "testing/gtest/include/gtest/gtest.h"
-using media::DecoderBuffer;
-
-namespace mojo {
-namespace test {
+namespace media {
TEST(MediaTypeConvertersTest, ConvertDecoderBuffer_Normal) {
const uint8 kData[] = "hello, world";
@@ -27,12 +24,12 @@ TEST(MediaTypeConvertersTest, ConvertDecoderBuffer_Normal) {
buffer->set_timestamp(base::TimeDelta::FromMilliseconds(123));
buffer->set_duration(base::TimeDelta::FromMilliseconds(456));
buffer->set_splice_timestamp(base::TimeDelta::FromMilliseconds(200));
- buffer->set_discard_padding(media::DecoderBuffer::DiscardPadding(
- base::TimeDelta::FromMilliseconds(5),
- base::TimeDelta::FromMilliseconds(6)));
+ buffer->set_discard_padding(
+ DecoderBuffer::DiscardPadding(base::TimeDelta::FromMilliseconds(5),
+ base::TimeDelta::FromMilliseconds(6)));
// Convert from and back.
- MediaDecoderBufferPtr ptr(MediaDecoderBuffer::From(buffer));
+ interfaces::DecoderBufferPtr ptr(interfaces::DecoderBuffer::From(buffer));
scoped_refptr<DecoderBuffer> result(ptr.To<scoped_refptr<DecoderBuffer>>());
// Compare.
@@ -57,7 +54,7 @@ TEST(MediaTypeConvertersTest, ConvertDecoderBuffer_EOS) {
scoped_refptr<DecoderBuffer> buffer(DecoderBuffer::CreateEOSBuffer());
// Convert from and back.
- MediaDecoderBufferPtr ptr(MediaDecoderBuffer::From(buffer));
+ interfaces::DecoderBufferPtr ptr(interfaces::DecoderBuffer::From(buffer));
scoped_refptr<DecoderBuffer> result(ptr.To<scoped_refptr<DecoderBuffer>>());
// Compare.
@@ -75,7 +72,7 @@ TEST(MediaTypeConvertersTest, ConvertDecoderBuffer_KeyFrame) {
EXPECT_TRUE(buffer->is_key_frame());
// Convert from and back.
- MediaDecoderBufferPtr ptr(MediaDecoderBuffer::From(buffer));
+ interfaces::DecoderBufferPtr ptr(interfaces::DecoderBuffer::From(buffer));
scoped_refptr<DecoderBuffer> result(ptr.To<scoped_refptr<DecoderBuffer>>());
// Compare.
@@ -91,19 +88,19 @@ TEST(MediaTypeConvertersTest, ConvertDecoderBuffer_EncryptedBuffer) {
const char kKeyId[] = "00112233445566778899aabbccddeeff";
const char kIv[] = "0123456789abcdef";
- std::vector<media::SubsampleEntry> subsamples;
- subsamples.push_back(media::SubsampleEntry(10, 20));
- subsamples.push_back(media::SubsampleEntry(30, 40));
- subsamples.push_back(media::SubsampleEntry(50, 60));
+ std::vector<SubsampleEntry> subsamples;
+ subsamples.push_back(SubsampleEntry(10, 20));
+ subsamples.push_back(SubsampleEntry(30, 40));
+ subsamples.push_back(SubsampleEntry(50, 60));
// Original.
scoped_refptr<DecoderBuffer> buffer(DecoderBuffer::CopyFrom(
reinterpret_cast<const uint8*>(&kData), kDataSize));
buffer->set_decrypt_config(
- make_scoped_ptr(new media::DecryptConfig(kKeyId, kIv, subsamples)));
+ make_scoped_ptr(new DecryptConfig(kKeyId, kIv, subsamples)));
// Convert from and back.
- MediaDecoderBufferPtr ptr(MediaDecoderBuffer::From(buffer));
+ interfaces::DecoderBufferPtr ptr(interfaces::DecoderBuffer::From(buffer));
scoped_refptr<DecoderBuffer> result(ptr.To<scoped_refptr<DecoderBuffer>>());
// Compare.
@@ -113,9 +110,10 @@ TEST(MediaTypeConvertersTest, ConvertDecoderBuffer_EncryptedBuffer) {
EXPECT_TRUE(buffer->decrypt_config()->Matches(*result->decrypt_config()));
// Test empty IV. This is used for clear buffer in an encrypted stream.
- buffer->set_decrypt_config(make_scoped_ptr(new media::DecryptConfig(
- kKeyId, "", std::vector<media::SubsampleEntry>())));
- result = MediaDecoderBuffer::From(buffer).To<scoped_refptr<DecoderBuffer>>();
+ buffer->set_decrypt_config(make_scoped_ptr(
+ new DecryptConfig(kKeyId, "", std::vector<SubsampleEntry>())));
+ result = interfaces::DecoderBuffer::From(buffer)
+ .To<scoped_refptr<DecoderBuffer>>();
EXPECT_TRUE(buffer->decrypt_config()->Matches(*result->decrypt_config()));
EXPECT_TRUE(buffer->decrypt_config()->iv().empty());
}
@@ -125,64 +123,46 @@ TEST(MediaTypeConvertersTest, ConvertDecoderBuffer_EncryptedBuffer) {
TEST(MediaTypeConvertersTest, ConvertAudioDecoderConfig_Normal) {
const uint8 kExtraData[] = "config extra data";
const int kExtraDataSize = arraysize(kExtraData);
- media::AudioDecoderConfig config;
- config.Initialize(media::kCodecAAC,
- media::kSampleFormatU8,
- media::CHANNEL_LAYOUT_SURROUND,
- 48000,
- reinterpret_cast<const uint8*>(&kExtraData),
- kExtraDataSize,
- false,
- false,
- base::TimeDelta(),
- 0);
- AudioDecoderConfigPtr ptr(AudioDecoderConfig::From(config));
- media::AudioDecoderConfig result(ptr.To<media::AudioDecoderConfig>());
+ AudioDecoderConfig config;
+ config.Initialize(kCodecAAC, kSampleFormatU8, CHANNEL_LAYOUT_SURROUND, 48000,
+ reinterpret_cast<const uint8*>(&kExtraData), kExtraDataSize,
+ false, base::TimeDelta(), 0);
+ interfaces::AudioDecoderConfigPtr ptr(
+ interfaces::AudioDecoderConfig::From(config));
+ AudioDecoderConfig result(ptr.To<AudioDecoderConfig>());
EXPECT_TRUE(result.Matches(config));
}
TEST(MediaTypeConvertersTest, ConvertAudioDecoderConfig_NullExtraData) {
- media::AudioDecoderConfig config;
- config.Initialize(media::kCodecAAC,
- media::kSampleFormatU8,
- media::CHANNEL_LAYOUT_SURROUND,
- 48000,
- NULL,
- 0,
- false,
- false,
- base::TimeDelta(),
- 0);
- AudioDecoderConfigPtr ptr(AudioDecoderConfig::From(config));
- media::AudioDecoderConfig result(ptr.To<media::AudioDecoderConfig>());
+ AudioDecoderConfig config;
+ config.Initialize(kCodecAAC, kSampleFormatU8, CHANNEL_LAYOUT_SURROUND, 48000,
+ NULL, 0, false, base::TimeDelta(), 0);
+ interfaces::AudioDecoderConfigPtr ptr(
+ interfaces::AudioDecoderConfig::From(config));
+ AudioDecoderConfig result(ptr.To<AudioDecoderConfig>());
EXPECT_TRUE(result.Matches(config));
}
TEST(MediaTypeConvertersTest, ConvertAudioDecoderConfig_Encrypted) {
- media::AudioDecoderConfig config;
- config.Initialize(media::kCodecAAC,
- media::kSampleFormatU8,
- media::CHANNEL_LAYOUT_SURROUND,
- 48000,
- NULL,
- 0,
+ AudioDecoderConfig config;
+ config.Initialize(kCodecAAC, kSampleFormatU8, CHANNEL_LAYOUT_SURROUND, 48000,
+ NULL, 0,
true, // Is encrypted.
- false,
- base::TimeDelta(),
- 0);
- AudioDecoderConfigPtr ptr(AudioDecoderConfig::From(config));
- media::AudioDecoderConfig result(ptr.To<media::AudioDecoderConfig>());
+ base::TimeDelta(), 0);
+ interfaces::AudioDecoderConfigPtr ptr(
+ interfaces::AudioDecoderConfig::From(config));
+ AudioDecoderConfig result(ptr.To<AudioDecoderConfig>());
EXPECT_TRUE(result.Matches(config));
}
TEST(MediaTypeConvertersTest, ConvertCdmConfig) {
- media::CdmConfig config;
+ CdmConfig config;
config.allow_distinctive_identifier = true;
config.allow_persistent_state = false;
config.use_hw_secure_codecs = true;
- CdmConfigPtr ptr(CdmConfig::From(config));
- media::CdmConfig result(ptr.To<media::CdmConfig>());
+ interfaces::CdmConfigPtr ptr(interfaces::CdmConfig::From(config));
+ CdmConfig result(ptr.To<CdmConfig>());
EXPECT_EQ(config.allow_distinctive_identifier,
result.allow_distinctive_identifier);
@@ -190,5 +170,4 @@ TEST(MediaTypeConvertersTest, ConvertCdmConfig) {
EXPECT_EQ(config.use_hw_secure_codecs, result.use_hw_secure_codecs);
}
-} // namespace test
-} // namespace mojo
+} // namespace media
diff --git a/chromium/media/mojo/services/mojo_cdm.cc b/chromium/media/mojo/services/mojo_cdm.cc
index 97b6358db9b..dc5e4082d02 100644
--- a/chromium/media/mojo/services/mojo_cdm.cc
+++ b/chromium/media/mojo/services/mojo_cdm.cc
@@ -12,14 +12,13 @@
#include "media/mojo/services/media_type_converters.h"
#include "mojo/application/public/cpp/connect.h"
#include "mojo/application/public/interfaces/service_provider.mojom.h"
-#include "third_party/mojo/src/mojo/public/cpp/bindings/interface_impl.h"
#include "url/gurl.h"
namespace media {
template <typename PromiseType>
static void RejectPromise(scoped_ptr<PromiseType> promise,
- mojo::CdmPromiseResultPtr result) {
+ interfaces::CdmPromiseResultPtr result) {
promise->reject(static_cast<MediaKeys::Exception>(result->exception),
result->system_code, result->error_message);
}
@@ -31,7 +30,7 @@ void MojoCdm::Create(
const std::string& key_system,
const GURL& security_origin,
const media::CdmConfig& cdm_config,
- mojo::ContentDecryptionModulePtr remote_cdm,
+ interfaces::ContentDecryptionModulePtr remote_cdm,
const media::SessionMessageCB& session_message_cb,
const media::SessionClosedCB& session_closed_cb,
const media::LegacySessionErrorCB& legacy_session_error_cb,
@@ -52,7 +51,7 @@ void MojoCdm::Create(
promise.Pass());
}
-MojoCdm::MojoCdm(mojo::ContentDecryptionModulePtr remote_cdm,
+MojoCdm::MojoCdm(interfaces::ContentDecryptionModulePtr remote_cdm,
const SessionMessageCB& session_message_cb,
const SessionClosedCB& session_closed_cb,
const LegacySessionErrorCB& legacy_session_error_cb,
@@ -76,7 +75,7 @@ MojoCdm::MojoCdm(mojo::ContentDecryptionModulePtr remote_cdm,
DCHECK(!session_keys_change_cb_.is_null());
DCHECK(!session_expiration_update_cb_.is_null());
- mojo::ContentDecryptionModuleClientPtr client_ptr;
+ interfaces::ContentDecryptionModuleClientPtr client_ptr;
binding_.Bind(GetProxy(&client_ptr));
remote_cdm_->SetClient(client_ptr.Pass());
}
@@ -91,9 +90,10 @@ void MojoCdm::InitializeCdm(const std::string& key_system,
scoped_ptr<CdmInitializedPromise> promise) {
DVLOG(1) << __FUNCTION__ << ": " << key_system;
remote_cdm_->Initialize(
- key_system, security_origin.spec(), mojo::CdmConfig::From(cdm_config),
- cdm_id_, base::Bind(&MojoCdm::OnPromiseResult<>,
- weak_factory_.GetWeakPtr(), base::Passed(&promise)));
+ key_system, security_origin.spec(),
+ interfaces::CdmConfig::From(cdm_config), cdm_id_,
+ base::Bind(&MojoCdm::OnPromiseResult<>, weak_factory_.GetWeakPtr(),
+ base::Passed(&promise)));
}
void MojoCdm::SetServerCertificate(const std::vector<uint8_t>& certificate,
@@ -112,8 +112,10 @@ void MojoCdm::CreateSessionAndGenerateRequest(
scoped_ptr<NewSessionCdmPromise> promise) {
DVLOG(2) << __FUNCTION__;
remote_cdm_->CreateSessionAndGenerateRequest(
- static_cast<mojo::ContentDecryptionModule::SessionType>(session_type),
- static_cast<mojo::ContentDecryptionModule::InitDataType>(init_data_type),
+ static_cast<interfaces::ContentDecryptionModule::SessionType>(
+ session_type),
+ static_cast<interfaces::ContentDecryptionModule::InitDataType>(
+ init_data_type),
mojo::Array<uint8_t>::From(init_data),
base::Bind(&MojoCdm::OnPromiseResult<std::string>,
weak_factory_.GetWeakPtr(), base::Passed(&promise)));
@@ -124,7 +126,8 @@ void MojoCdm::LoadSession(SessionType session_type,
scoped_ptr<NewSessionCdmPromise> promise) {
DVLOG(2) << __FUNCTION__;
remote_cdm_->LoadSession(
- static_cast<mojo::ContentDecryptionModule::SessionType>(session_type),
+ static_cast<interfaces::ContentDecryptionModule::SessionType>(
+ session_type),
session_id,
base::Bind(&MojoCdm::OnPromiseResult<std::string>,
weak_factory_.GetWeakPtr(), base::Passed(&promise)));
@@ -172,7 +175,7 @@ int MojoCdm::GetCdmId() const {
}
void MojoCdm::OnSessionMessage(const mojo::String& session_id,
- mojo::CdmMessageType message_type,
+ interfaces::CdmMessageType message_type,
mojo::Array<uint8_t> message,
const mojo::String& legacy_destination_url) {
DVLOG(2) << __FUNCTION__;
@@ -194,7 +197,7 @@ void MojoCdm::OnSessionClosed(const mojo::String& session_id) {
}
void MojoCdm::OnLegacySessionError(const mojo::String& session_id,
- mojo::CdmException exception,
+ interfaces::CdmException exception,
uint32_t system_code,
const mojo::String& error_message) {
DVLOG(2) << __FUNCTION__;
@@ -206,7 +209,7 @@ void MojoCdm::OnLegacySessionError(const mojo::String& session_id,
void MojoCdm::OnSessionKeysChange(
const mojo::String& session_id,
bool has_additional_usable_key,
- mojo::Array<mojo::CdmKeyInformationPtr> keys_info) {
+ mojo::Array<interfaces::CdmKeyInformationPtr> keys_info) {
DVLOG(2) << __FUNCTION__;
media::CdmKeysInfo key_data;
key_data.reserve(keys_info.size());
diff --git a/chromium/media/mojo/services/mojo_cdm.h b/chromium/media/mojo/services/mojo_cdm.h
index 841a18e9dc9..96b4532ba1e 100644
--- a/chromium/media/mojo/services/mojo_cdm.h
+++ b/chromium/media/mojo/services/mojo_cdm.h
@@ -14,6 +14,7 @@
#include "media/base/media_keys.h"
#include "media/mojo/interfaces/content_decryption_module.mojom.h"
#include "media/mojo/services/mojo_type_trait.h"
+#include "third_party/mojo/src/mojo/public/cpp/bindings/binding.h"
namespace mojo {
class ServiceProvider;
@@ -21,18 +22,18 @@ class ServiceProvider;
namespace media {
-// A MediaKeys that proxies to a mojo::ContentDecryptionModule. That
-// mojo::ContentDecryptionModule proxies back to the MojoCdm via the
-// mojo::ContentDecryptionModuleClient interface.
+// A MediaKeys that proxies to a interfaces::ContentDecryptionModule. That
+// interfaces::ContentDecryptionModule proxies back to the MojoCdm via the
+// interfaces::ContentDecryptionModuleClient interface.
class MojoCdm : public MediaKeys,
public CdmContext,
- public mojo::ContentDecryptionModuleClient {
+ public interfaces::ContentDecryptionModuleClient {
public:
static void Create(
const std::string& key_system,
const GURL& security_origin,
const media::CdmConfig& cdm_config,
- mojo::ContentDecryptionModulePtr remote_cdm,
+ interfaces::ContentDecryptionModulePtr remote_cdm,
const media::SessionMessageCB& session_message_cb,
const media::SessionClosedCB& session_closed_cb,
const media::LegacySessionErrorCB& legacy_session_error_cb,
@@ -67,7 +68,7 @@ class MojoCdm : public MediaKeys,
int GetCdmId() const final;
private:
- MojoCdm(mojo::ContentDecryptionModulePtr remote_cdm,
+ MojoCdm(interfaces::ContentDecryptionModulePtr remote_cdm,
const SessionMessageCB& session_message_cb,
const SessionClosedCB& session_closed_cb,
const LegacySessionErrorCB& legacy_session_error_cb,
@@ -79,20 +80,20 @@ class MojoCdm : public MediaKeys,
const media::CdmConfig& cdm_config,
scoped_ptr<CdmInitializedPromise> promise);
- // mojo::ContentDecryptionModuleClient implementation.
+ // interfaces::ContentDecryptionModuleClient implementation.
void OnSessionMessage(const mojo::String& session_id,
- mojo::CdmMessageType message_type,
+ interfaces::CdmMessageType message_type,
mojo::Array<uint8_t> message,
const mojo::String& legacy_destination_url) final;
void OnSessionClosed(const mojo::String& session_id) final;
void OnLegacySessionError(const mojo::String& session_id,
- mojo::CdmException exception,
+ interfaces::CdmException exception,
uint32_t system_code,
const mojo::String& error_message) final;
void OnSessionKeysChange(
const mojo::String& session_id,
bool has_additional_usable_key,
- mojo::Array<mojo::CdmKeyInformationPtr> keys_info) final;
+ mojo::Array<interfaces::CdmKeyInformationPtr> keys_info) final;
void OnSessionExpirationUpdate(const mojo::String& session_id,
double new_expiry_time_sec) final;
@@ -102,7 +103,7 @@ class MojoCdm : public MediaKeys,
// "unable to match function definition to an existing declaration".
template <typename... T>
void OnPromiseResult(scoped_ptr<CdmPromiseTemplate<T...>> promise,
- mojo::CdmPromiseResultPtr result,
+ interfaces::CdmPromiseResultPtr result,
typename MojoTypeTrait<T>::MojoType... args) {
if (result->success)
promise->resolve(args.template To<T>()...); // See ISO C++03 14.2/4.
@@ -112,7 +113,7 @@ class MojoCdm : public MediaKeys,
static int next_cdm_id_;
- mojo::ContentDecryptionModulePtr remote_cdm_;
+ interfaces::ContentDecryptionModulePtr remote_cdm_;
mojo::Binding<ContentDecryptionModuleClient> binding_;
int cdm_id_;
diff --git a/chromium/media/mojo/services/mojo_cdm_factory.cc b/chromium/media/mojo/services/mojo_cdm_factory.cc
index 91fd548c3f5..072b2ddd28a 100644
--- a/chromium/media/mojo/services/mojo_cdm_factory.cc
+++ b/chromium/media/mojo/services/mojo_cdm_factory.cc
@@ -4,14 +4,16 @@
#include "media/mojo/services/mojo_cdm_factory.h"
+#include "media/mojo/interfaces/service_factory.mojom.h"
#include "media/mojo/services/mojo_cdm.h"
#include "mojo/application/public/cpp/connect.h"
+#include "third_party/mojo/src/mojo/public/cpp/bindings/interface_request.h"
namespace media {
-MojoCdmFactory::MojoCdmFactory(mojo::ServiceProvider* service_provider)
- : service_provider_(service_provider) {
- DCHECK(service_provider_);
+MojoCdmFactory::MojoCdmFactory(interfaces::ServiceFactory* service_factory)
+ : service_factory_(service_factory) {
+ DCHECK(service_factory_);
}
MojoCdmFactory::~MojoCdmFactory() {
@@ -28,10 +30,10 @@ void MojoCdmFactory::Create(
const SessionExpirationUpdateCB& session_expiration_update_cb,
const CdmCreatedCB& cdm_created_cb) {
DVLOG(2) << __FUNCTION__ << ": " << key_system;
- DCHECK(service_provider_);
+ DCHECK(service_factory_);
- mojo::ContentDecryptionModulePtr cdm_ptr;
- mojo::ConnectToService(service_provider_, &cdm_ptr);
+ interfaces::ContentDecryptionModulePtr cdm_ptr;
+ service_factory_->CreateCdm(mojo::GetProxy(&cdm_ptr));
MojoCdm::Create(key_system, security_origin, cdm_config, cdm_ptr.Pass(),
session_message_cb, session_closed_cb,
diff --git a/chromium/media/mojo/services/mojo_cdm_factory.h b/chromium/media/mojo/services/mojo_cdm_factory.h
index 81432e4dc2d..ae4003c8784 100644
--- a/chromium/media/mojo/services/mojo_cdm_factory.h
+++ b/chromium/media/mojo/services/mojo_cdm_factory.h
@@ -8,15 +8,15 @@
#include "base/macros.h"
#include "media/base/cdm_factory.h"
-namespace mojo {
-class ServiceProvider;
-}
-
namespace media {
+namespace interfaces {
+class ServiceFactory;
+}
+
class MojoCdmFactory : public CdmFactory {
public:
- explicit MojoCdmFactory(mojo::ServiceProvider* service_provider);
+ explicit MojoCdmFactory(interfaces::ServiceFactory* service_factory);
~MojoCdmFactory() final;
// CdmFactory implementation.
@@ -31,7 +31,7 @@ class MojoCdmFactory : public CdmFactory {
const CdmCreatedCB& cdm_created_cb) final;
private:
- mojo::ServiceProvider* service_provider_;
+ interfaces::ServiceFactory* service_factory_;
DISALLOW_COPY_AND_ASSIGN(MojoCdmFactory);
};
diff --git a/chromium/media/mojo/services/mojo_cdm_promise.cc b/chromium/media/mojo/services/mojo_cdm_promise.cc
index 4f17f117ead..7b0dc6cf5a9 100644
--- a/chromium/media/mojo/services/mojo_cdm_promise.cc
+++ b/chromium/media/mojo/services/mojo_cdm_promise.cc
@@ -14,13 +14,15 @@
namespace media {
-static mojo::CdmPromiseResultPtr GetRejectResult(
+static interfaces::CdmPromiseResultPtr GetRejectResult(
MediaKeys::Exception exception,
uint32_t system_code,
const std::string& error_message) {
- mojo::CdmPromiseResultPtr cdm_promise_result(mojo::CdmPromiseResult::New());
+ interfaces::CdmPromiseResultPtr cdm_promise_result(
+ interfaces::CdmPromiseResult::New());
cdm_promise_result->success = false;
- cdm_promise_result->exception = static_cast<mojo::CdmException>(exception);
+ cdm_promise_result->exception =
+ static_cast<interfaces::CdmException>(exception);
cdm_promise_result->system_code = system_code;
cdm_promise_result->error_message = error_message;
return cdm_promise_result.Pass();
@@ -41,7 +43,8 @@ MojoCdmPromise<T...>::~MojoCdmPromise() {
template <typename... T>
void MojoCdmPromise<T...>::resolve(const T&... result) {
MarkPromiseSettled();
- mojo::CdmPromiseResultPtr cdm_promise_result(mojo::CdmPromiseResult::New());
+ interfaces::CdmPromiseResultPtr cdm_promise_result(
+ interfaces::CdmPromiseResult::New());
cdm_promise_result->success = true;
callback_.Run(cdm_promise_result.Pass(),
MojoTypeTrait<T>::MojoType::From(result)...);
diff --git a/chromium/media/mojo/services/mojo_cdm_promise.h b/chromium/media/mojo/services/mojo_cdm_promise.h
index 9e6a507a031..8492f22fee7 100644
--- a/chromium/media/mojo/services/mojo_cdm_promise.h
+++ b/chromium/media/mojo/services/mojo_cdm_promise.h
@@ -17,7 +17,7 @@ namespace media {
template <typename... T>
class MojoCdmPromise : public CdmPromiseTemplate<T...> {
public:
- typedef mojo::Callback<void(mojo::CdmPromiseResultPtr,
+ typedef mojo::Callback<void(interfaces::CdmPromiseResultPtr,
typename MojoTypeTrait<T>::MojoType...)>
CallbackType;
diff --git a/chromium/media/mojo/services/mojo_cdm_service.cc b/chromium/media/mojo/services/mojo_cdm_service.cc
index 5199d8aa1d8..1d614c57920 100644
--- a/chromium/media/mojo/services/mojo_cdm_service.cc
+++ b/chromium/media/mojo/services/mojo_cdm_service.cc
@@ -21,10 +21,10 @@ using NewSessionMojoCdmPromise = MojoCdmPromise<std::string>;
using SimpleMojoCdmPromise = MojoCdmPromise<>;
MojoCdmService::MojoCdmService(
- MojoCdmServiceContext* context,
+ base::WeakPtr<MojoCdmServiceContext> context,
mojo::ServiceProvider* service_provider,
CdmFactory* cdm_factory,
- mojo::InterfaceRequest<mojo::ContentDecryptionModule> request)
+ mojo::InterfaceRequest<interfaces::ContentDecryptionModule> request)
: binding_(this, request.Pass()),
context_(context),
service_provider_(service_provider),
@@ -36,20 +36,21 @@ MojoCdmService::MojoCdmService(
}
MojoCdmService::~MojoCdmService() {
- if (cdm_id_ != CdmContext::kInvalidCdmId)
+ if (cdm_id_ != CdmContext::kInvalidCdmId && context_)
context_->UnregisterCdm(cdm_id_);
}
-void MojoCdmService::SetClient(mojo::ContentDecryptionModuleClientPtr client) {
+void MojoCdmService::SetClient(
+ interfaces::ContentDecryptionModuleClientPtr client) {
client_ = client.Pass();
}
void MojoCdmService::Initialize(
const mojo::String& key_system,
const mojo::String& security_origin,
- mojo::CdmConfigPtr cdm_config,
+ interfaces::CdmConfigPtr cdm_config,
int32_t cdm_id,
- const mojo::Callback<void(mojo::CdmPromiseResultPtr)>& callback) {
+ const mojo::Callback<void(interfaces::CdmPromiseResultPtr)>& callback) {
DVLOG(1) << __FUNCTION__ << ": " << key_system;
DCHECK(!cdm_);
DCHECK_NE(CdmContext::kInvalidCdmId, cdm_id);
@@ -69,7 +70,7 @@ void MojoCdmService::Initialize(
void MojoCdmService::SetServerCertificate(
mojo::Array<uint8_t> certificate_data,
- const mojo::Callback<void(mojo::CdmPromiseResultPtr)>& callback) {
+ const mojo::Callback<void(interfaces::CdmPromiseResultPtr)>& callback) {
DVLOG(2) << __FUNCTION__;
cdm_->SetServerCertificate(
certificate_data.storage(),
@@ -77,10 +78,10 @@ void MojoCdmService::SetServerCertificate(
}
void MojoCdmService::CreateSessionAndGenerateRequest(
- mojo::ContentDecryptionModule::SessionType session_type,
- mojo::ContentDecryptionModule::InitDataType init_data_type,
+ interfaces::ContentDecryptionModule::SessionType session_type,
+ interfaces::ContentDecryptionModule::InitDataType init_data_type,
mojo::Array<uint8_t> init_data,
- const mojo::Callback<void(mojo::CdmPromiseResultPtr, mojo::String)>&
+ const mojo::Callback<void(interfaces::CdmPromiseResultPtr, mojo::String)>&
callback) {
DVLOG(2) << __FUNCTION__;
cdm_->CreateSessionAndGenerateRequest(
@@ -90,9 +91,9 @@ void MojoCdmService::CreateSessionAndGenerateRequest(
}
void MojoCdmService::LoadSession(
- mojo::ContentDecryptionModule::SessionType session_type,
+ interfaces::ContentDecryptionModule::SessionType session_type,
const mojo::String& session_id,
- const mojo::Callback<void(mojo::CdmPromiseResultPtr, mojo::String)>&
+ const mojo::Callback<void(interfaces::CdmPromiseResultPtr, mojo::String)>&
callback) {
DVLOG(2) << __FUNCTION__;
cdm_->LoadSession(static_cast<MediaKeys::SessionType>(session_type),
@@ -103,7 +104,7 @@ void MojoCdmService::LoadSession(
void MojoCdmService::UpdateSession(
const mojo::String& session_id,
mojo::Array<uint8_t> response,
- const mojo::Callback<void(mojo::CdmPromiseResultPtr)>& callback) {
+ const mojo::Callback<void(interfaces::CdmPromiseResultPtr)>& callback) {
DVLOG(2) << __FUNCTION__;
cdm_->UpdateSession(
session_id.To<std::string>(), response.storage(),
@@ -112,7 +113,7 @@ void MojoCdmService::UpdateSession(
void MojoCdmService::CloseSession(
const mojo::String& session_id,
- const mojo::Callback<void(mojo::CdmPromiseResultPtr)>& callback) {
+ const mojo::Callback<void(interfaces::CdmPromiseResultPtr)>& callback) {
DVLOG(2) << __FUNCTION__;
cdm_->CloseSession(session_id.To<std::string>(),
make_scoped_ptr(new SimpleMojoCdmPromise(callback)));
@@ -120,14 +121,14 @@ void MojoCdmService::CloseSession(
void MojoCdmService::RemoveSession(
const mojo::String& session_id,
- const mojo::Callback<void(mojo::CdmPromiseResultPtr)>& callback) {
+ const mojo::Callback<void(interfaces::CdmPromiseResultPtr)>& callback) {
DVLOG(2) << __FUNCTION__;
cdm_->RemoveSession(session_id.To<std::string>(),
make_scoped_ptr(new SimpleMojoCdmPromise(callback)));
}
void MojoCdmService::GetDecryptor(
- mojo::InterfaceRequest<mojo::Decryptor> decryptor) {
+ mojo::InterfaceRequest<interfaces::Decryptor> decryptor) {
NOTIMPLEMENTED();
}
@@ -141,7 +142,7 @@ void MojoCdmService::OnCdmCreated(int cdm_id,
const std::string& error_message) {
// TODO(xhwang): This should not happen when KeySystemInfo is properly
// populated. See http://crbug.com/469366
- if (!cdm) {
+ if (!cdm || !context_) {
promise->reject(MediaKeys::NOT_SUPPORTED_ERROR, 0, error_message);
return;
}
@@ -157,19 +158,19 @@ void MojoCdmService::OnSessionMessage(const std::string& session_id,
const std::vector<uint8_t>& message,
const GURL& legacy_destination_url) {
DVLOG(2) << __FUNCTION__;
- client_->OnSessionMessage(session_id,
- static_cast<mojo::CdmMessageType>(message_type),
- mojo::Array<uint8_t>::From(message),
- mojo::String::From(legacy_destination_url));
+ client_->OnSessionMessage(
+ session_id, static_cast<interfaces::CdmMessageType>(message_type),
+ mojo::Array<uint8_t>::From(message),
+ mojo::String::From(legacy_destination_url));
}
void MojoCdmService::OnSessionKeysChange(const std::string& session_id,
bool has_additional_usable_key,
CdmKeysInfo keys_info) {
DVLOG(2) << __FUNCTION__;
- mojo::Array<mojo::CdmKeyInformationPtr> keys_data;
+ mojo::Array<interfaces::CdmKeyInformationPtr> keys_data;
for (const auto& key : keys_info)
- keys_data.push_back(mojo::CdmKeyInformation::From(*key));
+ keys_data.push_back(interfaces::CdmKeyInformation::From(*key));
client_->OnSessionKeysChange(session_id, has_additional_usable_key,
keys_data.Pass());
}
@@ -192,9 +193,9 @@ void MojoCdmService::OnLegacySessionError(const std::string& session_id,
uint32_t system_code,
const std::string& error_message) {
DVLOG(2) << __FUNCTION__;
- client_->OnLegacySessionError(session_id,
- static_cast<mojo::CdmException>(exception),
- system_code, error_message);
+ client_->OnLegacySessionError(
+ session_id, static_cast<interfaces::CdmException>(exception), system_code,
+ error_message);
}
} // namespace media
diff --git a/chromium/media/mojo/services/mojo_cdm_service.h b/chromium/media/mojo/services/mojo_cdm_service.h
index 951d701f704..a2424c8a1a8 100644
--- a/chromium/media/mojo/services/mojo_cdm_service.h
+++ b/chromium/media/mojo/services/mojo_cdm_service.h
@@ -12,57 +12,64 @@
#include "media/base/media_keys.h"
#include "media/mojo/interfaces/content_decryption_module.mojom.h"
#include "media/mojo/services/mojo_cdm_promise.h"
+#include "media/mojo/services/mojo_cdm_service_context.h"
#include "mojo/application/public/interfaces/service_provider.mojom.h"
#include "mojo/public/cpp/bindings/strong_binding.h"
namespace media {
class CdmFactory;
-class MojoCdmServiceContext;
-// A mojo::ContentDecryptionModule implementation backed by a media::MediaKeys.
-class MojoCdmService : public mojo::ContentDecryptionModule {
+// A interfaces::ContentDecryptionModule implementation backed by a
+// media::MediaKeys.
+class MojoCdmService : public interfaces::ContentDecryptionModule {
public:
// Constructs a MojoCdmService and strongly binds it to the |request|.
- MojoCdmService(MojoCdmServiceContext* context,
- mojo::ServiceProvider* service_provider,
- CdmFactory* cdm_factory,
- mojo::InterfaceRequest<mojo::ContentDecryptionModule> request);
+ MojoCdmService(
+ base::WeakPtr<MojoCdmServiceContext> context,
+ mojo::ServiceProvider* service_provider,
+ CdmFactory* cdm_factory,
+ mojo::InterfaceRequest<interfaces::ContentDecryptionModule> request);
~MojoCdmService() final;
- // mojo::ContentDecryptionModule implementation.
- void SetClient(mojo::ContentDecryptionModuleClientPtr client) final;
- void Initialize(
- const mojo::String& key_system,
- const mojo::String& security_origin,
- mojo::CdmConfigPtr cdm_config,
- int32_t cdm_id,
- const mojo::Callback<void(mojo::CdmPromiseResultPtr)>& callback) final;
+ // interfaces::ContentDecryptionModule implementation.
+ void SetClient(interfaces::ContentDecryptionModuleClientPtr client) final;
+ void Initialize(const mojo::String& key_system,
+ const mojo::String& security_origin,
+ interfaces::CdmConfigPtr cdm_config,
+ int32_t cdm_id,
+ const mojo::Callback<void(interfaces::CdmPromiseResultPtr)>&
+ callback) final;
void SetServerCertificate(
mojo::Array<uint8_t> certificate_data,
- const mojo::Callback<void(mojo::CdmPromiseResultPtr)>& callback) final;
+ const mojo::Callback<void(interfaces::CdmPromiseResultPtr)>& callback)
+ final;
void CreateSessionAndGenerateRequest(
- mojo::ContentDecryptionModule::SessionType session_type,
- mojo::ContentDecryptionModule::InitDataType init_data_type,
+ interfaces::ContentDecryptionModule::SessionType session_type,
+ interfaces::ContentDecryptionModule::InitDataType init_data_type,
mojo::Array<uint8_t> init_data,
- const mojo::Callback<void(mojo::CdmPromiseResultPtr, mojo::String)>&
+ const mojo::Callback<void(interfaces::CdmPromiseResultPtr, mojo::String)>&
+ callback) final;
+ void LoadSession(
+ interfaces::ContentDecryptionModule::SessionType session_type,
+ const mojo::String& session_id,
+ const mojo::Callback<void(interfaces::CdmPromiseResultPtr, mojo::String)>&
callback) final;
- void LoadSession(mojo::ContentDecryptionModule::SessionType session_type,
- const mojo::String& session_id,
- const mojo::Callback<void(mojo::CdmPromiseResultPtr,
- mojo::String)>& callback) final;
void UpdateSession(
const mojo::String& session_id,
mojo::Array<uint8_t> response,
- const mojo::Callback<void(mojo::CdmPromiseResultPtr)>& callback) final;
- void CloseSession(
- const mojo::String& session_id,
- const mojo::Callback<void(mojo::CdmPromiseResultPtr)>& callback) final;
+ const mojo::Callback<void(interfaces::CdmPromiseResultPtr)>& callback)
+ final;
+ void CloseSession(const mojo::String& session_id,
+ const mojo::Callback<void(interfaces::CdmPromiseResultPtr)>&
+ callback) final;
void RemoveSession(
const mojo::String& session_id,
- const mojo::Callback<void(mojo::CdmPromiseResultPtr)>& callback) final;
- void GetDecryptor(mojo::InterfaceRequest<mojo::Decryptor> decryptor) final;
+ const mojo::Callback<void(interfaces::CdmPromiseResultPtr)>& callback)
+ final;
+ void GetDecryptor(
+ mojo::InterfaceRequest<interfaces::Decryptor> decryptor) final;
// Get CdmContext to be used by the media pipeline.
CdmContext* GetCdmContext();
@@ -90,8 +97,9 @@ class MojoCdmService : public mojo::ContentDecryptionModule {
uint32_t system_code,
const std::string& error_message);
- mojo::StrongBinding<mojo::ContentDecryptionModule> binding_;
- MojoCdmServiceContext* context_;
+ mojo::StrongBinding<interfaces::ContentDecryptionModule> binding_;
+ base::WeakPtr<MojoCdmServiceContext> context_;
+
mojo::ServiceProvider* service_provider_;
CdmFactory* cdm_factory_;
scoped_ptr<MediaKeys> cdm_;
@@ -99,7 +107,7 @@ class MojoCdmService : public mojo::ContentDecryptionModule {
// Set to a valid CDM ID if the |cdm_| is successfully created.
int cdm_id_;
- mojo::ContentDecryptionModuleClientPtr client_;
+ interfaces::ContentDecryptionModuleClientPtr client_;
base::WeakPtr<MojoCdmService> weak_this_;
base::WeakPtrFactory<MojoCdmService> weak_factory_;
diff --git a/chromium/media/mojo/services/mojo_cdm_service_context.cc b/chromium/media/mojo/services/mojo_cdm_service_context.cc
index 5431cf3d539..297d5831af2 100644
--- a/chromium/media/mojo/services/mojo_cdm_service_context.cc
+++ b/chromium/media/mojo/services/mojo_cdm_service_context.cc
@@ -6,15 +6,19 @@
#include "base/bind.h"
#include "base/logging.h"
+#include "media/mojo/services/mojo_cdm_service.h"
namespace media {
-MojoCdmServiceContext::MojoCdmServiceContext() {
-}
+MojoCdmServiceContext::MojoCdmServiceContext() : weak_ptr_factory_(this) {}
MojoCdmServiceContext::~MojoCdmServiceContext() {
}
+base::WeakPtr<MojoCdmServiceContext> MojoCdmServiceContext::GetWeakPtr() {
+ return weak_ptr_factory_.GetWeakPtr();
+}
+
void MojoCdmServiceContext::RegisterCdm(int cdm_id,
MojoCdmService* cdm_service) {
DCHECK(!cdm_services_.count(cdm_id));
diff --git a/chromium/media/mojo/services/mojo_cdm_service_context.h b/chromium/media/mojo/services/mojo_cdm_service_context.h
index dc55ad312c7..f3c2caf2331 100644
--- a/chromium/media/mojo/services/mojo_cdm_service_context.h
+++ b/chromium/media/mojo/services/mojo_cdm_service_context.h
@@ -5,20 +5,25 @@
#ifndef MEDIA_MOJO_SERVICES_MOJO_CDM_SERVICE_CONTEXT_H_
#define MEDIA_MOJO_SERVICES_MOJO_CDM_SERVICE_CONTEXT_H_
-#include "base/containers/scoped_ptr_hash_map.h"
+#include <map>
+
#include "base/macros.h"
+#include "base/memory/weak_ptr.h"
#include "media/base/cdm_context.h"
#include "media/base/media_export.h"
-#include "media/mojo/services/mojo_cdm_service.h"
namespace media {
+class MojoCdmService;
+
// A class that creates, owns and manages all MojoCdmService instances.
class MEDIA_EXPORT MojoCdmServiceContext : public CdmContextProvider {
public:
MojoCdmServiceContext();
~MojoCdmServiceContext() override;
+ base::WeakPtr<MojoCdmServiceContext> GetWeakPtr();
+
// Registers The |cdm_service| with |cdm_id|.
void RegisterCdm(int cdm_id, MojoCdmService* cdm_service);
@@ -36,6 +41,9 @@ class MEDIA_EXPORT MojoCdmServiceContext : public CdmContextProvider {
// A map between CDM ID and MojoCdmService.
std::map<int, MojoCdmService*> cdm_services_;
+ // NOTE: Weak pointers must be invalidated before all other member variables.
+ base::WeakPtrFactory<MojoCdmServiceContext> weak_ptr_factory_;
+
DISALLOW_COPY_AND_ASSIGN(MojoCdmServiceContext);
};
diff --git a/chromium/media/mojo/services/mojo_demuxer_stream_adapter.cc b/chromium/media/mojo/services/mojo_demuxer_stream_adapter.cc
index 9817ae3e2d5..eeaa6343cb0 100644
--- a/chromium/media/mojo/services/mojo_demuxer_stream_adapter.cc
+++ b/chromium/media/mojo/services/mojo_demuxer_stream_adapter.cc
@@ -13,7 +13,7 @@
namespace media {
MojoDemuxerStreamAdapter::MojoDemuxerStreamAdapter(
- mojo::DemuxerStreamPtr demuxer_stream,
+ interfaces::DemuxerStreamPtr demuxer_stream,
const base::Closure& stream_ready_cb)
: demuxer_stream_(demuxer_stream.Pass()),
stream_ready_cb_(stream_ready_cb),
@@ -68,10 +68,10 @@ VideoRotation MojoDemuxerStreamAdapter::video_rotation() {
// TODO(xhwang): Pass liveness here.
void MojoDemuxerStreamAdapter::OnStreamReady(
- mojo::DemuxerStream::Type type,
- mojo::ScopedDataPipeConsumerHandle pipe,
- mojo::AudioDecoderConfigPtr audio_config,
- mojo::VideoDecoderConfigPtr video_config) {
+ interfaces::DemuxerStream::Type type,
+ mojo::ScopedDataPipeConsumerHandle pipe,
+ interfaces::AudioDecoderConfigPtr audio_config,
+ interfaces::VideoDecoderConfigPtr video_config) {
DVLOG(1) << __FUNCTION__;
DCHECK(pipe.is_valid());
DCHECK_EQ(DemuxerStream::UNKNOWN, type_);
@@ -84,27 +84,27 @@ void MojoDemuxerStreamAdapter::OnStreamReady(
}
void MojoDemuxerStreamAdapter::OnBufferReady(
- mojo::DemuxerStream::Status status,
- mojo::MediaDecoderBufferPtr buffer,
- mojo::AudioDecoderConfigPtr audio_config,
- mojo::VideoDecoderConfigPtr video_config) {
+ interfaces::DemuxerStream::Status status,
+ interfaces::DecoderBufferPtr buffer,
+ interfaces::AudioDecoderConfigPtr audio_config,
+ interfaces::VideoDecoderConfigPtr video_config) {
DVLOG(3) << __FUNCTION__;
DCHECK(!read_cb_.is_null());
DCHECK_NE(type_, DemuxerStream::UNKNOWN);
DCHECK(stream_pipe_.is_valid());
- if (status == mojo::DemuxerStream::STATUS_CONFIG_CHANGED) {
+ if (status == interfaces::DemuxerStream::STATUS_CONFIG_CHANGED) {
UpdateConfig(audio_config.Pass(), video_config.Pass());
base::ResetAndReturn(&read_cb_).Run(DemuxerStream::kConfigChanged, nullptr);
return;
}
- if (status == mojo::DemuxerStream::STATUS_ABORTED) {
+ if (status == interfaces::DemuxerStream::STATUS_ABORTED) {
base::ResetAndReturn(&read_cb_).Run(DemuxerStream::kAborted, nullptr);
return;
}
- DCHECK_EQ(status, mojo::DemuxerStream::STATUS_OK);
+ DCHECK_EQ(status, interfaces::DemuxerStream::STATUS_OK);
scoped_refptr<DecoderBuffer> media_buffer(
buffer.To<scoped_refptr<DecoderBuffer>>());
@@ -123,8 +123,8 @@ void MojoDemuxerStreamAdapter::OnBufferReady(
}
void MojoDemuxerStreamAdapter::UpdateConfig(
- mojo::AudioDecoderConfigPtr audio_config,
- mojo::VideoDecoderConfigPtr video_config) {
+ interfaces::AudioDecoderConfigPtr audio_config,
+ interfaces::VideoDecoderConfigPtr video_config) {
DCHECK_NE(type_, DemuxerStream::UNKNOWN);
switch(type_) {
diff --git a/chromium/media/mojo/services/mojo_demuxer_stream_adapter.h b/chromium/media/mojo/services/mojo_demuxer_stream_adapter.h
index ee7c64de4b3..b6b4d9372ed 100644
--- a/chromium/media/mojo/services/mojo_demuxer_stream_adapter.h
+++ b/chromium/media/mojo/services/mojo_demuxer_stream_adapter.h
@@ -17,16 +17,18 @@ namespace media {
// This class acts as a MojoRendererService-side stub for a real DemuxerStream
// that is part of a Pipeline in a remote application. Roughly speaking, it
-// takes a mojo::DemuxerStreamPtr and exposes it as a DemuxerStream for use by
+// takes a interfaces::DemuxerStreamPtr and exposes it as a DemuxerStream for
+// use by
// media components.
class MojoDemuxerStreamAdapter : public DemuxerStream {
public:
- // |demuxer_stream| is connected to the mojo::DemuxerStream that |this| will
+ // |demuxer_stream| is connected to the interfaces::DemuxerStream that |this|
+ // will
// become the client of.
// |stream_ready_cb| will be invoked when |demuxer_stream| has fully
// initialized and |this| is ready for use.
// NOTE: Illegal to call any methods until |stream_ready_cb| is invoked.
- MojoDemuxerStreamAdapter(mojo::DemuxerStreamPtr demuxer_stream,
+ MojoDemuxerStreamAdapter(interfaces::DemuxerStreamPtr demuxer_stream,
const base::Closure& stream_ready_cb);
~MojoDemuxerStreamAdapter() override;
@@ -40,23 +42,23 @@ class MojoDemuxerStreamAdapter : public DemuxerStream {
VideoRotation video_rotation() override;
private:
- void OnStreamReady(mojo::DemuxerStream::Type type,
+ void OnStreamReady(interfaces::DemuxerStream::Type type,
mojo::ScopedDataPipeConsumerHandle pipe,
- mojo::AudioDecoderConfigPtr audio_config,
- mojo::VideoDecoderConfigPtr video_config);
+ interfaces::AudioDecoderConfigPtr audio_config,
+ interfaces::VideoDecoderConfigPtr video_config);
// The callback from |demuxer_stream_| that a read operation has completed.
// |read_cb| is a callback from the client who invoked Read() on |this|.
- void OnBufferReady(mojo::DemuxerStream::Status status,
- mojo::MediaDecoderBufferPtr buffer,
- mojo::AudioDecoderConfigPtr audio_config,
- mojo::VideoDecoderConfigPtr video_config);
+ void OnBufferReady(interfaces::DemuxerStream::Status status,
+ interfaces::DecoderBufferPtr buffer,
+ interfaces::AudioDecoderConfigPtr audio_config,
+ interfaces::VideoDecoderConfigPtr video_config);
- void UpdateConfig(mojo::AudioDecoderConfigPtr audio_config,
- mojo::VideoDecoderConfigPtr video_config);
+ void UpdateConfig(interfaces::AudioDecoderConfigPtr audio_config,
+ interfaces::VideoDecoderConfigPtr video_config);
// See constructor for descriptions.
- mojo::DemuxerStreamPtr demuxer_stream_;
+ interfaces::DemuxerStreamPtr demuxer_stream_;
base::Closure stream_ready_cb_;
// The last ReadCB received through a call to Read().
diff --git a/chromium/media/mojo/services/mojo_demuxer_stream_impl.cc b/chromium/media/mojo/services/mojo_demuxer_stream_impl.cc
index 65b87e50b01..ffe92714fa1 100644
--- a/chromium/media/mojo/services/mojo_demuxer_stream_impl.cc
+++ b/chromium/media/mojo/services/mojo_demuxer_stream_impl.cc
@@ -15,9 +15,8 @@ namespace media {
MojoDemuxerStreamImpl::MojoDemuxerStreamImpl(
media::DemuxerStream* stream,
- mojo::InterfaceRequest<mojo::DemuxerStream> request)
- : binding_(this, request.Pass()), stream_(stream), weak_factory_(this) {
-}
+ mojo::InterfaceRequest<interfaces::DemuxerStream> request)
+ : binding_(this, request.Pass()), stream_(stream), weak_factory_(this) {}
MojoDemuxerStreamImpl::~MojoDemuxerStreamImpl() {
}
@@ -47,20 +46,20 @@ void MojoDemuxerStreamImpl::Initialize(const InitializeCallback& callback) {
stream_pipe_ = data_pipe.producer_handle.Pass();
// Prepare the initial config.
- mojo::AudioDecoderConfigPtr audio_config;
- mojo::VideoDecoderConfigPtr video_config;
+ interfaces::AudioDecoderConfigPtr audio_config;
+ interfaces::VideoDecoderConfigPtr video_config;
if (stream_->type() == media::DemuxerStream::AUDIO) {
audio_config =
- mojo::AudioDecoderConfig::From(stream_->audio_decoder_config());
+ interfaces::AudioDecoderConfig::From(stream_->audio_decoder_config());
} else if (stream_->type() == media::DemuxerStream::VIDEO) {
video_config =
- mojo::VideoDecoderConfig::From(stream_->video_decoder_config());
+ interfaces::VideoDecoderConfig::From(stream_->video_decoder_config());
} else {
NOTREACHED() << "Unsupported stream type: " << stream_->type();
return;
}
- callback.Run(static_cast<mojo::DemuxerStream::Type>(stream_->type()),
+ callback.Run(static_cast<interfaces::DemuxerStream::Type>(stream_->type()),
data_pipe.consumer_handle.Pass(), audio_config.Pass(),
video_config.Pass());
}
@@ -74,8 +73,8 @@ void MojoDemuxerStreamImpl::OnBufferReady(
const ReadCallback& callback,
media::DemuxerStream::Status status,
const scoped_refptr<media::DecoderBuffer>& buffer) {
- mojo::AudioDecoderConfigPtr audio_config;
- mojo::VideoDecoderConfigPtr video_config;
+ interfaces::AudioDecoderConfigPtr audio_config;
+ interfaces::VideoDecoderConfigPtr video_config;
if (status == media::DemuxerStream::kConfigChanged) {
DVLOG(2) << __FUNCTION__ << ": ConfigChange!";
@@ -83,24 +82,24 @@ void MojoDemuxerStreamImpl::OnBufferReady(
// Status obtained via Run() below.
if (stream_->type() == media::DemuxerStream::AUDIO) {
audio_config =
- mojo::AudioDecoderConfig::From(stream_->audio_decoder_config());
+ interfaces::AudioDecoderConfig::From(stream_->audio_decoder_config());
} else if (stream_->type() == media::DemuxerStream::VIDEO) {
video_config =
- mojo::VideoDecoderConfig::From(stream_->video_decoder_config());
+ interfaces::VideoDecoderConfig::From(stream_->video_decoder_config());
} else {
NOTREACHED() << "Unsupported config change encountered for type: "
<< stream_->type();
}
- callback.Run(mojo::DemuxerStream::STATUS_CONFIG_CHANGED,
- mojo::MediaDecoderBufferPtr(), audio_config.Pass(),
+ callback.Run(interfaces::DemuxerStream::STATUS_CONFIG_CHANGED,
+ interfaces::DecoderBufferPtr(), audio_config.Pass(),
video_config.Pass());
return;
}
if (status == media::DemuxerStream::kAborted) {
- callback.Run(mojo::DemuxerStream::STATUS_ABORTED,
- mojo::MediaDecoderBufferPtr(), audio_config.Pass(),
+ callback.Run(interfaces::DemuxerStream::STATUS_ABORTED,
+ interfaces::DecoderBufferPtr(), audio_config.Pass(),
video_config.Pass());
return;
}
@@ -119,8 +118,8 @@ void MojoDemuxerStreamImpl::OnBufferReady(
// TODO(dalecurtis): Once we can write framed data to the DataPipe, fill via
// the producer handle and then read more to keep the pipe full. Waiting for
// space can be accomplished using an AsyncWaiter.
- callback.Run(static_cast<mojo::DemuxerStream::Status>(status),
- mojo::MediaDecoderBuffer::From(buffer), audio_config.Pass(),
+ callback.Run(static_cast<interfaces::DemuxerStream::Status>(status),
+ interfaces::DecoderBuffer::From(buffer), audio_config.Pass(),
video_config.Pass());
}
diff --git a/chromium/media/mojo/services/mojo_demuxer_stream_impl.h b/chromium/media/mojo/services/mojo_demuxer_stream_impl.h
index c95129c2afd..09021140c44 100644
--- a/chromium/media/mojo/services/mojo_demuxer_stream_impl.h
+++ b/chromium/media/mojo/services/mojo_demuxer_stream_impl.h
@@ -15,17 +15,19 @@ namespace media {
class DemuxerStream;
// This class wraps a media::DemuxerStream and exposes it as a
-// mojo::DemuxerStream for use as a proxy from remote applications.
-class MojoDemuxerStreamImpl : public mojo::DemuxerStream {
+// interfaces::DemuxerStream for use as a proxy from remote applications.
+class MojoDemuxerStreamImpl : public interfaces::DemuxerStream {
public:
// |stream| is the underlying DemuxerStream we are proxying for.
// Note: |this| does not take ownership of |stream|.
- MojoDemuxerStreamImpl(media::DemuxerStream* stream,
- mojo::InterfaceRequest<mojo::DemuxerStream> request);
+ MojoDemuxerStreamImpl(
+ media::DemuxerStream* stream,
+ mojo::InterfaceRequest<interfaces::DemuxerStream> request);
~MojoDemuxerStreamImpl() override;
- // mojo::DemuxerStream implementation.
- // InitializeCallback and ReadCallback are defined in mojo::DemuxerStream.
+ // interfaces::DemuxerStream implementation.
+ // InitializeCallback and ReadCallback are defined in
+ // interfaces::DemuxerStream.
void Initialize(const InitializeCallback& callback) override;
void Read(const ReadCallback& callback) override;
@@ -34,7 +36,7 @@ class MojoDemuxerStreamImpl : public mojo::DemuxerStream {
media::DemuxerStream::Status status,
const scoped_refptr<media::DecoderBuffer>& buffer);
- mojo::StrongBinding<mojo::DemuxerStream> binding_;
+ mojo::StrongBinding<interfaces::DemuxerStream> binding_;
// See constructor. We do not own |stream_|.
media::DemuxerStream* stream_;
diff --git a/chromium/media/mojo/services/mojo_media_application.cc b/chromium/media/mojo/services/mojo_media_application.cc
index ecadb591090..566f43f3640 100644
--- a/chromium/media/mojo/services/mojo_media_application.cc
+++ b/chromium/media/mojo/services/mojo_media_application.cc
@@ -5,12 +5,8 @@
#include "media/mojo/services/mojo_media_application.h"
#include "base/logging.h"
-#include "media/base/cdm_factory.h"
#include "media/base/media_log.h"
-#include "media/base/renderer_factory.h"
-#include "media/mojo/services/mojo_cdm_service.h"
-#include "media/mojo/services/mojo_media_client.h"
-#include "media/mojo/services/mojo_renderer_service.h"
+#include "media/mojo/services/service_factory_impl.h"
#include "mojo/application/public/cpp/application_connection.h"
#include "mojo/application/public/cpp/application_impl.h"
@@ -29,13 +25,15 @@ scoped_ptr<mojo::ApplicationDelegate> MojoMediaApplication::CreateApp() {
}
// TODO(xhwang): Hook up MediaLog when possible.
-MojoMediaApplication::MojoMediaApplication() : media_log_(new MediaLog()) {
-}
+MojoMediaApplication::MojoMediaApplication()
+ : app_impl_(nullptr), media_log_(new MediaLog()) {}
MojoMediaApplication::~MojoMediaApplication() {
}
void MojoMediaApplication::Initialize(mojo::ApplicationImpl* app) {
+ app_impl_ = app;
+
logging::LoggingSettings settings;
settings.logging_dest = logging::LOG_TO_SYSTEM_DEBUG_LOG;
logging::InitLogging(settings);
@@ -45,38 +43,17 @@ void MojoMediaApplication::Initialize(mojo::ApplicationImpl* app) {
bool MojoMediaApplication::ConfigureIncomingConnection(
mojo::ApplicationConnection* connection) {
- connection->AddService<mojo::ContentDecryptionModule>(this);
- connection->AddService<mojo::MediaRenderer>(this);
+ connection->AddService<interfaces::ServiceFactory>(this);
return true;
}
void MojoMediaApplication::Create(
mojo::ApplicationConnection* connection,
- mojo::InterfaceRequest<mojo::ContentDecryptionModule> request) {
+ mojo::InterfaceRequest<interfaces::ServiceFactory> request) {
// The created object is owned by the pipe.
- new MojoCdmService(&cdm_service_context_, connection->GetServiceProvider(),
- GetCdmFactory(), request.Pass());
-}
-
-void MojoMediaApplication::Create(
- mojo::ApplicationConnection* connection,
- mojo::InterfaceRequest<mojo::MediaRenderer> request) {
- // The created object is owned by the pipe.
- new MojoRendererService(&cdm_service_context_, GetRendererFactory(),
- media_log_, request.Pass());
-}
-
-RendererFactory* MojoMediaApplication::GetRendererFactory() {
- if (!renderer_factory_)
- renderer_factory_ =
- MojoMediaClient::Get()->CreateRendererFactory(media_log_);
- return renderer_factory_.get();
-}
-
-CdmFactory* MojoMediaApplication::GetCdmFactory() {
- if (!cdm_factory_)
- cdm_factory_ = MojoMediaClient::Get()->CreateCdmFactory();
- return cdm_factory_.get();
+ new ServiceFactoryImpl(request.Pass(), connection->GetServiceProvider(),
+ media_log_,
+ app_impl_->app_lifetime_helper()->CreateAppRefCount());
}
} // namespace media
diff --git a/chromium/media/mojo/services/mojo_media_application.h b/chromium/media/mojo/services/mojo_media_application.h
index b1dcede54d8..3233e03d2f4 100644
--- a/chromium/media/mojo/services/mojo_media_application.h
+++ b/chromium/media/mojo/services/mojo_media_application.h
@@ -2,24 +2,19 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "base/memory/scoped_ptr.h"
-#include "media/mojo/interfaces/content_decryption_module.mojom.h"
-#include "media/mojo/interfaces/media_renderer.mojom.h"
-#include "media/mojo/services/mojo_cdm_service_context.h"
+#include "base/memory/ref_counted.h"
+#include "media/mojo/interfaces/service_factory.mojom.h"
#include "mojo/application/public/cpp/application_delegate.h"
#include "mojo/application/public/cpp/interface_factory_impl.h"
#include "url/gurl.h"
namespace media {
-class CdmFactory;
class MediaLog;
-class RendererFactory;
class MojoMediaApplication
: public mojo::ApplicationDelegate,
- public mojo::InterfaceFactory<mojo::ContentDecryptionModule>,
- public mojo::InterfaceFactory<mojo::MediaRenderer> {
+ public mojo::InterfaceFactory<interfaces::ServiceFactory> {
public:
static GURL AppUrl();
static scoped_ptr<mojo::ApplicationDelegate> CreateApp();
@@ -33,21 +28,11 @@ class MojoMediaApplication
bool ConfigureIncomingConnection(
mojo::ApplicationConnection* connection) final;
- // mojo::InterfaceFactory<mojo::ContentDecryptionModule> implementation.
- void Create(
- mojo::ApplicationConnection* connection,
- mojo::InterfaceRequest<mojo::ContentDecryptionModule> request) final;
-
- // mojo::InterfaceFactory<mojo::MediaRenderer> implementation.
+ // mojo::InterfaceFactory<interfaces::ServiceFactory> implementation.
void Create(mojo::ApplicationConnection* connection,
- mojo::InterfaceRequest<mojo::MediaRenderer> request) final;
-
- RendererFactory* GetRendererFactory();
- CdmFactory* GetCdmFactory();
+ mojo::InterfaceRequest<interfaces::ServiceFactory> request) final;
- MojoCdmServiceContext cdm_service_context_;
- scoped_ptr<RendererFactory> renderer_factory_;
- scoped_ptr<CdmFactory> cdm_factory_;
+ mojo::ApplicationImpl* app_impl_;
scoped_refptr<MediaLog> media_log_;
};
diff --git a/chromium/media/mojo/services/mojo_renderer_factory.cc b/chromium/media/mojo/services/mojo_renderer_factory.cc
index 43ba42c7d45..aa0b30895d4 100644
--- a/chromium/media/mojo/services/mojo_renderer_factory.cc
+++ b/chromium/media/mojo/services/mojo_renderer_factory.cc
@@ -5,15 +5,16 @@
#include "media/mojo/services/mojo_renderer_factory.h"
#include "base/single_thread_task_runner.h"
+#include "media/mojo/interfaces/service_factory.mojom.h"
#include "media/mojo/services/mojo_renderer_impl.h"
-#include "mojo/application/public/cpp/connect.h"
+#include "third_party/mojo/src/mojo/public/cpp/bindings/interface_request.h"
namespace media {
MojoRendererFactory::MojoRendererFactory(
- mojo::ServiceProvider* service_provider)
- : service_provider_(service_provider) {
- DCHECK(service_provider_);
+ interfaces::ServiceFactory* service_factory)
+ : service_factory_(service_factory) {
+ DCHECK(service_factory_);
}
MojoRendererFactory::~MojoRendererFactory() {
@@ -21,15 +22,16 @@ MojoRendererFactory::~MojoRendererFactory() {
scoped_ptr<Renderer> MojoRendererFactory::CreateRenderer(
const scoped_refptr<base::SingleThreadTaskRunner>& media_task_runner,
+ const scoped_refptr<base::TaskRunner>& /* worker_task_runner */,
AudioRendererSink* /* audio_renderer_sink */,
VideoRendererSink* /* video_renderer_sink */) {
- DCHECK(service_provider_);
+ DCHECK(service_factory_);
- mojo::MediaRendererPtr mojo_media_renderer;
- mojo::ConnectToService(service_provider_, &mojo_media_renderer);
+ interfaces::RendererPtr mojo_renderer;
+ service_factory_->CreateRenderer(mojo::GetProxy(&mojo_renderer));
return scoped_ptr<Renderer>(
- new MojoRendererImpl(media_task_runner, mojo_media_renderer.Pass()));
+ new MojoRendererImpl(media_task_runner, mojo_renderer.Pass()));
}
} // namespace media
diff --git a/chromium/media/mojo/services/mojo_renderer_factory.h b/chromium/media/mojo/services/mojo_renderer_factory.h
index 4c37a157642..64b68382fd6 100644
--- a/chromium/media/mojo/services/mojo_renderer_factory.h
+++ b/chromium/media/mojo/services/mojo_renderer_factory.h
@@ -7,28 +7,29 @@
#include "media/base/media_export.h"
#include "media/base/renderer_factory.h"
-#include "media/mojo/interfaces/media_renderer.mojom.h"
+#include "media/mojo/interfaces/renderer.mojom.h"
#include "third_party/mojo/src/mojo/public/cpp/bindings/interface_ptr.h"
-namespace mojo {
-class ServiceProvider;
-}
-
namespace media {
+namespace interfaces {
+class ServiceFactory;
+}
+
// The default factory class for creating MojoRendererImpl.
class MEDIA_EXPORT MojoRendererFactory : public RendererFactory {
public:
- explicit MojoRendererFactory(mojo::ServiceProvider* service_provider);
+ explicit MojoRendererFactory(interfaces::ServiceFactory* service_factory);
~MojoRendererFactory() final;
scoped_ptr<Renderer> CreateRenderer(
const scoped_refptr<base::SingleThreadTaskRunner>& media_task_runner,
+ const scoped_refptr<base::TaskRunner>& worker_task_runner,
AudioRendererSink* audio_renderer_sink,
VideoRendererSink* video_renderer_sink) final;
private:
- mojo::ServiceProvider* service_provider_;
+ interfaces::ServiceFactory* service_factory_;
DISALLOW_COPY_AND_ASSIGN(MojoRendererFactory);
};
diff --git a/chromium/media/mojo/services/mojo_renderer_impl.cc b/chromium/media/mojo/services/mojo_renderer_impl.cc
index 2791c021f29..3355d14f95c 100644
--- a/chromium/media/mojo/services/mojo_renderer_impl.cc
+++ b/chromium/media/mojo/services/mojo_renderer_impl.cc
@@ -13,15 +13,14 @@
#include "media/mojo/services/mojo_demuxer_stream_impl.h"
#include "mojo/application/public/cpp/connect.h"
#include "mojo/application/public/interfaces/service_provider.mojom.h"
-#include "third_party/mojo/src/mojo/public/cpp/bindings/interface_impl.h"
namespace media {
MojoRendererImpl::MojoRendererImpl(
const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
- mojo::MediaRendererPtr remote_media_renderer)
+ interfaces::RendererPtr remote_renderer)
: task_runner_(task_runner),
- remote_media_renderer_(remote_media_renderer.Pass()),
+ remote_renderer_(remote_renderer.Pass()),
binding_(this),
weak_factory_(this) {
DVLOG(1) << __FUNCTION__;
@@ -30,7 +29,7 @@ MojoRendererImpl::MojoRendererImpl(
MojoRendererImpl::~MojoRendererImpl() {
DVLOG(1) << __FUNCTION__;
DCHECK(task_runner_->BelongsToCurrentThread());
- // Connection to |remote_media_renderer_| will error-out here.
+ // Connection to |remote_renderer_| will error-out here.
}
// TODO(xhwang): Support |waiting_for_decryption_key_cb| if needed.
@@ -52,27 +51,25 @@ void MojoRendererImpl::Initialize(
error_cb_ = error_cb;
buffering_state_cb_ = buffering_state_cb;
- // Create audio and video mojo::DemuxerStream and bind its lifetime to the
- // pipe.
+ // Create audio and video interfaces::DemuxerStream and bind its lifetime to
+ // the pipe.
DemuxerStream* const audio =
demuxer_stream_provider_->GetStream(DemuxerStream::AUDIO);
DemuxerStream* const video =
demuxer_stream_provider_->GetStream(DemuxerStream::VIDEO);
- mojo::DemuxerStreamPtr audio_stream;
+ interfaces::DemuxerStreamPtr audio_stream;
if (audio)
new MojoDemuxerStreamImpl(audio, GetProxy(&audio_stream));
- mojo::DemuxerStreamPtr video_stream;
+ interfaces::DemuxerStreamPtr video_stream;
if (video)
new MojoDemuxerStreamImpl(video, GetProxy(&video_stream));
- mojo::MediaRendererClientPtr client_ptr;
+ interfaces::RendererClientPtr client_ptr;
binding_.Bind(GetProxy(&client_ptr));
- remote_media_renderer_->Initialize(
- client_ptr.Pass(),
- audio_stream.Pass(),
- video_stream.Pass(),
+ remote_renderer_->Initialize(
+ client_ptr.Pass(), audio_stream.Pass(), video_stream.Pass(),
BindToCurrentLoop(base::Bind(&MojoRendererImpl::OnInitialized,
weak_factory_.GetWeakPtr())));
}
@@ -90,13 +87,13 @@ void MojoRendererImpl::SetCdm(CdmContext* cdm_context,
return;
}
- remote_media_renderer_->SetCdm(cdm_id, cdm_attached_cb);
+ remote_renderer_->SetCdm(cdm_id, cdm_attached_cb);
}
void MojoRendererImpl::Flush(const base::Closure& flush_cb) {
DVLOG(2) << __FUNCTION__;
DCHECK(task_runner_->BelongsToCurrentThread());
- remote_media_renderer_->Flush(flush_cb);
+ remote_renderer_->Flush(flush_cb);
}
void MojoRendererImpl::StartPlayingFrom(base::TimeDelta time) {
@@ -108,19 +105,19 @@ void MojoRendererImpl::StartPlayingFrom(base::TimeDelta time) {
time_ = time;
}
- remote_media_renderer_->StartPlayingFrom(time.InMicroseconds());
+ remote_renderer_->StartPlayingFrom(time.InMicroseconds());
}
void MojoRendererImpl::SetPlaybackRate(double playback_rate) {
DVLOG(2) << __FUNCTION__;
DCHECK(task_runner_->BelongsToCurrentThread());
- remote_media_renderer_->SetPlaybackRate(playback_rate);
+ remote_renderer_->SetPlaybackRate(playback_rate);
}
void MojoRendererImpl::SetVolume(float volume) {
DVLOG(2) << __FUNCTION__;
DCHECK(task_runner_->BelongsToCurrentThread());
- remote_media_renderer_->SetVolume(volume);
+ remote_renderer_->SetVolume(volume);
}
base::TimeDelta MojoRendererImpl::GetMediaTime() {
@@ -132,7 +129,7 @@ base::TimeDelta MojoRendererImpl::GetMediaTime() {
bool MojoRendererImpl::HasAudio() {
DVLOG(1) << __FUNCTION__;
DCHECK(task_runner_->BelongsToCurrentThread());
- DCHECK(remote_media_renderer_.get()); // We always bind the renderer.
+ DCHECK(remote_renderer_.get()); // We always bind the renderer.
return !!demuxer_stream_provider_->GetStream(DemuxerStream::AUDIO);
}
@@ -159,7 +156,8 @@ void MojoRendererImpl::OnTimeUpdate(int64_t time_usec, int64_t max_time_usec) {
max_time_ = base::TimeDelta::FromMicroseconds(max_time_usec);
}
-void MojoRendererImpl::OnBufferingStateChange(mojo::BufferingState state) {
+void MojoRendererImpl::OnBufferingStateChange(
+ interfaces::BufferingState state) {
DVLOG(2) << __FUNCTION__;
if (!task_runner_->BelongsToCurrentThread()) {
@@ -188,6 +186,7 @@ void MojoRendererImpl::OnEnded() {
void MojoRendererImpl::OnError() {
DVLOG(1) << __FUNCTION__;
+ DCHECK(init_cb_.is_null());
if (!task_runner_->BelongsToCurrentThread()) {
task_runner_->PostTask(
@@ -198,17 +197,16 @@ void MojoRendererImpl::OnError() {
// TODO(tim): Should we plumb error code from remote renderer?
// http://crbug.com/410451.
- if (init_cb_.is_null()) // We have initialized already.
- error_cb_.Run(PIPELINE_ERROR_DECODE);
- else
- init_cb_.Run(PIPELINE_ERROR_COULD_NOT_RENDER);
+ error_cb_.Run(PIPELINE_ERROR_DECODE);
}
-void MojoRendererImpl::OnInitialized() {
+void MojoRendererImpl::OnInitialized(bool success) {
DVLOG(1) << __FUNCTION__;
DCHECK(task_runner_->BelongsToCurrentThread());
- if (!init_cb_.is_null())
- base::ResetAndReturn(&init_cb_).Run(PIPELINE_OK);
+ DCHECK(!init_cb_.is_null());
+
+ base::ResetAndReturn(&init_cb_)
+ .Run(success ? PIPELINE_OK : PIPELINE_ERROR_INITIALIZATION_FAILED);
}
} // namespace media
diff --git a/chromium/media/mojo/services/mojo_renderer_impl.h b/chromium/media/mojo/services/mojo_renderer_impl.h
index 7b955a2cc1b..e574b150599 100644
--- a/chromium/media/mojo/services/mojo_renderer_impl.h
+++ b/chromium/media/mojo/services/mojo_renderer_impl.h
@@ -8,7 +8,8 @@
#include "base/macros.h"
#include "base/memory/weak_ptr.h"
#include "media/base/renderer.h"
-#include "media/mojo/interfaces/media_renderer.mojom.h"
+#include "media/mojo/interfaces/renderer.mojom.h"
+#include "third_party/mojo/src/mojo/public/cpp/bindings/binding.h"
namespace base {
class SingleThreadTaskRunner;
@@ -18,18 +19,18 @@ namespace media {
class DemuxerStreamProvider;
-// A media::Renderer that proxies to a mojo::MediaRenderer. That
-// mojo::MediaRenderer proxies back to the MojoRendererImpl via the
-// mojo::MediaRendererClient interface.
+// A media::Renderer that proxies to a interfaces::Renderer. That
+// interfaces::Renderer proxies back to the MojoRendererImpl via the
+// interfaces::RendererClient interface.
//
// MojoRendererImpl implements media::Renderer for use as either an audio
// or video renderer.
-class MojoRendererImpl : public Renderer, public mojo::MediaRendererClient {
+class MojoRendererImpl : public Renderer, public interfaces::RendererClient {
public:
// |task_runner| is the TaskRunner on which all methods are invoked.
MojoRendererImpl(
const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
- mojo::MediaRendererPtr remote_media_renderer);
+ interfaces::RendererPtr remote_renderer);
~MojoRendererImpl() override;
// Renderer implementation.
@@ -50,25 +51,25 @@ class MojoRendererImpl : public Renderer, public mojo::MediaRendererClient {
bool HasAudio() override;
bool HasVideo() override;
- // mojo::MediaRendererClient implementation.
+ // interfaces::RendererClient implementation.
void OnTimeUpdate(int64_t time_usec, int64_t max_time_usec) override;
- void OnBufferingStateChange(mojo::BufferingState state) override;
+ void OnBufferingStateChange(interfaces::BufferingState state) override;
void OnEnded() override;
void OnError() override;
private:
- // Called when |remote_media_renderer_| has finished initializing.
- void OnInitialized();
+ // Called when |remote_renderer_| has finished initializing.
+ void OnInitialized(bool success);
// Task runner used to execute pipeline tasks.
scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
DemuxerStreamProvider* demuxer_stream_provider_;
- mojo::MediaRendererPtr remote_media_renderer_;
- mojo::Binding<MediaRendererClient> binding_;
+ interfaces::RendererPtr remote_renderer_;
+ mojo::Binding<RendererClient> binding_;
// Callbacks passed to Initialize() that we forward messages from
- // |remote_media_renderer_| through.
+ // |remote_renderer_| through.
PipelineStatusCB init_cb_;
base::Closure ended_cb_;
PipelineStatusCB error_cb_;
diff --git a/chromium/media/mojo/services/mojo_renderer_service.cc b/chromium/media/mojo/services/mojo_renderer_service.cc
index d1bf751e531..9efde200d3b 100644
--- a/chromium/media/mojo/services/mojo_renderer_service.cc
+++ b/chromium/media/mojo/services/mojo_renderer_service.cc
@@ -6,7 +6,6 @@
#include "base/bind.h"
#include "base/callback_helpers.h"
-#include "base/memory/scoped_vector.h"
#include "base/message_loop/message_loop.h"
#include "media/base/audio_decoder.h"
#include "media/base/audio_renderer.h"
@@ -29,10 +28,10 @@ namespace media {
const int kTimeUpdateIntervalMs = 50;
MojoRendererService::MojoRendererService(
- CdmContextProvider* cdm_context_provider,
+ base::WeakPtr<CdmContextProvider> cdm_context_provider,
RendererFactory* renderer_factory,
const scoped_refptr<MediaLog>& media_log,
- mojo::InterfaceRequest<mojo::MediaRenderer> request)
+ mojo::InterfaceRequest<interfaces::Renderer> request)
: binding_(this, request.Pass()),
cdm_context_provider_(cdm_context_provider),
state_(STATE_UNINITIALIZED),
@@ -50,15 +49,16 @@ MojoRendererService::MojoRendererService(
// Create renderer.
if (renderer_factory) {
- renderer_ = renderer_factory->CreateRenderer(
- task_runner, audio_renderer_sink_.get(), video_renderer_sink_.get());
+ renderer_ = renderer_factory->CreateRenderer(task_runner, task_runner,
+ audio_renderer_sink_.get(),
+ video_renderer_sink_.get());
} else {
scoped_ptr<AudioRenderer> audio_renderer(new AudioRendererImpl(
task_runner, audio_renderer_sink_.get(),
mojo_media_client->CreateAudioDecoders(task_runner, media_log).Pass(),
mojo_media_client->GetAudioHardwareConfig(), media_log));
scoped_ptr<VideoRenderer> video_renderer(new VideoRendererImpl(
- task_runner, video_renderer_sink_.get(),
+ task_runner, task_runner, video_renderer_sink_.get(),
mojo_media_client->CreateVideoDecoders(task_runner, media_log).Pass(),
true, nullptr, media_log));
renderer_.reset(new RendererImpl(task_runner, audio_renderer.Pass(),
@@ -69,10 +69,11 @@ MojoRendererService::MojoRendererService(
MojoRendererService::~MojoRendererService() {
}
-void MojoRendererService::Initialize(mojo::MediaRendererClientPtr client,
- mojo::DemuxerStreamPtr audio,
- mojo::DemuxerStreamPtr video,
- const mojo::Closure& callback) {
+void MojoRendererService::Initialize(
+ interfaces::RendererClientPtr client,
+ interfaces::DemuxerStreamPtr audio,
+ interfaces::DemuxerStreamPtr video,
+ const mojo::Callback<void(bool)>& callback) {
DVLOG(1) << __FUNCTION__;
DCHECK_EQ(state_, STATE_UNINITIALIZED);
client_ = client.Pass();
@@ -129,7 +130,8 @@ void MojoRendererService::SetCdm(int32_t cdm_id,
weak_this_, callback));
}
-void MojoRendererService::OnStreamReady(const mojo::Closure& callback) {
+void MojoRendererService::OnStreamReady(
+ const mojo::Callback<void(bool)>& callback) {
DCHECK_EQ(state_, STATE_INITIALIZING);
renderer_->Initialize(
@@ -144,20 +146,19 @@ void MojoRendererService::OnStreamReady(const mojo::Closure& callback) {
}
void MojoRendererService::OnRendererInitializeDone(
- const mojo::Closure& callback, PipelineStatus status) {
+ const mojo::Callback<void(bool)>& callback,
+ PipelineStatus status) {
DVLOG(1) << __FUNCTION__;
+ DCHECK_EQ(state_, STATE_INITIALIZING);
- if (status != PIPELINE_OK && state_ != STATE_ERROR)
- OnError(status);
-
- if (state_ == STATE_ERROR) {
- renderer_.reset();
- } else {
- DCHECK_EQ(state_, STATE_INITIALIZING);
- state_ = STATE_PLAYING;
+ if (status != PIPELINE_OK) {
+ state_ = STATE_ERROR;
+ callback.Run(false);
+ return;
}
- callback.Run();
+ state_ = STATE_PLAYING;
+ callback.Run(true);
}
void MojoRendererService::OnUpdateStatistics(const PipelineStatistics& stats) {
@@ -191,7 +192,7 @@ void MojoRendererService::OnBufferingStateChanged(
BufferingState new_buffering_state) {
DVLOG(2) << __FUNCTION__ << "(" << new_buffering_state << ")";
client_->OnBufferingStateChange(
- static_cast<mojo::BufferingState>(new_buffering_state));
+ static_cast<interfaces::BufferingState>(new_buffering_state));
}
void MojoRendererService::OnRendererEnded() {
diff --git a/chromium/media/mojo/services/mojo_renderer_service.h b/chromium/media/mojo/services/mojo_renderer_service.h
index 085cea2bea1..dad61704128 100644
--- a/chromium/media/mojo/services/mojo_renderer_service.h
+++ b/chromium/media/mojo/services/mojo_renderer_service.h
@@ -15,7 +15,8 @@
#include "media/base/buffering_state.h"
#include "media/base/media_export.h"
#include "media/base/pipeline_status.h"
-#include "media/mojo/interfaces/media_renderer.mojom.h"
+#include "media/mojo/interfaces/renderer.mojom.h"
+#include "media/mojo/services/mojo_cdm_service_context.h"
#include "third_party/mojo/src/mojo/public/cpp/bindings/strong_binding.h"
namespace mojo {
@@ -32,24 +33,24 @@ class Renderer;
class RendererFactory;
class VideoRendererSink;
-// A mojo::MediaRenderer implementation that uses media::AudioRenderer to
+// A interfaces::Renderer implementation that uses media::AudioRenderer to
// decode and render audio to a sink obtained from the ApplicationConnection.
class MEDIA_EXPORT MojoRendererService
- : NON_EXPORTED_BASE(mojo::MediaRenderer) {
+ : NON_EXPORTED_BASE(interfaces::Renderer) {
public:
// |cdm_context_provider| can be used to find the CdmContext to support
// encrypted media. If null, encrypted media is not supported.
- MojoRendererService(CdmContextProvider* cdm_context_provider,
+ MojoRendererService(base::WeakPtr<CdmContextProvider> cdm_context_provider,
RendererFactory* renderer_factory,
const scoped_refptr<MediaLog>& media_log,
- mojo::InterfaceRequest<mojo::MediaRenderer> request);
+ mojo::InterfaceRequest<interfaces::Renderer> request);
~MojoRendererService() final;
- // mojo::MediaRenderer implementation.
- void Initialize(mojo::MediaRendererClientPtr client,
- mojo::DemuxerStreamPtr audio,
- mojo::DemuxerStreamPtr video,
- const mojo::Closure& callback) final;
+ // interfaces::Renderer implementation.
+ void Initialize(interfaces::RendererClientPtr client,
+ interfaces::DemuxerStreamPtr audio,
+ interfaces::DemuxerStreamPtr video,
+ const mojo::Callback<void(bool)>& callback) final;
void Flush(const mojo::Closure& callback) final;
void StartPlayingFrom(int64_t time_delta_usec) final;
void SetPlaybackRate(double playback_rate) final;
@@ -67,10 +68,10 @@ class MEDIA_EXPORT MojoRendererService
// Called when the DemuxerStreamProviderShim is ready to go (has a config,
// pipe handle, etc) and can be handed off to a renderer for use.
- void OnStreamReady(const mojo::Closure& callback);
+ void OnStreamReady(const mojo::Callback<void(bool)>& callback);
// Called when |audio_renderer_| initialization has completed.
- void OnRendererInitializeDone(const mojo::Closure& callback,
+ void OnRendererInitializeDone(const mojo::Callback<void(bool)>& callback,
PipelineStatus status);
// Callback executed by filters to update statistics.
@@ -99,9 +100,9 @@ class MEDIA_EXPORT MojoRendererService
// Callback executed once SetCdm() completes.
void OnCdmAttached(const mojo::Callback<void(bool)>& callback, bool success);
- mojo::StrongBinding<mojo::MediaRenderer> binding_;
+ mojo::StrongBinding<interfaces::Renderer> binding_;
- CdmContextProvider* cdm_context_provider_;
+ base::WeakPtr<CdmContextProvider> cdm_context_provider_;
State state_;
@@ -111,12 +112,12 @@ class MEDIA_EXPORT MojoRendererService
scoped_refptr<AudioRendererSink> audio_renderer_sink_;
scoped_ptr<VideoRendererSink> video_renderer_sink_;
- scoped_ptr<Renderer> renderer_;
+ scoped_ptr<media::Renderer> renderer_;
- base::RepeatingTimer<MojoRendererService> time_update_timer_;
+ base::RepeatingTimer time_update_timer_;
uint64_t last_media_time_usec_;
- mojo::MediaRendererClientPtr client_;
+ interfaces::RendererClientPtr client_;
base::WeakPtr<MojoRendererService> weak_this_;
base::WeakPtrFactory<MojoRendererService> weak_factory_;
diff --git a/chromium/media/mojo/services/service_factory_impl.cc b/chromium/media/mojo/services/service_factory_impl.cc
new file mode 100644
index 00000000000..fcbbc614324
--- /dev/null
+++ b/chromium/media/mojo/services/service_factory_impl.cc
@@ -0,0 +1,62 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/mojo/services/service_factory_impl.h"
+
+#include "base/logging.h"
+#include "media/base/cdm_factory.h"
+#include "media/base/media_log.h"
+#include "media/base/renderer_factory.h"
+#include "media/mojo/services/mojo_cdm_service.h"
+#include "media/mojo/services/mojo_media_client.h"
+#include "media/mojo/services/mojo_renderer_service.h"
+#include "mojo/application/public/cpp/app_lifetime_helper.h"
+
+namespace media {
+
+ServiceFactoryImpl::ServiceFactoryImpl(
+ mojo::InterfaceRequest<interfaces::ServiceFactory> request,
+ mojo::ServiceProvider* service_provider,
+ scoped_refptr<MediaLog> media_log,
+ scoped_ptr<mojo::AppRefCount> parent_app_refcount)
+ : binding_(this, request.Pass()),
+ service_provider_(service_provider),
+ media_log_(media_log),
+ parent_app_refcount_(parent_app_refcount.Pass()) {
+ DVLOG(1) << __FUNCTION__;
+}
+
+ServiceFactoryImpl::~ServiceFactoryImpl() {
+ DVLOG(1) << __FUNCTION__;
+}
+
+// interfaces::ServiceFactory implementation.
+void ServiceFactoryImpl::CreateRenderer(
+ mojo::InterfaceRequest<interfaces::Renderer> request) {
+ // The created object is owned by the pipe.
+ new MojoRendererService(cdm_service_context_.GetWeakPtr(),
+ GetRendererFactory(), media_log_, request.Pass());
+}
+
+void ServiceFactoryImpl::CreateCdm(
+ mojo::InterfaceRequest<interfaces::ContentDecryptionModule> request) {
+ // The created object is owned by the pipe.
+ new MojoCdmService(cdm_service_context_.GetWeakPtr(), service_provider_,
+ GetCdmFactory(), request.Pass());
+}
+
+RendererFactory* ServiceFactoryImpl::GetRendererFactory() {
+ if (!renderer_factory_)
+ renderer_factory_ =
+ MojoMediaClient::Get()->CreateRendererFactory(media_log_);
+ return renderer_factory_.get();
+}
+
+CdmFactory* ServiceFactoryImpl::GetCdmFactory() {
+ if (!cdm_factory_)
+ cdm_factory_ = MojoMediaClient::Get()->CreateCdmFactory();
+ return cdm_factory_.get();
+}
+
+} // namespace media
diff --git a/chromium/media/mojo/services/service_factory_impl.h b/chromium/media/mojo/services/service_factory_impl.h
new file mode 100644
index 00000000000..8357f8ad4e5
--- /dev/null
+++ b/chromium/media/mojo/services/service_factory_impl.h
@@ -0,0 +1,58 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_MOJO_SERVICES_SERVICE_FACTORY_IMPL_H_
+#define MEDIA_MOJO_SERVICES_SERVICE_FACTORY_IMPL_H_
+
+#include "base/macros.h"
+#include "base/memory/scoped_ptr.h"
+#include "media/mojo/interfaces/service_factory.mojom.h"
+#include "media/mojo/services/mojo_cdm_service_context.h"
+#include "mojo/public/cpp/bindings/strong_binding.h"
+
+namespace mojo {
+class AppRefCount;
+class ServiceProvider;
+}
+
+namespace media {
+
+class CdmFactory;
+class MediaLog;
+class RendererFactory;
+
+class ServiceFactoryImpl : public interfaces::ServiceFactory {
+ public:
+ ServiceFactoryImpl(mojo::InterfaceRequest<interfaces::ServiceFactory> request,
+ mojo::ServiceProvider* service_provider,
+ scoped_refptr<MediaLog> media_log,
+ scoped_ptr<mojo::AppRefCount> parent_app_refcount);
+ ~ServiceFactoryImpl() final;
+
+ // interfaces::ServiceFactory implementation.
+ void CreateRenderer(
+ mojo::InterfaceRequest<interfaces::Renderer> renderer) final;
+ void CreateCdm(
+ mojo::InterfaceRequest<interfaces::ContentDecryptionModule> cdm) final;
+
+ private:
+ RendererFactory* GetRendererFactory();
+ CdmFactory* GetCdmFactory();
+
+ MojoCdmServiceContext cdm_service_context_;
+
+ mojo::StrongBinding<interfaces::ServiceFactory> binding_;
+ mojo::ServiceProvider* service_provider_;
+ scoped_refptr<MediaLog> media_log_;
+ scoped_ptr<mojo::AppRefCount> parent_app_refcount_;
+
+ scoped_ptr<RendererFactory> renderer_factory_;
+ scoped_ptr<CdmFactory> cdm_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(ServiceFactoryImpl);
+};
+
+} // namespace media
+
+#endif // MEDIA_MOJO_SERVICES_SERVICE_FACTORY_IMPL_H_
diff --git a/chromium/media/renderers/audio_renderer_impl.cc b/chromium/media/renderers/audio_renderer_impl.cc
index 8df794155ad..2d9879b64b8 100644
--- a/chromium/media/renderers/audio_renderer_impl.cc
+++ b/chromium/media/renderers/audio_renderer_impl.cc
@@ -22,6 +22,7 @@
#include "media/base/bind_to_current_loop.h"
#include "media/base/demuxer_stream.h"
#include "media/base/media_log.h"
+#include "media/base/timestamp_constants.h"
#include "media/filters/audio_clock.h"
#include "media/filters/decrypting_demuxer_stream.h"
@@ -343,8 +344,6 @@ void AudioRendererImpl::Initialize(
audio_parameters_.Reset(
AudioParameters::AUDIO_PCM_LOW_LATENCY,
stream->audio_decoder_config().channel_layout(),
- ChannelLayoutToChannelCount(
- stream->audio_decoder_config().channel_layout()),
stream->audio_decoder_config().samples_per_second(),
stream->audio_decoder_config().bits_per_channel(),
buffer_size);
@@ -352,15 +351,11 @@ void AudioRendererImpl::Initialize(
} else {
audio_parameters_.Reset(
hw_params.format(),
- // Always use the source's channel layout and channel count to avoid
- // premature downmixing (http://crbug.com/379288), platform specific
- // issues around channel layouts (http://crbug.com/266674), and
- // unnecessary upmixing overhead.
+ // Always use the source's channel layout to avoid premature downmixing
+ // (http://crbug.com/379288), platform specific issues around channel
+ // layouts (http://crbug.com/266674), and unnecessary upmixing overhead.
stream->audio_decoder_config().channel_layout(),
- ChannelLayoutToChannelCount(
- stream->audio_decoder_config().channel_layout()),
- hw_params.sample_rate(),
- hw_params.bits_per_sample(),
+ hw_params.sample_rate(), hw_params.bits_per_sample(),
hardware_config_.GetHighLatencyBufferSize());
}
diff --git a/chromium/media/renderers/audio_renderer_impl.h b/chromium/media/renderers/audio_renderer_impl.h
index 164cdbc5604..1c98c871934 100644
--- a/chromium/media/renderers/audio_renderer_impl.h
+++ b/chromium/media/renderers/audio_renderer_impl.h
@@ -21,7 +21,6 @@
#include <deque>
-#include "base/gtest_prod_util.h"
#include "base/memory/scoped_ptr.h"
#include "base/memory/weak_ptr.h"
#include "base/synchronization/lock.h"
diff --git a/chromium/media/renderers/default_renderer_factory.cc b/chromium/media/renderers/default_renderer_factory.cc
index 09f78ae1ec6..add6e15c9c8 100644
--- a/chromium/media/renderers/default_renderer_factory.cc
+++ b/chromium/media/renderers/default_renderer_factory.cc
@@ -16,10 +16,12 @@
#if !defined(MEDIA_DISABLE_FFMPEG)
#include "media/filters/ffmpeg_audio_decoder.h"
+#if !defined(DISABLE_FFMPEG_VIDEO_DECODERS)
#include "media/filters/ffmpeg_video_decoder.h"
#endif
+#endif
-#if !defined(OS_ANDROID)
+#if !defined(OS_ANDROID) || defined(ENABLE_MEDIA_PIPELINE_ON_ANDROID)
#include "media/filters/opus_audio_decoder.h"
#endif
@@ -43,6 +45,7 @@ DefaultRendererFactory::~DefaultRendererFactory() {
scoped_ptr<Renderer> DefaultRendererFactory::CreateRenderer(
const scoped_refptr<base::SingleThreadTaskRunner>& media_task_runner,
+ const scoped_refptr<base::TaskRunner>& worker_task_runner,
AudioRendererSink* audio_renderer_sink,
VideoRendererSink* video_renderer_sink) {
DCHECK(audio_renderer_sink);
@@ -51,11 +54,11 @@ scoped_ptr<Renderer> DefaultRendererFactory::CreateRenderer(
ScopedVector<AudioDecoder> audio_decoders;
#if !defined(MEDIA_DISABLE_FFMPEG)
- audio_decoders.push_back(new FFmpegAudioDecoder(
- media_task_runner, base::Bind(&MediaLog::AddLogEvent, media_log_)));
+ audio_decoders.push_back(
+ new FFmpegAudioDecoder(media_task_runner, media_log_));
#endif
-#if !defined(OS_ANDROID)
+#if !defined(OS_ANDROID) || defined(ENABLE_MEDIA_PIPELINE_ON_ANDROID)
audio_decoders.push_back(new OpusAudioDecoder(media_task_runner));
#endif
@@ -79,13 +82,13 @@ scoped_ptr<Renderer> DefaultRendererFactory::CreateRenderer(
video_decoders.push_back(new VpxVideoDecoder(media_task_runner));
#endif
-#if !defined(MEDIA_DISABLE_FFMPEG)
+#if !defined(MEDIA_DISABLE_FFMPEG) && !defined(DISABLE_FFMPEG_VIDEO_DECODERS)
video_decoders.push_back(new FFmpegVideoDecoder(media_task_runner));
#endif
scoped_ptr<VideoRenderer> video_renderer(new VideoRendererImpl(
- media_task_runner, video_renderer_sink, video_decoders.Pass(), true,
- gpu_factories_, media_log_));
+ media_task_runner, worker_task_runner, video_renderer_sink,
+ video_decoders.Pass(), true, gpu_factories_, media_log_));
// Create renderer.
return scoped_ptr<Renderer>(new RendererImpl(
diff --git a/chromium/media/renderers/default_renderer_factory.h b/chromium/media/renderers/default_renderer_factory.h
index 05cf2b1e1e5..2ec8aa8bab5 100644
--- a/chromium/media/renderers/default_renderer_factory.h
+++ b/chromium/media/renderers/default_renderer_factory.h
@@ -28,6 +28,7 @@ class MEDIA_EXPORT DefaultRendererFactory : public RendererFactory {
scoped_ptr<Renderer> CreateRenderer(
const scoped_refptr<base::SingleThreadTaskRunner>& media_task_runner,
+ const scoped_refptr<base::TaskRunner>& worker_task_runner,
AudioRendererSink* audio_renderer_sink,
VideoRendererSink* video_renderer_sink) final;
diff --git a/chromium/media/renderers/gpu_video_accelerator_factories.h b/chromium/media/renderers/gpu_video_accelerator_factories.h
index 7a8c3870a4c..51740ada926 100644
--- a/chromium/media/renderers/gpu_video_accelerator_factories.h
+++ b/chromium/media/renderers/gpu_video_accelerator_factories.h
@@ -12,6 +12,7 @@
#include "gpu/command_buffer/client/gles2_interface.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "media/base/media_export.h"
+#include "media/base/video_types.h"
#include "media/video/video_decode_accelerator.h"
#include "media/video/video_encode_accelerator.h"
#include "ui/gfx/gpu_memory_buffer.h"
@@ -65,11 +66,14 @@ class MEDIA_EXPORT GpuVideoAcceleratorFactories
virtual scoped_ptr<gfx::GpuMemoryBuffer> AllocateGpuMemoryBuffer(
const gfx::Size& size,
- gfx::GpuMemoryBuffer::Format format,
- gfx::GpuMemoryBuffer::Usage usage) = 0;
+ gfx::BufferFormat format,
+ gfx::BufferUsage usage) = 0;
+ virtual bool ShouldUseGpuMemoryBuffersForVideoFrames() const = 0;
virtual unsigned ImageTextureTarget() = 0;
- virtual bool IsTextureRGSupported() = 0;
+ // Pixel format of the hardware video frames created when GpuMemoryBuffers
+ // video frames are enabled.
+ virtual VideoPixelFormat VideoFrameOutputFormat() = 0;
virtual gpu::gles2::GLES2Interface* GetGLES2Interface() = 0;
diff --git a/chromium/media/renderers/mock_gpu_video_accelerator_factories.cc b/chromium/media/renderers/mock_gpu_video_accelerator_factories.cc
index a1f49e41178..361d8f213a8 100644
--- a/chromium/media/renderers/mock_gpu_video_accelerator_factories.cc
+++ b/chromium/media/renderers/mock_gpu_video_accelerator_factories.cc
@@ -4,6 +4,7 @@
#include "media/renderers/mock_gpu_video_accelerator_factories.h"
+#include "ui/gfx/buffer_format_util.h"
#include "ui/gfx/gpu_memory_buffer.h"
namespace media {
@@ -12,13 +13,24 @@ namespace {
class GpuMemoryBufferImpl : public gfx::GpuMemoryBuffer {
public:
- GpuMemoryBufferImpl(const gfx::Size& size) : size_(size) {
- bytes_.resize(size_.GetArea());
+ GpuMemoryBufferImpl(const gfx::Size& size, gfx::BufferFormat format)
+ : format_(format), size_(size),
+ num_planes_(gfx::NumberOfPlanesForBufferFormat(format)) {
+ DCHECK(gfx::BufferFormat::R_8 == format_ ||
+ gfx::BufferFormat::YUV_420_BIPLANAR == format_ ||
+ gfx::BufferFormat::UYVY_422 == format_);
+ DCHECK(num_planes_ <= kMaxPlanes);
+ for (int i = 0; i < static_cast<int>(num_planes_); ++i) {
+ bytes_[i].resize(
+ gfx::RowSizeForBufferFormat(size_.width(), format_, i) *
+ size_.height() / gfx::SubsamplingFactorForBufferFormat(format_, i));
+ }
}
// Overridden from gfx::GpuMemoryBuffer:
bool Map(void** data) override {
- data[0] = &bytes_[0];
+ for (size_t plane = 0; plane < num_planes_; ++plane)
+ data[plane] = &bytes_[plane][0];
return true;
}
void Unmap() override{};
@@ -26,8 +38,19 @@ class GpuMemoryBufferImpl : public gfx::GpuMemoryBuffer {
NOTREACHED();
return false;
}
- Format GetFormat() const override { return gfx::GpuMemoryBuffer::R_8; }
- void GetStride(int* stride) const override { stride[0] = size_.width(); }
+ gfx::BufferFormat GetFormat() const override {
+ return format_;
+ }
+ void GetStride(int* strides) const override {
+ for (int plane = 0; plane < static_cast<int>(num_planes_); ++plane) {
+ strides[plane] = static_cast<int>(
+ gfx::RowSizeForBufferFormat(size_.width(), format_, plane));
+ }
+ }
+ gfx::GpuMemoryBufferId GetId() const override {
+ NOTREACHED();
+ return gfx::GpuMemoryBufferId(0);
+ }
gfx::GpuMemoryBufferHandle GetHandle() const override {
NOTREACHED();
return gfx::GpuMemoryBufferHandle();
@@ -37,8 +60,12 @@ class GpuMemoryBufferImpl : public gfx::GpuMemoryBuffer {
}
private:
- std::vector<unsigned char> bytes_;
+ static const size_t kMaxPlanes = 3;
+
+ gfx::BufferFormat format_;
const gfx::Size size_;
+ size_t num_planes_;
+ std::vector<uint8> bytes_[kMaxPlanes];
};
} // unnamed namespace
@@ -54,10 +81,12 @@ bool MockGpuVideoAcceleratorFactories::IsGpuVideoAcceleratorEnabled() {
scoped_ptr<gfx::GpuMemoryBuffer>
MockGpuVideoAcceleratorFactories::AllocateGpuMemoryBuffer(
const gfx::Size& size,
- gfx::GpuMemoryBuffer::Format format,
- gfx::GpuMemoryBuffer::Usage usage) {
- DCHECK_EQ(gfx::GpuMemoryBuffer::R_8, format);
- return make_scoped_ptr<gfx::GpuMemoryBuffer>(new GpuMemoryBufferImpl(size));
+ gfx::BufferFormat format,
+ gfx::BufferUsage usage) {
+ if (fail_to_allocate_gpu_memory_buffer_)
+ return nullptr;
+ return make_scoped_ptr<gfx::GpuMemoryBuffer>(
+ new GpuMemoryBufferImpl(size, format));
}
scoped_ptr<base::SharedMemory>
@@ -75,6 +104,11 @@ MockGpuVideoAcceleratorFactories::CreateVideoEncodeAccelerator() {
return scoped_ptr<VideoEncodeAccelerator>(DoCreateVideoEncodeAccelerator());
}
+bool MockGpuVideoAcceleratorFactories::ShouldUseGpuMemoryBuffersForVideoFrames()
+ const {
+ return false;
+}
+
unsigned MockGpuVideoAcceleratorFactories::ImageTextureTarget() {
return GL_TEXTURE_2D;
}
diff --git a/chromium/media/renderers/mock_gpu_video_accelerator_factories.h b/chromium/media/renderers/mock_gpu_video_accelerator_factories.h
index b50b0c197ba..f4af02a5fa6 100644
--- a/chromium/media/renderers/mock_gpu_video_accelerator_factories.h
+++ b/chromium/media/renderers/mock_gpu_video_accelerator_factories.h
@@ -47,11 +47,24 @@ class MockGpuVideoAcceleratorFactories : public GpuVideoAcceleratorFactories {
scoped_ptr<gfx::GpuMemoryBuffer> AllocateGpuMemoryBuffer(
const gfx::Size& size,
- gfx::GpuMemoryBuffer::Format format,
- gfx::GpuMemoryBuffer::Usage usage) override;
+ gfx::BufferFormat format,
+ gfx::BufferUsage usage) override;
+ bool ShouldUseGpuMemoryBuffersForVideoFrames() const override;
unsigned ImageTextureTarget() override;
- MOCK_METHOD0(IsTextureRGSupported, bool());
+ VideoPixelFormat VideoFrameOutputFormat() override {
+ return video_frame_output_format_;
+ };
+
+ void SetVideoFrameOutputFormat(
+ const VideoPixelFormat video_frame_output_format) {
+ video_frame_output_format_ = video_frame_output_format;
+ };
+
+ void SetFailToAllocateGpuMemoryBufferForTesting(bool fail) {
+ fail_to_allocate_gpu_memory_buffer_ = fail;
+ }
+
MOCK_METHOD0(GetGLES2Interface, gpu::gles2::GLES2Interface*());
scoped_ptr<base::SharedMemory> CreateSharedMemory(size_t size) override;
@@ -64,6 +77,10 @@ class MockGpuVideoAcceleratorFactories : public GpuVideoAcceleratorFactories {
~MockGpuVideoAcceleratorFactories() override;
DISALLOW_COPY_AND_ASSIGN(MockGpuVideoAcceleratorFactories);
+
+ VideoPixelFormat video_frame_output_format_ = PIXEL_FORMAT_I420;
+
+ bool fail_to_allocate_gpu_memory_buffer_ = false;
};
} // namespace media
diff --git a/chromium/media/renderers/video_renderer_impl.cc b/chromium/media/renderers/video_renderer_impl.cc
index ef389822507..5fbf7299e86 100644
--- a/chromium/media/renderers/video_renderer_impl.cc
+++ b/chromium/media/renderers/video_renderer_impl.cc
@@ -7,16 +7,15 @@
#include "base/bind.h"
#include "base/callback.h"
#include "base/callback_helpers.h"
-#include "base/command_line.h"
#include "base/location.h"
-#include "base/metrics/field_trial.h"
+#include "base/metrics/histogram_macros.h"
#include "base/single_thread_task_runner.h"
#include "base/strings/string_util.h"
#include "base/time/default_tick_clock.h"
#include "base/trace_event/trace_event.h"
#include "media/base/bind_to_current_loop.h"
-#include "media/base/buffers.h"
#include "media/base/limits.h"
+#include "media/base/media_log.h"
#include "media/base/media_switches.h"
#include "media/base/pipeline.h"
#include "media/base/video_frame.h"
@@ -25,75 +24,54 @@
namespace media {
-// TODO(dalecurtis): This experiment is temporary and should be removed once we
-// have enough data to support the primacy of the new video rendering path; see
-// http://crbug.com/485699 for details.
-static bool ShouldUseVideoRenderingPath() {
- // Note: It's important to query the field trial state first, to ensure that
- // UMA reports the correct group.
- const std::string group_name =
- base::FieldTrialList::FindFullName("NewVideoRendererTrial");
- const bool disabled_via_cli =
- base::CommandLine::ForCurrentProcess()->HasSwitch(
- switches::kDisableNewVideoRenderer);
- return !disabled_via_cli &&
- !base::StartsWithASCII(group_name, "Disabled", true);
-}
-
VideoRendererImpl::VideoRendererImpl(
- const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
+ const scoped_refptr<base::SingleThreadTaskRunner>& media_task_runner,
+ const scoped_refptr<base::TaskRunner>& worker_task_runner,
VideoRendererSink* sink,
ScopedVector<VideoDecoder> decoders,
bool drop_frames,
const scoped_refptr<GpuVideoAcceleratorFactories>& gpu_factories,
const scoped_refptr<MediaLog>& media_log)
- : task_runner_(task_runner),
- use_new_video_renderering_path_(ShouldUseVideoRenderingPath()),
+ : task_runner_(media_task_runner),
sink_(sink),
sink_started_(false),
video_frame_stream_(
- new VideoFrameStream(task_runner, decoders.Pass(), media_log)),
- gpu_memory_buffer_pool_(
- new GpuMemoryBufferVideoFramePool(task_runner, gpu_factories)),
+ new VideoFrameStream(media_task_runner, decoders.Pass(), media_log)),
+ gpu_memory_buffer_pool_(nullptr),
+ media_log_(media_log),
low_delay_(false),
received_end_of_stream_(false),
rendered_end_of_stream_(false),
- frame_available_(&lock_),
state_(kUninitialized),
- thread_(),
+ sequence_token_(0),
pending_read_(false),
drop_frames_(drop_frames),
buffering_state_(BUFFERING_HAVE_NOTHING),
frames_decoded_(0),
frames_dropped_(0),
- is_shutting_down_(false),
tick_clock_(new base::DefaultTickClock()),
was_background_rendering_(false),
time_progressing_(false),
render_first_frame_and_stop_(false),
posted_maybe_stop_after_first_paint_(false),
weak_factory_(this) {
+ if (gpu_factories &&
+ gpu_factories->ShouldUseGpuMemoryBuffersForVideoFrames()) {
+ gpu_memory_buffer_pool_.reset(new GpuMemoryBufferVideoFramePool(
+ media_task_runner, worker_task_runner, gpu_factories));
+ }
}
VideoRendererImpl::~VideoRendererImpl() {
DCHECK(task_runner_->BelongsToCurrentThread());
- if (!use_new_video_renderering_path_) {
- base::AutoLock auto_lock(lock_);
- is_shutting_down_ = true;
- frame_available_.Signal();
- }
-
- if (!thread_.is_null())
- base::PlatformThread::Join(thread_);
-
if (!init_cb_.is_null())
base::ResetAndReturn(&init_cb_).Run(PIPELINE_ERROR_ABORT);
if (!flush_cb_.is_null())
base::ResetAndReturn(&flush_cb_).Run();
- if (use_new_video_renderering_path_ && sink_started_)
+ if (sink_started_)
StopSink();
}
@@ -101,7 +79,7 @@ void VideoRendererImpl::Flush(const base::Closure& callback) {
DVLOG(1) << __FUNCTION__;
DCHECK(task_runner_->BelongsToCurrentThread());
- if (use_new_video_renderering_path_ && sink_started_)
+ if (sink_started_)
StopSink();
base::AutoLock auto_lock(lock_);
@@ -111,7 +89,6 @@ void VideoRendererImpl::Flush(const base::Closure& callback) {
// This is necessary if the |video_frame_stream_| has already seen an end of
// stream and needs to drain it before flushing it.
- ready_frames_.clear();
if (buffering_state_ != BUFFERING_HAVE_NOTHING) {
buffering_state_ = BUFFERING_HAVE_NOTHING;
buffering_state_cb_.Run(BUFFERING_HAVE_NOTHING);
@@ -119,8 +96,7 @@ void VideoRendererImpl::Flush(const base::Closure& callback) {
received_end_of_stream_ = false;
rendered_end_of_stream_ = false;
- if (use_new_video_renderering_path_)
- algorithm_->Reset();
+ algorithm_->Reset();
video_frame_stream_->Reset(
base::Bind(&VideoRendererImpl::OnVideoFrameStreamResetDone,
@@ -133,7 +109,6 @@ void VideoRendererImpl::StartPlayingFrom(base::TimeDelta timestamp) {
base::AutoLock auto_lock(lock_);
DCHECK_EQ(state_, kFlushed);
DCHECK(!pending_read_);
- DCHECK(ready_frames_.empty());
DCHECK_EQ(buffering_state_, BUFFERING_HAVE_NOTHING);
state_ = kPlaying;
@@ -167,6 +142,9 @@ void VideoRendererImpl::Initialize(
DCHECK(!time_progressing_);
low_delay_ = (stream->liveness() == DemuxerStream::LIVENESS_LIVE);
+ UMA_HISTOGRAM_BOOLEAN("Media.VideoRenderer.LowDelay", low_delay_);
+ if (low_delay_)
+ MEDIA_LOG(DEBUG, media_log_) << "Video rendering in low delay mode.";
// Always post |init_cb_| because |this| could be destroyed if initialization
// failed.
@@ -178,8 +156,6 @@ void VideoRendererImpl::Initialize(
buffering_state_cb_ = BindToCurrentLoop(buffering_state_cb);
statistics_cb_ = statistics_cb;
- paint_cb_ = base::Bind(&VideoRendererSink::PaintFrameUsingOldRenderingPath,
- base::Unretained(sink_));
ended_cb_ = ended_cb;
error_cb_ = error_cb;
wall_clock_time_cb_ = wall_clock_time_cb;
@@ -196,7 +172,6 @@ scoped_refptr<VideoFrame> VideoRendererImpl::Render(
base::TimeTicks deadline_max,
bool background_rendering) {
base::AutoLock auto_lock(lock_);
- DCHECK(use_new_video_renderering_path_);
DCHECK_EQ(state_, kPlaying);
size_t frames_dropped = 0;
@@ -212,7 +187,18 @@ scoped_refptr<VideoFrame> VideoRendererImpl::Render(
// end of stream, or have frames available. We also don't want to do this in
// background rendering mode unless this isn't the first background render
// tick and we haven't seen any decoded frames since the last one.
- const size_t effective_frames = MaybeFireEndedCallback();
+ //
+ // We use the inverse of |render_first_frame_and_stop_| as a proxy for the
+ // value of |time_progressing_| here since we can't access it from the
+ // compositor thread. If we're here (in Render()) the sink must have been
+ // started -- but if it was started only to render the first frame and stop,
+ // then |time_progressing_| is likely false. If we're still in Render() when
+ // |render_first_frame_and_stop_| is false, then |time_progressing_| is true.
+ // If |time_progressing_| is actually true when |render_first_frame_and_stop_|
+ // is also true, then the ended callback will be harmlessly delayed until
+ // MaybeStopSinkAfterFirstPaint() runs and the next Render() call comes in.
+ const size_t effective_frames =
+ MaybeFireEndedCallback_Locked(!render_first_frame_and_stop_);
if (buffering_state_ == BUFFERING_HAVE_ENOUGH && !received_end_of_stream_ &&
!effective_frames && (!background_rendering ||
(!frames_decoded_ && was_background_rendering_))) {
@@ -230,7 +216,7 @@ scoped_refptr<VideoFrame> VideoRendererImpl::Render(
// dropped frames since they are likely just dropped due to being too old.
if (!background_rendering && !was_background_rendering_)
frames_dropped_ += frames_dropped;
- UpdateStatsAndWait_Locked(base::TimeDelta());
+ UpdateStats_Locked();
was_background_rendering_ = background_rendering;
// After painting the first frame, if playback hasn't started, we post a
@@ -259,24 +245,9 @@ scoped_refptr<VideoFrame> VideoRendererImpl::Render(
void VideoRendererImpl::OnFrameDropped() {
base::AutoLock auto_lock(lock_);
- DCHECK(use_new_video_renderering_path_);
algorithm_->OnLastFrameDropped();
}
-void VideoRendererImpl::CreateVideoThread() {
- // This may fail and cause a crash if there are too many threads created in
- // the current process. See http://crbug.com/443291
- const base::ThreadPriority priority =
-#if defined(OS_WIN)
- // Bump up our priority so our sleeping is more accurate.
- // TODO(scherkus): find out if this is necessary, but it seems to help.
- base::ThreadPriority::DISPLAY;
-#else
- base::ThreadPriority::NORMAL;
-#endif
- CHECK(base::PlatformThread::CreateWithPriority(0, this, &thread_, priority));
-}
-
void VideoRendererImpl::OnVideoFrameStreamInitialized(bool success) {
DCHECK(task_runner_->BelongsToCurrentThread());
base::AutoLock auto_lock(lock_);
@@ -294,116 +265,23 @@ void VideoRendererImpl::OnVideoFrameStreamInitialized(bool success) {
// have not populated any buffers yet.
state_ = kFlushed;
- if (use_new_video_renderering_path_) {
- algorithm_.reset(new VideoRendererAlgorithm(wall_clock_time_cb_));
- if (!drop_frames_)
- algorithm_->disable_frame_dropping();
- } else {
- CreateVideoThread();
- }
+ algorithm_.reset(new VideoRendererAlgorithm(wall_clock_time_cb_));
+ if (!drop_frames_)
+ algorithm_->disable_frame_dropping();
base::ResetAndReturn(&init_cb_).Run(PIPELINE_OK);
}
-// PlatformThread::Delegate implementation.
-void VideoRendererImpl::ThreadMain() {
- DCHECK(!use_new_video_renderering_path_);
- base::PlatformThread::SetName("CrVideoRenderer");
-
- // The number of milliseconds to idle when we do not have anything to do.
- // Nothing special about the value, other than we're being more OS-friendly
- // than sleeping for 1 millisecond.
- //
- // TODO(scherkus): switch to pure event-driven frame timing instead of this
- // kIdleTimeDelta business http://crbug.com/106874
- const base::TimeDelta kIdleTimeDelta =
- base::TimeDelta::FromMilliseconds(10);
-
- for (;;) {
- base::AutoLock auto_lock(lock_);
-
- // Thread exit condition.
- if (is_shutting_down_)
- return;
-
- // Remain idle as long as we're not playing.
- if (state_ != kPlaying || buffering_state_ != BUFFERING_HAVE_ENOUGH) {
- UpdateStatsAndWait_Locked(kIdleTimeDelta);
- continue;
- }
-
- base::TimeTicks now = tick_clock_->NowTicks();
-
- // Remain idle until we have the next frame ready for rendering.
- if (ready_frames_.empty()) {
- base::TimeDelta wait_time = kIdleTimeDelta;
- if (received_end_of_stream_) {
- if (!rendered_end_of_stream_) {
- rendered_end_of_stream_ = true;
- task_runner_->PostTask(FROM_HERE, ended_cb_);
- }
- } else if (now >= latest_possible_paint_time_) {
- // Declare HAVE_NOTHING if we don't have another frame by the time we
- // are ready to paint the next one.
- buffering_state_ = BUFFERING_HAVE_NOTHING;
- task_runner_->PostTask(
- FROM_HERE, base::Bind(buffering_state_cb_, BUFFERING_HAVE_NOTHING));
- } else {
- wait_time = std::min(kIdleTimeDelta, latest_possible_paint_time_ - now);
- }
-
- UpdateStatsAndWait_Locked(wait_time);
- continue;
- }
-
- base::TimeTicks target_paint_time =
- ConvertMediaTimestamp(ready_frames_.front()->timestamp());
-
- // If media time has stopped, don't attempt to paint any more frames.
- if (target_paint_time.is_null()) {
- UpdateStatsAndWait_Locked(kIdleTimeDelta);
- continue;
- }
-
- // Deadline is defined as the duration between this frame and the next
- // frame, using the delta between this frame and the previous frame as the
- // assumption for frame duration.
- //
- // TODO(scherkus): This can be vastly improved. Use a histogram to measure
- // the accuracy of our frame timing code. http://crbug.com/149829
- if (last_media_time_.is_null()) {
- latest_possible_paint_time_ = now;
- } else {
- base::TimeDelta duration = target_paint_time - last_media_time_;
- latest_possible_paint_time_ = target_paint_time + duration;
- }
-
- // Remain idle until we've reached our target paint window.
- if (now < target_paint_time) {
- UpdateStatsAndWait_Locked(
- std::min(target_paint_time - now, kIdleTimeDelta));
- continue;
- }
-
- if (ready_frames_.size() > 1 && now > latest_possible_paint_time_ &&
- drop_frames_) {
- DropNextReadyFrame_Locked();
- continue;
- }
-
- // Congratulations! You've made it past the video frame timing gauntlet.
- //
- // At this point enough time has passed that the next frame that ready for
- // rendering.
- PaintNextReadyFrame_Locked();
- }
-}
-
void VideoRendererImpl::SetTickClockForTesting(
scoped_ptr<base::TickClock> tick_clock) {
tick_clock_.swap(tick_clock);
}
+void VideoRendererImpl::SetGpuMemoryBufferVideoForTesting(
+ scoped_ptr<GpuMemoryBufferVideoFramePool> gpu_memory_buffer_pool) {
+ gpu_memory_buffer_pool_.swap(gpu_memory_buffer_pool);
+}
+
void VideoRendererImpl::OnTimeStateChanged(bool time_progressing) {
DCHECK(task_runner_->BelongsToCurrentThread());
time_progressing_ = time_progressing;
@@ -411,7 +289,7 @@ void VideoRendererImpl::OnTimeStateChanged(bool time_progressing) {
// WARNING: Do not attempt to use |lock_| here as this may be a reentrant call
// in response to callbacks firing above.
- if (!use_new_video_renderering_path_ || sink_started_ == time_progressing_)
+ if (sink_started_ == time_progressing_)
return;
if (time_progressing_) {
@@ -424,44 +302,32 @@ void VideoRendererImpl::OnTimeStateChanged(bool time_progressing) {
}
}
-void VideoRendererImpl::PaintNextReadyFrame_Locked() {
- DCHECK(!use_new_video_renderering_path_);
- lock_.AssertAcquired();
-
- scoped_refptr<VideoFrame> next_frame = ready_frames_.front();
- ready_frames_.pop_front();
-
- last_media_time_ = ConvertMediaTimestamp(next_frame->timestamp());
-
- paint_cb_.Run(next_frame);
-
- task_runner_->PostTask(
- FROM_HERE,
- base::Bind(&VideoRendererImpl::AttemptRead, weak_factory_.GetWeakPtr()));
-}
-
-void VideoRendererImpl::DropNextReadyFrame_Locked() {
- DCHECK(!use_new_video_renderering_path_);
- TRACE_EVENT0("media", "VideoRendererImpl:frameDropped");
-
- lock_.AssertAcquired();
-
- last_media_time_ = ConvertMediaTimestamp(ready_frames_.front()->timestamp());
-
- ready_frames_.pop_front();
- frames_dropped_++;
+void VideoRendererImpl::FrameReadyForCopyingToGpuMemoryBuffers(
+ VideoFrameStream::Status status,
+ const scoped_refptr<VideoFrame>& frame) {
+ if (status != VideoFrameStream::OK || start_timestamp_ > frame->timestamp()) {
+ VideoRendererImpl::FrameReady(sequence_token_, status, frame);
+ return;
+ }
- task_runner_->PostTask(
- FROM_HERE,
- base::Bind(&VideoRendererImpl::AttemptRead, weak_factory_.GetWeakPtr()));
+ DCHECK(frame);
+ gpu_memory_buffer_pool_->MaybeCreateHardwareFrame(
+ frame, base::Bind(&VideoRendererImpl::FrameReady,
+ weak_factory_.GetWeakPtr(), sequence_token_, status));
}
-void VideoRendererImpl::FrameReady(VideoFrameStream::Status status,
+void VideoRendererImpl::FrameReady(uint32_t sequence_token,
+ VideoFrameStream::Status status,
const scoped_refptr<VideoFrame>& frame) {
DCHECK(task_runner_->BelongsToCurrentThread());
bool start_sink = false;
{
base::AutoLock auto_lock(lock_);
+ // Stream has been reset and this VideoFrame was decoded before the reset
+ // but the async copy finished after.
+ if (sequence_token != sequence_token_)
+ return;
+
DCHECK_NE(state_, kUninitialized);
DCHECK_NE(state_, kFlushed);
@@ -504,15 +370,12 @@ void VideoRendererImpl::FrameReady(VideoFrameStream::Status status,
received_end_of_stream_ = true;
// See if we can fire EOS immediately instead of waiting for Render().
- if (use_new_video_renderering_path_)
- MaybeFireEndedCallback();
+ MaybeFireEndedCallback_Locked(time_progressing_);
} else {
// Maintain the latest frame decoded so the correct frame is displayed
// after prerolling has completed.
if (frame->timestamp() <= start_timestamp_) {
- if (use_new_video_renderering_path_)
- algorithm_->Reset();
- ready_frames_.clear();
+ algorithm_->Reset();
}
AddReadyFrame_Locked(frame);
}
@@ -526,8 +389,7 @@ void VideoRendererImpl::FrameReady(VideoFrameStream::Status status,
const bool have_nothing = buffering_state_ != BUFFERING_HAVE_ENOUGH;
const bool have_nothing_and_paused = have_nothing && !sink_started_;
if (was_background_rendering_ ||
- (use_new_video_renderering_path_ && have_nothing_and_paused &&
- drop_frames_)) {
+ (have_nothing_and_paused && drop_frames_)) {
base::TimeTicks expiry_time;
if (have_nothing_and_paused) {
// Use the current media wall clock time plus the frame duration since
@@ -557,7 +419,7 @@ void VideoRendererImpl::FrameReady(VideoFrameStream::Status status,
// data.
if (have_nothing && HaveEnoughData_Locked()) {
TransitionToHaveEnough_Locked();
- if (use_new_video_renderering_path_ && !sink_started_ &&
+ if (!sink_started_ &&
!rendered_end_of_stream_) {
start_sink = true;
render_first_frame_and_stop_ = true;
@@ -574,7 +436,7 @@ void VideoRendererImpl::FrameReady(VideoFrameStream::Status status,
// If time is progressing, the sink has already been started; this may be true
// if we have previously underflowed, yet weren't stopped because of audio.
- if (use_new_video_renderering_path_ && start_sink) {
+ if (start_sink) {
DCHECK(!sink_started_);
StartSink();
}
@@ -589,29 +451,20 @@ bool VideoRendererImpl::HaveEnoughData_Locked() {
if (HaveReachedBufferingCap())
return true;
- if (use_new_video_renderering_path_ && was_background_rendering_ &&
- frames_decoded_) {
+ if (was_background_rendering_ && frames_decoded_) {
return true;
}
if (!low_delay_)
return false;
- return ready_frames_.size() > 0 ||
- (use_new_video_renderering_path_ && algorithm_->frames_queued() > 0);
+ return algorithm_->frames_queued() > 0;
}
void VideoRendererImpl::TransitionToHaveEnough_Locked() {
DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK_EQ(buffering_state_, BUFFERING_HAVE_NOTHING);
- if (!ready_frames_.empty()) {
- DCHECK(!use_new_video_renderering_path_);
- // Because the clock might remain paused in for an undetermined amount
- // of time (e.g., seeking while paused), paint the first frame.
- PaintNextReadyFrame_Locked();
- }
-
buffering_state_ = BUFFERING_HAVE_ENOUGH;
buffering_state_cb_.Run(BUFFERING_HAVE_ENOUGH);
}
@@ -635,18 +488,7 @@ void VideoRendererImpl::AddReadyFrame_Locked(
frames_decoded_++;
- if (use_new_video_renderering_path_) {
- algorithm_->EnqueueFrame(frame);
- return;
- }
-
- ready_frames_.push_back(frame);
- DCHECK_LE(ready_frames_.size(),
- static_cast<size_t>(limits::kMaxVideoFrames));
-
- // Avoid needlessly waking up |thread_| unless playing.
- if (state_ == kPlaying)
- frame_available_.Signal();
+ algorithm_->EnqueueFrame(frame);
}
void VideoRendererImpl::AttemptRead() {
@@ -667,10 +509,16 @@ void VideoRendererImpl::AttemptRead_Locked() {
switch (state_) {
case kPlaying:
pending_read_ = true;
- video_frame_stream_->Read(base::Bind(&VideoRendererImpl::FrameReady,
- weak_factory_.GetWeakPtr()));
+ if (gpu_memory_buffer_pool_) {
+ video_frame_stream_->Read(base::Bind(
+ &VideoRendererImpl::FrameReadyForCopyingToGpuMemoryBuffers,
+ weak_factory_.GetWeakPtr()));
+ } else {
+ video_frame_stream_->Read(base::Bind(&VideoRendererImpl::FrameReady,
+ weak_factory_.GetWeakPtr(),
+ sequence_token_));
+ }
return;
-
case kUninitialized:
case kInitializing:
case kFlushing:
@@ -682,19 +530,18 @@ void VideoRendererImpl::AttemptRead_Locked() {
void VideoRendererImpl::OnVideoFrameStreamResetDone() {
base::AutoLock auto_lock(lock_);
DCHECK_EQ(kFlushing, state_);
- DCHECK(!pending_read_);
- DCHECK(ready_frames_.empty());
DCHECK(!received_end_of_stream_);
DCHECK(!rendered_end_of_stream_);
DCHECK_EQ(buffering_state_, BUFFERING_HAVE_NOTHING);
+ // Pending read might be true if an async video frame copy is in flight.
+ pending_read_ = false;
+ sequence_token_++;
state_ = kFlushed;
- latest_possible_paint_time_ = last_media_time_ = base::TimeTicks();
base::ResetAndReturn(&flush_cb_).Run();
}
-void VideoRendererImpl::UpdateStatsAndWait_Locked(
- base::TimeDelta wait_duration) {
+void VideoRendererImpl::UpdateStats_Locked() {
lock_.AssertAcquired();
DCHECK_GE(frames_decoded_, 0);
DCHECK_GE(frames_dropped_, 0);
@@ -708,37 +555,27 @@ void VideoRendererImpl::UpdateStatsAndWait_Locked(
frames_decoded_ = 0;
frames_dropped_ = 0;
}
-
- if (wait_duration > base::TimeDelta())
- frame_available_.TimedWait(wait_duration);
}
void VideoRendererImpl::MaybeStopSinkAfterFirstPaint() {
DCHECK(task_runner_->BelongsToCurrentThread());
- DCHECK(use_new_video_renderering_path_);
-
- {
- base::AutoLock auto_lock(lock_);
- render_first_frame_and_stop_ = false;
- }
if (!time_progressing_ && sink_started_)
StopSink();
+
+ base::AutoLock auto_lock(lock_);
+ render_first_frame_and_stop_ = false;
}
bool VideoRendererImpl::HaveReachedBufferingCap() {
DCHECK(task_runner_->BelongsToCurrentThread());
const size_t kMaxVideoFrames = limits::kMaxVideoFrames;
- if (use_new_video_renderering_path_) {
- // When the display rate is less than the frame rate, the effective frames
- // queued may be much smaller than the actual number of frames queued. Here
- // we ensure that frames_queued() doesn't get excessive.
- return algorithm_->EffectiveFramesQueued() >= kMaxVideoFrames ||
- algorithm_->frames_queued() >= 3 * kMaxVideoFrames;
- }
-
- return ready_frames_.size() >= kMaxVideoFrames;
+ // When the display rate is less than the frame rate, the effective frames
+ // queued may be much smaller than the actual number of frames queued. Here
+ // we ensure that frames_queued() doesn't get excessive.
+ return algorithm_->EffectiveFramesQueued() >= kMaxVideoFrames ||
+ algorithm_->frames_queued() >= 3 * kMaxVideoFrames;
}
void VideoRendererImpl::StartSink() {
@@ -757,7 +594,9 @@ void VideoRendererImpl::StopSink() {
was_background_rendering_ = false;
}
-size_t VideoRendererImpl::MaybeFireEndedCallback() {
+size_t VideoRendererImpl::MaybeFireEndedCallback_Locked(bool time_progressing) {
+ lock_.AssertAcquired();
+
// If there's only one frame in the video or Render() was never called, the
// algorithm will have one frame linger indefinitely. So in cases where the
// frame duration is unknown and we've received EOS, fire it once we get down
@@ -769,7 +608,7 @@ size_t VideoRendererImpl::MaybeFireEndedCallback() {
return effective_frames;
// Don't fire ended if time isn't moving and we have frames.
- if (!time_progressing_ && algorithm_->frames_queued())
+ if (!time_progressing && algorithm_->frames_queued())
return effective_frames;
// Fire ended if we have no more effective frames or only ever had one frame.
diff --git a/chromium/media/renderers/video_renderer_impl.h b/chromium/media/renderers/video_renderer_impl.h
index 7af8bb724f6..3800638a18e 100644
--- a/chromium/media/renderers/video_renderer_impl.h
+++ b/chromium/media/renderers/video_renderer_impl.h
@@ -13,7 +13,6 @@
#include "base/memory/weak_ptr.h"
#include "base/synchronization/condition_variable.h"
#include "base/synchronization/lock.h"
-#include "base/threading/platform_thread.h"
#include "base/timer/timer.h"
#include "media/base/decryptor.h"
#include "media/base/demuxer_stream.h"
@@ -41,8 +40,7 @@ namespace media {
// ready for rendering.
class MEDIA_EXPORT VideoRendererImpl
: public VideoRenderer,
- public NON_EXPORTED_BASE(VideoRendererSink::RenderCallback),
- public base::PlatformThread::Delegate {
+ public NON_EXPORTED_BASE(VideoRendererSink::RenderCallback) {
public:
// |decoders| contains the VideoDecoders to use when initializing.
//
@@ -52,7 +50,8 @@ class MEDIA_EXPORT VideoRendererImpl
//
// Setting |drop_frames_| to true causes the renderer to drop expired frames.
VideoRendererImpl(
- const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
+ const scoped_refptr<base::SingleThreadTaskRunner>& media_task_runner,
+ const scoped_refptr<base::TaskRunner>& worker_task_runner,
VideoRendererSink* sink,
ScopedVector<VideoDecoder> decoders,
bool drop_frames,
@@ -74,10 +73,9 @@ class MEDIA_EXPORT VideoRendererImpl
void StartPlayingFrom(base::TimeDelta timestamp) override;
void OnTimeStateChanged(bool time_progressing) override;
- // PlatformThread::Delegate implementation.
- void ThreadMain() override;
-
void SetTickClockForTesting(scoped_ptr<base::TickClock> tick_clock);
+ void SetGpuMemoryBufferVideoForTesting(
+ scoped_ptr<GpuMemoryBufferVideoFramePool> gpu_memory_buffer_pool);
// VideoRendererSink::RenderCallback implementation.
scoped_refptr<VideoFrame> Render(base::TimeTicks deadline_min,
@@ -85,23 +83,24 @@ class MEDIA_EXPORT VideoRendererImpl
bool background_rendering) override;
void OnFrameDropped() override;
- void disable_new_video_renderer_for_testing() {
- use_new_video_renderering_path_ = false;
- }
-
private:
- // Creates a dedicated |thread_| for video rendering.
- void CreateVideoThread();
-
// Callback for |video_frame_stream_| initialization.
void OnVideoFrameStreamInitialized(bool success);
// Callback for |video_frame_stream_| to deliver decoded video frames and
+ // report video decoding status. If a frame is available the planes will be
+ // copied asynchronously and FrameReady will be called once finished copying.
+ void FrameReadyForCopyingToGpuMemoryBuffers(
+ VideoFrameStream::Status status,
+ const scoped_refptr<VideoFrame>& frame);
+
+ // Callback for |video_frame_stream_| to deliver decoded video frames and
// report video decoding status.
- void FrameReady(VideoFrameStream::Status status,
+ void FrameReady(uint32_t sequence_token,
+ VideoFrameStream::Status status,
const scoped_refptr<VideoFrame>& frame);
- // Helper method for adding a frame to |ready_frames_|.
+ // Helper method for enqueueing a frame to |alogorithm_|.
void AddReadyFrame_Locked(const scoped_refptr<VideoFrame>& frame);
// Helper method that schedules an asynchronous read from the
@@ -113,16 +112,6 @@ class MEDIA_EXPORT VideoRendererImpl
// Called when VideoFrameStream::Reset() completes.
void OnVideoFrameStreamResetDone();
- // Runs |paint_cb_| with the next frame from |ready_frames_|.
- //
- // A read is scheduled to replace the frame.
- void PaintNextReadyFrame_Locked();
-
- // Drops the next frame from |ready_frames_| and runs |statistics_cb_|.
- //
- // A read is scheduled to replace the frame.
- void DropNextReadyFrame_Locked();
-
// Returns true if the renderer has enough data for playback purposes.
// Note that having enough data may be due to reaching end of stream.
bool HaveEnoughData_Locked();
@@ -130,9 +119,8 @@ class MEDIA_EXPORT VideoRendererImpl
void TransitionToHaveNothing();
// Runs |statistics_cb_| with |frames_decoded_| and |frames_dropped_|, resets
- // them to 0, and then waits on |frame_available_| for up to the
- // |wait_duration|.
- void UpdateStatsAndWait_Locked(base::TimeDelta wait_duration);
+ // them to 0.
+ void UpdateStats_Locked();
// Called after we've painted the first frame. If |time_progressing_| is
// false it Stop() on |sink_|.
@@ -147,23 +135,26 @@ class MEDIA_EXPORT VideoRendererImpl
// Fires |ended_cb_| if there are no remaining usable frames and
// |received_end_of_stream_| is true. Sets |rendered_end_of_stream_| if it
- // does so. Returns algorithm_->EffectiveFramesQueued().
- size_t MaybeFireEndedCallback();
+ // does so.
+ //
+ // When called from the media thread, |time_progressing| should reflect the
+ // value of |time_progressing_|. When called from Render() on the sink
+ // callback thread, the inverse of |render_first_frame_and_stop_| should be
+ // used as a proxy for |time_progressing_|.
+ //
+ // Returns algorithm_->EffectiveFramesQueued().
+ size_t MaybeFireEndedCallback_Locked(bool time_progressing);
// Helper method for converting a single media timestamp to wall clock time.
base::TimeTicks ConvertMediaTimestamp(base::TimeDelta media_timestamp);
scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
- // Enables the use of VideoRendererAlgorithm and VideoRendererSink for frame
- // rendering instead of using a thread in a sleep-loop. Set via the command
- // line flag kEnableNewVideoRenderer or via test methods.
- bool use_new_video_renderering_path_;
-
// Sink which calls into VideoRendererImpl via Render() for video frames. Do
// not call any methods on the sink while |lock_| is held or the two threads
// might deadlock. Do not call Start() or Stop() on the sink directly, use
- // StartSink() and StopSink() to ensure background rendering is started.
+ // StartSink() and StopSink() to ensure background rendering is started. Only
+ // access these values on |task_runner_|.
VideoRendererSink* const sink_;
bool sink_started_;
@@ -176,22 +167,16 @@ class MEDIA_EXPORT VideoRendererImpl
// Pool of GpuMemoryBuffers and resources used to create hardware frames.
scoped_ptr<GpuMemoryBufferVideoFramePool> gpu_memory_buffer_pool_;
+ scoped_refptr<MediaLog> media_log_;
+
// Flag indicating low-delay mode.
bool low_delay_;
- // Queue of incoming frames yet to be painted.
- typedef std::deque<scoped_refptr<VideoFrame>> VideoFrameQueue;
- VideoFrameQueue ready_frames_;
-
// Keeps track of whether we received the end of stream buffer and finished
// rendering.
bool received_end_of_stream_;
bool rendered_end_of_stream_;
- // Used to signal |thread_| as frames are added to |frames_|. Rule of thumb:
- // always check |state_| to see if it was set to STOPPED after waking up!
- base::ConditionVariable frame_available_;
-
// Important detail: being in kPlaying doesn't imply that video is being
// rendered. Rather, it means that the renderer is ready to go. The actual
// rendering of video is controlled by time advancing via |get_time_cb_|.
@@ -218,8 +203,11 @@ class MEDIA_EXPORT VideoRendererImpl
};
State state_;
- // Video thread handle.
- base::PlatformThreadHandle thread_;
+ // An integer that represents how many times the video frame stream has been
+ // reset. This is useful when doing video frame copies asynchronously since we
+ // want to discard video frames that might be received after the stream has
+ // been reset.
+ uint32_t sequence_token_;
// Keep track of the outstanding read on the VideoFrameStream. Flushing can
// only complete once the read has completed.
@@ -242,24 +230,11 @@ class MEDIA_EXPORT VideoRendererImpl
base::TimeDelta start_timestamp_;
- // Embedder callback for notifying a new frame is available for painting.
- PaintCB paint_cb_;
-
- // The wallclock times of the last frame removed from the |ready_frames_|
- // queue, either for calling |paint_cb_| or for dropping. Set to null during
- // flushing.
- base::TimeTicks last_media_time_;
-
- // Equivalent to |last_media_time_| + the estimated duration of the frame.
- base::TimeTicks latest_possible_paint_time_;
-
// Keeps track of the number of frames decoded and dropped since the
// last call to |statistics_cb_|. These must be accessed under lock.
int frames_decoded_;
int frames_dropped_;
- bool is_shutting_down_;
-
scoped_ptr<base::TickClock> tick_clock_;
// Algorithm for selecting which frame to render; manages frames and all
@@ -271,7 +246,8 @@ class MEDIA_EXPORT VideoRendererImpl
// counted. Must be accessed under |lock_| once |sink_| is started.
bool was_background_rendering_;
- // Indicates whether or not media time is currently progressing or not.
+ // Indicates whether or not media time is currently progressing or not. Must
+ // only be accessed from |task_runner_|.
bool time_progressing_;
// Indicates that Render() should only render the first frame and then request
diff --git a/chromium/media/renderers/video_renderer_impl_unittest.cc b/chromium/media/renderers/video_renderer_impl_unittest.cc
index 6786e64323c..e7a09efb984 100644
--- a/chromium/media/renderers/video_renderer_impl_unittest.cc
+++ b/chromium/media/renderers/video_renderer_impl_unittest.cc
@@ -47,7 +47,7 @@ MATCHER_P(HasTimestamp, ms, "") {
}
class VideoRendererImplTest
- : public testing::TestWithParam<bool /* new_video_renderer */> {
+ : public testing::Test {
public:
VideoRendererImplTest()
: tick_clock_(new base::SimpleTestTickClock()),
@@ -61,13 +61,11 @@ class VideoRendererImplTest
base::Bind(&MockCB::FrameReceived, base::Unretained(&mock_cb_)),
message_loop_.task_runner()));
- renderer_.reset(new VideoRendererImpl(message_loop_.task_runner(),
- null_video_sink_.get(),
- decoders.Pass(), true,
- nullptr, // gpu_factories
- new MediaLog()));
- if (!GetParam())
- renderer_->disable_new_video_renderer_for_testing();
+ renderer_.reset(new VideoRendererImpl(
+ message_loop_.task_runner(), message_loop_.task_runner().get(),
+ null_video_sink_.get(), decoders.Pass(), true,
+ nullptr, // gpu_factories
+ new MediaLog()));
renderer_->SetTickClockForTesting(scoped_ptr<base::TickClock>(tick_clock_));
null_video_sink_->set_tick_clock_for_testing(tick_clock_);
time_source_.set_tick_clock_for_testing(tick_clock_);
@@ -166,17 +164,17 @@ class VideoRendererImplTest
// A clip that is four frames long: "0 10 20 30"
// A clip that has a decode error: "60 70 error"
void QueueFrames(const std::string& str) {
- std::vector<std::string> tokens;
- base::SplitString(str, ' ', &tokens);
- for (size_t i = 0; i < tokens.size(); ++i) {
- if (tokens[i] == "abort") {
+ for (const std::string& token :
+ base::SplitString(str, " ", base::TRIM_WHITESPACE,
+ base::SPLIT_WANT_ALL)) {
+ if (token == "abort") {
scoped_refptr<VideoFrame> null_frame;
decode_results_.push_back(
std::make_pair(VideoDecoder::kAborted, null_frame));
continue;
}
- if (tokens[i] == "error") {
+ if (token == "error") {
scoped_refptr<VideoFrame> null_frame;
decode_results_.push_back(
std::make_pair(VideoDecoder::kDecodeError, null_frame));
@@ -184,19 +182,16 @@ class VideoRendererImplTest
}
int timestamp_in_ms = 0;
- if (base::StringToInt(tokens[i], &timestamp_in_ms)) {
+ if (base::StringToInt(token, &timestamp_in_ms)) {
gfx::Size natural_size = TestVideoConfig::NormalCodedSize();
scoped_refptr<VideoFrame> frame = VideoFrame::CreateFrame(
- VideoFrame::YV12,
- natural_size,
- gfx::Rect(natural_size),
- natural_size,
- base::TimeDelta::FromMilliseconds(timestamp_in_ms));
+ PIXEL_FORMAT_YV12, natural_size, gfx::Rect(natural_size),
+ natural_size, base::TimeDelta::FromMilliseconds(timestamp_in_ms));
decode_results_.push_back(std::make_pair(VideoDecoder::kOk, frame));
continue;
}
- CHECK(false) << "Unrecognized decoder buffer token: " << tokens[i];
+ CHECK(false) << "Unrecognized decoder buffer token: " << token;
}
}
@@ -304,6 +299,8 @@ class VideoRendererImplTest
WallClockTimeSource time_source_;
+ base::MessageLoop message_loop_;
+
private:
void DecodeRequested(const scoped_refptr<DecoderBuffer>& buffer,
const VideoDecoder::DecodeCB& decode_cb) {
@@ -338,8 +335,6 @@ class VideoRendererImplTest
MOCK_METHOD0(OnWaitingForDecryptionKey, void(void));
- base::MessageLoop message_loop_;
-
// Used to protect |time_|.
base::Lock lock_;
base::TimeDelta time_;
@@ -361,21 +356,21 @@ class VideoRendererImplTest
DISALLOW_COPY_AND_ASSIGN(VideoRendererImplTest);
};
-TEST_P(VideoRendererImplTest, DoNothing) {
+TEST_F(VideoRendererImplTest, DoNothing) {
// Test that creation and deletion doesn't depend on calls to Initialize()
// and/or Destroy().
}
-TEST_P(VideoRendererImplTest, DestroyWithoutInitialize) {
+TEST_F(VideoRendererImplTest, DestroyWithoutInitialize) {
Destroy();
}
-TEST_P(VideoRendererImplTest, Initialize) {
+TEST_F(VideoRendererImplTest, Initialize) {
Initialize();
Destroy();
}
-TEST_P(VideoRendererImplTest, InitializeAndStartPlayingFrom) {
+TEST_F(VideoRendererImplTest, InitializeAndStartPlayingFrom) {
Initialize();
QueueFrames("0 10 20 30");
EXPECT_CALL(mock_cb_, FrameReceived(HasTimestamp(0)));
@@ -384,7 +379,7 @@ TEST_P(VideoRendererImplTest, InitializeAndStartPlayingFrom) {
Destroy();
}
-TEST_P(VideoRendererImplTest, InitializeAndEndOfStream) {
+TEST_F(VideoRendererImplTest, InitializeAndEndOfStream) {
Initialize();
StartPlayingFrom(0);
WaitForPendingRead();
@@ -402,12 +397,12 @@ TEST_P(VideoRendererImplTest, InitializeAndEndOfStream) {
Destroy();
}
-TEST_P(VideoRendererImplTest, DestroyWhileInitializing) {
+TEST_F(VideoRendererImplTest, DestroyWhileInitializing) {
CallInitialize(NewExpectedStatusCB(PIPELINE_ERROR_ABORT), false, PIPELINE_OK);
Destroy();
}
-TEST_P(VideoRendererImplTest, DestroyWhileFlushing) {
+TEST_F(VideoRendererImplTest, DestroyWhileFlushing) {
Initialize();
QueueFrames("0 10 20 30");
EXPECT_CALL(mock_cb_, FrameReceived(HasTimestamp(0)));
@@ -418,7 +413,7 @@ TEST_P(VideoRendererImplTest, DestroyWhileFlushing) {
Destroy();
}
-TEST_P(VideoRendererImplTest, Play) {
+TEST_F(VideoRendererImplTest, Play) {
Initialize();
QueueFrames("0 10 20 30");
EXPECT_CALL(mock_cb_, FrameReceived(HasTimestamp(0)));
@@ -427,7 +422,7 @@ TEST_P(VideoRendererImplTest, Play) {
Destroy();
}
-TEST_P(VideoRendererImplTest, FlushWithNothingBuffered) {
+TEST_F(VideoRendererImplTest, FlushWithNothingBuffered) {
Initialize();
StartPlayingFrom(0);
@@ -437,11 +432,18 @@ TEST_P(VideoRendererImplTest, FlushWithNothingBuffered) {
Destroy();
}
-TEST_P(VideoRendererImplTest, DecodeError_Playing) {
+TEST_F(VideoRendererImplTest, DecodeError_Playing) {
Initialize();
QueueFrames("0 10 20 30");
EXPECT_CALL(mock_cb_, FrameReceived(_)).Times(testing::AtLeast(1));
+
+ // Consider the case that rendering is faster than we setup the test event.
+ // In that case, when we run out of the frames, BUFFERING_HAVE_NOTHING will
+ // be called.
EXPECT_CALL(mock_cb_, BufferingStateChange(BUFFERING_HAVE_ENOUGH));
+ EXPECT_CALL(mock_cb_, BufferingStateChange(BUFFERING_HAVE_NOTHING))
+ .Times(testing::AtMost(1));
+
StartPlayingFrom(0);
renderer_->OnTimeStateChanged(true);
time_source_.StartTicking();
@@ -453,14 +455,14 @@ TEST_P(VideoRendererImplTest, DecodeError_Playing) {
Destroy();
}
-TEST_P(VideoRendererImplTest, DecodeError_DuringStartPlayingFrom) {
+TEST_F(VideoRendererImplTest, DecodeError_DuringStartPlayingFrom) {
Initialize();
QueueFrames("error");
StartPlayingFrom(0);
Destroy();
}
-TEST_P(VideoRendererImplTest, StartPlayingFrom_Exact) {
+TEST_F(VideoRendererImplTest, StartPlayingFrom_Exact) {
Initialize();
QueueFrames("50 60 70 80 90");
@@ -470,7 +472,7 @@ TEST_P(VideoRendererImplTest, StartPlayingFrom_Exact) {
Destroy();
}
-TEST_P(VideoRendererImplTest, StartPlayingFrom_RightBefore) {
+TEST_F(VideoRendererImplTest, StartPlayingFrom_RightBefore) {
Initialize();
QueueFrames("50 60 70 80 90");
@@ -480,7 +482,7 @@ TEST_P(VideoRendererImplTest, StartPlayingFrom_RightBefore) {
Destroy();
}
-TEST_P(VideoRendererImplTest, StartPlayingFrom_RightAfter) {
+TEST_F(VideoRendererImplTest, StartPlayingFrom_RightAfter) {
Initialize();
QueueFrames("50 60 70 80 90");
@@ -490,7 +492,7 @@ TEST_P(VideoRendererImplTest, StartPlayingFrom_RightAfter) {
Destroy();
}
-TEST_P(VideoRendererImplTest, StartPlayingFrom_LowDelay) {
+TEST_F(VideoRendererImplTest, StartPlayingFrom_LowDelay) {
// In low-delay mode only one frame is required to finish preroll. But frames
// prior to the start time will not be used.
InitializeWithLowDelay(true);
@@ -520,7 +522,7 @@ TEST_P(VideoRendererImplTest, StartPlayingFrom_LowDelay) {
}
// Verify that a late decoder response doesn't break invariants in the renderer.
-TEST_P(VideoRendererImplTest, DestroyDuringOutstandingRead) {
+TEST_F(VideoRendererImplTest, DestroyDuringOutstandingRead) {
Initialize();
QueueFrames("0 10 20 30");
EXPECT_CALL(mock_cb_, FrameReceived(HasTimestamp(0)));
@@ -533,12 +535,12 @@ TEST_P(VideoRendererImplTest, DestroyDuringOutstandingRead) {
Destroy();
}
-TEST_P(VideoRendererImplTest, VideoDecoder_InitFailure) {
+TEST_F(VideoRendererImplTest, VideoDecoder_InitFailure) {
InitializeRenderer(false, false);
Destroy();
}
-TEST_P(VideoRendererImplTest, Underflow) {
+TEST_F(VideoRendererImplTest, Underflow) {
Initialize();
QueueFrames("0 30 60 90");
@@ -564,10 +566,7 @@ TEST_P(VideoRendererImplTest, Underflow) {
// start rendering frames on its own thread, so the first frame may be
// received.
time_source_.StartTicking();
- if (GetParam())
- EXPECT_CALL(mock_cb_, FrameReceived(HasTimestamp(30))).Times(0);
- else
- EXPECT_CALL(mock_cb_, FrameReceived(HasTimestamp(30))).Times(AnyNumber());
+ EXPECT_CALL(mock_cb_, FrameReceived(HasTimestamp(30))).Times(0);
EXPECT_CALL(mock_cb_, FrameReceived(HasTimestamp(60))).Times(0);
EXPECT_CALL(mock_cb_, FrameReceived(HasTimestamp(90)))
@@ -586,10 +585,6 @@ TEST_P(VideoRendererImplTest, Underflow) {
EXPECT_CALL(mock_cb_, BufferingStateChange(BUFFERING_HAVE_NOTHING))
.WillOnce(RunClosure(event.GetClosure()));
AdvanceTimeInMs(30);
- // The old rendering path needs wall clock time to increase too.
- if (!GetParam())
- AdvanceWallclockTimeInMs(30);
-
event.RunAndWait();
Mock::VerifyAndClearExpectations(&mock_cb_);
}
@@ -610,11 +605,7 @@ TEST_P(VideoRendererImplTest, Underflow) {
// Verifies that the sink is stopped after rendering the first frame if
// playback hasn't started.
-TEST_P(VideoRendererImplTest, RenderingStopsAfterFirstFrame) {
- // This test is only for the new rendering path.
- if (!GetParam())
- return;
-
+TEST_F(VideoRendererImplTest, RenderingStopsAfterFirstFrame) {
InitializeWithLowDelay(true);
QueueFrames("0");
@@ -641,11 +632,7 @@ TEST_P(VideoRendererImplTest, RenderingStopsAfterFirstFrame) {
// Verifies that the sink is stopped after rendering the first frame if
// playback ha started.
-TEST_P(VideoRendererImplTest, RenderingStopsAfterOneFrameWithEOS) {
- // This test is only for the new rendering path.
- if (!GetParam())
- return;
-
+TEST_F(VideoRendererImplTest, RenderingStopsAfterOneFrameWithEOS) {
InitializeWithLowDelay(true);
QueueFrames("0");
@@ -673,11 +660,7 @@ TEST_P(VideoRendererImplTest, RenderingStopsAfterOneFrameWithEOS) {
// Tests the case where the video started and received a single Render() call,
// then the video was put into the background.
-TEST_P(VideoRendererImplTest, RenderingStartedThenStopped) {
- // This test is only for the new rendering path.
- if (!GetParam())
- return;
-
+TEST_F(VideoRendererImplTest, RenderingStartedThenStopped) {
Initialize();
QueueFrames("0 30 60 90");
@@ -695,6 +678,14 @@ TEST_P(VideoRendererImplTest, RenderingStartedThenStopped) {
EXPECT_EQ(0u, last_pipeline_statistics_.video_frames_dropped);
}
+ // Consider the case that rendering is faster than we setup the test event.
+ // In that case, when we run out of the frames, BUFFERING_HAVE_NOTHING will
+ // be called. And then during SatisfyPendingReadWithEndOfStream,
+ // BUFFER_HAVE_ENOUGH will be called again.
+ EXPECT_CALL(mock_cb_, BufferingStateChange(BUFFERING_HAVE_ENOUGH))
+ .Times(testing::AtMost(1));
+ EXPECT_CALL(mock_cb_, BufferingStateChange(BUFFERING_HAVE_NOTHING))
+ .Times(testing::AtMost(1));
renderer_->OnTimeStateChanged(true);
time_source_.StartTicking();
@@ -718,11 +709,7 @@ TEST_P(VideoRendererImplTest, RenderingStartedThenStopped) {
Destroy();
}
-TEST_P(VideoRendererImplTest, StartPlayingFromThenFlushThenEOS) {
- // This test is only for the new rendering path.
- if (!GetParam())
- return;
-
+TEST_F(VideoRendererImplTest, StartPlayingFromThenFlushThenEOS) {
Initialize();
QueueFrames("0 30 60 90");
@@ -750,11 +737,58 @@ TEST_P(VideoRendererImplTest, StartPlayingFromThenFlushThenEOS) {
Destroy();
}
-INSTANTIATE_TEST_CASE_P(OldVideoRenderer,
- VideoRendererImplTest,
- testing::Values(false));
-INSTANTIATE_TEST_CASE_P(NewVideoRenderer,
- VideoRendererImplTest,
- testing::Values(true));
+namespace {
+class MockGpuMemoryBufferVideoFramePool : public GpuMemoryBufferVideoFramePool {
+ public:
+ MockGpuMemoryBufferVideoFramePool(std::vector<base::Closure>* frame_ready_cbs)
+ : frame_ready_cbs_(frame_ready_cbs) {}
+ void MaybeCreateHardwareFrame(const scoped_refptr<VideoFrame>& video_frame,
+ const FrameReadyCB& frame_ready_cb) override {
+ frame_ready_cbs_->push_back(base::Bind(frame_ready_cb, video_frame));
+ }
+
+ private:
+ std::vector<base::Closure>* frame_ready_cbs_;
+};
+}
+
+class VideoRendererImplAsyncAddFrameReadyTest : public VideoRendererImplTest {
+ public:
+ VideoRendererImplAsyncAddFrameReadyTest() {
+ scoped_ptr<GpuMemoryBufferVideoFramePool> gpu_memory_buffer_pool(
+ new MockGpuMemoryBufferVideoFramePool(&frame_ready_cbs_));
+ renderer_->SetGpuMemoryBufferVideoForTesting(gpu_memory_buffer_pool.Pass());
+ }
+
+ protected:
+ std::vector<base::Closure> frame_ready_cbs_;
+};
+
+TEST_F(VideoRendererImplAsyncAddFrameReadyTest, InitializeAndStartPlayingFrom) {
+ Initialize();
+ QueueFrames("0 10 20 30");
+ EXPECT_CALL(mock_cb_, FrameReceived(HasTimestamp(0)));
+ EXPECT_CALL(mock_cb_, BufferingStateChange(BUFFERING_HAVE_ENOUGH));
+ StartPlayingFrom(0);
+ ASSERT_EQ(1u, frame_ready_cbs_.size());
+
+ uint32_t frame_ready_index = 0;
+ while (frame_ready_index < frame_ready_cbs_.size()) {
+ frame_ready_cbs_[frame_ready_index++].Run();
+ message_loop_.RunUntilIdle();
+ }
+ Destroy();
+}
+
+TEST_F(VideoRendererImplAsyncAddFrameReadyTest, SequenceTokenDiscardOneFrame) {
+ Initialize();
+ QueueFrames("0 10 20 30");
+ StartPlayingFrom(0);
+ Flush();
+ ASSERT_EQ(1u, frame_ready_cbs_.size());
+ // This frame will be discarded.
+ frame_ready_cbs_.front().Run();
+ Destroy();
+}
} // namespace media
diff --git a/chromium/media/shared_memory_support.gypi b/chromium/media/shared_memory_support.gypi
index 65403f748d8..721f6aa4c49 100644
--- a/chromium/media/shared_memory_support.gypi
+++ b/chromium/media/shared_memory_support.gypi
@@ -10,6 +10,8 @@
'shared_memory_support_sources': [
'audio/audio_parameters.cc',
'audio/audio_parameters.h',
+ 'audio/point.cc',
+ 'audio/point.h',
'base/audio_bus.cc',
'base/audio_bus.h',
'base/channel_layout.cc',
diff --git a/chromium/media/video/capture/file_video_capture_device.cc b/chromium/media/video/capture/file_video_capture_device.cc
deleted file mode 100644
index 295a35b30d7..00000000000
--- a/chromium/media/video/capture/file_video_capture_device.cc
+++ /dev/null
@@ -1,264 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/video/capture/file_video_capture_device.h"
-
-
-#include "base/bind.h"
-#include "base/strings/string_number_conversions.h"
-#include "base/strings/string_piece.h"
-#include "media/base/video_capture_types.h"
-
-namespace media {
-static const int kY4MHeaderMaxSize = 200;
-static const char kY4MSimpleFrameDelimiter[] = "FRAME";
-static const int kY4MSimpleFrameDelimiterSize = 6;
-
-int ParseY4MInt(const base::StringPiece& token) {
- int temp_int;
- CHECK(base::StringToInt(token, &temp_int)) << token;
- return temp_int;
-}
-
-// Extract numerator and denominator out of a token that must have the aspect
-// numerator:denominator, both integer numbers.
-void ParseY4MRational(const base::StringPiece& token,
- int* numerator,
- int* denominator) {
- size_t index_divider = token.find(':');
- CHECK_NE(index_divider, token.npos);
- *numerator = ParseY4MInt(token.substr(0, index_divider));
- *denominator = ParseY4MInt(token.substr(index_divider + 1, token.length()));
- CHECK(*denominator);
-}
-
-// This function parses the ASCII string in |header| as belonging to a Y4M file,
-// returning the collected format in |video_format|. For a non authoritative
-// explanation of the header format, check
-// http://wiki.multimedia.cx/index.php?title=YUV4MPEG2
-// Restrictions: Only interlaced I420 pixel format is supported, and pixel
-// aspect ratio is ignored.
-// Implementation notes: Y4M header should end with an ASCII 0x20 (whitespace)
-// character, however all examples mentioned in the Y4M header description end
-// with a newline character instead. Also, some headers do _not_ specify pixel
-// format, in this case it means I420.
-// This code was inspired by third_party/libvpx/.../y4minput.* .
-void ParseY4MTags(const std::string& file_header,
- media::VideoCaptureFormat* video_format) {
- video_format->pixel_format = media::PIXEL_FORMAT_I420;
- video_format->frame_size.set_width(0);
- video_format->frame_size.set_height(0);
- size_t index = 0;
- size_t blank_position = 0;
- base::StringPiece token;
- while ((blank_position = file_header.find_first_of("\n ", index)) !=
- std::string::npos) {
- // Every token is supposed to have an identifier letter and a bunch of
- // information immediately after, which we extract into a |token| here.
- token =
- base::StringPiece(&file_header[index + 1], blank_position - index - 1);
- CHECK(!token.empty());
- switch (file_header[index]) {
- case 'W':
- video_format->frame_size.set_width(ParseY4MInt(token));
- break;
- case 'H':
- video_format->frame_size.set_height(ParseY4MInt(token));
- break;
- case 'F': {
- // If the token is "FRAME", it means we have finished with the header.
- if (token[0] == 'R')
- break;
- int fps_numerator, fps_denominator;
- ParseY4MRational(token, &fps_numerator, &fps_denominator);
- video_format->frame_rate = fps_numerator / fps_denominator;
- break;
- }
- case 'I':
- // Interlacing is ignored, but we don't like mixed modes.
- CHECK_NE(token[0], 'm');
- break;
- case 'A':
- // Pixel aspect ratio ignored.
- break;
- case 'C':
- CHECK(token == "420" || token == "420jpeg" || token == "420paldv")
- << token; // Only I420 is supported, and we fudge the variants.
- break;
- default:
- break;
- }
- // We're done if we have found a newline character right after the token.
- if (file_header[blank_position] == '\n')
- break;
- index = blank_position + 1;
- }
- // Last video format semantic correctness check before sending it back.
- CHECK(video_format->IsValid());
-}
-
-// Reads and parses the header of a Y4M |file|, returning the collected pixel
-// format in |video_format|. Returns the index of the first byte of the first
-// video frame.
-// Restrictions: Only trivial per-frame headers are supported.
-// static
-int64 FileVideoCaptureDevice::ParseFileAndExtractVideoFormat(
- base::File* file,
- media::VideoCaptureFormat* video_format) {
- std::string header(kY4MHeaderMaxSize, 0);
- file->Read(0, &header[0], kY4MHeaderMaxSize - 1);
-
- size_t header_end = header.find(kY4MSimpleFrameDelimiter);
- CHECK_NE(header_end, header.npos);
-
- ParseY4MTags(header, video_format);
- return header_end + kY4MSimpleFrameDelimiterSize;
-}
-
-// Opens a given file for reading, and returns the file to the caller, who is
-// responsible for closing it.
-// static
-base::File FileVideoCaptureDevice::OpenFileForRead(
- const base::FilePath& file_path) {
- base::File file(file_path, base::File::FLAG_OPEN | base::File::FLAG_READ);
- DLOG_IF(ERROR, file.IsValid())
- << file_path.value()
- << ", error: " << base::File::ErrorToString(file.error_details());
- return file.Pass();
-}
-
-FileVideoCaptureDevice::FileVideoCaptureDevice(const base::FilePath& file_path)
- : capture_thread_("CaptureThread"),
- file_path_(file_path),
- frame_size_(0),
- current_byte_index_(0),
- first_frame_byte_index_(0) {}
-
-FileVideoCaptureDevice::~FileVideoCaptureDevice() {
- DCHECK(thread_checker_.CalledOnValidThread());
- // Check if the thread is running.
- // This means that the device have not been DeAllocated properly.
- CHECK(!capture_thread_.IsRunning());
-}
-
-void FileVideoCaptureDevice::AllocateAndStart(
- const VideoCaptureParams& params,
- scoped_ptr<VideoCaptureDevice::Client> client) {
- DCHECK(thread_checker_.CalledOnValidThread());
- CHECK(!capture_thread_.IsRunning());
-
- capture_thread_.Start();
- capture_thread_.message_loop()->PostTask(
- FROM_HERE,
- base::Bind(&FileVideoCaptureDevice::OnAllocateAndStart,
- base::Unretained(this),
- params,
- base::Passed(&client)));
-}
-
-void FileVideoCaptureDevice::StopAndDeAllocate() {
- DCHECK(thread_checker_.CalledOnValidThread());
- CHECK(capture_thread_.IsRunning());
-
- capture_thread_.message_loop()->PostTask(
- FROM_HERE,
- base::Bind(&FileVideoCaptureDevice::OnStopAndDeAllocate,
- base::Unretained(this)));
- capture_thread_.Stop();
-}
-
-int FileVideoCaptureDevice::CalculateFrameSize() const {
- DCHECK_EQ(capture_format_.pixel_format, PIXEL_FORMAT_I420);
- DCHECK_EQ(capture_thread_.message_loop(), base::MessageLoop::current());
- return capture_format_.ImageAllocationSize();
-}
-
-void FileVideoCaptureDevice::OnAllocateAndStart(
- const VideoCaptureParams& params,
- scoped_ptr<VideoCaptureDevice::Client> client) {
- DCHECK_EQ(capture_thread_.message_loop(), base::MessageLoop::current());
-
- client_ = client.Pass();
-
- // Open the file and parse the header. Get frame size and format.
- DCHECK(!file_.IsValid());
- file_ = OpenFileForRead(file_path_);
- if (!file_.IsValid()) {
- client_->OnError("Could not open Video file");
- return;
- }
- first_frame_byte_index_ =
- ParseFileAndExtractVideoFormat(&file_, &capture_format_);
- current_byte_index_ = first_frame_byte_index_;
- DVLOG(1) << "Opened video file " << capture_format_.frame_size.ToString()
- << ", fps: " << capture_format_.frame_rate;
-
- frame_size_ = CalculateFrameSize();
- video_frame_.reset(new uint8[frame_size_]);
-
- capture_thread_.message_loop()->PostTask(
- FROM_HERE,
- base::Bind(&FileVideoCaptureDevice::OnCaptureTask,
- base::Unretained(this)));
-}
-
-void FileVideoCaptureDevice::OnStopAndDeAllocate() {
- DCHECK_EQ(capture_thread_.message_loop(), base::MessageLoop::current());
- file_.Close();
- client_.reset();
- current_byte_index_ = 0;
- first_frame_byte_index_ = 0;
- frame_size_ = 0;
- next_frame_time_ = base::TimeTicks();
- video_frame_.reset();
-}
-
-void FileVideoCaptureDevice::OnCaptureTask() {
- DCHECK_EQ(capture_thread_.message_loop(), base::MessageLoop::current());
- if (!client_)
- return;
- int result = file_.Read(current_byte_index_,
- reinterpret_cast<char*>(video_frame_.get()),
- frame_size_);
-
- // If we passed EOF to base::File, it will return 0 read characters. In that
- // case, reset the pointer and read again.
- if (result != frame_size_) {
- CHECK_EQ(result, 0);
- current_byte_index_ = first_frame_byte_index_;
- CHECK_EQ(file_.Read(current_byte_index_,
- reinterpret_cast<char*>(video_frame_.get()),
- frame_size_),
- frame_size_);
- } else {
- current_byte_index_ += frame_size_ + kY4MSimpleFrameDelimiterSize;
- }
-
- // Give the captured frame to the client.
- const base::TimeTicks current_time = base::TimeTicks::Now();
- client_->OnIncomingCapturedData(video_frame_.get(),
- frame_size_,
- capture_format_,
- 0,
- current_time);
- // Reschedule next CaptureTask.
- const base::TimeDelta frame_interval =
- base::TimeDelta::FromMicroseconds(1E6 / capture_format_.frame_rate);
- if (next_frame_time_.is_null()) {
- next_frame_time_ = current_time + frame_interval;
- } else {
- next_frame_time_ += frame_interval;
- // Don't accumulate any debt if we are lagging behind - just post next frame
- // immediately and continue as normal.
- if (next_frame_time_ < current_time)
- next_frame_time_ = current_time;
- }
- base::MessageLoop::current()->PostDelayedTask(
- FROM_HERE,
- base::Bind(&FileVideoCaptureDevice::OnCaptureTask,
- base::Unretained(this)),
- next_frame_time_ - current_time);
-}
-
-} // namespace media
diff --git a/chromium/media/video/fake_video_encode_accelerator.cc b/chromium/media/video/fake_video_encode_accelerator.cc
index 8e23df5f60c..f0087db1f41 100644
--- a/chromium/media/video/fake_video_encode_accelerator.cc
+++ b/chromium/media/video/fake_video_encode_accelerator.cc
@@ -41,12 +41,11 @@ FakeVideoEncodeAccelerator::GetSupportedProfiles() {
return profiles;
}
-bool FakeVideoEncodeAccelerator::Initialize(
- VideoFrame::Format input_format,
- const gfx::Size& input_visible_size,
- VideoCodecProfile output_profile,
- uint32 initial_bitrate,
- Client* client) {
+bool FakeVideoEncodeAccelerator::Initialize(VideoPixelFormat input_format,
+ const gfx::Size& input_visible_size,
+ VideoCodecProfile output_profile,
+ uint32 initial_bitrate,
+ Client* client) {
if (!will_initialization_succeed_) {
return false;
}
diff --git a/chromium/media/video/fake_video_encode_accelerator.h b/chromium/media/video/fake_video_encode_accelerator.h
index c421e96ed9d..91b6f1d4f77 100644
--- a/chromium/media/video/fake_video_encode_accelerator.h
+++ b/chromium/media/video/fake_video_encode_accelerator.h
@@ -29,7 +29,7 @@ class MEDIA_EXPORT FakeVideoEncodeAccelerator : public VideoEncodeAccelerator {
~FakeVideoEncodeAccelerator() override;
VideoEncodeAccelerator::SupportedProfiles GetSupportedProfiles() override;
- bool Initialize(VideoFrame::Format input_format,
+ bool Initialize(VideoPixelFormat input_format,
const gfx::Size& input_visible_size,
VideoCodecProfile output_profile,
uint32 initial_bitrate,
diff --git a/chromium/media/video/gpu_memory_buffer_video_frame_pool.cc b/chromium/media/video/gpu_memory_buffer_video_frame_pool.cc
index 137a56d88d2..f1965d109b9 100644
--- a/chromium/media/video/gpu_memory_buffer_video_frame_pool.cc
+++ b/chromium/media/video/gpu_memory_buffer_video_frame_pool.cc
@@ -7,46 +7,68 @@
#include <GLES2/gl2.h>
#include <GLES2/gl2ext.h>
+#include <algorithm>
#include <list>
#include <utility>
+#include "base/barrier_closure.h"
#include "base/bind.h"
#include "base/containers/stack_container.h"
#include "base/location.h"
#include "base/memory/linked_ptr.h"
-#include "base/single_thread_task_runner.h"
+#include "base/strings/stringprintf.h"
+#include "base/trace_event/memory_dump_provider.h"
#include "base/trace_event/trace_event.h"
+#include "gpu/GLES2/gl2extchromium.h"
#include "gpu/command_buffer/client/gles2_interface.h"
#include "media/renderers/gpu_video_accelerator_factories.h"
+#include "third_party/libyuv/include/libyuv.h"
+#include "ui/gfx/buffer_format_util.h"
+#include "ui/gl/trace_util.h"
namespace media {
// Implementation of a pool of GpuMemoryBuffers used to back VideoFrames.
class GpuMemoryBufferVideoFramePool::PoolImpl
: public base::RefCountedThreadSafe<
- GpuMemoryBufferVideoFramePool::PoolImpl> {
+ GpuMemoryBufferVideoFramePool::PoolImpl>,
+ public base::trace_event::MemoryDumpProvider {
public:
- // |task_runner| is associated to the thread where the context of
- // GLES2Interface returned by |gpu_factories| lives.
+ // |media_task_runner| is the media task runner associated with the
+ // GL context provided by |gpu_factories|
+ // |worker_task_runner| is a task runner used to asynchronously copy
+ // video frame's planes.
// |gpu_factories| is an interface to GPU related operation and can be
- // null.
- PoolImpl(const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
+ // null if a GL context is not available.
+ PoolImpl(const scoped_refptr<base::SingleThreadTaskRunner>& media_task_runner,
+ const scoped_refptr<base::TaskRunner>& worker_task_runner,
const scoped_refptr<GpuVideoAcceleratorFactories>& gpu_factories)
- : task_runner_(task_runner),
+ : media_task_runner_(media_task_runner),
+ worker_task_runner_(worker_task_runner),
gpu_factories_(gpu_factories),
- texture_target_(gpu_factories_ ? gpu_factories_->ImageTextureTarget()
- : GL_TEXTURE_2D) {}
+ texture_target_(gpu_factories ? gpu_factories->ImageTextureTarget()
+ : GL_TEXTURE_2D),
+ output_format_(PIXEL_FORMAT_UNKNOWN) {
+ DCHECK(media_task_runner_);
+ DCHECK(worker_task_runner_);
+ }
+
+ // Takes a software VideoFrame and calls |frame_ready_cb| with a VideoFrame
+ // backed by native textures if possible.
+ // The data contained in video_frame is copied into the returned frame
+ // asynchronously posting tasks to |worker_task_runner_|, while
+ // |frame_ready_cb| will be called on |media_task_runner_| once all the data
+ // has been copied.
+ void CreateHardwareFrame(const scoped_refptr<VideoFrame>& video_frame,
+ const FrameReadyCB& cb);
- // Takes a software VideoFrame and returns a VideoFrame backed by native
- // textures if possible.
- // The data contained in video_frame is copied into the returned frame.
- scoped_refptr<VideoFrame> CreateHardwareFrame(
- const scoped_refptr<VideoFrame>& video_frame);
+ bool OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
+ base::trace_event::ProcessMemoryDump* pmd) override;
private:
friend class base::RefCountedThreadSafe<
GpuMemoryBufferVideoFramePool::PoolImpl>;
- ~PoolImpl();
+ ~PoolImpl() override;
// Resource to represent a plane.
struct PlaneResource {
@@ -59,27 +81,45 @@ class GpuMemoryBufferVideoFramePool::PoolImpl
// All the resources needed to compose a frame.
struct FrameResources {
- FrameResources(VideoFrame::Format format, const gfx::Size& size)
- : format(format), size(size) {}
+ explicit FrameResources(const gfx::Size& size) : size(size) {}
bool in_use = true;
- VideoFrame::Format format;
gfx::Size size;
PlaneResource plane_resources[VideoFrame::kMaxPlanes];
};
+ // Copy |video_frame| data into |frame_resouces|
+ // and calls |done| when done.
+ void CopyVideoFrameToGpuMemoryBuffers(
+ const scoped_refptr<VideoFrame>& video_frame,
+ FrameResources* frame_resources,
+ const FrameReadyCB& frame_ready_cb);
+
+ // Called when all the data has been copied.
+ void OnCopiesDone(const scoped_refptr<VideoFrame>& video_frame,
+ FrameResources* frame_resources,
+ const FrameReadyCB& frame_ready_cb);
+
+ // Prepares GL resources, mailboxes and calls |frame_ready_cb| with the new
+ // VideoFrame.
+ // This has to be run on |media_task_runner_| where |frame_ready_cb| will also
+ // be run.
+ void BindAndCreateMailboxesHardwareFrameResources(
+ const scoped_refptr<VideoFrame>& video_frame,
+ FrameResources* frame_resources,
+ const FrameReadyCB& frame_ready_cb);
+
// Return true if |resources| can be used to represent a frame for
// specific |format| and |size|.
static bool AreFrameResourcesCompatible(const FrameResources* resources,
- const gfx::Size& size,
- VideoFrame::Format format) {
- return size == resources->size && format == resources->format;
+ const gfx::Size& size) {
+ return size == resources->size;
}
// Get the resources needed for a frame out of the pool, or create them if
// necessary.
// This also drops the LRU resources that can't be reuse for this frame.
FrameResources* GetOrCreateFrameResources(const gfx::Size& size,
- VideoFrame::Format format);
+ VideoPixelFormat format);
// Callback called when a VideoFrame generated with GetFrameResources is no
// longer referenced.
@@ -88,7 +128,7 @@ class GpuMemoryBufferVideoFramePool::PoolImpl
uint32 sync_point);
// Return frame resources to the pool. This has to be called on the thread
- // where |task_runner| is current.
+ // where |media_task_runner_| is current.
void ReturnFrameResources(FrameResources* frame_resources);
// Delete resources. This has to be called on the thread where |task_runner|
@@ -97,96 +137,401 @@ class GpuMemoryBufferVideoFramePool::PoolImpl
const scoped_refptr<GpuVideoAcceleratorFactories>& gpu_factories,
FrameResources* frame_resources);
- scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
+ // Task runner associated to the GL context provided by |gpu_factories_|.
+ scoped_refptr<base::SingleThreadTaskRunner> media_task_runner_;
+ // Task runner used to asynchronously copy planes.
+ scoped_refptr<base::TaskRunner> worker_task_runner_;
+
+ // Interface to GPU related operations.
scoped_refptr<GpuVideoAcceleratorFactories> gpu_factories_;
// Pool of resources.
std::list<FrameResources*> resources_pool_;
const unsigned texture_target_;
+ // TODO(dcastagna): change the following type from VideoPixelFormat to
+ // BufferFormat.
+ VideoPixelFormat output_format_;
DISALLOW_COPY_AND_ASSIGN(PoolImpl);
};
namespace {
-// Copy a buffer info a GpuMemoryBuffer.
-// |bytes_per_row| is expected to be less or equal than the strides of the two
-// buffers.
-void CopyPlaneToGpuMemoryBuffer(int rows,
- int bytes_per_row,
- const uint8* source,
- int source_stride,
- gfx::GpuMemoryBuffer* buffer) {
- TRACE_EVENT2("media", "CopyPlaneToGpuMemoryBuffer", "bytes_per_row",
- bytes_per_row, "rows", rows);
-
- DCHECK(buffer);
- DCHECK(source);
- void* data = nullptr;
- CHECK(buffer->Map(&data));
- uint8* mapped_buffer = static_cast<uint8*>(data);
- int dest_stride = 0;
- buffer->GetStride(&dest_stride);
- DCHECK_NE(dest_stride, 0);
- DCHECK_LE(bytes_per_row, std::abs(dest_stride));
- DCHECK_LE(bytes_per_row, source_stride);
- for (int row = 0; row < rows; ++row) {
- memcpy(mapped_buffer + dest_stride * row, source + source_stride * row,
- bytes_per_row);
+// VideoFrame copies to GpuMemoryBuffers will be split in copies where the
+// output size is |kBytesPerCopyTarget| bytes and run in parallel.
+const size_t kBytesPerCopyTarget = 1024 * 1024; // 1MB
+
+// Return the GpuMemoryBuffer format to use for a specific VideoPixelFormat
+// and plane.
+gfx::BufferFormat GpuMemoryBufferFormat(VideoPixelFormat format, size_t plane) {
+ switch (format) {
+ case PIXEL_FORMAT_I420:
+ DCHECK_LE(plane, 2u);
+ return gfx::BufferFormat::R_8;
+ case PIXEL_FORMAT_NV12:
+ DCHECK_LE(plane, 1u);
+ return gfx::BufferFormat::YUV_420_BIPLANAR;
+ case PIXEL_FORMAT_UYVY:
+ DCHECK_EQ(0u, plane);
+ return gfx::BufferFormat::UYVY_422;
+ default:
+ NOTREACHED();
+ return gfx::BufferFormat::BGRA_8888;
+ }
+}
+
+unsigned ImageInternalFormat(VideoPixelFormat format, size_t plane) {
+ switch (format) {
+ case PIXEL_FORMAT_I420:
+ DCHECK_LE(plane, 2u);
+ return GL_R8_EXT;
+ case PIXEL_FORMAT_NV12:
+ DCHECK_LE(plane, 1u);
+ DLOG(WARNING) << "NV12 format not supported yet";
+ return 0; // TODO(andresantoso): Implement extension for NV12.
+ case PIXEL_FORMAT_UYVY:
+ DCHECK_EQ(0u, plane);
+ return GL_RGB_YCBCR_422_CHROMIUM;
+ default:
+ NOTREACHED();
+ return 0;
+ }
+}
+
+// The number of output planes to be copied in each iteration.
+size_t PlanesPerCopy(VideoPixelFormat format) {
+ switch (format) {
+ case PIXEL_FORMAT_I420:
+ case PIXEL_FORMAT_UYVY:
+ return 1;
+ case PIXEL_FORMAT_NV12:
+ return 2;
+ default:
+ NOTREACHED();
+ return 0;
+ }
+}
+
+// The number of output rows to be copied in each iteration.
+int RowsPerCopy(size_t plane, VideoPixelFormat format, int width) {
+ int bytes_per_row = VideoFrame::RowBytes(plane, format, width);
+ if (format == PIXEL_FORMAT_NV12) {
+ DCHECK_EQ(0u, plane);
+ bytes_per_row += VideoFrame::RowBytes(1, format, width);
+ }
+ // Copy an even number of lines, and at least one.
+ return std::max<size_t>((kBytesPerCopyTarget / bytes_per_row) & ~1, 1);
+}
+
+void CopyRowsToI420Buffer(int first_row,
+ int rows,
+ int bytes_per_row,
+ const uint8* source,
+ int source_stride,
+ uint8* output,
+ int dest_stride,
+ const base::Closure& done) {
+ TRACE_EVENT2("media", "CopyRowsToI420Buffer", "bytes_per_row", bytes_per_row,
+ "rows", rows);
+ if (output) {
+ DCHECK_NE(dest_stride, 0);
+ DCHECK_LE(bytes_per_row, std::abs(dest_stride));
+ DCHECK_LE(bytes_per_row, source_stride);
+ for (int row = first_row; row < first_row + rows; ++row) {
+ memcpy(output + dest_stride * row, source + source_stride * row,
+ bytes_per_row);
+ }
+ }
+ done.Run();
+}
+
+void CopyRowsToNV12Buffer(int first_row,
+ int rows,
+ int bytes_per_row,
+ const scoped_refptr<VideoFrame>& source_frame,
+ uint8* dest_y,
+ int dest_stride_y,
+ uint8* dest_uv,
+ int dest_stride_uv,
+ const base::Closure& done) {
+ TRACE_EVENT2("media", "CopyRowsToNV12Buffer", "bytes_per_row", bytes_per_row,
+ "rows", rows);
+ if (dest_y && dest_uv) {
+ DCHECK_NE(dest_stride_y, 0);
+ DCHECK_NE(dest_stride_uv, 0);
+ DCHECK_LE(bytes_per_row, std::abs(dest_stride_y));
+ DCHECK_LE(bytes_per_row, std::abs(dest_stride_uv));
+ DCHECK_EQ(0, first_row % 2);
+
+ libyuv::I420ToNV12(
+ source_frame->visible_data(VideoFrame::kYPlane) +
+ first_row * source_frame->stride(VideoFrame::kYPlane),
+ source_frame->stride(VideoFrame::kYPlane),
+ source_frame->visible_data(VideoFrame::kUPlane) +
+ first_row / 2 * source_frame->stride(VideoFrame::kUPlane),
+ source_frame->stride(VideoFrame::kUPlane),
+ source_frame->visible_data(VideoFrame::kVPlane) +
+ first_row / 2 * source_frame->stride(VideoFrame::kVPlane),
+ source_frame->stride(VideoFrame::kVPlane),
+ dest_y + first_row * dest_stride_y, dest_stride_y,
+ dest_uv + first_row / 2 * dest_stride_uv, dest_stride_uv, bytes_per_row,
+ rows);
}
- buffer->Unmap();
+ done.Run();
+}
+
+void CopyRowsToUYVYBuffer(int first_row,
+ int rows,
+ int width,
+ const scoped_refptr<VideoFrame>& source_frame,
+ uint8* output,
+ int dest_stride,
+ const base::Closure& done) {
+ TRACE_EVENT2("media", "CopyRowsToUYVYBuffer", "bytes_per_row", width * 2,
+ "rows", rows);
+ if (output) {
+ DCHECK_NE(dest_stride, 0);
+ DCHECK_LE(width, std::abs(dest_stride / 2));
+ DCHECK_EQ(0, first_row % 2);
+ libyuv::I420ToUYVY(
+ source_frame->visible_data(VideoFrame::kYPlane) +
+ first_row * source_frame->stride(VideoFrame::kYPlane),
+ source_frame->stride(VideoFrame::kYPlane),
+ source_frame->visible_data(VideoFrame::kUPlane) +
+ first_row / 2 * source_frame->stride(VideoFrame::kUPlane),
+ source_frame->stride(VideoFrame::kUPlane),
+ source_frame->visible_data(VideoFrame::kVPlane) +
+ first_row / 2 * source_frame->stride(VideoFrame::kVPlane),
+ source_frame->stride(VideoFrame::kVPlane),
+ output + first_row * dest_stride, dest_stride, width, rows);
+ }
+ done.Run();
}
} // unnamed namespace
// Creates a VideoFrame backed by native textures starting from a software
// VideoFrame.
-// The data contained in video_frame is copied into the returned VideoFrame.
-scoped_refptr<VideoFrame>
-GpuMemoryBufferVideoFramePool::PoolImpl::CreateHardwareFrame(
- const scoped_refptr<VideoFrame>& video_frame) {
- if (!gpu_factories_)
- return video_frame;
+// The data contained in |video_frame| is copied into the VideoFrame passed to
+// |frame_ready_cb|.
+// This has to be called on the thread where |media_task_runner_| is current.
+void GpuMemoryBufferVideoFramePool::PoolImpl::CreateHardwareFrame(
+ const scoped_refptr<VideoFrame>& video_frame,
+ const FrameReadyCB& frame_ready_cb) {
+ DCHECK(media_task_runner_->BelongsToCurrentThread());
+ if (!gpu_factories_) {
+ frame_ready_cb.Run(video_frame);
+ return;
+ }
- if (!gpu_factories_->IsTextureRGSupported())
- return video_frame;
+ // Lazily initialize output_format_ since VideoFrameOutputFormat() has to be
+ // called on the media_thread while this object might be instantiated on any.
+ if (output_format_ == PIXEL_FORMAT_UNKNOWN)
+ output_format_ = gpu_factories_->VideoFrameOutputFormat();
- gpu::gles2::GLES2Interface* gles2 = gpu_factories_->GetGLES2Interface();
- if (!gles2)
- return video_frame;
+ if (output_format_ == PIXEL_FORMAT_UNKNOWN) {
+ frame_ready_cb.Run(video_frame);
+ return;
+ }
+ switch (video_frame->format()) {
+ // Supported cases.
+ case PIXEL_FORMAT_YV12:
+ case PIXEL_FORMAT_I420:
+ break;
+ // Unsupported cases.
+ case PIXEL_FORMAT_YV12A:
+ case PIXEL_FORMAT_YV16:
+ case PIXEL_FORMAT_YV24:
+ case PIXEL_FORMAT_NV12:
+ case PIXEL_FORMAT_NV21:
+ case PIXEL_FORMAT_UYVY:
+ case PIXEL_FORMAT_YUY2:
+ case PIXEL_FORMAT_ARGB:
+ case PIXEL_FORMAT_XRGB:
+ case PIXEL_FORMAT_RGB24:
+ case PIXEL_FORMAT_RGB32:
+ case PIXEL_FORMAT_MJPEG:
+ case PIXEL_FORMAT_MT21:
+ case PIXEL_FORMAT_UNKNOWN:
+ frame_ready_cb.Run(video_frame);
+ return;
+ }
- VideoFrame::Format format = video_frame->format();
- size_t planes = VideoFrame::NumPlanes(format);
- DCHECK(video_frame->visible_rect().origin().IsOrigin());
- gfx::Size size = video_frame->visible_rect().size();
- gpu::MailboxHolder mailbox_holders[VideoFrame::kMaxPlanes];
+ const gfx::Size size = video_frame->visible_rect().size();
// Acquire resources. Incompatible ones will be dropped from the pool.
- FrameResources* frame_resources = GetOrCreateFrameResources(size, format);
+ FrameResources* frame_resources =
+ GetOrCreateFrameResources(size, output_format_);
+ if (!frame_resources) {
+ frame_ready_cb.Run(video_frame);
+ return;
+ }
- // Set up the planes copying data into it and creating the mailboxes needed
- // to refer to the textures.
- for (size_t i = 0; i < planes; ++i) {
- PlaneResource& plane_resource = frame_resources->plane_resources[i];
- CopyPlaneToGpuMemoryBuffer(VideoFrame::Rows(i, format, size.height()),
- VideoFrame::RowBytes(i, format, size.width()),
- video_frame->data(i), video_frame->stride(i),
- plane_resource.gpu_memory_buffer.get());
+ worker_task_runner_->PostTask(
+ FROM_HERE, base::Bind(&PoolImpl::CopyVideoFrameToGpuMemoryBuffers, this,
+ video_frame, frame_resources, frame_ready_cb));
+}
+bool GpuMemoryBufferVideoFramePool::PoolImpl::OnMemoryDump(
+ const base::trace_event::MemoryDumpArgs& args,
+ base::trace_event::ProcessMemoryDump* pmd) {
+ const uint64 tracing_process_id =
+ base::trace_event::MemoryDumpManager::GetInstance()
+ ->GetTracingProcessId();
+ const int kImportance = 2;
+ for (const FrameResources* frame_resources : resources_pool_) {
+ for (const PlaneResource& plane_resource :
+ frame_resources->plane_resources) {
+ if (plane_resource.gpu_memory_buffer) {
+ gfx::GpuMemoryBufferId buffer_id =
+ plane_resource.gpu_memory_buffer->GetId();
+ std::string dump_name = base::StringPrintf(
+ "media/video_frame_memory/buffer_%d", buffer_id.id);
+ base::trace_event::MemoryAllocatorDump* dump =
+ pmd->CreateAllocatorDump(dump_name);
+ size_t buffer_size_in_bytes = gfx::BufferSizeForBufferFormat(
+ plane_resource.size, plane_resource.gpu_memory_buffer->GetFormat());
+ dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize,
+ base::trace_event::MemoryAllocatorDump::kUnitsBytes,
+ buffer_size_in_bytes);
+ dump->AddScalar("free_size",
+ base::trace_event::MemoryAllocatorDump::kUnitsBytes,
+ frame_resources->in_use ? 0 : buffer_size_in_bytes);
+ base::trace_event::MemoryAllocatorDumpGuid shared_buffer_guid =
+ gfx::GetGpuMemoryBufferGUIDForTracing(tracing_process_id,
+ buffer_id);
+ pmd->CreateSharedGlobalAllocatorDump(shared_buffer_guid);
+ pmd->AddOwnershipEdge(dump->guid(), shared_buffer_guid, kImportance);
+ }
+ }
+ }
+ return true;
+}
+
+void GpuMemoryBufferVideoFramePool::PoolImpl::OnCopiesDone(
+ const scoped_refptr<VideoFrame>& video_frame,
+ FrameResources* frame_resources,
+ const FrameReadyCB& frame_ready_cb) {
+ for (const auto& plane_resource : frame_resources->plane_resources) {
+ if (plane_resource.gpu_memory_buffer)
+ plane_resource.gpu_memory_buffer->Unmap();
+ }
+
+ media_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&PoolImpl::BindAndCreateMailboxesHardwareFrameResources, this,
+ video_frame, frame_resources, frame_ready_cb));
+}
+
+// Copies |video_frame| into |frame_resources| asynchronously, posting n tasks
+// that will be synchronized by a barrier.
+// After the barrier is passed OnCopiesDone will be called.
+void GpuMemoryBufferVideoFramePool::PoolImpl::CopyVideoFrameToGpuMemoryBuffers(
+ const scoped_refptr<VideoFrame>& video_frame,
+ FrameResources* frame_resources,
+ const FrameReadyCB& frame_ready_cb) {
+ // Compute the number of tasks to post and create the barrier.
+ const size_t num_planes = VideoFrame::NumPlanes(output_format_);
+ const size_t planes_per_copy = PlanesPerCopy(output_format_);
+ const gfx::Size size = video_frame->visible_rect().size();
+ size_t copies = 0;
+ for (size_t i = 0; i < num_planes; i += planes_per_copy) {
+ const int rows = VideoFrame::Rows(i, output_format_, size.height());
+ const int rows_per_copy = RowsPerCopy(i, output_format_, size.width());
+ copies += rows / rows_per_copy;
+ if (rows % rows_per_copy)
+ ++copies;
+ }
+ base::Closure copies_done =
+ base::Bind(&PoolImpl::OnCopiesDone, this, video_frame, frame_resources,
+ frame_ready_cb);
+ base::Closure barrier = base::BarrierClosure(copies, copies_done);
+
+ // Post all the async tasks.
+ for (size_t i = 0; i < num_planes; i += planes_per_copy) {
+ gfx::GpuMemoryBuffer* buffer =
+ frame_resources->plane_resources[i].gpu_memory_buffer.get();
+ uint8* dest_buffers[VideoFrame::kMaxPlanes] = {0};
+ int dest_strides[VideoFrame::kMaxPlanes] = {0};
+ if (buffer) {
+ DCHECK_EQ(planes_per_copy,
+ gfx::NumberOfPlanesForBufferFormat(buffer->GetFormat()));
+ bool rv = buffer->Map(reinterpret_cast<void**>(dest_buffers));
+ DCHECK(rv);
+ buffer->GetStride(dest_strides);
+ }
+
+ const int rows = VideoFrame::Rows(i, output_format_, size.height());
+ const int rows_per_copy = RowsPerCopy(i, output_format_, size.width());
+
+ for (int row = 0; row < rows; row += rows_per_copy) {
+ const int rows_to_copy = std::min(rows_per_copy, rows - row);
+ switch (output_format_) {
+ case PIXEL_FORMAT_I420: {
+ const int bytes_per_row =
+ VideoFrame::RowBytes(i, output_format_, size.width());
+ worker_task_runner_->PostTask(
+ FROM_HERE, base::Bind(&CopyRowsToI420Buffer, row, rows_to_copy,
+ bytes_per_row, video_frame->visible_data(i),
+ video_frame->stride(i), dest_buffers[0],
+ dest_strides[0], barrier));
+ break;
+ }
+ case PIXEL_FORMAT_NV12:
+ worker_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&CopyRowsToNV12Buffer, row, rows_to_copy,
+ size.width(), video_frame, dest_buffers[0],
+ dest_strides[0], dest_buffers[1], dest_strides[1],
+ barrier));
+ break;
+ case PIXEL_FORMAT_UYVY:
+ worker_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&CopyRowsToUYVYBuffer, row, rows_to_copy, size.width(),
+ video_frame, dest_buffers[0], dest_strides[0],
+ barrier));
+ break;
+ default:
+ NOTREACHED();
+ }
+ }
+ }
+}
+
+void GpuMemoryBufferVideoFramePool::PoolImpl::
+ BindAndCreateMailboxesHardwareFrameResources(
+ const scoped_refptr<VideoFrame>& video_frame,
+ FrameResources* frame_resources,
+ const FrameReadyCB& frame_ready_cb) {
+ gpu::gles2::GLES2Interface* gles2 = gpu_factories_->GetGLES2Interface();
+ if (!gles2) {
+ frame_ready_cb.Run(video_frame);
+ return;
+ }
+
+ const size_t num_planes = VideoFrame::NumPlanes(output_format_);
+ const size_t planes_per_copy = PlanesPerCopy(output_format_);
+ const gfx::Size size = video_frame->visible_rect().size();
+ gpu::MailboxHolder mailbox_holders[VideoFrame::kMaxPlanes];
+ // Set up the planes creating the mailboxes needed to refer to the textures.
+ for (size_t i = 0; i < num_planes; i += planes_per_copy) {
+ PlaneResource& plane_resource = frame_resources->plane_resources[i];
// Bind the texture and create or rebind the image.
gles2->BindTexture(texture_target_, plane_resource.texture_id);
if (plane_resource.gpu_memory_buffer && !plane_resource.image_id) {
- const size_t width = VideoFrame::Columns(i, format, size.width());
- const size_t height = VideoFrame::Rows(i, format, size.height());
+ const size_t width = VideoFrame::Columns(i, output_format_, size.width());
+ const size_t height = VideoFrame::Rows(i, output_format_, size.height());
plane_resource.image_id = gles2->CreateImageCHROMIUM(
plane_resource.gpu_memory_buffer->AsClientBuffer(), width, height,
- GL_R8_EXT);
- } else {
+ ImageInternalFormat(output_format_, i));
+ } else if (plane_resource.image_id) {
gles2->ReleaseTexImage2DCHROMIUM(texture_target_,
plane_resource.image_id);
}
- gles2->BindTexImage2DCHROMIUM(texture_target_, plane_resource.image_id);
+ if (plane_resource.image_id)
+ gles2->BindTexImage2DCHROMIUM(texture_target_, plane_resource.image_id);
mailbox_holders[i] =
gpu::MailboxHolder(plane_resource.mailbox, texture_target_, 0);
}
@@ -195,31 +540,44 @@ GpuMemoryBufferVideoFramePool::PoolImpl::CreateHardwareFrame(
// mailboxes refer to will be used only after all the previous commands posted
// in the command buffer have been processed.
unsigned sync_point = gles2->InsertSyncPointCHROMIUM();
- for (size_t i = 0; i < planes; ++i) {
+ for (size_t i = 0; i < num_planes; i += planes_per_copy)
mailbox_holders[i].sync_point = sync_point;
- }
+ scoped_refptr<VideoFrame> frame;
// Create the VideoFrame backed by native textures.
- scoped_refptr<VideoFrame> frame = VideoFrame::WrapYUV420NativeTextures(
- mailbox_holders[VideoFrame::kYPlane],
- mailbox_holders[VideoFrame::kUPlane],
- mailbox_holders[VideoFrame::kVPlane],
- base::Bind(&PoolImpl::MailboxHoldersReleased, this, frame_resources),
- size, video_frame->visible_rect(), video_frame->natural_size(),
- video_frame->timestamp());
- if (video_frame->metadata()->IsTrue(VideoFrameMetadata::ALLOW_OVERLAY))
- frame->metadata()->SetBoolean(VideoFrameMetadata::ALLOW_OVERLAY, true);
- return frame;
+ switch (output_format_) {
+ case PIXEL_FORMAT_I420:
+ frame = VideoFrame::WrapYUV420NativeTextures(
+ mailbox_holders[VideoFrame::kYPlane],
+ mailbox_holders[VideoFrame::kUPlane],
+ mailbox_holders[VideoFrame::kVPlane],
+ base::Bind(&PoolImpl::MailboxHoldersReleased, this, frame_resources),
+ size, gfx::Rect(size), size, video_frame->timestamp());
+ if (video_frame->metadata()->IsTrue(VideoFrameMetadata::ALLOW_OVERLAY))
+ frame->metadata()->SetBoolean(VideoFrameMetadata::ALLOW_OVERLAY, true);
+ break;
+ case PIXEL_FORMAT_NV12:
+ case PIXEL_FORMAT_UYVY:
+ frame = VideoFrame::WrapNativeTexture(
+ output_format_, mailbox_holders[VideoFrame::kYPlane],
+ base::Bind(&PoolImpl::MailboxHoldersReleased, this, frame_resources),
+ size, gfx::Rect(size), size, video_frame->timestamp());
+ frame->metadata()->SetBoolean(VideoFrameMetadata::ALLOW_OVERLAY, true);
+ break;
+ default:
+ NOTREACHED();
+ }
+ frame_ready_cb.Run(frame);
}
// Destroy all the resources posting one task per FrameResources
-// to the |task_runner_|.
+// to the |media_task_runner_|.
GpuMemoryBufferVideoFramePool::PoolImpl::~PoolImpl() {
// Delete all the resources on the media thread.
while (!resources_pool_.empty()) {
FrameResources* frame_resources = resources_pool_.front();
resources_pool_.pop_front();
- task_runner_->PostTask(
+ media_task_runner_->PostTask(
FROM_HERE, base::Bind(&PoolImpl::DeleteFrameResources, gpu_factories_,
base::Owned(frame_resources)));
}
@@ -230,14 +588,12 @@ GpuMemoryBufferVideoFramePool::PoolImpl::~PoolImpl() {
GpuMemoryBufferVideoFramePool::PoolImpl::FrameResources*
GpuMemoryBufferVideoFramePool::PoolImpl::GetOrCreateFrameResources(
const gfx::Size& size,
- VideoFrame::Format format) {
- DCHECK(task_runner_->BelongsToCurrentThread());
-
+ VideoPixelFormat format) {
auto it = resources_pool_.begin();
while (it != resources_pool_.end()) {
FrameResources* frame_resources = *it;
if (!frame_resources->in_use) {
- if (AreFrameResourcesCompatible(frame_resources, size, format)) {
+ if (AreFrameResourcesCompatible(frame_resources, size)) {
frame_resources->in_use = true;
return frame_resources;
} else {
@@ -252,18 +608,21 @@ GpuMemoryBufferVideoFramePool::PoolImpl::GetOrCreateFrameResources(
// Create the resources.
gpu::gles2::GLES2Interface* gles2 = gpu_factories_->GetGLES2Interface();
- DCHECK(gles2);
+ if (!gles2)
+ return nullptr;
gles2->ActiveTexture(GL_TEXTURE0);
- size_t planes = VideoFrame::NumPlanes(format);
- FrameResources* frame_resources = new FrameResources(format, size);
+ size_t num_planes = VideoFrame::NumPlanes(format);
+ FrameResources* frame_resources = new FrameResources(size);
resources_pool_.push_back(frame_resources);
- for (size_t i = 0; i < planes; ++i) {
+ for (size_t i = 0; i < num_planes; i += PlanesPerCopy(format)) {
PlaneResource& plane_resource = frame_resources->plane_resources[i];
const size_t width = VideoFrame::Columns(i, format, size.width());
const size_t height = VideoFrame::Rows(i, format, size.height());
- const gfx::Size plane_size(width, height);
+ plane_resource.size = gfx::Size(width, height);
+
+ const gfx::BufferFormat buffer_format = GpuMemoryBufferFormat(format, i);
plane_resource.gpu_memory_buffer = gpu_factories_->AllocateGpuMemoryBuffer(
- plane_size, gfx::GpuMemoryBuffer::R_8, gfx::GpuMemoryBuffer::MAP);
+ plane_resource.size, buffer_format, gfx::BufferUsage::MAP);
gles2->GenTextures(1, &plane_resource.texture_id);
gles2->BindTexture(texture_target_, plane_resource.texture_id);
@@ -301,14 +660,14 @@ void GpuMemoryBufferVideoFramePool::PoolImpl::MailboxHoldersReleased(
FrameResources* frame_resources,
uint32 sync_point) {
// Return the resource on the media thread.
- task_runner_->PostTask(FROM_HERE, base::Bind(&PoolImpl::ReturnFrameResources,
- this, frame_resources));
+ media_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&PoolImpl::ReturnFrameResources, this, frame_resources));
}
// Put back the resoruces in the pool.
void GpuMemoryBufferVideoFramePool::PoolImpl::ReturnFrameResources(
FrameResources* frame_resources) {
- DCHECK(task_runner_->BelongsToCurrentThread());
auto it = std::find(resources_pool_.begin(), resources_pool_.end(),
frame_resources);
@@ -320,36 +679,28 @@ void GpuMemoryBufferVideoFramePool::PoolImpl::ReturnFrameResources(
frame_resources->in_use = false;
}
+GpuMemoryBufferVideoFramePool::GpuMemoryBufferVideoFramePool() {}
+
GpuMemoryBufferVideoFramePool::GpuMemoryBufferVideoFramePool(
- const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
+ const scoped_refptr<base::SingleThreadTaskRunner>& media_task_runner,
+ const scoped_refptr<base::TaskRunner>& worker_task_runner,
const scoped_refptr<GpuVideoAcceleratorFactories>& gpu_factories)
- : pool_impl_(new PoolImpl(task_runner, gpu_factories)) {
+ : pool_impl_(
+ new PoolImpl(media_task_runner, worker_task_runner, gpu_factories)) {
+ base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
+ pool_impl_.get(), media_task_runner);
}
GpuMemoryBufferVideoFramePool::~GpuMemoryBufferVideoFramePool() {
+ base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider(
+ pool_impl_.get());
}
-scoped_refptr<VideoFrame>
-GpuMemoryBufferVideoFramePool::MaybeCreateHardwareFrame(
- const scoped_refptr<VideoFrame>& video_frame) {
- switch (video_frame->format()) {
- // Supported cases.
- case VideoFrame::YV12:
- case VideoFrame::I420:
- return pool_impl_->CreateHardwareFrame(video_frame);
- // Unsupported cases.
- case VideoFrame::YV12A:
- case VideoFrame::YV16:
- case VideoFrame::YV24:
-#if defined(OS_MACOSX) || defined(OS_CHROMEOS)
- case VideoFrame::NV12:
-#endif
- case VideoFrame::ARGB:
- case VideoFrame::XRGB:
- case VideoFrame::UNKNOWN:
- break;
- }
- return video_frame;
+void GpuMemoryBufferVideoFramePool::MaybeCreateHardwareFrame(
+ const scoped_refptr<VideoFrame>& video_frame,
+ const FrameReadyCB& frame_ready_cb) {
+ DCHECK(video_frame);
+ pool_impl_->CreateHardwareFrame(video_frame, frame_ready_cb);
}
} // namespace media
diff --git a/chromium/media/video/gpu_memory_buffer_video_frame_pool.h b/chromium/media/video/gpu_memory_buffer_video_frame_pool.h
index e01c791aae9..9e72e9b4d73 100644
--- a/chromium/media/video/gpu_memory_buffer_video_frame_pool.h
+++ b/chromium/media/video/gpu_memory_buffer_video_frame_pool.h
@@ -7,6 +7,7 @@
#include "base/macros.h"
#include "base/memory/ref_counted.h"
+#include "base/task_runner.h"
#include "media/base/video_frame.h"
namespace base {
@@ -25,18 +26,27 @@ class GpuVideoAcceleratorFactories;
// in a round trip to the browser/GPU process.
class MEDIA_EXPORT GpuMemoryBufferVideoFramePool {
public:
+ GpuMemoryBufferVideoFramePool();
GpuMemoryBufferVideoFramePool(
- const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
+ const scoped_refptr<base::SingleThreadTaskRunner>& media_task_runner,
+ const scoped_refptr<base::TaskRunner>& worker_task_runner,
const scoped_refptr<GpuVideoAcceleratorFactories>& gpu_factories);
- ~GpuMemoryBufferVideoFramePool();
+ virtual ~GpuMemoryBufferVideoFramePool();
- // Returns a new VideoFrame containing only mailboxes to native resources.
- // The content of the returned object is copied from the software-allocated
+ // Callback used by MaybeCreateHardwareFrame to deliver a new VideoFrame
+ // after it has been copied to GpuMemoryBuffers.
+ typedef base::Callback<void(const scoped_refptr<VideoFrame>&)> FrameReadyCB;
+
+ // Calls |cb| on |media_worker_pool| with a new VideoFrame containing only
+ // mailboxes to native resources. |cb| will be destroyed on
+ // |media_worker_pool|.
+ // The content of the new object is copied from the software-allocated
// |video_frame|.
// If it's not possible to create a new hardware VideoFrame, |video_frame|
- // itself will be returned.
- scoped_refptr<VideoFrame> MaybeCreateHardwareFrame(
- const scoped_refptr<VideoFrame>& video_frame);
+ // itself will passed to |cb|.
+ virtual void MaybeCreateHardwareFrame(
+ const scoped_refptr<VideoFrame>& video_frame,
+ const FrameReadyCB& frame_ready_cb);
private:
class PoolImpl;
diff --git a/chromium/media/video/gpu_memory_buffer_video_frame_pool_unittest.cc b/chromium/media/video/gpu_memory_buffer_video_frame_pool_unittest.cc
index ee51e7c4ca5..cd435ea87a6 100644
--- a/chromium/media/video/gpu_memory_buffer_video_frame_pool_unittest.cc
+++ b/chromium/media/video/gpu_memory_buffer_video_frame_pool_unittest.cc
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "base/message_loop/message_loop.h"
-#include "base/run_loop.h"
+#include "base/bind.h"
+#include "base/test/test_simple_task_runner.h"
#include "gpu/command_buffer/client/gles2_interface_stub.h"
#include "media/base/video_frame.h"
#include "media/renderers/mock_gpu_video_accelerator_factories.h"
@@ -37,9 +37,19 @@ class TestGLES2Interface : public gpu::gles2::GLES2InterfaceStub {
class GpuMemoryBufferVideoFramePoolTest : public ::testing::Test {
public:
GpuMemoryBufferVideoFramePoolTest() {}
- void SetUp() override { gles2_.reset(new TestGLES2Interface); }
+ void SetUp() override {
+ gles2_.reset(new TestGLES2Interface);
+ media_task_runner_ = make_scoped_refptr(new base::TestSimpleTaskRunner);
+ copy_task_runner_ = make_scoped_refptr(new base::TestSimpleTaskRunner);
+ }
+
+ void TearDown() override { RunUntilIdle(); }
- void TearDown() override { base::RunLoop().RunUntilIdle(); }
+ void RunUntilIdle() {
+ media_task_runner_->RunUntilIdle();
+ copy_task_runner_->RunUntilIdle();
+ media_task_runner_->RunUntilIdle();
+ }
static scoped_refptr<media::VideoFrame> CreateTestYUVVideoFrame(
int dimension) {
@@ -52,48 +62,59 @@ class GpuMemoryBufferVideoFramePoolTest : public ::testing::Test {
gfx::Size size(dimension, dimension);
return media::VideoFrame::WrapExternalYuvData(
- media::VideoFrame::YV12, // format
- size, // coded_size
- gfx::Rect(size), // visible_rect
- size, // natural_size
- size.width(), // y_stride
- size.width() / 2, // u_stride
- size.width() / 2, // v_stride
- y_data, // y_data
- u_data, // u_data
- v_data, // v_data
- base::TimeDelta()); // timestamp
+ media::PIXEL_FORMAT_YV12, // format
+ size, // coded_size
+ gfx::Rect(size), // visible_rect
+ size, // natural_size
+ size.width(), // y_stride
+ size.width() / 2, // u_stride
+ size.width() / 2, // v_stride
+ y_data, // y_data
+ u_data, // u_data
+ v_data, // v_data
+ base::TimeDelta()); // timestamp
}
protected:
- base::MessageLoop media_message_loop_;
+ scoped_refptr<base::TestSimpleTaskRunner> media_task_runner_;
+ scoped_refptr<base::TestSimpleTaskRunner> copy_task_runner_;
scoped_ptr<TestGLES2Interface> gles2_;
};
+void MaybeCreateHardwareFrameCallback(
+ scoped_refptr<VideoFrame>* video_frame_output,
+ const scoped_refptr<VideoFrame>& video_frame) {
+ *video_frame_output = video_frame;
+}
+
TEST_F(GpuMemoryBufferVideoFramePoolTest, NoGpuFactoryNoHardwareVideoFrame) {
scoped_refptr<VideoFrame> frame = CreateTestYUVVideoFrame(10);
scoped_ptr<GpuMemoryBufferVideoFramePool> gpu_memory_buffer_pool_ =
make_scoped_ptr(new GpuMemoryBufferVideoFramePool(
- media_message_loop_.task_runner(), nullptr));
+ media_task_runner_, copy_task_runner_.get(), nullptr));
- scoped_refptr<VideoFrame> frame2 =
- gpu_memory_buffer_pool_->MaybeCreateHardwareFrame(frame);
+ scoped_refptr<VideoFrame> frame2;
+ gpu_memory_buffer_pool_->MaybeCreateHardwareFrame(
+ frame, base::Bind(MaybeCreateHardwareFrameCallback, &frame2));
+ RunUntilIdle();
EXPECT_EQ(frame.get(), frame2.get());
}
-TEST_F(GpuMemoryBufferVideoFramePoolTest, NoTextureRGNoHardwareVideoFrame) {
- scoped_refptr<VideoFrame> frame = CreateTestYUVVideoFrame(10);
+TEST_F(GpuMemoryBufferVideoFramePoolTest, VideoFrameOutputFormatUnknown) {
+ scoped_refptr<VideoFrame> software_frame = CreateTestYUVVideoFrame(10);
scoped_refptr<MockGpuVideoAcceleratorFactories> mock_gpu_factories(
new MockGpuVideoAcceleratorFactories);
scoped_ptr<GpuMemoryBufferVideoFramePool> gpu_memory_buffer_pool_ =
make_scoped_ptr(new GpuMemoryBufferVideoFramePool(
- media_message_loop_.task_runner(), mock_gpu_factories));
+ media_task_runner_, copy_task_runner_.get(), mock_gpu_factories));
- EXPECT_CALL(*mock_gpu_factories.get(), IsTextureRGSupported())
- .WillRepeatedly(testing::Return(false));
- scoped_refptr<VideoFrame> frame2 =
- gpu_memory_buffer_pool_->MaybeCreateHardwareFrame(frame);
- EXPECT_EQ(frame.get(), frame2.get());
+ mock_gpu_factories->SetVideoFrameOutputFormat(PIXEL_FORMAT_UNKNOWN);
+ scoped_refptr<VideoFrame> frame;
+ gpu_memory_buffer_pool_->MaybeCreateHardwareFrame(
+ software_frame, base::Bind(MaybeCreateHardwareFrameCallback, &frame));
+ RunUntilIdle();
+
+ EXPECT_EQ(software_frame.get(), frame.get());
}
TEST_F(GpuMemoryBufferVideoFramePoolTest, CreateOneHardwareFrame) {
@@ -102,15 +123,17 @@ TEST_F(GpuMemoryBufferVideoFramePoolTest, CreateOneHardwareFrame) {
new MockGpuVideoAcceleratorFactories);
scoped_ptr<GpuMemoryBufferVideoFramePool> gpu_memory_buffer_pool_ =
make_scoped_ptr(new GpuMemoryBufferVideoFramePool(
- media_message_loop_.task_runner(), mock_gpu_factories));
+ media_task_runner_, copy_task_runner_.get(), mock_gpu_factories));
EXPECT_CALL(*mock_gpu_factories.get(), GetGLES2Interface())
.WillRepeatedly(testing::Return(gles2_.get()));
- EXPECT_CALL(*mock_gpu_factories.get(), IsTextureRGSupported())
- .WillRepeatedly(testing::Return(true));
- scoped_refptr<VideoFrame> frame =
- gpu_memory_buffer_pool_->MaybeCreateHardwareFrame(software_frame);
+ scoped_refptr<VideoFrame> frame;
+ gpu_memory_buffer_pool_->MaybeCreateHardwareFrame(
+ software_frame, base::Bind(MaybeCreateHardwareFrameCallback, &frame));
+
+ RunUntilIdle();
+
EXPECT_NE(software_frame.get(), frame.get());
EXPECT_EQ(3u, gles2_->gen_textures);
}
@@ -121,30 +144,38 @@ TEST_F(GpuMemoryBufferVideoFramePoolTest, ReuseFirstResource) {
new MockGpuVideoAcceleratorFactories);
scoped_ptr<GpuMemoryBufferVideoFramePool> gpu_memory_buffer_pool_ =
make_scoped_ptr(new GpuMemoryBufferVideoFramePool(
- media_message_loop_.task_runner(), mock_gpu_factories));
+ media_task_runner_, copy_task_runner_.get(), mock_gpu_factories));
EXPECT_CALL(*mock_gpu_factories.get(), GetGLES2Interface())
.WillRepeatedly(testing::Return(gles2_.get()));
- EXPECT_CALL(*mock_gpu_factories.get(), IsTextureRGSupported())
- .WillRepeatedly(testing::Return(true));
- scoped_refptr<VideoFrame> frame =
- gpu_memory_buffer_pool_->MaybeCreateHardwareFrame(software_frame);
+ scoped_refptr<VideoFrame> frame;
+ gpu_memory_buffer_pool_->MaybeCreateHardwareFrame(
+ software_frame, base::Bind(MaybeCreateHardwareFrameCallback, &frame));
+ RunUntilIdle();
+
EXPECT_NE(software_frame.get(), frame.get());
gpu::Mailbox mailbox = frame->mailbox_holder(0).mailbox;
unsigned sync_point = frame->mailbox_holder(0).sync_point;
EXPECT_EQ(3u, gles2_->gen_textures);
- scoped_refptr<VideoFrame> frame2 =
- gpu_memory_buffer_pool_->MaybeCreateHardwareFrame(software_frame);
+ scoped_refptr<VideoFrame> frame2;
+ gpu_memory_buffer_pool_->MaybeCreateHardwareFrame(
+ software_frame, base::Bind(MaybeCreateHardwareFrameCallback, &frame2));
+ RunUntilIdle();
+
EXPECT_NE(software_frame.get(), frame2.get());
EXPECT_NE(mailbox, frame2->mailbox_holder(0).mailbox);
EXPECT_EQ(6u, gles2_->gen_textures);
frame = nullptr;
frame2 = nullptr;
- base::RunLoop().RunUntilIdle(); // Run posted closures.
- frame = gpu_memory_buffer_pool_->MaybeCreateHardwareFrame(software_frame);
+ RunUntilIdle();
+
+ gpu_memory_buffer_pool_->MaybeCreateHardwareFrame(
+ software_frame, base::Bind(MaybeCreateHardwareFrameCallback, &frame));
+ RunUntilIdle();
+
EXPECT_NE(software_frame.get(), frame.get());
EXPECT_EQ(6u, gles2_->gen_textures);
EXPECT_EQ(frame->mailbox_holder(0).mailbox, mailbox);
@@ -156,23 +187,95 @@ TEST_F(GpuMemoryBufferVideoFramePoolTest, DropResourceWhenSizeIsDifferent) {
new MockGpuVideoAcceleratorFactories);
scoped_ptr<GpuMemoryBufferVideoFramePool> gpu_memory_buffer_pool_ =
make_scoped_ptr(new GpuMemoryBufferVideoFramePool(
- media_message_loop_.task_runner(), mock_gpu_factories));
+ media_task_runner_, copy_task_runner_.get(), mock_gpu_factories));
EXPECT_CALL(*mock_gpu_factories.get(), GetGLES2Interface())
.WillRepeatedly(testing::Return(gles2_.get()));
- EXPECT_CALL(*mock_gpu_factories.get(), IsTextureRGSupported())
- .WillRepeatedly(testing::Return(true));
- scoped_refptr<VideoFrame> frame =
- gpu_memory_buffer_pool_->MaybeCreateHardwareFrame(
- CreateTestYUVVideoFrame(10));
+ scoped_refptr<VideoFrame> frame;
+ gpu_memory_buffer_pool_->MaybeCreateHardwareFrame(
+ CreateTestYUVVideoFrame(10),
+ base::Bind(MaybeCreateHardwareFrameCallback, &frame));
+ RunUntilIdle();
+
EXPECT_EQ(3u, gles2_->gen_textures);
frame = nullptr;
- base::RunLoop().RunUntilIdle(); // Run posted closures.
- frame = gpu_memory_buffer_pool_->MaybeCreateHardwareFrame(
- CreateTestYUVVideoFrame(4));
+ RunUntilIdle();
+ gpu_memory_buffer_pool_->MaybeCreateHardwareFrame(
+ CreateTestYUVVideoFrame(4),
+ base::Bind(MaybeCreateHardwareFrameCallback, &frame));
+ RunUntilIdle();
EXPECT_EQ(6u, gles2_->gen_textures);
}
+TEST_F(GpuMemoryBufferVideoFramePoolTest, CreateOneHardwareUYUVFrame) {
+ scoped_refptr<VideoFrame> software_frame = CreateTestYUVVideoFrame(10);
+ scoped_refptr<MockGpuVideoAcceleratorFactories> mock_gpu_factories(
+ new MockGpuVideoAcceleratorFactories);
+ mock_gpu_factories->SetVideoFrameOutputFormat(PIXEL_FORMAT_UYVY);
+ scoped_ptr<GpuMemoryBufferVideoFramePool> gpu_memory_buffer_pool_ =
+ make_scoped_ptr(new GpuMemoryBufferVideoFramePool(
+ media_task_runner_, copy_task_runner_.get(), mock_gpu_factories));
+
+ EXPECT_CALL(*mock_gpu_factories.get(), GetGLES2Interface())
+ .WillRepeatedly(testing::Return(gles2_.get()));
+
+ scoped_refptr<VideoFrame> frame;
+ gpu_memory_buffer_pool_->MaybeCreateHardwareFrame(
+ software_frame, base::Bind(MaybeCreateHardwareFrameCallback, &frame));
+
+ RunUntilIdle();
+
+ EXPECT_NE(software_frame.get(), frame.get());
+ EXPECT_EQ(1u, gles2_->gen_textures);
+}
+
+TEST_F(GpuMemoryBufferVideoFramePoolTest, CreateOneHardwareNV12Frame) {
+ scoped_refptr<VideoFrame> software_frame = CreateTestYUVVideoFrame(10);
+ scoped_refptr<MockGpuVideoAcceleratorFactories> mock_gpu_factories(
+ new MockGpuVideoAcceleratorFactories);
+ mock_gpu_factories->SetVideoFrameOutputFormat(PIXEL_FORMAT_NV12);
+ scoped_ptr<GpuMemoryBufferVideoFramePool> gpu_memory_buffer_pool_ =
+ make_scoped_ptr(new GpuMemoryBufferVideoFramePool(
+ media_task_runner_, copy_task_runner_.get(), mock_gpu_factories));
+
+ EXPECT_CALL(*mock_gpu_factories.get(), GetGLES2Interface())
+ .WillRepeatedly(testing::Return(gles2_.get()));
+
+ scoped_refptr<VideoFrame> frame;
+ gpu_memory_buffer_pool_->MaybeCreateHardwareFrame(
+ software_frame, base::Bind(MaybeCreateHardwareFrameCallback, &frame));
+
+ RunUntilIdle();
+
+ EXPECT_NE(software_frame.get(), frame.get());
+ EXPECT_EQ(1u, gles2_->gen_textures);
+}
+
+// AllocateGpuMemoryBuffer can return null (e.g: when the GPU process is down).
+// This test checks that in that case we don't crash and still create the
+// textures.
+TEST_F(GpuMemoryBufferVideoFramePoolTest, AllocateGpuMemoryBufferBFail) {
+ scoped_refptr<VideoFrame> software_frame = CreateTestYUVVideoFrame(10);
+ scoped_refptr<MockGpuVideoAcceleratorFactories> mock_gpu_factories(
+ new MockGpuVideoAcceleratorFactories);
+ mock_gpu_factories->SetFailToAllocateGpuMemoryBufferForTesting(true);
+ scoped_ptr<GpuMemoryBufferVideoFramePool> gpu_memory_buffer_pool_ =
+ make_scoped_ptr(new GpuMemoryBufferVideoFramePool(
+ media_task_runner_, copy_task_runner_.get(), mock_gpu_factories));
+
+ EXPECT_CALL(*mock_gpu_factories.get(), GetGLES2Interface())
+ .WillRepeatedly(testing::Return(gles2_.get()));
+
+ scoped_refptr<VideoFrame> frame;
+ gpu_memory_buffer_pool_->MaybeCreateHardwareFrame(
+ software_frame, base::Bind(MaybeCreateHardwareFrameCallback, &frame));
+
+ RunUntilIdle();
+
+ EXPECT_NE(software_frame.get(), frame.get());
+ EXPECT_EQ(3u, gles2_->gen_textures);
+}
+
} // namespace media
diff --git a/chromium/media/video/jpeg_decode_accelerator.h b/chromium/media/video/jpeg_decode_accelerator.h
index fd7608fc9fb..ec2763a365f 100644
--- a/chromium/media/video/jpeg_decode_accelerator.h
+++ b/chromium/media/video/jpeg_decode_accelerator.h
@@ -108,6 +108,10 @@ class MEDIA_EXPORT JpegDecodeAccelerator {
// |bitstream_buffer|, or destructor returns.
virtual void Decode(const BitstreamBuffer& bitstream_buffer,
const scoped_refptr<media::VideoFrame>& video_frame) = 0;
+
+ // Returns true when the JPEG decoder is supported. This can be called before
+ // Initialize().
+ virtual bool IsSupported() = 0;
};
} // namespace media
diff --git a/chromium/media/video/video_decode_accelerator.h b/chromium/media/video/video_decode_accelerator.h
index 53e18a786d5..2b71d65ab07 100644
--- a/chromium/media/video/video_decode_accelerator.h
+++ b/chromium/media/video/video_decode_accelerator.h
@@ -61,6 +61,8 @@ class MEDIA_EXPORT VideoDecodeAccelerator {
class MEDIA_EXPORT Client {
public:
// Callback to tell client how many and what size of buffers to provide.
+ // Note that the actual count provided through AssignPictureBuffers() can be
+ // larger than the value requested.
virtual void ProvidePictureBuffers(uint32 requested_num_of_buffers,
const gfx::Size& dimensions,
uint32 texture_target) = 0;
@@ -115,7 +117,9 @@ class MEDIA_EXPORT VideoDecodeAccelerator {
// callback has been initiated for a given buffer.
//
// Parameters:
- // |buffers| contains the allocated picture buffers for the output.
+ // |buffers| contains the allocated picture buffers for the output. Note
+ // that the count of buffers may be larger than the count requested through
+ // the call to Client::ProvidePictureBuffers().
virtual void AssignPictureBuffers(
const std::vector<PictureBuffer>& buffers) = 0;
diff --git a/chromium/media/video/video_encode_accelerator.h b/chromium/media/video/video_encode_accelerator.h
index 19a2216d88b..6879b0de714 100644
--- a/chromium/media/video/video_encode_accelerator.h
+++ b/chromium/media/video/video_encode_accelerator.h
@@ -112,7 +112,7 @@ class MEDIA_EXPORT VideoEncodeAccelerator {
// |client| is the client of this video encoder. The provided pointer must
// be valid until Destroy() is called.
// TODO(sheu): handle resolution changes. http://crbug.com/249944
- virtual bool Initialize(VideoFrame::Format input_format,
+ virtual bool Initialize(VideoPixelFormat input_format,
const gfx::Size& input_visible_size,
VideoCodecProfile output_profile,
uint32 initial_bitrate,