summaryrefslogtreecommitdiff
path: root/chromium/third_party/webrtc/modules
diff options
context:
space:
mode:
Diffstat (limited to 'chromium/third_party/webrtc/modules')
-rw-r--r--chromium/third_party/webrtc/modules/audio_codec_speed_tests.isolate (renamed from chromium/third_party/webrtc/modules/audio_coding/audio_codec_speed_tests.isolate)0
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/BUILD.gn124
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/audio_coding.gypi3
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/audio_coding_tests.gypi17
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/audio_decoder.cc4
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/audio_decoder.h11
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/audio_encoder.cc32
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/audio_encoder.h137
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/audio_encoder_mutable_impl.h133
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng.cc165
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng_unittest.cc47
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/cng/cng_unittest.cc28
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/cng/include/audio_encoder_cng.h50
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/cng/include/webrtc_cng.h8
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/cng/webrtc_cng.c25
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/g711/audio_decoder_pcm.cc65
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.cc87
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/g711/g711.gypi2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/g711/g711_interface.c36
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/g711/include/audio_decoder_pcm.h63
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/g711/include/audio_encoder_pcm.h49
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/g711/include/g711_interface.h28
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/g711/test/testG711.cc40
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/g722/audio_decoder_g722.cc138
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.cc95
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/g722/g722.gypi2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/g722/g722_decode.c14
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/g722/g722_enc_dec.h16
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/g722/g722_encode.c8
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/g722/g722_interface.c33
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/g722/include/audio_decoder_g722.h72
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/g722/include/audio_encoder_g722.h33
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/g722/include/g722_interface.h33
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/g722/test/testG722.cc43
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/abs_quant.c2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/abs_quant_loop.c6
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/abs_quant_loop.h2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/audio_decoder_ilbc.cc56
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.cc81
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/augmented_cb_corr.c10
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/augmented_cb_corr.h4
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_construct.c12
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_construct.h4
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy.c8
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy.h8
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_augmentation.c5
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_augmentation.h2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_calc.c7
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_calc.h4
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_search.c63
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_search.h6
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_search_core.c6
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_search_core.h4
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_update_best_index.c4
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_update_best_index.h4
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/comp_corr.c6
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/comp_corr.h6
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/constants.c6
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/constants.h6
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/create_augmented_vec.c4
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/create_augmented_vec.h2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/decode.c12
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/decode_residual.c8
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/decoder_interpolate_lsf.c3
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/defines.h29
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/do_plc.c18
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/do_plc.h2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/encode.c26
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/energy_inverse.c4
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/energy_inverse.h2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/enhancer.c12
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/enhancer.h10
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/enhancer_interface.c59
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/enhancer_interface.h2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/filtered_cb_vecs.c4
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/filtered_cb_vecs.h4
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/frame_classify.c6
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/frame_classify.h2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/get_cd_vec.c16
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/get_cd_vec.h6
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/get_sync_seq.c81
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/get_sync_seq.h12
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/hp_input.c4
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/hp_input.h2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/hp_output.c4
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/hp_output.h2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/ilbc.c46
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/ilbc.gypi2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/init_decode.c2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/init_encode.c2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/interface/audio_decoder_ilbc.h42
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/interface/audio_encoder_ilbc.h33
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/interface/ilbc.h32
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/interpolate_samples.c2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/interpolate_samples.h2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/my_corr.c7
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/my_corr.h4
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/nearest_neighbor.c37
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/nearest_neighbor.h9
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/refiner.c100
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/refiner.h8
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/simple_interpolate_lsf.c3
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/simple_lpc_analysis.c2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/state_construct.c8
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/state_construct.h4
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/state_search.c8
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/swap_bytes.c4
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/swap_bytes.h2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/test/iLBC_test.c27
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/test/iLBC_testLib.c20
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/window32_w32.c4
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/window32_w32.h2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/xcorr_coef.c12
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/xcorr_coef.h8
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/audio_decoder_isac_t.h54
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/audio_decoder_isac_t_impl.h108
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t.h99
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t_impl.h205
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/interface/audio_decoder_isacfix.h22
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/interface/audio_encoder_isacfix.h139
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/interface/isacfix.h48
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/audio_decoder_isacfix.cc (renamed from chromium/third_party/webrtc/modules/audio_processing/beamformer/mock_nonlinear_beamformer.cc)10
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/audio_encoder_isacfix.cc128
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.c4
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.h2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/codec.h10
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/decode.c12
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/decode_bwe.c8
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/decode_plc.c21
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/entropy_coding.c2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/entropy_coding.h2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/filterbank_internal.h5
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/filterbanks_unittest.cc6
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/isac_fix_type.h123
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/isacfix.c188
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/lattice.c16
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/lattice_armv7.S4
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/lattice_c.c4
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/lattice_mips.c6
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter.c16
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter_armv6.S2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter_c.c2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter_mips.c2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/structs.h6
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/test/isac_speed_test.cc14
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/test/kenny.cc64
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/test/test_iSACfixfloat.c53
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/isac.gypi6
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/isac_common.gypi22
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/isacfix.gypi15
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/locked_bandwidth_info.cc22
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/locked_bandwidth_info.h45
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/interface/audio_decoder_isac.h22
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/interface/audio_encoder_isac.h137
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/interface/isac.h34
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/audio_decoder_isac.cc (renamed from chromium/third_party/webrtc/modules/video_capture/ensure_initialized.h)15
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/audio_encoder_isac.cc127
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/audio_encoder_isac_unittest.cc8
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/bandwidth_estimator.c2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/bandwidth_estimator.h2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/codec.h10
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/decode_bwe.c2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/filter_functions.c93
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/isac.c92
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/isac_float_type.h117
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/isac_unittest.cc4
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/lpc_analysis.c4
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/lpc_analysis.h2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/pitch_estimator.h12
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/test/ReleaseTest-API/ReleaseTest-API.cc87
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/test/SwitchingSampRate/SwitchingSampRate.cc37
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/test/simpleKenny.c41
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/util/utility.c2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/util/utility.h2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/isac/unittest.cc287
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/mock/mock_audio_encoder.h37
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/opus/audio_decoder_opus.cc94
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/opus/audio_encoder_mutable_opus_test.cc109
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.cc319
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus_unittest.cc172
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/opus/interface/audio_decoder_opus.h51
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/opus/interface/audio_encoder_opus.h102
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/opus/interface/opus_interface.h35
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/opus/opus.gypi2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/opus/opus_fec_test.cc15
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/opus/opus_interface.c57
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/opus/opus_speed_test.cc10
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/opus/opus_unittest.cc153
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.cc48
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.cc26
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/pcm16b/include/audio_decoder_pcm16b.h40
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/pcm16b/include/audio_encoder_pcm16b.h20
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/pcm16b/include/pcm16b.h14
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/pcm16b/pcm16b.c16
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/pcm16b/pcm16b.gypi2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.cc74
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.h19
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red_unittest.cc12
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.cc3
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.h8
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_codec_database.cc217
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_codec_database.h59
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_common_defs.h48
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_dump.cc240
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_dump.h59
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_dump_unittest.cc124
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_receive_test.cc15
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_receive_test.h2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_receive_test_oldapi.cc12
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_receive_test_oldapi.h8
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_receiver.cc109
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_receiver.h44
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_receiver_unittest.cc23
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_receiver_unittest_oldapi.cc24
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_resampler.cc6
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_resampler.h2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_send_test.cc6
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_send_test.h4
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_send_test_oldapi.cc14
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_send_test_oldapi.h8
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/main/acm2/audio_coding_module.cc1
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.cc390
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.h77
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/main/acm2/audio_coding_module_unittest.cc56
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/main/acm2/audio_coding_module_unittest_oldapi.cc201
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/main/acm2/codec_manager.cc71
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/main/acm2/codec_manager.h11
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/main/acm2/codec_owner.cc211
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/main/acm2/codec_owner.h51
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/main/acm2/codec_owner_unittest.cc78
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/main/acm2/dump.proto169
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/main/audio_coding_module.gypi (renamed from chromium/third_party/webrtc/modules/audio_coding/main/acm2/audio_coding_module.gypi)137
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/main/interface/audio_coding_module.h264
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/main/interface/audio_coding_module_typedefs.h124
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/main/test/APITest.cc79
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/main/test/APITest.h4
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/main/test/PCMFile.cc5
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/main/test/PCMFile.h2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/main/test/SpatialAudio.cc4
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/main/test/TestAllCodecs.cc5
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/main/test/TestRedFec.cc16
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/main/test/TestStereo.cc10
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/main/test/TestStereo.h2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/main/test/TestVADDTX.cc16
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/main/test/TestVADDTX.h1
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/main/test/Tester.cc26
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/main/test/TwoWayCommunication.cc55
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/main/test/iSACTest.cc53
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/main/test/iSACTest.h2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/main/test/initial_delay_unittest.cc8
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/main/test/opus_test.cc11
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/main/test/opus_test.h1
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/main/test/utility.cc26
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/main/test/utility.h13
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/accelerate.cc10
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/accelerate.h6
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/audio_decoder_impl.cc423
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/audio_decoder_impl.h218
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/audio_decoder_unittest.cc136
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/audio_multi_vector.h2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/audio_vector.h2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/background_noise.cc12
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/background_noise.h10
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/buffer_level_filter.cc8
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/buffer_level_filter.h8
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/comfort_noise.cc8
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/comfort_noise.h2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic.cc21
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic.h28
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic_fax.cc2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic_fax.h6
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic_normal.cc13
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic_normal.h8
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/decoder_database.cc5
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/decoder_database.h2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/decoder_database_unittest.cc8
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/delay_manager.cc8
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/delay_manager.h6
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/delay_peak_detector.h2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/dsp_helper.cc39
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/dsp_helper.h17
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/dtmf_buffer.cc10
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/dtmf_buffer.h2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/dtmf_buffer_unittest.cc6
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/dtmf_tone_generator.cc8
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/dtmf_tone_generator.h4
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/dtmf_tone_generator_unittest.cc2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/expand.cc125
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/expand.h27
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/expand_unittest.cc134
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/interface/neteq.h24
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/merge.cc107
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/merge.h32
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/merge_unittest.cc4
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/mock/mock_audio_decoder.h5
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/mock/mock_buffer_level_filter.h4
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/mock/mock_delay_manager.h3
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/mock/mock_dtmf_tone_generator.h2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/mock/mock_expand.h12
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/mock/mock_external_decoder_pcm16b.h18
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/mock/mock_packet_buffer.h4
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/neteq.gypi48
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_external_decoder_unittest.cc17
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_impl.cc357
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_impl.h35
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_impl_unittest.cc417
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_network_stats_unittest.cc40
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_stereo_unittest.cc15
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_tests.gypi52
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_unittest.cc146
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/normal.cc22
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/normal.h2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/normal_unittest.cc12
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/packet_buffer.cc21
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/packet_buffer.h10
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/packet_buffer_unittest.cc28
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/payload_splitter.cc8
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/payload_splitter.h2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/post_decode_vad.cc7
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/post_decode_vad.h4
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/preemptive_expand.cc17
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/preemptive_expand.h20
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/random_vector.h4
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/rtcp.h2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/statistics_calculator.cc191
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/statistics_calculator.h118
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/sync_buffer.h2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/test/RTPencode.cc203
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/test/neteq_ilbc_quality_test.cc8
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/test/neteq_isac_quality_test.cc8
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/test/neteq_opus_quality_test.cc16
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/test/neteq_pcmu_quality_test.cc8
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/time_stretch.cc23
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/time_stretch.h24
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/time_stretch_unittest.cc14
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/timestamp_scaler.h2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/tools/audio_checksum.h2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/tools/audio_loop.h2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/tools/audio_sink.h4
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/tools/constant_pcm_packet_source.cc6
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/tools/constant_pcm_packet_source.h2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/tools/input_audio_file.cc23
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/tools/input_audio_file.h8
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.cc11
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.h4
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/tools/neteq_performance_test.cc8
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.cc15
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.h10
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/tools/neteq_rtpplay.cc123
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/tools/output_audio_file.h2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/tools/output_wav_file.h2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/tools/packet.h2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/tools/packet_source.h2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/tools/resample_input_audio_file.cc17
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/tools/resample_input_audio_file.h2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/tools/rtc_event_log_source.cc127
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/tools/rtc_event_log_source.h70
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/tools/rtp_analyze.cc5
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/tools/rtp_file_source.cc4
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/tools/rtp_file_source.h2
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/tools/rtp_generator.h4
-rw-r--r--chromium/third_party/webrtc/modules/audio_coding/neteq/tools/rtpcat.cc6
-rw-r--r--chromium/third_party/webrtc/modules/audio_conference_mixer/BUILD.gn2
-rw-r--r--chromium/third_party/webrtc/modules/audio_conference_mixer/audio_conference_mixer.gypi2
-rw-r--r--chromium/third_party/webrtc/modules/audio_conference_mixer/interface/audio_conference_mixer.h27
-rw-r--r--chromium/third_party/webrtc/modules/audio_conference_mixer/interface/audio_conference_mixer_defines.h41
-rw-r--r--chromium/third_party/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc302
-rw-r--r--chromium/third_party/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.h96
-rw-r--r--chromium/third_party/webrtc/modules/audio_conference_mixer/source/audio_frame_manipulator.cc10
-rw-r--r--chromium/third_party/webrtc/modules/audio_conference_mixer/source/level_indicator.cc76
-rw-r--r--chromium/third_party/webrtc/modules/audio_conference_mixer/source/level_indicator.h37
-rw-r--r--chromium/third_party/webrtc/modules/audio_conference_mixer/test/audio_conference_mixer_unittest.cc165
-rw-r--r--chromium/third_party/webrtc/modules/audio_decoder_unittests.isolate (renamed from chromium/third_party/webrtc/modules/audio_coding/neteq/audio_decoder_unittests.isolate)0
-rw-r--r--chromium/third_party/webrtc/modules/audio_device/BUILD.gn4
-rw-r--r--chromium/third_party/webrtc/modules/audio_device/android/audio_common.h29
-rw-r--r--chromium/third_party/webrtc/modules/audio_device/android/audio_device_template.h70
-rw-r--r--chromium/third_party/webrtc/modules/audio_device/android/audio_device_unittest.cc143
-rw-r--r--chromium/third_party/webrtc/modules/audio_device/android/audio_manager.cc64
-rw-r--r--chromium/third_party/webrtc/modules/audio_device/android/audio_manager.h71
-rw-r--r--chromium/third_party/webrtc/modules/audio_device/android/audio_manager_unittest.cc34
-rw-r--r--chromium/third_party/webrtc/modules/audio_device/android/build_info.h2
-rw-r--r--chromium/third_party/webrtc/modules/audio_device/android/ensure_initialized.cc9
-rw-r--r--chromium/third_party/webrtc/modules/audio_device/android/fine_audio_buffer.cc89
-rw-r--r--chromium/third_party/webrtc/modules/audio_device/android/fine_audio_buffer.h69
-rw-r--r--chromium/third_party/webrtc/modules/audio_device/android/opensles_common.h2
-rw-r--r--chromium/third_party/webrtc/modules/audio_device/android/opensles_player.cc104
-rw-r--r--chromium/third_party/webrtc/modules/audio_device/android/opensles_player.h6
-rw-r--r--chromium/third_party/webrtc/modules/audio_device/audio_device.gypi25
-rw-r--r--chromium/third_party/webrtc/modules/audio_device/audio_device_buffer.cc21
-rw-r--r--chromium/third_party/webrtc/modules/audio_device/audio_device_buffer.h18
-rw-r--r--chromium/third_party/webrtc/modules/audio_device/audio_device_generic.cc103
-rw-r--r--chromium/third_party/webrtc/modules/audio_device/audio_device_generic.h318
-rw-r--r--chromium/third_party/webrtc/modules/audio_device/audio_device_impl.cc57
-rw-r--r--chromium/third_party/webrtc/modules/audio_device/audio_device_impl.h385
-rw-r--r--chromium/third_party/webrtc/modules/audio_device/dummy/file_audio_device.cc4
-rw-r--r--chromium/third_party/webrtc/modules/audio_device/dummy/file_audio_device.h6
-rw-r--r--chromium/third_party/webrtc/modules/audio_device/fine_audio_buffer.cc150
-rw-r--r--chromium/third_party/webrtc/modules/audio_device/fine_audio_buffer.h108
-rw-r--r--chromium/third_party/webrtc/modules/audio_device/fine_audio_buffer_unittest.cc (renamed from chromium/third_party/webrtc/modules/audio_device/android/fine_audio_buffer_unittest.cc)61
-rw-r--r--chromium/third_party/webrtc/modules/audio_device/include/audio_device.h25
-rw-r--r--chromium/third_party/webrtc/modules/audio_device/include/audio_device_defines.h278
-rw-r--r--chromium/third_party/webrtc/modules/audio_device/include/fake_audio_device.h4
-rw-r--r--chromium/third_party/webrtc/modules/audio_device/ios/audio_device_ios.h495
-rw-r--r--chromium/third_party/webrtc/modules/audio_device/ios/audio_device_ios.mm2543
-rw-r--r--chromium/third_party/webrtc/modules/audio_device/ios/audio_device_not_implemented_ios.mm292
-rw-r--r--chromium/third_party/webrtc/modules/audio_device/ios/audio_device_unittest_ios.cc787
-rw-r--r--chromium/third_party/webrtc/modules/audio_device/linux/audio_device_pulse_linux.cc104
-rw-r--r--chromium/third_party/webrtc/modules/audio_device/linux/audio_device_pulse_linux.h2
-rw-r--r--chromium/third_party/webrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.cc52
-rw-r--r--chromium/third_party/webrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.h2
-rw-r--r--chromium/third_party/webrtc/modules/audio_device/linux/latebindingsymboltable_linux.h2
-rw-r--r--chromium/third_party/webrtc/modules/audio_device/mac/audio_device_mac.cc36
-rw-r--r--chromium/third_party/webrtc/modules/audio_device/mock_audio_device_buffer.h8
-rw-r--r--chromium/third_party/webrtc/modules/audio_device/test/audio_device_test_api.cc18
-rw-r--r--chromium/third_party/webrtc/modules/audio_device/test/func_test_manager.cc40
-rw-r--r--chromium/third_party/webrtc/modules/audio_device/test/func_test_manager.h20
-rw-r--r--chromium/third_party/webrtc/modules/audio_device/win/audio_mixer_manager_win.cc8
-rw-r--r--chromium/third_party/webrtc/modules/audio_device_tests.isolate (renamed from chromium/third_party/webrtc/modules/audio_device/audio_device_tests.isolate)0
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/BUILD.gn4
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/OWNERS3
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/aec/aec_core.c98
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/aec/aec_core.h6
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/aec/aec_core_internal.h3
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/aec/aec_resampler.c9
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/aec/aec_resampler.h4
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/aec/echo_cancellation.c36
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/aec/include/echo_cancellation.h8
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/aec/system_delay_unittest.cc46
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/aecm/echo_control_mobile.c12
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/aecm/include/echo_control_mobile.h4
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/agc/agc.cc10
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/agc/agc.h4
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/agc/agc_manager_direct.cc10
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/agc/agc_manager_direct.h32
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/agc/agc_manager_direct_unittest.cc686
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/agc/legacy/analog_agc.c21
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/agc/legacy/digital_agc.c11
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/agc/legacy/digital_agc.h6
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/agc/legacy/gain_control.h14
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/agc/mock_agc.h4
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/audio_buffer.cc153
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/audio_buffer.h35
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/audio_processing.gypi3
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/audio_processing_impl.cc620
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/audio_processing_impl.h109
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/audio_processing_tests.gypi2
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/beamformer/beamformer.h1
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/beamformer/complex_matrix.h4
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/beamformer/covariance_matrix_generator.cc22
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/beamformer/covariance_matrix_generator.h10
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/beamformer/matrix.h36
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/beamformer/mock_nonlinear_beamformer.h4
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.cc86
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.h23
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer_test.cc2
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/debug.proto33
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/echo_cancellation_impl.cc12
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/echo_cancellation_impl.h7
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/echo_control_mobile_impl.cc4
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/echo_control_mobile_impl.h4
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/gain_control_impl.cc8
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/gain_control_impl.h4
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/high_pass_filter_impl.cc4
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/include/audio_processing.h196
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/include/mock_audio_processing.h17
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.cc333
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.h134
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer_unittest.cc193
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/intelligibility/intelligibility_utils.cc188
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/intelligibility/intelligibility_utils.h39
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/intelligibility/intelligibility_utils_unittest.cc180
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/intelligibility/test/intelligibility_proc.cc (renamed from chromium/third_party/webrtc/modules/audio_processing/intelligibility/intelligibility_proc.cc)33
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/logging/aec_logging.h86
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/logging/aec_logging_file_handling.cc57
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/logging/aec_logging_file_handling.h41
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/noise_suppression_impl.h2
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/ns/include/noise_suppression.h4
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/ns/noise_suppression.c2
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/ns/ns_core.c51
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/ns/ns_core.h12
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/ns/nsx_core.c31
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/ns/nsx_core.h8
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/ns/nsx_core_c.c3
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/ns/nsx_core_mips.c11
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/ns/nsx_core_neon.c2
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/rms_level.cc6
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/rms_level.h8
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/splitting_filter.cc36
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/splitting_filter.h4
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/splitting_filter_unittest.cc26
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/test/audio_processing_unittest.cc733
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/test/audioproc_float.cc99
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/test/test_utils.cc4
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/test/test_utils.h2
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/test/unpack.cc34
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/three_band_filter_bank.cc64
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/three_band_filter_bank.h14
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/transient/transient_suppressor.cc2
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/transient/transient_suppressor.h2
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/vad/common.h6
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/vad/pitch_based_vad.cc2
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/vad/pole_zero_filter.cc32
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/vad/pole_zero_filter.h18
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/vad/standalone_vad.cc8
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/vad/standalone_vad.h8
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/vad/standalone_vad_unittest.cc6
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/vad/vad_audio_proc.cc41
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/vad/vad_audio_proc.h39
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/vad/vad_audio_proc_internal.h2
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/vad/vad_audio_proc_unittest.cc2
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/vad/voice_activity_detector.cc22
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/vad/voice_activity_detector.h2
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/voice_detection_impl.cc4
-rw-r--r--chromium/third_party/webrtc/modules/audio_processing/voice_detection_impl.h2
-rw-r--r--chromium/third_party/webrtc/modules/bitrate_controller/BUILD.gn2
-rw-r--r--chromium/third_party/webrtc/modules/bitrate_controller/bitrate_allocator.cc56
-rw-r--r--chromium/third_party/webrtc/modules/bitrate_controller/bitrate_allocator_unittest.cc66
-rw-r--r--chromium/third_party/webrtc/modules/bitrate_controller/bitrate_controller.gypi2
-rw-r--r--chromium/third_party/webrtc/modules/bitrate_controller/bitrate_controller_impl.cc23
-rw-r--r--chromium/third_party/webrtc/modules/bitrate_controller/bitrate_controller_impl.h21
-rw-r--r--chromium/third_party/webrtc/modules/bitrate_controller/include/bitrate_allocator.h31
-rw-r--r--chromium/third_party/webrtc/modules/bitrate_controller/include/bitrate_controller.h1
-rw-r--r--chromium/third_party/webrtc/modules/bitrate_controller/send_side_bandwidth_estimation.cc28
-rw-r--r--chromium/third_party/webrtc/modules/bitrate_controller/send_side_bandwidth_estimation.h5
-rw-r--r--chromium/third_party/webrtc/modules/bitrate_controller/send_side_bandwidth_estimation_unittest.cc4
-rw-r--r--chromium/third_party/webrtc/modules/bitrate_controller/send_time_history_unittest.cc149
-rw-r--r--chromium/third_party/webrtc/modules/desktop_capture/BUILD.gn24
-rw-r--r--chromium/third_party/webrtc/modules/desktop_capture/cropped_desktop_frame.cc2
-rw-r--r--chromium/third_party/webrtc/modules/desktop_capture/cropping_window_capturer_win.cc4
-rw-r--r--chromium/third_party/webrtc/modules/desktop_capture/desktop_and_cursor_composer.cc2
-rw-r--r--chromium/third_party/webrtc/modules/desktop_capture/desktop_and_cursor_composer.h2
-rw-r--r--chromium/third_party/webrtc/modules/desktop_capture/desktop_frame.h10
-rw-r--r--chromium/third_party/webrtc/modules/desktop_capture/desktop_frame_win.h2
-rw-r--r--chromium/third_party/webrtc/modules/desktop_capture/desktop_region.cc2
-rw-r--r--chromium/third_party/webrtc/modules/desktop_capture/desktop_region.h1
-rw-r--r--chromium/third_party/webrtc/modules/desktop_capture/differ.h2
-rw-r--r--chromium/third_party/webrtc/modules/desktop_capture/differ_unittest.cc2
-rw-r--r--chromium/third_party/webrtc/modules/desktop_capture/mac/desktop_configuration_monitor.h2
-rw-r--r--chromium/third_party/webrtc/modules/desktop_capture/mac/full_screen_chrome_window_detector.h2
-rw-r--r--chromium/third_party/webrtc/modules/desktop_capture/mac/scoped_pixel_buffer_object.h2
-rw-r--r--chromium/third_party/webrtc/modules/desktop_capture/mouse_cursor.h2
-rw-r--r--chromium/third_party/webrtc/modules/desktop_capture/screen_capture_frame_queue.h2
-rw-r--r--chromium/third_party/webrtc/modules/desktop_capture/screen_capturer_helper.h2
-rw-r--r--chromium/third_party/webrtc/modules/desktop_capture/screen_capturer_mac.mm4
-rw-r--r--chromium/third_party/webrtc/modules/desktop_capture/screen_capturer_mock_objects.h4
-rw-r--r--chromium/third_party/webrtc/modules/desktop_capture/screen_capturer_unittest.cc2
-rw-r--r--chromium/third_party/webrtc/modules/desktop_capture/screen_capturer_x11.cc25
-rw-r--r--chromium/third_party/webrtc/modules/desktop_capture/shared_desktop_frame.cc2
-rw-r--r--chromium/third_party/webrtc/modules/desktop_capture/shared_desktop_frame.h2
-rw-r--r--chromium/third_party/webrtc/modules/desktop_capture/shared_memory.h2
-rw-r--r--chromium/third_party/webrtc/modules/desktop_capture/win/desktop.h2
-rw-r--r--chromium/third_party/webrtc/modules/desktop_capture/win/scoped_gdi_object.h6
-rw-r--r--chromium/third_party/webrtc/modules/desktop_capture/win/scoped_thread_desktop.h2
-rw-r--r--chromium/third_party/webrtc/modules/desktop_capture/win/screen_capturer_win_gdi.cc5
-rw-r--r--chromium/third_party/webrtc/modules/desktop_capture/win/screen_capturer_win_gdi.h2
-rw-r--r--chromium/third_party/webrtc/modules/desktop_capture/win/screen_capturer_win_magnifier.h2
-rw-r--r--chromium/third_party/webrtc/modules/desktop_capture/win/window_capture_utils.cc23
-rw-r--r--chromium/third_party/webrtc/modules/desktop_capture/win/window_capture_utils.h15
-rw-r--r--chromium/third_party/webrtc/modules/desktop_capture/window_capturer_mac.mm2
-rwxr-xr-xchromium/third_party/webrtc/modules/desktop_capture/window_capturer_null.cc2
-rw-r--r--chromium/third_party/webrtc/modules/desktop_capture/window_capturer_win.cc49
-rwxr-xr-xchromium/third_party/webrtc/modules/desktop_capture/window_capturer_x11.cc4
-rw-r--r--chromium/third_party/webrtc/modules/desktop_capture/x11/shared_x_display.h2
-rw-r--r--chromium/third_party/webrtc/modules/desktop_capture/x11/x_error_trap.h2
-rw-r--r--chromium/third_party/webrtc/modules/desktop_capture/x11/x_server_pixel_buffer.h2
-rw-r--r--chromium/third_party/webrtc/modules/interface/module_common_types.h207
-rw-r--r--chromium/third_party/webrtc/modules/module_common_types_unittest.cc59
-rw-r--r--chromium/third_party/webrtc/modules/modules.gyp87
-rw-r--r--chromium/third_party/webrtc/modules/modules_java.gyp14
-rw-r--r--chromium/third_party/webrtc/modules/modules_java_chromium.gyp11
-rw-r--r--chromium/third_party/webrtc/modules/pacing/BUILD.gn2
-rw-r--r--chromium/third_party/webrtc/modules/pacing/bitrate_prober.cc4
-rw-r--r--chromium/third_party/webrtc/modules/pacing/include/paced_sender.h28
-rw-r--r--chromium/third_party/webrtc/modules/pacing/include/packet_router.h29
-rw-r--r--chromium/third_party/webrtc/modules/pacing/paced_sender.cc23
-rw-r--r--chromium/third_party/webrtc/modules/pacing/paced_sender_unittest.cc21
-rw-r--r--chromium/third_party/webrtc/modules/pacing/pacing.gypi2
-rw-r--r--chromium/third_party/webrtc/modules/pacing/packet_router.cc70
-rw-r--r--chromium/third_party/webrtc/modules/pacing/packet_router_unittest.cc35
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/BUILD.gn6
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/aimd_rate_control.cc17
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/aimd_rate_control.h6
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/bwe_simulations.cc280
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/include/mock/mock_remote_bitrate_estimator.h42
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h30
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/include/send_time_history.h (renamed from chromium/third_party/webrtc/modules/bitrate_controller/send_time_history.h)13
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/inter_arrival.cc9
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/inter_arrival.h2
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/overuse_detector.cc2
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/overuse_detector.h2
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/overuse_estimator.h2
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator.gypi11
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time.cc20
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time.h12
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time_unittest.cc40
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.cc17
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.h8
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream_unittest.cc6
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_unittest_helper.h6
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimators_test.cc155
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/remote_estimator_proxy.cc160
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/remote_estimator_proxy.h77
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/remote_estimator_proxy_unittest.cc272
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/send_time_history.cc (renamed from chromium/third_party/webrtc/modules/bitrate_controller/send_time_history.cc)35
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/send_time_history_unittest.cc224
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe.cc143
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe.h89
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe_test.cc706
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe_test.h94
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe_test_baselinefile.cc4
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe_test_fileutils.h4
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe_test_framework.cc327
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe_test_framework.h127
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe_test_framework_unittest.cc273
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe_test_logging.cc92
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe_test_logging.h103
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe_unittest.cc393
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/estimators/nada.cc124
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/estimators/nada.h15
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/estimators/nada_unittest.cc390
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/estimators/remb.cc13
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/estimators/remb.h4
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/estimators/send_side.cc27
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/estimators/send_side.h4
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/estimators/tcp.cc7
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/metric_recorder.cc445
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/metric_recorder.h189
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/metric_recorder_unittest.cc108
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/packet.h21
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/packet_receiver.cc137
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/packet_receiver.h39
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/packet_sender.cc163
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/packet_sender.h54
-rwxr-xr-xchromium/third_party/webrtc/modules/remote_bitrate_estimator/test/plot_bars.sh286
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/plot_dynamics.py166
-rwxr-xr-xchromium/third_party/webrtc/modules/remote_bitrate_estimator/test/plot_dynamics.sh (renamed from chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe_plot.sh)48
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/random.h2
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/tools/bwe_rtp.cc90
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/tools/bwe_rtp_play.cc41
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/tools/rtp_to_text.cc11
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/transport_feedback_adapter.cc127
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/transport_feedback_adapter.h63
-rw-r--r--chromium/third_party/webrtc/modules/remote_bitrate_estimator/transport_feedback_adapter_unittest.cc323
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/BUILD.gn9
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/interface/remote_ntp_time_estimator.h2
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/interface/rtp_receiver.h4
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/interface/rtp_rtcp.h42
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h122
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/mocks/mock_rtp_rtcp.h22
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/rtp_rtcp.gypi9
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/source/byte_io.h244
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/source/h264_bitstream_parser.cc566
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/source/h264_bitstream_parser.h80
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/source/h264_bitstream_parser_unittest.cc62
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/source/h264_sps_parser.cc6
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/source/nack_rtx_unittest.cc59
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/source/packet_loss_stats.cc137
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/source/packet_loss_stats.h57
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/source/packet_loss_stats_unittest.cc197
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_format_remb_unittest.cc45
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_packet.cc169
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_packet.h57
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback.cc776
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h105
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback_unittest.cc482
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_packet_unittest.cc1
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_receiver.cc68
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_receiver.h21
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_receiver_help.cc4
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_receiver_help.h7
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_receiver_unittest.cc125
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_sender.cc696
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_sender.h28
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_sender_unittest.cc757
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_utility.cc290
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_utility.h53
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_utility_unittest.cc89
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_format.cc6
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_format_h264.cc28
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_format_h264.h3
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_format_video_generic.h2
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_format_vp8.cc10
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_format_vp8.h2
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_format_vp8_test_helper.h2
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_format_vp9.cc765
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_format_vp9.h108
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_format_vp9_unittest.cc662
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_packet_history.cc134
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_packet_history.h21
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_packet_history_unittest.cc45
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_payload_registry.cc2
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_receiver_audio.cc19
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_receiver_audio.h6
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_receiver_impl.cc51
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_receiver_impl.h5
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_receiver_strategy.h3
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_receiver_video.cc7
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_receiver_video.h1
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.cc122
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.h32
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc13
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_sender.cc427
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_sender.h82
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_sender_audio.cc58
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_sender_audio.h8
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_sender_unittest.cc269
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_sender_video.cc23
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_utility.cc11
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/source/vp8_partition_aggregator.h4
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/test/BWEStandAlone/TestSenderReceiver.cc6
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/test/testAPI/test_api.cc38
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/test/testAPI/test_api.h9
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/test/testAPI/test_api_audio.cc13
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/test/testAPI/test_api_rtcp.cc20
-rw-r--r--chromium/third_party/webrtc/modules/rtp_rtcp/test/testAPI/test_api_video.cc11
-rw-r--r--chromium/third_party/webrtc/modules/utility/interface/audio_frame_operations.h4
-rw-r--r--chromium/third_party/webrtc/modules/utility/interface/file_player.h2
-rw-r--r--chromium/third_party/webrtc/modules/utility/interface/file_recorder.h8
-rw-r--r--chromium/third_party/webrtc/modules/utility/interface/helpers_android.h8
-rw-r--r--chromium/third_party/webrtc/modules/utility/interface/helpers_ios.h55
-rw-r--r--chromium/third_party/webrtc/modules/utility/interface/process_thread.h2
-rw-r--r--chromium/third_party/webrtc/modules/utility/source/audio_frame_operations.cc14
-rw-r--r--chromium/third_party/webrtc/modules/utility/source/audio_frame_operations_unittest.cc6
-rw-r--r--chromium/third_party/webrtc/modules/utility/source/coder.cc6
-rw-r--r--chromium/third_party/webrtc/modules/utility/source/coder.h8
-rw-r--r--chromium/third_party/webrtc/modules/utility/source/file_player_impl.cc13
-rw-r--r--chromium/third_party/webrtc/modules/utility/source/file_player_impl.h2
-rw-r--r--chromium/third_party/webrtc/modules/utility/source/file_player_unittests.cc4
-rw-r--r--chromium/third_party/webrtc/modules/utility/source/file_recorder_impl.cc19
-rw-r--r--chromium/third_party/webrtc/modules/utility/source/file_recorder_impl.h11
-rw-r--r--chromium/third_party/webrtc/modules/utility/source/helpers_android.cc25
-rw-r--r--chromium/third_party/webrtc/modules/utility/source/helpers_ios.mm172
-rw-r--r--chromium/third_party/webrtc/modules/utility/source/jvm_android.cc30
-rw-r--r--chromium/third_party/webrtc/modules/utility/source/process_thread_impl.cc50
-rw-r--r--chromium/third_party/webrtc/modules/utility/source/process_thread_impl.h3
-rw-r--r--chromium/third_party/webrtc/modules/utility/source/process_thread_impl_unittest.cc18
-rw-r--r--chromium/third_party/webrtc/modules/utility/utility.gypi2
-rw-r--r--chromium/third_party/webrtc/modules/video_capture/BUILD.gn15
-rw-r--r--chromium/third_party/webrtc/modules/video_capture/android/device_info_android.cc256
-rw-r--r--chromium/third_party/webrtc/modules/video_capture/android/device_info_android.h71
-rw-r--r--chromium/third_party/webrtc/modules/video_capture/android/video_capture_android.cc255
-rw-r--r--chromium/third_party/webrtc/modules/video_capture/android/video_capture_android.h50
-rw-r--r--chromium/third_party/webrtc/modules/video_capture/ensure_initialized.cc49
-rw-r--r--chromium/third_party/webrtc/modules/video_capture/linux/device_info_linux.cc9
-rw-r--r--chromium/third_party/webrtc/modules/video_capture/test/video_capture_unittest.cc4
-rw-r--r--chromium/third_party/webrtc/modules/video_capture/video_capture.gypi77
-rw-r--r--chromium/third_party/webrtc/modules/video_capture/video_capture_factory.cc8
-rw-r--r--chromium/third_party/webrtc/modules/video_coding/codecs/h264/h264.cc4
-rw-r--r--chromium/third_party/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.cc20
-rw-r--r--chromium/third_party/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.cc24
-rw-r--r--chromium/third_party/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.cc32
-rw-r--r--chromium/third_party/webrtc/modules/video_coding/codecs/interface/video_codec_interface.h35
-rw-r--r--chromium/third_party/webrtc/modules/video_coding/codecs/test/videoprocessor.cc8
-rw-r--r--chromium/third_party/webrtc/modules/video_coding/codecs/test/videoprocessor.h6
-rw-r--r--chromium/third_party/webrtc/modules/video_coding/codecs/test/videoprocessor_integrationtest.cc101
-rw-r--r--chromium/third_party/webrtc/modules/video_coding/codecs/vp8/screenshare_layers.cc27
-rw-r--r--chromium/third_party/webrtc/modules/video_coding/codecs/vp8/screenshare_layers_unittest.cc5
-rw-r--r--chromium/third_party/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.cc4
-rw-r--r--chromium/third_party/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.h2
-rw-r--r--chromium/third_party/webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.h1
-rw-r--r--chromium/third_party/webrtc/modules/video_coding/codecs/vp8/vp8.gyp2
-rw-r--r--chromium/third_party/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc22
-rw-r--r--chromium/third_party/webrtc/modules/video_coding/codecs/vp9/vp9.gyp2
-rw-r--r--chromium/third_party/webrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.cc12
-rw-r--r--chromium/third_party/webrtc/modules/video_coding/codecs/vp9/vp9_impl.cc320
-rw-r--r--chromium/third_party/webrtc/modules/video_coding/codecs/vp9/vp9_impl.h17
-rw-r--r--chromium/third_party/webrtc/modules/video_coding/main/interface/video_coding_defines.h10
-rw-r--r--chromium/third_party/webrtc/modules/video_coding/main/source/codec_database.cc16
-rw-r--r--chromium/third_party/webrtc/modules/video_coding/main/source/encoded_frame.cc61
-rw-r--r--chromium/third_party/webrtc/modules/video_coding/main/source/frame_buffer.cc2
-rw-r--r--chromium/third_party/webrtc/modules/video_coding/main/source/generic_decoder.cc11
-rw-r--r--chromium/third_party/webrtc/modules/video_coding/main/source/generic_decoder.h2
-rw-r--r--chromium/third_party/webrtc/modules/video_coding/main/source/generic_encoder.cc43
-rw-r--r--chromium/third_party/webrtc/modules/video_coding/main/source/generic_encoder.h2
-rw-r--r--chromium/third_party/webrtc/modules/video_coding/main/source/jitter_buffer.cc38
-rw-r--r--chromium/third_party/webrtc/modules/video_coding/main/source/jitter_buffer.h6
-rw-r--r--chromium/third_party/webrtc/modules/video_coding/main/source/jitter_buffer_common.h12
-rw-r--r--chromium/third_party/webrtc/modules/video_coding/main/source/media_opt_util.cc34
-rw-r--r--chromium/third_party/webrtc/modules/video_coding/main/source/media_opt_util.h3
-rw-r--r--chromium/third_party/webrtc/modules/video_coding/main/source/media_optimization.cc7
-rw-r--r--chromium/third_party/webrtc/modules/video_coding/main/source/media_optimization.h2
-rw-r--r--chromium/third_party/webrtc/modules/video_coding/main/source/packet.cc12
-rw-r--r--chromium/third_party/webrtc/modules/video_coding/main/source/receiver_unittest.cc4
-rw-r--r--chromium/third_party/webrtc/modules/video_coding/main/source/session_info.cc47
-rw-r--r--chromium/third_party/webrtc/modules/video_coding/main/source/test/stream_generator.h2
-rw-r--r--chromium/third_party/webrtc/modules/video_coding/main/source/timestamp_map.cc106
-rw-r--r--chromium/third_party/webrtc/modules/video_coding/main/source/timestamp_map.h47
-rw-r--r--chromium/third_party/webrtc/modules/video_coding/main/source/video_coding_impl.cc4
-rw-r--r--chromium/third_party/webrtc/modules/video_coding/main/source/video_coding_impl.h41
-rw-r--r--chromium/third_party/webrtc/modules/video_coding/main/source/video_receiver.cc86
-rw-r--r--chromium/third_party/webrtc/modules/video_coding/main/source/video_sender.cc93
-rw-r--r--chromium/third_party/webrtc/modules/video_coding/main/test/rtp_player.cc15
-rw-r--r--chromium/third_party/webrtc/modules/video_coding/main/test/test_util.h3
-rw-r--r--chromium/third_party/webrtc/modules/video_coding/main/test/vcm_payload_sink_factory.cc2
-rw-r--r--chromium/third_party/webrtc/modules/video_coding/main/test/vcm_payload_sink_factory.h2
-rw-r--r--chromium/third_party/webrtc/modules/video_coding/utility/include/quality_scaler.h14
-rw-r--r--chromium/third_party/webrtc/modules/video_coding/utility/quality_scaler.cc67
-rw-r--r--chromium/third_party/webrtc/modules/video_coding/utility/quality_scaler_unittest.cc177
-rw-r--r--chromium/third_party/webrtc/modules/video_processing/main/interface/video_processing.h4
-rw-r--r--chromium/third_party/webrtc/modules/video_processing/main/source/frame_preprocessor.cc14
-rw-r--r--chromium/third_party/webrtc/modules/video_processing/main/source/frame_preprocessor.h3
-rw-r--r--chromium/third_party/webrtc/modules/video_processing/main/source/video_decimator.cc7
-rw-r--r--chromium/third_party/webrtc/modules/video_processing/main/source/video_decimator.h2
-rw-r--r--chromium/third_party/webrtc/modules/video_processing/main/source/video_processing_impl.cc42
-rw-r--r--chromium/third_party/webrtc/modules/video_processing/main/source/video_processing_impl.h12
-rw-r--r--chromium/third_party/webrtc/modules/video_processing/main/test/unit_test/video_processing_unittest.cc2
-rw-r--r--chromium/third_party/webrtc/modules/video_render/BUILD.gn2
-rw-r--r--chromium/third_party/webrtc/modules/video_render/video_render.gypi19
-rw-r--r--chromium/third_party/webrtc/modules/video_render/windows/video_render_direct3d9.cc3
-rw-r--r--chromium/third_party/webrtc/modules/video_render/windows/video_render_direct3d9.h1
-rw-r--r--chromium/third_party/webrtc/modules/video_render_tests.isolate (renamed from chromium/third_party/webrtc/modules/video_render/video_render_tests.isolate)0
812 files changed, 28128 insertions, 17597 deletions
diff --git a/chromium/third_party/webrtc/modules/audio_coding/audio_codec_speed_tests.isolate b/chromium/third_party/webrtc/modules/audio_codec_speed_tests.isolate
index c559d54bff8..c559d54bff8 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/audio_codec_speed_tests.isolate
+++ b/chromium/third_party/webrtc/modules/audio_codec_speed_tests.isolate
diff --git a/chromium/third_party/webrtc/modules/audio_coding/BUILD.gn b/chromium/third_party/webrtc/modules/audio_coding/BUILD.gn
index 15ea369005b..7bbcd3aee2d 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/BUILD.gn
+++ b/chromium/third_party/webrtc/modules/audio_coding/BUILD.gn
@@ -7,7 +7,6 @@
# be found in the AUTHORS file in the root of the source tree.
import("//build/config/arm.gni")
-import("//third_party/protobuf/proto_library.gni")
import("../../build/webrtc.gni")
config("audio_coding_config") {
@@ -59,16 +58,18 @@ source_set("audio_coding") {
]
}
+ if (is_clang) {
+ # Suppress warnings from Chrome's Clang plugins.
+ # See http://code.google.com/p/webrtc/issues/detail?id=163 for details.
+ configs -= [ "//build/config/clang:find_bad_constructs" ]
+ }
+
deps = [
":cng",
":g711",
- ":g722",
- ":ilbc",
- ":isac",
- ":isac_fix",
":neteq",
":pcm16b",
- ":red",
+ "../..:rtc_event_log",
"../..:webrtc_common",
"../../common_audio",
"../../system_wrappers",
@@ -78,34 +79,26 @@ source_set("audio_coding") {
defines += [ "WEBRTC_CODEC_OPUS" ]
deps += [ ":webrtc_opus" ]
}
-}
-
-proto_library("acm_dump_proto") {
- sources = [
- "main/acm2/dump.proto",
- ]
- proto_out_dir = "webrtc/audio_coding"
-}
-
-source_set("acm_dump") {
- sources = [
- "main/acm2/acm_dump.cc",
- "main/acm2/acm_dump.h",
- ]
-
- defines = []
-
- configs += [ "../..:common_config" ]
-
- public_configs = [ "../..:common_inherited_config" ]
-
- deps = [
- ":acm_dump_proto",
- "../..:webrtc_common",
- ]
-
- if (rtc_enable_protobuf) {
- defines += [ "RTC_AUDIOCODING_DEBUG_DUMP" ]
+ if (!build_with_mozilla) {
+ if (current_cpu == "arm") {
+ defines += [ "WEBRTC_CODEC_ISACFX" ]
+ deps += [ ":isac_fix" ]
+ } else {
+ defines += [ "WEBRTC_CODEC_ISAC" ]
+ deps += [ ":isac" ]
+ }
+ defines += [ "WEBRTC_CODEC_G722" ]
+ deps += [ ":g722" ]
+ }
+ if (!build_with_mozilla && !build_with_chromium) {
+ defines += [
+ "WEBRTC_CODEC_ILBC",
+ "WEBRTC_CODEC_RED",
+ ]
+ deps += [
+ ":ilbc",
+ ":red",
+ ]
}
}
@@ -195,10 +188,12 @@ config("g711_config") {
source_set("g711") {
sources = [
+ "codecs/g711/audio_decoder_pcm.cc",
"codecs/g711/audio_encoder_pcm.cc",
"codecs/g711/g711.c",
"codecs/g711/g711.h",
"codecs/g711/g711_interface.c",
+ "codecs/g711/include/audio_decoder_pcm.h",
"codecs/g711/include/audio_encoder_pcm.h",
"codecs/g711/include/g711_interface.h",
]
@@ -224,11 +219,13 @@ config("g722_config") {
source_set("g722") {
sources = [
+ "codecs/g722/audio_decoder_g722.cc",
"codecs/g722/audio_encoder_g722.cc",
"codecs/g722/g722_decode.c",
"codecs/g722/g722_enc_dec.h",
"codecs/g722/g722_encode.c",
"codecs/g722/g722_interface.c",
+ "codecs/g722/include/audio_decoder_g722.h",
"codecs/g722/include/audio_encoder_g722.h",
"codecs/g722/include/g722_interface.h",
]
@@ -258,6 +255,7 @@ source_set("ilbc") {
"codecs/ilbc/abs_quant.h",
"codecs/ilbc/abs_quant_loop.c",
"codecs/ilbc/abs_quant_loop.h",
+ "codecs/ilbc/audio_decoder_ilbc.cc",
"codecs/ilbc/audio_encoder_ilbc.cc",
"codecs/ilbc/augmented_cb_corr.c",
"codecs/ilbc/augmented_cb_corr.h",
@@ -323,6 +321,7 @@ source_set("ilbc") {
"codecs/ilbc/hp_output.c",
"codecs/ilbc/hp_output.h",
"codecs/ilbc/ilbc.c",
+ "codecs/ilbc/include/audio_decoder_ilbc.h",
"codecs/ilbc/include/audio_encoder_ilbc.h",
"codecs/ilbc/index_conv_dec.c",
"codecs/ilbc/index_conv_dec.h",
@@ -410,6 +409,16 @@ source_set("ilbc") {
]
}
+source_set("isac_common") {
+ sources = [
+ "codecs/isac/audio_encoder_isac_t.h",
+ "codecs/isac/audio_encoder_isac_t_impl.h",
+ "codecs/isac/locked_bandwidth_info.cc",
+ "codecs/isac/locked_bandwidth_info.h",
+ ]
+ public_configs = [ "../..:common_inherited_config" ]
+}
+
config("isac_config") {
include_dirs = [
"../../..",
@@ -419,14 +428,14 @@ config("isac_config") {
source_set("isac") {
sources = [
- "codecs/isac/audio_encoder_isac_t.h",
- "codecs/isac/audio_encoder_isac_t_impl.h",
+ "codecs/isac/main/interface/audio_decoder_isac.h",
"codecs/isac/main/interface/audio_encoder_isac.h",
"codecs/isac/main/interface/isac.h",
"codecs/isac/main/source/arith_routines.c",
"codecs/isac/main/source/arith_routines.h",
"codecs/isac/main/source/arith_routines_hist.c",
"codecs/isac/main/source/arith_routines_logist.c",
+ "codecs/isac/main/source/audio_decoder_isac.cc",
"codecs/isac/main/source/audio_encoder_isac.cc",
"codecs/isac/main/source/bandwidth_estimator.c",
"codecs/isac/main/source/bandwidth_estimator.h",
@@ -448,6 +457,7 @@ source_set("isac") {
"codecs/isac/main/source/filterbanks.c",
"codecs/isac/main/source/intialize.c",
"codecs/isac/main/source/isac.c",
+ "codecs/isac/main/source/isac_float_type.h",
"codecs/isac/main/source/lattice.c",
"codecs/isac/main/source/lpc_analysis.c",
"codecs/isac/main/source/lpc_analysis.h",
@@ -488,6 +498,7 @@ source_set("isac") {
deps = [
":audio_decoder_interface",
":audio_encoder_interface",
+ ":isac_common",
"../../common_audio",
]
}
@@ -501,14 +512,14 @@ config("isac_fix_config") {
source_set("isac_fix") {
sources = [
- "codecs/isac/audio_encoder_isac_t.h",
- "codecs/isac/audio_encoder_isac_t_impl.h",
+ "codecs/isac/fix/interface/audio_decoder_isacfix.h",
"codecs/isac/fix/interface/audio_encoder_isacfix.h",
"codecs/isac/fix/interface/isacfix.h",
"codecs/isac/fix/source/arith_routines.c",
"codecs/isac/fix/source/arith_routines_hist.c",
"codecs/isac/fix/source/arith_routines_logist.c",
"codecs/isac/fix/source/arith_routins.h",
+ "codecs/isac/fix/source/audio_decoder_isacfix.cc",
"codecs/isac/fix/source/audio_encoder_isacfix.cc",
"codecs/isac/fix/source/bandwidth_estimator.c",
"codecs/isac/fix/source/bandwidth_estimator.h",
@@ -526,6 +537,7 @@ source_set("isac_fix") {
"codecs/isac/fix/source/filterbanks.c",
"codecs/isac/fix/source/filters.c",
"codecs/isac/fix/source/initialize.c",
+ "codecs/isac/fix/source/isac_fix_type.h",
"codecs/isac/fix/source/isacfix.c",
"codecs/isac/fix/source/lattice.c",
"codecs/isac/fix/source/lattice_c.c",
@@ -563,6 +575,7 @@ source_set("isac_fix") {
deps = [
":audio_encoder_interface",
+ ":isac_common",
"../../common_audio",
"../../system_wrappers",
]
@@ -611,16 +624,11 @@ if (rtc_build_with_neon) {
source_set("isac_neon") {
sources = [
"codecs/isac/fix/source/entropy_coding_neon.c",
+ "codecs/isac/fix/source/filterbanks_neon.c",
"codecs/isac/fix/source/filters_neon.c",
"codecs/isac/fix/source/lattice_neon.c",
"codecs/isac/fix/source/transform_neon.c",
]
- if (current_cpu != "arm64" || !is_clang) {
- # Disable AllpassFilter2FixDec16Neon function due to a clang bug.
- # For more details refer to:
- # https://code.google.com/p/webrtc/issues/detail?id=4567
- sources += [ "codecs/isac/fix/source/filterbanks_neon.c" ]
- }
if (current_cpu != "arm64") {
# Enable compilation for the NEON instruction set. This is needed
@@ -657,7 +665,9 @@ config("pcm16b_config") {
source_set("pcm16b") {
sources = [
+ "codecs/pcm16b/audio_decoder_pcm16b.cc",
"codecs/pcm16b/audio_encoder_pcm16b.cc",
+ "codecs/pcm16b/include/audio_decoder_pcm16b.h",
"codecs/pcm16b/include/audio_encoder_pcm16b.h",
"codecs/pcm16b/include/pcm16b.h",
"codecs/pcm16b/pcm16b.c",
@@ -682,7 +692,9 @@ config("opus_config") {
source_set("webrtc_opus") {
sources = [
+ "codecs/opus/audio_decoder_opus.cc",
"codecs/opus/audio_encoder_opus.cc",
+ "codecs/opus/interface/audio_decoder_opus.h",
"codecs/opus/interface/audio_encoder_opus.h",
"codecs/opus/interface/opus_interface.h",
"codecs/opus/opus_inst.h",
@@ -697,8 +709,9 @@ source_set("webrtc_opus") {
configs += [ "../..:common_config" ]
public_configs = [ "../..:common_inherited_config" ]
- deps += [ rtc_opus_dir ]
- forward_dependent_configs_from = [ rtc_opus_dir ]
+ public_deps = [
+ rtc_opus_dir,
+ ]
} else if (build_with_mozilla) {
include_dirs = [ getenv("DIST") + "/include/opus" ]
}
@@ -792,10 +805,6 @@ source_set("neteq") {
":audio_decoder_interface",
":cng",
":g711",
- ":g722",
- ":ilbc",
- ":isac",
- ":isac_fix",
":pcm16b",
"../..:webrtc_common",
"../../common_audio",
@@ -808,4 +817,19 @@ source_set("neteq") {
defines += [ "WEBRTC_CODEC_OPUS" ]
deps += [ ":webrtc_opus" ]
}
+ if (!build_with_mozilla) {
+ if (current_cpu == "arm") {
+ defines += [ "WEBRTC_CODEC_ISACFX" ]
+ deps += [ ":isac_fix" ]
+ } else {
+ defines += [ "WEBRTC_CODEC_ISAC" ]
+ deps += [ ":isac" ]
+ }
+ defines += [ "WEBRTC_CODEC_G722" ]
+ deps += [ ":g722" ]
+ }
+ if (!build_with_mozilla && !build_with_chromium) {
+ defines += [ "WEBRTC_CODEC_ILBC" ]
+ deps += [ ":ilbc" ]
+ }
}
diff --git a/chromium/third_party/webrtc/modules/audio_coding/audio_coding.gypi b/chromium/third_party/webrtc/modules/audio_coding/audio_coding.gypi
index 8f5927bc5e9..bc3c48d075d 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/audio_coding.gypi
+++ b/chromium/third_party/webrtc/modules/audio_coding/audio_coding.gypi
@@ -15,10 +15,11 @@
'codecs/g722/g722.gypi',
'codecs/ilbc/ilbc.gypi',
'codecs/isac/isac.gypi',
+ 'codecs/isac/isac_common.gypi',
'codecs/isac/isacfix.gypi',
'codecs/pcm16b/pcm16b.gypi',
'codecs/red/red.gypi',
- 'main/acm2/audio_coding_module.gypi',
+ 'main/audio_coding_module.gypi',
'neteq/neteq.gypi',
],
'conditions': [
diff --git a/chromium/third_party/webrtc/modules/audio_coding/audio_coding_tests.gypi b/chromium/third_party/webrtc/modules/audio_coding/audio_coding_tests.gypi
index 84ef8673b27..e60309a6dfa 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/audio_coding_tests.gypi
+++ b/chromium/third_party/webrtc/modules/audio_coding/audio_coding_tests.gypi
@@ -51,22 +51,5 @@
},
],
}],
- ['test_isolation_mode != "noop"', {
- 'targets': [
- {
- 'target_name': 'audio_codec_speed_tests_run',
- 'type': 'none',
- 'dependencies': [
- 'audio_codec_speed_tests',
- ],
- 'includes': [
- '../../build/isolate.gypi',
- ],
- 'sources': [
- 'audio_codec_speed_tests.isolate',
- ],
- },
- ],
- }],
],
}
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/audio_decoder.cc b/chromium/third_party/webrtc/modules/audio_coding/codecs/audio_decoder.cc
index 0a4a6a964bd..08d101c5ae4 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/audio_decoder.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/audio_decoder.cc
@@ -56,7 +56,9 @@ int AudioDecoder::DecodeRedundantInternal(const uint8_t* encoded,
bool AudioDecoder::HasDecodePlc() const { return false; }
-int AudioDecoder::DecodePlc(int num_frames, int16_t* decoded) { return 0; }
+size_t AudioDecoder::DecodePlc(size_t num_frames, int16_t* decoded) {
+ return 0;
+}
int AudioDecoder::IncomingPacket(const uint8_t* payload,
size_t payload_len,
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/audio_decoder.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/audio_decoder.h
index 8947e811668..b277d4bb7d5 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/audio_decoder.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/audio_decoder.h
@@ -61,11 +61,12 @@ class AudioDecoder {
virtual bool HasDecodePlc() const;
// Calls the packet-loss concealment of the decoder to update the state after
- // one or several lost packets.
- virtual int DecodePlc(int num_frames, int16_t* decoded);
+ // one or several lost packets. The caller has to make sure that the
+ // memory allocated in |decoded| should accommodate |num_frames| frames.
+ virtual size_t DecodePlc(size_t num_frames, int16_t* decoded);
- // Initializes the decoder.
- virtual int Init() = 0;
+ // Resets the decoder state (empty buffers etc.).
+ virtual void Reset() = 0;
// Notifies the decoder of an incoming packet to NetEQ.
virtual int IncomingPacket(const uint8_t* payload,
@@ -115,7 +116,7 @@ class AudioDecoder {
SpeechType* speech_type);
private:
- DISALLOW_COPY_AND_ASSIGN(AudioDecoder);
+ RTC_DISALLOW_COPY_AND_ASSIGN(AudioDecoder);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/audio_encoder.cc b/chromium/third_party/webrtc/modules/audio_coding/codecs/audio_encoder.cc
index 72e4265e987..6d763005acc 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/audio_encoder.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/audio_encoder.cc
@@ -13,10 +13,12 @@
namespace webrtc {
-AudioEncoder::EncodedInfo::EncodedInfo() : EncodedInfoLeaf() {
-}
+AudioEncoder::EncodedInfo::EncodedInfo() = default;
+
+AudioEncoder::EncodedInfo::~EncodedInfo() = default;
-AudioEncoder::EncodedInfo::~EncodedInfo() {
+int AudioEncoder::RtpTimestampRateHz() const {
+ return SampleRateHz();
}
AudioEncoder::EncodedInfo AudioEncoder::Encode(uint32_t rtp_timestamp,
@@ -24,16 +26,30 @@ AudioEncoder::EncodedInfo AudioEncoder::Encode(uint32_t rtp_timestamp,
size_t num_samples_per_channel,
size_t max_encoded_bytes,
uint8_t* encoded) {
- CHECK_EQ(num_samples_per_channel,
- static_cast<size_t>(SampleRateHz() / 100));
+ RTC_CHECK_EQ(num_samples_per_channel,
+ static_cast<size_t>(SampleRateHz() / 100));
EncodedInfo info =
EncodeInternal(rtp_timestamp, audio, max_encoded_bytes, encoded);
- CHECK_LE(info.encoded_bytes, max_encoded_bytes);
+ RTC_CHECK_LE(info.encoded_bytes, max_encoded_bytes);
return info;
}
-int AudioEncoder::RtpTimestampRateHz() const {
- return SampleRateHz();
+bool AudioEncoder::SetFec(bool enable) {
+ return !enable;
}
+bool AudioEncoder::SetDtx(bool enable) {
+ return !enable;
+}
+
+bool AudioEncoder::SetApplication(Application application) {
+ return false;
+}
+
+void AudioEncoder::SetMaxPlaybackRate(int frequency_hz) {}
+
+void AudioEncoder::SetProjectedPacketLossRate(double fraction) {}
+
+void AudioEncoder::SetTargetBitrate(int target_bps) {}
+
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/audio_encoder.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/audio_encoder.h
index fe6fd87dfd7..cda9d86f2e6 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/audio_encoder.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/audio_encoder.h
@@ -23,18 +23,11 @@ namespace webrtc {
class AudioEncoder {
public:
struct EncodedInfoLeaf {
- EncodedInfoLeaf()
- : encoded_bytes(0),
- encoded_timestamp(0),
- payload_type(0),
- send_even_if_empty(false),
- speech(true) {}
-
- size_t encoded_bytes;
- uint32_t encoded_timestamp;
- int payload_type;
- bool send_even_if_empty;
- bool speech;
+ size_t encoded_bytes = 0;
+ uint32_t encoded_timestamp = 0;
+ int payload_type = 0;
+ bool send_even_if_empty = false;
+ bool speech = true;
};
// This is the main struct for auxiliary encoding information. Each encoded
@@ -54,26 +47,9 @@ class AudioEncoder {
std::vector<EncodedInfoLeaf> redundant;
};
- virtual ~AudioEncoder() {}
+ virtual ~AudioEncoder() = default;
- // Accepts one 10 ms block of input audio (i.e., sample_rate_hz() / 100 *
- // num_channels() samples). Multi-channel audio must be sample-interleaved.
- // The encoder produces zero or more bytes of output in |encoded| and
- // returns additional encoding information.
- // The caller is responsible for making sure that |max_encoded_bytes| is
- // not smaller than the number of bytes actually produced by the encoder.
- EncodedInfo Encode(uint32_t rtp_timestamp,
- const int16_t* audio,
- size_t num_samples_per_channel,
- size_t max_encoded_bytes,
- uint8_t* encoded);
-
- // Return the input sample rate in Hz and the number of input channels.
- // These are constants set at instantiation time.
- virtual int SampleRateHz() const = 0;
- virtual int NumChannels() const = 0;
-
- // Return the maximum number of bytes that can be produced by the encoder
+ // Returns the maximum number of bytes that can be produced by the encoder
// at each Encode() call. The caller can use the return value to determine
// the size of the buffer that needs to be allocated. This value is allowed
// to depend on encoder parameters like bitrate, frame size etc., so if
@@ -81,8 +57,13 @@ class AudioEncoder {
// that the buffer is large enough by calling MaxEncodedBytes() again.
virtual size_t MaxEncodedBytes() const = 0;
- // Returns the rate with which the RTP timestamps are updated. By default,
- // this is the same as sample_rate_hz().
+ // Returns the input sample rate in Hz and the number of input channels.
+ // These are constants set at instantiation time.
+ virtual int SampleRateHz() const = 0;
+ virtual int NumChannels() const = 0;
+
+ // Returns the rate at which the RTP timestamps are updated. The default
+ // implementation returns SampleRateHz().
virtual int RtpTimestampRateHz() const;
// Returns the number of 10 ms frames the encoder will put in the next
@@ -90,63 +71,73 @@ class AudioEncoder {
// the encoder may vary the number of 10 ms frames from packet to packet, but
// it must decide the length of the next packet no later than when outputting
// the preceding packet.
- virtual int Num10MsFramesInNextPacket() const = 0;
+ virtual size_t Num10MsFramesInNextPacket() const = 0;
// Returns the maximum value that can be returned by
// Num10MsFramesInNextPacket().
- virtual int Max10MsFramesInAPacket() const = 0;
+ virtual size_t Max10MsFramesInAPacket() const = 0;
// Returns the current target bitrate in bits/s. The value -1 means that the
// codec adapts the target automatically, and a current target cannot be
// provided.
virtual int GetTargetBitrate() const = 0;
- // Changes the target bitrate. The implementation is free to alter this value,
- // e.g., if the desired value is outside the valid range.
- virtual void SetTargetBitrate(int bits_per_second) {}
-
- // Tells the implementation what the projected packet loss rate is. The rate
- // is in the range [0.0, 1.0]. This rate is typically used to adjust channel
- // coding efforts, such as FEC.
- virtual void SetProjectedPacketLossRate(double fraction) {}
+ // Accepts one 10 ms block of input audio (i.e., SampleRateHz() / 100 *
+ // NumChannels() samples). Multi-channel audio must be sample-interleaved.
+ // The encoder produces zero or more bytes of output in |encoded| and
+ // returns additional encoding information.
+ // The caller is responsible for making sure that |max_encoded_bytes| is
+ // not smaller than the number of bytes actually produced by the encoder.
+ // Encode() checks some preconditions, calls EncodeInternal() which does the
+ // actual work, and then checks some postconditions.
+ EncodedInfo Encode(uint32_t rtp_timestamp,
+ const int16_t* audio,
+ size_t num_samples_per_channel,
+ size_t max_encoded_bytes,
+ uint8_t* encoded);
- // This is the encode function that the inherited classes must implement. It
- // is called from Encode in the base class.
virtual EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
const int16_t* audio,
size_t max_encoded_bytes,
uint8_t* encoded) = 0;
-};
-class AudioEncoderMutable : public AudioEncoder {
- public:
- enum Application { kApplicationSpeech, kApplicationAudio };
-
- // Discards unprocessed audio data.
+ // Resets the encoder to its starting state, discarding any input that has
+ // been fed to the encoder but not yet emitted in a packet.
virtual void Reset() = 0;
- // Enables codec-internal FEC, if the implementation supports it.
- virtual bool SetFec(bool enable) = 0;
-
- // Enables or disables codec-internal VAD/DTX, if the implementation supports
- // it.
- virtual bool SetDtx(bool enable) = 0;
-
- // Sets the application mode. The implementation is free to disregard this
- // setting.
- virtual bool SetApplication(Application application) = 0;
-
- // Sets an upper limit on the payload size produced by the encoder. The
- // implementation is free to disregard this setting.
- virtual void SetMaxPayloadSize(int max_payload_size_bytes) = 0;
-
- // Sets the maximum rate which the codec may not exceed for any packet.
- virtual void SetMaxRate(int max_rate_bps) = 0;
-
- // Informs the encoder about the maximum sample rate which the decoder will
- // use when decoding the bitstream. The implementation is free to disregard
- // this hint.
- virtual bool SetMaxPlaybackRate(int frequency_hz) = 0;
+ // Enables or disables codec-internal FEC (forward error correction). Returns
+ // true if the codec was able to comply. The default implementation returns
+ // true when asked to disable FEC and false when asked to enable it (meaning
+ // that FEC isn't supported).
+ virtual bool SetFec(bool enable);
+
+ // Enables or disables codec-internal VAD/DTX. Returns true if the codec was
+ // able to comply. The default implementation returns true when asked to
+ // disable DTX and false when asked to enable it (meaning that DTX isn't
+ // supported).
+ virtual bool SetDtx(bool enable);
+
+ // Sets the application mode. Returns true if the codec was able to comply.
+ // The default implementation just returns false.
+ enum class Application { kSpeech, kAudio };
+ virtual bool SetApplication(Application application);
+
+ // Tells the encoder about the highest sample rate the decoder is expected to
+ // use when decoding the bitstream. The encoder would typically use this
+ // information to adjust the quality of the encoding. The default
+ // implementation just returns true.
+ virtual void SetMaxPlaybackRate(int frequency_hz);
+
+ // Tells the encoder what the projected packet loss rate is. The rate is in
+ // the range [0.0, 1.0]. The encoder would typically use this information to
+ // adjust channel coding efforts, such as FEC. The default implementation
+ // does nothing.
+ virtual void SetProjectedPacketLossRate(double fraction);
+
+ // Tells the encoder what average bitrate we'd like it to produce. The
+ // encoder is free to adjust or disregard the given bitrate (the default
+ // implementation does the latter).
+ virtual void SetTargetBitrate(int target_bps);
};
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_AUDIO_ENCODER_H_
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/audio_encoder_mutable_impl.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/audio_encoder_mutable_impl.h
deleted file mode 100644
index 553d8ad4b8c..00000000000
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/audio_encoder_mutable_impl.h
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_AUDIO_ENCODER_MUTABLE_IMPL_H_
-#define WEBRTC_MODULES_AUDIO_CODING_CODECS_AUDIO_ENCODER_MUTABLE_IMPL_H_
-
-#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/base/thread_annotations.h"
-#include "webrtc/modules/audio_coding/codecs/audio_encoder.h"
-#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
-#include "webrtc/system_wrappers/interface/thread_wrapper.h"
-
-namespace webrtc {
-
-// This is a convenient base class for implementations of AudioEncoderMutable.
-// T is the type of the encoder state; it has to look like an AudioEncoder
-// subclass whose constructor takes a single T::Config parameter. If P is
-// given, this class will inherit from it instead of directly from
-// AudioEncoderMutable.
-template <typename T, typename P = AudioEncoderMutable>
-class AudioEncoderMutableImpl : public P {
- public:
- void Reset() override {
- typename T::Config config;
- {
- CriticalSectionScoped cs(encoder_lock_.get());
- config = config_;
- }
- Reconstruct(config);
- }
-
- bool SetFec(bool enable) override { return false; }
-
- bool SetDtx(bool enable) override { return false; }
-
- bool SetApplication(AudioEncoderMutable::Application application) override {
- return false;
- }
-
- void SetMaxPayloadSize(int max_payload_size_bytes) override {}
-
- void SetMaxRate(int max_rate_bps) override {}
-
- bool SetMaxPlaybackRate(int frequency_hz) override { return false; }
-
- AudioEncoderMutable::EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
- const int16_t* audio,
- size_t max_encoded_bytes,
- uint8_t* encoded) override {
- CriticalSectionScoped cs(encoder_lock_.get());
- return encoder_->EncodeInternal(rtp_timestamp, audio, max_encoded_bytes,
- encoded);
- }
- int SampleRateHz() const override {
- CriticalSectionScoped cs(encoder_lock_.get());
- return encoder_->SampleRateHz();
- }
- int NumChannels() const override {
- CriticalSectionScoped cs(encoder_lock_.get());
- return encoder_->NumChannels();
- }
- size_t MaxEncodedBytes() const override {
- CriticalSectionScoped cs(encoder_lock_.get());
- return encoder_->MaxEncodedBytes();
- }
- int RtpTimestampRateHz() const override {
- CriticalSectionScoped cs(encoder_lock_.get());
- return encoder_->RtpTimestampRateHz();
- }
- int Num10MsFramesInNextPacket() const override {
- CriticalSectionScoped cs(encoder_lock_.get());
- return encoder_->Num10MsFramesInNextPacket();
- }
- int Max10MsFramesInAPacket() const override {
- CriticalSectionScoped cs(encoder_lock_.get());
- return encoder_->Max10MsFramesInAPacket();
- }
- int GetTargetBitrate() const override {
- CriticalSectionScoped cs(encoder_lock_.get());
- return encoder_->GetTargetBitrate();
- }
- void SetTargetBitrate(int bits_per_second) override {
- CriticalSectionScoped cs(encoder_lock_.get());
- encoder_->SetTargetBitrate(bits_per_second);
- }
- void SetProjectedPacketLossRate(double fraction) override {
- CriticalSectionScoped cs(encoder_lock_.get());
- encoder_->SetProjectedPacketLossRate(fraction);
- }
-
- protected:
- explicit AudioEncoderMutableImpl(const typename T::Config& config)
- : encoder_lock_(CriticalSectionWrapper::CreateCriticalSection()) {
- Reconstruct(config);
- }
-
- bool Reconstruct(const typename T::Config& config) {
- if (!config.IsOk())
- return false;
- CriticalSectionScoped cs(encoder_lock_.get());
- config_ = config;
- encoder_.reset(new T(config_));
- return true;
- }
-
- typename T::Config config() const {
- CriticalSectionScoped cs(encoder_lock_.get());
- return config_;
- }
- T* encoder() EXCLUSIVE_LOCKS_REQUIRED(encoder_lock_) {
- return encoder_.get();
- }
- const T* encoder() const EXCLUSIVE_LOCKS_REQUIRED(encoder_lock_) {
- return encoder_.get();
- }
-
- const rtc::scoped_ptr<CriticalSectionWrapper> encoder_lock_;
-
- private:
- rtc::scoped_ptr<T> encoder_ GUARDED_BY(encoder_lock_);
- typename T::Config config_ GUARDED_BY(encoder_lock_);
-};
-
-} // namespace webrtc
-
-#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_AUDIO_ENCODER_MUTABLE_IMPL_H_
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng.cc b/chromium/third_party/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng.cc
index d2acaa1a1a4..121524633c6 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng.cc
@@ -19,18 +19,20 @@ namespace {
const int kMaxFrameSizeMs = 60;
-} // namespace
-
-AudioEncoderCng::Config::Config()
- : num_channels(1),
- payload_type(13),
- speech_encoder(NULL),
- vad_mode(Vad::kVadNormal),
- sid_frame_interval_ms(100),
- num_cng_coefficients(8),
- vad(NULL) {
+rtc::scoped_ptr<CNG_enc_inst, CngInstDeleter> CreateCngInst(
+ int sample_rate_hz,
+ int sid_frame_interval_ms,
+ int num_cng_coefficients) {
+ rtc::scoped_ptr<CNG_enc_inst, CngInstDeleter> cng_inst;
+ RTC_CHECK_EQ(0, WebRtcCng_CreateEnc(cng_inst.accept()));
+ RTC_CHECK_EQ(0,
+ WebRtcCng_InitEnc(cng_inst.get(), sample_rate_hz,
+ sid_frame_interval_ms, num_cng_coefficients));
+ return cng_inst;
}
+} // namespace
+
bool AudioEncoderCng::Config::IsOk() const {
if (num_channels != 1)
return false;
@@ -38,7 +40,8 @@ bool AudioEncoderCng::Config::IsOk() const {
return false;
if (num_channels != speech_encoder->NumChannels())
return false;
- if (sid_frame_interval_ms < speech_encoder->Max10MsFramesInAPacket() * 10)
+ if (sid_frame_interval_ms <
+ static_cast<int>(speech_encoder->Max10MsFramesInAPacket() * 10))
return false;
if (num_cng_coefficients > WEBRTC_CNG_MAX_LPC_ORDER ||
num_cng_coefficients <= 0)
@@ -50,50 +53,41 @@ AudioEncoderCng::AudioEncoderCng(const Config& config)
: speech_encoder_(config.speech_encoder),
cng_payload_type_(config.payload_type),
num_cng_coefficients_(config.num_cng_coefficients),
+ sid_frame_interval_ms_(config.sid_frame_interval_ms),
last_frame_active_(true),
- vad_(new Vad(config.vad_mode)) {
- if (config.vad) {
- // Replace default Vad object with user-provided one.
- vad_.reset(config.vad);
- }
- CHECK(config.IsOk()) << "Invalid configuration.";
- CNG_enc_inst* cng_inst;
- CHECK_EQ(WebRtcCng_CreateEnc(&cng_inst), 0) << "WebRtcCng_CreateEnc failed.";
- cng_inst_.reset(cng_inst); // Transfer ownership to scoped_ptr.
- CHECK_EQ(WebRtcCng_InitEnc(cng_inst_.get(), SampleRateHz(),
- config.sid_frame_interval_ms,
- config.num_cng_coefficients),
- 0)
- << "WebRtcCng_InitEnc failed";
+ vad_(config.vad ? rtc_make_scoped_ptr(config.vad)
+ : CreateVad(config.vad_mode)) {
+ RTC_CHECK(config.IsOk()) << "Invalid configuration.";
+ cng_inst_ = CreateCngInst(SampleRateHz(), sid_frame_interval_ms_,
+ num_cng_coefficients_);
}
-AudioEncoderCng::~AudioEncoderCng() {
+AudioEncoderCng::~AudioEncoderCng() = default;
+
+size_t AudioEncoderCng::MaxEncodedBytes() const {
+ const size_t max_encoded_bytes_active = speech_encoder_->MaxEncodedBytes();
+ const size_t max_encoded_bytes_passive =
+ rtc::CheckedDivExact(kMaxFrameSizeMs, 10) * SamplesPer10msFrame();
+ return std::max(max_encoded_bytes_active, max_encoded_bytes_passive);
}
int AudioEncoderCng::SampleRateHz() const {
return speech_encoder_->SampleRateHz();
}
-int AudioEncoderCng::RtpTimestampRateHz() const {
- return speech_encoder_->RtpTimestampRateHz();
-}
-
int AudioEncoderCng::NumChannels() const {
return 1;
}
-size_t AudioEncoderCng::MaxEncodedBytes() const {
- const size_t max_encoded_bytes_active = speech_encoder_->MaxEncodedBytes();
- const size_t max_encoded_bytes_passive =
- rtc::CheckedDivExact(kMaxFrameSizeMs, 10) * SamplesPer10msFrame();
- return std::max(max_encoded_bytes_active, max_encoded_bytes_passive);
+int AudioEncoderCng::RtpTimestampRateHz() const {
+ return speech_encoder_->RtpTimestampRateHz();
}
-int AudioEncoderCng::Num10MsFramesInNextPacket() const {
+size_t AudioEncoderCng::Num10MsFramesInNextPacket() const {
return speech_encoder_->Num10MsFramesInNextPacket();
}
-int AudioEncoderCng::Max10MsFramesInAPacket() const {
+size_t AudioEncoderCng::Max10MsFramesInAPacket() const {
return speech_encoder_->Max10MsFramesInAPacket();
}
@@ -101,34 +95,25 @@ int AudioEncoderCng::GetTargetBitrate() const {
return speech_encoder_->GetTargetBitrate();
}
-void AudioEncoderCng::SetTargetBitrate(int bits_per_second) {
- speech_encoder_->SetTargetBitrate(bits_per_second);
-}
-
-void AudioEncoderCng::SetProjectedPacketLossRate(double fraction) {
- DCHECK_GE(fraction, 0.0);
- DCHECK_LE(fraction, 1.0);
- speech_encoder_->SetProjectedPacketLossRate(fraction);
-}
-
AudioEncoder::EncodedInfo AudioEncoderCng::EncodeInternal(
uint32_t rtp_timestamp,
const int16_t* audio,
size_t max_encoded_bytes,
uint8_t* encoded) {
- CHECK_GE(max_encoded_bytes, static_cast<size_t>(num_cng_coefficients_ + 1));
+ RTC_CHECK_GE(max_encoded_bytes,
+ static_cast<size_t>(num_cng_coefficients_ + 1));
const size_t samples_per_10ms_frame = SamplesPer10msFrame();
- CHECK_EQ(speech_buffer_.size(),
- rtp_timestamps_.size() * samples_per_10ms_frame);
+ RTC_CHECK_EQ(speech_buffer_.size(),
+ rtp_timestamps_.size() * samples_per_10ms_frame);
rtp_timestamps_.push_back(rtp_timestamp);
for (size_t i = 0; i < samples_per_10ms_frame; ++i) {
speech_buffer_.push_back(audio[i]);
}
- const int frames_to_encode = speech_encoder_->Num10MsFramesInNextPacket();
- if (rtp_timestamps_.size() < static_cast<size_t>(frames_to_encode)) {
+ const size_t frames_to_encode = speech_encoder_->Num10MsFramesInNextPacket();
+ if (rtp_timestamps_.size() < frames_to_encode) {
return EncodedInfo();
}
- CHECK_LE(frames_to_encode * 10, kMaxFrameSizeMs)
+ RTC_CHECK_LE(static_cast<int>(frames_to_encode * 10), kMaxFrameSizeMs)
<< "Frame size cannot be larger than " << kMaxFrameSizeMs
<< " ms when using VAD/CNG.";
@@ -136,12 +121,12 @@ AudioEncoder::EncodedInfo AudioEncoderCng::EncodeInternal(
// following split sizes:
// 10 ms = 10 + 0 ms; 20 ms = 20 + 0 ms; 30 ms = 30 + 0 ms;
// 40 ms = 20 + 20 ms; 50 ms = 30 + 20 ms; 60 ms = 30 + 30 ms.
- int blocks_in_first_vad_call =
+ size_t blocks_in_first_vad_call =
(frames_to_encode > 3 ? 3 : frames_to_encode);
if (frames_to_encode == 4)
blocks_in_first_vad_call = 2;
- CHECK_GE(frames_to_encode, blocks_in_first_vad_call);
- const int blocks_in_second_vad_call =
+ RTC_CHECK_GE(frames_to_encode, blocks_in_first_vad_call);
+ const size_t blocks_in_second_vad_call =
frames_to_encode - blocks_in_first_vad_call;
// Check if all of the buffer is passive speech. Start with checking the first
@@ -182,24 +167,63 @@ AudioEncoder::EncodedInfo AudioEncoderCng::EncodeInternal(
return info;
}
+void AudioEncoderCng::Reset() {
+ speech_encoder_->Reset();
+ speech_buffer_.clear();
+ rtp_timestamps_.clear();
+ last_frame_active_ = true;
+ vad_->Reset();
+ cng_inst_ = CreateCngInst(SampleRateHz(), sid_frame_interval_ms_,
+ num_cng_coefficients_);
+}
+
+bool AudioEncoderCng::SetFec(bool enable) {
+ return speech_encoder_->SetFec(enable);
+}
+
+bool AudioEncoderCng::SetDtx(bool enable) {
+ return speech_encoder_->SetDtx(enable);
+}
+
+bool AudioEncoderCng::SetApplication(Application application) {
+ return speech_encoder_->SetApplication(application);
+}
+
+void AudioEncoderCng::SetMaxPlaybackRate(int frequency_hz) {
+ speech_encoder_->SetMaxPlaybackRate(frequency_hz);
+}
+
+void AudioEncoderCng::SetProjectedPacketLossRate(double fraction) {
+ speech_encoder_->SetProjectedPacketLossRate(fraction);
+}
+
+void AudioEncoderCng::SetTargetBitrate(int bits_per_second) {
+ speech_encoder_->SetTargetBitrate(bits_per_second);
+}
+
AudioEncoder::EncodedInfo AudioEncoderCng::EncodePassive(
- int frames_to_encode,
+ size_t frames_to_encode,
size_t max_encoded_bytes,
uint8_t* encoded) {
bool force_sid = last_frame_active_;
bool output_produced = false;
const size_t samples_per_10ms_frame = SamplesPer10msFrame();
- CHECK_GE(max_encoded_bytes, frames_to_encode * samples_per_10ms_frame);
+ RTC_CHECK_GE(max_encoded_bytes, frames_to_encode * samples_per_10ms_frame);
AudioEncoder::EncodedInfo info;
- for (int i = 0; i < frames_to_encode; ++i) {
- int16_t encoded_bytes_tmp = 0;
- CHECK_GE(WebRtcCng_Encode(cng_inst_.get(),
- &speech_buffer_[i * samples_per_10ms_frame],
- static_cast<int16_t>(samples_per_10ms_frame),
- encoded, &encoded_bytes_tmp, force_sid), 0);
+ for (size_t i = 0; i < frames_to_encode; ++i) {
+ // It's important not to pass &info.encoded_bytes directly to
+ // WebRtcCng_Encode(), since later loop iterations may return zero in that
+ // value, in which case we don't want to overwrite any value from an earlier
+ // iteration.
+ size_t encoded_bytes_tmp = 0;
+ RTC_CHECK_GE(WebRtcCng_Encode(cng_inst_.get(),
+ &speech_buffer_[i * samples_per_10ms_frame],
+ samples_per_10ms_frame, encoded,
+ &encoded_bytes_tmp, force_sid),
+ 0);
if (encoded_bytes_tmp > 0) {
- CHECK(!output_produced);
- info.encoded_bytes = static_cast<size_t>(encoded_bytes_tmp);
+ RTC_CHECK(!output_produced);
+ info.encoded_bytes = encoded_bytes_tmp;
output_produced = true;
force_sid = false;
}
@@ -212,19 +236,20 @@ AudioEncoder::EncodedInfo AudioEncoderCng::EncodePassive(
}
AudioEncoder::EncodedInfo AudioEncoderCng::EncodeActive(
- int frames_to_encode,
+ size_t frames_to_encode,
size_t max_encoded_bytes,
uint8_t* encoded) {
const size_t samples_per_10ms_frame = SamplesPer10msFrame();
AudioEncoder::EncodedInfo info;
- for (int i = 0; i < frames_to_encode; ++i) {
+ for (size_t i = 0; i < frames_to_encode; ++i) {
info = speech_encoder_->Encode(
rtp_timestamps_.front(), &speech_buffer_[i * samples_per_10ms_frame],
samples_per_10ms_frame, max_encoded_bytes, encoded);
if (i + 1 == frames_to_encode) {
- CHECK_GT(info.encoded_bytes, 0u) << "Encoder didn't deliver data.";
+ RTC_CHECK_GT(info.encoded_bytes, 0u) << "Encoder didn't deliver data.";
} else {
- CHECK_EQ(info.encoded_bytes, 0u) << "Encoder delivered data too early.";
+ RTC_CHECK_EQ(info.encoded_bytes, 0u)
+ << "Encoder delivered data too early.";
}
}
return info;
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng_unittest.cc b/chromium/third_party/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng_unittest.cc
index d16dd3b791a..0b837a0f12b 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng_unittest.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng_unittest.cc
@@ -34,7 +34,7 @@ static const int kCngPayloadType = 18;
class AudioEncoderCngTest : public ::testing::Test {
protected:
AudioEncoderCngTest()
- : mock_vad_(new MockVad(Vad::kVadNormal)),
+ : mock_vad_(new MockVad),
timestamp_(4711),
num_audio_samples_10ms_(0),
sample_rate_hz_(8000) {
@@ -59,14 +59,14 @@ class AudioEncoderCngTest : public ::testing::Test {
void CreateCng() {
// The config_ parameters may be changed by the TEST_Fs up until CreateCng()
// is called, thus we cannot use the values until now.
- num_audio_samples_10ms_ = 10 * sample_rate_hz_ / 1000;
+ num_audio_samples_10ms_ = static_cast<size_t>(10 * sample_rate_hz_ / 1000);
ASSERT_LE(num_audio_samples_10ms_, kMaxNumSamples);
EXPECT_CALL(mock_encoder_, SampleRateHz())
.WillRepeatedly(Return(sample_rate_hz_));
// Max10MsFramesInAPacket() is just used to verify that the SID frame period
// is not too small. The return value does not matter that much, as long as
// it is smaller than 10.
- EXPECT_CALL(mock_encoder_, Max10MsFramesInAPacket()).WillOnce(Return(1));
+ EXPECT_CALL(mock_encoder_, Max10MsFramesInAPacket()).WillOnce(Return(1u));
EXPECT_CALL(mock_encoder_, MaxEncodedBytes())
.WillRepeatedly(Return(kMockMaxEncodedBytes));
cng_.reset(new AudioEncoderCng(config_));
@@ -83,10 +83,10 @@ class AudioEncoderCngTest : public ::testing::Test {
// Expect |num_calls| calls to the encoder, all successful. The last call
// claims to have encoded |kMockMaxEncodedBytes| bytes, and all the preceding
// ones 0 bytes.
- void ExpectEncodeCalls(int num_calls) {
+ void ExpectEncodeCalls(size_t num_calls) {
InSequence s;
AudioEncoder::EncodedInfo info;
- for (int j = 0; j < num_calls - 1; ++j) {
+ for (size_t j = 0; j < num_calls - 1; ++j) {
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _))
.WillOnce(Return(info));
}
@@ -98,7 +98,7 @@ class AudioEncoderCngTest : public ::testing::Test {
// Verifies that the cng_ object waits until it has collected
// |blocks_per_frame| blocks of audio, and then dispatches all of them to
// the underlying codec (speech or cng).
- void CheckBlockGrouping(int blocks_per_frame, bool active_speech) {
+ void CheckBlockGrouping(size_t blocks_per_frame, bool active_speech) {
EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket())
.WillRepeatedly(Return(blocks_per_frame));
CreateCng();
@@ -107,7 +107,7 @@ class AudioEncoderCngTest : public ::testing::Test {
// Don't expect any calls to the encoder yet.
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _)).Times(0);
- for (int i = 0; i < blocks_per_frame - 1; ++i) {
+ for (size_t i = 0; i < blocks_per_frame - 1; ++i) {
Encode();
EXPECT_EQ(0u, encoded_info_.encoded_bytes);
}
@@ -127,14 +127,15 @@ class AudioEncoderCngTest : public ::testing::Test {
void CheckVadInputSize(int input_frame_size_ms,
int expected_first_block_size_ms,
int expected_second_block_size_ms) {
- const int blocks_per_frame = input_frame_size_ms / 10;
+ const size_t blocks_per_frame =
+ static_cast<size_t>(input_frame_size_ms / 10);
EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket())
.WillRepeatedly(Return(blocks_per_frame));
// Expect nothing to happen before the last block is sent to cng_.
EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _)).Times(0);
- for (int i = 0; i < blocks_per_frame - 1; ++i) {
+ for (size_t i = 0; i < blocks_per_frame - 1; ++i) {
Encode();
}
@@ -163,7 +164,7 @@ class AudioEncoderCngTest : public ::testing::Test {
Vad::Activity second_type) {
// Set the speech encoder frame size to 60 ms, to ensure that the VAD will
// be called twice.
- const int blocks_per_frame = 6;
+ const size_t blocks_per_frame = 6;
EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket())
.WillRepeatedly(Return(blocks_per_frame));
InSequence s;
@@ -175,7 +176,7 @@ class AudioEncoderCngTest : public ::testing::Test {
.WillOnce(Return(second_type));
}
encoded_info_.payload_type = 0;
- for (int i = 0; i < blocks_per_frame; ++i) {
+ for (size_t i = 0; i < blocks_per_frame; ++i) {
Encode();
}
return encoded_info_.payload_type != kCngPayloadType;
@@ -199,8 +200,8 @@ TEST_F(AudioEncoderCngTest, CreateAndDestroy) {
TEST_F(AudioEncoderCngTest, CheckFrameSizePropagation) {
CreateCng();
- EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket()).WillOnce(Return(17));
- EXPECT_EQ(17, cng_->Num10MsFramesInNextPacket());
+ EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket()).WillOnce(Return(17U));
+ EXPECT_EQ(17U, cng_->Num10MsFramesInNextPacket());
}
TEST_F(AudioEncoderCngTest, CheckChangeBitratePropagation) {
@@ -217,7 +218,7 @@ TEST_F(AudioEncoderCngTest, CheckProjectedPacketLossRatePropagation) {
TEST_F(AudioEncoderCngTest, EncodeCallsVad) {
EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket())
- .WillRepeatedly(Return(1));
+ .WillRepeatedly(Return(1U));
CreateCng();
EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _))
.WillOnce(Return(Vad::kPassive));
@@ -249,7 +250,7 @@ TEST_F(AudioEncoderCngTest, EncodeCollects3BlocksActiveSpeech) {
}
TEST_F(AudioEncoderCngTest, EncodePassive) {
- const int kBlocksPerFrame = 3;
+ const size_t kBlocksPerFrame = 3;
EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket())
.WillRepeatedly(Return(kBlocksPerFrame));
CreateCng();
@@ -258,7 +259,7 @@ TEST_F(AudioEncoderCngTest, EncodePassive) {
// Expect no calls at all to the speech encoder mock.
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _)).Times(0);
uint32_t expected_timestamp = timestamp_;
- for (int i = 0; i < 100; ++i) {
+ for (size_t i = 0; i < 100; ++i) {
Encode();
// Check if it was time to call the cng encoder. This is done once every
// |kBlocksPerFrame| calls.
@@ -339,7 +340,7 @@ TEST_F(AudioEncoderCngTest, VadInputSize60Ms) {
TEST_F(AudioEncoderCngTest, VerifyCngPayloadType) {
CreateCng();
EXPECT_CALL(mock_encoder_, EncodeInternal(_, _, _, _)).Times(0);
- EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket()).WillOnce(Return(1));
+ EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket()).WillOnce(Return(1U));
EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _))
.WillOnce(Return(Vad::kPassive));
encoded_info_.payload_type = 0;
@@ -352,7 +353,7 @@ TEST_F(AudioEncoderCngTest, VerifyCngPayloadType) {
TEST_F(AudioEncoderCngTest, VerifySidFrameAfterSpeech) {
CreateCng();
EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket())
- .WillRepeatedly(Return(1));
+ .WillRepeatedly(Return(1U));
// Start with encoding noise.
EXPECT_CALL(*mock_vad_, VoiceActivity(_, _, _))
.Times(2)
@@ -385,6 +386,14 @@ TEST_F(AudioEncoderCngTest, VerifySidFrameAfterSpeech) {
encoded_info_.encoded_bytes);
}
+// Resetting the CNG should reset both the VAD and the encoder.
+TEST_F(AudioEncoderCngTest, Reset) {
+ CreateCng();
+ EXPECT_CALL(mock_encoder_, Reset()).Times(1);
+ EXPECT_CALL(*mock_vad_, Reset()).Times(1);
+ cng_->Reset();
+}
+
#if GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
// This test fixture tests various error conditions that makes the
@@ -443,7 +452,7 @@ TEST_F(AudioEncoderCngDeathTest, Stereo) {
TEST_F(AudioEncoderCngDeathTest, EncoderFrameSizeTooLarge) {
CreateCng();
EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket())
- .WillRepeatedly(Return(7));
+ .WillRepeatedly(Return(7U));
for (int i = 0; i < 6; ++i)
Encode();
EXPECT_DEATH(Encode(),
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/cng/cng_unittest.cc b/chromium/third_party/webrtc/modules/audio_coding/codecs/cng/cng_unittest.cc
index 0d1c6702902..1061dca69ac 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/cng/cng_unittest.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/cng/cng_unittest.cc
@@ -99,7 +99,7 @@ TEST_F(CngTest, CngInitFail) {
TEST_F(CngTest, CngEncode) {
uint8_t sid_data[WEBRTC_CNG_MAX_LPC_ORDER + 1];
- int16_t number_bytes;
+ size_t number_bytes;
// Create encoder memory.
EXPECT_EQ(0, WebRtcCng_CreateEnc(&cng_enc_inst_));
@@ -151,7 +151,7 @@ TEST_F(CngTest, CngEncode) {
// Encode Cng with too long input vector.
TEST_F(CngTest, CngEncodeTooLong) {
uint8_t sid_data[WEBRTC_CNG_MAX_LPC_ORDER + 1];
- int16_t number_bytes;
+ size_t number_bytes;
// Create and init encoder memory.
EXPECT_EQ(0, WebRtcCng_CreateEnc(&cng_enc_inst_));
@@ -170,7 +170,7 @@ TEST_F(CngTest, CngEncodeTooLong) {
// Call encode without calling init.
TEST_F(CngTest, CngEncodeNoInit) {
uint8_t sid_data[WEBRTC_CNG_MAX_LPC_ORDER + 1];
- int16_t number_bytes;
+ size_t number_bytes;
// Create encoder memory.
EXPECT_EQ(0, WebRtcCng_CreateEnc(&cng_enc_inst_));
@@ -187,14 +187,14 @@ TEST_F(CngTest, CngEncodeNoInit) {
// Update SID parameters, for both 9 and 16 parameters.
TEST_F(CngTest, CngUpdateSid) {
uint8_t sid_data[WEBRTC_CNG_MAX_LPC_ORDER + 1];
- int16_t number_bytes;
+ size_t number_bytes;
// Create and initialize encoder and decoder memory.
EXPECT_EQ(0, WebRtcCng_CreateEnc(&cng_enc_inst_));
EXPECT_EQ(0, WebRtcCng_CreateDec(&cng_dec_inst_));
EXPECT_EQ(0, WebRtcCng_InitEnc(cng_enc_inst_, 16000, kSidNormalIntervalUpdate,
kCNGNumParamsNormal));
- EXPECT_EQ(0, WebRtcCng_InitDec(cng_dec_inst_));
+ WebRtcCng_InitDec(cng_dec_inst_);
// Run normal Encode and UpdateSid.
EXPECT_EQ(kCNGNumParamsNormal + 1, WebRtcCng_Encode(
@@ -205,7 +205,7 @@ TEST_F(CngTest, CngUpdateSid) {
// Reinit with new length.
EXPECT_EQ(0, WebRtcCng_InitEnc(cng_enc_inst_, 16000, kSidNormalIntervalUpdate,
kCNGNumParamsHigh));
- EXPECT_EQ(0, WebRtcCng_InitDec(cng_dec_inst_));
+ WebRtcCng_InitDec(cng_dec_inst_);
// Expect 0 because of unstable parameters after switching length.
EXPECT_EQ(0, WebRtcCng_Encode(cng_enc_inst_, speech_data_, 160, sid_data,
@@ -224,7 +224,7 @@ TEST_F(CngTest, CngUpdateSid) {
// Update SID parameters, with wrong parameters or without calling decode.
TEST_F(CngTest, CngUpdateSidErroneous) {
uint8_t sid_data[WEBRTC_CNG_MAX_LPC_ORDER + 1];
- int16_t number_bytes;
+ size_t number_bytes;
// Create encoder and decoder memory.
EXPECT_EQ(0, WebRtcCng_CreateEnc(&cng_enc_inst_));
@@ -242,7 +242,7 @@ TEST_F(CngTest, CngUpdateSidErroneous) {
EXPECT_EQ(6220, WebRtcCng_GetErrorCodeDec(cng_dec_inst_));
// Initialize decoder.
- EXPECT_EQ(0, WebRtcCng_InitDec(cng_dec_inst_));
+ WebRtcCng_InitDec(cng_dec_inst_);
// First run with valid parameters, then with too many CNG parameters.
// The function will operate correctly by only reading the maximum number of
@@ -261,14 +261,14 @@ TEST_F(CngTest, CngUpdateSidErroneous) {
TEST_F(CngTest, CngGenerate) {
uint8_t sid_data[WEBRTC_CNG_MAX_LPC_ORDER + 1];
int16_t out_data[640];
- int16_t number_bytes;
+ size_t number_bytes;
// Create and initialize encoder and decoder memory.
EXPECT_EQ(0, WebRtcCng_CreateEnc(&cng_enc_inst_));
EXPECT_EQ(0, WebRtcCng_CreateDec(&cng_dec_inst_));
EXPECT_EQ(0, WebRtcCng_InitEnc(cng_enc_inst_, 16000, kSidNormalIntervalUpdate,
kCNGNumParamsNormal));
- EXPECT_EQ(0, WebRtcCng_InitDec(cng_dec_inst_));
+ WebRtcCng_InitDec(cng_dec_inst_);
// Normal Encode.
EXPECT_EQ(kCNGNumParamsNormal + 1, WebRtcCng_Encode(
@@ -294,14 +294,14 @@ TEST_F(CngTest, CngGenerate) {
// Test automatic SID.
TEST_F(CngTest, CngAutoSid) {
uint8_t sid_data[WEBRTC_CNG_MAX_LPC_ORDER + 1];
- int16_t number_bytes;
+ size_t number_bytes;
// Create and initialize encoder and decoder memory.
EXPECT_EQ(0, WebRtcCng_CreateEnc(&cng_enc_inst_));
EXPECT_EQ(0, WebRtcCng_CreateDec(&cng_dec_inst_));
EXPECT_EQ(0, WebRtcCng_InitEnc(cng_enc_inst_, 16000, kSidNormalIntervalUpdate,
kCNGNumParamsNormal));
- EXPECT_EQ(0, WebRtcCng_InitDec(cng_dec_inst_));
+ WebRtcCng_InitDec(cng_dec_inst_);
// Normal Encode, 100 msec, where no SID data should be generated.
for (int i = 0; i < 10; i++) {
@@ -321,14 +321,14 @@ TEST_F(CngTest, CngAutoSid) {
// Test automatic SID, with very short interval.
TEST_F(CngTest, CngAutoSidShort) {
uint8_t sid_data[WEBRTC_CNG_MAX_LPC_ORDER + 1];
- int16_t number_bytes;
+ size_t number_bytes;
// Create and initialize encoder and decoder memory.
EXPECT_EQ(0, WebRtcCng_CreateEnc(&cng_enc_inst_));
EXPECT_EQ(0, WebRtcCng_CreateDec(&cng_dec_inst_));
EXPECT_EQ(0, WebRtcCng_InitEnc(cng_enc_inst_, 16000, kSidShortIntervalUpdate,
kCNGNumParamsNormal));
- EXPECT_EQ(0, WebRtcCng_InitDec(cng_dec_inst_));
+ WebRtcCng_InitDec(cng_dec_inst_);
// First call will never generate SID, unless forced to.
EXPECT_EQ(0, WebRtcCng_Encode(cng_enc_inst_, speech_data_, 160, sid_data,
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/cng/include/audio_encoder_cng.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/cng/include/audio_encoder_cng.h
index 51d2febad6f..3ca9eb60f33 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/cng/include/audio_encoder_cng.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/cng/include/audio_encoder_cng.h
@@ -20,57 +20,59 @@
namespace webrtc {
+// Deleter for use with scoped_ptr.
+struct CngInstDeleter {
+ void operator()(CNG_enc_inst* ptr) const { WebRtcCng_FreeEnc(ptr); }
+};
+
class Vad;
class AudioEncoderCng final : public AudioEncoder {
public:
struct Config {
- Config();
bool IsOk() const;
- int num_channels;
- int payload_type;
+ int num_channels = 1;
+ int payload_type = 13;
// Caller keeps ownership of the AudioEncoder object.
- AudioEncoder* speech_encoder;
- Vad::Aggressiveness vad_mode;
- int sid_frame_interval_ms;
- int num_cng_coefficients;
+ AudioEncoder* speech_encoder = nullptr;
+ Vad::Aggressiveness vad_mode = Vad::kVadNormal;
+ int sid_frame_interval_ms = 100;
+ int num_cng_coefficients = 8;
// The Vad pointer is mainly for testing. If a NULL pointer is passed, the
// AudioEncoderCng creates (and destroys) a Vad object internally. If an
// object is passed, the AudioEncoderCng assumes ownership of the Vad
// object.
- Vad* vad;
+ Vad* vad = nullptr;
};
explicit AudioEncoderCng(const Config& config);
-
~AudioEncoderCng() override;
+ size_t MaxEncodedBytes() const override;
int SampleRateHz() const override;
int NumChannels() const override;
- size_t MaxEncodedBytes() const override;
int RtpTimestampRateHz() const override;
- int Num10MsFramesInNextPacket() const override;
- int Max10MsFramesInAPacket() const override;
+ size_t Num10MsFramesInNextPacket() const override;
+ size_t Max10MsFramesInAPacket() const override;
int GetTargetBitrate() const override;
- void SetTargetBitrate(int bits_per_second) override;
- void SetProjectedPacketLossRate(double fraction) override;
EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
const int16_t* audio,
size_t max_encoded_bytes,
uint8_t* encoded) override;
+ void Reset() override;
+ bool SetFec(bool enable) override;
+ bool SetDtx(bool enable) override;
+ bool SetApplication(Application application) override;
+ void SetMaxPlaybackRate(int frequency_hz) override;
+ void SetProjectedPacketLossRate(double fraction) override;
+ void SetTargetBitrate(int target_bps) override;
private:
- // Deleter for use with scoped_ptr. E.g., use as
- // rtc::scoped_ptr<CNG_enc_inst, CngInstDeleter> cng_inst_;
- struct CngInstDeleter {
- inline void operator()(CNG_enc_inst* ptr) const { WebRtcCng_FreeEnc(ptr); }
- };
-
- EncodedInfo EncodePassive(int frames_to_encode,
+ EncodedInfo EncodePassive(size_t frames_to_encode,
size_t max_encoded_bytes,
uint8_t* encoded);
- EncodedInfo EncodeActive(int frames_to_encode,
+ EncodedInfo EncodeActive(size_t frames_to_encode,
size_t max_encoded_bytes,
uint8_t* encoded);
size_t SamplesPer10msFrame() const;
@@ -78,12 +80,16 @@ class AudioEncoderCng final : public AudioEncoder {
AudioEncoder* speech_encoder_;
const int cng_payload_type_;
const int num_cng_coefficients_;
+ const int sid_frame_interval_ms_;
std::vector<int16_t> speech_buffer_;
std::vector<uint32_t> rtp_timestamps_;
bool last_frame_active_;
rtc::scoped_ptr<Vad> vad_;
rtc::scoped_ptr<CNG_enc_inst, CngInstDeleter> cng_inst_;
+
+ RTC_DISALLOW_COPY_AND_ASSIGN(AudioEncoderCng);
};
} // namespace webrtc
+
#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_CNG_INCLUDE_AUDIO_ENCODER_CNG_H_
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/cng/include/webrtc_cng.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/cng/include/webrtc_cng.h
index 1ec5d67d1cb..fe87fc90cc0 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/cng/include/webrtc_cng.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/cng/include/webrtc_cng.h
@@ -70,7 +70,7 @@ int16_t WebRtcCng_CreateDec(CNG_dec_inst** cng_inst);
int WebRtcCng_InitEnc(CNG_enc_inst* cng_inst, int fs, int16_t interval,
int16_t quality);
-int16_t WebRtcCng_InitDec(CNG_dec_inst* cng_inst);
+void WebRtcCng_InitDec(CNG_dec_inst* cng_inst);
/****************************************************************************
* WebRtcCng_FreeEnc/Dec(...)
@@ -104,8 +104,8 @@ int16_t WebRtcCng_FreeDec(CNG_dec_inst* cng_inst);
* -1 - Error
*/
int WebRtcCng_Encode(CNG_enc_inst* cng_inst, int16_t* speech,
- int16_t nrOfSamples, uint8_t* SIDdata,
- int16_t* bytesOut, int16_t forceSID);
+ size_t nrOfSamples, uint8_t* SIDdata,
+ size_t* bytesOut, int16_t forceSID);
/****************************************************************************
* WebRtcCng_UpdateSid(...)
@@ -138,7 +138,7 @@ int16_t WebRtcCng_UpdateSid(CNG_dec_inst* cng_inst, uint8_t* SID,
* -1 - Error
*/
int16_t WebRtcCng_Generate(CNG_dec_inst* cng_inst, int16_t* outData,
- int16_t nrOfSamples, int16_t new_period);
+ size_t nrOfSamples, int16_t new_period);
/*****************************************************************************
* WebRtcCng_GetErrorCodeEnc/Dec(...)
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/cng/webrtc_cng.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/cng/webrtc_cng.c
index 1f6974a4562..8dddc5c717d 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/cng/webrtc_cng.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/cng/webrtc_cng.c
@@ -35,7 +35,7 @@ typedef struct WebRtcCngDecoder_ {
} WebRtcCngDecoder;
typedef struct WebRtcCngEncoder_ {
- int16_t enc_nrOfCoefs;
+ size_t enc_nrOfCoefs;
int enc_sampfreq;
int16_t enc_interval;
int16_t enc_msSinceSID;
@@ -169,7 +169,7 @@ int WebRtcCng_InitEnc(CNG_enc_inst* cng_inst, int fs, int16_t interval,
return 0;
}
-int16_t WebRtcCng_InitDec(CNG_dec_inst* cng_inst) {
+void WebRtcCng_InitDec(CNG_dec_inst* cng_inst) {
int i;
WebRtcCngDecoder* inst = (WebRtcCngDecoder*) cng_inst;
@@ -188,8 +188,6 @@ int16_t WebRtcCng_InitDec(CNG_dec_inst* cng_inst) {
inst->dec_used_reflCoefs[0] = 0;
inst->dec_used_energy = 0;
inst->initflag = 1;
-
- return 0;
}
/****************************************************************************
@@ -228,8 +226,8 @@ int16_t WebRtcCng_FreeDec(CNG_dec_inst* cng_inst) {
* -1 - Error
*/
int WebRtcCng_Encode(CNG_enc_inst* cng_inst, int16_t* speech,
- int16_t nrOfSamples, uint8_t* SIDdata,
- int16_t* bytesOut, int16_t forceSID) {
+ size_t nrOfSamples, uint8_t* SIDdata,
+ size_t* bytesOut, int16_t forceSID) {
WebRtcCngEncoder* inst = (WebRtcCngEncoder*) cng_inst;
int16_t arCoefs[WEBRTC_CNG_MAX_LPC_ORDER + 1];
@@ -240,10 +238,11 @@ int WebRtcCng_Encode(CNG_enc_inst* cng_inst, int16_t* speech,
int16_t ReflBetaComp = 13107; /* 0.4 in q15. */
int32_t outEnergy;
int outShifts;
- int i, stab;
+ size_t i;
+ int stab;
int acorrScale;
- int index;
- int16_t ind, factor;
+ size_t index;
+ size_t ind, factor;
int32_t* bptr;
int32_t blo, bhi;
int16_t negate;
@@ -281,7 +280,7 @@ int WebRtcCng_Encode(CNG_enc_inst* cng_inst, int16_t* speech,
outShifts--;
}
}
- outEnergy = WebRtcSpl_DivW32W16(outEnergy, factor);
+ outEnergy = WebRtcSpl_DivW32W16(outEnergy, (int16_t)factor);
if (outEnergy > 1) {
/* Create Hanning Window. */
@@ -390,7 +389,7 @@ int WebRtcCng_Encode(CNG_enc_inst* cng_inst, int16_t* speech,
inst->enc_msSinceSID +=
(int16_t)((1000 * nrOfSamples) / inst->enc_sampfreq);
- return inst->enc_nrOfCoefs + 1;
+ return (int)(inst->enc_nrOfCoefs + 1);
} else {
inst->enc_msSinceSID +=
(int16_t)((1000 * nrOfSamples) / inst->enc_sampfreq);
@@ -475,10 +474,10 @@ int16_t WebRtcCng_UpdateSid(CNG_dec_inst* cng_inst, uint8_t* SID,
* -1 - Error
*/
int16_t WebRtcCng_Generate(CNG_dec_inst* cng_inst, int16_t* outData,
- int16_t nrOfSamples, int16_t new_period) {
+ size_t nrOfSamples, int16_t new_period) {
WebRtcCngDecoder* inst = (WebRtcCngDecoder*) cng_inst;
- int i;
+ size_t i;
int16_t excitation[WEBRTC_CNG_MAX_OUTSIZE_ORDER];
int16_t low[WEBRTC_CNG_MAX_OUTSIZE_ORDER];
int16_t lpPoly[WEBRTC_CNG_MAX_LPC_ORDER + 1];
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/g711/audio_decoder_pcm.cc b/chromium/third_party/webrtc/modules/audio_coding/codecs/g711/audio_decoder_pcm.cc
new file mode 100644
index 00000000000..12306d9167e
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/g711/audio_decoder_pcm.cc
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_coding/codecs/g711/include/audio_decoder_pcm.h"
+
+#include "webrtc/modules/audio_coding/codecs/g711/include/g711_interface.h"
+
+namespace webrtc {
+
+void AudioDecoderPcmU::Reset() {}
+
+size_t AudioDecoderPcmU::Channels() const {
+ return num_channels_;
+}
+
+int AudioDecoderPcmU::DecodeInternal(const uint8_t* encoded,
+ size_t encoded_len,
+ int sample_rate_hz,
+ int16_t* decoded,
+ SpeechType* speech_type) {
+ RTC_DCHECK_EQ(sample_rate_hz, 8000);
+ int16_t temp_type = 1; // Default is speech.
+ size_t ret = WebRtcG711_DecodeU(encoded, encoded_len, decoded, &temp_type);
+ *speech_type = ConvertSpeechType(temp_type);
+ return static_cast<int>(ret);
+}
+
+int AudioDecoderPcmU::PacketDuration(const uint8_t* encoded,
+ size_t encoded_len) const {
+ // One encoded byte per sample per channel.
+ return static_cast<int>(encoded_len / Channels());
+}
+
+void AudioDecoderPcmA::Reset() {}
+
+size_t AudioDecoderPcmA::Channels() const {
+ return num_channels_;
+}
+
+int AudioDecoderPcmA::DecodeInternal(const uint8_t* encoded,
+ size_t encoded_len,
+ int sample_rate_hz,
+ int16_t* decoded,
+ SpeechType* speech_type) {
+ RTC_DCHECK_EQ(sample_rate_hz, 8000);
+ int16_t temp_type = 1; // Default is speech.
+ size_t ret = WebRtcG711_DecodeA(encoded, encoded_len, decoded, &temp_type);
+ *speech_type = ConvertSpeechType(temp_type);
+ return static_cast<int>(ret);
+}
+
+int AudioDecoderPcmA::PacketDuration(const uint8_t* encoded,
+ size_t encoded_len) const {
+ // One encoded byte per sample per channel.
+ return static_cast<int>(encoded_len / Channels());
+}
+
+} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.cc b/chromium/third_party/webrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.cc
index 905a7152dd4..dde3cc67998 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.cc
@@ -19,14 +19,25 @@
namespace webrtc {
namespace {
+
int16_t NumSamplesPerFrame(int num_channels,
int frame_size_ms,
int sample_rate_hz) {
int samples_per_frame = num_channels * frame_size_ms * sample_rate_hz / 1000;
- CHECK_LE(samples_per_frame, std::numeric_limits<int16_t>::max())
+ RTC_CHECK_LE(samples_per_frame, std::numeric_limits<int16_t>::max())
<< "Frame size too large.";
return static_cast<int16_t>(samples_per_frame);
}
+
+template <typename T>
+typename T::Config CreateConfig(const CodecInst& codec_inst) {
+ typename T::Config config;
+ config.frame_size_ms = codec_inst.pacsize / 8;
+ config.num_channels = codec_inst.channels;
+ config.payload_type = codec_inst.pltype;
+ return config;
+}
+
} // namespace
bool AudioEncoderPcm::Config::IsOk() const {
@@ -37,18 +48,22 @@ AudioEncoderPcm::AudioEncoderPcm(const Config& config, int sample_rate_hz)
: sample_rate_hz_(sample_rate_hz),
num_channels_(config.num_channels),
payload_type_(config.payload_type),
- num_10ms_frames_per_packet_(config.frame_size_ms / 10),
+ num_10ms_frames_per_packet_(
+ static_cast<size_t>(config.frame_size_ms / 10)),
full_frame_samples_(NumSamplesPerFrame(config.num_channels,
config.frame_size_ms,
sample_rate_hz_)),
first_timestamp_in_buffer_(0) {
- CHECK_GT(sample_rate_hz, 0) << "Sample rate must be larger than 0 Hz";
- CHECK_EQ(config.frame_size_ms % 10, 0)
+ RTC_CHECK_GT(sample_rate_hz, 0) << "Sample rate must be larger than 0 Hz";
+ RTC_CHECK_EQ(config.frame_size_ms % 10, 0)
<< "Frame size must be an integer multiple of 10 ms.";
speech_buffer_.reserve(full_frame_samples_);
}
-AudioEncoderPcm::~AudioEncoderPcm() {
+AudioEncoderPcm::~AudioEncoderPcm() = default;
+
+size_t AudioEncoderPcm::MaxEncodedBytes() const {
+ return full_frame_samples_ * BytesPerSample();
}
int AudioEncoderPcm::SampleRateHz() const {
@@ -59,15 +74,11 @@ int AudioEncoderPcm::NumChannels() const {
return num_channels_;
}
-size_t AudioEncoderPcm::MaxEncodedBytes() const {
- return full_frame_samples_ * BytesPerSample();
-}
-
-int AudioEncoderPcm::Num10MsFramesInNextPacket() const {
+size_t AudioEncoderPcm::Num10MsFramesInNextPacket() const {
return num_10ms_frames_per_packet_;
}
-int AudioEncoderPcm::Max10MsFramesInAPacket() const {
+size_t AudioEncoderPcm::Max10MsFramesInAPacket() const {
return num_10ms_frames_per_packet_;
}
@@ -90,57 +101,45 @@ AudioEncoder::EncodedInfo AudioEncoderPcm::EncodeInternal(
if (speech_buffer_.size() < full_frame_samples_) {
return EncodedInfo();
}
- CHECK_EQ(speech_buffer_.size(), full_frame_samples_);
- CHECK_GE(max_encoded_bytes, full_frame_samples_);
+ RTC_CHECK_EQ(speech_buffer_.size(), full_frame_samples_);
+ RTC_CHECK_GE(max_encoded_bytes, full_frame_samples_);
EncodedInfo info;
info.encoded_timestamp = first_timestamp_in_buffer_;
info.payload_type = payload_type_;
- int16_t ret = EncodeCall(&speech_buffer_[0], full_frame_samples_, encoded);
- CHECK_GE(ret, 0);
- info.encoded_bytes = static_cast<size_t>(ret);
+ info.encoded_bytes =
+ EncodeCall(&speech_buffer_[0], full_frame_samples_, encoded);
speech_buffer_.clear();
return info;
}
-int16_t AudioEncoderPcmA::EncodeCall(const int16_t* audio,
- size_t input_len,
- uint8_t* encoded) {
- return WebRtcG711_EncodeA(audio, static_cast<int16_t>(input_len), encoded);
+void AudioEncoderPcm::Reset() {
+ speech_buffer_.clear();
}
-int AudioEncoderPcmA::BytesPerSample() const {
- return 1;
-}
+AudioEncoderPcmA::AudioEncoderPcmA(const CodecInst& codec_inst)
+ : AudioEncoderPcmA(CreateConfig<AudioEncoderPcmA>(codec_inst)) {}
-int16_t AudioEncoderPcmU::EncodeCall(const int16_t* audio,
- size_t input_len,
- uint8_t* encoded) {
- return WebRtcG711_EncodeU(audio, static_cast<int16_t>(input_len), encoded);
+size_t AudioEncoderPcmA::EncodeCall(const int16_t* audio,
+ size_t input_len,
+ uint8_t* encoded) {
+ return WebRtcG711_EncodeA(audio, input_len, encoded);
}
-int AudioEncoderPcmU::BytesPerSample() const {
+int AudioEncoderPcmA::BytesPerSample() const {
return 1;
}
-namespace {
-template <typename T>
-typename T::Config CreateConfig(const CodecInst& codec_inst) {
- typename T::Config config;
- config.frame_size_ms = codec_inst.pacsize / 8;
- config.num_channels = codec_inst.channels;
- config.payload_type = codec_inst.pltype;
- return config;
-}
-} // namespace
+AudioEncoderPcmU::AudioEncoderPcmU(const CodecInst& codec_inst)
+ : AudioEncoderPcmU(CreateConfig<AudioEncoderPcmU>(codec_inst)) {}
-AudioEncoderMutablePcmU::AudioEncoderMutablePcmU(const CodecInst& codec_inst)
- : AudioEncoderMutableImpl<AudioEncoderPcmU>(
- CreateConfig<AudioEncoderPcmU>(codec_inst)) {
+size_t AudioEncoderPcmU::EncodeCall(const int16_t* audio,
+ size_t input_len,
+ uint8_t* encoded) {
+ return WebRtcG711_EncodeU(audio, input_len, encoded);
}
-AudioEncoderMutablePcmA::AudioEncoderMutablePcmA(const CodecInst& codec_inst)
- : AudioEncoderMutableImpl<AudioEncoderPcmA>(
- CreateConfig<AudioEncoderPcmA>(codec_inst)) {
+int AudioEncoderPcmU::BytesPerSample() const {
+ return 1;
}
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/g711/g711.gypi b/chromium/third_party/webrtc/modules/audio_coding/codecs/g711/g711.gypi
index fc86b18e5f4..d35d7874e7f 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/g711/g711.gypi
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/g711/g711.gypi
@@ -26,10 +26,12 @@
},
'sources': [
'include/g711_interface.h',
+ 'include/audio_decoder_pcm.h',
'include/audio_encoder_pcm.h',
'g711_interface.c',
'g711.c',
'g711.h',
+ 'audio_decoder_pcm.cc',
'audio_encoder_pcm.cc',
],
},
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/g711/g711_interface.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/g711/g711_interface.c
index b5795209f70..5b96a9c5553 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/g711/g711_interface.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/g711/g711_interface.c
@@ -12,40 +12,40 @@
#include "g711_interface.h"
#include "webrtc/typedefs.h"
-int16_t WebRtcG711_EncodeA(const int16_t* speechIn,
- int16_t len,
- uint8_t* encoded) {
- int n;
+size_t WebRtcG711_EncodeA(const int16_t* speechIn,
+ size_t len,
+ uint8_t* encoded) {
+ size_t n;
for (n = 0; n < len; n++)
encoded[n] = linear_to_alaw(speechIn[n]);
return len;
}
-int16_t WebRtcG711_EncodeU(const int16_t* speechIn,
- int16_t len,
- uint8_t* encoded) {
- int n;
+size_t WebRtcG711_EncodeU(const int16_t* speechIn,
+ size_t len,
+ uint8_t* encoded) {
+ size_t n;
for (n = 0; n < len; n++)
encoded[n] = linear_to_ulaw(speechIn[n]);
return len;
}
-int16_t WebRtcG711_DecodeA(const uint8_t* encoded,
- int16_t len,
- int16_t* decoded,
- int16_t* speechType) {
- int n;
+size_t WebRtcG711_DecodeA(const uint8_t* encoded,
+ size_t len,
+ int16_t* decoded,
+ int16_t* speechType) {
+ size_t n;
for (n = 0; n < len; n++)
decoded[n] = alaw_to_linear(encoded[n]);
*speechType = 1;
return len;
}
-int16_t WebRtcG711_DecodeU(const uint8_t* encoded,
- int16_t len,
- int16_t* decoded,
- int16_t* speechType) {
- int n;
+size_t WebRtcG711_DecodeU(const uint8_t* encoded,
+ size_t len,
+ int16_t* decoded,
+ int16_t* speechType) {
+ size_t n;
for (n = 0; n < len; n++)
decoded[n] = ulaw_to_linear(encoded[n]);
*speechType = 1;
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/g711/include/audio_decoder_pcm.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/g711/include/audio_decoder_pcm.h
new file mode 100644
index 00000000000..7bc37d3b7a7
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/g711/include/audio_decoder_pcm.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_G711_INCLUDE_AUDIO_DECODER_PCM_H_
+#define WEBRTC_MODULES_AUDIO_CODING_CODECS_G711_INCLUDE_AUDIO_DECODER_PCM_H_
+
+#include "webrtc/base/checks.h"
+#include "webrtc/modules/audio_coding/codecs/audio_decoder.h"
+
+namespace webrtc {
+
+class AudioDecoderPcmU final : public AudioDecoder {
+ public:
+ explicit AudioDecoderPcmU(size_t num_channels) : num_channels_(num_channels) {
+ RTC_DCHECK_GE(num_channels, 1u);
+ }
+ void Reset() override;
+ int PacketDuration(const uint8_t* encoded, size_t encoded_len) const override;
+ size_t Channels() const override;
+
+ protected:
+ int DecodeInternal(const uint8_t* encoded,
+ size_t encoded_len,
+ int sample_rate_hz,
+ int16_t* decoded,
+ SpeechType* speech_type) override;
+
+ private:
+ const size_t num_channels_;
+ RTC_DISALLOW_COPY_AND_ASSIGN(AudioDecoderPcmU);
+};
+
+class AudioDecoderPcmA final : public AudioDecoder {
+ public:
+ explicit AudioDecoderPcmA(size_t num_channels) : num_channels_(num_channels) {
+ RTC_DCHECK_GE(num_channels, 1u);
+ }
+ void Reset() override;
+ int PacketDuration(const uint8_t* encoded, size_t encoded_len) const override;
+ size_t Channels() const override;
+
+ protected:
+ int DecodeInternal(const uint8_t* encoded,
+ size_t encoded_len,
+ int sample_rate_hz,
+ int16_t* decoded,
+ SpeechType* speech_type) override;
+
+ private:
+ const size_t num_channels_;
+ RTC_DISALLOW_COPY_AND_ASSIGN(AudioDecoderPcmA);
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_G711_INCLUDE_AUDIO_DECODER_PCM_H_
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/g711/include/audio_encoder_pcm.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/g711/include/audio_encoder_pcm.h
index c8690379c6b..e532f9b1bc4 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/g711/include/audio_encoder_pcm.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/g711/include/audio_encoder_pcm.h
@@ -15,7 +15,6 @@
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/modules/audio_coding/codecs/audio_encoder.h"
-#include "webrtc/modules/audio_coding/codecs/audio_encoder_mutable_impl.h"
namespace webrtc {
@@ -36,23 +35,24 @@ class AudioEncoderPcm : public AudioEncoder {
~AudioEncoderPcm() override;
+ size_t MaxEncodedBytes() const override;
int SampleRateHz() const override;
int NumChannels() const override;
- size_t MaxEncodedBytes() const override;
- int Num10MsFramesInNextPacket() const override;
- int Max10MsFramesInAPacket() const override;
+ size_t Num10MsFramesInNextPacket() const override;
+ size_t Max10MsFramesInAPacket() const override;
int GetTargetBitrate() const override;
EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
const int16_t* audio,
size_t max_encoded_bytes,
uint8_t* encoded) override;
+ void Reset() override;
protected:
AudioEncoderPcm(const Config& config, int sample_rate_hz);
- virtual int16_t EncodeCall(const int16_t* audio,
- size_t input_len,
- uint8_t* encoded) = 0;
+ virtual size_t EncodeCall(const int16_t* audio,
+ size_t input_len,
+ uint8_t* encoded) = 0;
virtual int BytesPerSample() const = 0;
@@ -60,12 +60,14 @@ class AudioEncoderPcm : public AudioEncoder {
const int sample_rate_hz_;
const int num_channels_;
const int payload_type_;
- const int num_10ms_frames_per_packet_;
+ const size_t num_10ms_frames_per_packet_;
const size_t full_frame_samples_;
std::vector<int16_t> speech_buffer_;
uint32_t first_timestamp_in_buffer_;
};
+struct CodecInst;
+
class AudioEncoderPcmA final : public AudioEncoderPcm {
public:
struct Config : public AudioEncoderPcm::Config {
@@ -74,16 +76,18 @@ class AudioEncoderPcmA final : public AudioEncoderPcm {
explicit AudioEncoderPcmA(const Config& config)
: AudioEncoderPcm(config, kSampleRateHz) {}
+ explicit AudioEncoderPcmA(const CodecInst& codec_inst);
protected:
- int16_t EncodeCall(const int16_t* audio,
- size_t input_len,
- uint8_t* encoded) override;
+ size_t EncodeCall(const int16_t* audio,
+ size_t input_len,
+ uint8_t* encoded) override;
int BytesPerSample() const override;
private:
static const int kSampleRateHz = 8000;
+ RTC_DISALLOW_COPY_AND_ASSIGN(AudioEncoderPcmA);
};
class AudioEncoderPcmU final : public AudioEncoderPcm {
@@ -94,31 +98,20 @@ class AudioEncoderPcmU final : public AudioEncoderPcm {
explicit AudioEncoderPcmU(const Config& config)
: AudioEncoderPcm(config, kSampleRateHz) {}
+ explicit AudioEncoderPcmU(const CodecInst& codec_inst);
protected:
- int16_t EncodeCall(const int16_t* audio,
- size_t input_len,
- uint8_t* encoded) override;
+ size_t EncodeCall(const int16_t* audio,
+ size_t input_len,
+ uint8_t* encoded) override;
int BytesPerSample() const override;
private:
static const int kSampleRateHz = 8000;
-};
-
-struct CodecInst;
-
-class AudioEncoderMutablePcmU
- : public AudioEncoderMutableImpl<AudioEncoderPcmU> {
- public:
- explicit AudioEncoderMutablePcmU(const CodecInst& codec_inst);
-};
-
-class AudioEncoderMutablePcmA
- : public AudioEncoderMutableImpl<AudioEncoderPcmA> {
- public:
- explicit AudioEncoderMutablePcmA(const CodecInst& codec_inst);
+ RTC_DISALLOW_COPY_AND_ASSIGN(AudioEncoderPcmU);
};
} // namespace webrtc
+
#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_G711_INCLUDE_AUDIO_ENCODER_PCM_H_
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/g711/include/g711_interface.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/g711/include/g711_interface.h
index 5c71e9879c9..9d67222cc3a 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/g711/include/g711_interface.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/g711/include/g711_interface.h
@@ -38,9 +38,9 @@ extern "C" {
* Always equal to len input parameter.
*/
-int16_t WebRtcG711_EncodeA(const int16_t* speechIn,
- int16_t len,
- uint8_t* encoded);
+size_t WebRtcG711_EncodeA(const int16_t* speechIn,
+ size_t len,
+ uint8_t* encoded);
/****************************************************************************
* WebRtcG711_EncodeU(...)
@@ -59,9 +59,9 @@ int16_t WebRtcG711_EncodeA(const int16_t* speechIn,
* Always equal to len input parameter.
*/
-int16_t WebRtcG711_EncodeU(const int16_t* speechIn,
- int16_t len,
- uint8_t* encoded);
+size_t WebRtcG711_EncodeU(const int16_t* speechIn,
+ size_t len,
+ uint8_t* encoded);
/****************************************************************************
* WebRtcG711_DecodeA(...)
@@ -82,10 +82,10 @@ int16_t WebRtcG711_EncodeU(const int16_t* speechIn,
* -1 - Error
*/
-int16_t WebRtcG711_DecodeA(const uint8_t* encoded,
- int16_t len,
- int16_t* decoded,
- int16_t* speechType);
+size_t WebRtcG711_DecodeA(const uint8_t* encoded,
+ size_t len,
+ int16_t* decoded,
+ int16_t* speechType);
/****************************************************************************
* WebRtcG711_DecodeU(...)
@@ -106,10 +106,10 @@ int16_t WebRtcG711_DecodeA(const uint8_t* encoded,
* -1 - Error
*/
-int16_t WebRtcG711_DecodeU(const uint8_t* encoded,
- int16_t len,
- int16_t* decoded,
- int16_t* speechType);
+size_t WebRtcG711_DecodeU(const uint8_t* encoded,
+ size_t len,
+ int16_t* decoded,
+ int16_t* speechType);
/**********************************************************************
* WebRtcG711_Version(...)
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/g711/test/testG711.cc b/chromium/third_party/webrtc/modules/audio_coding/codecs/g711/test/testG711.cc
index 49c671c5a0a..94248f7a669 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/g711/test/testG711.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/g711/test/testG711.cc
@@ -24,8 +24,8 @@
#define CLOCKS_PER_SEC_G711 1000
/* function for reading audio data from PCM file */
-bool readframe(int16_t* data, FILE* inp, int length) {
- short rlen = (short) fread(data, sizeof(int16_t), length, inp);
+bool readframe(int16_t* data, FILE* inp, size_t length) {
+ size_t rlen = fread(data, sizeof(int16_t), length, inp);
if (rlen >= length)
return false;
memset(data + rlen, 0, (length - rlen) * sizeof(int16_t));
@@ -40,16 +40,14 @@ int main(int argc, char* argv[]) {
int framecnt;
bool endfile;
- int16_t framelength = 80;
-
- int err;
+ size_t framelength = 80;
/* Runtime statistics */
double starttime;
double runtime;
double length_file;
- int16_t stream_len = 0;
+ size_t stream_len = 0;
int16_t shortdata[480];
int16_t decoded[480];
uint8_t streamdata[1000];
@@ -80,11 +78,12 @@ int main(int argc, char* argv[]) {
printf("-----------------------------------\n");
printf("G.711 version: %s\n\n", versionNumber);
/* Get frame length */
- framelength = atoi(argv[1]);
- if (framelength < 0) {
- printf(" G.711: Invalid framelength %d.\n", framelength);
- exit(1);
+ int framelength_int = atoi(argv[1]);
+ if (framelength_int < 0) {
+      printf(" G.711: Invalid framelength %d.\n", framelength_int);
+ exit(1);
}
+ framelength = static_cast<size_t>(framelength_int);
/* Get compression law */
strcpy(law, argv[2]);
@@ -130,36 +129,29 @@ int main(int argc, char* argv[]) {
if (argc == 6) {
/* Write bits to file */
if (fwrite(streamdata, sizeof(unsigned char), stream_len, bitp) !=
- static_cast<size_t>(stream_len)) {
+ stream_len) {
return -1;
}
}
- err = WebRtcG711_DecodeA(streamdata, stream_len, decoded,
- speechType);
+ WebRtcG711_DecodeA(streamdata, stream_len, decoded, speechType);
} else if (!strcmp(law, "u")) {
/* u-law encoding */
stream_len = WebRtcG711_EncodeU(shortdata, framelength, streamdata);
if (argc == 6) {
/* Write bits to file */
if (fwrite(streamdata, sizeof(unsigned char), stream_len, bitp) !=
- static_cast<size_t>(stream_len)) {
+ stream_len) {
return -1;
}
}
- err = WebRtcG711_DecodeU(streamdata, stream_len, decoded, speechType);
+ WebRtcG711_DecodeU(streamdata, stream_len, decoded, speechType);
} else {
printf("Wrong law mode\n");
exit(1);
}
- if (stream_len < 0 || err < 0) {
- /* exit if returned with error */
- printf("Error in encoder/decoder\n");
- } else {
- /* Write coded speech to file */
- if (fwrite(decoded, sizeof(short), framelength, outp) !=
- static_cast<size_t>(framelength)) {
- return -1;
- }
+      /* Write decoded speech to file */
+ if (fwrite(decoded, sizeof(short), framelength, outp) != framelength) {
+ return -1;
}
}
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/g722/audio_decoder_g722.cc b/chromium/third_party/webrtc/modules/audio_coding/codecs/g722/audio_decoder_g722.cc
new file mode 100644
index 00000000000..55ebe7a315b
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/g722/audio_decoder_g722.cc
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_coding/codecs/g722/include/audio_decoder_g722.h"
+
+#include <string.h>
+
+#include "webrtc/base/checks.h"
+#include "webrtc/modules/audio_coding/codecs/g722/include/g722_interface.h"
+
+namespace webrtc {
+
+AudioDecoderG722::AudioDecoderG722() {
+ WebRtcG722_CreateDecoder(&dec_state_);
+ WebRtcG722_DecoderInit(dec_state_);
+}
+
+AudioDecoderG722::~AudioDecoderG722() {
+ WebRtcG722_FreeDecoder(dec_state_);
+}
+
+bool AudioDecoderG722::HasDecodePlc() const {
+ return false;
+}
+
+int AudioDecoderG722::DecodeInternal(const uint8_t* encoded,
+ size_t encoded_len,
+ int sample_rate_hz,
+ int16_t* decoded,
+ SpeechType* speech_type) {
+ RTC_DCHECK_EQ(sample_rate_hz, 16000);
+ int16_t temp_type = 1; // Default is speech.
+ size_t ret =
+ WebRtcG722_Decode(dec_state_, encoded, encoded_len, decoded, &temp_type);
+ *speech_type = ConvertSpeechType(temp_type);
+ return static_cast<int>(ret);
+}
+
+void AudioDecoderG722::Reset() {
+ WebRtcG722_DecoderInit(dec_state_);
+}
+
+int AudioDecoderG722::PacketDuration(const uint8_t* encoded,
+ size_t encoded_len) const {
+ // 1/2 encoded byte per sample per channel.
+ return static_cast<int>(2 * encoded_len / Channels());
+}
+
+size_t AudioDecoderG722::Channels() const {
+ return 1;
+}
+
+AudioDecoderG722Stereo::AudioDecoderG722Stereo() {
+ WebRtcG722_CreateDecoder(&dec_state_left_);
+ WebRtcG722_CreateDecoder(&dec_state_right_);
+ WebRtcG722_DecoderInit(dec_state_left_);
+ WebRtcG722_DecoderInit(dec_state_right_);
+}
+
+AudioDecoderG722Stereo::~AudioDecoderG722Stereo() {
+ WebRtcG722_FreeDecoder(dec_state_left_);
+ WebRtcG722_FreeDecoder(dec_state_right_);
+}
+
+int AudioDecoderG722Stereo::DecodeInternal(const uint8_t* encoded,
+ size_t encoded_len,
+ int sample_rate_hz,
+ int16_t* decoded,
+ SpeechType* speech_type) {
+ RTC_DCHECK_EQ(sample_rate_hz, 16000);
+ int16_t temp_type = 1; // Default is speech.
+ // De-interleave the bit-stream into two separate payloads.
+ uint8_t* encoded_deinterleaved = new uint8_t[encoded_len];
+ SplitStereoPacket(encoded, encoded_len, encoded_deinterleaved);
+ // Decode left and right.
+ size_t decoded_len = WebRtcG722_Decode(dec_state_left_, encoded_deinterleaved,
+ encoded_len / 2, decoded, &temp_type);
+ size_t ret = WebRtcG722_Decode(
+ dec_state_right_, &encoded_deinterleaved[encoded_len / 2],
+ encoded_len / 2, &decoded[decoded_len], &temp_type);
+ if (ret == decoded_len) {
+ ret += decoded_len; // Return total number of samples.
+ // Interleave output.
+ for (size_t k = ret / 2; k < ret; k++) {
+ int16_t temp = decoded[k];
+ memmove(&decoded[2 * k - ret + 2], &decoded[2 * k - ret + 1],
+ (ret - k - 1) * sizeof(int16_t));
+ decoded[2 * k - ret + 1] = temp;
+ }
+ }
+ *speech_type = ConvertSpeechType(temp_type);
+ delete[] encoded_deinterleaved;
+ return static_cast<int>(ret);
+}
+
+size_t AudioDecoderG722Stereo::Channels() const {
+ return 2;
+}
+
+void AudioDecoderG722Stereo::Reset() {
+ WebRtcG722_DecoderInit(dec_state_left_);
+ WebRtcG722_DecoderInit(dec_state_right_);
+}
+
+// Split the stereo packet and place left and right channel after each other
+// in the output array.
+void AudioDecoderG722Stereo::SplitStereoPacket(const uint8_t* encoded,
+ size_t encoded_len,
+ uint8_t* encoded_deinterleaved) {
+ // Regroup the 4 bits/sample so |l1 l2| |r1 r2| |l3 l4| |r3 r4| ...,
+ // where "lx" is 4 bits representing left sample number x, and "rx" right
+ // sample. Two samples fit in one byte, represented with |...|.
+ for (size_t i = 0; i + 1 < encoded_len; i += 2) {
+ uint8_t right_byte = ((encoded[i] & 0x0F) << 4) + (encoded[i + 1] & 0x0F);
+ encoded_deinterleaved[i] = (encoded[i] & 0xF0) + (encoded[i + 1] >> 4);
+ encoded_deinterleaved[i + 1] = right_byte;
+ }
+
+ // Move one byte representing right channel each loop, and place it at the
+ // end of the bytestream vector. After looping the data is reordered to:
+ // |l1 l2| |l3 l4| ... |l(N-1) lN| |r1 r2| |r3 r4| ... |r(N-1) r(N)|,
+ // where N is the total number of samples.
+ for (size_t i = 0; i < encoded_len / 2; i++) {
+ uint8_t right_byte = encoded_deinterleaved[i + 1];
+ memmove(&encoded_deinterleaved[i + 1], &encoded_deinterleaved[i + 2],
+ encoded_len - i - 2);
+ encoded_deinterleaved[encoded_len - 1] = right_byte;
+ }
+}
+
+} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.cc b/chromium/third_party/webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.cc
index a0d1720e460..43b097fa0ea 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.cc
@@ -19,7 +19,15 @@ namespace webrtc {
namespace {
-const int kSampleRateHz = 16000;
+const size_t kSampleRateHz = 16000;
+
+AudioEncoderG722::Config CreateConfig(const CodecInst& codec_inst) {
+ AudioEncoderG722::Config config;
+ config.num_channels = codec_inst.channels;
+ config.frame_size_ms = codec_inst.pacsize / 16;
+ config.payload_type = codec_inst.pltype;
+ return config;
+}
} // namespace
@@ -28,57 +36,53 @@ bool AudioEncoderG722::Config::IsOk() const {
(num_channels >= 1);
}
-AudioEncoderG722::EncoderState::EncoderState() {
- CHECK_EQ(0, WebRtcG722_CreateEncoder(&encoder));
- CHECK_EQ(0, WebRtcG722_EncoderInit(encoder));
-}
-
-AudioEncoderG722::EncoderState::~EncoderState() {
- CHECK_EQ(0, WebRtcG722_FreeEncoder(encoder));
-}
-
AudioEncoderG722::AudioEncoderG722(const Config& config)
: num_channels_(config.num_channels),
payload_type_(config.payload_type),
- num_10ms_frames_per_packet_(config.frame_size_ms / 10),
+ num_10ms_frames_per_packet_(
+ static_cast<size_t>(config.frame_size_ms / 10)),
num_10ms_frames_buffered_(0),
first_timestamp_in_buffer_(0),
encoders_(new EncoderState[num_channels_]),
interleave_buffer_(2 * num_channels_) {
- CHECK(config.IsOk());
- const int samples_per_channel =
+ RTC_CHECK(config.IsOk());
+ const size_t samples_per_channel =
kSampleRateHz / 100 * num_10ms_frames_per_packet_;
for (int i = 0; i < num_channels_; ++i) {
encoders_[i].speech_buffer.reset(new int16_t[samples_per_channel]);
encoders_[i].encoded_buffer.SetSize(samples_per_channel / 2);
}
+ Reset();
}
-AudioEncoderG722::~AudioEncoderG722() {}
+AudioEncoderG722::AudioEncoderG722(const CodecInst& codec_inst)
+ : AudioEncoderG722(CreateConfig(codec_inst)) {}
-int AudioEncoderG722::SampleRateHz() const {
- return kSampleRateHz;
+AudioEncoderG722::~AudioEncoderG722() = default;
+
+size_t AudioEncoderG722::MaxEncodedBytes() const {
+ return SamplesPerChannel() / 2 * num_channels_;
}
-int AudioEncoderG722::RtpTimestampRateHz() const {
- // The RTP timestamp rate for G.722 is 8000 Hz, even though it is a 16 kHz
- // codec.
- return kSampleRateHz / 2;
+int AudioEncoderG722::SampleRateHz() const {
+ return kSampleRateHz;
}
int AudioEncoderG722::NumChannels() const {
return num_channels_;
}
-size_t AudioEncoderG722::MaxEncodedBytes() const {
- return static_cast<size_t>(SamplesPerChannel() / 2 * num_channels_);
+int AudioEncoderG722::RtpTimestampRateHz() const {
+ // The RTP timestamp rate for G.722 is 8000 Hz, even though it is a 16 kHz
+ // codec.
+ return kSampleRateHz / 2;
}
-int AudioEncoderG722::Num10MsFramesInNextPacket() const {
+size_t AudioEncoderG722::Num10MsFramesInNextPacket() const {
return num_10ms_frames_per_packet_;
}
-int AudioEncoderG722::Max10MsFramesInAPacket() const {
+size_t AudioEncoderG722::Max10MsFramesInAPacket() const {
return num_10ms_frames_per_packet_;
}
@@ -92,14 +96,14 @@ AudioEncoder::EncodedInfo AudioEncoderG722::EncodeInternal(
const int16_t* audio,
size_t max_encoded_bytes,
uint8_t* encoded) {
- CHECK_GE(max_encoded_bytes, MaxEncodedBytes());
+ RTC_CHECK_GE(max_encoded_bytes, MaxEncodedBytes());
if (num_10ms_frames_buffered_ == 0)
first_timestamp_in_buffer_ = rtp_timestamp;
// Deinterleave samples and save them in each channel's buffer.
- const int start = kSampleRateHz / 100 * num_10ms_frames_buffered_;
- for (int i = 0; i < kSampleRateHz / 100; ++i)
+ const size_t start = kSampleRateHz / 100 * num_10ms_frames_buffered_;
+ for (size_t i = 0; i < kSampleRateHz / 100; ++i)
for (int j = 0; j < num_channels_; ++j)
encoders_[j].speech_buffer[start + i] = audio[i * num_channels_ + j];
@@ -109,21 +113,20 @@ AudioEncoder::EncodedInfo AudioEncoderG722::EncodeInternal(
}
// Encode each channel separately.
- CHECK_EQ(num_10ms_frames_buffered_, num_10ms_frames_per_packet_);
+ RTC_CHECK_EQ(num_10ms_frames_buffered_, num_10ms_frames_per_packet_);
num_10ms_frames_buffered_ = 0;
- const int samples_per_channel = SamplesPerChannel();
+ const size_t samples_per_channel = SamplesPerChannel();
for (int i = 0; i < num_channels_; ++i) {
- const int encoded = WebRtcG722_Encode(
+ const size_t encoded = WebRtcG722_Encode(
encoders_[i].encoder, encoders_[i].speech_buffer.get(),
- samples_per_channel, encoders_[i].encoded_buffer.data<uint8_t>());
- CHECK_GE(encoded, 0);
- CHECK_EQ(encoded, samples_per_channel / 2);
+ samples_per_channel, encoders_[i].encoded_buffer.data());
+ RTC_CHECK_EQ(encoded, samples_per_channel / 2);
}
// Interleave the encoded bytes of the different channels. Each separate
// channel and the interleaved stream encodes two samples per byte, most
// significant half first.
- for (int i = 0; i < samples_per_channel / 2; ++i) {
+ for (size_t i = 0; i < samples_per_channel / 2; ++i) {
for (int j = 0; j < num_channels_; ++j) {
uint8_t two_samples = encoders_[j].encoded_buffer.data()[i];
interleave_buffer_.data()[j] = two_samples >> 4;
@@ -140,22 +143,22 @@ AudioEncoder::EncodedInfo AudioEncoderG722::EncodeInternal(
return info;
}
-int AudioEncoderG722::SamplesPerChannel() const {
- return kSampleRateHz / 100 * num_10ms_frames_per_packet_;
+void AudioEncoderG722::Reset() {
+ num_10ms_frames_buffered_ = 0;
+ for (int i = 0; i < num_channels_; ++i)
+ RTC_CHECK_EQ(0, WebRtcG722_EncoderInit(encoders_[i].encoder));
}
-namespace {
-AudioEncoderG722::Config CreateConfig(const CodecInst& codec_inst) {
- AudioEncoderG722::Config config;
- config.num_channels = codec_inst.channels;
- config.frame_size_ms = codec_inst.pacsize / 16;
- config.payload_type = codec_inst.pltype;
- return config;
+AudioEncoderG722::EncoderState::EncoderState() {
+ RTC_CHECK_EQ(0, WebRtcG722_CreateEncoder(&encoder));
}
-} // namespace
-AudioEncoderMutableG722::AudioEncoderMutableG722(const CodecInst& codec_inst)
- : AudioEncoderMutableImpl<AudioEncoderG722>(CreateConfig(codec_inst)) {
+AudioEncoderG722::EncoderState::~EncoderState() {
+ RTC_CHECK_EQ(0, WebRtcG722_FreeEncoder(encoder));
+}
+
+size_t AudioEncoderG722::SamplesPerChannel() const {
+ return kSampleRateHz / 100 * num_10ms_frames_per_packet_;
}
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/g722/g722.gypi b/chromium/third_party/webrtc/modules/audio_coding/codecs/g722/g722.gypi
index 14b34b74401..aad11e3685c 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/g722/g722.gypi
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/g722/g722.gypi
@@ -24,7 +24,9 @@
],
},
'sources': [
+ 'audio_decoder_g722.cc',
'audio_encoder_g722.cc',
+ 'include/audio_decoder_g722.h',
'include/audio_encoder_g722.h',
'include/g722_interface.h',
'g722_interface.c',
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/g722/g722_decode.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/g722/g722_decode.c
index ee0eb89618d..952a7d037f6 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/g722/g722_decode.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/g722/g722_decode.c
@@ -157,11 +157,9 @@ static void block4(G722DecoderState *s, int band, int d)
G722DecoderState* WebRtc_g722_decode_init(G722DecoderState* s,
int rate,
int options) {
-    if (s == NULL)
-    {
-        if ((s = (G722DecoderState *) malloc(sizeof(*s))) == NULL)
-            return NULL;
-    }
+    s = s ? s : malloc(sizeof(*s));
+    if (s == NULL)
+        return NULL;
memset(s, 0, sizeof(*s));
if (rate == 48000)
s->bits_per_sample = 6;
@@ -188,8 +184,8 @@ int WebRtc_g722_decode_release(G722DecoderState *s)
}
/*- End of function --------------------------------------------------------*/
-int WebRtc_g722_decode(G722DecoderState *s, int16_t amp[],
- const uint8_t g722_data[], int len)
+size_t WebRtc_g722_decode(G722DecoderState *s, int16_t amp[],
+ const uint8_t g722_data[], size_t len)
{
static const int wl[8] = {-60, -30, 58, 172, 334, 538, 1198, 3042 };
static const int rl42[16] = {0, 7, 6, 5, 4, 3, 2, 1,
@@ -258,9 +254,9 @@ int WebRtc_g722_decode(G722DecoderState *s, int16_t amp[],
int wd2;
int wd3;
int code;
- int outlen;
+ size_t outlen;
int i;
- int j;
+ size_t j;
outlen = 0;
rhigh = 0;
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/g722/g722_enc_dec.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/g722/g722_enc_dec.h
index 5cd1b2d30fa..7db4895fa5d 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/g722/g722_enc_dec.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/g722/g722_enc_dec.h
@@ -139,19 +139,19 @@ G722EncoderState* WebRtc_g722_encode_init(G722EncoderState* s,
int rate,
int options);
int WebRtc_g722_encode_release(G722EncoderState *s);
-int WebRtc_g722_encode(G722EncoderState *s,
- uint8_t g722_data[],
- const int16_t amp[],
- int len);
+size_t WebRtc_g722_encode(G722EncoderState *s,
+ uint8_t g722_data[],
+ const int16_t amp[],
+ size_t len);
G722DecoderState* WebRtc_g722_decode_init(G722DecoderState* s,
int rate,
int options);
int WebRtc_g722_decode_release(G722DecoderState *s);
-int WebRtc_g722_decode(G722DecoderState *s,
- int16_t amp[],
- const uint8_t g722_data[],
- int len);
+size_t WebRtc_g722_decode(G722DecoderState *s,
+ int16_t amp[],
+ const uint8_t g722_data[],
+ size_t len);
#ifdef __cplusplus
}
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/g722/g722_encode.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/g722/g722_encode.c
index bed2d218b19..01ec127ca18 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/g722/g722_encode.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/g722/g722_encode.c
@@ -202,8 +202,8 @@ int16_t limitValues (int16_t rl)
}
#endif
-int WebRtc_g722_encode(G722EncoderState *s, uint8_t g722_data[],
- const int16_t amp[], int len)
+size_t WebRtc_g722_encode(G722EncoderState *s, uint8_t g722_data[],
+ const int16_t amp[], size_t len)
{
static const int q6[32] =
{
@@ -275,11 +275,11 @@ int WebRtc_g722_encode(G722EncoderState *s, uint8_t g722_data[],
int eh;
int mih;
int i;
- int j;
+ size_t j;
/* Low and high band PCM from the QMF */
int xlow;
int xhigh;
- int g722_bytes;
+ size_t g722_bytes;
/* Even and odd tap accumulators */
int sumeven;
int sumodd;
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/g722/g722_interface.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/g722/g722_interface.c
index 1edf58dc1da..4244d5c8099 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/g722/g722_interface.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/g722/g722_interface.c
@@ -45,10 +45,10 @@ int WebRtcG722_FreeEncoder(G722EncInst *G722enc_inst)
return WebRtc_g722_encode_release((G722EncoderState*) G722enc_inst);
}
-int16_t WebRtcG722_Encode(G722EncInst *G722enc_inst,
- const int16_t* speechIn,
- int16_t len,
- uint8_t* encoded)
+size_t WebRtcG722_Encode(G722EncInst *G722enc_inst,
+ const int16_t* speechIn,
+ size_t len,
+ uint8_t* encoded)
{
unsigned char *codechar = (unsigned char*) encoded;
// Encode the input speech vector
@@ -66,17 +66,10 @@ int16_t WebRtcG722_CreateDecoder(G722DecInst **G722dec_inst)
}
}
-int16_t WebRtcG722_DecoderInit(G722DecInst *G722dec_inst)
-{
- // Create and/or reset the G.722 decoder
- // Bitrate 64 kbps and wideband mode (2)
- G722dec_inst = (G722DecInst *) WebRtc_g722_decode_init(
- (G722DecoderState*) G722dec_inst, 64000, 2);
- if (G722dec_inst == NULL) {
- return -1;
- } else {
- return 0;
- }
+void WebRtcG722_DecoderInit(G722DecInst* inst) {
+ // Create and/or reset the G.722 decoder
+ // Bitrate 64 kbps and wideband mode (2)
+ WebRtc_g722_decode_init((G722DecoderState*)inst, 64000, 2);
}
int WebRtcG722_FreeDecoder(G722DecInst *G722dec_inst)
@@ -85,11 +78,11 @@ int WebRtcG722_FreeDecoder(G722DecInst *G722dec_inst)
return WebRtc_g722_decode_release((G722DecoderState*) G722dec_inst);
}
-int16_t WebRtcG722_Decode(G722DecInst *G722dec_inst,
- const uint8_t *encoded,
- int16_t len,
- int16_t *decoded,
- int16_t *speechType)
+size_t WebRtcG722_Decode(G722DecInst *G722dec_inst,
+ const uint8_t *encoded,
+ size_t len,
+ int16_t *decoded,
+ int16_t *speechType)
{
// Decode the G.722 encoder stream
*speechType=G722_WEBRTC_SPEECH;
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/g722/include/audio_decoder_g722.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/g722/include/audio_decoder_g722.h
new file mode 100644
index 00000000000..b9fa68fc488
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/g722/include/audio_decoder_g722.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_G722_INCLUDE_AUDIO_DECODER_G722_H_
+#define WEBRTC_MODULES_AUDIO_CODING_CODECS_G722_INCLUDE_AUDIO_DECODER_G722_H_
+
+#include "webrtc/modules/audio_coding/codecs/audio_decoder.h"
+
+typedef struct WebRtcG722DecInst G722DecInst;
+
+namespace webrtc {
+
+class AudioDecoderG722 final : public AudioDecoder {
+ public:
+ AudioDecoderG722();
+ ~AudioDecoderG722() override;
+ bool HasDecodePlc() const override;
+ void Reset() override;
+ int PacketDuration(const uint8_t* encoded, size_t encoded_len) const override;
+ size_t Channels() const override;
+
+ protected:
+ int DecodeInternal(const uint8_t* encoded,
+ size_t encoded_len,
+ int sample_rate_hz,
+ int16_t* decoded,
+ SpeechType* speech_type) override;
+
+ private:
+ G722DecInst* dec_state_;
+ RTC_DISALLOW_COPY_AND_ASSIGN(AudioDecoderG722);
+};
+
+class AudioDecoderG722Stereo final : public AudioDecoder {
+ public:
+ AudioDecoderG722Stereo();
+ ~AudioDecoderG722Stereo() override;
+ void Reset() override;
+
+ protected:
+ int DecodeInternal(const uint8_t* encoded,
+ size_t encoded_len,
+ int sample_rate_hz,
+ int16_t* decoded,
+ SpeechType* speech_type) override;
+ size_t Channels() const override;
+
+ private:
+ // Splits the stereo-interleaved payload in |encoded| into separate payloads
+ // for left and right channels. The separated payloads are written to
+ // |encoded_deinterleaved|, which must hold at least |encoded_len| samples.
+ // The left channel starts at offset 0, while the right channel starts at
+ // offset encoded_len / 2 into |encoded_deinterleaved|.
+ void SplitStereoPacket(const uint8_t* encoded,
+ size_t encoded_len,
+ uint8_t* encoded_deinterleaved);
+
+ G722DecInst* dec_state_left_;
+ G722DecInst* dec_state_right_;
+ RTC_DISALLOW_COPY_AND_ASSIGN(AudioDecoderG722Stereo);
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_G722_INCLUDE_AUDIO_DECODER_G722_H_
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/g722/include/audio_encoder_g722.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/g722/include/audio_encoder_g722.h
index 9b57fbe6252..12495c5f488 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/g722/include/audio_encoder_g722.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/g722/include/audio_encoder_g722.h
@@ -14,36 +14,38 @@
#include "webrtc/base/buffer.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/modules/audio_coding/codecs/audio_encoder.h"
-#include "webrtc/modules/audio_coding/codecs/audio_encoder_mutable_impl.h"
#include "webrtc/modules/audio_coding/codecs/g722/include/g722_interface.h"
namespace webrtc {
+struct CodecInst;
+
class AudioEncoderG722 final : public AudioEncoder {
public:
struct Config {
- Config() : payload_type(9), frame_size_ms(20), num_channels(1) {}
bool IsOk() const;
- int payload_type;
- int frame_size_ms;
- int num_channels;
+ int payload_type = 9;
+ int frame_size_ms = 20;
+ int num_channels = 1;
};
explicit AudioEncoderG722(const Config& config);
+ explicit AudioEncoderG722(const CodecInst& codec_inst);
~AudioEncoderG722() override;
+ size_t MaxEncodedBytes() const override;
int SampleRateHz() const override;
int NumChannels() const override;
- size_t MaxEncodedBytes() const override;
int RtpTimestampRateHz() const override;
- int Num10MsFramesInNextPacket() const override;
- int Max10MsFramesInAPacket() const override;
+ size_t Num10MsFramesInNextPacket() const override;
+ size_t Max10MsFramesInAPacket() const override;
int GetTargetBitrate() const override;
EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
const int16_t* audio,
size_t max_encoded_bytes,
uint8_t* encoded) override;
+ void Reset() override;
private:
// The encoder state for one channel.
@@ -55,23 +57,16 @@ class AudioEncoderG722 final : public AudioEncoder {
~EncoderState();
};
- int SamplesPerChannel() const;
+ size_t SamplesPerChannel() const;
const int num_channels_;
const int payload_type_;
- const int num_10ms_frames_per_packet_;
- int num_10ms_frames_buffered_;
+ const size_t num_10ms_frames_per_packet_;
+ size_t num_10ms_frames_buffered_;
uint32_t first_timestamp_in_buffer_;
const rtc::scoped_ptr<EncoderState[]> encoders_;
rtc::Buffer interleave_buffer_;
-};
-
-struct CodecInst;
-
-class AudioEncoderMutableG722
- : public AudioEncoderMutableImpl<AudioEncoderG722> {
- public:
- explicit AudioEncoderMutableG722(const CodecInst& codec_inst);
+ RTC_DISALLOW_COPY_AND_ASSIGN(AudioEncoderG722);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/g722/include/g722_interface.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/g722/include/g722_interface.h
index 46ff3b0f019..e3133d6cf70 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/g722/include/g722_interface.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/g722/include/g722_interface.h
@@ -94,10 +94,10 @@ int WebRtcG722_FreeEncoder(G722EncInst *G722enc_inst);
* Return value : Length (in bytes) of coded data
*/
-int16_t WebRtcG722_Encode(G722EncInst* G722enc_inst,
- const int16_t* speechIn,
- int16_t len,
- uint8_t* encoded);
+size_t WebRtcG722_Encode(G722EncInst* G722enc_inst,
+ const int16_t* speechIn,
+ size_t len,
+ uint8_t* encoded);
/****************************************************************************
@@ -113,22 +113,16 @@ int16_t WebRtcG722_Encode(G722EncInst* G722enc_inst,
*/
int16_t WebRtcG722_CreateDecoder(G722DecInst **G722dec_inst);
-
/****************************************************************************
* WebRtcG722_DecoderInit(...)
*
- * This function initializes a G729 instance
+ * This function initializes a G722 instance
*
* Input:
- * - G729_decinst_t : G729 instance, i.e. the user that should receive
- * be initialized
- *
- * Return value : 0 - Ok
- * -1 - Error
+ * - inst : G722 instance
*/
-int16_t WebRtcG722_DecoderInit(G722DecInst *G722dec_inst);
-
+void WebRtcG722_DecoderInit(G722DecInst* inst);
/****************************************************************************
* WebRtcG722_FreeDecoder(...)
@@ -162,15 +156,14 @@ int WebRtcG722_FreeDecoder(G722DecInst *G722dec_inst);
* - speechType : 1 normal, 2 CNG (Since G722 does not have its own
* DTX/CNG scheme it should always return 1)
*
- * Return value : >0 - Samples in decoded vector
- * -1 - Error
+ * Return value : Samples in decoded vector
*/
-int16_t WebRtcG722_Decode(G722DecInst *G722dec_inst,
- const uint8_t* encoded,
- int16_t len,
- int16_t *decoded,
- int16_t *speechType);
+size_t WebRtcG722_Decode(G722DecInst *G722dec_inst,
+ const uint8_t* encoded,
+ size_t len,
+ int16_t *decoded,
+ int16_t *speechType);
/****************************************************************************
* WebRtcG722_Version(...)
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/g722/test/testG722.cc b/chromium/third_party/webrtc/modules/audio_coding/codecs/g722/test/testG722.cc
index 6a6f03c31fa..b473c138c6e 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/g722/test/testG722.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/g722/test/testG722.cc
@@ -29,9 +29,9 @@ typedef struct WebRtcG722EncInst G722EncInst;
typedef struct WebRtcG722DecInst G722DecInst;
/* function for reading audio data from PCM file */
-bool readframe(int16_t *data, FILE *inp, int length)
+bool readframe(int16_t *data, FILE *inp, size_t length)
{
- short rlen = (short)fread(data, sizeof(int16_t), length, inp);
+ size_t rlen = fread(data, sizeof(int16_t), length, inp);
if (rlen >= length)
return false;
memset(data + rlen, 0, (length - rlen) * sizeof(int16_t));
@@ -45,17 +45,16 @@ int main(int argc, char* argv[])
int framecnt;
bool endfile;
- int16_t framelength = 160;
+ size_t framelength = 160;
G722EncInst *G722enc_inst;
G722DecInst *G722dec_inst;
- int err;
/* Runtime statistics */
double starttime;
double runtime = 0;
double length_file;
- int16_t stream_len = 0;
+ size_t stream_len = 0;
int16_t shortdata[960];
int16_t decoded[960];
uint8_t streamdata[80 * 6];
@@ -78,11 +77,12 @@ int main(int argc, char* argv[])
}
/* Get frame length */
- framelength = atoi(argv[1]);
- if (framelength < 0) {
- printf(" G.722: Invalid framelength %d.\n", framelength);
+ int framelength_int = atoi(argv[1]);
+ if (framelength_int < 0) {
+ printf(" G.722: Invalid framelength %d.\n", framelength_int);
exit(1);
}
+ framelength = static_cast<size_t>(framelength_int);
/* Get Input and Output files */
sscanf(argv[2], "%s", inname);
@@ -124,26 +124,21 @@ int main(int argc, char* argv[])
/* G.722 encoding + decoding */
stream_len = WebRtcG722_Encode((G722EncInst *)G722enc_inst, shortdata, framelength, streamdata);
- err = WebRtcG722_Decode(G722dec_inst, streamdata, stream_len, decoded,
- speechType);
+ WebRtcG722_Decode(G722dec_inst, streamdata, stream_len, decoded,
+ speechType);
/* Stop clock after call to encoder and decoder */
runtime += (double)((clock()/(double)CLOCKS_PER_SEC_G722)-starttime);
- if (stream_len < 0 || err < 0) {
- /* exit if returned with error */
- printf("Error in encoder/decoder\n");
- } else {
- /* Write coded bits to file */
- if (fwrite(streamdata, sizeof(short), stream_len / 2, outbitp) !=
- static_cast<size_t>(stream_len / 2)) {
- return -1;
- }
- /* Write coded speech to file */
- if (fwrite(decoded, sizeof(short), framelength, outp) !=
- static_cast<size_t>(framelength)) {
- return -1;
- }
+ /* Write coded bits to file */
+ if (fwrite(streamdata, sizeof(short), stream_len / 2, outbitp) !=
+ stream_len / 2) {
+ return -1;
+ }
+ /* Write coded speech to file */
+ if (fwrite(decoded, sizeof(short), framelength, outp) !=
+ framelength) {
+ return -1;
}
}
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/abs_quant.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/abs_quant.c
index 75fc970ddec..263749ad2a0 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/abs_quant.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/abs_quant.c
@@ -36,7 +36,7 @@ void WebRtcIlbcfix_AbsQuant(
int16_t *weightDenum /* (i) denominator of synthesis filter */
) {
int16_t *syntOut;
- int16_t quantLen[2];
+ size_t quantLen[2];
/* Stack based */
int16_t syntOutBuf[LPC_FILTERORDER+STATE_SHORT_LEN_30MS];
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/abs_quant_loop.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/abs_quant_loop.c
index d26fb5d6c9e..4b764534467 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/abs_quant_loop.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/abs_quant_loop.c
@@ -21,9 +21,9 @@
#include "sort_sq.h"
void WebRtcIlbcfix_AbsQuantLoop(int16_t *syntOutIN, int16_t *in_weightedIN,
- int16_t *weightDenumIN, int16_t *quantLenIN,
+ int16_t *weightDenumIN, size_t *quantLenIN,
int16_t *idxVecIN ) {
- int k1, k2;
+ size_t k1, k2;
int16_t index;
int32_t toQW32;
int32_t toQ32;
@@ -33,7 +33,7 @@ void WebRtcIlbcfix_AbsQuantLoop(int16_t *syntOutIN, int16_t *in_weightedIN,
int16_t *syntOut = syntOutIN;
int16_t *in_weighted = in_weightedIN;
int16_t *weightDenum = weightDenumIN;
- int16_t *quantLen = quantLenIN;
+ size_t *quantLen = quantLenIN;
int16_t *idxVec = idxVecIN;
for(k1=0;k1<2;k1++) {
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/abs_quant_loop.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/abs_quant_loop.h
index 50c6ffeb591..c8bf67575ff 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/abs_quant_loop.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/abs_quant_loop.h
@@ -27,7 +27,7 @@
*---------------------------------------------------------------*/
void WebRtcIlbcfix_AbsQuantLoop(int16_t *syntOutIN, int16_t *in_weightedIN,
- int16_t *weightDenumIN, int16_t *quantLenIN,
+ int16_t *weightDenumIN, size_t *quantLenIN,
int16_t *idxVecIN);
#endif
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/audio_decoder_ilbc.cc b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/audio_decoder_ilbc.cc
new file mode 100644
index 00000000000..998e10df788
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/audio_decoder_ilbc.cc
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_coding/codecs/ilbc/interface/audio_decoder_ilbc.h"
+
+#include "webrtc/base/checks.h"
+#include "webrtc/modules/audio_coding/codecs/ilbc/interface/ilbc.h"
+
+namespace webrtc {
+
+AudioDecoderIlbc::AudioDecoderIlbc() {
+ WebRtcIlbcfix_DecoderCreate(&dec_state_);
+ WebRtcIlbcfix_Decoderinit30Ms(dec_state_);
+}
+
+AudioDecoderIlbc::~AudioDecoderIlbc() {
+ WebRtcIlbcfix_DecoderFree(dec_state_);
+}
+
+bool AudioDecoderIlbc::HasDecodePlc() const {
+ return true;
+}
+
+int AudioDecoderIlbc::DecodeInternal(const uint8_t* encoded,
+ size_t encoded_len,
+ int sample_rate_hz,
+ int16_t* decoded,
+ SpeechType* speech_type) {
+ RTC_DCHECK_EQ(sample_rate_hz, 8000);
+ int16_t temp_type = 1; // Default is speech.
+ int ret = WebRtcIlbcfix_Decode(dec_state_, encoded, encoded_len, decoded,
+ &temp_type);
+ *speech_type = ConvertSpeechType(temp_type);
+ return ret;
+}
+
+size_t AudioDecoderIlbc::DecodePlc(size_t num_frames, int16_t* decoded) {
+ return WebRtcIlbcfix_NetEqPlc(dec_state_, decoded, num_frames);
+}
+
+void AudioDecoderIlbc::Reset() {
+ WebRtcIlbcfix_Decoderinit30Ms(dec_state_);
+}
+
+size_t AudioDecoderIlbc::Channels() const {
+ return 1;
+}
+
+} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.cc b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.cc
index 8dc9bdf4bdd..e3d729f5745 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.cc
@@ -22,28 +22,42 @@ namespace {
const int kSampleRateHz = 8000;
+AudioEncoderIlbc::Config CreateConfig(const CodecInst& codec_inst) {
+ AudioEncoderIlbc::Config config;
+ config.frame_size_ms = codec_inst.pacsize / 8;
+ config.payload_type = codec_inst.pltype;
+ return config;
+}
+
} // namespace
+// static
+const size_t AudioEncoderIlbc::kMaxSamplesPerPacket;
+
bool AudioEncoderIlbc::Config::IsOk() const {
return (frame_size_ms == 20 || frame_size_ms == 30 || frame_size_ms == 40 ||
frame_size_ms == 60) &&
- (kSampleRateHz / 100 * (frame_size_ms / 10)) <= kMaxSamplesPerPacket;
+ static_cast<size_t>(kSampleRateHz / 100 * (frame_size_ms / 10)) <=
+ kMaxSamplesPerPacket;
}
AudioEncoderIlbc::AudioEncoderIlbc(const Config& config)
- : payload_type_(config.payload_type),
- num_10ms_frames_per_packet_(config.frame_size_ms / 10),
- num_10ms_frames_buffered_(0) {
- CHECK(config.IsOk());
- CHECK_EQ(0, WebRtcIlbcfix_EncoderCreate(&encoder_));
- const int encoder_frame_size_ms = config.frame_size_ms > 30
- ? config.frame_size_ms / 2
- : config.frame_size_ms;
- CHECK_EQ(0, WebRtcIlbcfix_EncoderInit(encoder_, encoder_frame_size_ms));
+ : config_(config),
+ num_10ms_frames_per_packet_(
+ static_cast<size_t>(config.frame_size_ms / 10)),
+ encoder_(nullptr) {
+ Reset();
}
+AudioEncoderIlbc::AudioEncoderIlbc(const CodecInst& codec_inst)
+ : AudioEncoderIlbc(CreateConfig(codec_inst)) {}
+
AudioEncoderIlbc::~AudioEncoderIlbc() {
- CHECK_EQ(0, WebRtcIlbcfix_EncoderFree(encoder_));
+ RTC_CHECK_EQ(0, WebRtcIlbcfix_EncoderFree(encoder_));
+}
+
+size_t AudioEncoderIlbc::MaxEncodedBytes() const {
+ return RequiredOutputSizeBytes();
}
int AudioEncoderIlbc::SampleRateHz() const {
@@ -54,15 +68,11 @@ int AudioEncoderIlbc::NumChannels() const {
return 1;
}
-size_t AudioEncoderIlbc::MaxEncodedBytes() const {
- return RequiredOutputSizeBytes();
-}
-
-int AudioEncoderIlbc::Num10MsFramesInNextPacket() const {
+size_t AudioEncoderIlbc::Num10MsFramesInNextPacket() const {
return num_10ms_frames_per_packet_;
}
-int AudioEncoderIlbc::Max10MsFramesInAPacket() const {
+size_t AudioEncoderIlbc::Max10MsFramesInAPacket() const {
return num_10ms_frames_per_packet_;
}
@@ -84,7 +94,7 @@ AudioEncoder::EncodedInfo AudioEncoderIlbc::EncodeInternal(
const int16_t* audio,
size_t max_encoded_bytes,
uint8_t* encoded) {
- DCHECK_GE(max_encoded_bytes, RequiredOutputSizeBytes());
+ RTC_DCHECK_GE(max_encoded_bytes, RequiredOutputSizeBytes());
// Save timestamp if starting a new packet.
if (num_10ms_frames_buffered_ == 0)
@@ -102,22 +112,34 @@ AudioEncoder::EncodedInfo AudioEncoderIlbc::EncodeInternal(
}
// Encode buffered input.
- DCHECK_EQ(num_10ms_frames_buffered_, num_10ms_frames_per_packet_);
+ RTC_DCHECK_EQ(num_10ms_frames_buffered_, num_10ms_frames_per_packet_);
num_10ms_frames_buffered_ = 0;
const int output_len = WebRtcIlbcfix_Encode(
encoder_,
input_buffer_,
kSampleRateHz / 100 * num_10ms_frames_per_packet_,
encoded);
- CHECK_GE(output_len, 0);
+ RTC_CHECK_GE(output_len, 0);
EncodedInfo info;
- info.encoded_bytes = output_len;
- DCHECK_EQ(info.encoded_bytes, RequiredOutputSizeBytes());
+ info.encoded_bytes = static_cast<size_t>(output_len);
+ RTC_DCHECK_EQ(info.encoded_bytes, RequiredOutputSizeBytes());
info.encoded_timestamp = first_timestamp_in_buffer_;
- info.payload_type = payload_type_;
+ info.payload_type = config_.payload_type;
return info;
}
+void AudioEncoderIlbc::Reset() {
+ if (encoder_)
+ RTC_CHECK_EQ(0, WebRtcIlbcfix_EncoderFree(encoder_));
+ RTC_CHECK(config_.IsOk());
+ RTC_CHECK_EQ(0, WebRtcIlbcfix_EncoderCreate(&encoder_));
+ const int encoder_frame_size_ms = config_.frame_size_ms > 30
+ ? config_.frame_size_ms / 2
+ : config_.frame_size_ms;
+ RTC_CHECK_EQ(0, WebRtcIlbcfix_EncoderInit(encoder_, encoder_frame_size_ms));
+ num_10ms_frames_buffered_ = 0;
+}
+
size_t AudioEncoderIlbc::RequiredOutputSizeBytes() const {
switch (num_10ms_frames_per_packet_) {
case 2: return 38;
@@ -128,17 +150,4 @@ size_t AudioEncoderIlbc::RequiredOutputSizeBytes() const {
}
}
-namespace {
-AudioEncoderIlbc::Config CreateConfig(const CodecInst& codec_inst) {
- AudioEncoderIlbc::Config config;
- config.frame_size_ms = codec_inst.pacsize / 8;
- config.payload_type = codec_inst.pltype;
- return config;
-}
-} // namespace
-
-AudioEncoderMutableIlbc::AudioEncoderMutableIlbc(const CodecInst& codec_inst)
- : AudioEncoderMutableImpl<AudioEncoderIlbc>(CreateConfig(codec_inst)) {
-}
-
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/augmented_cb_corr.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/augmented_cb_corr.c
index c24b4a6ac0c..1a3735fc3db 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/augmented_cb_corr.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/augmented_cb_corr.c
@@ -28,14 +28,14 @@ void WebRtcIlbcfix_AugmentedCbCorr(
int32_t *crossDot, /* (o) The cross correlation between
the target and the Augmented
vector */
- int16_t low, /* (i) Lag to start from (typically
+ size_t low, /* (i) Lag to start from (typically
20) */
- int16_t high, /* (i) Lag to end at (typically 39) */
+ size_t high, /* (i) Lag to end at (typically 39) */
int scale) /* (i) Scale factor to use for
the crossDot */
{
- int lagcount;
- int16_t ilow;
+ size_t lagcount;
+ size_t ilow;
int16_t *targetPtr;
int32_t *crossDotPtr;
int16_t *iSPtr=interpSamples;
@@ -46,7 +46,7 @@ void WebRtcIlbcfix_AugmentedCbCorr(
crossDotPtr=crossDot;
for (lagcount=low; lagcount<=high; lagcount++) {
- ilow = (int16_t) (lagcount-4);
+ ilow = lagcount - 4;
/* Compute dot product for the first (lagcount-4) samples */
(*crossDotPtr) = WebRtcSpl_DotProductWithScale(target, buffer-lagcount, ilow, scale);
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/augmented_cb_corr.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/augmented_cb_corr.h
index a0435c434af..c5c408880e9 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/augmented_cb_corr.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/augmented_cb_corr.h
@@ -33,9 +33,9 @@ void WebRtcIlbcfix_AugmentedCbCorr(
int32_t *crossDot, /* (o) The cross correlation between
the target and the Augmented
vector */
- int16_t low, /* (i) Lag to start from (typically
+ size_t low, /* (i) Lag to start from (typically
20) */
- int16_t high, /* (i) Lag to end at (typically 39 */
+  size_t high, /* (i) Lag to end at (typically 39) */
int scale); /* (i) Scale factor to use for the crossDot */
#endif
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_construct.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_construct.c
index 9d11b83accb..cacf3ace289 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_construct.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_construct.c
@@ -29,10 +29,10 @@ void WebRtcIlbcfix_CbConstruct(
int16_t *index, /* (i) Codebook indices */
int16_t *gain_index, /* (i) Gain quantization indices */
int16_t *mem, /* (i) Buffer for codevector construction */
- int16_t lMem, /* (i) Length of buffer */
- int16_t veclen /* (i) Length of vector */
+ size_t lMem, /* (i) Length of buffer */
+ size_t veclen /* (i) Length of vector */
){
- int j;
+ size_t j;
int16_t gain[CB_NSTAGES];
/* Stack based */
int16_t cbvec0[SUBL];
@@ -50,9 +50,9 @@ void WebRtcIlbcfix_CbConstruct(
/* codebook vector construction and construction of total vector */
/* Stack based */
- WebRtcIlbcfix_GetCbVec(cbvec0, mem, index[0], lMem, veclen);
- WebRtcIlbcfix_GetCbVec(cbvec1, mem, index[1], lMem, veclen);
- WebRtcIlbcfix_GetCbVec(cbvec2, mem, index[2], lMem, veclen);
+ WebRtcIlbcfix_GetCbVec(cbvec0, mem, (size_t)index[0], lMem, veclen);
+ WebRtcIlbcfix_GetCbVec(cbvec1, mem, (size_t)index[1], lMem, veclen);
+ WebRtcIlbcfix_GetCbVec(cbvec2, mem, (size_t)index[2], lMem, veclen);
gainPtr = &gain[0];
for (j=0;j<veclen;j++) {
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_construct.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_construct.h
index 2e9080f401c..b676ef97ad2 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_construct.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_construct.h
@@ -30,8 +30,8 @@ void WebRtcIlbcfix_CbConstruct(
int16_t *index, /* (i) Codebook indices */
int16_t *gain_index, /* (i) Gain quantization indices */
int16_t *mem, /* (i) Buffer for codevector construction */
- int16_t lMem, /* (i) Length of buffer */
- int16_t veclen /* (i) Length of vector */
+ size_t lMem, /* (i) Length of buffer */
+ size_t veclen /* (i) Length of vector */
);
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy.c
index 1b8c506fc0e..6ad2f8eb7ad 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy.c
@@ -27,15 +27,15 @@
*----------------------------------------------------------------*/
void WebRtcIlbcfix_CbMemEnergy(
- int16_t range,
+ size_t range,
int16_t *CB, /* (i) The CB memory (1:st section) */
int16_t *filteredCB, /* (i) The filtered CB memory (2:nd section) */
- int16_t lMem, /* (i) Length of the CB memory */
- int16_t lTarget, /* (i) Length of the target vector */
+ size_t lMem, /* (i) Length of the CB memory */
+ size_t lTarget, /* (i) Length of the target vector */
int16_t *energyW16, /* (o) Energy in the CB vectors */
int16_t *energyShifts, /* (o) Shift value of the energy */
int scale, /* (i) The scaling of all energy values */
- int16_t base_size /* (i) Index to where energy values should be stored */
+ size_t base_size /* (i) Index to where energy values should be stored */
) {
int16_t *ppi, *ppo, *pp;
int32_t energy, tmp32;
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy.h
index 68dd7dac500..6da2f43c0b9 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy.h
@@ -20,15 +20,15 @@
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_CB_MEM_ENERGY_H_
void WebRtcIlbcfix_CbMemEnergy(
- int16_t range,
+ size_t range,
int16_t *CB, /* (i) The CB memory (1:st section) */
int16_t *filteredCB, /* (i) The filtered CB memory (2:nd section) */
- int16_t lMem, /* (i) Length of the CB memory */
- int16_t lTarget, /* (i) Length of the target vector */
+ size_t lMem, /* (i) Length of the CB memory */
+ size_t lTarget, /* (i) Length of the target vector */
int16_t *energyW16, /* (o) Energy in the CB vectors */
int16_t *energyShifts, /* (o) Shift value of the energy */
int scale, /* (i) The scaling of all energy values */
- int16_t base_size /* (i) Index to where the energy values should be stored */
+ size_t base_size /* (i) Index to where energy values should be stored */
);
#endif
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_augmentation.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_augmentation.c
index 2f3c299f6ac..acd6b9c1181 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_augmentation.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_augmentation.c
@@ -23,13 +23,14 @@ void WebRtcIlbcfix_CbMemEnergyAugmentation(
int16_t *interpSamples, /* (i) The interpolated samples */
int16_t *CBmem, /* (i) The CB memory */
int scale, /* (i) The scaling of all energy values */
- int16_t base_size, /* (i) Index to where energy values should be stored */
+ size_t base_size, /* (i) Index to where energy values should be stored */
int16_t *energyW16, /* (o) Energy in the CB vectors */
int16_t *energyShifts /* (o) Shift value of the energy */
){
int32_t energy, tmp32;
int16_t *ppe, *pp, *interpSamplesPtr;
- int16_t *CBmemPtr, lagcount;
+ int16_t *CBmemPtr;
+ size_t lagcount;
int16_t *enPtr=&energyW16[base_size-20];
int16_t *enShPtr=&energyShifts[base_size-20];
int32_t nrjRecursive;
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_augmentation.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_augmentation.h
index 46fb2fdc1b8..594ba5f01bb 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_augmentation.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_augmentation.h
@@ -23,7 +23,7 @@ void WebRtcIlbcfix_CbMemEnergyAugmentation(
int16_t *interpSamples, /* (i) The interpolated samples */
int16_t *CBmem, /* (i) The CB memory */
int scale, /* (i) The scaling of all energy values */
- int16_t base_size, /* (i) Index to where energy values should be stored */
+ size_t base_size, /* (i) Index to where energy values should be stored */
int16_t *energyW16, /* (o) Energy in the CB vectors */
int16_t *energyShifts /* (o) Shift value of the energy */
);
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_calc.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_calc.c
index 481dfbac059..f2415febc0d 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_calc.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_calc.c
@@ -23,16 +23,17 @@
* sample and the last sample respectively */
void WebRtcIlbcfix_CbMemEnergyCalc(
int32_t energy, /* (i) input start energy */
- int16_t range, /* (i) number of iterations */
+ size_t range, /* (i) number of iterations */
int16_t *ppi, /* (i) input pointer 1 */
int16_t *ppo, /* (i) input pointer 2 */
int16_t *energyW16, /* (o) Energy in the CB vectors */
int16_t *energyShifts, /* (o) Shift value of the energy */
int scale, /* (i) The scaling of all energy values */
- int16_t base_size /* (i) Index to where energy values should be stored */
+ size_t base_size /* (i) Index to where energy values should be stored */
)
{
- int16_t j,shft;
+ size_t j;
+ int16_t shft;
int32_t tmp;
int16_t *eSh_ptr;
int16_t *eW16_ptr;
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_calc.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_calc.h
index 7f0cadf6e1a..2991869dd93 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_calc.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_mem_energy_calc.h
@@ -21,13 +21,13 @@
void WebRtcIlbcfix_CbMemEnergyCalc(
int32_t energy, /* (i) input start energy */
- int16_t range, /* (i) number of iterations */
+ size_t range, /* (i) number of iterations */
int16_t *ppi, /* (i) input pointer 1 */
int16_t *ppo, /* (i) input pointer 2 */
int16_t *energyW16, /* (o) Energy in the CB vectors */
int16_t *energyShifts, /* (o) Shift value of the energy */
int scale, /* (i) The scaling of all energy values */
- int16_t base_size /* (i) Index to where energy values should be stored */
+ size_t base_size /* (i) Index to where energy values should be stored */
);
#endif
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_search.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_search.c
index d502cf0c32b..be949514496 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_search.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_search.c
@@ -40,29 +40,31 @@ void WebRtcIlbcfix_CbSearch(
int16_t *gain_index, /* (o) Gain quantization indices */
int16_t *intarget, /* (i) Target vector for encoding */
int16_t *decResidual,/* (i) Decoded residual for codebook construction */
- int16_t lMem, /* (i) Length of buffer */
- int16_t lTarget, /* (i) Length of vector */
+ size_t lMem, /* (i) Length of buffer */
+ size_t lTarget, /* (i) Length of vector */
int16_t *weightDenum,/* (i) weighting filter coefficients in Q12 */
- int16_t block /* (i) the subblock number */
+ size_t block /* (i) the subblock number */
) {
- int16_t i, j, stage, range;
+ size_t i, range;
+ int16_t ii, j, stage;
int16_t *pp;
int16_t tmp;
int scale;
int16_t bits, temp1, temp2;
- int16_t base_size;
+ size_t base_size;
int32_t codedEner, targetEner;
int16_t gains[CB_NSTAGES+1];
int16_t *cb_vecPtr;
- int16_t indexOffset, sInd, eInd;
+ size_t indexOffset, sInd, eInd;
int32_t CritMax=0;
int16_t shTotMax=WEBRTC_SPL_WORD16_MIN;
- int16_t bestIndex=0;
+ size_t bestIndex=0;
int16_t bestGain=0;
- int16_t indexNew, CritNewSh;
+ size_t indexNew;
+ int16_t CritNewSh;
int32_t CritNew;
int32_t *cDotPtr;
- int16_t noOfZeros;
+ size_t noOfZeros;
int16_t *gainPtr;
int32_t t32, tmpW32;
int16_t *WebRtcIlbcfix_kGainSq5_ptr;
@@ -148,9 +150,9 @@ void WebRtcIlbcfix_CbSearch(
scale, 20, energyW16, energyShifts);
/* Compute the CB vectors' energies for the second cb section (filtered cb) */
- WebRtcIlbcfix_CbMemEnergyAugmentation(interpSamplesFilt, cbvectors,
- scale, (int16_t)(base_size + 20),
- energyW16, energyShifts);
+ WebRtcIlbcfix_CbMemEnergyAugmentation(interpSamplesFilt, cbvectors, scale,
+ base_size + 20, energyW16,
+ energyShifts);
/* Compute the CB vectors' energies and store them in the vector
* energyW16. Also the corresponding shift values are stored. The
@@ -224,7 +226,7 @@ void WebRtcIlbcfix_CbSearch(
/* Update the global best index and the corresponding gain */
WebRtcIlbcfix_CbUpdateBestIndex(
- CritNew, CritNewSh, (int16_t)(indexNew+indexOffset), cDot[indexNew+indexOffset],
+ CritNew, CritNewSh, indexNew+indexOffset, cDot[indexNew+indexOffset],
inverseEnergy[indexNew+indexOffset], inverseEnergyShifts[indexNew+indexOffset],
&CritMax, &shTotMax, &bestIndex, &bestGain);
@@ -242,11 +244,8 @@ void WebRtcIlbcfix_CbSearch(
i=sInd;
if (sInd<20) {
WebRtcIlbcfix_AugmentedCbCorr(target, cbvectors + lMem,
- interpSamplesFilt, cDot,
- (int16_t)(sInd + 20),
- (int16_t)(WEBRTC_SPL_MIN(39,
- (eInd + 20))),
- scale);
+ interpSamplesFilt, cDot, sInd + 20,
+ WEBRTC_SPL_MIN(39, (eInd + 20)), scale);
i=20;
cDotPtr = &cDot[20 - sInd];
} else {
@@ -257,7 +256,7 @@ void WebRtcIlbcfix_CbSearch(
/* Calculate the cross correlations (main part of the filtered CB) */
WebRtcSpl_CrossCorrelation(cDotPtr, target, cb_vecPtr, lTarget,
- (int16_t)(eInd - i + 1), scale, -1);
+ eInd - i + 1, scale, -1);
} else {
cDotPtr = cDot;
@@ -265,7 +264,7 @@ void WebRtcIlbcfix_CbSearch(
/* Calculate the cross correlations (main part of the filtered CB) */
WebRtcSpl_CrossCorrelation(cDotPtr, target, cb_vecPtr, lTarget,
- (int16_t)(eInd - sInd + 1), scale, -1);
+ eInd - sInd + 1, scale, -1);
}
@@ -274,17 +273,17 @@ void WebRtcIlbcfix_CbSearch(
/* Search for best index in this part of the vector */
WebRtcIlbcfix_CbSearchCore(
- cDot, (int16_t)(eInd-sInd+1), stage, inverseEnergy+indexOffset,
+ cDot, eInd-sInd+1, stage, inverseEnergy+indexOffset,
inverseEnergyShifts+indexOffset, Crit,
&indexNew, &CritNew, &CritNewSh);
/* Update the global best index and the corresponding gain */
WebRtcIlbcfix_CbUpdateBestIndex(
- CritNew, CritNewSh, (int16_t)(indexNew+indexOffset), cDot[indexNew],
+ CritNew, CritNewSh, indexNew+indexOffset, cDot[indexNew],
inverseEnergy[indexNew+indexOffset], inverseEnergyShifts[indexNew+indexOffset],
&CritMax, &shTotMax, &bestIndex, &bestGain);
- index[stage] = bestIndex;
+ index[stage] = (int16_t)bestIndex;
bestGain = WebRtcIlbcfix_GainQuant(bestGain,
@@ -297,7 +296,7 @@ void WebRtcIlbcfix_CbSearch(
if(lTarget==(STATE_LEN-iLBCenc_inst->state_short_len)) {
- if(index[stage]<base_size) {
+ if((size_t)index[stage]<base_size) {
pp=buf+lMem-lTarget-index[stage];
} else {
pp=cbvectors+lMem-lTarget-
@@ -306,16 +305,16 @@ void WebRtcIlbcfix_CbSearch(
} else {
- if (index[stage]<base_size) {
+ if ((size_t)index[stage]<base_size) {
if (index[stage]>=20) {
/* Adjust index and extract vector */
index[stage]-=20;
pp=buf+lMem-lTarget-index[stage];
} else {
/* Adjust index and extract vector */
- index[stage]+=(base_size-20);
+ index[stage]+=(int16_t)(base_size-20);
- WebRtcIlbcfix_CreateAugmentedVec((int16_t)(index[stage]-base_size+40),
+ WebRtcIlbcfix_CreateAugmentedVec(index[stage]-base_size+40,
buf+lMem, aug_vec);
pp = aug_vec;
@@ -329,8 +328,8 @@ void WebRtcIlbcfix_CbSearch(
index[stage]+base_size;
} else {
/* Adjust index and extract vector */
- index[stage]+=(base_size-20);
- WebRtcIlbcfix_CreateAugmentedVec((int16_t)(index[stage]-2*base_size+40),
+ index[stage]+=(int16_t)(base_size-20);
+ WebRtcIlbcfix_CreateAugmentedVec(index[stage]-2*base_size+40,
cbvectors+lMem, aug_vec);
pp = aug_vec;
}
@@ -381,7 +380,7 @@ void WebRtcIlbcfix_CbSearch(
WebRtcIlbcfix_kGainSq5_ptr = (int16_t*)&WebRtcIlbcfix_kGainSq5[j];
/* targetEner and codedEner are in Q(-2*scale) */
- for (i=gain_index[0];i<32;i++) {
+ for (ii=gain_index[0];ii<32;ii++) {
/* Change the index if
(codedEnergy*gainTbl[i]*gainTbl[i])<(targetEn*gain[0]*gain[0]) AND
@@ -392,8 +391,8 @@ void WebRtcIlbcfix_CbSearch(
t32 = t32 - targetEner;
if (t32 < 0) {
if ((*WebRtcIlbcfix_kGainSq5_ptr) < tmpW32) {
- j=i;
- WebRtcIlbcfix_kGainSq5_ptr = (int16_t*)&WebRtcIlbcfix_kGainSq5[i];
+ j=ii;
+ WebRtcIlbcfix_kGainSq5_ptr = (int16_t*)&WebRtcIlbcfix_kGainSq5[ii];
}
}
gainPtr++;
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_search.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_search.h
index 2fe236f4c5d..ed1580c09da 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_search.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_search.h
@@ -26,10 +26,10 @@ void WebRtcIlbcfix_CbSearch(
int16_t *gain_index, /* (o) Gain quantization indices */
int16_t *intarget, /* (i) Target vector for encoding */
int16_t *decResidual,/* (i) Decoded residual for codebook construction */
- int16_t lMem, /* (i) Length of buffer */
- int16_t lTarget, /* (i) Length of vector */
+ size_t lMem, /* (i) Length of buffer */
+ size_t lTarget, /* (i) Length of vector */
int16_t *weightDenum,/* (i) weighting filter coefficients in Q12 */
- int16_t block /* (i) the subblock number */
+ size_t block /* (i) the subblock number */
);
#endif
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_search_core.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_search_core.c
index 3deb08a75c9..fafa39f69bc 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_search_core.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_search_core.c
@@ -21,13 +21,13 @@
void WebRtcIlbcfix_CbSearchCore(
int32_t *cDot, /* (i) Cross Correlation */
- int16_t range, /* (i) Search range */
+ size_t range, /* (i) Search range */
int16_t stage, /* (i) Stage of this search */
int16_t *inverseEnergy, /* (i) Inversed energy */
int16_t *inverseEnergyShift, /* (i) Shifts of inversed energy
with the offset 2*16-29 */
int32_t *Crit, /* (o) The criteria */
- int16_t *bestIndex, /* (o) Index that corresponds to
+ size_t *bestIndex, /* (o) Index that corresponds to
maximum criteria (in this
vector) */
int32_t *bestCrit, /* (o) Value of critera for the
@@ -37,7 +37,7 @@ void WebRtcIlbcfix_CbSearchCore(
{
int32_t maxW32, tmp32;
int16_t max, sh, tmp16;
- int i;
+ size_t i;
int32_t *cDotPtr;
int16_t cDotSqW16;
int16_t *inverseEnergyPtr;
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_search_core.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_search_core.h
index e4f2e920286..9648cf29d3f 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_search_core.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_search_core.h
@@ -23,13 +23,13 @@
void WebRtcIlbcfix_CbSearchCore(
int32_t *cDot, /* (i) Cross Correlation */
- int16_t range, /* (i) Search range */
+ size_t range, /* (i) Search range */
int16_t stage, /* (i) Stage of this search */
int16_t *inverseEnergy, /* (i) Inversed energy */
int16_t *inverseEnergyShift, /* (i) Shifts of inversed energy
with the offset 2*16-29 */
int32_t *Crit, /* (o) The criteria */
- int16_t *bestIndex, /* (o) Index that corresponds to
+ size_t *bestIndex, /* (o) Index that corresponds to
maximum criteria (in this
vector) */
int32_t *bestCrit, /* (o) Value of critera for the
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_update_best_index.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_update_best_index.c
index 6fdec27aba1..fc27ea9f6ce 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_update_best_index.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_update_best_index.c
@@ -23,13 +23,13 @@
void WebRtcIlbcfix_CbUpdateBestIndex(
int32_t CritNew, /* (i) New Potentially best Criteria */
int16_t CritNewSh, /* (i) Shift value of above Criteria */
- int16_t IndexNew, /* (i) Index of new Criteria */
+ size_t IndexNew, /* (i) Index of new Criteria */
int32_t cDotNew, /* (i) Cross dot of new index */
int16_t invEnergyNew, /* (i) Inversed energy new index */
int16_t energyShiftNew, /* (i) Energy shifts of new index */
int32_t *CritMax, /* (i/o) Maximum Criteria (so far) */
int16_t *shTotMax, /* (i/o) Shifts of maximum criteria */
- int16_t *bestIndex, /* (i/o) Index that corresponds to
+ size_t *bestIndex, /* (i/o) Index that corresponds to
maximum criteria */
int16_t *bestGain) /* (i/o) Gain in Q14 that corresponds
to maximum criteria */
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_update_best_index.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_update_best_index.h
index e8519d4118b..a20fa38b2e5 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_update_best_index.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/cb_update_best_index.h
@@ -24,13 +24,13 @@
void WebRtcIlbcfix_CbUpdateBestIndex(
int32_t CritNew, /* (i) New Potentially best Criteria */
int16_t CritNewSh, /* (i) Shift value of above Criteria */
- int16_t IndexNew, /* (i) Index of new Criteria */
+ size_t IndexNew, /* (i) Index of new Criteria */
int32_t cDotNew, /* (i) Cross dot of new index */
int16_t invEnergyNew, /* (i) Inversed energy new index */
int16_t energyShiftNew, /* (i) Energy shifts of new index */
int32_t *CritMax, /* (i/o) Maximum Criteria (so far) */
int16_t *shTotMax, /* (i/o) Shifts of maximum criteria */
- int16_t *bestIndex, /* (i/o) Index that corresponds to
+ size_t *bestIndex, /* (i/o) Index that corresponds to
maximum criteria */
int16_t *bestGain); /* (i/o) Gain in Q14 that corresponds
to maximum criteria */
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/comp_corr.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/comp_corr.c
index a53e8a77f1e..7653cb0c25d 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/comp_corr.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/comp_corr.c
@@ -27,9 +27,9 @@ void WebRtcIlbcfix_CompCorr(
int32_t *corr, /* (o) cross correlation */
int32_t *ener, /* (o) energy */
int16_t *buffer, /* (i) signal buffer */
- int16_t lag, /* (i) pitch lag */
- int16_t bLen, /* (i) length of buffer */
- int16_t sRange, /* (i) correlation search length */
+ size_t lag, /* (i) pitch lag */
+ size_t bLen, /* (i) length of buffer */
+ size_t sRange, /* (i) correlation search length */
int16_t scale /* (i) number of rightshifts to use */
){
int16_t *w16ptr;
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/comp_corr.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/comp_corr.h
index 4ff80aac460..ab78c72b3e3 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/comp_corr.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/comp_corr.h
@@ -30,9 +30,9 @@ void WebRtcIlbcfix_CompCorr(
int32_t *corr, /* (o) cross correlation */
int32_t *ener, /* (o) energy */
int16_t *buffer, /* (i) signal buffer */
- int16_t lag, /* (i) pitch lag */
- int16_t bLen, /* (i) length of buffer */
- int16_t sRange, /* (i) correlation search length */
+ size_t lag, /* (i) pitch lag */
+ size_t bLen, /* (i) length of buffer */
+ size_t sRange, /* (i) correlation search length */
int16_t scale /* (i) number of rightshifts to use */
);
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/constants.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/constants.c
index 1d384b750e4..9e341942e66 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/constants.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/constants.c
@@ -593,10 +593,10 @@ const int16_t WebRtcIlbcfix_kAlpha[4]={
/* Ranges for search and filters at different subframes */
-const int16_t WebRtcIlbcfix_kSearchRange[5][CB_NSTAGES]={
+const size_t WebRtcIlbcfix_kSearchRange[5][CB_NSTAGES]={
{58,58,58}, {108,44,44}, {108,108,108}, {108,108,108}, {108,108,108}};
-const int16_t WebRtcIlbcfix_kFilterRange[5]={63, 85, 125, 147, 147};
+const size_t WebRtcIlbcfix_kFilterRange[5]={63, 85, 125, 147, 147};
/* Gain Quantization for the codebook gains of the 3 stages */
@@ -647,7 +647,7 @@ const int16_t WebRtcIlbcfix_kEnhWt[3] = {
4800, 16384, 27968 /* Q16 */
};
-const int16_t WebRtcIlbcfix_kEnhPlocs[ENH_NBLOCKS_TOT] = {
+const size_t WebRtcIlbcfix_kEnhPlocs[ENH_NBLOCKS_TOT] = {
160, 480, 800, 1120, 1440, 1760, 2080, 2400 /* Q(-2) */
};
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/constants.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/constants.h
index ff6370e14c8..7c4ad4d9286 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/constants.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/constants.h
@@ -61,8 +61,8 @@ extern const int16_t WebRtcIlbcfix_kFrgQuantMod[];
/* Ranges for search and filters at different subframes */
-extern const int16_t WebRtcIlbcfix_kSearchRange[5][CB_NSTAGES];
-extern const int16_t WebRtcIlbcfix_kFilterRange[];
+extern const size_t WebRtcIlbcfix_kSearchRange[5][CB_NSTAGES];
+extern const size_t WebRtcIlbcfix_kFilterRange[];
/* gain quantization tables */
@@ -81,7 +81,7 @@ extern const int16_t WebRtcIlbcfix_kAlpha[];
extern const int16_t WebRtcIlbcfix_kEnhPolyPhaser[ENH_UPS0][ENH_FLO_MULT2_PLUS1];
extern const int16_t WebRtcIlbcfix_kEnhWt[];
-extern const int16_t WebRtcIlbcfix_kEnhPlocs[];
+extern const size_t WebRtcIlbcfix_kEnhPlocs[];
/* PLC tables */
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/create_augmented_vec.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/create_augmented_vec.c
index 965cbe0d394..8ae28ac3b99 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/create_augmented_vec.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/create_augmented_vec.c
@@ -25,12 +25,12 @@
*----------------------------------------------------------------*/
void WebRtcIlbcfix_CreateAugmentedVec(
- int16_t index, /* (i) Index for the augmented vector to be created */
+ size_t index, /* (i) Index for the augmented vector to be created */
int16_t *buffer, /* (i) Pointer to the end of the codebook memory that
is used for creation of the augmented codebook */
int16_t *cbVec /* (o) The construced codebook vector */
) {
- int16_t ilow;
+ size_t ilow;
int16_t *ppo, *ppi;
int16_t cbVecTmp[4];
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/create_augmented_vec.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/create_augmented_vec.h
index e3c3c7b4bc7..430dfe9b9da 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/create_augmented_vec.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/create_augmented_vec.h
@@ -27,7 +27,7 @@
*----------------------------------------------------------------*/
void WebRtcIlbcfix_CreateAugmentedVec(
- int16_t index, /* (i) Index for the augmented vector to be created */
+ size_t index, /* (i) Index for the augmented vector to be created */
int16_t *buffer, /* (i) Pointer to the end of the codebook memory that
is used for creation of the augmented codebook */
int16_t *cbVec /* (o) The construced codebook vector */
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/decode.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/decode.c
index 9918de201ed..4c8497a568c 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/decode.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/decode.c
@@ -44,7 +44,7 @@ void WebRtcIlbcfix_DecodeImpl(
int16_t mode /* (i) 0: bad packet, PLC,
1: normal */
) {
- int i;
+ size_t i;
int16_t order_plus_one;
int16_t last_bit;
@@ -106,7 +106,7 @@ void WebRtcIlbcfix_DecodeImpl(
WebRtcIlbcfix_DoThePlc(
PLCresidual, PLClpc, 0, decresidual,
syntdenum + (LPC_FILTERORDER + 1) * (iLBCdec_inst->nsub - 1),
- (int16_t)(iLBCdec_inst->last_lag), iLBCdec_inst);
+ iLBCdec_inst->last_lag, iLBCdec_inst);
/* Use the output from doThePLC */
WEBRTC_SPL_MEMCPY_W16(decresidual, PLCresidual, iLBCdec_inst->blockl);
@@ -122,7 +122,7 @@ void WebRtcIlbcfix_DecodeImpl(
/* packet loss conceal */
WebRtcIlbcfix_DoThePlc(PLCresidual, PLClpc, 1, decresidual, syntdenum,
- (int16_t)(iLBCdec_inst->last_lag), iLBCdec_inst);
+ iLBCdec_inst->last_lag, iLBCdec_inst);
WEBRTC_SPL_MEMCPY_W16(decresidual, PLCresidual, iLBCdec_inst->blockl);
@@ -188,18 +188,18 @@ void WebRtcIlbcfix_DecodeImpl(
WEBRTC_SPL_MEMCPY_W16(iLBCdec_inst->syntMem, &data[iLBCdec_inst->blockl-LPC_FILTERORDER], LPC_FILTERORDER);
} else { /* Enhancer not activated */
- int16_t lag;
+ size_t lag;
/* Find last lag (since the enhancer is not called to give this info) */
lag = 20;
if (iLBCdec_inst->mode==20) {
- lag = (int16_t)WebRtcIlbcfix_XcorrCoef(
+ lag = WebRtcIlbcfix_XcorrCoef(
&decresidual[iLBCdec_inst->blockl-60],
&decresidual[iLBCdec_inst->blockl-60-lag],
60,
80, lag, -1);
} else {
- lag = (int16_t)WebRtcIlbcfix_XcorrCoef(
+ lag = WebRtcIlbcfix_XcorrCoef(
&decresidual[iLBCdec_inst->blockl-ENH_BLOCKL],
&decresidual[iLBCdec_inst->blockl-ENH_BLOCKL-lag],
ENH_BLOCKL,
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/decode_residual.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/decode_residual.c
index de42ea96193..b8a067e0f3d 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/decode_residual.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/decode_residual.c
@@ -41,8 +41,8 @@ void WebRtcIlbcfix_DecodeResidual(
int16_t *syntdenum /* (i) the decoded synthesis filter
coefficients */
) {
- int16_t meml_gotten, diff, start_pos;
- int16_t subcount, subframe;
+ size_t meml_gotten, diff, start_pos;
+ size_t subcount, subframe;
int16_t *reverseDecresidual = iLBCdec_inst->enh_buf; /* Reversed decoded data, used for decoding backwards in time (reuse memory in state) */
int16_t *memVec = iLBCdec_inst->prevResidual; /* Memory for codebook and filter state (reuse memory in state) */
int16_t *mem = &memVec[CB_HALFFILTERLEN]; /* Memory for codebook */
@@ -118,7 +118,7 @@ void WebRtcIlbcfix_DecodeResidual(
/* loop over subframes to encode */
- int16_t Nfor = iLBCdec_inst->nsub - iLBC_encbits->startIdx - 1;
+ size_t Nfor = iLBCdec_inst->nsub - iLBC_encbits->startIdx - 1;
for (subframe=0; subframe<Nfor; subframe++) {
/* construct decoded vector */
@@ -156,7 +156,7 @@ void WebRtcIlbcfix_DecodeResidual(
/* loop over subframes to decode */
- int16_t Nback = iLBC_encbits->startIdx - 1;
+ size_t Nback = iLBC_encbits->startIdx - 1;
for (subframe=0; subframe<Nback; subframe++) {
/* construct decoded vector */
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/decoder_interpolate_lsf.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/decoder_interpolate_lsf.c
index fad81706523..06ab2e7190a 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/decoder_interpolate_lsf.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/decoder_interpolate_lsf.c
@@ -34,7 +34,8 @@ void WebRtcIlbcfix_DecoderInterpolateLsp(
IlbcDecoder *iLBCdec_inst
/* (i) the decoder state structure */
){
- int i, pos, lp_length;
+ size_t i;
+ int pos, lp_length;
int16_t lp[LPC_FILTERORDER + 1], *lsfdeq2;
lsfdeq2 = lsfdeq + length;
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/defines.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/defines.h
index 2d37e52650d..2faaea15fc3 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/defines.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/defines.h
@@ -121,11 +121,11 @@ typedef struct iLBC_bits_t_ {
int16_t lsf[LSF_NSPLIT*LPC_N_MAX];
int16_t cb_index[CB_NSTAGES*(NASUB_MAX+1)]; /* First CB_NSTAGES values contains extra CB index */
int16_t gain_index[CB_NSTAGES*(NASUB_MAX+1)]; /* First CB_NSTAGES values contains extra CB gain */
- int16_t idxForMax;
+ size_t idxForMax;
int16_t state_first;
int16_t idxVec[STATE_SHORT_LEN_30MS];
int16_t firstbits;
- int16_t startIdx;
+ size_t startIdx;
} iLBC_bits;
/* type definition encoder instance */
@@ -135,12 +135,12 @@ typedef struct IlbcEncoder_ {
int16_t mode;
/* basic parameters for different frame sizes */
- int16_t blockl;
- int16_t nsub;
+ size_t blockl;
+ size_t nsub;
int16_t nasub;
- int16_t no_of_bytes, no_of_words;
+ size_t no_of_bytes, no_of_words;
int16_t lpc_n;
- int16_t state_short_len;
+ size_t state_short_len;
/* analysis filter state */
int16_t anaMem[LPC_FILTERORDER];
@@ -164,7 +164,7 @@ typedef struct IlbcEncoder_ {
int16_t Nfor_flag;
int16_t Nback_flag;
int16_t start_pos;
- int16_t diff;
+ size_t diff;
#endif
} IlbcEncoder;
@@ -176,12 +176,12 @@ typedef struct IlbcDecoder_ {
int16_t mode;
/* basic parameters for different frame sizes */
- int16_t blockl;
- int16_t nsub;
+ size_t blockl;
+ size_t nsub;
int16_t nasub;
- int16_t no_of_bytes, no_of_words;
+ size_t no_of_bytes, no_of_words;
int16_t lpc_n;
- int16_t state_short_len;
+ size_t state_short_len;
/* synthesis filter state */
int16_t syntMem[LPC_FILTERORDER];
@@ -190,14 +190,15 @@ typedef struct IlbcDecoder_ {
int16_t lsfdeqold[LPC_FILTERORDER];
/* pitch lag estimated in enhancer and used in PLC */
- int last_lag;
+ size_t last_lag;
/* PLC state information */
int consPLICount, prev_enh_pl;
int16_t perSquare;
int16_t prevScale, prevPLI;
- int16_t prevLag, prevLpc[LPC_FILTERORDER+1];
+ size_t prevLag;
+ int16_t prevLpc[LPC_FILTERORDER+1];
int16_t prevResidual[NSUB_MAX*SUBL];
int16_t seed;
@@ -212,7 +213,7 @@ typedef struct IlbcDecoder_ {
/* enhancer state information */
int use_enhancer;
int16_t enh_buf[ENH_BUFL+ENH_BUFL_FILTEROVERHEAD];
- int16_t enh_period[ENH_NBLOCKS_TOT];
+ size_t enh_period[ENH_NBLOCKS_TOT];
} IlbcDecoder;
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/do_plc.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/do_plc.c
index ecdd68a57fd..f74439ede6e 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/do_plc.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/do_plc.c
@@ -33,18 +33,19 @@ void WebRtcIlbcfix_DoThePlc(
0 - no PL, 1 = PL */
int16_t *decresidual, /* (i) decoded residual */
int16_t *lpc, /* (i) decoded LPC (only used for no PL) */
- int16_t inlag, /* (i) pitch lag */
+ size_t inlag, /* (i) pitch lag */
IlbcDecoder *iLBCdec_inst
/* (i/o) decoder instance */
){
- int16_t i;
+ size_t i;
int32_t cross, ener, cross_comp, ener_comp = 0;
int32_t measure, maxMeasure, energy;
int16_t max, crossSquareMax, crossSquare;
- int16_t j, lag, tmp1, tmp2, randlag;
+ size_t j, lag, randlag;
+ int16_t tmp1, tmp2;
int16_t shift1, shift2, shift3, shiftMax;
int16_t scale3;
- int16_t corrLen;
+ size_t corrLen;
int32_t tmpW32, tmp2W32;
int16_t use_gain;
int16_t tot_gain;
@@ -54,7 +55,7 @@ void WebRtcIlbcfix_DoThePlc(
int32_t nom;
int16_t denom;
int16_t pitchfact;
- int16_t use_lag;
+ size_t use_lag;
int ind;
int16_t randvec[BLOCKL_MAX];
@@ -70,7 +71,8 @@ void WebRtcIlbcfix_DoThePlc(
/* Maximum 60 samples are correlated, preserve as high accuracy
as possible without getting overflow */
- max = WebRtcSpl_MaxAbsValueW16((*iLBCdec_inst).prevResidual, (int16_t)iLBCdec_inst->blockl);
+ max = WebRtcSpl_MaxAbsValueW16((*iLBCdec_inst).prevResidual,
+ iLBCdec_inst->blockl);
scale3 = (WebRtcSpl_GetSizeInBits(max)<<1) - 25;
if (scale3 < 0) {
scale3 = 0;
@@ -85,7 +87,7 @@ void WebRtcIlbcfix_DoThePlc(
lag = inlag - 3;
/* Guard against getting outside the frame */
- corrLen = WEBRTC_SPL_MIN(60, iLBCdec_inst->blockl-(inlag+3));
+ corrLen = (size_t)WEBRTC_SPL_MIN(60, iLBCdec_inst->blockl-(inlag+3));
WebRtcIlbcfix_CompCorr( &cross, &ener,
iLBCdec_inst->prevResidual, lag, iLBCdec_inst->blockl, corrLen, scale3);
@@ -233,7 +235,7 @@ void WebRtcIlbcfix_DoThePlc(
/* noise component - 52 < randlagFIX < 117 */
iLBCdec_inst->seed = (int16_t)(iLBCdec_inst->seed * 31821 + 13849);
- randlag = 53 + (int16_t)(iLBCdec_inst->seed & 63);
+ randlag = 53 + (iLBCdec_inst->seed & 63);
if (randlag > i) {
randvec[i] =
iLBCdec_inst->prevResidual[iLBCdec_inst->blockl + i - randlag];
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/do_plc.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/do_plc.h
index c55b81540c7..38b8fdb7c04 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/do_plc.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/do_plc.h
@@ -33,7 +33,7 @@ void WebRtcIlbcfix_DoThePlc(
0 - no PL, 1 = PL */
int16_t *decresidual, /* (i) decoded residual */
int16_t *lpc, /* (i) decoded LPC (only used for no PL) */
- int16_t inlag, /* (i) pitch lag */
+ size_t inlag, /* (i) pitch lag */
IlbcDecoder *iLBCdec_inst
/* (i/o) decoder instance */
);
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/encode.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/encode.c
index 114ce1ffbbd..812ec8d6c7b 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/encode.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/encode.c
@@ -48,11 +48,11 @@ void WebRtcIlbcfix_EncodeImpl(
IlbcEncoder *iLBCenc_inst /* (i/o) the general encoder
state */
){
- int n, meml_gotten, Nfor;
- int16_t diff, start_pos;
- int index;
- int subcount, subframe;
- int16_t start_count, end_count;
+ size_t n, meml_gotten, Nfor;
+ size_t diff, start_pos;
+ size_t index;
+ size_t subcount, subframe;
+ size_t start_count, end_count;
int16_t *residual;
int32_t en1, en2;
int16_t scale, max;
@@ -86,7 +86,7 @@ void WebRtcIlbcfix_EncodeImpl(
#ifdef SPLIT_10MS
WebRtcSpl_MemSetW16 ( (int16_t *) iLBCbits_inst, 0,
- (int16_t) (sizeof(iLBC_bits) / sizeof(int16_t)) );
+ sizeof(iLBC_bits) / sizeof(int16_t) );
start_pos = iLBCenc_inst->start_pos;
diff = iLBCenc_inst->diff;
@@ -317,17 +317,17 @@ void WebRtcIlbcfix_EncodeImpl(
if (iLBCenc_inst->section == 1)
{
start_count = 0;
- end_count = WEBRTC_SPL_MIN (Nfor, 2);
+ end_count = WEBRTC_SPL_MIN (Nfor, (size_t)2);
}
if (iLBCenc_inst->section == 2)
{
- start_count = WEBRTC_SPL_MIN (Nfor, 2);
+ start_count = WEBRTC_SPL_MIN (Nfor, (size_t)2);
end_count = Nfor;
}
}
#else
start_count = 0;
- end_count = (int16_t)Nfor;
+ end_count = Nfor;
#endif
/* loop over subframes to encode */
@@ -341,7 +341,7 @@ void WebRtcIlbcfix_EncodeImpl(
&residual[(iLBCbits_inst->startIdx+1+subframe)*SUBL],
mem, MEM_LF_TBL, SUBL,
&weightdenum[(iLBCbits_inst->startIdx+1+subframe)*(LPC_FILTERORDER+1)],
- (int16_t)subcount);
+ subcount);
/* construct decoded vector */
@@ -386,7 +386,7 @@ void WebRtcIlbcfix_EncodeImpl(
contained in the same vector as the residual)
*/
- int Nback = iLBCbits_inst->startIdx - 1;
+ size_t Nback = iLBCbits_inst->startIdx - 1;
WebRtcSpl_MemCpyReversedOrder(&reverseResidual[Nback*SUBL-1], residual, Nback*SUBL);
/* setup memory */
@@ -434,7 +434,7 @@ void WebRtcIlbcfix_EncodeImpl(
}
#else
start_count = 0;
- end_count = (int16_t)Nback;
+ end_count = Nback;
#endif
/* loop over subframes to encode */
@@ -447,7 +447,7 @@ void WebRtcIlbcfix_EncodeImpl(
iLBCbits_inst->gain_index+subcount*CB_NSTAGES, &reverseResidual[subframe*SUBL],
mem, MEM_LF_TBL, SUBL,
&weightdenum[(iLBCbits_inst->startIdx-2-subframe)*(LPC_FILTERORDER+1)],
- (int16_t)subcount);
+ subcount);
/* construct decoded vector */
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/energy_inverse.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/energy_inverse.c
index a6b1c758f9e..b2bdcfffc3f 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/energy_inverse.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/energy_inverse.c
@@ -23,12 +23,12 @@
void WebRtcIlbcfix_EnergyInverse(
int16_t *energy, /* (i/o) Energy and inverse
energy (in Q29) */
- int noOfEnergies) /* (i) The length of the energy
+ size_t noOfEnergies) /* (i) The length of the energy
vector */
{
int32_t Nom=(int32_t)0x1FFFFFFF;
int16_t *energyPtr;
- int i;
+ size_t i;
/* Set the minimum energy value to 16384 to avoid overflow */
energyPtr=energy;
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/energy_inverse.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/energy_inverse.h
index 7bb67215fce..fe25094325f 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/energy_inverse.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/energy_inverse.h
@@ -26,7 +26,7 @@
void WebRtcIlbcfix_EnergyInverse(
int16_t *energy, /* (i/o) Energy and inverse
energy (in Q29) */
- int noOfEnergies); /* (i) The length of the energy
+ size_t noOfEnergies); /* (i) The length of the energy
vector */
#endif
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/enhancer.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/enhancer.c
index 38c3de379aa..521d00441c2 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/enhancer.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/enhancer.c
@@ -29,11 +29,11 @@
void WebRtcIlbcfix_Enhancer(
int16_t *odata, /* (o) smoothed block, dimension blockl */
int16_t *idata, /* (i) data buffer used for enhancing */
- int16_t idatal, /* (i) dimension idata */
- int16_t centerStartPos, /* (i) first sample current block within idata */
- int16_t *period, /* (i) pitch period array (pitch bward-in time) */
- int16_t *plocs, /* (i) locations where period array values valid */
- int16_t periodl /* (i) dimension of period and plocs */
+ size_t idatal, /* (i) dimension idata */
+ size_t centerStartPos, /* (i) first sample current block within idata */
+ size_t *period, /* (i) pitch period array (pitch bward-in time) */
+ const size_t *plocs, /* (i) locations where period array values valid */
+ size_t periodl /* (i) dimension of period and plocs */
){
/* Stack based */
int16_t surround[ENH_BLOCKL];
@@ -47,5 +47,5 @@ void WebRtcIlbcfix_Enhancer(
/* compute the smoothed output from said second sequence */
- WebRtcIlbcfix_Smooth(odata, idata+centerStartPos, surround);
+ WebRtcIlbcfix_Smooth(odata, idata + centerStartPos, surround);
}
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/enhancer.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/enhancer.h
index 83f48b05055..ed219fb1bbf 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/enhancer.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/enhancer.h
@@ -29,11 +29,11 @@
void WebRtcIlbcfix_Enhancer(
int16_t *odata, /* (o) smoothed block, dimension blockl */
int16_t *idata, /* (i) data buffer used for enhancing */
- int16_t idatal, /* (i) dimension idata */
- int16_t centerStartPos, /* (i) first sample current block within idata */
- int16_t *period, /* (i) pitch period array (pitch bward-in time) */
- int16_t *plocs, /* (i) locations where period array values valid */
- int16_t periodl /* (i) dimension of period and plocs */
+ size_t idatal, /* (i) dimension idata */
+ size_t centerStartPos, /* (i) first sample current block within idata */
+ size_t *period, /* (i) pitch period array (pitch bward-in time) */
+ const size_t *plocs, /* (i) locations where period array values valid */
+ size_t periodl /* (i) dimension of period and plocs */
);
#endif
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/enhancer_interface.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/enhancer_interface.c
index c630dd5af97..1c0fd423837 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/enhancer_interface.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/enhancer_interface.c
@@ -30,25 +30,29 @@
* interface for enhancer
*---------------------------------------------------------------*/
-int WebRtcIlbcfix_EnhancerInterface( /* (o) Estimated lag in end of in[] */
+size_t WebRtcIlbcfix_EnhancerInterface( /* (o) Estimated lag in end of in[] */
int16_t *out, /* (o) enhanced signal */
int16_t *in, /* (i) unenhanced signal */
IlbcDecoder *iLBCdec_inst /* (i) buffers etc */
){
- int iblock;
- int lag=20, tlag=20;
- int inLen=iLBCdec_inst->blockl+120;
- int16_t scale, scale1, plc_blockl;
- int16_t *enh_buf, *enh_period;
- int32_t tmp1, tmp2, max, new_blocks;
+ size_t iblock;
+ size_t lag=20, tlag=20;
+ size_t inLen=iLBCdec_inst->blockl+120;
+ int16_t scale, scale1;
+ size_t plc_blockl;
+ int16_t *enh_buf;
+ size_t *enh_period;
+ int32_t tmp1, tmp2, max;
+ size_t new_blocks;
int16_t *enh_bufPtr1;
- int i, k;
+ size_t i;
+ size_t k;
int16_t EnChange;
int16_t SqrtEnChange;
int16_t inc;
int16_t win;
int16_t *tmpW16ptr;
- int16_t startPos;
+ size_t startPos;
int16_t *plc_pred;
int16_t *target, *regressor;
int16_t max16;
@@ -56,8 +60,9 @@ int WebRtcIlbcfix_EnhancerInterface( /* (o) Estimated lag in end of in[] */
int32_t ener;
int16_t enerSh;
int16_t corrSh;
- int16_t ind, sh;
- int16_t start, stop;
+ size_t ind;
+ int16_t sh;
+ size_t start, stop;
/* Stack based */
int16_t totsh[3];
int16_t downsampled[(BLOCKL_MAX+120)>>1]; /* length 180 */
@@ -65,7 +70,7 @@ int WebRtcIlbcfix_EnhancerInterface( /* (o) Estimated lag in end of in[] */
int32_t corrmax[3];
int16_t corr16[3];
int16_t en16[3];
- int16_t lagmax[3];
+ size_t lagmax[3];
plc_pred = downsampled; /* Reuse memory since plc_pred[ENH_BLOCKL] and
downsampled are non overlapping */
@@ -96,7 +101,7 @@ int WebRtcIlbcfix_EnhancerInterface( /* (o) Estimated lag in end of in[] */
memmove(enh_period, &enh_period[new_blocks],
(ENH_NBLOCKS_TOT - new_blocks) * sizeof(*enh_period));
- k = WebRtcSpl_DownsampleFast(
+ WebRtcSpl_DownsampleFast(
enh_buf+ENH_BUFL-inLen, /* Input samples */
inLen + ENH_BUFL_FILTEROVERHEAD,
downsampled,
@@ -128,11 +133,9 @@ int WebRtcIlbcfix_EnhancerInterface( /* (o) Estimated lag in end of in[] */
for (i=0;i<2;i++) {
lagmax[i] = WebRtcSpl_MaxIndexW32(corr32, 50);
corrmax[i] = corr32[lagmax[i]];
- start = lagmax[i] - 2;
- stop = lagmax[i] + 2;
- start = WEBRTC_SPL_MAX(0, start);
- stop = WEBRTC_SPL_MIN(49, stop);
- for (k=start; k<=stop; k++) {
+ start = WEBRTC_SPL_MAX(2, lagmax[i]) - 2;
+ stop = WEBRTC_SPL_MIN(47, lagmax[i]) + 2;
+ for (k = start; k <= stop; k++) {
corr32[k] = 0;
}
}
@@ -142,8 +145,8 @@ int WebRtcIlbcfix_EnhancerInterface( /* (o) Estimated lag in end of in[] */
/* Calculate normalized corr^2 and ener */
for (i=0;i<3;i++) {
corrSh = 15-WebRtcSpl_GetSizeInBits(corrmax[i]);
- ener = WebRtcSpl_DotProductWithScale(&regressor[-lagmax[i]],
- &regressor[-lagmax[i]],
+ ener = WebRtcSpl_DotProductWithScale(regressor - lagmax[i],
+ regressor - lagmax[i],
ENH_BLOCKL_HALF, shifts);
enerSh = 15-WebRtcSpl_GetSizeInBits(ener);
corr16[i] = (int16_t)WEBRTC_SPL_SHIFT_W32(corrmax[i], corrSh);
@@ -171,7 +174,7 @@ int WebRtcIlbcfix_EnhancerInterface( /* (o) Estimated lag in end of in[] */
lag = lagmax[ind] + 10;
/* Store the estimated lag in the non-downsampled domain */
- enh_period[ENH_NBLOCKS_TOT - new_blocks + iblock] = (int16_t)(lag * 8);
+ enh_period[ENH_NBLOCKS_TOT - new_blocks + iblock] = lag * 8;
/* Store the estimated lag for backward PLC */
if (iLBCdec_inst->prev_enh_pl==1) {
@@ -224,7 +227,7 @@ int WebRtcIlbcfix_EnhancerInterface( /* (o) Estimated lag in end of in[] */
(plc_blockl-lag));
}
} else {
- int pos;
+ size_t pos;
pos = plc_blockl;
@@ -280,8 +283,8 @@ int WebRtcIlbcfix_EnhancerInterface( /* (o) Estimated lag in end of in[] */
/* Multiply first part of vector with 2*SqrtEnChange */
- WebRtcSpl_ScaleVector(plc_pred, plc_pred, SqrtEnChange,
- (int16_t)(plc_blockl-16), 14);
+ WebRtcSpl_ScaleVector(plc_pred, plc_pred, SqrtEnChange, plc_blockl-16,
+ 14);
/* Calculate increase parameter for window part (16 last samples) */
/* (1-2*SqrtEnChange)/16 in Q15 */
@@ -343,7 +346,7 @@ int WebRtcIlbcfix_EnhancerInterface( /* (o) Estimated lag in end of in[] */
LPC_FILTERORDER);
WebRtcIlbcfix_HpOutput(synt, (int16_t*)WebRtcIlbcfix_kHpOutCoefs,
iLBCdec_inst->hpimemy, iLBCdec_inst->hpimemx,
- (int16_t)lag);
+ lag);
WebRtcSpl_FilterARFastQ12(
enh_bufPtr1, synt,
&iLBCdec_inst->old_syntdenum[
@@ -354,7 +357,7 @@ int WebRtcIlbcfix_EnhancerInterface( /* (o) Estimated lag in end of in[] */
LPC_FILTERORDER);
WebRtcIlbcfix_HpOutput(synt, (int16_t*)WebRtcIlbcfix_kHpOutCoefs,
iLBCdec_inst->hpimemy, iLBCdec_inst->hpimemx,
- (int16_t)lag);
+ lag);
}
}
@@ -365,9 +368,9 @@ int WebRtcIlbcfix_EnhancerInterface( /* (o) Estimated lag in end of in[] */
WebRtcIlbcfix_Enhancer(out + iblock * ENH_BLOCKL,
enh_buf,
ENH_BUFL,
- (int16_t)(iblock * ENH_BLOCKL + startPos),
+ iblock * ENH_BLOCKL + startPos,
enh_period,
- (int16_t*)WebRtcIlbcfix_kEnhPlocs, ENH_NBLOCKS_TOT);
+ WebRtcIlbcfix_kEnhPlocs, ENH_NBLOCKS_TOT);
}
return (lag);
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/enhancer_interface.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/enhancer_interface.h
index fa58b7a67f8..61efd22604b 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/enhancer_interface.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/enhancer_interface.h
@@ -25,7 +25,7 @@
* interface for enhancer
*---------------------------------------------------------------*/
-int WebRtcIlbcfix_EnhancerInterface( /* (o) Estimated lag in end of in[] */
+size_t WebRtcIlbcfix_EnhancerInterface( /* (o) Estimated lag in end of in[] */
int16_t *out, /* (o) enhanced signal */
int16_t *in, /* (i) unenhanced signal */
IlbcDecoder *iLBCdec_inst /* (i) buffers etc */
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/filtered_cb_vecs.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/filtered_cb_vecs.c
index aa8170cb76d..04d17a67ef5 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/filtered_cb_vecs.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/filtered_cb_vecs.c
@@ -29,8 +29,8 @@ void WebRtcIlbcfix_FilteredCbVecs(
int16_t *cbvectors, /* (o) Codebook vector for the higher section */
int16_t *CBmem, /* (i) Codebook memory that is filtered to create a
second CB section */
- int lMem, /* (i) Length of codebook memory */
- int16_t samples /* (i) Number of samples to filter */
+ size_t lMem, /* (i) Length of codebook memory */
+ size_t samples /* (i) Number of samples to filter */
) {
/* Set up the memory, start with zero state */
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/filtered_cb_vecs.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/filtered_cb_vecs.h
index 99e89a08071..d23b25c1ac2 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/filtered_cb_vecs.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/filtered_cb_vecs.h
@@ -31,8 +31,8 @@ void WebRtcIlbcfix_FilteredCbVecs(
int16_t *cbvectors, /* (o) Codebook vector for the higher section */
int16_t *CBmem, /* (i) Codebook memory that is filtered to create a
second CB section */
- int lMem, /* (i) Length of codebook memory */
- int16_t samples /* (i) Number of samples to filter */
+ size_t lMem, /* (i) Length of codebook memory */
+ size_t samples /* (i) Number of samples to filter */
);
#endif
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/frame_classify.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/frame_classify.c
index 6a68dec16f6..48332808e4e 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/frame_classify.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/frame_classify.c
@@ -23,7 +23,7 @@
* Classification of subframes to localize start state
*---------------------------------------------------------------*/
-int16_t WebRtcIlbcfix_FrameClassify(
+size_t WebRtcIlbcfix_FrameClassify(
/* (o) Index to the max-energy sub frame */
IlbcEncoder *iLBCenc_inst,
/* (i/o) the encoder state structure */
@@ -35,8 +35,8 @@ int16_t WebRtcIlbcfix_FrameClassify(
int32_t *seqEnPtr;
int32_t maxW32;
int16_t scale1;
- int16_t pos;
- int n;
+ size_t pos;
+ size_t n;
/*
Calculate the energy of each of the 80 sample blocks
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/frame_classify.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/frame_classify.h
index b32e2c87d5e..99f71447825 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/frame_classify.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/frame_classify.h
@@ -19,7 +19,7 @@
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_FRAME_CLASSIFY_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_SOURCE_FRAME_CLASSIFY_H_
-int16_t WebRtcIlbcfix_FrameClassify(
+size_t WebRtcIlbcfix_FrameClassify(
/* (o) Index to the max-energy sub frame */
IlbcEncoder *iLBCenc_inst,
/* (i/o) the encoder state structure */
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/get_cd_vec.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/get_cd_vec.c
index cf05ce3310a..d7c2e75553b 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/get_cd_vec.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/get_cd_vec.c
@@ -27,12 +27,12 @@
void WebRtcIlbcfix_GetCbVec(
int16_t *cbvec, /* (o) Constructed codebook vector */
int16_t *mem, /* (i) Codebook buffer */
- int16_t index, /* (i) Codebook index */
- int16_t lMem, /* (i) Length of codebook buffer */
- int16_t cbveclen /* (i) Codebook vector length */
+ size_t index, /* (i) Codebook index */
+ size_t lMem, /* (i) Length of codebook buffer */
+ size_t cbveclen /* (i) Codebook vector length */
){
- int16_t k, base_size;
- int16_t lag;
+ size_t k, base_size;
+ size_t lag;
/* Stack based */
int16_t tempbuff2[SUBL+5];
@@ -58,7 +58,7 @@ void WebRtcIlbcfix_GetCbVec(
/* Calculate lag */
- k = (int16_t)(2 * (index - (lMem - cbveclen + 1))) + cbveclen;
+ k = (2 * (index - (lMem - cbveclen + 1))) + cbveclen;
lag = k / 2;
@@ -70,7 +70,7 @@ void WebRtcIlbcfix_GetCbVec(
else {
- int16_t memIndTest;
+ size_t memIndTest;
/* first non-interpolated vectors */
@@ -100,7 +100,7 @@ void WebRtcIlbcfix_GetCbVec(
/* do filtering */
WebRtcSpl_FilterMAFastQ12(
&mem[memIndTest+7], tempbuff2, (int16_t*)WebRtcIlbcfix_kCbFiltersRev,
- CB_FILTERLEN, (int16_t)(cbveclen+5));
+ CB_FILTERLEN, cbveclen+5);
/* Calculate lag index */
lag = (cbveclen<<1)-20+index-base_size-lMem-1;
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/get_cd_vec.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/get_cd_vec.h
index 1c5ac8f16e3..07f67a2aa5e 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/get_cd_vec.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/get_cd_vec.h
@@ -22,9 +22,9 @@
void WebRtcIlbcfix_GetCbVec(
int16_t *cbvec, /* (o) Constructed codebook vector */
int16_t *mem, /* (i) Codebook buffer */
- int16_t index, /* (i) Codebook index */
- int16_t lMem, /* (i) Length of codebook buffer */
- int16_t cbveclen /* (i) Codebook vector length */
+ size_t index, /* (i) Codebook index */
+ size_t lMem, /* (i) Length of codebook buffer */
+ size_t cbveclen /* (i) Codebook vector length */
);
#endif
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/get_sync_seq.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/get_sync_seq.c
index 480ed7c6cdb..a98a96cdf10 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/get_sync_seq.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/get_sync_seq.c
@@ -27,71 +27,68 @@
void WebRtcIlbcfix_GetSyncSeq(
int16_t *idata, /* (i) original data */
- int16_t idatal, /* (i) dimension of data */
- int16_t centerStartPos, /* (i) where current block starts */
- int16_t *period, /* (i) rough-pitch-period array (Q-2) */
- int16_t *plocs, /* (i) where periods of period array are taken (Q-2) */
- int16_t periodl, /* (i) dimension period array */
- int16_t hl, /* (i) 2*hl+1 is the number of sequences */
+ size_t idatal, /* (i) dimension of data */
+ size_t centerStartPos, /* (i) where current block starts */
+ size_t *period, /* (i) rough-pitch-period array (Q-2) */
+ const size_t *plocs, /* (i) where periods of period array are taken (Q-2) */
+ size_t periodl, /* (i) dimension period array */
+ size_t hl, /* (i) 2*hl+1 is the number of sequences */
int16_t *surround /* (i/o) The contribution from this sequence
summed with earlier contributions */
){
- int16_t i,centerEndPos,q;
+ size_t i, centerEndPos, q;
/* Stack based */
- int16_t lagBlock[2*ENH_HL+1];
- int16_t blockStartPos[2*ENH_HL+1]; /* Defines the position to search around (Q2) */
- int16_t plocs2[ENH_PLOCSL];
+ size_t lagBlock[2 * ENH_HL + 1];
+ size_t blockStartPos[2 * ENH_HL + 1]; /* The position to search around (Q2) */
+ size_t plocs2[ENH_PLOCSL];
- centerEndPos=centerStartPos+ENH_BLOCKL-1;
+ centerEndPos = centerStartPos + ENH_BLOCKL - 1;
/* present (find predicted lag from this position) */
WebRtcIlbcfix_NearestNeighbor(lagBlock + hl,
plocs,
- (int16_t)(2 * (centerStartPos + centerEndPos)),
+ 2 * (centerStartPos + centerEndPos),
periodl);
- blockStartPos[hl] = (int16_t)(4 * centerStartPos);
+ blockStartPos[hl] = 4 * centerStartPos;
/* past (find predicted position and perform a refined
search to find the best sequence) */
- for(q=hl-1;q>=0;q--) {
- blockStartPos[q]=blockStartPos[q+1]-period[lagBlock[q+1]];
-
- WebRtcIlbcfix_NearestNeighbor(
- lagBlock + q,
- plocs,
- (int16_t)(blockStartPos[q] + 4 * ENH_BLOCKL_HALF -
- period[lagBlock[q + 1]]),
- periodl);
-
- if (blockStartPos[q] - 4 * ENH_OVERHANG >= 0) {
-
- /* Find the best possible sequence in the 4 times upsampled
- domain around blockStartPos+q */
- WebRtcIlbcfix_Refiner(blockStartPos+q,idata,idatal,
- centerStartPos,blockStartPos[q],surround,WebRtcIlbcfix_kEnhWt[q]);
-
- } else {
- /* Don't add anything since this sequence would
- be outside the buffer */
- }
+ for (q = hl; q > 0; q--) {
+ size_t qq = q - 1;
+ size_t period_q = period[lagBlock[q]];
+ /* Stop if this sequence would be outside the buffer; that means all
+ further-past sequences would also be outside the buffer. */
+ if (blockStartPos[q] < period_q + (4 * ENH_OVERHANG))
+ break;
+ blockStartPos[qq] = blockStartPos[q] - period_q;
+
+ size_t value = blockStartPos[qq] + 4 * ENH_BLOCKL_HALF;
+ value = (value > period_q) ? (value - period_q) : 0;
+ WebRtcIlbcfix_NearestNeighbor(lagBlock + qq, plocs, value, periodl);
+
+ /* Find the best possible sequence in the 4 times upsampled
+ domain around blockStartPos+q */
+ WebRtcIlbcfix_Refiner(blockStartPos + qq, idata, idatal, centerStartPos,
+ blockStartPos[qq], surround,
+ WebRtcIlbcfix_kEnhWt[qq]);
}
/* future (find predicted position and perform a refined
search to find the best sequence) */
- for(i=0;i<periodl;i++) {
- plocs2[i]=(plocs[i]-period[i]);
+ for (i = 0; i < periodl; i++) {
+ plocs2[i] = plocs[i] - period[i];
}
- for (q = hl + 1; q <= (int16_t)(2 * hl); q++) {
+ for (q = hl + 1; q <= (2 * hl); q++) {
WebRtcIlbcfix_NearestNeighbor(
lagBlock + q,
plocs2,
- (int16_t)(blockStartPos[q - 1] + 4 * ENH_BLOCKL_HALF),
+ blockStartPos[q - 1] + 4 * ENH_BLOCKL_HALF,
periodl);
blockStartPos[q]=blockStartPos[q-1]+period[lagBlock[q]];
@@ -100,11 +97,11 @@ void WebRtcIlbcfix_GetSyncSeq(
/* Find the best possible sequence in the 4 times upsampled
domain around blockStartPos+q */
- WebRtcIlbcfix_Refiner(blockStartPos+q, idata, idatal,
- centerStartPos,blockStartPos[q],surround,WebRtcIlbcfix_kEnhWt[2*hl-q]);
+ WebRtcIlbcfix_Refiner(blockStartPos + q, idata, idatal, centerStartPos,
+ blockStartPos[q], surround,
+ WebRtcIlbcfix_kEnhWt[2 * hl - q]);
- }
- else {
+ } else {
/* Don't add anything since this sequence would
be outside the buffer */
}
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/get_sync_seq.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/get_sync_seq.h
index f9b08b750e3..0e3b2077532 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/get_sync_seq.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/get_sync_seq.h
@@ -27,12 +27,12 @@
void WebRtcIlbcfix_GetSyncSeq(
int16_t *idata, /* (i) original data */
- int16_t idatal, /* (i) dimension of data */
- int16_t centerStartPos, /* (i) where current block starts */
- int16_t *period, /* (i) rough-pitch-period array (Q-2) */
- int16_t *plocs, /* (i) where periods of period array are taken (Q-2) */
- int16_t periodl, /* (i) dimension period array */
- int16_t hl, /* (i) 2*hl+1 is the number of sequences */
+ size_t idatal, /* (i) dimension of data */
+ size_t centerStartPos, /* (i) where current block starts */
+ size_t *period, /* (i) rough-pitch-period array (Q-2) */
+ const size_t *plocs, /* (i) where periods of period array are taken (Q-2) */
+ size_t periodl, /* (i) dimension period array */
+ size_t hl, /* (i) 2*hl+1 is the number of sequences */
int16_t *surround /* (i/o) The contribution from this sequence
summed with earlier contributions */
);
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/hp_input.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/hp_input.c
index 260591ede2d..5d8a86050e8 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/hp_input.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/hp_input.c
@@ -30,9 +30,9 @@ void WebRtcIlbcfix_HpInput(
int16_t *y, /* (i/o) Filter state yhi[n-1] ylow[n-1]
yhi[n-2] ylow[n-2] */
int16_t *x, /* (i/o) Filter state x[n-1] x[n-2] */
- int16_t len) /* (i) Number of samples to filter */
+ size_t len) /* (i) Number of samples to filter */
{
- int i;
+ size_t i;
int32_t tmpW32;
int32_t tmpW32b;
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/hp_input.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/hp_input.h
index a30f703351c..acdfa91262f 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/hp_input.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/hp_input.h
@@ -29,6 +29,6 @@ void WebRtcIlbcfix_HpInput(
int16_t *y, /* (i/o) Filter state yhi[n-1] ylow[n-1]
yhi[n-2] ylow[n-2] */
int16_t *x, /* (i/o) Filter state x[n-1] x[n-2] */
- int16_t len); /* (i) Number of samples to filter */
+ size_t len); /* (i) Number of samples to filter */
#endif
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/hp_output.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/hp_output.c
index 3abb427b9a5..bd101bf30ca 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/hp_output.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/hp_output.c
@@ -30,9 +30,9 @@ void WebRtcIlbcfix_HpOutput(
int16_t *y, /* (i/o) Filter state yhi[n-1] ylow[n-1]
yhi[n-2] ylow[n-2] */
int16_t *x, /* (i/o) Filter state x[n-1] x[n-2] */
- int16_t len) /* (i) Number of samples to filter */
+ size_t len) /* (i) Number of samples to filter */
{
- int i;
+ size_t i;
int32_t tmpW32;
int32_t tmpW32b;
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/hp_output.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/hp_output.h
index 7937ba00cd7..1840b68186f 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/hp_output.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/hp_output.h
@@ -29,6 +29,6 @@ void WebRtcIlbcfix_HpOutput(
int16_t *y, /* (i/o) Filter state yhi[n-1] ylow[n-1]
yhi[n-2] ylow[n-2] */
int16_t *x, /* (i/o) Filter state x[n-1] x[n-2] */
- int16_t len); /* (i) Number of samples to filter */
+ size_t len); /* (i) Number of samples to filter */
#endif
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/ilbc.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/ilbc.c
index e41c095f82e..6cd9a723fdb 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/ilbc.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/ilbc.c
@@ -90,10 +90,10 @@ int16_t WebRtcIlbcfix_EncoderInit(IlbcEncoderInstance* iLBCenc_inst,
int WebRtcIlbcfix_Encode(IlbcEncoderInstance* iLBCenc_inst,
const int16_t* speechIn,
- int16_t len,
+ size_t len,
uint8_t* encoded) {
- int16_t pos = 0;
- int16_t encpos = 0;
+ size_t pos = 0;
+ size_t encpos = 0;
if ((len != ((IlbcEncoder*)iLBCenc_inst)->blockl) &&
#ifdef SPLIT_10MS
@@ -118,7 +118,7 @@ int WebRtcIlbcfix_Encode(IlbcEncoderInstance* iLBCenc_inst,
#endif
encpos += ((IlbcEncoder*)iLBCenc_inst)->no_of_words;
}
- return (encpos*2);
+ return (int)(encpos*2);
}
}
@@ -131,23 +131,21 @@ int16_t WebRtcIlbcfix_DecoderInit(IlbcDecoderInstance* iLBCdec_inst,
return(-1);
}
}
-int16_t WebRtcIlbcfix_DecoderInit20Ms(IlbcDecoderInstance *iLBCdec_inst) {
+void WebRtcIlbcfix_DecoderInit20Ms(IlbcDecoderInstance* iLBCdec_inst) {
WebRtcIlbcfix_InitDecode((IlbcDecoder*) iLBCdec_inst, 20, 1);
- return(0);
}
-int16_t WebRtcIlbcfix_Decoderinit30Ms(IlbcDecoderInstance *iLBCdec_inst) {
+void WebRtcIlbcfix_Decoderinit30Ms(IlbcDecoderInstance* iLBCdec_inst) {
WebRtcIlbcfix_InitDecode((IlbcDecoder*) iLBCdec_inst, 30, 1);
- return(0);
}
int WebRtcIlbcfix_Decode(IlbcDecoderInstance* iLBCdec_inst,
const uint8_t* encoded,
- int16_t len,
+ size_t len,
int16_t* decoded,
int16_t* speechType)
{
- int i=0;
+ size_t i=0;
/* Allow for automatic switching between the frame sizes
(although you do get some discontinuity) */
if ((len==((IlbcDecoder*)iLBCdec_inst)->no_of_bytes)||
@@ -191,16 +189,16 @@ int WebRtcIlbcfix_Decode(IlbcDecoderInstance* iLBCdec_inst,
}
/* iLBC does not support VAD/CNG yet */
*speechType=1;
- return(i*((IlbcDecoder*)iLBCdec_inst)->blockl);
+ return (int)(i*((IlbcDecoder*)iLBCdec_inst)->blockl);
}
int WebRtcIlbcfix_Decode20Ms(IlbcDecoderInstance* iLBCdec_inst,
const uint8_t* encoded,
- int16_t len,
+ size_t len,
int16_t* decoded,
int16_t* speechType)
{
- int i=0;
+ size_t i=0;
if ((len==((IlbcDecoder*)iLBCdec_inst)->no_of_bytes)||
(len==2*((IlbcDecoder*)iLBCdec_inst)->no_of_bytes)||
(len==3*((IlbcDecoder*)iLBCdec_inst)->no_of_bytes)) {
@@ -219,16 +217,16 @@ int WebRtcIlbcfix_Decode20Ms(IlbcDecoderInstance* iLBCdec_inst,
}
/* iLBC does not support VAD/CNG yet */
*speechType=1;
- return(i*((IlbcDecoder*)iLBCdec_inst)->blockl);
+ return (int)(i*((IlbcDecoder*)iLBCdec_inst)->blockl);
}
int WebRtcIlbcfix_Decode30Ms(IlbcDecoderInstance* iLBCdec_inst,
const uint8_t* encoded,
- int16_t len,
+ size_t len,
int16_t* decoded,
int16_t* speechType)
{
- int i=0;
+ size_t i=0;
if ((len==((IlbcDecoder*)iLBCdec_inst)->no_of_bytes)||
(len==2*((IlbcDecoder*)iLBCdec_inst)->no_of_bytes)||
(len==3*((IlbcDecoder*)iLBCdec_inst)->no_of_bytes)) {
@@ -247,13 +245,13 @@ int WebRtcIlbcfix_Decode30Ms(IlbcDecoderInstance* iLBCdec_inst,
}
/* iLBC does not support VAD/CNG yet */
*speechType=1;
- return(i*((IlbcDecoder*)iLBCdec_inst)->blockl);
+ return (int)(i*((IlbcDecoder*)iLBCdec_inst)->blockl);
}
-int16_t WebRtcIlbcfix_DecodePlc(IlbcDecoderInstance* iLBCdec_inst,
- int16_t* decoded,
- int16_t noOfLostFrames) {
- int i;
+size_t WebRtcIlbcfix_DecodePlc(IlbcDecoderInstance* iLBCdec_inst,
+ int16_t* decoded,
+ size_t noOfLostFrames) {
+ size_t i;
uint16_t dummy;
for (i=0;i<noOfLostFrames;i++) {
@@ -265,9 +263,9 @@ int16_t WebRtcIlbcfix_DecodePlc(IlbcDecoderInstance* iLBCdec_inst,
return (noOfLostFrames*((IlbcDecoder*)iLBCdec_inst)->blockl);
}
-int16_t WebRtcIlbcfix_NetEqPlc(IlbcDecoderInstance* iLBCdec_inst,
- int16_t* decoded,
- int16_t noOfLostFrames) {
+size_t WebRtcIlbcfix_NetEqPlc(IlbcDecoderInstance* iLBCdec_inst,
+ int16_t* decoded,
+ size_t noOfLostFrames) {
/* Two input parameters not used, but needed for function pointers in NetEQ */
(void)(decoded = NULL);
(void)(noOfLostFrames = 0);
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/ilbc.gypi b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/ilbc.gypi
index ca4704cc40e..ce439014a12 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/ilbc.gypi
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/ilbc.gypi
@@ -26,10 +26,12 @@
],
},
'sources': [
+ 'interface/audio_decoder_ilbc.h',
'interface/audio_encoder_ilbc.h',
'interface/ilbc.h',
'abs_quant.c',
'abs_quant_loop.c',
+ 'audio_decoder_ilbc.cc',
'audio_encoder_ilbc.cc',
'augmented_cb_corr.c',
'bw_expand.c',
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/init_decode.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/init_decode.c
index 0659e5005bf..1f92480d9fd 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/init_decode.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/init_decode.c
@@ -92,5 +92,5 @@ int WebRtcIlbcfix_InitDecode( /* (o) Number of decoded samples */
iLBCdec_inst->prev_enh_pl = 0;
- return (iLBCdec_inst->blockl);
+ return (int)(iLBCdec_inst->blockl);
}
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/init_encode.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/init_encode.c
index 9c562dbfc74..f559d8441f9 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/init_encode.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/init_encode.c
@@ -67,5 +67,5 @@ int WebRtcIlbcfix_InitEncode( /* (o) Number of bytes encoded */
iLBCenc_inst->section = 0;
#endif
- return (iLBCenc_inst->no_of_bytes);
+ return (int)(iLBCenc_inst->no_of_bytes);
}
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/interface/audio_decoder_ilbc.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/interface/audio_decoder_ilbc.h
new file mode 100644
index 00000000000..a03a068a398
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/interface/audio_decoder_ilbc.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_INTERFACE_AUDIO_DECODER_ILBC_H_
+#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_INTERFACE_AUDIO_DECODER_ILBC_H_
+
+#include "webrtc/modules/audio_coding/codecs/audio_decoder.h"
+
+typedef struct iLBC_decinst_t_ IlbcDecoderInstance;
+
+namespace webrtc {
+
+class AudioDecoderIlbc final : public AudioDecoder {
+ public:
+ AudioDecoderIlbc();
+ ~AudioDecoderIlbc() override;
+ bool HasDecodePlc() const override;
+ size_t DecodePlc(size_t num_frames, int16_t* decoded) override;
+ void Reset() override;
+ size_t Channels() const override;
+
+ protected:
+ int DecodeInternal(const uint8_t* encoded,
+ size_t encoded_len,
+ int sample_rate_hz,
+ int16_t* decoded,
+ SpeechType* speech_type) override;
+
+ private:
+ IlbcDecoderInstance* dec_state_;
+ RTC_DISALLOW_COPY_AND_ASSIGN(AudioDecoderIlbc);
+};
+
+} // namespace webrtc
+#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_INTERFACE_AUDIO_DECODER_ILBC_H_
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/interface/audio_encoder_ilbc.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/interface/audio_encoder_ilbc.h
index b627c3a087d..0316d2d4c52 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/interface/audio_encoder_ilbc.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/interface/audio_encoder_ilbc.h
@@ -13,55 +13,50 @@
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/modules/audio_coding/codecs/audio_encoder.h"
-#include "webrtc/modules/audio_coding/codecs/audio_encoder_mutable_impl.h"
#include "webrtc/modules/audio_coding/codecs/ilbc/interface/ilbc.h"
namespace webrtc {
+struct CodecInst;
+
class AudioEncoderIlbc final : public AudioEncoder {
public:
struct Config {
- Config() : payload_type(102), frame_size_ms(30) {}
bool IsOk() const;
- int payload_type;
- int frame_size_ms; // Valid values are 20, 30, 40, and 60 ms.
+ int payload_type = 102;
+ int frame_size_ms = 30; // Valid values are 20, 30, 40, and 60 ms.
// Note that frame size 40 ms produces encodings with two 20 ms frames in
// them, and frame size 60 ms consists of two 30 ms frames.
};
explicit AudioEncoderIlbc(const Config& config);
+ explicit AudioEncoderIlbc(const CodecInst& codec_inst);
~AudioEncoderIlbc() override;
+ size_t MaxEncodedBytes() const override;
int SampleRateHz() const override;
int NumChannels() const override;
- size_t MaxEncodedBytes() const override;
- int Num10MsFramesInNextPacket() const override;
- int Max10MsFramesInAPacket() const override;
+ size_t Num10MsFramesInNextPacket() const override;
+ size_t Max10MsFramesInAPacket() const override;
int GetTargetBitrate() const override;
EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
const int16_t* audio,
size_t max_encoded_bytes,
uint8_t* encoded) override;
+ void Reset() override;
private:
size_t RequiredOutputSizeBytes() const;
- static const int kMaxSamplesPerPacket = 480;
- const int payload_type_;
- const int num_10ms_frames_per_packet_;
- int num_10ms_frames_buffered_;
+ static const size_t kMaxSamplesPerPacket = 480;
+ const Config config_;
+ const size_t num_10ms_frames_per_packet_;
+ size_t num_10ms_frames_buffered_;
uint32_t first_timestamp_in_buffer_;
int16_t input_buffer_[kMaxSamplesPerPacket];
IlbcEncoderInstance* encoder_;
-};
-
-struct CodecInst;
-
-class AudioEncoderMutableIlbc
- : public AudioEncoderMutableImpl<AudioEncoderIlbc> {
- public:
- explicit AudioEncoderMutableIlbc(const CodecInst& codec_inst);
+ RTC_DISALLOW_COPY_AND_ASSIGN(AudioEncoderIlbc);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/interface/ilbc.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/interface/ilbc.h
index 493496848ea..ba31f18ba5d 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/interface/ilbc.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/interface/ilbc.h
@@ -18,6 +18,8 @@
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_INTERFACE_ILBC_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ILBC_MAIN_INTERFACE_ILBC_H_
+#include <stddef.h>
+
/*
* Define the fixpoint numeric formats
*/
@@ -137,7 +139,7 @@ extern "C" {
int WebRtcIlbcfix_Encode(IlbcEncoderInstance *iLBCenc_inst,
const int16_t *speechIn,
- int16_t len,
+ size_t len,
uint8_t* encoded);
/****************************************************************************
@@ -157,8 +159,8 @@ extern "C" {
int16_t WebRtcIlbcfix_DecoderInit(IlbcDecoderInstance *iLBCdec_inst,
int16_t frameLen);
- int16_t WebRtcIlbcfix_DecoderInit20Ms(IlbcDecoderInstance *iLBCdec_inst);
- int16_t WebRtcIlbcfix_Decoderinit30Ms(IlbcDecoderInstance *iLBCdec_inst);
+ void WebRtcIlbcfix_DecoderInit20Ms(IlbcDecoderInstance* iLBCdec_inst);
+ void WebRtcIlbcfix_Decoderinit30Ms(IlbcDecoderInstance* iLBCdec_inst);
/****************************************************************************
* WebRtcIlbcfix_Decode(...)
@@ -182,17 +184,17 @@ extern "C" {
int WebRtcIlbcfix_Decode(IlbcDecoderInstance* iLBCdec_inst,
const uint8_t* encoded,
- int16_t len,
+ size_t len,
int16_t* decoded,
int16_t* speechType);
int WebRtcIlbcfix_Decode20Ms(IlbcDecoderInstance* iLBCdec_inst,
const uint8_t* encoded,
- int16_t len,
+ size_t len,
int16_t* decoded,
int16_t* speechType);
int WebRtcIlbcfix_Decode30Ms(IlbcDecoderInstance* iLBCdec_inst,
const uint8_t* encoded,
- int16_t len,
+ size_t len,
int16_t* decoded,
int16_t* speechType);
@@ -210,13 +212,12 @@ extern "C" {
* Output:
* - decoded : The "decoded" vector
*
- * Return value : >0 - Samples in decoded PLC vector
- * -1 - Error
+ * Return value : Samples in decoded PLC vector
*/
- int16_t WebRtcIlbcfix_DecodePlc(IlbcDecoderInstance *iLBCdec_inst,
- int16_t *decoded,
- int16_t noOfLostFrames);
+ size_t WebRtcIlbcfix_DecodePlc(IlbcDecoderInstance *iLBCdec_inst,
+ int16_t *decoded,
+ size_t noOfLostFrames);
/****************************************************************************
* WebRtcIlbcfix_NetEqPlc(...)
@@ -232,13 +233,12 @@ extern "C" {
* Output:
* - decoded : The "decoded" vector (nothing in this case)
*
- * Return value : >0 - Samples in decoded PLC vector
- * -1 - Error
+ * Return value : Samples in decoded PLC vector
*/
- int16_t WebRtcIlbcfix_NetEqPlc(IlbcDecoderInstance *iLBCdec_inst,
- int16_t *decoded,
- int16_t noOfLostFrames);
+ size_t WebRtcIlbcfix_NetEqPlc(IlbcDecoderInstance *iLBCdec_inst,
+ int16_t *decoded,
+ size_t noOfLostFrames);
/****************************************************************************
* WebRtcIlbcfix_version(...)
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/interpolate_samples.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/interpolate_samples.c
index 4957142145c..376dbbb668a 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/interpolate_samples.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/interpolate_samples.c
@@ -22,7 +22,7 @@
void WebRtcIlbcfix_InterpolateSamples(
int16_t *interpSamples, /* (o) The interpolated samples */
int16_t *CBmem, /* (i) The CB memory */
- int16_t lMem /* (i) Length of the CB memory */
+ size_t lMem /* (i) Length of the CB memory */
) {
int16_t *ppi, *ppo, i, j, temp1, temp2;
int16_t *tmpPtr;
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/interpolate_samples.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/interpolate_samples.h
index 586c27d3544..7549d2c2163 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/interpolate_samples.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/interpolate_samples.h
@@ -28,7 +28,7 @@
void WebRtcIlbcfix_InterpolateSamples(
int16_t *interpSamples, /* (o) The interpolated samples */
int16_t *CBmem, /* (i) The CB memory */
- int16_t lMem /* (i) Length of the CB memory */
+ size_t lMem /* (i) Length of the CB memory */
);
#endif
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/my_corr.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/my_corr.c
index 3261015258f..bd6ff561c2c 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/my_corr.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/my_corr.c
@@ -25,11 +25,12 @@
void WebRtcIlbcfix_MyCorr(
int32_t* corr, /* (o) correlation of seq1 and seq2 */
const int16_t* seq1, /* (i) first sequence */
- int16_t dim1, /* (i) dimension first seq1 */
+ size_t dim1, /* (i) dimension first seq1 */
const int16_t* seq2, /* (i) second sequence */
- int16_t dim2 /* (i) dimension seq2 */
+ size_t dim2 /* (i) dimension seq2 */
){
- int16_t max, loops;
+ int16_t max;
+ size_t loops;
int scale;
/* Calculate correlation between the two sequences. Scale the
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/my_corr.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/my_corr.h
index a74dd1e7d4c..214946410e0 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/my_corr.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/my_corr.h
@@ -28,9 +28,9 @@
void WebRtcIlbcfix_MyCorr(
int32_t* corr, /* (o) correlation of seq1 and seq2 */
const int16_t* seq1, /* (i) first sequence */
- int16_t dim1, /* (i) dimension first seq1 */
+ size_t dim1, /* (i) dimension first seq1 */
const int16_t* seq2, /* (i) second sequence */
- int16_t dim2 /* (i) dimension seq2 */
+ size_t dim2 /* (i) dimension seq2 */
);
#endif
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/nearest_neighbor.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/nearest_neighbor.c
index 30c7a034cca..2b58abc4f94 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/nearest_neighbor.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/nearest_neighbor.c
@@ -18,29 +18,18 @@
#include "defines.h"
-/*----------------------------------------------------------------*
- * Find index in array such that the array element with said
- * index is the element of said array closest to "value"
- * according to the squared-error criterion
- *---------------------------------------------------------------*/
-
-void WebRtcIlbcfix_NearestNeighbor(
- int16_t *index, /* (o) index of array element closest to value */
- int16_t *array, /* (i) data array (Q2) */
- int16_t value, /* (i) value (Q2) */
- int16_t arlength /* (i) dimension of data array (==8) */
- ){
- int i;
- int16_t diff;
- /* Stack based */
- int32_t crit[8];
-
- /* Calculate square distance */
- for(i=0;i<arlength;i++){
- diff=array[i]-value;
- crit[i] = diff * diff;
+void WebRtcIlbcfix_NearestNeighbor(size_t* index,
+ const size_t* array,
+ size_t value,
+ size_t arlength) {
+ size_t i;
+ size_t min_diff = (size_t)-1;
+ for (i = 0; i < arlength; i++) {
+ const size_t diff =
+ (array[i] < value) ? (value - array[i]) : (array[i] - value);
+ if (diff < min_diff) {
+ *index = i;
+ min_diff = diff;
+ }
}
-
- /* Find the minimum square distance */
- *index=WebRtcSpl_MinIndexW32(crit, arlength);
}
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/nearest_neighbor.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/nearest_neighbor.h
index 0c03470084a..7d7fb6fccc9 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/nearest_neighbor.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/nearest_neighbor.h
@@ -24,14 +24,13 @@
/*----------------------------------------------------------------*
* Find index in array such that the array element with said
* index is the element of said array closest to "value"
- * according to the squared-error criterion
*---------------------------------------------------------------*/
void WebRtcIlbcfix_NearestNeighbor(
- int16_t *index, /* (o) index of array element closest to value */
- int16_t *array, /* (i) data array (Q2) */
- int16_t value, /* (i) value (Q2) */
- int16_t arlength /* (i) dimension of data array (==8) */
+ size_t* index, /* (o) index of array element closest to value */
+ const size_t* array, /* (i) data array (Q2) */
+ size_t value, /* (i) value (Q2) */
+ size_t arlength /* (i) dimension of data array (==ENH_NBLOCKS_TOT) */
);
#endif
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/refiner.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/refiner.c
index 2fff362f16e..3c1265e5d50 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/refiner.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/refiner.c
@@ -30,17 +30,17 @@
*---------------------------------------------------------------*/
void WebRtcIlbcfix_Refiner(
- int16_t *updStartPos, /* (o) updated start point (Q-2) */
+ size_t *updStartPos, /* (o) updated start point (Q-2) */
int16_t *idata, /* (i) original data buffer */
- int16_t idatal, /* (i) dimension of idata */
- int16_t centerStartPos, /* (i) beginning center segment */
- int16_t estSegPos, /* (i) estimated beginning other segment (Q-2) */
+ size_t idatal, /* (i) dimension of idata */
+ size_t centerStartPos, /* (i) beginning center segment */
+ size_t estSegPos, /* (i) estimated beginning other segment (Q-2) */
int16_t *surround, /* (i/o) The contribution from this sequence
summed with earlier contributions */
int16_t gain /* (i) Gain to use for this sequence */
){
- int16_t estSegPosRounded,searchSegStartPos,searchSegEndPos,corrdim;
- int16_t tloc,tloc2,i,st,en,fraction;
+ size_t estSegPosRounded, searchSegStartPos, searchSegEndPos, corrdim;
+ size_t tloc, tloc2, i;
int32_t maxtemp, scalefact;
int16_t *filtStatePtr, *polyPtr;
@@ -55,96 +55,86 @@ void WebRtcIlbcfix_Refiner(
estSegPosRounded = (estSegPos - 2) >> 2;
- searchSegStartPos=estSegPosRounded-ENH_SLOP;
+ searchSegStartPos =
+ (estSegPosRounded < ENH_SLOP) ? 0 : (estSegPosRounded - ENH_SLOP);
- if (searchSegStartPos<0) {
- searchSegStartPos=0;
+ searchSegEndPos = estSegPosRounded + ENH_SLOP;
+ if ((searchSegEndPos + ENH_BLOCKL) >= idatal) {
+ searchSegEndPos = idatal - ENH_BLOCKL - 1;
}
- searchSegEndPos=estSegPosRounded+ENH_SLOP;
- if(searchSegEndPos+ENH_BLOCKL >= idatal) {
- searchSegEndPos=idatal-ENH_BLOCKL-1;
- }
- corrdim=searchSegEndPos-searchSegStartPos+1;
+ corrdim = searchSegEndPos + 1 - searchSegStartPos;
/* compute upsampled correlation and find
location of max */
- WebRtcIlbcfix_MyCorr(corrVecTemp,idata+searchSegStartPos,
- (int16_t)(corrdim+ENH_BLOCKL-1),idata+centerStartPos,ENH_BLOCKL);
+ WebRtcIlbcfix_MyCorr(corrVecTemp, idata + searchSegStartPos,
+ corrdim + ENH_BLOCKL - 1, idata + centerStartPos,
+ ENH_BLOCKL);
/* Calculate the rescaling factor for the correlation in order to
put the correlation in a int16_t vector instead */
- maxtemp=WebRtcSpl_MaxAbsValueW32(corrVecTemp, corrdim);
+ maxtemp = WebRtcSpl_MaxAbsValueW32(corrVecTemp, corrdim);
- scalefact=WebRtcSpl_GetSizeInBits(maxtemp)-15;
+ scalefact = WebRtcSpl_GetSizeInBits(maxtemp) - 15;
- if (scalefact>0) {
- for (i=0;i<corrdim;i++) {
+ if (scalefact > 0) {
+ for (i = 0; i < corrdim; i++) {
corrVec[i] = (int16_t)(corrVecTemp[i] >> scalefact);
}
} else {
- for (i=0;i<corrdim;i++) {
- corrVec[i]=(int16_t)corrVecTemp[i];
+ for (i = 0; i < corrdim; i++) {
+ corrVec[i] = (int16_t)corrVecTemp[i];
}
}
/* In order to guarantee that all values are initialized */
- for (i=corrdim;i<ENH_CORRDIM;i++) {
- corrVec[i]=0;
+ for (i = corrdim; i < ENH_CORRDIM; i++) {
+ corrVec[i] = 0;
}
/* Upsample the correlation */
- WebRtcIlbcfix_EnhUpsample(corrVecUps,corrVec);
+ WebRtcIlbcfix_EnhUpsample(corrVecUps, corrVec);
/* Find maximum */
- tloc=WebRtcSpl_MaxIndexW32(corrVecUps, ENH_UPS0 * corrdim);
+ tloc = WebRtcSpl_MaxIndexW32(corrVecUps, ENH_UPS0 * corrdim);
/* make vector can be upsampled without ever running outside
bounds */
- *updStartPos = (int16_t)(searchSegStartPos * 4) + tloc + 4;
+ *updStartPos = searchSegStartPos * 4 + tloc + 4;
tloc2 = (tloc + 3) >> 2;
- st=searchSegStartPos+tloc2-ENH_FL0;
-
/* initialize the vector to be filtered, stuff with zeros
when data is outside idata buffer */
- if(st<0){
- WebRtcSpl_MemSetW16(vect, 0, (int16_t)(-st));
- WEBRTC_SPL_MEMCPY_W16(&vect[-st], idata, (ENH_VECTL+st));
- }
- else{
- en=st+ENH_VECTL;
-
- if(en>idatal){
- WEBRTC_SPL_MEMCPY_W16(vect, &idata[st],
- (ENH_VECTL-(en-idatal)));
- WebRtcSpl_MemSetW16(&vect[ENH_VECTL-(en-idatal)], 0,
- (int16_t)(en-idatal));
- }
- else {
+ if (ENH_FL0 > (searchSegStartPos + tloc2)) {
+ const size_t st = ENH_FL0 - searchSegStartPos - tloc2;
+ WebRtcSpl_MemSetW16(vect, 0, st);
+ WEBRTC_SPL_MEMCPY_W16(&vect[st], idata, ENH_VECTL - st);
+ } else {
+ const size_t st = searchSegStartPos + tloc2 - ENH_FL0;
+ if ((st + ENH_VECTL) > idatal) {
+ const size_t en = st + ENH_VECTL - idatal;
+ WEBRTC_SPL_MEMCPY_W16(vect, &idata[st], ENH_VECTL - en);
+ WebRtcSpl_MemSetW16(&vect[ENH_VECTL - en], 0, en);
+ } else {
WEBRTC_SPL_MEMCPY_W16(vect, &idata[st], ENH_VECTL);
}
}
- /* Calculate which of the 4 fractions to use */
- fraction = (int16_t)(tloc2 * ENH_UPS0) - tloc;
/* compute the segment (this is actually a convolution) */
-
filtStatePtr = filt + 6;
- polyPtr = (int16_t*)WebRtcIlbcfix_kEnhPolyPhaser[fraction];
- for (i=0;i<7;i++) {
+ polyPtr = (int16_t*)WebRtcIlbcfix_kEnhPolyPhaser[tloc2 * ENH_UPS0 - tloc];
+ for (i = 0; i < 7; i++) {
*filtStatePtr-- = *polyPtr++;
}
- WebRtcSpl_FilterMAFastQ12(
- &vect[6], vect, filt,
- ENH_FLO_MULT2_PLUS1, ENH_BLOCKL);
+ WebRtcSpl_FilterMAFastQ12(&vect[6], vect, filt, ENH_FLO_MULT2_PLUS1,
+ ENH_BLOCKL);
- /* Add the contribution from this vector (scaled with gain) to the total surround vector */
- WebRtcSpl_AddAffineVectorToVector(
- surround, vect, gain,
- (int32_t)32768, 16, ENH_BLOCKL);
+ /* Add the contribution from this vector (scaled with gain) to the total
+ surround vector */
+ WebRtcSpl_AddAffineVectorToVector(surround, vect, gain, 32768, 16,
+ ENH_BLOCKL);
return;
}
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/refiner.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/refiner.h
index d13996152df..f8a2abc2d69 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/refiner.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/refiner.h
@@ -30,11 +30,11 @@
*---------------------------------------------------------------*/
void WebRtcIlbcfix_Refiner(
- int16_t *updStartPos, /* (o) updated start point (Q-2) */
+ size_t *updStartPos, /* (o) updated start point (Q-2) */
int16_t *idata, /* (i) original data buffer */
- int16_t idatal, /* (i) dimension of idata */
- int16_t centerStartPos, /* (i) beginning center segment */
- int16_t estSegPos, /* (i) estimated beginning other segment (Q-2) */
+ size_t idatal, /* (i) dimension of idata */
+ size_t centerStartPos, /* (i) beginning center segment */
+ size_t estSegPos, /* (i) estimated beginning other segment (Q-2) */
int16_t *surround, /* (i/o) The contribution from this sequence
summed with earlier contributions */
int16_t gain /* (i) Gain to use for this sequence */
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/simple_interpolate_lsf.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/simple_interpolate_lsf.c
index d89770ec0e4..e63dda8c8f7 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/simple_interpolate_lsf.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/simple_interpolate_lsf.c
@@ -42,7 +42,8 @@ void WebRtcIlbcfix_SimpleInterpolateLsf(
IlbcEncoder *iLBCenc_inst
/* (i/o) the encoder state structure */
) {
- int i, pos, lp_length;
+ size_t i;
+ int pos, lp_length;
int16_t *lsf2, *lsfdeq2;
/* Stack based */
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/simple_lpc_analysis.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/simple_lpc_analysis.c
index dfc637bef42..72d80e04306 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/simple_lpc_analysis.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/simple_lpc_analysis.c
@@ -34,7 +34,7 @@ void WebRtcIlbcfix_SimpleLpcAnalysis(
) {
int k;
int scale;
- int16_t is;
+ size_t is;
int16_t stability;
/* Stack based */
int16_t A[LPC_FILTERORDER + 1];
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/state_construct.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/state_construct.c
index 324b670c9bd..29fe91b87ea 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/state_construct.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/state_construct.c
@@ -24,14 +24,14 @@
*---------------------------------------------------------------*/
void WebRtcIlbcfix_StateConstruct(
- int16_t idxForMax, /* (i) 6-bit index for the quantization of
+ size_t idxForMax, /* (i) 6-bit index for the quantization of
max amplitude */
int16_t *idxVec, /* (i) vector of quantization indexes */
int16_t *syntDenum, /* (i) synthesis filter denumerator */
int16_t *Out_fix, /* (o) the decoded state vector */
- int16_t len /* (i) length of a state vector */
+ size_t len /* (i) length of a state vector */
) {
- int k;
+ size_t k;
int16_t maxVal;
int16_t *tmp1, *tmp2, *tmp3;
/* Stack based */
@@ -96,7 +96,7 @@ void WebRtcIlbcfix_StateConstruct(
/* Run MA filter + AR filter */
WebRtcSpl_FilterMAFastQ12(
sampleVal, sampleMa,
- numerator, LPC_FILTERORDER+1, (int16_t)(len + LPC_FILTERORDER));
+ numerator, LPC_FILTERORDER+1, len + LPC_FILTERORDER);
WebRtcSpl_MemSetW16(&sampleMa[len + LPC_FILTERORDER], 0, (len - LPC_FILTERORDER));
WebRtcSpl_FilterARFastQ12(
sampleMa, sampleAr,
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/state_construct.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/state_construct.h
index 22d75e2444f..26319193b8f 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/state_construct.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/state_construct.h
@@ -24,12 +24,12 @@
*---------------------------------------------------------------*/
void WebRtcIlbcfix_StateConstruct(
- int16_t idxForMax, /* (i) 6-bit index for the quantization of
+ size_t idxForMax, /* (i) 6-bit index for the quantization of
max amplitude */
int16_t *idxVec, /* (i) vector of quantization indexes */
int16_t *syntDenum, /* (i) synthesis filter denumerator */
int16_t *Out_fix, /* (o) the decoded state vector */
- int16_t len /* (i) length of a state vector */
+ size_t len /* (i) length of a state vector */
);
#endif
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/state_search.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/state_search.c
index b2214c786f5..295c543d84e 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/state_search.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/state_search.c
@@ -33,7 +33,7 @@ void WebRtcIlbcfix_StateSearch(
int16_t *syntDenum, /* (i) lpc synthesis filter */
int16_t *weightDenum /* (i) weighting filter denuminator */
) {
- int16_t k, index;
+ size_t k, index;
int16_t maxVal;
int16_t scale, shift;
int32_t maxValsq;
@@ -64,9 +64,9 @@ void WebRtcIlbcfix_StateSearch(
/* Run the Zero-Pole filter (Ciurcular convolution) */
WebRtcSpl_MemSetW16(residualLongVec, 0, LPC_FILTERORDER);
- WebRtcSpl_FilterMAFastQ12(
- residualLong, sampleMa,
- numerator, LPC_FILTERORDER+1, (int16_t)(iLBCenc_inst->state_short_len + LPC_FILTERORDER));
+ WebRtcSpl_FilterMAFastQ12(residualLong, sampleMa, numerator,
+ LPC_FILTERORDER + 1,
+ iLBCenc_inst->state_short_len + LPC_FILTERORDER);
WebRtcSpl_MemSetW16(&sampleMa[iLBCenc_inst->state_short_len + LPC_FILTERORDER], 0, iLBCenc_inst->state_short_len - LPC_FILTERORDER);
WebRtcSpl_FilterARFastQ12(
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/swap_bytes.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/swap_bytes.c
index 8bbac42b1cc..b795e56ac44 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/swap_bytes.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/swap_bytes.c
@@ -24,10 +24,10 @@
void WebRtcIlbcfix_SwapBytes(
const uint16_t* input, /* (i) the sequence to swap */
- int16_t wordLength, /* (i) number or uint16_t to swap */
+ size_t wordLength, /* (i) number or uint16_t to swap */
uint16_t* output /* (o) the swapped sequence */
) {
- int k;
+ size_t k;
for (k = wordLength; k > 0; k--) {
*output++ = (*input >> 8)|(*input << 8);
input++;
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/swap_bytes.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/swap_bytes.h
index a909b2cda4d..a4484d621e0 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/swap_bytes.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/swap_bytes.h
@@ -27,7 +27,7 @@
void WebRtcIlbcfix_SwapBytes(
const uint16_t* input, /* (i) the sequence to swap */
- int16_t wordLength, /* (i) number or uint16_t to swap */
+ size_t wordLength, /* (i) number or uint16_t to swap */
uint16_t* output /* (o) the swapped sequence */
);
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/test/iLBC_test.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/test/iLBC_test.c
index 6ee3df4db6e..1199c816d89 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/test/iLBC_test.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/test/iLBC_test.c
@@ -47,12 +47,11 @@ int main(int argc, char* argv[])
int16_t data[BLOCKL_MAX];
uint8_t encoded_data[2 * ILBCNOOFWORDS_MAX];
int16_t decoded_data[BLOCKL_MAX];
- int len;
- short pli, mode;
+ int len_int, mode;
+ short pli;
int blockcount = 0;
int packetlosscount = 0;
- int frameLen;
- size_t len_i16s;
+ size_t frameLen, len, len_i16s;
int16_t speechType;
IlbcEncoderInstance *Enc_Inst;
IlbcDecoderInstance *Dec_Inst;
@@ -153,23 +152,23 @@ int main(int argc, char* argv[])
WebRtcIlbcfix_EncoderInit(Enc_Inst, mode);
WebRtcIlbcfix_DecoderInit(Dec_Inst, mode);
- frameLen = mode*8;
+ frameLen = (size_t)(mode*8);
/* loop over input blocks */
- while (((int16_t)fread(data,sizeof(int16_t),frameLen,ifileid))==
- frameLen) {
+ while (fread(data,sizeof(int16_t),frameLen,ifileid) == frameLen) {
blockcount++;
/* encoding */
fprintf(stderr, "--- Encoding block %i --- ",blockcount);
- len = WebRtcIlbcfix_Encode(Enc_Inst, data, (int16_t)frameLen, encoded_data);
- if (len < 0) {
+ len_int = WebRtcIlbcfix_Encode(Enc_Inst, data, frameLen, encoded_data);
+ if (len_int < 0) {
fprintf(stderr, "Error encoding\n");
exit(0);
}
+ len = (size_t)len_int;
fprintf(stderr, "\r");
/* write byte file */
@@ -204,12 +203,13 @@ int main(int argc, char* argv[])
fprintf(stderr, "--- Decoding block %i --- ",blockcount);
if (pli==1) {
- len=WebRtcIlbcfix_Decode(Dec_Inst, encoded_data,
- (int16_t)len, decoded_data,&speechType);
- if (len < 0) {
+ len_int=WebRtcIlbcfix_Decode(Dec_Inst, encoded_data,
+ len, decoded_data,&speechType);
+ if (len_int < 0) {
fprintf(stderr, "Error decoding\n");
exit(0);
}
+ len = (size_t)len_int;
} else {
len=WebRtcIlbcfix_DecodePlc(Dec_Inst, decoded_data, 1);
}
@@ -217,8 +217,7 @@ int main(int argc, char* argv[])
/* write output file */
- if (fwrite(decoded_data, sizeof(int16_t), len,
- ofileid) != (size_t)len) {
+ if (fwrite(decoded_data, sizeof(int16_t), len, ofileid) != len) {
return -1;
}
}
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/test/iLBC_testLib.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/test/iLBC_testLib.c
index b4e36b62947..f14192c2ae2 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/test/iLBC_testLib.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/test/iLBC_testLib.c
@@ -41,15 +41,15 @@ int main(int argc, char* argv[])
{
FILE *ifileid,*efileid,*ofileid, *chfileid;
short encoded_data[55], data[240], speechType;
- int len;
- short mode, pli;
- size_t readlen;
+ int len_int, mode;
+ short pli;
+ size_t len, readlen;
int blockcount = 0;
IlbcEncoderInstance *Enc_Inst;
IlbcDecoderInstance *Dec_Inst;
#ifdef JUNK_DATA
- int i;
+ size_t i;
FILE *seedfile;
unsigned int random_seed = (unsigned int) time(NULL);//1196764538
#endif
@@ -136,11 +136,12 @@ int main(int argc, char* argv[])
/* encoding */
fprintf(stderr, "--- Encoding block %i --- ",blockcount);
- len=WebRtcIlbcfix_Encode(Enc_Inst, data, (short)readlen, encoded_data);
- if (len < 0) {
+ len_int=WebRtcIlbcfix_Encode(Enc_Inst, data, readlen, encoded_data);
+ if (len_int < 0) {
fprintf(stderr, "Error encoding\n");
exit(0);
}
+ len = (size_t)len_int;
fprintf(stderr, "\r");
#ifdef JUNK_DATA
@@ -174,12 +175,13 @@ int main(int argc, char* argv[])
/* decoding */
fprintf(stderr, "--- Decoding block %i --- ",blockcount);
if (pli==1) {
- len=WebRtcIlbcfix_Decode(Dec_Inst, encoded_data, (int16_t)len, data,
- &speechType);
- if (len < 0) {
+ len_int = WebRtcIlbcfix_Decode(Dec_Inst, encoded_data, len, data,
+ &speechType);
+ if (len_int < 0) {
fprintf(stderr, "Error decoding\n");
exit(0);
}
+ len = (size_t)len_int;
} else {
len=WebRtcIlbcfix_DecodePlc(Dec_Inst, data, 1);
}
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/window32_w32.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/window32_w32.c
index dbecc33abe6..dc12a5a7c43 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/window32_w32.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/window32_w32.c
@@ -26,9 +26,9 @@ void WebRtcIlbcfix_Window32W32(
int32_t *z, /* Output */
int32_t *x, /* Input (same domain as Output)*/
const int32_t *y, /* Q31 Window */
- int16_t N /* length to process */
+ size_t N /* length to process */
) {
- int16_t i;
+ size_t i;
int16_t x_low, x_hi, y_low, y_hi;
int16_t left_shifts;
int32_t temp;
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/window32_w32.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/window32_w32.h
index 4ee6fce54ff..27ed1b6a333 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/window32_w32.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/window32_w32.h
@@ -29,7 +29,7 @@ void WebRtcIlbcfix_Window32W32(
int32_t *z, /* Output */
int32_t *x, /* Input (same domain as Output)*/
const int32_t *y, /* Q31 Window */
- int16_t N /* length to process */
+ size_t N /* length to process */
);
#endif
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/xcorr_coef.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/xcorr_coef.c
index 53d95bfa3dd..0d898c54a48 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/xcorr_coef.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/xcorr_coef.c
@@ -23,16 +23,16 @@
* crossCorr*crossCorr/(energy) criteria
*---------------------------------------------------------------*/
-int WebRtcIlbcfix_XcorrCoef(
+size_t WebRtcIlbcfix_XcorrCoef(
int16_t *target, /* (i) first array */
int16_t *regressor, /* (i) second array */
- int16_t subl, /* (i) dimension arrays */
- int16_t searchLen, /* (i) the search lenght */
- int16_t offset, /* (i) samples offset between arrays */
+ size_t subl, /* (i) dimension arrays */
+ size_t searchLen, /* (i) the search length */
+ size_t offset, /* (i) samples offset between arrays */
int16_t step /* (i) +1 or -1 */
){
- int k;
- int16_t maxlag;
+ size_t k;
+ size_t maxlag;
int16_t pos;
int16_t max;
int16_t crossCorrScale, Energyscale;
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/xcorr_coef.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/xcorr_coef.h
index 1f4c58d9349..9b81c0fe979 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/xcorr_coef.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/ilbc/xcorr_coef.h
@@ -26,12 +26,12 @@
* crossCorr*crossCorr/(energy) criteria
*---------------------------------------------------------------*/
-int WebRtcIlbcfix_XcorrCoef(
+size_t WebRtcIlbcfix_XcorrCoef(
int16_t *target, /* (i) first array */
int16_t *regressor, /* (i) second array */
- int16_t subl, /* (i) dimension arrays */
- int16_t searchLen, /* (i) the search lenght */
- int16_t offset, /* (i) samples offset between arrays */
+ size_t subl, /* (i) dimension arrays */
+ size_t searchLen, /* (i) the search length */
+ size_t offset, /* (i) samples offset between arrays */
int16_t step /* (i) +1 or -1 */
);
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/audio_decoder_isac_t.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/audio_decoder_isac_t.h
new file mode 100644
index 00000000000..845af424795
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/audio_decoder_isac_t.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_AUDIO_DECODER_ISAC_T_H_
+#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_AUDIO_DECODER_ISAC_T_H_
+
+#include <vector>
+
+#include "webrtc/modules/audio_coding/codecs/audio_decoder.h"
+#include "webrtc/modules/audio_coding/codecs/isac/locked_bandwidth_info.h"
+
+namespace webrtc {
+
+template <typename T>
+class AudioDecoderIsacT final : public AudioDecoder {
+ public:
+ AudioDecoderIsacT();
+ explicit AudioDecoderIsacT(LockedIsacBandwidthInfo* bwinfo);
+ ~AudioDecoderIsacT() override;
+
+ bool HasDecodePlc() const override;
+ size_t DecodePlc(size_t num_frames, int16_t* decoded) override;
+ void Reset() override;
+ int IncomingPacket(const uint8_t* payload,
+ size_t payload_len,
+ uint16_t rtp_sequence_number,
+ uint32_t rtp_timestamp,
+ uint32_t arrival_timestamp) override;
+ int ErrorCode() override;
+ size_t Channels() const override;
+ int DecodeInternal(const uint8_t* encoded,
+ size_t encoded_len,
+ int sample_rate_hz,
+ int16_t* decoded,
+ SpeechType* speech_type) override;
+
+ private:
+ typename T::instance_type* isac_state_;
+ LockedIsacBandwidthInfo* bwinfo_;
+ int decoder_sample_rate_hz_;
+
+ RTC_DISALLOW_COPY_AND_ASSIGN(AudioDecoderIsacT);
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_AUDIO_DECODER_ISAC_T_H_
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/audio_decoder_isac_t_impl.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/audio_decoder_isac_t_impl.h
new file mode 100644
index 00000000000..126c129814b
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/audio_decoder_isac_t_impl.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_AUDIO_DECODER_ISAC_T_IMPL_H_
+#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_AUDIO_DECODER_ISAC_T_IMPL_H_
+
+#include "webrtc/modules/audio_coding/codecs/isac/main/interface/audio_decoder_isac.h"
+
+#include "webrtc/base/checks.h"
+
+namespace webrtc {
+
+template <typename T>
+AudioDecoderIsacT<T>::AudioDecoderIsacT()
+ : AudioDecoderIsacT(nullptr) {}
+
+template <typename T>
+AudioDecoderIsacT<T>::AudioDecoderIsacT(LockedIsacBandwidthInfo* bwinfo)
+ : bwinfo_(bwinfo), decoder_sample_rate_hz_(-1) {
+ RTC_CHECK_EQ(0, T::Create(&isac_state_));
+ T::DecoderInit(isac_state_);
+ if (bwinfo_) {
+ IsacBandwidthInfo bi;
+ T::GetBandwidthInfo(isac_state_, &bi);
+ bwinfo_->Set(bi);
+ }
+}
+
+template <typename T>
+AudioDecoderIsacT<T>::~AudioDecoderIsacT() {
+ RTC_CHECK_EQ(0, T::Free(isac_state_));
+}
+
+template <typename T>
+int AudioDecoderIsacT<T>::DecodeInternal(const uint8_t* encoded,
+ size_t encoded_len,
+ int sample_rate_hz,
+ int16_t* decoded,
+ SpeechType* speech_type) {
+ // We want to create the illusion that iSAC supports 48000 Hz decoding, while
+ // in fact it outputs 32000 Hz. This is the iSAC fullband mode.
+ if (sample_rate_hz == 48000)
+ sample_rate_hz = 32000;
+ RTC_CHECK(sample_rate_hz == 16000 || sample_rate_hz == 32000)
+ << "Unsupported sample rate " << sample_rate_hz;
+ if (sample_rate_hz != decoder_sample_rate_hz_) {
+ RTC_CHECK_EQ(0, T::SetDecSampRate(isac_state_, sample_rate_hz));
+ decoder_sample_rate_hz_ = sample_rate_hz;
+ }
+ int16_t temp_type = 1; // Default is speech.
+ int ret =
+ T::DecodeInternal(isac_state_, encoded, encoded_len, decoded, &temp_type);
+ *speech_type = ConvertSpeechType(temp_type);
+ return ret;
+}
+
+template <typename T>
+bool AudioDecoderIsacT<T>::HasDecodePlc() const {
+ return false;
+}
+
+template <typename T>
+size_t AudioDecoderIsacT<T>::DecodePlc(size_t num_frames, int16_t* decoded) {
+ return T::DecodePlc(isac_state_, decoded, num_frames);
+}
+
+template <typename T>
+void AudioDecoderIsacT<T>::Reset() {
+ T::DecoderInit(isac_state_);
+}
+
+template <typename T>
+int AudioDecoderIsacT<T>::IncomingPacket(const uint8_t* payload,
+ size_t payload_len,
+ uint16_t rtp_sequence_number,
+ uint32_t rtp_timestamp,
+ uint32_t arrival_timestamp) {
+ int ret = T::UpdateBwEstimate(isac_state_, payload, payload_len,
+ rtp_sequence_number, rtp_timestamp,
+ arrival_timestamp);
+ if (bwinfo_) {
+ IsacBandwidthInfo bwinfo;
+ T::GetBandwidthInfo(isac_state_, &bwinfo);
+ bwinfo_->Set(bwinfo);
+ }
+ return ret;
+}
+
+template <typename T>
+int AudioDecoderIsacT<T>::ErrorCode() {
+ return T::GetErrorCode(isac_state_);
+}
+
+template <typename T>
+size_t AudioDecoderIsacT<T>::Channels() const {
+ return 1;
+}
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_AUDIO_DECODER_ISAC_T_IMPL_H_
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t.h
index 49df3c68be3..762757ace74 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t.h
@@ -13,17 +13,15 @@
#include <vector>
-#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/base/thread_annotations.h"
-#include "webrtc/modules/audio_coding/codecs/audio_decoder.h"
#include "webrtc/modules/audio_coding/codecs/audio_encoder.h"
+#include "webrtc/modules/audio_coding/codecs/isac/locked_bandwidth_info.h"
namespace webrtc {
-class CriticalSectionWrapper;
+struct CodecInst;
template <typename T>
-class AudioEncoderDecoderIsacT : public AudioEncoder, public AudioDecoder {
+class AudioEncoderIsacT final : public AudioEncoder {
public:
// Allowed combinations of sample rate, frame size, and bit rate are
// - 16000 Hz, 30 ms, 10000-32000 bps
@@ -31,101 +29,70 @@ class AudioEncoderDecoderIsacT : public AudioEncoder, public AudioDecoder {
// - 32000 Hz, 30 ms, 10000-56000 bps (if T has super-wideband support)
// - 48000 Hz, 30 ms, 10000-56000 bps (if T has super-wideband support)
struct Config {
- Config();
bool IsOk() const;
- int payload_type;
- int sample_rate_hz;
- int frame_size_ms;
- int bit_rate; // Limit on the short-term average bit rate, in bits/s.
- int max_payload_size_bytes;
- int max_bit_rate;
+ LockedIsacBandwidthInfo* bwinfo = nullptr;
+
+ int payload_type = 103;
+ int sample_rate_hz = 16000;
+ int frame_size_ms = 30;
+ int bit_rate = kDefaultBitRate; // Limit on the short-term average bit
+ // rate, in bits/s.
+ int max_payload_size_bytes = -1;
+ int max_bit_rate = -1;
// If true, the encoder will dynamically adjust frame size and bit rate;
// the configured values are then merely the starting point.
- bool adaptive_mode;
+ bool adaptive_mode = false;
// In adaptive mode, prevent adaptive changes to the frame size. (Not used
// in nonadaptive mode.)
- bool enforce_frame_size;
+ bool enforce_frame_size = false;
};
- explicit AudioEncoderDecoderIsacT(const Config& config);
- ~AudioEncoderDecoderIsacT() override;
+ explicit AudioEncoderIsacT(const Config& config);
+ explicit AudioEncoderIsacT(const CodecInst& codec_inst,
+ LockedIsacBandwidthInfo* bwinfo);
+ ~AudioEncoderIsacT() override;
- // AudioEncoder public methods.
+ size_t MaxEncodedBytes() const override;
int SampleRateHz() const override;
int NumChannels() const override;
- size_t MaxEncodedBytes() const override;
- int Num10MsFramesInNextPacket() const override;
- int Max10MsFramesInAPacket() const override;
+ size_t Num10MsFramesInNextPacket() const override;
+ size_t Max10MsFramesInAPacket() const override;
int GetTargetBitrate() const override;
-
- // AudioDecoder methods.
- bool HasDecodePlc() const override;
- int DecodePlc(int num_frames, int16_t* decoded) override;
- int Init() override;
- int IncomingPacket(const uint8_t* payload,
- size_t payload_len,
- uint16_t rtp_sequence_number,
- uint32_t rtp_timestamp,
- uint32_t arrival_timestamp) override;
- int ErrorCode() override;
- size_t Channels() const override { return 1; }
-
- // AudioEncoder protected method.
EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
const int16_t* audio,
size_t max_encoded_bytes,
uint8_t* encoded) override;
-
- // AudioDecoder protected method.
- int DecodeInternal(const uint8_t* encoded,
- size_t encoded_len,
- int sample_rate_hz,
- int16_t* decoded,
- SpeechType* speech_type) override;
+ void Reset() override;
private:
// This value is taken from STREAM_SIZE_MAX_60 for iSAC float (60 ms) and
// STREAM_MAXW16_60MS for iSAC fix (60 ms).
static const size_t kSufficientEncodeBufferSizeBytes = 400;
- const int payload_type_;
-
- // iSAC encoder/decoder state, guarded by a mutex to ensure that encode calls
- // from one thread won't clash with decode calls from another thread.
- // Note: PT_GUARDED_BY is disabled since it is not yet supported by clang.
- const rtc::scoped_ptr<CriticalSectionWrapper> state_lock_;
- typename T::instance_type* isac_state_
- GUARDED_BY(state_lock_) /* PT_GUARDED_BY(lock_)*/;
+ static const int kDefaultBitRate = 32000;
- int decoder_sample_rate_hz_ GUARDED_BY(state_lock_);
+ // Recreate the iSAC encoder instance with the given settings, and save them.
+ void RecreateEncoderInstance(const Config& config);
- // Must be acquired before state_lock_.
- const rtc::scoped_ptr<CriticalSectionWrapper> lock_;
+ Config config_;
+ typename T::instance_type* isac_state_ = nullptr;
+ LockedIsacBandwidthInfo* bwinfo_ = nullptr;
// Have we accepted input but not yet emitted it in a packet?
- bool packet_in_progress_ GUARDED_BY(lock_);
+ bool packet_in_progress_ = false;
// Timestamp of the first input of the currently in-progress packet.
- uint32_t packet_timestamp_ GUARDED_BY(lock_);
+ uint32_t packet_timestamp_;
// Timestamp of the previously encoded packet.
- uint32_t last_encoded_timestamp_ GUARDED_BY(lock_);
+ uint32_t last_encoded_timestamp_;
- const int target_bitrate_bps_;
-
- DISALLOW_COPY_AND_ASSIGN(AudioEncoderDecoderIsacT);
-};
-
-struct CodecInst;
-
-class AudioEncoderDecoderMutableIsac : public AudioEncoderMutable,
- public AudioDecoder {
- public:
- virtual void UpdateSettings(const CodecInst& codec_inst) = 0;
+ RTC_DISALLOW_COPY_AND_ASSIGN(AudioEncoderIsacT);
};
} // namespace webrtc
+
#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_AUDIO_ENCODER_ISAC_T_H_
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t_impl.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t_impl.h
index d2b20e3b941..fbc1ba91399 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t_impl.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t_impl.h
@@ -13,35 +13,34 @@
#include "webrtc/modules/audio_coding/codecs/isac/main/interface/audio_encoder_isac.h"
-#include <algorithm>
-
#include "webrtc/base/checks.h"
-#include "webrtc/modules/audio_coding/codecs/isac/main/interface/isac.h"
-#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
namespace webrtc {
-const int kIsacPayloadType = 103;
-const int kDefaultBitRate = 32000;
-
template <typename T>
-AudioEncoderDecoderIsacT<T>::Config::Config()
- : payload_type(kIsacPayloadType),
- sample_rate_hz(16000),
- frame_size_ms(30),
- bit_rate(kDefaultBitRate),
- max_payload_size_bytes(-1),
- max_bit_rate(-1),
- adaptive_mode(false),
- enforce_frame_size(false) {
+typename AudioEncoderIsacT<T>::Config CreateIsacConfig(
+ const CodecInst& codec_inst,
+ LockedIsacBandwidthInfo* bwinfo) {
+ typename AudioEncoderIsacT<T>::Config config;
+ config.bwinfo = bwinfo;
+ config.payload_type = codec_inst.pltype;
+ config.sample_rate_hz = codec_inst.plfreq;
+ config.frame_size_ms =
+ rtc::CheckedDivExact(1000 * codec_inst.pacsize, config.sample_rate_hz);
+ config.adaptive_mode = (codec_inst.rate == -1);
+ if (codec_inst.rate != -1)
+ config.bit_rate = codec_inst.rate;
+ return config;
}
template <typename T>
-bool AudioEncoderDecoderIsacT<T>::Config::IsOk() const {
+bool AudioEncoderIsacT<T>::Config::IsOk() const {
if (max_bit_rate < 32000 && max_bit_rate != -1)
return false;
if (max_payload_size_bytes < 120 && max_payload_size_bytes != -1)
return false;
+ if (adaptive_mode && !bwinfo)
+ return false;
switch (sample_rate_hz) {
case 16000:
if (max_bit_rate > 53400)
@@ -65,100 +64,77 @@ bool AudioEncoderDecoderIsacT<T>::Config::IsOk() const {
}
template <typename T>
-AudioEncoderDecoderIsacT<T>::AudioEncoderDecoderIsacT(const Config& config)
- : payload_type_(config.payload_type),
- state_lock_(CriticalSectionWrapper::CreateCriticalSection()),
- decoder_sample_rate_hz_(0),
- lock_(CriticalSectionWrapper::CreateCriticalSection()),
- packet_in_progress_(false),
- target_bitrate_bps_(config.adaptive_mode ? -1 : (config.bit_rate == 0
- ? kDefaultBitRate
- : config.bit_rate)) {
- CHECK(config.IsOk());
- CHECK_EQ(0, T::Create(&isac_state_));
- CHECK_EQ(0, T::EncoderInit(isac_state_, config.adaptive_mode ? 0 : 1));
- CHECK_EQ(0, T::SetEncSampRate(isac_state_, config.sample_rate_hz));
- const int bit_rate = config.bit_rate == 0 ? kDefaultBitRate : config.bit_rate;
- if (config.adaptive_mode) {
- CHECK_EQ(0, T::ControlBwe(isac_state_, bit_rate, config.frame_size_ms,
- config.enforce_frame_size));
- } else {
- CHECK_EQ(0, T::Control(isac_state_, bit_rate, config.frame_size_ms));
- }
- // When config.sample_rate_hz is set to 48000 Hz (iSAC-fb), the decoder is
- // still set to 32000 Hz, since there is no full-band mode in the decoder.
- CHECK_EQ(0, T::SetDecSampRate(isac_state_,
- std::min(config.sample_rate_hz, 32000)));
- if (config.max_payload_size_bytes != -1)
- CHECK_EQ(0,
- T::SetMaxPayloadSize(isac_state_, config.max_payload_size_bytes));
- if (config.max_bit_rate != -1)
- CHECK_EQ(0, T::SetMaxRate(isac_state_, config.max_bit_rate));
- CHECK_EQ(0, T::DecoderInit(isac_state_));
+AudioEncoderIsacT<T>::AudioEncoderIsacT(const Config& config) {
+ RecreateEncoderInstance(config);
}
template <typename T>
-AudioEncoderDecoderIsacT<T>::~AudioEncoderDecoderIsacT() {
- CHECK_EQ(0, T::Free(isac_state_));
+AudioEncoderIsacT<T>::AudioEncoderIsacT(const CodecInst& codec_inst,
+ LockedIsacBandwidthInfo* bwinfo)
+ : AudioEncoderIsacT(CreateIsacConfig<T>(codec_inst, bwinfo)) {}
+
+template <typename T>
+AudioEncoderIsacT<T>::~AudioEncoderIsacT() {
+ RTC_CHECK_EQ(0, T::Free(isac_state_));
}
template <typename T>
-int AudioEncoderDecoderIsacT<T>::SampleRateHz() const {
- CriticalSectionScoped cs(state_lock_.get());
- return T::EncSampRate(isac_state_);
+size_t AudioEncoderIsacT<T>::MaxEncodedBytes() const {
+ return kSufficientEncodeBufferSizeBytes;
}
template <typename T>
-int AudioEncoderDecoderIsacT<T>::NumChannels() const {
- return 1;
+int AudioEncoderIsacT<T>::SampleRateHz() const {
+ return T::EncSampRate(isac_state_);
}
template <typename T>
-size_t AudioEncoderDecoderIsacT<T>::MaxEncodedBytes() const {
- return kSufficientEncodeBufferSizeBytes;
+int AudioEncoderIsacT<T>::NumChannels() const {
+ return 1;
}
template <typename T>
-int AudioEncoderDecoderIsacT<T>::Num10MsFramesInNextPacket() const {
- CriticalSectionScoped cs(state_lock_.get());
+size_t AudioEncoderIsacT<T>::Num10MsFramesInNextPacket() const {
const int samples_in_next_packet = T::GetNewFrameLen(isac_state_);
- return rtc::CheckedDivExact(samples_in_next_packet,
- rtc::CheckedDivExact(SampleRateHz(), 100));
+ return static_cast<size_t>(
+ rtc::CheckedDivExact(samples_in_next_packet,
+ rtc::CheckedDivExact(SampleRateHz(), 100)));
}
template <typename T>
-int AudioEncoderDecoderIsacT<T>::Max10MsFramesInAPacket() const {
+size_t AudioEncoderIsacT<T>::Max10MsFramesInAPacket() const {
return 6; // iSAC puts at most 60 ms in a packet.
}
template <typename T>
-int AudioEncoderDecoderIsacT<T>::GetTargetBitrate() const {
- return target_bitrate_bps_;
+int AudioEncoderIsacT<T>::GetTargetBitrate() const {
+ if (config_.adaptive_mode)
+ return -1;
+ return config_.bit_rate == 0 ? kDefaultBitRate : config_.bit_rate;
}
template <typename T>
-AudioEncoder::EncodedInfo AudioEncoderDecoderIsacT<T>::EncodeInternal(
+AudioEncoder::EncodedInfo AudioEncoderIsacT<T>::EncodeInternal(
uint32_t rtp_timestamp,
const int16_t* audio,
size_t max_encoded_bytes,
uint8_t* encoded) {
- CriticalSectionScoped cs_lock(lock_.get());
if (!packet_in_progress_) {
// Starting a new packet; remember the timestamp for later.
packet_in_progress_ = true;
packet_timestamp_ = rtp_timestamp;
}
- int r;
- {
- CriticalSectionScoped cs(state_lock_.get());
- r = T::Encode(isac_state_, audio, encoded);
- CHECK_GE(r, 0) << "Encode failed (error code "
- << T::GetErrorCode(isac_state_) << ")";
+ if (bwinfo_) {
+ IsacBandwidthInfo bwinfo = bwinfo_->Get();
+ T::SetBandwidthInfo(isac_state_, &bwinfo);
}
+ int r = T::Encode(isac_state_, audio, encoded);
+ RTC_CHECK_GE(r, 0) << "Encode failed (error code "
+ << T::GetErrorCode(isac_state_) << ")";
// T::Encode doesn't allow us to tell it the size of the output
// buffer. All we can do is check for an overrun after the fact.
- CHECK(static_cast<size_t>(r) <= max_encoded_bytes);
+ RTC_CHECK_LE(static_cast<size_t>(r), max_encoded_bytes);
if (r == 0)
return EncodedInfo();
@@ -169,68 +145,49 @@ AudioEncoder::EncodedInfo AudioEncoderDecoderIsacT<T>::EncodeInternal(
EncodedInfo info;
info.encoded_bytes = r;
info.encoded_timestamp = packet_timestamp_;
- info.payload_type = payload_type_;
+ info.payload_type = config_.payload_type;
return info;
}
template <typename T>
-int AudioEncoderDecoderIsacT<T>::DecodeInternal(const uint8_t* encoded,
- size_t encoded_len,
- int sample_rate_hz,
- int16_t* decoded,
- SpeechType* speech_type) {
- CriticalSectionScoped cs(state_lock_.get());
- // We want to crate the illusion that iSAC supports 48000 Hz decoding, while
- // in fact it outputs 32000 Hz. This is the iSAC fullband mode.
- if (sample_rate_hz == 48000)
- sample_rate_hz = 32000;
- CHECK(sample_rate_hz == 16000 || sample_rate_hz == 32000)
- << "Unsupported sample rate " << sample_rate_hz;
- if (sample_rate_hz != decoder_sample_rate_hz_) {
- CHECK_EQ(0, T::SetDecSampRate(isac_state_, sample_rate_hz));
- decoder_sample_rate_hz_ = sample_rate_hz;
- }
- int16_t temp_type = 1; // Default is speech.
- int ret =
- T::DecodeInternal(isac_state_, encoded, static_cast<int16_t>(encoded_len),
- decoded, &temp_type);
- *speech_type = ConvertSpeechType(temp_type);
- return ret;
+void AudioEncoderIsacT<T>::Reset() {
+ RecreateEncoderInstance(config_);
}
template <typename T>
-bool AudioEncoderDecoderIsacT<T>::HasDecodePlc() const {
- return false;
-}
-
-template <typename T>
-int AudioEncoderDecoderIsacT<T>::DecodePlc(int num_frames, int16_t* decoded) {
- CriticalSectionScoped cs(state_lock_.get());
- return T::DecodePlc(isac_state_, decoded, num_frames);
-}
+void AudioEncoderIsacT<T>::RecreateEncoderInstance(const Config& config) {
+ RTC_CHECK(config.IsOk());
+ packet_in_progress_ = false;
+ bwinfo_ = config.bwinfo;
+ if (isac_state_)
+ RTC_CHECK_EQ(0, T::Free(isac_state_));
+ RTC_CHECK_EQ(0, T::Create(&isac_state_));
+ RTC_CHECK_EQ(0, T::EncoderInit(isac_state_, config.adaptive_mode ? 0 : 1));
+ RTC_CHECK_EQ(0, T::SetEncSampRate(isac_state_, config.sample_rate_hz));
+ const int bit_rate = config.bit_rate == 0 ? kDefaultBitRate : config.bit_rate;
+ if (config.adaptive_mode) {
+ RTC_CHECK_EQ(0, T::ControlBwe(isac_state_, bit_rate, config.frame_size_ms,
+ config.enforce_frame_size));
+ } else {
+ RTC_CHECK_EQ(0, T::Control(isac_state_, bit_rate, config.frame_size_ms));
+ }
+ if (config.max_payload_size_bytes != -1)
+ RTC_CHECK_EQ(
+ 0, T::SetMaxPayloadSize(isac_state_, config.max_payload_size_bytes));
+ if (config.max_bit_rate != -1)
+ RTC_CHECK_EQ(0, T::SetMaxRate(isac_state_, config.max_bit_rate));
-template <typename T>
-int AudioEncoderDecoderIsacT<T>::Init() {
- CriticalSectionScoped cs(state_lock_.get());
- return T::DecoderInit(isac_state_);
-}
+ // When config.sample_rate_hz is set to 48000 Hz (iSAC-fb), the decoder is
+ // still set to 32000 Hz, since there is no full-band mode in the decoder.
+ const int decoder_sample_rate_hz = std::min(config.sample_rate_hz, 32000);
-template <typename T>
-int AudioEncoderDecoderIsacT<T>::IncomingPacket(const uint8_t* payload,
- size_t payload_len,
- uint16_t rtp_sequence_number,
- uint32_t rtp_timestamp,
- uint32_t arrival_timestamp) {
- CriticalSectionScoped cs(state_lock_.get());
- return T::UpdateBwEstimate(
- isac_state_, payload, static_cast<int32_t>(payload_len),
- rtp_sequence_number, rtp_timestamp, arrival_timestamp);
-}
+ // Set the decoder sample rate even though we just use the encoder. This
+ // doesn't appear to be necessary to produce a valid encoding, but without it
+ // we get an encoding that isn't bit-for-bit identical with what a combined
+ // encoder+decoder object produces.
+ RTC_CHECK_EQ(0, T::SetDecSampRate(isac_state_, decoder_sample_rate_hz));
-template <typename T>
-int AudioEncoderDecoderIsacT<T>::ErrorCode() {
- CriticalSectionScoped cs(state_lock_.get());
- return T::GetErrorCode(isac_state_);
+ config_ = config;
}
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/interface/audio_decoder_isacfix.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/interface/audio_decoder_isacfix.h
new file mode 100644
index 00000000000..ed162a10aec
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/interface/audio_decoder_isacfix.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_FIX_INTERFACE_AUDIO_DECODER_ISACFIX_H_
+#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_FIX_INTERFACE_AUDIO_DECODER_ISACFIX_H_
+
+#include "webrtc/modules/audio_coding/codecs/isac/audio_decoder_isac_t.h"
+#include "webrtc/modules/audio_coding/codecs/isac/fix/source/isac_fix_type.h"
+
+namespace webrtc {
+
+using AudioDecoderIsacFix = AudioDecoderIsacT<IsacFix>;
+
+} // namespace webrtc
+#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_FIX_INTERFACE_AUDIO_DECODER_ISACFIX_H_
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/interface/audio_encoder_isacfix.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/interface/audio_encoder_isacfix.h
index f5f037de3a8..00c09877491 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/interface/audio_encoder_isacfix.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/interface/audio_encoder_isacfix.h
@@ -11,147 +11,12 @@
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_FIX_INTERFACE_AUDIO_ENCODER_ISACFIX_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_FIX_INTERFACE_AUDIO_ENCODER_ISACFIX_H_
-#include "webrtc/base/checks.h"
-#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/audio_coding/codecs/audio_encoder_mutable_impl.h"
#include "webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t.h"
-#include "webrtc/modules/audio_coding/codecs/isac/fix/interface/isacfix.h"
+#include "webrtc/modules/audio_coding/codecs/isac/fix/source/isac_fix_type.h"
namespace webrtc {
-struct IsacFix {
- typedef ISACFIX_MainStruct instance_type;
- static const bool has_swb = false;
- static const uint16_t kFixSampleRate = 16000;
- static inline int16_t Control(instance_type* inst,
- int32_t rate,
- int framesize) {
- return WebRtcIsacfix_Control(inst, rate, framesize);
- }
- static inline int16_t ControlBwe(instance_type* inst,
- int32_t rate_bps,
- int frame_size_ms,
- int16_t enforce_frame_size) {
- return WebRtcIsacfix_ControlBwe(inst, rate_bps, frame_size_ms,
- enforce_frame_size);
- }
- static inline int16_t Create(instance_type** inst) {
- return WebRtcIsacfix_Create(inst);
- }
- static inline int DecodeInternal(instance_type* inst,
- const uint8_t* encoded,
- int16_t len,
- int16_t* decoded,
- int16_t* speech_type) {
- return WebRtcIsacfix_Decode(inst, encoded, len, decoded, speech_type);
- }
- static inline int16_t DecodePlc(instance_type* inst,
- int16_t* decoded,
- int16_t num_lost_frames) {
- return WebRtcIsacfix_DecodePlc(inst, decoded, num_lost_frames);
- }
- static inline int16_t DecoderInit(instance_type* inst) {
- return WebRtcIsacfix_DecoderInit(inst);
- }
- static inline int Encode(instance_type* inst,
- const int16_t* speech_in,
- uint8_t* encoded) {
- return WebRtcIsacfix_Encode(inst, speech_in, encoded);
- }
- static inline int16_t EncoderInit(instance_type* inst, int16_t coding_mode) {
- return WebRtcIsacfix_EncoderInit(inst, coding_mode);
- }
- static inline uint16_t EncSampRate(instance_type* inst) {
- return kFixSampleRate;
- }
-
- static inline int16_t Free(instance_type* inst) {
- return WebRtcIsacfix_Free(inst);
- }
- static inline void GetBandwidthInfo(instance_type* inst,
- IsacBandwidthInfo* bwinfo) {
- WebRtcIsacfix_GetBandwidthInfo(inst, bwinfo);
- }
- static inline int16_t GetErrorCode(instance_type* inst) {
- return WebRtcIsacfix_GetErrorCode(inst);
- }
-
- static inline int16_t GetNewFrameLen(instance_type* inst) {
- return WebRtcIsacfix_GetNewFrameLen(inst);
- }
- static inline void SetBandwidthInfo(instance_type* inst,
- const IsacBandwidthInfo* bwinfo) {
- WebRtcIsacfix_SetBandwidthInfo(inst, bwinfo);
- }
- static inline int16_t SetDecSampRate(instance_type* inst,
- uint16_t sample_rate_hz) {
- DCHECK_EQ(sample_rate_hz, kFixSampleRate);
- return 0;
- }
- static inline int16_t SetEncSampRate(instance_type* inst,
- uint16_t sample_rate_hz) {
- DCHECK_EQ(sample_rate_hz, kFixSampleRate);
- return 0;
- }
- static inline int16_t UpdateBwEstimate(instance_type* inst,
- const uint8_t* encoded,
- int32_t packet_size,
- uint16_t rtp_seq_number,
- uint32_t send_ts,
- uint32_t arr_ts) {
- return WebRtcIsacfix_UpdateBwEstimate(inst, encoded, packet_size,
- rtp_seq_number, send_ts, arr_ts);
- }
- static inline int16_t SetMaxPayloadSize(instance_type* inst,
- int16_t max_payload_size_bytes) {
- return WebRtcIsacfix_SetMaxPayloadSize(inst, max_payload_size_bytes);
- }
- static inline int16_t SetMaxRate(instance_type* inst, int32_t max_bit_rate) {
- return WebRtcIsacfix_SetMaxRate(inst, max_bit_rate);
- }
-};
-
-typedef AudioEncoderDecoderIsacT<IsacFix> AudioEncoderDecoderIsacFix;
-
-struct CodecInst;
-
-class AudioEncoderDecoderMutableIsacFix
- : public AudioEncoderMutableImpl<AudioEncoderDecoderIsacFix,
- AudioEncoderDecoderMutableIsac> {
- public:
- explicit AudioEncoderDecoderMutableIsacFix(const CodecInst& codec_inst);
- void UpdateSettings(const CodecInst& codec_inst) override;
- void SetMaxPayloadSize(int max_payload_size_bytes) override;
- void SetMaxRate(int max_rate_bps) override;
-
- // From AudioDecoder.
- int Decode(const uint8_t* encoded,
- size_t encoded_len,
- int sample_rate_hz,
- size_t max_decoded_bytes,
- int16_t* decoded,
- SpeechType* speech_type) override;
- int DecodeRedundant(const uint8_t* encoded,
- size_t encoded_len,
- int sample_rate_hz,
- size_t max_decoded_bytes,
- int16_t* decoded,
- SpeechType* speech_type) override;
- bool HasDecodePlc() const override;
- int DecodePlc(int num_frames, int16_t* decoded) override;
- int Init() override;
- int IncomingPacket(const uint8_t* payload,
- size_t payload_len,
- uint16_t rtp_sequence_number,
- uint32_t rtp_timestamp,
- uint32_t arrival_timestamp) override;
- int ErrorCode() override;
- int PacketDuration(const uint8_t* encoded, size_t encoded_len) const override;
- int PacketDurationRedundant(const uint8_t* encoded,
- size_t encoded_len) const override;
- bool PacketHasFec(const uint8_t* encoded, size_t encoded_len) const override;
- size_t Channels() const override;
-};
+using AudioEncoderIsacFix = AudioEncoderIsacT<IsacFix>;
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_FIX_INTERFACE_AUDIO_ENCODER_ISACFIX_H_
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/interface/isacfix.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/interface/isacfix.h
index a205c6d6416..013ab7f13df 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/interface/isacfix.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/interface/isacfix.h
@@ -11,6 +11,8 @@
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_FIX_INTERFACE_ISACFIX_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_FIX_INTERFACE_ISACFIX_H_
+#include <stddef.h>
+
#include "webrtc/modules/audio_coding/codecs/isac/bandwidth_info.h"
#include "webrtc/typedefs.h"
@@ -172,14 +174,9 @@ extern "C" {
*
* Input:
* - ISAC_main_inst : ISAC instance.
- *
- * Return value
- * : 0 - Ok
- * -1 - Error
*/
- int16_t WebRtcIsacfix_DecoderInit(ISACFIX_MainStruct *ISAC_main_inst);
-
+ void WebRtcIsacfix_DecoderInit(ISACFIX_MainStruct* ISAC_main_inst);
/****************************************************************************
* WebRtcIsacfix_UpdateBwEstimate1(...)
@@ -189,7 +186,7 @@ extern "C" {
* Input:
* - ISAC_main_inst : ISAC instance.
* - encoded : encoded ISAC frame(s).
- * - packet_size : size of the packet.
+ * - packet_size : size of the packet in bytes.
* - rtp_seq_number : the RTP number of the packet.
* - arr_ts : the arrival time of the packet (from NetEq)
* in samples.
@@ -200,7 +197,7 @@ extern "C" {
int16_t WebRtcIsacfix_UpdateBwEstimate1(ISACFIX_MainStruct *ISAC_main_inst,
const uint8_t* encoded,
- int32_t packet_size,
+ size_t packet_size,
uint16_t rtp_seq_number,
uint32_t arr_ts);
@@ -212,7 +209,7 @@ extern "C" {
* Input:
* - ISAC_main_inst : ISAC instance.
* - encoded : encoded ISAC frame(s).
- * - packet_size : size of the packet.
+ * - packet_size : size of the packet in bytes.
* - rtp_seq_number : the RTP number of the packet.
* - send_ts : the send time of the packet from RTP header,
* in samples.
@@ -225,7 +222,7 @@ extern "C" {
int16_t WebRtcIsacfix_UpdateBwEstimate(ISACFIX_MainStruct *ISAC_main_inst,
const uint8_t* encoded,
- int32_t packet_size,
+ size_t packet_size,
uint16_t rtp_seq_number,
uint32_t send_ts,
uint32_t arr_ts);
@@ -251,7 +248,7 @@ extern "C" {
int WebRtcIsacfix_Decode(ISACFIX_MainStruct *ISAC_main_inst,
const uint8_t* encoded,
- int16_t len,
+ size_t len,
int16_t *decoded,
int16_t *speechType);
@@ -280,7 +277,7 @@ extern "C" {
#ifdef WEBRTC_ISAC_FIX_NB_CALLS_ENABLED
int WebRtcIsacfix_DecodeNb(ISACFIX_MainStruct *ISAC_main_inst,
const uint16_t *encoded,
- int16_t len,
+ size_t len,
int16_t *decoded,
int16_t *speechType);
#endif // WEBRTC_ISAC_FIX_NB_CALLS_ENABLED
@@ -303,14 +300,13 @@ extern "C" {
* Output:
* - decoded : The decoded vector
*
- * Return value : >0 - number of samples in decoded PLC vector
- * -1 - Error
+ * Return value : Number of samples in decoded PLC vector
*/
#ifdef WEBRTC_ISAC_FIX_NB_CALLS_ENABLED
- int16_t WebRtcIsacfix_DecodePlcNb(ISACFIX_MainStruct *ISAC_main_inst,
- int16_t *decoded,
- int16_t noOfLostFrames);
+ size_t WebRtcIsacfix_DecodePlcNb(ISACFIX_MainStruct *ISAC_main_inst,
+ int16_t *decoded,
+ size_t noOfLostFrames);
#endif // WEBRTC_ISAC_FIX_NB_CALLS_ENABLED
@@ -332,13 +328,12 @@ extern "C" {
* Output:
* - decoded : The decoded vector
*
- * Return value : >0 - number of samples in decoded PLC vector
- * -1 - Error
+ * Return value : Number of samples in decoded PLC vector
*/
- int16_t WebRtcIsacfix_DecodePlc(ISACFIX_MainStruct *ISAC_main_inst,
- int16_t *decoded,
- int16_t noOfLostFrames );
+ size_t WebRtcIsacfix_DecodePlc(ISACFIX_MainStruct *ISAC_main_inst,
+ int16_t *decoded,
+ size_t noOfLostFrames );
/****************************************************************************
@@ -356,8 +351,8 @@ extern "C" {
*/
int16_t WebRtcIsacfix_ReadFrameLen(const uint8_t* encoded,
- int encoded_len_bytes,
- int16_t* frameLength);
+ size_t encoded_len_bytes,
+ size_t* frameLength);
/****************************************************************************
* WebRtcIsacfix_Control(...)
@@ -379,7 +374,8 @@ extern "C" {
int16_t rate,
int framesize);
-
+ void WebRtcIsacfix_SetInitialBweBottleneck(ISACFIX_MainStruct* ISAC_main_inst,
+ int bottleneck_bits_per_second);
/****************************************************************************
* WebRtcIsacfix_ControlBwe(...)
@@ -607,7 +603,7 @@ extern "C" {
*/
int16_t WebRtcIsacfix_ReadBwIndex(const uint8_t* encoded,
- int encoded_len_bytes,
+ size_t encoded_len_bytes,
int16_t* rateIndex);
diff --git a/chromium/third_party/webrtc/modules/audio_processing/beamformer/mock_nonlinear_beamformer.cc b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/audio_decoder_isacfix.cc
index aecb0ec0ce8..8b4741b2cb9 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/beamformer/mock_nonlinear_beamformer.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/audio_decoder_isacfix.cc
@@ -8,15 +8,13 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/audio_processing/beamformer/mock_nonlinear_beamformer.h"
+#include "webrtc/modules/audio_coding/codecs/isac/fix/interface/audio_decoder_isacfix.h"
-#include <vector>
+#include "webrtc/modules/audio_coding/codecs/isac/audio_decoder_isac_t_impl.h"
namespace webrtc {
-MockNonlinearBeamformer::MockNonlinearBeamformer(
- const std::vector<Point>& array_geometry)
- : NonlinearBeamformer(array_geometry) {
-}
+// Explicit instantiation:
+template class AudioDecoderIsacT<IsacFix>;
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/audio_encoder_isacfix.cc b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/audio_encoder_isacfix.cc
index c7999b56be3..ad75ea867ec 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/audio_encoder_isacfix.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/audio_encoder_isacfix.cc
@@ -10,135 +10,11 @@
#include "webrtc/modules/audio_coding/codecs/isac/fix/interface/audio_encoder_isacfix.h"
-#include "webrtc/common_types.h"
#include "webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t_impl.h"
namespace webrtc {
-const uint16_t IsacFix::kFixSampleRate;
-
-// Explicit instantiation of AudioEncoderDecoderIsacT<IsacFix>, a.k.a.
-// AudioEncoderDecoderIsacFix.
-template class AudioEncoderDecoderIsacT<IsacFix>;
-
-namespace {
-AudioEncoderDecoderIsacFix::Config CreateConfig(const CodecInst& codec_inst) {
- AudioEncoderDecoderIsacFix::Config config;
- config.payload_type = codec_inst.pltype;
- config.sample_rate_hz = codec_inst.plfreq;
- config.frame_size_ms =
- rtc::CheckedDivExact(1000 * codec_inst.pacsize, config.sample_rate_hz);
- if (codec_inst.rate != -1)
- config.bit_rate = codec_inst.rate;
- config.adaptive_mode = (codec_inst.rate == -1);
- return config;
-}
-} // namespace
-
-AudioEncoderDecoderMutableIsacFix::AudioEncoderDecoderMutableIsacFix(
- const CodecInst& codec_inst)
- : AudioEncoderMutableImpl<AudioEncoderDecoderIsacFix,
- AudioEncoderDecoderMutableIsac>(
- CreateConfig(codec_inst)) {
-}
-
-void AudioEncoderDecoderMutableIsacFix::UpdateSettings(
- const CodecInst& codec_inst) {
- bool success = Reconstruct(CreateConfig(codec_inst));
- DCHECK(success);
-}
-
-void AudioEncoderDecoderMutableIsacFix::SetMaxPayloadSize(
- int max_payload_size_bytes) {
- auto conf = config();
- conf.max_payload_size_bytes = max_payload_size_bytes;
- Reconstruct(conf);
-}
-
-void AudioEncoderDecoderMutableIsacFix::SetMaxRate(int max_rate_bps) {
- auto conf = config();
- conf.max_bit_rate = max_rate_bps;
- Reconstruct(conf);
-}
-
-int AudioEncoderDecoderMutableIsacFix::Decode(const uint8_t* encoded,
- size_t encoded_len,
- int sample_rate_hz,
- size_t max_decoded_bytes,
- int16_t* decoded,
- SpeechType* speech_type) {
- CriticalSectionScoped cs(encoder_lock_.get());
- return encoder()->Decode(encoded, encoded_len, sample_rate_hz,
- max_decoded_bytes, decoded, speech_type);
-}
-
-int AudioEncoderDecoderMutableIsacFix::DecodeRedundant(
- const uint8_t* encoded,
- size_t encoded_len,
- int sample_rate_hz,
- size_t max_decoded_bytes,
- int16_t* decoded,
- SpeechType* speech_type) {
- CriticalSectionScoped cs(encoder_lock_.get());
- return encoder()->DecodeRedundant(encoded, encoded_len, sample_rate_hz,
- max_decoded_bytes, decoded, speech_type);
-}
-
-bool AudioEncoderDecoderMutableIsacFix::HasDecodePlc() const {
- CriticalSectionScoped cs(encoder_lock_.get());
- return encoder()->HasDecodePlc();
-}
-
-int AudioEncoderDecoderMutableIsacFix::DecodePlc(int num_frames,
- int16_t* decoded) {
- CriticalSectionScoped cs(encoder_lock_.get());
- return encoder()->DecodePlc(num_frames, decoded);
-}
-
-int AudioEncoderDecoderMutableIsacFix::Init() {
- CriticalSectionScoped cs(encoder_lock_.get());
- return encoder()->Init();
-}
-
-int AudioEncoderDecoderMutableIsacFix::IncomingPacket(
- const uint8_t* payload,
- size_t payload_len,
- uint16_t rtp_sequence_number,
- uint32_t rtp_timestamp,
- uint32_t arrival_timestamp) {
- CriticalSectionScoped cs(encoder_lock_.get());
- return encoder()->IncomingPacket(payload, payload_len, rtp_sequence_number,
- rtp_timestamp, arrival_timestamp);
-}
-
-int AudioEncoderDecoderMutableIsacFix::ErrorCode() {
- CriticalSectionScoped cs(encoder_lock_.get());
- return encoder()->ErrorCode();
-}
-
-int AudioEncoderDecoderMutableIsacFix::PacketDuration(
- const uint8_t* encoded,
- size_t encoded_len) const {
- CriticalSectionScoped cs(encoder_lock_.get());
- return encoder()->PacketDuration(encoded, encoded_len);
-}
-
-int AudioEncoderDecoderMutableIsacFix::PacketDurationRedundant(
- const uint8_t* encoded,
- size_t encoded_len) const {
- CriticalSectionScoped cs(encoder_lock_.get());
- return encoder()->PacketDurationRedundant(encoded, encoded_len);
-}
-
-bool AudioEncoderDecoderMutableIsacFix::PacketHasFec(const uint8_t* encoded,
- size_t encoded_len) const {
- CriticalSectionScoped cs(encoder_lock_.get());
- return encoder()->PacketHasFec(encoded, encoded_len);
-}
-
-size_t AudioEncoderDecoderMutableIsacFix::Channels() const {
- CriticalSectionScoped cs(encoder_lock_.get());
- return encoder()->Channels();
-}
+// Explicit instantiation:
+template class AudioEncoderIsacT<IsacFix>;
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.c
index d876a3cb837..b074962eae8 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.c
@@ -148,7 +148,7 @@ int32_t WebRtcIsacfix_UpdateUplinkBwImpl(BwEstimatorstr *bweStr,
const int16_t frameSize,
const uint32_t sendTime,
const uint32_t arrivalTime,
- const int16_t pksize,
+ const size_t pksize,
const uint16_t Index)
{
uint16_t weight = 0;
@@ -379,7 +379,7 @@ int32_t WebRtcIsacfix_UpdateUplinkBwImpl(BwEstimatorstr *bweStr,
/* compute inverse receiving rate for last packet, in Q19 */
numBytesInv = (uint16_t) WebRtcSpl_DivW32W16(
- 524288 + ((pksize + HEADER_SIZE) >> 1),
+ (int32_t)(524288 + ((pksize + HEADER_SIZE) >> 1)),
(int16_t)(pksize + HEADER_SIZE));
/* 8389 is ~ 1/128000 in Q30 */
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.h
index 5d8ccbcd7d7..101ef620811 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/bandwidth_estimator.h
@@ -62,7 +62,7 @@ int32_t WebRtcIsacfix_UpdateUplinkBwImpl(BwEstimatorstr *bwest_str,
const int16_t frameSize,
const uint32_t send_ts,
const uint32_t arr_ts,
- const int16_t pksize,
+ const size_t pksize,
const uint16_t Index);
/* Update receiving estimates. Used when we only receive BWE index, no iSAC data packet. */
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/codec.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/codec.h
index d71decc67c8..fdbb2fcb0d7 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/codec.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/codec.h
@@ -27,18 +27,18 @@ extern "C" {
int WebRtcIsacfix_EstimateBandwidth(BwEstimatorstr* bwest_str,
Bitstr_dec* streamdata,
- int32_t packet_size,
+ size_t packet_size,
uint16_t rtp_seq_number,
uint32_t send_ts,
uint32_t arr_ts);
int WebRtcIsacfix_DecodeImpl(int16_t* signal_out16,
IsacFixDecoderInstance* ISACdec_obj,
- int16_t* current_framesamples);
+ size_t* current_framesamples);
void WebRtcIsacfix_DecodePlcImpl(int16_t* decoded,
IsacFixDecoderInstance* ISACdec_obj,
- int16_t* current_framesample );
+ size_t* current_framesample );
int WebRtcIsacfix_EncodeImpl(int16_t* in,
IsacFixEncoderInstance* ISACenc_obj,
@@ -141,7 +141,7 @@ void WebRtcIsacfix_FilterAndCombine2(int16_t* tempin_ch1,
/* normalized lattice filters */
-void WebRtcIsacfix_NormLatticeFilterMa(int16_t orderCoef,
+void WebRtcIsacfix_NormLatticeFilterMa(size_t orderCoef,
int32_t* stateGQ15,
int16_t* lat_inQ0,
int16_t* filt_coefQ15,
@@ -149,7 +149,7 @@ void WebRtcIsacfix_NormLatticeFilterMa(int16_t orderCoef,
int16_t lo_hi,
int16_t* lat_outQ9);
-void WebRtcIsacfix_NormLatticeFilterAr(int16_t orderCoef,
+void WebRtcIsacfix_NormLatticeFilterAr(size_t orderCoef,
int16_t* stateGQ0,
int32_t* lat_inQ25,
int16_t* filt_coefQ15,
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/decode.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/decode.c
index f0ae07e132d..e3de437a58d 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/decode.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/decode.c
@@ -27,9 +27,9 @@
-int WebRtcIsacfix_DecodeImpl(int16_t *signal_out16,
- IsacFixDecoderInstance *ISACdec_obj,
- int16_t *current_framesamples)
+int WebRtcIsacfix_DecodeImpl(int16_t* signal_out16,
+ IsacFixDecoderInstance* ISACdec_obj,
+ size_t* current_framesamples)
{
int k;
int err;
@@ -58,9 +58,9 @@ int WebRtcIsacfix_DecodeImpl(int16_t *signal_out16,
int16_t gainQ13;
- int16_t frame_nb; /* counter */
- int16_t frame_mode; /* 0 for 30ms, 1 for 60ms */
- static const int16_t kProcessedSamples = 480; /* 480 (for both 30, 60 ms) */
+ size_t frame_nb; /* counter */
+ size_t frame_mode; /* 0 for 30ms, 1 for 60ms */
+ static const size_t kProcessedSamples = 480; /* 480 (for both 30, 60 ms) */
/* PLC */
int16_t overlapWin[ 240 ];
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/decode_bwe.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/decode_bwe.c
index b1f5d10a655..316f59a5e2f 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/decode_bwe.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/decode_bwe.c
@@ -26,13 +26,13 @@
int WebRtcIsacfix_EstimateBandwidth(BwEstimatorstr *bwest_str,
Bitstr_dec *streamdata,
- int32_t packet_size,
+ size_t packet_size,
uint16_t rtp_seq_number,
uint32_t send_ts,
uint32_t arr_ts)
{
int16_t index;
- int16_t frame_samples;
+ size_t frame_samples;
int err;
/* decode framelength */
@@ -53,10 +53,10 @@ int WebRtcIsacfix_EstimateBandwidth(BwEstimatorstr *bwest_str,
err = WebRtcIsacfix_UpdateUplinkBwImpl(
bwest_str,
rtp_seq_number,
- frame_samples * 1000 / FS,
+ (int16_t)(frame_samples * 1000 / FS),
send_ts,
arr_ts,
- (int16_t) packet_size, /* in bytes */
+ packet_size, /* in bytes */
index);
/* error check */
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/decode_plc.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/decode_plc.c
index c3a89c3557d..e907f2b6a6b 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/decode_plc.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/decode_plc.c
@@ -177,11 +177,12 @@ static void MemshipValQ15( int16_t in, int16_t *A, int16_t *B )
static void LinearResampler(int16_t* in,
int16_t* out,
- int16_t lenIn,
- int16_t lenOut)
+ size_t lenIn,
+ size_t lenOut)
{
- int32_t n = (lenIn - 1) * RESAMP_RES;
- int16_t resOut, i, j, relativePos, diff; /* */
+ size_t n = (lenIn - 1) * RESAMP_RES;
+ int16_t resOut, relativePos, diff; /* */
+ size_t i, j;
uint16_t udiff;
if( lenIn == lenOut )
@@ -190,7 +191,7 @@ static void LinearResampler(int16_t* in,
return;
}
- resOut = WebRtcSpl_DivW32W16ResW16( n, (int16_t)(lenOut-1) );
+ resOut = WebRtcSpl_DivW32W16ResW16( (int32_t)n, (int16_t)(lenOut-1) );
out[0] = in[0];
for( i = 1, j = 0, relativePos = 0; i < lenOut; i++ )
@@ -235,7 +236,7 @@ static void LinearResampler(int16_t* in,
void WebRtcIsacfix_DecodePlcImpl(int16_t *signal_out16,
IsacFixDecoderInstance *ISACdec_obj,
- int16_t *current_framesamples )
+ size_t *current_framesamples )
{
int subframecnt;
@@ -260,12 +261,14 @@ void WebRtcIsacfix_DecodePlcImpl(int16_t *signal_out16,
int16_t myDecayRate;
/* ---------- PLC variables ------------ */
- int16_t lag0, i, k, noiseIndex;
+ size_t lag0, i, k;
+ int16_t noiseIndex;
int16_t stretchPitchLP[PITCH_MAX_LAG + 10], stretchPitchLP1[PITCH_MAX_LAG + 10];
int32_t gain_lo_hiQ17[2*SUBFRAMES];
- int16_t nLP, pLP, wNoisyLP, wPriodicLP, tmp16, minIdx;
+ int16_t nLP, pLP, wNoisyLP, wPriodicLP, tmp16;
+ size_t minIdx;
int32_t nHP, pHP, wNoisyHP, wPriodicHP, corr, minCorr, maxCoeff;
int16_t noise1, rshift;
@@ -300,7 +303,7 @@ void WebRtcIsacfix_DecodePlcImpl(int16_t *signal_out16,
- lag0 = ((ISACdec_obj->plcstr_obj.lastPitchLag_Q7 + 64) >> 7) + 1;
+ lag0 = (size_t)(((ISACdec_obj->plcstr_obj.lastPitchLag_Q7 + 64) >> 7) + 1);
if( (ISACdec_obj->plcstr_obj).used != PLC_WAS_USED )
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/entropy_coding.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/entropy_coding.c
index 5f6e6ac0b10..2379ba50661 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/entropy_coding.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/entropy_coding.c
@@ -1870,7 +1870,7 @@ const uint16_t kFrameLenInitIndex[1] = {1};
int WebRtcIsacfix_DecodeFrameLen(Bitstr_dec *streamdata,
- int16_t *framesamples)
+ size_t *framesamples)
{
int err;
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/entropy_coding.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/entropy_coding.h
index e4489df3310..2c8c923cd33 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/entropy_coding.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/entropy_coding.h
@@ -92,7 +92,7 @@ int WebRtcIsacfix_DecodePitchLag(Bitstr_dec *streamdata,
int16_t *PitchLagQ7);
int WebRtcIsacfix_DecodeFrameLen(Bitstr_dec *streamdata,
- int16_t *framelength);
+ size_t *framelength);
int WebRtcIsacfix_EncodeFrameLen(int16_t framelength,
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/filterbank_internal.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/filterbank_internal.h
index d6153e09fca..0e67e300ac1 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/filterbank_internal.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/filterbank_internal.h
@@ -60,10 +60,6 @@ void WebRtcIsacfix_AllpassFilter2FixDec16C(
int32_t *filter_state_ch1,
int32_t *filter_state_ch2);
-// Disable AllpassFilter2FixDec16Neon function due to a clang bug.
-// Refer more details at:
-// https://code.google.com/p/webrtc/issues/detail?id=4567
-#if !defined(__clang__) || !defined(WEBRTC_ARCH_ARM64)
#if (defined WEBRTC_DETECT_NEON) || (defined WEBRTC_HAS_NEON)
void WebRtcIsacfix_AllpassFilter2FixDec16Neon(
int16_t *data_ch1,
@@ -74,7 +70,6 @@ void WebRtcIsacfix_AllpassFilter2FixDec16Neon(
int32_t *filter_state_ch1,
int32_t *filter_state_ch2);
#endif
-#endif
#if defined(MIPS_DSP_R1_LE)
void WebRtcIsacfix_AllpassFilter2FixDec16MIPS(
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/filterbanks_unittest.cc b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/filterbanks_unittest.cc
index 8ef05e897b6..80fb3929b9f 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/filterbanks_unittest.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/filterbanks_unittest.cc
@@ -64,11 +64,6 @@ class FilterBanksTest : public testing::Test {
TEST_F(FilterBanksTest, AllpassFilter2FixDec16Test) {
CalculateResidualEnergyTester(WebRtcIsacfix_AllpassFilter2FixDec16C);
-
-// Disable AllpassFilter2FixDec16Neon function due to a clang bug.
-// Refer more details at:
-// https://code.google.com/p/webrtc/issues/detail?id=4567
-#if !(defined __clang__) || !defined(WEBRTC_ARCH_ARM64)
#ifdef WEBRTC_DETECT_NEON
if ((WebRtc_GetCPUFeaturesARM() & kCPUFeatureNEON) != 0) {
CalculateResidualEnergyTester(WebRtcIsacfix_AllpassFilter2FixDec16Neon);
@@ -76,7 +71,6 @@ TEST_F(FilterBanksTest, AllpassFilter2FixDec16Test) {
#elif defined(WEBRTC_HAS_NEON)
CalculateResidualEnergyTester(WebRtcIsacfix_AllpassFilter2FixDec16Neon);
#endif
-#endif
}
TEST_F(FilterBanksTest, HighpassFilterFixDec32Test) {
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/isac_fix_type.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/isac_fix_type.h
new file mode 100644
index 00000000000..69c73d69044
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/isac_fix_type.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_ISAC_FIX_TYPE_H_
+#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_ISAC_FIX_TYPE_H_
+
+#include "webrtc/base/checks.h"
+#include "webrtc/modules/audio_coding/codecs/isac/fix/interface/isacfix.h"
+
+namespace webrtc {
+
+class IsacFix {
+ public:
+ using instance_type = ISACFIX_MainStruct;
+ static const bool has_swb = false;
+ static inline int16_t Control(instance_type* inst,
+ int32_t rate,
+ int framesize) {
+ return WebRtcIsacfix_Control(inst, rate, framesize);
+ }
+ static inline int16_t ControlBwe(instance_type* inst,
+ int32_t rate_bps,
+ int frame_size_ms,
+ int16_t enforce_frame_size) {
+ return WebRtcIsacfix_ControlBwe(inst, rate_bps, frame_size_ms,
+ enforce_frame_size);
+ }
+ static inline int16_t Create(instance_type** inst) {
+ return WebRtcIsacfix_Create(inst);
+ }
+ static inline int DecodeInternal(instance_type* inst,
+ const uint8_t* encoded,
+ size_t len,
+ int16_t* decoded,
+ int16_t* speech_type) {
+ return WebRtcIsacfix_Decode(inst, encoded, len, decoded, speech_type);
+ }
+ static inline size_t DecodePlc(instance_type* inst,
+ int16_t* decoded,
+ size_t num_lost_frames) {
+ return WebRtcIsacfix_DecodePlc(inst, decoded, num_lost_frames);
+ }
+ static inline void DecoderInit(instance_type* inst) {
+ WebRtcIsacfix_DecoderInit(inst);
+ }
+ static inline int Encode(instance_type* inst,
+ const int16_t* speech_in,
+ uint8_t* encoded) {
+ return WebRtcIsacfix_Encode(inst, speech_in, encoded);
+ }
+ static inline int16_t EncoderInit(instance_type* inst, int16_t coding_mode) {
+ return WebRtcIsacfix_EncoderInit(inst, coding_mode);
+ }
+ static inline uint16_t EncSampRate(instance_type* inst) {
+ return kFixSampleRate;
+ }
+
+ static inline int16_t Free(instance_type* inst) {
+ return WebRtcIsacfix_Free(inst);
+ }
+ static inline void GetBandwidthInfo(instance_type* inst,
+ IsacBandwidthInfo* bwinfo) {
+ WebRtcIsacfix_GetBandwidthInfo(inst, bwinfo);
+ }
+ static inline int16_t GetErrorCode(instance_type* inst) {
+ return WebRtcIsacfix_GetErrorCode(inst);
+ }
+
+ static inline int16_t GetNewFrameLen(instance_type* inst) {
+ return WebRtcIsacfix_GetNewFrameLen(inst);
+ }
+ static inline void SetBandwidthInfo(instance_type* inst,
+ const IsacBandwidthInfo* bwinfo) {
+ WebRtcIsacfix_SetBandwidthInfo(inst, bwinfo);
+ }
+ static inline int16_t SetDecSampRate(instance_type* inst,
+ uint16_t sample_rate_hz) {
+ RTC_DCHECK_EQ(sample_rate_hz, kFixSampleRate);
+ return 0;
+ }
+ static inline int16_t SetEncSampRate(instance_type* inst,
+ uint16_t sample_rate_hz) {
+ RTC_DCHECK_EQ(sample_rate_hz, kFixSampleRate);
+ return 0;
+ }
+ static inline void SetEncSampRateInDecoder(instance_type* inst,
+ uint16_t sample_rate_hz) {
+ RTC_DCHECK_EQ(sample_rate_hz, kFixSampleRate);
+ }
+ static inline void SetInitialBweBottleneck(instance_type* inst,
+ int bottleneck_bits_per_second) {
+ WebRtcIsacfix_SetInitialBweBottleneck(inst, bottleneck_bits_per_second);
+ }
+ static inline int16_t UpdateBwEstimate(instance_type* inst,
+ const uint8_t* encoded,
+ size_t packet_size,
+ uint16_t rtp_seq_number,
+ uint32_t send_ts,
+ uint32_t arr_ts) {
+ return WebRtcIsacfix_UpdateBwEstimate(inst, encoded, packet_size,
+ rtp_seq_number, send_ts, arr_ts);
+ }
+ static inline int16_t SetMaxPayloadSize(instance_type* inst,
+ int16_t max_payload_size_bytes) {
+ return WebRtcIsacfix_SetMaxPayloadSize(inst, max_payload_size_bytes);
+ }
+ static inline int16_t SetMaxRate(instance_type* inst, int32_t max_bit_rate) {
+ return WebRtcIsacfix_SetMaxRate(inst, max_bit_rate);
+ }
+
+ private:
+ enum { kFixSampleRate = 16000 };
+};
+
+} // namespace webrtc
+#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_FIX_SOURCE_ISAC_FIX_TYPE_H_
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/isacfix.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/isacfix.c
index 2441e41ccb4..21911dd058f 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/isacfix.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/isacfix.c
@@ -38,7 +38,7 @@ MatrixProduct2 WebRtcIsacfix_MatrixProduct2;
/* This method assumes that |stream_size_bytes| is in valid range,
* i.e. >= 0 && <= STREAM_MAXW16_60MS
*/
-static void InitializeDecoderBitstream(int stream_size_bytes,
+static void InitializeDecoderBitstream(size_t stream_size_bytes,
Bitstr_dec* bitstream) {
bitstream->W_upper = 0xFFFFFFFF;
bitstream->streamval = 0;
@@ -207,13 +207,8 @@ static void WebRtcIsacfix_InitNeon(void) {
WebRtcIsacfix_FilterMaLoopFix = WebRtcIsacfix_FilterMaLoopNeon;
WebRtcIsacfix_Spec2Time = WebRtcIsacfix_Spec2TimeNeon;
WebRtcIsacfix_Time2Spec = WebRtcIsacfix_Time2SpecNeon;
-// Disable AllpassFilter2FixDec16Neon function due to a clang bug.
-// Refer more details at:
-// https://code.google.com/p/webrtc/issues/detail?id=4567
-#if !defined(__clang__) || !defined(WEBRTC_ARCH_ARM64)
WebRtcIsacfix_AllpassFilter2FixDec16 =
WebRtcIsacfix_AllpassFilter2FixDec16Neon;
-#endif
WebRtcIsacfix_MatrixProduct1 = WebRtcIsacfix_MatrixProduct1Neon;
WebRtcIsacfix_MatrixProduct2 = WebRtcIsacfix_MatrixProduct2Neon;
}
@@ -246,6 +241,31 @@ static void WebRtcIsacfix_InitMIPS(void) {
}
#endif
+static void InitFunctionPointers(void) {
+ WebRtcIsacfix_AutocorrFix = WebRtcIsacfix_AutocorrC;
+ WebRtcIsacfix_FilterMaLoopFix = WebRtcIsacfix_FilterMaLoopC;
+ WebRtcIsacfix_CalculateResidualEnergy =
+ WebRtcIsacfix_CalculateResidualEnergyC;
+ WebRtcIsacfix_AllpassFilter2FixDec16 = WebRtcIsacfix_AllpassFilter2FixDec16C;
+ WebRtcIsacfix_HighpassFilterFixDec32 = WebRtcIsacfix_HighpassFilterFixDec32C;
+ WebRtcIsacfix_Time2Spec = WebRtcIsacfix_Time2SpecC;
+ WebRtcIsacfix_Spec2Time = WebRtcIsacfix_Spec2TimeC;
+ WebRtcIsacfix_MatrixProduct1 = WebRtcIsacfix_MatrixProduct1C;
+ WebRtcIsacfix_MatrixProduct2 = WebRtcIsacfix_MatrixProduct2C;
+
+#ifdef WEBRTC_DETECT_NEON
+ if ((WebRtc_GetCPUFeaturesARM() & kCPUFeatureNEON) != 0) {
+ WebRtcIsacfix_InitNeon();
+ }
+#elif defined(WEBRTC_HAS_NEON)
+ WebRtcIsacfix_InitNeon();
+#endif
+
+#if defined(MIPS32_LE)
+ WebRtcIsacfix_InitMIPS();
+#endif
+}
+
/****************************************************************************
* WebRtcIsacfix_EncoderInit(...)
*
@@ -322,29 +342,7 @@ int16_t WebRtcIsacfix_EncoderInit(ISACFIX_MainStruct *ISAC_main_inst,
WebRtcIsacfix_InitPostFilterbank(&ISAC_inst->ISACenc_obj.interpolatorstr_obj);
#endif
- // Initiaze function pointers.
- WebRtcIsacfix_AutocorrFix = WebRtcIsacfix_AutocorrC;
- WebRtcIsacfix_FilterMaLoopFix = WebRtcIsacfix_FilterMaLoopC;
- WebRtcIsacfix_CalculateResidualEnergy =
- WebRtcIsacfix_CalculateResidualEnergyC;
- WebRtcIsacfix_AllpassFilter2FixDec16 = WebRtcIsacfix_AllpassFilter2FixDec16C;
- WebRtcIsacfix_HighpassFilterFixDec32 = WebRtcIsacfix_HighpassFilterFixDec32C;
- WebRtcIsacfix_Time2Spec = WebRtcIsacfix_Time2SpecC;
- WebRtcIsacfix_Spec2Time = WebRtcIsacfix_Spec2TimeC;
- WebRtcIsacfix_MatrixProduct1 = WebRtcIsacfix_MatrixProduct1C;
- WebRtcIsacfix_MatrixProduct2 = WebRtcIsacfix_MatrixProduct2C;
-
-#ifdef WEBRTC_DETECT_NEON
- if ((WebRtc_GetCPUFeaturesARM() & kCPUFeatureNEON) != 0) {
- WebRtcIsacfix_InitNeon();
- }
-#elif defined(WEBRTC_HAS_NEON)
- WebRtcIsacfix_InitNeon();
-#endif
-
-#if defined(MIPS32_LE)
- WebRtcIsacfix_InitMIPS();
-#endif
+ InitFunctionPointers();
return statusInit;
}
@@ -570,16 +568,14 @@ int16_t WebRtcIsacfix_GetNewBitStream(ISACFIX_MainStruct *ISAC_main_inst,
*
* Input:
* - ISAC_main_inst : ISAC instance.
- *
- * Return value
- * : 0 - Ok
- * -1 - Error
*/
-int16_t WebRtcIsacfix_DecoderInit(ISACFIX_MainStruct *ISAC_main_inst)
+void WebRtcIsacfix_DecoderInit(ISACFIX_MainStruct *ISAC_main_inst)
{
ISACFIX_SubStruct *ISAC_inst;
+ InitFunctionPointers();
+
/* typecast pointer to real structure */
ISAC_inst = (ISACFIX_SubStruct *)ISAC_main_inst;
@@ -597,8 +593,6 @@ int16_t WebRtcIsacfix_DecoderInit(ISACFIX_MainStruct *ISAC_main_inst)
#ifdef WEBRTC_ISAC_FIX_NB_CALLS_ENABLED
WebRtcIsacfix_InitPreFilterbank(&ISAC_inst->ISACdec_obj.decimatorstr_obj);
#endif
-
- return 0;
}
@@ -621,20 +615,20 @@ int16_t WebRtcIsacfix_DecoderInit(ISACFIX_MainStruct *ISAC_main_inst)
int16_t WebRtcIsacfix_UpdateBwEstimate1(ISACFIX_MainStruct *ISAC_main_inst,
const uint8_t* encoded,
- int32_t packet_size,
+ size_t packet_size,
uint16_t rtp_seq_number,
uint32_t arr_ts)
{
ISACFIX_SubStruct *ISAC_inst;
Bitstr_dec streamdata;
int16_t err;
- const int kRequiredEncodedLenBytes = 10;
+ const size_t kRequiredEncodedLenBytes = 10;
/* typecast pointer to real structure */
ISAC_inst = (ISACFIX_SubStruct *)ISAC_main_inst;
/* Sanity check of packet length */
- if (packet_size <= 0) {
+ if (packet_size == 0) {
/* return error code if the packet length is null or less */
ISAC_inst->errorcode = ISAC_EMPTY_PACKET;
return -1;
@@ -693,7 +687,7 @@ int16_t WebRtcIsacfix_UpdateBwEstimate1(ISACFIX_MainStruct *ISAC_main_inst,
int16_t WebRtcIsacfix_UpdateBwEstimate(ISACFIX_MainStruct *ISAC_main_inst,
const uint8_t* encoded,
- int32_t packet_size,
+ size_t packet_size,
uint16_t rtp_seq_number,
uint32_t send_ts,
uint32_t arr_ts)
@@ -701,13 +695,13 @@ int16_t WebRtcIsacfix_UpdateBwEstimate(ISACFIX_MainStruct *ISAC_main_inst,
ISACFIX_SubStruct *ISAC_inst;
Bitstr_dec streamdata;
int16_t err;
- const int kRequiredEncodedLenBytes = 10;
+ const size_t kRequiredEncodedLenBytes = 10;
/* typecast pointer to real structure */
ISAC_inst = (ISACFIX_SubStruct *)ISAC_main_inst;
/* Sanity check of packet length */
- if (packet_size <= 0) {
+ if (packet_size == 0) {
/* return error code if the packet length is null or less */
ISAC_inst->errorcode = ISAC_EMPTY_PACKET;
return -1;
@@ -770,15 +764,16 @@ int16_t WebRtcIsacfix_UpdateBwEstimate(ISACFIX_MainStruct *ISAC_main_inst,
int WebRtcIsacfix_Decode(ISACFIX_MainStruct* ISAC_main_inst,
const uint8_t* encoded,
- int16_t len,
+ size_t len,
int16_t* decoded,
int16_t* speechType)
{
ISACFIX_SubStruct *ISAC_inst;
/* number of samples (480 or 960), output from decoder */
/* that were actually used in the encoder/decoder (determined on the fly) */
- int16_t number_of_samples;
- int declen = 0;
+ size_t number_of_samples;
+ int declen_int = 0;
+ size_t declen;
/* typecast pointer to real structure */
ISAC_inst = (ISACFIX_SubStruct *)ISAC_main_inst;
@@ -790,7 +785,7 @@ int WebRtcIsacfix_Decode(ISACFIX_MainStruct* ISAC_main_inst,
}
/* Sanity check of packet length */
- if (len <= 0) {
+ if (len == 0) {
/* return error code if the packet length is null or less */
ISAC_inst->errorcode = ISAC_EMPTY_PACKET;
return -1;
@@ -807,32 +802,37 @@ int WebRtcIsacfix_Decode(ISACFIX_MainStruct* ISAC_main_inst,
/* added for NetEq purposes (VAD/DTX related) */
*speechType=1;
- declen = WebRtcIsacfix_DecodeImpl(decoded,&ISAC_inst->ISACdec_obj, &number_of_samples);
-
- if (declen < 0) {
+ declen_int = WebRtcIsacfix_DecodeImpl(decoded, &ISAC_inst->ISACdec_obj,
+ &number_of_samples);
+ if (declen_int < 0) {
/* Some error inside the decoder */
- ISAC_inst->errorcode = -(int16_t)declen;
+ ISAC_inst->errorcode = -(int16_t)declen_int;
memset(decoded, 0, sizeof(int16_t) * MAX_FRAMESAMPLES);
return -1;
}
+ declen = (size_t)declen_int;
/* error check */
- if (declen & 0x0001) {
- if (len != declen && len != declen + (((ISAC_inst->ISACdec_obj.bitstr_obj).stream[declen>>1]) & 0x00FF) ) {
+ if (declen & 1) {
+ if (len != declen &&
+ len != declen +
+ ((ISAC_inst->ISACdec_obj.bitstr_obj.stream[declen >> 1]) & 0xFF)) {
ISAC_inst->errorcode = ISAC_LENGTH_MISMATCH;
memset(decoded, 0, sizeof(int16_t) * number_of_samples);
return -1;
}
} else {
- if (len != declen && len != declen + (((ISAC_inst->ISACdec_obj.bitstr_obj).stream[declen>>1]) >> 8) ) {
+ if (len != declen &&
+ len != declen +
+ ((ISAC_inst->ISACdec_obj.bitstr_obj.stream[declen >> 1]) >> 8)) {
ISAC_inst->errorcode = ISAC_LENGTH_MISMATCH;
memset(decoded, 0, sizeof(int16_t) * number_of_samples);
return -1;
}
}
- return number_of_samples;
+ return (int)number_of_samples;
}
@@ -861,17 +861,18 @@ int WebRtcIsacfix_Decode(ISACFIX_MainStruct* ISAC_main_inst,
*/
#ifdef WEBRTC_ISAC_FIX_NB_CALLS_ENABLED
-int WebRtcIsacfix_DecodeNb(ISACFIX_MainStruct *ISAC_main_inst,
- const uint16_t *encoded,
- int16_t len,
- int16_t *decoded,
- int16_t *speechType)
+int WebRtcIsacfix_DecodeNb(ISACFIX_MainStruct* ISAC_main_inst,
+ const uint16_t* encoded,
+ size_t len,
+ int16_t* decoded,
+ int16_t* speechType)
{
ISACFIX_SubStruct *ISAC_inst;
/* twice the number of samples (480 or 960), output from decoder */
/* that were actually used in the encoder/decoder (determined on the fly) */
- int16_t number_of_samples;
- int declen = 0;
+ size_t number_of_samples;
+ int declen_int = 0;
+ size_t declen;
int16_t dummy[FRAMESAMPLES/2];
@@ -884,7 +885,7 @@ int WebRtcIsacfix_DecodeNb(ISACFIX_MainStruct *ISAC_main_inst,
return (-1);
}
- if (len <= 0) {
+ if (len == 0) {
/* return error code if the packet length is null or less */
ISAC_inst->errorcode = ISAC_EMPTY_PACKET;
return -1;
@@ -901,25 +902,30 @@ int WebRtcIsacfix_DecodeNb(ISACFIX_MainStruct *ISAC_main_inst,
/* added for NetEq purposes (VAD/DTX related) */
*speechType=1;
- declen = WebRtcIsacfix_DecodeImpl(decoded,&ISAC_inst->ISACdec_obj, &number_of_samples);
-
- if (declen < 0) {
+ declen_int = WebRtcIsacfix_DecodeImpl(decoded, &ISAC_inst->ISACdec_obj,
+ &number_of_samples);
+ if (declen_int < 0) {
/* Some error inside the decoder */
- ISAC_inst->errorcode = -(int16_t)declen;
+ ISAC_inst->errorcode = -(int16_t)declen_int;
memset(decoded, 0, sizeof(int16_t) * FRAMESAMPLES);
return -1;
}
+ declen = (size_t)declen_int;
/* error check */
- if (declen & 0x0001) {
- if (len != declen && len != declen + (((ISAC_inst->ISACdec_obj.bitstr_obj).stream[declen>>1]) & 0x00FF) ) {
+ if (declen & 1) {
+ if (len != declen &&
+ len != declen +
+ ((ISAC_inst->ISACdec_obj.bitstr_obj.stream[declen >> 1]) & 0xFF)) {
ISAC_inst->errorcode = ISAC_LENGTH_MISMATCH;
memset(decoded, 0, sizeof(int16_t) * number_of_samples);
return -1;
}
} else {
- if (len != declen && len != declen + (((ISAC_inst->ISACdec_obj.bitstr_obj).stream[declen>>1]) >> 8) ) {
+ if (len != declen &&
+ len != declen +
+ ((ISAC_inst->ISACdec_obj.bitstr_obj.stream[declen >>1]) >> 8)) {
ISAC_inst->errorcode = ISAC_LENGTH_MISMATCH;
memset(decoded, 0, sizeof(int16_t) * number_of_samples);
return -1;
@@ -933,7 +939,7 @@ int WebRtcIsacfix_DecodeNb(ISACFIX_MainStruct *ISAC_main_inst,
dummy, &ISAC_inst->ISACdec_obj.decimatorstr_obj);
}
- return number_of_samples/2;
+ return (int)(number_of_samples / 2);
}
#endif /* WEBRTC_ISAC_FIX_NB_CALLS_ENABLED */
@@ -954,16 +960,15 @@ int WebRtcIsacfix_DecodeNb(ISACFIX_MainStruct *ISAC_main_inst,
* Output:
* - decoded : The decoded vector
*
- * Return value : >0 - number of samples in decoded PLC vector
- * -1 - Error
+ * Return value : Number of samples in decoded PLC vector
*/
#ifdef WEBRTC_ISAC_FIX_NB_CALLS_ENABLED
-int16_t WebRtcIsacfix_DecodePlcNb(ISACFIX_MainStruct *ISAC_main_inst,
- int16_t *decoded,
- int16_t noOfLostFrames )
+size_t WebRtcIsacfix_DecodePlcNb(ISACFIX_MainStruct* ISAC_main_inst,
+ int16_t* decoded,
+ size_t noOfLostFrames )
{
- int16_t no_of_samples, declen, k, ok;
+ size_t no_of_samples, declen, k;
int16_t outframeNB[FRAMESAMPLES];
int16_t outframeWB[FRAMESAMPLES];
int16_t dummy[FRAMESAMPLES/2];
@@ -1020,16 +1025,15 @@ int16_t WebRtcIsacfix_DecodePlcNb(ISACFIX_MainStruct *ISAC_main_inst,
* Output:
* - decoded : The decoded vector
*
- * Return value : >0 - number of samples in decoded PLC vector
- * -1 - Error
+ * Return value : Number of samples in decoded PLC vector
*/
-int16_t WebRtcIsacfix_DecodePlc(ISACFIX_MainStruct *ISAC_main_inst,
- int16_t *decoded,
- int16_t noOfLostFrames)
+size_t WebRtcIsacfix_DecodePlc(ISACFIX_MainStruct* ISAC_main_inst,
+ int16_t* decoded,
+ size_t noOfLostFrames)
{
- int16_t no_of_samples, declen, k;
+ size_t no_of_samples, declen, k;
int16_t outframe16[MAX_FRAMESAMPLES];
ISACFIX_SubStruct *ISAC_inst;
@@ -1110,6 +1114,13 @@ int16_t WebRtcIsacfix_Control(ISACFIX_MainStruct *ISAC_main_inst,
return 0;
}
+void WebRtcIsacfix_SetInitialBweBottleneck(ISACFIX_MainStruct* ISAC_main_inst,
+ int bottleneck_bits_per_second) {
+ ISACFIX_SubStruct* inst = (ISACFIX_SubStruct*)ISAC_main_inst;
+ assert(bottleneck_bits_per_second >= 10000 &&
+ bottleneck_bits_per_second <= 32000);
+ inst->bwestimator_obj.sendBwAvg = ((uint32_t)bottleneck_bits_per_second) << 7;
+}
/****************************************************************************
* WebRtcIsacfix_ControlBwe(...)
@@ -1257,12 +1268,12 @@ int16_t WebRtcIsacfix_UpdateUplinkBw(ISACFIX_MainStruct* ISAC_main_inst,
*/
int16_t WebRtcIsacfix_ReadFrameLen(const uint8_t* encoded,
- int encoded_len_bytes,
- int16_t* frameLength)
+ size_t encoded_len_bytes,
+ size_t* frameLength)
{
Bitstr_dec streamdata;
int16_t err;
- const int kRequiredEncodedLenBytes = 10;
+ const size_t kRequiredEncodedLenBytes = 10;
if (encoded_len_bytes < kRequiredEncodedLenBytes) {
return -1;
@@ -1296,12 +1307,12 @@ int16_t WebRtcIsacfix_ReadFrameLen(const uint8_t* encoded,
*/
int16_t WebRtcIsacfix_ReadBwIndex(const uint8_t* encoded,
- int encoded_len_bytes,
+ size_t encoded_len_bytes,
int16_t* rateIndex)
{
Bitstr_dec streamdata;
int16_t err;
- const int kRequiredEncodedLenBytes = 10;
+ const size_t kRequiredEncodedLenBytes = 10;
if (encoded_len_bytes < kRequiredEncodedLenBytes) {
return -1;
@@ -1312,7 +1323,8 @@ int16_t WebRtcIsacfix_ReadBwIndex(const uint8_t* encoded,
read_be16(encoded, kRequiredEncodedLenBytes, streamdata.stream);
/* decode frame length, needed to get to the rateIndex in the bitstream */
- err = WebRtcIsacfix_DecodeFrameLen(&streamdata, rateIndex);
+ size_t frameLength;
+ err = WebRtcIsacfix_DecodeFrameLen(&streamdata, &frameLength);
if (err<0) // error check
return err;
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/lattice.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/lattice.c
index 7fcb9e3b7b8..22224a80711 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/lattice.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/lattice.c
@@ -43,7 +43,7 @@ void WebRtcIsacfix_FilterArLoop(int16_t* ar_g_Q0,
int16_t* ar_f_Q0,
int16_t* cth_Q15,
int16_t* sth_Q15,
- int16_t order_coef);
+ size_t order_coef);
/* Inner loop used for function WebRtcIsacfix_NormLatticeFilterMa(). It does:
for 0 <= n < HALF_SUBFRAMELEN - 1:
@@ -86,7 +86,7 @@ void WebRtcIsacfix_FilterMaLoopC(int16_t input0, // Filter coefficient
/* filter the signal using normalized lattice filter */
/* MA filter */
-void WebRtcIsacfix_NormLatticeFilterMa(int16_t orderCoef,
+void WebRtcIsacfix_NormLatticeFilterMa(size_t orderCoef,
int32_t *stateGQ15,
int16_t *lat_inQ0,
int16_t *filt_coefQ15,
@@ -97,9 +97,10 @@ void WebRtcIsacfix_NormLatticeFilterMa(int16_t orderCoef,
int16_t sthQ15[MAX_AR_MODEL_ORDER];
int16_t cthQ15[MAX_AR_MODEL_ORDER];
- int u, i, k, n;
+ int u, n;
+ size_t i, k;
int16_t temp2,temp3;
- int16_t ord_1 = orderCoef+1;
+ size_t ord_1 = orderCoef+1;
int32_t inv_cthQ16[MAX_AR_MODEL_ORDER];
int32_t gain32, fQtmp;
@@ -210,7 +211,7 @@ void WebRtcIsacfix_NormLatticeFilterMa(int16_t orderCoef,
/* ----------------AR filter-------------------------*/
/* filter the signal using normalized lattice filter */
-void WebRtcIsacfix_NormLatticeFilterAr(int16_t orderCoef,
+void WebRtcIsacfix_NormLatticeFilterAr(size_t orderCoef,
int16_t *stateGQ0,
int32_t *lat_inQ25,
int16_t *filt_coefQ15,
@@ -218,7 +219,8 @@ void WebRtcIsacfix_NormLatticeFilterAr(int16_t orderCoef,
int16_t lo_hi,
int16_t *lat_outQ0)
{
- int ii,n,k,i,u;
+ size_t ii, k, i;
+ int n, u;
int16_t sthQ15[MAX_AR_MODEL_ORDER];
int16_t cthQ15[MAX_AR_MODEL_ORDER];
int32_t tmp32;
@@ -234,7 +236,7 @@ void WebRtcIsacfix_NormLatticeFilterAr(int16_t orderCoef,
int16_t sh;
int16_t temp2,temp3;
- int16_t ord_1 = orderCoef+1;
+ size_t ord_1 = orderCoef+1;
for (u=0;u<SUBFRAMES;u++)
{
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/lattice_armv7.S b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/lattice_armv7.S
index 4a0d99f3b17..945d6ee3a85 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/lattice_armv7.S
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/lattice_armv7.S
@@ -38,7 +38,7 @@ DEFINE_FUNCTION WebRtcIsacfix_FilterArLoop
mov r4, #HALF_SUBFRAMELEN
sub r4, #1 @ Outer loop counter = HALF_SUBFRAMELEN - 1
-HALF_SUBFRAME_LOOP: @ for(n = 0; n < HALF_SUBFRAMELEN - 1; n++)
+HALF_SUBFRAME_LOOP: @ for (n = 0; n < HALF_SUBFRAMELEN - 1; n++)
ldr r9, [sp, #32] @ Restore the inner loop counter to order_coef
ldrh r5, [r1] @ tmpAR = ar_f_Q0[n+1]
@@ -46,7 +46,7 @@ HALF_SUBFRAME_LOOP: @ for(n = 0; n < HALF_SUBFRAMELEN - 1; n++)
add r2, r9, asl #1 @ Restore r2 to &cth_Q15[order_coef]
add r3, r9, asl #1 @ Restore r3 to &sth_Q15[order_coef]
-ORDER_COEF_LOOP: @ for(k = order_coef ; k > 0; k--)
+ORDER_COEF_LOOP: @ for (k = order_coef; k > 0; k--)
ldrh r7, [r3, #-2]! @ sth_Q15[k - 1]
ldrh r6, [r2, #-2]! @ cth_Q15[k - 1]
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/lattice_c.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/lattice_c.c
index 43a15794321..40c3bf86177 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/lattice_c.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/lattice_c.c
@@ -25,11 +25,11 @@ void WebRtcIsacfix_FilterArLoop(int16_t* ar_g_Q0, // Input samples
int16_t* ar_f_Q0, // Input samples
int16_t* cth_Q15, // Filter coefficients
int16_t* sth_Q15, // Filter coefficients
- int16_t order_coef) { // order of the filter
+ size_t order_coef) { // order of the filter
int n = 0;
for (n = 0; n < HALF_SUBFRAMELEN - 1; n++) {
- int k = 0;
+ size_t k = 0;
int16_t tmpAR = 0;
int32_t tmp32 = 0;
int32_t tmp32_2 = 0;
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/lattice_mips.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/lattice_mips.c
index c596922168e..d488bfcb513 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/lattice_mips.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/lattice_mips.c
@@ -8,6 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
+#include <stddef.h>
+
#include "webrtc/modules/audio_coding/codecs/isac/fix/source/settings.h"
#include "webrtc/typedefs.h"
@@ -17,11 +19,11 @@ void WebRtcIsacfix_FilterArLoop(int16_t* ar_g_Q0, // Input samples
int16_t* ar_f_Q0, // Input samples
int16_t* cth_Q15, // Filter coefficients
int16_t* sth_Q15, // Filter coefficients
- int16_t order_coef) { // order of the filter
+ size_t order_coef) { // order of the filter
int n = 0;
for (n = 0; n < HALF_SUBFRAMELEN - 1; n++) {
- int count = order_coef - 1;
+ int count = (int)(order_coef - 1);
int offset;
#if !defined(MIPS_DSP_R1_LE)
int16_t* tmp_cth;
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h
index da401e5f11a..40f15c433c3 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_estimator.h
@@ -39,7 +39,7 @@ void WebRtcIsacfix_PitchFilter(int16_t *indatFix,
void WebRtcIsacfix_PitchFilterCore(int loopNumber,
int16_t gain,
- int index,
+ size_t index,
int16_t sign,
int16_t* inputState,
int16_t* outputBuff2,
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter.c
index c787d6e3689..d73a429178d 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter.c
@@ -34,8 +34,8 @@ static const int16_t kIntrpCoef[PITCH_FRACS][PITCH_FRACORDER] = {
{ 271, -743, 1570, -3320, 12963, 7301, -2292, 953, -325}
};
-static __inline int32_t CalcLrIntQ(int32_t fixVal,
- int16_t qDomain) {
+static __inline size_t CalcLrIntQ(int16_t fixVal,
+ int16_t qDomain) {
int32_t roundVal = 1 << (qDomain - 1);
return (fixVal + roundVal) >> qDomain;
@@ -55,7 +55,7 @@ void WebRtcIsacfix_PitchFilter(int16_t* indatQQ, // Q10 if type is 1 or 4,
const int16_t Gain = 21299; // 1.3 in Q14
int16_t oldLagQ7;
int16_t oldGainQ12, lagdeltaQ7, curLagQ7, gaindeltaQ12, curGainQ12;
- int indW32 = 0, frcQQ = 0;
+ size_t indW32 = 0, frcQQ = 0;
const int16_t* fracoeffQQ = NULL;
// Assumptions in ARM assembly for WebRtcIsacfix_PitchFilterCoreARM().
@@ -141,13 +141,15 @@ void WebRtcIsacfix_PitchFilterGains(const int16_t* indatQ0,
PitchFiltstr* pfp,
int16_t* lagsQ7,
int16_t* gainsQ12) {
- int k, n, m, ind, pos, pos3QQ;
+ int k, n, m;
+ size_t ind, pos, pos3QQ;
int16_t ubufQQ[PITCH_INTBUFFSIZE];
int16_t oldLagQ7, lagdeltaQ7, curLagQ7;
const int16_t* fracoeffQQ = NULL;
int16_t scale;
- int16_t cnt = 0, frcQQ, indW16 = 0, tmpW16;
+ int16_t cnt = 0, tmpW16;
+ size_t frcQQ, indW16 = 0;
int32_t tmpW32, tmp2W32, csum1QQ, esumxQQ;
// Set up buffer and states.
@@ -179,7 +181,7 @@ void WebRtcIsacfix_PitchFilterGains(const int16_t* indatQ0,
for (cnt = 0; cnt < kSegments; cnt++) {
// Update parameters for each segment.
curLagQ7 += lagdeltaQ7;
- indW16 = (int16_t)CalcLrIntQ(curLagQ7, 7);
+ indW16 = CalcLrIntQ(curLagQ7, 7);
frcQQ = ((indW16 << 7) + 64 - curLagQ7) >> 4;
if (frcQQ == PITCH_FRACS) {
@@ -202,7 +204,7 @@ void WebRtcIsacfix_PitchFilterGains(const int16_t* indatQ0,
tmp2W32 = WEBRTC_SPL_MUL_16_32_RSFT14(indatQ0[ind], tmpW32);
tmpW32 += 8192;
- tmpW16 = (int16_t)(tmpW32 >> 14);
+ tmpW16 = tmpW32 >> 14;
tmpW32 = tmpW16 * tmpW16;
if ((tmp2W32 > 1073700000) || (csum1QQ > 1073700000) ||
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter_armv6.S b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter_armv6.S
index 57796b0e6ec..10b9579ccb0 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter_armv6.S
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter_armv6.S
@@ -21,7 +21,7 @@ GLOBAL_FUNCTION WebRtcIsacfix_PitchFilterCore
@ void WebRtcIsacfix_PitchFilterCore(int loopNumber,
@ int16_t gain,
-@ int index,
+@ size_t index,
@ int16_t sign,
@ int16_t* inputState,
@ int16_t* outputBuf2,
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter_c.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter_c.c
index 5c956780e69..366eef034d9 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter_c.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter_c.c
@@ -18,7 +18,7 @@ static const int16_t kDampFilter[PITCH_DAMPORDER] = {
void WebRtcIsacfix_PitchFilterCore(int loopNumber,
int16_t gain,
- int index,
+ size_t index,
int16_t sign,
int16_t* inputState,
int16_t* outputBuf2,
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter_mips.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter_mips.c
index 8334f7eb18b..0f390b8a4f7 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter_mips.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/pitch_filter_mips.c
@@ -12,7 +12,7 @@
void WebRtcIsacfix_PitchFilterCore(int loopNumber,
int16_t gain,
- int index,
+ size_t index,
int16_t sign,
int16_t* inputState,
int16_t* outputBuf2,
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/structs.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/structs.h
index 5abbd7ad446..278af7527dc 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/structs.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/source/structs.h
@@ -34,7 +34,7 @@ typedef struct Bitstreamstruct_dec {
int16_t full; /* 0 - first byte in memory filled, second empty*/
/* 1 - both bytes are empty (we just filled the previous memory */
- int stream_size; /* The size of stream. */
+ size_t stream_size; /* The size of stream in bytes. */
} Bitstr_dec;
/* Bitstream struct for encoder */
@@ -178,8 +178,8 @@ typedef struct {
int16_t pitchCycles;
int16_t A;
int16_t B;
- int16_t pitchIndex;
- int16_t stretchLag;
+ size_t pitchIndex;
+ size_t stretchLag;
int16_t *prevPitchLP; // [ FRAMESAMPLES/2 ]; saved 240
int16_t seed;
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/test/isac_speed_test.cc b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/test/isac_speed_test.cc
index 8f073adf7eb..adee3376b5f 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/test/isac_speed_test.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/test/isac_speed_test.cc
@@ -26,8 +26,8 @@ class IsacSpeedTest : public AudioCodecSpeedTest {
void SetUp() override;
void TearDown() override;
virtual float EncodeABlock(int16_t* in_data, uint8_t* bit_stream,
- int max_bytes, int* encoded_bytes);
- virtual float DecodeABlock(const uint8_t* bit_stream, int encoded_bytes,
+ size_t max_bytes, size_t* encoded_bytes);
+ virtual float DecodeABlock(const uint8_t* bit_stream, size_t encoded_bytes,
int16_t* out_data);
ISACFIX_MainStruct *ISACFIX_main_inst_;
};
@@ -43,12 +43,12 @@ void IsacSpeedTest::SetUp() {
AudioCodecSpeedTest::SetUp();
// Check whether the allocated buffer for the bit stream is large enough.
- EXPECT_GE(max_bytes_, STREAM_MAXW16_60MS);
+ EXPECT_GE(max_bytes_, static_cast<size_t>(STREAM_MAXW16_60MS));
// Create encoder memory.
EXPECT_EQ(0, WebRtcIsacfix_Create(&ISACFIX_main_inst_));
EXPECT_EQ(0, WebRtcIsacfix_EncoderInit(ISACFIX_main_inst_, 1));
- EXPECT_EQ(0, WebRtcIsacfix_DecoderInit(ISACFIX_main_inst_));
+ WebRtcIsacfix_DecoderInit(ISACFIX_main_inst_);
// Set bitrate and block length.
EXPECT_EQ(0, WebRtcIsacfix_Control(ISACFIX_main_inst_, bit_rate_,
block_duration_ms_));
@@ -61,7 +61,7 @@ void IsacSpeedTest::TearDown() {
}
float IsacSpeedTest::EncodeABlock(int16_t* in_data, uint8_t* bit_stream,
- int max_bytes, int* encoded_bytes) {
+ size_t max_bytes, size_t* encoded_bytes) {
// ISAC takes 10 ms everycall
const int subblocks = block_duration_ms_ / 10;
const int subblock_length = 10 * input_sampling_khz_;
@@ -78,13 +78,13 @@ float IsacSpeedTest::EncodeABlock(int16_t* in_data, uint8_t* bit_stream,
EXPECT_EQ(0, value);
}
clocks = clock() - clocks;
- *encoded_bytes = value;
+ *encoded_bytes = static_cast<size_t>(value);
assert(*encoded_bytes <= max_bytes);
return 1000.0 * clocks / CLOCKS_PER_SEC;
}
float IsacSpeedTest::DecodeABlock(const uint8_t* bit_stream,
- int encoded_bytes,
+ size_t encoded_bytes,
int16_t* out_data) {
int value;
int16_t audio_type;
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/test/kenny.cc b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/test/kenny.cc
index ab7c640eedd..d0f508f759d 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/test/kenny.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/test/kenny.cc
@@ -50,7 +50,7 @@ typedef struct {
} BottleNeckModel;
void get_arrival_time(int current_framesamples, /* samples */
- int packet_size, /* bytes */
+ size_t packet_size, /* bytes */
int bottleneck, /* excluding headers; bits/s */
BottleNeckModel *BN_data)
{
@@ -99,7 +99,8 @@ int main(int argc, char* argv[])
FILE *inp, *outp, *f_bn, *outbits;
int endfile;
- int i, errtype, h = 0, k, packetLossPercent = 0;
+ size_t i;
+ int errtype, h = 0, k, packetLossPercent = 0;
int16_t CodingMode;
int16_t bottleneck;
int framesize = 30; /* ms */
@@ -108,14 +109,15 @@ int main(int argc, char* argv[])
/* Runtime statistics */
double starttime, runtime, length_file;
- int16_t stream_len = 0;
+ int stream_len_int = 0;
+ size_t stream_len = 0;
int16_t framecnt;
int declen = 0;
int16_t shortdata[FRAMESAMPLES_10ms];
int16_t decoded[MAX_FRAMESAMPLES];
uint16_t streamdata[500];
int16_t speechType[1];
- int16_t prevFrameSize = 1;
+ size_t prevFrameSize = 1;
int16_t rateBPS = 0;
int16_t fixedFL = 0;
int16_t payloadSize = 0;
@@ -233,7 +235,7 @@ int main(int argc, char* argv[])
CodingMode = 0;
testNum = 0;
testCE = 0;
- for (i = 1; i + 2 < argc; i++) {
+ for (i = 1; i + 2 < static_cast<size_t>(argc); i++) {
/* Instantaneous mode */
if (!strcmp ("-I", argv[i])) {
printf("\nInstantaneous BottleNeck\n");
@@ -537,12 +539,7 @@ int main(int argc, char* argv[])
printf("\n\n Error in encoderinit: %d.\n\n", errtype);
}
- err = WebRtcIsacfix_DecoderInit(ISAC_main_inst);
- /* Error check */
- if (err < 0) {
- errtype=WebRtcIsacfix_GetErrorCode(ISAC_main_inst);
- printf("\n\n Error in decoderinit: %d.\n\n", errtype);
- }
+ WebRtcIsacfix_DecoderInit(ISAC_main_inst);
}
@@ -565,19 +562,19 @@ int main(int argc, char* argv[])
short bwe;
/* Encode */
- stream_len = WebRtcIsacfix_Encode(ISAC_main_inst,
- shortdata,
- (uint8_t*)streamdata);
+ stream_len_int = WebRtcIsacfix_Encode(ISAC_main_inst,
+ shortdata,
+ (uint8_t*)streamdata);
/* If packet is ready, and CE testing, call the different API
functions from the internal API. */
- if (stream_len>0) {
+ if (stream_len_int>0) {
if (testCE == 1) {
err = WebRtcIsacfix_ReadBwIndex(
reinterpret_cast<const uint8_t*>(streamdata),
- stream_len,
+ static_cast<size_t>(stream_len_int),
&bwe);
- stream_len = WebRtcIsacfix_GetNewBitStream(
+ stream_len_int = WebRtcIsacfix_GetNewBitStream(
ISAC_main_inst,
bwe,
scale,
@@ -606,11 +603,11 @@ int main(int argc, char* argv[])
}
} else {
#ifdef WEBRTC_ISAC_FIX_NB_CALLS_ENABLED
- stream_len = WebRtcIsacfix_EncodeNb(ISAC_main_inst,
- shortdata,
- streamdata);
+ stream_len_int = WebRtcIsacfix_EncodeNb(ISAC_main_inst,
+ shortdata,
+ streamdata);
#else
- stream_len = -1;
+ stream_len_int = -1;
#endif
}
}
@@ -619,13 +616,14 @@ int main(int argc, char* argv[])
break;
}
- if (stream_len < 0 || err < 0) {
+ if (stream_len_int < 0 || err < 0) {
/* exit if returned with error */
errtype=WebRtcIsacfix_GetErrorCode(ISAC_main_inst);
printf("\nError in encoder: %d.\n", errtype);
} else {
- if (fwrite(streamdata, sizeof(char),
- stream_len, outbits) != (size_t)stream_len) {
+ stream_len = static_cast<size_t>(stream_len_int);
+ if (fwrite(streamdata, sizeof(char), stream_len, outbits) !=
+ stream_len) {
return -1;
}
}
@@ -731,12 +729,12 @@ int main(int argc, char* argv[])
/* iSAC decoding */
if( lostFrame && framecnt > 0) {
if (nbTest !=2) {
- declen = WebRtcIsacfix_DecodePlc(ISAC_main_inst,
- decoded, prevFrameSize );
+ declen = static_cast<int>(
+ WebRtcIsacfix_DecodePlc(ISAC_main_inst, decoded, prevFrameSize));
} else {
#ifdef WEBRTC_ISAC_FIX_NB_CALLS_ENABLED
- declen = WebRtcIsacfix_DecodePlcNb(ISAC_main_inst, decoded,
- prevFrameSize );
+ declen = static_cast<int>(WebRtcIsacfix_DecodePlcNb(
+ ISAC_main_inst, decoded, prevFrameSize));
#else
declen = -1;
#endif
@@ -744,7 +742,7 @@ int main(int argc, char* argv[])
lostPackets++;
} else {
if (nbTest !=2 ) {
- short FL;
+ size_t FL;
/* Call getFramelen, only used here for function test */
err = WebRtcIsacfix_ReadFrameLen(
reinterpret_cast<const uint8_t*>(streamdata), stream_len, &FL);
@@ -755,11 +753,11 @@ int main(int argc, char* argv[])
decoded,
speechType);
/* Error check */
- if (err<0 || declen<0 || FL!=declen) {
+ if (err < 0 || declen < 0 || FL != static_cast<size_t>(declen)) {
errtype=WebRtcIsacfix_GetErrorCode(ISAC_main_inst);
printf("\nError in decode_B/or getFrameLen: %d.\n", errtype);
}
- prevFrameSize = declen/480;
+ prevFrameSize = static_cast<size_t>(declen/480);
} else {
#ifdef WEBRTC_ISAC_FIX_NB_CALLS_ENABLED
@@ -768,7 +766,7 @@ int main(int argc, char* argv[])
#else
declen = -1;
#endif
- prevFrameSize = static_cast<int16_t>(declen / 240);
+ prevFrameSize = static_cast<size_t>(declen / 240);
}
}
@@ -791,7 +789,7 @@ int main(int argc, char* argv[])
framecnt++;
totalsmpls += declen;
- totalbits += 8 * stream_len;
+ totalbits += static_cast<int>(8 * stream_len);
/* Error test number 10, garbage data */
if (testNum == 10) {
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/test/test_iSACfixfloat.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/test/test_iSACfixfloat.c
index 6dbdb7eff8b..b82af1c0591 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/test/test_iSACfixfloat.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/fix/test/test_iSACfixfloat.c
@@ -21,6 +21,7 @@
/* include API */
#include "isac.h"
#include "isacfix.h"
+#include "webrtc/base/format_macros.h"
/* max number of samples per frame (= 60 ms frame) */
#define MAX_FRAMESAMPLES 960
@@ -57,7 +58,7 @@ typedef struct {
} BottleNeckModel;
void get_arrival_time(int current_framesamples, /* samples */
- int packet_size, /* bytes */
+ size_t packet_size, /* bytes */
int bottleneck, /* excluding headers; bits/s */
BottleNeckModel* BN_data) {
const int HeaderSize = 35;
@@ -98,7 +99,7 @@ int main(int argc, char* argv[]) {
double runtime;
double length_file;
- int16_t stream_len = 0;
+ size_t stream_len = 0;
int declen;
int16_t shortdata[FRAMESAMPLES_10ms];
@@ -114,7 +115,7 @@ int main(int argc, char* argv[]) {
#ifdef _DEBUG
FILE* fy;
double kbps;
- int totalbits = 0;
+ size_t totalbits = 0;
int totalsmpls = 0;
#endif /* _DEBUG */
@@ -392,6 +393,8 @@ int main(int argc, char* argv[]) {
while (endfile == 0) {
cur_framesmpls = 0;
while (1) {
+ int stream_len_int;
+
/* Read 10 ms speech block */
if (nbTest != 1)
endfile = readframe(shortdata, inp, FRAMESAMPLES_10ms);
@@ -401,9 +404,9 @@ int main(int argc, char* argv[]) {
/* iSAC encoding */
if (mode == 0 || mode == 1) {
- stream_len =
+ stream_len_int =
WebRtcIsac_Encode(ISAC_main_inst, shortdata, (uint8_t*)streamdata);
- if (stream_len < 0) {
+ if (stream_len_int < 0) {
/* exit if returned with error */
errtype = WebRtcIsac_GetErrorCode(ISAC_main_inst);
printf("\n\nError in encoder: %d.\n\n", errtype);
@@ -412,20 +415,21 @@ int main(int argc, char* argv[]) {
} else if (mode == 2 || mode == 3) {
/* iSAC encoding */
if (nbTest != 1) {
- stream_len = WebRtcIsacfix_Encode(ISACFIX_main_inst, shortdata,
- (uint8_t*)streamdata);
+ stream_len_int = WebRtcIsacfix_Encode(ISACFIX_main_inst, shortdata,
+ (uint8_t*)streamdata);
} else {
- stream_len =
+ stream_len_int =
WebRtcIsacfix_EncodeNb(ISACFIX_main_inst, shortdata, streamdata);
}
- if (stream_len < 0) {
+ if (stream_len_int < 0) {
/* exit if returned with error */
errtype = WebRtcIsacfix_GetErrorCode(ISACFIX_main_inst);
printf("\n\nError in encoder: %d.\n\n", errtype);
// exit(EXIT_FAILURE);
}
}
+ stream_len = (size_t)stream_len_int;
cur_framesmpls += FRAMESAMPLES_10ms;
@@ -494,10 +498,13 @@ int main(int argc, char* argv[]) {
/* iSAC decoding */
if (plc && (framecnt + 1) % 10 == 0) {
- if (nbTest != 2)
- declen = WebRtcIsacfix_DecodePlc(ISACFIX_main_inst, decoded, 1);
- else
- declen = WebRtcIsacfix_DecodePlcNb(ISACFIX_main_inst, decoded, 1);
+ if (nbTest != 2) {
+ declen =
+ (int)WebRtcIsacfix_DecodePlc(ISACFIX_main_inst, decoded, 1);
+ } else {
+ declen =
+ (int)WebRtcIsacfix_DecodePlcNb(ISACFIX_main_inst, decoded, 1);
+ }
} else {
if (nbTest != 2)
declen = WebRtcIsacfix_Decode(ISACFIX_main_inst, streamdata,
@@ -551,17 +558,21 @@ int main(int argc, char* argv[]) {
/* iSAC decoding */
if (plc && (framecnt + 1) % 10 == 0) {
- if (nbTest != 2)
- declen = WebRtcIsacfix_DecodePlc(ISACFIX_main_inst, decoded, 1);
- else
- declen = WebRtcIsacfix_DecodePlcNb(ISACFIX_main_inst, decoded, 1);
+ if (nbTest != 2) {
+ declen =
+ (int)WebRtcIsacfix_DecodePlc(ISACFIX_main_inst, decoded, 1);
+ } else {
+ declen =
+ (int)WebRtcIsacfix_DecodePlcNb(ISACFIX_main_inst, decoded, 1);
+ }
} else {
- if (nbTest != 2)
+ if (nbTest != 2) {
declen = WebRtcIsacfix_Decode(ISACFIX_main_inst, streamdata,
stream_len, decoded, speechType);
- else
+ } else {
declen = WebRtcIsacfix_DecodeNb(ISACFIX_main_inst, streamdata,
stream_len, decoded, speechType);
+ }
}
if (declen <= 0) {
/* exit if returned with error */
@@ -582,7 +593,7 @@ int main(int argc, char* argv[]) {
totalsmpls += declen;
totalbits += 8 * stream_len;
- kbps = ((double)FS) / ((double)cur_framesmpls) * 8.0 * stream_len / 1000.0;
+ kbps = (double)FS / (double)cur_framesmpls * 8.0 * stream_len / 1000.0;
fy = fopen("bit_rate.dat", "a");
fprintf(fy, "Frame %i = %0.14f\n", framecnt, kbps);
fclose(fy);
@@ -591,7 +602,7 @@ int main(int argc, char* argv[]) {
}
#ifdef _DEBUG
- printf("\n\ntotal bits = %d bits", totalbits);
+ printf("\n\ntotal bits = %" PRIuS " bits", totalbits);
printf("\nmeasured average bitrate = %0.3f kbits/s",
(double)totalbits * (FS / 1000) / totalsmpls);
printf("\n");
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/isac.gypi b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/isac.gypi
index 50cc867b234..354719f330a 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/isac.gypi
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/isac.gypi
@@ -15,6 +15,7 @@
'<(webrtc_root)/common_audio/common_audio.gyp:common_audio',
'audio_decoder_interface',
'audio_encoder_interface',
+ 'isac_common',
],
'include_dirs': [
'main/interface',
@@ -27,13 +28,13 @@
],
},
'sources': [
- 'audio_encoder_isac_t.h',
- 'audio_encoder_isac_t_impl.h',
+ 'main/interface/audio_decoder_isac.h',
'main/interface/audio_encoder_isac.h',
'main/interface/isac.h',
'main/source/arith_routines.c',
'main/source/arith_routines_hist.c',
'main/source/arith_routines_logist.c',
+ 'main/source/audio_decoder_isac.cc',
'main/source/audio_encoder_isac.cc',
'main/source/bandwidth_estimator.c',
'main/source/crc.c',
@@ -47,6 +48,7 @@
'main/source/filterbank_tables.c',
'main/source/intialize.c',
'main/source/isac.c',
+ 'main/source/isac_float_type.h',
'main/source/filterbanks.c',
'main/source/pitch_lag_tables.c',
'main/source/lattice.c',
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/isac_common.gypi b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/isac_common.gypi
new file mode 100644
index 00000000000..135ecd27cc9
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/isac_common.gypi
@@ -0,0 +1,22 @@
+# Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'isac_common',
+ 'type': 'static_library',
+ 'sources': [
+ 'audio_encoder_isac_t.h',
+ 'audio_encoder_isac_t_impl.h',
+ 'locked_bandwidth_info.cc',
+ 'locked_bandwidth_info.h',
+ ],
+ },
+ ],
+}
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/isacfix.gypi b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/isacfix.gypi
index 68aa8d09c60..4355fa92cc8 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/isacfix.gypi
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/isacfix.gypi
@@ -14,6 +14,7 @@
'dependencies': [
'<(webrtc_root)/common_audio/common_audio.gyp:common_audio',
'<(webrtc_root)/system_wrappers/system_wrappers.gyp:system_wrappers',
+ 'isac_common',
],
'include_dirs': [
'fix/interface',
@@ -26,13 +27,13 @@
],
},
'sources': [
- 'audio_encoder_isac_t.h',
- 'audio_encoder_isac_t_impl.h',
+ 'fix/interface/audio_decoder_isacfix.h',
'fix/interface/audio_encoder_isacfix.h',
'fix/interface/isacfix.h',
'fix/source/arith_routines.c',
'fix/source/arith_routines_hist.c',
'fix/source/arith_routines_logist.c',
+ 'fix/source/audio_decoder_isacfix.cc',
'fix/source/audio_encoder_isacfix.cc',
'fix/source/bandwidth_estimator.c',
'fix/source/decode.c',
@@ -45,6 +46,7 @@
'fix/source/filterbanks.c',
'fix/source/filters.c',
'fix/source/initialize.c',
+ 'fix/source/isac_fix_type.h',
'fix/source/isacfix.c',
'fix/source/lattice.c',
'fix/source/lattice_c.c',
@@ -137,18 +139,11 @@
],
'sources': [
'fix/source/entropy_coding_neon.c',
+ 'fix/source/filterbanks_neon.c',
'fix/source/filters_neon.c',
'fix/source/lattice_neon.c',
'fix/source/transform_neon.c',
],
- 'conditions': [
- # Disable AllpassFilter2FixDec16Neon function due to a clang bug.
- # For more details refer to:
- # https://code.google.com/p/webrtc/issues/detail?id=4567
- ['target_arch!="arm64" or clang==0', {
- 'sources': ['fix/source/filterbanks_neon.c',],
- }]
- ],
},
],
}],
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/locked_bandwidth_info.cc b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/locked_bandwidth_info.cc
new file mode 100644
index 00000000000..78b415c4c95
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/locked_bandwidth_info.cc
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_coding/codecs/isac/locked_bandwidth_info.h"
+
+namespace webrtc {
+
+LockedIsacBandwidthInfo::LockedIsacBandwidthInfo()
+ : lock_(CriticalSectionWrapper::CreateCriticalSection()) {
+ bwinfo_.in_use = 0;
+}
+
+LockedIsacBandwidthInfo::~LockedIsacBandwidthInfo() = default;
+
+} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/locked_bandwidth_info.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/locked_bandwidth_info.h
new file mode 100644
index 00000000000..bf39003c121
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/locked_bandwidth_info.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_LOCKED_BANDWIDTH_INFO_H_
+#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_LOCKED_BANDWIDTH_INFO_H_
+
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/base/thread_annotations.h"
+#include "webrtc/modules/audio_coding/codecs/isac/bandwidth_info.h"
+#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
+
+namespace webrtc {
+
+// An IsacBandwidthInfo that's safe to access from multiple threads because
+// it's protected by a mutex.
+class LockedIsacBandwidthInfo final {
+ public:
+ LockedIsacBandwidthInfo();
+ ~LockedIsacBandwidthInfo();
+
+ IsacBandwidthInfo Get() const {
+ CriticalSectionScoped cs(lock_.get());
+ return bwinfo_;
+ }
+
+ void Set(const IsacBandwidthInfo& bwinfo) {
+ CriticalSectionScoped cs(lock_.get());
+ bwinfo_ = bwinfo;
+ }
+
+ private:
+ const rtc::scoped_ptr<CriticalSectionWrapper> lock_;
+ IsacBandwidthInfo bwinfo_ GUARDED_BY(lock_);
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_LOCKED_BANDWIDTH_INFO_H_
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/interface/audio_decoder_isac.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/interface/audio_decoder_isac.h
new file mode 100644
index 00000000000..b224296ad49
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/interface/audio_decoder_isac.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_INTERFACE_AUDIO_DECODER_ISAC_H_
+#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_INTERFACE_AUDIO_DECODER_ISAC_H_
+
+#include "webrtc/modules/audio_coding/codecs/isac/audio_decoder_isac_t.h"
+#include "webrtc/modules/audio_coding/codecs/isac/main/source/isac_float_type.h"
+
+namespace webrtc {
+
+using AudioDecoderIsac = AudioDecoderIsacT<IsacFloat>;
+
+} // namespace webrtc
+#endif  // WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_INTERFACE_AUDIO_DECODER_ISAC_H_
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/interface/audio_encoder_isac.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/interface/audio_encoder_isac.h
index 1fe5d312b8c..f6ef7c42fdf 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/interface/audio_encoder_isac.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/interface/audio_encoder_isac.h
@@ -11,145 +11,12 @@
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_INTERFACE_AUDIO_ENCODER_ISAC_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_INTERFACE_AUDIO_ENCODER_ISAC_H_
-#include "webrtc/base/checks.h"
-#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/audio_coding/codecs/audio_encoder_mutable_impl.h"
#include "webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t.h"
-#include "webrtc/modules/audio_coding/codecs/isac/main/interface/isac.h"
+#include "webrtc/modules/audio_coding/codecs/isac/main/source/isac_float_type.h"
namespace webrtc {
-struct IsacFloat {
- typedef ISACStruct instance_type;
- static const bool has_swb = true;
- static inline int16_t Control(instance_type* inst,
- int32_t rate,
- int framesize) {
- return WebRtcIsac_Control(inst, rate, framesize);
- }
- static inline int16_t ControlBwe(instance_type* inst,
- int32_t rate_bps,
- int frame_size_ms,
- int16_t enforce_frame_size) {
- return WebRtcIsac_ControlBwe(inst, rate_bps, frame_size_ms,
- enforce_frame_size);
- }
- static inline int16_t Create(instance_type** inst) {
- return WebRtcIsac_Create(inst);
- }
- static inline int DecodeInternal(instance_type* inst,
- const uint8_t* encoded,
- int16_t len,
- int16_t* decoded,
- int16_t* speech_type) {
- return WebRtcIsac_Decode(inst, encoded, len, decoded, speech_type);
- }
- static inline int16_t DecodePlc(instance_type* inst,
- int16_t* decoded,
- int16_t num_lost_frames) {
- return WebRtcIsac_DecodePlc(inst, decoded, num_lost_frames);
- }
-
- static inline int16_t DecoderInit(instance_type* inst) {
- return WebRtcIsac_DecoderInit(inst);
- }
- static inline int Encode(instance_type* inst,
- const int16_t* speech_in,
- uint8_t* encoded) {
- return WebRtcIsac_Encode(inst, speech_in, encoded);
- }
- static inline int16_t EncoderInit(instance_type* inst, int16_t coding_mode) {
- return WebRtcIsac_EncoderInit(inst, coding_mode);
- }
- static inline uint16_t EncSampRate(instance_type* inst) {
- return WebRtcIsac_EncSampRate(inst);
- }
-
- static inline int16_t Free(instance_type* inst) {
- return WebRtcIsac_Free(inst);
- }
- static inline void GetBandwidthInfo(instance_type* inst,
- IsacBandwidthInfo* bwinfo) {
- WebRtcIsac_GetBandwidthInfo(inst, bwinfo);
- }
- static inline int16_t GetErrorCode(instance_type* inst) {
- return WebRtcIsac_GetErrorCode(inst);
- }
-
- static inline int16_t GetNewFrameLen(instance_type* inst) {
- return WebRtcIsac_GetNewFrameLen(inst);
- }
- static inline void SetBandwidthInfo(instance_type* inst,
- const IsacBandwidthInfo* bwinfo) {
- WebRtcIsac_SetBandwidthInfo(inst, bwinfo);
- }
- static inline int16_t SetDecSampRate(instance_type* inst,
- uint16_t sample_rate_hz) {
- return WebRtcIsac_SetDecSampRate(inst, sample_rate_hz);
- }
- static inline int16_t SetEncSampRate(instance_type* inst,
- uint16_t sample_rate_hz) {
- return WebRtcIsac_SetEncSampRate(inst, sample_rate_hz);
- }
- static inline int16_t UpdateBwEstimate(instance_type* inst,
- const uint8_t* encoded,
- int32_t packet_size,
- uint16_t rtp_seq_number,
- uint32_t send_ts,
- uint32_t arr_ts) {
- return WebRtcIsac_UpdateBwEstimate(inst, encoded, packet_size,
- rtp_seq_number, send_ts, arr_ts);
- }
- static inline int16_t SetMaxPayloadSize(instance_type* inst,
- int16_t max_payload_size_bytes) {
- return WebRtcIsac_SetMaxPayloadSize(inst, max_payload_size_bytes);
- }
- static inline int16_t SetMaxRate(instance_type* inst, int32_t max_bit_rate) {
- return WebRtcIsac_SetMaxRate(inst, max_bit_rate);
- }
-};
-
-typedef AudioEncoderDecoderIsacT<IsacFloat> AudioEncoderDecoderIsac;
-
-struct CodecInst;
-
-class AudioEncoderDecoderMutableIsacFloat
- : public AudioEncoderMutableImpl<AudioEncoderDecoderIsac,
- AudioEncoderDecoderMutableIsac> {
- public:
- explicit AudioEncoderDecoderMutableIsacFloat(const CodecInst& codec_inst);
- void UpdateSettings(const CodecInst& codec_inst) override;
- void SetMaxPayloadSize(int max_payload_size_bytes) override;
- void SetMaxRate(int max_rate_bps) override;
-
- // From AudioDecoder.
- int Decode(const uint8_t* encoded,
- size_t encoded_len,
- int sample_rate_hz,
- size_t max_decoded_bytes,
- int16_t* decoded,
- SpeechType* speech_type) override;
- int DecodeRedundant(const uint8_t* encoded,
- size_t encoded_len,
- int sample_rate_hz,
- size_t max_decoded_bytes,
- int16_t* decoded,
- SpeechType* speech_type) override;
- bool HasDecodePlc() const override;
- int DecodePlc(int num_frames, int16_t* decoded) override;
- int Init() override;
- int IncomingPacket(const uint8_t* payload,
- size_t payload_len,
- uint16_t rtp_sequence_number,
- uint32_t rtp_timestamp,
- uint32_t arrival_timestamp) override;
- int ErrorCode() override;
- int PacketDuration(const uint8_t* encoded, size_t encoded_len) const override;
- int PacketDurationRedundant(const uint8_t* encoded,
- size_t encoded_len) const override;
- bool PacketHasFec(const uint8_t* encoded, size_t encoded_len) const override;
- size_t Channels() const override;
-};
+using AudioEncoderIsac = AudioEncoderIsacT<IsacFloat>;
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_INTERFACE_AUDIO_ENCODER_ISAC_H_
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/interface/isac.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/interface/isac.h
index 1fe11bcef03..1f5aeb36f41 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/interface/isac.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/interface/isac.h
@@ -11,6 +11,8 @@
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_INTERFACE_ISAC_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_INTERFACE_ISAC_H_
+#include <stddef.h>
+
#include "webrtc/modules/audio_coding/codecs/isac/bandwidth_info.h"
#include "webrtc/typedefs.h"
@@ -155,15 +157,9 @@ extern "C" {
*
* Input:
* - ISAC_main_inst : ISAC instance.
- *
- * Return value
- * : 0 - Ok
- * -1 - Error
*/
- int16_t WebRtcIsac_DecoderInit(
- ISACStruct* ISAC_main_inst);
-
+ void WebRtcIsac_DecoderInit(ISACStruct* ISAC_main_inst);
/******************************************************************************
* WebRtcIsac_UpdateBwEstimate(...)
@@ -186,7 +182,7 @@ extern "C" {
int16_t WebRtcIsac_UpdateBwEstimate(
ISACStruct* ISAC_main_inst,
const uint8_t* encoded,
- int32_t packet_size,
+ size_t packet_size,
uint16_t rtp_seq_number,
uint32_t send_ts,
uint32_t arr_ts);
@@ -215,7 +211,7 @@ extern "C" {
int WebRtcIsac_Decode(
ISACStruct* ISAC_main_inst,
const uint8_t* encoded,
- int16_t len,
+ size_t len,
int16_t* decoded,
int16_t* speechType);
@@ -235,14 +231,13 @@ extern "C" {
* Output:
* - decoded : The decoded vector.
*
- * Return value : >0 - number of samples in decoded PLC vector
- * -1 - Error
+ * Return value : Number of samples in decoded PLC vector
*/
- int16_t WebRtcIsac_DecodePlc(
+ size_t WebRtcIsac_DecodePlc(
ISACStruct* ISAC_main_inst,
int16_t* decoded,
- int16_t noOfLostFrames);
+ size_t noOfLostFrames);
/******************************************************************************
@@ -269,6 +264,8 @@ extern "C" {
int32_t rate,
int framesize);
+ void WebRtcIsac_SetInitialBweBottleneck(ISACStruct* ISAC_main_inst,
+ int bottleneck_bits_per_second);
/******************************************************************************
* WebRtcIsac_ControlBwe(...)
@@ -702,17 +699,22 @@ extern "C" {
int WebRtcIsac_DecodeRcu(
ISACStruct* ISAC_main_inst,
const uint8_t* encoded,
- int16_t len,
+ size_t len,
int16_t* decoded,
int16_t* speechType);
- /* Fills in an IsacBandwidthInfo struct. */
+ /* Fills in an IsacBandwidthInfo struct. |inst| should be a decoder. */
void WebRtcIsac_GetBandwidthInfo(ISACStruct* inst, IsacBandwidthInfo* bwinfo);
- /* Uses the values from an IsacBandwidthInfo struct. */
+ /* Uses the values from an IsacBandwidthInfo struct. |inst| should be an
+ encoder. */
void WebRtcIsac_SetBandwidthInfo(ISACStruct* inst,
const IsacBandwidthInfo* bwinfo);
+ /* If |inst| is a decoder but not an encoder: tell it what sample rate the
+ encoder is using, for bandwidth estimation purposes. */
+ void WebRtcIsac_SetEncSampRateInDecoder(ISACStruct* inst, int sample_rate_hz);
+
#if defined(__cplusplus)
}
#endif
diff --git a/chromium/third_party/webrtc/modules/video_capture/ensure_initialized.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/audio_decoder_isac.cc
index 429879537cd..89879adafba 100644
--- a/chromium/third_party/webrtc/modules/video_capture/ensure_initialized.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/audio_decoder_isac.cc
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
@@ -8,12 +8,13 @@
* be found in the AUTHORS file in the root of the source tree.
*/
+#include "webrtc/modules/audio_coding/codecs/isac/main/interface/audio_decoder_isac.h"
+
+#include "webrtc/modules/audio_coding/codecs/isac/audio_decoder_isac_t_impl.h"
+
namespace webrtc {
-namespace videocapturemodule {
-// Ensure any necessary initialization of webrtc::videocapturemodule has
-// completed.
-void EnsureInitialized();
+// Explicit instantiation:
+template class AudioDecoderIsacT<IsacFloat>;
-} // namespace videocapturemodule.
-} // namespace webrtc.
+} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/audio_encoder_isac.cc b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/audio_encoder_isac.cc
index 201a2d4bb43..c72af307b77 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/audio_encoder_isac.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/audio_encoder_isac.cc
@@ -10,134 +10,11 @@
#include "webrtc/modules/audio_coding/codecs/isac/main/interface/audio_encoder_isac.h"
-#include "webrtc/common_types.h"
#include "webrtc/modules/audio_coding/codecs/isac/audio_encoder_isac_t_impl.h"
namespace webrtc {
-// Explicit instantiation of AudioEncoderDecoderIsacT<IsacFloat>, a.k.a.
-// AudioEncoderDecoderIsac.
-template class AudioEncoderDecoderIsacT<IsacFloat>;
-
-namespace {
-AudioEncoderDecoderIsac::Config CreateConfig(const CodecInst& codec_inst) {
- AudioEncoderDecoderIsac::Config config;
- config.payload_type = codec_inst.pltype;
- config.sample_rate_hz = codec_inst.plfreq;
- config.frame_size_ms =
- rtc::CheckedDivExact(1000 * codec_inst.pacsize, config.sample_rate_hz);
- if (codec_inst.rate != -1)
- config.bit_rate = codec_inst.rate;
- config.adaptive_mode = (codec_inst.rate == -1);
- return config;
-}
-} // namespace
-
-AudioEncoderDecoderMutableIsacFloat::AudioEncoderDecoderMutableIsacFloat(
- const CodecInst& codec_inst)
- : AudioEncoderMutableImpl<AudioEncoderDecoderIsac,
- AudioEncoderDecoderMutableIsac>(
- CreateConfig(codec_inst)) {
-}
-
-void AudioEncoderDecoderMutableIsacFloat::UpdateSettings(
- const CodecInst& codec_inst) {
- bool success = Reconstruct(CreateConfig(codec_inst));
- DCHECK(success);
-}
-
-void AudioEncoderDecoderMutableIsacFloat::SetMaxPayloadSize(
- int max_payload_size_bytes) {
- auto conf = config();
- conf.max_payload_size_bytes = max_payload_size_bytes;
- Reconstruct(conf);
-}
-
-void AudioEncoderDecoderMutableIsacFloat::SetMaxRate(int max_rate_bps) {
- auto conf = config();
- conf.max_bit_rate = max_rate_bps;
- Reconstruct(conf);
-}
-
-int AudioEncoderDecoderMutableIsacFloat::Decode(const uint8_t* encoded,
- size_t encoded_len,
- int sample_rate_hz,
- size_t max_decoded_bytes,
- int16_t* decoded,
- SpeechType* speech_type) {
- CriticalSectionScoped cs(encoder_lock_.get());
- return encoder()->Decode(encoded, encoded_len, sample_rate_hz,
- max_decoded_bytes, decoded, speech_type);
-}
-
-int AudioEncoderDecoderMutableIsacFloat::DecodeRedundant(
- const uint8_t* encoded,
- size_t encoded_len,
- int sample_rate_hz,
- size_t max_decoded_bytes,
- int16_t* decoded,
- SpeechType* speech_type) {
- CriticalSectionScoped cs(encoder_lock_.get());
- return encoder()->DecodeRedundant(encoded, encoded_len, sample_rate_hz,
- max_decoded_bytes, decoded, speech_type);
-}
-
-bool AudioEncoderDecoderMutableIsacFloat::HasDecodePlc() const {
- CriticalSectionScoped cs(encoder_lock_.get());
- return encoder()->HasDecodePlc();
-}
-
-int AudioEncoderDecoderMutableIsacFloat::DecodePlc(int num_frames,
- int16_t* decoded) {
- CriticalSectionScoped cs(encoder_lock_.get());
- return encoder()->DecodePlc(num_frames, decoded);
-}
-
-int AudioEncoderDecoderMutableIsacFloat::Init() {
- CriticalSectionScoped cs(encoder_lock_.get());
- return encoder()->Init();
-}
-
-int AudioEncoderDecoderMutableIsacFloat::IncomingPacket(
- const uint8_t* payload,
- size_t payload_len,
- uint16_t rtp_sequence_number,
- uint32_t rtp_timestamp,
- uint32_t arrival_timestamp) {
- CriticalSectionScoped cs(encoder_lock_.get());
- return encoder()->IncomingPacket(payload, payload_len, rtp_sequence_number,
- rtp_timestamp, arrival_timestamp);
-}
-
-int AudioEncoderDecoderMutableIsacFloat::ErrorCode() {
- CriticalSectionScoped cs(encoder_lock_.get());
- return encoder()->ErrorCode();
-}
-
-int AudioEncoderDecoderMutableIsacFloat::PacketDuration(
- const uint8_t* encoded,
- size_t encoded_len) const {
- CriticalSectionScoped cs(encoder_lock_.get());
- return encoder()->PacketDuration(encoded, encoded_len);
-}
-
-int AudioEncoderDecoderMutableIsacFloat::PacketDurationRedundant(
- const uint8_t* encoded,
- size_t encoded_len) const {
- CriticalSectionScoped cs(encoder_lock_.get());
- return encoder()->PacketDurationRedundant(encoded, encoded_len);
-}
-
-bool AudioEncoderDecoderMutableIsacFloat::PacketHasFec(
- const uint8_t* encoded,
- size_t encoded_len) const {
- CriticalSectionScoped cs(encoder_lock_.get());
- return encoder()->PacketHasFec(encoded, encoded_len);
-}
-
-size_t AudioEncoderDecoderMutableIsacFloat::Channels() const {
- CriticalSectionScoped cs(encoder_lock_.get());
- return encoder()->Channels();
-}
+// Explicit instantiation:
+template class AudioEncoderIsacT<IsacFloat>;
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/audio_encoder_isac_unittest.cc b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/audio_encoder_isac_unittest.cc
index ee5c03121b6..ff941ea79cd 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/audio_encoder_isac_unittest.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/audio_encoder_isac_unittest.cc
@@ -17,13 +17,13 @@ namespace webrtc {
namespace {
-void TestBadConfig(const AudioEncoderDecoderIsac::Config& config) {
+void TestBadConfig(const AudioEncoderIsac::Config& config) {
EXPECT_FALSE(config.IsOk());
}
-void TestGoodConfig(const AudioEncoderDecoderIsac::Config& config) {
+void TestGoodConfig(const AudioEncoderIsac::Config& config) {
EXPECT_TRUE(config.IsOk());
- AudioEncoderDecoderIsac ed(config);
+ AudioEncoderIsac aei(config);
}
// Wrap subroutine calls that test things in this, so that the error messages
@@ -34,7 +34,7 @@ void TestGoodConfig(const AudioEncoderDecoderIsac::Config& config) {
} // namespace
TEST(AudioEncoderIsacTest, TestConfigBitrate) {
- AudioEncoderDecoderIsac::Config config;
+ AudioEncoderIsac::Config config;
// The default value is some real, positive value.
EXPECT_GT(config.bit_rate, 1);
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/bandwidth_estimator.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/bandwidth_estimator.c
index 940e8f50c76..51da3f7c76c 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/bandwidth_estimator.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/bandwidth_estimator.c
@@ -142,7 +142,7 @@ int16_t WebRtcIsac_UpdateBandwidthEstimator(
const int32_t frame_length,
const uint32_t send_ts,
const uint32_t arr_ts,
- const int32_t pksize
+ const size_t pksize
/*, const uint16_t Index*/)
{
float weight = 0.0f;
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/bandwidth_estimator.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/bandwidth_estimator.h
index 29168760825..0704337f7df 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/bandwidth_estimator.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/bandwidth_estimator.h
@@ -95,7 +95,7 @@ extern "C" {
const int32_t frame_length,
const uint32_t send_ts,
const uint32_t arr_ts,
- const int32_t pksize);
+ const size_t pksize);
/* Update receiving estimates. Used when we only receive BWE index, no iSAC data packet. */
int16_t WebRtcIsac_UpdateUplinkBwImpl(
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/codec.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/codec.h
index 4b36fffed68..7ef64b55fe0 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/codec.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/codec.h
@@ -25,7 +25,7 @@
void WebRtcIsac_ResetBitstream(Bitstr* bit_stream);
int WebRtcIsac_EstimateBandwidth(BwEstimatorstr* bwest_str, Bitstr* streamdata,
- int32_t packet_size,
+ size_t packet_size,
uint16_t rtp_seq_number,
uint32_t send_ts, uint32_t arr_ts,
enum IsacSamplingRate encoderSampRate,
@@ -195,14 +195,14 @@ void WebRtcIsac_Spec2time(const TransformTables* tables,
/******************************* filter functions ****************************/
-void WebRtcIsac_AllPoleFilter(double* InOut, double* Coef, int lengthInOut,
+void WebRtcIsac_AllPoleFilter(double* InOut, double* Coef, size_t lengthInOut,
int orderCoef);
-void WebRtcIsac_AllZeroFilter(double* In, double* Coef, int lengthInOut,
+void WebRtcIsac_AllZeroFilter(double* In, double* Coef, size_t lengthInOut,
int orderCoef, double* Out);
void WebRtcIsac_ZeroPoleFilter(double* In, double* ZeroCoef, double* PoleCoef,
- int lengthInOut, int orderCoef, double* Out);
+ size_t lengthInOut, int orderCoef, double* Out);
/***************************** filterbank functions **************************/
@@ -228,6 +228,6 @@ void WebRtcIsac_NormLatticeFilterAr(int orderCoef, float* stateF, float* stateG,
void WebRtcIsac_Dir2Lat(double* a, int orderCoef, float* sth, float* cth);
-void WebRtcIsac_AutoCorr(double* r, const double* x, int N, int order);
+void WebRtcIsac_AutoCorr(double* r, const double* x, size_t N, size_t order);
#endif /* WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_CODEC_H_ */
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/decode_bwe.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/decode_bwe.c
index 5abe2041f9d..019cc895288 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/decode_bwe.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/decode_bwe.c
@@ -18,7 +18,7 @@ int
WebRtcIsac_EstimateBandwidth(
BwEstimatorstr* bwest_str,
Bitstr* streamdata,
- int32_t packet_size,
+ size_t packet_size,
uint16_t rtp_seq_number,
uint32_t send_ts,
uint32_t arr_ts,
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/filter_functions.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/filter_functions.c
index 76a9e7530d0..d47eb1fa664 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/filter_functions.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/filter_functions.c
@@ -19,12 +19,15 @@
-void WebRtcIsac_AllPoleFilter(double *InOut, double *Coef, int lengthInOut, int orderCoef){
-
+void WebRtcIsac_AllPoleFilter(double* InOut,
+ double* Coef,
+ size_t lengthInOut,
+ int orderCoef) {
/* the state of filter is assumed to be in InOut[-1] to InOut[-orderCoef] */
double scal;
double sum;
- int n,k;
+ size_t n;
+ int k;
//if (fabs(Coef[0]-1.0)<0.001) {
if ( (Coef[0] > 0.9999) && (Coef[0] < 1.0001) )
@@ -53,11 +56,15 @@ void WebRtcIsac_AllPoleFilter(double *InOut, double *Coef, int lengthInOut, int
}
-void WebRtcIsac_AllZeroFilter(double *In, double *Coef, int lengthInOut, int orderCoef, double *Out){
-
+void WebRtcIsac_AllZeroFilter(double* In,
+ double* Coef,
+ size_t lengthInOut,
+ int orderCoef,
+ double* Out) {
/* the state of filter is assumed to be in In[-1] to In[-orderCoef] */
- int n, k;
+ size_t n;
+ int k;
double tmp;
for(n = 0; n < lengthInOut; n++)
@@ -74,9 +81,12 @@ void WebRtcIsac_AllZeroFilter(double *In, double *Coef, int lengthInOut, int ord
}
-
-void WebRtcIsac_ZeroPoleFilter(double *In, double *ZeroCoef, double *PoleCoef, int lengthInOut, int orderCoef, double *Out){
-
+void WebRtcIsac_ZeroPoleFilter(double* In,
+ double* ZeroCoef,
+ double* PoleCoef,
+ size_t lengthInOut,
+ int orderCoef,
+ double* Out) {
/* the state of the zero section is assumed to be in In[-1] to In[-orderCoef] */
/* the state of the pole section is assumed to be in Out[-1] to Out[-orderCoef] */
@@ -85,14 +95,8 @@ void WebRtcIsac_ZeroPoleFilter(double *In, double *ZeroCoef, double *PoleCoef, i
}
-void WebRtcIsac_AutoCorr(
- double *r,
- const double *x,
- int N,
- int order
- )
-{
- int lag, n;
+void WebRtcIsac_AutoCorr(double* r, const double* x, size_t N, size_t order) {
+ size_t lag, n;
double sum, prod;
const double *x_lag;
@@ -112,8 +116,8 @@ void WebRtcIsac_AutoCorr(
}
-void WebRtcIsac_BwExpand(double *out, double *in, double coef, short length) {
- int i;
+void WebRtcIsac_BwExpand(double* out, double* in, double coef, size_t length) {
+ size_t i;
double chirp;
chirp = coef;
@@ -125,8 +129,10 @@ void WebRtcIsac_BwExpand(double *out, double *in, double coef, short length) {
}
}
-void WebRtcIsac_WeightingFilter(const double *in, double *weiout, double *whiout, WeightFiltstr *wfdata) {
-
+void WebRtcIsac_WeightingFilter(const double* in,
+ double* weiout,
+ double* whiout,
+ WeightFiltstr* wfdata) {
double tmpbuffer[PITCH_FRAME_LEN + PITCH_WLPCBUFLEN];
double corr[PITCH_WLPCORDER+1], rc[PITCH_WLPCORDER+1];
double apol[PITCH_WLPCORDER+1], apolr[PITCH_WLPCORDER+1];
@@ -195,15 +201,13 @@ static const double APupper[ALLPASSSECTIONS] = {0.0347, 0.3826};
static const double APlower[ALLPASSSECTIONS] = {0.1544, 0.744};
-
-void WebRtcIsac_AllpassFilterForDec(double *InOut,
- const double *APSectionFactors,
- int lengthInOut,
- double *FilterState)
-{
+void WebRtcIsac_AllpassFilterForDec(double* InOut,
+ const double* APSectionFactors,
+ size_t lengthInOut,
+ double* FilterState) {
//This performs all-pass filtering--a series of first order all-pass sections are used
//to filter the input in a cascade manner.
- int n,j;
+ size_t n,j;
double temp;
for (j=0; j<ALLPASSSECTIONS; j++){
for (n=0;n<lengthInOut;n+=2){
@@ -214,12 +218,11 @@ void WebRtcIsac_AllpassFilterForDec(double *InOut,
}
}
-void WebRtcIsac_DecimateAllpass(const double *in,
- double *state_in, /* array of size: 2*ALLPASSSECTIONS+1 */
- int N, /* number of input samples */
- double *out) /* array of size N/2 */
-{
- int n;
+void WebRtcIsac_DecimateAllpass(const double* in,
+ double* state_in,
+ size_t N,
+ double* out) {
+ size_t n;
double data_vec[PITCH_FRAME_LEN];
/* copy input */
@@ -237,7 +240,6 @@ void WebRtcIsac_DecimateAllpass(const double *in,
}
-
/* create high-pass filter ocefficients
* z = 0.998 * exp(j*2*pi*35/8000);
* p = 0.94 * exp(j*2*pi*140/8000);
@@ -245,13 +247,13 @@ void WebRtcIsac_DecimateAllpass(const double *in,
* HP_a = [1, -2*real(p), abs(p)^2]; */
static const double a_coef[2] = { 1.86864659625574, -0.88360000000000};
static const double b_coef[2] = {-1.99524591718270, 0.99600400000000};
-static const float a_coef_float[2] = { 1.86864659625574f, -0.88360000000000f};
-static const float b_coef_float[2] = {-1.99524591718270f, 0.99600400000000f};
/* second order high-pass filter */
-void WebRtcIsac_Highpass(const double *in, double *out, double *state, int N)
-{
- int k;
+void WebRtcIsac_Highpass(const double* in,
+ double* out,
+ double* state,
+ size_t N) {
+ size_t k;
for (k=0; k<N; k++) {
*out = *in + state[1];
@@ -259,14 +261,3 @@ void WebRtcIsac_Highpass(const double *in, double *out, double *state, int N)
state[0] = b_coef[1] * *in++ + a_coef[1] * *out++;
}
}
-
-void WebRtcIsac_Highpass_float(const float *in, double *out, double *state, int N)
-{
- int k;
-
- for (k=0; k<N; k++) {
- *out = (double)*in + state[1];
- state[1] = state[0] + b_coef_float[0] * *in + a_coef_float[0] * *out;
- state[0] = b_coef_float[1] * (double)*in++ + a_coef_float[1] * *out++;
- }
-}
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/isac.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/isac.c
index a19fd01167c..0a5f75a9016 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/isac.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/isac.c
@@ -507,7 +507,7 @@ int WebRtcIsac_Encode(ISACStruct* ISAC_main_inst,
int streamLenLB = 0;
int streamLenUB = 0;
int streamLen = 0;
- int16_t k = 0;
+ size_t k = 0;
uint8_t garbageLen = 0;
int32_t bottleneck = 0;
int16_t bottleneckIdx = 0;
@@ -528,12 +528,12 @@ int WebRtcIsac_Encode(ISACStruct* ISAC_main_inst,
if (instISAC->in_sample_rate_hz == 48000) {
/* Samples in 10 ms @ 48 kHz. */
- const int kNumInputSamples = FRAMESAMPLES_10ms * 3;
+ const size_t kNumInputSamples = FRAMESAMPLES_10ms * 3;
/* Samples 10 ms @ 32 kHz. */
- const int kNumOutputSamples = FRAMESAMPLES_10ms * 2;
+ const size_t kNumOutputSamples = FRAMESAMPLES_10ms * 2;
/* Resampler divide the input into blocks of 3 samples, i.e.
* kNumInputSamples / 3. */
- const int kNumResamplerBlocks = FRAMESAMPLES_10ms;
+ const size_t kNumResamplerBlocks = FRAMESAMPLES_10ms;
int32_t buffer32[FRAMESAMPLES_10ms * 3 + SIZE_RESAMPLER_STATE];
/* Restore last samples from the past to the beginning of the buffer
@@ -750,7 +750,8 @@ int WebRtcIsac_Encode(ISACStruct* ISAC_main_inst,
streamLenUB + garbageLen, &crc);
#ifndef WEBRTC_ARCH_BIG_ENDIAN
for (k = 0; k < LEN_CHECK_SUM_WORD8; k++) {
- encoded[streamLen - LEN_CHECK_SUM_WORD8 + k] = crc >> (24 - k * 8);
+ encoded[streamLen - LEN_CHECK_SUM_WORD8 + k] =
+ (uint8_t)(crc >> (24 - k * 8));
}
#else
memcpy(&encoded[streamLenLB + streamLenUB + 1], &crc, LEN_CHECK_SUM_WORD8);
@@ -923,12 +924,8 @@ int16_t WebRtcIsac_GetNewBitStream(ISACStruct* ISAC_main_inst,
*
* Input:
* - ISAC_main_inst : ISAC instance.
- *
- * Return value
- * : 0 - Ok
- * -1 - Error
*/
-static int16_t DecoderInitLb(ISACLBStruct* instISAC) {
+static void DecoderInitLb(ISACLBStruct* instISAC) {
int i;
/* Initialize stream vector to zero. */
for (i = 0; i < STREAM_SIZE_MAX_60; i++) {
@@ -939,10 +936,9 @@ static int16_t DecoderInitLb(ISACLBStruct* instISAC) {
WebRtcIsac_InitPostFilterbank(
&instISAC->ISACdecLB_obj.postfiltbankstr_obj);
WebRtcIsac_InitPitchFilter(&instISAC->ISACdecLB_obj.pitchfiltstr_obj);
- return 0;
}
-static int16_t DecoderInitUb(ISACUBStruct* instISAC) {
+static void DecoderInitUb(ISACUBStruct* instISAC) {
int i;
/* Init stream vector to zero */
for (i = 0; i < STREAM_SIZE_MAX_60; i++) {
@@ -952,24 +948,18 @@ static int16_t DecoderInitUb(ISACUBStruct* instISAC) {
WebRtcIsac_InitMasking(&instISAC->ISACdecUB_obj.maskfiltstr_obj);
WebRtcIsac_InitPostFilterbank(
&instISAC->ISACdecUB_obj.postfiltbankstr_obj);
- return (0);
}
-int16_t WebRtcIsac_DecoderInit(ISACStruct* ISAC_main_inst) {
+void WebRtcIsac_DecoderInit(ISACStruct* ISAC_main_inst) {
ISACMainStruct* instISAC = (ISACMainStruct*)ISAC_main_inst;
- if (DecoderInitLb(&instISAC->instLB) < 0) {
- return -1;
- }
+ DecoderInitLb(&instISAC->instLB);
if (instISAC->decoderSamplingRateKHz == kIsacSuperWideband) {
memset(instISAC->synthesisFBState1, 0,
FB_STATE_SIZE_WORD32 * sizeof(int32_t));
memset(instISAC->synthesisFBState2, 0,
FB_STATE_SIZE_WORD32 * sizeof(int32_t));
-
- if (DecoderInitUb(&(instISAC->instUB)) < 0) {
- return -1;
- }
+ DecoderInitUb(&(instISAC->instUB));
}
if ((instISAC->initFlag & BIT_MASK_ENC_INIT) != BIT_MASK_ENC_INIT) {
WebRtcIsac_InitBandwidthEstimator(&instISAC->bwestimator_obj,
@@ -978,7 +968,6 @@ int16_t WebRtcIsac_DecoderInit(ISACStruct* ISAC_main_inst) {
}
instISAC->initFlag |= BIT_MASK_DEC_INIT;
instISAC->resetFlag_8kHz = 0;
- return 0;
}
@@ -1005,7 +994,7 @@ int16_t WebRtcIsac_DecoderInit(ISACStruct* ISAC_main_inst) {
*/
int16_t WebRtcIsac_UpdateBwEstimate(ISACStruct* ISAC_main_inst,
const uint8_t* encoded,
- int32_t packet_size,
+ size_t packet_size,
uint16_t rtp_seq_number,
uint32_t send_ts,
uint32_t arr_ts) {
@@ -1055,7 +1044,7 @@ int16_t WebRtcIsac_UpdateBwEstimate(ISACStruct* ISAC_main_inst,
static int Decode(ISACStruct* ISAC_main_inst,
const uint8_t* encoded,
- int16_t lenEncodedBytes,
+ size_t lenEncodedBytes,
int16_t* decoded,
int16_t* speechType,
int16_t isRCUPayload) {
@@ -1068,13 +1057,14 @@ static int Decode(ISACStruct* ISAC_main_inst,
float outFrame[MAX_FRAMESAMPLES];
int16_t outFrameLB[MAX_FRAMESAMPLES];
int16_t outFrameUB[MAX_FRAMESAMPLES];
- int numDecodedBytesLB;
+ int numDecodedBytesLBint;
+ size_t numDecodedBytesLB;
int numDecodedBytesUB;
- int16_t lenEncodedLBBytes;
+ size_t lenEncodedLBBytes;
int16_t validChecksum = 1;
int16_t k;
uint16_t numLayer;
- int16_t totSizeBytes;
+ size_t totSizeBytes;
int16_t err;
ISACMainStruct* instISAC = (ISACMainStruct*)ISAC_main_inst;
@@ -1088,7 +1078,7 @@ static int Decode(ISACStruct* ISAC_main_inst,
return -1;
}
- if (lenEncodedBytes <= 0) {
+ if (lenEncodedBytes == 0) {
/* return error code if the packet length is null. */
instISAC->errorCode = ISAC_EMPTY_PACKET;
return -1;
@@ -1114,11 +1104,12 @@ static int Decode(ISACStruct* ISAC_main_inst,
/* Regardless of that the current codec is setup to work in
* wideband or super-wideband, the decoding of the lower-band
* has to be performed. */
- numDecodedBytesLB = WebRtcIsac_DecodeLb(&instISAC->transform_tables,
- outFrame, decInstLB,
- &numSamplesLB, isRCUPayload);
-
- if ((numDecodedBytesLB < 0) || (numDecodedBytesLB > lenEncodedLBBytes) ||
+ numDecodedBytesLBint = WebRtcIsac_DecodeLb(&instISAC->transform_tables,
+ outFrame, decInstLB,
+ &numSamplesLB, isRCUPayload);
+ numDecodedBytesLB = (size_t)numDecodedBytesLBint;
+ if ((numDecodedBytesLBint < 0) ||
+ (numDecodedBytesLB > lenEncodedLBBytes) ||
(numSamplesLB > MAX_FRAMESAMPLES)) {
instISAC->errorCode = ISAC_LENGTH_MISMATCH;
return -1;
@@ -1361,7 +1352,7 @@ static int Decode(ISACStruct* ISAC_main_inst,
int WebRtcIsac_Decode(ISACStruct* ISAC_main_inst,
const uint8_t* encoded,
- int16_t lenEncodedBytes,
+ size_t lenEncodedBytes,
int16_t* decoded,
int16_t* speechType) {
int16_t isRCUPayload = 0;
@@ -1393,7 +1384,7 @@ int WebRtcIsac_Decode(ISACStruct* ISAC_main_inst,
int WebRtcIsac_DecodeRcu(ISACStruct* ISAC_main_inst,
const uint8_t* encoded,
- int16_t lenEncodedBytes,
+ size_t lenEncodedBytes,
int16_t* decoded,
int16_t* speechType) {
int16_t isRCUPayload = 1;
@@ -1416,13 +1407,12 @@ int WebRtcIsac_DecodeRcu(ISACStruct* ISAC_main_inst,
* Output:
* - decoded : The decoded vector
*
- * Return value : >0 - number of samples in decoded PLC vector
- * -1 - Error
+ * Return value : Number of samples in decoded PLC vector
*/
-int16_t WebRtcIsac_DecodePlc(ISACStruct* ISAC_main_inst,
- int16_t* decoded,
- int16_t noOfLostFrames) {
- int16_t numSamples = 0;
+size_t WebRtcIsac_DecodePlc(ISACStruct* ISAC_main_inst,
+ int16_t* decoded,
+ size_t noOfLostFrames) {
+ size_t numSamples = 0;
ISACMainStruct* instISAC = (ISACMainStruct*)ISAC_main_inst;
/* Limit number of frames to two = 60 millisecond.
@@ -1578,6 +1568,13 @@ int16_t WebRtcIsac_Control(ISACStruct* ISAC_main_inst,
return 0;
}
+void WebRtcIsac_SetInitialBweBottleneck(ISACStruct* ISAC_main_inst,
+ int bottleneck_bits_per_second) {
+ ISACMainStruct* instISAC = (ISACMainStruct*)ISAC_main_inst;
+ assert(bottleneck_bits_per_second >= 10000 &&
+ bottleneck_bits_per_second <= 32000);
+ instISAC->bwestimator_obj.send_bw_avg = (float)bottleneck_bits_per_second;
+}
/****************************************************************************
* WebRtcIsac_ControlBwe(...)
@@ -2344,9 +2341,7 @@ int16_t WebRtcIsac_SetDecSampRate(ISACStruct* ISAC_main_inst,
memset(instISAC->synthesisFBState2, 0,
FB_STATE_SIZE_WORD32 * sizeof(int32_t));
- if (DecoderInitUb(&(instISAC->instUB)) < 0) {
- return -1;
- }
+ DecoderInitUb(&instISAC->instUB);
}
instISAC->decoderSamplingRateKHz = decoder_operational_rate;
return 0;
@@ -2399,3 +2394,12 @@ void WebRtcIsac_SetBandwidthInfo(ISACStruct* inst,
assert(instISAC->initFlag & BIT_MASK_ENC_INIT);
WebRtcIsacBw_SetBandwidthInfo(&instISAC->bwestimator_obj, bwinfo);
}
+
+void WebRtcIsac_SetEncSampRateInDecoder(ISACStruct* inst,
+ int sample_rate_hz) {
+ ISACMainStruct* instISAC = (ISACMainStruct*)inst;
+ assert(instISAC->initFlag & BIT_MASK_DEC_INIT);
+ assert(!(instISAC->initFlag & BIT_MASK_ENC_INIT));
+ assert(sample_rate_hz == 16000 || sample_rate_hz == 32000);
+ instISAC->encoderSamplingRateKHz = sample_rate_hz / 1000;
+}
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/isac_float_type.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/isac_float_type.h
new file mode 100644
index 00000000000..0335548be7f
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/isac_float_type.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_ISAC_FLOAT_TYPE_H_
+#define WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_ISAC_FLOAT_TYPE_H_
+
+#include "webrtc/modules/audio_coding/codecs/isac/main/interface/isac.h"
+
+namespace webrtc {
+
+struct IsacFloat {
+ using instance_type = ISACStruct;
+ static const bool has_swb = true;
+ static inline int16_t Control(instance_type* inst,
+ int32_t rate,
+ int framesize) {
+ return WebRtcIsac_Control(inst, rate, framesize);
+ }
+ static inline int16_t ControlBwe(instance_type* inst,
+ int32_t rate_bps,
+ int frame_size_ms,
+ int16_t enforce_frame_size) {
+ return WebRtcIsac_ControlBwe(inst, rate_bps, frame_size_ms,
+ enforce_frame_size);
+ }
+ static inline int16_t Create(instance_type** inst) {
+ return WebRtcIsac_Create(inst);
+ }
+ static inline int DecodeInternal(instance_type* inst,
+ const uint8_t* encoded,
+ size_t len,
+ int16_t* decoded,
+ int16_t* speech_type) {
+ return WebRtcIsac_Decode(inst, encoded, len, decoded, speech_type);
+ }
+ static inline size_t DecodePlc(instance_type* inst,
+ int16_t* decoded,
+ size_t num_lost_frames) {
+ return WebRtcIsac_DecodePlc(inst, decoded, num_lost_frames);
+ }
+
+ static inline void DecoderInit(instance_type* inst) {
+ WebRtcIsac_DecoderInit(inst);
+ }
+ static inline int Encode(instance_type* inst,
+ const int16_t* speech_in,
+ uint8_t* encoded) {
+ return WebRtcIsac_Encode(inst, speech_in, encoded);
+ }
+ static inline int16_t EncoderInit(instance_type* inst, int16_t coding_mode) {
+ return WebRtcIsac_EncoderInit(inst, coding_mode);
+ }
+ static inline uint16_t EncSampRate(instance_type* inst) {
+ return WebRtcIsac_EncSampRate(inst);
+ }
+
+ static inline int16_t Free(instance_type* inst) {
+ return WebRtcIsac_Free(inst);
+ }
+ static inline void GetBandwidthInfo(instance_type* inst,
+ IsacBandwidthInfo* bwinfo) {
+ WebRtcIsac_GetBandwidthInfo(inst, bwinfo);
+ }
+ static inline int16_t GetErrorCode(instance_type* inst) {
+ return WebRtcIsac_GetErrorCode(inst);
+ }
+
+ static inline int16_t GetNewFrameLen(instance_type* inst) {
+ return WebRtcIsac_GetNewFrameLen(inst);
+ }
+ static inline void SetBandwidthInfo(instance_type* inst,
+ const IsacBandwidthInfo* bwinfo) {
+ WebRtcIsac_SetBandwidthInfo(inst, bwinfo);
+ }
+ static inline int16_t SetDecSampRate(instance_type* inst,
+ uint16_t sample_rate_hz) {
+ return WebRtcIsac_SetDecSampRate(inst, sample_rate_hz);
+ }
+ static inline int16_t SetEncSampRate(instance_type* inst,
+ uint16_t sample_rate_hz) {
+ return WebRtcIsac_SetEncSampRate(inst, sample_rate_hz);
+ }
+ static inline void SetEncSampRateInDecoder(instance_type* inst,
+ uint16_t sample_rate_hz) {
+ WebRtcIsac_SetEncSampRateInDecoder(inst, sample_rate_hz);
+ }
+ static inline void SetInitialBweBottleneck(instance_type* inst,
+ int bottleneck_bits_per_second) {
+ WebRtcIsac_SetInitialBweBottleneck(inst, bottleneck_bits_per_second);
+ }
+ static inline int16_t UpdateBwEstimate(instance_type* inst,
+ const uint8_t* encoded,
+ size_t packet_size,
+ uint16_t rtp_seq_number,
+ uint32_t send_ts,
+ uint32_t arr_ts) {
+ return WebRtcIsac_UpdateBwEstimate(inst, encoded, packet_size,
+ rtp_seq_number, send_ts, arr_ts);
+ }
+ static inline int16_t SetMaxPayloadSize(instance_type* inst,
+ int16_t max_payload_size_bytes) {
+ return WebRtcIsac_SetMaxPayloadSize(inst, max_payload_size_bytes);
+ }
+ static inline int16_t SetMaxRate(instance_type* inst, int32_t max_bit_rate) {
+ return WebRtcIsac_SetMaxRate(inst, max_bit_rate);
+ }
+};
+
+} // namespace webrtc
+#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_ISAC_FLOAT_TYPE_H_
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/isac_unittest.cc b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/isac_unittest.cc
index a751c247921..84c712ee6ee 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/isac_unittest.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/isac_unittest.cc
@@ -97,10 +97,12 @@ TEST_F(IsacTest, IsacUpdateBWE) {
encoded_bytes = WebRtcIsac_Encode(isac_codec_, speech_data_, bitstream_);
EXPECT_EQ(0, encoded_bytes);
encoded_bytes = WebRtcIsac_Encode(isac_codec_, speech_data_, bitstream_);
+ EXPECT_GT(encoded_bytes, 0);
// Call to update bandwidth estimator with real data.
EXPECT_EQ(0, WebRtcIsac_UpdateBwEstimate(isac_codec_, bitstream_,
- encoded_bytes, 1, 12345, 56789));
+ static_cast<size_t>(encoded_bytes),
+ 1, 12345, 56789));
// Free memory.
EXPECT_EQ(0, WebRtcIsac_Free(isac_codec_));
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/lpc_analysis.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/lpc_analysis.c
index 4708a5c3526..60fc25b98bb 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/lpc_analysis.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/lpc_analysis.c
@@ -75,11 +75,11 @@ static const double kLpcCorrWindow[WINLEN] = {
0.00155690, 0.00124918, 0.00094895, 0.00066112, 0.00039320, 0.00015881
};
-double WebRtcIsac_LevDurb(double *a, double *k, double *r, int order)
+double WebRtcIsac_LevDurb(double *a, double *k, double *r, size_t order)
{
double sum, alpha;
- int m, m_h, i;
+ size_t m, m_h, i;
alpha = 0; //warning -DH
a[0] = 1.0;
if (r[0] < LEVINSON_EPS) { /* if r[0] <= 0, set LPC coeff. to zero */
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/lpc_analysis.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/lpc_analysis.h
index 866c76d8fdd..8dfe3838028 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/lpc_analysis.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/lpc_analysis.h
@@ -21,7 +21,7 @@
#include "settings.h"
#include "structs.h"
-double WebRtcIsac_LevDurb(double *a, double *k, double *r, int order);
+double WebRtcIsac_LevDurb(double *a, double *k, double *r, size_t order);
void WebRtcIsac_GetVars(const double *input, const int16_t *pitchGains_Q12,
double *oldEnergy, double *varscale);
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/pitch_estimator.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/pitch_estimator.h
index f5d93564be2..6fb02b378f2 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/pitch_estimator.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/source/pitch_estimator.h
@@ -61,11 +61,15 @@ void WebRtcIsac_PitchfilterPre_gains(double *indat,
void WebRtcIsac_WeightingFilter(const double *in, double *weiout, double *whiout, WeightFiltstr *wfdata);
-void WebRtcIsac_Highpass(const double *in, double *out, double *state, int N);
+void WebRtcIsac_Highpass(const double *in,
+ double *out,
+ double *state,
+ size_t N);
void WebRtcIsac_DecimateAllpass(const double *in,
- double *state_in, /* array of size: 2*ALLPASSSECTIONS+1 */
- int N, /* number of input samples */
- double *out); /* array of size N/2 */
+ double *state_in, /* array of size:
+ * 2*ALLPASSSECTIONS+1 */
+ size_t N, /* number of input samples */
+ double *out); /* array of size N/2 */
#endif /* WEBRTC_MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_PITCH_ESTIMATOR_H_ */
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/test/ReleaseTest-API/ReleaseTest-API.cc b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/test/ReleaseTest-API/ReleaseTest-API.cc
index 4eeeed078ff..2e5badd82ce 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/test/ReleaseTest-API/ReleaseTest-API.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/test/ReleaseTest-API/ReleaseTest-API.cc
@@ -21,6 +21,7 @@
/* include API */
#include "isac.h"
#include "utility.h"
+#include "webrtc/base/format_macros.h"
/* Defines */
#define SEED_FILE "randseed.txt" /* Used when running decoder on garbage data */
@@ -42,7 +43,8 @@ int main(int argc, char* argv[]) {
FILE* inp, *outp, * f_bn = NULL, * vadp = NULL, *bandwidthp;
int framecnt, endfile;
- int i, errtype, VADusage = 0, packetLossPercent = 0;
+ size_t i;
+ int errtype, VADusage = 0, packetLossPercent = 0;
int16_t CodingMode;
int32_t bottleneck = 0;
int framesize = 30; /* ms */
@@ -51,7 +53,7 @@ int main(int argc, char* argv[]) {
/* Runtime statistics */
double starttime, runtime, length_file;
- int16_t stream_len = 0;
+ size_t stream_len = 0;
int declen = 0, declenTC = 0;
bool lostFrame = false;
@@ -75,14 +77,14 @@ int main(int argc, char* argv[]) {
FILE* fy;
double kbps;
#endif /* _DEBUG */
- int totalbits = 0;
+ size_t totalbits = 0;
int totalsmpls = 0;
/* If use GNS file */
FILE* fp_gns = NULL;
char gns_file[100];
- short maxStreamLen30 = 0;
- short maxStreamLen60 = 0;
+ size_t maxStreamLen30 = 0;
+ size_t maxStreamLen60 = 0;
short sampFreqKHz = 32;
short samplesIn10Ms;
short useAssign = 0;
@@ -90,10 +92,10 @@ int main(int argc, char* argv[]) {
bool doTransCoding = false;
int32_t rateTransCoding = 0;
uint8_t streamDataTransCoding[1200];
- int16_t streamLenTransCoding = 0;
+ size_t streamLenTransCoding = 0;
FILE* transCodingFile = NULL;
FILE* transcodingBitstream = NULL;
- uint32_t numTransCodingBytes = 0;
+ size_t numTransCodingBytes = 0;
/* only one structure used for ISAC encoder */
ISACStruct* ISAC_main_inst = NULL;
@@ -185,7 +187,7 @@ int main(int argc, char* argv[]) {
char transCodingFileName[500];
int16_t totFileLoop = 0;
int16_t numFileLoop = 0;
- for (i = 1; i + 2 < argc; i++) {
+ for (i = 1; i + 2 < static_cast<size_t>(argc); i++) {
if (!strcmp("-LOOP", argv[i])) {
i++;
totFileLoop = (int16_t)atol(argv[i]);
@@ -497,13 +499,8 @@ int main(int argc, char* argv[]) {
return 0;
}
}
- if (testNum != 2) {
- if (WebRtcIsac_DecoderInit(ISAC_main_inst) < 0) {
- printf("Error could not initialize the decoder \n");
- cout << flush;
- return 0;
- }
- }
+ if (testNum != 2)
+ WebRtcIsac_DecoderInit(ISAC_main_inst);
if (CodingMode == 1) {
err = WebRtcIsac_Control(ISAC_main_inst, bottleneck, framesize);
if (err < 0) {
@@ -568,17 +565,13 @@ int main(int argc, char* argv[]) {
cout << flush;
}
- err = WebRtcIsac_DecoderInit(ISAC_main_inst);
- /* Error check */
- if (err < 0) {
- errtype = WebRtcIsac_GetErrorCode(ISAC_main_inst);
- printf("\n\n Error in decoderinit: %d.\n\n", errtype);
- cout << flush;
- }
+ WebRtcIsac_DecoderInit(ISAC_main_inst);
}
cur_framesmpls = 0;
while (1) {
+ int stream_len_int = 0;
+
/* Read 10 ms speech block */
endfile = readframe(shortdata, inp, samplesIn10Ms);
@@ -598,21 +591,21 @@ int main(int argc, char* argv[]) {
/* iSAC encoding */
if (!(testNum == 3 && framecnt == 0)) {
- stream_len =
+ stream_len_int =
WebRtcIsac_Encode(ISAC_main_inst, shortdata, (uint8_t*)streamdata);
- if ((payloadSize != 0) && (stream_len > payloadSize)) {
+ if ((payloadSize != 0) && (stream_len_int > payloadSize)) {
if (testNum == 0) {
printf("\n\n");
}
printf("\nError: Streamsize out of range %d\n",
- stream_len - payloadSize);
+ stream_len_int - payloadSize);
cout << flush;
}
WebRtcIsac_GetUplinkBw(ISAC_main_inst, &sendBN);
- if (stream_len > 0) {
+ if (stream_len_int > 0) {
if (doTransCoding) {
int16_t indexStream;
uint8_t auxUW8;
@@ -620,13 +613,15 @@ int main(int argc, char* argv[]) {
/******************** Main Transcoding stream ********************/
WebRtcIsac_GetDownLinkBwIndex(ISAC_main_inst, &bnIdxTC,
&jitterInfoTC);
- streamLenTransCoding = WebRtcIsac_GetNewBitStream(
+ int streamLenTransCoding_int = WebRtcIsac_GetNewBitStream(
ISAC_main_inst, bnIdxTC, jitterInfoTC, rateTransCoding,
streamDataTransCoding, false);
- if (streamLenTransCoding < 0) {
+ if (streamLenTransCoding_int < 0) {
fprintf(stderr, "Error in trans-coding\n");
exit(0);
}
+ streamLenTransCoding =
+ static_cast<size_t>(streamLenTransCoding_int);
auxUW8 = (uint8_t)(((streamLenTransCoding & 0xFF00) >> 8) & 0x00FF);
if (fwrite(&auxUW8, sizeof(uint8_t), 1, transcodingBitstream) !=
1) {
@@ -641,7 +636,7 @@ int main(int argc, char* argv[]) {
if (fwrite(streamDataTransCoding, sizeof(uint8_t),
streamLenTransCoding, transcodingBitstream) !=
- static_cast<size_t>(streamLenTransCoding)) {
+ streamLenTransCoding) {
return -1;
}
@@ -659,12 +654,15 @@ int main(int argc, char* argv[]) {
break;
}
- if (stream_len < 0) {
+ if (stream_len_int < 0) {
/* exit if returned with error */
errtype = WebRtcIsac_GetErrorCode(ISAC_main_inst);
- printf("\n\nError in encoder: %d.\n\n", errtype);
+ fprintf(stderr, "Error in encoder: %d.\n", errtype);
cout << flush;
+ exit(0);
}
+ stream_len = static_cast<size_t>(stream_len_int);
+
cur_framesmpls += samplesIn10Ms;
/* exit encoder loop if the encoder returned a bitstream */
if (stream_len != 0)
@@ -702,17 +700,24 @@ int main(int argc, char* argv[]) {
// RED.
if (lostFrame) {
- stream_len = WebRtcIsac_GetRedPayload(
+ int stream_len_int = WebRtcIsac_GetRedPayload(
ISAC_main_inst, reinterpret_cast<uint8_t*>(streamdata));
+ if (stream_len_int < 0) {
+ fprintf(stderr, "Error getting RED payload\n");
+ exit(0);
+ }
+ stream_len = static_cast<size_t>(stream_len_int);
if (doTransCoding) {
- streamLenTransCoding = WebRtcIsac_GetNewBitStream(
+ int streamLenTransCoding_int = WebRtcIsac_GetNewBitStream(
ISAC_main_inst, bnIdxTC, jitterInfoTC, rateTransCoding,
streamDataTransCoding, true);
- if (streamLenTransCoding < 0) {
+ if (streamLenTransCoding_int < 0) {
fprintf(stderr, "Error in RED trans-coding\n");
exit(0);
}
+ streamLenTransCoding =
+ static_cast<size_t>(streamLenTransCoding_int);
}
}
@@ -890,7 +895,7 @@ int main(int argc, char* argv[]) {
#endif /* _DEBUG */
}
printf("\n");
- printf("total bits = %d bits\n", totalbits);
+ printf("total bits = %" PRIuS " bits\n", totalbits);
printf("measured average bitrate = %0.3f kbits/s\n",
(double)totalbits * (sampFreqKHz) / totalsmpls);
if (doTransCoding) {
@@ -909,11 +914,11 @@ int main(int argc, char* argv[]) {
(100 * runtime / length_file));
if (maxStreamLen30 != 0) {
- printf("Maximum payload size 30ms Frames %d bytes (%0.3f kbps)\n",
+ printf("Maximum payload size 30ms Frames %" PRIuS " bytes (%0.3f kbps)\n",
maxStreamLen30, maxStreamLen30 * 8 / 30.);
}
if (maxStreamLen60 != 0) {
- printf("Maximum payload size 60ms Frames %d bytes (%0.3f kbps)\n",
+ printf("Maximum payload size 60ms Frames %" PRIuS " bytes (%0.3f kbps)\n",
maxStreamLen60, maxStreamLen60 * 8 / 60.);
}
// fprintf(stderr, "\n");
@@ -922,12 +927,12 @@ int main(int argc, char* argv[]) {
fprintf(stderr, " %0.1f kbps",
(double)totalbits * (sampFreqKHz) / totalsmpls);
if (maxStreamLen30 != 0) {
- fprintf(stderr, " plmax-30ms %d bytes (%0.0f kbps)", maxStreamLen30,
- maxStreamLen30 * 8 / 30.);
+ fprintf(stderr, " plmax-30ms %" PRIuS " bytes (%0.0f kbps)",
+ maxStreamLen30, maxStreamLen30 * 8 / 30.);
}
if (maxStreamLen60 != 0) {
- fprintf(stderr, " plmax-60ms %d bytes (%0.0f kbps)", maxStreamLen60,
- maxStreamLen60 * 8 / 60.);
+ fprintf(stderr, " plmax-60ms %" PRIuS " bytes (%0.0f kbps)",
+ maxStreamLen60, maxStreamLen60 * 8 / 60.);
}
if (doTransCoding) {
fprintf(stderr, " transcoding rate %.0f kbps",
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/test/SwitchingSampRate/SwitchingSampRate.cc b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/test/SwitchingSampRate/SwitchingSampRate.cc
index a11e408b3b1..a53e7bd0b56 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/test/SwitchingSampRate/SwitchingSampRate.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/test/SwitchingSampRate/SwitchingSampRate.cc
@@ -51,9 +51,9 @@ int main(int argc, char* argv[])
short clientCntr;
- unsigned int lenEncodedInBytes[MAX_NUM_CLIENTS];
+ size_t lenEncodedInBytes[MAX_NUM_CLIENTS];
unsigned int lenAudioIn10ms[MAX_NUM_CLIENTS];
- unsigned int lenEncodedInBytesTmp[MAX_NUM_CLIENTS];
+ size_t lenEncodedInBytesTmp[MAX_NUM_CLIENTS];
unsigned int lenAudioIn10msTmp[MAX_NUM_CLIENTS];
BottleNeckModel* packetData[MAX_NUM_CLIENTS];
@@ -166,13 +166,7 @@ int main(int argc, char* argv[])
return -1;
}
- // Initialize Decoder
- if(WebRtcIsac_DecoderInit(codecInstance[clientCntr]) < 0)
- {
- printf("Could not initialize decoder of client %d\n",
- clientCntr + 1);
- return -1;
- }
+ WebRtcIsac_DecoderInit(codecInstance[clientCntr]);
// setup Rate if in Instantaneous mode
if(codingMode != 0)
@@ -189,9 +183,9 @@ int main(int argc, char* argv[])
}
- short streamLen;
+ size_t streamLen;
short numSamplesRead;
- int lenDecodedAudio;
+ size_t lenDecodedAudio;
short senderIdx;
short receiverIdx;
@@ -282,11 +276,11 @@ int main(int argc, char* argv[])
// Encode
- streamLen = WebRtcIsac_Encode(codecInstance[senderIdx],
- audioBuff10ms,
- (uint8_t*)bitStream);
+ int streamLen_int = WebRtcIsac_Encode(codecInstance[senderIdx],
+ audioBuff10ms,
+ (uint8_t*)bitStream);
int16_t ggg;
- if (streamLen > 0) {
+ if (streamLen_int > 0) {
if ((WebRtcIsac_ReadFrameLen(
codecInstance[receiverIdx],
reinterpret_cast<const uint8_t*>(bitStream),
@@ -295,11 +289,12 @@ int main(int argc, char* argv[])
}
// Sanity check
- if(streamLen < 0)
+ if(streamLen_int < 0)
{
printf(" Encoder error in client %d \n", senderIdx + 1);
return -1;
}
+ streamLen = static_cast<size_t>(streamLen_int);
if(streamLen > 0)
@@ -423,18 +418,18 @@ int main(int argc, char* argv[])
}
/**/
// Decode
- lenDecodedAudio = WebRtcIsac_Decode(
+ int lenDecodedAudio_int = WebRtcIsac_Decode(
codecInstance[receiverIdx],
reinterpret_cast<const uint8_t*>(bitStream),
streamLen,
audioBuff60ms,
speechType);
- if(lenDecodedAudio < 0)
+ if(lenDecodedAudio_int < 0)
{
printf(" Decoder error in client %d \n", receiverIdx + 1);
return -1;
}
-
+ lenDecodedAudio = static_cast<size_t>(lenDecodedAudio_int);
if(encoderSampRate[senderIdx] == 16000)
{
@@ -442,7 +437,7 @@ int main(int argc, char* argv[])
resamplerState[receiverIdx]);
if (fwrite(resampledAudio60ms, sizeof(short), lenDecodedAudio << 1,
outFile[receiverIdx]) !=
- static_cast<size_t>(lenDecodedAudio << 1)) {
+ lenDecodedAudio << 1) {
return -1;
}
}
@@ -450,7 +445,7 @@ int main(int argc, char* argv[])
{
if (fwrite(audioBuff60ms, sizeof(short), lenDecodedAudio,
outFile[receiverIdx]) !=
- static_cast<size_t>(lenDecodedAudio)) {
+ lenDecodedAudio) {
return -1;
}
}
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/test/simpleKenny.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/test/simpleKenny.c
index e0d0f412c80..e8116ffdf86 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/test/simpleKenny.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/test/simpleKenny.c
@@ -26,6 +26,7 @@
/* include API */
#include "isac.h"
#include "utility.h"
+#include "webrtc/base/format_macros.h"
//#include "commonDefs.h"
/* max number of samples per frame (= 60 ms frame) */
@@ -57,7 +58,7 @@ int main(int argc, char* argv[]) {
/* Runtime statistics */
double rate;
double rateRCU;
- unsigned long totalbits = 0;
+ size_t totalbits = 0;
unsigned long totalBitsRCU = 0;
unsigned long totalsmpls = 0;
@@ -72,7 +73,7 @@ int main(int argc, char* argv[]) {
int32_t rateLimit;
ISACStruct* ISAC_main_inst;
- int16_t stream_len = 0;
+ size_t stream_len = 0;
int declen = 0;
int16_t err;
int cur_framesmpls;
@@ -94,7 +95,7 @@ int main(int argc, char* argv[]) {
FILE* averageFile;
int sampFreqKHz;
int samplesIn10Ms;
- int16_t maxStreamLen = 0;
+ size_t maxStreamLen = 0;
char histFileName[500];
char averageFileName[500];
unsigned int hist[600];
@@ -252,10 +253,7 @@ int main(int argc, char* argv[]) {
printf("cannot initialize encoder\n");
return -1;
}
- if (WebRtcIsac_DecoderInit(ISAC_main_inst) < 0) {
- printf("cannot initialize decoder\n");
- return -1;
- }
+ WebRtcIsac_DecoderInit(ISAC_main_inst);
// {
// int32_t b1, b2;
@@ -310,22 +308,22 @@ int main(int argc, char* argv[]) {
if (onlyDecode) {
uint8_t auxUW8;
- size_t auxSizet;
if (fread(&auxUW8, sizeof(uint8_t), 1, inp) < 1) {
break;
}
- stream_len = ((uint8_t)auxUW8) << 8;
+ stream_len = auxUW8 << 8;
if (fread(&auxUW8, sizeof(uint8_t), 1, inp) < 1) {
break;
}
- stream_len |= (uint16_t)auxUW8;
- auxSizet = (size_t)stream_len;
- if (fread(payload, 1, auxSizet, inp) < auxSizet) {
+ stream_len |= auxUW8;
+ if (fread(payload, 1, stream_len, inp) < stream_len) {
printf("last payload is corrupted\n");
break;
}
} else {
while (stream_len == 0) {
+ int stream_len_int;
+
// Read 10 ms speech block
endfile = readframe(shortdata, inp, samplesIn10Ms);
if (endfile) {
@@ -334,15 +332,16 @@ int main(int argc, char* argv[]) {
cur_framesmpls += samplesIn10Ms;
//-------- iSAC encoding ---------
- stream_len = WebRtcIsac_Encode(ISAC_main_inst, shortdata, payload);
+ stream_len_int = WebRtcIsac_Encode(ISAC_main_inst, shortdata, payload);
- if (stream_len < 0) {
+ if (stream_len_int < 0) {
// exit if returned with error
// errType=WebRtcIsac_GetErrorCode(ISAC_main_inst);
fprintf(stderr, "\nError in encoder\n");
getc(stdin);
exit(EXIT_FAILURE);
}
+ stream_len = (size_t)stream_len_int;
}
//===================================================================
if (endfile) {
@@ -350,6 +349,11 @@ int main(int argc, char* argv[]) {
}
rcuStreamLen = WebRtcIsac_GetRedPayload(ISAC_main_inst, payloadRCU);
+ if (rcuStreamLen < 0) {
+ fprintf(stderr, "\nError getting RED payload\n");
+ getc(stdin);
+ exit(EXIT_FAILURE);
+ }
get_arrival_time(cur_framesmpls, stream_len, bottleneck, &packetData,
sampFreqKHz * 1000, sampFreqKHz * 1000);
@@ -391,15 +395,16 @@ int main(int argc, char* argv[]) {
if (fwrite(&auxUW8, sizeof(uint8_t), 1, outp) != 1) {
return -1;
}
- if (fwrite(payload, 1, stream_len, outp) != (size_t)stream_len) {
+ if (fwrite(payload, 1, stream_len, outp) != stream_len) {
return -1;
}
} else {
//======================= iSAC decoding ===========================
if ((rand() % 100) < packetLossPercent) {
- declen = WebRtcIsac_DecodeRcu(ISAC_main_inst, payloadRCU, rcuStreamLen,
- decoded, speechType);
+ declen = WebRtcIsac_DecodeRcu(ISAC_main_inst, payloadRCU,
+ (size_t)rcuStreamLen, decoded,
+ speechType);
lostPacketCntr++;
} else {
declen = WebRtcIsac_Decode(ISAC_main_inst, payload, stream_len, decoded,
@@ -453,7 +458,7 @@ int main(int argc, char* argv[]) {
printf("\n");
printf("Measured bit-rate........... %0.3f kbps\n", rate);
printf("Measured RCU bit-ratre...... %0.3f kbps\n", rateRCU);
- printf("Maximum bit-rate/payloadsize %0.3f / %d\n",
+ printf("Maximum bit-rate/payloadsize %0.3f / %" PRIuS "\n",
maxStreamLen * 8 / 0.03, maxStreamLen);
printf("Measured packet-loss........ %0.1f%% \n",
100.0f * (float)lostPacketCntr / (float)packetCntr);
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/util/utility.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/util/utility.c
index 0a2256a0365..d9c4332123c 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/util/utility.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/util/utility.c
@@ -135,7 +135,7 @@ readParamString(
void
get_arrival_time(
int current_framesamples, /* samples */
- int packet_size, /* bytes */
+ size_t packet_size, /* bytes */
int bottleneck, /* excluding headers; bits/s */
BottleNeckModel* BN_data,
short senderSampFreqHz,
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/util/utility.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/util/utility.h
index f9fba94315b..1bb6d295b40 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/util/utility.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/main/util/utility.h
@@ -99,7 +99,7 @@ extern "C" {
void get_arrival_time(
int current_framesamples, /* samples */
- int packet_size, /* bytes */
+ size_t packet_size, /* bytes */
int bottleneck, /* excluding headers; bits/s */
BottleNeckModel* BN_data,
short senderSampFreqHz,
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/unittest.cc b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/unittest.cc
index a80fd08bcfc..673d2906ae6 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/unittest.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/isac/unittest.cc
@@ -24,10 +24,11 @@ namespace webrtc {
namespace {
+const int kIsacNumberOfSamples = 32 * 60; // 60 ms at 32 kHz
+
std::vector<int16_t> LoadSpeechData() {
webrtc::test::InputAudioFile input_file(
webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm"));
- static const int kIsacNumberOfSamples = 32 * 60; // 60 ms at 32 kHz
std::vector<int16_t> speech_data(kIsacNumberOfSamples);
input_file.Read(kIsacNumberOfSamples, speech_data.data());
return speech_data;
@@ -41,32 +42,45 @@ IsacBandwidthInfo GetBwInfo(typename T::instance_type* inst) {
return bi;
}
+// Encodes one packet. Returns the packet duration in milliseconds.
template <typename T>
-rtc::Buffer EncodePacket(typename T::instance_type* inst,
- const IsacBandwidthInfo* bi,
- const int16_t* speech_data,
- int framesize_ms) {
- rtc::Buffer output(1000);
- for (int i = 0;; ++i) {
+int EncodePacket(typename T::instance_type* inst,
+ const IsacBandwidthInfo* bi,
+ const int16_t* speech_data,
+ rtc::Buffer* output) {
+ output->SetSize(1000);
+ for (int duration_ms = 10;; duration_ms += 10) {
if (bi)
T::SetBandwidthInfo(inst, bi);
- int encoded_bytes = T::Encode(inst, speech_data, output.data());
- if (i + 1 == framesize_ms / 10) {
+ int encoded_bytes = T::Encode(inst, speech_data, output->data());
+ if (encoded_bytes > 0 || duration_ms >= 60) {
EXPECT_GT(encoded_bytes, 0);
- EXPECT_LE(static_cast<size_t>(encoded_bytes), output.size());
- output.SetSize(encoded_bytes);
- return output;
+ EXPECT_LE(static_cast<size_t>(encoded_bytes), output->size());
+ output->SetSize(encoded_bytes);
+ return duration_ms;
}
- EXPECT_EQ(0, encoded_bytes);
}
}
+template <typename T>
+std::vector<int16_t> DecodePacket(typename T::instance_type* inst,
+ const rtc::Buffer& encoded) {
+ std::vector<int16_t> decoded(kIsacNumberOfSamples);
+ int16_t speech_type;
+ int nsamples = T::DecodeInternal(inst, encoded.data(), encoded.size(),
+ &decoded.front(), &speech_type);
+ EXPECT_GT(nsamples, 0);
+ EXPECT_LE(static_cast<size_t>(nsamples), decoded.size());
+ decoded.resize(nsamples);
+ return decoded;
+}
+
class BoundedCapacityChannel final {
public:
- BoundedCapacityChannel(int rate_bits_per_second)
+ BoundedCapacityChannel(int sample_rate_hz, int rate_bits_per_second)
: current_time_rtp_(0),
channel_rate_bytes_per_sample_(rate_bits_per_second /
- (8.0 * kSamplesPerSecond)) {}
+ (8.0 * sample_rate_hz)) {}
// Simulate sending the given number of bytes at the given RTP time. Returns
// the new current RTP time after the sending is done.
@@ -81,47 +95,6 @@ class BoundedCapacityChannel final {
// The somewhat strange unit for channel rate, bytes per sample, is because
// RTP time is measured in samples:
const double channel_rate_bytes_per_sample_;
- static const int kSamplesPerSecond = 16000;
-};
-
-template <typename T, bool adaptive>
-struct TestParam {};
-
-template <>
-struct TestParam<IsacFloat, true> {
- static const int time_to_settle = 200;
- static int ExpectedRateBitsPerSecond(int rate_bits_per_second) {
- return rate_bits_per_second;
- }
-};
-
-template <>
-struct TestParam<IsacFix, true> {
- static const int time_to_settle = 350;
- static int ExpectedRateBitsPerSecond(int rate_bits_per_second) {
- // For some reason, IsacFix fails to adapt to the channel's actual
- // bandwidth. Instead, it settles on a few hundred packets at 10kbit/s,
- // then a few hundred at 5kbit/s, then a few hundred at 10kbit/s, and so
- // on. The 200 packets starting at 350 are in the middle of the first
- // 10kbit/s run.
- return 10000;
- }
-};
-
-template <>
-struct TestParam<IsacFloat, false> {
- static const int time_to_settle = 0;
- static int ExpectedRateBitsPerSecond(int rate_bits_per_second) {
- return 32000;
- }
-};
-
-template <>
-struct TestParam<IsacFix, false> {
- static const int time_to_settle = 0;
- static int ExpectedRateBitsPerSecond(int rate_bits_per_second) {
- return 16000;
- }
};
// Test that the iSAC encoder produces identical output whether or not we use a
@@ -129,143 +102,153 @@ struct TestParam<IsacFix, false> {
// communicate BW estimation info explicitly.
template <typename T, bool adaptive>
void TestGetSetBandwidthInfo(const int16_t* speech_data,
- int rate_bits_per_second) {
- using Param = TestParam<T, adaptive>;
- const int framesize_ms = adaptive ? 60 : 30;
+ int rate_bits_per_second,
+ int sample_rate_hz,
+ int frame_size_ms) {
+ const int bit_rate = 32000;
// Conjoined encoder/decoder pair:
typename T::instance_type* encdec;
ASSERT_EQ(0, T::Create(&encdec));
ASSERT_EQ(0, T::EncoderInit(encdec, adaptive ? 0 : 1));
- ASSERT_EQ(0, T::DecoderInit(encdec));
+ T::DecoderInit(encdec);
+ ASSERT_EQ(0, T::SetEncSampRate(encdec, sample_rate_hz));
+ if (adaptive)
+ ASSERT_EQ(0, T::ControlBwe(encdec, bit_rate, frame_size_ms, false));
+ else
+ ASSERT_EQ(0, T::Control(encdec, bit_rate, frame_size_ms));
// Disjoint encoder/decoder pair:
typename T::instance_type* enc;
ASSERT_EQ(0, T::Create(&enc));
ASSERT_EQ(0, T::EncoderInit(enc, adaptive ? 0 : 1));
+ ASSERT_EQ(0, T::SetEncSampRate(enc, sample_rate_hz));
+ if (adaptive)
+ ASSERT_EQ(0, T::ControlBwe(enc, bit_rate, frame_size_ms, false));
+ else
+ ASSERT_EQ(0, T::Control(enc, bit_rate, frame_size_ms));
typename T::instance_type* dec;
ASSERT_EQ(0, T::Create(&dec));
- ASSERT_EQ(0, T::DecoderInit(dec));
+ T::DecoderInit(dec);
+ T::SetInitialBweBottleneck(dec, bit_rate);
+ T::SetEncSampRateInDecoder(dec, sample_rate_hz);
// 0. Get initial BW info from decoder.
auto bi = GetBwInfo<T>(dec);
- BoundedCapacityChannel channel1(rate_bits_per_second),
- channel2(rate_bits_per_second);
- std::vector<size_t> packet_sizes;
- for (int i = 0; i < Param::time_to_settle + 200; ++i) {
+ BoundedCapacityChannel channel1(sample_rate_hz, rate_bits_per_second),
+ channel2(sample_rate_hz, rate_bits_per_second);
+
+ int elapsed_time_ms = 0;
+ for (int i = 0; elapsed_time_ms < 10000; ++i) {
std::ostringstream ss;
ss << " i = " << i;
SCOPED_TRACE(ss.str());
- // 1. Encode 6 * 10 ms (adaptive) or 3 * 10 ms (nonadaptive). The separate
- // encoder is given the BW info before each encode call.
- auto bitstream1 =
- EncodePacket<T>(encdec, nullptr, speech_data, framesize_ms);
- auto bitstream2 = EncodePacket<T>(enc, &bi, speech_data, framesize_ms);
+ // 1. Encode 3 * 10 ms or 6 * 10 ms. The separate encoder is given the BW
+ // info before each encode call.
+ rtc::Buffer bitstream1, bitstream2;
+ int duration1_ms =
+ EncodePacket<T>(encdec, nullptr, speech_data, &bitstream1);
+ int duration2_ms = EncodePacket<T>(enc, &bi, speech_data, &bitstream2);
+ EXPECT_EQ(duration1_ms, duration2_ms);
+ if (adaptive)
+ EXPECT_TRUE(duration1_ms == 30 || duration1_ms == 60);
+ else
+ EXPECT_EQ(frame_size_ms, duration1_ms);
+ ASSERT_EQ(bitstream1.size(), bitstream2.size());
EXPECT_EQ(bitstream1, bitstream2);
- if (i > Param::time_to_settle)
- packet_sizes.push_back(bitstream1.size());
-
- // 2. Deliver the encoded data to the decoders (but don't actually ask them
- // to decode it; that's not necessary). Then get new BW info from the
- // separate decoder.
- const int samples_per_packet = 16 * framesize_ms;
- const int send_time = i * samples_per_packet;
+
+ // 2. Deliver the encoded data to the decoders.
+ const int send_time = elapsed_time_ms * (sample_rate_hz / 1000);
EXPECT_EQ(0, T::UpdateBwEstimate(
encdec, bitstream1.data(), bitstream1.size(), i, send_time,
channel1.Send(send_time, bitstream1.size())));
EXPECT_EQ(0, T::UpdateBwEstimate(
dec, bitstream2.data(), bitstream2.size(), i, send_time,
channel2.Send(send_time, bitstream2.size())));
+
+ // 3. Decode, and get new BW info from the separate decoder.
+ ASSERT_EQ(0, T::SetDecSampRate(encdec, sample_rate_hz));
+ ASSERT_EQ(0, T::SetDecSampRate(dec, sample_rate_hz));
+ auto decoded1 = DecodePacket<T>(encdec, bitstream1);
+ auto decoded2 = DecodePacket<T>(dec, bitstream2);
+ EXPECT_EQ(decoded1, decoded2);
bi = GetBwInfo<T>(dec);
+
+ elapsed_time_ms += duration1_ms;
}
EXPECT_EQ(0, T::Free(encdec));
EXPECT_EQ(0, T::Free(enc));
EXPECT_EQ(0, T::Free(dec));
-
- // The average send bitrate is close to the channel's capacity.
- double avg_size =
- std::accumulate(packet_sizes.begin(), packet_sizes.end(), 0) /
- static_cast<double>(packet_sizes.size());
- double avg_rate_bits_per_second = 8.0 * avg_size / (framesize_ms * 1e-3);
- double expected_rate_bits_per_second =
- Param::ExpectedRateBitsPerSecond(rate_bits_per_second);
- EXPECT_GT(avg_rate_bits_per_second / expected_rate_bits_per_second, 0.95);
- EXPECT_LT(avg_rate_bits_per_second / expected_rate_bits_per_second, 1.06);
-
- // The largest packet isn't that large, and the smallest not that small.
- size_t min_size = *std::min_element(packet_sizes.begin(), packet_sizes.end());
- size_t max_size = *std::max_element(packet_sizes.begin(), packet_sizes.end());
- double size_range = max_size - min_size;
- EXPECT_LE(size_range / avg_size, 0.16);
}
-} // namespace
-
-TEST(IsacCommonTest, GetSetBandwidthInfoFloat12kAdaptive) {
- TestGetSetBandwidthInfo<IsacFloat, true>(LoadSpeechData().data(), 12000);
-}
+enum class IsacType { Fix, Float };
-TEST(IsacCommonTest, GetSetBandwidthInfoFloat15kAdaptive) {
- TestGetSetBandwidthInfo<IsacFloat, true>(LoadSpeechData().data(), 15000);
+std::ostream& operator<<(std::ostream& os, IsacType t) {
+ os << (t == IsacType::Fix ? "fix" : "float");
+ return os;
}
-TEST(IsacCommonTest, GetSetBandwidthInfoFloat19kAdaptive) {
- TestGetSetBandwidthInfo<IsacFloat, true>(LoadSpeechData().data(), 19000);
-}
+struct IsacTestParam {
+ IsacType isac_type;
+ bool adaptive;
+ int channel_rate_bits_per_second;
+ int sample_rate_hz;
+ int frame_size_ms;
-TEST(IsacCommonTest, GetSetBandwidthInfoFloat22kAdaptive) {
- TestGetSetBandwidthInfo<IsacFloat, true>(LoadSpeechData().data(), 22000);
-}
-
-TEST(IsacCommonTest, GetSetBandwidthInfoFix12kAdaptive) {
- TestGetSetBandwidthInfo<IsacFix, true>(LoadSpeechData().data(), 12000);
-}
-
-TEST(IsacCommonTest, GetSetBandwidthInfoFix15kAdaptive) {
- TestGetSetBandwidthInfo<IsacFix, true>(LoadSpeechData().data(), 15000);
-}
-
-TEST(IsacCommonTest, GetSetBandwidthInfoFix19kAdaptive) {
- TestGetSetBandwidthInfo<IsacFix, true>(LoadSpeechData().data(), 19000);
-}
-
-TEST(IsacCommonTest, GetSetBandwidthInfoFix22kAdaptive) {
- TestGetSetBandwidthInfo<IsacFix, true>(LoadSpeechData().data(), 22000);
-}
-
-TEST(IsacCommonTest, GetSetBandwidthInfoFloat12k) {
- TestGetSetBandwidthInfo<IsacFloat, false>(LoadSpeechData().data(), 12000);
-}
-
-TEST(IsacCommonTest, GetSetBandwidthInfoFloat15k) {
- TestGetSetBandwidthInfo<IsacFloat, false>(LoadSpeechData().data(), 15000);
-}
-
-TEST(IsacCommonTest, GetSetBandwidthInfoFloat19k) {
- TestGetSetBandwidthInfo<IsacFloat, false>(LoadSpeechData().data(), 19000);
-}
-
-TEST(IsacCommonTest, GetSetBandwidthInfoFloat22k) {
- TestGetSetBandwidthInfo<IsacFloat, false>(LoadSpeechData().data(), 22000);
-}
-
-TEST(IsacCommonTest, GetSetBandwidthInfoFix12k) {
- TestGetSetBandwidthInfo<IsacFix, false>(LoadSpeechData().data(), 12000);
-}
+ friend std::ostream& operator<<(std::ostream& os, const IsacTestParam& itp) {
+ os << '{' << itp.isac_type << ','
+ << (itp.adaptive ? "adaptive" : "nonadaptive") << ','
+ << itp.channel_rate_bits_per_second << ',' << itp.sample_rate_hz << ','
+ << itp.frame_size_ms << '}';
+ return os;
+ }
+};
-TEST(IsacCommonTest, GetSetBandwidthInfoFix15k) {
- TestGetSetBandwidthInfo<IsacFix, false>(LoadSpeechData().data(), 15000);
-}
+class IsacCommonTest : public testing::TestWithParam<IsacTestParam> {};
-TEST(IsacCommonTest, GetSetBandwidthInfoFix19k) {
- TestGetSetBandwidthInfo<IsacFix, false>(LoadSpeechData().data(), 19000);
-}
+} // namespace
-TEST(IsacCommonTest, GetSetBandwidthInfoFix22k) {
- TestGetSetBandwidthInfo<IsacFix, false>(LoadSpeechData().data(), 22000);
-}
+TEST_P(IsacCommonTest, GetSetBandwidthInfo) {
+ auto p = GetParam();
+ auto test_fun = [p] {
+ if (p.isac_type == IsacType::Fix) {
+ if (p.adaptive)
+ return TestGetSetBandwidthInfo<IsacFix, true>;
+ else
+ return TestGetSetBandwidthInfo<IsacFix, false>;
+ } else {
+ if (p.adaptive)
+ return TestGetSetBandwidthInfo<IsacFloat, true>;
+ else
+ return TestGetSetBandwidthInfo<IsacFloat, false>;
+ }
+ }();
+ test_fun(LoadSpeechData().data(), p.channel_rate_bits_per_second,
+ p.sample_rate_hz, p.frame_size_ms);
+}
+
+std::vector<IsacTestParam> TestCases() {
+ static const IsacType types[] = {IsacType::Fix, IsacType::Float};
+ static const bool adaptives[] = {true, false};
+ static const int channel_rates[] = {12000, 15000, 19000, 22000};
+ static const int sample_rates[] = {16000, 32000};
+ static const int frame_sizes[] = {30, 60};
+ std::vector<IsacTestParam> cases;
+ for (IsacType type : types)
+ for (bool adaptive : adaptives)
+ for (int channel_rate : channel_rates)
+ for (int sample_rate : sample_rates)
+ if (!(type == IsacType::Fix && sample_rate == 32000))
+ for (int frame_size : frame_sizes)
+ if (!(sample_rate == 32000 && frame_size == 60))
+ cases.push_back(
+ {type, adaptive, channel_rate, sample_rate, frame_size});
+ return cases;
+}
+
+INSTANTIATE_TEST_CASE_P(, IsacCommonTest, testing::ValuesIn(TestCases()));
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/mock/mock_audio_encoder.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/mock/mock_audio_encoder.h
index 18d40688734..95426d89e1e 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/mock/mock_audio_encoder.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/mock/mock_audio_encoder.h
@@ -17,50 +17,33 @@
namespace webrtc {
-class MockAudioEncoder : public AudioEncoder {
+class MockAudioEncoder final : public AudioEncoder {
public:
- virtual ~MockAudioEncoder() { Die(); }
+ ~MockAudioEncoder() override { Die(); }
MOCK_METHOD0(Die, void());
- MOCK_CONST_METHOD0(SampleRateHz, int());
- MOCK_CONST_METHOD0(NumChannels, int());
+ MOCK_METHOD1(Mark, void(std::string desc));
MOCK_CONST_METHOD0(MaxEncodedBytes, size_t());
- MOCK_CONST_METHOD0(Num10MsFramesInNextPacket, int());
- MOCK_CONST_METHOD0(Max10MsFramesInAPacket, int());
- MOCK_CONST_METHOD0(GetTargetBitrate, int());
- MOCK_METHOD1(SetTargetBitrate, void(int));
- MOCK_METHOD1(SetProjectedPacketLossRate, void(double));
- // Note, we explicitly chose not to create a mock for the Encode method.
- MOCK_METHOD4(EncodeInternal,
- EncodedInfo(uint32_t timestamp,
- const int16_t* audio,
- size_t max_encoded_bytes,
- uint8_t* encoded));
-};
-
-class MockAudioEncoderMutable : public AudioEncoderMutable {
- public:
MOCK_CONST_METHOD0(SampleRateHz, int());
MOCK_CONST_METHOD0(NumChannels, int());
- MOCK_CONST_METHOD0(MaxEncodedBytes, size_t());
- MOCK_CONST_METHOD0(Num10MsFramesInNextPacket, int());
- MOCK_CONST_METHOD0(Max10MsFramesInAPacket, int());
+ MOCK_CONST_METHOD0(RtpTimestampRateHz, int());
+ MOCK_CONST_METHOD0(Num10MsFramesInNextPacket, size_t());
+ MOCK_CONST_METHOD0(Max10MsFramesInAPacket, size_t());
MOCK_CONST_METHOD0(GetTargetBitrate, int());
- MOCK_METHOD1(SetTargetBitrate, void(int));
- MOCK_METHOD1(SetProjectedPacketLossRate, void(double));
// Note, we explicitly chose not to create a mock for the Encode method.
MOCK_METHOD4(EncodeInternal,
EncodedInfo(uint32_t timestamp,
const int16_t* audio,
size_t max_encoded_bytes,
uint8_t* encoded));
-
MOCK_METHOD0(Reset, void());
MOCK_METHOD1(SetFec, bool(bool enable));
MOCK_METHOD1(SetDtx, bool(bool enable));
MOCK_METHOD1(SetApplication, bool(Application application));
+ MOCK_METHOD1(SetMaxPlaybackRate, void(int frequency_hz));
+ MOCK_METHOD1(SetProjectedPacketLossRate, void(double fraction));
+ MOCK_METHOD1(SetTargetBitrate, void(int target_bps));
+ MOCK_METHOD1(SetMaxBitrate, void(int max_bps));
MOCK_METHOD1(SetMaxPayloadSize, void(int max_payload_size_bytes));
- MOCK_METHOD1(SetMaxRate, void(int max_rate_bps));
- MOCK_METHOD1(SetMaxPlaybackRate, bool(int frequency_hz));
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/opus/audio_decoder_opus.cc b/chromium/third_party/webrtc/modules/audio_coding/codecs/opus/audio_decoder_opus.cc
new file mode 100644
index 00000000000..7151ab01a9d
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/opus/audio_decoder_opus.cc
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_coding/codecs/opus/interface/audio_decoder_opus.h"
+
+#include "webrtc/base/checks.h"
+
+namespace webrtc {
+
+AudioDecoderOpus::AudioDecoderOpus(size_t num_channels)
+ : channels_(num_channels) {
+ RTC_DCHECK(num_channels == 1 || num_channels == 2);
+ WebRtcOpus_DecoderCreate(&dec_state_, static_cast<int>(channels_));
+ WebRtcOpus_DecoderInit(dec_state_);
+}
+
+AudioDecoderOpus::~AudioDecoderOpus() {
+ WebRtcOpus_DecoderFree(dec_state_);
+}
+
+int AudioDecoderOpus::DecodeInternal(const uint8_t* encoded,
+ size_t encoded_len,
+ int sample_rate_hz,
+ int16_t* decoded,
+ SpeechType* speech_type) {
+ RTC_DCHECK_EQ(sample_rate_hz, 48000);
+ int16_t temp_type = 1; // Default is speech.
+ int ret =
+ WebRtcOpus_Decode(dec_state_, encoded, encoded_len, decoded, &temp_type);
+ if (ret > 0)
+ ret *= static_cast<int>(channels_); // Return total number of samples.
+ *speech_type = ConvertSpeechType(temp_type);
+ return ret;
+}
+
+int AudioDecoderOpus::DecodeRedundantInternal(const uint8_t* encoded,
+ size_t encoded_len,
+ int sample_rate_hz,
+ int16_t* decoded,
+ SpeechType* speech_type) {
+ if (!PacketHasFec(encoded, encoded_len)) {
+ // This packet is a RED packet.
+ return DecodeInternal(encoded, encoded_len, sample_rate_hz, decoded,
+ speech_type);
+ }
+
+ RTC_DCHECK_EQ(sample_rate_hz, 48000);
+ int16_t temp_type = 1; // Default is speech.
+ int ret = WebRtcOpus_DecodeFec(dec_state_, encoded, encoded_len, decoded,
+ &temp_type);
+ if (ret > 0)
+ ret *= static_cast<int>(channels_); // Return total number of samples.
+ *speech_type = ConvertSpeechType(temp_type);
+ return ret;
+}
+
+void AudioDecoderOpus::Reset() {
+ WebRtcOpus_DecoderInit(dec_state_);
+}
+
+int AudioDecoderOpus::PacketDuration(const uint8_t* encoded,
+ size_t encoded_len) const {
+ return WebRtcOpus_DurationEst(dec_state_, encoded, encoded_len);
+}
+
+int AudioDecoderOpus::PacketDurationRedundant(const uint8_t* encoded,
+ size_t encoded_len) const {
+ if (!PacketHasFec(encoded, encoded_len)) {
+ // This packet is a RED packet.
+ return PacketDuration(encoded, encoded_len);
+ }
+
+ return WebRtcOpus_FecDurationEst(encoded, encoded_len);
+}
+
+bool AudioDecoderOpus::PacketHasFec(const uint8_t* encoded,
+ size_t encoded_len) const {
+ int fec;
+ fec = WebRtcOpus_PacketHasFec(encoded, encoded_len);
+ return (fec == 1);
+}
+
+size_t AudioDecoderOpus::Channels() const {
+ return channels_;
+}
+
+} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/opus/audio_encoder_mutable_opus_test.cc b/chromium/third_party/webrtc/modules/audio_coding/codecs/opus/audio_encoder_mutable_opus_test.cc
deleted file mode 100644
index 3a083985df7..00000000000
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/opus/audio_encoder_mutable_opus_test.cc
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "testing/gtest/include/gtest/gtest.h"
-#include "webrtc/common_types.h"
-#include "webrtc/modules/audio_coding/codecs/opus/interface/audio_encoder_opus.h"
-
-namespace webrtc {
-namespace acm2 {
-
-#ifdef WEBRTC_CODEC_OPUS
-namespace {
-const CodecInst kDefaultOpusCodecInst = {105, "opus", 48000, 960, 1, 32000};
-} // namespace
-
-class AudioEncoderMutableOpusTest : public ::testing::Test {
- protected:
- AudioEncoderMutableOpusTest() : codec_inst_(kDefaultOpusCodecInst) {}
-
- void CreateCodec(int num_channels) {
- codec_inst_.channels = num_channels;
- encoder_.reset(new AudioEncoderMutableOpus(codec_inst_));
- auto expected_app =
- num_channels == 1 ? AudioEncoderOpus::kVoip : AudioEncoderOpus::kAudio;
- EXPECT_EQ(expected_app, encoder_->application());
- }
-
- CodecInst codec_inst_;
- rtc::scoped_ptr<AudioEncoderMutableOpus> encoder_;
-};
-
-TEST_F(AudioEncoderMutableOpusTest, DefaultApplicationModeMono) {
- CreateCodec(1);
-}
-
-TEST_F(AudioEncoderMutableOpusTest, DefaultApplicationModeStereo) {
- CreateCodec(2);
-}
-
-TEST_F(AudioEncoderMutableOpusTest, ChangeApplicationMode) {
- CreateCodec(2);
- EXPECT_TRUE(
- encoder_->SetApplication(AudioEncoderMutable::kApplicationSpeech));
- EXPECT_EQ(AudioEncoderOpus::kVoip, encoder_->application());
-}
-
-TEST_F(AudioEncoderMutableOpusTest, ResetWontChangeApplicationMode) {
- CreateCodec(2);
-
- // Trigger a reset.
- encoder_->Reset();
- // Verify that the mode is still kAudio.
- EXPECT_EQ(AudioEncoderOpus::kAudio, encoder_->application());
-
- // Now change to kVoip.
- EXPECT_TRUE(
- encoder_->SetApplication(AudioEncoderMutable::kApplicationSpeech));
- EXPECT_EQ(AudioEncoderOpus::kVoip, encoder_->application());
-
- // Trigger a reset again.
- encoder_->Reset();
- // Verify that the mode is still kVoip.
- EXPECT_EQ(AudioEncoderOpus::kVoip, encoder_->application());
-}
-
-TEST_F(AudioEncoderMutableOpusTest, ToggleDtx) {
- CreateCodec(2);
- // Enable DTX
- EXPECT_TRUE(encoder_->SetDtx(true));
- // Verify that the mode is still kAudio.
- EXPECT_EQ(AudioEncoderOpus::kAudio, encoder_->application());
- // Turn off DTX.
- EXPECT_TRUE(encoder_->SetDtx(false));
-}
-
-TEST_F(AudioEncoderMutableOpusTest, SetBitrate) {
- CreateCodec(1);
- // Constants are replicated from audio_encoder_opus.cc.
- const int kMinBitrateBps = 500;
- const int kMaxBitrateBps = 512000;
- // Set a too low bitrate.
- encoder_->SetTargetBitrate(kMinBitrateBps - 1);
- EXPECT_EQ(kMinBitrateBps, encoder_->GetTargetBitrate());
- // Set a too high bitrate.
- encoder_->SetTargetBitrate(kMaxBitrateBps + 1);
- EXPECT_EQ(kMaxBitrateBps, encoder_->GetTargetBitrate());
- // Set the minimum rate.
- encoder_->SetTargetBitrate(kMinBitrateBps);
- EXPECT_EQ(kMinBitrateBps, encoder_->GetTargetBitrate());
- // Set the maximum rate.
- encoder_->SetTargetBitrate(kMaxBitrateBps);
- EXPECT_EQ(kMaxBitrateBps, encoder_->GetTargetBitrate());
- // Set rates from 1000 up to 32000 bps.
- for (int rate = 1000; rate <= 32000; rate += 1000) {
- encoder_->SetTargetBitrate(rate);
- EXPECT_EQ(rate, encoder_->GetTargetBitrate());
- }
-}
-#endif // WEBRTC_CODEC_OPUS
-
-} // namespace acm2
-} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.cc b/chromium/third_party/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.cc
index 88e084fe1d5..d47236cabcb 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.cc
@@ -11,6 +11,7 @@
#include "webrtc/modules/audio_coding/codecs/opus/interface/audio_encoder_opus.h"
#include "webrtc/base/checks.h"
+#include "webrtc/base/safe_conversions.h"
#include "webrtc/common_types.h"
#include "webrtc/modules/audio_coding/codecs/opus/interface/opus_interface.h"
@@ -18,46 +19,63 @@ namespace webrtc {
namespace {
+const int kSampleRateHz = 48000;
const int kMinBitrateBps = 500;
const int kMaxBitrateBps = 512000;
-// TODO(tlegrand): Remove this code when we have proper APIs to set the
-// complexity at a higher level.
-#if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS) || defined(WEBRTC_ARCH_ARM)
-// If we are on Android, iOS and/or ARM, use a lower complexity setting as
-// default, to save encoder complexity.
-const int kDefaultComplexity = 5;
-#else
-const int kDefaultComplexity = 9;
-#endif
-
-// We always encode at 48 kHz.
-const int kSampleRateHz = 48000;
-
-int16_t ClampInt16(size_t x) {
- return static_cast<int16_t>(
- std::min(x, static_cast<size_t>(std::numeric_limits<int16_t>::max())));
+AudioEncoderOpus::Config CreateConfig(const CodecInst& codec_inst) {
+ AudioEncoderOpus::Config config;
+ config.frame_size_ms = rtc::CheckedDivExact(codec_inst.pacsize, 48);
+ config.num_channels = codec_inst.channels;
+ config.bitrate_bps = codec_inst.rate;
+ config.payload_type = codec_inst.pltype;
+ config.application = config.num_channels == 1 ? AudioEncoderOpus::kVoip
+ : AudioEncoderOpus::kAudio;
+ return config;
}
-int16_t CastInt16(size_t x) {
- DCHECK_LE(x, static_cast<size_t>(std::numeric_limits<int16_t>::max()));
- return static_cast<int16_t>(x);
+// Optimize the loss rate to configure Opus. Basically, optimized loss rate is
+// the input loss rate rounded down to various levels, because a robustly good
+// audio quality is achieved by lowering the packet loss down.
+// Additionally, to prevent toggling, margins are used, i.e., when jumping to
+// a loss rate from below, a higher threshold is used than jumping to the same
+// level from above.
+double OptimizePacketLossRate(double new_loss_rate, double old_loss_rate) {
+ RTC_DCHECK_GE(new_loss_rate, 0.0);
+ RTC_DCHECK_LE(new_loss_rate, 1.0);
+ RTC_DCHECK_GE(old_loss_rate, 0.0);
+ RTC_DCHECK_LE(old_loss_rate, 1.0);
+ const double kPacketLossRate20 = 0.20;
+ const double kPacketLossRate10 = 0.10;
+ const double kPacketLossRate5 = 0.05;
+ const double kPacketLossRate1 = 0.01;
+ const double kLossRate20Margin = 0.02;
+ const double kLossRate10Margin = 0.01;
+ const double kLossRate5Margin = 0.01;
+ if (new_loss_rate >=
+ kPacketLossRate20 +
+ kLossRate20Margin *
+ (kPacketLossRate20 - old_loss_rate > 0 ? 1 : -1)) {
+ return kPacketLossRate20;
+ } else if (new_loss_rate >=
+ kPacketLossRate10 +
+ kLossRate10Margin *
+ (kPacketLossRate10 - old_loss_rate > 0 ? 1 : -1)) {
+ return kPacketLossRate10;
+ } else if (new_loss_rate >=
+ kPacketLossRate5 +
+ kLossRate5Margin *
+ (kPacketLossRate5 - old_loss_rate > 0 ? 1 : -1)) {
+ return kPacketLossRate5;
+ } else if (new_loss_rate >= kPacketLossRate1) {
+ return kPacketLossRate1;
+ } else {
+ return 0.0;
+ }
}
} // namespace
-AudioEncoderOpus::Config::Config()
- : frame_size_ms(20),
- num_channels(1),
- payload_type(120),
- application(kVoip),
- bitrate_bps(64000),
- fec_enabled(false),
- max_playback_rate_hz(48000),
- complexity(kDefaultComplexity),
- dtx_enabled(false) {
-}
-
bool AudioEncoderOpus::Config::IsOk() const {
if (frame_size_ms <= 0 || frame_size_ms % 10 != 0)
return false;
@@ -71,119 +89,45 @@ bool AudioEncoderOpus::Config::IsOk() const {
}
AudioEncoderOpus::AudioEncoderOpus(const Config& config)
- : num_10ms_frames_per_packet_(
- rtc::CheckedDivExact(config.frame_size_ms, 10)),
- num_channels_(config.num_channels),
- payload_type_(config.payload_type),
- application_(config.application),
- dtx_enabled_(config.dtx_enabled),
- samples_per_10ms_frame_(rtc::CheckedDivExact(kSampleRateHz, 100) *
- num_channels_),
- packet_loss_rate_(0.0) {
- CHECK(config.IsOk());
- input_buffer_.reserve(num_10ms_frames_per_packet_ * samples_per_10ms_frame_);
- CHECK_EQ(0, WebRtcOpus_EncoderCreate(&inst_, num_channels_, application_));
- SetTargetBitrate(config.bitrate_bps);
- if (config.fec_enabled) {
- CHECK_EQ(0, WebRtcOpus_EnableFec(inst_));
- } else {
- CHECK_EQ(0, WebRtcOpus_DisableFec(inst_));
- }
- CHECK_EQ(0,
- WebRtcOpus_SetMaxPlaybackRate(inst_, config.max_playback_rate_hz));
- CHECK_EQ(0, WebRtcOpus_SetComplexity(inst_, config.complexity));
- if (config.dtx_enabled) {
- CHECK_EQ(0, WebRtcOpus_EnableDtx(inst_));
- } else {
- CHECK_EQ(0, WebRtcOpus_DisableDtx(inst_));
- }
-}
-
-AudioEncoderOpus::~AudioEncoderOpus() {
- CHECK_EQ(0, WebRtcOpus_EncoderFree(inst_));
+ : packet_loss_rate_(0.0), inst_(nullptr) {
+ RTC_CHECK(RecreateEncoderInstance(config));
}
-int AudioEncoderOpus::SampleRateHz() const {
- return kSampleRateHz;
-}
+AudioEncoderOpus::AudioEncoderOpus(const CodecInst& codec_inst)
+ : AudioEncoderOpus(CreateConfig(codec_inst)) {}
-int AudioEncoderOpus::NumChannels() const {
- return num_channels_;
+AudioEncoderOpus::~AudioEncoderOpus() {
+ RTC_CHECK_EQ(0, WebRtcOpus_EncoderFree(inst_));
}
size_t AudioEncoderOpus::MaxEncodedBytes() const {
// Calculate the number of bytes we expect the encoder to produce,
// then multiply by two to give a wide margin for error.
- size_t bytes_per_millisecond =
- static_cast<size_t>(bitrate_bps_ / (1000 * 8) + 1);
- size_t approx_encoded_bytes =
- num_10ms_frames_per_packet_ * 10 * bytes_per_millisecond;
+ const size_t bytes_per_millisecond =
+ static_cast<size_t>(config_.bitrate_bps / (1000 * 8) + 1);
+ const size_t approx_encoded_bytes =
+ Num10msFramesPerPacket() * 10 * bytes_per_millisecond;
return 2 * approx_encoded_bytes;
}
-int AudioEncoderOpus::Num10MsFramesInNextPacket() const {
- return num_10ms_frames_per_packet_;
+int AudioEncoderOpus::SampleRateHz() const {
+ return kSampleRateHz;
}
-int AudioEncoderOpus::Max10MsFramesInAPacket() const {
- return num_10ms_frames_per_packet_;
+int AudioEncoderOpus::NumChannels() const {
+ return config_.num_channels;
}
-int AudioEncoderOpus::GetTargetBitrate() const {
- return bitrate_bps_;
+size_t AudioEncoderOpus::Num10MsFramesInNextPacket() const {
+ return Num10msFramesPerPacket();
}
-void AudioEncoderOpus::SetTargetBitrate(int bits_per_second) {
- bitrate_bps_ = std::max(std::min(bits_per_second, kMaxBitrateBps),
- kMinBitrateBps);
- CHECK_EQ(WebRtcOpus_SetBitRate(inst_, bitrate_bps_), 0);
+size_t AudioEncoderOpus::Max10MsFramesInAPacket() const {
+ return Num10msFramesPerPacket();
}
-void AudioEncoderOpus::SetProjectedPacketLossRate(double fraction) {
- DCHECK_GE(fraction, 0.0);
- DCHECK_LE(fraction, 1.0);
- // Optimize the loss rate to configure Opus. Basically, optimized loss rate is
- // the input loss rate rounded down to various levels, because a robustly good
- // audio quality is achieved by lowering the packet loss down.
- // Additionally, to prevent toggling, margins are used, i.e., when jumping to
- // a loss rate from below, a higher threshold is used than jumping to the same
- // level from above.
- const double kPacketLossRate20 = 0.20;
- const double kPacketLossRate10 = 0.10;
- const double kPacketLossRate5 = 0.05;
- const double kPacketLossRate1 = 0.01;
- const double kLossRate20Margin = 0.02;
- const double kLossRate10Margin = 0.01;
- const double kLossRate5Margin = 0.01;
- double opt_loss_rate;
- if (fraction >=
- kPacketLossRate20 +
- kLossRate20Margin *
- (kPacketLossRate20 - packet_loss_rate_ > 0 ? 1 : -1)) {
- opt_loss_rate = kPacketLossRate20;
- } else if (fraction >=
- kPacketLossRate10 +
- kLossRate10Margin *
- (kPacketLossRate10 - packet_loss_rate_ > 0 ? 1 : -1)) {
- opt_loss_rate = kPacketLossRate10;
- } else if (fraction >=
- kPacketLossRate5 +
- kLossRate5Margin *
- (kPacketLossRate5 - packet_loss_rate_ > 0 ? 1 : -1)) {
- opt_loss_rate = kPacketLossRate5;
- } else if (fraction >= kPacketLossRate1) {
- opt_loss_rate = kPacketLossRate1;
- } else {
- opt_loss_rate = 0;
- }
-
- if (packet_loss_rate_ != opt_loss_rate) {
- // Ask the encoder to change the target packet loss rate.
- CHECK_EQ(WebRtcOpus_SetPacketLossRate(
- inst_, static_cast<int32_t>(opt_loss_rate * 100 + .5)),
- 0);
- packet_loss_rate_ = opt_loss_rate;
- }
+int AudioEncoderOpus::GetTargetBitrate() const {
+ return config_.bitrate_bps;
}
AudioEncoder::EncodedInfo AudioEncoderOpus::EncodeInternal(
@@ -194,76 +138,121 @@ AudioEncoder::EncodedInfo AudioEncoderOpus::EncodeInternal(
if (input_buffer_.empty())
first_timestamp_in_buffer_ = rtp_timestamp;
input_buffer_.insert(input_buffer_.end(), audio,
- audio + samples_per_10ms_frame_);
- if (input_buffer_.size() < (static_cast<size_t>(num_10ms_frames_per_packet_) *
- samples_per_10ms_frame_)) {
+ audio + SamplesPer10msFrame());
+ if (input_buffer_.size() <
+ (static_cast<size_t>(Num10msFramesPerPacket()) * SamplesPer10msFrame())) {
return EncodedInfo();
}
- CHECK_EQ(input_buffer_.size(),
- static_cast<size_t>(num_10ms_frames_per_packet_) *
- samples_per_10ms_frame_);
+ RTC_CHECK_EQ(
+ input_buffer_.size(),
+ static_cast<size_t>(Num10msFramesPerPacket()) * SamplesPer10msFrame());
int status = WebRtcOpus_Encode(
inst_, &input_buffer_[0],
- rtc::CheckedDivExact(CastInt16(input_buffer_.size()),
- static_cast<int16_t>(num_channels_)),
- ClampInt16(max_encoded_bytes), encoded);
- CHECK_GE(status, 0); // Fails only if fed invalid data.
+ rtc::CheckedDivExact(input_buffer_.size(),
+ static_cast<size_t>(config_.num_channels)),
+ rtc::saturated_cast<int16_t>(max_encoded_bytes), encoded);
+ RTC_CHECK_GE(status, 0); // Fails only if fed invalid data.
input_buffer_.clear();
EncodedInfo info;
info.encoded_bytes = static_cast<size_t>(status);
info.encoded_timestamp = first_timestamp_in_buffer_;
- info.payload_type = payload_type_;
+ info.payload_type = config_.payload_type;
info.send_even_if_empty = true; // Allows Opus to send empty packets.
info.speech = (status > 0);
return info;
}
-namespace {
-AudioEncoderOpus::Config CreateConfig(const CodecInst& codec_inst) {
- AudioEncoderOpus::Config config;
- config.frame_size_ms = rtc::CheckedDivExact(codec_inst.pacsize, 48);
- config.num_channels = codec_inst.channels;
- config.bitrate_bps = codec_inst.rate;
- config.payload_type = codec_inst.pltype;
- config.application = (config.num_channels == 1 ? AudioEncoderOpus::kVoip
- : AudioEncoderOpus::kAudio);
- return config;
+void AudioEncoderOpus::Reset() {
+ RTC_CHECK(RecreateEncoderInstance(config_));
}
-} // namespace
-AudioEncoderMutableOpus::AudioEncoderMutableOpus(const CodecInst& codec_inst)
- : AudioEncoderMutableImpl<AudioEncoderOpus>(CreateConfig(codec_inst)) {
-}
-
-bool AudioEncoderMutableOpus::SetFec(bool enable) {
- auto conf = config();
+bool AudioEncoderOpus::SetFec(bool enable) {
+ auto conf = config_;
conf.fec_enabled = enable;
- return Reconstruct(conf);
+ return RecreateEncoderInstance(conf);
}
-bool AudioEncoderMutableOpus::SetDtx(bool enable) {
- auto conf = config();
+bool AudioEncoderOpus::SetDtx(bool enable) {
+ auto conf = config_;
conf.dtx_enabled = enable;
- return Reconstruct(conf);
+ return RecreateEncoderInstance(conf);
}
-bool AudioEncoderMutableOpus::SetApplication(Application application) {
- auto conf = config();
+bool AudioEncoderOpus::SetApplication(Application application) {
+ auto conf = config_;
switch (application) {
- case kApplicationSpeech:
+ case Application::kSpeech:
conf.application = AudioEncoderOpus::kVoip;
break;
- case kApplicationAudio:
+ case Application::kAudio:
conf.application = AudioEncoderOpus::kAudio;
break;
}
- return Reconstruct(conf);
+ return RecreateEncoderInstance(conf);
}
-bool AudioEncoderMutableOpus::SetMaxPlaybackRate(int frequency_hz) {
- auto conf = config();
+void AudioEncoderOpus::SetMaxPlaybackRate(int frequency_hz) {
+ auto conf = config_;
conf.max_playback_rate_hz = frequency_hz;
- return Reconstruct(conf);
+ RTC_CHECK(RecreateEncoderInstance(conf));
+}
+
+void AudioEncoderOpus::SetProjectedPacketLossRate(double fraction) {
+ double opt_loss_rate = OptimizePacketLossRate(fraction, packet_loss_rate_);
+ if (packet_loss_rate_ != opt_loss_rate) {
+ packet_loss_rate_ = opt_loss_rate;
+ RTC_CHECK_EQ(
+ 0, WebRtcOpus_SetPacketLossRate(
+ inst_, static_cast<int32_t>(packet_loss_rate_ * 100 + .5)));
+ }
+}
+
+void AudioEncoderOpus::SetTargetBitrate(int bits_per_second) {
+ config_.bitrate_bps =
+ std::max(std::min(bits_per_second, kMaxBitrateBps), kMinBitrateBps);
+ RTC_DCHECK(config_.IsOk());
+ RTC_CHECK_EQ(0, WebRtcOpus_SetBitRate(inst_, config_.bitrate_bps));
+}
+
+int AudioEncoderOpus::Num10msFramesPerPacket() const {
+ return rtc::CheckedDivExact(config_.frame_size_ms, 10);
+}
+
+int AudioEncoderOpus::SamplesPer10msFrame() const {
+ return rtc::CheckedDivExact(kSampleRateHz, 100) * config_.num_channels;
+}
+
+// If the given config is OK, recreate the Opus encoder instance with those
+// settings, save the config, and return true. Otherwise, do nothing and return
+// false.
+bool AudioEncoderOpus::RecreateEncoderInstance(const Config& config) {
+ if (!config.IsOk())
+ return false;
+ if (inst_)
+ RTC_CHECK_EQ(0, WebRtcOpus_EncoderFree(inst_));
+ input_buffer_.clear();
+ input_buffer_.reserve(Num10msFramesPerPacket() * SamplesPer10msFrame());
+ RTC_CHECK_EQ(0, WebRtcOpus_EncoderCreate(&inst_, config.num_channels,
+ config.application));
+ RTC_CHECK_EQ(0, WebRtcOpus_SetBitRate(inst_, config.bitrate_bps));
+ if (config.fec_enabled) {
+ RTC_CHECK_EQ(0, WebRtcOpus_EnableFec(inst_));
+ } else {
+ RTC_CHECK_EQ(0, WebRtcOpus_DisableFec(inst_));
+ }
+ RTC_CHECK_EQ(
+ 0, WebRtcOpus_SetMaxPlaybackRate(inst_, config.max_playback_rate_hz));
+ RTC_CHECK_EQ(0, WebRtcOpus_SetComplexity(inst_, config.complexity));
+ if (config.dtx_enabled) {
+ RTC_CHECK_EQ(0, WebRtcOpus_EnableDtx(inst_));
+ } else {
+ RTC_CHECK_EQ(0, WebRtcOpus_DisableDtx(inst_));
+ }
+ RTC_CHECK_EQ(0,
+ WebRtcOpus_SetPacketLossRate(
+ inst_, static_cast<int32_t>(packet_loss_rate_ * 100 + .5)));
+ config_ = config;
+ return true;
}
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus_unittest.cc b/chromium/third_party/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus_unittest.cc
index 33afa5fcc57..64742eea4e8 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus_unittest.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus_unittest.cc
@@ -9,72 +9,144 @@
*/
#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/checks.h"
#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/common_types.h"
#include "webrtc/modules/audio_coding/codecs/opus/interface/audio_encoder_opus.h"
namespace webrtc {
+namespace {
+const CodecInst kOpusSettings = {105, "opus", 48000, 960, 1, 32000};
+} // namespace
+
class AudioEncoderOpusTest : public ::testing::Test {
protected:
- // The constructor simply creates an Opus encoder with default configuration.
- AudioEncoderOpusTest()
- : opus_(new AudioEncoderOpus(AudioEncoderOpus::Config())) {}
-
- // Repeatedly sets packet loss rates in the range [from, to], increasing by
- // 0.01 in each step. The function verifies that the actual loss rate is
- // |expected_return|.
- void TestSetPacketLossRate(double from, double to, double expected_return) {
- ASSERT_TRUE(opus_);
- for (double loss = from; loss <= to;
- (to >= from) ? loss += 0.01 : loss -= 0.01) {
- opus_->SetProjectedPacketLossRate(loss);
- EXPECT_DOUBLE_EQ(expected_return, opus_->packet_loss_rate());
- }
+ void CreateCodec(int num_channels) {
+ codec_inst_.channels = num_channels;
+ encoder_.reset(new AudioEncoderOpus(codec_inst_));
+ auto expected_app =
+ num_channels == 1 ? AudioEncoderOpus::kVoip : AudioEncoderOpus::kAudio;
+ EXPECT_EQ(expected_app, encoder_->application());
}
- rtc::scoped_ptr<AudioEncoderOpus> opus_;
+ CodecInst codec_inst_ = kOpusSettings;
+ rtc::scoped_ptr<AudioEncoderOpus> encoder_;
};
+TEST_F(AudioEncoderOpusTest, DefaultApplicationModeMono) {
+ CreateCodec(1);
+}
+
+TEST_F(AudioEncoderOpusTest, DefaultApplicationModeStereo) {
+ CreateCodec(2);
+}
+
+TEST_F(AudioEncoderOpusTest, ChangeApplicationMode) {
+ CreateCodec(2);
+ EXPECT_TRUE(encoder_->SetApplication(AudioEncoder::Application::kSpeech));
+ EXPECT_EQ(AudioEncoderOpus::kVoip, encoder_->application());
+}
+
+TEST_F(AudioEncoderOpusTest, ResetWontChangeApplicationMode) {
+ CreateCodec(2);
+
+ // Trigger a reset.
+ encoder_->Reset();
+ // Verify that the mode is still kAudio.
+ EXPECT_EQ(AudioEncoderOpus::kAudio, encoder_->application());
+
+ // Now change to kVoip.
+ EXPECT_TRUE(encoder_->SetApplication(AudioEncoder::Application::kSpeech));
+ EXPECT_EQ(AudioEncoderOpus::kVoip, encoder_->application());
+
+ // Trigger a reset again.
+ encoder_->Reset();
+ // Verify that the mode is still kVoip.
+ EXPECT_EQ(AudioEncoderOpus::kVoip, encoder_->application());
+}
+
+TEST_F(AudioEncoderOpusTest, ToggleDtx) {
+ CreateCodec(2);
+ // Enable DTX
+ EXPECT_TRUE(encoder_->SetDtx(true));
+ // Verify that the mode is still kAudio.
+ EXPECT_EQ(AudioEncoderOpus::kAudio, encoder_->application());
+ // Turn off DTX.
+ EXPECT_TRUE(encoder_->SetDtx(false));
+}
+
+TEST_F(AudioEncoderOpusTest, SetBitrate) {
+ CreateCodec(1);
+ // Constants are replicated from audio_encoder_opus.cc.
+ const int kMinBitrateBps = 500;
+ const int kMaxBitrateBps = 512000;
+ // Set a too low bitrate.
+ encoder_->SetTargetBitrate(kMinBitrateBps - 1);
+ EXPECT_EQ(kMinBitrateBps, encoder_->GetTargetBitrate());
+ // Set a too high bitrate.
+ encoder_->SetTargetBitrate(kMaxBitrateBps + 1);
+ EXPECT_EQ(kMaxBitrateBps, encoder_->GetTargetBitrate());
+ // Set the minimum rate.
+ encoder_->SetTargetBitrate(kMinBitrateBps);
+ EXPECT_EQ(kMinBitrateBps, encoder_->GetTargetBitrate());
+ // Set the maximum rate.
+ encoder_->SetTargetBitrate(kMaxBitrateBps);
+ EXPECT_EQ(kMaxBitrateBps, encoder_->GetTargetBitrate());
+ // Set rates from 1000 up to 32000 bps.
+ for (int rate = 1000; rate <= 32000; rate += 1000) {
+ encoder_->SetTargetBitrate(rate);
+ EXPECT_EQ(rate, encoder_->GetTargetBitrate());
+ }
+}
+
namespace {
-// These constants correspond to those used in
-// AudioEncoderOpus::SetProjectedPacketLossRate.
-const double kPacketLossRate20 = 0.20;
-const double kPacketLossRate10 = 0.10;
-const double kPacketLossRate5 = 0.05;
-const double kPacketLossRate1 = 0.01;
-const double kLossRate20Margin = 0.02;
-const double kLossRate10Margin = 0.01;
-const double kLossRate5Margin = 0.01;
+
+// Returns a vector with the n evenly-spaced numbers a, a + (b - a)/(n - 1),
+// ..., b.
+std::vector<double> IntervalSteps(double a, double b, size_t n) {
+ RTC_DCHECK_GT(n, 1u);
+ const double step = (b - a) / (n - 1);
+ std::vector<double> points;
+ for (size_t i = 0; i < n; ++i)
+ points.push_back(a + i * step);
+ return points;
+}
+
+// Sets the packet loss rate to each number in the vector in turn, and verifies
+// that the loss rate as reported by the encoder is |expected_return| for all
+// of them.
+void TestSetPacketLossRate(AudioEncoderOpus* encoder,
+ const std::vector<double>& losses,
+ double expected_return) {
+ for (double loss : losses) {
+ encoder->SetProjectedPacketLossRate(loss);
+ EXPECT_DOUBLE_EQ(expected_return, encoder->packet_loss_rate());
+ }
+}
+
} // namespace
TEST_F(AudioEncoderOpusTest, PacketLossRateOptimized) {
+ CreateCodec(1);
+ auto I = [](double a, double b) { return IntervalSteps(a, b, 10); };
+ const double eps = 1e-15;
+
// Note that the order of the following calls is critical.
- TestSetPacketLossRate(0.0, 0.0, 0.0);
- TestSetPacketLossRate(kPacketLossRate1,
- kPacketLossRate5 + kLossRate5Margin - 0.01,
- kPacketLossRate1);
- TestSetPacketLossRate(kPacketLossRate5 + kLossRate5Margin,
- kPacketLossRate10 + kLossRate10Margin - 0.01,
- kPacketLossRate5);
- TestSetPacketLossRate(kPacketLossRate10 + kLossRate10Margin,
- kPacketLossRate20 + kLossRate20Margin - 0.01,
- kPacketLossRate10);
- TestSetPacketLossRate(kPacketLossRate20 + kLossRate20Margin,
- 1.0,
- kPacketLossRate20);
- TestSetPacketLossRate(kPacketLossRate20 + kLossRate20Margin,
- kPacketLossRate20 - kLossRate20Margin,
- kPacketLossRate20);
- TestSetPacketLossRate(kPacketLossRate20 - kLossRate20Margin - 0.01,
- kPacketLossRate10 - kLossRate10Margin,
- kPacketLossRate10);
- TestSetPacketLossRate(kPacketLossRate10 - kLossRate10Margin - 0.01,
- kPacketLossRate5 - kLossRate5Margin,
- kPacketLossRate5);
- TestSetPacketLossRate(kPacketLossRate5 - kLossRate5Margin - 0.01,
- kPacketLossRate1,
- kPacketLossRate1);
- TestSetPacketLossRate(0.0, 0.0, 0.0);
+
+ // clang-format off
+ TestSetPacketLossRate(encoder_.get(), I(0.00 , 0.01 - eps), 0.00);
+ TestSetPacketLossRate(encoder_.get(), I(0.01 + eps, 0.06 - eps), 0.01);
+ TestSetPacketLossRate(encoder_.get(), I(0.06 + eps, 0.11 - eps), 0.05);
+ TestSetPacketLossRate(encoder_.get(), I(0.11 + eps, 0.22 - eps), 0.10);
+ TestSetPacketLossRate(encoder_.get(), I(0.22 + eps, 1.00 ), 0.20);
+
+ TestSetPacketLossRate(encoder_.get(), I(1.00 , 0.18 + eps), 0.20);
+ TestSetPacketLossRate(encoder_.get(), I(0.18 - eps, 0.09 + eps), 0.10);
+ TestSetPacketLossRate(encoder_.get(), I(0.09 - eps, 0.04 + eps), 0.05);
+ TestSetPacketLossRate(encoder_.get(), I(0.04 - eps, 0.01 + eps), 0.01);
+ TestSetPacketLossRate(encoder_.get(), I(0.01 - eps, 0.00 ), 0.00);
+ // clang-format on
}
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/opus/interface/audio_decoder_opus.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/opus/interface/audio_decoder_opus.h
new file mode 100644
index 00000000000..d28113b7788
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/opus/interface/audio_decoder_opus.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_OPUS_INTERFACE_AUDIO_DECODER_OPUS_H
+#define WEBRTC_MODULES_AUDIO_CODING_CODECS_OPUS_INTERFACE_AUDIO_DECODER_OPUS_H
+
+#include "webrtc/modules/audio_coding/codecs/audio_decoder.h"
+#include "webrtc/modules/audio_coding/codecs/opus/interface/opus_interface.h"
+
+namespace webrtc {
+
+class AudioDecoderOpus final : public AudioDecoder {
+ public:
+ explicit AudioDecoderOpus(size_t num_channels);
+ ~AudioDecoderOpus() override;
+
+ void Reset() override;
+ int PacketDuration(const uint8_t* encoded, size_t encoded_len) const override;
+ int PacketDurationRedundant(const uint8_t* encoded,
+ size_t encoded_len) const override;
+ bool PacketHasFec(const uint8_t* encoded, size_t encoded_len) const override;
+ size_t Channels() const override;
+
+ protected:
+ int DecodeInternal(const uint8_t* encoded,
+ size_t encoded_len,
+ int sample_rate_hz,
+ int16_t* decoded,
+ SpeechType* speech_type) override;
+ int DecodeRedundantInternal(const uint8_t* encoded,
+ size_t encoded_len,
+ int sample_rate_hz,
+ int16_t* decoded,
+ SpeechType* speech_type) override;
+
+ private:
+ OpusDecInst* dec_state_;
+ const size_t channels_;
+ RTC_DISALLOW_COPY_AND_ASSIGN(AudioDecoderOpus);
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_OPUS_INTERFACE_AUDIO_DECODER_OPUS_H
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/opus/interface/audio_encoder_opus.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/opus/interface/audio_encoder_opus.h
index 3393bd516d3..9659a2bbd38 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/opus/interface/audio_encoder_opus.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/opus/interface/audio_encoder_opus.h
@@ -13,15 +13,13 @@
#include <vector>
-#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/audio_coding/codecs/audio_encoder_mutable_impl.h"
+#include "webrtc/base/constructormagic.h"
#include "webrtc/modules/audio_coding/codecs/opus/interface/opus_interface.h"
#include "webrtc/modules/audio_coding/codecs/audio_encoder.h"
namespace webrtc {
-// NOTE: This class has neither ThreadChecker, nor locks. The owner of an
-// AudioEncoderOpus object must ensure that it is not accessed concurrently.
+struct CodecInst;
class AudioEncoderOpus final : public AudioEncoder {
public:
@@ -31,60 +29,44 @@ class AudioEncoderOpus final : public AudioEncoder {
};
struct Config {
- Config();
bool IsOk() const;
- int frame_size_ms;
- int num_channels;
- int payload_type;
- ApplicationMode application;
- int bitrate_bps;
- bool fec_enabled;
- int max_playback_rate_hz;
- int complexity;
- bool dtx_enabled;
+ int frame_size_ms = 20;
+ int num_channels = 1;
+ int payload_type = 120;
+ ApplicationMode application = kVoip;
+ int bitrate_bps = 64000;
+ bool fec_enabled = false;
+ int max_playback_rate_hz = 48000;
+ int complexity = kDefaultComplexity;
+ bool dtx_enabled = false;
+
+ private:
+#if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS) || defined(WEBRTC_ARCH_ARM)
+ // If we are on Android, iOS and/or ARM, use a lower complexity setting as
+ // default, to save encoder complexity.
+ static const int kDefaultComplexity = 5;
+#else
+ static const int kDefaultComplexity = 9;
+#endif
};
explicit AudioEncoderOpus(const Config& config);
+ explicit AudioEncoderOpus(const CodecInst& codec_inst);
~AudioEncoderOpus() override;
+ size_t MaxEncodedBytes() const override;
int SampleRateHz() const override;
int NumChannels() const override;
- size_t MaxEncodedBytes() const override;
- int Num10MsFramesInNextPacket() const override;
- int Max10MsFramesInAPacket() const override;
+ size_t Num10MsFramesInNextPacket() const override;
+ size_t Max10MsFramesInAPacket() const override;
int GetTargetBitrate() const override;
- void SetTargetBitrate(int bits_per_second) override;
- void SetProjectedPacketLossRate(double fraction) override;
-
- double packet_loss_rate() const { return packet_loss_rate_; }
- ApplicationMode application() const { return application_; }
- bool dtx_enabled() const { return dtx_enabled_; }
EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
const int16_t* audio,
size_t max_encoded_bytes,
uint8_t* encoded) override;
- private:
- const int num_10ms_frames_per_packet_;
- const int num_channels_;
- const int payload_type_;
- const ApplicationMode application_;
- int bitrate_bps_;
- const bool dtx_enabled_;
- const int samples_per_10ms_frame_;
- std::vector<int16_t> input_buffer_;
- OpusEncInst* inst_;
- uint32_t first_timestamp_in_buffer_;
- double packet_loss_rate_;
-};
-
-struct CodecInst;
-
-class AudioEncoderMutableOpus
- : public AudioEncoderMutableImpl<AudioEncoderOpus> {
- public:
- explicit AudioEncoderMutableOpus(const CodecInst& codec_inst);
+ void Reset() override;
bool SetFec(bool enable) override;
// Set Opus DTX. Once enabled, Opus stops transmission, when it detects voice
@@ -93,20 +75,28 @@ class AudioEncoderMutableOpus
bool SetDtx(bool enable) override;
bool SetApplication(Application application) override;
- bool SetMaxPlaybackRate(int frequency_hz) override;
- AudioEncoderOpus::ApplicationMode application() const {
- CriticalSectionScoped cs(encoder_lock_.get());
- return encoder()->application();
- }
- double packet_loss_rate() const {
- CriticalSectionScoped cs(encoder_lock_.get());
- return encoder()->packet_loss_rate();
- }
- bool dtx_enabled() const {
- CriticalSectionScoped cs(encoder_lock_.get());
- return encoder()->dtx_enabled();
- }
+ void SetMaxPlaybackRate(int frequency_hz) override;
+ void SetProjectedPacketLossRate(double fraction) override;
+ void SetTargetBitrate(int target_bps) override;
+
+ // Getters for testing.
+ double packet_loss_rate() const { return packet_loss_rate_; }
+ ApplicationMode application() const { return config_.application; }
+ bool dtx_enabled() const { return config_.dtx_enabled; }
+
+ private:
+ int Num10msFramesPerPacket() const;
+ int SamplesPer10msFrame() const;
+ bool RecreateEncoderInstance(const Config& config);
+
+ Config config_;
+ double packet_loss_rate_;
+ std::vector<int16_t> input_buffer_;
+ OpusEncInst* inst_;
+ uint32_t first_timestamp_in_buffer_;
+ RTC_DISALLOW_COPY_AND_ASSIGN(AudioEncoderOpus);
};
} // namespace webrtc
+
#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_OPUS_INTERFACE_AUDIO_ENCODER_OPUS_H_
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/opus/interface/opus_interface.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/opus/interface/opus_interface.h
index 925cd85df47..9c09ae8526e 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/opus/interface/opus_interface.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/opus/interface/opus_interface.h
@@ -11,6 +11,8 @@
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_OPUS_INTERFACE_OPUS_INTERFACE_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_OPUS_INTERFACE_OPUS_INTERFACE_H_
+#include <stddef.h>
+
#include "webrtc/typedefs.h"
#ifdef __cplusplus
@@ -66,8 +68,8 @@ int16_t WebRtcOpus_EncoderFree(OpusEncInst* inst);
*/
int WebRtcOpus_Encode(OpusEncInst* inst,
const int16_t* audio_in,
- int16_t samples,
- int16_t length_encoded_buffer,
+ size_t samples,
+ size_t length_encoded_buffer,
uint8_t* encoded);
/****************************************************************************
@@ -210,11 +212,8 @@ int WebRtcOpus_DecoderChannels(OpusDecInst* inst);
*
* Input:
* - inst : Decoder context
- *
- * Return value : 0 - Success
- * -1 - Error
*/
-int16_t WebRtcOpus_DecoderInit(OpusDecInst* inst);
+void WebRtcOpus_DecoderInit(OpusDecInst* inst);
/****************************************************************************
* WebRtcOpus_Decode(...)
@@ -237,7 +236,7 @@ int16_t WebRtcOpus_DecoderInit(OpusDecInst* inst);
* -1 - Error
*/
int WebRtcOpus_Decode(OpusDecInst* inst, const uint8_t* encoded,
- int16_t encoded_bytes, int16_t* decoded,
+ size_t encoded_bytes, int16_t* decoded,
int16_t* audio_type);
/****************************************************************************
@@ -276,7 +275,7 @@ int WebRtcOpus_DecodePlc(OpusDecInst* inst, int16_t* decoded,
* -1 - Error
*/
int WebRtcOpus_DecodeFec(OpusDecInst* inst, const uint8_t* encoded,
- int16_t encoded_bytes, int16_t* decoded,
+ size_t encoded_bytes, int16_t* decoded,
int16_t* audio_type);
/****************************************************************************
@@ -293,7 +292,21 @@ int WebRtcOpus_DecodeFec(OpusDecInst* inst, const uint8_t* encoded,
*/
int WebRtcOpus_DurationEst(OpusDecInst* inst,
const uint8_t* payload,
- int payload_length_bytes);
+ size_t payload_length_bytes);
+
+/****************************************************************************
+ * WebRtcOpus_PlcDuration(...)
+ *
+ * This function calculates the duration of a frame returned by packet loss
+ * concealment (PLC).
+ *
+ * Input:
+ * - inst : Decoder context
+ *
+ * Return value : The duration of a frame returned by PLC, in
+ * samples per channel.
+ */
+int WebRtcOpus_PlcDuration(OpusDecInst* inst);
/* TODO(minyue): Check whether it is needed to add a decoder context to the
* arguments, like WebRtcOpus_DurationEst(...). In fact, the packet itself tells
@@ -313,7 +326,7 @@ int WebRtcOpus_DurationEst(OpusDecInst* inst,
* 0 - No FEC data in the packet.
*/
int WebRtcOpus_FecDurationEst(const uint8_t* payload,
- int payload_length_bytes);
+ size_t payload_length_bytes);
/****************************************************************************
* WebRtcOpus_PacketHasFec(...)
@@ -327,7 +340,7 @@ int WebRtcOpus_FecDurationEst(const uint8_t* payload,
* 1 - the packet contains FEC.
*/
int WebRtcOpus_PacketHasFec(const uint8_t* payload,
- int payload_length_bytes);
+ size_t payload_length_bytes);
#ifdef __cplusplus
} // extern "C"
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/opus/opus.gypi b/chromium/third_party/webrtc/modules/audio_coding/codecs/opus/opus.gypi
index 4ae4340361b..5a420b4fcf8 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/opus/opus.gypi
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/opus/opus.gypi
@@ -43,7 +43,9 @@
'<(webrtc_root)',
],
'sources': [
+ 'audio_decoder_opus.cc',
'audio_encoder_opus.cc',
+ 'interface/audio_decoder_opus.h',
'interface/audio_encoder_opus.h',
'interface/opus_interface.h',
'opus_inst.h',
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/opus/opus_fec_test.cc b/chromium/third_party/webrtc/modules/audio_coding/codecs/opus/opus_fec_test.cc
index f0ef70a3ba4..c86fab76606 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/opus/opus_fec_test.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/opus/opus_fec_test.cc
@@ -45,15 +45,15 @@ class OpusFecTest : public TestWithParam<coding_param> {
int block_duration_ms_;
int sampling_khz_;
- int block_length_sample_;
+ size_t block_length_sample_;
int channels_;
int bit_rate_;
size_t data_pointer_;
size_t loop_length_samples_;
- int max_bytes_;
- int encoded_bytes_;
+ size_t max_bytes_;
+ size_t encoded_bytes_;
WebRtcOpusEncInst* opus_encoder_;
WebRtcOpusDecInst* opus_decoder_;
@@ -122,7 +122,8 @@ void OpusFecTest::TearDown() {
OpusFecTest::OpusFecTest()
: block_duration_ms_(kOpusBlockDurationMs),
sampling_khz_(kOpusSamplingKhz),
- block_length_sample_(block_duration_ms_ * sampling_khz_),
+ block_length_sample_(
+ static_cast<size_t>(block_duration_ms_ * sampling_khz_)),
data_pointer_(0),
max_bytes_(0),
encoded_bytes_(0),
@@ -137,7 +138,7 @@ void OpusFecTest::EncodeABlock() {
max_bytes_, &bit_stream_[0]);
EXPECT_GT(value, 0);
- encoded_bytes_ = value;
+ encoded_bytes_ = static_cast<size_t>(value);
}
void OpusFecTest::DecodeABlock(bool lost_previous, bool lost_current) {
@@ -154,14 +155,14 @@ void OpusFecTest::DecodeABlock(bool lost_previous, bool lost_current) {
} else {
value_1 = WebRtcOpus_DecodePlc(opus_decoder_, &out_data_[0], 1);
}
- EXPECT_EQ(block_length_sample_, value_1);
+ EXPECT_EQ(static_cast<int>(block_length_sample_), value_1);
}
if (!lost_current) {
// Decode current frame.
value_2 = WebRtcOpus_Decode(opus_decoder_, &bit_stream_[0], encoded_bytes_,
&out_data_[value_1 * channels_], &audio_type);
- EXPECT_EQ(block_length_sample_, value_2);
+ EXPECT_EQ(static_cast<int>(block_length_sample_), value_2);
}
}
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/opus/opus_interface.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/opus/opus_interface.c
index e2506166a92..2ac53736650 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/opus/opus_interface.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/opus/opus_interface.c
@@ -80,8 +80,8 @@ int16_t WebRtcOpus_EncoderFree(OpusEncInst* inst) {
int WebRtcOpus_Encode(OpusEncInst* inst,
const int16_t* audio_in,
- int16_t samples,
- int16_t length_encoded_buffer,
+ size_t samples,
+ size_t length_encoded_buffer,
uint8_t* encoded) {
int res;
@@ -91,9 +91,9 @@ int WebRtcOpus_Encode(OpusEncInst* inst,
res = opus_encode(inst->encoder,
(const opus_int16*)audio_in,
- samples,
+ (int)samples,
encoded,
- length_encoded_buffer);
+ (opus_int32)length_encoded_buffer);
if (res == 1) {
// Indicates DTX since the packet has nothing but a header. In principle,
@@ -250,17 +250,13 @@ int WebRtcOpus_DecoderChannels(OpusDecInst* inst) {
return inst->channels;
}
-int16_t WebRtcOpus_DecoderInit(OpusDecInst* inst) {
- int error = opus_decoder_ctl(inst->decoder, OPUS_RESET_STATE);
- if (error == OPUS_OK) {
- inst->in_dtx_mode = 0;
- return 0;
- }
- return -1;
+void WebRtcOpus_DecoderInit(OpusDecInst* inst) {
+ opus_decoder_ctl(inst->decoder, OPUS_RESET_STATE);
+ inst->in_dtx_mode = 0;
}
/* For decoder to determine if it is to output speech or comfort noise. */
-static int16_t DetermineAudioType(OpusDecInst* inst, int16_t encoded_bytes) {
+static int16_t DetermineAudioType(OpusDecInst* inst, size_t encoded_bytes) {
// Audio type becomes comfort noise if |encoded_byte| is 1 and keeps
// to be so if the following |encoded_byte| are 0 or 1.
if (encoded_bytes == 0 && inst->in_dtx_mode) {
@@ -278,9 +274,9 @@ static int16_t DetermineAudioType(OpusDecInst* inst, int16_t encoded_bytes) {
* is set to the number of samples needed for PLC in case of losses.
* It is up to the caller to make sure the value is correct. */
static int DecodeNative(OpusDecInst* inst, const uint8_t* encoded,
- int16_t encoded_bytes, int frame_size,
+ size_t encoded_bytes, int frame_size,
int16_t* decoded, int16_t* audio_type, int decode_fec) {
- int res = opus_decode(inst->decoder, encoded, encoded_bytes,
+ int res = opus_decode(inst->decoder, encoded, (opus_int32)encoded_bytes,
(opus_int16*)decoded, frame_size, decode_fec);
if (res <= 0)
@@ -292,7 +288,7 @@ static int DecodeNative(OpusDecInst* inst, const uint8_t* encoded,
}
int WebRtcOpus_Decode(OpusDecInst* inst, const uint8_t* encoded,
- int16_t encoded_bytes, int16_t* decoded,
+ size_t encoded_bytes, int16_t* decoded,
int16_t* audio_type) {
int decoded_samples;
@@ -340,7 +336,7 @@ int WebRtcOpus_DecodePlc(OpusDecInst* inst, int16_t* decoded,
}
int WebRtcOpus_DecodeFec(OpusDecInst* inst, const uint8_t* encoded,
- int16_t encoded_bytes, int16_t* decoded,
+ size_t encoded_bytes, int16_t* decoded,
int16_t* audio_type) {
int decoded_samples;
int fec_samples;
@@ -362,9 +358,15 @@ int WebRtcOpus_DecodeFec(OpusDecInst* inst, const uint8_t* encoded,
int WebRtcOpus_DurationEst(OpusDecInst* inst,
const uint8_t* payload,
- int payload_length_bytes) {
+ size_t payload_length_bytes) {
+ if (payload_length_bytes == 0) {
+ // WebRtcOpus_Decode calls PLC when payload length is zero. So we return
+ // PLC duration correspondingly.
+ return WebRtcOpus_PlcDuration(inst);
+ }
+
int frames, samples;
- frames = opus_packet_get_nb_frames(payload, payload_length_bytes);
+ frames = opus_packet_get_nb_frames(payload, (opus_int32)payload_length_bytes);
if (frames < 0) {
/* Invalid payload data. */
return 0;
@@ -377,8 +379,17 @@ int WebRtcOpus_DurationEst(OpusDecInst* inst,
return samples;
}
+int WebRtcOpus_PlcDuration(OpusDecInst* inst) {
+ /* The number of samples we ask for is |number_of_lost_frames| times
+ * |prev_decoded_samples_|. Limit the number of samples to maximum
+ * |kWebRtcOpusMaxFrameSizePerChannel|. */
+ const int plc_samples = inst->prev_decoded_samples;
+ return (plc_samples <= kWebRtcOpusMaxFrameSizePerChannel) ?
+ plc_samples : kWebRtcOpusMaxFrameSizePerChannel;
+}
+
int WebRtcOpus_FecDurationEst(const uint8_t* payload,
- int payload_length_bytes) {
+ size_t payload_length_bytes) {
int samples;
if (WebRtcOpus_PacketHasFec(payload, payload_length_bytes) != 1) {
return 0;
@@ -393,13 +404,13 @@ int WebRtcOpus_FecDurationEst(const uint8_t* payload,
}
int WebRtcOpus_PacketHasFec(const uint8_t* payload,
- int payload_length_bytes) {
+ size_t payload_length_bytes) {
int frames, channels, payload_length_ms;
int n;
opus_int16 frame_sizes[48];
const unsigned char *frame_data[48];
- if (payload == NULL || payload_length_bytes <= 0)
+ if (payload == NULL || payload_length_bytes == 0)
return 0;
/* In CELT_ONLY mode, packets should not have FEC. */
@@ -432,8 +443,8 @@ int WebRtcOpus_PacketHasFec(const uint8_t* payload,
}
/* The following is to parse the LBRR flags. */
- if (opus_packet_parse(payload, payload_length_bytes, NULL, frame_data,
- frame_sizes, NULL) < 0) {
+ if (opus_packet_parse(payload, (opus_int32)payload_length_bytes, NULL,
+ frame_data, frame_sizes, NULL) < 0) {
return 0;
}
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/opus/opus_speed_test.cc b/chromium/third_party/webrtc/modules/audio_coding/codecs/opus/opus_speed_test.cc
index b39de499a7f..926bcaf9d12 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/opus/opus_speed_test.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/opus/opus_speed_test.cc
@@ -24,8 +24,8 @@ class OpusSpeedTest : public AudioCodecSpeedTest {
void SetUp() override;
void TearDown() override;
virtual float EncodeABlock(int16_t* in_data, uint8_t* bit_stream,
- int max_bytes, int* encoded_bytes);
- virtual float DecodeABlock(const uint8_t* bit_stream, int encoded_bytes,
+ size_t max_bytes, size_t* encoded_bytes);
+ virtual float DecodeABlock(const uint8_t* bit_stream, size_t encoded_bytes,
int16_t* out_data);
WebRtcOpusEncInst* opus_encoder_;
WebRtcOpusDecInst* opus_decoder_;
@@ -58,19 +58,19 @@ void OpusSpeedTest::TearDown() {
}
float OpusSpeedTest::EncodeABlock(int16_t* in_data, uint8_t* bit_stream,
- int max_bytes, int* encoded_bytes) {
+ size_t max_bytes, size_t* encoded_bytes) {
clock_t clocks = clock();
int value = WebRtcOpus_Encode(opus_encoder_, in_data,
input_length_sample_, max_bytes,
bit_stream);
clocks = clock() - clocks;
EXPECT_GT(value, 0);
- *encoded_bytes = value;
+ *encoded_bytes = static_cast<size_t>(value);
return 1000.0 * clocks / CLOCKS_PER_SEC;
}
float OpusSpeedTest::DecodeABlock(const uint8_t* bit_stream,
- int encoded_bytes, int16_t* out_data) {
+ size_t encoded_bytes, int16_t* out_data) {
int value;
int16_t audio_type;
clock_t clocks = clock();
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/opus/opus_unittest.cc b/chromium/third_party/webrtc/modules/audio_coding/codecs/opus/opus_unittest.cc
index e218a6baa5d..db622a7c7f8 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/opus/opus_unittest.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/opus/opus_unittest.cc
@@ -25,11 +25,11 @@ using ::testing::Combine;
// Maximum number of bytes in output bitstream.
const size_t kMaxBytes = 1000;
// Sample rate of Opus.
-const int kOpusRateKhz = 48;
+const size_t kOpusRateKhz = 48;
// Number of samples-per-channel in a 20 ms frame, sampled at 48 kHz.
-const int kOpus20msFrameSamples = kOpusRateKhz * 20;
+const size_t kOpus20msFrameSamples = kOpusRateKhz * 20;
// Number of samples-per-channel in a 10 ms frame, sampled at 48 kHz.
-const int kOpus10msFrameSamples = kOpusRateKhz * 10;
+const size_t kOpus10msFrameSamples = kOpusRateKhz * 10;
class OpusTest : public TestWithParam<::testing::tuple<int, int>> {
protected:
@@ -45,7 +45,7 @@ class OpusTest : public TestWithParam<::testing::tuple<int, int>> {
int EncodeDecode(WebRtcOpusEncInst* encoder,
const int16_t* input_audio,
- int input_samples,
+ size_t input_samples,
WebRtcOpusDecInst* decoder,
int16_t* output_audio,
int16_t* audio_type);
@@ -58,7 +58,7 @@ class OpusTest : public TestWithParam<::testing::tuple<int, int>> {
AudioLoop speech_data_;
uint8_t bitstream_[kMaxBytes];
- int encoded_bytes_;
+ size_t encoded_bytes_;
int channels_;
int application_;
};
@@ -97,18 +97,20 @@ void OpusTest::SetMaxPlaybackRate(WebRtcOpusEncInst* encoder,
int OpusTest::EncodeDecode(WebRtcOpusEncInst* encoder,
const int16_t* input_audio,
- int input_samples,
+ size_t input_samples,
WebRtcOpusDecInst* decoder,
int16_t* output_audio,
int16_t* audio_type) {
- encoded_bytes_ = WebRtcOpus_Encode(encoder,
- input_audio,
- input_samples, kMaxBytes,
- bitstream_);
- EXPECT_GE(encoded_bytes_, 0);
- return WebRtcOpus_Decode(decoder, bitstream_,
- encoded_bytes_, output_audio,
- audio_type);
+ int encoded_bytes_int = WebRtcOpus_Encode(encoder, input_audio, input_samples,
+ kMaxBytes, bitstream_);
+ EXPECT_GE(encoded_bytes_int, 0);
+ encoded_bytes_ = static_cast<size_t>(encoded_bytes_int);
+ int est_len = WebRtcOpus_DurationEst(decoder, bitstream_, encoded_bytes_);
+ int act_len = WebRtcOpus_Decode(decoder, bitstream_,
+ encoded_bytes_, output_audio,
+ audio_type);
+ EXPECT_EQ(est_len, act_len);
+ return act_len;
}
// Test if encoder/decoder can enter DTX mode properly and do not enter DTX when
@@ -139,13 +141,14 @@ void OpusTest::TestDtxEffect(bool dtx) {
for (int i = 0; i < 100; ++i) {
EXPECT_EQ(kOpus20msFrameSamples,
- EncodeDecode(opus_encoder_, speech_data_.GetNextBlock(),
- kOpus20msFrameSamples, opus_decoder_,
- output_data_decode, &audio_type));
+ static_cast<size_t>(EncodeDecode(
+ opus_encoder_, speech_data_.GetNextBlock(),
+ kOpus20msFrameSamples, opus_decoder_, output_data_decode,
+ &audio_type)));
// If not DTX, it should never enter DTX mode. If DTX, we do not care since
// whether it enters DTX depends on the signal type.
if (!dtx) {
- EXPECT_GT(encoded_bytes_, 1);
+ EXPECT_GT(encoded_bytes_, 1U);
EXPECT_EQ(0, opus_encoder_->in_dtx_mode);
EXPECT_EQ(0, opus_decoder_->in_dtx_mode);
EXPECT_EQ(0, audio_type); // Speech.
@@ -156,11 +159,11 @@ void OpusTest::TestDtxEffect(bool dtx) {
// However, DTX may happen after a while.
for (int i = 0; i < 30; ++i) {
EXPECT_EQ(kOpus20msFrameSamples,
- EncodeDecode(opus_encoder_, silence,
- kOpus20msFrameSamples, opus_decoder_,
- output_data_decode, &audio_type));
+ static_cast<size_t>(EncodeDecode(
+ opus_encoder_, silence, kOpus20msFrameSamples, opus_decoder_,
+ output_data_decode, &audio_type)));
if (!dtx) {
- EXPECT_GT(encoded_bytes_, 1);
+ EXPECT_GT(encoded_bytes_, 1U);
EXPECT_EQ(0, opus_encoder_->in_dtx_mode);
EXPECT_EQ(0, opus_decoder_->in_dtx_mode);
EXPECT_EQ(0, audio_type); // Speech.
@@ -180,17 +183,17 @@ void OpusTest::TestDtxEffect(bool dtx) {
// DTX mode is maintained 19 frames.
for (int i = 0; i < 19; ++i) {
EXPECT_EQ(kOpus20msFrameSamples,
- EncodeDecode(opus_encoder_, silence,
- kOpus20msFrameSamples, opus_decoder_,
- output_data_decode, &audio_type));
+ static_cast<size_t>(EncodeDecode(
+ opus_encoder_, silence, kOpus20msFrameSamples,
+ opus_decoder_, output_data_decode, &audio_type)));
if (dtx) {
- EXPECT_EQ(0, encoded_bytes_) // Send 0 byte.
+ EXPECT_EQ(0U, encoded_bytes_) // Send 0 byte.
<< "Opus should have entered DTX mode.";
EXPECT_EQ(1, opus_encoder_->in_dtx_mode);
EXPECT_EQ(1, opus_decoder_->in_dtx_mode);
EXPECT_EQ(2, audio_type); // Comfort noise.
} else {
- EXPECT_GT(encoded_bytes_, 1);
+ EXPECT_GT(encoded_bytes_, 1U);
EXPECT_EQ(0, opus_encoder_->in_dtx_mode);
EXPECT_EQ(0, opus_decoder_->in_dtx_mode);
EXPECT_EQ(0, audio_type); // Speech.
@@ -199,27 +202,27 @@ void OpusTest::TestDtxEffect(bool dtx) {
// Quit DTX after 19 frames.
EXPECT_EQ(kOpus20msFrameSamples,
- EncodeDecode(opus_encoder_, silence,
- kOpus20msFrameSamples, opus_decoder_,
- output_data_decode, &audio_type));
+ static_cast<size_t>(EncodeDecode(
+ opus_encoder_, silence, kOpus20msFrameSamples, opus_decoder_,
+ output_data_decode, &audio_type)));
- EXPECT_GT(encoded_bytes_, 1);
+ EXPECT_GT(encoded_bytes_, 1U);
EXPECT_EQ(0, opus_encoder_->in_dtx_mode);
EXPECT_EQ(0, opus_decoder_->in_dtx_mode);
EXPECT_EQ(0, audio_type); // Speech.
// Enters DTX again immediately.
EXPECT_EQ(kOpus20msFrameSamples,
- EncodeDecode(opus_encoder_, silence,
- kOpus20msFrameSamples, opus_decoder_,
- output_data_decode, &audio_type));
+ static_cast<size_t>(EncodeDecode(
+ opus_encoder_, silence, kOpus20msFrameSamples, opus_decoder_,
+ output_data_decode, &audio_type)));
if (dtx) {
- EXPECT_EQ(1, encoded_bytes_); // Send 1 byte.
+ EXPECT_EQ(1U, encoded_bytes_); // Send 1 byte.
EXPECT_EQ(1, opus_encoder_->in_dtx_mode);
EXPECT_EQ(1, opus_decoder_->in_dtx_mode);
EXPECT_EQ(2, audio_type); // Comfort noise.
} else {
- EXPECT_GT(encoded_bytes_, 1);
+ EXPECT_GT(encoded_bytes_, 1U);
EXPECT_EQ(0, opus_encoder_->in_dtx_mode);
EXPECT_EQ(0, opus_decoder_->in_dtx_mode);
EXPECT_EQ(0, audio_type); // Speech.
@@ -230,10 +233,10 @@ void OpusTest::TestDtxEffect(bool dtx) {
if (dtx) {
// Verify that encoder/decoder can jump out from DTX mode.
EXPECT_EQ(kOpus20msFrameSamples,
- EncodeDecode(opus_encoder_, silence,
- kOpus20msFrameSamples, opus_decoder_,
- output_data_decode, &audio_type));
- EXPECT_GT(encoded_bytes_, 1);
+ static_cast<size_t>(EncodeDecode(
+ opus_encoder_, silence, kOpus20msFrameSamples, opus_decoder_,
+ output_data_decode, &audio_type)));
+ EXPECT_GT(encoded_bytes_, 1U);
EXPECT_EQ(0, opus_encoder_->in_dtx_mode);
EXPECT_EQ(0, opus_decoder_->in_dtx_mode);
EXPECT_EQ(0, audio_type); // Speech.
@@ -311,9 +314,10 @@ TEST_P(OpusTest, OpusEncodeDecode) {
int16_t audio_type;
int16_t* output_data_decode = new int16_t[kOpus20msFrameSamples * channels_];
EXPECT_EQ(kOpus20msFrameSamples,
- EncodeDecode(opus_encoder_, speech_data_.GetNextBlock(),
- kOpus20msFrameSamples, opus_decoder_,
- output_data_decode, &audio_type));
+ static_cast<size_t>(EncodeDecode(
+ opus_encoder_, speech_data_.GetNextBlock(),
+ kOpus20msFrameSamples, opus_decoder_, output_data_decode,
+ &audio_type)));
// Free memory.
delete[] output_data_decode;
@@ -370,16 +374,17 @@ TEST_P(OpusTest, OpusDecodeInit) {
int16_t audio_type;
int16_t* output_data_decode = new int16_t[kOpus20msFrameSamples * channels_];
EXPECT_EQ(kOpus20msFrameSamples,
- EncodeDecode(opus_encoder_, speech_data_.GetNextBlock(),
- kOpus20msFrameSamples, opus_decoder_,
- output_data_decode, &audio_type));
+ static_cast<size_t>(EncodeDecode(
+ opus_encoder_, speech_data_.GetNextBlock(),
+ kOpus20msFrameSamples, opus_decoder_, output_data_decode,
+ &audio_type)));
- EXPECT_EQ(0, WebRtcOpus_DecoderInit(opus_decoder_));
+ WebRtcOpus_DecoderInit(opus_decoder_);
EXPECT_EQ(kOpus20msFrameSamples,
- WebRtcOpus_Decode(opus_decoder_, bitstream_,
- encoded_bytes_, output_data_decode,
- &audio_type));
+ static_cast<size_t>(WebRtcOpus_Decode(
+ opus_decoder_, bitstream_, encoded_bytes_, output_data_decode,
+ &audio_type)));
// Free memory.
delete[] output_data_decode;
@@ -508,14 +513,16 @@ TEST_P(OpusTest, OpusDecodePlc) {
int16_t audio_type;
int16_t* output_data_decode = new int16_t[kOpus20msFrameSamples * channels_];
EXPECT_EQ(kOpus20msFrameSamples,
- EncodeDecode(opus_encoder_, speech_data_.GetNextBlock(),
- kOpus20msFrameSamples, opus_decoder_,
- output_data_decode, &audio_type));
+ static_cast<size_t>(EncodeDecode(
+ opus_encoder_, speech_data_.GetNextBlock(),
+ kOpus20msFrameSamples, opus_decoder_, output_data_decode,
+ &audio_type)));
// Call decoder PLC.
int16_t* plc_buffer = new int16_t[kOpus20msFrameSamples * channels_];
EXPECT_EQ(kOpus20msFrameSamples,
- WebRtcOpus_DecodePlc(opus_decoder_, plc_buffer, 1));
+ static_cast<size_t>(WebRtcOpus_DecodePlc(
+ opus_decoder_, plc_buffer, 1)));
// Free memory.
delete[] plc_buffer;
@@ -535,24 +542,26 @@ TEST_P(OpusTest, OpusDurationEstimation) {
EXPECT_EQ(0, WebRtcOpus_DecoderCreate(&opus_decoder_, channels_));
// 10 ms. We use only first 10 ms of a 20 ms block.
- encoded_bytes_ = WebRtcOpus_Encode(opus_encoder_,
- speech_data_.GetNextBlock(),
- kOpus10msFrameSamples, kMaxBytes,
- bitstream_);
- EXPECT_GE(encoded_bytes_, 0);
+ int encoded_bytes_int = WebRtcOpus_Encode(opus_encoder_,
+ speech_data_.GetNextBlock(),
+ kOpus10msFrameSamples,
+ kMaxBytes, bitstream_);
+ EXPECT_GE(encoded_bytes_int, 0);
EXPECT_EQ(kOpus10msFrameSamples,
- WebRtcOpus_DurationEst(opus_decoder_, bitstream_,
- encoded_bytes_));
+ static_cast<size_t>(WebRtcOpus_DurationEst(
+ opus_decoder_, bitstream_,
+ static_cast<size_t>(encoded_bytes_int))));
// 20 ms
- encoded_bytes_ = WebRtcOpus_Encode(opus_encoder_,
- speech_data_.GetNextBlock(),
- kOpus20msFrameSamples, kMaxBytes,
- bitstream_);
- EXPECT_GE(encoded_bytes_, 0);
+ encoded_bytes_int = WebRtcOpus_Encode(opus_encoder_,
+ speech_data_.GetNextBlock(),
+ kOpus20msFrameSamples,
+ kMaxBytes, bitstream_);
+ EXPECT_GE(encoded_bytes_int, 0);
EXPECT_EQ(kOpus20msFrameSamples,
- WebRtcOpus_DurationEst(opus_decoder_, bitstream_,
- encoded_bytes_));
+ static_cast<size_t>(WebRtcOpus_DurationEst(
+ opus_decoder_, bitstream_,
+ static_cast<size_t>(encoded_bytes_int))));
// Free memory.
EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_encoder_));
@@ -595,11 +604,13 @@ TEST_P(OpusTest, OpusDecodeRepacketized) {
encoded_bytes_ = opus_repacketizer_out(rp, bitstream_, kMaxBytes);
EXPECT_EQ(kOpus20msFrameSamples * kPackets,
- WebRtcOpus_DurationEst(opus_decoder_, bitstream_, encoded_bytes_));
+ static_cast<size_t>(WebRtcOpus_DurationEst(
+ opus_decoder_, bitstream_, encoded_bytes_)));
EXPECT_EQ(kOpus20msFrameSamples * kPackets,
- WebRtcOpus_Decode(opus_decoder_, bitstream_, encoded_bytes_,
- output_data_decode.get(), &audio_type));
+ static_cast<size_t>(WebRtcOpus_Decode(
+ opus_decoder_, bitstream_, encoded_bytes_,
+ output_data_decode.get(), &audio_type)));
// Free memory.
opus_repacketizer_destroy(rp);
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.cc b/chromium/third_party/webrtc/modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.cc
new file mode 100644
index 00000000000..7d07b23a3c2
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/pcm16b/audio_decoder_pcm16b.cc
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_coding/codecs/pcm16b/include/audio_decoder_pcm16b.h"
+
+#include "webrtc/base/checks.h"
+#include "webrtc/modules/audio_coding/codecs/pcm16b/include/pcm16b.h"
+
+namespace webrtc {
+
+AudioDecoderPcm16B::AudioDecoderPcm16B(size_t num_channels)
+ : num_channels_(num_channels) {
+ RTC_DCHECK_GE(num_channels, 1u);
+}
+
+void AudioDecoderPcm16B::Reset() {}
+
+size_t AudioDecoderPcm16B::Channels() const {
+ return num_channels_;
+}
+
+int AudioDecoderPcm16B::DecodeInternal(const uint8_t* encoded,
+ size_t encoded_len,
+ int sample_rate_hz,
+ int16_t* decoded,
+ SpeechType* speech_type) {
+ RTC_DCHECK(sample_rate_hz == 8000 || sample_rate_hz == 16000 ||
+ sample_rate_hz == 32000 || sample_rate_hz == 48000)
+ << "Unsupported sample rate " << sample_rate_hz;
+ size_t ret = WebRtcPcm16b_Decode(encoded, encoded_len, decoded);
+ *speech_type = ConvertSpeechType(1);
+ return static_cast<int>(ret);
+}
+
+int AudioDecoderPcm16B::PacketDuration(const uint8_t* encoded,
+ size_t encoded_len) const {
+ // Two encoded byte per sample per channel.
+ return static_cast<int>(encoded_len / (2 * Channels()));
+}
+
+} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.cc b/chromium/third_party/webrtc/modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.cc
index 0c246c34bdb..6c30c7ff62a 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.cc
@@ -16,17 +16,10 @@
namespace webrtc {
-bool AudioEncoderPcm16B::Config::IsOk() const {
- if ((sample_rate_hz != 8000) && (sample_rate_hz != 16000) &&
- (sample_rate_hz != 32000) && (sample_rate_hz != 48000))
- return false;
- return AudioEncoderPcm::Config::IsOk();
-}
-
-int16_t AudioEncoderPcm16B::EncodeCall(const int16_t* audio,
- size_t input_len,
- uint8_t* encoded) {
- return WebRtcPcm16b_Encode(audio, static_cast<int16_t>(input_len), encoded);
+size_t AudioEncoderPcm16B::EncodeCall(const int16_t* audio,
+ size_t input_len,
+ uint8_t* encoded) {
+ return WebRtcPcm16b_Encode(audio, input_len, encoded);
}
int AudioEncoderPcm16B::BytesPerSample() const {
@@ -45,9 +38,14 @@ AudioEncoderPcm16B::Config CreateConfig(const CodecInst& codec_inst) {
}
} // namespace
-AudioEncoderMutablePcm16B::AudioEncoderMutablePcm16B(
- const CodecInst& codec_inst)
- : AudioEncoderMutableImpl<AudioEncoderPcm16B>(CreateConfig(codec_inst)) {
+bool AudioEncoderPcm16B::Config::IsOk() const {
+ if ((sample_rate_hz != 8000) && (sample_rate_hz != 16000) &&
+ (sample_rate_hz != 32000) && (sample_rate_hz != 48000))
+ return false;
+ return AudioEncoderPcm::Config::IsOk();
}
+AudioEncoderPcm16B::AudioEncoderPcm16B(const CodecInst& codec_inst)
+ : AudioEncoderPcm16B(CreateConfig(codec_inst)) {}
+
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/pcm16b/include/audio_decoder_pcm16b.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/pcm16b/include/audio_decoder_pcm16b.h
new file mode 100644
index 00000000000..96131c4d21f
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/pcm16b/include/audio_decoder_pcm16b.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_PCM16B_INCLUDE_AUDIO_DECODER_PCM16B_H_
+#define WEBRTC_MODULES_AUDIO_CODING_CODECS_PCM16B_INCLUDE_AUDIO_DECODER_PCM16B_H_
+
+#include "webrtc/base/constructormagic.h"
+#include "webrtc/modules/audio_coding/codecs/audio_decoder.h"
+
+namespace webrtc {
+
+class AudioDecoderPcm16B final : public AudioDecoder {
+ public:
+ explicit AudioDecoderPcm16B(size_t num_channels);
+ void Reset() override;
+ int PacketDuration(const uint8_t* encoded, size_t encoded_len) const override;
+ size_t Channels() const override;
+
+ protected:
+ int DecodeInternal(const uint8_t* encoded,
+ size_t encoded_len,
+ int sample_rate_hz,
+ int16_t* decoded,
+ SpeechType* speech_type) override;
+
+ private:
+ const size_t num_channels_;
+ RTC_DISALLOW_COPY_AND_ASSIGN(AudioDecoderPcm16B);
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_PCM16B_INCLUDE_AUDIO_DECODER_PCM16B_H_
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/pcm16b/include/audio_encoder_pcm16b.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/pcm16b/include/audio_encoder_pcm16b.h
index f02cf92dd96..e03da213dfe 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/pcm16b/include/audio_encoder_pcm16b.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/pcm16b/include/audio_encoder_pcm16b.h
@@ -12,11 +12,12 @@
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_PCM16B_INCLUDE_AUDIO_ENCODER_PCM16B_H_
#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/audio_coding/codecs/audio_encoder_mutable_impl.h"
#include "webrtc/modules/audio_coding/codecs/g711/include/audio_encoder_pcm.h"
namespace webrtc {
+struct CodecInst;
+
class AudioEncoderPcm16B final : public AudioEncoderPcm {
public:
struct Config : public AudioEncoderPcm::Config {
@@ -29,22 +30,19 @@ class AudioEncoderPcm16B final : public AudioEncoderPcm {
explicit AudioEncoderPcm16B(const Config& config)
: AudioEncoderPcm(config, config.sample_rate_hz) {}
+ explicit AudioEncoderPcm16B(const CodecInst& codec_inst);
protected:
- int16_t EncodeCall(const int16_t* audio,
- size_t input_len,
- uint8_t* encoded) override;
+ size_t EncodeCall(const int16_t* audio,
+ size_t input_len,
+ uint8_t* encoded) override;
int BytesPerSample() const override;
-};
-struct CodecInst;
-
-class AudioEncoderMutablePcm16B
- : public AudioEncoderMutableImpl<AudioEncoderPcm16B> {
- public:
- explicit AudioEncoderMutablePcm16B(const CodecInst& codec_inst);
+private:
+ RTC_DISALLOW_COPY_AND_ASSIGN(AudioEncoderPcm16B);
};
} // namespace webrtc
+
#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_PCM16B_INCLUDE_AUDIO_ENCODER_PCM16B_H_
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/pcm16b/include/pcm16b.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/pcm16b/include/pcm16b.h
index 1cdf92dbf8f..d65d08af1a6 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/pcm16b/include/pcm16b.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/pcm16b/include/pcm16b.h
@@ -14,6 +14,8 @@
* Define the fixpoint numeric formats
*/
+#include <stddef.h>
+
#include "webrtc/typedefs.h"
#ifdef __cplusplus
@@ -36,9 +38,9 @@ extern "C" {
* Always equal to twice the len input parameter.
*/
-int16_t WebRtcPcm16b_Encode(const int16_t* speech,
- int16_t len,
- uint8_t* encoded);
+size_t WebRtcPcm16b_Encode(const int16_t* speech,
+ size_t len,
+ uint8_t* encoded);
/****************************************************************************
* WebRtcPcm16b_Decode(...)
@@ -55,9 +57,9 @@ int16_t WebRtcPcm16b_Encode(const int16_t* speech,
* Returned value : Samples in speech
*/
-int16_t WebRtcPcm16b_Decode(const uint8_t* encoded,
- int16_t len,
- int16_t* speech);
+size_t WebRtcPcm16b_Decode(const uint8_t* encoded,
+ size_t len,
+ int16_t* speech);
#ifdef __cplusplus
}
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/pcm16b/pcm16b.c b/chromium/third_party/webrtc/modules/audio_coding/codecs/pcm16b/pcm16b.c
index b6de0b5e67b..120c79052bd 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/pcm16b/pcm16b.c
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/pcm16b/pcm16b.c
@@ -12,10 +12,10 @@
#include "webrtc/typedefs.h"
-int16_t WebRtcPcm16b_Encode(const int16_t* speech,
- int16_t len,
- uint8_t* encoded) {
- int i;
+size_t WebRtcPcm16b_Encode(const int16_t* speech,
+ size_t len,
+ uint8_t* encoded) {
+ size_t i;
for (i = 0; i < len; ++i) {
uint16_t s = speech[i];
encoded[2 * i] = s >> 8;
@@ -24,10 +24,10 @@ int16_t WebRtcPcm16b_Encode(const int16_t* speech,
return 2 * len;
}
-int16_t WebRtcPcm16b_Decode(const uint8_t* encoded,
- int16_t len,
- int16_t* speech) {
- int i;
+size_t WebRtcPcm16b_Decode(const uint8_t* encoded,
+ size_t len,
+ int16_t* speech) {
+ size_t i;
for (i = 0; i < len / 2; ++i)
speech[i] = encoded[2 * i] << 8 | encoded[2 * i + 1];
return len / 2;
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/pcm16b/pcm16b.gypi b/chromium/third_party/webrtc/modules/audio_coding/codecs/pcm16b/pcm16b.gypi
index 462d752d559..3dc2f772c12 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/pcm16b/pcm16b.gypi
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/pcm16b/pcm16b.gypi
@@ -26,8 +26,10 @@
],
},
'sources': [
+ 'include/audio_decoder_pcm16b.h',
'include/audio_encoder_pcm16b.h',
'include/pcm16b.h',
+ 'audio_decoder_pcm16b.cc',
'audio_encoder_pcm16b.cc',
'pcm16b.c',
],
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.cc b/chromium/third_party/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.cc
index 16ba290fc33..a19d194e593 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.cc
@@ -19,33 +19,32 @@ namespace webrtc {
AudioEncoderCopyRed::AudioEncoderCopyRed(const Config& config)
: speech_encoder_(config.speech_encoder),
red_payload_type_(config.payload_type) {
- CHECK(speech_encoder_) << "Speech encoder not provided.";
+ RTC_CHECK(speech_encoder_) << "Speech encoder not provided.";
}
-AudioEncoderCopyRed::~AudioEncoderCopyRed() {
+AudioEncoderCopyRed::~AudioEncoderCopyRed() = default;
+
+size_t AudioEncoderCopyRed::MaxEncodedBytes() const {
+ return 2 * speech_encoder_->MaxEncodedBytes();
}
int AudioEncoderCopyRed::SampleRateHz() const {
return speech_encoder_->SampleRateHz();
}
-int AudioEncoderCopyRed::RtpTimestampRateHz() const {
- return speech_encoder_->RtpTimestampRateHz();
-}
-
int AudioEncoderCopyRed::NumChannels() const {
return speech_encoder_->NumChannels();
}
-size_t AudioEncoderCopyRed::MaxEncodedBytes() const {
- return 2 * speech_encoder_->MaxEncodedBytes();
+int AudioEncoderCopyRed::RtpTimestampRateHz() const {
+ return speech_encoder_->RtpTimestampRateHz();
}
-int AudioEncoderCopyRed::Num10MsFramesInNextPacket() const {
+size_t AudioEncoderCopyRed::Num10MsFramesInNextPacket() const {
return speech_encoder_->Num10MsFramesInNextPacket();
}
-int AudioEncoderCopyRed::Max10MsFramesInAPacket() const {
+size_t AudioEncoderCopyRed::Max10MsFramesInAPacket() const {
return speech_encoder_->Max10MsFramesInAPacket();
}
@@ -53,16 +52,6 @@ int AudioEncoderCopyRed::GetTargetBitrate() const {
return speech_encoder_->GetTargetBitrate();
}
-void AudioEncoderCopyRed::SetTargetBitrate(int bits_per_second) {
- speech_encoder_->SetTargetBitrate(bits_per_second);
-}
-
-void AudioEncoderCopyRed::SetProjectedPacketLossRate(double fraction) {
- DCHECK_GE(fraction, 0.0);
- DCHECK_LE(fraction, 1.0);
- speech_encoder_->SetProjectedPacketLossRate(fraction);
-}
-
AudioEncoder::EncodedInfo AudioEncoderCopyRed::EncodeInternal(
uint32_t rtp_timestamp,
const int16_t* audio,
@@ -71,27 +60,26 @@ AudioEncoder::EncodedInfo AudioEncoderCopyRed::EncodeInternal(
EncodedInfo info = speech_encoder_->Encode(
rtp_timestamp, audio, static_cast<size_t>(SampleRateHz() / 100),
max_encoded_bytes, encoded);
- CHECK_GE(max_encoded_bytes,
- info.encoded_bytes + secondary_info_.encoded_bytes);
- CHECK(info.redundant.empty()) << "Cannot use nested redundant encoders.";
+ RTC_CHECK_GE(max_encoded_bytes,
+ info.encoded_bytes + secondary_info_.encoded_bytes);
+ RTC_CHECK(info.redundant.empty()) << "Cannot use nested redundant encoders.";
if (info.encoded_bytes > 0) {
// |info| will be implicitly cast to an EncodedInfoLeaf struct, effectively
// discarding the (empty) vector of redundant information. This is
// intentional.
info.redundant.push_back(info);
- DCHECK_EQ(info.redundant.size(), 1u);
+ RTC_DCHECK_EQ(info.redundant.size(), 1u);
if (secondary_info_.encoded_bytes > 0) {
memcpy(&encoded[info.encoded_bytes], secondary_encoded_.data(),
secondary_info_.encoded_bytes);
info.redundant.push_back(secondary_info_);
- DCHECK_EQ(info.redundant.size(), 2u);
+ RTC_DCHECK_EQ(info.redundant.size(), 2u);
}
// Save primary to secondary.
- secondary_encoded_.SetSize(info.encoded_bytes);
- memcpy(secondary_encoded_.data(), encoded, info.encoded_bytes);
+ secondary_encoded_.SetData(encoded, info.encoded_bytes);
secondary_info_ = info;
- DCHECK_EQ(info.speech, info.redundant[0].speech);
+ RTC_DCHECK_EQ(info.speech, info.redundant[0].speech);
}
// Update main EncodedInfo.
info.payload_type = red_payload_type_;
@@ -103,4 +91,34 @@ AudioEncoder::EncodedInfo AudioEncoderCopyRed::EncodeInternal(
return info;
}
+void AudioEncoderCopyRed::Reset() {
+ speech_encoder_->Reset();
+ secondary_encoded_.Clear();
+ secondary_info_.encoded_bytes = 0;
+}
+
+bool AudioEncoderCopyRed::SetFec(bool enable) {
+ return speech_encoder_->SetFec(enable);
+}
+
+bool AudioEncoderCopyRed::SetDtx(bool enable) {
+ return speech_encoder_->SetDtx(enable);
+}
+
+bool AudioEncoderCopyRed::SetApplication(Application application) {
+ return speech_encoder_->SetApplication(application);
+}
+
+void AudioEncoderCopyRed::SetMaxPlaybackRate(int frequency_hz) {
+ speech_encoder_->SetMaxPlaybackRate(frequency_hz);
+}
+
+void AudioEncoderCopyRed::SetProjectedPacketLossRate(double fraction) {
+ speech_encoder_->SetProjectedPacketLossRate(fraction);
+}
+
+void AudioEncoderCopyRed::SetTargetBitrate(int bits_per_second) {
+ speech_encoder_->SetTargetBitrate(bits_per_second);
+}
+
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.h
index 78e1e9aff39..78370106051 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.h
@@ -23,7 +23,7 @@ namespace webrtc {
// underlying AudioEncoder object that performs the actual encodings. The
// current class will gather the two latest encodings from the underlying codec
// into one packet.
-class AudioEncoderCopyRed : public AudioEncoder {
+class AudioEncoderCopyRed final : public AudioEncoder {
public:
struct Config {
public:
@@ -36,26 +36,33 @@ class AudioEncoderCopyRed : public AudioEncoder {
~AudioEncoderCopyRed() override;
+ size_t MaxEncodedBytes() const override;
int SampleRateHz() const override;
int NumChannels() const override;
- size_t MaxEncodedBytes() const override;
int RtpTimestampRateHz() const override;
- int Num10MsFramesInNextPacket() const override;
- int Max10MsFramesInAPacket() const override;
+ size_t Num10MsFramesInNextPacket() const override;
+ size_t Max10MsFramesInAPacket() const override;
int GetTargetBitrate() const override;
- void SetTargetBitrate(int bits_per_second) override;
- void SetProjectedPacketLossRate(double fraction) override;
EncodedInfo EncodeInternal(uint32_t rtp_timestamp,
const int16_t* audio,
size_t max_encoded_bytes,
uint8_t* encoded) override;
+ void Reset() override;
+ bool SetFec(bool enable) override;
+ bool SetDtx(bool enable) override;
+ bool SetApplication(Application application) override;
+ void SetMaxPlaybackRate(int frequency_hz) override;
+ void SetProjectedPacketLossRate(double fraction) override;
+ void SetTargetBitrate(int target_bps) override;
private:
AudioEncoder* speech_encoder_;
int red_payload_type_;
rtc::Buffer secondary_encoded_;
EncodedInfoLeaf secondary_info_;
+ RTC_DISALLOW_COPY_AND_ASSIGN(AudioEncoderCopyRed);
};
} // namespace webrtc
+
#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_RED_AUDIO_ENCODER_COPY_RED_H_
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red_unittest.cc b/chromium/third_party/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red_unittest.cc
index 4debdfab8d3..cb506521833 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red_unittest.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red_unittest.cc
@@ -87,8 +87,8 @@ class MockEncodeHelper {
size_t max_encoded_bytes,
uint8_t* encoded) {
if (write_payload_) {
- CHECK(encoded);
- CHECK_LE(info_.encoded_bytes, max_encoded_bytes);
+ RTC_CHECK(encoded);
+ RTC_CHECK_LE(info_.encoded_bytes, max_encoded_bytes);
memcpy(encoded, payload_, info_.encoded_bytes);
}
return info_;
@@ -113,13 +113,13 @@ TEST_F(AudioEncoderCopyRedTest, CheckNumChannelsPropagation) {
}
TEST_F(AudioEncoderCopyRedTest, CheckFrameSizePropagation) {
- EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket()).WillOnce(Return(17));
- EXPECT_EQ(17, red_->Num10MsFramesInNextPacket());
+ EXPECT_CALL(mock_encoder_, Num10MsFramesInNextPacket()).WillOnce(Return(17U));
+ EXPECT_EQ(17U, red_->Num10MsFramesInNextPacket());
}
TEST_F(AudioEncoderCopyRedTest, CheckMaxFrameSizePropagation) {
- EXPECT_CALL(mock_encoder_, Max10MsFramesInAPacket()).WillOnce(Return(17));
- EXPECT_EQ(17, red_->Max10MsFramesInAPacket());
+ EXPECT_CALL(mock_encoder_, Max10MsFramesInAPacket()).WillOnce(Return(17U));
+ EXPECT_EQ(17U, red_->Max10MsFramesInAPacket());
}
TEST_F(AudioEncoderCopyRedTest, CheckSetBitratePropagation) {
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.cc b/chromium/third_party/webrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.cc
index c7cafdff9ba..3395721f8ba 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.cc
@@ -65,7 +65,8 @@ void AudioCodecSpeedTest::SetUp() {
memcpy(&in_data_[loop_length_samples_], &in_data_[0],
input_length_sample_ * channels_ * sizeof(int16_t));
- max_bytes_ = input_length_sample_ * channels_ * sizeof(int16_t);
+ max_bytes_ =
+ static_cast<size_t>(input_length_sample_ * channels_ * sizeof(int16_t));
out_data_.reset(new int16_t[output_length_sample_ * channels_]);
bit_stream_.reset(new uint8_t[max_bytes_]);
diff --git a/chromium/third_party/webrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.h b/chromium/third_party/webrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.h
index 35ac69e8ab7..2736c2912e8 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.h
@@ -36,14 +36,14 @@ class AudioCodecSpeedTest : public testing::TestWithParam<coding_param> {
// 3. assign |encoded_bytes| with the length of the bit stream (in bytes),
// 4. return the cost of time (in millisecond) spent on actual encoding.
virtual float EncodeABlock(int16_t* in_data, uint8_t* bit_stream,
- int max_bytes, int* encoded_bytes) = 0;
+ size_t max_bytes, size_t* encoded_bytes) = 0;
// DecodeABlock(...) does the following:
// 1. decodes the bit stream in |bit_stream| with a length of |encoded_bytes|
// (in bytes),
// 2. save the decoded audio in |out_data|,
// 3. return the cost of time (in millisecond) spent on actual decoding.
- virtual float DecodeABlock(const uint8_t* bit_stream, int encoded_bytes,
+ virtual float DecodeABlock(const uint8_t* bit_stream, size_t encoded_bytes,
int16_t* out_data) = 0;
// Encoding and decode an audio of |audio_duration| (in seconds) and
@@ -67,9 +67,9 @@ class AudioCodecSpeedTest : public testing::TestWithParam<coding_param> {
rtc::scoped_ptr<uint8_t[]> bit_stream_;
// Maximum number of bytes in output bitstream for a frame of audio.
- int max_bytes_;
+ size_t max_bytes_;
- int encoded_bytes_;
+ size_t encoded_bytes_;
float encoding_time_ms_;
float decoding_time_ms_;
FILE* out_file_;
diff --git a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_codec_database.cc b/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_codec_database.cc
index 9f97c79a8a4..9193270363a 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_codec_database.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_codec_database.cc
@@ -27,6 +27,38 @@ namespace webrtc {
namespace acm2 {
+namespace {
+
+// Checks if the bitrate is valid for the codec.
+bool IsRateValid(int codec_id, int rate) {
+ return ACMCodecDB::database_[codec_id].rate == rate;
+}
+
+// Checks if the bitrate is valid for iSAC.
+bool IsISACRateValid(int rate) {
+ return (rate == -1) || ((rate <= 56000) && (rate >= 10000));
+}
+
+// Checks if the bitrate is valid for iLBC.
+bool IsILBCRateValid(int rate, int frame_size_samples) {
+ if (((frame_size_samples == 240) || (frame_size_samples == 480)) &&
+ (rate == 13300)) {
+ return true;
+ } else if (((frame_size_samples == 160) || (frame_size_samples == 320)) &&
+ (rate == 15200)) {
+ return true;
+ } else {
+ return false;
+ }
+}
+
+// Checks if the bitrate is valid for Opus.
+bool IsOpusRateValid(int rate) {
+ return (rate >= 6000) && (rate <= 510000);
+}
+
+} // namespace
+
// Not yet used payload-types.
// 83, 82, 81, 80, 79, 78, 77, 76, 75, 74, 73, 72, 71, 70, 69, 68,
// 67, 66, 65
@@ -39,7 +71,6 @@ const CodecInst ACMCodecDB::database_[] = {
{105, "ISAC", 48000, kIsacPacSize1440, 1, kIsacSwbDefaultRate},
# endif
#endif
-#ifdef WEBRTC_CODEC_PCM16
// Mono
{107, "L16", 8000, 80, 1, 128000},
{108, "L16", 16000, 160, 1, 256000},
@@ -48,7 +79,6 @@ const CodecInst ACMCodecDB::database_[] = {
{111, "L16", 8000, 80, 2, 128000},
{112, "L16", 16000, 160, 2, 256000},
{113, "L16", 32000, 320, 2, 512000},
-#endif
// G.711, PCM mu-law and A-law.
// Mono
{0, "PCMU", 8000, 160, 1, 64000},
@@ -77,9 +107,7 @@ const CodecInst ACMCodecDB::database_[] = {
#ifdef ENABLE_48000_HZ
{100, "CN", 48000, 1440, 1, 0},
#endif
-#ifdef WEBRTC_CODEC_AVT
{106, "telephone-event", 8000, 240, 1, 0},
-#endif
#ifdef WEBRTC_CODEC_RED
{127, "red", 8000, 0, 1, 0},
#endif
@@ -93,59 +121,55 @@ const CodecInst ACMCodecDB::database_[] = {
// Basic block samples, max number of channels that are supported.
const ACMCodecDB::CodecSettings ACMCodecDB::codec_settings_[] = {
#if (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX))
- {2, {kIsacPacSize480, kIsacPacSize960}, 0, 1, true},
+ {2, {kIsacPacSize480, kIsacPacSize960}, 0, 1},
# if (defined(WEBRTC_CODEC_ISAC))
- {1, {kIsacPacSize960}, 0, 1, true},
- {1, {kIsacPacSize1440}, 0, 1, true},
+ {1, {kIsacPacSize960}, 0, 1},
+ {1, {kIsacPacSize1440}, 0, 1},
# endif
#endif
-#ifdef WEBRTC_CODEC_PCM16
// Mono
- {4, {80, 160, 240, 320}, 0, 2, false},
- {4, {160, 320, 480, 640}, 0, 2, false},
- {2, {320, 640}, 0, 2, false},
+ {4, {80, 160, 240, 320}, 0, 2},
+ {4, {160, 320, 480, 640}, 0, 2},
+ {2, {320, 640}, 0, 2},
// Stereo
- {4, {80, 160, 240, 320}, 0, 2, false},
- {4, {160, 320, 480, 640}, 0, 2, false},
+ {4, {80, 160, 240, 320}, 0, 2},
+ {4, {160, 320, 480, 640}, 0, 2},
{2, {320, 640}, 0, 2},
-#endif
// G.711, PCM mu-law and A-law.
// Mono
- {6, {80, 160, 240, 320, 400, 480}, 0, 2, false},
- {6, {80, 160, 240, 320, 400, 480}, 0, 2, false},
+ {6, {80, 160, 240, 320, 400, 480}, 0, 2},
+ {6, {80, 160, 240, 320, 400, 480}, 0, 2},
// Stereo
- {6, {80, 160, 240, 320, 400, 480}, 0, 2, false},
- {6, {80, 160, 240, 320, 400, 480}, 0, 2, false},
+ {6, {80, 160, 240, 320, 400, 480}, 0, 2},
+ {6, {80, 160, 240, 320, 400, 480}, 0, 2},
#ifdef WEBRTC_CODEC_ILBC
- {4, {160, 240, 320, 480}, 0, 1, false},
+ {4, {160, 240, 320, 480}, 0, 1},
#endif
#ifdef WEBRTC_CODEC_G722
// Mono
- {6, {160, 320, 480, 640, 800, 960}, 0, 2, false},
+ {6, {160, 320, 480, 640, 800, 960}, 0, 2},
// Stereo
- {6, {160, 320, 480, 640, 800, 960}, 0, 2, false},
+ {6, {160, 320, 480, 640, 800, 960}, 0, 2},
#endif
#ifdef WEBRTC_CODEC_OPUS
// Opus supports frames shorter than 10ms,
// but it doesn't help us to use them.
// Mono and stereo.
- {4, {480, 960, 1920, 2880}, 0, 2, false},
+ {4, {480, 960, 1920, 2880}, 0, 2},
#endif
// Comfort noise for three different sampling frequencies.
- {1, {240}, 240, 1, false},
- {1, {480}, 480, 1, false},
- {1, {960}, 960, 1, false},
+ {1, {240}, 240, 1},
+ {1, {480}, 480, 1},
+ {1, {960}, 960, 1},
#ifdef ENABLE_48000_HZ
- {1, {1440}, 1440, 1, false},
-#endif
-#ifdef WEBRTC_CODEC_AVT
- {1, {240}, 240, 1, false},
+ {1, {1440}, 1440, 1},
#endif
+ {1, {240}, 240, 1},
#ifdef WEBRTC_CODEC_RED
- {1, {0}, 0, 1, false},
+ {1, {0}, 0, 1},
#endif
// To prevent compile errors due to trailing commas.
- {-1, {-1}, -1, -1, false}
+ {-1, {-1}, -1, -1}
};
// Create a database of all NetEQ decoders at compile time.
@@ -157,7 +181,6 @@ const NetEqDecoder ACMCodecDB::neteq_decoders_[] = {
kDecoderISACfb,
# endif
#endif
-#ifdef WEBRTC_CODEC_PCM16
// Mono
kDecoderPCM16B,
kDecoderPCM16Bwb,
@@ -166,7 +189,6 @@ const NetEqDecoder ACMCodecDB::neteq_decoders_[] = {
kDecoderPCM16B_2ch,
kDecoderPCM16Bwb_2ch,
kDecoderPCM16Bswb32kHz_2ch,
-#endif
// G.711, PCM mu-las and A-law.
// Mono
kDecoderPCMu,
@@ -194,9 +216,7 @@ const NetEqDecoder ACMCodecDB::neteq_decoders_[] = {
#ifdef ENABLE_48000_HZ
, kDecoderCNGswb48kHz
#endif
-#ifdef WEBRTC_CODEC_AVT
, kDecoderAVT
-#endif
#ifdef WEBRTC_CODEC_RED
, kDecoderRED
#endif
@@ -281,21 +301,9 @@ int ACMCodecDB::CodecNumber(const CodecInst& codec_inst) {
} else if (STR_CASE_CMP("ilbc", codec_inst.plname) == 0) {
return IsILBCRateValid(codec_inst.rate, codec_inst.pacsize)
? codec_id : kInvalidRate;
- } else if (STR_CASE_CMP("amr", codec_inst.plname) == 0) {
- return IsAMRRateValid(codec_inst.rate)
- ? codec_id : kInvalidRate;
- } else if (STR_CASE_CMP("amr-wb", codec_inst.plname) == 0) {
- return IsAMRwbRateValid(codec_inst.rate)
- ? codec_id : kInvalidRate;
- } else if (STR_CASE_CMP("g7291", codec_inst.plname) == 0) {
- return IsG7291RateValid(codec_inst.rate)
- ? codec_id : kInvalidRate;
} else if (STR_CASE_CMP("opus", codec_inst.plname) == 0) {
return IsOpusRateValid(codec_inst.rate)
? codec_id : kInvalidRate;
- } else if (STR_CASE_CMP("speex", codec_inst.plname) == 0) {
- return IsSpeexRateValid(codec_inst.rate)
- ? codec_id : kInvalidRate;
}
return IsRateValid(codec_id, codec_inst.rate) ?
@@ -357,126 +365,11 @@ int ACMCodecDB::CodecFreq(int codec_id) {
return database_[codec_id].plfreq;
}
-// Returns the codec's basic coding block size in samples.
-int ACMCodecDB::BasicCodingBlock(int codec_id) {
- // Error check to see that codec_id is not out of bounds.
- if (codec_id < 0 || codec_id >= kNumCodecs) {
- return -1;
- }
-
- return codec_settings_[codec_id].basic_block_samples;
-}
-
-// Returns the NetEQ decoder database.
-const NetEqDecoder* ACMCodecDB::NetEQDecoders() {
- return neteq_decoders_;
-}
-
-// Checks if the bitrate is valid for the codec.
-bool ACMCodecDB::IsRateValid(int codec_id, int rate) {
- return database_[codec_id].rate == rate;
-}
-
-// Checks if the bitrate is valid for iSAC.
-bool ACMCodecDB::IsISACRateValid(int rate) {
- return (rate == -1) || ((rate <= 56000) && (rate >= 10000));
-}
-
-// Checks if the bitrate is valid for iLBC.
-bool ACMCodecDB::IsILBCRateValid(int rate, int frame_size_samples) {
- if (((frame_size_samples == 240) || (frame_size_samples == 480)) &&
- (rate == 13300)) {
- return true;
- } else if (((frame_size_samples == 160) || (frame_size_samples == 320)) &&
- (rate == 15200)) {
- return true;
- } else {
- return false;
- }
-}
-
-// Check if the bitrate is valid for the GSM-AMR.
-bool ACMCodecDB::IsAMRRateValid(int rate) {
- switch (rate) {
- case 4750:
- case 5150:
- case 5900:
- case 6700:
- case 7400:
- case 7950:
- case 10200:
- case 12200: {
- return true;
- }
- default: {
- return false;
- }
- }
-}
-
-// Check if the bitrate is valid for GSM-AMR-WB.
-bool ACMCodecDB::IsAMRwbRateValid(int rate) {
- switch (rate) {
- case 7000:
- case 9000:
- case 12000:
- case 14000:
- case 16000:
- case 18000:
- case 20000:
- case 23000:
- case 24000: {
- return true;
- }
- default: {
- return false;
- }
- }
-}
-
-// Check if the bitrate is valid for G.729.1.
-bool ACMCodecDB::IsG7291RateValid(int rate) {
- switch (rate) {
- case 8000:
- case 12000:
- case 14000:
- case 16000:
- case 18000:
- case 20000:
- case 22000:
- case 24000:
- case 26000:
- case 28000:
- case 30000:
- case 32000: {
- return true;
- }
- default: {
- return false;
- }
- }
-}
-
-// Checks if the bitrate is valid for Speex.
-bool ACMCodecDB::IsSpeexRateValid(int rate) {
- return rate > 2000;
-}
-
-// Checks if the bitrate is valid for Opus.
-bool ACMCodecDB::IsOpusRateValid(int rate) {
- return (rate >= 6000) && (rate <= 510000);
-}
-
// Checks if the payload type is in the valid range.
bool ACMCodecDB::ValidPayloadType(int payload_type) {
return (payload_type >= 0) && (payload_type <= 127);
}
-bool ACMCodecDB::OwnsDecoder(int codec_id) {
- assert(codec_id >= 0 && codec_id < ACMCodecDB::kNumCodecs);
- return ACMCodecDB::codec_settings_[codec_id].owns_decoder;
-}
-
} // namespace acm2
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_codec_database.h b/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_codec_database.h
index b1ae5aa0f56..3ac4c5b3a97 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_codec_database.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_codec_database.h
@@ -38,7 +38,6 @@ class ACMCodecDB {
, kISACFB
# endif
#endif
-#ifdef WEBRTC_CODEC_PCM16
// Mono
, kPCM16B
, kPCM16Bwb
@@ -47,7 +46,6 @@ class ACMCodecDB {
, kPCM16B_2ch
, kPCM16Bwb_2ch
, kPCM16Bswb32kHz_2ch
-#endif
// Mono
, kPCMU
, kPCMA
@@ -73,9 +71,7 @@ class ACMCodecDB {
#ifdef ENABLE_48000_HZ
, kCNFB
#endif
-#ifdef WEBRTC_CODEC_AVT
, kAVT
-#endif
#ifdef WEBRTC_CODEC_RED
, kRED
#endif
@@ -90,16 +86,6 @@ class ACMCodecDB {
enum {kISAC = -1};
# endif
#endif
-#ifndef WEBRTC_CODEC_PCM16
- // Mono
- enum {kPCM16B = -1};
- enum {kPCM16Bwb = -1};
- enum {kPCM16Bswb32kHz = -1};
- // Stereo
- enum {kPCM16B_2ch = -1};
- enum {kPCM16Bwb_2ch = -1};
- enum {kPCM16Bswb32kHz_2ch = -1};
-#endif
// 48 kHz not supported, always set to -1.
enum {kPCM16Bswb48kHz = -1};
#ifndef WEBRTC_CODEC_ILBC
@@ -115,9 +101,6 @@ class ACMCodecDB {
// Mono and stereo
enum {kOpus = -1};
#endif
-#ifndef WEBRTC_CODEC_AVT
- enum {kAVT = -1};
-#endif
#ifndef WEBRTC_CODEC_RED
enum {kRED = -1};
#endif
@@ -141,18 +124,11 @@ class ACMCodecDB {
// that can be different from packet size.
// channel_support - number of channels supported to encode;
// 1 = mono, 2 = stereo, etc.
- // owns_decoder - if true, it means that the codec should own the
- // decoder instance. In this case, the codec should
- // implement ACMGenericCodec::Decoder(), which returns
- // a pointer to AudioDecoder. This pointer is injected
- // into NetEq when this codec is registered as receive
- // codec. DEPRECATED.
struct CodecSettings {
int num_packet_sizes;
int packet_sizes_samples[kMaxNumPacketSize];
int basic_block_samples;
int channel_support;
- bool owns_decoder;
};
// Gets codec information from database at the position in database given by
@@ -189,41 +165,6 @@ class ACMCodecDB {
// codec sampling frequency if successful, otherwise -1.
static int CodecFreq(int codec_id);
- // Return the codec's basic coding block size in samples.
- // TODO(tlegrand): Check if function is needed, or if we can change
- // to access database directly.
- // Input:
- // [codec_id] - number that specifies at what position in the database to
- // get the information.
- // Return:
- // codec basic block size if successful, otherwise -1.
- static int BasicCodingBlock(int codec_id);
-
- // Returns the NetEQ decoder database.
- static const NetEqDecoder* NetEQDecoders();
-
- // Specifies if the codec specified by |codec_id| MUST own its own decoder.
- // This is the case for codecs which *should* share a single codec instance
- // between encoder and decoder. Or for codecs which ACM should have control
- // over the decoder. For instance iSAC is such a codec that encoder and
- // decoder share the same codec instance.
- static bool OwnsDecoder(int codec_id);
-
- // Checks if the bitrate is valid for the codec.
- // Input:
- // [codec_id] - number that specifies codec's position in the database.
- // [rate] - bitrate to check.
- // [frame_size_samples] - (used for iLBC) specifies which frame size to go
- // with the rate.
- static bool IsRateValid(int codec_id, int rate);
- static bool IsISACRateValid(int rate);
- static bool IsILBCRateValid(int rate, int frame_size_samples);
- static bool IsAMRRateValid(int rate);
- static bool IsAMRwbRateValid(int rate);
- static bool IsG7291RateValid(int rate);
- static bool IsSpeexRateValid(int rate);
- static bool IsOpusRateValid(int rate);
-
// Check if the payload type is valid, meaning that it is in the valid range
// of 0 to 127.
// Input:
diff --git a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_common_defs.h b/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_common_defs.h
index 85a287e1268..df67ce29145 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_common_defs.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_common_defs.h
@@ -11,12 +11,7 @@
#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_COMMON_DEFS_H_
#define WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_COMMON_DEFS_H_
-#include <string.h>
-
-#include "webrtc/common_types.h"
#include "webrtc/engine_configurations.h"
-#include "webrtc/modules/audio_coding/main/interface/audio_coding_module_typedefs.h"
-#include "webrtc/typedefs.h"
// Checks for enabled codecs, we prevent enabling codecs which are not
// compatible.
@@ -24,24 +19,8 @@
#error iSAC and iSACFX codecs cannot be enabled at the same time
#endif
-
namespace webrtc {
-// 60 ms is the maximum block size we support. An extra 20 ms is considered
-// for safety if process() method is not called when it should be, i.e. we
-// accept 20 ms of jitter. 80 ms @ 48 kHz (full-band) stereo is 7680 samples.
-#define AUDIO_BUFFER_SIZE_W16 7680
-
-// There is one timestamp per each 10 ms of audio
-// the audio buffer, at max, may contain 32 blocks of 10ms
-// audio if the sampling frequency is 8000 Hz (80 samples per block).
-// Therefore, The size of the buffer where we keep timestamps
-// is defined as follows
-#define TIMESTAMP_BUFFER_SIZE_W32 (AUDIO_BUFFER_SIZE_W16/80)
-
-// The maximum size of a payload, that is 60 ms of PCM-16 @ 32 kHz stereo
-#define MAX_PAYLOAD_SIZE_BYTE 7680
-
// General codec specific defines
const int kIsacWbDefaultRate = 32000;
const int kIsacSwbDefaultRate = 56000;
@@ -49,33 +28,6 @@ const int kIsacPacSize480 = 480;
const int kIsacPacSize960 = 960;
const int kIsacPacSize1440 = 1440;
-// A structure which contains codec parameters. For instance, used when
-// initializing encoder and decoder.
-//
-// codec_inst: c.f. common_types.h
-// enable_dtx: set true to enable DTX. If codec does not have
-// internal DTX, this will enable VAD.
-// enable_vad: set true to enable VAD.
-// vad_mode: VAD mode, c.f. audio_coding_module_typedefs.h
-// for possible values.
-struct WebRtcACMCodecParams {
- CodecInst codec_inst;
- bool enable_dtx;
- bool enable_vad;
- ACMVADMode vad_mode;
-};
-
-// TODO(turajs): Remove when ACM1 is removed.
-struct WebRtcACMAudioBuff {
- int16_t in_audio[AUDIO_BUFFER_SIZE_W16];
- int16_t in_audio_ix_read;
- int16_t in_audio_ix_write;
- uint32_t in_timestamp[TIMESTAMP_BUFFER_SIZE_W32];
- int16_t in_timestamp_ix_write;
- uint32_t last_timestamp;
- uint32_t last_in_timestamp;
-};
-
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_COMMON_DEFS_H_
diff --git a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_dump.cc b/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_dump.cc
deleted file mode 100644
index 9c624d97b67..00000000000
--- a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_dump.cc
+++ /dev/null
@@ -1,240 +0,0 @@
-/*
- * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "webrtc/modules/audio_coding/main/acm2/acm_dump.h"
-
-#include <deque>
-
-#include "webrtc/base/checks.h"
-#include "webrtc/base/thread_annotations.h"
-#include "webrtc/system_wrappers/interface/clock.h"
-#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
-#include "webrtc/system_wrappers/interface/file_wrapper.h"
-
-#ifdef RTC_AUDIOCODING_DEBUG_DUMP
-// Files generated at build-time by the protobuf compiler.
-#ifdef WEBRTC_ANDROID_PLATFORM_BUILD
-#include "external/webrtc/webrtc/modules/audio_coding/dump.pb.h"
-#else
-#include "webrtc/audio_coding/dump.pb.h"
-#endif
-#endif
-
-namespace webrtc {
-
-// Noop implementation if flag is not set
-#ifndef RTC_AUDIOCODING_DEBUG_DUMP
-class AcmDumpImpl final : public AcmDump {
- public:
- void StartLogging(const std::string& file_name, int duration_ms) override{};
- void LogRtpPacket(bool incoming,
- const uint8_t* packet,
- size_t length) override{};
- void LogDebugEvent(DebugEvent event_type,
- const std::string& event_message) override{};
- void LogDebugEvent(DebugEvent event_type) override{};
-};
-#else
-
-class AcmDumpImpl final : public AcmDump {
- public:
- AcmDumpImpl();
-
- void StartLogging(const std::string& file_name, int duration_ms) override;
- void LogRtpPacket(bool incoming,
- const uint8_t* packet,
- size_t length) override;
- void LogDebugEvent(DebugEvent event_type,
- const std::string& event_message) override;
- void LogDebugEvent(DebugEvent event_type) override;
-
- private:
- // This function is identical to LogDebugEvent, but requires holding the lock.
- void LogDebugEventLocked(DebugEvent event_type,
- const std::string& event_message)
- EXCLUSIVE_LOCKS_REQUIRED(crit_);
- // Stops logging and clears the stored data and buffers.
- void Clear() EXCLUSIVE_LOCKS_REQUIRED(crit_);
- // Adds a new event to the logfile if logging is active, or adds it to the
- // list of recent log events otherwise.
- void HandleEvent(ACMDumpEvent* event) EXCLUSIVE_LOCKS_REQUIRED(crit_);
- // Writes the event to the file. Note that this will destroy the state of the
- // input argument.
- void StoreToFile(ACMDumpEvent* event) EXCLUSIVE_LOCKS_REQUIRED(crit_);
- // Adds the event to the list of recent events, and removes any events that
- // are too old and no longer fall in the time window.
- void AddRecentEvent(const ACMDumpEvent& event)
- EXCLUSIVE_LOCKS_REQUIRED(crit_);
-
- // Amount of time in microseconds to record log events, before starting the
- // actual log.
- const int recent_log_duration_us = 10000000;
-
- rtc::scoped_ptr<webrtc::CriticalSectionWrapper> crit_;
- rtc::scoped_ptr<webrtc::FileWrapper> file_ GUARDED_BY(crit_);
- rtc::scoped_ptr<ACMDumpEventStream> stream_ GUARDED_BY(crit_);
- std::deque<ACMDumpEvent> recent_log_events_ GUARDED_BY(crit_);
- bool currently_logging_ GUARDED_BY(crit_);
- int64_t start_time_us_ GUARDED_BY(crit_);
- int64_t duration_us_ GUARDED_BY(crit_);
- const webrtc::Clock* const clock_;
-};
-
-namespace {
-
-// Convert from AcmDump's debug event enum (runtime format) to the corresponding
-// protobuf enum (serialized format).
-ACMDumpDebugEvent_EventType convertDebugEvent(AcmDump::DebugEvent event_type) {
- switch (event_type) {
- case AcmDump::DebugEvent::kLogStart:
- return ACMDumpDebugEvent::LOG_START;
- case AcmDump::DebugEvent::kLogEnd:
- return ACMDumpDebugEvent::LOG_END;
- case AcmDump::DebugEvent::kAudioPlayout:
- return ACMDumpDebugEvent::AUDIO_PLAYOUT;
- }
- return ACMDumpDebugEvent::UNKNOWN_EVENT;
-}
-
-} // Anonymous namespace.
-
-// AcmDumpImpl member functions.
-AcmDumpImpl::AcmDumpImpl()
- : crit_(webrtc::CriticalSectionWrapper::CreateCriticalSection()),
- file_(webrtc::FileWrapper::Create()),
- stream_(new webrtc::ACMDumpEventStream()),
- currently_logging_(false),
- start_time_us_(0),
- duration_us_(0),
- clock_(webrtc::Clock::GetRealTimeClock()) {
-}
-
-void AcmDumpImpl::StartLogging(const std::string& file_name, int duration_ms) {
- CriticalSectionScoped lock(crit_.get());
- Clear();
- if (file_->OpenFile(file_name.c_str(), false) != 0) {
- return;
- }
- // Add LOG_START event to the recent event list. This call will also remove
- // any events that are too old from the recent event list.
- LogDebugEventLocked(DebugEvent::kLogStart, "");
- currently_logging_ = true;
- start_time_us_ = clock_->TimeInMicroseconds();
- duration_us_ = static_cast<int64_t>(duration_ms) * 1000;
- // Write all the recent events to the log file.
- for (auto&& event : recent_log_events_) {
- StoreToFile(&event);
- }
- recent_log_events_.clear();
-}
-
-void AcmDumpImpl::LogRtpPacket(bool incoming,
- const uint8_t* packet,
- size_t length) {
- CriticalSectionScoped lock(crit_.get());
- ACMDumpEvent rtp_event;
- const int64_t timestamp = clock_->TimeInMicroseconds();
- rtp_event.set_timestamp_us(timestamp);
- rtp_event.set_type(webrtc::ACMDumpEvent::RTP_EVENT);
- rtp_event.mutable_packet()->set_direction(
- incoming ? ACMDumpRTPPacket::INCOMING : ACMDumpRTPPacket::OUTGOING);
- rtp_event.mutable_packet()->set_rtp_data(packet, length);
- HandleEvent(&rtp_event);
-}
-
-void AcmDumpImpl::LogDebugEvent(DebugEvent event_type,
- const std::string& event_message) {
- CriticalSectionScoped lock(crit_.get());
- LogDebugEventLocked(event_type, event_message);
-}
-
-void AcmDumpImpl::LogDebugEvent(DebugEvent event_type) {
- CriticalSectionScoped lock(crit_.get());
- LogDebugEventLocked(event_type, "");
-}
-
-void AcmDumpImpl::LogDebugEventLocked(DebugEvent event_type,
- const std::string& event_message) {
- ACMDumpEvent event;
- int64_t timestamp = clock_->TimeInMicroseconds();
- event.set_timestamp_us(timestamp);
- event.set_type(webrtc::ACMDumpEvent::DEBUG_EVENT);
- auto debug_event = event.mutable_debug_event();
- debug_event->set_type(convertDebugEvent(event_type));
- debug_event->set_message(event_message);
- HandleEvent(&event);
-}
-
-void AcmDumpImpl::Clear() {
- if (file_->Open()) {
- file_->CloseFile();
- }
- currently_logging_ = false;
- stream_->Clear();
-}
-
-void AcmDumpImpl::HandleEvent(ACMDumpEvent* event) {
- if (currently_logging_) {
- if (clock_->TimeInMicroseconds() < start_time_us_ + duration_us_) {
- StoreToFile(event);
- } else {
- LogDebugEventLocked(DebugEvent::kLogEnd, "");
- Clear();
- AddRecentEvent(*event);
- }
- } else {
- AddRecentEvent(*event);
- }
-}
-
-void AcmDumpImpl::StoreToFile(ACMDumpEvent* event) {
- // Reuse the same object at every log event.
- if (stream_->stream_size() < 1) {
- stream_->add_stream();
- }
- DCHECK_EQ(stream_->stream_size(), 1);
- stream_->mutable_stream(0)->Swap(event);
-
- std::string dump_buffer;
- stream_->SerializeToString(&dump_buffer);
- file_->Write(dump_buffer.data(), dump_buffer.size());
-}
-
-void AcmDumpImpl::AddRecentEvent(const ACMDumpEvent& event) {
- recent_log_events_.push_back(event);
- while (recent_log_events_.front().timestamp_us() <
- event.timestamp_us() - recent_log_duration_us) {
- recent_log_events_.pop_front();
- }
-}
-
-bool AcmDump::ParseAcmDump(const std::string& file_name,
- ACMDumpEventStream* result) {
- char tmp_buffer[1024];
- int bytes_read = 0;
- rtc::scoped_ptr<FileWrapper> dump_file(FileWrapper::Create());
- if (dump_file->OpenFile(file_name.c_str(), true) != 0) {
- return false;
- }
- std::string dump_buffer;
- while ((bytes_read = dump_file->Read(tmp_buffer, sizeof(tmp_buffer))) > 0) {
- dump_buffer.append(tmp_buffer, bytes_read);
- }
- dump_file->CloseFile();
- return result->ParseFromString(dump_buffer);
-}
-
-#endif // RTC_AUDIOCODING_DEBUG_DUMP
-
-// AcmDump member functions.
-rtc::scoped_ptr<AcmDump> AcmDump::Create() {
- return rtc::scoped_ptr<AcmDump>(new AcmDumpImpl());
-}
-} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_dump.h b/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_dump.h
deleted file mode 100644
index c72c3870965..00000000000
--- a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_dump.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_DUMP_H_
-#define WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_DUMP_H_
-
-#include <string>
-
-#include "webrtc/base/scoped_ptr.h"
-
-namespace webrtc {
-
-// Forward declaration of storage class that is automatically generated from
-// the protobuf file.
-class ACMDumpEventStream;
-
-class AcmDumpImpl;
-
-class AcmDump {
- public:
- // The types of debug events that are currently supported for logging.
- enum class DebugEvent { kLogStart, kLogEnd, kAudioPlayout };
-
- virtual ~AcmDump() {}
-
- static rtc::scoped_ptr<AcmDump> Create();
-
- // Starts logging for the specified duration to the specified file.
- // The logging will stop automatically after the specified duration.
- // If the file already exists it will be overwritten.
- // The function will return false on failure.
- virtual void StartLogging(const std::string& file_name, int duration_ms) = 0;
-
- // Logs an incoming or outgoing RTP packet.
- virtual void LogRtpPacket(bool incoming,
- const uint8_t* packet,
- size_t length) = 0;
-
- // Logs a debug event, with optional message.
- virtual void LogDebugEvent(DebugEvent event_type,
- const std::string& event_message) = 0;
- virtual void LogDebugEvent(DebugEvent event_type) = 0;
-
- // Reads an AcmDump file and returns true when reading was successful.
- // The result is stored in the given ACMDumpEventStream object.
- static bool ParseAcmDump(const std::string& file_name,
- ACMDumpEventStream* result);
-};
-
-} // namespace webrtc
-
-#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_DUMP_H_
diff --git a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_dump_unittest.cc b/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_dump_unittest.cc
deleted file mode 100644
index 98d0e622a87..00000000000
--- a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_dump_unittest.cc
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifdef RTC_AUDIOCODING_DEBUG_DUMP
-
-#include <stdio.h>
-#include <string>
-#include <vector>
-
-#include "testing/gtest/include/gtest/gtest.h"
-#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/audio_coding/main/acm2/acm_dump.h"
-#include "webrtc/system_wrappers/interface/clock.h"
-#include "webrtc/test/test_suite.h"
-#include "webrtc/test/testsupport/fileutils.h"
-#include "webrtc/test/testsupport/gtest_disable.h"
-
-// Files generated at build-time by the protobuf compiler.
-#ifdef WEBRTC_ANDROID_PLATFORM_BUILD
-#include "external/webrtc/webrtc/modules/audio_coding/dump.pb.h"
-#else
-#include "webrtc/audio_coding/dump.pb.h"
-#endif
-
-namespace webrtc {
-
-// Test for the acm dump class. Dumps some RTP packets to disk, then reads them
-// back to see if they match.
-class AcmDumpTest : public ::testing::Test {
- public:
- void VerifyResults(const ACMDumpEventStream& parsed_stream,
- size_t packet_size) {
- // Verify the result.
- EXPECT_EQ(5, parsed_stream.stream_size());
- const ACMDumpEvent& start_event = parsed_stream.stream(2);
- ASSERT_TRUE(start_event.has_type());
- EXPECT_EQ(ACMDumpEvent::DEBUG_EVENT, start_event.type());
- EXPECT_TRUE(start_event.has_timestamp_us());
- EXPECT_FALSE(start_event.has_packet());
- ASSERT_TRUE(start_event.has_debug_event());
- auto start_debug_event = start_event.debug_event();
- ASSERT_TRUE(start_debug_event.has_type());
- EXPECT_EQ(ACMDumpDebugEvent::LOG_START, start_debug_event.type());
- ASSERT_TRUE(start_debug_event.has_message());
-
- for (int i = 0; i < parsed_stream.stream_size(); i++) {
- if (i == 2) {
- // This is the LOG_START packet that was already verified.
- continue;
- }
- const ACMDumpEvent& test_event = parsed_stream.stream(i);
- ASSERT_TRUE(test_event.has_type());
- EXPECT_EQ(ACMDumpEvent::RTP_EVENT, test_event.type());
- EXPECT_TRUE(test_event.has_timestamp_us());
- EXPECT_FALSE(test_event.has_debug_event());
- ASSERT_TRUE(test_event.has_packet());
- const ACMDumpRTPPacket& test_packet = test_event.packet();
- ASSERT_TRUE(test_packet.has_direction());
- if (i <= 1) {
- EXPECT_EQ(ACMDumpRTPPacket::INCOMING, test_packet.direction());
- } else if (i >= 3) {
- EXPECT_EQ(ACMDumpRTPPacket::OUTGOING, test_packet.direction());
- }
- ASSERT_TRUE(test_packet.has_rtp_data());
- ASSERT_EQ(packet_size, test_packet.rtp_data().size());
- for (size_t i = 0; i < packet_size; i++) {
- EXPECT_EQ(rtp_packet_[i],
- static_cast<uint8_t>(test_packet.rtp_data()[i]));
- }
- }
- }
-
- void Run(int packet_size, int random_seed) {
- rtp_packet_.clear();
- rtp_packet_.reserve(packet_size);
- srand(random_seed);
- // Fill the packet vector with random data.
- for (int i = 0; i < packet_size; i++) {
- rtp_packet_.push_back(rand());
- }
- // Find the name of the current test, in order to use it as a temporary
- // filename.
- auto test_info = ::testing::UnitTest::GetInstance()->current_test_info();
- const std::string temp_filename =
- test::OutputPath() + test_info->test_case_name() + test_info->name();
-
- // When log_dumper goes out of scope, it causes the log file to be flushed
- // to disk.
- {
- rtc::scoped_ptr<AcmDump> log_dumper(AcmDump::Create());
- log_dumper->LogRtpPacket(true, rtp_packet_.data(), rtp_packet_.size());
- log_dumper->LogRtpPacket(true, rtp_packet_.data(), rtp_packet_.size());
- log_dumper->StartLogging(temp_filename, 10000000);
- log_dumper->LogRtpPacket(false, rtp_packet_.data(), rtp_packet_.size());
- log_dumper->LogRtpPacket(false, rtp_packet_.data(), rtp_packet_.size());
- }
-
- // Read the generated file from disk.
- ACMDumpEventStream parsed_stream;
-
- ASSERT_EQ(true, AcmDump::ParseAcmDump(temp_filename, &parsed_stream));
-
- VerifyResults(parsed_stream, packet_size);
-
- // Clean up temporary file - can be pretty slow.
- remove(temp_filename.c_str());
- }
- std::vector<uint8_t> rtp_packet_;
-};
-
-TEST_F(AcmDumpTest, DumpAndRead) {
- Run(256, 321);
-}
-
-} // namespace webrtc
-
-#endif // RTC_AUDIOCODING_DEBUG_DUMP
diff --git a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_receive_test.cc b/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_receive_test.cc
index dc59984a953..e01b2631270 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_receive_test.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_receive_test.cc
@@ -38,9 +38,13 @@ AcmReceiveTest::AcmReceiveTest(PacketSource* packet_source,
}
void AcmReceiveTest::RegisterDefaultCodecs() {
+#ifdef WEBRTC_CODEC_OPUS
ASSERT_TRUE(acm_->RegisterReceiveCodec(acm2::ACMCodecDB::kOpus, 120));
+#endif
+#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
ASSERT_TRUE(acm_->RegisterReceiveCodec(acm2::ACMCodecDB::kISAC, 103));
-#ifndef WEBRTC_ANDROID
+#endif
+#ifdef WEBRTC_CODEC_ISAC
ASSERT_TRUE(acm_->RegisterReceiveCodec(acm2::ACMCodecDB::kISACSWB, 104));
ASSERT_TRUE(acm_->RegisterReceiveCodec(acm2::ACMCodecDB::kISACFB, 105));
#endif
@@ -56,13 +60,19 @@ void AcmReceiveTest::RegisterDefaultCodecs() {
ASSERT_TRUE(acm_->RegisterReceiveCodec(acm2::ACMCodecDB::kPCMA, 8));
ASSERT_TRUE(acm_->RegisterReceiveCodec(acm2::ACMCodecDB::kPCMU_2ch, 110));
ASSERT_TRUE(acm_->RegisterReceiveCodec(acm2::ACMCodecDB::kPCMA_2ch, 118));
+#ifdef WEBRTC_CODEC_ILBC
ASSERT_TRUE(acm_->RegisterReceiveCodec(acm2::ACMCodecDB::kILBC, 102));
+#endif
+#ifdef WEBRTC_CODEC_G722
ASSERT_TRUE(acm_->RegisterReceiveCodec(acm2::ACMCodecDB::kG722, 9));
ASSERT_TRUE(acm_->RegisterReceiveCodec(acm2::ACMCodecDB::kG722_2ch, 119));
+#endif
ASSERT_TRUE(acm_->RegisterReceiveCodec(acm2::ACMCodecDB::kCNNB, 13));
ASSERT_TRUE(acm_->RegisterReceiveCodec(acm2::ACMCodecDB::kCNWB, 98));
ASSERT_TRUE(acm_->RegisterReceiveCodec(acm2::ACMCodecDB::kCNSWB, 99));
+#ifdef WEBRTC_CODEC_RED
ASSERT_TRUE(acm_->RegisterReceiveCodec(acm2::ACMCodecDB::kRED, 127));
+#endif
}
void AcmReceiveTest::RegisterNetEqTestCodecs() {
@@ -93,7 +103,8 @@ void AcmReceiveTest::Run() {
AudioFrame output_frame;
EXPECT_TRUE(acm_->Get10MsAudio(&output_frame));
EXPECT_EQ(output_freq_hz_, output_frame.sample_rate_hz_);
- const int samples_per_block = output_freq_hz_ * 10 / 1000;
+ const size_t samples_per_block =
+ static_cast<size_t>(output_freq_hz_ * 10 / 1000);
EXPECT_EQ(samples_per_block, output_frame.samples_per_channel_);
if (expected_output_channels_ != kArbitraryChannels) {
if (output_frame.speech_type_ == webrtc::AudioFrame::kPLC) {
diff --git a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_receive_test.h b/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_receive_test.h
index a1e01423bcb..80e9fbede65 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_receive_test.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_receive_test.h
@@ -56,7 +56,7 @@ class AcmReceiveTest {
const int output_freq_hz_;
NumOutputChannels expected_output_channels_;
- DISALLOW_COPY_AND_ASSIGN(AcmReceiveTest);
+ RTC_DISALLOW_COPY_AND_ASSIGN(AcmReceiveTest);
};
} // namespace test
diff --git a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_receive_test_oldapi.cc b/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_receive_test_oldapi.cc
index 96a1fc5fdcb..2a0bbe15d4b 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_receive_test_oldapi.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_receive_test_oldapi.cc
@@ -143,6 +143,15 @@ void AcmReceiveTestOldApi::RegisterNetEqTestCodecs() {
}
}
+int AcmReceiveTestOldApi::RegisterExternalReceiveCodec(
+ int rtp_payload_type,
+ AudioDecoder* external_decoder,
+ int sample_rate_hz,
+ int num_channels) {
+ return acm_->RegisterExternalReceiveCodec(rtp_payload_type, external_decoder,
+ sample_rate_hz, num_channels);
+}
+
void AcmReceiveTestOldApi::Run() {
for (rtc::scoped_ptr<Packet> packet(packet_source_->NextPacket()); packet;
packet.reset(packet_source_->NextPacket())) {
@@ -151,7 +160,8 @@ void AcmReceiveTestOldApi::Run() {
AudioFrame output_frame;
EXPECT_EQ(0, acm_->PlayoutData10Ms(output_freq_hz_, &output_frame));
EXPECT_EQ(output_freq_hz_, output_frame.sample_rate_hz_);
- const int samples_per_block = output_freq_hz_ * 10 / 1000;
+ const size_t samples_per_block =
+ static_cast<size_t>(output_freq_hz_ * 10 / 1000);
EXPECT_EQ(samples_per_block, output_frame.samples_per_channel_);
if (exptected_output_channels_ != kArbitraryChannels) {
if (output_frame.speech_type_ == webrtc::AudioFrame::kPLC) {
diff --git a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_receive_test_oldapi.h b/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_receive_test_oldapi.h
index 5e5ff9a0a0e..6c1209739d1 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_receive_test_oldapi.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_receive_test_oldapi.h
@@ -17,6 +17,7 @@
namespace webrtc {
class AudioCodingModule;
+class AudioDecoder;
struct CodecInst;
namespace test {
@@ -44,6 +45,11 @@ class AcmReceiveTestOldApi {
// files.
void RegisterNetEqTestCodecs();
+ int RegisterExternalReceiveCodec(int rtp_payload_type,
+ AudioDecoder* external_decoder,
+ int sample_rate_hz,
+ int num_channels);
+
// Runs the test and returns true if successful.
void Run();
@@ -58,7 +64,7 @@ class AcmReceiveTestOldApi {
int output_freq_hz_;
NumOutputChannels exptected_output_channels_;
- DISALLOW_COPY_AND_ASSIGN(AcmReceiveTestOldApi);
+ RTC_DISALLOW_COPY_AND_ASSIGN(AcmReceiveTestOldApi);
};
// This test toggles the output frequency every |toggle_period_ms|. The test
diff --git a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_receiver.cc b/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_receiver.cc
index ae5a04f25e0..bfcf76ce724 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_receiver.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_receiver.cc
@@ -15,6 +15,7 @@
#include <algorithm> // sort
#include <vector>
+#include "webrtc/base/checks.h"
#include "webrtc/base/format_macros.h"
#include "webrtc/base/logging.h"
#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
@@ -212,51 +213,6 @@ int AcmReceiver::current_sample_rate_hz() const {
return current_sample_rate_hz_;
}
-// TODO(turajs): use one set of enumerators, e.g. the one defined in
-// common_types.h
-// TODO(henrik.lundin): This method is not used any longer. The call hierarchy
-// stops in voe::Channel::SetNetEQPlayoutMode(). Remove it.
-void AcmReceiver::SetPlayoutMode(AudioPlayoutMode mode) {
- enum NetEqPlayoutMode playout_mode = kPlayoutOn;
- switch (mode) {
- case voice:
- playout_mode = kPlayoutOn;
- break;
- case fax: // No change to background noise mode.
- playout_mode = kPlayoutFax;
- break;
- case streaming:
- playout_mode = kPlayoutStreaming;
- break;
- case off:
- playout_mode = kPlayoutOff;
- break;
- }
- neteq_->SetPlayoutMode(playout_mode);
-}
-
-AudioPlayoutMode AcmReceiver::PlayoutMode() const {
- AudioPlayoutMode acm_mode = voice;
- NetEqPlayoutMode mode = neteq_->PlayoutMode();
- switch (mode) {
- case kPlayoutOn:
- acm_mode = voice;
- break;
- case kPlayoutOff:
- acm_mode = off;
- break;
- case kPlayoutFax:
- acm_mode = fax;
- break;
- case kPlayoutStreaming:
- acm_mode = streaming;
- break;
- default:
- assert(false);
- }
- return acm_mode;
-}
-
int AcmReceiver::InsertPacket(const WebRtcRTPHeader& rtp_header,
const uint8_t* incoming_payload,
size_t length_payload) {
@@ -344,7 +300,7 @@ int AcmReceiver::InsertPacket(const WebRtcRTPHeader& rtp_header,
int AcmReceiver::GetAudio(int desired_freq_hz, AudioFrame* audio_frame) {
enum NetEqOutputType type;
- int samples_per_channel;
+ size_t samples_per_channel;
int num_channels;
bool return_silence = false;
@@ -394,7 +350,7 @@ int AcmReceiver::GetAudio(int desired_freq_hz, AudioFrame* audio_frame) {
}
// NetEq always returns 10 ms of audio.
- current_sample_rate_hz_ = samples_per_channel * 100;
+ current_sample_rate_hz_ = static_cast<int>(samples_per_channel * 100);
// Update if resampling is required.
bool need_resampling = (desired_freq_hz != -1) &&
@@ -403,18 +359,19 @@ int AcmReceiver::GetAudio(int desired_freq_hz, AudioFrame* audio_frame) {
if (need_resampling && !resampled_last_output_frame_) {
// Prime the resampler with the last frame.
int16_t temp_output[AudioFrame::kMaxDataSizeSamples];
- samples_per_channel =
+ int samples_per_channel_int =
resampler_.Resample10Msec(last_audio_buffer_.get(),
current_sample_rate_hz_,
desired_freq_hz,
num_channels,
AudioFrame::kMaxDataSizeSamples,
temp_output);
- if (samples_per_channel < 0) {
+ if (samples_per_channel_int < 0) {
LOG(LERROR) << "AcmReceiver::GetAudio - "
"Resampling last_audio_buffer_ failed.";
return -1;
}
+ samples_per_channel = static_cast<size_t>(samples_per_channel_int);
}
// The audio in |audio_buffer_| is tansferred to |audio_frame_| below, either
@@ -422,17 +379,18 @@ int AcmReceiver::GetAudio(int desired_freq_hz, AudioFrame* audio_frame) {
// TODO(henrik.lundin) Glitches in the output may appear if the output rate
// from NetEq changes. See WebRTC issue 3923.
if (need_resampling) {
- samples_per_channel =
+ int samples_per_channel_int =
resampler_.Resample10Msec(audio_buffer_.get(),
current_sample_rate_hz_,
desired_freq_hz,
num_channels,
AudioFrame::kMaxDataSizeSamples,
audio_frame->data_);
- if (samples_per_channel < 0) {
+ if (samples_per_channel_int < 0) {
LOG(LERROR) << "AcmReceiver::GetAudio - Resampling audio_buffer_ failed.";
return -1;
}
+ samples_per_channel = static_cast<size_t>(samples_per_channel_int);
resampled_last_output_frame_ = true;
} else {
resampled_last_output_frame_ = false;
@@ -448,7 +406,7 @@ int AcmReceiver::GetAudio(int desired_freq_hz, AudioFrame* audio_frame) {
audio_frame->num_channels_ = num_channels;
audio_frame->samples_per_channel_ = samples_per_channel;
- audio_frame->sample_rate_hz_ = samples_per_channel * 100;
+ audio_frame->sample_rate_hz_ = static_cast<int>(samples_per_channel * 100);
// Should set |vad_activity| before calling SetAudioFrameActivityAndType().
audio_frame->vad_activity_ = previous_audio_activity_;
@@ -476,8 +434,10 @@ int32_t AcmReceiver::AddCodec(int acm_codec_id,
int channels,
int sample_rate_hz,
AudioDecoder* audio_decoder) {
- assert(acm_codec_id >= 0);
- NetEqDecoder neteq_decoder = ACMCodecDB::neteq_decoders_[acm_codec_id];
+ assert(acm_codec_id >= -1); // -1 means external decoder
+ NetEqDecoder neteq_decoder = (acm_codec_id == -1)
+ ? kDecoderArbitrary
+ : ACMCodecDB::neteq_decoders_[acm_codec_id];
// Make sure the right decoder is registered for Opus.
if (neteq_decoder == kDecoderOpus && channels == 2) {
@@ -491,14 +451,15 @@ int32_t AcmReceiver::AddCodec(int acm_codec_id,
auto it = decoders_.find(payload_type);
if (it != decoders_.end()) {
const Decoder& decoder = it->second;
- if (decoder.acm_codec_id == acm_codec_id && decoder.channels == channels &&
+ if (acm_codec_id != -1 && decoder.acm_codec_id == acm_codec_id &&
+ decoder.channels == channels &&
decoder.sample_rate_hz == sample_rate_hz) {
// Re-registering the same codec. Do nothing and return.
return 0;
}
- // Changing codec or number of channels. First unregister the old codec,
- // then register the new one.
+ // Changing codec. First unregister the old codec, then register the new
+ // one.
if (neteq_->RemovePayloadType(payload_type) != NetEq::kOK) {
LOG(LERROR) << "Cannot remove payload " << static_cast<int>(payload_type);
return -1;
@@ -647,31 +608,10 @@ void AcmReceiver::GetNetworkStatistics(NetworkStatistics* acm_stat) {
acm_stat->currentSecondaryDecodedRate = neteq_stat.secondary_decoded_rate;
acm_stat->clockDriftPPM = neteq_stat.clockdrift_ppm;
acm_stat->addedSamples = neteq_stat.added_zero_samples;
-
- std::vector<int> waiting_times;
- neteq_->WaitingTimes(&waiting_times);
- size_t size = waiting_times.size();
- if (size == 0) {
- acm_stat->meanWaitingTimeMs = -1;
- acm_stat->medianWaitingTimeMs = -1;
- acm_stat->minWaitingTimeMs = -1;
- acm_stat->maxWaitingTimeMs = -1;
- } else {
- std::sort(waiting_times.begin(), waiting_times.end());
- if ((size & 0x1) == 0) {
- acm_stat->medianWaitingTimeMs = (waiting_times[size / 2 - 1] +
- waiting_times[size / 2]) / 2;
- } else {
- acm_stat->medianWaitingTimeMs = waiting_times[size / 2];
- }
- acm_stat->minWaitingTimeMs = waiting_times.front();
- acm_stat->maxWaitingTimeMs = waiting_times.back();
- double sum = 0;
- for (size_t i = 0; i < size; ++i) {
- sum += waiting_times[i];
- }
- acm_stat->meanWaitingTimeMs = static_cast<int>(sum / size);
- }
+ acm_stat->meanWaitingTimeMs = neteq_stat.mean_waiting_time_ms;
+ acm_stat->medianWaitingTimeMs = neteq_stat.median_waiting_time_ms;
+ acm_stat->minWaitingTimeMs = neteq_stat.min_waiting_time_ms;
+ acm_stat->maxWaitingTimeMs = neteq_stat.max_waiting_time_ms;
}
int AcmReceiver::DecoderByPayloadType(uint8_t payload_type,
@@ -784,10 +724,11 @@ bool AcmReceiver::GetSilence(int desired_sample_rate_hz, AudioFrame* frame) {
frame->sample_rate_hz_ = current_sample_rate_hz_;
}
- frame->samples_per_channel_ = frame->sample_rate_hz_ / 100; // Always 10 ms.
+ frame->samples_per_channel_ =
+ static_cast<size_t>(frame->sample_rate_hz_ / 100); // Always 10 ms.
frame->speech_type_ = AudioFrame::kCNG;
frame->vad_activity_ = AudioFrame::kVadPassive;
- int samples = frame->samples_per_channel_ * frame->num_channels_;
+ size_t samples = frame->samples_per_channel_ * frame->num_channels_;
memset(frame->data_, 0, samples * sizeof(int16_t));
return true;
}
diff --git a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_receiver.h b/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_receiver.h
index 46207fd4492..d726264bf99 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_receiver.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_receiver.h
@@ -93,21 +93,24 @@ class AcmReceiver {
// Adds a new codec to the NetEq codec database.
//
// Input:
- // - acm_codec_id : ACM codec ID.
+ // - acm_codec_id : ACM codec ID; -1 means external decoder.
// - payload_type : payload type.
// - sample_rate_hz : sample rate.
- // - audio_decoder : pointer to a decoder object. If it is NULL
- // then NetEq will internally create the decoder
- // object. Otherwise, NetEq will store this pointer
- // as the decoder corresponding with the given
- // payload type. NetEq won't acquire the ownership
- // of this pointer. It is up to the client of this
- // class (ACM) to delete it. By providing
- // |audio_decoder| ACM will have control over the
- // decoder instance of the codec. This is essential
- // for a codec like iSAC which encoder/decoder
- // encoder has to know about decoder (bandwidth
- // estimator that is updated at decoding time).
+ // - audio_decoder : pointer to a decoder object. If it's null, then
+ // NetEq will internally create a decoder object
+ // based on the value of |acm_codec_id| (which
+ // mustn't be -1). Otherwise, NetEq will use the
+ // given decoder for the given payload type. NetEq
+ // won't take ownership of the decoder; it's up to
+ // the caller to delete it when it's no longer
+ // needed.
+ //
+ // Providing an existing decoder object here is
+ // necessary for external decoders, but may also be
+ // used for built-in decoders if NetEq doesn't have
+ // all the info it needs to construct them properly
+ // (e.g. iSAC, where the decoder needs to be paired
+ // with an encoder).
//
// Return value : 0 if OK.
// <0 if NetEq returned an error.
@@ -175,21 +178,6 @@ class AcmReceiver {
int current_sample_rate_hz() const;
//
- // Sets the playout mode.
- //
- // Input:
- // - mode : an enumerator specifying the playout mode.
- //
- void SetPlayoutMode(AudioPlayoutMode mode);
-
- //
- // Get the current playout mode.
- //
- // Return value : The current playout mode.
- //
- AudioPlayoutMode PlayoutMode() const;
-
- //
// Get the current network statistics from NetEq.
//
// Output:
diff --git a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_receiver_unittest.cc b/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_receiver_unittest.cc
index 6234d4f501c..ff9c91115a5 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_receiver_unittest.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_receiver_unittest.cc
@@ -265,21 +265,6 @@ TEST_F(AcmReceiverTest, DISABLED_ON_ANDROID(SampleRate)) {
}
}
-// Verify that the playout mode is set correctly.
-TEST_F(AcmReceiverTest, DISABLED_ON_ANDROID(PlayoutMode)) {
- receiver_->SetPlayoutMode(voice);
- EXPECT_EQ(voice, receiver_->PlayoutMode());
-
- receiver_->SetPlayoutMode(streaming);
- EXPECT_EQ(streaming, receiver_->PlayoutMode());
-
- receiver_->SetPlayoutMode(fax);
- EXPECT_EQ(fax, receiver_->PlayoutMode());
-
- receiver_->SetPlayoutMode(off);
- EXPECT_EQ(off, receiver_->PlayoutMode());
-}
-
TEST_F(AcmReceiverTest, DISABLED_ON_ANDROID(PostdecodingVad)) {
receiver_->EnableVad();
EXPECT_TRUE(receiver_->vad_enabled());
@@ -308,7 +293,13 @@ TEST_F(AcmReceiverTest, DISABLED_ON_ANDROID(PostdecodingVad)) {
EXPECT_EQ(AudioFrame::kVadUnknown, frame.vad_activity_);
}
-TEST_F(AcmReceiverTest, DISABLED_ON_ANDROID(LastAudioCodec)) {
+#ifdef WEBRTC_CODEC_ISAC
+#define IF_ISAC_FLOAT(x) x
+#else
+#define IF_ISAC_FLOAT(x) DISABLED_##x
+#endif
+
+TEST_F(AcmReceiverTest, DISABLED_ON_ANDROID(IF_ISAC_FLOAT(LastAudioCodec))) {
const int kCodecId[] = {
ACMCodecDB::kISAC, ACMCodecDB::kPCMA, ACMCodecDB::kISACSWB,
ACMCodecDB::kPCM16Bswb32kHz,
diff --git a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_receiver_unittest_oldapi.cc b/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_receiver_unittest_oldapi.cc
index 5800fb7367f..c30eaf04323 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_receiver_unittest_oldapi.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_receiver_unittest_oldapi.cc
@@ -264,21 +264,6 @@ TEST_F(AcmReceiverTestOldApi, DISABLED_ON_ANDROID(SampleRate)) {
}
}
-// Verify that the playout mode is set correctly.
-TEST_F(AcmReceiverTestOldApi, DISABLED_ON_ANDROID(PlayoutMode)) {
- receiver_->SetPlayoutMode(voice);
- EXPECT_EQ(voice, receiver_->PlayoutMode());
-
- receiver_->SetPlayoutMode(streaming);
- EXPECT_EQ(streaming, receiver_->PlayoutMode());
-
- receiver_->SetPlayoutMode(fax);
- EXPECT_EQ(fax, receiver_->PlayoutMode());
-
- receiver_->SetPlayoutMode(off);
- EXPECT_EQ(off, receiver_->PlayoutMode());
-}
-
TEST_F(AcmReceiverTestOldApi, DISABLED_ON_ANDROID(PostdecodingVad)) {
receiver_->EnableVad();
EXPECT_TRUE(receiver_->vad_enabled());
@@ -307,7 +292,14 @@ TEST_F(AcmReceiverTestOldApi, DISABLED_ON_ANDROID(PostdecodingVad)) {
EXPECT_EQ(AudioFrame::kVadUnknown, frame.vad_activity_);
}
-TEST_F(AcmReceiverTestOldApi, DISABLED_ON_ANDROID(LastAudioCodec)) {
+#ifdef WEBRTC_CODEC_ISAC
+#define IF_ISAC_FLOAT(x) x
+#else
+#define IF_ISAC_FLOAT(x) DISABLED_##x
+#endif
+
+TEST_F(AcmReceiverTestOldApi,
+ DISABLED_ON_ANDROID(IF_ISAC_FLOAT(LastAudioCodec))) {
const int kCodecId[] = {
ACMCodecDB::kISAC, ACMCodecDB::kPCMA, ACMCodecDB::kISACSWB,
ACMCodecDB::kPCM16Bswb32kHz,
diff --git a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_resampler.cc b/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_resampler.cc
index 97d87b1b3a4..2650725331b 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_resampler.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_resampler.cc
@@ -29,9 +29,9 @@ int ACMResampler::Resample10Msec(const int16_t* in_audio,
int in_freq_hz,
int out_freq_hz,
int num_audio_channels,
- int out_capacity_samples,
+ size_t out_capacity_samples,
int16_t* out_audio) {
- int in_length = in_freq_hz * num_audio_channels / 100;
+ size_t in_length = static_cast<size_t>(in_freq_hz * num_audio_channels / 100);
int out_length = out_freq_hz * num_audio_channels / 100;
if (in_freq_hz == out_freq_hz) {
if (out_capacity_samples < in_length) {
@@ -39,7 +39,7 @@ int ACMResampler::Resample10Msec(const int16_t* in_audio,
return -1;
}
memcpy(out_audio, in_audio, in_length * sizeof(int16_t));
- return in_length / num_audio_channels;
+ return static_cast<int>(in_length / num_audio_channels);
}
if (resampler_.InitializeIfNeeded(in_freq_hz, out_freq_hz,
diff --git a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_resampler.h b/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_resampler.h
index a8fc6b6f26a..a19b0c45694 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_resampler.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_resampler.h
@@ -26,7 +26,7 @@ class ACMResampler {
int in_freq_hz,
int out_freq_hz,
int num_audio_channels,
- int out_capacity_samples,
+ size_t out_capacity_samples,
int16_t* out_audio);
private:
diff --git a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_send_test.cc b/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_send_test.cc
index b96db6b8b12..b05968645cc 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_send_test.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_send_test.cc
@@ -29,7 +29,8 @@ AcmSendTest::AcmSendTest(InputAudioFile* audio_source,
: clock_(0),
audio_source_(audio_source),
source_rate_hz_(source_rate_hz),
- input_block_size_samples_(source_rate_hz_ * kBlockSizeMs / 1000),
+ input_block_size_samples_(
+ static_cast<size_t>(source_rate_hz_ * kBlockSizeMs / 1000)),
codec_registered_(false),
test_duration_ms_(test_duration_ms),
frame_type_(kAudioFrameSpeech),
@@ -70,7 +71,8 @@ Packet* AcmSendTest::NextPacket() {
// Insert audio and process until one packet is produced.
while (clock_.TimeInMilliseconds() < test_duration_ms_) {
clock_.AdvanceTimeMilliseconds(kBlockSizeMs);
- CHECK(audio_source_->Read(input_block_size_samples_, input_frame_.data_));
+ RTC_CHECK(
+ audio_source_->Read(input_block_size_samples_, input_frame_.data_));
if (input_frame_.num_channels_ > 1) {
InputAudioFile::DuplicateInterleaved(input_frame_.data_,
input_block_size_samples_,
diff --git a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_send_test.h b/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_send_test.h
index 4c4db5bd13b..b0d26ba63b2 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_send_test.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_send_test.h
@@ -63,7 +63,7 @@ class AcmSendTest : public AudioPacketizationCallback, public PacketSource {
rtc::scoped_ptr<AudioCoding> acm_;
InputAudioFile* audio_source_;
int source_rate_hz_;
- const int input_block_size_samples_;
+ const size_t input_block_size_samples_;
AudioFrame input_frame_;
bool codec_registered_;
int test_duration_ms_;
@@ -74,7 +74,7 @@ class AcmSendTest : public AudioPacketizationCallback, public PacketSource {
uint16_t sequence_number_;
std::vector<uint8_t> last_payload_vec_;
- DISALLOW_COPY_AND_ASSIGN(AcmSendTest);
+ RTC_DISALLOW_COPY_AND_ASSIGN(AcmSendTest);
};
} // namespace test
diff --git a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_send_test_oldapi.cc b/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_send_test_oldapi.cc
index 1819d59d96d..7e2a3c6b6e7 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_send_test_oldapi.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_send_test_oldapi.cc
@@ -31,7 +31,8 @@ AcmSendTestOldApi::AcmSendTestOldApi(InputAudioFile* audio_source,
acm_(webrtc::AudioCodingModule::Create(0, &clock_)),
audio_source_(audio_source),
source_rate_hz_(source_rate_hz),
- input_block_size_samples_(source_rate_hz_ * kBlockSizeMs / 1000),
+ input_block_size_samples_(
+ static_cast<size_t>(source_rate_hz_ * kBlockSizeMs / 1000)),
codec_registered_(false),
test_duration_ms_(test_duration_ms),
frame_type_(kAudioFrameSpeech),
@@ -52,8 +53,8 @@ bool AcmSendTestOldApi::RegisterCodec(const char* payload_name,
int payload_type,
int frame_size_samples) {
CodecInst codec;
- CHECK_EQ(0, AudioCodingModule::Codec(payload_name, &codec, sampling_freq_hz,
- channels));
+ RTC_CHECK_EQ(0, AudioCodingModule::Codec(payload_name, &codec,
+ sampling_freq_hz, channels));
codec.pltype = payload_type;
codec.pacsize = frame_size_samples;
codec_registered_ = (acm_->RegisterSendCodec(codec) == 0);
@@ -64,7 +65,7 @@ bool AcmSendTestOldApi::RegisterCodec(const char* payload_name,
}
bool AcmSendTestOldApi::RegisterExternalCodec(
- AudioEncoderMutable* external_speech_encoder) {
+ AudioEncoder* external_speech_encoder) {
acm_->RegisterExternalSendCodec(external_speech_encoder);
input_frame_.num_channels_ = external_speech_encoder->NumChannels();
assert(input_block_size_samples_ * input_frame_.num_channels_ <=
@@ -83,7 +84,8 @@ Packet* AcmSendTestOldApi::NextPacket() {
// Insert audio and process until one packet is produced.
while (clock_.TimeInMilliseconds() < test_duration_ms_) {
clock_.AdvanceTimeMilliseconds(kBlockSizeMs);
- CHECK(audio_source_->Read(input_block_size_samples_, input_frame_.data_));
+ RTC_CHECK(
+ audio_source_->Read(input_block_size_samples_, input_frame_.data_));
if (input_frame_.num_channels_ > 1) {
InputAudioFile::DuplicateInterleaved(input_frame_.data_,
input_block_size_samples_,
@@ -91,7 +93,7 @@ Packet* AcmSendTestOldApi::NextPacket() {
input_frame_.data_);
}
data_to_send_ = false;
- CHECK_GE(acm_->Add10MsData(input_frame_), 0);
+ RTC_CHECK_GE(acm_->Add10MsData(input_frame_), 0);
input_frame_.timestamp_ += static_cast<uint32_t>(input_block_size_samples_);
if (data_to_send_) {
// Encoded packet received.
diff --git a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_send_test_oldapi.h b/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_send_test_oldapi.h
index 8cdc2989831..7d17cb117d4 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_send_test_oldapi.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/main/acm2/acm_send_test_oldapi.h
@@ -20,7 +20,7 @@
#include "webrtc/system_wrappers/interface/clock.h"
namespace webrtc {
-class AudioEncoderMutable;
+class AudioEncoder;
namespace test {
class InputAudioFile;
@@ -42,7 +42,7 @@ class AcmSendTestOldApi : public AudioPacketizationCallback,
int frame_size_samples);
// Registers an external send codec. Returns true on success, false otherwise.
- bool RegisterExternalCodec(AudioEncoderMutable* external_speech_encoder);
+ bool RegisterExternalCodec(AudioEncoder* external_speech_encoder);
// Returns the next encoded packet. Returns NULL if the test duration was
// exceeded. Ownership of the packet is handed over to the caller.
@@ -71,7 +71,7 @@ class AcmSendTestOldApi : public AudioPacketizationCallback,
rtc::scoped_ptr<AudioCodingModule> acm_;
InputAudioFile* audio_source_;
int source_rate_hz_;
- const int input_block_size_samples_;
+ const size_t input_block_size_samples_;
AudioFrame input_frame_;
bool codec_registered_;
int test_duration_ms_;
@@ -83,7 +83,7 @@ class AcmSendTestOldApi : public AudioPacketizationCallback,
std::vector<uint8_t> last_payload_vec_;
bool data_to_send_;
- DISALLOW_COPY_AND_ASSIGN(AcmSendTestOldApi);
+ RTC_DISALLOW_COPY_AND_ASSIGN(AcmSendTestOldApi);
};
} // namespace test
diff --git a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/audio_coding_module.cc b/chromium/third_party/webrtc/modules/audio_coding/main/acm2/audio_coding_module.cc
index 51b9a788961..f502eb3f8c0 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/audio_coding_module.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/main/acm2/audio_coding_module.cc
@@ -105,7 +105,6 @@ AudioCoding::Config::Config()
clock(Clock::GetRealTimeClock()),
transport(nullptr),
vad_callback(nullptr),
- play_dtmf(true),
initial_playout_delay_ms(0),
playout_channels(1),
playout_frequency_hz(32000) {
diff --git a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.cc b/chromium/third_party/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.cc
index 9c3183271b7..c5d9d3acc75 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.cc
@@ -32,29 +32,6 @@ namespace webrtc {
namespace acm2 {
-enum {
- kACMToneEnd = 999
-};
-
-// Maximum number of bytes in one packet (PCM16B, 20 ms packets, stereo).
-enum {
- kMaxPacketSize = 2560
-};
-
-// Maximum number of payloads that can be packed in one RED packet. For
-// regular RED, we only pack two payloads. In case of dual-streaming, in worst
-// case we might pack 3 payloads in one RED packet.
-enum {
- kNumRedFragmentationVectors = 2,
- kMaxNumFragmentationVectors = 3
-};
-
-// If packet N is arrived all packets prior to N - |kNackThresholdPackets| which
-// are not received are considered as lost, and appear in NACK list.
-enum {
- kNackThresholdPackets = 2
-};
-
namespace {
// TODO(turajs): the same functionality is used in NetEq. If both classes
@@ -76,22 +53,24 @@ bool IsCodecCN(int index) {
}
// Stereo-to-mono can be used as in-place.
-int DownMix(const AudioFrame& frame, int length_out_buff, int16_t* out_buff) {
+int DownMix(const AudioFrame& frame,
+ size_t length_out_buff,
+ int16_t* out_buff) {
if (length_out_buff < frame.samples_per_channel_) {
return -1;
}
- for (int n = 0; n < frame.samples_per_channel_; ++n)
+ for (size_t n = 0; n < frame.samples_per_channel_; ++n)
out_buff[n] = (frame.data_[2 * n] + frame.data_[2 * n + 1]) >> 1;
return 0;
}
// Mono-to-stereo can be used as in-place.
-int UpMix(const AudioFrame& frame, int length_out_buff, int16_t* out_buff) {
+int UpMix(const AudioFrame& frame, size_t length_out_buff, int16_t* out_buff) {
if (length_out_buff < frame.samples_per_channel_) {
return -1;
}
- for (int n = frame.samples_per_channel_; n > 0; --n) {
- int i = n - 1;
+ for (size_t n = frame.samples_per_channel_; n != 0; --n) {
+ size_t i = n - 1;
int16_t sample = frame.data_[i];
out_buff[2 * i + 1] = sample;
out_buff[2 * i] = sample;
@@ -139,7 +118,6 @@ AudioCodingModuleImpl::AudioCodingModuleImpl(
receiver_(config),
bitrate_logger_("WebRTC.Audio.TargetBitrateInKbps"),
previous_pltype_(255),
- aux_rtp_header_(NULL),
receiver_initialized_(false),
first_10ms_data_(false),
first_frame_(true),
@@ -153,23 +131,9 @@ AudioCodingModuleImpl::AudioCodingModuleImpl(
WEBRTC_TRACE(webrtc::kTraceMemory, webrtc::kTraceAudioCoding, id_, "Created");
}
-AudioCodingModuleImpl::~AudioCodingModuleImpl() {
- if (aux_rtp_header_ != NULL) {
- delete aux_rtp_header_;
- aux_rtp_header_ = NULL;
- }
-
- delete callback_crit_sect_;
- callback_crit_sect_ = NULL;
-
- delete acm_crit_sect_;
- acm_crit_sect_ = NULL;
- WEBRTC_TRACE(webrtc::kTraceMemory, webrtc::kTraceAudioCoding, id_,
- "Destroyed");
-}
+AudioCodingModuleImpl::~AudioCodingModuleImpl() = default;
int32_t AudioCodingModuleImpl::Encode(const InputData& input_data) {
- uint8_t stream[2 * MAX_PAYLOAD_SIZE_BYTE]; // Make room for 1 RED payload.
AudioEncoder::EncodedInfo encoded_info;
uint8_t previous_pltype;
@@ -191,11 +155,13 @@ int32_t AudioCodingModuleImpl::Encode(const InputData& input_data) {
last_rtp_timestamp_ = rtp_timestamp;
first_frame_ = false;
- encoded_info = audio_encoder->Encode(rtp_timestamp, input_data.audio,
- input_data.length_per_channel,
- sizeof(stream), stream);
+ encode_buffer_.SetSize(audio_encoder->MaxEncodedBytes());
+ encoded_info = audio_encoder->Encode(
+ rtp_timestamp, input_data.audio, input_data.length_per_channel,
+ encode_buffer_.size(), encode_buffer_.data());
+ encode_buffer_.SetSize(encoded_info.encoded_bytes);
bitrate_logger_.MaybeLog(audio_encoder->GetTargetBitrate() / 1000);
- if (encoded_info.encoded_bytes == 0 && !encoded_info.send_even_if_empty) {
+ if (encode_buffer_.size() == 0 && !encoded_info.send_even_if_empty) {
// Not enough data.
return 0;
}
@@ -204,20 +170,20 @@ int32_t AudioCodingModuleImpl::Encode(const InputData& input_data) {
RTPFragmentationHeader my_fragmentation;
ConvertEncodedInfoToFragmentationHeader(encoded_info, &my_fragmentation);
FrameType frame_type;
- if (encoded_info.encoded_bytes == 0 && encoded_info.send_even_if_empty) {
+ if (encode_buffer_.size() == 0 && encoded_info.send_even_if_empty) {
frame_type = kFrameEmpty;
encoded_info.payload_type = previous_pltype;
} else {
- DCHECK_GT(encoded_info.encoded_bytes, 0u);
+ RTC_DCHECK_GT(encode_buffer_.size(), 0u);
frame_type = encoded_info.speech ? kAudioFrameSpeech : kAudioFrameCN;
}
{
- CriticalSectionScoped lock(callback_crit_sect_);
+ CriticalSectionScoped lock(callback_crit_sect_.get());
if (packetization_callback_) {
packetization_callback_->SendData(
frame_type, encoded_info.payload_type, encoded_info.encoded_timestamp,
- stream, encoded_info.encoded_bytes,
+ encode_buffer_.data(), encode_buffer_.size(),
my_fragmentation.fragmentationVectorSize > 0 ? &my_fragmentation
: nullptr);
}
@@ -228,37 +194,28 @@ int32_t AudioCodingModuleImpl::Encode(const InputData& input_data) {
}
}
previous_pltype_ = encoded_info.payload_type;
- return static_cast<int32_t>(encoded_info.encoded_bytes);
+ return static_cast<int32_t>(encode_buffer_.size());
}
/////////////////////////////////////////
// Sender
//
-// TODO(henrik.lundin): Remove this method; only used in tests.
-int AudioCodingModuleImpl::ResetEncoder() {
- CriticalSectionScoped lock(acm_crit_sect_);
- if (!HaveValidEncoder("ResetEncoder")) {
- return -1;
- }
- return 0;
-}
-
// Can be called multiple times for Codec, CNG, RED.
int AudioCodingModuleImpl::RegisterSendCodec(const CodecInst& send_codec) {
- CriticalSectionScoped lock(acm_crit_sect_);
+ CriticalSectionScoped lock(acm_crit_sect_.get());
return codec_manager_.RegisterEncoder(send_codec);
}
void AudioCodingModuleImpl::RegisterExternalSendCodec(
- AudioEncoderMutable* external_speech_encoder) {
- CriticalSectionScoped lock(acm_crit_sect_);
+ AudioEncoder* external_speech_encoder) {
+ CriticalSectionScoped lock(acm_crit_sect_.get());
codec_manager_.RegisterEncoder(external_speech_encoder);
}
// Get current send codec.
int AudioCodingModuleImpl::SendCodec(CodecInst* current_codec) const {
- CriticalSectionScoped lock(acm_crit_sect_);
+ CriticalSectionScoped lock(acm_crit_sect_.get());
return codec_manager_.GetCodecInst(current_codec);
}
@@ -266,7 +223,7 @@ int AudioCodingModuleImpl::SendCodec(CodecInst* current_codec) const {
int AudioCodingModuleImpl::SendFrequency() const {
WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, id_,
"SendFrequency()");
- CriticalSectionScoped lock(acm_crit_sect_);
+ CriticalSectionScoped lock(acm_crit_sect_.get());
if (!codec_manager_.CurrentEncoder()) {
WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, id_,
@@ -277,53 +234,18 @@ int AudioCodingModuleImpl::SendFrequency() const {
return codec_manager_.CurrentEncoder()->SampleRateHz();
}
-// Get encode bitrate.
-// Adaptive rate codecs return their current encode target rate, while other
-// codecs return there longterm avarage or their fixed rate.
-// TODO(henrik.lundin): Remove; not used.
-int AudioCodingModuleImpl::SendBitrate() const {
- FATAL() << "Deprecated";
- // This return statement is required to workaround a bug in VS2013 Update 4
- // when turning on the whole program optimizations. Without hit the linker
- // will hang because it doesn't seem to find an exit path for this function.
- // This is likely a bug in link.exe and would probably be fixed in VS2015.
- return -1;
- // CriticalSectionScoped lock(acm_crit_sect_);
- //
- // if (!codec_manager_.current_encoder()) {
- // WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, id_,
- // "SendBitrate Failed, no codec is registered");
- // return -1;
- // }
- //
- // WebRtcACMCodecParams encoder_param;
- // codec_manager_.current_encoder()->EncoderParams(&encoder_param);
- //
- // return encoder_param.codec_inst.rate;
-}
-
void AudioCodingModuleImpl::SetBitRate(int bitrate_bps) {
- CriticalSectionScoped lock(acm_crit_sect_);
+ CriticalSectionScoped lock(acm_crit_sect_.get());
if (codec_manager_.CurrentEncoder()) {
codec_manager_.CurrentEncoder()->SetTargetBitrate(bitrate_bps);
}
}
-// Set available bandwidth, inform the encoder about the estimated bandwidth
-// received from the remote party.
-// TODO(henrik.lundin): Remove; not used.
-int AudioCodingModuleImpl::SetReceivedEstimatedBandwidth(int bw) {
- CriticalSectionScoped lock(acm_crit_sect_);
- FATAL() << "Dead code?";
- return -1;
-// return codecs_[current_send_codec_idx_]->SetEstimatedBandwidth(bw);
-}
-
// Register a transport callback which will be called to deliver
// the encoded buffers.
int AudioCodingModuleImpl::RegisterTransportCallback(
AudioPacketizationCallback* transport) {
- CriticalSectionScoped lock(callback_crit_sect_);
+ CriticalSectionScoped lock(callback_crit_sect_.get());
packetization_callback_ = transport;
return 0;
}
@@ -331,18 +253,17 @@ int AudioCodingModuleImpl::RegisterTransportCallback(
// Add 10MS of raw (PCM) audio data to the encoder.
int AudioCodingModuleImpl::Add10MsData(const AudioFrame& audio_frame) {
InputData input_data;
- CriticalSectionScoped lock(acm_crit_sect_);
+ CriticalSectionScoped lock(acm_crit_sect_.get());
int r = Add10MsDataInternal(audio_frame, &input_data);
return r < 0 ? r : Encode(input_data);
}
int AudioCodingModuleImpl::Add10MsDataInternal(const AudioFrame& audio_frame,
InputData* input_data) {
- if (audio_frame.samples_per_channel_ <= 0) {
+ if (audio_frame.samples_per_channel_ == 0) {
assert(false);
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
- "Cannot Add 10 ms audio, payload length is negative or "
- "zero");
+ "Cannot Add 10 ms audio, payload length is zero");
return -1;
}
@@ -354,7 +275,7 @@ int AudioCodingModuleImpl::Add10MsDataInternal(const AudioFrame& audio_frame,
}
// If the length and frequency matches. We currently just support raw PCM.
- if ((audio_frame.sample_rate_hz_ / 100) !=
+ if (static_cast<size_t>(audio_frame.sample_rate_hz_ / 100) !=
audio_frame.samples_per_channel_) {
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
"Cannot Add 10 ms audio, input frequency and length doesn't"
@@ -477,17 +398,19 @@ int AudioCodingModuleImpl::PreprocessToAddData(const AudioFrame& in_frame,
// The result of the resampler is written to output frame.
dest_ptr_audio = preprocess_frame_.data_;
- preprocess_frame_.samples_per_channel_ = resampler_.Resample10Msec(
+ int samples_per_channel = resampler_.Resample10Msec(
src_ptr_audio, in_frame.sample_rate_hz_,
codec_manager_.CurrentEncoder()->SampleRateHz(),
preprocess_frame_.num_channels_, AudioFrame::kMaxDataSizeSamples,
dest_ptr_audio);
- if (preprocess_frame_.samples_per_channel_ < 0) {
+ if (samples_per_channel < 0) {
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
"Cannot add 10 ms audio, resampling failed");
return -1;
}
+ preprocess_frame_.samples_per_channel_ =
+ static_cast<size_t>(samples_per_channel);
preprocess_frame_.sample_rate_hz_ =
codec_manager_.CurrentEncoder()->SampleRateHz();
}
@@ -504,7 +427,7 @@ int AudioCodingModuleImpl::PreprocessToAddData(const AudioFrame& in_frame,
//
bool AudioCodingModuleImpl::REDStatus() const {
- CriticalSectionScoped lock(acm_crit_sect_);
+ CriticalSectionScoped lock(acm_crit_sect_.get());
return codec_manager_.red_enabled();
}
@@ -512,7 +435,7 @@ bool AudioCodingModuleImpl::REDStatus() const {
int AudioCodingModuleImpl::SetREDStatus(
#ifdef WEBRTC_CODEC_RED
bool enable_red) {
- CriticalSectionScoped lock(acm_crit_sect_);
+ CriticalSectionScoped lock(acm_crit_sect_.get());
return codec_manager_.SetCopyRed(enable_red) ? 0 : -1;
#else
bool /* enable_red */) {
@@ -527,20 +450,20 @@ int AudioCodingModuleImpl::SetREDStatus(
//
bool AudioCodingModuleImpl::CodecFEC() const {
- CriticalSectionScoped lock(acm_crit_sect_);
+ CriticalSectionScoped lock(acm_crit_sect_.get());
return codec_manager_.codec_fec_enabled();
}
int AudioCodingModuleImpl::SetCodecFEC(bool enable_codec_fec) {
- CriticalSectionScoped lock(acm_crit_sect_);
+ CriticalSectionScoped lock(acm_crit_sect_.get());
return codec_manager_.SetCodecFEC(enable_codec_fec);
}
int AudioCodingModuleImpl::SetPacketLossRate(int loss_rate) {
- CriticalSectionScoped lock(acm_crit_sect_);
+ CriticalSectionScoped lock(acm_crit_sect_.get());
if (HaveValidEncoder("SetPacketLossRate")) {
- codec_manager_.CurrentSpeechEncoder()->SetProjectedPacketLossRate(
- loss_rate / 100.0);
+ codec_manager_.CurrentEncoder()->SetProjectedPacketLossRate(loss_rate /
+ 100.0);
}
return 0;
}
@@ -552,15 +475,15 @@ int AudioCodingModuleImpl::SetVAD(bool enable_dtx,
bool enable_vad,
ACMVADMode mode) {
// Note: |enable_vad| is not used; VAD is enabled based on the DTX setting.
- DCHECK_EQ(enable_dtx, enable_vad);
- CriticalSectionScoped lock(acm_crit_sect_);
+ RTC_DCHECK_EQ(enable_dtx, enable_vad);
+ CriticalSectionScoped lock(acm_crit_sect_.get());
return codec_manager_.SetVAD(enable_dtx, mode);
}
// Get VAD/DTX settings.
int AudioCodingModuleImpl::VAD(bool* dtx_enabled, bool* vad_enabled,
ACMVADMode* mode) const {
- CriticalSectionScoped lock(acm_crit_sect_);
+ CriticalSectionScoped lock(acm_crit_sect_.get());
codec_manager_.VAD(dtx_enabled, vad_enabled, mode);
return 0;
}
@@ -570,7 +493,7 @@ int AudioCodingModuleImpl::VAD(bool* dtx_enabled, bool* vad_enabled,
//
int AudioCodingModuleImpl::InitializeReceiver() {
- CriticalSectionScoped lock(acm_crit_sect_);
+ CriticalSectionScoped lock(acm_crit_sect_.get());
return InitializeReceiverSafe();
}
@@ -605,21 +528,12 @@ int AudioCodingModuleImpl::InitializeReceiverSafe() {
return 0;
}
-// TODO(turajs): If NetEq opens an API for reseting the state of decoders then
-// implement this method. Otherwise it should be removed. I might be that by
-// removing and registering a decoder we can achieve the effect of resetting.
-// Reset the decoder state.
-// TODO(henrik.lundin): Remove; only used in one test, and does nothing.
-int AudioCodingModuleImpl::ResetDecoder() {
- return 0;
-}
-
// Get current receive frequency.
int AudioCodingModuleImpl::ReceiveFrequency() const {
WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, id_,
"ReceiveFrequency()");
- CriticalSectionScoped lock(acm_crit_sect_);
+ CriticalSectionScoped lock(acm_crit_sect_.get());
int codec_id = receiver_.last_audio_codec_id();
@@ -632,7 +546,7 @@ int AudioCodingModuleImpl::PlayoutFrequency() const {
WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, id_,
"PlayoutFrequency()");
- CriticalSectionScoped lock(acm_crit_sect_);
+ CriticalSectionScoped lock(acm_crit_sect_.get());
return receiver_.current_sample_rate_hz();
}
@@ -640,8 +554,8 @@ int AudioCodingModuleImpl::PlayoutFrequency() const {
// Register possible receive codecs, can be called multiple times,
// for codecs, CNG (NB, WB and SWB), DTMF, RED.
int AudioCodingModuleImpl::RegisterReceiveCodec(const CodecInst& codec) {
- CriticalSectionScoped lock(acm_crit_sect_);
- DCHECK(receiver_initialized_);
+ CriticalSectionScoped lock(acm_crit_sect_.get());
+ RTC_DCHECK(receiver_initialized_);
if (codec.channels > 2 || codec.channels < 0) {
LOG_F(LS_ERROR) << "Unsupported number of channels: " << codec.channels;
return -1;
@@ -667,9 +581,32 @@ int AudioCodingModuleImpl::RegisterReceiveCodec(const CodecInst& codec) {
codec_manager_.GetAudioDecoder(codec));
}
+int AudioCodingModuleImpl::RegisterExternalReceiveCodec(
+ int rtp_payload_type,
+ AudioDecoder* external_decoder,
+ int sample_rate_hz,
+ int num_channels) {
+ CriticalSectionScoped lock(acm_crit_sect_.get());
+ RTC_DCHECK(receiver_initialized_);
+ if (num_channels > 2 || num_channels < 0) {
+ LOG_F(LS_ERROR) << "Unsupported number of channels: " << num_channels;
+ return -1;
+ }
+
+ // Check if the payload-type is valid.
+ if (!ACMCodecDB::ValidPayloadType(rtp_payload_type)) {
+ LOG_F(LS_ERROR) << "Invalid payload-type " << rtp_payload_type
+ << " for external decoder.";
+ return -1;
+ }
+
+ return receiver_.AddCodec(-1 /* external */, rtp_payload_type, num_channels,
+ sample_rate_hz, external_decoder);
+}
+
// Get current received codec.
int AudioCodingModuleImpl::ReceiveCodec(CodecInst* current_codec) const {
- CriticalSectionScoped lock(acm_crit_sect_);
+ CriticalSectionScoped lock(acm_crit_sect_.get());
return receiver_.LastAudioCodec(current_codec);
}
@@ -699,33 +636,6 @@ int AudioCodingModuleImpl::SetMaximumPlayoutDelay(int time_ms) {
return receiver_.SetMaximumDelay(time_ms);
}
-// Estimate the Bandwidth based on the incoming stream, needed for one way
-// audio where the RTCP send the BW estimate.
-// This is also done in the RTP module.
-int AudioCodingModuleImpl::DecoderEstimatedBandwidth() const {
- // We can estimate far-end to near-end bandwidth if the iSAC are sent. Check
- // if the last received packets were iSAC packet then retrieve the bandwidth.
- int last_audio_codec_id = receiver_.last_audio_codec_id();
- if (last_audio_codec_id >= 0 &&
- STR_CASE_CMP("ISAC", ACMCodecDB::database_[last_audio_codec_id].plname)) {
- CriticalSectionScoped lock(acm_crit_sect_);
- FATAL() << "Dead code?";
-// return codecs_[last_audio_codec_id]->GetEstimatedBandwidth();
- }
- return -1;
-}
-
-// Set playout mode for: voice, fax, streaming or off.
-int AudioCodingModuleImpl::SetPlayoutMode(AudioPlayoutMode mode) {
- receiver_.SetPlayoutMode(mode);
- return 0; // TODO(turajs): return value is for backward compatibility.
-}
-
-// Get playout mode voice, fax, streaming or off.
-AudioPlayoutMode AudioCodingModuleImpl::PlayoutMode() const {
- return receiver_.PlayoutMode();
-}
-
// Get 10 milliseconds of raw audio data to play out.
// Automatic resample to the requested frequency.
int AudioCodingModuleImpl::PlayoutData10Ms(int desired_freq_hz,
@@ -736,7 +646,6 @@ int AudioCodingModuleImpl::PlayoutData10Ms(int desired_freq_hz,
"PlayoutData failed, RecOut Failed");
return -1;
}
-
audio_frame->id_ = id_;
return 0;
}
@@ -755,22 +664,24 @@ int AudioCodingModuleImpl::GetNetworkStatistics(NetworkStatistics* statistics) {
int AudioCodingModuleImpl::RegisterVADCallback(ACMVADCallback* vad_callback) {
WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceAudioCoding, id_,
"RegisterVADCallback()");
- CriticalSectionScoped lock(callback_crit_sect_);
+ CriticalSectionScoped lock(callback_crit_sect_.get());
vad_callback_ = vad_callback;
return 0;
}
-// TODO(tlegrand): Modify this function to work for stereo, and add tests.
+// TODO(kwiberg): Remove this method, and have callers call IncomingPacket
+// instead. The translation logic and state belong with them, not with
+// AudioCodingModuleImpl.
int AudioCodingModuleImpl::IncomingPayload(const uint8_t* incoming_payload,
size_t payload_length,
uint8_t payload_type,
uint32_t timestamp) {
// We are not acquiring any lock when interacting with |aux_rtp_header_| no
// other method uses this member variable.
- if (aux_rtp_header_ == NULL) {
+ if (!aux_rtp_header_) {
// This is the first time that we are using |dummy_rtp_header_|
// so we have to create it.
- aux_rtp_header_ = new WebRtcRTPHeader;
+ aux_rtp_header_.reset(new WebRtcRTPHeader);
aux_rtp_header_->header.payloadType = payload_type;
// Don't matter in this case.
aux_rtp_header_->header.ssrc = 0;
@@ -787,124 +698,58 @@ int AudioCodingModuleImpl::IncomingPayload(const uint8_t* incoming_payload,
return 0;
}
-int AudioCodingModuleImpl::ReplaceInternalDTXWithWebRtc(bool use_webrtc_dtx) {
- CriticalSectionScoped lock(acm_crit_sect_);
-
- if (!HaveValidEncoder("ReplaceInternalDTXWithWebRtc")) {
- WEBRTC_TRACE(
- webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
- "Cannot replace codec internal DTX when no send codec is registered.");
- return -1;
- }
-
- FATAL() << "Dead code?";
-// int res = codecs_[current_send_codec_idx_]->ReplaceInternalDTX(
-// use_webrtc_dtx);
- // Check if VAD is turned on, or if there is any error.
-// if (res == 1) {
-// vad_enabled_ = true;
-// } else if (res < 0) {
-// WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
-// "Failed to set ReplaceInternalDTXWithWebRtc(%d)",
-// use_webrtc_dtx);
-// return res;
-// }
-
- return 0;
-}
-
-int AudioCodingModuleImpl::IsInternalDTXReplacedWithWebRtc(
- bool* uses_webrtc_dtx) {
- *uses_webrtc_dtx = true;
- return 0;
-}
-
-// TODO(henrik.lundin): Remove? Only used in tests. Deprecated in VoiceEngine.
-int AudioCodingModuleImpl::SetISACMaxRate(int max_bit_per_sec) {
- CriticalSectionScoped lock(acm_crit_sect_);
-
- if (!HaveValidEncoder("SetISACMaxRate")) {
- return -1;
- }
-
- codec_manager_.CurrentSpeechEncoder()->SetMaxRate(max_bit_per_sec);
- return 0;
-}
-
-// TODO(henrik.lundin): Remove? Only used in tests. Deprecated in VoiceEngine.
-int AudioCodingModuleImpl::SetISACMaxPayloadSize(int max_size_bytes) {
- CriticalSectionScoped lock(acm_crit_sect_);
-
- if (!HaveValidEncoder("SetISACMaxPayloadSize")) {
- return -1;
- }
-
- codec_manager_.CurrentSpeechEncoder()->SetMaxPayloadSize(max_size_bytes);
- return 0;
-}
-
-// TODO(henrik.lundin): Remove? Only used in tests.
-int AudioCodingModuleImpl::ConfigISACBandwidthEstimator(
- int frame_size_ms,
- int rate_bit_per_sec,
- bool enforce_frame_size) {
- CriticalSectionScoped lock(acm_crit_sect_);
-
- if (!HaveValidEncoder("ConfigISACBandwidthEstimator")) {
- return -1;
- }
-
- FATAL() << "Dead code?";
- return -1;
-// return codecs_[current_send_codec_idx_]->ConfigISACBandwidthEstimator(
-// frame_size_ms, rate_bit_per_sec, enforce_frame_size);
-}
-
int AudioCodingModuleImpl::SetOpusApplication(OpusApplicationMode application) {
- CriticalSectionScoped lock(acm_crit_sect_);
+ CriticalSectionScoped lock(acm_crit_sect_.get());
if (!HaveValidEncoder("SetOpusApplication")) {
return -1;
}
- AudioEncoderMutable::Application app;
+ if (!codec_manager_.CurrentEncoderIsOpus())
+ return -1;
+ AudioEncoder::Application app;
switch (application) {
case kVoip:
- app = AudioEncoderMutable::kApplicationSpeech;
+ app = AudioEncoder::Application::kSpeech;
break;
case kAudio:
- app = AudioEncoderMutable::kApplicationAudio;
+ app = AudioEncoder::Application::kAudio;
break;
default:
FATAL();
return 0;
}
- return codec_manager_.CurrentSpeechEncoder()->SetApplication(app) ? 0 : -1;
+ return codec_manager_.CurrentEncoder()->SetApplication(app) ? 0 : -1;
}
// Informs Opus encoder of the maximum playback rate the receiver will render.
int AudioCodingModuleImpl::SetOpusMaxPlaybackRate(int frequency_hz) {
- CriticalSectionScoped lock(acm_crit_sect_);
+ CriticalSectionScoped lock(acm_crit_sect_.get());
if (!HaveValidEncoder("SetOpusMaxPlaybackRate")) {
return -1;
}
- return codec_manager_.CurrentSpeechEncoder()->SetMaxPlaybackRate(frequency_hz)
- ? 0
- : -1;
+ if (!codec_manager_.CurrentEncoderIsOpus())
+ return -1;
+ codec_manager_.CurrentEncoder()->SetMaxPlaybackRate(frequency_hz);
+ return 0;
}
int AudioCodingModuleImpl::EnableOpusDtx() {
- CriticalSectionScoped lock(acm_crit_sect_);
+ CriticalSectionScoped lock(acm_crit_sect_.get());
if (!HaveValidEncoder("EnableOpusDtx")) {
return -1;
}
- return codec_manager_.CurrentSpeechEncoder()->SetDtx(true) ? 0 : -1;
+ if (!codec_manager_.CurrentEncoderIsOpus())
+ return -1;
+ return codec_manager_.CurrentEncoder()->SetDtx(true) ? 0 : -1;
}
int AudioCodingModuleImpl::DisableOpusDtx() {
- CriticalSectionScoped lock(acm_crit_sect_);
+ CriticalSectionScoped lock(acm_crit_sect_.get());
if (!HaveValidEncoder("DisableOpusDtx")) {
return -1;
}
- return codec_manager_.CurrentSpeechEncoder()->SetDtx(false) ? 0 : -1;
+ if (!codec_manager_.CurrentEncoderIsOpus())
+ return -1;
+ return codec_manager_.CurrentEncoder()->SetDtx(false) ? 0 : -1;
}
int AudioCodingModuleImpl::PlayoutTimestamp(uint32_t* timestamp) {
@@ -924,29 +769,9 @@ int AudioCodingModuleImpl::UnregisterReceiveCodec(uint8_t payload_type) {
return receiver_.RemoveCodec(payload_type);
}
-// TODO(turajs): correct the type of |length_bytes| when it is corrected in
-// GenericCodec.
-int AudioCodingModuleImpl::REDPayloadISAC(int isac_rate,
- int isac_bw_estimate,
- uint8_t* payload,
- int16_t* length_bytes) {
- CriticalSectionScoped lock(acm_crit_sect_);
- if (!HaveValidEncoder("EncodeData")) {
- return -1;
- }
- FATAL() << "Dead code?";
- return -1;
-// int status;
-// status = codecs_[current_send_codec_idx_]->REDPayloadISAC(isac_rate,
-// isac_bw_estimate,
-// payload,
-// length_bytes);
-// return status;
-}
-
int AudioCodingModuleImpl::SetInitialPlayoutDelay(int delay_ms) {
{
- CriticalSectionScoped lock(acm_crit_sect_);
+ CriticalSectionScoped lock(acm_crit_sect_.get());
// Initialize receiver, if it is not initialized. Otherwise, initial delay
// is reset upon initialization of the receiver.
if (!receiver_initialized_)
@@ -955,14 +780,6 @@ int AudioCodingModuleImpl::SetInitialPlayoutDelay(int delay_ms) {
return receiver_.SetInitialDelay(delay_ms);
}
-int AudioCodingModuleImpl::SetDtmfPlayoutStatus(bool enable) {
- return 0;
-}
-
-bool AudioCodingModuleImpl::DtmfPlayoutStatus() const {
- return true;
-}
-
int AudioCodingModuleImpl::EnableNack(size_t max_nack_list_size) {
return receiver_.EnableNack(max_nack_list_size);
}
@@ -992,7 +809,6 @@ AudioCodingImpl::AudioCodingImpl(const Config& config) {
acm_old_.reset(new acm2::AudioCodingModuleImpl(config_old));
acm_old_->RegisterTransportCallback(config.transport);
acm_old_->RegisterVADCallback(config.vad_callback);
- acm_old_->SetDtmfPlayoutStatus(config.play_dtmf);
if (config.initial_playout_delay_ms > 0) {
acm_old_->SetInitialPlayoutDelay(config.initial_playout_delay_ms);
}
@@ -1040,7 +856,7 @@ const CodecInst* AudioCodingImpl::GetSenderCodecInst() {
int AudioCodingImpl::Add10MsAudio(const AudioFrame& audio_frame) {
acm2::AudioCodingModuleImpl::InputData input_data;
- CriticalSectionScoped lock(acm_old_->acm_crit_sect_);
+ CriticalSectionScoped lock(acm_old_->acm_crit_sect_.get());
if (acm_old_->Add10MsDataInternal(audio_frame, &input_data) != 0)
return -1;
return acm_old_->Encode(input_data);
@@ -1149,7 +965,6 @@ bool AudioCodingImpl::MapCodecTypeToParameters(int codec_type,
int* sample_rate_hz,
int* channels) {
switch (codec_type) {
-#ifdef WEBRTC_CODEC_PCM16
case acm2::ACMCodecDB::kPCM16B:
*codec_name = "L16";
*sample_rate_hz = 8000;
@@ -1180,7 +995,6 @@ bool AudioCodingImpl::MapCodecTypeToParameters(int codec_type,
*sample_rate_hz = 32000;
*channels = 2;
break;
-#endif
#if (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX))
case acm2::ACMCodecDB::kISAC:
*codec_name = "ISAC";
@@ -1266,13 +1080,11 @@ bool AudioCodingImpl::MapCodecTypeToParameters(int codec_type,
*sample_rate_hz = 8000;
*channels = 1;
break;
-#ifdef WEBRTC_CODEC_AVT
case acm2::ACMCodecDB::kAVT:
*codec_name = "telephone-event";
*sample_rate_hz = 8000;
*channels = 1;
break;
-#endif
default:
FATAL() << "Codec type " << codec_type << " not supported.";
}
diff --git a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.h b/chromium/third_party/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.h
index 19ca01bf9ed..fe9215bac0a 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.h
@@ -13,6 +13,7 @@
#include <vector>
+#include "webrtc/base/buffer.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/base/thread_annotations.h"
#include "webrtc/common_types.h"
@@ -29,9 +30,7 @@ class AudioCodingImpl;
namespace acm2 {
-class ACMDTMFDetection;
-
-class AudioCodingModuleImpl : public AudioCodingModule {
+class AudioCodingModuleImpl final : public AudioCodingModule {
public:
friend webrtc::AudioCodingImpl;
@@ -42,14 +41,11 @@ class AudioCodingModuleImpl : public AudioCodingModule {
// Sender
//
- // Reset send codec.
- int ResetEncoder() override;
-
// Can be called multiple times for Codec, CNG, RED.
int RegisterSendCodec(const CodecInst& send_codec) override;
void RegisterExternalSendCodec(
- AudioEncoderMutable* external_speech_encoder) override;
+ AudioEncoder* external_speech_encoder) override;
// Get current send codec.
int SendCodec(CodecInst* current_codec) const override;
@@ -57,20 +53,11 @@ class AudioCodingModuleImpl : public AudioCodingModule {
// Get current send frequency.
int SendFrequency() const override;
- // Get encode bit-rate.
- // Adaptive rate codecs return their current encode target rate, while other
- // codecs return there long-term average or their fixed rate.
- int SendBitrate() const override;
-
// Sets the bitrate to the specified value in bits/sec. In case the codec does
// not support the requested value it will choose an appropriate value
// instead.
void SetBitRate(int bitrate_bps) override;
- // Set available bandwidth, inform the encoder about the
- // estimated bandwidth received from the remote party.
- int SetReceivedEstimatedBandwidth(int bw) override;
-
// Register a transport callback which will be
// called to deliver the encoded buffers.
int RegisterTransportCallback(AudioPacketizationCallback* transport) override;
@@ -124,9 +111,6 @@ class AudioCodingModuleImpl : public AudioCodingModule {
// Initialize receiver, resets codec database etc.
int InitializeReceiver() override;
- // Reset the decoder state.
- int ResetDecoder() override;
-
// Get current receive frequency.
int ReceiveFrequency() const override;
@@ -137,6 +121,11 @@ class AudioCodingModuleImpl : public AudioCodingModule {
// for codecs, CNG, DTMF, RED.
int RegisterReceiveCodec(const CodecInst& receive_codec) override;
+ int RegisterExternalReceiveCodec(int rtp_payload_type,
+ AudioDecoder* external_decoder,
+ int sample_rate_hz,
+ int num_channels) override;
+
// Get current received codec.
int ReceiveCodec(CodecInst* current_codec) const override;
@@ -165,27 +154,6 @@ class AudioCodingModuleImpl : public AudioCodingModule {
// audio is accumulated in NetEq buffer, then starts decoding payloads.
int SetInitialPlayoutDelay(int delay_ms) override;
- // TODO(turajs): DTMF playout is always activated in NetEq these APIs should
- // be removed, as well as all VoE related APIs and methods.
- //
- // Configure Dtmf playout status i.e on/off playout the incoming outband Dtmf
- // tone.
- int SetDtmfPlayoutStatus(bool enable) override;
-
- // Get Dtmf playout status.
- bool DtmfPlayoutStatus() const override;
-
- // Estimate the Bandwidth based on the incoming stream, needed
- // for one way audio where the RTCP send the BW estimate.
- // This is also done in the RTP module .
- int DecoderEstimatedBandwidth() const override;
-
- // Set playout mode voice, fax.
- int SetPlayoutMode(AudioPlayoutMode mode) override;
-
- // Get playout mode voice, fax.
- AudioPlayoutMode PlayoutMode() const override;
-
// Get playout timestamp.
int PlayoutTimestamp(uint32_t* timestamp) override;
@@ -199,26 +167,6 @@ class AudioCodingModuleImpl : public AudioCodingModule {
int GetNetworkStatistics(NetworkStatistics* statistics) override;
- // GET RED payload for iSAC. The method id called when 'this' ACM is
- // the default ACM.
- // TODO(henrik.lundin) Not used. Remove?
- int REDPayloadISAC(int isac_rate,
- int isac_bw_estimate,
- uint8_t* payload,
- int16_t* length_bytes);
-
- int ReplaceInternalDTXWithWebRtc(bool use_webrtc_dtx) override;
-
- int IsInternalDTXReplacedWithWebRtc(bool* uses_webrtc_dtx) override;
-
- int SetISACMaxRate(int max_bit_per_sec) override;
-
- int SetISACMaxPayloadSize(int max_size_bytes) override;
-
- int ConfigISACBandwidthEstimator(int frame_size_ms,
- int rate_bit_per_sec,
- bool enforce_frame_size = false) override;
-
int SetOpusApplication(OpusApplicationMode application) override;
// If current send codec is Opus, informs it about the maximum playback rate
@@ -243,7 +191,7 @@ class AudioCodingModuleImpl : public AudioCodingModule {
struct InputData {
uint32_t input_timestamp;
const int16_t* audio;
- uint16_t length_per_channel;
+ size_t length_per_channel;
uint8_t audio_channel;
// If a re-mix is required (up or down), this buffer will store a re-mixed
// version of the input.
@@ -295,7 +243,8 @@ class AudioCodingModuleImpl : public AudioCodingModule {
// to |index|.
int UpdateUponReceivingCodec(int index);
- CriticalSectionWrapper* acm_crit_sect_;
+ const rtc::scoped_ptr<CriticalSectionWrapper> acm_crit_sect_;
+ rtc::Buffer encode_buffer_ GUARDED_BY(acm_crit_sect_);
int id_; // TODO(henrik.lundin) Make const.
uint32_t expected_codec_ts_ GUARDED_BY(acm_crit_sect_);
uint32_t expected_in_ts_ GUARDED_BY(acm_crit_sect_);
@@ -313,7 +262,7 @@ class AudioCodingModuleImpl : public AudioCodingModule {
// IMPORTANT: this variable is only used in IncomingPayload(), therefore,
// no lock acquired when interacting with this variable. If it is going to
// be used in other methods, locks need to be taken.
- WebRtcRTPHeader* aux_rtp_header_;
+ rtc::scoped_ptr<WebRtcRTPHeader> aux_rtp_header_;
bool receiver_initialized_ GUARDED_BY(acm_crit_sect_);
@@ -324,7 +273,7 @@ class AudioCodingModuleImpl : public AudioCodingModule {
uint32_t last_timestamp_ GUARDED_BY(acm_crit_sect_);
uint32_t last_rtp_timestamp_ GUARDED_BY(acm_crit_sect_);
- CriticalSectionWrapper* callback_crit_sect_;
+ const rtc::scoped_ptr<CriticalSectionWrapper> callback_crit_sect_;
AudioPacketizationCallback* packetization_callback_
GUARDED_BY(callback_crit_sect_);
ACMVADCallback* vad_callback_ GUARDED_BY(callback_crit_sect_);
diff --git a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/audio_coding_module_unittest.cc b/chromium/third_party/webrtc/modules/audio_coding/main/acm2/audio_coding_module_unittest.cc
index eea51a3489e..c12baf3f404 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/audio_coding_module_unittest.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/main/acm2/audio_coding_module_unittest.cc
@@ -272,7 +272,8 @@ TEST_F(AudioCodingModuleTest, VerifyOutputFrame) {
EXPECT_TRUE(acm_->Get10MsAudio(&audio_frame));
EXPECT_EQ(0u, audio_frame.timestamp_);
EXPECT_GT(audio_frame.num_channels_, 0);
- EXPECT_EQ(kSampleRateHz / 100, audio_frame.samples_per_channel_);
+ EXPECT_EQ(static_cast<size_t>(kSampleRateHz / 100),
+ audio_frame.samples_per_channel_);
EXPECT_EQ(kSampleRateHz, audio_frame.sample_rate_hz_);
}
@@ -503,7 +504,13 @@ class AcmIsacMtTest : public AudioCodingModuleMtTest {
test::AudioLoop audio_loop_;
};
-TEST_F(AcmIsacMtTest, DoTest) {
+#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
+#define IF_ISAC(x) x
+#else
+#define IF_ISAC(x) DISABLED_##x
+#endif
+
+TEST_F(AcmIsacMtTest, IF_ISAC(DoTest)) {
EXPECT_EQ(kEventSignaled, RunTest());
}
@@ -558,13 +565,20 @@ class AcmReceiverBitExactness : public ::testing::Test {
}
};
+#if (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)) && \
+ defined(WEBRTC_CODEC_ILBC) && defined(WEBRTC_CODEC_G722)
+#define IF_ALL_CODECS(x) x
+#else
+#define IF_ALL_CODECS(x) DISABLED_##x
+#endif
+
// Fails Android ARM64. https://code.google.com/p/webrtc/issues/detail?id=4199
#if defined(WEBRTC_ANDROID) && defined(WEBRTC_ARCH_ARM64)
#define MAYBE_8kHzOutput DISABLED_8kHzOutput
#else
#define MAYBE_8kHzOutput 8kHzOutput
#endif
-TEST_F(AcmReceiverBitExactness, MAYBE_8kHzOutput) {
+TEST_F(AcmReceiverBitExactness, IF_ALL_CODECS(MAYBE_8kHzOutput)) {
Run(8000,
PlatformChecksum("dcee98c623b147ebe1b40dd30efa896e",
"adc92e173f908f93b96ba5844209815a",
@@ -577,7 +591,7 @@ TEST_F(AcmReceiverBitExactness, MAYBE_8kHzOutput) {
#else
#define MAYBE_16kHzOutput 16kHzOutput
#endif
-TEST_F(AcmReceiverBitExactness, MAYBE_16kHzOutput) {
+TEST_F(AcmReceiverBitExactness, IF_ALL_CODECS(MAYBE_16kHzOutput)) {
Run(16000,
PlatformChecksum("f790e7a8cce4e2c8b7bb5e0e4c5dac0d",
"8cffa6abcb3e18e33b9d857666dff66a",
@@ -590,7 +604,7 @@ TEST_F(AcmReceiverBitExactness, MAYBE_16kHzOutput) {
#else
#define MAYBE_32kHzOutput 32kHzOutput
#endif
-TEST_F(AcmReceiverBitExactness, MAYBE_32kHzOutput) {
+TEST_F(AcmReceiverBitExactness, IF_ALL_CODECS(MAYBE_32kHzOutput)) {
Run(32000,
PlatformChecksum("306e0d990ee6e92de3fbecc0123ece37",
"3e126fe894720c3f85edadcc91964ba5",
@@ -603,7 +617,7 @@ TEST_F(AcmReceiverBitExactness, MAYBE_32kHzOutput) {
#else
#define MAYBE_48kHzOutput 48kHzOutput
#endif
-TEST_F(AcmReceiverBitExactness, MAYBE_48kHzOutput) {
+TEST_F(AcmReceiverBitExactness, IF_ALL_CODECS(MAYBE_48kHzOutput)) {
Run(48000,
PlatformChecksum("aa7c232f63a67b2a72703593bdd172e0",
"0155665e93067c4e89256b944dd11999",
@@ -769,7 +783,7 @@ class AcmSenderBitExactness : public ::testing::Test,
#else
#define MAYBE_IsacWb30ms IsacWb30ms
#endif
-TEST_F(AcmSenderBitExactness, MAYBE_IsacWb30ms) {
+TEST_F(AcmSenderBitExactness, IF_ISAC(MAYBE_IsacWb30ms)) {
ASSERT_NO_FATAL_FAILURE(SetUpTest(acm2::ACMCodecDB::kISAC, 1, 103, 480, 480));
Run(AcmReceiverBitExactness::PlatformChecksum(
"c7e5bdadfa2871df95639fcc297cf23d",
@@ -789,7 +803,7 @@ TEST_F(AcmSenderBitExactness, MAYBE_IsacWb30ms) {
#else
#define MAYBE_IsacWb60ms IsacWb60ms
#endif
-TEST_F(AcmSenderBitExactness, MAYBE_IsacWb60ms) {
+TEST_F(AcmSenderBitExactness, IF_ISAC(MAYBE_IsacWb60ms)) {
ASSERT_NO_FATAL_FAILURE(SetUpTest(acm2::ACMCodecDB::kISAC, 1, 103, 960, 960));
Run(AcmReceiverBitExactness::PlatformChecksum(
"14d63c5f08127d280e722e3191b73bdd",
@@ -803,7 +817,13 @@ TEST_F(AcmSenderBitExactness, MAYBE_IsacWb60ms) {
test::AcmReceiveTest::kMonoOutput);
}
-TEST_F(AcmSenderBitExactness, DISABLED_ON_ANDROID(IsacSwb30ms)) {
+#ifdef WEBRTC_CODEC_ISAC
+#define IF_ISAC_FLOAT(x) x
+#else
+#define IF_ISAC_FLOAT(x) DISABLED_##x
+#endif
+
+TEST_F(AcmSenderBitExactness, DISABLED_ON_ANDROID(IF_ISAC_FLOAT(IsacSwb30ms))) {
ASSERT_NO_FATAL_FAILURE(
SetUpTest(acm2::ACMCodecDB::kISACSWB, 1, 104, 960, 960));
Run(AcmReceiverBitExactness::PlatformChecksum(
@@ -904,7 +924,13 @@ TEST_F(AcmSenderBitExactness, Pcma_stereo_20ms) {
test::AcmReceiveTest::kStereoOutput);
}
-TEST_F(AcmSenderBitExactness, DISABLED_ON_ANDROID(Ilbc_30ms)) {
+#ifdef WEBRTC_CODEC_ILBC
+#define IF_ILBC(x) x
+#else
+#define IF_ILBC(x) DISABLED_##x
+#endif
+
+TEST_F(AcmSenderBitExactness, DISABLED_ON_ANDROID(IF_ILBC(Ilbc_30ms))) {
ASSERT_NO_FATAL_FAILURE(SetUpTest(acm2::ACMCodecDB::kILBC, 1, 102, 240, 240));
Run(AcmReceiverBitExactness::PlatformChecksum(
"7b6ec10910debd9af08011d3ed5249f7",
@@ -918,7 +944,13 @@ TEST_F(AcmSenderBitExactness, DISABLED_ON_ANDROID(Ilbc_30ms)) {
test::AcmReceiveTest::kMonoOutput);
}
-TEST_F(AcmSenderBitExactness, DISABLED_ON_ANDROID(G722_20ms)) {
+#ifdef WEBRTC_CODEC_G722
+#define IF_G722(x) x
+#else
+#define IF_G722(x) DISABLED_##x
+#endif
+
+TEST_F(AcmSenderBitExactness, DISABLED_ON_ANDROID(IF_G722(G722_20ms))) {
ASSERT_NO_FATAL_FAILURE(SetUpTest(acm2::ACMCodecDB::kG722, 1, 9, 320, 160));
Run(AcmReceiverBitExactness::PlatformChecksum(
"7d759436f2533582950d148b5161a36c",
@@ -932,7 +964,7 @@ TEST_F(AcmSenderBitExactness, DISABLED_ON_ANDROID(G722_20ms)) {
test::AcmReceiveTest::kMonoOutput);
}
-TEST_F(AcmSenderBitExactness, DISABLED_ON_ANDROID(G722_stereo_20ms)) {
+TEST_F(AcmSenderBitExactness, DISABLED_ON_ANDROID(IF_G722(G722_stereo_20ms))) {
ASSERT_NO_FATAL_FAILURE(
SetUpTest(acm2::ACMCodecDB::kG722_2ch, 2, 119, 320, 160));
Run(AcmReceiverBitExactness::PlatformChecksum(
diff --git a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/audio_coding_module_unittest_oldapi.cc b/chromium/third_party/webrtc/modules/audio_coding/main/acm2/audio_coding_module_unittest_oldapi.cc
index 568ae1ebe7f..01c8bb8e78e 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/audio_coding_module_unittest_oldapi.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/main/acm2/audio_coding_module_unittest_oldapi.cc
@@ -16,6 +16,7 @@
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/base/thread_annotations.h"
#include "webrtc/modules/audio_coding/codecs/audio_encoder.h"
+#include "webrtc/modules/audio_coding/codecs/g711/include/audio_decoder_pcm.h"
#include "webrtc/modules/audio_coding/codecs/g711/include/audio_encoder_pcm.h"
#include "webrtc/modules/audio_coding/codecs/isac/main/interface/audio_encoder_isac.h"
#include "webrtc/modules/audio_coding/codecs/mock/mock_audio_encoder.h"
@@ -23,6 +24,8 @@
#include "webrtc/modules/audio_coding/main/acm2/acm_send_test_oldapi.h"
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module.h"
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module_typedefs.h"
+#include "webrtc/modules/audio_coding/neteq/audio_decoder_impl.h"
+#include "webrtc/modules/audio_coding/neteq/mock/mock_audio_decoder.h"
#include "webrtc/modules/audio_coding/neteq/tools/audio_checksum.h"
#include "webrtc/modules/audio_coding/neteq/tools/audio_loop.h"
#include "webrtc/modules/audio_coding/neteq/tools/constant_pcm_packet_source.h"
@@ -312,7 +315,8 @@ TEST_F(AudioCodingModuleTestOldApi, VerifyOutputFrame) {
EXPECT_EQ(id_, audio_frame.id_);
EXPECT_EQ(0u, audio_frame.timestamp_);
EXPECT_GT(audio_frame.num_channels_, 0);
- EXPECT_EQ(kSampleRateHz / 100, audio_frame.samples_per_channel_);
+ EXPECT_EQ(static_cast<size_t>(kSampleRateHz / 100),
+ audio_frame.samples_per_channel_);
EXPECT_EQ(kSampleRateHz, audio_frame.sample_rate_hz_);
}
@@ -338,9 +342,16 @@ TEST_F(AudioCodingModuleTestOldApi, TransportCallbackIsInvokedForEachPacket) {
EXPECT_EQ(kAudioFrameSpeech, packet_cb_.last_frame_type());
}
+#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
+#define IF_ISAC(x) x
+#else
+#define IF_ISAC(x) DISABLED_##x
+#endif
+
// Verifies that the RTP timestamp series is not reset when the codec is
// changed.
-TEST_F(AudioCodingModuleTestOldApi, TimestampSeriesContinuesWhenCodecChanges) {
+TEST_F(AudioCodingModuleTestOldApi,
+ IF_ISAC(TimestampSeriesContinuesWhenCodecChanges)) {
RegisterCodec(); // This registers the default codec.
uint32_t expected_ts = input_frame_.timestamp_;
int blocks_per_packet = codec_.pacsize / (kSampleRateHz / 100);
@@ -696,7 +707,7 @@ class AcmIsacMtTestOldApi : public AudioCodingModuleMtTestOldApi {
test::AudioLoop audio_loop_;
};
-TEST_F(AcmIsacMtTestOldApi, DISABLED_ON_IOS(DoTest)) {
+TEST_F(AcmIsacMtTestOldApi, DISABLED_ON_IOS(IF_ISAC(DoTest))) {
EXPECT_EQ(kEventSignaled, RunTest());
}
@@ -721,9 +732,9 @@ class AcmReRegisterIsacMtTestOldApi : public AudioCodingModuleTestOldApi {
receive_packet_count_(0),
next_insert_packet_time_ms_(0),
fake_clock_(new SimulatedClock(0)) {
- AudioEncoderDecoderIsac::Config config;
+ AudioEncoderIsac::Config config;
config.payload_type = kPayloadType;
- isac_encoder_.reset(new AudioEncoderDecoderIsac(config));
+ isac_encoder_.reset(new AudioEncoderIsac(config));
clock_ = fake_clock_.get();
}
@@ -845,12 +856,12 @@ class AcmReRegisterIsacMtTestOldApi : public AudioCodingModuleTestOldApi {
bool codec_registered_ GUARDED_BY(crit_sect_);
int receive_packet_count_ GUARDED_BY(crit_sect_);
int64_t next_insert_packet_time_ms_ GUARDED_BY(crit_sect_);
- rtc::scoped_ptr<AudioEncoderDecoderIsac> isac_encoder_;
+ rtc::scoped_ptr<AudioEncoderIsac> isac_encoder_;
rtc::scoped_ptr<SimulatedClock> fake_clock_;
test::AudioLoop audio_loop_;
};
-TEST_F(AcmReRegisterIsacMtTestOldApi, DISABLED_ON_IOS(DoTest)) {
+TEST_F(AcmReRegisterIsacMtTestOldApi, DISABLED_ON_IOS(IF_ISAC(DoTest))) {
EXPECT_EQ(kEventSignaled, RunTest());
}
@@ -873,7 +884,16 @@ class AcmReceiverBitExactnessOldApi : public ::testing::Test {
}
protected:
- void Run(int output_freq_hz, const std::string& checksum_ref) {
+ struct ExternalDecoder {
+ int rtp_payload_type;
+ AudioDecoder* external_decoder;
+ int sample_rate_hz;
+ int num_channels;
+ };
+
+ void Run(int output_freq_hz,
+ const std::string& checksum_ref,
+ const std::vector<ExternalDecoder>& external_decoders) {
const std::string input_file_name =
webrtc::test::ResourcePath("audio_coding/neteq_universal_new", "rtp");
rtc::scoped_ptr<test::RtpFileSource> packet_source(
@@ -901,6 +921,11 @@ class AcmReceiverBitExactnessOldApi : public ::testing::Test {
output_freq_hz,
test::AcmReceiveTestOldApi::kArbitraryChannels);
ASSERT_NO_FATAL_FAILURE(test.RegisterNetEqTestCodecs());
+ for (const auto& ed : external_decoders) {
+ ASSERT_EQ(0, test.RegisterExternalReceiveCodec(
+ ed.rtp_payload_type, ed.external_decoder,
+ ed.sample_rate_hz, ed.num_channels));
+ }
test.Run();
std::string checksum_string = checksum.Finish();
@@ -908,17 +933,24 @@ class AcmReceiverBitExactnessOldApi : public ::testing::Test {
}
};
+#if (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)) && \
+ defined(WEBRTC_CODEC_ILBC) && defined(WEBRTC_CODEC_G722)
+#define IF_ALL_CODECS(x) x
+#else
+#define IF_ALL_CODECS(x) DISABLED_##x
+#endif
+
// Fails Android ARM64. https://code.google.com/p/webrtc/issues/detail?id=4199
#if defined(WEBRTC_ANDROID) && defined(WEBRTC_ARCH_ARM64)
#define MAYBE_8kHzOutput DISABLED_8kHzOutput
#else
#define MAYBE_8kHzOutput 8kHzOutput
#endif
-TEST_F(AcmReceiverBitExactnessOldApi, MAYBE_8kHzOutput) {
- Run(8000,
- PlatformChecksum("dcee98c623b147ebe1b40dd30efa896e",
- "adc92e173f908f93b96ba5844209815a",
- "908002dc01fc4eb1d2be24eb1d3f354b"));
+TEST_F(AcmReceiverBitExactnessOldApi, IF_ALL_CODECS(MAYBE_8kHzOutput)) {
+ Run(8000, PlatformChecksum("dcee98c623b147ebe1b40dd30efa896e",
+ "adc92e173f908f93b96ba5844209815a",
+ "908002dc01fc4eb1d2be24eb1d3f354b"),
+ std::vector<ExternalDecoder>());
}
// Fails Android ARM64. https://code.google.com/p/webrtc/issues/detail?id=4199
@@ -927,11 +959,11 @@ TEST_F(AcmReceiverBitExactnessOldApi, MAYBE_8kHzOutput) {
#else
#define MAYBE_16kHzOutput 16kHzOutput
#endif
-TEST_F(AcmReceiverBitExactnessOldApi, MAYBE_16kHzOutput) {
- Run(16000,
- PlatformChecksum("f790e7a8cce4e2c8b7bb5e0e4c5dac0d",
- "8cffa6abcb3e18e33b9d857666dff66a",
- "a909560b5ca49fa472b17b7b277195e9"));
+TEST_F(AcmReceiverBitExactnessOldApi, IF_ALL_CODECS(MAYBE_16kHzOutput)) {
+ Run(16000, PlatformChecksum("f790e7a8cce4e2c8b7bb5e0e4c5dac0d",
+ "8cffa6abcb3e18e33b9d857666dff66a",
+ "a909560b5ca49fa472b17b7b277195e9"),
+ std::vector<ExternalDecoder>());
}
// Fails Android ARM64. https://code.google.com/p/webrtc/issues/detail?id=4199
@@ -940,11 +972,11 @@ TEST_F(AcmReceiverBitExactnessOldApi, MAYBE_16kHzOutput) {
#else
#define MAYBE_32kHzOutput 32kHzOutput
#endif
-TEST_F(AcmReceiverBitExactnessOldApi, MAYBE_32kHzOutput) {
- Run(32000,
- PlatformChecksum("306e0d990ee6e92de3fbecc0123ece37",
- "3e126fe894720c3f85edadcc91964ba5",
- "441aab4b347fb3db4e9244337aca8d8e"));
+TEST_F(AcmReceiverBitExactnessOldApi, IF_ALL_CODECS(MAYBE_32kHzOutput)) {
+ Run(32000, PlatformChecksum("306e0d990ee6e92de3fbecc0123ece37",
+ "3e126fe894720c3f85edadcc91964ba5",
+ "441aab4b347fb3db4e9244337aca8d8e"),
+ std::vector<ExternalDecoder>());
}
// Fails Android ARM64. https://code.google.com/p/webrtc/issues/detail?id=4199
@@ -953,11 +985,54 @@ TEST_F(AcmReceiverBitExactnessOldApi, MAYBE_32kHzOutput) {
#else
#define MAYBE_48kHzOutput 48kHzOutput
#endif
-TEST_F(AcmReceiverBitExactnessOldApi, MAYBE_48kHzOutput) {
- Run(48000,
- PlatformChecksum("aa7c232f63a67b2a72703593bdd172e0",
- "0155665e93067c4e89256b944dd11999",
- "4ee2730fa1daae755e8a8fd3abd779ec"));
+TEST_F(AcmReceiverBitExactnessOldApi, IF_ALL_CODECS(MAYBE_48kHzOutput)) {
+ Run(48000, PlatformChecksum("aa7c232f63a67b2a72703593bdd172e0",
+ "0155665e93067c4e89256b944dd11999",
+ "4ee2730fa1daae755e8a8fd3abd779ec"),
+ std::vector<ExternalDecoder>());
+}
+
+// Fails Android ARM64. https://code.google.com/p/webrtc/issues/detail?id=4199
+#if defined(WEBRTC_ANDROID) && defined(WEBRTC_ARCH_ARM64)
+#define MAYBE_48kHzOutputExternalDecoder DISABLED_48kHzOutputExternalDecoder
+#else
+#define MAYBE_48kHzOutputExternalDecoder 48kHzOutputExternalDecoder
+#endif
+TEST_F(AcmReceiverBitExactnessOldApi,
+ IF_ALL_CODECS(MAYBE_48kHzOutputExternalDecoder)) {
+ AudioDecoderPcmU decoder(1);
+ MockAudioDecoder mock_decoder;
+ // Set expectations on the mock decoder and also delegate the calls to the
+ // real decoder.
+ EXPECT_CALL(mock_decoder, IncomingPacket(_, _, _, _, _))
+ .Times(AtLeast(1))
+ .WillRepeatedly(Invoke(&decoder, &AudioDecoderPcmU::IncomingPacket));
+ EXPECT_CALL(mock_decoder, Channels())
+ .Times(AtLeast(1))
+ .WillRepeatedly(Invoke(&decoder, &AudioDecoderPcmU::Channels));
+ EXPECT_CALL(mock_decoder, Decode(_, _, _, _, _, _))
+ .Times(AtLeast(1))
+ .WillRepeatedly(Invoke(&decoder, &AudioDecoderPcmU::Decode));
+ EXPECT_CALL(mock_decoder, HasDecodePlc())
+ .Times(AtLeast(1))
+ .WillRepeatedly(Invoke(&decoder, &AudioDecoderPcmU::HasDecodePlc));
+ EXPECT_CALL(mock_decoder, PacketDuration(_, _))
+ .Times(AtLeast(1))
+ .WillRepeatedly(Invoke(&decoder, &AudioDecoderPcmU::PacketDuration));
+ ExternalDecoder ed;
+ ed.rtp_payload_type = 0;
+ ed.external_decoder = &mock_decoder;
+ ed.sample_rate_hz = 8000;
+ ed.num_channels = 1;
+ std::vector<ExternalDecoder> external_decoders;
+ external_decoders.push_back(ed);
+
+ Run(48000, PlatformChecksum("aa7c232f63a67b2a72703593bdd172e0",
+ "0155665e93067c4e89256b944dd11999",
+ "4ee2730fa1daae755e8a8fd3abd779ec"),
+ external_decoders);
+
+ EXPECT_CALL(mock_decoder, Die());
}
// This test verifies bit exactness for the send-side of ACM. The test setup is
@@ -1015,7 +1090,7 @@ class AcmSenderBitExactnessOldApi : public ::testing::Test,
frame_size_samples);
}
- bool RegisterExternalSendCodec(AudioEncoderMutable* external_speech_encoder,
+ bool RegisterExternalSendCodec(AudioEncoder* external_speech_encoder,
int payload_type) {
payload_type_ = payload_type;
frame_size_rtp_timestamps_ =
@@ -1117,7 +1192,7 @@ class AcmSenderBitExactnessOldApi : public ::testing::Test,
codec_frame_size_rtp_timestamps));
}
- void SetUpTestExternalEncoder(AudioEncoderMutable* external_speech_encoder,
+ void SetUpTestExternalEncoder(AudioEncoder* external_speech_encoder,
int payload_type) {
ASSERT_TRUE(SetUpSender());
ASSERT_TRUE(
@@ -1140,7 +1215,7 @@ class AcmSenderBitExactnessOldApi : public ::testing::Test,
#else
#define MAYBE_IsacWb30ms IsacWb30ms
#endif
-TEST_F(AcmSenderBitExactnessOldApi, MAYBE_IsacWb30ms) {
+TEST_F(AcmSenderBitExactnessOldApi, IF_ISAC(MAYBE_IsacWb30ms)) {
ASSERT_NO_FATAL_FAILURE(SetUpTest("ISAC", 16000, 1, 103, 480, 480));
Run(AcmReceiverBitExactnessOldApi::PlatformChecksum(
"c7e5bdadfa2871df95639fcc297cf23d",
@@ -1160,7 +1235,7 @@ TEST_F(AcmSenderBitExactnessOldApi, MAYBE_IsacWb30ms) {
#else
#define MAYBE_IsacWb60ms IsacWb60ms
#endif
-TEST_F(AcmSenderBitExactnessOldApi, MAYBE_IsacWb60ms) {
+TEST_F(AcmSenderBitExactnessOldApi, IF_ISAC(MAYBE_IsacWb60ms)) {
ASSERT_NO_FATAL_FAILURE(SetUpTest("ISAC", 16000, 1, 103, 960, 960));
Run(AcmReceiverBitExactnessOldApi::PlatformChecksum(
"14d63c5f08127d280e722e3191b73bdd",
@@ -1174,7 +1249,14 @@ TEST_F(AcmSenderBitExactnessOldApi, MAYBE_IsacWb60ms) {
test::AcmReceiveTestOldApi::kMonoOutput);
}
-TEST_F(AcmSenderBitExactnessOldApi, DISABLED_ON_ANDROID(IsacSwb30ms)) {
+#ifdef WEBRTC_CODEC_ISAC
+#define IF_ISAC_FLOAT(x) x
+#else
+#define IF_ISAC_FLOAT(x) DISABLED_##x
+#endif
+
+TEST_F(AcmSenderBitExactnessOldApi,
+ DISABLED_ON_ANDROID(IF_ISAC_FLOAT(IsacSwb30ms))) {
ASSERT_NO_FATAL_FAILURE(SetUpTest("ISAC", 32000, 1, 104, 960, 960));
Run(AcmReceiverBitExactnessOldApi::PlatformChecksum(
"2b3c387d06f00b7b7aad4c9be56fb83d",
@@ -1267,7 +1349,13 @@ TEST_F(AcmSenderBitExactnessOldApi, Pcma_stereo_20ms) {
test::AcmReceiveTestOldApi::kStereoOutput);
}
-TEST_F(AcmSenderBitExactnessOldApi, DISABLED_ON_ANDROID(Ilbc_30ms)) {
+#ifdef WEBRTC_CODEC_ILBC
+#define IF_ILBC(x) x
+#else
+#define IF_ILBC(x) DISABLED_##x
+#endif
+
+TEST_F(AcmSenderBitExactnessOldApi, DISABLED_ON_ANDROID(IF_ILBC(Ilbc_30ms))) {
ASSERT_NO_FATAL_FAILURE(SetUpTest("ILBC", 8000, 1, 102, 240, 240));
Run(AcmReceiverBitExactnessOldApi::PlatformChecksum(
"7b6ec10910debd9af08011d3ed5249f7",
@@ -1281,7 +1369,13 @@ TEST_F(AcmSenderBitExactnessOldApi, DISABLED_ON_ANDROID(Ilbc_30ms)) {
test::AcmReceiveTestOldApi::kMonoOutput);
}
-TEST_F(AcmSenderBitExactnessOldApi, DISABLED_ON_ANDROID(G722_20ms)) {
+#ifdef WEBRTC_CODEC_G722
+#define IF_G722(x) x
+#else
+#define IF_G722(x) DISABLED_##x
+#endif
+
+TEST_F(AcmSenderBitExactnessOldApi, DISABLED_ON_ANDROID(IF_G722(G722_20ms))) {
ASSERT_NO_FATAL_FAILURE(SetUpTest("G722", 16000, 1, 9, 320, 160));
Run(AcmReceiverBitExactnessOldApi::PlatformChecksum(
"7d759436f2533582950d148b5161a36c",
@@ -1295,7 +1389,8 @@ TEST_F(AcmSenderBitExactnessOldApi, DISABLED_ON_ANDROID(G722_20ms)) {
test::AcmReceiveTestOldApi::kMonoOutput);
}
-TEST_F(AcmSenderBitExactnessOldApi, DISABLED_ON_ANDROID(G722_stereo_20ms)) {
+TEST_F(AcmSenderBitExactnessOldApi,
+ DISABLED_ON_ANDROID(IF_G722(G722_stereo_20ms))) {
ASSERT_NO_FATAL_FAILURE(SetUpTest("G722", 16000, 2, 119, 320, 160));
Run(AcmReceiverBitExactnessOldApi::PlatformChecksum(
"7190ee718ab3d80eca181e5f7140c210",
@@ -1542,32 +1637,36 @@ TEST_F(AcmSenderBitExactnessOldApi, External_Pcmu_20ms) {
codec_inst.channels = 1;
codec_inst.pacsize = 160;
codec_inst.pltype = 0;
- AudioEncoderMutablePcmU encoder(codec_inst);
- MockAudioEncoderMutable mock_encoder;
+ AudioEncoderPcmU encoder(codec_inst);
+ MockAudioEncoder mock_encoder;
// Set expectations on the mock encoder and also delegate the calls to the
// real encoder.
- EXPECT_CALL(mock_encoder, Num10MsFramesInNextPacket())
- .Times(AtLeast(1))
- .WillRepeatedly(Invoke(
- &encoder, &AudioEncoderMutablePcmU::Num10MsFramesInNextPacket));
- EXPECT_CALL(mock_encoder, Max10MsFramesInAPacket())
+ EXPECT_CALL(mock_encoder, MaxEncodedBytes())
.Times(AtLeast(1))
- .WillRepeatedly(
- Invoke(&encoder, &AudioEncoderMutablePcmU::Max10MsFramesInAPacket));
+ .WillRepeatedly(Invoke(&encoder, &AudioEncoderPcmU::MaxEncodedBytes));
EXPECT_CALL(mock_encoder, SampleRateHz())
.Times(AtLeast(1))
- .WillRepeatedly(Invoke(&encoder, &AudioEncoderMutablePcmU::SampleRateHz));
+ .WillRepeatedly(Invoke(&encoder, &AudioEncoderPcmU::SampleRateHz));
EXPECT_CALL(mock_encoder, NumChannels())
.Times(AtLeast(1))
- .WillRepeatedly(Invoke(&encoder, &AudioEncoderMutablePcmU::NumChannels));
- EXPECT_CALL(mock_encoder, EncodeInternal(_, _, _, _))
+ .WillRepeatedly(Invoke(&encoder, &AudioEncoderPcmU::NumChannels));
+ EXPECT_CALL(mock_encoder, RtpTimestampRateHz())
+ .Times(AtLeast(1))
+ .WillRepeatedly(Invoke(&encoder, &AudioEncoderPcmU::RtpTimestampRateHz));
+ EXPECT_CALL(mock_encoder, Num10MsFramesInNextPacket())
.Times(AtLeast(1))
.WillRepeatedly(
- Invoke(&encoder, &AudioEncoderMutablePcmU::EncodeInternal));
+ Invoke(&encoder, &AudioEncoderPcmU::Num10MsFramesInNextPacket));
+ EXPECT_CALL(mock_encoder, Max10MsFramesInAPacket())
+ .Times(AtLeast(1))
+ .WillRepeatedly(
+ Invoke(&encoder, &AudioEncoderPcmU::Max10MsFramesInAPacket));
EXPECT_CALL(mock_encoder, GetTargetBitrate())
.Times(AtLeast(1))
- .WillRepeatedly(Invoke(
- &encoder, &AudioEncoderMutablePcmU::GetTargetBitrate));
+ .WillRepeatedly(Invoke(&encoder, &AudioEncoderPcmU::GetTargetBitrate));
+ EXPECT_CALL(mock_encoder, EncodeInternal(_, _, _, _))
+ .Times(AtLeast(1))
+ .WillRepeatedly(Invoke(&encoder, &AudioEncoderPcmU::EncodeInternal));
ASSERT_NO_FATAL_FAILURE(
SetUpTestExternalEncoder(&mock_encoder, codec_inst.pltype));
Run("81a9d4c0bb72e9becc43aef124c981e9", "8f9b8750bd80fe26b6cbf6659b89f0f9",
diff --git a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/codec_manager.cc b/chromium/third_party/webrtc/modules/audio_coding/main/acm2/codec_manager.cc
index cad6ee90893..862feaaa702 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/codec_manager.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/main/acm2/codec_manager.cc
@@ -122,11 +122,7 @@ bool IsPcmA(const CodecInst& codec) {
}
bool IsPcm16B(const CodecInst& codec) {
- return
-#ifdef WEBRTC_CODEC_PCM16
- !STR_CASE_CMP(codec.plname, "l16") ||
-#endif
- false;
+ return !STR_CASE_CMP(codec.plname, "l16");
}
bool IsIlbc(const CodecInst& codec) {
@@ -164,7 +160,8 @@ CodecManager::CodecManager()
vad_mode_(VADNormal),
send_codec_inst_(kEmptyCodecInst),
red_enabled_(false),
- codec_fec_enabled_(false) {
+ codec_fec_enabled_(false),
+ encoder_is_opus_(false) {
// Register the default payload type for RED and for CNG at sampling rates of
// 8, 16, 32 and 48 kHz.
for (int i = (ACMCodecDB::kNumCodecs - 1); i >= 0; i--) {
@@ -188,7 +185,7 @@ CodecManager::CodecManager()
CodecManager::~CodecManager() = default;
int CodecManager::RegisterEncoder(const CodecInst& send_codec) {
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
int codec_id = IsValidSendCodec(send_codec, true);
// Check for reported errors from function IsValidSendCodec().
@@ -267,7 +264,7 @@ int CodecManager::RegisterEncoder(const CodecInst& send_codec) {
bool new_codec = true;
if (codec_owner_.Encoder()) {
int new_codec_id = ACMCodecDB::CodecNumber(send_codec_inst_);
- DCHECK_GE(new_codec_id, 0);
+ RTC_DCHECK_GE(new_codec_id, 0);
new_codec = new_codec_id != codec_id;
}
@@ -275,21 +272,23 @@ int CodecManager::RegisterEncoder(const CodecInst& send_codec) {
red_enabled_ = false;
}
+ encoder_is_opus_ = IsOpus(send_codec);
+
if (new_codec) {
// This is a new codec. Register it and return.
- DCHECK(CodecSupported(send_codec));
+ RTC_DCHECK(CodecSupported(send_codec));
if (IsOpus(send_codec)) {
// VAD/DTX not supported.
dtx_enabled_ = false;
}
- codec_owner_.SetEncoders(
- send_codec, dtx_enabled_ ? CngPayloadType(send_codec.plfreq) : -1,
- vad_mode_, red_enabled_ ? RedPayloadType(send_codec.plfreq) : -1);
- DCHECK(codec_owner_.Encoder());
+ if (!codec_owner_.SetEncoders(
+ send_codec, dtx_enabled_ ? CngPayloadType(send_codec.plfreq) : -1,
+ vad_mode_, red_enabled_ ? RedPayloadType(send_codec.plfreq) : -1))
+ return -1;
+ RTC_DCHECK(codec_owner_.Encoder());
- codec_fec_enabled_ =
- codec_fec_enabled_ &&
- codec_owner_.SpeechEncoder()->SetFec(codec_fec_enabled_);
+ codec_fec_enabled_ = codec_fec_enabled_ &&
+ codec_owner_.Encoder()->SetFec(codec_fec_enabled_);
send_codec_inst_ = send_codec;
return 0;
@@ -299,10 +298,11 @@ int CodecManager::RegisterEncoder(const CodecInst& send_codec) {
if (send_codec_inst_.plfreq != send_codec.plfreq ||
send_codec_inst_.pacsize != send_codec.pacsize ||
send_codec_inst_.channels != send_codec.channels) {
- codec_owner_.SetEncoders(
- send_codec, dtx_enabled_ ? CngPayloadType(send_codec.plfreq) : -1,
- vad_mode_, red_enabled_ ? RedPayloadType(send_codec.plfreq) : -1);
- DCHECK(codec_owner_.Encoder());
+ if (!codec_owner_.SetEncoders(
+ send_codec, dtx_enabled_ ? CngPayloadType(send_codec.plfreq) : -1,
+ vad_mode_, red_enabled_ ? RedPayloadType(send_codec.plfreq) : -1))
+ return -1;
+ RTC_DCHECK(codec_owner_.Encoder());
}
send_codec_inst_.plfreq = send_codec.plfreq;
send_codec_inst_.pacsize = send_codec.pacsize;
@@ -311,25 +311,24 @@ int CodecManager::RegisterEncoder(const CodecInst& send_codec) {
// Check if a change in Rate is required.
if (send_codec.rate != send_codec_inst_.rate) {
- codec_owner_.SpeechEncoder()->SetTargetBitrate(send_codec.rate);
+ codec_owner_.Encoder()->SetTargetBitrate(send_codec.rate);
send_codec_inst_.rate = send_codec.rate;
}
- codec_fec_enabled_ = codec_fec_enabled_ &&
- codec_owner_.SpeechEncoder()->SetFec(codec_fec_enabled_);
+ codec_fec_enabled_ =
+ codec_fec_enabled_ && codec_owner_.Encoder()->SetFec(codec_fec_enabled_);
return 0;
}
-void CodecManager::RegisterEncoder(
- AudioEncoderMutable* external_speech_encoder) {
+void CodecManager::RegisterEncoder(AudioEncoder* external_speech_encoder) {
// Make up a CodecInst.
send_codec_inst_.channels = external_speech_encoder->NumChannels();
send_codec_inst_.plfreq = external_speech_encoder->SampleRateHz();
- send_codec_inst_.pacsize =
- rtc::CheckedDivExact(external_speech_encoder->Max10MsFramesInAPacket() *
- send_codec_inst_.plfreq,
- 100);
+ send_codec_inst_.pacsize = rtc::CheckedDivExact(
+ static_cast<int>(external_speech_encoder->Max10MsFramesInAPacket() *
+ send_codec_inst_.plfreq),
+ 100);
send_codec_inst_.pltype = -1; // Not valid.
send_codec_inst_.rate = -1; // Not valid.
static const char kName[] = "external";
@@ -337,8 +336,8 @@ void CodecManager::RegisterEncoder(
if (stereo_send_)
dtx_enabled_ = false;
- codec_fec_enabled_ = codec_fec_enabled_ &&
- codec_owner_.SpeechEncoder()->SetFec(codec_fec_enabled_);
+ codec_fec_enabled_ =
+ codec_fec_enabled_ && codec_owner_.Encoder()->SetFec(codec_fec_enabled_);
int cng_pt = dtx_enabled_
? CngPayloadType(external_speech_encoder->SampleRateHz())
: -1;
@@ -384,8 +383,8 @@ bool CodecManager::SetCopyRed(bool enable) {
int CodecManager::SetVAD(bool enable, ACMVADMode mode) {
// Sanity check of the mode.
- DCHECK(mode == VADNormal || mode == VADLowBitrate || mode == VADAggr ||
- mode == VADVeryAggr);
+ RTC_DCHECK(mode == VADNormal || mode == VADLowBitrate || mode == VADAggr ||
+ mode == VADVeryAggr);
// Check that the send codec is mono. We don't support VAD/DTX for stereo
// sending.
@@ -430,9 +429,9 @@ int CodecManager::SetCodecFEC(bool enable_codec_fec) {
return -1;
}
- CHECK(codec_owner_.SpeechEncoder());
- codec_fec_enabled_ = codec_owner_.SpeechEncoder()->SetFec(enable_codec_fec) &&
- enable_codec_fec;
+ RTC_CHECK(codec_owner_.Encoder());
+ codec_fec_enabled_ =
+ codec_owner_.Encoder()->SetFec(enable_codec_fec) && enable_codec_fec;
return codec_fec_enabled_ == enable_codec_fec ? 0 : -1;
}
diff --git a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/codec_manager.h b/chromium/third_party/webrtc/modules/audio_coding/main/acm2/codec_manager.h
index bb9545d889b..2337521edf3 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/codec_manager.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/main/acm2/codec_manager.h
@@ -22,7 +22,6 @@ namespace webrtc {
class AudioDecoder;
class AudioEncoder;
-class AudioEncoderMutable;
namespace acm2 {
@@ -33,7 +32,7 @@ class CodecManager final {
int RegisterEncoder(const CodecInst& send_codec);
- void RegisterEncoder(AudioEncoderMutable* external_speech_encoder);
+ void RegisterEncoder(AudioEncoder* external_speech_encoder);
int GetCodecInst(CodecInst* current_codec) const;
@@ -58,12 +57,11 @@ class CodecManager final {
bool codec_fec_enabled() const { return codec_fec_enabled_; }
- AudioEncoderMutable* CurrentSpeechEncoder() {
- return codec_owner_.SpeechEncoder();
- }
AudioEncoder* CurrentEncoder() { return codec_owner_.Encoder(); }
const AudioEncoder* CurrentEncoder() const { return codec_owner_.Encoder(); }
+ bool CurrentEncoderIsOpus() const { return encoder_is_opus_; }
+
private:
int CngPayloadType(int sample_rate_hz) const;
@@ -82,8 +80,9 @@ class CodecManager final {
bool red_enabled_;
bool codec_fec_enabled_;
CodecOwner codec_owner_;
+ bool encoder_is_opus_;
- DISALLOW_COPY_AND_ASSIGN(CodecManager);
+ RTC_DISALLOW_COPY_AND_ASSIGN(CodecManager);
};
} // namespace acm2
diff --git a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/codec_owner.cc b/chromium/third_party/webrtc/modules/audio_coding/main/acm2/codec_owner.cc
index 4d214be2428..449a467a172 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/codec_owner.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/main/acm2/codec_owner.cc
@@ -11,133 +11,104 @@
#include "webrtc/modules/audio_coding/main/acm2/codec_owner.h"
#include "webrtc/base/checks.h"
+#include "webrtc/base/logging.h"
#include "webrtc/engine_configurations.h"
#include "webrtc/modules/audio_coding/codecs/cng/include/audio_encoder_cng.h"
#include "webrtc/modules/audio_coding/codecs/g711/include/audio_encoder_pcm.h"
+#ifdef WEBRTC_CODEC_G722
#include "webrtc/modules/audio_coding/codecs/g722/include/audio_encoder_g722.h"
+#endif
+#ifdef WEBRTC_CODEC_ILBC
#include "webrtc/modules/audio_coding/codecs/ilbc/interface/audio_encoder_ilbc.h"
+#endif
+#ifdef WEBRTC_CODEC_ISACFX
+#include "webrtc/modules/audio_coding/codecs/isac/fix/interface/audio_decoder_isacfix.h"
#include "webrtc/modules/audio_coding/codecs/isac/fix/interface/audio_encoder_isacfix.h"
+#endif
+#ifdef WEBRTC_CODEC_ISAC
+#include "webrtc/modules/audio_coding/codecs/isac/main/interface/audio_decoder_isac.h"
#include "webrtc/modules/audio_coding/codecs/isac/main/interface/audio_encoder_isac.h"
+#endif
+#ifdef WEBRTC_CODEC_OPUS
#include "webrtc/modules/audio_coding/codecs/opus/interface/audio_encoder_opus.h"
+#endif
#include "webrtc/modules/audio_coding/codecs/pcm16b/include/audio_encoder_pcm16b.h"
+#ifdef WEBRTC_CODEC_RED
#include "webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.h"
+#endif
namespace webrtc {
namespace acm2 {
-namespace {
-bool IsIsac(const CodecInst& codec) {
- return
-#if (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX))
- !STR_CASE_CMP(codec.plname, "isac") ||
-#endif
- false;
-}
-
-bool IsOpus(const CodecInst& codec) {
- return
-#ifdef WEBRTC_CODEC_OPUS
- !STR_CASE_CMP(codec.plname, "opus") ||
-#endif
- false;
-}
-
-bool IsPcmU(const CodecInst& codec) {
- return !STR_CASE_CMP(codec.plname, "pcmu");
-}
-
-bool IsPcmA(const CodecInst& codec) {
- return !STR_CASE_CMP(codec.plname, "pcma");
-}
-
-bool IsPcm16B(const CodecInst& codec) {
- return
-#ifdef WEBRTC_CODEC_PCM16
- !STR_CASE_CMP(codec.plname, "l16") ||
-#endif
- false;
-}
-
-bool IsIlbc(const CodecInst& codec) {
- return
-#ifdef WEBRTC_CODEC_ILBC
- !STR_CASE_CMP(codec.plname, "ilbc") ||
-#endif
- false;
-}
-
-bool IsG722(const CodecInst& codec) {
- return
-#ifdef WEBRTC_CODEC_G722
- !STR_CASE_CMP(codec.plname, "g722") ||
-#endif
- false;
-}
-} // namespace
-
-CodecOwner::CodecOwner()
- : isac_is_encoder_(false), external_speech_encoder_(nullptr) {
+CodecOwner::CodecOwner() : external_speech_encoder_(nullptr) {
}
CodecOwner::~CodecOwner() = default;
namespace {
-AudioEncoderDecoderMutableIsac* CreateIsacCodec(const CodecInst& speech_inst) {
+
+rtc::scoped_ptr<AudioDecoder> CreateIsacDecoder(
+ LockedIsacBandwidthInfo* bwinfo) {
#if defined(WEBRTC_CODEC_ISACFX)
- return new AudioEncoderDecoderMutableIsacFix(speech_inst);
+ return rtc_make_scoped_ptr(new AudioDecoderIsacFix(bwinfo));
#elif defined(WEBRTC_CODEC_ISAC)
- return new AudioEncoderDecoderMutableIsacFloat(speech_inst);
+ return rtc_make_scoped_ptr(new AudioDecoderIsac(bwinfo));
#else
FATAL() << "iSAC is not supported.";
- return nullptr;
+ return rtc::scoped_ptr<AudioDecoder>();
#endif
}
-void CreateSpeechEncoder(
+// Returns a new speech encoder, or null on error.
+// TODO(kwiberg): Don't handle errors here (bug 5033)
+rtc::scoped_ptr<AudioEncoder> CreateSpeechEncoder(
const CodecInst& speech_inst,
- rtc::scoped_ptr<AudioEncoderMutable>* speech_encoder,
- rtc::scoped_ptr<AudioEncoderDecoderMutableIsac>* isac_codec,
- bool* isac_is_encoder) {
- if (IsIsac(speech_inst)) {
- if (*isac_codec) {
- (*isac_codec)->UpdateSettings(speech_inst);
- } else {
- isac_codec->reset(CreateIsacCodec(speech_inst));
- }
- *isac_is_encoder = true;
- speech_encoder->reset();
- return;
- }
- if (IsOpus(speech_inst)) {
- speech_encoder->reset(new AudioEncoderMutableOpus(speech_inst));
- } else if (IsPcmU(speech_inst)) {
- speech_encoder->reset(new AudioEncoderMutablePcmU(speech_inst));
- } else if (IsPcmA(speech_inst)) {
- speech_encoder->reset(new AudioEncoderMutablePcmA(speech_inst));
- } else if (IsPcm16B(speech_inst)) {
- speech_encoder->reset(new AudioEncoderMutablePcm16B(speech_inst));
- } else if (IsIlbc(speech_inst)) {
- speech_encoder->reset(new AudioEncoderMutableIlbc(speech_inst));
- } else if (IsG722(speech_inst)) {
- speech_encoder->reset(new AudioEncoderMutableG722(speech_inst));
- } else {
- FATAL();
- }
- *isac_is_encoder = false;
+ LockedIsacBandwidthInfo* bwinfo) {
+#if defined(WEBRTC_CODEC_ISACFX)
+ if (STR_CASE_CMP(speech_inst.plname, "isac") == 0)
+ return rtc_make_scoped_ptr(new AudioEncoderIsacFix(speech_inst, bwinfo));
+#endif
+#if defined(WEBRTC_CODEC_ISAC)
+ if (STR_CASE_CMP(speech_inst.plname, "isac") == 0)
+ return rtc_make_scoped_ptr(new AudioEncoderIsac(speech_inst, bwinfo));
+#endif
+#ifdef WEBRTC_CODEC_OPUS
+ if (STR_CASE_CMP(speech_inst.plname, "opus") == 0)
+ return rtc_make_scoped_ptr(new AudioEncoderOpus(speech_inst));
+#endif
+ if (STR_CASE_CMP(speech_inst.plname, "pcmu") == 0)
+ return rtc_make_scoped_ptr(new AudioEncoderPcmU(speech_inst));
+ if (STR_CASE_CMP(speech_inst.plname, "pcma") == 0)
+ return rtc_make_scoped_ptr(new AudioEncoderPcmA(speech_inst));
+ if (STR_CASE_CMP(speech_inst.plname, "l16") == 0)
+ return rtc_make_scoped_ptr(new AudioEncoderPcm16B(speech_inst));
+#ifdef WEBRTC_CODEC_ILBC
+ if (STR_CASE_CMP(speech_inst.plname, "ilbc") == 0)
+ return rtc_make_scoped_ptr(new AudioEncoderIlbc(speech_inst));
+#endif
+#ifdef WEBRTC_CODEC_G722
+ if (STR_CASE_CMP(speech_inst.plname, "g722") == 0)
+ return rtc_make_scoped_ptr(new AudioEncoderG722(speech_inst));
+#endif
+ LOG_F(LS_ERROR) << "Could not create encoder of type " << speech_inst.plname;
+ return rtc::scoped_ptr<AudioEncoder>();
}
AudioEncoder* CreateRedEncoder(int red_payload_type,
AudioEncoder* encoder,
rtc::scoped_ptr<AudioEncoder>* red_encoder) {
- if (red_payload_type == -1) {
- red_encoder->reset();
- return encoder;
+#ifdef WEBRTC_CODEC_RED
+ if (red_payload_type != -1) {
+ AudioEncoderCopyRed::Config config;
+ config.payload_type = red_payload_type;
+ config.speech_encoder = encoder;
+ red_encoder->reset(new AudioEncoderCopyRed(config));
+ return red_encoder->get();
}
- AudioEncoderCopyRed::Config config;
- config.payload_type = red_payload_type;
- config.speech_encoder = encoder;
- red_encoder->reset(new AudioEncoderCopyRed(config));
- return red_encoder->get();
+#endif
+
+ red_encoder->reset();
+ return encoder;
}
void CreateCngEncoder(int cng_payload_type,
@@ -172,30 +143,31 @@ void CreateCngEncoder(int cng_payload_type,
}
} // namespace
-void CodecOwner::SetEncoders(const CodecInst& speech_inst,
+bool CodecOwner::SetEncoders(const CodecInst& speech_inst,
int cng_payload_type,
ACMVADMode vad_mode,
int red_payload_type) {
- CreateSpeechEncoder(speech_inst, &speech_encoder_, &isac_codec_,
- &isac_is_encoder_);
+ speech_encoder_ = CreateSpeechEncoder(speech_inst, &isac_bandwidth_info_);
+ if (!speech_encoder_)
+ return false;
external_speech_encoder_ = nullptr;
ChangeCngAndRed(cng_payload_type, vad_mode, red_payload_type);
+ return true;
}
-void CodecOwner::SetEncoders(AudioEncoderMutable* external_speech_encoder,
+void CodecOwner::SetEncoders(AudioEncoder* external_speech_encoder,
int cng_payload_type,
ACMVADMode vad_mode,
int red_payload_type) {
external_speech_encoder_ = external_speech_encoder;
speech_encoder_.reset();
- isac_is_encoder_ = false;
ChangeCngAndRed(cng_payload_type, vad_mode, red_payload_type);
}
void CodecOwner::ChangeCngAndRed(int cng_payload_type,
ACMVADMode vad_mode,
int red_payload_type) {
- AudioEncoderMutable* speech_encoder = SpeechEncoder();
+ AudioEncoder* speech_encoder = SpeechEncoder();
if (cng_payload_type != -1 || red_payload_type != -1) {
// The RED and CNG encoders need to be in sync with the speech encoder, so
// reset the latter to ensure its buffer is empty.
@@ -204,24 +176,13 @@ void CodecOwner::ChangeCngAndRed(int cng_payload_type,
AudioEncoder* encoder =
CreateRedEncoder(red_payload_type, speech_encoder, &red_encoder_);
CreateCngEncoder(cng_payload_type, vad_mode, encoder, &cng_encoder_);
- int num_true =
- !!speech_encoder_ + !!external_speech_encoder_ + isac_is_encoder_;
- DCHECK_EQ(num_true, 1);
- DCHECK(!isac_is_encoder_ || isac_codec_);
+ RTC_DCHECK_EQ(!!speech_encoder_ + !!external_speech_encoder_, 1);
}
AudioDecoder* CodecOwner::GetIsacDecoder() {
- if (!isac_codec_) {
- DCHECK(!isac_is_encoder_);
- // None of the parameter values in |speech_inst| matter when the codec is
- // used only as a decoder.
- CodecInst speech_inst;
- speech_inst.plfreq = 16000;
- speech_inst.rate = -1;
- speech_inst.pacsize = 480;
- isac_codec_.reset(CreateIsacCodec(speech_inst));
- }
- return isac_codec_.get();
+ if (!isac_decoder_)
+ isac_decoder_ = CreateIsacDecoder(&isac_bandwidth_info_);
+ return isac_decoder_.get();
}
AudioEncoder* CodecOwner::Encoder() {
@@ -237,21 +198,15 @@ const AudioEncoder* CodecOwner::Encoder() const {
return SpeechEncoder();
}
-AudioEncoderMutable* CodecOwner::SpeechEncoder() {
- const auto& const_this = *this;
- return const_cast<AudioEncoderMutable*>(const_this.SpeechEncoder());
+AudioEncoder* CodecOwner::SpeechEncoder() {
+ const auto* const_this = this;
+ return const_cast<AudioEncoder*>(const_this->SpeechEncoder());
}
-const AudioEncoderMutable* CodecOwner::SpeechEncoder() const {
- int num_true =
- !!speech_encoder_ + !!external_speech_encoder_ + isac_is_encoder_;
- DCHECK_GE(num_true, 0);
- DCHECK_LE(num_true, 1);
- if (external_speech_encoder_)
- return external_speech_encoder_;
- if (speech_encoder_)
- return speech_encoder_.get();
- return isac_is_encoder_ ? isac_codec_.get() : nullptr;
+const AudioEncoder* CodecOwner::SpeechEncoder() const {
+ RTC_DCHECK(!speech_encoder_ || !external_speech_encoder_);
+ return external_speech_encoder_ ? external_speech_encoder_
+ : speech_encoder_.get();
}
} // namespace acm2
diff --git a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/codec_owner.h b/chromium/third_party/webrtc/modules/audio_coding/main/acm2/codec_owner.h
index 2468c3ce00e..d2d93cf539e 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/codec_owner.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/main/acm2/codec_owner.h
@@ -15,13 +15,19 @@
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/common_types.h"
#include "webrtc/modules/audio_coding/codecs/audio_encoder.h"
-#include "webrtc/modules/audio_coding/codecs/isac/main/interface/audio_encoder_isac.h"
+#include "webrtc/modules/audio_coding/codecs/audio_decoder.h"
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module_typedefs.h"
+#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
+#include "webrtc/modules/audio_coding/codecs/isac/locked_bandwidth_info.h"
+#else
+// Dummy implementation, for when we don't have iSAC.
namespace webrtc {
+class LockedIsacBandwidthInfo {};
+}
+#endif
-class AudioDecoder;
-
+namespace webrtc {
namespace acm2 {
class CodecOwner {
@@ -29,12 +35,14 @@ class CodecOwner {
CodecOwner();
~CodecOwner();
- void SetEncoders(const CodecInst& speech_inst,
+ // Start using the specified encoder. Returns false on error.
+ // TODO(kwiberg): Don't handle errors here (bug 5033)
+ bool SetEncoders(const CodecInst& speech_inst,
int cng_payload_type,
ACMVADMode vad_mode,
- int red_payload_type);
+ int red_payload_type) WARN_UNUSED_RESULT;
- void SetEncoders(AudioEncoderMutable* external_speech_encoder,
+ void SetEncoders(AudioEncoder* external_speech_encoder,
int cng_payload_type,
ACMVADMode vad_mode,
int red_payload_type);
@@ -49,31 +57,28 @@ class CodecOwner {
AudioEncoder* Encoder();
const AudioEncoder* Encoder() const;
- AudioEncoderMutable* SpeechEncoder();
- const AudioEncoderMutable* SpeechEncoder() const;
private:
- // There are three main cases for the state of the encoder members below:
- // 1. An external encoder is used. |external_speech_encoder_| points to it.
- // |speech_encoder_| is null, and |isac_is_encoder_| is false.
- // 2. The internal iSAC codec is used as encoder. |isac_codec_| points to it
- // and |isac_is_encoder_| is true. |external_speech_encoder_| and
- // |speech_encoder_| are null.
- // 3. Another internal encoder is used. |speech_encoder_| points to it.
- // |external_speech_encoder_| is null, and |isac_is_encoder_| is false.
- // In addition to case 2, |isac_codec_| is valid when GetIsacDecoder has been
- // called.
- rtc::scoped_ptr<AudioEncoderMutable> speech_encoder_;
- rtc::scoped_ptr<AudioEncoderDecoderMutableIsac> isac_codec_;
- bool isac_is_encoder_;
- AudioEncoderMutable* external_speech_encoder_;
+ AudioEncoder* SpeechEncoder();
+ const AudioEncoder* SpeechEncoder() const;
+
+ // At most one of these is non-null:
+ rtc::scoped_ptr<AudioEncoder> speech_encoder_;
+ AudioEncoder* external_speech_encoder_;
+
+ // If we've created an iSAC decoder because someone called GetIsacDecoder,
+ // store it here.
+ rtc::scoped_ptr<AudioDecoder> isac_decoder_;
+
+ // iSAC bandwidth estimation info, for use with iSAC encoders and decoders.
+ LockedIsacBandwidthInfo isac_bandwidth_info_;
// |cng_encoder_| and |red_encoder_| are valid iff CNG or RED, respectively,
// are active.
rtc::scoped_ptr<AudioEncoder> cng_encoder_;
rtc::scoped_ptr<AudioEncoder> red_encoder_;
- DISALLOW_COPY_AND_ASSIGN(CodecOwner);
+ RTC_DISALLOW_COPY_AND_ASSIGN(CodecOwner);
};
} // namespace acm2
diff --git a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/codec_owner_unittest.cc b/chromium/third_party/webrtc/modules/audio_coding/main/acm2/codec_owner_unittest.cc
index a1366a9b885..6c232615a74 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/codec_owner_unittest.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/main/acm2/codec_owner_unittest.cc
@@ -8,6 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
+#include <cstring>
+
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/base/arraysize.h"
#include "webrtc/base/safe_conversions.h"
@@ -34,7 +36,8 @@ class CodecOwnerTest : public ::testing::Test {
CodecOwnerTest() : timestamp_(0) {}
void CreateCodec() {
- codec_owner_.SetEncoders(kDefaultCodecInst, kCngPt, VADNormal, -1);
+ ASSERT_TRUE(
+ codec_owner_.SetEncoders(kDefaultCodecInst, kCngPt, VADNormal, -1));
}
void EncodeAndVerify(size_t expected_out_length,
@@ -56,6 +59,41 @@ class CodecOwnerTest : public ::testing::Test {
encoded_info.send_even_if_empty);
}
+ // Verify that the speech encoder's Reset method is called when CNG or RED
+ // (or both) are switched on, but not when they're switched off.
+ void TestCngAndRedResetSpeechEncoder(bool use_cng, bool use_red) {
+ MockAudioEncoder speech_encoder;
+ EXPECT_CALL(speech_encoder, NumChannels())
+ .WillRepeatedly(Return(1));
+ EXPECT_CALL(speech_encoder, Max10MsFramesInAPacket())
+ .WillRepeatedly(Return(2));
+ EXPECT_CALL(speech_encoder, SampleRateHz())
+ .WillRepeatedly(Return(8000));
+ {
+ InSequence s;
+ EXPECT_CALL(speech_encoder, Mark("start off"));
+ EXPECT_CALL(speech_encoder, Mark("switch on"));
+ if (use_cng || use_red)
+ EXPECT_CALL(speech_encoder, Reset());
+ EXPECT_CALL(speech_encoder, Mark("start on"));
+ if (use_cng || use_red)
+ EXPECT_CALL(speech_encoder, Reset());
+ EXPECT_CALL(speech_encoder, Mark("switch off"));
+ EXPECT_CALL(speech_encoder, Die());
+ }
+
+ int cng_pt = use_cng ? 17 : -1;
+ int red_pt = use_red ? 19 : -1;
+ speech_encoder.Mark("start off");
+ codec_owner_.SetEncoders(&speech_encoder, -1, VADNormal, -1);
+ speech_encoder.Mark("switch on");
+ codec_owner_.ChangeCngAndRed(cng_pt, VADNormal, red_pt);
+ speech_encoder.Mark("start on");
+ codec_owner_.SetEncoders(&speech_encoder, cng_pt, VADNormal, red_pt);
+ speech_encoder.Mark("switch off");
+ codec_owner_.ChangeCngAndRed(-1, VADNormal, -1);
+ }
+
CodecOwner codec_owner_;
uint32_t timestamp_;
};
@@ -99,7 +137,7 @@ TEST_F(CodecOwnerTest, VerifyCngFrames) {
}
TEST_F(CodecOwnerTest, ExternalEncoder) {
- MockAudioEncoderMutable external_encoder;
+ MockAudioEncoder external_encoder;
codec_owner_.SetEncoders(&external_encoder, -1, VADNormal, -1);
const int kSampleRateHz = 8000;
const int kPacketSizeSamples = kSampleRateHz / 100;
@@ -115,35 +153,57 @@ TEST_F(CodecOwnerTest, ExternalEncoder) {
EXPECT_CALL(external_encoder,
EncodeInternal(0, audio, arraysize(encoded), encoded))
.WillOnce(Return(info));
- EXPECT_CALL(external_encoder, Reset());
- EXPECT_CALL(external_encoder, Reset());
+ EXPECT_CALL(external_encoder, Mark("A"));
+ EXPECT_CALL(external_encoder, Mark("B"));
info.encoded_timestamp = 2;
EXPECT_CALL(external_encoder,
EncodeInternal(2, audio, arraysize(encoded), encoded))
.WillOnce(Return(info));
- EXPECT_CALL(external_encoder, Reset());
+ EXPECT_CALL(external_encoder, Die());
}
info = codec_owner_.Encoder()->Encode(0, audio, arraysize(audio),
arraysize(encoded), encoded);
EXPECT_EQ(0u, info.encoded_timestamp);
- external_encoder.Reset(); // Dummy call to mark the sequence of expectations.
+ external_encoder.Mark("A");
// Change to internal encoder.
CodecInst codec_inst = kDefaultCodecInst;
codec_inst.pacsize = kPacketSizeSamples;
- codec_owner_.SetEncoders(codec_inst, -1, VADNormal, -1);
+ ASSERT_TRUE(codec_owner_.SetEncoders(codec_inst, -1, VADNormal, -1));
// Don't expect any more calls to the external encoder.
info = codec_owner_.Encoder()->Encode(1, audio, arraysize(audio),
arraysize(encoded), encoded);
- external_encoder.Reset(); // Dummy call to mark the sequence of expectations.
+ external_encoder.Mark("B");
// Change back to external encoder again.
codec_owner_.SetEncoders(&external_encoder, -1, VADNormal, -1);
info = codec_owner_.Encoder()->Encode(2, audio, arraysize(audio),
arraysize(encoded), encoded);
EXPECT_EQ(2u, info.encoded_timestamp);
- external_encoder.Reset(); // Dummy call to mark the sequence of expectations.
+}
+
+TEST_F(CodecOwnerTest, CngResetsSpeechEncoder) {
+ TestCngAndRedResetSpeechEncoder(true, false);
+}
+
+TEST_F(CodecOwnerTest, RedResetsSpeechEncoder) {
+ TestCngAndRedResetSpeechEncoder(false, true);
+}
+
+TEST_F(CodecOwnerTest, CngAndRedResetsSpeechEncoder) {
+ TestCngAndRedResetSpeechEncoder(true, true);
+}
+
+TEST_F(CodecOwnerTest, NoCngAndRedNoSpeechEncoderReset) {
+ TestCngAndRedResetSpeechEncoder(false, false);
+}
+
+TEST_F(CodecOwnerTest, SetEncodersError) {
+ CodecInst codec_inst = kDefaultCodecInst;
+ static const char bad_name[] = "Robert'); DROP TABLE Students;";
+ std::memcpy(codec_inst.plname, bad_name, sizeof bad_name);
+ EXPECT_FALSE(codec_owner_.SetEncoders(codec_inst, -1, VADNormal, -1));
}
} // namespace acm2
diff --git a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/dump.proto b/chromium/third_party/webrtc/modules/audio_coding/main/acm2/dump.proto
deleted file mode 100644
index 232faec4287..00000000000
--- a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/dump.proto
+++ /dev/null
@@ -1,169 +0,0 @@
-syntax = "proto2";
-option optimize_for = LITE_RUNTIME;
-package webrtc;
-
-// This is the main message to dump to a file, it can contain multiple event
-// messages, but it is possible to append multiple EventStreams (each with a
-// single event) to a file.
-// This has the benefit that there's no need to keep all data in memory.
-message ACMDumpEventStream {
- repeated ACMDumpEvent stream = 1;
-}
-
-
-message ACMDumpEvent {
- // required - Elapsed wallclock time in us since the start of the log.
- optional int64 timestamp_us = 1;
-
- // The different types of events that can occur, the UNKNOWN_EVENT entry
- // is added in case future EventTypes are added, in that case old code will
- // receive the new events as UNKNOWN_EVENT.
- enum EventType {
- UNKNOWN_EVENT = 0;
- RTP_EVENT = 1;
- DEBUG_EVENT = 2;
- CONFIG_EVENT = 3;
- }
-
- // required - Indicates the type of this event
- optional EventType type = 2;
-
- // optional - but required if type == RTP_EVENT
- optional ACMDumpRTPPacket packet = 3;
-
- // optional - but required if type == DEBUG_EVENT
- optional ACMDumpDebugEvent debug_event = 4;
-
- // optional - but required if type == CONFIG_EVENT
- optional ACMDumpConfigEvent config = 5;
-}
-
-
-message ACMDumpRTPPacket {
- // Indicates if the packet is incoming or outgoing with respect to the user
- // that is logging the data.
- enum Direction {
- UNKNOWN_DIRECTION = 0;
- OUTGOING = 1;
- INCOMING = 2;
- }
- enum PayloadType {
- UNKNOWN_TYPE = 0;
- AUDIO = 1;
- VIDEO = 2;
- RTX = 3;
- }
-
- // required
- optional Direction direction = 1;
-
- // required
- optional PayloadType type = 2;
-
- // required - Contains the whole RTP packet (header+payload).
- optional bytes RTP_data = 3;
-}
-
-
-message ACMDumpDebugEvent {
- // Indicates the type of the debug event.
- // LOG_START and LOG_END indicate the start and end of the log respectively.
- // AUDIO_PLAYOUT indicates a call to the PlayoutData10Ms() function in ACM.
- enum EventType {
- UNKNOWN_EVENT = 0;
- LOG_START = 1;
- LOG_END = 2;
- AUDIO_PLAYOUT = 3;
- }
-
- // required
- optional EventType type = 1;
-
- // An optional message that can be used to store additional information about
- // the debug event.
- optional string message = 2;
-}
-
-
-// TODO(terelius): Video and audio streams could in principle share SSRC,
-// so identifying a stream based only on SSRC might not work.
-// It might be better to use a combination of SSRC and media type
-// or SSRC and port number, but for now we will rely on SSRC only.
-message ACMDumpConfigEvent {
- // Synchronization source (stream identifier) to be received.
- optional uint32 remote_ssrc = 1;
-
- // RTX settings for incoming video payloads that may be received. RTX is
- // disabled if there's no config present.
- optional RtcpConfig rtcp_config = 3;
-
- // Map from video RTP payload type -> RTX config.
- repeated RtxMap rtx_map = 4;
-
- // RTP header extensions used for the received stream.
- repeated RtpHeaderExtension header_extensions = 5;
-
- // List of decoders associated with the stream.
- repeated DecoderConfig decoders = 6;
-}
-
-
-// Maps decoder names to payload types.
-message DecoderConfig {
- // required
- optional string name = 1;
-
- // required
- optional sint32 payload_type = 2;
-}
-
-
-// Maps RTP header extension names to numerical ids.
-message RtpHeaderExtension {
- // required
- optional string name = 1;
-
- // required
- optional sint32 id = 2;
-}
-
-
-// RTX settings for incoming video payloads that may be received.
-// RTX is disabled if there's no config present.
-message RtxConfig {
- // required - SSRCs to use for the RTX streams.
- optional uint32 ssrc = 1;
-
- // required - Payload type to use for the RTX stream.
- optional sint32 payload_type = 2;
-}
-
-
-message RtxMap {
- // required
- optional sint32 payload_type = 1;
-
- // required
- optional RtxConfig config = 2;
-}
-
-
-// Configuration information for RTCP.
-// For bandwidth estimation purposes it is more interesting to log the
-// RTCP messages that the sender receives, but we will support logging
-// at the receiver side too.
-message RtcpConfig {
- // Sender SSRC used for sending RTCP (such as receiver reports).
- optional uint32 local_ssrc = 1;
-
- // RTCP mode to use. Compound mode is described by RFC 4585 and reduced-size
- // RTCP mode is described by RFC 5506.
- enum RtcpMode {RTCP_COMPOUND = 1; RTCP_REDUCEDSIZE = 2;}
- optional RtcpMode rtcp_mode = 2;
-
- // Extended RTCP settings.
- optional bool receiver_reference_time_report = 3;
-
- // Receiver estimated maximum bandwidth.
- optional bool remb = 4;
-}
diff --git a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/audio_coding_module.gypi b/chromium/third_party/webrtc/modules/audio_coding/main/audio_coding_module.gypi
index 13b1d4e73fd..7370836e8b5 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/main/acm2/audio_coding_module.gypi
+++ b/chromium/third_party/webrtc/modules/audio_coding/main/audio_coding_module.gypi
@@ -11,12 +11,7 @@
'audio_coding_dependencies': [
'cng',
'g711',
- 'g722',
- 'ilbc',
- 'isac',
- 'isac_fix',
'pcm16b',
- 'red',
'<(webrtc_root)/common.gyp:webrtc_common',
'<(webrtc_root)/common_audio/common_audio.gyp:common_audio',
'<(webrtc_root)/system_wrappers/system_wrappers.gyp:system_wrappers',
@@ -27,6 +22,23 @@
'audio_coding_dependencies': ['webrtc_opus',],
'audio_coding_defines': ['WEBRTC_CODEC_OPUS',],
}],
+ ['build_with_mozilla==0', {
+ 'conditions': [
+ ['target_arch=="arm"', {
+ 'audio_coding_dependencies': ['isac_fix',],
+ 'audio_coding_defines': ['WEBRTC_CODEC_ISACFX',],
+ }, {
+ 'audio_coding_dependencies': ['isac',],
+ 'audio_coding_defines': ['WEBRTC_CODEC_ISAC',],
+ }],
+ ],
+ 'audio_coding_dependencies': ['g722',],
+ 'audio_coding_defines': ['WEBRTC_CODEC_G722',],
+ }],
+ ['build_with_mozilla==0 and build_with_chromium==0', {
+ 'audio_coding_dependencies': ['ilbc', 'red',],
+ 'audio_coding_defines': ['WEBRTC_CODEC_ILBC', 'WEBRTC_CODEC_RED',],
+ }],
],
},
'targets': [
@@ -39,79 +51,48 @@
'dependencies': [
'<@(audio_coding_dependencies)',
'<(webrtc_root)/common.gyp:webrtc_common',
+ '<(webrtc_root)/webrtc.gyp:rtc_event_log',
'neteq',
],
'include_dirs': [
- '../interface',
- '../../../interface',
+ 'interface',
+ '../../interface',
'<(webrtc_root)',
],
'direct_dependent_settings': {
'include_dirs': [
- '../interface',
- '../../../interface',
+ 'interface',
+ '../../interface',
'<(webrtc_root)',
],
},
'sources': [
- '../interface/audio_coding_module.h',
- '../interface/audio_coding_module_typedefs.h',
- 'acm_codec_database.cc',
- 'acm_codec_database.h',
- 'acm_common_defs.h',
- 'acm_receiver.cc',
- 'acm_receiver.h',
- 'acm_resampler.cc',
- 'acm_resampler.h',
- 'audio_coding_module.cc',
- 'audio_coding_module_impl.cc',
- 'audio_coding_module_impl.h',
- 'call_statistics.cc',
- 'call_statistics.h',
- 'codec_manager.cc',
- 'codec_manager.h',
- 'codec_owner.cc',
- 'codec_owner.h',
- 'initial_delay_manager.cc',
- 'initial_delay_manager.h',
- 'nack.cc',
- 'nack.h',
- ],
- },
- {
- 'target_name': 'acm_dump',
- 'type': 'static_library',
- 'conditions': [
- ['enable_protobuf==1', {
- 'defines': ['RTC_AUDIOCODING_DEBUG_DUMP'],
- 'dependencies': ['acm_dump_proto'],
- }
- ],
- ],
- 'sources': [
- 'acm_dump.h',
- 'acm_dump.cc'
+ 'acm2/acm_codec_database.cc',
+ 'acm2/acm_codec_database.h',
+ 'acm2/acm_common_defs.h',
+ 'acm2/acm_receiver.cc',
+ 'acm2/acm_receiver.h',
+ 'acm2/acm_resampler.cc',
+ 'acm2/acm_resampler.h',
+ 'acm2/audio_coding_module.cc',
+ 'acm2/audio_coding_module_impl.cc',
+ 'acm2/audio_coding_module_impl.h',
+ 'acm2/call_statistics.cc',
+ 'acm2/call_statistics.h',
+ 'acm2/codec_manager.cc',
+ 'acm2/codec_manager.h',
+ 'acm2/codec_owner.cc',
+ 'acm2/codec_owner.h',
+ 'acm2/initial_delay_manager.cc',
+ 'acm2/initial_delay_manager.h',
+ 'acm2/nack.cc',
+ 'acm2/nack.h',
+ 'interface/audio_coding_module.h',
+ 'interface/audio_coding_module_typedefs.h',
],
},
],
'conditions': [
- ['enable_protobuf==1', {
- 'targets': [
- {
- 'target_name': 'acm_dump_proto',
- 'type': 'static_library',
- 'sources': ['dump.proto',],
- 'variables': {
- 'proto_in_dir': '.',
- # Workaround to protect against gyp's pathname relativization when
- # this file is included by modules.gyp.
- 'proto_out_protected': 'webrtc/audio_coding',
- 'proto_out_dir': '<(proto_out_protected)',
- },
- 'includes': ['../../../../build/protoc.gypi',],
- },
- ]
- }],
['include_tests==1', {
'targets': [
{
@@ -127,10 +108,10 @@
'<(DEPTH)/testing/gtest.gyp:gtest',
],
'sources': [
- 'acm_receive_test.cc',
- 'acm_receive_test.h',
- 'acm_receive_test_oldapi.cc',
- 'acm_receive_test_oldapi.h',
+ 'acm2/acm_receive_test.cc',
+ 'acm2/acm_receive_test.h',
+ 'acm2/acm_receive_test_oldapi.cc',
+ 'acm2/acm_receive_test_oldapi.h',
],
}, # acm_receive_test
{
@@ -146,10 +127,10 @@
'<(DEPTH)/testing/gtest.gyp:gtest',
],
'sources': [
- 'acm_send_test.cc',
- 'acm_send_test.h',
- 'acm_send_test_oldapi.cc',
- 'acm_send_test_oldapi.h',
+ 'acm2/acm_send_test.cc',
+ 'acm2/acm_send_test.h',
+ 'acm2/acm_send_test_oldapi.cc',
+ 'acm2/acm_send_test_oldapi.h',
],
}, # acm_send_test
{
@@ -165,10 +146,10 @@
'<(DEPTH)/third_party/gflags/gflags.gyp:gflags',
],
'sources': [
- '../test/delay_test.cc',
- '../test/Channel.cc',
- '../test/PCMFile.cc',
- '../test/utility.cc',
+ 'test/delay_test.cc',
+ 'test/Channel.cc',
+ 'test/PCMFile.cc',
+ 'test/utility.cc',
],
}, # delay_test
{
@@ -184,9 +165,9 @@
'<(DEPTH)/third_party/gflags/gflags.gyp:gflags',
],
'sources': [
- '../test/insert_packet_with_timing.cc',
- '../test/Channel.cc',
- '../test/PCMFile.cc',
+ 'test/insert_packet_with_timing.cc',
+ 'test/Channel.cc',
+ 'test/PCMFile.cc',
],
}, # delay_test
],
diff --git a/chromium/third_party/webrtc/modules/audio_coding/main/interface/audio_coding_module.h b/chromium/third_party/webrtc/modules/audio_coding/main/interface/audio_coding_module.h
index b7d9a91b2f8..d0b7b03c43c 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/main/interface/audio_coding_module.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/main/interface/audio_coding_module.h
@@ -26,9 +26,10 @@ namespace webrtc {
// forward declarations
struct CodecInst;
struct WebRtcRTPHeader;
+class AudioDecoder;
+class AudioEncoder;
class AudioFrame;
class RTPFragmentationHeader;
-class AudioEncoderMutable;
#define WEBRTC_10MS_PCM_AUDIO 960 // 16 bits super wideband 48 kHz
@@ -45,15 +46,6 @@ class AudioPacketizationCallback {
const RTPFragmentationHeader* fragmentation) = 0;
};
-// Callback class used for inband Dtmf detection
-class AudioCodingFeedback {
- public:
- virtual ~AudioCodingFeedback() {}
-
- virtual int32_t IncomingDtmf(const uint8_t digit_dtmf,
- const bool end) = 0;
-};
-
// Callback class used for reporting VAD decision
class ACMVADCallback {
public:
@@ -62,29 +54,13 @@ class ACMVADCallback {
virtual int32_t InFrameType(FrameType frame_type) = 0;
};
-// Callback class used for reporting receiver statistics
-class ACMVQMonCallback {
- public:
- virtual ~ACMVQMonCallback() {}
-
- virtual int32_t NetEqStatistics(
- const int32_t id, // current ACM id
- const uint16_t MIUsValid, // valid voice duration in ms
- const uint16_t MIUsReplaced, // concealed voice duration in ms
- const uint8_t eventFlags, // concealed voice flags
- const uint16_t delayMS) = 0; // average delay in ms
-};
-
class AudioCodingModule {
protected:
AudioCodingModule() {}
public:
struct Config {
- Config()
- : id(0),
- neteq_config(),
- clock(Clock::GetRealTimeClock()) {}
+ Config() : id(0), neteq_config(), clock(Clock::GetRealTimeClock()) {}
int id;
NetEq::Config neteq_config;
@@ -101,7 +77,7 @@ class AudioCodingModule {
static AudioCodingModule* Create(int id);
static AudioCodingModule* Create(int id, Clock* clock);
static AudioCodingModule* Create(const Config& config);
- virtual ~AudioCodingModule() {};
+ virtual ~AudioCodingModule() = default;
///////////////////////////////////////////////////////////////////////////
// Utility functions
@@ -193,17 +169,6 @@ class AudioCodingModule {
//
///////////////////////////////////////////////////////////////////////////
- // int32_t ResetEncoder()
- // This API resets the states of encoder. All the encoder settings, such as
- // send-codec or VAD/DTX, will be preserved.
- //
- // Return value:
- // -1 if failed to initialize,
- // 0 if succeeded.
- //
- virtual int32_t ResetEncoder() = 0;
-
- ///////////////////////////////////////////////////////////////////////////
// int32_t RegisterSendCodec()
// Registers a codec, specified by |send_codec|, as sending codec.
// This API can be called multiple of times to register Codec. The last codec
@@ -235,7 +200,7 @@ class AudioCodingModule {
// Registers |external_speech_encoder| as encoder. The new encoder will
// replace any previously registered speech encoder (internal or external).
virtual void RegisterExternalSendCodec(
- AudioEncoderMutable* external_speech_encoder) = 0;
+ AudioEncoder* external_speech_encoder) = 0;
///////////////////////////////////////////////////////////////////////////
// int32_t SendCodec()
@@ -261,38 +226,10 @@ class AudioCodingModule {
virtual int32_t SendFrequency() const = 0;
///////////////////////////////////////////////////////////////////////////
- // int32_t Bitrate()
- // Get encoding bit-rate in bits per second.
- //
- // Return value:
- // positive; encoding rate in bits/sec,
- // -1 if an error is happened.
- //
- virtual int32_t SendBitrate() const = 0;
-
- ///////////////////////////////////////////////////////////////////////////
// Sets the bitrate to the specified value in bits/sec. If the value is not
// supported by the codec, it will choose another appropriate value.
virtual void SetBitRate(int bitrate_bps) = 0;
- ///////////////////////////////////////////////////////////////////////////
- // int32_t SetReceivedEstimatedBandwidth()
- // Set available bandwidth [bits/sec] of the up-link channel.
- // This information is used for traffic shaping, and is currently only
- // supported if iSAC is the send codec.
- //
- // Input:
- // -bw : bandwidth in bits/sec estimated for
- // up-link.
- // Return value
- // -1 if error occurred in setting the bandwidth,
- // 0 bandwidth is set successfully.
- //
- // TODO(henrik.lundin) Unused. Remove?
- virtual int32_t SetReceivedEstimatedBandwidth(
- const int32_t bw) = 0;
-
- ///////////////////////////////////////////////////////////////////////////
// int32_t RegisterTransportCallback()
// Register a transport callback which will be called to deliver
// the encoded buffers whenever Process() is called and a
@@ -465,39 +402,6 @@ class AudioCodingModule {
ACMVADMode* vad_mode) const = 0;
///////////////////////////////////////////////////////////////////////////
- // int32_t ReplaceInternalDTXWithWebRtc()
- // Used to replace codec internal DTX scheme with WebRtc.
- //
- // Input:
- // -use_webrtc_dtx : if false (default) the codec built-in DTX/VAD
- // scheme is used, otherwise the internal DTX is
- // replaced with WebRtc DTX/VAD.
- //
- // Return value:
- // -1 if failed to replace codec internal DTX with WebRtc,
- // 0 if succeeded.
- //
- virtual int32_t ReplaceInternalDTXWithWebRtc(
- const bool use_webrtc_dtx = false) = 0;
-
- ///////////////////////////////////////////////////////////////////////////
- // int32_t IsInternalDTXReplacedWithWebRtc()
- // Get status if the codec internal DTX is replaced with WebRtc DTX.
- // This should always be true if codec does not have an internal DTX.
- //
- // Output:
- // -uses_webrtc_dtx : is set to true if the codec internal DTX is
- // replaced with WebRtc DTX/VAD, otherwise it is set
- // to false.
- //
- // Return value:
- // -1 if failed to determine if codec internal DTX is replaced with WebRtc,
- // 0 if succeeded.
- //
- virtual int32_t IsInternalDTXReplacedWithWebRtc(
- bool* uses_webrtc_dtx) = 0;
-
- ///////////////////////////////////////////////////////////////////////////
// int32_t RegisterVADCallback()
// Call this method to register a callback function which is called
// any time that ACM encounters an empty frame. That is a frame which is
@@ -533,17 +437,6 @@ class AudioCodingModule {
virtual int32_t InitializeReceiver() = 0;
///////////////////////////////////////////////////////////////////////////
- // int32_t ResetDecoder()
- // This API resets the states of decoders. ACM will not lose any
- // decoder-related settings, such as registered codecs.
- //
- // Return value:
- // -1 if failed to initialize,
- // 0 if succeeded.
- //
- virtual int32_t ResetDecoder() = 0;
-
- ///////////////////////////////////////////////////////////////////////////
// int32_t ReceiveFrequency()
// Get sampling frequency of the last received payload.
//
@@ -576,8 +469,12 @@ class AudioCodingModule {
// -1 if failed to register the codec
// 0 if the codec registered successfully.
//
- virtual int32_t RegisterReceiveCodec(
- const CodecInst& receive_codec) = 0;
+ virtual int RegisterReceiveCodec(const CodecInst& receive_codec) = 0;
+
+ virtual int RegisterExternalReceiveCodec(int rtp_payload_type,
+ AudioDecoder* external_decoder,
+ int sample_rate_hz,
+ int num_channels) = 0;
///////////////////////////////////////////////////////////////////////////
// int32_t UnregisterReceiveCodec()
@@ -693,31 +590,6 @@ class AudioCodingModule {
virtual int LeastRequiredDelayMs() const = 0;
///////////////////////////////////////////////////////////////////////////
- // int32_t SetDtmfPlayoutStatus()
- // Configure DTMF playout, i.e. whether out-of-band
- // DTMF tones are played or not.
- //
- // Input:
- // -enable : if true to enable playout out-of-band DTMF tones,
- // false to disable.
- //
- // Return value:
- // -1 if the method fails, e.g. DTMF playout is not supported.
- // 0 if the status is set successfully.
- //
- virtual int32_t SetDtmfPlayoutStatus(const bool enable) = 0;
-
- ///////////////////////////////////////////////////////////////////////////
- // bool DtmfPlayoutStatus()
- // Get Dtmf playout status.
- //
- // Return value:
- // true if out-of-band Dtmf tones are played,
- // false if playout of Dtmf tones is disabled.
- //
- virtual bool DtmfPlayoutStatus() const = 0;
-
- ///////////////////////////////////////////////////////////////////////////
// int32_t PlayoutTimestamp()
// The send timestamp of an RTP packet is associated with the decoded
// audio of the packet in question. This function returns the timestamp of
@@ -734,56 +606,6 @@ class AudioCodingModule {
virtual int32_t PlayoutTimestamp(uint32_t* timestamp) = 0;
///////////////////////////////////////////////////////////////////////////
- // int32_t DecoderEstimatedBandwidth()
- // Get the estimate of the Bandwidth, in bits/second, based on the incoming
- // stream. This API is useful in one-way communication scenarios, where
- // the bandwidth information is sent in an out-of-band fashion.
- // Currently only supported if iSAC is registered as a receiver.
- //
- // Return value:
- // >0 bandwidth in bits/second.
- // -1 if failed to get a bandwidth estimate.
- //
- virtual int32_t DecoderEstimatedBandwidth() const = 0;
-
- ///////////////////////////////////////////////////////////////////////////
- // int32_t SetPlayoutMode()
- // Call this API to set the playout mode. Playout mode could be optimized
- // for i) voice, ii) FAX or iii) streaming. In Voice mode, NetEQ is
- // optimized to deliver highest audio quality while maintaining a minimum
- // delay. In FAX mode, NetEQ is optimized to have few delay changes as
- // possible and maintain a constant delay, perhaps large relative to voice
- // mode, to avoid PLC. In streaming mode, we tolerate a little more delay
- // to achieve better jitter robustness.
- //
- // Input:
- // -mode : playout mode. Possible inputs are:
- // "voice",
- // "fax" and
- // "streaming".
- //
- // Return value:
- // -1 if failed to set the mode,
- // 0 if succeeding.
- //
- virtual int32_t SetPlayoutMode(const AudioPlayoutMode mode) = 0;
-
- ///////////////////////////////////////////////////////////////////////////
- // AudioPlayoutMode PlayoutMode()
- // Get playout mode, i.e. whether it is speech, FAX or streaming. See
- // audio_coding_module_typedefs.h for definition of AudioPlayoutMode.
- //
- // Return value:
- // voice: is for voice output,
- // fax: a mode that is optimized for receiving FAX signals.
- // In this mode NetEq tries to maintain a constant high
- // delay to avoid PLC if possible.
- // streaming: a mode that is suitable for streaming. In this mode we
- // accept longer delay to improve jitter robustness.
- //
- virtual AudioPlayoutMode PlayoutMode() const = 0;
-
- ///////////////////////////////////////////////////////////////////////////
// int32_t PlayoutData10Ms(
// Get 10 milliseconds of raw audio data for playout, at the given sampling
// frequency. ACM will perform a resampling if required.
@@ -811,69 +633,6 @@ class AudioCodingModule {
//
///////////////////////////////////////////////////////////////////////////
- // int32_t SetISACMaxRate()
- // Set the maximum instantaneous rate of iSAC. For a payload of B bits
- // with a frame-size of T sec the instantaneous rate is B/T bits per
- // second. Therefore, (B/T < |max_rate_bps|) and
- // (B < |max_payload_len_bytes| * 8) are always satisfied for iSAC payloads,
- // c.f SetISACMaxPayloadSize().
- //
- // Input:
- // -max_rate_bps : maximum instantaneous bit-rate given in bits/sec.
- //
- // Return value:
- // -1 if failed to set the maximum rate.
- // 0 if the maximum rate is set successfully.
- //
- virtual int SetISACMaxRate(int max_rate_bps) = 0;
-
- ///////////////////////////////////////////////////////////////////////////
- // int32_t SetISACMaxPayloadSize()
- // Set the maximum payload size of iSAC packets. No iSAC payload,
- // regardless of its frame-size, may exceed the given limit. For
- // an iSAC payload of size B bits and frame-size T seconds we have;
- // (B < |max_payload_len_bytes| * 8) and (B/T < |max_rate_bps|), c.f.
- // SetISACMaxRate().
- //
- // Input:
- // -max_payload_len_bytes : maximum payload size in bytes.
- //
- // Return value:
- // -1 if failed to set the maximum payload-size.
- // 0 if the given length is set successfully.
- //
- virtual int SetISACMaxPayloadSize(int max_payload_len_bytes) = 0;
-
- ///////////////////////////////////////////////////////////////////////////
- // int32_t ConfigISACBandwidthEstimator()
- // Call this function to configure the bandwidth estimator of ISAC.
- // During the adaptation of bit-rate, iSAC automatically adjusts the
- // frame-size (either 30 or 60 ms) to save on RTP header. The initial
- // frame-size can be specified by the first argument. The configuration also
- // regards the initial estimate of bandwidths. The estimator starts from
- // this point and converges to the actual bottleneck. This is given by the
- // second parameter. Furthermore, it is also possible to control the
- // adaptation of frame-size. This is specified by the last parameter.
- //
- // Input:
- // -init_frame_size_ms : initial frame-size in milliseconds. For iSAC-wb
- // 30 ms and 60 ms (default) are acceptable values,
- // and for iSAC-swb 30 ms is the only acceptable
- // value. Zero indicates default value.
- // -init_rate_bps : initial estimate of the bandwidth. Values
- // between 10000 and 58000 are acceptable.
- // -enforce_srame_size : if true, the frame-size will not be adapted.
- //
- // Return value:
- // -1 if failed to configure the bandwidth estimator,
- // 0 if the configuration was successfully applied.
- //
- virtual int32_t ConfigISACBandwidthEstimator(
- int init_frame_size_ms,
- int init_rate_bps,
- bool enforce_frame_size = false) = 0;
-
- ///////////////////////////////////////////////////////////////////////////
// int SetOpusApplication()
// Sets the intended application if current send codec is Opus. Opus uses this
// to optimize the encoding for applications like VOIP and music. Currently,
@@ -1014,7 +773,6 @@ class AudioCoding {
Clock* clock;
AudioPacketizationCallback* transport;
ACMVADCallback* vad_callback;
- bool play_dtmf;
int initial_playout_delay_ms;
int playout_channels;
int playout_frequency_hz;
diff --git a/chromium/third_party/webrtc/modules/audio_coding/main/interface/audio_coding_module_typedefs.h b/chromium/third_party/webrtc/modules/audio_coding/main/interface/audio_coding_module_typedefs.h
index ee7a2f1340b..22b6e7800f3 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/main/interface/audio_coding_module_typedefs.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/main/interface/audio_coding_module_typedefs.h
@@ -19,60 +19,6 @@
namespace webrtc {
///////////////////////////////////////////////////////////////////////////
-// enum AudioPlayoutMode
-// An enumerator for different playout modes.
-//
-// -voice : This is the standard mode for VoIP calls. The trade-off
-// between low delay and jitter robustness is optimized
-// for high-quality two-way communication.
-// NetEQs packet loss concealment and signal processing
-// capabilities are fully employed.
-// -fax : The fax mode is optimized for decodability of fax signals
-// rather than for perceived audio quality. When this mode
-// is selected, NetEQ will do as few delay changes as possible,
-// trying to maintain a high and constant delay. Meanwhile,
-// the packet loss concealment efforts are reduced.
-//
-// -streaming : In the case of one-way communication such as passive
-// conference participant, a webinar, or a streaming application,
-// this mode can be used to improve the jitter robustness at
-// the cost of increased delay.
-// -off : Turns off most of NetEQ's features. Stuffs zeros for lost
-// packets and during buffer increases.
-//
-enum AudioPlayoutMode {
- voice = 0,
- fax = 1,
- streaming = 2,
- off = 3,
-};
-
-///////////////////////////////////////////////////////////////////////////
-// enum ACMSpeechType
-// An enumerator for possible labels of a decoded frame.
-//
-// -normal : a normal speech frame. If VAD is enabled on the
-// incoming stream this label indicate that the
-// frame is active.
-// -PLC : a PLC frame. The corresponding packet was lost
-// and this frame generated by PLC techniques.
-// -CNG : the frame is comfort noise. This happens if VAD
-// is enabled at the sender and we have received
-// SID.
-// -PLCCNG : PLC will fade to comfort noise if the duration
-// of PLC is long. This labels such a case.
-// -VADPassive : the VAD at the receiver recognizes this frame as
-// passive.
-//
-enum ACMSpeechType {
- normal = 0,
- PLC = 1,
- CNG = 2,
- PLCCNG = 3,
- VADPassive = 4
-};
-
-///////////////////////////////////////////////////////////////////////////
// enum ACMVADMode
// An enumerator for aggressiveness of VAD
// -VADNormal : least aggressive mode.
@@ -89,76 +35,6 @@ enum ACMVADMode {
};
///////////////////////////////////////////////////////////////////////////
-// enum ACMCountries
-// An enumerator for countries, used when enabling CPT for a specific country.
-//
-enum ACMCountries {
- ACMDisableCountryDetection = -1, // disable CPT detection
- ACMUSA = 0,
- ACMJapan,
- ACMCanada,
- ACMFrance,
- ACMGermany,
- ACMAustria,
- ACMBelgium,
- ACMUK,
- ACMCzech,
- ACMDenmark,
- ACMFinland,
- ACMGreece,
- ACMHungary,
- ACMIceland,
- ACMIreland,
- ACMItaly,
- ACMLuxembourg,
- ACMMexico,
- ACMNorway,
- ACMPoland,
- ACMPortugal,
- ACMSpain,
- ACMSweden,
- ACMTurkey,
- ACMChina,
- ACMHongkong,
- ACMTaiwan,
- ACMKorea,
- ACMSingapore,
- ACMNonStandard1
-// non-standard countries
-};
-
-///////////////////////////////////////////////////////////////////////////
-// enum ACMAMRPackingFormat
-// An enumerator for different bit-packing format of AMR codec according to
-// RFC 3267.
-//
-// -AMRUndefined : undefined.
-// -AMRBandwidthEfficient : bandwidth-efficient mode.
-// -AMROctetAlligned : Octet-alligned mode.
-// -AMRFileStorage : file-storage mode.
-//
-enum ACMAMRPackingFormat {
- AMRUndefined = -1,
- AMRBandwidthEfficient = 0,
- AMROctetAlligned = 1,
- AMRFileStorage = 2
-};
-
-///////////////////////////////////////////////////////////////////////////
-//
-// Enumeration of background noise mode a mapping from NetEQ interface.
-//
-// -On : default "normal" behavior with eternal noise
-// -Fade : noise fades to zero after some time
-// -Off : background noise is always zero
-//
-enum ACMBackgroundNoiseMode {
- On,
- Fade,
- Off
-};
-
-///////////////////////////////////////////////////////////////////////////
//
// Enumeration of Opus mode for intended application.
//
diff --git a/chromium/third_party/webrtc/modules/audio_coding/main/test/APITest.cc b/chromium/third_party/webrtc/modules/audio_coding/main/test/APITest.cc
index 1cdf6c753ae..c84d3c076a4 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/main/test/APITest.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/main/test/APITest.cc
@@ -86,7 +86,6 @@ APITest::APITest(const Config& config)
_dotMoveDirectionA(1),
_dotPositionB(39),
_dotMoveDirectionB(-1),
- _dtmfCallback(NULL),
_vadCallbackA(NULL),
_vadCallbackB(NULL),
_apiTestRWLock(*RWLockWrapper::CreateRWLock()),
@@ -125,7 +124,6 @@ APITest::~APITest() {
_inFileB.Close();
_outFileB.Close();
- DELETE_POINTER(_dtmfCallback);
DELETE_POINTER(_vadCallbackA);
DELETE_POINTER(_vadCallbackB);
@@ -290,9 +288,6 @@ int16_t APITest::SetUp() {
}
}
-#ifdef WEBRTC_DTMF_DETECTION
- _dtmfCallback = new DTMFDetector;
-#endif
_vadCallbackA = new VADCallback;
_vadCallbackB = new VADCallback;
@@ -429,7 +424,7 @@ void APITest::RunTest(char thread) {
{
WriteLockScoped cs(_apiTestRWLock);
if (thread == 'A') {
- _testNumA = (_testNumB + 1 + (rand() % 4)) % 5;
+ _testNumA = (_testNumB + 1 + (rand() % 3)) % 4;
testNum = _testNumA;
_movingDot[_dotPositionA] = ' ';
@@ -442,7 +437,7 @@ void APITest::RunTest(char thread) {
_dotPositionA += _dotMoveDirectionA;
_movingDot[_dotPositionA] = (_dotMoveDirectionA > 0) ? '>' : '<';
} else {
- _testNumB = (_testNumA + 1 + (rand() % 4)) % 5;
+ _testNumB = (_testNumA + 1 + (rand() % 3)) % 4;
testNum = _testNumB;
_movingDot[_dotPositionB] = ' ';
@@ -464,18 +459,15 @@ void APITest::RunTest(char thread) {
ChangeCodec('A');
break;
case 1:
- TestPlayout('B');
- break;
- case 2:
if (!_randomTest) {
fprintf(stdout, "\nTesting Delay ...\n");
}
TestDelay('A');
break;
- case 3:
+ case 2:
TestSendVAD('A');
break;
- case 4:
+ case 3:
TestRegisteration('A');
break;
default:
@@ -498,7 +490,6 @@ bool APITest::APIRunA() {
} else {
CurrentCodec('A');
ChangeCodec('A');
- TestPlayout('B');
if (_codecCntrA == 0) {
fprintf(stdout, "\nTesting Delay ...\n");
TestDelay('A');
@@ -927,67 +918,6 @@ void APITest::TestRegisteration(char sendSide) {
}
}
-// Playout Mode, background noise mode.
-// Receiver Frequency, playout frequency.
-void APITest::TestPlayout(char receiveSide) {
- AudioCodingModule* receiveACM;
- AudioPlayoutMode* playoutMode = NULL;
- switch (receiveSide) {
- case 'A': {
- receiveACM = _acmA.get();
- playoutMode = &_playoutModeA;
- break;
- }
- case 'B': {
- receiveACM = _acmB.get();
- playoutMode = &_playoutModeB;
- break;
- }
- default:
- receiveACM = _acmA.get();
- }
-
- int32_t receiveFreqHz = receiveACM->ReceiveFrequency();
- int32_t playoutFreqHz = receiveACM->PlayoutFrequency();
-
- CHECK_ERROR_MT(receiveFreqHz);
- CHECK_ERROR_MT(playoutFreqHz);
-
-
- char playoutString[25];
- switch (*playoutMode) {
- case voice: {
- *playoutMode = fax;
- strncpy(playoutString, "FAX", 25);
- break;
- }
- case fax: {
- *playoutMode = streaming;
- strncpy(playoutString, "Streaming", 25);
- break;
- }
- case streaming: {
- *playoutMode = voice;
- strncpy(playoutString, "Voice", 25);
- break;
- }
- default:
- *playoutMode = voice;
- strncpy(playoutString, "Voice", 25);
- }
- CHECK_ERROR_MT(receiveACM->SetPlayoutMode(*playoutMode));
- playoutString[24] = '\0';
-
- if (!_randomTest) {
- fprintf(stdout, "\n");
- fprintf(stdout, "In Side %c\n", receiveSide);
- fprintf(stdout, "---------------------------------\n");
- fprintf(stdout, "Receive Frequency....... %d Hz\n", receiveFreqHz);
- fprintf(stdout, "Playout Frequency....... %d Hz\n", playoutFreqHz);
- fprintf(stdout, "Audio Playout Mode...... %s\n", playoutString);
- }
-}
-
void APITest::TestSendVAD(char side) {
if (_randomTest) {
return;
@@ -1129,7 +1059,6 @@ void APITest::ChangeCodec(char side) {
myChannel = _channel_B2A;
}
- myACM->ResetEncoder();
Wait(100);
// Register the next codec
diff --git a/chromium/third_party/webrtc/modules/audio_coding/main/test/APITest.h b/chromium/third_party/webrtc/modules/audio_coding/main/test/APITest.h
index c287243810b..67261dcbcdd 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/main/test/APITest.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/main/test/APITest.h
@@ -141,9 +141,6 @@ class APITest : public ACMTest {
int32_t _minDelayB;
bool _payloadUsed[32];
- AudioPlayoutMode _playoutModeA;
- AudioPlayoutMode _playoutModeB;
-
bool _verbose;
int _dotPositionA;
@@ -153,7 +150,6 @@ class APITest : public ACMTest {
char _movingDot[41];
- DTMFDetector* _dtmfCallback;
VADCallback* _vadCallbackA;
VADCallback* _vadCallbackB;
RWLockWrapper& _apiTestRWLock;
diff --git a/chromium/third_party/webrtc/modules/audio_coding/main/test/PCMFile.cc b/chromium/third_party/webrtc/modules/audio_coding/main/test/PCMFile.cc
index 4f46098a7bd..d0ae7830de4 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/main/test/PCMFile.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/main/test/PCMFile.cc
@@ -150,8 +150,7 @@ void PCMFile::Write10MsData(AudioFrame& audio_frame) {
}
} else {
int16_t* stereo_audio = new int16_t[2 * audio_frame.samples_per_channel_];
- int k;
- for (k = 0; k < audio_frame.samples_per_channel_; k++) {
+ for (size_t k = 0; k < audio_frame.samples_per_channel_; k++) {
stereo_audio[k << 1] = audio_frame.data_[k];
stereo_audio[(k << 1) + 1] = audio_frame.data_[k];
}
@@ -173,7 +172,7 @@ void PCMFile::Write10MsData(AudioFrame& audio_frame) {
}
}
-void PCMFile::Write10MsData(int16_t* playout_buffer, uint16_t length_smpls) {
+void PCMFile::Write10MsData(int16_t* playout_buffer, size_t length_smpls) {
if (fwrite(playout_buffer, sizeof(uint16_t), length_smpls, pcm_file_) !=
length_smpls) {
return;
diff --git a/chromium/third_party/webrtc/modules/audio_coding/main/test/PCMFile.h b/chromium/third_party/webrtc/modules/audio_coding/main/test/PCMFile.h
index c4487b81332..8353898f037 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/main/test/PCMFile.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/main/test/PCMFile.h
@@ -36,7 +36,7 @@ class PCMFile {
int32_t Read10MsData(AudioFrame& audio_frame);
- void Write10MsData(int16_t *playout_buffer, uint16_t length_smpls);
+ void Write10MsData(int16_t *playout_buffer, size_t length_smpls);
void Write10MsData(AudioFrame& audio_frame);
uint16_t PayloadLength10Ms() const;
diff --git a/chromium/third_party/webrtc/modules/audio_coding/main/test/SpatialAudio.cc b/chromium/third_party/webrtc/modules/audio_coding/main/test/SpatialAudio.cc
index b28c510a566..134d9755e5d 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/main/test/SpatialAudio.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/main/test/SpatialAudio.cc
@@ -159,13 +159,13 @@ void SpatialAudio::EncodeDecode(const double leftPanning,
while (!_inFile.EndOfFile()) {
_inFile.Read10MsData(audioFrame);
- for (int n = 0; n < audioFrame.samples_per_channel_; n++) {
+ for (size_t n = 0; n < audioFrame.samples_per_channel_; n++) {
audioFrame.data_[n] = (int16_t) floor(
audioFrame.data_[n] * leftPanning + 0.5);
}
CHECK_ERROR(_acmLeft->Add10MsData(audioFrame));
- for (int n = 0; n < audioFrame.samples_per_channel_; n++) {
+ for (size_t n = 0; n < audioFrame.samples_per_channel_; n++) {
audioFrame.data_[n] = (int16_t) floor(
audioFrame.data_[n] * rightToLeftRatio + 0.5);
}
diff --git a/chromium/third_party/webrtc/modules/audio_coding/main/test/TestAllCodecs.cc b/chromium/third_party/webrtc/modules/audio_coding/main/test/TestAllCodecs.cc
index b1badb6363a..21d97f1abf1 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/main/test/TestAllCodecs.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/main/test/TestAllCodecs.cc
@@ -223,7 +223,6 @@ void TestAllCodecs::Perform() {
Run(channel_a_to_b_);
outfile_b_.Close();
#endif
-#ifdef WEBRTC_CODEC_PCM16
if (test_mode_ != 0) {
printf("===============================================================\n");
}
@@ -263,7 +262,6 @@ void TestAllCodecs::Perform() {
RegisterSendCodec('A', codec_l16, 32000, 512000, 640, 0);
Run(channel_a_to_b_);
outfile_b_.Close();
-#endif
if (test_mode_ != 0) {
printf("===============================================================\n");
}
@@ -339,9 +337,6 @@ void TestAllCodecs::Perform() {
#ifndef WEBRTC_CODEC_ISACFX
printf(" ISAC fix\n");
#endif
-#ifndef WEBRTC_CODEC_PCM16
- printf(" PCM16\n");
-#endif
printf("\nTo complete the test, listen to the %d number of output files.\n",
test_count_);
diff --git a/chromium/third_party/webrtc/modules/audio_coding/main/test/TestRedFec.cc b/chromium/third_party/webrtc/modules/audio_coding/main/test/TestRedFec.cc
index 6027a4d0e1a..7a512b44333 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/main/test/TestRedFec.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/main/test/TestRedFec.cc
@@ -37,11 +37,15 @@ namespace webrtc {
namespace {
const char kNameL16[] = "L16";
const char kNamePCMU[] = "PCMU";
+ const char kNameCN[] = "CN";
+ const char kNameRED[] = "RED";
+
+ // These three are only used by code #ifdeffed on WEBRTC_CODEC_G722.
+#ifdef WEBRTC_CODEC_G722
const char kNameISAC[] = "ISAC";
const char kNameG722[] = "G722";
const char kNameOPUS[] = "opus";
- const char kNameCN[] = "CN";
- const char kNameRED[] = "RED";
+#endif
}
TestRedFec::TestRedFec()
@@ -82,10 +86,6 @@ void TestRedFec::Perform() {
_acmA->RegisterTransportCallback(_channelA2B);
_channelA2B->RegisterReceiverACM(_acmB.get());
-#ifndef WEBRTC_CODEC_PCM16
- EXPECT_TRUE(false) << "PCM16 needs to be activated to run this test\n");
- return;
-#endif
EXPECT_EQ(0, RegisterSendCodec('A', kNameL16, 8000));
EXPECT_EQ(0, RegisterSendCodec('A', kNameCN, 8000));
EXPECT_EQ(0, RegisterSendCodec('A', kNameRED));
@@ -108,7 +108,7 @@ void TestRedFec::Perform() {
EXPECT_TRUE(false);
printf("G722 needs to be activated to run this test\n");
return;
-#endif
+#else
EXPECT_EQ(0, RegisterSendCodec('A', kNameG722, 16000));
EXPECT_EQ(0, RegisterSendCodec('A', kNameCN, 16000));
@@ -412,6 +412,8 @@ void TestRedFec::Perform() {
EXPECT_FALSE(_acmA->REDStatus());
EXPECT_EQ(0, _acmA->SetCodecFEC(false));
EXPECT_FALSE(_acmA->CodecFEC());
+
+#endif // defined(WEBRTC_CODEC_G722)
}
int32_t TestRedFec::SetVAD(bool enableDTX, bool enableVAD, ACMVADMode vadMode) {
diff --git a/chromium/third_party/webrtc/modules/audio_coding/main/test/TestStereo.cc b/chromium/third_party/webrtc/modules/audio_coding/main/test/TestStereo.cc
index 1eb4a675fe5..32ecadfaceb 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/main/test/TestStereo.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/main/test/TestStereo.cc
@@ -118,11 +118,9 @@ TestStereo::TestStereo(int test_mode)
#ifdef WEBRTC_CODEC_G722
, g722_pltype_(0)
#endif
-#ifdef WEBRTC_CODEC_PCM16
, l16_8khz_pltype_(-1)
, l16_16khz_pltype_(-1)
, l16_32khz_pltype_(-1)
-#endif
#ifdef PCMA_AND_PCMU
, pcma_pltype_(-1)
, pcmu_pltype_(-1)
@@ -247,7 +245,6 @@ void TestStereo::Perform() {
Run(channel_a2b_, audio_channels, codec_channels);
out_file_.Close();
#endif
-#ifdef WEBRTC_CODEC_PCM16
if (test_mode_ != 0) {
printf("===========================================================\n");
printf("Test number: %d\n", test_cntr_ + 1);
@@ -306,7 +303,6 @@ void TestStereo::Perform() {
l16_32khz_pltype_);
Run(channel_a2b_, audio_channels, codec_channels);
out_file_.Close();
-#endif
#ifdef PCMA_AND_PCMU
if (test_mode_ != 0) {
printf("===========================================================\n");
@@ -435,7 +431,6 @@ void TestStereo::Perform() {
Run(channel_a2b_, audio_channels, codec_channels);
out_file_.Close();
#endif
-#ifdef WEBRTC_CODEC_PCM16
if (test_mode_ != 0) {
printf("===============================================================\n");
printf("Test number: %d\n", test_cntr_ + 1);
@@ -470,7 +465,6 @@ void TestStereo::Perform() {
l16_32khz_pltype_);
Run(channel_a2b_, audio_channels, codec_channels);
out_file_.Close();
-#endif
#ifdef PCMA_AND_PCMU
if (test_mode_ != 0) {
printf("===============================================================\n");
@@ -538,7 +532,6 @@ void TestStereo::Perform() {
Run(channel_a2b_, audio_channels, codec_channels);
out_file_.Close();
#endif
-#ifdef WEBRTC_CODEC_PCM16
if (test_mode_ != 0) {
printf("===============================================================\n");
printf("Test number: %d\n", test_cntr_ + 1);
@@ -572,7 +565,6 @@ void TestStereo::Perform() {
l16_32khz_pltype_);
Run(channel_a2b_, audio_channels, codec_channels);
out_file_.Close();
-#endif
#ifdef PCMA_AND_PCMU
if (test_mode_ != 0) {
printf("===============================================================\n");
@@ -662,9 +654,7 @@ void TestStereo::Perform() {
#ifdef WEBRTC_CODEC_G722
printf(" G.722\n");
#endif
-#ifdef WEBRTC_CODEC_PCM16
printf(" PCM16\n");
-#endif
printf(" G.711\n");
#ifdef WEBRTC_CODEC_OPUS
printf(" Opus\n");
diff --git a/chromium/third_party/webrtc/modules/audio_coding/main/test/TestStereo.h b/chromium/third_party/webrtc/modules/audio_coding/main/test/TestStereo.h
index c6412c79461..b56e9952724 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/main/test/TestStereo.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/main/test/TestStereo.h
@@ -100,11 +100,9 @@ class TestStereo : public ACMTest {
#ifdef WEBRTC_CODEC_G722
int g722_pltype_;
#endif
-#ifdef WEBRTC_CODEC_PCM16
int l16_8khz_pltype_;
int l16_16khz_pltype_;
int l16_32khz_pltype_;
-#endif
#ifdef PCMA_AND_PCMU
int pcma_pltype_;
int pcmu_pltype_;
diff --git a/chromium/third_party/webrtc/modules/audio_coding/main/test/TestVADDTX.cc b/chromium/third_party/webrtc/modules/audio_coding/main/test/TestVADDTX.cc
index d18479993c5..0e42b9f8ae1 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/main/test/TestVADDTX.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/main/test/TestVADDTX.cc
@@ -137,7 +137,6 @@ void TestVadDtx::Run(std::string in_filename, int frequency, int channels,
TestWebRtcVadDtx::TestWebRtcVadDtx()
: vad_enabled_(false),
dtx_enabled_(false),
- use_webrtc_dtx_(false),
output_file_num_(0) {
}
@@ -191,7 +190,7 @@ void TestWebRtcVadDtx::RunTestCases() {
// Set the expectation and run the test.
void TestWebRtcVadDtx::Test(bool new_outfile) {
- int expects[] = {-1, 1, use_webrtc_dtx_, 0, 0};
+ int expects[] = {-1, 1, dtx_enabled_, 0, 0};
if (new_outfile) {
output_file_num_++;
}
@@ -219,17 +218,10 @@ void TestWebRtcVadDtx::SetVAD(bool enable_dtx, bool enable_vad,
EXPECT_EQ(dtx_enabled_ , enable_dtx); // DTX should be set as expected.
- bool replaced = false;
- acm_send_->IsInternalDTXReplacedWithWebRtc(&replaced);
-
- use_webrtc_dtx_ = dtx_enabled_ && replaced;
-
- if (use_webrtc_dtx_) {
+ if (dtx_enabled_) {
EXPECT_TRUE(vad_enabled_); // WebRTC DTX cannot run without WebRTC VAD.
- }
-
- if (!dtx_enabled_ || !use_webrtc_dtx_) {
- // Using no DTX or codec Internal DTX should not affect setting of VAD.
+ } else {
+ // Using no DTX should not affect setting of VAD.
EXPECT_EQ(enable_vad, vad_enabled_);
}
}
diff --git a/chromium/third_party/webrtc/modules/audio_coding/main/test/TestVADDTX.h b/chromium/third_party/webrtc/modules/audio_coding/main/test/TestVADDTX.h
index b664a9b4d40..8ef4228bd7d 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/main/test/TestVADDTX.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/main/test/TestVADDTX.h
@@ -88,7 +88,6 @@ class TestWebRtcVadDtx final : public TestVadDtx {
bool vad_enabled_;
bool dtx_enabled_;
- bool use_webrtc_dtx_;
int output_file_num_;
};
diff --git a/chromium/third_party/webrtc/modules/audio_coding/main/test/Tester.cc b/chromium/third_party/webrtc/modules/audio_coding/main/test/Tester.cc
index 22510f34161..b6850fabba2 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/main/test/Tester.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/main/test/Tester.cc
@@ -50,7 +50,13 @@ TEST(AudioCodingModuleTest, DISABLED_ON_ANDROID(TestEncodeDecode)) {
Trace::ReturnTrace();
}
-TEST(AudioCodingModuleTest, DISABLED_ON_ANDROID(TestRedFec)) {
+#ifdef WEBRTC_CODEC_RED
+#define IF_RED(x) x
+#else
+#define IF_RED(x) DISABLED_##x
+#endif
+
+TEST(AudioCodingModuleTest, DISABLED_ON_ANDROID(IF_RED(TestRedFec))) {
Trace::CreateTrace();
Trace::SetTraceFile((webrtc::test::OutputPath() +
"acm_fec_trace.txt").c_str());
@@ -58,7 +64,13 @@ TEST(AudioCodingModuleTest, DISABLED_ON_ANDROID(TestRedFec)) {
Trace::ReturnTrace();
}
-TEST(AudioCodingModuleTest, DISABLED_ON_ANDROID(TestIsac)) {
+#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
+#define IF_ISAC(x) x
+#else
+#define IF_ISAC(x) DISABLED_##x
+#endif
+
+TEST(AudioCodingModuleTest, DISABLED_ON_ANDROID(IF_ISAC(TestIsac))) {
Trace::CreateTrace();
Trace::SetTraceFile((webrtc::test::OutputPath() +
"acm_isac_trace.txt").c_str());
@@ -66,7 +78,15 @@ TEST(AudioCodingModuleTest, DISABLED_ON_ANDROID(TestIsac)) {
Trace::ReturnTrace();
}
-TEST(AudioCodingModuleTest, DISABLED_ON_ANDROID(TwoWayCommunication)) {
+#if (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)) && \
+ defined(WEBRTC_CODEC_ILBC) && defined(WEBRTC_CODEC_G722)
+#define IF_ALL_CODECS(x) x
+#else
+#define IF_ALL_CODECS(x) DISABLED_##x
+#endif
+
+TEST(AudioCodingModuleTest,
+ DISABLED_ON_ANDROID(IF_ALL_CODECS(TwoWayCommunication))) {
Trace::CreateTrace();
Trace::SetTraceFile((webrtc::test::OutputPath() +
"acm_twowaycom_trace.txt").c_str());
diff --git a/chromium/third_party/webrtc/modules/audio_coding/main/test/TwoWayCommunication.cc b/chromium/third_party/webrtc/modules/audio_coding/main/test/TwoWayCommunication.cc
index 1014fc9d0ab..00853aead70 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/main/test/TwoWayCommunication.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/main/test/TwoWayCommunication.cc
@@ -32,10 +32,16 @@ namespace webrtc {
TwoWayCommunication::TwoWayCommunication(int testMode)
: _acmA(AudioCodingModule::Create(1)),
- _acmB(AudioCodingModule::Create(2)),
_acmRefA(AudioCodingModule::Create(3)),
- _acmRefB(AudioCodingModule::Create(4)),
- _testMode(testMode) {}
+ _testMode(testMode) {
+ AudioCodingModule::Config config;
+ // The clicks will be more obvious in FAX mode. TODO(henrik.lundin) Really?
+ config.neteq_config.playout_mode = kPlayoutFax;
+ config.id = 2;
+ _acmB.reset(AudioCodingModule::Create(config));
+ config.id = 4;
+ _acmRefB.reset(AudioCodingModule::Create(config));
+}
TwoWayCommunication::~TwoWayCommunication() {
delete _channel_A2B;
@@ -96,11 +102,6 @@ void TwoWayCommunication::SetUp() {
//--- Set A codecs
EXPECT_EQ(0, _acmA->RegisterSendCodec(codecInst_A));
EXPECT_EQ(0, _acmA->RegisterReceiveCodec(codecInst_B));
-#ifdef WEBRTC_DTMF_DETECTION
- _dtmfDetectorA = new(DTMFDetector);
- EXPECT_GT(_acmA->RegisterIncomingMessagesCallback(_dtmfDetectorA, ACMUSA),
- -1);
-#endif
//--- Set ref-A codecs
EXPECT_EQ(0, _acmRefA->RegisterSendCodec(codecInst_A));
EXPECT_EQ(0, _acmRefA->RegisterReceiveCodec(codecInst_B));
@@ -108,11 +109,6 @@ void TwoWayCommunication::SetUp() {
//--- Set B codecs
EXPECT_EQ(0, _acmB->RegisterSendCodec(codecInst_B));
EXPECT_EQ(0, _acmB->RegisterReceiveCodec(codecInst_A));
-#ifdef WEBRTC_DTMF_DETECTION
- _dtmfDetectorB = new(DTMFDetector);
- EXPECT_GT(_acmB->RegisterIncomingMessagesCallback(_dtmfDetectorB, ACMUSA),
- -1);
-#endif
//--- Set ref-B codecs
EXPECT_EQ(0, _acmRefB->RegisterSendCodec(codecInst_B));
@@ -169,11 +165,6 @@ void TwoWayCommunication::SetUp() {
_channelRef_B2A = new Channel;
_acmRefB->RegisterTransportCallback(_channelRef_B2A);
_channelRef_B2A->RegisterReceiverACM(_acmRefA.get());
-
- // The clicks will be more obvious when we
- // are in FAX mode.
- EXPECT_EQ(_acmB->SetPlayoutMode(fax), 0);
- EXPECT_EQ(_acmRefB->SetPlayoutMode(fax), 0);
}
void TwoWayCommunication::SetUpAutotest() {
@@ -188,10 +179,6 @@ void TwoWayCommunication::SetUpAutotest() {
//--- Set A codecs
EXPECT_EQ(0, _acmA->RegisterSendCodec(codecInst_A));
EXPECT_EQ(0, _acmA->RegisterReceiveCodec(codecInst_B));
-#ifdef WEBRTC_DTMF_DETECTION
- _dtmfDetectorA = new(DTMFDetector);
- EXPECT_EQ(0, _acmA->RegisterIncomingMessagesCallback(_dtmfDetectorA, ACMUSA));
-#endif
//--- Set ref-A codecs
EXPECT_GT(_acmRefA->RegisterSendCodec(codecInst_A), -1);
@@ -200,10 +187,6 @@ void TwoWayCommunication::SetUpAutotest() {
//--- Set B codecs
EXPECT_GT(_acmB->RegisterSendCodec(codecInst_B), -1);
EXPECT_GT(_acmB->RegisterReceiveCodec(codecInst_A), -1);
-#ifdef WEBRTC_DTMF_DETECTION
- _dtmfDetectorB = new(DTMFDetector);
- EXPECT_EQ(0, _acmB->RegisterIncomingMessagesCallback(_dtmfDetectorB, ACMUSA));
-#endif
//--- Set ref-B codecs
EXPECT_EQ(0, _acmRefB->RegisterSendCodec(codecInst_B));
@@ -251,11 +234,6 @@ void TwoWayCommunication::SetUpAutotest() {
_channelRef_B2A = new Channel;
_acmRefB->RegisterTransportCallback(_channelRef_B2A);
_channelRef_B2A->RegisterReceiverACM(_acmRefA.get());
-
- // The clicks will be more obvious when we
- // are in FAX mode.
- EXPECT_EQ(0, _acmB->SetPlayoutMode(fax));
- EXPECT_EQ(0, _acmRefB->SetPlayoutMode(fax));
}
void TwoWayCommunication::Perform() {
@@ -279,8 +257,8 @@ void TwoWayCommunication::Perform() {
// In the following loop we tests that the code can handle misuse of the APIs.
// In the middle of a session with data flowing between two sides, called A
- // and B, APIs will be called, like ResetEncoder(), and the code should
- // continue to run, and be able to recover.
+ // and B, APIs will be called, and the code should continue to run, and be
+ // able to recover.
while (!_inFileA.EndOfFile() && !_inFileB.EndOfFile()) {
msecPassed += 10;
EXPECT_GT(_inFileA.Read10MsData(audioFrame), 0);
@@ -305,21 +283,14 @@ void TwoWayCommunication::Perform() {
msecPassed = 0;
secPassed++;
}
- // Call RestEncoder for ACM on side A, and InitializeSender for ACM on
- // side B.
- if (((secPassed % 5) == 4) && (msecPassed == 0)) {
- EXPECT_EQ(0, _acmA->ResetEncoder());
- }
// Re-register send codec on side B.
if (((secPassed % 5) == 4) && (msecPassed >= 990)) {
EXPECT_EQ(0, _acmB->RegisterSendCodec(codecInst_B));
EXPECT_EQ(0, _acmB->SendCodec(&dummy));
}
- // Reset decoder on side B, and initialize receiver on side A.
- if (((secPassed % 7) == 6) && (msecPassed == 0)) {
- EXPECT_EQ(0, _acmB->ResetDecoder());
+ // Initialize receiver on side A.
+ if (((secPassed % 7) == 6) && (msecPassed == 0))
EXPECT_EQ(0, _acmA->InitializeReceiver());
- }
// Re-register codec on side A.
if (((secPassed % 7) == 6) && (msecPassed >= 990)) {
EXPECT_EQ(0, _acmA->RegisterReceiveCodec(codecInst_B));
diff --git a/chromium/third_party/webrtc/modules/audio_coding/main/test/iSACTest.cc b/chromium/third_party/webrtc/modules/audio_coding/main/test/iSACTest.cc
index 2469d1744df..bd796d1ce70 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/main/test/iSACTest.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/main/test/iSACTest.cc
@@ -35,8 +35,6 @@ namespace webrtc {
void SetISACConfigDefault(ACMTestISACConfig& isacConfig) {
isacConfig.currentRateBitPerSec = 0;
isacConfig.currentFrameSizeMsec = 0;
- isacConfig.maxRateBitPerSec = 0;
- isacConfig.maxPayloadSizeByte = 0;
isacConfig.encodingMode = -1;
isacConfig.initRateBitPerSec = 0;
isacConfig.initFrameSizeInMsec = 0;
@@ -67,22 +65,6 @@ int16_t SetISAConfig(ACMTestISACConfig& isacConfig, AudioCodingModule* acm,
}
}
- if (isacConfig.maxRateBitPerSec > 0) {
- // Set max rate.
- EXPECT_EQ(0, acm->SetISACMaxRate(isacConfig.maxRateBitPerSec));
- }
- if (isacConfig.maxPayloadSizeByte > 0) {
- // Set max payload size.
- EXPECT_EQ(0, acm->SetISACMaxPayloadSize(isacConfig.maxPayloadSizeByte));
- }
- if ((isacConfig.initFrameSizeInMsec != 0)
- || (isacConfig.initRateBitPerSec != 0)) {
- EXPECT_EQ(0, acm->ConfigISACBandwidthEstimator(
- static_cast<uint8_t>(isacConfig.initFrameSizeInMsec),
- static_cast<uint16_t>(isacConfig.initRateBitPerSec),
- isacConfig.enforceFrameSize));
- }
-
return 0;
}
@@ -200,41 +182,6 @@ void ISACTest::Perform() {
testNr++;
EncodeDecode(testNr, wbISACConfig, swbISACConfig);
- int user_input;
- if ((_testMode == 0) || (_testMode == 1)) {
- swbISACConfig.maxPayloadSizeByte = static_cast<uint16_t>(200);
- wbISACConfig.maxPayloadSizeByte = static_cast<uint16_t>(200);
- } else {
- printf("Enter the max payload-size for side A: ");
- CHECK_ERROR(scanf("%d", &user_input));
- swbISACConfig.maxPayloadSizeByte = (uint16_t) user_input;
- printf("Enter the max payload-size for side B: ");
- CHECK_ERROR(scanf("%d", &user_input));
- wbISACConfig.maxPayloadSizeByte = (uint16_t) user_input;
- }
- testNr++;
- EncodeDecode(testNr, wbISACConfig, swbISACConfig);
-
- _acmA->ResetEncoder();
- _acmB->ResetEncoder();
- SetISACConfigDefault(wbISACConfig);
- SetISACConfigDefault(swbISACConfig);
-
- if ((_testMode == 0) || (_testMode == 1)) {
- swbISACConfig.maxRateBitPerSec = static_cast<uint32_t>(48000);
- wbISACConfig.maxRateBitPerSec = static_cast<uint32_t>(48000);
- } else {
- printf("Enter the max rate for side A: ");
- CHECK_ERROR(scanf("%d", &user_input));
- swbISACConfig.maxRateBitPerSec = (uint32_t) user_input;
- printf("Enter the max rate for side B: ");
- CHECK_ERROR(scanf("%d", &user_input));
- wbISACConfig.maxRateBitPerSec = (uint32_t) user_input;
- }
-
- testNr++;
- EncodeDecode(testNr, wbISACConfig, swbISACConfig);
-
testNr++;
if (_testMode == 0) {
SwitchingSamplingRate(testNr, 4);
diff --git a/chromium/third_party/webrtc/modules/audio_coding/main/test/iSACTest.h b/chromium/third_party/webrtc/modules/audio_coding/main/test/iSACTest.h
index f4223f75128..8f892d907b1 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/main/test/iSACTest.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/main/test/iSACTest.h
@@ -29,8 +29,6 @@ namespace webrtc {
struct ACMTestISACConfig {
int32_t currentRateBitPerSec;
int16_t currentFrameSizeMsec;
- uint32_t maxRateBitPerSec;
- int16_t maxPayloadSizeByte;
int16_t encodingMode;
uint32_t initRateBitPerSec;
int16_t initFrameSizeInMsec;
diff --git a/chromium/third_party/webrtc/modules/audio_coding/main/test/initial_delay_unittest.cc b/chromium/third_party/webrtc/modules/audio_coding/main/test/initial_delay_unittest.cc
index ffbbc8c5d13..0bac4013f53 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/main/test/initial_delay_unittest.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/main/test/initial_delay_unittest.cc
@@ -32,9 +32,9 @@ namespace webrtc {
namespace {
double FrameRms(AudioFrame& frame) {
- int samples = frame.num_channels_ * frame.samples_per_channel_;
+ size_t samples = frame.num_channels_ * frame.samples_per_channel_;
double rms = 0;
- for (int n = 0; n < samples; ++n)
+ for (size_t n = 0; n < samples; ++n)
rms += frame.data_[n] * frame.data_[n];
rms /= samples;
rms = sqrt(rms);
@@ -132,9 +132,9 @@ class InitialPlayoutDelayTest : public ::testing::Test {
in_audio_frame.sample_rate_hz_ = codec.plfreq;
in_audio_frame.num_channels_ = codec.channels;
in_audio_frame.samples_per_channel_ = codec.plfreq / 100; // 10 ms.
- int samples = in_audio_frame.num_channels_ *
+ size_t samples = in_audio_frame.num_channels_ *
in_audio_frame.samples_per_channel_;
- for (int n = 0; n < samples; ++n) {
+ for (size_t n = 0; n < samples; ++n) {
in_audio_frame.data_[n] = kAmp;
}
diff --git a/chromium/third_party/webrtc/modules/audio_coding/main/test/opus_test.cc b/chromium/third_party/webrtc/modules/audio_coding/main/test/opus_test.cc
index c61d25ad19a..d6482dd4479 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/main/test/opus_test.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/main/test/opus_test.cc
@@ -84,8 +84,8 @@ void OpusTest::Perform() {
// Create Opus decoders for mono and stereo for stand-alone testing of Opus.
ASSERT_GT(WebRtcOpus_DecoderCreate(&opus_mono_decoder_, 1), -1);
ASSERT_GT(WebRtcOpus_DecoderCreate(&opus_stereo_decoder_, 2), -1);
- ASSERT_GT(WebRtcOpus_DecoderInit(opus_mono_decoder_), -1);
- ASSERT_GT(WebRtcOpus_DecoderInit(opus_stereo_decoder_), -1);
+ WebRtcOpus_DecoderInit(opus_mono_decoder_);
+ WebRtcOpus_DecoderInit(opus_stereo_decoder_);
ASSERT_TRUE(acm_receiver_.get() != NULL);
EXPECT_EQ(0, acm_receiver_->InitializeReceiver());
@@ -270,14 +270,14 @@ void OpusTest::Run(TestPackStereo* channel, int channels, int bitrate,
if (loop_encode > 0) {
const int kMaxBytes = 1000; // Maximum number of bytes for one packet.
- int16_t bitstream_len_byte;
+ size_t bitstream_len_byte;
uint8_t bitstream[kMaxBytes];
for (int i = 0; i < loop_encode; i++) {
int bitstream_len_byte_int = WebRtcOpus_Encode(
(channels == 1) ? opus_mono_encoder_ : opus_stereo_encoder_,
&audio[read_samples], frame_length, kMaxBytes, bitstream);
ASSERT_GE(bitstream_len_byte_int, 0);
- bitstream_len_byte = static_cast<int16_t>(bitstream_len_byte_int);
+ bitstream_len_byte = static_cast<size_t>(bitstream_len_byte_int);
// Simulate packet loss by setting |packet_loss_| to "true" in
// |percent_loss| percent of the loops.
@@ -341,7 +341,8 @@ void OpusTest::Run(TestPackStereo* channel, int channels, int bitrate,
audio_frame.samples_per_channel_ * audio_frame.num_channels_);
// Write stand-alone speech to file.
- out_file_standalone_.Write10MsData(out_audio, decoded_samples * channels);
+ out_file_standalone_.Write10MsData(
+ out_audio, static_cast<size_t>(decoded_samples) * channels);
if (audio_frame.timestamp_ > start_time_stamp) {
// Number of channels should be the same for both stand-alone and
diff --git a/chromium/third_party/webrtc/modules/audio_coding/main/test/opus_test.h b/chromium/third_party/webrtc/modules/audio_coding/main/test/opus_test.h
index 4c3d8c160e6..63945ccd285 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/main/test/opus_test.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/main/test/opus_test.h
@@ -14,6 +14,7 @@
#include <math.h>
#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/modules/audio_coding/codecs/opus/interface/opus_interface.h"
#include "webrtc/modules/audio_coding/main/acm2/acm_resampler.h"
#include "webrtc/modules/audio_coding/main/test/ACMTest.h"
#include "webrtc/modules/audio_coding/main/test/Channel.h"
diff --git a/chromium/third_party/webrtc/modules/audio_coding/main/test/utility.cc b/chromium/third_party/webrtc/modules/audio_coding/main/test/utility.cc
index e4e6dd4a351..949ca617b24 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/main/test/utility.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/main/test/utility.cc
@@ -279,32 +279,6 @@ bool FixedPayloadTypeCodec(const char* payloadName) {
return false;
}
-DTMFDetector::DTMFDetector() {
- for (int16_t n = 0; n < 1000; n++) {
- _toneCntr[n] = 0;
- }
-}
-
-DTMFDetector::~DTMFDetector() {
-}
-
-int32_t DTMFDetector::IncomingDtmf(const uint8_t digitDtmf,
- const bool /* toneEnded */) {
- fprintf(stdout, "%d-", digitDtmf);
- _toneCntr[digitDtmf]++;
- return 0;
-}
-
-void DTMFDetector::PrintDetectedDigits() {
- for (int16_t n = 0; n < 1000; n++) {
- if (_toneCntr[n] > 0) {
- fprintf(stdout, "%d %u msec, \n", n, _toneCntr[n] * 10);
- }
- }
- fprintf(stdout, "\n");
- return;
-}
-
void VADCallback::Reset() {
memset(_numFrameTypes, 0, sizeof(_numFrameTypes));
}
diff --git a/chromium/third_party/webrtc/modules/audio_coding/main/test/utility.h b/chromium/third_party/webrtc/modules/audio_coding/main/test/utility.h
index eccb68f6d1e..6998538dddb 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/main/test/utility.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/main/test/utility.h
@@ -115,19 +115,6 @@ void PrintCodecs();
bool FixedPayloadTypeCodec(const char* payloadName);
-class DTMFDetector : public AudioCodingFeedback {
- public:
- DTMFDetector();
- ~DTMFDetector();
- // used for inband DTMF detection
- int32_t IncomingDtmf(const uint8_t digitDtmf, const bool toneEnded);
- void PrintDetectedDigits();
-
- private:
- uint32_t _toneCntr[1000];
-
-};
-
class VADCallback : public ACMVADCallback {
public:
VADCallback();
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/accelerate.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/accelerate.cc
index ad7423810df..1c36fa8c612 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/accelerate.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/accelerate.cc
@@ -18,11 +18,11 @@ Accelerate::ReturnCodes Accelerate::Process(const int16_t* input,
size_t input_length,
bool fast_accelerate,
AudioMultiVector* output,
- int16_t* length_change_samples) {
+ size_t* length_change_samples) {
// Input length must be (almost) 30 ms.
- static const int k15ms = 120; // 15 ms = 120 samples at 8 kHz sample rate.
- if (num_channels_ == 0 || static_cast<int>(input_length) / num_channels_ <
- (2 * k15ms - 1) * fs_mult_) {
+ static const size_t k15ms = 120; // 15 ms = 120 samples at 8 kHz sample rate.
+ if (num_channels_ == 0 ||
+ input_length / num_channels_ < (2 * k15ms - 1) * fs_mult_) {
// Length of input data too short to do accelerate. Simply move all data
// from input to output.
output->PushBackInterleaved(input, input_length);
@@ -34,7 +34,7 @@ Accelerate::ReturnCodes Accelerate::Process(const int16_t* input,
void Accelerate::SetParametersForPassiveSpeech(size_t /*len*/,
int16_t* best_correlation,
- int* /*peak_index*/) const {
+ size_t* /*peak_index*/) const {
// When the signal does not contain any active speech, the correlation does
// not matter. Simply set it to zero.
*best_correlation = 0;
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/accelerate.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/accelerate.h
index 684f74bb8c3..f66bc8ed341 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/accelerate.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/accelerate.h
@@ -45,14 +45,14 @@ class Accelerate : public TimeStretch {
size_t input_length,
bool fast_accelerate,
AudioMultiVector* output,
- int16_t* length_change_samples);
+ size_t* length_change_samples);
protected:
// Sets the parameters |best_correlation| and |peak_index| to suitable
// values when the signal contains no active speech.
void SetParametersForPassiveSpeech(size_t len,
int16_t* best_correlation,
- int* peak_index) const override;
+ size_t* peak_index) const override;
// Checks the criteria for performing the time-stretching operation and,
// if possible, performs the time-stretching.
@@ -65,7 +65,7 @@ class Accelerate : public TimeStretch {
AudioMultiVector* output) const override;
private:
- DISALLOW_COPY_AND_ASSIGN(Accelerate);
+ RTC_DISALLOW_COPY_AND_ASSIGN(Accelerate);
};
struct AccelerateFactory {
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/audio_decoder_impl.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/audio_decoder_impl.cc
index 99ff95a2ec1..afad9b1d6b0 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/audio_decoder_impl.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/audio_decoder_impl.cc
@@ -11,398 +11,42 @@
#include "webrtc/modules/audio_coding/neteq/audio_decoder_impl.h"
#include <assert.h>
-#include <string.h> // memmove
#include "webrtc/base/checks.h"
#include "webrtc/modules/audio_coding/codecs/cng/include/webrtc_cng.h"
-#include "webrtc/modules/audio_coding/codecs/g711/include/g711_interface.h"
+#include "webrtc/modules/audio_coding/codecs/g711/include/audio_decoder_pcm.h"
#ifdef WEBRTC_CODEC_G722
-#include "webrtc/modules/audio_coding/codecs/g722/include/g722_interface.h"
+#include "webrtc/modules/audio_coding/codecs/g722/include/audio_decoder_g722.h"
#endif
#ifdef WEBRTC_CODEC_ILBC
-#include "webrtc/modules/audio_coding/codecs/ilbc/interface/ilbc.h"
+#include "webrtc/modules/audio_coding/codecs/ilbc/interface/audio_decoder_ilbc.h"
#endif
#ifdef WEBRTC_CODEC_ISACFX
+#include "webrtc/modules/audio_coding/codecs/isac/fix/interface/audio_decoder_isacfix.h"
#include "webrtc/modules/audio_coding/codecs/isac/fix/interface/audio_encoder_isacfix.h"
#endif
#ifdef WEBRTC_CODEC_ISAC
+#include "webrtc/modules/audio_coding/codecs/isac/main/interface/audio_decoder_isac.h"
#include "webrtc/modules/audio_coding/codecs/isac/main/interface/audio_encoder_isac.h"
#endif
#ifdef WEBRTC_CODEC_OPUS
-#include "webrtc/modules/audio_coding/codecs/opus/interface/opus_interface.h"
-#endif
-#ifdef WEBRTC_CODEC_PCM16
-#include "webrtc/modules/audio_coding/codecs/pcm16b/include/pcm16b.h"
+#include "webrtc/modules/audio_coding/codecs/opus/interface/audio_decoder_opus.h"
#endif
+#include "webrtc/modules/audio_coding/codecs/pcm16b/include/audio_decoder_pcm16b.h"
namespace webrtc {
-// PCMu
-
-int AudioDecoderPcmU::Init() {
- return 0;
-}
-size_t AudioDecoderPcmU::Channels() const {
- return 1;
-}
-
-int AudioDecoderPcmU::DecodeInternal(const uint8_t* encoded,
- size_t encoded_len,
- int sample_rate_hz,
- int16_t* decoded,
- SpeechType* speech_type) {
- DCHECK_EQ(sample_rate_hz, 8000);
- int16_t temp_type = 1; // Default is speech.
- int16_t ret = WebRtcG711_DecodeU(encoded, static_cast<int16_t>(encoded_len),
- decoded, &temp_type);
- *speech_type = ConvertSpeechType(temp_type);
- return ret;
-}
-
-int AudioDecoderPcmU::PacketDuration(const uint8_t* encoded,
- size_t encoded_len) const {
- // One encoded byte per sample per channel.
- return static_cast<int>(encoded_len / Channels());
-}
-
-size_t AudioDecoderPcmUMultiCh::Channels() const {
- return channels_;
-}
-
-// PCMa
-
-int AudioDecoderPcmA::Init() {
- return 0;
-}
-size_t AudioDecoderPcmA::Channels() const {
- return 1;
-}
-
-int AudioDecoderPcmA::DecodeInternal(const uint8_t* encoded,
- size_t encoded_len,
- int sample_rate_hz,
- int16_t* decoded,
- SpeechType* speech_type) {
- DCHECK_EQ(sample_rate_hz, 8000);
- int16_t temp_type = 1; // Default is speech.
- int16_t ret = WebRtcG711_DecodeA(encoded, static_cast<int16_t>(encoded_len),
- decoded, &temp_type);
- *speech_type = ConvertSpeechType(temp_type);
- return ret;
-}
-
-int AudioDecoderPcmA::PacketDuration(const uint8_t* encoded,
- size_t encoded_len) const {
- // One encoded byte per sample per channel.
- return static_cast<int>(encoded_len / Channels());
-}
-
-size_t AudioDecoderPcmAMultiCh::Channels() const {
- return channels_;
-}
-
-// PCM16B
-#ifdef WEBRTC_CODEC_PCM16
-AudioDecoderPcm16B::AudioDecoderPcm16B() {}
-
-int AudioDecoderPcm16B::Init() {
- return 0;
-}
-size_t AudioDecoderPcm16B::Channels() const {
- return 1;
-}
-
-int AudioDecoderPcm16B::DecodeInternal(const uint8_t* encoded,
- size_t encoded_len,
- int sample_rate_hz,
- int16_t* decoded,
- SpeechType* speech_type) {
- DCHECK(sample_rate_hz == 8000 || sample_rate_hz == 16000 ||
- sample_rate_hz == 32000 || sample_rate_hz == 48000)
- << "Unsupported sample rate " << sample_rate_hz;
- int16_t ret =
- WebRtcPcm16b_Decode(encoded, static_cast<int16_t>(encoded_len), decoded);
- *speech_type = ConvertSpeechType(1);
- return ret;
-}
-
-int AudioDecoderPcm16B::PacketDuration(const uint8_t* encoded,
- size_t encoded_len) const {
- // Two encoded byte per sample per channel.
- return static_cast<int>(encoded_len / (2 * Channels()));
-}
-
-AudioDecoderPcm16BMultiCh::AudioDecoderPcm16BMultiCh(int num_channels)
- : channels_(num_channels) {
- DCHECK(num_channels > 0);
-}
-
-size_t AudioDecoderPcm16BMultiCh::Channels() const {
- return channels_;
-}
-#endif
-
-// iLBC
-#ifdef WEBRTC_CODEC_ILBC
-AudioDecoderIlbc::AudioDecoderIlbc() {
- WebRtcIlbcfix_DecoderCreate(&dec_state_);
-}
-
-AudioDecoderIlbc::~AudioDecoderIlbc() {
- WebRtcIlbcfix_DecoderFree(dec_state_);
-}
-
-bool AudioDecoderIlbc::HasDecodePlc() const {
- return true;
-}
-
-int AudioDecoderIlbc::DecodeInternal(const uint8_t* encoded,
- size_t encoded_len,
- int sample_rate_hz,
- int16_t* decoded,
- SpeechType* speech_type) {
- DCHECK_EQ(sample_rate_hz, 8000);
- int16_t temp_type = 1; // Default is speech.
- int ret = WebRtcIlbcfix_Decode(dec_state_, encoded,
- static_cast<int16_t>(encoded_len), decoded,
- &temp_type);
- *speech_type = ConvertSpeechType(temp_type);
- return ret;
-}
-
-int AudioDecoderIlbc::DecodePlc(int num_frames, int16_t* decoded) {
- return WebRtcIlbcfix_NetEqPlc(dec_state_, decoded, num_frames);
-}
-
-int AudioDecoderIlbc::Init() {
- return WebRtcIlbcfix_Decoderinit30Ms(dec_state_);
-}
-
-size_t AudioDecoderIlbc::Channels() const {
- return 1;
-}
-#endif
-
-// G.722
-#ifdef WEBRTC_CODEC_G722
-AudioDecoderG722::AudioDecoderG722() {
- WebRtcG722_CreateDecoder(&dec_state_);
-}
-
-AudioDecoderG722::~AudioDecoderG722() {
- WebRtcG722_FreeDecoder(dec_state_);
-}
-
-bool AudioDecoderG722::HasDecodePlc() const {
- return false;
-}
-
-int AudioDecoderG722::DecodeInternal(const uint8_t* encoded,
- size_t encoded_len,
- int sample_rate_hz,
- int16_t* decoded,
- SpeechType* speech_type) {
- DCHECK_EQ(sample_rate_hz, 16000);
- int16_t temp_type = 1; // Default is speech.
- int16_t ret =
- WebRtcG722_Decode(dec_state_, encoded, static_cast<int16_t>(encoded_len),
- decoded, &temp_type);
- *speech_type = ConvertSpeechType(temp_type);
- return ret;
-}
-
-int AudioDecoderG722::Init() {
- return WebRtcG722_DecoderInit(dec_state_);
-}
-
-int AudioDecoderG722::PacketDuration(const uint8_t* encoded,
- size_t encoded_len) const {
- // 1/2 encoded byte per sample per channel.
- return static_cast<int>(2 * encoded_len / Channels());
-}
-
-size_t AudioDecoderG722::Channels() const {
- return 1;
-}
-
-AudioDecoderG722Stereo::AudioDecoderG722Stereo() {
- WebRtcG722_CreateDecoder(&dec_state_left_);
- WebRtcG722_CreateDecoder(&dec_state_right_);
-}
-
-AudioDecoderG722Stereo::~AudioDecoderG722Stereo() {
- WebRtcG722_FreeDecoder(dec_state_left_);
- WebRtcG722_FreeDecoder(dec_state_right_);
-}
-
-int AudioDecoderG722Stereo::DecodeInternal(const uint8_t* encoded,
- size_t encoded_len,
- int sample_rate_hz,
- int16_t* decoded,
- SpeechType* speech_type) {
- DCHECK_EQ(sample_rate_hz, 16000);
- int16_t temp_type = 1; // Default is speech.
- // De-interleave the bit-stream into two separate payloads.
- uint8_t* encoded_deinterleaved = new uint8_t[encoded_len];
- SplitStereoPacket(encoded, encoded_len, encoded_deinterleaved);
- // Decode left and right.
- int16_t ret = WebRtcG722_Decode(dec_state_left_, encoded_deinterleaved,
- static_cast<int16_t>(encoded_len / 2),
- decoded, &temp_type);
- if (ret >= 0) {
- int decoded_len = ret;
- ret = WebRtcG722_Decode(dec_state_right_,
- &encoded_deinterleaved[encoded_len / 2],
- static_cast<int16_t>(encoded_len / 2),
- &decoded[decoded_len], &temp_type);
- if (ret == decoded_len) {
- decoded_len += ret;
- // Interleave output.
- for (int k = decoded_len / 2; k < decoded_len; k++) {
- int16_t temp = decoded[k];
- memmove(&decoded[2 * k - decoded_len + 2],
- &decoded[2 * k - decoded_len + 1],
- (decoded_len - k - 1) * sizeof(int16_t));
- decoded[2 * k - decoded_len + 1] = temp;
- }
- ret = decoded_len; // Return total number of samples.
- }
- }
- *speech_type = ConvertSpeechType(temp_type);
- delete [] encoded_deinterleaved;
- return ret;
-}
-
-size_t AudioDecoderG722Stereo::Channels() const {
- return 2;
-}
-
-int AudioDecoderG722Stereo::Init() {
- int r = WebRtcG722_DecoderInit(dec_state_left_);
- if (r != 0)
- return r;
- return WebRtcG722_DecoderInit(dec_state_right_);
-}
-
-// Split the stereo packet and place left and right channel after each other
-// in the output array.
-void AudioDecoderG722Stereo::SplitStereoPacket(const uint8_t* encoded,
- size_t encoded_len,
- uint8_t* encoded_deinterleaved) {
- assert(encoded);
- // Regroup the 4 bits/sample so |l1 l2| |r1 r2| |l3 l4| |r3 r4| ...,
- // where "lx" is 4 bits representing left sample number x, and "rx" right
- // sample. Two samples fit in one byte, represented with |...|.
- for (size_t i = 0; i + 1 < encoded_len; i += 2) {
- uint8_t right_byte = ((encoded[i] & 0x0F) << 4) + (encoded[i + 1] & 0x0F);
- encoded_deinterleaved[i] = (encoded[i] & 0xF0) + (encoded[i + 1] >> 4);
- encoded_deinterleaved[i + 1] = right_byte;
- }
-
- // Move one byte representing right channel each loop, and place it at the
- // end of the bytestream vector. After looping the data is reordered to:
- // |l1 l2| |l3 l4| ... |l(N-1) lN| |r1 r2| |r3 r4| ... |r(N-1) r(N)|,
- // where N is the total number of samples.
- for (size_t i = 0; i < encoded_len / 2; i++) {
- uint8_t right_byte = encoded_deinterleaved[i + 1];
- memmove(&encoded_deinterleaved[i + 1], &encoded_deinterleaved[i + 2],
- encoded_len - i - 2);
- encoded_deinterleaved[encoded_len - 1] = right_byte;
- }
-}
-#endif
-
-// Opus
-#ifdef WEBRTC_CODEC_OPUS
-AudioDecoderOpus::AudioDecoderOpus(int num_channels) : channels_(num_channels) {
- DCHECK(num_channels == 1 || num_channels == 2);
- WebRtcOpus_DecoderCreate(&dec_state_, static_cast<int>(channels_));
-}
-
-AudioDecoderOpus::~AudioDecoderOpus() {
- WebRtcOpus_DecoderFree(dec_state_);
-}
-
-int AudioDecoderOpus::DecodeInternal(const uint8_t* encoded,
- size_t encoded_len,
- int sample_rate_hz,
- int16_t* decoded,
- SpeechType* speech_type) {
- DCHECK_EQ(sample_rate_hz, 48000);
- int16_t temp_type = 1; // Default is speech.
- int ret = WebRtcOpus_Decode(dec_state_, encoded,
- static_cast<int16_t>(encoded_len), decoded,
- &temp_type);
- if (ret > 0)
- ret *= static_cast<int>(channels_); // Return total number of samples.
- *speech_type = ConvertSpeechType(temp_type);
- return ret;
-}
-
-int AudioDecoderOpus::DecodeRedundantInternal(const uint8_t* encoded,
- size_t encoded_len,
- int sample_rate_hz,
- int16_t* decoded,
- SpeechType* speech_type) {
- if (!PacketHasFec(encoded, encoded_len)) {
- // This packet is a RED packet.
- return DecodeInternal(encoded, encoded_len, sample_rate_hz, decoded,
- speech_type);
- }
-
- DCHECK_EQ(sample_rate_hz, 48000);
- int16_t temp_type = 1; // Default is speech.
- int ret = WebRtcOpus_DecodeFec(dec_state_, encoded,
- static_cast<int16_t>(encoded_len), decoded,
- &temp_type);
- if (ret > 0)
- ret *= static_cast<int>(channels_); // Return total number of samples.
- *speech_type = ConvertSpeechType(temp_type);
- return ret;
-}
-
-int AudioDecoderOpus::Init() {
- return WebRtcOpus_DecoderInit(dec_state_);
-}
-
-int AudioDecoderOpus::PacketDuration(const uint8_t* encoded,
- size_t encoded_len) const {
- return WebRtcOpus_DurationEst(dec_state_,
- encoded, static_cast<int>(encoded_len));
-}
-
-int AudioDecoderOpus::PacketDurationRedundant(const uint8_t* encoded,
- size_t encoded_len) const {
- if (!PacketHasFec(encoded, encoded_len)) {
- // This packet is a RED packet.
- return PacketDuration(encoded, encoded_len);
- }
-
- return WebRtcOpus_FecDurationEst(encoded, static_cast<int>(encoded_len));
-}
-
-bool AudioDecoderOpus::PacketHasFec(const uint8_t* encoded,
- size_t encoded_len) const {
- int fec;
- fec = WebRtcOpus_PacketHasFec(encoded, static_cast<int>(encoded_len));
- return (fec == 1);
-}
-
-size_t AudioDecoderOpus::Channels() const {
- return channels_;
-}
-#endif
-
AudioDecoderCng::AudioDecoderCng() {
- CHECK_EQ(0, WebRtcCng_CreateDec(&dec_state_));
+ RTC_CHECK_EQ(0, WebRtcCng_CreateDec(&dec_state_));
+ WebRtcCng_InitDec(dec_state_);
}
AudioDecoderCng::~AudioDecoderCng() {
WebRtcCng_FreeDec(dec_state_);
}
-int AudioDecoderCng::Init() {
- return WebRtcCng_InitDec(dec_state_);
+void AudioDecoderCng::Reset() {
+ WebRtcCng_InitDec(dec_state_);
}
int AudioDecoderCng::IncomingPacket(const uint8_t* payload,
@@ -445,7 +89,6 @@ bool CodecSupported(NetEqDecoder codec_type) {
case kDecoderISACswb:
case kDecoderISACfb:
#endif
-#ifdef WEBRTC_CODEC_PCM16
case kDecoderPCM16B:
case kDecoderPCM16Bwb:
case kDecoderPCM16Bswb32kHz:
@@ -455,7 +98,6 @@ bool CodecSupported(NetEqDecoder codec_type) {
case kDecoderPCM16Bswb32kHz_2ch:
case kDecoderPCM16Bswb48kHz_2ch:
case kDecoderPCM16B_5ch:
-#endif
#ifdef WEBRTC_CODEC_G722
case kDecoderG722:
case kDecoderG722_2ch:
@@ -488,21 +130,17 @@ int CodecSampleRateHz(NetEqDecoder codec_type) {
#ifdef WEBRTC_CODEC_ILBC
case kDecoderILBC:
#endif
-#ifdef WEBRTC_CODEC_PCM16
case kDecoderPCM16B:
case kDecoderPCM16B_2ch:
case kDecoderPCM16B_5ch:
-#endif
case kDecoderCNGnb: {
return 8000;
}
#if defined(WEBRTC_CODEC_ISACFX) || defined(WEBRTC_CODEC_ISAC)
case kDecoderISAC:
#endif
-#ifdef WEBRTC_CODEC_PCM16
case kDecoderPCM16Bwb:
case kDecoderPCM16Bwb_2ch:
-#endif
#ifdef WEBRTC_CODEC_G722
case kDecoderG722:
case kDecoderG722_2ch:
@@ -514,19 +152,15 @@ int CodecSampleRateHz(NetEqDecoder codec_type) {
case kDecoderISACswb:
case kDecoderISACfb:
#endif
-#ifdef WEBRTC_CODEC_PCM16
case kDecoderPCM16Bswb32kHz:
case kDecoderPCM16Bswb32kHz_2ch:
-#endif
case kDecoderCNGswb32kHz: {
return 32000;
}
-#ifdef WEBRTC_CODEC_PCM16
case kDecoderPCM16Bswb48kHz:
case kDecoderPCM16Bswb48kHz_2ch: {
return 48000;
}
-#endif
#ifdef WEBRTC_CODEC_OPUS
case kDecoderOpus:
case kDecoderOpus_2ch: {
@@ -549,49 +183,38 @@ AudioDecoder* CreateAudioDecoder(NetEqDecoder codec_type) {
}
switch (codec_type) {
case kDecoderPCMu:
- return new AudioDecoderPcmU;
+ return new AudioDecoderPcmU(1);
case kDecoderPCMa:
- return new AudioDecoderPcmA;
+ return new AudioDecoderPcmA(1);
case kDecoderPCMu_2ch:
- return new AudioDecoderPcmUMultiCh(2);
+ return new AudioDecoderPcmU(2);
case kDecoderPCMa_2ch:
- return new AudioDecoderPcmAMultiCh(2);
+ return new AudioDecoderPcmA(2);
#ifdef WEBRTC_CODEC_ILBC
case kDecoderILBC:
return new AudioDecoderIlbc;
#endif
#if defined(WEBRTC_CODEC_ISACFX)
- case kDecoderISAC: {
- AudioEncoderDecoderIsacFix::Config config;
- return new AudioEncoderDecoderIsacFix(config);
- }
+ case kDecoderISAC:
+ return new AudioDecoderIsacFix();
#elif defined(WEBRTC_CODEC_ISAC)
- case kDecoderISAC: {
- AudioEncoderDecoderIsac::Config config;
- config.sample_rate_hz = 16000;
- return new AudioEncoderDecoderIsac(config);
- }
+ case kDecoderISAC:
case kDecoderISACswb:
- case kDecoderISACfb: {
- AudioEncoderDecoderIsac::Config config;
- config.sample_rate_hz = 32000;
- return new AudioEncoderDecoderIsac(config);
- }
+ case kDecoderISACfb:
+ return new AudioDecoderIsac();
#endif
-#ifdef WEBRTC_CODEC_PCM16
case kDecoderPCM16B:
case kDecoderPCM16Bwb:
case kDecoderPCM16Bswb32kHz:
case kDecoderPCM16Bswb48kHz:
- return new AudioDecoderPcm16B;
+ return new AudioDecoderPcm16B(1);
case kDecoderPCM16B_2ch:
case kDecoderPCM16Bwb_2ch:
case kDecoderPCM16Bswb32kHz_2ch:
case kDecoderPCM16Bswb48kHz_2ch:
- return new AudioDecoderPcm16BMultiCh(2);
+ return new AudioDecoderPcm16B(2);
case kDecoderPCM16B_5ch:
- return new AudioDecoderPcm16BMultiCh(5);
-#endif
+ return new AudioDecoderPcm16B(5);
#ifdef WEBRTC_CODEC_G722
case kDecoderG722:
return new AudioDecoderG722;
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/audio_decoder_impl.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/audio_decoder_impl.h
index 202d79d8f51..48ef50259f1 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/audio_decoder_impl.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/audio_decoder_impl.h
@@ -13,231 +13,17 @@
#include <assert.h>
-#ifndef AUDIO_DECODER_UNITTEST
-// If this is compiled as a part of the audio_deoder_unittest, the codec
-// selection is made in the gypi file instead of in engine_configurations.h.
#include "webrtc/engine_configurations.h"
-#endif
#include "webrtc/base/constructormagic.h"
#include "webrtc/modules/audio_coding/codecs/audio_decoder.h"
#include "webrtc/modules/audio_coding/codecs/cng/include/webrtc_cng.h"
#ifdef WEBRTC_CODEC_G722
#include "webrtc/modules/audio_coding/codecs/g722/include/g722_interface.h"
#endif
-#ifdef WEBRTC_CODEC_ILBC
-#include "webrtc/modules/audio_coding/codecs/ilbc/interface/ilbc.h"
-#endif
-#ifdef WEBRTC_CODEC_OPUS
-#include "webrtc/modules/audio_coding/codecs/opus/interface/opus_interface.h"
-#endif
#include "webrtc/typedefs.h"
namespace webrtc {
-class AudioDecoderPcmU : public AudioDecoder {
- public:
- AudioDecoderPcmU() {}
- int Init() override;
- int PacketDuration(const uint8_t* encoded, size_t encoded_len) const override;
- size_t Channels() const override;
-
- protected:
- int DecodeInternal(const uint8_t* encoded,
- size_t encoded_len,
- int sample_rate_hz,
- int16_t* decoded,
- SpeechType* speech_type) override;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(AudioDecoderPcmU);
-};
-
-class AudioDecoderPcmA : public AudioDecoder {
- public:
- AudioDecoderPcmA() {}
- int Init() override;
- int PacketDuration(const uint8_t* encoded, size_t encoded_len) const override;
- size_t Channels() const override;
-
- protected:
- int DecodeInternal(const uint8_t* encoded,
- size_t encoded_len,
- int sample_rate_hz,
- int16_t* decoded,
- SpeechType* speech_type) override;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(AudioDecoderPcmA);
-};
-
-class AudioDecoderPcmUMultiCh : public AudioDecoderPcmU {
- public:
- explicit AudioDecoderPcmUMultiCh(size_t channels)
- : AudioDecoderPcmU(), channels_(channels) {
- assert(channels > 0);
- }
- size_t Channels() const override;
-
- private:
- const size_t channels_;
- DISALLOW_COPY_AND_ASSIGN(AudioDecoderPcmUMultiCh);
-};
-
-class AudioDecoderPcmAMultiCh : public AudioDecoderPcmA {
- public:
- explicit AudioDecoderPcmAMultiCh(size_t channels)
- : AudioDecoderPcmA(), channels_(channels) {
- assert(channels > 0);
- }
- size_t Channels() const override;
-
- private:
- const size_t channels_;
- DISALLOW_COPY_AND_ASSIGN(AudioDecoderPcmAMultiCh);
-};
-
-#ifdef WEBRTC_CODEC_PCM16
-// This class handles all four types (i.e., sample rates) of PCM16B codecs.
-// The type is specified in the constructor parameter |type|.
-class AudioDecoderPcm16B : public AudioDecoder {
- public:
- AudioDecoderPcm16B();
- int Init() override;
- int PacketDuration(const uint8_t* encoded, size_t encoded_len) const override;
- size_t Channels() const override;
-
- protected:
- int DecodeInternal(const uint8_t* encoded,
- size_t encoded_len,
- int sample_rate_hz,
- int16_t* decoded,
- SpeechType* speech_type) override;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(AudioDecoderPcm16B);
-};
-
-// This class handles all four types (i.e., sample rates) of PCM16B codecs.
-// The type is specified in the constructor parameter |type|, and the number
-// of channels is derived from the type.
-class AudioDecoderPcm16BMultiCh : public AudioDecoderPcm16B {
- public:
- explicit AudioDecoderPcm16BMultiCh(int num_channels);
- size_t Channels() const override;
-
- private:
- const size_t channels_;
- DISALLOW_COPY_AND_ASSIGN(AudioDecoderPcm16BMultiCh);
-};
-#endif
-
-#ifdef WEBRTC_CODEC_ILBC
-class AudioDecoderIlbc : public AudioDecoder {
- public:
- AudioDecoderIlbc();
- ~AudioDecoderIlbc() override;
- bool HasDecodePlc() const override;
- int DecodePlc(int num_frames, int16_t* decoded) override;
- int Init() override;
- size_t Channels() const override;
-
- protected:
- int DecodeInternal(const uint8_t* encoded,
- size_t encoded_len,
- int sample_rate_hz,
- int16_t* decoded,
- SpeechType* speech_type) override;
-
- private:
- IlbcDecoderInstance* dec_state_;
- DISALLOW_COPY_AND_ASSIGN(AudioDecoderIlbc);
-};
-#endif
-
-#ifdef WEBRTC_CODEC_G722
-class AudioDecoderG722 : public AudioDecoder {
- public:
- AudioDecoderG722();
- ~AudioDecoderG722() override;
- bool HasDecodePlc() const override;
- int Init() override;
- int PacketDuration(const uint8_t* encoded, size_t encoded_len) const override;
- size_t Channels() const override;
-
- protected:
- int DecodeInternal(const uint8_t* encoded,
- size_t encoded_len,
- int sample_rate_hz,
- int16_t* decoded,
- SpeechType* speech_type) override;
-
- private:
- G722DecInst* dec_state_;
- DISALLOW_COPY_AND_ASSIGN(AudioDecoderG722);
-};
-
-class AudioDecoderG722Stereo : public AudioDecoder {
- public:
- AudioDecoderG722Stereo();
- ~AudioDecoderG722Stereo() override;
- int Init() override;
-
- protected:
- int DecodeInternal(const uint8_t* encoded,
- size_t encoded_len,
- int sample_rate_hz,
- int16_t* decoded,
- SpeechType* speech_type) override;
- size_t Channels() const override;
-
- private:
- // Splits the stereo-interleaved payload in |encoded| into separate payloads
- // for left and right channels. The separated payloads are written to
- // |encoded_deinterleaved|, which must hold at least |encoded_len| samples.
- // The left channel starts at offset 0, while the right channel starts at
- // offset encoded_len / 2 into |encoded_deinterleaved|.
- void SplitStereoPacket(const uint8_t* encoded, size_t encoded_len,
- uint8_t* encoded_deinterleaved);
-
- G722DecInst* dec_state_left_;
- G722DecInst* dec_state_right_;
-
- DISALLOW_COPY_AND_ASSIGN(AudioDecoderG722Stereo);
-};
-#endif
-
-#ifdef WEBRTC_CODEC_OPUS
-class AudioDecoderOpus : public AudioDecoder {
- public:
- explicit AudioDecoderOpus(int num_channels);
- ~AudioDecoderOpus() override;
-
- int Init() override;
- int PacketDuration(const uint8_t* encoded, size_t encoded_len) const override;
- int PacketDurationRedundant(const uint8_t* encoded,
- size_t encoded_len) const override;
- bool PacketHasFec(const uint8_t* encoded, size_t encoded_len) const override;
- size_t Channels() const override;
-
- protected:
- int DecodeInternal(const uint8_t* encoded,
- size_t encoded_len,
- int sample_rate_hz,
- int16_t* decoded,
- SpeechType* speech_type) override;
- int DecodeRedundantInternal(const uint8_t* encoded,
- size_t encoded_len,
- int sample_rate_hz,
- int16_t* decoded,
- SpeechType* speech_type) override;
-
- private:
- OpusDecInst* dec_state_;
- const size_t channels_;
- DISALLOW_COPY_AND_ASSIGN(AudioDecoderOpus);
-};
-#endif
-
// AudioDecoderCng is a special type of AudioDecoder. It inherits from
// AudioDecoder just to fit in the DecoderDatabase. None of the class methods
// should be used, except constructor, destructor, and accessors.
@@ -248,7 +34,7 @@ class AudioDecoderCng : public AudioDecoder {
public:
explicit AudioDecoderCng();
~AudioDecoderCng() override;
- int Init() override;
+ void Reset() override;
int IncomingPacket(const uint8_t* payload,
size_t payload_len,
uint16_t rtp_sequence_number,
@@ -267,7 +53,7 @@ class AudioDecoderCng : public AudioDecoder {
private:
CNG_dec_inst* dec_state_;
- DISALLOW_COPY_AND_ASSIGN(AudioDecoderCng);
+ RTC_DISALLOW_COPY_AND_ASSIGN(AudioDecoderCng);
};
enum NetEqDecoder {
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/audio_decoder_unittest.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/audio_decoder_unittest.cc
index d54fbe9099a..0eafd7dd529 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/audio_decoder_unittest.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/audio_decoder_unittest.cc
@@ -18,12 +18,19 @@
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/modules/audio_coding/codecs/g711/include/audio_decoder_pcm.h"
#include "webrtc/modules/audio_coding/codecs/g711/include/audio_encoder_pcm.h"
+#include "webrtc/modules/audio_coding/codecs/g722/include/audio_decoder_g722.h"
#include "webrtc/modules/audio_coding/codecs/g722/include/audio_encoder_g722.h"
+#include "webrtc/modules/audio_coding/codecs/ilbc/interface/audio_decoder_ilbc.h"
#include "webrtc/modules/audio_coding/codecs/ilbc/interface/audio_encoder_ilbc.h"
+#include "webrtc/modules/audio_coding/codecs/isac/fix/interface/audio_decoder_isacfix.h"
#include "webrtc/modules/audio_coding/codecs/isac/fix/interface/audio_encoder_isacfix.h"
+#include "webrtc/modules/audio_coding/codecs/isac/main/interface/audio_decoder_isac.h"
#include "webrtc/modules/audio_coding/codecs/isac/main/interface/audio_encoder_isac.h"
+#include "webrtc/modules/audio_coding/codecs/opus/interface/audio_decoder_opus.h"
#include "webrtc/modules/audio_coding/codecs/opus/interface/audio_encoder_opus.h"
+#include "webrtc/modules/audio_coding/codecs/pcm16b/include/audio_decoder_pcm16b.h"
#include "webrtc/modules/audio_coding/codecs/pcm16b/include/audio_encoder_pcm16b.h"
#include "webrtc/modules/audio_coding/neteq/tools/resample_input_audio_file.h"
#include "webrtc/system_wrappers/interface/data_log.h"
@@ -137,11 +144,11 @@ class AudioDecoderTest : public ::testing::Test {
uint8_t* output) {
encoded_info_.encoded_bytes = 0;
const size_t samples_per_10ms = audio_encoder_->SampleRateHz() / 100;
- CHECK_EQ(samples_per_10ms * audio_encoder_->Num10MsFramesInNextPacket(),
- input_len_samples);
+ RTC_CHECK_EQ(samples_per_10ms * audio_encoder_->Num10MsFramesInNextPacket(),
+ input_len_samples);
rtc::scoped_ptr<int16_t[]> interleaved_input(
new int16_t[channels_ * samples_per_10ms]);
- for (int i = 0; i < audio_encoder_->Num10MsFramesInNextPacket(); ++i) {
+ for (size_t i = 0; i < audio_encoder_->Num10MsFramesInNextPacket(); ++i) {
EXPECT_EQ(0u, encoded_info_.encoded_bytes);
// Duplicate the mono input signal to however many channels the test
@@ -171,7 +178,6 @@ class AudioDecoderTest : public ::testing::Test {
size_t processed_samples = 0u;
encoded_bytes_ = 0u;
InitEncoder();
- EXPECT_EQ(0, decoder_->Init());
std::vector<int16_t> input;
std::vector<int16_t> decoded;
while (processed_samples + frame_size_ <= data_length_) {
@@ -220,7 +226,7 @@ class AudioDecoderTest : public ::testing::Test {
size_t enc_len = EncodeFrame(input.get(), frame_size_, encoded_);
size_t dec_len;
AudioDecoder::SpeechType speech_type1, speech_type2;
- EXPECT_EQ(0, decoder_->Init());
+ decoder_->Reset();
rtc::scoped_ptr<int16_t[]> output1(new int16_t[frame_size_ * channels_]);
dec_len = decoder_->Decode(encoded_, enc_len, codec_input_rate_hz_,
frame_size_ * channels_ * sizeof(int16_t),
@@ -228,7 +234,7 @@ class AudioDecoderTest : public ::testing::Test {
ASSERT_LE(dec_len, frame_size_ * channels_);
EXPECT_EQ(frame_size_ * channels_, dec_len);
// Re-init decoder and decode again.
- EXPECT_EQ(0, decoder_->Init());
+ decoder_->Reset();
rtc::scoped_ptr<int16_t[]> output2(new int16_t[frame_size_ * channels_]);
dec_len = decoder_->Decode(encoded_, enc_len, codec_input_rate_hz_,
frame_size_ * channels_ * sizeof(int16_t),
@@ -249,7 +255,7 @@ class AudioDecoderTest : public ::testing::Test {
input_audio_.Read(frame_size_, codec_input_rate_hz_, input.get()));
size_t enc_len = EncodeFrame(input.get(), frame_size_, encoded_);
AudioDecoder::SpeechType speech_type;
- EXPECT_EQ(0, decoder_->Init());
+ decoder_->Reset();
rtc::scoped_ptr<int16_t[]> output(new int16_t[frame_size_ * channels_]);
size_t dec_len = decoder_->Decode(encoded_, enc_len, codec_input_rate_hz_,
frame_size_ * channels_ * sizeof(int16_t),
@@ -280,7 +286,7 @@ class AudioDecoderPcmUTest : public AudioDecoderTest {
AudioDecoderPcmUTest() : AudioDecoderTest() {
frame_size_ = 160;
data_length_ = 10 * frame_size_;
- decoder_ = new AudioDecoderPcmU;
+ decoder_ = new AudioDecoderPcmU(1);
AudioEncoderPcmU::Config config;
config.frame_size_ms = static_cast<int>(frame_size_ / 8);
config.payload_type = payload_type_;
@@ -293,7 +299,7 @@ class AudioDecoderPcmATest : public AudioDecoderTest {
AudioDecoderPcmATest() : AudioDecoderTest() {
frame_size_ = 160;
data_length_ = 10 * frame_size_;
- decoder_ = new AudioDecoderPcmA;
+ decoder_ = new AudioDecoderPcmA(1);
AudioEncoderPcmA::Config config;
config.frame_size_ms = static_cast<int>(frame_size_ / 8);
config.payload_type = payload_type_;
@@ -307,7 +313,7 @@ class AudioDecoderPcm16BTest : public AudioDecoderTest {
codec_input_rate_hz_ = 16000;
frame_size_ = 20 * codec_input_rate_hz_ / 1000;
data_length_ = 10 * frame_size_;
- decoder_ = new AudioDecoderPcm16B;
+ decoder_ = new AudioDecoderPcm16B(1);
assert(decoder_);
AudioEncoderPcm16B::Config config;
config.sample_rate_hz = codec_input_rate_hz_;
@@ -341,14 +347,14 @@ class AudioDecoderIlbcTest : public AudioDecoderTest {
input_audio_.Read(frame_size_, codec_input_rate_hz_, input.get()));
size_t enc_len = EncodeFrame(input.get(), frame_size_, encoded_);
AudioDecoder::SpeechType speech_type;
- EXPECT_EQ(0, decoder_->Init());
+ decoder_->Reset();
rtc::scoped_ptr<int16_t[]> output(new int16_t[frame_size_ * channels_]);
size_t dec_len = decoder_->Decode(encoded_, enc_len, codec_input_rate_hz_,
frame_size_ * channels_ * sizeof(int16_t),
output.get(), &speech_type);
EXPECT_EQ(frame_size_, dec_len);
// Simply call DecodePlc and verify that we get 0 as return value.
- EXPECT_EQ(0, decoder_->DecodePlc(1, output.get()));
+ EXPECT_EQ(0U, decoder_->DecodePlc(1, output.get()));
}
};
@@ -358,17 +364,14 @@ class AudioDecoderIsacFloatTest : public AudioDecoderTest {
codec_input_rate_hz_ = 16000;
frame_size_ = 480;
data_length_ = 10 * frame_size_;
- AudioEncoderDecoderIsac::Config config;
+ AudioEncoderIsac::Config config;
config.payload_type = payload_type_;
config.sample_rate_hz = codec_input_rate_hz_;
config.adaptive_mode = false;
config.frame_size_ms =
1000 * static_cast<int>(frame_size_) / codec_input_rate_hz_;
-
- // We need to create separate AudioEncoderDecoderIsac objects for encoding
- // and decoding, because the test class destructor destroys them both.
- audio_encoder_.reset(new AudioEncoderDecoderIsac(config));
- decoder_ = new AudioEncoderDecoderIsac(config);
+ audio_encoder_.reset(new AudioEncoderIsac(config));
+ decoder_ = new AudioDecoderIsac();
}
};
@@ -378,17 +381,14 @@ class AudioDecoderIsacSwbTest : public AudioDecoderTest {
codec_input_rate_hz_ = 32000;
frame_size_ = 960;
data_length_ = 10 * frame_size_;
- AudioEncoderDecoderIsac::Config config;
+ AudioEncoderIsac::Config config;
config.payload_type = payload_type_;
config.sample_rate_hz = codec_input_rate_hz_;
config.adaptive_mode = false;
config.frame_size_ms =
1000 * static_cast<int>(frame_size_) / codec_input_rate_hz_;
-
- // We need to create separate AudioEncoderDecoderIsac objects for encoding
- // and decoding, because the test class destructor destroys them both.
- audio_encoder_.reset(new AudioEncoderDecoderIsac(config));
- decoder_ = new AudioEncoderDecoderIsac(config);
+ audio_encoder_.reset(new AudioEncoderIsac(config));
+ decoder_ = new AudioDecoderIsac();
}
};
@@ -398,18 +398,14 @@ class AudioDecoderIsacFixTest : public AudioDecoderTest {
codec_input_rate_hz_ = 16000;
frame_size_ = 480;
data_length_ = 10 * frame_size_;
- AudioEncoderDecoderIsacFix::Config config;
+ AudioEncoderIsacFix::Config config;
config.payload_type = payload_type_;
config.sample_rate_hz = codec_input_rate_hz_;
config.adaptive_mode = false;
config.frame_size_ms =
1000 * static_cast<int>(frame_size_) / codec_input_rate_hz_;
-
- // We need to create separate AudioEncoderDecoderIsacFix objects for
- // encoding and decoding, because the test class destructor destroys them
- // both.
- audio_encoder_.reset(new AudioEncoderDecoderIsacFix(config));
- decoder_ = new AudioEncoderDecoderIsacFix(config);
+ audio_encoder_.reset(new AudioEncoderIsacFix(config));
+ decoder_ = new AudioDecoderIsacFix();
}
};
@@ -479,7 +475,6 @@ class AudioDecoderOpusStereoTest : public AudioDecoderOpusTest {
TEST_F(AudioDecoderPcmUTest, EncodeDecode) {
int tolerance = 251;
double mse = 1734.0;
- EXPECT_TRUE(CodecSupported(kDecoderPCMu));
EncodeDecodeTest(data_length_, tolerance, mse);
ReInitTest();
EXPECT_FALSE(decoder_->HasDecodePlc());
@@ -506,7 +501,6 @@ TEST_F(AudioDecoderPcmUTest, SetTargetBitrate) {
TEST_F(AudioDecoderPcmATest, EncodeDecode) {
int tolerance = 308;
double mse = 1931.0;
- EXPECT_TRUE(CodecSupported(kDecoderPCMa));
EncodeDecodeTest(data_length_, tolerance, mse);
ReInitTest();
EXPECT_FALSE(decoder_->HasDecodePlc());
@@ -519,10 +513,6 @@ TEST_F(AudioDecoderPcmATest, SetTargetBitrate) {
TEST_F(AudioDecoderPcm16BTest, EncodeDecode) {
int tolerance = 0;
double mse = 0.0;
- EXPECT_TRUE(CodecSupported(kDecoderPCM16B));
- EXPECT_TRUE(CodecSupported(kDecoderPCM16Bwb));
- EXPECT_TRUE(CodecSupported(kDecoderPCM16Bswb32kHz));
- EXPECT_TRUE(CodecSupported(kDecoderPCM16Bswb48kHz));
EncodeDecodeTest(2 * data_length_, tolerance, mse);
ReInitTest();
EXPECT_FALSE(decoder_->HasDecodePlc());
@@ -537,7 +527,6 @@ TEST_F(AudioDecoderIlbcTest, EncodeDecode) {
int tolerance = 6808;
double mse = 2.13e6;
int delay = 80; // Delay from input to output.
- EXPECT_TRUE(CodecSupported(kDecoderILBC));
EncodeDecodeTest(500, tolerance, mse, delay);
ReInitTest();
EXPECT_TRUE(decoder_->HasDecodePlc());
@@ -552,7 +541,6 @@ TEST_F(AudioDecoderIsacFloatTest, EncodeDecode) {
int tolerance = 3399;
double mse = 434951.0;
int delay = 48; // Delay from input to output.
- EXPECT_TRUE(CodecSupported(kDecoderISAC));
EncodeDecodeTest(0, tolerance, mse, delay);
ReInitTest();
EXPECT_FALSE(decoder_->HasDecodePlc());
@@ -566,7 +554,6 @@ TEST_F(AudioDecoderIsacSwbTest, EncodeDecode) {
int tolerance = 19757;
double mse = 8.18e6;
int delay = 160; // Delay from input to output.
- EXPECT_TRUE(CodecSupported(kDecoderISACswb));
EncodeDecodeTest(0, tolerance, mse, delay);
ReInitTest();
EXPECT_FALSE(decoder_->HasDecodePlc());
@@ -586,7 +573,6 @@ TEST_F(AudioDecoderIsacFixTest, MAYBE_EncodeDecode) {
int tolerance = 11034;
double mse = 3.46e6;
int delay = 54; // Delay from input to output.
- EXPECT_TRUE(CodecSupported(kDecoderISAC));
#ifdef WEBRTC_ANDROID
static const int kEncodedBytes = 685;
#else
@@ -605,7 +591,6 @@ TEST_F(AudioDecoderG722Test, EncodeDecode) {
int tolerance = 6176;
double mse = 238630.0;
int delay = 22; // Delay from input to output.
- EXPECT_TRUE(CodecSupported(kDecoderG722));
EncodeDecodeTest(data_length_ / 2, tolerance, mse, delay);
ReInitTest();
EXPECT_FALSE(decoder_->HasDecodePlc());
@@ -615,16 +600,11 @@ TEST_F(AudioDecoderG722Test, SetTargetBitrate) {
TestSetAndGetTargetBitratesWithFixedCodec(audio_encoder_.get(), 64000);
}
-TEST_F(AudioDecoderG722StereoTest, CreateAndDestroy) {
- EXPECT_TRUE(CodecSupported(kDecoderG722_2ch));
-}
-
TEST_F(AudioDecoderG722StereoTest, EncodeDecode) {
int tolerance = 6176;
int channel_diff_tolerance = 0;
double mse = 238630.0;
int delay = 22; // Delay from input to output.
- EXPECT_TRUE(CodecSupported(kDecoderG722_2ch));
EncodeDecodeTest(data_length_, tolerance, mse, delay, channel_diff_tolerance);
ReInitTest();
EXPECT_FALSE(decoder_->HasDecodePlc());
@@ -638,7 +618,6 @@ TEST_F(AudioDecoderOpusTest, EncodeDecode) {
int tolerance = 6176;
double mse = 238630.0;
int delay = 22; // Delay from input to output.
- EXPECT_TRUE(CodecSupported(kDecoderOpus));
EncodeDecodeTest(0, tolerance, mse, delay);
ReInitTest();
EXPECT_FALSE(decoder_->HasDecodePlc());
@@ -663,7 +642,6 @@ TEST_F(AudioDecoderOpusStereoTest, EncodeDecode) {
int channel_diff_tolerance = 0;
double mse = 238630.0;
int delay = 22; // Delay from input to output.
- EXPECT_TRUE(CodecSupported(kDecoderOpus_2ch));
EncodeDecodeTest(0, tolerance, mse, delay, channel_diff_tolerance);
ReInitTest();
EXPECT_FALSE(decoder_->HasDecodePlc());
@@ -673,15 +651,43 @@ TEST_F(AudioDecoderOpusStereoTest, SetTargetBitrate) {
TestOpusSetTargetBitrates(audio_encoder_.get());
}
+namespace {
+#ifdef WEBRTC_CODEC_ILBC
+const bool has_ilbc = true;
+#else
+const bool has_ilbc = false;
+#endif
+#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
+const bool has_isac = true;
+#else
+const bool has_isac = false;
+#endif
+#ifdef WEBRTC_CODEC_ISAC
+const bool has_isac_swb = true;
+#else
+const bool has_isac_swb = false;
+#endif
+#ifdef WEBRTC_CODEC_G722
+const bool has_g722 = true;
+#else
+const bool has_g722 = false;
+#endif
+#ifdef WEBRTC_CODEC_OPUS
+const bool has_opus = true;
+#else
+const bool has_opus = false;
+#endif
+} // namespace
+
TEST(AudioDecoder, CodecSampleRateHz) {
EXPECT_EQ(8000, CodecSampleRateHz(kDecoderPCMu));
EXPECT_EQ(8000, CodecSampleRateHz(kDecoderPCMa));
EXPECT_EQ(8000, CodecSampleRateHz(kDecoderPCMu_2ch));
EXPECT_EQ(8000, CodecSampleRateHz(kDecoderPCMa_2ch));
- EXPECT_EQ(8000, CodecSampleRateHz(kDecoderILBC));
- EXPECT_EQ(16000, CodecSampleRateHz(kDecoderISAC));
- EXPECT_EQ(32000, CodecSampleRateHz(kDecoderISACswb));
- EXPECT_EQ(32000, CodecSampleRateHz(kDecoderISACfb));
+ EXPECT_EQ(has_ilbc ? 8000 : -1, CodecSampleRateHz(kDecoderILBC));
+ EXPECT_EQ(has_isac ? 16000 : -1, CodecSampleRateHz(kDecoderISAC));
+ EXPECT_EQ(has_isac_swb ? 32000 : -1, CodecSampleRateHz(kDecoderISACswb));
+ EXPECT_EQ(has_isac_swb ? 32000 : -1, CodecSampleRateHz(kDecoderISACfb));
EXPECT_EQ(8000, CodecSampleRateHz(kDecoderPCM16B));
EXPECT_EQ(16000, CodecSampleRateHz(kDecoderPCM16Bwb));
EXPECT_EQ(32000, CodecSampleRateHz(kDecoderPCM16Bswb32kHz));
@@ -691,15 +697,15 @@ TEST(AudioDecoder, CodecSampleRateHz) {
EXPECT_EQ(32000, CodecSampleRateHz(kDecoderPCM16Bswb32kHz_2ch));
EXPECT_EQ(48000, CodecSampleRateHz(kDecoderPCM16Bswb48kHz_2ch));
EXPECT_EQ(8000, CodecSampleRateHz(kDecoderPCM16B_5ch));
- EXPECT_EQ(16000, CodecSampleRateHz(kDecoderG722));
- EXPECT_EQ(16000, CodecSampleRateHz(kDecoderG722_2ch));
+ EXPECT_EQ(has_g722 ? 16000 : -1, CodecSampleRateHz(kDecoderG722));
+ EXPECT_EQ(has_g722 ? 16000 : -1, CodecSampleRateHz(kDecoderG722_2ch));
EXPECT_EQ(-1, CodecSampleRateHz(kDecoderRED));
EXPECT_EQ(-1, CodecSampleRateHz(kDecoderAVT));
EXPECT_EQ(8000, CodecSampleRateHz(kDecoderCNGnb));
EXPECT_EQ(16000, CodecSampleRateHz(kDecoderCNGwb));
EXPECT_EQ(32000, CodecSampleRateHz(kDecoderCNGswb32kHz));
- EXPECT_EQ(48000, CodecSampleRateHz(kDecoderOpus));
- EXPECT_EQ(48000, CodecSampleRateHz(kDecoderOpus_2ch));
+ EXPECT_EQ(has_opus ? 48000 : -1, CodecSampleRateHz(kDecoderOpus));
+ EXPECT_EQ(has_opus ? 48000 : -1, CodecSampleRateHz(kDecoderOpus_2ch));
// TODO(tlegrand): Change 32000 to 48000 below once ACM has 48 kHz support.
EXPECT_EQ(32000, CodecSampleRateHz(kDecoderCNGswb48kHz));
EXPECT_EQ(-1, CodecSampleRateHz(kDecoderArbitrary));
@@ -710,10 +716,10 @@ TEST(AudioDecoder, CodecSupported) {
EXPECT_TRUE(CodecSupported(kDecoderPCMa));
EXPECT_TRUE(CodecSupported(kDecoderPCMu_2ch));
EXPECT_TRUE(CodecSupported(kDecoderPCMa_2ch));
- EXPECT_TRUE(CodecSupported(kDecoderILBC));
- EXPECT_TRUE(CodecSupported(kDecoderISAC));
- EXPECT_TRUE(CodecSupported(kDecoderISACswb));
- EXPECT_TRUE(CodecSupported(kDecoderISACfb));
+ EXPECT_EQ(has_ilbc, CodecSupported(kDecoderILBC));
+ EXPECT_EQ(has_isac, CodecSupported(kDecoderISAC));
+ EXPECT_EQ(has_isac_swb, CodecSupported(kDecoderISACswb));
+ EXPECT_EQ(has_isac_swb, CodecSupported(kDecoderISACfb));
EXPECT_TRUE(CodecSupported(kDecoderPCM16B));
EXPECT_TRUE(CodecSupported(kDecoderPCM16Bwb));
EXPECT_TRUE(CodecSupported(kDecoderPCM16Bswb32kHz));
@@ -723,8 +729,8 @@ TEST(AudioDecoder, CodecSupported) {
EXPECT_TRUE(CodecSupported(kDecoderPCM16Bswb32kHz_2ch));
EXPECT_TRUE(CodecSupported(kDecoderPCM16Bswb48kHz_2ch));
EXPECT_TRUE(CodecSupported(kDecoderPCM16B_5ch));
- EXPECT_TRUE(CodecSupported(kDecoderG722));
- EXPECT_TRUE(CodecSupported(kDecoderG722_2ch));
+ EXPECT_EQ(has_g722, CodecSupported(kDecoderG722));
+ EXPECT_EQ(has_g722, CodecSupported(kDecoderG722_2ch));
EXPECT_TRUE(CodecSupported(kDecoderRED));
EXPECT_TRUE(CodecSupported(kDecoderAVT));
EXPECT_TRUE(CodecSupported(kDecoderCNGnb));
@@ -732,8 +738,8 @@ TEST(AudioDecoder, CodecSupported) {
EXPECT_TRUE(CodecSupported(kDecoderCNGswb32kHz));
EXPECT_TRUE(CodecSupported(kDecoderCNGswb48kHz));
EXPECT_TRUE(CodecSupported(kDecoderArbitrary));
- EXPECT_TRUE(CodecSupported(kDecoderOpus));
- EXPECT_TRUE(CodecSupported(kDecoderOpus_2ch));
+ EXPECT_EQ(has_opus, CodecSupported(kDecoderOpus));
+ EXPECT_EQ(has_opus, CodecSupported(kDecoderOpus_2ch));
}
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/audio_multi_vector.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/audio_multi_vector.h
index 0aae9e33518..1c28648816a 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/audio_multi_vector.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/audio_multi_vector.h
@@ -132,7 +132,7 @@ class AudioMultiVector {
size_t num_channels_;
private:
- DISALLOW_COPY_AND_ASSIGN(AudioMultiVector);
+ RTC_DISALLOW_COPY_AND_ASSIGN(AudioMultiVector);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/audio_vector.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/audio_vector.h
index b44fbff74a1..e046e382773 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/audio_vector.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/audio_vector.h
@@ -105,7 +105,7 @@ class AudioVector {
// Note that this index may point outside of array_.
size_t capacity_; // Allocated number of samples in the array.
- DISALLOW_COPY_AND_ASSIGN(AudioVector);
+ RTC_DISALLOW_COPY_AND_ASSIGN(AudioVector);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/background_noise.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/background_noise.cc
index a59f444c503..7e7a6325e97 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/background_noise.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/background_noise.cc
@@ -21,6 +21,9 @@
namespace webrtc {
+// static
+const size_t BackgroundNoise::kMaxLpcOrder;
+
BackgroundNoise::BackgroundNoise(size_t num_channels)
: num_channels_(num_channels),
channel_parameters_(new ChannelParameters[num_channels_]),
@@ -150,7 +153,7 @@ const int16_t* BackgroundNoise::FilterState(size_t channel) const {
void BackgroundNoise::SetFilterState(size_t channel, const int16_t* input,
size_t length) {
assert(channel < num_channels_);
- length = std::min(length, static_cast<size_t>(kMaxLpcOrder));
+ length = std::min(length, kMaxLpcOrder);
memcpy(channel_parameters_[channel].filter_state, input,
length * sizeof(int16_t));
}
@@ -165,7 +168,7 @@ int16_t BackgroundNoise::ScaleShift(size_t channel) const {
}
int32_t BackgroundNoise::CalculateAutoCorrelation(
- const int16_t* signal, int length, int32_t* auto_correlation) const {
+ const int16_t* signal, size_t length, int32_t* auto_correlation) const {
int16_t signal_max = WebRtcSpl_MaxAbsValueW16(signal, length);
int correlation_scale = kLogVecLen -
WebRtcSpl_NormW32(signal_max * signal_max);
@@ -243,11 +246,10 @@ void BackgroundNoise::SaveParameters(size_t channel,
if (norm_shift & 0x1) {
norm_shift -= 1; // Even number of shifts required.
}
- assert(norm_shift >= 0); // Should always be positive.
- residual_energy = residual_energy << norm_shift;
+ residual_energy = WEBRTC_SPL_SHIFT_W32(residual_energy, norm_shift);
// Calculate scale and shift factor.
- parameters.scale = WebRtcSpl_SqrtFloor(residual_energy);
+ parameters.scale = static_cast<int16_t>(WebRtcSpl_SqrtFloor(residual_energy));
// Add 13 to the |scale_shift_|, since the random numbers table is in
// Q13.
// TODO(hlundin): Move the "13" to where the |scale_shift_| is used?
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/background_noise.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/background_noise.h
index baf1818dae4..2c67f21c3ac 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/background_noise.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/background_noise.h
@@ -29,7 +29,7 @@ class BackgroundNoise {
public:
// TODO(hlundin): For 48 kHz support, increase kMaxLpcOrder to 10.
// Will work anyway, but probably sound a little worse.
- static const int kMaxLpcOrder = 8; // 32000 / 8000 + 4.
+ static const size_t kMaxLpcOrder = 8; // 32000 / 8000 + 4.
explicit BackgroundNoise(size_t num_channels);
virtual ~BackgroundNoise();
@@ -76,9 +76,9 @@ class BackgroundNoise {
private:
static const int kThresholdIncrement = 229; // 0.0035 in Q16.
- static const int kVecLen = 256;
+ static const size_t kVecLen = 256;
static const int kLogVecLen = 8; // log2(kVecLen).
- static const int kResidualLength = 64;
+ static const size_t kResidualLength = 64;
static const int16_t kLogResidualLength = 6; // log2(kResidualLength)
struct ChannelParameters {
@@ -112,7 +112,7 @@ class BackgroundNoise {
};
int32_t CalculateAutoCorrelation(const int16_t* signal,
- int length,
+ size_t length,
int32_t* auto_correlation) const;
// Increments the energy threshold by a factor 1 + |kThresholdIncrement|.
@@ -130,7 +130,7 @@ class BackgroundNoise {
bool initialized_;
NetEq::BackgroundNoiseMode mode_;
- DISALLOW_COPY_AND_ASSIGN(BackgroundNoise);
+ RTC_DISALLOW_COPY_AND_ASSIGN(BackgroundNoise);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/buffer_level_filter.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/buffer_level_filter.cc
index 93f9a55b2c3..905479178d2 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/buffer_level_filter.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/buffer_level_filter.cc
@@ -23,16 +23,16 @@ void BufferLevelFilter::Reset() {
level_factor_ = 253;
}
-void BufferLevelFilter::Update(int buffer_size_packets,
+void BufferLevelFilter::Update(size_t buffer_size_packets,
int time_stretched_samples,
- int packet_len_samples) {
+ size_t packet_len_samples) {
// Filter:
// |filtered_current_level_| = |level_factor_| * |filtered_current_level_| +
// (1 - |level_factor_|) * |buffer_size_packets|
// |level_factor_| and |filtered_current_level_| are in Q8.
// |buffer_size_packets| is in Q0.
filtered_current_level_ = ((level_factor_ * filtered_current_level_) >> 8) +
- ((256 - level_factor_) * buffer_size_packets);
+ ((256 - level_factor_) * static_cast<int>(buffer_size_packets));
// Account for time-scale operations (accelerate and pre-emptive expand).
if (time_stretched_samples && packet_len_samples > 0) {
@@ -42,7 +42,7 @@ void BufferLevelFilter::Update(int buffer_size_packets,
// Make sure that the filtered value remains non-negative.
filtered_current_level_ = std::max(0,
filtered_current_level_ -
- (time_stretched_samples << 8) / packet_len_samples);
+ (time_stretched_samples << 8) / static_cast<int>(packet_len_samples));
}
}
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/buffer_level_filter.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/buffer_level_filter.h
index 2d2a888e15e..030870653cc 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/buffer_level_filter.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/buffer_level_filter.h
@@ -11,6 +11,8 @@
#ifndef WEBRTC_MODULES_AUDIO_CODING_NETEQ_BUFFER_LEVEL_FILTER_H_
#define WEBRTC_MODULES_AUDIO_CODING_NETEQ_BUFFER_LEVEL_FILTER_H_
+#include <stddef.h>
+
#include "webrtc/base/constructormagic.h"
namespace webrtc {
@@ -26,8 +28,8 @@ class BufferLevelFilter {
// corresponding number of packets, and is subtracted from the filtered
// value (thus bypassing the filter operation). |packet_len_samples| is the
// number of audio samples carried in each incoming packet.
- virtual void Update(int buffer_size_packets, int time_stretched_samples,
- int packet_len_samples);
+ virtual void Update(size_t buffer_size_packets, int time_stretched_samples,
+ size_t packet_len_samples);
// Set the current target buffer level (obtained from
// DelayManager::base_target_level()). Used to select the appropriate
@@ -40,7 +42,7 @@ class BufferLevelFilter {
int level_factor_; // Filter factor for the buffer level filter in Q8.
int filtered_current_level_; // Filtered current buffer level in Q8.
- DISALLOW_COPY_AND_ASSIGN(BufferLevelFilter);
+ RTC_DISALLOW_COPY_AND_ASSIGN(BufferLevelFilter);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/comfort_noise.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/comfort_noise.cc
index 54b0a28e52f..3fe66077784 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/comfort_noise.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/comfort_noise.cc
@@ -12,6 +12,7 @@
#include <assert.h>
+#include "webrtc/base/logging.h"
#include "webrtc/modules/audio_coding/codecs/audio_decoder.h"
#include "webrtc/modules/audio_coding/codecs/cng/include/webrtc_cng.h"
#include "webrtc/modules/audio_coding/neteq/decoder_database.h"
@@ -44,6 +45,7 @@ int ComfortNoise::UpdateParameters(Packet* packet) {
delete packet;
if (ret < 0) {
internal_error_code_ = WebRtcCng_GetErrorCodeDec(cng_inst);
+ LOG(LS_ERROR) << "WebRtcCng_UpdateSid produced " << internal_error_code_;
return kInternalError;
}
return kOK;
@@ -56,6 +58,7 @@ int ComfortNoise::Generate(size_t requested_length,
fs_hz_ == 48000);
// Not adapted for multi-channel yet.
if (output->Channels() != 1) {
+ LOG(LS_ERROR) << "No multi-channel support";
return kMultiChannelNotSupported;
}
@@ -70,17 +73,18 @@ int ComfortNoise::Generate(size_t requested_length,
// Get the decoder from the database.
AudioDecoder* cng_decoder = decoder_database_->GetActiveCngDecoder();
if (!cng_decoder) {
+ LOG(LS_ERROR) << "Unknown payload type";
return kUnknownPayloadType;
}
CNG_dec_inst* cng_inst = cng_decoder->CngDecoderInstance();
// The expression &(*output)[0][0] is a pointer to the first element in
// the first channel.
- if (WebRtcCng_Generate(cng_inst, &(*output)[0][0],
- static_cast<int16_t>(number_of_samples),
+ if (WebRtcCng_Generate(cng_inst, &(*output)[0][0], number_of_samples,
new_period) < 0) {
// Error returned.
output->Zeros(requested_length);
internal_error_code_ = WebRtcCng_GetErrorCodeDec(cng_inst);
+ LOG(LS_ERROR) << "WebRtcCng_Generate produced " << internal_error_code_;
return kInternalError;
}
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/comfort_noise.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/comfort_noise.h
index d4655962456..1fc22586637 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/comfort_noise.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/comfort_noise.h
@@ -66,7 +66,7 @@ class ComfortNoise {
DecoderDatabase* decoder_database_;
SyncBuffer* sync_buffer_;
int internal_error_code_;
- DISALLOW_COPY_AND_ASSIGN(ComfortNoise);
+ RTC_DISALLOW_COPY_AND_ASSIGN(ComfortNoise);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic.cc
index 5fb054c7850..eb10e65897b 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic.cc
@@ -24,7 +24,7 @@
namespace webrtc {
DecisionLogic* DecisionLogic::Create(int fs_hz,
- int output_size_samples,
+ size_t output_size_samples,
NetEqPlayoutMode playout_mode,
DecoderDatabase* decoder_database,
const PacketBuffer& packet_buffer,
@@ -56,7 +56,7 @@ DecisionLogic* DecisionLogic::Create(int fs_hz,
}
DecisionLogic::DecisionLogic(int fs_hz,
- int output_size_samples,
+ size_t output_size_samples,
NetEqPlayoutMode playout_mode,
DecoderDatabase* decoder_database,
const PacketBuffer& packet_buffer,
@@ -95,7 +95,7 @@ void DecisionLogic::SoftReset() {
timescale_hold_off_ = kMinTimescaleInterval;
}
-void DecisionLogic::SetSampleRate(int fs_hz, int output_size_samples) {
+void DecisionLogic::SetSampleRate(int fs_hz, size_t output_size_samples) {
// TODO(hlundin): Change to an enumerator and skip assert.
assert(fs_hz == 8000 || fs_hz == 16000 || fs_hz == 32000 || fs_hz == 48000);
fs_mult_ = fs_hz / 8000;
@@ -104,7 +104,7 @@ void DecisionLogic::SetSampleRate(int fs_hz, int output_size_samples) {
Operations DecisionLogic::GetDecision(const SyncBuffer& sync_buffer,
const Expand& expand,
- int decoder_frame_length,
+ size_t decoder_frame_length,
const RTPHeader* packet_header,
Modes prev_mode,
bool play_dtmf, bool* reset_decoder) {
@@ -123,9 +123,9 @@ Operations DecisionLogic::GetDecision(const SyncBuffer& sync_buffer,
}
}
- const int samples_left = static_cast<int>(
- sync_buffer.FutureLength() - expand.overlap_length());
- const int cur_size_samples =
+ const size_t samples_left =
+ sync_buffer.FutureLength() - expand.overlap_length();
+ const size_t cur_size_samples =
samples_left + packet_buffer_.NumSamplesInBuffer(decoder_database_,
decoder_frame_length);
LOG(LS_VERBOSE) << "Buffers: " << packet_buffer_.NumPacketsInBuffer() <<
@@ -153,9 +153,10 @@ void DecisionLogic::ExpandDecision(Operations operation) {
}
}
-void DecisionLogic::FilterBufferLevel(int buffer_size_samples,
+void DecisionLogic::FilterBufferLevel(size_t buffer_size_samples,
Modes prev_mode) {
- const int elapsed_time_ms = output_size_samples_ / (8 * fs_mult_);
+ const int elapsed_time_ms =
+ static_cast<int>(output_size_samples_ / (8 * fs_mult_));
delay_manager_->UpdateCounters(elapsed_time_ms);
// Do not update buffer history if currently playing CNG since it will bias
@@ -164,7 +165,7 @@ void DecisionLogic::FilterBufferLevel(int buffer_size_samples,
buffer_level_filter_->SetTargetBufferLevel(
delay_manager_->base_target_level());
- int buffer_size_packets = 0;
+ size_t buffer_size_packets = 0;
if (packet_length_samples_ > 0) {
// Calculate size in packets.
buffer_size_packets = buffer_size_samples / packet_length_samples_;
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic.h
index 672ce939d41..30f7ddd3c71 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic.h
@@ -34,7 +34,7 @@ class DecisionLogic {
// Static factory function which creates different types of objects depending
// on the |playout_mode|.
static DecisionLogic* Create(int fs_hz,
- int output_size_samples,
+ size_t output_size_samples,
NetEqPlayoutMode playout_mode,
DecoderDatabase* decoder_database,
const PacketBuffer& packet_buffer,
@@ -43,7 +43,7 @@ class DecisionLogic {
// Constructor.
DecisionLogic(int fs_hz,
- int output_size_samples,
+ size_t output_size_samples,
NetEqPlayoutMode playout_mode,
DecoderDatabase* decoder_database,
const PacketBuffer& packet_buffer,
@@ -60,7 +60,7 @@ class DecisionLogic {
void SoftReset();
// Sets the sample rate and the output block size.
- void SetSampleRate(int fs_hz, int output_size_samples);
+ void SetSampleRate(int fs_hz, size_t output_size_samples);
// Returns the operation that should be done next. |sync_buffer| and |expand|
// are provided for reference. |decoder_frame_length| is the number of samples
@@ -75,7 +75,7 @@ class DecisionLogic {
// return value.
Operations GetDecision(const SyncBuffer& sync_buffer,
const Expand& expand,
- int decoder_frame_length,
+ size_t decoder_frame_length,
const RTPHeader* packet_header,
Modes prev_mode,
bool play_dtmf,
@@ -101,12 +101,12 @@ class DecisionLogic {
// Accessors and mutators.
void set_sample_memory(int32_t value) { sample_memory_ = value; }
- int generated_noise_samples() const { return generated_noise_samples_; }
- void set_generated_noise_samples(int value) {
+ size_t generated_noise_samples() const { return generated_noise_samples_; }
+ void set_generated_noise_samples(size_t value) {
generated_noise_samples_ = value;
}
- int packet_length_samples() const { return packet_length_samples_; }
- void set_packet_length_samples(int value) {
+ size_t packet_length_samples() const { return packet_length_samples_; }
+ void set_packet_length_samples(size_t value) {
packet_length_samples_ = value;
}
void set_prev_time_scale(bool value) { prev_time_scale_ = value; }
@@ -134,7 +134,7 @@ class DecisionLogic {
// Should be implemented by derived classes.
virtual Operations GetDecisionSpecialized(const SyncBuffer& sync_buffer,
const Expand& expand,
- int decoder_frame_length,
+ size_t decoder_frame_length,
const RTPHeader* packet_header,
Modes prev_mode,
bool play_dtmf,
@@ -142,18 +142,18 @@ class DecisionLogic {
// Updates the |buffer_level_filter_| with the current buffer level
// |buffer_size_packets|.
- void FilterBufferLevel(int buffer_size_packets, Modes prev_mode);
+ void FilterBufferLevel(size_t buffer_size_packets, Modes prev_mode);
DecoderDatabase* decoder_database_;
const PacketBuffer& packet_buffer_;
DelayManager* delay_manager_;
BufferLevelFilter* buffer_level_filter_;
int fs_mult_;
- int output_size_samples_;
+ size_t output_size_samples_;
CngState cng_state_; // Remember if comfort noise is interrupted by other
// event (e.g., DTMF).
- int generated_noise_samples_;
- int packet_length_samples_;
+ size_t generated_noise_samples_;
+ size_t packet_length_samples_;
int sample_memory_;
bool prev_time_scale_;
int timescale_hold_off_;
@@ -161,7 +161,7 @@ class DecisionLogic {
const NetEqPlayoutMode playout_mode_;
private:
- DISALLOW_COPY_AND_ASSIGN(DecisionLogic);
+ RTC_DISALLOW_COPY_AND_ASSIGN(DecisionLogic);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic_fax.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic_fax.cc
index 08a4c4cb646..ddea64425f2 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic_fax.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic_fax.cc
@@ -22,7 +22,7 @@ namespace webrtc {
Operations DecisionLogicFax::GetDecisionSpecialized(
const SyncBuffer& sync_buffer,
const Expand& expand,
- int decoder_frame_length,
+ size_t decoder_frame_length,
const RTPHeader* packet_header,
Modes prev_mode,
bool play_dtmf,
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic_fax.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic_fax.h
index d9f8db9652e..204dcc168a3 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic_fax.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic_fax.h
@@ -23,7 +23,7 @@ class DecisionLogicFax : public DecisionLogic {
public:
// Constructor.
DecisionLogicFax(int fs_hz,
- int output_size_samples,
+ size_t output_size_samples,
NetEqPlayoutMode playout_mode,
DecoderDatabase* decoder_database,
const PacketBuffer& packet_buffer,
@@ -46,14 +46,14 @@ class DecisionLogicFax : public DecisionLogic {
// remain true if it was true before the call).
Operations GetDecisionSpecialized(const SyncBuffer& sync_buffer,
const Expand& expand,
- int decoder_frame_length,
+ size_t decoder_frame_length,
const RTPHeader* packet_header,
Modes prev_mode,
bool play_dtmf,
bool* reset_decoder) override;
private:
- DISALLOW_COPY_AND_ASSIGN(DecisionLogicFax);
+ RTC_DISALLOW_COPY_AND_ASSIGN(DecisionLogicFax);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic_normal.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic_normal.cc
index e985ee0aa39..d3f6fa6dd45 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic_normal.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic_normal.cc
@@ -27,7 +27,7 @@ namespace webrtc {
Operations DecisionLogicNormal::GetDecisionSpecialized(
const SyncBuffer& sync_buffer,
const Expand& expand,
- int decoder_frame_length,
+ size_t decoder_frame_length,
const RTPHeader* packet_header,
Modes prev_mode,
bool play_dtmf,
@@ -149,7 +149,7 @@ Operations DecisionLogicNormal::ExpectedPacketAvailable(Modes prev_mode,
Operations DecisionLogicNormal::FuturePacketAvailable(
const SyncBuffer& sync_buffer,
const Expand& expand,
- int decoder_frame_length,
+ size_t decoder_frame_length,
Modes prev_mode,
uint32_t target_timestamp,
uint32_t available_timestamp,
@@ -172,9 +172,9 @@ Operations DecisionLogicNormal::FuturePacketAvailable(
}
}
- const int samples_left = static_cast<int>(sync_buffer.FutureLength() -
- expand.overlap_length());
- const int cur_size_samples = samples_left +
+ const size_t samples_left =
+ sync_buffer.FutureLength() - expand.overlap_length();
+ const size_t cur_size_samples = samples_left +
packet_buffer_.NumPacketsInBuffer() * decoder_frame_length;
// If previous was comfort noise, then no merge is needed.
@@ -205,7 +205,8 @@ Operations DecisionLogicNormal::FuturePacketAvailable(
// fs_mult_ * 8 = fs / 1000.)
if (prev_mode == kModeExpand ||
(decoder_frame_length < output_size_samples_ &&
- cur_size_samples > kAllowMergeWithoutExpandMs * fs_mult_ * 8)) {
+ cur_size_samples >
+ static_cast<size_t>(kAllowMergeWithoutExpandMs * fs_mult_ * 8))) {
return kMerge;
} else if (play_dtmf) {
// Play DTMF instead of expand.
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic_normal.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic_normal.h
index 047663f5b0f..7465906a381 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic_normal.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic_normal.h
@@ -23,7 +23,7 @@ class DecisionLogicNormal : public DecisionLogic {
public:
// Constructor.
DecisionLogicNormal(int fs_hz,
- int output_size_samples,
+ size_t output_size_samples,
NetEqPlayoutMode playout_mode,
DecoderDatabase* decoder_database,
const PacketBuffer& packet_buffer,
@@ -50,7 +50,7 @@ class DecisionLogicNormal : public DecisionLogic {
// remain true if it was true before the call).
Operations GetDecisionSpecialized(const SyncBuffer& sync_buffer,
const Expand& expand,
- int decoder_frame_length,
+ size_t decoder_frame_length,
const RTPHeader* packet_header,
Modes prev_mode,
bool play_dtmf,
@@ -61,7 +61,7 @@ class DecisionLogicNormal : public DecisionLogic {
virtual Operations FuturePacketAvailable(
const SyncBuffer& sync_buffer,
const Expand& expand,
- int decoder_frame_length,
+ size_t decoder_frame_length,
Modes prev_mode,
uint32_t target_timestamp,
uint32_t available_timestamp,
@@ -99,7 +99,7 @@ class DecisionLogicNormal : public DecisionLogic {
// Checks if num_consecutive_expands_ >= kMaxWaitForPacket.
bool MaxWaitForPacket() const;
- DISALLOW_COPY_AND_ASSIGN(DecisionLogicNormal);
+ RTC_DISALLOW_COPY_AND_ASSIGN(DecisionLogicNormal);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/decoder_database.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/decoder_database.cc
index b9097b0873a..97dc00d7a68 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/decoder_database.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/decoder_database.cc
@@ -13,6 +13,7 @@
#include <assert.h>
#include <utility> // pair
+#include "webrtc/base/logging.h"
#include "webrtc/modules/audio_coding/codecs/audio_decoder.h"
namespace webrtc {
@@ -71,7 +72,6 @@ int DecoderDatabase::InsertExternal(uint8_t rtp_payload_type,
if (!decoder) {
return kInvalidPointer;
}
- decoder->Init();
std::pair<DecoderMap::iterator, bool> ret;
DecoderInfo info(codec_type, fs_hz, decoder, true);
ret = decoders_.insert(std::make_pair(rtp_payload_type, info));
@@ -135,7 +135,6 @@ AudioDecoder* DecoderDatabase::GetDecoder(uint8_t rtp_payload_type) {
AudioDecoder* decoder = CreateAudioDecoder(info->codec_type);
assert(decoder); // Should not be able to have an unsupported codec here.
info->decoder = decoder;
- info->decoder->Init();
}
return info->decoder;
}
@@ -249,6 +248,8 @@ int DecoderDatabase::CheckPayloadTypes(const PacketList& packet_list) const {
for (it = packet_list.begin(); it != packet_list.end(); ++it) {
if (decoders_.find((*it)->header.payloadType) == decoders_.end()) {
// Payload type is not found.
+ LOG(LS_WARNING) << "CheckPayloadTypes: unknown RTP payload type "
+ << static_cast<int>((*it)->header.payloadType);
return kDecoderNotFound;
}
}
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/decoder_database.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/decoder_database.h
index 1dbc685c373..8ce0b696e79 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/decoder_database.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/decoder_database.h
@@ -147,7 +147,7 @@ class DecoderDatabase {
int active_decoder_;
int active_cng_decoder_;
- DISALLOW_COPY_AND_ASSIGN(DecoderDatabase);
+ RTC_DISALLOW_COPY_AND_ASSIGN(DecoderDatabase);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/decoder_database_unittest.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/decoder_database_unittest.cc
index 1e4e58af3c1..af37ca959f9 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/decoder_database_unittest.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/decoder_database_unittest.cc
@@ -172,8 +172,14 @@ TEST(DecoderDatabase, CheckPayloadTypes) {
}
}
+#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
+#define IF_ISAC(x) x
+#else
+#define IF_ISAC(x) DISABLED_##x
+#endif
+
// Test the methods for setting and getting active speech and CNG decoders.
-TEST(DecoderDatabase, ActiveDecoders) {
+TEST(DecoderDatabase, IF_ISAC(ActiveDecoders)) {
DecoderDatabase db;
// Load payload types.
ASSERT_EQ(DecoderDatabase::kOK, db.RegisterPayload(0, kDecoderPCMu));
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/delay_manager.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/delay_manager.cc
index a935561eff4..e7f76f616e9 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/delay_manager.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/delay_manager.cc
@@ -22,7 +22,7 @@
namespace webrtc {
-DelayManager::DelayManager(int max_packets_in_buffer,
+DelayManager::DelayManager(size_t max_packets_in_buffer,
DelayPeakDetector* peak_detector)
: first_packet_received_(false),
max_packets_in_buffer_(max_packets_in_buffer),
@@ -239,7 +239,8 @@ void DelayManager::LimitTargetLevel() {
}
// Shift to Q8, then 75%.;
- int max_buffer_packets_q8 = (3 * (max_packets_in_buffer_ << 8)) / 4;
+ int max_buffer_packets_q8 =
+ static_cast<int>((3 * (max_packets_in_buffer_ << 8)) / 4);
target_level_ = std::min(target_level_, max_buffer_packets_q8);
// Sanity check, at least 1 packet (in Q8).
@@ -389,7 +390,8 @@ bool DelayManager::SetMinimumDelay(int delay_ms) {
// |max_packets_in_buffer_|.
if ((maximum_delay_ms_ > 0 && delay_ms > maximum_delay_ms_) ||
(packet_len_ms_ > 0 &&
- delay_ms > 3 * max_packets_in_buffer_ * packet_len_ms_ / 4)) {
+ delay_ms >
+ static_cast<int>(3 * max_packets_in_buffer_ * packet_len_ms_ / 4))) {
return false;
}
minimum_delay_ms_ = delay_ms;
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/delay_manager.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/delay_manager.h
index 33c4a40a6a8..785fced15df 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/delay_manager.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/delay_manager.h
@@ -32,7 +32,7 @@ class DelayManager {
// buffer can hold no more than |max_packets_in_buffer| packets (i.e., this
// is the number of packet slots in the buffer). Supply a PeakDetector
// object to the DelayManager.
- DelayManager(int max_packets_in_buffer, DelayPeakDetector* peak_detector);
+ DelayManager(size_t max_packets_in_buffer, DelayPeakDetector* peak_detector);
virtual ~DelayManager();
@@ -132,7 +132,7 @@ class DelayManager {
void LimitTargetLevel();
bool first_packet_received_;
- const int max_packets_in_buffer_; // Capacity of the packet buffer.
+ const size_t max_packets_in_buffer_; // Capacity of the packet buffer.
IATVector iat_vector_; // Histogram of inter-arrival times.
int iat_factor_; // Forgetting factor for updating the IAT histogram (Q15).
int packet_iat_count_ms_; // Milliseconds elapsed since last packet.
@@ -157,7 +157,7 @@ class DelayManager {
DelayPeakDetector& peak_detector_;
int last_pack_cng_or_dtmf_;
- DISALLOW_COPY_AND_ASSIGN(DelayManager);
+ RTC_DISALLOW_COPY_AND_ASSIGN(DelayManager);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/delay_peak_detector.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/delay_peak_detector.h
index bf8ab744a15..69433b45248 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/delay_peak_detector.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/delay_peak_detector.h
@@ -69,7 +69,7 @@ class DelayPeakDetector {
int peak_detection_threshold_;
int peak_period_counter_ms_;
- DISALLOW_COPY_AND_ASSIGN(DelayPeakDetector);
+ RTC_DISALLOW_COPY_AND_ASSIGN(DelayPeakDetector);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/dsp_helper.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/dsp_helper.cc
index 289e66d17c8..4188914c86c 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/dsp_helper.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/dsp_helper.cc
@@ -99,13 +99,13 @@ int DspHelper::RampSignal(AudioMultiVector* signal,
return end_factor;
}
-void DspHelper::PeakDetection(int16_t* data, int data_length,
- int num_peaks, int fs_mult,
- int* peak_index, int16_t* peak_value) {
- int16_t min_index = 0;
- int16_t max_index = 0;
+void DspHelper::PeakDetection(int16_t* data, size_t data_length,
+ size_t num_peaks, int fs_mult,
+ size_t* peak_index, int16_t* peak_value) {
+ size_t min_index = 0;
+ size_t max_index = 0;
- for (int i = 0; i <= num_peaks - 1; i++) {
+ for (size_t i = 0; i <= num_peaks - 1; i++) {
if (num_peaks == 1) {
// Single peak. The parabola fit assumes that an extra point is
// available; worst case it gets a zero on the high end of the signal.
@@ -117,7 +117,7 @@ void DspHelper::PeakDetection(int16_t* data, int data_length,
peak_index[i] = WebRtcSpl_MaxIndexW16(data, data_length - 1);
if (i != num_peaks - 1) {
- min_index = std::max(0, peak_index[i] - 2);
+ min_index = (peak_index[i] > 2) ? (peak_index[i] - 2) : 0;
max_index = std::min(data_length - 1, peak_index[i] + 2);
}
@@ -148,7 +148,7 @@ void DspHelper::PeakDetection(int16_t* data, int data_length,
}
void DspHelper::ParabolicFit(int16_t* signal_points, int fs_mult,
- int* peak_index, int16_t* peak_value) {
+ size_t* peak_index, int16_t* peak_value) {
uint16_t fit_index[13];
if (fs_mult == 1) {
fit_index[0] = 0;
@@ -235,16 +235,16 @@ void DspHelper::ParabolicFit(int16_t* signal_points, int fs_mult,
}
}
-int DspHelper::MinDistortion(const int16_t* signal, int min_lag,
- int max_lag, int length,
- int32_t* distortion_value) {
- int best_index = -1;
+size_t DspHelper::MinDistortion(const int16_t* signal, size_t min_lag,
+ size_t max_lag, size_t length,
+ int32_t* distortion_value) {
+ size_t best_index = 0;
int32_t min_distortion = WEBRTC_SPL_WORD32_MAX;
- for (int i = min_lag; i <= max_lag; i++) {
+ for (size_t i = min_lag; i <= max_lag; i++) {
int32_t sum_diff = 0;
const int16_t* data1 = signal;
const int16_t* data2 = signal - i;
- for (int j = 0; j < length; j++) {
+ for (size_t j = 0; j < length; j++) {
sum_diff += WEBRTC_SPL_ABS_W32(data1[j] - data2[j]);
}
// Compare with previous minimum.
@@ -293,15 +293,15 @@ void DspHelper::MuteSignal(int16_t* signal, int mute_slope, size_t length) {
}
int DspHelper::DownsampleTo4kHz(const int16_t* input, size_t input_length,
- int output_length, int input_rate_hz,
+ size_t output_length, int input_rate_hz,
bool compensate_delay, int16_t* output) {
// Set filter parameters depending on input frequency.
// NOTE: The phase delay values are wrong compared to the true phase delay
// of the filters. However, the error is preserved (through the +1 term) for
// consistency.
const int16_t* filter_coefficients; // Filter coefficients.
- int16_t filter_length; // Number of coefficients.
- int16_t filter_delay; // Phase delay in samples.
+ size_t filter_length; // Number of coefficients.
+ size_t filter_delay; // Phase delay in samples.
int16_t factor; // Conversion rate (inFsHz / 8000).
switch (input_rate_hz) {
case 8000: {
@@ -345,9 +345,8 @@ int DspHelper::DownsampleTo4kHz(const int16_t* input, size_t input_length,
// Returns -1 if input signal is too short; 0 otherwise.
return WebRtcSpl_DownsampleFast(
- &input[filter_length - 1], static_cast<int>(input_length) -
- (filter_length - 1), output, output_length, filter_coefficients,
- filter_length, factor, filter_delay);
+ &input[filter_length - 1], input_length - filter_length + 1, output,
+ output_length, filter_coefficients, filter_length, factor, filter_delay);
}
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/dsp_helper.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/dsp_helper.h
index f9032562f1b..269c2eb0f25 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/dsp_helper.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/dsp_helper.h
@@ -78,9 +78,9 @@ class DspHelper {
// locations and values are written to the arrays |peak_index| and
// |peak_value|, respectively. Both arrays must hold at least |num_peaks|
// elements.
- static void PeakDetection(int16_t* data, int data_length,
- int num_peaks, int fs_mult,
- int* peak_index, int16_t* peak_value);
+ static void PeakDetection(int16_t* data, size_t data_length,
+ size_t num_peaks, int fs_mult,
+ size_t* peak_index, int16_t* peak_value);
// Estimates the height and location of a maximum. The three values in the
// array |signal_points| are used as basis for a parabolic fit, which is then
@@ -89,14 +89,15 @@ class DspHelper {
// |peak_index| and |peak_value| is given in the full sample rate, as
// indicated by the sample rate multiplier |fs_mult|.
static void ParabolicFit(int16_t* signal_points, int fs_mult,
- int* peak_index, int16_t* peak_value);
+ size_t* peak_index, int16_t* peak_value);
// Calculates the sum-abs-diff for |signal| when compared to a displaced
// version of itself. Returns the displacement lag that results in the minimum
// distortion. The resulting distortion is written to |distortion_value|.
// The values of |min_lag| and |max_lag| are boundaries for the search.
- static int MinDistortion(const int16_t* signal, int min_lag,
- int max_lag, int length, int32_t* distortion_value);
+ static size_t MinDistortion(const int16_t* signal, size_t min_lag,
+ size_t max_lag, size_t length,
+ int32_t* distortion_value);
// Mixes |length| samples from |input1| and |input2| together and writes the
// result to |output|. The gain for |input1| starts at |mix_factor| (Q14) and
@@ -122,14 +123,14 @@ class DspHelper {
// filters if |compensate_delay| is true. Returns -1 if the input is too short
// to produce |output_length| samples, otherwise 0.
static int DownsampleTo4kHz(const int16_t* input, size_t input_length,
- int output_length, int input_rate_hz,
+ size_t output_length, int input_rate_hz,
bool compensate_delay, int16_t* output);
private:
// Table of constants used in method DspHelper::ParabolicFit().
static const int16_t kParabolaCoefficients[17][3];
- DISALLOW_COPY_AND_ASSIGN(DspHelper);
+ RTC_DISALLOW_COPY_AND_ASSIGN(DspHelper);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/dtmf_buffer.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/dtmf_buffer.cc
index 24aa9fe790e..779d1d340b4 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/dtmf_buffer.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/dtmf_buffer.cc
@@ -13,6 +13,9 @@
#include <assert.h>
#include <algorithm> // max
+#include "webrtc/base/checks.h"
+#include "webrtc/base/logging.h"
+
// Modify the code to obtain backwards bit-exactness. Once bit-exactness is no
// longer required, this #define should be removed (and the code that it
// enables).
@@ -67,10 +70,10 @@ int DtmfBuffer::ParseEvent(uint32_t rtp_timestamp,
const uint8_t* payload,
size_t payload_length_bytes,
DtmfEvent* event) {
- if (!payload || !event) {
- return kInvalidPointer;
- }
+ RTC_CHECK(payload);
+ RTC_CHECK(event);
if (payload_length_bytes < 4) {
+ LOG(LS_WARNING) << "ParseEvent payload too short";
return kPayloadTooShort;
}
@@ -98,6 +101,7 @@ int DtmfBuffer::InsertEvent(const DtmfEvent& event) {
if (event.event_no < 0 || event.event_no > 15 ||
event.volume < 0 || event.volume > 36 ||
event.duration <= 0 || event.duration > 65535) {
+ LOG(LS_WARNING) << "InsertEvent invalid parameters";
return kInvalidEventParameters;
}
DtmfList::iterator it = buffer_.begin();
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/dtmf_buffer.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/dtmf_buffer.h
index 861a9486275..1f415ce81fa 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/dtmf_buffer.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/dtmf_buffer.h
@@ -107,7 +107,7 @@ class DtmfBuffer {
DtmfList buffer_;
- DISALLOW_COPY_AND_ASSIGN(DtmfBuffer);
+ RTC_DISALLOW_COPY_AND_ASSIGN(DtmfBuffer);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/dtmf_buffer_unittest.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/dtmf_buffer_unittest.cc
index 4824b2baca6..dad4e76627b 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/dtmf_buffer_unittest.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/dtmf_buffer_unittest.cc
@@ -77,12 +77,6 @@ TEST(DtmfBuffer, ParseEvent) {
EXPECT_EQ(timestamp, event.timestamp);
EXPECT_EQ(volume, event.volume);
- EXPECT_EQ(DtmfBuffer::kInvalidPointer,
- DtmfBuffer::ParseEvent(timestamp, NULL, 4, &event));
-
- EXPECT_EQ(DtmfBuffer::kInvalidPointer,
- DtmfBuffer::ParseEvent(timestamp, payload_ptr, 4, NULL));
-
EXPECT_EQ(DtmfBuffer::kPayloadTooShort,
DtmfBuffer::ParseEvent(timestamp, payload_ptr, 3, &event));
}
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/dtmf_tone_generator.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/dtmf_tone_generator.cc
index 45601c04d42..f4d5190c615 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/dtmf_tone_generator.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/dtmf_tone_generator.cc
@@ -149,18 +149,18 @@ void DtmfToneGenerator::Reset() {
}
// Generate num_samples of DTMF signal and write to |output|.
-int DtmfToneGenerator::Generate(int num_samples,
+int DtmfToneGenerator::Generate(size_t num_samples,
AudioMultiVector* output) {
if (!initialized_) {
return kNotInitialized;
}
- if (num_samples < 0 || !output) {
+ if (!output) {
return kParameterError;
}
output->AssertSize(num_samples);
- for (int i = 0; i < num_samples; ++i) {
+ for (size_t i = 0; i < num_samples; ++i) {
// Use recursion formula y[n] = a * y[n - 1] - y[n - 2].
int16_t temp_val_low = ((coeff1_ * sample_history1_[1] + 8192) >> 14)
- sample_history1_[0];
@@ -186,7 +186,7 @@ int DtmfToneGenerator::Generate(int num_samples,
output->CopyChannel(0, channel);
}
- return num_samples;
+ return static_cast<int>(num_samples);
}
bool DtmfToneGenerator::initialized() const {
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/dtmf_tone_generator.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/dtmf_tone_generator.h
index 4e51e53674b..36d902ad3fa 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/dtmf_tone_generator.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/dtmf_tone_generator.h
@@ -30,7 +30,7 @@ class DtmfToneGenerator {
virtual ~DtmfToneGenerator() {}
virtual int Init(int fs, int event, int attenuation);
virtual void Reset();
- virtual int Generate(int num_samples, AudioMultiVector* output);
+ virtual int Generate(size_t num_samples, AudioMultiVector* output);
virtual bool initialized() const;
private:
@@ -48,7 +48,7 @@ class DtmfToneGenerator {
int16_t sample_history1_[2]; // Last 2 samples for the 1st oscillator.
int16_t sample_history2_[2]; // Last 2 samples for the 2nd oscillator.
- DISALLOW_COPY_AND_ASSIGN(DtmfToneGenerator);
+ RTC_DISALLOW_COPY_AND_ASSIGN(DtmfToneGenerator);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/dtmf_tone_generator_unittest.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/dtmf_tone_generator_unittest.cc
index ccd7fa606f1..a55e6c90288 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/dtmf_tone_generator_unittest.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/dtmf_tone_generator_unittest.cc
@@ -171,8 +171,6 @@ TEST(DtmfToneGenerator, TestErrors) {
// Initialize with valid parameters.
ASSERT_EQ(0, tone_gen.Init(fs, event, attenuation));
EXPECT_TRUE(tone_gen.initialized());
- // Negative number of samples.
- EXPECT_EQ(DtmfToneGenerator::kParameterError, tone_gen.Generate(-1, &signal));
// NULL pointer to destination.
EXPECT_EQ(DtmfToneGenerator::kParameterError,
tone_gen.Generate(kNumSamples, NULL));
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/expand.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/expand.cc
index 10f6a9f5bf4..2aa9fb0a8d6 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/expand.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/expand.cc
@@ -16,10 +16,12 @@
#include <algorithm> // min, max
#include <limits> // numeric_limits<T>
+#include "webrtc/base/safe_conversions.h"
#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
#include "webrtc/modules/audio_coding/neteq/background_noise.h"
#include "webrtc/modules/audio_coding/neteq/dsp_helper.h"
#include "webrtc/modules/audio_coding/neteq/random_vector.h"
+#include "webrtc/modules/audio_coding/neteq/statistics_calculator.h"
#include "webrtc/modules/audio_coding/neteq/sync_buffer.h"
namespace webrtc {
@@ -27,6 +29,7 @@ namespace webrtc {
Expand::Expand(BackgroundNoise* background_noise,
SyncBuffer* sync_buffer,
RandomVector* random_vector,
+ StatisticsCalculator* statistics,
int fs,
size_t num_channels)
: random_vector_(random_vector),
@@ -36,13 +39,15 @@ Expand::Expand(BackgroundNoise* background_noise,
num_channels_(num_channels),
consecutive_expands_(0),
background_noise_(background_noise),
+ statistics_(statistics),
overlap_length_(5 * fs / 8000),
lag_index_direction_(0),
current_lag_index_(0),
stop_muting_(false),
+ expand_duration_samples_(0),
channel_parameters_(new ChannelParameters[num_channels_]) {
assert(fs == 8000 || fs == 16000 || fs == 32000 || fs == 48000);
- assert(fs <= kMaxSampleRate); // Should not be possible.
+ assert(fs <= static_cast<int>(kMaxSampleRate)); // Should not be possible.
assert(num_channels_ > 0);
memset(expand_lags_, 0, sizeof(expand_lags_));
Reset();
@@ -67,7 +72,7 @@ int Expand::Process(AudioMultiVector* output) {
int16_t temp_data[kTempDataSize]; // TODO(hlundin) Remove this.
int16_t* voiced_vector_storage = temp_data;
int16_t* voiced_vector = &voiced_vector_storage[overlap_length_];
- static const int kNoiseLpcOrder = BackgroundNoise::kMaxLpcOrder;
+ static const size_t kNoiseLpcOrder = BackgroundNoise::kMaxLpcOrder;
int16_t unvoiced_array_memory[kNoiseLpcOrder + kMaxSampleRate / 8000 * 125];
int16_t* unvoiced_vector = unvoiced_array_memory + kUnvoicedLpcOrder;
int16_t* noise_vector = unvoiced_array_memory + kNoiseLpcOrder;
@@ -78,10 +83,11 @@ int Expand::Process(AudioMultiVector* output) {
// Perform initial setup if this is the first expansion since last reset.
AnalyzeSignal(random_vector);
first_expand_ = false;
+ expand_duration_samples_ = 0;
} else {
// This is not the first expansion, parameters are already estimated.
// Extract a noise segment.
- int16_t rand_length = max_lag_;
+ size_t rand_length = max_lag_;
// This only applies to SWB where length could be larger than 256.
assert(rand_length <= kMaxSampleRate / 8000 * 120 + 30);
GenerateRandomVector(2, rand_length, random_vector);
@@ -113,7 +119,7 @@ int Expand::Process(AudioMultiVector* output) {
WebRtcSpl_ScaleAndAddVectorsWithRound(
&parameters.expand_vector0[expansion_vector_position], 3,
&parameters.expand_vector1[expansion_vector_position], 1, 2,
- voiced_vector_storage, static_cast<int>(temp_length));
+ voiced_vector_storage, temp_length);
} else if (current_lag_index_ == 2) {
// Mix 1/2 of expand_vector0 with 1/2 of expand_vector1.
assert(expansion_vector_position + temp_length <=
@@ -123,7 +129,7 @@ int Expand::Process(AudioMultiVector* output) {
WebRtcSpl_ScaleAndAddVectorsWithRound(
&parameters.expand_vector0[expansion_vector_position], 1,
&parameters.expand_vector1[expansion_vector_position], 1, 1,
- voiced_vector_storage, static_cast<int>(temp_length));
+ voiced_vector_storage, temp_length);
}
// Get tapering window parameters. Values are in Q15.
@@ -190,10 +196,10 @@ int Expand::Process(AudioMultiVector* output) {
WebRtcSpl_AffineTransformVector(scaled_random_vector, random_vector,
parameters.ar_gain, add_constant,
parameters.ar_gain_scale,
- static_cast<int>(current_lag));
+ current_lag);
WebRtcSpl_FilterARFastQ12(scaled_random_vector, unvoiced_vector,
parameters.ar_filter, kUnvoicedLpcOrder + 1,
- static_cast<int>(current_lag));
+ current_lag);
memcpy(parameters.ar_filter_state,
&(unvoiced_vector[current_lag - kUnvoicedLpcOrder]),
sizeof(int16_t) * kUnvoicedLpcOrder);
@@ -206,7 +212,8 @@ int Expand::Process(AudioMultiVector* output) {
// (>= 31 .. <= 63) * fs_mult => go from 1 to 0 in about 16 ms;
// >= 64 * fs_mult => go from 1 to 0 in about 32 ms.
// temp_shift = getbits(max_lag_) - 5.
- int temp_shift = (31 - WebRtcSpl_NormW32(max_lag_)) - 5;
+ int temp_shift =
+ (31 - WebRtcSpl_NormW32(rtc::checked_cast<int32_t>(max_lag_))) - 5;
int16_t mix_factor_increment = 256 >> temp_shift;
if (stop_muting_) {
mix_factor_increment = 0;
@@ -231,7 +238,7 @@ int Expand::Process(AudioMultiVector* output) {
WebRtcSpl_ScaleAndAddVectorsWithRound(
voiced_vector + temp_length, parameters.current_voice_mix_factor,
unvoiced_vector + temp_length, temp_scale, 14,
- temp_data + temp_length, static_cast<int>(current_lag - temp_length));
+ temp_data + temp_length, current_lag - temp_length);
}
// Select muting slope depending on how many consecutive expands we have
@@ -252,7 +259,7 @@ int Expand::Process(AudioMultiVector* output) {
// Mute to the previous level, then continue with the muting.
WebRtcSpl_AffineTransformVector(temp_data, temp_data,
parameters.mute_factor, 8192,
- 14, static_cast<int>(current_lag));
+ 14, current_lag);
if (!stop_muting_) {
DspHelper::MuteSignal(temp_data, parameters.mute_slope, current_lag);
@@ -298,6 +305,10 @@ int Expand::Process(AudioMultiVector* output) {
// Increase call number and cap it.
consecutive_expands_ = consecutive_expands_ >= kMaxConsecutiveExpands ?
kMaxConsecutiveExpands : consecutive_expands_ + 1;
+ expand_duration_samples_ += output->Size();
+ // Clamp the duration counter at 2 seconds.
+ expand_duration_samples_ =
+ std::min(expand_duration_samples_, rtc::checked_cast<size_t>(fs_hz_ * 2));
return 0;
}
@@ -305,6 +316,8 @@ void Expand::SetParametersForNormalAfterExpand() {
current_lag_index_ = 0;
lag_index_direction_ = 0;
stop_muting_ = true; // Do not mute signal any more.
+ statistics_->LogDelayedPacketOutageEvent(
+ rtc::checked_cast<int>(expand_duration_samples_) / (fs_hz_ / 1000));
}
void Expand::SetParametersForMergeAfterExpand() {
@@ -339,26 +352,26 @@ void Expand::AnalyzeSignal(int16_t* random_vector) {
int32_t auto_correlation[kUnvoicedLpcOrder + 1];
int16_t reflection_coeff[kUnvoicedLpcOrder];
int16_t correlation_vector[kMaxSampleRate / 8000 * 102];
- int best_correlation_index[kNumCorrelationCandidates];
+ size_t best_correlation_index[kNumCorrelationCandidates];
int16_t best_correlation[kNumCorrelationCandidates];
- int16_t best_distortion_index[kNumCorrelationCandidates];
+ size_t best_distortion_index[kNumCorrelationCandidates];
int16_t best_distortion[kNumCorrelationCandidates];
int32_t correlation_vector2[(99 * kMaxSampleRate / 8000) + 1];
int32_t best_distortion_w32[kNumCorrelationCandidates];
- static const int kNoiseLpcOrder = BackgroundNoise::kMaxLpcOrder;
+ static const size_t kNoiseLpcOrder = BackgroundNoise::kMaxLpcOrder;
int16_t unvoiced_array_memory[kNoiseLpcOrder + kMaxSampleRate / 8000 * 125];
int16_t* unvoiced_vector = unvoiced_array_memory + kUnvoicedLpcOrder;
int fs_mult = fs_hz_ / 8000;
// Pre-calculate common multiplications with fs_mult.
- int fs_mult_4 = fs_mult * 4;
- int fs_mult_20 = fs_mult * 20;
- int fs_mult_120 = fs_mult * 120;
- int fs_mult_dist_len = fs_mult * kDistortionLength;
- int fs_mult_lpc_analysis_len = fs_mult * kLpcAnalysisLength;
+ size_t fs_mult_4 = static_cast<size_t>(fs_mult * 4);
+ size_t fs_mult_20 = static_cast<size_t>(fs_mult * 20);
+ size_t fs_mult_120 = static_cast<size_t>(fs_mult * 120);
+ size_t fs_mult_dist_len = fs_mult * kDistortionLength;
+ size_t fs_mult_lpc_analysis_len = fs_mult * kLpcAnalysisLength;
- const size_t signal_length = 256 * fs_mult;
+ const size_t signal_length = static_cast<size_t>(256 * fs_mult);
const int16_t* audio_history =
&(*sync_buffer_)[0][sync_buffer_->Size() - signal_length];
@@ -367,7 +380,7 @@ void Expand::AnalyzeSignal(int16_t* random_vector) {
// Calculate correlation in downsampled domain (4 kHz sample rate).
int correlation_scale;
- int correlation_length = 51; // TODO(hlundin): Legacy bit-exactness.
+ size_t correlation_length = 51; // TODO(hlundin): Legacy bit-exactness.
// If it is decided to break bit-exactness |correlation_length| should be
// initialized to the return value of Correlation().
Correlation(audio_history, signal_length, correlation_vector,
@@ -386,11 +399,11 @@ void Expand::AnalyzeSignal(int16_t* random_vector) {
// Calculate distortion around the |kNumCorrelationCandidates| best lags.
int distortion_scale = 0;
- for (int i = 0; i < kNumCorrelationCandidates; i++) {
- int16_t min_index = std::max(fs_mult_20,
- best_correlation_index[i] - fs_mult_4);
- int16_t max_index = std::min(fs_mult_120 - 1,
- best_correlation_index[i] + fs_mult_4);
+ for (size_t i = 0; i < kNumCorrelationCandidates; i++) {
+ size_t min_index = std::max(fs_mult_20,
+ best_correlation_index[i] - fs_mult_4);
+ size_t max_index = std::min(fs_mult_120 - 1,
+ best_correlation_index[i] + fs_mult_4);
best_distortion_index[i] = DspHelper::MinDistortion(
&(audio_history[signal_length - fs_mult_dist_len]), min_index,
max_index, fs_mult_dist_len, &best_distortion_w32[i]);
@@ -404,8 +417,8 @@ void Expand::AnalyzeSignal(int16_t* random_vector) {
// Find the maximizing index |i| of the cost function
// f[i] = best_correlation[i] / best_distortion[i].
int32_t best_ratio = std::numeric_limits<int32_t>::min();
- int best_index = std::numeric_limits<int>::max();
- for (int i = 0; i < kNumCorrelationCandidates; ++i) {
+ size_t best_index = std::numeric_limits<size_t>::max();
+ for (size_t i = 0; i < kNumCorrelationCandidates; ++i) {
int32_t ratio;
if (best_distortion[i] > 0) {
ratio = (best_correlation[i] << 16) / best_distortion[i];
@@ -420,19 +433,20 @@ void Expand::AnalyzeSignal(int16_t* random_vector) {
}
}
- int distortion_lag = best_distortion_index[best_index];
- int correlation_lag = best_correlation_index[best_index];
+ size_t distortion_lag = best_distortion_index[best_index];
+ size_t correlation_lag = best_correlation_index[best_index];
max_lag_ = std::max(distortion_lag, correlation_lag);
// Calculate the exact best correlation in the range between
// |correlation_lag| and |distortion_lag|.
correlation_length =
- std::max(std::min(distortion_lag + 10, fs_mult_120), 60 * fs_mult);
+ std::max(std::min(distortion_lag + 10, fs_mult_120),
+ static_cast<size_t>(60 * fs_mult));
- int start_index = std::min(distortion_lag, correlation_lag);
- int correlation_lags =
- WEBRTC_SPL_ABS_W16((distortion_lag-correlation_lag)) + 1;
- assert(correlation_lags <= 99 * fs_mult + 1); // Cannot be larger.
+ size_t start_index = std::min(distortion_lag, correlation_lag);
+ size_t correlation_lags = static_cast<size_t>(
+ WEBRTC_SPL_ABS_W16((distortion_lag-correlation_lag)) + 1);
+ assert(correlation_lags <= static_cast<size_t>(99 * fs_mult + 1));
for (size_t channel_ix = 0; channel_ix < num_channels_; ++channel_ix) {
ChannelParameters& parameters = channel_parameters_[channel_ix];
@@ -441,8 +455,8 @@ void Expand::AnalyzeSignal(int16_t* random_vector) {
&audio_history[signal_length - correlation_length - start_index
- correlation_lags],
correlation_length + start_index + correlation_lags - 1);
- correlation_scale = ((31 - WebRtcSpl_NormW32(signal_max * signal_max))
- + (31 - WebRtcSpl_NormW32(correlation_length))) - 31;
+ correlation_scale = (31 - WebRtcSpl_NormW32(signal_max * signal_max)) +
+ (31 - WebRtcSpl_NormW32(static_cast<int32_t>(correlation_length))) - 31;
correlation_scale = std::max(0, correlation_scale);
// Calculate the correlation, store in |correlation_vector2|.
@@ -496,7 +510,7 @@ void Expand::AnalyzeSignal(int16_t* random_vector) {
// Extract the two vectors expand_vector0 and expand_vector1 from
// |audio_history|.
- int16_t expansion_length = static_cast<int16_t>(max_lag_ + overlap_length_);
+ size_t expansion_length = max_lag_ + overlap_length_;
const int16_t* vector1 = &(audio_history[signal_length - expansion_length]);
const int16_t* vector2 = vector1 - distortion_lag;
// Normalize the second vector to the same energy as the first.
@@ -515,15 +529,15 @@ void Expand::AnalyzeSignal(int16_t* random_vector) {
// Calculate scaled_energy1 / scaled_energy2 in Q13.
int32_t energy_ratio = WebRtcSpl_DivW32W16(
WEBRTC_SPL_SHIFT_W32(energy1, -scaled_energy1),
- energy2 >> scaled_energy2);
+ static_cast<int16_t>(energy2 >> scaled_energy2));
// Calculate sqrt ratio in Q13 (sqrt of en1/en2 in Q26).
- amplitude_ratio = WebRtcSpl_SqrtFloor(energy_ratio << 13);
+ amplitude_ratio =
+ static_cast<int16_t>(WebRtcSpl_SqrtFloor(energy_ratio << 13));
// Copy the two vectors and give them the same energy.
parameters.expand_vector0.Clear();
parameters.expand_vector0.PushBack(vector1, expansion_length);
parameters.expand_vector1.Clear();
- if (parameters.expand_vector1.Size() <
- static_cast<size_t>(expansion_length)) {
+ if (parameters.expand_vector1.Size() < expansion_length) {
parameters.expand_vector1.Extend(
expansion_length - parameters.expand_vector1.Size());
}
@@ -614,7 +628,7 @@ void Expand::AnalyzeSignal(int16_t* random_vector) {
if (channel_ix == 0) {
// Extract a noise segment.
- int16_t noise_length;
+ size_t noise_length;
if (distortion_lag < 40) {
noise_length = 2 * distortion_lag + 30;
} else {
@@ -756,7 +770,7 @@ void Expand::Correlation(const int16_t* input,
int* output_scale) const {
// Set parameters depending on sample rate.
const int16_t* filter_coefficients;
- int16_t num_coefficients;
+ size_t num_coefficients;
int16_t downsampling_factor;
if (fs_hz_ == 8000) {
num_coefficients = 3;
@@ -778,14 +792,14 @@ void Expand::Correlation(const int16_t* input,
// Correlate from lag 10 to lag 60 in downsampled domain.
// (Corresponds to 20-120 for narrow-band, 40-240 for wide-band, and so on.)
- static const int kCorrelationStartLag = 10;
- static const int kNumCorrelationLags = 54;
- static const int kCorrelationLength = 60;
+ static const size_t kCorrelationStartLag = 10;
+ static const size_t kNumCorrelationLags = 54;
+ static const size_t kCorrelationLength = 60;
// Downsample to 4 kHz sample rate.
- static const int kDownsampledLength = kCorrelationStartLag
+ static const size_t kDownsampledLength = kCorrelationStartLag
+ kNumCorrelationLags + kCorrelationLength;
int16_t downsampled_input[kDownsampledLength];
- static const int kFilterDelay = 0;
+ static const size_t kFilterDelay = 0;
WebRtcSpl_DownsampleFast(
input + input_length - kDownsampledLength * downsampling_factor,
kDownsampledLength * downsampling_factor, downsampled_input,
@@ -833,10 +847,11 @@ void Expand::UpdateLagIndex() {
Expand* ExpandFactory::Create(BackgroundNoise* background_noise,
SyncBuffer* sync_buffer,
RandomVector* random_vector,
+ StatisticsCalculator* statistics,
int fs,
size_t num_channels) const {
- return new Expand(background_noise, sync_buffer, random_vector, fs,
- num_channels);
+ return new Expand(background_noise, sync_buffer, random_vector, statistics,
+ fs, num_channels);
}
// TODO(turajs): This can be moved to BackgroundNoise class.
@@ -846,9 +861,9 @@ void Expand::GenerateBackgroundNoise(int16_t* random_vector,
bool too_many_expands,
size_t num_noise_samples,
int16_t* buffer) {
- static const int kNoiseLpcOrder = BackgroundNoise::kMaxLpcOrder;
+ static const size_t kNoiseLpcOrder = BackgroundNoise::kMaxLpcOrder;
int16_t scaled_random_vector[kMaxSampleRate / 8000 * 125];
- assert(num_noise_samples <= static_cast<size_t>(kMaxSampleRate / 8000 * 125));
+ assert(num_noise_samples <= (kMaxSampleRate / 8000 * 125));
int16_t* noise_samples = &buffer[kNoiseLpcOrder];
if (background_noise_->initialized()) {
// Use background noise parameters.
@@ -866,12 +881,12 @@ void Expand::GenerateBackgroundNoise(int16_t* random_vector,
scaled_random_vector, random_vector,
background_noise_->Scale(channel), dc_offset,
background_noise_->ScaleShift(channel),
- static_cast<int>(num_noise_samples));
+ num_noise_samples);
WebRtcSpl_FilterARFastQ12(scaled_random_vector, noise_samples,
background_noise_->Filter(channel),
kNoiseLpcOrder + 1,
- static_cast<int>(num_noise_samples));
+ num_noise_samples);
background_noise_->SetFilterState(
channel,
@@ -918,7 +933,7 @@ void Expand::GenerateBackgroundNoise(int16_t* random_vector,
// kBgnFade has reached 0.
WebRtcSpl_AffineTransformVector(noise_samples, noise_samples,
bgn_mute_factor, 8192, 14,
- static_cast<int>(num_noise_samples));
+ num_noise_samples);
}
}
// Update mute_factor in BackgroundNoise class.
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/expand.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/expand.h
index 5fb117d519c..25c8c21bdbd 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/expand.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/expand.h
@@ -23,6 +23,7 @@ namespace webrtc {
// Forward declarations.
class BackgroundNoise;
class RandomVector;
+class StatisticsCalculator;
class SyncBuffer;
// This class handles extrapolation of audio data from the sync_buffer to
@@ -34,6 +35,7 @@ class Expand {
Expand(BackgroundNoise* background_noise,
SyncBuffer* sync_buffer,
RandomVector* random_vector,
+ StatisticsCalculator* statistics,
int fs,
size_t num_channels);
@@ -62,7 +64,7 @@ class Expand {
// Accessors and mutators.
virtual size_t overlap_length() const;
- int16_t max_lag() const { return max_lag_; }
+ size_t max_lag() const { return max_lag_; }
protected:
static const int kMaxConsecutiveExpands = 200;
@@ -86,19 +88,19 @@ class Expand {
// necessary to produce concealment data.
void AnalyzeSignal(int16_t* random_vector);
- RandomVector* random_vector_;
- SyncBuffer* sync_buffer_;
+ RandomVector* const random_vector_;
+ SyncBuffer* const sync_buffer_;
bool first_expand_;
const int fs_hz_;
const size_t num_channels_;
int consecutive_expands_;
private:
- static const int kUnvoicedLpcOrder = 6;
- static const int kNumCorrelationCandidates = 3;
- static const int kDistortionLength = 20;
- static const int kLpcAnalysisLength = 160;
- static const int kMaxSampleRate = 48000;
+ static const size_t kUnvoicedLpcOrder = 6;
+ static const size_t kNumCorrelationCandidates = 3;
+ static const size_t kDistortionLength = 20;
+ static const size_t kLpcAnalysisLength = 160;
+ static const size_t kMaxSampleRate = 48000;
static const int kNumLags = 3;
struct ChannelParameters {
@@ -127,16 +129,18 @@ class Expand {
void UpdateLagIndex();
- BackgroundNoise* background_noise_;
+ BackgroundNoise* const background_noise_;
+ StatisticsCalculator* const statistics_;
const size_t overlap_length_;
- int16_t max_lag_;
+ size_t max_lag_;
size_t expand_lags_[kNumLags];
int lag_index_direction_;
int current_lag_index_;
bool stop_muting_;
+ size_t expand_duration_samples_;
rtc::scoped_ptr<ChannelParameters[]> channel_parameters_;
- DISALLOW_COPY_AND_ASSIGN(Expand);
+ RTC_DISALLOW_COPY_AND_ASSIGN(Expand);
};
struct ExpandFactory {
@@ -146,6 +150,7 @@ struct ExpandFactory {
virtual Expand* Create(BackgroundNoise* background_noise,
SyncBuffer* sync_buffer,
RandomVector* random_vector,
+ StatisticsCalculator* statistics,
int fs,
size_t num_channels) const;
};
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/expand_unittest.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/expand_unittest.cc
index 68b4f60f15c..1441704102d 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/expand_unittest.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/expand_unittest.cc
@@ -13,9 +13,14 @@
#include "webrtc/modules/audio_coding/neteq/expand.h"
#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/safe_conversions.h"
+#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
#include "webrtc/modules/audio_coding/neteq/background_noise.h"
#include "webrtc/modules/audio_coding/neteq/random_vector.h"
+#include "webrtc/modules/audio_coding/neteq/statistics_calculator.h"
#include "webrtc/modules/audio_coding/neteq/sync_buffer.h"
+#include "webrtc/modules/audio_coding/neteq/tools/resample_input_audio_file.h"
+#include "webrtc/test/testsupport/fileutils.h"
namespace webrtc {
@@ -25,7 +30,8 @@ TEST(Expand, CreateAndDestroy) {
BackgroundNoise bgn(channels);
SyncBuffer sync_buffer(1, 1000);
RandomVector random_vector;
- Expand expand(&bgn, &sync_buffer, &random_vector, fs, channels);
+ StatisticsCalculator statistics;
+ Expand expand(&bgn, &sync_buffer, &random_vector, &statistics, fs, channels);
}
TEST(Expand, CreateUsingFactory) {
@@ -34,13 +40,135 @@ TEST(Expand, CreateUsingFactory) {
BackgroundNoise bgn(channels);
SyncBuffer sync_buffer(1, 1000);
RandomVector random_vector;
+ StatisticsCalculator statistics;
ExpandFactory expand_factory;
- Expand* expand =
- expand_factory.Create(&bgn, &sync_buffer, &random_vector, fs, channels);
+ Expand* expand = expand_factory.Create(&bgn, &sync_buffer, &random_vector,
+ &statistics, fs, channels);
EXPECT_TRUE(expand != NULL);
delete expand;
}
+namespace {
+class FakeStatisticsCalculator : public StatisticsCalculator {
+ public:
+ void LogDelayedPacketOutageEvent(int outage_duration_ms) override {
+ last_outage_duration_ms_ = outage_duration_ms;
+ }
+
+ int last_outage_duration_ms() const { return last_outage_duration_ms_; }
+
+ private:
+ int last_outage_duration_ms_ = 0;
+};
+
+// This is the same size that is given to the SyncBuffer object in NetEq.
+const size_t kNetEqSyncBufferLengthMs = 720;
+} // namespace
+
+class ExpandTest : public ::testing::Test {
+ protected:
+ ExpandTest()
+ : input_file_(test::ResourcePath("audio_coding/testfile32kHz", "pcm"),
+ 32000),
+ test_sample_rate_hz_(32000),
+ num_channels_(1),
+ background_noise_(num_channels_),
+ sync_buffer_(num_channels_,
+ kNetEqSyncBufferLengthMs * test_sample_rate_hz_ / 1000),
+ expand_(&background_noise_,
+ &sync_buffer_,
+ &random_vector_,
+ &statistics_,
+ test_sample_rate_hz_,
+ num_channels_) {
+ WebRtcSpl_Init();
+ input_file_.set_output_rate_hz(test_sample_rate_hz_);
+ }
+
+ void SetUp() override {
+ // Fast-forward the input file until there is speech (about 1.1 second into
+ // the file).
+ const size_t speech_start_samples =
+ static_cast<size_t>(test_sample_rate_hz_ * 1.1f);
+ ASSERT_TRUE(input_file_.Seek(speech_start_samples));
+
+ // Pre-load the sync buffer with speech data.
+ ASSERT_TRUE(
+ input_file_.Read(sync_buffer_.Size(), &sync_buffer_.Channel(0)[0]));
+ ASSERT_EQ(1u, num_channels_) << "Fix: Must populate all channels.";
+ }
+
+ test::ResampleInputAudioFile input_file_;
+ int test_sample_rate_hz_;
+ size_t num_channels_;
+ BackgroundNoise background_noise_;
+ SyncBuffer sync_buffer_;
+ RandomVector random_vector_;
+ FakeStatisticsCalculator statistics_;
+ Expand expand_;
+};
+
+// This test calls the expand object to produce concealment data a few times,
+// and then ends by calling SetParametersForNormalAfterExpand. This simulates
+// the situation where the packet next up for decoding was just delayed, not
+// lost.
+TEST_F(ExpandTest, DelayedPacketOutage) {
+ AudioMultiVector output(num_channels_);
+ size_t sum_output_len_samples = 0;
+ for (int i = 0; i < 10; ++i) {
+ EXPECT_EQ(0, expand_.Process(&output));
+ EXPECT_GT(output.Size(), 0u);
+ sum_output_len_samples += output.Size();
+ EXPECT_EQ(0, statistics_.last_outage_duration_ms());
+ }
+ expand_.SetParametersForNormalAfterExpand();
+ // Convert |sum_output_len_samples| to milliseconds.
+ EXPECT_EQ(rtc::checked_cast<int>(sum_output_len_samples /
+ (test_sample_rate_hz_ / 1000)),
+ statistics_.last_outage_duration_ms());
+}
+
+// This test is similar to DelayedPacketOutage, but ends by calling
+// SetParametersForMergeAfterExpand. This simulates the situation where the
+// packet next up for decoding was actually lost (or at least a later packet
+// arrived before it).
+TEST_F(ExpandTest, LostPacketOutage) {
+ AudioMultiVector output(num_channels_);
+ size_t sum_output_len_samples = 0;
+ for (int i = 0; i < 10; ++i) {
+ EXPECT_EQ(0, expand_.Process(&output));
+ EXPECT_GT(output.Size(), 0u);
+ sum_output_len_samples += output.Size();
+ EXPECT_EQ(0, statistics_.last_outage_duration_ms());
+ }
+ expand_.SetParametersForMergeAfterExpand();
+ EXPECT_EQ(0, statistics_.last_outage_duration_ms());
+}
+
+// This test is similar to the DelayedPacketOutage test above, but with the
+// difference that Expand::Reset() is called after 5 calls to Expand::Process().
+// This should reset the statistics, and will in the end lead to an outage of
+// 5 periods instead of 10.
+TEST_F(ExpandTest, CheckOutageStatsAfterReset) {
+ AudioMultiVector output(num_channels_);
+ size_t sum_output_len_samples = 0;
+ for (int i = 0; i < 10; ++i) {
+ EXPECT_EQ(0, expand_.Process(&output));
+ EXPECT_GT(output.Size(), 0u);
+ sum_output_len_samples += output.Size();
+ if (i == 5) {
+ expand_.Reset();
+ sum_output_len_samples = 0;
+ }
+ EXPECT_EQ(0, statistics_.last_outage_duration_ms());
+ }
+ expand_.SetParametersForNormalAfterExpand();
+ // Convert |sum_output_len_samples| to milliseconds.
+ EXPECT_EQ(rtc::checked_cast<int>(sum_output_len_samples /
+ (test_sample_rate_hz_ / 1000)),
+ statistics_.last_outage_duration_ms());
+}
+
// TODO(hlundin): Write more tests.
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/interface/neteq.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/interface/neteq.h
index 88bf2087fff..48e8fd5cdee 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/interface/neteq.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/interface/neteq.h
@@ -14,7 +14,6 @@
#include <string.h> // Provide access to size_t.
#include <string>
-#include <vector>
#include "webrtc/base/constructormagic.h"
#include "webrtc/common_types.h"
@@ -45,7 +44,13 @@ struct NetEqNetworkStatistics {
// decoding (in Q14).
int32_t clockdrift_ppm; // Average clock-drift in parts-per-million
// (positive or negative).
- int added_zero_samples; // Number of zero samples added in "off" mode.
+ size_t added_zero_samples; // Number of zero samples added in "off" mode.
+ // Statistics for packet waiting times, i.e., the time between a packet
+ // arrives until it is decoded.
+ int mean_waiting_time_ms;
+ int median_waiting_time_ms;
+ int min_waiting_time_ms;
+ int max_waiting_time_ms;
};
enum NetEqOutputType {
@@ -87,7 +92,7 @@ class NetEq {
int sample_rate_hz; // Initial value. Will change with input data.
bool enable_audio_classifier;
- int max_packets_in_buffer;
+ size_t max_packets_in_buffer;
int max_delay_ms;
BackgroundNoiseMode background_noise_mode;
NetEqPlayoutMode playout_mode;
@@ -165,7 +170,7 @@ class NetEq {
// The speech type is written to |type|, if |type| is not NULL.
// Returns kOK on success, or kFail in case of an error.
virtual int GetAudio(size_t max_length, int16_t* output_audio,
- int* samples_per_channel, int* num_channels,
+ size_t* samples_per_channel, int* num_channels,
NetEqOutputType* type) = 0;
// Associates |rtp_payload_type| with |codec| and stores the information in
@@ -210,8 +215,8 @@ class NetEq {
// Not implemented.
virtual int TargetDelay() = 0;
- // Not implemented.
- virtual int CurrentDelay() = 0;
+ // Returns the current total delay (packet buffer and sync buffer) in ms.
+ virtual int CurrentDelayMs() const = 0;
// Sets the playout mode to |mode|.
// Deprecated. Set the mode in the Config struct passed to the constructor.
@@ -227,11 +232,6 @@ class NetEq {
// after the call.
virtual int NetworkStatistics(NetEqNetworkStatistics* stats) = 0;
- // Writes the last packet waiting times (in ms) to |waiting_times|. The number
- // of values written is no more than 100, but may be smaller if the interface
- // is polled again before 100 packets has arrived.
- virtual void WaitingTimes(std::vector<int>* waiting_times) = 0;
-
// Writes the current RTCP statistics to |stats|. The statistics are reset
// and a new report period is started with the call.
virtual void GetRtcpStatistics(RtcpStatistics* stats) = 0;
@@ -281,7 +281,7 @@ class NetEq {
NetEq() {}
private:
- DISALLOW_COPY_AND_ASSIGN(NetEq);
+ RTC_DISALLOW_COPY_AND_ASSIGN(NetEq);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/merge.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/merge.cc
index 2c515c14eb3..b6fb2d8a267 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/merge.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/merge.cc
@@ -31,25 +31,25 @@ Merge::Merge(int fs_hz,
: fs_hz_(fs_hz),
num_channels_(num_channels),
fs_mult_(fs_hz_ / 8000),
- timestamps_per_call_(fs_hz_ / 100),
+ timestamps_per_call_(static_cast<size_t>(fs_hz_ / 100)),
expand_(expand),
sync_buffer_(sync_buffer),
expanded_(num_channels_) {
assert(num_channels_ > 0);
}
-int Merge::Process(int16_t* input, size_t input_length,
- int16_t* external_mute_factor_array,
- AudioMultiVector* output) {
+size_t Merge::Process(int16_t* input, size_t input_length,
+ int16_t* external_mute_factor_array,
+ AudioMultiVector* output) {
// TODO(hlundin): Change to an enumerator and skip assert.
assert(fs_hz_ == 8000 || fs_hz_ == 16000 || fs_hz_ == 32000 ||
fs_hz_ == 48000);
assert(fs_hz_ <= kMaxSampleRate); // Should not be possible.
- int old_length;
- int expand_period;
+ size_t old_length;
+ size_t expand_period;
// Get expansion data to overlap and mix with.
- int expanded_length = GetExpandedSignal(&old_length, &expand_period);
+ size_t expanded_length = GetExpandedSignal(&old_length, &expand_period);
// Transfer input signal to an AudioMultiVector.
AudioMultiVector input_vector(num_channels_);
@@ -57,7 +57,7 @@ int Merge::Process(int16_t* input, size_t input_length,
size_t input_length_per_channel = input_vector.Size();
assert(input_length_per_channel == input_length / num_channels_);
- int16_t best_correlation_index = 0;
+ size_t best_correlation_index = 0;
size_t output_length = 0;
for (size_t channel = 0; channel < num_channels_; ++channel) {
@@ -65,8 +65,8 @@ int Merge::Process(int16_t* input, size_t input_length,
int16_t* expanded_channel = &expanded_[channel][0];
int16_t expanded_max, input_max;
int16_t new_mute_factor = SignalScaling(
- input_channel, static_cast<int>(input_length_per_channel),
- expanded_channel, &expanded_max, &input_max);
+ input_channel, input_length_per_channel, expanded_channel,
+ &expanded_max, &input_max);
// Adjust muting factor (product of "main" muting factor and expand muting
// factor).
@@ -84,13 +84,13 @@ int Merge::Process(int16_t* input, size_t input_length,
// Downsample, correlate, and find strongest correlation period for the
// master (i.e., first) channel only.
// Downsample to 4kHz sample rate.
- Downsample(input_channel, static_cast<int>(input_length_per_channel),
- expanded_channel, expanded_length);
+ Downsample(input_channel, input_length_per_channel, expanded_channel,
+ expanded_length);
// Calculate the lag of the strongest correlation period.
best_correlation_index = CorrelateAndPeakSearch(
expanded_max, input_max, old_length,
- static_cast<int>(input_length_per_channel), expand_period);
+ input_length_per_channel, expand_period);
}
static const int kTempDataSize = 3600;
@@ -99,11 +99,11 @@ int Merge::Process(int16_t* input, size_t input_length,
// Mute the new decoded data if needed (and unmute it linearly).
// This is the overlapping part of expanded_signal.
- int interpolation_length = std::min(
+ size_t interpolation_length = std::min(
kMaxCorrelationLength * fs_mult_,
expanded_length - best_correlation_index);
interpolation_length = std::min(interpolation_length,
- static_cast<int>(input_length_per_channel));
+ input_length_per_channel);
if (*external_mute_factor < 16384) {
// Set a suitable muting slope (Q20). 0.004 for NB, 0.002 for WB,
// and so on.
@@ -153,14 +153,14 @@ int Merge::Process(int16_t* input, size_t input_length,
// Return new added length. |old_length| samples were borrowed from
// |sync_buffer_|.
- return static_cast<int>(output_length) - old_length;
+ return output_length - old_length;
}
-int Merge::GetExpandedSignal(int* old_length, int* expand_period) {
+size_t Merge::GetExpandedSignal(size_t* old_length, size_t* expand_period) {
// Check how much data that is left since earlier.
- *old_length = static_cast<int>(sync_buffer_->FutureLength());
+ *old_length = sync_buffer_->FutureLength();
// Should never be less than overlap_length.
- assert(*old_length >= static_cast<int>(expand_->overlap_length()));
+ assert(*old_length >= expand_->overlap_length());
// Generate data to merge the overlap with using expand.
expand_->SetParametersForMergeAfterExpand();
@@ -171,7 +171,7 @@ int Merge::GetExpandedSignal(int* old_length, int* expand_period) {
// but shift them towards the end of the buffer. This is ok, since all of
// the buffer will be expand data anyway, so as long as the beginning is
// left untouched, we're fine.
- int16_t length_diff = *old_length - 210 * kMaxSampleRate / 8000;
+ size_t length_diff = *old_length - 210 * kMaxSampleRate / 8000;
sync_buffer_->InsertZerosAtIndex(length_diff, sync_buffer_->next_index());
*old_length = 210 * kMaxSampleRate / 8000;
// This is the truncated length.
@@ -181,34 +181,34 @@ int Merge::GetExpandedSignal(int* old_length, int* expand_period) {
AudioMultiVector expanded_temp(num_channels_);
expand_->Process(&expanded_temp);
- *expand_period = static_cast<int>(expanded_temp.Size()); // Samples per
- // channel.
+ *expand_period = expanded_temp.Size(); // Samples per channel.
expanded_.Clear();
// Copy what is left since earlier into the expanded vector.
expanded_.PushBackFromIndex(*sync_buffer_, sync_buffer_->next_index());
- assert(expanded_.Size() == static_cast<size_t>(*old_length));
+ assert(expanded_.Size() == *old_length);
assert(expanded_temp.Size() > 0);
// Do "ugly" copy and paste from the expanded in order to generate more data
// to correlate (but not interpolate) with.
- const int required_length = (120 + 80 + 2) * fs_mult_;
- if (expanded_.Size() < static_cast<size_t>(required_length)) {
- while (expanded_.Size() < static_cast<size_t>(required_length)) {
+ const size_t required_length = static_cast<size_t>((120 + 80 + 2) * fs_mult_);
+ if (expanded_.Size() < required_length) {
+ while (expanded_.Size() < required_length) {
// Append one more pitch period each time.
expanded_.PushBack(expanded_temp);
}
// Trim the length to exactly |required_length|.
expanded_.PopBack(expanded_.Size() - required_length);
}
- assert(expanded_.Size() >= static_cast<size_t>(required_length));
+ assert(expanded_.Size() >= required_length);
return required_length;
}
-int16_t Merge::SignalScaling(const int16_t* input, int input_length,
+int16_t Merge::SignalScaling(const int16_t* input, size_t input_length,
const int16_t* expanded_signal,
int16_t* expanded_max, int16_t* input_max) const {
// Adjust muting factor if new vector is more or less of the BGN energy.
- const int mod_input_length = std::min(64 * fs_mult_, input_length);
+ const size_t mod_input_length =
+ std::min(static_cast<size_t>(64 * fs_mult_), input_length);
*expanded_max = WebRtcSpl_MaxAbsValueW16(expanded_signal, mod_input_length);
*input_max = WebRtcSpl_MaxAbsValueW16(input, mod_input_length);
@@ -260,13 +260,13 @@ int16_t Merge::SignalScaling(const int16_t* input, int input_length,
// TODO(hlundin): There are some parameter values in this method that seem
// strange. Compare with Expand::Correlation.
-void Merge::Downsample(const int16_t* input, int input_length,
- const int16_t* expanded_signal, int expanded_length) {
+void Merge::Downsample(const int16_t* input, size_t input_length,
+ const int16_t* expanded_signal, size_t expanded_length) {
const int16_t* filter_coefficients;
- int num_coefficients;
+ size_t num_coefficients;
int decimation_factor = fs_hz_ / 4000;
- static const int kCompensateDelay = 0;
- int length_limit = fs_hz_ / 100; // 10 ms in samples.
+ static const size_t kCompensateDelay = 0;
+ size_t length_limit = static_cast<size_t>(fs_hz_ / 100); // 10 ms in samples.
if (fs_hz_ == 8000) {
filter_coefficients = DspHelper::kDownsample8kHzTbl;
num_coefficients = 3;
@@ -280,7 +280,7 @@ void Merge::Downsample(const int16_t* input, int input_length,
filter_coefficients = DspHelper::kDownsample48kHzTbl;
num_coefficients = 7;
}
- int signal_offset = num_coefficients - 1;
+ size_t signal_offset = num_coefficients - 1;
WebRtcSpl_DownsampleFast(&expanded_signal[signal_offset],
expanded_length - signal_offset,
expanded_downsampled_, kExpandDownsampLength,
@@ -288,10 +288,10 @@ void Merge::Downsample(const int16_t* input, int input_length,
decimation_factor, kCompensateDelay);
if (input_length <= length_limit) {
// Not quite long enough, so we have to cheat a bit.
- int16_t temp_len = input_length - signal_offset;
+ size_t temp_len = input_length - signal_offset;
// TODO(hlundin): Should |downsamp_temp_len| be corrected for round-off
// errors? I.e., (temp_len + decimation_factor - 1) / decimation_factor?
- int16_t downsamp_temp_len = temp_len / decimation_factor;
+ size_t downsamp_temp_len = temp_len / decimation_factor;
WebRtcSpl_DownsampleFast(&input[signal_offset], temp_len,
input_downsampled_, downsamp_temp_len,
filter_coefficients, num_coefficients,
@@ -307,12 +307,12 @@ void Merge::Downsample(const int16_t* input, int input_length,
}
}
-int16_t Merge::CorrelateAndPeakSearch(int16_t expanded_max, int16_t input_max,
- int start_position, int input_length,
- int expand_period) const {
+size_t Merge::CorrelateAndPeakSearch(int16_t expanded_max, int16_t input_max,
+ size_t start_position, size_t input_length,
+ size_t expand_period) const {
// Calculate correlation without any normalization.
- const int max_corr_length = kMaxCorrelationLength;
- int stop_position_downsamp =
+ const size_t max_corr_length = kMaxCorrelationLength;
+ size_t stop_position_downsamp =
std::min(max_corr_length, expand_->max_lag() / (fs_mult_ * 2) + 1);
int correlation_shift = 0;
if (expanded_max * input_max > 26843546) {
@@ -325,8 +325,8 @@ int16_t Merge::CorrelateAndPeakSearch(int16_t expanded_max, int16_t input_max,
stop_position_downsamp, correlation_shift, 1);
// Normalize correlation to 14 bits and copy to a 16-bit array.
- const int pad_length = static_cast<int>(expand_->overlap_length() - 1);
- const int correlation_buffer_size = 2 * pad_length + kMaxCorrelationLength;
+ const size_t pad_length = expand_->overlap_length() - 1;
+ const size_t correlation_buffer_size = 2 * pad_length + kMaxCorrelationLength;
rtc::scoped_ptr<int16_t[]> correlation16(
new int16_t[correlation_buffer_size]);
memset(correlation16.get(), 0, correlation_buffer_size * sizeof(int16_t));
@@ -342,21 +342,20 @@ int16_t Merge::CorrelateAndPeakSearch(int16_t expanded_max, int16_t input_max,
// (1) w16_bestIndex + input_length <
// timestamps_per_call_ + expand_->overlap_length();
// (2) w16_bestIndex + input_length < start_position.
- int start_index = timestamps_per_call_ +
- static_cast<int>(expand_->overlap_length());
+ size_t start_index = timestamps_per_call_ + expand_->overlap_length();
start_index = std::max(start_position, start_index);
start_index = (input_length > start_index) ? 0 : (start_index - input_length);
// Downscale starting index to 4kHz domain. (fs_mult_ * 2 = fs_hz_ / 4000.)
- int start_index_downsamp = start_index / (fs_mult_ * 2);
+ size_t start_index_downsamp = start_index / (fs_mult_ * 2);
// Calculate a modified |stop_position_downsamp| to account for the increased
// start index |start_index_downsamp| and the effective array length.
- int modified_stop_pos =
+ size_t modified_stop_pos =
std::min(stop_position_downsamp,
kMaxCorrelationLength + pad_length - start_index_downsamp);
- int best_correlation_index;
+ size_t best_correlation_index;
int16_t best_correlation;
- static const int kNumCorrelationCandidates = 1;
+ static const size_t kNumCorrelationCandidates = 1;
DspHelper::PeakDetection(&correlation_ptr[start_index_downsamp],
modified_stop_pos, kNumCorrelationCandidates,
fs_mult_, &best_correlation_index,
@@ -368,16 +367,16 @@ int16_t Merge::CorrelateAndPeakSearch(int16_t expanded_max, int16_t input_max,
// least 10ms + overlap . (This should never happen thanks to the above
// modification of peak-finding starting point.)
while (((best_correlation_index + input_length) <
- static_cast<int>(timestamps_per_call_ + expand_->overlap_length())) ||
- ((best_correlation_index + input_length) < start_position)) {
+ (timestamps_per_call_ + expand_->overlap_length())) ||
+ ((best_correlation_index + input_length) < start_position)) {
assert(false); // Should never happen.
best_correlation_index += expand_period; // Jump one lag ahead.
}
return best_correlation_index;
}
-int Merge::RequiredFutureSamples() {
- return static_cast<int>(fs_hz_ / 100 * num_channels_); // 10 ms.
+size_t Merge::RequiredFutureSamples() {
+ return fs_hz_ / 100 * num_channels_; // 10 ms.
}
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/merge.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/merge.h
index 1b60aeca8f3..a168502c271 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/merge.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/merge.h
@@ -46,11 +46,11 @@ class Merge {
// de-interleaving |input|. The values in |external_mute_factor_array| (Q14)
// will be used to scale the audio, and is updated in the process. The array
// must have |num_channels_| elements.
- virtual int Process(int16_t* input, size_t input_length,
- int16_t* external_mute_factor_array,
- AudioMultiVector* output);
+ virtual size_t Process(int16_t* input, size_t input_length,
+ int16_t* external_mute_factor_array,
+ AudioMultiVector* output);
- virtual int RequiredFutureSamples();
+ virtual size_t RequiredFutureSamples();
protected:
const int fs_hz_;
@@ -58,45 +58,45 @@ class Merge {
private:
static const int kMaxSampleRate = 48000;
- static const int kExpandDownsampLength = 100;
- static const int kInputDownsampLength = 40;
- static const int kMaxCorrelationLength = 60;
+ static const size_t kExpandDownsampLength = 100;
+ static const size_t kInputDownsampLength = 40;
+ static const size_t kMaxCorrelationLength = 60;
// Calls |expand_| to get more expansion data to merge with. The data is
// written to |expanded_signal_|. Returns the length of the expanded data,
// while |expand_period| will be the number of samples in one expansion period
// (typically one pitch period). The value of |old_length| will be the number
// of samples that were taken from the |sync_buffer_|.
- int GetExpandedSignal(int* old_length, int* expand_period);
+ size_t GetExpandedSignal(size_t* old_length, size_t* expand_period);
// Analyzes |input| and |expanded_signal| to find maximum values. Returns
// a muting factor (Q14) to be used on the new data.
- int16_t SignalScaling(const int16_t* input, int input_length,
+ int16_t SignalScaling(const int16_t* input, size_t input_length,
const int16_t* expanded_signal,
int16_t* expanded_max, int16_t* input_max) const;
// Downsamples |input| (|input_length| samples) and |expanded_signal| to
// 4 kHz sample rate. The downsampled signals are written to
// |input_downsampled_| and |expanded_downsampled_|, respectively.
- void Downsample(const int16_t* input, int input_length,
- const int16_t* expanded_signal, int expanded_length);
+ void Downsample(const int16_t* input, size_t input_length,
+ const int16_t* expanded_signal, size_t expanded_length);
// Calculates cross-correlation between |input_downsampled_| and
// |expanded_downsampled_|, and finds the correlation maximum. The maximizing
// lag is returned.
- int16_t CorrelateAndPeakSearch(int16_t expanded_max, int16_t input_max,
- int start_position, int input_length,
- int expand_period) const;
+ size_t CorrelateAndPeakSearch(int16_t expanded_max, int16_t input_max,
+ size_t start_position, size_t input_length,
+ size_t expand_period) const;
const int fs_mult_; // fs_hz_ / 8000.
- const int timestamps_per_call_;
+ const size_t timestamps_per_call_;
Expand* expand_;
SyncBuffer* sync_buffer_;
int16_t expanded_downsampled_[kExpandDownsampLength];
int16_t input_downsampled_[kInputDownsampLength];
AudioMultiVector expanded_;
- DISALLOW_COPY_AND_ASSIGN(Merge);
+ RTC_DISALLOW_COPY_AND_ASSIGN(Merge);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/merge_unittest.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/merge_unittest.cc
index bdcbbb8a9bf..ddb0e16ddf7 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/merge_unittest.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/merge_unittest.cc
@@ -18,6 +18,7 @@
#include "webrtc/modules/audio_coding/neteq/background_noise.h"
#include "webrtc/modules/audio_coding/neteq/expand.h"
#include "webrtc/modules/audio_coding/neteq/random_vector.h"
+#include "webrtc/modules/audio_coding/neteq/statistics_calculator.h"
#include "webrtc/modules/audio_coding/neteq/sync_buffer.h"
namespace webrtc {
@@ -28,7 +29,8 @@ TEST(Merge, CreateAndDestroy) {
BackgroundNoise bgn(channels);
SyncBuffer sync_buffer(1, 1000);
RandomVector random_vector;
- Expand expand(&bgn, &sync_buffer, &random_vector, fs, channels);
+ StatisticsCalculator statistics;
+ Expand expand(&bgn, &sync_buffer, &random_vector, &statistics, fs, channels);
Merge merge(fs, channels, &expand, &sync_buffer);
}
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/mock/mock_audio_decoder.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/mock/mock_audio_decoder.h
index 93261ab607d..8debcbbb1e2 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/mock/mock_audio_decoder.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/mock/mock_audio_decoder.h
@@ -26,11 +26,12 @@ class MockAudioDecoder : public AudioDecoder {
Decode,
int(const uint8_t*, size_t, int, size_t, int16_t*, SpeechType*));
MOCK_CONST_METHOD0(HasDecodePlc, bool());
- MOCK_METHOD2(DecodePlc, int(int, int16_t*));
- MOCK_METHOD0(Init, int());
+ MOCK_METHOD2(DecodePlc, size_t(size_t, int16_t*));
+ MOCK_METHOD0(Reset, void());
MOCK_METHOD5(IncomingPacket, int(const uint8_t*, size_t, uint16_t, uint32_t,
uint32_t));
MOCK_METHOD0(ErrorCode, int());
+ MOCK_CONST_METHOD2(PacketDuration, int(const uint8_t*, size_t));
MOCK_CONST_METHOD0(Channels, size_t());
MOCK_CONST_METHOD0(codec_type, NetEqDecoder());
MOCK_METHOD1(CodecSupported, bool(NetEqDecoder));
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/mock/mock_buffer_level_filter.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/mock/mock_buffer_level_filter.h
index ebc6acda993..82dee2a3452 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/mock/mock_buffer_level_filter.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/mock/mock_buffer_level_filter.h
@@ -25,8 +25,8 @@ class MockBufferLevelFilter : public BufferLevelFilter {
MOCK_METHOD0(Reset,
void());
MOCK_METHOD3(Update,
- void(int buffer_size_packets, int time_stretched_samples,
- int packet_len_samples));
+ void(size_t buffer_size_packets, int time_stretched_samples,
+ size_t packet_len_samples));
MOCK_METHOD1(SetTargetBufferLevel,
void(int target_buffer_level));
MOCK_CONST_METHOD0(filtered_current_level,
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/mock/mock_delay_manager.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/mock/mock_delay_manager.h
index 1d2dc8ea3d5..6fb85854d77 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/mock/mock_delay_manager.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/mock/mock_delay_manager.h
@@ -19,7 +19,8 @@ namespace webrtc {
class MockDelayManager : public DelayManager {
public:
- MockDelayManager(int max_packets_in_buffer, DelayPeakDetector* peak_detector)
+ MockDelayManager(size_t max_packets_in_buffer,
+ DelayPeakDetector* peak_detector)
: DelayManager(max_packets_in_buffer, peak_detector) {}
virtual ~MockDelayManager() { Die(); }
MOCK_METHOD0(Die, void());
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/mock/mock_dtmf_tone_generator.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/mock/mock_dtmf_tone_generator.h
index 881e9005bb9..a1c370e1807 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/mock/mock_dtmf_tone_generator.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/mock/mock_dtmf_tone_generator.h
@@ -26,7 +26,7 @@ class MockDtmfToneGenerator : public DtmfToneGenerator {
MOCK_METHOD0(Reset,
void());
MOCK_METHOD2(Generate,
- int(int num_samples, AudioMultiVector* output));
+ int(size_t num_samples, AudioMultiVector* output));
MOCK_CONST_METHOD0(initialized,
bool());
};
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/mock/mock_expand.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/mock/mock_expand.h
index 45e3239f612..f5ca077531e 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/mock/mock_expand.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/mock/mock_expand.h
@@ -22,10 +22,15 @@ class MockExpand : public Expand {
MockExpand(BackgroundNoise* background_noise,
SyncBuffer* sync_buffer,
RandomVector* random_vector,
+ StatisticsCalculator* statistics,
int fs,
size_t num_channels)
- : Expand(background_noise, sync_buffer, random_vector, fs, num_channels) {
- }
+ : Expand(background_noise,
+ sync_buffer,
+ random_vector,
+ statistics,
+ fs,
+ num_channels) {}
virtual ~MockExpand() { Die(); }
MOCK_METHOD0(Die, void());
MOCK_METHOD0(Reset,
@@ -46,10 +51,11 @@ namespace webrtc {
class MockExpandFactory : public ExpandFactory {
public:
- MOCK_CONST_METHOD5(Create,
+ MOCK_CONST_METHOD6(Create,
Expand*(BackgroundNoise* background_noise,
SyncBuffer* sync_buffer,
RandomVector* random_vector,
+ StatisticsCalculator* statistics,
int fs,
size_t num_channels));
};
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/mock/mock_external_decoder_pcm16b.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/mock/mock_external_decoder_pcm16b.h
index d8c88561a2e..8cf89c083d6 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/mock/mock_external_decoder_pcm16b.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/mock/mock_external_decoder_pcm16b.h
@@ -28,7 +28,7 @@ using ::testing::Invoke;
class ExternalPcm16B : public AudioDecoder {
public:
ExternalPcm16B() {}
- virtual int Init() { return 0; }
+ void Reset() override {}
protected:
int DecodeInternal(const uint8_t* encoded,
@@ -36,15 +36,14 @@ class ExternalPcm16B : public AudioDecoder {
int sample_rate_hz,
int16_t* decoded,
SpeechType* speech_type) override {
- int16_t ret = WebRtcPcm16b_Decode(
- encoded, static_cast<int16_t>(encoded_len), decoded);
+ size_t ret = WebRtcPcm16b_Decode(encoded, encoded_len, decoded);
*speech_type = ConvertSpeechType(1);
- return ret;
+ return static_cast<int>(ret);
}
size_t Channels() const override { return 1; }
private:
- DISALLOW_COPY_AND_ASSIGN(ExternalPcm16B);
+ RTC_DISALLOW_COPY_AND_ASSIGN(ExternalPcm16B);
};
// Create a mock of ExternalPcm16B which delegates all calls to the real object.
@@ -59,8 +58,8 @@ class MockExternalPcm16B : public ExternalPcm16B {
.WillByDefault(Invoke(&real_, &ExternalPcm16B::HasDecodePlc));
ON_CALL(*this, DecodePlc(_, _))
.WillByDefault(Invoke(&real_, &ExternalPcm16B::DecodePlc));
- ON_CALL(*this, Init())
- .WillByDefault(Invoke(&real_, &ExternalPcm16B::Init));
+ ON_CALL(*this, Reset())
+ .WillByDefault(Invoke(&real_, &ExternalPcm16B::Reset));
ON_CALL(*this, IncomingPacket(_, _, _, _, _))
.WillByDefault(Invoke(&real_, &ExternalPcm16B::IncomingPacket));
ON_CALL(*this, ErrorCode())
@@ -79,9 +78,8 @@ class MockExternalPcm16B : public ExternalPcm16B {
MOCK_CONST_METHOD0(HasDecodePlc,
bool());
MOCK_METHOD2(DecodePlc,
- int(int num_frames, int16_t* decoded));
- MOCK_METHOD0(Init,
- int());
+ size_t(size_t num_frames, int16_t* decoded));
+ MOCK_METHOD0(Reset, void());
MOCK_METHOD5(IncomingPacket,
int(const uint8_t* payload, size_t payload_len,
uint16_t rtp_sequence_number, uint32_t rtp_timestamp,
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/mock/mock_packet_buffer.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/mock/mock_packet_buffer.h
index 0eb7edc9c55..97e54d83a5e 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/mock/mock_packet_buffer.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/mock/mock_packet_buffer.h
@@ -41,7 +41,7 @@ class MockPacketBuffer : public PacketBuffer {
MOCK_CONST_METHOD0(NextRtpHeader,
const RTPHeader*());
MOCK_METHOD1(GetNextPacket,
- Packet*(int* discard_count));
+ Packet*(size_t* discard_count));
MOCK_METHOD0(DiscardNextPacket,
int());
MOCK_METHOD2(DiscardOldPackets,
@@ -49,7 +49,7 @@ class MockPacketBuffer : public PacketBuffer {
MOCK_METHOD1(DiscardAllOldPackets,
int(uint32_t timestamp_limit));
MOCK_CONST_METHOD0(NumPacketsInBuffer,
- int());
+ size_t());
MOCK_METHOD1(IncrementWaitingTimes,
void(int));
MOCK_CONST_METHOD0(current_memory_bytes,
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq.gypi b/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq.gypi
index 2c58c998d6a..b7833bedd87 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq.gypi
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq.gypi
@@ -11,10 +11,6 @@
'codecs': [
'cng',
'g711',
- 'g722',
- 'ilbc',
- 'isac',
- 'isac_fix',
'pcm16b',
],
'neteq_defines': [],
@@ -23,6 +19,23 @@
'codecs': ['webrtc_opus',],
'neteq_defines': ['WEBRTC_CODEC_OPUS',],
}],
+ ['build_with_mozilla==0', {
+ 'conditions': [
+ ['target_arch=="arm"', {
+ 'codecs': ['isac_fix',],
+ 'neteq_defines': ['WEBRTC_CODEC_ISACFX',],
+ }, {
+ 'codecs': ['isac',],
+ 'neteq_defines': ['WEBRTC_CODEC_ISAC',],
+ }],
+ ],
+ 'codecs': ['g722',],
+ 'neteq_defines': ['WEBRTC_CODEC_G722',],
+ }],
+ ['build_with_mozilla==0 and build_with_chromium==0', {
+ 'codecs': ['ilbc',],
+ 'neteq_defines': ['WEBRTC_CODEC_ILBC',],
+ }],
],
'neteq_dependencies': [
'<@(codecs)',
@@ -120,6 +133,10 @@
'type': '<(gtest_target_type)',
'dependencies': [
'<@(codecs)',
+ 'g722',
+ 'ilbc',
+ 'isac',
+ 'isac_fix',
'audio_decoder_interface',
'neteq_unittest_tools',
'<(DEPTH)/testing/gtest.gyp:gtest',
@@ -127,12 +144,6 @@
'<(webrtc_root)/test/test.gyp:test_support_main',
],
'defines': [
- 'AUDIO_DECODER_UNITTEST',
- 'WEBRTC_CODEC_G722',
- 'WEBRTC_CODEC_ILBC',
- 'WEBRTC_CODEC_ISACFX',
- 'WEBRTC_CODEC_ISAC',
- 'WEBRTC_CODEC_PCM16',
'<@(neteq_defines)',
],
'sources': [
@@ -200,23 +211,6 @@
},
],
}],
- ['test_isolation_mode != "noop"', {
- 'targets': [
- {
- 'target_name': 'audio_decoder_unittests_run',
- 'type': 'none',
- 'dependencies': [
- 'audio_decoder_unittests',
- ],
- 'includes': [
- '../../../build/isolate.gypi',
- ],
- 'sources': [
- 'audio_decoder_unittests.isolate',
- ],
- },
- ],
- }],
],
}], # include_tests
], # conditions
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_external_decoder_unittest.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_external_decoder_unittest.cc
index 6f57a4ae6b3..2a116163bfd 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_external_decoder_unittest.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_external_decoder_unittest.cc
@@ -40,8 +40,6 @@ class NetEqExternalDecoderUnitTest : public test::NetEqExternalDecoderTest {
payload_size_bytes_(0),
last_send_time_(0),
last_arrival_time_(0) {
- // Init() will trigger external_decoder_->Init().
- EXPECT_CALL(*external_decoder_, Init());
// NetEq is not allowed to delete the external decoder (hence Times(0)).
EXPECT_CALL(*external_decoder_, Die()).Times(0);
Init();
@@ -169,7 +167,7 @@ class NetEqExternalDecoderUnitTest : public test::NetEqExternalDecoderTest {
class NetEqExternalVsInternalDecoderTest : public NetEqExternalDecoderUnitTest,
public ::testing::Test {
protected:
- static const int kMaxBlockSize = 480; // 10 ms @ 48 kHz.
+ static const size_t kMaxBlockSize = 480; // 10 ms @ 48 kHz.
NetEqExternalVsInternalDecoderTest()
: NetEqExternalDecoderUnitTest(kDecoderPCM16Bswb32kHz,
@@ -188,7 +186,7 @@ class NetEqExternalVsInternalDecoderTest : public NetEqExternalDecoderUnitTest,
void GetAndVerifyOutput() override {
NetEqOutputType output_type;
- int samples_per_channel;
+ size_t samples_per_channel;
int num_channels;
// Get audio from internal decoder instance.
EXPECT_EQ(NetEq::kOK,
@@ -198,12 +196,13 @@ class NetEqExternalVsInternalDecoderTest : public NetEqExternalDecoderUnitTest,
&num_channels,
&output_type));
EXPECT_EQ(1, num_channels);
- EXPECT_EQ(kOutputLengthMs * sample_rate_hz_ / 1000, samples_per_channel);
+ EXPECT_EQ(static_cast<size_t>(kOutputLengthMs * sample_rate_hz_ / 1000),
+ samples_per_channel);
// Get audio from external decoder instance.
samples_per_channel = GetOutputAudio(kMaxBlockSize, output_, &output_type);
- for (int i = 0; i < samples_per_channel; ++i) {
+ for (size_t i = 0; i < samples_per_channel; ++i) {
ASSERT_EQ(output_[i], output_internal_[i]) <<
"Diff in sample " << i << ".";
}
@@ -240,7 +239,7 @@ TEST_F(NetEqExternalVsInternalDecoderTest, RunTest) {
class LargeTimestampJumpTest : public NetEqExternalDecoderUnitTest,
public ::testing::Test {
protected:
- static const int kMaxBlockSize = 480; // 10 ms @ 48 kHz.
+ static const size_t kMaxBlockSize = 480; // 10 ms @ 48 kHz.
enum TestStates {
kInitialPhase,
@@ -293,7 +292,7 @@ class LargeTimestampJumpTest : public NetEqExternalDecoderUnitTest,
}
void GetAndVerifyOutput() override {
- int num_samples;
+ size_t num_samples;
NetEqOutputType output_type;
num_samples = GetOutputAudio(kMaxBlockSize, output_, &output_type);
UpdateState(output_type);
@@ -303,7 +302,7 @@ class LargeTimestampJumpTest : public NetEqExternalDecoderUnitTest,
return;
}
- for (int i = 0; i < num_samples; ++i) {
+ for (size_t i = 0; i < num_samples; ++i) {
if (output_[i] != 0)
return;
}
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_impl.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_impl.cc
index 6598a790c52..7c049b0152b 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_impl.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_impl.cc
@@ -15,6 +15,9 @@
#include <algorithm>
+#include "webrtc/base/checks.h"
+#include "webrtc/base/logging.h"
+#include "webrtc/base/safe_conversions.h"
#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
#include "webrtc/modules/audio_coding/codecs/audio_decoder.h"
#include "webrtc/modules/audio_coding/neteq/accelerate.h"
@@ -40,7 +43,6 @@
#include "webrtc/modules/audio_coding/neteq/timestamp_scaler.h"
#include "webrtc/modules/interface/module_common_types.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
-#include "webrtc/system_wrappers/interface/logging.h"
// Modify the code to obtain backwards bit-exactness. Once bit-exactness is no
// longer required, this #define should be removed (and the code that it
@@ -102,10 +104,9 @@ NetEqImpl::NetEqImpl(const NetEq::Config& config,
"Changing to 8000 Hz.";
fs = 8000;
}
- LOG(LS_VERBOSE) << "Create NetEqImpl object with fs = " << fs << ".";
fs_hz_ = fs;
fs_mult_ = fs / 8000;
- output_size_samples_ = kOutputSizeMs * 8 * fs_mult_;
+ output_size_samples_ = static_cast<size_t>(kOutputSizeMs * 8 * fs_mult_);
decoder_frame_length_ = 3 * output_size_samples_;
WebRtcSpl_Init();
if (create_components) {
@@ -113,9 +114,7 @@ NetEqImpl::NetEqImpl(const NetEq::Config& config,
}
}
-NetEqImpl::~NetEqImpl() {
- LOG(LS_INFO) << "Deleting NetEqImpl object.";
-}
+NetEqImpl::~NetEqImpl() = default;
int NetEqImpl::InsertPacket(const WebRtcRTPHeader& rtp_header,
const uint8_t* payload,
@@ -130,7 +129,6 @@ int NetEqImpl::InsertPacket(const WebRtcRTPHeader& rtp_header,
int error = InsertPacketInternal(rtp_header, payload, length_bytes,
receive_timestamp, false);
if (error != 0) {
- LOG_FERR1(LS_WARNING, InsertPacketInternal, error);
error_code_ = error;
return kFail;
}
@@ -151,7 +149,6 @@ int NetEqImpl::InsertSyncPacket(const WebRtcRTPHeader& rtp_header,
rtp_header, kSyncPayload, sizeof(kSyncPayload), receive_timestamp, true);
if (error != 0) {
- LOG_FERR1(LS_WARNING, InsertPacketInternal, error);
error_code_ = error;
return kFail;
}
@@ -159,7 +156,7 @@ int NetEqImpl::InsertSyncPacket(const WebRtcRTPHeader& rtp_header,
}
int NetEqImpl::GetAudio(size_t max_length, int16_t* output_audio,
- int* samples_per_channel, int* num_channels,
+ size_t* samples_per_channel, int* num_channels,
NetEqOutputType* type) {
CriticalSectionScoped lock(crit_sect_.get());
LOG(LS_VERBOSE) << "GetAudio";
@@ -168,7 +165,6 @@ int NetEqImpl::GetAudio(size_t max_length, int16_t* output_audio,
LOG(LS_VERBOSE) << "Produced " << *samples_per_channel <<
" samples/channel for " << *num_channels << " channel(s)";
if (error != 0) {
- LOG_FERR1(LS_WARNING, GetAudioInternal, error);
error_code_ = error;
return kFail;
}
@@ -181,11 +177,10 @@ int NetEqImpl::GetAudio(size_t max_length, int16_t* output_audio,
int NetEqImpl::RegisterPayloadType(enum NetEqDecoder codec,
uint8_t rtp_payload_type) {
CriticalSectionScoped lock(crit_sect_.get());
- LOG_API2(static_cast<int>(rtp_payload_type), codec);
+ LOG(LS_VERBOSE) << "RegisterPayloadType "
+ << static_cast<int>(rtp_payload_type) << " " << codec;
int ret = decoder_database_->RegisterPayload(rtp_payload_type, codec);
if (ret != DecoderDatabase::kOK) {
- LOG_FERR2(LS_WARNING, RegisterPayload, static_cast<int>(rtp_payload_type),
- codec);
switch (ret) {
case DecoderDatabase::kInvalidRtpPayloadType:
error_code_ = kInvalidRtpPayloadType;
@@ -209,7 +204,8 @@ int NetEqImpl::RegisterExternalDecoder(AudioDecoder* decoder,
uint8_t rtp_payload_type,
int sample_rate_hz) {
CriticalSectionScoped lock(crit_sect_.get());
- LOG_API2(static_cast<int>(rtp_payload_type), codec);
+ LOG(LS_VERBOSE) << "RegisterExternalDecoder "
+ << static_cast<int>(rtp_payload_type) << " " << codec;
if (!decoder) {
LOG(LS_ERROR) << "Cannot register external decoder with NULL pointer";
assert(false);
@@ -218,8 +214,6 @@ int NetEqImpl::RegisterExternalDecoder(AudioDecoder* decoder,
int ret = decoder_database_->InsertExternal(rtp_payload_type, codec,
sample_rate_hz, decoder);
if (ret != DecoderDatabase::kOK) {
- LOG_FERR2(LS_WARNING, InsertExternal, static_cast<int>(rtp_payload_type),
- codec);
switch (ret) {
case DecoderDatabase::kInvalidRtpPayloadType:
error_code_ = kInvalidRtpPayloadType;
@@ -246,7 +240,6 @@ int NetEqImpl::RegisterExternalDecoder(AudioDecoder* decoder,
int NetEqImpl::RemovePayloadType(uint8_t rtp_payload_type) {
CriticalSectionScoped lock(crit_sect_.get());
- LOG_API1(static_cast<int>(rtp_payload_type));
int ret = decoder_database_->Remove(rtp_payload_type);
if (ret == DecoderDatabase::kOK) {
return kOK;
@@ -255,7 +248,6 @@ int NetEqImpl::RemovePayloadType(uint8_t rtp_payload_type) {
} else {
error_code_ = kOtherError;
}
- LOG_FERR1(LS_WARNING, Remove, static_cast<int>(rtp_payload_type));
return kFail;
}
@@ -291,8 +283,20 @@ int NetEqImpl::TargetDelay() {
return kNotImplemented;
}
-int NetEqImpl::CurrentDelay() {
- return kNotImplemented;
+int NetEqImpl::CurrentDelayMs() const {
+ CriticalSectionScoped lock(crit_sect_.get());
+ if (fs_hz_ == 0)
+ return 0;
+ // Sum up the samples in the packet buffer with the future length of the sync
+ // buffer, and divide the sum by the sample rate.
+ const size_t delay_samples =
+ packet_buffer_->NumSamplesInBuffer(decoder_database_.get(),
+ decoder_frame_length_) +
+ sync_buffer_->FutureLength();
+ // The division below will truncate.
+ const int delay_ms =
+ static_cast<int>(delay_samples) / rtc::CheckedDivExact(fs_hz_, 1000);
+ return delay_ms;
}
// Deprecated.
@@ -315,10 +319,10 @@ NetEqPlayoutMode NetEqImpl::PlayoutMode() const {
int NetEqImpl::NetworkStatistics(NetEqNetworkStatistics* stats) {
CriticalSectionScoped lock(crit_sect_.get());
assert(decoder_database_.get());
- const int total_samples_in_buffers =
+ const size_t total_samples_in_buffers =
packet_buffer_->NumSamplesInBuffer(decoder_database_.get(),
decoder_frame_length_) +
- static_cast<int>(sync_buffer_->FutureLength());
+ sync_buffer_->FutureLength();
assert(delay_manager_.get());
assert(decision_logic_.get());
stats_.GetNetworkStatistics(fs_hz_, total_samples_in_buffers,
@@ -327,11 +331,6 @@ int NetEqImpl::NetworkStatistics(NetEqNetworkStatistics* stats) {
return 0;
}
-void NetEqImpl::WaitingTimes(std::vector<int>* waiting_times) {
- CriticalSectionScoped lock(crit_sect_.get());
- stats_.WaitingTimes(waiting_times);
-}
-
void NetEqImpl::GetRtcpStatistics(RtcpStatistics* stats) {
CriticalSectionScoped lock(crit_sect_.get());
if (stats) {
@@ -389,7 +388,7 @@ int NetEqImpl::LastDecoderError() {
void NetEqImpl::FlushBuffers() {
CriticalSectionScoped lock(crit_sect_.get());
- LOG_API0();
+ LOG(LS_VERBOSE) << "FlushBuffers";
packet_buffer_->Flush();
assert(sync_buffer_.get());
assert(expand_.get());
@@ -517,7 +516,6 @@ int NetEqImpl::InsertPacketInternal(const WebRtcRTPHeader& rtp_header,
if (decoder_database_->IsRed(main_header.payloadType)) {
assert(!is_sync_packet); // We had a sanity check for this.
if (payload_splitter_->SplitRed(&packet_list) != PayloadSplitter::kOK) {
- LOG_FERR1(LS_WARNING, SplitRed, packet_list.size());
PacketBuffer::DeleteAllPackets(&packet_list);
return kRedundancySplitError;
}
@@ -532,7 +530,6 @@ int NetEqImpl::InsertPacketInternal(const WebRtcRTPHeader& rtp_header,
// Check payload types.
if (decoder_database_->CheckPayloadTypes(packet_list) ==
DecoderDatabase::kDecoderNotFound) {
- LOG_FERR1(LS_WARNING, CheckPayloadTypes, packet_list.size());
PacketBuffer::DeleteAllPackets(&packet_list);
return kUnknownRtpPayloadType;
}
@@ -556,13 +553,10 @@ int NetEqImpl::InsertPacketInternal(const WebRtcRTPHeader& rtp_header,
current_packet->payload_length,
&event);
if (ret != DtmfBuffer::kOK) {
- LOG_FERR2(LS_WARNING, ParseEvent, ret,
- current_packet->payload_length);
PacketBuffer::DeleteAllPackets(&packet_list);
return kDtmfParsingError;
}
if (dtmf_buffer_->InsertEvent(event) != DtmfBuffer::kOK) {
- LOG_FERR0(LS_WARNING, InsertEvent);
PacketBuffer::DeleteAllPackets(&packet_list);
return kDtmfInsertError;
}
@@ -578,7 +572,6 @@ int NetEqImpl::InsertPacketInternal(const WebRtcRTPHeader& rtp_header,
// Check for FEC in packets, and separate payloads into several packets.
int ret = payload_splitter_->SplitFec(&packet_list, decoder_database_.get());
if (ret != PayloadSplitter::kOK) {
- LOG_FERR1(LS_WARNING, SplitFec, packet_list.size());
PacketBuffer::DeleteAllPackets(&packet_list);
switch (ret) {
case PayloadSplitter::kUnknownPayloadType:
@@ -593,7 +586,6 @@ int NetEqImpl::InsertPacketInternal(const WebRtcRTPHeader& rtp_header,
// sync-packets.
ret = payload_splitter_->SplitAudio(&packet_list, *decoder_database_);
if (ret != PayloadSplitter::kOK) {
- LOG_FERR1(LS_WARNING, SplitAudio, packet_list.size());
PacketBuffer::DeleteAllPackets(&packet_list);
switch (ret) {
case PayloadSplitter::kUnknownPayloadType:
@@ -620,7 +612,8 @@ int NetEqImpl::InsertPacketInternal(const WebRtcRTPHeader& rtp_header,
}
// Insert packets in buffer.
- int temp_bufsize = packet_buffer_->NumPacketsInBuffer();
+ const size_t buffer_length_before_insert =
+ packet_buffer_->NumPacketsInBuffer();
ret = packet_buffer_->InsertPacketList(
&packet_list,
*decoder_database_,
@@ -630,9 +623,7 @@ int NetEqImpl::InsertPacketInternal(const WebRtcRTPHeader& rtp_header,
// Reset DSP timestamp etc. if packet buffer flushed.
new_codec_ = true;
update_sample_rate_and_channels = true;
- LOG_F(LS_WARNING) << "Packet buffer flushed";
} else if (ret != PacketBuffer::kOK) {
- LOG_FERR1(LS_WARNING, InsertPacketList, packet_list.size());
PacketBuffer::DeleteAllPackets(&packet_list);
return kOtherError;
}
@@ -678,13 +669,18 @@ int NetEqImpl::InsertPacketInternal(const WebRtcRTPHeader& rtp_header,
delay_manager_->LastDecoderType(dec_info->codec_type);
if (delay_manager_->last_pack_cng_or_dtmf() == 0) {
// Calculate the total speech length carried in each packet.
- temp_bufsize = packet_buffer_->NumPacketsInBuffer() - temp_bufsize;
- temp_bufsize *= decoder_frame_length_;
-
- if ((temp_bufsize > 0) &&
- (temp_bufsize != decision_logic_->packet_length_samples())) {
- decision_logic_->set_packet_length_samples(temp_bufsize);
- delay_manager_->SetPacketAudioLength((1000 * temp_bufsize) / fs_hz_);
+ const size_t buffer_length_after_insert =
+ packet_buffer_->NumPacketsInBuffer();
+
+ if (buffer_length_after_insert > buffer_length_before_insert) {
+ const size_t packet_length_samples =
+ (buffer_length_after_insert - buffer_length_before_insert) *
+ decoder_frame_length_;
+ if (packet_length_samples != decision_logic_->packet_length_samples()) {
+ decision_logic_->set_packet_length_samples(packet_length_samples);
+ delay_manager_->SetPacketAudioLength(
+ rtc::checked_cast<int>((1000 * packet_length_samples) / fs_hz_));
+ }
}
// Update statistics.
@@ -707,7 +703,7 @@ int NetEqImpl::InsertPacketInternal(const WebRtcRTPHeader& rtp_header,
int NetEqImpl::GetAudioInternal(size_t max_length,
int16_t* output,
- int* samples_per_channel,
+ size_t* samples_per_channel,
int* num_channels) {
PacketList packet_list;
DtmfEvent dtmf_event;
@@ -716,8 +712,6 @@ int NetEqImpl::GetAudioInternal(size_t max_length,
int return_value = GetDecision(&operation, &packet_list, &dtmf_event,
&play_dtmf);
if (return_value != 0) {
- LOG_FERR1(LS_WARNING, GetDecision, return_value);
- assert(false);
last_mode_ = kModeError;
return return_value;
}
@@ -732,7 +726,7 @@ int NetEqImpl::GetAudioInternal(size_t max_length,
assert(vad_.get());
bool sid_frame_available =
(operation == kRfc3389Cng && !packet_list.empty());
- vad_->Update(decoded_buffer_.get(), length, speech_type,
+ vad_->Update(decoded_buffer_.get(), static_cast<size_t>(length), speech_type,
sid_frame_available, fs_hz_);
algorithm_buffer_->Clear();
@@ -771,7 +765,7 @@ int NetEqImpl::GetAudioInternal(size_t max_length,
// This handles the case when there is no transmission and the decoder
// should produce internal comfort noise.
// TODO(hlundin): Write test for codec-internal CNG.
- DoCodecInternalCng();
+ DoCodecInternalCng(decoded_buffer_.get(), length);
break;
}
case kDtmf: {
@@ -807,7 +801,7 @@ int NetEqImpl::GetAudioInternal(size_t max_length,
break;
}
case kUndefined: {
- LOG_F(LS_ERROR) << "Invalid operation kUndefined.";
+ LOG(LS_ERROR) << "Invalid operation kUndefined.";
assert(false); // This should not happen.
last_mode_ = kModeError;
return kInvalidOperation;
@@ -831,18 +825,29 @@ int NetEqImpl::GetAudioInternal(size_t max_length,
LOG(LS_WARNING) << "Output array is too short. " << max_length << " < " <<
output_size_samples_ << " * " << sync_buffer_->Channels();
num_output_samples = max_length;
- num_output_samples_per_channel = static_cast<int>(
- max_length / sync_buffer_->Channels());
+ num_output_samples_per_channel = max_length / sync_buffer_->Channels();
}
- int samples_from_sync = static_cast<int>(
+ const size_t samples_from_sync =
sync_buffer_->GetNextAudioInterleaved(num_output_samples_per_channel,
- output));
+ output);
*num_channels = static_cast<int>(sync_buffer_->Channels());
LOG(LS_VERBOSE) << "Sync buffer (" << *num_channels << " channel(s)):" <<
" insert " << algorithm_buffer_->Size() << " samples, extract " <<
samples_from_sync << " samples";
+ if (sync_buffer_->FutureLength() < expand_->overlap_length()) {
+ // The sync buffer should always contain |overlap_length| samples, but now
+ // too many samples have been extracted. Reinstall the |overlap_length|
+ // lookahead by moving the index.
+ const size_t missing_lookahead_samples =
+ expand_->overlap_length() - sync_buffer_->FutureLength();
+ RTC_DCHECK_GE(sync_buffer_->next_index(), missing_lookahead_samples);
+ sync_buffer_->set_next_index(sync_buffer_->next_index() -
+ missing_lookahead_samples);
+ }
if (samples_from_sync != output_size_samples_) {
- LOG_F(LS_ERROR) << "samples_from_sync != output_size_samples_";
+ LOG(LS_ERROR) << "samples_from_sync (" << samples_from_sync
+ << ") != output_size_samples_ (" << output_size_samples_
+ << ")";
// TODO(minyue): treatment of under-run, filling zeros
memset(output, 0, num_output_samples * sizeof(int16_t));
*samples_per_channel = output_size_samples_;
@@ -851,7 +856,7 @@ int NetEqImpl::GetAudioInternal(size_t max_length,
*samples_per_channel = output_size_samples_;
// Should always have overlap samples left in the |sync_buffer_|.
- assert(sync_buffer_->FutureLength() >= expand_->overlap_length());
+ RTC_DCHECK_GE(sync_buffer_->FutureLength(), expand_->overlap_length());
if (play_dtmf) {
return_value = DtmfOverdub(dtmf_event, sync_buffer_->Channels(), output);
@@ -940,7 +945,8 @@ int NetEqImpl::GetDecision(Operations* operation,
last_mode_ == kModePreemptiveExpandSuccess ||
last_mode_ == kModePreemptiveExpandLowEnergy) {
// Subtract (samples_left + output_size_samples_) from sampleMemory.
- decision_logic_->AddSampleMemory(-(samples_left + output_size_samples_));
+ decision_logic_->AddSampleMemory(
+ -(samples_left + rtc::checked_cast<int>(output_size_samples_)));
}
// Check if it is time to play a DTMF event.
@@ -965,8 +971,10 @@ int NetEqImpl::GetDecision(Operations* operation,
// Check if we already have enough samples in the |sync_buffer_|. If so,
// change decision to normal, unless the decision was merge, accelerate, or
// preemptive expand.
- if (samples_left >= output_size_samples_ && *operation != kMerge &&
- *operation != kAccelerate && *operation != kFastAccelerate &&
+ if (samples_left >= rtc::checked_cast<int>(output_size_samples_) &&
+ *operation != kMerge &&
+ *operation != kAccelerate &&
+ *operation != kFastAccelerate &&
*operation != kPreemptiveExpand) {
*operation = kNormal;
return 0;
@@ -981,9 +989,8 @@ int NetEqImpl::GetDecision(Operations* operation,
if (*play_dtmf && !header) {
timestamp_ = dtmf_event->timestamp;
} else {
- assert(header);
if (!header) {
- LOG_F(LS_ERROR) << "Packet missing where it shouldn't.";
+ LOG(LS_ERROR) << "Packet missing where it shouldn't.";
return -1;
}
timestamp_ = header->timestamp;
@@ -1014,10 +1021,10 @@ int NetEqImpl::GetDecision(Operations* operation,
stats_.ResetMcu();
}
- int required_samples = output_size_samples_;
- const int samples_10_ms = 80 * fs_mult_;
- const int samples_20_ms = 2 * samples_10_ms;
- const int samples_30_ms = 3 * samples_10_ms;
+ size_t required_samples = output_size_samples_;
+ const size_t samples_10_ms = static_cast<size_t>(80 * fs_mult_);
+ const size_t samples_20_ms = 2 * samples_10_ms;
+ const size_t samples_30_ms = 3 * samples_10_ms;
switch (*operation) {
case kExpand: {
@@ -1046,17 +1053,17 @@ int NetEqImpl::GetDecision(Operations* operation,
case kAccelerate:
case kFastAccelerate: {
// In order to do an accelerate we need at least 30 ms of audio data.
- if (samples_left >= samples_30_ms) {
+ if (samples_left >= static_cast<int>(samples_30_ms)) {
// Already have enough data, so we do not need to extract any more.
decision_logic_->set_sample_memory(samples_left);
decision_logic_->set_prev_time_scale(true);
return 0;
- } else if (samples_left >= samples_10_ms &&
+ } else if (samples_left >= static_cast<int>(samples_10_ms) &&
decoder_frame_length_ >= samples_30_ms) {
// Avoid decoding more data as it might overflow the playout buffer.
*operation = kNormal;
return 0;
- } else if (samples_left < samples_20_ms &&
+ } else if (samples_left < static_cast<int>(samples_20_ms) &&
decoder_frame_length_ < samples_30_ms) {
// Build up decoded data by decoding at least 20 ms of audio data. Do
// not perform accelerate yet, but wait until we only need to do one
@@ -1074,8 +1081,8 @@ int NetEqImpl::GetDecision(Operations* operation,
case kPreemptiveExpand: {
// In order to do a preemptive expand we need at least 30 ms of decoded
// audio data.
- if ((samples_left >= samples_30_ms) ||
- (samples_left >= samples_10_ms &&
+ if ((samples_left >= static_cast<int>(samples_30_ms)) ||
+ (samples_left >= static_cast<int>(samples_10_ms) &&
decoder_frame_length_ >= samples_30_ms)) {
// Already have enough data, so we do not need to extract any more.
// Or, avoid decoding more data as it might overflow the playout buffer.
@@ -1084,7 +1091,7 @@ int NetEqImpl::GetDecision(Operations* operation,
decision_logic_->set_prev_time_scale(true);
return 0;
}
- if (samples_left < samples_20_ms &&
+ if (samples_left < static_cast<int>(samples_20_ms) &&
decoder_frame_length_ < samples_30_ms) {
// Build up decoded data by decoding at least 20 ms of audio data.
// Still try to perform preemptive expand.
@@ -1129,7 +1136,6 @@ int NetEqImpl::GetDecision(Operations* operation,
extracted_samples = ExtractPackets(required_samples, packet_list);
if (extracted_samples < 0) {
- LOG_F(LS_WARNING) << "Failed to extract packets from buffer.";
return kPacketBufferCorruption;
}
}
@@ -1142,7 +1148,7 @@ int NetEqImpl::GetDecision(Operations* operation,
if (*operation == kAccelerate || *operation == kFastAccelerate) {
// Check that we have enough data (30ms) to do accelerate.
- if (extracted_samples + samples_left < samples_30_ms) {
+ if (extracted_samples + samples_left < static_cast<int>(samples_30_ms)) {
// TODO(hlundin): Write test for this.
// Not enough, do normal operation instead.
*operation = kNormal;
@@ -1157,7 +1163,11 @@ int NetEqImpl::Decode(PacketList* packet_list, Operations* operation,
int* decoded_length,
AudioDecoder::SpeechType* speech_type) {
*speech_type = AudioDecoder::kSpeech;
- AudioDecoder* decoder = NULL;
+
+ // When packet_list is empty, we may be in kCodecInternalCng mode, and for
+ // that we use current active decoder.
+ AudioDecoder* decoder = decoder_database_->GetActiveDecoder();
+
if (!packet_list->empty()) {
const Packet* packet = packet_list->front();
uint8_t payload_type = packet->header.payloadType;
@@ -1165,7 +1175,8 @@ int NetEqImpl::Decode(PacketList* packet_list, Operations* operation,
decoder = decoder_database_->GetDecoder(payload_type);
assert(decoder);
if (!decoder) {
- LOG_FERR1(LS_WARNING, GetDecoder, static_cast<int>(payload_type));
+ LOG(LS_WARNING) << "Unknown payload type "
+ << static_cast<int>(payload_type);
PacketBuffer::DeleteAllPackets(packet_list);
return kDecoderNotFound;
}
@@ -1177,7 +1188,8 @@ int NetEqImpl::Decode(PacketList* packet_list, Operations* operation,
->GetDecoderInfo(payload_type);
assert(decoder_info);
if (!decoder_info) {
- LOG_FERR1(LS_WARNING, GetDecoderInfo, static_cast<int>(payload_type));
+ LOG(LS_WARNING) << "Unknown payload type "
+ << static_cast<int>(payload_type);
PacketBuffer::DeleteAllPackets(packet_list);
return kDecoderNotFound;
}
@@ -1196,15 +1208,14 @@ int NetEqImpl::Decode(PacketList* packet_list, Operations* operation,
if (reset_decoder_) {
// TODO(hlundin): Write test for this.
- // Reset decoder.
- if (decoder) {
- decoder->Init();
- }
+ if (decoder)
+ decoder->Reset();
+
// Reset comfort noise decoder.
AudioDecoder* cng_decoder = decoder_database_->GetActiveCngDecoder();
- if (cng_decoder) {
- cng_decoder->Init();
- }
+ if (cng_decoder)
+ cng_decoder->Reset();
+
reset_decoder_ = false;
}
@@ -1224,8 +1235,14 @@ int NetEqImpl::Decode(PacketList* packet_list, Operations* operation,
decoder->DecodePlc(1, &decoded_buffer_[*decoded_length]);
}
- int return_value = DecodeLoop(packet_list, operation, decoder,
- decoded_length, speech_type);
+ int return_value;
+ if (*operation == kCodecInternalCng) {
+ RTC_DCHECK(packet_list->empty());
+ return_value = DecodeCng(decoder, decoded_length, speech_type);
+ } else {
+ return_value = DecodeLoop(packet_list, *operation, decoder,
+ decoded_length, speech_type);
+ }
if (*decoded_length < 0) {
// Error returned from the decoder.
@@ -1239,11 +1256,12 @@ int NetEqImpl::Decode(PacketList* packet_list, Operations* operation,
// Got some error code from the decoder.
decoder_error_code_ = error_code;
return_value = kDecoderErrorCode;
+ LOG(LS_WARNING) << "Decoder returned error code: " << error_code;
} else {
// Decoder does not implement error codes. Return generic error.
return_value = kOtherDecoderError;
+ LOG(LS_WARNING) << "Decoder error (no error code)";
}
- LOG_FERR2(LS_WARNING, DecodeLoop, error_code, packet_list->size());
*operation = kExpand; // Do expansion to get data instead.
}
if (*speech_type != AudioDecoder::kComfortNoise) {
@@ -1258,13 +1276,45 @@ int NetEqImpl::Decode(PacketList* packet_list, Operations* operation,
return return_value;
}
-int NetEqImpl::DecodeLoop(PacketList* packet_list, Operations* operation,
+int NetEqImpl::DecodeCng(AudioDecoder* decoder, int* decoded_length,
+ AudioDecoder::SpeechType* speech_type) {
+ if (!decoder) {
+ // This happens when active decoder is not defined.
+ *decoded_length = -1;
+ return 0;
+ }
+
+ while (*decoded_length < rtc::checked_cast<int>(output_size_samples_)) {
+ const int length = decoder->Decode(
+ nullptr, 0, fs_hz_,
+ (decoded_buffer_length_ - *decoded_length) * sizeof(int16_t),
+ &decoded_buffer_[*decoded_length], speech_type);
+ if (length > 0) {
+ *decoded_length += length;
+ LOG(LS_VERBOSE) << "Decoded " << length << " CNG samples";
+ } else {
+ // Error.
+ LOG(LS_WARNING) << "Failed to decode CNG";
+ *decoded_length = -1;
+ break;
+ }
+ if (*decoded_length > static_cast<int>(decoded_buffer_length_)) {
+ // Guard against overflow.
+ LOG(LS_WARNING) << "Decoded too much CNG.";
+ return kDecodedTooMuch;
+ }
+ }
+ return 0;
+}
+
+int NetEqImpl::DecodeLoop(PacketList* packet_list, const Operations& operation,
AudioDecoder* decoder, int* decoded_length,
AudioDecoder::SpeechType* speech_type) {
Packet* packet = NULL;
if (!packet_list->empty()) {
packet = packet_list->front();
}
+
// Do decoding.
while (packet &&
!decoder_database_->IsComfortNoise(packet->header.payloadType)) {
@@ -1273,9 +1323,9 @@ int NetEqImpl::DecodeLoop(PacketList* packet_list, Operations* operation,
// number decoder channels.
assert(sync_buffer_->Channels() == decoder->Channels());
assert(decoded_buffer_length_ >= kMaxFrameSize * decoder->Channels());
- assert(*operation == kNormal || *operation == kAccelerate ||
- *operation == kFastAccelerate || *operation == kMerge ||
- *operation == kPreemptiveExpand);
+ assert(operation == kNormal || operation == kAccelerate ||
+ operation == kFastAccelerate || operation == kMerge ||
+ operation == kPreemptiveExpand);
packet_list->pop_front();
size_t payload_length = packet->payload_length;
int decode_length;
@@ -1290,7 +1340,7 @@ int NetEqImpl::DecodeLoop(PacketList* packet_list, Operations* operation,
memset(&decoded_buffer_[*decoded_length], 0,
decoder_frame_length_ * decoder->Channels() *
sizeof(decoded_buffer_[0]));
- decode_length = decoder_frame_length_;
+ decode_length = rtc::checked_cast<int>(decoder_frame_length_);
} else if (!packet->primary) {
// This is a redundant payload; call the special decoder method.
LOG(LS_VERBOSE) << "Decoding packet (redundant):" <<
@@ -1323,20 +1373,20 @@ int NetEqImpl::DecodeLoop(PacketList* packet_list, Operations* operation,
*decoded_length += decode_length;
// Update |decoder_frame_length_| with number of samples per channel.
decoder_frame_length_ =
- decode_length / static_cast<int>(decoder->Channels());
+ static_cast<size_t>(decode_length) / decoder->Channels();
LOG(LS_VERBOSE) << "Decoded " << decode_length << " samples ("
<< decoder->Channels() << " channel(s) -> "
<< decoder_frame_length_ << " samples per channel)";
} else if (decode_length < 0) {
// Error.
- LOG_FERR2(LS_WARNING, Decode, decode_length, payload_length);
+ LOG(LS_WARNING) << "Decode " << decode_length << " " << payload_length;
*decoded_length = -1;
PacketBuffer::DeleteAllPackets(packet_list);
break;
}
if (*decoded_length > static_cast<int>(decoded_buffer_length_)) {
// Guard against overflow.
- LOG_F(LS_WARNING) << "Decoded too much.";
+ LOG(LS_WARNING) << "Decoded too much.";
PacketBuffer::DeleteAllPackets(packet_list);
return kDecodedTooMuch;
}
@@ -1382,11 +1432,11 @@ void NetEqImpl::DoMerge(int16_t* decoded_buffer, size_t decoded_length,
AudioDecoder::SpeechType speech_type, bool play_dtmf) {
assert(mute_factor_array_.get());
assert(merge_.get());
- int new_length = merge_->Process(decoded_buffer, decoded_length,
- mute_factor_array_.get(),
- algorithm_buffer_.get());
- int expand_length_correction = new_length -
- static_cast<int>(decoded_length / algorithm_buffer_->Channels());
+ size_t new_length = merge_->Process(decoded_buffer, decoded_length,
+ mute_factor_array_.get(),
+ algorithm_buffer_.get());
+ size_t expand_length_correction = new_length -
+ decoded_length / algorithm_buffer_->Channels();
// Update in-call and post-call statistics.
if (expand_->MuteFactor(0) == 0) {
@@ -1410,10 +1460,10 @@ void NetEqImpl::DoMerge(int16_t* decoded_buffer, size_t decoded_length,
int NetEqImpl::DoExpand(bool play_dtmf) {
while ((sync_buffer_->FutureLength() - expand_->overlap_length()) <
- static_cast<size_t>(output_size_samples_)) {
+ output_size_samples_) {
algorithm_buffer_->Clear();
int return_value = expand_->Process(algorithm_buffer_.get());
- int length = static_cast<int>(algorithm_buffer_->Size());
+ size_t length = algorithm_buffer_->Size();
// Update in-call and post-call statistics.
if (expand_->MuteFactor(0) == 0) {
@@ -1444,7 +1494,8 @@ int NetEqImpl::DoAccelerate(int16_t* decoded_buffer,
AudioDecoder::SpeechType speech_type,
bool play_dtmf,
bool fast_accelerate) {
- const size_t required_samples = 240 * fs_mult_; // Must have 30 ms.
+ const size_t required_samples =
+ static_cast<size_t>(240 * fs_mult_); // Must have 30 ms.
size_t borrowed_samples_per_channel = 0;
size_t num_channels = algorithm_buffer_->Channels();
size_t decoded_length_per_channel = decoded_length / num_channels;
@@ -1460,7 +1511,7 @@ int NetEqImpl::DoAccelerate(int16_t* decoded_buffer,
decoded_length = required_samples * num_channels;
}
- int16_t samples_removed;
+ size_t samples_removed;
Accelerate::ReturnCodes return_code =
accelerate_->Process(decoded_buffer, decoded_length, fast_accelerate,
algorithm_buffer_.get(), &samples_removed);
@@ -1517,20 +1568,20 @@ int NetEqImpl::DoPreemptiveExpand(int16_t* decoded_buffer,
size_t decoded_length,
AudioDecoder::SpeechType speech_type,
bool play_dtmf) {
- const size_t required_samples = 240 * fs_mult_; // Must have 30 ms.
+ const size_t required_samples =
+ static_cast<size_t>(240 * fs_mult_); // Must have 30 ms.
size_t num_channels = algorithm_buffer_->Channels();
- int borrowed_samples_per_channel = 0;
- int old_borrowed_samples_per_channel = 0;
+ size_t borrowed_samples_per_channel = 0;
+ size_t old_borrowed_samples_per_channel = 0;
size_t decoded_length_per_channel = decoded_length / num_channels;
if (decoded_length_per_channel < required_samples) {
// Must move data from the |sync_buffer_| in order to get 30 ms.
- borrowed_samples_per_channel = static_cast<int>(required_samples -
- decoded_length_per_channel);
+ borrowed_samples_per_channel =
+ required_samples - decoded_length_per_channel;
// Calculate how many of these were already played out.
- const int future_length = static_cast<int>(sync_buffer_->FutureLength());
old_borrowed_samples_per_channel =
- (borrowed_samples_per_channel > future_length) ?
- (borrowed_samples_per_channel - future_length) : 0;
+ (borrowed_samples_per_channel > sync_buffer_->FutureLength()) ?
+ (borrowed_samples_per_channel - sync_buffer_->FutureLength()) : 0;
memmove(&decoded_buffer[borrowed_samples_per_channel * num_channels],
decoded_buffer,
sizeof(int16_t) * decoded_length);
@@ -1539,9 +1590,9 @@ int NetEqImpl::DoPreemptiveExpand(int16_t* decoded_buffer,
decoded_length = required_samples * num_channels;
}
- int16_t samples_added;
+ size_t samples_added;
PreemptiveExpand::ReturnCodes return_code = preemptive_expand_->Process(
- decoded_buffer, static_cast<int>(decoded_length),
+ decoded_buffer, decoded_length,
old_borrowed_samples_per_channel,
algorithm_buffer_.get(), &samples_added);
stats_.PreemptiveExpandedSamples(samples_added);
@@ -1614,7 +1665,6 @@ int NetEqImpl::DoRfc3389Cng(PacketList* packet_list, bool play_dtmf) {
// UpdateParameters() deletes |packet|.
if (comfort_noise_->UpdateParameters(packet) ==
ComfortNoise::kInternalError) {
- LOG_FERR0(LS_WARNING, UpdateParameters);
algorithm_buffer_->Zeros(output_size_samples_);
return -comfort_noise_->internal_error_code();
}
@@ -1627,31 +1677,20 @@ int NetEqImpl::DoRfc3389Cng(PacketList* packet_list, bool play_dtmf) {
dtmf_tone_generator_->Reset();
}
if (cn_return == ComfortNoise::kInternalError) {
- LOG_FERR1(LS_WARNING, comfort_noise_->Generate, cn_return);
decoder_error_code_ = comfort_noise_->internal_error_code();
return kComfortNoiseErrorCode;
} else if (cn_return == ComfortNoise::kUnknownPayloadType) {
- LOG_FERR1(LS_WARNING, comfort_noise_->Generate, cn_return);
return kUnknownRtpPayloadType;
}
return 0;
}
-void NetEqImpl::DoCodecInternalCng() {
- int length = 0;
- // TODO(hlundin): Will probably need a longer buffer for multi-channel.
- int16_t decoded_buffer[kMaxFrameSize];
- AudioDecoder* decoder = decoder_database_->GetActiveDecoder();
- if (decoder) {
- const uint8_t* dummy_payload = NULL;
- AudioDecoder::SpeechType speech_type;
- length = decoder->Decode(
- dummy_payload, 0, fs_hz_, kMaxFrameSize * sizeof(int16_t),
- decoded_buffer, &speech_type);
- }
- assert(mute_factor_array_.get());
- normal_->Process(decoded_buffer, length, last_mode_, mute_factor_array_.get(),
- algorithm_buffer_.get());
+void NetEqImpl::DoCodecInternalCng(const int16_t* decoded_buffer,
+ size_t decoded_length) {
+ RTC_DCHECK(normal_.get());
+ RTC_DCHECK(mute_factor_array_.get());
+ normal_->Process(decoded_buffer, decoded_length, last_mode_,
+ mute_factor_array_.get(), algorithm_buffer_.get());
last_mode_ = kModeCodecInternalCng;
expand_->Reset();
}
@@ -1738,17 +1777,14 @@ int NetEqImpl::DoDtmf(const DtmfEvent& dtmf_event, bool* play_dtmf) {
void NetEqImpl::DoAlternativePlc(bool increase_timestamp) {
AudioDecoder* decoder = decoder_database_->GetActiveDecoder();
- int length;
+ size_t length;
if (decoder && decoder->HasDecodePlc()) {
// Use the decoder's packet-loss concealment.
// TODO(hlundin): Will probably need a longer buffer for multi-channel.
int16_t decoded_buffer[kMaxFrameSize];
length = decoder->DecodePlc(1, decoded_buffer);
- if (length > 0) {
+ if (length > 0)
algorithm_buffer_->PushBackInterleaved(decoded_buffer, length);
- } else {
- length = 0;
- }
} else {
// Do simple zero-stuffing.
length = output_size_samples_;
@@ -1765,14 +1801,14 @@ void NetEqImpl::DoAlternativePlc(bool increase_timestamp) {
int NetEqImpl::DtmfOverdub(const DtmfEvent& dtmf_event, size_t num_channels,
int16_t* output) const {
size_t out_index = 0;
- int overdub_length = output_size_samples_; // Default value.
+ size_t overdub_length = output_size_samples_; // Default value.
if (sync_buffer_->dtmf_index() > sync_buffer_->next_index()) {
// Special operation for transition from "DTMF only" to "DTMF overdub".
out_index = std::min(
sync_buffer_->dtmf_index() - sync_buffer_->next_index(),
- static_cast<size_t>(output_size_samples_));
- overdub_length = output_size_samples_ - static_cast<int>(out_index);
+ output_size_samples_);
+ overdub_length = output_size_samples_ - out_index;
}
AudioMultiVector dtmf_output(num_channels);
@@ -1784,13 +1820,14 @@ int NetEqImpl::DtmfOverdub(const DtmfEvent& dtmf_event, size_t num_channels,
if (dtmf_return_value == 0) {
dtmf_return_value = dtmf_tone_generator_->Generate(overdub_length,
&dtmf_output);
- assert((size_t) overdub_length == dtmf_output.Size());
+ assert(overdub_length == dtmf_output.Size());
}
dtmf_output.ReadInterleaved(overdub_length, &output[out_index]);
return dtmf_return_value < 0 ? dtmf_return_value : 0;
}
-int NetEqImpl::ExtractPackets(int required_samples, PacketList* packet_list) {
+int NetEqImpl::ExtractPackets(size_t required_samples,
+ PacketList* packet_list) {
bool first_packet = true;
uint8_t prev_payload_type = 0;
uint32_t prev_timestamp = 0;
@@ -1800,6 +1837,7 @@ int NetEqImpl::ExtractPackets(int required_samples, PacketList* packet_list) {
const RTPHeader* header = packet_buffer_->NextRtpHeader();
assert(header);
if (!header) {
+ LOG(LS_ERROR) << "Packet buffer unexpectedly empty.";
return -1;
}
uint32_t first_timestamp = header->timestamp;
@@ -1808,13 +1846,12 @@ int NetEqImpl::ExtractPackets(int required_samples, PacketList* packet_list) {
// Packet extraction loop.
do {
timestamp_ = header->timestamp;
- int discard_count = 0;
+ size_t discard_count = 0;
Packet* packet = packet_buffer_->GetNextPacket(&discard_count);
// |header| may be invalid after the |packet_buffer_| operation.
header = NULL;
if (!packet) {
- LOG_FERR1(LS_ERROR, GetNextPacket, discard_count) <<
- "Should always be able to extract a packet here";
+ LOG(LS_ERROR) << "Should always be able to extract a packet here";
assert(false); // Should always be able to extract a packet here.
return -1;
}
@@ -1838,7 +1875,7 @@ int NetEqImpl::ExtractPackets(int required_samples, PacketList* packet_list) {
packet->header.payloadType);
if (decoder) {
if (packet->sync_packet) {
- packet_duration = decoder_frame_length_;
+ packet_duration = rtc::checked_cast<int>(decoder_frame_length_);
} else {
if (packet->primary) {
packet_duration = decoder->PacketDuration(packet->payload,
@@ -1850,15 +1887,14 @@ int NetEqImpl::ExtractPackets(int required_samples, PacketList* packet_list) {
}
}
} else {
- LOG_FERR1(LS_WARNING, GetDecoder,
- static_cast<int>(packet->header.payloadType))
- << "Could not find a decoder for a packet about to be extracted.";
+ LOG(LS_WARNING) << "Unknown payload type "
+ << static_cast<int>(packet->header.payloadType);
assert(false);
}
if (packet_duration <= 0) {
// Decoder did not return a packet duration. Assume that the packet
// contains the same number of samples as the previous one.
- packet_duration = decoder_frame_length_;
+ packet_duration = rtc::checked_cast<int>(decoder_frame_length_);
}
extracted_samples = packet->header.timestamp - first_timestamp +
packet_duration;
@@ -1868,7 +1904,7 @@ int NetEqImpl::ExtractPackets(int required_samples, PacketList* packet_list) {
next_packet_available = false;
if (header && prev_payload_type == header->payloadType) {
int16_t seq_no_diff = header->sequenceNumber - prev_sequence_number;
- int32_t ts_diff = header->timestamp - prev_timestamp;
+ size_t ts_diff = header->timestamp - prev_timestamp;
if (seq_no_diff == 1 ||
(seq_no_diff == 0 && ts_diff == decoder_frame_length_)) {
// The next sequence number is available, or the next part of a packet
@@ -1877,7 +1913,8 @@ int NetEqImpl::ExtractPackets(int required_samples, PacketList* packet_list) {
}
prev_sequence_number = header->sequenceNumber;
}
- } while (extracted_samples < required_samples && next_packet_available);
+ } while (extracted_samples < rtc::checked_cast<int>(required_samples) &&
+ next_packet_available);
if (extracted_samples > 0) {
// Delete old packets only when we are going to decode something. Otherwise,
@@ -1894,19 +1931,19 @@ void NetEqImpl::UpdatePlcComponents(int fs_hz, size_t channels) {
// Delete objects and create new ones.
expand_.reset(expand_factory_->Create(background_noise_.get(),
sync_buffer_.get(), &random_vector_,
- fs_hz, channels));
+ &stats_, fs_hz, channels));
merge_.reset(new Merge(fs_hz, channels, expand_.get(), sync_buffer_.get()));
}
void NetEqImpl::SetSampleRateAndChannels(int fs_hz, size_t channels) {
- LOG_API2(fs_hz, channels);
+ LOG(LS_VERBOSE) << "SetSampleRateAndChannels " << fs_hz << " " << channels;
// TODO(hlundin): Change to an enumerator and skip assert.
assert(fs_hz == 8000 || fs_hz == 16000 || fs_hz == 32000 || fs_hz == 48000);
assert(channels > 0);
fs_hz_ = fs_hz;
fs_mult_ = fs_hz / 8000;
- output_size_samples_ = kOutputSizeMs * 8 * fs_mult_;
+ output_size_samples_ = static_cast<size_t>(kOutputSizeMs * 8 * fs_mult_);
decoder_frame_length_ = 3 * output_size_samples_; // Initialize to 30ms.
last_mode_ = kModeNormal;
@@ -1917,11 +1954,9 @@ void NetEqImpl::SetSampleRateAndChannels(int fs_hz, size_t channels) {
mute_factor_array_[i] = 16384; // 1.0 in Q14.
}
- // Reset comfort noise decoder, if there is one active.
AudioDecoder* cng_decoder = decoder_database_->GetActiveCngDecoder();
- if (cng_decoder) {
- cng_decoder->Init();
- }
+ if (cng_decoder)
+ cng_decoder->Reset();
// Reinit post-decode VAD with new sample rate.
assert(vad_.get()); // Cannot be NULL here.
@@ -1951,9 +1986,7 @@ void NetEqImpl::SetSampleRateAndChannels(int fs_hz, size_t channels) {
accelerate_.reset(
accelerate_factory_->Create(fs_hz, channels, *background_noise_));
preemptive_expand_.reset(preemptive_expand_factory_->Create(
- fs_hz, channels,
- *background_noise_,
- static_cast<int>(expand_->overlap_length())));
+ fs_hz, channels, *background_noise_, expand_->overlap_length()));
// Delete ComfortNoise object and create a new one.
comfort_noise_.reset(new ComfortNoise(fs_hz, decoder_database_.get(),
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_impl.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_impl.h
index 55ba067221f..d7c9ac4d965 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_impl.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_impl.h
@@ -11,8 +11,6 @@
#ifndef WEBRTC_MODULES_AUDIO_CODING_NETEQ_NETEQ_IMPL_H_
#define WEBRTC_MODULES_AUDIO_CODING_NETEQ_NETEQ_IMPL_H_
-#include <vector>
-
#include "webrtc/base/constructormagic.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/base/thread_annotations.h"
@@ -106,7 +104,7 @@ class NetEqImpl : public webrtc::NetEq {
// Returns kOK on success, or kFail in case of an error.
int GetAudio(size_t max_length,
int16_t* output_audio,
- int* samples_per_channel,
+ size_t* samples_per_channel,
int* num_channels,
NetEqOutputType* type) override;
@@ -138,7 +136,7 @@ class NetEqImpl : public webrtc::NetEq {
int TargetDelay() override;
- int CurrentDelay() override;
+ int CurrentDelayMs() const override;
// Sets the playout mode to |mode|.
// Deprecated.
@@ -154,11 +152,6 @@ class NetEqImpl : public webrtc::NetEq {
// after the call.
int NetworkStatistics(NetEqNetworkStatistics* stats) override;
- // Writes the last packet waiting times (in ms) to |waiting_times|. The number
- // of values written is no more than 100, but may be smaller if the interface
- // is polled again before 100 packets has arrived.
- void WaitingTimes(std::vector<int>* waiting_times) override;
-
// Writes the current RTCP statistics to |stats|. The statistics are reset
// and a new report period is started with the call.
void GetRtcpStatistics(RtcpStatistics* stats) override;
@@ -203,9 +196,9 @@ class NetEqImpl : public webrtc::NetEq {
protected:
static const int kOutputSizeMs = 10;
- static const int kMaxFrameSize = 2880; // 60 ms @ 48 kHz.
+ static const size_t kMaxFrameSize = 2880; // 60 ms @ 48 kHz.
// TODO(hlundin): Provide a better value for kSyncBufferSize.
- static const int kSyncBufferSize = 2 * kMaxFrameSize;
+ static const size_t kSyncBufferSize = 2 * kMaxFrameSize;
// Inserts a new packet into NetEq. This is used by the InsertPacket method
// above. Returns 0 on success, otherwise an error code.
@@ -225,7 +218,7 @@ class NetEqImpl : public webrtc::NetEq {
// Returns 0 on success, otherwise an error code.
int GetAudioInternal(size_t max_length,
int16_t* output,
- int* samples_per_channel,
+ size_t* samples_per_channel,
int* num_channels) EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
// Provides a decision to the GetAudioInternal method. The decision what to
@@ -250,9 +243,14 @@ class NetEqImpl : public webrtc::NetEq {
AudioDecoder::SpeechType* speech_type)
EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+ // Sub-method to Decode(). Performs codec internal CNG.
+ int DecodeCng(AudioDecoder* decoder, int* decoded_length,
+ AudioDecoder::SpeechType* speech_type)
+ EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+
// Sub-method to Decode(). Performs the actual decoding.
int DecodeLoop(PacketList* packet_list,
- Operations* operation,
+ const Operations& operation,
AudioDecoder* decoder,
int* decoded_length,
AudioDecoder::SpeechType* speech_type)
@@ -297,7 +295,8 @@ class NetEqImpl : public webrtc::NetEq {
// Calls the audio decoder to generate codec-internal comfort noise when
// no packet was received.
- void DoCodecInternalCng() EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+ void DoCodecInternalCng(const int16_t* decoded_buffer, size_t decoded_length)
+ EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
// Calls the DtmfToneGenerator class to generate DTMF tones.
int DoDtmf(const DtmfEvent& dtmf_event, bool* play_dtmf)
@@ -318,7 +317,7 @@ class NetEqImpl : public webrtc::NetEq {
// |required_samples| samples. The packets are inserted into |packet_list|.
// Returns the number of samples that the packets in the list will produce, or
// -1 in case of an error.
- int ExtractPackets(int required_samples, PacketList* packet_list)
+ int ExtractPackets(size_t required_samples, PacketList* packet_list)
EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
// Resets various variables and objects to new values based on the sample rate
@@ -375,8 +374,8 @@ class NetEqImpl : public webrtc::NetEq {
StatisticsCalculator stats_ GUARDED_BY(crit_sect_);
int fs_hz_ GUARDED_BY(crit_sect_);
int fs_mult_ GUARDED_BY(crit_sect_);
- int output_size_samples_ GUARDED_BY(crit_sect_);
- int decoder_frame_length_ GUARDED_BY(crit_sect_);
+ size_t output_size_samples_ GUARDED_BY(crit_sect_);
+ size_t decoder_frame_length_ GUARDED_BY(crit_sect_);
Modes last_mode_ GUARDED_BY(crit_sect_);
rtc::scoped_ptr<int16_t[]> mute_factor_array_ GUARDED_BY(crit_sect_);
size_t decoded_buffer_length_ GUARDED_BY(crit_sect_);
@@ -406,7 +405,7 @@ class NetEqImpl : public webrtc::NetEq {
uint32_t decoded_packet_timestamp_ GUARDED_BY(crit_sect_);
private:
- DISALLOW_COPY_AND_ASSIGN(NetEqImpl);
+ RTC_DISALLOW_COPY_AND_ASSIGN(NetEqImpl);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_impl_unittest.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_impl_unittest.cc
index 05a8de25cb3..5489fed9100 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_impl_unittest.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_impl_unittest.cc
@@ -13,6 +13,7 @@
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/safe_conversions.h"
#include "webrtc/modules/audio_coding/neteq/accelerate.h"
#include "webrtc/modules/audio_coding/neteq/expand.h"
#include "webrtc/modules/audio_coding/neteq/mock/mock_audio_decoder.h"
@@ -384,7 +385,7 @@ TEST_F(NetEqImplTest, InsertPacketsUntilBufferIsFull) {
neteq_->RegisterPayloadType(kDecoderPCM16B, kPayloadType));
// Insert packets. The buffer should not flush.
- for (int i = 1; i <= config_.max_packets_in_buffer; ++i) {
+ for (size_t i = 1; i <= config_.max_packets_in_buffer; ++i) {
EXPECT_EQ(NetEq::kOK,
neteq_->InsertPacket(
rtp_header, payload, kPayloadLengthBytes, kReceiveTime));
@@ -398,7 +399,7 @@ TEST_F(NetEqImplTest, InsertPacketsUntilBufferIsFull) {
EXPECT_EQ(NetEq::kOK,
neteq_->InsertPacket(
rtp_header, payload, kPayloadLengthBytes, kReceiveTime));
- EXPECT_EQ(1, packet_buffer_->NumPacketsInBuffer());
+ EXPECT_EQ(1u, packet_buffer_->NumPacketsInBuffer());
const RTPHeader* test_header = packet_buffer_->NextRtpHeader();
EXPECT_EQ(rtp_header.header.timestamp, test_header->timestamp);
EXPECT_EQ(rtp_header.header.sequenceNumber, test_header->sequenceNumber);
@@ -413,7 +414,8 @@ TEST_F(NetEqImplTest, VerifyTimestampPropagation) {
const uint8_t kPayloadType = 17; // Just an arbitrary number.
const uint32_t kReceiveTime = 17; // Value doesn't matter for this test.
const int kSampleRateHz = 8000;
- const int kPayloadLengthSamples = 10 * kSampleRateHz / 1000; // 10 ms.
+ const size_t kPayloadLengthSamples =
+ static_cast<size_t>(10 * kSampleRateHz / 1000); // 10 ms.
const size_t kPayloadLengthBytes = kPayloadLengthSamples;
uint8_t payload[kPayloadLengthBytes] = {0};
WebRtcRTPHeader rtp_header;
@@ -443,10 +445,7 @@ TEST_F(NetEqImplTest, VerifyTimestampPropagation) {
return encoded_len;
}
- virtual int Init() {
- next_value_ = 1;
- return 0;
- }
+ void Reset() override { next_value_ = 1; }
size_t Channels() const override { return 1; }
@@ -466,9 +465,9 @@ TEST_F(NetEqImplTest, VerifyTimestampPropagation) {
rtp_header, payload, kPayloadLengthBytes, kReceiveTime));
// Pull audio once.
- const int kMaxOutputSize = 10 * kSampleRateHz / 1000;
+ const size_t kMaxOutputSize = static_cast<size_t>(10 * kSampleRateHz / 1000);
int16_t output[kMaxOutputSize];
- int samples_per_channel;
+ size_t samples_per_channel;
int num_channels;
NetEqOutputType type;
EXPECT_EQ(
@@ -480,7 +479,8 @@ TEST_F(NetEqImplTest, VerifyTimestampPropagation) {
EXPECT_EQ(kOutputNormal, type);
// Start with a simple check that the fake decoder is behaving as expected.
- EXPECT_EQ(kPayloadLengthSamples, decoder_.next_value() - 1);
+ EXPECT_EQ(kPayloadLengthSamples,
+ static_cast<size_t>(decoder_.next_value() - 1));
// The value of the last of the output samples is the same as the number of
// samples played from the decoded packet. Thus, this number + the RTP
@@ -500,7 +500,7 @@ TEST_F(NetEqImplTest, VerifyTimestampPropagation) {
// Check that the number of samples still to play from the sync buffer add
// up with what was already played out.
EXPECT_EQ(kPayloadLengthSamples - output[samples_per_channel - 1],
- static_cast<int>(sync_buffer->FutureLength()));
+ sync_buffer->FutureLength());
}
TEST_F(NetEqImplTest, ReorderedPacket) {
@@ -510,7 +510,8 @@ TEST_F(NetEqImplTest, ReorderedPacket) {
const uint8_t kPayloadType = 17; // Just an arbitrary number.
const uint32_t kReceiveTime = 17; // Value doesn't matter for this test.
const int kSampleRateHz = 8000;
- const int kPayloadLengthSamples = 10 * kSampleRateHz / 1000; // 10 ms.
+ const size_t kPayloadLengthSamples =
+ static_cast<size_t>(10 * kSampleRateHz / 1000); // 10 ms.
const size_t kPayloadLengthBytes = kPayloadLengthSamples;
uint8_t payload[kPayloadLengthBytes] = {0};
WebRtcRTPHeader rtp_header;
@@ -521,7 +522,7 @@ TEST_F(NetEqImplTest, ReorderedPacket) {
// Create a mock decoder object.
MockAudioDecoder mock_decoder;
- EXPECT_CALL(mock_decoder, Init()).WillRepeatedly(Return(0));
+ EXPECT_CALL(mock_decoder, Reset()).WillRepeatedly(Return());
EXPECT_CALL(mock_decoder, Channels()).WillRepeatedly(Return(1));
EXPECT_CALL(mock_decoder, IncomingPacket(_, kPayloadLengthBytes, _, _, _))
.WillRepeatedly(Return(0));
@@ -544,9 +545,9 @@ TEST_F(NetEqImplTest, ReorderedPacket) {
rtp_header, payload, kPayloadLengthBytes, kReceiveTime));
// Pull audio once.
- const int kMaxOutputSize = 10 * kSampleRateHz / 1000;
+ const size_t kMaxOutputSize = static_cast<size_t>(10 * kSampleRateHz / 1000);
int16_t output[kMaxOutputSize];
- int samples_per_channel;
+ size_t samples_per_channel;
int num_channels;
NetEqOutputType type;
EXPECT_EQ(
@@ -606,7 +607,8 @@ TEST_F(NetEqImplTest, FirstPacketUnknown) {
const uint8_t kPayloadType = 17; // Just an arbitrary number.
const uint32_t kReceiveTime = 17; // Value doesn't matter for this test.
const int kSampleRateHz = 8000;
- const int kPayloadLengthSamples = 10 * kSampleRateHz / 1000; // 10 ms.
+ const size_t kPayloadLengthSamples =
+ static_cast<size_t>(10 * kSampleRateHz / 1000); // 10 ms.
const size_t kPayloadLengthBytes = kPayloadLengthSamples;
uint8_t payload[kPayloadLengthBytes] = {0};
WebRtcRTPHeader rtp_header;
@@ -623,9 +625,9 @@ TEST_F(NetEqImplTest, FirstPacketUnknown) {
EXPECT_EQ(NetEq::kUnknownRtpPayloadType, neteq_->LastError());
// Pull audio once.
- const int kMaxOutputSize = 10 * kSampleRateHz / 1000;
+ const size_t kMaxOutputSize = static_cast<size_t>(10 * kSampleRateHz / 1000);
int16_t output[kMaxOutputSize];
- int samples_per_channel;
+ size_t samples_per_channel;
int num_channels;
NetEqOutputType type;
EXPECT_EQ(NetEq::kOK,
@@ -641,7 +643,7 @@ TEST_F(NetEqImplTest, FirstPacketUnknown) {
neteq_->RegisterPayloadType(kDecoderPCM16B, kPayloadType));
// Insert 10 packets.
- for (int i = 0; i < 10; ++i) {
+ for (size_t i = 0; i < 10; ++i) {
rtp_header.header.sequenceNumber++;
rtp_header.header.timestamp += kPayloadLengthSamples;
EXPECT_EQ(NetEq::kOK,
@@ -651,7 +653,7 @@ TEST_F(NetEqImplTest, FirstPacketUnknown) {
}
// Pull audio repeatedly and make sure we get normal output, that is not PLC.
- for (int i = 0; i < 3; ++i) {
+ for (size_t i = 0; i < 3; ++i) {
EXPECT_EQ(NetEq::kOK,
neteq_->GetAudio(kMaxOutputSize, output, &samples_per_channel,
&num_channels, &type));
@@ -672,8 +674,9 @@ TEST_F(NetEqImplTest, CodecInternalCng) {
const uint8_t kPayloadType = 17; // Just an arbitrary number.
const uint32_t kReceiveTime = 17; // Value doesn't matter for this test.
const int kSampleRateKhz = 48;
- const int kPayloadLengthSamples = 20 * kSampleRateKhz; // 20 ms.
- const int kPayloadLengthBytes = 10;
+ const size_t kPayloadLengthSamples =
+ static_cast<size_t>(20 * kSampleRateKhz); // 20 ms.
+ const size_t kPayloadLengthBytes = 10;
uint8_t payload[kPayloadLengthBytes] = {0};
int16_t dummy_output[kPayloadLengthSamples] = {0};
@@ -685,7 +688,7 @@ TEST_F(NetEqImplTest, CodecInternalCng) {
// Create a mock decoder object.
MockAudioDecoder mock_decoder;
- EXPECT_CALL(mock_decoder, Init()).WillRepeatedly(Return(0));
+ EXPECT_CALL(mock_decoder, Reset()).WillRepeatedly(Return());
EXPECT_CALL(mock_decoder, Channels()).WillRepeatedly(Return(1));
EXPECT_CALL(mock_decoder, IncomingPacket(_, kPayloadLengthBytes, _, _, _))
.WillRepeatedly(Return(0));
@@ -736,9 +739,9 @@ TEST_F(NetEqImplTest, CodecInternalCng) {
neteq_->InsertPacket(
rtp_header, payload, kPayloadLengthBytes, kReceiveTime));
- const int kMaxOutputSize = 10 * kSampleRateKhz;
+ const size_t kMaxOutputSize = static_cast<size_t>(10 * kSampleRateKhz);
int16_t output[kMaxOutputSize];
- int samples_per_channel;
+ size_t samples_per_channel;
int num_channels;
uint32_t timestamp;
uint32_t last_timestamp;
@@ -762,7 +765,7 @@ TEST_F(NetEqImplTest, CodecInternalCng) {
&num_channels, &type));
EXPECT_TRUE(neteq_->GetPlayoutTimestamp(&last_timestamp));
- for (int i = 1; i < 6; ++i) {
+ for (size_t i = 1; i < 6; ++i) {
ASSERT_EQ(kMaxOutputSize, samples_per_channel);
EXPECT_EQ(1, num_channels);
EXPECT_EQ(expected_type[i - 1], type);
@@ -783,7 +786,7 @@ TEST_F(NetEqImplTest, CodecInternalCng) {
neteq_->InsertPacket(
rtp_header, payload, kPayloadLengthBytes, kReceiveTime));
- for (int i = 6; i < 8; ++i) {
+ for (size_t i = 6; i < 8; ++i) {
ASSERT_EQ(kMaxOutputSize, samples_per_channel);
EXPECT_EQ(1, num_channels);
EXPECT_EQ(expected_type[i - 1], type);
@@ -811,7 +814,8 @@ TEST_F(NetEqImplTest, UnsupportedDecoder) {
const uint32_t kReceiveTime = 17; // Value doesn't matter for this test.
const int kSampleRateHz = 8000;
- const int kPayloadLengthSamples = 10 * kSampleRateHz / 1000; // 10 ms.
+ const size_t kPayloadLengthSamples =
+ static_cast<size_t>(10 * kSampleRateHz / 1000); // 10 ms.
const size_t kPayloadLengthBytes = 1;
uint8_t payload[kPayloadLengthBytes]= {0};
int16_t dummy_output[kPayloadLengthSamples * kChannels] = {0};
@@ -823,9 +827,7 @@ TEST_F(NetEqImplTest, UnsupportedDecoder) {
class MockAudioDecoder : public AudioDecoder {
public:
- int Init() override {
- return 0;
- }
+ void Reset() override {}
MOCK_CONST_METHOD2(PacketDuration, int(const uint8_t*, size_t));
MOCK_METHOD5(DecodeInternal, int(const uint8_t*, size_t, int, int16_t*,
SpeechType*));
@@ -852,7 +854,8 @@ TEST_F(NetEqImplTest, UnsupportedDecoder) {
dummy_output +
kPayloadLengthSamples * kChannels),
SetArgPointee<4>(AudioDecoder::kSpeech),
- Return(kPayloadLengthSamples * kChannels)));
+ Return(static_cast<int>(
+ kPayloadLengthSamples * kChannels))));
EXPECT_CALL(decoder_, PacketDuration(Pointee(kSecondPayloadValue),
kPayloadLengthBytes))
@@ -879,9 +882,10 @@ TEST_F(NetEqImplTest, UnsupportedDecoder) {
neteq_->InsertPacket(
rtp_header, payload, kPayloadLengthBytes, kReceiveTime));
- const int kMaxOutputSize = 10 * kSampleRateHz / 1000 * kChannels;
+ const size_t kMaxOutputSize =
+ static_cast<size_t>(10 * kSampleRateHz / 1000 * kChannels);
int16_t output[kMaxOutputSize];
- int samples_per_channel;
+ size_t samples_per_channel;
int num_channels;
NetEqOutputType type;
@@ -899,4 +903,347 @@ TEST_F(NetEqImplTest, UnsupportedDecoder) {
EXPECT_EQ(kChannels, num_channels);
}
-} // namespace webrtc
+// This test inserts packets until the buffer is flushed. After that, it asks
+// NetEq for the network statistics. The purpose of the test is to make sure
+// that even though the buffer size increment is negative (which it becomes when
+// the packet causing a flush is inserted), the packet length stored in the
+// decision logic remains valid.
+TEST_F(NetEqImplTest, FloodBufferAndGetNetworkStats) {
+ UseNoMocks();
+ CreateInstance();
+
+ const size_t kPayloadLengthSamples = 80;
+ const size_t kPayloadLengthBytes = 2 * kPayloadLengthSamples; // PCM 16-bit.
+ const uint8_t kPayloadType = 17; // Just an arbitrary number.
+ const uint32_t kReceiveTime = 17; // Value doesn't matter for this test.
+ uint8_t payload[kPayloadLengthBytes] = {0};
+ WebRtcRTPHeader rtp_header;
+ rtp_header.header.payloadType = kPayloadType;
+ rtp_header.header.sequenceNumber = 0x1234;
+ rtp_header.header.timestamp = 0x12345678;
+ rtp_header.header.ssrc = 0x87654321;
+
+ EXPECT_EQ(NetEq::kOK,
+ neteq_->RegisterPayloadType(kDecoderPCM16B, kPayloadType));
+
+ // Insert packets until the buffer flushes.
+ for (size_t i = 0; i <= config_.max_packets_in_buffer; ++i) {
+ EXPECT_EQ(i, packet_buffer_->NumPacketsInBuffer());
+ EXPECT_EQ(NetEq::kOK,
+ neteq_->InsertPacket(rtp_header, payload, kPayloadLengthBytes,
+ kReceiveTime));
+ rtp_header.header.timestamp +=
+ rtc::checked_cast<uint32_t>(kPayloadLengthSamples);
+ ++rtp_header.header.sequenceNumber;
+ }
+ EXPECT_EQ(1u, packet_buffer_->NumPacketsInBuffer());
+
+ // Ask for network statistics. This should not crash.
+ NetEqNetworkStatistics stats;
+ EXPECT_EQ(NetEq::kOK, neteq_->NetworkStatistics(&stats));
+}
+
+TEST_F(NetEqImplTest, DecodedPayloadTooShort) {
+ UseNoMocks();
+ CreateInstance();
+
+ const uint8_t kPayloadType = 17; // Just an arbitrary number.
+ const uint32_t kReceiveTime = 17; // Value doesn't matter for this test.
+ const int kSampleRateHz = 8000;
+ const size_t kPayloadLengthSamples =
+ static_cast<size_t>(10 * kSampleRateHz / 1000); // 10 ms.
+ const size_t kPayloadLengthBytes = 2 * kPayloadLengthSamples;
+ uint8_t payload[kPayloadLengthBytes] = {0};
+ WebRtcRTPHeader rtp_header;
+ rtp_header.header.payloadType = kPayloadType;
+ rtp_header.header.sequenceNumber = 0x1234;
+ rtp_header.header.timestamp = 0x12345678;
+ rtp_header.header.ssrc = 0x87654321;
+
+ // Create a mock decoder object.
+ MockAudioDecoder mock_decoder;
+ EXPECT_CALL(mock_decoder, Reset()).WillRepeatedly(Return());
+ EXPECT_CALL(mock_decoder, Channels()).WillRepeatedly(Return(1));
+ EXPECT_CALL(mock_decoder, IncomingPacket(_, kPayloadLengthBytes, _, _, _))
+ .WillRepeatedly(Return(0));
+ EXPECT_CALL(mock_decoder, PacketDuration(_, _))
+ .WillRepeatedly(Return(kPayloadLengthSamples));
+ int16_t dummy_output[kPayloadLengthSamples] = {0};
+ // The below expectation will make the mock decoder write
+ // |kPayloadLengthSamples| - 5 zeros to the output array, and mark it as
+ // speech. That is, the decoded length is 5 samples shorter than the expected.
+ EXPECT_CALL(mock_decoder,
+ Decode(_, kPayloadLengthBytes, kSampleRateHz, _, _, _))
+ .WillOnce(
+ DoAll(SetArrayArgument<4>(dummy_output,
+ dummy_output + kPayloadLengthSamples - 5),
+ SetArgPointee<5>(AudioDecoder::kSpeech),
+ Return(kPayloadLengthSamples - 5)));
+ EXPECT_EQ(NetEq::kOK,
+ neteq_->RegisterExternalDecoder(&mock_decoder, kDecoderPCM16B,
+ kPayloadType, kSampleRateHz));
+
+ // Insert one packet.
+ EXPECT_EQ(NetEq::kOK,
+ neteq_->InsertPacket(rtp_header, payload, kPayloadLengthBytes,
+ kReceiveTime));
+
+ EXPECT_EQ(5u, neteq_->sync_buffer_for_test()->FutureLength());
+
+ // Pull audio once.
+ const size_t kMaxOutputSize = static_cast<size_t>(10 * kSampleRateHz / 1000);
+ int16_t output[kMaxOutputSize];
+ size_t samples_per_channel;
+ int num_channels;
+ NetEqOutputType type;
+ EXPECT_EQ(NetEq::kOK,
+ neteq_->GetAudio(kMaxOutputSize, output, &samples_per_channel,
+ &num_channels, &type));
+ ASSERT_EQ(kMaxOutputSize, samples_per_channel);
+ EXPECT_EQ(1, num_channels);
+ EXPECT_EQ(kOutputNormal, type);
+
+ EXPECT_CALL(mock_decoder, Die());
+}
+
+// This test checks the behavior of NetEq when audio decoder fails.
+TEST_F(NetEqImplTest, DecodingError) {
+ UseNoMocks();
+ CreateInstance();
+
+ const uint8_t kPayloadType = 17; // Just an arbitrary number.
+ const uint32_t kReceiveTime = 17; // Value doesn't matter for this test.
+ const int kSampleRateHz = 8000;
+ const int kDecoderErrorCode = -97; // Any negative number.
+
+ // We let decoder return 5 ms each time, and therefore, 2 packets make 10 ms.
+ const size_t kFrameLengthSamples =
+ static_cast<size_t>(5 * kSampleRateHz / 1000);
+
+ const size_t kPayloadLengthBytes = 1; // This can be arbitrary.
+
+ uint8_t payload[kPayloadLengthBytes] = {0};
+
+ WebRtcRTPHeader rtp_header;
+ rtp_header.header.payloadType = kPayloadType;
+ rtp_header.header.sequenceNumber = 0x1234;
+ rtp_header.header.timestamp = 0x12345678;
+ rtp_header.header.ssrc = 0x87654321;
+
+ // Create a mock decoder object.
+ MockAudioDecoder mock_decoder;
+ EXPECT_CALL(mock_decoder, Reset()).WillRepeatedly(Return());
+ EXPECT_CALL(mock_decoder, Channels()).WillRepeatedly(Return(1));
+ EXPECT_CALL(mock_decoder, IncomingPacket(_, kPayloadLengthBytes, _, _, _))
+ .WillRepeatedly(Return(0));
+ EXPECT_CALL(mock_decoder, PacketDuration(_, _))
+ .WillRepeatedly(Return(kFrameLengthSamples));
+ EXPECT_CALL(mock_decoder, ErrorCode())
+ .WillOnce(Return(kDecoderErrorCode));
+ EXPECT_CALL(mock_decoder, HasDecodePlc())
+ .WillOnce(Return(false));
+ int16_t dummy_output[kFrameLengthSamples] = {0};
+
+ {
+ InSequence sequence; // Dummy variable.
+ // Mock decoder works normally the first time.
+ EXPECT_CALL(mock_decoder,
+ Decode(_, kPayloadLengthBytes, kSampleRateHz, _, _, _))
+ .Times(3)
+ .WillRepeatedly(
+ DoAll(SetArrayArgument<4>(dummy_output,
+ dummy_output + kFrameLengthSamples),
+ SetArgPointee<5>(AudioDecoder::kSpeech),
+ Return(kFrameLengthSamples)))
+ .RetiresOnSaturation();
+
+ // Then mock decoder fails. A common reason for failure can be buffer being
+ // too short
+ EXPECT_CALL(mock_decoder,
+ Decode(_, kPayloadLengthBytes, kSampleRateHz, _, _, _))
+ .WillOnce(Return(-1))
+ .RetiresOnSaturation();
+
+ // Mock decoder finally returns to normal.
+ EXPECT_CALL(mock_decoder,
+ Decode(_, kPayloadLengthBytes, kSampleRateHz, _, _, _))
+ .Times(2)
+ .WillRepeatedly(
+ DoAll(SetArrayArgument<4>(dummy_output,
+ dummy_output + kFrameLengthSamples),
+ SetArgPointee<5>(AudioDecoder::kSpeech),
+ Return(kFrameLengthSamples)));
+ }
+
+ EXPECT_EQ(NetEq::kOK,
+ neteq_->RegisterExternalDecoder(&mock_decoder, kDecoderPCM16B,
+ kPayloadType, kSampleRateHz));
+
+ // Insert packets.
+ for (int i = 0; i < 6; ++i) {
+ rtp_header.header.sequenceNumber += 1;
+ rtp_header.header.timestamp += kFrameLengthSamples;
+ EXPECT_EQ(NetEq::kOK,
+ neteq_->InsertPacket(rtp_header, payload, kPayloadLengthBytes,
+ kReceiveTime));
+ }
+
+ // Pull audio.
+ const size_t kMaxOutputSize = static_cast<size_t>(10 * kSampleRateHz / 1000);
+ int16_t output[kMaxOutputSize];
+ size_t samples_per_channel;
+ int num_channels;
+ NetEqOutputType type;
+ EXPECT_EQ(NetEq::kOK,
+ neteq_->GetAudio(kMaxOutputSize, output, &samples_per_channel,
+ &num_channels, &type));
+ EXPECT_EQ(kMaxOutputSize, samples_per_channel);
+ EXPECT_EQ(1, num_channels);
+ EXPECT_EQ(kOutputNormal, type);
+
+ // Pull audio again. Decoder fails.
+ EXPECT_EQ(NetEq::kFail,
+ neteq_->GetAudio(kMaxOutputSize, output, &samples_per_channel,
+ &num_channels, &type));
+ EXPECT_EQ(NetEq::kDecoderErrorCode, neteq_->LastError());
+ EXPECT_EQ(kDecoderErrorCode, neteq_->LastDecoderError());
+ EXPECT_EQ(kMaxOutputSize, samples_per_channel);
+ EXPECT_EQ(1, num_channels);
+ // TODO(minyue): should NetEq better give kOutputPLC, since it is actually an
+ // expansion.
+ EXPECT_EQ(kOutputNormal, type);
+
+ // Pull audio again, should continue an expansion.
+ EXPECT_EQ(NetEq::kOK,
+ neteq_->GetAudio(kMaxOutputSize, output, &samples_per_channel,
+ &num_channels, &type));
+ EXPECT_EQ(kMaxOutputSize, samples_per_channel);
+ EXPECT_EQ(1, num_channels);
+ EXPECT_EQ(kOutputPLC, type);
+
+ // Pull audio again, should behave normal.
+ EXPECT_EQ(NetEq::kOK,
+ neteq_->GetAudio(kMaxOutputSize, output, &samples_per_channel,
+ &num_channels, &type));
+ EXPECT_EQ(kMaxOutputSize, samples_per_channel);
+ EXPECT_EQ(1, num_channels);
+ EXPECT_EQ(kOutputNormal, type);
+
+ EXPECT_CALL(mock_decoder, Die());
+}
+
+// This test checks the behavior of NetEq when audio decoder fails during CNG.
+TEST_F(NetEqImplTest, DecodingErrorDuringInternalCng) {
+ UseNoMocks();
+ CreateInstance();
+
+ const uint8_t kPayloadType = 17; // Just an arbitrary number.
+ const uint32_t kReceiveTime = 17; // Value doesn't matter for this test.
+ const int kSampleRateHz = 8000;
+ const int kDecoderErrorCode = -97; // Any negative number.
+
+ // We let decoder return 5 ms each time, and therefore, 2 packets make 10 ms.
+ const size_t kFrameLengthSamples =
+ static_cast<size_t>(5 * kSampleRateHz / 1000);
+
+ const size_t kPayloadLengthBytes = 1; // This can be arbitrary.
+
+ uint8_t payload[kPayloadLengthBytes] = {0};
+
+ WebRtcRTPHeader rtp_header;
+ rtp_header.header.payloadType = kPayloadType;
+ rtp_header.header.sequenceNumber = 0x1234;
+ rtp_header.header.timestamp = 0x12345678;
+ rtp_header.header.ssrc = 0x87654321;
+
+ // Create a mock decoder object.
+ MockAudioDecoder mock_decoder;
+ EXPECT_CALL(mock_decoder, Reset()).WillRepeatedly(Return());
+ EXPECT_CALL(mock_decoder, Channels()).WillRepeatedly(Return(1));
+ EXPECT_CALL(mock_decoder, IncomingPacket(_, kPayloadLengthBytes, _, _, _))
+ .WillRepeatedly(Return(0));
+ EXPECT_CALL(mock_decoder, PacketDuration(_, _))
+ .WillRepeatedly(Return(kFrameLengthSamples));
+ EXPECT_CALL(mock_decoder, ErrorCode())
+ .WillOnce(Return(kDecoderErrorCode));
+ int16_t dummy_output[kFrameLengthSamples] = {0};
+
+ {
+ InSequence sequence; // Dummy variable.
+ // Mock decoder works normally the first 2 times.
+ EXPECT_CALL(mock_decoder,
+ Decode(_, kPayloadLengthBytes, kSampleRateHz, _, _, _))
+ .Times(2)
+ .WillRepeatedly(
+ DoAll(SetArrayArgument<4>(dummy_output,
+ dummy_output + kFrameLengthSamples),
+ SetArgPointee<5>(AudioDecoder::kComfortNoise),
+ Return(kFrameLengthSamples)))
+ .RetiresOnSaturation();
+
+ // Then mock decoder fails. A common reason for failure can be buffer being
+ // too short
+ EXPECT_CALL(mock_decoder, Decode(nullptr, 0, kSampleRateHz, _, _, _))
+ .WillOnce(Return(-1))
+ .RetiresOnSaturation();
+
+ // Mock decoder finally returns to normal.
+ EXPECT_CALL(mock_decoder, Decode(nullptr, 0, kSampleRateHz, _, _, _))
+ .Times(2)
+ .WillRepeatedly(
+ DoAll(SetArrayArgument<4>(dummy_output,
+ dummy_output + kFrameLengthSamples),
+ SetArgPointee<5>(AudioDecoder::kComfortNoise),
+ Return(kFrameLengthSamples)));
+ }
+
+ EXPECT_EQ(NetEq::kOK,
+ neteq_->RegisterExternalDecoder(&mock_decoder, kDecoderPCM16B,
+ kPayloadType, kSampleRateHz));
+
+ // Insert 2 packets. This will make netEq into codec internal CNG mode.
+ for (int i = 0; i < 2; ++i) {
+ rtp_header.header.sequenceNumber += 1;
+ rtp_header.header.timestamp += kFrameLengthSamples;
+ EXPECT_EQ(NetEq::kOK,
+ neteq_->InsertPacket(rtp_header, payload, kPayloadLengthBytes,
+ kReceiveTime));
+ }
+
+ // Pull audio.
+ const size_t kMaxOutputSize = static_cast<size_t>(10 * kSampleRateHz / 1000);
+ int16_t output[kMaxOutputSize];
+ size_t samples_per_channel;
+ int num_channels;
+ NetEqOutputType type;
+ EXPECT_EQ(NetEq::kOK,
+ neteq_->GetAudio(kMaxOutputSize, output, &samples_per_channel,
+ &num_channels, &type));
+ EXPECT_EQ(kMaxOutputSize, samples_per_channel);
+ EXPECT_EQ(1, num_channels);
+ EXPECT_EQ(kOutputCNG, type);
+
+ // Pull audio again. Decoder fails.
+ EXPECT_EQ(NetEq::kFail,
+ neteq_->GetAudio(kMaxOutputSize, output, &samples_per_channel,
+ &num_channels, &type));
+ EXPECT_EQ(NetEq::kDecoderErrorCode, neteq_->LastError());
+ EXPECT_EQ(kDecoderErrorCode, neteq_->LastDecoderError());
+ EXPECT_EQ(kMaxOutputSize, samples_per_channel);
+ EXPECT_EQ(1, num_channels);
+ // TODO(minyue): should NetEq better give kOutputPLC, since it is actually an
+ // expansion.
+ EXPECT_EQ(kOutputCNG, type);
+
+ // Pull audio again, should resume codec CNG.
+ EXPECT_EQ(NetEq::kOK,
+ neteq_->GetAudio(kMaxOutputSize, output, &samples_per_channel,
+ &num_channels, &type));
+ EXPECT_EQ(kMaxOutputSize, samples_per_channel);
+ EXPECT_EQ(1, num_channels);
+ EXPECT_EQ(kOutputCNG, type);
+
+ EXPECT_CALL(mock_decoder, Die());
+}
+
+}// namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_network_stats_unittest.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_network_stats_unittest.cc
index e1a0f69dfab..e9ebe064eb7 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_network_stats_unittest.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_network_stats_unittest.cc
@@ -10,7 +10,6 @@
#include "testing/gmock/include/gmock/gmock.h"
#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/audio_coding/neteq/audio_decoder_impl.h"
#include "webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.h"
#include "webrtc/modules/audio_coding/neteq/tools/rtp_generator.h"
@@ -21,19 +20,17 @@ using ::testing::_;
using ::testing::SetArgPointee;
using ::testing::Return;
-
-class MockAudioDecoderOpus : public AudioDecoderOpus {
+class MockAudioDecoder final : public AudioDecoder {
public:
static const int kPacketDuration = 960; // 48 kHz * 20 ms
- explicit MockAudioDecoderOpus(int num_channels)
- : AudioDecoderOpus(num_channels),
- fec_enabled_(false) {
+ explicit MockAudioDecoder(size_t num_channels)
+ : num_channels_(num_channels), fec_enabled_(false) {
}
- virtual ~MockAudioDecoderOpus() { Die(); }
+ ~MockAudioDecoder() override { Die(); }
MOCK_METHOD0(Die, void());
- MOCK_METHOD0(Init, int());
+ MOCK_METHOD0(Reset, void());
int PacketDuration(const uint8_t* encoded,
size_t encoded_len) const override {
@@ -49,6 +46,8 @@ class MockAudioDecoderOpus : public AudioDecoderOpus {
return fec_enabled_;
}
+ size_t Channels() const override { return num_channels_; }
+
void set_fec_enabled(bool enable_fec) { fec_enabled_ = enable_fec; }
bool fec_enabled() const { return fec_enabled_; }
@@ -75,13 +74,14 @@ class MockAudioDecoderOpus : public AudioDecoderOpus {
}
private:
+ const size_t num_channels_;
bool fec_enabled_;
};
class NetEqNetworkStatsTest : public NetEqExternalDecoderTest {
public:
static const int kPayloadSizeByte = 30;
- static const int kFrameSizeMs = 20; // frame size of Opus
+ static const int kFrameSizeMs = 20;
static const int kMaxOutputSize = 960; // 10 ms * 48 kHz * 2 channels.
enum logic {
@@ -108,7 +108,7 @@ struct NetEqNetworkStatsCheck {
};
NetEqNetworkStatsTest(NetEqDecoder codec,
- MockAudioDecoderOpus* decoder)
+ MockAudioDecoder* decoder)
: NetEqExternalDecoderTest(codec, decoder),
external_decoder_(decoder),
samples_per_ms_(CodecSampleRateHz(codec) / 1000),
@@ -170,6 +170,9 @@ struct NetEqNetworkStatsCheck {
CHECK_NETEQ_NETWORK_STATS(added_zero_samples);
#undef CHECK_NETEQ_NETWORK_STATS
+
+ // Compare with CurrentDelay, which should be identical.
+ EXPECT_EQ(stats.current_buffer_size_ms, neteq()->CurrentDelayMs());
}
void RunTest(int num_loops, NetEqNetworkStatsCheck expects) {
@@ -224,7 +227,7 @@ struct NetEqNetworkStatsCheck {
expects.stats_ref.expand_rate = expects.stats_ref.speech_expand_rate = 1065;
RunTest(50, expects);
- // Next we enable Opus FEC.
+ // Next we enable FEC.
external_decoder_->set_fec_enabled(true);
// If FEC fills in the lost packets, no packet loss will be counted.
expects.stats_ref.packet_loss_rate = 0;
@@ -258,7 +261,7 @@ struct NetEqNetworkStatsCheck {
}
private:
- MockAudioDecoderOpus* external_decoder_;
+ MockAudioDecoder* external_decoder_;
const int samples_per_ms_;
const size_t frame_size_samples_;
rtc::scoped_ptr<test::RtpGenerator> rtp_generator_;
@@ -269,25 +272,22 @@ struct NetEqNetworkStatsCheck {
int16_t output_[kMaxOutputSize];
};
-TEST(NetEqNetworkStatsTest, OpusDecodeFec) {
- MockAudioDecoderOpus decoder(1);
- EXPECT_CALL(decoder, Init());
+TEST(NetEqNetworkStatsTest, DecodeFec) {
+ MockAudioDecoder decoder(1);
NetEqNetworkStatsTest test(kDecoderOpus, &decoder);
test.DecodeFecTest();
EXPECT_CALL(decoder, Die()).Times(1);
}
-TEST(NetEqNetworkStatsTest, StereoOpusDecodeFec) {
- MockAudioDecoderOpus decoder(2);
- EXPECT_CALL(decoder, Init());
+TEST(NetEqNetworkStatsTest, StereoDecodeFec) {
+ MockAudioDecoder decoder(2);
NetEqNetworkStatsTest test(kDecoderOpus, &decoder);
test.DecodeFecTest();
EXPECT_CALL(decoder, Die()).Times(1);
}
TEST(NetEqNetworkStatsTest, NoiseExpansionTest) {
- MockAudioDecoderOpus decoder(1);
- EXPECT_CALL(decoder, Init());
+ MockAudioDecoder decoder(1);
NetEqNetworkStatsTest test(kDecoderOpus, &decoder);
test.NoiseExpansionTest();
EXPECT_CALL(decoder, Die()).Times(1);
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_stereo_unittest.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_stereo_unittest.cc
index ea88f24a17c..5564e20267f 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_stereo_unittest.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_stereo_unittest.cc
@@ -43,7 +43,7 @@ struct TestParameters {
class NetEqStereoTest : public ::testing::TestWithParam<TestParameters> {
protected:
static const int kTimeStepMs = 10;
- static const int kMaxBlockSize = 480; // 10 ms @ 48 kHz.
+ static const size_t kMaxBlockSize = 480; // 10 ms @ 48 kHz.
static const uint8_t kPayloadTypeMono = 95;
static const uint8_t kPayloadTypeMulti = 96;
@@ -52,7 +52,8 @@ class NetEqStereoTest : public ::testing::TestWithParam<TestParameters> {
sample_rate_hz_(GetParam().sample_rate),
samples_per_ms_(sample_rate_hz_ / 1000),
frame_size_ms_(GetParam().frame_size),
- frame_size_samples_(frame_size_ms_ * samples_per_ms_),
+ frame_size_samples_(
+ static_cast<size_t>(frame_size_ms_ * samples_per_ms_)),
output_size_samples_(10 * samples_per_ms_),
rtp_generator_mono_(samples_per_ms_),
rtp_generator_(samples_per_ms_),
@@ -212,7 +213,7 @@ class NetEqStereoTest : public ::testing::TestWithParam<TestParameters> {
}
NetEqOutputType output_type;
// Get audio from mono instance.
- int samples_per_channel;
+ size_t samples_per_channel;
int num_channels;
EXPECT_EQ(NetEq::kOK,
neteq_mono_->GetAudio(kMaxBlockSize, output_,
@@ -242,8 +243,8 @@ class NetEqStereoTest : public ::testing::TestWithParam<TestParameters> {
const int sample_rate_hz_;
const int samples_per_ms_;
const int frame_size_ms_;
- const int frame_size_samples_;
- const int output_size_samples_;
+ const size_t frame_size_samples_;
+ const size_t output_size_samples_;
NetEq* neteq_mono_;
NetEq* neteq_;
test::RtpGenerator rtp_generator_mono_;
@@ -256,8 +257,8 @@ class NetEqStereoTest : public ::testing::TestWithParam<TestParameters> {
int16_t* output_multi_channel_;
WebRtcRTPHeader rtp_header_mono_;
WebRtcRTPHeader rtp_header_;
- int payload_size_bytes_;
- int multi_payload_size_bytes_;
+ size_t payload_size_bytes_;
+ size_t multi_payload_size_bytes_;
int last_send_time_;
int last_arrival_time_;
rtc::scoped_ptr<test::InputAudioFile> input_file_;
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_tests.gypi b/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_tests.gypi
index 4b040921629..50ebbd35ef2 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_tests.gypi
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_tests.gypi
@@ -7,25 +7,43 @@
# be found in the AUTHORS file in the root of the source tree.
{
+ 'conditions': [
+ ['enable_protobuf==1', {
+ 'targets': [
+ {
+ 'target_name': 'rtc_event_log_source',
+ 'type': 'static_library',
+ 'dependencies': [
+ '<(webrtc_root)/webrtc.gyp:rtc_event_log',
+ '<(webrtc_root)/webrtc.gyp:rtc_event_log_proto',
+ ],
+ 'sources': [
+ 'tools/rtc_event_log_source.h',
+ 'tools/rtc_event_log_source.cc',
+ ],
+ },
+ {
+ 'target_name': 'neteq_rtpplay',
+ 'type': 'executable',
+ 'dependencies': [
+ '<(DEPTH)/third_party/gflags/gflags.gyp:gflags',
+ '<(webrtc_root)/test/test.gyp:test_support_main',
+ 'rtc_event_log_source',
+ 'neteq',
+ 'neteq_unittest_tools',
+ 'pcm16b',
+ ],
+ 'sources': [
+ 'tools/neteq_rtpplay.cc',
+ ],
+ 'defines': [
+ ],
+ }, # neteq_rtpplay
+ ],
+ }],
+ ],
'targets': [
{
- 'target_name': 'neteq_rtpplay',
- 'type': 'executable',
- 'dependencies': [
- '<(DEPTH)/third_party/gflags/gflags.gyp:gflags',
- '<(webrtc_root)/test/test.gyp:test_support_main',
- 'neteq',
- 'neteq_unittest_tools',
- 'pcm16b',
- ],
- 'sources': [
- 'tools/neteq_rtpplay.cc',
- ],
- 'defines': [
- ],
- }, # neteq_rtpplay
-
- {
'target_name': 'RTPencode',
'type': 'executable',
'dependencies': [
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_unittest.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_unittest.cc
index 7137a685aa1..6dfcac0d3a3 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_unittest.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_unittest.cc
@@ -37,16 +37,16 @@ DEFINE_bool(gen_ref, false, "Generate reference files.");
namespace webrtc {
-static bool IsAllZero(const int16_t* buf, int buf_length) {
+static bool IsAllZero(const int16_t* buf, size_t buf_length) {
bool all_zero = true;
- for (int n = 0; n < buf_length && all_zero; ++n)
+ for (size_t n = 0; n < buf_length && all_zero; ++n)
all_zero = buf[n] == 0;
return all_zero;
}
-static bool IsAllNonZero(const int16_t* buf, int buf_length) {
+static bool IsAllNonZero(const int16_t* buf, size_t buf_length) {
bool all_non_zero = true;
- for (int n = 0; n < buf_length && all_non_zero; ++n)
+ for (size_t n = 0; n < buf_length && all_non_zero; ++n)
all_non_zero = buf[n] != 0;
return all_non_zero;
}
@@ -172,7 +172,8 @@ void RefFiles::ReadFromFileAndCompare(
ASSERT_EQ(stats.preemptive_rate, ref_stats.preemptive_rate);
ASSERT_EQ(stats.accelerate_rate, ref_stats.accelerate_rate);
ASSERT_EQ(stats.clockdrift_ppm, ref_stats.clockdrift_ppm);
- ASSERT_EQ(stats.added_zero_samples, ref_stats.added_zero_samples);
+ ASSERT_EQ(stats.added_zero_samples,
+ static_cast<size_t>(ref_stats.added_zero_samples));
ASSERT_EQ(stats.secondary_decoded_rate, 0);
ASSERT_LE(stats.speech_expand_rate, ref_stats.expand_rate);
}
@@ -220,9 +221,9 @@ class NetEqDecodingTest : public ::testing::Test {
// NetEQ must be polled for data once every 10 ms. Thus, neither of the
// constants below can be changed.
static const int kTimeStepMs = 10;
- static const int kBlockSize8kHz = kTimeStepMs * 8;
- static const int kBlockSize16kHz = kTimeStepMs * 16;
- static const int kBlockSize32kHz = kTimeStepMs * 32;
+ static const size_t kBlockSize8kHz = kTimeStepMs * 8;
+ static const size_t kBlockSize16kHz = kTimeStepMs * 16;
+ static const size_t kBlockSize32kHz = kTimeStepMs * 32;
static const size_t kMaxBlockSize = kBlockSize32kHz;
static const int kInitSampleRateHz = 8000;
@@ -232,7 +233,7 @@ class NetEqDecodingTest : public ::testing::Test {
void SelectDecoders(NetEqDecoder* used_codec);
void LoadDecoders();
void OpenInputFile(const std::string &rtp_file);
- void Process(int* out_len);
+ void Process(size_t* out_len);
void DecodeAndCompare(const std::string& rtp_file,
const std::string& ref_file,
const std::string& stat_ref_file,
@@ -272,9 +273,9 @@ class NetEqDecodingTest : public ::testing::Test {
// Allocating the static const so that it can be passed by reference.
const int NetEqDecodingTest::kTimeStepMs;
-const int NetEqDecodingTest::kBlockSize8kHz;
-const int NetEqDecodingTest::kBlockSize16kHz;
-const int NetEqDecodingTest::kBlockSize32kHz;
+const size_t NetEqDecodingTest::kBlockSize8kHz;
+const size_t NetEqDecodingTest::kBlockSize16kHz;
+const size_t NetEqDecodingTest::kBlockSize32kHz;
const size_t NetEqDecodingTest::kMaxBlockSize;
const int NetEqDecodingTest::kInitSampleRateHz;
@@ -306,18 +307,20 @@ void NetEqDecodingTest::LoadDecoders() {
ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderPCMu, 0));
// Load PCMa.
ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderPCMa, 8));
-#ifndef WEBRTC_ANDROID
+#ifdef WEBRTC_CODEC_ILBC
// Load iLBC.
ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderILBC, 102));
-#endif // WEBRTC_ANDROID
+#endif
+#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
// Load iSAC.
ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderISAC, 103));
-#ifndef WEBRTC_ANDROID
+#endif
+#ifdef WEBRTC_CODEC_ISAC
// Load iSAC SWB.
ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderISACswb, 104));
// Load iSAC FB.
ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderISACfb, 105));
-#endif // WEBRTC_ANDROID
+#endif
// Load PCM16B nb.
ASSERT_EQ(0, neteq_->RegisterPayloadType(kDecoderPCM16B, 93));
// Load PCM16B wb.
@@ -334,7 +337,7 @@ void NetEqDecodingTest::OpenInputFile(const std::string &rtp_file) {
rtp_source_.reset(test::RtpFileSource::Create(rtp_file));
}
-void NetEqDecodingTest::Process(int* out_len) {
+void NetEqDecodingTest::Process(size_t* out_len) {
// Check if time to receive.
while (packet_ && sim_clock_ >= packet_->time_ms()) {
if (packet_->payload_length_bytes() > 0) {
@@ -358,7 +361,7 @@ void NetEqDecodingTest::Process(int* out_len) {
ASSERT_TRUE((*out_len == kBlockSize8kHz) ||
(*out_len == kBlockSize16kHz) ||
(*out_len == kBlockSize32kHz));
- output_sample_rate_ = *out_len / 10 * 1000;
+ output_sample_rate_ = static_cast<int>(*out_len / 10 * 1000);
// Increase time.
sim_clock_ += kTimeStepMs;
@@ -394,7 +397,7 @@ void NetEqDecodingTest::DecodeAndCompare(const std::string& rtp_file,
std::ostringstream ss;
ss << "Lap number " << i++ << " in DecodeAndCompare while loop";
SCOPED_TRACE(ss.str()); // Print out the parameter values on failure.
- int out_len = 0;
+ size_t out_len = 0;
ASSERT_NO_FATAL_FAILURE(Process(&out_len));
ASSERT_NO_FATAL_FAILURE(ref_files.ProcessReference(out_data_, out_len));
@@ -405,6 +408,8 @@ void NetEqDecodingTest::DecodeAndCompare(const std::string& rtp_file,
ASSERT_EQ(0, neteq_->NetworkStatistics(&network_stats));
ASSERT_NO_FATAL_FAILURE(
network_stat_files.ProcessReference(network_stats));
+ // Compare with CurrentDelay, which should be identical.
+ EXPECT_EQ(network_stats.current_buffer_size_ms, neteq_->CurrentDelayMs());
// Process RTCPstat.
RtcpStatistics rtcp_stats;
@@ -438,8 +443,15 @@ void NetEqDecodingTest::PopulateCng(int frame_index,
*payload_len = 1; // Only noise level, no spectral parameters.
}
+#if (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)) && \
+ defined(WEBRTC_CODEC_ILBC) && defined(WEBRTC_CODEC_G722)
+#define IF_ALL_CODECS(x) x
+#else
+#define IF_ALL_CODECS(x) DISABLED_##x
+#endif
+
TEST_F(NetEqDecodingTest,
- DISABLED_ON_IOS(DISABLED_ON_ANDROID(TestBitExactness))) {
+ DISABLED_ON_IOS(DISABLED_ON_ANDROID(IF_ALL_CODECS(TestBitExactness)))) {
const std::string input_rtp_file = webrtc::test::ProjectRootPath() +
"resources/audio_coding/neteq_universal_new.rtp";
// Note that neteq4_universal_ref.pcm and neteq4_universal_ref_win_32.pcm
@@ -498,7 +510,7 @@ TEST_F(NetEqDecodingTestFaxMode, TestFrameWaitingTimeStatistics) {
}
// Pull out all data.
for (size_t i = 0; i < num_frames; ++i) {
- int out_len;
+ size_t out_len;
int num_channels;
NetEqOutputType type;
ASSERT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_, &out_len,
@@ -506,46 +518,23 @@ TEST_F(NetEqDecodingTestFaxMode, TestFrameWaitingTimeStatistics) {
ASSERT_EQ(kBlockSize16kHz, out_len);
}
- std::vector<int> waiting_times;
- neteq_->WaitingTimes(&waiting_times);
- EXPECT_EQ(num_frames, waiting_times.size());
+ NetEqNetworkStatistics stats;
+ EXPECT_EQ(0, neteq_->NetworkStatistics(&stats));
// Since all frames are dumped into NetEQ at once, but pulled out with 10 ms
// spacing (per definition), we expect the delay to increase with 10 ms for
- // each packet.
- for (size_t i = 0; i < waiting_times.size(); ++i) {
- EXPECT_EQ(static_cast<int>(i + 1) * 10, waiting_times[i]);
- }
+ // each packet. Thus, we are calculating the statistics for a series from 10
+ // to 300, in steps of 10 ms.
+ EXPECT_EQ(155, stats.mean_waiting_time_ms);
+ EXPECT_EQ(155, stats.median_waiting_time_ms);
+ EXPECT_EQ(10, stats.min_waiting_time_ms);
+ EXPECT_EQ(300, stats.max_waiting_time_ms);
// Check statistics again and make sure it's been reset.
- neteq_->WaitingTimes(&waiting_times);
- int len = waiting_times.size();
- EXPECT_EQ(0, len);
-
- // Process > 100 frames, and make sure that that we get statistics
- // only for 100 frames. Note the new SSRC, causing NetEQ to reset.
- num_frames = 110;
- for (size_t i = 0; i < num_frames; ++i) {
- uint16_t payload[kSamples] = {0};
- WebRtcRTPHeader rtp_info;
- rtp_info.header.sequenceNumber = i;
- rtp_info.header.timestamp = i * kSamples;
- rtp_info.header.ssrc = 0x1235; // Just an arbitrary SSRC.
- rtp_info.header.payloadType = 94; // PCM16b WB codec.
- rtp_info.header.markerBit = 0;
- ASSERT_EQ(0, neteq_->InsertPacket(
- rtp_info,
- reinterpret_cast<uint8_t*>(payload),
- kPayloadBytes, 0));
- int out_len;
- int num_channels;
- NetEqOutputType type;
- ASSERT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_, &out_len,
- &num_channels, &type));
- ASSERT_EQ(kBlockSize16kHz, out_len);
- }
-
- neteq_->WaitingTimes(&waiting_times);
- EXPECT_EQ(100u, waiting_times.size());
+ EXPECT_EQ(0, neteq_->NetworkStatistics(&stats));
+ EXPECT_EQ(-1, stats.mean_waiting_time_ms);
+ EXPECT_EQ(-1, stats.median_waiting_time_ms);
+ EXPECT_EQ(-1, stats.min_waiting_time_ms);
+ EXPECT_EQ(-1, stats.max_waiting_time_ms);
}
TEST_F(NetEqDecodingTest, TestAverageInterArrivalTimeNegative) {
@@ -566,7 +555,7 @@ TEST_F(NetEqDecodingTest, TestAverageInterArrivalTimeNegative) {
}
// Pull out data once.
- int out_len;
+ size_t out_len;
int num_channels;
NetEqOutputType type;
ASSERT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_, &out_len,
@@ -597,7 +586,7 @@ TEST_F(NetEqDecodingTest, TestAverageInterArrivalTimePositive) {
}
// Pull out data once.
- int out_len;
+ size_t out_len;
int num_channels;
NetEqOutputType type;
ASSERT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_, &out_len,
@@ -622,7 +611,7 @@ void NetEqDecodingTest::LongCngWithClockDrift(double drift_factor,
const size_t kPayloadBytes = kSamples * 2;
double next_input_time_ms = 0.0;
double t_ms;
- int out_len;
+ size_t out_len;
int num_channels;
NetEqOutputType type;
@@ -840,7 +829,13 @@ TEST_F(NetEqDecodingTest, UnknownPayloadType) {
EXPECT_EQ(NetEq::kUnknownRtpPayloadType, neteq_->LastError());
}
-TEST_F(NetEqDecodingTest, DISABLED_ON_ANDROID(DecoderError)) {
+#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
+#define IF_ISAC(x) x
+#else
+#define IF_ISAC(x) DISABLED_##x
+#endif
+
+TEST_F(NetEqDecodingTest, DISABLED_ON_ANDROID(IF_ISAC(DecoderError))) {
const size_t kPayloadBytes = 100;
uint8_t payload[kPayloadBytes] = {0};
WebRtcRTPHeader rtp_info;
@@ -854,7 +849,7 @@ TEST_F(NetEqDecodingTest, DISABLED_ON_ANDROID(DecoderError)) {
out_data_[i] = 1;
}
int num_channels;
- int samples_per_channel;
+ size_t samples_per_channel;
EXPECT_EQ(NetEq::kFail,
neteq_->GetAudio(kMaxBlockSize, out_data_,
&samples_per_channel, &num_channels, &type));
@@ -887,7 +882,7 @@ TEST_F(NetEqDecodingTest, GetAudioBeforeInsertPacket) {
out_data_[i] = 1;
}
int num_channels;
- int samples_per_channel;
+ size_t samples_per_channel;
EXPECT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_,
&samples_per_channel,
&num_channels, &type));
@@ -908,7 +903,7 @@ class NetEqBgnTest : public NetEqDecodingTest {
bool should_be_faded) = 0;
void CheckBgn(int sampling_rate_hz) {
- int16_t expected_samples_per_channel = 0;
+ size_t expected_samples_per_channel = 0;
uint8_t payload_type = 0xFF; // Invalid.
if (sampling_rate_hz == 8000) {
expected_samples_per_channel = kBlockSize8kHz;
@@ -932,7 +927,7 @@ class NetEqBgnTest : public NetEqDecodingTest {
ASSERT_TRUE(input.Init(
webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm"),
10 * sampling_rate_hz, // Max 10 seconds loop length.
- static_cast<size_t>(expected_samples_per_channel)));
+ expected_samples_per_channel));
// Payload of 10 ms of PCM16 32 kHz.
uint8_t payload[kBlockSize32kHz * sizeof(int16_t)];
@@ -941,19 +936,18 @@ class NetEqBgnTest : public NetEqDecodingTest {
rtp_info.header.payloadType = payload_type;
int number_channels = 0;
- int samples_per_channel = 0;
+ size_t samples_per_channel = 0;
uint32_t receive_timestamp = 0;
for (int n = 0; n < 10; ++n) { // Insert few packets and get audio.
- int16_t enc_len_bytes = WebRtcPcm16b_Encode(
+ size_t enc_len_bytes = WebRtcPcm16b_Encode(
input.GetNextBlock(), expected_samples_per_channel, payload);
ASSERT_EQ(enc_len_bytes, expected_samples_per_channel * 2);
number_channels = 0;
samples_per_channel = 0;
ASSERT_EQ(0,
- neteq_->InsertPacket(rtp_info, payload,
- static_cast<size_t>(enc_len_bytes),
+ neteq_->InsertPacket(rtp_info, payload, enc_len_bytes,
receive_timestamp));
ASSERT_EQ(0,
neteq_->GetAudio(kBlockSize32kHz,
@@ -1009,7 +1003,7 @@ class NetEqBgnTest : public NetEqDecodingTest {
if (type == kOutputPLCtoCNG) {
plc_to_cng = true;
double sum_squared = 0;
- for (int k = 0; k < number_channels * samples_per_channel; ++k)
+ for (size_t k = 0; k < number_channels * samples_per_channel; ++k)
sum_squared += output[k] * output[k];
TestCondition(sum_squared, n > kFadingThreshold);
} else {
@@ -1072,7 +1066,7 @@ TEST_F(NetEqBgnTestFade, RunTest) {
CheckBgn(32000);
}
-TEST_F(NetEqDecodingTest, SyncPacketInsert) {
+TEST_F(NetEqDecodingTest, IF_ISAC(SyncPacketInsert)) {
WebRtcRTPHeader rtp_info;
uint32_t receive_timestamp = 0;
// For the readability use the following payloads instead of the defaults of
@@ -1168,7 +1162,7 @@ TEST_F(NetEqDecodingTest, SyncPacketDecode) {
// actual decoded values.
NetEqOutputType output_type;
int num_channels;
- int samples_per_channel;
+ size_t samples_per_channel;
uint32_t receive_timestamp = 0;
for (int n = 0; n < 100; ++n) {
ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, kPayloadBytes,
@@ -1246,7 +1240,7 @@ TEST_F(NetEqDecodingTest, SyncPacketBufferSizeAndOverridenByNetworkPackets) {
// actual decoded values.
NetEqOutputType output_type;
int num_channels;
- int samples_per_channel;
+ size_t samples_per_channel;
uint32_t receive_timestamp = 0;
int algorithmic_frame_delay = algorithmic_delay_ms_ / 10 + 1;
for (int n = 0; n < algorithmic_frame_delay; ++n) {
@@ -1315,7 +1309,7 @@ void NetEqDecodingTest::WrapTest(uint16_t start_seq_no,
double next_input_time_ms = 0.0;
int16_t decoded[kBlockSize16kHz];
int num_channels;
- int samples_per_channel;
+ size_t samples_per_channel;
NetEqOutputType output_type;
uint32_t receive_timestamp = 0;
@@ -1418,7 +1412,7 @@ void NetEqDecodingTest::DuplicateCng() {
algorithmic_delay_ms_ * kSampleRateKhz, 5 * kSampleRateKhz / 8);
// Insert three speech packets. Three are needed to get the frame length
// correct.
- int out_len;
+ size_t out_len;
int num_channels;
NetEqOutputType type;
uint8_t payload[kPayloadBytes] = {0};
@@ -1515,7 +1509,7 @@ TEST_F(NetEqDecodingTest, CngFirst) {
timestamp += kCngPeriodSamples;
// Pull audio once and make sure CNG is played.
- int out_len;
+ size_t out_len;
int num_channels;
NetEqOutputType type;
ASSERT_EQ(0, neteq_->GetAudio(kMaxBlockSize, out_data_, &out_len,
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/normal.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/normal.cc
index bf455c974c3..ebecbf94bde 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/normal.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/normal.cc
@@ -45,12 +45,12 @@ int Normal::Process(const int16_t* input,
output->PushBackInterleaved(input, length);
int16_t* signal = &(*output)[0][0];
- const unsigned fs_mult = fs_hz_ / 8000;
+ const int fs_mult = fs_hz_ / 8000;
assert(fs_mult > 0);
// fs_shift = log2(fs_mult), rounded down.
// Note that |fs_shift| is not "exact" for 48 kHz.
// TODO(hlundin): Investigate this further.
- const int fs_shift = 30 - WebRtcSpl_NormW32(static_cast<int32_t>(fs_mult));
+ const int fs_shift = 30 - WebRtcSpl_NormW32(fs_mult);
// Check if last RecOut call resulted in an Expand. If so, we have to take
// care of some cross-fading and unmuting.
@@ -73,11 +73,11 @@ int Normal::Process(const int16_t* input,
int16_t* signal = &(*output)[channel_ix][0];
size_t length_per_channel = length / output->Channels();
// Find largest absolute value in new data.
- int16_t decoded_max = WebRtcSpl_MaxAbsValueW16(
- signal, static_cast<int>(length_per_channel));
+ int16_t decoded_max =
+ WebRtcSpl_MaxAbsValueW16(signal, length_per_channel);
// Adjust muting factor if needed (to BGN level).
- int energy_length = std::min(static_cast<int>(fs_mult * 64),
- static_cast<int>(length_per_channel));
+ size_t energy_length =
+ std::min(static_cast<size_t>(fs_mult * 64), length_per_channel);
int scaling = 6 + fs_shift
- WebRtcSpl_NormW32(decoded_max * decoded_max);
scaling = std::max(scaling, 0); // |scaling| should always be >= 0.
@@ -111,7 +111,7 @@ int Normal::Process(const int16_t* input,
}
// If muted increase by 0.64 for every 20 ms (NB/WB 0.0040/0.0020 in Q14).
- int increment = static_cast<int>(64 / fs_mult);
+ int increment = 64 / fs_mult;
for (size_t i = 0; i < length_per_channel; i++) {
// Scale with mute factor.
assert(channel_ix < output->Channels());
@@ -131,7 +131,7 @@ int Normal::Process(const int16_t* input,
assert(fs_shift < 3); // Will always be 0, 1, or, 2.
increment = 4 >> fs_shift;
int fraction = increment;
- for (size_t i = 0; i < 8 * fs_mult; i++) {
+ for (size_t i = 0; i < static_cast<size_t>(8 * fs_mult); i++) {
// TODO(hlundin): Add 16 instead of 8 for correct rounding. Keeping 8
// now for legacy bit-exactness.
assert(channel_ix < output->Channels());
@@ -144,7 +144,7 @@ int Normal::Process(const int16_t* input,
}
} else if (last_mode == kModeRfc3389Cng) {
assert(output->Channels() == 1); // Not adapted for multi-channel yet.
- static const int kCngLength = 32;
+ static const size_t kCngLength = 32;
int16_t cng_output[kCngLength];
// Reset mute factor and start up fresh.
external_mute_factor_array[0] = 16384;
@@ -167,7 +167,7 @@ int Normal::Process(const int16_t* input,
assert(fs_shift < 3); // Will always be 0, 1, or, 2.
int16_t increment = 4 >> fs_shift;
int16_t fraction = increment;
- for (size_t i = 0; i < 8 * fs_mult; i++) {
+ for (size_t i = 0; i < static_cast<size_t>(8 * fs_mult); i++) {
// TODO(hlundin): Add 16 instead of 8 for correct rounding. Keeping 8 now
// for legacy bit-exactness.
signal[i] =
@@ -178,7 +178,7 @@ int Normal::Process(const int16_t* input,
// Previous was neither of Expand, FadeToBGN or RFC3389_CNG, but we are
// still ramping up from previous muting.
// If muted increase by 0.64 for every 20 ms (NB/WB 0.0040/0.0020 in Q14).
- int increment = static_cast<int>(64 / fs_mult);
+ int increment = 64 / fs_mult;
size_t length_per_channel = length / output->Channels();
for (size_t i = 0; i < length_per_channel; i++) {
for (size_t channel_ix = 0; channel_ix < output->Channels();
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/normal.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/normal.h
index aa24b528af4..23887f51343 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/normal.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/normal.h
@@ -61,7 +61,7 @@ class Normal {
const BackgroundNoise& background_noise_;
Expand* expand_;
- DISALLOW_COPY_AND_ASSIGN(Normal);
+ RTC_DISALLOW_COPY_AND_ASSIGN(Normal);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/normal_unittest.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/normal_unittest.cc
index 796409b25d6..1ac32f46a7c 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/normal_unittest.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/normal_unittest.cc
@@ -23,6 +23,7 @@
#include "webrtc/modules/audio_coding/neteq/mock/mock_decoder_database.h"
#include "webrtc/modules/audio_coding/neteq/mock/mock_expand.h"
#include "webrtc/modules/audio_coding/neteq/random_vector.h"
+#include "webrtc/modules/audio_coding/neteq/statistics_calculator.h"
#include "webrtc/modules/audio_coding/neteq/sync_buffer.h"
using ::testing::_;
@@ -36,7 +37,8 @@ TEST(Normal, CreateAndDestroy) {
BackgroundNoise bgn(channels);
SyncBuffer sync_buffer(1, 1000);
RandomVector random_vector;
- Expand expand(&bgn, &sync_buffer, &random_vector, fs, channels);
+ StatisticsCalculator statistics;
+ Expand expand(&bgn, &sync_buffer, &random_vector, &statistics, fs, channels);
Normal normal(fs, &db, bgn, &expand);
EXPECT_CALL(db, Die()); // Called when |db| goes out of scope.
}
@@ -49,7 +51,9 @@ TEST(Normal, AvoidDivideByZero) {
BackgroundNoise bgn(channels);
SyncBuffer sync_buffer(1, 1000);
RandomVector random_vector;
- MockExpand expand(&bgn, &sync_buffer, &random_vector, fs, channels);
+ StatisticsCalculator statistics;
+ MockExpand expand(&bgn, &sync_buffer, &random_vector, &statistics, fs,
+ channels);
Normal normal(fs, &db, bgn, &expand);
int16_t input[1000] = {0};
@@ -93,7 +97,9 @@ TEST(Normal, InputLengthAndChannelsDoNotMatch) {
BackgroundNoise bgn(channels);
SyncBuffer sync_buffer(channels, 1000);
RandomVector random_vector;
- MockExpand expand(&bgn, &sync_buffer, &random_vector, fs, channels);
+ StatisticsCalculator statistics;
+ MockExpand expand(&bgn, &sync_buffer, &random_vector, &statistics, fs,
+ channels);
Normal normal(fs, &db, bgn, &expand);
int16_t input[1000] = {0};
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/packet_buffer.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/packet_buffer.cc
index 5792b227ef8..c89de12318b 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/packet_buffer.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/packet_buffer.cc
@@ -16,6 +16,7 @@
#include <algorithm> // find_if()
+#include "webrtc/base/logging.h"
#include "webrtc/modules/audio_coding/codecs/audio_decoder.h"
#include "webrtc/modules/audio_coding/neteq/decoder_database.h"
@@ -58,6 +59,7 @@ int PacketBuffer::InsertPacket(Packet* packet) {
if (packet) {
delete packet;
}
+ LOG(LS_WARNING) << "InsertPacket invalid packet";
return kInvalidPacket;
}
@@ -66,6 +68,7 @@ int PacketBuffer::InsertPacket(Packet* packet) {
if (buffer_.size() >= max_number_of_packets_) {
// Buffer is full. Flush it.
Flush();
+ LOG(LS_WARNING) << "Packet buffer flushed";
return_val = kFlushed;
}
@@ -178,7 +181,7 @@ const RTPHeader* PacketBuffer::NextRtpHeader() const {
return const_cast<const RTPHeader*>(&(buffer_.front()->header));
}
-Packet* PacketBuffer::GetNextPacket(int* discard_count) {
+Packet* PacketBuffer::GetNextPacket(size_t* discard_count) {
if (Empty()) {
// Buffer is empty.
return NULL;
@@ -191,7 +194,7 @@ Packet* PacketBuffer::GetNextPacket(int* discard_count) {
// Discard other packets with the same timestamp. These are duplicates or
// redundant payloads that should not be used.
- int discards = 0;
+ size_t discards = 0;
while (!Empty() &&
buffer_.front()->header.timestamp == packet->header.timestamp) {
@@ -237,15 +240,15 @@ int PacketBuffer::DiscardAllOldPackets(uint32_t timestamp_limit) {
return DiscardOldPackets(timestamp_limit, 0);
}
-int PacketBuffer::NumPacketsInBuffer() const {
- return static_cast<int>(buffer_.size());
+size_t PacketBuffer::NumPacketsInBuffer() const {
+ return buffer_.size();
}
-int PacketBuffer::NumSamplesInBuffer(DecoderDatabase* decoder_database,
- int last_decoded_length) const {
+size_t PacketBuffer::NumSamplesInBuffer(DecoderDatabase* decoder_database,
+ size_t last_decoded_length) const {
PacketList::const_iterator it;
- int num_samples = 0;
- int last_duration = last_decoded_length;
+ size_t num_samples = 0;
+ size_t last_duration = last_decoded_length;
for (it = buffer_.begin(); it != buffer_.end(); ++it) {
Packet* packet = (*it);
AudioDecoder* decoder =
@@ -255,7 +258,7 @@ int PacketBuffer::NumSamplesInBuffer(DecoderDatabase* decoder_database,
continue;
}
int duration =
- decoder->PacketDuration(packet->payload, packet->payload_length);
+ decoder->PacketDuration(packet->payload, packet->payload_length);
if (duration >= 0) {
last_duration = duration; // Save the most up-to-date (valid) duration.
}
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/packet_buffer.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/packet_buffer.h
index d2d429b9e5c..03c11e61b6e 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/packet_buffer.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/packet_buffer.h
@@ -88,7 +88,7 @@ class PacketBuffer {
// Subsequent packets with the same timestamp as the one extracted will be
// discarded and properly deleted. The number of discarded packets will be
// written to the output variable |discard_count|.
- virtual Packet* GetNextPacket(int* discard_count);
+ virtual Packet* GetNextPacket(size_t* discard_count);
// Discards the first packet in the buffer. The packet is deleted.
// Returns PacketBuffer::kBufferEmpty if the buffer is empty,
@@ -109,12 +109,12 @@ class PacketBuffer {
// Returns the number of packets in the buffer, including duplicates and
// redundant packets.
- virtual int NumPacketsInBuffer() const;
+ virtual size_t NumPacketsInBuffer() const;
// Returns the number of samples in the buffer, including samples carried in
// duplicate and redundant packets.
- virtual int NumSamplesInBuffer(DecoderDatabase* decoder_database,
- int last_decoded_length) const;
+ virtual size_t NumSamplesInBuffer(DecoderDatabase* decoder_database,
+ size_t last_decoded_length) const;
// Increase the waiting time counter for every packet in the buffer by |inc|.
// The default value for |inc| is 1.
@@ -148,7 +148,7 @@ class PacketBuffer {
private:
size_t max_number_of_packets_;
PacketList buffer_;
- DISALLOW_COPY_AND_ASSIGN(PacketBuffer);
+ RTC_DISALLOW_COPY_AND_ASSIGN(PacketBuffer);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/packet_buffer_unittest.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/packet_buffer_unittest.cc
index 61a8ee1215f..435b6c848dc 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/packet_buffer_unittest.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/packet_buffer_unittest.cc
@@ -97,7 +97,7 @@ TEST(PacketBuffer, InsertPacket) {
EXPECT_EQ(PacketBuffer::kOK, buffer.NextTimestamp(&next_ts));
EXPECT_EQ(4711u, next_ts);
EXPECT_FALSE(buffer.Empty());
- EXPECT_EQ(1, buffer.NumPacketsInBuffer());
+ EXPECT_EQ(1u, buffer.NumPacketsInBuffer());
const RTPHeader* hdr = buffer.NextRtpHeader();
EXPECT_EQ(&(packet->header), hdr); // Compare pointer addresses.
@@ -116,12 +116,12 @@ TEST(PacketBuffer, FlushBuffer) {
Packet* packet = gen.NextPacket(payload_len);
EXPECT_EQ(PacketBuffer::kOK, buffer.InsertPacket(packet));
}
- EXPECT_EQ(10, buffer.NumPacketsInBuffer());
+ EXPECT_EQ(10u, buffer.NumPacketsInBuffer());
EXPECT_FALSE(buffer.Empty());
buffer.Flush();
// Buffer should delete the payloads itself.
- EXPECT_EQ(0, buffer.NumPacketsInBuffer());
+ EXPECT_EQ(0u, buffer.NumPacketsInBuffer());
EXPECT_TRUE(buffer.Empty());
}
@@ -137,7 +137,7 @@ TEST(PacketBuffer, OverfillBuffer) {
Packet* packet = gen.NextPacket(payload_len);
EXPECT_EQ(PacketBuffer::kOK, buffer.InsertPacket(packet));
}
- EXPECT_EQ(10, buffer.NumPacketsInBuffer());
+ EXPECT_EQ(10u, buffer.NumPacketsInBuffer());
uint32_t next_ts;
EXPECT_EQ(PacketBuffer::kOK, buffer.NextTimestamp(&next_ts));
EXPECT_EQ(0u, next_ts); // Expect first inserted packet to be first in line.
@@ -145,7 +145,7 @@ TEST(PacketBuffer, OverfillBuffer) {
// Insert 11th packet; should flush the buffer and insert it after flushing.
Packet* packet = gen.NextPacket(payload_len);
EXPECT_EQ(PacketBuffer::kFlushed, buffer.InsertPacket(packet));
- EXPECT_EQ(1, buffer.NumPacketsInBuffer());
+ EXPECT_EQ(1u, buffer.NumPacketsInBuffer());
EXPECT_EQ(PacketBuffer::kOK, buffer.NextTimestamp(&next_ts));
// Expect last inserted packet to be first in line.
EXPECT_EQ(packet->header.timestamp, next_ts);
@@ -179,7 +179,7 @@ TEST(PacketBuffer, InsertPacketList) {
&current_pt,
&current_cng_pt));
EXPECT_TRUE(list.empty()); // The PacketBuffer should have depleted the list.
- EXPECT_EQ(10, buffer.NumPacketsInBuffer());
+ EXPECT_EQ(10u, buffer.NumPacketsInBuffer());
EXPECT_EQ(0, current_pt); // Current payload type changed to 0.
EXPECT_EQ(0xFF, current_cng_pt); // CNG payload type not changed.
@@ -220,7 +220,7 @@ TEST(PacketBuffer, InsertPacketListChangePayloadType) {
&current_pt,
&current_cng_pt));
EXPECT_TRUE(list.empty()); // The PacketBuffer should have depleted the list.
- EXPECT_EQ(1, buffer.NumPacketsInBuffer()); // Only the last packet.
+ EXPECT_EQ(1u, buffer.NumPacketsInBuffer()); // Only the last packet.
EXPECT_EQ(1, current_pt); // Current payload type changed to 0.
EXPECT_EQ(0xFF, current_cng_pt); // CNG payload type not changed.
@@ -256,7 +256,7 @@ TEST(PacketBuffer, ExtractOrderRedundancy) {
{0x0006, 0x0000001E, 1, false, -1},
};
- const int kExpectPacketsInBuffer = 9;
+ const size_t kExpectPacketsInBuffer = 9;
std::vector<Packet*> expect_order(kExpectPacketsInBuffer);
@@ -277,10 +277,10 @@ TEST(PacketBuffer, ExtractOrderRedundancy) {
EXPECT_EQ(kExpectPacketsInBuffer, buffer.NumPacketsInBuffer());
- int drop_count;
- for (int i = 0; i < kExpectPacketsInBuffer; ++i) {
+ size_t drop_count;
+ for (size_t i = 0; i < kExpectPacketsInBuffer; ++i) {
Packet* packet = buffer.GetNextPacket(&drop_count);
- EXPECT_EQ(0, drop_count);
+ EXPECT_EQ(0u, drop_count);
EXPECT_EQ(packet, expect_order[i]); // Compare pointer addresses.
delete[] packet->payload;
delete packet;
@@ -302,7 +302,7 @@ TEST(PacketBuffer, DiscardPackets) {
Packet* packet = gen.NextPacket(payload_len);
buffer.InsertPacket(packet);
}
- EXPECT_EQ(10, buffer.NumPacketsInBuffer());
+ EXPECT_EQ(10u, buffer.NumPacketsInBuffer());
// Discard them one by one and make sure that the right packets are at the
// front of the buffer.
@@ -350,7 +350,7 @@ TEST(PacketBuffer, Reordering) {
decoder_database,
&current_pt,
&current_cng_pt));
- EXPECT_EQ(10, buffer.NumPacketsInBuffer());
+ EXPECT_EQ(10u, buffer.NumPacketsInBuffer());
// Extract them and make sure that come out in the right order.
uint32_t current_ts = start_ts;
@@ -425,7 +425,7 @@ TEST(PacketBuffer, Failures) {
&current_pt,
&current_cng_pt));
EXPECT_TRUE(list.empty()); // The PacketBuffer should have depleted the list.
- EXPECT_EQ(1, buffer->NumPacketsInBuffer());
+ EXPECT_EQ(1u, buffer->NumPacketsInBuffer());
delete buffer;
EXPECT_CALL(decoder_database, Die()); // Called when object is deleted.
}
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/payload_splitter.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/payload_splitter.cc
index c19375b726b..2172eee1ec5 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/payload_splitter.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/payload_splitter.cc
@@ -12,6 +12,7 @@
#include <assert.h>
+#include "webrtc/base/logging.h"
#include "webrtc/modules/audio_coding/neteq/decoder_database.h"
namespace webrtc {
@@ -88,6 +89,7 @@ int PayloadSplitter::SplitRed(PacketList* packet_list) {
// The block lengths in the RED headers do not match the overall packet
// length. Something is corrupt. Discard this and the remaining
// payloads from this packet.
+ LOG(LS_WARNING) << "SplitRed length mismatch";
while (new_it != new_packets.end()) {
// Payload should not have been allocated yet.
assert(!(*new_it)->payload);
@@ -130,6 +132,7 @@ int PayloadSplitter::SplitFec(PacketList* packet_list,
const DecoderDatabase::DecoderInfo* info =
decoder_database->GetDecoderInfo(payload_type);
if (!info) {
+ LOG(LS_WARNING) << "SplitFec unknown payload type";
return kUnknownPayloadType;
}
// No splitting for a sync-packet.
@@ -171,6 +174,7 @@ int PayloadSplitter::SplitFec(PacketList* packet_list,
break;
}
default: {
+ LOG(LS_WARNING) << "SplitFec wrong payload type";
return kFecSplitError;
}
}
@@ -222,6 +226,7 @@ int PayloadSplitter::SplitAudio(PacketList* packet_list,
const DecoderDatabase::DecoderInfo* info =
decoder_database.GetDecoderInfo(packet->header.payloadType);
if (!info) {
+ LOG(LS_WARNING) << "SplitAudio unknown payload type";
return kUnknownPayloadType;
}
// No splitting for a sync-packet.
@@ -297,6 +302,7 @@ int PayloadSplitter::SplitAudio(PacketList* packet_list,
size_t bytes_per_frame;
int timestamps_per_frame;
if (packet->payload_length >= 950) {
+ LOG(LS_WARNING) << "SplitAudio too large iLBC payload";
return kTooLargePayload;
}
if (packet->payload_length % 38 == 0) {
@@ -308,6 +314,7 @@ int PayloadSplitter::SplitAudio(PacketList* packet_list,
bytes_per_frame = 50;
timestamps_per_frame = 240;
} else {
+ LOG(LS_WARNING) << "SplitAudio invalid iLBC payload";
return kFrameSplitError;
}
int ret = SplitByFrames(packet, bytes_per_frame, timestamps_per_frame,
@@ -402,6 +409,7 @@ int PayloadSplitter::SplitByFrames(const Packet* packet,
uint32_t timestamps_per_frame,
PacketList* new_packets) {
if (packet->payload_length % bytes_per_frame != 0) {
+ LOG(LS_WARNING) << "SplitByFrames length mismatch";
return kFrameSplitError;
}
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/payload_splitter.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/payload_splitter.h
index 6023d4e007f..b0c4b5fe5cd 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/payload_splitter.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/payload_splitter.h
@@ -83,7 +83,7 @@ class PayloadSplitter {
uint32_t timestamps_per_frame,
PacketList* new_packets);
- DISALLOW_COPY_AND_ASSIGN(PayloadSplitter);
+ RTC_DISALLOW_COPY_AND_ASSIGN(PayloadSplitter);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/post_decode_vad.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/post_decode_vad.cc
index 07496730cd0..714073ad104 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/post_decode_vad.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/post_decode_vad.cc
@@ -45,7 +45,7 @@ void PostDecodeVad::Init() {
}
}
-void PostDecodeVad::Update(int16_t* signal, int length,
+void PostDecodeVad::Update(int16_t* signal, size_t length,
AudioDecoder::SpeechType speech_type,
bool sid_frame,
int fs_hz) {
@@ -68,12 +68,13 @@ void PostDecodeVad::Update(int16_t* signal, int length,
}
if (length > 0 && running_) {
- int vad_sample_index = 0;
+ size_t vad_sample_index = 0;
active_speech_ = false;
// Loop through frame sizes 30, 20, and 10 ms.
for (int vad_frame_size_ms = 30; vad_frame_size_ms >= 10;
vad_frame_size_ms -= 10) {
- int vad_frame_size_samples = vad_frame_size_ms * fs_hz / 1000;
+ size_t vad_frame_size_samples =
+ static_cast<size_t>(vad_frame_size_ms * fs_hz / 1000);
while (length - vad_sample_index >= vad_frame_size_samples) {
int vad_return = WebRtcVad_Process(
vad_instance_, fs_hz, &signal[vad_sample_index],
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/post_decode_vad.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/post_decode_vad.h
index fa276aa41be..7bf5ad13834 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/post_decode_vad.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/post_decode_vad.h
@@ -46,7 +46,7 @@ class PostDecodeVad {
// Updates post-decode VAD with the audio data in |signal| having |length|
// samples. The data is of type |speech_type|, at the sample rate |fs_hz|.
- void Update(int16_t* signal, int length,
+ void Update(int16_t* signal, size_t length,
AudioDecoder::SpeechType speech_type, bool sid_frame, int fs_hz);
// Accessors.
@@ -65,7 +65,7 @@ class PostDecodeVad {
int sid_interval_counter_;
::VadInst* vad_instance_;
- DISALLOW_COPY_AND_ASSIGN(PostDecodeVad);
+ RTC_DISALLOW_COPY_AND_ASSIGN(PostDecodeVad);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/preemptive_expand.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/preemptive_expand.cc
index 6a3f8ecf1aa..f51a5bdbc51 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/preemptive_expand.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/preemptive_expand.cc
@@ -18,14 +18,14 @@ namespace webrtc {
PreemptiveExpand::ReturnCodes PreemptiveExpand::Process(
const int16_t* input,
- int input_length,
- int old_data_length,
+ size_t input_length,
+ size_t old_data_length,
AudioMultiVector* output,
- int16_t* length_change_samples) {
+ size_t* length_change_samples) {
old_data_length_per_channel_ = old_data_length;
// Input length must be (almost) 30 ms.
// Also, the new part must be at least |overlap_samples_| elements.
- static const int k15ms = 120; // 15 ms = 120 samples at 8 kHz sample rate.
+ static const size_t k15ms = 120; // 15 ms = 120 samples at 8 kHz sample rate.
if (num_channels_ == 0 ||
input_length / num_channels_ < (2 * k15ms - 1) * fs_mult_ ||
old_data_length >= input_length / num_channels_ - overlap_samples_) {
@@ -41,7 +41,7 @@ PreemptiveExpand::ReturnCodes PreemptiveExpand::Process(
void PreemptiveExpand::SetParametersForPassiveSpeech(size_t len,
int16_t* best_correlation,
- int* peak_index) const {
+ size_t* peak_index) const {
// When the signal does not contain any active speech, the correlation does
// not matter. Simply set it to zero.
*best_correlation = 0;
@@ -51,7 +51,7 @@ void PreemptiveExpand::SetParametersForPassiveSpeech(size_t len,
// the new data.
// but we must ensure that best_correlation is not larger than the new data.
*peak_index = std::min(*peak_index,
- static_cast<int>(len - old_data_length_per_channel_));
+ len - old_data_length_per_channel_);
}
PreemptiveExpand::ReturnCodes PreemptiveExpand::CheckCriteriaAndStretch(
@@ -64,8 +64,7 @@ PreemptiveExpand::ReturnCodes PreemptiveExpand::CheckCriteriaAndStretch(
AudioMultiVector* output) const {
// Pre-calculate common multiplication with |fs_mult_|.
// 120 corresponds to 15 ms.
- int fs_mult_120 = fs_mult_ * 120;
- assert(old_data_length_per_channel_ >= 0); // Make sure it's been set.
+ size_t fs_mult_120 = static_cast<size_t>(fs_mult_ * 120);
// Check for strong correlation (>0.9 in Q14) and at least 15 ms new data,
// or passive speech.
if (((best_correlation > kCorrelationThreshold) &&
@@ -107,7 +106,7 @@ PreemptiveExpand* PreemptiveExpandFactory::Create(
int sample_rate_hz,
size_t num_channels,
const BackgroundNoise& background_noise,
- int overlap_samples) const {
+ size_t overlap_samples) const {
return new PreemptiveExpand(
sample_rate_hz, num_channels, background_noise, overlap_samples);
}
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/preemptive_expand.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/preemptive_expand.h
index 65da7032b82..c4c236080c0 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/preemptive_expand.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/preemptive_expand.h
@@ -32,9 +32,9 @@ class PreemptiveExpand : public TimeStretch {
PreemptiveExpand(int sample_rate_hz,
size_t num_channels,
const BackgroundNoise& background_noise,
- int overlap_samples)
+ size_t overlap_samples)
: TimeStretch(sample_rate_hz, num_channels, background_noise),
- old_data_length_per_channel_(-1),
+ old_data_length_per_channel_(0),
overlap_samples_(overlap_samples) {
}
@@ -44,17 +44,17 @@ class PreemptiveExpand : public TimeStretch {
// is provided in the output |length_change_samples|. The method returns
// the outcome of the operation as an enumerator value.
ReturnCodes Process(const int16_t *pw16_decoded,
- int len,
- int old_data_len,
+ size_t len,
+ size_t old_data_len,
AudioMultiVector* output,
- int16_t* length_change_samples);
+ size_t* length_change_samples);
protected:
// Sets the parameters |best_correlation| and |peak_index| to suitable
// values when the signal contains no active speech.
void SetParametersForPassiveSpeech(size_t input_length,
int16_t* best_correlation,
- int* peak_index) const override;
+ size_t* peak_index) const override;
// Checks the criteria for performing the time-stretching operation and,
// if possible, performs the time-stretching.
@@ -67,10 +67,10 @@ class PreemptiveExpand : public TimeStretch {
AudioMultiVector* output) const override;
private:
- int old_data_length_per_channel_;
- int overlap_samples_;
+ size_t old_data_length_per_channel_;
+ size_t overlap_samples_;
- DISALLOW_COPY_AND_ASSIGN(PreemptiveExpand);
+ RTC_DISALLOW_COPY_AND_ASSIGN(PreemptiveExpand);
};
struct PreemptiveExpandFactory {
@@ -81,7 +81,7 @@ struct PreemptiveExpandFactory {
int sample_rate_hz,
size_t num_channels,
const BackgroundNoise& background_noise,
- int overlap_samples) const;
+ size_t overlap_samples) const;
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/random_vector.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/random_vector.h
index 767dc48eee3..61651e57c22 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/random_vector.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/random_vector.h
@@ -21,7 +21,7 @@ namespace webrtc {
// This class generates pseudo-random samples.
class RandomVector {
public:
- static const int kRandomTableSize = 256;
+ static const size_t kRandomTableSize = 256;
static const int16_t kRandomTable[kRandomTableSize];
RandomVector()
@@ -43,7 +43,7 @@ class RandomVector {
uint32_t seed_;
int16_t seed_increment_;
- DISALLOW_COPY_AND_ASSIGN(RandomVector);
+ RTC_DISALLOW_COPY_AND_ASSIGN(RandomVector);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/rtcp.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/rtcp.h
index 2a765efa588..5235397ba39 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/rtcp.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/rtcp.h
@@ -51,7 +51,7 @@ class Rtcp {
uint32_t jitter_; // Current jitter value.
int32_t transit_; // Clock difference for previous packet.
- DISALLOW_COPY_AND_ASSIGN(Rtcp);
+ RTC_DISALLOW_COPY_AND_ASSIGN(Rtcp);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/statistics_calculator.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/statistics_calculator.cc
index ce800dd065d..78c5e252582 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/statistics_calculator.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/statistics_calculator.cc
@@ -12,12 +12,97 @@
#include <assert.h>
#include <string.h> // memset
+#include <algorithm>
+#include "webrtc/base/checks.h"
+#include "webrtc/base/safe_conversions.h"
#include "webrtc/modules/audio_coding/neteq/decision_logic.h"
#include "webrtc/modules/audio_coding/neteq/delay_manager.h"
+#include "webrtc/system_wrappers/interface/metrics.h"
namespace webrtc {
+// Allocating the static const so that it can be passed by reference to
+// RTC_DCHECK.
+const size_t StatisticsCalculator::kLenWaitingTimes;
+
+StatisticsCalculator::PeriodicUmaLogger::PeriodicUmaLogger(
+ const std::string& uma_name,
+ int report_interval_ms,
+ int max_value)
+ : uma_name_(uma_name),
+ report_interval_ms_(report_interval_ms),
+ max_value_(max_value),
+ timer_(0) {
+}
+
+StatisticsCalculator::PeriodicUmaLogger::~PeriodicUmaLogger() = default;
+
+void StatisticsCalculator::PeriodicUmaLogger::AdvanceClock(int step_ms) {
+ timer_ += step_ms;
+ if (timer_ < report_interval_ms_) {
+ return;
+ }
+ LogToUma(Metric());
+ Reset();
+ timer_ -= report_interval_ms_;
+ RTC_DCHECK_GE(timer_, 0);
+}
+
+void StatisticsCalculator::PeriodicUmaLogger::LogToUma(int value) const {
+ RTC_HISTOGRAM_COUNTS(uma_name_, value, 1, max_value_, 50);
+}
+
+StatisticsCalculator::PeriodicUmaCount::PeriodicUmaCount(
+ const std::string& uma_name,
+ int report_interval_ms,
+ int max_value)
+ : PeriodicUmaLogger(uma_name, report_interval_ms, max_value) {
+}
+
+StatisticsCalculator::PeriodicUmaCount::~PeriodicUmaCount() {
+ // Log the count for the current (incomplete) interval.
+ LogToUma(Metric());
+}
+
+void StatisticsCalculator::PeriodicUmaCount::RegisterSample() {
+ ++counter_;
+}
+
+int StatisticsCalculator::PeriodicUmaCount::Metric() const {
+ return counter_;
+}
+
+void StatisticsCalculator::PeriodicUmaCount::Reset() {
+ counter_ = 0;
+}
+
+StatisticsCalculator::PeriodicUmaAverage::PeriodicUmaAverage(
+ const std::string& uma_name,
+ int report_interval_ms,
+ int max_value)
+ : PeriodicUmaLogger(uma_name, report_interval_ms, max_value) {
+}
+
+StatisticsCalculator::PeriodicUmaAverage::~PeriodicUmaAverage() {
+ // Log the average for the current (incomplete) interval.
+ LogToUma(Metric());
+}
+
+void StatisticsCalculator::PeriodicUmaAverage::RegisterSample(int value) {
+ sum_ += value;
+ ++counter_;
+}
+
+int StatisticsCalculator::PeriodicUmaAverage::Metric() const {
+ return static_cast<int>(sum_ / counter_);
+}
+
+void StatisticsCalculator::PeriodicUmaAverage::Reset() {
+ sum_ = 0.0;
+ counter_ = 0;
+}
+
StatisticsCalculator::StatisticsCalculator()
: preemptive_samples_(0),
accelerate_samples_(0),
@@ -27,12 +112,18 @@ StatisticsCalculator::StatisticsCalculator()
discarded_packets_(0),
lost_timestamps_(0),
timestamps_since_last_report_(0),
- len_waiting_times_(0),
- next_waiting_time_index_(0),
- secondary_decoded_samples_(0) {
- memset(waiting_times_, 0, kLenWaitingTimes * sizeof(waiting_times_[0]));
+ secondary_decoded_samples_(0),
+ delayed_packet_outage_counter_(
+ "WebRTC.Audio.DelayedPacketOutageEventsPerMinute",
+ 60000, // 60 seconds report interval.
+ 100),
+ excess_buffer_delay_("WebRTC.Audio.AverageExcessBufferDelayMs",
+ 60000, // 60 seconds report interval.
+ 1000) {
}
+StatisticsCalculator::~StatisticsCalculator() = default;
+
void StatisticsCalculator::Reset() {
preemptive_samples_ = 0;
accelerate_samples_ = 0;
@@ -40,6 +131,7 @@ void StatisticsCalculator::Reset() {
expanded_speech_samples_ = 0;
expanded_noise_samples_ = 0;
secondary_decoded_samples_ = 0;
+ waiting_times_.clear();
}
void StatisticsCalculator::ResetMcu() {
@@ -48,41 +140,39 @@ void StatisticsCalculator::ResetMcu() {
timestamps_since_last_report_ = 0;
}
-void StatisticsCalculator::ResetWaitingTimeStatistics() {
- memset(waiting_times_, 0, kLenWaitingTimes * sizeof(waiting_times_[0]));
- len_waiting_times_ = 0;
- next_waiting_time_index_ = 0;
-}
-
-void StatisticsCalculator::ExpandedVoiceSamples(int num_samples) {
+void StatisticsCalculator::ExpandedVoiceSamples(size_t num_samples) {
expanded_speech_samples_ += num_samples;
}
-void StatisticsCalculator::ExpandedNoiseSamples(int num_samples) {
+void StatisticsCalculator::ExpandedNoiseSamples(size_t num_samples) {
expanded_noise_samples_ += num_samples;
}
-void StatisticsCalculator::PreemptiveExpandedSamples(int num_samples) {
+void StatisticsCalculator::PreemptiveExpandedSamples(size_t num_samples) {
preemptive_samples_ += num_samples;
}
-void StatisticsCalculator::AcceleratedSamples(int num_samples) {
+void StatisticsCalculator::AcceleratedSamples(size_t num_samples) {
accelerate_samples_ += num_samples;
}
-void StatisticsCalculator::AddZeros(int num_samples) {
+void StatisticsCalculator::AddZeros(size_t num_samples) {
added_zero_samples_ += num_samples;
}
-void StatisticsCalculator::PacketsDiscarded(int num_packets) {
+void StatisticsCalculator::PacketsDiscarded(size_t num_packets) {
discarded_packets_ += num_packets;
}
-void StatisticsCalculator::LostSamples(int num_samples) {
+void StatisticsCalculator::LostSamples(size_t num_samples) {
lost_timestamps_ += num_samples;
}
-void StatisticsCalculator::IncreaseCounter(int num_samples, int fs_hz) {
+void StatisticsCalculator::IncreaseCounter(size_t num_samples, int fs_hz) {
+ const int time_step_ms =
+ rtc::CheckedDivExact(static_cast<int>(1000 * num_samples), fs_hz);
+ delayed_packet_outage_counter_.AdvanceClock(time_step_ms);
+ excess_buffer_delay_.AdvanceClock(time_step_ms);
timestamps_since_last_report_ += static_cast<uint32_t>(num_samples);
if (timestamps_since_last_report_ >
static_cast<uint32_t>(fs_hz * kMaxReportPeriod)) {
@@ -96,22 +186,27 @@ void StatisticsCalculator::SecondaryDecodedSamples(int num_samples) {
secondary_decoded_samples_ += num_samples;
}
+void StatisticsCalculator::LogDelayedPacketOutageEvent(int outage_duration_ms) {
+ RTC_HISTOGRAM_COUNTS("WebRTC.Audio.DelayedPacketOutageEventMs",
+ outage_duration_ms, 1 /* min */, 2000 /* max */,
+ 100 /* bucket count */);
+ delayed_packet_outage_counter_.RegisterSample();
+}
+
void StatisticsCalculator::StoreWaitingTime(int waiting_time_ms) {
- assert(next_waiting_time_index_ < kLenWaitingTimes);
- waiting_times_[next_waiting_time_index_] = waiting_time_ms;
- next_waiting_time_index_++;
- if (next_waiting_time_index_ >= kLenWaitingTimes) {
- next_waiting_time_index_ = 0;
- }
- if (len_waiting_times_ < kLenWaitingTimes) {
- len_waiting_times_++;
+ excess_buffer_delay_.RegisterSample(waiting_time_ms);
+ RTC_DCHECK_LE(waiting_times_.size(), kLenWaitingTimes);
+ if (waiting_times_.size() == kLenWaitingTimes) {
+ // Erase first value.
+ waiting_times_.pop_front();
}
+ waiting_times_.push_back(waiting_time_ms);
}
void StatisticsCalculator::GetNetworkStatistics(
int fs_hz,
- int num_samples_in_buffers,
- int samples_per_packet,
+ size_t num_samples_in_buffers,
+ size_t samples_per_packet,
const DelayManager& delay_manager,
const DecisionLogic& decision_logic,
NetEqNetworkStatistics *stats) {
@@ -123,8 +218,8 @@ void StatisticsCalculator::GetNetworkStatistics(
stats->added_zero_samples = added_zero_samples_;
stats->current_buffer_size_ms =
static_cast<uint16_t>(num_samples_in_buffers * 1000 / fs_hz);
- const int ms_per_packet = decision_logic.packet_length_samples() /
- (fs_hz / 1000);
+ const int ms_per_packet = rtc::checked_cast<int>(
+ decision_logic.packet_length_samples() / (fs_hz / 1000));
stats->preferred_buffer_size_ms = (delay_manager.TargetLevel() >> 8) *
ms_per_packet;
stats->jitter_peaks_found = delay_manager.PeakFound();
@@ -133,7 +228,7 @@ void StatisticsCalculator::GetNetworkStatistics(
stats->packet_loss_rate =
CalculateQ14Ratio(lost_timestamps_, timestamps_since_last_report_);
- const unsigned discarded_samples = discarded_packets_ * samples_per_packet;
+ const size_t discarded_samples = discarded_packets_ * samples_per_packet;
stats->packet_discard_rate =
CalculateQ14Ratio(discarded_samples, timestamps_since_last_report_);
@@ -155,20 +250,36 @@ void StatisticsCalculator::GetNetworkStatistics(
CalculateQ14Ratio(secondary_decoded_samples_,
timestamps_since_last_report_);
+ if (waiting_times_.size() == 0) {
+ stats->mean_waiting_time_ms = -1;
+ stats->median_waiting_time_ms = -1;
+ stats->min_waiting_time_ms = -1;
+ stats->max_waiting_time_ms = -1;
+ } else {
+ std::sort(waiting_times_.begin(), waiting_times_.end());
+ // Find mid-point elements. If the size is odd, the two values
+ // |middle_left| and |middle_right| will both be the one middle element; if
+ // the size is even, they will be the the two neighboring elements at the
+ // middle of the list.
+ const int middle_left = waiting_times_[(waiting_times_.size() - 1) / 2];
+ const int middle_right = waiting_times_[waiting_times_.size() / 2];
+ // Calculate the average of the two. (Works also for odd sizes.)
+ stats->median_waiting_time_ms = (middle_left + middle_right) / 2;
+ stats->min_waiting_time_ms = waiting_times_.front();
+ stats->max_waiting_time_ms = waiting_times_.back();
+ double sum = 0;
+ for (auto time : waiting_times_) {
+ sum += time;
+ }
+ stats->mean_waiting_time_ms = static_cast<int>(sum / waiting_times_.size());
+ }
+
// Reset counters.
ResetMcu();
Reset();
}
-void StatisticsCalculator::WaitingTimes(std::vector<int>* waiting_times) {
- if (!waiting_times) {
- return;
- }
- waiting_times->assign(waiting_times_, waiting_times_ + len_waiting_times_);
- ResetWaitingTimeStatistics();
-}
-
-uint16_t StatisticsCalculator::CalculateQ14Ratio(uint32_t numerator,
+uint16_t StatisticsCalculator::CalculateQ14Ratio(size_t numerator,
uint32_t denominator) {
if (numerator == 0) {
return 0;
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/statistics_calculator.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/statistics_calculator.h
index a2cd9be6edc..08235df4292 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/statistics_calculator.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/statistics_calculator.h
@@ -11,7 +11,8 @@
#ifndef WEBRTC_MODULES_AUDIO_CODING_NETEQ_STATISTICS_CALCULATOR_H_
#define WEBRTC_MODULES_AUDIO_CODING_NETEQ_STATISTICS_CALCULATOR_H_
-#include <vector>
+#include <deque>
+#include <string>
#include "webrtc/base/constructormagic.h"
#include "webrtc/modules/audio_coding/neteq/interface/neteq.h"
@@ -28,7 +29,7 @@ class StatisticsCalculator {
public:
StatisticsCalculator();
- virtual ~StatisticsCalculator() {}
+ virtual ~StatisticsCalculator();
// Resets most of the counters.
void Reset();
@@ -36,36 +37,34 @@ class StatisticsCalculator {
// Resets the counters that are not handled by Reset().
void ResetMcu();
- // Resets the waiting time statistics.
- void ResetWaitingTimeStatistics();
-
// Reports that |num_samples| samples were produced through expansion, and
// that the expansion produced other than just noise samples.
- void ExpandedVoiceSamples(int num_samples);
+ void ExpandedVoiceSamples(size_t num_samples);
// Reports that |num_samples| samples were produced through expansion, and
// that the expansion produced only noise samples.
- void ExpandedNoiseSamples(int num_samples);
+ void ExpandedNoiseSamples(size_t num_samples);
// Reports that |num_samples| samples were produced through preemptive
// expansion.
- void PreemptiveExpandedSamples(int num_samples);
+ void PreemptiveExpandedSamples(size_t num_samples);
// Reports that |num_samples| samples were removed through accelerate.
- void AcceleratedSamples(int num_samples);
+ void AcceleratedSamples(size_t num_samples);
// Reports that |num_samples| zeros were inserted into the output.
- void AddZeros(int num_samples);
+ void AddZeros(size_t num_samples);
// Reports that |num_packets| packets were discarded.
- void PacketsDiscarded(int num_packets);
+ void PacketsDiscarded(size_t num_packets);
// Reports that |num_samples| were lost.
- void LostSamples(int num_samples);
+ void LostSamples(size_t num_samples);
// Increases the report interval counter with |num_samples| at a sample rate
- // of |fs_hz|.
- void IncreaseCounter(int num_samples, int fs_hz);
+ // of |fs_hz|. This is how the StatisticsCalculator gets notified that current
+ // time is increasing.
+ void IncreaseCounter(size_t num_samples, int fs_hz);
// Stores new packet waiting time in waiting time statistics.
void StoreWaitingTime(int waiting_time_ms);
@@ -73,40 +72,95 @@ class StatisticsCalculator {
// Reports that |num_samples| samples were decoded from secondary packets.
void SecondaryDecodedSamples(int num_samples);
+ // Logs a delayed packet outage event of |outage_duration_ms|. A delayed
+ // packet outage event is defined as an expand period caused not by an actual
+ // packet loss, but by a delayed packet.
+ virtual void LogDelayedPacketOutageEvent(int outage_duration_ms);
+
// Returns the current network statistics in |stats|. The current sample rate
// is |fs_hz|, the total number of samples in packet buffer and sync buffer
// yet to play out is |num_samples_in_buffers|, and the number of samples per
// packet is |samples_per_packet|.
void GetNetworkStatistics(int fs_hz,
- int num_samples_in_buffers,
- int samples_per_packet,
+ size_t num_samples_in_buffers,
+ size_t samples_per_packet,
const DelayManager& delay_manager,
const DecisionLogic& decision_logic,
NetEqNetworkStatistics *stats);
- void WaitingTimes(std::vector<int>* waiting_times);
-
private:
static const int kMaxReportPeriod = 60; // Seconds before auto-reset.
- static const int kLenWaitingTimes = 100;
+ static const size_t kLenWaitingTimes = 100;
+
+ class PeriodicUmaLogger {
+ public:
+ PeriodicUmaLogger(const std::string& uma_name,
+ int report_interval_ms,
+ int max_value);
+ virtual ~PeriodicUmaLogger();
+ void AdvanceClock(int step_ms);
+
+ protected:
+ void LogToUma(int value) const;
+ virtual int Metric() const = 0;
+ virtual void Reset() = 0;
+
+ const std::string uma_name_;
+ const int report_interval_ms_;
+ const int max_value_;
+ int timer_ = 0;
+ };
+
+ class PeriodicUmaCount final : public PeriodicUmaLogger {
+ public:
+ PeriodicUmaCount(const std::string& uma_name,
+ int report_interval_ms,
+ int max_value);
+ ~PeriodicUmaCount() override;
+ void RegisterSample();
+
+ protected:
+ int Metric() const override;
+ void Reset() override;
+
+ private:
+ int counter_ = 0;
+ };
+
+ class PeriodicUmaAverage final : public PeriodicUmaLogger {
+ public:
+ PeriodicUmaAverage(const std::string& uma_name,
+ int report_interval_ms,
+ int max_value);
+ ~PeriodicUmaAverage() override;
+ void RegisterSample(int value);
+
+ protected:
+ int Metric() const override;
+ void Reset() override;
+
+ private:
+ double sum_ = 0.0;
+ int counter_ = 0;
+ };
// Calculates numerator / denominator, and returns the value in Q14.
- static uint16_t CalculateQ14Ratio(uint32_t numerator, uint32_t denominator);
-
- uint32_t preemptive_samples_;
- uint32_t accelerate_samples_;
- int added_zero_samples_;
- uint32_t expanded_speech_samples_;
- uint32_t expanded_noise_samples_;
- int discarded_packets_;
- uint32_t lost_timestamps_;
+ static uint16_t CalculateQ14Ratio(size_t numerator, uint32_t denominator);
+
+ size_t preemptive_samples_;
+ size_t accelerate_samples_;
+ size_t added_zero_samples_;
+ size_t expanded_speech_samples_;
+ size_t expanded_noise_samples_;
+ size_t discarded_packets_;
+ size_t lost_timestamps_;
uint32_t timestamps_since_last_report_;
- int waiting_times_[kLenWaitingTimes]; // Used as a circular buffer.
- int len_waiting_times_;
- int next_waiting_time_index_;
+ std::deque<int> waiting_times_;
uint32_t secondary_decoded_samples_;
+ PeriodicUmaCount delayed_packet_outage_counter_;
+ PeriodicUmaAverage excess_buffer_delay_;
- DISALLOW_COPY_AND_ASSIGN(StatisticsCalculator);
+ RTC_DISALLOW_COPY_AND_ASSIGN(StatisticsCalculator);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/sync_buffer.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/sync_buffer.h
index bbb494e9964..38e7887794f 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/sync_buffer.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/sync_buffer.h
@@ -92,7 +92,7 @@ class SyncBuffer : public AudioMultiVector {
uint32_t end_timestamp_; // The timestamp of the last sample in the buffer.
size_t dtmf_index_; // Index to the first non-DTMF sample in the buffer.
- DISALLOW_COPY_AND_ASSIGN(SyncBuffer);
+ RTC_DISALLOW_COPY_AND_ASSIGN(SyncBuffer);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/test/RTPencode.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/test/RTPencode.cc
index 1aacb401b76..b2df07aa4e9 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/test/RTPencode.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/test/RTPencode.cc
@@ -23,6 +23,8 @@
#include <assert.h>
+#include <algorithm>
+
#include "webrtc/typedefs.h"
// needed for NetEqDecoder
#include "webrtc/modules/audio_coding/neteq/audio_decoder_impl.h"
@@ -76,27 +78,27 @@
void NetEQTest_GetCodec_and_PT(char* name,
webrtc::NetEqDecoder* codec,
int* PT,
- int frameLen,
+ size_t frameLen,
int* fs,
int* bitrate,
int* useRed);
int NetEQTest_init_coders(webrtc::NetEqDecoder coder,
- int enc_frameSize,
+ size_t enc_frameSize,
int bitrate,
int sampfreq,
int vad,
- int numChannels);
+ size_t numChannels);
void defineCodecs(webrtc::NetEqDecoder* usedCodec, int* noOfCodecs);
-int NetEQTest_free_coders(webrtc::NetEqDecoder coder, int numChannels);
-int NetEQTest_encode(int coder,
- int16_t* indata,
- int frameLen,
- unsigned char* encoded,
- int sampleRate,
- int* vad,
- int useVAD,
- int bitrate,
- int numChannels);
+int NetEQTest_free_coders(webrtc::NetEqDecoder coder, size_t numChannels);
+size_t NetEQTest_encode(int coder,
+ int16_t* indata,
+ size_t frameLen,
+ unsigned char* encoded,
+ int sampleRate,
+ int* vad,
+ int useVAD,
+ int bitrate,
+ size_t numChannels);
void makeRTPheader(unsigned char* rtp_data,
int payloadType,
int seqNo,
@@ -109,13 +111,13 @@ int makeRedundantHeader(unsigned char* rtp_data,
uint16_t* blockLen,
int seqNo,
uint32_t ssrc);
-int makeDTMFpayload(unsigned char* payload_data,
- int Event,
- int End,
- int Volume,
- int Duration);
-void stereoDeInterleave(int16_t* audioSamples, int numSamples);
-void stereoInterleave(unsigned char* data, int dataLen, int stride);
+size_t makeDTMFpayload(unsigned char* payload_data,
+ int Event,
+ int End,
+ int Volume,
+ int Duration);
+void stereoDeInterleave(int16_t* audioSamples, size_t numSamples);
+void stereoInterleave(unsigned char* data, size_t dataLen, size_t stride);
/*********************/
/* Codec definitions */
@@ -264,13 +266,14 @@ SPEEX_encinst_t* SPEEX16enc_inst[2];
#endif
int main(int argc, char* argv[]) {
- int packet_size, fs;
+ size_t packet_size;
+ int fs;
webrtc::NetEqDecoder usedCodec;
int payloadType;
int bitrate = 0;
int useVAD, vad;
int useRed = 0;
- int len, enc_len;
+ size_t len, enc_len;
int16_t org_data[4000];
unsigned char rtp_data[8000];
int16_t seqNo = 0xFFF;
@@ -282,14 +285,14 @@ int main(int argc, char* argv[]) {
int red_PT[2] = {0};
uint32_t red_TS[2] = {0};
uint16_t red_len[2] = {0};
- int RTPheaderLen = 12;
+ size_t RTPheaderLen = 12;
uint8_t red_data[8000];
#ifdef INSERT_OLD_PACKETS
uint16_t old_length, old_plen;
- int old_enc_len;
+ size_t old_enc_len;
int first_old_packet = 1;
unsigned char old_rtp_data[8000];
- int packet_age = 0;
+ size_t packet_age = 0;
#endif
#ifdef INSERT_DTMF_PACKETS
int NTone = 1;
@@ -298,8 +301,8 @@ int main(int argc, char* argv[]) {
bool dtmfSent = false;
#endif
bool usingStereo = false;
- int stereoMode = 0;
- int numChannels = 1;
+ size_t stereoMode = 0;
+ size_t numChannels = 1;
/* check number of parameters */
if ((argc != 6) && (argc != 7)) {
@@ -449,12 +452,13 @@ int main(int argc, char* argv[]) {
FILE* out_file = fopen(argv[2], "wb");
CHECK_NOT_NULL(out_file);
printf("Output file: %s\n\n", argv[2]);
- packet_size = atoi(argv[3]);
- if (packet_size <= 0) {
- printf("Packet size %d must be positive", packet_size);
+ int packet_size_int = atoi(argv[3]);
+ if (packet_size_int <= 0) {
+ printf("Packet size %d must be positive", packet_size_int);
return -1;
}
- printf("Packet size: %i\n", packet_size);
+ printf("Packet size: %d\n", packet_size_int);
+ packet_size = static_cast<size_t>(packet_size_int);
// check for stereo
if (argv[4][strlen(argv[4]) - 1] == '*') {
@@ -653,10 +657,6 @@ int main(int argc, char* argv[]) {
enc_len =
NetEQTest_encode(usedCodec, org_data, packet_size, &rtp_data[12], fs,
&vad, useVAD, bitrate, numChannels);
- if (enc_len == -1) {
- printf("Error encoding frame\n");
- exit(0);
- }
if (usingStereo && stereoMode != STEREO_MODE_FRAME && vad == 1) {
// interleave the encoded payload for sample-based codecs (not for CNG)
@@ -729,12 +729,12 @@ int main(int argc, char* argv[]) {
return -1;
}
#ifdef RANDOM_DATA
- for (int k = 0; k < 12 + enc_len; k++) {
+ for (size_t k = 0; k < 12 + enc_len; k++) {
rtp_data[k] = rand() + rand();
}
#endif
#ifdef RANDOM_PAYLOAD_DATA
- for (int k = 12; k < 12 + enc_len; k++) {
+ for (size_t k = 12; k < 12 + enc_len; k++) {
rtp_data[k] = rand() + rand();
}
#endif
@@ -822,7 +822,7 @@ int main(int argc, char* argv[]) {
void NetEQTest_GetCodec_and_PT(char* name,
webrtc::NetEqDecoder* codec,
int* PT,
- int frameLen,
+ size_t frameLen,
int* fs,
int* bitrate,
int* useRed) {
@@ -887,14 +887,14 @@ void NetEQTest_GetCodec_and_PT(char* name,
}
int NetEQTest_init_coders(webrtc::NetEqDecoder coder,
- int enc_frameSize,
+ size_t enc_frameSize,
int bitrate,
int sampfreq,
int vad,
- int numChannels) {
+ size_t numChannels) {
int ok = 0;
- for (int k = 0; k < numChannels; k++) {
+ for (size_t k = 0; k < numChannels; k++) {
VAD_inst[k] = WebRtcVad_Create();
if (!VAD_inst[k]) {
printf("Error: Couldn't allocate memory for VAD instance\n");
@@ -962,7 +962,7 @@ int NetEQTest_init_coders(webrtc::NetEqDecoder coder,
WebRtcG729_EncoderInit(G729enc_inst[k], vad);
if ((vad == 1) && (enc_frameSize != 80)) {
printf("\nError - This simulation only supports VAD for G729 at "
- "10ms packets (not %dms)\n", (enc_frameSize >> 3));
+ "10ms packets (not %" PRIuS "ms)\n", (enc_frameSize >> 3));
}
} else {
printf("\nError - g729 is only developed for 8kHz \n");
@@ -1018,7 +1018,7 @@ int NetEQTest_init_coders(webrtc::NetEqDecoder coder,
}
if ((vad == 1) && (enc_frameSize != 160)) {
printf("\nError - This simulation only supports VAD for Speex at "
- "20ms packets (not %dms)\n",
+ "20ms packets (not %" PRIuS "ms)\n",
(enc_frameSize >> 3));
vad = 0;
}
@@ -1049,7 +1049,7 @@ int NetEQTest_init_coders(webrtc::NetEqDecoder coder,
}
if ((vad == 1) && (enc_frameSize != 320)) {
printf("\nError - This simulation only supports VAD for Speex at "
- "20ms packets (not %dms)\n",
+ "20ms packets (not %" PRIuS "ms)\n",
(enc_frameSize >> 4));
vad = 0;
}
@@ -1238,8 +1238,7 @@ int NetEQTest_init_coders(webrtc::NetEqDecoder coder,
"instance\n");
exit(0);
}
- if (((enc_frameSize / 320) < 0) || ((enc_frameSize / 320) > 3) ||
- ((enc_frameSize % 320) != 0)) {
+ if (((enc_frameSize / 320) > 3) || ((enc_frameSize % 320) != 0)) {
printf("\nError - AMRwb must have frameSize of 20, 40 or 60ms\n");
exit(0);
}
@@ -1320,7 +1319,8 @@ int NetEQTest_init_coders(webrtc::NetEqDecoder coder,
bitrate);
exit(0);
}
- WebRtcIsac_Control(ISAC_inst[k], bitrate, enc_frameSize >> 4);
+ WebRtcIsac_Control(ISAC_inst[k], bitrate,
+ static_cast<int>(enc_frameSize >> 4));
} else {
printf("\nError - iSAC only supports 480 or 960 enc_frameSize (30 or "
"60 ms)\n");
@@ -1379,7 +1379,8 @@ int NetEQTest_init_coders(webrtc::NetEqDecoder coder,
"56000 bps (not %i)\n", bitrate);
exit(0);
}
- WebRtcIsac_Control(ISACSWB_inst[k], bitrate, enc_frameSize >> 5);
+ WebRtcIsac_Control(ISACSWB_inst[k], bitrate,
+ static_cast<int>(enc_frameSize >> 5));
} else {
printf("\nError - iSAC SWB only supports 960 enc_frameSize (30 "
"ms)\n");
@@ -1424,8 +1425,8 @@ int NetEQTest_init_coders(webrtc::NetEqDecoder coder,
return (0);
}
-int NetEQTest_free_coders(webrtc::NetEqDecoder coder, int numChannels) {
- for (int k = 0; k < numChannels; k++) {
+int NetEQTest_free_coders(webrtc::NetEqDecoder coder, size_t numChannels) {
+ for (size_t k = 0; k < numChannels; k++) {
WebRtcVad_Free(VAD_inst[k]);
#if (defined(CODEC_CNGCODEC8) || defined(CODEC_CNGCODEC16) || \
defined(CODEC_CNGCODEC32) || defined(CODEC_CNGCODEC48))
@@ -1552,49 +1553,50 @@ int NetEQTest_free_coders(webrtc::NetEqDecoder coder, int numChannels) {
return (0);
}
-int NetEQTest_encode(int coder,
- int16_t* indata,
- int frameLen,
- unsigned char* encoded,
- int sampleRate,
- int* vad,
- int useVAD,
- int bitrate,
- int numChannels) {
- int cdlen = 0;
+size_t NetEQTest_encode(int coder,
+ int16_t* indata,
+ size_t frameLen,
+ unsigned char* encoded,
+ int sampleRate,
+ int* vad,
+ int useVAD,
+ int bitrate,
+ size_t numChannels) {
+ size_t cdlen = 0;
int16_t* tempdata;
static int first_cng = 1;
- int16_t tempLen;
-
+ size_t tempLen;
*vad = 1;
// check VAD first
if (useVAD) {
*vad = 0;
- for (int k = 0; k < numChannels; k++) {
+ size_t sampleRate_10 = static_cast<size_t>(10 * sampleRate / 1000);
+ size_t sampleRate_20 = static_cast<size_t>(20 * sampleRate / 1000);
+ size_t sampleRate_30 = static_cast<size_t>(30 * sampleRate / 1000);
+ for (size_t k = 0; k < numChannels; k++) {
tempLen = frameLen;
tempdata = &indata[k * frameLen];
int localVad = 0;
/* Partition the signal and test each chunk for VAD.
- All chunks must be VAD=0 to produce a total VAD=0. */
- while (tempLen >= 10 * sampleRate / 1000) {
- if ((tempLen % 30 * sampleRate / 1000) ==
- 0) { // tempLen is multiple of 30ms
+ All chunks must be VAD=0 to produce a total VAD=0. */
+ while (tempLen >= sampleRate_10) {
+ if ((tempLen % sampleRate_30) == 0) { // tempLen is multiple of 30ms
localVad |= WebRtcVad_Process(VAD_inst[k], sampleRate, tempdata,
- 30 * sampleRate / 1000);
- tempdata += 30 * sampleRate / 1000;
- tempLen -= 30 * sampleRate / 1000;
- } else if (tempLen >= 20 * sampleRate / 1000) { // tempLen >= 20ms
+ sampleRate_30);
+ tempdata += sampleRate_30;
+ tempLen -= sampleRate_30;
+ } else if (tempLen >= sampleRate_20) { // tempLen >= 20ms
localVad |= WebRtcVad_Process(VAD_inst[k], sampleRate, tempdata,
- 20 * sampleRate / 1000);
- tempdata += 20 * sampleRate / 1000;
- tempLen -= 20 * sampleRate / 1000;
+ sampleRate_20);
+ tempdata += sampleRate_20;
+ tempLen -= sampleRate_20;
} else { // use 10ms
localVad |= WebRtcVad_Process(VAD_inst[k], sampleRate, tempdata,
- 10 * sampleRate / 1000);
- tempdata += 10 * sampleRate / 1000;
- tempLen -= 10 * sampleRate / 1000;
+ sampleRate_10);
+ tempdata += sampleRate_10;
+ tempLen -= sampleRate_10;
}
}
@@ -1605,7 +1607,7 @@ int NetEQTest_encode(int coder,
if (!*vad) {
// all channels are silent
cdlen = 0;
- for (int k = 0; k < numChannels; k++) {
+ for (size_t k = 0; k < numChannels; k++) {
WebRtcCng_Encode(CNGenc_inst[k], &indata[k * frameLen],
(frameLen <= 640 ? frameLen : 640) /* max 640 */,
encoded, &tempLen, first_cng);
@@ -1619,9 +1621,9 @@ int NetEQTest_encode(int coder,
}
// loop over all channels
- int totalLen = 0;
+ size_t totalLen = 0;
- for (int k = 0; k < numChannels; k++) {
+ for (size_t k = 0; k < numChannels; k++) {
/* Encode with the selected coder type */
if (coder == webrtc::kDecoderPCMu) { /*g711 u-law */
#ifdef CODEC_G711
@@ -1650,7 +1652,8 @@ int NetEQTest_encode(int coder,
#endif
#ifdef CODEC_ILBC
else if (coder == webrtc::kDecoderILBC) { /*iLBC */
- cdlen = WebRtcIlbcfix_Encode(iLBCenc_inst[k], indata, frameLen, encoded);
+ cdlen = static_cast<size_t>(std::max(
+ WebRtcIlbcfix_Encode(iLBCenc_inst[k], indata, frameLen, encoded), 0));
}
#endif
#if (defined(CODEC_ISAC) || \
@@ -1658,28 +1661,30 @@ int NetEQTest_encode(int coder,
// NETEQ_ISACFIX_CODEC
else if (coder == webrtc::kDecoderISAC) { /*iSAC */
int noOfCalls = 0;
- cdlen = 0;
- while (cdlen <= 0) {
+ int res = 0;
+ while (res <= 0) {
#ifdef CODEC_ISAC /* floating point */
- cdlen =
+ res =
WebRtcIsac_Encode(ISAC_inst[k], &indata[noOfCalls * 160], encoded);
#else /* fixed point */
- cdlen = WebRtcIsacfix_Encode(ISAC_inst[k], &indata[noOfCalls * 160],
- encoded);
+ res = WebRtcIsacfix_Encode(ISAC_inst[k], &indata[noOfCalls * 160],
+ encoded);
#endif
noOfCalls++;
}
+ cdlen = static_cast<size_t>(res);
}
#endif
#ifdef CODEC_ISAC_SWB
else if (coder == webrtc::kDecoderISACswb) { /* iSAC SWB */
int noOfCalls = 0;
- cdlen = 0;
- while (cdlen <= 0) {
- cdlen = WebRtcIsac_Encode(ISACSWB_inst[k], &indata[noOfCalls * 320],
- encoded);
+ int res = 0;
+ while (res <= 0) {
+ res = WebRtcIsac_Encode(ISACSWB_inst[k], &indata[noOfCalls * 320],
+ encoded);
noOfCalls++;
}
+ cdlen = static_cast<size_t>(res);
}
#endif
indata += frameLen;
@@ -1755,11 +1760,11 @@ int makeRedundantHeader(unsigned char* rtp_data,
return rtpPointer - rtp_data; // length of header in bytes
}
-int makeDTMFpayload(unsigned char* payload_data,
- int Event,
- int End,
- int Volume,
- int Duration) {
+size_t makeDTMFpayload(unsigned char* payload_data,
+ int Event,
+ int End,
+ int Volume,
+ int Duration) {
unsigned char E, R, V;
R = 0;
V = (unsigned char)Volume;
@@ -1776,11 +1781,11 @@ int makeDTMFpayload(unsigned char* payload_data,
return (4);
}
-void stereoDeInterleave(int16_t* audioSamples, int numSamples) {
+void stereoDeInterleave(int16_t* audioSamples, size_t numSamples) {
int16_t* tempVec;
int16_t* readPtr, *writeL, *writeR;
- if (numSamples <= 0)
+ if (numSamples == 0)
return;
tempVec = (int16_t*)malloc(sizeof(int16_t) * numSamples);
@@ -1795,7 +1800,7 @@ void stereoDeInterleave(int16_t* audioSamples, int numSamples) {
writeR = &audioSamples[numSamples / 2];
readPtr = tempVec;
- for (int k = 0; k < numSamples; k += 2) {
+ for (size_t k = 0; k < numSamples; k += 2) {
*writeL = *readPtr;
readPtr++;
*writeR = *readPtr;
@@ -1807,7 +1812,7 @@ void stereoDeInterleave(int16_t* audioSamples, int numSamples) {
free(tempVec);
}
-void stereoInterleave(unsigned char* data, int dataLen, int stride) {
+void stereoInterleave(unsigned char* data, size_t dataLen, size_t stride) {
unsigned char* ptrL, *ptrR;
unsigned char temp[10];
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/test/neteq_ilbc_quality_test.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/test/neteq_ilbc_quality_test.cc
index 134539f054a..cb0780ced04 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/test/neteq_ilbc_quality_test.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/test/neteq_ilbc_quality_test.cc
@@ -59,11 +59,11 @@ class NetEqIlbcQualityTest : public NetEqQualityTest {
}
int EncodeBlock(int16_t* in_data,
- int block_size_samples,
+ size_t block_size_samples,
uint8_t* payload,
- int max_bytes) override {
- const int kFrameSizeSamples = 80; // Samples per 10 ms.
- int encoded_samples = 0;
+ size_t max_bytes) override {
+ const size_t kFrameSizeSamples = 80; // Samples per 10 ms.
+ size_t encoded_samples = 0;
uint32_t dummy_timestamp = 0;
AudioEncoder::EncodedInfo info;
do {
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/test/neteq_isac_quality_test.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/test/neteq_isac_quality_test.cc
index 85dd54d9bd9..47fae3647d7 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/test/neteq_isac_quality_test.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/test/neteq_isac_quality_test.cc
@@ -43,8 +43,8 @@ class NetEqIsacQualityTest : public NetEqQualityTest {
NetEqIsacQualityTest();
void SetUp() override;
void TearDown() override;
- virtual int EncodeBlock(int16_t* in_data, int block_size_samples,
- uint8_t* payload, int max_bytes);
+ virtual int EncodeBlock(int16_t* in_data, size_t block_size_samples,
+ uint8_t* payload, size_t max_bytes);
private:
ISACFIX_MainStruct* isac_encoder_;
int bit_rate_kbps_;
@@ -78,8 +78,8 @@ void NetEqIsacQualityTest::TearDown() {
}
int NetEqIsacQualityTest::EncodeBlock(int16_t* in_data,
- int block_size_samples,
- uint8_t* payload, int max_bytes) {
+ size_t block_size_samples,
+ uint8_t* payload, size_t max_bytes) {
// ISAC takes 10 ms for every call.
const int subblocks = kIsacBlockDurationMs / 10;
const int subblock_length = 10 * kIsacInputSamplingKhz;
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/test/neteq_opus_quality_test.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/test/neteq_opus_quality_test.cc
index 3a3b3269cfb..0406da2739d 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/test/neteq_opus_quality_test.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/test/neteq_opus_quality_test.cc
@@ -103,12 +103,12 @@ class NetEqOpusQualityTest : public NetEqQualityTest {
NetEqOpusQualityTest();
void SetUp() override;
void TearDown() override;
- virtual int EncodeBlock(int16_t* in_data, int block_size_samples,
- uint8_t* payload, int max_bytes);
+ virtual int EncodeBlock(int16_t* in_data, size_t block_size_samples,
+ uint8_t* payload, size_t max_bytes);
private:
WebRtcOpusEncInst* opus_encoder_;
OpusRepacketizer* repacketizer_;
- int sub_block_size_samples_;
+ size_t sub_block_size_samples_;
int bit_rate_kbps_;
bool fec_;
bool dtx_;
@@ -126,7 +126,8 @@ NetEqOpusQualityTest::NetEqOpusQualityTest()
kDecoderOpus),
opus_encoder_(NULL),
repacketizer_(NULL),
- sub_block_size_samples_(kOpusBlockDurationMs * kOpusSamplingKhz),
+ sub_block_size_samples_(
+ static_cast<size_t>(kOpusBlockDurationMs * kOpusSamplingKhz)),
bit_rate_kbps_(FLAGS_bit_rate_kbps),
fec_(FLAGS_fec),
dtx_(FLAGS_dtx),
@@ -173,8 +174,8 @@ void NetEqOpusQualityTest::TearDown() {
}
int NetEqOpusQualityTest::EncodeBlock(int16_t* in_data,
- int block_size_samples,
- uint8_t* payload, int max_bytes) {
+ size_t block_size_samples,
+ uint8_t* payload, size_t max_bytes) {
EXPECT_EQ(block_size_samples, sub_block_size_samples_ * sub_packets_);
int16_t* pointer = in_data;
int value;
@@ -192,7 +193,8 @@ int NetEqOpusQualityTest::EncodeBlock(int16_t* in_data,
}
pointer += sub_block_size_samples_ * channels_;
}
- value = opus_repacketizer_out(repacketizer_, payload, max_bytes);
+ value = opus_repacketizer_out(repacketizer_, payload,
+ static_cast<opus_int32>(max_bytes));
EXPECT_GE(value, 0);
return value;
}
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/test/neteq_pcmu_quality_test.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/test/neteq_pcmu_quality_test.cc
index d94ceb60f81..0b89352308b 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/test/neteq_pcmu_quality_test.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/test/neteq_pcmu_quality_test.cc
@@ -59,11 +59,11 @@ class NetEqPcmuQualityTest : public NetEqQualityTest {
}
int EncodeBlock(int16_t* in_data,
- int block_size_samples,
+ size_t block_size_samples,
uint8_t* payload,
- int max_bytes) override {
- const int kFrameSizeSamples = 80; // Samples per 10 ms.
- int encoded_samples = 0;
+ size_t max_bytes) override {
+ const size_t kFrameSizeSamples = 80; // Samples per 10 ms.
+ size_t encoded_samples = 0;
uint32_t dummy_timestamp = 0;
AudioEncoder::EncodedInfo info;
do {
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/time_stretch.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/time_stretch.cc
index 5577cd2ecbb..6ae81e6e966 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/time_stretch.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/time_stretch.cc
@@ -12,6 +12,7 @@
#include <algorithm> // min, max
+#include "webrtc/base/safe_conversions.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
#include "webrtc/modules/audio_coding/neteq/background_noise.h"
@@ -23,9 +24,10 @@ TimeStretch::ReturnCodes TimeStretch::Process(const int16_t* input,
size_t input_len,
bool fast_mode,
AudioMultiVector* output,
- int16_t* length_change_samples) {
+ size_t* length_change_samples) {
// Pre-calculate common multiplication with |fs_mult_|.
- int fs_mult_120 = fs_mult_ * 120; // Corresponds to 15 ms.
+ size_t fs_mult_120 =
+ static_cast<size_t>(fs_mult_ * 120); // Corresponds to 15 ms.
const int16_t* signal;
rtc::scoped_ptr<int16_t[]> signal_array;
@@ -48,8 +50,7 @@ TimeStretch::ReturnCodes TimeStretch::Process(const int16_t* input,
}
// Find maximum absolute value of input signal.
- max_input_value_ = WebRtcSpl_MaxAbsValueW16(signal,
- static_cast<int>(signal_len));
+ max_input_value_ = WebRtcSpl_MaxAbsValueW16(signal, signal_len);
// Downsample to 4 kHz sample rate and calculate auto-correlation.
DspHelper::DownsampleTo4kHz(signal, signal_len, kDownsampledLen,
@@ -58,13 +59,12 @@ TimeStretch::ReturnCodes TimeStretch::Process(const int16_t* input,
AutoCorrelation();
// Find the strongest correlation peak.
- static const int kNumPeaks = 1;
- int peak_index;
+ static const size_t kNumPeaks = 1;
+ size_t peak_index;
int16_t peak_value;
DspHelper::PeakDetection(auto_correlation_, kCorrelationLen, kNumPeaks,
fs_mult_, &peak_index, &peak_value);
// Assert that |peak_index| stays within boundaries.
- assert(peak_index >= 0);
assert(peak_index <= (2 * kCorrelationLen - 1) * fs_mult_);
// Compensate peak_index for displaced starting position. The displacement
@@ -73,13 +73,13 @@ TimeStretch::ReturnCodes TimeStretch::Process(const int16_t* input,
// multiplication by fs_mult_ * 2.
peak_index += kMinLag * fs_mult_ * 2;
// Assert that |peak_index| stays within boundaries.
- assert(peak_index >= 20 * fs_mult_);
+ assert(peak_index >= static_cast<size_t>(20 * fs_mult_));
assert(peak_index <= 20 * fs_mult_ + (2 * kCorrelationLen - 1) * fs_mult_);
// Calculate scaling to ensure that |peak_index| samples can be square-summed
// without overflowing.
int scaling = 31 - WebRtcSpl_NormW32(max_input_value_ * max_input_value_) -
- WebRtcSpl_NormW32(peak_index);
+ WebRtcSpl_NormW32(static_cast<int32_t>(peak_index));
scaling = std::max(0, scaling);
// |vec1| starts at 15 ms minus one pitch period.
@@ -177,7 +177,7 @@ void TimeStretch::AutoCorrelation() {
}
bool TimeStretch::SpeechDetection(int32_t vec1_energy, int32_t vec2_energy,
- int peak_index, int scaling) const {
+ size_t peak_index, int scaling) const {
// Check if the signal seems to be active speech or not (simple VAD).
// If (vec1_energy + vec2_energy) / (2 * peak_index) <=
// 8 * background_noise_energy, then we say that the signal contains no
@@ -197,7 +197,8 @@ bool TimeStretch::SpeechDetection(int32_t vec1_energy, int32_t vec2_energy,
int right_scale = 16 - WebRtcSpl_NormW32(right_side);
right_scale = std::max(0, right_scale);
left_side = left_side >> right_scale;
- right_side = peak_index * (right_side >> right_scale);
+ right_side =
+ rtc::checked_cast<int32_t>(peak_index) * (right_side >> right_scale);
// Scale |left_side| properly before comparing with |right_side|.
// (|scaling| is the scale factor before energy calculation, thus the scale
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/time_stretch.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/time_stretch.h
index 7c84e1a1534..00a141508b6 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/time_stretch.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/time_stretch.h
@@ -39,7 +39,7 @@ class TimeStretch {
const BackgroundNoise& background_noise)
: sample_rate_hz_(sample_rate_hz),
fs_mult_(sample_rate_hz / 8000),
- num_channels_(static_cast<int>(num_channels)),
+ num_channels_(num_channels),
master_channel_(0), // First channel is master.
background_noise_(background_noise),
max_input_value_(0) {
@@ -48,7 +48,7 @@ class TimeStretch {
sample_rate_hz_ == 32000 ||
sample_rate_hz_ == 48000);
assert(num_channels_ > 0);
- assert(static_cast<int>(master_channel_) < num_channels_);
+ assert(master_channel_ < num_channels_);
memset(auto_correlation_, 0, sizeof(auto_correlation_));
}
@@ -60,7 +60,7 @@ class TimeStretch {
size_t input_len,
bool fast_mode,
AudioMultiVector* output,
- int16_t* length_change_samples);
+ size_t* length_change_samples);
protected:
// Sets the parameters |best_correlation| and |peak_index| to suitable
@@ -68,7 +68,7 @@ class TimeStretch {
// implemented by the sub-classes.
virtual void SetParametersForPassiveSpeech(size_t input_length,
int16_t* best_correlation,
- int* peak_index) const = 0;
+ size_t* peak_index) const = 0;
// Checks the criteria for performing the time-stretching operation and,
// if possible, performs the time-stretching. This method must be implemented
@@ -82,16 +82,16 @@ class TimeStretch {
bool fast_mode,
AudioMultiVector* output) const = 0;
- static const int kCorrelationLen = 50;
- static const int kLogCorrelationLen = 6; // >= log2(kCorrelationLen).
- static const int kMinLag = 10;
- static const int kMaxLag = 60;
- static const int kDownsampledLen = kCorrelationLen + kMaxLag;
+ static const size_t kCorrelationLen = 50;
+ static const size_t kLogCorrelationLen = 6; // >= log2(kCorrelationLen).
+ static const size_t kMinLag = 10;
+ static const size_t kMaxLag = 60;
+ static const size_t kDownsampledLen = kCorrelationLen + kMaxLag;
static const int kCorrelationThreshold = 14746; // 0.9 in Q14.
const int sample_rate_hz_;
const int fs_mult_; // Sample rate multiplier = sample_rate_hz_ / 8000.
- const int num_channels_;
+ const size_t num_channels_;
const size_t master_channel_;
const BackgroundNoise& background_noise_;
int16_t max_input_value_;
@@ -107,9 +107,9 @@ class TimeStretch {
// Performs a simple voice-activity detection based on the input parameters.
bool SpeechDetection(int32_t vec1_energy, int32_t vec2_energy,
- int peak_index, int scaling) const;
+ size_t peak_index, int scaling) const;
- DISALLOW_COPY_AND_ASSIGN(TimeStretch);
+ RTC_DISALLOW_COPY_AND_ASSIGN(TimeStretch);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/time_stretch_unittest.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/time_stretch_unittest.cc
index 05385a1e3e1..0769fd34b7a 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/time_stretch_unittest.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/time_stretch_unittest.cc
@@ -69,18 +69,18 @@ class TimeStretchTest : public ::testing::Test {
}
const int16_t* Next30Ms() {
- CHECK(input_file_->Read(block_size_, audio_.get()));
+ RTC_CHECK(input_file_->Read(block_size_, audio_.get()));
return audio_.get();
}
// Returns the total length change (in samples) that the accelerate operation
// resulted in during the run.
- int TestAccelerate(int loops, bool fast_mode) {
+ size_t TestAccelerate(size_t loops, bool fast_mode) {
Accelerate accelerate(sample_rate_hz_, kNumChannels, background_noise_);
- int total_length_change = 0;
- for (int i = 0; i < loops; ++i) {
+ size_t total_length_change = 0;
+ for (size_t i = 0; i < loops; ++i) {
AudioMultiVector output(kNumChannels);
- int16_t length_change;
+ size_t length_change;
UpdateReturnStats(accelerate.Process(Next30Ms(), block_size_, fast_mode,
&output, &length_change));
total_length_change += length_change;
@@ -110,7 +110,7 @@ class TimeStretchTest : public ::testing::Test {
TEST_F(TimeStretchTest, Accelerate) {
// TestAccelerate returns the total length change in samples.
- EXPECT_EQ(15268, TestAccelerate(100, false));
+ EXPECT_EQ(15268U, TestAccelerate(100, false));
EXPECT_EQ(9, return_stats_[TimeStretch::kSuccess]);
EXPECT_EQ(58, return_stats_[TimeStretch::kSuccessLowEnergy]);
EXPECT_EQ(33, return_stats_[TimeStretch::kNoStretch]);
@@ -118,7 +118,7 @@ TEST_F(TimeStretchTest, Accelerate) {
TEST_F(TimeStretchTest, AccelerateFastMode) {
// TestAccelerate returns the total length change in samples.
- EXPECT_EQ(21400, TestAccelerate(100, true));
+ EXPECT_EQ(21400U, TestAccelerate(100, true));
EXPECT_EQ(31, return_stats_[TimeStretch::kSuccess]);
EXPECT_EQ(58, return_stats_[TimeStretch::kSuccessLowEnergy]);
EXPECT_EQ(11, return_stats_[TimeStretch::kNoStretch]);
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/timestamp_scaler.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/timestamp_scaler.h
index fcbb7737a42..9129d843bfb 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/timestamp_scaler.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/timestamp_scaler.h
@@ -61,7 +61,7 @@ class TimestampScaler {
uint32_t internal_ref_;
const DecoderDatabase& decoder_database_;
- DISALLOW_COPY_AND_ASSIGN(TimestampScaler);
+ RTC_DISALLOW_COPY_AND_ASSIGN(TimestampScaler);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/audio_checksum.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/audio_checksum.h
index b4a6a817b4b..a302cff908c 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/audio_checksum.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/audio_checksum.h
@@ -51,7 +51,7 @@ class AudioChecksum : public AudioSink {
char checksum_result_[rtc::Md5Digest::kSize];
bool finished_;
- DISALLOW_COPY_AND_ASSIGN(AudioChecksum);
+ RTC_DISALLOW_COPY_AND_ASSIGN(AudioChecksum);
};
} // namespace test
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/audio_loop.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/audio_loop.h
index 87ff688738c..a897ee5aef8 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/audio_loop.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/audio_loop.h
@@ -51,7 +51,7 @@ class AudioLoop {
size_t block_length_samples_;
rtc::scoped_ptr<int16_t[]> audio_array_;
- DISALLOW_COPY_AND_ASSIGN(AudioLoop);
+ RTC_DISALLOW_COPY_AND_ASSIGN(AudioLoop);
};
} // namespace test
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/audio_sink.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/audio_sink.h
index b7b3ed11153..3bd2df5ca8a 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/audio_sink.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/audio_sink.h
@@ -38,7 +38,7 @@ class AudioSink {
}
private:
- DISALLOW_COPY_AND_ASSIGN(AudioSink);
+ RTC_DISALLOW_COPY_AND_ASSIGN(AudioSink);
};
// Forks the output audio to two AudioSink objects.
@@ -56,7 +56,7 @@ class AudioSinkFork : public AudioSink {
AudioSink* left_sink_;
AudioSink* right_sink_;
- DISALLOW_COPY_AND_ASSIGN(AudioSinkFork);
+ RTC_DISALLOW_COPY_AND_ASSIGN(AudioSinkFork);
};
} // namespace test
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/constant_pcm_packet_source.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/constant_pcm_packet_source.cc
index af4b8e130f0..dc07030dd68 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/constant_pcm_packet_source.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/constant_pcm_packet_source.cc
@@ -31,12 +31,12 @@ ConstantPcmPacketSource::ConstantPcmPacketSource(size_t payload_len_samples,
seq_number_(0),
timestamp_(0),
payload_ssrc_(0xABCD1234) {
- int encoded_len = WebRtcPcm16b_Encode(&sample_value, 1, encoded_sample_);
- CHECK_EQ(2, encoded_len);
+ size_t encoded_len = WebRtcPcm16b_Encode(&sample_value, 1, encoded_sample_);
+ RTC_CHECK_EQ(2U, encoded_len);
}
Packet* ConstantPcmPacketSource::NextPacket() {
- CHECK_GT(packet_len_bytes_, kHeaderLenBytes);
+ RTC_CHECK_GT(packet_len_bytes_, kHeaderLenBytes);
uint8_t* packet_memory = new uint8_t[packet_len_bytes_];
// Fill the payload part of the packet memory with the pre-encoded value.
for (unsigned i = 0; i < 2 * payload_len_samples_; ++i)
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/constant_pcm_packet_source.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/constant_pcm_packet_source.h
index b780fbfac18..69723035410 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/constant_pcm_packet_source.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/constant_pcm_packet_source.h
@@ -49,7 +49,7 @@ class ConstantPcmPacketSource : public PacketSource {
uint32_t timestamp_;
const uint32_t payload_ssrc_;
- DISALLOW_COPY_AND_ASSIGN(ConstantPcmPacketSource);
+ RTC_DISALLOW_COPY_AND_ASSIGN(ConstantPcmPacketSource);
};
} // namespace test
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/input_audio_file.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/input_audio_file.cc
index 6bbb3286e49..76f31096db1 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/input_audio_file.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/input_audio_file.cc
@@ -10,6 +10,8 @@
#include "webrtc/modules/audio_coding/neteq/tools/input_audio_file.h"
+#include "webrtc/base/checks.h"
+
namespace webrtc {
namespace test {
@@ -37,6 +39,27 @@ bool InputAudioFile::Read(size_t samples, int16_t* destination) {
return true;
}
+bool InputAudioFile::Seek(int samples) {
+ if (!fp_) {
+ return false;
+ }
+ // Find file boundaries.
+ const long current_pos = ftell(fp_);
+ RTC_CHECK_NE(EOF, current_pos)
+ << "Error returned when getting file position.";
+ RTC_CHECK_EQ(0, fseek(fp_, 0, SEEK_END)); // Move to end of file.
+ const long file_size = ftell(fp_);
+ RTC_CHECK_NE(EOF, file_size) << "Error returned when getting file position.";
+ // Find new position.
+ long new_pos = current_pos + sizeof(int16_t) * samples; // Samples to bytes.
+ RTC_CHECK_GE(new_pos, 0)
+ << "Trying to move to before the beginning of the file";
+ new_pos = new_pos % file_size; // Wrap around the end of the file.
+ // Move to new position relative to the beginning of the file.
+ RTC_CHECK_EQ(0, fseek(fp_, new_pos, SEEK_SET));
+ return true;
+}
+
void InputAudioFile::DuplicateInterleaved(const int16_t* source, size_t samples,
size_t channels,
int16_t* destination) {
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/input_audio_file.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/input_audio_file.h
index 075b5d33b50..a6e12db24d2 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/input_audio_file.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/input_audio_file.h
@@ -34,6 +34,12 @@ class InputAudioFile {
// The output |destination| must have the capacity to hold |samples| elements.
virtual bool Read(size_t samples, int16_t* destination);
+ // Fast-forwards (|samples| > 0) or -backwards (|samples| < 0) the file by the
+ // indicated number of samples. Just like Read(), Seek() starts over at the
+ // beginning of the file if the end is reached. However, seeking backwards
+ // past the beginning of the file is not possible.
+ virtual bool Seek(int samples);
+
// Creates a multi-channel signal from a mono signal. Each sample is repeated
// |channels| times to create an interleaved multi-channel signal where all
// channels are identical. The output |destination| must have the capacity to
@@ -44,7 +50,7 @@ class InputAudioFile {
private:
FILE* fp_;
- DISALLOW_COPY_AND_ASSIGN(InputAudioFile);
+ RTC_DISALLOW_COPY_AND_ASSIGN(InputAudioFile);
};
} // namespace test
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.cc
index 52c34bb79c8..49750c26c87 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.cc
@@ -43,11 +43,11 @@ void NetEqExternalDecoderTest::InsertPacket(WebRtcRTPHeader rtp_header,
rtp_header, payload, payload_size_bytes, receive_timestamp));
}
-int NetEqExternalDecoderTest::GetOutputAudio(size_t max_length,
- int16_t* output,
- NetEqOutputType* output_type) {
+size_t NetEqExternalDecoderTest::GetOutputAudio(size_t max_length,
+ int16_t* output,
+ NetEqOutputType* output_type) {
// Get audio from regular instance.
- int samples_per_channel;
+ size_t samples_per_channel;
int num_channels;
EXPECT_EQ(NetEq::kOK,
neteq_->GetAudio(max_length,
@@ -56,7 +56,8 @@ int NetEqExternalDecoderTest::GetOutputAudio(size_t max_length,
&num_channels,
output_type));
EXPECT_EQ(channels_, num_channels);
- EXPECT_EQ(kOutputLengthMs * sample_rate_hz_ / 1000, samples_per_channel);
+ EXPECT_EQ(static_cast<size_t>(kOutputLengthMs * sample_rate_hz_ / 1000),
+ samples_per_channel);
return samples_per_channel;
}
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.h
index 0d4d2f90373..202d1f301a3 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.h
@@ -42,8 +42,8 @@ class NetEqExternalDecoderTest {
// Get 10 ms of audio data. The data is written to |output|, which can hold
// (at least) |max_length| elements. Returns number of samples.
- int GetOutputAudio(size_t max_length, int16_t* output,
- NetEqOutputType* output_type);
+ size_t GetOutputAudio(size_t max_length, int16_t* output,
+ NetEqOutputType* output_type);
NetEq* neteq() { return neteq_.get(); }
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/neteq_performance_test.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/neteq_performance_test.cc
index 1c76d761e92..57397e14a38 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/neteq_performance_test.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/neteq_performance_test.cc
@@ -101,19 +101,19 @@ int64_t NetEqPerformanceTest::Run(int runtime_ms,
// Get output audio, but don't do anything with it.
static const int kMaxChannels = 1;
- static const int kMaxSamplesPerMs = 48000 / 1000;
+ static const size_t kMaxSamplesPerMs = 48000 / 1000;
static const int kOutputBlockSizeMs = 10;
- static const int kOutDataLen =
+ static const size_t kOutDataLen =
kOutputBlockSizeMs * kMaxSamplesPerMs * kMaxChannels;
int16_t out_data[kOutDataLen];
int num_channels;
- int samples_per_channel;
+ size_t samples_per_channel;
int error = neteq->GetAudio(kOutDataLen, out_data, &samples_per_channel,
&num_channels, NULL);
if (error != NetEq::kOK)
return -1;
- assert(samples_per_channel == kSampRateHz * 10 / 1000);
+ assert(samples_per_channel == static_cast<size_t>(kSampRateHz * 10 / 1000));
time_now_ms += kOutputBlockSizeMs;
if (time_now_ms >= runtime_ms / 2 && !drift_flipped) {
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.cc
index c60b993ad8c..0d3fb24f805 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.cc
@@ -218,8 +218,9 @@ NetEqQualityTest::NetEqQualityTest(int block_duration_ms,
block_duration_ms_(block_duration_ms),
in_sampling_khz_(in_sampling_khz),
out_sampling_khz_(out_sampling_khz),
- in_size_samples_(in_sampling_khz_ * block_duration_ms_),
- out_size_samples_(out_sampling_khz_ * kOutputSizeMs),
+ in_size_samples_(
+ static_cast<size_t>(in_sampling_khz_ * block_duration_ms_)),
+ out_size_samples_(static_cast<size_t>(out_sampling_khz_ * kOutputSizeMs)),
payload_size_bytes_(0),
max_payload_bytes_(0),
in_file_(new ResampleInputAudioFile(FLAGS_in_filename,
@@ -231,7 +232,7 @@ NetEqQualityTest::NetEqQualityTest(int block_duration_ms,
const std::string out_filename = FLAGS_out_filename;
const std::string log_filename = out_filename + ".log";
log_file_.open(log_filename.c_str(), std::ofstream::out);
- CHECK(log_file_.is_open());
+ RTC_CHECK(log_file_.is_open());
if (out_filename.size() >= 4 &&
out_filename.substr(out_filename.size() - 4) == ".wav") {
@@ -392,7 +393,7 @@ int NetEqQualityTest::Transmit() {
int NetEqQualityTest::DecodeBlock() {
int channels;
- int samples;
+ size_t samples;
int ret = neteq_->GetAudio(out_size_samples_ * channels_, &out_data_[0],
&samples, &channels, NULL);
@@ -400,9 +401,9 @@ int NetEqQualityTest::DecodeBlock() {
return -1;
} else {
assert(channels == channels_);
- assert(samples == kOutputSizeMs * out_sampling_khz_);
- CHECK(output_->WriteArray(out_data_.get(), samples * channels));
- return samples;
+ assert(samples == static_cast<size_t>(kOutputSizeMs * out_sampling_khz_));
+ RTC_CHECK(output_->WriteArray(out_data_.get(), samples * channels));
+ return static_cast<int>(samples);
}
}
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.h
index 4a0d808aeca..ba87dbf1f9d 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.h
@@ -76,8 +76,8 @@ class NetEqQualityTest : public ::testing::Test {
// |block_size_samples| (samples per channel),
// 2. save the bit stream to |payload| of |max_bytes| bytes in size,
// 3. returns the length of the payload (in bytes),
- virtual int EncodeBlock(int16_t* in_data, int block_size_samples,
- uint8_t* payload, int max_bytes) = 0;
+ virtual int EncodeBlock(int16_t* in_data, size_t block_size_samples,
+ uint8_t* payload, size_t max_bytes) = 0;
// PacketLost(...) determines weather a packet sent at an indicated time gets
// lost or not.
@@ -111,13 +111,13 @@ class NetEqQualityTest : public ::testing::Test {
const int out_sampling_khz_;
// Number of samples per channel in a frame.
- const int in_size_samples_;
+ const size_t in_size_samples_;
// Expected output number of samples per channel in a frame.
- const int out_size_samples_;
+ const size_t out_size_samples_;
size_t payload_size_bytes_;
- int max_payload_bytes_;
+ size_t max_payload_bytes_;
rtc::scoped_ptr<InputAudioFile> in_file_;
rtc::scoped_ptr<AudioSink> output_;
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/neteq_rtpplay.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/neteq_rtpplay.cc
index 6bcd717279b..300537b2219 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/neteq_rtpplay.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/neteq_rtpplay.cc
@@ -19,10 +19,12 @@
#include <algorithm>
#include <iostream>
+#include <limits>
#include <string>
#include "google/gflags.h"
#include "webrtc/base/checks.h"
+#include "webrtc/base/safe_conversions.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/modules/audio_coding/codecs/pcm16b/include/pcm16b.h"
#include "webrtc/modules/audio_coding/neteq/interface/neteq.h"
@@ -30,9 +32,11 @@
#include "webrtc/modules/audio_coding/neteq/tools/output_audio_file.h"
#include "webrtc/modules/audio_coding/neteq/tools/output_wav_file.h"
#include "webrtc/modules/audio_coding/neteq/tools/packet.h"
+#include "webrtc/modules/audio_coding/neteq/tools/rtc_event_log_source.h"
#include "webrtc/modules/audio_coding/neteq/tools/rtp_file_source.h"
#include "webrtc/modules/interface/module_common_types.h"
#include "webrtc/system_wrappers/interface/trace.h"
+#include "webrtc/test/rtp_file_reader.h"
#include "webrtc/test/testsupport/fileutils.h"
#include "webrtc/typedefs.h"
@@ -324,7 +328,7 @@ size_t ReplacePayload(webrtc::test::InputAudioFile* replacement_audio_file,
// Encode it as PCM16.
assert((*payload).get());
payload_len = WebRtcPcm16b_Encode((*replacement_audio).get(),
- static_cast<int16_t>(*frame_size_samples),
+ *frame_size_samples,
(*payload).get());
assert(payload_len == 2 * *frame_size_samples);
// Change payload type to PCM16.
@@ -358,7 +362,7 @@ size_t ReplacePayload(webrtc::test::InputAudioFile* replacement_audio_file,
int main(int argc, char* argv[]) {
static const int kMaxChannels = 5;
- static const int kMaxSamplesPerMs = 48000 / 1000;
+ static const size_t kMaxSamplesPerMs = 48000 / 1000;
static const int kOutputBlockSizeMs = 10;
std::string program_name = argv[0];
@@ -384,14 +388,36 @@ int main(int argc, char* argv[]) {
}
printf("Input file: %s\n", argv[1]);
- rtc::scoped_ptr<webrtc::test::RtpFileSource> file_source(
- webrtc::test::RtpFileSource::Create(argv[1]));
+
+ // TODO(ivoc): Modify the RtpFileSource::Create and RtcEventLogSource::Create
+ // functions to return a nullptr on failure instead of crashing
+ // the program.
+
+ // This temporary solution uses a RtpFileReader directly to check if the file
+ // is a valid RtpDump file.
+ bool is_rtp_dump = false;
+ {
+ rtc::scoped_ptr<webrtc::test::RtpFileReader> rtp_reader(
+ webrtc::test::RtpFileReader::Create(
+ webrtc::test::RtpFileReader::kRtpDump, argv[1]));
+ if (rtp_reader)
+ is_rtp_dump = true;
+ }
+ rtc::scoped_ptr<webrtc::test::PacketSource> file_source;
+ webrtc::test::RtcEventLogSource* event_log_source = nullptr;
+ if (is_rtp_dump) {
+ file_source.reset(webrtc::test::RtpFileSource::Create(argv[1]));
+ } else {
+ event_log_source = webrtc::test::RtcEventLogSource::Create(argv[1]);
+ file_source.reset(event_log_source);
+ }
+
assert(file_source.get());
// Check if an SSRC value was provided.
if (!FLAGS_ssrc.empty()) {
uint32_t ssrc;
- CHECK(ParseSsrc(FLAGS_ssrc, &ssrc)) << "Flag verification has failed.";
+ RTC_CHECK(ParseSsrc(FLAGS_ssrc, &ssrc)) << "Flag verification has failed.";
file_source->SelectSsrc(ssrc);
}
@@ -413,7 +439,12 @@ int main(int argc, char* argv[]) {
webrtc::Trace::ReturnTrace();
return 0;
}
- bool packet_available = true;
+ if (packet->payload_length_bytes() == 0 && !replace_payload) {
+ std::cerr << "Warning: input file contains header-only packets, but no "
+ << "replacement file is specified." << std::endl;
+ webrtc::Trace::ReturnTrace();
+ return -1;
+ }
// Check the sample rate.
int sample_rate_hz = CodecSampleRate(packet->header().payloadType);
@@ -475,17 +506,29 @@ int main(int argc, char* argv[]) {
// This is the main simulation loop.
// Set the simulation clock to start immediately with the first packet.
- int start_time_ms = packet->time_ms();
- int time_now_ms = packet->time_ms();
- int next_input_time_ms = time_now_ms;
- int next_output_time_ms = time_now_ms;
+ int64_t start_time_ms = rtc::checked_cast<int64_t>(packet->time_ms());
+ int64_t time_now_ms = start_time_ms;
+ int64_t next_input_time_ms = time_now_ms;
+ int64_t next_output_time_ms = time_now_ms;
if (time_now_ms % kOutputBlockSizeMs != 0) {
// Make sure that next_output_time_ms is rounded up to the next multiple
// of kOutputBlockSizeMs. (Legacy bit-exactness.)
next_output_time_ms +=
kOutputBlockSizeMs - time_now_ms % kOutputBlockSizeMs;
}
- while (packet_available) {
+
+ bool packet_available = true;
+ bool output_event_available = true;
+ if (!is_rtp_dump) {
+ next_output_time_ms = event_log_source->NextAudioOutputEventMs();
+ if (next_output_time_ms == std::numeric_limits<int64_t>::max())
+ output_event_available = false;
+ start_time_ms = time_now_ms =
+ std::min(next_input_time_ms, next_output_time_ms);
+ }
+ while (packet_available || output_event_available) {
+ // Advance time to next event.
+ time_now_ms = std::min(next_input_time_ms, next_output_time_ms);
// Check if it is time to insert packet.
while (time_now_ms >= next_input_time_ms && packet_available) {
assert(packet->virtual_payload_length_bytes() > 0);
@@ -504,11 +547,9 @@ int main(int argc, char* argv[]) {
next_packet.get());
payload_ptr = payload.get();
}
- int error =
- neteq->InsertPacket(rtp_header,
- payload_ptr,
- payload_len,
- packet->time_ms() * sample_rate_hz / 1000);
+ int error = neteq->InsertPacket(
+ rtp_header, payload_ptr, payload_len,
+ static_cast<uint32_t>(packet->time_ms() * sample_rate_hz / 1000));
if (error != NetEq::kOK) {
if (neteq->LastError() == NetEq::kUnknownRtpPayloadType) {
std::cerr << "RTP Payload type "
@@ -534,29 +575,32 @@ int main(int argc, char* argv[]) {
webrtc::test::Packet* temp_packet = file_source->NextPacket();
if (temp_packet) {
packet.reset(temp_packet);
+ if (replace_payload) {
+ // At this point |packet| contains the packet *after* |next_packet|.
+ // Swap Packet objects between |packet| and |next_packet|.
+ packet.swap(next_packet);
+ // Swap the status indicators unless they're already the same.
+ if (packet_available != next_packet_available) {
+ packet_available = !packet_available;
+ next_packet_available = !next_packet_available;
+ }
+ }
+ next_input_time_ms = rtc::checked_cast<int64_t>(packet->time_ms());
} else {
+ // Set next input time to the maximum value of int64_t to prevent the
+ // time_now_ms from becoming stuck at the final value.
+ next_input_time_ms = std::numeric_limits<int64_t>::max();
packet_available = false;
}
- if (replace_payload) {
- // At this point |packet| contains the packet *after* |next_packet|.
- // Swap Packet objects between |packet| and |next_packet|.
- packet.swap(next_packet);
- // Swap the status indicators unless they're already the same.
- if (packet_available != next_packet_available) {
- packet_available = !packet_available;
- next_packet_available = !next_packet_available;
- }
- }
- next_input_time_ms = packet->time_ms();
}
// Check if it is time to get output audio.
- if (time_now_ms >= next_output_time_ms) {
- static const int kOutDataLen =
+ while (time_now_ms >= next_output_time_ms && output_event_available) {
+ static const size_t kOutDataLen =
kOutputBlockSizeMs * kMaxSamplesPerMs * kMaxChannels;
int16_t out_data[kOutDataLen];
int num_channels;
- int samples_per_channel;
+ size_t samples_per_channel;
int error = neteq->GetAudio(kOutDataLen, out_data, &samples_per_channel,
&num_channels, NULL);
if (error != NetEq::kOK) {
@@ -564,7 +608,8 @@ int main(int argc, char* argv[]) {
neteq->LastError() << std::endl;
} else {
// Calculate sample rate from output size.
- sample_rate_hz = 1000 * samples_per_channel / kOutputBlockSizeMs;
+ sample_rate_hz = rtc::checked_cast<int>(
+ 1000 * samples_per_channel / kOutputBlockSizeMs);
}
// Write to file.
@@ -575,14 +620,20 @@ int main(int argc, char* argv[]) {
webrtc::Trace::ReturnTrace();
exit(1);
}
- next_output_time_ms += kOutputBlockSizeMs;
+ if (is_rtp_dump) {
+ next_output_time_ms += kOutputBlockSizeMs;
+ if (!packet_available)
+ output_event_available = false;
+ } else {
+ next_output_time_ms = event_log_source->NextAudioOutputEventMs();
+ if (next_output_time_ms == std::numeric_limits<int64_t>::max())
+ output_event_available = false;
+ }
}
- // Advance time to next event.
- time_now_ms = std::min(next_input_time_ms, next_output_time_ms);
}
-
printf("Simulation done\n");
- printf("Produced %i ms of audio\n", time_now_ms - start_time_ms);
+ printf("Produced %i ms of audio\n",
+ static_cast<int>(time_now_ms - start_time_ms));
delete neteq;
webrtc::Trace::ReturnTrace();
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/output_audio_file.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/output_audio_file.h
index ff30f673d5c..a9142a63c17 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/output_audio_file.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/output_audio_file.h
@@ -42,7 +42,7 @@ class OutputAudioFile : public AudioSink {
private:
FILE* out_file_;
- DISALLOW_COPY_AND_ASSIGN(OutputAudioFile);
+ RTC_DISALLOW_COPY_AND_ASSIGN(OutputAudioFile);
};
} // namespace test
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/output_wav_file.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/output_wav_file.h
index 1b1ed42829f..c36c7da9837 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/output_wav_file.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/output_wav_file.h
@@ -35,7 +35,7 @@ class OutputWavFile : public AudioSink {
private:
WavWriter wav_writer_;
- DISALLOW_COPY_AND_ASSIGN(OutputWavFile);
+ RTC_DISALLOW_COPY_AND_ASSIGN(OutputWavFile);
};
} // namespace test
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/packet.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/packet.h
index a4e48d8953e..8e436334234 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/packet.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/packet.h
@@ -114,7 +114,7 @@ class Packet {
double time_ms_; // Used to denote a packet's arrival time.
bool valid_header_; // Set by the RtpHeaderParser.
- DISALLOW_COPY_AND_ASSIGN(Packet);
+ RTC_DISALLOW_COPY_AND_ASSIGN(Packet);
};
} // namespace test
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/packet_source.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/packet_source.h
index 968400c2152..804a94dc49c 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/packet_source.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/packet_source.h
@@ -46,7 +46,7 @@ class PacketSource {
uint32_t ssrc_; // The selected SSRC. All other SSRCs will be discarded.
private:
- DISALLOW_COPY_AND_ASSIGN(PacketSource);
+ RTC_DISALLOW_COPY_AND_ASSIGN(PacketSource);
};
} // namespace test
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/resample_input_audio_file.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/resample_input_audio_file.cc
index 47450bc8870..7a0bb1a6afc 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/resample_input_audio_file.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/resample_input_audio_file.cc
@@ -20,25 +20,22 @@ bool ResampleInputAudioFile::Read(size_t samples,
int output_rate_hz,
int16_t* destination) {
const size_t samples_to_read = samples * file_rate_hz_ / output_rate_hz;
- CHECK_EQ(samples_to_read * output_rate_hz, samples * file_rate_hz_)
+ RTC_CHECK_EQ(samples_to_read * output_rate_hz, samples * file_rate_hz_)
<< "Frame size and sample rates don't add up to an integer.";
rtc::scoped_ptr<int16_t[]> temp_destination(new int16_t[samples_to_read]);
if (!InputAudioFile::Read(samples_to_read, temp_destination.get()))
return false;
resampler_.ResetIfNeeded(file_rate_hz_, output_rate_hz, 1);
- int output_length = 0;
- CHECK_EQ(resampler_.Push(temp_destination.get(),
- static_cast<int>(samples_to_read),
- destination,
- static_cast<int>(samples),
- output_length),
- 0);
- CHECK_EQ(static_cast<int>(samples), output_length);
+ size_t output_length = 0;
+ RTC_CHECK_EQ(resampler_.Push(temp_destination.get(), samples_to_read,
+ destination, samples, output_length),
+ 0);
+ RTC_CHECK_EQ(samples, output_length);
return true;
}
bool ResampleInputAudioFile::Read(size_t samples, int16_t* destination) {
- CHECK_GT(output_rate_hz_, 0) << "Output rate not set.";
+ RTC_CHECK_GT(output_rate_hz_, 0) << "Output rate not set.";
return Read(samples, output_rate_hz_, destination);
}
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/resample_input_audio_file.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/resample_input_audio_file.h
index b15d46faff0..c0af3546b04 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/resample_input_audio_file.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/resample_input_audio_file.h
@@ -43,7 +43,7 @@ class ResampleInputAudioFile : public InputAudioFile {
const int file_rate_hz_;
int output_rate_hz_;
Resampler resampler_;
- DISALLOW_COPY_AND_ASSIGN(ResampleInputAudioFile);
+ RTC_DISALLOW_COPY_AND_ASSIGN(ResampleInputAudioFile);
};
} // namespace test
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/rtc_event_log_source.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/rtc_event_log_source.cc
new file mode 100644
index 00000000000..bfd49d3cec3
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/rtc_event_log_source.cc
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_coding/neteq/tools/rtc_event_log_source.h"
+
+#include <assert.h>
+#include <string.h>
+#include <iostream>
+#include <limits>
+
+#include "webrtc/base/checks.h"
+#include "webrtc/call/rtc_event_log.h"
+#include "webrtc/modules/audio_coding/neteq/tools/packet.h"
+#include "webrtc/modules/rtp_rtcp/interface/rtp_header_parser.h"
+
+// Files generated at build-time by the protobuf compiler.
+#ifdef WEBRTC_ANDROID_PLATFORM_BUILD
+#include "external/webrtc/webrtc/call/rtc_event_log.pb.h"
+#else
+#include "webrtc/call/rtc_event_log.pb.h"
+#endif
+
+namespace webrtc {
+namespace test {
+
+namespace {
+
+const rtclog::RtpPacket* GetRtpPacket(const rtclog::Event& event) {
+ if (!event.has_type() || event.type() != rtclog::Event::RTP_EVENT)
+ return nullptr;
+ if (!event.has_timestamp_us() || !event.has_rtp_packet())
+ return nullptr;
+ const rtclog::RtpPacket& rtp_packet = event.rtp_packet();
+ if (!rtp_packet.has_type() || rtp_packet.type() != rtclog::AUDIO ||
+ !rtp_packet.has_incoming() || !rtp_packet.incoming() ||
+ !rtp_packet.has_packet_length() || rtp_packet.packet_length() == 0 ||
+ !rtp_packet.has_header() || rtp_packet.header().size() == 0 ||
+ rtp_packet.packet_length() < rtp_packet.header().size())
+ return nullptr;
+ return &rtp_packet;
+}
+
+const rtclog::DebugEvent* GetAudioOutputEvent(const rtclog::Event& event) {
+ if (!event.has_type() || event.type() != rtclog::Event::DEBUG_EVENT)
+ return nullptr;
+ if (!event.has_timestamp_us() || !event.has_debug_event())
+ return nullptr;
+ const rtclog::DebugEvent& debug_event = event.debug_event();
+ if (!debug_event.has_type() ||
+ debug_event.type() != rtclog::DebugEvent::AUDIO_PLAYOUT)
+ return nullptr;
+ return &debug_event;
+}
+
+} // namespace
+
+RtcEventLogSource* RtcEventLogSource::Create(const std::string& file_name) {
+ RtcEventLogSource* source = new RtcEventLogSource();
+ RTC_CHECK(source->OpenFile(file_name));
+ return source;
+}
+
+RtcEventLogSource::~RtcEventLogSource() {}
+
+bool RtcEventLogSource::RegisterRtpHeaderExtension(RTPExtensionType type,
+ uint8_t id) {
+ RTC_CHECK(parser_.get());
+ return parser_->RegisterRtpHeaderExtension(type, id);
+}
+
+Packet* RtcEventLogSource::NextPacket() {
+ while (rtp_packet_index_ < event_log_->stream_size()) {
+ const rtclog::Event& event = event_log_->stream(rtp_packet_index_);
+ const rtclog::RtpPacket* rtp_packet = GetRtpPacket(event);
+ rtp_packet_index_++;
+ if (rtp_packet) {
+ uint8_t* packet_header = new uint8_t[rtp_packet->header().size()];
+ memcpy(packet_header, rtp_packet->header().data(),
+ rtp_packet->header().size());
+ Packet* packet = new Packet(packet_header, rtp_packet->header().size(),
+ rtp_packet->packet_length(),
+ event.timestamp_us() / 1000, *parser_.get());
+ if (packet->valid_header()) {
+ // Check if the packet should not be filtered out.
+ if (!filter_.test(packet->header().payloadType) &&
+ !(use_ssrc_filter_ && packet->header().ssrc != ssrc_))
+ return packet;
+ } else {
+ std::cout << "Warning: Packet with index " << (rtp_packet_index_ - 1)
+ << " has an invalid header and will be ignored." << std::endl;
+ }
+ // The packet has either an invalid header or needs to be filtered out, so
+ // it can be deleted.
+ delete packet;
+ }
+ }
+ return nullptr;
+}
+
+int64_t RtcEventLogSource::NextAudioOutputEventMs() {
+ while (audio_output_index_ < event_log_->stream_size()) {
+ const rtclog::Event& event = event_log_->stream(audio_output_index_);
+ const rtclog::DebugEvent* debug_event = GetAudioOutputEvent(event);
+ audio_output_index_++;
+ if (debug_event)
+ return event.timestamp_us() / 1000;
+ }
+ return std::numeric_limits<int64_t>::max();
+}
+
+RtcEventLogSource::RtcEventLogSource()
+ : PacketSource(), parser_(RtpHeaderParser::Create()) {}
+
+bool RtcEventLogSource::OpenFile(const std::string& file_name) {
+ event_log_.reset(new rtclog::EventStream());
+ return RtcEventLog::ParseRtcEventLog(file_name, event_log_.get());
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/rtc_event_log_source.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/rtc_event_log_source.h
new file mode 100644
index 00000000000..7150bcfe899
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/rtc_event_log_source.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_RTC_EVENT_LOG_SOURCE_H_
+#define WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_RTC_EVENT_LOG_SOURCE_H_
+
+#include <string>
+
+#include "webrtc/base/constructormagic.h"
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/modules/audio_coding/neteq/tools/packet_source.h"
+#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
+
+namespace webrtc {
+
+class RtpHeaderParser;
+
+namespace rtclog {
+class EventStream;
+} // namespace rtclog
+
+namespace test {
+
+class Packet;
+
+class RtcEventLogSource : public PacketSource {
+ public:
+ // Creates an RtcEventLogSource reading from |file_name|. If the file cannot
+ // be opened, or has the wrong format, NULL will be returned.
+ static RtcEventLogSource* Create(const std::string& file_name);
+
+ virtual ~RtcEventLogSource();
+
+ // Registers an RTP header extension and binds it to |id|.
+ virtual bool RegisterRtpHeaderExtension(RTPExtensionType type, uint8_t id);
+
+ // Returns a pointer to the next packet. Returns NULL if end of file was
+ // reached.
+ Packet* NextPacket() override;
+
+ // Returns the timestamp of the next audio output event, in milliseconds. The
+ // maximum value of int64_t is returned if there are no more audio output
+ // events available.
+ int64_t NextAudioOutputEventMs();
+
+ private:
+ RtcEventLogSource();
+
+ bool OpenFile(const std::string& file_name);
+
+ int rtp_packet_index_ = 0;
+ int audio_output_index_ = 0;
+
+ rtc::scoped_ptr<rtclog::EventStream> event_log_;
+ rtc::scoped_ptr<RtpHeaderParser> parser_;
+
+ RTC_DISALLOW_COPY_AND_ASSIGN(RtcEventLogSource);
+};
+
+} // namespace test
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_RTC_EVENT_LOG_SOURCE_H_
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/rtp_analyze.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/rtp_analyze.cc
index d6d1b6560af..78f0497ffff 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/rtp_analyze.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/rtp_analyze.cc
@@ -123,11 +123,10 @@ int main(int argc, char* argv[]) {
packet->header().markerBit,
packet->header().ssrc);
if (print_audio_level && packet->header().extension.hasAudioLevel) {
- // |audioLevel| consists of one bit for "V" and then 7 bits level.
fprintf(out_file,
" %5u (%1i)",
- packet->header().extension.audioLevel & 0x7F,
- (packet->header().extension.audioLevel & 0x80) == 0 ? 0 : 1);
+ packet->header().extension.audioLevel,
+ packet->header().extension.voiceActivity);
}
if (print_abs_send_time && packet->header().extension.hasAbsoluteSendTime) {
if (cycles == -1) {
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/rtp_file_source.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/rtp_file_source.cc
index f5d323ecf63..9681ad17ea8 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/rtp_file_source.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/rtp_file_source.cc
@@ -28,7 +28,7 @@ namespace test {
RtpFileSource* RtpFileSource::Create(const std::string& file_name) {
RtpFileSource* source = new RtpFileSource();
- CHECK(source->OpenFile(file_name));
+ RTC_CHECK(source->OpenFile(file_name));
return source;
}
@@ -47,7 +47,7 @@ Packet* RtpFileSource::NextPacket() {
if (!rtp_reader_->NextPacket(&temp_packet)) {
return NULL;
}
- if (temp_packet.length == 0) {
+ if (temp_packet.original_length == 0) {
// May be an RTCP packet.
// Read the next one.
continue;
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/rtp_file_source.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/rtp_file_source.h
index d711685950e..d0856a819ce 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/rtp_file_source.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/rtp_file_source.h
@@ -55,7 +55,7 @@ class RtpFileSource : public PacketSource {
rtc::scoped_ptr<RtpFileReader> rtp_reader_;
rtc::scoped_ptr<RtpHeaderParser> parser_;
- DISALLOW_COPY_AND_ASSIGN(RtpFileSource);
+ RTC_DISALLOW_COPY_AND_ASSIGN(RtpFileSource);
};
} // namespace test
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/rtp_generator.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/rtp_generator.h
index e09f6e4ca1c..6c16192daa3 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/rtp_generator.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/rtp_generator.h
@@ -54,7 +54,7 @@ class RtpGenerator {
double drift_factor_;
private:
- DISALLOW_COPY_AND_ASSIGN(RtpGenerator);
+ RTC_DISALLOW_COPY_AND_ASSIGN(RtpGenerator);
};
class TimestampJumpRtpGenerator : public RtpGenerator {
@@ -75,7 +75,7 @@ class TimestampJumpRtpGenerator : public RtpGenerator {
private:
uint32_t jump_from_timestamp_;
uint32_t jump_to_timestamp_;
- DISALLOW_COPY_AND_ASSIGN(TimestampJumpRtpGenerator);
+ RTC_DISALLOW_COPY_AND_ASSIGN(TimestampJumpRtpGenerator);
};
} // namespace test
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/rtpcat.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/rtpcat.cc
index f7490de5517..f2b87a5b958 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/rtpcat.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/rtpcat.cc
@@ -28,18 +28,18 @@ int main(int argc, char* argv[]) {
scoped_ptr<RtpFileWriter> output(
RtpFileWriter::Create(RtpFileWriter::kRtpDump, argv[argc - 1]));
- CHECK(output.get() != NULL) << "Cannot open output file.";
+ RTC_CHECK(output.get() != NULL) << "Cannot open output file.";
printf("Output RTP file: %s\n", argv[argc - 1]);
for (int i = 1; i < argc - 1; i++) {
scoped_ptr<RtpFileReader> input(
RtpFileReader::Create(RtpFileReader::kRtpDump, argv[i]));
- CHECK(input.get() != NULL) << "Cannot open input file " << argv[i];
+ RTC_CHECK(input.get() != NULL) << "Cannot open input file " << argv[i];
printf("Input RTP file: %s\n", argv[i]);
webrtc::test::RtpPacket packet;
while (input->NextPacket(&packet))
- CHECK(output->WritePacket(&packet));
+ RTC_CHECK(output->WritePacket(&packet));
}
return 0;
}
diff --git a/chromium/third_party/webrtc/modules/audio_conference_mixer/BUILD.gn b/chromium/third_party/webrtc/modules/audio_conference_mixer/BUILD.gn
index 31f2e9affd7..3b9e2769ac9 100644
--- a/chromium/third_party/webrtc/modules/audio_conference_mixer/BUILD.gn
+++ b/chromium/third_party/webrtc/modules/audio_conference_mixer/BUILD.gn
@@ -22,8 +22,6 @@ source_set("audio_conference_mixer") {
"source/audio_conference_mixer_impl.h",
"source/audio_frame_manipulator.cc",
"source/audio_frame_manipulator.h",
- "source/level_indicator.cc",
- "source/level_indicator.h",
"source/memory_pool.h",
"source/memory_pool_posix.h",
"source/memory_pool_win.h",
diff --git a/chromium/third_party/webrtc/modules/audio_conference_mixer/audio_conference_mixer.gypi b/chromium/third_party/webrtc/modules/audio_conference_mixer/audio_conference_mixer.gypi
index 51ee6891d5f..5aa3cc449bf 100644
--- a/chromium/third_party/webrtc/modules/audio_conference_mixer/audio_conference_mixer.gypi
+++ b/chromium/third_party/webrtc/modules/audio_conference_mixer/audio_conference_mixer.gypi
@@ -21,8 +21,6 @@
'interface/audio_conference_mixer_defines.h',
'source/audio_frame_manipulator.cc',
'source/audio_frame_manipulator.h',
- 'source/level_indicator.cc',
- 'source/level_indicator.h',
'source/memory_pool.h',
'source/memory_pool_posix.h',
'source/memory_pool_win.h',
diff --git a/chromium/third_party/webrtc/modules/audio_conference_mixer/interface/audio_conference_mixer.h b/chromium/third_party/webrtc/modules/audio_conference_mixer/interface/audio_conference_mixer.h
index b9be6c649de..7ff39579ee0 100644
--- a/chromium/third_party/webrtc/modules/audio_conference_mixer/interface/audio_conference_mixer.h
+++ b/chromium/third_party/webrtc/modules/audio_conference_mixer/interface/audio_conference_mixer.h
@@ -17,7 +17,6 @@
namespace webrtc {
class AudioMixerOutputReceiver;
-class AudioMixerStatusReceiver;
class MixerParticipant;
class Trace;
@@ -45,31 +44,25 @@ public:
// Register/unregister a callback class for receiving the mixed audio.
virtual int32_t RegisterMixedStreamCallback(
- AudioMixerOutputReceiver& receiver) = 0;
+ AudioMixerOutputReceiver* receiver) = 0;
virtual int32_t UnRegisterMixedStreamCallback() = 0;
- // Register/unregister a callback class for receiving status information.
- virtual int32_t RegisterMixerStatusCallback(
- AudioMixerStatusReceiver& mixerStatusCallback,
- const uint32_t amountOf10MsBetweenCallbacks) = 0;
- virtual int32_t UnRegisterMixerStatusCallback() = 0;
-
// Add/remove participants as candidates for mixing.
- virtual int32_t SetMixabilityStatus(MixerParticipant& participant,
+ virtual int32_t SetMixabilityStatus(MixerParticipant* participant,
bool mixable) = 0;
- // mixable is set to true if a participant is a candidate for mixing.
- virtual int32_t MixabilityStatus(MixerParticipant& participant,
- bool& mixable) = 0;
+ // Returns true if a participant is a candidate for mixing.
+ virtual bool MixabilityStatus(
+ const MixerParticipant& participant) const = 0;
// Inform the mixer that the participant should always be mixed and not
// count toward the number of mixed participants. Note that a participant
// must have been added to the mixer (by calling SetMixabilityStatus())
// before this function can be successfully called.
- virtual int32_t SetAnonymousMixabilityStatus(MixerParticipant& participant,
- const bool mixable) = 0;
- // mixable is set to true if the participant is mixed anonymously.
- virtual int32_t AnonymousMixabilityStatus(MixerParticipant& participant,
- bool& mixable) = 0;
+ virtual int32_t SetAnonymousMixabilityStatus(
+ MixerParticipant* participant, bool mixable) = 0;
+ // Returns true if the participant is mixed anonymously.
+ virtual bool AnonymousMixabilityStatus(
+ const MixerParticipant& participant) const = 0;
// Set the minimum sampling frequency at which to mix. The mixing algorithm
// may still choose to mix at a higher samling frequency to avoid
diff --git a/chromium/third_party/webrtc/modules/audio_conference_mixer/interface/audio_conference_mixer_defines.h b/chromium/third_party/webrtc/modules/audio_conference_mixer/interface/audio_conference_mixer_defines.h
index 663be182dd3..d15b7fca02c 100644
--- a/chromium/third_party/webrtc/modules/audio_conference_mixer/interface/audio_conference_mixer_defines.h
+++ b/chromium/third_party/webrtc/modules/audio_conference_mixer/interface/audio_conference_mixer_defines.h
@@ -25,14 +25,15 @@ public:
// audio every time it's called.
//
// If it returns -1, the frame will not be added to the mix.
- virtual int32_t GetAudioFrame(const int32_t id, AudioFrame& audioFrame) = 0;
+ virtual int32_t GetAudioFrame(int32_t id,
+ AudioFrame* audioFrame) = 0;
- // mixed will be set to true if the participant was mixed this mix iteration
- int32_t IsMixed(bool& mixed) const;
+ // Returns true if the participant was mixed this mix iteration.
+ bool IsMixed() const;
// This function specifies the sampling frequency needed for the AudioFrame
// for future GetAudioFrame(..) calls.
- virtual int32_t NeededFrequency(const int32_t id) = 0;
+ virtual int32_t NeededFrequency(int32_t id) const = 0;
MixHistory* _mixHistory;
protected:
@@ -40,38 +41,6 @@ protected:
virtual ~MixerParticipant();
};
-// Container struct for participant statistics.
-struct ParticipantStatistics
-{
- int32_t participant;
- int32_t level;
-};
-
-class AudioMixerStatusReceiver
-{
-public:
- // Callback function that provides an array of ParticipantStatistics for the
- // participants that were mixed last mix iteration.
- virtual void MixedParticipants(
- const int32_t id,
- const ParticipantStatistics* participantStatistics,
- const uint32_t size) = 0;
- // Callback function that provides an array of the ParticipantStatistics for
- // the participants that had a positiv VAD last mix iteration.
- virtual void VADPositiveParticipants(
- const int32_t id,
- const ParticipantStatistics* participantStatistics,
- const uint32_t size) = 0;
- // Callback function that provides the audio level of the mixed audio frame
- // from the last mix iteration.
- virtual void MixedAudioLevel(
- const int32_t id,
- const uint32_t level) = 0;
-protected:
- AudioMixerStatusReceiver() {}
- virtual ~AudioMixerStatusReceiver() {}
-};
-
class AudioMixerOutputReceiver
{
public:
diff --git a/chromium/third_party/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc b/chromium/third_party/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc
index 3ee2a086340..82480d2e65a 100644
--- a/chromium/third_party/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc
+++ b/chromium/third_party/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc
@@ -60,12 +60,6 @@ int MaxNumChannels(const AudioFrameList* list) {
return max_num_channels;
}
-void SetParticipantStatistics(ParticipantStatistics* stats,
- const AudioFrame& frame) {
- stats->participant = frame.id_;
- stats->level = 0; // TODO(andrew): to what should this be set?
-}
-
} // namespace
MixerParticipant::MixerParticipant()
@@ -76,8 +70,8 @@ MixerParticipant::~MixerParticipant() {
delete _mixHistory;
}
-int32_t MixerParticipant::IsMixed(bool& mixed) const {
- return _mixHistory->IsMixed(mixed);
+bool MixerParticipant::IsMixed() const {
+ return _mixHistory->IsMixed();
}
MixHistory::MixHistory()
@@ -87,15 +81,14 @@ MixHistory::MixHistory()
MixHistory::~MixHistory() {
}
-int32_t MixHistory::IsMixed(bool& mixed) const {
- mixed = _isMixed;
- return 0;
+bool MixHistory::IsMixed() const {
+ return _isMixed;
}
-int32_t MixHistory::WasMixed(bool& wasMixed) const {
+bool MixHistory::WasMixed() const {
// Was mixed is the same as is mixed depending on perspective. This function
// is for the perspective of AudioConferenceMixerImpl.
- return IsMixed(wasMixed);
+ return IsMixed();
}
int32_t MixHistory::SetIsMixed(const bool mixed) {
@@ -117,17 +110,9 @@ AudioConferenceMixer* AudioConferenceMixer::Create(int id) {
}
AudioConferenceMixerImpl::AudioConferenceMixerImpl(int id)
- : _scratchParticipantsToMixAmount(0),
- _scratchMixedParticipants(),
- _scratchVadPositiveParticipantsAmount(0),
- _scratchVadPositiveParticipants(),
- _id(id),
+ : _id(id),
_minimumMixingFreq(kLowestPossible),
_mixReceiver(NULL),
- _mixerStatusCallback(NULL),
- _amountOf10MsBetweenCallbacks(1),
- _amountOf10MsUntilNextCallback(0),
- _mixerStatusCb(false),
_outputFrequency(kDefaultFrequency),
_sampleSize(0),
_audioFramePool(NULL),
@@ -137,7 +122,6 @@ AudioConferenceMixerImpl::AudioConferenceMixerImpl(int id)
use_limiter_(true),
_timeStamp(0),
_timeScheduler(kProcessPeriodicityInMs),
- _mixedAudioLevel(),
_processCalls(0) {}
bool AudioConferenceMixerImpl::Init() {
@@ -271,11 +255,10 @@ int32_t AudioConferenceMixerImpl::Process() {
}
UpdateToMix(&mixList, &rampOutList, &mixedParticipantsMap,
- remainingParticipantsAllowedToMix);
+ &remainingParticipantsAllowedToMix);
GetAdditionalAudio(&additionalFramesList);
UpdateMixedStatus(mixedParticipantsMap);
- _scratchParticipantsToMixAmount = mixedParticipantsMap.size();
}
// Get an AudioFrame for mixing from the memory pool.
@@ -287,9 +270,7 @@ int32_t AudioConferenceMixerImpl::Process() {
return -1;
}
- bool timeForMixerCallback = false;
int retval = 0;
- int32_t audioLevel = 0;
{
CriticalSectionScoped cs(_crit.get());
@@ -305,16 +286,17 @@ int32_t AudioConferenceMixerImpl::Process() {
AudioFrame::kNormalSpeech,
AudioFrame::kVadPassive, num_mixed_channels);
- _timeStamp += _sampleSize;
+ _timeStamp += static_cast<uint32_t>(_sampleSize);
// We only use the limiter if it supports the output sample rate and
// we're actually mixing multiple streams.
- use_limiter_ = _numMixedParticipants > 1 &&
- _outputFrequency <= kAudioProcMaxNativeSampleRateHz;
+ use_limiter_ =
+ _numMixedParticipants > 1 &&
+ _outputFrequency <= AudioProcessing::kMaxNativeSampleRateHz;
- MixFromList(*mixedAudio, &mixList);
- MixAnonomouslyFromList(*mixedAudio, &additionalFramesList);
- MixAnonomouslyFromList(*mixedAudio, &rampOutList);
+ MixFromList(mixedAudio, mixList);
+ MixAnonomouslyFromList(mixedAudio, additionalFramesList);
+ MixAnonomouslyFromList(mixedAudio, rampOutList);
if(mixedAudio->samples_per_channel_ == 0) {
// Nothing was mixed, set the audio samples to silence.
@@ -322,21 +304,9 @@ int32_t AudioConferenceMixerImpl::Process() {
mixedAudio->Mute();
} else {
// Only call the limiter if we have something to mix.
- if(!LimitMixedAudio(*mixedAudio))
+ if(!LimitMixedAudio(mixedAudio))
retval = -1;
}
-
- _mixedAudioLevel.ComputeLevel(mixedAudio->data_,_sampleSize);
- audioLevel = _mixedAudioLevel.GetLevel();
-
- if(_mixerStatusCb) {
- _scratchVadPositiveParticipantsAmount = 0;
- UpdateVADPositiveParticipants(&mixList);
- if(_amountOf10MsUntilNextCallback-- == 0) {
- _amountOf10MsUntilNextCallback = _amountOf10MsBetweenCallbacks;
- timeForMixerCallback = true;
- }
- }
}
{
@@ -349,20 +319,6 @@ int32_t AudioConferenceMixerImpl::Process() {
dummy,
0);
}
-
- if((_mixerStatusCallback != NULL) &&
- timeForMixerCallback) {
- _mixerStatusCallback->MixedParticipants(
- _id,
- _scratchMixedParticipants,
- static_cast<uint32_t>(_scratchParticipantsToMixAmount));
-
- _mixerStatusCallback->VADPositiveParticipants(
- _id,
- _scratchVadPositiveParticipants,
- _scratchVadPositiveParticipantsAmount);
- _mixerStatusCallback->MixedAudioLevel(_id,audioLevel);
- }
}
// Reclaim all outstanding memory.
@@ -378,12 +334,12 @@ int32_t AudioConferenceMixerImpl::Process() {
}
int32_t AudioConferenceMixerImpl::RegisterMixedStreamCallback(
- AudioMixerOutputReceiver& mixReceiver) {
+ AudioMixerOutputReceiver* mixReceiver) {
CriticalSectionScoped cs(_cbCrit.get());
if(_mixReceiver != NULL) {
return -1;
}
- _mixReceiver = &mixReceiver;
+ _mixReceiver = mixReceiver;
return 0;
}
@@ -397,11 +353,12 @@ int32_t AudioConferenceMixerImpl::UnRegisterMixedStreamCallback() {
}
int32_t AudioConferenceMixerImpl::SetOutputFrequency(
- const Frequency frequency) {
+ const Frequency& frequency) {
CriticalSectionScoped cs(_crit.get());
_outputFrequency = frequency;
- _sampleSize = (_outputFrequency*kProcessPeriodicityInMs) / 1000;
+ _sampleSize =
+ static_cast<size_t>((_outputFrequency*kProcessPeriodicityInMs) / 1000);
return 0;
}
@@ -412,56 +369,8 @@ AudioConferenceMixerImpl::OutputFrequency() const {
return _outputFrequency;
}
-int32_t AudioConferenceMixerImpl::RegisterMixerStatusCallback(
- AudioMixerStatusReceiver& mixerStatusCallback,
- const uint32_t amountOf10MsBetweenCallbacks) {
- if(amountOf10MsBetweenCallbacks == 0) {
- WEBRTC_TRACE(
- kTraceWarning,
- kTraceAudioMixerServer,
- _id,
- "amountOf10MsBetweenCallbacks(%d) needs to be larger than 0");
- return -1;
- }
- {
- CriticalSectionScoped cs(_cbCrit.get());
- if(_mixerStatusCallback != NULL) {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
- "Mixer status callback already registered");
- return -1;
- }
- _mixerStatusCallback = &mixerStatusCallback;
- }
- {
- CriticalSectionScoped cs(_crit.get());
- _amountOf10MsBetweenCallbacks = amountOf10MsBetweenCallbacks;
- _amountOf10MsUntilNextCallback = 0;
- _mixerStatusCb = true;
- }
- return 0;
-}
-
-int32_t AudioConferenceMixerImpl::UnRegisterMixerStatusCallback() {
- {
- CriticalSectionScoped cs(_crit.get());
- if(!_mixerStatusCb)
- {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
- "Mixer status callback not registered");
- return -1;
- }
- _mixerStatusCb = false;
- }
- {
- CriticalSectionScoped cs(_cbCrit.get());
- _mixerStatusCallback = NULL;
- }
- return 0;
-}
-
int32_t AudioConferenceMixerImpl::SetMixabilityStatus(
- MixerParticipant& participant,
- bool mixable) {
+ MixerParticipant* participant, bool mixable) {
if (!mixable) {
// Anonymous participants are in a separate list. Make sure that the
// participant is in the _participantList if it is being mixed.
@@ -471,7 +380,7 @@ int32_t AudioConferenceMixerImpl::SetMixabilityStatus(
{
CriticalSectionScoped cs(_cbCrit.get());
const bool isMixed =
- IsParticipantInList(participant, &_participantList);
+ IsParticipantInList(*participant, _participantList);
// API must be called with a new state.
if(!(mixable ^ isMixed)) {
WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
@@ -508,18 +417,16 @@ int32_t AudioConferenceMixerImpl::SetMixabilityStatus(
return 0;
}
-int32_t AudioConferenceMixerImpl::MixabilityStatus(
- MixerParticipant& participant,
- bool& mixable) {
+bool AudioConferenceMixerImpl::MixabilityStatus(
+ const MixerParticipant& participant) const {
CriticalSectionScoped cs(_cbCrit.get());
- mixable = IsParticipantInList(participant, &_participantList);
- return 0;
+ return IsParticipantInList(participant, _participantList);
}
int32_t AudioConferenceMixerImpl::SetAnonymousMixabilityStatus(
- MixerParticipant& participant, const bool anonymous) {
+ MixerParticipant* participant, bool anonymous) {
CriticalSectionScoped cs(_cbCrit.get());
- if(IsParticipantInList(participant, &_additionalParticipantList)) {
+ if(IsParticipantInList(*participant, _additionalParticipantList)) {
if(anonymous) {
return 0;
}
@@ -551,12 +458,10 @@ int32_t AudioConferenceMixerImpl::SetAnonymousMixabilityStatus(
0 : -1;
}
-int32_t AudioConferenceMixerImpl::AnonymousMixabilityStatus(
- MixerParticipant& participant, bool& mixable) {
+bool AudioConferenceMixerImpl::AnonymousMixabilityStatus(
+ const MixerParticipant& participant) const {
CriticalSectionScoped cs(_cbCrit.get());
- mixable = IsParticipantInList(participant,
- &_additionalParticipantList);
- return 0;
+ return IsParticipantInList(participant, _additionalParticipantList);
}
int32_t AudioConferenceMixerImpl::SetMinimumMixingFrequency(
@@ -583,11 +488,11 @@ int32_t AudioConferenceMixerImpl::SetMinimumMixingFrequency(
// Check all AudioFrames that are to be mixed. The highest sampling frequency
// found is the lowest that can be used without losing information.
-int32_t AudioConferenceMixerImpl::GetLowestMixingFrequency() {
+int32_t AudioConferenceMixerImpl::GetLowestMixingFrequency() const {
const int participantListFrequency =
- GetLowestMixingFrequencyFromList(&_participantList);
+ GetLowestMixingFrequencyFromList(_participantList);
const int anonymousListFrequency =
- GetLowestMixingFrequencyFromList(&_additionalParticipantList);
+ GetLowestMixingFrequencyFromList(_additionalParticipantList);
const int highestFreq =
(participantListFrequency > anonymousListFrequency) ?
participantListFrequency : anonymousListFrequency;
@@ -601,10 +506,10 @@ int32_t AudioConferenceMixerImpl::GetLowestMixingFrequency() {
}
int32_t AudioConferenceMixerImpl::GetLowestMixingFrequencyFromList(
- MixerParticipantList* mixList) {
+ const MixerParticipantList& mixList) const {
int32_t highestFreq = 8000;
- for (MixerParticipantList::iterator iter = mixList->begin();
- iter != mixList->end();
+ for (MixerParticipantList::const_iterator iter = mixList.begin();
+ iter != mixList.end();
++iter) {
const int32_t neededFrequency = (*iter)->NeededFrequency(_id);
if(neededFrequency > highestFreq) {
@@ -618,28 +523,28 @@ void AudioConferenceMixerImpl::UpdateToMix(
AudioFrameList* mixList,
AudioFrameList* rampOutList,
std::map<int, MixerParticipant*>* mixParticipantList,
- size_t& maxAudioFrameCounter) {
+ size_t* maxAudioFrameCounter) const {
WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
"UpdateToMix(mixList,rampOutList,mixParticipantList,%d)",
- maxAudioFrameCounter);
+ *maxAudioFrameCounter);
const size_t mixListStartSize = mixList->size();
AudioFrameList activeList;
// Struct needed by the passive lists to keep track of which AudioFrame
// belongs to which MixerParticipant.
ParticipantFramePairList passiveWasNotMixedList;
ParticipantFramePairList passiveWasMixedList;
- for (MixerParticipantList::iterator participant = _participantList.begin();
- participant != _participantList.end();
+ for (MixerParticipantList::const_iterator participant =
+ _participantList.begin(); participant != _participantList.end();
++participant) {
// Stop keeping track of passive participants if there are already
// enough participants available (they wont be mixed anyway).
- bool mustAddToPassiveList = (maxAudioFrameCounter >
+ bool mustAddToPassiveList = (*maxAudioFrameCounter >
(activeList.size() +
passiveWasMixedList.size() +
passiveWasNotMixedList.size()));
bool wasMixed = false;
- (*participant)->_mixHistory->WasMixed(wasMixed);
+ wasMixed = (*participant)->_mixHistory->WasMixed();
AudioFrame* audioFrame = NULL;
if(_audioFramePool->PopMemory(audioFrame) == -1) {
WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id,
@@ -649,7 +554,7 @@ void AudioConferenceMixerImpl::UpdateToMix(
}
audioFrame->sample_rate_hz_ = _outputFrequency;
- if((*participant)->GetAudioFrame(_id,*audioFrame) != 0) {
+ if((*participant)->GetAudioFrame(_id, audioFrame) != 0) {
WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
"failed to GetAudioFrame() from participant");
_audioFramePool->PushMemory(audioFrame);
@@ -674,7 +579,7 @@ void AudioConferenceMixerImpl::UpdateToMix(
RampIn(*audioFrame);
}
- if(activeList.size() >= maxAudioFrameCounter) {
+ if(activeList.size() >= *maxAudioFrameCounter) {
// There are already more active participants than should be
// mixed. Only keep the ones with the highest energy.
AudioFrameList::iterator replaceItem;
@@ -696,14 +601,14 @@ void AudioConferenceMixerImpl::UpdateToMix(
AudioFrame* replaceFrame = *replaceItem;
bool replaceWasMixed = false;
- std::map<int, MixerParticipant*>::iterator it =
+ std::map<int, MixerParticipant*>::const_iterator it =
mixParticipantList->find(replaceFrame->id_);
// When a frame is pushed to |activeList| it is also pushed
// to mixParticipantList with the frame's id. This means
// that the Find call above should never fail.
assert(it != mixParticipantList->end());
- it->second->_mixHistory->WasMixed(replaceWasMixed);
+ replaceWasMixed = it->second->_mixHistory->WasMixed();
mixParticipantList->erase(replaceFrame->id_);
activeList.erase(replaceItem);
@@ -754,10 +659,10 @@ void AudioConferenceMixerImpl::UpdateToMix(
}
}
}
- assert(activeList.size() <= maxAudioFrameCounter);
+ assert(activeList.size() <= *maxAudioFrameCounter);
// At this point it is known which participants should be mixed. Transfer
// this information to this functions output parameters.
- for (AudioFrameList::iterator iter = activeList.begin();
+ for (AudioFrameList::const_iterator iter = activeList.begin();
iter != activeList.end();
++iter) {
mixList->push_back(*iter);
@@ -766,10 +671,10 @@ void AudioConferenceMixerImpl::UpdateToMix(
// Always mix a constant number of AudioFrames. If there aren't enough
// active participants mix passive ones. Starting with those that was mixed
// last iteration.
- for (ParticipantFramePairList::iterator iter = passiveWasMixedList.begin();
- iter != passiveWasMixedList.end();
+ for (ParticipantFramePairList::const_iterator
+ iter = passiveWasMixedList.begin(); iter != passiveWasMixedList.end();
++iter) {
- if(mixList->size() < maxAudioFrameCounter + mixListStartSize) {
+ if(mixList->size() < *maxAudioFrameCounter + mixListStartSize) {
mixList->push_back((*iter)->audioFrame);
(*mixParticipantList)[(*iter)->audioFrame->id_] =
(*iter)->participant;
@@ -781,11 +686,11 @@ void AudioConferenceMixerImpl::UpdateToMix(
delete *iter;
}
// And finally the ones that have not been mixed for a while.
- for (ParticipantFramePairList::iterator iter =
+ for (ParticipantFramePairList::const_iterator iter =
passiveWasNotMixedList.begin();
iter != passiveWasNotMixedList.end();
++iter) {
- if(mixList->size() < maxAudioFrameCounter + mixListStartSize) {
+ if(mixList->size() < *maxAudioFrameCounter + mixListStartSize) {
mixList->push_back((*iter)->audioFrame);
(*mixParticipantList)[(*iter)->audioFrame->id_] =
(*iter)->participant;
@@ -796,12 +701,12 @@ void AudioConferenceMixerImpl::UpdateToMix(
}
delete *iter;
}
- assert(maxAudioFrameCounter + mixListStartSize >= mixList->size());
- maxAudioFrameCounter += mixListStartSize - mixList->size();
+ assert(*maxAudioFrameCounter + mixListStartSize >= mixList->size());
+ *maxAudioFrameCounter += mixListStartSize - mixList->size();
}
void AudioConferenceMixerImpl::GetAdditionalAudio(
- AudioFrameList* additionalFramesList) {
+ AudioFrameList* additionalFramesList) const {
WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
"GetAdditionalAudio(additionalFramesList)");
// The GetAudioFrame() callback may result in the participant being removed
@@ -813,7 +718,7 @@ void AudioConferenceMixerImpl::GetAdditionalAudio(
_additionalParticipantList.begin(),
_additionalParticipantList.end());
- for (MixerParticipantList::iterator participant =
+ for (MixerParticipantList::const_iterator participant =
additionalParticipantList.begin();
participant != additionalParticipantList.end();
++participant) {
@@ -825,7 +730,7 @@ void AudioConferenceMixerImpl::GetAdditionalAudio(
return;
}
audioFrame->sample_rate_hz_ = _outputFrequency;
- if((*participant)->GetAudioFrame(_id, *audioFrame) != 0) {
+ if((*participant)->GetAudioFrame(_id, audioFrame) != 0) {
WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
"failed to GetAudioFrame() from participant");
_audioFramePool->PushMemory(audioFrame);
@@ -841,18 +746,19 @@ void AudioConferenceMixerImpl::GetAdditionalAudio(
}
void AudioConferenceMixerImpl::UpdateMixedStatus(
- std::map<int, MixerParticipant*>& mixedParticipantsMap) {
+ const std::map<int, MixerParticipant*>& mixedParticipantsMap) const {
WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
"UpdateMixedStatus(mixedParticipantsMap)");
assert(mixedParticipantsMap.size() <= kMaximumAmountOfMixedParticipants);
// Loop through all participants. If they are in the mix map they
// were mixed.
- for (MixerParticipantList::iterator participant = _participantList.begin();
- participant != _participantList.end();
+ for (MixerParticipantList::const_iterator
+ participant =_participantList.begin();
+ participant != _participantList.end();
++participant) {
bool isMixed = false;
- for (std::map<int, MixerParticipant*>::iterator it =
+ for (std::map<int, MixerParticipant*>::const_iterator it =
mixedParticipantsMap.begin();
it != mixedParticipantsMap.end();
++it) {
@@ -866,7 +772,7 @@ void AudioConferenceMixerImpl::UpdateMixedStatus(
}
void AudioConferenceMixerImpl::ClearAudioFrameList(
- AudioFrameList* audioFrameList) {
+ AudioFrameList* audioFrameList) const {
WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
"ClearAudioFrameList(audioFrameList)");
for (AudioFrameList::iterator iter = audioFrameList->begin();
@@ -878,33 +784,24 @@ void AudioConferenceMixerImpl::ClearAudioFrameList(
}
void AudioConferenceMixerImpl::UpdateVADPositiveParticipants(
- AudioFrameList* mixList) {
+ AudioFrameList* mixList) const {
WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
"UpdateVADPositiveParticipants(mixList)");
- for (AudioFrameList::iterator iter = mixList->begin();
+ for (AudioFrameList::const_iterator iter = mixList->begin();
iter != mixList->end();
++iter) {
CalculateEnergy(**iter);
- if((*iter)->vad_activity_ == AudioFrame::kVadActive) {
- _scratchVadPositiveParticipants[
- _scratchVadPositiveParticipantsAmount].participant =
- (*iter)->id_;
- // TODO(andrew): to what should this be set?
- _scratchVadPositiveParticipants[
- _scratchVadPositiveParticipantsAmount].level = 0;
- _scratchVadPositiveParticipantsAmount++;
- }
}
}
bool AudioConferenceMixerImpl::IsParticipantInList(
- MixerParticipant& participant,
- MixerParticipantList* participantList) const {
+ const MixerParticipant& participant,
+ const MixerParticipantList& participantList) const {
WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
"IsParticipantInList(participant,participantList)");
- for (MixerParticipantList::const_iterator iter = participantList->begin();
- iter != participantList->end();
+ for (MixerParticipantList::const_iterator iter = participantList.begin();
+ iter != participantList.end();
++iter) {
if(&participant == *iter) {
return true;
@@ -914,28 +811,28 @@ bool AudioConferenceMixerImpl::IsParticipantInList(
}
bool AudioConferenceMixerImpl::AddParticipantToList(
- MixerParticipant& participant,
- MixerParticipantList* participantList) {
+ MixerParticipant* participant,
+ MixerParticipantList* participantList) const {
WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
"AddParticipantToList(participant, participantList)");
- participantList->push_back(&participant);
+ participantList->push_back(participant);
// Make sure that the mixed status is correct for new MixerParticipant.
- participant._mixHistory->ResetMixedStatus();
+ participant->_mixHistory->ResetMixedStatus();
return true;
}
bool AudioConferenceMixerImpl::RemoveParticipantFromList(
- MixerParticipant& participant,
- MixerParticipantList* participantList) {
+ MixerParticipant* participant,
+ MixerParticipantList* participantList) const {
WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
"RemoveParticipantFromList(participant, participantList)");
for (MixerParticipantList::iterator iter = participantList->begin();
iter != participantList->end();
++iter) {
- if(*iter == &participant) {
+ if(*iter == participant) {
participantList->erase(iter);
// Participant is no longer mixed, reset to default.
- participant._mixHistory->ResetMixedStatus();
+ participant->_mixHistory->ResetMixedStatus();
return true;
}
}
@@ -943,26 +840,26 @@ bool AudioConferenceMixerImpl::RemoveParticipantFromList(
}
int32_t AudioConferenceMixerImpl::MixFromList(
- AudioFrame& mixedAudio,
- const AudioFrameList* audioFrameList) {
+ AudioFrame* mixedAudio,
+ const AudioFrameList& audioFrameList) const {
WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
"MixFromList(mixedAudio, audioFrameList)");
- if(audioFrameList->empty()) return 0;
+ if(audioFrameList.empty()) return 0;
uint32_t position = 0;
if (_numMixedParticipants == 1) {
- mixedAudio.timestamp_ = audioFrameList->front()->timestamp_;
- mixedAudio.elapsed_time_ms_ = audioFrameList->front()->elapsed_time_ms_;
+ mixedAudio->timestamp_ = audioFrameList.front()->timestamp_;
+ mixedAudio->elapsed_time_ms_ = audioFrameList.front()->elapsed_time_ms_;
} else {
// TODO(wu): Issue 3390.
// Audio frame timestamp is only supported in one channel case.
- mixedAudio.timestamp_ = 0;
- mixedAudio.elapsed_time_ms_ = -1;
+ mixedAudio->timestamp_ = 0;
+ mixedAudio->elapsed_time_ms_ = -1;
}
- for (AudioFrameList::const_iterator iter = audioFrameList->begin();
- iter != audioFrameList->end();
+ for (AudioFrameList::const_iterator iter = audioFrameList.begin();
+ iter != audioFrameList.end();
++iter) {
if(position >= kMaximumAmountOfMixedParticipants) {
WEBRTC_TRACE(
@@ -975,10 +872,7 @@ int32_t AudioConferenceMixerImpl::MixFromList(
assert(false);
position = 0;
}
- MixFrames(&mixedAudio, (*iter), use_limiter_);
-
- SetParticipantStatistics(&_scratchMixedParticipants[position],
- **iter);
+ MixFrames(mixedAudio, (*iter), use_limiter_);
position++;
}
@@ -988,28 +882,28 @@ int32_t AudioConferenceMixerImpl::MixFromList(
// TODO(andrew): consolidate this function with MixFromList.
int32_t AudioConferenceMixerImpl::MixAnonomouslyFromList(
- AudioFrame& mixedAudio,
- const AudioFrameList* audioFrameList) {
+ AudioFrame* mixedAudio,
+ const AudioFrameList& audioFrameList) const {
WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
"MixAnonomouslyFromList(mixedAudio, audioFrameList)");
- if(audioFrameList->empty()) return 0;
+ if(audioFrameList.empty()) return 0;
- for (AudioFrameList::const_iterator iter = audioFrameList->begin();
- iter != audioFrameList->end();
+ for (AudioFrameList::const_iterator iter = audioFrameList.begin();
+ iter != audioFrameList.end();
++iter) {
- MixFrames(&mixedAudio, *iter, use_limiter_);
+ MixFrames(mixedAudio, *iter, use_limiter_);
}
return 0;
}
-bool AudioConferenceMixerImpl::LimitMixedAudio(AudioFrame& mixedAudio) {
+bool AudioConferenceMixerImpl::LimitMixedAudio(AudioFrame* mixedAudio) const {
if (!use_limiter_) {
return true;
}
// Smoothly limit the mixed frame.
- const int error = _limiter->ProcessStream(&mixedAudio);
+ const int error = _limiter->ProcessStream(mixedAudio);
// And now we can safely restore the level. This procedure results in
// some loss of resolution, deemed acceptable.
@@ -1021,7 +915,7 @@ bool AudioConferenceMixerImpl::LimitMixedAudio(AudioFrame& mixedAudio) {
//
// Instead we double the frame (with addition since left-shifting a
// negative value is undefined).
- mixedAudio += mixedAudio;
+ *mixedAudio += *mixedAudio;
if(error != _limiter->kNoError) {
WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id,
diff --git a/chromium/third_party/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.h b/chromium/third_party/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.h
index b1a812a1134..bc9a27e9f0c 100644
--- a/chromium/third_party/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.h
+++ b/chromium/third_party/webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.h
@@ -17,7 +17,6 @@
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/engine_configurations.h"
#include "webrtc/modules/audio_conference_mixer/interface/audio_conference_mixer.h"
-#include "webrtc/modules/audio_conference_mixer/source/level_indicator.h"
#include "webrtc/modules/audio_conference_mixer/source/memory_pool.h"
#include "webrtc/modules/audio_conference_mixer/source/time_scheduler.h"
#include "webrtc/modules/interface/module_common_types.h"
@@ -36,15 +35,15 @@ public:
MixHistory();
~MixHistory();
- // MixerParticipant function
- int32_t IsMixed(bool& mixed) const;
+ // Returns true if the participant is being mixed.
+ bool IsMixed() const;
- // Sets wasMixed to true if the participant was mixed previous mix
+ // Returns true if the participant was mixed in the previous mix
// iteration.
- int32_t WasMixed(bool& wasMixed) const;
+ bool WasMixed() const;
// Updates the mixed status.
- int32_t SetIsMixed(const bool mixed);
+ int32_t SetIsMixed(bool mixed);
void ResetMixedStatus();
private:
@@ -69,32 +68,26 @@ public:
// AudioConferenceMixer functions
int32_t RegisterMixedStreamCallback(
- AudioMixerOutputReceiver& mixReceiver) override;
+ AudioMixerOutputReceiver* mixReceiver) override;
int32_t UnRegisterMixedStreamCallback() override;
- int32_t RegisterMixerStatusCallback(
- AudioMixerStatusReceiver& mixerStatusCallback,
- const uint32_t amountOf10MsBetweenCallbacks) override;
- int32_t UnRegisterMixerStatusCallback() override;
- int32_t SetMixabilityStatus(MixerParticipant& participant,
+ int32_t SetMixabilityStatus(MixerParticipant* participant,
bool mixable) override;
- int32_t MixabilityStatus(MixerParticipant& participant,
- bool& mixable) override;
+ bool MixabilityStatus(const MixerParticipant& participant) const override;
int32_t SetMinimumMixingFrequency(Frequency freq) override;
- int32_t SetAnonymousMixabilityStatus(MixerParticipant& participant,
- const bool mixable) override;
- int32_t AnonymousMixabilityStatus(MixerParticipant& participant,
- bool& mixable) override;
+ int32_t SetAnonymousMixabilityStatus(
+ MixerParticipant* participant, bool mixable) override;
+ bool AnonymousMixabilityStatus(
+ const MixerParticipant& participant) const override;
private:
enum{DEFAULT_AUDIO_FRAME_POOLSIZE = 50};
// Set/get mix frequency
- int32_t SetOutputFrequency(const Frequency frequency);
+ int32_t SetOutputFrequency(const Frequency& frequency);
Frequency OutputFrequency() const;
// Fills mixList with the AudioFrames pointers that should be used when
- // mixing. Fills mixParticipantList with ParticipantStatistics for the
- // participants who's AudioFrames are inside mixList.
+ // mixing.
// maxAudioFrameCounter both input and output specifies how many more
// AudioFrames that are allowed to be mixed.
// rampOutList contain AudioFrames corresponding to an audio stream that
@@ -104,65 +97,54 @@ private:
AudioFrameList* mixList,
AudioFrameList* rampOutList,
std::map<int, MixerParticipant*>* mixParticipantList,
- size_t& maxAudioFrameCounter);
+ size_t* maxAudioFrameCounter) const;
// Return the lowest mixing frequency that can be used without having to
// downsample any audio.
- int32_t GetLowestMixingFrequency();
- int32_t GetLowestMixingFrequencyFromList(MixerParticipantList* mixList);
+ int32_t GetLowestMixingFrequency() const;
+ int32_t GetLowestMixingFrequencyFromList(
+ const MixerParticipantList& mixList) const;
// Return the AudioFrames that should be mixed anonymously.
- void GetAdditionalAudio(AudioFrameList* additionalFramesList);
+ void GetAdditionalAudio(AudioFrameList* additionalFramesList) const;
// Update the MixHistory of all MixerParticipants. mixedParticipantsList
// should contain a map of MixerParticipants that have been mixed.
void UpdateMixedStatus(
- std::map<int, MixerParticipant*>& mixedParticipantsList);
+ const std::map<int, MixerParticipant*>& mixedParticipantsList) const;
// Clears audioFrameList and reclaims all memory associated with it.
- void ClearAudioFrameList(AudioFrameList* audioFrameList);
+ void ClearAudioFrameList(AudioFrameList* audioFrameList) const;
// Update the list of MixerParticipants who have a positive VAD. mixList
// should be a list of AudioFrames
- void UpdateVADPositiveParticipants(
- AudioFrameList* mixList);
+ void UpdateVADPositiveParticipants(AudioFrameList* mixList) const;
// This function returns true if it finds the MixerParticipant in the
// specified list of MixerParticipants.
- bool IsParticipantInList(
- MixerParticipant& participant,
- MixerParticipantList* participantList) const;
+ bool IsParticipantInList(const MixerParticipant& participant,
+ const MixerParticipantList& participantList) const;
// Add/remove the MixerParticipant to the specified
// MixerParticipant list.
bool AddParticipantToList(
- MixerParticipant& participant,
- MixerParticipantList* participantList);
+ MixerParticipant* participant,
+ MixerParticipantList* participantList) const;
bool RemoveParticipantFromList(
- MixerParticipant& removeParticipant,
- MixerParticipantList* participantList);
+ MixerParticipant* removeParticipant,
+ MixerParticipantList* participantList) const;
// Mix the AudioFrames stored in audioFrameList into mixedAudio.
- int32_t MixFromList(
- AudioFrame& mixedAudio,
- const AudioFrameList* audioFrameList);
+ int32_t MixFromList(AudioFrame* mixedAudio,
+ const AudioFrameList& audioFrameList) const;
+
// Mix the AudioFrames stored in audioFrameList into mixedAudio. No
// record will be kept of this mix (e.g. the corresponding MixerParticipants
// will not be marked as IsMixed()
- int32_t MixAnonomouslyFromList(AudioFrame& mixedAudio,
- const AudioFrameList* audioFrameList);
-
- bool LimitMixedAudio(AudioFrame& mixedAudio);
+ int32_t MixAnonomouslyFromList(AudioFrame* mixedAudio,
+ const AudioFrameList& audioFrameList) const;
- // Scratch memory
- // Note that the scratch memory may only be touched in the scope of
- // Process().
- size_t _scratchParticipantsToMixAmount;
- ParticipantStatistics _scratchMixedParticipants[
- kMaximumAmountOfMixedParticipants];
- uint32_t _scratchVadPositiveParticipantsAmount;
- ParticipantStatistics _scratchVadPositiveParticipants[
- kMaximumAmountOfMixedParticipants];
+ bool LimitMixedAudio(AudioFrame* mixedAudio) const;
rtc::scoped_ptr<CriticalSectionWrapper> _crit;
rtc::scoped_ptr<CriticalSectionWrapper> _cbCrit;
@@ -174,14 +156,9 @@ private:
// Mix result callback
AudioMixerOutputReceiver* _mixReceiver;
- AudioMixerStatusReceiver* _mixerStatusCallback;
- uint32_t _amountOf10MsBetweenCallbacks;
- uint32_t _amountOf10MsUntilNextCallback;
- bool _mixerStatusCb;
-
// The current sample frequency and sample size when mixing.
Frequency _outputFrequency;
- uint16_t _sampleSize;
+ size_t _sampleSize;
// Memory pool to avoid allocating/deallocating AudioFrames
MemoryPool<AudioFrame>* _audioFramePool;
@@ -201,9 +178,6 @@ private:
// Metronome class.
TimeScheduler _timeScheduler;
- // Smooth level indicator.
- LevelIndicator _mixedAudioLevel;
-
// Counter keeping track of concurrent calls to process.
// Note: should never be higher than 1 or lower than 0.
int16_t _processCalls;
diff --git a/chromium/third_party/webrtc/modules/audio_conference_mixer/source/audio_frame_manipulator.cc b/chromium/third_party/webrtc/modules/audio_conference_mixer/source/audio_frame_manipulator.cc
index 3dce5c8bea6..636698e9c10 100644
--- a/chromium/third_party/webrtc/modules/audio_conference_mixer/source/audio_frame_manipulator.cc
+++ b/chromium/third_party/webrtc/modules/audio_conference_mixer/source/audio_frame_manipulator.cc
@@ -35,14 +35,14 @@ const float rampArray[] = {0.0000f, 0.0127f, 0.0253f, 0.0380f,
0.8608f, 0.8734f, 0.8861f, 0.8987f,
0.9114f, 0.9241f, 0.9367f, 0.9494f,
0.9620f, 0.9747f, 0.9873f, 1.0000f};
-const int rampSize = sizeof(rampArray)/sizeof(rampArray[0]);
+const size_t rampSize = sizeof(rampArray)/sizeof(rampArray[0]);
} // namespace
namespace webrtc {
void CalculateEnergy(AudioFrame& audioFrame)
{
audioFrame.energy_ = 0;
- for(int position = 0; position < audioFrame.samples_per_channel_;
+ for(size_t position = 0; position < audioFrame.samples_per_channel_;
position++)
{
// TODO(andrew): this can easily overflow.
@@ -54,7 +54,7 @@ void CalculateEnergy(AudioFrame& audioFrame)
void RampIn(AudioFrame& audioFrame)
{
assert(rampSize <= audioFrame.samples_per_channel_);
- for(int i = 0; i < rampSize; i++)
+ for(size_t i = 0; i < rampSize; i++)
{
audioFrame.data_[i] = static_cast<int16_t>(rampArray[i] *
audioFrame.data_[i]);
@@ -64,9 +64,9 @@ void RampIn(AudioFrame& audioFrame)
void RampOut(AudioFrame& audioFrame)
{
assert(rampSize <= audioFrame.samples_per_channel_);
- for(int i = 0; i < rampSize; i++)
+ for(size_t i = 0; i < rampSize; i++)
{
- const int rampPos = rampSize - 1 - i;
+ const size_t rampPos = rampSize - 1 - i;
audioFrame.data_[i] = static_cast<int16_t>(rampArray[rampPos] *
audioFrame.data_[i]);
}
diff --git a/chromium/third_party/webrtc/modules/audio_conference_mixer/source/level_indicator.cc b/chromium/third_party/webrtc/modules/audio_conference_mixer/source/level_indicator.cc
deleted file mode 100644
index 3c573d41aee..00000000000
--- a/chromium/third_party/webrtc/modules/audio_conference_mixer/source/level_indicator.cc
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "webrtc/modules/audio_conference_mixer/source/level_indicator.h"
-
-namespace webrtc {
-// Array for adding smothing to level changes (ad-hoc).
-const uint32_t perm[] =
- {0,1,2,3,4,4,5,5,5,5,6,6,6,6,6,7,7,7,7,8,8,8,9,9,9,9,9,9,9,9,9,9,9};
-
-LevelIndicator::LevelIndicator()
- : _max(0),
- _count(0),
- _currentLevel(0)
-{
-}
-
-LevelIndicator::~LevelIndicator()
-{
-}
-
-// Level is based on the highest absolute value for all samples.
-void LevelIndicator::ComputeLevel(const int16_t* speech,
- const uint16_t nrOfSamples)
-{
- int32_t min = 0;
- for(uint32_t i = 0; i < nrOfSamples; i++)
- {
- if(_max < speech[i])
- {
- _max = speech[i];
- }
- if(min > speech[i])
- {
- min = speech[i];
- }
- }
-
- // Absolute max value.
- if(-min > _max)
- {
- _max = -min;
- }
-
- if(_count == TICKS_BEFORE_CALCULATION)
- {
- // Highest sample value maps directly to a level.
- int32_t position = _max / 1000;
- if ((position == 0) &&
- (_max > 250))
- {
- position = 1;
- }
- _currentLevel = perm[position];
- // The max value is decayed and stored so that it can be reused to slow
- // down decreases in level.
- _max = _max >> 1;
- _count = 0;
- } else {
- _count++;
- }
-}
-
-int32_t LevelIndicator::GetLevel()
-{
- return _currentLevel;
-}
-
-} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_conference_mixer/source/level_indicator.h b/chromium/third_party/webrtc/modules/audio_conference_mixer/source/level_indicator.h
deleted file mode 100644
index b0e87ffa718..00000000000
--- a/chromium/third_party/webrtc/modules/audio_conference_mixer/source/level_indicator.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_AUDIO_CONFERENCE_MIXER_SOURCE_LEVEL_INDICATOR_H_
-#define WEBRTC_MODULES_AUDIO_CONFERENCE_MIXER_SOURCE_LEVEL_INDICATOR_H_
-
-#include "webrtc/typedefs.h"
-
-namespace webrtc {
-class LevelIndicator
-{
-public:
- enum{TICKS_BEFORE_CALCULATION = 10};
-
- LevelIndicator();
- ~LevelIndicator();
-
- // Updates the level.
- void ComputeLevel(const int16_t* speech,
- const uint16_t nrOfSamples);
-
- int32_t GetLevel();
-private:
- int32_t _max;
- uint32_t _count;
- uint32_t _currentLevel;
-};
-} // namespace webrtc
-
-#endif // WEBRTC_MODULES_AUDIO_CONFERENCE_MIXER_SOURCE_LEVEL_INDICATOR_H_
diff --git a/chromium/third_party/webrtc/modules/audio_conference_mixer/test/audio_conference_mixer_unittest.cc b/chromium/third_party/webrtc/modules/audio_conference_mixer/test/audio_conference_mixer_unittest.cc
new file mode 100644
index 00000000000..d4fbd205f15
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/audio_conference_mixer/test/audio_conference_mixer_unittest.cc
@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "testing/gmock/include/gmock/gmock.h"
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/modules/audio_conference_mixer/interface/audio_conference_mixer.h"
+#include "webrtc/modules/audio_conference_mixer/interface/audio_conference_mixer_defines.h"
+
+namespace webrtc {
+
+using testing::_;
+using testing::AtLeast;
+using testing::Invoke;
+using testing::Return;
+
+class MockAudioMixerOutputReceiver : public AudioMixerOutputReceiver {
+ public:
+ MOCK_METHOD4(NewMixedAudio, void(const int32_t id,
+ const AudioFrame& general_audio_frame,
+ const AudioFrame** unique_audio_frames,
+ const uint32_t size));
+};
+
+class MockMixerParticipant : public MixerParticipant {
+ public:
+ MockMixerParticipant() {
+ ON_CALL(*this, GetAudioFrame(_, _))
+ .WillByDefault(Invoke(this, &MockMixerParticipant::FakeAudioFrame));
+ }
+ MOCK_METHOD2(GetAudioFrame,
+ int32_t(const int32_t id, AudioFrame* audio_frame));
+ MOCK_CONST_METHOD1(NeededFrequency, int32_t(const int32_t id));
+ AudioFrame* fake_frame() { return &fake_frame_; }
+
+ private:
+ AudioFrame fake_frame_;
+ int32_t FakeAudioFrame(const int32_t id, AudioFrame* audio_frame) {
+ audio_frame->CopyFrom(fake_frame_);
+ return 0;
+ }
+};
+
+TEST(AudioConferenceMixer, AnonymousAndNamed) {
+ const int kId = 1;
+ // Should not matter even if participants are more than
+ // kMaximumAmountOfMixedParticipants.
+ const int kNamed =
+ AudioConferenceMixer::kMaximumAmountOfMixedParticipants + 1;
+ const int kAnonymous =
+ AudioConferenceMixer::kMaximumAmountOfMixedParticipants + 1;
+
+ rtc::scoped_ptr<AudioConferenceMixer> mixer(
+ AudioConferenceMixer::Create(kId));
+
+ MockMixerParticipant named[kNamed];
+ MockMixerParticipant anonymous[kAnonymous];
+
+ for (int i = 0; i < kNamed; ++i) {
+ EXPECT_EQ(0, mixer->SetMixabilityStatus(&named[i], true));
+ EXPECT_TRUE(mixer->MixabilityStatus(named[i]));
+ }
+
+ for (int i = 0; i < kAnonymous; ++i) {
+ // Participant must be registered before turning it into anonymous.
+ EXPECT_EQ(-1, mixer->SetAnonymousMixabilityStatus(&anonymous[i], true));
+ EXPECT_EQ(0, mixer->SetMixabilityStatus(&anonymous[i], true));
+ EXPECT_TRUE(mixer->MixabilityStatus(anonymous[i]));
+ EXPECT_FALSE(mixer->AnonymousMixabilityStatus(anonymous[i]));
+
+ EXPECT_EQ(0, mixer->SetAnonymousMixabilityStatus(&anonymous[i], true));
+ EXPECT_TRUE(mixer->AnonymousMixabilityStatus(anonymous[i]));
+
+ // Anonymous participants do not show status by MixabilityStatus.
+ EXPECT_FALSE(mixer->MixabilityStatus(anonymous[i]));
+ }
+
+ for (int i = 0; i < kNamed; ++i) {
+ EXPECT_EQ(0, mixer->SetMixabilityStatus(&named[i], false));
+ EXPECT_FALSE(mixer->MixabilityStatus(named[i]));
+ }
+
+ for (int i = 0; i < kAnonymous - 1; i++) {
+ EXPECT_EQ(0, mixer->SetAnonymousMixabilityStatus(&anonymous[i], false));
+ EXPECT_FALSE(mixer->AnonymousMixabilityStatus(anonymous[i]));
+
+ // SetAnonymousMixabilityStatus(anonymous, false) moves anonymous to the
+ // named group.
+ EXPECT_TRUE(mixer->MixabilityStatus(anonymous[i]));
+ }
+
+ // SetMixabilityStatus(anonymous, false) will remove anonymous from both
+ // anonymous and named groups.
+ EXPECT_EQ(0, mixer->SetMixabilityStatus(&anonymous[kAnonymous - 1], false));
+ EXPECT_FALSE(mixer->AnonymousMixabilityStatus(anonymous[kAnonymous - 1]));
+ EXPECT_FALSE(mixer->MixabilityStatus(anonymous[kAnonymous - 1]));
+}
+
+TEST(AudioConferenceMixer, LargestEnergyVadActiveMixed) {
+ const int kId = 1;
+ const int kParticipants =
+ AudioConferenceMixer::kMaximumAmountOfMixedParticipants + 3;
+ const int kSampleRateHz = 32000;
+
+ rtc::scoped_ptr<AudioConferenceMixer> mixer(
+ AudioConferenceMixer::Create(kId));
+
+ MockAudioMixerOutputReceiver output_receiver;
+ EXPECT_EQ(0, mixer->RegisterMixedStreamCallback(&output_receiver));
+
+ MockMixerParticipant participants[kParticipants];
+
+ for (int i = 0; i < kParticipants; ++i) {
+ participants[i].fake_frame()->id_ = i;
+ participants[i].fake_frame()->sample_rate_hz_ = kSampleRateHz;
+ participants[i].fake_frame()->speech_type_ = AudioFrame::kNormalSpeech;
+ participants[i].fake_frame()->vad_activity_ = AudioFrame::kVadActive;
+ participants[i].fake_frame()->num_channels_ = 1;
+
+ // Frame duration 10ms.
+ participants[i].fake_frame()->samples_per_channel_ = kSampleRateHz / 100;
+
+ // We set the 80-th sample value since the first 80 samples may be
+ // modified by a ramped-in window.
+ participants[i].fake_frame()->data_[80] = i;
+
+ EXPECT_EQ(0, mixer->SetMixabilityStatus(&participants[i], true));
+ EXPECT_CALL(participants[i], GetAudioFrame(_, _))
+ .Times(AtLeast(1));
+ EXPECT_CALL(participants[i], NeededFrequency(_))
+ .WillRepeatedly(Return(kSampleRateHz));
+ }
+
+ // Last participant gives audio frame with passive VAD, although it has the
+ // largest energy.
+ participants[kParticipants - 1].fake_frame()->vad_activity_ =
+ AudioFrame::kVadPassive;
+
+ EXPECT_CALL(output_receiver, NewMixedAudio(_, _, _, _))
+ .Times(AtLeast(1));
+
+ EXPECT_EQ(0, mixer->Process());
+
+ for (int i = 0; i < kParticipants; ++i) {
+ bool is_mixed = participants[i].IsMixed();
+ if (i == kParticipants - 1 || i < kParticipants - 1 -
+ AudioConferenceMixer::kMaximumAmountOfMixedParticipants) {
+ EXPECT_FALSE(is_mixed) << "Mixing status of Participant #"
+ << i << " wrong.";
+ } else {
+ EXPECT_TRUE(is_mixed) << "Mixing status of Participant #"
+ << i << " wrong.";
+ }
+ }
+
+ EXPECT_EQ(0, mixer->UnRegisterMixedStreamCallback());
+}
+
+} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/audio_decoder_unittests.isolate b/chromium/third_party/webrtc/modules/audio_decoder_unittests.isolate
index bec68f17c64..bec68f17c64 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/audio_decoder_unittests.isolate
+++ b/chromium/third_party/webrtc/modules/audio_decoder_unittests.isolate
diff --git a/chromium/third_party/webrtc/modules/audio_device/BUILD.gn b/chromium/third_party/webrtc/modules/audio_device/BUILD.gn
index f32c0562455..01893357857 100644
--- a/chromium/third_party/webrtc/modules/audio_device/BUILD.gn
+++ b/chromium/third_party/webrtc/modules/audio_device/BUILD.gn
@@ -27,6 +27,8 @@ source_set("audio_device") {
"dummy/audio_device_dummy.h",
"dummy/file_audio_device.cc",
"dummy/file_audio_device.h",
+ "fine_audio_buffer.cc",
+ "fine_audio_buffer.h",
"include/audio_device.h",
"include/audio_device_defines.h",
]
@@ -57,8 +59,6 @@ source_set("audio_device") {
"android/audio_record_jni.h",
"android/audio_track_jni.cc",
"android/audio_track_jni.h",
- "android/fine_audio_buffer.cc",
- "android/fine_audio_buffer.h",
"android/opensles_common.cc",
"android/opensles_common.h",
"android/opensles_player.cc",
diff --git a/chromium/third_party/webrtc/modules/audio_device/android/audio_common.h b/chromium/third_party/webrtc/modules/audio_device/android/audio_common.h
index cb259830b99..4eecae4b706 100644
--- a/chromium/third_party/webrtc/modules/audio_device/android/audio_common.h
+++ b/chromium/third_party/webrtc/modules/audio_device/android/audio_common.h
@@ -13,22 +13,19 @@
namespace webrtc {
-enum {
- kDefaultSampleRate = 44100,
- kNumChannels = 1,
- // Number of bytes per audio frame.
- // Example: 16-bit PCM in mono => 1*(16/8)=2 [bytes/frame]
- kBytesPerFrame = kNumChannels * (16 / 8),
- // Delay estimates for the two different supported modes. These values
- // are based on real-time round-trip delay estimates on a large set of
- // devices and they are lower bounds since the filter length is 128 ms,
- // so the AEC works for delays in the range [50, ~170] ms and [150, ~270] ms.
- // Note that, in most cases, the lowest delay estimate will not be utilized
- // since devices that support low-latency output audio often supports
- // HW AEC as well.
- kLowLatencyModeDelayEstimateInMilliseconds = 50,
- kHighLatencyModeDelayEstimateInMilliseconds = 150,
-};
+const int kDefaultSampleRate = 44100;
+const int kNumChannels = 1;
+// Number of bytes per audio frame.
+// Example: 16-bit PCM in mono => 1*(16/8)=2 [bytes/frame]
+const size_t kBytesPerFrame = kNumChannels * (16 / 8);
+// Delay estimates for the two different supported modes. These values are based
+// on real-time round-trip delay estimates on a large set of devices and they
+// are lower bounds since the filter length is 128 ms, so the AEC works for
+// delays in the range [50, ~170] ms and [150, ~270] ms. Note that, in most
+// cases, the lowest delay estimate will not be utilized since devices that
+// support low-latency output audio often support HW AEC as well.
+const int kLowLatencyModeDelayEstimateInMilliseconds = 50;
+const int kHighLatencyModeDelayEstimateInMilliseconds = 150;
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_device/android/audio_device_template.h b/chromium/third_party/webrtc/modules/audio_device/android/audio_device_template.h
index adc66fa6d44..47e353dfd54 100644
--- a/chromium/third_party/webrtc/modules/audio_device/android/audio_device_template.h
+++ b/chromium/third_party/webrtc/modules/audio_device/android/audio_device_template.h
@@ -27,12 +27,12 @@ namespace webrtc {
// InputType/OutputType can be any class that implements the capturing/rendering
// part of the AudioDeviceGeneric API.
// Construction and destruction must be done on one and the same thread. Each
-// internal implementation of InputType and OutputType will DCHECK if that is
-// not the case. All implemented methods must also be called on the same thread.
-// See comments in each InputType/OutputType class for more
+// internal implementation of InputType and OutputType will RTC_DCHECK if that
+// is not the case. All implemented methods must also be called on the same
+// thread. See comments in each InputType/OutputType class for more info.
// It is possible to call the two static methods (SetAndroidAudioDeviceObjects
// and ClearAndroidAudioDeviceObjects) from a different thread but both will
-// CHECK that the calling thread is attached to a Java VM.
+// RTC_CHECK that the calling thread is attached to a Java VM.
template <class InputType, class OutputType>
class AudioDeviceTemplate : public AudioDeviceGeneric {
@@ -44,7 +44,7 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
output_(audio_manager_),
input_(audio_manager_),
initialized_(false) {
- CHECK(audio_manager);
+ RTC_CHECK(audio_manager);
audio_manager_->SetActiveAudioLayer(audio_layer);
}
@@ -58,21 +58,35 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
}
int32_t Init() override {
- DCHECK(thread_checker_.CalledOnValidThread());
- DCHECK(!initialized_);
- initialized_ = audio_manager_->Init() || output_.Init() || input_.Init();
- return initialized_ ? 0 : -1;
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(!initialized_);
+ if (!audio_manager_->Init())
+ return -1;
+ if (output_.Init() != 0) {
+ audio_manager_->Close();
+ return -1;
+ }
+ if (input_.Init() != 0) {
+ output_.Terminate();
+ audio_manager_->Close();
+ return -1;
+ }
+ initialized_ = true;
+ return 0;
}
int32_t Terminate() override {
- DCHECK(thread_checker_.CalledOnValidThread());
- initialized_ =
- !(output_.Terminate() || input_.Terminate() || audio_manager_->Close());
- return !initialized_ ? 0 : -1;
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ int32_t err = input_.Terminate();
+ err |= output_.Terminate();
+ err |= !audio_manager_->Close();
+ initialized_ = false;
+ RTC_DCHECK_EQ(err, 0);
+ return err;
}
bool Initialized() const override {
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
return initialized_;
}
@@ -374,14 +388,14 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
int32_t PlayoutDelay(uint16_t& delay_ms) const override {
// Best guess we can do is to use half of the estimated total delay.
delay_ms = audio_manager_->GetDelayEstimateInMilliseconds() / 2;
- DCHECK_GT(delay_ms, 0);
+ RTC_DCHECK_GT(delay_ms, 0);
return 0;
}
int32_t RecordingDelay(uint16_t& delay_ms) const override {
// Best guess we can do is to use half of the estimated total delay.
delay_ms = audio_manager_->GetDelayEstimateInMilliseconds() / 2;
- DCHECK_GT(delay_ms, 0);
+ RTC_DCHECK_GT(delay_ms, 0);
return 0;
}
@@ -442,10 +456,32 @@ class AudioDeviceTemplate : public AudioDeviceGeneric {
}
int32_t EnableBuiltInAEC(bool enable) override {
- CHECK(BuiltInAECIsAvailable()) << "HW AEC is not available";
+ RTC_CHECK(BuiltInAECIsAvailable()) << "HW AEC is not available";
return input_.EnableBuiltInAEC(enable);
}
+ // Returns true if the device both supports built-in AGC and the device
+ // is not blacklisted.
+ bool BuiltInAGCIsAvailable() const override {
+ return audio_manager_->IsAutomaticGainControlSupported();
+ }
+
+ int32_t EnableBuiltInAGC(bool enable) override {
+ RTC_CHECK(BuiltInAGCIsAvailable()) << "HW AGC is not available";
+ return input_.EnableBuiltInAGC(enable);
+ }
+
+ // Returns true if the device both supports built-in NS and the device
+ // is not blacklisted.
+ bool BuiltInNSIsAvailable() const override {
+ return audio_manager_->IsNoiseSuppressorSupported();
+ }
+
+ int32_t EnableBuiltInNS(bool enable) override {
+ RTC_CHECK(BuiltInNSIsAvailable()) << "HW NS is not available";
+ return input_.EnableBuiltInNS(enable);
+ }
+
private:
rtc::ThreadChecker thread_checker_;
diff --git a/chromium/third_party/webrtc/modules/audio_device/android/audio_device_unittest.cc b/chromium/third_party/webrtc/modules/audio_device/android/audio_device_unittest.cc
index b21fd6e2004..f93763b0841 100644
--- a/chromium/third_party/webrtc/modules/audio_device/android/audio_device_unittest.cc
+++ b/chromium/third_party/webrtc/modules/audio_device/android/audio_device_unittest.cc
@@ -19,6 +19,7 @@
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/base/arraysize.h"
#include "webrtc/base/criticalsection.h"
+#include "webrtc/base/format_macros.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/base/scoped_ref_ptr.h"
#include "webrtc/modules/audio_device/android/audio_common.h"
@@ -55,22 +56,22 @@ namespace webrtc {
// Number of callbacks (input or output) the tests waits for before we set
// an event indicating that the test was OK.
-static const int kNumCallbacks = 10;
+static const size_t kNumCallbacks = 10;
// Max amount of time we wait for an event to be set while counting callbacks.
static const int kTestTimeOutInMilliseconds = 10 * 1000;
// Average number of audio callbacks per second assuming 10ms packet size.
-static const int kNumCallbacksPerSecond = 100;
+static const size_t kNumCallbacksPerSecond = 100;
// Play out a test file during this time (unit is in seconds).
static const int kFilePlayTimeInSec = 5;
-static const int kBitsPerSample = 16;
-static const int kBytesPerSample = kBitsPerSample / 8;
+static const size_t kBitsPerSample = 16;
+static const size_t kBytesPerSample = kBitsPerSample / 8;
// Run the full-duplex test during this time (unit is in seconds).
// Note that first |kNumIgnoreFirstCallbacks| are ignored.
static const int kFullDuplexTimeInSec = 5;
// Wait for the callback sequence to stabilize by ignoring this amount of the
// initial callbacks (avoids initial FIFO access).
// Only used in the RunPlayoutAndRecordingInFullDuplex test.
-static const int kNumIgnoreFirstCallbacks = 50;
+static const size_t kNumIgnoreFirstCallbacks = 50;
// Sets the number of impulses per second in the latency test.
static const int kImpulseFrequencyInHz = 1;
// Length of round-trip latency measurements. Number of transmitted impulses
@@ -90,8 +91,8 @@ enum TransportType {
// measurements.
class AudioStreamInterface {
public:
- virtual void Write(const void* source, int num_frames) = 0;
- virtual void Read(void* destination, int num_frames) = 0;
+ virtual void Write(const void* source, size_t num_frames) = 0;
+ virtual void Read(void* destination, size_t num_frames) = 0;
protected:
virtual ~AudioStreamInterface() {}
};
@@ -101,7 +102,7 @@ class AudioStreamInterface {
class FileAudioStream : public AudioStreamInterface {
public:
FileAudioStream(
- int num_callbacks, const std::string& file_name, int sample_rate)
+ size_t num_callbacks, const std::string& file_name, int sample_rate)
: file_size_in_bytes_(0),
sample_rate_(sample_rate),
file_pos_(0) {
@@ -109,23 +110,23 @@ class FileAudioStream : public AudioStreamInterface {
sample_rate_ = sample_rate;
EXPECT_GE(file_size_in_callbacks(), num_callbacks)
<< "Size of test file is not large enough to last during the test.";
- const int num_16bit_samples =
+ const size_t num_16bit_samples =
test::GetFileSize(file_name) / kBytesPerSample;
file_.reset(new int16_t[num_16bit_samples]);
FILE* audio_file = fopen(file_name.c_str(), "rb");
EXPECT_NE(audio_file, nullptr);
- int num_samples_read = fread(
+ size_t num_samples_read = fread(
file_.get(), sizeof(int16_t), num_16bit_samples, audio_file);
EXPECT_EQ(num_samples_read, num_16bit_samples);
fclose(audio_file);
}
// AudioStreamInterface::Write() is not implemented.
- void Write(const void* source, int num_frames) override {}
+ void Write(const void* source, size_t num_frames) override {}
// Read samples from file stored in memory (at construction) and copy
// |num_frames| (<=> 10ms) to the |destination| byte buffer.
- void Read(void* destination, int num_frames) override {
+ void Read(void* destination, size_t num_frames) override {
memcpy(destination,
static_cast<int16_t*> (&file_[file_pos_]),
num_frames * sizeof(int16_t));
@@ -133,17 +134,18 @@ class FileAudioStream : public AudioStreamInterface {
}
int file_size_in_seconds() const {
- return (file_size_in_bytes_ / (kBytesPerSample * sample_rate_));
+ return static_cast<int>(
+ file_size_in_bytes_ / (kBytesPerSample * sample_rate_));
}
- int file_size_in_callbacks() const {
+ size_t file_size_in_callbacks() const {
return file_size_in_seconds() * kNumCallbacksPerSecond;
}
private:
- int file_size_in_bytes_;
+ size_t file_size_in_bytes_;
int sample_rate_;
rtc::scoped_ptr<int16_t[]> file_;
- int file_pos_;
+ size_t file_pos_;
};
// Simple first in first out (FIFO) class that wraps a list of 16-bit audio
@@ -156,7 +158,7 @@ class FileAudioStream : public AudioStreamInterface {
// since both sides (playout and recording) are driven by its own thread.
class FifoAudioStream : public AudioStreamInterface {
public:
- explicit FifoAudioStream(int frames_per_buffer)
+ explicit FifoAudioStream(size_t frames_per_buffer)
: frames_per_buffer_(frames_per_buffer),
bytes_per_buffer_(frames_per_buffer_ * sizeof(int16_t)),
fifo_(new AudioBufferList),
@@ -173,7 +175,7 @@ class FifoAudioStream : public AudioStreamInterface {
// Allocate new memory, copy |num_frames| samples from |source| into memory
// and add pointer to the memory location to end of the list.
// Increases the size of the FIFO by one element.
- void Write(const void* source, int num_frames) override {
+ void Write(const void* source, size_t num_frames) override {
ASSERT_EQ(num_frames, frames_per_buffer_);
PRINTD("+");
if (write_count_++ < kNumIgnoreFirstCallbacks) {
@@ -185,10 +187,10 @@ class FifoAudioStream : public AudioStreamInterface {
bytes_per_buffer_);
rtc::CritScope lock(&lock_);
fifo_->push_back(memory);
- const int size = fifo_->size();
+ const size_t size = fifo_->size();
if (size > largest_size_) {
largest_size_ = size;
- PRINTD("(%d)", largest_size_);
+ PRINTD("(%" PRIuS ")", largest_size_);
}
total_written_elements_ += size;
}
@@ -196,7 +198,7 @@ class FifoAudioStream : public AudioStreamInterface {
// Read pointer to data buffer from front of list, copy |num_frames| of stored
// data into |destination| and delete the utilized memory allocation.
// Decreases the size of the FIFO by one element.
- void Read(void* destination, int num_frames) override {
+ void Read(void* destination, size_t num_frames) override {
ASSERT_EQ(num_frames, frames_per_buffer_);
PRINTD("-");
rtc::CritScope lock(&lock_);
@@ -212,15 +214,15 @@ class FifoAudioStream : public AudioStreamInterface {
}
}
- int size() const {
+ size_t size() const {
return fifo_->size();
}
- int largest_size() const {
+ size_t largest_size() const {
return largest_size_;
}
- int average_size() const {
+ size_t average_size() const {
return (total_written_elements_ == 0) ? 0.0 : 0.5 + static_cast<float> (
total_written_elements_) / (write_count_ - kNumIgnoreFirstCallbacks);
}
@@ -235,12 +237,12 @@ class FifoAudioStream : public AudioStreamInterface {
using AudioBufferList = std::list<int16_t*>;
rtc::CriticalSection lock_;
- const int frames_per_buffer_;
- const int bytes_per_buffer_;
+ const size_t frames_per_buffer_;
+ const size_t bytes_per_buffer_;
rtc::scoped_ptr<AudioBufferList> fifo_;
- int largest_size_;
- int total_written_elements_;
- int write_count_;
+ size_t largest_size_;
+ size_t total_written_elements_;
+ size_t write_count_;
};
// Inserts periodic impulses and measures the latency between the time of
@@ -249,7 +251,7 @@ class FifoAudioStream : public AudioStreamInterface {
// See http://source.android.com/devices/audio/loopback.html for details.
class LatencyMeasuringAudioStream : public AudioStreamInterface {
public:
- explicit LatencyMeasuringAudioStream(int frames_per_buffer)
+ explicit LatencyMeasuringAudioStream(size_t frames_per_buffer)
: clock_(Clock::GetRealTimeClock()),
frames_per_buffer_(frames_per_buffer),
bytes_per_buffer_(frames_per_buffer_ * sizeof(int16_t)),
@@ -259,7 +261,7 @@ class LatencyMeasuringAudioStream : public AudioStreamInterface {
}
// Insert periodic impulses in first two samples of |destination|.
- void Read(void* destination, int num_frames) override {
+ void Read(void* destination, size_t num_frames) override {
ASSERT_EQ(num_frames, frames_per_buffer_);
if (play_count_ == 0) {
PRINT("[");
@@ -273,15 +275,15 @@ class LatencyMeasuringAudioStream : public AudioStreamInterface {
PRINT(".");
const int16_t impulse = std::numeric_limits<int16_t>::max();
int16_t* ptr16 = static_cast<int16_t*> (destination);
- for (int i = 0; i < 2; ++i) {
- *ptr16++ = impulse;
+ for (size_t i = 0; i < 2; ++i) {
+ ptr16[i] = impulse;
}
}
}
// Detect received impulses in |source|, derive time between transmission and
// detection and add the calculated delay to list of latencies.
- void Write(const void* source, int num_frames) override {
+ void Write(const void* source, size_t num_frames) override {
ASSERT_EQ(num_frames, frames_per_buffer_);
rec_count_++;
if (pulse_time_ == 0) {
@@ -315,7 +317,7 @@ class LatencyMeasuringAudioStream : public AudioStreamInterface {
}
}
- int num_latency_values() const {
+ size_t num_latency_values() const {
return latencies_.size();
}
@@ -350,15 +352,15 @@ class LatencyMeasuringAudioStream : public AudioStreamInterface {
}
int IndexToMilliseconds(double index) const {
- return 10.0 * (index / frames_per_buffer_) + 0.5;
+ return static_cast<int>(10.0 * (index / frames_per_buffer_) + 0.5);
}
private:
Clock* clock_;
- const int frames_per_buffer_;
- const int bytes_per_buffer_;
- int play_count_;
- int rec_count_;
+ const size_t frames_per_buffer_;
+ const size_t bytes_per_buffer_;
+ size_t play_count_;
+ size_t rec_count_;
int64_t pulse_time_;
std::vector<int> latencies_;
};
@@ -379,8 +381,8 @@ class MockAudioTransport : public AudioTransport {
MOCK_METHOD10(RecordedDataIsAvailable,
int32_t(const void* audioSamples,
- const uint32_t nSamples,
- const uint8_t nBytesPerSample,
+ const size_t nSamples,
+ const size_t nBytesPerSample,
const uint8_t nChannels,
const uint32_t samplesPerSec,
const uint32_t totalDelayMS,
@@ -389,12 +391,12 @@ class MockAudioTransport : public AudioTransport {
const bool keyPressed,
uint32_t& newMicLevel));
MOCK_METHOD8(NeedMorePlayData,
- int32_t(const uint32_t nSamples,
- const uint8_t nBytesPerSample,
+ int32_t(const size_t nSamples,
+ const size_t nBytesPerSample,
const uint8_t nChannels,
const uint32_t samplesPerSec,
void* audioSamples,
- uint32_t& nSamplesOut,
+ size_t& nSamplesOut,
int64_t* elapsed_time_ms,
int64_t* ntp_time_ms));
@@ -419,8 +421,8 @@ class MockAudioTransport : public AudioTransport {
}
int32_t RealRecordedDataIsAvailable(const void* audioSamples,
- const uint32_t nSamples,
- const uint8_t nBytesPerSample,
+ const size_t nSamples,
+ const size_t nBytesPerSample,
const uint8_t nChannels,
const uint32_t samplesPerSec,
const uint32_t totalDelayMS,
@@ -441,12 +443,12 @@ class MockAudioTransport : public AudioTransport {
return 0;
}
- int32_t RealNeedMorePlayData(const uint32_t nSamples,
- const uint8_t nBytesPerSample,
+ int32_t RealNeedMorePlayData(const size_t nSamples,
+ const size_t nBytesPerSample,
const uint8_t nChannels,
const uint32_t samplesPerSec,
void* audioSamples,
- uint32_t& nSamplesOut,
+ size_t& nSamplesOut,
int64_t* elapsed_time_ms,
int64_t* ntp_time_ms) {
EXPECT_TRUE(play_mode()) << "No test is expecting these callbacks.";
@@ -484,10 +486,10 @@ class MockAudioTransport : public AudioTransport {
private:
EventWrapper* test_is_done_;
- int num_callbacks_;
+ size_t num_callbacks_;
int type_;
- int play_count_;
- int rec_count_;
+ size_t play_count_;
+ size_t rec_count_;
AudioStreamInterface* audio_stream_;
rtc::scoped_ptr<LatencyMeasuringAudioStream> latency_audio_stream_;
};
@@ -525,10 +527,10 @@ class AudioDeviceTest : public ::testing::Test {
int record_channels() const {
return record_parameters_.channels();
}
- int playout_frames_per_10ms_buffer() const {
+ size_t playout_frames_per_10ms_buffer() const {
return playout_parameters_.frames_per_10ms_buffer();
}
- int record_frames_per_10ms_buffer() const {
+ size_t record_frames_per_10ms_buffer() const {
return record_parameters_.frames_per_10ms_buffer();
}
@@ -576,12 +578,14 @@ class AudioDeviceTest : public ::testing::Test {
EXPECT_TRUE(test::FileExists(file_name));
#ifdef ENABLE_PRINTF
PRINT("file name: %s\n", file_name.c_str());
- const int bytes = test::GetFileSize(file_name);
- PRINT("file size: %d [bytes]\n", bytes);
- PRINT("file size: %d [samples]\n", bytes / kBytesPerSample);
- const int seconds = bytes / (sample_rate * kBytesPerSample);
+ const size_t bytes = test::GetFileSize(file_name);
+ PRINT("file size: %" PRIuS " [bytes]\n", bytes);
+ PRINT("file size: %" PRIuS " [samples]\n", bytes / kBytesPerSample);
+ const int seconds =
+ static_cast<int>(bytes / (sample_rate * kBytesPerSample));
PRINT("file size: %d [secs]\n", seconds);
- PRINT("file size: %d [callbacks]\n", seconds * kNumCallbacksPerSecond);
+ PRINT("file size: %" PRIuS " [callbacks]\n",
+ seconds * kNumCallbacksPerSecond);
#endif
return file_name;
}
@@ -827,9 +831,19 @@ TEST_F(AudioDeviceTest, StartStopPlayout) {
StopPlayout();
}
+// Tests that recording can be initiated, started and stopped. No audio callback
+// is registered in this test.
+TEST_F(AudioDeviceTest, StartStopRecording) {
+ StartRecording();
+ StopRecording();
+ StartRecording();
+ StopRecording();
+}
+
// Verify that calling StopPlayout() will leave us in an uninitialized state
// which will require a new call to InitPlayout(). This test does not call
-// StartPlayout() while being uninitialized since doing so will hit a DCHECK.
+// StartPlayout() while being uninitialized since doing so will hit a
+// RTC_DCHECK.
TEST_F(AudioDeviceTest, StopPlayoutRequiresInitToRestart) {
EXPECT_EQ(0, audio_device()->InitPlayout());
EXPECT_EQ(0, audio_device()->StartPlayout());
@@ -961,8 +975,8 @@ TEST_F(AudioDeviceTest, RunPlayoutAndRecordingInFullDuplex) {
1000 * kFullDuplexTimeInSec));
StopPlayout();
StopRecording();
- EXPECT_LE(fifo_audio_stream->average_size(), 10);
- EXPECT_LE(fifo_audio_stream->largest_size(), 20);
+ EXPECT_LE(fifo_audio_stream->average_size(), 10u);
+ EXPECT_LE(fifo_audio_stream->largest_size(), 20u);
}
// Measures loopback latency and reports the min, max and average values for
@@ -994,7 +1008,8 @@ TEST_F(AudioDeviceTest, DISABLED_MeasureLoopbackLatency) {
StopRecording();
// Verify that the correct number of transmitted impulses are detected.
EXPECT_EQ(latency_audio_stream->num_latency_values(),
- kImpulseFrequencyInHz * kMeasureLatencyTimeInSec - 1);
+ static_cast<size_t>(
+ kImpulseFrequencyInHz * kMeasureLatencyTimeInSec - 1));
latency_audio_stream->PrintResults();
}
diff --git a/chromium/third_party/webrtc/modules/audio_device/android/audio_manager.cc b/chromium/third_party/webrtc/modules/audio_device/android/audio_manager.cc
index 81fabdf9fec..260e793d600 100644
--- a/chromium/third_party/webrtc/modules/audio_device/android/audio_manager.cc
+++ b/chromium/third_party/webrtc/modules/audio_device/android/audio_manager.cc
@@ -68,13 +68,15 @@ AudioManager::AudioManager()
audio_layer_(AudioDeviceModule::kPlatformDefaultAudio),
initialized_(false),
hardware_aec_(false),
+ hardware_agc_(false),
+ hardware_ns_(false),
low_latency_playout_(false),
delay_estimate_in_milliseconds_(0) {
ALOGD("ctor%s", GetThreadInfo().c_str());
- CHECK(j_environment_);
+ RTC_CHECK(j_environment_);
JNINativeMethod native_methods[] = {
{"nativeCacheAudioParameters",
- "(IIZZIIJ)V",
+ "(IIZZZZIIJ)V",
reinterpret_cast<void*>(&webrtc::AudioManager::CacheAudioParameters)}};
j_native_registration_ = j_environment_->RegisterNatives(
"org/webrtc/voiceengine/WebRtcAudioManager",
@@ -88,15 +90,15 @@ AudioManager::AudioManager()
AudioManager::~AudioManager() {
ALOGD("~dtor%s", GetThreadInfo().c_str());
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
Close();
}
void AudioManager::SetActiveAudioLayer(
AudioDeviceModule::AudioLayer audio_layer) {
ALOGD("SetActiveAudioLayer(%d)%s", audio_layer, GetThreadInfo().c_str());
- DCHECK(thread_checker_.CalledOnValidThread());
- DCHECK(!initialized_);
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(!initialized_);
// Store the currenttly utilized audio layer.
audio_layer_ = audio_layer;
// The delay estimate can take one of two fixed values depending on if the
@@ -112,9 +114,9 @@ void AudioManager::SetActiveAudioLayer(
bool AudioManager::Init() {
ALOGD("Init%s", GetThreadInfo().c_str());
- DCHECK(thread_checker_.CalledOnValidThread());
- DCHECK(!initialized_);
- DCHECK_NE(audio_layer_, AudioDeviceModule::kPlatformDefaultAudio);
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(!initialized_);
+ RTC_DCHECK_NE(audio_layer_, AudioDeviceModule::kPlatformDefaultAudio);
if (!j_audio_manager_->Init()) {
ALOGE("init failed!");
return false;
@@ -125,7 +127,7 @@ bool AudioManager::Init() {
bool AudioManager::Close() {
ALOGD("Close%s", GetThreadInfo().c_str());
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
if (!initialized_)
return true;
j_audio_manager_->Close();
@@ -135,17 +137,27 @@ bool AudioManager::Close() {
bool AudioManager::IsCommunicationModeEnabled() const {
ALOGD("IsCommunicationModeEnabled()");
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
return j_audio_manager_->IsCommunicationModeEnabled();
}
bool AudioManager::IsAcousticEchoCancelerSupported() const {
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
return hardware_aec_;
}
+bool AudioManager::IsAutomaticGainControlSupported() const {
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ return hardware_agc_;
+}
+
+bool AudioManager::IsNoiseSuppressorSupported() const {
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ return hardware_ns_;
+}
+
bool AudioManager::IsLowLatencyPlayoutSupported() const {
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
ALOGD("IsLowLatencyPlayoutSupported()");
// Some devices are blacklisted for usage of OpenSL ES even if they report
// that low-latency playout is supported. See b/21485703 for details.
@@ -162,6 +174,8 @@ void JNICALL AudioManager::CacheAudioParameters(JNIEnv* env,
jint sample_rate,
jint channels,
jboolean hardware_aec,
+ jboolean hardware_agc,
+ jboolean hardware_ns,
jboolean low_latency_output,
jint output_buffer_size,
jint input_buffer_size,
@@ -169,41 +183,49 @@ void JNICALL AudioManager::CacheAudioParameters(JNIEnv* env,
webrtc::AudioManager* this_object =
reinterpret_cast<webrtc::AudioManager*>(native_audio_manager);
this_object->OnCacheAudioParameters(
- env, sample_rate, channels, hardware_aec, low_latency_output,
- output_buffer_size, input_buffer_size);
+ env, sample_rate, channels, hardware_aec, hardware_agc, hardware_ns,
+ low_latency_output, output_buffer_size, input_buffer_size);
}
void AudioManager::OnCacheAudioParameters(JNIEnv* env,
jint sample_rate,
jint channels,
jboolean hardware_aec,
+ jboolean hardware_agc,
+ jboolean hardware_ns,
jboolean low_latency_output,
jint output_buffer_size,
jint input_buffer_size) {
ALOGD("OnCacheAudioParameters%s", GetThreadInfo().c_str());
ALOGD("hardware_aec: %d", hardware_aec);
+ ALOGD("hardware_agc: %d", hardware_agc);
+ ALOGD("hardware_ns: %d", hardware_ns);
ALOGD("low_latency_output: %d", low_latency_output);
ALOGD("sample_rate: %d", sample_rate);
ALOGD("channels: %d", channels);
ALOGD("output_buffer_size: %d", output_buffer_size);
ALOGD("input_buffer_size: %d", input_buffer_size);
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
hardware_aec_ = hardware_aec;
+ hardware_agc_ = hardware_agc;
+ hardware_ns_ = hardware_ns;
low_latency_playout_ = low_latency_output;
// TODO(henrika): add support for stereo output.
- playout_parameters_.reset(sample_rate, channels, output_buffer_size);
- record_parameters_.reset(sample_rate, channels, input_buffer_size);
+ playout_parameters_.reset(sample_rate, channels,
+ static_cast<size_t>(output_buffer_size));
+ record_parameters_.reset(sample_rate, channels,
+ static_cast<size_t>(input_buffer_size));
}
const AudioParameters& AudioManager::GetPlayoutAudioParameters() {
- CHECK(playout_parameters_.is_valid());
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_CHECK(playout_parameters_.is_valid());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
return playout_parameters_;
}
const AudioParameters& AudioManager::GetRecordAudioParameters() {
- CHECK(record_parameters_.is_valid());
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_CHECK(record_parameters_.is_valid());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
return record_parameters_;
}
diff --git a/chromium/third_party/webrtc/modules/audio_device/android/audio_manager.h b/chromium/third_party/webrtc/modules/audio_device/android/audio_manager.h
index 8d96d27e391..9cceaacfca7 100644
--- a/chromium/third_party/webrtc/modules/audio_device/android/audio_manager.h
+++ b/chromium/third_party/webrtc/modules/audio_device/android/audio_manager.h
@@ -16,6 +16,7 @@
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/base/thread_checker.h"
#include "webrtc/modules/audio_device/android/audio_common.h"
+#include "webrtc/modules/audio_device/audio_device_config.h"
#include "webrtc/modules/audio_device/include/audio_device_defines.h"
#include "webrtc/modules/audio_device/audio_device_generic.h"
#include "webrtc/modules/utility/interface/helpers_android.h"
@@ -23,60 +24,6 @@
namespace webrtc {
-class AudioParameters {
- public:
- enum { kBitsPerSample = 16 };
- AudioParameters()
- : sample_rate_(0),
- channels_(0),
- frames_per_buffer_(0),
- frames_per_10ms_buffer_(0),
- bits_per_sample_(kBitsPerSample) {}
- AudioParameters(int sample_rate, int channels, int frames_per_buffer)
- : sample_rate_(sample_rate),
- channels_(channels),
- frames_per_buffer_(frames_per_buffer),
- frames_per_10ms_buffer_(sample_rate / 100),
- bits_per_sample_(kBitsPerSample) {}
- void reset(int sample_rate, int channels, int frames_per_buffer) {
- sample_rate_ = sample_rate;
- channels_ = channels;
- frames_per_buffer_ = frames_per_buffer;
- frames_per_10ms_buffer_ = (sample_rate / 100);
- }
- int sample_rate() const { return sample_rate_; }
- int channels() const { return channels_; }
- int frames_per_buffer() const { return frames_per_buffer_; }
- int frames_per_10ms_buffer() const { return frames_per_10ms_buffer_; }
- int bits_per_sample() const { return bits_per_sample_; }
- bool is_valid() const {
- return ((sample_rate_ > 0) && (channels_ > 0) && (frames_per_buffer_ > 0));
- }
- int GetBytesPerFrame() const { return channels_ * bits_per_sample_ / 8; }
- int GetBytesPerBuffer() const {
- return frames_per_buffer_ * GetBytesPerFrame();
- }
- int GetBytesPer10msBuffer() const {
- return frames_per_10ms_buffer_ * GetBytesPerFrame();
- }
- float GetBufferSizeInMilliseconds() const {
- if (sample_rate_ == 0)
- return 0.0f;
- return frames_per_buffer_ / (sample_rate_ / 1000.0f);
- }
-
- private:
- int sample_rate_;
- int channels_;
- // Lowest possible size of native audio buffer. Measured in number of frames.
- // This size is injected into the OpenSL ES output (since it does not "talk
- // Java") implementation but is currently not utilized by the Java
- // implementation since it aquires the same value internally.
- int frames_per_buffer_;
- int frames_per_10ms_buffer_;
- int bits_per_sample_;
-};
-
// Implements support for functions in the WebRTC audio stack for Android that
// relies on the AudioManager in android.media. It also populates an
// AudioParameter structure with native audio parameters detected at
@@ -127,12 +74,14 @@ class AudioManager {
const AudioParameters& GetPlayoutAudioParameters();
const AudioParameters& GetRecordAudioParameters();
- // Returns true if the device supports a built-in Acoustic Echo Canceler.
- // Some devices can also be blacklisted for use in combination with an AEC
- // and these devices will return false.
+ // Returns true if the device supports built-in audio effects for AEC, AGC
+ // and NS. Some devices can also be blacklisted for use in combination with
+ // platform effects and these devices will return false.
// Can currently only be used in combination with a Java based audio backend
// for the recoring side (i.e. using the android.media.AudioRecord API).
bool IsAcousticEchoCancelerSupported() const;
+ bool IsAutomaticGainControlSupported() const;
+ bool IsNoiseSuppressorSupported() const;
// Returns true if the device supports the low-latency audio paths in
// combination with OpenSL ES.
@@ -153,6 +102,8 @@ class AudioManager {
jint sample_rate,
jint channels,
jboolean hardware_aec,
+ jboolean hardware_agc,
+ jboolean hardware_ns,
jboolean low_latency_output,
jint output_buffer_size,
jint input_buffer_size,
@@ -161,6 +112,8 @@ class AudioManager {
jint sample_rate,
jint channels,
jboolean hardware_aec,
+ jboolean hardware_agc,
+ jboolean hardware_ns,
jboolean low_latency_output,
jint output_buffer_size,
jint input_buffer_size);
@@ -190,6 +143,10 @@ class AudioManager {
// True if device supports hardware (or built-in) AEC.
bool hardware_aec_;
+ // True if device supports hardware (or built-in) AGC.
+ bool hardware_agc_;
+ // True if device supports hardware (or built-in) NS.
+ bool hardware_ns_;
// True if device supports the low-latency OpenSL ES audio path.
bool low_latency_playout_;
diff --git a/chromium/third_party/webrtc/modules/audio_device/android/audio_manager_unittest.cc b/chromium/third_party/webrtc/modules/audio_device/android/audio_manager_unittest.cc
index f790e6a6bc4..a5bc840dff4 100644
--- a/chromium/third_party/webrtc/modules/audio_device/android/audio_manager_unittest.cc
+++ b/chromium/third_party/webrtc/modules/audio_device/android/audio_manager_unittest.cc
@@ -9,6 +9,7 @@
*/
#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/format_macros.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/modules/audio_device/android/build_info.h"
#include "webrtc/modules/audio_device/android/audio_manager.h"
@@ -60,6 +61,16 @@ TEST_F(AudioManagerTest, IsAcousticEchoCancelerSupported) {
audio_manager()->IsAcousticEchoCancelerSupported() ? "Yes" : "No");
}
+TEST_F(AudioManagerTest, IsAutomaticGainControlSupported) {
+ PRINT("%sAutomatic Gain Control support: %s\n", kTag,
+ audio_manager()->IsAutomaticGainControlSupported() ? "Yes" : "No");
+}
+
+TEST_F(AudioManagerTest, IsNoiseSuppressorSupported) {
+ PRINT("%sNoise Suppressor support: %s\n", kTag,
+ audio_manager()->IsNoiseSuppressorSupported() ? "Yes" : "No");
+}
+
TEST_F(AudioManagerTest, IsLowLatencyPlayoutSupported) {
PRINT("%sLow latency output support: %s\n", kTag,
audio_manager()->IsLowLatencyPlayoutSupported() ? "Yes" : "No");
@@ -72,14 +83,14 @@ TEST_F(AudioManagerTest, ShowAudioParameterInfo) {
low_latency_out ? "Low latency OpenSL" : "Java/JNI based AudioTrack");
PRINT("%ssample rate: %d Hz\n", kTag, playout_parameters_.sample_rate());
PRINT("%schannels: %d\n", kTag, playout_parameters_.channels());
- PRINT("%sframes per buffer: %d <=> %.2f ms\n", kTag,
+ PRINT("%sframes per buffer: %" PRIuS " <=> %.2f ms\n", kTag,
playout_parameters_.frames_per_buffer(),
playout_parameters_.GetBufferSizeInMilliseconds());
PRINT("RECORD: \n");
PRINT("%saudio layer: %s\n", kTag, "Java/JNI based AudioRecord");
PRINT("%ssample rate: %d Hz\n", kTag, record_parameters_.sample_rate());
PRINT("%schannels: %d\n", kTag, record_parameters_.channels());
- PRINT("%sframes per buffer: %d <=> %.2f ms\n", kTag,
+ PRINT("%sframes per buffer: %" PRIuS " <=> %.2f ms\n", kTag,
record_parameters_.frames_per_buffer(),
record_parameters_.GetBufferSizeInMilliseconds());
}
@@ -109,11 +120,11 @@ TEST_F(AudioManagerTest, AudioParametersWithDefaultConstruction) {
EXPECT_FALSE(params.is_valid());
EXPECT_EQ(0, params.sample_rate());
EXPECT_EQ(0, params.channels());
- EXPECT_EQ(0, params.frames_per_buffer());
- EXPECT_EQ(0, params.frames_per_10ms_buffer());
- EXPECT_EQ(0, params.GetBytesPerFrame());
- EXPECT_EQ(0, params.GetBytesPerBuffer());
- EXPECT_EQ(0, params.GetBytesPer10msBuffer());
+ EXPECT_EQ(0U, params.frames_per_buffer());
+ EXPECT_EQ(0U, params.frames_per_10ms_buffer());
+ EXPECT_EQ(0U, params.GetBytesPerFrame());
+ EXPECT_EQ(0U, params.GetBytesPerBuffer());
+ EXPECT_EQ(0U, params.GetBytesPer10msBuffer());
EXPECT_EQ(0.0f, params.GetBufferSizeInMilliseconds());
}
@@ -121,16 +132,17 @@ TEST_F(AudioManagerTest, AudioParametersWithDefaultConstruction) {
TEST_F(AudioManagerTest, AudioParametersWithNonDefaultConstruction) {
const int kSampleRate = 48000;
const int kChannels = 1;
- const int kFramesPerBuffer = 480;
- const int kFramesPer10msBuffer = 480;
- const int kBytesPerFrame = 2;
+ const size_t kFramesPerBuffer = 480;
+ const size_t kFramesPer10msBuffer = 480;
+ const size_t kBytesPerFrame = 2;
const float kBufferSizeInMs = 10.0f;
AudioParameters params(kSampleRate, kChannels, kFramesPerBuffer);
EXPECT_TRUE(params.is_valid());
EXPECT_EQ(kSampleRate, params.sample_rate());
EXPECT_EQ(kChannels, params.channels());
EXPECT_EQ(kFramesPerBuffer, params.frames_per_buffer());
- EXPECT_EQ(kSampleRate / 100, params.frames_per_10ms_buffer());
+ EXPECT_EQ(static_cast<size_t>(kSampleRate / 100),
+ params.frames_per_10ms_buffer());
EXPECT_EQ(kBytesPerFrame, params.GetBytesPerFrame());
EXPECT_EQ(kBytesPerFrame * kFramesPerBuffer, params.GetBytesPerBuffer());
EXPECT_EQ(kBytesPerFrame * kFramesPer10msBuffer,
diff --git a/chromium/third_party/webrtc/modules/audio_device/android/build_info.h b/chromium/third_party/webrtc/modules/audio_device/android/build_info.h
index aea71f7e877..d9b2871841b 100644
--- a/chromium/third_party/webrtc/modules/audio_device/android/build_info.h
+++ b/chromium/third_party/webrtc/modules/audio_device/android/build_info.h
@@ -23,7 +23,7 @@ namespace webrtc {
// The calling thread is attached to the JVM at construction if needed and a
// valid Java environment object is also created.
// All Get methods must be called on the creating thread. If not, the code will
-// hit DCHECKs when calling JNIEnvironment::JavaToStdString().
+// hit RTC_DCHECKs when calling JNIEnvironment::JavaToStdString().
class BuildInfo {
public:
BuildInfo();
diff --git a/chromium/third_party/webrtc/modules/audio_device/android/ensure_initialized.cc b/chromium/third_party/webrtc/modules/audio_device/android/ensure_initialized.cc
index a194a5e9c9a..e8197b7ca01 100644
--- a/chromium/third_party/webrtc/modules/audio_device/android/ensure_initialized.cc
+++ b/chromium/third_party/webrtc/modules/audio_device/android/ensure_initialized.cc
@@ -12,9 +12,10 @@
#include <pthread.h>
+// Note: this dependency is dangerous since it reaches into Chromium's base.
+// There's a risk of e.g. macro clashes. This file may only be used in tests.
#include "base/android/jni_android.h"
#include "webrtc/base/checks.h"
-#include "webrtc/modules/audio_device/android/audio_device_template.h"
#include "webrtc/modules/audio_device/android/audio_record_jni.h"
#include "webrtc/modules/audio_device/android/audio_track_jni.h"
#include "webrtc/modules/utility/interface/jvm_android.h"
@@ -25,10 +26,10 @@ namespace audiodevicemodule {
static pthread_once_t g_initialize_once = PTHREAD_ONCE_INIT;
void EnsureInitializedOnce() {
- CHECK(::base::android::IsVMInitialized());
+ RTC_CHECK(::base::android::IsVMInitialized());
JNIEnv* jni = ::base::android::AttachCurrentThread();
JavaVM* jvm = NULL;
- CHECK_EQ(0, jni->GetJavaVM(&jvm));
+ RTC_CHECK_EQ(0, jni->GetJavaVM(&jvm));
jobject context = ::base::android::GetApplicationContext();
// Initialize the Java environment (currently only used by the audio manager).
@@ -36,7 +37,7 @@ void EnsureInitializedOnce() {
}
void EnsureInitialized() {
- CHECK_EQ(0, pthread_once(&g_initialize_once, &EnsureInitializedOnce));
+ RTC_CHECK_EQ(0, pthread_once(&g_initialize_once, &EnsureInitializedOnce));
}
} // namespace audiodevicemodule
diff --git a/chromium/third_party/webrtc/modules/audio_device/android/fine_audio_buffer.cc b/chromium/third_party/webrtc/modules/audio_device/android/fine_audio_buffer.cc
deleted file mode 100644
index 99f853a23e4..00000000000
--- a/chromium/third_party/webrtc/modules/audio_device/android/fine_audio_buffer.cc
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "webrtc/modules/audio_device/android/fine_audio_buffer.h"
-
-#include <memory.h>
-#include <stdio.h>
-#include <algorithm>
-
-#include "webrtc/base/checks.h"
-#include "webrtc/modules/audio_device/audio_device_buffer.h"
-
-namespace webrtc {
-
-FineAudioBuffer::FineAudioBuffer(AudioDeviceBuffer* device_buffer,
- int desired_frame_size_bytes,
- int sample_rate)
- : device_buffer_(device_buffer),
- desired_frame_size_bytes_(desired_frame_size_bytes),
- sample_rate_(sample_rate),
- samples_per_10_ms_(sample_rate_ * 10 / 1000),
- bytes_per_10_ms_(samples_per_10_ms_ * sizeof(int16_t)),
- cached_buffer_start_(0),
- cached_bytes_(0) {
- cache_buffer_.reset(new int8_t[bytes_per_10_ms_]);
-}
-
-FineAudioBuffer::~FineAudioBuffer() {
-}
-
-int FineAudioBuffer::RequiredBufferSizeBytes() {
- // It is possible that we store the desired frame size - 1 samples. Since new
- // audio frames are pulled in chunks of 10ms we will need a buffer that can
- // hold desired_frame_size - 1 + 10ms of data. We omit the - 1.
- return desired_frame_size_bytes_ + bytes_per_10_ms_;
-}
-
-void FineAudioBuffer::GetBufferData(int8_t* buffer) {
- if (desired_frame_size_bytes_ <= cached_bytes_) {
- memcpy(buffer, &cache_buffer_.get()[cached_buffer_start_],
- desired_frame_size_bytes_);
- cached_buffer_start_ += desired_frame_size_bytes_;
- cached_bytes_ -= desired_frame_size_bytes_;
- CHECK_LT(cached_buffer_start_ + cached_bytes_, bytes_per_10_ms_);
- return;
- }
- memcpy(buffer, &cache_buffer_.get()[cached_buffer_start_], cached_bytes_);
- // Push another n*10ms of audio to |buffer|. n > 1 if
- // |desired_frame_size_bytes_| is greater than 10ms of audio. Note that we
- // write the audio after the cached bytes copied earlier.
- int8_t* unwritten_buffer = &buffer[cached_bytes_];
- int bytes_left = desired_frame_size_bytes_ - cached_bytes_;
- // Ceiling of integer division: 1 + ((x - 1) / y)
- int number_of_requests = 1 + (bytes_left - 1) / (bytes_per_10_ms_);
- for (int i = 0; i < number_of_requests; ++i) {
- device_buffer_->RequestPlayoutData(samples_per_10_ms_);
- int num_out = device_buffer_->GetPlayoutData(unwritten_buffer);
- if (num_out != samples_per_10_ms_) {
- CHECK_EQ(num_out, 0);
- cached_bytes_ = 0;
- return;
- }
- unwritten_buffer += bytes_per_10_ms_;
- CHECK_GE(bytes_left, 0);
- bytes_left -= bytes_per_10_ms_;
- }
- CHECK_LE(bytes_left, 0);
- // Put the samples that were written to |buffer| but are not used in the
- // cache.
- int cache_location = desired_frame_size_bytes_;
- int8_t* cache_ptr = &buffer[cache_location];
- cached_bytes_ = number_of_requests * bytes_per_10_ms_ -
- (desired_frame_size_bytes_ - cached_bytes_);
- // If cached_bytes_ is larger than the cache buffer, uninitialized memory
- // will be read.
- CHECK_LE(cached_bytes_, bytes_per_10_ms_);
- CHECK_EQ(-bytes_left, cached_bytes_);
- cached_buffer_start_ = 0;
- memcpy(cache_buffer_.get(), cache_ptr, cached_bytes_);
-}
-
-} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_device/android/fine_audio_buffer.h b/chromium/third_party/webrtc/modules/audio_device/android/fine_audio_buffer.h
deleted file mode 100644
index dce40beb784..00000000000
--- a/chromium/third_party/webrtc/modules/audio_device/android/fine_audio_buffer.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_FINE_AUDIO_BUFFER_H_
-#define WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_FINE_AUDIO_BUFFER_H_
-
-#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/typedefs.h"
-
-namespace webrtc {
-
-class AudioDeviceBuffer;
-
-// FineAudioBuffer takes an AudioDeviceBuffer which delivers audio data
-// corresponding to 10ms of data. It then allows for this data to be pulled in
-// a finer or coarser granularity. I.e. interacting with this class instead of
-// directly with the AudioDeviceBuffer one can ask for any number of audio data
-// samples.
-class FineAudioBuffer {
- public:
- // |device_buffer| is a buffer that provides 10ms of audio data.
- // |desired_frame_size_bytes| is the number of bytes of audio data
- // (not samples) |GetBufferData| should return on success.
- // |sample_rate| is the sample rate of the audio data. This is needed because
- // |device_buffer| delivers 10ms of data. Given the sample rate the number
- // of samples can be calculated.
- FineAudioBuffer(AudioDeviceBuffer* device_buffer,
- int desired_frame_size_bytes,
- int sample_rate);
- ~FineAudioBuffer();
-
- // Returns the required size of |buffer| when calling GetBufferData. If the
- // buffer is smaller memory trampling will happen.
- // |desired_frame_size_bytes| and |samples_rate| are as described in the
- // constructor.
- int RequiredBufferSizeBytes();
-
- // |buffer| must be of equal or greater size than what is returned by
- // RequiredBufferSize. This is to avoid unnecessary memcpy.
- void GetBufferData(int8_t* buffer);
-
- private:
- // Device buffer that provides 10ms chunks of data.
- AudioDeviceBuffer* device_buffer_;
- // Number of bytes delivered per GetBufferData
- int desired_frame_size_bytes_;
- int sample_rate_;
- int samples_per_10_ms_;
- // Convenience parameter to avoid converting from samples
- int bytes_per_10_ms_;
-
- // Storage for samples that are not yet asked for.
- rtc::scoped_ptr<int8_t[]> cache_buffer_;
- // Location of first unread sample.
- int cached_buffer_start_;
- // Number of bytes stored in cache.
- int cached_bytes_;
-};
-
-} // namespace webrtc
-
-#endif // WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_FINE_AUDIO_BUFFER_H_
diff --git a/chromium/third_party/webrtc/modules/audio_device/android/opensles_common.h b/chromium/third_party/webrtc/modules/audio_device/android/opensles_common.h
index 75e4ff4b719..a4487b095c8 100644
--- a/chromium/third_party/webrtc/modules/audio_device/android/opensles_common.h
+++ b/chromium/third_party/webrtc/modules/audio_device/android/opensles_common.h
@@ -28,7 +28,7 @@ class ScopedSLObject {
~ScopedSLObject() { Reset(); }
SLType* Receive() {
- DCHECK(!obj_);
+ RTC_DCHECK(!obj_);
return &obj_;
}
diff --git a/chromium/third_party/webrtc/modules/audio_device/android/opensles_player.cc b/chromium/third_party/webrtc/modules/audio_device/android/opensles_player.cc
index 0789ebf1baa..b9ccfd594d3 100644
--- a/chromium/third_party/webrtc/modules/audio_device/android/opensles_player.cc
+++ b/chromium/third_party/webrtc/modules/audio_device/android/opensles_player.cc
@@ -14,8 +14,9 @@
#include "webrtc/base/arraysize.h"
#include "webrtc/base/checks.h"
+#include "webrtc/base/format_macros.h"
#include "webrtc/modules/audio_device/android/audio_manager.h"
-#include "webrtc/modules/audio_device/android/fine_audio_buffer.h"
+#include "webrtc/modules/audio_device/fine_audio_buffer.h"
#define TAG "OpenSLESPlayer"
#define ALOGV(...) __android_log_print(ANDROID_LOG_VERBOSE, TAG, __VA_ARGS__)
@@ -59,37 +60,37 @@ OpenSLESPlayer::OpenSLESPlayer(AudioManager* audio_manager)
OpenSLESPlayer::~OpenSLESPlayer() {
ALOGD("dtor%s", GetThreadInfo().c_str());
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
Terminate();
DestroyAudioPlayer();
DestroyMix();
DestroyEngine();
- DCHECK(!engine_object_.Get());
- DCHECK(!engine_);
- DCHECK(!output_mix_.Get());
- DCHECK(!player_);
- DCHECK(!simple_buffer_queue_);
- DCHECK(!volume_);
+ RTC_DCHECK(!engine_object_.Get());
+ RTC_DCHECK(!engine_);
+ RTC_DCHECK(!output_mix_.Get());
+ RTC_DCHECK(!player_);
+ RTC_DCHECK(!simple_buffer_queue_);
+ RTC_DCHECK(!volume_);
}
int OpenSLESPlayer::Init() {
ALOGD("Init%s", GetThreadInfo().c_str());
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
return 0;
}
int OpenSLESPlayer::Terminate() {
ALOGD("Terminate%s", GetThreadInfo().c_str());
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
StopPlayout();
return 0;
}
int OpenSLESPlayer::InitPlayout() {
ALOGD("InitPlayout%s", GetThreadInfo().c_str());
- DCHECK(thread_checker_.CalledOnValidThread());
- DCHECK(!initialized_);
- DCHECK(!playing_);
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(!initialized_);
+ RTC_DCHECK(!playing_);
CreateEngine();
CreateMix();
initialized_ = true;
@@ -99,9 +100,9 @@ int OpenSLESPlayer::InitPlayout() {
int OpenSLESPlayer::StartPlayout() {
ALOGD("StartPlayout%s", GetThreadInfo().c_str());
- DCHECK(thread_checker_.CalledOnValidThread());
- DCHECK(initialized_);
- DCHECK(!playing_);
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(initialized_);
+ RTC_DCHECK(!playing_);
// The number of lower latency audio players is limited, hence we create the
// audio player in Start() and destroy it in Stop().
CreateAudioPlayer();
@@ -117,13 +118,13 @@ int OpenSLESPlayer::StartPlayout() {
// state, adding buffers will implicitly start playback.
RETURN_ON_ERROR((*player_)->SetPlayState(player_, SL_PLAYSTATE_PLAYING), -1);
playing_ = (GetPlayState() == SL_PLAYSTATE_PLAYING);
- DCHECK(playing_);
+ RTC_DCHECK(playing_);
return 0;
}
int OpenSLESPlayer::StopPlayout() {
ALOGD("StopPlayout%s", GetThreadInfo().c_str());
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
if (!initialized_ || !playing_) {
return 0;
}
@@ -135,8 +136,8 @@ int OpenSLESPlayer::StopPlayout() {
// Verify that the buffer queue is in fact cleared as it should.
SLAndroidSimpleBufferQueueState buffer_queue_state;
(*simple_buffer_queue_)->GetState(simple_buffer_queue_, &buffer_queue_state);
- DCHECK_EQ(0u, buffer_queue_state.count);
- DCHECK_EQ(0u, buffer_queue_state.index);
+ RTC_DCHECK_EQ(0u, buffer_queue_state.count);
+ RTC_DCHECK_EQ(0u, buffer_queue_state.index);
#endif
// The number of lower latency audio players is limited, hence we create the
// audio player in Start() and destroy it in Stop().
@@ -170,7 +171,7 @@ int OpenSLESPlayer::SpeakerVolume(uint32_t& volume) const {
void OpenSLESPlayer::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
ALOGD("AttachAudioBuffer");
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
audio_device_buffer_ = audioBuffer;
const int sample_rate_hz = audio_parameters_.sample_rate();
ALOGD("SetPlayoutSampleRate(%d)", sample_rate_hz);
@@ -178,15 +179,16 @@ void OpenSLESPlayer::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
const int channels = audio_parameters_.channels();
ALOGD("SetPlayoutChannels(%d)", channels);
audio_device_buffer_->SetPlayoutChannels(channels);
- CHECK(audio_device_buffer_);
+ RTC_CHECK(audio_device_buffer_);
AllocateDataBuffers();
}
-SLDataFormat_PCM OpenSLESPlayer::CreatePCMConfiguration(int channels,
- int sample_rate,
- int bits_per_sample) {
+SLDataFormat_PCM OpenSLESPlayer::CreatePCMConfiguration(
+ int channels,
+ int sample_rate,
+ size_t bits_per_sample) {
ALOGD("CreatePCMConfiguration");
- CHECK_EQ(bits_per_sample, SL_PCMSAMPLEFORMAT_FIXED_16);
+ RTC_CHECK_EQ(bits_per_sample, SL_PCMSAMPLEFORMAT_FIXED_16);
SLDataFormat_PCM format;
format.formatType = SL_DATAFORMAT_PCM;
format.numChannels = static_cast<SLuint32>(channels);
@@ -211,7 +213,7 @@ SLDataFormat_PCM OpenSLESPlayer::CreatePCMConfiguration(int channels,
format.samplesPerSec = SL_SAMPLINGRATE_48;
break;
default:
- CHECK(false) << "Unsupported sample rate: " << sample_rate;
+ RTC_CHECK(false) << "Unsupported sample rate: " << sample_rate;
}
format.bitsPerSample = SL_PCMSAMPLEFORMAT_FIXED_16;
format.containerSize = SL_PCMSAMPLEFORMAT_FIXED_16;
@@ -221,17 +223,18 @@ SLDataFormat_PCM OpenSLESPlayer::CreatePCMConfiguration(int channels,
else if (format.numChannels == 2)
format.channelMask = SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT;
else
- CHECK(false) << "Unsupported number of channels: " << format.numChannels;
+ RTC_CHECK(false) << "Unsupported number of channels: "
+ << format.numChannels;
return format;
}
void OpenSLESPlayer::AllocateDataBuffers() {
ALOGD("AllocateDataBuffers");
- DCHECK(thread_checker_.CalledOnValidThread());
- DCHECK(!simple_buffer_queue_);
- CHECK(audio_device_buffer_);
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(!simple_buffer_queue_);
+ RTC_CHECK(audio_device_buffer_);
bytes_per_buffer_ = audio_parameters_.GetBytesPerBuffer();
- ALOGD("native buffer size: %d", bytes_per_buffer_);
+ ALOGD("native buffer size: %" PRIuS, bytes_per_buffer_);
// Create a modified audio buffer class which allows us to ask for any number
// of samples (and not only multiple of 10ms) to match the native OpenSL ES
// buffer size.
@@ -240,8 +243,9 @@ void OpenSLESPlayer::AllocateDataBuffers() {
audio_parameters_.sample_rate()));
// Each buffer must be of this size to avoid unnecessary memcpy while caching
// data between successive callbacks.
- const int required_buffer_size = fine_buffer_->RequiredBufferSizeBytes();
- ALOGD("required buffer size: %d", required_buffer_size);
+ const size_t required_buffer_size =
+ fine_buffer_->RequiredPlayoutBufferSizeBytes();
+ ALOGD("required buffer size: %" PRIuS, required_buffer_size);
for (int i = 0; i < kNumOfOpenSLESBuffers; ++i) {
audio_buffers_[i].reset(new SLint8[required_buffer_size]);
}
@@ -249,10 +253,10 @@ void OpenSLESPlayer::AllocateDataBuffers() {
bool OpenSLESPlayer::CreateEngine() {
ALOGD("CreateEngine");
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
if (engine_object_.Get())
return true;
- DCHECK(!engine_);
+ RTC_DCHECK(!engine_);
const SLEngineOption option[] = {
{SL_ENGINEOPTION_THREADSAFE, static_cast<SLuint32>(SL_BOOLEAN_TRUE)}};
RETURN_ON_ERROR(
@@ -268,7 +272,7 @@ bool OpenSLESPlayer::CreateEngine() {
void OpenSLESPlayer::DestroyEngine() {
ALOGD("DestroyEngine");
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
if (!engine_object_.Get())
return;
engine_ = nullptr;
@@ -277,8 +281,8 @@ void OpenSLESPlayer::DestroyEngine() {
bool OpenSLESPlayer::CreateMix() {
ALOGD("CreateMix");
- DCHECK(thread_checker_.CalledOnValidThread());
- DCHECK(engine_);
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(engine_);
if (output_mix_.Get())
return true;
@@ -293,7 +297,7 @@ bool OpenSLESPlayer::CreateMix() {
void OpenSLESPlayer::DestroyMix() {
ALOGD("DestroyMix");
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
if (!output_mix_.Get())
return;
output_mix_.Reset();
@@ -301,14 +305,14 @@ void OpenSLESPlayer::DestroyMix() {
bool OpenSLESPlayer::CreateAudioPlayer() {
ALOGD("CreateAudioPlayer");
- DCHECK(thread_checker_.CalledOnValidThread());
- DCHECK(engine_object_.Get());
- DCHECK(output_mix_.Get());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(engine_object_.Get());
+ RTC_DCHECK(output_mix_.Get());
if (player_object_.Get())
return true;
- DCHECK(!player_);
- DCHECK(!simple_buffer_queue_);
- DCHECK(!volume_);
+ RTC_DCHECK(!player_);
+ RTC_DCHECK(!simple_buffer_queue_);
+ RTC_DCHECK(!volume_);
// source: Android Simple Buffer Queue Data Locator is source.
SLDataLocator_AndroidSimpleBufferQueue simple_buffer_queue = {
@@ -386,7 +390,7 @@ bool OpenSLESPlayer::CreateAudioPlayer() {
void OpenSLESPlayer::DestroyAudioPlayer() {
ALOGD("DestroyAudioPlayer");
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
if (!player_object_.Get())
return;
player_object_.Reset();
@@ -404,7 +408,7 @@ void OpenSLESPlayer::SimpleBufferQueueCallback(
}
void OpenSLESPlayer::FillBufferQueue() {
- DCHECK(thread_checker_opensles_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_opensles_.CalledOnValidThread());
SLuint32 state = GetPlayState();
if (state != SL_PLAYSTATE_PLAYING) {
ALOGW("Buffer callback in non-playing state!");
@@ -418,7 +422,7 @@ void OpenSLESPlayer::EnqueuePlayoutData() {
// to adjust for differences in buffer size between WebRTC (10ms) and native
// OpenSL ES.
SLint8* audio_ptr = audio_buffers_[buffer_index_].get();
- fine_buffer_->GetBufferData(audio_ptr);
+ fine_buffer_->GetPlayoutData(audio_ptr);
// Enqueue the decoded audio buffer for playback.
SLresult err =
(*simple_buffer_queue_)
@@ -430,7 +434,7 @@ void OpenSLESPlayer::EnqueuePlayoutData() {
}
SLuint32 OpenSLESPlayer::GetPlayState() const {
- DCHECK(player_);
+ RTC_DCHECK(player_);
SLuint32 state;
SLresult err = (*player_)->GetPlayState(player_, &state);
if (SL_RESULT_SUCCESS != err) {
diff --git a/chromium/third_party/webrtc/modules/audio_device/android/opensles_player.h b/chromium/third_party/webrtc/modules/audio_device/android/opensles_player.h
index 2217fa09932..d96388b6b53 100644
--- a/chromium/third_party/webrtc/modules/audio_device/android/opensles_player.h
+++ b/chromium/third_party/webrtc/modules/audio_device/android/opensles_player.h
@@ -33,7 +33,7 @@ class FineAudioBuffer;
//
// An instance must be created and destroyed on one and the same thread.
// All public methods must also be called on the same thread. A thread checker
-// will DCHECK if any method is called on an invalid thread. Decoded audio
+// will RTC_DCHECK if any method is called on an invalid thread. Decoded audio
// buffers are requested on a dedicated internal thread managed by the OpenSL
// ES layer.
//
@@ -96,7 +96,7 @@ class OpenSLESPlayer {
// Configures the SL_DATAFORMAT_PCM structure.
SLDataFormat_PCM CreatePCMConfiguration(int channels,
int sample_rate,
- int bits_per_sample);
+ size_t bits_per_sample);
// Allocate memory for audio buffers which will be used to render audio
// via the SLAndroidSimpleBufferQueueItf interface.
@@ -145,7 +145,7 @@ class OpenSLESPlayer {
// Number of bytes per audio buffer in each |audio_buffers_[i]|.
// Typical sizes are 480 or 512 bytes corresponding to native output buffer
// sizes of 240 or 256 audio frames respectively.
- int bytes_per_buffer_;
+ size_t bytes_per_buffer_;
// Queue of audio buffers to be used by the player object for rendering
// audio. They will be used in a Round-robin way and the size of each buffer
diff --git a/chromium/third_party/webrtc/modules/audio_device/audio_device.gypi b/chromium/third_party/webrtc/modules/audio_device/audio_device.gypi
index 9c9380eb696..0678d33802d 100644
--- a/chromium/third_party/webrtc/modules/audio_device/audio_device.gypi
+++ b/chromium/third_party/webrtc/modules/audio_device/audio_device.gypi
@@ -43,6 +43,8 @@
'dummy/audio_device_dummy.h',
'dummy/file_audio_device.cc',
'dummy/file_audio_device.h',
+ 'fine_audio_buffer.cc',
+ 'fine_audio_buffer.h',
],
'conditions': [
['OS=="linux"', {
@@ -93,8 +95,6 @@
'android/audio_track_jni.h',
'android/build_info.cc',
'android/build_info.h',
- 'android/fine_audio_buffer.cc',
- 'android/fine_audio_buffer.h',
'android/opensles_common.cc',
'android/opensles_common.h',
'android/opensles_player.cc',
@@ -103,6 +103,7 @@
'audio_device_impl.h',
'ios/audio_device_ios.h',
'ios/audio_device_ios.mm',
+ 'ios/audio_device_not_implemented_ios.mm',
'linux/alsasymboltable_linux.cc',
'linux/alsasymboltable_linux.h',
'linux/audio_device_alsa_linux.cc',
@@ -177,6 +178,7 @@
'-framework AudioToolbox',
'-framework AVFoundation',
'-framework Foundation',
+ '-framework UIKit',
],
},
},
@@ -234,25 +236,6 @@
],
},
], # targets
- 'conditions': [
- ['test_isolation_mode != "noop"', {
- 'targets': [
- {
- 'target_name': 'audio_device_tests_run',
- 'type': 'none',
- 'dependencies': [
- 'audio_device_tests',
- ],
- 'includes': [
- '../../build/isolate.gypi',
- ],
- 'sources': [
- 'audio_device_tests.isolate',
- ],
- },
- ],
- }],
- ],
}], # include_tests
],
}
diff --git a/chromium/third_party/webrtc/modules/audio_device/audio_device_buffer.cc b/chromium/third_party/webrtc/modules/audio_device/audio_device_buffer.cc
index 12b28b3ab3f..cc6d6bb1f78 100644
--- a/chromium/third_party/webrtc/modules/audio_device/audio_device_buffer.cc
+++ b/chromium/third_party/webrtc/modules/audio_device/audio_device_buffer.cc
@@ -13,6 +13,7 @@
#include <assert.h>
#include <string.h>
+#include "webrtc/base/format_macros.h"
#include "webrtc/modules/audio_device/audio_device_config.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
#include "webrtc/system_wrappers/interface/logging.h"
@@ -130,8 +131,6 @@ int32_t AudioDeviceBuffer::InitRecording()
int32_t AudioDeviceBuffer::SetRecordingSampleRate(uint32_t fsHz)
{
- WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "AudioDeviceBuffer::SetRecordingSampleRate(fsHz=%u)", fsHz);
-
CriticalSectionScoped lock(&_critSect);
_recSampleRate = fsHz;
return 0;
@@ -143,8 +142,6 @@ int32_t AudioDeviceBuffer::SetRecordingSampleRate(uint32_t fsHz)
int32_t AudioDeviceBuffer::SetPlayoutSampleRate(uint32_t fsHz)
{
- WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "AudioDeviceBuffer::SetPlayoutSampleRate(fsHz=%u)", fsHz);
-
CriticalSectionScoped lock(&_critSect);
_playSampleRate = fsHz;
return 0;
@@ -174,8 +171,6 @@ int32_t AudioDeviceBuffer::PlayoutSampleRate() const
int32_t AudioDeviceBuffer::SetRecordingChannels(uint8_t channels)
{
- WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "AudioDeviceBuffer::SetRecordingChannels(channels=%u)", channels);
-
CriticalSectionScoped lock(&_critSect);
_recChannels = channels;
_recBytesPerSample = 2*channels; // 16 bits per sample in mono, 32 bits in stereo
@@ -188,8 +183,6 @@ int32_t AudioDeviceBuffer::SetRecordingChannels(uint8_t channels)
int32_t AudioDeviceBuffer::SetPlayoutChannels(uint8_t channels)
{
- WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "AudioDeviceBuffer::SetPlayoutChannels(channels=%u)", channels);
-
CriticalSectionScoped lock(&_critSect);
_playChannels = channels;
// 16 bits per sample in mono, 32 bits in stereo
@@ -388,7 +381,7 @@ int32_t AudioDeviceBuffer::StopOutputFileRecording()
// ----------------------------------------------------------------------------
int32_t AudioDeviceBuffer::SetRecordedBuffer(const void* audioBuffer,
- uint32_t nSamples)
+ size_t nSamples)
{
CriticalSectionScoped lock(&_critSect);
@@ -422,7 +415,7 @@ int32_t AudioDeviceBuffer::SetRecordedBuffer(const void* audioBuffer,
}
// exctract left or right channel from input buffer to the local buffer
- for (uint32_t i = 0; i < _recSamples; i++)
+ for (size_t i = 0; i < _recSamples; i++)
{
*ptr16Out = *ptr16In;
ptr16Out++;
@@ -490,10 +483,10 @@ int32_t AudioDeviceBuffer::DeliverRecordedData()
// RequestPlayoutData
// ----------------------------------------------------------------------------
-int32_t AudioDeviceBuffer::RequestPlayoutData(uint32_t nSamples)
+int32_t AudioDeviceBuffer::RequestPlayoutData(size_t nSamples)
{
uint32_t playSampleRate = 0;
- uint8_t playBytesPerSample = 0;
+ size_t playBytesPerSample = 0;
uint8_t playChannels = 0;
{
CriticalSectionScoped lock(&_critSect);
@@ -528,7 +521,7 @@ int32_t AudioDeviceBuffer::RequestPlayoutData(uint32_t nSamples)
}
}
- uint32_t nSamplesOut(0);
+ size_t nSamplesOut(0);
CriticalSectionScoped lock(&_critSectCb);
@@ -571,7 +564,7 @@ int32_t AudioDeviceBuffer::GetPlayoutData(void* audioBuffer)
if (_playSize > kMaxBufferSizeBytes)
{
WEBRTC_TRACE(kTraceError, kTraceUtility, _id,
- "_playSize %i exceeds kMaxBufferSizeBytes in "
+ "_playSize %" PRIuS " exceeds kMaxBufferSizeBytes in "
"AudioDeviceBuffer::GetPlayoutData", _playSize);
assert(false);
return -1;
diff --git a/chromium/third_party/webrtc/modules/audio_device/audio_device_buffer.h b/chromium/third_party/webrtc/modules/audio_device/audio_device_buffer.h
index a89927f711b..63a05ef82ac 100644
--- a/chromium/third_party/webrtc/modules/audio_device/audio_device_buffer.h
+++ b/chromium/third_party/webrtc/modules/audio_device/audio_device_buffer.h
@@ -19,7 +19,7 @@ namespace webrtc {
class CriticalSectionWrapper;
const uint32_t kPulsePeriodMs = 1000;
-const uint32_t kMaxBufferSizeBytes = 3840; // 10ms in stereo @ 96kHz
+const size_t kMaxBufferSizeBytes = 3840; // 10ms in stereo @ 96kHz
class AudioDeviceObserver;
@@ -50,7 +50,7 @@ public:
AudioDeviceModule::ChannelType& channel) const;
virtual int32_t SetRecordedBuffer(const void* audioBuffer,
- uint32_t nSamples);
+ size_t nSamples);
int32_t SetCurrentMicLevel(uint32_t level);
virtual void SetVQEData(int playDelayMS,
int recDelayMS,
@@ -58,7 +58,7 @@ public:
virtual int32_t DeliverRecordedData();
uint32_t NewMicLevel() const;
- virtual int32_t RequestPlayoutData(uint32_t nSamples);
+ virtual int32_t RequestPlayoutData(size_t nSamples);
virtual int32_t GetPlayoutData(void* audioBuffer);
int32_t StartInputFileRecording(
@@ -87,22 +87,22 @@ private:
AudioDeviceModule::ChannelType _recChannel;
// 2 or 4 depending on mono or stereo
- uint8_t _recBytesPerSample;
- uint8_t _playBytesPerSample;
+ size_t _recBytesPerSample;
+ size_t _playBytesPerSample;
// 10ms in stereo @ 96kHz
int8_t _recBuffer[kMaxBufferSizeBytes];
// one sample <=> 2 or 4 bytes
- uint32_t _recSamples;
- uint32_t _recSize; // in bytes
+ size_t _recSamples;
+ size_t _recSize; // in bytes
// 10ms in stereo @ 96kHz
int8_t _playBuffer[kMaxBufferSizeBytes];
// one sample <=> 2 or 4 bytes
- uint32_t _playSamples;
- uint32_t _playSize; // in bytes
+ size_t _playSamples;
+ size_t _playSize; // in bytes
FileWrapper& _recFile;
FileWrapper& _playFile;
diff --git a/chromium/third_party/webrtc/modules/audio_device/audio_device_generic.cc b/chromium/third_party/webrtc/modules/audio_device/audio_device_generic.cc
index 958abbf4d2c..501faba7cf7 100644
--- a/chromium/third_party/webrtc/modules/audio_device/audio_device_generic.cc
+++ b/chromium/third_party/webrtc/modules/audio_device/audio_device_generic.cc
@@ -9,73 +9,88 @@
*/
#include "webrtc/modules/audio_device/audio_device_generic.h"
-#include "webrtc/system_wrappers/interface/trace.h"
+#include "webrtc/base/logging.h"
namespace webrtc {
int32_t AudioDeviceGeneric::SetRecordingSampleRate(
- const uint32_t samplesPerSec)
-{
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
- "Set recording sample rate not supported on this platform");
- return -1;
+ const uint32_t samplesPerSec) {
+ LOG_F(LS_ERROR) << "Not supported on this platform";
+ return -1;
}
-int32_t AudioDeviceGeneric::SetPlayoutSampleRate(
- const uint32_t samplesPerSec)
-{
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
- "Set playout sample rate not supported on this platform");
- return -1;
+int32_t AudioDeviceGeneric::SetPlayoutSampleRate(const uint32_t samplesPerSec) {
+ LOG_F(LS_ERROR) << "Not supported on this platform";
+ return -1;
}
-int32_t AudioDeviceGeneric::SetLoudspeakerStatus(bool enable)
-{
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
- "Set loudspeaker status not supported on this platform");
- return -1;
+int32_t AudioDeviceGeneric::SetLoudspeakerStatus(bool enable) {
+ LOG_F(LS_ERROR) << "Not supported on this platform";
+ return -1;
}
-int32_t AudioDeviceGeneric::GetLoudspeakerStatus(bool& enable) const
-{
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
- "Get loudspeaker status not supported on this platform");
- return -1;
+int32_t AudioDeviceGeneric::GetLoudspeakerStatus(bool& enable) const {
+ LOG_F(LS_ERROR) << "Not supported on this platform";
+ return -1;
}
-int32_t AudioDeviceGeneric::ResetAudioDevice()
-{
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
- "Reset audio device not supported on this platform");
- return -1;
+int32_t AudioDeviceGeneric::ResetAudioDevice() {
+ LOG_F(LS_ERROR) << "Not supported on this platform";
+ return -1;
}
int32_t AudioDeviceGeneric::SoundDeviceControl(unsigned int par1,
- unsigned int par2, unsigned int par3, unsigned int par4)
-{
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
- "Sound device control not supported on this platform");
- return -1;
+ unsigned int par2,
+ unsigned int par3,
+ unsigned int par4) {
+ LOG_F(LS_ERROR) << "Not supported on this platform";
+ return -1;
}
bool AudioDeviceGeneric::BuiltInAECIsAvailable() const {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
- "Built-in AEC not supported on this platform");
+ LOG_F(LS_ERROR) << "Not supported on this platform";
return false;
}
-int32_t AudioDeviceGeneric::EnableBuiltInAEC(bool enable)
-{
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
- "Built-in AEC not supported on this platform");
- return -1;
+int32_t AudioDeviceGeneric::EnableBuiltInAEC(bool enable) {
+ LOG_F(LS_ERROR) << "Not supported on this platform";
+ return -1;
}
-bool AudioDeviceGeneric::BuiltInAECIsEnabled() const
-{
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
- "Windows AEC not supported on this platform");
- return false;
+bool AudioDeviceGeneric::BuiltInAECIsEnabled() const {
+ LOG_F(LS_ERROR) << "Not supported on this platform";
+ return false;
+}
+
+bool AudioDeviceGeneric::BuiltInAGCIsAvailable() const {
+ LOG_F(LS_ERROR) << "Not supported on this platform";
+ return false;
+}
+
+int32_t AudioDeviceGeneric::EnableBuiltInAGC(bool enable) {
+ LOG_F(LS_ERROR) << "Not supported on this platform";
+ return -1;
+}
+
+bool AudioDeviceGeneric::BuiltInNSIsAvailable() const {
+ LOG_F(LS_ERROR) << "Not supported on this platform";
+ return false;
+}
+
+int32_t AudioDeviceGeneric::EnableBuiltInNS(bool enable) {
+ LOG_F(LS_ERROR) << "Not supported on this platform";
+ return -1;
+}
+
+int AudioDeviceGeneric::GetPlayoutAudioParameters(
+ AudioParameters* params) const {
+ LOG_F(LS_ERROR) << "Not supported on this platform";
+ return -1;
+}
+int AudioDeviceGeneric::GetRecordAudioParameters(
+ AudioParameters* params) const {
+ LOG_F(LS_ERROR) << "Not supported on this platform";
+ return -1;
}
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_device/audio_device_generic.h b/chromium/third_party/webrtc/modules/audio_device/audio_device_generic.h
index 800cc395a8d..c76ea52428a 100644
--- a/chromium/third_party/webrtc/modules/audio_device/audio_device_generic.h
+++ b/chromium/third_party/webrtc/modules/audio_device/audio_device_generic.h
@@ -16,168 +16,164 @@
namespace webrtc {
-class AudioDeviceGeneric
-{
+class AudioDeviceGeneric {
public:
-
- // Retrieve the currently utilized audio layer
- virtual int32_t ActiveAudioLayer(
- AudioDeviceModule::AudioLayer& audioLayer) const = 0;
-
- // Main initializaton and termination
- virtual int32_t Init() = 0;
- virtual int32_t Terminate() = 0;
- virtual bool Initialized() const = 0;
-
- // Device enumeration
- virtual int16_t PlayoutDevices() = 0;
- virtual int16_t RecordingDevices() = 0;
- virtual int32_t PlayoutDeviceName(
- uint16_t index,
- char name[kAdmMaxDeviceNameSize],
- char guid[kAdmMaxGuidSize]) = 0;
- virtual int32_t RecordingDeviceName(
- uint16_t index,
- char name[kAdmMaxDeviceNameSize],
- char guid[kAdmMaxGuidSize]) = 0;
-
- // Device selection
- virtual int32_t SetPlayoutDevice(uint16_t index) = 0;
- virtual int32_t SetPlayoutDevice(
- AudioDeviceModule::WindowsDeviceType device) = 0;
- virtual int32_t SetRecordingDevice(uint16_t index) = 0;
- virtual int32_t SetRecordingDevice(
- AudioDeviceModule::WindowsDeviceType device) = 0;
-
- // Audio transport initialization
- virtual int32_t PlayoutIsAvailable(bool& available) = 0;
- virtual int32_t InitPlayout() = 0;
- virtual bool PlayoutIsInitialized() const = 0;
- virtual int32_t RecordingIsAvailable(bool& available) = 0;
- virtual int32_t InitRecording() = 0;
- virtual bool RecordingIsInitialized() const = 0;
-
- // Audio transport control
- virtual int32_t StartPlayout() = 0;
- virtual int32_t StopPlayout() = 0;
- virtual bool Playing() const = 0;
- virtual int32_t StartRecording() = 0;
- virtual int32_t StopRecording() = 0;
- virtual bool Recording() const = 0;
-
- // Microphone Automatic Gain Control (AGC)
- virtual int32_t SetAGC(bool enable) = 0;
- virtual bool AGC() const = 0;
-
- // Volume control based on the Windows Wave API (Windows only)
- virtual int32_t SetWaveOutVolume(uint16_t volumeLeft,
- uint16_t volumeRight) = 0;
- virtual int32_t WaveOutVolume(uint16_t& volumeLeft,
- uint16_t& volumeRight) const = 0;
-
- // Audio mixer initialization
- virtual int32_t InitSpeaker() = 0;
- virtual bool SpeakerIsInitialized() const = 0;
- virtual int32_t InitMicrophone() = 0;
- virtual bool MicrophoneIsInitialized() const = 0;
-
- // Speaker volume controls
- virtual int32_t SpeakerVolumeIsAvailable(bool& available) = 0;
- virtual int32_t SetSpeakerVolume(uint32_t volume) = 0;
- virtual int32_t SpeakerVolume(uint32_t& volume) const = 0;
- virtual int32_t MaxSpeakerVolume(uint32_t& maxVolume) const = 0;
- virtual int32_t MinSpeakerVolume(uint32_t& minVolume) const = 0;
- virtual int32_t SpeakerVolumeStepSize(
- uint16_t& stepSize) const = 0;
-
- // Microphone volume controls
- virtual int32_t MicrophoneVolumeIsAvailable(bool& available) = 0;
- virtual int32_t SetMicrophoneVolume(uint32_t volume) = 0;
- virtual int32_t MicrophoneVolume(uint32_t& volume) const = 0;
- virtual int32_t MaxMicrophoneVolume(
- uint32_t& maxVolume) const = 0;
- virtual int32_t MinMicrophoneVolume(
- uint32_t& minVolume) const = 0;
- virtual int32_t MicrophoneVolumeStepSize(
- uint16_t& stepSize) const = 0;
-
- // Speaker mute control
- virtual int32_t SpeakerMuteIsAvailable(bool& available) = 0;
- virtual int32_t SetSpeakerMute(bool enable) = 0;
- virtual int32_t SpeakerMute(bool& enabled) const = 0;
-
- // Microphone mute control
- virtual int32_t MicrophoneMuteIsAvailable(bool& available) = 0;
- virtual int32_t SetMicrophoneMute(bool enable) = 0;
- virtual int32_t MicrophoneMute(bool& enabled) const = 0;
-
- // Microphone boost control
- virtual int32_t MicrophoneBoostIsAvailable(bool& available) = 0;
- virtual int32_t SetMicrophoneBoost(bool enable) = 0;
- virtual int32_t MicrophoneBoost(bool& enabled) const = 0;
-
- // Stereo support
- virtual int32_t StereoPlayoutIsAvailable(bool& available) = 0;
- virtual int32_t SetStereoPlayout(bool enable) = 0;
- virtual int32_t StereoPlayout(bool& enabled) const = 0;
- virtual int32_t StereoRecordingIsAvailable(bool& available) = 0;
- virtual int32_t SetStereoRecording(bool enable) = 0;
- virtual int32_t StereoRecording(bool& enabled) const = 0;
-
- // Delay information and control
- virtual int32_t SetPlayoutBuffer(
- const AudioDeviceModule::BufferType type,
- uint16_t sizeMS = 0) = 0;
- virtual int32_t PlayoutBuffer(
- AudioDeviceModule::BufferType& type, uint16_t& sizeMS) const = 0;
- virtual int32_t PlayoutDelay(uint16_t& delayMS) const = 0;
- virtual int32_t RecordingDelay(uint16_t& delayMS) const = 0;
-
- // CPU load
- virtual int32_t CPULoad(uint16_t& load) const = 0;
-
- // Native sample rate controls (samples/sec)
- virtual int32_t SetRecordingSampleRate(
- const uint32_t samplesPerSec);
- virtual int32_t SetPlayoutSampleRate(
- const uint32_t samplesPerSec);
-
- // Speaker audio routing (for mobile devices)
- virtual int32_t SetLoudspeakerStatus(bool enable);
- virtual int32_t GetLoudspeakerStatus(bool& enable) const;
-
- // Reset Audio Device (for mobile devices)
- virtual int32_t ResetAudioDevice();
-
- // Sound Audio Device control (for WinCE only)
- virtual int32_t SoundDeviceControl(unsigned int par1 = 0,
- unsigned int par2 = 0,
- unsigned int par3 = 0,
- unsigned int par4 = 0);
-
- // Android only
- virtual bool BuiltInAECIsAvailable() const;
-
- // Windows Core Audio and Android only.
- virtual int32_t EnableBuiltInAEC(bool enable);
-
- // Windows Core Audio only.
- virtual bool BuiltInAECIsEnabled() const;
-
-public:
- virtual bool PlayoutWarning() const = 0;
- virtual bool PlayoutError() const = 0;
- virtual bool RecordingWarning() const = 0;
- virtual bool RecordingError() const = 0;
- virtual void ClearPlayoutWarning() = 0;
- virtual void ClearPlayoutError() = 0;
- virtual void ClearRecordingWarning() = 0;
- virtual void ClearRecordingError() = 0;
-
-public:
- virtual void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) = 0;
-
- virtual ~AudioDeviceGeneric() {}
+ // Retrieve the currently utilized audio layer
+ virtual int32_t ActiveAudioLayer(
+ AudioDeviceModule::AudioLayer& audioLayer) const = 0;
+
+ // Main initializaton and termination
+ virtual int32_t Init() = 0;
+ virtual int32_t Terminate() = 0;
+ virtual bool Initialized() const = 0;
+
+ // Device enumeration
+ virtual int16_t PlayoutDevices() = 0;
+ virtual int16_t RecordingDevices() = 0;
+ virtual int32_t PlayoutDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) = 0;
+ virtual int32_t RecordingDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) = 0;
+
+ // Device selection
+ virtual int32_t SetPlayoutDevice(uint16_t index) = 0;
+ virtual int32_t SetPlayoutDevice(
+ AudioDeviceModule::WindowsDeviceType device) = 0;
+ virtual int32_t SetRecordingDevice(uint16_t index) = 0;
+ virtual int32_t SetRecordingDevice(
+ AudioDeviceModule::WindowsDeviceType device) = 0;
+
+ // Audio transport initialization
+ virtual int32_t PlayoutIsAvailable(bool& available) = 0;
+ virtual int32_t InitPlayout() = 0;
+ virtual bool PlayoutIsInitialized() const = 0;
+ virtual int32_t RecordingIsAvailable(bool& available) = 0;
+ virtual int32_t InitRecording() = 0;
+ virtual bool RecordingIsInitialized() const = 0;
+
+ // Audio transport control
+ virtual int32_t StartPlayout() = 0;
+ virtual int32_t StopPlayout() = 0;
+ virtual bool Playing() const = 0;
+ virtual int32_t StartRecording() = 0;
+ virtual int32_t StopRecording() = 0;
+ virtual bool Recording() const = 0;
+
+ // Microphone Automatic Gain Control (AGC)
+ virtual int32_t SetAGC(bool enable) = 0;
+ virtual bool AGC() const = 0;
+
+ // Volume control based on the Windows Wave API (Windows only)
+ virtual int32_t SetWaveOutVolume(uint16_t volumeLeft,
+ uint16_t volumeRight) = 0;
+ virtual int32_t WaveOutVolume(uint16_t& volumeLeft,
+ uint16_t& volumeRight) const = 0;
+
+ // Audio mixer initialization
+ virtual int32_t InitSpeaker() = 0;
+ virtual bool SpeakerIsInitialized() const = 0;
+ virtual int32_t InitMicrophone() = 0;
+ virtual bool MicrophoneIsInitialized() const = 0;
+
+ // Speaker volume controls
+ virtual int32_t SpeakerVolumeIsAvailable(bool& available) = 0;
+ virtual int32_t SetSpeakerVolume(uint32_t volume) = 0;
+ virtual int32_t SpeakerVolume(uint32_t& volume) const = 0;
+ virtual int32_t MaxSpeakerVolume(uint32_t& maxVolume) const = 0;
+ virtual int32_t MinSpeakerVolume(uint32_t& minVolume) const = 0;
+ virtual int32_t SpeakerVolumeStepSize(uint16_t& stepSize) const = 0;
+
+ // Microphone volume controls
+ virtual int32_t MicrophoneVolumeIsAvailable(bool& available) = 0;
+ virtual int32_t SetMicrophoneVolume(uint32_t volume) = 0;
+ virtual int32_t MicrophoneVolume(uint32_t& volume) const = 0;
+ virtual int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const = 0;
+ virtual int32_t MinMicrophoneVolume(uint32_t& minVolume) const = 0;
+ virtual int32_t MicrophoneVolumeStepSize(uint16_t& stepSize) const = 0;
+
+ // Speaker mute control
+ virtual int32_t SpeakerMuteIsAvailable(bool& available) = 0;
+ virtual int32_t SetSpeakerMute(bool enable) = 0;
+ virtual int32_t SpeakerMute(bool& enabled) const = 0;
+
+ // Microphone mute control
+ virtual int32_t MicrophoneMuteIsAvailable(bool& available) = 0;
+ virtual int32_t SetMicrophoneMute(bool enable) = 0;
+ virtual int32_t MicrophoneMute(bool& enabled) const = 0;
+
+ // Microphone boost control
+ virtual int32_t MicrophoneBoostIsAvailable(bool& available) = 0;
+ virtual int32_t SetMicrophoneBoost(bool enable) = 0;
+ virtual int32_t MicrophoneBoost(bool& enabled) const = 0;
+
+ // Stereo support
+ virtual int32_t StereoPlayoutIsAvailable(bool& available) = 0;
+ virtual int32_t SetStereoPlayout(bool enable) = 0;
+ virtual int32_t StereoPlayout(bool& enabled) const = 0;
+ virtual int32_t StereoRecordingIsAvailable(bool& available) = 0;
+ virtual int32_t SetStereoRecording(bool enable) = 0;
+ virtual int32_t StereoRecording(bool& enabled) const = 0;
+
+ // Delay information and control
+ virtual int32_t SetPlayoutBuffer(const AudioDeviceModule::BufferType type,
+ uint16_t sizeMS = 0) = 0;
+ virtual int32_t PlayoutBuffer(AudioDeviceModule::BufferType& type,
+ uint16_t& sizeMS) const = 0;
+ virtual int32_t PlayoutDelay(uint16_t& delayMS) const = 0;
+ virtual int32_t RecordingDelay(uint16_t& delayMS) const = 0;
+
+ // CPU load
+ virtual int32_t CPULoad(uint16_t& load) const = 0;
+
+ // Native sample rate controls (samples/sec)
+ virtual int32_t SetRecordingSampleRate(const uint32_t samplesPerSec);
+ virtual int32_t SetPlayoutSampleRate(const uint32_t samplesPerSec);
+
+ // Speaker audio routing (for mobile devices)
+ virtual int32_t SetLoudspeakerStatus(bool enable);
+ virtual int32_t GetLoudspeakerStatus(bool& enable) const;
+
+ // Reset Audio Device (for mobile devices)
+ virtual int32_t ResetAudioDevice();
+
+ // Sound Audio Device control (for WinCE only)
+ virtual int32_t SoundDeviceControl(unsigned int par1 = 0,
+ unsigned int par2 = 0,
+ unsigned int par3 = 0,
+ unsigned int par4 = 0);
+
+ // Android only
+ virtual bool BuiltInAECIsAvailable() const;
+ virtual bool BuiltInAGCIsAvailable() const;
+ virtual bool BuiltInNSIsAvailable() const;
+
+ // Windows Core Audio and Android only.
+ virtual int32_t EnableBuiltInAEC(bool enable);
+ virtual int32_t EnableBuiltInAGC(bool enable);
+ virtual int32_t EnableBuiltInNS(bool enable);
+
+ // Windows Core Audio only.
+ virtual bool BuiltInAECIsEnabled() const;
+
+ // iOS only.
+ // TODO(henrika): add Android support.
+ virtual int GetPlayoutAudioParameters(AudioParameters* params) const;
+ virtual int GetRecordAudioParameters(AudioParameters* params) const;
+
+ virtual bool PlayoutWarning() const = 0;
+ virtual bool PlayoutError() const = 0;
+ virtual bool RecordingWarning() const = 0;
+ virtual bool RecordingError() const = 0;
+ virtual void ClearPlayoutWarning() = 0;
+ virtual void ClearPlayoutError() = 0;
+ virtual void ClearRecordingWarning() = 0;
+ virtual void ClearRecordingError() = 0;
+
+ virtual void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) = 0;
+
+ virtual ~AudioDeviceGeneric() {}
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_device/audio_device_impl.cc b/chromium/third_party/webrtc/modules/audio_device/audio_device_impl.cc
index c29ac626144..02fa9d9e345 100644
--- a/chromium/third_party/webrtc/modules/audio_device/audio_device_impl.cc
+++ b/chromium/third_party/webrtc/modules/audio_device/audio_device_impl.cc
@@ -325,7 +325,7 @@ int32_t AudioDeviceModuleImpl::CreatePlatformSpecificObjects()
if (audioLayer == kPlatformDefaultAudio)
{
// Create iOS Audio Device implementation.
- ptrAudioDevice = new AudioDeviceIOS(Id());
+ ptrAudioDevice = new AudioDeviceIOS();
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "iPhone Audio APIs will be utilized");
}
// END #if defined(WEBRTC_IOS)
@@ -1869,34 +1869,57 @@ int32_t AudioDeviceModuleImpl::SetLoudspeakerStatus(bool enable)
// GetLoudspeakerStatus
// ----------------------------------------------------------------------------
-int32_t AudioDeviceModuleImpl::GetLoudspeakerStatus(bool* enabled) const
-{
- CHECK_INITIALIZED();
+int32_t AudioDeviceModuleImpl::GetLoudspeakerStatus(bool* enabled) const {
+ CHECK_INITIALIZED();
+ if (_ptrAudioDevice->GetLoudspeakerStatus(*enabled) != 0) {
+ return -1;
+ }
+ return 0;
+}
- if (_ptrAudioDevice->GetLoudspeakerStatus(*enabled) != 0)
- {
- return -1;
- }
+bool AudioDeviceModuleImpl::BuiltInAECIsEnabled() const {
+ CHECK_INITIALIZED_BOOL();
+ return _ptrAudioDevice->BuiltInAECIsEnabled();
+}
- return 0;
+bool AudioDeviceModuleImpl::BuiltInAECIsAvailable() const {
+ CHECK_INITIALIZED_BOOL();
+ return _ptrAudioDevice->BuiltInAECIsAvailable();
}
-int32_t AudioDeviceModuleImpl::EnableBuiltInAEC(bool enable)
-{
+int32_t AudioDeviceModuleImpl::EnableBuiltInAEC(bool enable) {
CHECK_INITIALIZED();
return _ptrAudioDevice->EnableBuiltInAEC(enable);
}
-bool AudioDeviceModuleImpl::BuiltInAECIsEnabled() const
-{
- CHECK_INITIALIZED_BOOL();
+bool AudioDeviceModuleImpl::BuiltInAGCIsAvailable() const {
+ CHECK_INITIALIZED_BOOL();
+ return _ptrAudioDevice->BuiltInAGCIsAvailable();
+}
- return _ptrAudioDevice->BuiltInAECIsEnabled();
+int32_t AudioDeviceModuleImpl::EnableBuiltInAGC(bool enable) {
+ CHECK_INITIALIZED();
+ return _ptrAudioDevice->EnableBuiltInAGC(enable);
}
-bool AudioDeviceModuleImpl::BuiltInAECIsAvailable() const {
+bool AudioDeviceModuleImpl::BuiltInNSIsAvailable() const {
CHECK_INITIALIZED_BOOL();
- return _ptrAudioDevice->BuiltInAECIsAvailable();
+ return _ptrAudioDevice->BuiltInNSIsAvailable();
+}
+
+int32_t AudioDeviceModuleImpl::EnableBuiltInNS(bool enable) {
+ CHECK_INITIALIZED();
+ return _ptrAudioDevice->EnableBuiltInNS(enable);
+}
+
+int AudioDeviceModuleImpl::GetPlayoutAudioParameters(
+ AudioParameters* params) const {
+ return _ptrAudioDevice->GetPlayoutAudioParameters(params);
+}
+
+int AudioDeviceModuleImpl::GetRecordAudioParameters(
+ AudioParameters* params) const {
+ return _ptrAudioDevice->GetRecordAudioParameters(params);
}
// ============================================================================
diff --git a/chromium/third_party/webrtc/modules/audio_device/audio_device_impl.h b/chromium/third_party/webrtc/modules/audio_device/audio_device_impl.h
index efc305ba236..51096939640 100644
--- a/chromium/third_party/webrtc/modules/audio_device/audio_device_impl.h
+++ b/chromium/third_party/webrtc/modules/audio_device/audio_device_impl.h
@@ -18,217 +18,212 @@
#include "webrtc/modules/audio_device/audio_device_buffer.h"
#include "webrtc/modules/audio_device/include/audio_device.h"
-namespace webrtc
-{
+namespace webrtc {
class AudioDeviceGeneric;
class AudioManager;
class CriticalSectionWrapper;
-class AudioDeviceModuleImpl : public AudioDeviceModule
-{
-public:
- enum PlatformType
- {
- kPlatformNotSupported = 0,
- kPlatformWin32 = 1,
- kPlatformWinCe = 2,
- kPlatformLinux = 3,
- kPlatformMac = 4,
- kPlatformAndroid = 5,
- kPlatformIOS = 6
- };
-
- int32_t CheckPlatform();
- int32_t CreatePlatformSpecificObjects();
- int32_t AttachAudioBuffer();
-
- AudioDeviceModuleImpl(const int32_t id, const AudioLayer audioLayer);
- virtual ~AudioDeviceModuleImpl();
-
-public: // RefCountedModule
- int64_t TimeUntilNextProcess() override;
- int32_t Process() override;
-
-public:
- // Factory methods (resource allocation/deallocation)
- static AudioDeviceModule* Create(
- const int32_t id,
- const AudioLayer audioLayer = kPlatformDefaultAudio);
-
- // Retrieve the currently utilized audio layer
- int32_t ActiveAudioLayer(AudioLayer* audioLayer) const override;
-
- // Error handling
- ErrorCode LastError() const override;
- int32_t RegisterEventObserver(AudioDeviceObserver* eventCallback) override;
-
- // Full-duplex transportation of PCM audio
- int32_t RegisterAudioCallback(AudioTransport* audioCallback) override;
-
- // Main initializaton and termination
- int32_t Init() override;
- int32_t Terminate() override;
- bool Initialized() const override;
-
- // Device enumeration
- int16_t PlayoutDevices() override;
- int16_t RecordingDevices() override;
- int32_t PlayoutDeviceName(uint16_t index,
+class AudioDeviceModuleImpl : public AudioDeviceModule {
+ public:
+ enum PlatformType {
+ kPlatformNotSupported = 0,
+ kPlatformWin32 = 1,
+ kPlatformWinCe = 2,
+ kPlatformLinux = 3,
+ kPlatformMac = 4,
+ kPlatformAndroid = 5,
+ kPlatformIOS = 6
+ };
+
+ int32_t CheckPlatform();
+ int32_t CreatePlatformSpecificObjects();
+ int32_t AttachAudioBuffer();
+
+ AudioDeviceModuleImpl(const int32_t id, const AudioLayer audioLayer);
+ virtual ~AudioDeviceModuleImpl();
+
+ int64_t TimeUntilNextProcess() override;
+ int32_t Process() override;
+
+ // Factory methods (resource allocation/deallocation)
+ static AudioDeviceModule* Create(
+ const int32_t id,
+ const AudioLayer audioLayer = kPlatformDefaultAudio);
+
+ // Retrieve the currently utilized audio layer
+ int32_t ActiveAudioLayer(AudioLayer* audioLayer) const override;
+
+ // Error handling
+ ErrorCode LastError() const override;
+ int32_t RegisterEventObserver(AudioDeviceObserver* eventCallback) override;
+
+ // Full-duplex transportation of PCM audio
+ int32_t RegisterAudioCallback(AudioTransport* audioCallback) override;
+
+ // Main initializaton and termination
+ int32_t Init() override;
+ int32_t Terminate() override;
+ bool Initialized() const override;
+
+ // Device enumeration
+ int16_t PlayoutDevices() override;
+ int16_t RecordingDevices() override;
+ int32_t PlayoutDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) override;
+ int32_t RecordingDeviceName(uint16_t index,
char name[kAdmMaxDeviceNameSize],
char guid[kAdmMaxGuidSize]) override;
- int32_t RecordingDeviceName(uint16_t index,
- char name[kAdmMaxDeviceNameSize],
- char guid[kAdmMaxGuidSize]) override;
-
- // Device selection
- int32_t SetPlayoutDevice(uint16_t index) override;
- int32_t SetPlayoutDevice(WindowsDeviceType device) override;
- int32_t SetRecordingDevice(uint16_t index) override;
- int32_t SetRecordingDevice(WindowsDeviceType device) override;
-
- // Audio transport initialization
- int32_t PlayoutIsAvailable(bool* available) override;
- int32_t InitPlayout() override;
- bool PlayoutIsInitialized() const override;
- int32_t RecordingIsAvailable(bool* available) override;
- int32_t InitRecording() override;
- bool RecordingIsInitialized() const override;
-
- // Audio transport control
- int32_t StartPlayout() override;
- int32_t StopPlayout() override;
- bool Playing() const override;
- int32_t StartRecording() override;
- int32_t StopRecording() override;
- bool Recording() const override;
-
- // Microphone Automatic Gain Control (AGC)
- int32_t SetAGC(bool enable) override;
- bool AGC() const override;
-
- // Volume control based on the Windows Wave API (Windows only)
- int32_t SetWaveOutVolume(uint16_t volumeLeft,
- uint16_t volumeRight) override;
- int32_t WaveOutVolume(uint16_t* volumeLeft,
- uint16_t* volumeRight) const override;
-
- // Audio mixer initialization
- int32_t InitSpeaker() override;
- bool SpeakerIsInitialized() const override;
- int32_t InitMicrophone() override;
- bool MicrophoneIsInitialized() const override;
-
- // Speaker volume controls
- int32_t SpeakerVolumeIsAvailable(bool* available) override;
- int32_t SetSpeakerVolume(uint32_t volume) override;
- int32_t SpeakerVolume(uint32_t* volume) const override;
- int32_t MaxSpeakerVolume(uint32_t* maxVolume) const override;
- int32_t MinSpeakerVolume(uint32_t* minVolume) const override;
- int32_t SpeakerVolumeStepSize(uint16_t* stepSize) const override;
-
- // Microphone volume controls
- int32_t MicrophoneVolumeIsAvailable(bool* available) override;
- int32_t SetMicrophoneVolume(uint32_t volume) override;
- int32_t MicrophoneVolume(uint32_t* volume) const override;
- int32_t MaxMicrophoneVolume(uint32_t* maxVolume) const override;
- int32_t MinMicrophoneVolume(uint32_t* minVolume) const override;
- int32_t MicrophoneVolumeStepSize(uint16_t* stepSize) const override;
-
- // Speaker mute control
- int32_t SpeakerMuteIsAvailable(bool* available) override;
- int32_t SetSpeakerMute(bool enable) override;
- int32_t SpeakerMute(bool* enabled) const override;
-
- // Microphone mute control
- int32_t MicrophoneMuteIsAvailable(bool* available) override;
- int32_t SetMicrophoneMute(bool enable) override;
- int32_t MicrophoneMute(bool* enabled) const override;
-
- // Microphone boost control
- int32_t MicrophoneBoostIsAvailable(bool* available) override;
- int32_t SetMicrophoneBoost(bool enable) override;
- int32_t MicrophoneBoost(bool* enabled) const override;
-
- // Stereo support
- int32_t StereoPlayoutIsAvailable(bool* available) const override;
- int32_t SetStereoPlayout(bool enable) override;
- int32_t StereoPlayout(bool* enabled) const override;
- int32_t StereoRecordingIsAvailable(bool* available) const override;
- int32_t SetStereoRecording(bool enable) override;
- int32_t StereoRecording(bool* enabled) const override;
- int32_t SetRecordingChannel(const ChannelType channel) override;
- int32_t RecordingChannel(ChannelType* channel) const override;
-
- // Delay information and control
- int32_t SetPlayoutBuffer(const BufferType type,
- uint16_t sizeMS = 0) override;
- int32_t PlayoutBuffer(BufferType* type, uint16_t* sizeMS) const override;
- int32_t PlayoutDelay(uint16_t* delayMS) const override;
- int32_t RecordingDelay(uint16_t* delayMS) const override;
-
- // CPU load
- int32_t CPULoad(uint16_t* load) const override;
-
- // Recording of raw PCM data
- int32_t StartRawOutputFileRecording(
- const char pcmFileNameUTF8[kAdmMaxFileNameSize]) override;
- int32_t StopRawOutputFileRecording() override;
- int32_t StartRawInputFileRecording(
- const char pcmFileNameUTF8[kAdmMaxFileNameSize]) override;
- int32_t StopRawInputFileRecording() override;
-
- // Native sample rate controls (samples/sec)
- int32_t SetRecordingSampleRate(const uint32_t samplesPerSec) override;
- int32_t RecordingSampleRate(uint32_t* samplesPerSec) const override;
- int32_t SetPlayoutSampleRate(const uint32_t samplesPerSec) override;
- int32_t PlayoutSampleRate(uint32_t* samplesPerSec) const override;
-
- // Mobile device specific functions
- int32_t ResetAudioDevice() override;
- int32_t SetLoudspeakerStatus(bool enable) override;
- int32_t GetLoudspeakerStatus(bool* enabled) const override;
-
- bool BuiltInAECIsAvailable() const override;
-
- int32_t EnableBuiltInAEC(bool enable) override;
- bool BuiltInAECIsEnabled() const override;
-
-public:
- int32_t Id() {return _id;}
+
+ // Device selection
+ int32_t SetPlayoutDevice(uint16_t index) override;
+ int32_t SetPlayoutDevice(WindowsDeviceType device) override;
+ int32_t SetRecordingDevice(uint16_t index) override;
+ int32_t SetRecordingDevice(WindowsDeviceType device) override;
+
+ // Audio transport initialization
+ int32_t PlayoutIsAvailable(bool* available) override;
+ int32_t InitPlayout() override;
+ bool PlayoutIsInitialized() const override;
+ int32_t RecordingIsAvailable(bool* available) override;
+ int32_t InitRecording() override;
+ bool RecordingIsInitialized() const override;
+
+ // Audio transport control
+ int32_t StartPlayout() override;
+ int32_t StopPlayout() override;
+ bool Playing() const override;
+ int32_t StartRecording() override;
+ int32_t StopRecording() override;
+ bool Recording() const override;
+
+ // Microphone Automatic Gain Control (AGC)
+ int32_t SetAGC(bool enable) override;
+ bool AGC() const override;
+
+ // Volume control based on the Windows Wave API (Windows only)
+ int32_t SetWaveOutVolume(uint16_t volumeLeft, uint16_t volumeRight) override;
+ int32_t WaveOutVolume(uint16_t* volumeLeft,
+ uint16_t* volumeRight) const override;
+
+ // Audio mixer initialization
+ int32_t InitSpeaker() override;
+ bool SpeakerIsInitialized() const override;
+ int32_t InitMicrophone() override;
+ bool MicrophoneIsInitialized() const override;
+
+ // Speaker volume controls
+ int32_t SpeakerVolumeIsAvailable(bool* available) override;
+ int32_t SetSpeakerVolume(uint32_t volume) override;
+ int32_t SpeakerVolume(uint32_t* volume) const override;
+ int32_t MaxSpeakerVolume(uint32_t* maxVolume) const override;
+ int32_t MinSpeakerVolume(uint32_t* minVolume) const override;
+ int32_t SpeakerVolumeStepSize(uint16_t* stepSize) const override;
+
+ // Microphone volume controls
+ int32_t MicrophoneVolumeIsAvailable(bool* available) override;
+ int32_t SetMicrophoneVolume(uint32_t volume) override;
+ int32_t MicrophoneVolume(uint32_t* volume) const override;
+ int32_t MaxMicrophoneVolume(uint32_t* maxVolume) const override;
+ int32_t MinMicrophoneVolume(uint32_t* minVolume) const override;
+ int32_t MicrophoneVolumeStepSize(uint16_t* stepSize) const override;
+
+ // Speaker mute control
+ int32_t SpeakerMuteIsAvailable(bool* available) override;
+ int32_t SetSpeakerMute(bool enable) override;
+ int32_t SpeakerMute(bool* enabled) const override;
+
+ // Microphone mute control
+ int32_t MicrophoneMuteIsAvailable(bool* available) override;
+ int32_t SetMicrophoneMute(bool enable) override;
+ int32_t MicrophoneMute(bool* enabled) const override;
+
+ // Microphone boost control
+ int32_t MicrophoneBoostIsAvailable(bool* available) override;
+ int32_t SetMicrophoneBoost(bool enable) override;
+ int32_t MicrophoneBoost(bool* enabled) const override;
+
+ // Stereo support
+ int32_t StereoPlayoutIsAvailable(bool* available) const override;
+ int32_t SetStereoPlayout(bool enable) override;
+ int32_t StereoPlayout(bool* enabled) const override;
+ int32_t StereoRecordingIsAvailable(bool* available) const override;
+ int32_t SetStereoRecording(bool enable) override;
+ int32_t StereoRecording(bool* enabled) const override;
+ int32_t SetRecordingChannel(const ChannelType channel) override;
+ int32_t RecordingChannel(ChannelType* channel) const override;
+
+ // Delay information and control
+ int32_t SetPlayoutBuffer(const BufferType type, uint16_t sizeMS = 0) override;
+ int32_t PlayoutBuffer(BufferType* type, uint16_t* sizeMS) const override;
+ int32_t PlayoutDelay(uint16_t* delayMS) const override;
+ int32_t RecordingDelay(uint16_t* delayMS) const override;
+
+ // CPU load
+ int32_t CPULoad(uint16_t* load) const override;
+
+ // Recording of raw PCM data
+ int32_t StartRawOutputFileRecording(
+ const char pcmFileNameUTF8[kAdmMaxFileNameSize]) override;
+ int32_t StopRawOutputFileRecording() override;
+ int32_t StartRawInputFileRecording(
+ const char pcmFileNameUTF8[kAdmMaxFileNameSize]) override;
+ int32_t StopRawInputFileRecording() override;
+
+ // Native sample rate controls (samples/sec)
+ int32_t SetRecordingSampleRate(const uint32_t samplesPerSec) override;
+ int32_t RecordingSampleRate(uint32_t* samplesPerSec) const override;
+ int32_t SetPlayoutSampleRate(const uint32_t samplesPerSec) override;
+ int32_t PlayoutSampleRate(uint32_t* samplesPerSec) const override;
+
+ // Mobile device specific functions
+ int32_t ResetAudioDevice() override;
+ int32_t SetLoudspeakerStatus(bool enable) override;
+ int32_t GetLoudspeakerStatus(bool* enabled) const override;
+
+ bool BuiltInAECIsEnabled() const override;
+ bool BuiltInAECIsAvailable() const override;
+ int32_t EnableBuiltInAEC(bool enable) override;
+ bool BuiltInAGCIsAvailable() const override;
+ int32_t EnableBuiltInAGC(bool enable) override;
+ bool BuiltInNSIsAvailable() const override;
+ int32_t EnableBuiltInNS(bool enable) override;
+
+ int GetPlayoutAudioParameters(AudioParameters* params) const override;
+ int GetRecordAudioParameters(AudioParameters* params) const override;
+
+ int32_t Id() { return _id; }
#if defined(WEBRTC_ANDROID)
- // Only use this acccessor for test purposes on Android.
- AudioManager* GetAndroidAudioManagerForTest() {
- return _audioManagerAndroid.get();
- }
+ // Only use this acccessor for test purposes on Android.
+ AudioManager* GetAndroidAudioManagerForTest() {
+ return _audioManagerAndroid.get();
+ }
#endif
- AudioDeviceBuffer* GetAudioDeviceBuffer() {
- return &_audioDeviceBuffer;
- }
+ AudioDeviceBuffer* GetAudioDeviceBuffer() { return &_audioDeviceBuffer; }
-private:
- PlatformType Platform() const;
- AudioLayer PlatformAudioLayer() const;
+ private:
+ PlatformType Platform() const;
+ AudioLayer PlatformAudioLayer() const;
-private:
- CriticalSectionWrapper& _critSect;
- CriticalSectionWrapper& _critSectEventCb;
- CriticalSectionWrapper& _critSectAudioCb;
+ CriticalSectionWrapper& _critSect;
+ CriticalSectionWrapper& _critSectEventCb;
+ CriticalSectionWrapper& _critSectAudioCb;
- AudioDeviceObserver* _ptrCbAudioDeviceObserver;
+ AudioDeviceObserver* _ptrCbAudioDeviceObserver;
- AudioDeviceGeneric* _ptrAudioDevice;
+ AudioDeviceGeneric* _ptrAudioDevice;
- AudioDeviceBuffer _audioDeviceBuffer;
+ AudioDeviceBuffer _audioDeviceBuffer;
#if defined(WEBRTC_ANDROID)
- rtc::scoped_ptr<AudioManager> _audioManagerAndroid;
+ rtc::scoped_ptr<AudioManager> _audioManagerAndroid;
#endif
- int32_t _id;
- AudioLayer _platformAudioLayer;
- int64_t _lastProcessTime;
- PlatformType _platformType;
- bool _initialized;
- mutable ErrorCode _lastError;
+ int32_t _id;
+ AudioLayer _platformAudioLayer;
+ int64_t _lastProcessTime;
+ PlatformType _platformType;
+ bool _initialized;
+ mutable ErrorCode _lastError;
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_device/dummy/file_audio_device.cc b/chromium/third_party/webrtc/modules/audio_device/dummy/file_audio_device.cc
index 3de5344a8f1..a2eac876738 100644
--- a/chromium/third_party/webrtc/modules/audio_device/dummy/file_audio_device.cc
+++ b/chromium/third_party/webrtc/modules/audio_device/dummy/file_audio_device.cc
@@ -172,7 +172,7 @@ int32_t FileAudioDevice::InitRecording() {
return -1;
}
- _recordingFramesIn10MS = static_cast<uint32_t>(kRecordingFixedSampleRate/100);
+ _recordingFramesIn10MS = static_cast<size_t>(kRecordingFixedSampleRate / 100);
if (_ptrAudioBuffer) {
_ptrAudioBuffer->SetRecordingSampleRate(kRecordingFixedSampleRate);
@@ -190,7 +190,7 @@ int32_t FileAudioDevice::StartPlayout() {
return 0;
}
- _playoutFramesIn10MS = static_cast<uint32_t>(kPlayoutFixedSampleRate/100);
+ _playoutFramesIn10MS = static_cast<size_t>(kPlayoutFixedSampleRate / 100);
_playing = true;
_playoutFramesLeft = 0;
diff --git a/chromium/third_party/webrtc/modules/audio_device/dummy/file_audio_device.h b/chromium/third_party/webrtc/modules/audio_device/dummy/file_audio_device.h
index ffc8adc0162..91a7d22f64f 100644
--- a/chromium/third_party/webrtc/modules/audio_device/dummy/file_audio_device.h
+++ b/chromium/third_party/webrtc/modules/audio_device/dummy/file_audio_device.h
@@ -174,9 +174,9 @@ class FileAudioDevice : public AudioDeviceGeneric {
uint32_t _playoutFramesLeft;
CriticalSectionWrapper& _critSect;
- uint32_t _recordingBufferSizeIn10MS;
- uint32_t _recordingFramesIn10MS;
- uint32_t _playoutFramesIn10MS;
+ size_t _recordingBufferSizeIn10MS;
+ size_t _recordingFramesIn10MS;
+ size_t _playoutFramesIn10MS;
rtc::scoped_ptr<ThreadWrapper> _ptrThreadRec;
rtc::scoped_ptr<ThreadWrapper> _ptrThreadPlay;
diff --git a/chromium/third_party/webrtc/modules/audio_device/fine_audio_buffer.cc b/chromium/third_party/webrtc/modules/audio_device/fine_audio_buffer.cc
new file mode 100644
index 00000000000..c3b07eeb404
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/audio_device/fine_audio_buffer.cc
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_device/fine_audio_buffer.h"
+
+#include <memory.h>
+#include <stdio.h>
+#include <algorithm>
+
+#include "webrtc/base/checks.h"
+#include "webrtc/base/logging.h"
+#include "webrtc/modules/audio_device/audio_device_buffer.h"
+
+namespace webrtc {
+
+FineAudioBuffer::FineAudioBuffer(AudioDeviceBuffer* device_buffer,
+ size_t desired_frame_size_bytes,
+ int sample_rate)
+ : device_buffer_(device_buffer),
+ desired_frame_size_bytes_(desired_frame_size_bytes),
+ sample_rate_(sample_rate),
+ samples_per_10_ms_(static_cast<size_t>(sample_rate_ * 10 / 1000)),
+ bytes_per_10_ms_(samples_per_10_ms_ * sizeof(int16_t)),
+ playout_cached_buffer_start_(0),
+ playout_cached_bytes_(0),
+ // Allocate extra space on the recording side to reduce the number of
+ // memmove() calls.
+ required_record_buffer_size_bytes_(
+ 5 * (desired_frame_size_bytes + bytes_per_10_ms_)),
+ record_cached_bytes_(0),
+ record_read_pos_(0),
+ record_write_pos_(0) {
+ playout_cache_buffer_.reset(new int8_t[bytes_per_10_ms_]);
+ record_cache_buffer_.reset(new int8_t[required_record_buffer_size_bytes_]);
+ memset(record_cache_buffer_.get(), 0, required_record_buffer_size_bytes_);
+}
+
+FineAudioBuffer::~FineAudioBuffer() {}
+
+size_t FineAudioBuffer::RequiredPlayoutBufferSizeBytes() {
+ // It is possible that we store the desired frame size - 1 samples. Since new
+ // audio frames are pulled in chunks of 10ms we will need a buffer that can
+ // hold desired_frame_size - 1 + 10ms of data. We omit the - 1.
+ return desired_frame_size_bytes_ + bytes_per_10_ms_;
+}
+
+void FineAudioBuffer::ResetPlayout() {
+ playout_cached_buffer_start_ = 0;
+ playout_cached_bytes_ = 0;
+ memset(playout_cache_buffer_.get(), 0, bytes_per_10_ms_);
+}
+
+void FineAudioBuffer::ResetRecord() {
+ record_cached_bytes_ = 0;
+ record_read_pos_ = 0;
+ record_write_pos_ = 0;
+ memset(record_cache_buffer_.get(), 0, required_record_buffer_size_bytes_);
+}
+
+void FineAudioBuffer::GetPlayoutData(int8_t* buffer) {
+ if (desired_frame_size_bytes_ <= playout_cached_bytes_) {
+ memcpy(buffer, &playout_cache_buffer_.get()[playout_cached_buffer_start_],
+ desired_frame_size_bytes_);
+ playout_cached_buffer_start_ += desired_frame_size_bytes_;
+ playout_cached_bytes_ -= desired_frame_size_bytes_;
+ RTC_CHECK_LT(playout_cached_buffer_start_ + playout_cached_bytes_,
+ bytes_per_10_ms_);
+ return;
+ }
+ memcpy(buffer, &playout_cache_buffer_.get()[playout_cached_buffer_start_],
+ playout_cached_bytes_);
+ // Push another n*10ms of audio to |buffer|. n > 1 if
+ // |desired_frame_size_bytes_| is greater than 10ms of audio. Note that we
+ // write the audio after the cached bytes copied earlier.
+ int8_t* unwritten_buffer = &buffer[playout_cached_bytes_];
+ int bytes_left =
+ static_cast<int>(desired_frame_size_bytes_ - playout_cached_bytes_);
+ // Ceiling of integer division: 1 + ((x - 1) / y)
+ size_t number_of_requests = 1 + (bytes_left - 1) / (bytes_per_10_ms_);
+ for (size_t i = 0; i < number_of_requests; ++i) {
+ device_buffer_->RequestPlayoutData(samples_per_10_ms_);
+ int num_out = device_buffer_->GetPlayoutData(unwritten_buffer);
+ if (static_cast<size_t>(num_out) != samples_per_10_ms_) {
+ RTC_CHECK_EQ(num_out, 0);
+ playout_cached_bytes_ = 0;
+ return;
+ }
+ unwritten_buffer += bytes_per_10_ms_;
+ RTC_CHECK_GE(bytes_left, 0);
+ bytes_left -= static_cast<int>(bytes_per_10_ms_);
+ }
+ RTC_CHECK_LE(bytes_left, 0);
+ // Put the samples that were written to |buffer| but are not used in the
+ // cache.
+ size_t cache_location = desired_frame_size_bytes_;
+ int8_t* cache_ptr = &buffer[cache_location];
+ playout_cached_bytes_ = number_of_requests * bytes_per_10_ms_ -
+ (desired_frame_size_bytes_ - playout_cached_bytes_);
+ // If playout_cached_bytes_ is larger than the cache buffer, uninitialized
+ // memory will be read.
+ RTC_CHECK_LE(playout_cached_bytes_, bytes_per_10_ms_);
+ RTC_CHECK_EQ(static_cast<size_t>(-bytes_left), playout_cached_bytes_);
+ playout_cached_buffer_start_ = 0;
+ memcpy(playout_cache_buffer_.get(), cache_ptr, playout_cached_bytes_);
+}
+
+void FineAudioBuffer::DeliverRecordedData(const int8_t* buffer,
+ size_t size_in_bytes,
+ int playout_delay_ms,
+ int record_delay_ms) {
+ RTC_CHECK_EQ(size_in_bytes, desired_frame_size_bytes_);
+ // Check if the temporary buffer can store the incoming buffer. If not,
+ // move the remaining (old) bytes to the beginning of the temporary buffer
+ // and start adding new samples after the old samples.
+ if (record_write_pos_ + size_in_bytes > required_record_buffer_size_bytes_) {
+ if (record_cached_bytes_ > 0) {
+ memmove(record_cache_buffer_.get(),
+ record_cache_buffer_.get() + record_read_pos_,
+ record_cached_bytes_);
+ }
+ record_write_pos_ = record_cached_bytes_;
+ record_read_pos_ = 0;
+ }
+ // Add recorded samples to a temporary buffer.
+ memcpy(record_cache_buffer_.get() + record_write_pos_, buffer, size_in_bytes);
+ record_write_pos_ += size_in_bytes;
+ record_cached_bytes_ += size_in_bytes;
+ // Consume samples in temporary buffer in chunks of 10ms until there is not
+ // enough data left. The number of remaining bytes in the cache is given by
+ // |record_cached_bytes_| after this while loop is done.
+ while (record_cached_bytes_ >= bytes_per_10_ms_) {
+ device_buffer_->SetRecordedBuffer(
+ record_cache_buffer_.get() + record_read_pos_, samples_per_10_ms_);
+ device_buffer_->SetVQEData(playout_delay_ms, record_delay_ms, 0);
+ device_buffer_->DeliverRecordedData();
+ // Read next chunk of 10ms data.
+ record_read_pos_ += bytes_per_10_ms_;
+ // Reduce number of cached bytes with the consumed amount.
+ record_cached_bytes_ -= bytes_per_10_ms_;
+ }
+}
+
+} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_device/fine_audio_buffer.h b/chromium/third_party/webrtc/modules/audio_device/fine_audio_buffer.h
new file mode 100644
index 00000000000..4ab5cd268cc
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/audio_device/fine_audio_buffer.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_DEVICE_FINE_AUDIO_BUFFER_H_
+#define WEBRTC_MODULES_AUDIO_DEVICE_FINE_AUDIO_BUFFER_H_
+
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+
+class AudioDeviceBuffer;
+
+// FineAudioBuffer takes an AudioDeviceBuffer (ADB) which deals with audio data
+// corresponding to 10ms of data. It then allows for this data to be pulled in
+// a finer or coarser granularity. I.e. interacting with this class instead of
+// directly with the AudioDeviceBuffer one can ask for any number of audio data
+// samples. This class also ensures that audio data can be delivered to the ADB
+// in 10ms chunks when the size of the provided audio buffers differs from 10ms.
+// As an example: calling DeliverRecordedData() with 5ms buffers will deliver
+// accumulated 10ms worth of data to the ADB every second call.
+class FineAudioBuffer {
+ public:
+ // |device_buffer| is a buffer that provides 10ms of audio data.
+ // |desired_frame_size_bytes| is the number of bytes of audio data
+ // GetPlayoutData() should return on success. It is also the required size of
+ // each recorded buffer used in DeliverRecordedData() calls.
+ // |sample_rate| is the sample rate of the audio data. This is needed because
+ // |device_buffer| delivers 10ms of data. Given the sample rate the number
+ // of samples can be calculated.
+ FineAudioBuffer(AudioDeviceBuffer* device_buffer,
+ size_t desired_frame_size_bytes,
+ int sample_rate);
+ ~FineAudioBuffer();
+
+ // Returns the required size of |buffer| when calling GetPlayoutData(). If
+ // the buffer is smaller memory trampling will happen.
+ size_t RequiredPlayoutBufferSizeBytes();
+
+  // Clears buffers and counters dealing with playout and/or recording.
+ void ResetPlayout();
+ void ResetRecord();
+
+  // |buffer| must be of equal or greater size than what is returned by
+  // RequiredPlayoutBufferSizeBytes(). This is to avoid unnecessary memcpy.
+ void GetPlayoutData(int8_t* buffer);
+
+ // Consumes the audio data in |buffer| and sends it to the WebRTC layer in
+ // chunks of 10ms. The provided delay estimates in |playout_delay_ms| and
+ // |record_delay_ms| are given to the AEC in the audio processing module.
+ // They can be fixed values on most platforms and they are ignored if an
+ // external (hardware/built-in) AEC is used.
+ // The size of |buffer| is given by |size_in_bytes| and must be equal to
+ // |desired_frame_size_bytes_|. A RTC_CHECK will be hit if this is not the
+ // case.
+ // Example: buffer size is 5ms => call #1 stores 5ms of data, call #2 stores
+  // 5ms of data and sends a total of 10ms to WebRTC and clears the internal
+ // cache. Call #3 restarts the scheme above.
+ void DeliverRecordedData(const int8_t* buffer,
+ size_t size_in_bytes,
+ int playout_delay_ms,
+ int record_delay_ms);
+
+ private:
+ // Device buffer that works with 10ms chunks of data both for playout and
+ // for recording. I.e., the WebRTC side will always be asked for audio to be
+ // played out in 10ms chunks and recorded audio will be sent to WebRTC in
+ // 10ms chunks as well. This pointer is owned by the constructor of this
+ // class and the owner must ensure that the pointer is valid during the life-
+ // time of this object.
+ AudioDeviceBuffer* const device_buffer_;
+ // Number of bytes delivered by GetPlayoutData() call and provided to
+ // DeliverRecordedData().
+ const size_t desired_frame_size_bytes_;
+ // Sample rate in Hertz.
+ const int sample_rate_;
+ // Number of audio samples per 10ms.
+ const size_t samples_per_10_ms_;
+ // Number of audio bytes per 10ms.
+ const size_t bytes_per_10_ms_;
+ // Storage for output samples that are not yet asked for.
+ rtc::scoped_ptr<int8_t[]> playout_cache_buffer_;
+ // Location of first unread output sample.
+ size_t playout_cached_buffer_start_;
+ // Number of bytes stored in output (contain samples to be played out) cache.
+ size_t playout_cached_bytes_;
+ // Storage for input samples that are about to be delivered to the WebRTC
+ // ADB or remains from the last successful delivery of a 10ms audio buffer.
+ rtc::scoped_ptr<int8_t[]> record_cache_buffer_;
+ // Required (max) size in bytes of the |record_cache_buffer_|.
+ const size_t required_record_buffer_size_bytes_;
+ // Number of bytes in input (contains recorded samples) cache.
+ size_t record_cached_bytes_;
+ // Read and write pointers used in the buffering scheme on the recording side.
+ size_t record_read_pos_;
+ size_t record_write_pos_;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_DEVICE_FINE_AUDIO_BUFFER_H_
diff --git a/chromium/third_party/webrtc/modules/audio_device/android/fine_audio_buffer_unittest.cc b/chromium/third_party/webrtc/modules/audio_device/fine_audio_buffer_unittest.cc
index 4cff883129f..6666364c9e3 100644
--- a/chromium/third_party/webrtc/modules/audio_device/android/fine_audio_buffer_unittest.cc
+++ b/chromium/third_party/webrtc/modules/audio_device/fine_audio_buffer_unittest.cc
@@ -8,7 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "webrtc/modules/audio_device/android/fine_audio_buffer.h"
+#include "webrtc/modules/audio_device/fine_audio_buffer.h"
#include <limits.h>
#include <memory>
@@ -19,6 +19,7 @@
#include "webrtc/modules/audio_device/mock_audio_device_buffer.h"
using ::testing::_;
+using ::testing::AtLeast;
using ::testing::InSequence;
using ::testing::Return;
@@ -40,10 +41,10 @@ bool VerifyBuffer(const int8_t* buffer, int buffer_number, int size) {
return true;
}
-// This function replaces GetPlayoutData when it's called (which is done
-// implicitly when calling GetBufferData). It writes the sequence
-// 0,1,..SCHAR_MAX-1,0,1,... to the buffer. Note that this is likely a buffer of
-// different size than the one VerifyBuffer verifies.
+// This function replaces the real AudioDeviceBuffer::GetPlayoutData when it's
+// called (which is done implicitly when calling GetBufferData). It writes the
+// sequence 0,1,..SCHAR_MAX-1,0,1,... to the buffer. Note that this is likely a
+// buffer of different size than the one VerifyBuffer verifies.
// |iteration| is the number of calls made to UpdateBuffer prior to this call.
// |samples_per_10_ms| is the number of samples that should be written to the
// buffer (|arg0|).
@@ -57,10 +58,33 @@ ACTION_P2(UpdateBuffer, iteration, samples_per_10_ms) {
return samples_per_10_ms;
}
+// Writes a periodic ramp pattern to the supplied |buffer|. See UpdateBuffer()
+// for details.
+void UpdateInputBuffer(int8_t* buffer, int iteration, int size) {
+ int start_value = (iteration * size) % SCHAR_MAX;
+ for (int i = 0; i < size; ++i) {
+ buffer[i] = (i + start_value) % SCHAR_MAX;
+ }
+}
+
+// Action macro which verifies that the recorded 10ms chunk of audio data
+// (in |arg0|) contains the correct reference values even if they have been
+// supplied using a buffer size that is smaller or larger than 10ms.
+// See VerifyBuffer() for details.
+ACTION_P2(VerifyInputBuffer, iteration, samples_per_10_ms) {
+ const int8_t* buffer = static_cast<const int8_t*>(arg0);
+ int bytes_per_10_ms = samples_per_10_ms * static_cast<int>(sizeof(int16_t));
+ int start_value = (iteration * bytes_per_10_ms) % SCHAR_MAX;
+ for (int i = 0; i < bytes_per_10_ms; ++i) {
+ EXPECT_EQ(buffer[i], (i + start_value) % SCHAR_MAX);
+ }
+ return 0;
+}
+
void RunFineBufferTest(int sample_rate, int frame_size_in_samples) {
const int kSamplesPer10Ms = sample_rate * 10 / 1000;
- const int kFrameSizeBytes = frame_size_in_samples *
- static_cast<int>(sizeof(int16_t));
+ const int kFrameSizeBytes =
+ frame_size_in_samples * static_cast<int>(sizeof(int16_t));
const int kNumberOfFrames = 5;
// Ceiling of integer division: 1 + ((x - 1) / y)
const int kNumberOfUpdateBufferCalls =
@@ -77,15 +101,32 @@ void RunFineBufferTest(int sample_rate, int frame_size_in_samples) {
.RetiresOnSaturation();
}
}
+ {
+ InSequence s;
+ for (int j = 0; j < kNumberOfUpdateBufferCalls - 1; ++j) {
+ EXPECT_CALL(audio_device_buffer, SetRecordedBuffer(_, kSamplesPer10Ms))
+ .WillOnce(VerifyInputBuffer(j, kSamplesPer10Ms))
+ .RetiresOnSaturation();
+ }
+ }
+ EXPECT_CALL(audio_device_buffer, SetVQEData(_, _, _))
+ .Times(kNumberOfUpdateBufferCalls - 1);
+ EXPECT_CALL(audio_device_buffer, DeliverRecordedData())
+ .Times(kNumberOfUpdateBufferCalls - 1)
+ .WillRepeatedly(Return(kSamplesPer10Ms));
+
FineAudioBuffer fine_buffer(&audio_device_buffer, kFrameSizeBytes,
sample_rate);
rtc::scoped_ptr<int8_t[]> out_buffer;
- out_buffer.reset(
- new int8_t[fine_buffer.RequiredBufferSizeBytes()]);
+ out_buffer.reset(new int8_t[fine_buffer.RequiredPlayoutBufferSizeBytes()]);
+ rtc::scoped_ptr<int8_t[]> in_buffer;
+ in_buffer.reset(new int8_t[kFrameSizeBytes]);
for (int i = 0; i < kNumberOfFrames; ++i) {
- fine_buffer.GetBufferData(out_buffer.get());
+ fine_buffer.GetPlayoutData(out_buffer.get());
EXPECT_TRUE(VerifyBuffer(out_buffer.get(), i, kFrameSizeBytes));
+ UpdateInputBuffer(in_buffer.get(), i, kFrameSizeBytes);
+ fine_buffer.DeliverRecordedData(in_buffer.get(), kFrameSizeBytes, 0, 0);
}
}
diff --git a/chromium/third_party/webrtc/modules/audio_device/include/audio_device.h b/chromium/third_party/webrtc/modules/audio_device/include/audio_device.h
index 2f0c6b55ed7..c2c2b881031 100644
--- a/chromium/third_party/webrtc/modules/audio_device/include/audio_device.h
+++ b/chromium/third_party/webrtc/modules/audio_device/include/audio_device.h
@@ -187,23 +187,28 @@ class AudioDeviceModule : public RefCountedModule {
// Only supported on Android.
// TODO(henrika): Make pure virtual after updating Chromium.
virtual bool BuiltInAECIsAvailable() const { return false; }
+ virtual bool BuiltInAGCIsAvailable() const { return false; }
+ virtual bool BuiltInNSIsAvailable() const { return false; }
- // Enables the built-in AEC. Only supported on Windows and Android.
- //
- // For usage on Windows (requires Core Audio):
- // Must be called before InitRecording(). When enabled:
- // 1. StartPlayout() must be called before StartRecording().
- // 2. StopRecording() should be called before StopPlayout().
- // The reverse order may cause garbage audio to be rendered or the
- // capture side to halt until StopRecording() is called.
+ // Enables the built-in audio effects. Only supported on Android.
// TODO(henrika): Make pure virtual after updating Chromium.
virtual int32_t EnableBuiltInAEC(bool enable) { return -1; }
-
+ virtual int32_t EnableBuiltInAGC(bool enable) { return -1; }
+ virtual int32_t EnableBuiltInNS(bool enable) { return -1; }
// Don't use.
virtual bool BuiltInAECIsEnabled() const { return false; }
+ // Only supported on iOS.
+ // TODO(henrika): Make pure virtual after updating Chromium.
+ virtual int GetPlayoutAudioParameters(AudioParameters* params) const {
+ return -1;
+ }
+ virtual int GetRecordAudioParameters(AudioParameters* params) const {
+ return -1;
+ }
+
protected:
- virtual ~AudioDeviceModule() {};
+ virtual ~AudioDeviceModule() {}
};
AudioDeviceModule* CreateAudioDeviceModule(
diff --git a/chromium/third_party/webrtc/modules/audio_device/include/audio_device_defines.h b/chromium/third_party/webrtc/modules/audio_device/include/audio_device_defines.h
index 56a584ef9ea..3ebbd23cc5f 100644
--- a/chromium/third_party/webrtc/modules/audio_device/include/audio_device_defines.h
+++ b/chromium/third_party/webrtc/modules/audio_device/include/audio_device_defines.h
@@ -8,8 +8,10 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_DEFINES_H
-#define WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_DEFINES_H
+#ifndef WEBRTC_MODULES_AUDIO_DEVICE_INCLUDE_AUDIO_DEVICE_DEFINES_H_
+#define WEBRTC_MODULES_AUDIO_DEVICE_INCLUDE_AUDIO_DEVICE_DEFINES_H_
+
+#include <stddef.h>
#include "webrtc/typedefs.h"
@@ -26,115 +28,183 @@ static const int kAdmMaxPlayoutBufferSizeMs = 250;
// AudioDeviceObserver
// ----------------------------------------------------------------------------
-class AudioDeviceObserver
-{
-public:
- enum ErrorCode
- {
- kRecordingError = 0,
- kPlayoutError = 1
- };
- enum WarningCode
- {
- kRecordingWarning = 0,
- kPlayoutWarning = 1
- };
-
- virtual void OnErrorIsReported(const ErrorCode error) = 0;
- virtual void OnWarningIsReported(const WarningCode warning) = 0;
-
-protected:
- virtual ~AudioDeviceObserver() {}
+class AudioDeviceObserver {
+ public:
+ enum ErrorCode { kRecordingError = 0, kPlayoutError = 1 };
+ enum WarningCode { kRecordingWarning = 0, kPlayoutWarning = 1 };
+
+ virtual void OnErrorIsReported(const ErrorCode error) = 0;
+ virtual void OnWarningIsReported(const WarningCode warning) = 0;
+
+ protected:
+ virtual ~AudioDeviceObserver() {}
};
// ----------------------------------------------------------------------------
// AudioTransport
// ----------------------------------------------------------------------------
-class AudioTransport
-{
-public:
- virtual int32_t RecordedDataIsAvailable(const void* audioSamples,
- const uint32_t nSamples,
- const uint8_t nBytesPerSample,
- const uint8_t nChannels,
- const uint32_t samplesPerSec,
- const uint32_t totalDelayMS,
- const int32_t clockDrift,
- const uint32_t currentMicLevel,
- const bool keyPressed,
- uint32_t& newMicLevel) = 0;
-
- virtual int32_t NeedMorePlayData(const uint32_t nSamples,
- const uint8_t nBytesPerSample,
- const uint8_t nChannels,
- const uint32_t samplesPerSec,
- void* audioSamples,
- uint32_t& nSamplesOut,
- int64_t* elapsed_time_ms,
- int64_t* ntp_time_ms) = 0;
-
- // Method to pass captured data directly and unmixed to network channels.
- // |channel_ids| contains a list of VoE channels which are the
- // sinks to the capture data. |audio_delay_milliseconds| is the sum of
- // recording delay and playout delay of the hardware. |current_volume| is
- // in the range of [0, 255], representing the current microphone analog
- // volume. |key_pressed| is used by the typing detection.
- // |need_audio_processing| specify if the data needs to be processed by APM.
- // Currently WebRtc supports only one APM, and Chrome will make sure only
- // one stream goes through APM. When |need_audio_processing| is false, the
- // values of |audio_delay_milliseconds|, |current_volume| and |key_pressed|
- // will be ignored.
- // The return value is the new microphone volume, in the range of |0, 255].
- // When the volume does not need to be updated, it returns 0.
- // TODO(xians): Remove this interface after Chrome and Libjingle switches
- // to OnData().
- virtual int OnDataAvailable(const int voe_channels[],
- int number_of_voe_channels,
- const int16_t* audio_data,
- int sample_rate,
- int number_of_channels,
- int number_of_frames,
- int audio_delay_milliseconds,
- int current_volume,
- bool key_pressed,
- bool need_audio_processing) { return 0; }
-
- // Method to pass the captured audio data to the specific VoE channel.
- // |voe_channel| is the id of the VoE channel which is the sink to the
- // capture data.
- // TODO(xians): Remove this interface after Libjingle switches to
- // PushCaptureData().
- virtual void OnData(int voe_channel, const void* audio_data,
- int bits_per_sample, int sample_rate,
- int number_of_channels,
- int number_of_frames) {}
-
- // Method to push the captured audio data to the specific VoE channel.
- // The data will not undergo audio processing.
- // |voe_channel| is the id of the VoE channel which is the sink to the
- // capture data.
- // TODO(xians): Make the interface pure virtual after Libjingle
- // has its implementation.
- virtual void PushCaptureData(int voe_channel, const void* audio_data,
- int bits_per_sample, int sample_rate,
- int number_of_channels,
- int number_of_frames) {}
-
- // Method to pull mixed render audio data from all active VoE channels.
- // The data will not be passed as reference for audio processing internally.
- // TODO(xians): Support getting the unmixed render data from specific VoE
- // channel.
- virtual void PullRenderData(int bits_per_sample, int sample_rate,
- int number_of_channels, int number_of_frames,
- void* audio_data,
- int64_t* elapsed_time_ms,
- int64_t* ntp_time_ms) {}
-
-protected:
- virtual ~AudioTransport() {}
+class AudioTransport {
+ public:
+ virtual int32_t RecordedDataIsAvailable(const void* audioSamples,
+ const size_t nSamples,
+ const size_t nBytesPerSample,
+ const uint8_t nChannels,
+ const uint32_t samplesPerSec,
+ const uint32_t totalDelayMS,
+ const int32_t clockDrift,
+ const uint32_t currentMicLevel,
+ const bool keyPressed,
+ uint32_t& newMicLevel) = 0;
+
+ virtual int32_t NeedMorePlayData(const size_t nSamples,
+ const size_t nBytesPerSample,
+ const uint8_t nChannels,
+ const uint32_t samplesPerSec,
+ void* audioSamples,
+ size_t& nSamplesOut,
+ int64_t* elapsed_time_ms,
+ int64_t* ntp_time_ms) = 0;
+
+ // Method to pass captured data directly and unmixed to network channels.
+ // |channel_ids| contains a list of VoE channels which are the
+ // sinks to the capture data. |audio_delay_milliseconds| is the sum of
+ // recording delay and playout delay of the hardware. |current_volume| is
+ // in the range of [0, 255], representing the current microphone analog
+ // volume. |key_pressed| is used by the typing detection.
+ // |need_audio_processing| specify if the data needs to be processed by APM.
+ // Currently WebRtc supports only one APM, and Chrome will make sure only
+ // one stream goes through APM. When |need_audio_processing| is false, the
+ // values of |audio_delay_milliseconds|, |current_volume| and |key_pressed|
+ // will be ignored.
+  // The return value is the new microphone volume, in the range of (0, 255].
+ // When the volume does not need to be updated, it returns 0.
+ // TODO(xians): Remove this interface after Chrome and Libjingle switches
+ // to OnData().
+ virtual int OnDataAvailable(const int voe_channels[],
+ int number_of_voe_channels,
+ const int16_t* audio_data,
+ int sample_rate,
+ int number_of_channels,
+ size_t number_of_frames,
+ int audio_delay_milliseconds,
+ int current_volume,
+ bool key_pressed,
+ bool need_audio_processing) {
+ return 0;
+ }
+
+ // Method to pass the captured audio data to the specific VoE channel.
+ // |voe_channel| is the id of the VoE channel which is the sink to the
+ // capture data.
+ // TODO(xians): Remove this interface after Libjingle switches to
+ // PushCaptureData().
+ virtual void OnData(int voe_channel,
+ const void* audio_data,
+ int bits_per_sample,
+ int sample_rate,
+ int number_of_channels,
+ size_t number_of_frames) {}
+
+ // Method to push the captured audio data to the specific VoE channel.
+ // The data will not undergo audio processing.
+ // |voe_channel| is the id of the VoE channel which is the sink to the
+ // capture data.
+ // TODO(xians): Make the interface pure virtual after Libjingle
+ // has its implementation.
+ virtual void PushCaptureData(int voe_channel,
+ const void* audio_data,
+ int bits_per_sample,
+ int sample_rate,
+ int number_of_channels,
+ size_t number_of_frames) {}
+
+ // Method to pull mixed render audio data from all active VoE channels.
+ // The data will not be passed as reference for audio processing internally.
+ // TODO(xians): Support getting the unmixed render data from specific VoE
+ // channel.
+ virtual void PullRenderData(int bits_per_sample,
+ int sample_rate,
+ int number_of_channels,
+ size_t number_of_frames,
+ void* audio_data,
+ int64_t* elapsed_time_ms,
+ int64_t* ntp_time_ms) {}
+
+ protected:
+ virtual ~AudioTransport() {}
+};
+
+// Helper class for storage of fundamental audio parameters such as sample rate,
+// number of channels, native buffer size etc.
+// Note that one audio frame can contain more than one channel sample and each
+// sample is assumed to be a 16-bit PCM sample. Hence, one audio frame in
+// stereo contains 2 * (16/8) = 4 bytes of data.
+class AudioParameters {
+ public:
+ // This implementation does only support 16-bit PCM samples.
+ static const size_t kBitsPerSample = 16;
+ AudioParameters()
+ : sample_rate_(0),
+ channels_(0),
+ frames_per_buffer_(0),
+ frames_per_10ms_buffer_(0) {}
+ AudioParameters(int sample_rate, int channels, size_t frames_per_buffer)
+ : sample_rate_(sample_rate),
+ channels_(channels),
+ frames_per_buffer_(frames_per_buffer),
+ frames_per_10ms_buffer_(static_cast<size_t>(sample_rate / 100)) {}
+ void reset(int sample_rate, int channels, size_t frames_per_buffer) {
+ sample_rate_ = sample_rate;
+ channels_ = channels;
+ frames_per_buffer_ = frames_per_buffer;
+ frames_per_10ms_buffer_ = static_cast<size_t>(sample_rate / 100);
+ }
+ size_t bits_per_sample() const { return kBitsPerSample; }
+ void reset(int sample_rate, int channels, double ms_per_buffer) {
+ reset(sample_rate, channels,
+ static_cast<size_t>(sample_rate * ms_per_buffer + 0.5));
+ }
+ void reset(int sample_rate, int channels) {
+ reset(sample_rate, channels, static_cast<size_t>(0));
+ }
+ int sample_rate() const { return sample_rate_; }
+ int channels() const { return channels_; }
+ size_t frames_per_buffer() const { return frames_per_buffer_; }
+ size_t frames_per_10ms_buffer() const { return frames_per_10ms_buffer_; }
+ size_t GetBytesPerFrame() const { return channels_ * kBitsPerSample / 8; }
+ size_t GetBytesPerBuffer() const {
+ return frames_per_buffer_ * GetBytesPerFrame();
+ }
+ // The WebRTC audio device buffer (ADB) only requires that the sample rate
+ // and number of channels are configured. Hence, to be "valid", only these
+ // two attributes must be set.
+ bool is_valid() const { return ((sample_rate_ > 0) && (channels_ > 0)); }
+ // Most platforms also require that a native buffer size is defined.
+ // An audio parameter instance is considered to be "complete" if it is both
+ // "valid" (can be used by the ADB) and also has a native frame size.
+ bool is_complete() const { return (is_valid() && (frames_per_buffer_ > 0)); }
+ size_t GetBytesPer10msBuffer() const {
+ return frames_per_10ms_buffer_ * GetBytesPerFrame();
+ }
+ double GetBufferSizeInMilliseconds() const {
+ if (sample_rate_ == 0)
+ return 0.0;
+ return frames_per_buffer_ / (sample_rate_ / 1000.0);
+ }
+ double GetBufferSizeInSeconds() const {
+ if (sample_rate_ == 0)
+ return 0.0;
+ return static_cast<double>(frames_per_buffer_) / (sample_rate_);
+ }
+
+ private:
+ int sample_rate_;
+ int channels_;
+ size_t frames_per_buffer_;
+ size_t frames_per_10ms_buffer_;
};
} // namespace webrtc
-#endif // WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_DEFINES_H
+#endif // WEBRTC_MODULES_AUDIO_DEVICE_INCLUDE_AUDIO_DEVICE_DEFINES_H_
diff --git a/chromium/third_party/webrtc/modules/audio_device/include/fake_audio_device.h b/chromium/third_party/webrtc/modules/audio_device/include/fake_audio_device.h
index 8b7e87c619e..4c0739a003f 100644
--- a/chromium/third_party/webrtc/modules/audio_device/include/fake_audio_device.h
+++ b/chromium/third_party/webrtc/modules/audio_device/include/fake_audio_device.h
@@ -147,6 +147,10 @@ class FakeAudioDeviceModule : public AudioDeviceModule {
virtual bool BuiltInAECIsAvailable() const { return false; }
virtual int32_t EnableBuiltInAEC(bool enable) { return -1; }
virtual bool BuiltInAECIsEnabled() const { return false; }
+ virtual bool BuiltInAGCIsAvailable() const { return false; }
+ virtual int32_t EnableBuiltInAGC(bool enable) { return -1; }
+ virtual bool BuiltInNSIsAvailable() const { return false; }
+ virtual int32_t EnableBuiltInNS(bool enable) { return -1; }
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_device/ios/audio_device_ios.h b/chromium/third_party/webrtc/modules/audio_device/ios/audio_device_ios.h
index a3674502859..63f3cab7e27 100644
--- a/chromium/third_party/webrtc/modules/audio_device/ios/audio_device_ios.h
+++ b/chromium/third_party/webrtc/modules/audio_device/ios/audio_device_ios.h
@@ -8,263 +8,276 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_IOS_H
-#define WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_IOS_H
+#ifndef WEBRTC_MODULES_AUDIO_DEVICE_IOS_AUDIO_DEVICE_IOS_H_
+#define WEBRTC_MODULES_AUDIO_DEVICE_IOS_AUDIO_DEVICE_IOS_H_
#include <AudioUnit/AudioUnit.h>
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/base/thread_checker.h"
#include "webrtc/modules/audio_device/audio_device_generic.h"
-#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
-#include "webrtc/system_wrappers/interface/thread_wrapper.h"
namespace webrtc {
-const uint32_t N_REC_SAMPLES_PER_SEC = 44000;
-const uint32_t N_PLAY_SAMPLES_PER_SEC = 44000;
-
-const uint32_t N_REC_CHANNELS = 1; // default is mono recording
-const uint32_t N_PLAY_CHANNELS = 1; // default is mono playout
-const uint32_t N_DEVICE_CHANNELS = 8;
-
-const uint32_t ENGINE_REC_BUF_SIZE_IN_SAMPLES = (N_REC_SAMPLES_PER_SEC / 100);
-const uint32_t ENGINE_PLAY_BUF_SIZE_IN_SAMPLES = (N_PLAY_SAMPLES_PER_SEC / 100);
-
-// Number of 10 ms recording blocks in recording buffer
-const uint16_t N_REC_BUFFERS = 20;
+class FineAudioBuffer;
+
+// Implements full duplex 16-bit mono PCM audio support for iOS using a
+// Voice-Processing (VP) I/O audio unit in Core Audio. The VP I/O audio unit
+// supports audio echo cancellation. It also adds automatic gain control,
+// adjustment of voice-processing quality and muting.
+//
+// An instance must be created and destroyed on one and the same thread.
+// All supported public methods must also be called on the same thread.
+// A thread checker will RTC_DCHECK if any supported method is called on an
+// invalid thread.
+//
+// Recorded audio will be delivered on a real-time internal I/O thread in the
+// audio unit. The audio unit will also ask for audio data to play out on this
+// same thread.
class AudioDeviceIOS : public AudioDeviceGeneric {
public:
- AudioDeviceIOS(const int32_t id);
+ AudioDeviceIOS();
~AudioDeviceIOS();
- // Retrieve the currently utilized audio layer
- virtual int32_t ActiveAudioLayer(
- AudioDeviceModule::AudioLayer& audioLayer) const;
-
- // Main initializaton and termination
- virtual int32_t Init();
- virtual int32_t Terminate();
- virtual bool Initialized() const;
-
- // Device enumeration
- virtual int16_t PlayoutDevices();
- virtual int16_t RecordingDevices();
- virtual int32_t PlayoutDeviceName(uint16_t index,
- char name[kAdmMaxDeviceNameSize],
- char guid[kAdmMaxGuidSize]);
- virtual int32_t RecordingDeviceName(uint16_t index,
- char name[kAdmMaxDeviceNameSize],
- char guid[kAdmMaxGuidSize]);
-
- // Device selection
- virtual int32_t SetPlayoutDevice(uint16_t index);
- virtual int32_t SetPlayoutDevice(AudioDeviceModule::WindowsDeviceType device);
- virtual int32_t SetRecordingDevice(uint16_t index);
- virtual int32_t SetRecordingDevice(
- AudioDeviceModule::WindowsDeviceType device);
-
- // Audio transport initialization
- virtual int32_t PlayoutIsAvailable(bool& available);
- virtual int32_t InitPlayout();
- virtual bool PlayoutIsInitialized() const;
- virtual int32_t RecordingIsAvailable(bool& available);
- virtual int32_t InitRecording();
- virtual bool RecordingIsInitialized() const;
-
- // Audio transport control
- virtual int32_t StartPlayout();
- virtual int32_t StopPlayout();
- virtual bool Playing() const;
- virtual int32_t StartRecording();
- virtual int32_t StopRecording();
- virtual bool Recording() const;
-
- // Microphone Automatic Gain Control (AGC)
- virtual int32_t SetAGC(bool enable);
- virtual bool AGC() const;
-
- // Volume control based on the Windows Wave API (Windows only)
- virtual int32_t SetWaveOutVolume(uint16_t volumeLeft, uint16_t volumeRight);
- virtual int32_t WaveOutVolume(uint16_t& volumeLeft,
- uint16_t& volumeRight) const;
-
- // Audio mixer initialization
- virtual int32_t InitSpeaker();
- virtual bool SpeakerIsInitialized() const;
- virtual int32_t InitMicrophone();
- virtual bool MicrophoneIsInitialized() const;
-
- // Speaker volume controls
- virtual int32_t SpeakerVolumeIsAvailable(bool& available);
- virtual int32_t SetSpeakerVolume(uint32_t volume);
- virtual int32_t SpeakerVolume(uint32_t& volume) const;
- virtual int32_t MaxSpeakerVolume(uint32_t& maxVolume) const;
- virtual int32_t MinSpeakerVolume(uint32_t& minVolume) const;
- virtual int32_t SpeakerVolumeStepSize(uint16_t& stepSize) const;
-
- // Microphone volume controls
- virtual int32_t MicrophoneVolumeIsAvailable(bool& available);
- virtual int32_t SetMicrophoneVolume(uint32_t volume);
- virtual int32_t MicrophoneVolume(uint32_t& volume) const;
- virtual int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const;
- virtual int32_t MinMicrophoneVolume(uint32_t& minVolume) const;
- virtual int32_t MicrophoneVolumeStepSize(uint16_t& stepSize) const;
-
- // Microphone mute control
- virtual int32_t MicrophoneMuteIsAvailable(bool& available);
- virtual int32_t SetMicrophoneMute(bool enable);
- virtual int32_t MicrophoneMute(bool& enabled) const;
-
- // Speaker mute control
- virtual int32_t SpeakerMuteIsAvailable(bool& available);
- virtual int32_t SetSpeakerMute(bool enable);
- virtual int32_t SpeakerMute(bool& enabled) const;
-
- // Microphone boost control
- virtual int32_t MicrophoneBoostIsAvailable(bool& available);
- virtual int32_t SetMicrophoneBoost(bool enable);
- virtual int32_t MicrophoneBoost(bool& enabled) const;
-
- // Stereo support
- virtual int32_t StereoPlayoutIsAvailable(bool& available);
- virtual int32_t SetStereoPlayout(bool enable);
- virtual int32_t StereoPlayout(bool& enabled) const;
- virtual int32_t StereoRecordingIsAvailable(bool& available);
- virtual int32_t SetStereoRecording(bool enable);
- virtual int32_t StereoRecording(bool& enabled) const;
-
- // Delay information and control
- virtual int32_t SetPlayoutBuffer(const AudioDeviceModule::BufferType type,
- uint16_t sizeMS);
- virtual int32_t PlayoutBuffer(AudioDeviceModule::BufferType& type,
- uint16_t& sizeMS) const;
- virtual int32_t PlayoutDelay(uint16_t& delayMS) const;
- virtual int32_t RecordingDelay(uint16_t& delayMS) const;
-
- // CPU load
- virtual int32_t CPULoad(uint16_t& load) const;
-
- public:
- virtual bool PlayoutWarning() const;
- virtual bool PlayoutError() const;
- virtual bool RecordingWarning() const;
- virtual bool RecordingError() const;
- virtual void ClearPlayoutWarning();
- virtual void ClearPlayoutError();
- virtual void ClearRecordingWarning();
- virtual void ClearRecordingError();
-
- public:
- virtual void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);
-
- // Reset Audio Device (for mobile devices only)
- virtual int32_t ResetAudioDevice();
-
- // enable or disable loud speaker (for iphone only)
- virtual int32_t SetLoudspeakerStatus(bool enable);
- virtual int32_t GetLoudspeakerStatus(bool& enabled) const;
-
- private:
- void Lock() {
- _critSect.Enter();
- }
-
- void UnLock() {
- _critSect.Leave();
- }
-
- int32_t Id() {
- return _id;
- }
-
- // Init and shutdown
- int32_t InitPlayOrRecord();
- int32_t ShutdownPlayOrRecord();
-
- void UpdateRecordingDelay();
- void UpdatePlayoutDelay();
-
- static OSStatus RecordProcess(void *inRefCon,
- AudioUnitRenderActionFlags *ioActionFlags,
- const AudioTimeStamp *timeStamp,
- UInt32 inBusNumber,
- UInt32 inNumberFrames,
- AudioBufferList *ioData);
-
- static OSStatus PlayoutProcess(void *inRefCon,
- AudioUnitRenderActionFlags *ioActionFlags,
- const AudioTimeStamp *timeStamp,
- UInt32 inBusNumber,
- UInt32 inNumberFrames,
- AudioBufferList *ioData);
-
- OSStatus RecordProcessImpl(AudioUnitRenderActionFlags *ioActionFlags,
- const AudioTimeStamp *timeStamp,
- uint32_t inBusNumber,
- uint32_t inNumberFrames);
-
- OSStatus PlayoutProcessImpl(uint32_t inNumberFrames,
- AudioBufferList *ioData);
-
- static bool RunCapture(void* ptrThis);
- bool CaptureWorkerThread();
+ void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) override;
+
+ int32_t Init() override;
+ int32_t Terminate() override;
+ bool Initialized() const override { return initialized_; }
+
+ int32_t InitPlayout() override;
+ bool PlayoutIsInitialized() const override { return play_is_initialized_; }
+
+ int32_t InitRecording() override;
+ bool RecordingIsInitialized() const override { return rec_is_initialized_; }
+
+ int32_t StartPlayout() override;
+ int32_t StopPlayout() override;
+ bool Playing() const override { return playing_; }
+
+ int32_t StartRecording() override;
+ int32_t StopRecording() override;
+ bool Recording() const override { return recording_; }
+
+ int32_t SetLoudspeakerStatus(bool enable) override;
+ int32_t GetLoudspeakerStatus(bool& enabled) const override;
+
+  // These methods return hard-coded delay values and not dynamic delay
+ // estimates. The reason is that iOS supports a built-in AEC and the WebRTC
+ // AEC will always be disabled in the Libjingle layer to avoid running two
+ // AEC implementations at the same time. And, it saves resources to avoid
+ // updating these delay values continuously.
+ // TODO(henrika): it would be possible to mark these two methods as not
+ // implemented since they are only called for A/V-sync purposes today and
+ // A/V-sync is not supported on iOS. However, we avoid adding error messages
+  // to the log by using these dummy implementations instead.
+ int32_t PlayoutDelay(uint16_t& delayMS) const override;
+ int32_t RecordingDelay(uint16_t& delayMS) const override;
+
+ // Native audio parameters stored during construction.
+ // These methods are unique for the iOS implementation.
+ int GetPlayoutAudioParameters(AudioParameters* params) const override;
+ int GetRecordAudioParameters(AudioParameters* params) const override;
+
+ // These methods are currently not fully implemented on iOS:
+
+ // See audio_device_not_implemented.cc for trivial implementations.
+ int32_t PlayoutBuffer(AudioDeviceModule::BufferType& type,
+ uint16_t& sizeMS) const override;
+ int32_t ActiveAudioLayer(AudioDeviceModule::AudioLayer& audioLayer) const;
+ int32_t ResetAudioDevice() override;
+ int32_t PlayoutIsAvailable(bool& available) override;
+ int32_t RecordingIsAvailable(bool& available) override;
+ int32_t SetAGC(bool enable) override;
+ bool AGC() const override;
+ int16_t PlayoutDevices() override;
+ int16_t RecordingDevices() override;
+ int32_t PlayoutDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) override;
+ int32_t RecordingDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) override;
+ int32_t SetPlayoutDevice(uint16_t index) override;
+ int32_t SetPlayoutDevice(
+ AudioDeviceModule::WindowsDeviceType device) override;
+ int32_t SetRecordingDevice(uint16_t index) override;
+ int32_t SetRecordingDevice(
+ AudioDeviceModule::WindowsDeviceType device) override;
+ int32_t SetWaveOutVolume(uint16_t volumeLeft, uint16_t volumeRight) override;
+ int32_t WaveOutVolume(uint16_t& volumeLeft,
+ uint16_t& volumeRight) const override;
+ int32_t InitSpeaker() override;
+ bool SpeakerIsInitialized() const override;
+ int32_t InitMicrophone() override;
+ bool MicrophoneIsInitialized() const override;
+ int32_t SpeakerVolumeIsAvailable(bool& available) override;
+ int32_t SetSpeakerVolume(uint32_t volume) override;
+ int32_t SpeakerVolume(uint32_t& volume) const override;
+ int32_t MaxSpeakerVolume(uint32_t& maxVolume) const override;
+ int32_t MinSpeakerVolume(uint32_t& minVolume) const override;
+ int32_t SpeakerVolumeStepSize(uint16_t& stepSize) const override;
+ int32_t MicrophoneVolumeIsAvailable(bool& available) override;
+ int32_t SetMicrophoneVolume(uint32_t volume) override;
+ int32_t MicrophoneVolume(uint32_t& volume) const override;
+ int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const override;
+ int32_t MinMicrophoneVolume(uint32_t& minVolume) const override;
+ int32_t MicrophoneVolumeStepSize(uint16_t& stepSize) const override;
+ int32_t MicrophoneMuteIsAvailable(bool& available) override;
+ int32_t SetMicrophoneMute(bool enable) override;
+ int32_t MicrophoneMute(bool& enabled) const override;
+ int32_t SpeakerMuteIsAvailable(bool& available) override;
+ int32_t SetSpeakerMute(bool enable) override;
+ int32_t SpeakerMute(bool& enabled) const override;
+ int32_t MicrophoneBoostIsAvailable(bool& available) override;
+ int32_t SetMicrophoneBoost(bool enable) override;
+ int32_t MicrophoneBoost(bool& enabled) const override;
+ int32_t StereoPlayoutIsAvailable(bool& available) override;
+ int32_t SetStereoPlayout(bool enable) override;
+ int32_t StereoPlayout(bool& enabled) const override;
+ int32_t StereoRecordingIsAvailable(bool& available) override;
+ int32_t SetStereoRecording(bool enable) override;
+ int32_t StereoRecording(bool& enabled) const override;
+ int32_t SetPlayoutBuffer(const AudioDeviceModule::BufferType type,
+ uint16_t sizeMS) override;
+ int32_t CPULoad(uint16_t& load) const override;
+ bool PlayoutWarning() const override;
+ bool PlayoutError() const override;
+ bool RecordingWarning() const override;
+ bool RecordingError() const override;
+ void ClearPlayoutWarning() override {}
+ void ClearPlayoutError() override {}
+ void ClearRecordingWarning() override {}
+ void ClearRecordingError() override {}
private:
- AudioDeviceBuffer* _ptrAudioBuffer;
-
- CriticalSectionWrapper& _critSect;
-
- rtc::scoped_ptr<ThreadWrapper> _captureWorkerThread;
-
- int32_t _id;
-
- AudioUnit _auVoiceProcessing;
- void* _audioInterruptionObserver;
+ // Uses current |playout_parameters_| and |record_parameters_| to inform the
+ // audio device buffer (ADB) about our internal audio parameters.
+ void UpdateAudioDeviceBuffer();
+
+ // Since the preferred audio parameters are only hints to the OS, the actual
+ // values may be different once the AVAudioSession has been activated.
+ // This method asks for the current hardware parameters and takes actions
+ // if they should differ from what we have asked for initially. It also
+ // defines |playout_parameters_| and |record_parameters_|.
+ void SetupAudioBuffersForActiveAudioSession();
+
+ // Creates a Voice-Processing I/O unit and configures it for full-duplex
+ // audio. The selected stream format is selected to avoid internal resampling
+ // and to match the 10ms callback rate for WebRTC as well as possible.
+ // This method also initializes the created audio unit.
+ bool SetupAndInitializeVoiceProcessingAudioUnit();
+
+ // Activates our audio session, creates and initializes the voice-processing
+ // audio unit and verifies that we got the preferred native audio parameters.
+ bool InitPlayOrRecord();
+
+ // Closes and deletes the voice-processing I/O unit.
+ bool ShutdownPlayOrRecord();
+
+ // Callback function called on a real-time priority I/O thread from the audio
+ // unit. This method is used to signal that recorded audio is available.
+ static OSStatus RecordedDataIsAvailable(
+ void* in_ref_con,
+ AudioUnitRenderActionFlags* io_action_flags,
+ const AudioTimeStamp* time_stamp,
+ UInt32 in_bus_number,
+ UInt32 in_number_frames,
+ AudioBufferList* io_data);
+ OSStatus OnRecordedDataIsAvailable(
+ AudioUnitRenderActionFlags* io_action_flags,
+ const AudioTimeStamp* time_stamp,
+ UInt32 in_bus_number,
+ UInt32 in_number_frames);
+
+ // Callback function called on a real-time priority I/O thread from the audio
+ // unit. This method is used to provide audio samples to the audio unit.
+ static OSStatus GetPlayoutData(void* in_ref_con,
+ AudioUnitRenderActionFlags* io_action_flags,
+ const AudioTimeStamp* time_stamp,
+ UInt32 in_bus_number,
+ UInt32 in_number_frames,
+ AudioBufferList* io_data);
+ OSStatus OnGetPlayoutData(AudioUnitRenderActionFlags* io_action_flags,
+ UInt32 in_number_frames,
+ AudioBufferList* io_data);
private:
- bool _initialized;
- bool _isShutDown;
- bool _recording;
- bool _playing;
- bool _recIsInitialized;
- bool _playIsInitialized;
-
- bool _recordingDeviceIsSpecified;
- bool _playoutDeviceIsSpecified;
- bool _micIsInitialized;
- bool _speakerIsInitialized;
-
- bool _AGC;
-
- // The sampling rate to use with Audio Device Buffer
- uint32_t _adbSampFreq;
-
- // Delay calculation
- uint32_t _recordingDelay;
- uint32_t _playoutDelay;
- uint32_t _playoutDelayMeasurementCounter;
- uint32_t _recordingDelayHWAndOS;
- uint32_t _recordingDelayMeasurementCounter;
-
- // Errors and warnings count
- uint16_t _playWarning;
- uint16_t _playError;
- uint16_t _recWarning;
- uint16_t _recError;
-
- // Playout buffer, needed for 44.0 / 44.1 kHz mismatch
- int16_t _playoutBuffer[ENGINE_PLAY_BUF_SIZE_IN_SAMPLES];
- uint32_t _playoutBufferUsed; // How much is filled
-
- // Recording buffers
- int16_t _recordingBuffer[N_REC_BUFFERS][ENGINE_REC_BUF_SIZE_IN_SAMPLES];
- uint32_t _recordingLength[N_REC_BUFFERS];
- uint32_t _recordingSeqNumber[N_REC_BUFFERS];
- uint32_t _recordingCurrentSeq;
-
- // Current total size all data in buffers, used for delay estimate
- uint32_t _recordingBufferTotalSize;
+ // Ensures that methods are called from the same thread as this object is
+ // created on.
+ rtc::ThreadChecker thread_checker_;
+
+ // Raw pointer handle provided to us in AttachAudioBuffer(). Owned by the
+ // AudioDeviceModuleImpl class and called by AudioDeviceModuleImpl::Create().
+ // The AudioDeviceBuffer is a member of the AudioDeviceModuleImpl instance
+ // and therefore outlives this object.
+ AudioDeviceBuffer* audio_device_buffer_;
+
+ // Contains audio parameters (sample rate, #channels, buffer size etc.) for
+  // the playout and recording sides. These structures are set in two steps:
+ // first, native sample rate and #channels are defined in Init(). Next, the
+ // audio session is activated and we verify that the preferred parameters
+ // were granted by the OS. At this stage it is also possible to add a third
+ // component to the parameters; the native I/O buffer duration.
+ // A RTC_CHECK will be hit if we for some reason fail to open an audio session
+ // using the specified parameters.
+ AudioParameters playout_parameters_;
+ AudioParameters record_parameters_;
+
+ // The Voice-Processing I/O unit has the same characteristics as the
+ // Remote I/O unit (supports full duplex low-latency audio input and output)
+  // and adds AEC for two-way duplex communication. It also adds AGC,
+ // adjustment of voice-processing quality, and muting. Hence, ideal for
+ // VoIP applications.
+ AudioUnit vpio_unit_;
+
+ // FineAudioBuffer takes an AudioDeviceBuffer which delivers audio data
+ // in chunks of 10ms. It then allows for this data to be pulled in
+ // a finer or coarser granularity. I.e. interacting with this class instead
+ // of directly with the AudioDeviceBuffer one can ask for any number of
+  // audio data samples. It also supports a similar scheme for the recording
+ // side.
+ // Example: native buffer size can be 128 audio frames at 16kHz sample rate.
+ // WebRTC will provide 480 audio frames per 10ms but iOS asks for 128
+ // in each callback (one every 8ms). This class can then ask for 128 and the
+ // FineAudioBuffer will ask WebRTC for new data only when needed and also
+ // cache non-utilized audio between callbacks. On the recording side, iOS
+ // can provide audio data frames of size 128 and these are accumulated until
+ // enough data to supply one 10ms call exists. This 10ms chunk is then sent
+ // to WebRTC and the remaining part is stored.
+ rtc::scoped_ptr<FineAudioBuffer> fine_audio_buffer_;
+
+ // Extra audio buffer to be used by the playout side for rendering audio.
+ // The buffer size is given by FineAudioBuffer::RequiredBufferSizeBytes().
+ rtc::scoped_ptr<SInt8[]> playout_audio_buffer_;
+
+ // Provides a mechanism for encapsulating one or more buffers of audio data.
+ // Only used on the recording side.
+ AudioBufferList audio_record_buffer_list_;
+
+ // Temporary storage for recorded data. AudioUnitRender() renders into this
+ // array as soon as a frame of the desired buffer size has been recorded.
+ rtc::scoped_ptr<SInt8[]> record_audio_buffer_;
+
+ // Set to 1 when recording is active and 0 otherwise.
+ volatile int recording_;
+
+ // Set to 1 when playout is active and 0 otherwise.
+ volatile int playing_;
+
+ // Set to true after successful call to Init(), false otherwise.
+ bool initialized_;
+
+ // Set to true after successful call to InitRecording(), false otherwise.
+ bool rec_is_initialized_;
+
+ // Set to true after successful call to InitPlayout(), false otherwise.
+ bool play_is_initialized_;
+
+ // Audio interruption observer instance.
+ void* audio_interruption_observer_;
};
} // namespace webrtc
-#endif // WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_IOS_H
+#endif // WEBRTC_MODULES_AUDIO_DEVICE_IOS_AUDIO_DEVICE_IOS_H_
diff --git a/chromium/third_party/webrtc/modules/audio_device/ios/audio_device_ios.mm b/chromium/third_party/webrtc/modules/audio_device/ios/audio_device_ios.mm
index 47503a96d70..9db9871c359 100644
--- a/chromium/third_party/webrtc/modules/audio_device/ios/audio_device_ios.mm
+++ b/chromium/third_party/webrtc/modules/audio_device/ios/audio_device_ios.mm
@@ -8,1905 +8,762 @@
* be found in the AUTHORS file in the root of the source tree.
*/
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+#error "This file requires ARC support."
+#endif
+
#import <AVFoundation/AVFoundation.h>
#import <Foundation/Foundation.h>
#include "webrtc/modules/audio_device/ios/audio_device_ios.h"
-#include "webrtc/system_wrappers/interface/trace.h"
+#include "webrtc/base/atomicops.h"
+#include "webrtc/base/checks.h"
+#include "webrtc/base/logging.h"
+#include "webrtc/modules/audio_device/fine_audio_buffer.h"
+#include "webrtc/modules/utility/interface/helpers_ios.h"
namespace webrtc {
-AudioDeviceIOS::AudioDeviceIOS(const int32_t id)
- :
- _ptrAudioBuffer(NULL),
- _critSect(*CriticalSectionWrapper::CreateCriticalSection()),
- _id(id),
- _auVoiceProcessing(NULL),
- _audioInterruptionObserver(NULL),
- _initialized(false),
- _isShutDown(false),
- _recording(false),
- _playing(false),
- _recIsInitialized(false),
- _playIsInitialized(false),
- _recordingDeviceIsSpecified(false),
- _playoutDeviceIsSpecified(false),
- _micIsInitialized(false),
- _speakerIsInitialized(false),
- _AGC(false),
- _adbSampFreq(0),
- _recordingDelay(0),
- _playoutDelay(0),
- _playoutDelayMeasurementCounter(9999),
- _recordingDelayHWAndOS(0),
- _recordingDelayMeasurementCounter(9999),
- _playWarning(0),
- _playError(0),
- _recWarning(0),
- _recError(0),
- _playoutBufferUsed(0),
- _recordingCurrentSeq(0),
- _recordingBufferTotalSize(0) {
- WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, id,
- "%s created", __FUNCTION__);
-
- memset(_playoutBuffer, 0, sizeof(_playoutBuffer));
- memset(_recordingBuffer, 0, sizeof(_recordingBuffer));
- memset(_recordingLength, 0, sizeof(_recordingLength));
- memset(_recordingSeqNumber, 0, sizeof(_recordingSeqNumber));
-}
-
-AudioDeviceIOS::~AudioDeviceIOS() {
- WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id,
- "%s destroyed", __FUNCTION__);
-
- Terminate();
-
- delete &_critSect;
-}
-
-
-// ============================================================================
-// API
-// ============================================================================
-
-void AudioDeviceIOS::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
-
- CriticalSectionScoped lock(&_critSect);
-
- _ptrAudioBuffer = audioBuffer;
-
- // inform the AudioBuffer about default settings for this implementation
- _ptrAudioBuffer->SetRecordingSampleRate(ENGINE_REC_BUF_SIZE_IN_SAMPLES);
- _ptrAudioBuffer->SetPlayoutSampleRate(ENGINE_PLAY_BUF_SIZE_IN_SAMPLES);
- _ptrAudioBuffer->SetRecordingChannels(N_REC_CHANNELS);
- _ptrAudioBuffer->SetPlayoutChannels(N_PLAY_CHANNELS);
-}
-
-int32_t AudioDeviceIOS::ActiveAudioLayer(
- AudioDeviceModule::AudioLayer& audioLayer) const {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
- audioLayer = AudioDeviceModule::kPlatformDefaultAudio;
- return 0;
-}
-
-int32_t AudioDeviceIOS::Init() {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
-
- CriticalSectionScoped lock(&_critSect);
-
- if (_initialized) {
- return 0;
- }
-
- _isShutDown = false;
-
- // Create and start capture thread
- if (!_captureWorkerThread) {
- _captureWorkerThread = ThreadWrapper::CreateThread(
- RunCapture, this, "CaptureWorkerThread");
- bool res = _captureWorkerThread->Start();
- WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice,
- _id, "CaptureWorkerThread started (res=%d)", res);
- _captureWorkerThread->SetPriority(kRealtimePriority);
- } else {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice,
- _id, "Thread already created");
- }
- _playWarning = 0;
- _playError = 0;
- _recWarning = 0;
- _recError = 0;
-
- _initialized = true;
-
- return 0;
-}
-
-int32_t AudioDeviceIOS::Terminate() {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
-
- if (!_initialized) {
- return 0;
- }
-
-
- // Stop capture thread
- if (_captureWorkerThread) {
- WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice,
- _id, "Stopping CaptureWorkerThread");
- bool res = _captureWorkerThread->Stop();
- WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice,
- _id, "CaptureWorkerThread stopped (res=%d)", res);
- _captureWorkerThread.reset();
- }
-
- // Shut down Audio Unit
- ShutdownPlayOrRecord();
-
- _isShutDown = true;
- _initialized = false;
- _speakerIsInitialized = false;
- _micIsInitialized = false;
- _playoutDeviceIsSpecified = false;
- _recordingDeviceIsSpecified = false;
- return 0;
-}
-
-bool AudioDeviceIOS::Initialized() const {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
- return (_initialized);
-}
-
-int32_t AudioDeviceIOS::InitSpeaker() {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
-
- CriticalSectionScoped lock(&_critSect);
-
- if (!_initialized) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice,
- _id, " Not initialized");
- return -1;
- }
-
- if (_playing) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice,
- _id, " Cannot init speaker when playing");
- return -1;
- }
-
- if (!_playoutDeviceIsSpecified) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice,
- _id, " Playout device is not specified");
- return -1;
- }
-
- // Do nothing
- _speakerIsInitialized = true;
-
- return 0;
-}
-
-int32_t AudioDeviceIOS::InitMicrophone() {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
-
- CriticalSectionScoped lock(&_critSect);
-
- if (!_initialized) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice,
- _id, " Not initialized");
- return -1;
- }
-
- if (_recording) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice,
- _id, " Cannot init mic when recording");
- return -1;
- }
-
- if (!_recordingDeviceIsSpecified) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice,
- _id, " Recording device is not specified");
- return -1;
- }
-
- // Do nothing
-
- _micIsInitialized = true;
-
- return 0;
-}
-
-bool AudioDeviceIOS::SpeakerIsInitialized() const {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
- return _speakerIsInitialized;
-}
-
-bool AudioDeviceIOS::MicrophoneIsInitialized() const {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
- return _micIsInitialized;
-}
-
-int32_t AudioDeviceIOS::SpeakerVolumeIsAvailable(bool& available) {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
-
- available = false; // Speaker volume not supported on iOS
-
- return 0;
-}
-
-int32_t AudioDeviceIOS::SetSpeakerVolume(uint32_t volume) {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "AudioDeviceIOS::SetSpeakerVolume(volume=%u)", volume);
-
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " API call not supported on this platform");
- return -1;
-}
-
-int32_t AudioDeviceIOS::SpeakerVolume(uint32_t& volume) const {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
-
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " API call not supported on this platform");
- return -1;
-}
-
-int32_t
- AudioDeviceIOS::SetWaveOutVolume(uint16_t volumeLeft,
- uint16_t volumeRight) {
- WEBRTC_TRACE(
- kTraceModuleCall,
- kTraceAudioDevice,
- _id,
- "AudioDeviceIOS::SetWaveOutVolume(volumeLeft=%u, volumeRight=%u)",
- volumeLeft, volumeRight);
-
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " API call not supported on this platform");
-
- return -1;
-}
-
-int32_t
-AudioDeviceIOS::WaveOutVolume(uint16_t& /*volumeLeft*/,
- uint16_t& /*volumeRight*/) const {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
-
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " API call not supported on this platform");
- return -1;
-}
-
-int32_t
- AudioDeviceIOS::MaxSpeakerVolume(uint32_t& maxVolume) const {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
-
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " API call not supported on this platform");
- return -1;
-}
-
-int32_t AudioDeviceIOS::MinSpeakerVolume(
- uint32_t& minVolume) const {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
-
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " API call not supported on this platform");
- return -1;
-}
-
-int32_t
- AudioDeviceIOS::SpeakerVolumeStepSize(uint16_t& stepSize) const {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
-
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " API call not supported on this platform");
- return -1;
-}
-
-int32_t AudioDeviceIOS::SpeakerMuteIsAvailable(bool& available) {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
-
- available = false; // Speaker mute not supported on iOS
-
- return 0;
-}
-
-int32_t AudioDeviceIOS::SetSpeakerMute(bool enable) {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
-
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " API call not supported on this platform");
- return -1;
-}
-
-int32_t AudioDeviceIOS::SpeakerMute(bool& enabled) const {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
-
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " API call not supported on this platform");
- return -1;
-}
-
-int32_t AudioDeviceIOS::MicrophoneMuteIsAvailable(bool& available) {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
-
- available = false; // Mic mute not supported on iOS
-
- return 0;
-}
-
-int32_t AudioDeviceIOS::SetMicrophoneMute(bool enable) {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
-
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " API call not supported on this platform");
- return -1;
-}
-
-int32_t AudioDeviceIOS::MicrophoneMute(bool& enabled) const {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
-
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " API call not supported on this platform");
- return -1;
-}
-
-int32_t AudioDeviceIOS::MicrophoneBoostIsAvailable(bool& available) {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
-
- available = false; // Mic boost not supported on iOS
-
- return 0;
-}
-
-int32_t AudioDeviceIOS::SetMicrophoneBoost(bool enable) {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "AudioDeviceIOS::SetMicrophoneBoost(enable=%u)", enable);
-
- if (!_micIsInitialized) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " Microphone not initialized");
- return -1;
- }
-
- if (enable) {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " SetMicrophoneBoost cannot be enabled on this platform");
- return -1;
- }
-
- return 0;
-}
-
-int32_t AudioDeviceIOS::MicrophoneBoost(bool& enabled) const {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
- if (!_micIsInitialized) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " Microphone not initialized");
- return -1;
- }
-
- enabled = false;
- return 0;
-}
-
-int32_t AudioDeviceIOS::StereoRecordingIsAvailable(bool& available) {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
-
- available = false; // Stereo recording not supported on iOS
-
- return 0;
-}
-
-int32_t AudioDeviceIOS::SetStereoRecording(bool enable) {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "AudioDeviceIOS::SetStereoRecording(enable=%u)", enable);
-
- if (enable) {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " Stereo recording is not supported on this platform");
- return -1;
- }
- return 0;
-}
-
-int32_t AudioDeviceIOS::StereoRecording(bool& enabled) const {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
-
- enabled = false;
- return 0;
-}
-
-int32_t AudioDeviceIOS::StereoPlayoutIsAvailable(bool& available) {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
-
- available = false; // Stereo playout not supported on iOS
-
- return 0;
-}
-
-int32_t AudioDeviceIOS::SetStereoPlayout(bool enable) {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "AudioDeviceIOS::SetStereoPlayout(enable=%u)", enable);
-
- if (enable) {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " Stereo playout is not supported on this platform");
- return -1;
- }
- return 0;
-}
-
-int32_t AudioDeviceIOS::StereoPlayout(bool& enabled) const {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
-
- enabled = false;
- return 0;
-}
-
-int32_t AudioDeviceIOS::SetAGC(bool enable) {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "AudioDeviceIOS::SetAGC(enable=%d)", enable);
-
- _AGC = enable;
-
- return 0;
-}
-
-bool AudioDeviceIOS::AGC() const {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
-
- return _AGC;
-}
-
-int32_t AudioDeviceIOS::MicrophoneVolumeIsAvailable(bool& available) {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
-
- available = false; // Mic volume not supported on IOS
-
- return 0;
-}
-
-int32_t AudioDeviceIOS::SetMicrophoneVolume(uint32_t volume) {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "AudioDeviceIOS::SetMicrophoneVolume(volume=%u)", volume);
-
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " API call not supported on this platform");
- return -1;
-}
-
-int32_t
- AudioDeviceIOS::MicrophoneVolume(uint32_t& volume) const {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
-
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " API call not supported on this platform");
- return -1;
-}
-
-int32_t
- AudioDeviceIOS::MaxMicrophoneVolume(uint32_t& maxVolume) const {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
-
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " API call not supported on this platform");
- return -1;
-}
-
-int32_t
- AudioDeviceIOS::MinMicrophoneVolume(uint32_t& minVolume) const {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
-
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " API call not supported on this platform");
- return -1;
-}
-
-int32_t
- AudioDeviceIOS::MicrophoneVolumeStepSize(
- uint16_t& stepSize) const {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
-
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " API call not supported on this platform");
- return -1;
-}
-
-int16_t AudioDeviceIOS::PlayoutDevices() {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
-
- return (int16_t)1;
-}
-
-int32_t AudioDeviceIOS::SetPlayoutDevice(uint16_t index) {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "AudioDeviceIOS::SetPlayoutDevice(index=%u)", index);
-
- if (_playIsInitialized) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " Playout already initialized");
- return -1;
- }
-
- if (index !=0) {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " SetPlayoutDevice invalid index");
- return -1;
- }
- _playoutDeviceIsSpecified = true;
-
- return 0;
-}
-
-int32_t
- AudioDeviceIOS::SetPlayoutDevice(AudioDeviceModule::WindowsDeviceType) {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- "WindowsDeviceType not supported");
- return -1;
-}
-
-int32_t
- AudioDeviceIOS::PlayoutDeviceName(uint16_t index,
- char name[kAdmMaxDeviceNameSize],
- char guid[kAdmMaxGuidSize]) {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "AudioDeviceIOS::PlayoutDeviceName(index=%u)", index);
-
- if (index != 0) {
- return -1;
- }
- // return empty strings
- memset(name, 0, kAdmMaxDeviceNameSize);
- if (guid != NULL) {
- memset(guid, 0, kAdmMaxGuidSize);
- }
-
- return 0;
-}
-
-int32_t
- AudioDeviceIOS::RecordingDeviceName(uint16_t index,
- char name[kAdmMaxDeviceNameSize],
- char guid[kAdmMaxGuidSize]) {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "AudioDeviceIOS::RecordingDeviceName(index=%u)", index);
-
- if (index != 0) {
- return -1;
- }
- // return empty strings
- memset(name, 0, kAdmMaxDeviceNameSize);
- if (guid != NULL) {
- memset(guid, 0, kAdmMaxGuidSize);
- }
-
- return 0;
-}
-
-int16_t AudioDeviceIOS::RecordingDevices() {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
-
- return (int16_t)1;
-}
-
-int32_t AudioDeviceIOS::SetRecordingDevice(uint16_t index) {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "AudioDeviceIOS::SetRecordingDevice(index=%u)", index);
-
- if (_recIsInitialized) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " Recording already initialized");
- return -1;
+#define LOGI() LOG(LS_INFO) << "AudioDeviceIOS::"
+
+#define LOG_AND_RETURN_IF_ERROR(error, message) \
+ do { \
+ OSStatus err = error; \
+ if (err) { \
+ LOG(LS_ERROR) << message << ": " << err; \
+ return false; \
+ } \
+ } while (0)
+
+// Preferred hardware sample rate (unit is in Hertz). The client sample rate
+// will be set to this value as well to avoid resampling in the audio unit's
+// format converter. Note that some devices, e.g. BT headsets, only support
+// 8000Hz as their native sample rate.
+const double kPreferredSampleRate = 48000.0;
+// Use a hardware I/O buffer size (unit is in seconds) that matches the 10ms
+// size used by WebRTC. The exact actual size will differ between devices.
+// Example: using 48kHz on iPhone 6 results in a native buffer size of
+// ~10.6667ms or 512 audio frames per buffer. The FineAudioBuffer instance will
+// take care of any buffering required to convert between native buffers and
+// buffers used by WebRTC. It is beneficial for the performance if the native
+// size is as close to 10ms as possible since it results in "clean" callback
+// sequence without bursts of callbacks back to back.
+const double kPreferredIOBufferDuration = 0.01;
+// Try to use mono to save resources. Also avoids channel format conversion
+// in the I/O audio unit. Initial tests have shown that it is possible to use
+// mono natively for built-in microphones and for BT headsets but not for
+// wired headsets. Wired headsets only support stereo as native channel format
+// but it is a low cost operation to do a format conversion to mono in the
+// audio unit. Hence, we will not hit a RTC_CHECK in
+// VerifyAudioParametersForActiveAudioSession() for a mismatch between the
+// preferred number of channels and the actual number of channels.
+const int kPreferredNumberOfChannels = 1;
+// Number of bytes per audio sample for 16-bit signed integer representation.
+const UInt32 kBytesPerSample = 2;
+// Hardcoded delay estimates based on real measurements.
+// TODO(henrika): these values are not used in combination with built-in AEC.
+// Can most likely be removed.
+const UInt16 kFixedPlayoutDelayEstimate = 30;
+const UInt16 kFixedRecordDelayEstimate = 30;
+
+using ios::CheckAndLogError;
+
+// Activates an audio session suitable for full duplex VoIP sessions when
+// |activate| is true. Also sets the preferred sample rate and IO buffer
+// duration. Deactivates an active audio session if |activate| is set to false.
+static void ActivateAudioSession(AVAudioSession* session, bool activate) {
+ LOG(LS_INFO) << "ActivateAudioSession(" << activate << ")";
+ @autoreleasepool {
+ NSError* error = nil;
+ BOOL success = NO;
+ // Deactivate the audio session and return if |activate| is false.
+ if (!activate) {
+ success = [session setActive:NO error:&error];
+ RTC_DCHECK(CheckAndLogError(success, error));
+ return;
+ }
+ // Use a category which supports simultaneous recording and playback.
+ // By default, using this category implies that our app’s audio is
+ // nonmixable, hence activating the session will interrupt any other
+ // audio sessions which are also nonmixable.
+ if (session.category != AVAudioSessionCategoryPlayAndRecord) {
+ error = nil;
+ success = [session setCategory:AVAudioSessionCategoryPlayAndRecord
+ error:&error];
+ RTC_DCHECK(CheckAndLogError(success, error));
}
-
- if (index !=0) {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " SetRecordingDevice invalid index");
- return -1;
+ // Specify mode for two-way voice communication (e.g. VoIP).
+ if (session.mode != AVAudioSessionModeVoiceChat) {
+ error = nil;
+ success = [session setMode:AVAudioSessionModeVoiceChat error:&error];
+ RTC_DCHECK(CheckAndLogError(success, error));
}
+ // Set the session's sample rate or the hardware sample rate.
+ // It is essential that we use the same sample rate as stream format
+ // to ensure that the I/O unit does not have to do sample rate conversion.
+ error = nil;
+ success =
+ [session setPreferredSampleRate:kPreferredSampleRate error:&error];
+ RTC_DCHECK(CheckAndLogError(success, error));
+ // Set the preferred audio I/O buffer duration, in seconds.
+ // TODO(henrika): add more comments here.
+ error = nil;
+ success = [session setPreferredIOBufferDuration:kPreferredIOBufferDuration
+ error:&error];
+ RTC_DCHECK(CheckAndLogError(success, error));
- _recordingDeviceIsSpecified = true;
-
- return 0;
-}
+ // TODO(henrika): add observers here...
-int32_t
- AudioDeviceIOS::SetRecordingDevice(
- AudioDeviceModule::WindowsDeviceType) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- "WindowsDeviceType not supported");
- return -1;
+ // Activate the audio session. Activation can fail if another active audio
+ // session (e.g. phone call) has higher priority than ours.
+ error = nil;
+ success = [session setActive:YES error:&error];
+ RTC_DCHECK(CheckAndLogError(success, error));
+ RTC_CHECK(session.isInputAvailable) << "No input path is available!";
+ // Ensure that category and mode are actually activated.
+ RTC_DCHECK(
+ [session.category isEqualToString:AVAudioSessionCategoryPlayAndRecord]);
+ RTC_DCHECK([session.mode isEqualToString:AVAudioSessionModeVoiceChat]);
+ // Try to set the preferred number of hardware audio channels. These calls
+ // must be done after setting the audio session’s category and mode and
+ // activating the session.
+  // We try to use mono in both directions to save resources and format
+  // conversions in the audio unit. Some devices only support stereo;
+  // e.g. wired headset on iPhone 6.
+ // TODO(henrika): add support for stereo if needed.
+ error = nil;
+ success =
+ [session setPreferredInputNumberOfChannels:kPreferredNumberOfChannels
+ error:&error];
+ RTC_DCHECK(CheckAndLogError(success, error));
+ error = nil;
+ success =
+ [session setPreferredOutputNumberOfChannels:kPreferredNumberOfChannels
+ error:&error];
+ RTC_DCHECK(CheckAndLogError(success, error));
+ }
+}
+
+#if !defined(NDEBUG)
+// Helper method for printing out an AudioStreamBasicDescription structure.
+static void LogABSD(AudioStreamBasicDescription absd) {
+ char formatIDString[5];
+ UInt32 formatID = CFSwapInt32HostToBig(absd.mFormatID);
+ bcopy(&formatID, formatIDString, 4);
+ formatIDString[4] = '\0';
+ LOG(LS_INFO) << "LogABSD";
+ LOG(LS_INFO) << " sample rate: " << absd.mSampleRate;
+ LOG(LS_INFO) << " format ID: " << formatIDString;
+ LOG(LS_INFO) << " format flags: " << std::hex << absd.mFormatFlags;
+ LOG(LS_INFO) << " bytes per packet: " << absd.mBytesPerPacket;
+ LOG(LS_INFO) << " frames per packet: " << absd.mFramesPerPacket;
+ LOG(LS_INFO) << " bytes per frame: " << absd.mBytesPerFrame;
+ LOG(LS_INFO) << " channels per packet: " << absd.mChannelsPerFrame;
+ LOG(LS_INFO) << " bits per channel: " << absd.mBitsPerChannel;
+ LOG(LS_INFO) << " reserved: " << absd.mReserved;
+}
+
+// Helper method that logs essential device information strings.
+static void LogDeviceInfo() {
+ LOG(LS_INFO) << "LogDeviceInfo";
+ @autoreleasepool {
+ LOG(LS_INFO) << " system name: " << ios::GetSystemName();
+ LOG(LS_INFO) << " system version: " << ios::GetSystemVersion();
+ LOG(LS_INFO) << " device type: " << ios::GetDeviceType();
+ LOG(LS_INFO) << " device name: " << ios::GetDeviceName();
+ }
+}
+#endif // !defined(NDEBUG)
+
+AudioDeviceIOS::AudioDeviceIOS()
+ : audio_device_buffer_(nullptr),
+ vpio_unit_(nullptr),
+ recording_(0),
+ playing_(0),
+ initialized_(false),
+ rec_is_initialized_(false),
+ play_is_initialized_(false),
+ audio_interruption_observer_(nullptr) {
+ LOGI() << "ctor" << ios::GetCurrentThreadDescription();
}
-// ----------------------------------------------------------------------------
-// SetLoudspeakerStatus
-//
-// Change the default receiver playout route to speaker.
-//
-// ----------------------------------------------------------------------------
-
-int32_t AudioDeviceIOS::SetLoudspeakerStatus(bool enable) {
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- "AudioDeviceIOS::SetLoudspeakerStatus(enable=%d)", enable);
-
- AVAudioSession* session = [AVAudioSession sharedInstance];
- NSString* category = session.category;
- AVAudioSessionCategoryOptions options = session.categoryOptions;
- // Respect old category options if category is
- // AVAudioSessionCategoryPlayAndRecord. Otherwise reset it since old options
- // might not be valid for this category.
- if ([category isEqualToString:AVAudioSessionCategoryPlayAndRecord]) {
- if (enable) {
- options |= AVAudioSessionCategoryOptionDefaultToSpeaker;
- } else {
- options &= ~AVAudioSessionCategoryOptionDefaultToSpeaker;
- }
- } else {
- options = AVAudioSessionCategoryOptionDefaultToSpeaker;
- }
-
- NSError* error = nil;
- [session setCategory:AVAudioSessionCategoryPlayAndRecord
- withOptions:options
- error:&error];
- if (error != nil) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- "Error changing default output route ");
- return -1;
- }
-
- return 0;
+AudioDeviceIOS::~AudioDeviceIOS() {
+ LOGI() << "~dtor";
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ Terminate();
}
-int32_t AudioDeviceIOS::GetLoudspeakerStatus(bool &enabled) const {
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- "AudioDeviceIOS::SetLoudspeakerStatus(enabled=?)");
-
- AVAudioSession* session = [AVAudioSession sharedInstance];
- AVAudioSessionCategoryOptions options = session.categoryOptions;
- enabled = options & AVAudioSessionCategoryOptionDefaultToSpeaker;
-
- return 0;
+void AudioDeviceIOS::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
+ LOGI() << "AttachAudioBuffer";
+ RTC_DCHECK(audioBuffer);
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ audio_device_buffer_ = audioBuffer;
}
-int32_t AudioDeviceIOS::PlayoutIsAvailable(bool& available) {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
-
- available = false;
-
- // Try to initialize the playout side
- int32_t res = InitPlayout();
-
- // Cancel effect of initialization
- StopPlayout();
-
- if (res != -1) {
- available = true;
- }
-
- return 0;
+int32_t AudioDeviceIOS::Init() {
+ LOGI() << "Init";
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ if (initialized_) {
+ return 0;
+ }
+#if !defined(NDEBUG)
+ LogDeviceInfo();
+#endif
+ // Store the preferred sample rate and preferred number of channels already
+ // here. They have not been set and confirmed yet since ActivateAudioSession()
+ // is not called until audio is about to start. However, it makes sense to
+ // store the parameters now and then verify at a later stage.
+ playout_parameters_.reset(kPreferredSampleRate, kPreferredNumberOfChannels);
+ record_parameters_.reset(kPreferredSampleRate, kPreferredNumberOfChannels);
+ // Ensure that the audio device buffer (ADB) knows about the internal audio
+ // parameters. Note that, even if we are unable to get a mono audio session,
+ // we will always tell the I/O audio unit to do a channel format conversion
+ // to guarantee mono on the "input side" of the audio unit.
+ UpdateAudioDeviceBuffer();
+ initialized_ = true;
+ return 0;
}
-int32_t AudioDeviceIOS::RecordingIsAvailable(bool& available) {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
-
- available = false;
-
- // Try to initialize the recording side
- int32_t res = InitRecording();
-
- // Cancel effect of initialization
- StopRecording();
-
- if (res != -1) {
- available = true;
- }
-
+int32_t AudioDeviceIOS::Terminate() {
+ LOGI() << "Terminate";
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ if (!initialized_) {
return 0;
+ }
+ ShutdownPlayOrRecord();
+ initialized_ = false;
+ return 0;
}
int32_t AudioDeviceIOS::InitPlayout() {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
-
- CriticalSectionScoped lock(&_critSect);
-
- if (!_initialized) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, " Not initialized");
- return -1;
- }
-
- if (_playing) {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " Playout already started");
- return -1;
- }
-
- if (_playIsInitialized) {
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- " Playout already initialized");
- return 0;
- }
-
- if (!_playoutDeviceIsSpecified) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " Playout device is not specified");
- return -1;
- }
-
- // Initialize the speaker
- if (InitSpeaker() == -1) {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " InitSpeaker() failed");
- }
-
- _playIsInitialized = true;
-
- if (!_recIsInitialized) {
- // Audio init
- if (InitPlayOrRecord() == -1) {
- // todo: Handle error
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " InitPlayOrRecord() failed");
- }
- } else {
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- " Recording already initialized - InitPlayOrRecord() not called");
+ LOGI() << "InitPlayout";
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(initialized_);
+ RTC_DCHECK(!play_is_initialized_);
+ RTC_DCHECK(!playing_);
+ if (!rec_is_initialized_) {
+ if (!InitPlayOrRecord()) {
+ LOG_F(LS_ERROR) << "InitPlayOrRecord failed!";
+ return -1;
}
-
- return 0;
-}
-
-bool AudioDeviceIOS::PlayoutIsInitialized() const {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
- return (_playIsInitialized);
+ }
+ play_is_initialized_ = true;
+ return 0;
}
int32_t AudioDeviceIOS::InitRecording() {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
-
- CriticalSectionScoped lock(&_critSect);
-
- if (!_initialized) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " Not initialized");
- return -1;
- }
-
- if (_recording) {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " Recording already started");
- return -1;
- }
-
- if (_recIsInitialized) {
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- " Recording already initialized");
- return 0;
- }
-
- if (!_recordingDeviceIsSpecified) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " Recording device is not specified");
- return -1;
- }
-
- // Initialize the microphone
- if (InitMicrophone() == -1) {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " InitMicrophone() failed");
- }
-
- _recIsInitialized = true;
-
- if (!_playIsInitialized) {
- // Audio init
- if (InitPlayOrRecord() == -1) {
- // todo: Handle error
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " InitPlayOrRecord() failed");
- }
- } else {
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- " Playout already initialized - InitPlayOrRecord() " \
- "not called");
- }
-
- return 0;
-}
-
-bool AudioDeviceIOS::RecordingIsInitialized() const {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
- return (_recIsInitialized);
-}
-
-int32_t AudioDeviceIOS::StartRecording() {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
-
- CriticalSectionScoped lock(&_critSect);
-
- if (!_recIsInitialized) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " Recording not initialized");
- return -1;
- }
-
- if (_recording) {
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- " Recording already started");
- return 0;
- }
-
- // Reset recording buffer
- memset(_recordingBuffer, 0, sizeof(_recordingBuffer));
- memset(_recordingLength, 0, sizeof(_recordingLength));
- memset(_recordingSeqNumber, 0, sizeof(_recordingSeqNumber));
- _recordingCurrentSeq = 0;
- _recordingBufferTotalSize = 0;
- _recordingDelay = 0;
- _recordingDelayHWAndOS = 0;
- // Make sure first call to update delay function will update delay
- _recordingDelayMeasurementCounter = 9999;
- _recWarning = 0;
- _recError = 0;
-
- if (!_playing) {
- // Start Audio Unit
- WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
- " Starting Audio Unit");
- OSStatus result = AudioOutputUnitStart(_auVoiceProcessing);
- if (0 != result) {
- WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
- " Error starting Audio Unit (result=%d)", result);
- return -1;
- }
- }
-
- _recording = true;
-
- return 0;
-}
-
-int32_t AudioDeviceIOS::StopRecording() {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
-
- CriticalSectionScoped lock(&_critSect);
-
- if (!_recIsInitialized) {
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- " Recording is not initialized");
- return 0;
- }
-
- _recording = false;
-
- if (!_playing) {
- // Both playout and recording has stopped, shutdown the device
- ShutdownPlayOrRecord();
+ LOGI() << "InitRecording";
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(initialized_);
+ RTC_DCHECK(!rec_is_initialized_);
+ RTC_DCHECK(!recording_);
+ if (!play_is_initialized_) {
+ if (!InitPlayOrRecord()) {
+ LOG_F(LS_ERROR) << "InitPlayOrRecord failed!";
+ return -1;
}
-
- _recIsInitialized = false;
- _micIsInitialized = false;
-
- return 0;
-}
-
-bool AudioDeviceIOS::Recording() const {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
- return (_recording);
+ }
+ rec_is_initialized_ = true;
+ return 0;
}
int32_t AudioDeviceIOS::StartPlayout() {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
-
- // This lock is (among other things) needed to avoid concurrency issues
- // with capture thread
- // shutting down Audio Unit
- CriticalSectionScoped lock(&_critSect);
-
- if (!_playIsInitialized) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " Playout not initialized");
- return -1;
- }
-
- if (_playing) {
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- " Playing already started");
- return 0;
- }
-
- // Reset playout buffer
- memset(_playoutBuffer, 0, sizeof(_playoutBuffer));
- _playoutBufferUsed = 0;
- _playoutDelay = 0;
- // Make sure first call to update delay function will update delay
- _playoutDelayMeasurementCounter = 9999;
- _playWarning = 0;
- _playError = 0;
-
- if (!_recording) {
- // Start Audio Unit
- WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
- " Starting Audio Unit");
- OSStatus result = AudioOutputUnitStart(_auVoiceProcessing);
- if (0 != result) {
- WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
- " Error starting Audio Unit (result=%d)", result);
- return -1;
- }
+ LOGI() << "StartPlayout";
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(play_is_initialized_);
+ RTC_DCHECK(!playing_);
+ fine_audio_buffer_->ResetPlayout();
+ if (!recording_) {
+ OSStatus result = AudioOutputUnitStart(vpio_unit_);
+ if (result != noErr) {
+ LOG_F(LS_ERROR) << "AudioOutputUnitStart failed: " << result;
+ return -1;
}
-
- _playing = true;
-
- return 0;
+ }
+ rtc::AtomicOps::ReleaseStore(&playing_, 1);
+ return 0;
}
int32_t AudioDeviceIOS::StopPlayout() {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
-
- CriticalSectionScoped lock(&_critSect);
-
- if (!_playIsInitialized) {
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- " Playout is not initialized");
- return 0;
- }
-
- _playing = false;
-
- if (!_recording) {
- // Both playout and recording has stopped, signal shutdown the device
- ShutdownPlayOrRecord();
- }
-
- _playIsInitialized = false;
- _speakerIsInitialized = false;
-
+ LOGI() << "StopPlayout";
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ if (!play_is_initialized_ || !playing_) {
return 0;
+ }
+ if (!recording_) {
+ ShutdownPlayOrRecord();
+ }
+ play_is_initialized_ = false;
+ rtc::AtomicOps::ReleaseStore(&playing_, 0);
+ return 0;
}
-bool AudioDeviceIOS::Playing() const {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
- return (_playing);
-}
-
-// ----------------------------------------------------------------------------
-// ResetAudioDevice
-//
-// Disable playout and recording, signal to capture thread to shutdown,
-// and set enable states after shutdown to same as current.
-// In capture thread audio device will be shutdown, then started again.
-// ----------------------------------------------------------------------------
-int32_t AudioDeviceIOS::ResetAudioDevice() {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
-
- CriticalSectionScoped lock(&_critSect);
-
- if (!_playIsInitialized && !_recIsInitialized) {
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- " Playout or recording not initialized, doing nothing");
- return 0; // Nothing to reset
- }
-
- // Store the states we have before stopping to restart below
- bool initPlay = _playIsInitialized;
- bool play = _playing;
- bool initRec = _recIsInitialized;
- bool rec = _recording;
-
- int res(0);
-
- // Stop playout and recording
- WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
- " Stopping playout and recording");
- res += StopPlayout();
- res += StopRecording();
-
- // Restart
- WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
- " Restarting playout and recording (%d, %d, %d, %d)",
- initPlay, play, initRec, rec);
- if (initPlay) res += InitPlayout();
- if (initRec) res += InitRecording();
- if (play) res += StartPlayout();
- if (rec) res += StartRecording();
-
- if (0 != res) {
- // Logging is done in init/start/stop calls above
- return -1;
+int32_t AudioDeviceIOS::StartRecording() {
+ LOGI() << "StartRecording";
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(rec_is_initialized_);
+ RTC_DCHECK(!recording_);
+ fine_audio_buffer_->ResetRecord();
+ if (!playing_) {
+ OSStatus result = AudioOutputUnitStart(vpio_unit_);
+ if (result != noErr) {
+ LOG_F(LS_ERROR) << "AudioOutputUnitStart failed: " << result;
+ return -1;
}
-
- return 0;
-}
-
-int32_t AudioDeviceIOS::PlayoutDelay(uint16_t& delayMS) const {
- delayMS = _playoutDelay;
- return 0;
+ }
+ rtc::AtomicOps::ReleaseStore(&recording_, 1);
+ return 0;
}
-int32_t AudioDeviceIOS::RecordingDelay(uint16_t& delayMS) const {
- delayMS = _recordingDelay;
- return 0;
-}
-
-int32_t
- AudioDeviceIOS::SetPlayoutBuffer(const AudioDeviceModule::BufferType type,
- uint16_t sizeMS) {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "AudioDeviceIOS::SetPlayoutBuffer(type=%u, sizeMS=%u)",
- type, sizeMS);
-
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " API call not supported on this platform");
- return -1;
-}
-
-int32_t
- AudioDeviceIOS::PlayoutBuffer(AudioDeviceModule::BufferType& type,
- uint16_t& sizeMS) const {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
-
- type = AudioDeviceModule::kAdaptiveBufferSize;
-
- sizeMS = _playoutDelay;
-
+int32_t AudioDeviceIOS::StopRecording() {
+ LOGI() << "StopRecording";
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ if (!rec_is_initialized_ || !recording_) {
return 0;
+ }
+ if (!playing_) {
+ ShutdownPlayOrRecord();
+ }
+ rec_is_initialized_ = false;
+ rtc::AtomicOps::ReleaseStore(&recording_, 0);
+ return 0;
}
-int32_t AudioDeviceIOS::CPULoad(uint16_t& /*load*/) const {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
-
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " API call not supported on this platform");
- return -1;
-}
-
-bool AudioDeviceIOS::PlayoutWarning() const {
- return (_playWarning > 0);
-}
-
-bool AudioDeviceIOS::PlayoutError() const {
- return (_playError > 0);
-}
-
-bool AudioDeviceIOS::RecordingWarning() const {
- return (_recWarning > 0);
-}
-
-bool AudioDeviceIOS::RecordingError() const {
- return (_recError > 0);
-}
-
-void AudioDeviceIOS::ClearPlayoutWarning() {
- _playWarning = 0;
-}
-
-void AudioDeviceIOS::ClearPlayoutError() {
- _playError = 0;
-}
-
-void AudioDeviceIOS::ClearRecordingWarning() {
- _recWarning = 0;
-}
-
-void AudioDeviceIOS::ClearRecordingError() {
- _recError = 0;
-}
-
-// ============================================================================
-// Private Methods
-// ============================================================================
-
-int32_t AudioDeviceIOS::InitPlayOrRecord() {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
-
- OSStatus result = -1;
-
- // Check if already initialized
- if (NULL != _auVoiceProcessing) {
- // We already have initialized before and created any of the audio unit,
- // check that all exist
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- " Already initialized");
- // todo: Call AudioUnitReset() here and empty all buffers?
- return 0;
- }
-
- // Create Voice Processing Audio Unit
- AudioComponentDescription desc;
- AudioComponent comp;
-
- desc.componentType = kAudioUnitType_Output;
- desc.componentSubType = kAudioUnitSubType_VoiceProcessingIO;
- desc.componentManufacturer = kAudioUnitManufacturer_Apple;
- desc.componentFlags = 0;
- desc.componentFlagsMask = 0;
-
- comp = AudioComponentFindNext(NULL, &desc);
- if (NULL == comp) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " Could not find audio component for Audio Unit");
- return -1;
- }
-
- result = AudioComponentInstanceNew(comp, &_auVoiceProcessing);
- if (0 != result) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " Could not create Audio Unit instance (result=%d)",
- result);
- return -1;
- }
-
- // Set preferred hardware sample rate to 16 kHz
- NSError* error = nil;
- AVAudioSession* session = [AVAudioSession sharedInstance];
- Float64 preferredSampleRate(16000.0);
- [session setPreferredSampleRate:preferredSampleRate
- error:&error];
- if (error != nil) {
- const char* errorString = [[error localizedDescription] UTF8String];
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- "Could not set preferred sample rate: %s", errorString);
- }
- error = nil;
- // Make the setMode:error: and setCategory:error: calls only if necessary.
- // Non-obviously, setting them to the value they already have will clear
- // transient properties (such as PortOverride) that some other component may
- // have set up.
- if (session.mode != AVAudioSessionModeVoiceChat) {
- [session setMode:AVAudioSessionModeVoiceChat error:&error];
- if (error != nil) {
- const char* errorString = [[error localizedDescription] UTF8String];
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- "Could not set mode: %s", errorString);
- }
- }
- error = nil;
- if (session.category != AVAudioSessionCategoryPlayAndRecord) {
- [session setCategory:AVAudioSessionCategoryPlayAndRecord error:&error];
- if (error != nil) {
- const char* errorString = [[error localizedDescription] UTF8String];
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- "Could not set category: %s", errorString);
- }
- }
-
- //////////////////////
- // Setup Voice Processing Audio Unit
-
- // Note: For Signal Processing AU element 0 is output bus, element 1 is
- // input bus for global scope element is irrelevant (always use
- // element 0)
-
- // Enable IO on both elements
-
- // todo: Below we just log and continue upon error. We might want
- // to close AU and return error for some cases.
- // todo: Log info about setup.
-
- UInt32 enableIO = 1;
- result = AudioUnitSetProperty(_auVoiceProcessing,
- kAudioOutputUnitProperty_EnableIO,
- kAudioUnitScope_Input,
- 1, // input bus
- &enableIO,
- sizeof(enableIO));
- if (0 != result) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " Could not enable IO on input (result=%d)", result);
- }
-
- result = AudioUnitSetProperty(_auVoiceProcessing,
- kAudioOutputUnitProperty_EnableIO,
- kAudioUnitScope_Output,
- 0, // output bus
- &enableIO,
- sizeof(enableIO));
- if (0 != result) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " Could not enable IO on output (result=%d)", result);
- }
-
- // Disable AU buffer allocation for the recorder, we allocate our own
- UInt32 flag = 0;
- result = AudioUnitSetProperty(
- _auVoiceProcessing, kAudioUnitProperty_ShouldAllocateBuffer,
- kAudioUnitScope_Output, 1, &flag, sizeof(flag));
- if (0 != result) {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " Could not disable AU buffer allocation (result=%d)",
- result);
- // Should work anyway
- }
-
- // Set recording callback
- AURenderCallbackStruct auCbS;
- memset(&auCbS, 0, sizeof(auCbS));
- auCbS.inputProc = RecordProcess;
- auCbS.inputProcRefCon = this;
- result = AudioUnitSetProperty(_auVoiceProcessing,
- kAudioOutputUnitProperty_SetInputCallback,
- kAudioUnitScope_Global, 1,
- &auCbS, sizeof(auCbS));
- if (0 != result) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " Could not set record callback for Audio Unit (result=%d)",
- result);
- }
-
- // Set playout callback
- memset(&auCbS, 0, sizeof(auCbS));
- auCbS.inputProc = PlayoutProcess;
- auCbS.inputProcRefCon = this;
- result = AudioUnitSetProperty(_auVoiceProcessing,
- kAudioUnitProperty_SetRenderCallback,
- kAudioUnitScope_Global, 0,
- &auCbS, sizeof(auCbS));
- if (0 != result) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " Could not set play callback for Audio Unit (result=%d)",
- result);
- }
-
- // Get stream format for out/0
- AudioStreamBasicDescription playoutDesc;
- UInt32 size = sizeof(playoutDesc);
- result = AudioUnitGetProperty(_auVoiceProcessing,
- kAudioUnitProperty_StreamFormat,
- kAudioUnitScope_Output, 0, &playoutDesc,
- &size);
- if (0 != result) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " Could not get stream format Audio Unit out/0 (result=%d)",
- result);
- }
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- " Audio Unit playout opened in sampling rate %f",
- playoutDesc.mSampleRate);
-
- playoutDesc.mSampleRate = preferredSampleRate;
-
- // Store the sampling frequency to use towards the Audio Device Buffer
- // todo: Add 48 kHz (increase buffer sizes). Other fs?
- if ((playoutDesc.mSampleRate > 44090.0)
- && (playoutDesc.mSampleRate < 44110.0)) {
- _adbSampFreq = 44100;
- } else if ((playoutDesc.mSampleRate > 15990.0)
- && (playoutDesc.mSampleRate < 16010.0)) {
- _adbSampFreq = 16000;
- } else if ((playoutDesc.mSampleRate > 7990.0)
- && (playoutDesc.mSampleRate < 8010.0)) {
- _adbSampFreq = 8000;
+// Change the default receiver playout route to speaker.
+int32_t AudioDeviceIOS::SetLoudspeakerStatus(bool enable) {
+ LOGI() << "SetLoudspeakerStatus(" << enable << ")";
+
+ AVAudioSession* session = [AVAudioSession sharedInstance];
+ NSString* category = session.category;
+ AVAudioSessionCategoryOptions options = session.categoryOptions;
+ // Respect old category options if category is
+ // AVAudioSessionCategoryPlayAndRecord. Otherwise reset it since old options
+ // might not be valid for this category.
+ if ([category isEqualToString:AVAudioSessionCategoryPlayAndRecord]) {
+ if (enable) {
+ options |= AVAudioSessionCategoryOptionDefaultToSpeaker;
} else {
- _adbSampFreq = 0;
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " Audio Unit out/0 opened in unknown sampling rate (%f)",
- playoutDesc.mSampleRate);
- // todo: We should bail out here.
- }
-
- // Set the audio device buffer sampling rate,
- // we assume we get the same for play and record
- if (_ptrAudioBuffer->SetRecordingSampleRate(_adbSampFreq) < 0) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " Could not set audio device buffer recording sampling rate (%d)",
- _adbSampFreq);
- }
-
- if (_ptrAudioBuffer->SetPlayoutSampleRate(_adbSampFreq) < 0) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " Could not set audio device buffer playout sampling rate (%d)",
- _adbSampFreq);
- }
-
- // Set stream format for in/0 (use same sampling frequency as for out/0)
- playoutDesc.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger
- | kLinearPCMFormatFlagIsPacked
- | kLinearPCMFormatFlagIsNonInterleaved;
- playoutDesc.mBytesPerPacket = 2;
- playoutDesc.mFramesPerPacket = 1;
- playoutDesc.mBytesPerFrame = 2;
- playoutDesc.mChannelsPerFrame = 1;
- playoutDesc.mBitsPerChannel = 16;
- result = AudioUnitSetProperty(_auVoiceProcessing,
- kAudioUnitProperty_StreamFormat,
- kAudioUnitScope_Input, 0, &playoutDesc, size);
- if (0 != result) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " Could not set stream format Audio Unit in/0 (result=%d)",
- result);
- }
-
- // Get stream format for in/1
- AudioStreamBasicDescription recordingDesc;
- size = sizeof(recordingDesc);
- result = AudioUnitGetProperty(_auVoiceProcessing,
- kAudioUnitProperty_StreamFormat,
- kAudioUnitScope_Input, 1, &recordingDesc,
- &size);
- if (0 != result) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " Could not get stream format Audio Unit in/1 (result=%d)",
- result);
- }
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- " Audio Unit recording opened in sampling rate %f",
- recordingDesc.mSampleRate);
-
- recordingDesc.mSampleRate = preferredSampleRate;
-
- // Set stream format for out/1 (use same sampling frequency as for in/1)
- recordingDesc.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger
- | kLinearPCMFormatFlagIsPacked
- | kLinearPCMFormatFlagIsNonInterleaved;
-
- recordingDesc.mBytesPerPacket = 2;
- recordingDesc.mFramesPerPacket = 1;
- recordingDesc.mBytesPerFrame = 2;
- recordingDesc.mChannelsPerFrame = 1;
- recordingDesc.mBitsPerChannel = 16;
- result = AudioUnitSetProperty(_auVoiceProcessing,
- kAudioUnitProperty_StreamFormat,
- kAudioUnitScope_Output, 1, &recordingDesc,
- size);
- if (0 != result) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " Could not set stream format Audio Unit out/1 (result=%d)",
- result);
- }
-
- // Initialize here already to be able to get/set stream properties.
- result = AudioUnitInitialize(_auVoiceProcessing);
- if (0 != result) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " Could not init Audio Unit (result=%d)", result);
- }
-
- // Get hardware sample rate for logging (see if we get what we asked for)
- double sampleRate = session.sampleRate;
- WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
- " Current HW sample rate is %f, ADB sample rate is %d",
- sampleRate, _adbSampFreq);
-
- // Listen to audio interruptions.
- NSNotificationCenter* center = [NSNotificationCenter defaultCenter];
- id observer =
- [center addObserverForName:AVAudioSessionInterruptionNotification
- object:nil
- queue:[NSOperationQueue mainQueue]
- usingBlock:^(NSNotification* notification) {
- NSNumber* typeNumber =
- [notification userInfo][AVAudioSessionInterruptionTypeKey];
- AVAudioSessionInterruptionType type =
- (AVAudioSessionInterruptionType)[typeNumber unsignedIntegerValue];
- switch (type) {
- case AVAudioSessionInterruptionTypeBegan:
- // At this point our audio session has been deactivated and the
- // audio unit render callbacks no longer occur. Nothing to do.
- break;
- case AVAudioSessionInterruptionTypeEnded: {
- NSError* error = nil;
- AVAudioSession* session = [AVAudioSession sharedInstance];
- [session setActive:YES
- error:&error];
- if (error != nil) {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- "Error activating audio session");
- }
- // Post interruption the audio unit render callbacks don't
- // automatically continue, so we restart the unit manually here.
- AudioOutputUnitStop(_auVoiceProcessing);
- AudioOutputUnitStart(_auVoiceProcessing);
- break;
- }
- }
- }];
- // Increment refcount on observer using ARC bridge. Instance variable is a
- // void* instead of an id because header is included in other pure C++
- // files.
- _audioInterruptionObserver = (__bridge_retained void*)observer;
-
- // Activate audio session.
- error = nil;
- [session setActive:YES
- error:&error];
- if (error != nil) {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- "Error activating audio session");
- }
-
- return 0;
-}
-
-int32_t AudioDeviceIOS::ShutdownPlayOrRecord() {
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", __FUNCTION__);
-
- if (_audioInterruptionObserver != NULL) {
- NSNotificationCenter* center = [NSNotificationCenter defaultCenter];
- // Transfer ownership of observer back to ARC, which will dealloc the
- // observer once it exits this scope.
- id observer = (__bridge_transfer id)_audioInterruptionObserver;
- [center removeObserver:observer];
- _audioInterruptionObserver = NULL;
- }
-
- // Close and delete AU
- OSStatus result = -1;
- if (NULL != _auVoiceProcessing) {
- result = AudioOutputUnitStop(_auVoiceProcessing);
- if (0 != result) {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " Error stopping Audio Unit (result=%d)", result);
- }
- result = AudioComponentInstanceDispose(_auVoiceProcessing);
- if (0 != result) {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " Error disposing Audio Unit (result=%d)", result);
- }
- _auVoiceProcessing = NULL;
- }
-
- return 0;
-}
-
-// ============================================================================
-// Thread Methods
-// ============================================================================
-
-OSStatus
- AudioDeviceIOS::RecordProcess(void *inRefCon,
- AudioUnitRenderActionFlags *ioActionFlags,
- const AudioTimeStamp *inTimeStamp,
- UInt32 inBusNumber,
- UInt32 inNumberFrames,
- AudioBufferList *ioData) {
- AudioDeviceIOS* ptrThis = static_cast<AudioDeviceIOS*>(inRefCon);
-
- return ptrThis->RecordProcessImpl(ioActionFlags,
- inTimeStamp,
- inBusNumber,
- inNumberFrames);
-}
-
-
-OSStatus
- AudioDeviceIOS::RecordProcessImpl(AudioUnitRenderActionFlags *ioActionFlags,
- const AudioTimeStamp *inTimeStamp,
- uint32_t inBusNumber,
- uint32_t inNumberFrames) {
- // Setup some basic stuff
- // Use temp buffer not to lock up recording buffer more than necessary
- // todo: Make dataTmp a member variable with static size that holds
- // max possible frames?
- int16_t* dataTmp = new int16_t[inNumberFrames];
- memset(dataTmp, 0, 2*inNumberFrames);
-
- AudioBufferList abList;
- abList.mNumberBuffers = 1;
- abList.mBuffers[0].mData = dataTmp;
- abList.mBuffers[0].mDataByteSize = 2*inNumberFrames; // 2 bytes/sample
- abList.mBuffers[0].mNumberChannels = 1;
-
- // Get data from mic
- OSStatus res = AudioUnitRender(_auVoiceProcessing,
- ioActionFlags, inTimeStamp,
- inBusNumber, inNumberFrames, &abList);
- if (res != 0) {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " Error getting rec data, error = %d", res);
-
- if (_recWarning > 0) {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " Pending rec warning exists");
- }
- _recWarning = 1;
-
- delete [] dataTmp;
- return 0;
- }
-
- if (_recording) {
- // Insert all data in temp buffer into recording buffers
- // There is zero or one buffer partially full at any given time,
- // all others are full or empty
- // Full means filled with noSamp10ms samples.
-
- const unsigned int noSamp10ms = _adbSampFreq / 100;
- unsigned int dataPos = 0;
- uint16_t bufPos = 0;
- int16_t insertPos = -1;
- unsigned int nCopy = 0; // Number of samples to copy
-
- while (dataPos < inNumberFrames) {
- // Loop over all recording buffers or
- // until we find the partially full buffer
- // First choice is to insert into partially full buffer,
- // second choice is to insert into empty buffer
- bufPos = 0;
- insertPos = -1;
- nCopy = 0;
- while (bufPos < N_REC_BUFFERS) {
- if ((_recordingLength[bufPos] > 0)
- && (_recordingLength[bufPos] < noSamp10ms)) {
- // Found the partially full buffer
- insertPos = static_cast<int16_t>(bufPos);
- // Don't need to search more, quit loop
- bufPos = N_REC_BUFFERS;
- } else if ((-1 == insertPos)
- && (0 == _recordingLength[bufPos])) {
- // Found an empty buffer
- insertPos = static_cast<int16_t>(bufPos);
- }
- ++bufPos;
- }
-
- // Insert data into buffer
- if (insertPos > -1) {
- // We found a non-full buffer, copy data to it
- unsigned int dataToCopy = inNumberFrames - dataPos;
- unsigned int currentRecLen = _recordingLength[insertPos];
- unsigned int roomInBuffer = noSamp10ms - currentRecLen;
- nCopy = (dataToCopy < roomInBuffer ? dataToCopy : roomInBuffer);
-
- memcpy(&_recordingBuffer[insertPos][currentRecLen],
- &dataTmp[dataPos], nCopy*sizeof(int16_t));
- if (0 == currentRecLen) {
- _recordingSeqNumber[insertPos] = _recordingCurrentSeq;
- ++_recordingCurrentSeq;
- }
- _recordingBufferTotalSize += nCopy;
- // Has to be done last to avoid interrupt problems
- // between threads
- _recordingLength[insertPos] += nCopy;
- dataPos += nCopy;
- } else {
- // Didn't find a non-full buffer
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " Could not insert into recording buffer");
- if (_recWarning > 0) {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " Pending rec warning exists");
- }
- _recWarning = 1;
- dataPos = inNumberFrames; // Don't try to insert more
- }
- }
- }
-
- delete [] dataTmp;
-
- return 0;
-}
-
-OSStatus
- AudioDeviceIOS::PlayoutProcess(void *inRefCon,
- AudioUnitRenderActionFlags *ioActionFlags,
- const AudioTimeStamp *inTimeStamp,
- UInt32 inBusNumber,
- UInt32 inNumberFrames,
- AudioBufferList *ioData) {
- AudioDeviceIOS* ptrThis = static_cast<AudioDeviceIOS*>(inRefCon);
-
- return ptrThis->PlayoutProcessImpl(inNumberFrames, ioData);
-}
-
-OSStatus
- AudioDeviceIOS::PlayoutProcessImpl(uint32_t inNumberFrames,
- AudioBufferList *ioData) {
- // Setup some basic stuff
-// assert(sizeof(short) == 2); // Assumption for implementation
-
- int16_t* data =
- static_cast<int16_t*>(ioData->mBuffers[0].mData);
- unsigned int dataSizeBytes = ioData->mBuffers[0].mDataByteSize;
- unsigned int dataSize = dataSizeBytes/2; // Number of samples
- if (dataSize != inNumberFrames) { // Should always be the same
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- "dataSize (%u) != inNumberFrames (%u)",
- dataSize, (unsigned int)inNumberFrames);
- if (_playWarning > 0) {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " Pending play warning exists");
- }
- _playWarning = 1;
- }
- memset(data, 0, dataSizeBytes); // Start with empty buffer
-
-
- // Get playout data from Audio Device Buffer
-
- if (_playing) {
- unsigned int noSamp10ms = _adbSampFreq / 100;
- // todo: Member variable and allocate when samp freq is determined
- int16_t* dataTmp = new int16_t[noSamp10ms];
- memset(dataTmp, 0, 2*noSamp10ms);
- unsigned int dataPos = 0;
- int noSamplesOut = 0;
- unsigned int nCopy = 0;
-
- // First insert data from playout buffer if any
- if (_playoutBufferUsed > 0) {
- nCopy = (dataSize < _playoutBufferUsed) ?
- dataSize : _playoutBufferUsed;
- if (nCopy != _playoutBufferUsed) {
- // todo: If dataSize < _playoutBufferUsed
- // (should normally never be)
- // we must move the remaining data
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- "nCopy (%u) != _playoutBufferUsed (%u)",
- nCopy, _playoutBufferUsed);
- if (_playWarning > 0) {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " Pending play warning exists");
- }
- _playWarning = 1;
- }
- memcpy(data, _playoutBuffer, 2*nCopy);
- dataPos = nCopy;
- memset(_playoutBuffer, 0, sizeof(_playoutBuffer));
- _playoutBufferUsed = 0;
- }
-
- // Now get the rest from Audio Device Buffer
- while (dataPos < dataSize) {
- // Update playout delay
- UpdatePlayoutDelay();
-
- // Ask for new PCM data to be played out using the AudioDeviceBuffer
- noSamplesOut = _ptrAudioBuffer->RequestPlayoutData(noSamp10ms);
-
- // Get data from Audio Device Buffer
- noSamplesOut =
- _ptrAudioBuffer->GetPlayoutData(
- reinterpret_cast<int8_t*>(dataTmp));
- // Cast OK since only equality comparison
- if (noSamp10ms != (unsigned int)noSamplesOut) {
- // Should never happen
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- "noSamp10ms (%u) != noSamplesOut (%d)",
- noSamp10ms, noSamplesOut);
-
- if (_playWarning > 0) {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " Pending play warning exists");
- }
- _playWarning = 1;
- }
-
- // Insert as much as fits in data buffer
- nCopy = (dataSize-dataPos) > noSamp10ms ?
- noSamp10ms : (dataSize-dataPos);
- memcpy(&data[dataPos], dataTmp, 2*nCopy);
-
- // Save rest in playout buffer if any
- if (nCopy < noSamp10ms) {
- memcpy(_playoutBuffer, &dataTmp[nCopy], 2*(noSamp10ms-nCopy));
- _playoutBufferUsed = noSamp10ms - nCopy;
- }
-
- // Update loop/index counter, if we copied less than noSamp10ms
- // samples we shall quit loop anyway
- dataPos += noSamp10ms;
- }
-
- delete [] dataTmp;
- }
-
- return 0;
-}
-
-void AudioDeviceIOS::UpdatePlayoutDelay() {
- ++_playoutDelayMeasurementCounter;
-
- if (_playoutDelayMeasurementCounter >= 100) {
- // Update HW and OS delay every second, unlikely to change
-
- // Since this is eventually rounded to integral ms, add 0.5ms
- // here to get round-to-nearest-int behavior instead of
- // truncation.
- double totalDelaySeconds = 0.0005;
-
- // HW output latency
- AVAudioSession* session = [AVAudioSession sharedInstance];
- double latency = session.outputLatency;
- assert(latency >= 0);
- totalDelaySeconds += latency;
-
- // HW buffer duration
- double ioBufferDuration = session.IOBufferDuration;
- assert(ioBufferDuration >= 0);
- totalDelaySeconds += ioBufferDuration;
-
- // AU latency
- Float64 f64(0);
- UInt32 size = sizeof(f64);
- OSStatus result = AudioUnitGetProperty(
- _auVoiceProcessing, kAudioUnitProperty_Latency,
- kAudioUnitScope_Global, 0, &f64, &size);
- if (0 != result) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- "error AU latency (result=%d)", result);
- }
- assert(f64 >= 0);
- totalDelaySeconds += f64;
-
- // To ms
- _playoutDelay = static_cast<uint32_t>(totalDelaySeconds / 1000);
-
- // Reset counter
- _playoutDelayMeasurementCounter = 0;
+ options &= ~AVAudioSessionCategoryOptionDefaultToSpeaker;
}
-
- // todo: Add playout buffer?
+ } else {
+ options = AVAudioSessionCategoryOptionDefaultToSpeaker;
+ }
+ NSError* error = nil;
+ BOOL success = [session setCategory:AVAudioSessionCategoryPlayAndRecord
+ withOptions:options
+ error:&error];
+ ios::CheckAndLogError(success, error);
+ return (error == nil) ? 0 : -1;
}
-void AudioDeviceIOS::UpdateRecordingDelay() {
- ++_recordingDelayMeasurementCounter;
-
- if (_recordingDelayMeasurementCounter >= 100) {
- // Update HW and OS delay every second, unlikely to change
-
- // Since this is eventually rounded to integral ms, add 0.5ms
- // here to get round-to-nearest-int behavior instead of
- // truncation.
- double totalDelaySeconds = 0.0005;
-
- // HW input latency
- AVAudioSession* session = [AVAudioSession sharedInstance];
- double latency = session.inputLatency;
- assert(latency >= 0);
- totalDelaySeconds += latency;
-
- // HW buffer duration
- double ioBufferDuration = session.IOBufferDuration;
- assert(ioBufferDuration >= 0);
- totalDelaySeconds += ioBufferDuration;
-
- // AU latency
- Float64 f64(0);
- UInt32 size = sizeof(f64);
- OSStatus result = AudioUnitGetProperty(
- _auVoiceProcessing, kAudioUnitProperty_Latency,
- kAudioUnitScope_Global, 0, &f64, &size);
- if (0 != result) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- "error AU latency (result=%d)", result);
- }
- assert(f64 >= 0);
- totalDelaySeconds += f64;
-
- // To ms
- _recordingDelayHWAndOS =
- static_cast<uint32_t>(totalDelaySeconds / 1000);
-
- // Reset counter
- _recordingDelayMeasurementCounter = 0;
- }
-
- _recordingDelay = _recordingDelayHWAndOS;
-
- // ADB recording buffer size, update every time
- // Don't count the one next 10 ms to be sent, then convert samples => ms
- const uint32_t noSamp10ms = _adbSampFreq / 100;
- if (_recordingBufferTotalSize > noSamp10ms) {
- _recordingDelay +=
- (_recordingBufferTotalSize - noSamp10ms) / (_adbSampFreq / 1000);
- }
+int32_t AudioDeviceIOS::GetLoudspeakerStatus(bool& enabled) const {
+ LOGI() << "GetLoudspeakerStatus";
+ AVAudioSession* session = [AVAudioSession sharedInstance];
+ AVAudioSessionCategoryOptions options = session.categoryOptions;
+ enabled = options & AVAudioSessionCategoryOptionDefaultToSpeaker;
+ return 0;
}
-bool AudioDeviceIOS::RunCapture(void* ptrThis) {
- return static_cast<AudioDeviceIOS*>(ptrThis)->CaptureWorkerThread();
+int32_t AudioDeviceIOS::PlayoutDelay(uint16_t& delayMS) const {
+ delayMS = kFixedPlayoutDelayEstimate;
+ return 0;
}
-bool AudioDeviceIOS::CaptureWorkerThread() {
- if (_recording) {
- int bufPos = 0;
- unsigned int lowestSeq = 0;
- int lowestSeqBufPos = 0;
- bool foundBuf = true;
- const unsigned int noSamp10ms = _adbSampFreq / 100;
-
- while (foundBuf) {
- // Check if we have any buffer with data to insert
- // into the Audio Device Buffer,
- // and find the one with the lowest seq number
- foundBuf = false;
- for (bufPos = 0; bufPos < N_REC_BUFFERS; ++bufPos) {
- if (noSamp10ms == _recordingLength[bufPos]) {
- if (!foundBuf) {
- lowestSeq = _recordingSeqNumber[bufPos];
- lowestSeqBufPos = bufPos;
- foundBuf = true;
- } else if (_recordingSeqNumber[bufPos] < lowestSeq) {
- lowestSeq = _recordingSeqNumber[bufPos];
- lowestSeqBufPos = bufPos;
+int32_t AudioDeviceIOS::RecordingDelay(uint16_t& delayMS) const {
+ delayMS = kFixedRecordDelayEstimate;
+ return 0;
+}
+
+int AudioDeviceIOS::GetPlayoutAudioParameters(AudioParameters* params) const {
+ LOGI() << "GetPlayoutAudioParameters";
+ RTC_DCHECK(playout_parameters_.is_valid());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ *params = playout_parameters_;
+ return 0;
+}
+
+int AudioDeviceIOS::GetRecordAudioParameters(AudioParameters* params) const {
+ LOGI() << "GetRecordAudioParameters";
+ RTC_DCHECK(record_parameters_.is_valid());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ *params = record_parameters_;
+ return 0;
+}
+
+void AudioDeviceIOS::UpdateAudioDeviceBuffer() {
+ LOGI() << "UpdateAudioDevicebuffer";
+ // AttachAudioBuffer() is called at construction by the main class but check
+ // just in case.
+ RTC_DCHECK(audio_device_buffer_) << "AttachAudioBuffer must be called first";
+ // Inform the audio device buffer (ADB) about the new audio format.
+ audio_device_buffer_->SetPlayoutSampleRate(playout_parameters_.sample_rate());
+ audio_device_buffer_->SetPlayoutChannels(playout_parameters_.channels());
+ audio_device_buffer_->SetRecordingSampleRate(
+ record_parameters_.sample_rate());
+ audio_device_buffer_->SetRecordingChannels(record_parameters_.channels());
+}
+
+void AudioDeviceIOS::SetupAudioBuffersForActiveAudioSession() {
+ LOGI() << "SetupAudioBuffersForActiveAudioSession";
+ AVAudioSession* session = [AVAudioSession sharedInstance];
+ // Verify the current values once the audio session has been activated.
+ LOG(LS_INFO) << " sample rate: " << session.sampleRate;
+ LOG(LS_INFO) << " IO buffer duration: " << session.IOBufferDuration;
+ LOG(LS_INFO) << " output channels: " << session.outputNumberOfChannels;
+ LOG(LS_INFO) << " input channels: " << session.inputNumberOfChannels;
+ LOG(LS_INFO) << " output latency: " << session.outputLatency;
+ LOG(LS_INFO) << " input latency: " << session.inputLatency;
+ // Log a warning message for the case when we are unable to set the preferred
+ // hardware sample rate but continue and use the non-ideal sample rate after
+ // reinitializing the audio parameters.
+ if (session.sampleRate != playout_parameters_.sample_rate()) {
+ LOG(LS_WARNING)
+ << "Failed to enable an audio session with the preferred sample rate!";
+ }
+
+ // At this stage, we also know the exact IO buffer duration and can add
+ // that info to the existing audio parameters where it is converted into
+ // number of audio frames.
+ // Example: IO buffer size = 0.008 seconds <=> 128 audio frames at 16kHz.
+ // Hence, 128 is the size we expect to see in upcoming render callbacks.
+ playout_parameters_.reset(session.sampleRate, playout_parameters_.channels(),
+ session.IOBufferDuration);
+ RTC_DCHECK(playout_parameters_.is_complete());
+ record_parameters_.reset(session.sampleRate, record_parameters_.channels(),
+ session.IOBufferDuration);
+ RTC_DCHECK(record_parameters_.is_complete());
+ LOG(LS_INFO) << " frames per I/O buffer: "
+ << playout_parameters_.frames_per_buffer();
+ LOG(LS_INFO) << " bytes per I/O buffer: "
+ << playout_parameters_.GetBytesPerBuffer();
+ RTC_DCHECK_EQ(playout_parameters_.GetBytesPerBuffer(),
+ record_parameters_.GetBytesPerBuffer());
+
+ // Update the ADB parameters since the sample rate might have changed.
+ UpdateAudioDeviceBuffer();
+
+ // Create a modified audio buffer class which allows us to ask for,
+ // or deliver, any number of samples (and not only multiple of 10ms) to match
+ // the native audio unit buffer size.
+ RTC_DCHECK(audio_device_buffer_);
+ fine_audio_buffer_.reset(new FineAudioBuffer(
+ audio_device_buffer_, playout_parameters_.GetBytesPerBuffer(),
+ playout_parameters_.sample_rate()));
+
+ // The extra/temporary playoutbuffer must be of this size to avoid
+ // unnecessary memcpy while caching data between successive callbacks.
+ const int required_playout_buffer_size =
+ fine_audio_buffer_->RequiredPlayoutBufferSizeBytes();
+ LOG(LS_INFO) << " required playout buffer size: "
+ << required_playout_buffer_size;
+ playout_audio_buffer_.reset(new SInt8[required_playout_buffer_size]);
+
+ // Allocate AudioBuffers to be used as storage for the received audio.
+ // The AudioBufferList structure works as a placeholder for the
+ // AudioBuffer structure, which holds a pointer to the actual data buffer
+ // in |record_audio_buffer_|. Recorded audio will be rendered into this memory
+ // at each input callback when calling AudioUnitRender().
+ const int data_byte_size = record_parameters_.GetBytesPerBuffer();
+ record_audio_buffer_.reset(new SInt8[data_byte_size]);
+ audio_record_buffer_list_.mNumberBuffers = 1;
+ AudioBuffer* audio_buffer = &audio_record_buffer_list_.mBuffers[0];
+ audio_buffer->mNumberChannels = record_parameters_.channels();
+ audio_buffer->mDataByteSize = data_byte_size;
+ audio_buffer->mData = record_audio_buffer_.get();
+}
+
+bool AudioDeviceIOS::SetupAndInitializeVoiceProcessingAudioUnit() {
+ LOGI() << "SetupAndInitializeVoiceProcessingAudioUnit";
+ RTC_DCHECK(!vpio_unit_);
+ // Create an audio component description to identify the Voice-Processing
+ // I/O audio unit.
+ AudioComponentDescription vpio_unit_description;
+ vpio_unit_description.componentType = kAudioUnitType_Output;
+ vpio_unit_description.componentSubType = kAudioUnitSubType_VoiceProcessingIO;
+ vpio_unit_description.componentManufacturer = kAudioUnitManufacturer_Apple;
+ vpio_unit_description.componentFlags = 0;
+ vpio_unit_description.componentFlagsMask = 0;
+ // Obtain an audio unit instance given the description.
+ AudioComponent found_vpio_unit_ref =
+ AudioComponentFindNext(nullptr, &vpio_unit_description);
+
+ // Create a Voice-Processing IO audio unit.
+ LOG_AND_RETURN_IF_ERROR(
+ AudioComponentInstanceNew(found_vpio_unit_ref, &vpio_unit_),
+ "Failed to create a VoiceProcessingIO audio unit");
+
+ // A VP I/O unit's bus 1 connects to input hardware (microphone). Enable
+ // input on the input scope of the input element.
+ AudioUnitElement input_bus = 1;
+ UInt32 enable_input = 1;
+ LOG_AND_RETURN_IF_ERROR(
+ AudioUnitSetProperty(vpio_unit_, kAudioOutputUnitProperty_EnableIO,
+ kAudioUnitScope_Input, input_bus, &enable_input,
+ sizeof(enable_input)),
+ "Failed to enable input on input scope of input element");
+
+ // A VP I/O unit's bus 0 connects to output hardware (speaker). Enable
+ // output on the output scope of the output element.
+ AudioUnitElement output_bus = 0;
+ UInt32 enable_output = 1;
+ LOG_AND_RETURN_IF_ERROR(
+ AudioUnitSetProperty(vpio_unit_, kAudioOutputUnitProperty_EnableIO,
+ kAudioUnitScope_Output, output_bus, &enable_output,
+ sizeof(enable_output)),
+ "Failed to enable output on output scope of output element");
+
+ // Set the application formats for input and output:
+ // - use same format in both directions
+ // - avoid resampling in the I/O unit by using the hardware sample rate
+ // - linear PCM => noncompressed audio data format with one frame per packet
+ // - no need to specify interleaving since only mono is supported
+ AudioStreamBasicDescription application_format = {0};
+ UInt32 size = sizeof(application_format);
+ RTC_DCHECK_EQ(playout_parameters_.sample_rate(),
+ record_parameters_.sample_rate());
+ RTC_DCHECK_EQ(1, kPreferredNumberOfChannels);
+ application_format.mSampleRate = playout_parameters_.sample_rate();
+ application_format.mFormatID = kAudioFormatLinearPCM;
+ application_format.mFormatFlags =
+ kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
+ application_format.mBytesPerPacket = kBytesPerSample;
+ application_format.mFramesPerPacket = 1; // uncompressed
+ application_format.mBytesPerFrame = kBytesPerSample;
+ application_format.mChannelsPerFrame = kPreferredNumberOfChannels;
+ application_format.mBitsPerChannel = 8 * kBytesPerSample;
+#if !defined(NDEBUG)
+ LogABSD(application_format);
+#endif
+
+ // Set the application format on the output scope of the input element/bus.
+ LOG_AND_RETURN_IF_ERROR(
+ AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat,
+ kAudioUnitScope_Output, input_bus,
+ &application_format, size),
+ "Failed to set application format on output scope of input element");
+
+ // Set the application format on the input scope of the output element/bus.
+ LOG_AND_RETURN_IF_ERROR(
+ AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat,
+ kAudioUnitScope_Input, output_bus,
+ &application_format, size),
+ "Failed to set application format on input scope of output element");
+
+ // Specify the callback function that provides audio samples to the audio
+ // unit.
+ AURenderCallbackStruct render_callback;
+ render_callback.inputProc = GetPlayoutData;
+ render_callback.inputProcRefCon = this;
+ LOG_AND_RETURN_IF_ERROR(
+ AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_SetRenderCallback,
+ kAudioUnitScope_Input, output_bus, &render_callback,
+ sizeof(render_callback)),
+ "Failed to specify the render callback on the output element");
+
+ // Disable AU buffer allocation for the recorder, we allocate our own.
+  // TODO(henrika): not sure that it actually saves resources to make this call.
+ UInt32 flag = 0;
+ LOG_AND_RETURN_IF_ERROR(
+ AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_ShouldAllocateBuffer,
+ kAudioUnitScope_Output, input_bus, &flag,
+ sizeof(flag)),
+ "Failed to disable buffer allocation on the input element");
+
+ // Specify the callback to be called by the I/O thread to us when input audio
+ // is available. The recorded samples can then be obtained by calling the
+ // AudioUnitRender() method.
+ AURenderCallbackStruct input_callback;
+ input_callback.inputProc = RecordedDataIsAvailable;
+ input_callback.inputProcRefCon = this;
+ LOG_AND_RETURN_IF_ERROR(
+ AudioUnitSetProperty(vpio_unit_,
+ kAudioOutputUnitProperty_SetInputCallback,
+ kAudioUnitScope_Global, input_bus, &input_callback,
+ sizeof(input_callback)),
+ "Failed to specify the input callback on the input element");
+
+ // Initialize the Voice-Processing I/O unit instance.
+ LOG_AND_RETURN_IF_ERROR(AudioUnitInitialize(vpio_unit_),
+ "Failed to initialize the Voice-Processing I/O unit");
+ return true;
+}
+
+bool AudioDeviceIOS::InitPlayOrRecord() {
+ LOGI() << "InitPlayOrRecord";
+ AVAudioSession* session = [AVAudioSession sharedInstance];
+ // Activate the audio session and ask for a set of preferred audio parameters.
+ ActivateAudioSession(session, true);
+
+  // Ensure that we got what we asked for in our active audio session.
+ SetupAudioBuffersForActiveAudioSession();
+
+ // Create, setup and initialize a new Voice-Processing I/O unit.
+ if (!SetupAndInitializeVoiceProcessingAudioUnit()) {
+ return false;
+ }
+
+ // Listen to audio interruptions.
+ // TODO(henrika): learn this area better.
+ NSNotificationCenter* center = [NSNotificationCenter defaultCenter];
+ id observer = [center
+ addObserverForName:AVAudioSessionInterruptionNotification
+ object:nil
+ queue:[NSOperationQueue mainQueue]
+ usingBlock:^(NSNotification* notification) {
+ NSNumber* typeNumber =
+ [notification userInfo][AVAudioSessionInterruptionTypeKey];
+ AVAudioSessionInterruptionType type =
+ (AVAudioSessionInterruptionType)[typeNumber
+ unsignedIntegerValue];
+ switch (type) {
+ case AVAudioSessionInterruptionTypeBegan:
+ // At this point our audio session has been deactivated and
+ // the audio unit render callbacks no longer occur.
+ // Nothing to do.
+ break;
+ case AVAudioSessionInterruptionTypeEnded: {
+ NSError* error = nil;
+ AVAudioSession* session = [AVAudioSession sharedInstance];
+ [session setActive:YES error:&error];
+ if (error != nil) {
+ LOG_F(LS_ERROR) << "Failed to active audio session";
}
+ // Post interruption the audio unit render callbacks don't
+ // automatically continue, so we restart the unit manually
+ // here.
+ AudioOutputUnitStop(vpio_unit_);
+ AudioOutputUnitStart(vpio_unit_);
+ break;
+ }
}
- } // for
-
- // Insert data into the Audio Device Buffer if found any
- if (foundBuf) {
- // Update recording delay
- UpdateRecordingDelay();
-
- // Set the recorded buffer
- _ptrAudioBuffer->SetRecordedBuffer(
- reinterpret_cast<int8_t*>(
- _recordingBuffer[lowestSeqBufPos]),
- _recordingLength[lowestSeqBufPos]);
-
- // Don't need to set the current mic level in ADB since we only
- // support digital AGC,
- // and besides we cannot get or set the IOS mic level anyway.
-
- // Set VQE info, use clockdrift == 0
- _ptrAudioBuffer->SetVQEData(_playoutDelay, _recordingDelay, 0);
-
- // Deliver recorded samples at specified sample rate, mic level
- // etc. to the observer using callback
- _ptrAudioBuffer->DeliverRecordedData();
-
- // Make buffer available
- _recordingSeqNumber[lowestSeqBufPos] = 0;
- _recordingBufferTotalSize -= _recordingLength[lowestSeqBufPos];
- // Must be done last to avoid interrupt problems between threads
- _recordingLength[lowestSeqBufPos] = 0;
- }
- } // while (foundBuf)
- } // if (_recording)
-
- {
- // Normal case
- // Sleep thread (5ms) to let other threads get to work
- // todo: Is 5 ms optimal? Sleep shorter if inserted into the Audio
- // Device Buffer?
- timespec t;
- t.tv_sec = 0;
- t.tv_nsec = 5*1000*1000;
- nanosleep(&t, NULL);
- }
+ }];
+ // Increment refcount on observer using ARC bridge. Instance variable is a
+ // void* instead of an id because header is included in other pure C++
+ // files.
+ audio_interruption_observer_ = (__bridge_retained void*)observer;
+ return true;
+}
- return true;
+bool AudioDeviceIOS::ShutdownPlayOrRecord() {
+ LOGI() << "ShutdownPlayOrRecord";
+ if (audio_interruption_observer_ != nullptr) {
+ NSNotificationCenter* center = [NSNotificationCenter defaultCenter];
+ // Transfer ownership of observer back to ARC, which will dealloc the
+ // observer once it exits this scope.
+ id observer = (__bridge_transfer id)audio_interruption_observer_;
+ [center removeObserver:observer];
+ audio_interruption_observer_ = nullptr;
+ }
+ // Close and delete the voice-processing I/O unit.
+ OSStatus result = -1;
+ if (nullptr != vpio_unit_) {
+ result = AudioOutputUnitStop(vpio_unit_);
+ if (result != noErr) {
+ LOG_F(LS_ERROR) << "AudioOutputUnitStop failed: " << result;
+ }
+ result = AudioComponentInstanceDispose(vpio_unit_);
+ if (result != noErr) {
+ LOG_F(LS_ERROR) << "AudioComponentInstanceDispose failed: " << result;
+ }
+ vpio_unit_ = nullptr;
+ }
+ // All I/O should be stopped or paused prior to deactivating the audio
+ // session, hence we deactivate as last action.
+ AVAudioSession* session = [AVAudioSession sharedInstance];
+ ActivateAudioSession(session, false);
+ return true;
+}
+
+OSStatus AudioDeviceIOS::RecordedDataIsAvailable(
+ void* in_ref_con,
+ AudioUnitRenderActionFlags* io_action_flags,
+ const AudioTimeStamp* in_time_stamp,
+ UInt32 in_bus_number,
+ UInt32 in_number_frames,
+ AudioBufferList* io_data) {
+ RTC_DCHECK_EQ(1u, in_bus_number);
+ RTC_DCHECK(
+ !io_data); // no buffer should be allocated for input at this stage
+ AudioDeviceIOS* audio_device_ios = static_cast<AudioDeviceIOS*>(in_ref_con);
+ return audio_device_ios->OnRecordedDataIsAvailable(
+ io_action_flags, in_time_stamp, in_bus_number, in_number_frames);
+}
+
+OSStatus AudioDeviceIOS::OnRecordedDataIsAvailable(
+ AudioUnitRenderActionFlags* io_action_flags,
+ const AudioTimeStamp* in_time_stamp,
+ UInt32 in_bus_number,
+ UInt32 in_number_frames) {
+ RTC_DCHECK_EQ(record_parameters_.frames_per_buffer(), in_number_frames);
+ OSStatus result = noErr;
+ // Simply return if recording is not enabled.
+ if (!rtc::AtomicOps::AcquireLoad(&recording_))
+ return result;
+ RTC_DCHECK_EQ(record_parameters_.frames_per_buffer(), in_number_frames);
+ // Obtain the recorded audio samples by initiating a rendering cycle.
+ // Since it happens on the input bus, the |io_data| parameter is a reference
+ // to the preallocated audio buffer list that the audio unit renders into.
+ // TODO(henrika): should error handling be improved?
+ AudioBufferList* io_data = &audio_record_buffer_list_;
+ result = AudioUnitRender(vpio_unit_, io_action_flags, in_time_stamp,
+ in_bus_number, in_number_frames, io_data);
+ if (result != noErr) {
+ LOG_F(LS_ERROR) << "AudioOutputUnitStart failed: " << result;
+ return result;
+ }
+ // Get a pointer to the recorded audio and send it to the WebRTC ADB.
+ // Use the FineAudioBuffer instance to convert between native buffer size
+ // and the 10ms buffer size used by WebRTC.
+ const UInt32 data_size_in_bytes = io_data->mBuffers[0].mDataByteSize;
+ RTC_CHECK_EQ(data_size_in_bytes / kBytesPerSample, in_number_frames);
+ SInt8* data = static_cast<SInt8*>(io_data->mBuffers[0].mData);
+ fine_audio_buffer_->DeliverRecordedData(data, data_size_in_bytes,
+ kFixedPlayoutDelayEstimate,
+ kFixedRecordDelayEstimate);
+ return noErr;
+}
+
+OSStatus AudioDeviceIOS::GetPlayoutData(
+ void* in_ref_con,
+ AudioUnitRenderActionFlags* io_action_flags,
+ const AudioTimeStamp* in_time_stamp,
+ UInt32 in_bus_number,
+ UInt32 in_number_frames,
+ AudioBufferList* io_data) {
+ RTC_DCHECK_EQ(0u, in_bus_number);
+ RTC_DCHECK(io_data);
+ AudioDeviceIOS* audio_device_ios = static_cast<AudioDeviceIOS*>(in_ref_con);
+ return audio_device_ios->OnGetPlayoutData(io_action_flags, in_number_frames,
+ io_data);
+}
+
+OSStatus AudioDeviceIOS::OnGetPlayoutData(
+ AudioUnitRenderActionFlags* io_action_flags,
+ UInt32 in_number_frames,
+ AudioBufferList* io_data) {
+ // Verify 16-bit, noninterleaved mono PCM signal format.
+ RTC_DCHECK_EQ(1u, io_data->mNumberBuffers);
+ RTC_DCHECK_EQ(1u, io_data->mBuffers[0].mNumberChannels);
+ // Get pointer to internal audio buffer to which new audio data shall be
+ // written.
+ const UInt32 dataSizeInBytes = io_data->mBuffers[0].mDataByteSize;
+ RTC_CHECK_EQ(dataSizeInBytes / kBytesPerSample, in_number_frames);
+ SInt8* destination = static_cast<SInt8*>(io_data->mBuffers[0].mData);
+ // Produce silence and give audio unit a hint about it if playout is not
+ // activated.
+ if (!rtc::AtomicOps::AcquireLoad(&playing_)) {
+ *io_action_flags |= kAudioUnitRenderAction_OutputIsSilence;
+ memset(destination, 0, dataSizeInBytes);
+ return noErr;
+ }
+ // Read decoded 16-bit PCM samples from WebRTC (using a size that matches
+ // the native I/O audio unit) to a preallocated intermediate buffer and
+ // copy the result to the audio buffer in the |io_data| destination.
+ SInt8* source = playout_audio_buffer_.get();
+ fine_audio_buffer_->GetPlayoutData(source);
+ memcpy(destination, source, dataSizeInBytes);
+ return noErr;
}
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_device/ios/audio_device_not_implemented_ios.mm b/chromium/third_party/webrtc/modules/audio_device/ios/audio_device_not_implemented_ios.mm
new file mode 100644
index 00000000000..acfc30d7f35
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/audio_device/ios/audio_device_not_implemented_ios.mm
@@ -0,0 +1,292 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_device/ios/audio_device_ios.h"
+
+#include "webrtc/base/checks.h"
+#include "webrtc/base/logging.h"
+
+namespace webrtc {
+
+int32_t AudioDeviceIOS::PlayoutBuffer(AudioDeviceModule::BufferType& type,
+ uint16_t& sizeMS) const {
+ RTC_NOTREACHED() << "Not implemented";
+ return -1;
+}
+
+int32_t AudioDeviceIOS::ActiveAudioLayer(
+ AudioDeviceModule::AudioLayer& audioLayer) const {
+ audioLayer = AudioDeviceModule::kPlatformDefaultAudio;
+ return 0;
+}
+
+int32_t AudioDeviceIOS::ResetAudioDevice() {
+ RTC_NOTREACHED() << "Not implemented";
+ return -1;
+}
+
+int16_t AudioDeviceIOS::PlayoutDevices() {
+ // TODO(henrika): improve.
+ LOG_F(LS_WARNING) << "Not implemented";
+ return (int16_t)1;
+}
+
+int16_t AudioDeviceIOS::RecordingDevices() {
+ // TODO(henrika): improve.
+ LOG_F(LS_WARNING) << "Not implemented";
+ return (int16_t)1;
+}
+
+int32_t AudioDeviceIOS::InitSpeaker() {
+ return 0;
+}
+
+bool AudioDeviceIOS::SpeakerIsInitialized() const {
+ return true;
+}
+
+int32_t AudioDeviceIOS::SpeakerVolumeIsAvailable(bool& available) {
+ available = false;
+ return 0;
+}
+
+int32_t AudioDeviceIOS::SetSpeakerVolume(uint32_t volume) {
+ RTC_NOTREACHED() << "Not implemented";
+ return -1;
+}
+
+int32_t AudioDeviceIOS::SpeakerVolume(uint32_t& volume) const {
+ RTC_NOTREACHED() << "Not implemented";
+ return -1;
+}
+
+int32_t AudioDeviceIOS::SetWaveOutVolume(uint16_t, uint16_t) {
+ RTC_NOTREACHED() << "Not implemented";
+ return -1;
+}
+
+int32_t AudioDeviceIOS::WaveOutVolume(uint16_t&, uint16_t&) const {
+ RTC_NOTREACHED() << "Not implemented";
+ return -1;
+}
+
+int32_t AudioDeviceIOS::MaxSpeakerVolume(uint32_t& maxVolume) const {
+ RTC_NOTREACHED() << "Not implemented";
+ return -1;
+}
+
+int32_t AudioDeviceIOS::MinSpeakerVolume(uint32_t& minVolume) const {
+ RTC_NOTREACHED() << "Not implemented";
+ return -1;
+}
+
+int32_t AudioDeviceIOS::SpeakerVolumeStepSize(uint16_t& stepSize) const {
+ RTC_NOTREACHED() << "Not implemented";
+ return -1;
+}
+
+int32_t AudioDeviceIOS::SpeakerMuteIsAvailable(bool& available) {
+ available = false;
+ return 0;
+}
+
+int32_t AudioDeviceIOS::SetSpeakerMute(bool enable) {
+ RTC_NOTREACHED() << "Not implemented";
+ return -1;
+}
+
+int32_t AudioDeviceIOS::SpeakerMute(bool& enabled) const {
+ RTC_NOTREACHED() << "Not implemented";
+ return -1;
+}
+
+int32_t AudioDeviceIOS::SetPlayoutDevice(uint16_t index) {
+ LOG_F(LS_WARNING) << "Not implemented";
+ return 0;
+}
+
+int32_t AudioDeviceIOS::SetPlayoutDevice(AudioDeviceModule::WindowsDeviceType) {
+ RTC_NOTREACHED() << "Not implemented";
+ return -1;
+}
+
+bool AudioDeviceIOS::PlayoutWarning() const {
+ return false;
+}
+
+bool AudioDeviceIOS::PlayoutError() const {
+ return false;
+}
+
+bool AudioDeviceIOS::RecordingWarning() const {
+ return false;
+}
+
+bool AudioDeviceIOS::RecordingError() const {
+ return false;
+}
+
+int32_t AudioDeviceIOS::InitMicrophone() {
+ return 0;
+}
+
+bool AudioDeviceIOS::MicrophoneIsInitialized() const {
+ return true;
+}
+
+int32_t AudioDeviceIOS::MicrophoneMuteIsAvailable(bool& available) {
+ available = false;
+ return 0;
+}
+
+int32_t AudioDeviceIOS::SetMicrophoneMute(bool enable) {
+ RTC_NOTREACHED() << "Not implemented";
+ return -1;
+}
+
+int32_t AudioDeviceIOS::MicrophoneMute(bool& enabled) const {
+ RTC_NOTREACHED() << "Not implemented";
+ return -1;
+}
+
+int32_t AudioDeviceIOS::MicrophoneBoostIsAvailable(bool& available) {
+ available = false;
+ return 0;
+}
+
+int32_t AudioDeviceIOS::SetMicrophoneBoost(bool enable) {
+ RTC_NOTREACHED() << "Not implemented";
+ return -1;
+}
+
+int32_t AudioDeviceIOS::MicrophoneBoost(bool& enabled) const {
+ enabled = false;
+ return 0;
+}
+
+int32_t AudioDeviceIOS::StereoRecordingIsAvailable(bool& available) {
+ available = false;
+ return 0;
+}
+
+int32_t AudioDeviceIOS::SetStereoRecording(bool enable) {
+ LOG_F(LS_WARNING) << "Not implemented";
+ return -1;
+}
+
+int32_t AudioDeviceIOS::StereoRecording(bool& enabled) const {
+ enabled = false;
+ return 0;
+}
+
+int32_t AudioDeviceIOS::StereoPlayoutIsAvailable(bool& available) {
+ available = false;
+ return 0;
+}
+
+int32_t AudioDeviceIOS::SetStereoPlayout(bool enable) {
+ LOG_F(LS_WARNING) << "Not implemented";
+ return -1;
+}
+
+int32_t AudioDeviceIOS::StereoPlayout(bool& enabled) const {
+ enabled = false;
+ return 0;
+}
+
+int32_t AudioDeviceIOS::SetAGC(bool enable) {
+ if (enable) {
+ RTC_NOTREACHED() << "Should never be called";
+ }
+ return -1;
+}
+
+bool AudioDeviceIOS::AGC() const {
+ return false;
+}
+
+int32_t AudioDeviceIOS::MicrophoneVolumeIsAvailable(bool& available) {
+ available = false;
+ return 0;
+}
+
+int32_t AudioDeviceIOS::SetMicrophoneVolume(uint32_t volume) {
+ RTC_NOTREACHED() << "Not implemented";
+ return -1;
+}
+
+int32_t AudioDeviceIOS::MicrophoneVolume(uint32_t& volume) const {
+ RTC_NOTREACHED() << "Not implemented";
+ return -1;
+}
+
+int32_t AudioDeviceIOS::MaxMicrophoneVolume(uint32_t& maxVolume) const {
+ RTC_NOTREACHED() << "Not implemented";
+ return -1;
+}
+
+int32_t AudioDeviceIOS::MinMicrophoneVolume(uint32_t& minVolume) const {
+ RTC_NOTREACHED() << "Not implemented";
+ return -1;
+}
+
+int32_t AudioDeviceIOS::MicrophoneVolumeStepSize(uint16_t& stepSize) const {
+ RTC_NOTREACHED() << "Not implemented";
+ return -1;
+}
+
+int32_t AudioDeviceIOS::PlayoutDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) {
+ RTC_NOTREACHED() << "Not implemented";
+ return -1;
+}
+
+int32_t AudioDeviceIOS::RecordingDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) {
+ RTC_NOTREACHED() << "Not implemented";
+ return -1;
+}
+
+int32_t AudioDeviceIOS::SetRecordingDevice(uint16_t index) {
+ LOG_F(LS_WARNING) << "Not implemented";
+ return 0;
+}
+
+int32_t AudioDeviceIOS::SetRecordingDevice(
+ AudioDeviceModule::WindowsDeviceType) {
+ RTC_NOTREACHED() << "Not implemented";
+ return -1;
+}
+
+int32_t AudioDeviceIOS::PlayoutIsAvailable(bool& available) {
+ available = true;
+ return 0;
+}
+
+int32_t AudioDeviceIOS::RecordingIsAvailable(bool& available) {
+ available = true;
+ return 0;
+}
+
+int32_t AudioDeviceIOS::SetPlayoutBuffer(
+ const AudioDeviceModule::BufferType type,
+ uint16_t sizeMS) {
+ RTC_NOTREACHED() << "Not implemented";
+ return -1;
+}
+
+int32_t AudioDeviceIOS::CPULoad(uint16_t&) const {
+ RTC_NOTREACHED() << "Not implemented";
+ return -1;
+}
+
+} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_device/ios/audio_device_unittest_ios.cc b/chromium/third_party/webrtc/modules/audio_device/ios/audio_device_unittest_ios.cc
new file mode 100644
index 00000000000..d639feae03c
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/audio_device/ios/audio_device_unittest_ios.cc
@@ -0,0 +1,787 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <algorithm>
+#include <limits>
+#include <list>
+#include <numeric>
+#include <string>
+#include <vector>
+
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/arraysize.h"
+#include "webrtc/base/criticalsection.h"
+#include "webrtc/base/format_macros.h"
+#include "webrtc/base/logging.h"
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/base/scoped_ref_ptr.h"
+#include "webrtc/modules/audio_device/audio_device_impl.h"
+#include "webrtc/modules/audio_device/include/audio_device.h"
+#include "webrtc/modules/audio_device/ios/audio_device_ios.h"
+#include "webrtc/system_wrappers/interface/clock.h"
+#include "webrtc/system_wrappers/interface/event_wrapper.h"
+#include "webrtc/system_wrappers/interface/sleep.h"
+#include "webrtc/test/testsupport/fileutils.h"
+
+using std::cout;
+using std::endl;
+using ::testing::_;
+using ::testing::AtLeast;
+using ::testing::Gt;
+using ::testing::Invoke;
+using ::testing::NiceMock;
+using ::testing::NotNull;
+using ::testing::Return;
+
+// #define ENABLE_DEBUG_PRINTF
+#ifdef ENABLE_DEBUG_PRINTF
+#define PRINTD(...) fprintf(stderr, __VA_ARGS__);
+#else
+#define PRINTD(...) ((void)0)
+#endif
+#define PRINT(...) fprintf(stderr, __VA_ARGS__);
+
+namespace webrtc {
+
+// Number of callbacks (input or output) the test waits for before we set
+// an event indicating that the test was OK.
+static const size_t kNumCallbacks = 10;
+// Max amount of time we wait for an event to be set while counting callbacks.
+static const int kTestTimeOutInMilliseconds = 10 * 1000;
+// Number of bits per PCM audio sample.
+static const size_t kBitsPerSample = 16;
+// Number of bytes per PCM audio sample.
+static const size_t kBytesPerSample = kBitsPerSample / 8;
+// Average number of audio callbacks per second assuming 10ms packet size.
+static const size_t kNumCallbacksPerSecond = 100;
+// Play out a test file during this time (unit is in seconds).
+static const int kFilePlayTimeInSec = 15;
+// Run the full-duplex test during this time (unit is in seconds).
+// Note that first |kNumIgnoreFirstCallbacks| are ignored.
+static const int kFullDuplexTimeInSec = 10;
+// Wait for the callback sequence to stabilize by ignoring this amount of the
+// initial callbacks (avoids initial FIFO access).
+// Only used in the RunPlayoutAndRecordingInFullDuplex test.
+static const size_t kNumIgnoreFirstCallbacks = 50;
+// Sets the number of impulses per second in the latency test.
+// TODO(henrika): fine tune this setting for iOS.
+static const int kImpulseFrequencyInHz = 1;
+// Length of round-trip latency measurements. Number of transmitted impulses
+// is kImpulseFrequencyInHz * kMeasureLatencyTimeInSec - 1.
+// TODO(henrika): fine tune this setting for iOS.
+static const int kMeasureLatencyTimeInSec = 5;
+// Utilized in round-trip latency measurements to avoid capturing noise samples.
+// TODO(henrika): fine tune this setting for iOS.
+static const int kImpulseThreshold = 50;
+static const char kTag[] = "[..........] ";
+
+enum TransportType {
+ kPlayout = 0x1,
+ kRecording = 0x2,
+};
+
+// Interface for processing the audio stream. Real implementations can e.g.
+// run audio in loopback, read audio from a file or perform latency
+// measurements.
+class AudioStreamInterface {
+ public:
+ virtual void Write(const void* source, size_t num_frames) = 0;
+ virtual void Read(void* destination, size_t num_frames) = 0;
+
+ protected:
+ virtual ~AudioStreamInterface() {}
+};
+
+// Reads audio samples from a PCM file where the file is stored in memory at
+// construction.
+class FileAudioStream : public AudioStreamInterface {
+ public:
+ FileAudioStream(size_t num_callbacks,
+ const std::string& file_name,
+ int sample_rate)
+ : file_size_in_bytes_(0), sample_rate_(sample_rate), file_pos_(0) {
+ file_size_in_bytes_ = test::GetFileSize(file_name);
+ sample_rate_ = sample_rate;
+ EXPECT_GE(file_size_in_callbacks(), num_callbacks)
+ << "Size of test file is not large enough to last during the test.";
+ const size_t num_16bit_samples =
+ test::GetFileSize(file_name) / kBytesPerSample;
+ file_.reset(new int16_t[num_16bit_samples]);
+ FILE* audio_file = fopen(file_name.c_str(), "rb");
+ EXPECT_NE(audio_file, nullptr);
+ size_t num_samples_read =
+ fread(file_.get(), sizeof(int16_t), num_16bit_samples, audio_file);
+ EXPECT_EQ(num_samples_read, num_16bit_samples);
+ fclose(audio_file);
+ }
+
+ // AudioStreamInterface::Write() is not implemented.
+ void Write(const void* source, size_t num_frames) override {}
+
+ // Read samples from file stored in memory (at construction) and copy
+ // |num_frames| (<=> 10ms) to the |destination| byte buffer.
+ void Read(void* destination, size_t num_frames) override {
+ memcpy(destination, static_cast<int16_t*>(&file_[file_pos_]),
+ num_frames * sizeof(int16_t));
+ file_pos_ += num_frames;
+ }
+
+ int file_size_in_seconds() const {
+ return static_cast<int>(
+ file_size_in_bytes_ / (kBytesPerSample * sample_rate_));
+ }
+ size_t file_size_in_callbacks() const {
+ return file_size_in_seconds() * kNumCallbacksPerSecond;
+ }
+
+ private:
+ size_t file_size_in_bytes_;
+ int sample_rate_;
+ rtc::scoped_ptr<int16_t[]> file_;
+ size_t file_pos_;
+};
+
+// Simple first in first out (FIFO) class that wraps a list of 16-bit audio
+// buffers of fixed size and allows Write and Read operations. The idea is to
+// store recorded audio buffers (using Write) and then read (using Read) these
+// stored buffers with as short delay as possible when the audio layer needs
+// data to play out. The number of buffers in the FIFO will stabilize under
+// normal conditions since there will be a balance between Write and Read calls.
+// The container is a std::list and access is protected with a lock
+// since both sides (playout and recording) are driven by its own thread.
+class FifoAudioStream : public AudioStreamInterface {
+ public:
+ explicit FifoAudioStream(size_t frames_per_buffer)
+ : frames_per_buffer_(frames_per_buffer),
+ bytes_per_buffer_(frames_per_buffer_ * sizeof(int16_t)),
+ fifo_(new AudioBufferList),
+ largest_size_(0),
+ total_written_elements_(0),
+ write_count_(0) {
+ EXPECT_NE(fifo_.get(), nullptr);
+ }
+
+ ~FifoAudioStream() { Flush(); }
+
+ // Allocate new memory, copy |num_frames| samples from |source| into memory
+ // and add pointer to the memory location to end of the list.
+ // Increases the size of the FIFO by one element.
+ void Write(const void* source, size_t num_frames) override {
+ ASSERT_EQ(num_frames, frames_per_buffer_);
+ PRINTD("+");
+ if (write_count_++ < kNumIgnoreFirstCallbacks) {
+ return;
+ }
+ int16_t* memory = new int16_t[frames_per_buffer_];
+ memcpy(static_cast<int16_t*>(&memory[0]), source, bytes_per_buffer_);
+ rtc::CritScope lock(&lock_);
+ fifo_->push_back(memory);
+ const size_t size = fifo_->size();
+ if (size > largest_size_) {
+ largest_size_ = size;
+ PRINTD("(%" PRIuS ")", largest_size_);
+ }
+ total_written_elements_ += size;
+ }
+
+ // Read pointer to data buffer from front of list, copy |num_frames| of stored
+ // data into |destination| and delete the utilized memory allocation.
+ // Decreases the size of the FIFO by one element.
+ void Read(void* destination, size_t num_frames) override {
+ ASSERT_EQ(num_frames, frames_per_buffer_);
+ PRINTD("-");
+ rtc::CritScope lock(&lock_);
+ if (fifo_->empty()) {
+ memset(destination, 0, bytes_per_buffer_);
+ } else {
+ int16_t* memory = fifo_->front();
+ fifo_->pop_front();
+ memcpy(destination, static_cast<int16_t*>(&memory[0]), bytes_per_buffer_);
+ delete memory;
+ }
+ }
+
+ size_t size() const { return fifo_->size(); }
+
+ size_t largest_size() const { return largest_size_; }
+
+ size_t average_size() const {
+ return (total_written_elements_ == 0)
+ ? 0.0
+ : 0.5 +
+ static_cast<float>(total_written_elements_) /
+ (write_count_ - kNumIgnoreFirstCallbacks);
+ }
+
+ private:
+ void Flush() {
+ for (auto it = fifo_->begin(); it != fifo_->end(); ++it) {
+ delete *it;
+ }
+ fifo_->clear();
+ }
+
+ using AudioBufferList = std::list<int16_t*>;
+ rtc::CriticalSection lock_;
+ const size_t frames_per_buffer_;
+ const size_t bytes_per_buffer_;
+ rtc::scoped_ptr<AudioBufferList> fifo_;
+ size_t largest_size_;
+ size_t total_written_elements_;
+ size_t write_count_;
+};
+
+// Inserts periodic impulses and measures the latency between the time of
+// transmission and time of receiving the same impulse.
+// Usage requires a special hardware called Audio Loopback Dongle.
+// See http://source.android.com/devices/audio/loopback.html for details.
+class LatencyMeasuringAudioStream : public AudioStreamInterface {
+ public:
+ explicit LatencyMeasuringAudioStream(size_t frames_per_buffer)
+ : clock_(Clock::GetRealTimeClock()),
+ frames_per_buffer_(frames_per_buffer),
+ bytes_per_buffer_(frames_per_buffer_ * sizeof(int16_t)),
+ play_count_(0),
+ rec_count_(0),
+ pulse_time_(0) {}
+
+ // Insert periodic impulses in first two samples of |destination|.
+ void Read(void* destination, size_t num_frames) override {
+ ASSERT_EQ(num_frames, frames_per_buffer_);
+ if (play_count_ == 0) {
+ PRINT("[");
+ }
+ play_count_++;
+ memset(destination, 0, bytes_per_buffer_);
+ if (play_count_ % (kNumCallbacksPerSecond / kImpulseFrequencyInHz) == 0) {
+ if (pulse_time_ == 0) {
+ pulse_time_ = clock_->TimeInMilliseconds();
+ }
+ PRINT(".");
+ const int16_t impulse = std::numeric_limits<int16_t>::max();
+ int16_t* ptr16 = static_cast<int16_t*>(destination);
+ for (size_t i = 0; i < 2; ++i) {
+ ptr16[i] = impulse;
+ }
+ }
+ }
+
+ // Detect received impulses in |source|, derive time between transmission and
+ // detection and add the calculated delay to list of latencies.
+ void Write(const void* source, size_t num_frames) override {
+ ASSERT_EQ(num_frames, frames_per_buffer_);
+ rec_count_++;
+ if (pulse_time_ == 0) {
+ // Avoid detection of new impulse response until a new impulse has
+ // been transmitted (sets |pulse_time_| to value larger than zero).
+ return;
+ }
+ const int16_t* ptr16 = static_cast<const int16_t*>(source);
+ std::vector<int16_t> vec(ptr16, ptr16 + num_frames);
+ // Find max value in the audio buffer.
+ int max = *std::max_element(vec.begin(), vec.end());
+ // Find index (element position in vector) of the max element.
+ int index_of_max =
+ std::distance(vec.begin(), std::find(vec.begin(), vec.end(), max));
+ if (max > kImpulseThreshold) {
+ PRINTD("(%d,%d)", max, index_of_max);
+ int64_t now_time = clock_->TimeInMilliseconds();
+ int extra_delay = IndexToMilliseconds(static_cast<double>(index_of_max));
+ PRINTD("[%d]", static_cast<int>(now_time - pulse_time_));
+ PRINTD("[%d]", extra_delay);
+ // Total latency is the difference between transmit time and detection
+      // time plus the extra delay within the buffer in which we detected the
+ // received impulse. It is transmitted at sample 0 but can be received
+ // at sample N where N > 0. The term |extra_delay| accounts for N and it
+ // is a value between 0 and 10ms.
+ latencies_.push_back(now_time - pulse_time_ + extra_delay);
+ pulse_time_ = 0;
+ } else {
+ PRINTD("-");
+ }
+ }
+
+ size_t num_latency_values() const { return latencies_.size(); }
+
+ int min_latency() const {
+ if (latencies_.empty())
+ return 0;
+ return *std::min_element(latencies_.begin(), latencies_.end());
+ }
+
+ int max_latency() const {
+ if (latencies_.empty())
+ return 0;
+ return *std::max_element(latencies_.begin(), latencies_.end());
+ }
+
+ int average_latency() const {
+ if (latencies_.empty())
+ return 0;
+ return 0.5 +
+ static_cast<double>(
+ std::accumulate(latencies_.begin(), latencies_.end(), 0)) /
+ latencies_.size();
+ }
+
+ void PrintResults() const {
+ PRINT("] ");
+ for (auto it = latencies_.begin(); it != latencies_.end(); ++it) {
+ PRINT("%d ", *it);
+ }
+ PRINT("\n");
+ PRINT("%s[min, max, avg]=[%d, %d, %d] ms\n", kTag, min_latency(),
+ max_latency(), average_latency());
+ }
+
+ int IndexToMilliseconds(double index) const {
+ return 10.0 * (index / frames_per_buffer_) + 0.5;
+ }
+
+ private:
+ Clock* clock_;
+ const size_t frames_per_buffer_;
+ const size_t bytes_per_buffer_;
+ size_t play_count_;
+ size_t rec_count_;
+ int64_t pulse_time_;
+ std::vector<int> latencies_;
+};
+// Mocks the AudioTransport object and proxies actions for the two callbacks
+// (RecordedDataIsAvailable and NeedMorePlayData) to different implementations
+// of AudioStreamInterface.
+class MockAudioTransport : public AudioTransport {
+ public:
+ explicit MockAudioTransport(int type)
+ : num_callbacks_(0),
+ type_(type),
+ play_count_(0),
+ rec_count_(0),
+ audio_stream_(nullptr) {}
+
+ virtual ~MockAudioTransport() {}
+
+ MOCK_METHOD10(RecordedDataIsAvailable,
+ int32_t(const void* audioSamples,
+ const size_t nSamples,
+ const size_t nBytesPerSample,
+ const uint8_t nChannels,
+ const uint32_t samplesPerSec,
+ const uint32_t totalDelayMS,
+ const int32_t clockDrift,
+ const uint32_t currentMicLevel,
+ const bool keyPressed,
+ uint32_t& newMicLevel));
+ MOCK_METHOD8(NeedMorePlayData,
+ int32_t(const size_t nSamples,
+ const size_t nBytesPerSample,
+ const uint8_t nChannels,
+ const uint32_t samplesPerSec,
+ void* audioSamples,
+ size_t& nSamplesOut,
+ int64_t* elapsed_time_ms,
+ int64_t* ntp_time_ms));
+
+ // Set default actions of the mock object. We are delegating to fake
+ // implementations (of AudioStreamInterface) here.
+ void HandleCallbacks(EventWrapper* test_is_done,
+ AudioStreamInterface* audio_stream,
+ size_t num_callbacks) {
+ test_is_done_ = test_is_done;
+ audio_stream_ = audio_stream;
+ num_callbacks_ = num_callbacks;
+ if (play_mode()) {
+ ON_CALL(*this, NeedMorePlayData(_, _, _, _, _, _, _, _))
+ .WillByDefault(
+ Invoke(this, &MockAudioTransport::RealNeedMorePlayData));
+ }
+ if (rec_mode()) {
+ ON_CALL(*this, RecordedDataIsAvailable(_, _, _, _, _, _, _, _, _, _))
+ .WillByDefault(
+ Invoke(this, &MockAudioTransport::RealRecordedDataIsAvailable));
+ }
+ }
+
+ int32_t RealRecordedDataIsAvailable(const void* audioSamples,
+ const size_t nSamples,
+ const size_t nBytesPerSample,
+ const uint8_t nChannels,
+ const uint32_t samplesPerSec,
+ const uint32_t totalDelayMS,
+ const int32_t clockDrift,
+ const uint32_t currentMicLevel,
+ const bool keyPressed,
+ uint32_t& newMicLevel) {
+ EXPECT_TRUE(rec_mode()) << "No test is expecting these callbacks.";
+ rec_count_++;
+ // Process the recorded audio stream if an AudioStreamInterface
+ // implementation exists.
+ if (audio_stream_) {
+ audio_stream_->Write(audioSamples, nSamples);
+ }
+ if (ReceivedEnoughCallbacks()) {
+ test_is_done_->Set();
+ }
+ return 0;
+ }
+
+ int32_t RealNeedMorePlayData(const size_t nSamples,
+ const size_t nBytesPerSample,
+ const uint8_t nChannels,
+ const uint32_t samplesPerSec,
+ void* audioSamples,
+ size_t& nSamplesOut,
+ int64_t* elapsed_time_ms,
+ int64_t* ntp_time_ms) {
+ EXPECT_TRUE(play_mode()) << "No test is expecting these callbacks.";
+ play_count_++;
+ nSamplesOut = nSamples;
+ // Read (possibly processed) audio stream samples to be played out if an
+ // AudioStreamInterface implementation exists.
+ if (audio_stream_) {
+ audio_stream_->Read(audioSamples, nSamples);
+ }
+ if (ReceivedEnoughCallbacks()) {
+ test_is_done_->Set();
+ }
+ return 0;
+ }
+
+ bool ReceivedEnoughCallbacks() {
+ bool recording_done = false;
+ if (rec_mode())
+ recording_done = rec_count_ >= num_callbacks_;
+ else
+ recording_done = true;
+
+ bool playout_done = false;
+ if (play_mode())
+ playout_done = play_count_ >= num_callbacks_;
+ else
+ playout_done = true;
+
+ return recording_done && playout_done;
+ }
+
+ bool play_mode() const { return type_ & kPlayout; }
+ bool rec_mode() const { return type_ & kRecording; }
+
+ private:
+ EventWrapper* test_is_done_;
+ size_t num_callbacks_;
+ int type_;
+ size_t play_count_;
+ size_t rec_count_;
+ AudioStreamInterface* audio_stream_;
+};
+
+// AudioDeviceTest test fixture.
+class AudioDeviceTest : public ::testing::Test {
+ protected:
+ AudioDeviceTest() : test_is_done_(EventWrapper::Create()) {
+ old_sev_ = rtc::LogMessage::GetLogToDebug();
+ // Set suitable logging level here. Change to rtc::LS_INFO for more verbose
+ // output. See webrtc/base/logging.h for complete list of options.
+ rtc::LogMessage::LogToDebug(rtc::LS_INFO);
+ // Add extra logging fields here (timestamps and thread id).
+ // rtc::LogMessage::LogTimestamps();
+ rtc::LogMessage::LogThreads();
+ // Creates an audio device using a default audio layer.
+ audio_device_ = CreateAudioDevice(AudioDeviceModule::kPlatformDefaultAudio);
+ EXPECT_NE(audio_device_.get(), nullptr);
+ EXPECT_EQ(0, audio_device_->Init());
+ EXPECT_EQ(0,
+ audio_device()->GetPlayoutAudioParameters(&playout_parameters_));
+ EXPECT_EQ(0, audio_device()->GetRecordAudioParameters(&record_parameters_));
+ }
+ virtual ~AudioDeviceTest() {
+ EXPECT_EQ(0, audio_device_->Terminate());
+ rtc::LogMessage::LogToDebug(old_sev_);
+ }
+
+ int playout_sample_rate() const { return playout_parameters_.sample_rate(); }
+ int record_sample_rate() const { return record_parameters_.sample_rate(); }
+ int playout_channels() const { return playout_parameters_.channels(); }
+ int record_channels() const { return record_parameters_.channels(); }
+ size_t playout_frames_per_10ms_buffer() const {
+ return playout_parameters_.frames_per_10ms_buffer();
+ }
+ size_t record_frames_per_10ms_buffer() const {
+ return record_parameters_.frames_per_10ms_buffer();
+ }
+
+ rtc::scoped_refptr<AudioDeviceModule> audio_device() const {
+ return audio_device_;
+ }
+
+ AudioDeviceModuleImpl* audio_device_impl() const {
+ return static_cast<AudioDeviceModuleImpl*>(audio_device_.get());
+ }
+
+ AudioDeviceBuffer* audio_device_buffer() const {
+ return audio_device_impl()->GetAudioDeviceBuffer();
+ }
+
+ rtc::scoped_refptr<AudioDeviceModule> CreateAudioDevice(
+ AudioDeviceModule::AudioLayer audio_layer) {
+ rtc::scoped_refptr<AudioDeviceModule> module(
+ AudioDeviceModuleImpl::Create(0, audio_layer));
+ return module;
+ }
+
+ // Returns file name relative to the resource root given a sample rate.
+ std::string GetFileName(int sample_rate) {
+ EXPECT_TRUE(sample_rate == 48000 || sample_rate == 44100 ||
+ sample_rate == 16000);
+ char fname[64];
+ snprintf(fname, sizeof(fname), "audio_device/audio_short%d",
+ sample_rate / 1000);
+ std::string file_name(webrtc::test::ResourcePath(fname, "pcm"));
+ EXPECT_TRUE(test::FileExists(file_name));
+#ifdef ENABLE_DEBUG_PRINTF
+ PRINTD("file name: %s\n", file_name.c_str());
+ const size_t bytes = test::GetFileSize(file_name);
+ PRINTD("file size: %" PRIuS " [bytes]\n", bytes);
+ PRINTD("file size: %" PRIuS " [samples]\n", bytes / kBytesPerSample);
+ const int seconds =
+ static_cast<int>(bytes / (sample_rate * kBytesPerSample));
+ PRINTD("file size: %d [secs]\n", seconds);
+ PRINTD("file size: %" PRIuS " [callbacks]\n",
+ seconds * kNumCallbacksPerSecond);
+#endif
+ return file_name;
+ }
+
+ void StartPlayout() {
+ EXPECT_FALSE(audio_device()->PlayoutIsInitialized());
+ EXPECT_FALSE(audio_device()->Playing());
+ EXPECT_EQ(0, audio_device()->InitPlayout());
+ EXPECT_TRUE(audio_device()->PlayoutIsInitialized());
+ EXPECT_EQ(0, audio_device()->StartPlayout());
+ EXPECT_TRUE(audio_device()->Playing());
+ }
+
+ void StopPlayout() {
+ EXPECT_EQ(0, audio_device()->StopPlayout());
+ EXPECT_FALSE(audio_device()->Playing());
+ EXPECT_FALSE(audio_device()->PlayoutIsInitialized());
+ }
+
+ void StartRecording() {
+ EXPECT_FALSE(audio_device()->RecordingIsInitialized());
+ EXPECT_FALSE(audio_device()->Recording());
+ EXPECT_EQ(0, audio_device()->InitRecording());
+ EXPECT_TRUE(audio_device()->RecordingIsInitialized());
+ EXPECT_EQ(0, audio_device()->StartRecording());
+ EXPECT_TRUE(audio_device()->Recording());
+ }
+
+ void StopRecording() {
+ EXPECT_EQ(0, audio_device()->StopRecording());
+ EXPECT_FALSE(audio_device()->Recording());
+ }
+
+ rtc::scoped_ptr<EventWrapper> test_is_done_;
+ rtc::scoped_refptr<AudioDeviceModule> audio_device_;
+ AudioParameters playout_parameters_;
+ AudioParameters record_parameters_;
+ rtc::LoggingSeverity old_sev_;
+};
+
+TEST_F(AudioDeviceTest, ConstructDestruct) {
+ // Using the test fixture to create and destruct the audio device module.
+}
+
+TEST_F(AudioDeviceTest, InitTerminate) {
+ // Initialization is part of the test fixture.
+ EXPECT_TRUE(audio_device()->Initialized());
+ EXPECT_EQ(0, audio_device()->Terminate());
+ EXPECT_FALSE(audio_device()->Initialized());
+}
+
+// Tests that playout can be initiated, started and stopped. No audio callback
+// is registered in this test.
+TEST_F(AudioDeviceTest, StartStopPlayout) {
+ StartPlayout();
+ StopPlayout();
+ StartPlayout();
+ StopPlayout();
+}
+
+// Tests that recording can be initiated, started and stopped. No audio callback
+// is registered in this test.
+TEST_F(AudioDeviceTest, StartStopRecording) {
+ StartRecording();
+ StopRecording();
+ StartRecording();
+ StopRecording();
+}
+
+// Verify that calling StopPlayout() will leave us in an uninitialized state
+// which will require a new call to InitPlayout(). This test does not call
+// StartPlayout() while being uninitialized since doing so will hit an
+// RTC_DCHECK.
+TEST_F(AudioDeviceTest, StopPlayoutRequiresInitToRestart) {
+ EXPECT_EQ(0, audio_device()->InitPlayout());
+ EXPECT_EQ(0, audio_device()->StartPlayout());
+ EXPECT_EQ(0, audio_device()->StopPlayout());
+ EXPECT_FALSE(audio_device()->PlayoutIsInitialized());
+}
+
+// Start playout and verify that the native audio layer starts asking for real
+// audio samples to play out using the NeedMorePlayData callback.
+TEST_F(AudioDeviceTest, StartPlayoutVerifyCallbacks) {
+ MockAudioTransport mock(kPlayout);
+ mock.HandleCallbacks(test_is_done_.get(), nullptr, kNumCallbacks);
+ EXPECT_CALL(mock, NeedMorePlayData(playout_frames_per_10ms_buffer(),
+ kBytesPerSample, playout_channels(),
+ playout_sample_rate(), NotNull(), _, _, _))
+ .Times(AtLeast(kNumCallbacks));
+ EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
+ StartPlayout();
+ test_is_done_->Wait(kTestTimeOutInMilliseconds);
+ StopPlayout();
+}
+
+// Start recording and verify that the native audio layer starts feeding real
+// audio samples via the RecordedDataIsAvailable callback.
+TEST_F(AudioDeviceTest, StartRecordingVerifyCallbacks) {
+ MockAudioTransport mock(kRecording);
+ mock.HandleCallbacks(test_is_done_.get(), nullptr, kNumCallbacks);
+ EXPECT_CALL(mock,
+ RecordedDataIsAvailable(
+ NotNull(), record_frames_per_10ms_buffer(), kBytesPerSample,
+ record_channels(), record_sample_rate(),
+ _, // TODO(henrika): fix delay
+ 0, 0, false, _)).Times(AtLeast(kNumCallbacks));
+
+ EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
+ StartRecording();
+ test_is_done_->Wait(kTestTimeOutInMilliseconds);
+ StopRecording();
+}
+
+// Start playout and recording (full-duplex audio) and verify that audio is
+// active in both directions.
+TEST_F(AudioDeviceTest, StartPlayoutAndRecordingVerifyCallbacks) {
+ MockAudioTransport mock(kPlayout | kRecording);
+ mock.HandleCallbacks(test_is_done_.get(), nullptr, kNumCallbacks);
+ EXPECT_CALL(mock, NeedMorePlayData(playout_frames_per_10ms_buffer(),
+ kBytesPerSample, playout_channels(),
+ playout_sample_rate(), NotNull(), _, _, _))
+ .Times(AtLeast(kNumCallbacks));
+ EXPECT_CALL(mock,
+ RecordedDataIsAvailable(
+ NotNull(), record_frames_per_10ms_buffer(), kBytesPerSample,
+ record_channels(), record_sample_rate(),
+ _, // TODO(henrika): fix delay
+ 0, 0, false, _)).Times(AtLeast(kNumCallbacks));
+ EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
+ StartPlayout();
+ StartRecording();
+ test_is_done_->Wait(kTestTimeOutInMilliseconds);
+ StopRecording();
+ StopPlayout();
+}
+
+// Start playout and read audio from an external PCM file when the audio layer
+// asks for data to play out. Real audio is played out in this test but it does
+// not contain any explicit verification that the audio quality is perfect.
+TEST_F(AudioDeviceTest, RunPlayoutWithFileAsSource) {
+ // TODO(henrika): extend test when mono output is supported.
+ EXPECT_EQ(1, playout_channels());
+ NiceMock<MockAudioTransport> mock(kPlayout);
+ const int num_callbacks = kFilePlayTimeInSec * kNumCallbacksPerSecond;
+ std::string file_name = GetFileName(playout_sample_rate());
+ rtc::scoped_ptr<FileAudioStream> file_audio_stream(
+ new FileAudioStream(num_callbacks, file_name, playout_sample_rate()));
+ mock.HandleCallbacks(test_is_done_.get(), file_audio_stream.get(),
+ num_callbacks);
+ // SetMaxPlayoutVolume();
+ EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
+ StartPlayout();
+ test_is_done_->Wait(kTestTimeOutInMilliseconds);
+ StopPlayout();
+}
+
+TEST_F(AudioDeviceTest, Devices) {
+ // Device enumeration is not supported. Verify fixed values only.
+ EXPECT_EQ(1, audio_device()->PlayoutDevices());
+ EXPECT_EQ(1, audio_device()->RecordingDevices());
+}
+
+// Start playout and recording and store recorded data in an intermediate FIFO
+// buffer from which the playout side then reads its samples in the same order
+// as they were stored. Under ideal circumstances, a callback sequence would
+// look like: ...+-+-+-+-+-+-+-..., where '+' means 'packet recorded' and '-'
+// means 'packet played'. Under such conditions, the FIFO would only contain
+// one packet on average. However, under more realistic conditions, the size
+// of the FIFO will vary more due to an imbalance between the two sides.
+// This test tries to verify that the device maintains a balanced callback-
+// sequence by running in loopback for ten seconds while measuring the size
+// (max and average) of the FIFO. The size of the FIFO is increased by the
+// recording side and decreased by the playout side.
+// TODO(henrika): tune the final test parameters after running tests on several
+// different devices.
+TEST_F(AudioDeviceTest, RunPlayoutAndRecordingInFullDuplex) {
+ EXPECT_EQ(record_channels(), playout_channels());
+ EXPECT_EQ(record_sample_rate(), playout_sample_rate());
+ NiceMock<MockAudioTransport> mock(kPlayout | kRecording);
+ rtc::scoped_ptr<FifoAudioStream> fifo_audio_stream(
+ new FifoAudioStream(playout_frames_per_10ms_buffer()));
+ mock.HandleCallbacks(test_is_done_.get(), fifo_audio_stream.get(),
+ kFullDuplexTimeInSec * kNumCallbacksPerSecond);
+ // SetMaxPlayoutVolume();
+ EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
+ StartRecording();
+ StartPlayout();
+ test_is_done_->Wait(
+ std::max(kTestTimeOutInMilliseconds, 1000 * kFullDuplexTimeInSec));
+ StopPlayout();
+ StopRecording();
+ EXPECT_LE(fifo_audio_stream->average_size(), 10u);
+ EXPECT_LE(fifo_audio_stream->largest_size(), 20u);
+}
+
+// Measures loopback latency and reports the min, max and average values for
+// a full duplex audio session.
+// The latency is measured like so:
+// - Insert impulses periodically on the output side.
+// - Detect the impulses on the input side.
+// - Measure the time difference between the transmit time and receive time.
+// - Store time differences in a vector and calculate min, max and average.
+// This test requires a special hardware called Audio Loopback Dongle.
+// See http://source.android.com/devices/audio/loopback.html for details.
+TEST_F(AudioDeviceTest, DISABLED_MeasureLoopbackLatency) {
+ EXPECT_EQ(record_channels(), playout_channels());
+ EXPECT_EQ(record_sample_rate(), playout_sample_rate());
+ NiceMock<MockAudioTransport> mock(kPlayout | kRecording);
+ rtc::scoped_ptr<LatencyMeasuringAudioStream> latency_audio_stream(
+ new LatencyMeasuringAudioStream(playout_frames_per_10ms_buffer()));
+ mock.HandleCallbacks(test_is_done_.get(), latency_audio_stream.get(),
+ kMeasureLatencyTimeInSec * kNumCallbacksPerSecond);
+ EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
+ // SetMaxPlayoutVolume();
+ // DisableBuiltInAECIfAvailable();
+ StartRecording();
+ StartPlayout();
+ test_is_done_->Wait(
+ std::max(kTestTimeOutInMilliseconds, 1000 * kMeasureLatencyTimeInSec));
+ StopPlayout();
+ StopRecording();
+ // Verify that the correct number of transmitted impulses are detected.
+ EXPECT_EQ(latency_audio_stream->num_latency_values(),
+ static_cast<size_t>(
+ kImpulseFrequencyInHz * kMeasureLatencyTimeInSec - 1));
+ latency_audio_stream->PrintResults();
+}
+
+} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_device/linux/audio_device_pulse_linux.cc b/chromium/third_party/webrtc/modules/audio_device/linux/audio_device_pulse_linux.cc
index 8b0ae6f5390..7bb7347a20e 100644
--- a/chromium/third_party/webrtc/modules/audio_device/linux/audio_device_pulse_linux.cc
+++ b/chromium/third_party/webrtc/modules/audio_device/linux/audio_device_pulse_linux.cc
@@ -106,7 +106,7 @@ AudioDeviceLinuxPulse::~AudioDeviceLinuxPulse()
{
WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id,
"%s destroyed", __FUNCTION__);
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
Terminate();
if (_recBuffer)
@@ -139,7 +139,7 @@ AudioDeviceLinuxPulse::~AudioDeviceLinuxPulse()
void AudioDeviceLinuxPulse::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer)
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
_ptrAudioBuffer = audioBuffer;
@@ -165,7 +165,7 @@ int32_t AudioDeviceLinuxPulse::ActiveAudioLayer(
int32_t AudioDeviceLinuxPulse::Init()
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
if (_initialized)
{
return 0;
@@ -235,7 +235,7 @@ int32_t AudioDeviceLinuxPulse::Init()
int32_t AudioDeviceLinuxPulse::Terminate()
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
if (!_initialized)
{
return 0;
@@ -286,13 +286,13 @@ int32_t AudioDeviceLinuxPulse::Terminate()
bool AudioDeviceLinuxPulse::Initialized() const
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
return (_initialized);
}
int32_t AudioDeviceLinuxPulse::InitSpeaker()
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
if (_playing)
{
@@ -336,7 +336,7 @@ int32_t AudioDeviceLinuxPulse::InitSpeaker()
int32_t AudioDeviceLinuxPulse::InitMicrophone()
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
if (_recording)
{
return -1;
@@ -379,19 +379,19 @@ int32_t AudioDeviceLinuxPulse::InitMicrophone()
bool AudioDeviceLinuxPulse::SpeakerIsInitialized() const
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
return (_mixerManager.SpeakerIsInitialized());
}
bool AudioDeviceLinuxPulse::MicrophoneIsInitialized() const
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
return (_mixerManager.MicrophoneIsInitialized());
}
int32_t AudioDeviceLinuxPulse::SpeakerVolumeIsAvailable(bool& available)
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
bool wasInitialized = _mixerManager.SpeakerIsInitialized();
// Make an attempt to open up the
@@ -418,7 +418,7 @@ int32_t AudioDeviceLinuxPulse::SpeakerVolumeIsAvailable(bool& available)
int32_t AudioDeviceLinuxPulse::SetSpeakerVolume(uint32_t volume)
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
if (!_playing) {
// Only update the volume if it's been set while we weren't playing.
update_speaker_volume_at_startup_ = true;
@@ -428,7 +428,7 @@ int32_t AudioDeviceLinuxPulse::SetSpeakerVolume(uint32_t volume)
int32_t AudioDeviceLinuxPulse::SpeakerVolume(uint32_t& volume) const
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
uint32_t level(0);
if (_mixerManager.SpeakerVolume(level) == -1)
@@ -464,7 +464,7 @@ int32_t AudioDeviceLinuxPulse::WaveOutVolume(
int32_t AudioDeviceLinuxPulse::MaxSpeakerVolume(
uint32_t& maxVolume) const
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
uint32_t maxVol(0);
if (_mixerManager.MaxSpeakerVolume(maxVol) == -1)
@@ -480,7 +480,7 @@ int32_t AudioDeviceLinuxPulse::MaxSpeakerVolume(
int32_t AudioDeviceLinuxPulse::MinSpeakerVolume(
uint32_t& minVolume) const
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
uint32_t minVol(0);
if (_mixerManager.MinSpeakerVolume(minVol) == -1)
@@ -496,7 +496,7 @@ int32_t AudioDeviceLinuxPulse::MinSpeakerVolume(
int32_t AudioDeviceLinuxPulse::SpeakerVolumeStepSize(
uint16_t& stepSize) const
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
uint16_t delta(0);
if (_mixerManager.SpeakerVolumeStepSize(delta) == -1)
@@ -511,7 +511,7 @@ int32_t AudioDeviceLinuxPulse::SpeakerVolumeStepSize(
int32_t AudioDeviceLinuxPulse::SpeakerMuteIsAvailable(bool& available)
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
bool isAvailable(false);
bool wasInitialized = _mixerManager.SpeakerIsInitialized();
@@ -543,13 +543,13 @@ int32_t AudioDeviceLinuxPulse::SpeakerMuteIsAvailable(bool& available)
int32_t AudioDeviceLinuxPulse::SetSpeakerMute(bool enable)
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
return (_mixerManager.SetSpeakerMute(enable));
}
int32_t AudioDeviceLinuxPulse::SpeakerMute(bool& enabled) const
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
bool muted(0);
if (_mixerManager.SpeakerMute(muted) == -1)
{
@@ -562,7 +562,7 @@ int32_t AudioDeviceLinuxPulse::SpeakerMute(bool& enabled) const
int32_t AudioDeviceLinuxPulse::MicrophoneMuteIsAvailable(bool& available)
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
bool isAvailable(false);
bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
@@ -595,13 +595,13 @@ int32_t AudioDeviceLinuxPulse::MicrophoneMuteIsAvailable(bool& available)
int32_t AudioDeviceLinuxPulse::SetMicrophoneMute(bool enable)
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
return (_mixerManager.SetMicrophoneMute(enable));
}
int32_t AudioDeviceLinuxPulse::MicrophoneMute(bool& enabled) const
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
bool muted(0);
if (_mixerManager.MicrophoneMute(muted) == -1)
{
@@ -614,7 +614,7 @@ int32_t AudioDeviceLinuxPulse::MicrophoneMute(bool& enabled) const
int32_t AudioDeviceLinuxPulse::MicrophoneBoostIsAvailable(bool& available)
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
bool isAvailable(false);
bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
@@ -645,13 +645,13 @@ int32_t AudioDeviceLinuxPulse::MicrophoneBoostIsAvailable(bool& available)
int32_t AudioDeviceLinuxPulse::SetMicrophoneBoost(bool enable)
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
return (_mixerManager.SetMicrophoneBoost(enable));
}
int32_t AudioDeviceLinuxPulse::MicrophoneBoost(bool& enabled) const
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
bool onOff(0);
if (_mixerManager.MicrophoneBoost(onOff) == -1)
@@ -666,7 +666,7 @@ int32_t AudioDeviceLinuxPulse::MicrophoneBoost(bool& enabled) const
int32_t AudioDeviceLinuxPulse::StereoRecordingIsAvailable(bool& available)
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
if (_recChannels == 2 && _recording) {
available = true;
return 0;
@@ -700,7 +700,7 @@ int32_t AudioDeviceLinuxPulse::StereoRecordingIsAvailable(bool& available)
int32_t AudioDeviceLinuxPulse::SetStereoRecording(bool enable)
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
if (enable)
_recChannels = 2;
else
@@ -711,7 +711,7 @@ int32_t AudioDeviceLinuxPulse::SetStereoRecording(bool enable)
int32_t AudioDeviceLinuxPulse::StereoRecording(bool& enabled) const
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
if (_recChannels == 2)
enabled = true;
else
@@ -722,7 +722,7 @@ int32_t AudioDeviceLinuxPulse::StereoRecording(bool& enabled) const
int32_t AudioDeviceLinuxPulse::StereoPlayoutIsAvailable(bool& available)
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
if (_playChannels == 2 && _playing) {
available = true;
return 0;
@@ -755,7 +755,7 @@ int32_t AudioDeviceLinuxPulse::StereoPlayoutIsAvailable(bool& available)
int32_t AudioDeviceLinuxPulse::SetStereoPlayout(bool enable)
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
if (enable)
_playChannels = 2;
else
@@ -766,7 +766,7 @@ int32_t AudioDeviceLinuxPulse::SetStereoPlayout(bool enable)
int32_t AudioDeviceLinuxPulse::StereoPlayout(bool& enabled) const
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
if (_playChannels == 2)
enabled = true;
else
@@ -792,7 +792,7 @@ bool AudioDeviceLinuxPulse::AGC() const
int32_t AudioDeviceLinuxPulse::MicrophoneVolumeIsAvailable(
bool& available)
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
// Make an attempt to open up the
@@ -876,7 +876,7 @@ int32_t AudioDeviceLinuxPulse::MinMicrophoneVolume(
int32_t AudioDeviceLinuxPulse::MicrophoneVolumeStepSize(
uint16_t& stepSize) const
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
uint16_t delta(0);
if (_mixerManager.MicrophoneVolumeStepSize(delta) == -1)
@@ -891,7 +891,6 @@ int32_t AudioDeviceLinuxPulse::MicrophoneVolumeStepSize(
int16_t AudioDeviceLinuxPulse::PlayoutDevices()
{
- DCHECK(thread_checker_.CalledOnValidThread());
PaLock();
pa_operation* paOperation = NULL;
@@ -911,7 +910,7 @@ int16_t AudioDeviceLinuxPulse::PlayoutDevices()
int32_t AudioDeviceLinuxPulse::SetPlayoutDevice(uint16_t index)
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
if (_playIsInitialized)
{
return -1;
@@ -948,7 +947,7 @@ int32_t AudioDeviceLinuxPulse::PlayoutDeviceName(
char name[kAdmMaxDeviceNameSize],
char guid[kAdmMaxGuidSize])
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
const uint16_t nDevices = PlayoutDevices();
if ((index > (nDevices - 1)) || (name == NULL))
@@ -990,7 +989,7 @@ int32_t AudioDeviceLinuxPulse::RecordingDeviceName(
char name[kAdmMaxDeviceNameSize],
char guid[kAdmMaxGuidSize])
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
const uint16_t nDevices(RecordingDevices());
if ((index > (nDevices - 1)) || (name == NULL))
@@ -1029,7 +1028,6 @@ int32_t AudioDeviceLinuxPulse::RecordingDeviceName(
int16_t AudioDeviceLinuxPulse::RecordingDevices()
{
- DCHECK(thread_checker_.CalledOnValidThread());
PaLock();
pa_operation* paOperation = NULL;
@@ -1049,7 +1047,7 @@ int16_t AudioDeviceLinuxPulse::RecordingDevices()
int32_t AudioDeviceLinuxPulse::SetRecordingDevice(uint16_t index)
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
if (_recIsInitialized)
{
return -1;
@@ -1083,7 +1081,7 @@ int32_t AudioDeviceLinuxPulse::SetRecordingDevice(
int32_t AudioDeviceLinuxPulse::PlayoutIsAvailable(bool& available)
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
available = false;
// Try to initialize the playout side
@@ -1102,7 +1100,7 @@ int32_t AudioDeviceLinuxPulse::PlayoutIsAvailable(bool& available)
int32_t AudioDeviceLinuxPulse::RecordingIsAvailable(bool& available)
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
available = false;
// Try to initialize the playout side
@@ -1121,7 +1119,7 @@ int32_t AudioDeviceLinuxPulse::RecordingIsAvailable(bool& available)
int32_t AudioDeviceLinuxPulse::InitPlayout()
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
if (_playing)
{
@@ -1243,7 +1241,7 @@ int32_t AudioDeviceLinuxPulse::InitPlayout()
int32_t AudioDeviceLinuxPulse::InitRecording()
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
if (_recording)
{
@@ -1355,7 +1353,7 @@ int32_t AudioDeviceLinuxPulse::InitRecording()
int32_t AudioDeviceLinuxPulse::StartRecording()
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
if (!_recIsInitialized)
{
return -1;
@@ -1402,7 +1400,7 @@ int32_t AudioDeviceLinuxPulse::StartRecording()
int32_t AudioDeviceLinuxPulse::StopRecording()
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
CriticalSectionScoped lock(&_critSect);
if (!_recIsInitialized)
@@ -1465,25 +1463,25 @@ int32_t AudioDeviceLinuxPulse::StopRecording()
bool AudioDeviceLinuxPulse::RecordingIsInitialized() const
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
return (_recIsInitialized);
}
bool AudioDeviceLinuxPulse::Recording() const
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
return (_recording);
}
bool AudioDeviceLinuxPulse::PlayoutIsInitialized() const
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
return (_playIsInitialized);
}
int32_t AudioDeviceLinuxPulse::StartPlayout()
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
if (!_playIsInitialized)
{
@@ -1537,7 +1535,7 @@ int32_t AudioDeviceLinuxPulse::StartPlayout()
int32_t AudioDeviceLinuxPulse::StopPlayout()
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
CriticalSectionScoped lock(&_critSect);
if (!_playIsInitialized)
@@ -1609,14 +1607,14 @@ int32_t AudioDeviceLinuxPulse::PlayoutDelay(uint16_t& delayMS) const
int32_t AudioDeviceLinuxPulse::RecordingDelay(uint16_t& delayMS) const
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
delayMS = (uint16_t) _sndCardRecDelay;
return 0;
}
bool AudioDeviceLinuxPulse::Playing() const
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
return (_playing);
}
@@ -1624,7 +1622,7 @@ int32_t AudioDeviceLinuxPulse::SetPlayoutBuffer(
const AudioDeviceModule::BufferType type,
uint16_t sizeMS)
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
if (type != AudioDeviceModule::kFixedBufferSize)
{
WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
@@ -1642,7 +1640,7 @@ int32_t AudioDeviceLinuxPulse::PlayoutBuffer(
AudioDeviceModule::BufferType& type,
uint16_t& sizeMS) const
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
type = _playBufType;
sizeMS = _playBufDelayFixed;
diff --git a/chromium/third_party/webrtc/modules/audio_device/linux/audio_device_pulse_linux.h b/chromium/third_party/webrtc/modules/audio_device/linux/audio_device_pulse_linux.h
index 418dd3d2879..495a7ebd35b 100644
--- a/chromium/third_party/webrtc/modules/audio_device/linux/audio_device_pulse_linux.h
+++ b/chromium/third_party/webrtc/modules/audio_device/linux/audio_device_pulse_linux.h
@@ -304,7 +304,7 @@ private:
// Stores thread ID in constructor.
// We can then use ThreadChecker::CalledOnValidThread() to ensure that
// other methods are called from the same thread.
- // Currently only does DCHECK(thread_checker_.CalledOnValidThread()).
+ // Currently only does RTC_DCHECK(thread_checker_.CalledOnValidThread()).
rtc::ThreadChecker thread_checker_;
bool _initialized;
diff --git a/chromium/third_party/webrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.cc b/chromium/third_party/webrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.cc
index 4df2d94f887..bc2662e3e89 100644
--- a/chromium/third_party/webrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.cc
+++ b/chromium/third_party/webrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.cc
@@ -63,7 +63,7 @@ AudioMixerManagerLinuxPulse::AudioMixerManagerLinuxPulse(const int32_t id) :
AudioMixerManagerLinuxPulse::~AudioMixerManagerLinuxPulse()
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id,
"%s destructed", __FUNCTION__);
@@ -78,7 +78,7 @@ int32_t AudioMixerManagerLinuxPulse::SetPulseAudioObjects(
pa_threaded_mainloop* mainloop,
pa_context* context)
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s",
__FUNCTION__);
@@ -101,7 +101,7 @@ int32_t AudioMixerManagerLinuxPulse::SetPulseAudioObjects(
int32_t AudioMixerManagerLinuxPulse::Close()
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s",
__FUNCTION__);
@@ -118,7 +118,7 @@ int32_t AudioMixerManagerLinuxPulse::Close()
int32_t AudioMixerManagerLinuxPulse::CloseSpeaker()
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s",
__FUNCTION__);
@@ -131,7 +131,7 @@ int32_t AudioMixerManagerLinuxPulse::CloseSpeaker()
int32_t AudioMixerManagerLinuxPulse::CloseMicrophone()
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s",
__FUNCTION__);
@@ -144,7 +144,7 @@ int32_t AudioMixerManagerLinuxPulse::CloseMicrophone()
int32_t AudioMixerManagerLinuxPulse::SetPlayStream(pa_stream* playStream)
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
"AudioMixerManagerLinuxPulse::SetPlayStream(playStream)");
@@ -154,7 +154,7 @@ int32_t AudioMixerManagerLinuxPulse::SetPlayStream(pa_stream* playStream)
int32_t AudioMixerManagerLinuxPulse::SetRecStream(pa_stream* recStream)
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
"AudioMixerManagerLinuxPulse::SetRecStream(recStream)");
@@ -165,7 +165,7 @@ int32_t AudioMixerManagerLinuxPulse::SetRecStream(pa_stream* recStream)
int32_t AudioMixerManagerLinuxPulse::OpenSpeaker(
uint16_t deviceIndex)
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
"AudioMixerManagerLinuxPulse::OpenSpeaker(deviceIndex=%d)",
deviceIndex);
@@ -192,7 +192,7 @@ int32_t AudioMixerManagerLinuxPulse::OpenSpeaker(
int32_t AudioMixerManagerLinuxPulse::OpenMicrophone(
uint16_t deviceIndex)
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
"AudioMixerManagerLinuxPulse::OpenMicrophone"
"(deviceIndex=%d)", deviceIndex);
@@ -218,7 +218,7 @@ int32_t AudioMixerManagerLinuxPulse::OpenMicrophone(
bool AudioMixerManagerLinuxPulse::SpeakerIsInitialized() const
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s",
__FUNCTION__);
@@ -227,7 +227,7 @@ bool AudioMixerManagerLinuxPulse::SpeakerIsInitialized() const
bool AudioMixerManagerLinuxPulse::MicrophoneIsInitialized() const
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s",
__FUNCTION__);
@@ -237,7 +237,7 @@ bool AudioMixerManagerLinuxPulse::MicrophoneIsInitialized() const
int32_t AudioMixerManagerLinuxPulse::SetSpeakerVolume(
uint32_t volume)
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
"AudioMixerManagerLinuxPulse::SetSpeakerVolume(volume=%u)",
volume);
@@ -372,7 +372,7 @@ AudioMixerManagerLinuxPulse::MinSpeakerVolume(uint32_t& minVolume) const
int32_t
AudioMixerManagerLinuxPulse::SpeakerVolumeStepSize(uint16_t& stepSize) const
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
if (_paOutputDeviceIndex == -1)
{
WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
@@ -394,7 +394,7 @@ AudioMixerManagerLinuxPulse::SpeakerVolumeStepSize(uint16_t& stepSize) const
int32_t
AudioMixerManagerLinuxPulse::SpeakerVolumeIsAvailable(bool& available)
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
if (_paOutputDeviceIndex == -1)
{
WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
@@ -411,7 +411,7 @@ AudioMixerManagerLinuxPulse::SpeakerVolumeIsAvailable(bool& available)
int32_t
AudioMixerManagerLinuxPulse::SpeakerMuteIsAvailable(bool& available)
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
if (_paOutputDeviceIndex == -1)
{
WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
@@ -427,7 +427,7 @@ AudioMixerManagerLinuxPulse::SpeakerMuteIsAvailable(bool& available)
int32_t AudioMixerManagerLinuxPulse::SetSpeakerMute(bool enable)
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
"AudioMixerManagerLinuxPulse::SetSpeakerMute(enable=%u)",
enable);
@@ -512,7 +512,7 @@ int32_t AudioMixerManagerLinuxPulse::SpeakerMute(bool& enabled) const
int32_t
AudioMixerManagerLinuxPulse::StereoPlayoutIsAvailable(bool& available)
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
if (_paOutputDeviceIndex == -1)
{
WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
@@ -546,7 +546,7 @@ AudioMixerManagerLinuxPulse::StereoPlayoutIsAvailable(bool& available)
int32_t
AudioMixerManagerLinuxPulse::StereoRecordingIsAvailable(bool& available)
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
if (_paInputDeviceIndex == -1)
{
WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
@@ -590,7 +590,7 @@ AudioMixerManagerLinuxPulse::StereoRecordingIsAvailable(bool& available)
int32_t AudioMixerManagerLinuxPulse::MicrophoneMuteIsAvailable(
bool& available)
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
if (_paInputDeviceIndex == -1)
{
WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
@@ -606,7 +606,7 @@ int32_t AudioMixerManagerLinuxPulse::MicrophoneMuteIsAvailable(
int32_t AudioMixerManagerLinuxPulse::SetMicrophoneMute(bool enable)
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
"AudioMixerManagerLinuxPulse::SetMicrophoneMute(enable=%u)",
enable);
@@ -661,7 +661,7 @@ int32_t AudioMixerManagerLinuxPulse::SetMicrophoneMute(bool enable)
int32_t AudioMixerManagerLinuxPulse::MicrophoneMute(bool& enabled) const
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
if (_paInputDeviceIndex == -1)
{
WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
@@ -698,7 +698,7 @@ int32_t AudioMixerManagerLinuxPulse::MicrophoneMute(bool& enabled) const
int32_t
AudioMixerManagerLinuxPulse::MicrophoneBoostIsAvailable(bool& available)
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
if (_paInputDeviceIndex == -1)
{
WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
@@ -716,7 +716,7 @@ AudioMixerManagerLinuxPulse::MicrophoneBoostIsAvailable(bool& available)
int32_t AudioMixerManagerLinuxPulse::SetMicrophoneBoost(bool enable)
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
"AudioMixerManagerLinuxPulse::SetMicrophoneBoost(enable=%u)",
enable);
@@ -745,7 +745,7 @@ int32_t AudioMixerManagerLinuxPulse::SetMicrophoneBoost(bool enable)
int32_t AudioMixerManagerLinuxPulse::MicrophoneBoost(bool& enabled) const
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
if (_paInputDeviceIndex == -1)
{
WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
@@ -762,7 +762,7 @@ int32_t AudioMixerManagerLinuxPulse::MicrophoneBoost(bool& enabled) const
int32_t AudioMixerManagerLinuxPulse::MicrophoneVolumeIsAvailable(
bool& available)
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
if (_paInputDeviceIndex == -1)
{
WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
@@ -931,7 +931,7 @@ AudioMixerManagerLinuxPulse::MinMicrophoneVolume(uint32_t& minVolume) const
int32_t AudioMixerManagerLinuxPulse::MicrophoneVolumeStepSize(
uint16_t& stepSize) const
{
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
if (_paInputDeviceIndex == -1)
{
WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
diff --git a/chromium/third_party/webrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.h b/chromium/third_party/webrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.h
index 85676319bd6..cb3d6329833 100644
--- a/chromium/third_party/webrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.h
+++ b/chromium/third_party/webrtc/modules/audio_device/linux/audio_mixer_manager_pulse_linux.h
@@ -111,7 +111,7 @@ private:
// Stores thread ID in constructor.
// We can then use ThreadChecker::CalledOnValidThread() to ensure that
// other methods are called from the same thread.
- // Currently only does DCHECK(thread_checker_.CalledOnValidThread()).
+ // Currently only does RTC_DCHECK(thread_checker_.CalledOnValidThread()).
rtc::ThreadChecker thread_checker_;
};
diff --git a/chromium/third_party/webrtc/modules/audio_device/linux/latebindingsymboltable_linux.h b/chromium/third_party/webrtc/modules/audio_device/linux/latebindingsymboltable_linux.h
index f49931e8bf3..2d8fafa04bf 100644
--- a/chromium/third_party/webrtc/modules/audio_device/linux/latebindingsymboltable_linux.h
+++ b/chromium/third_party/webrtc/modules/audio_device/linux/latebindingsymboltable_linux.h
@@ -119,7 +119,7 @@ class LateBindingSymbolTable {
bool undefined_symbols_;
void *symbols_[SYMBOL_TABLE_SIZE];
- DISALLOW_COPY_AND_ASSIGN(LateBindingSymbolTable);
+ RTC_DISALLOW_COPY_AND_ASSIGN(LateBindingSymbolTable);
};
// This macro must be invoked in a header to declare a symbol table class.
diff --git a/chromium/third_party/webrtc/modules/audio_device/mac/audio_device_mac.cc b/chromium/third_party/webrtc/modules/audio_device/mac/audio_device_mac.cc
index 90e32dc187c..77dab0b83e0 100644
--- a/chromium/third_party/webrtc/modules/audio_device/mac/audio_device_mac.cc
+++ b/chromium/third_party/webrtc/modules/audio_device/mac/audio_device_mac.cc
@@ -91,8 +91,8 @@ void AudioDeviceMac::logCAMsg(const TraceLevel level,
const int32_t id, const char *msg,
const char *err)
{
- DCHECK(msg != NULL);
- DCHECK(err != NULL);
+ RTC_DCHECK(msg != NULL);
+ RTC_DCHECK(err != NULL);
#ifdef WEBRTC_ARCH_BIG_ENDIAN
WEBRTC_TRACE(level, module, id, "%s: %.4s", msg, err);
@@ -154,8 +154,8 @@ AudioDeviceMac::AudioDeviceMac(const int32_t id) :
WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, id,
"%s created", __FUNCTION__);
- DCHECK(&_stopEvent != NULL);
- DCHECK(&_stopEventRec != NULL);
+ RTC_DCHECK(&_stopEvent != NULL);
+ RTC_DCHECK(&_stopEventRec != NULL);
memset(_renderConvertData, 0, sizeof(_renderConvertData));
memset(&_outStreamFormat, 0, sizeof(AudioStreamBasicDescription));
@@ -175,8 +175,8 @@ AudioDeviceMac::~AudioDeviceMac()
Terminate();
}
- DCHECK(!capture_worker_thread_.get());
- DCHECK(!render_worker_thread_.get());
+ RTC_DCHECK(!capture_worker_thread_.get());
+ RTC_DCHECK(!render_worker_thread_.get());
if (_paRenderBuffer)
{
@@ -1664,10 +1664,10 @@ int32_t AudioDeviceMac::StartRecording()
return -1;
}
- DCHECK(!capture_worker_thread_.get());
+ RTC_DCHECK(!capture_worker_thread_.get());
capture_worker_thread_ =
ThreadWrapper::CreateThread(RunCapture, this, "CaptureWorkerThread");
- DCHECK(capture_worker_thread_.get());
+ RTC_DCHECK(capture_worker_thread_.get());
capture_worker_thread_->Start();
capture_worker_thread_->SetPriority(kRealtimePriority);
@@ -1819,7 +1819,7 @@ int32_t AudioDeviceMac::StartPlayout()
return 0;
}
- DCHECK(!render_worker_thread_.get());
+ RTC_DCHECK(!render_worker_thread_.get());
render_worker_thread_ =
ThreadWrapper::CreateThread(RunRender, this, "RenderWorkerThread");
render_worker_thread_->Start();
@@ -2466,7 +2466,7 @@ OSStatus AudioDeviceMac::objectListenerProc(
void* clientData)
{
AudioDeviceMac *ptrThis = (AudioDeviceMac *) clientData;
- DCHECK(ptrThis != NULL);
+ RTC_DCHECK(ptrThis != NULL);
ptrThis->implObjectListenerProc(objectId, numberAddresses, addresses);
@@ -2752,7 +2752,7 @@ OSStatus AudioDeviceMac::deviceIOProc(AudioDeviceID, const AudioTimeStamp*,
void *clientData)
{
AudioDeviceMac *ptrThis = (AudioDeviceMac *) clientData;
- DCHECK(ptrThis != NULL);
+ RTC_DCHECK(ptrThis != NULL);
ptrThis->implDeviceIOProc(inputData, inputTime, outputData, outputTime);
@@ -2767,7 +2767,7 @@ OSStatus AudioDeviceMac::outConverterProc(AudioConverterRef,
void *userData)
{
AudioDeviceMac *ptrThis = (AudioDeviceMac *) userData;
- DCHECK(ptrThis != NULL);
+ RTC_DCHECK(ptrThis != NULL);
return ptrThis->implOutConverterProc(numberDataPackets, data);
}
@@ -2779,7 +2779,7 @@ OSStatus AudioDeviceMac::inDeviceIOProc(AudioDeviceID, const AudioTimeStamp*,
const AudioTimeStamp*, void* clientData)
{
AudioDeviceMac *ptrThis = (AudioDeviceMac *) clientData;
- DCHECK(ptrThis != NULL);
+ RTC_DCHECK(ptrThis != NULL);
ptrThis->implInDeviceIOProc(inputData, inputTime);
@@ -2795,7 +2795,7 @@ OSStatus AudioDeviceMac::inConverterProc(
void *userData)
{
AudioDeviceMac *ptrThis = static_cast<AudioDeviceMac*> (userData);
- DCHECK(ptrThis != NULL);
+ RTC_DCHECK(ptrThis != NULL);
return ptrThis->implInConverterProc(numberDataPackets, data);
}
@@ -2852,7 +2852,7 @@ OSStatus AudioDeviceMac::implDeviceIOProc(const AudioBufferList *inputData,
return 0;
}
- DCHECK(_outStreamFormat.mBytesPerFrame != 0);
+ RTC_DCHECK(_outStreamFormat.mBytesPerFrame != 0);
UInt32 size = outputData->mBuffers->mDataByteSize
/ _outStreamFormat.mBytesPerFrame;
@@ -2893,7 +2893,7 @@ OSStatus AudioDeviceMac::implDeviceIOProc(const AudioBufferList *inputData,
OSStatus AudioDeviceMac::implOutConverterProc(UInt32 *numberDataPackets,
AudioBufferList *data)
{
- DCHECK(data->mNumberBuffers == 1);
+ RTC_DCHECK(data->mNumberBuffers == 1);
PaRingBufferSize numSamples = *numberDataPackets
* _outDesiredFormat.mChannelsPerFrame;
@@ -2967,7 +2967,7 @@ OSStatus AudioDeviceMac::implInDeviceIOProc(const AudioBufferList *inputData,
AtomicSet32(&_captureDelayUs, captureDelayUs);
- DCHECK(inputData->mNumberBuffers == 1);
+ RTC_DCHECK(inputData->mNumberBuffers == 1);
PaRingBufferSize numSamples = inputData->mBuffers->mDataByteSize
* _inStreamFormat.mChannelsPerFrame / _inStreamFormat.mBytesPerPacket;
PaUtil_WriteRingBuffer(_paCaptureBuffer, inputData->mBuffers->mData,
@@ -2986,7 +2986,7 @@ OSStatus AudioDeviceMac::implInDeviceIOProc(const AudioBufferList *inputData,
OSStatus AudioDeviceMac::implInConverterProc(UInt32 *numberDataPackets,
AudioBufferList *data)
{
- DCHECK(data->mNumberBuffers == 1);
+ RTC_DCHECK(data->mNumberBuffers == 1);
PaRingBufferSize numSamples = *numberDataPackets
* _inStreamFormat.mChannelsPerFrame;
diff --git a/chromium/third_party/webrtc/modules/audio_device/mock_audio_device_buffer.h b/chromium/third_party/webrtc/modules/audio_device/mock_audio_device_buffer.h
index b9e66f7d1c7..07c9e2912e1 100644
--- a/chromium/third_party/webrtc/modules/audio_device/mock_audio_device_buffer.h
+++ b/chromium/third_party/webrtc/modules/audio_device/mock_audio_device_buffer.h
@@ -20,9 +20,13 @@ class MockAudioDeviceBuffer : public AudioDeviceBuffer {
public:
MockAudioDeviceBuffer() {}
virtual ~MockAudioDeviceBuffer() {}
-
- MOCK_METHOD1(RequestPlayoutData, int32_t(uint32_t nSamples));
+ MOCK_METHOD1(RequestPlayoutData, int32_t(size_t nSamples));
MOCK_METHOD1(GetPlayoutData, int32_t(void* audioBuffer));
+ MOCK_METHOD2(SetRecordedBuffer,
+ int32_t(const void* audioBuffer, size_t nSamples));
+ MOCK_METHOD3(SetVQEData,
+ void(int playDelayMS, int recDelayMS, int clockDrift));
+ MOCK_METHOD0(DeliverRecordedData, int32_t());
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_device/test/audio_device_test_api.cc b/chromium/third_party/webrtc/modules/audio_device/test/audio_device_test_api.cc
index 923d39ac73b..cd80c4deab2 100644
--- a/chromium/third_party/webrtc/modules/audio_device/test/audio_device_test_api.cc
+++ b/chromium/third_party/webrtc/modules/audio_device/test/audio_device_test_api.cc
@@ -83,8 +83,8 @@ class AudioTransportAPI: public AudioTransport {
~AudioTransportAPI() {}
int32_t RecordedDataIsAvailable(const void* audioSamples,
- const uint32_t nSamples,
- const uint8_t nBytesPerSample,
+ const size_t nSamples,
+ const size_t nBytesPerSample,
const uint8_t nChannels,
const uint32_t sampleRate,
const uint32_t totalDelay,
@@ -108,12 +108,12 @@ class AudioTransportAPI: public AudioTransport {
return 0;
}
- int32_t NeedMorePlayData(const uint32_t nSamples,
- const uint8_t nBytesPerSample,
+ int32_t NeedMorePlayData(const size_t nSamples,
+ const size_t nBytesPerSample,
const uint8_t nChannels,
const uint32_t sampleRate,
void* audioSamples,
- uint32_t& nSamplesOut,
+ size_t& nSamplesOut,
int64_t* elapsed_time_ms,
int64_t* ntp_time_ms) override {
play_count_++;
@@ -133,7 +133,7 @@ class AudioTransportAPI: public AudioTransport {
const int16_t* audio_data,
int sample_rate,
int number_of_channels,
- int number_of_frames,
+ size_t number_of_frames,
int audio_delay_milliseconds,
int current_volume,
bool key_pressed,
@@ -144,10 +144,10 @@ class AudioTransportAPI: public AudioTransport {
void PushCaptureData(int voe_channel, const void* audio_data,
int bits_per_sample, int sample_rate,
int number_of_channels,
- int number_of_frames) override {}
+ size_t number_of_frames) override {}
void PullRenderData(int bits_per_sample, int sample_rate,
- int number_of_channels, int number_of_frames,
+ int number_of_channels, size_t number_of_frames,
void* audio_data,
int64_t* elapsed_time_ms,
int64_t* ntp_time_ms) override {}
@@ -163,7 +163,7 @@ class AudioDeviceAPITest: public testing::Test {
virtual ~AudioDeviceAPITest() {}
static void SetUpTestCase() {
- process_thread_ = ProcessThread::Create();
+ process_thread_ = ProcessThread::Create("ProcessThread");
process_thread_->Start();
// Windows:
diff --git a/chromium/third_party/webrtc/modules/audio_device/test/func_test_manager.cc b/chromium/third_party/webrtc/modules/audio_device/test/func_test_manager.cc
index ae3cd2c186d..a97d25a4514 100644
--- a/chromium/third_party/webrtc/modules/audio_device/test/func_test_manager.cc
+++ b/chromium/third_party/webrtc/modules/audio_device/test/func_test_manager.cc
@@ -192,8 +192,8 @@ void AudioTransportImpl::SetFullDuplex(bool enable)
int32_t AudioTransportImpl::RecordedDataIsAvailable(
const void* audioSamples,
- const uint32_t nSamples,
- const uint8_t nBytesPerSample,
+ const size_t nSamples,
+ const size_t nBytesPerSample,
const uint8_t nChannels,
const uint32_t samplesPerSec,
const uint32_t totalDelayMS,
@@ -206,7 +206,7 @@ int32_t AudioTransportImpl::RecordedDataIsAvailable(
{
AudioPacket* packet = new AudioPacket();
memcpy(packet->dataBuffer, audioSamples, nSamples * nBytesPerSample);
- packet->nSamples = (uint16_t) nSamples;
+ packet->nSamples = nSamples;
packet->nBytesPerSample = nBytesPerSample;
packet->nChannels = nChannels;
packet->samplesPerSec = samplesPerSec;
@@ -337,12 +337,12 @@ int32_t AudioTransportImpl::RecordedDataIsAvailable(
int32_t AudioTransportImpl::NeedMorePlayData(
- const uint32_t nSamples,
- const uint8_t nBytesPerSample,
+ const size_t nSamples,
+ const size_t nBytesPerSample,
const uint8_t nChannels,
const uint32_t samplesPerSec,
void* audioSamples,
- uint32_t& nSamplesOut,
+ size_t& nSamplesOut,
int64_t* elapsed_time_ms,
int64_t* ntp_time_ms)
{
@@ -359,15 +359,15 @@ int32_t AudioTransportImpl::NeedMorePlayData(
if (packet)
{
int ret(0);
- int lenOut(0);
+ size_t lenOut(0);
int16_t tmpBuf_96kHz[80 * 12];
int16_t* ptr16In = NULL;
int16_t* ptr16Out = NULL;
- const uint16_t nSamplesIn = packet->nSamples;
+ const size_t nSamplesIn = packet->nSamples;
const uint8_t nChannelsIn = packet->nChannels;
const uint32_t samplesPerSecIn = packet->samplesPerSec;
- const uint16_t nBytesPerSampleIn = packet->nBytesPerSample;
+ const size_t nBytesPerSampleIn = packet->nBytesPerSample;
int32_t fsInHz(samplesPerSecIn);
int32_t fsOutHz(samplesPerSec);
@@ -401,7 +401,7 @@ int32_t AudioTransportImpl::NeedMorePlayData(
ptr16Out = (int16_t*) audioSamples;
// do stereo -> mono
- for (unsigned int i = 0; i < nSamples; i++)
+ for (size_t i = 0; i < nSamples; i++)
{
*ptr16Out = *ptr16In; // use left channel
ptr16Out++;
@@ -409,7 +409,7 @@ int32_t AudioTransportImpl::NeedMorePlayData(
ptr16In++;
}
}
- assert(2*nSamples == (uint32_t)lenOut);
+ assert(2*nSamples == lenOut);
} else
{
if (_playCount % 100 == 0)
@@ -439,7 +439,7 @@ int32_t AudioTransportImpl::NeedMorePlayData(
ptr16Out = (int16_t*) audioSamples;
// do mono -> stereo
- for (unsigned int i = 0; i < nSamples; i++)
+ for (size_t i = 0; i < nSamples; i++)
{
*ptr16Out = *ptr16In; // left
ptr16Out++;
@@ -448,7 +448,7 @@ int32_t AudioTransportImpl::NeedMorePlayData(
ptr16In++;
}
}
- assert(nSamples == (uint32_t)lenOut);
+ assert(nSamples == lenOut);
} else
{
if (_playCount % 100 == 0)
@@ -483,7 +483,7 @@ int32_t AudioTransportImpl::NeedMorePlayData(
// mono sample from file is duplicated and sent to left and right
// channels
int16_t* audio16 = (int16_t*) audioSamples;
- for (unsigned int i = 0; i < nSamples; i++)
+ for (size_t i = 0; i < nSamples; i++)
{
(*audio16) = fileBuf[i]; // left
audio16++;
@@ -578,7 +578,7 @@ int AudioTransportImpl::OnDataAvailable(const int voe_channels[],
const int16_t* audio_data,
int sample_rate,
int number_of_channels,
- int number_of_frames,
+ size_t number_of_frames,
int audio_delay_milliseconds,
int current_volume,
bool key_pressed,
@@ -590,11 +590,11 @@ void AudioTransportImpl::PushCaptureData(int voe_channel,
const void* audio_data,
int bits_per_sample, int sample_rate,
int number_of_channels,
- int number_of_frames) {}
+ size_t number_of_frames) {}
void AudioTransportImpl::PullRenderData(int bits_per_sample, int sample_rate,
int number_of_channels,
- int number_of_frames,
+ size_t number_of_frames,
void* audio_data,
int64_t* elapsed_time_ms,
int64_t* ntp_time_ms) {}
@@ -620,7 +620,8 @@ FuncTestManager::~FuncTestManager()
int32_t FuncTestManager::Init()
{
- EXPECT_TRUE((_processThread = ProcessThread::Create()) != NULL);
+ EXPECT_TRUE((_processThread = ProcessThread::Create("ProcessThread")) !=
+ NULL);
if (_processThread == NULL)
{
return -1;
@@ -857,7 +858,8 @@ int32_t FuncTestManager::TestAudioLayerSelection()
// ==================================================
// Next, try to make fresh start with new audio layer
- EXPECT_TRUE((_processThread = ProcessThread::Create()) != NULL);
+ EXPECT_TRUE((_processThread = ProcessThread::Create("ProcessThread")) !=
+ NULL);
if (_processThread == NULL)
{
return -1;
diff --git a/chromium/third_party/webrtc/modules/audio_device/test/func_test_manager.h b/chromium/third_party/webrtc/modules/audio_device/test/func_test_manager.h
index f5ddd3a21d5..a91ae814e58 100644
--- a/chromium/third_party/webrtc/modules/audio_device/test/func_test_manager.h
+++ b/chromium/third_party/webrtc/modules/audio_device/test/func_test_manager.h
@@ -47,8 +47,8 @@ enum TestType
struct AudioPacket
{
uint8_t dataBuffer[4 * 960];
- uint16_t nSamples;
- uint16_t nBytesPerSample;
+ size_t nSamples;
+ size_t nBytesPerSample;
uint8_t nChannels;
uint32_t samplesPerSec;
};
@@ -86,8 +86,8 @@ class AudioTransportImpl: public AudioTransport
{
public:
int32_t RecordedDataIsAvailable(const void* audioSamples,
- const uint32_t nSamples,
- const uint8_t nBytesPerSample,
+ const size_t nSamples,
+ const size_t nBytesPerSample,
const uint8_t nChannels,
const uint32_t samplesPerSec,
const uint32_t totalDelayMS,
@@ -96,12 +96,12 @@ public:
const bool keyPressed,
uint32_t& newMicLevel) override;
- int32_t NeedMorePlayData(const uint32_t nSamples,
- const uint8_t nBytesPerSample,
+ int32_t NeedMorePlayData(const size_t nSamples,
+ const size_t nBytesPerSample,
const uint8_t nChannels,
const uint32_t samplesPerSec,
void* audioSamples,
- uint32_t& nSamplesOut,
+ size_t& nSamplesOut,
int64_t* elapsed_time_ms,
int64_t* ntp_time_ms) override;
@@ -110,7 +110,7 @@ public:
const int16_t* audio_data,
int sample_rate,
int number_of_channels,
- int number_of_frames,
+ size_t number_of_frames,
int audio_delay_milliseconds,
int current_volume,
bool key_pressed,
@@ -119,10 +119,10 @@ public:
void PushCaptureData(int voe_channel, const void* audio_data,
int bits_per_sample, int sample_rate,
int number_of_channels,
- int number_of_frames) override;
+ size_t number_of_frames) override;
void PullRenderData(int bits_per_sample, int sample_rate,
- int number_of_channels, int number_of_frames,
+ int number_of_channels, size_t number_of_frames,
void* audio_data,
int64_t* elapsed_time_ms,
int64_t* ntp_time_ms) override;
diff --git a/chromium/third_party/webrtc/modules/audio_device/win/audio_mixer_manager_win.cc b/chromium/third_party/webrtc/modules/audio_device/win/audio_mixer_manager_win.cc
index 4d6e7bb9a69..79076d39a70 100644
--- a/chromium/third_party/webrtc/modules/audio_device/win/audio_mixer_manager_win.cc
+++ b/chromium/third_party/webrtc/modules/audio_device/win/audio_mixer_manager_win.cc
@@ -195,7 +195,9 @@ int32_t AudioMixerManager::EnumerateSpeakers()
for (mixId = 0; mixId < nDevices; mixId++)
{
// get capabilities for the specified mixer ID
- GetCapabilities(mixId, caps);
+ if (!GetCapabilities(mixId, caps))
+ continue;
+
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "[mixerID=%d] %s: ", mixId, WideToUTF8(caps.szPname));
// scan all avaliable destinations for this mixer
for (destId = 0; destId < caps.cDestinations; destId++)
@@ -280,7 +282,9 @@ int32_t AudioMixerManager::EnumerateMicrophones()
for (mixId = 0; mixId < nDevices; mixId++)
{
// get capabilities for the specified mixer ID
- GetCapabilities(mixId, caps);
+ if (!GetCapabilities(mixId, caps))
+ continue;
+
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "[mixerID=%d] %s: ", mixId, WideToUTF8(caps.szPname));
// scan all avaliable destinations for this mixer
for (destId = 0; destId < caps.cDestinations; destId++)
diff --git a/chromium/third_party/webrtc/modules/audio_device/audio_device_tests.isolate b/chromium/third_party/webrtc/modules/audio_device_tests.isolate
index f5a0c184447..f5a0c184447 100644
--- a/chromium/third_party/webrtc/modules/audio_device/audio_device_tests.isolate
+++ b/chromium/third_party/webrtc/modules/audio_device_tests.isolate
diff --git a/chromium/third_party/webrtc/modules/audio_processing/BUILD.gn b/chromium/third_party/webrtc/modules/audio_processing/BUILD.gn
index dd474293a7d..9a45cecd6c8 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/BUILD.gn
+++ b/chromium/third_party/webrtc/modules/audio_processing/BUILD.gn
@@ -77,6 +77,9 @@ source_set("audio_processing") {
"intelligibility/intelligibility_utils.h",
"level_estimator_impl.cc",
"level_estimator_impl.h",
+ "logging/aec_logging.h",
+ "logging/aec_logging_file_handling.cc",
+ "logging/aec_logging_file_handling.h",
"noise_suppression_impl.cc",
"noise_suppression_impl.h",
"processing_component.cc",
@@ -137,6 +140,7 @@ source_set("audio_processing") {
defines = []
deps = [
"../..:webrtc_common",
+ "../audio_coding:isac",
]
if (aec_debug_dump) {
diff --git a/chromium/third_party/webrtc/modules/audio_processing/OWNERS b/chromium/third_party/webrtc/modules/audio_processing/OWNERS
index 41a82af20d7..7b760682b09 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/OWNERS
+++ b/chromium/third_party/webrtc/modules/audio_processing/OWNERS
@@ -1,6 +1,7 @@
aluebs@webrtc.org
andrew@webrtc.org
-bjornv@webrtc.org
+henrik.lundin@webrtc.org
+peah@webrtc.org
# These are for the common case of adding or renaming files. If you're doing
# structural changes, please get a review from a reviewer in this file.
diff --git a/chromium/third_party/webrtc/modules/audio_processing/aec/aec_core.c b/chromium/third_party/webrtc/modules/audio_processing/aec/aec_core.c
index 70927074f8e..b2162ac0b91 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/aec/aec_core.c
+++ b/chromium/third_party/webrtc/modules/audio_processing/aec/aec_core.c
@@ -29,10 +29,12 @@
#include "webrtc/modules/audio_processing/aec/aec_common.h"
#include "webrtc/modules/audio_processing/aec/aec_core_internal.h"
#include "webrtc/modules/audio_processing/aec/aec_rdft.h"
+#include "webrtc/modules/audio_processing/logging/aec_logging.h"
#include "webrtc/modules/audio_processing/utility/delay_estimator_wrapper.h"
#include "webrtc/system_wrappers/interface/cpu_features_wrapper.h"
#include "webrtc/typedefs.h"
+
// Buffer size (samples)
static const size_t kBufSizePartitions = 250; // 1 second of audio in 16 kHz.
@@ -818,8 +820,11 @@ static void UpdateDelayMetrics(AecCore* self) {
// negative (anti-causal system) or larger than the AEC filter length.
{
int num_delays_out_of_bounds = self->num_delay_values;
+ const int histogram_length = sizeof(self->delay_histogram) /
+ sizeof(self->delay_histogram[0]);
for (i = lookahead; i < lookahead + self->num_partitions; ++i) {
- num_delays_out_of_bounds -= self->delay_histogram[i];
+ if (i < histogram_length)
+ num_delays_out_of_bounds -= self->delay_histogram[i];
}
self->fraction_poor_delays = (float)num_delays_out_of_bounds /
self->num_delay_values;
@@ -945,7 +950,8 @@ static void NonLinearProcessing(AecCore* aec,
float fft[PART_LEN2];
float scale, dtmp;
float nlpGainHband;
- int i, j;
+ int i;
+ size_t j;
// Coherence and non-linear filter
float cohde[PART_LEN1], cohxd[PART_LEN1];
@@ -1160,8 +1166,8 @@ static void NonLinearProcessing(AecCore* aec,
memcpy(aec->eBuf, aec->eBuf + PART_LEN, sizeof(float) * PART_LEN);
// Copy the current block to the old position for H band
- for (i = 0; i < aec->num_bands - 1; ++i) {
- memcpy(aec->dBufH[i], aec->dBufH[i] + PART_LEN, sizeof(float) * PART_LEN);
+ for (j = 0; j < aec->num_bands - 1; ++j) {
+ memcpy(aec->dBufH[j], aec->dBufH[j] + PART_LEN, sizeof(float) * PART_LEN);
}
memmove(aec->xfwBuf + PART_LEN1,
@@ -1170,7 +1176,7 @@ static void NonLinearProcessing(AecCore* aec,
}
static void ProcessBlock(AecCore* aec) {
- int i;
+ size_t i;
float y[PART_LEN], e[PART_LEN];
float scale;
@@ -1219,8 +1225,8 @@ static void ProcessBlock(AecCore* aec) {
float farend[PART_LEN];
float* farend_ptr = NULL;
WebRtc_ReadBuffer(aec->far_time_buf, (void**)&farend_ptr, farend, 1);
- rtc_WavWriteSamples(aec->farFile, farend_ptr, PART_LEN);
- rtc_WavWriteSamples(aec->nearFile, nearend_ptr, PART_LEN);
+ RTC_AEC_DEBUG_WAV_WRITE(aec->farFile, farend_ptr, PART_LEN);
+ RTC_AEC_DEBUG_WAV_WRITE(aec->nearFile, nearend_ptr, PART_LEN);
}
#endif
@@ -1347,6 +1353,10 @@ static void ProcessBlock(AecCore* aec) {
ef[1][i] = fft[2 * i + 1];
}
+ RTC_AEC_DEBUG_RAW_WRITE(aec->e_fft_file,
+ &ef[0][0],
+ sizeof(ef[0][0]) * PART_LEN1 * 2);
+
if (aec->metricsMode == 1) {
// Note that the first PART_LEN samples in fft (before transformation) are
// zero. Hence, the scaling by two in UpdateLevel() should not be
@@ -1373,10 +1383,8 @@ static void ProcessBlock(AecCore* aec) {
WebRtc_WriteBuffer(aec->outFrBufH[i], outputH[i], PART_LEN);
}
-#ifdef WEBRTC_AEC_DEBUG_DUMP
- rtc_WavWriteSamples(aec->outLinearFile, e, PART_LEN);
- rtc_WavWriteSamples(aec->outFile, output, PART_LEN);
-#endif
+ RTC_AEC_DEBUG_WAV_WRITE(aec->outLinearFile, e, PART_LEN);
+ RTC_AEC_DEBUG_WAV_WRITE(aec->outFile, output, PART_LEN);
}
AecCore* WebRtcAec_CreateAec() {
@@ -1511,40 +1519,19 @@ void WebRtcAec_FreeAec(AecCore* aec) {
WebRtc_FreeBuffer(aec->far_buf_windowed);
#ifdef WEBRTC_AEC_DEBUG_DUMP
WebRtc_FreeBuffer(aec->far_time_buf);
- rtc_WavClose(aec->farFile);
- rtc_WavClose(aec->nearFile);
- rtc_WavClose(aec->outFile);
- rtc_WavClose(aec->outLinearFile);
#endif
+ RTC_AEC_DEBUG_WAV_CLOSE(aec->farFile);
+ RTC_AEC_DEBUG_WAV_CLOSE(aec->nearFile);
+ RTC_AEC_DEBUG_WAV_CLOSE(aec->outFile);
+ RTC_AEC_DEBUG_WAV_CLOSE(aec->outLinearFile);
+ RTC_AEC_DEBUG_RAW_CLOSE(aec->e_fft_file);
+
WebRtc_FreeDelayEstimator(aec->delay_estimator);
WebRtc_FreeDelayEstimatorFarend(aec->delay_estimator_farend);
free(aec);
}
-#ifdef WEBRTC_AEC_DEBUG_DUMP
-// Open a new Wav file for writing. If it was already open with a different
-// sample frequency, close it first.
-static void ReopenWav(rtc_WavWriter** wav_file,
- const char* name,
- int seq1,
- int seq2,
- int sample_rate) {
- int written ATTRIBUTE_UNUSED;
- char filename[64];
- if (*wav_file) {
- if (rtc_WavSampleRate(*wav_file) == sample_rate)
- return;
- rtc_WavClose(*wav_file);
- }
- written = snprintf(filename, sizeof(filename), "%s%d-%d.wav",
- name, seq1, seq2);
- assert(written >= 0); // no output error
- assert((size_t)written < sizeof(filename)); // buffer was large enough
- *wav_file = rtc_WavOpen(filename, sample_rate, 1);
-}
-#endif // WEBRTC_AEC_DEBUG_DUMP
-
int WebRtcAec_InitAec(AecCore* aec, int sampFreq) {
int i;
@@ -1557,7 +1544,7 @@ int WebRtcAec_InitAec(AecCore* aec, int sampFreq) {
} else {
aec->normal_mu = 0.5f;
aec->normal_error_threshold = 1.5e-6f;
- aec->num_bands = sampFreq / 16000;
+ aec->num_bands = (size_t)(sampFreq / 16000);
}
WebRtc_InitBuffer(aec->nearFrBuf);
@@ -1574,15 +1561,24 @@ int WebRtcAec_InitAec(AecCore* aec, int sampFreq) {
WebRtc_InitBuffer(aec->far_time_buf);
{
int process_rate = sampFreq > 16000 ? 16000 : sampFreq;
- ReopenWav(&aec->farFile, "aec_far",
- aec->instance_index, aec->debug_dump_count, process_rate);
- ReopenWav(&aec->nearFile, "aec_near",
- aec->instance_index, aec->debug_dump_count, process_rate);
- ReopenWav(&aec->outFile, "aec_out",
- aec->instance_index, aec->debug_dump_count, process_rate);
- ReopenWav(&aec->outLinearFile, "aec_out_linear",
- aec->instance_index, aec->debug_dump_count, process_rate);
- }
+ RTC_AEC_DEBUG_WAV_REOPEN("aec_far", aec->instance_index,
+ aec->debug_dump_count, process_rate,
+ &aec->farFile );
+ RTC_AEC_DEBUG_WAV_REOPEN("aec_near", aec->instance_index,
+ aec->debug_dump_count, process_rate,
+ &aec->nearFile);
+ RTC_AEC_DEBUG_WAV_REOPEN("aec_out", aec->instance_index,
+ aec->debug_dump_count, process_rate,
+ &aec->outFile );
+ RTC_AEC_DEBUG_WAV_REOPEN("aec_out_linear", aec->instance_index,
+ aec->debug_dump_count, process_rate,
+ &aec->outLinearFile);
+ }
+
+ RTC_AEC_DEBUG_RAW_OPEN("aec_e_fft",
+ aec->debug_dump_count,
+ &aec->e_fft_file);
+
++aec->debug_dump_count;
#endif
aec->system_delay = 0;
@@ -1731,11 +1727,11 @@ int WebRtcAec_MoveFarReadPtr(AecCore* aec, int elements) {
void WebRtcAec_ProcessFrames(AecCore* aec,
const float* const* nearend,
- int num_bands,
- int num_samples,
+ size_t num_bands,
+ size_t num_samples,
int knownDelay,
float* const* out) {
- int i, j;
+ size_t i, j;
int out_elements = 0;
aec->frame_count++;
diff --git a/chromium/third_party/webrtc/modules/audio_processing/aec/aec_core.h b/chromium/third_party/webrtc/modules/audio_processing/aec/aec_core.h
index 25305275609..241f077524c 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/aec/aec_core.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/aec/aec_core.h
@@ -15,6 +15,8 @@
#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_AEC_AEC_CORE_H_
#define WEBRTC_MODULES_AUDIO_PROCESSING_AEC_AEC_CORE_H_
+#include <stddef.h>
+
#include "webrtc/typedefs.h"
#define FRAME_LEN 80
@@ -65,8 +67,8 @@ void WebRtcAec_InitAec_neon(void);
void WebRtcAec_BufferFarendPartition(AecCore* aec, const float* farend);
void WebRtcAec_ProcessFrames(AecCore* aec,
const float* const* nearend,
- int num_bands,
- int num_samples,
+ size_t num_bands,
+ size_t num_samples,
int knownDelay,
float* const* out);
diff --git a/chromium/third_party/webrtc/modules/audio_processing/aec/aec_core_internal.h b/chromium/third_party/webrtc/modules/audio_processing/aec/aec_core_internal.h
index 796ea2c9c06..2de028379b6 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/aec/aec_core_internal.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/aec/aec_core_internal.h
@@ -101,7 +101,7 @@ struct AecCore {
int mult; // sampling frequency multiple
int sampFreq;
- int num_bands;
+ size_t num_bands;
uint32_t seed;
float normal_mu; // stepsize
@@ -166,6 +166,7 @@ struct AecCore {
rtc_WavWriter* nearFile;
rtc_WavWriter* outFile;
rtc_WavWriter* outLinearFile;
+ FILE* e_fft_file;
#endif
};
diff --git a/chromium/third_party/webrtc/modules/audio_processing/aec/aec_resampler.c b/chromium/third_party/webrtc/modules/audio_processing/aec/aec_resampler.c
index 62a830ba65f..99c39efa881 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/aec/aec_resampler.c
+++ b/chromium/third_party/webrtc/modules/audio_processing/aec/aec_resampler.c
@@ -64,17 +64,16 @@ void WebRtcAec_FreeResampler(void* resampInst) {
void WebRtcAec_ResampleLinear(void* resampInst,
const float* inspeech,
- int size,
+ size_t size,
float skew,
float* outspeech,
- int* size_out) {
+ size_t* size_out) {
AecResampler* obj = (AecResampler*)resampInst;
float* y;
float be, tnew;
- int tn, mm;
+ size_t tn, mm;
- assert(size >= 0);
assert(size <= 2 * FRAME_LEN);
assert(resampInst != NULL);
assert(inspeech != NULL);
@@ -94,7 +93,7 @@ void WebRtcAec_ResampleLinear(void* resampInst,
y = &obj->buffer[FRAME_LEN]; // Point at current frame
tnew = be * mm + obj->position;
- tn = (int)tnew;
+ tn = (size_t)tnew;
while (tn < size) {
diff --git a/chromium/third_party/webrtc/modules/audio_processing/aec/aec_resampler.h b/chromium/third_party/webrtc/modules/audio_processing/aec/aec_resampler.h
index a37499258f3..a5002c155a4 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/aec/aec_resampler.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/aec/aec_resampler.h
@@ -31,9 +31,9 @@ int WebRtcAec_GetSkew(void* resampInst, int rawSkew, float* skewEst);
// Resamples input using linear interpolation.
void WebRtcAec_ResampleLinear(void* resampInst,
const float* inspeech,
- int size,
+ size_t size,
float skew,
float* outspeech,
- int* size_out);
+ size_t* size_out);
#endif // WEBRTC_MODULES_AUDIO_PROCESSING_AEC_AEC_RESAMPLER_H_
diff --git a/chromium/third_party/webrtc/modules/audio_processing/aec/echo_cancellation.c b/chromium/third_party/webrtc/modules/audio_processing/aec/echo_cancellation.c
index b31a84a87af..0f5cd31ddb2 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/aec/echo_cancellation.c
+++ b/chromium/third_party/webrtc/modules/audio_processing/aec/echo_cancellation.c
@@ -105,16 +105,16 @@ static void EstBufDelayNormal(Aec* aecInst);
static void EstBufDelayExtended(Aec* aecInst);
static int ProcessNormal(Aec* self,
const float* const* near,
- int num_bands,
+ size_t num_bands,
float* const* out,
- int16_t num_samples,
+ size_t num_samples,
int16_t reported_delay_ms,
int32_t skew);
static void ProcessExtended(Aec* self,
const float* const* near,
- int num_bands,
+ size_t num_bands,
float* const* out,
- int16_t num_samples,
+ size_t num_samples,
int16_t reported_delay_ms,
int32_t skew);
@@ -271,9 +271,9 @@ int32_t WebRtcAec_Init(void* aecInst, int32_t sampFreq, int32_t scSampFreq) {
// only buffer L band for farend
int32_t WebRtcAec_BufferFarend(void* aecInst,
const float* farend,
- int16_t nrOfSamples) {
+ size_t nrOfSamples) {
Aec* aecpc = aecInst;
- int newNrOfSamples = nrOfSamples;
+ size_t newNrOfSamples = nrOfSamples;
float new_farend[MAX_RESAMP_LEN];
const float* farend_ptr = farend;
@@ -305,11 +305,11 @@ int32_t WebRtcAec_BufferFarend(void* aecInst,
}
aecpc->farend_started = 1;
- WebRtcAec_SetSystemDelay(aecpc->aec,
- WebRtcAec_system_delay(aecpc->aec) + newNrOfSamples);
+ WebRtcAec_SetSystemDelay(
+ aecpc->aec, WebRtcAec_system_delay(aecpc->aec) + (int)newNrOfSamples);
// Write the time-domain data to |far_pre_buf|.
- WebRtc_WriteBuffer(aecpc->far_pre_buf, farend_ptr, (size_t)newNrOfSamples);
+ WebRtc_WriteBuffer(aecpc->far_pre_buf, farend_ptr, newNrOfSamples);
// Transform to frequency domain if we have enough data.
while (WebRtc_available_read(aecpc->far_pre_buf) >= PART_LEN2) {
@@ -334,9 +334,9 @@ int32_t WebRtcAec_BufferFarend(void* aecInst,
int32_t WebRtcAec_Process(void* aecInst,
const float* const* nearend,
- int num_bands,
+ size_t num_bands,
float* const* out,
- int16_t nrOfSamples,
+ size_t nrOfSamples,
int16_t msInSndCardBuf,
int32_t skew) {
Aec* aecpc = aecInst;
@@ -592,14 +592,14 @@ AecCore* WebRtcAec_aec_core(void* handle) {
static int ProcessNormal(Aec* aecpc,
const float* const* nearend,
- int num_bands,
+ size_t num_bands,
float* const* out,
- int16_t nrOfSamples,
+ size_t nrOfSamples,
int16_t msInSndCardBuf,
int32_t skew) {
int retVal = 0;
- short i;
- short nBlocks10ms;
+ size_t i;
+ size_t nBlocks10ms;
// Limit resampling to doubling/halving of signal
const float minSkewEst = -0.5f;
const float maxSkewEst = 1.0f;
@@ -740,12 +740,12 @@ static int ProcessNormal(Aec* aecpc,
static void ProcessExtended(Aec* self,
const float* const* near,
- int num_bands,
+ size_t num_bands,
float* const* out,
- int16_t num_samples,
+ size_t num_samples,
int16_t reported_delay_ms,
int32_t skew) {
- int i;
+ size_t i;
const int delay_diff_offset = kDelayDiffOffsetSamples;
#if defined(WEBRTC_UNTRUSTED_DELAY)
reported_delay_ms = kFixedDelayMs;
diff --git a/chromium/third_party/webrtc/modules/audio_processing/aec/include/echo_cancellation.h b/chromium/third_party/webrtc/modules/audio_processing/aec/include/echo_cancellation.h
index e49a0847e03..a340cf84d0d 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/aec/include/echo_cancellation.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/aec/include/echo_cancellation.h
@@ -11,6 +11,8 @@
#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_AEC_INCLUDE_ECHO_CANCELLATION_H_
#define WEBRTC_MODULES_AUDIO_PROCESSING_AEC_INCLUDE_ECHO_CANCELLATION_H_
+#include <stddef.h>
+
#include "webrtc/typedefs.h"
// Errors
@@ -111,7 +113,7 @@ int32_t WebRtcAec_Init(void* aecInst, int32_t sampFreq, int32_t scSampFreq);
*/
int32_t WebRtcAec_BufferFarend(void* aecInst,
const float* farend,
- int16_t nrOfSamples);
+ size_t nrOfSamples);
/*
* Runs the echo canceller on an 80 or 160 sample blocks of data.
@@ -138,9 +140,9 @@ int32_t WebRtcAec_BufferFarend(void* aecInst,
*/
int32_t WebRtcAec_Process(void* aecInst,
const float* const* nearend,
- int num_bands,
+ size_t num_bands,
float* const* out,
- int16_t nrOfSamples,
+ size_t nrOfSamples,
int16_t msInSndCardBuf,
int32_t skew);
diff --git a/chromium/third_party/webrtc/modules/audio_processing/aec/system_delay_unittest.cc b/chromium/third_party/webrtc/modules/audio_processing/aec/system_delay_unittest.cc
index 5e26a31898e..07e3cf8add0 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/aec/system_delay_unittest.cc
+++ b/chromium/third_party/webrtc/modules/audio_processing/aec/system_delay_unittest.cc
@@ -33,7 +33,7 @@ class SystemDelayTest : public ::testing::Test {
void RenderAndCapture(int device_buffer_ms);
// Fills up the far-end buffer with respect to the default device buffer size.
- int BufferFillUp();
+ size_t BufferFillUp();
// Runs and verifies the behavior in a stable startup procedure.
void RunStableStartup();
@@ -44,7 +44,7 @@ class SystemDelayTest : public ::testing::Test {
void* handle_;
Aec* self_;
- int samples_per_frame_;
+ size_t samples_per_frame_;
// Dummy input/output speech data.
static const int kSamplesPerChunk = 160;
float far_[kSamplesPerChunk];
@@ -102,7 +102,7 @@ void SystemDelayTest::Init(int sample_rate_hz) {
EXPECT_EQ(0, WebRtcAec_system_delay(self_->aec));
// One frame equals 10 ms of data.
- samples_per_frame_ = sample_rate_hz / 100;
+ samples_per_frame_ = static_cast<size_t>(sample_rate_hz / 100);
}
void SystemDelayTest::RenderAndCapture(int device_buffer_ms) {
@@ -117,15 +117,16 @@ void SystemDelayTest::RenderAndCapture(int device_buffer_ms) {
0));
}
-int SystemDelayTest::BufferFillUp() {
+size_t SystemDelayTest::BufferFillUp() {
// To make sure we have a full buffer when we verify stability we first fill
// up the far-end buffer with the same amount as we will report in through
// Process().
- int buffer_size = 0;
+ size_t buffer_size = 0;
for (int i = 0; i < kDeviceBufMs / 10; i++) {
EXPECT_EQ(0, WebRtcAec_BufferFarend(handle_, far_, samples_per_frame_));
buffer_size += samples_per_frame_;
- EXPECT_EQ(buffer_size, WebRtcAec_system_delay(self_->aec));
+ EXPECT_EQ(static_cast<int>(buffer_size),
+ WebRtcAec_system_delay(self_->aec));
}
return buffer_size;
}
@@ -134,7 +135,7 @@ void SystemDelayTest::RunStableStartup() {
// To make sure we have a full buffer when we verify stability we first fill
// up the far-end buffer with the same amount as we will report in through
// Process().
- int buffer_size = BufferFillUp();
+ size_t buffer_size = BufferFillUp();
if (WebRtcAec_delay_agnostic_enabled(self_->aec) == 1) {
// In extended_filter mode we set the buffer size after the first processed
@@ -159,14 +160,16 @@ void SystemDelayTest::RunStableStartup() {
EXPECT_GT(kStableConvergenceMs, process_time_ms);
}
// Verify that the buffer has been flushed.
- EXPECT_GE(buffer_size, WebRtcAec_system_delay(self_->aec));
+ EXPECT_GE(static_cast<int>(buffer_size),
+ WebRtcAec_system_delay(self_->aec));
}
int SystemDelayTest::MapBufferSizeToSamples(int size_in_ms,
bool extended_filter) {
// If extended_filter is disabled we add an extra 10 ms for the unprocessed
// frame. That is simply how the algorithm is constructed.
- return (size_in_ms + (extended_filter ? 0 : 10)) * samples_per_frame_ / 10;
+ return static_cast<int>(
+ (size_in_ms + (extended_filter ? 0 : 10)) * samples_per_frame_ / 10);
}
// The tests should meet basic requirements and not be adjusted to what is
@@ -207,7 +210,8 @@ TEST_F(SystemDelayTest, CorrectIncreaseWhenBufferFarend) {
for (int j = 1; j <= 5; j++) {
EXPECT_EQ(0,
WebRtcAec_BufferFarend(handle_, far_, samples_per_frame_));
- EXPECT_EQ(j * samples_per_frame_, WebRtcAec_system_delay(self_->aec));
+ EXPECT_EQ(static_cast<int>(j * samples_per_frame_),
+ WebRtcAec_system_delay(self_->aec));
}
}
}
@@ -236,7 +240,8 @@ TEST_F(SystemDelayTest, CorrectDelayAfterStableStartup) {
// the average.
// In extended_filter mode we target 50% and measure after one processed
// 10 ms chunk.
- int average_reported_delay = kDeviceBufMs * samples_per_frame_ / 10;
+ int average_reported_delay =
+ static_cast<int>(kDeviceBufMs * samples_per_frame_ / 10);
EXPECT_GE(average_reported_delay, WebRtcAec_system_delay(self_->aec));
int lower_bound = WebRtcAec_extended_filter_enabled(self_->aec)
? average_reported_delay / 2 - samples_per_frame_
@@ -267,7 +272,7 @@ TEST_F(SystemDelayTest, CorrectDelayAfterUnstableStartup) {
// To make sure we have a full buffer when we verify stability we first fill
// up the far-end buffer with the same amount as we will report in on the
// average through Process().
- int buffer_size = BufferFillUp();
+ size_t buffer_size = BufferFillUp();
int buffer_offset_ms = 25;
int reported_delay_ms = 0;
@@ -285,14 +290,16 @@ TEST_F(SystemDelayTest, CorrectDelayAfterUnstableStartup) {
// Verify convergence time.
EXPECT_GE(kMaxConvergenceMs, process_time_ms);
// Verify that the buffer has been flushed.
- EXPECT_GE(buffer_size, WebRtcAec_system_delay(self_->aec));
+ EXPECT_GE(static_cast<int>(buffer_size),
+ WebRtcAec_system_delay(self_->aec));
// Verify system delay with respect to requirements, i.e., the
// |system_delay| is in the interval [60%, 100%] of what's last reported.
- EXPECT_GE(reported_delay_ms * samples_per_frame_ / 10,
- WebRtcAec_system_delay(self_->aec));
- EXPECT_LE(reported_delay_ms * samples_per_frame_ / 10 * 3 / 5,
+ EXPECT_GE(static_cast<int>(reported_delay_ms * samples_per_frame_ / 10),
WebRtcAec_system_delay(self_->aec));
+ EXPECT_LE(
+ static_cast<int>(reported_delay_ms * samples_per_frame_ / 10 * 3 / 5),
+ WebRtcAec_system_delay(self_->aec));
}
}
@@ -331,8 +338,8 @@ TEST_F(SystemDelayTest, CorrectDelayAfterStableBufferBuildUp) {
// We now have established the required buffer size. Let us verify that we
// fill up before leaving the startup phase for normal processing.
- int buffer_size = 0;
- int target_buffer_size = kDeviceBufMs * samples_per_frame_ / 10 * 3 / 4;
+ size_t buffer_size = 0;
+ size_t target_buffer_size = kDeviceBufMs * samples_per_frame_ / 10 * 3 / 4;
process_time_ms = 0;
for (; process_time_ms <= kMaxConvergenceMs; process_time_ms += 10) {
RenderAndCapture(kDeviceBufMs);
@@ -345,7 +352,8 @@ TEST_F(SystemDelayTest, CorrectDelayAfterStableBufferBuildUp) {
// Verify convergence time.
EXPECT_GT(kMaxConvergenceMs, process_time_ms);
// Verify that the buffer has reached the desired size.
- EXPECT_LE(target_buffer_size, WebRtcAec_system_delay(self_->aec));
+ EXPECT_LE(static_cast<int>(target_buffer_size),
+ WebRtcAec_system_delay(self_->aec));
// Verify normal behavior (system delay is kept constant) after startup by
// running a couple of calls to BufferFarend() and Process().
diff --git a/chromium/third_party/webrtc/modules/audio_processing/aecm/echo_control_mobile.c b/chromium/third_party/webrtc/modules/audio_processing/aecm/echo_control_mobile.c
index 5f3fa2af681..83781e97fed 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/aecm/echo_control_mobile.c
+++ b/chromium/third_party/webrtc/modules/audio_processing/aecm/echo_control_mobile.c
@@ -199,7 +199,7 @@ int32_t WebRtcAecm_Init(void *aecmInst, int32_t sampFreq)
}
int32_t WebRtcAecm_BufferFarend(void *aecmInst, const int16_t *farend,
- int16_t nrOfSamples)
+ size_t nrOfSamples)
{
AecMobile* aecm = aecmInst;
int32_t retVal = 0;
@@ -233,21 +233,21 @@ int32_t WebRtcAecm_BufferFarend(void *aecmInst, const int16_t *farend,
WebRtcAecm_DelayComp(aecm);
}
- WebRtc_WriteBuffer(aecm->farendBuf, farend, (size_t) nrOfSamples);
+ WebRtc_WriteBuffer(aecm->farendBuf, farend, nrOfSamples);
return retVal;
}
int32_t WebRtcAecm_Process(void *aecmInst, const int16_t *nearendNoisy,
const int16_t *nearendClean, int16_t *out,
- int16_t nrOfSamples, int16_t msInSndCardBuf)
+ size_t nrOfSamples, int16_t msInSndCardBuf)
{
AecMobile* aecm = aecmInst;
int32_t retVal = 0;
- short i;
+ size_t i;
short nmbrOfFilledBuffers;
- short nBlocks10ms;
- short nFrames;
+ size_t nBlocks10ms;
+ size_t nFrames;
#ifdef AEC_DEBUG
short msInAECBuf;
#endif
diff --git a/chromium/third_party/webrtc/modules/audio_processing/aecm/include/echo_control_mobile.h b/chromium/third_party/webrtc/modules/audio_processing/aecm/include/echo_control_mobile.h
index 22e0fe6bfba..7ae15c2a3d6 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/aecm/include/echo_control_mobile.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/aecm/include/echo_control_mobile.h
@@ -87,7 +87,7 @@ int32_t WebRtcAecm_Init(void* aecmInst, int32_t sampFreq);
*/
int32_t WebRtcAecm_BufferFarend(void* aecmInst,
const int16_t* farend,
- int16_t nrOfSamples);
+ size_t nrOfSamples);
/*
* Runs the AECM on an 80 or 160 sample blocks of data.
@@ -118,7 +118,7 @@ int32_t WebRtcAecm_Process(void* aecmInst,
const int16_t* nearendNoisy,
const int16_t* nearendClean,
int16_t* out,
- int16_t nrOfSamples,
+ size_t nrOfSamples,
int16_t msInSndCardBuf);
/*
diff --git a/chromium/third_party/webrtc/modules/audio_processing/agc/agc.cc b/chromium/third_party/webrtc/modules/audio_processing/agc/agc.cc
index 80c3e1fe729..706b963aa18 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/agc/agc.cc
+++ b/chromium/third_party/webrtc/modules/audio_processing/agc/agc.cc
@@ -39,22 +39,22 @@ Agc::Agc()
Agc::~Agc() {}
-float Agc::AnalyzePreproc(const int16_t* audio, int length) {
+float Agc::AnalyzePreproc(const int16_t* audio, size_t length) {
assert(length > 0);
- int num_clipped = 0;
- for (int i = 0; i < length; ++i) {
+ size_t num_clipped = 0;
+ for (size_t i = 0; i < length; ++i) {
if (audio[i] == 32767 || audio[i] == -32768)
++num_clipped;
}
return 1.0f * num_clipped / length;
}
-int Agc::Process(const int16_t* audio, int length, int sample_rate_hz) {
+int Agc::Process(const int16_t* audio, size_t length, int sample_rate_hz) {
vad_.ProcessChunk(audio, length, sample_rate_hz);
const std::vector<double>& rms = vad_.chunkwise_rms();
const std::vector<double>& probabilities =
vad_.chunkwise_voice_probabilities();
- DCHECK_EQ(rms.size(), probabilities.size());
+ RTC_DCHECK_EQ(rms.size(), probabilities.size());
for (size_t i = 0; i < rms.size(); ++i) {
histogram_->Update(rms[i], probabilities[i]);
}
diff --git a/chromium/third_party/webrtc/modules/audio_processing/agc/agc.h b/chromium/third_party/webrtc/modules/audio_processing/agc/agc.h
index dd4605e812e..08c287f8205 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/agc/agc.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/agc/agc.h
@@ -27,10 +27,10 @@ class Agc {
// Returns the proportion of samples in the buffer which are at full-scale
// (and presumably clipped).
- virtual float AnalyzePreproc(const int16_t* audio, int length);
+ virtual float AnalyzePreproc(const int16_t* audio, size_t length);
// |audio| must be mono; in a multi-channel stream, provide the first (usually
// left) channel.
- virtual int Process(const int16_t* audio, int length, int sample_rate_hz);
+ virtual int Process(const int16_t* audio, size_t length, int sample_rate_hz);
// Retrieves the difference between the target RMS level and the current
// signal RMS level in dB. Returns true if an update is available and false
diff --git a/chromium/third_party/webrtc/modules/audio_processing/agc/agc_manager_direct.cc b/chromium/third_party/webrtc/modules/audio_processing/agc/agc_manager_direct.cc
index 74f55407a42..48ce2f877c3 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/agc/agc_manager_direct.cc
+++ b/chromium/third_party/webrtc/modules/audio_processing/agc/agc_manager_direct.cc
@@ -95,7 +95,7 @@ class DebugFile {
~DebugFile() {
fclose(file_);
}
- void Write(const int16_t* data, int length_samples) {
+ void Write(const int16_t* data, size_t length_samples) {
fwrite(data, 1, length_samples * sizeof(int16_t), file_);
}
private:
@@ -106,7 +106,7 @@ class DebugFile {
}
~DebugFile() {
}
- void Write(const int16_t* data, int length_samples) {
+ void Write(const int16_t* data, size_t length_samples) {
}
#endif // WEBRTC_AGC_DEBUG_DUMP
};
@@ -188,8 +188,8 @@ int AgcManagerDirect::Initialize() {
void AgcManagerDirect::AnalyzePreProcess(int16_t* audio,
int num_channels,
- int samples_per_channel) {
- int length = num_channels * samples_per_channel;
+ size_t samples_per_channel) {
+ size_t length = num_channels * samples_per_channel;
if (capture_muted_) {
return;
}
@@ -230,7 +230,7 @@ void AgcManagerDirect::AnalyzePreProcess(int16_t* audio,
}
void AgcManagerDirect::Process(const int16_t* audio,
- int length,
+ size_t length,
int sample_rate_hz) {
if (capture_muted_) {
return;
diff --git a/chromium/third_party/webrtc/modules/audio_processing/agc/agc_manager_direct.h b/chromium/third_party/webrtc/modules/audio_processing/agc/agc_manager_direct.h
index d12acf30d37..6edb0f7bf13 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/agc/agc_manager_direct.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/agc/agc_manager_direct.h
@@ -21,9 +21,9 @@ class DebugFile;
class GainControl;
// Callbacks that need to be injected into AgcManagerDirect to read and control
-// the volume values. They have different behavior if they are called from
-// AgcManager or AudioProcessing. This is done to remove the VoiceEngine
-// dependency in AgcManagerDirect.
+// the volume values. This is done to remove the VoiceEngine dependency in
+// AgcManagerDirect.
+// TODO(aluebs): Remove VolumeCallbacks.
class VolumeCallbacks {
public:
virtual ~VolumeCallbacks() {}
@@ -33,11 +33,10 @@ class VolumeCallbacks {
// Direct interface to use AGC to set volume and compression values.
// AudioProcessing uses this interface directly to integrate the callback-less
-// AGC. AgcManager delegates most of its calls here. See agc_manager.h for
-// undocumented methods.
+// AGC.
//
// This class is not thread-safe.
-class AgcManagerDirect {
+class AgcManagerDirect final {
public:
// AgcManagerDirect will configure GainControl internally. The user is
// responsible for processing the audio using it after the call to Process.
@@ -57,9 +56,18 @@ class AgcManagerDirect {
int Initialize();
void AnalyzePreProcess(int16_t* audio,
int num_channels,
- int samples_per_channel);
- void Process(const int16_t* audio, int length, int sample_rate_hz);
+ size_t samples_per_channel);
+ void Process(const int16_t* audio, size_t length, int sample_rate_hz);
+ // Call when the capture stream has been muted/unmuted. This causes the
+ // manager to disregard all incoming audio; chances are good it's background
+ // noise to which we'd like to avoid adapting.
+ void SetCaptureMuted(bool muted);
+ bool capture_muted() { return capture_muted_; }
+
+ float voice_probability();
+
+ private:
// Sets a new microphone level, after first checking that it hasn't been
// updated by the user, in which case no action is taken.
void SetLevel(int new_level);
@@ -69,12 +77,6 @@ class AgcManagerDirect {
// |kClippedLevelMin|.
void SetMaxLevel(int level);
- void SetCaptureMuted(bool muted);
- bool capture_muted() { return capture_muted_; }
-
- float voice_probability();
-
- private:
int CheckVolumeAndReset();
void UpdateGain();
void UpdateCompressor();
@@ -97,6 +99,8 @@ class AgcManagerDirect {
rtc::scoped_ptr<DebugFile> file_preproc_;
rtc::scoped_ptr<DebugFile> file_postproc_;
+
+ RTC_DISALLOW_COPY_AND_ASSIGN(AgcManagerDirect);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_processing/agc/agc_manager_direct_unittest.cc b/chromium/third_party/webrtc/modules/audio_processing/agc/agc_manager_direct_unittest.cc
new file mode 100644
index 00000000000..9dba2fc9016
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/audio_processing/agc/agc_manager_direct_unittest.cc
@@ -0,0 +1,686 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_processing/agc/agc_manager_direct.h"
+
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/common_types.h"
+#include "webrtc/modules/audio_processing/agc/mock_agc.h"
+#include "webrtc/modules/audio_processing/include/mock_audio_processing.h"
+#include "webrtc/system_wrappers/interface/trace.h"
+#include "webrtc/test/testsupport/trace_to_stderr.h"
+
+using ::testing::_;
+using ::testing::DoAll;
+using ::testing::Eq;
+using ::testing::Mock;
+using ::testing::Return;
+using ::testing::SetArgPointee;
+using ::testing::SetArgReferee;
+
+namespace webrtc {
+namespace {
+
+const int kSampleRateHz = 32000;
+const int kNumChannels = 1;
+const int kSamplesPerChannel = kSampleRateHz / 100;
+const int kInitialVolume = 128;
+const float kAboveClippedThreshold = 0.2f;
+
+class TestVolumeCallbacks : public VolumeCallbacks {
+ public:
+ TestVolumeCallbacks() : volume_(0) {}
+ void SetMicVolume(int volume) override { volume_ = volume; }
+ int GetMicVolume() override { return volume_; }
+
+ private:
+ int volume_;
+};
+
+} // namespace
+
+class AgcManagerDirectTest : public ::testing::Test {
+ protected:
+ AgcManagerDirectTest()
+ : agc_(new MockAgc), manager_(agc_, &gctrl_, &volume_, kInitialVolume) {
+ ExpectInitialize();
+ manager_.Initialize();
+ }
+
+ void FirstProcess() {
+ EXPECT_CALL(*agc_, Reset());
+ EXPECT_CALL(*agc_, GetRmsErrorDb(_)).WillOnce(Return(false));
+ CallProcess(1);
+ }
+
+ void SetVolumeAndProcess(int volume) {
+ volume_.SetMicVolume(volume);
+ FirstProcess();
+ }
+
+ void ExpectCheckVolumeAndReset(int volume) {
+ volume_.SetMicVolume(volume);
+ EXPECT_CALL(*agc_, Reset());
+ }
+
+ void ExpectInitialize() {
+ EXPECT_CALL(gctrl_, set_mode(GainControl::kFixedDigital));
+ EXPECT_CALL(gctrl_, set_target_level_dbfs(2));
+ EXPECT_CALL(gctrl_, set_compression_gain_db(7));
+ EXPECT_CALL(gctrl_, enable_limiter(true));
+ }
+
+ void CallProcess(int num_calls) {
+ for (int i = 0; i < num_calls; ++i) {
+ EXPECT_CALL(*agc_, Process(_, _, _)).WillOnce(Return(0));
+ manager_.Process(nullptr, kSamplesPerChannel, kSampleRateHz);
+ }
+ }
+
+ void CallPreProc(int num_calls) {
+ for (int i = 0; i < num_calls; ++i) {
+ manager_.AnalyzePreProcess(nullptr, kNumChannels, kSamplesPerChannel);
+ }
+ }
+
+ MockAgc* agc_;
+ MockGainControl gctrl_;
+ TestVolumeCallbacks volume_;
+ AgcManagerDirect manager_;
+ test::TraceToStderr trace_to_stderr;
+};
+
+TEST_F(AgcManagerDirectTest, StartupMinVolumeConfigurationIsRespected) {
+ FirstProcess();
+ EXPECT_EQ(kInitialVolume, volume_.GetMicVolume());
+}
+
+TEST_F(AgcManagerDirectTest, MicVolumeResponseToRmsError) {
+ FirstProcess();
+
+ // Compressor default; no residual error.
+ EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(5), Return(true)));
+ CallProcess(1);
+
+ // Inside the compressor's window; no change of volume.
+ EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(10), Return(true)));
+ CallProcess(1);
+
+ // Above the compressor's window; volume should be increased.
+ EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(11), Return(true)));
+ CallProcess(1);
+ EXPECT_EQ(130, volume_.GetMicVolume());
+
+ EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(20), Return(true)));
+ CallProcess(1);
+ EXPECT_EQ(168, volume_.GetMicVolume());
+
+ // Inside the compressor's window; no change of volume.
+ EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(5), Return(true)));
+ CallProcess(1);
+ EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(0), Return(true)));
+ CallProcess(1);
+
+ // Below the compressor's window; volume should be decreased.
+ EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(-1), Return(true)));
+ CallProcess(1);
+ EXPECT_EQ(167, volume_.GetMicVolume());
+
+ EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(-1), Return(true)));
+ CallProcess(1);
+ EXPECT_EQ(163, volume_.GetMicVolume());
+
+ EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(-9), Return(true)));
+ CallProcess(1);
+ EXPECT_EQ(129, volume_.GetMicVolume());
+}
+
+TEST_F(AgcManagerDirectTest, MicVolumeIsLimited) {
+ FirstProcess();
+
+ // Maximum upwards change is limited.
+ EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(30), Return(true)));
+ CallProcess(1);
+ EXPECT_EQ(183, volume_.GetMicVolume());
+
+ EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(30), Return(true)));
+ CallProcess(1);
+ EXPECT_EQ(243, volume_.GetMicVolume());
+
+ // Won't go higher than the maximum.
+ EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(30), Return(true)));
+ CallProcess(1);
+ EXPECT_EQ(255, volume_.GetMicVolume());
+
+ EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(-1), Return(true)));
+ CallProcess(1);
+ EXPECT_EQ(254, volume_.GetMicVolume());
+
+ // Maximum downwards change is limited.
+ EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(-40), Return(true)));
+ CallProcess(1);
+ EXPECT_EQ(194, volume_.GetMicVolume());
+
+ EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(-40), Return(true)));
+ CallProcess(1);
+ EXPECT_EQ(137, volume_.GetMicVolume());
+
+ EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(-40), Return(true)));
+ CallProcess(1);
+ EXPECT_EQ(88, volume_.GetMicVolume());
+
+ EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(-40), Return(true)));
+ CallProcess(1);
+ EXPECT_EQ(54, volume_.GetMicVolume());
+
+ EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(-40), Return(true)));
+ CallProcess(1);
+ EXPECT_EQ(33, volume_.GetMicVolume());
+
+ EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(-40), Return(true)));
+ CallProcess(1);
+ EXPECT_EQ(18, volume_.GetMicVolume());
+
+ // Won't go lower than the minimum.
+ EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(-40), Return(true)));
+ CallProcess(1);
+ EXPECT_EQ(12, volume_.GetMicVolume());
+}
+
+TEST_F(AgcManagerDirectTest, CompressorStepsTowardsTarget) {
+ FirstProcess();
+
+ // Compressor default; no call to set_compression_gain_db.
+ EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(5), Return(true)))
+ .WillRepeatedly(Return(false));
+ EXPECT_CALL(gctrl_, set_compression_gain_db(_)).Times(0);
+ CallProcess(20);
+
+ // Moves slowly upwards.
+ EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(9), Return(true)))
+ .WillRepeatedly(Return(false));
+ EXPECT_CALL(gctrl_, set_compression_gain_db(_)).Times(0);
+ CallProcess(19);
+ EXPECT_CALL(gctrl_, set_compression_gain_db(8)).WillOnce(Return(0));
+ CallProcess(1);
+
+ EXPECT_CALL(gctrl_, set_compression_gain_db(_)).Times(0);
+ CallProcess(19);
+ EXPECT_CALL(gctrl_, set_compression_gain_db(9)).WillOnce(Return(0));
+ CallProcess(1);
+
+ EXPECT_CALL(gctrl_, set_compression_gain_db(_)).Times(0);
+ CallProcess(20);
+
+ // Moves slowly downward, then reverses before reaching the original target.
+ EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(5), Return(true)))
+ .WillRepeatedly(Return(false));
+ EXPECT_CALL(gctrl_, set_compression_gain_db(_)).Times(0);
+ CallProcess(19);
+ EXPECT_CALL(gctrl_, set_compression_gain_db(8)).WillOnce(Return(0));
+ CallProcess(1);
+
+ EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(9), Return(true)))
+ .WillRepeatedly(Return(false));
+ EXPECT_CALL(gctrl_, set_compression_gain_db(_)).Times(0);
+ CallProcess(19);
+ EXPECT_CALL(gctrl_, set_compression_gain_db(9)).WillOnce(Return(0));
+ CallProcess(1);
+
+ EXPECT_CALL(gctrl_, set_compression_gain_db(_)).Times(0);
+ CallProcess(20);
+}
+
+TEST_F(AgcManagerDirectTest, CompressorErrorIsDeemphasized) {
+ FirstProcess();
+
+ EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(10), Return(true)))
+ .WillRepeatedly(Return(false));
+ CallProcess(19);
+ EXPECT_CALL(gctrl_, set_compression_gain_db(8)).WillOnce(Return(0));
+ CallProcess(20);
+ EXPECT_CALL(gctrl_, set_compression_gain_db(9)).WillOnce(Return(0));
+ CallProcess(1);
+ EXPECT_CALL(gctrl_, set_compression_gain_db(_)).Times(0);
+ CallProcess(20);
+
+ EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(0), Return(true)))
+ .WillRepeatedly(Return(false));
+ CallProcess(19);
+ EXPECT_CALL(gctrl_, set_compression_gain_db(8)).WillOnce(Return(0));
+ CallProcess(20);
+ EXPECT_CALL(gctrl_, set_compression_gain_db(7)).WillOnce(Return(0));
+ CallProcess(20);
+ EXPECT_CALL(gctrl_, set_compression_gain_db(6)).WillOnce(Return(0));
+ CallProcess(1);
+ EXPECT_CALL(gctrl_, set_compression_gain_db(_)).Times(0);
+ CallProcess(20);
+}
+
+TEST_F(AgcManagerDirectTest, CompressorReachesMaximum) {
+ FirstProcess();
+
+ EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(10), Return(true)))
+ .WillOnce(DoAll(SetArgPointee<0>(10), Return(true)))
+ .WillOnce(DoAll(SetArgPointee<0>(10), Return(true)))
+ .WillOnce(DoAll(SetArgPointee<0>(10), Return(true)))
+ .WillRepeatedly(Return(false));
+ CallProcess(19);
+ EXPECT_CALL(gctrl_, set_compression_gain_db(8)).WillOnce(Return(0));
+ CallProcess(20);
+ EXPECT_CALL(gctrl_, set_compression_gain_db(9)).WillOnce(Return(0));
+ CallProcess(20);
+ EXPECT_CALL(gctrl_, set_compression_gain_db(10)).WillOnce(Return(0));
+ CallProcess(20);
+ EXPECT_CALL(gctrl_, set_compression_gain_db(11)).WillOnce(Return(0));
+ CallProcess(20);
+ EXPECT_CALL(gctrl_, set_compression_gain_db(12)).WillOnce(Return(0));
+ CallProcess(1);
+}
+
+TEST_F(AgcManagerDirectTest, CompressorReachesMinimum) {
+ FirstProcess();
+
+ EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(0), Return(true)))
+ .WillOnce(DoAll(SetArgPointee<0>(0), Return(true)))
+ .WillOnce(DoAll(SetArgPointee<0>(0), Return(true)))
+ .WillOnce(DoAll(SetArgPointee<0>(0), Return(true)))
+ .WillRepeatedly(Return(false));
+ CallProcess(19);
+ EXPECT_CALL(gctrl_, set_compression_gain_db(6)).WillOnce(Return(0));
+ CallProcess(20);
+ EXPECT_CALL(gctrl_, set_compression_gain_db(5)).WillOnce(Return(0));
+ CallProcess(20);
+ EXPECT_CALL(gctrl_, set_compression_gain_db(4)).WillOnce(Return(0));
+ CallProcess(20);
+ EXPECT_CALL(gctrl_, set_compression_gain_db(3)).WillOnce(Return(0));
+ CallProcess(20);
+ EXPECT_CALL(gctrl_, set_compression_gain_db(2)).WillOnce(Return(0));
+ CallProcess(1);
+}
+
+TEST_F(AgcManagerDirectTest, NoActionWhileMuted) {
+ manager_.SetCaptureMuted(true);
+ manager_.Process(nullptr, kSamplesPerChannel, kSampleRateHz);
+}
+
+TEST_F(AgcManagerDirectTest, UnmutingChecksVolumeWithoutRaising) {
+ FirstProcess();
+
+ manager_.SetCaptureMuted(true);
+ manager_.SetCaptureMuted(false);
+ ExpectCheckVolumeAndReset(127);
+ // SetMicVolume should not be called.
+ EXPECT_CALL(*agc_, GetRmsErrorDb(_)).WillOnce(Return(false));
+ CallProcess(1);
+ EXPECT_EQ(127, volume_.GetMicVolume());
+}
+
+TEST_F(AgcManagerDirectTest, UnmutingRaisesTooLowVolume) {
+ FirstProcess();
+
+ manager_.SetCaptureMuted(true);
+ manager_.SetCaptureMuted(false);
+ ExpectCheckVolumeAndReset(11);
+ EXPECT_CALL(*agc_, GetRmsErrorDb(_)).WillOnce(Return(false));
+ CallProcess(1);
+ EXPECT_EQ(12, volume_.GetMicVolume());
+}
+
+TEST_F(AgcManagerDirectTest, ManualLevelChangeResultsInNoSetMicCall) {
+ FirstProcess();
+
+ // Change outside of compressor's range, which would normally trigger a call
+ // to SetMicVolume.
+ EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(11), Return(true)));
+ // GetMicVolume returns a value outside of the quantization slack, indicating
+ // a manual volume change.
+ volume_.SetMicVolume(154);
+ // SetMicVolume should not be called.
+ EXPECT_CALL(*agc_, Reset()).Times(1);
+ CallProcess(1);
+ EXPECT_EQ(154, volume_.GetMicVolume());
+
+ // Do the same thing, except downwards now.
+ EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(-1), Return(true)));
+ volume_.SetMicVolume(100);
+ EXPECT_CALL(*agc_, Reset()).Times(1);
+ CallProcess(1);
+ EXPECT_EQ(100, volume_.GetMicVolume());
+
+ // And finally verify the AGC continues working without a manual change.
+ EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(-1), Return(true)));
+ CallProcess(1);
+ EXPECT_EQ(99, volume_.GetMicVolume());
+}
+
+TEST_F(AgcManagerDirectTest, RecoveryAfterManualLevelChangeFromMax) {
+ FirstProcess();
+
+ // Force the mic up to max volume. Takes a few steps due to the residual
+ // gain limitation.
+ EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+ .WillRepeatedly(DoAll(SetArgPointee<0>(30), Return(true)));
+ CallProcess(1);
+ EXPECT_EQ(183, volume_.GetMicVolume());
+ CallProcess(1);
+ EXPECT_EQ(243, volume_.GetMicVolume());
+ CallProcess(1);
+ EXPECT_EQ(255, volume_.GetMicVolume());
+
+ // Manual change does not result in SetMicVolume call.
+ EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(-1), Return(true)));
+ volume_.SetMicVolume(50);
+ EXPECT_CALL(*agc_, Reset()).Times(1);
+ CallProcess(1);
+ EXPECT_EQ(50, volume_.GetMicVolume());
+
+ // Continues working as usual afterwards.
+ EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(20), Return(true)));
+ CallProcess(1);
+ EXPECT_EQ(69, volume_.GetMicVolume());
+}
+
+TEST_F(AgcManagerDirectTest, RecoveryAfterManualLevelChangeBelowMin) {
+ FirstProcess();
+
+ // Manual change below min.
+ EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(-1), Return(true)));
+ // Don't set to zero, which will cause AGC to take no action.
+ volume_.SetMicVolume(1);
+ EXPECT_CALL(*agc_, Reset()).Times(1);
+ CallProcess(1);
+ EXPECT_EQ(1, volume_.GetMicVolume());
+
+ // Continues working as usual afterwards.
+ EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(11), Return(true)));
+ CallProcess(1);
+ EXPECT_EQ(2, volume_.GetMicVolume());
+
+ EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(30), Return(true)));
+ CallProcess(1);
+ EXPECT_EQ(11, volume_.GetMicVolume());
+
+ EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(20), Return(true)));
+ CallProcess(1);
+ EXPECT_EQ(18, volume_.GetMicVolume());
+}
+
+TEST_F(AgcManagerDirectTest, NoClippingHasNoImpact) {
+ FirstProcess();
+
+ EXPECT_CALL(*agc_, AnalyzePreproc(_, _)).WillRepeatedly(Return(0));
+ CallPreProc(100);
+ EXPECT_EQ(128, volume_.GetMicVolume());
+}
+
+TEST_F(AgcManagerDirectTest, ClippingUnderThresholdHasNoImpact) {
+ FirstProcess();
+
+ EXPECT_CALL(*agc_, AnalyzePreproc(_, _)).WillOnce(Return(0.099));
+ CallPreProc(1);
+ EXPECT_EQ(128, volume_.GetMicVolume());
+}
+
+TEST_F(AgcManagerDirectTest, ClippingLowersVolume) {
+ SetVolumeAndProcess(255);
+
+ EXPECT_CALL(*agc_, AnalyzePreproc(_, _)).WillOnce(Return(0.101));
+ EXPECT_CALL(*agc_, Reset()).Times(1);
+ CallPreProc(1);
+ EXPECT_EQ(240, volume_.GetMicVolume());
+}
+
+TEST_F(AgcManagerDirectTest, WaitingPeriodBetweenClippingChecks) {
+ SetVolumeAndProcess(255);
+
+ EXPECT_CALL(*agc_, AnalyzePreproc(_, _))
+ .WillOnce(Return(kAboveClippedThreshold));
+ EXPECT_CALL(*agc_, Reset()).Times(1);
+ CallPreProc(1);
+ EXPECT_EQ(240, volume_.GetMicVolume());
+
+ EXPECT_CALL(*agc_, AnalyzePreproc(_, _))
+ .WillRepeatedly(Return(kAboveClippedThreshold));
+ EXPECT_CALL(*agc_, Reset()).Times(0);
+ CallPreProc(300);
+ EXPECT_EQ(240, volume_.GetMicVolume());
+
+ EXPECT_CALL(*agc_, AnalyzePreproc(_, _))
+ .WillOnce(Return(kAboveClippedThreshold));
+ EXPECT_CALL(*agc_, Reset()).Times(1);
+ CallPreProc(1);
+ EXPECT_EQ(225, volume_.GetMicVolume());
+}
+
+TEST_F(AgcManagerDirectTest, ClippingLoweringIsLimited) {
+ SetVolumeAndProcess(180);
+
+ EXPECT_CALL(*agc_, AnalyzePreproc(_, _))
+ .WillOnce(Return(kAboveClippedThreshold));
+ EXPECT_CALL(*agc_, Reset()).Times(1);
+ CallPreProc(1);
+ EXPECT_EQ(170, volume_.GetMicVolume());
+
+ EXPECT_CALL(*agc_, AnalyzePreproc(_, _))
+ .WillRepeatedly(Return(kAboveClippedThreshold));
+ EXPECT_CALL(*agc_, Reset()).Times(0);
+ CallPreProc(1000);
+ EXPECT_EQ(170, volume_.GetMicVolume());
+}
+
+TEST_F(AgcManagerDirectTest, ClippingMaxIsRespectedWhenEqualToLevel) {
+ SetVolumeAndProcess(255);
+
+ EXPECT_CALL(*agc_, AnalyzePreproc(_, _))
+ .WillOnce(Return(kAboveClippedThreshold));
+ EXPECT_CALL(*agc_, Reset()).Times(1);
+ CallPreProc(1);
+ EXPECT_EQ(240, volume_.GetMicVolume());
+
+ EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+ .WillRepeatedly(DoAll(SetArgPointee<0>(30), Return(true)));
+ CallProcess(10);
+ EXPECT_EQ(240, volume_.GetMicVolume());
+}
+
+TEST_F(AgcManagerDirectTest, ClippingMaxIsRespectedWhenHigherThanLevel) {
+ SetVolumeAndProcess(200);
+
+ EXPECT_CALL(*agc_, AnalyzePreproc(_, _))
+ .WillOnce(Return(kAboveClippedThreshold));
+ EXPECT_CALL(*agc_, Reset()).Times(1);
+ CallPreProc(1);
+ EXPECT_EQ(185, volume_.GetMicVolume());
+
+ EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+ .WillRepeatedly(DoAll(SetArgPointee<0>(40), Return(true)));
+ CallProcess(1);
+ EXPECT_EQ(240, volume_.GetMicVolume());
+ CallProcess(10);
+ EXPECT_EQ(240, volume_.GetMicVolume());
+}
+
+TEST_F(AgcManagerDirectTest, MaxCompressionIsIncreasedAfterClipping) {
+ SetVolumeAndProcess(210);
+
+ EXPECT_CALL(*agc_, AnalyzePreproc(_, _))
+ .WillOnce(Return(kAboveClippedThreshold));
+ EXPECT_CALL(*agc_, Reset()).Times(1);
+ CallPreProc(1);
+ EXPECT_EQ(195, volume_.GetMicVolume());
+
+ EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(11), Return(true)))
+ .WillOnce(DoAll(SetArgPointee<0>(11), Return(true)))
+ .WillOnce(DoAll(SetArgPointee<0>(11), Return(true)))
+ .WillOnce(DoAll(SetArgPointee<0>(11), Return(true)))
+ .WillOnce(DoAll(SetArgPointee<0>(11), Return(true)))
+ .WillRepeatedly(Return(false));
+ CallProcess(19);
+ EXPECT_CALL(gctrl_, set_compression_gain_db(8)).WillOnce(Return(0));
+ CallProcess(20);
+ EXPECT_CALL(gctrl_, set_compression_gain_db(9)).WillOnce(Return(0));
+ CallProcess(20);
+ EXPECT_CALL(gctrl_, set_compression_gain_db(10)).WillOnce(Return(0));
+ CallProcess(20);
+ EXPECT_CALL(gctrl_, set_compression_gain_db(11)).WillOnce(Return(0));
+ CallProcess(20);
+ EXPECT_CALL(gctrl_, set_compression_gain_db(12)).WillOnce(Return(0));
+ CallProcess(20);
+ EXPECT_CALL(gctrl_, set_compression_gain_db(13)).WillOnce(Return(0));
+ CallProcess(1);
+
+ // Continue clipping until we hit the maximum surplus compression.
+ CallPreProc(300);
+ EXPECT_CALL(*agc_, AnalyzePreproc(_, _))
+ .WillOnce(Return(kAboveClippedThreshold));
+ EXPECT_CALL(*agc_, Reset()).Times(1);
+ CallPreProc(1);
+ EXPECT_EQ(180, volume_.GetMicVolume());
+
+ CallPreProc(300);
+ EXPECT_CALL(*agc_, AnalyzePreproc(_, _))
+ .WillOnce(Return(kAboveClippedThreshold));
+ EXPECT_CALL(*agc_, Reset()).Times(1);
+ CallPreProc(1);
+ EXPECT_EQ(170, volume_.GetMicVolume());
+
+ // Current level is now at the minimum, but the maximum allowed level still
+ // has more to decrease.
+ CallPreProc(300);
+ EXPECT_CALL(*agc_, AnalyzePreproc(_, _))
+ .WillOnce(Return(kAboveClippedThreshold));
+ CallPreProc(1);
+
+ CallPreProc(300);
+ EXPECT_CALL(*agc_, AnalyzePreproc(_, _))
+ .WillOnce(Return(kAboveClippedThreshold));
+ CallPreProc(1);
+
+ CallPreProc(300);
+ EXPECT_CALL(*agc_, AnalyzePreproc(_, _))
+ .WillOnce(Return(kAboveClippedThreshold));
+ CallPreProc(1);
+
+ EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(16), Return(true)))
+ .WillOnce(DoAll(SetArgPointee<0>(16), Return(true)))
+ .WillOnce(DoAll(SetArgPointee<0>(16), Return(true)))
+ .WillOnce(DoAll(SetArgPointee<0>(16), Return(true)))
+ .WillRepeatedly(Return(false));
+ CallProcess(19);
+ EXPECT_CALL(gctrl_, set_compression_gain_db(14)).WillOnce(Return(0));
+ CallProcess(20);
+ EXPECT_CALL(gctrl_, set_compression_gain_db(15)).WillOnce(Return(0));
+ CallProcess(20);
+ EXPECT_CALL(gctrl_, set_compression_gain_db(16)).WillOnce(Return(0));
+ CallProcess(20);
+ EXPECT_CALL(gctrl_, set_compression_gain_db(17)).WillOnce(Return(0));
+ CallProcess(20);
+ EXPECT_CALL(gctrl_, set_compression_gain_db(18)).WillOnce(Return(0));
+ CallProcess(1);
+}
+
+TEST_F(AgcManagerDirectTest, UserCanRaiseVolumeAfterClipping) {
+ SetVolumeAndProcess(225);
+
+ EXPECT_CALL(*agc_, AnalyzePreproc(_, _))
+ .WillOnce(Return(kAboveClippedThreshold));
+ EXPECT_CALL(*agc_, Reset()).Times(1);
+ CallPreProc(1);
+ EXPECT_EQ(210, volume_.GetMicVolume());
+
+ // High enough error to trigger a volume check.
+ EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(14), Return(true)));
+ // User changed the volume.
+ volume_.SetMicVolume(250);
+ EXPECT_CALL(*agc_, Reset()).Times(1);
+ CallProcess(1);
+ EXPECT_EQ(250, volume_.GetMicVolume());
+
+ // Move down...
+ EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(-10), Return(true)));
+ CallProcess(1);
+ EXPECT_EQ(210, volume_.GetMicVolume());
+ // And back up to the new max established by the user.
+ EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(40), Return(true)));
+ CallProcess(1);
+ EXPECT_EQ(250, volume_.GetMicVolume());
+ // Will not move above new maximum.
+ EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+ .WillOnce(DoAll(SetArgPointee<0>(30), Return(true)));
+ CallProcess(1);
+ EXPECT_EQ(250, volume_.GetMicVolume());
+}
+
+TEST_F(AgcManagerDirectTest, ClippingDoesNotPullLowVolumeBackUp) {
+ SetVolumeAndProcess(80);
+
+ EXPECT_CALL(*agc_, AnalyzePreproc(_, _))
+ .WillOnce(Return(kAboveClippedThreshold));
+ EXPECT_CALL(*agc_, Reset()).Times(0);
+ int initial_volume = volume_.GetMicVolume();
+ CallPreProc(1);
+ EXPECT_EQ(initial_volume, volume_.GetMicVolume());
+}
+
+TEST_F(AgcManagerDirectTest, TakesNoActionOnZeroMicVolume) {
+ FirstProcess();
+
+ EXPECT_CALL(*agc_, GetRmsErrorDb(_))
+ .WillRepeatedly(DoAll(SetArgPointee<0>(30), Return(true)));
+ volume_.SetMicVolume(0);
+ CallProcess(10);
+ EXPECT_EQ(0, volume_.GetMicVolume());
+}
+
+} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_processing/agc/legacy/analog_agc.c b/chromium/third_party/webrtc/modules/audio_processing/agc/legacy/analog_agc.c
index 73adb5d3d2f..be644d9701e 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/agc/legacy/analog_agc.c
+++ b/chromium/third_party/webrtc/modules/audio_processing/agc/legacy/analog_agc.c
@@ -41,7 +41,7 @@ static const int16_t kOffset2[8] = {18432, 18379, 18290, 18177, 18052, 17920, 17
static const int16_t kMuteGuardTimeMs = 8000;
static const int16_t kInitCheck = 42;
-static const int16_t kNumSubframes = 10;
+static const size_t kNumSubframes = 10;
/* Default settings if config is not used */
#define AGC_DEFAULT_TARGET_LEVEL 3
@@ -112,13 +112,14 @@ static const int32_t kTargetLevelTable[64] = {134209536, 106606424, 84680493, 67
6726, 5343, 4244, 3371, 2678, 2127, 1690, 1342, 1066, 847, 673, 534, 424, 337, 268,
213, 169, 134, 107, 85, 67};
-int WebRtcAgc_AddMic(void *state, int16_t* const* in_mic, int16_t num_bands,
- int16_t samples)
+int WebRtcAgc_AddMic(void *state, int16_t* const* in_mic, size_t num_bands,
+ size_t samples)
{
int32_t nrg, max_nrg, sample, tmp32;
int32_t *ptr;
uint16_t targetGainIdx, gain;
- int16_t i, n, L, tmp16, tmp_speech[16];
+ size_t i;
+ int16_t n, L, tmp16, tmp_speech[16];
LegacyAgc* stt;
stt = (LegacyAgc*)state;
@@ -164,7 +165,7 @@ int WebRtcAgc_AddMic(void *state, int16_t* const* in_mic, int16_t num_bands,
for (i = 0; i < samples; i++)
{
- int j;
+ size_t j;
for (j = 0; j < num_bands; ++j)
{
sample = (in_mic[j][i] * gain) >> 12;
@@ -249,7 +250,7 @@ int WebRtcAgc_AddMic(void *state, int16_t* const* in_mic, int16_t num_bands,
return 0;
}
-int WebRtcAgc_AddFarend(void *state, const int16_t *in_far, int16_t samples)
+int WebRtcAgc_AddFarend(void *state, const int16_t *in_far, size_t samples)
{
LegacyAgc* stt;
stt = (LegacyAgc*)state;
@@ -280,16 +281,16 @@ int WebRtcAgc_AddFarend(void *state, const int16_t *in_far, int16_t samples)
}
int WebRtcAgc_VirtualMic(void *agcInst, int16_t* const* in_near,
- int16_t num_bands, int16_t samples, int32_t micLevelIn,
+ size_t num_bands, size_t samples, int32_t micLevelIn,
int32_t *micLevelOut)
{
int32_t tmpFlt, micLevelTmp, gainIdx;
uint16_t gain;
- int16_t ii, j;
+ size_t ii, j;
LegacyAgc* stt;
uint32_t nrg;
- int16_t sampleCntr;
+ size_t sampleCntr;
uint32_t frameNrg = 0;
uint32_t frameNrgLimit = 5500;
int16_t numZeroCrossing = 0;
@@ -1132,7 +1133,7 @@ int32_t WebRtcAgc_ProcessAnalog(void *state, int32_t inMicLevel,
}
int WebRtcAgc_Process(void *agcInst, const int16_t* const* in_near,
- int16_t num_bands, int16_t samples,
+ size_t num_bands, size_t samples,
int16_t* const* out, int32_t inMicLevel,
int32_t *outMicLevel, int16_t echo,
uint8_t *saturationWarning)
diff --git a/chromium/third_party/webrtc/modules/audio_processing/agc/legacy/digital_agc.c b/chromium/third_party/webrtc/modules/audio_processing/agc/legacy/digital_agc.c
index 4619b88ae55..aeafb65c78d 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/agc/legacy/digital_agc.c
+++ b/chromium/third_party/webrtc/modules/audio_processing/agc/legacy/digital_agc.c
@@ -283,7 +283,7 @@ int32_t WebRtcAgc_InitDigital(DigitalAgc* stt, int16_t agcMode) {
int32_t WebRtcAgc_AddFarendToDigital(DigitalAgc* stt,
const int16_t* in_far,
- int16_t nrSamples) {
+ size_t nrSamples) {
assert(stt != NULL);
// VAD for far end
WebRtcAgc_ProcessVad(&stt->vadFarend, in_far, nrSamples);
@@ -293,7 +293,7 @@ int32_t WebRtcAgc_AddFarendToDigital(DigitalAgc* stt,
int32_t WebRtcAgc_ProcessDigital(DigitalAgc* stt,
const int16_t* const* in_near,
- int16_t num_bands,
+ size_t num_bands,
int16_t* const* out,
uint32_t FS,
int16_t lowlevelSignal) {
@@ -310,8 +310,9 @@ int32_t WebRtcAgc_ProcessDigital(DigitalAgc* stt,
int16_t zeros = 0, zeros_fast, frac = 0;
int16_t decay;
int16_t gate, gain_adj;
- int16_t k, n, i;
- int16_t L, L2; // samples/subframe
+ int16_t k;
+ size_t n, i, L;
+ int16_t L2; // samples/subframe
// determine number of samples per ms
if (FS == 8000)
@@ -632,7 +633,7 @@ void WebRtcAgc_InitVad(AgcVad* state) {
int16_t WebRtcAgc_ProcessVad(AgcVad* state, // (i) VAD state
const int16_t* in, // (i) Speech signal
- int16_t nrSamples) // (i) number of samples
+ size_t nrSamples) // (i) number of samples
{
int32_t out, nrg, tmp32, tmp32b;
uint16_t tmpU16;
diff --git a/chromium/third_party/webrtc/modules/audio_processing/agc/legacy/digital_agc.h b/chromium/third_party/webrtc/modules/audio_processing/agc/legacy/digital_agc.h
index b8314d98915..819844d774c 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/agc/legacy/digital_agc.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/agc/legacy/digital_agc.h
@@ -56,20 +56,20 @@ int32_t WebRtcAgc_InitDigital(DigitalAgc* digitalAgcInst, int16_t agcMode);
int32_t WebRtcAgc_ProcessDigital(DigitalAgc* digitalAgcInst,
const int16_t* const* inNear,
- int16_t num_bands,
+ size_t num_bands,
int16_t* const* out,
uint32_t FS,
int16_t lowLevelSignal);
int32_t WebRtcAgc_AddFarendToDigital(DigitalAgc* digitalAgcInst,
const int16_t* inFar,
- int16_t nrSamples);
+ size_t nrSamples);
void WebRtcAgc_InitVad(AgcVad* vadInst);
int16_t WebRtcAgc_ProcessVad(AgcVad* vadInst, // (i) VAD state
const int16_t* in, // (i) Speech signal
- int16_t nrSamples); // (i) number of samples
+ size_t nrSamples); // (i) number of samples
int32_t WebRtcAgc_CalculateGainTable(int32_t *gainTable, // Q16
int16_t compressionGaindB, // Q0 (in dB)
diff --git a/chromium/third_party/webrtc/modules/audio_processing/agc/legacy/gain_control.h b/chromium/third_party/webrtc/modules/audio_processing/agc/legacy/gain_control.h
index 0ccba76c4bc..08c1988f011 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/agc/legacy/gain_control.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/agc/legacy/gain_control.h
@@ -66,7 +66,7 @@ extern "C"
*/
int WebRtcAgc_AddFarend(void* agcInst,
const int16_t* inFar,
- int16_t samples);
+ size_t samples);
/*
* This function processes a 10 ms frame of microphone speech to determine
@@ -90,8 +90,8 @@ int WebRtcAgc_AddFarend(void* agcInst,
*/
int WebRtcAgc_AddMic(void* agcInst,
int16_t* const* inMic,
- int16_t num_bands,
- int16_t samples);
+ size_t num_bands,
+ size_t samples);
/*
* This function replaces the analog microphone with a virtual one.
@@ -118,8 +118,8 @@ int WebRtcAgc_AddMic(void* agcInst,
*/
int WebRtcAgc_VirtualMic(void* agcInst,
int16_t* const* inMic,
- int16_t num_bands,
- int16_t samples,
+ size_t num_bands,
+ size_t samples,
int32_t micLevelIn,
int32_t* micLevelOut);
@@ -159,8 +159,8 @@ int WebRtcAgc_VirtualMic(void* agcInst,
*/
int WebRtcAgc_Process(void* agcInst,
const int16_t* const* inNear,
- int16_t num_bands,
- int16_t samples,
+ size_t num_bands,
+ size_t samples,
int16_t* const* out,
int32_t inMicLevel,
int32_t* outMicLevel,
diff --git a/chromium/third_party/webrtc/modules/audio_processing/agc/mock_agc.h b/chromium/third_party/webrtc/modules/audio_processing/agc/mock_agc.h
index 1c36a055eca..13dbd2edd53 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/agc/mock_agc.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/agc/mock_agc.h
@@ -20,8 +20,8 @@ namespace webrtc {
class MockAgc : public Agc {
public:
- MOCK_METHOD2(AnalyzePreproc, float(const int16_t* audio, int length));
- MOCK_METHOD3(Process, int(const int16_t* audio, int length,
+ MOCK_METHOD2(AnalyzePreproc, float(const int16_t* audio, size_t length));
+ MOCK_METHOD3(Process, int(const int16_t* audio, size_t length,
int sample_rate_hz));
MOCK_METHOD1(GetRmsErrorDb, bool(int* error));
MOCK_METHOD0(Reset, void());
diff --git a/chromium/third_party/webrtc/modules/audio_processing/audio_buffer.cc b/chromium/third_party/webrtc/modules/audio_processing/audio_buffer.cc
index 04dcaea799d..81790a159be 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/audio_buffer.cc
+++ b/chromium/third_party/webrtc/modules/audio_processing/audio_buffer.cc
@@ -19,62 +19,35 @@
namespace webrtc {
namespace {
-const int kSamplesPer16kHzChannel = 160;
-const int kSamplesPer32kHzChannel = 320;
-const int kSamplesPer48kHzChannel = 480;
-
-bool HasKeyboardChannel(AudioProcessing::ChannelLayout layout) {
- switch (layout) {
- case AudioProcessing::kMono:
- case AudioProcessing::kStereo:
- return false;
- case AudioProcessing::kMonoAndKeyboard:
- case AudioProcessing::kStereoAndKeyboard:
- return true;
+const size_t kSamplesPer16kHzChannel = 160;
+const size_t kSamplesPer32kHzChannel = 320;
+const size_t kSamplesPer48kHzChannel = 480;
+
+int KeyboardChannelIndex(const StreamConfig& stream_config) {
+ if (!stream_config.has_keyboard()) {
+ assert(false);
+ return -1;
}
- assert(false);
- return false;
-}
-
-int KeyboardChannelIndex(AudioProcessing::ChannelLayout layout) {
- switch (layout) {
- case AudioProcessing::kMono:
- case AudioProcessing::kStereo:
- assert(false);
- return -1;
- case AudioProcessing::kMonoAndKeyboard:
- return 1;
- case AudioProcessing::kStereoAndKeyboard:
- return 2;
- }
- assert(false);
- return -1;
-}
-template <typename T>
-void StereoToMono(const T* left, const T* right, T* out,
- int num_frames) {
- for (int i = 0; i < num_frames; ++i)
- out[i] = (left[i] + right[i]) / 2;
+ return stream_config.num_channels();
}
-int NumBandsFromSamplesPerChannel(int num_frames) {
- int num_bands = 1;
+size_t NumBandsFromSamplesPerChannel(size_t num_frames) {
+ size_t num_bands = 1;
if (num_frames == kSamplesPer32kHzChannel ||
num_frames == kSamplesPer48kHzChannel) {
- num_bands = rtc::CheckedDivExact(num_frames,
- static_cast<int>(kSamplesPer16kHzChannel));
+ num_bands = rtc::CheckedDivExact(num_frames, kSamplesPer16kHzChannel);
}
return num_bands;
}
} // namespace
-AudioBuffer::AudioBuffer(int input_num_frames,
+AudioBuffer::AudioBuffer(size_t input_num_frames,
int num_input_channels,
- int process_num_frames,
+ size_t process_num_frames,
int num_process_channels,
- int output_num_frames)
+ size_t output_num_frames)
: input_num_frames_(input_num_frames),
num_input_channels_(num_input_channels),
proc_num_frames_(process_num_frames),
@@ -91,7 +64,7 @@ AudioBuffer::AudioBuffer(int input_num_frames,
assert(input_num_frames_ > 0);
assert(proc_num_frames_ > 0);
assert(output_num_frames_ > 0);
- assert(num_input_channels_ > 0 && num_input_channels_ <= 2);
+ assert(num_input_channels_ > 0);
assert(num_proc_channels_ > 0 && num_proc_channels_ <= num_input_channels_);
if (input_num_frames_ != proc_num_frames_ ||
@@ -130,29 +103,28 @@ AudioBuffer::AudioBuffer(int input_num_frames,
AudioBuffer::~AudioBuffer() {}
void AudioBuffer::CopyFrom(const float* const* data,
- int num_frames,
- AudioProcessing::ChannelLayout layout) {
- assert(num_frames == input_num_frames_);
- assert(ChannelsFromLayout(layout) == num_input_channels_);
+ const StreamConfig& stream_config) {
+ assert(stream_config.num_frames() == input_num_frames_);
+ assert(stream_config.num_channels() == num_input_channels_);
InitForNewData();
// Initialized lazily because there's a different condition in
// DeinterleaveFrom.
- if ((num_input_channels_ == 2 && num_proc_channels_ == 1) && !input_buffer_) {
+ const bool need_to_downmix =
+ num_input_channels_ > 1 && num_proc_channels_ == 1;
+ if (need_to_downmix && !input_buffer_) {
input_buffer_.reset(
new IFChannelBuffer(input_num_frames_, num_proc_channels_));
}
- if (HasKeyboardChannel(layout)) {
- keyboard_data_ = data[KeyboardChannelIndex(layout)];
+ if (stream_config.has_keyboard()) {
+ keyboard_data_ = data[KeyboardChannelIndex(stream_config)];
}
// Downmix.
const float* const* data_ptr = data;
- if (num_input_channels_ == 2 && num_proc_channels_ == 1) {
- StereoToMono(data[0],
- data[1],
- input_buffer_->fbuf()->channels()[0],
- input_num_frames_);
+ if (need_to_downmix) {
+ DownmixToMono<float, float>(data, input_num_frames_, num_input_channels_,
+ input_buffer_->fbuf()->channels()[0]);
data_ptr = input_buffer_->fbuf_const()->channels();
}
@@ -175,11 +147,10 @@ void AudioBuffer::CopyFrom(const float* const* data,
}
}
-void AudioBuffer::CopyTo(int num_frames,
- AudioProcessing::ChannelLayout layout,
+void AudioBuffer::CopyTo(const StreamConfig& stream_config,
float* const* data) {
- assert(num_frames == output_num_frames_);
- assert(ChannelsFromLayout(layout) == num_channels_);
+ assert(stream_config.num_frames() == output_num_frames_);
+ assert(stream_config.num_channels() == num_channels_);
// Convert to the float range.
float* const* data_ptr = data;
@@ -327,9 +298,6 @@ const ChannelBuffer<float>* AudioBuffer::split_data_f() const {
}
const int16_t* AudioBuffer::mixed_low_pass_data() {
- // Currently only mixing stereo to mono is supported.
- assert(num_proc_channels_ == 1 || num_proc_channels_ == 2);
-
if (num_proc_channels_ == 1) {
return split_bands_const(0)[kBand0To8kHz];
}
@@ -339,10 +307,10 @@ const int16_t* AudioBuffer::mixed_low_pass_data() {
mixed_low_pass_channels_.reset(
new ChannelBuffer<int16_t>(num_split_frames_, 1));
}
- StereoToMono(split_bands_const(0)[kBand0To8kHz],
- split_bands_const(1)[kBand0To8kHz],
- mixed_low_pass_channels_->channels()[0],
- num_split_frames_);
+
+ DownmixToMono<int16_t, int32_t>(split_channels_const(kBand0To8kHz),
+ num_split_frames_, num_channels_,
+ mixed_low_pass_channels_->channels()[0]);
mixed_low_pass_valid_ = true;
}
return mixed_low_pass_channels_->channels()[0];
@@ -376,20 +344,20 @@ void AudioBuffer::set_num_channels(int num_channels) {
num_channels_ = num_channels;
}
-int AudioBuffer::num_frames() const {
+size_t AudioBuffer::num_frames() const {
return proc_num_frames_;
}
-int AudioBuffer::num_frames_per_band() const {
+size_t AudioBuffer::num_frames_per_band() const {
return num_split_frames_;
}
-int AudioBuffer::num_keyboard_frames() const {
+size_t AudioBuffer::num_keyboard_frames() const {
// We don't resample the keyboard channel.
return input_num_frames_;
}
-int AudioBuffer::num_bands() const {
+size_t AudioBuffer::num_bands() const {
return num_bands_;
}
@@ -411,11 +379,10 @@ void AudioBuffer::DeinterleaveFrom(AudioFrame* frame) {
} else {
deinterleaved = input_buffer_->ibuf()->channels();
}
- if (num_input_channels_ == 2 && num_proc_channels_ == 1) {
- // Downmix directly; no explicit deinterleaving needed.
- for (int i = 0; i < input_num_frames_; ++i) {
- deinterleaved[0][i] = (frame->data_[i * 2] + frame->data_[i * 2 + 1]) / 2;
- }
+ if (num_proc_channels_ == 1) {
+ // Downmix and deinterleave simultaneously.
+ DownmixInterleavedToMono(frame->data_, input_num_frames_,
+ num_input_channels_, deinterleaved[0]);
} else {
assert(num_proc_channels_ == num_input_channels_);
Deinterleave(frame->data_,
@@ -435,21 +402,37 @@ void AudioBuffer::DeinterleaveFrom(AudioFrame* frame) {
}
}
-void AudioBuffer::InterleaveTo(AudioFrame* frame, bool data_changed) const {
- assert(proc_num_frames_ == output_num_frames_);
- assert(num_channels_ == num_input_channels_);
- assert(frame->num_channels_ == num_channels_);
- assert(frame->samples_per_channel_ == proc_num_frames_);
+void AudioBuffer::InterleaveTo(AudioFrame* frame, bool data_changed) {
frame->vad_activity_ = activity_;
-
if (!data_changed) {
return;
}
- Interleave(data_->ibuf()->channels(),
- proc_num_frames_,
- num_channels_,
- frame->data_);
+ assert(frame->num_channels_ == num_channels_ || num_channels_ == 1);
+ assert(frame->samples_per_channel_ == output_num_frames_);
+
+ // Resample if necessary.
+ IFChannelBuffer* data_ptr = data_.get();
+ if (proc_num_frames_ != output_num_frames_) {
+ if (!output_buffer_) {
+ output_buffer_.reset(
+ new IFChannelBuffer(output_num_frames_, num_channels_));
+ }
+ for (int i = 0; i < num_channels_; ++i) {
+ output_resamplers_[i]->Resample(
+ data_->fbuf()->channels()[i], proc_num_frames_,
+ output_buffer_->fbuf()->channels()[i], output_num_frames_);
+ }
+ data_ptr = output_buffer_.get();
+ }
+
+ if (frame->num_channels_ == num_channels_) {
+ Interleave(data_ptr->ibuf()->channels(), proc_num_frames_, num_channels_,
+ frame->data_);
+ } else {
+ UpmixMonoToInterleaved(data_ptr->ibuf()->channels()[0], proc_num_frames_,
+ frame->num_channels_, frame->data_);
+ }
}
void AudioBuffer::CopyLowPassToReference() {
diff --git a/chromium/third_party/webrtc/modules/audio_processing/audio_buffer.h b/chromium/third_party/webrtc/modules/audio_processing/audio_buffer.h
index 4291fb3eb99..f82ab61ecb8 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/audio_buffer.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/audio_buffer.h
@@ -33,19 +33,19 @@ enum Band {
class AudioBuffer {
public:
// TODO(ajm): Switch to take ChannelLayouts.
- AudioBuffer(int input_num_frames,
+ AudioBuffer(size_t input_num_frames,
int num_input_channels,
- int process_num_frames,
+ size_t process_num_frames,
int num_process_channels,
- int output_num_frames);
+ size_t output_num_frames);
virtual ~AudioBuffer();
int num_channels() const;
void set_num_channels(int num_channels);
- int num_frames() const;
- int num_frames_per_band() const;
- int num_keyboard_frames() const;
- int num_bands() const;
+ size_t num_frames() const;
+ size_t num_frames_per_band() const;
+ size_t num_keyboard_frames() const;
+ size_t num_bands() const;
// Returns a pointer array to the full-band channels.
// Usage:
@@ -109,15 +109,11 @@ class AudioBuffer {
void DeinterleaveFrom(AudioFrame* audioFrame);
// If |data_changed| is false, only the non-audio data members will be copied
// to |frame|.
- void InterleaveTo(AudioFrame* frame, bool data_changed) const;
+ void InterleaveTo(AudioFrame* frame, bool data_changed);
// Use for float deinterleaved data.
- void CopyFrom(const float* const* data,
- int num_frames,
- AudioProcessing::ChannelLayout layout);
- void CopyTo(int num_frames,
- AudioProcessing::ChannelLayout layout,
- float* const* data);
+ void CopyFrom(const float* const* data, const StreamConfig& stream_config);
+ void CopyTo(const StreamConfig& stream_config, float* const* data);
void CopyLowPassToReference();
// Splits the signal into different bands.
@@ -131,20 +127,20 @@ class AudioBuffer {
// The audio is passed into DeinterleaveFrom() or CopyFrom() with input
// format (samples per channel and number of channels).
- const int input_num_frames_;
+ const size_t input_num_frames_;
const int num_input_channels_;
// The audio is stored by DeinterleaveFrom() or CopyFrom() with processing
// format.
- const int proc_num_frames_;
+ const size_t proc_num_frames_;
const int num_proc_channels_;
// The audio is returned by InterleaveTo() and CopyTo() with output samples
// per channels and the current number of channels. This last one can be
// changed at any time using set_num_channels().
- const int output_num_frames_;
+ const size_t output_num_frames_;
int num_channels_;
- int num_bands_;
- int num_split_frames_;
+ size_t num_bands_;
+ size_t num_split_frames_;
bool mixed_low_pass_valid_;
bool reference_copied_;
AudioFrame::VADActivity activity_;
@@ -156,6 +152,7 @@ class AudioBuffer {
rtc::scoped_ptr<ChannelBuffer<int16_t> > mixed_low_pass_channels_;
rtc::scoped_ptr<ChannelBuffer<int16_t> > low_pass_reference_channels_;
rtc::scoped_ptr<IFChannelBuffer> input_buffer_;
+ rtc::scoped_ptr<IFChannelBuffer> output_buffer_;
rtc::scoped_ptr<ChannelBuffer<float> > process_buffer_;
ScopedVector<PushSincResampler> input_resamplers_;
ScopedVector<PushSincResampler> output_resamplers_;
diff --git a/chromium/third_party/webrtc/modules/audio_processing/audio_processing.gypi b/chromium/third_party/webrtc/modules/audio_processing/audio_processing.gypi
index a9c3ebbd964..a6e8f5cc40d 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/audio_processing.gypi
+++ b/chromium/third_party/webrtc/modules/audio_processing/audio_processing.gypi
@@ -87,6 +87,9 @@
'intelligibility/intelligibility_utils.h',
'level_estimator_impl.cc',
'level_estimator_impl.h',
+ 'logging/aec_logging.h',
+ 'logging/aec_logging_file_handling.cc',
+ 'logging/aec_logging_file_handling.h',
'noise_suppression_impl.cc',
'noise_suppression_impl.h',
'processing_component.cc',
diff --git a/chromium/third_party/webrtc/modules/audio_processing/audio_processing_impl.cc b/chromium/third_party/webrtc/modules/audio_processing/audio_processing_impl.cc
index 87b82a6a350..f3ee0a399d5 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/audio_processing_impl.cc
+++ b/chromium/third_party/webrtc/modules/audio_processing/audio_processing_impl.cc
@@ -11,11 +11,13 @@
#include "webrtc/modules/audio_processing/audio_processing_impl.h"
#include <assert.h>
+#include <algorithm>
#include "webrtc/base/checks.h"
#include "webrtc/base/platform_file.h"
-#include "webrtc/common_audio/include/audio_util.h"
+#include "webrtc/common_audio/audio_converter.h"
#include "webrtc/common_audio/channel_buffer.h"
+#include "webrtc/common_audio/include/audio_util.h"
#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
extern "C" {
#include "webrtc/modules/audio_processing/aec/aec_core.h"
@@ -28,6 +30,7 @@ extern "C" {
#include "webrtc/modules/audio_processing/echo_control_mobile_impl.h"
#include "webrtc/modules/audio_processing/gain_control_impl.h"
#include "webrtc/modules/audio_processing/high_pass_filter_impl.h"
+#include "webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.h"
#include "webrtc/modules/audio_processing/level_estimator_impl.h"
#include "webrtc/modules/audio_processing/noise_suppression_impl.h"
#include "webrtc/modules/audio_processing/processing_component.h"
@@ -48,15 +51,32 @@ extern "C" {
#endif
#endif // WEBRTC_AUDIOPROC_DEBUG_DUMP
-#define RETURN_ON_ERR(expr) \
- do { \
- int err = (expr); \
- if (err != kNoError) { \
- return err; \
- } \
+#define RETURN_ON_ERR(expr) \
+ do { \
+ int err = (expr); \
+ if (err != kNoError) { \
+ return err; \
+ } \
} while (0)
namespace webrtc {
+namespace {
+
+static bool LayoutHasKeyboard(AudioProcessing::ChannelLayout layout) {
+ switch (layout) {
+ case AudioProcessing::kMono:
+ case AudioProcessing::kStereo:
+ return false;
+ case AudioProcessing::kMonoAndKeyboard:
+ case AudioProcessing::kStereoAndKeyboard:
+ return true;
+ }
+
+ assert(false);
+ return false;
+}
+
+} // namespace
// Throughout webrtc, it's assumed that success is represented by zero.
static_assert(AudioProcessing::kNoError == 0, "kNoError must be zero");
@@ -75,9 +95,7 @@ static_assert(AudioProcessing::kNoError == 0, "kNoError must be zero");
class GainControlForNewAgc : public GainControl, public VolumeCallbacks {
public:
explicit GainControlForNewAgc(GainControlImpl* gain_control)
- : real_gain_control_(gain_control),
- volume_(0) {
- }
+ : real_gain_control_(gain_control), volume_(0) {}
// GainControl implementation.
int Enable(bool enable) override {
@@ -129,6 +147,17 @@ class GainControlForNewAgc : public GainControl, public VolumeCallbacks {
int volume_;
};
+const int AudioProcessing::kNativeSampleRatesHz[] = {
+ AudioProcessing::kSampleRate8kHz,
+ AudioProcessing::kSampleRate16kHz,
+ AudioProcessing::kSampleRate32kHz,
+ AudioProcessing::kSampleRate48kHz};
+const size_t AudioProcessing::kNumNativeSampleRates =
+ arraysize(AudioProcessing::kNativeSampleRatesHz);
+const int AudioProcessing::kMaxNativeSampleRateHz = AudioProcessing::
+ kNativeSampleRatesHz[AudioProcessing::kNumNativeSampleRates - 1];
+const int AudioProcessing::kMaxAECMSampleRateHz = kSampleRate16kHz;
+
AudioProcessing* AudioProcessing::Create() {
Config config;
return Create(config, nullptr);
@@ -166,10 +195,11 @@ AudioProcessingImpl::AudioProcessingImpl(const Config& config,
debug_file_(FileWrapper::Create()),
event_msg_(new audioproc::Event()),
#endif
- fwd_in_format_(kSampleRate16kHz, 1),
+ api_format_({{{kSampleRate16kHz, 1, false},
+ {kSampleRate16kHz, 1, false},
+ {kSampleRate16kHz, 1, false},
+ {kSampleRate16kHz, 1, false}}}),
fwd_proc_format_(kSampleRate16kHz),
- fwd_out_format_(kSampleRate16kHz, 1),
- rev_in_format_(kSampleRate16kHz, 1),
rev_proc_format_(kSampleRate16kHz, 1),
split_rate_(kSampleRate16kHz),
stream_delay_ms_(0),
@@ -195,7 +225,7 @@ AudioProcessingImpl::AudioProcessingImpl(const Config& config,
beamformer_enabled_(config.Get<Beamforming>().enabled),
beamformer_(beamformer),
array_geometry_(config.Get<Beamforming>().array_geometry),
- supports_48kHz_(config.Get<AudioProcessing48kHzSupport>().enabled) {
+ intelligibility_enabled_(config.Get<Intelligibility>().enabled) {
echo_cancellation_ = new EchoCancellationImpl(this, crit_);
component_list_.push_back(echo_cancellation_);
@@ -251,45 +281,65 @@ int AudioProcessingImpl::Initialize() {
return InitializeLocked();
}
-int AudioProcessingImpl::set_sample_rate_hz(int rate) {
- CriticalSectionScoped crit_scoped(crit_);
- return InitializeLocked(rate,
- rate,
- rev_in_format_.rate(),
- fwd_in_format_.num_channels(),
- fwd_out_format_.num_channels(),
- rev_in_format_.num_channels());
-}
-
int AudioProcessingImpl::Initialize(int input_sample_rate_hz,
int output_sample_rate_hz,
int reverse_sample_rate_hz,
ChannelLayout input_layout,
ChannelLayout output_layout,
ChannelLayout reverse_layout) {
+ const ProcessingConfig processing_config = {
+ {{input_sample_rate_hz,
+ ChannelsFromLayout(input_layout),
+ LayoutHasKeyboard(input_layout)},
+ {output_sample_rate_hz,
+ ChannelsFromLayout(output_layout),
+ LayoutHasKeyboard(output_layout)},
+ {reverse_sample_rate_hz,
+ ChannelsFromLayout(reverse_layout),
+ LayoutHasKeyboard(reverse_layout)},
+ {reverse_sample_rate_hz,
+ ChannelsFromLayout(reverse_layout),
+ LayoutHasKeyboard(reverse_layout)}}};
+
+ return Initialize(processing_config);
+}
+
+int AudioProcessingImpl::Initialize(const ProcessingConfig& processing_config) {
CriticalSectionScoped crit_scoped(crit_);
- return InitializeLocked(input_sample_rate_hz,
- output_sample_rate_hz,
- reverse_sample_rate_hz,
- ChannelsFromLayout(input_layout),
- ChannelsFromLayout(output_layout),
- ChannelsFromLayout(reverse_layout));
+ return InitializeLocked(processing_config);
}
int AudioProcessingImpl::InitializeLocked() {
- const int fwd_audio_buffer_channels = beamformer_enabled_ ?
- fwd_in_format_.num_channels() :
- fwd_out_format_.num_channels();
- render_audio_.reset(new AudioBuffer(rev_in_format_.samples_per_channel(),
- rev_in_format_.num_channels(),
- rev_proc_format_.samples_per_channel(),
- rev_proc_format_.num_channels(),
- rev_proc_format_.samples_per_channel()));
- capture_audio_.reset(new AudioBuffer(fwd_in_format_.samples_per_channel(),
- fwd_in_format_.num_channels(),
- fwd_proc_format_.samples_per_channel(),
- fwd_audio_buffer_channels,
- fwd_out_format_.samples_per_channel()));
+ const int fwd_audio_buffer_channels =
+ beamformer_enabled_ ? api_format_.input_stream().num_channels()
+ : api_format_.output_stream().num_channels();
+ const int rev_audio_buffer_out_num_frames =
+ api_format_.reverse_output_stream().num_frames() == 0
+ ? rev_proc_format_.num_frames()
+ : api_format_.reverse_output_stream().num_frames();
+ if (api_format_.reverse_input_stream().num_channels() > 0) {
+ render_audio_.reset(new AudioBuffer(
+ api_format_.reverse_input_stream().num_frames(),
+ api_format_.reverse_input_stream().num_channels(),
+ rev_proc_format_.num_frames(), rev_proc_format_.num_channels(),
+ rev_audio_buffer_out_num_frames));
+ if (rev_conversion_needed()) {
+ render_converter_ = AudioConverter::Create(
+ api_format_.reverse_input_stream().num_channels(),
+ api_format_.reverse_input_stream().num_frames(),
+ api_format_.reverse_output_stream().num_channels(),
+ api_format_.reverse_output_stream().num_frames());
+ } else {
+ render_converter_.reset(nullptr);
+ }
+ } else {
+ render_audio_.reset(nullptr);
+ render_converter_.reset(nullptr);
+ }
+ capture_audio_.reset(new AudioBuffer(
+ api_format_.input_stream().num_frames(),
+ api_format_.input_stream().num_channels(), fwd_proc_format_.num_frames(),
+ fwd_audio_buffer_channels, api_format_.output_stream().num_frames()));
// Initialize all components.
for (auto item : component_list_) {
@@ -305,6 +355,8 @@ int AudioProcessingImpl::InitializeLocked() {
InitializeBeamformer();
+ InitializeIntelligibility();
+
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
if (debug_file_->Open()) {
int err = WriteInitMessage();
@@ -317,62 +369,61 @@ int AudioProcessingImpl::InitializeLocked() {
return kNoError;
}
-int AudioProcessingImpl::InitializeLocked(int input_sample_rate_hz,
- int output_sample_rate_hz,
- int reverse_sample_rate_hz,
- int num_input_channels,
- int num_output_channels,
- int num_reverse_channels) {
- if (input_sample_rate_hz <= 0 ||
- output_sample_rate_hz <= 0 ||
- reverse_sample_rate_hz <= 0) {
- return kBadSampleRateError;
- }
- if (num_output_channels > num_input_channels) {
- return kBadNumberChannelsError;
+int AudioProcessingImpl::InitializeLocked(const ProcessingConfig& config) {
+ for (const auto& stream : config.streams) {
+ if (stream.num_channels() < 0) {
+ return kBadNumberChannelsError;
+ }
+ if (stream.num_channels() > 0 && stream.sample_rate_hz() <= 0) {
+ return kBadSampleRateError;
+ }
}
- // Only mono and stereo supported currently.
- if (num_input_channels > 2 || num_input_channels < 1 ||
- num_output_channels > 2 || num_output_channels < 1 ||
- num_reverse_channels > 2 || num_reverse_channels < 1) {
+
+ const int num_in_channels = config.input_stream().num_channels();
+ const int num_out_channels = config.output_stream().num_channels();
+
+ // Need at least one input channel.
+ // Need either one output channel or as many outputs as there are inputs.
+ if (num_in_channels == 0 ||
+ !(num_out_channels == 1 || num_out_channels == num_in_channels)) {
return kBadNumberChannelsError;
}
+
if (beamformer_enabled_ &&
- (static_cast<size_t>(num_input_channels) != array_geometry_.size() ||
- num_output_channels > 1)) {
+ (static_cast<size_t>(num_in_channels) != array_geometry_.size() ||
+ num_out_channels > 1)) {
return kBadNumberChannelsError;
}
- fwd_in_format_.set(input_sample_rate_hz, num_input_channels);
- fwd_out_format_.set(output_sample_rate_hz, num_output_channels);
- rev_in_format_.set(reverse_sample_rate_hz, num_reverse_channels);
+ api_format_ = config;
// We process at the closest native rate >= min(input rate, output rate)...
- int min_proc_rate = std::min(fwd_in_format_.rate(), fwd_out_format_.rate());
+ const int min_proc_rate =
+ std::min(api_format_.input_stream().sample_rate_hz(),
+ api_format_.output_stream().sample_rate_hz());
int fwd_proc_rate;
- if (supports_48kHz_ && min_proc_rate > kSampleRate32kHz) {
- fwd_proc_rate = kSampleRate48kHz;
- } else if (min_proc_rate > kSampleRate16kHz) {
- fwd_proc_rate = kSampleRate32kHz;
- } else if (min_proc_rate > kSampleRate8kHz) {
- fwd_proc_rate = kSampleRate16kHz;
- } else {
- fwd_proc_rate = kSampleRate8kHz;
+ for (size_t i = 0; i < kNumNativeSampleRates; ++i) {
+ fwd_proc_rate = kNativeSampleRatesHz[i];
+ if (fwd_proc_rate >= min_proc_rate) {
+ break;
+ }
}
// ...with one exception.
- if (echo_control_mobile_->is_enabled() && min_proc_rate > kSampleRate16kHz) {
- fwd_proc_rate = kSampleRate16kHz;
+ if (echo_control_mobile_->is_enabled() &&
+ min_proc_rate > kMaxAECMSampleRateHz) {
+ fwd_proc_rate = kMaxAECMSampleRateHz;
}
- fwd_proc_format_.set(fwd_proc_rate);
+ fwd_proc_format_ = StreamConfig(fwd_proc_rate);
// We normally process the reverse stream at 16 kHz. Unless...
int rev_proc_rate = kSampleRate16kHz;
- if (fwd_proc_format_.rate() == kSampleRate8kHz) {
+ if (fwd_proc_format_.sample_rate_hz() == kSampleRate8kHz) {
// ...the forward stream is at 8 kHz.
rev_proc_rate = kSampleRate8kHz;
} else {
- if (rev_in_format_.rate() == kSampleRate32kHz) {
+ if (api_format_.reverse_input_stream().sample_rate_hz() ==
+ kSampleRate32kHz) {
// ...or the input is at 32 kHz, in which case we use the splitting
// filter rather than the resampler.
rev_proc_rate = kSampleRate32kHz;
@@ -381,13 +432,13 @@ int AudioProcessingImpl::InitializeLocked(int input_sample_rate_hz,
// Always downmix the reverse stream to mono for analysis. This has been
// demonstrated to work well for AEC in most practical scenarios.
- rev_proc_format_.set(rev_proc_rate, 1);
+ rev_proc_format_ = StreamConfig(rev_proc_rate, 1);
- if (fwd_proc_format_.rate() == kSampleRate32kHz ||
- fwd_proc_format_.rate() == kSampleRate48kHz) {
+ if (fwd_proc_format_.sample_rate_hz() == kSampleRate32kHz ||
+ fwd_proc_format_.sample_rate_hz() == kSampleRate48kHz) {
split_rate_ = kSampleRate16kHz;
} else {
- split_rate_ = fwd_proc_format_.rate();
+ split_rate_ = fwd_proc_format_.sample_rate_hz();
}
return InitializeLocked();
@@ -395,26 +446,12 @@ int AudioProcessingImpl::InitializeLocked(int input_sample_rate_hz,
// Calls InitializeLocked() if any of the audio parameters have changed from
// their current values.
-int AudioProcessingImpl::MaybeInitializeLocked(int input_sample_rate_hz,
- int output_sample_rate_hz,
- int reverse_sample_rate_hz,
- int num_input_channels,
- int num_output_channels,
- int num_reverse_channels) {
- if (input_sample_rate_hz == fwd_in_format_.rate() &&
- output_sample_rate_hz == fwd_out_format_.rate() &&
- reverse_sample_rate_hz == rev_in_format_.rate() &&
- num_input_channels == fwd_in_format_.num_channels() &&
- num_output_channels == fwd_out_format_.num_channels() &&
- num_reverse_channels == rev_in_format_.num_channels()) {
+int AudioProcessingImpl::MaybeInitializeLocked(
+ const ProcessingConfig& processing_config) {
+ if (processing_config == api_format_) {
return kNoError;
}
- return InitializeLocked(input_sample_rate_hz,
- output_sample_rate_hz,
- reverse_sample_rate_hz,
- num_input_channels,
- num_output_channels,
- num_reverse_channels);
+ return InitializeLocked(processing_config);
}
void AudioProcessingImpl::SetExtraOptions(const Config& config) {
@@ -429,18 +466,9 @@ void AudioProcessingImpl::SetExtraOptions(const Config& config) {
}
}
-int AudioProcessingImpl::input_sample_rate_hz() const {
- CriticalSectionScoped crit_scoped(crit_);
- return fwd_in_format_.rate();
-}
-
-int AudioProcessingImpl::sample_rate_hz() const {
- CriticalSectionScoped crit_scoped(crit_);
- return fwd_in_format_.rate();
-}
int AudioProcessingImpl::proc_sample_rate_hz() const {
- return fwd_proc_format_.rate();
+ return fwd_proc_format_.sample_rate_hz();
}
int AudioProcessingImpl::proc_split_sample_rate_hz() const {
@@ -452,11 +480,11 @@ int AudioProcessingImpl::num_reverse_channels() const {
}
int AudioProcessingImpl::num_input_channels() const {
- return fwd_in_format_.num_channels();
+ return api_format_.input_stream().num_channels();
}
int AudioProcessingImpl::num_output_channels() const {
- return fwd_out_format_.num_channels();
+ return api_format_.output_stream().num_channels();
}
void AudioProcessingImpl::set_output_will_be_muted(bool muted) {
@@ -467,56 +495,71 @@ void AudioProcessingImpl::set_output_will_be_muted(bool muted) {
}
}
-bool AudioProcessingImpl::output_will_be_muted() const {
- CriticalSectionScoped lock(crit_);
- return output_will_be_muted_;
-}
int AudioProcessingImpl::ProcessStream(const float* const* src,
- int samples_per_channel,
+ size_t samples_per_channel,
int input_sample_rate_hz,
ChannelLayout input_layout,
int output_sample_rate_hz,
ChannelLayout output_layout,
float* const* dest) {
CriticalSectionScoped crit_scoped(crit_);
+ StreamConfig input_stream = api_format_.input_stream();
+ input_stream.set_sample_rate_hz(input_sample_rate_hz);
+ input_stream.set_num_channels(ChannelsFromLayout(input_layout));
+ input_stream.set_has_keyboard(LayoutHasKeyboard(input_layout));
+
+ StreamConfig output_stream = api_format_.output_stream();
+ output_stream.set_sample_rate_hz(output_sample_rate_hz);
+ output_stream.set_num_channels(ChannelsFromLayout(output_layout));
+ output_stream.set_has_keyboard(LayoutHasKeyboard(output_layout));
+
+ if (samples_per_channel != input_stream.num_frames()) {
+ return kBadDataLengthError;
+ }
+ return ProcessStream(src, input_stream, output_stream, dest);
+}
+
+int AudioProcessingImpl::ProcessStream(const float* const* src,
+ const StreamConfig& input_config,
+ const StreamConfig& output_config,
+ float* const* dest) {
+ CriticalSectionScoped crit_scoped(crit_);
if (!src || !dest) {
return kNullPointerError;
}
- RETURN_ON_ERR(MaybeInitializeLocked(input_sample_rate_hz,
- output_sample_rate_hz,
- rev_in_format_.rate(),
- ChannelsFromLayout(input_layout),
- ChannelsFromLayout(output_layout),
- rev_in_format_.num_channels()));
- if (samples_per_channel != fwd_in_format_.samples_per_channel()) {
- return kBadDataLengthError;
- }
+ ProcessingConfig processing_config = api_format_;
+ processing_config.input_stream() = input_config;
+ processing_config.output_stream() = output_config;
+
+ RETURN_ON_ERR(MaybeInitializeLocked(processing_config));
+ assert(processing_config.input_stream().num_frames() ==
+ api_format_.input_stream().num_frames());
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
if (debug_file_->Open()) {
+ RETURN_ON_ERR(WriteConfigMessage(false));
+
event_msg_->set_type(audioproc::Event::STREAM);
audioproc::Stream* msg = event_msg_->mutable_stream();
const size_t channel_size =
- sizeof(float) * fwd_in_format_.samples_per_channel();
- for (int i = 0; i < fwd_in_format_.num_channels(); ++i)
+ sizeof(float) * api_format_.input_stream().num_frames();
+ for (int i = 0; i < api_format_.input_stream().num_channels(); ++i)
msg->add_input_channel(src[i], channel_size);
}
#endif
- capture_audio_->CopyFrom(src, samples_per_channel, input_layout);
+ capture_audio_->CopyFrom(src, api_format_.input_stream());
RETURN_ON_ERR(ProcessStreamLocked());
- capture_audio_->CopyTo(fwd_out_format_.samples_per_channel(),
- output_layout,
- dest);
+ capture_audio_->CopyTo(api_format_.output_stream(), dest);
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
if (debug_file_->Open()) {
audioproc::Stream* msg = event_msg_->mutable_stream();
const size_t channel_size =
- sizeof(float) * fwd_out_format_.samples_per_channel();
- for (int i = 0; i < fwd_out_format_.num_channels(); ++i)
+ sizeof(float) * api_format_.output_stream().num_frames();
+ for (int i = 0; i < api_format_.output_stream().num_channels(); ++i)
msg->add_output_channel(dest[i], channel_size);
RETURN_ON_ERR(WriteMessageToDebugFile());
}
@@ -538,20 +581,21 @@ int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
return kBadSampleRateError;
}
if (echo_control_mobile_->is_enabled() &&
- frame->sample_rate_hz_ > kSampleRate16kHz) {
+ frame->sample_rate_hz_ > kMaxAECMSampleRateHz) {
LOG(LS_ERROR) << "AECM only supports 16 or 8 kHz sample rates";
return kUnsupportedComponentError;
}
// TODO(ajm): The input and output rates and channels are currently
// constrained to be identical in the int16 interface.
- RETURN_ON_ERR(MaybeInitializeLocked(frame->sample_rate_hz_,
- frame->sample_rate_hz_,
- rev_in_format_.rate(),
- frame->num_channels_,
- frame->num_channels_,
- rev_in_format_.num_channels()));
- if (frame->samples_per_channel_ != fwd_in_format_.samples_per_channel()) {
+ ProcessingConfig processing_config = api_format_;
+ processing_config.input_stream().set_sample_rate_hz(frame->sample_rate_hz_);
+ processing_config.input_stream().set_num_channels(frame->num_channels_);
+ processing_config.output_stream().set_sample_rate_hz(frame->sample_rate_hz_);
+ processing_config.output_stream().set_num_channels(frame->num_channels_);
+
+ RETURN_ON_ERR(MaybeInitializeLocked(processing_config));
+ if (frame->samples_per_channel_ != api_format_.input_stream().num_frames()) {
return kBadDataLengthError;
}
@@ -559,9 +603,8 @@ int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
if (debug_file_->Open()) {
event_msg_->set_type(audioproc::Event::STREAM);
audioproc::Stream* msg = event_msg_->mutable_stream();
- const size_t data_size = sizeof(int16_t) *
- frame->samples_per_channel_ *
- frame->num_channels_;
+ const size_t data_size =
+ sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_;
msg->set_input_data(frame->data_, data_size);
}
#endif
@@ -573,9 +616,8 @@ int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
if (debug_file_->Open()) {
audioproc::Stream* msg = event_msg_->mutable_stream();
- const size_t data_size = sizeof(int16_t) *
- frame->samples_per_channel_ *
- frame->num_channels_;
+ const size_t data_size =
+ sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_;
msg->set_output_data(frame->data_, data_size);
RETURN_ON_ERR(WriteMessageToDebugFile());
}
@@ -584,7 +626,6 @@ int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
return kNoError;
}
-
int AudioProcessingImpl::ProcessStreamLocked() {
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
if (debug_file_->Open()) {
@@ -599,10 +640,10 @@ int AudioProcessingImpl::ProcessStreamLocked() {
MaybeUpdateHistograms();
AudioBuffer* ca = capture_audio_.get(); // For brevity.
+
if (use_new_agc_ && gain_control_->is_enabled()) {
- agc_manager_->AnalyzePreProcess(ca->channels()[0],
- ca->num_channels(),
- fwd_proc_format_.samples_per_channel());
+ agc_manager_->AnalyzePreProcess(ca->channels()[0], ca->num_channels(),
+ fwd_proc_format_.num_frames());
}
bool data_processed = is_data_processed();
@@ -610,6 +651,11 @@ int AudioProcessingImpl::ProcessStreamLocked() {
ca->SplitIntoFrequencyBands();
}
+ if (intelligibility_enabled_) {
+ intelligibility_enhancer_->AnalyzeCaptureAudio(
+ ca->split_channels_f(kBand0To8kHz), split_rate_, ca->num_channels());
+ }
+
if (beamformer_enabled_) {
beamformer_->ProcessChunk(*ca->split_data_f(), ca->split_data_f());
ca->set_num_channels(1);
@@ -627,12 +673,10 @@ int AudioProcessingImpl::ProcessStreamLocked() {
RETURN_ON_ERR(echo_control_mobile_->ProcessCaptureAudio(ca));
RETURN_ON_ERR(voice_detection_->ProcessCaptureAudio(ca));
- if (use_new_agc_ &&
- gain_control_->is_enabled() &&
+ if (use_new_agc_ && gain_control_->is_enabled() &&
(!beamformer_enabled_ || beamformer_->is_target_present())) {
agc_manager_->Process(ca->split_bands_const(0)[kBand0To8kHz],
- ca->num_frames_per_band(),
- split_rate_);
+ ca->num_frames_per_band(), split_rate_);
}
RETURN_ON_ERR(gain_control_->ProcessCaptureAudio(ca));
@@ -646,15 +690,11 @@ int AudioProcessingImpl::ProcessStreamLocked() {
float voice_probability =
agc_manager_.get() ? agc_manager_->voice_probability() : 1.f;
- transient_suppressor_->Suppress(ca->channels_f()[0],
- ca->num_frames(),
- ca->num_channels(),
- ca->split_bands_const_f(0)[kBand0To8kHz],
- ca->num_frames_per_band(),
- ca->keyboard_data(),
- ca->num_keyboard_frames(),
- voice_probability,
- key_pressed_);
+ transient_suppressor_->Suppress(
+ ca->channels_f()[0], ca->num_frames(), ca->num_channels(),
+ ca->split_bands_const_f(0)[kBand0To8kHz], ca->num_frames_per_band(),
+ ca->keyboard_data(), ca->num_keyboard_frames(), voice_probability,
+ key_pressed_);
}
// The level estimator operates on the recombined data.
@@ -665,39 +705,82 @@ int AudioProcessingImpl::ProcessStreamLocked() {
}
int AudioProcessingImpl::AnalyzeReverseStream(const float* const* data,
- int samples_per_channel,
- int sample_rate_hz,
+ size_t samples_per_channel,
+ int rev_sample_rate_hz,
ChannelLayout layout) {
+ const StreamConfig reverse_config = {
+ rev_sample_rate_hz, ChannelsFromLayout(layout), LayoutHasKeyboard(layout),
+ };
+ if (samples_per_channel != reverse_config.num_frames()) {
+ return kBadDataLengthError;
+ }
+ return AnalyzeReverseStream(data, reverse_config, reverse_config);
+}
+
+int AudioProcessingImpl::ProcessReverseStream(
+ const float* const* src,
+ const StreamConfig& reverse_input_config,
+ const StreamConfig& reverse_output_config,
+ float* const* dest) {
+ RETURN_ON_ERR(
+ AnalyzeReverseStream(src, reverse_input_config, reverse_output_config));
+ if (is_rev_processed()) {
+ render_audio_->CopyTo(api_format_.reverse_output_stream(), dest);
+ } else if (rev_conversion_needed()) {
+ render_converter_->Convert(src, reverse_input_config.num_samples(), dest,
+ reverse_output_config.num_samples());
+ } else {
+ CopyAudioIfNeeded(src, reverse_input_config.num_frames(),
+ reverse_input_config.num_channels(), dest);
+ }
+
+ return kNoError;
+}
+
+int AudioProcessingImpl::AnalyzeReverseStream(
+ const float* const* src,
+ const StreamConfig& reverse_input_config,
+ const StreamConfig& reverse_output_config) {
CriticalSectionScoped crit_scoped(crit_);
- if (data == NULL) {
+ if (src == NULL) {
return kNullPointerError;
}
- const int num_channels = ChannelsFromLayout(layout);
- RETURN_ON_ERR(MaybeInitializeLocked(fwd_in_format_.rate(),
- fwd_out_format_.rate(),
- sample_rate_hz,
- fwd_in_format_.num_channels(),
- fwd_out_format_.num_channels(),
- num_channels));
- if (samples_per_channel != rev_in_format_.samples_per_channel()) {
- return kBadDataLengthError;
+ if (reverse_input_config.num_channels() <= 0) {
+ return kBadNumberChannelsError;
}
+ ProcessingConfig processing_config = api_format_;
+ processing_config.reverse_input_stream() = reverse_input_config;
+ processing_config.reverse_output_stream() = reverse_output_config;
+
+ RETURN_ON_ERR(MaybeInitializeLocked(processing_config));
+ assert(reverse_input_config.num_frames() ==
+ api_format_.reverse_input_stream().num_frames());
+
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
if (debug_file_->Open()) {
event_msg_->set_type(audioproc::Event::REVERSE_STREAM);
audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream();
const size_t channel_size =
- sizeof(float) * rev_in_format_.samples_per_channel();
- for (int i = 0; i < num_channels; ++i)
- msg->add_channel(data[i], channel_size);
+ sizeof(float) * api_format_.reverse_input_stream().num_frames();
+ for (int i = 0; i < api_format_.reverse_input_stream().num_channels(); ++i)
+ msg->add_channel(src[i], channel_size);
RETURN_ON_ERR(WriteMessageToDebugFile());
}
#endif
- render_audio_->CopyFrom(data, samples_per_channel, layout);
- return AnalyzeReverseStreamLocked();
+ render_audio_->CopyFrom(src, api_format_.reverse_input_stream());
+ return ProcessReverseStreamLocked();
+}
+
+int AudioProcessingImpl::ProcessReverseStream(AudioFrame* frame) {
+ RETURN_ON_ERR(AnalyzeReverseStream(frame));
+ if (is_rev_processed()) {
+ render_audio_->InterleaveTo(frame, true);
+ }
+
+ return kNoError;
}
int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) {
@@ -713,17 +796,27 @@ int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) {
return kBadSampleRateError;
}
// This interface does not tolerate different forward and reverse rates.
- if (frame->sample_rate_hz_ != fwd_in_format_.rate()) {
+ if (frame->sample_rate_hz_ != api_format_.input_stream().sample_rate_hz()) {
return kBadSampleRateError;
}
- RETURN_ON_ERR(MaybeInitializeLocked(fwd_in_format_.rate(),
- fwd_out_format_.rate(),
- frame->sample_rate_hz_,
- fwd_in_format_.num_channels(),
- fwd_in_format_.num_channels(),
- frame->num_channels_));
- if (frame->samples_per_channel_ != rev_in_format_.samples_per_channel()) {
+ if (frame->num_channels_ <= 0) {
+ return kBadNumberChannelsError;
+ }
+
+ ProcessingConfig processing_config = api_format_;
+ processing_config.reverse_input_stream().set_sample_rate_hz(
+ frame->sample_rate_hz_);
+ processing_config.reverse_input_stream().set_num_channels(
+ frame->num_channels_);
+ processing_config.reverse_output_stream().set_sample_rate_hz(
+ frame->sample_rate_hz_);
+ processing_config.reverse_output_stream().set_num_channels(
+ frame->num_channels_);
+
+ RETURN_ON_ERR(MaybeInitializeLocked(processing_config));
+ if (frame->samples_per_channel_ !=
+ api_format_.reverse_input_stream().num_frames()) {
return kBadDataLengthError;
}
@@ -731,30 +824,38 @@ int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) {
if (debug_file_->Open()) {
event_msg_->set_type(audioproc::Event::REVERSE_STREAM);
audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream();
- const size_t data_size = sizeof(int16_t) *
- frame->samples_per_channel_ *
- frame->num_channels_;
+ const size_t data_size =
+ sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_;
msg->set_data(frame->data_, data_size);
RETURN_ON_ERR(WriteMessageToDebugFile());
}
#endif
-
render_audio_->DeinterleaveFrom(frame);
- return AnalyzeReverseStreamLocked();
+ return ProcessReverseStreamLocked();
}
-int AudioProcessingImpl::AnalyzeReverseStreamLocked() {
+int AudioProcessingImpl::ProcessReverseStreamLocked() {
AudioBuffer* ra = render_audio_.get(); // For brevity.
- if (rev_proc_format_.rate() == kSampleRate32kHz) {
+ if (rev_proc_format_.sample_rate_hz() == kSampleRate32kHz) {
ra->SplitIntoFrequencyBands();
}
+ if (intelligibility_enabled_) {
+ intelligibility_enhancer_->ProcessRenderAudio(
+ ra->split_channels_f(kBand0To8kHz), split_rate_, ra->num_channels());
+ }
+
RETURN_ON_ERR(echo_cancellation_->ProcessRenderAudio(ra));
RETURN_ON_ERR(echo_control_mobile_->ProcessRenderAudio(ra));
if (!use_new_agc_) {
RETURN_ON_ERR(gain_control_->ProcessRenderAudio(ra));
}
+ if (rev_proc_format_.sample_rate_hz() == kSampleRate32kHz &&
+ is_rev_processed()) {
+ ra->MergeFrequencyBands();
+ }
+
return kNoError;
}
@@ -790,10 +891,6 @@ void AudioProcessingImpl::set_stream_key_pressed(bool key_pressed) {
key_pressed_ = key_pressed;
}
-bool AudioProcessingImpl::stream_key_pressed() const {
- return key_pressed_;
-}
-
void AudioProcessingImpl::set_delay_offset_ms(int offset) {
CriticalSectionScoped crit_scoped(crit_);
delay_offset_ms_ = offset;
@@ -825,10 +922,8 @@ int AudioProcessingImpl::StartDebugRecording(
return kFileError;
}
- int err = WriteInitMessage();
- if (err != kNoError) {
- return err;
- }
+ RETURN_ON_ERR(WriteConfigMessage(true));
+ RETURN_ON_ERR(WriteInitMessage());
return kNoError;
#else
return kUnsupportedFunctionError;
@@ -854,10 +949,8 @@ int AudioProcessingImpl::StartDebugRecording(FILE* handle) {
return kFileError;
}
- int err = WriteInitMessage();
- if (err != kNoError) {
- return err;
- }
+ RETURN_ON_ERR(WriteConfigMessage(true));
+ RETURN_ON_ERR(WriteInitMessage());
return kNoError;
#else
return kUnsupportedFunctionError;
@@ -947,13 +1040,15 @@ bool AudioProcessingImpl::is_data_processed() const {
bool AudioProcessingImpl::output_copy_needed(bool is_data_processed) const {
// Check if we've upmixed or downmixed the audio.
- return ((fwd_out_format_.num_channels() != fwd_in_format_.num_channels()) ||
+ return ((api_format_.output_stream().num_channels() !=
+ api_format_.input_stream().num_channels()) ||
is_data_processed || transient_suppressor_enabled_);
}
bool AudioProcessingImpl::synthesis_needed(bool is_data_processed) const {
- return (is_data_processed && (fwd_proc_format_.rate() == kSampleRate32kHz ||
- fwd_proc_format_.rate() == kSampleRate48kHz));
+ return (is_data_processed &&
+ (fwd_proc_format_.sample_rate_hz() == kSampleRate32kHz ||
+ fwd_proc_format_.sample_rate_hz() == kSampleRate48kHz));
}
bool AudioProcessingImpl::analysis_needed(bool is_data_processed) const {
@@ -961,14 +1056,23 @@ bool AudioProcessingImpl::analysis_needed(bool is_data_processed) const {
!transient_suppressor_enabled_) {
// Only level_estimator_ is enabled.
return false;
- } else if (fwd_proc_format_.rate() == kSampleRate32kHz ||
- fwd_proc_format_.rate() == kSampleRate48kHz) {
+ } else if (fwd_proc_format_.sample_rate_hz() == kSampleRate32kHz ||
+ fwd_proc_format_.sample_rate_hz() == kSampleRate48kHz) {
// Something besides level_estimator_ is enabled, and we have super-wb.
return true;
}
return false;
}
+bool AudioProcessingImpl::is_rev_processed() const {
+ return intelligibility_enabled_ && intelligibility_enhancer_->active();
+}
+
+bool AudioProcessingImpl::rev_conversion_needed() const {
+ return (api_format_.reverse_input_stream() !=
+ api_format_.reverse_output_stream());
+}
+
void AudioProcessingImpl::InitializeExperimentalAgc() {
if (use_new_agc_) {
if (!agc_manager_.get()) {
@@ -986,9 +1090,9 @@ void AudioProcessingImpl::InitializeTransient() {
if (!transient_suppressor_.get()) {
transient_suppressor_.reset(new TransientSuppressor());
}
- transient_suppressor_->Initialize(fwd_proc_format_.rate(),
- split_rate_,
- fwd_out_format_.num_channels());
+ transient_suppressor_->Initialize(
+ fwd_proc_format_.sample_rate_hz(), split_rate_,
+ api_format_.output_stream().num_channels());
}
}
@@ -1001,6 +1105,16 @@ void AudioProcessingImpl::InitializeBeamformer() {
}
}
+void AudioProcessingImpl::InitializeIntelligibility() {
+ if (intelligibility_enabled_) {
+ IntelligibilityEnhancer::Config config;
+ config.sample_rate_hz = split_rate_;
+ config.num_capture_channels = capture_audio_->num_channels();
+ config.num_render_channels = render_audio_->num_channels();
+ intelligibility_enhancer_.reset(new IntelligibilityEnhancer(config));
+ }
+}
+
void AudioProcessingImpl::MaybeUpdateHistograms() {
static const int kMinDiffDelayMs = 60;
@@ -1031,8 +1145,8 @@ void AudioProcessingImpl::MaybeUpdateHistograms() {
const int frames_per_ms = rtc::CheckedDivExact(split_rate_, 1000);
const int aec_system_delay_ms =
WebRtcAec_system_delay(echo_cancellation()->aec_core()) / frames_per_ms;
- const int diff_aec_system_delay_ms = aec_system_delay_ms -
- last_aec_system_delay_ms_;
+ const int diff_aec_system_delay_ms =
+ aec_system_delay_ms - last_aec_system_delay_ms_;
if (diff_aec_system_delay_ms > kMinDiffDelayMs &&
last_aec_system_delay_ms_ != 0) {
RTC_HISTOGRAM_COUNTS("WebRTC.Audio.AecSystemDelayJump",
@@ -1072,8 +1186,8 @@ int AudioProcessingImpl::WriteMessageToDebugFile() {
return kUnspecifiedError;
}
#if defined(WEBRTC_ARCH_BIG_ENDIAN)
- // TODO(ajm): Use little-endian "on the wire". For the moment, we can be
- // pretty safe in assuming little-endian.
+// TODO(ajm): Use little-endian "on the wire". For the moment, we can be
+// pretty safe in assuming little-endian.
#endif
if (!event_msg_->SerializeToString(&event_str_)) {
@@ -1096,18 +1210,62 @@ int AudioProcessingImpl::WriteMessageToDebugFile() {
int AudioProcessingImpl::WriteInitMessage() {
event_msg_->set_type(audioproc::Event::INIT);
audioproc::Init* msg = event_msg_->mutable_init();
- msg->set_sample_rate(fwd_in_format_.rate());
- msg->set_num_input_channels(fwd_in_format_.num_channels());
- msg->set_num_output_channels(fwd_out_format_.num_channels());
- msg->set_num_reverse_channels(rev_in_format_.num_channels());
- msg->set_reverse_sample_rate(rev_in_format_.rate());
- msg->set_output_sample_rate(fwd_out_format_.rate());
+ msg->set_sample_rate(api_format_.input_stream().sample_rate_hz());
+ msg->set_num_input_channels(api_format_.input_stream().num_channels());
+ msg->set_num_output_channels(api_format_.output_stream().num_channels());
+ msg->set_num_reverse_channels(
+ api_format_.reverse_input_stream().num_channels());
+ msg->set_reverse_sample_rate(
+ api_format_.reverse_input_stream().sample_rate_hz());
+ msg->set_output_sample_rate(api_format_.output_stream().sample_rate_hz());
+ // TODO(ekmeyerson): Add reverse output fields to event_msg_.
+
+ RETURN_ON_ERR(WriteMessageToDebugFile());
+ return kNoError;
+}
+
+int AudioProcessingImpl::WriteConfigMessage(bool forced) {
+ audioproc::Config config;
+
+ config.set_aec_enabled(echo_cancellation_->is_enabled());
+ config.set_aec_delay_agnostic_enabled(
+ echo_cancellation_->is_delay_agnostic_enabled());
+ config.set_aec_drift_compensation_enabled(
+ echo_cancellation_->is_drift_compensation_enabled());
+ config.set_aec_extended_filter_enabled(
+ echo_cancellation_->is_extended_filter_enabled());
+ config.set_aec_suppression_level(
+ static_cast<int>(echo_cancellation_->suppression_level()));
+
+ config.set_aecm_enabled(echo_control_mobile_->is_enabled());
+ config.set_aecm_comfort_noise_enabled(
+ echo_control_mobile_->is_comfort_noise_enabled());
+ config.set_aecm_routing_mode(
+ static_cast<int>(echo_control_mobile_->routing_mode()));
+
+ config.set_agc_enabled(gain_control_->is_enabled());
+ config.set_agc_mode(static_cast<int>(gain_control_->mode()));
+ config.set_agc_limiter_enabled(gain_control_->is_limiter_enabled());
+ config.set_noise_robust_agc_enabled(use_new_agc_);
- int err = WriteMessageToDebugFile();
- if (err != kNoError) {
- return err;
+ config.set_hpf_enabled(high_pass_filter_->is_enabled());
+
+ config.set_ns_enabled(noise_suppression_->is_enabled());
+ config.set_ns_level(static_cast<int>(noise_suppression_->level()));
+
+ config.set_transient_suppression_enabled(transient_suppressor_enabled_);
+
+ std::string serialized_config = config.SerializeAsString();
+ if (!forced && last_serialized_config_ == serialized_config) {
+ return kNoError;
}
+ last_serialized_config_ = serialized_config;
+
+ event_msg_->set_type(audioproc::Event::CONFIG);
+ event_msg_->mutable_config()->CopyFrom(config);
+
+ RETURN_ON_ERR(WriteMessageToDebugFile());
return kNoError;
}
#endif // WEBRTC_AUDIOPROC_DEBUG_DUMP
diff --git a/chromium/third_party/webrtc/modules/audio_processing/audio_processing_impl.h b/chromium/third_party/webrtc/modules/audio_processing/audio_processing_impl.h
index bbd17191585..bf29bf36332 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/audio_processing_impl.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/audio_processing_impl.h
@@ -13,6 +13,7 @@
#include <list>
#include <string>
+#include <vector>
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/base/thread_annotations.h"
@@ -22,6 +23,7 @@ namespace webrtc {
class AgcManagerDirect;
class AudioBuffer;
+class AudioConverter;
template<typename T>
class Beamformer;
@@ -38,6 +40,7 @@ class NoiseSuppressionImpl;
class ProcessingComponent;
class TransientSuppressor;
class VoiceDetectionImpl;
+class IntelligibilityEnhancer;
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
namespace audioproc {
@@ -47,42 +50,6 @@ class Event;
} // namespace audioproc
#endif
-class AudioRate {
- public:
- explicit AudioRate(int sample_rate_hz) { set(sample_rate_hz); }
- virtual ~AudioRate() {}
-
- void set(int rate) {
- rate_ = rate;
- samples_per_channel_ = AudioProcessing::kChunkSizeMs * rate_ / 1000;
- }
-
- int rate() const { return rate_; }
- int samples_per_channel() const { return samples_per_channel_; }
-
- private:
- int rate_;
- int samples_per_channel_;
-};
-
-class AudioFormat : public AudioRate {
- public:
- AudioFormat(int sample_rate_hz, int num_channels)
- : AudioRate(sample_rate_hz),
- num_channels_(num_channels) {}
- virtual ~AudioFormat() {}
-
- void set(int rate, int num_channels) {
- AudioRate::set(rate);
- num_channels_ = num_channels;
- }
-
- int num_channels() const { return num_channels_; }
-
- private:
- int num_channels_;
-};
-
class AudioProcessingImpl : public AudioProcessing {
public:
explicit AudioProcessingImpl(const Config& config);
@@ -99,37 +66,42 @@ class AudioProcessingImpl : public AudioProcessing {
ChannelLayout input_layout,
ChannelLayout output_layout,
ChannelLayout reverse_layout) override;
+ int Initialize(const ProcessingConfig& processing_config) override;
void SetExtraOptions(const Config& config) override;
- int set_sample_rate_hz(int rate) override;
- int input_sample_rate_hz() const override;
- int sample_rate_hz() const override;
int proc_sample_rate_hz() const override;
int proc_split_sample_rate_hz() const override;
int num_input_channels() const override;
int num_output_channels() const override;
int num_reverse_channels() const override;
void set_output_will_be_muted(bool muted) override;
- bool output_will_be_muted() const override;
int ProcessStream(AudioFrame* frame) override;
int ProcessStream(const float* const* src,
- int samples_per_channel,
+ size_t samples_per_channel,
int input_sample_rate_hz,
ChannelLayout input_layout,
int output_sample_rate_hz,
ChannelLayout output_layout,
float* const* dest) override;
+ int ProcessStream(const float* const* src,
+ const StreamConfig& input_config,
+ const StreamConfig& output_config,
+ float* const* dest) override;
int AnalyzeReverseStream(AudioFrame* frame) override;
+ int ProcessReverseStream(AudioFrame* frame) override;
int AnalyzeReverseStream(const float* const* data,
- int samples_per_channel,
+ size_t samples_per_channel,
int sample_rate_hz,
ChannelLayout layout) override;
+ int ProcessReverseStream(const float* const* src,
+ const StreamConfig& reverse_input_config,
+ const StreamConfig& reverse_output_config,
+ float* const* dest) override;
int set_stream_delay_ms(int delay) override;
int stream_delay_ms() const override;
bool was_stream_delay_set() const override;
void set_delay_offset_ms(int offset) override;
int delay_offset_ms() const override;
void set_stream_key_pressed(bool key_pressed) override;
- bool stream_key_pressed() const override;
int StartDebugRecording(const char filename[kMaxFilenameSize]) override;
int StartDebugRecording(FILE* handle) override;
int StartDebugRecordingForPlatformFile(rtc::PlatformFile handle) override;
@@ -148,30 +120,27 @@ class AudioProcessingImpl : public AudioProcessing {
virtual int InitializeLocked() EXCLUSIVE_LOCKS_REQUIRED(crit_);
private:
- int InitializeLocked(int input_sample_rate_hz,
- int output_sample_rate_hz,
- int reverse_sample_rate_hz,
- int num_input_channels,
- int num_output_channels,
- int num_reverse_channels)
+ int InitializeLocked(const ProcessingConfig& config)
EXCLUSIVE_LOCKS_REQUIRED(crit_);
- int MaybeInitializeLocked(int input_sample_rate_hz,
- int output_sample_rate_hz,
- int reverse_sample_rate_hz,
- int num_input_channels,
- int num_output_channels,
- int num_reverse_channels)
+ int MaybeInitializeLocked(const ProcessingConfig& config)
EXCLUSIVE_LOCKS_REQUIRED(crit_);
+ // TODO(ekm): Remove once all clients updated to new interface.
+ int AnalyzeReverseStream(const float* const* src,
+ const StreamConfig& input_config,
+ const StreamConfig& output_config);
int ProcessStreamLocked() EXCLUSIVE_LOCKS_REQUIRED(crit_);
- int AnalyzeReverseStreamLocked() EXCLUSIVE_LOCKS_REQUIRED(crit_);
+ int ProcessReverseStreamLocked() EXCLUSIVE_LOCKS_REQUIRED(crit_);
bool is_data_processed() const;
bool output_copy_needed(bool is_data_processed) const;
bool synthesis_needed(bool is_data_processed) const;
bool analysis_needed(bool is_data_processed) const;
+ bool is_rev_processed() const;
+ bool rev_conversion_needed() const;
void InitializeExperimentalAgc() EXCLUSIVE_LOCKS_REQUIRED(crit_);
void InitializeTransient() EXCLUSIVE_LOCKS_REQUIRED(crit_);
void InitializeBeamformer() EXCLUSIVE_LOCKS_REQUIRED(crit_);
+ void InitializeIntelligibility() EXCLUSIVE_LOCKS_REQUIRED(crit_);
void MaybeUpdateHistograms() EXCLUSIVE_LOCKS_REQUIRED(crit_);
EchoCancellationImpl* echo_cancellation_;
@@ -187,23 +156,34 @@ class AudioProcessingImpl : public AudioProcessing {
CriticalSectionWrapper* crit_;
rtc::scoped_ptr<AudioBuffer> render_audio_;
rtc::scoped_ptr<AudioBuffer> capture_audio_;
+ rtc::scoped_ptr<AudioConverter> render_converter_;
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
// TODO(andrew): make this more graceful. Ideally we would split this stuff
// out into a separate class with an "enabled" and "disabled" implementation.
int WriteMessageToDebugFile();
int WriteInitMessage();
+
+ // Writes Config message. If not |forced|, only writes the current config if
+ // it is different from the last saved one; if |forced|, writes the config
+ // regardless of the last saved.
+ int WriteConfigMessage(bool forced);
+
rtc::scoped_ptr<FileWrapper> debug_file_;
rtc::scoped_ptr<audioproc::Event> event_msg_; // Protobuf message.
std::string event_str_; // Memory for protobuf serialization.
+
+ // Serialized string of last saved APM configuration.
+ std::string last_serialized_config_;
#endif
- AudioFormat fwd_in_format_;
- // This one is an AudioRate, because the forward processing number of channels
- // is mutable and is tracked by the capture_audio_.
- AudioRate fwd_proc_format_;
- AudioFormat fwd_out_format_;
- AudioFormat rev_in_format_;
- AudioFormat rev_proc_format_;
+ // Format of processing streams at input/output call sites.
+ ProcessingConfig api_format_;
+
+ // Only the rate and samples fields of fwd_proc_format_ are used because the
+ // forward processing number of channels is mutable and is tracked by the
+ // capture_audio_.
+ StreamConfig fwd_proc_format_;
+ StreamConfig rev_proc_format_;
int split_rate_;
int stream_delay_ms_;
@@ -229,7 +209,8 @@ class AudioProcessingImpl : public AudioProcessing {
rtc::scoped_ptr<Beamformer<float>> beamformer_;
const std::vector<Point> array_geometry_;
- const bool supports_48kHz_;
+ bool intelligibility_enabled_;
+ rtc::scoped_ptr<IntelligibilityEnhancer> intelligibility_enhancer_;
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_processing/audio_processing_tests.gypi b/chromium/third_party/webrtc/modules/audio_processing/audio_processing_tests.gypi
index 19b9ddf596a..0314c69b042 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/audio_processing_tests.gypi
+++ b/chromium/third_party/webrtc/modules/audio_processing/audio_processing_tests.gypi
@@ -70,7 +70,7 @@
'<(webrtc_root)/test/test.gyp:test_support',
],
'sources': [
- 'intelligibility/intelligibility_proc.cc',
+ 'intelligibility/test/intelligibility_proc.cc',
],
}, # intelligibility_proc
],
diff --git a/chromium/third_party/webrtc/modules/audio_processing/beamformer/beamformer.h b/chromium/third_party/webrtc/modules/audio_processing/beamformer/beamformer.h
index ff5b034a8cd..54734dddb89 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/beamformer/beamformer.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/beamformer/beamformer.h
@@ -12,6 +12,7 @@
#define WEBRTC_MODULES_AUDIO_PROCESSING_BEAMFORMER_BEAMFORMER_H_
#include "webrtc/common_audio/channel_buffer.h"
+#include "webrtc/modules/audio_processing/beamformer/array_util.h"
namespace webrtc {
diff --git a/chromium/third_party/webrtc/modules/audio_processing/beamformer/complex_matrix.h b/chromium/third_party/webrtc/modules/audio_processing/beamformer/complex_matrix.h
index f5be2b2f63c..bfa3563b898 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/beamformer/complex_matrix.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/beamformer/complex_matrix.h
@@ -59,8 +59,8 @@ class ComplexMatrix : public Matrix<complex<T> > {
}
ComplexMatrix& ConjugateTranspose(const ComplexMatrix& operand) {
- CHECK_EQ(operand.num_rows(), this->num_columns());
- CHECK_EQ(operand.num_columns(), this->num_rows());
+ RTC_CHECK_EQ(operand.num_rows(), this->num_columns());
+ RTC_CHECK_EQ(operand.num_columns(), this->num_rows());
return ConjugateTranspose(operand.elements());
}
diff --git a/chromium/third_party/webrtc/modules/audio_processing/beamformer/covariance_matrix_generator.cc b/chromium/third_party/webrtc/modules/audio_processing/beamformer/covariance_matrix_generator.cc
index c70bf5e7a58..efc5b0f71af 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/beamformer/covariance_matrix_generator.cc
+++ b/chromium/third_party/webrtc/modules/audio_processing/beamformer/covariance_matrix_generator.cc
@@ -32,8 +32,8 @@ void CovarianceMatrixGenerator::UniformCovarianceMatrix(
float wave_number,
const std::vector<Point>& geometry,
ComplexMatrix<float>* mat) {
- CHECK_EQ(static_cast<int>(geometry.size()), mat->num_rows());
- CHECK_EQ(static_cast<int>(geometry.size()), mat->num_columns());
+ RTC_CHECK_EQ(static_cast<int>(geometry.size()), mat->num_rows());
+ RTC_CHECK_EQ(static_cast<int>(geometry.size()), mat->num_columns());
complex<float>* const* mat_els = mat->elements();
for (size_t i = 0; i < geometry.size(); ++i) {
@@ -51,14 +51,14 @@ void CovarianceMatrixGenerator::UniformCovarianceMatrix(
void CovarianceMatrixGenerator::AngledCovarianceMatrix(
float sound_speed,
float angle,
- int frequency_bin,
- int fft_size,
- int num_freq_bins,
+ size_t frequency_bin,
+ size_t fft_size,
+ size_t num_freq_bins,
int sample_rate,
const std::vector<Point>& geometry,
ComplexMatrix<float>* mat) {
- CHECK_EQ(static_cast<int>(geometry.size()), mat->num_rows());
- CHECK_EQ(static_cast<int>(geometry.size()), mat->num_columns());
+ RTC_CHECK_EQ(static_cast<int>(geometry.size()), mat->num_rows());
+ RTC_CHECK_EQ(static_cast<int>(geometry.size()), mat->num_columns());
ComplexMatrix<float> interf_cov_vector(1, geometry.size());
ComplexMatrix<float> interf_cov_vector_transposed(geometry.size(), 1);
@@ -75,15 +75,15 @@ void CovarianceMatrixGenerator::AngledCovarianceMatrix(
}
void CovarianceMatrixGenerator::PhaseAlignmentMasks(
- int frequency_bin,
- int fft_size,
+ size_t frequency_bin,
+ size_t fft_size,
int sample_rate,
float sound_speed,
const std::vector<Point>& geometry,
float angle,
ComplexMatrix<float>* mat) {
- CHECK_EQ(1, mat->num_rows());
- CHECK_EQ(static_cast<int>(geometry.size()), mat->num_columns());
+ RTC_CHECK_EQ(1, mat->num_rows());
+ RTC_CHECK_EQ(static_cast<int>(geometry.size()), mat->num_columns());
float freq_in_hertz =
(static_cast<float>(frequency_bin) / fft_size) * sample_rate;
diff --git a/chromium/third_party/webrtc/modules/audio_processing/beamformer/covariance_matrix_generator.h b/chromium/third_party/webrtc/modules/audio_processing/beamformer/covariance_matrix_generator.h
index 5979462751b..5375518e8aa 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/beamformer/covariance_matrix_generator.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/beamformer/covariance_matrix_generator.h
@@ -30,9 +30,9 @@ class CovarianceMatrixGenerator {
// The covariance matrix of a source at the given angle.
static void AngledCovarianceMatrix(float sound_speed,
float angle,
- int frequency_bin,
- int fft_size,
- int num_freq_bins,
+ size_t frequency_bin,
+ size_t fft_size,
+ size_t num_freq_bins,
int sample_rate,
const std::vector<Point>& geometry,
ComplexMatrix<float>* mat);
@@ -40,8 +40,8 @@ class CovarianceMatrixGenerator {
// Calculates phase shifts that, when applied to a multichannel signal and
// added together, cause constructive interferernce for sources located at
// the given angle.
- static void PhaseAlignmentMasks(int frequency_bin,
- int fft_size,
+ static void PhaseAlignmentMasks(size_t frequency_bin,
+ size_t fft_size,
int sample_rate,
float sound_speed,
const std::vector<Point>& geometry,
diff --git a/chromium/third_party/webrtc/modules/audio_processing/beamformer/matrix.h b/chromium/third_party/webrtc/modules/audio_processing/beamformer/matrix.h
index 990f6a4a1b7..162aef1dac8 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/beamformer/matrix.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/beamformer/matrix.h
@@ -95,7 +95,9 @@ class Matrix {
memcpy(&data_[0], data, num_rows_ * num_columns_ * sizeof(data_[0]));
}
- Matrix& CopyFromColumn(const T* const* src, int column_index, int num_rows) {
+ Matrix& CopyFromColumn(const T* const* src,
+ size_t column_index,
+ int num_rows) {
Resize(1, num_rows);
for (int i = 0; i < num_columns_; ++i) {
data_[i] = src[i][column_index];
@@ -119,7 +121,7 @@ class Matrix {
const T* const* elements() const { return &elements_[0]; }
T Trace() {
- CHECK_EQ(num_rows_, num_columns_);
+ RTC_CHECK_EQ(num_rows_, num_columns_);
T trace = 0;
for (int i = 0; i < num_rows_; ++i) {
@@ -136,8 +138,8 @@ class Matrix {
}
Matrix& Transpose(const Matrix& operand) {
- CHECK_EQ(operand.num_rows_, num_columns_);
- CHECK_EQ(operand.num_columns_, num_rows_);
+ RTC_CHECK_EQ(operand.num_rows_, num_columns_);
+ RTC_CHECK_EQ(operand.num_columns_, num_rows_);
return Transpose(operand.elements());
}
@@ -158,8 +160,8 @@ class Matrix {
}
Matrix& Add(const Matrix& operand) {
- CHECK_EQ(num_rows_, operand.num_rows_);
- CHECK_EQ(num_columns_, operand.num_columns_);
+ RTC_CHECK_EQ(num_rows_, operand.num_rows_);
+ RTC_CHECK_EQ(num_columns_, operand.num_columns_);
for (size_t i = 0; i < data_.size(); ++i) {
data_[i] += operand.data_[i];
@@ -174,8 +176,8 @@ class Matrix {
}
Matrix& Subtract(const Matrix& operand) {
- CHECK_EQ(num_rows_, operand.num_rows_);
- CHECK_EQ(num_columns_, operand.num_columns_);
+ RTC_CHECK_EQ(num_rows_, operand.num_rows_);
+ RTC_CHECK_EQ(num_columns_, operand.num_columns_);
for (size_t i = 0; i < data_.size(); ++i) {
data_[i] -= operand.data_[i];
@@ -190,8 +192,8 @@ class Matrix {
}
Matrix& PointwiseMultiply(const Matrix& operand) {
- CHECK_EQ(num_rows_, operand.num_rows_);
- CHECK_EQ(num_columns_, operand.num_columns_);
+ RTC_CHECK_EQ(num_rows_, operand.num_rows_);
+ RTC_CHECK_EQ(num_columns_, operand.num_columns_);
for (size_t i = 0; i < data_.size(); ++i) {
data_[i] *= operand.data_[i];
@@ -206,8 +208,8 @@ class Matrix {
}
Matrix& PointwiseDivide(const Matrix& operand) {
- CHECK_EQ(num_rows_, operand.num_rows_);
- CHECK_EQ(num_columns_, operand.num_columns_);
+ RTC_CHECK_EQ(num_rows_, operand.num_rows_);
+ RTC_CHECK_EQ(num_columns_, operand.num_columns_);
for (size_t i = 0; i < data_.size(); ++i) {
data_[i] /= operand.data_[i];
@@ -261,15 +263,15 @@ class Matrix {
}
Matrix& Multiply(const Matrix& lhs, const Matrix& rhs) {
- CHECK_EQ(lhs.num_columns_, rhs.num_rows_);
- CHECK_EQ(num_rows_, lhs.num_rows_);
- CHECK_EQ(num_columns_, rhs.num_columns_);
+ RTC_CHECK_EQ(lhs.num_columns_, rhs.num_rows_);
+ RTC_CHECK_EQ(num_rows_, lhs.num_rows_);
+ RTC_CHECK_EQ(num_columns_, rhs.num_columns_);
return Multiply(lhs.elements(), rhs.num_rows_, rhs.elements());
}
Matrix& Multiply(const Matrix& rhs) {
- CHECK_EQ(num_columns_, rhs.num_rows_);
+ RTC_CHECK_EQ(num_columns_, rhs.num_rows_);
CopyDataToScratch();
Resize(num_rows_, rhs.num_columns_);
@@ -358,7 +360,7 @@ class Matrix {
return *this;
}
- DISALLOW_COPY_AND_ASSIGN(Matrix);
+ RTC_DISALLOW_COPY_AND_ASSIGN(Matrix);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_processing/beamformer/mock_nonlinear_beamformer.h b/chromium/third_party/webrtc/modules/audio_processing/beamformer/mock_nonlinear_beamformer.h
index eb05ecdab37..e2b4417c13a 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/beamformer/mock_nonlinear_beamformer.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/beamformer/mock_nonlinear_beamformer.h
@@ -20,11 +20,13 @@ namespace webrtc {
class MockNonlinearBeamformer : public NonlinearBeamformer {
public:
- explicit MockNonlinearBeamformer(const std::vector<Point>& array_geometry);
+ explicit MockNonlinearBeamformer(const std::vector<Point>& array_geometry)
+ : NonlinearBeamformer(array_geometry) {}
MOCK_METHOD2(Initialize, void(int chunk_size_ms, int sample_rate_hz));
MOCK_METHOD2(ProcessChunk, void(const ChannelBuffer<float>& input,
ChannelBuffer<float>* output));
+ MOCK_METHOD1(IsInBeam, bool(const SphericalPointf& spherical_point));
MOCK_METHOD0(is_target_present, bool());
};
diff --git a/chromium/third_party/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.cc b/chromium/third_party/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.cc
index 66ad6259a36..da7ad0da59c 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.cc
+++ b/chromium/third_party/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.cc
@@ -80,9 +80,9 @@ const float kHoldTargetSeconds = 0.25f;
// The returned norm is clamped to be non-negative.
float Norm(const ComplexMatrix<float>& mat,
const ComplexMatrix<float>& norm_mat) {
- CHECK_EQ(norm_mat.num_rows(), 1);
- CHECK_EQ(norm_mat.num_columns(), mat.num_rows());
- CHECK_EQ(norm_mat.num_columns(), mat.num_columns());
+ RTC_CHECK_EQ(norm_mat.num_rows(), 1);
+ RTC_CHECK_EQ(norm_mat.num_columns(), mat.num_rows());
+ RTC_CHECK_EQ(norm_mat.num_columns(), mat.num_columns());
complex<float> first_product = complex<float>(0.f, 0.f);
complex<float> second_product = complex<float>(0.f, 0.f);
@@ -103,9 +103,9 @@ float Norm(const ComplexMatrix<float>& mat,
// Does conjugate(|lhs|) * |rhs| for row vectors |lhs| and |rhs|.
complex<float> ConjugateDotProduct(const ComplexMatrix<float>& lhs,
const ComplexMatrix<float>& rhs) {
- CHECK_EQ(lhs.num_rows(), 1);
- CHECK_EQ(rhs.num_rows(), 1);
- CHECK_EQ(lhs.num_columns(), rhs.num_columns());
+ RTC_CHECK_EQ(lhs.num_rows(), 1);
+ RTC_CHECK_EQ(rhs.num_rows(), 1);
+ RTC_CHECK_EQ(lhs.num_columns(), rhs.num_columns());
const complex<float>* const* lhs_elements = lhs.elements();
const complex<float>* const* rhs_elements = rhs.elements();
@@ -119,8 +119,8 @@ complex<float> ConjugateDotProduct(const ComplexMatrix<float>& lhs,
}
// Works for positive numbers only.
-int Round(float x) {
- return std::floor(x + 0.5f);
+size_t Round(float x) {
+ return static_cast<size_t>(std::floor(x + 0.5f));
}
// Calculates the sum of absolute values of a complex matrix.
@@ -151,9 +151,9 @@ float SumSquares(const ComplexMatrix<float>& mat) {
// Does |out| = |in|.' * conj(|in|) for row vector |in|.
void TransposedConjugatedProduct(const ComplexMatrix<float>& in,
ComplexMatrix<float>* out) {
- CHECK_EQ(in.num_rows(), 1);
- CHECK_EQ(out->num_rows(), in.num_columns());
- CHECK_EQ(out->num_columns(), in.num_columns());
+ RTC_CHECK_EQ(in.num_rows(), 1);
+ RTC_CHECK_EQ(out->num_rows(), in.num_columns());
+ RTC_CHECK_EQ(out->num_columns(), in.num_columns());
const complex<float>* in_elements = in.elements()[0];
complex<float>* const* out_elements = out->elements();
for (int i = 0; i < out->num_rows(); ++i) {
@@ -179,6 +179,9 @@ std::vector<Point> GetCenteredArray(std::vector<Point> array_geometry) {
} // namespace
+// static
+const size_t NonlinearBeamformer::kNumFreqBins;
+
NonlinearBeamformer::NonlinearBeamformer(
const std::vector<Point>& array_geometry)
: num_input_channels_(array_geometry.size()),
@@ -187,7 +190,8 @@ NonlinearBeamformer::NonlinearBeamformer(
}
void NonlinearBeamformer::Initialize(int chunk_size_ms, int sample_rate_hz) {
- chunk_length_ = sample_rate_hz / (1000.f / chunk_size_ms);
+ chunk_length_ =
+ static_cast<size_t>(sample_rate_hz / (1000.f / chunk_size_ms));
sample_rate_hz_ = sample_rate_hz;
low_mean_start_bin_ = Round(kLowMeanStartHz * kFftSize / sample_rate_hz_);
low_mean_end_bin_ = Round(kLowMeanEndHz * kFftSize / sample_rate_hz_);
@@ -203,11 +207,11 @@ void NonlinearBeamformer::Initialize(int chunk_size_ms, int sample_rate_hz) {
// constant ^ ^
// low_mean_end_bin_ high_mean_end_bin_
//
- DCHECK_GT(low_mean_start_bin_, 0);
- DCHECK_LT(low_mean_start_bin_, low_mean_end_bin_);
- DCHECK_LT(low_mean_end_bin_, high_mean_end_bin_);
- DCHECK_LT(high_mean_start_bin_, high_mean_end_bin_);
- DCHECK_LT(high_mean_end_bin_, kNumFreqBins - 1);
+ RTC_DCHECK_GT(low_mean_start_bin_, 0U);
+ RTC_DCHECK_LT(low_mean_start_bin_, low_mean_end_bin_);
+ RTC_DCHECK_LT(low_mean_end_bin_, high_mean_end_bin_);
+ RTC_DCHECK_LT(high_mean_start_bin_, high_mean_end_bin_);
+ RTC_DCHECK_LT(high_mean_end_bin_, kNumFreqBins - 1);
high_pass_postfilter_mask_ = 1.f;
is_target_present_ = false;
@@ -222,7 +226,7 @@ void NonlinearBeamformer::Initialize(int chunk_size_ms, int sample_rate_hz) {
kFftSize,
kFftSize / 2,
this));
- for (int i = 0; i < kNumFreqBins; ++i) {
+ for (size_t i = 0; i < kNumFreqBins; ++i) {
time_smooth_mask_[i] = 1.f;
final_mask_[i] = 1.f;
float freq_hz = (static_cast<float>(i) / kFftSize) * sample_rate_hz_;
@@ -237,7 +241,7 @@ void NonlinearBeamformer::Initialize(int chunk_size_ms, int sample_rate_hz) {
InitTargetCovMats();
InitInterfCovMats();
- for (int i = 0; i < kNumFreqBins; ++i) {
+ for (size_t i = 0; i < kNumFreqBins; ++i) {
rxiws_[i] = Norm(target_cov_mats_[i], delay_sum_masks_[i]);
rpsiws_[i] = Norm(interf_cov_mats_[i], delay_sum_masks_[i]);
reflected_rpsiws_[i] =
@@ -246,7 +250,7 @@ void NonlinearBeamformer::Initialize(int chunk_size_ms, int sample_rate_hz) {
}
void NonlinearBeamformer::InitDelaySumMasks() {
- for (int f_ix = 0; f_ix < kNumFreqBins; ++f_ix) {
+ for (size_t f_ix = 0; f_ix < kNumFreqBins; ++f_ix) {
delay_sum_masks_[f_ix].Resize(1, num_input_channels_);
CovarianceMatrixGenerator::PhaseAlignmentMasks(f_ix,
kFftSize,
@@ -266,7 +270,7 @@ void NonlinearBeamformer::InitDelaySumMasks() {
}
void NonlinearBeamformer::InitTargetCovMats() {
- for (int i = 0; i < kNumFreqBins; ++i) {
+ for (size_t i = 0; i < kNumFreqBins; ++i) {
target_cov_mats_[i].Resize(num_input_channels_, num_input_channels_);
TransposedConjugatedProduct(delay_sum_masks_[i], &target_cov_mats_[i]);
complex_f normalization_factor = target_cov_mats_[i].Trace();
@@ -275,7 +279,7 @@ void NonlinearBeamformer::InitTargetCovMats() {
}
void NonlinearBeamformer::InitInterfCovMats() {
- for (int i = 0; i < kNumFreqBins; ++i) {
+ for (size_t i = 0; i < kNumFreqBins; ++i) {
interf_cov_mats_[i].Resize(num_input_channels_, num_input_channels_);
ComplexMatrixF uniform_cov_mat(num_input_channels_, num_input_channels_);
ComplexMatrixF angled_cov_mat(num_input_channels_, num_input_channels_);
@@ -308,8 +312,8 @@ void NonlinearBeamformer::InitInterfCovMats() {
void NonlinearBeamformer::ProcessChunk(const ChannelBuffer<float>& input,
ChannelBuffer<float>* output) {
- DCHECK_EQ(input.num_channels(), num_input_channels_);
- DCHECK_EQ(input.num_frames_per_band(), chunk_length_);
+ RTC_DCHECK_EQ(input.num_channels(), num_input_channels_);
+ RTC_DCHECK_EQ(input.num_frames_per_band(), chunk_length_);
float old_high_pass_mask = high_pass_postfilter_mask_;
lapped_transform_->ProcessChunk(input.channels(0), output->channels(0));
@@ -320,9 +324,9 @@ void NonlinearBeamformer::ProcessChunk(const ChannelBuffer<float>& input,
input.num_frames_per_band();
// Apply delay and sum and post-filter in the time domain. WARNING: only works
// because delay-and-sum is not frequency dependent.
- for (int i = 1; i < input.num_bands(); ++i) {
+ for (size_t i = 1; i < input.num_bands(); ++i) {
float smoothed_mask = old_high_pass_mask;
- for (int j = 0; j < input.num_frames_per_band(); ++j) {
+ for (size_t j = 0; j < input.num_frames_per_band(); ++j) {
smoothed_mask += ramp_increment;
// Applying the delay and sum (at zero degrees, this is equivalent to
@@ -345,17 +349,17 @@ bool NonlinearBeamformer::IsInBeam(const SphericalPointf& spherical_point) {
void NonlinearBeamformer::ProcessAudioBlock(const complex_f* const* input,
int num_input_channels,
- int num_freq_bins,
+ size_t num_freq_bins,
int num_output_channels,
complex_f* const* output) {
- CHECK_EQ(num_freq_bins, kNumFreqBins);
- CHECK_EQ(num_input_channels, num_input_channels_);
- CHECK_EQ(num_output_channels, 1);
+ RTC_CHECK_EQ(num_freq_bins, kNumFreqBins);
+ RTC_CHECK_EQ(num_input_channels, num_input_channels_);
+ RTC_CHECK_EQ(num_output_channels, 1);
// Calculating the post-filter masks. Note that we need two for each
// frequency bin to account for the positive and negative interferer
// angle.
- for (int i = low_mean_start_bin_; i <= high_mean_end_bin_; ++i) {
+ for (size_t i = low_mean_start_bin_; i <= high_mean_end_bin_; ++i) {
eig_m_.CopyFromColumn(input, i, num_input_channels_);
float eig_m_norm_factor = std::sqrt(SumSquares(eig_m_));
if (eig_m_norm_factor != 0.f) {
@@ -420,7 +424,7 @@ float NonlinearBeamformer::CalculatePostfilterMask(
void NonlinearBeamformer::ApplyMasks(const complex_f* const* input,
complex_f* const* output) {
complex_f* output_channel = output[0];
- for (int f_ix = 0; f_ix < kNumFreqBins; ++f_ix) {
+ for (size_t f_ix = 0; f_ix < kNumFreqBins; ++f_ix) {
output_channel[f_ix] = complex_f(0.f, 0.f);
const complex_f* delay_sum_mask_els =
@@ -435,7 +439,7 @@ void NonlinearBeamformer::ApplyMasks(const complex_f* const* input,
// Smooth new_mask_ into time_smooth_mask_.
void NonlinearBeamformer::ApplyMaskTimeSmoothing() {
- for (int i = low_mean_start_bin_; i <= high_mean_end_bin_; ++i) {
+ for (size_t i = low_mean_start_bin_; i <= high_mean_end_bin_; ++i) {
time_smooth_mask_[i] = kMaskTimeSmoothAlpha * new_mask_[i] +
(1 - kMaskTimeSmoothAlpha) * time_smooth_mask_[i];
}
@@ -460,13 +464,13 @@ void NonlinearBeamformer::ApplyMaskFrequencySmoothing() {
// |------|------------|------|
// ^<------------------^
std::copy(time_smooth_mask_, time_smooth_mask_ + kNumFreqBins, final_mask_);
- for (int i = low_mean_start_bin_; i < kNumFreqBins; ++i) {
+ for (size_t i = low_mean_start_bin_; i < kNumFreqBins; ++i) {
final_mask_[i] = kMaskFrequencySmoothAlpha * final_mask_[i] +
(1 - kMaskFrequencySmoothAlpha) * final_mask_[i - 1];
}
- for (int i = high_mean_end_bin_; i >= 0; --i) {
- final_mask_[i] = kMaskFrequencySmoothAlpha * final_mask_[i] +
- (1 - kMaskFrequencySmoothAlpha) * final_mask_[i + 1];
+ for (size_t i = high_mean_end_bin_ + 1; i > 0; --i) {
+ final_mask_[i - 1] = kMaskFrequencySmoothAlpha * final_mask_[i - 1] +
+ (1 - kMaskFrequencySmoothAlpha) * final_mask_[i];
}
}
@@ -488,17 +492,17 @@ void NonlinearBeamformer::ApplyHighFrequencyCorrection() {
}
// Compute mean over the given range of time_smooth_mask_, [first, last).
-float NonlinearBeamformer::MaskRangeMean(int first, int last) {
- DCHECK_GT(last, first);
+float NonlinearBeamformer::MaskRangeMean(size_t first, size_t last) {
+ RTC_DCHECK_GT(last, first);
const float sum = std::accumulate(time_smooth_mask_ + first,
time_smooth_mask_ + last, 0.f);
return sum / (last - first);
}
void NonlinearBeamformer::EstimateTargetPresence() {
- const int quantile =
+ const size_t quantile = static_cast<size_t>(
(high_mean_end_bin_ - low_mean_start_bin_) * kMaskQuantile +
- low_mean_start_bin_;
+ low_mean_start_bin_);
std::nth_element(new_mask_ + low_mean_start_bin_, new_mask_ + quantile,
new_mask_ + high_mean_end_bin_ + 1);
if (new_mask_[quantile] > kMaskTargetThreshold) {
diff --git a/chromium/third_party/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.h b/chromium/third_party/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.h
index f632a60f67a..46c68bf808c 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.h
@@ -15,7 +15,6 @@
#include "webrtc/common_audio/lapped_transform.h"
#include "webrtc/common_audio/channel_buffer.h"
-#include "webrtc/modules/audio_processing/beamformer/array_util.h"
#include "webrtc/modules/audio_processing/beamformer/beamformer.h"
#include "webrtc/modules/audio_processing/beamformer/complex_matrix.h"
@@ -61,7 +60,7 @@ class NonlinearBeamformer
// happens. Implements LappedTransform::Callback.
void ProcessAudioBlock(const complex<float>* const* input,
int num_input_channels,
- int num_freq_bins,
+ size_t num_freq_bins,
int num_output_channels,
complex<float>* const* output) override;
@@ -101,18 +100,18 @@ class NonlinearBeamformer
void ApplyHighFrequencyCorrection();
// Compute the means needed for the above frequency correction.
- float MaskRangeMean(int start_bin, int end_bin);
+ float MaskRangeMean(size_t start_bin, size_t end_bin);
// Applies both sets of masks to |input| and store in |output|.
void ApplyMasks(const complex_f* const* input, complex_f* const* output);
void EstimateTargetPresence();
- static const int kFftSize = 256;
- static const int kNumFreqBins = kFftSize / 2 + 1;
+ static const size_t kFftSize = 256;
+ static const size_t kNumFreqBins = kFftSize / 2 + 1;
// Deals with the fft transform and blocking.
- int chunk_length_;
+ size_t chunk_length_;
rtc::scoped_ptr<LappedTransform> lapped_transform_;
float window_[kFftSize];
@@ -123,10 +122,10 @@ class NonlinearBeamformer
const std::vector<Point> array_geometry_;
// Calculated based on user-input and constants in the .cc file.
- int low_mean_start_bin_;
- int low_mean_end_bin_;
- int high_mean_start_bin_;
- int high_mean_end_bin_;
+ size_t low_mean_start_bin_;
+ size_t low_mean_end_bin_;
+ size_t high_mean_start_bin_;
+ size_t high_mean_end_bin_;
// Quickly varying mask updated every block.
float new_mask_[kNumFreqBins];
@@ -168,9 +167,9 @@ class NonlinearBeamformer
bool is_target_present_;
// Number of blocks after which the data is considered interference if the
// mask does not pass |kMaskSignalThreshold|.
- int hold_target_blocks_;
+ size_t hold_target_blocks_;
// Number of blocks since the last mask that passed |kMaskSignalThreshold|.
- int interference_blocks_count_;
+ size_t interference_blocks_count_;
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer_test.cc b/chromium/third_party/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer_test.cc
index 82a6cb050bc..cc752485e90 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer_test.cc
+++ b/chromium/third_party/webrtc/modules/audio_processing/beamformer/nonlinear_beamformer_test.cc
@@ -47,7 +47,7 @@ int main(int argc, char* argv[]) {
const size_t num_mics = in_file.num_channels();
const std::vector<Point> array_geometry =
ParseArrayGeometry(FLAGS_mic_positions, num_mics);
- CHECK_EQ(array_geometry.size(), num_mics);
+ RTC_CHECK_EQ(array_geometry.size(), num_mics);
NonlinearBeamformer bf(array_geometry);
bf.Initialize(kChunkSizeMs, in_file.sample_rate());
diff --git a/chromium/third_party/webrtc/modules/audio_processing/debug.proto b/chromium/third_party/webrtc/modules/audio_processing/debug.proto
index dce2f792093..227271298c8 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/debug.proto
+++ b/chromium/third_party/webrtc/modules/audio_processing/debug.proto
@@ -2,6 +2,8 @@ syntax = "proto2";
option optimize_for = LITE_RUNTIME;
package webrtc.audioproc;
+// Contains the format of input/output/reverse audio. An Init message is added
+// when any of the fields are changed.
message Init {
optional int32 sample_rate = 1;
optional int32 device_sample_rate = 2 [deprecated=true];
@@ -39,11 +41,41 @@ message Stream {
repeated bytes output_channel = 8;
}
+// Contains the configurations of various APM component. A Config message is
+// added when any of the fields are changed.
+message Config {
+ // Next field number 17.
+ // Acoustic echo canceler.
+ optional bool aec_enabled = 1;
+ optional bool aec_delay_agnostic_enabled = 2;
+ optional bool aec_drift_compensation_enabled = 3;
+ optional bool aec_extended_filter_enabled = 4;
+ optional int32 aec_suppression_level = 5;
+ // Mobile AEC.
+ optional bool aecm_enabled = 6;
+ optional bool aecm_comfort_noise_enabled = 7;
+ optional int32 aecm_routing_mode = 8;
+ // Automatic gain controller.
+ optional bool agc_enabled = 9;
+ optional int32 agc_mode = 10;
+ optional bool agc_limiter_enabled = 11;
+ optional bool noise_robust_agc_enabled = 12;
+ // High pass filter.
+ optional bool hpf_enabled = 13;
+ // Noise suppression.
+ optional bool ns_enabled = 14;
+ optional int32 ns_level = 15;
+ // Transient suppression.
+ optional bool transient_suppression_enabled = 16;
+}
+
message Event {
enum Type {
INIT = 0;
REVERSE_STREAM = 1;
STREAM = 2;
+ CONFIG = 3;
+ UNKNOWN_EVENT = 4;
}
required Type type = 1;
@@ -51,4 +83,5 @@ message Event {
optional Init init = 2;
optional ReverseStream reverse_stream = 3;
optional Stream stream = 4;
+ optional Config config = 5;
}
diff --git a/chromium/third_party/webrtc/modules/audio_processing/echo_cancellation_impl.cc b/chromium/third_party/webrtc/modules/audio_processing/echo_cancellation_impl.cc
index f13ea8bde69..567d9a47f80 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/echo_cancellation_impl.cc
+++ b/chromium/third_party/webrtc/modules/audio_processing/echo_cancellation_impl.cc
@@ -91,7 +91,7 @@ int EchoCancellationImpl::ProcessRenderAudio(const AudioBuffer* audio) {
err = WebRtcAec_BufferFarend(
my_handle,
audio->split_bands_const_f(j)[kBand0To8kHz],
- static_cast<int16_t>(audio->num_frames_per_band()));
+ audio->num_frames_per_band());
if (err != apm_->kNoError) {
return GetHandleError(my_handle); // TODO(ajm): warning possible?
@@ -133,7 +133,7 @@ int EchoCancellationImpl::ProcessCaptureAudio(AudioBuffer* audio) {
audio->split_bands_const_f(i),
audio->num_bands(),
audio->split_bands_f(i),
- static_cast<int16_t>(audio->num_frames_per_band()),
+ audio->num_frames_per_band(),
apm_->stream_delay_ms(),
stream_drift_samples_);
@@ -280,6 +280,14 @@ bool EchoCancellationImpl::is_delay_logging_enabled() const {
return delay_logging_enabled_;
}
+bool EchoCancellationImpl::is_delay_agnostic_enabled() const {
+ return delay_agnostic_enabled_;
+}
+
+bool EchoCancellationImpl::is_extended_filter_enabled() const {
+ return extended_filter_enabled_;
+}
+
// TODO(bjornv): How should we handle the multi-channel case?
int EchoCancellationImpl::GetDelayMetrics(int* median, int* std) {
float fraction_poor_delays = 0;
diff --git a/chromium/third_party/webrtc/modules/audio_processing/echo_cancellation_impl.h b/chromium/third_party/webrtc/modules/audio_processing/echo_cancellation_impl.h
index 9c2b32c473b..070dcabc5d6 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/echo_cancellation_impl.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/echo_cancellation_impl.h
@@ -32,19 +32,22 @@ class EchoCancellationImpl : public EchoCancellation,
// EchoCancellation implementation.
bool is_enabled() const override;
int stream_drift_samples() const override;
+ SuppressionLevel suppression_level() const override;
+ bool is_drift_compensation_enabled() const override;
// ProcessingComponent implementation.
int Initialize() override;
void SetExtraOptions(const Config& config) override;
+ bool is_delay_agnostic_enabled() const;
+ bool is_extended_filter_enabled() const;
+
private:
// EchoCancellation implementation.
int Enable(bool enable) override;
int enable_drift_compensation(bool enable) override;
- bool is_drift_compensation_enabled() const override;
void set_stream_drift_samples(int drift) override;
int set_suppression_level(SuppressionLevel level) override;
- SuppressionLevel suppression_level() const override;
int enable_metrics(bool enable) override;
bool are_metrics_enabled() const override;
bool stream_has_echo() const override;
diff --git a/chromium/third_party/webrtc/modules/audio_processing/echo_control_mobile_impl.cc b/chromium/third_party/webrtc/modules/audio_processing/echo_control_mobile_impl.cc
index 33205eb7443..8d5ec9c4e29 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/echo_control_mobile_impl.cc
+++ b/chromium/third_party/webrtc/modules/audio_processing/echo_control_mobile_impl.cc
@@ -96,7 +96,7 @@ int EchoControlMobileImpl::ProcessRenderAudio(const AudioBuffer* audio) {
err = WebRtcAecm_BufferFarend(
my_handle,
audio->split_bands_const(j)[kBand0To8kHz],
- static_cast<int16_t>(audio->num_frames_per_band()));
+ audio->num_frames_per_band());
if (err != apm_->kNoError) {
return GetHandleError(my_handle); // TODO(ajm): warning possible?
@@ -141,7 +141,7 @@ int EchoControlMobileImpl::ProcessCaptureAudio(AudioBuffer* audio) {
noisy,
clean,
audio->split_bands(i)[kBand0To8kHz],
- static_cast<int16_t>(audio->num_frames_per_band()),
+ audio->num_frames_per_band(),
apm_->stream_delay_ms());
if (err != apm_->kNoError) {
diff --git a/chromium/third_party/webrtc/modules/audio_processing/echo_control_mobile_impl.h b/chromium/third_party/webrtc/modules/audio_processing/echo_control_mobile_impl.h
index f399f480b2b..da7022545f2 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/echo_control_mobile_impl.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/echo_control_mobile_impl.h
@@ -31,6 +31,8 @@ class EchoControlMobileImpl : public EchoControlMobile,
// EchoControlMobile implementation.
bool is_enabled() const override;
+ RoutingMode routing_mode() const override;
+ bool is_comfort_noise_enabled() const override;
// ProcessingComponent implementation.
int Initialize() override;
@@ -39,9 +41,7 @@ class EchoControlMobileImpl : public EchoControlMobile,
// EchoControlMobile implementation.
int Enable(bool enable) override;
int set_routing_mode(RoutingMode mode) override;
- RoutingMode routing_mode() const override;
int enable_comfort_noise(bool enable) override;
- bool is_comfort_noise_enabled() const override;
int SetEchoPath(const void* echo_path, size_t size_bytes) override;
int GetEchoPath(void* echo_path, size_t size_bytes) const override;
diff --git a/chromium/third_party/webrtc/modules/audio_processing/gain_control_impl.cc b/chromium/third_party/webrtc/modules/audio_processing/gain_control_impl.cc
index 398cf5c5fd7..8a3612dce5a 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/gain_control_impl.cc
+++ b/chromium/third_party/webrtc/modules/audio_processing/gain_control_impl.cc
@@ -64,7 +64,7 @@ int GainControlImpl::ProcessRenderAudio(AudioBuffer* audio) {
int err = WebRtcAgc_AddFarend(
my_handle,
audio->mixed_low_pass_data(),
- static_cast<int16_t>(audio->num_frames_per_band()));
+ audio->num_frames_per_band());
if (err != apm_->kNoError) {
return GetHandleError(my_handle);
@@ -92,7 +92,7 @@ int GainControlImpl::AnalyzeCaptureAudio(AudioBuffer* audio) {
my_handle,
audio->split_bands(i),
audio->num_bands(),
- static_cast<int16_t>(audio->num_frames_per_band()));
+ audio->num_frames_per_band());
if (err != apm_->kNoError) {
return GetHandleError(my_handle);
@@ -108,7 +108,7 @@ int GainControlImpl::AnalyzeCaptureAudio(AudioBuffer* audio) {
my_handle,
audio->split_bands(i),
audio->num_bands(),
- static_cast<int16_t>(audio->num_frames_per_band()),
+ audio->num_frames_per_band(),
analog_capture_level_,
&capture_level_out);
@@ -146,7 +146,7 @@ int GainControlImpl::ProcessCaptureAudio(AudioBuffer* audio) {
my_handle,
audio->split_bands_const(i),
audio->num_bands(),
- static_cast<int16_t>(audio->num_frames_per_band()),
+ audio->num_frames_per_band(),
audio->split_bands(i),
capture_levels_[i],
&capture_level_out,
diff --git a/chromium/third_party/webrtc/modules/audio_processing/gain_control_impl.h b/chromium/third_party/webrtc/modules/audio_processing/gain_control_impl.h
index d64894367c9..f24d200cf22 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/gain_control_impl.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/gain_control_impl.h
@@ -38,19 +38,19 @@ class GainControlImpl : public GainControl,
// GainControl implementation.
bool is_enabled() const override;
int stream_analog_level() override;
+ bool is_limiter_enabled() const override;
+ Mode mode() const override;
private:
// GainControl implementation.
int Enable(bool enable) override;
int set_stream_analog_level(int level) override;
int set_mode(Mode mode) override;
- Mode mode() const override;
int set_target_level_dbfs(int level) override;
int target_level_dbfs() const override;
int set_compression_gain_db(int gain) override;
int compression_gain_db() const override;
int enable_limiter(bool enable) override;
- bool is_limiter_enabled() const override;
int set_analog_level_limits(int minimum, int maximum) override;
int analog_level_minimum() const override;
int analog_level_maximum() const override;
diff --git a/chromium/third_party/webrtc/modules/audio_processing/high_pass_filter_impl.cc b/chromium/third_party/webrtc/modules/audio_processing/high_pass_filter_impl.cc
index 588ba414159..6302f13fcf9 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/high_pass_filter_impl.cc
+++ b/chromium/third_party/webrtc/modules/audio_processing/high_pass_filter_impl.cc
@@ -47,7 +47,7 @@ int InitializeFilter(FilterState* hpf, int sample_rate_hz) {
return AudioProcessing::kNoError;
}
-int Filter(FilterState* hpf, int16_t* data, int length) {
+int Filter(FilterState* hpf, int16_t* data, size_t length) {
assert(hpf != NULL);
int32_t tmp_int32 = 0;
@@ -55,7 +55,7 @@ int Filter(FilterState* hpf, int16_t* data, int length) {
int16_t* x = hpf->x;
const int16_t* ba = hpf->ba;
- for (int i = 0; i < length; i++) {
+ for (size_t i = 0; i < length; i++) {
// y[i] = b[0] * x[i] + b[1] * x[i-1] + b[2] * x[i-2]
// + -a[1] * y[i-1] + -a[2] * y[i-2];
diff --git a/chromium/third_party/webrtc/modules/audio_processing/include/audio_processing.h b/chromium/third_party/webrtc/modules/audio_processing/include/audio_processing.h
index 6fa1c96c077..318b2f89533 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/include/audio_processing.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/include/audio_processing.h
@@ -15,6 +15,7 @@
#include <stdio.h> // FILE
#include <vector>
+#include "webrtc/base/arraysize.h"
#include "webrtc/base/platform_file.h"
#include "webrtc/common.h"
#include "webrtc/modules/audio_processing/beamformer/array_util.h"
@@ -29,6 +30,9 @@ class AudioFrame;
template<typename T>
class Beamformer;
+class StreamConfig;
+class ProcessingConfig;
+
class EchoCancellation;
class EchoControlMobile;
class GainControl;
@@ -84,7 +88,7 @@ static const int kAgcStartupMinVolume = 0;
#endif // defined(WEBRTC_CHROMIUM_BUILD)
struct ExperimentalAgc {
ExperimentalAgc() : enabled(true), startup_min_volume(kAgcStartupMinVolume) {}
- ExperimentalAgc(bool enabled)
+ explicit ExperimentalAgc(bool enabled)
: enabled(enabled), startup_min_volume(kAgcStartupMinVolume) {}
ExperimentalAgc(bool enabled, int startup_min_volume)
: enabled(enabled), startup_min_volume(startup_min_volume) {}
@@ -113,17 +117,18 @@ struct Beamforming {
const std::vector<Point> array_geometry;
};
-// Use to enable 48kHz support in audio processing. Must be provided through the
-// constructor. It will have no impact if used with
+// Use to enable intelligibility enhancer in audio processing. Must be provided
+// though the constructor. It will have no impact if used with
// AudioProcessing::SetExtraOptions().
-struct AudioProcessing48kHzSupport {
- AudioProcessing48kHzSupport() : enabled(true) {}
- explicit AudioProcessing48kHzSupport(bool enabled) : enabled(enabled) {}
+//
+// Note: If enabled and the reverse stream has more than one output channel,
+// the reverse stream will become an upmixed mono signal.
+struct Intelligibility {
+ Intelligibility() : enabled(false) {}
+ explicit Intelligibility(bool enabled) : enabled(enabled) {}
bool enabled;
};
-static const int kAudioProcMaxNativeSampleRateHz = 32000;
-
// The Audio Processing Module (APM) provides a collection of voice processing
// components designed for real-time communications software.
//
@@ -199,6 +204,7 @@ static const int kAudioProcMaxNativeSampleRateHz = 32000;
//
class AudioProcessing {
public:
+ // TODO(mgraczyk): Remove once all methods that use ChannelLayout are gone.
enum ChannelLayout {
kMono,
// Left, right.
@@ -236,10 +242,17 @@ class AudioProcessing {
// The int16 interfaces require:
// - only |NativeRate|s be used
// - that the input, output and reverse rates must match
- // - that |output_layout| matches |input_layout|
+ // - that |processing_config.output_stream()| matches
+ // |processing_config.input_stream()|.
//
- // The float interfaces accept arbitrary rates and support differing input
- // and output layouts, but the output may only remove channels, not add.
+ // The float interfaces accept arbitrary rates and support differing input and
+ // output layouts, but the output must have either one channel or the same
+ // number of channels as the input.
+ virtual int Initialize(const ProcessingConfig& processing_config) = 0;
+
+ // Initialize with unpacked parameters. See Initialize() above for details.
+ //
+ // TODO(mgraczyk): Remove once clients are updated to use the new interface.
virtual int Initialize(int input_sample_rate_hz,
int output_sample_rate_hz,
int reverse_sample_rate_hz,
@@ -251,15 +264,6 @@ class AudioProcessing {
// ensures the options are applied immediately.
virtual void SetExtraOptions(const Config& config) = 0;
- // DEPRECATED.
- // TODO(ajm): Remove after Chromium has upgraded to using Initialize().
- virtual int set_sample_rate_hz(int rate) = 0;
- // TODO(ajm): Remove after voice engine no longer requires it to resample
- // the reverse stream to the forward rate.
- virtual int input_sample_rate_hz() const = 0;
- // TODO(ajm): Remove after Chromium no longer depends on it.
- virtual int sample_rate_hz() const = 0;
-
// TODO(ajm): Only intended for internal use. Make private and friend the
// necessary classes?
virtual int proc_sample_rate_hz() const = 0;
@@ -273,7 +277,6 @@ class AudioProcessing {
// but some components may change behavior based on this information.
// Default false.
virtual void set_output_will_be_muted(bool muted) = 0;
- virtual bool output_will_be_muted() const = 0;
// Processes a 10 ms |frame| of the primary audio stream. On the client-side,
// this is the near-end (or captured) audio.
@@ -292,16 +295,30 @@ class AudioProcessing {
// |input_layout|. At output, the channels will be arranged according to
// |output_layout| at |output_sample_rate_hz| in |dest|.
//
- // The output layout may only remove channels, not add. |src| and |dest|
- // may use the same memory, if desired.
+ // The output layout must have one channel or as many channels as the input.
+ // |src| and |dest| may use the same memory, if desired.
+ //
+ // TODO(mgraczyk): Remove once clients are updated to use the new interface.
virtual int ProcessStream(const float* const* src,
- int samples_per_channel,
+ size_t samples_per_channel,
int input_sample_rate_hz,
ChannelLayout input_layout,
int output_sample_rate_hz,
ChannelLayout output_layout,
float* const* dest) = 0;
+ // Accepts deinterleaved float audio with the range [-1, 1]. Each element of
+ // |src| points to a channel buffer, arranged according to |input_stream|. At
+ // output, the channels will be arranged according to |output_stream| in
+ // |dest|.
+ //
+ // The output must have one channel or as many channels as the input. |src|
+ // and |dest| may use the same memory, if desired.
+ virtual int ProcessStream(const float* const* src,
+ const StreamConfig& input_config,
+ const StreamConfig& output_config,
+ float* const* dest) = 0;
+
// Analyzes a 10 ms |frame| of the reverse direction audio stream. The frame
// will not be modified. On the client-side, this is the far-end (or to be
// rendered) audio.
@@ -317,15 +334,29 @@ class AudioProcessing {
// |input_sample_rate_hz()|
//
// TODO(ajm): add const to input; requires an implementation fix.
+ // DEPRECATED: Use |ProcessReverseStream| instead.
+ // TODO(ekm): Remove once all users have updated to |ProcessReverseStream|.
virtual int AnalyzeReverseStream(AudioFrame* frame) = 0;
+ // Same as |AnalyzeReverseStream|, but may modify |frame| if intelligibility
+ // is enabled.
+ virtual int ProcessReverseStream(AudioFrame* frame) = 0;
+
// Accepts deinterleaved float audio with the range [-1, 1]. Each element
// of |data| points to a channel buffer, arranged according to |layout|.
+ // TODO(mgraczyk): Remove once clients are updated to use the new interface.
virtual int AnalyzeReverseStream(const float* const* data,
- int samples_per_channel,
- int sample_rate_hz,
+ size_t samples_per_channel,
+ int rev_sample_rate_hz,
ChannelLayout layout) = 0;
+ // Accepts deinterleaved float audio with the range [-1, 1]. Each element of
+ // |data| points to a channel buffer, arranged according to |reverse_config|.
+ virtual int ProcessReverseStream(const float* const* src,
+ const StreamConfig& reverse_input_config,
+ const StreamConfig& reverse_output_config,
+ float* const* dest) = 0;
+
// This must be called if and only if echo processing is enabled.
//
// Sets the |delay| in ms between AnalyzeReverseStream() receiving a far-end
@@ -346,7 +377,6 @@ class AudioProcessing {
// Call to signal that a key press occurred (true) or did not occur (false)
// with this chunk of audio.
virtual void set_stream_key_pressed(bool key_pressed) = 0;
- virtual bool stream_key_pressed() const = 0;
// Sets a delay |offset| in ms to add to the values passed in through
// set_stream_delay_ms(). May be positive or negative.
@@ -429,9 +459,121 @@ class AudioProcessing {
kSampleRate48kHz = 48000
};
+ static const int kNativeSampleRatesHz[];
+ static const size_t kNumNativeSampleRates;
+ static const int kMaxNativeSampleRateHz;
+ static const int kMaxAECMSampleRateHz;
+
static const int kChunkSizeMs = 10;
};
+class StreamConfig {
+ public:
+ // sample_rate_hz: The sampling rate of the stream.
+ //
+ // num_channels: The number of audio channels in the stream, excluding the
+ // keyboard channel if it is present. When passing a
+ // StreamConfig with an array of arrays T*[N],
+ //
+ // N == {num_channels + 1 if has_keyboard
+ // {num_channels if !has_keyboard
+ //
+ // has_keyboard: True if the stream has a keyboard channel. When has_keyboard
+ // is true, the last channel in any corresponding list of
+ // channels is the keyboard channel.
+ StreamConfig(int sample_rate_hz = 0,
+ int num_channels = 0,
+ bool has_keyboard = false)
+ : sample_rate_hz_(sample_rate_hz),
+ num_channels_(num_channels),
+ has_keyboard_(has_keyboard),
+ num_frames_(calculate_frames(sample_rate_hz)) {}
+
+ void set_sample_rate_hz(int value) {
+ sample_rate_hz_ = value;
+ num_frames_ = calculate_frames(value);
+ }
+ void set_num_channels(int value) { num_channels_ = value; }
+ void set_has_keyboard(bool value) { has_keyboard_ = value; }
+
+ int sample_rate_hz() const { return sample_rate_hz_; }
+
+ // The number of channels in the stream, not including the keyboard channel if
+ // present.
+ int num_channels() const { return num_channels_; }
+
+ bool has_keyboard() const { return has_keyboard_; }
+ size_t num_frames() const { return num_frames_; }
+ size_t num_samples() const { return num_channels_ * num_frames_; }
+
+ bool operator==(const StreamConfig& other) const {
+ return sample_rate_hz_ == other.sample_rate_hz_ &&
+ num_channels_ == other.num_channels_ &&
+ has_keyboard_ == other.has_keyboard_;
+ }
+
+ bool operator!=(const StreamConfig& other) const { return !(*this == other); }
+
+ private:
+ static size_t calculate_frames(int sample_rate_hz) {
+ return static_cast<size_t>(
+ AudioProcessing::kChunkSizeMs * sample_rate_hz / 1000);
+ }
+
+ int sample_rate_hz_;
+ int num_channels_;
+ bool has_keyboard_;
+ size_t num_frames_;
+};
+
+class ProcessingConfig {
+ public:
+ enum StreamName {
+ kInputStream,
+ kOutputStream,
+ kReverseInputStream,
+ kReverseOutputStream,
+ kNumStreamNames,
+ };
+
+ const StreamConfig& input_stream() const {
+ return streams[StreamName::kInputStream];
+ }
+ const StreamConfig& output_stream() const {
+ return streams[StreamName::kOutputStream];
+ }
+ const StreamConfig& reverse_input_stream() const {
+ return streams[StreamName::kReverseInputStream];
+ }
+ const StreamConfig& reverse_output_stream() const {
+ return streams[StreamName::kReverseOutputStream];
+ }
+
+ StreamConfig& input_stream() { return streams[StreamName::kInputStream]; }
+ StreamConfig& output_stream() { return streams[StreamName::kOutputStream]; }
+ StreamConfig& reverse_input_stream() {
+ return streams[StreamName::kReverseInputStream];
+ }
+ StreamConfig& reverse_output_stream() {
+ return streams[StreamName::kReverseOutputStream];
+ }
+
+ bool operator==(const ProcessingConfig& other) const {
+ for (int i = 0; i < StreamName::kNumStreamNames; ++i) {
+ if (this->streams[i] != other.streams[i]) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ bool operator!=(const ProcessingConfig& other) const {
+ return !(*this == other);
+ }
+
+ StreamConfig streams[StreamName::kNumStreamNames];
+};
+
// The acoustic echo cancellation (AEC) component provides better performance
// than AECM but also requires more processing power and is dependent on delay
// stability and reporting accuracy. As such it is well-suited and recommended
diff --git a/chromium/third_party/webrtc/modules/audio_processing/include/mock_audio_processing.h b/chromium/third_party/webrtc/modules/audio_processing/include/mock_audio_processing.h
index 480d0e34b2c..4ff52baf1c7 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/include/mock_audio_processing.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/include/mock_audio_processing.h
@@ -186,6 +186,8 @@ class MockAudioProcessing : public AudioProcessing {
ChannelLayout input_layout,
ChannelLayout output_layout,
ChannelLayout reverse_layout));
+ MOCK_METHOD1(Initialize,
+ int(const ProcessingConfig& processing_config));
MOCK_METHOD1(SetExtraOptions,
void(const Config& config));
MOCK_METHOD1(set_sample_rate_hz,
@@ -212,17 +214,28 @@ class MockAudioProcessing : public AudioProcessing {
int(AudioFrame* frame));
MOCK_METHOD7(ProcessStream,
int(const float* const* src,
- int samples_per_channel,
+ size_t samples_per_channel,
int input_sample_rate_hz,
ChannelLayout input_layout,
int output_sample_rate_hz,
ChannelLayout output_layout,
float* const* dest));
+ MOCK_METHOD4(ProcessStream,
+ int(const float* const* src,
+ const StreamConfig& input_config,
+ const StreamConfig& output_config,
+ float* const* dest));
MOCK_METHOD1(AnalyzeReverseStream,
int(AudioFrame* frame));
+ MOCK_METHOD1(ProcessReverseStream, int(AudioFrame* frame));
MOCK_METHOD4(AnalyzeReverseStream,
- int(const float* const* data, int frames, int sample_rate_hz,
+ int(const float* const* data, size_t frames, int sample_rate_hz,
ChannelLayout input_layout));
+ MOCK_METHOD4(ProcessReverseStream,
+ int(const float* const* src,
+ const StreamConfig& input_config,
+ const StreamConfig& output_config,
+ float* const* dest));
MOCK_METHOD1(set_stream_delay_ms,
int(int delay));
MOCK_CONST_METHOD0(stream_delay_ms,
diff --git a/chromium/third_party/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.cc b/chromium/third_party/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.cc
index 3029e21619a..d014ce060c4 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.cc
+++ b/chromium/third_party/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.cc
@@ -17,36 +17,33 @@
#include "webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.h"
-#include <cmath>
-#include <cstdlib>
-
+#include <math.h>
+#include <stdlib.h>
#include <algorithm>
#include <numeric>
#include "webrtc/base/checks.h"
-#include "webrtc/common_audio/vad/include/webrtc_vad.h"
+#include "webrtc/common_audio/include/audio_util.h"
#include "webrtc/common_audio/window_generator.h"
-using std::complex;
-using std::max;
-using std::min;
-
namespace webrtc {
-const int IntelligibilityEnhancer::kErbResolution = 2;
-const int IntelligibilityEnhancer::kWindowSizeMs = 2;
-const int IntelligibilityEnhancer::kChunkSizeMs = 10; // Size provided by APM.
-const int IntelligibilityEnhancer::kAnalyzeRate = 800;
-const int IntelligibilityEnhancer::kVarianceRate = 2;
-const float IntelligibilityEnhancer::kClipFreq = 200.0f;
-const float IntelligibilityEnhancer::kConfigRho = 0.02f;
-const float IntelligibilityEnhancer::kKbdAlpha = 1.5f;
+namespace {
+
+const size_t kErbResolution = 2;
+const int kWindowSizeMs = 2;
+const int kChunkSizeMs = 10; // Size provided by APM.
+const float kClipFreq = 200.0f;
+const float kConfigRho = 0.02f; // Default production and interpretation SNR.
+const float kKbdAlpha = 1.5f;
+const float kLambdaBot = -1.0f; // Extreme values in bisection
+const float kLambdaTop = -10e-18f; // search for lamda.
-// To disable gain update smoothing, set gain limit to be VERY high.
-// TODO(ekmeyerson): Add option to disable gain smoothing altogether
-// to avoid the extra computation.
-const float IntelligibilityEnhancer::kGainChangeLimit = 0.0125f;
+} // namespace
+using std::complex;
+using std::max;
+using std::min;
using VarianceType = intelligibility::VarianceArray::StepType;
IntelligibilityEnhancer::TransformCallback::TransformCallback(
@@ -58,133 +55,103 @@ IntelligibilityEnhancer::TransformCallback::TransformCallback(
void IntelligibilityEnhancer::TransformCallback::ProcessAudioBlock(
const complex<float>* const* in_block,
int in_channels,
- int frames,
+ size_t frames,
int /* out_channels */,
complex<float>* const* out_block) {
- DCHECK_EQ(parent_->freqs_, frames);
+ RTC_DCHECK_EQ(parent_->freqs_, frames);
for (int i = 0; i < in_channels; ++i) {
parent_->DispatchAudio(source_, in_block[i], out_block[i]);
}
}
-IntelligibilityEnhancer::IntelligibilityEnhancer(int erb_resolution,
- int sample_rate_hz,
- int channels,
- int cv_type,
- float cv_alpha,
- int cv_win,
- int analysis_rate,
- int variance_rate,
- float gain_limit)
+IntelligibilityEnhancer::IntelligibilityEnhancer()
+ : IntelligibilityEnhancer(IntelligibilityEnhancer::Config()) {
+}
+
+IntelligibilityEnhancer::IntelligibilityEnhancer(const Config& config)
: freqs_(RealFourier::ComplexLength(
- RealFourier::FftOrder(sample_rate_hz * kWindowSizeMs / 1000))),
- window_size_(1 << RealFourier::FftOrder(freqs_)),
- chunk_length_(sample_rate_hz * kChunkSizeMs / 1000),
- bank_size_(GetBankSize(sample_rate_hz, erb_resolution)),
- sample_rate_hz_(sample_rate_hz),
- erb_resolution_(erb_resolution),
- channels_(channels),
- analysis_rate_(analysis_rate),
- variance_rate_(variance_rate),
+ RealFourier::FftOrder(config.sample_rate_hz * kWindowSizeMs / 1000))),
+ window_size_(static_cast<size_t>(1 << RealFourier::FftOrder(freqs_))),
+ chunk_length_(
+ static_cast<size_t>(config.sample_rate_hz * kChunkSizeMs / 1000)),
+ bank_size_(GetBankSize(config.sample_rate_hz, kErbResolution)),
+ sample_rate_hz_(config.sample_rate_hz),
+ erb_resolution_(kErbResolution),
+ num_capture_channels_(config.num_capture_channels),
+ num_render_channels_(config.num_render_channels),
+ analysis_rate_(config.analysis_rate),
+ active_(true),
clear_variance_(freqs_,
- static_cast<VarianceType>(cv_type),
- cv_win,
- cv_alpha),
- noise_variance_(freqs_, VarianceType::kStepInfinite, 475, 0.01f),
+ config.var_type,
+ config.var_window_size,
+ config.var_decay_rate),
+ noise_variance_(freqs_,
+ config.var_type,
+ config.var_window_size,
+ config.var_decay_rate),
filtered_clear_var_(new float[bank_size_]),
filtered_noise_var_(new float[bank_size_]),
- filter_bank_(nullptr),
+ filter_bank_(bank_size_),
center_freqs_(new float[bank_size_]),
rho_(new float[bank_size_]),
gains_eq_(new float[bank_size_]),
- gain_applier_(freqs_, gain_limit),
- temp_out_buffer_(nullptr),
- input_audio_(new float* [channels]),
+ gain_applier_(freqs_, config.gain_change_limit),
+ temp_render_out_buffer_(chunk_length_, num_render_channels_),
+ temp_capture_out_buffer_(chunk_length_, num_capture_channels_),
kbd_window_(new float[window_size_]),
render_callback_(this, AudioSource::kRenderStream),
capture_callback_(this, AudioSource::kCaptureStream),
block_count_(0),
- analysis_step_(0),
- vad_high_(WebRtcVad_Create()),
- vad_low_(WebRtcVad_Create()),
- vad_tmp_buffer_(new int16_t[chunk_length_]) {
- DCHECK_LE(kConfigRho, 1.0f);
+ analysis_step_(0) {
+ RTC_DCHECK_LE(config.rho, 1.0f);
CreateErbBank();
- WebRtcVad_Init(vad_high_);
- WebRtcVad_set_mode(vad_high_, 0); // High likelihood of speech.
- WebRtcVad_Init(vad_low_);
- WebRtcVad_set_mode(vad_low_, 3); // Low likelihood of speech.
-
- temp_out_buffer_ = static_cast<float**>(
- malloc(sizeof(*temp_out_buffer_) * channels_ +
- sizeof(**temp_out_buffer_) * chunk_length_ * channels_));
- for (int i = 0; i < channels_; ++i) {
- temp_out_buffer_[i] =
- reinterpret_cast<float*>(temp_out_buffer_ + channels_) +
- chunk_length_ * i;
- }
-
// Assumes all rho equal.
- for (int i = 0; i < bank_size_; ++i) {
- rho_[i] = kConfigRho * kConfigRho;
+ for (size_t i = 0; i < bank_size_; ++i) {
+ rho_[i] = config.rho * config.rho;
}
float freqs_khz = kClipFreq / 1000.0f;
- int erb_index = static_cast<int>(ceilf(
+ size_t erb_index = static_cast<size_t>(ceilf(
11.17f * logf((freqs_khz + 0.312f) / (freqs_khz + 14.6575f)) + 43.0f));
- start_freq_ = max(1, erb_index * kErbResolution);
+ start_freq_ = std::max(static_cast<size_t>(1), erb_index * erb_resolution_);
WindowGenerator::KaiserBesselDerived(kKbdAlpha, window_size_,
kbd_window_.get());
render_mangler_.reset(new LappedTransform(
- channels_, channels_, chunk_length_, kbd_window_.get(), window_size_,
- window_size_ / 2, &render_callback_));
+ num_render_channels_, num_render_channels_, chunk_length_,
+ kbd_window_.get(), window_size_, window_size_ / 2, &render_callback_));
capture_mangler_.reset(new LappedTransform(
- channels_, channels_, chunk_length_, kbd_window_.get(), window_size_,
- window_size_ / 2, &capture_callback_));
+ num_capture_channels_, num_capture_channels_, chunk_length_,
+ kbd_window_.get(), window_size_, window_size_ / 2, &capture_callback_));
}
-IntelligibilityEnhancer::~IntelligibilityEnhancer() {
- WebRtcVad_Free(vad_low_);
- WebRtcVad_Free(vad_high_);
- free(filter_bank_);
-}
+void IntelligibilityEnhancer::ProcessRenderAudio(float* const* audio,
+ int sample_rate_hz,
+ int num_channels) {
+ RTC_CHECK_EQ(sample_rate_hz_, sample_rate_hz);
+ RTC_CHECK_EQ(num_render_channels_, num_channels);
-void IntelligibilityEnhancer::ProcessRenderAudio(float* const* audio) {
- for (int i = 0; i < chunk_length_; ++i) {
- vad_tmp_buffer_[i] = (int16_t)audio[0][i];
+ if (active_) {
+ render_mangler_->ProcessChunk(audio, temp_render_out_buffer_.channels());
}
- has_voice_low_ = WebRtcVad_Process(vad_low_, sample_rate_hz_,
- vad_tmp_buffer_.get(), chunk_length_) == 1;
- // Process and enhance chunk of |audio|
- render_mangler_->ProcessChunk(audio, temp_out_buffer_);
-
- for (int i = 0; i < channels_; ++i) {
- memcpy(audio[i], temp_out_buffer_[i],
- chunk_length_ * sizeof(**temp_out_buffer_));
+ if (active_) {
+ for (int i = 0; i < num_render_channels_; ++i) {
+ memcpy(audio[i], temp_render_out_buffer_.channels()[i],
+ chunk_length_ * sizeof(**audio));
+ }
}
}
-void IntelligibilityEnhancer::ProcessCaptureAudio(float* const* audio) {
- for (int i = 0; i < chunk_length_; ++i) {
- vad_tmp_buffer_[i] = (int16_t)audio[0][i];
- }
- // TODO(bercic): The VAD was always detecting voice in the noise stream,
- // no matter what the aggressiveness, so it was temporarily disabled here.
-
- #if 0
- if (WebRtcVad_Process(vad_high_, sample_rate_hz_, vad_tmp_buffer_.get(),
- chunk_length_) == 1) {
- printf("capture HAS speech\n");
- return;
- }
- printf("capture NO speech\n");
- #endif
+void IntelligibilityEnhancer::AnalyzeCaptureAudio(float* const* audio,
+ int sample_rate_hz,
+ int num_channels) {
+ RTC_CHECK_EQ(sample_rate_hz_, sample_rate_hz);
+ RTC_CHECK_EQ(num_capture_channels_, num_channels);
- capture_mangler_->ProcessChunk(audio, temp_out_buffer_);
+ capture_mangler_->ProcessChunk(audio, temp_capture_out_buffer_.channels());
}
void IntelligibilityEnhancer::DispatchAudio(
@@ -203,76 +170,78 @@ void IntelligibilityEnhancer::DispatchAudio(
void IntelligibilityEnhancer::ProcessClearBlock(const complex<float>* in_block,
complex<float>* out_block) {
- float power_target;
-
if (block_count_ < 2) {
memset(out_block, 0, freqs_ * sizeof(*out_block));
++block_count_;
return;
}
- // For now, always assumes enhancement is necessary.
- // TODO(ekmeyerson): Change to only enhance if necessary,
- // based on experiments with different cutoffs.
- if (has_voice_low_ || true) {
+ // TODO(ekm): Use VAD to |Step| and |AnalyzeClearBlock| only if necessary.
+ if (true) {
clear_variance_.Step(in_block, false);
- power_target = std::accumulate(clear_variance_.variance(),
- clear_variance_.variance() + freqs_, 0.0f);
-
if (block_count_ % analysis_rate_ == analysis_rate_ - 1) {
+ const float power_target = std::accumulate(
+ clear_variance_.variance(), clear_variance_.variance() + freqs_, 0.f);
AnalyzeClearBlock(power_target);
++analysis_step_;
- if (analysis_step_ == variance_rate_) {
- analysis_step_ = 0;
- clear_variance_.Clear();
- noise_variance_.Clear();
- }
}
++block_count_;
}
- /* efidata(n,:) = sqrt(b(n)) * fidata(n,:) */
- gain_applier_.Apply(in_block, out_block);
+ if (active_) {
+ gain_applier_.Apply(in_block, out_block);
+ }
}
void IntelligibilityEnhancer::AnalyzeClearBlock(float power_target) {
FilterVariance(clear_variance_.variance(), filtered_clear_var_.get());
FilterVariance(noise_variance_.variance(), filtered_noise_var_.get());
- // Bisection search for optimal |lambda|
-
- float lambda_bot = -1.0f, lambda_top = -10e-18f, lambda;
- float power_bot, power_top, power;
- SolveForGainsGivenLambda(lambda_top, start_freq_, gains_eq_.get());
- power_top =
+ SolveForGainsGivenLambda(kLambdaTop, start_freq_, gains_eq_.get());
+ const float power_top =
DotProduct(gains_eq_.get(), filtered_clear_var_.get(), bank_size_);
- SolveForGainsGivenLambda(lambda_bot, start_freq_, gains_eq_.get());
- power_bot =
+ SolveForGainsGivenLambda(kLambdaBot, start_freq_, gains_eq_.get());
+ const float power_bot =
DotProduct(gains_eq_.get(), filtered_clear_var_.get(), bank_size_);
- DCHECK(power_target >= power_bot && power_target <= power_top);
+ if (power_target >= power_bot && power_target <= power_top) {
+ SolveForLambda(power_target, power_bot, power_top);
+ UpdateErbGains();
+ } // Else experiencing variance underflow, so do nothing.
+}
- float power_ratio = 2.0f; // Ratio of achieved power to target power.
+void IntelligibilityEnhancer::SolveForLambda(float power_target,
+ float power_bot,
+ float power_top) {
const float kConvergeThresh = 0.001f; // TODO(ekmeyerson): Find best values
const int kMaxIters = 100; // for these, based on experiments.
+
+ const float reciprocal_power_target = 1.f / power_target;
+ float lambda_bot = kLambdaBot;
+ float lambda_top = kLambdaTop;
+ float power_ratio = 2.0f; // Ratio of achieved power to target power.
int iters = 0;
- while (fabs(power_ratio - 1.0f) > kConvergeThresh && iters <= kMaxIters) {
- lambda = lambda_bot + (lambda_top - lambda_bot) / 2.0f;
+ while (std::fabs(power_ratio - 1.0f) > kConvergeThresh &&
+ iters <= kMaxIters) {
+ const float lambda = lambda_bot + (lambda_top - lambda_bot) / 2.0f;
SolveForGainsGivenLambda(lambda, start_freq_, gains_eq_.get());
- power = DotProduct(gains_eq_.get(), filtered_clear_var_.get(), bank_size_);
+ const float power =
+ DotProduct(gains_eq_.get(), filtered_clear_var_.get(), bank_size_);
if (power < power_target) {
lambda_bot = lambda;
} else {
lambda_top = lambda;
}
- power_ratio = fabs(power / power_target);
+ power_ratio = std::fabs(power * reciprocal_power_target);
++iters;
}
+}
+void IntelligibilityEnhancer::UpdateErbGains() {
// (ERB gain) = filterbank' * (freq gain)
float* gains = gain_applier_.target();
- for (int i = 0; i < freqs_; ++i) {
+ for (size_t i = 0; i < freqs_; ++i) {
gains[i] = 0.0f;
- for (int j = 0; j < bank_size_; ++j) {
+ for (size_t j = 0; j < bank_size_; ++j) {
gains[i] = fmaf(filter_bank_[j][i], gains_eq_[j], gains[i]);
}
}
@@ -283,94 +252,95 @@ void IntelligibilityEnhancer::ProcessNoiseBlock(const complex<float>* in_block,
noise_variance_.Step(in_block);
}
-int IntelligibilityEnhancer::GetBankSize(int sample_rate, int erb_resolution) {
+size_t IntelligibilityEnhancer::GetBankSize(int sample_rate,
+ size_t erb_resolution) {
float freq_limit = sample_rate / 2000.0f;
- int erb_scale = ceilf(
- 11.17f * logf((freq_limit + 0.312f) / (freq_limit + 14.6575f)) + 43.0f);
+ size_t erb_scale = static_cast<size_t>(ceilf(
+ 11.17f * logf((freq_limit + 0.312f) / (freq_limit + 14.6575f)) + 43.0f));
return erb_scale * erb_resolution;
}
void IntelligibilityEnhancer::CreateErbBank() {
- int lf = 1, rf = 4;
+ size_t lf = 1, rf = 4;
- for (int i = 0; i < bank_size_; ++i) {
+ for (size_t i = 0; i < bank_size_; ++i) {
float abs_temp = fabsf((i + 1.0f) / static_cast<float>(erb_resolution_));
center_freqs_[i] = 676170.4f / (47.06538f - expf(0.08950404f * abs_temp));
center_freqs_[i] -= 14678.49f;
}
float last_center_freq = center_freqs_[bank_size_ - 1];
- for (int i = 0; i < bank_size_; ++i) {
+ for (size_t i = 0; i < bank_size_; ++i) {
center_freqs_[i] *= 0.5f * sample_rate_hz_ / last_center_freq;
}
- filter_bank_ = static_cast<float**>(
- malloc(sizeof(*filter_bank_) * bank_size_ +
- sizeof(**filter_bank_) * freqs_ * bank_size_));
- for (int i = 0; i < bank_size_; ++i) {
- filter_bank_[i] =
- reinterpret_cast<float*>(filter_bank_ + bank_size_) + freqs_ * i;
+ for (size_t i = 0; i < bank_size_; ++i) {
+ filter_bank_[i].resize(freqs_);
}
- for (int i = 1; i <= bank_size_; ++i) {
- int lll, ll, rr, rrr;
- lll = round(center_freqs_[max(1, i - lf) - 1] * freqs_ /
- (0.5f * sample_rate_hz_));
- ll =
- round(center_freqs_[max(1, i) - 1] * freqs_ / (0.5f * sample_rate_hz_));
- lll = min(freqs_, max(lll, 1)) - 1;
- ll = min(freqs_, max(ll, 1)) - 1;
-
- rrr = round(center_freqs_[min(bank_size_, i + rf) - 1] * freqs_ /
- (0.5f * sample_rate_hz_));
- rr = round(center_freqs_[min(bank_size_, i + 1) - 1] * freqs_ /
- (0.5f * sample_rate_hz_));
- rrr = min(freqs_, max(rrr, 1)) - 1;
- rr = min(freqs_, max(rr, 1)) - 1;
+ for (size_t i = 1; i <= bank_size_; ++i) {
+ size_t lll, ll, rr, rrr;
+ static const size_t kOne = 1; // Avoids repeated static_cast<>s below.
+ lll = static_cast<size_t>(round(
+ center_freqs_[max(kOne, i - lf) - 1] * freqs_ /
+ (0.5f * sample_rate_hz_)));
+ ll = static_cast<size_t>(round(
+ center_freqs_[max(kOne, i) - 1] * freqs_ / (0.5f * sample_rate_hz_)));
+ lll = min(freqs_, max(lll, kOne)) - 1;
+ ll = min(freqs_, max(ll, kOne)) - 1;
+
+ rrr = static_cast<size_t>(round(
+ center_freqs_[min(bank_size_, i + rf) - 1] * freqs_ /
+ (0.5f * sample_rate_hz_)));
+ rr = static_cast<size_t>(round(
+ center_freqs_[min(bank_size_, i + 1) - 1] * freqs_ /
+ (0.5f * sample_rate_hz_)));
+ rrr = min(freqs_, max(rrr, kOne)) - 1;
+ rr = min(freqs_, max(rr, kOne)) - 1;
float step, element;
step = 1.0f / (ll - lll);
element = 0.0f;
- for (int j = lll; j <= ll; ++j) {
+ for (size_t j = lll; j <= ll; ++j) {
filter_bank_[i - 1][j] = element;
element += step;
}
step = 1.0f / (rrr - rr);
element = 1.0f;
- for (int j = rr; j <= rrr; ++j) {
+ for (size_t j = rr; j <= rrr; ++j) {
filter_bank_[i - 1][j] = element;
element -= step;
}
- for (int j = ll; j <= rr; ++j) {
+ for (size_t j = ll; j <= rr; ++j) {
filter_bank_[i - 1][j] = 1.0f;
}
}
float sum;
- for (int i = 0; i < freqs_; ++i) {
+ for (size_t i = 0; i < freqs_; ++i) {
sum = 0.0f;
- for (int j = 0; j < bank_size_; ++j) {
+ for (size_t j = 0; j < bank_size_; ++j) {
sum += filter_bank_[j][i];
}
- for (int j = 0; j < bank_size_; ++j) {
+ for (size_t j = 0; j < bank_size_; ++j) {
filter_bank_[j][i] /= sum;
}
}
}
void IntelligibilityEnhancer::SolveForGainsGivenLambda(float lambda,
- int start_freq,
+ size_t start_freq,
float* sols) {
bool quadratic = (kConfigRho < 1.0f);
const float* var_x0 = filtered_clear_var_.get();
const float* var_n0 = filtered_noise_var_.get();
- for (int n = 0; n < start_freq; ++n) {
+ for (size_t n = 0; n < start_freq; ++n) {
sols[n] = 1.0f;
}
// Analytic solution for optimal gains. See paper for derivation.
- for (int n = start_freq - 1; n < bank_size_; ++n) {
+ for (size_t n = start_freq - 1; n < bank_size_; ++n) {
float alpha0, beta0, gamma0;
gamma0 = 0.5f * rho_[n] * var_x0[n] * var_n0[n] +
lambda * var_x0[n] * var_n0[n] * var_n0[n];
@@ -387,20 +357,25 @@ void IntelligibilityEnhancer::SolveForGainsGivenLambda(float lambda,
}
void IntelligibilityEnhancer::FilterVariance(const float* var, float* result) {
- for (int i = 0; i < bank_size_; ++i) {
- result[i] = DotProduct(filter_bank_[i], var, freqs_);
+ RTC_DCHECK_GT(freqs_, 0u);
+ for (size_t i = 0; i < bank_size_; ++i) {
+ result[i] = DotProduct(&filter_bank_[i][0], var, freqs_);
}
}
float IntelligibilityEnhancer::DotProduct(const float* a,
const float* b,
- int length) {
+ size_t length) {
float ret = 0.0f;
- for (int i = 0; i < length; ++i) {
+ for (size_t i = 0; i < length; ++i) {
ret = fmaf(a[i], b[i], ret);
}
return ret;
}
+bool IntelligibilityEnhancer::active() const {
+ return active_;
+}
+
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.h b/chromium/third_party/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.h
index 8125707f120..1e9e35ac2a2 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.h
@@ -16,14 +16,13 @@
#define WEBRTC_MODULES_AUDIO_PROCESSING_INTELLIGIBILITY_INTELLIGIBILITY_ENHANCER_H_
#include <complex>
+#include <vector>
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/common_audio/lapped_transform.h"
+#include "webrtc/common_audio/channel_buffer.h"
#include "webrtc/modules/audio_processing/intelligibility/intelligibility_utils.h"
-struct WebRtcVadInst;
-typedef struct WebRtcVadInst VadInst;
-
namespace webrtc {
// Speech intelligibility enhancement module. Reads render and capture
@@ -32,32 +31,45 @@ namespace webrtc {
// Note: assumes speech and noise streams are already separated.
class IntelligibilityEnhancer {
public:
- // Construct a new instance with the given filter bank resolution,
- // sampling rate, number of channels and analysis rates.
- // |analysis_rate| sets the number of input blocks (containing speech!)
- // to elapse before a new gain computation is made. |variance_rate| specifies
- // the number of gain recomputations after which the variances are reset.
- // |cv_*| are parameters for the VarianceArray constructor for the
- // clear speech stream.
- // TODO(bercic): the |cv_*|, |*_rate| and |gain_limit| parameters should
- // probably go away once fine tuning is done. They override the internal
- // constants in the class (kGainChangeLimit, kAnalyzeRate, kVarianceRate).
- IntelligibilityEnhancer(int erb_resolution,
- int sample_rate_hz,
- int channels,
- int cv_type,
- float cv_alpha,
- int cv_win,
- int analysis_rate,
- int variance_rate,
- float gain_limit);
- ~IntelligibilityEnhancer();
+ struct Config {
+ // |var_*| are parameters for the VarianceArray constructor for the
+ // clear speech stream.
+ // TODO(bercic): the |var_*|, |*_rate| and |gain_limit| parameters should
+ // probably go away once fine tuning is done.
+ Config()
+ : sample_rate_hz(16000),
+ num_capture_channels(1),
+ num_render_channels(1),
+ var_type(intelligibility::VarianceArray::kStepDecaying),
+ var_decay_rate(0.9f),
+ var_window_size(10),
+ analysis_rate(800),
+ gain_change_limit(0.1f),
+ rho(0.02f) {}
+ int sample_rate_hz;
+ int num_capture_channels;
+ int num_render_channels;
+ intelligibility::VarianceArray::StepType var_type;
+ float var_decay_rate;
+ size_t var_window_size;
+ int analysis_rate;
+ float gain_change_limit;
+ float rho;
+ };
+
+ explicit IntelligibilityEnhancer(const Config& config);
+ IntelligibilityEnhancer(); // Initialize with default config.
// Reads and processes chunk of noise stream in time domain.
- void ProcessCaptureAudio(float* const* audio);
+ void AnalyzeCaptureAudio(float* const* audio,
+ int sample_rate_hz,
+ int num_channels);
// Reads chunk of speech in time domain and updates with modified signal.
- void ProcessRenderAudio(float* const* audio);
+ void ProcessRenderAudio(float* const* audio,
+ int sample_rate_hz,
+ int num_channels);
+ bool active() const;
private:
enum AudioSource {
@@ -72,17 +84,19 @@ class IntelligibilityEnhancer {
// All in frequency domain, receives input |in_block|, applies
// intelligibility enhancement, and writes result to |out_block|.
- virtual void ProcessAudioBlock(const std::complex<float>* const* in_block,
- int in_channels,
- int frames,
- int out_channels,
- std::complex<float>* const* out_block);
+ void ProcessAudioBlock(const std::complex<float>* const* in_block,
+ int in_channels,
+ size_t frames,
+ int out_channels,
+ std::complex<float>* const* out_block) override;
private:
IntelligibilityEnhancer* parent_;
AudioSource source_;
};
friend class TransformCallback;
+ FRIEND_TEST_ALL_PREFIXES(IntelligibilityEnhancerTest, TestErbCreation);
+ FRIEND_TEST_ALL_PREFIXES(IntelligibilityEnhancerTest, TestSolveForGains);
// Sends streams to ProcessClearBlock or ProcessNoiseBlock based on source.
void DispatchAudio(AudioSource source,
@@ -97,65 +111,63 @@ class IntelligibilityEnhancer {
// Computes and sets modified gains.
void AnalyzeClearBlock(float power_target);
+ // Bisection search for optimal |lambda|.
+ void SolveForLambda(float power_target, float power_bot, float power_top);
+
+ // Transforms freq gains to ERB gains.
+ void UpdateErbGains();
+
// Updates variance calculation for noise input with |in_block|.
void ProcessNoiseBlock(const std::complex<float>* in_block,
std::complex<float>* out_block);
// Returns number of ERB filters.
- static int GetBankSize(int sample_rate, int erb_resolution);
+ static size_t GetBankSize(int sample_rate, size_t erb_resolution);
// Initializes ERB filterbank.
void CreateErbBank();
// Analytically solves quadratic for optimal gains given |lambda|.
// Negative gains are set to 0. Stores the results in |sols|.
- void SolveForGainsGivenLambda(float lambda, int start_freq, float* sols);
+ void SolveForGainsGivenLambda(float lambda, size_t start_freq, float* sols);
// Computes variance across ERB filters from freq variance |var|.
// Stores in |result|.
void FilterVariance(const float* var, float* result);
// Returns dot product of vectors specified by size |length| arrays |a|,|b|.
- static float DotProduct(const float* a, const float* b, int length);
-
- static const int kErbResolution;
- static const int kWindowSizeMs;
- static const int kChunkSizeMs;
- static const int kAnalyzeRate; // Default for |analysis_rate_|.
- static const int kVarianceRate; // Default for |variance_rate_|.
- static const float kClipFreq;
- static const float kConfigRho; // Default production and interpretation SNR.
- static const float kKbdAlpha;
- static const float kGainChangeLimit;
-
- const int freqs_; // Num frequencies in frequency domain.
- const int window_size_; // Window size in samples; also the block size.
- const int chunk_length_; // Chunk size in samples.
- const int bank_size_; // Num ERB filters.
+ static float DotProduct(const float* a, const float* b, size_t length);
+
+ const size_t freqs_; // Num frequencies in frequency domain.
+ const size_t window_size_; // Window size in samples; also the block size.
+ const size_t chunk_length_; // Chunk size in samples.
+ const size_t bank_size_; // Num ERB filters.
const int sample_rate_hz_;
const int erb_resolution_;
- const int channels_; // Num channels.
- const int analysis_rate_; // Num blocks before gains recalculated.
- const int variance_rate_; // Num recalculations before history is cleared.
+ const int num_capture_channels_;
+ const int num_render_channels_;
+ const int analysis_rate_; // Num blocks before gains recalculated.
+
+ const bool active_; // Whether render gains are being updated.
+ // TODO(ekm): Add logic for updating |active_|.
intelligibility::VarianceArray clear_variance_;
intelligibility::VarianceArray noise_variance_;
rtc::scoped_ptr<float[]> filtered_clear_var_;
rtc::scoped_ptr<float[]> filtered_noise_var_;
- float** filter_bank_; // TODO(ekmeyerson): Switch to using ChannelBuffer.
+ std::vector<std::vector<float>> filter_bank_;
rtc::scoped_ptr<float[]> center_freqs_;
- int start_freq_;
+ size_t start_freq_;
rtc::scoped_ptr<float[]> rho_; // Production and interpretation SNR.
// for each ERB band.
rtc::scoped_ptr<float[]> gains_eq_; // Pre-filter modified gains.
intelligibility::GainApplier gain_applier_;
- // Destination buffer used to reassemble blocked chunks before overwriting
+ // Destination buffers used to reassemble blocked chunks before overwriting
// the original input array with modifications.
- // TODO(ekmeyerson): Switch to using ChannelBuffer.
- float** temp_out_buffer_;
+ ChannelBuffer<float> temp_render_out_buffer_;
+ ChannelBuffer<float> temp_capture_out_buffer_;
- rtc::scoped_ptr<float* []> input_audio_;
rtc::scoped_ptr<float[]> kbd_window_;
TransformCallback render_callback_;
TransformCallback capture_callback_;
@@ -163,14 +175,6 @@ class IntelligibilityEnhancer {
rtc::scoped_ptr<LappedTransform> capture_mangler_;
int block_count_;
int analysis_step_;
-
- // TODO(bercic): Quick stopgap measure for voice detection in the clear
- // and noise streams.
- // Note: VAD currently does not affect anything in IntelligibilityEnhancer.
- VadInst* vad_high_;
- VadInst* vad_low_;
- rtc::scoped_ptr<int16_t[]> vad_tmp_buffer_;
- bool has_voice_low_; // Whether voice detected in speech stream.
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer_unittest.cc b/chromium/third_party/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer_unittest.cc
new file mode 100644
index 00000000000..ce146deaf55
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer_unittest.cc
@@ -0,0 +1,193 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+//
+// Unit tests for intelligibility enhancer.
+//
+
+#include <math.h>
+#include <stdlib.h>
+#include <algorithm>
+#include <vector>
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/arraysize.h"
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
+#include "webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.h"
+
+namespace webrtc {
+
+namespace {
+
+// Target output for ERB create test. Generated with matlab.
+const float kTestCenterFreqs[] = {
+ 13.169f, 26.965f, 41.423f, 56.577f, 72.461f, 89.113f, 106.57f, 124.88f,
+ 144.08f, 164.21f, 185.34f, 207.5f, 230.75f, 255.16f, 280.77f, 307.66f,
+ 335.9f, 365.56f, 396.71f, 429.44f, 463.84f, 500.f};
+const float kTestFilterBank[][2] = {{0.055556f, 0.f},
+ {0.055556f, 0.f},
+ {0.055556f, 0.f},
+ {0.055556f, 0.f},
+ {0.055556f, 0.f},
+ {0.055556f, 0.f},
+ {0.055556f, 0.f},
+ {0.055556f, 0.f},
+ {0.055556f, 0.f},
+ {0.055556f, 0.f},
+ {0.055556f, 0.f},
+ {0.055556f, 0.f},
+ {0.055556f, 0.f},
+ {0.055556f, 0.f},
+ {0.055556f, 0.f},
+ {0.055556f, 0.f},
+ {0.055556f, 0.f},
+ {0.055556f, 0.2f},
+ {0, 0.2f},
+ {0, 0.2f},
+ {0, 0.2f},
+ {0, 0.2f}};
+static_assert(arraysize(kTestCenterFreqs) == arraysize(kTestFilterBank),
+ "Test filterbank badly initialized.");
+
+// Target output for gain solving test. Generated with matlab.
+const size_t kTestStartFreq = 12; // Lowest integral frequency for ERBs.
+const float kTestZeroVar[] = {1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f,
+ 1.f, 1.f, 1.f, 0.f, 0.f, 0.f, 0.f, 0.f,
+ 0.f, 0.f, 0.f, 0.f, 0.f, 0.f};
+static_assert(arraysize(kTestCenterFreqs) == arraysize(kTestZeroVar),
+ "Variance test data badly initialized.");
+const float kTestNonZeroVarLambdaTop[] = {
+ 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f,
+ 1.f, 1.f, 1.f, 0.f, 0.f, 0.0351f, 0.0636f, 0.0863f,
+ 0.1037f, 0.1162f, 0.1236f, 0.1251f, 0.1189f, 0.0993f};
+static_assert(arraysize(kTestCenterFreqs) ==
+ arraysize(kTestNonZeroVarLambdaTop),
+ "Variance test data badly initialized.");
+const float kMaxTestError = 0.005f;
+
+// Enhancer initialization parameters.
+const int kSamples = 2000;
+const int kSampleRate = 1000;
+const int kNumChannels = 1;
+const int kFragmentSize = kSampleRate / 100;
+
+} // namespace
+
+using std::vector;
+using intelligibility::VarianceArray;
+
+class IntelligibilityEnhancerTest : public ::testing::Test {
+ protected:
+ IntelligibilityEnhancerTest()
+ : clear_data_(kSamples), noise_data_(kSamples), orig_data_(kSamples) {
+ config_.sample_rate_hz = kSampleRate;
+ enh_.reset(new IntelligibilityEnhancer(config_));
+ }
+
+ bool CheckUpdate(VarianceArray::StepType step_type) {
+ config_.sample_rate_hz = kSampleRate;
+ config_.var_type = step_type;
+ enh_.reset(new IntelligibilityEnhancer(config_));
+ float* clear_cursor = &clear_data_[0];
+ float* noise_cursor = &noise_data_[0];
+ for (int i = 0; i < kSamples; i += kFragmentSize) {
+ enh_->AnalyzeCaptureAudio(&noise_cursor, kSampleRate, kNumChannels);
+ enh_->ProcessRenderAudio(&clear_cursor, kSampleRate, kNumChannels);
+ clear_cursor += kFragmentSize;
+ noise_cursor += kFragmentSize;
+ }
+ for (int i = 0; i < kSamples; i++) {
+ if (std::fabs(clear_data_[i] - orig_data_[i]) > kMaxTestError) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ IntelligibilityEnhancer::Config config_;
+ rtc::scoped_ptr<IntelligibilityEnhancer> enh_;
+ vector<float> clear_data_;
+ vector<float> noise_data_;
+ vector<float> orig_data_;
+};
+
+// For each class of generated data, tests that render stream is
+// updated when it should be for each variance update method.
+TEST_F(IntelligibilityEnhancerTest, TestRenderUpdate) {
+ vector<VarianceArray::StepType> step_types;
+ step_types.push_back(VarianceArray::kStepInfinite);
+ step_types.push_back(VarianceArray::kStepDecaying);
+ step_types.push_back(VarianceArray::kStepWindowed);
+ step_types.push_back(VarianceArray::kStepBlocked);
+ step_types.push_back(VarianceArray::kStepBlockBasedMovingAverage);
+ std::fill(noise_data_.begin(), noise_data_.end(), 0.0f);
+ std::fill(orig_data_.begin(), orig_data_.end(), 0.0f);
+ for (auto step_type : step_types) {
+ std::fill(clear_data_.begin(), clear_data_.end(), 0.0f);
+ EXPECT_FALSE(CheckUpdate(step_type));
+ }
+ std::srand(1);
+ auto float_rand = []() { return std::rand() * 2.f / RAND_MAX - 1; };
+ std::generate(noise_data_.begin(), noise_data_.end(), float_rand);
+ for (auto step_type : step_types) {
+ EXPECT_FALSE(CheckUpdate(step_type));
+ }
+ for (auto step_type : step_types) {
+ std::generate(clear_data_.begin(), clear_data_.end(), float_rand);
+ orig_data_ = clear_data_;
+ EXPECT_TRUE(CheckUpdate(step_type));
+ }
+}
+
+// Tests ERB bank creation, comparing against matlab output.
+TEST_F(IntelligibilityEnhancerTest, TestErbCreation) {
+ ASSERT_EQ(arraysize(kTestCenterFreqs), enh_->bank_size_);
+ for (size_t i = 0; i < enh_->bank_size_; ++i) {
+ EXPECT_NEAR(kTestCenterFreqs[i], enh_->center_freqs_[i], kMaxTestError);
+ ASSERT_EQ(arraysize(kTestFilterBank[0]), enh_->freqs_);
+ for (size_t j = 0; j < enh_->freqs_; ++j) {
+ EXPECT_NEAR(kTestFilterBank[i][j], enh_->filter_bank_[i][j],
+ kMaxTestError);
+ }
+ }
+}
+
+// Tests analytic solution for optimal gains, comparing
+// against matlab output.
+TEST_F(IntelligibilityEnhancerTest, TestSolveForGains) {
+ ASSERT_EQ(kTestStartFreq, enh_->start_freq_);
+ vector<float> sols(enh_->bank_size_);
+ float lambda = -0.001f;
+ for (size_t i = 0; i < enh_->bank_size_; i++) {
+ enh_->filtered_clear_var_[i] = 0.0f;
+ enh_->filtered_noise_var_[i] = 0.0f;
+ enh_->rho_[i] = 0.02f;
+ }
+ enh_->SolveForGainsGivenLambda(lambda, enh_->start_freq_, &sols[0]);
+ for (size_t i = 0; i < enh_->bank_size_; i++) {
+ EXPECT_NEAR(kTestZeroVar[i], sols[i], kMaxTestError);
+ }
+ for (size_t i = 0; i < enh_->bank_size_; i++) {
+ enh_->filtered_clear_var_[i] = static_cast<float>(i + 1);
+ enh_->filtered_noise_var_[i] = static_cast<float>(enh_->bank_size_ - i);
+ }
+ enh_->SolveForGainsGivenLambda(lambda, enh_->start_freq_, &sols[0]);
+ for (size_t i = 0; i < enh_->bank_size_; i++) {
+ EXPECT_NEAR(kTestNonZeroVarLambdaTop[i], sols[i], kMaxTestError);
+ }
+ lambda = -1.0;
+ enh_->SolveForGainsGivenLambda(lambda, enh_->start_freq_, &sols[0]);
+ for (size_t i = 0; i < enh_->bank_size_; i++) {
+ EXPECT_NEAR(kTestZeroVar[i], sols[i], kMaxTestError);
+ }
+}
+
+} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_processing/intelligibility/intelligibility_utils.cc b/chromium/third_party/webrtc/modules/audio_processing/intelligibility/intelligibility_utils.cc
index 145cc087286..7da9b957a42 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/intelligibility/intelligibility_utils.cc
+++ b/chromium/third_party/webrtc/modules/audio_processing/intelligibility/intelligibility_utils.cc
@@ -14,99 +14,70 @@
#include "webrtc/modules/audio_processing/intelligibility/intelligibility_utils.h"
+#include <math.h>
+#include <stdlib.h>
+#include <string.h>
#include <algorithm>
-#include <cmath>
-#include <cstring>
using std::complex;
+using std::min;
+
+namespace webrtc {
-namespace {
+namespace intelligibility {
-// Return |current| changed towards |target|, with the change being at most
-// |limit|.
-inline float UpdateFactor(float target, float current, float limit) {
+float UpdateFactor(float target, float current, float limit) {
float delta = fabsf(target - current);
float sign = copysign(1.0f, target - current);
return current + sign * fminf(delta, limit);
}
-// std::isfinite for complex numbers.
-inline bool cplxfinite(complex<float> c) {
- return std::isfinite(c.real()) && std::isfinite(c.imag());
+float AddDitherIfZero(float value) {
+ return value == 0.f ? std::rand() * 0.01f / RAND_MAX : value;
}
-// std::isnormal for complex numbers.
-inline bool cplxnormal(complex<float> c) {
- return std::isnormal(c.real()) && std::isnormal(c.imag());
+complex<float> zerofudge(complex<float> c) {
+ return complex<float>(AddDitherIfZero(c.real()), AddDitherIfZero(c.imag()));
}
-// Apply a small fudge to degenerate complex values. The numbers in the array
-// were chosen randomly, so that even a series of all zeroes has some small
-// variability.
-inline complex<float> zerofudge(complex<float> c) {
- const static complex<float> fudge[7] = {{0.001f, 0.002f},
- {0.008f, 0.001f},
- {0.003f, 0.008f},
- {0.0006f, 0.0009f},
- {0.001f, 0.004f},
- {0.003f, 0.004f},
- {0.002f, 0.009f}};
- static int fudge_index = 0;
- if (cplxfinite(c) && !cplxnormal(c)) {
- fudge_index = (fudge_index + 1) % 7;
- return c + fudge[fudge_index];
- }
- return c;
-}
-
-// Incremental mean computation. Return the mean of the series with the
-// mean |mean| with added |data|.
-inline complex<float> NewMean(complex<float> mean,
- complex<float> data,
- int count) {
+complex<float> NewMean(complex<float> mean, complex<float> data, size_t count) {
return mean + (data - mean) / static_cast<float>(count);
}
-inline void AddToMean(complex<float> data, int count, complex<float>* mean) {
+void AddToMean(complex<float> data, size_t count, complex<float>* mean) {
(*mean) = NewMean(*mean, data, count);
}
-} // namespace
-using std::min;
+static const size_t kWindowBlockSize = 10;
-namespace webrtc {
-
-namespace intelligibility {
-
-static const int kWindowBlockSize = 10;
-
-VarianceArray::VarianceArray(int freqs,
+VarianceArray::VarianceArray(size_t num_freqs,
StepType type,
- int window_size,
+ size_t window_size,
float decay)
- : running_mean_(new complex<float>[freqs]()),
- running_mean_sq_(new complex<float>[freqs]()),
- sub_running_mean_(new complex<float>[freqs]()),
- sub_running_mean_sq_(new complex<float>[freqs]()),
- variance_(new float[freqs]()),
- conj_sum_(new float[freqs]()),
- freqs_(freqs),
+ : running_mean_(new complex<float>[num_freqs]()),
+ running_mean_sq_(new complex<float>[num_freqs]()),
+ sub_running_mean_(new complex<float>[num_freqs]()),
+ sub_running_mean_sq_(new complex<float>[num_freqs]()),
+ variance_(new float[num_freqs]()),
+ conj_sum_(new float[num_freqs]()),
+ num_freqs_(num_freqs),
window_size_(window_size),
decay_(decay),
history_cursor_(0),
count_(0),
- array_mean_(0.0f) {
- history_.reset(new rtc::scoped_ptr<complex<float>[]>[freqs_]());
- for (int i = 0; i < freqs_; ++i) {
+ array_mean_(0.0f),
+ buffer_full_(false) {
+ history_.reset(new rtc::scoped_ptr<complex<float>[]>[num_freqs_]());
+ for (size_t i = 0; i < num_freqs_; ++i) {
history_[i].reset(new complex<float>[window_size_]());
}
- subhistory_.reset(new rtc::scoped_ptr<complex<float>[]>[freqs_]());
- for (int i = 0; i < freqs_; ++i) {
+ subhistory_.reset(new rtc::scoped_ptr<complex<float>[]>[num_freqs_]());
+ for (size_t i = 0; i < num_freqs_; ++i) {
subhistory_[i].reset(new complex<float>[window_size_]());
}
- subhistory_sq_.reset(new rtc::scoped_ptr<complex<float>[]>[freqs_]());
- for (int i = 0; i < freqs_; ++i) {
+ subhistory_sq_.reset(new rtc::scoped_ptr<complex<float>[]>[num_freqs_]());
+ for (size_t i = 0; i < num_freqs_; ++i) {
subhistory_sq_[i].reset(new complex<float>[window_size_]());
}
switch (type) {
@@ -122,6 +93,9 @@ VarianceArray::VarianceArray(int freqs,
case kStepBlocked:
step_func_ = &VarianceArray::BlockedStep;
break;
+ case kStepBlockBasedMovingAverage:
+ step_func_ = &VarianceArray::BlockBasedMovingAverage;
+ break;
}
}
@@ -130,7 +104,7 @@ VarianceArray::VarianceArray(int freqs,
void VarianceArray::InfiniteStep(const complex<float>* data, bool skip_fudge) {
array_mean_ = 0.0f;
++count_;
- for (int i = 0; i < freqs_; ++i) {
+ for (size_t i = 0; i < num_freqs_; ++i) {
complex<float> sample = data[i];
if (!skip_fudge) {
sample = zerofudge(sample);
@@ -147,10 +121,7 @@ void VarianceArray::InfiniteStep(const complex<float>* data, bool skip_fudge) {
(old_sum + std::conj(sample - old_mean) * (sample - running_mean_[i]))
.real();
variance_[i] =
- conj_sum_[i] / (count_ - 1); // + fudge[fudge_index].real();
- if (skip_fudge && false) {
- // variance_[i] -= fudge[fudge_index].real();
- }
+ conj_sum_[i] / (count_ - 1);
}
array_mean_ += (variance_[i] - array_mean_) / (i + 1);
}
@@ -161,7 +132,7 @@ void VarianceArray::InfiniteStep(const complex<float>* data, bool skip_fudge) {
void VarianceArray::DecayStep(const complex<float>* data, bool /*dummy*/) {
array_mean_ = 0.0f;
++count_;
- for (int i = 0; i < freqs_; ++i) {
+ for (size_t i = 0; i < num_freqs_; ++i) {
complex<float> sample = data[i];
sample = zerofudge(sample);
@@ -175,9 +146,6 @@ void VarianceArray::DecayStep(const complex<float>* data, bool /*dummy*/) {
running_mean_[i] = decay_ * prev + (1.0f - decay_) * sample;
running_mean_sq_[i] =
decay_ * prev2 + (1.0f - decay_) * sample * std::conj(sample);
- // variance_[i] = decay_ * variance_[i] + (1.0f - decay_) * (
- // (sample - running_mean_[i]) * std::conj(sample -
- // running_mean_[i])).real();
variance_[i] = (running_mean_sq_[i] -
running_mean_[i] * std::conj(running_mean_[i])).real();
}
@@ -189,9 +157,9 @@ void VarianceArray::DecayStep(const complex<float>* data, bool /*dummy*/) {
// Windowed variance computation. On each step, the variances for the
// window are recomputed from scratch, using Welford's algorithm.
void VarianceArray::WindowedStep(const complex<float>* data, bool /*dummy*/) {
- int num = min(count_ + 1, window_size_);
+ size_t num = min(count_ + 1, window_size_);
array_mean_ = 0.0f;
- for (int i = 0; i < freqs_; ++i) {
+ for (size_t i = 0; i < num_freqs_; ++i) {
complex<float> mean;
float conj_sum = 0.0f;
@@ -199,7 +167,7 @@ void VarianceArray::WindowedStep(const complex<float>* data, bool /*dummy*/) {
mean = history_[i][history_cursor_];
variance_[i] = 0.0f;
- for (int j = 1; j < num; ++j) {
+ for (size_t j = 1; j < num; ++j) {
complex<float> sample =
zerofudge(history_[i][(history_cursor_ + j) % window_size_]);
sample = history_[i][(history_cursor_ + j) % window_size_];
@@ -223,8 +191,8 @@ void VarianceArray::WindowedStep(const complex<float>* data, bool /*dummy*/) {
// history window and a new block is started. The variances for the window
// are recomputed from scratch at each of these transitions.
void VarianceArray::BlockedStep(const complex<float>* data, bool /*dummy*/) {
- int blocks = min(window_size_, history_cursor_);
- for (int i = 0; i < freqs_; ++i) {
+ size_t blocks = min(window_size_, history_cursor_ + 1);
+ for (size_t i = 0; i < num_freqs_; ++i) {
AddToMean(data[i], count_ + 1, &sub_running_mean_[i]);
AddToMean(data[i] * std::conj(data[i]), count_ + 1,
&sub_running_mean_sq_[i]);
@@ -241,9 +209,9 @@ void VarianceArray::BlockedStep(const complex<float>* data, bool /*dummy*/) {
sub_running_mean_sq_[i] = complex<float>(0.0f, 0.0f);
running_mean_[i] = complex<float>(0.0f, 0.0f);
running_mean_sq_[i] = complex<float>(0.0f, 0.0f);
- for (int j = 0; j < min(window_size_, history_cursor_); ++j) {
- AddToMean(subhistory_[i][j], j, &running_mean_[i]);
- AddToMean(subhistory_sq_[i][j], j, &running_mean_sq_[i]);
+ for (size_t j = 0; j < min(window_size_, history_cursor_); ++j) {
+ AddToMean(subhistory_[i][j], j + 1, &running_mean_[i]);
+ AddToMean(subhistory_sq_[i][j], j + 1, &running_mean_sq_[i]);
}
++history_cursor_;
}
@@ -254,11 +222,57 @@ void VarianceArray::BlockedStep(const complex<float>* data, bool /*dummy*/) {
}
}
+// Recomputes variances for each window from scratch based on previous window.
+void VarianceArray::BlockBasedMovingAverage(const std::complex<float>* data,
+ bool /*dummy*/) {
+ // TODO(ekmeyerson) To mitigate potential divergence, add counter so that
+ // after every so often sums are computed scratch by summing over all
+ // elements instead of subtracting oldest and adding newest.
+ for (size_t i = 0; i < num_freqs_; ++i) {
+ sub_running_mean_[i] += data[i];
+ sub_running_mean_sq_[i] += data[i] * std::conj(data[i]);
+ }
+ ++count_;
+
+ // TODO(ekmeyerson) Make kWindowBlockSize nonconstant to allow
+ // experimentation with different block size,window size pairs.
+ if (count_ >= kWindowBlockSize) {
+ count_ = 0;
+
+ for (size_t i = 0; i < num_freqs_; ++i) {
+ running_mean_[i] -= subhistory_[i][history_cursor_];
+ running_mean_sq_[i] -= subhistory_sq_[i][history_cursor_];
+
+ float scale = 1.f / kWindowBlockSize;
+ subhistory_[i][history_cursor_] = sub_running_mean_[i] * scale;
+ subhistory_sq_[i][history_cursor_] = sub_running_mean_sq_[i] * scale;
+
+ sub_running_mean_[i] = std::complex<float>(0.0f, 0.0f);
+ sub_running_mean_sq_[i] = std::complex<float>(0.0f, 0.0f);
+
+ running_mean_[i] += subhistory_[i][history_cursor_];
+ running_mean_sq_[i] += subhistory_sq_[i][history_cursor_];
+
+ scale = 1.f / (buffer_full_ ? window_size_ : history_cursor_ + 1);
+ variance_[i] = std::real(running_mean_sq_[i] * scale -
+ running_mean_[i] * scale *
+ std::conj(running_mean_[i]) * scale);
+ }
+
+ ++history_cursor_;
+ if (history_cursor_ >= window_size_) {
+ buffer_full_ = true;
+ history_cursor_ = 0;
+ }
+ }
+}
+
void VarianceArray::Clear() {
- memset(running_mean_.get(), 0, sizeof(*running_mean_.get()) * freqs_);
- memset(running_mean_sq_.get(), 0, sizeof(*running_mean_sq_.get()) * freqs_);
- memset(variance_.get(), 0, sizeof(*variance_.get()) * freqs_);
- memset(conj_sum_.get(), 0, sizeof(*conj_sum_.get()) * freqs_);
+ memset(running_mean_.get(), 0, sizeof(*running_mean_.get()) * num_freqs_);
+ memset(running_mean_sq_.get(), 0,
+ sizeof(*running_mean_sq_.get()) * num_freqs_);
+ memset(variance_.get(), 0, sizeof(*variance_.get()) * num_freqs_);
+ memset(conj_sum_.get(), 0, sizeof(*conj_sum_.get()) * num_freqs_);
history_cursor_ = 0;
count_ = 0;
array_mean_ = 0.0f;
@@ -266,18 +280,18 @@ void VarianceArray::Clear() {
void VarianceArray::ApplyScale(float scale) {
array_mean_ = 0.0f;
- for (int i = 0; i < freqs_; ++i) {
+ for (size_t i = 0; i < num_freqs_; ++i) {
variance_[i] *= scale * scale;
array_mean_ += (variance_[i] - array_mean_) / (i + 1);
}
}
-GainApplier::GainApplier(int freqs, float change_limit)
- : freqs_(freqs),
+GainApplier::GainApplier(size_t freqs, float change_limit)
+ : num_freqs_(freqs),
change_limit_(change_limit),
target_(new float[freqs]()),
current_(new float[freqs]()) {
- for (int i = 0; i < freqs; ++i) {
+ for (size_t i = 0; i < freqs; ++i) {
target_[i] = 1.0f;
current_[i] = 1.0f;
}
@@ -285,7 +299,7 @@ GainApplier::GainApplier(int freqs, float change_limit)
void GainApplier::Apply(const complex<float>* in_block,
complex<float>* out_block) {
- for (int i = 0; i < freqs_; ++i) {
+ for (size_t i = 0; i < num_freqs_; ++i) {
float factor = sqrtf(fabsf(current_[i]));
if (!std::isnormal(factor)) {
factor = 1.0f;
diff --git a/chromium/third_party/webrtc/modules/audio_processing/intelligibility/intelligibility_utils.h b/chromium/third_party/webrtc/modules/audio_processing/intelligibility/intelligibility_utils.h
index 075b8ad46be..4ac11671474 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/intelligibility/intelligibility_utils.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/intelligibility/intelligibility_utils.h
@@ -23,6 +23,26 @@ namespace webrtc {
namespace intelligibility {
+// Return |current| changed towards |target|, with the change being at most
+// |limit|.
+float UpdateFactor(float target, float current, float limit);
+
+// Apply a small fudge to degenerate complex values. The numbers in the array
+// were chosen randomly, so that even a series of all zeroes has some small
+// variability.
+std::complex<float> zerofudge(std::complex<float> c);
+
+// Incremental mean computation. Return the mean of the series with the
+// mean |mean| with added |data|.
+std::complex<float> NewMean(std::complex<float> mean,
+ std::complex<float> data,
+ size_t count);
+
+// Updates |mean| with added |data|;
+void AddToMean(std::complex<float> data,
+ size_t count,
+ std::complex<float>* mean);
+
// Internal helper for computing the variances of a stream of arrays.
// The result is an array of variances per position: the i-th variance
// is the variance of the stream of data on the i-th positions in the
@@ -43,7 +63,8 @@ class VarianceArray {
kStepInfinite = 0,
kStepDecaying,
kStepWindowed,
- kStepBlocked
+ kStepBlocked,
+ kStepBlockBasedMovingAverage
};
// Construct an instance for the given input array length (|freqs|) and
@@ -51,7 +72,7 @@ class VarianceArray {
// |window_size| is the number of samples for kStepWindowed and
// the number of blocks for kStepBlocked. |decay| is the forgetting factor
// for kStepDecaying.
- VarianceArray(int freqs, StepType type, int window_size, float decay);
+ VarianceArray(size_t freqs, StepType type, size_t window_size, float decay);
// Add a new data point to the series and compute the new variances.
// TODO(bercic) |skip_fudge| is a flag for kStepWindowed and kStepDecaying,
@@ -77,6 +98,7 @@ class VarianceArray {
void DecayStep(const std::complex<float>* data, bool dummy);
void WindowedStep(const std::complex<float>* data, bool dummy);
void BlockedStep(const std::complex<float>* data, bool dummy);
+ void BlockBasedMovingAverage(const std::complex<float>* data, bool dummy);
// TODO(ekmeyerson): Switch the following running means
// and histories from rtc::scoped_ptr to std::vector.
@@ -99,12 +121,13 @@ class VarianceArray {
rtc::scoped_ptr<float[]> variance_;
rtc::scoped_ptr<float[]> conj_sum_;
- const int freqs_;
- const int window_size_;
+ const size_t num_freqs_;
+ const size_t window_size_;
const float decay_;
- int history_cursor_;
- int count_;
+ size_t history_cursor_;
+ size_t count_;
float array_mean_;
+ bool buffer_full_;
void (VarianceArray::*step_func_)(const std::complex<float>*, bool);
};
@@ -113,7 +136,7 @@ class VarianceArray {
// constrained by a limit on the magnitude of the changes.
class GainApplier {
public:
- GainApplier(int freqs, float change_limit);
+ GainApplier(size_t freqs, float change_limit);
// Copy |in_block| to |out_block|, multiplied by the current set of gains,
// and step the current set of gains towards the target set.
@@ -124,7 +147,7 @@ class GainApplier {
float* target() const { return target_.get(); }
private:
- const int freqs_;
+ const size_t num_freqs_;
const float change_limit_;
rtc::scoped_ptr<float[]> target_;
rtc::scoped_ptr<float[]> current_;
diff --git a/chromium/third_party/webrtc/modules/audio_processing/intelligibility/intelligibility_utils_unittest.cc b/chromium/third_party/webrtc/modules/audio_processing/intelligibility/intelligibility_utils_unittest.cc
new file mode 100644
index 00000000000..9caa2eb0a15
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/audio_processing/intelligibility/intelligibility_utils_unittest.cc
@@ -0,0 +1,180 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+//
+// Unit tests for intelligibility utils.
+//
+
+#include <math.h>
+#include <complex>
+#include <iostream>
+#include <vector>
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/arraysize.h"
+#include "webrtc/modules/audio_processing/intelligibility/intelligibility_utils.h"
+
+using std::complex;
+using std::vector;
+
+namespace webrtc {
+
+namespace intelligibility {
+
+vector<vector<complex<float>>> GenerateTestData(int freqs, int samples) {
+ vector<vector<complex<float>>> data(samples);
+ for (int i = 0; i < samples; i++) {
+ for (int j = 0; j < freqs; j++) {
+ const float val = 0.99f / ((i + 1) * (j + 1));
+ data[i].push_back(complex<float>(val, val));
+ }
+ }
+ return data;
+}
+
+// Tests UpdateFactor.
+TEST(IntelligibilityUtilsTest, TestUpdateFactor) {
+ EXPECT_EQ(0, intelligibility::UpdateFactor(0, 0, 0));
+ EXPECT_EQ(4, intelligibility::UpdateFactor(4, 2, 3));
+ EXPECT_EQ(3, intelligibility::UpdateFactor(4, 2, 1));
+ EXPECT_EQ(2, intelligibility::UpdateFactor(2, 4, 3));
+ EXPECT_EQ(3, intelligibility::UpdateFactor(2, 4, 1));
+}
+
+// Tests zerofudge.
+TEST(IntelligibilityUtilsTest, TestCplx) {
+ complex<float> t0(1.f, 0.f);
+ t0 = intelligibility::zerofudge(t0);
+ EXPECT_NE(t0.imag(), 0.f);
+ EXPECT_NE(t0.real(), 0.f);
+}
+
+// Tests NewMean and AddToMean.
+TEST(IntelligibilityUtilsTest, TestMeanUpdate) {
+ const complex<float> data[] = {{3, 8}, {7, 6}, {2, 1}, {8, 9}, {0, 6}};
+ const complex<float> means[] = {{3, 8}, {5, 7}, {4, 5}, {5, 6}, {4, 6}};
+ complex<float> mean(3, 8);
+ for (size_t i = 0; i < arraysize(data); i++) {
+ EXPECT_EQ(means[i], NewMean(mean, data[i], i + 1));
+ AddToMean(data[i], i + 1, &mean);
+ EXPECT_EQ(means[i], mean);
+ }
+}
+
+// Tests VarianceArray, for all variance step types.
+TEST(IntelligibilityUtilsTest, TestVarianceArray) {
+ const int kFreqs = 10;
+ const int kSamples = 100;
+ const int kWindowSize = 10; // Should pass for all kWindowSize > 1.
+ const float kDecay = 0.5f;
+ vector<VarianceArray::StepType> step_types;
+ step_types.push_back(VarianceArray::kStepInfinite);
+ step_types.push_back(VarianceArray::kStepDecaying);
+ step_types.push_back(VarianceArray::kStepWindowed);
+ step_types.push_back(VarianceArray::kStepBlocked);
+ step_types.push_back(VarianceArray::kStepBlockBasedMovingAverage);
+ const vector<vector<complex<float>>> test_data(
+ GenerateTestData(kFreqs, kSamples));
+ for (auto step_type : step_types) {
+ VarianceArray variance_array(kFreqs, step_type, kWindowSize, kDecay);
+ EXPECT_EQ(0, variance_array.variance()[0]);
+ EXPECT_EQ(0, variance_array.array_mean());
+ variance_array.ApplyScale(2.0f);
+ EXPECT_EQ(0, variance_array.variance()[0]);
+ EXPECT_EQ(0, variance_array.array_mean());
+
+ // Makes sure Step is doing something.
+ variance_array.Step(&test_data[0][0]);
+ for (int i = 1; i < kSamples; i++) {
+ variance_array.Step(&test_data[i][0]);
+ EXPECT_GE(variance_array.array_mean(), 0.0f);
+ EXPECT_LE(variance_array.array_mean(), 1.0f);
+ for (int j = 0; j < kFreqs; j++) {
+ EXPECT_GE(variance_array.variance()[j], 0.0f);
+ EXPECT_LE(variance_array.variance()[j], 1.0f);
+ }
+ }
+ variance_array.Clear();
+ EXPECT_EQ(0, variance_array.variance()[0]);
+ EXPECT_EQ(0, variance_array.array_mean());
+ }
+}
+
+// Tests exact computation on synthetic data.
+TEST(IntelligibilityUtilsTest, TestMovingBlockAverage) {
+ // Exact, not unbiased estimates.
+ const float kTestVarianceBufferNotFull = 16.5f;
+ const float kTestVarianceBufferFull1 = 66.5f;
+ const float kTestVarianceBufferFull2 = 333.375f;
+ const int kFreqs = 2;
+ const int kSamples = 50;
+ const int kWindowSize = 2;
+ const float kDecay = 0.5f;
+ const float kMaxError = 0.0001f;
+
+ VarianceArray variance_array(
+ kFreqs, VarianceArray::kStepBlockBasedMovingAverage, kWindowSize, kDecay);
+
+ vector<vector<complex<float>>> test_data(kSamples);
+ for (int i = 0; i < kSamples; i++) {
+ for (int j = 0; j < kFreqs; j++) {
+ if (i < 30) {
+ test_data[i].push_back(complex<float>(static_cast<float>(kSamples - i),
+ static_cast<float>(i + 1)));
+ } else {
+ test_data[i].push_back(complex<float>(0.f, 0.f));
+ }
+ }
+ }
+
+ for (int i = 0; i < kSamples; i++) {
+ variance_array.Step(&test_data[i][0]);
+ for (int j = 0; j < kFreqs; j++) {
+ if (i < 9) { // In utils, kWindowBlockSize = 10.
+ EXPECT_EQ(0, variance_array.variance()[j]);
+ } else if (i < 19) {
+ EXPECT_NEAR(kTestVarianceBufferNotFull, variance_array.variance()[j],
+ kMaxError);
+ } else if (i < 39) {
+ EXPECT_NEAR(kTestVarianceBufferFull1, variance_array.variance()[j],
+ kMaxError);
+ } else if (i < 49) {
+ EXPECT_NEAR(kTestVarianceBufferFull2, variance_array.variance()[j],
+ kMaxError);
+ } else {
+ EXPECT_EQ(0, variance_array.variance()[j]);
+ }
+ }
+ }
+}
+
+// Tests gain applier.
+TEST(IntelligibilityUtilsTest, TestGainApplier) {
+ const int kFreqs = 10;
+ const int kSamples = 100;
+ const float kChangeLimit = 0.1f;
+ GainApplier gain_applier(kFreqs, kChangeLimit);
+ const vector<vector<complex<float>>> in_data(
+ GenerateTestData(kFreqs, kSamples));
+ vector<vector<complex<float>>> out_data(GenerateTestData(kFreqs, kSamples));
+ for (int i = 0; i < kSamples; i++) {
+ gain_applier.Apply(&in_data[i][0], &out_data[i][0]);
+ for (int j = 0; j < kFreqs; j++) {
+ EXPECT_GT(out_data[i][j].real(), 0.0f);
+ EXPECT_LT(out_data[i][j].real(), 1.0f);
+ EXPECT_GT(out_data[i][j].imag(), 0.0f);
+ EXPECT_LT(out_data[i][j].imag(), 1.0f);
+ }
+ }
+}
+
+} // namespace intelligibility
+
+} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_processing/intelligibility/intelligibility_proc.cc b/chromium/third_party/webrtc/modules/audio_processing/intelligibility/test/intelligibility_proc.cc
index 9f7d84e701a..e20429da3f0 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/intelligibility/intelligibility_proc.cc
+++ b/chromium/third_party/webrtc/modules/audio_processing/intelligibility/test/intelligibility_proc.cc
@@ -16,9 +16,9 @@
#include <stdint.h>
#include <stdlib.h>
-#include <string>
#include <sys/stat.h>
#include <sys/types.h>
+#include <string>
#include "gflags/gflags.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -31,19 +31,24 @@
#include "webrtc/test/testsupport/fileutils.h"
using std::complex;
+using webrtc::intelligibility::VarianceArray;
namespace webrtc {
+namespace {
-using webrtc::RealFourier;
-using webrtc::IntelligibilityEnhancer;
+bool ValidateClearWindow(const char* flagname, int32_t value) {
+ return value > 0;
+}
DEFINE_int32(clear_type,
- webrtc::intelligibility::VarianceArray::kStepInfinite,
+ webrtc::intelligibility::VarianceArray::kStepDecaying,
"Variance algorithm for clear data.");
DEFINE_double(clear_alpha, 0.9, "Variance decay factor for clear data.");
DEFINE_int32(clear_window,
475,
"Window size for windowed variance for clear data.");
+const bool clear_window_dummy =
+ google::RegisterFlagValidator(&FLAGS_clear_window, &ValidateClearWindow);
DEFINE_int32(sample_rate,
16000,
"Audio sample rate used in the input and output files.");
@@ -63,8 +68,6 @@ DEFINE_string(out_file,
"Enhanced output. Use '-' to "
"play through aplay immediately.");
-// Constant IntelligibilityEnhancer constructor parameters.
-const int kErbResolution = 2;
const int kNumChannels = 1;
// void function for gtest
@@ -104,11 +107,14 @@ void void_main(int argc, char* argv[]) {
noise_file.ReadSamples(samples, &noise_fpcm[0]);
// Run intelligibility enhancement.
-
- IntelligibilityEnhancer enh(
- kErbResolution, FLAGS_sample_rate, kNumChannels, FLAGS_clear_type,
- static_cast<float>(FLAGS_clear_alpha), FLAGS_clear_window, FLAGS_ana_rate,
- FLAGS_var_rate, FLAGS_gain_limit);
+ IntelligibilityEnhancer::Config config;
+ config.sample_rate_hz = FLAGS_sample_rate;
+ config.var_type = static_cast<VarianceArray::StepType>(FLAGS_clear_type);
+ config.var_decay_rate = static_cast<float>(FLAGS_clear_alpha);
+ config.var_window_size = static_cast<size_t>(FLAGS_clear_window);
+ config.analysis_rate = FLAGS_ana_rate;
+ config.gain_change_limit = FLAGS_gain_limit;
+ IntelligibilityEnhancer enh(config);
// Slice the input into smaller chunks, as the APM would do, and feed them
// through the enhancer.
@@ -116,8 +122,8 @@ void void_main(int argc, char* argv[]) {
float* noise_cursor = &noise_fpcm[0];
for (size_t i = 0; i < samples; i += fragment_size) {
- enh.ProcessCaptureAudio(&noise_cursor);
- enh.ProcessRenderAudio(&clear_cursor);
+ enh.AnalyzeCaptureAudio(&noise_cursor, FLAGS_sample_rate, kNumChannels);
+ enh.ProcessRenderAudio(&clear_cursor, FLAGS_sample_rate, kNumChannels);
clear_cursor += fragment_size;
noise_cursor += fragment_size;
}
@@ -137,6 +143,7 @@ void void_main(int argc, char* argv[]) {
}
}
+} // namespace
} // namespace webrtc
int main(int argc, char* argv[]) {
diff --git a/chromium/third_party/webrtc/modules/audio_processing/logging/aec_logging.h b/chromium/third_party/webrtc/modules/audio_processing/logging/aec_logging.h
new file mode 100644
index 00000000000..3cf9ff89edc
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/audio_processing/logging/aec_logging.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_AEC_AEC_LOGGING_
+#define WEBRTC_MODULES_AUDIO_PROCESSING_AEC_AEC_LOGGING_
+
+#include <stdio.h>
+
+#include "webrtc/modules/audio_processing/logging/aec_logging_file_handling.h"
+
+// To enable AEC logging, invoke GYP with -Daec_debug_dump=1.
+#ifdef WEBRTC_AEC_DEBUG_DUMP
+// Dumps a wav data to file.
+#define RTC_AEC_DEBUG_WAV_WRITE(file, data, num_samples) \
+ do { \
+ rtc_WavWriteSamples(file, data, num_samples); \
+ } while (0)
+
+// (Re)opens a wav file for writing using the specified sample rate.
+#define RTC_AEC_DEBUG_WAV_REOPEN(name, instance_index, process_rate, \
+ sample_rate, wav_file) \
+ do { \
+ WebRtcAec_ReopenWav(name, instance_index, process_rate, sample_rate, \
+ wav_file); \
+ } while (0)
+
+// Closes a wav file.
+#define RTC_AEC_DEBUG_WAV_CLOSE(wav_file) \
+ do { \
+ rtc_WavClose(wav_file); \
+ } while (0)
+
+// Dumps a raw data to file.
+#define RTC_AEC_DEBUG_RAW_WRITE(file, data, data_size) \
+ do { \
+ (void) fwrite(data, data_size, 1, file); \
+ } while (0)
+
+// Opens a raw data file for writing using the specified sample rate.
+#define RTC_AEC_DEBUG_RAW_OPEN(name, instance_counter, file) \
+ do { \
+ WebRtcAec_RawFileOpen(name, instance_counter, file); \
+ } while (0)
+
+// Closes a raw data file.
+#define RTC_AEC_DEBUG_RAW_CLOSE(file) \
+ do { \
+ fclose(file); \
+ } while (0)
+
+#else // RTC_AEC_DEBUG_DUMP
+#define RTC_AEC_DEBUG_WAV_WRITE(file, data, num_samples) \
+ do { \
+ } while (0)
+
+#define RTC_AEC_DEBUG_WAV_REOPEN(wav_file, name, instance_index, process_rate, \
+ sample_rate) \
+ do { \
+ } while (0)
+
+#define RTC_AEC_DEBUG_WAV_CLOSE(wav_file) \
+ do { \
+ } while (0)
+
+#define RTC_AEC_DEBUG_RAW_WRITE(file, data, data_size) \
+ do { \
+ } while (0)
+
+#define RTC_AEC_DEBUG_RAW_OPEN(file, name, instance_counter) \
+ do { \
+ } while (0)
+
+#define RTC_AEC_DEBUG_RAW_CLOSE(file) \
+ do { \
+ } while (0)
+
+#endif // WEBRTC_AEC_DEBUG_DUMP
+
+#endif // WEBRTC_MODULES_AUDIO_PROCESSING_AEC_AEC_LOGGING_
diff --git a/chromium/third_party/webrtc/modules/audio_processing/logging/aec_logging_file_handling.cc b/chromium/third_party/webrtc/modules/audio_processing/logging/aec_logging_file_handling.cc
new file mode 100644
index 00000000000..3a434714e1e
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/audio_processing/logging/aec_logging_file_handling.cc
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_processing/logging/aec_logging_file_handling.h"
+
+#include <stdint.h>
+#include <stdio.h>
+
+#include "webrtc/base/checks.h"
+#include "webrtc/base/stringutils.h"
+#include "webrtc/common_audio/wav_file.h"
+#include "webrtc/typedefs.h"
+
+#ifdef WEBRTC_AEC_DEBUG_DUMP
+void WebRtcAec_ReopenWav(const char* name,
+ int instance_index,
+ int process_rate,
+ int sample_rate,
+ rtc_WavWriter** wav_file) {
+ if (*wav_file) {
+ if (rtc_WavSampleRate(*wav_file) == sample_rate)
+ return;
+ rtc_WavClose(*wav_file);
+ }
+ char filename[64];
+ int written = rtc::sprintfn(filename, sizeof(filename), "%s%d-%d.wav", name,
+ instance_index, process_rate);
+
+ // Ensure there was no buffer output error.
+ RTC_DCHECK_GE(written, 0);
+ // Ensure that the buffer size was sufficient.
+ RTC_DCHECK_LT(static_cast<size_t>(written), sizeof(filename));
+
+ *wav_file = rtc_WavOpen(filename, sample_rate, 1);
+}
+
+void WebRtcAec_RawFileOpen(const char* name, int instance_index, FILE** file) {
+ char filename[64];
+ int written = rtc::sprintfn(filename, sizeof(filename), "%s_%d.dat", name,
+ instance_index);
+
+ // Ensure there was no buffer output error.
+ RTC_DCHECK_GE(written, 0);
+ // Ensure that the buffer size was sufficient.
+ RTC_DCHECK_LT(static_cast<size_t>(written), sizeof(filename));
+
+ *file = fopen(filename, "wb");
+}
+
+#endif // WEBRTC_AEC_DEBUG_DUMP
diff --git a/chromium/third_party/webrtc/modules/audio_processing/logging/aec_logging_file_handling.h b/chromium/third_party/webrtc/modules/audio_processing/logging/aec_logging_file_handling.h
new file mode 100644
index 00000000000..5ec83948726
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/audio_processing/logging/aec_logging_file_handling.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_AEC_AEC_LOGGING_FILE_HANDLING_
+#define WEBRTC_MODULES_AUDIO_PROCESSING_AEC_AEC_LOGGING_FILE_HANDLING_
+
+#include <stdio.h>
+
+#include "webrtc/common_audio/wav_file.h"
+#include "webrtc/typedefs.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef WEBRTC_AEC_DEBUG_DUMP
+// Opens a new Wav file for writing. If it was already open with a different
+// sample frequency, it closes it first.
+void WebRtcAec_ReopenWav(const char* name,
+ int instance_index,
+ int process_rate,
+ int sample_rate,
+ rtc_WavWriter** wav_file);
+
+// Opens dumpfile with instance-specific filename.
+void WebRtcAec_RawFileOpen(const char* name, int instance_index, FILE** file);
+
+#endif // WEBRTC_AEC_DEBUG_DUMP
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // WEBRTC_MODULES_AUDIO_PROCESSING_AEC_AEC_LOGGING_FILE_HANDLING_
diff --git a/chromium/third_party/webrtc/modules/audio_processing/noise_suppression_impl.h b/chromium/third_party/webrtc/modules/audio_processing/noise_suppression_impl.h
index 33a0e060a68..76a39b8e09c 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/noise_suppression_impl.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/noise_suppression_impl.h
@@ -32,12 +32,12 @@ class NoiseSuppressionImpl : public NoiseSuppression,
// NoiseSuppression implementation.
bool is_enabled() const override;
float speech_probability() const override;
+ Level level() const override;
private:
// NoiseSuppression implementation.
int Enable(bool enable) override;
int set_level(Level level) override;
- Level level() const override;
// ProcessingComponent implementation.
void* CreateHandle() const override;
diff --git a/chromium/third_party/webrtc/modules/audio_processing/ns/include/noise_suppression.h b/chromium/third_party/webrtc/modules/audio_processing/ns/include/noise_suppression.h
index 41bf9aca41b..9dac56bdeeb 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/ns/include/noise_suppression.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/ns/include/noise_suppression.h
@@ -11,6 +11,8 @@
#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_NS_INCLUDE_NOISE_SUPPRESSION_H_
#define WEBRTC_MODULES_AUDIO_PROCESSING_NS_INCLUDE_NOISE_SUPPRESSION_H_
+#include <stddef.h>
+
#include "webrtc/typedefs.h"
typedef struct NsHandleT NsHandle;
@@ -92,7 +94,7 @@ void WebRtcNs_Analyze(NsHandle* NS_inst, const float* spframe);
*/
void WebRtcNs_Process(NsHandle* NS_inst,
const float* const* spframe,
- int num_bands,
+ size_t num_bands,
float* const* outframe);
/* Returns the internally used prior speech probability of the current frame.
diff --git a/chromium/third_party/webrtc/modules/audio_processing/ns/noise_suppression.c b/chromium/third_party/webrtc/modules/audio_processing/ns/noise_suppression.c
index cdecd627326..13f1b2d6dc3 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/ns/noise_suppression.c
+++ b/chromium/third_party/webrtc/modules/audio_processing/ns/noise_suppression.c
@@ -41,7 +41,7 @@ void WebRtcNs_Analyze(NsHandle* NS_inst, const float* spframe) {
void WebRtcNs_Process(NsHandle* NS_inst,
const float* const* spframe,
- int num_bands,
+ size_t num_bands,
float* const* outframe) {
WebRtcNs_ProcessCore((NoiseSuppressionC*)NS_inst, spframe, num_bands,
outframe);
diff --git a/chromium/third_party/webrtc/modules/audio_processing/ns/ns_core.c b/chromium/third_party/webrtc/modules/audio_processing/ns/ns_core.c
index 652f0fea00a..1d6091400e8 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/ns/ns_core.c
+++ b/chromium/third_party/webrtc/modules/audio_processing/ns/ns_core.c
@@ -217,7 +217,7 @@ int WebRtcNs_InitCore(NoiseSuppressionC* self, uint32_t fs) {
static void NoiseEstimation(NoiseSuppressionC* self,
float* magn,
float* noise) {
- int i, s, offset;
+ size_t i, s, offset;
float lmagn[HALF_ANAL_BLOCKL], delta;
if (self->updates < END_STARTUP_LONG) {
@@ -522,8 +522,8 @@ static void FeatureParameterExtraction(NoiseSuppressionC* self, int flag) {
// Spectral flatness is returned in self->featureData[0].
static void ComputeSpectralFlatness(NoiseSuppressionC* self,
const float* magnIn) {
- int i;
- int shiftLP = 1; // Option to remove first bin(s) from spectral measures.
+ size_t i;
+ size_t shiftLP = 1; // Option to remove first bin(s) from spectral measures.
float avgSpectralFlatnessNum, avgSpectralFlatnessDen, spectralTmp;
// Compute spectral measures.
@@ -568,7 +568,7 @@ static void ComputeSnr(const NoiseSuppressionC* self,
const float* noise,
float* snrLocPrior,
float* snrLocPost) {
- int i;
+ size_t i;
for (i = 0; i < self->magnLen; i++) {
// Previous post SNR.
@@ -596,7 +596,7 @@ static void ComputeSpectralDifference(NoiseSuppressionC* self,
const float* magnIn) {
// avgDiffNormMagn = var(magnIn) - cov(magnIn, magnAvgPause)^2 /
// var(magnAvgPause)
- int i;
+ size_t i;
float avgPause, avgMagn, covMagnPause, varPause, varMagn, avgDiffNormMagn;
avgPause = 0.0;
@@ -643,7 +643,8 @@ static void SpeechNoiseProb(NoiseSuppressionC* self,
float* probSpeechFinal,
const float* snrLocPrior,
const float* snrLocPost) {
- int i, sgnMap;
+ size_t i;
+ int sgnMap;
float invLrt, gainPrior, indPrior;
float logLrtTimeAvgKsum, besselTmp;
float indicator0, indicator1, indicator2;
@@ -802,7 +803,7 @@ static void UpdateNoiseEstimate(NoiseSuppressionC* self,
const float* snrLocPrior,
const float* snrLocPost,
float* noise) {
- int i;
+ size_t i;
float probSpeech, probNonSpeech;
// Time-avg parameter for noise update.
float gammaNoiseTmp = NOISE_UPDATE;
@@ -853,8 +854,8 @@ static void UpdateNoiseEstimate(NoiseSuppressionC* self,
// Output:
// * |buffer| is the updated buffer.
static void UpdateBuffer(const float* frame,
- int frame_length,
- int buffer_length,
+ size_t frame_length,
+ size_t buffer_length,
float* buffer) {
assert(buffer_length < 2 * frame_length);
@@ -885,12 +886,12 @@ static void UpdateBuffer(const float* frame,
// * |magn| is the calculated signal magnitude in the frequency domain.
static void FFT(NoiseSuppressionC* self,
float* time_data,
- int time_data_length,
- int magnitude_length,
+ size_t time_data_length,
+ size_t magnitude_length,
float* real,
float* imag,
float* magn) {
- int i;
+ size_t i;
assert(magnitude_length == time_data_length / 2 + 1);
@@ -923,10 +924,10 @@ static void FFT(NoiseSuppressionC* self,
static void IFFT(NoiseSuppressionC* self,
const float* real,
const float* imag,
- int magnitude_length,
- int time_data_length,
+ size_t magnitude_length,
+ size_t time_data_length,
float* time_data) {
- int i;
+ size_t i;
assert(time_data_length == 2 * (magnitude_length - 1));
@@ -948,8 +949,8 @@ static void IFFT(NoiseSuppressionC* self,
// * |buffer| is the buffer over which the energy is calculated.
// * |length| is the length of the buffer.
// Returns the calculated energy.
-static float Energy(const float* buffer, int length) {
- int i;
+static float Energy(const float* buffer, size_t length) {
+ size_t i;
float energy = 0.f;
for (i = 0; i < length; ++i) {
@@ -968,9 +969,9 @@ static float Energy(const float* buffer, int length) {
// * |data_windowed| is the windowed data.
static void Windowing(const float* window,
const float* data,
- int length,
+ size_t length,
float* data_windowed) {
- int i;
+ size_t i;
for (i = 0; i < length; ++i) {
data_windowed[i] = window[i] * data[i];
@@ -985,7 +986,7 @@ static void Windowing(const float* window,
static void ComputeDdBasedWienerFilter(const NoiseSuppressionC* self,
const float* magn,
float* theFilter) {
- int i;
+ size_t i;
float snrPrior, previousEstimateStsa, currentEstimateStsa;
for (i = 0; i < self->magnLen; i++) {
@@ -1041,8 +1042,8 @@ int WebRtcNs_set_policy_core(NoiseSuppressionC* self, int mode) {
}
void WebRtcNs_AnalyzeCore(NoiseSuppressionC* self, const float* speechFrame) {
- int i;
- const int kStartBand = 5; // Skip first frequency bins during estimation.
+ size_t i;
+ const size_t kStartBand = 5; // Skip first frequency bins during estimation.
int updateParsFlag;
float energy;
float signalEnergy = 0.f;
@@ -1182,11 +1183,11 @@ void WebRtcNs_AnalyzeCore(NoiseSuppressionC* self, const float* speechFrame) {
void WebRtcNs_ProcessCore(NoiseSuppressionC* self,
const float* const* speechFrame,
- int num_bands,
+ size_t num_bands,
float* const* outFrame) {
// Main routine for noise reduction.
int flagHB = 0;
- int i, j;
+ size_t i, j;
float energy1, energy2, gain, factor, factor1, factor2;
float fout[BLOCKL_MAX];
@@ -1210,7 +1211,7 @@ void WebRtcNs_ProcessCore(NoiseSuppressionC* self,
const float* const* speechFrameHB = NULL;
float* const* outFrameHB = NULL;
- int num_high_bands = 0;
+ size_t num_high_bands = 0;
if (num_bands > 1) {
speechFrameHB = &speechFrame[1];
outFrameHB = &outFrame[1];
diff --git a/chromium/third_party/webrtc/modules/audio_processing/ns/ns_core.h b/chromium/third_party/webrtc/modules/audio_processing/ns/ns_core.h
index 8a7992ec5a6..aba1c468ed8 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/ns/ns_core.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/ns/ns_core.h
@@ -51,10 +51,10 @@ typedef struct NSParaExtract_ {
typedef struct NoiseSuppressionC_ {
uint32_t fs;
- int blockLen;
- int windShift;
- int anaLen;
- int magnLen;
+ size_t blockLen;
+ size_t windShift;
+ size_t anaLen;
+ size_t magnLen;
int aggrMode;
const float* window;
float analyzeBuf[ANAL_BLOCKL_MAX];
@@ -74,7 +74,7 @@ typedef struct NoiseSuppressionC_ {
float denoiseBound;
int gainmap;
// FFT work arrays.
- int ip[IP_LENGTH];
+ size_t ip[IP_LENGTH];
float wfft[W_LENGTH];
// Parameters for new method: some not needed, will reduce/cleanup later.
@@ -181,7 +181,7 @@ void WebRtcNs_AnalyzeCore(NoiseSuppressionC* self, const float* speechFrame);
*/
void WebRtcNs_ProcessCore(NoiseSuppressionC* self,
const float* const* inFrame,
- int num_bands,
+ size_t num_bands,
float* const* outFrame);
#ifdef __cplusplus
diff --git a/chromium/third_party/webrtc/modules/audio_processing/ns/nsx_core.c b/chromium/third_party/webrtc/modules/audio_processing/ns/nsx_core.c
index 0f9894e9fc3..ed6125aa783 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/ns/nsx_core.c
+++ b/chromium/third_party/webrtc/modules/audio_processing/ns/nsx_core.c
@@ -68,7 +68,7 @@ static const int16_t WebRtcNsx_kLogTableFrac[256] = {
#endif // WEBRTC_DETECT_NEON || WEBRTC_HAS_NEON
// Skip first frequency bins during estimation. (0 <= value < 64)
-static const int kStartBand = 5;
+static const size_t kStartBand = 5;
// hybrib Hanning & flat window
static const int16_t kBlocks80w128x[128] = {
@@ -306,7 +306,7 @@ static void UpdateNoiseEstimate(NoiseSuppressionFixedC* inst, int offset) {
int16_t tmp16 = 0;
const int16_t kExp2Const = 11819; // Q13
- int i = 0;
+ size_t i = 0;
tmp16 = WebRtcSpl_MaxValueW16(inst->noiseEstLogQuantile + offset,
inst->magnLen);
@@ -341,7 +341,7 @@ static void NoiseEstimationC(NoiseSuppressionFixedC* inst,
const int16_t log2_const = 22713; // Q15
const int16_t width_factor = 21845;
- int i, s, offset;
+ size_t i, s, offset;
tabind = inst->stages - inst->normData;
assert(tabind < 9);
@@ -454,7 +454,7 @@ static void NoiseEstimationC(NoiseSuppressionFixedC* inst,
// Filter the data in the frequency domain, and create spectrum.
static void PrepareSpectrumC(NoiseSuppressionFixedC* inst, int16_t* freq_buf) {
- int i = 0, j = 0;
+ size_t i = 0, j = 0;
for (i = 0; i < inst->magnLen; i++) {
inst->real[i] = (int16_t)((inst->real[i] *
@@ -477,7 +477,7 @@ static void PrepareSpectrumC(NoiseSuppressionFixedC* inst, int16_t* freq_buf) {
static void DenormalizeC(NoiseSuppressionFixedC* inst,
int16_t* in,
int factor) {
- int i = 0;
+ size_t i = 0;
int32_t tmp32 = 0;
for (i = 0; i < inst->anaLen; i += 1) {
tmp32 = WEBRTC_SPL_SHIFT_W32((int32_t)in[i],
@@ -491,7 +491,7 @@ static void DenormalizeC(NoiseSuppressionFixedC* inst,
static void SynthesisUpdateC(NoiseSuppressionFixedC* inst,
int16_t* out_frame,
int16_t gain_factor) {
- int i = 0;
+ size_t i = 0;
int16_t tmp16a = 0;
int16_t tmp16b = 0;
int32_t tmp32 = 0;
@@ -523,7 +523,7 @@ static void SynthesisUpdateC(NoiseSuppressionFixedC* inst,
static void AnalysisUpdateC(NoiseSuppressionFixedC* inst,
int16_t* out,
int16_t* new_speech) {
- int i = 0;
+ size_t i = 0;
// For lower band update analysis buffer.
memcpy(inst->analysisBuffer, inst->analysisBuffer + inst->blockLen10ms,
@@ -542,7 +542,7 @@ static void AnalysisUpdateC(NoiseSuppressionFixedC* inst,
static void NormalizeRealBufferC(NoiseSuppressionFixedC* inst,
const int16_t* in,
int16_t* out) {
- int i = 0;
+ size_t i = 0;
assert(inst->normData >= 0);
for (i = 0; i < inst->anaLen; ++i) {
out[i] = in[i] << inst->normData; // Q(normData)
@@ -1026,7 +1026,7 @@ void WebRtcNsx_ComputeSpectralFlatness(NoiseSuppressionFixedC* inst,
int16_t zeros, frac, intPart;
- int i;
+ size_t i;
// for flatness
avgSpectralFlatnessNum = 0;
@@ -1099,7 +1099,8 @@ void WebRtcNsx_ComputeSpectralDifference(NoiseSuppressionFixedC* inst,
int16_t tmp16no1;
- int i, norm32, nShifts;
+ size_t i;
+ int norm32, nShifts;
avgPauseFX = 0;
maxPause = 0;
@@ -1198,7 +1199,7 @@ void WebRtcNsx_DataAnalysis(NoiseSuppressionFixedC* inst,
int16_t matrix_determinant = 0;
int16_t maxWinData;
- int i, j;
+ size_t i, j;
int zeros;
int net_norm = 0;
int right_shifts_in_magnU16 = 0;
@@ -1430,7 +1431,7 @@ void WebRtcNsx_DataSynthesis(NoiseSuppressionFixedC* inst, short* outFrame) {
int16_t energyRatio;
int16_t gainFactor, gainFactor1, gainFactor2;
- int i;
+ size_t i;
int outCIFFT;
int scaleEnergyOut = 0;
@@ -1531,7 +1532,7 @@ void WebRtcNsx_ProcessCore(NoiseSuppressionFixedC* inst,
int16_t avgProbSpeechHB, gainModHB, avgFilterGainHB, gainTimeDomainHB;
int16_t pink_noise_exp_avg = 0;
- int i, j;
+ size_t i, j;
int nShifts, postShifts;
int norm32no1, norm32no2;
int flag, sign;
@@ -1559,11 +1560,11 @@ void WebRtcNsx_ProcessCore(NoiseSuppressionFixedC* inst,
const short* const* speechFrameHB = NULL;
short* const* outFrameHB = NULL;
- int num_high_bands = 0;
+ size_t num_high_bands = 0;
if (num_bands > 1) {
speechFrameHB = &speechFrame[1];
outFrameHB = &outFrame[1];
- num_high_bands = num_bands - 1;
+ num_high_bands = (size_t)(num_bands - 1);
}
// Store speechFrame and transform to frequency domain
diff --git a/chromium/third_party/webrtc/modules/audio_processing/ns/nsx_core.h b/chromium/third_party/webrtc/modules/audio_processing/ns/nsx_core.h
index 33b9a323377..f463dbbe1a2 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/ns/nsx_core.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/ns/nsx_core.h
@@ -34,9 +34,9 @@ typedef struct NoiseSuppressionFixedC_ {
int16_t noiseEstCounter[SIMULT];
int16_t noiseEstQuantile[HALF_ANAL_BLOCKL];
- int anaLen;
- int anaLen2;
- int magnLen;
+ size_t anaLen;
+ size_t anaLen2;
+ size_t magnLen;
int aggrMode;
int stages;
int initFlag;
@@ -98,7 +98,7 @@ typedef struct NoiseSuppressionFixedC_ {
int qNoise;
int prevQNoise;
int prevQMagn;
- int blockLen10ms;
+ size_t blockLen10ms;
int16_t real[ANAL_BLOCKL_MAX];
int16_t imag[ANAL_BLOCKL_MAX];
diff --git a/chromium/third_party/webrtc/modules/audio_processing/ns/nsx_core_c.c b/chromium/third_party/webrtc/modules/audio_processing/ns/nsx_core_c.c
index 9c929d18656..14322d38cce 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/ns/nsx_core_c.c
+++ b/chromium/third_party/webrtc/modules/audio_processing/ns/nsx_core_c.c
@@ -33,7 +33,8 @@ void WebRtcNsx_SpeechNoiseProb(NoiseSuppressionFixedC* inst,
int32_t logLrtTimeAvgKsumFX;
int16_t indPriorFX16;
int16_t tmp16, tmp16no1, tmp16no2, tmpIndFX, tableIndex, frac, intPart;
- int i, normTmp, normTmp2, nShifts;
+ size_t i;
+ int normTmp, normTmp2, nShifts;
// compute feature based on average LR factor
// this is the average over all frequencies of the smooth log LRT
diff --git a/chromium/third_party/webrtc/modules/audio_processing/ns/nsx_core_mips.c b/chromium/third_party/webrtc/modules/audio_processing/ns/nsx_core_mips.c
index 0e4b28f421e..d99be8720b3 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/ns/nsx_core_mips.c
+++ b/chromium/third_party/webrtc/modules/audio_processing/ns/nsx_core_mips.c
@@ -32,7 +32,8 @@ void WebRtcNsx_SpeechNoiseProb(NoiseSuppressionFixedC* inst,
int32_t logLrtTimeAvgKsumFX;
int16_t indPriorFX16;
int16_t tmp16, tmp16no1, tmp16no2, tmpIndFX, tableIndex, frac;
- int i, normTmp, nShifts;
+ size_t i;
+ int normTmp, nShifts;
int32_t r0, r1, r2, r3, r4, r5, r6, r7, r8, r9;
int32_t const_max = 0x7fffffff;
@@ -331,7 +332,7 @@ void WebRtcNsx_AnalysisUpdate_mips(NoiseSuppressionFixedC* inst,
int16_t* out,
int16_t* new_speech) {
int iters, after;
- int anaLen = inst->anaLen;
+ int anaLen = (int)inst->anaLen;
int *window = (int*)inst->window;
int *anaBuf = (int*)inst->analysisBuffer;
int *outBuf = (int*)out;
@@ -504,7 +505,7 @@ void WebRtcNsx_AnalysisUpdate_mips(NoiseSuppressionFixedC* inst,
void WebRtcNsx_SynthesisUpdate_mips(NoiseSuppressionFixedC* inst,
int16_t* out_frame,
int16_t gain_factor) {
- int iters = inst->blockLen10ms >> 2;
+ int iters = (int)inst->blockLen10ms >> 2;
int after = inst->blockLen10ms & 3;
int r0, r1, r2, r3, r4, r5, r6, r7;
int16_t *window = (int16_t*)inst->window;
@@ -861,7 +862,7 @@ void WebRtcNsx_Denormalize_mips(NoiseSuppressionFixedC* inst,
int16_t* in,
int factor) {
int32_t r0, r1, r2, r3, t0;
- int len = inst->anaLen;
+ int len = (int)inst->anaLen;
int16_t *out = &inst->real[0];
int shift = factor - inst->normData;
@@ -951,7 +952,7 @@ void WebRtcNsx_NormalizeRealBuffer_mips(NoiseSuppressionFixedC* inst,
const int16_t* in,
int16_t* out) {
int32_t r0, r1, r2, r3, t0;
- int len = inst->anaLen;
+ int len = (int)inst->anaLen;
int shift = inst->normData;
__asm __volatile (
diff --git a/chromium/third_party/webrtc/modules/audio_processing/ns/nsx_core_neon.c b/chromium/third_party/webrtc/modules/audio_processing/ns/nsx_core_neon.c
index 9675d11c176..65788ae2301 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/ns/nsx_core_neon.c
+++ b/chromium/third_party/webrtc/modules/audio_processing/ns/nsx_core_neon.c
@@ -141,7 +141,7 @@ void WebRtcNsx_NoiseEstimationNeon(NoiseSuppressionFixedC* inst,
const int16_t log2_const = 22713;
const int16_t width_factor = 21845;
- int i, s, offset;
+ size_t i, s, offset;
tabind = inst->stages - inst->normData;
assert(tabind < 9);
diff --git a/chromium/third_party/webrtc/modules/audio_processing/rms_level.cc b/chromium/third_party/webrtc/modules/audio_processing/rms_level.cc
index 14136bf3049..70c4422d34b 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/rms_level.cc
+++ b/chromium/third_party/webrtc/modules/audio_processing/rms_level.cc
@@ -28,14 +28,14 @@ void RMSLevel::Reset() {
sample_count_ = 0;
}
-void RMSLevel::Process(const int16_t* data, int length) {
- for (int i = 0; i < length; ++i) {
+void RMSLevel::Process(const int16_t* data, size_t length) {
+ for (size_t i = 0; i < length; ++i) {
sum_square_ += data[i] * data[i];
}
sample_count_ += length;
}
-void RMSLevel::ProcessMuted(int length) {
+void RMSLevel::ProcessMuted(size_t length) {
sample_count_ += length;
}
diff --git a/chromium/third_party/webrtc/modules/audio_processing/rms_level.h b/chromium/third_party/webrtc/modules/audio_processing/rms_level.h
index 055d271bb19..12fa2125f08 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/rms_level.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/rms_level.h
@@ -11,6 +11,8 @@
#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_RMS_LEVEL_H_
#define WEBRTC_MODULES_AUDIO_PROCESSING_RMS_LEVEL_H_
+#include <cstddef>
+
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -35,11 +37,11 @@ class RMSLevel {
void Reset();
// Pass each chunk of audio to Process() to accumulate the level.
- void Process(const int16_t* data, int length);
+ void Process(const int16_t* data, size_t length);
// If all samples with the given |length| have a magnitude of zero, this is
// a shortcut to avoid some computation.
- void ProcessMuted(int length);
+ void ProcessMuted(size_t length);
// Computes the RMS level over all data passed to Process() since the last
// call to RMS(). The returned value is positive but should be interpreted as
@@ -48,7 +50,7 @@ class RMSLevel {
private:
float sum_square_;
- int sample_count_;
+ size_t sample_count_;
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_processing/splitting_filter.cc b/chromium/third_party/webrtc/modules/audio_processing/splitting_filter.cc
index 00a1239b16f..60427e2db69 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/splitting_filter.cc
+++ b/chromium/third_party/webrtc/modules/audio_processing/splitting_filter.cc
@@ -17,10 +17,10 @@
namespace webrtc {
SplittingFilter::SplittingFilter(int num_channels,
- int num_bands,
- int num_frames)
+ size_t num_bands,
+ size_t num_frames)
: num_bands_(num_bands) {
- CHECK(num_bands_ == 2 || num_bands_ == 3);
+ RTC_CHECK(num_bands_ == 2 || num_bands_ == 3);
if (num_bands_ == 2) {
two_bands_states_.resize(num_channels);
} else if (num_bands_ == 3) {
@@ -32,10 +32,10 @@ SplittingFilter::SplittingFilter(int num_channels,
void SplittingFilter::Analysis(const IFChannelBuffer* data,
IFChannelBuffer* bands) {
- DCHECK_EQ(num_bands_, bands->num_bands());
- DCHECK_EQ(data->num_channels(), bands->num_channels());
- DCHECK_EQ(data->num_frames(),
- bands->num_frames_per_band() * bands->num_bands());
+ RTC_DCHECK_EQ(num_bands_, bands->num_bands());
+ RTC_DCHECK_EQ(data->num_channels(), bands->num_channels());
+ RTC_DCHECK_EQ(data->num_frames(),
+ bands->num_frames_per_band() * bands->num_bands());
if (bands->num_bands() == 2) {
TwoBandsAnalysis(data, bands);
} else if (bands->num_bands() == 3) {
@@ -45,10 +45,10 @@ void SplittingFilter::Analysis(const IFChannelBuffer* data,
void SplittingFilter::Synthesis(const IFChannelBuffer* bands,
IFChannelBuffer* data) {
- DCHECK_EQ(num_bands_, bands->num_bands());
- DCHECK_EQ(data->num_channels(), bands->num_channels());
- DCHECK_EQ(data->num_frames(),
- bands->num_frames_per_band() * bands->num_bands());
+ RTC_DCHECK_EQ(num_bands_, bands->num_bands());
+ RTC_DCHECK_EQ(data->num_channels(), bands->num_channels());
+ RTC_DCHECK_EQ(data->num_frames(),
+ bands->num_frames_per_band() * bands->num_bands());
if (bands->num_bands() == 2) {
TwoBandsSynthesis(bands, data);
} else if (bands->num_bands() == 3) {
@@ -58,7 +58,8 @@ void SplittingFilter::Synthesis(const IFChannelBuffer* bands,
void SplittingFilter::TwoBandsAnalysis(const IFChannelBuffer* data,
IFChannelBuffer* bands) {
- DCHECK_EQ(static_cast<int>(two_bands_states_.size()), data->num_channels());
+ RTC_DCHECK_EQ(static_cast<int>(two_bands_states_.size()),
+ data->num_channels());
for (size_t i = 0; i < two_bands_states_.size(); ++i) {
WebRtcSpl_AnalysisQMF(data->ibuf_const()->channels()[i],
data->num_frames(),
@@ -71,7 +72,8 @@ void SplittingFilter::TwoBandsAnalysis(const IFChannelBuffer* data,
void SplittingFilter::TwoBandsSynthesis(const IFChannelBuffer* bands,
IFChannelBuffer* data) {
- DCHECK_EQ(static_cast<int>(two_bands_states_.size()), data->num_channels());
+ RTC_DCHECK_EQ(static_cast<int>(two_bands_states_.size()),
+ data->num_channels());
for (size_t i = 0; i < two_bands_states_.size(); ++i) {
WebRtcSpl_SynthesisQMF(bands->ibuf_const()->channels(0)[i],
bands->ibuf_const()->channels(1)[i],
@@ -84,8 +86,8 @@ void SplittingFilter::TwoBandsSynthesis(const IFChannelBuffer* bands,
void SplittingFilter::ThreeBandsAnalysis(const IFChannelBuffer* data,
IFChannelBuffer* bands) {
- DCHECK_EQ(static_cast<int>(three_band_filter_banks_.size()),
- data->num_channels());
+ RTC_DCHECK_EQ(static_cast<int>(three_band_filter_banks_.size()),
+ data->num_channels());
for (size_t i = 0; i < three_band_filter_banks_.size(); ++i) {
three_band_filter_banks_[i]->Analysis(data->fbuf_const()->channels()[i],
data->num_frames(),
@@ -95,8 +97,8 @@ void SplittingFilter::ThreeBandsAnalysis(const IFChannelBuffer* data,
void SplittingFilter::ThreeBandsSynthesis(const IFChannelBuffer* bands,
IFChannelBuffer* data) {
- DCHECK_EQ(static_cast<int>(three_band_filter_banks_.size()),
- data->num_channels());
+ RTC_DCHECK_EQ(static_cast<int>(three_band_filter_banks_.size()),
+ data->num_channels());
for (size_t i = 0; i < three_band_filter_banks_.size(); ++i) {
three_band_filter_banks_[i]->Synthesis(bands->fbuf_const()->bands(i),
bands->num_frames_per_band(),
diff --git a/chromium/third_party/webrtc/modules/audio_processing/splitting_filter.h b/chromium/third_party/webrtc/modules/audio_processing/splitting_filter.h
index bc036c3c2a7..51088d5bdf3 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/splitting_filter.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/splitting_filter.h
@@ -45,7 +45,7 @@ struct TwoBandsStates {
// used.
class SplittingFilter {
public:
- SplittingFilter(int num_channels, int num_bands, int num_frames);
+ SplittingFilter(int num_channels, size_t num_bands, size_t num_frames);
void Analysis(const IFChannelBuffer* data, IFChannelBuffer* bands);
void Synthesis(const IFChannelBuffer* bands, IFChannelBuffer* data);
@@ -58,7 +58,7 @@ class SplittingFilter {
void ThreeBandsSynthesis(const IFChannelBuffer* bands, IFChannelBuffer* data);
void InitBuffers();
- const int num_bands_;
+ const size_t num_bands_;
std::vector<TwoBandsStates> two_bands_states_;
ScopedVector<ThreeBandFilterBank> three_band_filter_banks_;
};
diff --git a/chromium/third_party/webrtc/modules/audio_processing/splitting_filter_unittest.cc b/chromium/third_party/webrtc/modules/audio_processing/splitting_filter_unittest.cc
index 0498cc688a5..e7af65115c6 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/splitting_filter_unittest.cc
+++ b/chromium/third_party/webrtc/modules/audio_processing/splitting_filter_unittest.cc
@@ -20,8 +20,8 @@
namespace webrtc {
namespace {
-const int kSamplesPer16kHzChannel = 160;
-const int kSamplesPer48kHzChannel = 480;
+const size_t kSamplesPer16kHzChannel = 160;
+const size_t kSamplesPer48kHzChannel = 480;
} // namespace
@@ -35,26 +35,26 @@ const int kSamplesPer48kHzChannel = 480;
TEST(SplittingFilterTest, SplitsIntoThreeBandsAndReconstructs) {
static const int kChannels = 1;
static const int kSampleRateHz = 48000;
- static const int kNumBands = 3;
+ static const size_t kNumBands = 3;
static const int kFrequenciesHz[kNumBands] = {1000, 12000, 18000};
static const float kAmplitude = 8192.f;
- static const int kChunks = 8;
+ static const size_t kChunks = 8;
SplittingFilter splitting_filter(kChannels,
kNumBands,
kSamplesPer48kHzChannel);
IFChannelBuffer in_data(kSamplesPer48kHzChannel, kChannels, kNumBands);
IFChannelBuffer bands(kSamplesPer48kHzChannel, kChannels, kNumBands);
IFChannelBuffer out_data(kSamplesPer48kHzChannel, kChannels, kNumBands);
- for (int i = 0; i < kChunks; ++i) {
+ for (size_t i = 0; i < kChunks; ++i) {
// Input signal generation.
bool is_present[kNumBands];
memset(in_data.fbuf()->channels()[0],
0,
kSamplesPer48kHzChannel * sizeof(in_data.fbuf()->channels()[0][0]));
- for (int j = 0; j < kNumBands; ++j) {
- is_present[j] = i & (1 << j);
+ for (size_t j = 0; j < kNumBands; ++j) {
+ is_present[j] = i & (static_cast<size_t>(1) << j);
float amplitude = is_present[j] ? kAmplitude : 0.f;
- for (int k = 0; k < kSamplesPer48kHzChannel; ++k) {
+ for (size_t k = 0; k < kSamplesPer48kHzChannel; ++k) {
in_data.fbuf()->channels()[0][k] +=
amplitude * sin(2.f * M_PI * kFrequenciesHz[j] *
(i * kSamplesPer48kHzChannel + k) / kSampleRateHz);
@@ -64,9 +64,9 @@ TEST(SplittingFilterTest, SplitsIntoThreeBandsAndReconstructs) {
splitting_filter.Analysis(&in_data, &bands);
// Energy calculation.
float energy[kNumBands];
- for (int j = 0; j < kNumBands; ++j) {
+ for (size_t j = 0; j < kNumBands; ++j) {
energy[j] = 0.f;
- for (int k = 0; k < kSamplesPer16kHzChannel; ++k) {
+ for (size_t k = 0; k < kSamplesPer16kHzChannel; ++k) {
energy[j] += bands.fbuf_const()->channels(j)[0][k] *
bands.fbuf_const()->channels(j)[0][k];
}
@@ -81,9 +81,9 @@ TEST(SplittingFilterTest, SplitsIntoThreeBandsAndReconstructs) {
splitting_filter.Synthesis(&bands, &out_data);
// Delay and cross correlation estimation.
float xcorr = 0.f;
- for (int delay = 0; delay < kSamplesPer48kHzChannel; ++delay) {
+ for (size_t delay = 0; delay < kSamplesPer48kHzChannel; ++delay) {
float tmpcorr = 0.f;
- for (int j = delay; j < kSamplesPer48kHzChannel; ++j) {
+ for (size_t j = delay; j < kSamplesPer48kHzChannel; ++j) {
tmpcorr += in_data.fbuf_const()->channels()[0][j - delay] *
out_data.fbuf_const()->channels()[0][j];
}
@@ -94,7 +94,7 @@ TEST(SplittingFilterTest, SplitsIntoThreeBandsAndReconstructs) {
}
// High cross correlation check.
bool any_present = false;
- for (int j = 0; j < kNumBands; ++j) {
+ for (size_t j = 0; j < kNumBands; ++j) {
any_present |= is_present[j];
}
if (any_present) {
diff --git a/chromium/third_party/webrtc/modules/audio_processing/test/audio_processing_unittest.cc b/chromium/third_party/webrtc/modules/audio_processing/test/audio_processing_unittest.cc
index 291035a0120..3ebea13a455 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/test/audio_processing_unittest.cc
+++ b/chromium/third_party/webrtc/modules/audio_processing/test/audio_processing_unittest.cc
@@ -40,6 +40,9 @@
namespace webrtc {
namespace {
+// TODO(ekmeyerson): Switch to using StreamConfig and ProcessingConfig where
+// applicable.
+
// TODO(bjornv): This is not feasible until the functionality has been
// re-implemented; see comment at the bottom of this file. For now, the user has
// to hard code the |write_ref_data| value.
@@ -62,6 +65,8 @@ const int kProcessSampleRates[] = {8000, 16000, 32000, 48000};
const size_t kProcessSampleRatesSize = sizeof(kProcessSampleRates) /
sizeof(*kProcessSampleRates);
+enum StreamDirection { kForward = 0, kReverse };
+
void ConvertToFloat(const int16_t* int_data, ChannelBuffer<float>* cb) {
ChannelBuffer<int16_t> cb_int(cb->num_frames(),
cb->num_channels());
@@ -124,21 +129,23 @@ void VerifyChannelsAreEqual(int16_t* stereo, int samples_per_channel) {
}
void SetFrameTo(AudioFrame* frame, int16_t value) {
- for (int i = 0; i < frame->samples_per_channel_ * frame->num_channels_; ++i) {
+ for (size_t i = 0; i < frame->samples_per_channel_ * frame->num_channels_;
+ ++i) {
frame->data_[i] = value;
}
}
void SetFrameTo(AudioFrame* frame, int16_t left, int16_t right) {
ASSERT_EQ(2, frame->num_channels_);
- for (int i = 0; i < frame->samples_per_channel_ * 2; i += 2) {
+ for (size_t i = 0; i < frame->samples_per_channel_ * 2; i += 2) {
frame->data_[i] = left;
frame->data_[i + 1] = right;
}
}
void ScaleFrame(AudioFrame* frame, float scale) {
- for (int i = 0; i < frame->samples_per_channel_ * frame->num_channels_; ++i) {
+ for (size_t i = 0; i < frame->samples_per_channel_ * frame->num_channels_;
+ ++i) {
frame->data_[i] = FloatS16ToS16(frame->data_[i] * scale);
}
}
@@ -252,13 +259,16 @@ std::map<std::string, std::string> temp_filenames;
std::string OutputFilePath(std::string name,
int input_rate,
int output_rate,
- int reverse_rate,
+ int reverse_input_rate,
+ int reverse_output_rate,
int num_input_channels,
int num_output_channels,
- int num_reverse_channels) {
+ int num_reverse_input_channels,
+ int num_reverse_output_channels,
+ StreamDirection file_direction) {
std::ostringstream ss;
- ss << name << "_i" << num_input_channels << "_" << input_rate / 1000
- << "_r" << num_reverse_channels << "_" << reverse_rate / 1000 << "_";
+ ss << name << "_i" << num_input_channels << "_" << input_rate / 1000 << "_ir"
+ << num_reverse_input_channels << "_" << reverse_input_rate / 1000 << "_";
if (num_output_channels == 1) {
ss << "mono";
} else if (num_output_channels == 2) {
@@ -266,7 +276,16 @@ std::string OutputFilePath(std::string name,
} else {
assert(false);
}
- ss << output_rate / 1000 << "_pcm";
+ ss << output_rate / 1000;
+ if (num_reverse_output_channels == 1) {
+ ss << "_rmono";
+ } else if (num_reverse_output_channels == 2) {
+ ss << "_rstereo";
+ } else {
+ assert(false);
+ }
+ ss << reverse_output_rate / 1000;
+ ss << "_d" << file_direction << "_pcm";
std::string filename = ss.str();
if (temp_filenames[filename].empty())
@@ -340,9 +359,9 @@ class ApmTest : public ::testing::Test {
void Init(int sample_rate_hz,
int output_sample_rate_hz,
int reverse_sample_rate_hz,
- int num_reverse_channels,
int num_input_channels,
int num_output_channels,
+ int num_reverse_channels,
bool open_output_file);
void Init(AudioProcessing* ap);
void EnableAllComponents();
@@ -354,8 +373,14 @@ class ApmTest : public ::testing::Test {
void ProcessWithDefaultStreamParameters(AudioFrame* frame);
void ProcessDelayVerificationTest(int delay_ms, int system_delay_ms,
int delay_min, int delay_max);
- void TestChangingChannels(int num_channels,
- AudioProcessing::Error expected_return);
+ void TestChangingChannelsInt16Interface(
+ int num_channels,
+ AudioProcessing::Error expected_return);
+ void TestChangingForwardChannels(int num_in_channels,
+ int num_out_channels,
+ AudioProcessing::Error expected_return);
+ void TestChangingReverseChannels(int num_rev_channels,
+ AudioProcessing::Error expected_return);
void RunQuantizedVolumeDoesNotGetStuckTest(int sample_rate);
void RunManualVolumeChangeIsPossibleTest(int sample_rate);
void StreamParametersTest(Format format);
@@ -449,12 +474,11 @@ void ApmTest::TearDown() {
void ApmTest::Init(AudioProcessing* ap) {
ASSERT_EQ(kNoErr,
- ap->Initialize(frame_->sample_rate_hz_,
- output_sample_rate_hz_,
- revframe_->sample_rate_hz_,
- LayoutFromChannels(frame_->num_channels_),
- LayoutFromChannels(num_output_channels_),
- LayoutFromChannels(revframe_->num_channels_)));
+ ap->Initialize(
+ {{{frame_->sample_rate_hz_, frame_->num_channels_},
+ {output_sample_rate_hz_, num_output_channels_},
+ {revframe_->sample_rate_hz_, revframe_->num_channels_},
+ {revframe_->sample_rate_hz_, revframe_->num_channels_}}}));
}
void ApmTest::Init(int sample_rate_hz,
@@ -492,13 +516,10 @@ void ApmTest::Init(int sample_rate_hz,
if (out_file_) {
ASSERT_EQ(0, fclose(out_file_));
}
- filename = OutputFilePath("out",
- sample_rate_hz,
- output_sample_rate_hz,
- reverse_sample_rate_hz,
- num_input_channels,
- num_output_channels,
- num_reverse_channels);
+ filename = OutputFilePath(
+ "out", sample_rate_hz, output_sample_rate_hz, reverse_sample_rate_hz,
+ reverse_sample_rate_hz, num_input_channels, num_output_channels,
+ num_reverse_channels, num_reverse_channels, kForward);
out_file_ = fopen(filename.c_str(), "wb");
ASSERT_TRUE(out_file_ != NULL) << "Could not open file " <<
filename << "\n";
@@ -657,13 +678,18 @@ void ApmTest::ProcessDelayVerificationTest(int delay_ms, int system_delay_ms,
}
// Calculate expected delay estimate and acceptable regions. Further,
// limit them w.r.t. AEC delay estimation support.
- const int samples_per_ms = std::min(16, frame_->samples_per_channel_ / 10);
+ const size_t samples_per_ms =
+ std::min(static_cast<size_t>(16), frame_->samples_per_channel_ / 10);
int expected_median = std::min(std::max(delay_ms - system_delay_ms,
delay_min), delay_max);
- int expected_median_high = std::min(std::max(
- expected_median + 96 / samples_per_ms, delay_min), delay_max);
- int expected_median_low = std::min(std::max(
- expected_median - 96 / samples_per_ms, delay_min), delay_max);
+ int expected_median_high = std::min(
+ std::max(expected_median + static_cast<int>(96 / samples_per_ms),
+ delay_min),
+ delay_max);
+ int expected_median_low = std::min(
+ std::max(expected_median - static_cast<int>(96 / samples_per_ms),
+ delay_min),
+ delay_max);
// Verify delay metrics.
int median;
int std;
@@ -791,26 +817,82 @@ TEST_F(ApmTest, DelayOffsetWithLimitsIsSetProperly) {
EXPECT_EQ(50, apm_->stream_delay_ms());
}
-void ApmTest::TestChangingChannels(int num_channels,
- AudioProcessing::Error expected_return) {
+void ApmTest::TestChangingChannelsInt16Interface(
+ int num_channels,
+ AudioProcessing::Error expected_return) {
frame_->num_channels_ = num_channels;
EXPECT_EQ(expected_return, apm_->ProcessStream(frame_));
EXPECT_EQ(expected_return, apm_->AnalyzeReverseStream(frame_));
}
-TEST_F(ApmTest, Channels) {
- // Testing number of invalid channels.
- TestChangingChannels(0, apm_->kBadNumberChannelsError);
- TestChangingChannels(3, apm_->kBadNumberChannelsError);
- // Testing number of valid channels.
- for (int i = 1; i < 3; i++) {
- TestChangingChannels(i, kNoErr);
+void ApmTest::TestChangingForwardChannels(
+ int num_in_channels,
+ int num_out_channels,
+ AudioProcessing::Error expected_return) {
+ const StreamConfig input_stream = {frame_->sample_rate_hz_, num_in_channels};
+ const StreamConfig output_stream = {output_sample_rate_hz_, num_out_channels};
+
+ EXPECT_EQ(expected_return,
+ apm_->ProcessStream(float_cb_->channels(), input_stream,
+ output_stream, float_cb_->channels()));
+}
+
+void ApmTest::TestChangingReverseChannels(
+ int num_rev_channels,
+ AudioProcessing::Error expected_return) {
+ const ProcessingConfig processing_config = {
+ {{frame_->sample_rate_hz_, apm_->num_input_channels()},
+ {output_sample_rate_hz_, apm_->num_output_channels()},
+ {frame_->sample_rate_hz_, num_rev_channels},
+ {frame_->sample_rate_hz_, num_rev_channels}}};
+
+ EXPECT_EQ(
+ expected_return,
+ apm_->ProcessReverseStream(
+ float_cb_->channels(), processing_config.reverse_input_stream(),
+ processing_config.reverse_output_stream(), float_cb_->channels()));
+}
+
+TEST_F(ApmTest, ChannelsInt16Interface) {
+ // Testing number of invalid and valid channels.
+ Init(16000, 16000, 16000, 4, 4, 4, false);
+
+ TestChangingChannelsInt16Interface(0, apm_->kBadNumberChannelsError);
+
+ for (int i = 1; i < 4; i++) {
+ TestChangingChannelsInt16Interface(i, kNoErr);
EXPECT_EQ(i, apm_->num_input_channels());
// We always force the number of reverse channels used for processing to 1.
EXPECT_EQ(1, apm_->num_reverse_channels());
}
}
+TEST_F(ApmTest, Channels) {
+ // Testing number of invalid and valid channels.
+ Init(16000, 16000, 16000, 4, 4, 4, false);
+
+ TestChangingForwardChannels(0, 1, apm_->kBadNumberChannelsError);
+ TestChangingReverseChannels(0, apm_->kBadNumberChannelsError);
+
+ for (int i = 1; i < 4; ++i) {
+ for (int j = 0; j < 1; ++j) {
+ // Output channels much be one or match input channels.
+ if (j == 1 || i == j) {
+ TestChangingForwardChannels(i, j, kNoErr);
+ TestChangingReverseChannels(i, kNoErr);
+
+ EXPECT_EQ(i, apm_->num_input_channels());
+ EXPECT_EQ(j, apm_->num_output_channels());
+ // The number of reverse channels used for processing to is always 1.
+ EXPECT_EQ(1, apm_->num_reverse_channels());
+ } else {
+ TestChangingForwardChannels(i, j,
+ AudioProcessing::kBadNumberChannelsError);
+ }
+ }
+ }
+}
+
TEST_F(ApmTest, SampleRatesInt) {
// Testing invalid sample rates
SetContainerFormat(10000, 2, frame_, &float_cb_);
@@ -820,7 +902,6 @@ TEST_F(ApmTest, SampleRatesInt) {
for (size_t i = 0; i < sizeof(fs) / sizeof(*fs); i++) {
SetContainerFormat(fs[i], 2, frame_, &float_cb_);
EXPECT_NOERR(ProcessStreamChooser(kIntFormat));
- EXPECT_EQ(fs[i], apm_->input_sample_rate_hz());
}
}
@@ -923,8 +1004,8 @@ TEST_F(ApmTest, DISABLED_EchoCancellationReportsCorrectDelays) {
2,
false);
// Sampling frequency dependent variables.
- const int num_ms_per_block = std::max(4,
- 640 / frame_->samples_per_channel_);
+ const int num_ms_per_block =
+ std::max(4, static_cast<int>(640 / frame_->samples_per_channel_));
const int delay_min_ms = -kLookaheadBlocks * num_ms_per_block;
const int delay_max_ms = (kMaxDelayBlocks - 1) * num_ms_per_block;
@@ -1474,6 +1555,8 @@ TEST_F(ApmTest, NoProcessingWhenAllComponentsDisabled) {
for (int j = 0; j < 1000; j++) {
EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
EXPECT_TRUE(FrameDataAreEqual(*frame_, frame_copy));
+ EXPECT_EQ(apm_->kNoError, apm_->ProcessReverseStream(frame_));
+ EXPECT_TRUE(FrameDataAreEqual(*frame_, frame_copy));
}
}
}
@@ -1498,6 +1581,19 @@ TEST_F(ApmTest, NoProcessingWhenAllComponentsDisabledFloat) {
for (size_t i = 0; i < kSamples; ++i) {
EXPECT_EQ(src[i], dest[i]);
}
+
+ // Same for ProcessReverseStream.
+ float rev_dest[kSamples] = {};
+ auto rev_dest_channels = &rev_dest[0];
+
+ StreamConfig input_stream = {sample_rate, 1};
+ StreamConfig output_stream = {sample_rate, 1};
+ EXPECT_NOERR(apm_->ProcessReverseStream(&src_channels, input_stream,
+ output_stream, &rev_dest_channels));
+
+ for (size_t i = 0; i < kSamples; ++i) {
+ EXPECT_EQ(src[i], rev_dest[i]);
+ }
}
TEST_F(ApmTest, IdenticalInputChannelsResultInIdenticalOutputChannels) {
@@ -2242,15 +2338,18 @@ void UpdateBestSNR(const float* ref,
// Due to the resampling distortion, we don't expect identical results, but
// enforce SNR thresholds which vary depending on the format. 0 is a special
// case SNR which corresponds to inf, or zero error.
-typedef std::tr1::tuple<int, int, int, double> AudioProcessingTestData;
+typedef std::tr1::tuple<int, int, int, int, double, double>
+ AudioProcessingTestData;
class AudioProcessingTest
: public testing::TestWithParam<AudioProcessingTestData> {
public:
AudioProcessingTest()
: input_rate_(std::tr1::get<0>(GetParam())),
output_rate_(std::tr1::get<1>(GetParam())),
- reverse_rate_(std::tr1::get<2>(GetParam())),
- expected_snr_(std::tr1::get<3>(GetParam())) {}
+ reverse_input_rate_(std::tr1::get<2>(GetParam())),
+ reverse_output_rate_(std::tr1::get<3>(GetParam())),
+ expected_snr_(std::tr1::get<4>(GetParam())),
+ expected_reverse_snr_(std::tr1::get<5>(GetParam())) {}
virtual ~AudioProcessingTest() {}
@@ -2266,13 +2365,9 @@ class AudioProcessingTest
for (size_t j = 0; j < kNumChannelsSize; ++j) {
for (size_t k = 0; k < kNumChannelsSize; ++k) {
// The reference files always have matching input and output channels.
- ProcessFormat(kNativeRates[i],
- kNativeRates[i],
- kNativeRates[i],
- kNumChannels[j],
- kNumChannels[j],
- kNumChannels[k],
- "ref");
+ ProcessFormat(kNativeRates[i], kNativeRates[i], kNativeRates[i],
+ kNativeRates[i], kNumChannels[j], kNumChannels[j],
+ kNumChannels[k], kNumChannels[k], "ref");
}
}
}
@@ -2281,62 +2376,75 @@ class AudioProcessingTest
static void TearDownTestCase() {
ClearTempFiles();
}
+
// Runs a process pass on files with the given parameters and dumps the output
- // to a file specified with |output_file_prefix|.
+ // to a file specified with |output_file_prefix|. Both forward and reverse
+ // output streams are dumped.
static void ProcessFormat(int input_rate,
int output_rate,
- int reverse_rate,
+ int reverse_input_rate,
+ int reverse_output_rate,
int num_input_channels,
int num_output_channels,
- int num_reverse_channels,
+ int num_reverse_input_channels,
+ int num_reverse_output_channels,
std::string output_file_prefix) {
Config config;
config.Set<ExperimentalAgc>(new ExperimentalAgc(false));
rtc::scoped_ptr<AudioProcessing> ap(AudioProcessing::Create(config));
EnableAllAPComponents(ap.get());
- ap->Initialize(input_rate,
- output_rate,
- reverse_rate,
- LayoutFromChannels(num_input_channels),
- LayoutFromChannels(num_output_channels),
- LayoutFromChannels(num_reverse_channels));
-
- FILE* far_file = fopen(ResourceFilePath("far", reverse_rate).c_str(), "rb");
+
+ ProcessingConfig processing_config = {
+ {{input_rate, num_input_channels},
+ {output_rate, num_output_channels},
+ {reverse_input_rate, num_reverse_input_channels},
+ {reverse_output_rate, num_reverse_output_channels}}};
+ ap->Initialize(processing_config);
+
+ FILE* far_file =
+ fopen(ResourceFilePath("far", reverse_input_rate).c_str(), "rb");
FILE* near_file = fopen(ResourceFilePath("near", input_rate).c_str(), "rb");
- FILE* out_file = fopen(OutputFilePath(output_file_prefix,
- input_rate,
- output_rate,
- reverse_rate,
- num_input_channels,
- num_output_channels,
- num_reverse_channels).c_str(), "wb");
+ FILE* out_file =
+ fopen(OutputFilePath(output_file_prefix, input_rate, output_rate,
+ reverse_input_rate, reverse_output_rate,
+ num_input_channels, num_output_channels,
+ num_reverse_input_channels,
+ num_reverse_output_channels, kForward).c_str(),
+ "wb");
+ FILE* rev_out_file =
+ fopen(OutputFilePath(output_file_prefix, input_rate, output_rate,
+ reverse_input_rate, reverse_output_rate,
+ num_input_channels, num_output_channels,
+ num_reverse_input_channels,
+ num_reverse_output_channels, kReverse).c_str(),
+ "wb");
ASSERT_TRUE(far_file != NULL);
ASSERT_TRUE(near_file != NULL);
ASSERT_TRUE(out_file != NULL);
+ ASSERT_TRUE(rev_out_file != NULL);
ChannelBuffer<float> fwd_cb(SamplesFromRate(input_rate),
num_input_channels);
- ChannelBuffer<float> rev_cb(SamplesFromRate(reverse_rate),
- num_reverse_channels);
+ ChannelBuffer<float> rev_cb(SamplesFromRate(reverse_input_rate),
+ num_reverse_input_channels);
ChannelBuffer<float> out_cb(SamplesFromRate(output_rate),
num_output_channels);
+ ChannelBuffer<float> rev_out_cb(SamplesFromRate(reverse_output_rate),
+ num_reverse_output_channels);
// Temporary buffers.
const int max_length =
- 2 * std::max(out_cb.num_frames(),
- std::max(fwd_cb.num_frames(),
- rev_cb.num_frames()));
+ 2 * std::max(std::max(out_cb.num_frames(), rev_out_cb.num_frames()),
+ std::max(fwd_cb.num_frames(), rev_cb.num_frames()));
rtc::scoped_ptr<float[]> float_data(new float[max_length]);
rtc::scoped_ptr<int16_t[]> int_data(new int16_t[max_length]);
int analog_level = 127;
while (ReadChunk(far_file, int_data.get(), float_data.get(), &rev_cb) &&
ReadChunk(near_file, int_data.get(), float_data.get(), &fwd_cb)) {
- EXPECT_NOERR(ap->AnalyzeReverseStream(
- rev_cb.channels(),
- rev_cb.num_frames(),
- reverse_rate,
- LayoutFromChannels(num_reverse_channels)));
+ EXPECT_NOERR(ap->ProcessReverseStream(
+ rev_cb.channels(), processing_config.reverse_input_stream(),
+ processing_config.reverse_output_stream(), rev_out_cb.channels()));
EXPECT_NOERR(ap->set_stream_delay_ms(0));
ap->echo_cancellation()->set_stream_drift_samples(0);
@@ -2351,274 +2459,293 @@ class AudioProcessingTest
LayoutFromChannels(num_output_channels),
out_cb.channels()));
- Interleave(out_cb.channels(),
- out_cb.num_frames(),
- out_cb.num_channels(),
+ // Dump forward output to file.
+ Interleave(out_cb.channels(), out_cb.num_frames(), out_cb.num_channels(),
float_data.get());
- // Dump output to file.
int out_length = out_cb.num_channels() * out_cb.num_frames();
+
ASSERT_EQ(static_cast<size_t>(out_length),
fwrite(float_data.get(), sizeof(float_data[0]),
out_length, out_file));
+ // Dump reverse output to file.
+ Interleave(rev_out_cb.channels(), rev_out_cb.num_frames(),
+ rev_out_cb.num_channels(), float_data.get());
+ int rev_out_length = rev_out_cb.num_channels() * rev_out_cb.num_frames();
+
+ ASSERT_EQ(static_cast<size_t>(rev_out_length),
+ fwrite(float_data.get(), sizeof(float_data[0]), rev_out_length,
+ rev_out_file));
+
analog_level = ap->gain_control()->stream_analog_level();
}
fclose(far_file);
fclose(near_file);
fclose(out_file);
+ fclose(rev_out_file);
}
protected:
int input_rate_;
int output_rate_;
- int reverse_rate_;
+ int reverse_input_rate_;
+ int reverse_output_rate_;
double expected_snr_;
+ double expected_reverse_snr_;
};
TEST_P(AudioProcessingTest, Formats) {
struct ChannelFormat {
int num_input;
int num_output;
- int num_reverse;
+ int num_reverse_input;
+ int num_reverse_output;
};
ChannelFormat cf[] = {
- {1, 1, 1},
- {1, 1, 2},
- {2, 1, 1},
- {2, 1, 2},
- {2, 2, 1},
- {2, 2, 2},
+ {1, 1, 1, 1},
+ {1, 1, 2, 1},
+ {2, 1, 1, 1},
+ {2, 1, 2, 1},
+ {2, 2, 1, 1},
+ {2, 2, 2, 2},
};
size_t channel_format_size = sizeof(cf) / sizeof(*cf);
for (size_t i = 0; i < channel_format_size; ++i) {
- ProcessFormat(input_rate_,
- output_rate_,
- reverse_rate_,
- cf[i].num_input,
- cf[i].num_output,
- cf[i].num_reverse,
- "out");
- int min_ref_rate = std::min(input_rate_, output_rate_);
- int ref_rate;
-
- if (min_ref_rate > 32000) {
- ref_rate = 48000;
- } else if (min_ref_rate > 16000) {
- ref_rate = 32000;
- } else if (min_ref_rate > 8000) {
- ref_rate = 16000;
- } else {
- ref_rate = 8000;
- }
+ ProcessFormat(input_rate_, output_rate_, reverse_input_rate_,
+ reverse_output_rate_, cf[i].num_input, cf[i].num_output,
+ cf[i].num_reverse_input, cf[i].num_reverse_output, "out");
+
+ // Verify output for both directions.
+ std::vector<StreamDirection> stream_directions;
+ stream_directions.push_back(kForward);
+ stream_directions.push_back(kReverse);
+ for (StreamDirection file_direction : stream_directions) {
+ const int in_rate = file_direction ? reverse_input_rate_ : input_rate_;
+ const int out_rate = file_direction ? reverse_output_rate_ : output_rate_;
+ const int out_num =
+ file_direction ? cf[i].num_reverse_output : cf[i].num_output;
+ const double expected_snr =
+ file_direction ? expected_reverse_snr_ : expected_snr_;
+
+ const int min_ref_rate = std::min(in_rate, out_rate);
+ int ref_rate;
+
+ if (min_ref_rate > 32000) {
+ ref_rate = 48000;
+ } else if (min_ref_rate > 16000) {
+ ref_rate = 32000;
+ } else if (min_ref_rate > 8000) {
+ ref_rate = 16000;
+ } else {
+ ref_rate = 8000;
+ }
#ifdef WEBRTC_AUDIOPROC_FIXED_PROFILE
- ref_rate = std::min(ref_rate, 16000);
+ if (file_direction == kForward) {
+ ref_rate = std::min(ref_rate, 16000);
+ }
#endif
+ FILE* out_file = fopen(
+ OutputFilePath("out", input_rate_, output_rate_, reverse_input_rate_,
+ reverse_output_rate_, cf[i].num_input,
+ cf[i].num_output, cf[i].num_reverse_input,
+ cf[i].num_reverse_output, file_direction).c_str(),
+ "rb");
+ // The reference files always have matching input and output channels.
+ FILE* ref_file = fopen(
+ OutputFilePath("ref", ref_rate, ref_rate, ref_rate, ref_rate,
+ cf[i].num_output, cf[i].num_output,
+ cf[i].num_reverse_output, cf[i].num_reverse_output,
+ file_direction).c_str(),
+ "rb");
+ ASSERT_TRUE(out_file != NULL);
+ ASSERT_TRUE(ref_file != NULL);
+
+ const int ref_length = SamplesFromRate(ref_rate) * out_num;
+ const int out_length = SamplesFromRate(out_rate) * out_num;
+ // Data from the reference file.
+ rtc::scoped_ptr<float[]> ref_data(new float[ref_length]);
+ // Data from the output file.
+ rtc::scoped_ptr<float[]> out_data(new float[out_length]);
+ // Data from the resampled output, in case the reference and output rates
+ // don't match.
+ rtc::scoped_ptr<float[]> cmp_data(new float[ref_length]);
+
+ PushResampler<float> resampler;
+ resampler.InitializeIfNeeded(out_rate, ref_rate, out_num);
+
+ // Compute the resampling delay of the output relative to the reference,
+ // to find the region over which we should search for the best SNR.
+ float expected_delay_sec = 0;
+ if (in_rate != ref_rate) {
+ // Input resampling delay.
+ expected_delay_sec +=
+ PushSincResampler::AlgorithmicDelaySeconds(in_rate);
+ }
+ if (out_rate != ref_rate) {
+ // Output resampling delay.
+ expected_delay_sec +=
+ PushSincResampler::AlgorithmicDelaySeconds(ref_rate);
+ // Delay of converting the output back to its processing rate for
+ // testing.
+ expected_delay_sec +=
+ PushSincResampler::AlgorithmicDelaySeconds(out_rate);
+ }
+ int expected_delay =
+ floor(expected_delay_sec * ref_rate + 0.5f) * out_num;
+
+ double variance = 0;
+ double sq_error = 0;
+ while (fread(out_data.get(), sizeof(out_data[0]), out_length, out_file) &&
+ fread(ref_data.get(), sizeof(ref_data[0]), ref_length, ref_file)) {
+ float* out_ptr = out_data.get();
+ if (out_rate != ref_rate) {
+ // Resample the output back to its internal processing rate if
+ // necssary.
+ ASSERT_EQ(ref_length, resampler.Resample(out_ptr, out_length,
+ cmp_data.get(), ref_length));
+ out_ptr = cmp_data.get();
+ }
- FILE* out_file = fopen(OutputFilePath("out",
- input_rate_,
- output_rate_,
- reverse_rate_,
- cf[i].num_input,
- cf[i].num_output,
- cf[i].num_reverse).c_str(), "rb");
- // The reference files always have matching input and output channels.
- FILE* ref_file = fopen(OutputFilePath("ref",
- ref_rate,
- ref_rate,
- ref_rate,
- cf[i].num_output,
- cf[i].num_output,
- cf[i].num_reverse).c_str(), "rb");
- ASSERT_TRUE(out_file != NULL);
- ASSERT_TRUE(ref_file != NULL);
-
- const int ref_length = SamplesFromRate(ref_rate) * cf[i].num_output;
- const int out_length = SamplesFromRate(output_rate_) * cf[i].num_output;
- // Data from the reference file.
- rtc::scoped_ptr<float[]> ref_data(new float[ref_length]);
- // Data from the output file.
- rtc::scoped_ptr<float[]> out_data(new float[out_length]);
- // Data from the resampled output, in case the reference and output rates
- // don't match.
- rtc::scoped_ptr<float[]> cmp_data(new float[ref_length]);
-
- PushResampler<float> resampler;
- resampler.InitializeIfNeeded(output_rate_, ref_rate, cf[i].num_output);
-
- // Compute the resampling delay of the output relative to the reference,
- // to find the region over which we should search for the best SNR.
- float expected_delay_sec = 0;
- if (input_rate_ != ref_rate) {
- // Input resampling delay.
- expected_delay_sec +=
- PushSincResampler::AlgorithmicDelaySeconds(input_rate_);
- }
- if (output_rate_ != ref_rate) {
- // Output resampling delay.
- expected_delay_sec +=
- PushSincResampler::AlgorithmicDelaySeconds(ref_rate);
- // Delay of converting the output back to its processing rate for testing.
- expected_delay_sec +=
- PushSincResampler::AlgorithmicDelaySeconds(output_rate_);
- }
- int expected_delay = floor(expected_delay_sec * ref_rate + 0.5f) *
- cf[i].num_output;
-
- double variance = 0;
- double sq_error = 0;
- while (fread(out_data.get(), sizeof(out_data[0]), out_length, out_file) &&
- fread(ref_data.get(), sizeof(ref_data[0]), ref_length, ref_file)) {
- float* out_ptr = out_data.get();
- if (output_rate_ != ref_rate) {
- // Resample the output back to its internal processing rate if necssary.
- ASSERT_EQ(ref_length, resampler.Resample(out_ptr,
- out_length,
- cmp_data.get(),
- ref_length));
- out_ptr = cmp_data.get();
+ // Update the |sq_error| and |variance| accumulators with the highest
+ // SNR of reference vs output.
+ UpdateBestSNR(ref_data.get(), out_ptr, ref_length, expected_delay,
+ &variance, &sq_error);
}
- // Update the |sq_error| and |variance| accumulators with the highest SNR
- // of reference vs output.
- UpdateBestSNR(ref_data.get(),
- out_ptr,
- ref_length,
- expected_delay,
- &variance,
- &sq_error);
- }
+ std::cout << "(" << input_rate_ << ", " << output_rate_ << ", "
+ << reverse_input_rate_ << ", " << reverse_output_rate_ << ", "
+ << cf[i].num_input << ", " << cf[i].num_output << ", "
+ << cf[i].num_reverse_input << ", " << cf[i].num_reverse_output
+ << ", " << file_direction << "): ";
+ if (sq_error > 0) {
+ double snr = 10 * log10(variance / sq_error);
+ EXPECT_GE(snr, expected_snr);
+ EXPECT_NE(0, expected_snr);
+ std::cout << "SNR=" << snr << " dB" << std::endl;
+ } else {
+ EXPECT_EQ(expected_snr, 0);
+ std::cout << "SNR="
+ << "inf dB" << std::endl;
+ }
- std::cout << "(" << input_rate_ << ", "
- << output_rate_ << ", "
- << reverse_rate_ << ", "
- << cf[i].num_input << ", "
- << cf[i].num_output << ", "
- << cf[i].num_reverse << "): ";
- if (sq_error > 0) {
- double snr = 10 * log10(variance / sq_error);
- EXPECT_GE(snr, expected_snr_);
- EXPECT_NE(0, expected_snr_);
- std::cout << "SNR=" << snr << " dB" << std::endl;
- } else {
- EXPECT_EQ(expected_snr_, 0);
- std::cout << "SNR=" << "inf dB" << std::endl;
+ fclose(out_file);
+ fclose(ref_file);
}
-
- fclose(out_file);
- fclose(ref_file);
}
}
#if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
INSTANTIATE_TEST_CASE_P(
- CommonFormats, AudioProcessingTest, testing::Values(
- std::tr1::make_tuple(48000, 48000, 48000, 0),
- std::tr1::make_tuple(48000, 48000, 32000, 40),
- std::tr1::make_tuple(48000, 48000, 16000, 40),
- std::tr1::make_tuple(48000, 44100, 48000, 20),
- std::tr1::make_tuple(48000, 44100, 32000, 20),
- std::tr1::make_tuple(48000, 44100, 16000, 20),
- std::tr1::make_tuple(48000, 32000, 48000, 30),
- std::tr1::make_tuple(48000, 32000, 32000, 30),
- std::tr1::make_tuple(48000, 32000, 16000, 30),
- std::tr1::make_tuple(48000, 16000, 48000, 25),
- std::tr1::make_tuple(48000, 16000, 32000, 25),
- std::tr1::make_tuple(48000, 16000, 16000, 25),
-
- std::tr1::make_tuple(44100, 48000, 48000, 30),
- std::tr1::make_tuple(44100, 48000, 32000, 30),
- std::tr1::make_tuple(44100, 48000, 16000, 30),
- std::tr1::make_tuple(44100, 44100, 48000, 20),
- std::tr1::make_tuple(44100, 44100, 32000, 20),
- std::tr1::make_tuple(44100, 44100, 16000, 20),
- std::tr1::make_tuple(44100, 32000, 48000, 30),
- std::tr1::make_tuple(44100, 32000, 32000, 30),
- std::tr1::make_tuple(44100, 32000, 16000, 30),
- std::tr1::make_tuple(44100, 16000, 48000, 25),
- std::tr1::make_tuple(44100, 16000, 32000, 25),
- std::tr1::make_tuple(44100, 16000, 16000, 25),
-
- std::tr1::make_tuple(32000, 48000, 48000, 30),
- std::tr1::make_tuple(32000, 48000, 32000, 35),
- std::tr1::make_tuple(32000, 48000, 16000, 30),
- std::tr1::make_tuple(32000, 44100, 48000, 20),
- std::tr1::make_tuple(32000, 44100, 32000, 20),
- std::tr1::make_tuple(32000, 44100, 16000, 20),
- std::tr1::make_tuple(32000, 32000, 48000, 40),
- std::tr1::make_tuple(32000, 32000, 32000, 0),
- std::tr1::make_tuple(32000, 32000, 16000, 40),
- std::tr1::make_tuple(32000, 16000, 48000, 25),
- std::tr1::make_tuple(32000, 16000, 32000, 25),
- std::tr1::make_tuple(32000, 16000, 16000, 25),
-
- std::tr1::make_tuple(16000, 48000, 48000, 25),
- std::tr1::make_tuple(16000, 48000, 32000, 25),
- std::tr1::make_tuple(16000, 48000, 16000, 25),
- std::tr1::make_tuple(16000, 44100, 48000, 15),
- std::tr1::make_tuple(16000, 44100, 32000, 15),
- std::tr1::make_tuple(16000, 44100, 16000, 15),
- std::tr1::make_tuple(16000, 32000, 48000, 25),
- std::tr1::make_tuple(16000, 32000, 32000, 25),
- std::tr1::make_tuple(16000, 32000, 16000, 25),
- std::tr1::make_tuple(16000, 16000, 48000, 40),
- std::tr1::make_tuple(16000, 16000, 32000, 50),
- std::tr1::make_tuple(16000, 16000, 16000, 0)));
+ CommonFormats,
+ AudioProcessingTest,
+ testing::Values(std::tr1::make_tuple(48000, 48000, 48000, 48000, 0, 0),
+ std::tr1::make_tuple(48000, 48000, 32000, 48000, 40, 30),
+ std::tr1::make_tuple(48000, 48000, 16000, 48000, 40, 20),
+ std::tr1::make_tuple(48000, 44100, 48000, 44100, 20, 20),
+ std::tr1::make_tuple(48000, 44100, 32000, 44100, 20, 15),
+ std::tr1::make_tuple(48000, 44100, 16000, 44100, 20, 15),
+ std::tr1::make_tuple(48000, 32000, 48000, 32000, 30, 35),
+ std::tr1::make_tuple(48000, 32000, 32000, 32000, 30, 0),
+ std::tr1::make_tuple(48000, 32000, 16000, 32000, 30, 20),
+ std::tr1::make_tuple(48000, 16000, 48000, 16000, 25, 20),
+ std::tr1::make_tuple(48000, 16000, 32000, 16000, 25, 20),
+ std::tr1::make_tuple(48000, 16000, 16000, 16000, 25, 0),
+
+ std::tr1::make_tuple(44100, 48000, 48000, 48000, 30, 0),
+ std::tr1::make_tuple(44100, 48000, 32000, 48000, 30, 30),
+ std::tr1::make_tuple(44100, 48000, 16000, 48000, 30, 20),
+ std::tr1::make_tuple(44100, 44100, 48000, 44100, 20, 20),
+ std::tr1::make_tuple(44100, 44100, 32000, 44100, 20, 15),
+ std::tr1::make_tuple(44100, 44100, 16000, 44100, 20, 15),
+ std::tr1::make_tuple(44100, 32000, 48000, 32000, 30, 35),
+ std::tr1::make_tuple(44100, 32000, 32000, 32000, 30, 0),
+ std::tr1::make_tuple(44100, 32000, 16000, 32000, 30, 20),
+ std::tr1::make_tuple(44100, 16000, 48000, 16000, 25, 20),
+ std::tr1::make_tuple(44100, 16000, 32000, 16000, 25, 20),
+ std::tr1::make_tuple(44100, 16000, 16000, 16000, 25, 0),
+
+ std::tr1::make_tuple(32000, 48000, 48000, 48000, 30, 0),
+ std::tr1::make_tuple(32000, 48000, 32000, 48000, 35, 30),
+ std::tr1::make_tuple(32000, 48000, 16000, 48000, 30, 20),
+ std::tr1::make_tuple(32000, 44100, 48000, 44100, 20, 20),
+ std::tr1::make_tuple(32000, 44100, 32000, 44100, 20, 15),
+ std::tr1::make_tuple(32000, 44100, 16000, 44100, 20, 15),
+ std::tr1::make_tuple(32000, 32000, 48000, 32000, 40, 35),
+ std::tr1::make_tuple(32000, 32000, 32000, 32000, 0, 0),
+ std::tr1::make_tuple(32000, 32000, 16000, 32000, 40, 20),
+ std::tr1::make_tuple(32000, 16000, 48000, 16000, 25, 20),
+ std::tr1::make_tuple(32000, 16000, 32000, 16000, 25, 20),
+ std::tr1::make_tuple(32000, 16000, 16000, 16000, 25, 0),
+
+ std::tr1::make_tuple(16000, 48000, 48000, 48000, 25, 0),
+ std::tr1::make_tuple(16000, 48000, 32000, 48000, 25, 30),
+ std::tr1::make_tuple(16000, 48000, 16000, 48000, 25, 20),
+ std::tr1::make_tuple(16000, 44100, 48000, 44100, 15, 20),
+ std::tr1::make_tuple(16000, 44100, 32000, 44100, 15, 15),
+ std::tr1::make_tuple(16000, 44100, 16000, 44100, 15, 15),
+ std::tr1::make_tuple(16000, 32000, 48000, 32000, 25, 35),
+ std::tr1::make_tuple(16000, 32000, 32000, 32000, 25, 0),
+ std::tr1::make_tuple(16000, 32000, 16000, 32000, 25, 20),
+ std::tr1::make_tuple(16000, 16000, 48000, 16000, 40, 20),
+ std::tr1::make_tuple(16000, 16000, 32000, 16000, 50, 20),
+ std::tr1::make_tuple(16000, 16000, 16000, 16000, 0, 0)));
#elif defined(WEBRTC_AUDIOPROC_FIXED_PROFILE)
INSTANTIATE_TEST_CASE_P(
- CommonFormats, AudioProcessingTest, testing::Values(
- std::tr1::make_tuple(48000, 48000, 48000, 20),
- std::tr1::make_tuple(48000, 48000, 32000, 20),
- std::tr1::make_tuple(48000, 48000, 16000, 20),
- std::tr1::make_tuple(48000, 44100, 48000, 15),
- std::tr1::make_tuple(48000, 44100, 32000, 15),
- std::tr1::make_tuple(48000, 44100, 16000, 15),
- std::tr1::make_tuple(48000, 32000, 48000, 20),
- std::tr1::make_tuple(48000, 32000, 32000, 20),
- std::tr1::make_tuple(48000, 32000, 16000, 20),
- std::tr1::make_tuple(48000, 16000, 48000, 20),
- std::tr1::make_tuple(48000, 16000, 32000, 20),
- std::tr1::make_tuple(48000, 16000, 16000, 20),
-
- std::tr1::make_tuple(44100, 48000, 48000, 20),
- std::tr1::make_tuple(44100, 48000, 32000, 20),
- std::tr1::make_tuple(44100, 48000, 16000, 20),
- std::tr1::make_tuple(44100, 44100, 48000, 15),
- std::tr1::make_tuple(44100, 44100, 32000, 15),
- std::tr1::make_tuple(44100, 44100, 16000, 15),
- std::tr1::make_tuple(44100, 32000, 48000, 20),
- std::tr1::make_tuple(44100, 32000, 32000, 20),
- std::tr1::make_tuple(44100, 32000, 16000, 20),
- std::tr1::make_tuple(44100, 16000, 48000, 20),
- std::tr1::make_tuple(44100, 16000, 32000, 20),
- std::tr1::make_tuple(44100, 16000, 16000, 20),
-
- std::tr1::make_tuple(32000, 48000, 48000, 20),
- std::tr1::make_tuple(32000, 48000, 32000, 20),
- std::tr1::make_tuple(32000, 48000, 16000, 20),
- std::tr1::make_tuple(32000, 44100, 48000, 15),
- std::tr1::make_tuple(32000, 44100, 32000, 15),
- std::tr1::make_tuple(32000, 44100, 16000, 15),
- std::tr1::make_tuple(32000, 32000, 48000, 20),
- std::tr1::make_tuple(32000, 32000, 32000, 20),
- std::tr1::make_tuple(32000, 32000, 16000, 20),
- std::tr1::make_tuple(32000, 16000, 48000, 20),
- std::tr1::make_tuple(32000, 16000, 32000, 20),
- std::tr1::make_tuple(32000, 16000, 16000, 20),
-
- std::tr1::make_tuple(16000, 48000, 48000, 25),
- std::tr1::make_tuple(16000, 48000, 32000, 25),
- std::tr1::make_tuple(16000, 48000, 16000, 25),
- std::tr1::make_tuple(16000, 44100, 48000, 15),
- std::tr1::make_tuple(16000, 44100, 32000, 15),
- std::tr1::make_tuple(16000, 44100, 16000, 15),
- std::tr1::make_tuple(16000, 32000, 48000, 25),
- std::tr1::make_tuple(16000, 32000, 32000, 25),
- std::tr1::make_tuple(16000, 32000, 16000, 25),
- std::tr1::make_tuple(16000, 16000, 48000, 35),
- std::tr1::make_tuple(16000, 16000, 32000, 40),
- std::tr1::make_tuple(16000, 16000, 16000, 0)));
+ CommonFormats,
+ AudioProcessingTest,
+ testing::Values(std::tr1::make_tuple(48000, 48000, 48000, 48000, 20, 0),
+ std::tr1::make_tuple(48000, 48000, 32000, 48000, 20, 30),
+ std::tr1::make_tuple(48000, 48000, 16000, 48000, 20, 20),
+ std::tr1::make_tuple(48000, 44100, 48000, 44100, 15, 20),
+ std::tr1::make_tuple(48000, 44100, 32000, 44100, 15, 15),
+ std::tr1::make_tuple(48000, 44100, 16000, 44100, 15, 15),
+ std::tr1::make_tuple(48000, 32000, 48000, 32000, 20, 35),
+ std::tr1::make_tuple(48000, 32000, 32000, 32000, 20, 0),
+ std::tr1::make_tuple(48000, 32000, 16000, 32000, 20, 20),
+ std::tr1::make_tuple(48000, 16000, 48000, 16000, 20, 20),
+ std::tr1::make_tuple(48000, 16000, 32000, 16000, 20, 20),
+ std::tr1::make_tuple(48000, 16000, 16000, 16000, 20, 0),
+
+ std::tr1::make_tuple(44100, 48000, 48000, 48000, 20, 0),
+ std::tr1::make_tuple(44100, 48000, 32000, 48000, 20, 30),
+ std::tr1::make_tuple(44100, 48000, 16000, 48000, 20, 20),
+ std::tr1::make_tuple(44100, 44100, 48000, 44100, 15, 20),
+ std::tr1::make_tuple(44100, 44100, 32000, 44100, 15, 15),
+ std::tr1::make_tuple(44100, 44100, 16000, 44100, 15, 15),
+ std::tr1::make_tuple(44100, 32000, 48000, 32000, 20, 35),
+ std::tr1::make_tuple(44100, 32000, 32000, 32000, 20, 0),
+ std::tr1::make_tuple(44100, 32000, 16000, 32000, 20, 20),
+ std::tr1::make_tuple(44100, 16000, 48000, 16000, 20, 20),
+ std::tr1::make_tuple(44100, 16000, 32000, 16000, 20, 20),
+ std::tr1::make_tuple(44100, 16000, 16000, 16000, 20, 0),
+
+ std::tr1::make_tuple(32000, 48000, 48000, 48000, 20, 0),
+ std::tr1::make_tuple(32000, 48000, 32000, 48000, 20, 30),
+ std::tr1::make_tuple(32000, 48000, 16000, 48000, 20, 20),
+ std::tr1::make_tuple(32000, 44100, 48000, 44100, 15, 20),
+ std::tr1::make_tuple(32000, 44100, 32000, 44100, 15, 15),
+ std::tr1::make_tuple(32000, 44100, 16000, 44100, 15, 15),
+ std::tr1::make_tuple(32000, 32000, 48000, 32000, 20, 35),
+ std::tr1::make_tuple(32000, 32000, 32000, 32000, 20, 0),
+ std::tr1::make_tuple(32000, 32000, 16000, 32000, 20, 20),
+ std::tr1::make_tuple(32000, 16000, 48000, 16000, 20, 20),
+ std::tr1::make_tuple(32000, 16000, 32000, 16000, 20, 20),
+ std::tr1::make_tuple(32000, 16000, 16000, 16000, 20, 0),
+
+ std::tr1::make_tuple(16000, 48000, 48000, 48000, 25, 0),
+ std::tr1::make_tuple(16000, 48000, 32000, 48000, 25, 30),
+ std::tr1::make_tuple(16000, 48000, 16000, 48000, 25, 20),
+ std::tr1::make_tuple(16000, 44100, 48000, 44100, 15, 20),
+ std::tr1::make_tuple(16000, 44100, 32000, 44100, 15, 15),
+ std::tr1::make_tuple(16000, 44100, 16000, 44100, 15, 15),
+ std::tr1::make_tuple(16000, 32000, 48000, 32000, 25, 35),
+ std::tr1::make_tuple(16000, 32000, 32000, 32000, 25, 0),
+ std::tr1::make_tuple(16000, 32000, 16000, 32000, 25, 20),
+ std::tr1::make_tuple(16000, 16000, 48000, 16000, 35, 20),
+ std::tr1::make_tuple(16000, 16000, 32000, 16000, 40, 20),
+ std::tr1::make_tuple(16000, 16000, 16000, 16000, 0, 0)));
#endif
// TODO(henrike): re-implement functionality lost when removing the old main
diff --git a/chromium/third_party/webrtc/modules/audio_processing/test/audioproc_float.cc b/chromium/third_party/webrtc/modules/audio_processing/test/audioproc_float.cc
index dac43629cf6..9c44d76ecc2 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/test/audioproc_float.cc
+++ b/chromium/third_party/webrtc/modules/audio_processing/test/audioproc_float.cc
@@ -25,7 +25,11 @@
DEFINE_string(dump, "", "The name of the debug dump file to read from.");
DEFINE_string(i, "", "The name of the input file to read from.");
+DEFINE_string(i_rev, "", "The name of the reverse input file to read from.");
DEFINE_string(o, "out.wav", "Name of the output file to write to.");
+DEFINE_string(o_rev,
+ "out_rev.wav",
+ "Name of the reverse output file to write to.");
DEFINE_int32(out_channels, 0, "Number of output channels. Defaults to input.");
DEFINE_int32(out_sample_rate, 0,
"Output sample rate in Hz. Defaults to input.");
@@ -40,6 +44,7 @@ DEFINE_bool(hpf, false, "Enable high-pass filtering.");
DEFINE_bool(ns, false, "Enable noise suppression.");
DEFINE_bool(ts, false, "Enable transient suppression.");
DEFINE_bool(bf, false, "Enable beamforming.");
+DEFINE_bool(ie, false, "Enable intelligibility enhancer.");
DEFINE_bool(all, false, "Enable all components.");
DEFINE_int32(ns_level, -1, "Noise suppression level [0 - 3].");
@@ -58,6 +63,15 @@ const char kUsage[] =
"All components are disabled by default. If any bi-directional components\n"
"are enabled, only debug dump files are permitted.";
+// Returns a StreamConfig corresponding to wav_file if it's non-nullptr.
+// Otherwise returns a default initialized StreamConfig.
+StreamConfig MakeStreamConfig(const WavFile* wav_file) {
+ if (wav_file) {
+ return {wav_file->sample_rate(), wav_file->num_channels()};
+ }
+ return {};
+}
+
} // namespace
int main(int argc, char* argv[]) {
@@ -85,30 +99,35 @@ int main(int argc, char* argv[]) {
Config config;
config.Set<ExperimentalNs>(new ExperimentalNs(FLAGS_ts || FLAGS_all));
+ config.Set<Intelligibility>(new Intelligibility(FLAGS_ie || FLAGS_all));
if (FLAGS_bf || FLAGS_all) {
const size_t num_mics = in_file.num_channels();
const std::vector<Point> array_geometry =
ParseArrayGeometry(FLAGS_mic_positions, num_mics);
- CHECK_EQ(array_geometry.size(), num_mics);
+ RTC_CHECK_EQ(array_geometry.size(), num_mics);
config.Set<Beamforming>(new Beamforming(true, array_geometry));
}
rtc::scoped_ptr<AudioProcessing> ap(AudioProcessing::Create(config));
if (!FLAGS_dump.empty()) {
- CHECK_EQ(kNoErr, ap->echo_cancellation()->Enable(FLAGS_aec || FLAGS_all));
+ RTC_CHECK_EQ(kNoErr,
+ ap->echo_cancellation()->Enable(FLAGS_aec || FLAGS_all));
} else if (FLAGS_aec) {
fprintf(stderr, "-aec requires a -dump file.\n");
return -1;
}
- CHECK_EQ(kNoErr, ap->gain_control()->Enable(FLAGS_agc || FLAGS_all));
- CHECK_EQ(kNoErr, ap->gain_control()->set_mode(GainControl::kFixedDigital));
- CHECK_EQ(kNoErr, ap->high_pass_filter()->Enable(FLAGS_hpf || FLAGS_all));
- CHECK_EQ(kNoErr, ap->noise_suppression()->Enable(FLAGS_ns || FLAGS_all));
+ bool process_reverse = !FLAGS_i_rev.empty();
+ RTC_CHECK_EQ(kNoErr, ap->gain_control()->Enable(FLAGS_agc || FLAGS_all));
+ RTC_CHECK_EQ(kNoErr,
+ ap->gain_control()->set_mode(GainControl::kFixedDigital));
+ RTC_CHECK_EQ(kNoErr, ap->high_pass_filter()->Enable(FLAGS_hpf || FLAGS_all));
+ RTC_CHECK_EQ(kNoErr, ap->noise_suppression()->Enable(FLAGS_ns || FLAGS_all));
if (FLAGS_ns_level != -1)
- CHECK_EQ(kNoErr, ap->noise_suppression()->set_level(
- static_cast<NoiseSuppression::Level>(FLAGS_ns_level)));
+ RTC_CHECK_EQ(kNoErr,
+ ap->noise_suppression()->set_level(
+ static_cast<NoiseSuppression::Level>(FLAGS_ns_level)));
printf("Input file: %s\nChannels: %d, Sample rate: %d Hz\n\n",
FLAGS_i.c_str(), in_file.num_channels(), in_file.sample_rate());
@@ -124,9 +143,42 @@ int main(int argc, char* argv[]) {
std::vector<float> in_interleaved(in_buf.size());
std::vector<float> out_interleaved(out_buf.size());
+
+ rtc::scoped_ptr<WavReader> in_rev_file;
+ rtc::scoped_ptr<WavWriter> out_rev_file;
+ rtc::scoped_ptr<ChannelBuffer<float>> in_rev_buf;
+ rtc::scoped_ptr<ChannelBuffer<float>> out_rev_buf;
+ std::vector<float> in_rev_interleaved;
+ std::vector<float> out_rev_interleaved;
+ if (process_reverse) {
+ in_rev_file.reset(new WavReader(FLAGS_i_rev));
+ out_rev_file.reset(new WavWriter(FLAGS_o_rev, in_rev_file->sample_rate(),
+ in_rev_file->num_channels()));
+ printf("In rev file: %s\nChannels: %d, Sample rate: %d Hz\n\n",
+ FLAGS_i_rev.c_str(), in_rev_file->num_channels(),
+ in_rev_file->sample_rate());
+ printf("Out rev file: %s\nChannels: %d, Sample rate: %d Hz\n\n",
+ FLAGS_o_rev.c_str(), out_rev_file->num_channels(),
+ out_rev_file->sample_rate());
+ in_rev_buf.reset(new ChannelBuffer<float>(
+ rtc::CheckedDivExact(in_rev_file->sample_rate(), kChunksPerSecond),
+ in_rev_file->num_channels()));
+ in_rev_interleaved.resize(in_rev_buf->size());
+ out_rev_buf.reset(new ChannelBuffer<float>(
+ rtc::CheckedDivExact(out_rev_file->sample_rate(), kChunksPerSecond),
+ out_rev_file->num_channels()));
+ out_rev_interleaved.resize(out_rev_buf->size());
+ }
+
TickTime processing_start_time;
TickInterval accumulated_time;
int num_chunks = 0;
+
+ const auto input_config = MakeStreamConfig(&in_file);
+ const auto output_config = MakeStreamConfig(&out_file);
+ const auto reverse_input_config = MakeStreamConfig(in_rev_file.get());
+ const auto reverse_output_config = MakeStreamConfig(out_rev_file.get());
+
while (in_file.ReadSamples(in_interleaved.size(),
&in_interleaved[0]) == in_interleaved.size()) {
// Have logs display the file time rather than wallclock time.
@@ -135,18 +187,25 @@ int main(int argc, char* argv[]) {
&in_interleaved[0]);
Deinterleave(&in_interleaved[0], in_buf.num_frames(),
in_buf.num_channels(), in_buf.channels());
+ if (process_reverse) {
+ in_rev_file->ReadSamples(in_rev_interleaved.size(),
+ in_rev_interleaved.data());
+ FloatS16ToFloat(in_rev_interleaved.data(), in_rev_interleaved.size(),
+ in_rev_interleaved.data());
+ Deinterleave(in_rev_interleaved.data(), in_rev_buf->num_frames(),
+ in_rev_buf->num_channels(), in_rev_buf->channels());
+ }
if (FLAGS_perf) {
processing_start_time = TickTime::Now();
}
- CHECK_EQ(kNoErr,
- ap->ProcessStream(in_buf.channels(),
- in_buf.num_frames(),
- in_file.sample_rate(),
- LayoutFromChannels(in_buf.num_channels()),
- out_file.sample_rate(),
- LayoutFromChannels(out_buf.num_channels()),
- out_buf.channels()));
+ RTC_CHECK_EQ(kNoErr, ap->ProcessStream(in_buf.channels(), input_config,
+ output_config, out_buf.channels()));
+ if (process_reverse) {
+ RTC_CHECK_EQ(kNoErr, ap->ProcessReverseStream(
+ in_rev_buf->channels(), reverse_input_config,
+ reverse_output_config, out_rev_buf->channels()));
+ }
if (FLAGS_perf) {
accumulated_time += TickTime::Now() - processing_start_time;
}
@@ -156,6 +215,14 @@ int main(int argc, char* argv[]) {
FloatToFloatS16(&out_interleaved[0], out_interleaved.size(),
&out_interleaved[0]);
out_file.WriteSamples(&out_interleaved[0], out_interleaved.size());
+ if (process_reverse) {
+ Interleave(out_rev_buf->channels(), out_rev_buf->num_frames(),
+ out_rev_buf->num_channels(), out_rev_interleaved.data());
+ FloatToFloatS16(out_rev_interleaved.data(), out_rev_interleaved.size(),
+ out_rev_interleaved.data());
+ out_rev_file->WriteSamples(out_rev_interleaved.data(),
+ out_rev_interleaved.size());
+ }
num_chunks++;
}
if (FLAGS_perf) {
diff --git a/chromium/third_party/webrtc/modules/audio_processing/test/test_utils.cc b/chromium/third_party/webrtc/modules/audio_processing/test/test_utils.cc
index fe33ec0351f..1b9ac3ce4cb 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/test/test_utils.cc
+++ b/chromium/third_party/webrtc/modules/audio_processing/test/test_utils.cc
@@ -100,8 +100,8 @@ AudioProcessing::ChannelLayout LayoutFromChannels(int num_channels) {
std::vector<Point> ParseArrayGeometry(const std::string& mic_positions,
size_t num_mics) {
const std::vector<float> values = ParseList<float>(mic_positions);
- CHECK_EQ(values.size(), 3 * num_mics) <<
- "Could not parse mic_positions or incorrect number of points.";
+ RTC_CHECK_EQ(values.size(), 3 * num_mics)
+ << "Could not parse mic_positions or incorrect number of points.";
std::vector<Point> result;
result.reserve(num_mics);
diff --git a/chromium/third_party/webrtc/modules/audio_processing/test/test_utils.h b/chromium/third_party/webrtc/modules/audio_processing/test/test_utils.h
index 7ad462c0957..8dd380b15da 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/test/test_utils.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/test/test_utils.h
@@ -40,7 +40,7 @@ class RawFile final {
private:
FILE* file_handle_;
- DISALLOW_COPY_AND_ASSIGN(RawFile);
+ RTC_DISALLOW_COPY_AND_ASSIGN(RawFile);
};
void WriteIntData(const int16_t* data,
diff --git a/chromium/third_party/webrtc/modules/audio_processing/test/unpack.cc b/chromium/third_party/webrtc/modules/audio_processing/test/unpack.cc
index 2484828bd18..24578e240c6 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/test/unpack.cc
+++ b/chromium/third_party/webrtc/modules/audio_processing/test/unpack.cc
@@ -40,6 +40,11 @@ DEFINE_bool(text,
false,
"Write non-audio files as text files instead of binary files.");
+#define PRINT_CONFIG(field_name) \
+ if (msg.has_##field_name()) { \
+ fprintf(settings_file, " " #field_name ": %d\n", msg.field_name()); \
+ }
+
namespace webrtc {
using audioproc::Event;
@@ -83,6 +88,9 @@ int do_main(int argc, char* argv[]) {
rtc::scoped_ptr<RawFile> reverse_raw_file;
rtc::scoped_ptr<RawFile> input_raw_file;
rtc::scoped_ptr<RawFile> output_raw_file;
+
+ FILE* settings_file = OpenFile(FLAGS_settings_file, "wb");
+
while (ReadMessageFromFile(debug_file, &event_msg)) {
if (event_msg.type() == Event::REVERSE_STREAM) {
if (!event_msg.has_reverse_stream()) {
@@ -217,13 +225,37 @@ int do_main(int argc, char* argv[]) {
}
}
}
+ } else if (event_msg.type() == Event::CONFIG) {
+ if (!event_msg.has_config()) {
+ printf("Corrupt input file: Config missing.\n");
+ return 1;
+ }
+ const audioproc::Config msg = event_msg.config();
+
+ fprintf(settings_file, "APM re-config at frame: %d\n", frame_count);
+
+ PRINT_CONFIG(aec_enabled);
+ PRINT_CONFIG(aec_delay_agnostic_enabled);
+ PRINT_CONFIG(aec_drift_compensation_enabled);
+ PRINT_CONFIG(aec_extended_filter_enabled);
+ PRINT_CONFIG(aec_suppression_level);
+ PRINT_CONFIG(aecm_enabled);
+ PRINT_CONFIG(aecm_comfort_noise_enabled);
+ PRINT_CONFIG(aecm_routing_mode);
+ PRINT_CONFIG(agc_enabled);
+ PRINT_CONFIG(agc_mode);
+ PRINT_CONFIG(agc_limiter_enabled);
+ PRINT_CONFIG(noise_robust_agc_enabled);
+ PRINT_CONFIG(hpf_enabled);
+ PRINT_CONFIG(ns_enabled);
+ PRINT_CONFIG(ns_level);
+ PRINT_CONFIG(transient_suppression_enabled);
} else if (event_msg.type() == Event::INIT) {
if (!event_msg.has_init()) {
printf("Corrupt input file: Init missing.\n");
return 1;
}
- static FILE* settings_file = OpenFile(FLAGS_settings_file, "wb");
const Init msg = event_msg.init();
// These should print out zeros if they're missing.
fprintf(settings_file, "Init at frame: %d\n", frame_count);
diff --git a/chromium/third_party/webrtc/modules/audio_processing/three_band_filter_bank.cc b/chromium/third_party/webrtc/modules/audio_processing/three_band_filter_bank.cc
index efd7a796340..91e58df9b8b 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/three_band_filter_bank.cc
+++ b/chromium/third_party/webrtc/modules/audio_processing/three_band_filter_bank.cc
@@ -42,8 +42,8 @@
namespace webrtc {
namespace {
-const int kNumBands = 3;
-const int kSparsity = 4;
+const size_t kNumBands = 3;
+const size_t kSparsity = 4;
// Factors to take into account when choosing |kNumCoeffs|:
// 1. Higher |kNumCoeffs|, means faster transition, which ensures less
@@ -53,7 +53,7 @@ const int kSparsity = 4;
// |kNumBands| * |kSparsity| * |kNumCoeffs| / 2, so it increases linearly
// with |kNumCoeffs|.
// 3. The computation complexity also increases linearly with |kNumCoeffs|.
-const int kNumCoeffs = 4;
+const size_t kNumCoeffs = 4;
// The Matlab code to generate these |kLowpassCoeffs| is:
//
@@ -85,8 +85,11 @@ const float kLowpassCoeffs[kNumBands * kSparsity][kNumCoeffs] =
// Downsamples |in| into |out|, taking one every |kNumbands| starting from
// |offset|. |split_length| is the |out| length. |in| has to be at least
// |kNumBands| * |split_length| long.
-void Downsample(const float* in, int split_length, int offset, float* out) {
- for (int i = 0; i < split_length; ++i) {
+void Downsample(const float* in,
+ size_t split_length,
+ size_t offset,
+ float* out) {
+ for (size_t i = 0; i < split_length; ++i) {
out[i] = in[kNumBands * i + offset];
}
}
@@ -94,8 +97,8 @@ void Downsample(const float* in, int split_length, int offset, float* out) {
// Upsamples |in| into |out|, scaling by |kNumBands| and accumulating it every
// |kNumBands| starting from |offset|. |split_length| is the |in| length. |out|
// has to be at least |kNumBands| * |split_length| long.
-void Upsample(const float* in, int split_length, int offset, float* out) {
- for (int i = 0; i < split_length; ++i) {
+void Upsample(const float* in, size_t split_length, size_t offset, float* out) {
+ for (size_t i = 0; i < split_length; ++i) {
out[kNumBands * i + offset] += kNumBands * in[i];
}
}
@@ -105,11 +108,11 @@ void Upsample(const float* in, int split_length, int offset, float* out) {
// Because the low-pass filter prototype has half bandwidth it is possible to
// use a DCT to shift it in both directions at the same time, to the center
// frequencies [1 / 12, 3 / 12, 5 / 12].
-ThreeBandFilterBank::ThreeBandFilterBank(int length)
+ThreeBandFilterBank::ThreeBandFilterBank(size_t length)
: in_buffer_(rtc::CheckedDivExact(length, kNumBands)),
out_buffer_(in_buffer_.size()) {
- for (int i = 0; i < kSparsity; ++i) {
- for (int j = 0; j < kNumBands; ++j) {
+ for (size_t i = 0; i < kSparsity; ++i) {
+ for (size_t j = 0; j < kNumBands; ++j) {
analysis_filters_.push_back(new SparseFIRFilter(
kLowpassCoeffs[i * kNumBands + j], kNumCoeffs, kSparsity, i));
synthesis_filters_.push_back(new SparseFIRFilter(
@@ -119,7 +122,7 @@ ThreeBandFilterBank::ThreeBandFilterBank(int length)
dct_modulation_.resize(kNumBands * kSparsity);
for (size_t i = 0; i < dct_modulation_.size(); ++i) {
dct_modulation_[i].resize(kNumBands);
- for (int j = 0; j < kNumBands; ++j) {
+ for (size_t j = 0; j < kNumBands; ++j) {
dct_modulation_[i][j] =
2.f * cos(2.f * M_PI * i * (2.f * j + 1.f) / dct_modulation_.size());
}
@@ -133,17 +136,16 @@ ThreeBandFilterBank::ThreeBandFilterBank(int length)
// of |kSparsity|.
// 3. Modulating with cosines and accumulating to get the desired band.
void ThreeBandFilterBank::Analysis(const float* in,
- int length,
+ size_t length,
float* const* out) {
- CHECK_EQ(static_cast<int>(in_buffer_.size()),
- rtc::CheckedDivExact(length, kNumBands));
- for (int i = 0; i < kNumBands; ++i) {
+ RTC_CHECK_EQ(in_buffer_.size(), rtc::CheckedDivExact(length, kNumBands));
+ for (size_t i = 0; i < kNumBands; ++i) {
memset(out[i], 0, in_buffer_.size() * sizeof(*out[i]));
}
- for (int i = 0; i < kNumBands; ++i) {
+ for (size_t i = 0; i < kNumBands; ++i) {
Downsample(in, in_buffer_.size(), kNumBands - i - 1, &in_buffer_[0]);
- for (int j = 0; j < kSparsity; ++j) {
- const int offset = i + j * kNumBands;
+ for (size_t j = 0; j < kSparsity; ++j) {
+ const size_t offset = i + j * kNumBands;
analysis_filters_[offset]->Filter(&in_buffer_[0],
in_buffer_.size(),
&out_buffer_[0]);
@@ -159,13 +161,13 @@ void ThreeBandFilterBank::Analysis(const float* in,
// |kSparsity| signals with different delays.
// 3. Parallel to serial upsampling by a factor of |kNumBands|.
void ThreeBandFilterBank::Synthesis(const float* const* in,
- int split_length,
+ size_t split_length,
float* out) {
- CHECK_EQ(static_cast<int>(in_buffer_.size()), split_length);
+ RTC_CHECK_EQ(in_buffer_.size(), split_length);
memset(out, 0, kNumBands * in_buffer_.size() * sizeof(*out));
- for (int i = 0; i < kNumBands; ++i) {
- for (int j = 0; j < kSparsity; ++j) {
- const int offset = i + j * kNumBands;
+ for (size_t i = 0; i < kNumBands; ++i) {
+ for (size_t j = 0; j < kSparsity; ++j) {
+ const size_t offset = i + j * kNumBands;
UpModulate(in, in_buffer_.size(), offset, &in_buffer_[0]);
synthesis_filters_[offset]->Filter(&in_buffer_[0],
in_buffer_.size(),
@@ -181,11 +183,11 @@ void ThreeBandFilterBank::Synthesis(const float* const* in,
// cosines used for modulation. |split_length| is the length of |in| and each
// band of |out|.
void ThreeBandFilterBank::DownModulate(const float* in,
- int split_length,
- int offset,
+ size_t split_length,
+ size_t offset,
float* const* out) {
- for (int i = 0; i < kNumBands; ++i) {
- for (int j = 0; j < split_length; ++j) {
+ for (size_t i = 0; i < kNumBands; ++i) {
+ for (size_t j = 0; j < split_length; ++j) {
out[i][j] += dct_modulation_[offset][i] * in[j];
}
}
@@ -196,12 +198,12 @@ void ThreeBandFilterBank::DownModulate(const float* in,
// |offset| is the index in the period of the cosines used for modulation.
// |split_length| is the length of each band of |in| and |out|.
void ThreeBandFilterBank::UpModulate(const float* const* in,
- int split_length,
- int offset,
+ size_t split_length,
+ size_t offset,
float* out) {
memset(out, 0, split_length * sizeof(*out));
- for (int i = 0; i < kNumBands; ++i) {
- for (int j = 0; j < split_length; ++j) {
+ for (size_t i = 0; i < kNumBands; ++i) {
+ for (size_t j = 0; j < split_length; ++j) {
out[j] += dct_modulation_[offset][i] * in[i][j];
}
}
diff --git a/chromium/third_party/webrtc/modules/audio_processing/three_band_filter_bank.h b/chromium/third_party/webrtc/modules/audio_processing/three_band_filter_bank.h
index 7677448e697..18e8aee7c9b 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/three_band_filter_bank.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/three_band_filter_bank.h
@@ -34,26 +34,26 @@ namespace webrtc {
// depending on the input signal after compensating for the delay.
class ThreeBandFilterBank final {
public:
- explicit ThreeBandFilterBank(int length);
+ explicit ThreeBandFilterBank(size_t length);
// Splits |in| into 3 downsampled frequency bands in |out|.
// |length| is the |in| length. Each of the 3 bands of |out| has to have a
// length of |length| / 3.
- void Analysis(const float* in, int length, float* const* out);
+ void Analysis(const float* in, size_t length, float* const* out);
// Merges the 3 downsampled frequency bands in |in| into |out|.
// |split_length| is the length of each band of |in|. |out| has to have at
// least a length of 3 * |split_length|.
- void Synthesis(const float* const* in, int split_length, float* out);
+ void Synthesis(const float* const* in, size_t split_length, float* out);
private:
void DownModulate(const float* in,
- int split_length,
- int offset,
+ size_t split_length,
+ size_t offset,
float* const* out);
void UpModulate(const float* const* in,
- int split_length,
- int offset,
+ size_t split_length,
+ size_t offset,
float* out);
std::vector<float> in_buffer_;
diff --git a/chromium/third_party/webrtc/modules/audio_processing/transient/transient_suppressor.cc b/chromium/third_party/webrtc/modules/audio_processing/transient/transient_suppressor.cc
index 2f79a20ac79..206d14db755 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/transient/transient_suppressor.cc
+++ b/chromium/third_party/webrtc/modules/audio_processing/transient/transient_suppressor.cc
@@ -124,7 +124,7 @@ int TransientSuppressor::Initialize(int sample_rate_hz,
analysis_length_ * num_channels_ * sizeof(out_buffer_[0]));
// ip[0] must be zero to trigger initialization using rdft().
size_t ip_length = 2 + sqrtf(analysis_length_);
- ip_.reset(new int[ip_length]());
+ ip_.reset(new size_t[ip_length]());
memset(ip_.get(), 0, ip_length * sizeof(ip_[0]));
wfft_.reset(new float[complex_analysis_length_ - 1]);
memset(wfft_.get(), 0, (complex_analysis_length_ - 1) * sizeof(wfft_[0]));
diff --git a/chromium/third_party/webrtc/modules/audio_processing/transient/transient_suppressor.h b/chromium/third_party/webrtc/modules/audio_processing/transient/transient_suppressor.h
index 12e4b5ed1f4..5a6f117629d 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/transient/transient_suppressor.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/transient/transient_suppressor.h
@@ -86,7 +86,7 @@ class TransientSuppressor {
rtc::scoped_ptr<float[]> out_buffer_;
// Arrays for fft.
- rtc::scoped_ptr<int[]> ip_;
+ rtc::scoped_ptr<size_t[]> ip_;
rtc::scoped_ptr<float[]> wfft_;
rtc::scoped_ptr<float[]> spectral_mean_;
diff --git a/chromium/third_party/webrtc/modules/audio_processing/vad/common.h b/chromium/third_party/webrtc/modules/audio_processing/vad/common.h
index 0772d554894..be99c1c59d4 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/vad/common.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/vad/common.h
@@ -12,15 +12,15 @@
#define WEBRTC_MODULES_AUDIO_PROCESSING_VAD_COMMON_H_
static const int kSampleRateHz = 16000;
-static const int kLength10Ms = kSampleRateHz / 100;
-static const int kMaxNumFrames = 4;
+static const size_t kLength10Ms = kSampleRateHz / 100;
+static const size_t kMaxNumFrames = 4;
struct AudioFeatures {
double log_pitch_gain[kMaxNumFrames];
double pitch_lag_hz[kMaxNumFrames];
double spectral_peak[kMaxNumFrames];
double rms[kMaxNumFrames];
- int num_frames;
+ size_t num_frames;
bool silence;
};
diff --git a/chromium/third_party/webrtc/modules/audio_processing/vad/pitch_based_vad.cc b/chromium/third_party/webrtc/modules/audio_processing/vad/pitch_based_vad.cc
index 91638d007ed..39ec37e6ec6 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/vad/pitch_based_vad.cc
+++ b/chromium/third_party/webrtc/modules/audio_processing/vad/pitch_based_vad.cc
@@ -75,7 +75,7 @@ int PitchBasedVad::VoicingProbability(const AudioFeatures& features,
const double kLimLowSpectralPeak = 200;
const double kLimHighSpectralPeak = 2000;
const double kEps = 1e-12;
- for (int n = 0; n < features.num_frames; n++) {
+ for (size_t n = 0; n < features.num_frames; n++) {
gmm_features[0] = features.log_pitch_gain[n];
gmm_features[1] = features.spectral_peak[n];
gmm_features[2] = features.pitch_lag_hz[n];
diff --git a/chromium/third_party/webrtc/modules/audio_processing/vad/pole_zero_filter.cc b/chromium/third_party/webrtc/modules/audio_processing/vad/pole_zero_filter.cc
index 84d0739d8c1..9769515c57d 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/vad/pole_zero_filter.cc
+++ b/chromium/third_party/webrtc/modules/audio_processing/vad/pole_zero_filter.cc
@@ -17,11 +17,10 @@
namespace webrtc {
PoleZeroFilter* PoleZeroFilter::Create(const float* numerator_coefficients,
- int order_numerator,
+ size_t order_numerator,
const float* denominator_coefficients,
- int order_denominator) {
- if (order_numerator < 0 || order_denominator < 0 ||
- order_numerator > kMaxFilterOrder ||
+ size_t order_denominator) {
+ if (order_numerator > kMaxFilterOrder ||
order_denominator > kMaxFilterOrder || denominator_coefficients[0] == 0 ||
numerator_coefficients == NULL || denominator_coefficients == NULL)
return NULL;
@@ -30,9 +29,9 @@ PoleZeroFilter* PoleZeroFilter::Create(const float* numerator_coefficients,
}
PoleZeroFilter::PoleZeroFilter(const float* numerator_coefficients,
- int order_numerator,
+ size_t order_numerator,
const float* denominator_coefficients,
- int order_denominator)
+ size_t order_denominator)
: past_input_(),
past_output_(),
numerator_coefficients_(),
@@ -46,30 +45,31 @@ PoleZeroFilter::PoleZeroFilter(const float* numerator_coefficients,
sizeof(denominator_coefficients_[0]) * (order_denominator_ + 1));
if (denominator_coefficients_[0] != 1) {
- for (int n = 0; n <= order_numerator_; n++)
+ for (size_t n = 0; n <= order_numerator_; n++)
numerator_coefficients_[n] /= denominator_coefficients_[0];
- for (int n = 0; n <= order_denominator_; n++)
+ for (size_t n = 0; n <= order_denominator_; n++)
denominator_coefficients_[n] /= denominator_coefficients_[0];
}
}
template <typename T>
-static float FilterArPast(const T* past, int order, const float* coefficients) {
+static float FilterArPast(const T* past, size_t order,
+ const float* coefficients) {
float sum = 0.0f;
- int past_index = order - 1;
- for (int k = 1; k <= order; k++, past_index--)
+ size_t past_index = order - 1;
+ for (size_t k = 1; k <= order; k++, past_index--)
sum += coefficients[k] * past[past_index];
return sum;
}
int PoleZeroFilter::Filter(const int16_t* in,
- int num_input_samples,
+ size_t num_input_samples,
float* output) {
- if (in == NULL || num_input_samples < 0 || output == NULL)
+ if (in == NULL || output == NULL)
return -1;
// This is the typical case, just a memcpy.
- const int k = std::min(num_input_samples, highest_order_);
- int n;
+ const size_t k = std::min(num_input_samples, highest_order_);
+ size_t n;
for (n = 0; n < k; n++) {
output[n] = in[n] * numerator_coefficients_[0];
output[n] += FilterArPast(&past_input_[n], order_numerator_,
@@ -81,7 +81,7 @@ int PoleZeroFilter::Filter(const int16_t* in,
past_output_[n + order_denominator_] = output[n];
}
if (highest_order_ < num_input_samples) {
- for (int m = 0; n < num_input_samples; n++, m++) {
+ for (size_t m = 0; n < num_input_samples; n++, m++) {
output[n] = in[n] * numerator_coefficients_[0];
output[n] +=
FilterArPast(&in[m], order_numerator_, numerator_coefficients_);
diff --git a/chromium/third_party/webrtc/modules/audio_processing/vad/pole_zero_filter.h b/chromium/third_party/webrtc/modules/audio_processing/vad/pole_zero_filter.h
index 038d801a1b6..bd13050a5c4 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/vad/pole_zero_filter.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/vad/pole_zero_filter.h
@@ -11,6 +11,8 @@
#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_VAD_POLE_ZERO_FILTER_H_
#define WEBRTC_MODULES_AUDIO_PROCESSING_VAD_POLE_ZERO_FILTER_H_
+#include <cstddef>
+
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -20,17 +22,17 @@ class PoleZeroFilter {
~PoleZeroFilter() {}
static PoleZeroFilter* Create(const float* numerator_coefficients,
- int order_numerator,
+ size_t order_numerator,
const float* denominator_coefficients,
- int order_denominator);
+ size_t order_denominator);
- int Filter(const int16_t* in, int num_input_samples, float* output);
+ int Filter(const int16_t* in, size_t num_input_samples, float* output);
private:
PoleZeroFilter(const float* numerator_coefficients,
- int order_numerator,
+ size_t order_numerator,
const float* denominator_coefficients,
- int order_denominator);
+ size_t order_denominator);
static const int kMaxFilterOrder = 24;
@@ -40,9 +42,9 @@ class PoleZeroFilter {
float numerator_coefficients_[kMaxFilterOrder + 1];
float denominator_coefficients_[kMaxFilterOrder + 1];
- int order_numerator_;
- int order_denominator_;
- int highest_order_;
+ size_t order_numerator_;
+ size_t order_denominator_;
+ size_t highest_order_;
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_processing/vad/standalone_vad.cc b/chromium/third_party/webrtc/modules/audio_processing/vad/standalone_vad.cc
index 783785184db..468b8ff3f02 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/vad/standalone_vad.cc
+++ b/chromium/third_party/webrtc/modules/audio_processing/vad/standalone_vad.cc
@@ -42,7 +42,7 @@ StandaloneVad* StandaloneVad::Create() {
return new StandaloneVad(vad);
}
-int StandaloneVad::AddAudio(const int16_t* data, int length) {
+int StandaloneVad::AddAudio(const int16_t* data, size_t length) {
if (length != kLength10Ms)
return -1;
@@ -57,11 +57,11 @@ int StandaloneVad::AddAudio(const int16_t* data, int length) {
return 0;
}
-int StandaloneVad::GetActivity(double* p, int length_p) {
+int StandaloneVad::GetActivity(double* p, size_t length_p) {
if (index_ == 0)
return -1;
- const int num_frames = index_ / kLength10Ms;
+ const size_t num_frames = index_ / kLength10Ms;
if (num_frames > length_p)
return -1;
assert(WebRtcVad_ValidRateAndFrameLength(kSampleRateHz, index_) == 0);
@@ -73,7 +73,7 @@ int StandaloneVad::GetActivity(double* p, int length_p) {
p[0] = 0.01; // Arbitrary but small and non-zero.
else
p[0] = 0.5; // 0.5 is neutral values when combinned by other probabilities.
- for (int n = 1; n < num_frames; n++)
+ for (size_t n = 1; n < num_frames; n++)
p[n] = p[0];
// Reset the buffer to start from the beginning.
index_ = 0;
diff --git a/chromium/third_party/webrtc/modules/audio_processing/vad/standalone_vad.h b/chromium/third_party/webrtc/modules/audio_processing/vad/standalone_vad.h
index 4017a72c60f..6a25424dab5 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/vad/standalone_vad.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/vad/standalone_vad.h
@@ -41,10 +41,10 @@ class StandaloneVad {
// classified as passive. In this way, when probabilities are combined, the
// effect of the stand-alone VAD is neutral if the input is classified as
// active.
- int GetActivity(double* p, int length_p);
+ int GetActivity(double* p, size_t length_p);
// Expecting 10 ms of 16 kHz audio to be pushed in.
- int AddAudio(const int16_t* data, int length);
+ int AddAudio(const int16_t* data, size_t length);
// Set aggressiveness of VAD, 0 is the least aggressive and 3 is the most
// aggressive mode. Returns -1 if the input is less than 0 or larger than 3,
@@ -56,12 +56,12 @@ class StandaloneVad {
private:
explicit StandaloneVad(VadInst* vad);
- static const int kMaxNum10msFrames = 3;
+ static const size_t kMaxNum10msFrames = 3;
// TODO(turajs): Is there a way to use scoped-pointer here?
VadInst* vad_;
int16_t buffer_[kMaxNum10msFrames * kLength10Ms];
- int index_;
+ size_t index_;
int mode_;
};
diff --git a/chromium/third_party/webrtc/modules/audio_processing/vad/standalone_vad_unittest.cc b/chromium/third_party/webrtc/modules/audio_processing/vad/standalone_vad_unittest.cc
index 404a66f3039..942008e7330 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/vad/standalone_vad_unittest.cc
+++ b/chromium/third_party/webrtc/modules/audio_processing/vad/standalone_vad_unittest.cc
@@ -27,9 +27,9 @@ TEST(StandaloneVadTest, Api) {
// Valid frame length (for 32 kHz rate), but not what the VAD is expecting.
EXPECT_EQ(-1, vad->AddAudio(data, 320));
- const int kMaxNumFrames = 3;
+ const size_t kMaxNumFrames = 3;
double p[kMaxNumFrames];
- for (int n = 0; n < kMaxNumFrames; n++)
+ for (size_t n = 0; n < kMaxNumFrames; n++)
EXPECT_EQ(0, vad->AddAudio(data, kLength10Ms));
// Pretend |p| is shorter that it should be.
@@ -41,7 +41,7 @@ TEST(StandaloneVadTest, Api) {
EXPECT_EQ(-1, vad->GetActivity(p, kMaxNumFrames));
// Should reset and result in one buffer.
- for (int n = 0; n < kMaxNumFrames + 1; n++)
+ for (size_t n = 0; n < kMaxNumFrames + 1; n++)
EXPECT_EQ(0, vad->AddAudio(data, kLength10Ms));
EXPECT_EQ(0, vad->GetActivity(p, 1));
diff --git a/chromium/third_party/webrtc/modules/audio_processing/vad/vad_audio_proc.cc b/chromium/third_party/webrtc/modules/audio_processing/vad/vad_audio_proc.cc
index e8f27f802de..8535d1ff573 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/vad/vad_audio_proc.cc
+++ b/chromium/third_party/webrtc/modules/audio_processing/vad/vad_audio_proc.cc
@@ -76,7 +76,7 @@ void VadAudioProc::ResetBuffer() {
}
int VadAudioProc::ExtractFeatures(const int16_t* frame,
- int length,
+ size_t length,
AudioFeatures* features) {
features->num_frames = 0;
if (length != kNumSubframeSamples) {
@@ -100,7 +100,7 @@ int VadAudioProc::ExtractFeatures(const int16_t* frame,
features->silence = false;
Rms(features->rms, kMaxNumFrames);
- for (int i = 0; i < kNum10msSubframes; ++i) {
+ for (size_t i = 0; i < kNum10msSubframes; ++i) {
if (features->rms[i] < kSilenceRms) {
// PitchAnalysis can cause NaNs in the pitch gain if it's fed silence.
// Bail out here instead.
@@ -119,13 +119,13 @@ int VadAudioProc::ExtractFeatures(const int16_t* frame,
// Computes |kLpcOrder + 1| correlation coefficients.
void VadAudioProc::SubframeCorrelation(double* corr,
- int length_corr,
- int subframe_index) {
+ size_t length_corr,
+ size_t subframe_index) {
assert(length_corr >= kLpcOrder + 1);
double windowed_audio[kNumSubframeSamples + kNumPastSignalSamples];
- int buffer_index = subframe_index * kNumSubframeSamples;
+ size_t buffer_index = subframe_index * kNumSubframeSamples;
- for (int n = 0; n < kNumSubframeSamples + kNumPastSignalSamples; n++)
+ for (size_t n = 0; n < kNumSubframeSamples + kNumPastSignalSamples; n++)
windowed_audio[n] = audio_buffer_[buffer_index++] * kLpcAnalWin[n];
WebRtcIsac_AutoCorr(corr, windowed_audio,
@@ -136,16 +136,16 @@ void VadAudioProc::SubframeCorrelation(double* corr,
// The analysis window is 15 ms long and it is centered on the first half of
// each 10ms sub-frame. This is equivalent to computing LPC coefficients for the
// first half of each 10 ms subframe.
-void VadAudioProc::GetLpcPolynomials(double* lpc, int length_lpc) {
+void VadAudioProc::GetLpcPolynomials(double* lpc, size_t length_lpc) {
assert(length_lpc >= kNum10msSubframes * (kLpcOrder + 1));
double corr[kLpcOrder + 1];
double reflec_coeff[kLpcOrder];
- for (int i = 0, offset_lpc = 0; i < kNum10msSubframes;
+ for (size_t i = 0, offset_lpc = 0; i < kNum10msSubframes;
i++, offset_lpc += kLpcOrder + 1) {
SubframeCorrelation(corr, kLpcOrder + 1, i);
corr[0] *= 1.0001;
// This makes Lev-Durb a bit more stable.
- for (int k = 0; k < kLpcOrder + 1; k++) {
+ for (size_t k = 0; k < kLpcOrder + 1; k++) {
corr[k] *= kCorrWeight[k];
}
WebRtcIsac_LevDurb(&lpc[offset_lpc], reflec_coeff, corr, kLpcOrder);
@@ -174,30 +174,31 @@ static float QuadraticInterpolation(float prev_val,
// with the local minimum of A(z). It saves complexity, as we save one
// inversion. Furthermore, we find the first local maximum of magnitude squared,
// to save on one square root.
-void VadAudioProc::FindFirstSpectralPeaks(double* f_peak, int length_f_peak) {
+void VadAudioProc::FindFirstSpectralPeaks(double* f_peak,
+ size_t length_f_peak) {
assert(length_f_peak >= kNum10msSubframes);
double lpc[kNum10msSubframes * (kLpcOrder + 1)];
// For all sub-frames.
GetLpcPolynomials(lpc, kNum10msSubframes * (kLpcOrder + 1));
- const int kNumDftCoefficients = kDftSize / 2 + 1;
+ const size_t kNumDftCoefficients = kDftSize / 2 + 1;
float data[kDftSize];
- for (int i = 0; i < kNum10msSubframes; i++) {
+ for (size_t i = 0; i < kNum10msSubframes; i++) {
// Convert to float with zero pad.
memset(data, 0, sizeof(data));
- for (int n = 0; n < kLpcOrder + 1; n++) {
+ for (size_t n = 0; n < kLpcOrder + 1; n++) {
data[n] = static_cast<float>(lpc[i * (kLpcOrder + 1) + n]);
}
// Transform to frequency domain.
WebRtc_rdft(kDftSize, 1, data, ip_, w_fft_);
- int index_peak = 0;
+ size_t index_peak = 0;
float prev_magn_sqr = data[0] * data[0];
float curr_magn_sqr = data[2] * data[2] + data[3] * data[3];
float next_magn_sqr;
bool found_peak = false;
- for (int n = 2; n < kNumDftCoefficients - 1; n++) {
+ for (size_t n = 2; n < kNumDftCoefficients - 1; n++) {
next_magn_sqr =
data[2 * n] * data[2 * n] + data[2 * n + 1] * data[2 * n + 1];
if (curr_magn_sqr < prev_magn_sqr && curr_magn_sqr < next_magn_sqr) {
@@ -228,7 +229,7 @@ void VadAudioProc::FindFirstSpectralPeaks(double* f_peak, int length_f_peak) {
// Using iSAC functions to estimate pitch gains & lags.
void VadAudioProc::PitchAnalysis(double* log_pitch_gains,
double* pitch_lags_hz,
- int length) {
+ size_t length) {
// TODO(turajs): This can be "imported" from iSAC & and the next two
// constants.
assert(length >= kNum10msSubframes);
@@ -260,12 +261,12 @@ void VadAudioProc::PitchAnalysis(double* log_pitch_gains,
&log_old_gain_, &old_lag_, log_pitch_gains, pitch_lags_hz);
}
-void VadAudioProc::Rms(double* rms, int length_rms) {
+void VadAudioProc::Rms(double* rms, size_t length_rms) {
assert(length_rms >= kNum10msSubframes);
- int offset = kNumPastSignalSamples;
- for (int i = 0; i < kNum10msSubframes; i++) {
+ size_t offset = kNumPastSignalSamples;
+ for (size_t i = 0; i < kNum10msSubframes; i++) {
rms[i] = 0;
- for (int n = 0; n < kNumSubframeSamples; n++, offset++)
+ for (size_t n = 0; n < kNumSubframeSamples; n++, offset++)
rms[i] += audio_buffer_[offset] * audio_buffer_[offset];
rms[i] = sqrt(rms[i] / kNumSubframeSamples);
}
diff --git a/chromium/third_party/webrtc/modules/audio_processing/vad/vad_audio_proc.h b/chromium/third_party/webrtc/modules/audio_processing/vad/vad_audio_proc.h
index 6cf3937f79f..85500aed845 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/vad/vad_audio_proc.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/vad/vad_audio_proc.h
@@ -30,46 +30,51 @@ class VadAudioProc {
~VadAudioProc();
int ExtractFeatures(const int16_t* audio_frame,
- int length,
+ size_t length,
AudioFeatures* audio_features);
- static const int kDftSize = 512;
+ static const size_t kDftSize = 512;
private:
- void PitchAnalysis(double* pitch_gains, double* pitch_lags_hz, int length);
- void SubframeCorrelation(double* corr, int length_corr, int subframe_index);
- void GetLpcPolynomials(double* lpc, int length_lpc);
- void FindFirstSpectralPeaks(double* f_peak, int length_f_peak);
- void Rms(double* rms, int length_rms);
+ void PitchAnalysis(double* pitch_gains, double* pitch_lags_hz, size_t length);
+ void SubframeCorrelation(double* corr,
+ size_t length_corr,
+ size_t subframe_index);
+ void GetLpcPolynomials(double* lpc, size_t length_lpc);
+ void FindFirstSpectralPeaks(double* f_peak, size_t length_f_peak);
+ void Rms(double* rms, size_t length_rms);
void ResetBuffer();
// To compute spectral peak we perform LPC analysis to get spectral envelope.
// For every 30 ms we compute 3 spectral peak there for 3 LPC analysis.
// LPC is computed over 15 ms of windowed audio. For every 10 ms sub-frame
// we need 5 ms of past signal to create the input of LPC analysis.
- static const int kNumPastSignalSamples = kSampleRateHz / 200;
+ static const size_t kNumPastSignalSamples =
+ static_cast<size_t>(kSampleRateHz / 200);
// TODO(turajs): maybe defining this at a higher level (maybe enum) so that
// all the code recognize it as "no-error."
static const int kNoError = 0;
- static const int kNum10msSubframes = 3;
- static const int kNumSubframeSamples = kSampleRateHz / 100;
- static const int kNumSamplesToProcess =
+ static const size_t kNum10msSubframes = 3;
+ static const size_t kNumSubframeSamples =
+ static_cast<size_t>(kSampleRateHz / 100);
+ static const size_t kNumSamplesToProcess =
kNum10msSubframes *
kNumSubframeSamples; // Samples in 30 ms @ given sampling rate.
- static const int kBufferLength = kNumPastSignalSamples + kNumSamplesToProcess;
- static const int kIpLength = kDftSize >> 1;
- static const int kWLength = kDftSize >> 1;
+ static const size_t kBufferLength =
+ kNumPastSignalSamples + kNumSamplesToProcess;
+ static const size_t kIpLength = kDftSize >> 1;
+ static const size_t kWLength = kDftSize >> 1;
- static const int kLpcOrder = 16;
+ static const size_t kLpcOrder = 16;
- int ip_[kIpLength];
+ size_t ip_[kIpLength];
float w_fft_[kWLength];
// A buffer of 5 ms (past audio) + 30 ms (one iSAC frame ).
float audio_buffer_[kBufferLength];
- int num_buffer_samples_;
+ size_t num_buffer_samples_;
double log_old_gain_;
double old_lag_;
diff --git a/chromium/third_party/webrtc/modules/audio_processing/vad/vad_audio_proc_internal.h b/chromium/third_party/webrtc/modules/audio_processing/vad/vad_audio_proc_internal.h
index 4486879df4f..45586b9be68 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/vad/vad_audio_proc_internal.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/vad/vad_audio_proc_internal.h
@@ -74,7 +74,7 @@ static const double kLpcAnalWin[] = {
0.14408883, 0.13106918, 0.11802689, 0.10496421, 0.09188339, 0.07878670,
0.06567639, 0.05255473, 0.03942400, 0.02628645, 0.01314436, 0.00000000};
-static const int kFilterOrder = 2;
+static const size_t kFilterOrder = 2;
static const float kCoeffNumerator[kFilterOrder + 1] = {0.974827f,
-1.949650f,
0.974827f};
diff --git a/chromium/third_party/webrtc/modules/audio_processing/vad/vad_audio_proc_unittest.cc b/chromium/third_party/webrtc/modules/audio_processing/vad/vad_audio_proc_unittest.cc
index 675af70b457..f509af476f1 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/vad/vad_audio_proc_unittest.cc
+++ b/chromium/third_party/webrtc/modules/audio_processing/vad/vad_audio_proc_unittest.cc
@@ -51,7 +51,7 @@ TEST(AudioProcessingTest, DISABLED_ComputingFirstSpectralPeak) {
// Read reference values.
const size_t num_frames = features.num_frames;
ASSERT_EQ(num_frames, fread(sp, sizeof(sp[0]), num_frames, peak_file));
- for (int n = 0; n < features.num_frames; n++)
+ for (size_t n = 0; n < features.num_frames; n++)
EXPECT_NEAR(features.spectral_peak[n], sp[n], 3);
}
}
diff --git a/chromium/third_party/webrtc/modules/audio_processing/vad/voice_activity_detector.cc b/chromium/third_party/webrtc/modules/audio_processing/vad/voice_activity_detector.cc
index 05995ed186b..ef56a3574cd 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/vad/voice_activity_detector.cc
+++ b/chromium/third_party/webrtc/modules/audio_processing/vad/voice_activity_detector.cc
@@ -17,7 +17,7 @@
namespace webrtc {
namespace {
-const int kMaxLength = 320;
+const size_t kMaxLength = 320;
const int kNumChannels = 1;
const double kDefaultVoiceValue = 1.0;
@@ -35,25 +35,25 @@ VoiceActivityDetector::VoiceActivityDetector()
// |chunkwise_voice_probabilities_| and |chunkwise_rms_| when there is new data.
// Otherwise it clears them.
void VoiceActivityDetector::ProcessChunk(const int16_t* audio,
- int length,
+ size_t length,
int sample_rate_hz) {
- DCHECK_EQ(length, sample_rate_hz / 100);
- DCHECK_LE(length, kMaxLength);
+ RTC_DCHECK_EQ(static_cast<int>(length), sample_rate_hz / 100);
+ RTC_DCHECK_LE(length, kMaxLength);
// Resample to the required rate.
const int16_t* resampled_ptr = audio;
if (sample_rate_hz != kSampleRateHz) {
- CHECK_EQ(
+ RTC_CHECK_EQ(
resampler_.ResetIfNeeded(sample_rate_hz, kSampleRateHz, kNumChannels),
0);
resampler_.Push(audio, length, resampled_, kLength10Ms, length);
resampled_ptr = resampled_;
}
- DCHECK_EQ(length, kLength10Ms);
+ RTC_DCHECK_EQ(length, kLength10Ms);
// Each chunk needs to be passed into |standalone_vad_|, because internally it
// buffers the audio and processes it all at once when GetActivity() is
// called.
- CHECK_EQ(standalone_vad_->AddAudio(resampled_ptr, length), 0);
+ RTC_CHECK_EQ(standalone_vad_->AddAudio(resampled_ptr, length), 0);
audio_processing_.ExtractFeatures(resampled_ptr, length, &features_);
@@ -70,13 +70,13 @@ void VoiceActivityDetector::ProcessChunk(const int16_t* audio,
} else {
std::fill(chunkwise_voice_probabilities_.begin(),
chunkwise_voice_probabilities_.end(), kNeutralProbability);
- CHECK_GE(
+ RTC_CHECK_GE(
standalone_vad_->GetActivity(&chunkwise_voice_probabilities_[0],
chunkwise_voice_probabilities_.size()),
0);
- CHECK_GE(pitch_based_vad_.VoicingProbability(
- features_, &chunkwise_voice_probabilities_[0]),
- 0);
+ RTC_CHECK_GE(pitch_based_vad_.VoicingProbability(
+ features_, &chunkwise_voice_probabilities_[0]),
+ 0);
}
last_voice_probability_ = chunkwise_voice_probabilities_.back();
}
diff --git a/chromium/third_party/webrtc/modules/audio_processing/vad/voice_activity_detector.h b/chromium/third_party/webrtc/modules/audio_processing/vad/voice_activity_detector.h
index aedd6ed3249..e2dcf022a95 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/vad/voice_activity_detector.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/vad/voice_activity_detector.h
@@ -31,7 +31,7 @@ class VoiceActivityDetector {
// Processes each audio chunk and estimates the voice probability. The maximum
// supported sample rate is 32kHz.
// TODO(aluebs): Change |length| to size_t.
- void ProcessChunk(const int16_t* audio, int length, int sample_rate_hz);
+ void ProcessChunk(const int16_t* audio, size_t length, int sample_rate_hz);
// Returns a vector of voice probabilities for each chunk. It can be empty for
// some chunks, but it catches up afterwards returning multiple values at
diff --git a/chromium/third_party/webrtc/modules/audio_processing/voice_detection_impl.cc b/chromium/third_party/webrtc/modules/audio_processing/voice_detection_impl.cc
index 0883536d52d..710df4233fa 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/voice_detection_impl.cc
+++ b/chromium/third_party/webrtc/modules/audio_processing/voice_detection_impl.cc
@@ -140,8 +140,8 @@ int VoiceDetectionImpl::Initialize() {
}
using_external_vad_ = false;
- frame_size_samples_ = frame_size_ms_ *
- apm_->proc_split_sample_rate_hz() / 1000;
+ frame_size_samples_ = static_cast<size_t>(
+ frame_size_ms_ * apm_->proc_split_sample_rate_hz() / 1000);
// TODO(ajm): intialize frame buffer here.
return apm_->kNoError;
diff --git a/chromium/third_party/webrtc/modules/audio_processing/voice_detection_impl.h b/chromium/third_party/webrtc/modules/audio_processing/voice_detection_impl.h
index 32f031edf25..b18808316e7 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/voice_detection_impl.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/voice_detection_impl.h
@@ -57,7 +57,7 @@ class VoiceDetectionImpl : public VoiceDetection,
bool using_external_vad_;
Likelihood likelihood_;
int frame_size_ms_;
- int frame_size_samples_;
+ size_t frame_size_samples_;
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/bitrate_controller/BUILD.gn b/chromium/third_party/webrtc/modules/bitrate_controller/BUILD.gn
index 9280f03679d..4ef536b5724 100644
--- a/chromium/third_party/webrtc/modules/bitrate_controller/BUILD.gn
+++ b/chromium/third_party/webrtc/modules/bitrate_controller/BUILD.gn
@@ -17,8 +17,6 @@ source_set("bitrate_controller") {
"include/bitrate_controller.h",
"send_side_bandwidth_estimation.cc",
"send_side_bandwidth_estimation.h",
- "send_time_history.cc",
- "send_time_history.h",
]
if (is_win) {
diff --git a/chromium/third_party/webrtc/modules/bitrate_controller/bitrate_allocator.cc b/chromium/third_party/webrtc/modules/bitrate_controller/bitrate_allocator.cc
index fc83e060a84..0aec528cde1 100644
--- a/chromium/third_party/webrtc/modules/bitrate_controller/bitrate_allocator.cc
+++ b/chromium/third_party/webrtc/modules/bitrate_controller/bitrate_allocator.cc
@@ -51,7 +51,7 @@ BitrateAllocator::ObserverBitrateMap BitrateAllocator::AllocateBitrates() {
uint32_t sum_min_bitrates = 0;
for (const auto& observer : bitrate_observers_)
- sum_min_bitrates += observer.second.min_bitrate_;
+ sum_min_bitrates += observer.second.min_bitrate;
if (last_bitrate_bps_ <= sum_min_bitrates)
return LowRateAllocation(last_bitrate_bps_);
else
@@ -59,10 +59,8 @@ BitrateAllocator::ObserverBitrateMap BitrateAllocator::AllocateBitrates() {
}
int BitrateAllocator::AddBitrateObserver(BitrateObserver* observer,
- uint32_t start_bitrate_bps,
uint32_t min_bitrate_bps,
- uint32_t max_bitrate_bps,
- int* new_observer_bitrate_bps) {
+ uint32_t max_bitrate_bps) {
CriticalSectionScoped lock(crit_sect_.get());
BitrateObserverConfList::iterator it =
@@ -73,43 +71,25 @@ int BitrateAllocator::AddBitrateObserver(BitrateObserver* observer,
// properly allocate bitrate. The allocator should instead distribute any
// extra bitrate after all streams have maxed out.
max_bitrate_bps *= kTransmissionMaxBitrateMultiplier;
- int new_bwe_candidate_bps = 0;
if (it != bitrate_observers_.end()) {
// Update current configuration.
- it->second.start_bitrate_ = start_bitrate_bps;
- it->second.min_bitrate_ = min_bitrate_bps;
- it->second.max_bitrate_ = max_bitrate_bps;
- // Set the send-side bandwidth to the max of the sum of start bitrates and
- // the current estimate, so that if the user wants to immediately use more
- // bandwidth, that can be enforced.
- for (const auto& observer : bitrate_observers_)
- new_bwe_candidate_bps += observer.second.start_bitrate_;
+ it->second.min_bitrate = min_bitrate_bps;
+ it->second.max_bitrate = max_bitrate_bps;
} else {
// Add new settings.
bitrate_observers_.push_back(BitrateObserverConfiguration(
- observer, BitrateConfiguration(start_bitrate_bps, min_bitrate_bps,
- max_bitrate_bps)));
+ observer, BitrateConfiguration(min_bitrate_bps, max_bitrate_bps)));
bitrate_observers_modified_ = true;
-
- // TODO(andresp): This is a ugly way to set start bitrate.
- //
- // Only change start bitrate if we have exactly one observer. By definition
- // you can only have one start bitrate, once we have our first estimate we
- // will adapt from there.
- if (bitrate_observers_.size() == 1)
- new_bwe_candidate_bps = start_bitrate_bps;
}
- last_bitrate_bps_ = std::max<int>(new_bwe_candidate_bps, last_bitrate_bps_);
-
ObserverBitrateMap allocation = AllocateBitrates();
- *new_observer_bitrate_bps = 0;
+ int new_observer_bitrate_bps = 0;
for (auto& kv : allocation) {
kv.first->OnNetworkChanged(kv.second, last_fraction_loss_, last_rtt_);
if (kv.first == observer)
- *new_observer_bitrate_bps = kv.second;
+ new_observer_bitrate_bps = kv.second;
}
- return last_bitrate_bps_;
+ return new_observer_bitrate_bps;
}
void BitrateAllocator::RemoveBitrateObserver(BitrateObserver* observer) {
@@ -129,8 +109,8 @@ void BitrateAllocator::GetMinMaxBitrateSumBps(int* min_bitrate_sum_bps,
CriticalSectionScoped lock(crit_sect_.get());
for (const auto& observer : bitrate_observers_) {
- *min_bitrate_sum_bps += observer.second.min_bitrate_;
- *max_bitrate_sum_bps += observer.second.max_bitrate_;
+ *min_bitrate_sum_bps += observer.second.min_bitrate;
+ *max_bitrate_sum_bps += observer.second.max_bitrate;
}
}
@@ -160,15 +140,15 @@ BitrateAllocator::ObserverBitrateMap BitrateAllocator::NormalRateAllocation(
ObserverSortingMap list_max_bitrates;
for (const auto& observer : bitrate_observers_) {
list_max_bitrates.insert(std::pair<uint32_t, ObserverConfiguration>(
- observer.second.max_bitrate_,
- ObserverConfiguration(observer.first, observer.second.min_bitrate_)));
+ observer.second.max_bitrate,
+ ObserverConfiguration(observer.first, observer.second.min_bitrate)));
}
ObserverBitrateMap allocation;
ObserverSortingMap::iterator max_it = list_max_bitrates.begin();
while (max_it != list_max_bitrates.end()) {
number_of_observers--;
uint32_t observer_allowance =
- max_it->second.min_bitrate_ + bitrate_per_observer;
+ max_it->second.min_bitrate + bitrate_per_observer;
if (max_it->first < observer_allowance) {
// We have more than enough for this observer.
// Carry the remainder forward.
@@ -176,9 +156,9 @@ BitrateAllocator::ObserverBitrateMap BitrateAllocator::NormalRateAllocation(
if (number_of_observers != 0) {
bitrate_per_observer += remainder / number_of_observers;
}
- allocation[max_it->second.observer_] = max_it->first;
+ allocation[max_it->second.observer] = max_it->first;
} else {
- allocation[max_it->second.observer_] = observer_allowance;
+ allocation[max_it->second.observer] = observer_allowance;
}
list_max_bitrates.erase(max_it);
// Prepare next iteration.
@@ -193,14 +173,14 @@ BitrateAllocator::ObserverBitrateMap BitrateAllocator::LowRateAllocation(
if (enforce_min_bitrate_) {
// Min bitrate to all observers.
for (const auto& observer : bitrate_observers_)
- allocation[observer.first] = observer.second.min_bitrate_;
+ allocation[observer.first] = observer.second.min_bitrate;
} else {
- // Allocate up to |min_bitrate_| to one observer at a time, until
+ // Allocate up to |min_bitrate| to one observer at a time, until
// |bitrate| is depleted.
uint32_t remainder = bitrate;
for (const auto& observer : bitrate_observers_) {
uint32_t allocated_bitrate =
- std::min(remainder, observer.second.min_bitrate_);
+ std::min(remainder, observer.second.min_bitrate);
allocation[observer.first] = allocated_bitrate;
remainder -= allocated_bitrate;
}
diff --git a/chromium/third_party/webrtc/modules/bitrate_controller/bitrate_allocator_unittest.cc b/chromium/third_party/webrtc/modules/bitrate_controller/bitrate_allocator_unittest.cc
index b69247e8612..4fc7e83b5be 100644
--- a/chromium/third_party/webrtc/modules/bitrate_controller/bitrate_allocator_unittest.cc
+++ b/chromium/third_party/webrtc/modules/bitrate_controller/bitrate_allocator_unittest.cc
@@ -46,22 +46,24 @@ class BitrateAllocatorTest : public ::testing::Test {
TEST_F(BitrateAllocatorTest, UpdatingBitrateObserver) {
TestBitrateObserver bitrate_observer;
- int start_bitrate;
- allocator_->AddBitrateObserver(&bitrate_observer, 200000, 100000, 1500000,
- &start_bitrate);
+ int start_bitrate =
+ allocator_->AddBitrateObserver(&bitrate_observer, 100000, 1500000);
EXPECT_EQ(300000, start_bitrate);
allocator_->OnNetworkChanged(200000, 0, 0);
EXPECT_EQ(200000u, bitrate_observer.last_bitrate_);
- allocator_->AddBitrateObserver(&bitrate_observer, 1500000, 100000, 1500000,
- &start_bitrate);
- EXPECT_EQ(1500000, start_bitrate);
- allocator_->OnNetworkChanged(1500000, 0, 0);
- EXPECT_EQ(1500000u, bitrate_observer.last_bitrate_);
-
- allocator_->AddBitrateObserver(&bitrate_observer, 500000, 100000, 1500000,
- &start_bitrate);
- EXPECT_EQ(1500000, start_bitrate);
+ // TODO(pbos): Expect capping to 1.5M instead of 3M when not boosting the max
+ // bitrate for FEC/retransmissions (see TODO in BitrateAllocator).
+ allocator_->OnNetworkChanged(4000000, 0, 0);
+ EXPECT_EQ(3000000u, bitrate_observer.last_bitrate_);
+ start_bitrate =
+ allocator_->AddBitrateObserver(&bitrate_observer, 100000, 4000000);
+ EXPECT_EQ(4000000, start_bitrate);
+
+ start_bitrate =
+ allocator_->AddBitrateObserver(&bitrate_observer, 100000, 1500000);
+ EXPECT_EQ(3000000, start_bitrate);
+ EXPECT_EQ(3000000u, bitrate_observer.last_bitrate_);
allocator_->OnNetworkChanged(1500000, 0, 0);
EXPECT_EQ(1500000u, bitrate_observer.last_bitrate_);
}
@@ -69,12 +71,11 @@ TEST_F(BitrateAllocatorTest, UpdatingBitrateObserver) {
TEST_F(BitrateAllocatorTest, TwoBitrateObserversOneRtcpObserver) {
TestBitrateObserver bitrate_observer_1;
TestBitrateObserver bitrate_observer_2;
- int start_bitrate;
- allocator_->AddBitrateObserver(&bitrate_observer_1, 200000, 100000, 300000,
- &start_bitrate);
+ int start_bitrate =
+ allocator_->AddBitrateObserver(&bitrate_observer_1, 100000, 300000);
EXPECT_EQ(300000, start_bitrate);
- allocator_->AddBitrateObserver(&bitrate_observer_2, 200000, 200000, 300000,
- &start_bitrate);
+ start_bitrate =
+ allocator_->AddBitrateObserver(&bitrate_observer_2, 200000, 300000);
EXPECT_EQ(200000, start_bitrate);
// Test too low start bitrate, hence lower than sum of min. Min bitrates will
@@ -114,9 +115,8 @@ class BitrateAllocatorTestNoEnforceMin : public ::testing::Test {
// as intended.
TEST_F(BitrateAllocatorTestNoEnforceMin, OneBitrateObserver) {
TestBitrateObserver bitrate_observer_1;
- int start_bitrate;
- allocator_->AddBitrateObserver(&bitrate_observer_1, 200000, 100000, 400000,
- &start_bitrate);
+ int start_bitrate =
+ allocator_->AddBitrateObserver(&bitrate_observer_1, 100000, 400000);
EXPECT_EQ(300000, start_bitrate);
// High REMB.
@@ -135,18 +135,17 @@ TEST_F(BitrateAllocatorTestNoEnforceMin, ThreeBitrateObservers) {
TestBitrateObserver bitrate_observer_2;
TestBitrateObserver bitrate_observer_3;
// Set up the observers with min bitrates at 100000, 200000, and 300000.
- int start_bitrate;
- allocator_->AddBitrateObserver(&bitrate_observer_1, 200000, 100000, 400000,
- &start_bitrate);
+ int start_bitrate =
+ allocator_->AddBitrateObserver(&bitrate_observer_1, 100000, 400000);
EXPECT_EQ(300000, start_bitrate);
- allocator_->AddBitrateObserver(&bitrate_observer_2, 200000, 200000, 400000,
- &start_bitrate);
+ start_bitrate =
+ allocator_->AddBitrateObserver(&bitrate_observer_2, 200000, 400000);
EXPECT_EQ(200000, start_bitrate);
EXPECT_EQ(100000u, bitrate_observer_1.last_bitrate_);
- allocator_->AddBitrateObserver(&bitrate_observer_3, 200000, 300000, 400000,
- &start_bitrate);
+ start_bitrate =
+ allocator_->AddBitrateObserver(&bitrate_observer_3, 300000, 400000);
EXPECT_EQ(0, start_bitrate);
EXPECT_EQ(100000u, bitrate_observer_1.last_bitrate_);
EXPECT_EQ(200000u, bitrate_observer_2.last_bitrate_);
@@ -185,18 +184,17 @@ TEST_F(BitrateAllocatorTest, ThreeBitrateObserversLowRembEnforceMin) {
TestBitrateObserver bitrate_observer_1;
TestBitrateObserver bitrate_observer_2;
TestBitrateObserver bitrate_observer_3;
- int start_bitrate;
- allocator_->AddBitrateObserver(&bitrate_observer_1, 200000, 100000, 400000,
- &start_bitrate);
+ int start_bitrate =
+ allocator_->AddBitrateObserver(&bitrate_observer_1, 100000, 400000);
EXPECT_EQ(300000, start_bitrate);
- allocator_->AddBitrateObserver(&bitrate_observer_2, 200000, 200000, 400000,
- &start_bitrate);
+ start_bitrate =
+ allocator_->AddBitrateObserver(&bitrate_observer_2, 200000, 400000);
EXPECT_EQ(200000, start_bitrate);
EXPECT_EQ(100000u, bitrate_observer_1.last_bitrate_);
- allocator_->AddBitrateObserver(&bitrate_observer_3, 200000, 300000, 400000,
- &start_bitrate);
+ start_bitrate =
+ allocator_->AddBitrateObserver(&bitrate_observer_3, 300000, 400000);
EXPECT_EQ(300000, start_bitrate);
EXPECT_EQ(100000, static_cast<int>(bitrate_observer_1.last_bitrate_));
EXPECT_EQ(200000, static_cast<int>(bitrate_observer_2.last_bitrate_));
diff --git a/chromium/third_party/webrtc/modules/bitrate_controller/bitrate_controller.gypi b/chromium/third_party/webrtc/modules/bitrate_controller/bitrate_controller.gypi
index a0c2fc92f67..44c1b89ef2c 100644
--- a/chromium/third_party/webrtc/modules/bitrate_controller/bitrate_controller.gypi
+++ b/chromium/third_party/webrtc/modules/bitrate_controller/bitrate_controller.gypi
@@ -22,8 +22,6 @@
'include/bitrate_allocator.h',
'send_side_bandwidth_estimation.cc',
'send_side_bandwidth_estimation.h',
- 'send_time_history.cc',
- 'send_time_history.h',
],
# TODO(jschuh): Bug 1348: fix size_t to int truncations.
'msvs_disabled_warnings': [ 4267, ],
diff --git a/chromium/third_party/webrtc/modules/bitrate_controller/bitrate_controller_impl.cc b/chromium/third_party/webrtc/modules/bitrate_controller/bitrate_controller_impl.cc
index d54da99bef9..8857ee4b4a8 100644
--- a/chromium/third_party/webrtc/modules/bitrate_controller/bitrate_controller_impl.cc
+++ b/chromium/third_party/webrtc/modules/bitrate_controller/bitrate_controller_impl.cc
@@ -87,7 +87,6 @@ BitrateControllerImpl::BitrateControllerImpl(Clock* clock,
: clock_(clock),
observer_(observer),
last_bitrate_update_ms_(clock_->TimeInMilliseconds()),
- critsect_(CriticalSectionWrapper::CreateCriticalSection()),
bandwidth_estimation_(),
reserved_bitrate_bps_(0),
last_bitrate_bps_(0),
@@ -107,7 +106,7 @@ RtcpBandwidthObserver* BitrateControllerImpl::CreateRtcpBandwidthObserver() {
void BitrateControllerImpl::SetStartBitrate(int start_bitrate_bps) {
{
- CriticalSectionScoped cs(critsect_.get());
+ rtc::CritScope cs(&critsect_);
bandwidth_estimation_.SetSendBitrate(start_bitrate_bps);
}
MaybeTriggerOnNetworkChanged();
@@ -116,7 +115,7 @@ void BitrateControllerImpl::SetStartBitrate(int start_bitrate_bps) {
void BitrateControllerImpl::SetMinMaxBitrate(int min_bitrate_bps,
int max_bitrate_bps) {
{
- CriticalSectionScoped cs(critsect_.get());
+ rtc::CritScope cs(&critsect_);
bandwidth_estimation_.SetMinMaxBitrate(min_bitrate_bps, max_bitrate_bps);
}
MaybeTriggerOnNetworkChanged();
@@ -124,7 +123,7 @@ void BitrateControllerImpl::SetMinMaxBitrate(int min_bitrate_bps,
void BitrateControllerImpl::SetReservedBitrate(uint32_t reserved_bitrate_bps) {
{
- CriticalSectionScoped cs(critsect_.get());
+ rtc::CritScope cs(&critsect_);
reserved_bitrate_bps_ = reserved_bitrate_bps;
}
MaybeTriggerOnNetworkChanged();
@@ -132,15 +131,16 @@ void BitrateControllerImpl::SetReservedBitrate(uint32_t reserved_bitrate_bps) {
void BitrateControllerImpl::OnReceivedEstimatedBitrate(uint32_t bitrate) {
{
- CriticalSectionScoped cs(critsect_.get());
- bandwidth_estimation_.UpdateReceiverEstimate(bitrate);
+ rtc::CritScope cs(&critsect_);
+ bandwidth_estimation_.UpdateReceiverEstimate(clock_->TimeInMilliseconds(),
+ bitrate);
}
MaybeTriggerOnNetworkChanged();
}
int64_t BitrateControllerImpl::TimeUntilNextProcess() {
const int64_t kBitrateControllerUpdateIntervalMs = 25;
- CriticalSectionScoped cs(critsect_.get());
+ rtc::CritScope cs(&critsect_);
int64_t time_since_update_ms =
clock_->TimeInMilliseconds() - last_bitrate_update_ms_;
return std::max<int64_t>(
@@ -151,7 +151,7 @@ int32_t BitrateControllerImpl::Process() {
if (TimeUntilNextProcess() > 0)
return 0;
{
- CriticalSectionScoped cs(critsect_.get());
+ rtc::CritScope cs(&critsect_);
bandwidth_estimation_.UpdateEstimate(clock_->TimeInMilliseconds());
}
MaybeTriggerOnNetworkChanged();
@@ -165,7 +165,7 @@ void BitrateControllerImpl::OnReceivedRtcpReceiverReport(
int number_of_packets,
int64_t now_ms) {
{
- CriticalSectionScoped cs(critsect_.get());
+ rtc::CritScope cs(&critsect_);
bandwidth_estimation_.UpdateReceiverBlock(fraction_loss, rtt,
number_of_packets, now_ms);
}
@@ -183,7 +183,7 @@ void BitrateControllerImpl::MaybeTriggerOnNetworkChanged() {
bool BitrateControllerImpl::GetNetworkParameters(uint32_t* bitrate,
uint8_t* fraction_loss,
int64_t* rtt) {
- CriticalSectionScoped cs(critsect_.get());
+ rtc::CritScope cs(&critsect_);
int current_bitrate;
bandwidth_estimation_.CurrentEstimate(&current_bitrate, fraction_loss, rtt);
*bitrate = current_bitrate;
@@ -205,7 +205,7 @@ bool BitrateControllerImpl::GetNetworkParameters(uint32_t* bitrate,
}
bool BitrateControllerImpl::AvailableBandwidth(uint32_t* bandwidth) const {
- CriticalSectionScoped cs(critsect_.get());
+ rtc::CritScope cs(&critsect_);
int bitrate;
uint8_t fraction_loss;
int64_t rtt;
@@ -218,5 +218,4 @@ bool BitrateControllerImpl::AvailableBandwidth(uint32_t* bandwidth) const {
}
return false;
}
-
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/bitrate_controller/bitrate_controller_impl.h b/chromium/third_party/webrtc/modules/bitrate_controller/bitrate_controller_impl.h
index 3d38a54f538..a33a0e6f040 100644
--- a/chromium/third_party/webrtc/modules/bitrate_controller/bitrate_controller_impl.h
+++ b/chromium/third_party/webrtc/modules/bitrate_controller/bitrate_controller_impl.h
@@ -20,9 +20,9 @@
#include <list>
#include <utility>
+#include "webrtc/base/criticalsection.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/modules/bitrate_controller/send_side_bandwidth_estimation.h"
-#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
namespace webrtc {
@@ -64,24 +64,23 @@ class BitrateControllerImpl : public BitrateController {
void OnNetworkChanged(uint32_t bitrate,
uint8_t fraction_loss, // 0 - 255.
- int64_t rtt)
- EXCLUSIVE_LOCKS_REQUIRED(*critsect_);
+ int64_t rtt) EXCLUSIVE_LOCKS_REQUIRED(critsect_);
// Used by process thread.
Clock* clock_;
BitrateObserver* observer_;
int64_t last_bitrate_update_ms_;
- const rtc::scoped_ptr<CriticalSectionWrapper> critsect_;
- SendSideBandwidthEstimation bandwidth_estimation_ GUARDED_BY(*critsect_);
- uint32_t reserved_bitrate_bps_ GUARDED_BY(*critsect_);
+ mutable rtc::CriticalSection critsect_;
+ SendSideBandwidthEstimation bandwidth_estimation_ GUARDED_BY(critsect_);
+ uint32_t reserved_bitrate_bps_ GUARDED_BY(critsect_);
- uint32_t last_bitrate_bps_ GUARDED_BY(*critsect_);
- uint8_t last_fraction_loss_ GUARDED_BY(*critsect_);
- int64_t last_rtt_ms_ GUARDED_BY(*critsect_);
- uint32_t last_reserved_bitrate_bps_ GUARDED_BY(*critsect_);
+ uint32_t last_bitrate_bps_ GUARDED_BY(critsect_);
+ uint8_t last_fraction_loss_ GUARDED_BY(critsect_);
+ int64_t last_rtt_ms_ GUARDED_BY(critsect_);
+ uint32_t last_reserved_bitrate_bps_ GUARDED_BY(critsect_);
- DISALLOW_IMPLICIT_CONSTRUCTORS(BitrateControllerImpl);
+ RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(BitrateControllerImpl);
};
} // namespace webrtc
#endif // WEBRTC_MODULES_BITRATE_CONTROLLER_BITRATE_CONTROLLER_IMPL_H_
diff --git a/chromium/third_party/webrtc/modules/bitrate_controller/include/bitrate_allocator.h b/chromium/third_party/webrtc/modules/bitrate_controller/include/bitrate_allocator.h
index 9cc4b747116..5c58f569d23 100644
--- a/chromium/third_party/webrtc/modules/bitrate_controller/include/bitrate_allocator.h
+++ b/chromium/third_party/webrtc/modules/bitrate_controller/include/bitrate_allocator.h
@@ -37,15 +37,13 @@ class BitrateAllocator {
// Set the start and max send bitrate used by the bandwidth management.
//
- // observer, updates bitrates if already in use.
- // min_bitrate_bps = 0 equals no min bitrate.
- // max_bitrate_bps = 0 equals no max bitrate.
- // TODO(holmer): Remove start_bitrate_bps when old API is gone.
+ // |observer| updates bitrates if already in use.
+ // |min_bitrate_bps| = 0 equals no min bitrate.
+ // |max_bitrate_bps| = 0 equals no max bitrate.
+ // Returns bitrate allocated for the bitrate observer.
int AddBitrateObserver(BitrateObserver* observer,
- uint32_t start_bitrate_bps,
uint32_t min_bitrate_bps,
- uint32_t max_bitrate_bps,
- int* new_observer_bitrate_bps);
+ uint32_t max_bitrate_bps);
void RemoveBitrateObserver(BitrateObserver* observer);
@@ -61,21 +59,16 @@ class BitrateAllocator {
private:
struct BitrateConfiguration {
- BitrateConfiguration(uint32_t start_bitrate,
- uint32_t min_bitrate,
- uint32_t max_bitrate)
- : start_bitrate_(start_bitrate),
- min_bitrate_(min_bitrate),
- max_bitrate_(max_bitrate) {}
- uint32_t start_bitrate_;
- uint32_t min_bitrate_;
- uint32_t max_bitrate_;
+ BitrateConfiguration(uint32_t min_bitrate, uint32_t max_bitrate)
+ : min_bitrate(min_bitrate), max_bitrate(max_bitrate) {}
+ uint32_t min_bitrate;
+ uint32_t max_bitrate;
};
struct ObserverConfiguration {
ObserverConfiguration(BitrateObserver* observer, uint32_t bitrate)
- : observer_(observer), min_bitrate_(bitrate) {}
- BitrateObserver* observer_;
- uint32_t min_bitrate_;
+ : observer(observer), min_bitrate(bitrate) {}
+ BitrateObserver* const observer;
+ uint32_t min_bitrate;
};
typedef std::pair<BitrateObserver*, BitrateConfiguration>
BitrateObserverConfiguration;
diff --git a/chromium/third_party/webrtc/modules/bitrate_controller/include/bitrate_controller.h b/chromium/third_party/webrtc/modules/bitrate_controller/include/bitrate_controller.h
index 7303d069a4a..bb532886c70 100644
--- a/chromium/third_party/webrtc/modules/bitrate_controller/include/bitrate_controller.h
+++ b/chromium/third_party/webrtc/modules/bitrate_controller/include/bitrate_controller.h
@@ -23,6 +23,7 @@
namespace webrtc {
class CriticalSectionWrapper;
+struct PacketInfo;
class BitrateObserver {
// Observer class for bitrate changes announced due to change in bandwidth
diff --git a/chromium/third_party/webrtc/modules/bitrate_controller/send_side_bandwidth_estimation.cc b/chromium/third_party/webrtc/modules/bitrate_controller/send_side_bandwidth_estimation.cc
index 247361df47d..8505e7fd4d0 100644
--- a/chromium/third_party/webrtc/modules/bitrate_controller/send_side_bandwidth_estimation.cc
+++ b/chromium/third_party/webrtc/modules/bitrate_controller/send_side_bandwidth_estimation.cc
@@ -27,6 +27,7 @@ const int kLimitNumPackets = 20;
const int kAvgPacketSizeBytes = 1000;
const int kDefaultMinBitrateBps = 10000;
const int kDefaultMaxBitrateBps = 1000000000;
+const int64_t kLowBitrateLogPeriodMs = 10000;
struct UmaRampUpMetric {
const char* metric_name;
@@ -71,6 +72,7 @@ SendSideBandwidthEstimation::SendSideBandwidthEstimation()
bitrate_(0),
min_bitrate_configured_(kDefaultMinBitrateBps),
max_bitrate_configured_(kDefaultMaxBitrateBps),
+ last_low_bitrate_log_ms_(-1),
time_last_receiver_block_ms_(0),
last_fraction_loss_(0),
last_round_trip_time_ms_(0),
@@ -86,7 +88,7 @@ SendSideBandwidthEstimation::SendSideBandwidthEstimation()
SendSideBandwidthEstimation::~SendSideBandwidthEstimation() {}
void SendSideBandwidthEstimation::SetSendBitrate(int bitrate) {
- DCHECK_GT(bitrate, 0);
+ RTC_DCHECK_GT(bitrate, 0);
bitrate_ = bitrate;
// Clear last sent bitrate history so the new value can be used directly
@@ -96,7 +98,7 @@ void SendSideBandwidthEstimation::SetSendBitrate(int bitrate) {
void SendSideBandwidthEstimation::SetMinMaxBitrate(int min_bitrate,
int max_bitrate) {
- DCHECK_GE(min_bitrate, 0);
+ RTC_DCHECK_GE(min_bitrate, 0);
min_bitrate_configured_ = std::max(min_bitrate, kDefaultMinBitrateBps);
if (max_bitrate > 0) {
max_bitrate_configured_ =
@@ -118,9 +120,10 @@ void SendSideBandwidthEstimation::CurrentEstimate(int* bitrate,
*rtt = last_round_trip_time_ms_;
}
-void SendSideBandwidthEstimation::UpdateReceiverEstimate(uint32_t bandwidth) {
+void SendSideBandwidthEstimation::UpdateReceiverEstimate(
+ int64_t now_ms, uint32_t bandwidth) {
bwe_incoming_ = bandwidth;
- bitrate_ = CapBitrateToThresholds(bitrate_);
+ bitrate_ = CapBitrateToThresholds(now_ms, bitrate_);
}
void SendSideBandwidthEstimation::UpdateReceiverBlock(uint8_t fraction_loss,
@@ -200,7 +203,7 @@ void SendSideBandwidthEstimation::UpdateEstimate(int64_t now_ms) {
// packet loss reported, to allow startup bitrate probing.
if (last_fraction_loss_ == 0 && IsInStartPhase(now_ms) &&
bwe_incoming_ > bitrate_) {
- bitrate_ = CapBitrateToThresholds(bwe_incoming_);
+ bitrate_ = CapBitrateToThresholds(now_ms, bwe_incoming_);
min_bitrate_history_.clear();
min_bitrate_history_.push_back(std::make_pair(now_ms, bitrate_));
return;
@@ -251,7 +254,7 @@ void SendSideBandwidthEstimation::UpdateEstimate(int64_t now_ms) {
}
}
}
- bitrate_ = CapBitrateToThresholds(bitrate_);
+ bitrate_ = CapBitrateToThresholds(now_ms, bitrate_);
}
bool SendSideBandwidthEstimation::IsInStartPhase(int64_t now_ms) const {
@@ -279,7 +282,8 @@ void SendSideBandwidthEstimation::UpdateMinHistory(int64_t now_ms) {
min_bitrate_history_.push_back(std::make_pair(now_ms, bitrate_));
}
-uint32_t SendSideBandwidthEstimation::CapBitrateToThresholds(uint32_t bitrate) {
+uint32_t SendSideBandwidthEstimation::CapBitrateToThresholds(
+ int64_t now_ms, uint32_t bitrate) {
if (bwe_incoming_ > 0 && bitrate > bwe_incoming_) {
bitrate = bwe_incoming_;
}
@@ -287,9 +291,13 @@ uint32_t SendSideBandwidthEstimation::CapBitrateToThresholds(uint32_t bitrate) {
bitrate = max_bitrate_configured_;
}
if (bitrate < min_bitrate_configured_) {
- LOG(LS_WARNING) << "Estimated available bandwidth " << bitrate / 1000
- << " kbps is below configured min bitrate "
- << min_bitrate_configured_ / 1000 << " kbps.";
+ if (last_low_bitrate_log_ms_ == -1 ||
+ now_ms - last_low_bitrate_log_ms_ > kLowBitrateLogPeriodMs) {
+ LOG(LS_WARNING) << "Estimated available bandwidth " << bitrate / 1000
+ << " kbps is below configured min bitrate "
+ << min_bitrate_configured_ / 1000 << " kbps.";
+ last_low_bitrate_log_ms_ = now_ms;
+ }
bitrate = min_bitrate_configured_;
}
return bitrate;
diff --git a/chromium/third_party/webrtc/modules/bitrate_controller/send_side_bandwidth_estimation.h b/chromium/third_party/webrtc/modules/bitrate_controller/send_side_bandwidth_estimation.h
index fb8962ad505..f50ad5184a8 100644
--- a/chromium/third_party/webrtc/modules/bitrate_controller/send_side_bandwidth_estimation.h
+++ b/chromium/third_party/webrtc/modules/bitrate_controller/send_side_bandwidth_estimation.h
@@ -30,7 +30,7 @@ class SendSideBandwidthEstimation {
void UpdateEstimate(int64_t now_ms);
// Call when we receive a RTCP message with TMMBR or REMB.
- void UpdateReceiverEstimate(uint32_t bandwidth);
+ void UpdateReceiverEstimate(int64_t now_ms, uint32_t bandwidth);
// Call when we receive a RTCP message with a ReceiveBlock.
void UpdateReceiverBlock(uint8_t fraction_loss,
@@ -51,7 +51,7 @@ class SendSideBandwidthEstimation {
// Returns the input bitrate capped to the thresholds defined by the max,
// min and incoming bandwidth.
- uint32_t CapBitrateToThresholds(uint32_t bitrate);
+ uint32_t CapBitrateToThresholds(int64_t now_ms, uint32_t bitrate);
// Updates history of min bitrates.
// After this method returns min_bitrate_history_.front().second contains the
@@ -67,6 +67,7 @@ class SendSideBandwidthEstimation {
uint32_t bitrate_;
uint32_t min_bitrate_configured_;
uint32_t max_bitrate_configured_;
+ int64_t last_low_bitrate_log_ms_;
int64_t time_last_receiver_block_ms_;
uint8_t last_fraction_loss_;
diff --git a/chromium/third_party/webrtc/modules/bitrate_controller/send_side_bandwidth_estimation_unittest.cc b/chromium/third_party/webrtc/modules/bitrate_controller/send_side_bandwidth_estimation_unittest.cc
index ab052b52446..75384ae2840 100644
--- a/chromium/third_party/webrtc/modules/bitrate_controller/send_side_bandwidth_estimation_unittest.cc
+++ b/chromium/third_party/webrtc/modules/bitrate_controller/send_side_bandwidth_estimation_unittest.cc
@@ -28,7 +28,7 @@ TEST(SendSideBweTest, InitialRembWithProbing) {
bwe.UpdateReceiverBlock(0, 50, 1, now_ms);
// Initial REMB applies immediately.
- bwe.UpdateReceiverEstimate(kRembBps);
+ bwe.UpdateReceiverEstimate(now_ms, kRembBps);
bwe.UpdateEstimate(now_ms);
int bitrate;
uint8_t fraction_loss;
@@ -38,7 +38,7 @@ TEST(SendSideBweTest, InitialRembWithProbing) {
// Second REMB doesn't apply immediately.
now_ms += 2001;
- bwe.UpdateReceiverEstimate(kSecondRembBps);
+ bwe.UpdateReceiverEstimate(now_ms, kSecondRembBps);
bwe.UpdateEstimate(now_ms);
bitrate = 0;
bwe.CurrentEstimate(&bitrate, &fraction_loss, &rtt);
diff --git a/chromium/third_party/webrtc/modules/bitrate_controller/send_time_history_unittest.cc b/chromium/third_party/webrtc/modules/bitrate_controller/send_time_history_unittest.cc
deleted file mode 100644
index fc7099dbdde..00000000000
--- a/chromium/third_party/webrtc/modules/bitrate_controller/send_time_history_unittest.cc
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <algorithm>
-#include <limits>
-#include <vector>
-
-#include "testing/gtest/include/gtest/gtest.h"
-#include "webrtc/modules/bitrate_controller/send_time_history.h"
-#include "webrtc/system_wrappers/interface/clock.h"
-
-namespace webrtc {
-
-static const int kDefaultHistoryLengthMs = 1000;
-
-class SendTimeHistoryTest : public ::testing::Test {
- protected:
- SendTimeHistoryTest() : history_(kDefaultHistoryLengthMs), clock_(0) {}
- ~SendTimeHistoryTest() {}
-
- virtual void SetUp() {}
-
- virtual void TearDown() {}
-
- SendTimeHistory history_;
- webrtc::SimulatedClock clock_;
-};
-
-TEST_F(SendTimeHistoryTest, AddRemoveOne) {
- const uint16_t kSeqNo = 1;
- const int64_t kTimestamp = 2;
- history_.AddAndRemoveOldSendTimes(kSeqNo, kTimestamp);
-
- int64_t time = 0;
- EXPECT_TRUE(history_.GetSendTime(kSeqNo, &time, false));
- EXPECT_EQ(kTimestamp, time);
-
- time = 0;
- EXPECT_TRUE(history_.GetSendTime(kSeqNo, &time, true));
- EXPECT_EQ(kTimestamp, time);
-
- time = 0;
- EXPECT_FALSE(history_.GetSendTime(kSeqNo, &time, true));
-}
-
-TEST_F(SendTimeHistoryTest, AddThenRemoveOutOfOrder) {
- struct Timestamp {
- Timestamp(uint16_t sequence_number, int64_t timestamp)
- : sequence_number(sequence_number), timestamp(timestamp) {}
- uint16_t sequence_number;
- int64_t timestamp;
- };
- std::vector<Timestamp> timestamps;
- const size_t num_items = 100;
- for (size_t i = 0; i < num_items; ++i) {
- timestamps.push_back(
- Timestamp(static_cast<uint16_t>(i), static_cast<int64_t>(i)));
- }
- std::vector<Timestamp> randomized_timestamps = timestamps;
- std::random_shuffle(randomized_timestamps.begin(),
- randomized_timestamps.end());
- for (size_t i = 0; i < num_items; ++i) {
- history_.AddAndRemoveOldSendTimes(timestamps[i].sequence_number,
- timestamps[i].timestamp);
- }
- for (size_t i = 0; i < num_items; ++i) {
- int64_t timestamp;
- EXPECT_TRUE(history_.GetSendTime(randomized_timestamps[i].sequence_number,
- &timestamp, false));
- EXPECT_EQ(randomized_timestamps[i].timestamp, timestamp);
- EXPECT_TRUE(history_.GetSendTime(randomized_timestamps[i].sequence_number,
- &timestamp, true));
- }
- for (size_t i = 0; i < num_items; ++i) {
- int64_t timestamp;
- EXPECT_FALSE(
- history_.GetSendTime(timestamps[i].sequence_number, &timestamp, false));
- }
-}
-
-TEST_F(SendTimeHistoryTest, HistorySize) {
- const int kItems = kDefaultHistoryLengthMs / 100;
- for (int i = 0; i < kItems; ++i) {
- history_.AddAndRemoveOldSendTimes(i, i * 100);
- }
- int64_t timestamp;
- for (int i = 0; i < kItems; ++i) {
- EXPECT_TRUE(history_.GetSendTime(i, &timestamp, false));
- EXPECT_EQ(i * 100, timestamp);
- }
- history_.AddAndRemoveOldSendTimes(kItems, kItems * 100);
- EXPECT_FALSE(history_.GetSendTime(0, &timestamp, false));
- for (int i = 1; i < (kItems + 1); ++i) {
- EXPECT_TRUE(history_.GetSendTime(i, &timestamp, false));
- EXPECT_EQ(i * 100, timestamp);
- }
-}
-
-TEST_F(SendTimeHistoryTest, HistorySizeWithWraparound) {
- const int kMaxSeqNo = std::numeric_limits<uint16_t>::max();
- history_.AddAndRemoveOldSendTimes(kMaxSeqNo - 2, 0);
- history_.AddAndRemoveOldSendTimes(kMaxSeqNo - 1, 100);
- history_.AddAndRemoveOldSendTimes(kMaxSeqNo, 200);
- history_.AddAndRemoveOldSendTimes(0, 1000);
- int64_t timestamp;
- EXPECT_FALSE(history_.GetSendTime(kMaxSeqNo - 2, &timestamp, false));
- EXPECT_TRUE(history_.GetSendTime(kMaxSeqNo - 1, &timestamp, false));
- EXPECT_TRUE(history_.GetSendTime(kMaxSeqNo, &timestamp, false));
- EXPECT_TRUE(history_.GetSendTime(0, &timestamp, false));
-
- // Create a gap (kMaxSeqNo - 1) -> 0.
- EXPECT_TRUE(history_.GetSendTime(kMaxSeqNo, &timestamp, true));
-
- history_.AddAndRemoveOldSendTimes(1, 1100);
-
- EXPECT_FALSE(history_.GetSendTime(kMaxSeqNo - 2, &timestamp, false));
- EXPECT_FALSE(history_.GetSendTime(kMaxSeqNo - 1, &timestamp, false));
- EXPECT_FALSE(history_.GetSendTime(kMaxSeqNo, &timestamp, false));
- EXPECT_TRUE(history_.GetSendTime(0, &timestamp, false));
- EXPECT_TRUE(history_.GetSendTime(1, &timestamp, false));
-}
-
-TEST_F(SendTimeHistoryTest, InterlievedGetAndRemove) {
- const uint16_t kSeqNo = 1;
- const int64_t kTimestamp = 2;
-
- history_.AddAndRemoveOldSendTimes(kSeqNo, kTimestamp);
- history_.AddAndRemoveOldSendTimes(kSeqNo + 1, kTimestamp + 1);
-
- int64_t time = 0;
- EXPECT_TRUE(history_.GetSendTime(kSeqNo, &time, true));
- EXPECT_EQ(kTimestamp, time);
-
- history_.AddAndRemoveOldSendTimes(kSeqNo + 2, kTimestamp + 2);
-
- EXPECT_TRUE(history_.GetSendTime(kSeqNo + 1, &time, true));
- EXPECT_EQ(kTimestamp + 1, time);
- EXPECT_TRUE(history_.GetSendTime(kSeqNo + 2, &time, true));
- EXPECT_EQ(kTimestamp + 2, time);
-}
-
-} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/desktop_capture/BUILD.gn b/chromium/third_party/webrtc/modules/desktop_capture/BUILD.gn
index ca1e66ec923..aa33993192a 100644
--- a/chromium/third_party/webrtc/modules/desktop_capture/BUILD.gn
+++ b/chromium/third_party/webrtc/modules/desktop_capture/BUILD.gn
@@ -12,6 +12,20 @@ import("../../build/webrtc.gni")
use_desktop_capture_differ_sse2 =
!is_ios && (current_cpu == "x86" || current_cpu == "x64")
+source_set("primitives") {
+ sources = [
+ "desktop_capture_types.h",
+ "desktop_frame.cc",
+ "desktop_frame.h",
+ "desktop_geometry.cc",
+ "desktop_geometry.h",
+ "desktop_region.cc",
+ "desktop_region.h",
+ ]
+
+ public_configs = [ "../..:common_inherited_config" ]
+}
+
source_set("desktop_capture") {
sources = [
"cropped_desktop_frame.cc",
@@ -23,17 +37,10 @@ source_set("desktop_capture") {
"desktop_and_cursor_composer.h",
"desktop_capture_options.cc",
"desktop_capture_options.h",
- "desktop_capture_types.h",
"desktop_capturer.h",
"desktop_capturer.h",
- "desktop_frame.cc",
- "desktop_frame.h",
"desktop_frame_win.cc",
"desktop_frame_win.h",
- "desktop_geometry.cc",
- "desktop_geometry.h",
- "desktop_region.cc",
- "desktop_region.h",
"differ.cc",
"differ.h",
"differ_block.cc",
@@ -120,13 +127,14 @@ source_set("desktop_capture") {
configs += [ "../..:common_config" ]
public_configs = [ "../..:common_inherited_config" ]
- if (is_clang) {
+ if (is_clang && !is_nacl) {
# Suppress warnings from Chrome's Clang plugins.
# See http://code.google.com/p/webrtc/issues/detail?id=163 for details.
configs -= [ "//build/config/clang:find_bad_constructs" ]
}
deps = [
+ ":primitives",
"../../base:rtc_base_approved",
"../../system_wrappers",
]
diff --git a/chromium/third_party/webrtc/modules/desktop_capture/cropped_desktop_frame.cc b/chromium/third_party/webrtc/modules/desktop_capture/cropped_desktop_frame.cc
index 9ab6fe9c7c7..2c709733e15 100644
--- a/chromium/third_party/webrtc/modules/desktop_capture/cropped_desktop_frame.cc
+++ b/chromium/third_party/webrtc/modules/desktop_capture/cropped_desktop_frame.cc
@@ -20,7 +20,7 @@ class CroppedDesktopFrame : public DesktopFrame {
private:
rtc::scoped_ptr<DesktopFrame> frame_;
- DISALLOW_COPY_AND_ASSIGN(CroppedDesktopFrame);
+ RTC_DISALLOW_COPY_AND_ASSIGN(CroppedDesktopFrame);
};
DesktopFrame*
diff --git a/chromium/third_party/webrtc/modules/desktop_capture/cropping_window_capturer_win.cc b/chromium/third_party/webrtc/modules/desktop_capture/cropping_window_capturer_win.cc
index deffe665ee5..73a2aa9d6ec 100644
--- a/chromium/third_party/webrtc/modules/desktop_capture/cropping_window_capturer_win.cc
+++ b/chromium/third_party/webrtc/modules/desktop_capture/cropping_window_capturer_win.cc
@@ -119,10 +119,12 @@ class CroppingWindowCapturerWin : public CroppingWindowCapturer {
// The region from GetWindowRgn in the desktop coordinate if the region is
// rectangular, or the rect from GetWindowRect if the region is not set.
DesktopRect window_region_rect_;
+
+ AeroChecker aero_checker_;
};
bool CroppingWindowCapturerWin::ShouldUseScreenCapturer() {
- if (!rtc::IsWindows8OrLater())
+ if (!rtc::IsWindows8OrLater() && aero_checker_.IsAeroEnabled())
return false;
// Check if the window is a translucent layered window.
diff --git a/chromium/third_party/webrtc/modules/desktop_capture/desktop_and_cursor_composer.cc b/chromium/third_party/webrtc/modules/desktop_capture/desktop_and_cursor_composer.cc
index f5a7fe8279d..74d25d4e1fa 100644
--- a/chromium/third_party/webrtc/modules/desktop_capture/desktop_and_cursor_composer.cc
+++ b/chromium/third_party/webrtc/modules/desktop_capture/desktop_and_cursor_composer.cc
@@ -67,7 +67,7 @@ class DesktopFrameWithCursor : public DesktopFrame {
DesktopVector restore_position_;
rtc::scoped_ptr<DesktopFrame> restore_frame_;
- DISALLOW_COPY_AND_ASSIGN(DesktopFrameWithCursor);
+ RTC_DISALLOW_COPY_AND_ASSIGN(DesktopFrameWithCursor);
};
DesktopFrameWithCursor::DesktopFrameWithCursor(DesktopFrame* frame,
diff --git a/chromium/third_party/webrtc/modules/desktop_capture/desktop_and_cursor_composer.h b/chromium/third_party/webrtc/modules/desktop_capture/desktop_and_cursor_composer.h
index b16cf4584fa..7a72031c791 100644
--- a/chromium/third_party/webrtc/modules/desktop_capture/desktop_and_cursor_composer.h
+++ b/chromium/third_party/webrtc/modules/desktop_capture/desktop_and_cursor_composer.h
@@ -55,7 +55,7 @@ class DesktopAndCursorComposer : public DesktopCapturer,
MouseCursorMonitor::CursorState cursor_state_;
DesktopVector cursor_position_;
- DISALLOW_COPY_AND_ASSIGN(DesktopAndCursorComposer);
+ RTC_DISALLOW_COPY_AND_ASSIGN(DesktopAndCursorComposer);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/desktop_capture/desktop_frame.h b/chromium/third_party/webrtc/modules/desktop_capture/desktop_frame.h
index 29d50769595..49b964630ce 100644
--- a/chromium/third_party/webrtc/modules/desktop_capture/desktop_frame.h
+++ b/chromium/third_party/webrtc/modules/desktop_capture/desktop_frame.h
@@ -91,20 +91,20 @@ class DesktopFrame {
rtc::scoped_ptr<DesktopRegion> shape_;
private:
- DISALLOW_COPY_AND_ASSIGN(DesktopFrame);
+ RTC_DISALLOW_COPY_AND_ASSIGN(DesktopFrame);
};
// A DesktopFrame that stores data in the heap.
class BasicDesktopFrame : public DesktopFrame {
public:
explicit BasicDesktopFrame(DesktopSize size);
- virtual ~BasicDesktopFrame();
+ ~BasicDesktopFrame() override;
// Creates a BasicDesktopFrame that contains copy of |frame|.
static DesktopFrame* CopyOf(const DesktopFrame& frame);
private:
- DISALLOW_COPY_AND_ASSIGN(BasicDesktopFrame);
+ RTC_DISALLOW_COPY_AND_ASSIGN(BasicDesktopFrame);
};
// A DesktopFrame that stores data in shared memory.
@@ -114,10 +114,10 @@ class SharedMemoryDesktopFrame : public DesktopFrame {
SharedMemoryDesktopFrame(DesktopSize size,
int stride,
SharedMemory* shared_memory);
- virtual ~SharedMemoryDesktopFrame();
+ ~SharedMemoryDesktopFrame() override;
private:
- DISALLOW_COPY_AND_ASSIGN(SharedMemoryDesktopFrame);
+ RTC_DISALLOW_COPY_AND_ASSIGN(SharedMemoryDesktopFrame);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/desktop_capture/desktop_frame_win.h b/chromium/third_party/webrtc/modules/desktop_capture/desktop_frame_win.h
index 9530fdc89b4..15b5883c36f 100644
--- a/chromium/third_party/webrtc/modules/desktop_capture/desktop_frame_win.h
+++ b/chromium/third_party/webrtc/modules/desktop_capture/desktop_frame_win.h
@@ -40,7 +40,7 @@ class DesktopFrameWin : public DesktopFrame {
HBITMAP bitmap_;
rtc::scoped_ptr<SharedMemory> owned_shared_memory_;
- DISALLOW_COPY_AND_ASSIGN(DesktopFrameWin);
+ RTC_DISALLOW_COPY_AND_ASSIGN(DesktopFrameWin);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/desktop_capture/desktop_region.cc b/chromium/third_party/webrtc/modules/desktop_capture/desktop_region.cc
index 90428199a4b..bc9972660ad 100644
--- a/chromium/third_party/webrtc/modules/desktop_capture/desktop_region.cc
+++ b/chromium/third_party/webrtc/modules/desktop_capture/desktop_region.cc
@@ -511,6 +511,8 @@ DesktopRegion::Iterator::Iterator(const DesktopRegion& region)
}
}
+DesktopRegion::Iterator::~Iterator() {}
+
bool DesktopRegion::Iterator::IsAtEnd() const {
return row_ == region_.rows_.end();
}
diff --git a/chromium/third_party/webrtc/modules/desktop_capture/desktop_region.h b/chromium/third_party/webrtc/modules/desktop_capture/desktop_region.h
index c4528ae3496..c86da56e173 100644
--- a/chromium/third_party/webrtc/modules/desktop_capture/desktop_region.h
+++ b/chromium/third_party/webrtc/modules/desktop_capture/desktop_region.h
@@ -67,6 +67,7 @@ class DesktopRegion {
class Iterator {
public:
explicit Iterator(const DesktopRegion& target);
+ ~Iterator();
bool IsAtEnd() const;
void Advance();
diff --git a/chromium/third_party/webrtc/modules/desktop_capture/differ.h b/chromium/third_party/webrtc/modules/desktop_capture/differ.h
index 224c6913afc..b3b0e7c244f 100644
--- a/chromium/third_party/webrtc/modules/desktop_capture/differ.h
+++ b/chromium/third_party/webrtc/modules/desktop_capture/differ.h
@@ -81,7 +81,7 @@ class Differ {
int diff_info_height_;
int diff_info_size_;
- DISALLOW_COPY_AND_ASSIGN(Differ);
+ RTC_DISALLOW_COPY_AND_ASSIGN(Differ);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/desktop_capture/differ_unittest.cc b/chromium/third_party/webrtc/modules/desktop_capture/differ_unittest.cc
index 3b6f859a7c4..642cb374480 100644
--- a/chromium/third_party/webrtc/modules/desktop_capture/differ_unittest.cc
+++ b/chromium/third_party/webrtc/modules/desktop_capture/differ_unittest.cc
@@ -204,7 +204,7 @@ class DifferTest : public testing::Test {
rtc::scoped_ptr<uint8_t[]> curr_;
private:
- DISALLOW_COPY_AND_ASSIGN(DifferTest);
+ RTC_DISALLOW_COPY_AND_ASSIGN(DifferTest);
};
TEST_F(DifferTest, Setup) {
diff --git a/chromium/third_party/webrtc/modules/desktop_capture/mac/desktop_configuration_monitor.h b/chromium/third_party/webrtc/modules/desktop_capture/mac/desktop_configuration_monitor.h
index bd502f08158..fe6f01beb9a 100644
--- a/chromium/third_party/webrtc/modules/desktop_capture/mac/desktop_configuration_monitor.h
+++ b/chromium/third_party/webrtc/modules/desktop_capture/mac/desktop_configuration_monitor.h
@@ -58,7 +58,7 @@ class DesktopConfigurationMonitor {
MacDesktopConfiguration desktop_configuration_;
rtc::scoped_ptr<EventWrapper> display_configuration_capture_event_;
- DISALLOW_COPY_AND_ASSIGN(DesktopConfigurationMonitor);
+ RTC_DISALLOW_COPY_AND_ASSIGN(DesktopConfigurationMonitor);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/desktop_capture/mac/full_screen_chrome_window_detector.h b/chromium/third_party/webrtc/modules/desktop_capture/mac/full_screen_chrome_window_detector.h
index b24fc997e47..bddc8099b25 100644
--- a/chromium/third_party/webrtc/modules/desktop_capture/mac/full_screen_chrome_window_detector.h
+++ b/chromium/third_party/webrtc/modules/desktop_capture/mac/full_screen_chrome_window_detector.h
@@ -61,7 +61,7 @@ class FullScreenChromeWindowDetector {
WindowCapturer::WindowList previous_window_list_;
TickTime last_udpate_time_;
- DISALLOW_COPY_AND_ASSIGN(FullScreenChromeWindowDetector);
+ RTC_DISALLOW_COPY_AND_ASSIGN(FullScreenChromeWindowDetector);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/desktop_capture/mac/scoped_pixel_buffer_object.h b/chromium/third_party/webrtc/modules/desktop_capture/mac/scoped_pixel_buffer_object.h
index 4d1dd1ffd6a..a32d470954b 100644
--- a/chromium/third_party/webrtc/modules/desktop_capture/mac/scoped_pixel_buffer_object.h
+++ b/chromium/third_party/webrtc/modules/desktop_capture/mac/scoped_pixel_buffer_object.h
@@ -33,7 +33,7 @@ class ScopedPixelBufferObject {
CGLContextObj cgl_context_;
GLuint pixel_buffer_object_;
- DISALLOW_COPY_AND_ASSIGN(ScopedPixelBufferObject);
+ RTC_DISALLOW_COPY_AND_ASSIGN(ScopedPixelBufferObject);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/desktop_capture/mouse_cursor.h b/chromium/third_party/webrtc/modules/desktop_capture/mouse_cursor.h
index 1da98a49054..dd5dc0eb44d 100644
--- a/chromium/third_party/webrtc/modules/desktop_capture/mouse_cursor.h
+++ b/chromium/third_party/webrtc/modules/desktop_capture/mouse_cursor.h
@@ -40,7 +40,7 @@ class MouseCursor {
rtc::scoped_ptr<DesktopFrame> image_;
DesktopVector hotspot_;
- DISALLOW_COPY_AND_ASSIGN(MouseCursor);
+ RTC_DISALLOW_COPY_AND_ASSIGN(MouseCursor);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/desktop_capture/screen_capture_frame_queue.h b/chromium/third_party/webrtc/modules/desktop_capture/screen_capture_frame_queue.h
index f3b11cfbffa..6cd9e3bfc8e 100644
--- a/chromium/third_party/webrtc/modules/desktop_capture/screen_capture_frame_queue.h
+++ b/chromium/third_party/webrtc/modules/desktop_capture/screen_capture_frame_queue.h
@@ -66,7 +66,7 @@ class ScreenCaptureFrameQueue {
static const int kQueueLength = 2;
rtc::scoped_ptr<SharedDesktopFrame> frames_[kQueueLength];
- DISALLOW_COPY_AND_ASSIGN(ScreenCaptureFrameQueue);
+ RTC_DISALLOW_COPY_AND_ASSIGN(ScreenCaptureFrameQueue);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/desktop_capture/screen_capturer_helper.h b/chromium/third_party/webrtc/modules/desktop_capture/screen_capturer_helper.h
index a8be9897910..cc60203da07 100644
--- a/chromium/third_party/webrtc/modules/desktop_capture/screen_capturer_helper.h
+++ b/chromium/third_party/webrtc/modules/desktop_capture/screen_capturer_helper.h
@@ -80,7 +80,7 @@ class ScreenCapturerHelper {
// If the value is <= 0, then the invalid region is not expanded to a grid.
int log_grid_size_;
- DISALLOW_COPY_AND_ASSIGN(ScreenCapturerHelper);
+ RTC_DISALLOW_COPY_AND_ASSIGN(ScreenCapturerHelper);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/desktop_capture/screen_capturer_mac.mm b/chromium/third_party/webrtc/modules/desktop_capture/screen_capturer_mac.mm
index ceb078145d2..8da56d2f761 100644
--- a/chromium/third_party/webrtc/modules/desktop_capture/screen_capturer_mac.mm
+++ b/chromium/third_party/webrtc/modules/desktop_capture/screen_capturer_mac.mm
@@ -280,7 +280,7 @@ class ScreenCapturerMac : public ScreenCapturer {
CGWindowID excluded_window_;
- DISALLOW_COPY_AND_ASSIGN(ScreenCapturerMac);
+ RTC_DISALLOW_COPY_AND_ASSIGN(ScreenCapturerMac);
};
// DesktopFrame wrapper that flips wrapped frame upside down by inverting
@@ -303,7 +303,7 @@ class InvertedDesktopFrame : public DesktopFrame {
private:
rtc::scoped_ptr<DesktopFrame> original_frame_;
- DISALLOW_COPY_AND_ASSIGN(InvertedDesktopFrame);
+ RTC_DISALLOW_COPY_AND_ASSIGN(InvertedDesktopFrame);
};
ScreenCapturerMac::ScreenCapturerMac(
diff --git a/chromium/third_party/webrtc/modules/desktop_capture/screen_capturer_mock_objects.h b/chromium/third_party/webrtc/modules/desktop_capture/screen_capturer_mock_objects.h
index 373e66f7bd7..8b83f412528 100644
--- a/chromium/third_party/webrtc/modules/desktop_capture/screen_capturer_mock_objects.h
+++ b/chromium/third_party/webrtc/modules/desktop_capture/screen_capturer_mock_objects.h
@@ -27,7 +27,7 @@ class MockScreenCapturer : public ScreenCapturer {
MOCK_METHOD1(SelectScreen, bool(ScreenId id));
private:
- DISALLOW_COPY_AND_ASSIGN(MockScreenCapturer);
+ RTC_DISALLOW_COPY_AND_ASSIGN(MockScreenCapturer);
};
class MockScreenCapturerCallback : public ScreenCapturer::Callback {
@@ -39,7 +39,7 @@ class MockScreenCapturerCallback : public ScreenCapturer::Callback {
MOCK_METHOD1(OnCaptureCompleted, void(DesktopFrame*));
private:
- DISALLOW_COPY_AND_ASSIGN(MockScreenCapturerCallback);
+ RTC_DISALLOW_COPY_AND_ASSIGN(MockScreenCapturerCallback);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/desktop_capture/screen_capturer_unittest.cc b/chromium/third_party/webrtc/modules/desktop_capture/screen_capturer_unittest.cc
index 606c06153de..a3cf6d93ccb 100644
--- a/chromium/third_party/webrtc/modules/desktop_capture/screen_capturer_unittest.cc
+++ b/chromium/third_party/webrtc/modules/desktop_capture/screen_capturer_unittest.cc
@@ -51,7 +51,7 @@ class FakeSharedMemory : public SharedMemory {
}
private:
char* buffer_;
- DISALLOW_COPY_AND_ASSIGN(FakeSharedMemory);
+ RTC_DISALLOW_COPY_AND_ASSIGN(FakeSharedMemory);
};
SharedMemory* ScreenCapturerTest::CreateSharedMemory(size_t size) {
diff --git a/chromium/third_party/webrtc/modules/desktop_capture/screen_capturer_x11.cc b/chromium/third_party/webrtc/modules/desktop_capture/screen_capturer_x11.cc
index 1597e3fb611..75655762e9b 100644
--- a/chromium/third_party/webrtc/modules/desktop_capture/screen_capturer_x11.cc
+++ b/chromium/third_party/webrtc/modules/desktop_capture/screen_capturer_x11.cc
@@ -30,9 +30,12 @@
// TODO(sergeyu): Move this to a header where it can be shared.
#if defined(NDEBUG)
-#define DCHECK(condition) (void)(condition)
+#define RTC_DCHECK(condition) (void)(condition)
#else // NDEBUG
-#define DCHECK(condition) if (!(condition)) {abort();}
+#define RTC_DCHECK(condition) \
+ if (!(condition)) { \
+ abort(); \
+ }
#endif
namespace webrtc {
@@ -121,7 +124,7 @@ class ScreenCapturerLinux : public ScreenCapturer,
// |Differ| for use when polling for changes.
rtc::scoped_ptr<Differ> differ_;
- DISALLOW_COPY_AND_ASSIGN(ScreenCapturerLinux);
+ RTC_DISALLOW_COPY_AND_ASSIGN(ScreenCapturerLinux);
};
ScreenCapturerLinux::ScreenCapturerLinux()
@@ -233,8 +236,8 @@ void ScreenCapturerLinux::InitXDamage() {
}
void ScreenCapturerLinux::Start(Callback* callback) {
- DCHECK(!callback_);
- DCHECK(callback);
+ RTC_DCHECK(!callback_);
+ RTC_DCHECK(callback);
callback_ = callback;
}
@@ -285,7 +288,7 @@ void ScreenCapturerLinux::Capture(const DesktopRegion& region) {
}
bool ScreenCapturerLinux::GetScreenList(ScreenList* screens) {
- DCHECK(screens->size() == 0);
+ RTC_DCHECK(screens->size() == 0);
// TODO(jiayl): implement screen enumeration.
Screen default_screen;
default_screen.id = 0;
@@ -304,7 +307,7 @@ bool ScreenCapturerLinux::HandleXEvent(const XEvent& event) {
reinterpret_cast<const XDamageNotifyEvent*>(&event);
if (damage_event->damage != damage_handle_)
return false;
- DCHECK(damage_event->level == XDamageReportNonEmpty);
+ RTC_DCHECK(damage_event->level == XDamageReportNonEmpty);
return true;
} else if (event.type == ConfigureNotify) {
ScreenConfigurationChanged();
@@ -367,8 +370,8 @@ DesktopFrame* ScreenCapturerLinux::CaptureScreen() {
if (queue_.previous_frame()) {
// Full-screen polling, so calculate the invalid rects here, based on the
// changed pixels between current and previous buffers.
- DCHECK(differ_.get() != NULL);
- DCHECK(queue_.previous_frame()->data());
+ RTC_DCHECK(differ_.get() != NULL);
+ RTC_DCHECK(queue_.previous_frame()->data());
differ_->CalcDirtyRegion(queue_.previous_frame()->data(),
frame->data(), updated_region);
} else {
@@ -403,11 +406,11 @@ void ScreenCapturerLinux::SynchronizeFrame() {
// TODO(hclam): We can reduce the amount of copying here by subtracting
// |capturer_helper_|s region from |last_invalid_region_|.
// http://crbug.com/92354
- DCHECK(queue_.previous_frame());
+ RTC_DCHECK(queue_.previous_frame());
DesktopFrame* current = queue_.current_frame();
DesktopFrame* last = queue_.previous_frame();
- DCHECK(current != last);
+ RTC_DCHECK(current != last);
for (DesktopRegion::Iterator it(last_invalid_region_);
!it.IsAtEnd(); it.Advance()) {
current->CopyPixelsFrom(*last, it.rect().top_left(), it.rect());
diff --git a/chromium/third_party/webrtc/modules/desktop_capture/shared_desktop_frame.cc b/chromium/third_party/webrtc/modules/desktop_capture/shared_desktop_frame.cc
index 7651816473e..97190e04731 100644
--- a/chromium/third_party/webrtc/modules/desktop_capture/shared_desktop_frame.cc
+++ b/chromium/third_party/webrtc/modules/desktop_capture/shared_desktop_frame.cc
@@ -41,7 +41,7 @@ class SharedDesktopFrame::Core {
Atomic32 ref_count_;
rtc::scoped_ptr<DesktopFrame> frame_;
- DISALLOW_COPY_AND_ASSIGN(Core);
+ RTC_DISALLOW_COPY_AND_ASSIGN(Core);
};
SharedDesktopFrame::~SharedDesktopFrame() {}
diff --git a/chromium/third_party/webrtc/modules/desktop_capture/shared_desktop_frame.h b/chromium/third_party/webrtc/modules/desktop_capture/shared_desktop_frame.h
index 12d373a0f3e..7d18db153cd 100644
--- a/chromium/third_party/webrtc/modules/desktop_capture/shared_desktop_frame.h
+++ b/chromium/third_party/webrtc/modules/desktop_capture/shared_desktop_frame.h
@@ -41,7 +41,7 @@ class SharedDesktopFrame : public DesktopFrame {
rtc::scoped_refptr<Core> core_;
- DISALLOW_COPY_AND_ASSIGN(SharedDesktopFrame);
+ RTC_DISALLOW_COPY_AND_ASSIGN(SharedDesktopFrame);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/desktop_capture/shared_memory.h b/chromium/third_party/webrtc/modules/desktop_capture/shared_memory.h
index 7870d833f15..631f119b5ff 100644
--- a/chromium/third_party/webrtc/modules/desktop_capture/shared_memory.h
+++ b/chromium/third_party/webrtc/modules/desktop_capture/shared_memory.h
@@ -59,7 +59,7 @@ class SharedMemory {
const int id_;
private:
- DISALLOW_COPY_AND_ASSIGN(SharedMemory);
+ RTC_DISALLOW_COPY_AND_ASSIGN(SharedMemory);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/desktop_capture/win/desktop.h b/chromium/third_party/webrtc/modules/desktop_capture/win/desktop.h
index 0f3e64d05d2..dc3b8c61b98 100644
--- a/chromium/third_party/webrtc/modules/desktop_capture/win/desktop.h
+++ b/chromium/third_party/webrtc/modules/desktop_capture/win/desktop.h
@@ -56,7 +56,7 @@ class Desktop {
// True if |desktop_| must be closed on teardown.
bool own_;
- DISALLOW_COPY_AND_ASSIGN(Desktop);
+ RTC_DISALLOW_COPY_AND_ASSIGN(Desktop);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/desktop_capture/win/scoped_gdi_object.h b/chromium/third_party/webrtc/modules/desktop_capture/win/scoped_gdi_object.h
index 366df6d4ff0..1cac63e43d9 100644
--- a/chromium/third_party/webrtc/modules/desktop_capture/win/scoped_gdi_object.h
+++ b/chromium/third_party/webrtc/modules/desktop_capture/win/scoped_gdi_object.h
@@ -56,7 +56,7 @@ class ScopedGDIObject {
private:
T handle_;
- DISALLOW_COPY_AND_ASSIGN(ScopedGDIObject);
+ RTC_DISALLOW_COPY_AND_ASSIGN(ScopedGDIObject);
};
// The traits class that uses DeleteObject() to close a handle.
@@ -70,7 +70,7 @@ class DeleteObjectTraits {
}
private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(DeleteObjectTraits);
+ RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(DeleteObjectTraits);
};
// The traits class that uses DestroyCursor() to close a handle.
@@ -83,7 +83,7 @@ class DestroyCursorTraits {
}
private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(DestroyCursorTraits);
+ RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(DestroyCursorTraits);
};
typedef ScopedGDIObject<HBITMAP, DeleteObjectTraits<HBITMAP> > ScopedBitmap;
diff --git a/chromium/third_party/webrtc/modules/desktop_capture/win/scoped_thread_desktop.h b/chromium/third_party/webrtc/modules/desktop_capture/win/scoped_thread_desktop.h
index 7566e6a0e17..df8652ac9d0 100644
--- a/chromium/third_party/webrtc/modules/desktop_capture/win/scoped_thread_desktop.h
+++ b/chromium/third_party/webrtc/modules/desktop_capture/win/scoped_thread_desktop.h
@@ -45,7 +45,7 @@ class ScopedThreadDesktop {
// The desktop handle assigned to the calling thread at creation.
rtc::scoped_ptr<Desktop> initial_;
- DISALLOW_COPY_AND_ASSIGN(ScopedThreadDesktop);
+ RTC_DISALLOW_COPY_AND_ASSIGN(ScopedThreadDesktop);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/desktop_capture/win/screen_capturer_win_gdi.cc b/chromium/third_party/webrtc/modules/desktop_capture/win/screen_capturer_win_gdi.cc
index 352433e1cdb..4d76fce0fa2 100644
--- a/chromium/third_party/webrtc/modules/desktop_capture/win/screen_capturer_win_gdi.cc
+++ b/chromium/third_party/webrtc/modules/desktop_capture/win/screen_capturer_win_gdi.cc
@@ -241,9 +241,10 @@ bool ScreenCapturerWinGdi::CaptureImage() {
DesktopFrame::kBytesPerPixel;
SharedMemory* shared_memory = callback_->CreateSharedMemory(buffer_size);
- rtc::scoped_ptr<DesktopFrame> buffer;
- buffer.reset(
+ rtc::scoped_ptr<DesktopFrame> buffer(
DesktopFrameWin::Create(size, shared_memory, desktop_dc_));
+ if (!buffer.get())
+ return false;
queue_.ReplaceCurrentFrame(buffer.release());
}
diff --git a/chromium/third_party/webrtc/modules/desktop_capture/win/screen_capturer_win_gdi.h b/chromium/third_party/webrtc/modules/desktop_capture/win/screen_capturer_win_gdi.h
index 6014a1810e0..202b9aaa87d 100644
--- a/chromium/third_party/webrtc/modules/desktop_capture/win/screen_capturer_win_gdi.h
+++ b/chromium/third_party/webrtc/modules/desktop_capture/win/screen_capturer_win_gdi.h
@@ -81,7 +81,7 @@ class ScreenCapturerWinGdi : public ScreenCapturer {
// Used to suppress duplicate logging of SetThreadExecutionState errors.
bool set_thread_execution_state_failed_;
- DISALLOW_COPY_AND_ASSIGN(ScreenCapturerWinGdi);
+ RTC_DISALLOW_COPY_AND_ASSIGN(ScreenCapturerWinGdi);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/desktop_capture/win/screen_capturer_win_magnifier.h b/chromium/third_party/webrtc/modules/desktop_capture/win/screen_capturer_win_magnifier.h
index b33b873430a..9ad6b58d9b5 100644
--- a/chromium/third_party/webrtc/modules/desktop_capture/win/screen_capturer_win_magnifier.h
+++ b/chromium/third_party/webrtc/modules/desktop_capture/win/screen_capturer_win_magnifier.h
@@ -145,7 +145,7 @@ class ScreenCapturerWinMagnifier : public ScreenCapturer {
// successfully. Reset at the beginning of each CaptureImage call.
bool magnifier_capture_succeeded_;
- DISALLOW_COPY_AND_ASSIGN(ScreenCapturerWinMagnifier);
+ RTC_DISALLOW_COPY_AND_ASSIGN(ScreenCapturerWinMagnifier);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/desktop_capture/win/window_capture_utils.cc b/chromium/third_party/webrtc/modules/desktop_capture/win/window_capture_utils.cc
index bfe7363f32d..83922ea7f80 100644
--- a/chromium/third_party/webrtc/modules/desktop_capture/win/window_capture_utils.cc
+++ b/chromium/third_party/webrtc/modules/desktop_capture/win/window_capture_utils.cc
@@ -43,4 +43,27 @@ GetCroppedWindowRect(HWND window,
return true;
}
+AeroChecker::AeroChecker() : dwmapi_library_(nullptr), func_(nullptr) {
+ // Try to load dwmapi.dll dynamically since it is not available on XP.
+ dwmapi_library_ = LoadLibrary(L"dwmapi.dll");
+ if (dwmapi_library_) {
+ func_ = reinterpret_cast<DwmIsCompositionEnabledFunc>(
+ GetProcAddress(dwmapi_library_, "DwmIsCompositionEnabled"));
+ }
+}
+
+AeroChecker::~AeroChecker() {
+ if (dwmapi_library_) {
+ FreeLibrary(dwmapi_library_);
+ }
+}
+
+bool AeroChecker::IsAeroEnabled() {
+ BOOL result = FALSE;
+ if (func_) {
+ func_(&result);
+ }
+ return result != FALSE;
+}
+
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/desktop_capture/win/window_capture_utils.h b/chromium/third_party/webrtc/modules/desktop_capture/win/window_capture_utils.h
index 2a3a470c59e..7c80490f609 100644
--- a/chromium/third_party/webrtc/modules/desktop_capture/win/window_capture_utils.h
+++ b/chromium/third_party/webrtc/modules/desktop_capture/win/window_capture_utils.h
@@ -22,4 +22,19 @@ bool GetCroppedWindowRect(HWND window,
DesktopRect* cropped_rect,
DesktopRect* original_rect);
+typedef HRESULT (WINAPI *DwmIsCompositionEnabledFunc)(BOOL* enabled);
+class AeroChecker {
+ public:
+ AeroChecker();
+ ~AeroChecker();
+
+ bool IsAeroEnabled();
+
+ private:
+ HMODULE dwmapi_library_;
+ DwmIsCompositionEnabledFunc func_;
+
+ RTC_DISALLOW_COPY_AND_ASSIGN(AeroChecker);
+};
+
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/desktop_capture/window_capturer_mac.mm b/chromium/third_party/webrtc/modules/desktop_capture/window_capturer_mac.mm
index 3acca67cb04..ecf6beeb2a5 100644
--- a/chromium/third_party/webrtc/modules/desktop_capture/window_capturer_mac.mm
+++ b/chromium/third_party/webrtc/modules/desktop_capture/window_capturer_mac.mm
@@ -66,7 +66,7 @@ class WindowCapturerMac : public WindowCapturer {
rtc::scoped_refptr<FullScreenChromeWindowDetector>
full_screen_chrome_window_detector_;
- DISALLOW_COPY_AND_ASSIGN(WindowCapturerMac);
+ RTC_DISALLOW_COPY_AND_ASSIGN(WindowCapturerMac);
};
WindowCapturerMac::WindowCapturerMac(rtc::scoped_refptr<
diff --git a/chromium/third_party/webrtc/modules/desktop_capture/window_capturer_null.cc b/chromium/third_party/webrtc/modules/desktop_capture/window_capturer_null.cc
index d9ad0ca0b10..b74f17e39bf 100755
--- a/chromium/third_party/webrtc/modules/desktop_capture/window_capturer_null.cc
+++ b/chromium/third_party/webrtc/modules/desktop_capture/window_capturer_null.cc
@@ -35,7 +35,7 @@ class WindowCapturerNull : public WindowCapturer {
private:
Callback* callback_;
- DISALLOW_COPY_AND_ASSIGN(WindowCapturerNull);
+ RTC_DISALLOW_COPY_AND_ASSIGN(WindowCapturerNull);
};
WindowCapturerNull::WindowCapturerNull()
diff --git a/chromium/third_party/webrtc/modules/desktop_capture/window_capturer_win.cc b/chromium/third_party/webrtc/modules/desktop_capture/window_capturer_win.cc
index c7e1fd879c8..322a5340c9e 100644
--- a/chromium/third_party/webrtc/modules/desktop_capture/window_capturer_win.cc
+++ b/chromium/third_party/webrtc/modules/desktop_capture/window_capturer_win.cc
@@ -13,6 +13,7 @@
#include <assert.h>
#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/base/checks.h"
#include "webrtc/base/win32.h"
#include "webrtc/modules/desktop_capture/desktop_frame_win.h"
#include "webrtc/modules/desktop_capture/win/window_capture_utils.h"
@@ -22,8 +23,6 @@ namespace webrtc {
namespace {
-typedef HRESULT (WINAPI *DwmIsCompositionEnabledFunc)(BOOL* enabled);
-
BOOL CALLBACK WindowsEnumerationHandler(HWND hwnd, LPARAM param) {
WindowCapturer::WindowList* list =
reinterpret_cast<WindowCapturer::WindowList*>(param);
@@ -41,13 +40,26 @@ BOOL CALLBACK WindowsEnumerationHandler(HWND hwnd, LPARAM param) {
// Skip the Program Manager window and the Start button.
const size_t kClassLength = 256;
WCHAR class_name[kClassLength];
- GetClassName(hwnd, class_name, kClassLength);
+ const int class_name_length = GetClassName(hwnd, class_name, kClassLength);
+ RTC_DCHECK(class_name_length)
+ << "Error retrieving the application's class name";
+
// Skip Program Manager window and the Start button. This is the same logic
// that's used in Win32WindowPicker in libjingle. Consider filtering other
// windows as well (e.g. toolbars).
if (wcscmp(class_name, L"Progman") == 0 || wcscmp(class_name, L"Button") == 0)
return TRUE;
+ // Windows 8 introduced a "Modern App" identified by their class name being
+ // either ApplicationFrameWindow or windows.UI.Core.coreWindow. The
+ // associated windows cannot be captured, so we skip them.
+ // http://crbug.com/526883.
+ if (rtc::IsWindows8OrLater() &&
+ (wcscmp(class_name, L"ApplicationFrameWindow") == 0 ||
+ wcscmp(class_name, L"Windows.UI.Core.CoreWindow") == 0)) {
+ return TRUE;
+ }
+
WindowCapturer::Window window;
window.id = reinterpret_cast<WindowCapturer::WindowId>(hwnd);
@@ -81,48 +93,25 @@ class WindowCapturerWin : public WindowCapturer {
void Capture(const DesktopRegion& region) override;
private:
- bool IsAeroEnabled();
-
Callback* callback_;
// HWND and HDC for the currently selected window or NULL if window is not
// selected.
HWND window_;
- // dwmapi.dll is used to determine if desktop compositing is enabled.
- HMODULE dwmapi_library_;
- DwmIsCompositionEnabledFunc is_composition_enabled_func_;
-
DesktopSize previous_size_;
- DISALLOW_COPY_AND_ASSIGN(WindowCapturerWin);
+ AeroChecker aero_checker_;
+
+ RTC_DISALLOW_COPY_AND_ASSIGN(WindowCapturerWin);
};
WindowCapturerWin::WindowCapturerWin()
: callback_(NULL),
window_(NULL) {
- // Try to load dwmapi.dll dynamically since it is not available on XP.
- dwmapi_library_ = LoadLibrary(L"dwmapi.dll");
- if (dwmapi_library_) {
- is_composition_enabled_func_ =
- reinterpret_cast<DwmIsCompositionEnabledFunc>(
- GetProcAddress(dwmapi_library_, "DwmIsCompositionEnabled"));
- assert(is_composition_enabled_func_);
- } else {
- is_composition_enabled_func_ = NULL;
- }
}
WindowCapturerWin::~WindowCapturerWin() {
- if (dwmapi_library_)
- FreeLibrary(dwmapi_library_);
-}
-
-bool WindowCapturerWin::IsAeroEnabled() {
- BOOL result = FALSE;
- if (is_composition_enabled_func_)
- is_composition_enabled_func_(&result);
- return result != FALSE;
}
bool WindowCapturerWin::GetWindowList(WindowList* windows) {
@@ -228,7 +217,7 @@ void WindowCapturerWin::Capture(const DesktopRegion& region) {
// capturing - it somehow affects what we get from BitBlt() on the subsequent
// captures.
- if (!IsAeroEnabled() || !previous_size_.equals(frame->size())) {
+ if (!aero_checker_.IsAeroEnabled() || !previous_size_.equals(frame->size())) {
result = PrintWindow(window_, mem_dc, 0);
}
diff --git a/chromium/third_party/webrtc/modules/desktop_capture/window_capturer_x11.cc b/chromium/third_party/webrtc/modules/desktop_capture/window_capturer_x11.cc
index 356830568db..76c95eba47d 100755
--- a/chromium/third_party/webrtc/modules/desktop_capture/window_capturer_x11.cc
+++ b/chromium/third_party/webrtc/modules/desktop_capture/window_capturer_x11.cc
@@ -82,7 +82,7 @@ class XWindowProperty {
unsigned long size_; // NOLINT: type required by XGetWindowProperty
unsigned char* data_;
- DISALLOW_COPY_AND_ASSIGN(XWindowProperty);
+ RTC_DISALLOW_COPY_AND_ASSIGN(XWindowProperty);
};
class WindowCapturerLinux : public WindowCapturer,
@@ -129,7 +129,7 @@ class WindowCapturerLinux : public WindowCapturer,
::Window selected_window_;
XServerPixelBuffer x_server_pixel_buffer_;
- DISALLOW_COPY_AND_ASSIGN(WindowCapturerLinux);
+ RTC_DISALLOW_COPY_AND_ASSIGN(WindowCapturerLinux);
};
WindowCapturerLinux::WindowCapturerLinux(const DesktopCaptureOptions& options)
diff --git a/chromium/third_party/webrtc/modules/desktop_capture/x11/shared_x_display.h b/chromium/third_party/webrtc/modules/desktop_capture/x11/shared_x_display.h
index 11c5bf4482f..02755450fc2 100644
--- a/chromium/third_party/webrtc/modules/desktop_capture/x11/shared_x_display.h
+++ b/chromium/third_party/webrtc/modules/desktop_capture/x11/shared_x_display.h
@@ -76,7 +76,7 @@ class SharedXDisplay {
EventHandlersMap event_handlers_;
- DISALLOW_COPY_AND_ASSIGN(SharedXDisplay);
+ RTC_DISALLOW_COPY_AND_ASSIGN(SharedXDisplay);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/desktop_capture/x11/x_error_trap.h b/chromium/third_party/webrtc/modules/desktop_capture/x11/x_error_trap.h
index aa771145d59..f1f6e11c63e 100644
--- a/chromium/third_party/webrtc/modules/desktop_capture/x11/x_error_trap.h
+++ b/chromium/third_party/webrtc/modules/desktop_capture/x11/x_error_trap.h
@@ -31,7 +31,7 @@ class XErrorTrap {
XErrorHandler original_error_handler_;
bool enabled_;
- DISALLOW_COPY_AND_ASSIGN(XErrorTrap);
+ RTC_DISALLOW_COPY_AND_ASSIGN(XErrorTrap);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/desktop_capture/x11/x_server_pixel_buffer.h b/chromium/third_party/webrtc/modules/desktop_capture/x11/x_server_pixel_buffer.h
index 98f263f3a88..d1e6632f082 100644
--- a/chromium/third_party/webrtc/modules/desktop_capture/x11/x_server_pixel_buffer.h
+++ b/chromium/third_party/webrtc/modules/desktop_capture/x11/x_server_pixel_buffer.h
@@ -77,7 +77,7 @@ class XServerPixelBuffer {
Pixmap shm_pixmap_;
GC shm_gc_;
- DISALLOW_COPY_AND_ASSIGN(XServerPixelBuffer);
+ RTC_DISALLOW_COPY_AND_ASSIGN(XServerPixelBuffer);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/interface/module_common_types.h b/chromium/third_party/webrtc/modules/interface/module_common_types.h
index 1202eee0435..67019cafaf0 100644
--- a/chromium/third_party/webrtc/modules/interface/module_common_types.h
+++ b/chromium/third_party/webrtc/modules/interface/module_common_types.h
@@ -32,8 +32,15 @@ struct RTPAudioHeader {
};
const int16_t kNoPictureId = -1;
+const int16_t kMaxOneBytePictureId = 0x7F; // 7 bits
+const int16_t kMaxTwoBytePictureId = 0x7FFF; // 15 bits
const int16_t kNoTl0PicIdx = -1;
const uint8_t kNoTemporalIdx = 0xFF;
+const uint8_t kNoSpatialIdx = 0xFF;
+const uint8_t kNoGofIdx = 0xFF;
+const size_t kMaxVp9RefPics = 3;
+const size_t kMaxVp9FramesInGof = 16;
+const size_t kMaxVp9NumberOfSpatialLayers = 8;
const int kNoKeyIdx = -1;
struct RTPVideoHeaderVP8 {
@@ -62,6 +69,136 @@ struct RTPVideoHeaderVP8 {
// in a VP8 partition. Otherwise false
};
+enum TemporalStructureMode {
+ kTemporalStructureMode1, // 1 temporal layer structure - i.e., IPPP...
+ kTemporalStructureMode2, // 2 temporal layers 0-1-0-1...
+ kTemporalStructureMode3 // 3 temporal layers 0-2-1-2-0-2-1-2...
+};
+
+struct GofInfoVP9 {
+ void SetGofInfoVP9(TemporalStructureMode tm) {
+ switch (tm) {
+ case kTemporalStructureMode1:
+ num_frames_in_gof = 1;
+ temporal_idx[0] = 0;
+ temporal_up_switch[0] = false;
+ num_ref_pics[0] = 1;
+ pid_diff[0][0] = 1;
+ break;
+ case kTemporalStructureMode2:
+ num_frames_in_gof = 2;
+ temporal_idx[0] = 0;
+ temporal_up_switch[0] = false;
+ num_ref_pics[0] = 1;
+ pid_diff[0][0] = 2;
+
+ temporal_idx[1] = 1;
+ temporal_up_switch[1] = true;
+ num_ref_pics[1] = 1;
+ pid_diff[1][0] = 1;
+ break;
+ case kTemporalStructureMode3:
+ num_frames_in_gof = 4;
+ temporal_idx[0] = 0;
+ temporal_up_switch[0] = false;
+ num_ref_pics[0] = 1;
+ pid_diff[0][0] = 4;
+
+ temporal_idx[1] = 2;
+ temporal_up_switch[1] = true;
+ num_ref_pics[1] = 1;
+ pid_diff[1][0] = 1;
+
+ temporal_idx[2] = 1;
+ temporal_up_switch[2] = true;
+ num_ref_pics[2] = 1;
+ pid_diff[2][0] = 2;
+
+ temporal_idx[3] = 2;
+ temporal_up_switch[3] = false;
+ num_ref_pics[3] = 2;
+ pid_diff[3][0] = 1;
+ pid_diff[3][1] = 2;
+ break;
+ default:
+ assert(false);
+ }
+ }
+
+ void CopyGofInfoVP9(const GofInfoVP9& src) {
+ num_frames_in_gof = src.num_frames_in_gof;
+ for (size_t i = 0; i < num_frames_in_gof; ++i) {
+ temporal_idx[i] = src.temporal_idx[i];
+ temporal_up_switch[i] = src.temporal_up_switch[i];
+ num_ref_pics[i] = src.num_ref_pics[i];
+ for (size_t r = 0; r < num_ref_pics[i]; ++r) {
+ pid_diff[i][r] = src.pid_diff[i][r];
+ }
+ }
+ }
+
+ size_t num_frames_in_gof;
+ uint8_t temporal_idx[kMaxVp9FramesInGof];
+ bool temporal_up_switch[kMaxVp9FramesInGof];
+ size_t num_ref_pics[kMaxVp9FramesInGof];
+ int16_t pid_diff[kMaxVp9FramesInGof][kMaxVp9RefPics];
+};
+
+struct RTPVideoHeaderVP9 {
+ void InitRTPVideoHeaderVP9() {
+ inter_pic_predicted = false;
+ flexible_mode = false;
+ beginning_of_frame = false;
+ end_of_frame = false;
+ ss_data_available = false;
+ picture_id = kNoPictureId;
+ max_picture_id = kMaxTwoBytePictureId;
+ tl0_pic_idx = kNoTl0PicIdx;
+ temporal_idx = kNoTemporalIdx;
+ spatial_idx = kNoSpatialIdx;
+ temporal_up_switch = false;
+ inter_layer_predicted = false;
+ gof_idx = kNoGofIdx;
+ num_ref_pics = 0;
+ num_spatial_layers = 1;
+ }
+
+ bool inter_pic_predicted; // This layer frame is dependent on previously
+ // coded frame(s).
+ bool flexible_mode; // This frame is in flexible mode.
+ bool beginning_of_frame; // True if this packet is the first in a VP9 layer
+ // frame.
+ bool end_of_frame; // True if this packet is the last in a VP9 layer frame.
+ bool ss_data_available; // True if SS data is available in this payload
+ // descriptor.
+ int16_t picture_id; // PictureID index, 15 bits;
+ // kNoPictureId if PictureID does not exist.
+ int16_t max_picture_id; // Maximum picture ID index; either 0x7F or 0x7FFF;
+ int16_t tl0_pic_idx; // TL0PIC_IDX, 8 bits;
+ // kNoTl0PicIdx means no value provided.
+ uint8_t temporal_idx; // Temporal layer index, or kNoTemporalIdx.
+ uint8_t spatial_idx; // Spatial layer index, or kNoSpatialIdx.
+ bool temporal_up_switch; // True if upswitch to higher frame rate is possible
+ // starting from this frame.
+ bool inter_layer_predicted; // Frame is dependent on directly lower spatial
+ // layer frame.
+
+ uint8_t gof_idx; // Index to predefined temporal frame info in SS data.
+
+ size_t num_ref_pics; // Number of reference pictures used by this layer
+ // frame.
+ int16_t pid_diff[kMaxVp9RefPics]; // P_DIFF signaled to derive the PictureID
+ // of the reference pictures.
+ int16_t ref_picture_id[kMaxVp9RefPics]; // PictureID of reference pictures.
+
+ // SS data.
+ size_t num_spatial_layers; // Always populated.
+ bool spatial_layer_resolution_present;
+ uint16_t width[kMaxVp9NumberOfSpatialLayers];
+ uint16_t height[kMaxVp9NumberOfSpatialLayers];
+ GofInfoVP9 gof;
+};
+
// The packetization types that we support: single, aggregated, and fragmented.
enum H264PacketizationTypes {
kH264SingleNalu, // This packet contains a single NAL unit.
@@ -85,6 +222,7 @@ struct RTPVideoHeaderH264 {
union RTPVideoTypeHeader {
RTPVideoHeaderVP8 VP8;
+ RTPVideoHeaderVP9 VP9;
RTPVideoHeaderH264 H264;
};
@@ -92,6 +230,7 @@ enum RtpVideoCodecTypes {
kRtpVideoNone,
kRtpVideoGeneric,
kRtpVideoVp8,
+ kRtpVideoVp9,
kRtpVideoH264
};
// Since RTPVideoHeader is used as a member of a union, it can't have a
@@ -253,7 +392,7 @@ class RTPFragmentationHeader {
uint8_t* fragmentationPlType; // Payload type of each fragmentation
private:
- DISALLOW_COPY_AND_ASSIGN(RTPFragmentationHeader);
+ RTC_DISALLOW_COPY_AND_ASSIGN(RTPFragmentationHeader);
};
struct RTCPVoIPMetric {
@@ -302,7 +441,7 @@ struct FecProtectionParams {
// CallStats object using RegisterStatsObserver.
class CallStatsObserver {
public:
- virtual void OnRttUpdate(int64_t rtt_ms) = 0;
+ virtual void OnRttUpdate(int64_t avg_rtt_ms, int64_t max_rtt_ms) = 0;
virtual ~CallStatsObserver() {}
};
@@ -342,7 +481,7 @@ struct VideoContentMetrics {
class AudioFrame {
public:
// Stereo, 32 kHz, 60 ms (2 * 32 * 60)
- static const int kMaxDataSizeSamples = 3840;
+ static const size_t kMaxDataSizeSamples = 3840;
enum VADActivity {
kVadActive = 0,
@@ -366,7 +505,7 @@ class AudioFrame {
// |interleaved_| is not changed by this method.
void UpdateFrame(int id, uint32_t timestamp, const int16_t* data,
- int samples_per_channel, int sample_rate_hz,
+ size_t samples_per_channel, int sample_rate_hz,
SpeechType speech_type, VADActivity vad_activity,
int num_channels = 1, uint32_t energy = -1);
@@ -390,7 +529,7 @@ class AudioFrame {
// -1 represents an uninitialized value.
int64_t ntp_time_ms_;
int16_t data_[kMaxDataSizeSamples];
- int samples_per_channel_;
+ size_t samples_per_channel_;
int sample_rate_hz_;
int num_channels_;
SpeechType speech_type_;
@@ -403,7 +542,7 @@ class AudioFrame {
bool interleaved_;
private:
- DISALLOW_COPY_AND_ASSIGN(AudioFrame);
+ RTC_DISALLOW_COPY_AND_ASSIGN(AudioFrame);
};
inline AudioFrame::AudioFrame()
@@ -430,7 +569,7 @@ inline void AudioFrame::Reset() {
inline void AudioFrame::UpdateFrame(int id,
uint32_t timestamp,
const int16_t* data,
- int samples_per_channel,
+ size_t samples_per_channel,
int sample_rate_hz,
SpeechType speech_type,
VADActivity vad_activity,
@@ -446,7 +585,7 @@ inline void AudioFrame::UpdateFrame(int id,
energy_ = energy;
assert(num_channels >= 0);
- const int length = samples_per_channel * num_channels;
+ const size_t length = samples_per_channel * num_channels;
assert(length <= kMaxDataSizeSamples);
if (data != NULL) {
memcpy(data_, data, sizeof(int16_t) * length);
@@ -471,7 +610,7 @@ inline void AudioFrame::CopyFrom(const AudioFrame& src) {
interleaved_ = src.interleaved_;
assert(num_channels_ >= 0);
- const int length = samples_per_channel_ * num_channels_;
+ const size_t length = samples_per_channel_ * num_channels_;
assert(length <= kMaxDataSizeSamples);
memcpy(data_, src.data_, sizeof(int16_t) * length);
}
@@ -484,7 +623,7 @@ inline AudioFrame& AudioFrame::operator>>=(const int rhs) {
assert((num_channels_ > 0) && (num_channels_ < 3));
if ((num_channels_ > 2) || (num_channels_ < 1)) return *this;
- for (int i = 0; i < samples_per_channel_ * num_channels_; i++) {
+ for (size_t i = 0; i < samples_per_channel_ * num_channels_; i++) {
data_[i] = static_cast<int16_t>(data_[i] >> rhs);
}
return *this;
@@ -506,8 +645,8 @@ inline AudioFrame& AudioFrame::Append(const AudioFrame& rhs) {
speech_type_ = kUndefined;
}
- int offset = samples_per_channel_ * num_channels_;
- for (int i = 0; i < rhs.samples_per_channel_ * rhs.num_channels_; i++) {
+ size_t offset = samples_per_channel_ * num_channels_;
+ for (size_t i = 0; i < rhs.samples_per_channel_ * rhs.num_channels_; i++) {
data_[offset + i] = rhs.data_[i];
}
samples_per_channel_ += rhs.samples_per_channel_;
@@ -557,7 +696,7 @@ inline AudioFrame& AudioFrame::operator+=(const AudioFrame& rhs) {
sizeof(int16_t) * rhs.samples_per_channel_ * num_channels_);
} else {
// IMPROVEMENT this can be done very fast in assembly
- for (int i = 0; i < samples_per_channel_ * num_channels_; i++) {
+ for (size_t i = 0; i < samples_per_channel_ * num_channels_; i++) {
int32_t wrap_guard =
static_cast<int32_t>(data_[i]) + static_cast<int32_t>(rhs.data_[i]);
data_[i] = ClampToInt16(wrap_guard);
@@ -582,7 +721,7 @@ inline AudioFrame& AudioFrame::operator-=(const AudioFrame& rhs) {
}
speech_type_ = kUndefined;
- for (int i = 0; i < samples_per_channel_ * num_channels_; i++) {
+ for (size_t i = 0; i < samples_per_channel_ * num_channels_; i++) {
int32_t wrap_guard =
static_cast<int32_t>(data_[i]) - static_cast<int32_t>(rhs.data_[i]);
data_[i] = ClampToInt16(wrap_guard);
@@ -626,6 +765,46 @@ inline uint32_t LatestTimestamp(uint32_t timestamp1, uint32_t timestamp2) {
return IsNewerTimestamp(timestamp1, timestamp2) ? timestamp1 : timestamp2;
}
+// Utility class to unwrap a sequence number to a larger type, for easier
+// handling large ranges. Note that sequence numbers will never be unwrapped
+// to a negative value.
+class SequenceNumberUnwrapper {
+ public:
+ SequenceNumberUnwrapper() : last_seq_(-1) {}
+
+ // Get the unwrapped sequence, but don't update the internal state.
+ int64_t UnwrapWithoutUpdate(uint16_t sequence_number) {
+ if (last_seq_ == -1)
+ return sequence_number;
+
+ uint16_t cropped_last = static_cast<uint16_t>(last_seq_);
+ int64_t delta = sequence_number - cropped_last;
+ if (IsNewerSequenceNumber(sequence_number, cropped_last)) {
+ if (delta < 0)
+ delta += (1 << 16); // Wrap forwards.
+ } else if (delta > 0 && (last_seq_ + delta - (1 << 16)) >= 0) {
+ // If sequence_number is older but delta is positive, this is a backwards
+ // wrap-around. However, don't wrap backwards past 0 (unwrapped).
+ delta -= (1 << 16);
+ }
+
+ return last_seq_ + delta;
+ }
+
+ // Only update the internal state to the specified last (unwrapped) sequence.
+ void UpdateLast(int64_t last_sequence) { last_seq_ = last_sequence; }
+
+ // Unwrap the sequence number and update the internal state.
+ int64_t Unwrap(uint16_t sequence_number) {
+ int64_t unwrapped = UnwrapWithoutUpdate(sequence_number);
+ UpdateLast(unwrapped);
+ return unwrapped;
+ }
+
+ private:
+ int64_t last_seq_;
+};
+
} // namespace webrtc
#endif // MODULE_COMMON_TYPES_H
diff --git a/chromium/third_party/webrtc/modules/module_common_types_unittest.cc b/chromium/third_party/webrtc/modules/module_common_types_unittest.cc
index 3e7f5941666..bc0b7a1a5b2 100644
--- a/chromium/third_party/webrtc/modules/module_common_types_unittest.cc
+++ b/chromium/third_party/webrtc/modules/module_common_types_unittest.cc
@@ -122,4 +122,63 @@ TEST(ClampToInt16, TestCases) {
EXPECT_EQ(-0x8000, ClampToInt16(-0x7FFFFFFF));
}
+TEST(SequenceNumberUnwrapper, Limits) {
+ SequenceNumberUnwrapper unwrapper;
+
+ EXPECT_EQ(0, unwrapper.Unwrap(0));
+ EXPECT_EQ(0x8000, unwrapper.Unwrap(0x8000));
+ // Delta is exactly 0x8000 but current is lower than input, wrap backwards.
+ EXPECT_EQ(0x0, unwrapper.Unwrap(0x0000));
+
+ EXPECT_EQ(0x8000, unwrapper.Unwrap(0x8000));
+ EXPECT_EQ(0xFFFF, unwrapper.Unwrap(0xFFFF));
+ EXPECT_EQ(0x10000, unwrapper.Unwrap(0x0000));
+ EXPECT_EQ(0xFFFF, unwrapper.Unwrap(0xFFFF));
+ EXPECT_EQ(0x8000, unwrapper.Unwrap(0x8000));
+ EXPECT_EQ(0, unwrapper.Unwrap(0));
+
+ // Don't allow negative values.
+ EXPECT_EQ(0xFFFF, unwrapper.Unwrap(0xFFFF));
+}
+
+TEST(SequenceNumberUnwrapper, ForwardWraps) {
+ int64_t seq = 0;
+ SequenceNumberUnwrapper unwrapper;
+
+ const int kMaxIncrease = 0x8000 - 1;
+ const int kNumWraps = 4;
+ for (int i = 0; i < kNumWraps * 2; ++i) {
+ int64_t unwrapped = unwrapper.Unwrap(static_cast<uint16_t>(seq & 0xFFFF));
+ EXPECT_EQ(seq, unwrapped);
+ seq += kMaxIncrease;
+ }
+
+ unwrapper.UpdateLast(0);
+ for (int seq = 0; seq < kNumWraps * 0xFFFF; ++seq) {
+ int64_t unwrapped = unwrapper.Unwrap(static_cast<uint16_t>(seq & 0xFFFF));
+ EXPECT_EQ(seq, unwrapped);
+ }
+}
+
+TEST(SequenceNumberUnwrapper, BackwardWraps) {
+ SequenceNumberUnwrapper unwrapper;
+
+ const int kMaxDecrease = 0x8000 - 1;
+ const int kNumWraps = 4;
+ int64_t seq = kNumWraps * 2 * kMaxDecrease;
+ unwrapper.UpdateLast(seq);
+ for (int i = kNumWraps * 2; i >= 0; --i) {
+ int64_t unwrapped = unwrapper.Unwrap(static_cast<uint16_t>(seq & 0xFFFF));
+ EXPECT_EQ(seq, unwrapped);
+ seq -= kMaxDecrease;
+ }
+
+ seq = kNumWraps * 0xFFFF;
+ unwrapper.UpdateLast(seq);
+ for (; seq >= 0; --seq) {
+ int64_t unwrapped = unwrapper.Unwrap(static_cast<uint16_t>(seq & 0xFFFF));
+ EXPECT_EQ(seq, unwrapped);
+ }
+}
+
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/modules.gyp b/chromium/third_party/webrtc/modules/modules.gyp
index b06ecc5685b..31c3dac2d56 100644
--- a/chromium/third_party/webrtc/modules/modules.gyp
+++ b/chromium/third_party/webrtc/modules/modules.gyp
@@ -58,6 +58,7 @@
'acm_receive_test',
'acm_send_test',
'audio_coding_module',
+ 'audio_conference_mixer',
'audio_device' ,
'audio_processing',
'audioproc_test_utils',
@@ -65,7 +66,6 @@
'bwe_simulator',
'cng',
'desktop_capture',
- 'isac',
'isac_fix',
'media_file',
'neteq',
@@ -93,11 +93,11 @@
'<(webrtc_root)/test/test.gyp:frame_generator',
'<(webrtc_root)/test/test.gyp:rtp_test_utils',
'<(webrtc_root)/test/test.gyp:test_support_main',
+ '<(webrtc_root)/test/webrtc_test_common.gyp:webrtc_test_common',
'<(webrtc_root)/tools/tools.gyp:agc_test_utils',
],
'sources': [
'audio_coding/codecs/cng/audio_encoder_cng_unittest.cc',
- 'audio_coding/codecs/opus/audio_encoder_mutable_opus_test.cc',
'audio_coding/main/acm2/acm_receiver_unittest.cc',
'audio_coding/main/acm2/acm_receiver_unittest_oldapi.cc',
'audio_coding/main/acm2/audio_coding_module_unittest.cc',
@@ -159,8 +159,11 @@
'audio_coding/neteq/mock/mock_payload_splitter.h',
'audio_coding/neteq/tools/input_audio_file_unittest.cc',
'audio_coding/neteq/tools/packet_unittest.cc',
+ 'audio_conference_mixer/test/audio_conference_mixer_unittest.cc',
+ 'audio_device/fine_audio_buffer_unittest.cc',
'audio_processing/aec/echo_cancellation_unittest.cc',
'audio_processing/aec/system_delay_unittest.cc',
+ 'audio_processing/agc/agc_manager_direct_unittest.cc',
# TODO(ajm): Fix to match new interface.
# 'audio_processing/agc/agc_unittest.cc',
'audio_processing/agc/histogram_unittest.cc',
@@ -168,9 +171,10 @@
'audio_processing/beamformer/complex_matrix_unittest.cc',
'audio_processing/beamformer/covariance_matrix_generator_unittest.cc',
'audio_processing/beamformer/matrix_unittest.cc',
- 'audio_processing/beamformer/mock_nonlinear_beamformer.cc',
'audio_processing/beamformer/mock_nonlinear_beamformer.h',
'audio_processing/echo_cancellation_impl_unittest.cc',
+ 'audio_processing/intelligibility/intelligibility_enhancer_unittest.cc',
+ 'audio_processing/intelligibility/intelligibility_utils_unittest.cc',
'audio_processing/splitting_filter_unittest.cc',
'audio_processing/transient/dyadic_decimator_unittest.cc',
'audio_processing/transient/file_utils.cc',
@@ -193,7 +197,6 @@
'bitrate_controller/bitrate_allocator_unittest.cc',
'bitrate_controller/bitrate_controller_unittest.cc',
'bitrate_controller/send_side_bandwidth_estimation_unittest.cc',
- 'bitrate_controller/send_time_history_unittest.cc',
'desktop_capture/desktop_and_cursor_composer_unittest.cc',
'desktop_capture/desktop_region_unittest.cc',
'desktop_capture/differ_block_unittest.cc',
@@ -214,6 +217,7 @@
'pacing/packet_router_unittest.cc',
'remote_bitrate_estimator/bwe_simulations.cc',
'remote_bitrate_estimator/include/mock/mock_remote_bitrate_observer.h',
+ 'remote_bitrate_estimator/include/mock/mock_remote_bitrate_estimator.h',
'remote_bitrate_estimator/inter_arrival_unittest.cc',
'remote_bitrate_estimator/overuse_detector_unittest.cc',
'remote_bitrate_estimator/rate_statistics_unittest.cc',
@@ -221,26 +225,37 @@
'remote_bitrate_estimator/remote_bitrate_estimator_single_stream_unittest.cc',
'remote_bitrate_estimator/remote_bitrate_estimator_unittest_helper.cc',
'remote_bitrate_estimator/remote_bitrate_estimator_unittest_helper.h',
+ 'remote_bitrate_estimator/remote_estimator_proxy_unittest.cc',
+ 'remote_bitrate_estimator/send_time_history_unittest.cc',
+ 'remote_bitrate_estimator/test/bwe_test_framework_unittest.cc',
+ 'remote_bitrate_estimator/test/bwe_unittest.cc',
+ 'remote_bitrate_estimator/test/metric_recorder_unittest.cc',
+ 'remote_bitrate_estimator/test/estimators/nada_unittest.cc',
+ 'remote_bitrate_estimator/transport_feedback_adapter_unittest.cc',
'rtp_rtcp/source/mock/mock_rtp_payload_strategy.h',
'rtp_rtcp/source/byte_io_unittest.cc',
'rtp_rtcp/source/fec_receiver_unittest.cc',
'rtp_rtcp/source/fec_test_helper.cc',
'rtp_rtcp/source/fec_test_helper.h',
'rtp_rtcp/source/h264_sps_parser_unittest.cc',
+ 'rtp_rtcp/source/h264_bitstream_parser_unittest.cc',
'rtp_rtcp/source/nack_rtx_unittest.cc',
+ 'rtp_rtcp/source/packet_loss_stats_unittest.cc',
'rtp_rtcp/source/producer_fec_unittest.cc',
'rtp_rtcp/source/receive_statistics_unittest.cc',
'rtp_rtcp/source/remote_ntp_time_estimator_unittest.cc',
'rtp_rtcp/source/rtcp_format_remb_unittest.cc',
'rtp_rtcp/source/rtcp_packet_unittest.cc',
+ 'rtp_rtcp/source/rtcp_packet/transport_feedback_unittest.cc',
'rtp_rtcp/source/rtcp_receiver_unittest.cc',
'rtp_rtcp/source/rtcp_sender_unittest.cc',
'rtp_rtcp/source/rtcp_utility_unittest.cc',
'rtp_rtcp/source/rtp_fec_unittest.cc',
'rtp_rtcp/source/rtp_format_h264_unittest.cc',
- 'rtp_rtcp/source/rtp_format_vp8_unittest.cc',
'rtp_rtcp/source/rtp_format_vp8_test_helper.cc',
'rtp_rtcp/source/rtp_format_vp8_test_helper.h',
+ 'rtp_rtcp/source/rtp_format_vp8_unittest.cc',
+ 'rtp_rtcp/source/rtp_format_vp9_unittest.cc',
'rtp_rtcp/source/rtp_packet_history_unittest.cc',
'rtp_rtcp/source/rtp_payload_registry_unittest.cc',
'rtp_rtcp/source/rtp_rtcp_impl_unittest.cc',
@@ -315,15 +330,12 @@
['enable_protobuf==1', {
'defines': [
'WEBRTC_AUDIOPROC_DEBUG_DUMP',
- 'RTC_AUDIOCODING_DEBUG_DUMP',
],
'dependencies': [
- 'acm_dump',
'audioproc_protobuf_utils',
'audioproc_unittest_proto',
],
'sources': [
- 'audio_coding/main/acm2/acm_dump_unittest.cc',
'audio_processing/audio_processing_impl_unittest.cc',
'audio_processing/test/audio_processing_unittest.cc',
'audio_processing/test/test_utils.h',
@@ -331,7 +343,7 @@
}],
['build_libvpx==1', {
'dependencies': [
- '<(libvpx_dir)/libvpx.gyp:libvpx',
+ '<(libvpx_dir)/libvpx.gyp:libvpx_new',
],
}],
['OS=="android"', {
@@ -350,17 +362,20 @@
'audio_device/android/audio_manager_unittest.cc',
'audio_device/android/ensure_initialized.cc',
'audio_device/android/ensure_initialized.h',
- 'audio_device/android/fine_audio_buffer_unittest.cc',
],
}],
['OS=="ios"', {
'sources': [
'video_coding/codecs/h264/h264_video_toolbox_nalu_unittest.cc',
+ 'audio_device/ios/audio_device_unittest_ios.cc',
],
'mac_bundle_resources': [
'<(DEPTH)/resources/audio_coding/speech_mono_16kHz.pcm',
'<(DEPTH)/resources/audio_coding/testfile32kHz.pcm',
'<(DEPTH)/resources/audio_coding/teststereo32kHz.pcm',
+ '<(DEPTH)/resources/audio_device/audio_short16.pcm',
+ '<(DEPTH)/resources/audio_device/audio_short44.pcm',
+ '<(DEPTH)/resources/audio_device/audio_short48.pcm',
'<(DEPTH)/resources/audio_processing/agc/agc_no_circular_buffer.dat',
'<(DEPTH)/resources/audio_processing/agc/agc_pitch_gain.dat',
'<(DEPTH)/resources/audio_processing/agc/agc_pitch_lag.dat',
@@ -456,6 +471,45 @@
['test_isolation_mode != "noop"', {
'targets': [
{
+ 'target_name': 'audio_codec_speed_tests_run',
+ 'type': 'none',
+ 'dependencies': [
+ 'audio_codec_speed_tests',
+ ],
+ 'includes': [
+ '../build/isolate.gypi',
+ ],
+ 'sources': [
+ 'audio_codec_speed_tests.isolate',
+ ],
+ },
+ {
+ 'target_name': 'audio_decoder_unittests_run',
+ 'type': 'none',
+ 'dependencies': [
+ 'audio_decoder_unittests',
+ ],
+ 'includes': [
+ '../build/isolate.gypi',
+ ],
+ 'sources': [
+ 'audio_decoder_unittests.isolate',
+ ],
+ },
+ {
+ 'target_name': 'audio_device_tests_run',
+ 'type': 'none',
+ 'dependencies': [
+ 'audio_device_tests',
+ ],
+ 'includes': [
+ '../build/isolate.gypi',
+ ],
+ 'sources': [
+ 'audio_device_tests.isolate',
+ ],
+ },
+ {
'target_name': 'modules_tests_run',
'type': 'none',
'dependencies': [
@@ -481,6 +535,19 @@
'modules_unittests.isolate',
],
},
+ {
+ 'target_name': 'video_render_tests_run',
+ 'type': 'none',
+ 'dependencies': [
+ 'video_render_tests',
+ ],
+ 'includes': [
+ '../build/isolate.gypi',
+ ],
+ 'sources': [
+ 'video_render_tests.isolate',
+ ],
+ },
],
}],
],
diff --git a/chromium/third_party/webrtc/modules/modules_java.gyp b/chromium/third_party/webrtc/modules/modules_java.gyp
index e59d2bd41cd..060de2a0678 100644
--- a/chromium/third_party/webrtc/modules/modules_java.gyp
+++ b/chromium/third_party/webrtc/modules/modules_java.gyp
@@ -13,25 +13,17 @@
'type': 'none',
'variables': {
'java_in_dir': 'audio_device/android/java',
+ 'additional_src_dirs': [ '../base/java/src', ],
},
+
'includes': [ '../../build/java.gypi' ],
}, # audio_device_module_java
{
- 'target_name': 'video_capture_module_java',
- 'type': 'none',
- 'dependencies': [
- 'video_render_module_java',
- ],
- 'variables': {
- 'java_in_dir': 'video_capture/android/java',
- },
- 'includes': [ '../../build/java.gypi' ],
- }, # video_capture_module_java
- {
'target_name': 'video_render_module_java',
'type': 'none',
'variables': {
'java_in_dir': 'video_render/android/java',
+ 'additional_src_dirs': [ '../base/java/src', ],
},
'includes': [ '../../build/java.gypi' ],
}, # video_render_module_java
diff --git a/chromium/third_party/webrtc/modules/modules_java_chromium.gyp b/chromium/third_party/webrtc/modules/modules_java_chromium.gyp
index 247a81d9291..32d2d8d24e8 100644
--- a/chromium/third_party/webrtc/modules/modules_java_chromium.gyp
+++ b/chromium/third_party/webrtc/modules/modules_java_chromium.gyp
@@ -17,17 +17,6 @@
'includes': [ '../../../build/java.gypi' ],
}, # audio_device_module_java
{
- 'target_name': 'video_capture_module_java',
- 'type': 'none',
- 'dependencies': [
- 'video_render_module_java',
- ],
- 'variables': {
- 'java_in_dir': 'video_capture/android/java',
- },
- 'includes': [ '../../../build/java.gypi' ],
- }, # video_capture_module_java
- {
'target_name': 'video_render_module_java',
'type': 'none',
'variables': {
diff --git a/chromium/third_party/webrtc/modules/pacing/BUILD.gn b/chromium/third_party/webrtc/modules/pacing/BUILD.gn
index 296bf1795f4..3e478c1e76b 100644
--- a/chromium/third_party/webrtc/modules/pacing/BUILD.gn
+++ b/chromium/third_party/webrtc/modules/pacing/BUILD.gn
@@ -27,5 +27,7 @@ source_set("pacing") {
deps = [
"../../system_wrappers",
+ "../bitrate_controller",
+ "../rtp_rtcp",
]
}
diff --git a/chromium/third_party/webrtc/modules/pacing/bitrate_prober.cc b/chromium/third_party/webrtc/modules/pacing/bitrate_prober.cc
index 1ed6298ff11..d09ad2df16e 100644
--- a/chromium/third_party/webrtc/modules/pacing/bitrate_prober.cc
+++ b/chromium/third_party/webrtc/modules/pacing/bitrate_prober.cc
@@ -15,6 +15,7 @@
#include <limits>
#include <sstream>
+#include "webrtc/modules/pacing/include/paced_sender.h"
#include "webrtc/system_wrappers/interface/logging.h"
namespace webrtc {
@@ -88,7 +89,8 @@ int BitrateProber::TimeUntilNextProbe(int64_t now_ms) {
// We will send the first probe packet immediately if no packet has been
// sent before.
int time_until_probe_ms = 0;
- if (packet_size_last_send_ > 0 && probing_state_ == kProbing) {
+ if (packet_size_last_send_ > PacedSender::kMinProbePacketSize &&
+ probing_state_ == kProbing) {
int next_delta_ms = ComputeDeltaFromBitrate(packet_size_last_send_,
probe_bitrates_.front());
time_until_probe_ms = next_delta_ms - elapsed_time_ms;
diff --git a/chromium/third_party/webrtc/modules/pacing/include/paced_sender.h b/chromium/third_party/webrtc/modules/pacing/include/paced_sender.h
index 730d3b70281..afb196fe450 100644
--- a/chromium/third_party/webrtc/modules/pacing/include/paced_sender.h
+++ b/chromium/third_party/webrtc/modules/pacing/include/paced_sender.h
@@ -17,6 +17,7 @@
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/base/thread_annotations.h"
#include "webrtc/modules/interface/module.h"
+#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -30,16 +31,8 @@ struct Packet;
class PacketQueue;
} // namespace paced_sender
-class PacedSender : public Module {
+class PacedSender : public Module, public RtpPacketSender {
public:
- enum Priority {
- kHighPriority = 0, // Pass through; will be sent immediately.
- kNormalPriority = 2, // Put in back of the line.
- kLowPriority = 3, // Put in back of the low priority line.
- };
- // Low priority packets are mixed with the normal priority packets
- // while we are paused.
-
class Callback {
public:
// Note: packets sent as a result of a callback should not pass by this
@@ -68,6 +61,8 @@ class PacedSender : public Module {
// overshoots from the encoder.
static const float kDefaultPaceMultiplier;
+ static const size_t kMinProbePacketSize = 200;
+
PacedSender(Clock* clock,
Callback* callback,
int bitrate_kbps,
@@ -103,12 +98,12 @@ class PacedSender : public Module {
// Returns true if we send the packet now, else it will add the packet
// information to the queue and call TimeToSendPacket when it's time to send.
- virtual bool SendPacket(Priority priority,
- uint32_t ssrc,
- uint16_t sequence_number,
- int64_t capture_time_ms,
- size_t bytes,
- bool retransmission);
+ bool SendPacket(RtpPacketSender::Priority priority,
+ uint32_t ssrc,
+ uint16_t sequence_number,
+ int64_t capture_time_ms,
+ size_t bytes,
+ bool retransmission) override;
// Returns the time since the oldest queued packet was enqueued.
virtual int64_t QueueInMs() const;
@@ -126,9 +121,6 @@ class PacedSender : public Module {
// Process any pending packets in the queue(s).
int32_t Process() override;
- protected:
- virtual bool ProbingExperimentIsEnabled() const;
-
private:
// Updates the number of bytes that can be sent for the next time interval.
void UpdateBytesPerInterval(int64_t delta_time_in_ms)
diff --git a/chromium/third_party/webrtc/modules/pacing/include/packet_router.h b/chromium/third_party/webrtc/modules/pacing/include/packet_router.h
index c1b332a6bfc..9d461d13a98 100644
--- a/chromium/third_party/webrtc/modules/pacing/include/packet_router.h
+++ b/chromium/third_party/webrtc/modules/pacing/include/packet_router.h
@@ -14,21 +14,24 @@
#include <list>
#include "webrtc/base/constructormagic.h"
+#include "webrtc/base/criticalsection.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/base/thread_annotations.h"
#include "webrtc/common_types.h"
#include "webrtc/modules/pacing/include/paced_sender.h"
+#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
namespace webrtc {
-class CriticalSectionWrapper;
-class RTPFragmentationHeader;
class RtpRtcp;
-struct RTPVideoHeader;
+namespace rtcp {
+class TransportFeedback;
+} // namespace rtcp
// PacketRouter routes outgoing data to the correct sending RTP module, based
// on the simulcast layer in RTPVideoHeader.
-class PacketRouter : public PacedSender::Callback {
+class PacketRouter : public PacedSender::Callback,
+ public TransportSequenceNumberAllocator {
public:
PacketRouter();
virtual ~PacketRouter();
@@ -44,16 +47,20 @@ class PacketRouter : public PacedSender::Callback {
size_t TimeToSendPadding(size_t bytes) override;
- private:
- // TODO(holmer): When the new video API has launched, remove crit_ and
- // assume rtp_modules_ will never change during a call. We should then also
- // switch rtp_modules_ to a map from ssrc to rtp module.
- rtc::scoped_ptr<CriticalSectionWrapper> crit_;
+ void SetTransportWideSequenceNumber(uint16_t sequence_number);
+ uint16_t AllocateSequenceNumber() override;
+
+ // Send transport feedback packet to send-side.
+ virtual bool SendFeedback(rtcp::TransportFeedback* packet);
+ private:
+ rtc::CriticalSection modules_lock_;
// Map from ssrc to sending rtp module.
- std::list<RtpRtcp*> rtp_modules_ GUARDED_BY(crit_.get());
+ std::list<RtpRtcp*> rtp_modules_ GUARDED_BY(modules_lock_);
+
+ volatile int transport_seq_;
- DISALLOW_COPY_AND_ASSIGN(PacketRouter);
+ RTC_DISALLOW_COPY_AND_ASSIGN(PacketRouter);
};
} // namespace webrtc
#endif // WEBRTC_MODULES_PACING_INCLUDE_PACKET_ROUTER_H_
diff --git a/chromium/third_party/webrtc/modules/pacing/paced_sender.cc b/chromium/third_party/webrtc/modules/pacing/paced_sender.cc
index 7c842bff022..55c361b0850 100644
--- a/chromium/third_party/webrtc/modules/pacing/paced_sender.cc
+++ b/chromium/third_party/webrtc/modules/pacing/paced_sender.cc
@@ -36,7 +36,7 @@ const int64_t kMaxIntervalTimeMs = 30;
namespace webrtc {
namespace paced_sender {
struct Packet {
- Packet(PacedSender::Priority priority,
+ Packet(RtpPacketSender::Priority priority,
uint32_t ssrc,
uint16_t seq_number,
int64_t capture_time_ms,
@@ -53,7 +53,7 @@ struct Packet {
retransmission(retransmission),
enqueue_order(enqueue_order) {}
- PacedSender::Priority priority;
+ RtpPacketSender::Priority priority;
uint32_t ssrc;
uint16_t sequence_number;
int64_t capture_time_ms;
@@ -268,9 +268,12 @@ void PacedSender::UpdateBitrate(int bitrate_kbps,
bitrate_bps_ = 1000 * bitrate_kbps;
}
-bool PacedSender::SendPacket(Priority priority, uint32_t ssrc,
- uint16_t sequence_number, int64_t capture_time_ms, size_t bytes,
- bool retransmission) {
+bool PacedSender::SendPacket(RtpPacketSender::Priority priority,
+ uint32_t ssrc,
+ uint16_t sequence_number,
+ int64_t capture_time_ms,
+ size_t bytes,
+ bool retransmission) {
CriticalSectionScoped cs(critsect_.get());
if (!enabled_) {
@@ -361,8 +364,11 @@ int32_t PacedSender::Process() {
}
}
+ if (!packets_->Empty())
+ return 0;
+
size_t padding_needed;
- if (prober_->IsProbing() && ProbingExperimentIsEnabled())
+ if (prober_->IsProbing())
padding_needed = prober_->RecommendedPacketSize();
else
padding_needed = padding_budget_->bytes_remaining();
@@ -407,9 +413,4 @@ void PacedSender::UpdateBytesPerInterval(int64_t delta_time_ms) {
media_budget_->IncreaseBudget(delta_time_ms);
padding_budget_->IncreaseBudget(delta_time_ms);
}
-
-bool PacedSender::ProbingExperimentIsEnabled() const {
- return webrtc::field_trial::FindFullName("WebRTC-BitrateProbing") ==
- "Enabled";
-}
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/pacing/paced_sender_unittest.cc b/chromium/third_party/webrtc/modules/pacing/paced_sender_unittest.cc
index 07300620888..a00b5fa58de 100644
--- a/chromium/third_party/webrtc/modules/pacing/paced_sender_unittest.cc
+++ b/chromium/third_party/webrtc/modules/pacing/paced_sender_unittest.cc
@@ -743,22 +743,6 @@ TEST_F(PacedSenderTest, ProbingWithInitialFrame) {
}
}
-class ProbingPacedSender : public PacedSender {
- public:
- ProbingPacedSender(Clock* clock,
- Callback* callback,
- int bitrate_kbps,
- int max_bitrate_kbps,
- int min_bitrate_kbps)
- : PacedSender(clock,
- callback,
- bitrate_kbps,
- max_bitrate_kbps,
- min_bitrate_kbps) {}
-
- bool ProbingExperimentIsEnabled() const override { return true; }
-};
-
TEST_F(PacedSenderTest, ProbingWithTooSmallInitialFrame) {
const int kNumPackets = 11;
const int kNumDeltas = kNumPackets - 1;
@@ -770,9 +754,8 @@ TEST_F(PacedSenderTest, ProbingWithTooSmallInitialFrame) {
std::list<int> expected_deltas_list(expected_deltas,
expected_deltas + kNumPackets - 1);
PacedSenderProbing callback(expected_deltas_list, &clock_);
- send_bucket_.reset(
- new ProbingPacedSender(&clock_, &callback, kInitialBitrateKbps,
- kPaceMultiplier * kInitialBitrateKbps, 0));
+ send_bucket_.reset(new PacedSender(&clock_, &callback, kInitialBitrateKbps,
+ kPaceMultiplier * kInitialBitrateKbps, 0));
for (int i = 0; i < kNumPackets - 5; ++i) {
EXPECT_FALSE(send_bucket_->SendPacket(
diff --git a/chromium/third_party/webrtc/modules/pacing/pacing.gypi b/chromium/third_party/webrtc/modules/pacing/pacing.gypi
index 09be38f4141..faa97841c1f 100644
--- a/chromium/third_party/webrtc/modules/pacing/pacing.gypi
+++ b/chromium/third_party/webrtc/modules/pacing/pacing.gypi
@@ -13,6 +13,8 @@
'type': 'static_library',
'dependencies': [
'<(webrtc_root)/system_wrappers/system_wrappers.gyp:system_wrappers',
+ '<(webrtc_root)/modules/modules.gyp:bitrate_controller',
+ '<(webrtc_root)/modules/modules.gyp:rtp_rtcp',
],
'sources': [
'include/paced_sender.h',
diff --git a/chromium/third_party/webrtc/modules/pacing/packet_router.cc b/chromium/third_party/webrtc/modules/pacing/packet_router.cc
index 9e15a713174..563773b41f8 100644
--- a/chromium/third_party/webrtc/modules/pacing/packet_router.cc
+++ b/chromium/third_party/webrtc/modules/pacing/packet_router.cc
@@ -10,37 +10,40 @@
#include "webrtc/modules/pacing/include/packet_router.h"
+#include "webrtc/base/atomicops.h"
#include "webrtc/base/checks.h"
#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp.h"
#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
-#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h"
namespace webrtc {
-PacketRouter::PacketRouter()
- : crit_(CriticalSectionWrapper::CreateCriticalSection()) {
+PacketRouter::PacketRouter() : transport_seq_(0) {
}
PacketRouter::~PacketRouter() {
+ RTC_DCHECK(rtp_modules_.empty());
}
void PacketRouter::AddRtpModule(RtpRtcp* rtp_module) {
- CriticalSectionScoped cs(crit_.get());
- DCHECK(std::find(rtp_modules_.begin(), rtp_modules_.end(), rtp_module) ==
- rtp_modules_.end());
+ rtc::CritScope cs(&modules_lock_);
+ RTC_DCHECK(std::find(rtp_modules_.begin(), rtp_modules_.end(), rtp_module) ==
+ rtp_modules_.end());
rtp_modules_.push_back(rtp_module);
}
void PacketRouter::RemoveRtpModule(RtpRtcp* rtp_module) {
- CriticalSectionScoped cs(crit_.get());
- rtp_modules_.remove(rtp_module);
+ rtc::CritScope cs(&modules_lock_);
+ auto it = std::find(rtp_modules_.begin(), rtp_modules_.end(), rtp_module);
+ RTC_DCHECK(it != rtp_modules_.end());
+ rtp_modules_.erase(it);
}
bool PacketRouter::TimeToSendPacket(uint32_t ssrc,
uint16_t sequence_number,
int64_t capture_timestamp,
bool retransmission) {
- CriticalSectionScoped cs(crit_.get());
+ rtc::CritScope cs(&modules_lock_);
for (auto* rtp_module : rtp_modules_) {
if (rtp_module->SendingMedia() && ssrc == rtp_module->SSRC()) {
return rtp_module->TimeToSendPacket(ssrc, sequence_number,
@@ -50,12 +53,51 @@ bool PacketRouter::TimeToSendPacket(uint32_t ssrc,
return true;
}
-size_t PacketRouter::TimeToSendPadding(size_t bytes) {
- CriticalSectionScoped cs(crit_.get());
+size_t PacketRouter::TimeToSendPadding(size_t bytes_to_send) {
+ size_t total_bytes_sent = 0;
+ rtc::CritScope cs(&modules_lock_);
+ for (RtpRtcp* module : rtp_modules_) {
+ if (module->SendingMedia()) {
+ size_t bytes_sent =
+ module->TimeToSendPadding(bytes_to_send - total_bytes_sent);
+ total_bytes_sent += bytes_sent;
+ if (total_bytes_sent >= bytes_to_send)
+ break;
+ }
+ }
+ return total_bytes_sent;
+}
+
+void PacketRouter::SetTransportWideSequenceNumber(uint16_t sequence_number) {
+ rtc::AtomicOps::ReleaseStore(&transport_seq_, sequence_number);
+}
+
+uint16_t PacketRouter::AllocateSequenceNumber() {
+ int prev_seq = rtc::AtomicOps::AcquireLoad(&transport_seq_);
+ int desired_prev_seq;
+ int new_seq;
+ do {
+ desired_prev_seq = prev_seq;
+ new_seq = (desired_prev_seq + 1) & 0xFFFF;
+ // Note: CompareAndSwap returns the actual value of transport_seq at the
+ // time the CAS operation was executed. Thus, if prev_seq is returned, the
+ // operation was successful - otherwise we need to retry. Saving the
+ // return value saves us a load on retry.
+ prev_seq = rtc::AtomicOps::CompareAndSwap(&transport_seq_, desired_prev_seq,
+ new_seq);
+ } while (prev_seq != desired_prev_seq);
+
+ return new_seq;
+}
+
+bool PacketRouter::SendFeedback(rtcp::TransportFeedback* packet) {
+ rtc::CritScope cs(&modules_lock_);
for (auto* rtp_module : rtp_modules_) {
- if (rtp_module->SendingMedia())
- return rtp_module->TimeToSendPadding(bytes);
+ packet->WithPacketSenderSsrc(rtp_module->SSRC());
+ if (rtp_module->SendFeedbackPacket(*packet))
+ return true;
}
- return 0;
+ return false;
}
+
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/pacing/packet_router_unittest.cc b/chromium/third_party/webrtc/modules/pacing/packet_router_unittest.cc
index f7fdf7bbcae..eecb13757c3 100644
--- a/chromium/third_party/webrtc/modules/pacing/packet_router_unittest.cc
+++ b/chromium/third_party/webrtc/modules/pacing/packet_router_unittest.cc
@@ -102,20 +102,30 @@ TEST_F(PacketRouterTest, TimeToSendPacket) {
}
TEST_F(PacketRouterTest, TimeToSendPadding) {
+ const uint16_t kSsrc1 = 1234;
+ const uint16_t kSsrc2 = 4567;
+
MockRtpRtcp rtp_1;
+ EXPECT_CALL(rtp_1, SSRC()).WillRepeatedly(Return(kSsrc1));
MockRtpRtcp rtp_2;
+ EXPECT_CALL(rtp_2, SSRC()).WillRepeatedly(Return(kSsrc2));
packet_router_->AddRtpModule(&rtp_1);
packet_router_->AddRtpModule(&rtp_2);
- // Default configuration, sending padding on the first sending module.
+ // Default configuration, sending padding on all modules sending media,
+ // ordered by SSRC.
const size_t requested_padding_bytes = 1000;
const size_t sent_padding_bytes = 890;
EXPECT_CALL(rtp_1, SendingMedia()).Times(1).WillOnce(Return(true));
EXPECT_CALL(rtp_1, TimeToSendPadding(requested_padding_bytes))
.Times(1)
.WillOnce(Return(sent_padding_bytes));
- EXPECT_CALL(rtp_2, TimeToSendPadding(_)).Times(0);
- EXPECT_EQ(sent_padding_bytes,
+ EXPECT_CALL(rtp_2, SendingMedia()).Times(1).WillOnce(Return(true));
+ EXPECT_CALL(rtp_2,
+ TimeToSendPadding(requested_padding_bytes - sent_padding_bytes))
+ .Times(1)
+ .WillOnce(Return(requested_padding_bytes - sent_padding_bytes));
+ EXPECT_EQ(requested_padding_bytes,
packet_router_->TimeToSendPadding(requested_padding_bytes));
// Let only the second module be sending and verify the padding request is
@@ -134,8 +144,7 @@ TEST_F(PacketRouterTest, TimeToSendPadding) {
EXPECT_CALL(rtp_1, TimeToSendPadding(requested_padding_bytes)).Times(0);
EXPECT_CALL(rtp_2, SendingMedia()).Times(1).WillOnce(Return(false));
EXPECT_CALL(rtp_2, TimeToSendPadding(_)).Times(0);
- EXPECT_EQ(static_cast<size_t>(0),
- packet_router_->TimeToSendPadding(requested_padding_bytes));
+ EXPECT_EQ(0u, packet_router_->TimeToSendPadding(requested_padding_bytes));
packet_router_->RemoveRtpModule(&rtp_1);
@@ -143,9 +152,21 @@ TEST_F(PacketRouterTest, TimeToSendPadding) {
// to send by not expecting any calls. Instead verify rtp_2 is called.
EXPECT_CALL(rtp_2, SendingMedia()).Times(1).WillOnce(Return(true));
EXPECT_CALL(rtp_2, TimeToSendPadding(requested_padding_bytes)).Times(1);
- EXPECT_EQ(static_cast<size_t>(0),
- packet_router_->TimeToSendPadding(requested_padding_bytes));
+ EXPECT_EQ(0u, packet_router_->TimeToSendPadding(requested_padding_bytes));
packet_router_->RemoveRtpModule(&rtp_2);
}
+
+TEST_F(PacketRouterTest, AllocateSequenceNumbers) {
+ const uint16_t kStartSeq = 0xFFF0;
+ const size_t kNumPackets = 32;
+
+ packet_router_->SetTransportWideSequenceNumber(kStartSeq - 1);
+
+ for (size_t i = 0; i < kNumPackets; ++i) {
+ uint16_t seq = packet_router_->AllocateSequenceNumber();
+ uint32_t expected_unwrapped_seq = static_cast<uint32_t>(kStartSeq) + i;
+ EXPECT_EQ(static_cast<uint16_t>(expected_unwrapped_seq & 0xFFFF), seq);
+ }
+}
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/BUILD.gn b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/BUILD.gn
index b4d4af987ad..99c297dda61 100644
--- a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/BUILD.gn
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/BUILD.gn
@@ -27,6 +27,7 @@ source_set("rbe_components") {
sources = [
"aimd_rate_control.cc",
"aimd_rate_control.h",
+ "include/send_time_history.h",
"inter_arrival.cc",
"inter_arrival.h",
"overuse_detector.cc",
@@ -35,6 +36,11 @@ source_set("rbe_components") {
"overuse_estimator.h",
"remote_bitrate_estimator_abs_send_time.cc",
"remote_bitrate_estimator_single_stream.cc",
+ "remote_estimator_proxy.cc",
+ "remote_estimator_proxy.h",
+ "send_time_history.cc",
+ "transport_feedback_adapter.cc",
+ "transport_feedback_adapter.h",
]
configs += [ "../..:common_config" ]
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/aimd_rate_control.cc b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/aimd_rate_control.cc
index 9bac153ac82..2d5573228d4 100644
--- a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/aimd_rate_control.cc
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/aimd_rate_control.cc
@@ -15,7 +15,9 @@
#include <cmath>
#include "webrtc/base/checks.h"
+
#include "webrtc/modules/remote_bitrate_estimator/overuse_detector.h"
+#include "webrtc/modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h"
#include "webrtc/modules/remote_bitrate_estimator/test/bwe_test_logging.h"
namespace webrtc {
@@ -25,8 +27,9 @@ static const int64_t kLogIntervalMs = 1000;
static const double kWithinIncomingBitrateHysteresis = 1.05;
static const int64_t kMaxFeedbackIntervalMs = 1000;
-AimdRateControl::AimdRateControl(uint32_t min_bitrate_bps)
- : min_configured_bitrate_bps_(min_bitrate_bps),
+AimdRateControl::AimdRateControl()
+ : min_configured_bitrate_bps_(
+ RemoteBitrateEstimator::kDefaultMinBitrateBps),
max_configured_bitrate_bps_(30000000),
current_bitrate_bps_(max_configured_bitrate_bps_),
avg_max_bitrate_kbps_(-1.0f),
@@ -41,11 +44,11 @@ AimdRateControl::AimdRateControl(uint32_t min_bitrate_bps)
beta_(0.85f),
rtt_(kDefaultRttMs),
time_of_last_log_(-1),
- in_experiment_(AdaptiveThresholdExperimentIsEnabled()) {
-}
+ in_experiment_(AdaptiveThresholdExperimentIsEnabled()) {}
-uint32_t AimdRateControl::GetMinBitrate() const {
- return min_configured_bitrate_bps_;
+void AimdRateControl::SetMinBitrate(int min_bitrate_bps) {
+ min_configured_bitrate_bps_ = min_bitrate_bps;
+ current_bitrate_bps_ = std::max<int>(min_bitrate_bps, current_bitrate_bps_);
}
bool AimdRateControl::ValidEstimate() const {
@@ -104,7 +107,7 @@ void AimdRateControl::Update(const RateControlInput* input, int64_t now_ms) {
// second.
if (!bitrate_is_initialized_) {
const int64_t kInitializationTimeMs = 5000;
- DCHECK_LE(kBitrateWindowMs, kInitializationTimeMs);
+ RTC_DCHECK_LE(kBitrateWindowMs, kInitializationTimeMs);
if (time_first_incoming_estimate_ < 0) {
if (input->_incomingBitRate > 0) {
time_first_incoming_estimate_ = now_ms;
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/aimd_rate_control.h b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/aimd_rate_control.h
index b8c47a4b55a..bc5ca41dff4 100644
--- a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/aimd_rate_control.h
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/aimd_rate_control.h
@@ -23,13 +23,13 @@ namespace webrtc {
// multiplicatively.
class AimdRateControl {
public:
- explicit AimdRateControl(uint32_t min_bitrate_bps);
+ AimdRateControl();
virtual ~AimdRateControl() {}
// Returns true if there is a valid estimate of the incoming bitrate, false
// otherwise.
bool ValidEstimate() const;
- uint32_t GetMinBitrate() const;
+ void SetMinBitrate(int min_bitrate_bps);
int64_t GetFeedbackInterval() const;
// Returns true if the bitrate estimate hasn't been changed for more than
// an RTT, or if the incoming_bitrate is more than 5% above the current
@@ -81,8 +81,6 @@ class AimdRateControl {
int64_t rtt_;
int64_t time_of_last_log_;
bool in_experiment_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(AimdRateControl);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/bwe_simulations.cc b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/bwe_simulations.cc
index af69924f529..9d86ba31be3 100644
--- a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/bwe_simulations.cc
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/bwe_simulations.cc
@@ -16,7 +16,6 @@
#include "webrtc/modules/remote_bitrate_estimator/test/packet_sender.h"
#include "webrtc/test/testsupport/fileutils.h"
-using std::string;
namespace webrtc {
namespace testing {
@@ -27,7 +26,8 @@ namespace bwe {
class BweSimulation : public BweTest,
public ::testing::TestWithParam<BandwidthEstimatorType> {
public:
- BweSimulation() : BweTest() {}
+ BweSimulation()
+ : BweTest(), random_(Clock::GetRealTimeClock()->TimeInMicroseconds()) {}
virtual ~BweSimulation() {}
protected:
@@ -36,8 +36,10 @@ class BweSimulation : public BweTest,
VerboseLogging(true);
}
+ Random random_;
+
private:
- DISALLOW_COPY_AND_ASSIGN(BweSimulation);
+ RTC_DISALLOW_COPY_AND_ASSIGN(BweSimulation);
};
INSTANTIATE_TEST_CASE_P(VideoSendersTest,
@@ -49,9 +51,10 @@ INSTANTIATE_TEST_CASE_P(VideoSendersTest,
TEST_P(BweSimulation, SprintUplinkTest) {
AdaptiveVideoSource source(0, 30, 300, 0, 0);
VideoSender sender(&uplink_, &source, GetParam());
- RateCounterFilter counter1(&uplink_, 0, "sender_output");
+ RateCounterFilter counter1(&uplink_, 0, "sender_output",
+ bwe_names[GetParam()]);
TraceBasedDeliveryFilter filter(&uplink_, 0, "link_capacity");
- RateCounterFilter counter2(&uplink_, 0, "receiver_input");
+ RateCounterFilter counter2(&uplink_, 0, "Receiver", bwe_names[GetParam()]);
PacketReceiver receiver(&uplink_, 0, GetParam(), true, true);
ASSERT_TRUE(filter.Init(test::ResourcePath("sprint-uplink", "rx")));
RunFor(60 * 1000);
@@ -60,9 +63,11 @@ TEST_P(BweSimulation, SprintUplinkTest) {
TEST_P(BweSimulation, Verizon4gDownlinkTest) {
AdaptiveVideoSource source(0, 30, 300, 0, 0);
VideoSender sender(&downlink_, &source, GetParam());
- RateCounterFilter counter1(&downlink_, 0, "sender_output");
+ RateCounterFilter counter1(&downlink_, 0, "sender_output",
+ bwe_names[GetParam()] + "_up");
TraceBasedDeliveryFilter filter(&downlink_, 0, "link_capacity");
- RateCounterFilter counter2(&downlink_, 0, "receiver_input");
+ RateCounterFilter counter2(&downlink_, 0, "Receiver",
+ bwe_names[GetParam()] + "_down");
PacketReceiver receiver(&downlink_, 0, GetParam(), true, true);
ASSERT_TRUE(filter.Init(test::ResourcePath("verizon4g-downlink", "rx")));
RunFor(22 * 60 * 1000);
@@ -76,25 +81,27 @@ TEST_P(BweSimulation, Choke1000kbps500kbps1000kbpsBiDirectional) {
AdaptiveVideoSource source(kFlowIds[0], 30, 300, 0, 0);
VideoSender sender(&uplink_, &source, GetParam());
ChokeFilter choke(&uplink_, kFlowIds[0]);
- RateCounterFilter counter(&uplink_, kFlowIds[0], "receiver_input_0");
+ RateCounterFilter counter(&uplink_, kFlowIds[0], "Receiver_0",
+ bwe_names[GetParam()]);
PacketReceiver receiver(&uplink_, kFlowIds[0], GetParam(), true, false);
AdaptiveVideoSource source2(kFlowIds[1], 30, 300, 0, 0);
VideoSender sender2(&downlink_, &source2, GetParam());
ChokeFilter choke2(&downlink_, kFlowIds[1]);
DelayFilter delay(&downlink_, CreateFlowIds(kFlowIds, kNumFlows));
- RateCounterFilter counter2(&downlink_, kFlowIds[1], "receiver_input_1");
+ RateCounterFilter counter2(&downlink_, kFlowIds[1], "Receiver_1",
+ bwe_names[GetParam()]);
PacketReceiver receiver2(&downlink_, kFlowIds[1], GetParam(), true, false);
- choke2.SetCapacity(500);
- delay.SetDelayMs(0);
+ choke2.set_capacity_kbps(500);
+ delay.SetOneWayDelayMs(0);
- choke.SetCapacity(1000);
- choke.SetMaxDelay(500);
+ choke.set_capacity_kbps(1000);
+ choke.set_max_delay_ms(500);
RunFor(60 * 1000);
- choke.SetCapacity(500);
+ choke.set_capacity_kbps(500);
RunFor(60 * 1000);
- choke.SetCapacity(1000);
+ choke.set_capacity_kbps(1000);
RunFor(60 * 1000);
}
@@ -103,15 +110,15 @@ TEST_P(BweSimulation, Choke1000kbps500kbps1000kbps) {
AdaptiveVideoSource source(0, 30, 300, 0, 0);
VideoSender sender(&uplink_, &source, GetParam());
ChokeFilter choke(&uplink_, 0);
- RateCounterFilter counter(&uplink_, 0, "receiver_input");
+ RateCounterFilter counter(&uplink_, 0, "Receiver", bwe_names[GetParam()]);
PacketReceiver receiver(&uplink_, 0, GetParam(), true, false);
- choke.SetCapacity(1000);
- choke.SetMaxDelay(500);
+ choke.set_capacity_kbps(1000);
+ choke.set_max_delay_ms(500);
RunFor(60 * 1000);
- choke.SetCapacity(500);
+ choke.set_capacity_kbps(500);
RunFor(60 * 1000);
- choke.SetCapacity(1000);
+ choke.set_capacity_kbps(1000);
RunFor(60 * 1000);
}
@@ -119,14 +126,14 @@ TEST_P(BweSimulation, PacerChoke1000kbps500kbps1000kbps) {
AdaptiveVideoSource source(0, 30, 300, 0, 0);
PacedVideoSender sender(&uplink_, &source, GetParam());
ChokeFilter filter(&uplink_, 0);
- RateCounterFilter counter(&uplink_, 0, "receiver_input");
+ RateCounterFilter counter(&uplink_, 0, "Receiver", bwe_names[GetParam()]);
PacketReceiver receiver(&uplink_, 0, GetParam(), true, true);
- filter.SetCapacity(1000);
- filter.SetMaxDelay(500);
+ filter.set_capacity_kbps(1000);
+ filter.set_max_delay_ms(500);
RunFor(60 * 1000);
- filter.SetCapacity(500);
+ filter.set_capacity_kbps(500);
RunFor(60 * 1000);
- filter.SetCapacity(1000);
+ filter.set_capacity_kbps(1000);
RunFor(60 * 1000);
}
@@ -134,10 +141,10 @@ TEST_P(BweSimulation, PacerChoke10000kbps) {
PeriodicKeyFrameSource source(0, 30, 300, 0, 0, 1000);
PacedVideoSender sender(&uplink_, &source, GetParam());
ChokeFilter filter(&uplink_, 0);
- RateCounterFilter counter(&uplink_, 0, "receiver_input");
+ RateCounterFilter counter(&uplink_, 0, "Receiver", bwe_names[GetParam()]);
PacketReceiver receiver(&uplink_, 0, GetParam(), true, true);
- filter.SetCapacity(10000);
- filter.SetMaxDelay(500);
+ filter.set_capacity_kbps(10000);
+ filter.set_max_delay_ms(500);
RunFor(60 * 1000);
}
@@ -145,14 +152,14 @@ TEST_P(BweSimulation, PacerChoke200kbps30kbps200kbps) {
AdaptiveVideoSource source(0, 30, 300, 0, 0);
PacedVideoSender sender(&uplink_, &source, GetParam());
ChokeFilter filter(&uplink_, 0);
- RateCounterFilter counter(&uplink_, 0, "receiver_input");
+ RateCounterFilter counter(&uplink_, 0, "Receiver", bwe_names[GetParam()]);
PacketReceiver receiver(&uplink_, 0, GetParam(), true, true);
- filter.SetCapacity(200);
- filter.SetMaxDelay(500);
+ filter.set_capacity_kbps(200);
+ filter.set_max_delay_ms(500);
RunFor(60 * 1000);
- filter.SetCapacity(30);
+ filter.set_capacity_kbps(30);
RunFor(60 * 1000);
- filter.SetCapacity(200);
+ filter.set_capacity_kbps(200);
RunFor(60 * 1000);
}
@@ -160,24 +167,25 @@ TEST_P(BweSimulation, Choke200kbps30kbps200kbps) {
AdaptiveVideoSource source(0, 30, 300, 0, 0);
VideoSender sender(&uplink_, &source, GetParam());
ChokeFilter filter(&uplink_, 0);
- RateCounterFilter counter(&uplink_, 0, "receiver_input");
+ RateCounterFilter counter(&uplink_, 0, "Receiver", bwe_names[GetParam()]);
PacketReceiver receiver(&uplink_, 0, GetParam(), true, true);
- filter.SetCapacity(200);
- filter.SetMaxDelay(500);
+ filter.set_capacity_kbps(200);
+ filter.set_max_delay_ms(500);
RunFor(60 * 1000);
- filter.SetCapacity(30);
+ filter.set_capacity_kbps(30);
RunFor(60 * 1000);
- filter.SetCapacity(200);
+ filter.set_capacity_kbps(200);
RunFor(60 * 1000);
}
TEST_P(BweSimulation, GoogleWifiTrace3Mbps) {
AdaptiveVideoSource source(0, 30, 300, 0, 0);
VideoSender sender(&uplink_, &source, GetParam());
- RateCounterFilter counter1(&uplink_, 0, "sender_output");
+ RateCounterFilter counter1(&uplink_, 0, "sender_output",
+ bwe_names[GetParam()]);
TraceBasedDeliveryFilter filter(&uplink_, 0, "link_capacity");
- filter.SetMaxDelay(500);
- RateCounterFilter counter2(&uplink_, 0, "receiver_input");
+ filter.set_max_delay_ms(500);
+ RateCounterFilter counter2(&uplink_, 0, "Receiver", bwe_names[GetParam()]);
PacketReceiver receiver(&uplink_, 0, GetParam(), true, true);
ASSERT_TRUE(filter.Init(test::ResourcePath("google-wifi-3mbps", "rx")));
RunFor(300 * 1000);
@@ -187,9 +195,9 @@ TEST_P(BweSimulation, LinearIncreasingCapacity) {
PeriodicKeyFrameSource source(0, 30, 300, 0, 0, 1000000);
PacedVideoSender sender(&uplink_, &source, GetParam());
ChokeFilter filter(&uplink_, 0);
- RateCounterFilter counter(&uplink_, 0, "receiver_input");
+ RateCounterFilter counter(&uplink_, 0, "Receiver", bwe_names[GetParam()]);
PacketReceiver receiver(&uplink_, 0, GetParam(), true, true);
- filter.SetMaxDelay(500);
+ filter.set_max_delay_ms(500);
const int kStartingCapacityKbps = 150;
const int kEndingCapacityKbps = 1500;
const int kStepKbps = 5;
@@ -197,7 +205,7 @@ TEST_P(BweSimulation, LinearIncreasingCapacity) {
for (int i = kStartingCapacityKbps; i <= kEndingCapacityKbps;
i += kStepKbps) {
- filter.SetCapacity(i);
+ filter.set_capacity_kbps(i);
RunFor(kStepTimeMs);
}
}
@@ -206,9 +214,9 @@ TEST_P(BweSimulation, LinearDecreasingCapacity) {
PeriodicKeyFrameSource source(0, 30, 300, 0, 0, 1000000);
PacedVideoSender sender(&uplink_, &source, GetParam());
ChokeFilter filter(&uplink_, 0);
- RateCounterFilter counter(&uplink_, 0, "receiver_input");
+ RateCounterFilter counter(&uplink_, 0, "Receiver", bwe_names[GetParam()]);
PacketReceiver receiver(&uplink_, 0, GetParam(), true, true);
- filter.SetMaxDelay(500);
+ filter.set_max_delay_ms(500);
const int kStartingCapacityKbps = 1500;
const int kEndingCapacityKbps = 150;
const int kStepKbps = -5;
@@ -216,7 +224,7 @@ TEST_P(BweSimulation, LinearDecreasingCapacity) {
for (int i = kStartingCapacityKbps; i >= kEndingCapacityKbps;
i += kStepKbps) {
- filter.SetCapacity(i);
+ filter.set_capacity_kbps(i);
RunFor(kStepTimeMs);
}
}
@@ -224,10 +232,11 @@ TEST_P(BweSimulation, LinearDecreasingCapacity) {
TEST_P(BweSimulation, PacerGoogleWifiTrace3Mbps) {
PeriodicKeyFrameSource source(0, 30, 300, 0, 0, 1000);
PacedVideoSender sender(&uplink_, &source, GetParam());
- RateCounterFilter counter1(&uplink_, 0, "sender_output");
+ RateCounterFilter counter1(&uplink_, 0, "sender_output",
+ bwe_names[GetParam()]);
TraceBasedDeliveryFilter filter(&uplink_, 0, "link_capacity");
- filter.SetMaxDelay(500);
- RateCounterFilter counter2(&uplink_, 0, "receiver_input");
+ filter.set_max_delay_ms(500);
+ RateCounterFilter counter2(&uplink_, 0, "Receiver", bwe_names[GetParam()]);
PacketReceiver receiver(&uplink_, 0, GetParam(), true, true);
ASSERT_TRUE(filter.Init(test::ResourcePath("google-wifi-3mbps", "rx")));
RunFor(300 * 1000);
@@ -248,16 +257,18 @@ TEST_P(BweSimulation, SelfFairnessTest) {
}
ChokeFilter choke(&uplink_, CreateFlowIds(kAllFlowIds, kNumFlows));
- choke.SetCapacity(1000);
+ choke.set_capacity_kbps(1000);
rtc::scoped_ptr<RateCounterFilter> rate_counters[kNumFlows];
for (size_t i = 0; i < kNumFlows; ++i) {
- rate_counters[i].reset(new RateCounterFilter(
- &uplink_, CreateFlowIds(&kAllFlowIds[i], 1), "receiver_input"));
+ rate_counters[i].reset(
+ new RateCounterFilter(&uplink_, CreateFlowIds(&kAllFlowIds[i], 1),
+ "Receiver", bwe_names[GetParam()]));
}
RateCounterFilter total_utilization(
- &uplink_, CreateFlowIds(kAllFlowIds, kNumFlows), "total_utilization");
+ &uplink_, CreateFlowIds(kAllFlowIds, kNumFlows), "total_utilization",
+ "Total_link_utilization");
rtc::scoped_ptr<PacketReceiver> receivers[kNumFlows];
for (size_t i = 0; i < kNumFlows; ++i) {
@@ -269,36 +280,173 @@ TEST_P(BweSimulation, SelfFairnessTest) {
}
TEST_P(BweSimulation, PacedSelfFairness50msTest) {
- srand(Clock::GetRealTimeClock()->TimeInMicroseconds());
- RunFairnessTest(GetParam(), 4, 0, 1000, 3000, 50);
+ const int64_t kAverageOffsetMs = 20 * 1000;
+ const int kNumRmcatFlows = 4;
+ int64_t offsets_ms[kNumRmcatFlows];
+ offsets_ms[0] = random_.Rand(0, 2 * kAverageOffsetMs);
+ for (int i = 1; i < kNumRmcatFlows; ++i) {
+ offsets_ms[i] = offsets_ms[i - 1] + random_.Rand(0, 2 * kAverageOffsetMs);
+ }
+ RunFairnessTest(GetParam(), kNumRmcatFlows, 0, 1000, 3000, 50, 50, 0,
+ offsets_ms);
}
TEST_P(BweSimulation, PacedSelfFairness500msTest) {
- srand(Clock::GetRealTimeClock()->TimeInMicroseconds());
- RunFairnessTest(GetParam(), 4, 0, 1000, 3000, 500);
+ const int64_t kAverageOffsetMs = 20 * 1000;
+ const int kNumRmcatFlows = 4;
+ int64_t offsets_ms[kNumRmcatFlows];
+ offsets_ms[0] = random_.Rand(0, 2 * kAverageOffsetMs);
+ for (int i = 1; i < kNumRmcatFlows; ++i) {
+ offsets_ms[i] = offsets_ms[i - 1] + random_.Rand(0, 2 * kAverageOffsetMs);
+ }
+ RunFairnessTest(GetParam(), kNumRmcatFlows, 0, 1000, 3000, 500, 50, 0,
+ offsets_ms);
}
TEST_P(BweSimulation, PacedSelfFairness1000msTest) {
- srand(Clock::GetRealTimeClock()->TimeInMicroseconds());
- RunFairnessTest(GetParam(), 4, 0, 1000, 3000, 1000);
+ const int64_t kAverageOffsetMs = 20 * 1000;
+ const int kNumRmcatFlows = 4;
+ int64_t offsets_ms[kNumRmcatFlows];
+ offsets_ms[0] = random_.Rand(0, 2 * kAverageOffsetMs);
+ for (int i = 1; i < kNumRmcatFlows; ++i) {
+ offsets_ms[i] = offsets_ms[i - 1] + random_.Rand(0, 2 * kAverageOffsetMs);
+ }
+ RunFairnessTest(GetParam(), 4, 0, 1000, 3000, 1000, 50, 0, offsets_ms);
}
TEST_P(BweSimulation, TcpFairness50msTest) {
- srand(Clock::GetRealTimeClock()->TimeInMicroseconds());
- RunFairnessTest(GetParam(), 1, 1, 1000, 2000, 50);
+ const int64_t kAverageOffsetMs = 20 * 1000;
+ int64_t offset_ms[] = {random_.Rand(0, 2 * kAverageOffsetMs), 0};
+ RunFairnessTest(GetParam(), 1, 1, 1000, 2000, 50, 50, 0, offset_ms);
}
TEST_P(BweSimulation, TcpFairness500msTest) {
- srand(Clock::GetRealTimeClock()->TimeInMicroseconds());
- RunFairnessTest(GetParam(), 1, 1, 1000, 2000, 500);
+ const int64_t kAverageOffsetMs = 20 * 1000;
+ int64_t offset_ms[] = {random_.Rand(0, 2 * kAverageOffsetMs), 0};
+ RunFairnessTest(GetParam(), 1, 1, 1000, 2000, 500, 50, 0, offset_ms);
}
TEST_P(BweSimulation, TcpFairness1000msTest) {
- srand(Clock::GetRealTimeClock()->TimeInMicroseconds());
- RunFairnessTest(GetParam(), 1, 1, 1000, 2000, 1000);
+ const int kAverageOffsetMs = 20 * 1000;
+ int64_t offset_ms[] = {random_.Rand(0, 2 * kAverageOffsetMs), 0};
+ RunFairnessTest(GetParam(), 1, 1, 1000, 2000, 1000, 50, 0, offset_ms);
+}
+
+// The following test cases begin with "Evaluation" as a reference to the
+// Internet draft https://tools.ietf.org/html/draft-ietf-rmcat-eval-test-01.
+
+TEST_P(BweSimulation, Evaluation1) {
+ RunVariableCapacity1SingleFlow(GetParam());
+}
+
+TEST_P(BweSimulation, Evaluation2) {
+ const size_t kNumFlows = 2;
+ RunVariableCapacity2MultipleFlows(GetParam(), kNumFlows);
+}
+
+TEST_P(BweSimulation, Evaluation3) {
+ RunBidirectionalFlow(GetParam());
+}
+
+TEST_P(BweSimulation, Evaluation4) {
+ RunSelfFairness(GetParam());
+}
+
+TEST_P(BweSimulation, Evaluation5) {
+ RunRoundTripTimeFairness(GetParam());
+}
+
+TEST_P(BweSimulation, Evaluation6) {
+ RunLongTcpFairness(GetParam());
+}
+
+// Different calls to Evaluation7 will create the same FileSizes
+// and StartingTimes as long as the seeds remain unchanged. This is essential
+// when calling it with multiple estimators for comparison purposes.
+TEST_P(BweSimulation, Evaluation7) {
+ const int kNumTcpFiles = 10;
+ RunMultipleShortTcpFairness(GetParam(),
+ BweTest::GetFileSizesBytes(kNumTcpFiles),
+ BweTest::GetStartingTimesMs(kNumTcpFiles));
+}
+
+TEST_P(BweSimulation, Evaluation8) {
+ RunPauseResumeFlows(GetParam());
+}
+
+// The following test cases, which begin with "GccComparison", run each
+// evaluation test case for both GCC and the RMCAT estimator under test.
+
+TEST_P(BweSimulation, GccComparison1) {
+ RunVariableCapacity1SingleFlow(GetParam());
+ BweTest gcc_test(false);
+ gcc_test.RunVariableCapacity1SingleFlow(kFullSendSideEstimator);
+}
+
+TEST_P(BweSimulation, GccComparison2) {
+ const size_t kNumFlows = 2;
+ RunVariableCapacity2MultipleFlows(GetParam(), kNumFlows);
+ BweTest gcc_test(false);
+ gcc_test.RunVariableCapacity2MultipleFlows(kFullSendSideEstimator, kNumFlows);
+}
+
+TEST_P(BweSimulation, GccComparison3) {
+ RunBidirectionalFlow(GetParam());
+ BweTest gcc_test(false);
+ gcc_test.RunBidirectionalFlow(kFullSendSideEstimator);
+}
+
+TEST_P(BweSimulation, GccComparison4) {
+ RunSelfFairness(GetParam());
+ BweTest gcc_test(false);
+ gcc_test.RunSelfFairness(GetParam());
+}
+
+TEST_P(BweSimulation, GccComparison5) {
+ RunRoundTripTimeFairness(GetParam());
+ BweTest gcc_test(false);
+ gcc_test.RunRoundTripTimeFairness(kFullSendSideEstimator);
+}
+
+TEST_P(BweSimulation, GccComparison6) {
+ RunLongTcpFairness(GetParam());
+ BweTest gcc_test(false);
+ gcc_test.RunLongTcpFairness(kFullSendSideEstimator);
+}
+
+TEST_P(BweSimulation, GccComparison7) {
+ const int kNumTcpFiles = 10;
+
+ std::vector<int> tcp_file_sizes_bytes =
+ BweTest::GetFileSizesBytes(kNumTcpFiles);
+ std::vector<int64_t> tcp_starting_times_ms =
+ BweTest::GetStartingTimesMs(kNumTcpFiles);
+
+ RunMultipleShortTcpFairness(GetParam(), tcp_file_sizes_bytes,
+ tcp_starting_times_ms);
+
+ BweTest gcc_test(false);
+ gcc_test.RunMultipleShortTcpFairness(
+ kFullSendSideEstimator, tcp_file_sizes_bytes, tcp_starting_times_ms);
+}
+
+TEST_P(BweSimulation, GccComparison8) {
+ RunPauseResumeFlows(GetParam());
+ BweTest gcc_test(false);
+ gcc_test.RunPauseResumeFlows(kFullSendSideEstimator);
+}
+
+TEST_P(BweSimulation, GccComparisonChoke) {
+ int array[] = {1000, 500, 1000};
+ std::vector<int> capacities_kbps(array, array + 3);
+ RunChoke(GetParam(), capacities_kbps);
+
+ BweTest gcc_test(false);
+ gcc_test.RunChoke(kFullSendSideEstimator, capacities_kbps);
}
#endif // BWE_TEST_LOGGING_COMPILE_TIME_ENABLE
} // namespace bwe
} // namespace testing
} // namespace webrtc
+
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/include/mock/mock_remote_bitrate_estimator.h b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/include/mock/mock_remote_bitrate_estimator.h
new file mode 100644
index 00000000000..91a8ac8707b
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/include/mock/mock_remote_bitrate_estimator.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_REMOTE_BITRATE_ESTIMATOR_INCLUDE_MOCK_MOCK_REMOTE_BITRATE_ESTIMATOR_H_
+#define WEBRTC_MODULES_REMOTE_BITRATE_ESTIMATOR_INCLUDE_MOCK_MOCK_REMOTE_BITRATE_ESTIMATOR_H_
+
+#include <vector>
+
+#include "testing/gmock/include/gmock/gmock.h"
+#include "webrtc/modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h"
+
+namespace webrtc {
+
+class MockRemoteBitrateEstimator : public RemoteBitrateEstimator {
+ public:
+ MOCK_METHOD1(IncomingPacketFeedbackVector,
+ void(const std::vector<PacketInfo>&));
+ MOCK_METHOD4(IncomingPacket, void(int64_t, size_t, const RTPHeader&, bool));
+ MOCK_METHOD1(RemoveStream, void(unsigned int));
+ MOCK_CONST_METHOD2(LatestEstimate,
+ bool(std::vector<unsigned int>*, unsigned int*));
+ MOCK_CONST_METHOD1(GetStats, bool(ReceiveBandwidthEstimatorStats*));
+
+  // From CallStatsObserver.
+ MOCK_METHOD2(OnRttUpdate, void(int64_t, int64_t));
+
+ // From Module.
+ MOCK_METHOD0(TimeUntilNextProcess, int64_t());
+ MOCK_METHOD0(Process, int32_t());
+ MOCK_METHOD1(SetMinBitrate, void(int));
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_REMOTE_BITRATE_ESTIMATOR_INCLUDE_MOCK_MOCK_REMOTE_BITRATE_ESTIMATOR_H_
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h
index 057dfb8e974..4bd9d8c7bcb 100644
--- a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h
@@ -19,6 +19,7 @@
#include "webrtc/common_types.h"
#include "webrtc/modules/interface/module.h"
#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -58,34 +59,9 @@ struct ReceiveBandwidthEstimatorStats {
std::vector<int64_t> recent_arrival_time_ms;
};
-struct PacketInfo {
- PacketInfo(int64_t arrival_time_ms,
- int64_t send_time_ms,
- uint16_t sequence_number,
- size_t payload_size,
- bool was_paced)
- : arrival_time_ms(arrival_time_ms),
- send_time_ms(send_time_ms),
- sequence_number(sequence_number),
- payload_size(payload_size),
- was_paced(was_paced) {}
- // Time corresponding to when the packet was received. Timestamped with the
- // receiver's clock.
- int64_t arrival_time_ms;
- // Time corresponding to when the packet was sent, timestamped with the
- // sender's clock.
- int64_t send_time_ms;
- // Packet identifier, incremented with 1 for every packet generated by the
- // sender.
- uint16_t sequence_number;
- // Size of the packet excluding RTP headers.
- size_t payload_size;
- // True if the packet was paced out by the pacer.
- bool was_paced;
-};
-
class RemoteBitrateEstimator : public CallStatsObserver, public Module {
public:
+ static const int kDefaultMinBitrateBps = 30000;
virtual ~RemoteBitrateEstimator() {}
virtual void IncomingPacketFeedbackVector(
@@ -115,6 +91,8 @@ class RemoteBitrateEstimator : public CallStatsObserver, public Module {
// Returns true if the statistics are available.
virtual bool GetStats(ReceiveBandwidthEstimatorStats* output) const = 0;
+ virtual void SetMinBitrate(int min_bitrate_bps) = 0;
+
protected:
static const int64_t kProcessIntervalMs = 500;
static const int64_t kStreamTimeOutMs = 2000;
diff --git a/chromium/third_party/webrtc/modules/bitrate_controller/send_time_history.h b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/include/send_time_history.h
index 8835856353c..e104ba661dc 100644
--- a/chromium/third_party/webrtc/modules/bitrate_controller/send_time_history.h
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/include/send_time_history.h
@@ -15,6 +15,7 @@
#include "webrtc/base/constructormagic.h"
#include "webrtc/base/basictypes.h"
+#include "webrtc/modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h"
namespace webrtc {
@@ -23,8 +24,12 @@ class SendTimeHistory {
explicit SendTimeHistory(int64_t packet_age_limit);
virtual ~SendTimeHistory();
- void AddAndRemoveOldSendTimes(uint16_t sequence_number, int64_t timestamp);
- bool GetSendTime(uint16_t sequence_number, int64_t* timestamp, bool remove);
+ void AddAndRemoveOld(const PacketInfo& packet);
+ bool UpdateSendTime(uint16_t sequence_number, int64_t timestamp);
+ // Look up PacketInfo for a sent packet, based on the sequence number, and
+ // populate all fields except for receive_time. The packet parameter must
+ // thus be non-null and have the sequence_number field set.
+ bool GetInfo(PacketInfo* packet, bool remove);
void Clear();
private:
@@ -33,9 +38,9 @@ class SendTimeHistory {
const int64_t packet_age_limit_;
uint16_t oldest_sequence_number_; // Oldest may not be lowest.
- std::map<uint16_t, int64_t> history_;
+ std::map<uint16_t, PacketInfo> history_;
- DISALLOW_COPY_AND_ASSIGN(SendTimeHistory);
+ RTC_DISALLOW_COPY_AND_ASSIGN(SendTimeHistory);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/inter_arrival.cc b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/inter_arrival.cc
index a9a7ae7d07f..3dee305bad1 100644
--- a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/inter_arrival.cc
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/inter_arrival.cc
@@ -13,6 +13,7 @@
#include <algorithm>
#include <cassert>
+#include "webrtc/base/logging.h"
#include "webrtc/modules/interface/module_common_types.h"
namespace webrtc {
@@ -52,6 +53,14 @@ bool InterArrival::ComputeDeltas(uint32_t timestamp,
prev_timestamp_group_.timestamp;
*arrival_time_delta_ms = current_timestamp_group_.complete_time_ms -
prev_timestamp_group_.complete_time_ms;
+ if (*arrival_time_delta_ms < 0) {
+ // The group of packets has been reordered since receiving its local
+ // arrival timestamp.
+ LOG(LS_WARNING) << "Packets are being reordered on the path from the "
+ "socket to the bandwidth estimator. Ignoring this "
+ "packet for bandwidth estimation.";
+ return false;
+ }
assert(*arrival_time_delta_ms >= 0);
*packet_size_delta = static_cast<int>(current_timestamp_group_.size) -
static_cast<int>(prev_timestamp_group_.size);
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/inter_arrival.h b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/inter_arrival.h
index ace855118e8..427bafcf96f 100644
--- a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/inter_arrival.h
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/inter_arrival.h
@@ -78,7 +78,7 @@ class InterArrival {
double timestamp_to_ms_coeff_;
bool burst_grouping_;
- DISALLOW_IMPLICIT_CONSTRUCTORS(InterArrival);
+ RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(InterArrival);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/overuse_detector.cc b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/overuse_detector.cc
index b21933a1934..62bb2e1cac4 100644
--- a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/overuse_detector.cc
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/overuse_detector.cc
@@ -143,7 +143,7 @@ void OveruseDetector::UpdateThreshold(double modified_offset, int64_t now_ms) {
}
void OveruseDetector::InitializeExperiment() {
- DCHECK(in_experiment_);
+ RTC_DCHECK(in_experiment_);
double k_up = 0.0;
double k_down = 0.0;
overusing_time_threshold_ = kOverUsingTimeThreshold;
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/overuse_detector.h b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/overuse_detector.h
index 5a07b4e19a6..bb69a8a0a10 100644
--- a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/overuse_detector.h
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/overuse_detector.h
@@ -59,7 +59,7 @@ class OveruseDetector {
int overuse_counter_;
BandwidthUsage hypothesis_;
- DISALLOW_COPY_AND_ASSIGN(OveruseDetector);
+ RTC_DISALLOW_COPY_AND_ASSIGN(OveruseDetector);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/overuse_estimator.h b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/overuse_estimator.h
index 6499d8d0432..d671f39166f 100644
--- a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/overuse_estimator.h
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/overuse_estimator.h
@@ -63,7 +63,7 @@ class OveruseEstimator {
double var_noise_;
std::list<double> ts_delta_hist_;
- DISALLOW_COPY_AND_ASSIGN(OveruseEstimator);
+ RTC_DISALLOW_COPY_AND_ASSIGN(OveruseEstimator);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator.gypi b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator.gypi
index e4b21db3753..dbc58824561 100644
--- a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator.gypi
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator.gypi
@@ -21,6 +21,7 @@
'sources': [
'include/bwe_defines.h',
'include/remote_bitrate_estimator.h',
+ 'include/send_time_history.h',
'aimd_rate_control.cc',
'aimd_rate_control.h',
'inter_arrival.cc',
@@ -35,6 +36,11 @@
'remote_bitrate_estimator_abs_send_time.h',
'remote_bitrate_estimator_single_stream.cc',
'remote_bitrate_estimator_single_stream.h',
+ 'remote_estimator_proxy.cc',
+ 'remote_estimator_proxy.h',
+ 'send_time_history.cc',
+ 'transport_feedback_adapter.cc',
+ 'transport_feedback_adapter.h',
'test/bwe_test_logging.cc',
'test/bwe_test_logging.h',
], # source
@@ -70,9 +76,10 @@
'test/bwe_test_fileutils.h',
'test/bwe_test_framework.cc',
'test/bwe_test_framework.h',
- 'test/bwe_test_framework_unittest.cc',
'test/bwe_test_logging.cc',
'test/bwe_test_logging.h',
+ 'test/metric_recorder.cc',
+ 'test/metric_recorder.h',
'test/packet_receiver.cc',
'test/packet_receiver.h',
'test/packet_sender.cc',
@@ -82,7 +89,6 @@
'test/random.h',
'test/estimators/nada.cc',
'test/estimators/nada.h',
- 'test/estimators/nada_unittest.cc',
'test/estimators/remb.cc',
'test/estimators/remb.h',
'test/estimators/send_side.cc',
@@ -105,6 +111,7 @@
'target_name': 'bwe_tools_util',
'type': 'static_library',
'dependencies': [
+ '<(DEPTH)/third_party/gflags/gflags.gyp:gflags',
'<(webrtc_root)/system_wrappers/system_wrappers.gyp:system_wrappers',
'rtp_rtcp',
],
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time.cc b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time.cc
index e83ac812e31..56a309c59db 100644
--- a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time.cc
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time.cc
@@ -16,6 +16,7 @@
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/base/thread_annotations.h"
#include "webrtc/modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h"
+#include "webrtc/modules/pacing/include/paced_sender.h"
#include "webrtc/system_wrappers/interface/clock.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
#include "webrtc/system_wrappers/interface/logging.h"
@@ -88,8 +89,7 @@ bool RemoteBitrateEstimatorAbsSendTime::IsWithinClusterBounds(
RemoteBitrateEstimatorAbsSendTime::RemoteBitrateEstimatorAbsSendTime(
RemoteBitrateObserver* observer,
- Clock* clock,
- uint32_t min_bitrate_bps)
+ Clock* clock)
: crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
observer_(observer),
clock_(clock),
@@ -98,7 +98,6 @@ bool RemoteBitrateEstimatorAbsSendTime::IsWithinClusterBounds(
estimator_(OverUseDetectorOptions()),
detector_(OverUseDetectorOptions()),
incoming_bitrate_(kBitrateWindowMs, 8000),
- remote_rate_(min_bitrate_bps),
last_process_time_(-1),
process_interval_ms_(kProcessIntervalMs),
total_propagation_delta_ms_(0),
@@ -268,7 +267,10 @@ void RemoteBitrateEstimatorAbsSendTime::IncomingPacketInfo(
uint32_t ts_delta = 0;
int64_t t_delta = 0;
int size_delta = 0;
- // For now only try to detect probes while we don't have a valid estimate.
+ // For now only try to detect probes while we don't have a valid estimate, and
+ // make sure the packet was paced. We currently assume that only packets
+ // larger than 200 bytes are paced by the sender.
+ was_paced = was_paced && payload_size > PacedSender::kMinProbePacketSize;
if (was_paced &&
(!remote_rate_.ValidEstimate() ||
now_ms - first_packet_time_ms_ < kInitialProbingIntervalMs)) {
@@ -368,9 +370,10 @@ void RemoteBitrateEstimatorAbsSendTime::UpdateEstimate(int64_t now_ms) {
}
}
-void RemoteBitrateEstimatorAbsSendTime::OnRttUpdate(int64_t rtt) {
+void RemoteBitrateEstimatorAbsSendTime::OnRttUpdate(int64_t avg_rtt_ms,
+ int64_t max_rtt_ms) {
CriticalSectionScoped cs(crit_sect_.get());
- remote_rate_.SetRtt(rtt);
+ remote_rate_.SetRtt(avg_rtt_ms);
}
void RemoteBitrateEstimatorAbsSendTime::RemoveStream(unsigned int ssrc) {
@@ -432,4 +435,9 @@ void RemoteBitrateEstimatorAbsSendTime::UpdateStats(int propagation_delta_ms,
total_propagation_delta_ms_ =
std::max(total_propagation_delta_ms_ + propagation_delta_ms, 0);
}
+
+void RemoteBitrateEstimatorAbsSendTime::SetMinBitrate(int min_bitrate_bps) {
+ CriticalSectionScoped cs(crit_sect_.get());
+ remote_rate_.SetMinBitrate(min_bitrate_bps);
+}
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time.h b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time.h
index 0d3802a89ed..b5ec81568a8 100644
--- a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time.h
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time.h
@@ -46,12 +46,12 @@ struct Cluster {
num_above_min_delta(0) {}
int GetSendBitrateBps() const {
- CHECK_GT(send_mean_ms, 0.0f);
+ RTC_CHECK_GT(send_mean_ms, 0.0f);
return mean_size * 8 * 1000 / send_mean_ms;
}
int GetRecvBitrateBps() const {
- CHECK_GT(recv_mean_ms, 0.0f);
+ RTC_CHECK_GT(recv_mean_ms, 0.0f);
return mean_size * 8 * 1000 / recv_mean_ms;
}
@@ -66,8 +66,7 @@ struct Cluster {
class RemoteBitrateEstimatorAbsSendTime : public RemoteBitrateEstimator {
public:
RemoteBitrateEstimatorAbsSendTime(RemoteBitrateObserver* observer,
- Clock* clock,
- uint32_t min_bitrate_bps);
+ Clock* clock);
virtual ~RemoteBitrateEstimatorAbsSendTime() {}
void IncomingPacketFeedbackVector(
@@ -83,11 +82,12 @@ class RemoteBitrateEstimatorAbsSendTime : public RemoteBitrateEstimator {
// deleted.
int32_t Process() override;
int64_t TimeUntilNextProcess() override;
- void OnRttUpdate(int64_t rtt) override;
+ void OnRttUpdate(int64_t avg_rtt_ms, int64_t max_rtt_ms) override;
void RemoveStream(unsigned int ssrc) override;
bool LatestEstimate(std::vector<unsigned int>* ssrcs,
unsigned int* bitrate_bps) const override;
bool GetStats(ReceiveBandwidthEstimatorStats* output) const override;
+ void SetMinBitrate(int min_bitrate_bps) override;
private:
typedef std::map<unsigned int, int64_t> Ssrcs;
@@ -146,7 +146,7 @@ class RemoteBitrateEstimatorAbsSendTime : public RemoteBitrateEstimator {
size_t total_probes_received_;
int64_t first_packet_time_ms_;
- DISALLOW_IMPLICIT_CONSTRUCTORS(RemoteBitrateEstimatorAbsSendTime);
+ RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(RemoteBitrateEstimatorAbsSendTime);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time_unittest.cc b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time_unittest.cc
index 6f8d6cb2c8e..195c95aacbb 100644
--- a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time_unittest.cc
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time_unittest.cc
@@ -17,16 +17,14 @@ namespace webrtc {
class RemoteBitrateEstimatorAbsSendTimeTest :
public RemoteBitrateEstimatorTest {
public:
- static const uint32_t kRemoteBitrateEstimatorMinBitrateBps = 30000;
RemoteBitrateEstimatorAbsSendTimeTest() {}
virtual void SetUp() {
bitrate_estimator_.reset(new RemoteBitrateEstimatorAbsSendTime(
- bitrate_observer_.get(), &clock_,
- kRemoteBitrateEstimatorMinBitrateBps));
+ bitrate_observer_.get(), &clock_));
}
protected:
- DISALLOW_COPY_AND_ASSIGN(RemoteBitrateEstimatorAbsSendTimeTest);
+ RTC_DISALLOW_COPY_AND_ASSIGN(RemoteBitrateEstimatorAbsSendTimeTest);
};
TEST_F(RemoteBitrateEstimatorAbsSendTimeTest, InitialBehavior) {
@@ -38,7 +36,7 @@ TEST_F(RemoteBitrateEstimatorAbsSendTimeTest, RateIncreaseReordering) {
}
TEST_F(RemoteBitrateEstimatorAbsSendTimeTest, RateIncreaseRtpTimestamps) {
- RateIncreaseRtpTimestampsTestHelper(1089);
+ RateIncreaseRtpTimestampsTestHelper(1240);
}
TEST_F(RemoteBitrateEstimatorAbsSendTimeTest, CapacityDropOneStream) {
@@ -259,4 +257,36 @@ TEST_F(RemoteBitrateEstimatorAbsSendTimeTest,
EXPECT_TRUE(bitrate_observer_->updated());
EXPECT_NEAR(bitrate_observer_->latest_bitrate(), 4000000u, 10000);
}
+
+TEST_F(RemoteBitrateEstimatorAbsSendTimeTest, ProbingIgnoresSmallPackets) {
+ const int kProbeLength = 5;
+ int64_t now_ms = clock_.TimeInMilliseconds();
+ // Probing with 200 bytes every 10 ms, should be ignored by the probe
+ // detection.
+ for (int i = 0; i < kProbeLength; ++i) {
+ clock_.AdvanceTimeMilliseconds(10);
+ now_ms = clock_.TimeInMilliseconds();
+ IncomingPacket(0, 200, now_ms, 90 * now_ms, AbsSendTime(now_ms, 1000),
+ true);
+ }
+
+ EXPECT_EQ(0, bitrate_estimator_->Process());
+ EXPECT_FALSE(bitrate_observer_->updated());
+
+ // Followed by a probe with 1000 bytes packets, should be detected as a
+ // probe.
+ for (int i = 0; i < kProbeLength; ++i) {
+ clock_.AdvanceTimeMilliseconds(10);
+ now_ms = clock_.TimeInMilliseconds();
+ IncomingPacket(0, 1000, now_ms, 90 * now_ms, AbsSendTime(now_ms, 1000),
+ true);
+ }
+
+ // Wait long enough so that we can call Process again.
+ clock_.AdvanceTimeMilliseconds(1000);
+
+ EXPECT_EQ(0, bitrate_estimator_->Process());
+ EXPECT_TRUE(bitrate_observer_->updated());
+ EXPECT_NEAR(bitrate_observer_->latest_bitrate(), 800000u, 10000);
+}
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.cc b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.cc
index 762a3170ae1..08e076e540a 100644
--- a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.cc
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.cc
@@ -44,11 +44,10 @@ struct RemoteBitrateEstimatorSingleStream::Detector {
RemoteBitrateEstimatorSingleStream::RemoteBitrateEstimatorSingleStream(
RemoteBitrateObserver* observer,
- Clock* clock,
- uint32_t min_bitrate_bps)
+ Clock* clock)
: clock_(clock),
incoming_bitrate_(kBitrateWindowMs, 8000),
- remote_rate_(new AimdRateControl(min_bitrate_bps)),
+ remote_rate_(new AimdRateControl()),
observer_(observer),
crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
last_process_time_(-1),
@@ -164,7 +163,7 @@ void RemoteBitrateEstimatorSingleStream::UpdateEstimate(int64_t now_ms) {
}
// We can't update the estimate if we don't have any active streams.
if (overuse_detectors_.empty()) {
- remote_rate_.reset(new AimdRateControl(remote_rate_->GetMinBitrate()));
+ remote_rate_.reset(new AimdRateControl());
return;
}
double mean_noise_var = sum_var_noise /
@@ -182,9 +181,10 @@ void RemoteBitrateEstimatorSingleStream::UpdateEstimate(int64_t now_ms) {
}
}
-void RemoteBitrateEstimatorSingleStream::OnRttUpdate(int64_t rtt) {
+void RemoteBitrateEstimatorSingleStream::OnRttUpdate(int64_t avg_rtt_ms,
+ int64_t max_rtt_ms) {
CriticalSectionScoped cs(crit_sect_.get());
- remote_rate_->SetRtt(rtt);
+ remote_rate_->SetRtt(avg_rtt_ms);
}
void RemoteBitrateEstimatorSingleStream::RemoveStream(unsigned int ssrc) {
@@ -229,4 +229,9 @@ void RemoteBitrateEstimatorSingleStream::GetSsrcs(
}
}
+void RemoteBitrateEstimatorSingleStream::SetMinBitrate(int min_bitrate_bps) {
+ CriticalSectionScoped cs(crit_sect_.get());
+ remote_rate_->SetMinBitrate(min_bitrate_bps);
+}
+
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.h b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.h
index 0432355b203..2816f8d97d4 100644
--- a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.h
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.h
@@ -24,8 +24,7 @@ namespace webrtc {
class RemoteBitrateEstimatorSingleStream : public RemoteBitrateEstimator {
public:
RemoteBitrateEstimatorSingleStream(RemoteBitrateObserver* observer,
- Clock* clock,
- uint32_t min_bitrate_bps);
+ Clock* clock);
virtual ~RemoteBitrateEstimatorSingleStream();
void IncomingPacket(int64_t arrival_time_ms,
@@ -34,11 +33,12 @@ class RemoteBitrateEstimatorSingleStream : public RemoteBitrateEstimator {
bool was_paced) override;
int32_t Process() override;
int64_t TimeUntilNextProcess() override;
- void OnRttUpdate(int64_t rtt) override;
+ void OnRttUpdate(int64_t avg_rtt_ms, int64_t max_rtt_ms) override;
void RemoveStream(unsigned int ssrc) override;
bool LatestEstimate(std::vector<unsigned int>* ssrcs,
unsigned int* bitrate_bps) const override;
bool GetStats(ReceiveBandwidthEstimatorStats* output) const override;
+ void SetMinBitrate(int min_bitrate_bps) override;
private:
struct Detector;
@@ -61,7 +61,7 @@ class RemoteBitrateEstimatorSingleStream : public RemoteBitrateEstimator {
int64_t last_process_time_;
int64_t process_interval_ms_ GUARDED_BY(crit_sect_.get());
- DISALLOW_IMPLICIT_CONSTRUCTORS(RemoteBitrateEstimatorSingleStream);
+ RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(RemoteBitrateEstimatorSingleStream);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream_unittest.cc b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream_unittest.cc
index ea4e2688d60..a6c182a7bc8 100644
--- a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream_unittest.cc
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream_unittest.cc
@@ -17,16 +17,14 @@ namespace webrtc {
class RemoteBitrateEstimatorSingleTest :
public RemoteBitrateEstimatorTest {
public:
- static const uint32_t kRemoteBitrateEstimatorMinBitrateBps = 30000;
RemoteBitrateEstimatorSingleTest() {}
virtual void SetUp() {
bitrate_estimator_.reset(new RemoteBitrateEstimatorSingleStream(
- bitrate_observer_.get(), &clock_,
- kRemoteBitrateEstimatorMinBitrateBps));
+ bitrate_observer_.get(), &clock_));
}
protected:
- DISALLOW_COPY_AND_ASSIGN(RemoteBitrateEstimatorSingleTest);
+ RTC_DISALLOW_COPY_AND_ASSIGN(RemoteBitrateEstimatorSingleTest);
};
TEST_F(RemoteBitrateEstimatorSingleTest, InitialBehavior) {
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_unittest_helper.h b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_unittest_helper.h
index 0658ec5fc76..b4fe7139b4d 100644
--- a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_unittest_helper.h
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_unittest_helper.h
@@ -100,7 +100,7 @@ class RtpStream {
uint32_t rtp_timestamp_offset_;
const double kNtpFracPerMs;
- DISALLOW_COPY_AND_ASSIGN(RtpStream);
+ RTC_DISALLOW_COPY_AND_ASSIGN(RtpStream);
};
class StreamGenerator {
@@ -138,7 +138,7 @@ class StreamGenerator {
// All streams being transmitted on this simulated channel.
StreamMap streams_;
- DISALLOW_COPY_AND_ASSIGN(StreamGenerator);
+ RTC_DISALLOW_COPY_AND_ASSIGN(StreamGenerator);
};
} // namespace testing
@@ -211,7 +211,7 @@ class RemoteBitrateEstimatorTest : public ::testing::Test {
rtc::scoped_ptr<RemoteBitrateEstimator> bitrate_estimator_;
rtc::scoped_ptr<testing::StreamGenerator> stream_generator_;
- DISALLOW_COPY_AND_ASSIGN(RemoteBitrateEstimatorTest);
+ RTC_DISALLOW_COPY_AND_ASSIGN(RemoteBitrateEstimatorTest);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimators_test.cc b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimators_test.cc
index 62d8e129c23..d6f049f6ac1 100644
--- a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimators_test.cc
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimators_test.cc
@@ -70,7 +70,7 @@ TEST_P(DefaultBweTest, SteadyDelay) {
VideoSender sender(&uplink_, &source, GetParam());
DelayFilter delay(&uplink_, 0);
PacketReceiver receiver(&uplink_, 0, GetParam(), false, false);
- delay.SetDelayMs(1000);
+ delay.SetOneWayDelayMs(1000);
RunFor(10 * 60 * 1000);
}
@@ -81,7 +81,7 @@ TEST_P(DefaultBweTest, IncreasingDelay1) {
PacketReceiver receiver(&uplink_, 0, GetParam(), false, false);
RunFor(10 * 60 * 1000);
for (int i = 0; i < 30 * 2; ++i) {
- delay.SetDelayMs(i);
+ delay.SetOneWayDelayMs(i);
RunFor(10 * 1000);
}
RunFor(10 * 60 * 1000);
@@ -91,14 +91,14 @@ TEST_P(DefaultBweTest, IncreasingDelay2) {
VideoSource source(0, 30, 300, 0, 0);
VideoSender sender(&uplink_, &source, GetParam());
DelayFilter delay(&uplink_, 0);
- RateCounterFilter counter(&uplink_, 0, "");
+ RateCounterFilter counter(&uplink_, 0, "", "");
PacketReceiver receiver(&uplink_, 0, GetParam(), false, false);
RunFor(1 * 60 * 1000);
for (int i = 1; i < 51; ++i) {
- delay.SetDelayMs(10.0f * i);
+ delay.SetOneWayDelayMs(10.0f * i);
RunFor(10 * 1000);
}
- delay.SetDelayMs(0.0f);
+ delay.SetOneWayDelayMs(0.0f);
RunFor(10 * 60 * 1000);
}
@@ -109,12 +109,12 @@ TEST_P(DefaultBweTest, JumpyDelay1) {
PacketReceiver receiver(&uplink_, 0, GetParam(), false, false);
RunFor(10 * 60 * 1000);
for (int i = 1; i < 200; ++i) {
- delay.SetDelayMs((10 * i) % 500);
+ delay.SetOneWayDelayMs((10 * i) % 500);
RunFor(1000);
- delay.SetDelayMs(1.0f);
+ delay.SetOneWayDelayMs(1.0f);
RunFor(1000);
}
- delay.SetDelayMs(0.0f);
+ delay.SetOneWayDelayMs(0.0f);
RunFor(10 * 60 * 1000);
}
@@ -122,9 +122,9 @@ TEST_P(DefaultBweTest, SteadyJitter) {
VideoSource source(0, 30, 300, 0, 0);
VideoSender sender(&uplink_, &source, GetParam());
JitterFilter jitter(&uplink_, 0);
- RateCounterFilter counter(&uplink_, 0, "");
+ RateCounterFilter counter(&uplink_, 0, "", "");
PacketReceiver receiver(&uplink_, 0, GetParam(), false, false);
- jitter.SetJitter(20);
+ jitter.SetMaxJitter(20);
RunFor(2 * 60 * 1000);
}
@@ -134,7 +134,7 @@ TEST_P(DefaultBweTest, IncreasingJitter1) {
JitterFilter jitter(&uplink_, 0);
PacketReceiver receiver(&uplink_, 0, GetParam(), false, false);
for (int i = 0; i < 2 * 60 * 2; ++i) {
- jitter.SetJitter(i);
+ jitter.SetMaxJitter(i);
RunFor(10 * 1000);
}
RunFor(10 * 60 * 1000);
@@ -147,10 +147,10 @@ TEST_P(DefaultBweTest, IncreasingJitter2) {
PacketReceiver receiver(&uplink_, 0, GetParam(), false, false);
RunFor(30 * 1000);
for (int i = 1; i < 51; ++i) {
- jitter.SetJitter(10.0f * i);
+ jitter.SetMaxJitter(10.0f * i);
RunFor(10 * 1000);
}
- jitter.SetJitter(0.0f);
+ jitter.SetMaxJitter(0.0f);
RunFor(10 * 60 * 1000);
}
@@ -179,7 +179,7 @@ TEST_P(DefaultBweTest, SteadyChoke) {
VideoSender sender(&uplink_, &source, GetParam());
ChokeFilter choke(&uplink_, 0);
PacketReceiver receiver(&uplink_, 0, GetParam(), false, false);
- choke.SetCapacity(140);
+ choke.set_capacity_kbps(140);
RunFor(10 * 60 * 1000);
}
@@ -189,7 +189,7 @@ TEST_P(DefaultBweTest, IncreasingChoke1) {
ChokeFilter choke(&uplink_, 0);
PacketReceiver receiver(&uplink_, 0, GetParam(), false, false);
for (int i = 1200; i >= 100; i -= 100) {
- choke.SetCapacity(i);
+ choke.set_capacity_kbps(i);
RunFor(5000);
}
}
@@ -201,7 +201,7 @@ TEST_P(DefaultBweTest, IncreasingChoke2) {
PacketReceiver receiver(&uplink_, 0, GetParam(), false, false);
RunFor(60 * 1000);
for (int i = 1200; i >= 100; i -= 20) {
- choke.SetCapacity(i);
+ choke.set_capacity_kbps(i);
RunFor(1000);
}
}
@@ -211,16 +211,16 @@ TEST_P(DefaultBweTest, Multi1) {
VideoSender sender(&uplink_, &source, GetParam());
DelayFilter delay(&uplink_, 0);
ChokeFilter choke(&uplink_, 0);
- RateCounterFilter counter(&uplink_, 0, "");
+ RateCounterFilter counter(&uplink_, 0, "", "");
PacketReceiver receiver(&uplink_, 0, GetParam(), false, false);
- choke.SetCapacity(1000);
+ choke.set_capacity_kbps(1000);
RunFor(1 * 60 * 1000);
for (int i = 1; i < 51; ++i) {
- delay.SetDelayMs(100.0f * i);
+ delay.SetOneWayDelayMs(100.0f * i);
RunFor(10 * 1000);
}
RunFor(500 * 1000);
- delay.SetDelayMs(0.0f);
+ delay.SetOneWayDelayMs(0.0f);
RunFor(5 * 60 * 1000);
}
@@ -229,10 +229,10 @@ TEST_P(DefaultBweTest, Multi2) {
VideoSender sender(&uplink_, &source, GetParam());
ChokeFilter choke(&uplink_, 0);
JitterFilter jitter(&uplink_, 0);
- RateCounterFilter counter(&uplink_, 0, "");
+ RateCounterFilter counter(&uplink_, 0, "", "");
PacketReceiver receiver(&uplink_, 0, GetParam(), false, false);
- choke.SetCapacity(2000);
- jitter.SetJitter(120);
+ choke.set_capacity_kbps(2000);
+ jitter.SetMaxJitter(120);
RunFor(5 * 60 * 1000);
}
@@ -256,7 +256,7 @@ class BweFeedbackTest
}
private:
- DISALLOW_COPY_AND_ASSIGN(BweFeedbackTest);
+ RTC_DISALLOW_COPY_AND_ASSIGN(BweFeedbackTest);
};
INSTANTIATE_TEST_CASE_P(VideoSendersTest,
@@ -268,11 +268,11 @@ TEST_P(BweFeedbackTest, ConstantCapacity) {
AdaptiveVideoSource source(0, 30, 300, 0, 0);
PacedVideoSender sender(&uplink_, &source, GetParam());
ChokeFilter filter(&uplink_, 0);
- RateCounterFilter counter(&uplink_, 0, "receiver_input");
+ RateCounterFilter counter(&uplink_, 0, "Receiver", bwe_names[GetParam()]);
PacketReceiver receiver(&uplink_, 0, GetParam(), false, false);
const int kCapacityKbps = 1000;
- filter.SetCapacity(kCapacityKbps);
- filter.SetMaxDelay(500);
+ filter.set_capacity_kbps(kCapacityKbps);
+ filter.set_max_delay_ms(500);
RunFor(180 * 1000);
PrintResults(kCapacityKbps, counter.GetBitrateStats(), 0,
receiver.GetDelayStats(), counter.GetBitrateStats());
@@ -282,16 +282,16 @@ TEST_P(BweFeedbackTest, Choke1000kbps500kbps1000kbps) {
AdaptiveVideoSource source(0, 30, 300, 0, 0);
PacedVideoSender sender(&uplink_, &source, GetParam());
ChokeFilter filter(&uplink_, 0);
- RateCounterFilter counter(&uplink_, 0, "receiver_input");
+ RateCounterFilter counter(&uplink_, 0, "Receiver", bwe_names[GetParam()]);
PacketReceiver receiver(&uplink_, 0, GetParam(), false, false);
const int kHighCapacityKbps = 1000;
const int kLowCapacityKbps = 500;
- filter.SetCapacity(kHighCapacityKbps);
- filter.SetMaxDelay(500);
+ filter.set_capacity_kbps(kHighCapacityKbps);
+ filter.set_max_delay_ms(500);
RunFor(60 * 1000);
- filter.SetCapacity(kLowCapacityKbps);
+ filter.set_capacity_kbps(kLowCapacityKbps);
RunFor(60 * 1000);
- filter.SetCapacity(kHighCapacityKbps);
+ filter.set_capacity_kbps(kHighCapacityKbps);
RunFor(60 * 1000);
PrintResults((2 * kHighCapacityKbps + kLowCapacityKbps) / 3.0,
counter.GetBitrateStats(), 0, receiver.GetDelayStats(),
@@ -302,16 +302,16 @@ TEST_P(BweFeedbackTest, Choke200kbps30kbps200kbps) {
AdaptiveVideoSource source(0, 30, 300, 0, 0);
PacedVideoSender sender(&uplink_, &source, GetParam());
ChokeFilter filter(&uplink_, 0);
- RateCounterFilter counter(&uplink_, 0, "receiver_input");
+ RateCounterFilter counter(&uplink_, 0, "Receiver", bwe_names[GetParam()]);
PacketReceiver receiver(&uplink_, 0, GetParam(), false, false);
const int kHighCapacityKbps = 200;
const int kLowCapacityKbps = 30;
- filter.SetCapacity(kHighCapacityKbps);
- filter.SetMaxDelay(500);
+ filter.set_capacity_kbps(kHighCapacityKbps);
+ filter.set_max_delay_ms(500);
RunFor(60 * 1000);
- filter.SetCapacity(kLowCapacityKbps);
+ filter.set_capacity_kbps(kLowCapacityKbps);
RunFor(60 * 1000);
- filter.SetCapacity(kHighCapacityKbps);
+ filter.set_capacity_kbps(kHighCapacityKbps);
RunFor(60 * 1000);
PrintResults((2 * kHighCapacityKbps + kLowCapacityKbps) / 3.0,
@@ -322,9 +322,10 @@ TEST_P(BweFeedbackTest, Choke200kbps30kbps200kbps) {
TEST_P(BweFeedbackTest, Verizon4gDownlinkTest) {
AdaptiveVideoSource source(0, 30, 300, 0, 0);
VideoSender sender(&uplink_, &source, GetParam());
- RateCounterFilter counter1(&uplink_, 0, "sender_output");
+ RateCounterFilter counter1(&uplink_, 0, "sender_output",
+ bwe_names[GetParam()]);
TraceBasedDeliveryFilter filter(&uplink_, 0, "link_capacity");
- RateCounterFilter counter2(&uplink_, 0, "receiver_input");
+ RateCounterFilter counter2(&uplink_, 0, "Receiver", bwe_names[GetParam()]);
PacketReceiver receiver(&uplink_, 0, GetParam(), false, false);
ASSERT_TRUE(filter.Init(test::ResourcePath("verizon4g-downlink", "rx")));
RunFor(22 * 60 * 1000);
@@ -336,10 +337,11 @@ TEST_P(BweFeedbackTest, Verizon4gDownlinkTest) {
TEST_P(BweFeedbackTest, GoogleWifiTrace3Mbps) {
AdaptiveVideoSource source(0, 30, 300, 0, 0);
VideoSender sender(&uplink_, &source, GetParam());
- RateCounterFilter counter1(&uplink_, 0, "sender_output");
+ RateCounterFilter counter1(&uplink_, 0, "sender_output",
+ bwe_names[GetParam()]);
TraceBasedDeliveryFilter filter(&uplink_, 0, "link_capacity");
- filter.SetMaxDelay(500);
- RateCounterFilter counter2(&uplink_, 0, "receiver_input");
+ filter.set_max_delay_ms(500);
+ RateCounterFilter counter2(&uplink_, 0, "Receiver", bwe_names[GetParam()]);
PacketReceiver receiver(&uplink_, 0, GetParam(), false, false);
ASSERT_TRUE(filter.Init(test::ResourcePath("google-wifi-3mbps", "rx")));
RunFor(300 * 1000);
@@ -348,27 +350,84 @@ TEST_P(BweFeedbackTest, GoogleWifiTrace3Mbps) {
}
TEST_P(BweFeedbackTest, PacedSelfFairness50msTest) {
- RunFairnessTest(GetParam(), 4, 0, 300, 3000, 50);
+ int64_t kRttMs = 100;
+ int64_t kMaxJitterMs = 15;
+
+ const int kNumRmcatFlows = 4;
+ int64_t offset_ms[kNumRmcatFlows];
+ for (int i = 0; i < kNumRmcatFlows; ++i) {
+ offset_ms[i] = std::max(0, 5000 * i + rand() % 2001 - 1000);
+ }
+
+ RunFairnessTest(GetParam(), kNumRmcatFlows, 0, 300, 3000, 50, kRttMs,
+ kMaxJitterMs, offset_ms);
}
TEST_P(BweFeedbackTest, PacedSelfFairness500msTest) {
- RunFairnessTest(GetParam(), 4, 0, 300, 3000, 500);
+ int64_t kRttMs = 100;
+ int64_t kMaxJitterMs = 15;
+
+ const int kNumRmcatFlows = 4;
+ int64_t offset_ms[kNumRmcatFlows];
+ for (int i = 0; i < kNumRmcatFlows; ++i) {
+ offset_ms[i] = std::max(0, 5000 * i + rand() % 2001 - 1000);
+ }
+
+ RunFairnessTest(GetParam(), kNumRmcatFlows, 0, 300, 3000, 500, kRttMs,
+ kMaxJitterMs, offset_ms);
}
TEST_P(BweFeedbackTest, PacedSelfFairness1000msTest) {
- RunFairnessTest(GetParam(), 4, 0, 300, 3000, 1000);
+ int64_t kRttMs = 100;
+ int64_t kMaxJitterMs = 15;
+
+ const int kNumRmcatFlows = 4;
+ int64_t offset_ms[kNumRmcatFlows];
+ for (int i = 0; i < kNumRmcatFlows; ++i) {
+ offset_ms[i] = std::max(0, 5000 * i + rand() % 2001 - 1000);
+ }
+
+ RunFairnessTest(GetParam(), kNumRmcatFlows, 0, 300, 3000, 1000, kRttMs,
+ kMaxJitterMs, offset_ms);
}
TEST_P(BweFeedbackTest, TcpFairness50msTest) {
- RunFairnessTest(GetParam(), 1, 1, 300, 2000, 50);
+ int64_t kRttMs = 100;
+ int64_t kMaxJitterMs = 15;
+
+ int64_t offset_ms[2]; // One TCP, one RMCAT flow.
+ for (int i = 0; i < 2; ++i) {
+ offset_ms[i] = std::max(0, 5000 * i + rand() % 2001 - 1000);
+ }
+
+ RunFairnessTest(GetParam(), 1, 1, 300, 2000, 50, kRttMs, kMaxJitterMs,
+ offset_ms);
}
TEST_P(BweFeedbackTest, TcpFairness500msTest) {
- RunFairnessTest(GetParam(), 1, 1, 300, 2000, 500);
+ int64_t kRttMs = 100;
+ int64_t kMaxJitterMs = 15;
+
+ int64_t offset_ms[2]; // One TCP, one RMCAT flow.
+ for (int i = 0; i < 2; ++i) {
+ offset_ms[i] = std::max(0, 5000 * i + rand() % 2001 - 1000);
+ }
+
+ RunFairnessTest(GetParam(), 1, 1, 300, 2000, 500, kRttMs, kMaxJitterMs,
+ offset_ms);
}
TEST_P(BweFeedbackTest, TcpFairness1000msTest) {
- RunFairnessTest(GetParam(), 1, 1, 300, 2000, 1000);
+ int64_t kRttMs = 100;
+ int64_t kMaxJitterMs = 15;
+
+ int64_t offset_ms[2]; // One TCP, one RMCAT flow.
+ for (int i = 0; i < 2; ++i) {
+ offset_ms[i] = std::max(0, 5000 * i + rand() % 2001 - 1000);
+ }
+
+ RunFairnessTest(GetParam(), 1, 1, 300, 2000, 1000, kRttMs, kMaxJitterMs,
+ offset_ms);
}
} // namespace bwe
} // namespace testing
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/remote_estimator_proxy.cc b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/remote_estimator_proxy.cc
new file mode 100644
index 00000000000..73b7b15fa4a
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/remote_estimator_proxy.cc
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/remote_bitrate_estimator/remote_estimator_proxy.h"
+
+#include "webrtc/base/checks.h"
+#include "webrtc/base/logging.h"
+#include "webrtc/system_wrappers/interface/clock.h"
+#include "webrtc/modules/pacing/include/packet_router.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h"
+#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp.h"
+
+namespace webrtc {
+
+// TODO(sprang): Tune these!
+const int RemoteEstimatorProxy::kDefaultProcessIntervalMs = 200;
+const int RemoteEstimatorProxy::kBackWindowMs = 500;
+
+RemoteEstimatorProxy::RemoteEstimatorProxy(Clock* clock,
+ PacketRouter* packet_router)
+ : clock_(clock),
+ packet_router_(packet_router),
+ last_process_time_ms_(-1),
+ media_ssrc_(0),
+ feedback_sequence_(0),
+ window_start_seq_(-1) {}
+
+RemoteEstimatorProxy::~RemoteEstimatorProxy() {}
+
+void RemoteEstimatorProxy::IncomingPacketFeedbackVector(
+ const std::vector<PacketInfo>& packet_feedback_vector) {
+ rtc::CritScope cs(&lock_);
+ for (PacketInfo info : packet_feedback_vector)
+ OnPacketArrival(info.sequence_number, info.arrival_time_ms);
+}
+
+void RemoteEstimatorProxy::IncomingPacket(int64_t arrival_time_ms,
+ size_t payload_size,
+ const RTPHeader& header,
+ bool was_paced) {
+ RTC_DCHECK(header.extension.hasTransportSequenceNumber);
+ rtc::CritScope cs(&lock_);
+ media_ssrc_ = header.ssrc;
+ OnPacketArrival(header.extension.transportSequenceNumber, arrival_time_ms);
+}
+
+void RemoteEstimatorProxy::RemoveStream(unsigned int ssrc) {}
+
+bool RemoteEstimatorProxy::LatestEstimate(std::vector<unsigned int>* ssrcs,
+ unsigned int* bitrate_bps) const {
+ return false;
+}
+
+bool RemoteEstimatorProxy::GetStats(
+ ReceiveBandwidthEstimatorStats* output) const {
+ return false;
+}
+
+
+int64_t RemoteEstimatorProxy::TimeUntilNextProcess() {
+ int64_t now = clock_->TimeInMilliseconds();
+ int64_t time_until_next = 0;
+ if (last_process_time_ms_ != -1 &&
+ now - last_process_time_ms_ < kDefaultProcessIntervalMs) {
+ time_until_next = (last_process_time_ms_ + kDefaultProcessIntervalMs - now);
+ }
+ return time_until_next;
+}
+
+int32_t RemoteEstimatorProxy::Process() {
+ // TODO(sprang): Perhaps we need a dedicated thread here instead?
+
+ if (TimeUntilNextProcess() > 0)
+ return 0;
+ last_process_time_ms_ = clock_->TimeInMilliseconds();
+
+ bool more_to_build = true;
+ while (more_to_build) {
+ rtcp::TransportFeedback feedback_packet;
+ if (BuildFeedbackPacket(&feedback_packet)) {
+ RTC_DCHECK(packet_router_ != nullptr);
+ packet_router_->SendFeedback(&feedback_packet);
+ } else {
+ more_to_build = false;
+ }
+ }
+
+ return 0;
+}
+
+void RemoteEstimatorProxy::OnPacketArrival(uint16_t sequence_number,
+ int64_t arrival_time) {
+ int64_t seq = unwrapper_.Unwrap(sequence_number);
+
+ if (window_start_seq_ == -1) {
+ window_start_seq_ = seq;
+ // Start new feedback packet, cull old packets.
+ for (auto it = packet_arrival_times_.begin();
+ it != packet_arrival_times_.end() && it->first < seq &&
+ arrival_time - it->second >= kBackWindowMs;) {
+ auto delete_it = it;
+ ++it;
+ packet_arrival_times_.erase(delete_it);
+ }
+ } else if (seq < window_start_seq_) {
+ window_start_seq_ = seq;
+ }
+
+ RTC_DCHECK(packet_arrival_times_.end() == packet_arrival_times_.find(seq));
+ packet_arrival_times_[seq] = arrival_time;
+}
+
+bool RemoteEstimatorProxy::BuildFeedbackPacket(
+ rtcp::TransportFeedback* feedback_packet) {
+ rtc::CritScope cs(&lock_);
+ if (window_start_seq_ == -1)
+ return false;
+
+ // window_start_seq_ is the first sequence number to include in the current
+ // feedback packet. Some older packets may still be in the map, in case a reordering
+ // happens and we need to retransmit them.
+ auto it = packet_arrival_times_.find(window_start_seq_);
+ RTC_DCHECK(it != packet_arrival_times_.end());
+
+ // TODO(sprang): Measure receive times in microseconds and remove the
+ // conversions below.
+ feedback_packet->WithMediaSourceSsrc(media_ssrc_);
+ feedback_packet->WithBase(static_cast<uint16_t>(it->first & 0xFFFF),
+ it->second * 1000);
+ feedback_packet->WithFeedbackSequenceNumber(feedback_sequence_++);
+ for (; it != packet_arrival_times_.end(); ++it) {
+ if (!feedback_packet->WithReceivedPacket(
+ static_cast<uint16_t>(it->first & 0xFFFF), it->second * 1000)) {
+ // If we can't even add the first seq to the feedback packet, we won't be
+ // able to build it at all.
+ RTC_CHECK_NE(window_start_seq_, it->first);
+
+ // Could not add timestamp, feedback packet might be full. Return and
+ // try again with a fresh packet.
+ window_start_seq_ = it->first;
+ break;
+ }
+ // Note: Don't erase items from packet_arrival_times_ after sending, in case
+ // they need to be re-sent after a reordering. Removal will be handled
+ // by OnPacketArrival once packets are too old.
+ }
+ if (it == packet_arrival_times_.end())
+ window_start_seq_ = -1;
+
+ return true;
+}
+
+} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/remote_estimator_proxy.h b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/remote_estimator_proxy.h
new file mode 100644
index 00000000000..e867ff77a45
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/remote_estimator_proxy.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_REMOTE_BITRATE_ESTIMATOR_REMOTE_ESTIMATOR_PROXY_H_
+#define WEBRTC_MODULES_REMOTE_BITRATE_ESTIMATOR_REMOTE_ESTIMATOR_PROXY_H_
+
+#include <map>
+#include <vector>
+
+#include "webrtc/base/criticalsection.h"
+#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h"
+
+namespace webrtc {
+
+class Clock;
+class PacketRouter;
+namespace rtcp {
+class TransportFeedback;
+}
+
+// Class used when send-side BWE is enabled: This proxy is instantiated on the
+// receive side. It buffers a number of receive timestamps and then sends
+// transport feedback messages back to the send side.
+
+class RemoteEstimatorProxy : public RemoteBitrateEstimator {
+ public:
+ RemoteEstimatorProxy(Clock* clock, PacketRouter* packet_router);
+ virtual ~RemoteEstimatorProxy();
+
+ void IncomingPacketFeedbackVector(
+ const std::vector<PacketInfo>& packet_feedback_vector) override;
+ void IncomingPacket(int64_t arrival_time_ms,
+ size_t payload_size,
+ const RTPHeader& header,
+ bool was_paced) override;
+ void RemoveStream(unsigned int ssrc) override;
+ bool LatestEstimate(std::vector<unsigned int>* ssrcs,
+ unsigned int* bitrate_bps) const override;
+ bool GetStats(ReceiveBandwidthEstimatorStats* output) const override;
+ void OnRttUpdate(int64_t avg_rtt_ms, int64_t max_rtt_ms) override {}
+ void SetMinBitrate(int min_bitrate_bps) override {}
+ int64_t TimeUntilNextProcess() override;
+ int32_t Process() override;
+
+ static const int kDefaultProcessIntervalMs;
+ static const int kBackWindowMs;
+
+ private:
+ void OnPacketArrival(uint16_t sequence_number, int64_t arrival_time)
+ EXCLUSIVE_LOCKS_REQUIRED(&lock_);
+ bool BuildFeedbackPacket(rtcp::TransportFeedback* feedback_packetket);
+
+ Clock* const clock_;
+ PacketRouter* const packet_router_;
+ int64_t last_process_time_ms_;
+
+ rtc::CriticalSection lock_;
+
+ uint32_t media_ssrc_ GUARDED_BY(&lock_);
+ uint8_t feedback_sequence_ GUARDED_BY(&lock_);
+ SequenceNumberUnwrapper unwrapper_ GUARDED_BY(&lock_);
+ int64_t window_start_seq_ GUARDED_BY(&lock_);
+ // Map unwrapped seq -> time.
+ std::map<int64_t, int64_t> packet_arrival_times_ GUARDED_BY(&lock_);
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_REMOTE_BITRATE_ESTIMATOR_REMOTE_ESTIMATOR_PROXY_H_
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/remote_estimator_proxy_unittest.cc b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/remote_estimator_proxy_unittest.cc
new file mode 100644
index 00000000000..5ebd921e7a5
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/remote_estimator_proxy_unittest.cc
@@ -0,0 +1,272 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#include "webrtc/modules/pacing/include/packet_router.h"
+#include "webrtc/modules/remote_bitrate_estimator/remote_estimator_proxy.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h"
+#include "webrtc/system_wrappers/interface/clock.h"
+
+using ::testing::_;
+using ::testing::InSequence;
+using ::testing::Invoke;
+
+namespace webrtc {
+
+class MockPacketRouter : public PacketRouter {
+ public:
+ MOCK_METHOD1(SendFeedback, bool(rtcp::TransportFeedback* packet));
+};
+
+class RemoteEstimatorProxyTest : public ::testing::Test {
+ public:
+ RemoteEstimatorProxyTest() : clock_(0), proxy_(&clock_, &router_) {}
+
+ protected:
+ void IncomingPacket(uint16_t seq, int64_t time_ms) {
+ RTPHeader header;
+ header.extension.hasTransportSequenceNumber = true;
+ header.extension.transportSequenceNumber = seq;
+ header.ssrc = kMediaSsrc;
+ proxy_.IncomingPacket(time_ms, kDefaultPacketSize, header, true);
+ }
+
+ void Process() {
+ clock_.AdvanceTimeMilliseconds(
+ RemoteEstimatorProxy::kDefaultProcessIntervalMs);
+ proxy_.Process();
+ }
+
+ SimulatedClock clock_;
+ MockPacketRouter router_;
+ RemoteEstimatorProxy proxy_;
+
+ const size_t kDefaultPacketSize = 100;
+ const uint32_t kMediaSsrc = 456;
+ const uint16_t kBaseSeq = 10;
+ const int64_t kBaseTimeMs = 123;
+ const int64_t kMaxSmallDeltaMs =
+ (rtcp::TransportFeedback::kDeltaScaleFactor * 0xFF) / 1000;
+};
+
+TEST_F(RemoteEstimatorProxyTest, SendsSinglePacketFeedback) {
+ IncomingPacket(kBaseSeq, kBaseTimeMs);
+
+ EXPECT_CALL(router_, SendFeedback(_))
+ .Times(1)
+ .WillOnce(Invoke([this](rtcp::TransportFeedback* packet) {
+ packet->Build();
+ EXPECT_EQ(kBaseSeq, packet->GetBaseSequence());
+ EXPECT_EQ(kMediaSsrc, packet->GetMediaSourceSsrc());
+
+ std::vector<rtcp::TransportFeedback::StatusSymbol> status_vec =
+ packet->GetStatusVector();
+ EXPECT_EQ(1u, status_vec.size());
+ EXPECT_EQ(rtcp::TransportFeedback::StatusSymbol::kReceivedSmallDelta,
+ status_vec[0]);
+ std::vector<int64_t> delta_vec = packet->GetReceiveDeltasUs();
+ EXPECT_EQ(1u, delta_vec.size());
+ EXPECT_EQ(kBaseTimeMs, (packet->GetBaseTimeUs() + delta_vec[0]) / 1000);
+ return true;
+ }));
+
+ Process();
+}
+
+TEST_F(RemoteEstimatorProxyTest, SendsFeedbackWithVaryingDeltas) {
+ IncomingPacket(kBaseSeq, kBaseTimeMs);
+ IncomingPacket(kBaseSeq + 1, kBaseTimeMs + kMaxSmallDeltaMs);
+ IncomingPacket(kBaseSeq + 2, kBaseTimeMs + (2 * kMaxSmallDeltaMs) + 1);
+
+ EXPECT_CALL(router_, SendFeedback(_))
+ .Times(1)
+ .WillOnce(Invoke([this](rtcp::TransportFeedback* packet) {
+ packet->Build();
+ EXPECT_EQ(kBaseSeq, packet->GetBaseSequence());
+ EXPECT_EQ(kMediaSsrc, packet->GetMediaSourceSsrc());
+
+ std::vector<rtcp::TransportFeedback::StatusSymbol> status_vec =
+ packet->GetStatusVector();
+ EXPECT_EQ(3u, status_vec.size());
+ EXPECT_EQ(rtcp::TransportFeedback::StatusSymbol::kReceivedSmallDelta,
+ status_vec[0]);
+ EXPECT_EQ(rtcp::TransportFeedback::StatusSymbol::kReceivedSmallDelta,
+ status_vec[1]);
+ EXPECT_EQ(rtcp::TransportFeedback::StatusSymbol::kReceivedLargeDelta,
+ status_vec[2]);
+
+ std::vector<int64_t> delta_vec = packet->GetReceiveDeltasUs();
+ EXPECT_EQ(3u, delta_vec.size());
+ EXPECT_EQ(kBaseTimeMs, (packet->GetBaseTimeUs() + delta_vec[0]) / 1000);
+ EXPECT_EQ(kMaxSmallDeltaMs, delta_vec[1] / 1000);
+ EXPECT_EQ(kMaxSmallDeltaMs + 1, delta_vec[2] / 1000);
+ return true;
+ }));
+
+ Process();
+}
+
+TEST_F(RemoteEstimatorProxyTest, SendsFragmentedFeedback) {
+ const int64_t kTooLargeDelta =
+ rtcp::TransportFeedback::kDeltaScaleFactor * (1 << 16);
+
+ IncomingPacket(kBaseSeq, kBaseTimeMs);
+ IncomingPacket(kBaseSeq + 1, kBaseTimeMs + kTooLargeDelta);
+
+ InSequence s;
+ EXPECT_CALL(router_, SendFeedback(_))
+ .Times(1)
+ .WillOnce(Invoke([kTooLargeDelta, this](rtcp::TransportFeedback* packet) {
+ packet->Build();
+ EXPECT_EQ(kBaseSeq, packet->GetBaseSequence());
+ EXPECT_EQ(kMediaSsrc, packet->GetMediaSourceSsrc());
+
+ std::vector<rtcp::TransportFeedback::StatusSymbol> status_vec =
+ packet->GetStatusVector();
+ EXPECT_EQ(1u, status_vec.size());
+ EXPECT_EQ(rtcp::TransportFeedback::StatusSymbol::kReceivedSmallDelta,
+ status_vec[0]);
+ std::vector<int64_t> delta_vec = packet->GetReceiveDeltasUs();
+ EXPECT_EQ(1u, delta_vec.size());
+ EXPECT_EQ(kBaseTimeMs, (packet->GetBaseTimeUs() + delta_vec[0]) / 1000);
+ return true;
+ }))
+ .RetiresOnSaturation();
+
+ EXPECT_CALL(router_, SendFeedback(_))
+ .Times(1)
+ .WillOnce(Invoke([kTooLargeDelta, this](rtcp::TransportFeedback* packet) {
+ packet->Build();
+ EXPECT_EQ(kBaseSeq + 1, packet->GetBaseSequence());
+ EXPECT_EQ(kMediaSsrc, packet->GetMediaSourceSsrc());
+
+ std::vector<rtcp::TransportFeedback::StatusSymbol> status_vec =
+ packet->GetStatusVector();
+ EXPECT_EQ(1u, status_vec.size());
+ EXPECT_EQ(rtcp::TransportFeedback::StatusSymbol::kReceivedSmallDelta,
+ status_vec[0]);
+ std::vector<int64_t> delta_vec = packet->GetReceiveDeltasUs();
+ EXPECT_EQ(1u, delta_vec.size());
+ EXPECT_EQ(kBaseTimeMs + kTooLargeDelta,
+ (packet->GetBaseTimeUs() + delta_vec[0]) / 1000);
+ return true;
+ }))
+ .RetiresOnSaturation();
+
+ Process();
+}
+
+TEST_F(RemoteEstimatorProxyTest, ResendsTimestampsOnReordering) {
+ IncomingPacket(kBaseSeq, kBaseTimeMs);
+ IncomingPacket(kBaseSeq + 2, kBaseTimeMs + 2);
+
+ EXPECT_CALL(router_, SendFeedback(_))
+ .Times(1)
+ .WillOnce(Invoke([this](rtcp::TransportFeedback* packet) {
+ packet->Build();
+ EXPECT_EQ(kBaseSeq, packet->GetBaseSequence());
+ EXPECT_EQ(kMediaSsrc, packet->GetMediaSourceSsrc());
+
+ std::vector<int64_t> delta_vec = packet->GetReceiveDeltasUs();
+ EXPECT_EQ(2u, delta_vec.size());
+ EXPECT_EQ(kBaseTimeMs, (packet->GetBaseTimeUs() + delta_vec[0]) / 1000);
+ EXPECT_EQ(2, delta_vec[1] / 1000);
+ return true;
+ }));
+
+ Process();
+
+ IncomingPacket(kBaseSeq + 1, kBaseTimeMs + 1);
+
+ EXPECT_CALL(router_, SendFeedback(_))
+ .Times(1)
+ .WillOnce(Invoke([this](rtcp::TransportFeedback* packet) {
+ packet->Build();
+ EXPECT_EQ(kBaseSeq + 1, packet->GetBaseSequence());
+ EXPECT_EQ(kMediaSsrc, packet->GetMediaSourceSsrc());
+
+ std::vector<int64_t> delta_vec = packet->GetReceiveDeltasUs();
+ EXPECT_EQ(2u, delta_vec.size());
+ EXPECT_EQ(kBaseTimeMs + 1,
+ (packet->GetBaseTimeUs() + delta_vec[0]) / 1000);
+ EXPECT_EQ(1, delta_vec[1] / 1000);
+ return true;
+ }));
+
+ Process();
+}
+
+TEST_F(RemoteEstimatorProxyTest, RemovesTimestampsOutOfScope) {
+ const int64_t kTimeoutTimeMs =
+ kBaseTimeMs + RemoteEstimatorProxy::kBackWindowMs;
+
+ IncomingPacket(kBaseSeq + 2, kBaseTimeMs);
+
+ EXPECT_CALL(router_, SendFeedback(_))
+ .Times(1)
+ .WillOnce(Invoke([kTimeoutTimeMs, this](rtcp::TransportFeedback* packet) {
+ packet->Build();
+ EXPECT_EQ(kBaseSeq + 2, packet->GetBaseSequence());
+
+ std::vector<int64_t> delta_vec = packet->GetReceiveDeltasUs();
+ EXPECT_EQ(1u, delta_vec.size());
+ EXPECT_EQ(kBaseTimeMs, (packet->GetBaseTimeUs() + delta_vec[0]) / 1000);
+ return true;
+ }));
+
+ Process();
+
+ IncomingPacket(kBaseSeq + 3, kTimeoutTimeMs); // kBaseSeq + 2 times out here.
+
+ EXPECT_CALL(router_, SendFeedback(_))
+ .Times(1)
+ .WillOnce(Invoke([kTimeoutTimeMs, this](rtcp::TransportFeedback* packet) {
+ packet->Build();
+ EXPECT_EQ(kBaseSeq + 3, packet->GetBaseSequence());
+
+ std::vector<int64_t> delta_vec = packet->GetReceiveDeltasUs();
+ EXPECT_EQ(1u, delta_vec.size());
+ EXPECT_EQ(kTimeoutTimeMs,
+ (packet->GetBaseTimeUs() + delta_vec[0]) / 1000);
+ return true;
+ }));
+
+ Process();
+
+ // New group, with sequence starting below the first so that they may be
+ // retransmitted.
+ IncomingPacket(kBaseSeq, kBaseTimeMs - 1);
+ IncomingPacket(kBaseSeq + 1, kTimeoutTimeMs - 1);
+
+ EXPECT_CALL(router_, SendFeedback(_))
+ .Times(1)
+ .WillOnce(Invoke([kTimeoutTimeMs, this](rtcp::TransportFeedback* packet) {
+ packet->Build();
+ EXPECT_EQ(kBaseSeq, packet->GetBaseSequence());
+
+ // Four status entries (kBaseSeq + 3 missing).
+ EXPECT_EQ(4u, packet->GetStatusVector().size());
+
+ // Only three actual timestamps.
+ std::vector<int64_t> delta_vec = packet->GetReceiveDeltasUs();
+ EXPECT_EQ(3u, delta_vec.size());
+ EXPECT_EQ(kBaseTimeMs - 1,
+ (packet->GetBaseTimeUs() + delta_vec[0]) / 1000);
+ EXPECT_EQ(kTimeoutTimeMs - kBaseTimeMs, delta_vec[1] / 1000);
+ EXPECT_EQ(1, delta_vec[2] / 1000);
+ return true;
+ }));
+
+ Process();
+}
+
+} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/bitrate_controller/send_time_history.cc b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/send_time_history.cc
index 7e0c89e73d1..fa51daddb6d 100644
--- a/chromium/third_party/webrtc/modules/bitrate_controller/send_time_history.cc
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/send_time_history.cc
@@ -10,7 +10,7 @@
#include <assert.h>
-#include "webrtc/modules/bitrate_controller/send_time_history.h"
+#include "webrtc/modules/remote_bitrate_estimator/include/send_time_history.h"
namespace webrtc {
@@ -25,14 +25,23 @@ void SendTimeHistory::Clear() {
history_.clear();
}
-void SendTimeHistory::AddAndRemoveOldSendTimes(uint16_t sequence_number,
- int64_t timestamp) {
- EraseOld(timestamp - packet_age_limit_);
+void SendTimeHistory::AddAndRemoveOld(const PacketInfo& packet) {
+ EraseOld(packet.send_time_ms - packet_age_limit_);
if (history_.empty())
- oldest_sequence_number_ = sequence_number;
+ oldest_sequence_number_ = packet.sequence_number;
- history_[sequence_number] = timestamp;
+ history_.insert(
+ std::pair<uint16_t, PacketInfo>(packet.sequence_number, packet));
+}
+
+bool SendTimeHistory::UpdateSendTime(uint16_t sequence_number,
+ int64_t send_time_ms) {
+ auto it = history_.find(sequence_number);
+ if (it == history_.end())
+ return false;
+ it->second.send_time_ms = send_time_ms;
+ return true;
}
void SendTimeHistory::EraseOld(int64_t limit) {
@@ -40,7 +49,7 @@ void SendTimeHistory::EraseOld(int64_t limit) {
auto it = history_.find(oldest_sequence_number_);
assert(it != history_.end());
- if (it->second > limit)
+ if (it->second.send_time_ms > limit)
return; // Oldest packet within age limit, return.
// TODO(sprang): Warn if erasing (too many) old items?
@@ -68,16 +77,16 @@ void SendTimeHistory::UpdateOldestSequenceNumber() {
oldest_sequence_number_ = it->first;
}
-bool SendTimeHistory::GetSendTime(uint16_t sequence_number,
- int64_t* timestamp,
- bool remove) {
- auto it = history_.find(sequence_number);
+bool SendTimeHistory::GetInfo(PacketInfo* packet, bool remove) {
+ auto it = history_.find(packet->sequence_number);
if (it == history_.end())
return false;
- *timestamp = it->second;
+ int64_t receive_time = packet->arrival_time_ms;
+ *packet = it->second;
+ packet->arrival_time_ms = receive_time;
if (remove) {
history_.erase(it);
- if (sequence_number == oldest_sequence_number_)
+ if (packet->sequence_number == oldest_sequence_number_)
UpdateOldestSequenceNumber();
}
return true;
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/send_time_history_unittest.cc b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/send_time_history_unittest.cc
new file mode 100644
index 00000000000..e3d2c776195
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/send_time_history_unittest.cc
@@ -0,0 +1,224 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <algorithm>
+#include <limits>
+#include <vector>
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/modules/remote_bitrate_estimator/include/send_time_history.h"
+#include "webrtc/system_wrappers/interface/clock.h"
+
+namespace webrtc {
+namespace test {
+
+static const int kDefaultHistoryLengthMs = 1000;
+
+class SendTimeHistoryTest : public ::testing::Test {
+ protected:
+ SendTimeHistoryTest() : history_(kDefaultHistoryLengthMs), clock_(0) {}
+ ~SendTimeHistoryTest() {}
+
+ virtual void SetUp() {}
+
+ virtual void TearDown() {}
+
+ SendTimeHistory history_;
+ webrtc::SimulatedClock clock_;
+};
+
+// Helper subclass so we can use EXPECT_EQ on instances and store them in
+class PacketInfo : public webrtc::PacketInfo {
+ public:
+ PacketInfo() : webrtc::PacketInfo(0, 0, 0, 0, false) {}
+ PacketInfo(int64_t arrival_time_ms,
+ int64_t send_time_ms,
+ uint16_t sequence_number,
+ size_t payload_size,
+ bool was_paced)
+ : webrtc::PacketInfo(arrival_time_ms,
+ send_time_ms,
+ sequence_number,
+ payload_size,
+ was_paced) {}
+ bool operator==(const PacketInfo& other) const {
+ return arrival_time_ms == other.arrival_time_ms &&
+ send_time_ms == other.send_time_ms &&
+ sequence_number == other.sequence_number &&
+ payload_size == other.payload_size && was_paced == other.was_paced;
+ }
+};
+
+TEST_F(SendTimeHistoryTest, AddRemoveOne) {
+ const uint16_t kSeqNo = 10;
+ const PacketInfo kSentPacket = {0, 1, kSeqNo, 1, true};
+ history_.AddAndRemoveOld(kSentPacket);
+
+ PacketInfo received_packet = {0, 0, kSeqNo, 0, false};
+ EXPECT_TRUE(history_.GetInfo(&received_packet, false));
+ EXPECT_EQ(kSentPacket, received_packet);
+
+ received_packet = {0, 0, kSeqNo, 0, false};
+ EXPECT_TRUE(history_.GetInfo(&received_packet, true));
+ EXPECT_EQ(kSentPacket, received_packet);
+
+ received_packet = {0, 0, kSeqNo, 0, false};
+ EXPECT_FALSE(history_.GetInfo(&received_packet, true));
+}
+
+TEST_F(SendTimeHistoryTest, UpdateSendTime) {
+ const uint16_t kSeqNo = 10;
+ const int64_t kSendTime = 1000;
+ const int64_t kSendTimeUpdated = 2000;
+ const PacketInfo kSentPacket = {0, kSendTime, kSeqNo, 1, true};
+ const PacketInfo kUpdatedPacket = {0, kSendTimeUpdated, kSeqNo, 1, true};
+
+ history_.AddAndRemoveOld(kSentPacket);
+ PacketInfo info = {0, 0, kSeqNo, 0, false};
+ EXPECT_TRUE(history_.GetInfo(&info, false));
+ EXPECT_EQ(kSentPacket, info);
+
+ EXPECT_TRUE(history_.UpdateSendTime(kSeqNo, kSendTimeUpdated));
+
+ info = {0, 0, kSeqNo, 0, false};
+ EXPECT_TRUE(history_.GetInfo(&info, true));
+ EXPECT_EQ(kUpdatedPacket, info);
+
+ EXPECT_FALSE(history_.UpdateSendTime(kSeqNo, kSendTimeUpdated));
+}
+
+TEST_F(SendTimeHistoryTest, PopulatesExpectedFields) {
+ const uint16_t kSeqNo = 10;
+ const int64_t kSendTime = 1000;
+ const int64_t kReceiveTime = 2000;
+ const size_t kPayloadSize = 42;
+ const bool kPaced = true;
+ const PacketInfo kSentPacket = {0, kSendTime, kSeqNo, kPayloadSize, kPaced};
+
+ history_.AddAndRemoveOld(kSentPacket);
+
+ PacketInfo info = {kReceiveTime, 0, kSeqNo, 0, false};
+ EXPECT_TRUE(history_.GetInfo(&info, true));
+ EXPECT_EQ(kReceiveTime, info.arrival_time_ms);
+ EXPECT_EQ(kSendTime, info.send_time_ms);
+ EXPECT_EQ(kSeqNo, info.sequence_number);
+ EXPECT_EQ(kPayloadSize, info.payload_size);
+ EXPECT_EQ(kPaced, info.was_paced);
+}
+
+TEST_F(SendTimeHistoryTest, AddThenRemoveOutOfOrder) {
+ std::vector<PacketInfo> sent_packets;
+ std::vector<PacketInfo> received_packets;
+ const size_t num_items = 100;
+ const size_t kPacketSize = 400;
+ const size_t kTransmissionTime = 1234;
+ const bool kPaced = true;
+ for (size_t i = 0; i < num_items; ++i) {
+ sent_packets.push_back(PacketInfo(0, static_cast<int64_t>(i),
+ static_cast<uint16_t>(i), kPacketSize,
+ kPaced));
+ received_packets.push_back(
+ PacketInfo(static_cast<int64_t>(i) + kTransmissionTime, 0,
+ static_cast<uint16_t>(i), kPacketSize, false));
+ }
+ for (size_t i = 0; i < num_items; ++i)
+ history_.AddAndRemoveOld(sent_packets[i]);
+ std::random_shuffle(received_packets.begin(), received_packets.end());
+ for (size_t i = 0; i < num_items; ++i) {
+ PacketInfo packet = received_packets[i];
+ EXPECT_TRUE(history_.GetInfo(&packet, false));
+ PacketInfo sent_packet = sent_packets[packet.sequence_number];
+ sent_packet.arrival_time_ms = packet.arrival_time_ms;
+ EXPECT_EQ(sent_packet, packet);
+ EXPECT_TRUE(history_.GetInfo(&packet, true));
+ }
+ for (PacketInfo packet : sent_packets)
+ EXPECT_FALSE(history_.GetInfo(&packet, false));
+}
+
+TEST_F(SendTimeHistoryTest, HistorySize) {
+ const int kItems = kDefaultHistoryLengthMs / 100;
+ for (int i = 0; i < kItems; ++i)
+ history_.AddAndRemoveOld(PacketInfo(0, i * 100, i, 0, false));
+ for (int i = 0; i < kItems; ++i) {
+ PacketInfo info = {0, 0, static_cast<uint16_t>(i), 0, false};
+ EXPECT_TRUE(history_.GetInfo(&info, false));
+ EXPECT_EQ(i * 100, info.send_time_ms);
+ }
+ history_.AddAndRemoveOld(PacketInfo(0, kItems * 100, kItems, 0, false));
+ PacketInfo info = {0, 0, 0, 0, false};
+ EXPECT_FALSE(history_.GetInfo(&info, false));
+ for (int i = 1; i < (kItems + 1); ++i) {
+ info = {0, 0, static_cast<uint16_t>(i), 0, false};
+ EXPECT_TRUE(history_.GetInfo(&info, false));
+ EXPECT_EQ(i * 100, info.send_time_ms);
+ }
+}
+
+TEST_F(SendTimeHistoryTest, HistorySizeWithWraparound) {
+ const uint16_t kMaxSeqNo = std::numeric_limits<uint16_t>::max();
+ history_.AddAndRemoveOld(PacketInfo(0, 0, kMaxSeqNo - 2, 0, false));
+ history_.AddAndRemoveOld(PacketInfo(0, 100, kMaxSeqNo - 1, 0, false));
+ history_.AddAndRemoveOld(PacketInfo(0, 200, kMaxSeqNo, 0, false));
+ history_.AddAndRemoveOld(PacketInfo(0, kDefaultHistoryLengthMs, 0, 0, false));
+ PacketInfo info = {0, 0, static_cast<uint16_t>(kMaxSeqNo - 2), 0, false};
+ EXPECT_FALSE(history_.GetInfo(&info, false));
+ info = {0, 0, static_cast<uint16_t>(kMaxSeqNo - 1), 0, false};
+ EXPECT_TRUE(history_.GetInfo(&info, false));
+ info = {0, 0, static_cast<uint16_t>(kMaxSeqNo), 0, false};
+ EXPECT_TRUE(history_.GetInfo(&info, false));
+ info = {0, 0, 0, 0, false};
+ EXPECT_TRUE(history_.GetInfo(&info, false));
+
+ // Create a gap (kMaxSeqNo - 1) -> 0.
+ info = {0, 0, kMaxSeqNo, 0, false};
+ EXPECT_TRUE(history_.GetInfo(&info, true));
+
+ history_.AddAndRemoveOld(PacketInfo(0, 1100, 1, 0, false));
+
+ info = {0, 0, static_cast<uint16_t>(kMaxSeqNo - 2), 0, false};
+ EXPECT_FALSE(history_.GetInfo(&info, false));
+ info = {0, 0, static_cast<uint16_t>(kMaxSeqNo - 1), 0, false};
+ EXPECT_FALSE(history_.GetInfo(&info, false));
+ info = {0, 0, kMaxSeqNo, 0, false};
+ EXPECT_FALSE(history_.GetInfo(&info, false));
+ info = {0, 0, 0, 0, false};
+ EXPECT_TRUE(history_.GetInfo(&info, false));
+ info = {0, 0, 1, 0, false};
+ EXPECT_TRUE(history_.GetInfo(&info, false));
+}
+
+TEST_F(SendTimeHistoryTest, InterleavedGetAndRemove) {
+ const uint16_t kSeqNo = 1;
+ const int64_t kTimestamp = 2;
+ PacketInfo packets[3] = {{0, kTimestamp, kSeqNo, 0, false},
+ {0, kTimestamp + 1, kSeqNo + 1, 0, false},
+ {0, kTimestamp + 2, kSeqNo + 2, 0, false}};
+
+ history_.AddAndRemoveOld(packets[0]);
+ history_.AddAndRemoveOld(packets[1]);
+
+ PacketInfo info = {0, 0, packets[0].sequence_number, 0, false};
+ EXPECT_TRUE(history_.GetInfo(&info, true));
+ EXPECT_EQ(packets[0], info);
+
+ history_.AddAndRemoveOld(packets[2]);
+
+ info = {0, 0, packets[1].sequence_number, 0, false};
+ EXPECT_TRUE(history_.GetInfo(&info, true));
+ EXPECT_EQ(packets[1], info);
+
+ info = {0, 0, packets[2].sequence_number, 0, false};
+ EXPECT_TRUE(history_.GetInfo(&info, true));
+ EXPECT_EQ(packets[2], info);
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe.cc b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe.cc
index 6e8dbc7c068..c667b6864e4 100644
--- a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe.cc
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe.cc
@@ -29,7 +29,31 @@ namespace bwe {
const int kSetCapacity = 1000;
BweReceiver::BweReceiver(int flow_id)
- : flow_id_(flow_id), received_packets_(kSetCapacity) {
+ : flow_id_(flow_id),
+ received_packets_(kSetCapacity),
+ rate_counter_(),
+ loss_account_() {
+}
+
+BweReceiver::BweReceiver(int flow_id, int64_t window_size_ms)
+ : flow_id_(flow_id),
+ received_packets_(kSetCapacity),
+ rate_counter_(window_size_ms),
+ loss_account_() {
+}
+
+void BweReceiver::ReceivePacket(int64_t arrival_time_ms,
+ const MediaPacket& media_packet) {
+ if (received_packets_.size() == kSetCapacity) {
+ RelieveSetAndUpdateLoss();
+ }
+
+ received_packets_.Insert(media_packet.sequence_number(),
+ media_packet.send_time_ms(), arrival_time_ms,
+ media_packet.payload_size());
+
+ rate_counter_.UpdateRates(media_packet.send_time_ms() * 1000,
+ static_cast<uint32_t>(media_packet.payload_size()));
}
class NullBweSender : public BweSender {
@@ -46,7 +70,7 @@ class NullBweSender : public BweSender {
int Process() override { return 0; }
private:
- DISALLOW_COPY_AND_ASSIGN(NullBweSender);
+ RTC_DISALLOW_COPY_AND_ASSIGN(NullBweSender);
};
int64_t GetAbsSendTimeInMs(uint32_t abs_send_time) {
@@ -97,24 +121,54 @@ BweReceiver* CreateBweReceiver(BandwidthEstimatorType type,
return NULL;
}
-float BweReceiver::GlobalPacketLossRatio() {
- if (received_packets_.empty()) {
- return 0.0f;
+// Take into account all LinkedSet content.
+void BweReceiver::UpdateLoss() {
+ loss_account_.Add(LinkedSetPacketLossRatio());
+}
+
+// Preserve 10% latest packets and update packet loss based on the oldest
+// 90%, that will be removed.
+void BweReceiver::RelieveSetAndUpdateLoss() {
+ // Compute Loss for the whole LinkedSet and updates loss_account_.
+ UpdateLoss();
+
+ size_t num_preserved_elements = received_packets_.size() / 10;
+ PacketNodeIt it = received_packets_.begin();
+ std::advance(it, num_preserved_elements);
+
+ while (it != received_packets_.end()) {
+ received_packets_.Erase(it++);
}
- // Possibly there are packets missing.
- const uint16_t kMaxGap = 1.5 * kSetCapacity;
- uint16_t min = received_packets_.find_min();
- uint16_t max = received_packets_.find_max();
-
- int gap;
- if (max - min < kMaxGap) {
- gap = max - min + 1;
- } else { // There was an overflow.
- max = received_packets_.upper_bound(kMaxGap);
- min = received_packets_.lower_bound(0xFFFF - kMaxGap);
- gap = max + (0xFFFF - min) + 2;
+
+ // Compute Loss for the preserved elements
+ loss_account_.Subtract(LinkedSetPacketLossRatio());
+}
+
+float BweReceiver::GlobalReceiverPacketLossRatio() {
+ UpdateLoss();
+ return loss_account_.LossRatio();
+}
+
+// This function considers at most kSetCapacity = 1000 packets.
+LossAccount BweReceiver::LinkedSetPacketLossRatio() {
+ if (received_packets_.empty()) {
+ return LossAccount();
}
- return static_cast<float>(received_packets_.size()) / gap;
+
+ uint16_t oldest_seq_num = received_packets_.OldestSeqNumber();
+ uint16_t newest_seq_num = received_packets_.NewestSeqNumber();
+
+ size_t set_total_packets =
+ static_cast<uint16_t>(newest_seq_num - oldest_seq_num + 1);
+
+ size_t set_received_packets = received_packets_.size();
+ size_t set_lost_packets = set_total_packets - set_received_packets;
+
+ return LossAccount(set_total_packets, set_lost_packets);
+}
+
+uint32_t BweReceiver::RecentKbps() const {
+ return (rate_counter_.bits_per_second() + 500) / 1000;
}
// Go through a fixed time window of most recent packets received and
@@ -133,35 +187,40 @@ float BweReceiver::RecentPacketLossRatio() {
// Lowest timestamp limit, oldest one that should be checked.
int64_t time_limit_ms = (*node_it)->arrival_time_ms - kPacketLossTimeWindowMs;
// Oldest and newest values found within the given time window.
- uint16_t oldest_seq_nb = (*node_it)->sequence_number;
- uint16_t newest_seq_nb = oldest_seq_nb;
+ uint16_t oldest_seq_num = (*node_it)->sequence_number;
+ uint16_t newest_seq_num = oldest_seq_num;
while (node_it != received_packets_.end()) {
if ((*node_it)->arrival_time_ms < time_limit_ms) {
break;
}
- uint16_t seq_nb = (*node_it)->sequence_number;
- if (IsNewerSequenceNumber(seq_nb, newest_seq_nb)) {
- newest_seq_nb = seq_nb;
+ uint16_t seq_num = (*node_it)->sequence_number;
+ if (IsNewerSequenceNumber(seq_num, newest_seq_num)) {
+ newest_seq_num = seq_num;
}
- if (IsNewerSequenceNumber(oldest_seq_nb, seq_nb)) {
- oldest_seq_nb = seq_nb;
+ if (IsNewerSequenceNumber(oldest_seq_num, seq_num)) {
+ oldest_seq_num = seq_num;
}
++node_it;
++number_packets_received;
}
// Interval width between oldest and newest sequence number.
- // There was an overflow if newest_seq_nb < oldest_seq_nb.
- int gap = static_cast<uint16_t>(newest_seq_nb - oldest_seq_nb + 1);
+ // There was an overflow if newest_seq_num < oldest_seq_num.
+ int gap = static_cast<uint16_t>(newest_seq_num - oldest_seq_num + 1);
return static_cast<float>(gap - number_packets_received) / gap;
}
+LinkedSet::~LinkedSet() {
+ while (!empty())
+ RemoveTail();
+}
+
void LinkedSet::Insert(uint16_t sequence_number,
int64_t send_time_ms,
int64_t arrival_time_ms,
size_t payload_size) {
- std::map<uint16_t, PacketNodeIt>::iterator it = map_.find(sequence_number);
+ auto it = map_.find(sequence_number);
if (it != map_.end()) {
PacketNodeIt node_it = it->second;
PacketIdentifierNode* node = *node_it;
@@ -179,8 +238,15 @@ void LinkedSet::Insert(uint16_t sequence_number,
arrival_time_ms, payload_size));
}
}
+
+void LinkedSet::Insert(PacketIdentifierNode packet_identifier) {
+ Insert(packet_identifier.sequence_number, packet_identifier.send_time_ms,
+ packet_identifier.arrival_time_ms, packet_identifier.payload_size);
+}
+
void LinkedSet::RemoveTail() {
map_.erase(list_.back()->sequence_number);
+ delete list_.back();
list_.pop_back();
}
void LinkedSet::UpdateHead(PacketIdentifierNode* new_head) {
@@ -188,6 +254,27 @@ void LinkedSet::UpdateHead(PacketIdentifierNode* new_head) {
map_[new_head->sequence_number] = list_.begin();
}
+void LinkedSet::Erase(PacketNodeIt node_it) {
+ map_.erase((*node_it)->sequence_number);
+ delete (*node_it);
+ list_.erase(node_it);
+}
+
+void LossAccount::Add(LossAccount rhs) {
+ num_total += rhs.num_total;
+ num_lost += rhs.num_lost;
+}
+void LossAccount::Subtract(LossAccount rhs) {
+ num_total -= rhs.num_total;
+ num_lost -= rhs.num_lost;
+}
+
+float LossAccount::LossRatio() {
+ if (num_total == 0)
+ return 0.0f;
+ return static_cast<float>(num_lost) / num_total;
+}
+
} // namespace bwe
} // namespace testing
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe.h b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe.h
index 6aa79cac2a8..ef9b3149d77 100644
--- a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe.h
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe.h
@@ -13,13 +13,35 @@
#include <sstream>
+#include "webrtc/test/testsupport/gtest_prod_util.h"
#include "webrtc/modules/remote_bitrate_estimator/test/packet.h"
#include "webrtc/modules/bitrate_controller/include/bitrate_controller.h"
+#include "webrtc/modules/remote_bitrate_estimator/test/bwe_test_framework.h"
namespace webrtc {
namespace testing {
namespace bwe {
+// Overload map comparator.
+class SequenceNumberOlderThan {
+ public:
+ bool operator()(uint16_t seq_num_1, uint16_t seq_num_2) const {
+ return IsNewerSequenceNumber(seq_num_2, seq_num_1);
+ }
+};
+
+// Holds information for computing global packet loss.
+struct LossAccount {
+ LossAccount() : num_total(0), num_lost(0) {}
+ LossAccount(size_t num_total, size_t num_lost)
+ : num_total(num_total), num_lost(num_lost) {}
+ void Add(LossAccount rhs);
+ void Subtract(LossAccount rhs);
+ float LossRatio();
+ size_t num_total;
+ size_t num_lost;
+};
+
// Holds only essential information about packets to be saved for
// further use, e.g. for calculating packet loss and receiving rate.
struct PacketIdentifierNode {
@@ -46,6 +68,7 @@ typedef std::list<PacketIdentifierNode*>::iterator PacketNodeIt;
class LinkedSet {
public:
explicit LinkedSet(int capacity) : capacity_(capacity) {}
+ ~LinkedSet();
// If the arriving packet (identified by its sequence number) is already
// in the LinkedSet, move its Node to the head of the list. Else, create
@@ -56,19 +79,21 @@ class LinkedSet {
int64_t arrival_time_ms,
size_t payload_size);
+ void Insert(PacketIdentifierNode packet_identifier);
+
PacketNodeIt begin() { return list_.begin(); }
PacketNodeIt end() { return list_.end(); }
- bool empty() { return list_.empty(); }
- size_t size() { return list_.size(); }
- // Gets the latest arrived sequence number.
- uint16_t find_max() { return map_.rbegin()->first; }
- // Gets the first arrived sequence number still saved in the LinkedSet.
- uint16_t find_min() { return map_.begin()->first; }
- // Gets the lowest saved sequence number that is >= than the input key.
- uint16_t lower_bound(uint16_t key) { return map_.lower_bound(key)->first; }
- // Gets the highest saved sequence number that is <= than the input key.
- uint16_t upper_bound(uint16_t key) { return map_.upper_bound(key)->first; }
- size_t capacity() { return capacity_; }
+
+ bool empty() const { return list_.empty(); }
+ size_t size() const { return list_.size(); }
+ size_t capacity() const { return capacity_; }
+
+ uint16_t OldestSeqNumber() const { return empty() ? 0 : map_.begin()->first; }
+ uint16_t NewestSeqNumber() const {
+ return empty() ? 0 : map_.rbegin()->first;
+ }
+
+ void Erase(PacketNodeIt node_it);
private:
// Pop oldest element from the back of the list and remove it from the map.
@@ -76,45 +101,71 @@ class LinkedSet {
// Add new element to the front of the list and insert it in the map.
void UpdateHead(PacketIdentifierNode* new_head);
size_t capacity_;
- std::map<uint16_t, PacketNodeIt> map_;
+ std::map<uint16_t, PacketNodeIt, SequenceNumberOlderThan> map_;
std::list<PacketIdentifierNode*> list_;
};
-const int kMinBitrateKbps = 20;
-const int kMaxBitrateKbps = 3000;
+const int kMinBitrateKbps = 50;
+const int kMaxBitrateKbps = 2500;
class BweSender : public Module {
public:
BweSender() {}
+ explicit BweSender(int bitrate_kbps) : bitrate_kbps_(bitrate_kbps) {}
virtual ~BweSender() {}
virtual int GetFeedbackIntervalMs() const = 0;
virtual void GiveFeedback(const FeedbackPacket& feedback) = 0;
virtual void OnPacketsSent(const Packets& packets) = 0;
+ protected:
+ int bitrate_kbps_;
+
private:
- DISALLOW_COPY_AND_ASSIGN(BweSender);
+ RTC_DISALLOW_COPY_AND_ASSIGN(BweSender);
};
class BweReceiver {
public:
explicit BweReceiver(int flow_id);
+ BweReceiver(int flow_id, int64_t window_size_ms);
+
virtual ~BweReceiver() {}
virtual void ReceivePacket(int64_t arrival_time_ms,
- const MediaPacket& media_packet) {}
+ const MediaPacket& media_packet);
virtual FeedbackPacket* GetFeedback(int64_t now_ms) { return NULL; }
- float GlobalPacketLossRatio();
- float RecentPacketLossRatio();
size_t GetSetCapacity() { return received_packets_.capacity(); }
+ double BitrateWindowS() const { return rate_counter_.BitrateWindowS(); }
+ uint32_t RecentKbps() const; // Receiving Rate.
+
+ // Computes packet loss during an entire simulation, up to 4 billion packets.
+ float GlobalReceiverPacketLossRatio(); // Plot histogram.
+ float RecentPacketLossRatio(); // Plot dynamics.
static const int64_t kPacketLossTimeWindowMs = 500;
+ static const int64_t kReceivingRateTimeWindowMs = 1000;
protected:
int flow_id_;
// Deals with packets sent more than once.
LinkedSet received_packets_;
+ // Used for calculating recent receiving rate.
+ RateCounter rate_counter_;
+
+ private:
+ FRIEND_TEST_ALL_PREFIXES(BweReceiverTest, RecentKbps);
+ FRIEND_TEST_ALL_PREFIXES(BweReceiverTest, Loss);
+
+ void UpdateLoss();
+ void RelieveSetAndUpdateLoss();
+ // Packet loss for packets stored in the LinkedSet, up to 1000 packets.
+ // Used to update global loss account whenever the set is filled and cleared.
+ LossAccount LinkedSetPacketLossRatio();
+
+ // Used for calculating global packet loss ratio.
+ LossAccount loss_account_;
};
enum BandwidthEstimatorType {
@@ -125,6 +176,8 @@ enum BandwidthEstimatorType {
kTcpEstimator
};
+const std::string bwe_names[] = {"Null", "NADA", "REMB", "GCC", "TCP"};
+
int64_t GetAbsSendTimeInMs(uint32_t abs_send_time);
BweSender* CreateBweSender(BandwidthEstimatorType estimator,
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe_test.cc b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe_test.cc
index a2ce34084e3..72283b98c76 100644
--- a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe_test.cc
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe_test.cc
@@ -10,10 +10,13 @@
#include "webrtc/modules/remote_bitrate_estimator/test/bwe_test.h"
+#include <sstream>
+
#include "webrtc/base/common.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/modules/interface/module_common_types.h"
#include "webrtc/modules/remote_bitrate_estimator/test/bwe_test_framework.h"
+#include "webrtc/modules/remote_bitrate_estimator/test/metric_recorder.h"
#include "webrtc/modules/remote_bitrate_estimator/test/packet_receiver.h"
#include "webrtc/modules/remote_bitrate_estimator/test/packet_sender.h"
#include "webrtc/system_wrappers/interface/clock.h"
@@ -48,7 +51,7 @@ void PacketProcessorRunner::RunFor(int64_t time_ms,
processor_->RunFor(time_ms, &to_process);
QueuePackets(&to_process, time_now_ms * 1000);
if (!to_process.empty()) {
- processor_->Plot((to_process.back()->send_time_us() + 500) / 1000);
+ processor_->Plot(to_process.back()->send_time_ms());
}
in_out->merge(to_process, DereferencingComparator<Packet>);
}
@@ -89,8 +92,15 @@ void PacketProcessorRunner::QueuePackets(Packets* batch,
batch->merge(to_transfer, DereferencingComparator<Packet>);
}
-BweTest::BweTest()
- : run_time_ms_(0), time_now_ms_(-1), simulation_interval_ms_(-1) {
+// Plot link capacity by default.
+BweTest::BweTest() : BweTest(true) {
+}
+
+BweTest::BweTest(bool plot_capacity)
+ : run_time_ms_(0),
+ time_now_ms_(-1),
+ simulation_interval_ms_(-1),
+ plot_total_available_capacity_(plot_capacity) {
links_.push_back(&uplink_);
links_.push_back(&downlink_);
}
@@ -234,8 +244,27 @@ void BweTest::RunFairnessTest(BandwidthEstimatorType bwe_type,
size_t num_media_flows,
size_t num_tcp_flows,
int64_t run_time_seconds,
- int capacity_kbps,
- int max_delay_ms) {
+ uint32_t capacity_kbps,
+ int64_t max_delay_ms,
+ int64_t rtt_ms,
+ int64_t max_jitter_ms,
+ const int64_t* offsets_ms) {
+ RunFairnessTest(bwe_type, num_media_flows, num_tcp_flows, run_time_seconds,
+ capacity_kbps, max_delay_ms, rtt_ms, max_jitter_ms,
+ offsets_ms, "Fairness_test", bwe_names[bwe_type]);
+}
+
+void BweTest::RunFairnessTest(BandwidthEstimatorType bwe_type,
+ size_t num_media_flows,
+ size_t num_tcp_flows,
+ int64_t run_time_seconds,
+ uint32_t capacity_kbps,
+ int64_t max_delay_ms,
+ int64_t rtt_ms,
+ int64_t max_jitter_ms,
+ const int64_t* offsets_ms,
+ const std::string& title,
+ const std::string& flow_name) {
std::set<int> all_flow_ids;
std::set<int> media_flow_ids;
std::set<int> tcp_flow_ids;
@@ -253,50 +282,76 @@ void BweTest::RunFairnessTest(BandwidthEstimatorType bwe_type,
std::vector<VideoSource*> sources;
std::vector<PacketSender*> senders;
+ std::vector<MetricRecorder*> metric_recorders;
+
+ int64_t max_offset_ms = 0;
- size_t i = 1;
for (int media_flow : media_flow_ids) {
- // Streams started 20 seconds apart to give them different advantage when
- // competing for the bandwidth.
- const int64_t kFlowStartOffsetMs = i++ * (rand() % 10000);
sources.push_back(new AdaptiveVideoSource(media_flow, 30, 300, 0,
- kFlowStartOffsetMs));
+ offsets_ms[media_flow]));
senders.push_back(new PacedVideoSender(&uplink_, sources.back(), bwe_type));
+ max_offset_ms = std::max(max_offset_ms, offsets_ms[media_flow]);
}
- const int64_t kTcpStartOffsetMs = 5000;
- for (int tcp_flow : tcp_flow_ids)
- senders.push_back(new TcpSender(&uplink_, tcp_flow, kTcpStartOffsetMs));
+ for (int tcp_flow : tcp_flow_ids) {
+ senders.push_back(new TcpSender(&uplink_, tcp_flow, offsets_ms[tcp_flow]));
+ max_offset_ms = std::max(max_offset_ms, offsets_ms[tcp_flow]);
+ }
ChokeFilter choke(&uplink_, all_flow_ids);
- choke.SetCapacity(capacity_kbps);
- choke.SetMaxDelay(max_delay_ms);
+ choke.set_capacity_kbps(capacity_kbps);
+ choke.set_max_delay_ms(max_delay_ms);
+ LinkShare link_share(&choke);
+ int64_t one_way_delay_ms = rtt_ms / 2;
DelayFilter delay_uplink(&uplink_, all_flow_ids);
- delay_uplink.SetDelayMs(25);
+ delay_uplink.SetOneWayDelayMs(one_way_delay_ms);
+
+ JitterFilter jitter(&uplink_, all_flow_ids);
+ jitter.SetMaxJitter(max_jitter_ms);
std::vector<RateCounterFilter*> rate_counters;
- for (int flow : all_flow_ids) {
+ for (int flow : media_flow_ids) {
rate_counters.push_back(
- new RateCounterFilter(&uplink_, flow, "receiver_input"));
+ new RateCounterFilter(&uplink_, flow, "Receiver", bwe_names[bwe_type]));
+ }
+ for (int flow : tcp_flow_ids) {
+ rate_counters.push_back(new RateCounterFilter(&uplink_, flow, "Receiver",
+ bwe_names[kTcpEstimator]));
}
- RateCounterFilter total_utilization(&uplink_, all_flow_ids,
- "total_utilization");
+ RateCounterFilter total_utilization(
+ &uplink_, all_flow_ids, "total_utilization", "Total_link_utilization");
std::vector<PacketReceiver*> receivers;
- i = 0;
+ // Delay is plotted only for the first flow.
+ // To plot it for all of them, replace "i == 0" with "true" on new PacketReceiver().
for (int media_flow : media_flow_ids) {
- receivers.push_back(
- new PacketReceiver(&uplink_, media_flow, bwe_type, i++ == 0, false));
+ metric_recorders.push_back(
+ new MetricRecorder(bwe_names[bwe_type], static_cast<int>(media_flow),
+ senders[media_flow], &link_share));
+ receivers.push_back(new PacketReceiver(&uplink_, media_flow, bwe_type,
+ media_flow == 0, false,
+ metric_recorders[media_flow]));
+ metric_recorders[media_flow]->set_plot_available_capacity(
+ media_flow == 0 && plot_total_available_capacity_);
+ metric_recorders[media_flow]->set_start_computing_metrics_ms(max_offset_ms);
}
+ // Delay is not plotted for TCP flows. To plot it for them,
+ // replace the first "false" occurrence with "true" on new PacketReceiver().
for (int tcp_flow : tcp_flow_ids) {
- receivers.push_back(
- new PacketReceiver(&uplink_, tcp_flow, kTcpEstimator, false, false));
+ metric_recorders.push_back(
+ new MetricRecorder(bwe_names[kTcpEstimator], static_cast<int>(tcp_flow),
+ senders[tcp_flow], &link_share));
+ receivers.push_back(new PacketReceiver(&uplink_, tcp_flow, kTcpEstimator,
+ false, false,
+ metric_recorders[tcp_flow]));
+ metric_recorders[tcp_flow]->set_plot_available_capacity(
+ tcp_flow == 0 && plot_total_available_capacity_);
}
DelayFilter delay_downlink(&downlink_, all_flow_ids);
- delay_downlink.SetDelayMs(25);
+ delay_downlink.SetOneWayDelayMs(one_way_delay_ms);
RunFor(run_time_seconds * 1000);
@@ -315,6 +370,23 @@ void BweTest::RunFairnessTest(BandwidthEstimatorType bwe_type,
PrintResults(capacity_kbps, total_utilization.GetBitrateStats(),
flow_delay_ms, flow_throughput_kbps);
+ for (int i : all_flow_ids) {
+ metric_recorders[i]->PlotThroughputHistogram(
+ title, flow_name, static_cast<int>(num_media_flows), 0);
+
+ metric_recorders[i]->PlotLossHistogram(title, flow_name,
+ static_cast<int>(num_media_flows),
+ receivers[i]->GlobalPacketLoss());
+ }
+
+ // Pointless to show delay histogram for TCP flow.
+ for (int i : media_flow_ids) {
+ metric_recorders[i]->PlotDelayHistogram(title, bwe_names[bwe_type],
+ static_cast<int>(num_media_flows),
+ one_way_delay_ms);
+ BWE_TEST_LOGGING_BASELINEBAR(5, bwe_names[bwe_type], one_way_delay_ms, i);
+ }
+
for (VideoSource* source : sources)
delete source;
for (PacketSender* sender : senders)
@@ -323,7 +395,589 @@ void BweTest::RunFairnessTest(BandwidthEstimatorType bwe_type,
delete rate_counter;
for (PacketReceiver* receiver : receivers)
delete receiver;
+ for (MetricRecorder* recorder : metric_recorders)
+ delete recorder;
+}
+
+void BweTest::RunChoke(BandwidthEstimatorType bwe_type,
+ std::vector<int> capacities_kbps) {
+ int flow_id = bwe_type;
+ AdaptiveVideoSource source(flow_id, 30, 300, 0, 0);
+ VideoSender sender(&uplink_, &source, bwe_type);
+ ChokeFilter choke(&uplink_, flow_id);
+ LinkShare link_share(&choke);
+ MetricRecorder metric_recorder(bwe_names[bwe_type], flow_id, &sender,
+ &link_share);
+ PacketReceiver receiver(&uplink_, flow_id, bwe_type, true, false,
+ &metric_recorder);
+ metric_recorder.set_plot_available_capacity(plot_total_available_capacity_);
+
+ choke.set_max_delay_ms(500);
+ const int64_t kRunTimeMs = 60 * 1000;
+
+ std::stringstream title("Choke");
+ char delimiter = '_';
+
+ for (auto it = capacities_kbps.begin(); it != capacities_kbps.end(); ++it) {
+ choke.set_capacity_kbps(*it);
+ RunFor(kRunTimeMs);
+ title << delimiter << (*it);
+ delimiter = '-';
+ }
+
+ title << "_kbps,_" << (kRunTimeMs / 1000) << "s_each";
+ metric_recorder.PlotThroughputHistogram(title.str(), bwe_names[bwe_type], 1,
+ 0);
+ metric_recorder.PlotDelayHistogram(title.str(), bwe_names[bwe_type], 1, 0);
+ // receiver.PlotLossHistogram(title, bwe_names[bwe_type], 1);
+ // receiver.PlotObjectiveHistogram(title, bwe_names[bwe_type], 1);
+}
+
+// 5.1. Single Video and Audio media traffic, forward direction.
+void BweTest::RunVariableCapacity1SingleFlow(BandwidthEstimatorType bwe_type) {
+ const int kFlowId = 0; // Arbitrary value.
+ AdaptiveVideoSource source(kFlowId, 30, 300, 0, 0);
+ PacedVideoSender sender(&uplink_, &source, bwe_type);
+
+ DefaultEvaluationFilter up_filter(&uplink_, kFlowId);
+ LinkShare link_share(&(up_filter.choke));
+ MetricRecorder metric_recorder(bwe_names[bwe_type], kFlowId, &sender,
+ &link_share);
+
+ PacketReceiver receiver(&uplink_, kFlowId, bwe_type, true, true,
+ &metric_recorder);
+
+ metric_recorder.set_plot_available_capacity(plot_total_available_capacity_);
+
+ DelayFilter down_filter(&downlink_, kFlowId);
+ down_filter.SetOneWayDelayMs(kOneWayDelayMs);
+
+ // Test also with one way propagation delay = 100ms.
+ // up_filter.delay.SetOneWayDelayMs(100);
+ // down_filter.SetOneWayDelayMs(100);
+
+ up_filter.choke.set_capacity_kbps(1000);
+ RunFor(40 * 1000); // 0-40s.
+ up_filter.choke.set_capacity_kbps(2500);
+ RunFor(20 * 1000); // 40-60s.
+ up_filter.choke.set_capacity_kbps(600);
+ RunFor(20 * 1000); // 60-80s.
+ up_filter.choke.set_capacity_kbps(1000);
+ RunFor(20 * 1000); // 80-100s.
+
+ std::string title("5.1_Variable_capacity_single_flow");
+ metric_recorder.PlotThroughputHistogram(title, bwe_names[bwe_type], 1, 0);
+ metric_recorder.PlotDelayHistogram(title, bwe_names[bwe_type], 1,
+ kOneWayDelayMs);
+ metric_recorder.PlotLossHistogram(title, bwe_names[bwe_type], 1,
+ receiver.GlobalPacketLoss());
+ BWE_TEST_LOGGING_BASELINEBAR(5, bwe_names[bwe_type], kOneWayDelayMs, kFlowId);
+}
+
+// 5.2. Two forward direction competing flows, variable capacity.
+void BweTest::RunVariableCapacity2MultipleFlows(BandwidthEstimatorType bwe_type,
+ size_t num_flows) {
+ std::vector<VideoSource*> sources;
+ std::vector<PacketSender*> senders;
+ std::vector<MetricRecorder*> metric_recorders;
+ std::vector<PacketReceiver*> receivers;
+
+ const int64_t kStartingApartMs = 0; // Flows initialized simultaneously.
+
+ for (size_t i = 0; i < num_flows; ++i) {
+ sources.push_back(new AdaptiveVideoSource(static_cast<int>(i), 30, 300, 0,
+ i * kStartingApartMs));
+ senders.push_back(new VideoSender(&uplink_, sources[i], bwe_type));
+ }
+
+ FlowIds flow_ids = CreateFlowIdRange(0, static_cast<int>(num_flows - 1));
+
+ DefaultEvaluationFilter up_filter(&uplink_, flow_ids);
+ LinkShare link_share(&(up_filter.choke));
+
+ RateCounterFilter total_utilization(&uplink_, flow_ids, "Total_utilization",
+ "Total_link_utilization");
+
+ // Delay is plotted only for the first flow.
+ // To plot it for all of them, replace "i == 0" with "true" on new PacketReceiver().
+ for (size_t i = 0; i < num_flows; ++i) {
+ metric_recorders.push_back(new MetricRecorder(
+ bwe_names[bwe_type], static_cast<int>(i), senders[i], &link_share));
+
+ receivers.push_back(new PacketReceiver(&uplink_, static_cast<int>(i),
+ bwe_type, i == 0, false,
+ metric_recorders[i]));
+ metric_recorders[i]->set_plot_available_capacity(
+ i == 0 && plot_total_available_capacity_);
+ }
+
+ DelayFilter down_filter(&downlink_, flow_ids);
+ down_filter.SetOneWayDelayMs(kOneWayDelayMs);
+ // Test also with one way propagation delay = 100ms.
+ // up_filter.delay.SetOneWayDelayMs(100);
+ // down_filter.SetOneWayDelayMs(100);
+
+ up_filter.choke.set_capacity_kbps(4000);
+ RunFor(25 * 1000); // 0-25s.
+ up_filter.choke.set_capacity_kbps(2000);
+ RunFor(25 * 1000); // 25-50s.
+ up_filter.choke.set_capacity_kbps(3500);
+ RunFor(25 * 1000); // 50-75s.
+ up_filter.choke.set_capacity_kbps(1000);
+ RunFor(25 * 1000); // 75-100s.
+ up_filter.choke.set_capacity_kbps(2000);
+ RunFor(25 * 1000); // 100-125s.
+
+ std::string title("5.2_Variable_capacity_two_flows");
+ for (size_t i = 0; i < num_flows; ++i) {
+ metric_recorders[i]->PlotThroughputHistogram(title, bwe_names[bwe_type],
+ num_flows, 0);
+ metric_recorders[i]->PlotDelayHistogram(title, bwe_names[bwe_type],
+ num_flows, kOneWayDelayMs);
+ metric_recorders[i]->PlotLossHistogram(title, bwe_names[bwe_type],
+ num_flows,
+ receivers[i]->GlobalPacketLoss());
+ BWE_TEST_LOGGING_BASELINEBAR(5, bwe_names[bwe_type], kOneWayDelayMs, i);
+ }
+
+ for (VideoSource* source : sources)
+ delete source;
+ for (PacketSender* sender : senders)
+ delete sender;
+ for (MetricRecorder* recorder : metric_recorders)
+ delete recorder;
+ for (PacketReceiver* receiver : receivers)
+ delete receiver;
+}
+
+// 5.3. Bi-directional RMCAT flows.
+void BweTest::RunBidirectionalFlow(BandwidthEstimatorType bwe_type) {
+ enum direction { kForward = 0, kBackward };
+ const size_t kNumFlows = 2;
+ rtc::scoped_ptr<AdaptiveVideoSource> sources[kNumFlows];
+ rtc::scoped_ptr<VideoSender> senders[kNumFlows];
+ rtc::scoped_ptr<MetricRecorder> metric_recorders[kNumFlows];
+ rtc::scoped_ptr<PacketReceiver> receivers[kNumFlows];
+
+ sources[kForward].reset(new AdaptiveVideoSource(kForward, 30, 300, 0, 0));
+ senders[kForward].reset(
+ new VideoSender(&uplink_, sources[kForward].get(), bwe_type));
+
+ sources[kBackward].reset(new AdaptiveVideoSource(kBackward, 30, 300, 0, 0));
+ senders[kBackward].reset(
+ new VideoSender(&downlink_, sources[kBackward].get(), bwe_type));
+
+ DefaultEvaluationFilter up_filter(&uplink_, kForward);
+ LinkShare up_link_share(&(up_filter.choke));
+
+ metric_recorders[kForward].reset(new MetricRecorder(
+ bwe_names[bwe_type], kForward, senders[kForward].get(), &up_link_share));
+ receivers[kForward].reset(
+ new PacketReceiver(&uplink_, kForward, bwe_type, true, false,
+ metric_recorders[kForward].get()));
+
+ metric_recorders[kForward].get()->set_plot_available_capacity(
+ plot_total_available_capacity_);
+
+ DefaultEvaluationFilter down_filter(&downlink_, kBackward);
+ LinkShare down_link_share(&(down_filter.choke));
+
+ metric_recorders[kBackward].reset(
+ new MetricRecorder(bwe_names[bwe_type], kBackward,
+ senders[kBackward].get(), &down_link_share));
+ receivers[kBackward].reset(
+ new PacketReceiver(&downlink_, kBackward, bwe_type, true, false,
+ metric_recorders[kBackward].get()));
+
+ metric_recorders[kBackward].get()->set_plot_available_capacity(
+ plot_total_available_capacity_);
+
+ // Test also with one way propagation delay = 100ms.
+ // up_filter.delay.SetOneWayDelayMs(100);
+ // down_filter.delay.SetOneWayDelayMs(100);
+
+ up_filter.choke.set_capacity_kbps(2000);
+ down_filter.choke.set_capacity_kbps(2000);
+ RunFor(20 * 1000); // 0-20s.
+
+ up_filter.choke.set_capacity_kbps(1000);
+ RunFor(15 * 1000); // 20-35s.
+
+ down_filter.choke.set_capacity_kbps(800);
+ RunFor(5 * 1000); // 35-40s.
+
+ up_filter.choke.set_capacity_kbps(500);
+ RunFor(20 * 1000); // 40-60s.
+
+ up_filter.choke.set_capacity_kbps(2000);
+ RunFor(10 * 1000); // 60-70s.
+
+ down_filter.choke.set_capacity_kbps(2000);
+ RunFor(30 * 1000); // 70-100s.
+
+ std::string title("5.3_Bidirectional_flows");
+ for (size_t i = 0; i < kNumFlows; ++i) {
+ metric_recorders[i].get()->PlotThroughputHistogram(
+ title, bwe_names[bwe_type], kNumFlows, 0);
+ metric_recorders[i].get()->PlotDelayHistogram(title, bwe_names[bwe_type],
+ kNumFlows, kOneWayDelayMs);
+ metric_recorders[i].get()->PlotLossHistogram(
+ title, bwe_names[bwe_type], kNumFlows,
+ receivers[i].get()->GlobalPacketLoss());
+ BWE_TEST_LOGGING_BASELINEBAR(5, bwe_names[bwe_type], kOneWayDelayMs, i);
+ }
+}
+
+// 5.4. Three forward direction competing flows, constant capacity.
+void BweTest::RunSelfFairness(BandwidthEstimatorType bwe_type) {
+ const int kNumRmcatFlows = 3;
+ const int kNumTcpFlows = 0;
+ const int64_t kRunTimeS = 120;
+ const int kLinkCapacity = 3500;
+
+ int64_t max_delay_ms = kMaxQueueingDelayMs;
+ int64_t rtt_ms = 2 * kOneWayDelayMs;
+
+ const int64_t kStartingApartMs = 20 * 1000;
+ int64_t offsets_ms[kNumRmcatFlows];
+ for (int i = 0; i < kNumRmcatFlows; ++i) {
+ offsets_ms[i] = kStartingApartMs * i;
+ }
+
+ // Test also with one way propagation delay = 100ms.
+ // rtt_ms = 2 * 100;
+ // Test also with bottleneck queue size = 20ms and 1000ms.
+ // max_delay_ms = 20;
+ // max_delay_ms = 1000;
+
+ std::string title("5.4_Self_fairness_test");
+
+ // Test also with one way propagation delay = 100ms.
+ RunFairnessTest(bwe_type, kNumRmcatFlows, kNumTcpFlows, kRunTimeS,
+ kLinkCapacity, max_delay_ms, rtt_ms, kMaxJitterMs, offsets_ms,
+ title, bwe_names[bwe_type]);
+}
+
+// 5.5. Five competing RMCAT flows under different RTTs.
+void BweTest::RunRoundTripTimeFairness(BandwidthEstimatorType bwe_type) {
+ const int kAllFlowIds[] = {0, 1, 2, 3, 4}; // Five RMCAT flows.
+ const int64_t kAllOneWayDelayMs[] = {10, 25, 50, 100, 150};
+ const size_t kNumFlows = ARRAY_SIZE(kAllFlowIds);
+ rtc::scoped_ptr<AdaptiveVideoSource> sources[kNumFlows];
+ rtc::scoped_ptr<VideoSender> senders[kNumFlows];
+ rtc::scoped_ptr<MetricRecorder> metric_recorders[kNumFlows];
+
+ // Flows initialized 10 seconds apart.
+ const int64_t kStartingApartMs = 10 * 1000;
+
+ for (size_t i = 0; i < kNumFlows; ++i) {
+ sources[i].reset(new AdaptiveVideoSource(kAllFlowIds[i], 30, 300, 0,
+ i * kStartingApartMs));
+ senders[i].reset(new VideoSender(&uplink_, sources[i].get(), bwe_type));
+ }
+
+ ChokeFilter choke_filter(&uplink_, CreateFlowIds(kAllFlowIds, kNumFlows));
+ LinkShare link_share(&choke_filter);
+
+ JitterFilter jitter_filter(&uplink_, CreateFlowIds(kAllFlowIds, kNumFlows));
+
+ rtc::scoped_ptr<DelayFilter> up_delay_filters[kNumFlows];
+ for (size_t i = 0; i < kNumFlows; ++i) {
+ up_delay_filters[i].reset(new DelayFilter(&uplink_, kAllFlowIds[i]));
+ }
+
+ RateCounterFilter total_utilization(
+ &uplink_, CreateFlowIds(kAllFlowIds, kNumFlows), "Total_utilization",
+ "Total_link_utilization");
+
+ // Delay is plotted only for the first flow.
+ // To plot it for all of them, replace "i == 0" with "true" on new PacketReceiver().
+ rtc::scoped_ptr<PacketReceiver> receivers[kNumFlows];
+ for (size_t i = 0; i < kNumFlows; ++i) {
+ metric_recorders[i].reset(
+ new MetricRecorder(bwe_names[bwe_type], static_cast<int>(i),
+ senders[i].get(), &link_share));
+
+ receivers[i].reset(new PacketReceiver(&uplink_, kAllFlowIds[i], bwe_type,
+ i == 0, false,
+ metric_recorders[i].get()));
+ metric_recorders[i].get()->set_start_computing_metrics_ms(kStartingApartMs *
+ (kNumFlows - 1));
+ metric_recorders[i].get()->set_plot_available_capacity(
+ i == 0 && plot_total_available_capacity_);
+ }
+
+ rtc::scoped_ptr<DelayFilter> down_delay_filters[kNumFlows];
+ for (size_t i = 0; i < kNumFlows; ++i) {
+ down_delay_filters[i].reset(new DelayFilter(&downlink_, kAllFlowIds[i]));
+ }
+
+ jitter_filter.SetMaxJitter(kMaxJitterMs);
+ choke_filter.set_max_delay_ms(kMaxQueueingDelayMs);
+
+ for (size_t i = 0; i < kNumFlows; ++i) {
+ up_delay_filters[i]->SetOneWayDelayMs(kAllOneWayDelayMs[i]);
+ down_delay_filters[i]->SetOneWayDelayMs(kAllOneWayDelayMs[i]);
+ }
+
+ choke_filter.set_capacity_kbps(3500);
+
+ RunFor(300 * 1000); // 0-300s.
+
+ std::string title("5.5_Round_Trip_Time_Fairness");
+ for (size_t i = 0; i < kNumFlows; ++i) {
+ metric_recorders[i].get()->PlotThroughputHistogram(
+ title, bwe_names[bwe_type], kNumFlows, 0);
+ metric_recorders[i].get()->PlotDelayHistogram(title, bwe_names[bwe_type],
+ kNumFlows, kOneWayDelayMs);
+ metric_recorders[i].get()->PlotLossHistogram(
+ title, bwe_names[bwe_type], kNumFlows,
+ receivers[i].get()->GlobalPacketLoss());
+ BWE_TEST_LOGGING_BASELINEBAR(5, bwe_names[bwe_type], kAllOneWayDelayMs[i],
+ i);
+ }
+}
+
+// 5.6. RMCAT Flow competing with a long TCP Flow.
+void BweTest::RunLongTcpFairness(BandwidthEstimatorType bwe_type) {
+ const size_t kNumRmcatFlows = 1;
+ const size_t kNumTcpFlows = 1;
+ const int64_t kRunTimeS = 120;
+ const int kCapacityKbps = 2000;
+ // Tcp starts at t = 0, media flow at t = 5s.
+ const int64_t kOffSetsMs[] = {5000, 0};
+
+ int64_t max_delay_ms = kMaxQueueingDelayMs;
+ int64_t rtt_ms = 2 * kOneWayDelayMs;
+
+ // Test also with one way propagation delay = 100ms.
+ // rtt_ms = 2 * 100;
+ // Test also with bottleneck queue size = 20ms and 1000ms.
+ // max_delay_ms = 20;
+ // max_delay_ms = 1000;
+
+ std::string title("5.6_Long_TCP_Fairness");
+ std::string flow_name(bwe_names[bwe_type] + 'x' + bwe_names[kTcpEstimator]);
+
+ RunFairnessTest(bwe_type, kNumRmcatFlows, kNumTcpFlows, kRunTimeS,
+ kCapacityKbps, max_delay_ms, rtt_ms, kMaxJitterMs, kOffSetsMs,
+ title, flow_name);
+}
+
+// 5.7. RMCAT Flows competing with multiple short TCP Flows.
+void BweTest::RunMultipleShortTcpFairness(
+ BandwidthEstimatorType bwe_type,
+ std::vector<int> tcp_file_sizes_bytes,
+ std::vector<int64_t> tcp_starting_times_ms) {
+ // Two RMCAT flows and ten TCP flows.
+ const int kAllRmcatFlowIds[] = {0, 1};
+ const int kAllTcpFlowIds[] = {2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
+
+ assert(tcp_starting_times_ms.size() == tcp_file_sizes_bytes.size() &&
+ tcp_starting_times_ms.size() == ARRAY_SIZE(kAllTcpFlowIds));
+
+ const size_t kNumRmcatFlows = ARRAY_SIZE(kAllRmcatFlowIds);
+ const size_t kNumTotalFlows = kNumRmcatFlows + ARRAY_SIZE(kAllTcpFlowIds);
+
+ rtc::scoped_ptr<AdaptiveVideoSource> sources[kNumRmcatFlows];
+ rtc::scoped_ptr<PacketSender> senders[kNumTotalFlows];
+ rtc::scoped_ptr<MetricRecorder> metric_recorders[kNumTotalFlows];
+ rtc::scoped_ptr<PacketReceiver> receivers[kNumTotalFlows];
+
+ // RMCAT Flows are initialized simultaneously at t=5 seconds.
+ const int64_t kRmcatStartingTimeMs = 5 * 1000;
+ for (size_t id : kAllRmcatFlowIds) {
+ sources[id].reset(new AdaptiveVideoSource(static_cast<int>(id), 30, 300, 0,
+ kRmcatStartingTimeMs));
+ senders[id].reset(new VideoSender(&uplink_, sources[id].get(), bwe_type));
+ }
+
+ for (size_t id : kAllTcpFlowIds) {
+ senders[id].reset(new TcpSender(&uplink_, static_cast<int>(id),
+ tcp_starting_times_ms[id - kNumRmcatFlows],
+ tcp_file_sizes_bytes[id - kNumRmcatFlows]));
+ }
+
+ FlowIds flow_ids = CreateFlowIdRange(0, static_cast<int>(kNumTotalFlows - 1));
+ DefaultEvaluationFilter up_filter(&uplink_, flow_ids);
+
+ LinkShare link_share(&(up_filter.choke));
+
+ RateCounterFilter total_utilization(&uplink_, flow_ids, "Total_utilization",
+ "Total_link_utilization");
+
+ // Delay is plotted only for the first flow.
+ // To plot it for all of them, replace "i == 0" with "true" on new PacketReceiver().
+ for (size_t id : kAllRmcatFlowIds) {
+ metric_recorders[id].reset(
+ new MetricRecorder(bwe_names[bwe_type], static_cast<int>(id),
+ senders[id].get(), &link_share));
+ receivers[id].reset(new PacketReceiver(&uplink_, static_cast<int>(id),
+ bwe_type, id == 0, false,
+ metric_recorders[id].get()));
+ metric_recorders[id].get()->set_start_computing_metrics_ms(
+ kRmcatStartingTimeMs);
+ metric_recorders[id].get()->set_plot_available_capacity(
+ id == 0 && plot_total_available_capacity_);
+ }
+
+ // Delay is not plotted for TCP flows. To plot it for them,
+ // replace the first "false" occurrence with "true" on new PacketReceiver().
+ for (size_t id : kAllTcpFlowIds) {
+ metric_recorders[id].reset(
+ new MetricRecorder(bwe_names[kTcpEstimator], static_cast<int>(id),
+ senders[id].get(), &link_share));
+ receivers[id].reset(new PacketReceiver(&uplink_, static_cast<int>(id),
+ kTcpEstimator, false, false,
+ metric_recorders[id].get()));
+ metric_recorders[id].get()->set_plot_available_capacity(
+ id == 0 && plot_total_available_capacity_);
+ }
+
+ DelayFilter down_filter(&downlink_, flow_ids);
+ down_filter.SetOneWayDelayMs(kOneWayDelayMs);
+
+ // Test also with one way propagation delay = 100ms.
+ // up_filter.delay.SetOneWayDelayMs(100);
+ // down_filter.SetOneWayDelayMs(100);
+
+ // Test also with bottleneck queue size = 20ms and 1000ms.
+ // up_filter.choke.set_max_delay_ms(20);
+ // up_filter.choke.set_max_delay_ms(1000);
+
+ // Test also with no Jitter:
+ // up_filter.jitter.SetMaxJitter(0);
+
+ up_filter.choke.set_capacity_kbps(2000);
+
+ RunFor(300 * 1000); // 0-300s.
+
+ std::string title("5.7_Multiple_short_TCP_flows");
+ for (size_t id : kAllRmcatFlowIds) {
+ metric_recorders[id].get()->PlotThroughputHistogram(
+ title, bwe_names[bwe_type], kNumRmcatFlows, 0);
+ metric_recorders[id].get()->PlotDelayHistogram(
+ title, bwe_names[bwe_type], kNumRmcatFlows, kOneWayDelayMs);
+ metric_recorders[id].get()->PlotLossHistogram(
+ title, bwe_names[bwe_type], kNumRmcatFlows,
+ receivers[id].get()->GlobalPacketLoss());
+ BWE_TEST_LOGGING_BASELINEBAR(5, bwe_names[bwe_type], kOneWayDelayMs, id);
+ }
+}
+
+// 5.8. Three forward direction competing flows, constant capacity.
+// During the test, one of the flows is paused and later resumed.
+void BweTest::RunPauseResumeFlows(BandwidthEstimatorType bwe_type) {
+ const int kAllFlowIds[] = {0, 1, 2}; // Three RMCAT flows.
+ const size_t kNumFlows = ARRAY_SIZE(kAllFlowIds);
+
+ rtc::scoped_ptr<AdaptiveVideoSource> sources[kNumFlows];
+ rtc::scoped_ptr<VideoSender> senders[kNumFlows];
+ rtc::scoped_ptr<MetricRecorder> metric_recorders[kNumFlows];
+ rtc::scoped_ptr<PacketReceiver> receivers[kNumFlows];
+
+ // Flows initialized simultaneously.
+ const int64_t kStartingApartMs = 0;
+
+ for (size_t i = 0; i < kNumFlows; ++i) {
+ sources[i].reset(new AdaptiveVideoSource(kAllFlowIds[i], 30, 300, 0,
+ i * kStartingApartMs));
+ senders[i].reset(new VideoSender(&uplink_, sources[i].get(), bwe_type));
+ }
+
+ DefaultEvaluationFilter filter(&uplink_,
+ CreateFlowIds(kAllFlowIds, kNumFlows));
+
+ LinkShare link_share(&(filter.choke));
+
+ RateCounterFilter total_utilization(
+ &uplink_, CreateFlowIds(kAllFlowIds, kNumFlows), "Total_utilization",
+ "Total_link_utilization");
+
+ // Delay is plotted only for the first flow.
+ // To plot it for all of them, replace "i == 0" with "true" on new PacketReceiver().
+ for (size_t i = 0; i < kNumFlows; ++i) {
+ metric_recorders[i].reset(
+ new MetricRecorder(bwe_names[bwe_type], static_cast<int>(i),
+ senders[i].get(), &link_share));
+ receivers[i].reset(new PacketReceiver(&uplink_, kAllFlowIds[i], bwe_type,
+ i == 0, false,
+ metric_recorders[i].get()));
+ metric_recorders[i].get()->set_start_computing_metrics_ms(kStartingApartMs *
+ (kNumFlows - 1));
+ metric_recorders[i].get()->set_plot_available_capacity(
+ i == 0 && plot_total_available_capacity_);
+ }
+
+ // Test also with one way propagation delay = 100ms.
+ // filter.delay.SetOneWayDelayMs(100);
+ filter.choke.set_capacity_kbps(3500);
+
+ RunFor(40 * 1000); // 0-40s.
+ senders[0].get()->Pause();
+ RunFor(20 * 1000); // 40-60s.
+ senders[0].get()->Resume(20 * 1000);
+ RunFor(60 * 1000); // 60-120s.
+
+ int64_t paused[] = {20 * 1000, 0, 0};
+
+ // First flow is being paused, hence having a different optimum.
+ const std::string optima_lines[] = {"1", "2", "2"};
+
+ std::string title("5.8_Pause_and_resume_media_flow");
+ for (size_t i = 0; i < kNumFlows; ++i) {
+ metric_recorders[i].get()->PlotThroughputHistogram(
+ title, bwe_names[bwe_type], kNumFlows, paused[i], optima_lines[i]);
+ metric_recorders[i].get()->PlotDelayHistogram(title, bwe_names[bwe_type],
+ kNumFlows, kOneWayDelayMs);
+ metric_recorders[i].get()->PlotLossHistogram(
+ title, bwe_names[bwe_type], kNumFlows,
+ receivers[i].get()->GlobalPacketLoss());
+ BWE_TEST_LOGGING_BASELINEBAR(5, bwe_names[bwe_type], kOneWayDelayMs, i);
+ }
+}
+
+// Following functions are used for randomizing TCP file size and
+// starting time, used on 5.7 RunMultipleShortTcpFairness.
+// They are pseudo-random generators, creating always the same
+// value sequence for a given Random seed.
+
+std::vector<int> BweTest::GetFileSizesBytes(int num_files) {
+ // File size chosen from uniform distribution between [100,1000] kB.
+ const int kMinKbytes = 100;
+ const int kMaxKbytes = 1000;
+
+ Random random(0x12345678);
+ std::vector<int> tcp_file_sizes_bytes;
+
+ while (num_files-- > 0) {
+ tcp_file_sizes_bytes.push_back(random.Rand(kMinKbytes, kMaxKbytes) * 1000);
+ }
+
+ return tcp_file_sizes_bytes;
}
+
+std::vector<int64_t> BweTest::GetStartingTimesMs(int num_files) {
+ // OFF state behaves as an exp. distribution with mean = 10 seconds.
+ const float kMeanMs = 10000.0f;
+ Random random(0x12345678);
+
+ std::vector<int64_t> tcp_starting_times_ms;
+
+ // Two TCP Flows are initialized simultaneously at t=0 seconds.
+ for (int i = 0; i < 2; ++i, --num_files) {
+ tcp_starting_times_ms.push_back(0);
+ }
+
+ // Other TCP Flows are initialized in an OFF state.
+ while (num_files-- > 0) {
+ tcp_starting_times_ms.push_back(
+ static_cast<int64_t>(random.Exponential(1.0f / kMeanMs)));
+ }
+
+ return tcp_starting_times_ms;
+}
+
} // namespace bwe
} // namespace testing
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe_test.h b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe_test.h
index 303aca528c5..5fb32521951 100644
--- a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe_test.h
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe_test.h
@@ -67,8 +67,49 @@ class Link : public PacketProcessorListener {
class BweTest {
public:
BweTest();
+ explicit BweTest(bool plot_capacity);
~BweTest();
+ void RunChoke(BandwidthEstimatorType bwe_type,
+ std::vector<int> capacities_kbps);
+
+ void RunVariableCapacity1SingleFlow(BandwidthEstimatorType bwe_type);
+ void RunVariableCapacity2MultipleFlows(BandwidthEstimatorType bwe_type,
+ size_t num_flows);
+ void RunBidirectionalFlow(BandwidthEstimatorType bwe_type);
+ void RunSelfFairness(BandwidthEstimatorType bwe_type);
+ void RunRoundTripTimeFairness(BandwidthEstimatorType bwe_type);
+ void RunLongTcpFairness(BandwidthEstimatorType bwe_type);
+ void RunMultipleShortTcpFairness(BandwidthEstimatorType bwe_type,
+ std::vector<int> tcp_file_sizes_bytes,
+ std::vector<int64_t> tcp_starting_times_ms);
+ void RunPauseResumeFlows(BandwidthEstimatorType bwe_type);
+
+ void RunFairnessTest(BandwidthEstimatorType bwe_type,
+ size_t num_media_flows,
+ size_t num_tcp_flows,
+ int64_t run_time_seconds,
+ uint32_t capacity_kbps,
+ int64_t max_delay_ms,
+ int64_t rtt_ms,
+ int64_t max_jitter_ms,
+ const int64_t* offsets_ms);
+
+ void RunFairnessTest(BandwidthEstimatorType bwe_type,
+ size_t num_media_flows,
+ size_t num_tcp_flows,
+ int64_t run_time_seconds,
+ uint32_t capacity_kbps,
+ int64_t max_delay_ms,
+ int64_t rtt_ms,
+ int64_t max_jitter_ms,
+ const int64_t* offsets_ms,
+ const std::string& title,
+ const std::string& flow_name);
+
+ static std::vector<int> GetFileSizesBytes(int num_files);
+ static std::vector<int64_t> GetStartingTimesMs(int num_files);
+
protected:
void SetUp();
@@ -87,13 +128,6 @@ class BweTest {
std::map<int, Stats<double>> flow_delay_ms,
std::map<int, Stats<double>> flow_throughput_kbps);
- void RunFairnessTest(BandwidthEstimatorType bwe_type,
- size_t num_media_flows,
- size_t num_tcp_flows,
- int64_t run_time_seconds,
- int capacity_kbps,
- int max_delay_ms);
-
Link downlink_;
Link uplink_;
@@ -107,9 +141,53 @@ class BweTest {
int64_t simulation_interval_ms_;
std::vector<Link*> links_;
Packets packets_;
+ bool plot_total_available_capacity_;
- DISALLOW_COPY_AND_ASSIGN(BweTest);
+ RTC_DISALLOW_COPY_AND_ASSIGN(BweTest);
};
+
+// Default Evaluation parameters:
+// Link capacity: 4000 kbps.
+// Maximum queueing delay: 300ms.
+// One-Way propagation delay: 50ms.
+// Jitter model: Truncated gaussian.
+// Maximum end-to-end jitter: 30ms = 2*standard_deviation.
+// Bottleneck queue type: Drop tail.
+// Path loss ratio: 0%.
+
+const int kOneWayDelayMs = 50;
+const int kMaxQueueingDelayMs = 300;
+const int kMaxCapacityKbps = 4000;
+const int kMaxJitterMs = 15;
+
+struct DefaultEvaluationFilter {
+ DefaultEvaluationFilter(PacketProcessorListener* listener, int flow_id)
+ : choke(listener, flow_id),
+ delay(listener, flow_id),
+ jitter(listener, flow_id) {
+ SetDefaultParameters();
+ }
+
+ DefaultEvaluationFilter(PacketProcessorListener* listener,
+ const FlowIds& flow_ids)
+ : choke(listener, flow_ids),
+ delay(listener, flow_ids),
+ jitter(listener, flow_ids) {
+ SetDefaultParameters();
+ }
+
+ void SetDefaultParameters() {
+ delay.SetOneWayDelayMs(kOneWayDelayMs);
+ choke.set_max_delay_ms(kMaxQueueingDelayMs);
+ choke.set_capacity_kbps(kMaxCapacityKbps);
+ jitter.SetMaxJitter(kMaxJitterMs);
+ }
+
+ ChokeFilter choke;
+ DelayFilter delay;
+ JitterFilter jitter;
+};
+
} // namespace bwe
} // namespace testing
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe_test_baselinefile.cc b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe_test_baselinefile.cc
index dbb5ade0b23..d7abede707e 100644
--- a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe_test_baselinefile.cc
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe_test_baselinefile.cc
@@ -94,7 +94,7 @@ class BaseLineFileVerify : public BaseLineFileInterface {
rtc::scoped_ptr<ResourceFileReader> reader_;
bool fail_to_read_response_;
- DISALLOW_IMPLICIT_CONSTRUCTORS(BaseLineFileVerify);
+ RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(BaseLineFileVerify);
};
class BaseLineFileUpdate : public BaseLineFileInterface {
@@ -146,7 +146,7 @@ class BaseLineFileUpdate : public BaseLineFileInterface {
std::vector<uint32_t> output_content_;
std::string filepath_;
- DISALLOW_IMPLICIT_CONSTRUCTORS(BaseLineFileUpdate);
+ RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(BaseLineFileUpdate);
};
BaseLineFileInterface* BaseLineFileInterface::Create(
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe_test_fileutils.h b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe_test_fileutils.h
index e73a545e538..2881eba424d 100644
--- a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe_test_fileutils.h
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe_test_fileutils.h
@@ -35,7 +35,7 @@ class ResourceFileReader {
private:
explicit ResourceFileReader(FILE* file) : file_(file) {}
FILE* file_;
- DISALLOW_IMPLICIT_CONSTRUCTORS(ResourceFileReader);
+ RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(ResourceFileReader);
};
class OutputFileWriter {
@@ -50,7 +50,7 @@ class OutputFileWriter {
private:
explicit OutputFileWriter(FILE* file) : file_(file) {}
FILE* file_;
- DISALLOW_IMPLICIT_CONSTRUCTORS(OutputFileWriter);
+ RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(OutputFileWriter);
};
} // namespace bwe
} // namespace testing
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe_test_framework.cc b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe_test_framework.cc
index 5e66697215d..d4201933e78 100644
--- a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe_test_framework.cc
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe_test_framework.cc
@@ -20,9 +20,10 @@ namespace bwe {
class DelayCapHelper {
public:
+ // Max delay = 0 stands for +infinite.
DelayCapHelper() : max_delay_us_(0), delay_stats_() {}
- void SetMaxDelay(int max_delay_ms) {
+ void set_max_delay_ms(int64_t max_delay_ms) {
BWE_TEST_LOGGING_ENABLE(false);
BWE_TEST_LOGGING_LOG1("Max Delay", "%d ms", static_cast<int>(max_delay_ms));
assert(max_delay_ms >= 0);
@@ -31,7 +32,7 @@ class DelayCapHelper {
bool ShouldSendPacket(int64_t send_time_us, int64_t arrival_time_us) {
int64_t packet_delay_us = send_time_us - arrival_time_us;
- delay_stats_.Push(std::min(packet_delay_us, max_delay_us_) / 1000);
+ delay_stats_.Push((std::min(packet_delay_us, max_delay_us_) + 500) / 1000);
return (max_delay_us_ == 0 || max_delay_us_ >= packet_delay_us);
}
@@ -43,7 +44,7 @@ class DelayCapHelper {
int64_t max_delay_us_;
Stats<double> delay_stats_;
- DISALLOW_COPY_AND_ASSIGN(DelayCapHelper);
+ RTC_DISALLOW_COPY_AND_ASSIGN(DelayCapHelper);
};
const FlowIds CreateFlowIds(const int *flow_ids_array, size_t num_flow_ids) {
@@ -51,58 +52,97 @@ const FlowIds CreateFlowIds(const int *flow_ids_array, size_t num_flow_ids) {
return flow_ids;
}
-class RateCounter {
- public:
- RateCounter()
- : kWindowSizeUs(1000000),
- packets_per_second_(0),
- bytes_per_second_(0),
- last_accumulated_us_(0),
- window_() {}
-
- void UpdateRates(int64_t send_time_us, uint32_t payload_size) {
- packets_per_second_++;
- bytes_per_second_ += payload_size;
- last_accumulated_us_ = send_time_us;
- window_.push_back(std::make_pair(send_time_us, payload_size));
- while (!window_.empty()) {
- const TimeSizePair& packet = window_.front();
- if (packet.first > (last_accumulated_us_ - kWindowSizeUs)) {
- break;
- }
- assert(packets_per_second_ >= 1);
- assert(bytes_per_second_ >= packet.second);
- packets_per_second_--;
- bytes_per_second_ -= packet.second;
- window_.pop_front();
+const FlowIds CreateFlowIdRange(int initial_value, int last_value) {
+ int size = last_value - initial_value + 1;
+ assert(size > 0);
+ int* flow_ids_array = new int[size];
+ for (int i = initial_value; i <= last_value; ++i) {
+ flow_ids_array[i - initial_value] = i;
+ }
+ return CreateFlowIds(flow_ids_array, size);
+}
+
+void RateCounter::UpdateRates(int64_t send_time_us, uint32_t payload_size) {
+ ++recently_received_packets_;
+ recently_received_bytes_ += payload_size;
+ last_accumulated_us_ = send_time_us;
+ window_.push_back(std::make_pair(send_time_us, payload_size));
+ while (!window_.empty()) {
+ const TimeSizePair& packet = window_.front();
+ if (packet.first > (last_accumulated_us_ - window_size_us_)) {
+ break;
}
+ assert(recently_received_packets_ >= 1);
+ assert(recently_received_bytes_ >= packet.second);
+ --recently_received_packets_;
+ recently_received_bytes_ -= packet.second;
+ window_.pop_front();
}
+}
- uint32_t bits_per_second() const {
- return bytes_per_second_ * 8;
- }
+uint32_t RateCounter::bits_per_second() const {
+ return (8 * recently_received_bytes_) / BitrateWindowS();
+}
- uint32_t packets_per_second() const { return packets_per_second_; }
+uint32_t RateCounter::packets_per_second() const {
+ return recently_received_packets_ / BitrateWindowS();
+}
- private:
- typedef std::pair<int64_t, uint32_t> TimeSizePair;
+double RateCounter::BitrateWindowS() const {
+ return static_cast<double>(window_size_us_) / (1000 * 1000);
+}
- const int64_t kWindowSizeUs;
- uint32_t packets_per_second_;
- uint32_t bytes_per_second_;
- int64_t last_accumulated_us_;
- std::list<TimeSizePair> window_;
-};
+Random::Random(uint32_t seed) : a_(0x531FDB97 ^ seed), b_(0x6420ECA8 + seed) {
+}
+
+float Random::Rand() {
+ const float kScale = 1.0f / 0xffffffff;
+ float result = kScale * b_;
+ a_ ^= b_;
+ b_ += a_;
+ return result;
+}
+
+int Random::Rand(int low, int high) {
+ float uniform = Rand() * (high - low + 1) + low;
+ return static_cast<int>(uniform);
+}
+
+int Random::Gaussian(int mean, int standard_deviation) {
+ // Creating a Normal distribution variable from two independent uniform
+ // variables based on the Box-Muller transform, which is defined on the
+ // interval (0, 1], hence the mask+add below.
+ const double kPi = 3.14159265358979323846;
+ const double kScale = 1.0 / 0x80000000ul;
+ double u1 = kScale * ((a_ & 0x7ffffffful) + 1);
+ double u2 = kScale * ((b_ & 0x7ffffffful) + 1);
+ a_ ^= b_;
+ b_ += a_;
+ return static_cast<int>(
+ mean + standard_deviation * sqrt(-2 * log(u1)) * cos(2 * kPi * u2));
+}
+
+int Random::Exponential(float lambda) {
+ float uniform = Rand();
+ return static_cast<int>(-log(uniform) / lambda);
+}
Packet::Packet()
- : flow_id_(0), creation_time_us_(-1), send_time_us_(-1), payload_size_(0) {
+ : flow_id_(0),
+ creation_time_us_(-1),
+ send_time_us_(-1),
+ sender_timestamp_us_(-1),
+ payload_size_(0),
+ paced_(false) {
}
Packet::Packet(int flow_id, int64_t send_time_us, size_t payload_size)
: flow_id_(flow_id),
creation_time_us_(send_time_us),
send_time_us_(send_time_us),
- payload_size_(payload_size) {
+ sender_timestamp_us_(send_time_us),
+ payload_size_(payload_size),
+ paced_(false) {
}
Packet::~Packet() {
@@ -137,7 +177,7 @@ MediaPacket::MediaPacket(int flow_id,
: Packet(flow_id, send_time_us, payload_size), header_(header) {
}
-MediaPacket::MediaPacket(int64_t send_time_us, uint32_t sequence_number)
+MediaPacket::MediaPacket(int64_t send_time_us, uint16_t sequence_number)
: Packet(0, send_time_us, 0) {
header_ = RTPHeader();
header_.sequenceNumber = sequence_number;
@@ -203,14 +243,23 @@ PacketProcessor::~PacketProcessor() {
}
}
+uint32_t PacketProcessor::packets_per_second() const {
+ return rate_counter_.packets_per_second();
+}
+
+uint32_t PacketProcessor::bits_per_second() const {
+ return rate_counter_.bits_per_second();
+}
+
RateCounterFilter::RateCounterFilter(PacketProcessorListener* listener,
int flow_id,
- const char* name)
+ const char* name,
+ const std::string& plot_name)
: PacketProcessor(listener, flow_id, kRegular),
- rate_counter_(new RateCounter()),
packets_per_second_stats_(),
kbps_stats_(),
- name_() {
+ start_plotting_time_ms_(0),
+ plot_name_(plot_name) {
std::stringstream ss;
ss << name << "_" << flow_id;
name_ = ss.str();
@@ -218,31 +267,36 @@ RateCounterFilter::RateCounterFilter(PacketProcessorListener* listener,
RateCounterFilter::RateCounterFilter(PacketProcessorListener* listener,
const FlowIds& flow_ids,
- const char* name)
+ const char* name,
+ const std::string& plot_name)
: PacketProcessor(listener, flow_ids, kRegular),
- rate_counter_(new RateCounter()),
packets_per_second_stats_(),
kbps_stats_(),
- name_() {
+ start_plotting_time_ms_(0),
+ plot_name_(plot_name) {
std::stringstream ss;
- ss << name << "_";
+ ss << name;
+ char delimiter = '_';
for (int flow_id : flow_ids) {
- ss << flow_id << ",";
+ ss << delimiter << flow_id;
+ delimiter = ',';
}
name_ = ss.str();
}
-RateCounterFilter::~RateCounterFilter() {
- LogStats();
+RateCounterFilter::RateCounterFilter(PacketProcessorListener* listener,
+ const FlowIds& flow_ids,
+ const char* name,
+ int64_t start_plotting_time_ms,
+ const std::string& plot_name)
+ : RateCounterFilter(listener, flow_ids, name, plot_name) {
+ start_plotting_time_ms_ = start_plotting_time_ms;
}
-uint32_t RateCounterFilter::packets_per_second() const {
- return rate_counter_->packets_per_second();
+RateCounterFilter::~RateCounterFilter() {
+ LogStats();
}
-uint32_t RateCounterFilter::bits_per_second() const {
- return rate_counter_->bits_per_second();
-}
void RateCounterFilter::LogStats() {
BWE_TEST_LOGGING_CONTEXT("RateCounterFilter");
@@ -255,19 +309,29 @@ Stats<double> RateCounterFilter::GetBitrateStats() const {
}
void RateCounterFilter::Plot(int64_t timestamp_ms) {
+ uint32_t plot_kbps = 0;
+ if (timestamp_ms >= start_plotting_time_ms_) {
+ plot_kbps = rate_counter_.bits_per_second() / 1000.0;
+ }
BWE_TEST_LOGGING_CONTEXT(name_.c_str());
- BWE_TEST_LOGGING_PLOT(0, "Throughput_#1", timestamp_ms,
- rate_counter_->bits_per_second() / 1000.0);
+ if (plot_name_.empty()) {
+ BWE_TEST_LOGGING_PLOT(0, "Throughput_kbps#1", timestamp_ms, plot_kbps);
+ } else {
+ BWE_TEST_LOGGING_PLOT_WITH_NAME(0, "Throughput_kbps#1", timestamp_ms,
+ plot_kbps, plot_name_);
+ }
+
+ RTC_UNUSED(plot_kbps);
}
void RateCounterFilter::RunFor(int64_t /*time_ms*/, Packets* in_out) {
assert(in_out);
for (const Packet* packet : *in_out) {
- rate_counter_->UpdateRates(packet->send_time_us(),
- static_cast<int>(packet->payload_size()));
+ rate_counter_.UpdateRates(packet->send_time_us(),
+ static_cast<int>(packet->payload_size()));
}
- packets_per_second_stats_.Push(rate_counter_->packets_per_second());
- kbps_stats_.Push(rate_counter_->bits_per_second() / 1000.0);
+ packets_per_second_stats_.Push(rate_counter_.packets_per_second());
+ kbps_stats_.Push(rate_counter_.bits_per_second() / 1000.0);
}
LossFilter::LossFilter(PacketProcessorListener* listener, int flow_id)
@@ -303,30 +367,32 @@ void LossFilter::RunFor(int64_t /*time_ms*/, Packets* in_out) {
}
}
+const int64_t kDefaultOneWayDelayUs = 0;
+
DelayFilter::DelayFilter(PacketProcessorListener* listener, int flow_id)
: PacketProcessor(listener, flow_id, kRegular),
- delay_us_(0),
+ one_way_delay_us_(kDefaultOneWayDelayUs),
last_send_time_us_(0) {
}
DelayFilter::DelayFilter(PacketProcessorListener* listener,
const FlowIds& flow_ids)
: PacketProcessor(listener, flow_ids, kRegular),
- delay_us_(0),
+ one_way_delay_us_(kDefaultOneWayDelayUs),
last_send_time_us_(0) {
}
-void DelayFilter::SetDelayMs(int64_t delay_ms) {
+void DelayFilter::SetOneWayDelayMs(int64_t one_way_delay_ms) {
BWE_TEST_LOGGING_ENABLE(false);
- BWE_TEST_LOGGING_LOG1("Delay", "%d ms", static_cast<int>(delay_ms));
- assert(delay_ms >= 0);
- delay_us_ = delay_ms * 1000;
+ BWE_TEST_LOGGING_LOG1("Delay", "%d ms", static_cast<int>(one_way_delay_ms));
+ assert(one_way_delay_ms >= 0);
+ one_way_delay_us_ = one_way_delay_ms * 1000;
}
void DelayFilter::RunFor(int64_t /*time_ms*/, Packets* in_out) {
assert(in_out);
for (Packet* packet : *in_out) {
- int64_t new_send_time_us = packet->send_time_us() + delay_us_;
+ int64_t new_send_time_us = packet->send_time_us() + one_way_delay_us_;
last_send_time_us_ = std::max(last_send_time_us_, new_send_time_us);
packet->set_send_time_us(last_send_time_us_);
}
@@ -336,7 +402,8 @@ JitterFilter::JitterFilter(PacketProcessorListener* listener, int flow_id)
: PacketProcessor(listener, flow_id, kRegular),
random_(0x89674523),
stddev_jitter_us_(0),
- last_send_time_us_(0) {
+ last_send_time_us_(0),
+ reordering_(false) {
}
JitterFilter::JitterFilter(PacketProcessorListener* listener,
@@ -344,27 +411,62 @@ JitterFilter::JitterFilter(PacketProcessorListener* listener,
: PacketProcessor(listener, flow_ids, kRegular),
random_(0x89674523),
stddev_jitter_us_(0),
- last_send_time_us_(0) {
+ last_send_time_us_(0),
+ reordering_(false) {
}
-void JitterFilter::SetJitter(int64_t stddev_jitter_ms) {
+const int kN = 3; // Truncated N sigma gaussian.
+
+void JitterFilter::SetMaxJitter(int64_t max_jitter_ms) {
BWE_TEST_LOGGING_ENABLE(false);
- BWE_TEST_LOGGING_LOG1("Jitter", "%d ms",
- static_cast<int>(stddev_jitter_ms));
- assert(stddev_jitter_ms >= 0);
- stddev_jitter_us_ = stddev_jitter_ms * 1000;
+ BWE_TEST_LOGGING_LOG1("Max Jitter", "%d ms", static_cast<int>(max_jitter_ms));
+ assert(max_jitter_ms >= 0);
+ // Truncated gaussian, Max jitter = kN*sigma.
+ stddev_jitter_us_ = (max_jitter_ms * 1000 + kN / 2) / kN;
+}
+
+namespace {
+inline int64_t TruncatedNSigmaGaussian(Random* const random,
+ int64_t mean,
+ int64_t std_dev) {
+ int64_t gaussian_random = random->Gaussian(mean, std_dev);
+ return std::max(std::min(gaussian_random, kN * std_dev), -kN * std_dev);
+}
}
void JitterFilter::RunFor(int64_t /*time_ms*/, Packets* in_out) {
assert(in_out);
for (Packet* packet : *in_out) {
- int64_t new_send_time_us = packet->send_time_us();
- new_send_time_us += random_.Gaussian(0, stddev_jitter_us_);
- last_send_time_us_ = std::max(last_send_time_us_, new_send_time_us);
- packet->set_send_time_us(last_send_time_us_);
+ int64_t jitter_us =
+ std::abs(TruncatedNSigmaGaussian(&random_, 0, stddev_jitter_us_));
+ int64_t new_send_time_us = packet->send_time_us() + jitter_us;
+
+ if (!reordering_) {
+ new_send_time_us = std::max(last_send_time_us_, new_send_time_us);
+ }
+
+ // Receiver timestamp cannot be lower than sender timestamp.
+ assert(new_send_time_us >= packet->sender_timestamp_us());
+
+ packet->set_send_time_us(new_send_time_us);
+ last_send_time_us_ = new_send_time_us;
}
}
+// Computes the expected value for a right sided (abs) truncated gaussian.
+// Does not take into account possible reordering updates.
+int64_t JitterFilter::MeanUs() {
+ const double kPi = 3.1415926535897932;
+ double max_jitter_us = static_cast<double>(kN * stddev_jitter_us_);
+ double right_sided_mean_us =
+ static_cast<double>(stddev_jitter_us_) / sqrt(kPi / 2.0);
+ double truncated_mean_us =
+ right_sided_mean_us *
+ (1.0 - exp(-pow(static_cast<double>(kN), 2.0) / 2.0)) +
+ max_jitter_us * erfc(static_cast<double>(kN));
+ return static_cast<int64_t>(truncated_mean_us + 0.5);
+}
+
ReorderFilter::ReorderFilter(PacketProcessorListener* listener, int flow_id)
: PacketProcessor(listener, flow_id, kRegular),
random_(0x27452389),
@@ -404,9 +506,11 @@ void ReorderFilter::RunFor(int64_t /*time_ms*/, Packets* in_out) {
}
}
+const uint32_t kDefaultKbps = 1200;
+
ChokeFilter::ChokeFilter(PacketProcessorListener* listener, int flow_id)
: PacketProcessor(listener, flow_id, kRegular),
- kbps_(1200),
+ capacity_kbps_(kDefaultKbps),
last_send_time_us_(0),
delay_cap_helper_(new DelayCapHelper()) {
}
@@ -414,27 +518,34 @@ ChokeFilter::ChokeFilter(PacketProcessorListener* listener, int flow_id)
ChokeFilter::ChokeFilter(PacketProcessorListener* listener,
const FlowIds& flow_ids)
: PacketProcessor(listener, flow_ids, kRegular),
- kbps_(1200),
+ capacity_kbps_(kDefaultKbps),
last_send_time_us_(0),
delay_cap_helper_(new DelayCapHelper()) {
}
ChokeFilter::~ChokeFilter() {}
-void ChokeFilter::SetCapacity(uint32_t kbps) {
+void ChokeFilter::set_capacity_kbps(uint32_t kbps) {
BWE_TEST_LOGGING_ENABLE(false);
BWE_TEST_LOGGING_LOG1("BitrateChoke", "%d kbps", kbps);
- kbps_ = kbps;
+ capacity_kbps_ = kbps;
+}
+
+uint32_t ChokeFilter::capacity_kbps() {
+ return capacity_kbps_;
}
void ChokeFilter::RunFor(int64_t /*time_ms*/, Packets* in_out) {
assert(in_out);
for (PacketsIt it = in_out->begin(); it != in_out->end(); ) {
int64_t earliest_send_time_us =
- last_send_time_us_ +
- ((*it)->payload_size() * 8 * 1000 + kbps_ / 2) / kbps_;
+ std::max(last_send_time_us_, (*it)->send_time_us());
+
int64_t new_send_time_us =
- std::max((*it)->send_time_us(), earliest_send_time_us);
+ earliest_send_time_us +
+ ((*it)->payload_size() * 8 * 1000 + capacity_kbps_ / 2) /
+ capacity_kbps_;
+
if (delay_cap_helper_->ShouldSendPacket(new_send_time_us,
(*it)->send_time_us())) {
(*it)->set_send_time_us(new_send_time_us);
@@ -447,8 +558,8 @@ void ChokeFilter::RunFor(int64_t /*time_ms*/, Packets* in_out) {
}
}
-void ChokeFilter::SetMaxDelay(int max_delay_ms) {
- delay_cap_helper_->SetMaxDelay(max_delay_ms);
+void ChokeFilter::set_max_delay_ms(int64_t max_delay_ms) {
+ delay_cap_helper_->set_max_delay_ms(max_delay_ms);
}
Stats<double> ChokeFilter::GetDelayStats() const {
@@ -565,8 +676,8 @@ void TraceBasedDeliveryFilter::RunFor(int64_t time_ms, Packets* in_out) {
kbps_stats_.Push(rate_counter_->bits_per_second() / 1000.0);
}
-void TraceBasedDeliveryFilter::SetMaxDelay(int max_delay_ms) {
- delay_cap_helper_->SetMaxDelay(max_delay_ms);
+void TraceBasedDeliveryFilter::set_max_delay_ms(int64_t max_delay_ms) {
+ delay_cap_helper_->set_max_delay_ms(max_delay_ms);
}
Stats<double> TraceBasedDeliveryFilter::GetDelayStats() const {
@@ -606,6 +717,7 @@ VideoSource::VideoSource(int flow_id,
frame_size_bytes_(bits_per_second_ / 8 / fps),
flow_id_(flow_id),
next_frame_ms_(first_frame_offset_ms),
+ next_frame_rand_ms_(0),
now_ms_(0),
prototype_header_() {
memset(&prototype_header_, 0, sizeof(prototype_header_));
@@ -617,6 +729,10 @@ uint32_t VideoSource::NextFrameSize() {
return frame_size_bytes_;
}
+int64_t VideoSource::GetTimeUntilNextFrameMs() const {
+ return next_frame_ms_ + next_frame_rand_ms_ - now_ms_;
+}
+
uint32_t VideoSource::NextPacketSize(uint32_t frame_size,
uint32_t remaining_payload) {
return std::min(kMaxPayloadSizeBytes, remaining_payload);
@@ -624,21 +740,33 @@ uint32_t VideoSource::NextPacketSize(uint32_t frame_size,
void VideoSource::RunFor(int64_t time_ms, Packets* in_out) {
assert(in_out);
- std::stringstream ss;
- ss << "SendEstimate_" << flow_id_ << "#1";
- BWE_TEST_LOGGING_PLOT(0, ss.str(), now_ms_, bits_per_second_ / 1000);
+
now_ms_ += time_ms;
Packets new_packets;
+
while (now_ms_ >= next_frame_ms_) {
- prototype_header_.timestamp = kTimestampBase +
- static_cast<uint32_t>(next_frame_ms_ * 90.0);
+ const int64_t kRandAmplitude = 2;
+ // A variance picked uniformly from {-1, 0, 1} ms is added to the frame
+ // timestamp.
+ next_frame_rand_ms_ =
+ kRandAmplitude * static_cast<float>(rand()) / RAND_MAX -
+ kRandAmplitude / 2;
+
+ // Ensure frame will not have a negative timestamp.
+ int64_t next_frame_ms =
+ std::max<int64_t>(next_frame_ms_ + next_frame_rand_ms_, 0);
+
+ prototype_header_.timestamp =
+ kTimestampBase + static_cast<uint32_t>(next_frame_ms * 90.0);
prototype_header_.extension.transmissionTimeOffset = 0;
// Generate new packets for this frame, all with the same timestamp,
// but the payload size is capped, so if the whole frame doesn't fit in
// one packet, we will see a number of equally sized packets followed by
// one smaller at the tail.
- int64_t send_time_us = next_frame_ms_ * 1000.0;
+
+ int64_t send_time_us = next_frame_ms * 1000.0;
+
uint32_t frame_size = NextFrameSize();
uint32_t payload_size = frame_size;
@@ -648,13 +776,14 @@ void VideoSource::RunFor(int64_t time_ms, Packets* in_out) {
MediaPacket* new_packet =
new MediaPacket(flow_id_, send_time_us, size, prototype_header_);
new_packets.push_back(new_packet);
- new_packet->SetAbsSendTimeMs(next_frame_ms_);
+ new_packet->SetAbsSendTimeMs(next_frame_ms);
new_packet->set_sender_timestamp_us(send_time_us);
payload_size -= size;
}
next_frame_ms_ += frame_period_ms_;
}
+
in_out->merge(new_packets, DereferencingComparator<Packet>);
}
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe_test_framework.h b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe_test_framework.h
index cfbac9c3628..77b03fe7b12 100644
--- a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe_test_framework.h
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe_test_framework.h
@@ -21,6 +21,7 @@
#include <string>
#include <vector>
+#include "webrtc/base/common.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/modules/bitrate_controller/include/bitrate_controller.h"
#include "webrtc/modules/interface/module_common_types.h"
@@ -40,10 +41,39 @@ namespace testing {
namespace bwe {
class DelayCapHelper;
-class RateCounter;
+
+class RateCounter {
+ public:
+ RateCounter(int64_t window_size_ms)
+ : window_size_us_(1000 * window_size_ms),
+ recently_received_packets_(0),
+ recently_received_bytes_(0),
+ last_accumulated_us_(0),
+ window_() {}
+
+ RateCounter() : RateCounter(1000) {}
+
+ void UpdateRates(int64_t send_time_us, uint32_t payload_size);
+
+ int64_t window_size_ms() const { return (window_size_us_ + 500) / 1000; }
+ uint32_t packets_per_second() const;
+ uint32_t bits_per_second() const;
+
+ double BitrateWindowS() const;
+
+ private:
+ typedef std::pair<int64_t, uint32_t> TimeSizePair;
+
+ int64_t window_size_us_;
+ uint32_t recently_received_packets_;
+ uint32_t recently_received_bytes_;
+ int64_t last_accumulated_us_;
+ std::list<TimeSizePair> window_;
+};
typedef std::set<int> FlowIds;
const FlowIds CreateFlowIds(const int *flow_ids_array, size_t num_flow_ids);
+const FlowIds CreateFlowIdRange(int initial_value, int last_value);
template <typename T>
bool DereferencingComparator(const T* const& a, const T* const& b) {
@@ -143,6 +173,32 @@ template<typename T> class Stats {
T max_;
};
+class Random {
+ public:
+ explicit Random(uint32_t seed);
+
+ // Return pseudo random number in the interval [0.0, 1.0].
+ float Rand();
+
+  // Return a pseudo-random integer in the interval [low, high].
+ int Rand(int low, int high);
+
+ // Normal Distribution.
+ int Gaussian(int mean, int standard_deviation);
+
+ // Exponential Distribution.
+ int Exponential(float lambda);
+
+ // TODO(solenberg): Random from histogram.
+ // template<typename T> int Distribution(const std::vector<T> histogram) {
+
+ private:
+ uint32_t a_;
+ uint32_t b_;
+
+ RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(Random);
+};
+
bool IsTimeSorted(const Packets& packets);
class PacketProcessor;
@@ -179,38 +235,50 @@ class PacketProcessor {
const FlowIds& flow_ids() const { return flow_ids_; }
+ uint32_t packets_per_second() const;
+ uint32_t bits_per_second() const;
+
+ protected:
+ RateCounter rate_counter_;
+
private:
PacketProcessorListener* listener_;
const FlowIds flow_ids_;
- DISALLOW_COPY_AND_ASSIGN(PacketProcessor);
+ RTC_DISALLOW_COPY_AND_ASSIGN(PacketProcessor);
};
class RateCounterFilter : public PacketProcessor {
public:
RateCounterFilter(PacketProcessorListener* listener,
int flow_id,
- const char* name);
+ const char* name,
+ const std::string& plot_name);
+ RateCounterFilter(PacketProcessorListener* listener,
+ const FlowIds& flow_ids,
+ const char* name,
+ const std::string& plot_name);
RateCounterFilter(PacketProcessorListener* listener,
const FlowIds& flow_ids,
- const char* name);
+ const char* name,
+ int64_t start_plotting_time_ms,
+ const std::string& plot_name);
virtual ~RateCounterFilter();
- uint32_t packets_per_second() const;
- uint32_t bits_per_second() const;
-
void LogStats();
Stats<double> GetBitrateStats() const;
virtual void Plot(int64_t timestamp_ms);
virtual void RunFor(int64_t time_ms, Packets* in_out);
private:
- rtc::scoped_ptr<RateCounter> rate_counter_;
Stats<double> packets_per_second_stats_;
Stats<double> kbps_stats_;
std::string name_;
+ int64_t start_plotting_time_ms_;
+ // Algorithm name if single flow, Total link utilization if all flows.
+ std::string plot_name_;
- DISALLOW_IMPLICIT_CONSTRUCTORS(RateCounterFilter);
+ RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(RateCounterFilter);
};
class LossFilter : public PacketProcessor {
@@ -226,7 +294,7 @@ class LossFilter : public PacketProcessor {
Random random_;
float loss_fraction_;
- DISALLOW_IMPLICIT_CONSTRUCTORS(LossFilter);
+ RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(LossFilter);
};
class DelayFilter : public PacketProcessor {
@@ -235,14 +303,14 @@ class DelayFilter : public PacketProcessor {
DelayFilter(PacketProcessorListener* listener, const FlowIds& flow_ids);
virtual ~DelayFilter() {}
- void SetDelayMs(int64_t delay_ms);
+ void SetOneWayDelayMs(int64_t one_way_delay_ms);
virtual void RunFor(int64_t time_ms, Packets* in_out);
private:
- int64_t delay_us_;
+ int64_t one_way_delay_us_;
int64_t last_send_time_us_;
- DISALLOW_IMPLICIT_CONSTRUCTORS(DelayFilter);
+ RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(DelayFilter);
};
class JitterFilter : public PacketProcessor {
@@ -251,15 +319,18 @@ class JitterFilter : public PacketProcessor {
JitterFilter(PacketProcessorListener* listener, const FlowIds& flow_ids);
virtual ~JitterFilter() {}
- void SetJitter(int64_t stddev_jitter_ms);
+ void SetMaxJitter(int64_t stddev_jitter_ms);
virtual void RunFor(int64_t time_ms, Packets* in_out);
+ void set_reorderdering(bool reordering) { reordering_ = reordering; }
+ int64_t MeanUs();
private:
Random random_;
int64_t stddev_jitter_us_;
int64_t last_send_time_us_;
+ bool reordering_; // False by default.
- DISALLOW_IMPLICIT_CONSTRUCTORS(JitterFilter);
+ RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(JitterFilter);
};
// Reorders two consecutive packets with a probability of reorder_percent.
@@ -276,7 +347,7 @@ class ReorderFilter : public PacketProcessor {
Random random_;
float reorder_fraction_;
- DISALLOW_IMPLICIT_CONSTRUCTORS(ReorderFilter);
+ RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(ReorderFilter);
};
// Apply a bitrate choke with an infinite queue on the packet stream.
@@ -286,18 +357,21 @@ class ChokeFilter : public PacketProcessor {
ChokeFilter(PacketProcessorListener* listener, const FlowIds& flow_ids);
virtual ~ChokeFilter();
- void SetCapacity(uint32_t kbps);
- void SetMaxDelay(int max_delay_ms);
+ void set_capacity_kbps(uint32_t kbps);
+ void set_max_delay_ms(int64_t max_queueing_delay_ms);
+
+ uint32_t capacity_kbps();
+
virtual void RunFor(int64_t time_ms, Packets* in_out);
Stats<double> GetDelayStats() const;
private:
- uint32_t kbps_;
+ uint32_t capacity_kbps_;
int64_t last_send_time_us_;
rtc::scoped_ptr<DelayCapHelper> delay_cap_helper_;
- DISALLOW_IMPLICIT_CONSTRUCTORS(ChokeFilter);
+ RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(ChokeFilter);
};
class TraceBasedDeliveryFilter : public PacketProcessor {
@@ -317,7 +391,7 @@ class TraceBasedDeliveryFilter : public PacketProcessor {
virtual void Plot(int64_t timestamp_ms);
virtual void RunFor(int64_t time_ms, Packets* in_out);
- void SetMaxDelay(int max_delay_ms);
+ void set_max_delay_ms(int64_t max_delay_ms);
Stats<double> GetDelayStats() const;
Stats<double> GetBitrateStats() const;
@@ -335,7 +409,7 @@ class TraceBasedDeliveryFilter : public PacketProcessor {
Stats<double> packets_per_second_stats_;
Stats<double> kbps_stats_;
- DISALLOW_COPY_AND_ASSIGN(TraceBasedDeliveryFilter);
+ RTC_DISALLOW_COPY_AND_ASSIGN(TraceBasedDeliveryFilter);
};
class VideoSource {
@@ -353,7 +427,7 @@ class VideoSource {
virtual void SetBitrateBps(int bitrate_bps) {}
uint32_t bits_per_second() const { return bits_per_second_; }
uint32_t max_payload_size_bytes() const { return kMaxPayloadSizeBytes; }
- int64_t GetTimeUntilNextFrameMs() const { return next_frame_ms_ - now_ms_; }
+ int64_t GetTimeUntilNextFrameMs() const;
protected:
virtual uint32_t NextFrameSize();
@@ -369,10 +443,11 @@ class VideoSource {
private:
const int flow_id_;
int64_t next_frame_ms_;
+ int64_t next_frame_rand_ms_;
int64_t now_ms_;
RTPHeader prototype_header_;
- DISALLOW_IMPLICIT_CONSTRUCTORS(VideoSource);
+ RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(VideoSource);
};
class AdaptiveVideoSource : public VideoSource {
@@ -387,7 +462,7 @@ class AdaptiveVideoSource : public VideoSource {
void SetBitrateBps(int bitrate_bps) override;
private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(AdaptiveVideoSource);
+ RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(AdaptiveVideoSource);
};
class PeriodicKeyFrameSource : public AdaptiveVideoSource {
@@ -410,7 +485,7 @@ class PeriodicKeyFrameSource : public AdaptiveVideoSource {
uint32_t frame_counter_;
int compensation_bytes_;
int compensation_per_frame_;
- DISALLOW_IMPLICIT_CONSTRUCTORS(PeriodicKeyFrameSource);
+ RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(PeriodicKeyFrameSource);
};
} // namespace bwe
} // namespace testing
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe_test_framework_unittest.cc b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe_test_framework_unittest.cc
index dd7bbfcfeba..6cd6ee7efe6 100644
--- a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe_test_framework_unittest.cc
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe_test_framework_unittest.cc
@@ -181,7 +181,8 @@ TEST(BweTestFramework_StatsTest, MinMax) {
class BweTestFramework_RateCounterFilterTest : public ::testing::Test {
public:
- BweTestFramework_RateCounterFilterTest() : filter_(NULL, 0, ""), now_ms_(0) {}
+ BweTestFramework_RateCounterFilterTest()
+ : filter_(NULL, 0, "", ""), now_ms_(0) {}
virtual ~BweTestFramework_RateCounterFilterTest() {}
protected:
@@ -207,7 +208,7 @@ class BweTestFramework_RateCounterFilterTest : public ::testing::Test {
RateCounterFilter filter_;
int64_t now_ms_;
- DISALLOW_COPY_AND_ASSIGN(BweTestFramework_RateCounterFilterTest);
+ RTC_DISALLOW_COPY_AND_ASSIGN(BweTestFramework_RateCounterFilterTest);
};
TEST_F(BweTestFramework_RateCounterFilterTest, Short) {
@@ -332,7 +333,7 @@ class BweTestFramework_DelayFilterTest : public ::testing::Test {
}
void TestDelayFilter(int64_t delay_ms) {
- filter_.SetDelayMs(delay_ms);
+ filter_.SetOneWayDelayMs(delay_ms);
TestDelayFilter(1, 0, 0); // No input should yield no output
// Single packet
@@ -340,7 +341,7 @@ class BweTestFramework_DelayFilterTest : public ::testing::Test {
TestDelayFilter(delay_ms, 0, 0);
for (int i = 0; i < delay_ms; ++i) {
- filter_.SetDelayMs(i);
+ filter_.SetOneWayDelayMs(i);
TestDelayFilter(1, 10, 10);
}
TestDelayFilter(0, 0, 0);
@@ -350,11 +351,11 @@ class BweTestFramework_DelayFilterTest : public ::testing::Test {
TestDelayFilter(delay_ms, 0, 0);
for (int i = 1; i < delay_ms + 1; ++i) {
- filter_.SetDelayMs(i);
+ filter_.SetOneWayDelayMs(i);
TestDelayFilter(1, 5, 5);
}
TestDelayFilter(0, 0, 0);
- filter_.SetDelayMs(2 * delay_ms);
+ filter_.SetOneWayDelayMs(2 * delay_ms);
TestDelayFilter(1, 0, 0);
TestDelayFilter(delay_ms, 13, 13);
TestDelayFilter(delay_ms, 0, 0);
@@ -363,11 +364,11 @@ class BweTestFramework_DelayFilterTest : public ::testing::Test {
TestDelayFilter(delay_ms, 0, 0);
for (int i = 0; i < 2 * delay_ms; ++i) {
- filter_.SetDelayMs(2 * delay_ms - i - 1);
+ filter_.SetOneWayDelayMs(2 * delay_ms - i - 1);
TestDelayFilter(1, 5, 5);
}
TestDelayFilter(0, 0, 0);
- filter_.SetDelayMs(0);
+ filter_.SetOneWayDelayMs(0);
TestDelayFilter(0, 7, 7);
ASSERT_TRUE(IsTimeSorted(accumulated_packets_));
@@ -379,16 +380,16 @@ class BweTestFramework_DelayFilterTest : public ::testing::Test {
private:
int64_t now_ms_;
- uint32_t sequence_number_;
+ uint16_t sequence_number_;
- DISALLOW_COPY_AND_ASSIGN(BweTestFramework_DelayFilterTest);
+ RTC_DISALLOW_COPY_AND_ASSIGN(BweTestFramework_DelayFilterTest);
};
TEST_F(BweTestFramework_DelayFilterTest, Delay0) {
TestDelayFilter(1, 0, 0); // No input should yield no output
TestDelayFilter(1, 10, 10); // Expect no delay (delay time is zero)
TestDelayFilter(1, 0, 0); // Check no packets are still in buffer
- filter_.SetDelayMs(0);
+ filter_.SetOneWayDelayMs(0);
TestDelayFilter(1, 5, 5); // Expect no delay (delay time is zero)
TestDelayFilter(1, 0, 0); // Check no packets are still in buffer
}
@@ -415,7 +416,7 @@ TEST_F(BweTestFramework_DelayFilterTest, JumpToZeroDelay) {
Packets packets;
// Delay a bunch of packets, accumulate them to the 'acc' list.
- delay.SetDelayMs(100.0f);
+ delay.SetOneWayDelayMs(100.0f);
for (uint32_t i = 0; i < 10; ++i) {
packets.push_back(new MediaPacket(i * 100, i));
}
@@ -426,7 +427,7 @@ TEST_F(BweTestFramework_DelayFilterTest, JumpToZeroDelay) {
// Drop delay to zero, send a few more packets through the delay, append them
// to the 'acc' list and verify that it is all sorted.
- delay.SetDelayMs(0.0f);
+ delay.SetOneWayDelayMs(0.0f);
for (uint32_t i = 10; i < 50; ++i) {
packets.push_back(new MediaPacket(i * 100, i));
}
@@ -445,24 +446,24 @@ TEST_F(BweTestFramework_DelayFilterTest, IncreasingDelay) {
TestDelayFilter(i);
}
// Reach a steady state.
- filter_.SetDelayMs(100);
+ filter_.SetOneWayDelayMs(100);
TestDelayFilter(1, 20, 20);
TestDelayFilter(2, 0, 0);
TestDelayFilter(99, 20, 20);
// Drop delay back down to zero.
- filter_.SetDelayMs(0);
+ filter_.SetOneWayDelayMs(0);
TestDelayFilter(1, 100, 100);
TestDelayFilter(23010, 0, 0);
ASSERT_TRUE(IsTimeSorted(accumulated_packets_));
ASSERT_TRUE(IsSequenceNumberSorted(accumulated_packets_));
}
-static void TestJitterFilter(int64_t stddev_jitter_ms) {
+static void TestJitterFilter(int64_t max_jitter_ms) {
JitterFilter filter(NULL, 0);
- filter.SetJitter(stddev_jitter_ms);
+ filter.SetMaxJitter(max_jitter_ms);
int64_t now_ms = 0;
- uint32_t sequence_number = 0;
+ uint16_t sequence_number = 0;
// Generate packets, add jitter to them, accumulate the altered packets.
Packets original;
@@ -473,9 +474,9 @@ static void TestJitterFilter(int64_t stddev_jitter_ms) {
packets.push_back(new MediaPacket(now_ms * 1000, sequence_number));
original.push_back(new MediaPacket(now_ms * 1000, sequence_number));
++sequence_number;
- now_ms += 5 * stddev_jitter_ms;
+ now_ms += 5 * max_jitter_ms;
}
- filter.RunFor(stddev_jitter_ms, &packets);
+ filter.RunFor(max_jitter_ms, &packets);
jittered.splice(jittered.end(), packets);
}
@@ -490,17 +491,22 @@ static void TestJitterFilter(int64_t stddev_jitter_ms) {
// difference (jitter) in stats, then check that mean jitter is close to zero
// and standard deviation of jitter is what we set it to.
Stats<double> jitter_us;
+ int64_t max_jitter_obtained_us = 0;
for (PacketsIt it1 = original.begin(), it2 = jittered.begin();
it1 != original.end() && it2 != jittered.end(); ++it1, ++it2) {
const MediaPacket* packet1 = static_cast<const MediaPacket*>(*it1);
const MediaPacket* packet2 = static_cast<const MediaPacket*>(*it2);
EXPECT_EQ(packet1->header().sequenceNumber,
packet2->header().sequenceNumber);
- jitter_us.Push(packet1->send_time_us() - packet2->send_time_us());
+ max_jitter_obtained_us =
+ std::max(max_jitter_obtained_us,
+ packet2->send_time_us() - packet1->send_time_us());
+ jitter_us.Push(packet2->send_time_us() - packet1->send_time_us());
}
- EXPECT_NEAR(0.0, jitter_us.GetMean(), stddev_jitter_ms * 1000.0 * 0.008);
- EXPECT_NEAR(stddev_jitter_ms * 1000.0, jitter_us.GetStdDev(),
- stddev_jitter_ms * 1000.0 * 0.02);
+ EXPECT_NEAR(filter.MeanUs(), jitter_us.GetMean(),
+ max_jitter_ms * 1000.0 * 0.01);
+ EXPECT_NEAR(max_jitter_ms * 1000.0, max_jitter_obtained_us,
+ max_jitter_ms * 1000.0 * 0.01);
for (auto* packet : original)
delete packet;
for (auto* packet : jittered)
@@ -527,14 +533,14 @@ TEST(BweTestFramework_JitterFilterTest, Jitter1031) {
TestJitterFilter(1031);
}
-static void TestReorderFilter(uint32_t reorder_percent, uint32_t near_value) {
- const uint32_t kPacketCount = 10000;
+static void TestReorderFilter(uint16_t reorder_percent, uint16_t near_value) {
+ const uint16_t kPacketCount = 10000;
// Generate packets with 10 ms interval.
Packets packets;
int64_t now_ms = 0;
- uint32_t sequence_number = 1;
- for (uint32_t i = 0; i < kPacketCount; ++i, now_ms += 10) {
+ uint16_t sequence_number = 1;
+ for (uint16_t i = 0; i < kPacketCount; ++i, now_ms += 10) {
packets.push_back(new MediaPacket(now_ms * 1000, sequence_number++));
}
ASSERT_TRUE(IsTimeSorted(packets));
@@ -548,11 +554,11 @@ static void TestReorderFilter(uint32_t reorder_percent, uint32_t near_value) {
// We measure the amount of reordering by summing the distance by which out-
// of-order packets have been moved in the stream.
- uint32_t distance = 0;
- uint32_t last_sequence_number = 0;
+ uint16_t distance = 0;
+ uint16_t last_sequence_number = 0;
for (auto* packet : packets) {
const MediaPacket* media_packet = static_cast<const MediaPacket*>(packet);
- uint32_t sequence_number = media_packet->header().sequenceNumber;
+ uint16_t sequence_number = media_packet->header().sequenceNumber;
if (sequence_number < last_sequence_number) {
distance += last_sequence_number - sequence_number;
}
@@ -643,7 +649,7 @@ class BweTestFramework_ChokeFilterTest : public ::testing::Test {
delete output_packets_.front();
output_packets_.pop_front();
}
- EXPECT_EQ(expected_kbit_transmitted, (bytes_transmitted * 8) / 1000);
+ EXPECT_EQ(expected_kbit_transmitted, (bytes_transmitted * 8 + 500) / 1000);
}
void CheckMaxDelay(int64_t max_delay_ms) {
@@ -657,26 +663,56 @@ class BweTestFramework_ChokeFilterTest : public ::testing::Test {
private:
int64_t now_ms_;
- uint32_t sequence_number_;
+ uint16_t sequence_number_;
Packets output_packets_;
std::vector<int64_t> send_times_us_;
- DISALLOW_COPY_AND_ASSIGN(BweTestFramework_ChokeFilterTest);
+ RTC_DISALLOW_COPY_AND_ASSIGN(BweTestFramework_ChokeFilterTest);
};
+TEST_F(BweTestFramework_ChokeFilterTest, NoQueue) {
+ const int kCapacityKbps = 10;
+ const size_t kPacketSizeBytes = 125;
+ const int64_t kExpectedSendTimeUs =
+ (kPacketSizeBytes * 8 * 1000 + kCapacityKbps / 2) / kCapacityKbps;
+ uint16_t sequence_number = 0;
+ int64_t send_time_us = 0;
+ ChokeFilter filter(NULL, 0);
+ filter.set_capacity_kbps(10);
+ Packets packets;
+ RTPHeader header;
+ for (int i = 0; i < 2; ++i) {
+ header.sequenceNumber = sequence_number++;
+ // Payload is 1000 bits.
+ packets.push_back(
+ new MediaPacket(0, send_time_us, kPacketSizeBytes, header));
+ // Packets are sent far enough apart plus an extra millisecond so that they
+ // will never be in the choke queue at the same time.
+ send_time_us += kExpectedSendTimeUs + 1000;
+ }
+ ASSERT_TRUE(IsTimeSorted(packets));
+ filter.RunFor(2 * kExpectedSendTimeUs + 1000, &packets);
+ EXPECT_EQ(kExpectedSendTimeUs, packets.front()->send_time_us());
+ delete packets.front();
+ packets.pop_front();
+ EXPECT_EQ(2 * kExpectedSendTimeUs + 1000, packets.front()->send_time_us());
+ delete packets.front();
+ packets.pop_front();
+}
+
TEST_F(BweTestFramework_ChokeFilterTest, Short) {
// 100ms, 100 packets, 10 kbps choke -> 1 kbit of data should have propagated.
// That is actually just a single packet, since each packet has 1000 bits of
// payload.
ChokeFilter filter(NULL, 0);
- filter.SetCapacity(10);
+ filter.set_capacity_kbps(10);
TestChoke(&filter, 100, 100, 1);
}
TEST_F(BweTestFramework_ChokeFilterTest, Medium) {
// 100ms, 10 packets, 10 kbps choke -> 1 packet through, or 1 kbit.
ChokeFilter filter(NULL, 0);
- filter.SetCapacity(10);
+ filter.set_capacity_kbps(10);
TestChoke(&filter, 100, 10, 1);
// 200ms, no new packets -> another packet through.
TestChoke(&filter, 100, 0, 1);
@@ -689,7 +725,7 @@ TEST_F(BweTestFramework_ChokeFilterTest, Medium) {
TEST_F(BweTestFramework_ChokeFilterTest, Long) {
// 100ms, 100 packets in queue, 10 kbps choke -> 1 packet through, or 1 kbit.
ChokeFilter filter(NULL, 0);
- filter.SetCapacity(10);
+ filter.set_capacity_kbps(10);
TestChoke(&filter, 100, 100, 1);
// 200ms, no input, another packet through.
TestChoke(&filter, 100, 0, 1);
@@ -697,22 +733,22 @@ TEST_F(BweTestFramework_ChokeFilterTest, Long) {
TestChoke(&filter, 800, 0, 8);
// 10000ms, no input, raise choke to 100 kbps. Remaining 90 packets in queue
// should be propagated, for a total of 90 kbps.
- filter.SetCapacity(100);
+ filter.set_capacity_kbps(100);
TestChoke(&filter, 9000, 0, 90);
// 10100ms, 20 more packets -> 10 packets or 10 kbit through.
TestChoke(&filter, 100, 20, 10);
// 10300ms, 10 more packets -> 20 packets out.
TestChoke(&filter, 200, 10, 20);
// 11300ms, no input, queue should be empty.
- filter.SetCapacity(10);
+ filter.set_capacity_kbps(10);
TestChoke(&filter, 1000, 0, 0);
}
TEST_F(BweTestFramework_ChokeFilterTest, MaxDelay) {
// 10 kbps choke, 500 ms delay cap
ChokeFilter filter(NULL, 0);
- filter.SetCapacity(10);
- filter.SetMaxDelay(500);
+ filter.set_capacity_kbps(10);
+ filter.set_max_delay_ms(500);
// 100ms, 100 packets in queue, 10 kbps choke -> 1 packet through, or 1 kbit.
TestChoke(&filter, 100, 100, 1);
CheckMaxDelay(500);
@@ -722,18 +758,18 @@ TEST_F(BweTestFramework_ChokeFilterTest, MaxDelay) {
TestChoke(&filter, 9500, 0, 0);
// 100 ms delay cap
- filter.SetMaxDelay(100);
- // 10100ms, 50 more packets -> 2 packets or 2 kbit through.
- TestChoke(&filter, 100, 50, 2);
+ filter.set_max_delay_ms(100);
+ // 10100ms, 50 more packets -> 1 packet or 1 kbit through.
+ TestChoke(&filter, 100, 50, 1);
CheckMaxDelay(100);
// 20000ms, no input, remaining packets in queue should have been dropped.
TestChoke(&filter, 9900, 0, 0);
// Reset delay cap (0 is no cap) and verify no packets are dropped.
- filter.SetCapacity(10);
- filter.SetMaxDelay(0);
- TestChoke(&filter, 100, 100, 2);
- TestChoke(&filter, 9900, 0, 98);
+ filter.set_capacity_kbps(10);
+ filter.set_max_delay_ms(0);
+ TestChoke(&filter, 100, 100, 1);
+ TestChoke(&filter, 9900, 0, 99);
}
TEST_F(BweTestFramework_ChokeFilterTest, ShortTrace) {
@@ -754,7 +790,7 @@ TEST_F(BweTestFramework_ChokeFilterTest, ShortTraceTwoWraps) {
TEST_F(BweTestFramework_ChokeFilterTest, ShortTraceMaxDelay) {
TraceBasedDeliveryFilter filter(NULL, 0);
- filter.SetMaxDelay(25);
+ filter.set_max_delay_ms(25);
ASSERT_TRUE(filter.Init(test::ResourcePath("synthetic-trace", "rx")));
// Uses all slots up to 110 ms. Several packets are being dropped.
TestChoke(&filter, 110, 20, 9);
@@ -775,12 +811,14 @@ void TestVideoSender(VideoSender* sender,
ASSERT_TRUE(IsTimeSorted(packets));
ASSERT_TRUE(IsSequenceNumberSorted(packets));
EXPECT_EQ(expected_packets, packets.size());
+
int64_t send_time_us = -1;
size_t total_payload_size = 0;
uint32_t absolute_send_time = 0;
uint32_t absolute_send_time_wraps = 0;
uint32_t rtp_timestamp = 0;
uint32_t rtp_timestamp_wraps = 0;
+
for (const auto* packet : packets) {
const MediaPacket* media_packet = static_cast<const MediaPacket*>(packet);
EXPECT_LE(send_time_us, media_packet->send_time_us());
@@ -800,6 +838,7 @@ void TestVideoSender(VideoSender* sender,
}
rtp_timestamp = media_packet->header().timestamp;
}
+
EXPECT_EQ(expected_total_payload_size, total_payload_size);
EXPECT_GE(1u, absolute_send_time_wraps);
EXPECT_GE(1u, rtp_timestamp_wraps);
@@ -808,6 +847,8 @@ void TestVideoSender(VideoSender* sender,
delete packet;
}
+// Random {-1, 0, +1} ms was added to frame timestamps.
+
TEST(BweTestFramework_VideoSenderTest, Fps1Kbps80_1s) {
// 1 fps, 80 kbps
VideoSource source(0, 1.0f, 80, 0x1234, 0);
@@ -816,14 +857,16 @@ TEST(BweTestFramework_VideoSenderTest, Fps1Kbps80_1s) {
// We're at 1 fps, so all packets should be generated on first call, giving 10
// packets of each 1000 bytes, total 10000 bytes.
TestVideoSender(&sender, 1, 9, 400, 10000);
- // 999ms, should see no output here.
- TestVideoSender(&sender, 998, 0, 0, 0);
- // 1999ms, should get data for one more frame.
- TestVideoSender(&sender, 1000, 9, 400, 10000);
- // 2000ms, one more frame.
- TestVideoSender(&sender, 1, 9, 400, 10000);
- // 2999ms, should see nothing.
- TestVideoSender(&sender, 999, 0, 0, 0);
+ // 998ms, should see no output here.
+ TestVideoSender(&sender, 997, 0, 0, 0);
+ // 1001ms, should get data for one more frame.
+ TestVideoSender(&sender, 3, 9, 400, 10000);
+ // 1998ms, should see no output here.
+ TestVideoSender(&sender, 997, 0, 0, 0);
+ // 2001ms, one more frame.
+ TestVideoSender(&sender, 3, 9, 400, 10000);
+ // 2998ms, should see nothing.
+ TestVideoSender(&sender, 997, 0, 0, 0);
}
TEST(BweTestFramework_VideoSenderTest, Fps1Kbps80_1s_Offset) {
@@ -831,20 +874,20 @@ TEST(BweTestFramework_VideoSenderTest, Fps1Kbps80_1s_Offset) {
VideoSource source(0, 1.0f, 80, 0x1234, 500);
VideoSender sender(NULL, &source, kNullEstimator);
EXPECT_EQ(80000u, source.bits_per_second());
- // 499ms, no output.
- TestVideoSender(&sender, 499, 0, 0, 0);
- // 500ms, first frame (this is the offset we set), 10 packets of 1000 bytes.
- TestVideoSender(&sender, 1, 9, 400, 10000);
- // 1499ms, nothing.
- TestVideoSender(&sender, 999, 0, 0, 0);
- // 1999ms, second frame.
- TestVideoSender(&sender, 500, 9, 400, 10000);
- // 2499ms, nothing.
- TestVideoSender(&sender, 500, 0, 0, 0);
- // 2500ms, third frame.
- TestVideoSender(&sender, 1, 9, 400, 10000);
- // 3499ms, nothing.
- TestVideoSender(&sender, 999, 0, 0, 0);
+ // 498ms, no output.
+ TestVideoSender(&sender, 498, 0, 0, 0);
+ // 501ms, first frame (this is the offset we set), 10 packets of 1000 bytes.
+ TestVideoSender(&sender, 3, 9, 400, 10000);
+ // 1498ms, nothing.
+ TestVideoSender(&sender, 997, 0, 0, 0);
+ // 1501ms, second frame.
+ TestVideoSender(&sender, 3, 9, 400, 10000);
+ // 2498ms, nothing.
+ TestVideoSender(&sender, 997, 0, 0, 0);
+ // 2501ms, third frame.
+ TestVideoSender(&sender, 3, 9, 400, 10000);
+ // 3498ms, nothing.
+ TestVideoSender(&sender, 997, 0, 0, 0);
}
TEST(BweTestFramework_VideoSenderTest, Fps50Kpbs80_11s) {
@@ -852,55 +895,55 @@ TEST(BweTestFramework_VideoSenderTest, Fps50Kpbs80_11s) {
VideoSource source(0, 50.0f, 80, 0x1234, 0);
VideoSender sender(NULL, &source, kNullEstimator);
EXPECT_EQ(80000u, source.bits_per_second());
- // 9998ms, should see 500 frames, 200 byte payloads, total 100000 bytes.
- TestVideoSender(&sender, 9998, 500, 200, 100000);
- // 9999ms, nothing.
- TestVideoSender(&sender, 1, 0, 0, 0);
- // 10000ms, 501st frame as a single packet.
- TestVideoSender(&sender, 1, 1, 200, 200);
- // 10998ms, 49 more frames.
- TestVideoSender(&sender, 998, 49, 200, 9800);
- // 10999ms, nothing.
- TestVideoSender(&sender, 1, 0, 0, 0);
-}
-
-TEST(BweTestFramework_VideoSenderTest, Fps10Kpbs120_1s) {
+ // 9981ms, should see 500 frames, 200 byte payloads, total 100000 bytes.
+ TestVideoSender(&sender, 9981, 500, 200, 100000);
+ // 9998ms, nothing.
+ TestVideoSender(&sender, 17, 0, 0, 0);
+ // 10001ms, 501st frame as a single packet.
+ TestVideoSender(&sender, 3, 1, 200, 200);
+ // 10981ms, 49 more frames.
+ TestVideoSender(&sender, 981, 49, 200, 9800);
+ // 10998ms, nothing.
+ TestVideoSender(&sender, 17, 0, 0, 0);
+}
+
+TEST(BweTestFramework_VideoSenderTest, Fps20Kpbs120_1s) {
// 20 fps, 120 kbps.
VideoSource source(0, 20.0f, 120, 0x1234, 0);
VideoSender sender(NULL, &source, kNullEstimator);
EXPECT_EQ(120000u, source.bits_per_second());
- // 498ms, 10 frames with 750 byte payloads, total 7500 bytes.
- TestVideoSender(&sender, 498, 10, 750, 7500);
- // 499ms, nothing.
- TestVideoSender(&sender, 1, 0, 0, 0);
- // 500ms, one more frame.
- TestVideoSender(&sender, 1, 1, 750, 750);
- // 998ms, 9 more frames.
- TestVideoSender(&sender, 498, 9, 750, 6750);
- // 999ms, nothing.
- TestVideoSender(&sender, 1, 0, 0, 0);
-}
-
-TEST(BweTestFramework_VideoSenderTest, Fps30Kbps800_20s) {
- // 20 fps, 820 kbps.
+ // 451ms, 10 frames with 750 byte payloads, total 7500 bytes.
+ TestVideoSender(&sender, 451, 10, 750, 7500);
+ // 498ms, nothing.
+ TestVideoSender(&sender, 47, 0, 0, 0);
+ // 501ms, one more frame.
+ TestVideoSender(&sender, 3, 1, 750, 750);
+ // 951ms, 9 more frames.
+ TestVideoSender(&sender, 450, 9, 750, 6750);
+ // 998ms, nothing.
+ TestVideoSender(&sender, 47, 0, 0, 0);
+}
+
+TEST(BweTestFramework_VideoSenderTest, Fps25Kbps820_20s) {
+ // 25 fps, 820 kbps.
VideoSource source(0, 25.0f, 820, 0x1234, 0);
VideoSender sender(NULL, &source, kNullEstimator);
EXPECT_EQ(820000u, source.bits_per_second());
- // 9998ms, 250 frames. 820 kbps = 102500 bytes/s, so total should be 1025000.
+ // 9961ms, 250 frames. 820 kbps = 102500 bytes/s, so total should be 1025000.
// Each frame is 102500/25=4100 bytes, or 5 packets (4 @1000 bytes, 1 @100),
// so packet count should be 5*250=1250 and last packet of each frame has
// 100 bytes of payload.
- TestVideoSender(&sender, 9998, 1000, 500, 1025000);
- // 9999ms, nothing.
- TestVideoSender(&sender, 1, 0, 0, 0);
- // 19998ms, 250 more frames.
- TestVideoSender(&sender, 9999, 1000, 500, 1025000);
- // 19999ms, nothing.
- TestVideoSender(&sender, 1, 0, 0, 0);
- // 20038ms, one more frame, as described above (25fps == 40ms/frame).
- TestVideoSender(&sender, 39, 4, 500, 4100);
- // 20039ms, nothing.
- TestVideoSender(&sender, 1, 0, 0, 0);
+ TestVideoSender(&sender, 9961, 1000, 500, 1025000);
+ // 9998ms, nothing.
+ TestVideoSender(&sender, 37, 0, 0, 0);
+ // 19961ms, 250 more frames.
+ TestVideoSender(&sender, 9963, 1000, 500, 1025000);
+ // 19998ms, nothing.
+ TestVideoSender(&sender, 37, 0, 0, 0);
+ // 20001ms, one more frame, as described above (25fps == 40ms/frame).
+ TestVideoSender(&sender, 3, 4, 500, 4100);
+ // 20038ms, nothing.
+ TestVideoSender(&sender, 37, 0, 0, 0);
}
TEST(BweTestFramework_VideoSenderTest, TestAppendInOrder) {
@@ -943,7 +986,7 @@ TEST(BweTestFramework_VideoSenderTest, FeedbackIneffective) {
VideoSender sender(NULL, &source, kNullEstimator);
EXPECT_EQ(820000u, source.bits_per_second());
- TestVideoSender(&sender, 9998, 1000, 500, 1025000);
+ TestVideoSender(&sender, 9961, 1000, 500, 1025000);
// Make sure feedback has no effect on a regular video sender.
RembFeedback* feedback = new RembFeedback(0, 0, 0, 512000, RTCPReportBlock());
@@ -951,14 +994,14 @@ TEST(BweTestFramework_VideoSenderTest, FeedbackIneffective) {
packets.push_back(feedback);
sender.RunFor(0, &packets);
EXPECT_EQ(820000u, source.bits_per_second());
- TestVideoSender(&sender, 9998, 1000, 500, 1025000);
+ TestVideoSender(&sender, 10000, 1000, 500, 1025000);
}
TEST(BweTestFramework_AdaptiveVideoSenderTest, FeedbackChangesBitrate) {
AdaptiveVideoSource source(0, 25.0f, 820, 0x1234, 0);
VideoSender sender(NULL, &source, kRembEstimator);
EXPECT_EQ(820000u, source.bits_per_second());
- TestVideoSender(&sender, 9998, 1000, 500, 1025000);
+ TestVideoSender(&sender, 9961, 1000, 500, 1025000);
// Make sure we can reduce the bitrate.
RembFeedback* feedback = new RembFeedback(0, 0, 0, 512000, RTCPReportBlock());
@@ -966,7 +1009,7 @@ TEST(BweTestFramework_AdaptiveVideoSenderTest, FeedbackChangesBitrate) {
packets.push_back(feedback);
sender.RunFor(0, &packets);
EXPECT_EQ(512000u, source.bits_per_second());
- TestVideoSender(&sender, 9998, 750, 160, 640000);
+ TestVideoSender(&sender, 10000, 750, 160, 640000);
// Increase the bitrate to the initial bitrate and verify that the output is
// the same.
@@ -991,7 +1034,7 @@ TEST(BweTestFramework_AdaptiveVideoSenderTest, Paced_FeedbackChangesBitrate) {
packets.push_back(feedback);
sender.RunFor(10000, &packets);
ASSERT_EQ(512000u, source.bits_per_second());
- TestVideoSender(&sender, 9998, 750, 160, 640000);
+ TestVideoSender(&sender, 10000, 750, 160, 640000);
// Increase the bitrate to the initial bitrate and verify that the output is
// the same.
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe_test_logging.cc b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe_test_logging.cc
index de13023b264..94233131815 100644
--- a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe_test_logging.cc
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe_test_logging.cc
@@ -91,16 +91,106 @@ void Logging::Log(const char format[], ...) {
}
void Logging::Plot(int figure, double value) {
+ Plot(figure, value, "-");
+}
+
+void Logging::Plot(int figure, double value, const std::string& alg_name) {
CriticalSectionScoped cs(crit_sect_.get());
ThreadMap::iterator it = thread_map_.find(ThreadWrapper::GetThreadId());
assert(it != thread_map_.end());
const State& state = it->second.stack.top();
+ std::string label = state.tag + '@' + alg_name;
+ std::string prefix("Available");
+ if (alg_name.compare(0, prefix.length(), prefix) == 0) {
+ std::string receiver("Receiver");
+ size_t start_pos = label.find(receiver);
+ if (start_pos != std::string::npos) {
+ label.replace(start_pos, receiver.length(), "Sender");
+ }
+ }
if (state.enabled) {
- printf("PLOT\t%d\t%s\t%f\t%f\n", figure, state.tag.c_str(),
+ printf("PLOT\t%d\t%s\t%f\t%f\n", figure, label.c_str(),
state.timestamp_ms * 0.001, value);
}
}
+void Logging::PlotBar(int figure,
+ const std::string& name,
+ double value,
+ int flow_id) {
+ CriticalSectionScoped cs(crit_sect_.get());
+ ThreadMap::iterator it = thread_map_.find(ThreadWrapper::GetThreadId());
+ assert(it != thread_map_.end());
+ const State& state = it->second.stack.top();
+ if (state.enabled) {
+ printf("BAR\t%d\t%s_%d\t%f\n", figure, name.c_str(), flow_id, value);
+ }
+}
+
+void Logging::PlotBaselineBar(int figure,
+ const std::string& name,
+ double value,
+ int flow_id) {
+ CriticalSectionScoped cs(crit_sect_.get());
+ ThreadMap::iterator it = thread_map_.find(ThreadWrapper::GetThreadId());
+ assert(it != thread_map_.end());
+ const State& state = it->second.stack.top();
+ if (state.enabled) {
+ printf("BASELINE\t%d\t%s_%d\t%f\n", figure, name.c_str(), flow_id, value);
+ }
+}
+
+void Logging::PlotErrorBar(int figure,
+ const std::string& name,
+ double value,
+ double ylow,
+ double yhigh,
+ const std::string& error_title,
+ int flow_id) {
+ CriticalSectionScoped cs(crit_sect_.get());
+ ThreadMap::iterator it = thread_map_.find(ThreadWrapper::GetThreadId());
+ assert(it != thread_map_.end());
+ const State& state = it->second.stack.top();
+ if (state.enabled) {
+ printf("ERRORBAR\t%d\t%s_%d\t%f\t%f\t%f\t%s\n", figure, name.c_str(),
+ flow_id, value, ylow, yhigh, error_title.c_str());
+ }
+}
+
+void Logging::PlotLimitErrorBar(int figure,
+ const std::string& name,
+ double value,
+ double ylow,
+ double yhigh,
+ const std::string& error_title,
+ double ymax,
+ const std::string& limit_title,
+ int flow_id) {
+ CriticalSectionScoped cs(crit_sect_.get());
+ ThreadMap::iterator it = thread_map_.find(ThreadWrapper::GetThreadId());
+ assert(it != thread_map_.end());
+ const State& state = it->second.stack.top();
+ if (state.enabled) {
+ printf("LIMITERRORBAR\t%d\t%s_%d\t%f\t%f\t%f\t%s\t%f\t%s\n", figure,
+ name.c_str(), flow_id, value, ylow, yhigh, error_title.c_str(), ymax,
+ limit_title.c_str());
+ }
+}
+
+void Logging::PlotLabel(int figure,
+ const std::string& title,
+ const std::string& y_label,
+ int num_flows) {
+ CriticalSectionScoped cs(crit_sect_.get());
+ ThreadMap::iterator it = thread_map_.find(ThreadWrapper::GetThreadId());
+ assert(it != thread_map_.end());
+ const State& state = it->second.stack.top();
+ if (state.enabled) {
+ printf("LABEL\t%d\t%s\t%s\t%d\n", figure, title.c_str(), y_label.c_str(),
+ num_flows);
+ }
+}
+
Logging::Logging()
: crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
thread_map_() {
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe_test_logging.h b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe_test_logging.h
index c214d70c27e..4115d30c2a6 100644
--- a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe_test_logging.h
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe_test_logging.h
@@ -91,7 +91,31 @@
// |name| is a char*, std::string or uint32_t to name the plotted value.
// |time| is an int64_t time in ms, or -1 to inherit time from previous context.
// |value| is a double precision float to be plotted.
+// |alg_name| is an optional argument, a string
#define BWE_TEST_LOGGING_PLOT(figure, name, time, value)
+#define BWE_TEST_LOGGING_PLOT_WITH_NAME(figure, name, time, value, alg_name)
+
+// Print to stdout in tab-separated format suitable for plotting, e.g.:
+// BAR figure Context1_Context2_Name x_left width value
+// |figure| is a figure id. Different figures are plotted in different windows.
+// |name| is a char*, std::string or uint32_t to name the plotted value.
+// |value| is a double precision float to be plotted.
+// |ylow| and |yhigh| are double precision float for the error line.
+// |title| is a string and refers to the error label.
+// |ymax| is a double precision float for the limit horizontal line.
+// |limit_title| is a string and refers to the limit label.
+#define BWE_TEST_LOGGING_BAR(figure, name, value, flow_id)
+#define BWE_TEST_LOGGING_ERRORBAR(figure, name, value, ylow, yhigh, \
+ error_title, flow_id)
+#define BWE_TEST_LOGGING_LIMITERRORBAR( \
+ figure, name, value, ylow, yhigh, error_title, ymax, limit_title, flow_id)
+
+#define BWE_TEST_LOGGING_BASELINEBAR(figure, name, value, flow_id)
+
+// |num_flows| is an integer referring to the number of RMCAT flows in the
+// scenario.
+// Define |x_label| and |y_label| for plots.
+#define BWE_TEST_LOGGING_LABEL(figure, x_label, y_label, num_flows)
#else // BWE_TEST_LOGGING_COMPILE_TIME_ENABLE
@@ -157,11 +181,57 @@
#define BWE_TEST_LOGGING_PLOT(figure, name, time, value) \
do { \
- __BWE_TEST_LOGGING_CONTEXT_DECLARE(__bwe_log_, __LINE__, name, \
+ __BWE_TEST_LOGGING_CONTEXT_DECLARE(__bwe_log_, __PLOT__, name, \
static_cast<int64_t>(time), true); \
webrtc::testing::bwe::Logging::GetInstance()->Plot(figure, value); \
} while (0);
+#define BWE_TEST_LOGGING_PLOT_WITH_NAME(figure, name, time, value, alg_name) \
+ do { \
+ __BWE_TEST_LOGGING_CONTEXT_DECLARE(__bwe_log_, __PLOT__, name, \
+ static_cast<int64_t>(time), true); \
+ webrtc::testing::bwe::Logging::GetInstance()->Plot(figure, value, \
+ alg_name); \
+ } while (0);
+
+#define BWE_TEST_LOGGING_BAR(figure, name, value, flow_id) \
+ do { \
+ BWE_TEST_LOGGING_CONTEXT(name); \
+ webrtc::testing::bwe::Logging::GetInstance()->PlotBar(figure, name, value, \
+ flow_id); \
+ } while (0);
+
+#define BWE_TEST_LOGGING_BASELINEBAR(figure, name, value, flow_id) \
+ do { \
+ BWE_TEST_LOGGING_CONTEXT(name); \
+ webrtc::testing::bwe::Logging::GetInstance()->PlotBaselineBar( \
+ figure, name, value, flow_id); \
+ } while (0);
+
+#define BWE_TEST_LOGGING_ERRORBAR(figure, name, value, ylow, yhigh, title, \
+ flow_id) \
+ do { \
+ BWE_TEST_LOGGING_CONTEXT(name); \
+ webrtc::testing::bwe::Logging::GetInstance()->PlotErrorBar( \
+ figure, name, value, ylow, yhigh, title, flow_id); \
+ } while (0);
+
+#define BWE_TEST_LOGGING_LIMITERRORBAR( \
+ figure, name, value, ylow, yhigh, error_title, ymax, limit_title, flow_id) \
+ do { \
+ BWE_TEST_LOGGING_CONTEXT(name); \
+ webrtc::testing::bwe::Logging::GetInstance()->PlotLimitErrorBar( \
+ figure, name, value, ylow, yhigh, error_title, ymax, limit_title, \
+ flow_id); \
+ } while (0);
+
+#define BWE_TEST_LOGGING_LABEL(figure, title, y_label, num_flows) \
+ do { \
+ BWE_TEST_LOGGING_CONTEXT(title); \
+ webrtc::testing::bwe::Logging::GetInstance()->PlotLabel( \
+ figure, title, y_label, num_flows); \
+ } while (0);
+
namespace webrtc {
class CriticalSectionWrapper;
@@ -178,7 +248,7 @@ class Logging {
Context(const char* name, int64_t timestamp_ms, bool enabled);
~Context();
private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(Context);
+ RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(Context);
};
static Logging* GetInstance();
@@ -190,6 +260,33 @@ class Logging {
void Log(const char format[], ...);
void Plot(int figure, double value);
+ void Plot(int figure, double value, const std::string& alg_name);
+ void PlotBar(int figure, const std::string& name, double value, int flow_id);
+ void PlotBaselineBar(int figure,
+ const std::string& name,
+ double value,
+ int flow_id);
+ void PlotErrorBar(int figure,
+ const std::string& name,
+ double value,
+ double ylow,
+ double yhigh,
+ const std::string& error_title,
+ int flow_id);
+
+ void PlotLimitErrorBar(int figure,
+ const std::string& name,
+ double value,
+ double ylow,
+ double yhigh,
+ const std::string& error_title,
+ double ymax,
+ const std::string& limit_title,
+ int flow_id);
+ void PlotLabel(int figure,
+ const std::string& title,
+ const std::string& y_label,
+ int num_flows);
private:
struct State {
@@ -216,7 +313,7 @@ class Logging {
rtc::scoped_ptr<CriticalSectionWrapper> crit_sect_;
ThreadMap thread_map_;
- DISALLOW_COPY_AND_ASSIGN(Logging);
+ RTC_DISALLOW_COPY_AND_ASSIGN(Logging);
};
} // namespace bwe
} // namespace testing
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe_unittest.cc b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe_unittest.cc
new file mode 100644
index 00000000000..6b3ce4847cf
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe_unittest.cc
@@ -0,0 +1,393 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/remote_bitrate_estimator/test/bwe.h"
+
+#include <vector>
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace webrtc {
+namespace testing {
+namespace bwe {
+
+const int kSetCapacity = 1000;
+
+class LinkedSetTest : public ::testing::Test {
+ public:
+ LinkedSetTest() : linked_set_(kSetCapacity) {}
+
+ ~LinkedSetTest() {}
+
+ protected:
+ LinkedSet linked_set_;
+};
+
+TEST_F(LinkedSetTest, EmptySet) {
+ EXPECT_EQ(linked_set_.OldestSeqNumber(), 0);
+ EXPECT_EQ(linked_set_.NewestSeqNumber(), 0);
+}
+
+TEST_F(LinkedSetTest, SinglePacket) {
+ const uint16_t kSeqNumber = 1; // Arbitrary.
+ // Other parameters don't matter here.
+ linked_set_.Insert(kSeqNumber, 0, 0, 0);
+
+ EXPECT_EQ(linked_set_.OldestSeqNumber(), kSeqNumber);
+ EXPECT_EQ(linked_set_.NewestSeqNumber(), kSeqNumber);
+}
+
+TEST_F(LinkedSetTest, MultiplePackets) {
+ const uint16_t kNumberPackets = 100;
+
+ std::vector<uint16_t> sequence_numbers;
+ for (size_t i = 0; i < kNumberPackets; ++i) {
+ sequence_numbers.push_back(static_cast<uint16_t>(i + 1));
+ }
+ random_shuffle(sequence_numbers.begin(), sequence_numbers.end());
+
+ for (size_t i = 0; i < kNumberPackets; ++i) {
+ // Other parameters don't matter here.
+ linked_set_.Insert(static_cast<uint16_t>(i), 0, 0, 0);
+ }
+
+ // Packets arriving out of order should not affect the following values:
+ EXPECT_EQ(linked_set_.OldestSeqNumber(), 0);
+ EXPECT_EQ(linked_set_.NewestSeqNumber(), kNumberPackets - 1);
+}
+
+TEST_F(LinkedSetTest, Overflow) {
+ const int kFirstSeqNumber = -100;
+ const int kLastSeqNumber = 100;
+
+ for (int i = kFirstSeqNumber; i <= kLastSeqNumber; ++i) {
+ // Other parameters don't matter here.
+ linked_set_.Insert(static_cast<uint16_t>(i), 0, 0, 0);
+ }
+
+ // Packets arriving out of order should not affect the following values:
+ EXPECT_EQ(linked_set_.OldestSeqNumber(),
+ static_cast<uint16_t>(kFirstSeqNumber));
+ EXPECT_EQ(linked_set_.NewestSeqNumber(),
+ static_cast<uint16_t>(kLastSeqNumber));
+}
+
+class SequenceNumberOlderThanTest : public ::testing::Test {
+ public:
+ SequenceNumberOlderThanTest() {}
+ ~SequenceNumberOlderThanTest() {}
+
+ protected:
+ SequenceNumberOlderThan comparator_;
+};
+
+TEST_F(SequenceNumberOlderThanTest, Operator) {
+ // Operator()(x, y) returns true <==> y is newer than x.
+ EXPECT_TRUE(comparator_.operator()(0x0000, 0x0001));
+ EXPECT_TRUE(comparator_.operator()(0x0001, 0x1000));
+ EXPECT_FALSE(comparator_.operator()(0x0001, 0x0000));
+ EXPECT_FALSE(comparator_.operator()(0x0002, 0x0002));
+ EXPECT_TRUE(comparator_.operator()(0xFFF6, 0x000A));
+ EXPECT_FALSE(comparator_.operator()(0x000A, 0xFFF6));
+ EXPECT_TRUE(comparator_.operator()(0x0000, 0x8000));
+ EXPECT_FALSE(comparator_.operator()(0x8000, 0x0000));
+}
+
+class LossAccountTest : public ::testing::Test {
+ public:
+ LossAccountTest() {}
+ ~LossAccountTest() {}
+
+ protected:
+ LossAccount loss_account_;
+};
+
+TEST_F(LossAccountTest, Operations) {
+ const size_t kTotal = 100; // Arbitrary values.
+ const size_t kLost = 10;
+
+ LossAccount rhs(kTotal, kLost);
+
+ loss_account_.Add(rhs);
+ EXPECT_EQ(loss_account_.num_total, kTotal);
+ EXPECT_EQ(loss_account_.num_lost, kLost);
+ EXPECT_NEAR(loss_account_.LossRatio(), static_cast<float>(kLost) / kTotal,
+ 0.001f);
+
+ loss_account_.Subtract(rhs);
+ EXPECT_EQ(loss_account_.num_total, 0UL);
+ EXPECT_EQ(loss_account_.num_lost, 0UL);
+ EXPECT_NEAR(loss_account_.LossRatio(), 0.0f, 0.001f);
+}
+
+class BweReceiverTest : public ::testing::Test {
+ public:
+ BweReceiverTest() : bwe_receiver_(kFlowId) {}
+ ~BweReceiverTest() {}
+
+ protected:
+ const int kFlowId = 1; // Arbitrary.
+ BweReceiver bwe_receiver_;
+};
+
+TEST_F(BweReceiverTest, ReceivingRateNoPackets) {
+ EXPECT_EQ(bwe_receiver_.RecentKbps(), static_cast<size_t>(0));
+}
+
+TEST_F(BweReceiverTest, ReceivingRateSinglePacket) {
+ const size_t kPayloadSizeBytes = 500 * 1000;
+ const int64_t kSendTimeUs = 300 * 1000;
+ const int64_t kArrivalTimeMs = kSendTimeUs / 1000 + 100;
+ const uint16_t kSequenceNumber = 1;
+ const int64_t kTimeWindowMs = BweReceiver::kReceivingRateTimeWindowMs;
+
+ const MediaPacket media_packet(kFlowId, kSendTimeUs, kPayloadSizeBytes,
+ kSequenceNumber);
+ bwe_receiver_.ReceivePacket(kArrivalTimeMs, media_packet);
+
+ const size_t kReceivingRateKbps = 8 * kPayloadSizeBytes / kTimeWindowMs;
+
+ EXPECT_NEAR(bwe_receiver_.RecentKbps(), kReceivingRateKbps,
+ static_cast<float>(kReceivingRateKbps) / 100.0f);
+}
+
+TEST_F(BweReceiverTest, ReceivingRateSmallPackets) {
+ const size_t kPayloadSizeBytes = 100 * 1000;
+ const int64_t kTimeGapMs = 50; // Between each packet.
+ const int64_t kOneWayDelayMs = 50;
+
+ for (int i = 1; i < 50; ++i) {
+ int64_t send_time_us = i * kTimeGapMs * 1000;
+ int64_t arrival_time_ms = send_time_us / 1000 + kOneWayDelayMs;
+ uint16_t sequence_number = i;
+ const MediaPacket media_packet(kFlowId, send_time_us, kPayloadSizeBytes,
+ sequence_number);
+ bwe_receiver_.ReceivePacket(arrival_time_ms, media_packet);
+ }
+
+ const size_t kReceivingRateKbps = 8 * kPayloadSizeBytes / kTimeGapMs;
+ EXPECT_NEAR(bwe_receiver_.RecentKbps(), kReceivingRateKbps,
+ static_cast<float>(kReceivingRateKbps) / 100.0f);
+}
+
+TEST_F(BweReceiverTest, PacketLossNoPackets) {
+ EXPECT_EQ(bwe_receiver_.RecentPacketLossRatio(), 0.0f);
+}
+
+TEST_F(BweReceiverTest, PacketLossSinglePacket) {
+ const MediaPacket media_packet(kFlowId, 0, 0, 0);
+ bwe_receiver_.ReceivePacket(0, media_packet);
+ EXPECT_EQ(bwe_receiver_.RecentPacketLossRatio(), 0.0f);
+}
+
+TEST_F(BweReceiverTest, PacketLossContiguousPackets) {
+ const int64_t kTimeWindowMs = BweReceiver::kPacketLossTimeWindowMs;
+ size_t set_capacity = bwe_receiver_.GetSetCapacity();
+
+ for (int i = 0; i < 10; ++i) {
+ uint16_t sequence_number = static_cast<uint16_t>(i);
+ // Sequence_number and flow_id are the only members that matter here.
+ const MediaPacket media_packet(kFlowId, 0, 0, sequence_number);
+ // Arrival time = 0, all packets will be considered.
+ bwe_receiver_.ReceivePacket(0, media_packet);
+ }
+ EXPECT_EQ(bwe_receiver_.RecentPacketLossRatio(), 0.0f);
+
+ for (int i = 30; i > 20; i--) {
+ uint16_t sequence_number = static_cast<uint16_t>(i);
+ // Sequence_number and flow_id are the only members that matter here.
+ const MediaPacket media_packet(kFlowId, 0, 0, sequence_number);
+ // Only the packets sent in this for loop will be considered.
+ bwe_receiver_.ReceivePacket(2 * kTimeWindowMs, media_packet);
+ }
+ EXPECT_EQ(bwe_receiver_.RecentPacketLossRatio(), 0.0f);
+
+ // Should handle uint16_t overflow.
+ for (int i = 0xFFFF - 10; i < 0xFFFF + 10; ++i) {
+ uint16_t sequence_number = static_cast<uint16_t>(i);
+ const MediaPacket media_packet(kFlowId, 0, 0, sequence_number);
+ // Only the packets sent in this for loop will be considered.
+ bwe_receiver_.ReceivePacket(4 * kTimeWindowMs, media_packet);
+ }
+ EXPECT_EQ(bwe_receiver_.RecentPacketLossRatio(), 0.0f);
+
+ // Should handle set overflow.
+ for (int i = 0; i < set_capacity * 1.5; ++i) {
+ uint16_t sequence_number = static_cast<uint16_t>(i);
+ const MediaPacket media_packet(kFlowId, 0, 0, sequence_number);
+ // Only the packets sent in this for loop will be considered.
+ bwe_receiver_.ReceivePacket(6 * kTimeWindowMs, media_packet);
+ }
+ EXPECT_EQ(bwe_receiver_.RecentPacketLossRatio(), 0.0f);
+}
+
+// Should handle duplicates.
+TEST_F(BweReceiverTest, PacketLossDuplicatedPackets) {
+ const int64_t kTimeWindowMs = BweReceiver::kPacketLossTimeWindowMs;
+
+ for (int i = 0; i < 10; ++i) {
+ const MediaPacket media_packet(kFlowId, 0, 0, 0);
+ // Arrival time = 0, all packets will be considered.
+ bwe_receiver_.ReceivePacket(0, media_packet);
+ }
+ EXPECT_EQ(bwe_receiver_.RecentPacketLossRatio(), 0.0f);
+
+ // Missing the element 5.
+ const uint16_t kSequenceNumbers[] = {1, 2, 3, 4, 6, 7, 8};
+ const int kNumPackets = ARRAY_SIZE(kSequenceNumbers);
+
+ // Insert each sequence number twice.
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < kNumPackets; j++) {
+ const MediaPacket media_packet(kFlowId, 0, 0, kSequenceNumbers[j]);
+ // Only the packets sent in this for loop will be considered.
+ bwe_receiver_.ReceivePacket(2 * kTimeWindowMs, media_packet);
+ }
+ }
+
+ EXPECT_NEAR(bwe_receiver_.RecentPacketLossRatio(), 1.0f / (kNumPackets + 1),
+ 0.1f / (kNumPackets + 1));
+}
+
+TEST_F(BweReceiverTest, PacketLossLakingPackets) {
+ size_t set_capacity = bwe_receiver_.GetSetCapacity();
+ EXPECT_LT(set_capacity, static_cast<size_t>(0xFFFF));
+
+ // Missing every other packet.
+ for (size_t i = 0; i < set_capacity; ++i) {
+ if ((i & 1) == 0) { // Only even sequence numbers.
+ uint16_t sequence_number = static_cast<uint16_t>(i);
+ const MediaPacket media_packet(kFlowId, 0, 0, sequence_number);
+ // Arrival time = 0, all packets will be considered.
+ bwe_receiver_.ReceivePacket(0, media_packet);
+ }
+ }
+ EXPECT_NEAR(bwe_receiver_.RecentPacketLossRatio(), 0.5f, 0.01f);
+}
+
+TEST_F(BweReceiverTest, PacketLossLakingFewPackets) {
+ size_t set_capacity = bwe_receiver_.GetSetCapacity();
+ EXPECT_LT(set_capacity, static_cast<size_t>(0xFFFF));
+
+ const int kPeriod = 100;
+ // Missing one for each kPeriod packets.
+ for (size_t i = 0; i < set_capacity; ++i) {
+ if ((i % kPeriod) != 0) {
+ uint16_t sequence_number = static_cast<uint16_t>(i);
+ const MediaPacket media_packet(kFlowId, 0, 0, sequence_number);
+ // Arrival time = 0, all packets will be considered.
+ bwe_receiver_.ReceivePacket(0, media_packet);
+ }
+ }
+ EXPECT_NEAR(bwe_receiver_.RecentPacketLossRatio(), 1.0f / kPeriod,
+ 0.1f / kPeriod);
+}
+
+// Packet's sequence numbers greatly apart, expect high loss.
+TEST_F(BweReceiverTest, PacketLossWideGap) {
+ const int64_t kTimeWindowMs = BweReceiver::kPacketLossTimeWindowMs;
+
+ const MediaPacket media_packet1(0, 0, 0, 1);
+ const MediaPacket media_packet2(0, 0, 0, 1000);
+ // Only these two packets will be considered.
+ bwe_receiver_.ReceivePacket(0, media_packet1);
+ bwe_receiver_.ReceivePacket(0, media_packet2);
+ EXPECT_NEAR(bwe_receiver_.RecentPacketLossRatio(), 0.998f, 0.0001f);
+
+ const MediaPacket media_packet3(0, 0, 0, 0);
+ const MediaPacket media_packet4(0, 0, 0, 0x8000);
+ // Only these two packets will be considered.
+ bwe_receiver_.ReceivePacket(2 * kTimeWindowMs, media_packet3);
+ bwe_receiver_.ReceivePacket(2 * kTimeWindowMs, media_packet4);
+ EXPECT_NEAR(bwe_receiver_.RecentPacketLossRatio(), 0.99994f, 0.00001f);
+}
+
+// Packets arriving unordered should not be counted as losted.
+TEST_F(BweReceiverTest, PacketLossUnorderedPackets) {
+ size_t num_packets = bwe_receiver_.GetSetCapacity() / 2;
+ std::vector<uint16_t> sequence_numbers;
+
+ for (size_t i = 0; i < num_packets; ++i) {
+ sequence_numbers.push_back(static_cast<uint16_t>(i + 1));
+ }
+
+ random_shuffle(sequence_numbers.begin(), sequence_numbers.end());
+
+ for (size_t i = 0; i < num_packets; ++i) {
+ const MediaPacket media_packet(kFlowId, 0, 0, sequence_numbers[i]);
+ // Arrival time = 0, all packets will be considered.
+ bwe_receiver_.ReceivePacket(0, media_packet);
+ }
+
+ EXPECT_EQ(bwe_receiver_.RecentPacketLossRatio(), 0.0f);
+}
+
+TEST_F(BweReceiverTest, RecentKbps) {
+ EXPECT_EQ(bwe_receiver_.RecentKbps(), 0U);
+
+ const size_t kPacketSizeBytes = 1200;
+ const int kNumPackets = 100;
+
+ double window_size_s = bwe_receiver_.BitrateWindowS();
+
+ // Receive packets at the same time.
+ for (int i = 0; i < kNumPackets; ++i) {
+ MediaPacket packet(kFlowId, 0L, kPacketSizeBytes, static_cast<uint16_t>(i));
+ bwe_receiver_.ReceivePacket(0, packet);
+ }
+
+ EXPECT_NEAR(bwe_receiver_.RecentKbps(),
+ (8 * kNumPackets * kPacketSizeBytes) / (1000 * window_size_s),
+ 10);
+
+ int64_t time_gap_ms =
+ 2 * 1000 * window_size_s; // Larger than rate_counter time window.
+
+ MediaPacket packet(kFlowId, time_gap_ms * 1000, kPacketSizeBytes,
+ static_cast<uint16_t>(kNumPackets));
+ bwe_receiver_.ReceivePacket(time_gap_ms, packet);
+
+ EXPECT_NEAR(bwe_receiver_.RecentKbps(),
+ (8 * kPacketSizeBytes) / (1000 * window_size_s), 10);
+}
+
+TEST_F(BweReceiverTest, Loss) {
+ EXPECT_NEAR(bwe_receiver_.GlobalReceiverPacketLossRatio(), 0.0f, 0.001f);
+
+ LossAccount loss_account = bwe_receiver_.LinkedSetPacketLossRatio();
+ EXPECT_NEAR(loss_account.LossRatio(), 0.0f, 0.001f);
+
+ // Insert packets 1-50 and 151-200;
+ for (int i = 1; i <= 200; ++i) {
+ // Packet size and timestamp do not matter here.
+ MediaPacket packet(kFlowId, 0L, 0UL, static_cast<uint16_t>(i));
+ bwe_receiver_.ReceivePacket(0, packet);
+ if (i == 50) {
+ i += 100;
+ }
+ }
+
+ loss_account = bwe_receiver_.LinkedSetPacketLossRatio();
+ EXPECT_NEAR(loss_account.LossRatio(), 0.5f, 0.001f);
+
+ bwe_receiver_.RelieveSetAndUpdateLoss();
+ EXPECT_EQ(bwe_receiver_.received_packets_.size(), 100U / 10);
+
+ // No packet loss within the preserved packets.
+ loss_account = bwe_receiver_.LinkedSetPacketLossRatio();
+ EXPECT_NEAR(loss_account.LossRatio(), 0.0f, 0.001f);
+
+ // RelieveSetAndUpdateLoss automatically updates loss account.
+ EXPECT_NEAR(bwe_receiver_.GlobalReceiverPacketLossRatio(), 0.5f, 0.001f);
+}
+
+} // namespace bwe
+} // namespace testing
+} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/estimators/nada.cc b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/estimators/nada.cc
index c28749d1d3a..d77447f1ea1 100644
--- a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/estimators/nada.cc
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/estimators/nada.cc
@@ -17,8 +17,8 @@
#include <math.h>
#include <algorithm>
#include <vector>
-#include <iostream>
+#include "webrtc/base/common.h"
#include "webrtc/modules/remote_bitrate_estimator/test/estimators/nada.h"
#include "webrtc/modules/remote_bitrate_estimator/test/bwe_test_logging.h"
#include "webrtc/modules/rtp_rtcp/interface/receive_statistics.h"
@@ -27,17 +27,14 @@ namespace webrtc {
namespace testing {
namespace bwe {
-const int NadaBweReceiver::kMedian;
-const int NadaBweSender::kMinRefRateKbps;
-const int NadaBweSender::kMaxRefRateKbps;
-const int64_t NadaBweReceiver::kReceivingRateTimeWindowMs;
+const int64_t NadaBweReceiver::kReceivingRateTimeWindowMs = 500;
NadaBweReceiver::NadaBweReceiver(int flow_id)
- : BweReceiver(flow_id),
+ : BweReceiver(flow_id, kReceivingRateTimeWindowMs),
clock_(0),
last_feedback_ms_(0),
recv_stats_(ReceiveStatistics::Create(&clock_)),
- baseline_delay_ms_(0),
+ baseline_delay_ms_(10000), // Initialized as an upper bound.
delay_signal_ms_(0),
last_congestion_signal_ms_(0),
last_delays_index_(0),
@@ -57,15 +54,19 @@ void NadaBweReceiver::ReceivePacket(int64_t arrival_time_ms,
clock_.AdvanceTimeMilliseconds(arrival_time_ms - clock_.TimeInMilliseconds());
recv_stats_->IncomingPacket(media_packet.header(),
media_packet.payload_size(), false);
- int64_t delay_ms = arrival_time_ms -
- media_packet.creation_time_us() / 1000; // Refered as x_n.
+ // Refered as x_n.
+ int64_t delay_ms = arrival_time_ms - media_packet.sender_timestamp_ms();
+
// The min should be updated within the first 10 minutes.
if (clock_.TimeInMilliseconds() < 10 * 60 * 1000) {
baseline_delay_ms_ = std::min(baseline_delay_ms_, delay_ms);
}
+
delay_signal_ms_ = delay_ms - baseline_delay_ms_; // Refered as d_n.
+ const int kMedian = ARRAY_SIZE(last_delays_ms_);
last_delays_ms_[(last_delays_index_++) % kMedian] = delay_signal_ms_;
int size = std::min(last_delays_index_, kMedian);
+
int64_t median_filtered_delay_ms_ = MedianFilter(last_delays_ms_, size);
exp_smoothed_delay_ms_ = ExponentialSmoothingFilter(
median_filtered_delay_ms_, exp_smoothed_delay_ms_, kAlpha);
@@ -83,9 +84,8 @@ void NadaBweReceiver::ReceivePacket(int64_t arrival_time_ms,
est_queuing_delay_signal_ms_ = 0;
}
- received_packets_.Insert(media_packet.sequence_number(),
- media_packet.send_time_ms(), arrival_time_ms,
- media_packet.payload_size());
+ // Log received packet information.
+ BweReceiver::ReceivePacket(arrival_time_ms, media_packet);
}
FeedbackPacket* NadaBweReceiver::GetFeedback(int64_t now_ms) {
@@ -109,75 +109,35 @@ FeedbackPacket* NadaBweReceiver::GetFeedback(int64_t now_ms) {
last_feedback_ms_ = now_ms;
last_congestion_signal_ms_ = congestion_signal_ms;
- PacketIdentifierNode* latest = *(received_packets_.begin());
- int64_t corrected_send_time_ms =
- latest->send_time_ms + now_ms - latest->arrival_time_ms;
+ int64_t corrected_send_time_ms = 0L;
+
+ if (!received_packets_.empty()) {
+ PacketIdentifierNode* latest = *(received_packets_.begin());
+ corrected_send_time_ms =
+ latest->send_time_ms + now_ms - latest->arrival_time_ms;
+ }
// Sends a tuple containing latest values of <d_hat_n, d_tilde_n, x_n, x'_n,
// R_r> and additional information.
- return new NadaFeedback(flow_id_, now_ms, exp_smoothed_delay_ms_,
+ return new NadaFeedback(flow_id_, now_ms * 1000, exp_smoothed_delay_ms_,
est_queuing_delay_signal_ms_, congestion_signal_ms,
- derivative, RecentReceivingRate(),
- corrected_send_time_ms);
-}
-
-// For a given time window, compute the receiving speed rate in kbps.
-// As described below, three cases are considered depending on the number of
-// packets received.
-size_t NadaBweReceiver::RecentReceivingRate() {
- // If the receiver didn't receive any packet, return 0.
- if (received_packets_.empty()) {
- return 0.0f;
- }
- size_t total_size = 0;
- int number_packets = 0;
-
- PacketNodeIt node_it = received_packets_.begin();
-
- int64_t last_time_ms = (*node_it)->arrival_time_ms;
- int64_t start_time_ms = last_time_ms;
- PacketNodeIt end = received_packets_.end();
-
- // Stops after including the first packet out of the timeWindow.
- // Ameliorates results when there are wide gaps between packets.
- // E.g. Large packets : p1(0ms), p2(3000ms).
- while (node_it != end) {
- total_size += (*node_it)->payload_size;
- last_time_ms = (*node_it)->arrival_time_ms;
- ++number_packets;
- if ((*node_it)->arrival_time_ms <
- start_time_ms - kReceivingRateTimeWindowMs) {
- break;
- }
- ++node_it;
- }
-
- int64_t corrected_time_ms;
- // If the receiver received a single packet, return its size*8/timeWindow.
- if (number_packets == 1) {
- corrected_time_ms = kReceivingRateTimeWindowMs;
- }
- // If the receiver received multiple packets, use as time interval the gap
- // between first and last packet falling in the timeWindow corrected by the
- // factor number_packets/(number_packets-1).
- // E.g: Let timeWindow = 500ms, payload_size = 500 bytes, number_packets = 2,
- // packets received at t1(0ms) and t2(499 or 501ms). This prevent the function
- // from returning ~2*8, sending instead a more likely ~1*8 kbps.
- else {
- corrected_time_ms = (number_packets * (start_time_ms - last_time_ms)) /
- (number_packets - 1);
- }
-
- // Converting from bytes/ms to kbits/s.
- return static_cast<size_t>(8 * total_size / corrected_time_ms);
+ derivative, RecentKbps(), corrected_send_time_ms);
}
+// If size is even, the median is the average of the two middlemost numbers.
int64_t NadaBweReceiver::MedianFilter(int64_t* last_delays_ms, int size) {
- // Typically, size = 5.
std::vector<int64_t> array_copy(last_delays_ms, last_delays_ms + size);
std::nth_element(array_copy.begin(), array_copy.begin() + size / 2,
array_copy.end());
- return array_copy.at(size / 2);
+ if (size % 2 == 1) {
+ // Typically, size = 5. For odd size values, right and left are equal.
+ return array_copy.at(size / 2);
+ }
+ int64_t right = array_copy.at(size / 2);
+ std::nth_element(array_copy.begin(), array_copy.begin() + (size - 1) / 2,
+ array_copy.end());
+ int64_t left = array_copy.at((size - 1) / 2);
+ return (left + right + 1) / 2;
}
int64_t NadaBweReceiver::ExponentialSmoothingFilter(int64_t new_value,
@@ -192,16 +152,16 @@ int64_t NadaBweReceiver::ExponentialSmoothingFilter(int64_t new_value,
// Implementation according to Cisco's proposal by default.
NadaBweSender::NadaBweSender(int kbps, BitrateObserver* observer, Clock* clock)
- : clock_(clock),
+ : BweSender(kbps), // Referred as "Reference Rate" = R_n.,
+ clock_(clock),
observer_(observer),
- bitrate_kbps_(kbps),
original_operating_mode_(true) {
}
NadaBweSender::NadaBweSender(BitrateObserver* observer, Clock* clock)
- : clock_(clock),
+ : BweSender(kMinBitrateKbps), // Referred as "Reference Rate" = R_n.
+ clock_(clock),
observer_(observer),
- bitrate_kbps_(kMinRefRateKbps),
original_operating_mode_(true) {
}
@@ -251,23 +211,23 @@ void NadaBweSender::GiveFeedback(const FeedbackPacket& feedback) {
if (fb.congestion_signal() == fb.est_queuing_delay_signal_ms() &&
fb.est_queuing_delay_signal_ms() < kQueuingDelayUpperBoundMs &&
fb.exp_smoothed_delay_ms() <
- kMinRefRateKbps / kProportionalityDelayBits &&
+ kMinBitrateKbps / kProportionalityDelayBits &&
fb.derivative() < kDerivativeUpperBound &&
- fb.receiving_rate() > kMinRefRateKbps) {
+ fb.receiving_rate() > kMinBitrateKbps) {
AcceleratedRampUp(fb);
} else if (fb.congestion_signal() > kMaxCongestionSignalMs ||
fb.exp_smoothed_delay_ms() > kMaxCongestionSignalMs) {
AcceleratedRampDown(fb);
} else {
double bitrate_reference =
- (2.0 * bitrate_kbps_) / (kMaxRefRateKbps + kMinRefRateKbps);
+ (2.0 * bitrate_kbps_) / (kMaxBitrateKbps + kMinBitrateKbps);
double smoothing_factor = pow(bitrate_reference, 0.75);
GradualRateUpdate(fb, delta_s, smoothing_factor);
}
}
- bitrate_kbps_ = std::min(bitrate_kbps_, kMaxRefRateKbps);
- bitrate_kbps_ = std::max(bitrate_kbps_, kMinRefRateKbps);
+ bitrate_kbps_ = std::min(bitrate_kbps_, kMaxBitrateKbps);
+ bitrate_kbps_ = std::max(bitrate_kbps_, kMinBitrateKbps);
observer_->OnNetworkChanged(1000 * bitrate_kbps_, 0, rtt_ms);
}
@@ -311,11 +271,11 @@ void NadaBweSender::GradualRateUpdate(const NadaFeedback& fb,
float x_hat = fb.congestion_signal() + kEta * kTauOMs * fb.derivative();
float kTheta =
- kPriorityWeight * (kMaxRefRateKbps - kMinRefRateKbps) * kReferenceDelayMs;
+ kPriorityWeight * (kMaxBitrateKbps - kMinBitrateKbps) * kReferenceDelayMs;
int original_increase =
static_cast<int>((kKappa * delta_s *
- (kTheta - (bitrate_kbps_ - kMinRefRateKbps) * x_hat)) /
+ (kTheta - (bitrate_kbps_ - kMinBitrateKbps) * x_hat)) /
(kTauOMs * kTauOMs) +
0.5f);
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/estimators/nada.h b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/estimators/nada.h
index 6fea6240355..eee90cf4632 100644
--- a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/estimators/nada.h
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/estimators/nada.h
@@ -40,13 +40,12 @@ class NadaBweReceiver : public BweReceiver {
const MediaPacket& media_packet) override;
FeedbackPacket* GetFeedback(int64_t now_ms) override;
- size_t RecentReceivingRate();
static int64_t MedianFilter(int64_t* v, int size);
static int64_t ExponentialSmoothingFilter(int64_t new_value,
int64_t last_smoothed_value,
float alpha);
- static const int64_t kReceivingRateTimeWindowMs = 500;
+ static const int64_t kReceivingRateTimeWindowMs;
private:
SimulatedClock clock_;
@@ -58,9 +57,7 @@ class NadaBweReceiver : public BweReceiver {
int last_delays_index_;
int64_t exp_smoothed_delay_ms_; // Referred as d_hat_n.
int64_t est_queuing_delay_signal_ms_; // Referred as d_tilde_n.
-
- static const int kMedian = 5; // Used for k-points Median Filter.
- int64_t last_delays_ms_[kMedian]; // Used for Median Filter.
+ int64_t last_delays_ms_[5]; // Used for Median Filter.
};
class NadaBweSender : public BweSender {
@@ -89,16 +86,12 @@ class NadaBweSender : public BweSender {
}
int64_t NowMs() const { return clock_->TimeInMilliseconds(); }
- static const int kMinRefRateKbps = 150; // Referred as R_min.
- static const int kMaxRefRateKbps = 1500; // Referred as R_max.
-
private:
Clock* const clock_;
BitrateObserver* const observer_;
// Used as an upper bound for calling AcceleratedRampDown.
- const float kMaxCongestionSignalMs = 40.0f + kMinRefRateKbps / 15;
+ const float kMaxCongestionSignalMs = 40.0f + kMinBitrateKbps / 15;
// Referred as R_min, default initialization for bitrate R_n.
- int bitrate_kbps_; // Referred as "Reference Rate" = R_n.
int64_t last_feedback_ms_ = 0;
// Referred as delta_0, initialized as an upper bound.
int64_t min_feedback_delay_ms_ = 200;
@@ -106,7 +99,7 @@ class NadaBweSender : public BweSender {
int64_t min_round_trip_time_ms_ = 100;
bool original_operating_mode_;
- DISALLOW_IMPLICIT_CONSTRUCTORS(NadaBweSender);
+ RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(NadaBweSender);
};
} // namespace bwe
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/estimators/nada_unittest.cc b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/estimators/nada_unittest.cc
index 967c2b2b5e4..a0f56b73b7d 100644
--- a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/estimators/nada_unittest.cc
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/estimators/nada_unittest.cc
@@ -14,6 +14,7 @@
#include <numeric>
#include "webrtc/base/common.h"
+#include "webrtc/base/scoped_ptr.h"
#include "webrtc/modules/remote_bitrate_estimator/test/bwe_test_framework.h"
#include "webrtc/modules/remote_bitrate_estimator/test/packet.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -112,11 +113,12 @@ class NadaSenderSideTest : public ::testing::Test {
};
class NadaReceiverSideTest : public ::testing::Test {
- protected:
+ public:
NadaReceiverSideTest() : nada_receiver_(kFlowId) {}
~NadaReceiverSideTest() {}
- const int kFlowId = 0;
+ protected:
+ const int kFlowId = 1; // Arbitrary.
NadaBweReceiver nada_receiver_;
};
@@ -165,9 +167,9 @@ class NadaFbGenerator {
// Verify if AcceleratedRampUp is called and that bitrate increases.
TEST_F(NadaSenderSideTest, AcceleratedRampUp) {
- const int64_t kRefSignalMs = 3;
+ const int64_t kRefSignalMs = 1;
const int64_t kOneWayDelayMs = 50;
- int original_bitrate = 2 * NadaBweSender::kMinRefRateKbps;
+ int original_bitrate = 2 * kMinBitrateKbps;
size_t receiving_rate = static_cast<size_t>(original_bitrate);
int64_t send_time_ms = nada_sender_.NowMs() - kOneWayDelayMs;
@@ -199,7 +201,7 @@ TEST_F(NadaSenderSideTest, AcceleratedRampUp) {
// Verify if AcceleratedRampDown is called and if bitrate decreases.
TEST_F(NadaSenderSideTest, AcceleratedRampDown) {
const int64_t kOneWayDelayMs = 50;
- int original_bitrate = 3 * NadaBweSender::kMinRefRateKbps;
+ int original_bitrate = 3 * kMinBitrateKbps;
size_t receiving_rate = static_cast<size_t>(original_bitrate);
int64_t send_time_ms = nada_sender_.NowMs() - kOneWayDelayMs;
@@ -216,8 +218,7 @@ TEST_F(NadaSenderSideTest, AcceleratedRampDown) {
// Updates the bitrate according to the receiving rate and other constant
// parameters.
nada_sender_.AcceleratedRampDown(congested_fb);
- int bitrate_2_kbps =
- std::max(nada_sender_.bitrate_kbps(), NadaBweSender::kMinRefRateKbps);
+ int bitrate_2_kbps = std::max(nada_sender_.bitrate_kbps(), kMinBitrateKbps);
EXPECT_EQ(bitrate_2_kbps, bitrate_1_kbps);
}
@@ -225,7 +226,7 @@ TEST_F(NadaSenderSideTest, GradualRateUpdate) {
const int64_t kDeltaSMs = 20;
const int64_t kRefSignalMs = 20;
const int64_t kOneWayDelayMs = 50;
- int original_bitrate = 2 * NadaBweSender::kMinRefRateKbps;
+ int original_bitrate = 2 * kMinBitrateKbps;
size_t receiving_rate = static_cast<size_t>(original_bitrate);
int64_t send_time_ms = nada_sender_.NowMs() - kOneWayDelayMs;
@@ -251,8 +252,8 @@ TEST_F(NadaSenderSideTest, GradualRateUpdate) {
// Sending bitrate should decrease and reach its Min bound.
TEST_F(NadaSenderSideTest, VeryLowBandwith) {
const int64_t kOneWayDelayMs = 50;
- const int kMin = NadaBweSender::kMinRefRateKbps;
- size_t receiving_rate = static_cast<size_t>(kMin);
+
+ size_t receiving_rate = static_cast<size_t>(kMinBitrateKbps);
int64_t send_time_ms = nada_sender_.NowMs() - kOneWayDelayMs;
NadaFeedback extremely_congested_fb =
@@ -260,7 +261,7 @@ TEST_F(NadaSenderSideTest, VeryLowBandwith) {
NadaFeedback congested_fb =
NadaFbGenerator::CongestedFb(receiving_rate, send_time_ms);
- nada_sender_.set_bitrate_kbps(5 * kMin);
+ nada_sender_.set_bitrate_kbps(5 * kMinBitrateKbps);
nada_sender_.set_original_operating_mode(true);
for (int i = 0; i < 100; ++i) {
// Trigger GradualRateUpdate mode.
@@ -268,26 +269,25 @@ TEST_F(NadaSenderSideTest, VeryLowBandwith) {
}
// The original implementation doesn't allow the bitrate to stay at kMin,
// even if the congestion signal is very high.
- EXPECT_GE(nada_sender_.bitrate_kbps(), kMin);
+ EXPECT_GE(nada_sender_.bitrate_kbps(), kMinBitrateKbps);
nada_sender_.set_original_operating_mode(false);
- nada_sender_.set_bitrate_kbps(5 * kMin);
+ nada_sender_.set_bitrate_kbps(5 * kMinBitrateKbps);
- for (int i = 0; i < 100; ++i) {
+ for (int i = 0; i < 1000; ++i) {
int previous_bitrate = nada_sender_.bitrate_kbps();
// Trigger AcceleratedRampDown mode.
nada_sender_.GiveFeedback(congested_fb);
EXPECT_LE(nada_sender_.bitrate_kbps(), previous_bitrate);
}
- EXPECT_EQ(nada_sender_.bitrate_kbps(), kMin);
+ EXPECT_EQ(nada_sender_.bitrate_kbps(), kMinBitrateKbps);
}
// Sending bitrate should increase and reach its Max bound.
TEST_F(NadaSenderSideTest, VeryHighBandwith) {
const int64_t kOneWayDelayMs = 50;
- const int kMax = NadaBweSender::kMaxRefRateKbps;
- const size_t kRecentReceivingRate = static_cast<size_t>(kMax);
- const int64_t kRefSignalMs = 5;
+ const size_t kRecentReceivingRate = static_cast<size_t>(kMaxBitrateKbps);
+ const int64_t kRefSignalMs = 1;
int64_t send_time_ms = nada_sender_.NowMs() - kOneWayDelayMs;
NadaFeedback not_congested_fb = NadaFbGenerator::NotCongestedFb(
@@ -299,280 +299,164 @@ TEST_F(NadaSenderSideTest, VeryHighBandwith) {
nada_sender_.GiveFeedback(not_congested_fb);
EXPECT_GE(nada_sender_.bitrate_kbps(), previous_bitrate);
}
- EXPECT_EQ(nada_sender_.bitrate_kbps(), kMax);
+ EXPECT_EQ(nada_sender_.bitrate_kbps(), kMaxBitrateKbps);
nada_sender_.set_original_operating_mode(false);
- nada_sender_.set_bitrate_kbps(NadaBweSender::kMinRefRateKbps);
+ nada_sender_.set_bitrate_kbps(kMinBitrateKbps);
for (int i = 0; i < 100; ++i) {
int previous_bitrate = nada_sender_.bitrate_kbps();
nada_sender_.GiveFeedback(not_congested_fb);
EXPECT_GE(nada_sender_.bitrate_kbps(), previous_bitrate);
}
- EXPECT_EQ(nada_sender_.bitrate_kbps(), kMax);
+ EXPECT_EQ(nada_sender_.bitrate_kbps(), kMaxBitrateKbps);
}
-TEST_F(NadaReceiverSideTest, ReceivingRateNoPackets) {
- EXPECT_EQ(nada_receiver_.RecentReceivingRate(), static_cast<size_t>(0));
+TEST_F(NadaReceiverSideTest, FeedbackInitialCases) {
+ rtc::scoped_ptr<NadaFeedback> nada_feedback(
+ static_cast<NadaFeedback*>(nada_receiver_.GetFeedback(0)));
+ EXPECT_EQ(nada_feedback, nullptr);
+
+ nada_feedback.reset(
+ static_cast<NadaFeedback*>(nada_receiver_.GetFeedback(100)));
+ EXPECT_EQ(nada_feedback->exp_smoothed_delay_ms(), -1);
+ EXPECT_EQ(nada_feedback->est_queuing_delay_signal_ms(), 0L);
+ EXPECT_EQ(nada_feedback->congestion_signal(), 0L);
+ EXPECT_EQ(nada_feedback->derivative(), 0.0f);
+ EXPECT_EQ(nada_feedback->receiving_rate(), 0.0f);
}
-TEST_F(NadaReceiverSideTest, ReceivingRateSinglePacket) {
- const size_t kPayloadSizeBytes = 500 * 1000;
- const int64_t kSendTimeUs = 300 * 1000;
- const int64_t kArrivalTimeMs = kSendTimeUs / 1000 + 100;
- const uint16_t kSequenceNumber = 1;
- const int64_t kTimeWindowMs = NadaBweReceiver::kReceivingRateTimeWindowMs;
-
- const MediaPacket media_packet(kFlowId, kSendTimeUs, kPayloadSizeBytes,
- kSequenceNumber);
- nada_receiver_.ReceivePacket(kArrivalTimeMs, media_packet);
-
- const size_t kReceivingRateKbps = 8 * kPayloadSizeBytes / kTimeWindowMs;
-
- EXPECT_EQ(nada_receiver_.RecentReceivingRate(), kReceivingRateKbps);
-}
-
-TEST_F(NadaReceiverSideTest, ReceivingRateLargePackets) {
- const size_t kPayloadSizeBytes = 3000 * 1000;
- const int64_t kTimeGapMs = 3000; // Between each packet.
- const int64_t kOneWayDelayMs = 1000;
-
- for (int i = 1; i < 5; ++i) {
- int64_t send_time_us = i * kTimeGapMs * 1000;
- int64_t arrival_time_ms = send_time_us / 1000 + kOneWayDelayMs;
- uint16_t sequence_number = i;
- const MediaPacket media_packet(kFlowId, send_time_us, kPayloadSizeBytes,
- sequence_number);
- nada_receiver_.ReceivePacket(arrival_time_ms, media_packet);
- }
-
- const size_t kReceivingRateKbps = 8 * kPayloadSizeBytes / kTimeGapMs;
- EXPECT_EQ(nada_receiver_.RecentReceivingRate(), kReceivingRateKbps);
-}
-
-TEST_F(NadaReceiverSideTest, ReceivingRateSmallPackets) {
- const size_t kPayloadSizeBytes = 100 * 1000;
+TEST_F(NadaReceiverSideTest, FeedbackEmptyQueues) {
const int64_t kTimeGapMs = 50; // Between each packet.
const int64_t kOneWayDelayMs = 50;
- for (int i = 1; i < 50; ++i) {
+ // No added latency, delay = kOneWayDelayMs.
+ for (int i = 1; i < 10; ++i) {
int64_t send_time_us = i * kTimeGapMs * 1000;
int64_t arrival_time_ms = send_time_us / 1000 + kOneWayDelayMs;
- uint16_t sequence_number = i;
- const MediaPacket media_packet(kFlowId, send_time_us, kPayloadSizeBytes,
- sequence_number);
- nada_receiver_.ReceivePacket(arrival_time_ms, media_packet);
- }
-
- const size_t kReceivingRateKbps = 8 * kPayloadSizeBytes / kTimeGapMs;
- EXPECT_EQ(nada_receiver_.RecentReceivingRate(), kReceivingRateKbps);
-}
-
-TEST_F(NadaReceiverSideTest, ReceivingRateIntermittentPackets) {
- const size_t kPayloadSizeBytes = 100 * 1000;
- const int64_t kTimeGapMs = 50; // Between each packet.
- const int64_t kFirstSendTimeMs = 0;
- const int64_t kOneWayDelayMs = 50;
-
- // Gap between first and other packets
- const MediaPacket media_packet(kFlowId, kFirstSendTimeMs, kPayloadSizeBytes,
- 1);
- nada_receiver_.ReceivePacket(kFirstSendTimeMs + kOneWayDelayMs, media_packet);
-
- const int64_t kDelayAfterFirstPacketMs = 1000;
- const int kNumPackets = 5; // Small enough so that all packets are covered.
- EXPECT_LT((kNumPackets - 2) * kTimeGapMs,
- NadaBweReceiver::kReceivingRateTimeWindowMs);
- const int64_t kTimeWindowMs =
- kDelayAfterFirstPacketMs + (kNumPackets - 2) * kTimeGapMs;
-
- for (int i = 2; i <= kNumPackets; ++i) {
- int64_t send_time_us =
- ((i - 2) * kTimeGapMs + kFirstSendTimeMs + kDelayAfterFirstPacketMs) *
- 1000;
- int64_t arrival_time_ms = send_time_us / 1000 + kOneWayDelayMs;
- uint16_t sequence_number = i;
- const MediaPacket media_packet(kFlowId, send_time_us, kPayloadSizeBytes,
- sequence_number);
+ uint16_t sequence_number = static_cast<uint16_t>(i);
+ // Payload sizes are not important here.
+ const MediaPacket media_packet(kFlowId, send_time_us, 0, sequence_number);
nada_receiver_.ReceivePacket(arrival_time_ms, media_packet);
}
- const size_t kTotalReceivedKb = 8 * kNumPackets * kPayloadSizeBytes;
- const int64_t kCorrectedTimeWindowMs =
- (kTimeWindowMs * kNumPackets) / (kNumPackets - 1);
- EXPECT_EQ(nada_receiver_.RecentReceivingRate(),
- kTotalReceivedKb / kCorrectedTimeWindowMs);
-}
-
-TEST_F(NadaReceiverSideTest, ReceivingRateDuplicatedPackets) {
- const size_t kPayloadSizeBytes = 500 * 1000;
- const int64_t kSendTimeUs = 300 * 1000;
- const int64_t kArrivalTimeMs = kSendTimeUs / 1000 + 100;
- const uint16_t kSequenceNumber = 1;
- const int64_t kTimeWindowMs = NadaBweReceiver::kReceivingRateTimeWindowMs;
-
- // Insert the same packet twice.
- for (int i = 0; i < 2; ++i) {
- const MediaPacket media_packet(kFlowId, kSendTimeUs + 50 * i,
- kPayloadSizeBytes, kSequenceNumber);
- nada_receiver_.ReceivePacket(kArrivalTimeMs + 50 * i, media_packet);
- }
- // Should be counted only once.
- const size_t kReceivingRateKbps = 8 * kPayloadSizeBytes / kTimeWindowMs;
-
- EXPECT_EQ(nada_receiver_.RecentReceivingRate(), kReceivingRateKbps);
+ // Baseline delay will be equal kOneWayDelayMs.
+ rtc::scoped_ptr<NadaFeedback> nada_feedback(
+ static_cast<NadaFeedback*>(nada_receiver_.GetFeedback(500)));
+ EXPECT_EQ(nada_feedback->exp_smoothed_delay_ms(), 0L);
+ EXPECT_EQ(nada_feedback->est_queuing_delay_signal_ms(), 0L);
+ EXPECT_EQ(nada_feedback->congestion_signal(), 0L);
+ EXPECT_EQ(nada_feedback->derivative(), 0.0f);
}
-TEST_F(NadaReceiverSideTest, PacketLossNoPackets) {
- EXPECT_EQ(nada_receiver_.RecentPacketLossRatio(), 0.0f);
-}
+TEST_F(NadaReceiverSideTest, FeedbackIncreasingDelay) {
+ // Since packets are 100ms apart, each one corresponds to a feedback.
+ const int64_t kTimeGapMs = 100; // Between each packet.
-TEST_F(NadaReceiverSideTest, PacketLossSinglePacket) {
- const MediaPacket media_packet(kFlowId, 0, 0, 0);
- nada_receiver_.ReceivePacket(0, media_packet);
- EXPECT_EQ(nada_receiver_.RecentPacketLossRatio(), 0.0f);
-}
+ // Raw delays are = [10 20 30 40 50 60 70 80] ms.
+ // Baseline delay will be 50 ms.
+ // Delay signals should be: [0 10 20 30 40 50 60 70] ms.
+ const int64_t kMedianFilteredDelaysMs[] = {0, 5, 10, 15, 20, 30, 40, 50};
+ const int kNumPackets = ARRAY_SIZE(kMedianFilteredDelaysMs);
+ const float kAlpha = 0.1f; // Used for exponential smoothing.
-TEST_F(NadaReceiverSideTest, PacketLossContiguousPackets) {
- const int64_t kTimeWindowMs = NadaBweReceiver::kPacketLossTimeWindowMs;
- size_t set_capacity = nada_receiver_.GetSetCapacity();
+ int64_t exp_smoothed_delays_ms[kNumPackets];
+ exp_smoothed_delays_ms[0] = kMedianFilteredDelaysMs[0];
- for (int i = 0; i < 10; ++i) {
- uint16_t sequence_number = static_cast<uint16_t>(i);
- // Sequence_number and flow_id are the only members that matter here.
- const MediaPacket media_packet(kFlowId, 0, 0, sequence_number);
- // Arrival time = 0, all packets will be considered.
- nada_receiver_.ReceivePacket(0, media_packet);
+ for (int i = 1; i < kNumPackets; ++i) {
+ exp_smoothed_delays_ms[i] = static_cast<int64_t>(
+ kAlpha * kMedianFilteredDelaysMs[i] +
+ (1.0f - kAlpha) * exp_smoothed_delays_ms[i - 1] + 0.5f);
}
- EXPECT_EQ(nada_receiver_.RecentPacketLossRatio(), 0.0f);
- for (int i = 30; i > 20; i--) {
- uint16_t sequence_number = static_cast<uint16_t>(i);
- // Sequence_number and flow_id are the only members that matter here.
- const MediaPacket media_packet(kFlowId, 0, 0, sequence_number);
- // Only the packets sent in this for loop will be considered.
- nada_receiver_.ReceivePacket(2 * kTimeWindowMs, media_packet);
- }
- EXPECT_EQ(nada_receiver_.RecentPacketLossRatio(), 0.0f);
-
- // Should handle uint16_t overflow.
- for (int i = 0xFFFF - 10; i < 0xFFFF + 10; ++i) {
- uint16_t sequence_number = static_cast<uint16_t>(i);
- const MediaPacket media_packet(kFlowId, 0, 0, sequence_number);
- // Only the packets sent in this for loop will be considered.
- nada_receiver_.ReceivePacket(4 * kTimeWindowMs, media_packet);
- }
- EXPECT_EQ(nada_receiver_.RecentPacketLossRatio(), 0.0f);
-
- // Should handle set overflow.
- for (int i = 0; i < set_capacity * 1.5; ++i) {
- uint16_t sequence_number = static_cast<uint16_t>(i);
- const MediaPacket media_packet(kFlowId, 0, 0, sequence_number);
- // Only the packets sent in this for loop will be considered.
- nada_receiver_.ReceivePacket(6 * kTimeWindowMs, media_packet);
- }
- EXPECT_EQ(nada_receiver_.RecentPacketLossRatio(), 0.0f);
-}
-
-// Should handle duplicates.
-TEST_F(NadaReceiverSideTest, PacketLossDuplicatedPackets) {
- const int64_t kTimeWindowMs = NadaBweReceiver::kPacketLossTimeWindowMs;
-
- for (int i = 0; i < 10; ++i) {
- const MediaPacket media_packet(kFlowId, 0, 0, 0);
- // Arrival time = 0, all packets will be considered.
- nada_receiver_.ReceivePacket(0, media_packet);
- }
- EXPECT_EQ(nada_receiver_.RecentPacketLossRatio(), 0.0f);
-
- // Missing the element 5.
- const uint16_t kSequenceNumbers[] = {1, 2, 3, 4, 6, 7, 8};
- const int kNumPackets = ARRAY_SIZE(kSequenceNumbers);
-
- // Insert each sequence number twice.
- for (int i = 0; i < 2; ++i) {
- for (int j = 0; j < kNumPackets; j++) {
- const MediaPacket media_packet(kFlowId, 0, 0, kSequenceNumbers[j]);
- // Only the packets sent in this for loop will be considered.
- nada_receiver_.ReceivePacket(2 * kTimeWindowMs, media_packet);
- }
- }
-
- EXPECT_NEAR(nada_receiver_.RecentPacketLossRatio(), 1.0f / (kNumPackets + 1),
- 0.1f / (kNumPackets + 1));
-}
+ for (int i = 0; i < kNumPackets; ++i) {
+ int64_t send_time_us = (i + 1) * kTimeGapMs * 1000;
+ int64_t arrival_time_ms = send_time_us / 1000 + 10 * (i + 1);
+ uint16_t sequence_number = static_cast<uint16_t>(i + 1);
+ // Payload sizes are not important here.
+ const MediaPacket media_packet(kFlowId, send_time_us, 0, sequence_number);
+ nada_receiver_.ReceivePacket(arrival_time_ms, media_packet);
-TEST_F(NadaReceiverSideTest, PacketLossLakingPackets) {
- size_t set_capacity = nada_receiver_.GetSetCapacity();
- EXPECT_LT(set_capacity, static_cast<size_t>(0xFFFF));
-
- // Missing every other packet.
- for (size_t i = 0; i < set_capacity; ++i) {
- if ((i & 1) == 0) { // Only even sequence numbers.
- uint16_t sequence_number = static_cast<uint16_t>(i);
- const MediaPacket media_packet(kFlowId, 0, 0, sequence_number);
- // Arrival time = 0, all packets will be considered.
- nada_receiver_.ReceivePacket(0, media_packet);
+ rtc::scoped_ptr<NadaFeedback> nada_feedback(static_cast<NadaFeedback*>(
+ nada_receiver_.GetFeedback(arrival_time_ms)));
+ EXPECT_EQ(nada_feedback->exp_smoothed_delay_ms(),
+ exp_smoothed_delays_ms[i]);
+ // Since delay signals are lower than 50ms, they will not be non-linearly
+ // warped.
+ EXPECT_EQ(nada_feedback->est_queuing_delay_signal_ms(),
+ exp_smoothed_delays_ms[i]);
+ // Zero loss, congestion signal = queuing_delay
+ EXPECT_EQ(nada_feedback->congestion_signal(), exp_smoothed_delays_ms[i]);
+ if (i == 0) {
+ EXPECT_NEAR(nada_feedback->derivative(),
+ static_cast<float>(exp_smoothed_delays_ms[i]) / kTimeGapMs,
+ 0.005f);
+ } else {
+ EXPECT_NEAR(nada_feedback->derivative(),
+ static_cast<float>(exp_smoothed_delays_ms[i] -
+ exp_smoothed_delays_ms[i - 1]) /
+ kTimeGapMs,
+ 0.005f);
}
}
- EXPECT_NEAR(nada_receiver_.RecentPacketLossRatio(), 0.5f, 0.01f);
}
-TEST_F(NadaReceiverSideTest, PacketLossLakingFewPackets) {
- size_t set_capacity = nada_receiver_.GetSetCapacity();
- EXPECT_LT(set_capacity, static_cast<size_t>(0xFFFF));
-
- const int kPeriod = 100;
- // Missing one for each kPeriod packets.
- for (size_t i = 0; i < set_capacity; ++i) {
- if ((i % kPeriod) != 0) {
- uint16_t sequence_number = static_cast<uint16_t>(i);
- const MediaPacket media_packet(kFlowId, 0, 0, sequence_number);
- // Arrival time = 0, all packets will be considered.
- nada_receiver_.ReceivePacket(0, media_packet);
- }
+int64_t Warp(int64_t input) {
+ const int64_t kMinThreshold = 50; // Referred as d_th.
+ const int64_t kMaxThreshold = 400; // Referred as d_max.
+ if (input < kMinThreshold) {
+ return input;
+ } else if (input < kMaxThreshold) {
+ return static_cast<int64_t>(
+ pow((static_cast<double>(kMaxThreshold - input)) /
+ (kMaxThreshold - kMinThreshold),
+ 4.0) *
+ kMinThreshold);
+ } else {
+ return 0L;
}
- EXPECT_NEAR(nada_receiver_.RecentPacketLossRatio(), 1.0f / kPeriod,
- 0.1f / kPeriod);
-}
-
-// Packet's sequence numbers greatly apart, expect high loss.
-TEST_F(NadaReceiverSideTest, PacketLossWideGap) {
- const int64_t kTimeWindowMs = NadaBweReceiver::kPacketLossTimeWindowMs;
-
- const MediaPacket media_packet1(0, 0, 0, 1);
- const MediaPacket media_packet2(0, 0, 0, 1000);
- // Only these two packets will be considered.
- nada_receiver_.ReceivePacket(0, media_packet1);
- nada_receiver_.ReceivePacket(0, media_packet2);
- EXPECT_NEAR(nada_receiver_.RecentPacketLossRatio(), 0.998f, 0.0001f);
-
- const MediaPacket media_packet3(0, 0, 0, 0);
- const MediaPacket media_packet4(0, 0, 0, 0x8000);
- // Only these two packets will be considered.
- nada_receiver_.ReceivePacket(2 * kTimeWindowMs, media_packet3);
- nada_receiver_.ReceivePacket(2 * kTimeWindowMs, media_packet4);
- EXPECT_NEAR(nada_receiver_.RecentPacketLossRatio(), 0.99994f, 0.00001f);
}
-// Packets arriving unordered should not be counted as losted.
-TEST_F(NadaReceiverSideTest, PacketLossUnorderedPackets) {
- size_t num_packets = nada_receiver_.GetSetCapacity() / 2;
- std::vector<uint16_t> sequence_numbers;
-
- for (size_t i = 0; i < num_packets; ++i) {
- sequence_numbers.push_back(static_cast<uint16_t>(i + 1));
+TEST_F(NadaReceiverSideTest, FeedbackWarpedDelay) {
+ // Since packets are 100ms apart, each one corresponds to a feedback.
+ const int64_t kTimeGapMs = 100; // Between each packet.
+
+ // Raw delays are = [50 250 450 650 850 1050 1250 1450] ms.
+ // Baseline delay will be 50 ms.
+ // Delay signals should be: [0 200 400 600 800 1000 1200 1400] ms.
+ const int64_t kMedianFilteredDelaysMs[] = {
+ 0, 100, 200, 300, 400, 600, 800, 1000};
+ const int kNumPackets = ARRAY_SIZE(kMedianFilteredDelaysMs);
+ const float kAlpha = 0.1f; // Used for exponential smoothing.
+
+ int64_t exp_smoothed_delays_ms[kNumPackets];
+ exp_smoothed_delays_ms[0] = kMedianFilteredDelaysMs[0];
+
+ for (int i = 1; i < kNumPackets; ++i) {
+ exp_smoothed_delays_ms[i] = static_cast<int64_t>(
+ kAlpha * kMedianFilteredDelaysMs[i] +
+ (1.0f - kAlpha) * exp_smoothed_delays_ms[i - 1] + 0.5f);
}
- random_shuffle(sequence_numbers.begin(), sequence_numbers.end());
+ for (int i = 0; i < kNumPackets; ++i) {
+ int64_t send_time_us = (i + 1) * kTimeGapMs * 1000;
+ int64_t arrival_time_ms = send_time_us / 1000 + 50 + 200 * i;
+ uint16_t sequence_number = static_cast<uint16_t>(i + 1);
+ // Payload sizes are not important here.
+ const MediaPacket media_packet(kFlowId, send_time_us, 0, sequence_number);
+ nada_receiver_.ReceivePacket(arrival_time_ms, media_packet);
- for (size_t i = 0; i < num_packets; ++i) {
- const MediaPacket media_packet(kFlowId, 0, 0, sequence_numbers[i]);
- // Arrival time = 0, all packets will be considered.
- nada_receiver_.ReceivePacket(0, media_packet);
+ rtc::scoped_ptr<NadaFeedback> nada_feedback(static_cast<NadaFeedback*>(
+ nada_receiver_.GetFeedback(arrival_time_ms)));
+ EXPECT_EQ(nada_feedback->exp_smoothed_delay_ms(),
+ exp_smoothed_delays_ms[i]);
+ // Delays can be non-linearly warped.
+ EXPECT_EQ(nada_feedback->est_queuing_delay_signal_ms(),
+ Warp(exp_smoothed_delays_ms[i]));
+ // Zero loss, congestion signal = queuing_delay
+ EXPECT_EQ(nada_feedback->congestion_signal(),
+ Warp(exp_smoothed_delays_ms[i]));
}
-
- EXPECT_EQ(nada_receiver_.RecentPacketLossRatio(), 0.0f);
}
TEST_F(FilterTest, MedianConstantArray) {
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/estimators/remb.cc b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/estimators/remb.cc
index 393df8e871e..b18b9f06b99 100644
--- a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/estimators/remb.cc
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/estimators/remb.cc
@@ -69,15 +69,13 @@ RembReceiver::RembReceiver(int flow_id, bool plot)
recv_stats_(ReceiveStatistics::Create(&clock_)),
latest_estimate_bps_(-1),
last_feedback_ms_(-1),
- estimator_(new RemoteBitrateEstimatorAbsSendTime(
- this,
- &clock_,
- kRemoteBitrateEstimatorMinBitrateBps)) {
+ estimator_(new RemoteBitrateEstimatorAbsSendTime(this, &clock_)) {
std::stringstream ss;
ss << "Estimate_" << flow_id_ << "#1";
estimate_log_prefix_ = ss.str();
// Default RTT in RemoteRateControl is 200 ms ; 50 ms is more realistic.
- estimator_->OnRttUpdate(50);
+ estimator_->OnRttUpdate(50, 50);
+ estimator_->SetMinBitrate(kRemoteBitrateEstimatorMinBitrateBps);
}
RembReceiver::~RembReceiver() {
@@ -101,9 +99,8 @@ void RembReceiver::ReceivePacket(int64_t arrival_time_ms,
clock_.AdvanceTimeMilliseconds(arrival_time_ms - clock_.TimeInMilliseconds());
ASSERT_TRUE(arrival_time_ms == clock_.TimeInMilliseconds());
- received_packets_.Insert(media_packet.sequence_number(),
- media_packet.send_time_ms(), arrival_time_ms,
- media_packet.payload_size());
+ // Log received packet information.
+ BweReceiver::ReceivePacket(arrival_time_ms, media_packet);
}
FeedbackPacket* RembReceiver::GetFeedback(int64_t now_ms) {
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/estimators/remb.h b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/estimators/remb.h
index 93279e660e0..7dfd7a84591 100644
--- a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/estimators/remb.h
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/estimators/remb.h
@@ -44,7 +44,7 @@ class RembBweSender : public BweSender {
private:
Clock* clock_;
- DISALLOW_IMPLICIT_CONSTRUCTORS(RembBweSender);
+ RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(RembBweSender);
};
class RembReceiver : public BweReceiver, public RemoteBitrateObserver {
@@ -73,7 +73,7 @@ class RembReceiver : public BweReceiver, public RemoteBitrateObserver {
int64_t last_feedback_ms_;
rtc::scoped_ptr<RemoteBitrateEstimator> estimator_;
- DISALLOW_IMPLICIT_CONSTRUCTORS(RembReceiver);
+ RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(RembReceiver);
};
} // namespace bwe
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/estimators/send_side.cc b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/estimators/send_side.cc
index 9caa8b14220..250d0c1262a 100644
--- a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/estimators/send_side.cc
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/estimators/send_side.cc
@@ -23,9 +23,7 @@ const int kFeedbackIntervalMs = 50;
FullBweSender::FullBweSender(int kbps, BitrateObserver* observer, Clock* clock)
: bitrate_controller_(
BitrateController::CreateBitrateController(clock, observer)),
- rbe_(new RemoteBitrateEstimatorAbsSendTime(this,
- clock,
- 1000 * kMinBitrateKbps)),
+ rbe_(new RemoteBitrateEstimatorAbsSendTime(this, clock)),
feedback_observer_(bitrate_controller_->CreateRtcpBandwidthObserver()),
clock_(clock),
send_time_history_(10000),
@@ -36,6 +34,7 @@ FullBweSender::FullBweSender(int kbps, BitrateObserver* observer, Clock* clock)
bitrate_controller_->SetStartBitrate(1000 * kbps);
bitrate_controller_->SetMinMaxBitrate(1000 * kMinBitrateKbps,
1000 * kMaxBitrateKbps);
+ rbe_->SetMinBitrate(1000 * kMinBitrateKbps);
}
FullBweSender::~FullBweSender() {
@@ -50,18 +49,16 @@ void FullBweSender::GiveFeedback(const FeedbackPacket& feedback) {
static_cast<const SendSideBweFeedback&>(feedback);
if (fb.packet_feedback_vector().empty())
return;
- // TODO(sprang): Unconstify PacketInfo so we don't need temp copy?
std::vector<PacketInfo> packet_feedback_vector(fb.packet_feedback_vector());
- for (PacketInfo& packet : packet_feedback_vector) {
- if (!send_time_history_.GetSendTime(packet.sequence_number,
- &packet.send_time_ms, true)) {
+ for (PacketInfo& packet_info : packet_feedback_vector) {
+ if (!send_time_history_.GetInfo(&packet_info, true)) {
LOG(LS_WARNING) << "Ack arrived too late.";
}
}
int64_t rtt_ms =
clock_->TimeInMilliseconds() - feedback.latest_send_time_ms();
- rbe_->OnRttUpdate(rtt_ms);
+ rbe_->OnRttUpdate(rtt_ms, rtt_ms);
BWE_TEST_LOGGING_PLOT(1, "RTT", clock_->TimeInMilliseconds(), rtt_ms);
rbe_->IncomingPacketFeedbackVector(packet_feedback_vector);
@@ -95,9 +92,10 @@ void FullBweSender::OnPacketsSent(const Packets& packets) {
for (Packet* packet : packets) {
if (packet->GetPacketType() == Packet::kMedia) {
MediaPacket* media_packet = static_cast<MediaPacket*>(packet);
- send_time_history_.AddAndRemoveOldSendTimes(
- media_packet->header().sequenceNumber,
- media_packet->GetAbsSendTimeInMs());
+ PacketInfo info(0, media_packet->sender_timestamp_ms(),
+ media_packet->header().sequenceNumber,
+ media_packet->payload_size(), packet->paced());
+ send_time_history_.AddAndRemoveOld(info);
}
}
}
@@ -127,12 +125,11 @@ SendSideBweReceiver::~SendSideBweReceiver() {
void SendSideBweReceiver::ReceivePacket(int64_t arrival_time_ms,
const MediaPacket& media_packet) {
packet_feedback_vector_.push_back(PacketInfo(
- arrival_time_ms, media_packet.sender_timestamp_us() / 1000,
+ arrival_time_ms, media_packet.sender_timestamp_ms(),
media_packet.header().sequenceNumber, media_packet.payload_size(), true));
- received_packets_.Insert(media_packet.sequence_number(),
- media_packet.send_time_ms(), arrival_time_ms,
- media_packet.payload_size());
+ // Log received packet information.
+ BweReceiver::ReceivePacket(arrival_time_ms, media_packet);
}
FeedbackPacket* SendSideBweReceiver::GetFeedback(int64_t now_ms) {
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/estimators/send_side.h b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/estimators/send_side.h
index c76e360a003..ab9abc5cbc6 100644
--- a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/estimators/send_side.h
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/estimators/send_side.h
@@ -13,7 +13,7 @@
#include <vector>
-#include "webrtc/modules/bitrate_controller/send_time_history.h"
+#include "webrtc/modules/remote_bitrate_estimator/include/send_time_history.h"
#include "webrtc/modules/remote_bitrate_estimator/test/bwe.h"
namespace webrtc {
@@ -45,7 +45,7 @@ class FullBweSender : public BweSender, public RemoteBitrateObserver {
bool has_received_ack_;
uint16_t last_acked_seq_num_;
- DISALLOW_IMPLICIT_CONSTRUCTORS(FullBweSender);
+ RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(FullBweSender);
};
class SendSideBweReceiver : public BweReceiver {
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/estimators/tcp.cc b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/estimators/tcp.cc
index 154d68c520c..a02abc6ab8f 100644
--- a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/estimators/tcp.cc
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/estimators/tcp.cc
@@ -33,12 +33,11 @@ TcpBweReceiver::~TcpBweReceiver() {
void TcpBweReceiver::ReceivePacket(int64_t arrival_time_ms,
const MediaPacket& media_packet) {
- latest_owd_ms_ = arrival_time_ms - media_packet.sender_timestamp_us() / 1000;
+ latest_owd_ms_ = arrival_time_ms - media_packet.sender_timestamp_ms() / 1000;
acks_.push_back(media_packet.header().sequenceNumber);
- received_packets_.Insert(media_packet.sequence_number(),
- media_packet.send_time_ms(), arrival_time_ms,
- media_packet.payload_size());
+ // Log received packet information.
+ BweReceiver::ReceivePacket(arrival_time_ms, media_packet);
}
FeedbackPacket* TcpBweReceiver::GetFeedback(int64_t now_ms) {
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/metric_recorder.cc b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/metric_recorder.cc
new file mode 100644
index 00000000000..6202b4a6a30
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/metric_recorder.cc
@@ -0,0 +1,445 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/remote_bitrate_estimator/test/metric_recorder.h"
+
+#include "webrtc/modules/remote_bitrate_estimator/test/packet_sender.h"
+
+#include <algorithm>
+
+namespace webrtc {
+namespace testing {
+namespace bwe {
+
+namespace {
+// Holder mean, Manhattan distance for p=1, EuclidianNorm/sqrt(n) for p=2.
+template <typename T>
+double NormLp(T sum, size_t size, double p) {
+ return pow(sum / size, 1.0 / p);
+}
+}
+
+const double kP = 1.0; // Used for Norm Lp.
+
+LinkShare::LinkShare(ChokeFilter* choke_filter)
+ : choke_filter_(choke_filter), running_flows_(choke_filter->flow_ids()) {
+}
+
+void LinkShare::PauseFlow(int flow_id) {
+ running_flows_.erase(flow_id);
+}
+
+void LinkShare::ResumeFlow(int flow_id) {
+ running_flows_.insert(flow_id);
+}
+
+uint32_t LinkShare::TotalAvailableKbps() {
+ return choke_filter_->capacity_kbps();
+}
+
+uint32_t LinkShare::AvailablePerFlowKbps(int flow_id) {
+ uint32_t available_capacity_per_flow_kbps = 0;
+ if (running_flows_.find(flow_id) != running_flows_.end()) {
+ available_capacity_per_flow_kbps =
+ TotalAvailableKbps() / static_cast<uint32_t>(running_flows_.size());
+ }
+ return available_capacity_per_flow_kbps;
+}
+
+MetricRecorder::MetricRecorder(const std::string algorithm_name,
+ int flow_id,
+ PacketSender* packet_sender,
+ LinkShare* link_share)
+ : algorithm_name_(algorithm_name),
+ flow_id_(flow_id),
+ link_share_(link_share),
+ now_ms_(0),
+ sum_delays_ms_(0),
+ delay_histogram_ms_(),
+ sum_delays_square_ms2_(0),
+ sum_throughput_bytes_(0),
+ last_unweighted_estimate_error_(0),
+ optimal_throughput_bits_(0),
+ last_available_bitrate_per_flow_kbps_(0),
+ start_computing_metrics_ms_(0),
+ started_computing_metrics_(false),
+ num_packets_received_(0) {
+ std::fill_n(sum_lp_weighted_estimate_error_, 2, 0);
+ if (packet_sender != nullptr)
+ packet_sender->set_metric_recorder(this);
+}
+
+void MetricRecorder::SetPlotInformation(
+ const std::vector<std::string>& prefixes,
+ bool plot_delay,
+ bool plot_loss) {
+ assert(prefixes.size() == kNumMetrics);
+ for (size_t i = 0; i < kNumMetrics; ++i) {
+ plot_information_[i].prefix = prefixes[i];
+ }
+ plot_information_[kThroughput].plot_interval_ms = 100;
+ plot_information_[kSendingEstimate].plot_interval_ms = 100;
+ plot_information_[kDelay].plot_interval_ms = 100;
+ plot_information_[kLoss].plot_interval_ms = 500;
+ plot_information_[kObjective].plot_interval_ms = 1000;
+ plot_information_[kTotalAvailable].plot_interval_ms = 1000;
+ plot_information_[kAvailablePerFlow].plot_interval_ms = 1000;
+
+ for (int i = kThroughput; i < kNumMetrics; ++i) {
+ plot_information_[i].last_plot_ms = 0;
+ switch (i) {
+ case kSendingEstimate:
+ case kObjective:
+ case kAvailablePerFlow:
+ plot_information_[i].plot = false;
+ break;
+ case kLoss:
+ plot_information_[i].plot = plot_loss;
+ break;
+ case kDelay:
+ plot_information_[i].plot = plot_delay;
+ break;
+ default:
+ plot_information_[i].plot = true;
+ }
+ }
+}
+
+void MetricRecorder::PlotAllDynamics() {
+ for (int i = kThroughput; i < kNumMetrics; ++i) {
+ if (plot_information_[i].plot &&
+ now_ms_ - plot_information_[i].last_plot_ms >=
+ plot_information_[i].plot_interval_ms) {
+ PlotDynamics(i);
+ }
+ }
+}
+
+void MetricRecorder::PlotDynamics(int metric) {
+ if (metric == kTotalAvailable) {
+ BWE_TEST_LOGGING_PLOT_WITH_NAME(
+ 0, plot_information_[kTotalAvailable].prefix, now_ms_,
+ GetTotalAvailableKbps(), "Available");
+ } else if (metric == kAvailablePerFlow) {
+ BWE_TEST_LOGGING_PLOT_WITH_NAME(
+ 0, plot_information_[kAvailablePerFlow].prefix, now_ms_,
+ GetAvailablePerFlowKbps(), "Available_per_flow");
+ } else {
+ PlotLine(metric, plot_information_[metric].prefix,
+ plot_information_[metric].time_ms,
+ plot_information_[metric].value);
+ }
+ plot_information_[metric].last_plot_ms = now_ms_;
+}
+
+template <typename T>
+void MetricRecorder::PlotLine(int windows_id,
+ const std::string& prefix,
+ int64_t time_ms,
+ T y) {
+ BWE_TEST_LOGGING_PLOT_WITH_NAME(windows_id, prefix, time_ms,
+ static_cast<double>(y), algorithm_name_);
+}
+
+void MetricRecorder::UpdateTimeMs(int64_t time_ms) {
+ now_ms_ = std::max(now_ms_, time_ms);
+}
+
+void MetricRecorder::UpdateThroughput(int64_t bitrate_kbps,
+ size_t payload_size) {
+ // Total throughput should be computed before updating the time.
+ PushThroughputBytes(payload_size, now_ms_);
+ plot_information_[kThroughput].Update(now_ms_, bitrate_kbps);
+}
+
+void MetricRecorder::UpdateSendingEstimateKbps(int64_t bitrate_kbps) {
+ plot_information_[kSendingEstimate].Update(now_ms_, bitrate_kbps);
+}
+
+void MetricRecorder::UpdateDelayMs(int64_t delay_ms) {
+ PushDelayMs(delay_ms, now_ms_);
+ plot_information_[kDelay].Update(now_ms_, delay_ms);
+}
+
+void MetricRecorder::UpdateLoss(float loss_ratio) {
+ plot_information_[kLoss].Update(now_ms_, loss_ratio);
+}
+
+void MetricRecorder::UpdateObjective() {
+ plot_information_[kObjective].Update(now_ms_, ObjectiveFunction());
+}
+
+uint32_t MetricRecorder::GetTotalAvailableKbps() {
+ if (link_share_ == nullptr)
+ return 0;
+ return link_share_->TotalAvailableKbps();
+}
+
+uint32_t MetricRecorder::GetAvailablePerFlowKbps() {
+ if (link_share_ == nullptr)
+ return 0;
+ return link_share_->AvailablePerFlowKbps(flow_id_);
+}
+
+uint32_t MetricRecorder::GetSendingEstimateKbps() {
+ return static_cast<uint32_t>(plot_information_[kSendingEstimate].value);
+}
+
+void MetricRecorder::PushDelayMs(int64_t delay_ms, int64_t arrival_time_ms) {
+ if (ShouldRecord(arrival_time_ms)) {
+ sum_delays_ms_ += delay_ms;
+ sum_delays_square_ms2_ += delay_ms * delay_ms;
+ if (delay_histogram_ms_.find(delay_ms) == delay_histogram_ms_.end()) {
+ delay_histogram_ms_[delay_ms] = 0;
+ }
+ ++delay_histogram_ms_[delay_ms];
+ }
+}
+
+void MetricRecorder::UpdateEstimateError(int64_t new_value) {
+ int64_t lp_value = pow(static_cast<double>(std::abs(new_value)), kP);
+ if (new_value < 0) {
+ sum_lp_weighted_estimate_error_[0] += lp_value;
+ } else {
+ sum_lp_weighted_estimate_error_[1] += lp_value;
+ }
+}
+
+void MetricRecorder::PushThroughputBytes(size_t payload_size,
+ int64_t arrival_time_ms) {
+ if (ShouldRecord(arrival_time_ms)) {
+ ++num_packets_received_;
+ sum_throughput_bytes_ += payload_size;
+
+ int64_t current_available_per_flow_kbps =
+ static_cast<int64_t>(GetAvailablePerFlowKbps());
+
+ int64_t current_bitrate_diff_kbps =
+ static_cast<int64_t>(GetSendingEstimateKbps()) -
+ current_available_per_flow_kbps;
+
+ int64_t weighted_estimate_error =
+ (((current_bitrate_diff_kbps + last_unweighted_estimate_error_) *
+ (arrival_time_ms - plot_information_[kThroughput].time_ms)) /
+ 2);
+
+ UpdateEstimateError(weighted_estimate_error);
+
+ optimal_throughput_bits_ +=
+ ((current_available_per_flow_kbps +
+ last_available_bitrate_per_flow_kbps_) *
+ (arrival_time_ms - plot_information_[kThroughput].time_ms)) /
+ 2;
+
+ last_available_bitrate_per_flow_kbps_ = current_available_per_flow_kbps;
+ }
+}
+
+bool MetricRecorder::ShouldRecord(int64_t arrival_time_ms) {
+ if (arrival_time_ms >= start_computing_metrics_ms_) {
+ if (!started_computing_metrics_) {
+ start_computing_metrics_ms_ = arrival_time_ms;
+ now_ms_ = arrival_time_ms;
+ started_computing_metrics_ = true;
+ }
+ return true;
+ } else {
+ return false;
+ }
+}
+
+void MetricRecorder::PlotThroughputHistogram(
+ const std::string& title,
+ const std::string& bwe_name,
+ size_t num_flows,
+ int64_t extra_offset_ms,
+ const std::string optimum_id) const {
+ double optimal_bitrate_per_flow_kbps = static_cast<double>(
+ optimal_throughput_bits_ / RunDurationMs(extra_offset_ms));
+
+ double neg_error = Renormalize(
+ NormLp(sum_lp_weighted_estimate_error_[0], num_packets_received_, kP));
+ double pos_error = Renormalize(
+ NormLp(sum_lp_weighted_estimate_error_[1], num_packets_received_, kP));
+
+ double average_bitrate_kbps = AverageBitrateKbps(extra_offset_ms);
+
+ // Prevent the error to be too close to zero (plotting issue).
+ double extra_error = average_bitrate_kbps / 500;
+
+ std::string optimum_title =
+ optimum_id.empty() ? "optimal_bitrate" : "optimal_bitrates#" + optimum_id;
+
+ BWE_TEST_LOGGING_LABEL(4, title, "average_bitrate_(kbps)", num_flows);
+ BWE_TEST_LOGGING_LIMITERRORBAR(
+ 4, bwe_name, average_bitrate_kbps,
+ average_bitrate_kbps - neg_error - extra_error,
+ average_bitrate_kbps + pos_error + extra_error, "estimate_error",
+ optimal_bitrate_per_flow_kbps, optimum_title, flow_id_);
+
+ BWE_TEST_LOGGING_LOG1("RESULTS >>> " + bwe_name + " Channel utilization : ",
+ "%lf %%",
+ 100.0 * static_cast<double>(average_bitrate_kbps) /
+ optimal_bitrate_per_flow_kbps);
+
+ RTC_UNUSED(pos_error);
+ RTC_UNUSED(neg_error);
+ RTC_UNUSED(extra_error);
+ RTC_UNUSED(optimal_bitrate_per_flow_kbps);
+}
+
+void MetricRecorder::PlotThroughputHistogram(const std::string& title,
+ const std::string& bwe_name,
+ size_t num_flows,
+ int64_t extra_offset_ms) const {
+ PlotThroughputHistogram(title, bwe_name, num_flows, extra_offset_ms, "");
+}
+
+void MetricRecorder::PlotDelayHistogram(const std::string& title,
+ const std::string& bwe_name,
+ size_t num_flows,
+ int64_t one_way_path_delay_ms) const {
+ double average_delay_ms =
+ static_cast<double>(sum_delays_ms_) / num_packets_received_;
+
+ // Prevent the error to be too close to zero (plotting issue).
+ double extra_error = average_delay_ms / 500;
+ double tenth_sigma_ms = DelayStdDev() / 10.0 + extra_error;
+ int64_t percentile_5_ms = NthDelayPercentile(5);
+ int64_t percentile_95_ms = NthDelayPercentile(95);
+
+ BWE_TEST_LOGGING_LABEL(5, title, "average_delay_(ms)", num_flows)
+ BWE_TEST_LOGGING_ERRORBAR(5, bwe_name, average_delay_ms, percentile_5_ms,
+ percentile_95_ms, "5th and 95th percentiles",
+ flow_id_);
+
+ // Log added latency, disregard baseline path delay.
+ BWE_TEST_LOGGING_LOG1("RESULTS >>> " + bwe_name + " Delay average : ",
+ "%lf ms", average_delay_ms - one_way_path_delay_ms);
+ BWE_TEST_LOGGING_LOG1("RESULTS >>> " + bwe_name + " Delay 5th percentile : ",
+ "%ld ms", percentile_5_ms - one_way_path_delay_ms);
+ BWE_TEST_LOGGING_LOG1("RESULTS >>> " + bwe_name + " Delay 95th percentile : ",
+ "%ld ms", percentile_95_ms - one_way_path_delay_ms);
+
+ RTC_UNUSED(tenth_sigma_ms);
+ RTC_UNUSED(percentile_5_ms);
+ RTC_UNUSED(percentile_95_ms);
+}
+
+void MetricRecorder::PlotLossHistogram(const std::string& title,
+ const std::string& bwe_name,
+ size_t num_flows,
+ float global_loss_ratio) const {
+ BWE_TEST_LOGGING_LABEL(6, title, "packet_loss_ratio_(%)", num_flows)
+ BWE_TEST_LOGGING_BAR(6, bwe_name, 100.0f * global_loss_ratio, flow_id_);
+
+ BWE_TEST_LOGGING_LOG1("RESULTS >>> " + bwe_name + " Loss Ratio : ", "%f %%",
+ 100.0f * global_loss_ratio);
+}
+
+void MetricRecorder::PlotObjectiveHistogram(const std::string& title,
+ const std::string& bwe_name,
+ size_t num_flows) const {
+ BWE_TEST_LOGGING_LABEL(7, title, "objective_function", num_flows)
+ BWE_TEST_LOGGING_BAR(7, bwe_name, ObjectiveFunction(), flow_id_);
+}
+
+void MetricRecorder::PlotZero() {
+ for (int i = kThroughput; i <= kLoss; ++i) {
+ if (plot_information_[i].plot) {
+ std::stringstream prefix;
+ prefix << "Receiver_" << flow_id_ << "_" + plot_information_[i].prefix;
+ PlotLine(i, prefix.str(), now_ms_, 0);
+ plot_information_[i].last_plot_ms = now_ms_;
+ }
+ }
+}
+
+void MetricRecorder::PauseFlow() {
+ PlotZero();
+ link_share_->PauseFlow(flow_id_);
+}
+
+void MetricRecorder::ResumeFlow(int64_t paused_time_ms) {
+ UpdateTimeMs(now_ms_ + paused_time_ms);
+ PlotZero();
+ link_share_->ResumeFlow(flow_id_);
+}
+
+double MetricRecorder::AverageBitrateKbps(int64_t extra_offset_ms) const {
+ int64_t duration_ms = RunDurationMs(extra_offset_ms);
+ if (duration_ms == 0)
+ return 0.0;
+ return static_cast<double>(8 * sum_throughput_bytes_ / duration_ms);
+}
+
+int64_t MetricRecorder::RunDurationMs(int64_t extra_offset_ms) const {
+ return now_ms_ - start_computing_metrics_ms_ - extra_offset_ms;
+}
+
+double MetricRecorder::DelayStdDev() const {
+ if (num_packets_received_ == 0) {
+ return 0.0;
+ }
+ double mean = static_cast<double>(sum_delays_ms_) / num_packets_received_;
+ double mean2 =
+ static_cast<double>(sum_delays_square_ms2_) / num_packets_received_;
+ return sqrt(mean2 - pow(mean, 2.0));
+}
+
+// Since delay values are bounded in a subset of [0, 5000] ms,
+// this function's execution time is O(1), independent of num_packets_received_.
+int64_t MetricRecorder::NthDelayPercentile(int n) const {
+ if (num_packets_received_ == 0) {
+ return 0;
+ }
+ size_t num_packets_remaining = (n * num_packets_received_) / 100;
+ for (auto hist : delay_histogram_ms_) {
+ if (num_packets_remaining <= hist.second)
+ return static_cast<int64_t>(hist.first);
+ num_packets_remaining -= hist.second;
+ }
+
+ assert(false);
+ return -1;
+}
+
+// The weighted_estimate_error_ was weighted based on time windows.
+// This function scales back the result before plotting.
+double MetricRecorder::Renormalize(double x) const {
+ return (x * num_packets_received_) / now_ms_;
+}
+
+inline double U(int64_t x, double alpha) {
+ if (alpha == 1.0) {
+ return log(static_cast<double>(x));
+ }
+ return pow(static_cast<double>(x), 1.0 - alpha) / (1.0 - alpha);
+}
+
+inline double U(size_t x, double alpha) {
+ return U(static_cast<int64_t>(x), alpha);
+}
+
+// TODO(magalhaesc): Update ObjectiveFunction.
+double MetricRecorder::ObjectiveFunction() const {
+ const double kDelta = 0.15; // Delay penalty factor.
+ const double kAlpha = 1.0;
+ const double kBeta = 1.0;
+
+ double throughput_metric = U(sum_throughput_bytes_, kAlpha);
+ double delay_penalty = kDelta * U(sum_delays_ms_, kBeta);
+
+ return throughput_metric - delay_penalty;
+}
+
+} // namespace bwe
+} // namespace testing
+} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/metric_recorder.h b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/metric_recorder.h
new file mode 100644
index 00000000000..2be13e0b0b7
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/metric_recorder.h
@@ -0,0 +1,189 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_REMOTE_BITRATE_ESTIMATOR_TEST_METRIC_RECORDER_H_
+#define WEBRTC_MODULES_REMOTE_BITRATE_ESTIMATOR_TEST_METRIC_RECORDER_H_
+
+#include <map>
+#include <set>
+#include <string>
+#include <vector>
+
+#include "webrtc/base/common.h"
+#include "webrtc/test/testsupport/gtest_prod_util.h"
+
+namespace webrtc {
+namespace testing {
+namespace bwe {
+
+class ChokeFilter;
+class PacketSender;
+
+class LinkShare {
+ public:
+ explicit LinkShare(ChokeFilter* choke_filter);
+
+ void PauseFlow(int flow_id); // Increases available capacity per flow.
+ void ResumeFlow(int flow_id); // Decreases available capacity per flow.
+
+ uint32_t TotalAvailableKbps();
+ // If the given flow is paused, its output is zero.
+ uint32_t AvailablePerFlowKbps(int flow_id);
+
+ private:
+ ChokeFilter* choke_filter_;
+ std::set<int> running_flows_;
+};
+
+struct PlotInformation {
+ PlotInformation()
+ : prefix(),
+ last_plot_ms(0),
+ time_ms(0),
+ value(0.0),
+ plot_interval_ms(0) {}
+ template <typename T>
+ void Update(int64_t now_ms, T new_value) {
+ time_ms = now_ms;
+ value = static_cast<double>(new_value);
+ }
+ std::string prefix;
+ bool plot;
+ int64_t last_plot_ms;
+ int64_t time_ms;
+ double value;
+ int64_t plot_interval_ms;
+};
+
+class MetricRecorder {
+ public:
+ MetricRecorder(const std::string algorithm_name,
+ int flow_id,
+ PacketSender* packet_sender,
+ LinkShare* link_share);
+
+ void SetPlotInformation(const std::vector<std::string>& prefixes,
+ bool plot_delay,
+ bool plot_loss);
+
+ template <typename T>
+ void PlotLine(int windows_id,
+ const std::string& prefix,
+ int64_t time_ms,
+ T y);
+
+ void PlotDynamics(int metric);
+ void PlotAllDynamics();
+
+ void UpdateTimeMs(int64_t time_ms);
+ void UpdateThroughput(int64_t bitrate_kbps, size_t payload_size);
+ void UpdateSendingEstimateKbps(int64_t bitrate_kbps);
+ void UpdateDelayMs(int64_t delay_ms);
+ void UpdateLoss(float loss_ratio);
+ void UpdateObjective();
+
+ void PlotThroughputHistogram(const std::string& title,
+ const std::string& bwe_name,
+ size_t num_flows,
+ int64_t extra_offset_ms,
+ const std::string optimum_id) const;
+
+ void PlotThroughputHistogram(const std::string& title,
+ const std::string& bwe_name,
+ size_t num_flows,
+ int64_t extra_offset_ms) const;
+
+ void PlotDelayHistogram(const std::string& title,
+ const std::string& bwe_name,
+ size_t num_flows,
+ int64_t one_way_path_delay_ms) const;
+
+ void PlotLossHistogram(const std::string& title,
+ const std::string& bwe_name,
+ size_t num_flows,
+ float global_loss_ratio) const;
+
+ void PlotObjectiveHistogram(const std::string& title,
+ const std::string& bwe_name,
+ size_t num_flows) const;
+
+ void set_start_computing_metrics_ms(int64_t start_computing_metrics_ms) {
+ start_computing_metrics_ms_ = start_computing_metrics_ms;
+ }
+
+ void set_plot_available_capacity(bool plot) {
+ plot_information_[kTotalAvailable].plot = plot;
+ }
+
+ void PauseFlow(); // Plot zero.
+ void ResumeFlow(int64_t paused_time_ms); // Plot zero.
+ void PlotZero();
+
+ private:
+ FRIEND_TEST_ALL_PREFIXES(MetricRecorderTest, NoPackets);
+ FRIEND_TEST_ALL_PREFIXES(MetricRecorderTest, RegularPackets);
+ FRIEND_TEST_ALL_PREFIXES(MetricRecorderTest, VariableDelayPackets);
+
+ uint32_t GetTotalAvailableKbps();
+ uint32_t GetAvailablePerFlowKbps();
+ uint32_t GetSendingEstimateKbps();
+ double ObjectiveFunction() const;
+
+ double Renormalize(double x) const;
+ bool ShouldRecord(int64_t arrival_time_ms);
+
+ void PushDelayMs(int64_t delay_ms, int64_t arrival_time_ms);
+ void PushThroughputBytes(size_t throughput_bytes, int64_t arrival_time_ms);
+
+ void UpdateEstimateError(int64_t new_value);
+ double DelayStdDev() const;
+ int64_t NthDelayPercentile(int n) const;
+ double AverageBitrateKbps(int64_t extra_offset_ms) const;
+ int64_t RunDurationMs(int64_t extra_offset_ms) const;
+
+ enum Metrics {
+ kThroughput = 0,
+ kSendingEstimate,
+ kDelay,
+ kLoss,
+ kObjective,
+ kTotalAvailable,
+ kAvailablePerFlow,
+ kNumMetrics
+ };
+
+ std::string algorithm_name_;
+ int flow_id_;
+ LinkShare* link_share_;
+
+ int64_t now_ms_;
+
+ PlotInformation plot_information_[kNumMetrics];
+
+ int64_t sum_delays_ms_;
+ // delay_histogram_ms_[i] counts how many packets have delay = i ms.
+ std::map<int64_t, size_t> delay_histogram_ms_;
+ int64_t sum_delays_square_ms2_; // Used to compute standard deviation.
+ size_t sum_throughput_bytes_;
+ // ((Receiving rate - available bitrate per flow) * time window)^p.
+ // 0 for negative values, 1 for positive values.
+ int64_t sum_lp_weighted_estimate_error_[2];
+ int64_t last_unweighted_estimate_error_;
+ int64_t optimal_throughput_bits_;
+ int64_t last_available_bitrate_per_flow_kbps_;
+ int64_t start_computing_metrics_ms_;
+ bool started_computing_metrics_;
+ size_t num_packets_received_;
+};
+
+} // namespace bwe
+} // namespace testing
+} // namespace webrtc
+#endif // WEBRTC_MODULES_REMOTE_BITRATE_ESTIMATOR_TEST_METRIC_RECORDER_H_
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/metric_recorder_unittest.cc b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/metric_recorder_unittest.cc
new file mode 100644
index 00000000000..7d4ed5fd5f4
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/metric_recorder_unittest.cc
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/remote_bitrate_estimator/test/metric_recorder.h"
+
+#include <math.h>
+#include <algorithm>
+#include <vector>
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace webrtc {
+namespace testing {
+namespace bwe {
+
+class MetricRecorderTest : public ::testing::Test {
+ public:
+ MetricRecorderTest() : metric_recorder_("Test", 0, nullptr, nullptr) {}
+
+ ~MetricRecorderTest() {}
+
+ protected:
+ MetricRecorder metric_recorder_;
+};
+
+TEST_F(MetricRecorderTest, NoPackets) {
+ EXPECT_EQ(metric_recorder_.AverageBitrateKbps(0), 0);
+ EXPECT_EQ(metric_recorder_.DelayStdDev(), 0.0);
+ EXPECT_EQ(metric_recorder_.NthDelayPercentile(0), 0);
+ EXPECT_EQ(metric_recorder_.NthDelayPercentile(5), 0);
+ EXPECT_EQ(metric_recorder_.NthDelayPercentile(95), 0);
+ EXPECT_EQ(metric_recorder_.NthDelayPercentile(100), 0);
+}
+
+TEST_F(MetricRecorderTest, RegularPackets) {
+ const size_t kPayloadSizeBytes = 1200;
+ const int64_t kDelayMs = 20;
+ const int64_t kInterpacketGapMs = 5;
+ const int kNumPackets = 1000;
+
+ for (int i = 0; i < kNumPackets; ++i) {
+ int64_t arrival_time_ms = kInterpacketGapMs * i + kDelayMs;
+ metric_recorder_.UpdateTimeMs(arrival_time_ms);
+ metric_recorder_.PushDelayMs(kDelayMs, arrival_time_ms);
+ metric_recorder_.PushThroughputBytes(kPayloadSizeBytes, arrival_time_ms);
+ }
+
+ EXPECT_NEAR(
+ metric_recorder_.AverageBitrateKbps(0),
+ static_cast<uint32_t>(kPayloadSizeBytes * 8) / (kInterpacketGapMs), 10);
+
+ EXPECT_EQ(metric_recorder_.DelayStdDev(), 0.0);
+
+ EXPECT_EQ(metric_recorder_.NthDelayPercentile(0), kDelayMs);
+ EXPECT_EQ(metric_recorder_.NthDelayPercentile(5), kDelayMs);
+ EXPECT_EQ(metric_recorder_.NthDelayPercentile(95), kDelayMs);
+ EXPECT_EQ(metric_recorder_.NthDelayPercentile(100), kDelayMs);
+}
+
+TEST_F(MetricRecorderTest, VariableDelayPackets) {
+ const size_t kPayloadSizeBytes = 1200;
+ const int64_t kInterpacketGapMs = 2000;
+ const int kNumPackets = 1000;
+
+ std::vector<int64_t> delays_ms;
+ for (int i = 0; i < kNumPackets; ++i) {
+ delays_ms.push_back(static_cast<int64_t>(i + 1));
+ }
+ // Order of packets should not matter here.
+ std::random_shuffle(delays_ms.begin(), delays_ms.end());
+
+ int first_received_ms = delays_ms[0];
+ int64_t last_received_ms = 0;
+ for (int i = 0; i < kNumPackets; ++i) {
+ int64_t arrival_time_ms = kInterpacketGapMs * i + delays_ms[i];
+ last_received_ms = std::max(last_received_ms, arrival_time_ms);
+ metric_recorder_.UpdateTimeMs(arrival_time_ms);
+ metric_recorder_.PushDelayMs(delays_ms[i], arrival_time_ms);
+ metric_recorder_.PushThroughputBytes(kPayloadSizeBytes, arrival_time_ms);
+ }
+
+ size_t received_bits = kPayloadSizeBytes * 8 * kNumPackets;
+ EXPECT_NEAR(metric_recorder_.AverageBitrateKbps(0),
+ static_cast<uint32_t>(received_bits) /
+ ((last_received_ms - first_received_ms)),
+ 10);
+
+ double expected_x = (kNumPackets + 1) / 2.0;
+ double expected_x2 = ((kNumPackets + 1) * (2 * kNumPackets + 1)) / 6.0;
+ double var = expected_x2 - pow(expected_x, 2.0);
+ EXPECT_NEAR(metric_recorder_.DelayStdDev(), sqrt(var), kNumPackets / 1000.0);
+
+ EXPECT_EQ(metric_recorder_.NthDelayPercentile(0), 1);
+ EXPECT_EQ(metric_recorder_.NthDelayPercentile(5), (5 * kNumPackets) / 100);
+ EXPECT_EQ(metric_recorder_.NthDelayPercentile(95), (95 * kNumPackets) / 100);
+ EXPECT_EQ(metric_recorder_.NthDelayPercentile(100), kNumPackets);
+}
+
+} // namespace bwe
+} // namespace testing
+} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/packet.h b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/packet.h
index 647e357f9b9..11885a4544f 100644
--- a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/packet.h
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/packet.h
@@ -34,15 +34,23 @@ class Packet {
virtual bool operator<(const Packet& rhs) const;
virtual int flow_id() const { return flow_id_; }
- virtual int64_t creation_time_us() const { return creation_time_us_; }
virtual void set_send_time_us(int64_t send_time_us);
virtual int64_t send_time_us() const { return send_time_us_; }
+ virtual int64_t sender_timestamp_us() const { return sender_timestamp_us_; }
virtual size_t payload_size() const { return payload_size_; }
virtual Packet::Type GetPacketType() const = 0;
- void set_sender_timestamp_us(int64_t sender_timestamp_us) {
+ virtual void set_sender_timestamp_us(int64_t sender_timestamp_us) {
sender_timestamp_us_ = sender_timestamp_us;
}
- int64_t sender_timestamp_us() const { return sender_timestamp_us_; }
+ virtual void set_paced(bool paced) { paced_ = paced; }
+ virtual bool paced() const { return paced_; }
+ virtual int64_t creation_time_ms() const {
+ return (creation_time_us_ + 500) / 1000;
+ }
+ virtual int64_t sender_timestamp_ms() const {
+ return (sender_timestamp_us_ + 500) / 1000;
+ }
+ virtual int64_t send_time_ms() const { return (send_time_us_ + 500) / 1000; }
protected:
int flow_id_;
@@ -50,6 +58,7 @@ class Packet {
int64_t send_time_us_; // Time the packet left last processor touching it.
int64_t sender_timestamp_us_; // Time the packet left the Sender.
size_t payload_size_; // Size of the (non-existent, simulated) payload.
+ bool paced_; // True if sent through paced sender.
};
class MediaPacket : public Packet {
@@ -63,7 +72,8 @@ class MediaPacket : public Packet {
int64_t send_time_us,
size_t payload_size,
const RTPHeader& header);
- MediaPacket(int64_t send_time_us, uint32_t sequence_number);
+ MediaPacket(int64_t send_time_us, uint16_t sequence_number);
+
virtual ~MediaPacket() {}
int64_t GetAbsSendTimeInMs() const {
@@ -75,7 +85,6 @@ class MediaPacket : public Packet {
const RTPHeader& header() const { return header_; }
virtual Packet::Type GetPacketType() const { return kMedia; }
uint16_t sequence_number() const { return header_.sequenceNumber; }
- int64_t send_time_ms() const { return send_time_us_ / 1000; }
private:
static const int kAbsSendTimeFraction = 18;
@@ -99,7 +108,7 @@ class FeedbackPacket : public Packet {
int64_t latest_send_time_ms() const { return latest_send_time_ms_; }
private:
- int64_t latest_send_time_ms_; // Time stamp for the latest sent packet.
+ int64_t latest_send_time_ms_; // Time stamp for the latest sent FbPacket.
};
class RembFeedback : public FeedbackPacket {
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/packet_receiver.cc b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/packet_receiver.cc
index c13f14437a5..0edfeca4a6f 100644
--- a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/packet_receiver.cc
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/packet_receiver.cc
@@ -10,7 +10,6 @@
#include "webrtc/modules/remote_bitrate_estimator/test/packet_receiver.h"
-#include <math.h>
#include <vector>
#include "testing/gtest/include/gtest/gtest.h"
@@ -29,35 +28,48 @@ PacketReceiver::PacketReceiver(PacketProcessorListener* listener,
int flow_id,
BandwidthEstimatorType bwe_type,
bool plot_delay,
- bool plot_bwe)
+ bool plot_bwe,
+ MetricRecorder* metric_recorder)
: PacketProcessor(listener, flow_id, kReceiver),
- delay_log_prefix_(),
- metric_log_prefix_(),
- packet_loss_log_prefix_(),
- last_delay_plot_ms_(0),
- last_metric_plot_ms_(0),
- last_packet_loss_plot_ms_(0),
- plot_delay_(plot_delay),
- // TODO(magalhaesc) Add separated plot_objective_function and
- // plot_packet_loss parameters to the constructor.
- plot_objective_function_(plot_delay),
- plot_packet_loss_(plot_delay),
bwe_receiver_(CreateBweReceiver(bwe_type, flow_id, plot_bwe)),
- total_delay_ms_(0),
- total_throughput_(0),
- number_packets_(0) {
- // Setup the prefix ststd::rings used when logging.
- std::stringstream ss1;
- ss1 << "Delay_" << flow_id << "#2";
- delay_log_prefix_ = ss1.str();
-
- std::stringstream ss2;
- ss2 << "Objective_function_" << flow_id << "#2";
- metric_log_prefix_ = ss2.str();
-
- std::stringstream ss3;
- ss3 << "Packet_Loss_" << flow_id << "#2";
- packet_loss_log_prefix_ = ss3.str();
+ metric_recorder_(metric_recorder),
+ plot_delay_(plot_delay),
+ last_delay_plot_ms_(0),
+ // #2 aligns the plot with the right axis.
+ delay_prefix_("Delay_ms#2"),
+ bwe_type_(bwe_type) {
+ if (metric_recorder_ != nullptr) {
+ // Setup the prefix std::strings used when logging.
+ std::vector<std::string> prefixes;
+
+ // Metric recorder plots them in separated figures,
+ // alignment will take place with the #1 left axis.
+ prefixes.push_back("Throughput_kbps#1");
+ prefixes.push_back("Sending_Estimate_kbps#1");
+ prefixes.push_back("Delay_ms_#1");
+ prefixes.push_back("Packet_Loss_#1");
+ prefixes.push_back("Objective_function_#1");
+
+ // Plot Total/PerFlow Available capacity together with throughputs.
+ prefixes.push_back("Throughput_kbps#1"); // Total Available.
+ prefixes.push_back("Throughput_kbps#1"); // Available per flow.
+
+ bool plot_loss = plot_delay; // Plot loss if delay is plotted.
+ metric_recorder_->SetPlotInformation(prefixes, plot_delay, plot_loss);
+ }
+}
+
+PacketReceiver::PacketReceiver(PacketProcessorListener* listener,
+ int flow_id,
+ BandwidthEstimatorType bwe_type,
+ bool plot_delay,
+ bool plot_bwe)
+ : PacketReceiver(listener,
+ flow_id,
+ bwe_type,
+ plot_delay,
+ plot_bwe,
+ nullptr) {
}
PacketReceiver::~PacketReceiver() {
@@ -77,16 +89,18 @@ void PacketReceiver::RunFor(int64_t time_ms, Packets* in_out) {
const MediaPacket* media_packet = static_cast<const MediaPacket*>(*it);
// We're treating the send time (from previous filter) as the arrival
// time once packet reaches the estimator.
- int64_t arrival_time_ms = (media_packet->send_time_us() + 500) / 1000;
- int64_t send_time_ms = (media_packet->creation_time_us() + 500) / 1000;
+ int64_t arrival_time_ms = media_packet->send_time_ms();
+ int64_t send_time_ms = media_packet->creation_time_ms();
delay_stats_.Push(arrival_time_ms - send_time_ms);
- PlotDelay(arrival_time_ms, send_time_ms);
- PlotObjectiveFunction(arrival_time_ms);
- PlotPacketLoss(arrival_time_ms);
- total_delay_ms_ += arrival_time_ms - send_time_ms;
- total_throughput_ += media_packet->payload_size();
- ++number_packets_;
+ if (metric_recorder_ != nullptr) {
+ metric_recorder_->UpdateTimeMs(arrival_time_ms);
+ UpdateMetrics(arrival_time_ms, send_time_ms,
+ media_packet->payload_size());
+ metric_recorder_->PlotAllDynamics();
+ } else if (plot_delay_) {
+ PlotDelay(arrival_time_ms, send_time_ms);
+ }
bwe_receiver_->ReceivePacket(arrival_time_ms, *media_packet);
FeedbackPacket* fb = bwe_receiver_->GetFeedback(arrival_time_ms);
@@ -102,46 +116,27 @@ void PacketReceiver::RunFor(int64_t time_ms, Packets* in_out) {
in_out->merge(feedback, DereferencingComparator<Packet>);
}
-void PacketReceiver::PlotDelay(int64_t arrival_time_ms, int64_t send_time_ms) {
- static const int kDelayPlotIntervalMs = 100;
- if (!plot_delay_)
- return;
- if (arrival_time_ms - last_delay_plot_ms_ > kDelayPlotIntervalMs) {
- BWE_TEST_LOGGING_PLOT(0, delay_log_prefix_, arrival_time_ms,
- arrival_time_ms - send_time_ms);
- last_delay_plot_ms_ = arrival_time_ms;
- }
-}
-
-double PacketReceiver::ObjectiveFunction() {
- const double kDelta = 1.0; // Delay penalty factor.
- double throughput_metric = log(static_cast<double>(total_throughput_));
- double delay_penalty = kDelta * log(static_cast<double>(total_delay_ms_));
- return throughput_metric - delay_penalty;
+void PacketReceiver::UpdateMetrics(int64_t arrival_time_ms,
+ int64_t send_time_ms,
+ size_t payload_size) {
+ metric_recorder_->UpdateThroughput(bwe_receiver_->RecentKbps(), payload_size);
+ metric_recorder_->UpdateDelayMs(arrival_time_ms - send_time_ms);
+ metric_recorder_->UpdateLoss(bwe_receiver_->RecentPacketLossRatio());
+ metric_recorder_->UpdateObjective();
}
-void PacketReceiver::PlotObjectiveFunction(int64_t arrival_time_ms) {
- static const int kMetricPlotIntervalMs = 1000;
- if (!plot_objective_function_) {
- return;
- }
- if (arrival_time_ms - last_metric_plot_ms_ > kMetricPlotIntervalMs) {
- BWE_TEST_LOGGING_PLOT(1, metric_log_prefix_, arrival_time_ms,
- ObjectiveFunction());
- last_metric_plot_ms_ = arrival_time_ms;
+void PacketReceiver::PlotDelay(int64_t arrival_time_ms, int64_t send_time_ms) {
+ const int64_t kDelayPlotIntervalMs = 100;
+ if (arrival_time_ms >= last_delay_plot_ms_ + kDelayPlotIntervalMs) {
+ BWE_TEST_LOGGING_PLOT_WITH_NAME(0, delay_prefix_, arrival_time_ms,
+ arrival_time_ms - send_time_ms,
+ bwe_names[bwe_type_]);
+ last_delay_plot_ms_ = arrival_time_ms;
}
}
-void PacketReceiver::PlotPacketLoss(int64_t arrival_time_ms) {
- static const int kPacketLossPlotIntervalMs = 500;
- if (!plot_packet_loss_) {
- return;
- }
- if (arrival_time_ms - last_packet_loss_plot_ms_ > kPacketLossPlotIntervalMs) {
- BWE_TEST_LOGGING_PLOT(2, packet_loss_log_prefix_, arrival_time_ms,
- bwe_receiver_->RecentPacketLossRatio());
- last_packet_loss_plot_ms_ = arrival_time_ms;
- }
+float PacketReceiver::GlobalPacketLoss() {
+ return bwe_receiver_->GlobalReceiverPacketLossRatio();
}
Stats<double> PacketReceiver::GetDelayStats() const {
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/packet_receiver.h b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/packet_receiver.h
index a6838269c68..fb9e9fd7ab7 100644
--- a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/packet_receiver.h
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/packet_receiver.h
@@ -17,6 +17,7 @@
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/modules/remote_bitrate_estimator/test/bwe.h"
#include "webrtc/modules/remote_bitrate_estimator/test/bwe_test_framework.h"
+#include "webrtc/modules/remote_bitrate_estimator/test/metric_recorder.h"
namespace webrtc {
namespace testing {
@@ -29,6 +30,12 @@ class PacketReceiver : public PacketProcessor {
BandwidthEstimatorType bwe_type,
bool plot_delay,
bool plot_bwe);
+ PacketReceiver(PacketProcessorListener* listener,
+ int flow_id,
+ BandwidthEstimatorType bwe_type,
+ bool plot_delay,
+ bool plot_bwe,
+ MetricRecorder* metric_recorder);
~PacketReceiver();
// Implements PacketProcessor.
@@ -38,31 +45,25 @@ class PacketReceiver : public PacketProcessor {
Stats<double> GetDelayStats() const;
+ float GlobalPacketLoss();
+
protected:
- void PlotDelay(int64_t arrival_time_ms, int64_t send_time_ms);
- void PlotObjectiveFunction(int64_t arrival_time_ms);
- void PlotPacketLoss(int64_t arrival_time_ms);
- double ObjectiveFunction();
+ void UpdateMetrics(int64_t arrival_time_ms,
+ int64_t send_time_ms,
+ size_t payload_size);
- int64_t now_ms_;
- std::string delay_log_prefix_;
- std::string metric_log_prefix_;
- std::string packet_loss_log_prefix_;
- int64_t last_delay_plot_ms_;
- int64_t last_metric_plot_ms_;
- int64_t last_packet_loss_plot_ms_;
- bool plot_delay_;
- bool plot_objective_function_;
- bool plot_packet_loss_;
Stats<double> delay_stats_;
rtc::scoped_ptr<BweReceiver> bwe_receiver_;
- int64_t total_delay_ms_;
- size_t total_throughput_;
- int number_packets_;
-
private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(PacketReceiver);
+ void PlotDelay(int64_t arrival_time_ms, int64_t send_time_ms);
+ MetricRecorder* metric_recorder_;
+ bool plot_delay_; // Used in case there isn't a metric recorder.
+ int64_t last_delay_plot_ms_;
+ std::string delay_prefix_;
+ BandwidthEstimatorType bwe_type_;
+
+ RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(PacketReceiver);
};
} // namespace bwe
} // namespace testing
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/packet_sender.cc b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/packet_sender.cc
index eeaec865898..21c2f365ae1 100644
--- a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/packet_sender.cc
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/packet_sender.cc
@@ -17,11 +17,39 @@
#include "webrtc/base/checks.h"
#include "webrtc/modules/interface/module_common_types.h"
#include "webrtc/modules/remote_bitrate_estimator/test/bwe.h"
+#include "webrtc/modules/remote_bitrate_estimator/test/metric_recorder.h"
namespace webrtc {
namespace testing {
namespace bwe {
+void PacketSender::Pause() {
+ running_ = false;
+ if (metric_recorder_ != nullptr) {
+ metric_recorder_->PauseFlow();
+ }
+}
+
+void PacketSender::Resume(int64_t paused_time_ms) {
+ running_ = true;
+ if (metric_recorder_ != nullptr) {
+ metric_recorder_->ResumeFlow(paused_time_ms);
+ }
+}
+
+void PacketSender::set_metric_recorder(MetricRecorder* metric_recorder) {
+ metric_recorder_ = metric_recorder;
+}
+
+void PacketSender::RecordBitrate() {
+ if (metric_recorder_ != nullptr) {
+ BWE_TEST_LOGGING_CONTEXT("Sender");
+ BWE_TEST_LOGGING_CONTEXT(*flow_ids().begin());
+ metric_recorder_->UpdateTimeMs(clock_.TimeInMilliseconds());
+ metric_recorder_->UpdateSendingEstimateKbps(TargetBitrateKbps());
+ }
+}
+
std::list<FeedbackPacket*> GetFeedbackPackets(Packets* in_out,
int64_t end_time_ms,
int flow_id) {
@@ -48,13 +76,24 @@ VideoSender::VideoSender(PacketProcessorListener* listener,
bwe_(CreateBweSender(estimator_type,
source_->bits_per_second() / 1000,
this,
- &clock_)) {
+ &clock_)),
+ previous_sending_bitrate_(0) {
modules_.push_back(bwe_.get());
}
VideoSender::~VideoSender() {
}
+void VideoSender::Pause() {
+ previous_sending_bitrate_ = TargetBitrateKbps();
+ PacketSender::Pause();
+}
+
+void VideoSender::Resume(int64_t paused_time_ms) {
+ source_->SetBitrateBps(previous_sending_bitrate_);
+ PacketSender::Resume(paused_time_ms);
+}
+
void VideoSender::RunFor(int64_t time_ms, Packets* in_out) {
std::list<FeedbackPacket*> feedbacks = GetFeedbackPackets(
in_out, clock_.TimeInMilliseconds() + time_ms, source_->flow_id());
@@ -70,22 +109,30 @@ void VideoSender::ProcessFeedbackAndGeneratePackets(
int64_t time_to_run_ms = std::min<int64_t>(time_ms, 100);
if (!feedbacks->empty()) {
int64_t time_until_feedback_ms =
- feedbacks->front()->send_time_us() / 1000 -
- clock_.TimeInMilliseconds();
+ feedbacks->front()->send_time_ms() - clock_.TimeInMilliseconds();
time_to_run_ms =
std::max<int64_t>(std::min(time_ms, time_until_feedback_ms), 0);
}
+
+ if (!running_) {
+ source_->SetBitrateBps(0);
+ }
+
Packets generated;
source_->RunFor(time_to_run_ms, &generated);
bwe_->OnPacketsSent(generated);
packets->merge(generated, DereferencingComparator<Packet>);
+
clock_.AdvanceTimeMilliseconds(time_to_run_ms);
+
if (!feedbacks->empty()) {
bwe_->GiveFeedback(*feedbacks->front());
delete feedbacks->front();
feedbacks->pop_front();
}
+
bwe_->Process();
+
time_ms -= time_to_run_ms;
} while (time_ms > 0);
assert(feedbacks->empty());
@@ -99,6 +146,11 @@ void VideoSender::OnNetworkChanged(uint32_t target_bitrate_bps,
uint8_t fraction_lost,
int64_t rtt) {
source_->SetBitrateBps(target_bitrate_bps);
+ RecordBitrate();
+}
+
+uint32_t VideoSender::TargetBitrateKbps() {
+ return (source_->bits_per_second() + 500) / 1000;
}
PacedVideoSender::PacedVideoSender(PacketProcessorListener* listener,
@@ -133,10 +185,8 @@ void PacedVideoSender::RunFor(int64_t time_ms, Packets* in_out) {
int64_t time_until_process_ms = TimeUntilNextProcess(modules_);
int64_t time_until_feedback_ms = time_ms;
if (!feedbacks.empty())
- time_until_feedback_ms =
- std::max<int64_t>(feedbacks.front()->send_time_us() / 1000 -
- clock_.TimeInMilliseconds(),
- 0);
+ time_until_feedback_ms = std::max<int64_t>(
+ feedbacks.front()->send_time_ms() - clock_.TimeInMilliseconds(), 0);
int64_t time_until_next_event_ms =
std::min(time_until_feedback_ms, time_until_process_ms);
@@ -159,11 +209,10 @@ void PacedVideoSender::RunFor(int64_t time_ms, Packets* in_out) {
if (!generated_packets.empty()) {
for (Packet* packet : generated_packets) {
MediaPacket* media_packet = static_cast<MediaPacket*>(packet);
- pacer_.SendPacket(PacedSender::kNormalPriority,
- media_packet->header().ssrc,
- media_packet->header().sequenceNumber,
- (media_packet->send_time_us() + 500) / 1000,
- media_packet->payload_size(), false);
+ pacer_.SendPacket(
+ PacedSender::kNormalPriority, media_packet->header().ssrc,
+ media_packet->header().sequenceNumber, media_packet->send_time_ms(),
+ media_packet->payload_size(), false);
pacer_queue_.push_back(packet);
assert(pacer_queue_.size() < 10000);
}
@@ -222,6 +271,8 @@ void PacedVideoSender::QueuePackets(Packets* batch,
}
Packets to_transfer;
to_transfer.splice(to_transfer.begin(), queue_, queue_.begin(), it);
+ for (Packet* packet : to_transfer)
+ packet->set_paced(true);
bwe_->OnPacketsSent(to_transfer);
batch->merge(to_transfer, DereferencingComparator<Packet>);
}
@@ -235,9 +286,11 @@ bool PacedVideoSender::TimeToSendPacket(uint32_t ssrc,
MediaPacket* media_packet = static_cast<MediaPacket*>(*it);
if (media_packet->header().sequenceNumber == sequence_number) {
int64_t pace_out_time_ms = clock_.TimeInMilliseconds();
+
// Make sure a packet is never paced out earlier than when it was put into
// the pacer.
- assert(pace_out_time_ms >= (media_packet->send_time_us() + 500) / 1000);
+ assert(pace_out_time_ms >= media_packet->send_time_ms());
+
media_packet->SetAbsSendTimeMs(pace_out_time_ms);
media_packet->set_send_time_us(1000 * pace_out_time_ms);
media_packet->set_sender_timestamp_us(1000 * pace_out_time_ms);
@@ -262,14 +315,49 @@ void PacedVideoSender::OnNetworkChanged(uint32_t target_bitrate_bps,
PacedSender::kDefaultPaceMultiplier * target_bitrate_bps / 1000, 0);
}
+const int kNoLimit = std::numeric_limits<int>::max();
+const int kPacketSizeBytes = 1200;
+
+TcpSender::TcpSender(PacketProcessorListener* listener,
+ int flow_id,
+ int64_t offset_ms)
+ : TcpSender(listener, flow_id, offset_ms, kNoLimit) {
+}
+
+TcpSender::TcpSender(PacketProcessorListener* listener,
+ int flow_id,
+ int64_t offset_ms,
+ int send_limit_bytes)
+ : PacketSender(listener, flow_id),
+ cwnd_(10),
+ ssthresh_(kNoLimit),
+ ack_received_(false),
+ last_acked_seq_num_(0),
+ next_sequence_number_(0),
+ offset_ms_(offset_ms),
+ last_reduction_time_ms_(-1),
+ last_rtt_ms_(0),
+ total_sent_bytes_(0),
+ send_limit_bytes_(send_limit_bytes),
+ last_generated_packets_ms_(0),
+ num_recent_sent_packets_(0),
+ bitrate_kbps_(0) {
+}
+
void TcpSender::RunFor(int64_t time_ms, Packets* in_out) {
if (clock_.TimeInMilliseconds() + time_ms < offset_ms_) {
clock_.AdvanceTimeMilliseconds(time_ms);
+ if (running_) {
+ Pause();
+ }
return;
}
+
+ if (!running_ && total_sent_bytes_ == 0) {
+ Resume(offset_ms_);
+ }
+
int64_t start_time_ms = clock_.TimeInMilliseconds();
- BWE_TEST_LOGGING_CONTEXT("Sender");
- BWE_TEST_LOGGING_CONTEXT(*flow_ids().begin());
std::list<FeedbackPacket*> feedbacks = GetFeedbackPackets(
in_out, clock_.TimeInMilliseconds() + time_ms, *flow_ids().begin());
@@ -277,9 +365,9 @@ void TcpSender::RunFor(int64_t time_ms, Packets* in_out) {
// number of packets in_flight_ and the max number of packets in flight
// (cwnd_). Therefore SendPackets() isn't directly dependent on time_ms.
for (FeedbackPacket* fb : feedbacks) {
- clock_.AdvanceTimeMilliseconds(fb->send_time_us() / 1000 -
+ clock_.AdvanceTimeMilliseconds(fb->send_time_ms() -
clock_.TimeInMilliseconds());
- last_rtt_ms_ = fb->send_time_us() / 1000 - fb->latest_send_time_ms();
+ last_rtt_ms_ = fb->send_time_ms() - fb->latest_send_time_ms();
UpdateCongestionControl(fb);
SendPackets(in_out);
}
@@ -314,7 +402,7 @@ void TcpSender::SendPackets(Packets* in_out) {
void TcpSender::UpdateCongestionControl(const FeedbackPacket* fb) {
const TcpFeedback* tcp_fb = static_cast<const TcpFeedback*>(fb);
- DCHECK(!tcp_fb->acked_packets().empty());
+ RTC_DCHECK(!tcp_fb->acked_packets().empty());
ack_received_ = true;
uint16_t expected = tcp_fb->acked_packets().back() - last_acked_seq_num_;
@@ -359,15 +447,48 @@ void TcpSender::HandleLoss() {
Packets TcpSender::GeneratePackets(size_t num_packets) {
Packets generated;
+
+ UpdateSendBitrateEstimate(num_packets);
+
for (size_t i = 0; i < num_packets; ++i) {
- generated.push_back(new MediaPacket(*flow_ids().begin(),
- 1000 * clock_.TimeInMilliseconds(),
- 1200, next_sequence_number_++));
+ if ((total_sent_bytes_ + kPacketSizeBytes) > send_limit_bytes_) {
+ if (running_) {
+ Pause();
+ }
+ break;
+ }
+ generated.push_back(
+ new MediaPacket(*flow_ids().begin(), 1000 * clock_.TimeInMilliseconds(),
+ kPacketSizeBytes, next_sequence_number_++));
generated.back()->set_sender_timestamp_us(
1000 * clock_.TimeInMilliseconds());
+
+ total_sent_bytes_ += kPacketSizeBytes;
}
+
return generated;
}
+
+void TcpSender::UpdateSendBitrateEstimate(size_t num_packets) {
+ const int kTimeWindowMs = 500;
+ num_recent_sent_packets_ += num_packets;
+
+ int64_t delta_ms = clock_.TimeInMilliseconds() - last_generated_packets_ms_;
+ if (delta_ms >= kTimeWindowMs) {
+ bitrate_kbps_ =
+ static_cast<uint32_t>(8 * num_recent_sent_packets_ * kPacketSizeBytes) /
+ delta_ms;
+ last_generated_packets_ms_ = clock_.TimeInMilliseconds();
+ num_recent_sent_packets_ = 0;
+ }
+
+ RecordBitrate();
+}
+
+uint32_t TcpSender::TargetBitrateKbps() {
+ return bitrate_kbps_;
+}
+
} // namespace bwe
} // namespace testing
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/packet_sender.h b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/packet_sender.h
index 2e690c83770..c42647e2d3f 100644
--- a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/packet_sender.h
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/packet_sender.h
@@ -25,14 +25,18 @@ namespace webrtc {
namespace testing {
namespace bwe {
+class MetricRecorder;
+
class PacketSender : public PacketProcessor {
public:
PacketSender(PacketProcessorListener* listener, int flow_id)
: PacketProcessor(listener, flow_id, kSender),
+ running_(true),
// For Packet::send_time_us() to be comparable with timestamps from
// clock_, the clock of the PacketSender and the Source must be aligned.
// We assume that both start at time 0.
- clock_(0) {}
+ clock_(0),
+ metric_recorder_(nullptr) {}
virtual ~PacketSender() {}
// Call GiveFeedback() with the returned interval in milliseconds, provided
// there is a new estimate available.
@@ -42,8 +46,20 @@ class PacketSender : public PacketProcessor {
virtual int GetFeedbackIntervalMs() const = 0;
void SetSenderTimestamps(Packets* in_out);
+ virtual uint32_t TargetBitrateKbps() { return 0; }
+
+ virtual void Pause();
+ virtual void Resume(int64_t paused_time_ms);
+
+ void set_metric_recorder(MetricRecorder* metric_recorder);
+ virtual void RecordBitrate();
+
protected:
+ bool running_; // Initialized by default as true.
SimulatedClock clock_;
+
+ private:
+ MetricRecorder* metric_recorder_;
};
class VideoSender : public PacketSender, public BitrateObserver {
@@ -58,11 +74,16 @@ class VideoSender : public PacketSender, public BitrateObserver {
virtual VideoSource* source() const { return source_; }
+ uint32_t TargetBitrateKbps() override;
+
// Implements BitrateObserver.
void OnNetworkChanged(uint32_t target_bitrate_bps,
uint8_t fraction_lost,
int64_t rtt) override;
+ void Pause() override;
+ void Resume(int64_t paused_time_ms) override;
+
protected:
void ProcessFeedbackAndGeneratePackets(int64_t time_ms,
std::list<FeedbackPacket*>* feedbacks,
@@ -74,7 +95,8 @@ class VideoSender : public PacketSender, public BitrateObserver {
std::list<Module*> modules_;
private:
- DISALLOW_COPY_AND_ASSIGN(VideoSender);
+ uint32_t previous_sending_bitrate_;
+ RTC_DISALLOW_COPY_AND_ASSIGN(VideoSender);
};
class PacedVideoSender : public VideoSender, public PacedSender::Callback {
@@ -107,33 +129,29 @@ class PacedVideoSender : public VideoSender, public PacedSender::Callback {
Packets queue_;
Packets pacer_queue_;
- DISALLOW_IMPLICIT_CONSTRUCTORS(PacedVideoSender);
+ RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(PacedVideoSender);
};
class TcpSender : public PacketSender {
public:
- TcpSender(PacketProcessorListener* listener, int flow_id, int64_t offset_ms)
- : PacketSender(listener, flow_id),
- cwnd_(10),
- ssthresh_(std::numeric_limits<int>::max()),
- ack_received_(false),
- last_acked_seq_num_(0),
- next_sequence_number_(0),
- offset_ms_(offset_ms),
- last_reduction_time_ms_(-1),
- last_rtt_ms_(0) {}
-
+ TcpSender(PacketProcessorListener* listener, int flow_id, int64_t offset_ms);
+ TcpSender(PacketProcessorListener* listener,
+ int flow_id,
+ int64_t offset_ms,
+ int send_limit_bytes);
virtual ~TcpSender() {}
void RunFor(int64_t time_ms, Packets* in_out) override;
int GetFeedbackIntervalMs() const override { return 10; }
+ uint32_t TargetBitrateKbps() override;
+
private:
struct InFlight {
public:
InFlight(const MediaPacket& packet)
: sequence_number(packet.header().sequenceNumber),
- time_ms(packet.send_time_us() / 1000) {}
+ time_ms(packet.send_time_ms()) {}
InFlight(uint16_t seq_num, int64_t now_ms)
: sequence_number(seq_num), time_ms(now_ms) {}
@@ -153,6 +171,7 @@ class TcpSender : public PacketSender {
int TriggerTimeouts();
void HandleLoss();
Packets GeneratePackets(size_t num_packets);
+ void UpdateSendBitrateEstimate(size_t num_packets);
float cwnd_;
int ssthresh_;
@@ -163,6 +182,11 @@ class TcpSender : public PacketSender {
int64_t offset_ms_;
int64_t last_reduction_time_ms_;
int64_t last_rtt_ms_;
+ int total_sent_bytes_;
+ int send_limit_bytes_; // Initialized by default as kNoLimit.
+ int64_t last_generated_packets_ms_;
+ size_t num_recent_sent_packets_;
+ uint32_t bitrate_kbps_;
};
} // namespace bwe
} // namespace testing
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/plot_bars.sh b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/plot_bars.sh
new file mode 100755
index 00000000000..9f7fb162039
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/plot_bars.sh
@@ -0,0 +1,286 @@
+#!/bin/bash
+
+# Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+# To set up in e.g. Eclipse, run a separate shell and pipe the output from the
+# test into this script.
+#
+# In Eclipse, that amounts to creating a Run Configuration which starts
+# "/bin/bash" with the arguments "-c [trunk_path]/out/Debug/modules_unittests
+# --gtest_filter=*BweTest* | [trunk_path]/webrtc/modules/
+# remote_bitrate_estimator/test/plot_bars.sh
+
+# This script supports multiple figures (windows), the figure is specified as an
+# identifier at the first argument after the PLOT command. Each figure has a
+# single y axis and a dual y axis mode. If any line specifies an axis by ending
+# with "#<axis number (1 or 2)>" two y axis will be used, the first will be
+# assumed to represent bitrate (in kbps) and the second will be assumed to
+# represent time deltas (in ms).
+
+log=$(</dev/stdin)
+
+# Plot histograms.
+function gen_gnuplot_bar_input {
+ x_start=1
+ x_end=3.75
+ bars=$(echo "$log" | grep "BAR")
+
+ labels=$(echo "$log" | grep "^LABEL")
+ figures=($(echo "$bars" | cut -f 2 | sort | uniq))
+
+ echo "reset" # Clears previous settings.
+
+ echo "set title font 'Verdana,22'"
+ echo "set xtics font 'Verdana,24'"
+ echo "set ytics font 'Verdana,14'"
+ echo "set ylabel font 'Verdana,16'"
+
+ echo "set xrange[$x_start:$x_end]"
+ echo "set style fill solid 0.5"
+ echo "set style fill solid border -1"
+
+ declare -a ydist=(11.5 10.5 10.5) # Used to correctly offset the y label.
+ i=0
+ for figure in "${figures[@]}" ; do
+
+ echo "set terminal wxt $figure size 440,440 dashed"
+ echo "set ylabel offset ${ydist[$i]}, -3"
+ (( i++ ))
+
+ title=$(echo "$labels" | grep "^LABEL.$figure" | cut -f 3 | \
+ head -n 1 | sed 's/_/ /g')
+ y_label=$(echo "$labels" | grep "^LABEL.$figure" | cut -f 4 | \
+ head -n 1 | sed 's/_/ /g')
+
+ # RMCAT flows.
+ num_flows=$(echo "$labels" | grep "^LABEL.$figure" | cut -f 5 | \
+ head -n 1)
+
+ # RMCAT algorithm 1.
+ x_label_1=$(echo "$log" | grep "BAR.$figure" | cut -f 3 | sed 's/_/\t/g' \
+ | cut -f 1 | sort | uniq | head -n 1 )
+
+ # RMCAT algorithm 2.
+ x_label_2=$(echo "$log" | grep "BAR.$figure" | cut -f 3 | sed 's/_/\t/g' \
+ | cut -f 1 | sort | uniq | sed -n 2p)
+
+ x_labels="('$x_label_1' 2, '$x_label_2' 3)"
+ tcp_flow=false
+
+ tcp_space=0.2 # Extra horizontal space between bars.
+
+ # Parse labels if there are other flows in addition to RMCAT ones.
+ IFS='x' read -ra split_label_1 <<< "$x_label_1"
+
+ if (( ${#split_label_1[@]} > "1" )); then
+ tcp_flow=true
+ box_width=$(echo "(1.0-$tcp_space/2)/$num_flows" | bc -l)
+ echo "set xtics font 'Verdana,16'"
+ x_labels="("
+ delimiter=""
+ abscissa=$(echo $x_start + 0.5 + 0.5*$box_width | bc)
+ for label in "${split_label_1[@]}" ; do
+ x_labels+="$delimiter'$label' $abscissa"
+ abscissa=$(echo $abscissa + $box_width | bc)
+ delimiter=", "
+ done
+ abscissa=$(echo $abscissa + $tcp_space | bc)
+ IFS='x' read -ra split_label_2 <<< "$x_label_2"
+ for label in "${split_label_2[@]}" ; do
+ x_labels+="$delimiter'$label' $abscissa"
+ abscissa=$(echo $abscissa + $box_width | bc)
+ done
+ x_labels="$x_labels)"
+ else
+ box_width=$(echo 1.0/$num_flows | bc -l)
+ fi
+
+ echo "set boxwidth $box_width"
+
+ # Plots can be directly exported to image files.
+ file_name=$(echo "$labels" | grep "^LABEL.$figure" | cut -f 5 | head -n 1)
+
+ y_max=0 # Used to scale the plot properly.
+
+ # Scale all latency plots with the same vertical scale.
+ delay_figure=5
+ if (( $figure==$delay_figure )) ; then
+ y_max=400
+ else # Take y_max = 1.1 * highest plot value.
+
+ # Since only the optimal bitrate for the first flow is being ploted,
+ # consider only this one for scalling purposes.
+ data_sets=$(echo "$bars" | grep "LIMITERRORBAR.$figure" | cut -f 3 | \
+ sed 's/_/\t/g' | cut -f 1 | sort | uniq)
+
+ if (( ${#data_sets[@]} > "0" )); then
+ for set in $data_sets ; do
+ y=$(echo "$bars" | grep "LIMITERRORBAR.$figure.$set" | cut -f 8 | \
+ head -n 1)
+ if (( $(bc <<< "$y > $y_max") == 1 )); then
+ y_max=$y
+ fi
+ done
+ fi
+
+ data_sets=$(echo "$bars" | grep "ERRORBAR.$figure" | cut -f 3 | \
+ sort | uniq)
+ if (( ${#data_sets[@]} > "0" )); then
+ for set in $data_sets ; do
+ y=$(echo "$bars" | grep "ERRORBAR.$figure.$set" | cut -f 6 | \
+ head -n 1)
+ if (( $(bc <<< "$y > $y_max") == 1 )) ; then
+ y_max=$y
+ fi
+ done
+ fi
+
+ data_sets=$(echo "$bars" | grep "BAR.$figure" | cut -f 3 | sort | uniq)
+
+ for set in $data_sets ; do
+ y=$(echo "$bars" | grep "BAR.$figure.$set" | cut -f 4 | head -n 1)
+ if (( $(bc <<< "$y > $y_max") == 1 )) ; then
+ y_max=$y
+ fi
+ done
+
+ y_max=$(echo $y_max*1.1 | bc)
+ fi
+
+
+ echo "set ylabel \"$y_label\""
+ echo "set yrange[0:$y_max]"
+
+ echo "set multiplot"
+
+ # Plot bars.
+ data_sets=$(echo "$bars" | grep "BAR.$figure" | cut -f 3 | sort | uniq)
+
+ echo "set xtics $x_labels"
+ echo "plot '-' using 1:4:2 with boxes lc variable notitle"
+
+ echo
+
+ color=11 # Green.
+ x_bar=$(echo $x_start + 0.5 + 0.5*$box_width | bc)
+ for set in $data_sets ; do
+ echo -n "$x_bar $color "
+ echo "$bars" | grep "BAR.$figure.$set" | cut -f 3,4
+
+ # Add extra space if TCP flows are being plotted.
+ if $tcp_flow && \
+ (( $(bc <<< "$x_bar < $x_start + 1.5 - 0.5*$tcp_space") == 1 )) && \
+ (( $(bc <<< "$x_bar + $box_width > $x_start + 1.5 + 0.5*$tcp_space") \
+ == 1 )); then
+ x_bar=$(echo $x_bar + $tcp_space | bc)
+ fi
+
+ x_bar=$(echo $x_bar + $box_width | bc)
+
+ if (( $(bc <<< "$x_bar > 2.5") == 1 )) ; then
+ color=12 # Blue.
+ fi
+ # Different bar color for TCP flows:
+ if $tcp_flow && \
+ (( $(bc <<< "(100*$x_bar)%100 < 50") == 1 ))
+ then
+ color=18 # Gray.
+ fi
+ done
+ echo "e"
+
+ # Plot Baseline bars, e.g. one-way path delay on latency plots.
+ data_sets=$(echo "$log" | grep "BASELINE.$figure" | cut -f 3 | sort | uniq)
+
+ if (( ${#data_sets} > "0" )); then
+ echo "set xtics $x_labels"
+ echo "plot '-' using 1:4:2 with boxes lc variable notitle"
+
+ echo
+
+ color=18 # Gray.
+ x_bar=$(echo $x_start + 0.5 + 0.5*$box_width | bc)
+ for set in $data_sets ; do
+ echo -n "$x_bar $color "
+ echo "$log" | grep "BASELINE.$figure.$set" | cut -f 3,4
+
+ # Add extra space if TCP flows are being plotted.
+ if $tcp_flow && \
+ (( $(bc <<< "$x_bar < $x_start + 1.5 - 0.5*$tcp_space") == 1 )) && \
+ (( $(bc <<< "$x_bar + $box_width > $x_start + 1.5 \
+ + 0.5*$tcp_space") == 1 )); then
+ x_bar=$(echo $x_bar + $tcp_space | bc)
+ fi
+
+ x_bar=$(echo $x_bar + $box_width | bc)
+
+ done
+ echo "e"
+ fi
+
+ # Plot vertical error lines, e.g. y +- sigma.
+ data_sets=$(echo "$bars" | grep "ERRORBAR.$figure" | cut -f 3 | sort | uniq)
+
+ if (( ${#data_sets} > "0" )); then
+
+ echo "set key left"
+ error_title=$(echo "$bars" | grep "ERRORBAR.$figure" | cut -f 7 | \
+ head -n 1 | sed 's/_/ /g')
+
+ echo "set xtics $x_labels"
+ echo "plot '-' using 1:3:4:5 title '$error_title' with yerr"
+
+ x_error_line=$(echo $x_start + 0.5 + 0.5*$box_width | bc)
+ for set in $data_sets ; do
+ echo -n "$x_error_line "
+ echo "$bars" | grep "ERRORBAR.$figure.$set" | cut -f 3,4,5,6
+
+ # Add extra space if TCP flows are being plotted.
+ if $tcp_flow && \
+ (( $(bc <<< "$x_error_line < $x_start + 1.5 - 0.5*$tcp_space") == 1 \
+ )) && (( $(bc <<< "$x_error_line + $box_width > $x_start + 1.5 \
+ + 0.5*$tcp_space") == 1 )); then
+ x_error_line=$(echo $x_error_line + $tcp_space | bc)
+ fi
+
+ x_error_line=$(echo $x_error_line + $box_width | bc)
+ done
+ echo "e"
+ fi
+
+ # Plot horizontal dashed lines, e.g. y = optimal bitrate.
+ data_sets=$(echo "$bars" | grep "LIMITERRORBAR.$figure" | cut -f 3 \
+ | sort | uniq)
+ if (( ${#data_sets} > "0" )); then
+
+ echo "set style line 1 lt 1 lw 3 pt 3 ps 0 linecolor rgb 'black'"
+
+ limit_titles=$(echo "$bars" | grep "LIMITERRORBAR.$figure" | cut -f 9 \
+ | sort | uniq)
+
+ for title in $limit_titles ; do
+ y_max=$(echo "$bars" | grep "LIMITERRORBAR.$figure" | grep "$title" \
+ | cut -f 8 | head -n 1)
+
+ retouched_title=$(echo "$title" | sed 's/#/\t/g' | cut -f 1 \
+ | sed 's/_/ /g')
+
+ echo "set key right top"
+ echo "set xtics $x_labels"
+ echo "plot $y_max lt 7 lw 1 linecolor rgb 'black' \
+ title '$retouched_title'"
+ done
+
+ fi
+
+ echo "unset multiplot"
+ done
+}
+
+gen_gnuplot_bar_input | gnuplot -persist
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/plot_dynamics.py b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/plot_dynamics.py
new file mode 100644
index 00000000000..1bae1e81f03
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/plot_dynamics.py
@@ -0,0 +1,166 @@
+#!/usr/bin/env python
+# Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+# This script is used to plot simulation dynamics.
+# Able to plot each flow separately. Other plot boxes can be added,
+# currently one for Throughput, one for Latency and one for Packet Loss.
+
+import matplotlib
+import matplotlib.pyplot as plt
+import numpy
+import re
+import sys
+
+# Change this to True to save the figure to a file. Look below for details.
+save_figure = False
+
+class Variable(object):
+ def __init__(self, variable):
+ self._ID = variable[0]
+ self._xlabel = variable[1]
+ self._ylabel = variable[2]
+ self._subplot = variable[3]
+ self._y_max = variable[4]
+ self.samples = dict()
+
+ def getID(self):
+ return self._ID
+
+ def getXLabel(self):
+ return self._xlabel
+
+ def getYLabel(self):
+ return self._ylabel
+
+ def getSubplot(self):
+ return self._subplot
+
+ def getYMax(self):
+ return self._y_max
+
+ def getNumberOfFlows(self):
+ return len(self.samples)
+
+
+ def addSample(self, line):
+ groups = re.search(r'_(((\d)+((,(\d)+)*))_(\D+))#\d@(\S+)', line)
+
+ # Each variable will be plotted in a separated box.
+ var_name = groups.group(1)
+ alg_name = groups.group(8)
+
+ alg_name = alg_name.replace('_', ' ')
+
+ if alg_name not in self.samples.keys():
+ self.samples[alg_name] = {}
+
+ if var_name not in self.samples[alg_name].keys():
+ self.samples[alg_name][var_name] = []
+
+ sample = re.search(r'(\d+\.\d+)\t([-]?\d+\.\d+)', line)
+
+ s = (sample.group(1), sample.group(2))
+ self.samples[alg_name][var_name].append(s)
+
+def plotVar(v, ax, show_legend, show_x_label):
+ if show_x_label:
+ ax.set_xlabel(v.getXLabel(), fontsize='large')
+ ax.set_ylabel(v.getYLabel(), fontsize='large')
+
+ for alg in v.samples.keys():
+
+ for series in v.samples[alg].keys():
+
+ x = [sample[0] for sample in v.samples[alg][series]]
+ y = [sample[1] for sample in v.samples[alg][series]]
+ x = numpy.array(x)
+ y = numpy.array(y)
+
+ line = plt.plot(x, y, label=alg, linewidth=4.0)
+
+ colormap = {'Available0':'#AAAAAA',
+ 'Available1':'#AAAAAA',
+ 'GCC0':'#80D000',
+ 'GCC1':'#008000',
+ 'GCC2':'#00F000',
+ 'GCC3':'#00B000',
+ 'GCC4':'#70B020',
+ 'NADA0':'#0000AA',
+ 'NADA1':'#A0A0FF',
+ 'NADA2':'#0000FF',
+ 'NADA3':'#C0A0FF',
+ 'NADA4':'#9060B0',}
+
+ flow_id = re.search(r'(\d+(,\d+)*)', series) # One or multiple ids.
+ key = alg + flow_id.group(1)
+
+ if key in colormap:
+ plt.setp(line, color=colormap[key])
+ elif alg == 'TCP':
+ plt.setp(line, color='#AAAAAA')
+ else:
+ plt.setp(line, color='#654321')
+
+ if alg.startswith('Available'):
+ plt.setp(line, linestyle='--')
+ plt.grid(True)
+
+ # x1, x2, y1, y2
+ _, x2, _, y2 = plt.axis()
+ if v.getYMax() >= 0:
+ y2 = v.getYMax()
+ plt.axis((0, x2, 0, y2))
+
+ if show_legend:
+ plt.legend(loc='upper center', bbox_to_anchor=(0.5, 1.40),
+ shadow=True, fontsize='large', ncol=len(v.samples))
+
+def main():
+ variables = [
+ ('Throughput_kbps', "Time (s)", "Throughput (kbps)", 1, 4000),
+ ('Delay_ms', "Time (s)", "One-way Delay (ms)", 2, 500),
+ ('Packet_Loss', "Time (s)", "Packet Loss Ratio", 3, 1.0),
+ # ('Sending_Estimate_kbps', "Time (s)", "Sending Estimate (kbps)",
+ # 4, 4000),
+ ]
+
+ var = []
+
+ # Create objects.
+ for variable in variables:
+ var.append(Variable(variable))
+
+ # Add samples to the objects.
+ for line in sys.stdin:
+ if line.startswith("[ RUN ]"):
+ test_name = re.search(r'\.(\w+)', line).group(1)
+ if line.startswith("PLOT"):
+ for v in var:
+ if v.getID() in line:
+ v.addSample(line)
+
+ matplotlib.rcParams.update({'font.size': 48/len(variables)})
+
+ # Plot variables.
+ fig = plt.figure()
+
+ # Offest and threshold on the same plot.
+ n = var[-1].getSubplot()
+ i = 0
+ for v in var:
+ ax = fig.add_subplot(n, 1, v.getSubplot())
+ plotVar(v, ax, i == 0, i == n - 1)
+ i += 1
+
+ if save_figure:
+ fig.savefig(test_name + ".png")
+ plt.show()
+
+if __name__ == '__main__':
+ main()
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe_plot.sh b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/plot_dynamics.sh
index 66b7417e46b..fd104a1704f 100755
--- a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/bwe_plot.sh
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/plot_dynamics.sh
@@ -14,9 +14,9 @@
# In Eclipse, that amounts to creating a Run Configuration which starts
# "/bin/bash" with the arguments "-c [trunk_path]/out/Debug/modules_unittests
# --gtest_filter=*BweTest* | [trunk_path]/webrtc/modules/
-# remote_bitrate_estimator/bwe_plot.
+# remote_bitrate_estimator/test/plot_dynamics.sh
-# bwe_plot.sh supports multiple figures (windows), the figure is specified as an
+# This script supports multiple figures (windows), the figure is specified as an
# identifier at the first argument after the PLOT command. Each figure has a
# single y axis and a dual y axis mode. If any line specifies an axis by ending
# with "#<axis number (1 or 2)>" two y axis will be used, the first will be
@@ -25,38 +25,53 @@
log=$(</dev/stdin)
+# Plot dynamics.
function gen_gnuplot_input {
colors=(a7001f 0a60c2 b2582b 21a66c d6604d 4393c3 f4a582 92c5de edcbb7 b1c5d0)
plots=$(echo "$log" | grep "^PLOT")
+ # Each figure corresponds to a separate plot window.
figures=($(echo "$plots" | cut -f 2 | sort | uniq))
for figure in "${figures[@]}" ; do
+ # Each data set corresponds to a plot line.
data_sets=$(echo "$plots" | grep "^PLOT.$figure" | cut -f 3 | sort | uniq)
+ # Lines can be scaled on the left (1) or right (2) axis.
linetypes=($(echo "$data_sets" | grep "#" | cut -d '#' -f 2 | \
- cut -d ' ' -f 1))
- echo -n "reset; "
- echo -n "set terminal wxt $figure size 1440,900 font \"Arial,9\"; "
- echo -n "set xlabel \"Seconds\"; "
- if (( "${#linetypes[@]}" > "0" )); then
- echo -n "set ylabel 'bitrate (kbps)';"
- echo -n "set ytics nomirror;"
- echo -n "set y2label 'time delta (ms)';"
- echo -n "set y2tics nomirror;"
+ cut -d '@' -f 1 | uniq))
+
+ # Set plot configurations.
+ echo "reset; "
+ echo "set terminal wxt $figure size 1440,900 font \"Arial,9\"; "
+ echo "set xlabel \"Seconds\"; "
+ if (( "${#linetypes[@]}" > "1" )); then
+ echo "set ylabel 'Bitrate (kbps)';" # Left side.
+ echo "set ytics nomirror;"
+ echo "set y2label 'Time delta (ms)';" # Right side.
+ echo "set y2tics nomirror;"
+ else
+ # Single axis (left side), set its label according to data.
+ y_label=$(echo "$data_sets" | grep "#" | cut -d '#' -f 1 | \
+ cut -d ' ' -f 1 | cut -d '/' -f 3 | sed 's/[0-9]/#/g' | \
+ cut -d '#' -f 3 | head -n 1 | sed 's/_/ /g')
+ echo "set ylabel \"$y_label\";"
fi
- echo -n "plot "
+
i=0
+ echo -n "plot "
for set in $data_sets ; do
(( i++ )) && echo -n ","
echo -n "'-' with "
echo -n "linespoints "
echo -n "ps 0.5 "
echo -n "lc rgbcolor \"#${colors[$(($i % 10))]}\" "
- if (( "${#linetypes[@]}" > "0" )); then
- if (( "$i" <= "${#linetypes[@]}" )); then
- echo -n "axes x1y${linetypes[$i - 1]} "
+ if (( "${#linetypes[@]}" > "1" )); then
+ # Multiple sets can have a same line plot.
+ linetype=$(echo "$set" | grep "#" | cut -d '#' -f 2 | cut -d '@' -f 1)
+ if (( "${#linetype}" > "0")); then
+ echo -n "axes x1y$linetype "
else
# If no line type is specified, but line types are used, we will
- # default to the bitrate axis.
+ # default to scale on the left axis.
echo -n "axes x1y1 "
fi
fi
@@ -69,5 +84,4 @@ function gen_gnuplot_input {
done
done
}
-
gen_gnuplot_input | gnuplot -persist
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/random.h b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/random.h
index 9713e437116..31c1ec142eb 100644
--- a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/random.h
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/test/random.h
@@ -33,7 +33,7 @@ class Random {
uint32_t a_;
uint32_t b_;
- DISALLOW_IMPLICIT_CONSTRUCTORS(Random);
+ RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(Random);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/tools/bwe_rtp.cc b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/tools/bwe_rtp.cc
index d424919f7af..9493805a1c7 100644
--- a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/tools/bwe_rtp.cc
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/tools/bwe_rtp.cc
@@ -10,16 +10,59 @@
#include "webrtc/modules/remote_bitrate_estimator/tools/bwe_rtp.h"
+#include <sstream>
#include <stdio.h>
#include <string>
+#include "gflags/gflags.h"
#include "webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time.h"
#include "webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.h"
#include "webrtc/modules/rtp_rtcp/interface/rtp_header_parser.h"
#include "webrtc/modules/rtp_rtcp/interface/rtp_payload_registry.h"
#include "webrtc/test/rtp_file_reader.h"
-const int kMinBitrateBps = 30000;
+namespace flags {
+
+DEFINE_string(extension_type,
+ "abs",
+ "Extension type, either abs for absolute send time or tsoffset "
+ "for timestamp offset.");
+std::string ExtensionType() {
+ return static_cast<std::string>(FLAGS_extension_type);
+}
+
+DEFINE_int32(extension_id, 3, "Extension id.");
+int ExtensionId() {
+ return static_cast<int>(FLAGS_extension_id);
+}
+
+DEFINE_string(input_file, "", "Input file.");
+std::string InputFile() {
+ return static_cast<std::string>(FLAGS_input_file);
+}
+
+DEFINE_string(ssrc_filter,
+ "",
+ "Comma-separated list of SSRCs in hexadecimal which are to be "
+ "used as input to the BWE (only applicable to pcap files).");
+std::set<uint32_t> SsrcFilter() {
+ std::string ssrc_filter_string = static_cast<std::string>(FLAGS_ssrc_filter);
+ if (ssrc_filter_string.empty())
+ return std::set<uint32_t>();
+ std::stringstream ss;
+ std::string ssrc_filter = ssrc_filter_string;
+ std::set<uint32_t> ssrcs;
+
+ // Parse the ssrcs in hexadecimal format.
+ ss << std::hex << ssrc_filter;
+ uint32_t ssrc;
+ while (ss >> ssrc) {
+ ssrcs.insert(ssrc);
+ ss.ignore(1, ',');
+ }
+ return ssrcs;
+}
+} // namespace flags
bool ParseArgsAndSetupEstimator(int argc,
char** argv,
@@ -29,37 +72,56 @@ bool ParseArgsAndSetupEstimator(int argc,
webrtc::RtpHeaderParser** parser,
webrtc::RemoteBitrateEstimator** estimator,
std::string* estimator_used) {
- *rtp_reader = webrtc::test::RtpFileReader::Create(
- webrtc::test::RtpFileReader::kRtpDump, argv[3]);
+ google::ParseCommandLineFlags(&argc, &argv, true);
+ std::string filename = flags::InputFile();
+
+ std::set<uint32_t> ssrc_filter = flags::SsrcFilter();
+ fprintf(stderr, "Filter on SSRC: ");
+ for (auto& s : ssrc_filter) {
+ fprintf(stderr, "0x%08x, ", s);
+ }
+ fprintf(stderr, "\n");
+ if (filename.substr(filename.find_last_of(".")) == ".pcap") {
+ fprintf(stderr, "Opening as pcap\n");
+ *rtp_reader = webrtc::test::RtpFileReader::Create(
+ webrtc::test::RtpFileReader::kPcap, filename.c_str(),
+ flags::SsrcFilter());
+ } else {
+ fprintf(stderr, "Opening as rtp\n");
+ *rtp_reader = webrtc::test::RtpFileReader::Create(
+ webrtc::test::RtpFileReader::kRtpDump, filename.c_str());
+ }
if (!*rtp_reader) {
- fprintf(stderr, "Cannot open input file %s\n", argv[3]);
+ fprintf(stderr, "Cannot open input file %s\n", filename.c_str());
return false;
}
- fprintf(stderr, "Input file: %s\n\n", argv[3]);
- webrtc::RTPExtensionType extension = webrtc::kRtpExtensionAbsoluteSendTime;
+ fprintf(stderr, "Input file: %s\n\n", filename.c_str());
- if (strncmp("tsoffset", argv[1], 8) == 0) {
+ webrtc::RTPExtensionType extension = webrtc::kRtpExtensionAbsoluteSendTime;
+ if (flags::ExtensionType() == "tsoffset") {
extension = webrtc::kRtpExtensionTransmissionTimeOffset;
fprintf(stderr, "Extension: toffset\n");
- } else {
+ } else if (flags::ExtensionType() == "abs") {
fprintf(stderr, "Extension: abs\n");
+ } else {
+ fprintf(stderr, "Unknown extension type\n");
+ return false;
}
- int id = atoi(argv[2]);
// Setup the RTP header parser and the bitrate estimator.
*parser = webrtc::RtpHeaderParser::Create();
- (*parser)->RegisterRtpHeaderExtension(extension, id);
+ (*parser)->RegisterRtpHeaderExtension(extension, flags::ExtensionId());
if (estimator) {
switch (extension) {
case webrtc::kRtpExtensionAbsoluteSendTime: {
- *estimator = new webrtc::RemoteBitrateEstimatorAbsSendTime(
- observer, clock, kMinBitrateBps);
+ *estimator =
+ new webrtc::RemoteBitrateEstimatorAbsSendTime(observer, clock);
*estimator_used = "AbsoluteSendTimeRemoteBitrateEstimator";
break;
}
case webrtc::kRtpExtensionTransmissionTimeOffset: {
- *estimator = new webrtc::RemoteBitrateEstimatorSingleStream(
- observer, clock, kMinBitrateBps);
+ *estimator =
+ new webrtc::RemoteBitrateEstimatorSingleStream(observer, clock);
*estimator_used = "RemoteBitrateEstimator";
break;
}
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/tools/bwe_rtp_play.cc b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/tools/bwe_rtp_play.cc
index 965586757de..19e4a07b4d3 100644
--- a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/tools/bwe_rtp_play.cc
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/tools/bwe_rtp_play.cc
@@ -38,15 +38,6 @@ class Observer : public webrtc::RemoteBitrateObserver {
};
int main(int argc, char** argv) {
- if (argc < 4) {
- printf("Usage: bwe_rtp_play <extension type> <extension id> "
- "<input_file.rtp>\n");
- printf("<extension type> can either be:\n"
- " abs for absolute send time or\n"
- " tsoffset for timestamp offset.\n"
- "<extension id> is the id associated with the extension.\n");
- return -1;
- }
webrtc::test::RtpFileReader* reader;
webrtc::RemoteBitrateEstimator* estimator;
webrtc::RtpHeaderParser* parser;
@@ -76,22 +67,24 @@ int main(int argc, char** argv) {
packet.time_ms = packet.time_ms - first_rtp_time_ms;
while (true) {
if (next_rtp_time_ms <= clock.TimeInMilliseconds()) {
- webrtc::RTPHeader header;
- parser->Parse(packet.data, packet.length, &header);
- if (header.extension.hasAbsoluteSendTime)
- ++abs_send_time_count;
- if (header.extension.hasTransmissionTimeOffset)
- ++ts_offset_count;
- size_t packet_length = packet.length;
- // Some RTP dumps only include the header, in which case packet.length
- // is equal to the header length. In those cases packet.original_length
- // usually contains the original packet length.
- if (packet.original_length > 0) {
- packet_length = packet.original_length;
+ if (!parser->IsRtcp(packet.data, packet.length)) {
+ webrtc::RTPHeader header;
+ parser->Parse(packet.data, packet.length, &header);
+ if (header.extension.hasAbsoluteSendTime)
+ ++abs_send_time_count;
+ if (header.extension.hasTransmissionTimeOffset)
+ ++ts_offset_count;
+ size_t packet_length = packet.length;
+ // Some RTP dumps only include the header, in which case packet.length
+ // is equal to the header length. In those cases packet.original_length
+ // usually contains the original packet length.
+ if (packet.original_length > 0) {
+ packet_length = packet.original_length;
+ }
+ rbe->IncomingPacket(clock.TimeInMilliseconds(),
+ packet_length - header.headerLength, header, true);
+ ++packet_counter;
}
- rbe->IncomingPacket(clock.TimeInMilliseconds(),
- packet_length - header.headerLength, header, true);
- ++packet_counter;
if (!rtp_reader->NextPacket(&packet)) {
break;
}
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/tools/rtp_to_text.cc b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/tools/rtp_to_text.cc
index f2ff7dfb852..e2774818861 100644
--- a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/tools/rtp_to_text.cc
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/tools/rtp_to_text.cc
@@ -19,17 +19,6 @@
#include "webrtc/test/rtp_file_reader.h"
int main(int argc, char** argv) {
- if (argc < 4) {
- fprintf(stderr, "Usage: rtp_to_text <extension type> <extension id>"
- " <input_file.rtp> [-t]\n");
- fprintf(stderr, "<extension type> can either be:\n"
- " abs for absolute send time or\n"
- " tsoffset for timestamp offset.\n"
- "<extension id> is the id associated with the extension.\n"
- " -t is an optional flag, if set only packet arrival time will be"
- " output.\n");
- return -1;
- }
webrtc::test::RtpFileReader* reader;
webrtc::RtpHeaderParser* parser;
if (!ParseArgsAndSetupEstimator(argc, argv, NULL, NULL, &reader, &parser,
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/transport_feedback_adapter.cc b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/transport_feedback_adapter.cc
new file mode 100644
index 00000000000..5f51bc55e9c
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/transport_feedback_adapter.cc
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <limits>
+
+#include "webrtc/base/checks.h"
+#include "webrtc/base/logging.h"
+#include "webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time.h"
+#include "webrtc/modules/remote_bitrate_estimator/transport_feedback_adapter.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h"
+#include "webrtc/modules/utility/interface/process_thread.h"
+
+namespace webrtc {
+
+const int64_t kNoTimestamp = -1;
+const int64_t kSendTimeHistoryWindowMs = 10000;
+const int64_t kBaseTimestampScaleFactor =
+ rtcp::TransportFeedback::kDeltaScaleFactor * (1 << 8);
+const int64_t kBaseTimestampRangeSizeUs = kBaseTimestampScaleFactor * (1 << 24);
+
+TransportFeedbackAdapter::TransportFeedbackAdapter(
+ RtcpBandwidthObserver* bandwidth_observer,
+ Clock* clock,
+ ProcessThread* process_thread)
+ : send_time_history_(kSendTimeHistoryWindowMs),
+ rtcp_bandwidth_observer_(bandwidth_observer),
+ process_thread_(process_thread),
+ clock_(clock),
+ current_offset_ms_(kNoTimestamp),
+ last_timestamp_us_(kNoTimestamp) {}
+
+TransportFeedbackAdapter::~TransportFeedbackAdapter() {
+ if (bitrate_estimator_.get())
+ process_thread_->DeRegisterModule(bitrate_estimator_.get());
+}
+
+void TransportFeedbackAdapter::SetBitrateEstimator(
+ RemoteBitrateEstimator* rbe) {
+ if (bitrate_estimator_.get() != rbe) {
+ bitrate_estimator_.reset(rbe);
+ process_thread_->RegisterModule(rbe);
+ }
+}
+
+void TransportFeedbackAdapter::OnPacketSent(const PacketInfo& info) {
+ rtc::CritScope cs(&lock_);
+ send_time_history_.AddAndRemoveOld(info);
+}
+
+void TransportFeedbackAdapter::OnTransportFeedback(
+ const rtcp::TransportFeedback& feedback) {
+ int64_t timestamp_us = feedback.GetBaseTimeUs();
+ // Add timestamp deltas to a local time base selected on first packet arrival.
+ // This won't be the true time base, but makes it easier to manually inspect
+ // time stamps.
+ if (last_timestamp_us_ == kNoTimestamp) {
+ current_offset_ms_ = clock_->TimeInMilliseconds();
+ } else {
+ int64_t delta = timestamp_us - last_timestamp_us_;
+
+ // Detect and compensate for wrap-arounds in base time.
+ if (std::abs(delta - kBaseTimestampRangeSizeUs) < std::abs(delta)) {
+ delta -= kBaseTimestampRangeSizeUs; // Wrap backwards.
+ } else if (std::abs(delta + kBaseTimestampRangeSizeUs) < std::abs(delta)) {
+ delta += kBaseTimestampRangeSizeUs; // Wrap forwards.
+ }
+
+ current_offset_ms_ += delta / 1000;
+ }
+ last_timestamp_us_ = timestamp_us;
+
+ uint16_t sequence_number = feedback.GetBaseSequence();
+ std::vector<int64_t> delta_vec = feedback.GetReceiveDeltasUs();
+ auto delta_it = delta_vec.begin();
+ std::vector<PacketInfo> packet_feedback_vector;
+ packet_feedback_vector.reserve(delta_vec.size());
+
+ {
+ rtc::CritScope cs(&lock_);
+ size_t failed_lookups = 0;
+ int64_t offset_us = 0;
+ for (auto symbol : feedback.GetStatusVector()) {
+ if (symbol != rtcp::TransportFeedback::StatusSymbol::kNotReceived) {
+ RTC_DCHECK(delta_it != delta_vec.end());
+ offset_us += *(delta_it++);
+ int64_t timestamp_ms = current_offset_ms_ + (offset_us / 1000);
+ PacketInfo info = {timestamp_ms, 0, sequence_number, 0, false};
+ if (send_time_history_.GetInfo(&info, true)) {
+ packet_feedback_vector.push_back(info);
+ } else {
+ ++failed_lookups;
+ }
+ }
+ ++sequence_number;
+ }
+ RTC_DCHECK(delta_it == delta_vec.end());
+ if (failed_lookups > 0) {
+ LOG(LS_WARNING) << "Failed to lookup send time for " << failed_lookups
+ << " packet" << (failed_lookups > 1 ? "s" : "")
+ << ". Send time history too small?";
+ }
+ }
+
+ RTC_DCHECK(bitrate_estimator_.get() != nullptr);
+ bitrate_estimator_->IncomingPacketFeedbackVector(packet_feedback_vector);
+}
+
+void TransportFeedbackAdapter::OnReceiveBitrateChanged(
+ const std::vector<unsigned int>& ssrcs,
+ unsigned int bitrate) {
+ rtcp_bandwidth_observer_->OnReceivedEstimatedBitrate(bitrate);
+}
+
+void TransportFeedbackAdapter::OnRttUpdate(int64_t avg_rtt_ms,
+ int64_t max_rtt_ms) {
+ RTC_DCHECK(bitrate_estimator_.get() != nullptr);
+ bitrate_estimator_->OnRttUpdate(avg_rtt_ms, max_rtt_ms);
+}
+
+} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/transport_feedback_adapter.h b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/transport_feedback_adapter.h
new file mode 100644
index 00000000000..56b2c73873d
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/transport_feedback_adapter.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_REMOTE_BITRATE_ESTIMATOR_TRANSPORT_FEEDBACK_ADAPTER_H_
+#define WEBRTC_MODULES_REMOTE_BITRATE_ESTIMATOR_TRANSPORT_FEEDBACK_ADAPTER_H_
+
+#include <vector>
+
+#include "webrtc/base/criticalsection.h"
+#include "webrtc/base/thread_annotations.h"
+#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
+#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h"
+#include "webrtc/modules/remote_bitrate_estimator/include/send_time_history.h"
+
+namespace webrtc {
+
+class ProcessThread;
+
+class TransportFeedbackAdapter : public TransportFeedbackObserver,
+ public CallStatsObserver,
+ public RemoteBitrateObserver {
+ public:
+ TransportFeedbackAdapter(RtcpBandwidthObserver* bandwidth_observer,
+ Clock* clock,
+ ProcessThread* process_thread);
+ virtual ~TransportFeedbackAdapter();
+
+ void OnPacketSent(const PacketInfo& info) override;
+
+ void OnTransportFeedback(const rtcp::TransportFeedback& feedback) override;
+
+ void SetBitrateEstimator(RemoteBitrateEstimator* rbe);
+
+ RemoteBitrateEstimator* GetBitrateEstimator() const {
+ return bitrate_estimator_.get();
+ }
+
+ private:
+ void OnReceiveBitrateChanged(const std::vector<unsigned int>& ssrcs,
+ unsigned int bitrate) override;
+ void OnRttUpdate(int64_t avg_rtt_ms, int64_t max_rtt_ms) override;
+
+ rtc::CriticalSection lock_;
+ SendTimeHistory send_time_history_ GUARDED_BY(&lock_);
+ rtc::scoped_ptr<RtcpBandwidthObserver> rtcp_bandwidth_observer_;
+ rtc::scoped_ptr<RemoteBitrateEstimator> bitrate_estimator_;
+ ProcessThread* const process_thread_;
+ Clock* const clock_;
+ int64_t current_offset_ms_;
+ int64_t last_timestamp_us_;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_REMOTE_BITRATE_ESTIMATOR_TRANSPORT_FEEDBACK_ADAPTER_H_
diff --git a/chromium/third_party/webrtc/modules/remote_bitrate_estimator/transport_feedback_adapter_unittest.cc b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/transport_feedback_adapter_unittest.cc
new file mode 100644
index 00000000000..1bf4b1ec3e7
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/remote_bitrate_estimator/transport_feedback_adapter_unittest.cc
@@ -0,0 +1,323 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <limits>
+#include <vector>
+
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#include "webrtc/base/checks.h"
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/modules/remote_bitrate_estimator/include/mock/mock_remote_bitrate_estimator.h"
+#include "webrtc/modules/remote_bitrate_estimator/transport_feedback_adapter.h"
+#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h"
+#include "webrtc/modules/utility/interface/mock/mock_process_thread.h"
+#include "webrtc/system_wrappers/interface/clock.h"
+
+using ::testing::_;
+using ::testing::Invoke;
+
+namespace webrtc {
+namespace test {
+
+class TransportFeedbackAdapterTest : public ::testing::Test {
+ public:
+ TransportFeedbackAdapterTest()
+ : clock_(0),
+ bitrate_estimator_(nullptr),
+ receiver_estimated_bitrate_(0) {}
+
+ virtual ~TransportFeedbackAdapterTest() {}
+
+ virtual void SetUp() {
+ adapter_.reset(new TransportFeedbackAdapter(
+ new RtcpBandwidthObserverAdapter(this), &clock_, &process_thread_));
+
+ bitrate_estimator_ = new MockRemoteBitrateEstimator();
+ EXPECT_CALL(process_thread_, RegisterModule(bitrate_estimator_)).Times(1);
+ adapter_->SetBitrateEstimator(bitrate_estimator_);
+ }
+
+ virtual void TearDown() {
+ EXPECT_CALL(process_thread_, DeRegisterModule(bitrate_estimator_)).Times(1);
+ adapter_.reset();
+ }
+
+ protected:
+ // Proxy class used since TransportFeedbackAdapter will own the instance
+ // passed at construction.
+ class RtcpBandwidthObserverAdapter : public RtcpBandwidthObserver {
+ public:
+ explicit RtcpBandwidthObserverAdapter(TransportFeedbackAdapterTest* owner)
+ : owner_(owner) {}
+
+ void OnReceivedEstimatedBitrate(uint32_t bitrate) override {
+ owner_->receiver_estimated_bitrate_ = bitrate;
+ }
+
+ void OnReceivedRtcpReceiverReport(const ReportBlockList& report_blocks,
+ int64_t rtt,
+ int64_t now_ms) override {
+ RTC_NOTREACHED();
+ }
+
+ TransportFeedbackAdapterTest* const owner_;
+ };
+
+ void OnReceivedEstimatedBitrate(uint32_t bitrate) {}
+
+ void OnReceivedRtcpReceiverReport(const ReportBlockList& report_blocks,
+ int64_t rtt,
+ int64_t now_ms) {}
+
+ void ComparePacketVectors(const std::vector<PacketInfo>& truth,
+ const std::vector<PacketInfo>& input) {
+ ASSERT_EQ(truth.size(), input.size());
+ size_t len = truth.size();
+ // truth contains the input data for the test, and input is what will be
+ // sent to the bandwidth estimator. truth.arrival_tims_ms is used to
+ // populate the transport feedback messages. As these times may be changed
+ // (because of resolution limits in the packets, and because of the time
+ // base adjustment performed by the TransportFeedbackAdapter at the first
+ // packet, the truth[x].arrival_time and input[x].arrival_time may not be
+ // equal. However, the difference must be the same for all x.
+ int64_t arrival_time_delta =
+ truth[0].arrival_time_ms - input[0].arrival_time_ms;
+ for (size_t i = 0; i < len; ++i) {
+ EXPECT_EQ(truth[i].arrival_time_ms,
+ input[i].arrival_time_ms + arrival_time_delta);
+ EXPECT_EQ(truth[i].send_time_ms, input[i].send_time_ms);
+ EXPECT_EQ(truth[i].sequence_number, input[i].sequence_number);
+ EXPECT_EQ(truth[i].payload_size, input[i].payload_size);
+ EXPECT_EQ(truth[i].was_paced, input[i].was_paced);
+ }
+ }
+
+ // Utility method, to reset arrival_time_ms before adding send time.
+ void OnPacketSent(PacketInfo info) {
+ info.arrival_time_ms = 0;
+ adapter_->OnPacketSent(info);
+ }
+
+ SimulatedClock clock_;
+ MockProcessThread process_thread_;
+ MockRemoteBitrateEstimator* bitrate_estimator_;
+ rtc::scoped_ptr<TransportFeedbackAdapter> adapter_;
+
+ uint32_t receiver_estimated_bitrate_;
+};
+
+TEST_F(TransportFeedbackAdapterTest, AdaptsFeedbackAndPopulatesSendTimes) {
+ std::vector<PacketInfo> packets;
+ packets.push_back(PacketInfo(100, 200, 0, 1500, true));
+ packets.push_back(PacketInfo(110, 210, 1, 1500, true));
+ packets.push_back(PacketInfo(120, 220, 2, 1500, true));
+ packets.push_back(PacketInfo(130, 230, 3, 1500, true));
+ packets.push_back(PacketInfo(140, 240, 4, 1500, true));
+
+ for (const PacketInfo& packet : packets)
+ OnPacketSent(packet);
+
+ rtcp::TransportFeedback feedback;
+ feedback.WithBase(packets[0].sequence_number,
+ packets[0].arrival_time_ms * 1000);
+
+ for (const PacketInfo& packet : packets) {
+ EXPECT_TRUE(feedback.WithReceivedPacket(packet.sequence_number,
+ packet.arrival_time_ms * 1000));
+ }
+
+ feedback.Build();
+
+ EXPECT_CALL(*bitrate_estimator_, IncomingPacketFeedbackVector(_))
+ .Times(1)
+ .WillOnce(Invoke(
+ [packets, this](const std::vector<PacketInfo>& feedback_vector) {
+ ComparePacketVectors(packets, feedback_vector);
+ }));
+ adapter_->OnTransportFeedback(feedback);
+}
+
+TEST_F(TransportFeedbackAdapterTest, HandlesDroppedPackets) {
+ std::vector<PacketInfo> packets;
+ packets.push_back(PacketInfo(100, 200, 0, 1500, true));
+ packets.push_back(PacketInfo(110, 210, 1, 1500, true));
+ packets.push_back(PacketInfo(120, 220, 2, 1500, true));
+ packets.push_back(PacketInfo(130, 230, 3, 1500, true));
+ packets.push_back(PacketInfo(140, 240, 4, 1500, true));
+
+ const uint16_t kSendSideDropBefore = 1;
+ const uint16_t kReceiveSideDropAfter = 3;
+
+ for (const PacketInfo& packet : packets) {
+ if (packet.sequence_number >= kSendSideDropBefore)
+ OnPacketSent(packet);
+ }
+
+ rtcp::TransportFeedback feedback;
+ feedback.WithBase(packets[0].sequence_number,
+ packets[0].arrival_time_ms * 1000);
+
+ for (const PacketInfo& packet : packets) {
+ if (packet.sequence_number <= kReceiveSideDropAfter) {
+ EXPECT_TRUE(feedback.WithReceivedPacket(packet.sequence_number,
+ packet.arrival_time_ms * 1000));
+ }
+ }
+
+ feedback.Build();
+
+ std::vector<PacketInfo> expected_packets(
+ packets.begin() + kSendSideDropBefore,
+ packets.begin() + kReceiveSideDropAfter + 1);
+
+ EXPECT_CALL(*bitrate_estimator_, IncomingPacketFeedbackVector(_))
+ .Times(1)
+ .WillOnce(Invoke([expected_packets,
+ this](const std::vector<PacketInfo>& feedback_vector) {
+ ComparePacketVectors(expected_packets, feedback_vector);
+ }));
+ adapter_->OnTransportFeedback(feedback);
+}
+
+TEST_F(TransportFeedbackAdapterTest, SendTimeWrapsBothWays) {
+ int64_t kHighArrivalTimeMs = rtcp::TransportFeedback::kDeltaScaleFactor *
+ static_cast<int64_t>(1 << 8) *
+ static_cast<int64_t>((1 << 23) - 1) / 1000;
+ std::vector<PacketInfo> packets;
+ packets.push_back(PacketInfo(kHighArrivalTimeMs - 64, 200, 0, 1500, true));
+ packets.push_back(PacketInfo(kHighArrivalTimeMs + 64, 210, 1, 1500, true));
+ packets.push_back(PacketInfo(kHighArrivalTimeMs, 220, 2, 1500, true));
+
+ for (const PacketInfo& packet : packets)
+ OnPacketSent(packet);
+
+ for (size_t i = 0; i < packets.size(); ++i) {
+ rtc::scoped_ptr<rtcp::TransportFeedback> feedback(
+ new rtcp::TransportFeedback());
+ feedback->WithBase(packets[i].sequence_number,
+ packets[i].arrival_time_ms * 1000);
+
+ EXPECT_TRUE(feedback->WithReceivedPacket(
+ packets[i].sequence_number, packets[i].arrival_time_ms * 1000));
+
+ rtc::scoped_ptr<rtcp::RawPacket> raw_packet = feedback->Build();
+ feedback = rtcp::TransportFeedback::ParseFrom(raw_packet->Buffer(),
+ raw_packet->Length());
+
+ std::vector<PacketInfo> expected_packets;
+ expected_packets.push_back(packets[i]);
+
+ EXPECT_CALL(*bitrate_estimator_, IncomingPacketFeedbackVector(_))
+ .Times(1)
+ .WillOnce(Invoke([expected_packets, this](
+ const std::vector<PacketInfo>& feedback_vector) {
+ ComparePacketVectors(expected_packets, feedback_vector);
+ }));
+ adapter_->OnTransportFeedback(*feedback.get());
+ }
+}
+
+TEST_F(TransportFeedbackAdapterTest, TimestampDeltas) {
+ std::vector<PacketInfo> sent_packets;
+ const int64_t kSmallDeltaUs =
+ rtcp::TransportFeedback::kDeltaScaleFactor * ((1 << 8) - 1);
+ const int64_t kLargePositiveDeltaUs =
+ rtcp::TransportFeedback::kDeltaScaleFactor *
+ std::numeric_limits<int16_t>::max();
+ const int64_t kLargeNegativeDeltaUs =
+ rtcp::TransportFeedback::kDeltaScaleFactor *
+ std::numeric_limits<int16_t>::min();
+
+ PacketInfo info(100, 200, 0, 1500, true);
+ sent_packets.push_back(info);
+
+ info.send_time_ms += kSmallDeltaUs / 1000;
+ info.arrival_time_ms += kSmallDeltaUs / 1000;
+ ++info.sequence_number;
+ sent_packets.push_back(info);
+
+ info.send_time_ms += kLargePositiveDeltaUs / 1000;
+ info.arrival_time_ms += kLargePositiveDeltaUs / 1000;
+ ++info.sequence_number;
+ sent_packets.push_back(info);
+
+ info.send_time_ms += kLargeNegativeDeltaUs / 1000;
+ info.arrival_time_ms += kLargeNegativeDeltaUs / 1000;
+ ++info.sequence_number;
+ sent_packets.push_back(info);
+
+ // Too large, delta - will need two feedback messages.
+ info.send_time_ms += (kLargePositiveDeltaUs + 1000) / 1000;
+ info.arrival_time_ms += (kLargePositiveDeltaUs + 1000) / 1000;
+ ++info.sequence_number;
+
+ // Packets will be added to send history.
+ for (const PacketInfo& packet : sent_packets)
+ OnPacketSent(packet);
+ OnPacketSent(info);
+
+ // Create expected feedback and send into adapter.
+ rtc::scoped_ptr<rtcp::TransportFeedback> feedback(
+ new rtcp::TransportFeedback());
+ feedback->WithBase(sent_packets[0].sequence_number,
+ sent_packets[0].arrival_time_ms * 1000);
+
+ for (const PacketInfo& packet : sent_packets) {
+ EXPECT_TRUE(feedback->WithReceivedPacket(packet.sequence_number,
+ packet.arrival_time_ms * 1000));
+ }
+ EXPECT_FALSE(feedback->WithReceivedPacket(info.sequence_number,
+ info.arrival_time_ms * 1000));
+
+ rtc::scoped_ptr<rtcp::RawPacket> raw_packet = feedback->Build();
+ feedback = rtcp::TransportFeedback::ParseFrom(raw_packet->Buffer(),
+ raw_packet->Length());
+
+ std::vector<PacketInfo> received_feedback;
+
+ EXPECT_TRUE(feedback.get() != nullptr);
+ EXPECT_CALL(*bitrate_estimator_, IncomingPacketFeedbackVector(_))
+ .Times(1)
+ .WillOnce(Invoke([sent_packets, &received_feedback](
+ const std::vector<PacketInfo>& feedback_vector) {
+ EXPECT_EQ(sent_packets.size(), feedback_vector.size());
+ received_feedback = feedback_vector;
+ }));
+ adapter_->OnTransportFeedback(*feedback.get());
+
+ // Create a new feedback message and add the trailing item.
+ feedback.reset(new rtcp::TransportFeedback());
+ feedback->WithBase(info.sequence_number, info.arrival_time_ms * 1000);
+ EXPECT_TRUE(feedback->WithReceivedPacket(info.sequence_number,
+ info.arrival_time_ms * 1000));
+ raw_packet = feedback->Build();
+ feedback = rtcp::TransportFeedback::ParseFrom(raw_packet->Buffer(),
+ raw_packet->Length());
+
+ EXPECT_TRUE(feedback.get() != nullptr);
+ EXPECT_CALL(*bitrate_estimator_, IncomingPacketFeedbackVector(_))
+ .Times(1)
+ .WillOnce(Invoke(
+ [&received_feedback](const std::vector<PacketInfo>& feedback_vector) {
+ EXPECT_EQ(1u, feedback_vector.size());
+ received_feedback.push_back(feedback_vector[0]);
+ }));
+ adapter_->OnTransportFeedback(*feedback.get());
+
+ sent_packets.push_back(info);
+
+ ComparePacketVectors(sent_packets, received_feedback);
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/BUILD.gn b/chromium/third_party/webrtc/modules/rtp_rtcp/BUILD.gn
index 0eda287a4ce..c6514246114 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/BUILD.gn
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/BUILD.gn
@@ -32,9 +32,13 @@ source_set("rtp_rtcp") {
"source/forward_error_correction.h",
"source/forward_error_correction_internal.cc",
"source/forward_error_correction_internal.h",
+ "source/h264_bitstream_parser.cc",
+ "source/h264_bitstream_parser.h",
"source/h264_sps_parser.cc",
"source/h264_sps_parser.h",
"source/mock/mock_rtp_payload_strategy.h",
+ "source/packet_loss_stats.cc",
+ "source/packet_loss_stats.h",
"source/producer_fec.cc",
"source/producer_fec.h",
"source/receive_statistics_impl.cc",
@@ -42,6 +46,8 @@ source_set("rtp_rtcp") {
"source/remote_ntp_time_estimator.cc",
"source/rtcp_packet.cc",
"source/rtcp_packet.h",
+ "source/rtcp_packet/transport_feedback.cc",
+ "source/rtcp_packet/transport_feedback.h",
"source/rtcp_receiver.cc",
"source/rtcp_receiver.h",
"source/rtcp_receiver_help.cc",
@@ -58,6 +64,8 @@ source_set("rtp_rtcp") {
"source/rtp_format_video_generic.h",
"source/rtp_format_vp8.cc",
"source/rtp_format_vp8.h",
+ "source/rtp_format_vp9.cc",
+ "source/rtp_format_vp9.h",
"source/rtp_header_extension.cc",
"source/rtp_header_extension.h",
"source/rtp_header_parser.cc",
@@ -104,7 +112,6 @@ source_set("rtp_rtcp") {
deps = [
"../..:webrtc_common",
"../../system_wrappers",
- "../pacing",
"../remote_bitrate_estimator",
]
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/interface/remote_ntp_time_estimator.h b/chromium/third_party/webrtc/modules/rtp_rtcp/interface/remote_ntp_time_estimator.h
index 63949f7619a..b1bc09a9a35 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/interface/remote_ntp_time_estimator.h
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/interface/remote_ntp_time_estimator.h
@@ -43,7 +43,7 @@ class RemoteNtpTimeEstimator {
rtc::scoped_ptr<TimestampExtrapolator> ts_extrapolator_;
RtcpList rtcp_list_;
int64_t last_timing_log_ms_;
- DISALLOW_COPY_AND_ASSIGN(RemoteNtpTimeEstimator);
+ RTC_DISALLOW_COPY_AND_ASSIGN(RemoteNtpTimeEstimator);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/interface/rtp_receiver.h b/chromium/third_party/webrtc/modules/rtp_rtcp/interface/rtp_receiver.h
index 62835667df0..2fb8ac5d617 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/interface/rtp_receiver.h
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/interface/rtp_receiver.h
@@ -37,14 +37,14 @@ class RtpReceiver {
public:
// Creates a video-enabled RTP receiver.
static RtpReceiver* CreateVideoReceiver(
- int id, Clock* clock,
+ Clock* clock,
RtpData* incoming_payload_callback,
RtpFeedback* incoming_messages_callback,
RTPPayloadRegistry* rtp_payload_registry);
// Creates an audio-enabled RTP receiver.
static RtpReceiver* CreateAudioReceiver(
- int id, Clock* clock,
+ Clock* clock,
RtpAudioFeedback* incoming_audio_feedback,
RtpData* incoming_payload_callback,
RtpFeedback* incoming_messages_callback,
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/interface/rtp_rtcp.h b/chromium/third_party/webrtc/modules/rtp_rtcp/interface/rtp_rtcp.h
index 98f7c26b553..f907408573b 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/interface/rtp_rtcp.h
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/interface/rtp_rtcp.h
@@ -19,11 +19,13 @@
namespace webrtc {
// Forward declarations.
-class PacedSender;
class ReceiveStatistics;
class RemoteBitrateEstimator;
class RtpReceiver;
class Transport;
+namespace rtcp {
+class TransportFeedback;
+}
class RtpRtcp : public Module {
public:
@@ -53,7 +55,6 @@ class RtpRtcp : public Module {
* paced_sender - Spread any bursts of packets into smaller
* bursts to minimize packet loss.
*/
- int32_t id;
bool audio;
bool receiver_only;
Clock* clock;
@@ -61,11 +62,13 @@ class RtpRtcp : public Module {
Transport* outgoing_transport;
RtcpIntraFrameObserver* intra_frame_callback;
RtcpBandwidthObserver* bandwidth_callback;
+ TransportFeedbackObserver* transport_feedback_callback;
RtcpRttStats* rtt_stats;
RtcpPacketTypeCounterObserver* rtcp_packet_type_counter_observer;
RtpAudioFeedback* audio_messages;
RemoteBitrateEstimator* remote_bitrate_estimator;
- PacedSender* paced_sender;
+ RtpPacketSender* paced_sender;
+ TransportSequenceNumberAllocator* transport_sequence_number_allocator;
BitrateStatisticsObserver* send_bitrate_observer;
FrameCountObserver* send_frame_count_observer;
SendSideDelayObserver* send_side_delay_observer;
@@ -324,14 +327,14 @@ class RtpRtcp : public Module {
/*
* Get RTCP status
*/
- virtual RTCPMethod RTCP() const = 0;
+ virtual RtcpMode RTCP() const = 0;
/*
* configure RTCP status i.e on(compound or non- compound)/off
*
* method - RTCP method to use
*/
- virtual void SetRTCPStatus(RTCPMethod method) = 0;
+ virtual void SetRTCPStatus(RtcpMode method) = 0;
/*
* Set RTCP CName (i.e unique identifier)
@@ -431,6 +434,14 @@ class RtpRtcp : public Module {
StreamDataCounters* rtx_counters) const = 0;
/*
+ * Get packet loss statistics for the RTP stream.
+ */
+ virtual void GetRtpPacketLossStats(
+ bool outgoing,
+ uint32_t ssrc,
+ struct RtpPacketLossStats* loss_stats) const = 0;
+
+ /*
* Get received RTCP sender info
*
* return -1 on failure else 0
@@ -480,13 +491,6 @@ class RtpRtcp : public Module {
const std::vector<uint32_t>& ssrcs) = 0;
/*
- * (IJ) Extended jitter report.
- */
- virtual bool IJ() const = 0;
-
- virtual void SetIJStatus(bool enable) = 0;
-
- /*
* (TMMBR) Temporary Max Media Bit Rate
*/
virtual bool TMMBR() const = 0;
@@ -538,6 +542,8 @@ class RtpRtcp : public Module {
RtcpStatisticsCallback* callback) = 0;
virtual RtcpStatisticsCallback*
GetRtcpStatisticsCallback() = 0;
+ // BWE feedback packets.
+ virtual bool SendFeedbackPacket(const rtcp::TransportFeedback& packet) = 0;
/**************************************************************************
*
@@ -600,19 +606,15 @@ class RtpRtcp : public Module {
/*
* Turn on/off generic FEC
- *
- * return -1 on failure else 0
*/
- virtual int32_t SetGenericFECStatus(bool enable,
- uint8_t payloadTypeRED,
- uint8_t payloadTypeFEC) = 0;
+ virtual void SetGenericFECStatus(bool enable,
+ uint8_t payload_type_red,
+ uint8_t payload_type_fec) = 0;
/*
* Get generic FEC setting
- *
- * return -1 on failure else 0
*/
- virtual int32_t GenericFECStatus(bool& enable,
+ virtual void GenericFECStatus(bool& enable,
uint8_t& payloadTypeRED,
uint8_t& payloadTypeFEC) = 0;
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h b/chromium/third_party/webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h
index ed7dfe06a09..fdca434ca42 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h
@@ -24,6 +24,9 @@
#define TIMEOUT_SEI_MESSAGES_MS 30000 // in milliseconds
namespace webrtc {
+namespace rtcp {
+class TransportFeedback;
+}
const int kVideoPayloadTypeFrequency = 90000;
@@ -49,13 +52,6 @@ union PayloadUnion
VideoPayload Video;
};
-enum RTCPMethod
-{
- kRtcpOff = 0,
- kRtcpCompound = 1,
- kRtcpNonCompound = 2
-};
-
enum RTPAliveType
{
kRtpDead = 0,
@@ -108,7 +104,8 @@ enum RTCPPacketType : uint32_t {
kRtcpRemb = 0x10000,
kRtcpTransmissionTimeOffset = 0x20000,
kRtcpXrReceiverReferenceTime = 0x40000,
- kRtcpXrDlrrReportBlock = 0x80000
+ kRtcpXrDlrrReportBlock = 0x80000,
+ kRtcpTransportFeedback = 0x100000,
};
enum KeyFrameRequestMethod
@@ -239,28 +236,24 @@ public:
* channels - number of channels in codec (1 = mono, 2 = stereo)
*/
virtual int32_t OnInitializeDecoder(
- const int32_t id,
const int8_t payloadType,
const char payloadName[RTP_PAYLOAD_NAME_SIZE],
const int frequency,
const uint8_t channels,
const uint32_t rate) = 0;
- virtual void OnIncomingSSRCChanged( const int32_t id,
- const uint32_t ssrc) = 0;
+ virtual void OnIncomingSSRCChanged(const uint32_t ssrc) = 0;
- virtual void OnIncomingCSRCChanged( const int32_t id,
- const uint32_t CSRC,
- const bool added) = 0;
+ virtual void OnIncomingCSRCChanged(const uint32_t CSRC,
+ const bool added) = 0;
};
class RtpAudioFeedback {
public:
-
- virtual void OnPlayTelephoneEvent(const int32_t id,
- const uint8_t event,
+ virtual void OnPlayTelephoneEvent(const uint8_t event,
const uint16_t lengthMs,
const uint8_t volume) = 0;
+
protected:
virtual ~RtpAudioFeedback() {}
};
@@ -293,6 +286,44 @@ class RtcpBandwidthObserver {
virtual ~RtcpBandwidthObserver() {}
};
+struct PacketInfo {
+ PacketInfo(int64_t arrival_time_ms,
+ int64_t send_time_ms,
+ uint16_t sequence_number,
+ size_t payload_size,
+ bool was_paced)
+ : arrival_time_ms(arrival_time_ms),
+ send_time_ms(send_time_ms),
+ sequence_number(sequence_number),
+ payload_size(payload_size),
+ was_paced(was_paced) {}
+ // Time corresponding to when the packet was received. Timestamped with the
+ // receiver's clock.
+ int64_t arrival_time_ms;
+ // Time corresponding to when the packet was sent, timestamped with the
+ // sender's clock.
+ int64_t send_time_ms;
+ // Packet identifier, incremented with 1 for every packet generated by the
+ // sender.
+ uint16_t sequence_number;
+ // Size of the packet excluding RTP headers.
+ size_t payload_size;
+ // True if the packet was paced out by the pacer.
+ bool was_paced;
+};
+
+class TransportFeedbackObserver {
+ public:
+ TransportFeedbackObserver() {}
+ virtual ~TransportFeedbackObserver() {}
+
+ // Note: Transport-wide sequence number as sequence number. Arrival time
+ // must be set to 0.
+ virtual void OnPacketSent(const PacketInfo& info) = 0;
+
+ virtual void OnTransportFeedback(const rtcp::TransportFeedback& feedback) = 0;
+};
+
class RtcpRttStats {
public:
virtual void OnRttUpdate(int64_t rtt) = 0;
@@ -307,8 +338,7 @@ class NullRtpFeedback : public RtpFeedback {
public:
virtual ~NullRtpFeedback() {}
- int32_t OnInitializeDecoder(const int32_t id,
- const int8_t payloadType,
+ int32_t OnInitializeDecoder(const int8_t payloadType,
const char payloadName[RTP_PAYLOAD_NAME_SIZE],
const int frequency,
const uint8_t channels,
@@ -316,11 +346,8 @@ class NullRtpFeedback : public RtpFeedback {
return 0;
}
- void OnIncomingSSRCChanged(const int32_t id, const uint32_t ssrc) override {}
-
- void OnIncomingCSRCChanged(const int32_t id,
- const uint32_t CSRC,
- const bool added) override {}
+ void OnIncomingSSRCChanged(const uint32_t ssrc) override {}
+ void OnIncomingCSRCChanged(const uint32_t CSRC, const bool added) override {}
};
// Null object version of RtpData.
@@ -344,11 +371,54 @@ class NullRtpAudioFeedback : public RtpAudioFeedback {
public:
virtual ~NullRtpAudioFeedback() {}
- void OnPlayTelephoneEvent(const int32_t id,
- const uint8_t event,
+ void OnPlayTelephoneEvent(const uint8_t event,
const uint16_t lengthMs,
const uint8_t volume) override {}
};
+// Statistics about packet loss for a single directional connection. All values
+// are totals since the connection initiated.
+struct RtpPacketLossStats {
+ // The number of packets lost in events where no adjacent packets were also
+ // lost.
+ uint64_t single_packet_loss_count;
+ // The number of events in which more than one adjacent packet was lost.
+ uint64_t multiple_packet_loss_event_count;
+ // The number of packets lost in events where more than one adjacent packet
+ // was lost.
+ uint64_t multiple_packet_loss_packet_count;
+};
+
+class RtpPacketSender {
+ public:
+ RtpPacketSender() {}
+ virtual ~RtpPacketSender() {}
+
+ enum Priority {
+ kHighPriority = 0, // Pass through; will be sent immediately.
+ kNormalPriority = 2, // Put in back of the line.
+ kLowPriority = 3, // Put in back of the low priority line.
+ };
+ // Low priority packets are mixed with the normal priority packets
+ // while we are paused.
+
+ // Returns true if we send the packet now, else it will add the packet
+ // information to the queue and call TimeToSendPacket when it's time to send.
+ virtual bool SendPacket(Priority priority,
+ uint32_t ssrc,
+ uint16_t sequence_number,
+ int64_t capture_time_ms,
+ size_t bytes,
+ bool retransmission) = 0;
+};
+
+class TransportSequenceNumberAllocator {
+ public:
+ TransportSequenceNumberAllocator() {}
+ virtual ~TransportSequenceNumberAllocator() {}
+
+ virtual uint16_t AllocateSequenceNumber() = 0;
+};
+
} // namespace webrtc
#endif // WEBRTC_MODULES_RTP_RTCP_INTERFACE_RTP_RTCP_DEFINES_H_
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/mocks/mock_rtp_rtcp.h b/chromium/third_party/webrtc/modules/rtp_rtcp/mocks/mock_rtp_rtcp.h
index 99e5b1c15a5..bc4aec8967e 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/mocks/mock_rtp_rtcp.h
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/mocks/mock_rtp_rtcp.h
@@ -16,6 +16,7 @@
#include "webrtc/modules/interface/module.h"
#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp.h"
#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h"
namespace webrtc {
@@ -129,9 +130,8 @@ class MockRtpRtcp : public RtpRtcp {
MOCK_METHOD2(RegisterRtcpObservers,
void(RtcpIntraFrameObserver* intraFrameCallback,
RtcpBandwidthObserver* bandwidthCallback));
- MOCK_CONST_METHOD0(RTCP,
- RTCPMethod());
- MOCK_METHOD1(SetRTCPStatus, void(const RTCPMethod method));
+ MOCK_CONST_METHOD0(RTCP, RtcpMode());
+ MOCK_METHOD1(SetRTCPStatus, void(const RtcpMode method));
MOCK_METHOD1(SetCNAME,
int32_t(const char cName[RTCP_CNAME_SIZE]));
MOCK_CONST_METHOD2(RemoteCNAME,
@@ -165,6 +165,8 @@ class MockRtpRtcp : public RtpRtcp {
int32_t(size_t *bytesSent, uint32_t *packetsSent));
MOCK_CONST_METHOD2(GetSendStreamDataCounters,
void(StreamDataCounters*, StreamDataCounters*));
+ MOCK_CONST_METHOD3(GetRtpPacketLossStats,
+ void(bool, uint32_t, struct RtpPacketLossStats*));
MOCK_METHOD1(RemoteRTCPStat,
int32_t(RTCPSenderInfo* senderInfo));
MOCK_CONST_METHOD1(RemoteRTCPStat,
@@ -183,9 +185,6 @@ class MockRtpRtcp : public RtpRtcp {
MOCK_METHOD2(SetREMBData,
void(const uint32_t bitrate,
const std::vector<uint32_t>& ssrcs));
- MOCK_CONST_METHOD0(IJ,
- bool());
- MOCK_METHOD1(SetIJStatus, void(const bool));
MOCK_CONST_METHOD0(TMMBR,
bool());
MOCK_METHOD1(SetTMMBRStatus, void(const bool enable));
@@ -206,6 +205,7 @@ class MockRtpRtcp : public RtpRtcp {
MOCK_CONST_METHOD0(StorePackets, bool());
MOCK_METHOD1(RegisterRtcpStatisticsCallback, void(RtcpStatisticsCallback*));
MOCK_METHOD0(GetRtcpStatisticsCallback, RtcpStatisticsCallback*());
+ MOCK_METHOD1(SendFeedbackPacket, bool(const rtcp::TransportFeedback& packet));
MOCK_METHOD1(RegisterAudioCallback,
int32_t(RtpAudioFeedback* messagesCallback));
MOCK_METHOD1(SetAudioPacketSize,
@@ -225,11 +225,13 @@ class MockRtpRtcp : public RtpRtcp {
MOCK_METHOD1(SetTargetSendBitrate,
void(uint32_t bitrate_bps));
MOCK_METHOD3(SetGenericFECStatus,
- int32_t(const bool enable,
- const uint8_t payloadTypeRED,
- const uint8_t payloadTypeFEC));
+ void(const bool enable,
+ const uint8_t payload_type_red,
+ const uint8_t payload_type_fec));
MOCK_METHOD3(GenericFECStatus,
- int32_t(bool& enable, uint8_t& payloadTypeRED, uint8_t& payloadTypeFEC));
+ void(bool& enable,
+ uint8_t& payloadTypeRED,
+ uint8_t& payloadTypeFEC));
MOCK_METHOD2(SetFecParameters,
int32_t(const FecProtectionParams* delta_params,
const FecProtectionParams* key_params));
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/rtp_rtcp.gypi b/chromium/third_party/webrtc/modules/rtp_rtcp/rtp_rtcp.gypi
index e73b43adfb9..e35a75cd0f0 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/rtp_rtcp.gypi
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/rtp_rtcp.gypi
@@ -13,7 +13,6 @@
'type': 'static_library',
'dependencies': [
'<(webrtc_root)/system_wrappers/system_wrappers.gyp:system_wrappers',
- '<(webrtc_root)/modules/modules.gyp:paced_sender',
'<(webrtc_root)/modules/modules.gyp:remote_bitrate_estimator',
],
'sources': [
@@ -31,6 +30,8 @@
'source/byte_io.h',
'source/fec_receiver_impl.cc',
'source/fec_receiver_impl.h',
+ 'source/packet_loss_stats.cc',
+ 'source/packet_loss_stats.h',
'source/receive_statistics_impl.cc',
'source/receive_statistics_impl.h',
'source/remote_ntp_time_estimator.cc',
@@ -40,6 +41,8 @@
'source/rtp_rtcp_impl.h',
'source/rtcp_packet.cc',
'source/rtcp_packet.h',
+ 'source/rtcp_packet/transport_feedback.cc',
+ 'source/rtcp_packet/transport_feedback.h',
'source/rtcp_receiver.cc',
'source/rtcp_receiver.h',
'source/rtcp_receiver_help.cc',
@@ -74,6 +77,8 @@
'source/forward_error_correction.h',
'source/forward_error_correction_internal.cc',
'source/forward_error_correction_internal.h',
+ 'source/h264_bitstream_parser.cc',
+ 'source/h264_bitstream_parser.h',
'source/h264_sps_parser.cc',
'source/h264_sps_parser.h',
'source/producer_fec.cc',
@@ -94,6 +99,8 @@
'source/rtp_format_h264.h',
'source/rtp_format_vp8.cc',
'source/rtp_format_vp8.h',
+ 'source/rtp_format_vp9.cc',
+ 'source/rtp_format_vp9.h',
'source/rtp_format_video_generic.cc',
'source/rtp_format_video_generic.h',
'source/vp8_partition_aggregator.cc',
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/source/byte_io.h b/chromium/third_party/webrtc/modules/rtp_rtcp/source/byte_io.h
index 2617806dd94..d8903b8483c 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/source/byte_io.h
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/source/byte_io.h
@@ -42,40 +42,110 @@
namespace webrtc {
+// According to ISO C standard ISO/IEC 9899, section 6.2.6.2 (2), the three
+// representations of signed integers allowed are two's complement, one's
+// complement and sign/magnitude. We can detect which is used by looking at
+// the two last bits of -1, which will be 11 in two's complement, 10 in one's
+// complement and 01 in sign/magnitude.
+// TODO(sprang): In the unlikely event that we actually need to support a
+// platform that doesn't use two's complement, implement conversion to/from
+// wire format.
+
+namespace {
+inline void AssertTwosComplement() {
+ // Assume the if any one signed integer type is two's complement, then all
+ // other will be too.
+ static_assert(
+ (-1 & 0x03) == 0x03,
+ "Only two's complement representation of signed integers supported.");
+}
+// Plain const char* won't work for static_assert, use #define instead.
+#define kSizeErrorMsg "Byte size must be less than or equal to data type size."
+}
+
+// Utility class for getting the unsigned equivalent of a signed type.
+template <typename T>
+struct UnsignedOf;
+
// Class for reading integers from a sequence of bytes.
-// T = type of integer, B = bytes to read, is_signed = true if signed integer
-// If is_signed is true and B < sizeof(T), sign extension might be needed
-template<typename T, unsigned int B = sizeof(T),
- bool is_signed = std::numeric_limits<T>::is_signed>
-class ByteReader {
+// T = type of integer, B = bytes to read, is_signed = true if signed integer.
+// If is_signed is true and B < sizeof(T), sign extension might be needed.
+template <typename T,
+ unsigned int B = sizeof(T),
+ bool is_signed = std::numeric_limits<T>::is_signed>
+class ByteReader;
+
+// Specialization of ByteReader for unsigned types.
+template <typename T, unsigned int B>
+class ByteReader<T, B, false> {
public:
static T ReadBigEndian(const uint8_t* data) {
- if (is_signed && B < sizeof(T)) {
- return SignExtend(InternalReadBigEndian(data));
- }
+ static_assert(B <= sizeof(T), kSizeErrorMsg);
return InternalReadBigEndian(data);
}
static T ReadLittleEndian(const uint8_t* data) {
- if (is_signed && B < sizeof(T)) {
- return SignExtend(InternalReadLittleEndian(data));
- }
+ static_assert(B <= sizeof(T), kSizeErrorMsg);
return InternalReadLittleEndian(data);
}
private:
static T InternalReadBigEndian(const uint8_t* data) {
T val(0);
- for (unsigned int i = 0; i < B; ++i) {
+ for (unsigned int i = 0; i < B; ++i)
val |= static_cast<T>(data[i]) << ((B - 1 - i) * 8);
- }
return val;
}
static T InternalReadLittleEndian(const uint8_t* data) {
T val(0);
- for (unsigned int i = 0; i < B; ++i) {
+ for (unsigned int i = 0; i < B; ++i)
val |= static_cast<T>(data[i]) << (i * 8);
+ return val;
+ }
+};
+
+// Specialization of ByteReader for signed types.
+template <typename T, unsigned int B>
+class ByteReader<T, B, true> {
+ public:
+ typedef typename UnsignedOf<T>::Type U;
+
+ static T ReadBigEndian(const uint8_t* data) {
+ U unsigned_val = ByteReader<T, B, false>::ReadBigEndian(data);
+ if (B < sizeof(T))
+ unsigned_val = SignExtend(unsigned_val);
+ return ReinterpretAsSigned(unsigned_val);
+ }
+
+ static T ReadLittleEndian(const uint8_t* data) {
+ U unsigned_val = ByteReader<T, B, false>::ReadLittleEndian(data);
+ if (B < sizeof(T))
+ unsigned_val = SignExtend(unsigned_val);
+ return ReinterpretAsSigned(unsigned_val);
+ }
+
+ private:
+ // As a hack to avoid implementation-specific or undefined behavior when
+ // bit-shifting or casting signed integers, read as a signed equivalent
+ // instead and convert to signed. This is safe since we have asserted that
+ // two's complement for is used.
+ static T ReinterpretAsSigned(U unsigned_val) {
+ // An unsigned value with only the highest order bit set (ex 0x80).
+ const U kUnsignedHighestBitMask =
+ static_cast<U>(1) << ((sizeof(U) * 8) - 1);
+ // A signed value with only the highest bit set. Since this is two's
+ // complement form, we can use the min value from std::numeric_limits.
+ const T kSignedHighestBitMask = std::numeric_limits<T>::min();
+
+ T val;
+ if ((unsigned_val & kUnsignedHighestBitMask) != 0) {
+ // Casting is only safe when unsigned value can be represented in the
+ // signed target type, so mask out highest bit and mask it back manually.
+ val = static_cast<T>(unsigned_val & ~kUnsignedHighestBitMask);
+ val |= kSignedHighestBitMask;
+ } else {
+ val = static_cast<T>(unsigned_val);
}
return val;
}
@@ -85,16 +155,16 @@ class ByteReader {
// extend the remaining byte(s) with ones so that the correct negative
// number is retained.
// Ex: 0x810A0B -> 0xFF810A0B, but 0x710A0B -> 0x00710A0B
- static T SignExtend(const T val) {
- uint8_t msb = static_cast<uint8_t>(val >> ((B - 1) * 8));
- if (msb & 0x80) {
- // Sign extension is -1 (all ones) shifted left B bytes.
- // The "B % sizeof(T)"-part is there to avoid compiler warning for
- // shifting the whole size of the data type.
- T sign_extend = (sizeof(T) == B ? 0 :
- (static_cast<T>(-1L) << ((B % sizeof(T)) * 8)));
-
- return val | sign_extend;
+ static U SignExtend(const U val) {
+ const uint8_t kMsb = static_cast<uint8_t>(val >> ((B - 1) * 8));
+ if ((kMsb & 0x80) != 0) {
+ // Create a mask where all bits used by the B bytes are set to one,
+ // for instance 0x00FFFFFF for B = 3. Bit-wise invert that mask (to
+ // (0xFF000000 in the example above) and add it to the input value.
+ // The "B % sizeof(T)" is a workaround to undefined values warnings for
+ // B == sizeof(T), in which case this code won't be called anyway.
+ const U kUsedBitsMask = (1 << ((B % sizeof(T)) * 8)) - 1;
+ return ~kUsedBitsMask | val;
}
return val;
}
@@ -102,71 +172,162 @@ class ByteReader {
// Class for writing integers to a sequence of bytes
// T = type of integer, B = bytes to write
-template<typename T, unsigned int B = sizeof(T)>
-class ByteWriter {
+template <typename T,
+ unsigned int B = sizeof(T),
+ bool is_signed = std::numeric_limits<T>::is_signed>
+class ByteWriter;
+
+// Specialization of ByteWriter for unsigned types.
+template <typename T, unsigned int B>
+class ByteWriter<T, B, false> {
public:
static void WriteBigEndian(uint8_t* data, T val) {
+ static_assert(B <= sizeof(T), kSizeErrorMsg);
for (unsigned int i = 0; i < B; ++i) {
data[i] = val >> ((B - 1 - i) * 8);
}
}
static void WriteLittleEndian(uint8_t* data, T val) {
+ static_assert(B <= sizeof(T), kSizeErrorMsg);
for (unsigned int i = 0; i < B; ++i) {
data[i] = val >> (i * 8);
}
}
};
+// Specialization of ByteWriter for signed types.
+template <typename T, unsigned int B>
+class ByteWriter<T, B, true> {
+ public:
+ typedef typename UnsignedOf<T>::Type U;
+
+ static void WriteBigEndian(uint8_t* data, T val) {
+ ByteWriter<U, B, false>::WriteBigEndian(data, ReinterpretAsUnsigned(val));
+ }
+
+ static void WriteLittleEndian(uint8_t* data, T val) {
+ ByteWriter<U, B, false>::WriteLittleEndian(data,
+ ReinterpretAsUnsigned(val));
+ }
+
+ private:
+ static U ReinterpretAsUnsigned(T val) {
+ // According to ISO C standard ISO/IEC 9899, section 6.3.1.3 (1, 2) a
+ // conversion from signed to unsigned keeps the value if the new type can
+ // represent it, and otherwise adds one more than the max value of T until
+ // the value is in range. For two's complement, this fortunately means
+ // that the bit-wise value will be intact. Thus, since we have asserted that
+ // two's complement form is actually used, a simple cast is sufficient.
+ return static_cast<U>(val);
+ }
+};
+
+// ----- Below follows specializations of UnsignedOf utility class -----
+
+template <>
+struct UnsignedOf<int8_t> {
+ typedef uint8_t Type;
+};
+template <>
+struct UnsignedOf<int16_t> {
+ typedef uint16_t Type;
+};
+template <>
+struct UnsignedOf<int32_t> {
+ typedef uint32_t Type;
+};
+template <>
+struct UnsignedOf<int64_t> {
+ typedef uint64_t Type;
+};
+
+// ----- Below follows specializations for unsigned, B in { 1, 2, 4, 8 } -----
+
+// TODO(sprang): Check if these actually help or if generic cases will be
+// unrolled to and optimized to similar performance.
-// -------- Below follows specializations for B in { 2, 4, 8 } --------
+// Specializations for single bytes
+template <typename T>
+class ByteReader<T, 1, false> {
+ public:
+ static T ReadBigEndian(const uint8_t* data) {
+ static_assert(sizeof(T) == 1, kSizeErrorMsg);
+ return data[0];
+ }
+ static T ReadLittleEndian(const uint8_t* data) {
+ static_assert(sizeof(T) == 1, kSizeErrorMsg);
+ return data[0];
+ }
+};
+
+template <typename T>
+class ByteWriter<T, 1, false> {
+ public:
+ static void WriteBigEndian(uint8_t* data, T val) {
+ static_assert(sizeof(T) == 1, kSizeErrorMsg);
+ data[0] = val;
+ }
+
+ static void WriteLittleEndian(uint8_t* data, T val) {
+ static_assert(sizeof(T) == 1, kSizeErrorMsg);
+ data[0] = val;
+ }
+};
// Specializations for two byte words
-template<typename T, bool is_signed>
-class ByteReader<T, 2, is_signed> {
+template <typename T>
+class ByteReader<T, 2, false> {
public:
static T ReadBigEndian(const uint8_t* data) {
+ static_assert(sizeof(T) >= 2, kSizeErrorMsg);
return (data[0] << 8) | data[1];
}
static T ReadLittleEndian(const uint8_t* data) {
+ static_assert(sizeof(T) >= 2, kSizeErrorMsg);
return data[0] | (data[1] << 8);
}
};
-template<typename T>
-class ByteWriter<T, 2> {
+template <typename T>
+class ByteWriter<T, 2, false> {
public:
static void WriteBigEndian(uint8_t* data, T val) {
+ static_assert(sizeof(T) >= 2, kSizeErrorMsg);
data[0] = val >> 8;
data[1] = val;
}
static void WriteLittleEndian(uint8_t* data, T val) {
+ static_assert(sizeof(T) >= 2, kSizeErrorMsg);
data[0] = val;
data[1] = val >> 8;
}
};
// Specializations for four byte words.
-template<typename T, bool is_signed>
-class ByteReader<T, 4, is_signed> {
+template <typename T>
+class ByteReader<T, 4, false> {
public:
static T ReadBigEndian(const uint8_t* data) {
+ static_assert(sizeof(T) >= 4, kSizeErrorMsg);
return (data[0] << 24) | (data[1] << 16) | (data[2] << 8) | data[3];
}
static T ReadLittleEndian(const uint8_t* data) {
+ static_assert(sizeof(T) >= 4, kSizeErrorMsg);
return data[0] | (data[1] << 8) | (data[2] << 16) | (data[3] << 24);
}
};
// Specializations for four byte words.
-template<typename T>
-class ByteWriter<T, 4> {
+template <typename T>
+class ByteWriter<T, 4, false> {
public:
static void WriteBigEndian(uint8_t* data, T val) {
+ static_assert(sizeof(T) >= 4, kSizeErrorMsg);
data[0] = val >> 24;
data[1] = val >> 16;
data[2] = val >> 8;
@@ -174,6 +335,7 @@ class ByteWriter<T, 4> {
}
static void WriteLittleEndian(uint8_t* data, T val) {
+ static_assert(sizeof(T) >= 4, kSizeErrorMsg);
data[0] = val;
data[1] = val >> 8;
data[2] = val >> 16;
@@ -182,10 +344,11 @@ class ByteWriter<T, 4> {
};
// Specializations for eight byte words.
-template<typename T, bool is_signed>
-class ByteReader<T, 8, is_signed> {
+template <typename T>
+class ByteReader<T, 8, false> {
public:
static T ReadBigEndian(const uint8_t* data) {
+ static_assert(sizeof(T) >= 8, kSizeErrorMsg);
return
(Get(data, 0) << 56) | (Get(data, 1) << 48) |
(Get(data, 2) << 40) | (Get(data, 3) << 32) |
@@ -194,6 +357,7 @@ class ByteReader<T, 8, is_signed> {
}
static T ReadLittleEndian(const uint8_t* data) {
+ static_assert(sizeof(T) >= 8, kSizeErrorMsg);
return
Get(data, 0) | (Get(data, 1) << 8) |
(Get(data, 2) << 16) | (Get(data, 3) << 24) |
@@ -207,10 +371,11 @@ class ByteReader<T, 8, is_signed> {
}
};
-template<typename T>
-class ByteWriter<T, 8> {
+template <typename T>
+class ByteWriter<T, 8, false> {
public:
static void WriteBigEndian(uint8_t* data, T val) {
+ static_assert(sizeof(T) >= 8, kSizeErrorMsg);
data[0] = val >> 56;
data[1] = val >> 48;
data[2] = val >> 40;
@@ -222,6 +387,7 @@ class ByteWriter<T, 8> {
}
static void WriteLittleEndian(uint8_t* data, T val) {
+ static_assert(sizeof(T) >= 8, kSizeErrorMsg);
data[0] = val;
data[1] = val >> 8;
data[2] = val >> 16;
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/source/h264_bitstream_parser.cc b/chromium/third_party/webrtc/modules/rtp_rtcp/source/h264_bitstream_parser.cc
new file mode 100644
index 00000000000..dfbb6b7c524
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/source/h264_bitstream_parser.cc
@@ -0,0 +1,566 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "webrtc/modules/rtp_rtcp/source/h264_bitstream_parser.h"
+
+#include <vector>
+
+#include "webrtc/base/bitbuffer.h"
+#include "webrtc/base/bytebuffer.h"
+#include "webrtc/base/checks.h"
+#include "webrtc/base/logging.h"
+#include "webrtc/base/scoped_ptr.h"
+
+namespace webrtc {
+namespace {
+// The size of a NALU header {0 0 0 1}.
+static const size_t kNaluHeaderSize = 4;
+
+// The size of a NALU header plus the type byte.
+static const size_t kNaluHeaderAndTypeSize = kNaluHeaderSize + 1;
+
+// The NALU type.
+static const uint8_t kNaluSps = 0x7;
+static const uint8_t kNaluPps = 0x8;
+static const uint8_t kNaluIdr = 0x5;
+static const uint8_t kNaluTypeMask = 0x1F;
+
+static const uint8_t kSliceTypeP = 0x0;
+static const uint8_t kSliceTypeB = 0x1;
+static const uint8_t kSliceTypeSp = 0x3;
+
+// Returns a vector of the NALU start sequences (0 0 0 1) in the given buffer.
+std::vector<size_t> FindNaluStartSequences(const uint8_t* buffer,
+ size_t buffer_size) {
+ std::vector<size_t> sequences;
+ // This is sorta like Boyer-Moore, but with only the first optimization step:
+ // given a 4-byte sequence we're looking at, if the 4th byte isn't 1 or 0,
+ // skip ahead to the next 4-byte sequence. 0s and 1s are relatively rare, so
+ // this will skip the majority of reads/checks.
+ const uint8_t* end = buffer + buffer_size - 4;
+ for (const uint8_t* head = buffer; head < end;) {
+ if (head[3] > 1) {
+ head += 4;
+ } else if (head[3] == 1 && head[2] == 0 && head[1] == 0 && head[0] == 0) {
+ sequences.push_back(static_cast<size_t>(head - buffer));
+ head += 4;
+ } else {
+ head++;
+ }
+ }
+
+ return sequences;
+}
+} // namespace
+
+// Parses RBSP from source bytes. Removes emulation bytes, but leaves the
+// rbsp_trailing_bits() in the stream, since none of the parsing reads all the
+// way to the end of a parsed RBSP sequence. When writing, that means the
+// rbsp_trailing_bits() should be preserved and don't need to be restored (i.e.
+// the rbsp_stop_one_bit, which is just a 1, then zero padded), and alignment
+// should "just work".
+// TODO(pbos): Make parsing RBSP something that can be integrated into BitBuffer
+// so we don't have to copy the entire frames when only interested in the
+// headers.
+rtc::ByteBuffer* ParseRbsp(const uint8_t* bytes, size_t length) {
+ // Copied from webrtc::H264SpsParser::Parse.
+ rtc::ByteBuffer* rbsp_buffer = new rtc::ByteBuffer;
+ for (size_t i = 0; i < length;) {
+ if (length - i >= 3 && bytes[i] == 0 && bytes[i + 1] == 0 &&
+ bytes[i + 2] == 3) {
+ rbsp_buffer->WriteBytes(reinterpret_cast<const char*>(bytes) + i, 2);
+ i += 3;
+ } else {
+ rbsp_buffer->WriteBytes(reinterpret_cast<const char*>(bytes) + i, 1);
+ i++;
+ }
+ }
+ return rbsp_buffer;
+}
+
+#define RETURN_FALSE_ON_FAIL(x) \
+ if (!(x)) { \
+ LOG_F(LS_ERROR) << "FAILED: " #x; \
+ return false; \
+ }
+
+H264BitstreamParser::PpsState::PpsState() {
+}
+
+H264BitstreamParser::SpsState::SpsState() {
+}
+
+// These functions are similar to webrtc::H264SpsParser::Parse, and based on the
+// same version of the H.264 standard. You can find it here:
+// http://www.itu.int/rec/T-REC-H.264
+bool H264BitstreamParser::ParseSpsNalu(const uint8_t* sps, size_t length) {
+ // Reset SPS state.
+ sps_ = SpsState();
+ sps_parsed_ = false;
+ // Parse out the SPS RBSP. It should be small, so it's ok that we create a
+ // copy. We'll eventually write this back.
+ rtc::scoped_ptr<rtc::ByteBuffer> sps_rbsp(
+ ParseRbsp(sps + kNaluHeaderAndTypeSize, length - kNaluHeaderAndTypeSize));
+ rtc::BitBuffer sps_parser(reinterpret_cast<const uint8*>(sps_rbsp->Data()),
+ sps_rbsp->Length());
+
+ uint8_t byte_tmp;
+ uint32_t golomb_tmp;
+ uint32_t bits_tmp;
+
+ // profile_idc: u(8).
+ uint8 profile_idc;
+ RETURN_FALSE_ON_FAIL(sps_parser.ReadUInt8(&profile_idc));
+ // constraint_set0_flag through constraint_set5_flag + reserved_zero_2bits
+ // 1 bit each for the flags + 2 bits = 8 bits = 1 byte.
+ RETURN_FALSE_ON_FAIL(sps_parser.ReadUInt8(&byte_tmp));
+ // level_idc: u(8)
+ RETURN_FALSE_ON_FAIL(sps_parser.ReadUInt8(&byte_tmp));
+ // seq_parameter_set_id: ue(v)
+ RETURN_FALSE_ON_FAIL(sps_parser.ReadExponentialGolomb(&golomb_tmp));
+ sps_.separate_colour_plane_flag = 0;
+ // See if profile_idc has chroma format information.
+ if (profile_idc == 100 || profile_idc == 110 || profile_idc == 122 ||
+ profile_idc == 244 || profile_idc == 44 || profile_idc == 83 ||
+ profile_idc == 86 || profile_idc == 118 || profile_idc == 128 ||
+ profile_idc == 138 || profile_idc == 139 || profile_idc == 134) {
+ // chroma_format_idc: ue(v)
+ uint32 chroma_format_idc;
+ RETURN_FALSE_ON_FAIL(sps_parser.ReadExponentialGolomb(&chroma_format_idc));
+ if (chroma_format_idc == 3) {
+ // separate_colour_plane_flag: u(1)
+ RETURN_FALSE_ON_FAIL(
+ sps_parser.ReadBits(&sps_.separate_colour_plane_flag, 1));
+ }
+ // bit_depth_luma_minus8: ue(v)
+ RETURN_FALSE_ON_FAIL(sps_parser.ReadExponentialGolomb(&golomb_tmp));
+ // bit_depth_chroma_minus8: ue(v)
+ RETURN_FALSE_ON_FAIL(sps_parser.ReadExponentialGolomb(&golomb_tmp));
+ // qpprime_y_zero_transform_bypass_flag: u(1)
+ RETURN_FALSE_ON_FAIL(sps_parser.ReadBits(&bits_tmp, 1));
+ // seq_scaling_matrix_present_flag: u(1)
+ uint32_t seq_scaling_matrix_present_flag;
+ RETURN_FALSE_ON_FAIL(
+ sps_parser.ReadBits(&seq_scaling_matrix_present_flag, 1));
+ if (seq_scaling_matrix_present_flag) {
+ // seq_scaling_list_present_flags. Either 8 or 12, depending on
+ // chroma_format_idc.
+ uint32_t seq_scaling_list_present_flags;
+ if (chroma_format_idc != 3) {
+ RETURN_FALSE_ON_FAIL(
+ sps_parser.ReadBits(&seq_scaling_list_present_flags, 8));
+ } else {
+ RETURN_FALSE_ON_FAIL(
+ sps_parser.ReadBits(&seq_scaling_list_present_flags, 12));
+ }
+ // TODO(pbos): Support parsing scaling lists if they're seen in practice.
+ RTC_CHECK(seq_scaling_list_present_flags == 0)
+ << "SPS contains scaling lists, which are unsupported.";
+ }
+ }
+ // log2_max_frame_num_minus4: ue(v)
+ RETURN_FALSE_ON_FAIL(
+ sps_parser.ReadExponentialGolomb(&sps_.log2_max_frame_num_minus4));
+ // pic_order_cnt_type: ue(v)
+ RETURN_FALSE_ON_FAIL(
+ sps_parser.ReadExponentialGolomb(&sps_.pic_order_cnt_type));
+
+ if (sps_.pic_order_cnt_type == 0) {
+ // log2_max_pic_order_cnt_lsb_minus4: ue(v)
+ RETURN_FALSE_ON_FAIL(sps_parser.ReadExponentialGolomb(
+ &sps_.log2_max_pic_order_cnt_lsb_minus4));
+ } else if (sps_.pic_order_cnt_type == 1) {
+ // delta_pic_order_always_zero_flag: u(1)
+ RETURN_FALSE_ON_FAIL(
+ sps_parser.ReadBits(&sps_.delta_pic_order_always_zero_flag, 1));
+ // offset_for_non_ref_pic: se(v)
+ RETURN_FALSE_ON_FAIL(sps_parser.ReadExponentialGolomb(&golomb_tmp));
+ // offset_for_top_to_bottom_field: se(v)
+ RETURN_FALSE_ON_FAIL(sps_parser.ReadExponentialGolomb(&golomb_tmp));
+ uint32_t num_ref_frames_in_pic_order_cnt_cycle;
+ // num_ref_frames_in_pic_order_cnt_cycle: ue(v)
+ RETURN_FALSE_ON_FAIL(sps_parser.ReadExponentialGolomb(
+ &num_ref_frames_in_pic_order_cnt_cycle));
+ for (uint32_t i = 0; i < num_ref_frames_in_pic_order_cnt_cycle; i++) {
+ // offset_for_ref_frame[i]: se(v)
+ RETURN_FALSE_ON_FAIL(sps_parser.ReadExponentialGolomb(&golomb_tmp));
+ }
+ }
+ // max_num_ref_frames: ue(v)
+ RETURN_FALSE_ON_FAIL(sps_parser.ReadExponentialGolomb(&golomb_tmp));
+ // gaps_in_frame_num_value_allowed_flag: u(1)
+ RETURN_FALSE_ON_FAIL(sps_parser.ReadBits(&bits_tmp, 1));
+ // pic_width_in_mbs_minus1: ue(v)
+ RETURN_FALSE_ON_FAIL(sps_parser.ReadExponentialGolomb(&golomb_tmp));
+ // pic_height_in_map_units_minus1: ue(v)
+ RETURN_FALSE_ON_FAIL(sps_parser.ReadExponentialGolomb(&golomb_tmp));
+ // frame_mbs_only_flag: u(1)
+ RETURN_FALSE_ON_FAIL(sps_parser.ReadBits(&sps_.frame_mbs_only_flag, 1));
+ sps_parsed_ = true;
+ return true;
+}
+
+bool H264BitstreamParser::ParsePpsNalu(const uint8_t* pps, size_t length) {
+ RTC_CHECK(sps_parsed_);
+ // We're starting a new stream, so reset picture type rewriting values.
+ pps_ = PpsState();
+ pps_parsed_ = false;
+ rtc::scoped_ptr<rtc::ByteBuffer> buffer(
+ ParseRbsp(pps + kNaluHeaderAndTypeSize, length - kNaluHeaderAndTypeSize));
+ rtc::BitBuffer parser(reinterpret_cast<const uint8*>(buffer->Data()),
+ buffer->Length());
+
+ uint32_t bits_tmp;
+ uint32_t golomb_ignored;
+ // pic_parameter_set_id: ue(v)
+ RETURN_FALSE_ON_FAIL(parser.ReadExponentialGolomb(&golomb_ignored));
+ // seq_parameter_set_id: ue(v)
+ RETURN_FALSE_ON_FAIL(parser.ReadExponentialGolomb(&golomb_ignored));
+ // entropy_coding_mode_flag: u(1)
+ uint32_t entropy_coding_mode_flag;
+ RETURN_FALSE_ON_FAIL(parser.ReadBits(&entropy_coding_mode_flag, 1));
+ // TODO(pbos): Implement CABAC support if spotted in the wild.
+ RTC_CHECK(entropy_coding_mode_flag == 0)
+ << "Don't know how to parse CABAC streams.";
+ // bottom_field_pic_order_in_frame_present_flag: u(1)
+ uint32_t bottom_field_pic_order_in_frame_present_flag;
+ RETURN_FALSE_ON_FAIL(
+ parser.ReadBits(&bottom_field_pic_order_in_frame_present_flag, 1));
+ pps_.bottom_field_pic_order_in_frame_present_flag =
+ bottom_field_pic_order_in_frame_present_flag != 0;
+
+ // num_slice_groups_minus1: ue(v)
+ uint32_t num_slice_groups_minus1;
+ RETURN_FALSE_ON_FAIL(parser.ReadExponentialGolomb(&num_slice_groups_minus1));
+ if (num_slice_groups_minus1 > 0) {
+ uint32_t slice_group_map_type;
+ // slice_group_map_type: ue(v)
+ RETURN_FALSE_ON_FAIL(parser.ReadExponentialGolomb(&slice_group_map_type));
+ if (slice_group_map_type == 0) {
+ for (uint32_t i_group = 0; i_group <= num_slice_groups_minus1;
+ ++i_group) {
+ // run_length_minus1[iGroup]: ue(v)
+ RETURN_FALSE_ON_FAIL(parser.ReadExponentialGolomb(&golomb_ignored));
+ }
+ } else if (slice_group_map_type == 2) {
+ for (uint32_t i_group = 0; i_group <= num_slice_groups_minus1;
+ ++i_group) {
+ // top_left[iGroup]: ue(v)
+ RETURN_FALSE_ON_FAIL(parser.ReadExponentialGolomb(&golomb_ignored));
+ // bottom_right[iGroup]: ue(v)
+ RETURN_FALSE_ON_FAIL(parser.ReadExponentialGolomb(&golomb_ignored));
+ }
+ } else if (slice_group_map_type == 3 || slice_group_map_type == 4 ||
+ slice_group_map_type == 5) {
+ // slice_group_change_direction_flag: u(1)
+ RETURN_FALSE_ON_FAIL(parser.ReadBits(&bits_tmp, 1));
+ // slice_group_change_rate_minus1: ue(v)
+ RETURN_FALSE_ON_FAIL(parser.ReadExponentialGolomb(&golomb_ignored));
+ } else if (slice_group_map_type == 6) {
+ // pic_size_in_map_units_minus1: ue(v)
+ uint32_t pic_size_in_map_units_minus1;
+ RETURN_FALSE_ON_FAIL(
+ parser.ReadExponentialGolomb(&pic_size_in_map_units_minus1));
+ uint32_t slice_group_id_bits = 0;
+ uint32_t num_slice_groups = num_slice_groups_minus1 + 1;
+ // If num_slice_groups is not a power of two an additional bit is required
+ // to account for the ceil() of log2() below.
+ if ((num_slice_groups & (num_slice_groups - 1)) != 0)
+ ++slice_group_id_bits;
+ while (num_slice_groups > 0) {
+ num_slice_groups >>= 1;
+ ++slice_group_id_bits;
+ }
+ for (uint32_t i = 0; i <= pic_size_in_map_units_minus1; i++) {
+ // slice_group_id[i]: u(v)
+ // Represented by ceil(log2(num_slice_groups_minus1 + 1)) bits.
+ RETURN_FALSE_ON_FAIL(parser.ReadBits(&bits_tmp, slice_group_id_bits));
+ }
+ }
+ }
+ // num_ref_idx_l0_default_active_minus1: ue(v)
+ RETURN_FALSE_ON_FAIL(parser.ReadExponentialGolomb(&golomb_ignored));
+ // num_ref_idx_l1_default_active_minus1: ue(v)
+ RETURN_FALSE_ON_FAIL(parser.ReadExponentialGolomb(&golomb_ignored));
+ // weighted_pred_flag: u(1)
+ uint32_t weighted_pred_flag;
+ RETURN_FALSE_ON_FAIL(parser.ReadBits(&weighted_pred_flag, 1));
+ pps_.weighted_pred_flag = weighted_pred_flag != 0;
+ // weighted_bipred_idc: u(2)
+ RETURN_FALSE_ON_FAIL(parser.ReadBits(&pps_.weighted_bipred_idc, 2));
+
+ // pic_init_qp_minus26: se(v)
+ RETURN_FALSE_ON_FAIL(
+ parser.ReadSignedExponentialGolomb(&pps_.pic_init_qp_minus26));
+ // pic_init_qs_minus26: se(v)
+ RETURN_FALSE_ON_FAIL(parser.ReadExponentialGolomb(&golomb_ignored));
+ // chroma_qp_index_offset: se(v)
+ RETURN_FALSE_ON_FAIL(parser.ReadExponentialGolomb(&golomb_ignored));
+ // deblocking_filter_control_present_flag: u(1)
+ // constrained_intra_pred_flag: u(1)
+ RETURN_FALSE_ON_FAIL(parser.ReadBits(&bits_tmp, 2));
+ // redundant_pic_cnt_present_flag: u(1)
+ RETURN_FALSE_ON_FAIL(
+ parser.ReadBits(&pps_.redundant_pic_cnt_present_flag, 1));
+
+ pps_parsed_ = true;
+ return true;
+}
+
+// Parses a picture slice NALU (any NALU that is not an SPS/PPS here) far
+// enough to extract slice_qp_delta into |last_slice_qp_delta_|. Field
+// presence and bit widths depend on state captured from previously parsed
+// SPS/PPS, hence the RTC_CHECKs below.
+bool H264BitstreamParser::ParseNonParameterSetNalu(const uint8_t* source,
+                                                   size_t source_length,
+                                                   uint8_t nalu_type) {
+  RTC_CHECK(sps_parsed_);
+  RTC_CHECK(pps_parsed_);
+  last_slice_qp_delta_parsed_ = false;
+  rtc::scoped_ptr<rtc::ByteBuffer> slice_rbsp(ParseRbsp(
+      source + kNaluHeaderAndTypeSize, source_length - kNaluHeaderAndTypeSize));
+  rtc::BitBuffer slice_reader(
+      reinterpret_cast<const uint8*>(slice_rbsp->Data()), slice_rbsp->Length());
+  // Check to see if this is an IDR slice, which has an extra field to parse
+  // out.
+  // NOTE(review): masks with 0x0F although the NALU type field is 5 bits
+  // (0x1F); this relies on slice NALU types being < 16 -- confirm.
+  bool is_idr = (source[kNaluHeaderSize] & 0x0F) == kNaluIdr;
+  // nal_ref_idc occupies bits 5..6 of the NALU header byte.
+  uint8_t nal_ref_idc = (source[kNaluHeaderSize] & 0x60) >> 5;
+  uint32_t golomb_tmp;
+  uint32_t bits_tmp;
+
+  // first_mb_in_slice: ue(v)
+  RETURN_FALSE_ON_FAIL(slice_reader.ReadExponentialGolomb(&golomb_tmp));
+  // slice_type: ue(v)
+  uint32_t slice_type;
+  RETURN_FALSE_ON_FAIL(slice_reader.ReadExponentialGolomb(&slice_type));
+  // slice_type's 5..9 range is used to indicate that all slices of a picture
+  // have the same value of slice_type % 5, we don't care about that, so we map
+  // to the corresponding 0..4 range.
+  slice_type %= 5;
+  // pic_parameter_set_id: ue(v)
+  RETURN_FALSE_ON_FAIL(slice_reader.ReadExponentialGolomb(&golomb_tmp));
+  if (sps_.separate_colour_plane_flag == 1) {
+    // colour_plane_id
+    RETURN_FALSE_ON_FAIL(slice_reader.ReadBits(&bits_tmp, 2));
+  }
+  // frame_num: u(v)
+  // Represented by log2_max_frame_num_minus4 + 4 bits.
+  RETURN_FALSE_ON_FAIL(
+      slice_reader.ReadBits(&bits_tmp, sps_.log2_max_frame_num_minus4 + 4));
+  uint32 field_pic_flag = 0;
+  if (sps_.frame_mbs_only_flag == 0) {
+    // field_pic_flag: u(1)
+    RETURN_FALSE_ON_FAIL(slice_reader.ReadBits(&field_pic_flag, 1));
+    if (field_pic_flag != 0) {
+      // bottom_field_flag: u(1)
+      RETURN_FALSE_ON_FAIL(slice_reader.ReadBits(&bits_tmp, 1));
+    }
+  }
+  if (is_idr) {
+    // idr_pic_id: ue(v)
+    RETURN_FALSE_ON_FAIL(slice_reader.ReadExponentialGolomb(&golomb_tmp));
+  }
+  // pic_order_cnt_lsb: u(v)
+  // Represented by sps_.log2_max_pic_order_cnt_lsb_minus4 + 4 bits.
+  if (sps_.pic_order_cnt_type == 0) {
+    RETURN_FALSE_ON_FAIL(slice_reader.ReadBits(
+        &bits_tmp, sps_.log2_max_pic_order_cnt_lsb_minus4 + 4));
+    if (pps_.bottom_field_pic_order_in_frame_present_flag &&
+        field_pic_flag == 0) {
+      // delta_pic_order_cnt_bottom: se(v)
+      RETURN_FALSE_ON_FAIL(slice_reader.ReadExponentialGolomb(&golomb_tmp));
+    }
+  }
+  if (sps_.pic_order_cnt_type == 1 && !sps_.delta_pic_order_always_zero_flag) {
+    // delta_pic_order_cnt[0]: se(v)
+    RETURN_FALSE_ON_FAIL(slice_reader.ReadExponentialGolomb(&golomb_tmp));
+    if (pps_.bottom_field_pic_order_in_frame_present_flag && !field_pic_flag) {
+      // delta_pic_order_cnt[1]: se(v)
+      RETURN_FALSE_ON_FAIL(slice_reader.ReadExponentialGolomb(&golomb_tmp));
+    }
+  }
+  if (pps_.redundant_pic_cnt_present_flag) {
+    // redundant_pic_cnt: ue(v)
+    RETURN_FALSE_ON_FAIL(slice_reader.ReadExponentialGolomb(&golomb_tmp));
+  }
+  if (slice_type == kSliceTypeB) {
+    // direct_spatial_mv_pred_flag: u(1)
+    RETURN_FALSE_ON_FAIL(slice_reader.ReadBits(&bits_tmp, 1));
+  }
+  if (slice_type == kSliceTypeP || slice_type == kSliceTypeSp ||
+      slice_type == kSliceTypeB) {
+    uint32_t num_ref_idx_active_override_flag;
+    // num_ref_idx_active_override_flag: u(1)
+    RETURN_FALSE_ON_FAIL(
+        slice_reader.ReadBits(&num_ref_idx_active_override_flag, 1));
+    if (num_ref_idx_active_override_flag != 0) {
+      // num_ref_idx_l0_active_minus1: ue(v)
+      RETURN_FALSE_ON_FAIL(slice_reader.ReadExponentialGolomb(&golomb_tmp));
+      if (slice_type == kSliceTypeB) {
+        // num_ref_idx_l1_active_minus1: ue(v)
+        RETURN_FALSE_ON_FAIL(slice_reader.ReadExponentialGolomb(&golomb_tmp));
+      }
+    }
+  }
+  // assume nal_unit_type != 20 && nal_unit_type != 21:
+  RTC_CHECK_NE(nalu_type, 20);
+  RTC_CHECK_NE(nalu_type, 21);
+  // if (nal_unit_type == 20 || nal_unit_type == 21)
+  //   ref_pic_list_mvc_modification()
+  // else
+  {
+    // ref_pic_list_modification():
+    // |slice_type| checks here don't use named constants as they aren't named
+    // in the spec for this segment. Keeping them consistent makes it easier to
+    // verify that they are both the same.
+    if (slice_type % 5 != 2 && slice_type % 5 != 4) {
+      // ref_pic_list_modification_flag_l0: u(1)
+      uint32_t ref_pic_list_modification_flag_l0;
+      RETURN_FALSE_ON_FAIL(
+          slice_reader.ReadBits(&ref_pic_list_modification_flag_l0, 1));
+      if (ref_pic_list_modification_flag_l0) {
+        uint32_t modification_of_pic_nums_idc;
+        do {
+          // modification_of_pic_nums_idc: ue(v)
+          RETURN_FALSE_ON_FAIL(slice_reader.ReadExponentialGolomb(
+              &modification_of_pic_nums_idc));
+          if (modification_of_pic_nums_idc == 0 ||
+              modification_of_pic_nums_idc == 1) {
+            // abs_diff_pic_num_minus1: ue(v)
+            RETURN_FALSE_ON_FAIL(
+                slice_reader.ReadExponentialGolomb(&golomb_tmp));
+          } else if (modification_of_pic_nums_idc == 2) {
+            // long_term_pic_num: ue(v)
+            RETURN_FALSE_ON_FAIL(
+                slice_reader.ReadExponentialGolomb(&golomb_tmp));
+          }
+        } while (modification_of_pic_nums_idc != 3);
+      }
+    }
+    if (slice_type % 5 == 1) {
+      // ref_pic_list_modification_flag_l1: u(1)
+      uint32_t ref_pic_list_modification_flag_l1;
+      RETURN_FALSE_ON_FAIL(
+          slice_reader.ReadBits(&ref_pic_list_modification_flag_l1, 1));
+      if (ref_pic_list_modification_flag_l1) {
+        uint32_t modification_of_pic_nums_idc;
+        do {
+          // modification_of_pic_nums_idc: ue(v)
+          RETURN_FALSE_ON_FAIL(slice_reader.ReadExponentialGolomb(
+              &modification_of_pic_nums_idc));
+          if (modification_of_pic_nums_idc == 0 ||
+              modification_of_pic_nums_idc == 1) {
+            // abs_diff_pic_num_minus1: ue(v)
+            RETURN_FALSE_ON_FAIL(
+                slice_reader.ReadExponentialGolomb(&golomb_tmp));
+          } else if (modification_of_pic_nums_idc == 2) {
+            // long_term_pic_num: ue(v)
+            RETURN_FALSE_ON_FAIL(
+                slice_reader.ReadExponentialGolomb(&golomb_tmp));
+          }
+        } while (modification_of_pic_nums_idc != 3);
+      }
+    }
+  }
+  // TODO(pbos): Do we need support for pred_weight_table()?
+  RTC_CHECK(!((pps_.weighted_pred_flag &&
+               (slice_type == kSliceTypeP || slice_type == kSliceTypeSp)) ||
+              (pps_.weighted_bipred_idc != 0 && slice_type == kSliceTypeB)))
+      << "Missing support for pred_weight_table().";
+  // if ((weighted_pred_flag && (slice_type == P || slice_type == SP)) ||
+  //    (weighted_bipred_idc == 1 && slice_type == B)) {
+  //  pred_weight_table()
+  // }
+  if (nal_ref_idc != 0) {
+    // dec_ref_pic_marking():
+    if (is_idr) {
+      // no_output_of_prior_pics_flag: u(1)
+      // long_term_reference_flag: u(1)
+      RETURN_FALSE_ON_FAIL(slice_reader.ReadBits(&bits_tmp, 2));
+    } else {
+      // adaptive_ref_pic_marking_mode_flag: u(1)
+      uint32_t adaptive_ref_pic_marking_mode_flag;
+      RETURN_FALSE_ON_FAIL(
+          slice_reader.ReadBits(&adaptive_ref_pic_marking_mode_flag, 1));
+      if (adaptive_ref_pic_marking_mode_flag) {
+        uint32_t memory_management_control_operation;
+        do {
+          // memory_management_control_operation: ue(v)
+          RETURN_FALSE_ON_FAIL(slice_reader.ReadExponentialGolomb(
+              &memory_management_control_operation));
+          if (memory_management_control_operation == 1 ||
+              memory_management_control_operation == 3) {
+            // difference_of_pic_nums_minus1: ue(v)
+            RETURN_FALSE_ON_FAIL(
+                slice_reader.ReadExponentialGolomb(&golomb_tmp));
+          }
+          if (memory_management_control_operation == 2) {
+            // long_term_pic_num: ue(v)
+            RETURN_FALSE_ON_FAIL(
+                slice_reader.ReadExponentialGolomb(&golomb_tmp));
+          }
+          if (memory_management_control_operation == 3 ||
+              memory_management_control_operation == 6) {
+            // long_term_frame_idx: ue(v)
+            RETURN_FALSE_ON_FAIL(
+                slice_reader.ReadExponentialGolomb(&golomb_tmp));
+          }
+          if (memory_management_control_operation == 4) {
+            // max_long_term_frame_idx_plus1: ue(v)
+            RETURN_FALSE_ON_FAIL(
+                slice_reader.ReadExponentialGolomb(&golomb_tmp));
+          }
+        } while (memory_management_control_operation != 0);
+      }
+    }
+  }
+  // cabac not supported: entropy_coding_mode_flag == 0 asserted above.
+  // if (entropy_coding_mode_flag && slice_type != I && slice_type != SI)
+  //   cabac_init_idc
+  // slice_qp_delta: se(v) -- the value this whole parse exists to extract.
+  RETURN_FALSE_ON_FAIL(
+      slice_reader.ReadSignedExponentialGolomb(&last_slice_qp_delta_));
+  last_slice_qp_delta_parsed_ = true;
+  return true;
+}
+
+// Dispatches a single NALU (including its start code) to the appropriate
+// parser based on its type. Reads the NALU header byte at offset 4, so it
+// assumes a 4-byte 0x00000001 start code prefix.
+void H264BitstreamParser::ParseSlice(const uint8_t* slice, size_t length) {
+  uint8_t nalu_type = slice[4] & kNaluTypeMask;
+  switch (nalu_type) {
+    case kNaluSps:
+      RTC_CHECK(ParseSpsNalu(slice, length))
+          << "Failed to parse bitstream SPS.";
+      break;
+    case kNaluPps:
+      RTC_CHECK(ParsePpsNalu(slice, length))
+          << "Failed to parse bitstream PPS.";
+      break;
+    default:
+      // Anything that is not SPS/PPS is treated as a picture slice.
+      RETURN:;
+      RTC_CHECK(ParseNonParameterSetNalu(slice, length, nalu_type))
+          << "Failed to parse picture slice.";
+      break;
+  }
+}
+
+// Splits |bitstream| into NALUs on start-code boundaries and parses each in
+// order. CHECKs that the chunk holds at least one 4-byte start code and that
+// at least one NALU was found.
+void H264BitstreamParser::ParseBitstream(const uint8_t* bitstream,
+                                         size_t length) {
+  RTC_CHECK_GE(length, 4u);
+  std::vector<size_t> slice_markers = FindNaluStartSequences(bitstream, length);
+  RTC_CHECK(!slice_markers.empty());
+  for (size_t i = 0; i < slice_markers.size() - 1; ++i) {
+    ParseSlice(bitstream + slice_markers[i],
+               slice_markers[i + 1] - slice_markers[i]);
+  }
+  // Parse the last slice.
+  ParseSlice(bitstream + slice_markers.back(), length - slice_markers.back());
+}
+
+// Reports the QP of the most recently parsed slice. Returns false until at
+// least one slice has been parsed.
+bool H264BitstreamParser::GetLastSliceQp(int* qp) const {
+  if (last_slice_qp_delta_parsed_) {
+    // Slice QP is the PPS base QP plus the per-slice delta.
+    *qp = 26 + pps_.pic_init_qp_minus26 + last_slice_qp_delta_;
+    return true;
+  }
+  return false;
+}
+
+} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/source/h264_bitstream_parser.h b/chromium/third_party/webrtc/modules/rtp_rtcp/source/h264_bitstream_parser.h
new file mode 100644
index 00000000000..53ef2a61f4d
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/source/h264_bitstream_parser.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_RTP_RTCP_SOURCE_H264_BITSTREAM_PARSER_H_
+#define WEBRTC_MODULES_RTP_RTCP_SOURCE_H264_BITSTREAM_PARSER_H_
+
+#include <stdint.h>
+#include <stddef.h>
+
+namespace rtc {
+class BitBuffer;
+}
+
+namespace webrtc {
+
+// Stateful H264 bitstream parser (due to SPS/PPS). Used to parse out QP values
+// from the bitstream.
+// TODO(pbos): Unify with RTP SPS parsing and only use one H264 parser.
+// TODO(pbos): If/when this gets used on the receiver side CHECKs must be
+// removed and gracefully abort as we have no control over receive-side
+// bitstreams.
+class H264BitstreamParser {
+ public:
+  // Parse an additional chunk of H264 bitstream.
+  void ParseBitstream(const uint8_t* bitstream, size_t length);
+
+  // Get the last extracted QP value from the parsed bitstream. Returns false
+  // until at least one picture slice has been parsed.
+  bool GetLastSliceQp(int* qp) const;
+
+ private:
+  // Captured in SPS and used when parsing slice NALUs.
+  struct SpsState {
+    SpsState();
+
+    uint32_t delta_pic_order_always_zero_flag = 0;
+    uint32_t separate_colour_plane_flag = 0;
+    uint32_t frame_mbs_only_flag = 0;
+    uint32_t log2_max_frame_num_minus4 = 0;
+    uint32_t log2_max_pic_order_cnt_lsb_minus4 = 0;
+    uint32_t pic_order_cnt_type = 0;
+  };
+
+  // Captured in PPS and used when parsing slice NALUs.
+  struct PpsState {
+    PpsState();
+
+    bool bottom_field_pic_order_in_frame_present_flag = false;
+    bool weighted_pred_flag = false;
+    // u(2) integer syntax element, not a flag; initialize with 0 rather than
+    // a bool literal.
+    uint32_t weighted_bipred_idc = 0;
+    uint32_t redundant_pic_cnt_present_flag = 0;
+    int pic_init_qp_minus26 = 0;
+  };
+
+  void ParseSlice(const uint8_t* slice, size_t length);
+  bool ParseSpsNalu(const uint8_t* sps_nalu, size_t length);
+  bool ParsePpsNalu(const uint8_t* pps_nalu, size_t length);
+  bool ParseNonParameterSetNalu(const uint8_t* source,
+                                size_t source_length,
+                                uint8_t nalu_type);
+
+  // SPS/PPS state, updated when parsing new SPS/PPS, used to parse slices.
+  bool sps_parsed_ = false;
+  SpsState sps_;
+  bool pps_parsed_ = false;
+  PpsState pps_;
+
+  // Last parsed slice QP.
+  bool last_slice_qp_delta_parsed_ = false;
+  int32_t last_slice_qp_delta_ = 0;
+};
+
+}  // namespace webrtc
+
+#endif // WEBRTC_MODULES_RTP_RTCP_SOURCE_H264_BITSTREAM_PARSER_H_
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/source/h264_bitstream_parser_unittest.cc b/chromium/third_party/webrtc/modules/rtp_rtcp/source/h264_bitstream_parser_unittest.cc
new file mode 100644
index 00000000000..6c726c3120f
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/source/h264_bitstream_parser_unittest.cc
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/rtp_rtcp/source/h264_bitstream_parser.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace webrtc {
+
+// SPS/PPS part of below chunk.
+uint8_t kH264SpsPps[] = {0x00, 0x00, 0x00, 0x01, 0x67, 0x42, 0x80, 0x20, 0xda,
+                         0x01, 0x40, 0x16, 0xe8, 0x06, 0xd0, 0xa1, 0x35, 0x00,
+                         0x00, 0x00, 0x01, 0x68, 0xce, 0x06, 0xe2};
+
+// Contains enough of the image slice to contain slice QP.
+uint8_t kH264BitstreamChunk[] = {
+    0x00, 0x00, 0x00, 0x01, 0x67, 0x42, 0x80, 0x20, 0xda, 0x01, 0x40, 0x16,
+    0xe8, 0x06, 0xd0, 0xa1, 0x35, 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x06,
+    0xe2, 0x00, 0x00, 0x00, 0x01, 0x65, 0xb8, 0x40, 0xf0, 0x8c, 0x03, 0xf2,
+    0x75, 0x67, 0xad, 0x41, 0x64, 0x24, 0x0e, 0xa0, 0xb2, 0x12, 0x1e, 0xf8,
+};
+
+// Contains enough of the image slice to contain slice QP.
+uint8_t kH264BitstreamNextImageSliceChunk[] = {
+    0x00, 0x00, 0x00, 0x01, 0x41, 0xe2, 0x01, 0x16, 0x0e, 0x3e, 0x2b, 0x86,
+};
+
+// No QP is available before any slice has been parsed.
+TEST(H264BitstreamParserTest, ReportsNoQpWithoutParsedSlices) {
+  H264BitstreamParser h264_parser;
+  int qp;
+  EXPECT_FALSE(h264_parser.GetLastSliceQp(&qp));
+}
+
+// SPS/PPS alone carry no slice QP; GetLastSliceQp must still report false.
+TEST(H264BitstreamParserTest, ReportsNoQpWithOnlyParsedPpsAndSpsSlices) {
+  H264BitstreamParser h264_parser;
+  h264_parser.ParseBitstream(kH264SpsPps, sizeof(kH264SpsPps));
+  int qp;
+  EXPECT_FALSE(h264_parser.GetLastSliceQp(&qp));
+}
+
+// Each parsed picture slice updates the reported QP (parser is stateful).
+TEST(H264BitstreamParserTest, ReportsLastSliceQpForImageSlices) {
+  H264BitstreamParser h264_parser;
+  h264_parser.ParseBitstream(kH264BitstreamChunk, sizeof(kH264BitstreamChunk));
+  int qp;
+  ASSERT_TRUE(h264_parser.GetLastSliceQp(&qp));
+  EXPECT_EQ(35, qp);
+
+  // Parse an additional image slice.
+  h264_parser.ParseBitstream(kH264BitstreamNextImageSliceChunk,
+                             sizeof(kH264BitstreamNextImageSliceChunk));
+  ASSERT_TRUE(h264_parser.GetLastSliceQp(&qp));
+  EXPECT_EQ(37, qp);
+}
+
+}  // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/source/h264_sps_parser.cc b/chromium/third_party/webrtc/modules/rtp_rtcp/source/h264_sps_parser.cc
index 034e761dcd4..d8f9afdd048 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/source/h264_sps_parser.cc
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/source/h264_sps_parser.cc
@@ -36,7 +36,11 @@ bool H264SpsParser::Parse() {
// section 7.3.1 of the H.264 standard.
rtc::ByteBuffer rbsp_buffer;
for (size_t i = 0; i < byte_length_;) {
- if (i + 3 < byte_length_ && sps_[i] == 0 && sps_[i + 1] == 0 &&
+ // Be careful about over/underflow here. byte_length_ - 3 can underflow, and
+ // i + 3 can overflow, but byte_length_ - i can't, because i < byte_length_
+ // above, and that expression will produce the number of bytes left in
+ // the stream including the byte at i.
+ if (byte_length_ - i >= 3 && sps_[i] == 0 && sps_[i + 1] == 0 &&
sps_[i + 2] == 3) {
// Two rbsp bytes + the emulation byte.
rbsp_buffer.WriteBytes(sps_bytes + i, 2);
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/source/nack_rtx_unittest.cc b/chromium/third_party/webrtc/modules/rtp_rtcp/source/nack_rtx_unittest.cc
index 0dc33227711..483ee133d5b 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/source/nack_rtx_unittest.cc
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/source/nack_rtx_unittest.cc
@@ -22,11 +22,11 @@
#include "webrtc/modules/rtp_rtcp/interface/rtp_receiver.h"
#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp.h"
#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
+#include "webrtc/transport.h"
using namespace webrtc;
const int kVideoNackListSize = 30;
-const int kTestId = 123;
const uint32_t kTestSsrc = 3456;
const uint16_t kTestSequenceNumber = 2345;
const uint32_t kTestNumberOfPackets = 1350;
@@ -57,7 +57,7 @@ class TestRtpFeedback : public NullRtpFeedback {
TestRtpFeedback(RtpRtcp* rtp_rtcp) : rtp_rtcp_(rtp_rtcp) {}
virtual ~TestRtpFeedback() {}
- void OnIncomingSSRCChanged(const int32_t id, const uint32_t ssrc) override {
+ void OnIncomingSSRCChanged(const uint32_t ssrc) override {
rtp_rtcp_->SetRemoteSSRC(ssrc);
}
@@ -96,22 +96,14 @@ class RtxLoopBackTransport : public webrtc::Transport {
packet_loss_ = 0;
}
- int SendPacket(int channel, const void* data, size_t len) override {
+ bool SendRtp(const uint8_t* data,
+ size_t len,
+ const PacketOptions& options) override {
count_++;
const unsigned char* ptr = static_cast<const unsigned char*>(data);
uint32_t ssrc = (ptr[8] << 24) + (ptr[9] << 16) + (ptr[10] << 8) + ptr[11];
if (ssrc == rtx_ssrc_) count_rtx_ssrc_++;
uint16_t sequence_number = (ptr[2] << 8) + ptr[3];
- expected_sequence_numbers_.insert(expected_sequence_numbers_.end(),
- sequence_number);
- if (packet_loss_ > 0) {
- if ((count_ % packet_loss_) == 0) {
- return static_cast<int>(len);
- }
- } else if (count_ >= consecutive_drop_start_ &&
- count_ < consecutive_drop_end_) {
- return static_cast<int>(len);
- }
size_t packet_length = len;
// TODO(pbos): Figure out why this needs to be initialized. Likely this
// is hiding a bug either in test setup or other code.
@@ -121,7 +113,22 @@ class RtxLoopBackTransport : public webrtc::Transport {
RTPHeader header;
rtc::scoped_ptr<RtpHeaderParser> parser(RtpHeaderParser::Create());
if (!parser->Parse(ptr, len, &header)) {
- return -1;
+ return false;
+ }
+
+ if (!rtp_payload_registry_->IsRtx(header)) {
+ // Don't store retransmitted packets since we compare it to the list
+ // created by the receiver.
+ expected_sequence_numbers_.insert(expected_sequence_numbers_.end(),
+ sequence_number);
+ }
+ if (packet_loss_ > 0) {
+ if ((count_ % packet_loss_) == 0) {
+ return true;
+ }
+ } else if (count_ >= consecutive_drop_start_ &&
+ count_ < consecutive_drop_end_) {
+ return true;
}
if (rtp_payload_registry_->IsRtx(header)) {
// Remove the RTX header and parse the original RTP header.
@@ -129,7 +136,7 @@ class RtxLoopBackTransport : public webrtc::Transport {
&restored_packet_ptr, ptr, &packet_length, rtp_receiver_->SSRC(),
header));
if (!parser->Parse(restored_packet_ptr, packet_length, &header)) {
- return -1;
+ return false;
}
} else {
rtp_payload_registry_->SetIncomingPayloadType(header);
@@ -140,21 +147,18 @@ class RtxLoopBackTransport : public webrtc::Transport {
PayloadUnion payload_specific;
if (!rtp_payload_registry_->GetPayloadSpecifics(header.payloadType,
&payload_specific)) {
- return -1;
+ return false;
}
if (!rtp_receiver_->IncomingRtpPacket(header, restored_packet_ptr,
packet_length, payload_specific,
true)) {
- return -1;
+ return false;
}
- return static_cast<int>(len);
+ return true;
}
- int SendRTCPPacket(int channel, const void* data, size_t len) override {
- if (module_->IncomingRtcpPacket((const uint8_t*)data, len) == 0) {
- return static_cast<int>(len);
- }
- return -1;
+ bool SendRtcp(const uint8_t* data, size_t len) override {
+ return module_->IncomingRtcpPacket((const uint8_t*)data, len) == 0;
}
int count_;
int packet_loss_;
@@ -181,7 +185,6 @@ class RtpRtcpRtxNackTest : public ::testing::Test {
void SetUp() override {
RtpRtcp::Configuration configuration;
- configuration.id = kTestId;
configuration.audio = false;
configuration.clock = &fake_clock;
receive_statistics_.reset(ReceiveStatistics::Create(&fake_clock));
@@ -192,11 +195,11 @@ class RtpRtcpRtxNackTest : public ::testing::Test {
rtp_feedback_.reset(new TestRtpFeedback(rtp_rtcp_module_));
rtp_receiver_.reset(RtpReceiver::CreateVideoReceiver(
- kTestId, &fake_clock, &receiver_, rtp_feedback_.get(),
+ &fake_clock, &receiver_, rtp_feedback_.get(),
&rtp_payload_registry_));
rtp_rtcp_module_->SetSSRC(kTestSsrc);
- rtp_rtcp_module_->SetRTCPStatus(kRtcpCompound);
+ rtp_rtcp_module_->SetRTCPStatus(RtcpMode::kCompound);
rtp_receiver_->SetNACKStatus(kNackRtcp);
rtp_rtcp_module_->SetStorePacketsStatus(true, 600);
EXPECT_EQ(0, rtp_rtcp_module_->SetSendingStatus(true));
@@ -257,7 +260,9 @@ class RtpRtcpRtxNackTest : public ::testing::Test {
receiver_.sequence_numbers_.end(),
std::back_inserter(received_sorted));
received_sorted.sort();
- return std::equal(received_sorted.begin(), received_sorted.end(),
+ return received_sorted.size() ==
+ transport_.expected_sequence_numbers_.size() &&
+ std::equal(received_sorted.begin(), received_sorted.end(),
transport_.expected_sequence_numbers_.begin());
}
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/source/packet_loss_stats.cc b/chromium/third_party/webrtc/modules/rtp_rtcp/source/packet_loss_stats.cc
new file mode 100644
index 00000000000..1def671f200
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/source/packet_loss_stats.cc
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/rtp_rtcp/source/packet_loss_stats.h"
+
+#include <vector>
+
+#include "webrtc/base/checks.h"
+
+// After this many packets are added, adding additional packets will cause the
+// oldest packets to be pruned from the buffer.
+static const int kBufferSize = 100;
+
+namespace webrtc {
+
+PacketLossStats::PacketLossStats()
+    : single_loss_historic_count_(0),
+      multiple_loss_historic_event_count_(0),
+      multiple_loss_historic_packet_count_(0) {
+}
+
+// Records a lost packet. Packets that arrive after a sequence-number wrap are
+// kept in a separate buffer so that ordered iteration across the wrap stays
+// meaningful.
+void PacketLossStats::AddLostPacket(uint16_t sequence_number) {
+  // Detect sequence number wrap around.
+  if (!lost_packets_buffer_.empty() &&
+      static_cast<int>(*(lost_packets_buffer_.rbegin())) - sequence_number
+      > 0x8000) {
+    // The buffer contains large numbers and this is a small number.
+    lost_packets_wrapped_buffer_.insert(sequence_number);
+  } else {
+    lost_packets_buffer_.insert(sequence_number);
+  }
+  // Prune when the combined buffers exceed the cap, or when the wrapped
+  // buffer has grown far enough (> 0x4000) that pre-wrap entries are stale.
+  // The cast avoids a signed/unsigned comparison between size_t and int.
+  if (lost_packets_wrapped_buffer_.size() + lost_packets_buffer_.size()
+      > static_cast<size_t>(kBufferSize) ||
+      (!lost_packets_wrapped_buffer_.empty() &&
+       *(lost_packets_wrapped_buffer_.rbegin()) > 0x4000)) {
+    PruneBuffer();
+  }
+}
+
+// Number of isolated single-packet losses (historic + currently buffered).
+int PacketLossStats::GetSingleLossCount() const {
+  int single_loss_count, unused1, unused2;
+  ComputeLossCounts(&single_loss_count, &unused1, &unused2);
+  return single_loss_count;
+}
+
+// Number of loss events spanning more than one consecutive packet.
+int PacketLossStats::GetMultipleLossEventCount() const {
+  int event_count, unused1, unused2;
+  ComputeLossCounts(&unused1, &event_count, &unused2);
+  return event_count;
+}
+
+// Total packets lost across all multiple-loss events.
+int PacketLossStats::GetMultipleLossPacketCount() const {
+  int packet_count, unused1, unused2;
+  ComputeLossCounts(&unused1, &unused2, &packet_count);
+  return packet_count;
+}
+
+// Computes all three counters in one pass: starts from the historic totals
+// and walks the two buffers in wrap order, grouping runs of consecutive
+// sequence numbers into single- or multiple-loss events.
+void PacketLossStats::ComputeLossCounts(
+    int* out_single_loss_count,
+    int* out_multiple_loss_event_count,
+    int* out_multiple_loss_packet_count) const {
+  *out_single_loss_count = single_loss_historic_count_;
+  *out_multiple_loss_event_count = multiple_loss_historic_event_count_;
+  *out_multiple_loss_packet_count = multiple_loss_historic_packet_count_;
+  if (lost_packets_buffer_.empty()) {
+    RTC_DCHECK(lost_packets_wrapped_buffer_.empty());
+    return;
+  }
+  uint16_t last_num = 0;
+  int sequential_count = 0;
+  // Walk the pre-wrap buffer first, then the post-wrap buffer, so sequence
+  // numbers are visited in stream order even across a wrap.
+  std::vector<const std::set<uint16_t>*> buffers;
+  buffers.push_back(&lost_packets_buffer_);
+  buffers.push_back(&lost_packets_wrapped_buffer_);
+  for (auto buffer : buffers) {
+    for (auto it = buffer->begin(); it != buffer->end(); ++it) {
+      uint16_t current_num = *it;
+      if (sequential_count > 0 && current_num != ((last_num + 1) & 0xFFFF)) {
+        // Run ended; classify it as a single or multiple loss.
+        if (sequential_count == 1) {
+          (*out_single_loss_count)++;
+        } else {
+          (*out_multiple_loss_event_count)++;
+          *out_multiple_loss_packet_count += sequential_count;
+        }
+        sequential_count = 0;
+      }
+      sequential_count++;
+      last_num = current_num;
+    }
+  }
+  // Classify the final run.
+  if (sequential_count == 1) {
+    (*out_single_loss_count)++;
+  } else if (sequential_count > 1) {
+    (*out_multiple_loss_event_count)++;
+    *out_multiple_loss_packet_count += sequential_count;
+  }
+}
+
+void PacketLossStats::PruneBuffer() {
+  // Remove the oldest lost packet and any contiguous packets and move them
+  // into the historic counts.
+  auto it = lost_packets_buffer_.begin();
+  uint16_t last_removed = 0;
+  int remove_count = 0;
+  // Count adjacent packets and continue counting if it is wrap around by
+  // swapping in the wrapped buffer and letting our value wrap as well.
+  while (remove_count == 0 || (!lost_packets_buffer_.empty() &&
+                               *it == ((last_removed + 1) & 0xFFFF))) {
+    last_removed = *it;
+    remove_count++;
+    auto to_erase = it++;
+    lost_packets_buffer_.erase(to_erase);
+    if (lost_packets_buffer_.empty()) {
+      // Pre-wrap buffer exhausted; continue into the wrapped buffer.
+      lost_packets_buffer_.swap(lost_packets_wrapped_buffer_);
+      it = lost_packets_buffer_.begin();
+    }
+  }
+  if (remove_count > 1) {
+    multiple_loss_historic_event_count_++;
+    multiple_loss_historic_packet_count_ += remove_count;
+  } else {
+    single_loss_historic_count_++;
+  }
+  // Continue pruning if the wrapped buffer is beyond a threshold and there are
+  // things left in the pre-wrapped buffer.
+  if (!lost_packets_wrapped_buffer_.empty() &&
+      *(lost_packets_wrapped_buffer_.rbegin()) > 0x4000) {
+    PruneBuffer();
+  }
+}
+
+}  // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/source/packet_loss_stats.h b/chromium/third_party/webrtc/modules/rtp_rtcp/source/packet_loss_stats.h
new file mode 100644
index 00000000000..2eab043c0d0
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/source/packet_loss_stats.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_RTP_RTCP_SOURCE_PACKET_LOSS_STATS_H_
+#define WEBRTC_MODULES_RTP_RTCP_SOURCE_PACKET_LOSS_STATS_H_
+
+#include <stdint.h>
+#include <set>
+
+namespace webrtc {
+
+// Keeps track of statistics of packet loss including whether losses are a
+// single packet or multiple packets in a row.
+class PacketLossStats {
+ public:
+  PacketLossStats();
+  ~PacketLossStats() {}
+
+  // Adds a lost packet to the stats by sequence number.
+  void AddLostPacket(uint16_t sequence_number);
+
+  // Queries the number of packets that were lost by themselves, no neighboring
+  // packets were lost.
+  int GetSingleLossCount() const;
+
+  // Queries the number of times that multiple packets with sequential numbers
+  // were lost. This is the number of events with more than one packet lost,
+  // regardless of the size of the event.
+  int GetMultipleLossEventCount() const;
+
+  // Queries the number of packets lost in multiple packet loss events. Combined
+  // with the event count, this can be used to determine the average event size.
+  int GetMultipleLossPacketCount() const;
+
+ private:
+  // Recently lost packets, ordered by sequence number.
+  std::set<uint16_t> lost_packets_buffer_;
+  // Packets recorded after a sequence-number wrap; kept separate so ordered
+  // iteration across the wrap stays correct.
+  std::set<uint16_t> lost_packets_wrapped_buffer_;
+  // Counts accumulated from packets already pruned out of the buffers.
+  int single_loss_historic_count_;
+  int multiple_loss_historic_event_count_;
+  int multiple_loss_historic_packet_count_;
+
+  // Computes all three counts from historic totals plus buffer contents.
+  void ComputeLossCounts(int* out_single_loss_count,
+                         int* out_multiple_loss_event_count,
+                         int* out_multiple_loss_packet_count) const;
+  // Moves the oldest loss event out of the buffers into the historic counts.
+  void PruneBuffer();
+};
+
+}  // namespace webrtc
+
+#endif // WEBRTC_MODULES_RTP_RTCP_SOURCE_PACKET_LOSS_STATS_H_
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/source/packet_loss_stats_unittest.cc b/chromium/third_party/webrtc/modules/rtp_rtcp/source/packet_loss_stats_unittest.cc
new file mode 100644
index 00000000000..660628242d1
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/source/packet_loss_stats_unittest.cc
@@ -0,0 +1,197 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/modules/rtp_rtcp/source/packet_loss_stats.h"
+
+namespace webrtc {
+
+class PacketLossStatsTest : public ::testing::Test {
+ protected:
+ PacketLossStats stats_;
+};
+
+// Add a lost packet as every other packet, they should all count as single
+// losses.
+TEST_F(PacketLossStatsTest, EveryOtherPacket) {
+ for (int i = 0; i < 1000; i += 2) {
+ stats_.AddLostPacket(i);
+ }
+ EXPECT_EQ(500, stats_.GetSingleLossCount());
+ EXPECT_EQ(0, stats_.GetMultipleLossEventCount());
+ EXPECT_EQ(0, stats_.GetMultipleLossPacketCount());
+}
+
+// Add a lost packet as every other packet, but such that the sequence numbers
+// will wrap around while they are being added.
+TEST_F(PacketLossStatsTest, EveryOtherPacketWrapped) {
+ for (int i = 65500; i < 66500; i += 2) {
+ stats_.AddLostPacket(i & 0xFFFF);
+ }
+ EXPECT_EQ(500, stats_.GetSingleLossCount());
+ EXPECT_EQ(0, stats_.GetMultipleLossEventCount());
+ EXPECT_EQ(0, stats_.GetMultipleLossPacketCount());
+}
+
+// Add a lost packet as every other packet, but such that the sequence numbers
+// will wrap around close to the very end, such that the buffer contains packets
+// on either side of the wrapping.
+TEST_F(PacketLossStatsTest, EveryOtherPacketWrappedAtEnd) {
+ for (int i = 64600; i < 65600; i += 2) {
+ stats_.AddLostPacket(i & 0xFFFF);
+ }
+ EXPECT_EQ(500, stats_.GetSingleLossCount());
+ EXPECT_EQ(0, stats_.GetMultipleLossEventCount());
+ EXPECT_EQ(0, stats_.GetMultipleLossPacketCount());
+}
+
+// Add a lost packet as the first three of every eight packets. Each set of
+// three should count as a multiple loss event and three multiple loss packets.
+TEST_F(PacketLossStatsTest, FirstThreeOfEight) {
+ for (int i = 0; i < 1000; ++i) {
+ if ((i & 7) < 3) {
+ stats_.AddLostPacket(i);
+ }
+ }
+ EXPECT_EQ(0, stats_.GetSingleLossCount());
+ EXPECT_EQ(125, stats_.GetMultipleLossEventCount());
+ EXPECT_EQ(375, stats_.GetMultipleLossPacketCount());
+}
+
+// Add a lost packet as the first three of every eight packets such that the
+// sequence numbers wrap in the middle of adding them.
+TEST_F(PacketLossStatsTest, FirstThreeOfEightWrapped) {
+ for (int i = 65500; i < 66500; ++i) {
+ if ((i & 7) < 3) {
+ stats_.AddLostPacket(i & 0xFFFF);
+ }
+ }
+ EXPECT_EQ(0, stats_.GetSingleLossCount());
+ EXPECT_EQ(125, stats_.GetMultipleLossEventCount());
+ EXPECT_EQ(375, stats_.GetMultipleLossPacketCount());
+}
+
+// Add a lost packet as the first three of every eight packets such that the
+// sequence numbers wrap near the end of adding them and there are still numbers
+// in the buffer from before the wrapping.
+TEST_F(PacketLossStatsTest, FirstThreeOfEightWrappedAtEnd) {
+ for (int i = 64600; i < 65600; ++i) {
+ if ((i & 7) < 3) {
+ stats_.AddLostPacket(i & 0xFFFF);
+ }
+ }
+ EXPECT_EQ(0, stats_.GetSingleLossCount());
+ EXPECT_EQ(125, stats_.GetMultipleLossEventCount());
+ EXPECT_EQ(375, stats_.GetMultipleLossPacketCount());
+}
+
+// Add loss packets as the first three and the fifth of every eight packets. The
+// set of three should be multiple loss and the fifth should be single loss.
+TEST_F(PacketLossStatsTest, FirstThreeAndFifthOfEight) {
+ for (int i = 0; i < 1000; ++i) {
+ if ((i & 7) < 3 || (i & 7) == 4) {
+ stats_.AddLostPacket(i);
+ }
+ }
+ EXPECT_EQ(125, stats_.GetSingleLossCount());
+ EXPECT_EQ(125, stats_.GetMultipleLossEventCount());
+ EXPECT_EQ(375, stats_.GetMultipleLossPacketCount());
+}
+
+// Add loss packets as the first three and the fifth of every eight packets such
+// that the sequence numbers wrap in the middle of adding them.
+TEST_F(PacketLossStatsTest, FirstThreeAndFifthOfEightWrapped) {
+ for (int i = 65500; i < 66500; ++i) {
+ if ((i & 7) < 3 || (i & 7) == 4) {
+ stats_.AddLostPacket(i & 0xFFFF);
+ }
+ }
+ EXPECT_EQ(125, stats_.GetSingleLossCount());
+ EXPECT_EQ(125, stats_.GetMultipleLossEventCount());
+ EXPECT_EQ(375, stats_.GetMultipleLossPacketCount());
+}
+
+// Add loss packets as the first three and the fifth of every eight packets such
+// that the sequence numbers wrap near the end of adding them and there are
+// packets from before the wrapping still in the buffer.
+TEST_F(PacketLossStatsTest, FirstThreeAndFifthOfEightWrappedAtEnd) {
+ for (int i = 64600; i < 65600; ++i) {
+ if ((i & 7) < 3 || (i & 7) == 4) {
+ stats_.AddLostPacket(i & 0xFFFF);
+ }
+ }
+ EXPECT_EQ(125, stats_.GetSingleLossCount());
+ EXPECT_EQ(125, stats_.GetMultipleLossEventCount());
+ EXPECT_EQ(375, stats_.GetMultipleLossPacketCount());
+}
+
+// Add loss packets such that there is a multiple loss event that continues
+// around the wrapping of sequence numbers.
+TEST_F(PacketLossStatsTest, MultipleLossEventWrapped) {
+ for (int i = 60000; i < 60500; i += 2) {
+ stats_.AddLostPacket(i);
+ }
+ for (int i = 65530; i < 65540; ++i) {
+ stats_.AddLostPacket(i & 0xFFFF);
+ }
+ EXPECT_EQ(250, stats_.GetSingleLossCount());
+ EXPECT_EQ(1, stats_.GetMultipleLossEventCount());
+ EXPECT_EQ(10, stats_.GetMultipleLossPacketCount());
+}
+
+// Add loss packets such that there is a multiple loss event that continues
+// around the wrapping of sequence numbers and then is pushed out of the buffer.
+TEST_F(PacketLossStatsTest, MultipleLossEventWrappedPushedOut) {
+ for (int i = 60000; i < 60500; i += 2) {
+ stats_.AddLostPacket(i);
+ }
+ for (int i = 65530; i < 65540; ++i) {
+ stats_.AddLostPacket(i & 0xFFFF);
+ }
+ for (int i = 1000; i < 1500; i += 2) {
+ stats_.AddLostPacket(i);
+ }
+ EXPECT_EQ(500, stats_.GetSingleLossCount());
+ EXPECT_EQ(1, stats_.GetMultipleLossEventCount());
+ EXPECT_EQ(10, stats_.GetMultipleLossPacketCount());
+}
+
+// Add loss packets out of order and ensure that they still get counted
+// correctly as single or multiple loss events.
+TEST_F(PacketLossStatsTest, OutOfOrder) {
+ for (int i = 0; i < 1000; i += 10) {
+ stats_.AddLostPacket(i + 5);
+ stats_.AddLostPacket(i + 7);
+ stats_.AddLostPacket(i + 4);
+ stats_.AddLostPacket(i + 1);
+ stats_.AddLostPacket(i + 2);
+ }
+ EXPECT_EQ(100, stats_.GetSingleLossCount());
+ EXPECT_EQ(200, stats_.GetMultipleLossEventCount());
+ EXPECT_EQ(400, stats_.GetMultipleLossPacketCount());
+}
+
+// Add loss packets out of order and ensure that they still get counted
+// correctly as single or multiple loss events, and wrap in the middle of
+// adding.
+TEST_F(PacketLossStatsTest, OutOfOrderWrapped) {
+ for (int i = 65000; i < 66000; i += 10) {
+ stats_.AddLostPacket((i + 5) & 0xFFFF);
+ stats_.AddLostPacket((i + 7) & 0xFFFF);
+ stats_.AddLostPacket((i + 4) & 0xFFFF);
+ stats_.AddLostPacket((i + 1) & 0xFFFF);
+ stats_.AddLostPacket((i + 2) & 0xFFFF);
+ }
+ EXPECT_EQ(100, stats_.GetSingleLossCount());
+ EXPECT_EQ(200, stats_.GetMultipleLossEventCount());
+ EXPECT_EQ(400, stats_.GetMultipleLossPacketCount());
+}
+
+} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_format_remb_unittest.cc b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_format_remb_unittest.cc
index 394fa688491..7a7645fd1bc 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_format_remb_unittest.cc
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_format_remb_unittest.cc
@@ -18,6 +18,7 @@
#include "webrtc/modules/rtp_rtcp/source/rtcp_sender.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_utility.h"
+#include "webrtc/test/null_transport.h"
#include "webrtc/typedefs.h"
namespace {
@@ -31,14 +32,12 @@ class TestTransport : public Transport {
rtcp_receiver_(rtcp_receiver) {
}
- int SendPacket(int /*channel*/,
- const void* /*data*/,
- size_t /*len*/) override {
- return -1;
+ bool SendRtp(const uint8_t* /*data*/,
+ size_t /*len*/,
+ const PacketOptions& options) override {
+ return false;
}
- int SendRTCPPacket(int /*channel*/,
- const void* packet,
- size_t packetLength) override {
+ bool SendRtcp(const uint8_t* packet, size_t packetLength) override {
RTCPUtility::RTCPParserV2 rtcpParser((uint8_t*)packet,
packetLength,
true); // Allow non-compound RTCP
@@ -52,7 +51,7 @@ class TestTransport : public Transport {
rtcpPacketInformation.rtcpPacketTypeFlags & kRtcpRemb);
EXPECT_EQ((uint32_t)1234,
rtcpPacketInformation.receiverEstimatedMaxBitrate);
- return static_cast<int>(packetLength);
+ return true;
}
private:
RTCPReceiver* rtcp_receiver_;
@@ -61,17 +60,18 @@ class TestTransport : public Transport {
class RtcpFormatRembTest : public ::testing::Test {
protected:
- static const uint32_t kRemoteBitrateEstimatorMinBitrateBps = 30000;
-
RtcpFormatRembTest()
: over_use_detector_options_(),
system_clock_(Clock::GetRealTimeClock()),
+ dummy_rtp_rtcp_impl_(nullptr),
receive_statistics_(ReceiveStatistics::Create(system_clock_)),
+ rtcp_sender_(nullptr),
+ rtcp_receiver_(nullptr),
+ test_transport_(nullptr),
remote_bitrate_observer_(),
- remote_bitrate_estimator_(new RemoteBitrateEstimatorSingleStream(
- &remote_bitrate_observer_,
- system_clock_,
- kRemoteBitrateEstimatorMinBitrateBps)) {}
+ remote_bitrate_estimator_(
+ new RemoteBitrateEstimatorSingleStream(&remote_bitrate_observer_,
+ system_clock_)) {}
void SetUp() override;
void TearDown() override;
@@ -82,24 +82,23 @@ class RtcpFormatRembTest : public ::testing::Test {
RTCPSender* rtcp_sender_;
RTCPReceiver* rtcp_receiver_;
TestTransport* test_transport_;
+ test::NullTransport null_transport_;
MockRemoteBitrateObserver remote_bitrate_observer_;
rtc::scoped_ptr<RemoteBitrateEstimator> remote_bitrate_estimator_;
};
void RtcpFormatRembTest::SetUp() {
RtpRtcp::Configuration configuration;
- configuration.id = 0;
configuration.audio = false;
configuration.clock = system_clock_;
configuration.remote_bitrate_estimator = remote_bitrate_estimator_.get();
+ configuration.outgoing_transport = &null_transport_;
dummy_rtp_rtcp_impl_ = new ModuleRtpRtcpImpl(configuration);
- rtcp_sender_ = new RTCPSender(0, false, system_clock_,
- receive_statistics_.get(), NULL);
- rtcp_receiver_ = new RTCPReceiver(0, system_clock_, false, NULL, NULL, NULL,
- dummy_rtp_rtcp_impl_);
+ rtcp_receiver_ = new RTCPReceiver(system_clock_, false, nullptr, nullptr,
+ nullptr, nullptr, dummy_rtp_rtcp_impl_);
test_transport_ = new TestTransport(rtcp_receiver_);
-
- EXPECT_EQ(0, rtcp_sender_->RegisterSendTransport(test_transport_));
+ rtcp_sender_ = new RTCPSender(false, system_clock_, receive_statistics_.get(),
+ nullptr, test_transport_);
}
void RtcpFormatRembTest::TearDown() {
@@ -119,7 +118,7 @@ TEST_F(RtcpFormatRembTest, TestRembStatus) {
TEST_F(RtcpFormatRembTest, TestNonCompund) {
uint32_t SSRC = 456789;
- rtcp_sender_->SetRTCPStatus(kRtcpNonCompound);
+ rtcp_sender_->SetRTCPStatus(RtcpMode::kReducedSize);
rtcp_sender_->SetREMBData(1234, std::vector<uint32_t>(1, SSRC));
RTCPSender::FeedbackState feedback_state =
dummy_rtp_rtcp_impl_->GetFeedbackState();
@@ -128,7 +127,7 @@ TEST_F(RtcpFormatRembTest, TestNonCompund) {
TEST_F(RtcpFormatRembTest, TestCompund) {
uint32_t SSRCs[2] = {456789, 98765};
- rtcp_sender_->SetRTCPStatus(kRtcpCompound);
+ rtcp_sender_->SetRTCPStatus(RtcpMode::kCompound);
rtcp_sender_->SetREMBData(1234, std::vector<uint32_t>(SSRCs, SSRCs + 2));
RTCPSender::FeedbackState feedback_state =
dummy_rtp_rtcp_impl_->GetFeedbackState();
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_packet.cc b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_packet.cc
index 7038532f2e9..d25a754f415 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_packet.cc
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_packet.cc
@@ -92,34 +92,6 @@ void ComputeMantissaAnd6bitBase2Exponent(uint32_t input_base10,
*mantissa = (input_base10 >> exponent);
}
-size_t BlockToHeaderLength(size_t length_in_bytes) {
- // Length in 32-bit words minus 1.
- assert(length_in_bytes > 0);
- assert(length_in_bytes % 4 == 0);
- return (length_in_bytes / 4) - 1;
-}
-
-// From RFC 3550, RTP: A Transport Protocol for Real-Time Applications.
-//
-// RTP header format.
-// 0 1 2 3
-// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// |V=2|P| RC/FMT | PT | length |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-
-void CreateHeader(uint8_t count_or_format, // Depends on packet type.
- uint8_t packet_type,
- size_t length,
- uint8_t* buffer,
- size_t* pos) {
- assert(length <= 0xffff);
- const uint8_t kVersion = 2;
- AssignUWord8(buffer, pos, (kVersion << 6) + count_or_format);
- AssignUWord8(buffer, pos, packet_type);
- AssignUWord16(buffer, pos, length);
-}
-
// Sender report (SR) (RFC 3550).
// 0 1 2 3
// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
@@ -140,10 +112,8 @@ void CreateHeader(uint8_t count_or_format, // Depends on packet type.
// +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
void CreateSenderReport(const RTCPPacketSR& sr,
- size_t length,
uint8_t* buffer,
size_t* pos) {
- CreateHeader(sr.NumberOfReportBlocks, PT_SR, length, buffer, pos);
AssignUWord32(buffer, pos, sr.SenderSSRC);
AssignUWord32(buffer, pos, sr.NTPMostSignificant);
AssignUWord32(buffer, pos, sr.NTPLeastSignificant);
@@ -162,10 +132,8 @@ void CreateSenderReport(const RTCPPacketSR& sr,
// +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
void CreateReceiverReport(const RTCPPacketRR& rr,
- size_t length,
uint8_t* buffer,
size_t* pos) {
- CreateHeader(rr.NumberOfReportBlocks, PT_RR, length, buffer, pos);
AssignUWord32(buffer, pos, rr.SenderSSRC);
}
@@ -219,12 +187,8 @@ void CreateReportBlocks(const std::vector<RTCPPacketReportBlockItem>& blocks,
void CreateIj(const std::vector<uint32_t>& ij_items,
uint8_t* buffer,
size_t* pos) {
- size_t length = ij_items.size();
- CreateHeader(length, PT_IJ, length, buffer, pos);
- for (std::vector<uint32_t>::const_iterator it = ij_items.begin();
- it != ij_items.end(); ++it) {
- AssignUWord32(buffer, pos, *it);
- }
+ for (uint32_t item : ij_items)
+ AssignUWord32(buffer, pos, item);
}
// Source Description (SDES) (RFC 3550).
@@ -254,10 +218,8 @@ void CreateIj(const std::vector<uint32_t>& ij_items,
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
void CreateSdes(const std::vector<Sdes::Chunk>& chunks,
- size_t length,
uint8_t* buffer,
size_t* pos) {
- CreateHeader(chunks.size(), PT_SDES, length, buffer, pos);
const uint8_t kSdesItemType = 1;
for (std::vector<Sdes::Chunk>::const_iterator it = chunks.begin();
it != chunks.end(); ++it) {
@@ -286,10 +248,8 @@ void CreateSdes(const std::vector<Sdes::Chunk>& chunks,
void CreateBye(const RTCPPacketBYE& bye,
const std::vector<uint32_t>& csrcs,
- size_t length,
uint8_t* buffer,
size_t* pos) {
- CreateHeader(length, PT_BYE, length, buffer, pos);
AssignUWord32(buffer, pos, bye.SenderSSRC);
for (uint32_t csrc : csrcs)
AssignUWord32(buffer, pos, csrc);
@@ -311,10 +271,8 @@ void CreateBye(const RTCPPacketBYE& bye,
void CreateApp(const RTCPPacketAPP& app,
uint32_t ssrc,
- size_t length,
uint8_t* buffer,
size_t* pos) {
- CreateHeader(app.SubType, PT_APP, length, buffer, pos);
AssignUWord32(buffer, pos, ssrc);
AssignUWord32(buffer, pos, app.Name);
memcpy(buffer + *pos, app.Data, app.Size);
@@ -343,11 +301,8 @@ void CreateApp(const RTCPPacketAPP& app,
// FCI: no feedback control information.
void CreatePli(const RTCPPacketPSFBPLI& pli,
- size_t length,
uint8_t* buffer,
size_t* pos) {
- const uint8_t kFmt = 1;
- CreateHeader(kFmt, PT_PSFB, length, buffer, pos);
AssignUWord32(buffer, pos, pli.SenderSSRC);
AssignUWord32(buffer, pos, pli.MediaSSRC);
}
@@ -364,11 +319,8 @@ void CreatePli(const RTCPPacketPSFBPLI& pli,
void CreateSli(const RTCPPacketPSFBSLI& sli,
const RTCPPacketPSFBSLIItem& sli_item,
- size_t length,
uint8_t* buffer,
size_t* pos) {
- const uint8_t kFmt = 2;
- CreateHeader(kFmt, PT_PSFB, length, buffer, pos);
AssignUWord32(buffer, pos, sli.SenderSSRC);
AssignUWord32(buffer, pos, sli.MediaSSRC);
@@ -393,11 +345,8 @@ void CreateNack(const RTCPPacketRTPFBNACK& nack,
const std::vector<RTCPPacketRTPFBNACKItem>& nack_fields,
size_t start_index,
size_t end_index,
- size_t length,
uint8_t* buffer,
size_t* pos) {
- const uint8_t kFmt = 1;
- CreateHeader(kFmt, PT_RTPFB, length, buffer, pos);
AssignUWord32(buffer, pos, nack.SenderSSRC);
AssignUWord32(buffer, pos, nack.MediaSSRC);
for (size_t i = start_index; i < end_index; ++i) {
@@ -421,13 +370,10 @@ void CreateNack(const RTCPPacketRTPFBNACK& nack,
void CreateRpsi(const RTCPPacketPSFBRPSI& rpsi,
uint8_t padding_bytes,
- size_t length,
uint8_t* buffer,
size_t* pos) {
// Native bit string should be a multiple of 8 bits.
assert(rpsi.NumberOfValidBits % 8 == 0);
- const uint8_t kFmt = 3;
- CreateHeader(kFmt, PT_PSFB, length, buffer, pos);
AssignUWord32(buffer, pos, rpsi.SenderSSRC);
AssignUWord32(buffer, pos, rpsi.MediaSSRC);
AssignUWord8(buffer, pos, padding_bytes * 8);
@@ -452,11 +398,8 @@ void CreateRpsi(const RTCPPacketPSFBRPSI& rpsi,
void CreateFir(const RTCPPacketPSFBFIR& fir,
const RTCPPacketPSFBFIRItem& fir_item,
- size_t length,
uint8_t* buffer,
size_t* pos) {
- const uint8_t kFmt = 4;
- CreateHeader(kFmt, PT_PSFB, length, buffer, pos);
AssignUWord32(buffer, pos, fir.SenderSSRC);
AssignUWord32(buffer, pos, kUnusedMediaSourceSsrc0);
AssignUWord32(buffer, pos, fir_item.SSRC);
@@ -494,11 +437,8 @@ void CreateTmmbrItem(const RTCPPacketRTPFBTMMBRItem& tmmbr_item,
void CreateTmmbr(const RTCPPacketRTPFBTMMBR& tmmbr,
const RTCPPacketRTPFBTMMBRItem& tmmbr_item,
- size_t length,
uint8_t* buffer,
size_t* pos) {
- const uint8_t kFmt = 3;
- CreateHeader(kFmt, PT_RTPFB, length, buffer, pos);
AssignUWord32(buffer, pos, tmmbr.SenderSSRC);
AssignUWord32(buffer, pos, kUnusedMediaSourceSsrc0);
CreateTmmbrItem(tmmbr_item, buffer, pos);
@@ -518,11 +458,8 @@ void CreateTmmbr(const RTCPPacketRTPFBTMMBR& tmmbr,
void CreateTmmbn(const RTCPPacketRTPFBTMMBN& tmmbn,
const std::vector<RTCPPacketRTPFBTMMBRItem>& tmmbn_items,
- size_t length,
uint8_t* buffer,
size_t* pos) {
- const uint8_t kFmt = 4;
- CreateHeader(kFmt, PT_RTPFB, length, buffer, pos);
AssignUWord32(buffer, pos, tmmbn.SenderSSRC);
AssignUWord32(buffer, pos, kUnusedMediaSourceSsrc0);
for (uint8_t i = 0; i < tmmbn_items.size(); ++i) {
@@ -551,15 +488,12 @@ void CreateTmmbn(const RTCPPacketRTPFBTMMBN& tmmbn,
void CreateRemb(const RTCPPacketPSFBAPP& remb,
const RTCPPacketPSFBREMBItem& remb_item,
- size_t length,
uint8_t* buffer,
size_t* pos) {
uint32_t mantissa = 0;
uint8_t exp = 0;
ComputeMantissaAnd6bitBase2Exponent(remb_item.BitRate, 18, &mantissa, &exp);
- const uint8_t kFmt = 15;
- CreateHeader(kFmt, PT_PSFB, length, buffer, pos);
AssignUWord32(buffer, pos, remb.SenderSSRC);
AssignUWord32(buffer, pos, kUnusedMediaSourceSsrc0);
AssignUWord8(buffer, pos, 'R');
@@ -590,10 +524,8 @@ void CreateRemb(const RTCPPacketPSFBAPP& remb,
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
void CreateXrHeader(const RTCPPacketXR& header,
- size_t length,
uint8_t* buffer,
size_t* pos) {
- CreateHeader(0U, PT_XR, length, buffer, pos);
AssignUWord32(buffer, pos, header.OriginatorSSRC);
}
@@ -738,7 +670,7 @@ rtc::scoped_ptr<RawPacket> RtcpPacket::Build() const {
: called_(false), packet_(packet) {}
virtual ~PacketVerifier() {}
void OnPacketReady(uint8_t* data, size_t length) override {
- CHECK(!called_) << "Fragmentation not supported.";
+ RTC_CHECK(!called_) << "Fragmentation not supported.";
called_ = true;
packet_->SetLength(length);
}
@@ -790,6 +722,35 @@ bool RtcpPacket::OnBufferFull(uint8_t* packet,
return true;
}
+size_t RtcpPacket::HeaderLength() const {
+ size_t length_in_bytes = BlockLength();
+ // Length in 32-bit words minus 1.
+ assert(length_in_bytes > 0);
+ return ((length_in_bytes + 3) / 4) - 1;
+}
+
+// From RFC 3550, RTP: A Transport Protocol for Real-Time Applications.
+//
+// RTP header format.
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |V=2|P| RC/FMT | PT | length |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+void RtcpPacket::CreateHeader(
+ uint8_t count_or_format, // Depends on packet type.
+ uint8_t packet_type,
+ size_t length,
+ uint8_t* buffer,
+ size_t* pos) {
+ assert(length <= 0xffff);
+ const uint8_t kVersion = 2;
+ AssignUWord8(buffer, pos, (kVersion << 6) + count_or_format);
+ AssignUWord8(buffer, pos, packet_type);
+ AssignUWord16(buffer, pos, length);
+}
+
bool Empty::Create(uint8_t* packet,
size_t* index,
size_t max_length,
@@ -797,6 +758,10 @@ bool Empty::Create(uint8_t* packet,
return true;
}
+size_t Empty::BlockLength() const {
+ return 0;
+}
+
bool SenderReport::Create(uint8_t* packet,
size_t* index,
size_t max_length,
@@ -805,7 +770,8 @@ bool SenderReport::Create(uint8_t* packet,
if (!OnBufferFull(packet, index, callback))
return false;
}
- CreateSenderReport(sr_, BlockToHeaderLength(BlockLength()), packet, index);
+ CreateHeader(sr_.NumberOfReportBlocks, PT_SR, HeaderLength(), packet, index);
+ CreateSenderReport(sr_, packet, index);
CreateReportBlocks(report_blocks_, packet, index);
return true;
}
@@ -828,7 +794,8 @@ bool ReceiverReport::Create(uint8_t* packet,
if (!OnBufferFull(packet, index, callback))
return false;
}
- CreateReceiverReport(rr_, BlockToHeaderLength(BlockLength()), packet, index);
+ CreateHeader(rr_.NumberOfReportBlocks, PT_RR, HeaderLength(), packet, index);
+ CreateReceiverReport(rr_, packet, index);
CreateReportBlocks(report_blocks_, packet, index);
return true;
}
@@ -851,6 +818,8 @@ bool Ij::Create(uint8_t* packet,
if (!OnBufferFull(packet, index, callback))
return false;
}
+ size_t length = ij_items_.size();
+ CreateHeader(length, PT_IJ, length, packet, index);
CreateIj(ij_items_, packet, index);
return true;
}
@@ -873,7 +842,8 @@ bool Sdes::Create(uint8_t* packet,
if (!OnBufferFull(packet, index, callback))
return false;
}
- CreateSdes(chunks_, BlockToHeaderLength(BlockLength()), packet, index);
+ CreateHeader(chunks_.size(), PT_SDES, HeaderLength(), packet, index);
+ CreateSdes(chunks_, packet, index);
return true;
}
@@ -914,7 +884,9 @@ bool Bye::Create(uint8_t* packet,
if (!OnBufferFull(packet, index, callback))
return false;
}
- CreateBye(bye_, csrcs_, BlockToHeaderLength(BlockLength()), packet, index);
+ size_t length = HeaderLength();
+ CreateHeader(length, PT_BYE, length, packet, index);
+ CreateBye(bye_, csrcs_, packet, index);
return true;
}
@@ -935,7 +907,8 @@ bool App::Create(uint8_t* packet,
if (!OnBufferFull(packet, index, callback))
return false;
}
- CreateApp(app_, ssrc_, BlockToHeaderLength(BlockLength()), packet, index);
+ CreateHeader(app_.SubType, PT_APP, HeaderLength(), packet, index);
+ CreateApp(app_, ssrc_, packet, index);
return true;
}
@@ -947,7 +920,9 @@ bool Pli::Create(uint8_t* packet,
if (!OnBufferFull(packet, index, callback))
return false;
}
- CreatePli(pli_, BlockToHeaderLength(BlockLength()), packet, index);
+ const uint8_t kFmt = 1;
+ CreateHeader(kFmt, PT_PSFB, HeaderLength(), packet, index);
+ CreatePli(pli_, packet, index);
return true;
}
@@ -959,7 +934,9 @@ bool Sli::Create(uint8_t* packet,
if (!OnBufferFull(packet, index, callback))
return false;
}
- CreateSli(sli_, sli_item_, BlockToHeaderLength(BlockLength()), packet, index);
+ const uint8_t kFmt = 2;
+ CreateHeader(kFmt, PT_PSFB, HeaderLength(), packet, index);
+ CreateSli(sli_, sli_item_, packet, index);
return true;
}
@@ -981,8 +958,11 @@ bool Nack::Create(uint8_t* packet,
std::min((bytes_left_in_buffer - kCommonFbFmtLength) / 4,
nack_fields_.size() - nack_index);
+ const uint8_t kFmt = 1;
+ size_t size_bytes = (num_nack_fields * 4) + kCommonFbFmtLength;
+ size_t header_length = ((size_bytes + 3) / 4) - 1; // As 32bit words - 1
+ CreateHeader(kFmt, PT_RTPFB, header_length, packet, index);
CreateNack(nack_, nack_fields_, nack_index, nack_index + num_nack_fields,
- BlockToHeaderLength((num_nack_fields * 4) + kCommonFbFmtLength),
packet, index);
nack_index += num_nack_fields;
@@ -991,6 +971,10 @@ bool Nack::Create(uint8_t* packet,
return true;
}
+size_t Nack::BlockLength() const {
+ return (nack_fields_.size() * 4) + kCommonFbFmtLength;
+}
+
void Nack::WithList(const uint16_t* nack_list, int length) {
assert(nack_list);
assert(nack_fields_.empty());
@@ -1024,8 +1008,9 @@ bool Rpsi::Create(uint8_t* packet,
if (!OnBufferFull(packet, index, callback))
return false;
}
- CreateRpsi(rpsi_, padding_bytes_, BlockToHeaderLength(BlockLength()), packet,
- index);
+ const uint8_t kFmt = 3;
+ CreateHeader(kFmt, PT_PSFB, HeaderLength(), packet, index);
+ CreateRpsi(rpsi_, padding_bytes_, packet, index);
return true;
}
@@ -1064,7 +1049,9 @@ bool Fir::Create(uint8_t* packet,
if (!OnBufferFull(packet, index, callback))
return false;
}
- CreateFir(fir_, fir_item_, BlockToHeaderLength(BlockLength()), packet, index);
+ const uint8_t kFmt = 4;
+ CreateHeader(kFmt, PT_PSFB, HeaderLength(), packet, index);
+ CreateFir(fir_, fir_item_, packet, index);
return true;
}
@@ -1076,8 +1063,9 @@ bool Remb::Create(uint8_t* packet,
if (!OnBufferFull(packet, index, callback))
return false;
}
- CreateRemb(remb_, remb_item_, BlockToHeaderLength(BlockLength()), packet,
- index);
+ const uint8_t kFmt = 15;
+ CreateHeader(kFmt, PT_PSFB, HeaderLength(), packet, index);
+ CreateRemb(remb_, remb_item_, packet, index);
return true;
}
@@ -1097,8 +1085,9 @@ bool Tmmbr::Create(uint8_t* packet,
if (!OnBufferFull(packet, index, callback))
return false;
}
- CreateTmmbr(tmmbr_, tmmbr_item_, BlockToHeaderLength(BlockLength()), packet,
- index);
+ const uint8_t kFmt = 3;
+ CreateHeader(kFmt, PT_RTPFB, HeaderLength(), packet, index);
+ CreateTmmbr(tmmbr_, tmmbr_item_, packet, index);
return true;
}
@@ -1124,8 +1113,9 @@ bool Tmmbn::Create(uint8_t* packet,
if (!OnBufferFull(packet, index, callback))
return false;
}
- CreateTmmbn(tmmbn_, tmmbn_items_, BlockToHeaderLength(BlockLength()), packet,
- index);
+ const uint8_t kFmt = 4;
+ CreateHeader(kFmt, PT_RTPFB, HeaderLength(), packet, index);
+ CreateTmmbn(tmmbn_, tmmbn_items_, packet, index);
return true;
}
@@ -1137,7 +1127,8 @@ bool Xr::Create(uint8_t* packet,
if (!OnBufferFull(packet, index, callback))
return false;
}
- CreateXrHeader(xr_header_, BlockToHeaderLength(BlockLength()), packet, index);
+ CreateHeader(0U, PT_XR, HeaderLength(), packet, index);
+ CreateXrHeader(xr_header_, packet, index);
CreateRrtr(rrtr_blocks_, packet, index);
CreateDlrr(dlrr_blocks_, packet, index);
CreateVoipMetric(voip_metric_blocks_, packet, index);
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_packet.h b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_packet.h
index d212497edde..3c34957c361 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_packet.h
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_packet.h
@@ -91,19 +91,30 @@ class RtcpPacket {
size_t max_length,
PacketReadyCallback* callback) const;
+ // Size of this packet in bytes (including headers, excluding nested packets).
+ virtual size_t BlockLength() const = 0;
+
protected:
- RtcpPacket() : kHeaderLength(4) {}
+ RtcpPacket() {}
virtual bool Create(uint8_t* packet,
size_t* index,
size_t max_length,
PacketReadyCallback* callback) const = 0;
+ static void CreateHeader(uint8_t count_or_format,
+ uint8_t packet_type,
+ size_t block_length, // Size in 32bit words - 1.
+ uint8_t* buffer,
+ size_t* pos);
+
bool OnBufferFull(uint8_t* packet,
size_t* index,
RtcpPacket::PacketReadyCallback* callback) const;
- const size_t kHeaderLength;
+ size_t HeaderLength() const;
+
+ static const size_t kHeaderLength = 4;
private:
bool CreateAndAddAppended(uint8_t* packet,
@@ -114,6 +125,8 @@ class RtcpPacket {
std::vector<RtcpPacket*> appended_packets_;
};
+// TODO(sprang): Move RtcpPacket subclasses out to separate files.
+
class Empty : public RtcpPacket {
public:
Empty() : RtcpPacket() {}
@@ -126,8 +139,10 @@ class Empty : public RtcpPacket {
size_t max_length,
RtcpPacket::PacketReadyCallback* callback) const override;
+ size_t BlockLength() const override;
+
private:
- DISALLOW_COPY_AND_ASSIGN(Empty);
+ RTC_DISALLOW_COPY_AND_ASSIGN(Empty);
};
// From RFC 3550, RTP: A Transport Protocol for Real-Time Applications.
@@ -255,7 +270,7 @@ class SenderReport : public RtcpPacket {
RTCPUtility::RTCPPacketSR sr_;
std::vector<RTCPUtility::RTCPPacketReportBlockItem> report_blocks_;
- DISALLOW_COPY_AND_ASSIGN(SenderReport);
+ RTC_DISALLOW_COPY_AND_ASSIGN(SenderReport);
};
//
@@ -301,7 +316,7 @@ class ReceiverReport : public RtcpPacket {
RTCPUtility::RTCPPacketRR rr_;
std::vector<RTCPUtility::RTCPPacketReportBlockItem> report_blocks_;
- DISALLOW_COPY_AND_ASSIGN(ReceiverReport);
+ RTC_DISALLOW_COPY_AND_ASSIGN(ReceiverReport);
};
// Transmission Time Offsets in RTP Streams (RFC 5450).
@@ -346,7 +361,7 @@ class Ij : public RtcpPacket {
std::vector<uint32_t> ij_items_;
- DISALLOW_COPY_AND_ASSIGN(Ij);
+ RTC_DISALLOW_COPY_AND_ASSIGN(Ij);
};
// Source Description (SDES) (RFC 3550).
@@ -402,7 +417,7 @@ class Sdes : public RtcpPacket {
std::vector<Chunk> chunks_;
- DISALLOW_COPY_AND_ASSIGN(Sdes);
+ RTC_DISALLOW_COPY_AND_ASSIGN(Sdes);
};
//
@@ -452,7 +467,7 @@ class Bye : public RtcpPacket {
RTCPUtility::RTCPPacketBYE bye_;
std::vector<uint32_t> csrcs_;
- DISALLOW_COPY_AND_ASSIGN(Bye);
+ RTC_DISALLOW_COPY_AND_ASSIGN(Bye);
};
// Application-Defined packet (APP) (RFC 3550).
@@ -511,7 +526,7 @@ class App : public RtcpPacket {
uint32_t ssrc_;
RTCPUtility::RTCPPacketAPP app_;
- DISALLOW_COPY_AND_ASSIGN(App);
+ RTC_DISALLOW_COPY_AND_ASSIGN(App);
};
// RFC 4585: Feedback format.
@@ -562,7 +577,7 @@ class Pli : public RtcpPacket {
RTCPUtility::RTCPPacketPSFBPLI pli_;
- DISALLOW_COPY_AND_ASSIGN(Pli);
+ RTC_DISALLOW_COPY_AND_ASSIGN(Pli);
};
// Slice loss indication (SLI) (RFC 4585).
@@ -617,7 +632,7 @@ class Sli : public RtcpPacket {
RTCPUtility::RTCPPacketPSFBSLI sli_;
RTCPUtility::RTCPPacketPSFBSLIItem sli_item_;
- DISALLOW_COPY_AND_ASSIGN(Sli);
+ RTC_DISALLOW_COPY_AND_ASSIGN(Sli);
};
// Generic NACK (RFC 4585).
@@ -651,12 +666,14 @@ class Nack : public RtcpPacket {
size_t max_length,
RtcpPacket::PacketReadyCallback* callback) const override;
+ size_t BlockLength() const override;
+
private:
RTCPUtility::RTCPPacketRTPFBNACK nack_;
std::vector<RTCPUtility::RTCPPacketRTPFBNACKItem> nack_fields_;
- DISALLOW_COPY_AND_ASSIGN(Nack);
+ RTC_DISALLOW_COPY_AND_ASSIGN(Nack);
};
// Reference picture selection indication (RPSI) (RFC 4585).
@@ -708,7 +725,7 @@ class Rpsi : public RtcpPacket {
uint8_t padding_bytes_;
RTCPUtility::RTCPPacketPSFBRPSI rpsi_;
- DISALLOW_COPY_AND_ASSIGN(Rpsi);
+ RTC_DISALLOW_COPY_AND_ASSIGN(Rpsi);
};
// Full intra request (FIR) (RFC 5104).
@@ -808,7 +825,7 @@ class Tmmbr : public RtcpPacket {
RTCPUtility::RTCPPacketRTPFBTMMBR tmmbr_;
RTCPUtility::RTCPPacketRTPFBTMMBRItem tmmbr_item_;
- DISALLOW_COPY_AND_ASSIGN(Tmmbr);
+ RTC_DISALLOW_COPY_AND_ASSIGN(Tmmbr);
};
// Temporary Maximum Media Stream Bit Rate Notification (TMMBN) (RFC 5104).
@@ -854,7 +871,7 @@ class Tmmbn : public RtcpPacket {
RTCPUtility::RTCPPacketRTPFBTMMBN tmmbn_;
std::vector<RTCPUtility::RTCPPacketRTPFBTMMBRItem> tmmbn_items_;
- DISALLOW_COPY_AND_ASSIGN(Tmmbn);
+ RTC_DISALLOW_COPY_AND_ASSIGN(Tmmbn);
};
// Receiver Estimated Max Bitrate (REMB) (draft-alvestrand-rmcat-remb).
@@ -910,7 +927,7 @@ class Remb : public RtcpPacket {
RTCPUtility::RTCPPacketPSFBAPP remb_;
RTCPUtility::RTCPPacketPSFBREMBItem remb_item_;
- DISALLOW_COPY_AND_ASSIGN(Remb);
+ RTC_DISALLOW_COPY_AND_ASSIGN(Remb);
};
// From RFC 3611: RTP Control Protocol Extended Reports (RTCP XR).
@@ -978,7 +995,7 @@ class Xr : public RtcpPacket {
std::vector<DlrrBlock> dlrr_blocks_;
std::vector<RTCPUtility::RTCPPacketXRVOIPMetricItem> voip_metric_blocks_;
- DISALLOW_COPY_AND_ASSIGN(Xr);
+ RTC_DISALLOW_COPY_AND_ASSIGN(Xr);
};
// Receiver Reference Time Report Block (RFC 3611).
@@ -1011,7 +1028,7 @@ class Rrtr {
friend class Xr;
RTCPUtility::RTCPPacketXRReceiverReferenceTimeItem rrtr_block_;
- DISALLOW_COPY_AND_ASSIGN(Rrtr);
+ RTC_DISALLOW_COPY_AND_ASSIGN(Rrtr);
};
// DLRR Report Block (RFC 3611).
@@ -1045,7 +1062,7 @@ class Dlrr {
std::vector<RTCPUtility::RTCPPacketXRDLRRReportBlockItem> dlrr_block_;
- DISALLOW_COPY_AND_ASSIGN(Dlrr);
+ RTC_DISALLOW_COPY_AND_ASSIGN(Dlrr);
};
// VoIP Metrics Report Block (RFC 3611).
@@ -1115,7 +1132,7 @@ class VoipMetric {
friend class Xr;
RTCPUtility::RTCPPacketXRVOIPMetricItem metric_;
- DISALLOW_COPY_AND_ASSIGN(VoipMetric);
+ RTC_DISALLOW_COPY_AND_ASSIGN(VoipMetric);
};
// Class holding a RTCP packet.
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback.cc b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback.cc
new file mode 100644
index 00000000000..4ad49561b80
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback.cc
@@ -0,0 +1,776 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h"
+
+#include "webrtc/base/checks.h"
+#include "webrtc/base/logging.h"
+#include "webrtc/modules/rtp_rtcp/source/byte_io.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_utility.h"
+
+namespace webrtc {
+namespace rtcp {
+
+// Header size:
+// * 12 bytes Common Packet Format for RTCP Feedback Messages
+// * 8 bytes FeedbackPacket header
+static const uint32_t kHeaderSizeBytes = 12 + 8;
+static const uint32_t kChunkSizeBytes = 2;
+static const uint32_t kOneBitVectorCapacity = 14;
+static const uint32_t kTwoBitVectorCapacity = 7;
+static const uint32_t kRunLengthCapacity = 0x1FFF;
+// TODO(sprang): Add support for dynamic max size for easier fragmentation,
+// eg. set it to what's left in the buffer or IP_PACKET_SIZE.
+// Size constraint imposed by RTCP common header: 16bit size field interpreted
+// as number of four byte words minus the first header word.
+static const uint32_t kMaxSizeBytes = (1 << 16) * 4;
+static const uint32_t kMinSizeBytes = kHeaderSizeBytes + kChunkSizeBytes;
+static const uint32_t kBaseScaleFactor =
+ TransportFeedback::kDeltaScaleFactor * (1 << 8);
+
+class PacketStatusChunk {
+ public:
+ virtual ~PacketStatusChunk() {}
+ virtual uint16_t NumSymbols() const = 0;
+ virtual void AppendSymbolsTo(
+ std::vector<TransportFeedback::StatusSymbol>* vec) const = 0;
+ virtual void WriteTo(uint8_t* buffer) const = 0;
+};
+
+uint8_t EncodeSymbol(TransportFeedback::StatusSymbol symbol) {
+ switch (symbol) {
+ case TransportFeedback::StatusSymbol::kNotReceived:
+ return 0;
+ case TransportFeedback::StatusSymbol::kReceivedSmallDelta:
+ return 1;
+ case TransportFeedback::StatusSymbol::kReceivedLargeDelta:
+ return 2;
+ default:
+ RTC_NOTREACHED();
+ return 0;
+ }
+}
+
+TransportFeedback::StatusSymbol DecodeSymbol(uint8_t value) {
+ switch (value) {
+ case 0:
+ return TransportFeedback::StatusSymbol::kNotReceived;
+ case 1:
+ return TransportFeedback::StatusSymbol::kReceivedSmallDelta;
+ case 2:
+ return TransportFeedback::StatusSymbol::kReceivedLargeDelta;
+ default:
+ RTC_NOTREACHED();
+ return TransportFeedback::StatusSymbol::kNotReceived;
+ }
+}
+
+TransportFeedback::TransportFeedback()
+ : packet_sender_ssrc_(0),
+ media_source_ssrc_(0),
+ base_seq_(-1),
+ base_time_(-1),
+ feedback_seq_(0),
+ last_seq_(-1),
+ last_timestamp_(-1),
+ first_symbol_cardinality_(0),
+ vec_needs_two_bit_symbols_(false),
+ size_bytes_(kHeaderSizeBytes) {
+}
+
+TransportFeedback::~TransportFeedback() {
+ for (PacketStatusChunk* chunk : status_chunks_)
+ delete chunk;
+}
+
+// One Bit Status Vector Chunk
+//
+// 0 1
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |T|S| symbol list |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//
+// T = 1
+// S = 0
+// symbol list = 14 entries where 0 = not received, 1 = received
+
+class OneBitVectorChunk : public PacketStatusChunk {
+ public:
+ static const int kCapacity = 14;
+
+ explicit OneBitVectorChunk(
+ std::deque<TransportFeedback::StatusSymbol>* symbols) {
+ size_t input_size = symbols->size();
+ for (size_t i = 0; i < kCapacity; ++i) {
+ if (i < input_size) {
+ symbols_[i] = symbols->front();
+ symbols->pop_front();
+ } else {
+ symbols_[i] = TransportFeedback::StatusSymbol::kNotReceived;
+ }
+ }
+ }
+
+ virtual ~OneBitVectorChunk() {}
+
+ uint16_t NumSymbols() const override { return kCapacity; }
+
+ void AppendSymbolsTo(
+ std::vector<TransportFeedback::StatusSymbol>* vec) const override {
+ vec->insert(vec->end(), &symbols_[0], &symbols_[kCapacity]);
+ }
+
+ void WriteTo(uint8_t* buffer) const override {
+ const int kSymbolsInFirstByte = 6;
+ const int kSymbolsInSecondByte = 8;
+ buffer[0] = 0x80u;
+ for (int i = 0; i < kSymbolsInFirstByte; ++i) {
+ uint8_t encoded_symbol = EncodeSymbol(symbols_[i]);
+ RTC_DCHECK_LE(encoded_symbol, 1u);
+ buffer[0] |= encoded_symbol << (kSymbolsInFirstByte - (i + 1));
+ }
+ buffer[1] = 0x00u;
+ for (int i = 0; i < kSymbolsInSecondByte; ++i) {
+ uint8_t encoded_symbol = EncodeSymbol(symbols_[i + kSymbolsInFirstByte]);
+ RTC_DCHECK_LE(encoded_symbol, 1u);
+ buffer[1] |= encoded_symbol << (kSymbolsInSecondByte - (i + 1));
+ }
+ }
+
+ static OneBitVectorChunk* ParseFrom(const uint8_t* data) {
+ OneBitVectorChunk* chunk = new OneBitVectorChunk();
+
+ size_t index = 0;
+    for (int i = 5; i >= 0; --i)  // Last 6 bits from first byte.
+ chunk->symbols_[index++] = DecodeSymbol((data[0] >> i) & 0x01);
+ for (int i = 7; i >= 0; --i) // 8 bits from the last byte.
+ chunk->symbols_[index++] = DecodeSymbol((data[1] >> i) & 0x01);
+
+ return chunk;
+ }
+
+ private:
+ OneBitVectorChunk() {}
+
+ TransportFeedback::StatusSymbol symbols_[kCapacity];
+};
+
+// Two Bit Status Vector Chunk
+//
+// 0 1
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |T|S| symbol list |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//
+// T = 1
+// S = 1
+// symbol list = 7 entries of two bits each, see (Encode|Decode)Symbol
+
+class TwoBitVectorChunk : public PacketStatusChunk {
+ public:
+ static const int kCapacity = 7;
+
+ explicit TwoBitVectorChunk(
+ std::deque<TransportFeedback::StatusSymbol>* symbols) {
+ size_t input_size = symbols->size();
+ for (size_t i = 0; i < kCapacity; ++i) {
+ if (i < input_size) {
+ symbols_[i] = symbols->front();
+ symbols->pop_front();
+ } else {
+ symbols_[i] = TransportFeedback::StatusSymbol::kNotReceived;
+ }
+ }
+ }
+
+ virtual ~TwoBitVectorChunk() {}
+
+ uint16_t NumSymbols() const override { return kCapacity; }
+
+ void AppendSymbolsTo(
+ std::vector<TransportFeedback::StatusSymbol>* vec) const override {
+ vec->insert(vec->end(), &symbols_[0], &symbols_[kCapacity]);
+ }
+
+ void WriteTo(uint8_t* buffer) const override {
+ buffer[0] = 0xC0;
+ buffer[0] |= EncodeSymbol(symbols_[0]) << 4;
+ buffer[0] |= EncodeSymbol(symbols_[1]) << 2;
+ buffer[0] |= EncodeSymbol(symbols_[2]);
+ buffer[1] = EncodeSymbol(symbols_[3]) << 6;
+ buffer[1] |= EncodeSymbol(symbols_[4]) << 4;
+ buffer[1] |= EncodeSymbol(symbols_[5]) << 2;
+ buffer[1] |= EncodeSymbol(symbols_[6]);
+ }
+
+ static TwoBitVectorChunk* ParseFrom(const uint8_t* buffer) {
+ TwoBitVectorChunk* chunk = new TwoBitVectorChunk();
+
+ chunk->symbols_[0] = DecodeSymbol((buffer[0] >> 4) & 0x03);
+ chunk->symbols_[1] = DecodeSymbol((buffer[0] >> 2) & 0x03);
+ chunk->symbols_[2] = DecodeSymbol(buffer[0] & 0x03);
+ chunk->symbols_[3] = DecodeSymbol((buffer[1] >> 6) & 0x03);
+ chunk->symbols_[4] = DecodeSymbol((buffer[1] >> 4) & 0x03);
+ chunk->symbols_[5] = DecodeSymbol((buffer[1] >> 2) & 0x03);
+ chunk->symbols_[6] = DecodeSymbol(buffer[1] & 0x03);
+
+ return chunk;
+ }
+
+ private:
+ TwoBitVectorChunk() {}
+
+ TransportFeedback::StatusSymbol symbols_[kCapacity];
+};
+
+// Run Length Chunk
+//
+// 0 1
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |T| S | Run Length |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//
+// T = 0
+// S = symbol, see (Encode|Decode)Symbol
+// Run Length = Unsigned integer denoting the run length of the symbol
+
+class RunLengthChunk : public PacketStatusChunk {
+ public:
+ RunLengthChunk(TransportFeedback::StatusSymbol symbol, size_t size)
+ : symbol_(symbol), size_(size) {
+ RTC_DCHECK_LE(size, 0x1FFFu);
+ }
+
+ virtual ~RunLengthChunk() {}
+
+ uint16_t NumSymbols() const override { return size_; }
+
+ void AppendSymbolsTo(
+ std::vector<TransportFeedback::StatusSymbol>* vec) const override {
+ vec->insert(vec->end(), size_, symbol_);
+ }
+
+ void WriteTo(uint8_t* buffer) const override {
+ buffer[0] = EncodeSymbol(symbol_) << 5; // Write S (T = 0 implicitly)
+ buffer[0] |= (size_ >> 8) & 0x1F; // 5 most significant bits of run length.
+ buffer[1] = size_ & 0xFF; // 8 least significant bits of run length.
+ }
+
+ static RunLengthChunk* ParseFrom(const uint8_t* buffer) {
+ RTC_DCHECK_EQ(0, buffer[0] & 0x80);
+ TransportFeedback::StatusSymbol symbol =
+ DecodeSymbol((buffer[0] >> 5) & 0x03);
+ uint16_t count = (static_cast<uint16_t>(buffer[0] & 0x1F) << 8) | buffer[1];
+
+ return new RunLengthChunk(symbol, count);
+ }
+
+ private:
+ const TransportFeedback::StatusSymbol symbol_;
+ const size_t size_;
+};
+
+// Unwrap to a larger type, for easier handling of wraps.
+int64_t TransportFeedback::Unwrap(uint16_t sequence_number) {
+ if (last_seq_ == -1)
+ return sequence_number;
+
+ int64_t delta = sequence_number - last_seq_;
+ if (IsNewerSequenceNumber(sequence_number,
+ static_cast<uint16_t>(last_seq_))) {
+ if (delta < 0)
+ delta += (1 << 16);
+ } else if (delta > 0) {
+ delta -= (1 << 16);
+ }
+
+ return last_seq_ + delta;
+}
+
+void TransportFeedback::WithPacketSenderSsrc(uint32_t ssrc) {
+ packet_sender_ssrc_ = ssrc;
+}
+
+void TransportFeedback::WithMediaSourceSsrc(uint32_t ssrc) {
+ media_source_ssrc_ = ssrc;
+}
+
+uint32_t TransportFeedback::GetPacketSenderSsrc() const {
+ return packet_sender_ssrc_;
+}
+
+uint32_t TransportFeedback::GetMediaSourceSsrc() const {
+ return media_source_ssrc_;
+}
+void TransportFeedback::WithBase(uint16_t base_sequence,
+ int64_t ref_timestamp_us) {
+ RTC_DCHECK_EQ(-1, base_seq_);
+ RTC_DCHECK_NE(-1, ref_timestamp_us);
+ base_seq_ = base_sequence;
+ last_seq_ = base_sequence;
+ base_time_ = ref_timestamp_us / kBaseScaleFactor;
+ last_timestamp_ = base_time_ * kBaseScaleFactor;
+}
+
+void TransportFeedback::WithFeedbackSequenceNumber(uint8_t feedback_sequence) {
+ feedback_seq_ = feedback_sequence;
+}
+
+bool TransportFeedback::WithReceivedPacket(uint16_t sequence_number,
+ int64_t timestamp) {
+ RTC_DCHECK_NE(-1, base_seq_);
+ int64_t seq = Unwrap(sequence_number);
+ if (seq != base_seq_ && seq <= last_seq_)
+ return false;
+
+ // Convert to ticks and round.
+ int64_t delta_full = timestamp - last_timestamp_;
+ delta_full +=
+ delta_full < 0 ? -(kDeltaScaleFactor / 2) : kDeltaScaleFactor / 2;
+ delta_full /= kDeltaScaleFactor;
+
+ int16_t delta = static_cast<int16_t>(delta_full);
+ // If larger than 16bit signed, we can't represent it - need new fb packet.
+ if (delta != delta_full) {
+ LOG(LS_WARNING) << "Delta value too large ( >= 2^16 ticks )";
+ return false;
+ }
+
+ StatusSymbol symbol;
+ if (delta >= 0 && delta <= 0xFF) {
+ symbol = StatusSymbol::kReceivedSmallDelta;
+ } else {
+ symbol = StatusSymbol::kReceivedLargeDelta;
+ }
+
+ if (!AddSymbol(symbol, seq))
+ return false;
+
+ receive_deltas_.push_back(delta);
+ last_timestamp_ += delta * kDeltaScaleFactor;
+ return true;
+}
+
+// Add a symbol for a received packet, with the given sequence number. This
+// method will add any "packet not received" symbols needed before this one.
+bool TransportFeedback::AddSymbol(StatusSymbol symbol, int64_t seq) {
+ while (last_seq_ < seq - 1) {
+ if (!Encode(StatusSymbol::kNotReceived))
+ return false;
+ ++last_seq_;
+ }
+
+ if (!Encode(symbol))
+ return false;
+
+ last_seq_ = seq;
+ return true;
+}
+
+// Append a symbol to the internal symbol vector. If the new state cannot be
+// represented using a single status chunk, a chunk will first be emitted and
+// the associated symbols removed from the internal symbol vector.
+bool TransportFeedback::Encode(StatusSymbol symbol) {
+ if (last_seq_ - base_seq_ + 1 > 0xFFFF) {
+ LOG(LS_WARNING) << "Packet status count too large ( >= 2^16 )";
+ return false;
+ }
+
+ bool is_two_bit;
+ int delta_size;
+ switch (symbol) {
+ case StatusSymbol::kReceivedSmallDelta:
+ delta_size = 1;
+ is_two_bit = false;
+ break;
+ case StatusSymbol::kReceivedLargeDelta:
+ delta_size = 2;
+ is_two_bit = true;
+ break;
+ case StatusSymbol::kNotReceived:
+ is_two_bit = false;
+ delta_size = 0;
+ break;
+ default:
+ RTC_NOTREACHED();
+ return false;
+ }
+
+ if (symbol_vec_.empty()) {
+ if (size_bytes_ + delta_size + kChunkSizeBytes > kMaxSizeBytes)
+ return false;
+
+ symbol_vec_.push_back(symbol);
+ vec_needs_two_bit_symbols_ = is_two_bit;
+ first_symbol_cardinality_ = 1;
+ size_bytes_ += delta_size + kChunkSizeBytes;
+ return true;
+ }
+ if (size_bytes_ + delta_size > kMaxSizeBytes)
+ return false;
+
+ // Capacity, in number of symbols, that a vector chunk could hold.
+ size_t capacity = vec_needs_two_bit_symbols_ ? kTwoBitVectorCapacity
+ : kOneBitVectorCapacity;
+
+ // first_symbol_cardinality_ is the number of times the first symbol in
+ // symbol_vec is repeated. So if that is equal to the size of symbol_vec,
+ // there is only one kind of symbol - we can potentially RLE encode it.
+ // If we have less than (capacity) symbols in symbol_vec, we can't know
+ // for certain this will be RLE-encoded; if a different symbol is added
+ // these symbols will be needed to emit a vector chunk instead. However,
+ // if first_symbol_cardinality_ > capacity, then we cannot encode the
+ // current state as a vector chunk - we must first emit symbol_vec as an
+ // RLE-chunk and then add the new symbol.
+ bool rle_candidate = symbol_vec_.size() == first_symbol_cardinality_ ||
+ first_symbol_cardinality_ > capacity;
+ if (rle_candidate) {
+ if (symbol_vec_.back() == symbol) {
+ ++first_symbol_cardinality_;
+ if (first_symbol_cardinality_ <= capacity) {
+ symbol_vec_.push_back(symbol);
+ } else if (first_symbol_cardinality_ == kRunLengthCapacity) {
+ // Max length for an RLE-chunk reached.
+ EmitRunLengthChunk();
+ }
+ size_bytes_ += delta_size;
+ return true;
+ } else {
+ // New symbol does not match what's already in symbol_vec.
+ if (first_symbol_cardinality_ >= capacity) {
+ // Symbols in symbol_vec can only be RLE-encoded. Emit the RLE-chunk
+ // and re-add input. symbol_vec is then guaranteed to have room for the
+ // symbol, so recursion cannot continue.
+ EmitRunLengthChunk();
+ return Encode(symbol);
+ }
+ // Fall through and treat state as non RLE-candidate.
+ }
+ }
+
+ // If this code point is reached, symbols in symbol_vec cannot be RLE-encoded.
+
+ if (is_two_bit && !vec_needs_two_bit_symbols_) {
+ // If the symbols in symbol_vec can be encoded using a one-bit chunk but
+ // the input symbol cannot, first check if we can simply change target type.
+ vec_needs_two_bit_symbols_ = true;
+ if (symbol_vec_.size() >= kTwoBitVectorCapacity) {
+ // symbol_vec contains more symbols than we can encode in a single
+ // two-bit chunk. Emit a new vector append to the remains, if any.
+ if (size_bytes_ + delta_size + kChunkSizeBytes > kMaxSizeBytes)
+ return false;
+ EmitVectorChunk();
+ // If symbol_vec isn't empty after emitting a vector chunk, we need to
+ // account for chunk size (otherwise handled by Encode method).
+ if (!symbol_vec_.empty())
+ size_bytes_ += kChunkSizeBytes;
+ return Encode(symbol);
+ }
+ // symbol_vec symbols fit within a single two-bit vector chunk.
+ capacity = kTwoBitVectorCapacity;
+ }
+
+ symbol_vec_.push_back(symbol);
+ if (symbol_vec_.size() == capacity)
+ EmitVectorChunk();
+
+ size_bytes_ += delta_size;
+ return true;
+}
+
+// Upon packet completion, emit any remaining symbols in symbol_vec that have
+// not yet been emitted in a status chunk.
+void TransportFeedback::EmitRemaining() {
+ if (symbol_vec_.empty())
+ return;
+
+ size_t capacity = vec_needs_two_bit_symbols_ ? kTwoBitVectorCapacity
+ : kOneBitVectorCapacity;
+ if (first_symbol_cardinality_ > capacity) {
+ EmitRunLengthChunk();
+ } else {
+ EmitVectorChunk();
+ }
+}
+
+void TransportFeedback::EmitVectorChunk() {
+ if (vec_needs_two_bit_symbols_) {
+ status_chunks_.push_back(new TwoBitVectorChunk(&symbol_vec_));
+ } else {
+ status_chunks_.push_back(new OneBitVectorChunk(&symbol_vec_));
+ }
+  // Update first symbol cardinality to match what is potentially left in
+ // symbol_vec.
+ first_symbol_cardinality_ = 1;
+ for (size_t i = 1; i < symbol_vec_.size(); ++i) {
+ if (symbol_vec_[i] != symbol_vec_[0])
+ break;
+ ++first_symbol_cardinality_;
+ }
+}
+
+void TransportFeedback::EmitRunLengthChunk() {
+ RTC_DCHECK_GE(first_symbol_cardinality_, symbol_vec_.size());
+ status_chunks_.push_back(
+ new RunLengthChunk(symbol_vec_.front(), first_symbol_cardinality_));
+ symbol_vec_.clear();
+}
+
+size_t TransportFeedback::BlockLength() const {
+ return size_bytes_;
+}
+
+uint16_t TransportFeedback::GetBaseSequence() const {
+ return base_seq_;
+}
+
+int64_t TransportFeedback::GetBaseTimeUs() const {
+ return base_time_ * kBaseScaleFactor;
+}
+
+std::vector<TransportFeedback::StatusSymbol>
+TransportFeedback::GetStatusVector() const {
+ std::vector<TransportFeedback::StatusSymbol> symbols;
+ for (PacketStatusChunk* chunk : status_chunks_)
+ chunk->AppendSymbolsTo(&symbols);
+ int64_t status_count = last_seq_ - base_seq_ + 1;
+ // If packet ends with a vector chunk, it may contain extraneous "packet not
+ // received"-symbols at the end. Crop any such symbols.
+ symbols.erase(symbols.begin() + status_count, symbols.end());
+ return symbols;
+}
+
+std::vector<int16_t> TransportFeedback::GetReceiveDeltas() const {
+ return receive_deltas_;
+}
+
+std::vector<int64_t> TransportFeedback::GetReceiveDeltasUs() const {
+ if (receive_deltas_.empty())
+ return std::vector<int64_t>();
+
+ std::vector<int64_t> us_deltas;
+ for (int16_t delta : receive_deltas_)
+ us_deltas.push_back(static_cast<int64_t>(delta) * kDeltaScaleFactor);
+
+ return us_deltas;
+}
+
+// Serialize packet.
+bool TransportFeedback::Create(uint8_t* packet,
+ size_t* position,
+ size_t max_length,
+ PacketReadyCallback* callback) const {
+ if (base_seq_ == -1)
+ return false;
+
+ while (*position + size_bytes_ > max_length) {
+ if (!OnBufferFull(packet, position, callback))
+ return false;
+ }
+
+ CreateHeader(kFeedbackMessageType, kPayloadType, HeaderLength(), packet,
+ position);
+ ByteWriter<uint32_t>::WriteBigEndian(&packet[*position], packet_sender_ssrc_);
+ *position += 4;
+ ByteWriter<uint32_t>::WriteBigEndian(&packet[*position], media_source_ssrc_);
+ *position += 4;
+
+ RTC_DCHECK_LE(base_seq_, 0xFFFF);
+ ByteWriter<uint16_t>::WriteBigEndian(&packet[*position], base_seq_);
+ *position += 2;
+
+ int64_t status_count = last_seq_ - base_seq_ + 1;
+ RTC_DCHECK_LE(status_count, 0xFFFF);
+ ByteWriter<uint16_t>::WriteBigEndian(&packet[*position], status_count);
+ *position += 2;
+
+ ByteWriter<int32_t, 3>::WriteBigEndian(&packet[*position],
+ static_cast<int32_t>(base_time_));
+ *position += 3;
+
+ packet[(*position)++] = feedback_seq_;
+
+ // TODO(sprang): Get rid of this cast.
+ const_cast<TransportFeedback*>(this)->EmitRemaining();
+ for (PacketStatusChunk* chunk : status_chunks_) {
+ chunk->WriteTo(&packet[*position]);
+ *position += 2;
+ }
+
+ for (int16_t delta : receive_deltas_) {
+ if (delta >= 0 && delta <= 0xFF) {
+ packet[(*position)++] = delta;
+ } else {
+ ByteWriter<int16_t>::WriteBigEndian(&packet[*position], delta);
+ *position += 2;
+ }
+ }
+
+ while ((*position % 4) != 0)
+ packet[(*position)++] = 0;
+
+ return true;
+}
+
+// Message format
+//
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |V=2|P| FMT=15 | PT=205 | length |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | SSRC of packet sender |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | SSRC of media source |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | base sequence number | packet status count |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | reference time | fb pkt. count |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | packet chunk | packet chunk |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// . .
+// . .
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | packet chunk | recv delta | recv delta |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// . .
+// . .
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | recv delta | recv delta | zero padding |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+// De-serialize packet.
+rtc::scoped_ptr<TransportFeedback> TransportFeedback::ParseFrom(
+ const uint8_t* buffer,
+ size_t length) {
+ rtc::scoped_ptr<TransportFeedback> packet(new TransportFeedback());
+
+ if (length < kMinSizeBytes) {
+ LOG(LS_WARNING) << "Buffer too small (" << length
+ << " bytes) to fit a "
+ "FeedbackPacket. Minimum size = " << kMinSizeBytes;
+ return nullptr;
+ }
+
+ RTCPUtility::RtcpCommonHeader header;
+ if (!RtcpParseCommonHeader(buffer, length, &header))
+ return nullptr;
+
+ if (header.count_or_format != kFeedbackMessageType) {
+ LOG(LS_WARNING) << "Invalid RTCP header: FMT must be "
+ << kFeedbackMessageType << " but was "
+ << header.count_or_format;
+ return nullptr;
+ }
+
+ if (header.packet_type != kPayloadType) {
+ LOG(LS_WARNING) << "Invalid RTCP header: PT must be " << kPayloadType
+ << " but was " << header.packet_type;
+ return nullptr;
+ }
+
+ packet->packet_sender_ssrc_ = ByteReader<uint32_t>::ReadBigEndian(&buffer[4]);
+ packet->media_source_ssrc_ = ByteReader<uint32_t>::ReadBigEndian(&buffer[8]);
+ packet->base_seq_ = ByteReader<uint16_t>::ReadBigEndian(&buffer[12]);
+ uint16_t num_packets = ByteReader<uint16_t>::ReadBigEndian(&buffer[14]);
+ packet->base_time_ = ByteReader<int32_t, 3>::ReadBigEndian(&buffer[16]);
+ packet->feedback_seq_ = buffer[19];
+ size_t index = 20;
+ const size_t end_index = kHeaderLength + header.payload_size_bytes;
+
+ if (num_packets == 0) {
+ LOG(LS_WARNING) << "Empty feedback messages not allowed.";
+ return nullptr;
+ }
+ packet->last_seq_ = packet->base_seq_ + num_packets - 1;
+
+ size_t packets_read = 0;
+ while (packets_read < num_packets) {
+ if (index + 2 > end_index) {
+ LOG(LS_WARNING) << "Buffer overflow while parsing packet.";
+ return nullptr;
+ }
+
+ PacketStatusChunk* chunk =
+ ParseChunk(&buffer[index], num_packets - packets_read);
+ if (chunk == nullptr)
+ return nullptr;
+
+ index += 2;
+ packet->status_chunks_.push_back(chunk);
+ packets_read += chunk->NumSymbols();
+ }
+
+ std::vector<StatusSymbol> symbols = packet->GetStatusVector();
+
+ RTC_DCHECK_EQ(num_packets, symbols.size());
+
+ for (StatusSymbol symbol : symbols) {
+ switch (symbol) {
+ case StatusSymbol::kReceivedSmallDelta:
+ if (index + 1 > end_index) {
+ LOG(LS_WARNING) << "Buffer overflow while parsing packet.";
+ return nullptr;
+ }
+ packet->receive_deltas_.push_back(buffer[index]);
+ ++index;
+ break;
+ case StatusSymbol::kReceivedLargeDelta:
+ if (index + 2 > end_index) {
+ LOG(LS_WARNING) << "Buffer overflow while parsing packet.";
+ return nullptr;
+ }
+ packet->receive_deltas_.push_back(
+ ByteReader<int16_t>::ReadBigEndian(&buffer[index]));
+ index += 2;
+ break;
+ default:
+ continue;
+ }
+ }
+
+ RTC_DCHECK_GE(index, end_index - 3);
+ RTC_DCHECK_LE(index, end_index);
+
+ return packet;
+}
+
+PacketStatusChunk* TransportFeedback::ParseChunk(const uint8_t* buffer,
+ size_t max_size) {
+ if (buffer[0] & 0x80) {
+ // First bit set => vector chunk.
+ std::deque<StatusSymbol> symbols;
+ if (buffer[0] & 0x40) {
+ // Second bit set => two bits per symbol vector.
+ return TwoBitVectorChunk::ParseFrom(buffer);
+ }
+
+ // Second bit not set => one bit per symbol vector.
+ return OneBitVectorChunk::ParseFrom(buffer);
+ }
+
+ // First bit not set => RLE chunk.
+ RunLengthChunk* rle_chunk = RunLengthChunk::ParseFrom(buffer);
+ if (rle_chunk->NumSymbols() > max_size) {
+ LOG(LS_WARNING) << "Header/body mismatch. "
+ "RLE block of size " << rle_chunk->NumSymbols()
+ << " but only " << max_size << " left to read.";
+ delete rle_chunk;
+ return nullptr;
+ }
+ return rle_chunk;
+}
+
+} // namespace rtcp
+} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h
new file mode 100644
index 00000000000..4cc1f384793
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_TRANSPORT_FEEDBACK_H_
+#define WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_TRANSPORT_FEEDBACK_H_
+
+#include <deque>
+#include <vector>
+
+#include "webrtc/base/constructormagic.h"
+#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet.h"
+
+namespace webrtc {
+namespace rtcp {
+
+class PacketStatusChunk;
+
+class TransportFeedback : public RtcpPacket {
+ public:
+ TransportFeedback();
+ virtual ~TransportFeedback();
+
+ void WithPacketSenderSsrc(uint32_t ssrc);
+ void WithMediaSourceSsrc(uint32_t ssrc);
+ void WithBase(uint16_t base_sequence, // Seq# of first packet in this msg.
+ int64_t ref_timestamp_us); // Reference timestamp for this msg.
+ void WithFeedbackSequenceNumber(uint8_t feedback_sequence);
+ // NOTE: This method requires increasing sequence numbers (excepting wraps).
+ bool WithReceivedPacket(uint16_t sequence_number, int64_t timestamp_us);
+
+ enum class StatusSymbol {
+ kNotReceived,
+ kReceivedSmallDelta,
+ kReceivedLargeDelta,
+ };
+
+ uint16_t GetBaseSequence() const;
+ std::vector<TransportFeedback::StatusSymbol> GetStatusVector() const;
+ std::vector<int16_t> GetReceiveDeltas() const;
+
+ // Get the reference time in microseconds, including any precision loss.
+ int64_t GetBaseTimeUs() const;
+ // Convenience method for getting all deltas as microseconds. The first delta
+ // is relative the base time.
+ std::vector<int64_t> GetReceiveDeltasUs() const;
+
+ uint32_t GetPacketSenderSsrc() const;
+ uint32_t GetMediaSourceSsrc() const;
+ static const int kDeltaScaleFactor = 250; // Convert to multiples of 0.25ms.
+ static const uint8_t kFeedbackMessageType = 15; // TODO(sprang): IANA reg?
+ static const uint8_t kPayloadType = 205; // RTPFB, see RFC4585.
+
+ static rtc::scoped_ptr<TransportFeedback> ParseFrom(const uint8_t* buffer,
+ size_t length);
+
+ protected:
+ bool Create(uint8_t* packet,
+ size_t* position,
+ size_t max_length,
+ PacketReadyCallback* callback) const override;
+
+ size_t BlockLength() const override;
+
+ private:
+ static PacketStatusChunk* ParseChunk(const uint8_t* buffer, size_t max_size);
+
+ int64_t Unwrap(uint16_t sequence_number);
+ bool AddSymbol(StatusSymbol symbol, int64_t seq);
+ bool Encode(StatusSymbol symbol);
+ bool HandleRleCandidate(StatusSymbol symbol,
+ int current_capacity,
+ int delta_size);
+ void EmitRemaining();
+ void EmitVectorChunk();
+ void EmitRunLengthChunk();
+
+ uint32_t packet_sender_ssrc_;
+ uint32_t media_source_ssrc_;
+ int32_t base_seq_;
+ int64_t base_time_;
+ uint8_t feedback_seq_;
+ std::vector<PacketStatusChunk*> status_chunks_;
+ std::vector<int16_t> receive_deltas_;
+
+ int64_t last_seq_;
+ int64_t last_timestamp_;
+ std::deque<StatusSymbol> symbol_vec_;
+ uint16_t first_symbol_cardinality_;
+ bool vec_needs_two_bit_symbols_;
+ uint32_t size_bytes_;
+
+ RTC_DISALLOW_COPY_AND_ASSIGN(TransportFeedback);
+};
+
+} // namespace rtcp
+} // namespace webrtc
+#endif // WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_TRANSPORT_FEEDBACK_H_
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback_unittest.cc b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback_unittest.cc
new file mode 100644
index 00000000000..ceb911d3089
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback_unittest.cc
@@ -0,0 +1,482 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h"
+
+#include <limits>
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+#include "webrtc/modules/rtp_rtcp/source/byte_io.h"
+
+using webrtc::rtcp::TransportFeedback;
+
+namespace webrtc {
+namespace {
+
+static const int kHeaderSize = 20;
+static const int kStatusChunkSize = 2;
+static const int kSmallDeltaSize = 1;
+static const int kLargeDeltaSize = 2;
+
+static const int64_t kDeltaLimit = 0xFF * TransportFeedback::kDeltaScaleFactor;
+
+class FeedbackTester {
+ public:
+ FeedbackTester()
+ : expected_size_(kAnySize),
+ default_delta_(TransportFeedback::kDeltaScaleFactor * 4) {}
+
+ void WithExpectedSize(size_t expected_size) {
+ expected_size_ = expected_size;
+ }
+
+ void WithDefaultDelta(int64_t delta) { default_delta_ = delta; }
+
+ void WithInput(const uint16_t received_seq[],
+ const int64_t received_ts[],
+ uint16_t length) {
+ rtc::scoped_ptr<int64_t[]> temp_deltas;
+ if (received_ts == nullptr) {
+ temp_deltas.reset(new int64_t[length]);
+ GenerateDeltas(received_seq, length, temp_deltas.get());
+ received_ts = temp_deltas.get();
+ }
+
+ expected_seq_.clear();
+ expected_deltas_.clear();
+ feedback_.reset(new TransportFeedback());
+
+ feedback_->WithBase(received_seq[0], received_ts[0]);
+ int64_t last_time = feedback_->GetBaseTimeUs();
+ for (int i = 0; i < length; ++i) {
+ int64_t time = received_ts[i];
+ EXPECT_TRUE(feedback_->WithReceivedPacket(received_seq[i], time));
+
+ if (last_time != -1) {
+ int64_t delta = time - last_time;
+ expected_deltas_.push_back(delta);
+ }
+ last_time = time;
+ }
+ expected_seq_.insert(expected_seq_.begin(), &received_seq[0],
+ &received_seq[length]);
+ }
+
+ void VerifyPacket() {
+ serialized_ = feedback_->Build();
+ VerifyInternal();
+ feedback_ = TransportFeedback::ParseFrom(serialized_->Buffer(),
+ serialized_->Length());
+ ASSERT_NE(nullptr, feedback_.get());
+ VerifyInternal();
+ }
+
+ static const size_t kAnySize = static_cast<size_t>(0) - 1;
+
+ private:
+ void VerifyInternal() {
+ if (expected_size_ != kAnySize) {
+ // Round up to whole 32-bit words.
+ size_t expected_size_words = (expected_size_ + 3) / 4;
+ size_t expected_size_bytes = expected_size_words * 4;
+ EXPECT_EQ(expected_size_bytes, serialized_->Length());
+ }
+
+ std::vector<TransportFeedback::StatusSymbol> symbols =
+ feedback_->GetStatusVector();
+ uint16_t seq = feedback_->GetBaseSequence();
+ auto seq_it = expected_seq_.begin();
+ for (TransportFeedback::StatusSymbol symbol : symbols) {
+ bool received =
+ (symbol == TransportFeedback::StatusSymbol::kReceivedSmallDelta ||
+ symbol == TransportFeedback::StatusSymbol::kReceivedLargeDelta);
+ if (seq_it != expected_seq_.end()) {
+ if (seq == *seq_it) {
+ ASSERT_NE(expected_seq_.end(), seq_it);
+ ASSERT_TRUE(received) << "Expected received packet @ " << seq;
+ ++seq_it;
+ } else {
+ ASSERT_FALSE(received) << "Did not expect received packet @ " << seq;
+ }
+ }
+ ++seq;
+ }
+ ASSERT_EQ(expected_seq_.end(), seq_it);
+
+ std::vector<int64_t> deltas = feedback_->GetReceiveDeltasUs();
+ ASSERT_EQ(expected_deltas_.size(), deltas.size());
+ for (size_t i = 0; i < expected_deltas_.size(); ++i)
+ EXPECT_EQ(expected_deltas_[i], deltas[i]) << "Delta mismatch @ " << i;
+ }
+
+ void GenerateDeltas(const uint16_t seq[],
+ const size_t length,
+ int64_t* deltas) {
+ uint16_t last_seq = seq[0];
+ int64_t offset = 0;
+
+ for (size_t i = 0; i < length; ++i) {
+ if (seq[i] < last_seq)
+ offset += 0x10000 * default_delta_;
+ last_seq = seq[i];
+
+ deltas[i] = offset + (last_seq * default_delta_);
+ }
+ }
+
+ std::vector<uint16_t> expected_seq_;
+ std::vector<int64_t> expected_deltas_;
+ size_t expected_size_;
+ int64_t default_delta_;
+ rtc::scoped_ptr<TransportFeedback> feedback_;
+ rtc::scoped_ptr<rtcp::RawPacket> serialized_;
+};
+
+TEST(RtcpPacketTest, TransportFeedback_OneBitVector) {
+ const uint16_t kReceived[] = {1, 2, 7, 8, 9, 10, 13};
+ const size_t kLength = sizeof(kReceived) / sizeof(uint16_t);
+ const size_t kExpectedSizeBytes =
+ kHeaderSize + kStatusChunkSize + (kLength * kSmallDeltaSize);
+
+ FeedbackTester test;
+ test.WithExpectedSize(kExpectedSizeBytes);
+ test.WithInput(kReceived, nullptr, kLength);
+ test.VerifyPacket();
+}
+
+TEST(RtcpPacketTest, TransportFeedback_FullOneBitVector) {
+ const uint16_t kReceived[] = {1, 2, 7, 8, 9, 10, 13, 14};
+ const size_t kLength = sizeof(kReceived) / sizeof(uint16_t);
+ const size_t kExpectedSizeBytes =
+ kHeaderSize + kStatusChunkSize + (kLength * kSmallDeltaSize);
+
+ FeedbackTester test;
+ test.WithExpectedSize(kExpectedSizeBytes);
+ test.WithInput(kReceived, nullptr, kLength);
+ test.VerifyPacket();
+}
+
+TEST(RtcpPacketTest, TransportFeedback_OneBitVector_WrapReceived) {
+ const uint16_t kMax = 0xFFFF;
+ const uint16_t kReceived[] = {kMax - 2, kMax - 1, kMax, 0, 1, 2};
+ const size_t kLength = sizeof(kReceived) / sizeof(uint16_t);
+ const size_t kExpectedSizeBytes =
+ kHeaderSize + kStatusChunkSize + (kLength * kSmallDeltaSize);
+
+ FeedbackTester test;
+ test.WithExpectedSize(kExpectedSizeBytes);
+ test.WithInput(kReceived, nullptr, kLength);
+ test.VerifyPacket();
+}
+
+TEST(RtcpPacketTest, TransportFeedback_OneBitVector_WrapMissing) {
+ const uint16_t kMax = 0xFFFF;
+ const uint16_t kReceived[] = {kMax - 2, kMax - 1, 1, 2};
+ const size_t kLength = sizeof(kReceived) / sizeof(uint16_t);
+ const size_t kExpectedSizeBytes =
+ kHeaderSize + kStatusChunkSize + (kLength * kSmallDeltaSize);
+
+ FeedbackTester test;
+ test.WithExpectedSize(kExpectedSizeBytes);
+ test.WithInput(kReceived, nullptr, kLength);
+ test.VerifyPacket();
+}
+
+TEST(RtcpPacketTest, TransportFeedback_TwoBitVector) {
+ const uint16_t kReceived[] = {1, 2, 6, 7};
+ const size_t kLength = sizeof(kReceived) / sizeof(uint16_t);
+ const size_t kExpectedSizeBytes =
+ kHeaderSize + kStatusChunkSize + (kLength * kLargeDeltaSize);
+
+ FeedbackTester test;
+ test.WithExpectedSize(kExpectedSizeBytes);
+ test.WithDefaultDelta(kDeltaLimit + TransportFeedback::kDeltaScaleFactor);
+ test.WithInput(kReceived, nullptr, kLength);
+ test.VerifyPacket();
+}
+
+TEST(RtcpPacketTest, TransportFeedback_TwoBitVectorFull) {
+ const uint16_t kReceived[] = {1, 2, 6, 7, 8};
+ const size_t kLength = sizeof(kReceived) / sizeof(uint16_t);
+ const size_t kExpectedSizeBytes =
+ kHeaderSize + (2 * kStatusChunkSize) + (kLength * kLargeDeltaSize);
+
+ FeedbackTester test;
+ test.WithExpectedSize(kExpectedSizeBytes);
+ test.WithDefaultDelta(kDeltaLimit + TransportFeedback::kDeltaScaleFactor);
+ test.WithInput(kReceived, nullptr, kLength);
+ test.VerifyPacket();
+}
+
+TEST(RtcpPacketTest, TransportFeedback_LargeAndNegativeDeltas) {
+ const uint16_t kReceived[] = {1, 2, 6, 7, 8};
+ const int64_t kReceiveTimes[] = {
+ 2000,
+ 1000,
+ 4000,
+ 3000,
+ 3000 + TransportFeedback::kDeltaScaleFactor * (1 << 8)};
+ const size_t kLength = sizeof(kReceived) / sizeof(uint16_t);
+ const size_t kExpectedSizeBytes =
+ kHeaderSize + kStatusChunkSize + (3 * kLargeDeltaSize) + kSmallDeltaSize;
+
+ FeedbackTester test;
+ test.WithExpectedSize(kExpectedSizeBytes);
+ test.WithInput(kReceived, kReceiveTimes, kLength);
+ test.VerifyPacket();
+}
+
+TEST(RtcpPacketTest, TransportFeedback_MaxRle) {
+ // Expected chunks created:
+ // * 1-bit vector chunk (1xreceived + 13xdropped)
+ // * RLE chunk of max length for dropped symbol
+ // * 1-bit vector chunk (1xreceived + 13xdropped)
+
+ const size_t kPacketCount = (1 << 13) - 1 + 14;
+ const uint16_t kReceived[] = {0, kPacketCount};
+ const int64_t kReceiveTimes[] = {1000, 2000};
+ const size_t kLength = sizeof(kReceived) / sizeof(uint16_t);
+ const size_t kExpectedSizeBytes =
+ kHeaderSize + (3 * kStatusChunkSize) + (kLength * kSmallDeltaSize);
+
+ FeedbackTester test;
+ test.WithExpectedSize(kExpectedSizeBytes);
+ test.WithInput(kReceived, kReceiveTimes, kLength);
+ test.VerifyPacket();
+}
+
+TEST(RtcpPacketTest, TransportFeedback_MinRle) {
+ // Expected chunks created:
+ // * 1-bit vector chunk (1xreceived + 13xdropped)
+ // * RLE chunk of length 15 for dropped symbol
+ // * 1-bit vector chunk (1xreceived + 13xdropped)
+
+ const uint16_t kReceived[] = {0, (14 * 2) + 1};
+ const int64_t kReceiveTimes[] = {1000, 2000};
+ const size_t kLength = sizeof(kReceived) / sizeof(uint16_t);
+ const size_t kExpectedSizeBytes =
+ kHeaderSize + (3 * kStatusChunkSize) + (kLength * kSmallDeltaSize);
+
+ FeedbackTester test;
+ test.WithExpectedSize(kExpectedSizeBytes);
+ test.WithInput(kReceived, kReceiveTimes, kLength);
+ test.VerifyPacket();
+}
+
+TEST(RtcpPacketTest, TransportFeedback_OneToTwoBitVector) {
+ const size_t kTwoBitVectorCapacity = 7;
+ const uint16_t kReceived[] = {0, kTwoBitVectorCapacity - 1};
+ const int64_t kReceiveTimes[] = {
+ 0, kDeltaLimit + TransportFeedback::kDeltaScaleFactor};
+ const size_t kLength = sizeof(kReceived) / sizeof(uint16_t);
+ const size_t kExpectedSizeBytes =
+ kHeaderSize + kStatusChunkSize + kSmallDeltaSize + kLargeDeltaSize;
+
+ FeedbackTester test;
+ test.WithExpectedSize(kExpectedSizeBytes);
+ test.WithInput(kReceived, kReceiveTimes, kLength);
+ test.VerifyPacket();
+}
+
+TEST(RtcpPacketTest, TransportFeedback_OneToTwoBitVectorSimpleSplit) {
+ const size_t kTwoBitVectorCapacity = 7;
+ const uint16_t kReceived[] = {0, kTwoBitVectorCapacity};
+ const int64_t kReceiveTimes[] = {
+ 0, kDeltaLimit + TransportFeedback::kDeltaScaleFactor};
+ const size_t kLength = sizeof(kReceived) / sizeof(uint16_t);
+ const size_t kExpectedSizeBytes =
+ kHeaderSize + (kStatusChunkSize * 2) + kSmallDeltaSize + kLargeDeltaSize;
+
+ FeedbackTester test;
+ test.WithExpectedSize(kExpectedSizeBytes);
+ test.WithInput(kReceived, kReceiveTimes, kLength);
+ test.VerifyPacket();
+}
+
+TEST(RtcpPacketTest, TransportFeedback_OneToTwoBitVectorSplit) {
+ // With received small delta = S, received large delta = L, use input
+ // SSSSSSSSLSSSSSSSSSSSS. This will cause a 1:2 split at the L.
+ // After split there will be two symbols in symbol_vec: SL.
+
+ const int64_t kLargeDelta = TransportFeedback::kDeltaScaleFactor * (1 << 8);
+ const size_t kNumPackets = (3 * 7) + 1;
+ const size_t kExpectedSizeBytes = kHeaderSize + (kStatusChunkSize * 3) +
+ (kSmallDeltaSize * (kNumPackets - 1)) +
+ (kLargeDeltaSize * 1);
+
+ uint16_t kReceived[kNumPackets];
+ for (size_t i = 0; i < kNumPackets; ++i)
+ kReceived[i] = i;
+
+ int64_t kReceiveTimes[kNumPackets];
+ kReceiveTimes[0] = 1000;
+ for (size_t i = 1; i < kNumPackets; ++i) {
+ int delta = (i == 8) ? kLargeDelta : 1000;
+ kReceiveTimes[i] = kReceiveTimes[i - 1] + delta;
+ }
+
+ FeedbackTester test;
+ test.WithExpectedSize(kExpectedSizeBytes);
+ test.WithInput(kReceived, kReceiveTimes, kNumPackets);
+ test.VerifyPacket();
+}
+
+TEST(RtcpPacketTest, TransportFeedback_Aliasing) {
+ TransportFeedback feedback;
+ feedback.WithBase(0, 0);
+
+ const int kSamples = 100;
+ const int64_t kTooSmallDelta = TransportFeedback::kDeltaScaleFactor / 3;
+
+ for (int i = 0; i < kSamples; ++i)
+ feedback.WithReceivedPacket(i, i * kTooSmallDelta);
+
+ feedback.Build();
+ std::vector<int64_t> deltas = feedback.GetReceiveDeltasUs();
+
+ int64_t accumulated_delta = 0;
+ int num_samples = 0;
+ for (int64_t delta : deltas) {
+ accumulated_delta += delta;
+ int64_t expected_time = num_samples * kTooSmallDelta;
+ ++num_samples;
+
+ EXPECT_NEAR(expected_time, accumulated_delta,
+ TransportFeedback::kDeltaScaleFactor / 2);
+ }
+}
+
+TEST(RtcpPacketTest, TransportFeedback_Limits) {
+ // Sequence number wrap above 0x8000.
+ rtc::scoped_ptr<TransportFeedback> packet(new TransportFeedback());
+ packet->WithBase(0, 0);
+ EXPECT_TRUE(packet->WithReceivedPacket(0x8000, 1000));
+
+ packet.reset(new TransportFeedback());
+ packet->WithBase(0, 0);
+ EXPECT_FALSE(packet->WithReceivedPacket(0x8000 + 1, 1000));
+
+ // Packet status count max 0xFFFF.
+ packet.reset(new TransportFeedback());
+ packet->WithBase(0, 0);
+ EXPECT_TRUE(packet->WithReceivedPacket(0x8000, 1000));
+ EXPECT_TRUE(packet->WithReceivedPacket(0xFFFF, 2000));
+ EXPECT_FALSE(packet->WithReceivedPacket(0, 3000));
+
+ // Too large delta.
+ packet.reset(new TransportFeedback());
+ packet->WithBase(0, 0);
+ int64_t kMaxPositiveTimeDelta = std::numeric_limits<int16_t>::max() *
+ TransportFeedback::kDeltaScaleFactor;
+ EXPECT_FALSE(packet->WithReceivedPacket(
+ 1, kMaxPositiveTimeDelta + TransportFeedback::kDeltaScaleFactor));
+ EXPECT_TRUE(packet->WithReceivedPacket(1, kMaxPositiveTimeDelta));
+
+ // Too large negative delta.
+ packet.reset(new TransportFeedback());
+ packet->WithBase(0, 0);
+ int64_t kMaxNegativeTimeDelta = std::numeric_limits<int16_t>::min() *
+ TransportFeedback::kDeltaScaleFactor;
+ EXPECT_FALSE(packet->WithReceivedPacket(
+ 1, kMaxNegativeTimeDelta - TransportFeedback::kDeltaScaleFactor));
+ EXPECT_TRUE(packet->WithReceivedPacket(1, kMaxNegativeTimeDelta));
+
+ // Base time at maximum value.
+ int64_t kMaxBaseTime =
+ static_cast<int64_t>(TransportFeedback::kDeltaScaleFactor) * (1L << 8) *
+ ((1L << 23) - 1);
+ packet.reset(new TransportFeedback());
+ packet->WithBase(0, kMaxBaseTime);
+ packet->WithReceivedPacket(0, kMaxBaseTime);
+ // Serialize and de-serialize (verify 24bit parsing).
+ rtc::scoped_ptr<rtcp::RawPacket> raw_packet = packet->Build();
+ packet =
+ TransportFeedback::ParseFrom(raw_packet->Buffer(), raw_packet->Length());
+ EXPECT_EQ(kMaxBaseTime, packet->GetBaseTimeUs());
+
+ // Base time above maximum value.
+ int64_t kTooLargeBaseTime =
+ kMaxBaseTime + (TransportFeedback::kDeltaScaleFactor * (1L << 8));
+ packet.reset(new TransportFeedback());
+ packet->WithBase(0, kTooLargeBaseTime);
+ packet->WithReceivedPacket(0, kTooLargeBaseTime);
+ raw_packet = packet->Build();
+ packet =
+ TransportFeedback::ParseFrom(raw_packet->Buffer(), raw_packet->Length());
+ EXPECT_NE(kTooLargeBaseTime, packet->GetBaseTimeUs());
+
+ // TODO(sprang): Once we support max length lower than RTCP length limit,
+ // add back test for max size in bytes.
+}
+
+TEST(RtcpPacketTest, TransportFeedback_Padding) {
+ const size_t kExpectedSizeBytes =
+ kHeaderSize + kStatusChunkSize + kSmallDeltaSize;
+ const size_t kExpectedSizeWords = (kExpectedSizeBytes + 3) / 4;
+
+ TransportFeedback feedback;
+ feedback.WithBase(0, 0);
+ EXPECT_TRUE(feedback.WithReceivedPacket(0, 0));
+
+ rtc::scoped_ptr<rtcp::RawPacket> packet(feedback.Build());
+ EXPECT_EQ(kExpectedSizeWords * 4, packet->Length());
+ ASSERT_GT(kExpectedSizeWords * 4, kExpectedSizeBytes);
+ for (size_t i = kExpectedSizeBytes; i < kExpectedSizeWords * 4; ++i)
+ EXPECT_EQ(0u, packet->Buffer()[i]);
+
+  // Modify packet by adding 4 bytes of padding at the end. Padding is not
+  // currently added when sending, but we must be able to handle it on receive.
+
+ const int kPaddingBytes = 4;
+ const size_t kExpectedSizeWithPadding =
+ (kExpectedSizeWords * 4) + kPaddingBytes;
+ uint8_t mod_buffer[kExpectedSizeWithPadding];
+ memcpy(mod_buffer, packet->Buffer(), kExpectedSizeWords * 4);
+ memset(&mod_buffer[kExpectedSizeWords * 4], 0, kPaddingBytes - 1);
+ mod_buffer[kExpectedSizeWithPadding - 1] = kPaddingBytes;
+ const uint8_t padding_flag = 1 << 5;
+ mod_buffer[0] |= padding_flag;
+ ByteWriter<uint16_t>::WriteBigEndian(
+ &mod_buffer[2], ByteReader<uint16_t>::ReadBigEndian(&mod_buffer[2]) +
+ ((kPaddingBytes + 3) / 4));
+
+ rtc::scoped_ptr<TransportFeedback> parsed_packet(
+ TransportFeedback::ParseFrom(mod_buffer, kExpectedSizeWithPadding));
+ ASSERT_TRUE(parsed_packet.get() != nullptr);
+ EXPECT_EQ(kExpectedSizeWords * 4, packet->Length()); // Padding not included.
+}
+
+TEST(RtcpPacketTest, TransportFeedback_CorrectlySplitsVectorChunks) {
+ const int kOneBitVectorCapacity = 14;
+ const int64_t kLargeTimeDelta =
+ TransportFeedback::kDeltaScaleFactor * (1 << 8);
+
+ // Test that a number of small deltas followed by a large delta results in a
+ // correct split into multiple chunks, as needed.
+
+ for (int deltas = 0; deltas <= kOneBitVectorCapacity + 1; ++deltas) {
+ TransportFeedback feedback;
+ feedback.WithBase(0, 0);
+ for (int i = 0; i < deltas; ++i)
+ feedback.WithReceivedPacket(i, i * 1000);
+ feedback.WithReceivedPacket(deltas, deltas * 1000 + kLargeTimeDelta);
+
+ rtc::scoped_ptr<rtcp::RawPacket> serialized_packet = feedback.Build();
+ EXPECT_TRUE(serialized_packet.get() != nullptr);
+ rtc::scoped_ptr<TransportFeedback> deserialized_packet =
+ TransportFeedback::ParseFrom(serialized_packet->Buffer(),
+ serialized_packet->Length());
+ EXPECT_TRUE(deserialized_packet.get() != nullptr);
+ }
+}
+
+} // namespace
+} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_packet_unittest.cc b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_packet_unittest.cc
index 00971596447..77520b633bc 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_packet_unittest.cc
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_packet_unittest.cc
@@ -1084,4 +1084,5 @@ TEST(RtcpPacketTest, XrWithTooManyBlocks) {
EXPECT_TRUE(xr.WithVoipMetric(&voip_metric));
EXPECT_FALSE(xr.WithVoipMetric(&voip_metric));
}
+
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_receiver.cc b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_receiver.cc
index 55974bf74bd..fb1f9b25037 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_receiver.cc
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_receiver.cc
@@ -17,6 +17,7 @@
#include "webrtc/base/checks.h"
#include "webrtc/modules/rtp_rtcp/source/rtcp_utility.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
#include "webrtc/system_wrappers/interface/logging.h"
@@ -29,24 +30,27 @@ using namespace RTCPHelp;
// The number of RTCP time intervals needed to trigger a timeout.
const int kRrTimeoutIntervals = 3;
+const int64_t kMaxWarningLogIntervalMs = 10000;
+
RTCPReceiver::RTCPReceiver(
- int32_t id,
Clock* clock,
bool receiver_only,
RtcpPacketTypeCounterObserver* packet_type_counter_observer,
RtcpBandwidthObserver* rtcp_bandwidth_observer,
RtcpIntraFrameObserver* rtcp_intra_frame_observer,
+ TransportFeedbackObserver* transport_feedback_observer,
ModuleRtpRtcpImpl* owner)
: TMMBRHelp(),
_clock(clock),
receiver_only_(receiver_only),
- _method(kRtcpOff),
+ _method(RtcpMode::kOff),
_lastReceived(0),
_rtpRtcp(*owner),
_criticalSectionFeedbacks(
CriticalSectionWrapper::CreateCriticalSection()),
_cbRtcpBandwidthObserver(rtcp_bandwidth_observer),
_cbRtcpIntraFrameObserver(rtcp_intra_frame_observer),
+ _cbTransportFeedbackObserver(transport_feedback_observer),
_criticalSectionRTCPReceiver(
CriticalSectionWrapper::CreateCriticalSection()),
main_ssrc_(0),
@@ -62,7 +66,9 @@ RTCPReceiver::RTCPReceiver(
_lastReceivedRrMs(0),
_lastIncreasedSequenceNumberMs(0),
stats_callback_(NULL),
- packet_type_counter_observer_(packet_type_counter_observer) {
+ packet_type_counter_observer_(packet_type_counter_observer),
+ num_skipped_packets_(0),
+ last_skipped_packets_warning_(clock->TimeInMilliseconds()) {
memset(&_remoteSenderInfo, 0, sizeof(_remoteSenderInfo));
}
@@ -93,12 +99,12 @@ RTCPReceiver::~RTCPReceiver() {
}
}
-RTCPMethod RTCPReceiver::Status() const {
+RtcpMode RTCPReceiver::Status() const {
CriticalSectionScoped lock(_criticalSectionRTCPReceiver);
return _method;
}
-void RTCPReceiver::SetRTCPStatus(RTCPMethod method) {
+void RTCPReceiver::SetRTCPStatus(RtcpMode method) {
CriticalSectionScoped lock(_criticalSectionRTCPReceiver);
_method = method;
}
@@ -295,7 +301,7 @@ RTCPReceiver::IncomingRTCPPacket(RTCPPacketInformation& rtcpPacketInformation,
HandleSenderReceiverReport(*rtcpParser, rtcpPacketInformation);
break;
case RTCPPacketTypes::kSdes:
- HandleSDES(*rtcpParser);
+ HandleSDES(*rtcpParser, rtcpPacketInformation);
break;
case RTCPPacketTypes::kXrHeader:
HandleXrHeader(*rtcpParser, rtcpPacketInformation);
@@ -350,6 +356,9 @@ RTCPReceiver::IncomingRTCPPacket(RTCPPacketInformation& rtcpPacketInformation,
// generic application messages
HandleAPPItem(*rtcpParser, rtcpPacketInformation);
break;
+ case RTCPPacketTypes::kTransportFeedback:
+ HandleTransportFeedback(rtcpParser, &rtcpPacketInformation);
+ break;
default:
rtcpParser->Iterate();
break;
@@ -362,6 +371,19 @@ RTCPReceiver::IncomingRTCPPacket(RTCPPacketInformation& rtcpPacketInformation,
main_ssrc_, packet_type_counter_);
}
+ num_skipped_packets_ += rtcpParser->NumSkippedBlocks();
+
+ int64_t now = _clock->TimeInMilliseconds();
+ if (now - last_skipped_packets_warning_ >= kMaxWarningLogIntervalMs &&
+ num_skipped_packets_ > 0) {
+ last_skipped_packets_warning_ = now;
+ LOG(LS_WARNING)
+ << num_skipped_packets_
+ << " RTCP blocks were skipped due to being malformed or of "
+ "unrecognized/unsupported type, during the past "
+ << (kMaxWarningLogIntervalMs / 1000) << " second period.";
+ }
+
return 0;
}
@@ -754,12 +776,14 @@ int32_t RTCPReceiver::BoundingSet(bool &tmmbrOwner, TMMBRSet* boundingSetRec) {
}
// no need for critsect we have _criticalSectionRTCPReceiver
-void RTCPReceiver::HandleSDES(RTCPUtility::RTCPParserV2& rtcpParser) {
+void RTCPReceiver::HandleSDES(RTCPUtility::RTCPParserV2& rtcpParser,
+ RTCPPacketInformation& rtcpPacketInformation) {
RTCPUtility::RTCPPacketTypes pktType = rtcpParser.Iterate();
while (pktType == RTCPPacketTypes::kSdesChunk) {
HandleSDESChunk(rtcpParser);
pktType = rtcpParser.Iterate();
}
+ rtcpPacketInformation.rtcpPacketTypeFlags |= kRtcpSdes;
}
// no need for critsect we have _criticalSectionRTCPReceiver
@@ -1251,6 +1275,17 @@ void RTCPReceiver::HandleAPPItem(RTCPUtility::RTCPParserV2& rtcpParser,
rtcpParser.Iterate();
}
+void RTCPReceiver::HandleTransportFeedback(
+ RTCPUtility::RTCPParserV2* rtcp_parser,
+ RTCPHelp::RTCPPacketInformation* rtcp_packet_information) {
+ rtcp::RtcpPacket* packet = rtcp_parser->ReleaseRtcpPacket();
+ RTC_DCHECK(packet != nullptr);
+ rtcp_packet_information->rtcpPacketTypeFlags |= kRtcpTransportFeedback;
+ rtcp_packet_information->transport_feedback_.reset(
+ static_cast<rtcp::TransportFeedback*>(packet));
+
+ rtcp_parser->Iterate();
+}
int32_t RTCPReceiver::UpdateTMMBR() {
int32_t numBoundingSet = 0;
uint32_t bitrate = 0;
@@ -1320,11 +1355,11 @@ void RTCPReceiver::TriggerCallbacksFromRTCPPacket(
local_ssrc = main_ssrc_;
}
if (!receiver_only_ &&
- rtcpPacketInformation.rtcpPacketTypeFlags & kRtcpSrReq) {
+ (rtcpPacketInformation.rtcpPacketTypeFlags & kRtcpSrReq)) {
_rtpRtcp.OnRequestSendReport();
}
if (!receiver_only_ &&
- rtcpPacketInformation.rtcpPacketTypeFlags & kRtcpNack) {
+ (rtcpPacketInformation.rtcpPacketTypeFlags & kRtcpNack)) {
if (rtcpPacketInformation.nackSequenceNumbers.size() > 0) {
LOG(LS_VERBOSE) << "Incoming NACK length: "
<< rtcpPacketInformation.nackSequenceNumbers.size();
@@ -1337,7 +1372,7 @@ void RTCPReceiver::TriggerCallbacksFromRTCPPacket(
// report can generate several RTCP packets, based on number relayed/mixed
// a send report block should go out to all receivers.
if (_cbRtcpIntraFrameObserver) {
- DCHECK(!receiver_only_);
+ RTC_DCHECK(!receiver_only_);
if ((rtcpPacketInformation.rtcpPacketTypeFlags & kRtcpPli) ||
(rtcpPacketInformation.rtcpPacketTypeFlags & kRtcpFir)) {
if (rtcpPacketInformation.rtcpPacketTypeFlags & kRtcpPli) {
@@ -1359,7 +1394,7 @@ void RTCPReceiver::TriggerCallbacksFromRTCPPacket(
}
}
if (_cbRtcpBandwidthObserver) {
- DCHECK(!receiver_only_);
+ RTC_DCHECK(!receiver_only_);
if (rtcpPacketInformation.rtcpPacketTypeFlags & kRtcpRemb) {
LOG(LS_VERBOSE) << "Incoming REMB: "
<< rtcpPacketInformation.receiverEstimatedMaxBitrate;
@@ -1375,6 +1410,17 @@ void RTCPReceiver::TriggerCallbacksFromRTCPPacket(
now);
}
}
+ if (_cbTransportFeedbackObserver &&
+ (rtcpPacketInformation.rtcpPacketTypeFlags & kRtcpTransportFeedback)) {
+ uint32_t media_source_ssrc =
+ rtcpPacketInformation.transport_feedback_->GetMediaSourceSsrc();
+ if (media_source_ssrc == main_ssrc_ ||
+ registered_ssrcs_.find(media_source_ssrc) !=
+ registered_ssrcs_.end()) {
+ _cbTransportFeedbackObserver->OnTransportFeedback(
+ *rtcpPacketInformation.transport_feedback_.get());
+ }
+ }
}
if (!receiver_only_) {
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_receiver.h b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_receiver.h
index 569af901307..2883f3af349 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_receiver.h
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_receiver.h
@@ -29,17 +29,17 @@ class ModuleRtpRtcpImpl;
class RTCPReceiver : public TMMBRHelp
{
public:
- RTCPReceiver(int32_t id,
- Clock* clock,
+ RTCPReceiver(Clock* clock,
bool receiver_only,
RtcpPacketTypeCounterObserver* packet_type_counter_observer,
RtcpBandwidthObserver* rtcp_bandwidth_observer,
RtcpIntraFrameObserver* rtcp_intra_frame_observer,
+ TransportFeedbackObserver* transport_feedback_observer,
ModuleRtpRtcpImpl* owner);
virtual ~RTCPReceiver();
- RTCPMethod Status() const;
- void SetRTCPStatus(RTCPMethod method);
+ RtcpMode Status() const;
+ void SetRTCPStatus(RtcpMode method);
int64_t LastReceived();
int64_t LastReceivedReceiverReport() const;
@@ -131,7 +131,8 @@ protected:
RTCPHelp::RTCPPacketInformation& rtcpPacketInformation,
uint32_t remoteSSRC);
- void HandleSDES(RTCPUtility::RTCPParserV2& rtcpParser);
+ void HandleSDES(RTCPUtility::RTCPParserV2& rtcpParser,
+ RTCPHelp::RTCPPacketInformation& rtcpPacketInformation);
void HandleSDESChunk(RTCPUtility::RTCPParserV2& rtcpParser);
@@ -216,6 +217,10 @@ protected:
void HandleAPPItem(RTCPUtility::RTCPParserV2& rtcpParser,
RTCPHelp::RTCPPacketInformation& rtcpPacketInformation);
+ void HandleTransportFeedback(
+ RTCPUtility::RTCPParserV2* rtcp_parser,
+ RTCPHelp::RTCPPacketInformation* rtcp_packet_information);
+
private:
typedef std::map<uint32_t, RTCPHelp::RTCPReceiveInformation*>
ReceivedInfoMap;
@@ -234,13 +239,14 @@ protected:
Clock* const _clock;
const bool receiver_only_;
- RTCPMethod _method;
+ RtcpMode _method;
int64_t _lastReceived;
ModuleRtpRtcpImpl& _rtpRtcp;
CriticalSectionWrapper* _criticalSectionFeedbacks;
RtcpBandwidthObserver* const _cbRtcpBandwidthObserver;
RtcpIntraFrameObserver* const _cbRtcpIntraFrameObserver;
+ TransportFeedbackObserver* const _cbTransportFeedbackObserver;
CriticalSectionWrapper* _criticalSectionRTCPReceiver;
uint32_t main_ssrc_;
@@ -282,6 +288,9 @@ protected:
RtcpPacketTypeCounter packet_type_counter_;
RTCPUtility::NackStats nack_stats_;
+
+ size_t num_skipped_packets_;
+ int64_t last_skipped_packets_warning_;
};
} // namespace webrtc
#endif // WEBRTC_MODULES_RTP_RTCP_SOURCE_RTCP_RECEIVER_H_
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_receiver_help.cc b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_receiver_help.cc
index b86e5cc2d8a..718990d10b5 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_receiver_help.cc
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_receiver_help.cc
@@ -13,6 +13,7 @@
#include <assert.h> // assert
#include <string.h> // memset
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_utility.h"
namespace webrtc {
@@ -36,8 +37,7 @@ RTCPPacketInformation::RTCPPacketInformation()
rtp_timestamp(0),
xr_originator_ssrc(0),
xr_dlrr_item(false),
- VoIPMetric(NULL) {
-}
+ VoIPMetric(nullptr) {}
RTCPPacketInformation::~RTCPPacketInformation()
{
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_receiver_help.h b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_receiver_help.h
index a7f3c9fa64a..37b7b883706 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_receiver_help.h
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_receiver_help.h
@@ -20,6 +20,9 @@
#include "webrtc/typedefs.h"
namespace webrtc {
+namespace rtcp {
+class TransportFeedback;
+}
namespace RTCPHelp
{
@@ -84,8 +87,10 @@ public:
bool xr_dlrr_item;
RTCPVoIPMetric* VoIPMetric;
+ rtc::scoped_ptr<rtcp::TransportFeedback> transport_feedback_;
+
private:
- DISALLOW_COPY_AND_ASSIGN(RTCPPacketInformation);
+ RTC_DISALLOW_COPY_AND_ASSIGN(RTCPPacketInformation);
};
class RTCPReceiveInformation
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_receiver_unittest.cc b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_receiver_unittest.cc
index ce66613ec05..15818454762 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_receiver_unittest.cc
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_receiver_unittest.cc
@@ -19,11 +19,13 @@
#include "webrtc/common_types.h"
#include "webrtc/modules/remote_bitrate_estimator/include/mock/mock_remote_bitrate_observer.h"
#include "webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.h"
+#include "webrtc/modules/rtp_rtcp/source/byte_io.h"
#include "webrtc/modules/rtp_rtcp/source/rtcp_packet.h"
#include "webrtc/modules/rtp_rtcp/source/rtcp_receiver.h"
#include "webrtc/modules/rtp_rtcp/source/rtcp_sender.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_utility.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h"
namespace webrtc {
@@ -33,23 +35,20 @@ namespace { // Anonymous namespace; hide utility functions and classes.
class TestTransport : public Transport,
public NullRtpData {
public:
- explicit TestTransport()
- : rtcp_receiver_(NULL) {
- }
+ explicit TestTransport() : rtcp_receiver_(nullptr) {}
void SetRTCPReceiver(RTCPReceiver* rtcp_receiver) {
rtcp_receiver_ = rtcp_receiver;
}
- int SendPacket(int /*ch*/, const void* /*data*/, size_t /*len*/) override {
+ bool SendRtp(const uint8_t* /*data*/,
+ size_t /*len*/,
+ const PacketOptions& options) override {
ADD_FAILURE(); // FAIL() gives a compile error.
- return -1;
+ return false;
}
- // Injects an RTCP packet into the receiver.
- int SendRTCPPacket(int /* ch */,
- const void* packet,
- size_t packet_len) override {
+ bool SendRtcp(const uint8_t* packet, size_t packet_len) override {
ADD_FAILURE();
- return 0;
+ return true;
}
int OnReceivedPayloadData(const uint8_t* payloadData,
@@ -63,27 +62,23 @@ class TestTransport : public Transport,
class RtcpReceiverTest : public ::testing::Test {
protected:
- static const uint32_t kRemoteBitrateEstimatorMinBitrateBps = 30000;
-
RtcpReceiverTest()
: over_use_detector_options_(),
system_clock_(1335900000),
remote_bitrate_observer_(),
- remote_bitrate_estimator_(new RemoteBitrateEstimatorSingleStream(
- &remote_bitrate_observer_,
- &system_clock_,
- kRemoteBitrateEstimatorMinBitrateBps)) {
+ remote_bitrate_estimator_(
+ new RemoteBitrateEstimatorSingleStream(&remote_bitrate_observer_,
+ &system_clock_)) {
test_transport_ = new TestTransport();
RtpRtcp::Configuration configuration;
- configuration.id = 0;
configuration.audio = false;
configuration.clock = &system_clock_;
configuration.outgoing_transport = test_transport_;
configuration.remote_bitrate_estimator = remote_bitrate_estimator_.get();
rtp_rtcp_impl_ = new ModuleRtpRtcpImpl(configuration);
- rtcp_receiver_ = new RTCPReceiver(0, &system_clock_, false, NULL, NULL,
- NULL, rtp_rtcp_impl_);
+ rtcp_receiver_ = new RTCPReceiver(&system_clock_, false, nullptr, nullptr,
+ nullptr, nullptr, rtp_rtcp_impl_);
test_transport_->SetRTCPReceiver(rtcp_receiver_);
}
~RtcpReceiverTest() {
@@ -126,9 +121,10 @@ class RtcpReceiverTest : public ::testing::Test {
rtcp_packet_info_.ntp_frac = rtcpPacketInformation.ntp_frac;
rtcp_packet_info_.rtp_timestamp = rtcpPacketInformation.rtp_timestamp;
rtcp_packet_info_.xr_dlrr_item = rtcpPacketInformation.xr_dlrr_item;
- if (rtcpPacketInformation.VoIPMetric) {
+ if (rtcpPacketInformation.VoIPMetric)
rtcp_packet_info_.AddVoIPMetric(rtcpPacketInformation.VoIPMetric);
- }
+ rtcp_packet_info_.transport_feedback_.reset(
+ rtcpPacketInformation.transport_feedback_.release());
return 0;
}
@@ -366,7 +362,8 @@ TEST_F(RtcpReceiverTest, GetRtt) {
rtcp_receiver_->SetSsrcs(kSourceSsrc, ssrcs);
// No report block received.
- EXPECT_EQ(-1, rtcp_receiver_->RTT(kSenderSsrc, NULL, NULL, NULL, NULL));
+ EXPECT_EQ(
+ -1, rtcp_receiver_->RTT(kSenderSsrc, nullptr, nullptr, nullptr, nullptr));
rtcp::ReportBlock rb;
rb.To(kSourceSsrc);
@@ -378,10 +375,12 @@ TEST_F(RtcpReceiverTest, GetRtt) {
EXPECT_EQ(kSenderSsrc, rtcp_packet_info_.remoteSSRC);
EXPECT_EQ(kRtcpRr, rtcp_packet_info_.rtcpPacketTypeFlags);
EXPECT_EQ(1u, rtcp_packet_info_.report_blocks.size());
- EXPECT_EQ(0, rtcp_receiver_->RTT(kSenderSsrc, NULL, NULL, NULL, NULL));
+ EXPECT_EQ(
+ 0, rtcp_receiver_->RTT(kSenderSsrc, nullptr, nullptr, nullptr, nullptr));
// Report block not received.
- EXPECT_EQ(-1, rtcp_receiver_->RTT(kSenderSsrc + 1, NULL, NULL, NULL, NULL));
+ EXPECT_EQ(-1, rtcp_receiver_->RTT(kSenderSsrc + 1, nullptr, nullptr, nullptr,
+ nullptr));
}
TEST_F(RtcpReceiverTest, InjectIjWithNoItem) {
@@ -593,7 +592,7 @@ TEST_F(RtcpReceiverTest, InjectXrVoipPacket) {
xr.WithVoipMetric(&voip_metric);
rtc::scoped_ptr<rtcp::RawPacket> packet(xr.Build());
EXPECT_EQ(0, InjectRtcpPacket(packet->Buffer(), packet->Length()));
- ASSERT_TRUE(rtcp_packet_info_.VoIPMetric != NULL);
+ ASSERT_TRUE(rtcp_packet_info_.VoIPMetric != nullptr);
EXPECT_EQ(kLossRate, rtcp_packet_info_.VoIPMetric->lossRate);
EXPECT_EQ(kRtcpXrVoipMetric, rtcp_packet_info_.rtcpPacketTypeFlags);
}
@@ -847,7 +846,7 @@ TEST_F(RtcpReceiverTest, ReceiveReportTimeout) {
TEST_F(RtcpReceiverTest, TmmbrReceivedWithNoIncomingPacket) {
// This call is expected to fail because no data has arrived.
- EXPECT_EQ(-1, rtcp_receiver_->TMMBRReceived(0, 0, NULL));
+ EXPECT_EQ(-1, rtcp_receiver_->TMMBRReceived(0, 0, nullptr));
}
TEST_F(RtcpReceiverTest, TmmbrPacketAccepted) {
@@ -868,7 +867,7 @@ TEST_F(RtcpReceiverTest, TmmbrPacketAccepted) {
rtc::scoped_ptr<rtcp::RawPacket> packet(sr.Build());
EXPECT_EQ(0, InjectRtcpPacket(packet->Buffer(), packet->Length()));
- EXPECT_EQ(1, rtcp_receiver_->TMMBRReceived(0, 0, NULL));
+ EXPECT_EQ(1, rtcp_receiver_->TMMBRReceived(0, 0, nullptr));
TMMBRSet candidate_set;
candidate_set.VerifyAndAllocateSet(1);
EXPECT_EQ(1, rtcp_receiver_->TMMBRReceived(1, 0, &candidate_set));
@@ -894,7 +893,7 @@ TEST_F(RtcpReceiverTest, TmmbrPacketNotForUsIgnored) {
ssrcs.insert(kMediaFlowSsrc);
rtcp_receiver_->SetSsrcs(kMediaFlowSsrc, ssrcs);
EXPECT_EQ(0, InjectRtcpPacket(packet->Buffer(), packet->Length()));
- EXPECT_EQ(0, rtcp_receiver_->TMMBRReceived(0, 0, NULL));
+ EXPECT_EQ(0, rtcp_receiver_->TMMBRReceived(0, 0, nullptr));
}
TEST_F(RtcpReceiverTest, TmmbrPacketZeroRateIgnored) {
@@ -915,7 +914,7 @@ TEST_F(RtcpReceiverTest, TmmbrPacketZeroRateIgnored) {
rtc::scoped_ptr<rtcp::RawPacket> packet(sr.Build());
EXPECT_EQ(0, InjectRtcpPacket(packet->Buffer(), packet->Length()));
- EXPECT_EQ(0, rtcp_receiver_->TMMBRReceived(0, 0, NULL));
+ EXPECT_EQ(0, rtcp_receiver_->TMMBRReceived(0, 0, nullptr));
}
TEST_F(RtcpReceiverTest, TmmbrThreeConstraintsTimeOut) {
@@ -942,7 +941,7 @@ TEST_F(RtcpReceiverTest, TmmbrThreeConstraintsTimeOut) {
system_clock_.AdvanceTimeMilliseconds(5000);
}
// It is now starttime + 15.
- EXPECT_EQ(3, rtcp_receiver_->TMMBRReceived(0, 0, NULL));
+ EXPECT_EQ(3, rtcp_receiver_->TMMBRReceived(0, 0, nullptr));
TMMBRSet candidate_set;
candidate_set.VerifyAndAllocateSet(3);
EXPECT_EQ(3, rtcp_receiver_->TMMBRReceived(3, 0, &candidate_set));
@@ -951,7 +950,7 @@ TEST_F(RtcpReceiverTest, TmmbrThreeConstraintsTimeOut) {
// seconds, timing out the first packet.
system_clock_.AdvanceTimeMilliseconds(12000);
// Odd behaviour: Just counting them does not trigger the timeout.
- EXPECT_EQ(3, rtcp_receiver_->TMMBRReceived(0, 0, NULL));
+ EXPECT_EQ(3, rtcp_receiver_->TMMBRReceived(0, 0, nullptr));
EXPECT_EQ(2, rtcp_receiver_->TMMBRReceived(3, 0, &candidate_set));
EXPECT_EQ(kSenderSsrc + 1, candidate_set.Ssrc(0));
}
@@ -1012,7 +1011,7 @@ TEST_F(RtcpReceiverTest, Callbacks) {
EXPECT_TRUE(callback.Matches(kSourceSsrc, kSequenceNumber, kFractionLoss,
kCumulativeLoss, kJitter));
- rtcp_receiver_->RegisterRtcpStatisticsCallback(NULL);
+ rtcp_receiver_->RegisterRtcpStatisticsCallback(nullptr);
// Add arbitrary numbers, callback should not be called (retain old values).
rtcp::ReportBlock rb2;
@@ -1031,6 +1030,68 @@ TEST_F(RtcpReceiverTest, Callbacks) {
kCumulativeLoss, kJitter));
}
+TEST_F(RtcpReceiverTest, ReceivesTransportFeedback) {
+ const uint32_t kSenderSsrc = 0x10203;
+ const uint32_t kSourceSsrc = 0x123456;
+
+ std::set<uint32_t> ssrcs;
+ ssrcs.insert(kSourceSsrc);
+ rtcp_receiver_->SetSsrcs(kSourceSsrc, ssrcs);
+
+ rtcp::TransportFeedback packet;
+ packet.WithMediaSourceSsrc(kSourceSsrc);
+ packet.WithPacketSenderSsrc(kSenderSsrc);
+ packet.WithBase(1, 1000);
+ packet.WithReceivedPacket(1, 1000);
+
+ rtc::scoped_ptr<rtcp::RawPacket> built_packet = packet.Build();
+ ASSERT_TRUE(built_packet.get() != nullptr);
+
+ EXPECT_EQ(0,
+ InjectRtcpPacket(built_packet->Buffer(), built_packet->Length()));
+
+ EXPECT_NE(0u, rtcp_packet_info_.rtcpPacketTypeFlags & kRtcpTransportFeedback);
+ EXPECT_TRUE(rtcp_packet_info_.transport_feedback_.get() != nullptr);
+}
+
+TEST_F(RtcpReceiverTest, HandlesInvalidTransportFeedback) {
+ const uint32_t kSenderSsrc = 0x10203;
+ const uint32_t kSourceSsrc = 0x123456;
+
+ std::set<uint32_t> ssrcs;
+ ssrcs.insert(kSourceSsrc);
+ rtcp_receiver_->SetSsrcs(kSourceSsrc, ssrcs);
+
+ // Send a compound packet with a TransportFeedback followed by something else.
+ rtcp::TransportFeedback packet;
+ packet.WithMediaSourceSsrc(kSourceSsrc);
+ packet.WithPacketSenderSsrc(kSenderSsrc);
+ packet.WithBase(1, 1000);
+ packet.WithReceivedPacket(1, 1000);
+
+ static uint32_t kBitrateBps = 50000;
+ rtcp::Remb remb;
+ remb.From(kSourceSsrc);
+ remb.WithBitrateBps(kBitrateBps);
+ packet.Append(&remb);
+
+ rtc::scoped_ptr<rtcp::RawPacket> built_packet = packet.Build();
+ ASSERT_TRUE(built_packet.get() != nullptr);
+
+ // Modify the TransportFeedback packet so that it is invalid.
+ const size_t kStatusCountOffset = 14;
+ ByteWriter<uint16_t>::WriteBigEndian(
+ &built_packet->MutableBuffer()[kStatusCountOffset], 42);
+
+ EXPECT_EQ(0,
+ InjectRtcpPacket(built_packet->Buffer(), built_packet->Length()));
+
+ // Transport feedback should be ignored, but next packet should work.
+ EXPECT_EQ(0u, rtcp_packet_info_.rtcpPacketTypeFlags & kRtcpTransportFeedback);
+ EXPECT_NE(0u, rtcp_packet_info_.rtcpPacketTypeFlags & kRtcpRemb);
+ EXPECT_EQ(kBitrateBps, rtcp_packet_info_.receiverEstimatedMaxBitrate);
+}
+
} // Anonymous namespace
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_sender.cc b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_sender.cc
index d15de162d96..e437a15a460 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_sender.cc
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_sender.cc
@@ -21,6 +21,7 @@
#include "webrtc/common_types.h"
#include "webrtc/modules/rtp_rtcp/source/byte_io.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
#include "webrtc/system_wrappers/interface/logging.h"
#include "webrtc/system_wrappers/interface/trace_event.h"
@@ -90,11 +91,10 @@ struct RTCPSender::RtcpContext {
buffer_size(buffer_size),
ntp_sec(0),
ntp_frac(0),
- jitter_transmission_offset(0),
position(0) {}
uint8_t* AllocateData(uint32_t bytes) {
- DCHECK_LE(position + bytes, buffer_size);
+ RTC_DCHECK_LE(position + bytes, buffer_size);
uint8_t* ptr = &buffer[position];
position += bytes;
return ptr;
@@ -109,7 +109,6 @@ struct RTCPSender::RtcpContext {
uint32_t buffer_size;
uint32_t ntp_sec;
uint32_t ntp_frac;
- uint32_t jitter_transmission_offset;
uint32_t position;
};
@@ -122,31 +121,32 @@ class RTCPSender::PacketBuiltCallback
void OnPacketReady(uint8_t* data, size_t length) override {
context_->position += length;
}
+ bool BuildPacket(const rtcp::RtcpPacket& packet) {
+ return packet.BuildExternalBuffer(
+ &context_->buffer[context_->position],
+ context_->buffer_size - context_->position, this);
+ }
private:
RtcpContext* const context_;
};
RTCPSender::RTCPSender(
- int32_t id,
bool audio,
Clock* clock,
ReceiveStatistics* receive_statistics,
- RtcpPacketTypeCounterObserver* packet_type_counter_observer)
- : id_(id),
- audio_(audio),
+ RtcpPacketTypeCounterObserver* packet_type_counter_observer,
+ Transport* outgoing_transport)
+ : audio_(audio),
clock_(clock),
- method_(kRtcpOff),
- critical_section_transport_(
- CriticalSectionWrapper::CreateCriticalSection()),
- cbTransport_(nullptr),
+ method_(RtcpMode::kOff),
+ transport_(outgoing_transport),
critical_section_rtcp_sender_(
CriticalSectionWrapper::CreateCriticalSection()),
using_nack_(false),
sending_(false),
remb_enabled_(false),
- extended_jitter_report_enabled_(false),
next_time_to_send_rtcp_(0),
start_timestamp_(0),
last_rtp_timestamp_(0),
@@ -172,12 +172,11 @@ RTCPSender::RTCPSender(
packet_type_counter_observer_(packet_type_counter_observer) {
memset(last_send_report_, 0, sizeof(last_send_report_));
memset(last_rtcp_time_, 0, sizeof(last_rtcp_time_));
+ RTC_DCHECK(transport_ != nullptr);
builders_[kRtcpSr] = &RTCPSender::BuildSR;
builders_[kRtcpRr] = &RTCPSender::BuildRR;
builders_[kRtcpSdes] = &RTCPSender::BuildSDES;
- builders_[kRtcpTransmissionTimeOffset] =
- &RTCPSender::BuildExtendedJitterReport;
builders_[kRtcpPli] = &RTCPSender::BuildPLI;
builders_[kRtcpFir] = &RTCPSender::BuildFIR;
builders_[kRtcpSli] = &RTCPSender::BuildSLI;
@@ -197,22 +196,16 @@ RTCPSender::RTCPSender(
RTCPSender::~RTCPSender() {
}
-int32_t RTCPSender::RegisterSendTransport(Transport* outgoingTransport) {
- CriticalSectionScoped lock(critical_section_transport_.get());
- cbTransport_ = outgoingTransport;
- return 0;
-}
-
-RTCPMethod RTCPSender::Status() const {
+RtcpMode RTCPSender::Status() const {
CriticalSectionScoped lock(critical_section_rtcp_sender_.get());
return method_;
}
-void RTCPSender::SetRTCPStatus(RTCPMethod method) {
+void RTCPSender::SetRTCPStatus(RtcpMode method) {
CriticalSectionScoped lock(critical_section_rtcp_sender_.get());
method_ = method;
- if (method == kRtcpOff)
+ if (method == RtcpMode::kOff)
return;
next_time_to_send_rtcp_ =
clock_->TimeInMilliseconds() +
@@ -230,7 +223,7 @@ int32_t RTCPSender::SetSendingStatus(const FeedbackState& feedback_state,
{
CriticalSectionScoped lock(critical_section_rtcp_sender_.get());
- if (method_ != kRtcpOff) {
+ if (method_ != RtcpMode::kOff) {
if (sending == false && sending_ == true) {
// Trigger RTCP bye
sendRTCPBye = true;
@@ -280,16 +273,6 @@ void RTCPSender::SetTMMBRStatus(bool enable) {
}
}
-bool RTCPSender::IJ() const {
- CriticalSectionScoped lock(critical_section_rtcp_sender_.get());
- return extended_jitter_report_enabled_;
-}
-
-void RTCPSender::SetIJStatus(bool enable) {
- CriticalSectionScoped lock(critical_section_rtcp_sender_.get());
- extended_jitter_report_enabled_ = enable;
-}
-
void RTCPSender::SetStartTimestamp(uint32_t start_timestamp) {
CriticalSectionScoped lock(critical_section_rtcp_sender_.get());
start_timestamp_ = start_timestamp;
@@ -328,7 +311,7 @@ int32_t RTCPSender::SetCNAME(const char* c_name) {
if (!c_name)
return -1;
- DCHECK_LT(strlen(c_name), static_cast<size_t>(RTCP_CNAME_SIZE));
+ RTC_DCHECK_LT(strlen(c_name), static_cast<size_t>(RTCP_CNAME_SIZE));
CriticalSectionScoped lock(critical_section_rtcp_sender_.get());
cname_ = c_name;
return 0;
@@ -336,7 +319,7 @@ int32_t RTCPSender::SetCNAME(const char* c_name) {
int32_t RTCPSender::AddMixedCNAME(uint32_t SSRC, const char* c_name) {
assert(c_name);
- DCHECK_LT(strlen(c_name), static_cast<size_t>(RTCP_CNAME_SIZE));
+ RTC_DCHECK_LT(strlen(c_name), static_cast<size_t>(RTCP_CNAME_SIZE));
CriticalSectionScoped lock(critical_section_rtcp_sender_.get());
if (csrc_cnames_.size() >= kRtpCsrcSize)
return -1;
@@ -419,7 +402,7 @@ From RFC 3550
CriticalSectionScoped lock(critical_section_rtcp_sender_.get());
- if (method_ == kRtcpOff)
+ if (method_ == RtcpMode::kOff)
return false;
if (!audio_ && sendKeyframeBeforeRTP) {
@@ -516,11 +499,8 @@ RTCPSender::BuildResult RTCPSender::BuildSR(RtcpContext* ctx) {
report.WithReportBlock(it.second);
PacketBuiltCallback callback(ctx);
- if (!report.BuildExternalBuffer(&ctx->buffer[ctx->position],
- ctx->buffer_size - ctx->position,
- &callback)) {
+ if (!callback.BuildPacket(report))
return BuildResult::kTruncated;
- }
report_blocks_.clear();
return BuildResult::kSuccess;
@@ -528,7 +508,7 @@ RTCPSender::BuildResult RTCPSender::BuildSR(RtcpContext* ctx) {
RTCPSender::BuildResult RTCPSender::BuildSDES(RtcpContext* ctx) {
size_t length_cname = cname_.length();
- CHECK_LT(length_cname, static_cast<size_t>(RTCP_CNAME_SIZE));
+ RTC_CHECK_LT(length_cname, static_cast<size_t>(RTCP_CNAME_SIZE));
rtcp::Sdes sdes;
sdes.WithCName(ssrc_, cname_);
@@ -537,10 +517,8 @@ RTCPSender::BuildResult RTCPSender::BuildSDES(RtcpContext* ctx) {
sdes.WithCName(it.first, it.second);
PacketBuiltCallback callback(ctx);
- if (!sdes.BuildExternalBuffer(&ctx->buffer[ctx->position],
- ctx->buffer_size - ctx->position, &callback)) {
+ if (!callback.BuildPacket(sdes))
return BuildResult::kTruncated;
- }
return BuildResult::kSuccess;
}
@@ -552,75 +530,22 @@ RTCPSender::BuildResult RTCPSender::BuildRR(RtcpContext* ctx) {
report.WithReportBlock(it.second);
PacketBuiltCallback callback(ctx);
- if (!report.BuildExternalBuffer(&ctx->buffer[ctx->position],
- ctx->buffer_size - ctx->position,
- &callback)) {
+ if (!callback.BuildPacket(report))
return BuildResult::kTruncated;
- }
report_blocks_.clear();
return BuildResult::kSuccess;
}
-// From RFC 5450: Transmission Time Offsets in RTP Streams.
-// 0 1 2 3
-// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// hdr |V=2|P| RC | PT=IJ=195 | length |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// | inter-arrival jitter |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// . .
-// . .
-// . .
-// | inter-arrival jitter |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-//
-// If present, this RTCP packet must be placed after a receiver report
-// (inside a compound RTCP packet), and MUST have the same value for RC
-// (reception report count) as the receiver report.
-
-RTCPSender::BuildResult RTCPSender::BuildExtendedJitterReport(
- RtcpContext* ctx) {
- // sanity
- if (ctx->position + 8 >= IP_PACKET_SIZE)
- return BuildResult::kTruncated;
-
- // add picture loss indicator
- uint8_t RC = 1;
- *ctx->AllocateData(1) = 0x80 + RC;
- *ctx->AllocateData(1) = 195;
-
- // Used fixed length of 2
- *ctx->AllocateData(1) = 0;
- *ctx->AllocateData(1) = 1;
-
- // Add inter-arrival jitter
- ByteWriter<uint32_t>::WriteBigEndian(ctx->AllocateData(4),
- ctx->jitter_transmission_offset);
- return BuildResult::kSuccess;
-}
-
RTCPSender::BuildResult RTCPSender::BuildPLI(RtcpContext* ctx) {
- // sanity
- if (ctx->position + 12 >= IP_PACKET_SIZE)
- return BuildResult::kTruncated;
-
- // add picture loss indicator
- uint8_t FMT = 1;
- *ctx->AllocateData(1) = 0x80 + FMT;
- *ctx->AllocateData(1) = 206;
+ rtcp::Pli pli;
+ pli.From(ssrc_);
+ pli.To(remote_ssrc_);
- // Used fixed length of 2
- *ctx->AllocateData(1) = 0;
- *ctx->AllocateData(1) = 2;
-
- // Add our own SSRC
- ByteWriter<uint32_t>::WriteBigEndian(ctx->AllocateData(4), ssrc_);
-
- // Add the remote SSRC
- ByteWriter<uint32_t>::WriteBigEndian(ctx->AllocateData(4), remote_ssrc_);
+ PacketBuiltCallback callback(ctx);
+ if (!callback.BuildPacket(pli))
+ return BuildResult::kTruncated;
TRACE_EVENT_INSTANT0(TRACE_DISABLED_BY_DEFAULT("webrtc_rtp"),
"RTCPSender::PLI");
@@ -632,36 +557,17 @@ RTCPSender::BuildResult RTCPSender::BuildPLI(RtcpContext* ctx) {
}
RTCPSender::BuildResult RTCPSender::BuildFIR(RtcpContext* ctx) {
- // sanity
- if (ctx->position + 20 >= IP_PACKET_SIZE)
- return BuildResult::kTruncated;
-
if (!ctx->repeat)
- sequence_number_fir_++; // do not increase if repetition
-
- // add full intra request indicator
- uint8_t FMT = 4;
- *ctx->AllocateData(1) = 0x80 + FMT;
- *ctx->AllocateData(1) = 206;
-
- //Length of 4
- *ctx->AllocateData(1) = 0;
- *ctx->AllocateData(1) = 4;
-
- // Add our own SSRC
- ByteWriter<uint32_t>::WriteBigEndian(ctx->AllocateData(4), ssrc_);
-
- // RFC 5104 4.3.1.2. Semantics
- // SSRC of media source
- ByteWriter<uint32_t>::WriteBigEndian(ctx->AllocateData(4), 0);
+ ++sequence_number_fir_; // Do not increase if repetition.
- // Additional Feedback Control Information (FCI)
- ByteWriter<uint32_t>::WriteBigEndian(ctx->AllocateData(4), remote_ssrc_);
+ rtcp::Fir fir;
+ fir.From(ssrc_);
+ fir.To(remote_ssrc_);
+ fir.WithCommandSeqNum(sequence_number_fir_);
- *ctx->AllocateData(1) = sequence_number_fir_;
- *ctx->AllocateData(1) = 0;
- *ctx->AllocateData(1) = 0;
- *ctx->AllocateData(1) = 0;
+ PacketBuiltCallback callback(ctx);
+ if (!callback.BuildPacket(fir))
+ return BuildResult::kTruncated;
TRACE_EVENT_INSTANT0(TRACE_DISABLED_BY_DEFAULT("webrtc_rtp"),
"RTCPSender::FIR");
@@ -680,30 +586,17 @@ RTCPSender::BuildResult RTCPSender::BuildFIR(RtcpContext* ctx) {
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*/
RTCPSender::BuildResult RTCPSender::BuildSLI(RtcpContext* ctx) {
- // sanity
- if (ctx->position + 16 >= IP_PACKET_SIZE)
- return BuildResult::kTruncated;
+ rtcp::Sli sli;
+ sli.From(ssrc_);
+ sli.To(remote_ssrc_);
+ // Crop picture id to 6 least significant bits.
+ sli.WithPictureId(ctx->picture_id & 0x3F);
+ sli.WithFirstMb(0);
+ sli.WithNumberOfMb(0x1FFF); // 13 bits, only ones for now.
- // add slice loss indicator
- uint8_t FMT = 2;
- *ctx->AllocateData(1) = 0x80 + FMT;
- *ctx->AllocateData(1) = 206;
-
- // Used fixed length of 3
- *ctx->AllocateData(1) = 0;
- *ctx->AllocateData(1) = 3;
-
- // Add our own SSRC
- ByteWriter<uint32_t>::WriteBigEndian(ctx->AllocateData(4), ssrc_);
-
- // Add the remote SSRC
- ByteWriter<uint32_t>::WriteBigEndian(ctx->AllocateData(4), remote_ssrc_);
-
- // Add first, number & picture ID 6 bits
- // first = 0, 13 - bits
- // number = 0x1fff, 13 - bits only ones for now
- uint32_t sliField = (0x1fff << 6) + (0x3f & ctx->picture_id);
- ByteWriter<uint32_t>::WriteBigEndian(ctx->AllocateData(4), sliField);
+ PacketBuiltCallback callback(ctx);
+ if (!callback.BuildPacket(sli))
+ return BuildResult::kTruncated;
return BuildResult::kSuccess;
}
@@ -724,106 +617,29 @@ RTCPSender::BuildResult RTCPSender::BuildRPSI(RtcpContext* ctx) {
if (ctx->feedback_state.send_payload_type == 0xFF)
return BuildResult::kError;
- // sanity
- if (ctx->position + 24 >= IP_PACKET_SIZE)
- return BuildResult::kTruncated;
-
- // add Reference Picture Selection Indication
- uint8_t FMT = 3;
- *ctx->AllocateData(1) = 0x80 + FMT;
- *ctx->AllocateData(1) = 206;
-
- // calc length
- uint32_t bitsRequired = 7;
- uint8_t bytesRequired = 1;
- while ((ctx->picture_id >> bitsRequired) > 0) {
- bitsRequired += 7;
- bytesRequired++;
- }
-
- uint8_t size = 3;
- if (bytesRequired > 6) {
- size = 5;
- } else if (bytesRequired > 2) {
- size = 4;
- }
- *ctx->AllocateData(1) = 0;
- *ctx->AllocateData(1) = size;
+ rtcp::Rpsi rpsi;
+ rpsi.From(ssrc_);
+ rpsi.To(remote_ssrc_);
+ rpsi.WithPayloadType(ctx->feedback_state.send_payload_type);
+ rpsi.WithPictureId(ctx->picture_id);
- // Add our own SSRC
- ByteWriter<uint32_t>::WriteBigEndian(ctx->AllocateData(4), ssrc_);
-
- // Add the remote SSRC
- ByteWriter<uint32_t>::WriteBigEndian(ctx->AllocateData(4), remote_ssrc_);
-
- // calc padding length
- uint8_t paddingBytes = 4 - ((2 + bytesRequired) % 4);
- if (paddingBytes == 4)
- paddingBytes = 0;
- // add padding length in bits
- *ctx->AllocateData(1) = paddingBytes * 8; // padding can be 0, 8, 16 or 24
-
- // add payload type
- *ctx->AllocateData(1) = ctx->feedback_state.send_payload_type;
-
- // add picture ID
- for (int i = bytesRequired - 1; i > 0; --i) {
- *ctx->AllocateData(1) =
- 0x80 | static_cast<uint8_t>(ctx->picture_id >> (i * 7));
- }
- // add last byte of picture ID
- *ctx->AllocateData(1) = static_cast<uint8_t>(ctx->picture_id & 0x7f);
-
- // add padding
- for (int j = 0; j < paddingBytes; j++) {
- *ctx->AllocateData(1) = 0;
- }
+ PacketBuiltCallback callback(ctx);
+ if (!callback.BuildPacket(rpsi))
+ return BuildResult::kTruncated;
return BuildResult::kSuccess;
}
RTCPSender::BuildResult RTCPSender::BuildREMB(RtcpContext* ctx) {
- // sanity
- if (ctx->position + 20 + 4 * remb_ssrcs_.size() >= IP_PACKET_SIZE)
- return BuildResult::kTruncated;
-
- // add application layer feedback
- uint8_t FMT = 15;
- *ctx->AllocateData(1) = 0x80 + FMT;
- *ctx->AllocateData(1) = 206;
+ rtcp::Remb remb;
+ remb.From(ssrc_);
+ for (uint32_t ssrc : remb_ssrcs_)
+ remb.AppliesTo(ssrc);
+ remb.WithBitrateBps(remb_bitrate_);
- *ctx->AllocateData(1) = 0;
- *ctx->AllocateData(1) = static_cast<uint8_t>(remb_ssrcs_.size() + 4);
-
- // Add our own SSRC
- ByteWriter<uint32_t>::WriteBigEndian(ctx->AllocateData(4), ssrc_);
-
- // Remote SSRC must be 0
- ByteWriter<uint32_t>::WriteBigEndian(ctx->AllocateData(4), 0);
-
- *ctx->AllocateData(1) = 'R';
- *ctx->AllocateData(1) = 'E';
- *ctx->AllocateData(1) = 'M';
- *ctx->AllocateData(1) = 'B';
-
- *ctx->AllocateData(1) = remb_ssrcs_.size();
- // 6 bit Exp
- // 18 bit mantissa
- uint8_t brExp = 0;
- for (uint32_t i = 0; i < 64; i++) {
- if (remb_bitrate_ <= (0x3FFFFu << i)) {
- brExp = i;
- break;
- }
- }
- const uint32_t brMantissa = (remb_bitrate_ >> brExp);
- *ctx->AllocateData(1) =
- static_cast<uint8_t>((brExp << 2) + ((brMantissa >> 16) & 0x03));
- *ctx->AllocateData(1) = static_cast<uint8_t>(brMantissa >> 8);
- *ctx->AllocateData(1) = static_cast<uint8_t>(brMantissa);
-
- for (size_t i = 0; i < remb_ssrcs_.size(); i++)
- ByteWriter<uint32_t>::WriteBigEndian(ctx->AllocateData(4), remb_ssrcs_[i]);
+ PacketBuiltCallback callback(ctx);
+ if (!callback.BuildPacket(remb))
+ return BuildResult::kTruncated;
TRACE_EVENT_INSTANT0(TRACE_DISABLED_BY_DEFAULT("webrtc_rtp"),
"RTCPSender::REMB");
@@ -883,46 +699,15 @@ RTCPSender::BuildResult RTCPSender::BuildTMMBR(RtcpContext* ctx) {
}
if (tmmbr_send_) {
- // sanity
- if (ctx->position + 20 >= IP_PACKET_SIZE)
+ rtcp::Tmmbr tmmbr;
+ tmmbr.From(ssrc_);
+ tmmbr.To(remote_ssrc_);
+ tmmbr.WithBitrateKbps(tmmbr_send_);
+ tmmbr.WithOverhead(packet_oh_send_);
+
+ PacketBuiltCallback callback(ctx);
+ if (!callback.BuildPacket(tmmbr))
return BuildResult::kTruncated;
-
- // add TMMBR indicator
- uint8_t FMT = 3;
- *ctx->AllocateData(1) = 0x80 + FMT;
- *ctx->AllocateData(1) = 205;
-
- // Length of 4
- *ctx->AllocateData(1) = 0;
- *ctx->AllocateData(1) = 4;
-
- // Add our own SSRC
- ByteWriter<uint32_t>::WriteBigEndian(ctx->AllocateData(4), ssrc_);
-
- // RFC 5104 4.2.1.2. Semantics
-
- // SSRC of media source
- ByteWriter<uint32_t>::WriteBigEndian(ctx->AllocateData(4), 0);
-
- // Additional Feedback Control Information (FCI)
- ByteWriter<uint32_t>::WriteBigEndian(ctx->AllocateData(4), remote_ssrc_);
-
- uint32_t bitRate = tmmbr_send_ * 1000;
- uint32_t mmbrExp = 0;
- for (uint32_t i = 0; i < 64; i++) {
- if (bitRate <= (0x1FFFFu << i)) {
- mmbrExp = i;
- break;
- }
- }
- uint32_t mmbrMantissa = (bitRate >> mmbrExp);
-
- *ctx->AllocateData(1) =
- static_cast<uint8_t>((mmbrExp << 2) + ((mmbrMantissa >> 15) & 0x03));
- *ctx->AllocateData(1) = static_cast<uint8_t>(mmbrMantissa >> 7);
- *ctx->AllocateData(1) = static_cast<uint8_t>(
- (mmbrMantissa << 1) + ((packet_oh_send_ >> 8) & 0x01));
- *ctx->AllocateData(1) = static_cast<uint8_t>(packet_oh_send_);
}
return BuildResult::kSuccess;
}
@@ -932,90 +717,32 @@ RTCPSender::BuildResult RTCPSender::BuildTMMBN(RtcpContext* ctx) {
if (boundingSet == NULL)
return BuildResult::kError;
- // sanity
- if (ctx->position + 12 + boundingSet->lengthOfSet() * 8 >= IP_PACKET_SIZE) {
- LOG(LS_WARNING) << "Failed to build TMMBN.";
- return BuildResult::kTruncated;
- }
-
- uint8_t FMT = 4;
- // add TMMBN indicator
- *ctx->AllocateData(1) = 0x80 + FMT;
- *ctx->AllocateData(1) = 205;
-
- // Add length later
- int posLength = ctx->position;
- ctx->AllocateData(2);
-
- // Add our own SSRC
- ByteWriter<uint32_t>::WriteBigEndian(ctx->AllocateData(4), ssrc_);
-
- // RFC 5104 4.2.2.2. Semantics
-
- // SSRC of media source
- ByteWriter<uint32_t>::WriteBigEndian(ctx->AllocateData(4), 0);
-
- // Additional Feedback Control Information (FCI)
- int numBoundingSet = 0;
- for (uint32_t n = 0; n < boundingSet->lengthOfSet(); n++) {
- if (boundingSet->Tmmbr(n) > 0) {
- uint32_t tmmbrSSRC = boundingSet->Ssrc(n);
- ByteWriter<uint32_t>::WriteBigEndian(ctx->AllocateData(4), tmmbrSSRC);
-
- uint32_t bitRate = boundingSet->Tmmbr(n) * 1000;
- uint32_t mmbrExp = 0;
- for (int i = 0; i < 64; i++) {
- if (bitRate <= (0x1FFFFu << i)) {
- mmbrExp = i;
- break;
- }
- }
- uint32_t mmbrMantissa = (bitRate >> mmbrExp);
- uint32_t measuredOH = boundingSet->PacketOH(n);
-
- *ctx->AllocateData(1) =
- static_cast<uint8_t>((mmbrExp << 2) + ((mmbrMantissa >> 15) & 0x03));
- *ctx->AllocateData(1) = static_cast<uint8_t>(mmbrMantissa >> 7);
- *ctx->AllocateData(1) = static_cast<uint8_t>((mmbrMantissa << 1) +
- ((measuredOH >> 8) & 0x01));
- *ctx->AllocateData(1) = static_cast<uint8_t>(measuredOH);
- numBoundingSet++;
+ rtcp::Tmmbn tmmbn;
+ tmmbn.From(ssrc_);
+ for (uint32_t i = 0; i < boundingSet->lengthOfSet(); i++) {
+ if (boundingSet->Tmmbr(i) > 0) {
+ tmmbn.WithTmmbr(boundingSet->Ssrc(i), boundingSet->Tmmbr(i),
+ boundingSet->PacketOH(i));
}
}
- uint16_t length = static_cast<uint16_t>(2 + 2 * numBoundingSet);
- ctx->buffer[posLength++] = static_cast<uint8_t>(length >> 8);
- ctx->buffer[posLength] = static_cast<uint8_t>(length);
+
+ PacketBuiltCallback callback(ctx);
+ if (!callback.BuildPacket(tmmbn))
+ return BuildResult::kTruncated;
return BuildResult::kSuccess;
}
RTCPSender::BuildResult RTCPSender::BuildAPP(RtcpContext* ctx) {
- // sanity
- if (app_data_ == NULL) {
- LOG(LS_WARNING) << "Failed to build app specific.";
- return BuildResult::kError;
- }
- if (ctx->position + 12 + app_length_ >= IP_PACKET_SIZE) {
- LOG(LS_WARNING) << "Failed to build app specific.";
- return BuildResult::kTruncated;
- }
- *ctx->AllocateData(1) = 0x80 + app_sub_type_;
+ rtcp::App app;
+ app.From(ssrc_);
+ app.WithSubType(app_sub_type_);
+ app.WithName(app_name_);
+ app.WithData(app_data_.get(), app_length_);
- // Add APP ID
- *ctx->AllocateData(1) = 204;
-
- uint16_t length = (app_length_ >> 2) + 2; // include SSRC and name
- *ctx->AllocateData(1) = static_cast<uint8_t>(length >> 8);
- *ctx->AllocateData(1) = static_cast<uint8_t>(length);
-
- // Add our own SSRC
- ByteWriter<uint32_t>::WriteBigEndian(ctx->AllocateData(4), ssrc_);
-
- // Add our application name
- ByteWriter<uint32_t>::WriteBigEndian(ctx->AllocateData(4), app_name_);
-
- // Add the data
- memcpy(ctx->AllocateData(app_length_), app_data_.get(), app_length_);
+ PacketBuiltCallback callback(ctx);
+ if (!callback.BuildPacket(app))
+ return BuildResult::kTruncated;
return BuildResult::kSuccess;
}
@@ -1093,34 +820,20 @@ RTCPSender::BuildResult RTCPSender::BuildNACK(RtcpContext* ctx) {
}
RTCPSender::BuildResult RTCPSender::BuildBYE(RtcpContext* ctx) {
- // sanity
- if (ctx->position + 8 >= IP_PACKET_SIZE)
- return BuildResult::kTruncated;
-
- // Add a bye packet
- // Number of SSRC + CSRCs.
- *ctx->AllocateData(1) = static_cast<uint8_t>(0x80 + 1 + csrcs_.size());
- *ctx->AllocateData(1) = 203;
+ rtcp::Bye bye;
+ bye.From(ssrc_);
+ for (uint32_t csrc : csrcs_)
+ bye.WithCsrc(csrc);
- // length
- *ctx->AllocateData(1) = 0;
- *ctx->AllocateData(1) = static_cast<uint8_t>(1 + csrcs_.size());
-
- // Add our own SSRC
- ByteWriter<uint32_t>::WriteBigEndian(ctx->AllocateData(4), ssrc_);
-
- // add CSRCs
- for (size_t i = 0; i < csrcs_.size(); i++)
- ByteWriter<uint32_t>::WriteBigEndian(ctx->AllocateData(4), csrcs_[i]);
+ PacketBuiltCallback callback(ctx);
+ if (!callback.BuildPacket(bye))
+ return BuildResult::kTruncated;
return BuildResult::kSuccess;
}
RTCPSender::BuildResult RTCPSender::BuildReceiverReferenceTime(
RtcpContext* ctx) {
- const int kRrTimeBlockLength = 20;
- if (ctx->position + kRrTimeBlockLength >= IP_PACKET_SIZE)
- return BuildResult::kTruncated;
if (last_xr_rr_.size() >= RTCP_NUMBER_OF_SR)
last_xr_rr_.erase(last_xr_rr_.begin());
@@ -1128,146 +841,74 @@ RTCPSender::BuildResult RTCPSender::BuildReceiverReferenceTime(
RTCPUtility::MidNtp(ctx->ntp_sec, ctx->ntp_frac),
Clock::NtpToMs(ctx->ntp_sec, ctx->ntp_frac)));
- // Add XR header.
- *ctx->AllocateData(1) = 0x80;
- *ctx->AllocateData(1) = 207;
- ByteWriter<uint16_t>::WriteBigEndian(ctx->AllocateData(2),
- 4); // XR packet length.
+ rtcp::Xr xr;
+ xr.From(ssrc_);
- // Add our own SSRC.
- ByteWriter<uint32_t>::WriteBigEndian(ctx->AllocateData(4), ssrc_);
+ rtcp::Rrtr rrtr;
+ rrtr.WithNtpSec(ctx->ntp_sec);
+ rrtr.WithNtpFrac(ctx->ntp_frac);
+
+ xr.WithRrtr(&rrtr);
+
+ // TODO(sprang): Merge XR report sending to contain all of RRTR, DLRR, VOIP?
- // 0 1 2 3
- // 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
- // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- // | BT=4 | reserved | block length = 2 |
- // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- // | NTP timestamp, most significant word |
- // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- // | NTP timestamp, least significant word |
- // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-
- // Add Receiver Reference Time Report block.
- *ctx->AllocateData(1) = 4; // BT.
- *ctx->AllocateData(1) = 0; // Reserved.
- ByteWriter<uint16_t>::WriteBigEndian(ctx->AllocateData(2),
- 2); // Block length.
-
- // NTP timestamp.
- ByteWriter<uint32_t>::WriteBigEndian(ctx->AllocateData(4), ctx->ntp_sec);
- ByteWriter<uint32_t>::WriteBigEndian(ctx->AllocateData(4), ctx->ntp_frac);
+ PacketBuiltCallback callback(ctx);
+ if (!callback.BuildPacket(xr))
+ return BuildResult::kTruncated;
return BuildResult::kSuccess;
}
RTCPSender::BuildResult RTCPSender::BuildDlrr(RtcpContext* ctx) {
- const int kDlrrBlockLength = 24;
- if (ctx->position + kDlrrBlockLength >= IP_PACKET_SIZE)
- return BuildResult::kTruncated;
+ rtcp::Xr xr;
+ xr.From(ssrc_);
- // Add XR header.
- *ctx->AllocateData(1) = 0x80;
- *ctx->AllocateData(1) = 207;
- ByteWriter<uint16_t>::WriteBigEndian(ctx->AllocateData(2),
- 5); // XR packet length.
-
- // Add our own SSRC.
- ByteWriter<uint32_t>::WriteBigEndian(ctx->AllocateData(4), ssrc_);
+ rtcp::Dlrr dlrr;
+ const RtcpReceiveTimeInfo& info = ctx->feedback_state.last_xr_rr;
+ dlrr.WithDlrrItem(info.sourceSSRC, info.lastRR, info.delaySinceLastRR);
- // 0 1 2 3
- // 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
- // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- // | BT=5 | reserved | block length |
- // +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
- // | SSRC_1 (SSRC of first receiver) | sub-
- // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ block
- // | last RR (LRR) | 1
- // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- // | delay since last RR (DLRR) |
- // +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
- // | SSRC_2 (SSRC of second receiver) | sub-
- // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ block
- // : ... : 2
-
- // Add DLRR sub block.
- *ctx->AllocateData(1) = 5; // BT.
- *ctx->AllocateData(1) = 0; // Reserved.
- ByteWriter<uint16_t>::WriteBigEndian(ctx->AllocateData(2),
- 3); // Block length.
-
- // NTP timestamp.
+ xr.WithDlrr(&dlrr);
- const RtcpReceiveTimeInfo& info = ctx->feedback_state.last_xr_rr;
- ByteWriter<uint32_t>::WriteBigEndian(ctx->AllocateData(4), info.sourceSSRC);
- ByteWriter<uint32_t>::WriteBigEndian(ctx->AllocateData(4), info.lastRR);
- ByteWriter<uint32_t>::WriteBigEndian(ctx->AllocateData(4),
- info.delaySinceLastRR);
+ PacketBuiltCallback callback(ctx);
+ if (!callback.BuildPacket(xr))
+ return BuildResult::kTruncated;
return BuildResult::kSuccess;
}
// TODO(sprang): Add a unit test for this, or remove if the code isn't used.
RTCPSender::BuildResult RTCPSender::BuildVoIPMetric(RtcpContext* ctx) {
- // sanity
- if (ctx->position + 44 >= IP_PACKET_SIZE)
- return BuildResult::kTruncated;
-
- // Add XR header
- *ctx->AllocateData(1) = 0x80;
- *ctx->AllocateData(1) = 207;
+ rtcp::Xr xr;
+ xr.From(ssrc_);
+
+ rtcp::VoipMetric voip;
+ voip.To(remote_ssrc_);
+ voip.LossRate(xr_voip_metric_.lossRate);
+ voip.DiscardRate(xr_voip_metric_.discardRate);
+ voip.BurstDensity(xr_voip_metric_.burstDensity);
+ voip.GapDensity(xr_voip_metric_.gapDensity);
+ voip.BurstDuration(xr_voip_metric_.burstDuration);
+ voip.GapDuration(xr_voip_metric_.gapDuration);
+ voip.RoundTripDelay(xr_voip_metric_.roundTripDelay);
+ voip.EndSystemDelay(xr_voip_metric_.endSystemDelay);
+ voip.SignalLevel(xr_voip_metric_.signalLevel);
+ voip.NoiseLevel(xr_voip_metric_.noiseLevel);
+ voip.Rerl(xr_voip_metric_.RERL);
+ voip.Gmin(xr_voip_metric_.Gmin);
+ voip.Rfactor(xr_voip_metric_.Rfactor);
+ voip.ExtRfactor(xr_voip_metric_.extRfactor);
+ voip.MosLq(xr_voip_metric_.MOSLQ);
+ voip.MosCq(xr_voip_metric_.MOSCQ);
+ voip.RxConfig(xr_voip_metric_.RXconfig);
+ voip.JbNominal(xr_voip_metric_.JBnominal);
+ voip.JbMax(xr_voip_metric_.JBmax);
+ voip.JbAbsMax(xr_voip_metric_.JBabsMax);
+
+ xr.WithVoipMetric(&voip);
- uint32_t XRLengthPos = ctx->position;
-
- // handle length later on
- ctx->AllocateData(2);
-
- // Add our own SSRC
- ByteWriter<uint32_t>::WriteBigEndian(ctx->AllocateData(4), ssrc_);
-
- // Add a VoIP metrics block
- *ctx->AllocateData(1) = 7;
- *ctx->AllocateData(1) = 0;
- ByteWriter<uint16_t>::WriteBigEndian(ctx->AllocateData(2), 8);
-
- // Add the remote SSRC
- ByteWriter<uint32_t>::WriteBigEndian(ctx->AllocateData(4), remote_ssrc_);
-
- *ctx->AllocateData(1) = xr_voip_metric_.lossRate;
- *ctx->AllocateData(1) = xr_voip_metric_.discardRate;
- *ctx->AllocateData(1) = xr_voip_metric_.burstDensity;
- *ctx->AllocateData(1) = xr_voip_metric_.gapDensity;
-
- ByteWriter<uint16_t>::WriteBigEndian(ctx->AllocateData(2),
- xr_voip_metric_.burstDuration);
- ByteWriter<uint16_t>::WriteBigEndian(ctx->AllocateData(2),
- xr_voip_metric_.gapDuration);
-
- ByteWriter<uint16_t>::WriteBigEndian(ctx->AllocateData(2),
- xr_voip_metric_.roundTripDelay);
- ByteWriter<uint16_t>::WriteBigEndian(ctx->AllocateData(2),
- xr_voip_metric_.endSystemDelay);
-
- *ctx->AllocateData(1) = xr_voip_metric_.signalLevel;
- *ctx->AllocateData(1) = xr_voip_metric_.noiseLevel;
- *ctx->AllocateData(1) = xr_voip_metric_.RERL;
- *ctx->AllocateData(1) = xr_voip_metric_.Gmin;
-
- *ctx->AllocateData(1) = xr_voip_metric_.Rfactor;
- *ctx->AllocateData(1) = xr_voip_metric_.extRfactor;
- *ctx->AllocateData(1) = xr_voip_metric_.MOSLQ;
- *ctx->AllocateData(1) = xr_voip_metric_.MOSCQ;
-
- *ctx->AllocateData(1) = xr_voip_metric_.RXconfig;
- *ctx->AllocateData(1) = 0; // reserved
-
- ByteWriter<uint16_t>::WriteBigEndian(ctx->AllocateData(2),
- xr_voip_metric_.JBnominal);
- ByteWriter<uint16_t>::WriteBigEndian(ctx->AllocateData(2),
- xr_voip_metric_.JBmax);
- ByteWriter<uint16_t>::WriteBigEndian(ctx->AllocateData(2),
- xr_voip_metric_.JBabsMax);
-
- ByteWriter<uint16_t>::WriteBigEndian(&ctx->buffer[XRLengthPos], 10);
+ PacketBuiltCallback callback(ctx);
+ if (!callback.BuildPacket(xr))
+ return BuildResult::kTruncated;
return BuildResult::kSuccess;
}
@@ -1292,7 +933,7 @@ int32_t RTCPSender::SendCompoundRTCP(
uint64_t pictureID) {
{
CriticalSectionScoped lock(critical_section_rtcp_sender_.get());
- if (method_ == kRtcpOff) {
+ if (method_ == RtcpMode::kOff) {
LOG(LS_WARNING) << "Can't send rtcp if it is disabled.";
return -1;
}
@@ -1333,11 +974,11 @@ int RTCPSender::PrepareRTCP(const FeedbackState& feedback_state,
if (IsFlagPresent(kRtcpSr) || IsFlagPresent(kRtcpRr)) {
// Report type already explicitly set, don't automatically populate.
generate_report = true;
- DCHECK(ConsumeFlag(kRtcpReport) == false);
+ RTC_DCHECK(ConsumeFlag(kRtcpReport) == false);
} else {
generate_report =
- (ConsumeFlag(kRtcpReport) && method_ == kRtcpNonCompound) ||
- method_ == kRtcpCompound;
+ (ConsumeFlag(kRtcpReport) && method_ == RtcpMode::kReducedSize) ||
+ method_ == RtcpMode::kCompound;
if (generate_report)
SetFlag(sending_ ? kRtcpSr : kRtcpRr, true);
}
@@ -1386,15 +1027,13 @@ int RTCPSender::PrepareRTCP(const FeedbackState& feedback_state,
AddReportBlock(report_block);
}
}
- if (extended_jitter_report_enabled_)
- SetFlag(kRtcpTransmissionTimeOffset, true);
}
}
auto it = report_flags_.begin();
while (it != report_flags_.end()) {
auto builder = builders_.find(it->type);
- DCHECK(builder != builders_.end());
+ RTC_DCHECK(builder != builders_.end());
if (it->is_volatile) {
report_flags_.erase(it++);
} else {
@@ -1423,7 +1062,7 @@ int RTCPSender::PrepareRTCP(const FeedbackState& feedback_state,
remote_ssrc_, packet_type_counter_);
}
- DCHECK(AllVolatileFlagsConsumed());
+ RTC_DCHECK(AllVolatileFlagsConsumed());
return context.position;
}
@@ -1470,11 +1109,8 @@ bool RTCPSender::PrepareReport(const FeedbackState& feedback_state,
}
int32_t RTCPSender::SendToNetwork(const uint8_t* dataBuffer, size_t length) {
- CriticalSectionScoped lock(critical_section_transport_.get());
- if (cbTransport_) {
- if (cbTransport_->SendRTCPPacket(id_, dataBuffer, length) > 0)
- return 0;
- }
+ if (transport_->SendRtcp(dataBuffer, length))
+ return 0;
return -1;
}
@@ -1564,4 +1200,24 @@ bool RTCPSender::AllVolatileFlagsConsumed() const {
return true;
}
+bool RTCPSender::SendFeedbackPacket(const rtcp::TransportFeedback& packet) {
+ class Sender : public rtcp::RtcpPacket::PacketReadyCallback {
+ public:
+ Sender(Transport* transport)
+ : transport_(transport), send_failure_(false) {}
+
+ void OnPacketReady(uint8_t* data, size_t length) override {
+ if (!transport_->SendRtcp(data, length))
+ send_failure_ = true;
+ }
+
+ Transport* const transport_;
+ bool send_failure_;
+ } sender(transport_);
+
+ uint8_t buffer[IP_PACKET_SIZE];
+ return packet.BuildExternalBuffer(buffer, IP_PACKET_SIZE, &sender) &&
+ !sender.send_failure_;
+}
+
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_sender.h b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_sender.h
index afe2eaeaf1f..9ec928363b6 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_sender.h
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_sender.h
@@ -26,6 +26,7 @@
#include "webrtc/modules/rtp_rtcp/source/rtcp_utility.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_utility.h"
#include "webrtc/modules/rtp_rtcp/source/tmmbr_help.h"
+#include "webrtc/transport.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -70,17 +71,15 @@ public:
ModuleRtpRtcpImpl* module;
};
- RTCPSender(int32_t id,
- bool audio,
+ RTCPSender(bool audio,
Clock* clock,
ReceiveStatistics* receive_statistics,
- RtcpPacketTypeCounterObserver* packet_type_counter_observer);
+ RtcpPacketTypeCounterObserver* packet_type_counter_observer,
+ Transport* outgoing_transport);
virtual ~RTCPSender();
- int32_t RegisterSendTransport(Transport* outgoingTransport);
-
- RTCPMethod Status() const;
- void SetRTCPStatus(RTCPMethod method);
+ RtcpMode Status() const;
+ void SetRTCPStatus(RtcpMode method);
bool Sending() const;
int32_t SetSendingStatus(const FeedbackState& feedback_state,
@@ -134,11 +133,6 @@ public:
int32_t SetTMMBN(const TMMBRSet* boundingSet, uint32_t maxBitrateKbit);
- // Extended jitter report
- bool IJ() const;
-
- void SetIJStatus(bool enable);
-
int32_t SetApplicationSpecificData(uint8_t subType,
uint32_t name,
const uint8_t* data,
@@ -152,6 +146,7 @@ public:
void SetCsrcs(const std::vector<uint32_t>& csrcs);
void SetTargetBitrate(unsigned int target_bitrate);
+ bool SendFeedbackPacket(const rtcp::TransportFeedback& packet);
private:
struct RtcpContext;
@@ -198,8 +193,6 @@ private:
EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_);
BuildResult BuildRR(RtcpContext* context)
EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_);
- BuildResult BuildExtendedJitterReport(RtcpContext* context)
- EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_);
BuildResult BuildSDES(RtcpContext* context)
EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_);
BuildResult BuildPLI(RtcpContext* context)
@@ -230,19 +223,16 @@ private:
EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_);
private:
- const int32_t id_;
const bool audio_;
Clock* const clock_;
- RTCPMethod method_ GUARDED_BY(critical_section_rtcp_sender_);
+ RtcpMode method_ GUARDED_BY(critical_section_rtcp_sender_);
- rtc::scoped_ptr<CriticalSectionWrapper> critical_section_transport_;
- Transport* cbTransport_ GUARDED_BY(critical_section_transport_);
+ Transport* const transport_;
rtc::scoped_ptr<CriticalSectionWrapper> critical_section_rtcp_sender_;
bool using_nack_ GUARDED_BY(critical_section_rtcp_sender_);
bool sending_ GUARDED_BY(critical_section_rtcp_sender_);
bool remb_enabled_ GUARDED_BY(critical_section_rtcp_sender_);
- bool extended_jitter_report_enabled_ GUARDED_BY(critical_section_rtcp_sender_);
int64_t next_time_to_send_rtcp_ GUARDED_BY(critical_section_rtcp_sender_);
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_sender_unittest.cc b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_sender_unittest.cc
index a0a79aa020a..ba42c8dd501 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_sender_unittest.cc
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_sender_unittest.cc
@@ -17,16 +17,11 @@
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/common_types.h"
-#include "webrtc/modules/remote_bitrate_estimator/include/mock/mock_remote_bitrate_observer.h"
-#include "webrtc/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_header_parser.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_payload_registry.h"
-#include "webrtc/modules/rtp_rtcp/interface/rtp_receiver.h"
-#include "webrtc/modules/rtp_rtcp/source/rtcp_receiver.h"
#include "webrtc/modules/rtp_rtcp/source/rtcp_sender.h"
-#include "webrtc/modules/rtp_rtcp/source/rtp_receiver_video.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.h"
-#include "webrtc/modules/rtp_rtcp/source/rtp_utility.h"
+#include "webrtc/test/rtcp_packet_parser.h"
+
+using ::testing::ElementsAre;
namespace webrtc {
@@ -185,257 +180,466 @@ TEST(NACKStringBuilderTest, TestCase13) {
EXPECT_EQ(std::string("5-6,9"), builder.GetResult());
}
-void CreateRtpPacket(const bool marker_bit, const uint8_t payload_type,
- const uint16_t seq_num, const uint32_t timestamp,
- const uint32_t ssrc, uint8_t* array,
- size_t* cur_pos) {
- ASSERT_LE(payload_type, 127);
- array[(*cur_pos)++] = 0x80;
- array[(*cur_pos)++] = payload_type | (marker_bit ? 0x80 : 0);
- array[(*cur_pos)++] = seq_num >> 8;
- array[(*cur_pos)++] = seq_num & 0xFF;
- array[(*cur_pos)++] = timestamp >> 24;
- array[(*cur_pos)++] = (timestamp >> 16) & 0xFF;
- array[(*cur_pos)++] = (timestamp >> 8) & 0xFF;
- array[(*cur_pos)++] = timestamp & 0xFF;
- array[(*cur_pos)++] = ssrc >> 24;
- array[(*cur_pos)++] = (ssrc >> 16) & 0xFF;
- array[(*cur_pos)++] = (ssrc >> 8) & 0xFF;
- array[(*cur_pos)++] = ssrc & 0xFF;
- // VP8 payload header
- array[(*cur_pos)++] = 0x90; // X bit = 1
- array[(*cur_pos)++] = 0x20; // T bit = 1
- array[(*cur_pos)++] = 0x00; // TID = 0
- array[(*cur_pos)++] = 0x00; // Key frame
- array[(*cur_pos)++] = 0x00;
- array[(*cur_pos)++] = 0x00;
- array[(*cur_pos)++] = 0x9d;
- array[(*cur_pos)++] = 0x01;
- array[(*cur_pos)++] = 0x2a;
- array[(*cur_pos)++] = 128;
- array[(*cur_pos)++] = 0;
- array[(*cur_pos)++] = 96;
- array[(*cur_pos)++] = 0;
-}
+class RtcpPacketTypeCounterObserverImpl : public RtcpPacketTypeCounterObserver {
+ public:
+ RtcpPacketTypeCounterObserverImpl() : ssrc_(0) {}
+ virtual ~RtcpPacketTypeCounterObserverImpl() {}
+ void RtcpPacketTypesCounterUpdated(
+ uint32_t ssrc,
+ const RtcpPacketTypeCounter& packet_counter) override {
+ ssrc_ = ssrc;
+ counter_ = packet_counter;
+ }
+ uint32_t ssrc_;
+ RtcpPacketTypeCounter counter_;
+};
class TestTransport : public Transport,
public NullRtpData {
public:
- TestTransport()
- : rtcp_receiver_(NULL) {
- }
- void SetRTCPReceiver(RTCPReceiver* rtcp_receiver) {
- rtcp_receiver_ = rtcp_receiver;
- }
- int SendPacket(int /*ch*/, const void* /*data*/, size_t /*len*/) override {
- return -1;
- }
+ TestTransport() {}
- int SendRTCPPacket(int /*ch*/,
- const void* packet,
- size_t packet_len) override {
- RTCPUtility::RTCPParserV2 rtcpParser((uint8_t*)packet,
- packet_len,
- true); // Allow non-compound RTCP
-
- EXPECT_TRUE(rtcpParser.IsValid());
- RTCPHelp::RTCPPacketInformation rtcpPacketInformation;
- EXPECT_EQ(0, rtcp_receiver_->IncomingRTCPPacket(rtcpPacketInformation,
- &rtcpParser));
- rtcp_packet_info_.rtcpPacketTypeFlags =
- rtcpPacketInformation.rtcpPacketTypeFlags;
- rtcp_packet_info_.remoteSSRC = rtcpPacketInformation.remoteSSRC;
- rtcp_packet_info_.applicationSubType =
- rtcpPacketInformation.applicationSubType;
- rtcp_packet_info_.applicationName = rtcpPacketInformation.applicationName;
- rtcp_packet_info_.report_blocks = rtcpPacketInformation.report_blocks;
- rtcp_packet_info_.rtt = rtcpPacketInformation.rtt;
- rtcp_packet_info_.interArrivalJitter =
- rtcpPacketInformation.interArrivalJitter;
- rtcp_packet_info_.sliPictureId = rtcpPacketInformation.sliPictureId;
- rtcp_packet_info_.rpsiPictureId = rtcpPacketInformation.rpsiPictureId;
- rtcp_packet_info_.receiverEstimatedMaxBitrate =
- rtcpPacketInformation.receiverEstimatedMaxBitrate;
- rtcp_packet_info_.ntp_secs = rtcpPacketInformation.ntp_secs;
- rtcp_packet_info_.ntp_frac = rtcpPacketInformation.ntp_frac;
- rtcp_packet_info_.rtp_timestamp = rtcpPacketInformation.rtp_timestamp;
-
- return static_cast<int>(packet_len);
+ bool SendRtp(const uint8_t* /*data*/,
+ size_t /*len*/,
+ const PacketOptions& options) override {
+ return false;
}
-
- int OnReceivedPayloadData(const uint8_t* payloadData,
- const size_t payloadSize,
- const WebRtcRTPHeader* rtpHeader) override {
+ bool SendRtcp(const uint8_t* data, size_t len) override {
+ parser_.Parse(static_cast<const uint8_t*>(data), len);
+ return true;
+ }
+ int OnReceivedPayloadData(const uint8_t* payload_data,
+ const size_t payload_size,
+ const WebRtcRTPHeader* rtp_header) override {
return 0;
}
- RTCPReceiver* rtcp_receiver_;
- RTCPHelp::RTCPPacketInformation rtcp_packet_info_;
+ test::RtcpPacketParser parser_;
};
+namespace {
+static const uint32_t kSenderSsrc = 0x11111111;
+static const uint32_t kRemoteSsrc = 0x22222222;
+}
+
class RtcpSenderTest : public ::testing::Test {
protected:
- static const uint32_t kRemoteBitrateEstimatorMinBitrateBps = 30000;
-
RtcpSenderTest()
- : over_use_detector_options_(),
- clock_(1335900000),
- rtp_payload_registry_(
- new RTPPayloadRegistry(RTPPayloadStrategy::CreateStrategy(false))),
- remote_bitrate_observer_(),
- remote_bitrate_estimator_(new RemoteBitrateEstimatorSingleStream(
- &remote_bitrate_observer_,
- &clock_,
- kRemoteBitrateEstimatorMinBitrateBps)),
+ : clock_(1335900000),
receive_statistics_(ReceiveStatistics::Create(&clock_)) {
- test_transport_ = new TestTransport();
-
RtpRtcp::Configuration configuration;
- configuration.id = 0;
configuration.audio = false;
configuration.clock = &clock_;
- configuration.outgoing_transport = test_transport_;
- configuration.remote_bitrate_estimator = remote_bitrate_estimator_.get();
-
- rtp_rtcp_impl_ = new ModuleRtpRtcpImpl(configuration);
- rtp_receiver_.reset(RtpReceiver::CreateVideoReceiver(
- 0, &clock_, test_transport_, NULL, rtp_payload_registry_.get()));
- rtcp_sender_ =
- new RTCPSender(0, false, &clock_, receive_statistics_.get(), NULL);
- rtcp_receiver_ =
- new RTCPReceiver(0, &clock_, false, NULL, NULL, NULL, rtp_rtcp_impl_);
- test_transport_->SetRTCPReceiver(rtcp_receiver_);
- // Initialize
- EXPECT_EQ(0, rtcp_sender_->RegisterSendTransport(test_transport_));
+ configuration.outgoing_transport = &test_transport_;
+
+ rtp_rtcp_impl_.reset(new ModuleRtpRtcpImpl(configuration));
+ rtcp_sender_.reset(new RTCPSender(false, &clock_, receive_statistics_.get(),
+ nullptr, &test_transport_));
+ rtcp_sender_->SetSSRC(kSenderSsrc);
+ rtcp_sender_->SetRemoteSSRC(kRemoteSsrc);
}
- ~RtcpSenderTest() {
- delete rtcp_sender_;
- delete rtcp_receiver_;
- delete rtp_rtcp_impl_;
- delete test_transport_;
+
+ void InsertIncomingPacket(uint32_t remote_ssrc, uint16_t seq_num) {
+ RTPHeader header;
+ header.ssrc = remote_ssrc;
+ header.sequenceNumber = seq_num;
+ header.timestamp = 12345;
+ header.headerLength = 12;
+ size_t kPacketLength = 100;
+ receive_statistics_->IncomingPacket(header, kPacketLength, false);
}
- // Helper function: Incoming RTCP has a specific packet type.
- bool gotPacketType(RTCPPacketType packet_type) {
- return ((test_transport_->rtcp_packet_info_.rtcpPacketTypeFlags) &
- packet_type) != 0U;
+ test::RtcpPacketParser* parser() { return &test_transport_.parser_; }
+
+ RTCPSender::FeedbackState feedback_state() {
+ return rtp_rtcp_impl_->GetFeedbackState();
}
- OverUseDetectorOptions over_use_detector_options_;
SimulatedClock clock_;
- rtc::scoped_ptr<RTPPayloadRegistry> rtp_payload_registry_;
- rtc::scoped_ptr<RtpReceiver> rtp_receiver_;
- ModuleRtpRtcpImpl* rtp_rtcp_impl_;
- RTCPSender* rtcp_sender_;
- RTCPReceiver* rtcp_receiver_;
- TestTransport* test_transport_;
- MockRemoteBitrateObserver remote_bitrate_observer_;
- rtc::scoped_ptr<RemoteBitrateEstimator> remote_bitrate_estimator_;
+ TestTransport test_transport_;
rtc::scoped_ptr<ReceiveStatistics> receive_statistics_;
-
- enum {kMaxPacketLength = 1500};
- uint8_t packet_[kMaxPacketLength];
+ rtc::scoped_ptr<ModuleRtpRtcpImpl> rtp_rtcp_impl_;
+ rtc::scoped_ptr<RTCPSender> rtcp_sender_;
};
-TEST_F(RtcpSenderTest, RtcpOff) {
- rtcp_sender_->SetRTCPStatus(kRtcpOff);
- RTCPSender::FeedbackState feedback_state = rtp_rtcp_impl_->GetFeedbackState();
- EXPECT_EQ(-1, rtcp_sender_->SendRTCP(feedback_state, kRtcpSr));
-}
-
-TEST_F(RtcpSenderTest, IJStatus) {
- ASSERT_FALSE(rtcp_sender_->IJ());
- rtcp_sender_->SetIJStatus(true);
- EXPECT_TRUE(rtcp_sender_->IJ());
-}
-
-TEST_F(RtcpSenderTest, TestCompound) {
- const bool marker_bit = false;
- const uint8_t payload_type = 100;
- const uint16_t seq_num = 11111;
- const uint32_t timestamp = 1234567;
- const uint32_t ssrc = 0x11111111;
- size_t packet_length = 0;
- CreateRtpPacket(marker_bit, payload_type, seq_num, timestamp, ssrc, packet_,
- &packet_length);
- EXPECT_EQ(25u, packet_length);
-
- VideoCodec codec_inst;
- strncpy(codec_inst.plName, "VP8", webrtc::kPayloadNameSize - 1);
- codec_inst.codecType = webrtc::kVideoCodecVP8;
- codec_inst.plType = payload_type;
- EXPECT_EQ(0, rtp_receiver_->RegisterReceivePayload(codec_inst.plName,
- codec_inst.plType,
- 90000,
- 0,
- codec_inst.maxBitrate));
-
- // Make sure RTP packet has been received.
- rtc::scoped_ptr<RtpHeaderParser> parser(RtpHeaderParser::Create());
- RTPHeader header;
- EXPECT_TRUE(parser->Parse(packet_, packet_length, &header));
- PayloadUnion payload_specific;
- EXPECT_TRUE(rtp_payload_registry_->GetPayloadSpecifics(header.payloadType,
- &payload_specific));
- receive_statistics_->IncomingPacket(header, packet_length, false);
- EXPECT_TRUE(rtp_receiver_->IncomingRtpPacket(header, packet_, packet_length,
- payload_specific, true));
-
- rtcp_sender_->SetIJStatus(true);
- rtcp_sender_->SetRTCPStatus(kRtcpCompound);
- RTCPSender::FeedbackState feedback_state = rtp_rtcp_impl_->GetFeedbackState();
- EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state, kRtcpRr));
+TEST_F(RtcpSenderTest, SetRtcpStatus) {
+ EXPECT_EQ(RtcpMode::kOff, rtcp_sender_->Status());
+ rtcp_sender_->SetRTCPStatus(RtcpMode::kReducedSize);
+ EXPECT_EQ(RtcpMode::kReducedSize, rtcp_sender_->Status());
+}
+
+TEST_F(RtcpSenderTest, SetSendingStatus) {
+ EXPECT_FALSE(rtcp_sender_->Sending());
+ EXPECT_EQ(0, rtcp_sender_->SetSendingStatus(feedback_state(), true));
+ EXPECT_TRUE(rtcp_sender_->Sending());
+}
- // Transmission time offset packet should be received.
- ASSERT_TRUE(test_transport_->rtcp_packet_info_.rtcpPacketTypeFlags &
- kRtcpTransmissionTimeOffset);
+TEST_F(RtcpSenderTest, NoPacketSentIfOff) {
+ rtcp_sender_->SetRTCPStatus(RtcpMode::kOff);
+ EXPECT_EQ(-1, rtcp_sender_->SendRTCP(feedback_state(), kRtcpSr));
}
-TEST_F(RtcpSenderTest, TestCompound_NoRtpReceived) {
- rtcp_sender_->SetIJStatus(true);
- rtcp_sender_->SetRTCPStatus(kRtcpCompound);
+TEST_F(RtcpSenderTest, SendSr) {
+ const uint32_t kPacketCount = 0x12345;
+ const uint32_t kOctetCount = 0x23456;
+ rtcp_sender_->SetRTCPStatus(RtcpMode::kReducedSize);
RTCPSender::FeedbackState feedback_state = rtp_rtcp_impl_->GetFeedbackState();
- EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state, kRtcpRr));
+ feedback_state.packets_sent = kPacketCount;
+ feedback_state.media_bytes_sent = kOctetCount;
+ uint32_t ntp_secs;
+ uint32_t ntp_frac;
+ clock_.CurrentNtp(ntp_secs, ntp_frac);
+ EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state, kRtcpSr));
+ EXPECT_EQ(1, parser()->sender_report()->num_packets());
+ EXPECT_EQ(kSenderSsrc, parser()->sender_report()->Ssrc());
+ EXPECT_EQ(ntp_secs, parser()->sender_report()->NtpSec());
+ EXPECT_EQ(ntp_frac, parser()->sender_report()->NtpFrac());
+ EXPECT_EQ(kPacketCount, parser()->sender_report()->PacketCount());
+ EXPECT_EQ(kOctetCount, parser()->sender_report()->OctetCount());
+ EXPECT_EQ(0, parser()->report_block()->num_packets());
+}
- // Transmission time offset packet should not be received.
- ASSERT_FALSE(test_transport_->rtcp_packet_info_.rtcpPacketTypeFlags &
- kRtcpTransmissionTimeOffset);
+TEST_F(RtcpSenderTest, SendRr) {
+ rtcp_sender_->SetRTCPStatus(RtcpMode::kReducedSize);
+ EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpRr));
+ EXPECT_EQ(1, parser()->receiver_report()->num_packets());
+ EXPECT_EQ(kSenderSsrc, parser()->receiver_report()->Ssrc());
+ EXPECT_EQ(0, parser()->report_block()->num_packets());
}
-TEST_F(RtcpSenderTest, TestXrReceiverReferenceTime) {
- rtcp_sender_->SetRTCPStatus(kRtcpCompound);
- RTCPSender::FeedbackState feedback_state = rtp_rtcp_impl_->GetFeedbackState();
- EXPECT_EQ(0, rtcp_sender_->SetSendingStatus(feedback_state, false));
- rtcp_sender_->SendRtcpXrReceiverReferenceTime(true);
- EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state, kRtcpReport));
+TEST_F(RtcpSenderTest, SendRrWithOneReportBlock) {
+ const uint16_t kSeqNum = 11111;
+ InsertIncomingPacket(kRemoteSsrc, kSeqNum);
+ rtcp_sender_->SetRTCPStatus(RtcpMode::kCompound);
+ EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpRr));
+ EXPECT_EQ(1, parser()->receiver_report()->num_packets());
+ EXPECT_EQ(kSenderSsrc, parser()->receiver_report()->Ssrc());
+ EXPECT_EQ(1, parser()->report_block()->num_packets());
+ EXPECT_EQ(kRemoteSsrc, parser()->report_block()->Ssrc());
+ EXPECT_EQ(0U, parser()->report_block()->FractionLost());
+ EXPECT_EQ(0U, parser()->report_block()->CumPacketLost());
+ EXPECT_EQ(kSeqNum, parser()->report_block()->ExtHighestSeqNum());
+}
- EXPECT_TRUE(test_transport_->rtcp_packet_info_.rtcpPacketTypeFlags &
- kRtcpXrReceiverReferenceTime);
+TEST_F(RtcpSenderTest, SendRrWithTwoReportBlocks) {
+ const uint16_t kSeqNum = 11111;
+ InsertIncomingPacket(kRemoteSsrc, kSeqNum);
+ InsertIncomingPacket(kRemoteSsrc + 1, kSeqNum + 1);
+ rtcp_sender_->SetRTCPStatus(RtcpMode::kCompound);
+ EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpRr));
+ EXPECT_EQ(1, parser()->receiver_report()->num_packets());
+ EXPECT_EQ(kSenderSsrc, parser()->receiver_report()->Ssrc());
+ EXPECT_EQ(2, parser()->report_block()->num_packets());
+ EXPECT_EQ(1, parser()->report_blocks_per_ssrc(kRemoteSsrc));
+ EXPECT_EQ(1, parser()->report_blocks_per_ssrc(kRemoteSsrc + 1));
}
-TEST_F(RtcpSenderTest, TestNoXrReceiverReferenceTimeIfSending) {
- rtcp_sender_->SetRTCPStatus(kRtcpCompound);
+TEST_F(RtcpSenderTest, SendSdes) {
+ rtcp_sender_->SetRTCPStatus(RtcpMode::kReducedSize);
+ EXPECT_EQ(0, rtcp_sender_->SetCNAME("alice@host"));
+ EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpSdes));
+ EXPECT_EQ(1, parser()->sdes()->num_packets());
+ EXPECT_EQ(1, parser()->sdes_chunk()->num_packets());
+ EXPECT_EQ(kSenderSsrc, parser()->sdes_chunk()->Ssrc());
+ EXPECT_EQ("alice@host", parser()->sdes_chunk()->Cname());
+}
+
+TEST_F(RtcpSenderTest, SdesIncludedInCompoundPacket) {
+ rtcp_sender_->SetRTCPStatus(RtcpMode::kCompound);
+ EXPECT_EQ(0, rtcp_sender_->SetCNAME("alice@host"));
+ EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpReport));
+ EXPECT_EQ(1, parser()->receiver_report()->num_packets());
+ EXPECT_EQ(1, parser()->sdes()->num_packets());
+ EXPECT_EQ(1, parser()->sdes_chunk()->num_packets());
+}
+
+TEST_F(RtcpSenderTest, SendBye) {
+ rtcp_sender_->SetRTCPStatus(RtcpMode::kReducedSize);
+ EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpBye));
+ EXPECT_EQ(1, parser()->bye()->num_packets());
+ EXPECT_EQ(kSenderSsrc, parser()->bye()->Ssrc());
+}
+
+TEST_F(RtcpSenderTest, StopSendingTriggersBye) {
+ rtcp_sender_->SetRTCPStatus(RtcpMode::kReducedSize);
+ EXPECT_EQ(0, rtcp_sender_->SetSendingStatus(feedback_state(), true));
+ EXPECT_EQ(0, rtcp_sender_->SetSendingStatus(feedback_state(), false));
+ EXPECT_EQ(1, parser()->bye()->num_packets());
+ EXPECT_EQ(kSenderSsrc, parser()->bye()->Ssrc());
+}
+
+TEST_F(RtcpSenderTest, SendApp) {
+ const uint8_t kSubType = 30;
+ uint32_t name = 'n' << 24;
+ name += 'a' << 16;
+ name += 'm' << 8;
+ name += 'e';
+ const uint8_t kData[] = {'t', 'e', 's', 't', 'd', 'a', 't', 'a'};
+ const uint16_t kDataLength = sizeof(kData) / sizeof(kData[0]);
+ EXPECT_EQ(0, rtcp_sender_->SetApplicationSpecificData(kSubType, name, kData,
+ kDataLength));
+ rtcp_sender_->SetRTCPStatus(RtcpMode::kReducedSize);
+ EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpApp));
+ EXPECT_EQ(1, parser()->app()->num_packets());
+ EXPECT_EQ(kSubType, parser()->app()->SubType());
+ EXPECT_EQ(name, parser()->app()->Name());
+ EXPECT_EQ(1, parser()->app_item()->num_packets());
+ EXPECT_EQ(kDataLength, parser()->app_item()->DataLength());
+ EXPECT_EQ(0, strncmp(reinterpret_cast<const char*>(kData),
+ reinterpret_cast<const char*>(parser()->app_item()->Data()),
+ parser()->app_item()->DataLength()));
+}
+
+TEST_F(RtcpSenderTest, SendEmptyApp) {
+ const uint8_t kSubType = 30;
+ const uint32_t kName = 0x6E616D65;
+
+ EXPECT_EQ(
+ 0, rtcp_sender_->SetApplicationSpecificData(kSubType, kName, nullptr, 0));
+
+ rtcp_sender_->SetRTCPStatus(RtcpMode::kReducedSize);
+ EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpApp));
+ EXPECT_EQ(1, parser()->app()->num_packets());
+ EXPECT_EQ(kSubType, parser()->app()->SubType());
+ EXPECT_EQ(kName, parser()->app()->Name());
+ EXPECT_EQ(0, parser()->app_item()->num_packets());
+}
+
+TEST_F(RtcpSenderTest, SetInvalidApplicationSpecificData) {
+ const uint8_t kData[] = {'t', 'e', 's', 't', 'd', 'a', 't'};
+ const uint16_t kInvalidDataLength = sizeof(kData) / sizeof(kData[0]);
+ EXPECT_EQ(-1, rtcp_sender_->SetApplicationSpecificData(
+ 0, 0, kData, kInvalidDataLength)); // Should by multiple of 4.
+}
+
+TEST_F(RtcpSenderTest, SendFirNonRepeat) {
+ rtcp_sender_->SetRTCPStatus(RtcpMode::kReducedSize);
+ EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpFir));
+ EXPECT_EQ(1, parser()->fir()->num_packets());
+ EXPECT_EQ(kSenderSsrc, parser()->fir()->Ssrc());
+ EXPECT_EQ(1, parser()->fir_item()->num_packets());
+ EXPECT_EQ(kRemoteSsrc, parser()->fir_item()->Ssrc());
+ uint8_t seq = parser()->fir_item()->SeqNum();
+ // Sends non-repeat FIR as default.
+ EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpFir));
+ EXPECT_EQ(2, parser()->fir()->num_packets());
+ EXPECT_EQ(2, parser()->fir_item()->num_packets());
+ EXPECT_EQ(seq + 1, parser()->fir_item()->SeqNum());
+}
+
+TEST_F(RtcpSenderTest, SendFirRepeat) {
+ rtcp_sender_->SetRTCPStatus(RtcpMode::kReducedSize);
+ EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpFir));
+ EXPECT_EQ(1, parser()->fir()->num_packets());
+ EXPECT_EQ(1, parser()->fir_item()->num_packets());
+ uint8_t seq = parser()->fir_item()->SeqNum();
+ const bool kRepeat = true;
+ EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpFir, 0, nullptr,
+ kRepeat));
+ EXPECT_EQ(2, parser()->fir()->num_packets());
+ EXPECT_EQ(2, parser()->fir_item()->num_packets());
+ EXPECT_EQ(seq, parser()->fir_item()->SeqNum());
+}
+
+TEST_F(RtcpSenderTest, SendPli) {
+ rtcp_sender_->SetRTCPStatus(RtcpMode::kReducedSize);
+ EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpPli));
+ EXPECT_EQ(1, parser()->pli()->num_packets());
+ EXPECT_EQ(kSenderSsrc, parser()->pli()->Ssrc());
+ EXPECT_EQ(kRemoteSsrc, parser()->pli()->MediaSsrc());
+}
+
+TEST_F(RtcpSenderTest, SendRpsi) {
+ const uint64_t kPictureId = 0x41;
+ const int8_t kPayloadType = 100;
+ rtcp_sender_->SetRTCPStatus(RtcpMode::kReducedSize);
RTCPSender::FeedbackState feedback_state = rtp_rtcp_impl_->GetFeedbackState();
- EXPECT_EQ(0, rtcp_sender_->SetSendingStatus(feedback_state, true));
- rtcp_sender_->SendRtcpXrReceiverReferenceTime(true);
- EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state, kRtcpReport));
+ feedback_state.send_payload_type = kPayloadType;
+ EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state, kRtcpRpsi, 0, nullptr,
+ false, kPictureId));
+ EXPECT_EQ(kPayloadType, parser()->rpsi()->PayloadType());
+ EXPECT_EQ(kPictureId, parser()->rpsi()->PictureId());
+}
- EXPECT_FALSE(test_transport_->rtcp_packet_info_.rtcpPacketTypeFlags &
- kRtcpXrReceiverReferenceTime);
+TEST_F(RtcpSenderTest, SendSli) {
+ const uint16_t kFirstMb = 0;
+ const uint16_t kNumberOfMb = 0x1FFF;
+ const uint8_t kPictureId = 60;
+ rtcp_sender_->SetRTCPStatus(RtcpMode::kReducedSize);
+ EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpSli, 0, nullptr,
+ false, kPictureId));
+ EXPECT_EQ(1, parser()->sli()->num_packets());
+ EXPECT_EQ(kSenderSsrc, parser()->sli()->Ssrc());
+ EXPECT_EQ(kRemoteSsrc, parser()->sli()->MediaSsrc());
+ EXPECT_EQ(1, parser()->sli_item()->num_packets());
+ EXPECT_EQ(kFirstMb, parser()->sli_item()->FirstMb());
+ EXPECT_EQ(kNumberOfMb, parser()->sli_item()->NumberOfMb());
+ EXPECT_EQ(kPictureId, parser()->sli_item()->PictureId());
}
-TEST_F(RtcpSenderTest, TestNoXrReceiverReferenceTimeIfNotEnabled) {
- rtcp_sender_->SetRTCPStatus(kRtcpCompound);
+TEST_F(RtcpSenderTest, SendNack) {
+ rtcp_sender_->SetRTCPStatus(RtcpMode::kReducedSize);
+ const uint16_t kList[] = {0, 1, 16};
+ const int32_t kListLength = sizeof(kList) / sizeof(kList[0]);
+ EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpNack, kListLength,
+ kList));
+ EXPECT_EQ(1, parser()->nack()->num_packets());
+ EXPECT_EQ(kSenderSsrc, parser()->nack()->Ssrc());
+ EXPECT_EQ(kRemoteSsrc, parser()->nack()->MediaSsrc());
+ EXPECT_EQ(1, parser()->nack_item()->num_packets());
+ EXPECT_THAT(parser()->nack_item()->last_nack_list(), ElementsAre(0, 1, 16));
+}
+
+TEST_F(RtcpSenderTest, SendRemb) {
+ const int kBitrate = 261011;
+ std::vector<uint32_t> ssrcs;
+ ssrcs.push_back(kRemoteSsrc);
+ ssrcs.push_back(kRemoteSsrc + 1);
+ rtcp_sender_->SetRTCPStatus(RtcpMode::kReducedSize);
+ rtcp_sender_->SetREMBData(kBitrate, ssrcs);
+ EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpRemb));
+ EXPECT_EQ(1, parser()->psfb_app()->num_packets());
+ EXPECT_EQ(kSenderSsrc, parser()->psfb_app()->Ssrc());
+ EXPECT_EQ(1, parser()->remb_item()->num_packets());
+ EXPECT_EQ(kBitrate, parser()->remb_item()->last_bitrate_bps());
+ EXPECT_THAT(parser()->remb_item()->last_ssrc_list(),
+ ElementsAre(kRemoteSsrc, kRemoteSsrc + 1));
+}
+
+TEST_F(RtcpSenderTest, RembIncludedInCompoundPacketIfEnabled) {
+ const int kBitrate = 261011;
+ std::vector<uint32_t> ssrcs;
+ ssrcs.push_back(kRemoteSsrc);
+ rtcp_sender_->SetRTCPStatus(RtcpMode::kCompound);
+ rtcp_sender_->SetREMBStatus(true);
+ EXPECT_TRUE(rtcp_sender_->REMB());
+ rtcp_sender_->SetREMBData(kBitrate, ssrcs);
+ EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpReport));
+ EXPECT_EQ(1, parser()->psfb_app()->num_packets());
+ EXPECT_EQ(1, parser()->remb_item()->num_packets());
+ // REMB should be included in each compound packet.
+ EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpReport));
+ EXPECT_EQ(2, parser()->psfb_app()->num_packets());
+ EXPECT_EQ(2, parser()->remb_item()->num_packets());
+}
+
+TEST_F(RtcpSenderTest, RembNotIncludedInCompoundPacketIfNotEnabled) {
+ const int kBitrate = 261011;
+ std::vector<uint32_t> ssrcs;
+ ssrcs.push_back(kRemoteSsrc);
+ rtcp_sender_->SetRTCPStatus(RtcpMode::kCompound);
+ rtcp_sender_->SetREMBData(kBitrate, ssrcs);
+ EXPECT_FALSE(rtcp_sender_->REMB());
+ EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpReport));
+ EXPECT_EQ(0, parser()->psfb_app()->num_packets());
+}
+
+TEST_F(RtcpSenderTest, SendXrWithVoipMetric) {
+ rtcp_sender_->SetRTCPStatus(RtcpMode::kReducedSize);
+ RTCPVoIPMetric metric;
+ metric.lossRate = 1;
+ metric.discardRate = 2;
+ metric.burstDensity = 3;
+ metric.gapDensity = 4;
+ metric.burstDuration = 0x1111;
+ metric.gapDuration = 0x2222;
+ metric.roundTripDelay = 0x3333;
+ metric.endSystemDelay = 0x4444;
+ metric.signalLevel = 5;
+ metric.noiseLevel = 6;
+ metric.RERL = 7;
+ metric.Gmin = 8;
+ metric.Rfactor = 9;
+ metric.extRfactor = 10;
+ metric.MOSLQ = 11;
+ metric.MOSCQ = 12;
+ metric.RXconfig = 13;
+ metric.JBnominal = 0x5555;
+ metric.JBmax = 0x6666;
+ metric.JBabsMax = 0x7777;
+ EXPECT_EQ(0, rtcp_sender_->SetRTCPVoIPMetrics(&metric));
+ EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpXrVoipMetric));
+ EXPECT_EQ(1, parser()->xr_header()->num_packets());
+ EXPECT_EQ(kSenderSsrc, parser()->xr_header()->Ssrc());
+ EXPECT_EQ(1, parser()->voip_metric()->num_packets());
+ EXPECT_EQ(kRemoteSsrc, parser()->voip_metric()->Ssrc());
+ EXPECT_EQ(metric.lossRate, parser()->voip_metric()->LossRate());
+ EXPECT_EQ(metric.discardRate, parser()->voip_metric()->DiscardRate());
+ EXPECT_EQ(metric.burstDensity, parser()->voip_metric()->BurstDensity());
+ EXPECT_EQ(metric.gapDensity, parser()->voip_metric()->GapDensity());
+ EXPECT_EQ(metric.burstDuration, parser()->voip_metric()->BurstDuration());
+ EXPECT_EQ(metric.gapDuration, parser()->voip_metric()->GapDuration());
+ EXPECT_EQ(metric.roundTripDelay, parser()->voip_metric()->RoundTripDelay());
+ EXPECT_EQ(metric.endSystemDelay, parser()->voip_metric()->EndSystemDelay());
+ EXPECT_EQ(metric.signalLevel, parser()->voip_metric()->SignalLevel());
+ EXPECT_EQ(metric.noiseLevel, parser()->voip_metric()->NoiseLevel());
+ EXPECT_EQ(metric.RERL, parser()->voip_metric()->Rerl());
+ EXPECT_EQ(metric.Gmin, parser()->voip_metric()->Gmin());
+ EXPECT_EQ(metric.Rfactor, parser()->voip_metric()->Rfactor());
+ EXPECT_EQ(metric.extRfactor, parser()->voip_metric()->ExtRfactor());
+ EXPECT_EQ(metric.MOSLQ, parser()->voip_metric()->MosLq());
+ EXPECT_EQ(metric.MOSCQ, parser()->voip_metric()->MosCq());
+ EXPECT_EQ(metric.RXconfig, parser()->voip_metric()->RxConfig());
+ EXPECT_EQ(metric.JBnominal, parser()->voip_metric()->JbNominal());
+ EXPECT_EQ(metric.JBmax, parser()->voip_metric()->JbMax());
+ EXPECT_EQ(metric.JBabsMax, parser()->voip_metric()->JbAbsMax());
+}
+
+TEST_F(RtcpSenderTest, SendXrWithDlrr) {
+ rtcp_sender_->SetRTCPStatus(RtcpMode::kCompound);
RTCPSender::FeedbackState feedback_state = rtp_rtcp_impl_->GetFeedbackState();
- EXPECT_EQ(0, rtcp_sender_->SetSendingStatus(feedback_state, false));
- rtcp_sender_->SendRtcpXrReceiverReferenceTime(false);
+ feedback_state.has_last_xr_rr = true;
+ RtcpReceiveTimeInfo last_xr_rr;
+ last_xr_rr.sourceSSRC = 0x11111111;
+ last_xr_rr.lastRR = 0x22222222;
+ last_xr_rr.delaySinceLastRR = 0x33333333;
+ feedback_state.last_xr_rr = last_xr_rr;
EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state, kRtcpReport));
+ EXPECT_EQ(1, parser()->xr_header()->num_packets());
+ EXPECT_EQ(kSenderSsrc, parser()->xr_header()->Ssrc());
+ EXPECT_EQ(1, parser()->dlrr()->num_packets());
+ EXPECT_EQ(1, parser()->dlrr_items()->num_packets());
+ EXPECT_EQ(last_xr_rr.sourceSSRC, parser()->dlrr_items()->Ssrc(0));
+ EXPECT_EQ(last_xr_rr.lastRR, parser()->dlrr_items()->LastRr(0));
+ EXPECT_EQ(last_xr_rr.delaySinceLastRR,
+ parser()->dlrr_items()->DelayLastRr(0));
+}
- EXPECT_FALSE(test_transport_->rtcp_packet_info_.rtcpPacketTypeFlags &
- kRtcpXrReceiverReferenceTime);
+TEST_F(RtcpSenderTest, SendXrWithRrtr) {
+ rtcp_sender_->SetRTCPStatus(RtcpMode::kCompound);
+ EXPECT_EQ(0, rtcp_sender_->SetSendingStatus(feedback_state(), false));
+ rtcp_sender_->SendRtcpXrReceiverReferenceTime(true);
+ uint32_t ntp_secs;
+ uint32_t ntp_frac;
+ clock_.CurrentNtp(ntp_secs, ntp_frac);
+ EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpReport));
+ EXPECT_EQ(1, parser()->xr_header()->num_packets());
+ EXPECT_EQ(kSenderSsrc, parser()->xr_header()->Ssrc());
+ EXPECT_EQ(0, parser()->dlrr()->num_packets());
+ EXPECT_EQ(1, parser()->rrtr()->num_packets());
+ EXPECT_EQ(ntp_secs, parser()->rrtr()->NtpSec());
+ EXPECT_EQ(ntp_frac, parser()->rrtr()->NtpFrac());
+}
+
+TEST_F(RtcpSenderTest, TestNoXrRrtrSentIfSending) {
+ rtcp_sender_->SetRTCPStatus(RtcpMode::kCompound);
+ EXPECT_EQ(0, rtcp_sender_->SetSendingStatus(feedback_state(), true));
+ rtcp_sender_->SendRtcpXrReceiverReferenceTime(true);
+ EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpReport));
+ EXPECT_EQ(0, parser()->xr_header()->num_packets());
+ EXPECT_EQ(0, parser()->rrtr()->num_packets());
}
-TEST_F(RtcpSenderTest, TestSendTimeOfXrRrReport) {
- rtcp_sender_->SetRTCPStatus(kRtcpCompound);
+TEST_F(RtcpSenderTest, TestNoXrRrtrSentIfNotEnabled) {
+ rtcp_sender_->SetRTCPStatus(RtcpMode::kCompound);
+ EXPECT_EQ(0, rtcp_sender_->SetSendingStatus(feedback_state(), false));
+ rtcp_sender_->SendRtcpXrReceiverReferenceTime(false);
+ EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpReport));
+ EXPECT_EQ(0, parser()->xr_header()->num_packets());
+ EXPECT_EQ(0, parser()->rrtr()->num_packets());
+}
+
+TEST_F(RtcpSenderTest, TestSendTimeOfXrRrtr) {
+ rtcp_sender_->SetRTCPStatus(RtcpMode::kCompound);
RTCPSender::FeedbackState feedback_state = rtp_rtcp_impl_->GetFeedbackState();
EXPECT_EQ(0, rtcp_sender_->SetSendingStatus(feedback_state, false));
rtcp_sender_->SendRtcpXrReceiverReferenceTime(true);
@@ -451,20 +655,81 @@ TEST_F(RtcpSenderTest, TestSendTimeOfXrRrReport) {
// Send XR RR packets.
for (int i = 0; i <= RTCP_NUMBER_OF_SR; ++i) {
EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state, kRtcpReport));
- EXPECT_TRUE(test_transport_->rtcp_packet_info_.rtcpPacketTypeFlags &
- kRtcpXrReceiverReferenceTime);
-
+ EXPECT_EQ(i + 1, test_transport_.parser_.rrtr()->num_packets());
clock_.CurrentNtp(ntp_sec, ntp_frac);
uint32_t mid_ntp = RTCPUtility::MidNtp(ntp_sec, ntp_frac);
EXPECT_TRUE(rtcp_sender_->SendTimeOfXrRrReport(mid_ntp, &time_ms));
EXPECT_EQ(clock_.CurrentNtpInMilliseconds(), time_ms);
clock_.AdvanceTimeMilliseconds(1000);
}
-
// The first report should no longer be stored.
EXPECT_FALSE(rtcp_sender_->SendTimeOfXrRrReport(initial_mid_ntp, &time_ms));
}
+TEST_F(RtcpSenderTest, TestRegisterRtcpPacketTypeObserver) {
+ RtcpPacketTypeCounterObserverImpl observer;
+ rtcp_sender_.reset(new RTCPSender(false, &clock_, receive_statistics_.get(),
+ &observer, &test_transport_));
+ rtcp_sender_->SetRemoteSSRC(kRemoteSsrc);
+ rtcp_sender_->SetRTCPStatus(RtcpMode::kReducedSize);
+ EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpPli));
+ EXPECT_EQ(1, parser()->pli()->num_packets());
+ EXPECT_EQ(kRemoteSsrc, observer.ssrc_);
+ EXPECT_EQ(1U, observer.counter_.pli_packets);
+ EXPECT_EQ(clock_.TimeInMilliseconds(),
+ observer.counter_.first_packet_time_ms);
+}
+
+TEST_F(RtcpSenderTest, SendTmmbr) {
+ const unsigned int kBitrateBps = 312000;
+ rtcp_sender_->SetRTCPStatus(RtcpMode::kReducedSize);
+ rtcp_sender_->SetTargetBitrate(kBitrateBps);
+ EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpTmmbr));
+ EXPECT_EQ(1, parser()->tmmbr()->num_packets());
+ EXPECT_EQ(kSenderSsrc, parser()->tmmbr()->Ssrc());
+ EXPECT_EQ(1, parser()->tmmbr_item()->num_packets());
+ EXPECT_EQ(kBitrateBps / 1000, parser()->tmmbr_item()->BitrateKbps());
+ // TODO(asapersson): tmmbr_item()->Overhead() looks broken, always zero.
+}
+
+TEST_F(RtcpSenderTest, TmmbrIncludedInCompoundPacketIfEnabled) {
+ const unsigned int kBitrateBps = 312000;
+ rtcp_sender_->SetRTCPStatus(RtcpMode::kCompound);
+ EXPECT_FALSE(rtcp_sender_->TMMBR());
+ rtcp_sender_->SetTMMBRStatus(true);
+ EXPECT_TRUE(rtcp_sender_->TMMBR());
+ rtcp_sender_->SetTargetBitrate(kBitrateBps);
+ EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpReport));
+ EXPECT_EQ(1, parser()->tmmbr()->num_packets());
+ EXPECT_EQ(1, parser()->tmmbr_item()->num_packets());
+ // TMMBR should be included in each compound packet.
+ EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpReport));
+ EXPECT_EQ(2, parser()->tmmbr()->num_packets());
+ EXPECT_EQ(2, parser()->tmmbr_item()->num_packets());
+
+ rtcp_sender_->SetTMMBRStatus(false);
+ EXPECT_FALSE(rtcp_sender_->TMMBR());
+}
+
+TEST_F(RtcpSenderTest, SendTmmbn) {
+ rtcp_sender_->SetRTCPStatus(RtcpMode::kCompound);
+ TMMBRSet bounding_set;
+ bounding_set.VerifyAndAllocateSet(1);
+ const uint32_t kBitrateKbps = 32768;
+ const uint32_t kPacketOh = 40;
+ const uint32_t kSourceSsrc = 12345;
+ bounding_set.AddEntry(kBitrateKbps, kPacketOh, kSourceSsrc);
+ EXPECT_EQ(0, rtcp_sender_->SetTMMBN(&bounding_set, 0));
+ EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpSr));
+ EXPECT_EQ(1, parser()->sender_report()->num_packets());
+ EXPECT_EQ(1, parser()->tmmbn()->num_packets());
+ EXPECT_EQ(kSenderSsrc, parser()->tmmbn()->Ssrc());
+ EXPECT_EQ(1, parser()->tmmbn_items()->num_packets());
+ EXPECT_EQ(kBitrateKbps, parser()->tmmbn_items()->BitrateKbps(0));
+ EXPECT_EQ(kPacketOh, parser()->tmmbn_items()->Overhead(0));
+ EXPECT_EQ(kSourceSsrc, parser()->tmmbn_items()->Ssrc(0));
+}
+
// This test is written to verify actual behaviour. It does not seem
// to make much sense to send an empty TMMBN, since there is no place
// to put an actual limit here. It's just information that no limit
@@ -472,44 +737,28 @@ TEST_F(RtcpSenderTest, TestSendTimeOfXrRrReport) {
// See http://code.google.com/p/webrtc/issues/detail?id=468 for one
// situation where this caused confusion.
TEST_F(RtcpSenderTest, SendsTmmbnIfSetAndEmpty) {
- rtcp_sender_->SetRTCPStatus(kRtcpCompound);
+ rtcp_sender_->SetRTCPStatus(RtcpMode::kCompound);
TMMBRSet bounding_set;
EXPECT_EQ(0, rtcp_sender_->SetTMMBN(&bounding_set, 3));
- ASSERT_EQ(0U, test_transport_->rtcp_packet_info_.rtcpPacketTypeFlags);
- RTCPSender::FeedbackState feedback_state = rtp_rtcp_impl_->GetFeedbackState();
- EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state,kRtcpSr));
- // We now expect the packet to show up in the rtcp_packet_info_ of
- // test_transport_.
- ASSERT_NE(0U, test_transport_->rtcp_packet_info_.rtcpPacketTypeFlags);
- EXPECT_TRUE(gotPacketType(kRtcpTmmbn));
- TMMBRSet* incoming_set = NULL;
- bool owner = false;
- // The BoundingSet function returns the number of members of the
- // bounding set, and touches the incoming set only if there's > 1.
- EXPECT_EQ(0, test_transport_->rtcp_receiver_->BoundingSet(owner,
- incoming_set));
-}
-
-TEST_F(RtcpSenderTest, SendsTmmbnIfSetAndValid) {
- rtcp_sender_->SetRTCPStatus(kRtcpCompound);
- TMMBRSet bounding_set;
- bounding_set.VerifyAndAllocateSet(1);
- const uint32_t kSourceSsrc = 12345;
- bounding_set.AddEntry(32768, 0, kSourceSsrc);
+ EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpSr));
+ EXPECT_EQ(1, parser()->sender_report()->num_packets());
+ EXPECT_EQ(1, parser()->tmmbn()->num_packets());
+ EXPECT_EQ(kSenderSsrc, parser()->tmmbn()->Ssrc());
+ EXPECT_EQ(0, parser()->tmmbn_items()->num_packets());
+}
- EXPECT_EQ(0, rtcp_sender_->SetTMMBN(&bounding_set, 3));
- ASSERT_EQ(0U, test_transport_->rtcp_packet_info_.rtcpPacketTypeFlags);
- RTCPSender::FeedbackState feedback_state = rtp_rtcp_impl_->GetFeedbackState();
- EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state, kRtcpSr));
- // We now expect the packet to show up in the rtcp_packet_info_ of
- // test_transport_.
- ASSERT_NE(0U, test_transport_->rtcp_packet_info_.rtcpPacketTypeFlags);
- EXPECT_TRUE(gotPacketType(kRtcpTmmbn));
- TMMBRSet incoming_set;
- bool owner = false;
- // We expect 1 member of the incoming set.
- EXPECT_EQ(1, test_transport_->rtcp_receiver_->BoundingSet(owner,
- &incoming_set));
- EXPECT_EQ(kSourceSsrc, incoming_set.Ssrc(0));
+TEST_F(RtcpSenderTest, SendCompoundPliRemb) {
+ const int kBitrate = 261011;
+ std::vector<uint32_t> ssrcs;
+ ssrcs.push_back(kRemoteSsrc);
+ rtcp_sender_->SetRTCPStatus(RtcpMode::kCompound);
+ rtcp_sender_->SetREMBData(kBitrate, ssrcs);
+ std::set<RTCPPacketType> packet_types;
+ packet_types.insert(kRtcpRemb);
+ packet_types.insert(kRtcpPli);
+ EXPECT_EQ(0, rtcp_sender_->SendCompoundRTCP(feedback_state(), packet_types));
+ EXPECT_EQ(1, parser()->remb_item()->num_packets());
+ EXPECT_EQ(1, parser()->pli()->num_packets());
}
+
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_utility.cc b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_utility.cc
index 4e37cf3716d..d2b80438ccc 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_utility.cc
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_utility.cc
@@ -8,12 +8,18 @@
* be found in the AUTHORS file in the root of the source tree.
*/
+#include "webrtc/base/checks.h"
#include "webrtc/modules/rtp_rtcp/source/rtcp_utility.h"
+#include "webrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h"
#include <assert.h>
#include <math.h> // ceil
#include <string.h> // memcpy
+#include "webrtc/base/checks.h"
+#include "webrtc/base/logging.h"
+#include "webrtc/modules/rtp_rtcp/source/byte_io.h"
+
namespace webrtc {
namespace RTCPUtility {
@@ -51,6 +57,7 @@ RTCPUtility::RTCPParserV2::RTCPParserV2(const uint8_t* rtcpData,
_ptrRTCPBlockEnd(NULL),
_state(ParseState::State_TopLevel),
_numberOfBlocks(0),
+ num_skipped_blocks_(0),
_packetType(RTCPPacketTypes::kInvalid) {
Validate();
}
@@ -76,6 +83,9 @@ RTCPUtility::RTCPParserV2::Packet() const
return _packet;
}
+rtcp::RtcpPacket* RTCPUtility::RTCPParserV2::ReleaseRtcpPacket() {
+ return rtcp_packet_.release();
+}
RTCPUtility::RTCPPacketTypes
RTCPUtility::RTCPParserV2::Begin()
{
@@ -143,7 +153,7 @@ RTCPUtility::RTCPParserV2::Iterate()
IterateAppItem();
break;
default:
- assert(false); // Invalid state!
+ RTC_NOTREACHED() << "Invalid state!";
break;
}
}
@@ -155,43 +165,40 @@ RTCPUtility::RTCPParserV2::IterateTopLevel()
{
for (;;)
{
- RTCPCommonHeader header;
-
- const bool success = RTCPParseCommonHeader(_ptrRTCPData,
- _ptrRTCPDataEnd,
- header);
+ RtcpCommonHeader header;
+ if (_ptrRTCPDataEnd <= _ptrRTCPData)
+ return;
- if (!success)
- {
+ if (!RtcpParseCommonHeader(_ptrRTCPData, _ptrRTCPDataEnd - _ptrRTCPData,
+ &header)) {
return;
}
- _ptrRTCPBlockEnd = _ptrRTCPData + header.LengthInOctets;
+ _ptrRTCPBlockEnd = _ptrRTCPData + header.BlockSize();
if (_ptrRTCPBlockEnd > _ptrRTCPDataEnd)
{
- // Bad block!
+ ++num_skipped_blocks_;
return;
}
- switch (header.PT)
- {
+ switch (header.packet_type) {
case PT_SR:
{
// number of Report blocks
- _numberOfBlocks = header.IC;
+ _numberOfBlocks = header.count_or_format;
ParseSR();
return;
}
case PT_RR:
{
// number of Report blocks
- _numberOfBlocks = header.IC;
+ _numberOfBlocks = header.count_or_format;
ParseRR();
return;
}
case PT_SDES:
{
// number of SDES blocks
- _numberOfBlocks = header.IC;
+ _numberOfBlocks = header.count_or_format;
const bool ok = ParseSDES();
if (!ok)
{
@@ -202,7 +209,7 @@ RTCPUtility::RTCPParserV2::IterateTopLevel()
}
case PT_BYE:
{
- _numberOfBlocks = header.IC;
+ _numberOfBlocks = header.count_or_format;
const bool ok = ParseBYE();
if (!ok)
{
@@ -214,20 +221,19 @@ RTCPUtility::RTCPParserV2::IterateTopLevel()
case PT_IJ:
{
// number of Report blocks
- _numberOfBlocks = header.IC;
+ _numberOfBlocks = header.count_or_format;
ParseIJ();
return;
}
- case PT_RTPFB: // Fall through!
+ case PT_RTPFB:
+ FALLTHROUGH();
case PT_PSFB:
{
- const bool ok = ParseFBCommon(header);
- if (!ok)
- {
- // Nothing supported found, continue to next block!
- break;
- }
- return;
+ if (!ParseFBCommon(header)) {
+ // Nothing supported found, continue to next block!
+ break;
+ }
+ return;
}
case PT_APP:
{
@@ -251,6 +257,7 @@ RTCPUtility::RTCPParserV2::IterateTopLevel()
}
default:
// Not supported! Skip!
+ ++num_skipped_blocks_;
EndCurrentBlock();
break;
}
@@ -410,20 +417,16 @@ RTCPUtility::RTCPParserV2::IterateAppItem()
void
RTCPUtility::RTCPParserV2::Validate()
{
- if (_ptrRTCPData == NULL)
- {
- return; // NOT VALID
- }
+ if (_ptrRTCPData == nullptr)
+ return; // NOT VALID
- RTCPCommonHeader header;
- const bool success = RTCPParseCommonHeader(_ptrRTCPDataBegin,
- _ptrRTCPDataEnd,
- header);
+ RtcpCommonHeader header;
+ if (_ptrRTCPDataEnd <= _ptrRTCPDataBegin)
+ return; // NOT VALID
- if (!success)
- {
- return; // NOT VALID!
- }
+ if (!RtcpParseCommonHeader(_ptrRTCPDataBegin,
+ _ptrRTCPDataEnd - _ptrRTCPDataBegin, &header))
+ return; // NOT VALID!
// * if (!reducedSize) : first packet must be RR or SR.
//
@@ -437,8 +440,7 @@ RTCPUtility::RTCPParserV2::Validate()
if (!_RTCPReducedSizeEnable)
{
- if ((header.PT != PT_SR) && (header.PT != PT_RR))
- {
+ if ((header.packet_type != PT_SR) && (header.packet_type != PT_RR)) {
return; // NOT VALID
}
}
@@ -458,48 +460,74 @@ RTCPUtility::RTCPParserV2::EndCurrentBlock()
_ptrRTCPData = _ptrRTCPBlockEnd;
}
-bool
-RTCPUtility::RTCPParseCommonHeader( const uint8_t* ptrDataBegin,
- const uint8_t* ptrDataEnd,
- RTCPCommonHeader& parsedHeader)
-{
- if (!ptrDataBegin || !ptrDataEnd)
- {
- return false;
- }
-
- // 0 1 2 3
- // 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
- // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- // |V=2|P| IC | PT | length |
- // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- //
- // Common header for all RTCP packets, 4 octets.
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |V=2|P| IC | PT | length |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//
+// Common header for all RTCP packets, 4 octets.
+
+bool RTCPUtility::RtcpParseCommonHeader(const uint8_t* packet,
+ size_t size_bytes,
+ RtcpCommonHeader* parsed_header) {
+ RTC_DCHECK(parsed_header != nullptr);
+ if (size_bytes < RtcpCommonHeader::kHeaderSizeBytes) {
+ LOG(LS_WARNING) << "Too little data (" << size_bytes << " byte"
+ << (size_bytes != 1 ? "s" : "")
+ << ") remaining in buffer to parse RTCP header (4 bytes).";
+ return false;
+ }
- if ((ptrDataEnd - ptrDataBegin) < 4)
- {
- return false;
- }
+ const uint8_t kRtcpVersion = 2;
+ uint8_t version = packet[0] >> 6;
+ if (version != kRtcpVersion) {
+ LOG(LS_WARNING) << "Invalid RTCP header: Version must be "
+ << static_cast<int>(kRtcpVersion) << " but was "
+ << static_cast<int>(version);
+ return false;
+ }
- parsedHeader.V = ptrDataBegin[0] >> 6;
- parsedHeader.P = ((ptrDataBegin[0] & 0x20) == 0) ? false : true;
- parsedHeader.IC = ptrDataBegin[0] & 0x1f;
- parsedHeader.PT = ptrDataBegin[1];
+ bool has_padding = (packet[0] & 0x20) != 0;
+ uint8_t format = packet[0] & 0x1F;
+ uint8_t packet_type = packet[1];
+ size_t packet_size_words =
+ ByteReader<uint16_t>::ReadBigEndian(&packet[2]) + 1;
- parsedHeader.LengthInOctets = (ptrDataBegin[2] << 8) + ptrDataBegin[3] + 1;
- parsedHeader.LengthInOctets *= 4;
+ if (size_bytes < packet_size_words * 4) {
+ LOG(LS_WARNING) << "Buffer too small (" << size_bytes
+ << " bytes) to fit an RtcpPacket of " << packet_size_words
+ << " 32bit words.";
+ return false;
+ }
- if(parsedHeader.LengthInOctets == 0)
- {
- return false;
+ size_t payload_size = packet_size_words * 4;
+ size_t padding_bytes = 0;
+ if (has_padding) {
+ if (payload_size <= RtcpCommonHeader::kHeaderSizeBytes) {
+ LOG(LS_WARNING) << "Invalid RTCP header: Padding bit set but 0 payload "
+ "size specified.";
+ return false;
}
- // Check if RTP version field == 2
- if (parsedHeader.V != 2)
- {
- return false;
+
+ padding_bytes = packet[payload_size - 1];
+ if (RtcpCommonHeader::kHeaderSizeBytes + padding_bytes > payload_size) {
+ LOG(LS_WARNING) << "Invalid RTCP header: Too many padding bytes ("
+ << padding_bytes << ") for a packet size of "
+ << payload_size << "bytes.";
+ return false;
}
+ payload_size -= padding_bytes;
+ }
+ payload_size -= RtcpCommonHeader::kHeaderSizeBytes;
- return true;
+ parsed_header->version = kRtcpVersion;
+ parsed_header->count_or_format = format;
+ parsed_header->packet_type = packet_type;
+ parsed_header->payload_size_bytes = payload_size;
+ parsed_header->padding_bytes = padding_bytes;
+
+ return true;
}
bool
@@ -1137,37 +1165,32 @@ bool RTCPUtility::RTCPParserV2::ParseXrUnsupportedBlockType(
return false;
}
-bool
-RTCPUtility::RTCPParserV2::ParseFBCommon(const RTCPCommonHeader& header)
-{
- assert((header.PT == PT_RTPFB) || (header.PT == PT_PSFB)); // Parser logic check
+bool RTCPUtility::RTCPParserV2::ParseFBCommon(const RtcpCommonHeader& header) {
+ RTC_CHECK((header.packet_type == PT_RTPFB) ||
+ (header.packet_type == PT_PSFB)); // Parser logic check
const ptrdiff_t length = _ptrRTCPBlockEnd - _ptrRTCPData;
- if (length < 12) // 4 * 3, RFC4585 section 6.1
- {
- EndCurrentBlock();
+ // 4 * 3, RFC4585 section 6.1
+ if (length < 12) {
+ LOG(LS_WARNING)
+ << "Invalid RTCP packet: Too little data (" << length
+ << " bytes) left in buffer to parse a 12 byte RTPFB/PSFB message.";
return false;
}
_ptrRTCPData += 4; // Skip RTCP header
- uint32_t senderSSRC = *_ptrRTCPData++ << 24;
- senderSSRC += *_ptrRTCPData++ << 16;
- senderSSRC += *_ptrRTCPData++ << 8;
- senderSSRC += *_ptrRTCPData++;
+ uint32_t senderSSRC = ByteReader<uint32_t>::ReadBigEndian(_ptrRTCPData);
+ _ptrRTCPData += 4;
- uint32_t mediaSSRC = *_ptrRTCPData++ << 24;
- mediaSSRC += *_ptrRTCPData++ << 16;
- mediaSSRC += *_ptrRTCPData++ << 8;
- mediaSSRC += *_ptrRTCPData++;
+ uint32_t mediaSSRC = ByteReader<uint32_t>::ReadBigEndian(_ptrRTCPData);
+ _ptrRTCPData += 4;
- if (header.PT == PT_RTPFB)
- {
+ if (header.packet_type == PT_RTPFB) {
// Transport layer feedback
- switch (header.IC)
- {
+ switch (header.count_or_format) {
case 1:
{
// NACK
@@ -1179,12 +1202,6 @@ RTCPUtility::RTCPParserV2::ParseFBCommon(const RTCPCommonHeader& header)
return true;
}
- case 2:
- {
- // used to be ACK is this code point, which is removed
- // conficts with http://tools.ietf.org/html/draft-levin-avt-rtcp-burst-00
- break;
- }
case 3:
{
// TMMBR
@@ -1217,17 +1234,27 @@ RTCPUtility::RTCPParserV2::ParseFBCommon(const RTCPCommonHeader& header)
// Note: No state transition, SR REQ is empty!
return true;
}
+ case 15: {
+ rtcp_packet_ =
+ rtcp::TransportFeedback::ParseFrom(_ptrRTCPData - 12, length);
+ // Since we parse the whole packet here, keep the TopLevel state and
+ // just end the current block.
+ EndCurrentBlock();
+ if (rtcp_packet_.get()) {
+ _packetType = RTCPPacketTypes::kTransportFeedback;
+ return true;
+ }
+ break;
+ }
default:
break;
}
- EndCurrentBlock();
+ // Unsupported RTPFB message. Skip and move to next block.
+ ++num_skipped_blocks_;
return false;
- }
- else if (header.PT == PT_PSFB)
- {
+ } else if (header.packet_type == PT_PSFB) {
// Payload specific feedback
- switch (header.IC)
- {
+ switch (header.count_or_format) {
case 1:
// PLI
_packetType = RTCPPacketTypes::kPsfbPli;
@@ -1271,14 +1298,11 @@ RTCPUtility::RTCPParserV2::ParseFBCommon(const RTCPCommonHeader& header)
break;
}
- EndCurrentBlock();
return false;
}
else
{
- assert(false);
-
- EndCurrentBlock();
+ RTC_NOTREACHED();
return false;
}
}
@@ -1579,9 +1603,7 @@ RTCPUtility::RTCPParserV2::ParseFIRItem()
return true;
}
-bool
-RTCPUtility::RTCPParserV2::ParseAPP( const RTCPCommonHeader& header)
-{
+bool RTCPUtility::RTCPParserV2::ParseAPP(const RtcpCommonHeader& header) {
ptrdiff_t length = _ptrRTCPBlockEnd - _ptrRTCPData;
if (length < 12) // 4 * 3, RFC 3550 6.7 APP: Application-Defined RTCP Packet
@@ -1606,7 +1628,7 @@ RTCPUtility::RTCPParserV2::ParseAPP( const RTCPCommonHeader& header)
_packetType = RTCPPacketTypes::kApp;
- _packet.APP.SubType = header.IC;
+ _packet.APP.SubType = header.count_or_format;
_packet.APP.Name = name;
_state = ParseState::State_AppItem;
@@ -1640,6 +1662,10 @@ RTCPUtility::RTCPParserV2::ParseAPPItem()
return true;
}
+size_t RTCPUtility::RTCPParserV2::NumSkippedBlocks() const {
+ return num_skipped_blocks_;
+}
+
RTCPUtility::RTCPPacketIterator::RTCPPacketIterator(uint8_t* rtcpData,
size_t rtcpDataLength)
: _ptrBegin(rtcpData),
@@ -1651,37 +1677,31 @@ RTCPUtility::RTCPPacketIterator::RTCPPacketIterator(uint8_t* rtcpData,
RTCPUtility::RTCPPacketIterator::~RTCPPacketIterator() {
}
-const RTCPUtility::RTCPCommonHeader*
-RTCPUtility::RTCPPacketIterator::Begin()
-{
+const RTCPUtility::RtcpCommonHeader* RTCPUtility::RTCPPacketIterator::Begin() {
_ptrBlock = _ptrBegin;
return Iterate();
}
-const RTCPUtility::RTCPCommonHeader*
-RTCPUtility::RTCPPacketIterator::Iterate()
-{
- const bool success = RTCPParseCommonHeader(_ptrBlock, _ptrEnd, _header);
- if (!success)
- {
- _ptrBlock = NULL;
- return NULL;
- }
- _ptrBlock += _header.LengthInOctets;
+const RTCPUtility::RtcpCommonHeader*
+RTCPUtility::RTCPPacketIterator::Iterate() {
+ if ((_ptrEnd <= _ptrBlock) ||
+ !RtcpParseCommonHeader(_ptrBlock, _ptrEnd - _ptrBlock, &_header)) {
+ _ptrBlock = nullptr;
+ return nullptr;
+ }
+ _ptrBlock += _header.BlockSize();
- if (_ptrBlock > _ptrEnd)
- {
- _ptrBlock = NULL;
- return NULL;
- }
+ if (_ptrBlock > _ptrEnd) {
+ _ptrBlock = nullptr;
+ return nullptr;
+ }
- return &_header;
+ return &_header;
}
-const RTCPUtility::RTCPCommonHeader*
-RTCPUtility::RTCPPacketIterator::Current()
-{
+const RTCPUtility::RtcpCommonHeader*
+RTCPUtility::RTCPPacketIterator::Current() {
if (!_ptrBlock)
{
return NULL;
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_utility.h b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_utility.h
index fcafe5960ee..f05d5129192 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_utility.h
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_utility.h
@@ -13,11 +13,15 @@
#include <stddef.h> // size_t, ptrdiff_t
+#include "webrtc/base/scoped_ptr.h"
#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_rtcp_config.h"
#include "webrtc/typedefs.h"
namespace webrtc {
+namespace rtcp {
+class RtcpPacket;
+}
namespace RTCPUtility {
class NackStats {
@@ -294,6 +298,9 @@ enum class RTCPPacketTypes {
kApp,
kAppItem,
+
+ // draft-holmer-rmcat-transport-wide-cc-extensions
+ kTransportFeedback,
};
struct RTCPRawPacket {
@@ -306,12 +313,24 @@ struct RTCPModRawPacket {
uint8_t* _ptrPacketEnd;
};
-struct RTCPCommonHeader {
- uint8_t V; // Version
- bool P; // Padding
- uint8_t IC; // Item count/subtype
- uint8_t PT; // Packet Type
- uint16_t LengthInOctets;
+struct RtcpCommonHeader {
+ static const uint8_t kHeaderSizeBytes = 4;
+ RtcpCommonHeader()
+ : version(2),
+ count_or_format(0),
+ packet_type(0),
+ payload_size_bytes(0),
+ padding_bytes(0) {}
+
+ uint32_t BlockSize() const {
+ return kHeaderSizeBytes + payload_size_bytes + padding_bytes;
+ }
+
+ uint8_t version;
+ uint8_t count_or_format;
+ uint8_t packet_type;
+ uint32_t payload_size_bytes;
+ uint8_t padding_bytes;
};
enum RTCPPT : uint8_t {
@@ -333,9 +352,9 @@ enum RtcpXrBlockType : uint8_t {
kBtVoipMetric = 7
};
-bool RTCPParseCommonHeader(const uint8_t* ptrDataBegin,
- const uint8_t* ptrDataEnd,
- RTCPCommonHeader& parsedHeader);
+bool RtcpParseCommonHeader(const uint8_t* buffer,
+ size_t size_bytes,
+ RtcpCommonHeader* parsed_header);
class RTCPParserV2 {
public:
@@ -347,10 +366,12 @@ class RTCPParserV2 {
RTCPPacketTypes PacketType() const;
const RTCPPacket& Packet() const;
+ rtcp::RtcpPacket* ReleaseRtcpPacket();
const RTCPRawPacket& RawPacket() const;
ptrdiff_t LengthLeft() const;
bool IsValid() const;
+ size_t NumSkippedBlocks() const;
RTCPPacketTypes Begin();
RTCPPacketTypes Iterate();
@@ -418,7 +439,7 @@ class RTCPParserV2 {
bool ParseXrVoipMetricItem(int block_length_4bytes);
bool ParseXrUnsupportedBlockType(int block_length_4bytes);
- bool ParseFBCommon(const RTCPCommonHeader& header);
+ bool ParseFBCommon(const RtcpCommonHeader& header);
bool ParseNACKItem();
bool ParseTMMBRItem();
bool ParseTMMBNItem();
@@ -428,7 +449,7 @@ class RTCPParserV2 {
bool ParsePsfbAppItem();
bool ParsePsfbREMBItem();
- bool ParseAPP(const RTCPCommonHeader& header);
+ bool ParseAPP(const RtcpCommonHeader& header);
bool ParseAPPItem();
private:
@@ -442,9 +463,11 @@ class RTCPParserV2 {
ParseState _state;
uint8_t _numberOfBlocks;
+ size_t num_skipped_blocks_;
RTCPPacketTypes _packetType;
RTCPPacket _packet;
+ rtc::scoped_ptr<webrtc::rtcp::RtcpPacket> rtcp_packet_;
};
class RTCPPacketIterator {
@@ -452,9 +475,9 @@ class RTCPPacketIterator {
RTCPPacketIterator(uint8_t* rtcpData, size_t rtcpDataLength);
~RTCPPacketIterator();
- const RTCPCommonHeader* Begin();
- const RTCPCommonHeader* Iterate();
- const RTCPCommonHeader* Current();
+ const RtcpCommonHeader* Begin();
+ const RtcpCommonHeader* Iterate();
+ const RtcpCommonHeader* Current();
private:
uint8_t* const _ptrBegin;
@@ -462,7 +485,7 @@ class RTCPPacketIterator {
uint8_t* _ptrBlock;
- RTCPCommonHeader _header;
+ RtcpCommonHeader _header;
};
} // RTCPUtility
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_utility_unittest.cc b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_utility_unittest.cc
index 275b007bef2..1a13812f020 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_utility_unittest.cc
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtcp_utility_unittest.cc
@@ -10,10 +10,16 @@
#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/checks.h"
+#include "webrtc/modules/rtp_rtcp/source/byte_io.h"
#include "webrtc/modules/rtp_rtcp/source/rtcp_utility.h"
namespace webrtc {
+using RTCPUtility::RtcpCommonHeader;
+
+namespace rtcp {
+
TEST(RtcpUtilityTest, MidNtp) {
const uint32_t kNtpSec = 0x12345678;
const uint32_t kNtpFrac = 0x23456789;
@@ -68,5 +74,88 @@ TEST(RtcpUtilityTest, NackRequestsWithWrap) {
EXPECT_EQ(8U, stats.requests());
}
+class RtcpParseCommonHeaderTest : public ::testing::Test {
+ public:
+ RtcpParseCommonHeaderTest() { memset(buffer, 0, kBufferCapacityBytes); }
+ virtual ~RtcpParseCommonHeaderTest() {}
+
+ protected:
+ static const size_t kBufferCapacityBytes = 40;
+ uint8_t buffer[kBufferCapacityBytes];
+ RtcpCommonHeader header;
+};
+
+TEST_F(RtcpParseCommonHeaderTest, TooSmallBuffer) {
+ // Buffer needs to be able to hold the header.
+ for (size_t i = 0; i < RtcpCommonHeader::kHeaderSizeBytes; ++i)
+ EXPECT_FALSE(RtcpParseCommonHeader(buffer, i, &header));
+}
+
+TEST_F(RtcpParseCommonHeaderTest, Version) {
+ // Version 2 is the only allowed for now.
+ for (int v = 0; v < 4; ++v) {
+ buffer[0] = v << 6;
+ EXPECT_EQ(v == 2, RtcpParseCommonHeader(
+ buffer, RtcpCommonHeader::kHeaderSizeBytes, &header));
+ }
+}
+
+TEST_F(RtcpParseCommonHeaderTest, PacketSize) {
+ // Set v = 2, leave p, fmt, pt as 0.
+ buffer[0] = 2 << 6;
+
+ const size_t kBlockSize = 3;
+ ByteWriter<uint16_t>::WriteBigEndian(&buffer[2], kBlockSize);
+ const size_t kSizeInBytes = (kBlockSize + 1) * 4;
+
+ EXPECT_FALSE(RtcpParseCommonHeader(buffer, kSizeInBytes - 1, &header));
+ EXPECT_TRUE(RtcpParseCommonHeader(buffer, kSizeInBytes, &header));
+}
+
+TEST_F(RtcpParseCommonHeaderTest, PayloadSize) {
+ // Set v = 2, p = 1, but leave fmt, pt as 0.
+ buffer[0] = (2 << 6) | (1 << 5);
+
+ // Padding bit set, but no byte for padding (can't specify padding length).
+ EXPECT_FALSE(RtcpParseCommonHeader(buffer, 4, &header));
+
+ const size_t kBlockSize = 3;
+ ByteWriter<uint16_t>::WriteBigEndian(&buffer[2], kBlockSize);
+ const size_t kSizeInBytes = (kBlockSize + 1) * 4;
+ const size_t kPayloadSizeBytes =
+ kSizeInBytes - RtcpCommonHeader::kHeaderSizeBytes;
+
+ // Padding one byte larger than possible.
+ buffer[kSizeInBytes - 1] = kPayloadSizeBytes + 1;
+ EXPECT_FALSE(RtcpParseCommonHeader(buffer, kSizeInBytes, &header));
+
+ // Pure padding packet?
+ buffer[kSizeInBytes - 1] = kPayloadSizeBytes;
+ EXPECT_TRUE(RtcpParseCommonHeader(buffer, kSizeInBytes, &header));
+ EXPECT_EQ(kPayloadSizeBytes, header.padding_bytes);
+ EXPECT_EQ(0u, header.payload_size_bytes);
+
+ // Single byte of actual data.
+ buffer[kSizeInBytes - 1] = kPayloadSizeBytes - 1;
+ EXPECT_TRUE(RtcpParseCommonHeader(buffer, kSizeInBytes, &header));
+ EXPECT_EQ(kPayloadSizeBytes - 1, header.padding_bytes);
+ EXPECT_EQ(1u, header.payload_size_bytes);
+}
+
+TEST_F(RtcpParseCommonHeaderTest, FormatAndPayloadType) {
+ // Format/count and packet type both set to max values.
+ const uint8_t kCountOrFormat = 0x1F;
+ const uint8_t kPacketType = 0xFF;
+ buffer[0] = 2 << 6; // V = 2.
+ buffer[0] |= kCountOrFormat;
+ buffer[1] = kPacketType;
+
+ EXPECT_TRUE(RtcpParseCommonHeader(buffer, RtcpCommonHeader::kHeaderSizeBytes,
+ &header));
+ EXPECT_EQ(kCountOrFormat, header.count_or_format);
+ EXPECT_EQ(kPacketType, header.packet_type);
+}
+
+} // namespace rtcp
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_format.cc b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_format.cc
index d03e38c3879..cdb9c4920e3 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_format.cc
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_format.cc
@@ -13,6 +13,7 @@
#include "webrtc/modules/rtp_rtcp/source/rtp_format_h264.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_format_video_generic.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_format_vp8.h"
+#include "webrtc/modules/rtp_rtcp/source/rtp_format_vp9.h"
namespace webrtc {
RtpPacketizer* RtpPacketizer::Create(RtpVideoCodecTypes type,
@@ -25,6 +26,9 @@ RtpPacketizer* RtpPacketizer::Create(RtpVideoCodecTypes type,
case kRtpVideoVp8:
assert(rtp_type_header != NULL);
return new RtpPacketizerVp8(rtp_type_header->VP8, max_payload_len);
+ case kRtpVideoVp9:
+ assert(rtp_type_header != NULL);
+ return new RtpPacketizerVp9(rtp_type_header->VP9, max_payload_len);
case kRtpVideoGeneric:
return new RtpPacketizerGeneric(frame_type, max_payload_len);
case kRtpVideoNone:
@@ -39,6 +43,8 @@ RtpDepacketizer* RtpDepacketizer::Create(RtpVideoCodecTypes type) {
return new RtpDepacketizerH264();
case kRtpVideoVp8:
return new RtpDepacketizerVp8();
+ case kRtpVideoVp9:
+ return new RtpDepacketizerVp9();
case kRtpVideoGeneric:
return new RtpDepacketizerGeneric();
case kRtpVideoNone:
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_format_h264.cc b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_format_h264.cc
index ba41c620c52..aeef44364a4 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_format_h264.cc
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_format_h264.cc
@@ -40,6 +40,23 @@ enum NalDefs { kFBit = 0x80, kNriMask = 0x60, kTypeMask = 0x1F };
// Bit masks for FU (A and B) headers.
enum FuDefs { kSBit = 0x80, kEBit = 0x40, kRBit = 0x20 };
+// TODO(pbos): Avoid parsing this here as well as inside the jitter buffer.
+bool VerifyStapANaluLengths(const uint8_t* nalu_ptr, size_t length_remaining) {
+ while (length_remaining > 0) {
+ // Buffer doesn't contain room for additional nalu length.
+ if (length_remaining < sizeof(uint16_t))
+ return false;
+ uint16_t nalu_size = nalu_ptr[0] << 8 | nalu_ptr[1];
+ nalu_ptr += sizeof(uint16_t);
+ length_remaining -= sizeof(uint16_t);
+ if (nalu_size > length_remaining)
+ return false;
+ nalu_ptr += nalu_size;
+ length_remaining -= nalu_size;
+ }
+ return true;
+}
+
bool ParseSingleNalu(RtpDepacketizer::ParsedPayload* parsed_payload,
const uint8_t* payload_data,
size_t payload_data_length) {
@@ -59,6 +76,11 @@ bool ParseSingleNalu(RtpDepacketizer::ParsedPayload* parsed_payload,
LOG(LS_ERROR) << "StapA header truncated.";
return false;
}
+ if (!VerifyStapANaluLengths(nalu_start, nalu_length)) {
+ LOG(LS_ERROR) << "StapA packet with incorrect NALU packet lengths.";
+ return false;
+ }
+
nal_type = payload_data[kStapAHeaderSize] & kTypeMask;
nalu_start += kStapAHeaderSize;
nalu_length -= kStapAHeaderSize;
@@ -131,8 +153,7 @@ RtpPacketizerH264::RtpPacketizerH264(FrameType frame_type,
size_t max_payload_len)
: payload_data_(NULL),
payload_size_(0),
- max_payload_len_(max_payload_len),
- frame_type_(frame_type) {
+ max_payload_len_(max_payload_len) {
}
RtpPacketizerH264::~RtpPacketizerH264() {
@@ -310,8 +331,7 @@ void RtpPacketizerH264::NextFragmentPacket(uint8_t* buffer,
}
ProtectionType RtpPacketizerH264::GetProtectionType() {
- return (frame_type_ == kVideoFrameKey) ? kProtectedPacket
- : kUnprotectedPacket;
+ return kProtectedPacket;
}
StorageType RtpPacketizerH264::GetStorageType(
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_format_h264.h b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_format_h264.h
index 4e92895c61d..e32433fe904 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_format_h264.h
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_format_h264.h
@@ -84,9 +84,8 @@ class RtpPacketizerH264 : public RtpPacketizer {
const size_t max_payload_len_;
RTPFragmentationHeader fragmentation_;
PacketQueue packets_;
- FrameType frame_type_;
- DISALLOW_COPY_AND_ASSIGN(RtpPacketizerH264);
+ RTC_DISALLOW_COPY_AND_ASSIGN(RtpPacketizerH264);
};
// Depacketizer for H264.
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_format_video_generic.h b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_format_video_generic.h
index 2e7bca5c482..3bf72e9dd35 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_format_video_generic.h
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_format_video_generic.h
@@ -59,7 +59,7 @@ class RtpPacketizerGeneric : public RtpPacketizer {
size_t payload_length_;
uint8_t generic_header_;
- DISALLOW_COPY_AND_ASSIGN(RtpPacketizerGeneric);
+ RTC_DISALLOW_COPY_AND_ASSIGN(RtpPacketizerGeneric);
};
// Depacketizer for generic codec.
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_format_vp8.cc b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_format_vp8.cc
index 1dc799968d0..19c82623c63 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_format_vp8.cc
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_format_vp8.cc
@@ -234,16 +234,16 @@ ProtectionType RtpPacketizerVp8::GetProtectionType() {
}
StorageType RtpPacketizerVp8::GetStorageType(uint32_t retransmission_settings) {
- StorageType storage = kAllowRetransmission;
if (hdr_info_.temporalIdx == 0 &&
!(retransmission_settings & kRetransmitBaseLayer)) {
- storage = kDontRetransmit;
- } else if (hdr_info_.temporalIdx != kNoTemporalIdx &&
+ return kDontRetransmit;
+ }
+ if (hdr_info_.temporalIdx != kNoTemporalIdx &&
hdr_info_.temporalIdx > 0 &&
!(retransmission_settings & kRetransmitHigherLayers)) {
- storage = kDontRetransmit;
+ return kDontRetransmit;
}
- return storage;
+ return kAllowRetransmission;
}
std::string RtpPacketizerVp8::ToString() {
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_format_vp8.h b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_format_vp8.h
index d73dfc1b508..63db349c742 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_format_vp8.h
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_format_vp8.h
@@ -212,7 +212,7 @@ class RtpPacketizerVp8 : public RtpPacketizer {
InfoQueue packets_;
bool packets_calculated_;
- DISALLOW_COPY_AND_ASSIGN(RtpPacketizerVp8);
+ RTC_DISALLOW_COPY_AND_ASSIGN(RtpPacketizerVp8);
};
// Depacketizer for VP8.
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_format_vp8_test_helper.h b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_format_vp8_test_helper.h
index 2454fb70cc5..2fe963251f8 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_format_vp8_test_helper.h
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_format_vp8_test_helper.h
@@ -65,7 +65,7 @@ class RtpFormatVp8TestHelper {
bool sloppy_partitioning_;
bool inited_;
- DISALLOW_COPY_AND_ASSIGN(RtpFormatVp8TestHelper);
+ RTC_DISALLOW_COPY_AND_ASSIGN(RtpFormatVp8TestHelper);
};
} // namespace test
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_format_vp9.cc b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_format_vp9.cc
new file mode 100644
index 00000000000..ed30fc1c718
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_format_vp9.cc
@@ -0,0 +1,765 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/rtp_rtcp/source/rtp_format_vp9.h"
+
+#include <assert.h>
+#include <string.h>
+
+#include <cmath>
+
+#include "webrtc/base/bitbuffer.h"
+#include "webrtc/base/checks.h"
+#include "webrtc/system_wrappers/interface/logging.h"
+
+#define RETURN_FALSE_ON_ERROR(x) \
+ if (!(x)) { \
+ return false; \
+ }
+
+namespace webrtc {
+namespace {
+// Length of VP9 payload descriptors' fixed part.
+const size_t kFixedPayloadDescriptorBytes = 1;
+
+// Packet fragmentation mode. If true, packets are split into (almost) equal
+// sizes. Otherwise, as many bytes as possible are fit into one packet.
+const bool kBalancedMode = true;
+
+const uint32_t kReservedBitValue0 = 0;
+
+uint8_t TemporalIdxField(const RTPVideoHeaderVP9& hdr, uint8_t def) {
+ return (hdr.temporal_idx == kNoTemporalIdx) ? def : hdr.temporal_idx;
+}
+
+uint8_t SpatialIdxField(const RTPVideoHeaderVP9& hdr, uint8_t def) {
+ return (hdr.spatial_idx == kNoSpatialIdx) ? def : hdr.spatial_idx;
+}
+
+int16_t Tl0PicIdxField(const RTPVideoHeaderVP9& hdr, uint8_t def) {
+ return (hdr.tl0_pic_idx == kNoTl0PicIdx) ? def : hdr.tl0_pic_idx;
+}
+
+uint8_t GofIdxField(const RTPVideoHeaderVP9& hdr, uint8_t def) {
+ return (hdr.gof_idx == kNoGofIdx) ? def : hdr.gof_idx;
+}
+
+// Picture ID:
+//
+// +-+-+-+-+-+-+-+-+
+// I: |M| PICTURE ID | M:0 => picture id is 7 bits.
+// +-+-+-+-+-+-+-+-+ M:1 => picture id is 15 bits.
+// M: | EXTENDED PID |
+// +-+-+-+-+-+-+-+-+
+//
+size_t PictureIdLength(const RTPVideoHeaderVP9& hdr) {
+ if (hdr.picture_id == kNoPictureId)
+ return 0;
+ return (hdr.max_picture_id == kMaxOneBytePictureId) ? 1 : 2;
+}
+
+bool PictureIdPresent(const RTPVideoHeaderVP9& hdr) {
+ return PictureIdLength(hdr) > 0;
+}
+
+// Layer indices:
+//
+// Flexible mode (F=1): Non-flexible mode (F=0):
+//
+// +-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+
+// L: | T |U| S |D| |GOF_IDX| S |D|
+// +-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+
+// | TL0PICIDX |
+// +-+-+-+-+-+-+-+-+
+//
+size_t LayerInfoLength(const RTPVideoHeaderVP9& hdr) {
+ if (hdr.flexible_mode) {
+ return (hdr.temporal_idx == kNoTemporalIdx &&
+ hdr.spatial_idx == kNoSpatialIdx) ? 0 : 1;
+ } else {
+ return (hdr.gof_idx == kNoGofIdx &&
+ hdr.spatial_idx == kNoSpatialIdx) ? 0 : 2;
+ }
+}
+
+bool LayerInfoPresent(const RTPVideoHeaderVP9& hdr) {
+ return LayerInfoLength(hdr) > 0;
+}
+
+// Reference indices:
+//
+// +-+-+-+-+-+-+-+-+ -| P=1,F=1: At least one reference index
+// P,F: | P_DIFF |X|N| . has to be specified.
+// +-+-+-+-+-+-+-+-+ . up to 3 times
+// X: |EXTENDED P_DIFF| . X=1: Extended P_DIFF is used (14
+// +-+-+-+-+-+-+-+-+ -| bits). Else 6 bits are used.
+// N=1: An additional P_DIFF follows
+// current P_DIFF.
+size_t RefIndicesLength(const RTPVideoHeaderVP9& hdr) {
+ if (!hdr.inter_pic_predicted || !hdr.flexible_mode)
+ return 0;
+
+ RTC_DCHECK_GT(hdr.num_ref_pics, 0U);
+ RTC_DCHECK_LE(hdr.num_ref_pics, kMaxVp9RefPics);
+ size_t length = 0;
+ for (size_t i = 0; i < hdr.num_ref_pics; ++i) {
+ length += hdr.pid_diff[i] > 0x3F ? 2 : 1; // P_DIFF > 6 bits => extended
+ }
+ return length;
+}
+
+// Scalability structure (SS).
+//
+// +-+-+-+-+-+-+-+-+
+// V: | N_S |Y| N_G |
+// +-+-+-+-+-+-+-+-+ -|
+// Y: | WIDTH | (OPTIONAL) .
+// + + .
+// | | (OPTIONAL) .
+// +-+-+-+-+-+-+-+-+ . N_S + 1 times
+// | HEIGHT | (OPTIONAL) .
+// + + .
+// | | (OPTIONAL) .
+// +-+-+-+-+-+-+-+-+ -| -|
+// N_G: | T |U| R |-|-| (OPTIONAL) .
+// +-+-+-+-+-+-+-+-+ -| . N_G + 1 times
+// | P_DIFF | (OPTIONAL) . R times .
+// +-+-+-+-+-+-+-+-+ -| -|
+//
+size_t SsDataLength(const RTPVideoHeaderVP9& hdr) {
+ if (!hdr.ss_data_available)
+ return 0;
+
+ RTC_DCHECK_GT(hdr.num_spatial_layers, 0U);
+ RTC_DCHECK_LE(hdr.num_spatial_layers, kMaxVp9NumberOfSpatialLayers);
+ RTC_DCHECK_GT(hdr.gof.num_frames_in_gof, 0U);
+ RTC_DCHECK_LE(hdr.gof.num_frames_in_gof, kMaxVp9FramesInGof);
+ size_t length = 1; // V
+ if (hdr.spatial_layer_resolution_present) {
+ length += 4 * hdr.num_spatial_layers; // Y
+ }
+ // N_G
+ length += hdr.gof.num_frames_in_gof; // T, U, R
+ for (size_t i = 0; i < hdr.gof.num_frames_in_gof; ++i) {
+ RTC_DCHECK_LE(hdr.gof.num_ref_pics[i], kMaxVp9RefPics);
+ length += hdr.gof.num_ref_pics[i]; // R times
+ }
+ return length;
+}
+
+size_t PayloadDescriptorLengthMinusSsData(const RTPVideoHeaderVP9& hdr) {
+ return kFixedPayloadDescriptorBytes + PictureIdLength(hdr) +
+ LayerInfoLength(hdr) + RefIndicesLength(hdr);
+}
+
+size_t PayloadDescriptorLength(const RTPVideoHeaderVP9& hdr) {
+ return PayloadDescriptorLengthMinusSsData(hdr) + SsDataLength(hdr);
+}
+
+void QueuePacket(size_t start_pos,
+ size_t size,
+ bool layer_begin,
+ bool layer_end,
+ RtpPacketizerVp9::PacketInfoQueue* packets) {
+ RtpPacketizerVp9::PacketInfo packet_info;
+ packet_info.payload_start_pos = start_pos;
+ packet_info.size = size;
+ packet_info.layer_begin = layer_begin;
+ packet_info.layer_end = layer_end;
+ packets->push(packet_info);
+}
+
+// Picture ID:
+//
+// +-+-+-+-+-+-+-+-+
+// I: |M| PICTURE ID | M:0 => picture id is 7 bits.
+// +-+-+-+-+-+-+-+-+ M:1 => picture id is 15 bits.
+// M: | EXTENDED PID |
+// +-+-+-+-+-+-+-+-+
+//
+bool WritePictureId(const RTPVideoHeaderVP9& vp9,
+ rtc::BitBufferWriter* writer) {
+ bool m_bit = (PictureIdLength(vp9) == 2);
+ RETURN_FALSE_ON_ERROR(writer->WriteBits(m_bit ? 1 : 0, 1));
+ RETURN_FALSE_ON_ERROR(writer->WriteBits(vp9.picture_id, m_bit ? 15 : 7));
+ return true;
+}
+
+// Layer indices:
+//
+// Flexible mode (F=1):
+//
+// +-+-+-+-+-+-+-+-+
+// L: | T |U| S |D|
+// +-+-+-+-+-+-+-+-+
+//
+bool WriteLayerInfoFlexibleMode(const RTPVideoHeaderVP9& vp9,
+ rtc::BitBufferWriter* writer) {
+ RETURN_FALSE_ON_ERROR(writer->WriteBits(TemporalIdxField(vp9, 0), 3));
+ RETURN_FALSE_ON_ERROR(writer->WriteBits(vp9.temporal_up_switch ? 1 : 0, 1));
+ RETURN_FALSE_ON_ERROR(writer->WriteBits(SpatialIdxField(vp9, 0), 3));
+ RETURN_FALSE_ON_ERROR(writer->WriteBits(vp9.inter_layer_predicted ? 1: 0, 1));
+ return true;
+}
+
+// Non-flexible mode (F=0):
+//
+// +-+-+-+-+-+-+-+-+
+// L: |GOF_IDX| S |D|
+// +-+-+-+-+-+-+-+-+
+// | TL0PICIDX |
+// +-+-+-+-+-+-+-+-+
+//
+bool WriteLayerInfoNonFlexibleMode(const RTPVideoHeaderVP9& vp9,
+ rtc::BitBufferWriter* writer) {
+ RETURN_FALSE_ON_ERROR(writer->WriteBits(GofIdxField(vp9, 0), 4));
+ RETURN_FALSE_ON_ERROR(writer->WriteBits(SpatialIdxField(vp9, 0), 3));
+ RETURN_FALSE_ON_ERROR(writer->WriteBits(vp9.inter_layer_predicted ? 1: 0, 1));
+ RETURN_FALSE_ON_ERROR(writer->WriteUInt8(Tl0PicIdxField(vp9, 0)));
+ return true;
+}
+
+bool WriteLayerInfo(const RTPVideoHeaderVP9& vp9,
+ rtc::BitBufferWriter* writer) {
+ if (vp9.flexible_mode) {
+ return WriteLayerInfoFlexibleMode(vp9, writer);
+ } else {
+ return WriteLayerInfoNonFlexibleMode(vp9, writer);
+ }
+}
+
+// Reference indices:
+//
+// +-+-+-+-+-+-+-+-+ -| P=1,F=1: At least one reference index
+// P,F: | P_DIFF |X|N| . has to be specified.
+// +-+-+-+-+-+-+-+-+ . up to 3 times
+// X: |EXTENDED P_DIFF| . X=1: Extended P_DIFF is used (14
+// +-+-+-+-+-+-+-+-+ -| bits). Else 6 bits are used.
+// N=1: An additional P_DIFF follows
+// current P_DIFF.
+bool WriteRefIndices(const RTPVideoHeaderVP9& vp9,
+ rtc::BitBufferWriter* writer) {
+ if (!PictureIdPresent(vp9) ||
+ vp9.num_ref_pics == 0 || vp9.num_ref_pics > kMaxVp9RefPics) {
+ return false;
+ }
+ for (size_t i = 0; i < vp9.num_ref_pics; ++i) {
+ bool x_bit = (vp9.pid_diff[i] > 0x3F);
+ bool n_bit = !(i == vp9.num_ref_pics - 1);
+ if (x_bit) {
+ RETURN_FALSE_ON_ERROR(writer->WriteBits(vp9.pid_diff[i] >> 8, 6));
+ RETURN_FALSE_ON_ERROR(writer->WriteBits(x_bit ? 1 : 0, 1));
+ RETURN_FALSE_ON_ERROR(writer->WriteBits(n_bit ? 1 : 0, 1));
+ RETURN_FALSE_ON_ERROR(writer->WriteUInt8(vp9.pid_diff[i]));
+ } else {
+ RETURN_FALSE_ON_ERROR(writer->WriteBits(vp9.pid_diff[i], 6));
+ RETURN_FALSE_ON_ERROR(writer->WriteBits(x_bit ? 1 : 0, 1));
+ RETURN_FALSE_ON_ERROR(writer->WriteBits(n_bit ? 1 : 0, 1));
+ }
+ }
+ return true;
+}
+
+// Scalability structure (SS).
+//
+// +-+-+-+-+-+-+-+-+
+// V: | N_S |Y| N_G |
+// +-+-+-+-+-+-+-+-+ -|
+// Y: | WIDTH | (OPTIONAL) .
+// + + .
+// | | (OPTIONAL) .
+// +-+-+-+-+-+-+-+-+ . N_S + 1 times
+// | HEIGHT | (OPTIONAL) .
+// + + .
+// | | (OPTIONAL) .
+// +-+-+-+-+-+-+-+-+ -| -|
+// N_G: | T |U| R |-|-| (OPTIONAL) .
+// +-+-+-+-+-+-+-+-+ -| . N_G + 1 times
+// | P_DIFF | (OPTIONAL) . R times .
+// +-+-+-+-+-+-+-+-+ -| -|
+//
+bool WriteSsData(const RTPVideoHeaderVP9& vp9, rtc::BitBufferWriter* writer) {
+ RTC_DCHECK_GT(vp9.num_spatial_layers, 0U);
+ RTC_DCHECK_LE(vp9.num_spatial_layers, kMaxVp9NumberOfSpatialLayers);
+ RTC_DCHECK_GT(vp9.gof.num_frames_in_gof, 0U);
+ RTC_DCHECK_LE(vp9.gof.num_frames_in_gof, kMaxVp9FramesInGof);
+
+ RETURN_FALSE_ON_ERROR(writer->WriteBits(vp9.num_spatial_layers - 1, 3));
+ RETURN_FALSE_ON_ERROR(
+ writer->WriteBits(vp9.spatial_layer_resolution_present ? 1 : 0, 1));
+ RETURN_FALSE_ON_ERROR(writer->WriteBits(vp9.gof.num_frames_in_gof - 1, 4));
+
+ if (vp9.spatial_layer_resolution_present) {
+ for (size_t i = 0; i < vp9.num_spatial_layers; ++i) {
+ RETURN_FALSE_ON_ERROR(writer->WriteUInt16(vp9.width[i]));
+ RETURN_FALSE_ON_ERROR(writer->WriteUInt16(vp9.height[i]));
+ }
+ }
+ for (size_t i = 0; i < vp9.gof.num_frames_in_gof; ++i) {
+ RETURN_FALSE_ON_ERROR(writer->WriteBits(vp9.gof.temporal_idx[i], 3));
+ RETURN_FALSE_ON_ERROR(
+ writer->WriteBits(vp9.gof.temporal_up_switch[i] ? 1 : 0, 1));
+ RETURN_FALSE_ON_ERROR(writer->WriteBits(vp9.gof.num_ref_pics[i], 2));
+ RETURN_FALSE_ON_ERROR(writer->WriteBits(kReservedBitValue0, 2));
+ for (size_t r = 0; r < vp9.gof.num_ref_pics[i]; ++r) {
+ RETURN_FALSE_ON_ERROR(writer->WriteUInt8(vp9.gof.pid_diff[i][r]));
+ }
+ }
+ return true;
+}
+
+// Picture ID:
+//
+// +-+-+-+-+-+-+-+-+
+// I: |M| PICTURE ID | M:0 => picture id is 7 bits.
+// +-+-+-+-+-+-+-+-+ M:1 => picture id is 15 bits.
+// M: | EXTENDED PID |
+// +-+-+-+-+-+-+-+-+
+//
+bool ParsePictureId(rtc::BitBuffer* parser, RTPVideoHeaderVP9* vp9) {
+ uint32_t picture_id;
+ uint32_t m_bit;
+ RETURN_FALSE_ON_ERROR(parser->ReadBits(&m_bit, 1));
+ if (m_bit) {
+ RETURN_FALSE_ON_ERROR(parser->ReadBits(&picture_id, 15));
+ vp9->max_picture_id = kMaxTwoBytePictureId;
+ } else {
+ RETURN_FALSE_ON_ERROR(parser->ReadBits(&picture_id, 7));
+ vp9->max_picture_id = kMaxOneBytePictureId;
+ }
+ vp9->picture_id = picture_id;
+ return true;
+}
+
+// Layer indices (flexible mode):
+//
+// +-+-+-+-+-+-+-+-+
+// L: | T |U| S |D|
+// +-+-+-+-+-+-+-+-+
+//
+bool ParseLayerInfoFlexibleMode(rtc::BitBuffer* parser,
+ RTPVideoHeaderVP9* vp9) {
+ uint32_t t, u_bit, s, d_bit;
+ RETURN_FALSE_ON_ERROR(parser->ReadBits(&t, 3));
+ RETURN_FALSE_ON_ERROR(parser->ReadBits(&u_bit, 1));
+ RETURN_FALSE_ON_ERROR(parser->ReadBits(&s, 3));
+ RETURN_FALSE_ON_ERROR(parser->ReadBits(&d_bit, 1));
+ vp9->temporal_idx = t;
+ vp9->temporal_up_switch = u_bit ? true : false;
+ vp9->spatial_idx = s;
+ vp9->inter_layer_predicted = d_bit ? true : false;
+ return true;
+}
+
+// Layer indices (non-flexible mode):
+//
+// +-+-+-+-+-+-+-+-+
+// L: |GOF_IDX| S |D|
+// +-+-+-+-+-+-+-+-+
+// | TL0PICIDX |
+// +-+-+-+-+-+-+-+-+
+//
+bool ParseLayerInfoNonFlexibleMode(rtc::BitBuffer* parser,
+ RTPVideoHeaderVP9* vp9) {
+ uint32_t gof_idx, s, d_bit;
+ uint8_t tl0picidx;
+ RETURN_FALSE_ON_ERROR(parser->ReadBits(&gof_idx, 4));
+ RETURN_FALSE_ON_ERROR(parser->ReadBits(&s, 3));
+ RETURN_FALSE_ON_ERROR(parser->ReadBits(&d_bit, 1));
+ RETURN_FALSE_ON_ERROR(parser->ReadUInt8(&tl0picidx));
+ vp9->gof_idx = gof_idx;
+ vp9->spatial_idx = s;
+ vp9->inter_layer_predicted = d_bit ? true : false;
+ vp9->tl0_pic_idx = tl0picidx;
+ return true;
+}
+
+bool ParseLayerInfo(rtc::BitBuffer* parser, RTPVideoHeaderVP9* vp9) {
+ if (vp9->flexible_mode) {
+ return ParseLayerInfoFlexibleMode(parser, vp9);
+ } else {
+ return ParseLayerInfoNonFlexibleMode(parser, vp9);
+ }
+}
+
+// Reference indices:
+//
+// +-+-+-+-+-+-+-+-+ -| P=1,F=1: At least one reference index
+// P,F: | P_DIFF |X|N| . has to be specified.
+// +-+-+-+-+-+-+-+-+ . up to 3 times
+// X: |EXTENDED P_DIFF| . X=1: Extended P_DIFF is used (14
+// +-+-+-+-+-+-+-+-+ -| bits). Else 6 bits are used.
+// N=1: An additional P_DIFF follows
+// current P_DIFF.
+bool ParseRefIndices(rtc::BitBuffer* parser, RTPVideoHeaderVP9* vp9) {
+ if (vp9->picture_id == kNoPictureId)
+ return false;
+
+ vp9->num_ref_pics = 0;
+ uint32_t n_bit;
+ do {
+ if (vp9->num_ref_pics == kMaxVp9RefPics)
+ return false;
+
+ uint32_t p_diff, x_bit;
+ RETURN_FALSE_ON_ERROR(parser->ReadBits(&p_diff, 6));
+ RETURN_FALSE_ON_ERROR(parser->ReadBits(&x_bit, 1));
+ RETURN_FALSE_ON_ERROR(parser->ReadBits(&n_bit, 1));
+
+ if (x_bit) {
+ // P_DIFF is 14 bits.
+ uint8_t ext_p_diff;
+ RETURN_FALSE_ON_ERROR(parser->ReadUInt8(&ext_p_diff));
+ p_diff = (p_diff << 8) + ext_p_diff;
+ }
+
+ vp9->pid_diff[vp9->num_ref_pics] = p_diff;
+ uint32_t scaled_pid = vp9->picture_id;
+ while (p_diff > scaled_pid) {
+ scaled_pid += vp9->max_picture_id + 1;
+ }
+ vp9->ref_picture_id[vp9->num_ref_pics++] = scaled_pid - p_diff;
+ } while (n_bit);
+
+ return true;
+}
+
+// Scalability structure (SS).
+//
+// +-+-+-+-+-+-+-+-+
+// V: | N_S |Y| N_G |
+// +-+-+-+-+-+-+-+-+ -|
+// Y: | WIDTH | (OPTIONAL) .
+// + + .
+// | | (OPTIONAL) .
+// +-+-+-+-+-+-+-+-+ . N_S + 1 times
+// | HEIGHT | (OPTIONAL) .
+// + + .
+// | | (OPTIONAL) .
+// +-+-+-+-+-+-+-+-+ -| -|
+// N_G: | T |U| R |-|-| (OPTIONAL) .
+// +-+-+-+-+-+-+-+-+ -| . N_G + 1 times
+// | P_DIFF | (OPTIONAL) . R times .
+// +-+-+-+-+-+-+-+-+ -| -|
+//
+bool ParseSsData(rtc::BitBuffer* parser, RTPVideoHeaderVP9* vp9) {
+ uint32_t n_s, y_bit, n_g;
+ RETURN_FALSE_ON_ERROR(parser->ReadBits(&n_s, 3));
+ RETURN_FALSE_ON_ERROR(parser->ReadBits(&y_bit, 1));
+ RETURN_FALSE_ON_ERROR(parser->ReadBits(&n_g, 4));
+ vp9->num_spatial_layers = n_s + 1;
+ vp9->spatial_layer_resolution_present = y_bit ? true : false;
+ vp9->gof.num_frames_in_gof = n_g + 1;
+
+ if (y_bit) {
+ for (size_t i = 0; i < vp9->num_spatial_layers; ++i) {
+ RETURN_FALSE_ON_ERROR(parser->ReadUInt16(&vp9->width[i]));
+ RETURN_FALSE_ON_ERROR(parser->ReadUInt16(&vp9->height[i]));
+ }
+ }
+ for (size_t i = 0; i < vp9->gof.num_frames_in_gof; ++i) {
+ uint32_t t, u_bit, r;
+ RETURN_FALSE_ON_ERROR(parser->ReadBits(&t, 3));
+ RETURN_FALSE_ON_ERROR(parser->ReadBits(&u_bit, 1));
+ RETURN_FALSE_ON_ERROR(parser->ReadBits(&r, 2));
+ RETURN_FALSE_ON_ERROR(parser->ConsumeBits(2));
+ vp9->gof.temporal_idx[i] = t;
+ vp9->gof.temporal_up_switch[i] = u_bit ? true : false;
+ vp9->gof.num_ref_pics[i] = r;
+
+ for (size_t p = 0; p < vp9->gof.num_ref_pics[i]; ++p) {
+ uint8_t p_diff;
+ RETURN_FALSE_ON_ERROR(parser->ReadUInt8(&p_diff));
+ vp9->gof.pid_diff[i][p] = p_diff;
+ }
+ }
+ return true;
+}
+
+// Gets the size of next payload chunk to send. Returns 0 on error.
+size_t CalcNextSize(size_t max_length, size_t rem_bytes) {
+ if (max_length == 0 || rem_bytes == 0) {
+ return 0;
+ }
+ if (kBalancedMode) {
+ size_t num_frags = std::ceil(static_cast<double>(rem_bytes) / max_length);
+ return static_cast<size_t>(
+ static_cast<double>(rem_bytes) / num_frags + 0.5);
+ }
+ return max_length >= rem_bytes ? rem_bytes : max_length;
+}
+} // namespace
+
+
+RtpPacketizerVp9::RtpPacketizerVp9(const RTPVideoHeaderVP9& hdr,
+ size_t max_payload_length)
+ : hdr_(hdr),
+ max_payload_length_(max_payload_length),
+ payload_(nullptr),
+ payload_size_(0) {
+}
+
+RtpPacketizerVp9::~RtpPacketizerVp9() {
+}
+
+ProtectionType RtpPacketizerVp9::GetProtectionType() {
+ bool protect =
+ hdr_.temporal_idx == 0 || hdr_.temporal_idx == kNoTemporalIdx;
+ return protect ? kProtectedPacket : kUnprotectedPacket;
+}
+
+StorageType RtpPacketizerVp9::GetStorageType(uint32_t retransmission_settings) {
+ StorageType storage = kAllowRetransmission;
+ if (hdr_.temporal_idx == 0 &&
+ !(retransmission_settings & kRetransmitBaseLayer)) {
+ storage = kDontRetransmit;
+ } else if (hdr_.temporal_idx != kNoTemporalIdx && hdr_.temporal_idx > 0 &&
+ !(retransmission_settings & kRetransmitHigherLayers)) {
+ storage = kDontRetransmit;
+ }
+ return storage;
+}
+
+std::string RtpPacketizerVp9::ToString() {
+ return "RtpPacketizerVp9";
+}
+
+void RtpPacketizerVp9::SetPayloadData(
+ const uint8_t* payload,
+ size_t payload_size,
+ const RTPFragmentationHeader* fragmentation) {
+ payload_ = payload;
+ payload_size_ = payload_size;
+ GeneratePackets();
+}
+
+void RtpPacketizerVp9::GeneratePackets() {
+ if (max_payload_length_ < PayloadDescriptorLength(hdr_) + 1) {
+ LOG(LS_ERROR) << "Payload header and one payload byte won't fit.";
+ return;
+ }
+ size_t bytes_processed = 0;
+ while (bytes_processed < payload_size_) {
+ size_t rem_bytes = payload_size_ - bytes_processed;
+ size_t rem_payload_len = max_payload_length_ -
+ (bytes_processed ? PayloadDescriptorLengthMinusSsData(hdr_)
+ : PayloadDescriptorLength(hdr_));
+
+ size_t packet_bytes = CalcNextSize(rem_payload_len, rem_bytes);
+ if (packet_bytes == 0) {
+ LOG(LS_ERROR) << "Failed to generate VP9 packets.";
+ while (!packets_.empty())
+ packets_.pop();
+ return;
+ }
+ QueuePacket(bytes_processed, packet_bytes, bytes_processed == 0,
+ rem_bytes == packet_bytes, &packets_);
+ bytes_processed += packet_bytes;
+ }
+ assert(bytes_processed == payload_size_);
+}
+
+bool RtpPacketizerVp9::NextPacket(uint8_t* buffer,
+ size_t* bytes_to_send,
+ bool* last_packet) {
+ if (packets_.empty()) {
+ return false;
+ }
+ PacketInfo packet_info = packets_.front();
+ packets_.pop();
+
+ if (!WriteHeaderAndPayload(packet_info, buffer, bytes_to_send)) {
+ return false;
+ }
+ *last_packet =
+ packets_.empty() && (hdr_.spatial_idx == kNoSpatialIdx ||
+ hdr_.spatial_idx == hdr_.num_spatial_layers - 1);
+ return true;
+}
+
+// VP9 format:
+//
+// Payload descriptor for F = 1 (flexible mode)
+// 0 1 2 3 4 5 6 7
+// +-+-+-+-+-+-+-+-+
+// |I|P|L|F|B|E|V|-| (REQUIRED)
+// +-+-+-+-+-+-+-+-+
+// I: |M| PICTURE ID | (RECOMMENDED)
+// +-+-+-+-+-+-+-+-+
+// M: | EXTENDED PID | (RECOMMENDED)
+// +-+-+-+-+-+-+-+-+
+// L: | T |U| S |D| (CONDITIONALLY RECOMMENDED)
+// +-+-+-+-+-+-+-+-+ -|
+// P,F: | P_DIFF |X|N| (CONDITIONALLY RECOMMENDED) .
+// +-+-+-+-+-+-+-+-+ . up to 3 times
+// X: |EXTENDED P_DIFF| .
+// +-+-+-+-+-+-+-+-+ -|
+// V: | SS |
+// | .. |
+// +-+-+-+-+-+-+-+-+
+//
+// Payload descriptor for F = 0 (non-flexible mode)
+// 0 1 2 3 4 5 6 7
+// +-+-+-+-+-+-+-+-+
+// |I|P|L|F|B|E|V|-| (REQUIRED)
+// +-+-+-+-+-+-+-+-+
+// I: |M| PICTURE ID | (RECOMMENDED)
+// +-+-+-+-+-+-+-+-+
+// M: | EXTENDED PID | (RECOMMENDED)
+// +-+-+-+-+-+-+-+-+
+// L: |GOF_IDX| S |D| (CONDITIONALLY RECOMMENDED)
+// +-+-+-+-+-+-+-+-+
+// | TL0PICIDX | (CONDITIONALLY REQUIRED)
+// +-+-+-+-+-+-+-+-+
+// V: | SS |
+// | .. |
+// +-+-+-+-+-+-+-+-+
+
+bool RtpPacketizerVp9::WriteHeaderAndPayload(const PacketInfo& packet_info,
+ uint8_t* buffer,
+ size_t* bytes_to_send) const {
+ size_t header_length;
+ if (!WriteHeader(packet_info, buffer, &header_length))
+ return false;
+
+ // Copy payload data.
+ memcpy(&buffer[header_length],
+ &payload_[packet_info.payload_start_pos], packet_info.size);
+
+ *bytes_to_send = header_length + packet_info.size;
+ return true;
+}
+
+bool RtpPacketizerVp9::WriteHeader(const PacketInfo& packet_info,
+ uint8_t* buffer,
+ size_t* header_length) const {
+ // Required payload descriptor byte.
+ bool i_bit = PictureIdPresent(hdr_);
+ bool p_bit = hdr_.inter_pic_predicted;
+ bool l_bit = LayerInfoPresent(hdr_);
+ bool f_bit = hdr_.flexible_mode;
+ bool b_bit = packet_info.layer_begin;
+ bool e_bit = packet_info.layer_end;
+ bool v_bit = hdr_.ss_data_available && b_bit;
+
+ rtc::BitBufferWriter writer(buffer, max_payload_length_);
+ RETURN_FALSE_ON_ERROR(writer.WriteBits(i_bit ? 1 : 0, 1));
+ RETURN_FALSE_ON_ERROR(writer.WriteBits(p_bit ? 1 : 0, 1));
+ RETURN_FALSE_ON_ERROR(writer.WriteBits(l_bit ? 1 : 0, 1));
+ RETURN_FALSE_ON_ERROR(writer.WriteBits(f_bit ? 1 : 0, 1));
+ RETURN_FALSE_ON_ERROR(writer.WriteBits(b_bit ? 1 : 0, 1));
+ RETURN_FALSE_ON_ERROR(writer.WriteBits(e_bit ? 1 : 0, 1));
+ RETURN_FALSE_ON_ERROR(writer.WriteBits(v_bit ? 1 : 0, 1));
+ RETURN_FALSE_ON_ERROR(writer.WriteBits(kReservedBitValue0, 1));
+
+ // Add fields that are present.
+ if (i_bit && !WritePictureId(hdr_, &writer)) {
+ LOG(LS_ERROR) << "Failed writing VP9 picture id.";
+ return false;
+ }
+ if (l_bit && !WriteLayerInfo(hdr_, &writer)) {
+ LOG(LS_ERROR) << "Failed writing VP9 layer info.";
+ return false;
+ }
+ if (p_bit && f_bit && !WriteRefIndices(hdr_, &writer)) {
+ LOG(LS_ERROR) << "Failed writing VP9 ref indices.";
+ return false;
+ }
+ if (v_bit && !WriteSsData(hdr_, &writer)) {
+ LOG(LS_ERROR) << "Failed writing VP9 SS data.";
+ return false;
+ }
+
+ size_t offset_bytes = 0;
+ size_t offset_bits = 0;
+ writer.GetCurrentOffset(&offset_bytes, &offset_bits);
+ assert(offset_bits == 0);
+
+ *header_length = offset_bytes;
+ return true;
+}
+
+bool RtpDepacketizerVp9::Parse(ParsedPayload* parsed_payload,
+ const uint8_t* payload,
+ size_t payload_length) {
+ assert(parsed_payload != nullptr);
+ if (payload_length == 0) {
+ LOG(LS_ERROR) << "Payload length is zero.";
+ return false;
+ }
+
+ // Parse mandatory first byte of payload descriptor.
+ rtc::BitBuffer parser(payload, payload_length);
+ uint32_t i_bit, p_bit, l_bit, f_bit, b_bit, e_bit, v_bit;
+ RETURN_FALSE_ON_ERROR(parser.ReadBits(&i_bit, 1));
+ RETURN_FALSE_ON_ERROR(parser.ReadBits(&p_bit, 1));
+ RETURN_FALSE_ON_ERROR(parser.ReadBits(&l_bit, 1));
+ RETURN_FALSE_ON_ERROR(parser.ReadBits(&f_bit, 1));
+ RETURN_FALSE_ON_ERROR(parser.ReadBits(&b_bit, 1));
+ RETURN_FALSE_ON_ERROR(parser.ReadBits(&e_bit, 1));
+ RETURN_FALSE_ON_ERROR(parser.ReadBits(&v_bit, 1));
+ RETURN_FALSE_ON_ERROR(parser.ConsumeBits(1));
+
+ // Parsed payload.
+ parsed_payload->type.Video.width = 0;
+ parsed_payload->type.Video.height = 0;
+ parsed_payload->type.Video.simulcastIdx = 0;
+ parsed_payload->type.Video.codec = kRtpVideoVp9;
+
+ parsed_payload->frame_type = p_bit ? kVideoFrameDelta : kVideoFrameKey;
+
+ RTPVideoHeaderVP9* vp9 = &parsed_payload->type.Video.codecHeader.VP9;
+ vp9->InitRTPVideoHeaderVP9();
+ vp9->inter_pic_predicted = p_bit ? true : false;
+ vp9->flexible_mode = f_bit ? true : false;
+ vp9->beginning_of_frame = b_bit ? true : false;
+ vp9->end_of_frame = e_bit ? true : false;
+ vp9->ss_data_available = v_bit ? true : false;
+ vp9->spatial_idx = 0;
+
+ // Parse fields that are present.
+ if (i_bit && !ParsePictureId(&parser, vp9)) {
+ LOG(LS_ERROR) << "Failed parsing VP9 picture id.";
+ return false;
+ }
+ if (l_bit && !ParseLayerInfo(&parser, vp9)) {
+ LOG(LS_ERROR) << "Failed parsing VP9 layer info.";
+ return false;
+ }
+ if (p_bit && f_bit && !ParseRefIndices(&parser, vp9)) {
+ LOG(LS_ERROR) << "Failed parsing VP9 ref indices.";
+ return false;
+ }
+ if (v_bit) {
+ if (!ParseSsData(&parser, vp9)) {
+ LOG(LS_ERROR) << "Failed parsing VP9 SS data.";
+ return false;
+ }
+ if (vp9->spatial_layer_resolution_present) {
+ // TODO(asapersson): Add support for spatial layers.
+ parsed_payload->type.Video.width = vp9->width[0];
+ parsed_payload->type.Video.height = vp9->height[0];
+ }
+ }
+ parsed_payload->type.Video.isFirstPacket = b_bit && (vp9->spatial_idx == 0);
+
+ uint64_t rem_bits = parser.RemainingBitCount();
+ assert(rem_bits % 8 == 0);
+ parsed_payload->payload_length = rem_bits / 8;
+ if (parsed_payload->payload_length == 0) {
+ LOG(LS_ERROR) << "Failed parsing VP9 payload data.";
+ return false;
+ }
+ parsed_payload->payload =
+ payload + payload_length - parsed_payload->payload_length;
+
+ return true;
+}
+} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_format_vp9.h b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_format_vp9.h
new file mode 100644
index 00000000000..abce7e77919
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_format_vp9.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+//
+// This file contains the declaration of the VP9 packetizer class.
+// A packetizer object is created for each encoded video frame. The
+// constructor is called with the payload data and size.
+//
+// After creating the packetizer, the method NextPacket is called
+// repeatedly to get all packets for the frame. The method returns
+// false as long as there are more packets left to fetch.
+//
+
+#ifndef WEBRTC_MODULES_RTP_RTCP_SOURCE_RTP_FORMAT_VP9_H_
+#define WEBRTC_MODULES_RTP_RTCP_SOURCE_RTP_FORMAT_VP9_H_
+
+#include <queue>
+#include <string>
+
+#include "webrtc/base/constructormagic.h"
+#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/rtp_rtcp/source/rtp_format.h"
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+
+class RtpPacketizerVp9 : public RtpPacketizer {
+ public:
+ RtpPacketizerVp9(const RTPVideoHeaderVP9& hdr, size_t max_payload_length);
+
+ virtual ~RtpPacketizerVp9();
+
+ ProtectionType GetProtectionType() override;
+
+ StorageType GetStorageType(uint32_t retransmission_settings) override;
+
+ std::string ToString() override;
+
+ // The payload data must be one encoded VP9 frame.
+ void SetPayloadData(const uint8_t* payload,
+ size_t payload_size,
+ const RTPFragmentationHeader* fragmentation) override;
+
+ // Gets the next payload with VP9 payload header.
+ // |buffer| is a pointer to where the output will be written.
+ // |bytes_to_send| is an output variable that will contain number of bytes
+ // written to buffer.
+ // |last_packet| is true for the last packet of the frame, false otherwise
+ // (i.e. call the function again to get the next packet).
+ // Returns true on success, false otherwise.
+ bool NextPacket(uint8_t* buffer,
+ size_t* bytes_to_send,
+ bool* last_packet) override;
+
+ typedef struct {
+ size_t payload_start_pos;
+ size_t size;
+ bool layer_begin;
+ bool layer_end;
+ } PacketInfo;
+ typedef std::queue<PacketInfo> PacketInfoQueue;
+
+ private:
+ // Calculates all packet sizes and loads info to packet queue.
+ void GeneratePackets();
+
+ // Writes the payload descriptor header and copies payload to the |buffer|.
+ // |packet_info| determines which part of the payload to write.
+ // |bytes_to_send| contains the number of written bytes to the buffer.
+ // Returns true on success, false otherwise.
+ bool WriteHeaderAndPayload(const PacketInfo& packet_info,
+ uint8_t* buffer,
+ size_t* bytes_to_send) const;
+
+ // Writes payload descriptor header to |buffer|.
+ // Returns true on success, false otherwise.
+ bool WriteHeader(const PacketInfo& packet_info,
+ uint8_t* buffer,
+ size_t* header_length) const;
+
+ const RTPVideoHeaderVP9 hdr_;
+ const size_t max_payload_length_; // The max length in bytes of one packet.
+ const uint8_t* payload_; // The payload data to be packetized.
+ size_t payload_size_; // The size in bytes of the payload data.
+ PacketInfoQueue packets_;
+
+ RTC_DISALLOW_COPY_AND_ASSIGN(RtpPacketizerVp9);
+};
+
+
+class RtpDepacketizerVp9 : public RtpDepacketizer {
+ public:
+ virtual ~RtpDepacketizerVp9() {}
+
+ bool Parse(ParsedPayload* parsed_payload,
+ const uint8_t* payload,
+ size_t payload_length) override;
+};
+
+} // namespace webrtc
+#endif // WEBRTC_MODULES_RTP_RTCP_SOURCE_RTP_FORMAT_VP9_H_
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_format_vp9_unittest.cc b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_format_vp9_unittest.cc
new file mode 100644
index 00000000000..fad0d1b4355
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_format_vp9_unittest.cc
@@ -0,0 +1,662 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <vector>
+
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/modules/rtp_rtcp/source/rtp_format_vp9.h"
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+namespace {
+void VerifyHeader(const RTPVideoHeaderVP9& expected,
+ const RTPVideoHeaderVP9& actual) {
+ EXPECT_EQ(expected.inter_layer_predicted, actual.inter_layer_predicted);
+ EXPECT_EQ(expected.inter_pic_predicted, actual.inter_pic_predicted);
+ EXPECT_EQ(expected.flexible_mode, actual.flexible_mode);
+ EXPECT_EQ(expected.beginning_of_frame, actual.beginning_of_frame);
+ EXPECT_EQ(expected.end_of_frame, actual.end_of_frame);
+ EXPECT_EQ(expected.ss_data_available, actual.ss_data_available);
+ EXPECT_EQ(expected.picture_id, actual.picture_id);
+ EXPECT_EQ(expected.max_picture_id, actual.max_picture_id);
+ EXPECT_EQ(expected.temporal_idx, actual.temporal_idx);
+ EXPECT_EQ(expected.spatial_idx == kNoSpatialIdx ? 0 : expected.spatial_idx,
+ actual.spatial_idx);
+ EXPECT_EQ(expected.gof_idx, actual.gof_idx);
+ EXPECT_EQ(expected.tl0_pic_idx, actual.tl0_pic_idx);
+ EXPECT_EQ(expected.temporal_up_switch, actual.temporal_up_switch);
+
+ EXPECT_EQ(expected.num_ref_pics, actual.num_ref_pics);
+ for (uint8_t i = 0; i < expected.num_ref_pics; ++i) {
+ EXPECT_EQ(expected.pid_diff[i], actual.pid_diff[i]);
+ EXPECT_EQ(expected.ref_picture_id[i], actual.ref_picture_id[i]);
+ }
+ if (expected.ss_data_available) {
+ EXPECT_EQ(expected.spatial_layer_resolution_present,
+ actual.spatial_layer_resolution_present);
+ EXPECT_EQ(expected.num_spatial_layers, actual.num_spatial_layers);
+ if (expected.spatial_layer_resolution_present) {
+ for (size_t i = 0; i < expected.num_spatial_layers; i++) {
+ EXPECT_EQ(expected.width[i], actual.width[i]);
+ EXPECT_EQ(expected.height[i], actual.height[i]);
+ }
+ }
+ EXPECT_EQ(expected.gof.num_frames_in_gof, actual.gof.num_frames_in_gof);
+ for (size_t i = 0; i < expected.gof.num_frames_in_gof; i++) {
+ EXPECT_EQ(expected.gof.temporal_up_switch[i],
+ actual.gof.temporal_up_switch[i]);
+ EXPECT_EQ(expected.gof.temporal_idx[i], actual.gof.temporal_idx[i]);
+ EXPECT_EQ(expected.gof.num_ref_pics[i], actual.gof.num_ref_pics[i]);
+ for (size_t j = 0; j < expected.gof.num_ref_pics[i]; j++) {
+ EXPECT_EQ(expected.gof.pid_diff[i][j], actual.gof.pid_diff[i][j]);
+ }
+ }
+ }
+}
+
+void VerifyPayload(const RtpDepacketizer::ParsedPayload& parsed,
+ const uint8_t* payload,
+ size_t payload_length) {
+ EXPECT_EQ(payload, parsed.payload);
+ EXPECT_EQ(payload_length, parsed.payload_length);
+ EXPECT_THAT(std::vector<uint8_t>(parsed.payload,
+ parsed.payload + parsed.payload_length),
+ ::testing::ElementsAreArray(payload, payload_length));
+}
+
+void ParseAndCheckPacket(const uint8_t* packet,
+ const RTPVideoHeaderVP9& expected,
+ size_t expected_hdr_length,
+ size_t expected_length) {
+ rtc::scoped_ptr<RtpDepacketizer> depacketizer(new RtpDepacketizerVp9());
+ RtpDepacketizer::ParsedPayload parsed;
+ ASSERT_TRUE(depacketizer->Parse(&parsed, packet, expected_length));
+ EXPECT_EQ(kRtpVideoVp9, parsed.type.Video.codec);
+ VerifyHeader(expected, parsed.type.Video.codecHeader.VP9);
+ const size_t kExpectedPayloadLength = expected_length - expected_hdr_length;
+ VerifyPayload(parsed, packet + expected_hdr_length, kExpectedPayloadLength);
+}
+} // namespace
+
+// Payload descriptor for flexible mode
+// 0 1 2 3 4 5 6 7
+// +-+-+-+-+-+-+-+-+
+// |I|P|L|F|B|E|V|-| (REQUIRED)
+// +-+-+-+-+-+-+-+-+
+// I: |M| PICTURE ID | (RECOMMENDED)
+// +-+-+-+-+-+-+-+-+
+// M: | EXTENDED PID | (RECOMMENDED)
+// +-+-+-+-+-+-+-+-+
+// L: | T |U| S |D| (CONDITIONALLY RECOMMENDED)
+// +-+-+-+-+-+-+-+-+ -|
+// P,F: | P_DIFF |X|N| (CONDITIONALLY RECOMMENDED) .
+// +-+-+-+-+-+-+-+-+ . up to 3 times
+// X: |EXTENDED P_DIFF| (OPTIONAL) .
+// +-+-+-+-+-+-+-+-+ -|
+// V: | SS |
+// | .. |
+// +-+-+-+-+-+-+-+-+
+//
+// Payload descriptor for non-flexible mode
+// 0 1 2 3 4 5 6 7
+// +-+-+-+-+-+-+-+-+
+// |I|P|L|F|B|E|V|-| (REQUIRED)
+// +-+-+-+-+-+-+-+-+
+// I: |M| PICTURE ID | (RECOMMENDED)
+// +-+-+-+-+-+-+-+-+
+// M: | EXTENDED PID | (RECOMMENDED)
+// +-+-+-+-+-+-+-+-+
+// L: |GOF_IDX| S |D| (CONDITIONALLY RECOMMENDED)
+// +-+-+-+-+-+-+-+-+
+// | TL0PICIDX | (CONDITIONALLY REQUIRED)
+// +-+-+-+-+-+-+-+-+
+// V: | SS |
+// | .. |
+// +-+-+-+-+-+-+-+-+
+
+class RtpPacketizerVp9Test : public ::testing::Test {
+ protected:
+ RtpPacketizerVp9Test() {}
+ virtual void SetUp() {
+ expected_.InitRTPVideoHeaderVP9();
+ }
+
+ rtc::scoped_ptr<uint8_t[]> packet_;
+ rtc::scoped_ptr<uint8_t[]> payload_;
+ size_t payload_size_;
+ size_t payload_pos_;
+ RTPVideoHeaderVP9 expected_;
+ rtc::scoped_ptr<RtpPacketizerVp9> packetizer_;
+
+ void Init(size_t payload_size, size_t packet_size) {
+ payload_.reset(new uint8_t[payload_size]);
+ memset(payload_.get(), 7, payload_size);
+ payload_size_ = payload_size;
+ payload_pos_ = 0;
+ packetizer_.reset(new RtpPacketizerVp9(expected_, packet_size));
+ packetizer_->SetPayloadData(payload_.get(), payload_size_, NULL);
+
+ const int kMaxPayloadDescriptorLength = 100;
+ packet_.reset(new uint8_t[payload_size_ + kMaxPayloadDescriptorLength]);
+ }
+
+ void CheckPayload(const uint8_t* packet,
+ size_t start_pos,
+ size_t end_pos,
+ bool last) {
+ for (size_t i = start_pos; i < end_pos; ++i) {
+ EXPECT_EQ(packet[i], payload_[payload_pos_++]);
+ }
+ EXPECT_EQ(last, payload_pos_ == payload_size_);
+ }
+
+ void CreateParseAndCheckPackets(const size_t* expected_hdr_sizes,
+ const size_t* expected_sizes,
+ size_t expected_num_packets) {
+ ASSERT_TRUE(packetizer_.get() != NULL);
+ size_t length = 0;
+ bool last = false;
+ if (expected_num_packets == 0) {
+ EXPECT_FALSE(packetizer_->NextPacket(packet_.get(), &length, &last));
+ return;
+ }
+ for (size_t i = 0; i < expected_num_packets; ++i) {
+ EXPECT_TRUE(packetizer_->NextPacket(packet_.get(), &length, &last));
+ EXPECT_EQ(expected_sizes[i], length);
+ RTPVideoHeaderVP9 hdr = expected_;
+ hdr.beginning_of_frame = (i == 0);
+ hdr.end_of_frame = last;
+ ParseAndCheckPacket(packet_.get(), hdr, expected_hdr_sizes[i], length);
+ CheckPayload(packet_.get(), expected_hdr_sizes[i], length, last);
+ }
+ EXPECT_TRUE(last);
+ }
+};
+
+TEST_F(RtpPacketizerVp9Test, TestEqualSizedMode_OnePacket) {
+ const size_t kFrameSize = 25;
+ const size_t kPacketSize = 26;
+ Init(kFrameSize, kPacketSize);
+
+ // One packet:
+ // I:0, P:0, L:0, F:0, B:1, E:1, V:0 (1hdr + 25 payload)
+ const size_t kExpectedHdrSizes[] = {1};
+ const size_t kExpectedSizes[] = {26};
+ const size_t kExpectedNum = GTEST_ARRAY_SIZE_(kExpectedSizes);
+ CreateParseAndCheckPackets(kExpectedHdrSizes, kExpectedSizes, kExpectedNum);
+}
+
+TEST_F(RtpPacketizerVp9Test, TestEqualSizedMode_TwoPackets) {
+ const size_t kFrameSize = 27;
+ const size_t kPacketSize = 27;
+ Init(kFrameSize, kPacketSize);
+
+ // Two packets:
+ // I:0, P:0, L:0, F:0, B:1, E:0, V:0 (1hdr + 14 payload)
+ // I:0, P:0, L:0, F:0, B:0, E:1, V:0 (1hdr + 13 payload)
+ const size_t kExpectedHdrSizes[] = {1, 1};
+ const size_t kExpectedSizes[] = {15, 14};
+ const size_t kExpectedNum = GTEST_ARRAY_SIZE_(kExpectedSizes);
+ CreateParseAndCheckPackets(kExpectedHdrSizes, kExpectedSizes, kExpectedNum);
+}
+
+TEST_F(RtpPacketizerVp9Test, TestTooShortBufferToFitPayload) {
+ const size_t kFrameSize = 1;
+ const size_t kPacketSize = 1;
+ Init(kFrameSize, kPacketSize); // 1hdr + 1 payload
+
+ const size_t kExpectedNum = 0;
+ CreateParseAndCheckPackets(NULL, NULL, kExpectedNum);
+}
+
+TEST_F(RtpPacketizerVp9Test, TestOneBytePictureId) {
+ const size_t kFrameSize = 30;
+ const size_t kPacketSize = 12;
+
+ expected_.picture_id = kMaxOneBytePictureId; // 2 byte payload descriptor
+ expected_.max_picture_id = kMaxOneBytePictureId;
+ Init(kFrameSize, kPacketSize);
+
+ // Three packets:
+ // I:1, P:0, L:0, F:0, B:1, E:0, V:0 (2hdr + 10 payload)
+ // I:1, P:0, L:0, F:0, B:0, E:0, V:0 (2hdr + 10 payload)
+ // I:1, P:0, L:0, F:0, B:0, E:1, V:0 (2hdr + 10 payload)
+ const size_t kExpectedHdrSizes[] = {2, 2, 2};
+ const size_t kExpectedSizes[] = {12, 12, 12};
+ const size_t kExpectedNum = GTEST_ARRAY_SIZE_(kExpectedSizes);
+ CreateParseAndCheckPackets(kExpectedHdrSizes, kExpectedSizes, kExpectedNum);
+}
+
+TEST_F(RtpPacketizerVp9Test, TestTwoBytePictureId) {
+ const size_t kFrameSize = 31;
+ const size_t kPacketSize = 13;
+
+ expected_.picture_id = kMaxTwoBytePictureId; // 3 byte payload descriptor
+ Init(kFrameSize, kPacketSize);
+
+ // Four packets:
+ // I:1, P:0, L:0, F:0, B:1, E:0, V:0 (3hdr + 8 payload)
+ // I:1, P:0, L:0, F:0, B:0, E:0, V:0 (3hdr + 8 payload)
+ // I:1, P:0, L:0, F:0, B:0, E:0, V:0 (3hdr + 8 payload)
+ // I:1, P:0, L:0, F:0, B:0, E:1, V:0 (3hdr + 7 payload)
+ const size_t kExpectedHdrSizes[] = {3, 3, 3, 3};
+ const size_t kExpectedSizes[] = {11, 11, 11, 10};
+ const size_t kExpectedNum = GTEST_ARRAY_SIZE_(kExpectedSizes);
+ CreateParseAndCheckPackets(kExpectedHdrSizes, kExpectedSizes, kExpectedNum);
+}
+
+TEST_F(RtpPacketizerVp9Test, TestLayerInfoWithNonFlexibleMode) {
+ const size_t kFrameSize = 30;
+ const size_t kPacketSize = 25;
+
+ expected_.gof_idx = 3;
+ expected_.num_spatial_layers = 3;
+ expected_.spatial_idx = 2;
+ expected_.inter_layer_predicted = true; // D
+ expected_.tl0_pic_idx = 117;
+ Init(kFrameSize, kPacketSize);
+
+ // Two packets:
+ // | I:0, P:0, L:1, F:0, B:1, E:0, V:0 | (3hdr + 15 payload)
+ // L: | GOF_IDX:3, S:2, D:1 | TL0PICIDX:117 |
+ // | I:0, P:0, L:1, F:0, B:0, E:1, V:0 | (3hdr + 15 payload)
+ // L: | GOF_IDX:3, S:2, D:1 | TL0PICIDX:117 |
+ const size_t kExpectedHdrSizes[] = {3, 3};
+ const size_t kExpectedSizes[] = {18, 18};
+ const size_t kExpectedNum = GTEST_ARRAY_SIZE_(kExpectedSizes);
+ CreateParseAndCheckPackets(kExpectedHdrSizes, kExpectedSizes, kExpectedNum);
+}
+
+TEST_F(RtpPacketizerVp9Test, TestLayerInfoWithFlexibleMode) {
+ const size_t kFrameSize = 21;
+ const size_t kPacketSize = 23;
+
+ expected_.flexible_mode = true;
+ expected_.temporal_idx = 3;
+ expected_.temporal_up_switch = true; // U
+ expected_.num_spatial_layers = 3;
+ expected_.spatial_idx = 2;
+ expected_.inter_layer_predicted = false; // D
+ Init(kFrameSize, kPacketSize);
+
+ // One packet:
+ // I:0, P:0, L:1, F:1, B:1, E:1, V:0 (2hdr + 21 payload)
+ // L: T:3, U:1, S:2, D:0
+ const size_t kExpectedHdrSizes[] = {2};
+ const size_t kExpectedSizes[] = {23};
+ const size_t kExpectedNum = GTEST_ARRAY_SIZE_(kExpectedSizes);
+ CreateParseAndCheckPackets(kExpectedHdrSizes, kExpectedSizes, kExpectedNum);
+}
+
+TEST_F(RtpPacketizerVp9Test, TestRefIdx) {
+ const size_t kFrameSize = 16;
+ const size_t kPacketSize = 22;
+
+ expected_.inter_pic_predicted = true; // P
+ expected_.flexible_mode = true; // F
+ expected_.picture_id = 100;
+ expected_.num_ref_pics = 2;
+ expected_.pid_diff[0] = 3;
+ expected_.pid_diff[1] = 1171;
+ expected_.ref_picture_id[0] = 97; // 100 - 3 = 97
+ expected_.ref_picture_id[1] = 31697; // 0x7FFF + 1 + 100 - 1171 = 31697
+ Init(kFrameSize, kPacketSize);
+
+ // Two packets:
+ // I:1, P:1, L:0, F:1, B:1, E:1, V:0 (6hdr + 16 payload)
+ // I: 100 (2 bytes)
+ // P,F: P_DIFF:3, X:0, N:1
+ // P_DIFF:1171, X:1, N:0 (2 bytes)
+ const size_t kExpectedHdrSizes[] = {6};
+ const size_t kExpectedSizes[] = {22};
+ const size_t kExpectedNum = GTEST_ARRAY_SIZE_(kExpectedSizes);
+ CreateParseAndCheckPackets(kExpectedHdrSizes, kExpectedSizes, kExpectedNum);
+}
+
+TEST_F(RtpPacketizerVp9Test, TestRefIdxFailsWithoutPictureId) {
+ const size_t kFrameSize = 16;
+ const size_t kPacketSize = 22;
+
+ expected_.inter_pic_predicted = true;
+ expected_.flexible_mode = true;
+ expected_.num_ref_pics = 1;
+ expected_.pid_diff[0] = 3;
+ Init(kFrameSize, kPacketSize);
+
+ const size_t kExpectedNum = 0;
+ CreateParseAndCheckPackets(NULL, NULL, kExpectedNum);
+}
+
+TEST_F(RtpPacketizerVp9Test, TestSsDataWithoutSpatialResolutionPresent) {
+ const size_t kFrameSize = 21;
+ const size_t kPacketSize = 25;
+
+ expected_.ss_data_available = true;
+ expected_.num_spatial_layers = 1;
+ expected_.spatial_layer_resolution_present = false;
+ expected_.gof.num_frames_in_gof = 1;
+ expected_.gof.temporal_idx[0] = 0;
+ expected_.gof.temporal_up_switch[0] = true;
+ expected_.gof.num_ref_pics[0] = 1;
+ expected_.gof.pid_diff[0][0] = 4;
+ Init(kFrameSize, kPacketSize);
+
+ // One packet:
+ // I:0, P:0, L:0, F:0, B:1, E:1, V:1 (4hdr + 21 payload)
+ // N_S:0, Y:0, N_G:0
+ // T:0, U:1, R:1 | P_DIFF[0][0]:4
+ const size_t kExpectedHdrSizes[] = {4};
+ const size_t kExpectedSizes[] = {25};
+ const size_t kExpectedNum = GTEST_ARRAY_SIZE_(kExpectedSizes);
+ CreateParseAndCheckPackets(kExpectedHdrSizes, kExpectedSizes, kExpectedNum);
+}
+
+TEST_F(RtpPacketizerVp9Test, TestSsData) {
+ const size_t kFrameSize = 21;
+ const size_t kPacketSize = 39;
+
+ expected_.ss_data_available = true;
+ expected_.num_spatial_layers = 2;
+ expected_.spatial_layer_resolution_present = true;
+ expected_.width[0] = 640;
+ expected_.width[1] = 1280;
+ expected_.height[0] = 360;
+ expected_.height[1] = 720;
+ expected_.gof.num_frames_in_gof = 3;
+ expected_.gof.temporal_idx[0] = 0;
+ expected_.gof.temporal_idx[1] = 1;
+ expected_.gof.temporal_idx[2] = 2;
+ expected_.gof.temporal_up_switch[0] = true;
+ expected_.gof.temporal_up_switch[1] = true;
+ expected_.gof.temporal_up_switch[2] = false;
+ expected_.gof.num_ref_pics[0] = 0;
+ expected_.gof.num_ref_pics[1] = 3;
+ expected_.gof.num_ref_pics[2] = 2;
+ expected_.gof.pid_diff[1][0] = 5;
+ expected_.gof.pid_diff[1][1] = 6;
+ expected_.gof.pid_diff[1][2] = 7;
+ expected_.gof.pid_diff[2][0] = 8;
+ expected_.gof.pid_diff[2][1] = 9;
+ Init(kFrameSize, kPacketSize);
+
+ // One packet:
+ // I:0, P:0, L:0, F:0, B:1, E:1, V:1 (18hdr + 21 payload)
+ // N_S:1, Y:1, N_G:2
+ // WIDTH:640 // 2 bytes
+ // HEIGHT:360 // 2 bytes
+ // WIDTH:1280 // 2 bytes
+ // HEIGHT:720 // 2 bytes
+ // T:0, U:1, R:0
+ // T:1, U:1, R:3 | P_DIFF[1][0]:5 | P_DIFF[1][1]:6 | P_DIFF[1][2]:7
+  // T:2, U:0, R:2 | P_DIFF[2][0]:8 | P_DIFF[2][1]:9
+ const size_t kExpectedHdrSizes[] = {18};
+ const size_t kExpectedSizes[] = {39};
+ const size_t kExpectedNum = GTEST_ARRAY_SIZE_(kExpectedSizes);
+ CreateParseAndCheckPackets(kExpectedHdrSizes, kExpectedSizes, kExpectedNum);
+}
+
+TEST_F(RtpPacketizerVp9Test, TestBaseLayerProtectionAndStorageType) {
+ const size_t kFrameSize = 10;
+ const size_t kPacketSize = 12;
+
+ // I:0, P:0, L:1, F:1, B:1, E:1, V:0 (2hdr + 10 payload)
+ // L: T:0, U:0, S:0, D:0
+ expected_.flexible_mode = true;
+ expected_.temporal_idx = 0;
+ Init(kFrameSize, kPacketSize);
+ EXPECT_EQ(kProtectedPacket, packetizer_->GetProtectionType());
+ EXPECT_EQ(kAllowRetransmission,
+ packetizer_->GetStorageType(kRetransmitBaseLayer));
+ EXPECT_EQ(kDontRetransmit, packetizer_->GetStorageType(kRetransmitOff));
+}
+
+TEST_F(RtpPacketizerVp9Test, TestHigherLayerProtectionAndStorageType) {
+ const size_t kFrameSize = 10;
+ const size_t kPacketSize = 12;
+
+ // I:0, P:0, L:1, F:1, B:1, E:1, V:0 (2hdr + 10 payload)
+ // L: T:1, U:0, S:0, D:0
+ expected_.flexible_mode = true;
+ expected_.temporal_idx = 1;
+ Init(kFrameSize, kPacketSize);
+ EXPECT_EQ(kUnprotectedPacket, packetizer_->GetProtectionType());
+ EXPECT_EQ(kDontRetransmit, packetizer_->GetStorageType(kRetransmitBaseLayer));
+ EXPECT_EQ(kAllowRetransmission,
+ packetizer_->GetStorageType(kRetransmitHigherLayers));
+}
+
+
+class RtpDepacketizerVp9Test : public ::testing::Test {
+ protected:
+ RtpDepacketizerVp9Test()
+ : depacketizer_(new RtpDepacketizerVp9()) {}
+
+ virtual void SetUp() {
+ expected_.InitRTPVideoHeaderVP9();
+ }
+
+ RTPVideoHeaderVP9 expected_;
+ rtc::scoped_ptr<RtpDepacketizer> depacketizer_;
+};
+
+TEST_F(RtpDepacketizerVp9Test, ParseBasicHeader) {
+ const uint8_t kHeaderLength = 1;
+ uint8_t packet[4] = {0};
+ packet[0] = 0x0C; // I:0 P:0 L:0 F:0 B:1 E:1 V:0 R:0
+ expected_.beginning_of_frame = true;
+ expected_.end_of_frame = true;
+ ParseAndCheckPacket(packet, expected_, kHeaderLength, sizeof(packet));
+}
+
+TEST_F(RtpDepacketizerVp9Test, ParseOneBytePictureId) {
+ const uint8_t kHeaderLength = 2;
+ uint8_t packet[10] = {0};
+ packet[0] = 0x80; // I:1 P:0 L:0 F:0 B:0 E:0 V:0 R:0
+ packet[1] = kMaxOneBytePictureId;
+
+ expected_.picture_id = kMaxOneBytePictureId;
+ expected_.max_picture_id = kMaxOneBytePictureId;
+ ParseAndCheckPacket(packet, expected_, kHeaderLength, sizeof(packet));
+}
+
+TEST_F(RtpDepacketizerVp9Test, ParseTwoBytePictureId) {
+ const uint8_t kHeaderLength = 3;
+ uint8_t packet[10] = {0};
+ packet[0] = 0x80; // I:1 P:0 L:0 F:0 B:0 E:0 V:0 R:0
+ packet[1] = 0x80 | ((kMaxTwoBytePictureId >> 8) & 0x7F);
+ packet[2] = kMaxTwoBytePictureId & 0xFF;
+
+ expected_.picture_id = kMaxTwoBytePictureId;
+ expected_.max_picture_id = kMaxTwoBytePictureId;
+ ParseAndCheckPacket(packet, expected_, kHeaderLength, sizeof(packet));
+}
+
+TEST_F(RtpDepacketizerVp9Test, ParseLayerInfoWithNonFlexibleMode) {
+ const uint8_t kHeaderLength = 3;
+ const uint8_t kGofIdx = 7;
+ const uint8_t kSpatialIdx = 1;
+ const uint8_t kDbit = 1;
+ const uint8_t kTl0PicIdx = 17;
+ uint8_t packet[13] = {0};
+ packet[0] = 0x20; // I:0 P:0 L:1 F:0 B:0 E:0 V:0 R:0
+ packet[1] = (kGofIdx << 4) | (kSpatialIdx << 1) | kDbit; // GOF_IDX:7 S:1 D:1
+ packet[2] = kTl0PicIdx; // TL0PICIDX:17
+
+ expected_.gof_idx = kGofIdx;
+ expected_.spatial_idx = kSpatialIdx;
+ expected_.inter_layer_predicted = kDbit ? true : false;
+ expected_.tl0_pic_idx = kTl0PicIdx;
+ ParseAndCheckPacket(packet, expected_, kHeaderLength, sizeof(packet));
+}
+
+TEST_F(RtpDepacketizerVp9Test, ParseLayerInfoWithFlexibleMode) {
+ const uint8_t kHeaderLength = 2;
+ const uint8_t kTemporalIdx = 2;
+ const uint8_t kUbit = 1;
+ const uint8_t kSpatialIdx = 0;
+ const uint8_t kDbit = 0;
+ uint8_t packet[13] = {0};
+ packet[0] = 0x38; // I:0 P:0 L:1 F:1 B:1 E:0 V:0 R:0
+ packet[1] = (kTemporalIdx << 5) | (kUbit << 4) | (kSpatialIdx << 1) | kDbit;
+
+ // I:0 P:0 L:1 F:1 B:1 E:0 V:0
+ // L: T:2 U:1 S:0 D:0
+ expected_.beginning_of_frame = true;
+ expected_.flexible_mode = true;
+ expected_.temporal_idx = kTemporalIdx;
+ expected_.temporal_up_switch = kUbit ? true : false;
+ expected_.spatial_idx = kSpatialIdx;
+ expected_.inter_layer_predicted = kDbit ? true : false;
+ ParseAndCheckPacket(packet, expected_, kHeaderLength, sizeof(packet));
+}
+
+TEST_F(RtpDepacketizerVp9Test, ParseRefIdx) {
+ const uint8_t kHeaderLength = 7;
+ const int16_t kPictureId = 17;
+ const int16_t kPdiff1 = 17;
+ const int16_t kPdiff2 = 18;
+ const int16_t kExtPdiff3 = 2171;
+ uint8_t packet[13] = {0};
+ packet[0] = 0xD8; // I:1 P:1 L:0 F:1 B:1 E:0 V:0 R:0
+ packet[1] = 0x80 | ((kPictureId >> 8) & 0x7F); // Two byte pictureID.
+ packet[2] = kPictureId;
+ packet[3] = (kPdiff1 << 2) | (0 << 1) | 1; // P_DIFF X:0 N:1
+ packet[4] = (kPdiff2 << 2) | (0 << 1) | 1; // P_DIFF X:0 N:1
+ packet[5] = ((kExtPdiff3 >> 8) << 2) | (1 << 1) | 0; // P_DIFF X:1 N:0
+ packet[6] = kExtPdiff3 & 0xff; // EXTENDED P_DIFF
+
+ // I:1 P:1 L:0 F:1 B:1 E:0 V:0
+ // I: PICTURE ID:17
+  // M:   EXTENDED PID (picture ID is two bytes since the M bit is set)
+ // P,F: P_DIFF:17 X:0 N:1 => refPictureId = 17 - 17 = 0
+ // P,F: P_DIFF:18 X:0 N:1 => refPictureId = 0x7FFF + 1 + 17 - 18 = 0x7FFF
+ // P,F: P_DIFF:2171 X:1 N:0 => refPictureId = 0x7FFF + 1 + 17 - 2171 = 30614
+ expected_.beginning_of_frame = true;
+ expected_.inter_pic_predicted = true;
+ expected_.flexible_mode = true;
+ expected_.picture_id = kPictureId;
+ expected_.num_ref_pics = 3;
+ expected_.pid_diff[0] = kPdiff1;
+ expected_.pid_diff[1] = kPdiff2;
+ expected_.pid_diff[2] = kExtPdiff3;
+ expected_.ref_picture_id[0] = 0;
+ expected_.ref_picture_id[1] = 0x7FFF;
+ expected_.ref_picture_id[2] = 30614;
+ ParseAndCheckPacket(packet, expected_, kHeaderLength, sizeof(packet));
+}
+
+TEST_F(RtpDepacketizerVp9Test, ParseRefIdxFailsWithNoPictureId) {
+ const int16_t kPdiff = 3;
+ uint8_t packet[13] = {0};
+ packet[0] = 0x58; // I:0 P:1 L:0 F:1 B:1 E:0 V:0 R:0
+ packet[1] = (kPdiff << 2) | (0 << 1) | 0; // P,F: P_DIFF:3 X:0 N:0
+
+ RtpDepacketizer::ParsedPayload parsed;
+ EXPECT_FALSE(depacketizer_->Parse(&parsed, packet, sizeof(packet)));
+}
+
+TEST_F(RtpDepacketizerVp9Test, ParseRefIdxFailsWithTooManyRefPics) {
+ const int16_t kPdiff = 3;
+ uint8_t packet[13] = {0};
+ packet[0] = 0xD8; // I:1 P:1 L:0 F:1 B:1 E:0 V:0 R:0
+ packet[1] = kMaxOneBytePictureId; // I: PICTURE ID:127
+ packet[2] = (kPdiff << 2) | (0 << 1) | 1; // P,F: P_DIFF:3 X:0 N:1
+ packet[3] = (kPdiff << 2) | (0 << 1) | 1; // P,F: P_DIFF:3 X:0 N:1
+ packet[4] = (kPdiff << 2) | (0 << 1) | 1; // P,F: P_DIFF:3 X:0 N:1
+ packet[5] = (kPdiff << 2) | (0 << 1) | 0; // P,F: P_DIFF:3 X:0 N:0
+
+ RtpDepacketizer::ParsedPayload parsed;
+ EXPECT_FALSE(depacketizer_->Parse(&parsed, packet, sizeof(packet)));
+}
+
+TEST_F(RtpDepacketizerVp9Test, ParseSsData) {
+ const uint8_t kHeaderLength = 5;
+ const uint8_t kYbit = 0;
+ const size_t kNs = 2;
+ const size_t kNg = 2;
+ uint8_t packet[23] = {0};
+ packet[0] = 0x0A; // I:0 P:0 L:0 F:0 B:1 E:0 V:1 R:0
+ packet[1] = ((kNs - 1) << 5) | (kYbit << 4) | (kNg - 1); // N_S Y N_G
+ packet[2] = (0 << 5) | (1 << 4) | (0 << 2) | 0; // T:0 U:1 R:0 -
+ packet[3] = (2 << 5) | (0 << 4) | (1 << 2) | 0; // T:2 U:0 R:1 -
+ packet[4] = 33;
+
+ expected_.beginning_of_frame = true;
+ expected_.ss_data_available = true;
+ expected_.num_spatial_layers = kNs;
+ expected_.spatial_layer_resolution_present = kYbit ? true : false;
+ expected_.gof.num_frames_in_gof = kNg;
+ expected_.gof.temporal_idx[0] = 0;
+ expected_.gof.temporal_idx[1] = 2;
+ expected_.gof.temporal_up_switch[0] = true;
+ expected_.gof.temporal_up_switch[1] = false;
+ expected_.gof.num_ref_pics[0] = 0;
+ expected_.gof.num_ref_pics[1] = 1;
+ expected_.gof.pid_diff[1][0] = 33;
+ ParseAndCheckPacket(packet, expected_, kHeaderLength, sizeof(packet));
+}
+
+TEST_F(RtpDepacketizerVp9Test, ParseFirstPacketInKeyFrame) {
+ uint8_t packet[2] = {0};
+ packet[0] = 0x08; // I:0 P:0 L:0 F:0 B:1 E:0 V:0 R:0
+
+ RtpDepacketizer::ParsedPayload parsed;
+ ASSERT_TRUE(depacketizer_->Parse(&parsed, packet, sizeof(packet)));
+ EXPECT_EQ(kVideoFrameKey, parsed.frame_type);
+ EXPECT_TRUE(parsed.type.Video.isFirstPacket);
+}
+
+TEST_F(RtpDepacketizerVp9Test, ParseLastPacketInDeltaFrame) {
+ uint8_t packet[2] = {0};
+ packet[0] = 0x44; // I:0 P:1 L:0 F:0 B:0 E:1 V:0 R:0
+
+ RtpDepacketizer::ParsedPayload parsed;
+ ASSERT_TRUE(depacketizer_->Parse(&parsed, packet, sizeof(packet)));
+ EXPECT_EQ(kVideoFrameDelta, parsed.frame_type);
+ EXPECT_FALSE(parsed.type.Video.isFirstPacket);
+}
+
+TEST_F(RtpDepacketizerVp9Test, ParseResolution) {
+ const uint16_t kWidth[2] = {640, 1280};
+ const uint16_t kHeight[2] = {360, 720};
+ uint8_t packet[20] = {0};
+ packet[0] = 0x0A; // I:0 P:0 L:0 F:0 B:1 E:0 V:1 R:0
+ packet[1] = (1 << 5) | (1 << 4) | 0; // N_S:1 Y:1 N_G:0
+ packet[2] = kWidth[0] >> 8;
+ packet[3] = kWidth[0] & 0xFF;
+ packet[4] = kHeight[0] >> 8;
+ packet[5] = kHeight[0] & 0xFF;
+ packet[6] = kWidth[1] >> 8;
+ packet[7] = kWidth[1] & 0xFF;
+ packet[8] = kHeight[1] >> 8;
+ packet[9] = kHeight[1] & 0xFF;
+ packet[10] = 0; // T:0 U:0 R:0 -
+
+ RtpDepacketizer::ParsedPayload parsed;
+ ASSERT_TRUE(depacketizer_->Parse(&parsed, packet, sizeof(packet)));
+ EXPECT_EQ(kWidth[0], parsed.type.Video.width);
+ EXPECT_EQ(kHeight[0], parsed.type.Video.height);
+}
+
+TEST_F(RtpDepacketizerVp9Test, ParseFailsForNoPayloadLength) {
+ uint8_t packet[1] = {0};
+ RtpDepacketizer::ParsedPayload parsed;
+ EXPECT_FALSE(depacketizer_->Parse(&parsed, packet, 0));
+}
+
+TEST_F(RtpDepacketizerVp9Test, ParseFailsForTooShortBufferToFitPayload) {
+ const uint8_t kHeaderLength = 1;
+ uint8_t packet[kHeaderLength] = {0};
+ RtpDepacketizer::ParsedPayload parsed;
+ EXPECT_FALSE(depacketizer_->Parse(&parsed, packet, sizeof(packet)));
+}
+
+} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_packet_history.cc b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_packet_history.cc
index 8fb183543a0..cc0cc83bb49 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_packet_history.cc
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_packet_history.cc
@@ -25,12 +25,10 @@ namespace webrtc {
static const int kMinPacketRequestBytes = 50;
RTPPacketHistory::RTPPacketHistory(Clock* clock)
- : clock_(clock),
- critsect_(CriticalSectionWrapper::CreateCriticalSection()),
- store_(false),
- prev_index_(0),
- max_packet_length_(0) {
-}
+ : clock_(clock),
+ critsect_(CriticalSectionWrapper::CreateCriticalSection()),
+ store_(false),
+ prev_index_(0) {}
RTPPacketHistory::~RTPPacketHistory() {
}
@@ -55,11 +53,6 @@ void RTPPacketHistory::Allocate(size_t number_to_store) {
assert(number_to_store <= kMaxHistoryCapacity);
store_ = true;
stored_packets_.resize(number_to_store);
- stored_seq_nums_.resize(number_to_store);
- stored_lengths_.resize(number_to_store);
- stored_times_.resize(number_to_store);
- stored_send_times_.resize(number_to_store);
- stored_types_.resize(number_to_store);
}
void RTPPacketHistory::Free() {
@@ -67,21 +60,10 @@ void RTPPacketHistory::Free() {
return;
}
- std::vector<std::vector<uint8_t> >::iterator it;
- for (it = stored_packets_.begin(); it != stored_packets_.end(); ++it) {
- it->clear();
- }
-
stored_packets_.clear();
- stored_seq_nums_.clear();
- stored_lengths_.clear();
- stored_times_.clear();
- stored_send_times_.clear();
- stored_types_.clear();
store_ = false;
prev_index_ = 0;
- max_packet_length_ = 0;
}
bool RTPPacketHistory::StorePackets() const {
@@ -89,31 +71,8 @@ bool RTPPacketHistory::StorePackets() const {
return store_;
}
-void RTPPacketHistory::VerifyAndAllocatePacketLength(size_t packet_length,
- uint32_t start_index) {
- assert(packet_length > 0);
- if (!store_) {
- return;
- }
-
- // If start_index > 0 this is a resize and we must check any new (empty)
- // packets created during the resize.
- if (start_index == 0 && packet_length <= max_packet_length_) {
- return;
- }
-
- max_packet_length_ = std::max(packet_length, max_packet_length_);
-
- std::vector<std::vector<uint8_t> >::iterator it;
- for (it = stored_packets_.begin() + start_index; it != stored_packets_.end();
- ++it) {
- it->resize(max_packet_length_);
- }
-}
-
int32_t RTPPacketHistory::PutRTPPacket(const uint8_t* packet,
size_t packet_length,
- size_t max_packet_length,
int64_t capture_time_ms,
StorageType type) {
if (type == kDontStore) {
@@ -128,9 +87,7 @@ int32_t RTPPacketHistory::PutRTPPacket(const uint8_t* packet,
assert(packet);
assert(packet_length > 3);
- VerifyAndAllocatePacketLength(max_packet_length, 0);
-
- if (packet_length > max_packet_length_) {
+ if (packet_length > IP_PACKET_SIZE) {
LOG(LS_WARNING) << "Failed to store RTP packet with length: "
<< packet_length;
return -1;
@@ -141,14 +98,13 @@ int32_t RTPPacketHistory::PutRTPPacket(const uint8_t* packet,
// If index we're about to overwrite contains a packet that has not
// yet been sent (probably pending in paced sender), we need to expand
// the buffer.
- if (stored_lengths_[prev_index_] > 0 &&
- stored_send_times_[prev_index_] == 0) {
+ if (stored_packets_[prev_index_].length > 0 &&
+ stored_packets_[prev_index_].send_time == 0) {
size_t current_size = static_cast<uint16_t>(stored_packets_.size());
if (current_size < kMaxHistoryCapacity) {
size_t expanded_size = std::max(current_size * 3 / 2, current_size + 1);
expanded_size = std::min(expanded_size, kMaxHistoryCapacity);
Allocate(expanded_size);
- VerifyAndAllocatePacketLength(max_packet_length, current_size);
// Causes discontinuity, but that's OK-ish. FindSeqNum() will still work,
// but may be slower - at least until buffer has wrapped around once.
prev_index_ = current_size;
@@ -156,21 +112,19 @@ int32_t RTPPacketHistory::PutRTPPacket(const uint8_t* packet,
}
// Store packet
- std::vector<std::vector<uint8_t> >::iterator it =
- stored_packets_.begin() + prev_index_;
// TODO(sprang): Overhaul this class and get rid of this copy step.
// (Finally introduce the RtpPacket class?)
- std::copy(packet, packet + packet_length, it->begin());
+ memcpy(stored_packets_[prev_index_].data, packet, packet_length);
+ stored_packets_[prev_index_].length = packet_length;
- stored_seq_nums_[prev_index_] = seq_num;
- stored_lengths_[prev_index_] = packet_length;
- stored_times_[prev_index_] = (capture_time_ms > 0) ? capture_time_ms :
- clock_->TimeInMilliseconds();
- stored_send_times_[prev_index_] = 0; // Packet not sent.
- stored_types_[prev_index_] = type;
+ stored_packets_[prev_index_].sequence_number = seq_num;
+ stored_packets_[prev_index_].time_ms =
+ (capture_time_ms > 0) ? capture_time_ms : clock_->TimeInMilliseconds();
+ stored_packets_[prev_index_].send_time = 0; // Packet not sent.
+ stored_packets_[prev_index_].storage_type = type;
++prev_index_;
- if (prev_index_ >= stored_seq_nums_.size()) {
+ if (prev_index_ >= stored_packets_.size()) {
prev_index_ = 0;
}
return 0;
@@ -188,8 +142,7 @@ bool RTPPacketHistory::HasRTPPacket(uint16_t sequence_number) const {
return false;
}
- size_t length = stored_lengths_.at(index);
- if (length == 0 || length > max_packet_length_) {
+ if (stored_packets_[index].length == 0) {
// Invalid length.
return false;
}
@@ -209,11 +162,11 @@ bool RTPPacketHistory::SetSent(uint16_t sequence_number) {
}
// Send time already set.
- if (stored_send_times_[index] != 0) {
+ if (stored_packets_[index].send_time != 0) {
return false;
}
- stored_send_times_[index] = clock_->TimeInMilliseconds();
+ stored_packets_[index].send_time = clock_->TimeInMilliseconds();
return true;
}
@@ -224,7 +177,7 @@ bool RTPPacketHistory::GetPacketAndSetSendTime(uint16_t sequence_number,
size_t* packet_length,
int64_t* stored_time_ms) {
CriticalSectionScoped cs(critsect_.get());
- assert(*packet_length >= max_packet_length_);
+ assert(*packet_length >= IP_PACKET_SIZE);
if (!store_) {
return false;
}
@@ -236,8 +189,8 @@ bool RTPPacketHistory::GetPacketAndSetSendTime(uint16_t sequence_number,
return false;
}
- size_t length = stored_lengths_.at(index);
- assert(length <= max_packet_length_);
+ size_t length = stored_packets_[index].length;
+ assert(length <= IP_PACKET_SIZE);
if (length == 0) {
LOG(LS_WARNING) << "No match for getting seqNum " << sequence_number
<< ", len " << length;
@@ -247,16 +200,16 @@ bool RTPPacketHistory::GetPacketAndSetSendTime(uint16_t sequence_number,
// Verify elapsed time since last retrieve.
int64_t now = clock_->TimeInMilliseconds();
if (min_elapsed_time_ms > 0 &&
- ((now - stored_send_times_.at(index)) < min_elapsed_time_ms)) {
+ ((now - stored_packets_[index].send_time) < min_elapsed_time_ms)) {
return false;
}
- if (retransmit && stored_types_.at(index) == kDontRetransmit) {
+ if (retransmit && stored_packets_[index].storage_type == kDontRetransmit) {
// No bytes copied since this packet shouldn't be retransmitted or is
// of zero size.
return false;
}
- stored_send_times_[index] = clock_->TimeInMilliseconds();
+ stored_packets_[index].send_time = clock_->TimeInMilliseconds();
GetPacket(index, packet, packet_length, stored_time_ms);
return true;
}
@@ -266,13 +219,10 @@ void RTPPacketHistory::GetPacket(int index,
size_t* packet_length,
int64_t* stored_time_ms) const {
// Get packet.
- size_t length = stored_lengths_.at(index);
- std::vector<std::vector<uint8_t> >::const_iterator it_found_packet =
- stored_packets_.begin() + index;
- std::copy(it_found_packet->begin(), it_found_packet->begin() + length,
- packet);
+ size_t length = stored_packets_[index].length;
+ memcpy(packet, stored_packets_[index].data, length);
*packet_length = length;
- *stored_time_ms = stored_times_.at(index);
+ *stored_time_ms = stored_packets_[index].time_ms;
}
bool RTPPacketHistory::GetBestFittingPacket(uint8_t* packet,
@@ -294,24 +244,24 @@ bool RTPPacketHistory::FindSeqNum(uint16_t sequence_number,
uint16_t temp_sequence_number = 0;
if (prev_index_ > 0) {
*index = prev_index_ - 1;
- temp_sequence_number = stored_seq_nums_[*index];
+ temp_sequence_number = stored_packets_[*index].sequence_number;
} else {
- *index = stored_seq_nums_.size() - 1;
- temp_sequence_number = stored_seq_nums_[*index]; // wrap
+ *index = stored_packets_.size() - 1;
+ temp_sequence_number = stored_packets_[*index].sequence_number; // wrap
}
int32_t idx = (prev_index_ - 1) - (temp_sequence_number - sequence_number);
- if (idx >= 0 && idx < static_cast<int>(stored_seq_nums_.size())) {
+ if (idx >= 0 && idx < static_cast<int>(stored_packets_.size())) {
*index = idx;
- temp_sequence_number = stored_seq_nums_[*index];
+ temp_sequence_number = stored_packets_[*index].sequence_number;
}
if (temp_sequence_number != sequence_number) {
// We did not found a match, search all.
- for (uint16_t m = 0; m < stored_seq_nums_.size(); m++) {
- if (stored_seq_nums_[m] == sequence_number) {
+ for (uint16_t m = 0; m < stored_packets_.size(); m++) {
+ if (stored_packets_[m].sequence_number == sequence_number) {
*index = m;
- temp_sequence_number = stored_seq_nums_[*index];
+ temp_sequence_number = stored_packets_[*index].sequence_number;
break;
}
}
@@ -324,15 +274,16 @@ bool RTPPacketHistory::FindSeqNum(uint16_t sequence_number,
}
int RTPPacketHistory::FindBestFittingPacket(size_t size) const {
- if (size < kMinPacketRequestBytes || stored_lengths_.empty())
+ if (size < kMinPacketRequestBytes || stored_packets_.empty())
return -1;
size_t min_diff = std::numeric_limits<size_t>::max();
int best_index = -1; // Returned unchanged if we don't find anything.
- for (size_t i = 0; i < stored_lengths_.size(); ++i) {
- if (stored_lengths_[i] == 0)
+ for (size_t i = 0; i < stored_packets_.size(); ++i) {
+ if (stored_packets_[i].length == 0)
continue;
- size_t diff = (stored_lengths_[i] > size) ?
- (stored_lengths_[i] - size) : (size - stored_lengths_[i]);
+ size_t diff = (stored_packets_[i].length > size)
+ ? (stored_packets_[i].length - size)
+ : (size - stored_packets_[i].length);
if (diff < min_diff) {
min_diff = diff;
best_index = static_cast<int>(i);
@@ -340,4 +291,7 @@ int RTPPacketHistory::FindBestFittingPacket(size_t size) const {
}
return best_index;
}
+
+RTPPacketHistory::StoredPacket::StoredPacket() {}
+
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_packet_history.h b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_packet_history.h
index 212aa212669..4a99e16977f 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_packet_history.h
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_packet_history.h
@@ -39,7 +39,6 @@ class RTPPacketHistory {
// Stores RTP packet.
int32_t PutRTPPacket(const uint8_t* packet,
size_t packet_length,
- size_t max_packet_length,
int64_t capture_time_ms,
StorageType type);
@@ -88,14 +87,18 @@ class RTPPacketHistory {
rtc::scoped_ptr<CriticalSectionWrapper> critsect_;
bool store_ GUARDED_BY(critsect_);
uint32_t prev_index_ GUARDED_BY(critsect_);
- size_t max_packet_length_ GUARDED_BY(critsect_);
-
- std::vector<std::vector<uint8_t> > stored_packets_ GUARDED_BY(critsect_);
- std::vector<uint16_t> stored_seq_nums_ GUARDED_BY(critsect_);
- std::vector<size_t> stored_lengths_ GUARDED_BY(critsect_);
- std::vector<int64_t> stored_times_ GUARDED_BY(critsect_);
- std::vector<int64_t> stored_send_times_ GUARDED_BY(critsect_);
- std::vector<StorageType> stored_types_ GUARDED_BY(critsect_);
+
+ struct StoredPacket {
+ StoredPacket();
+ uint16_t sequence_number = 0;
+ int64_t time_ms = 0;
+ int64_t send_time = 0;
+ StorageType storage_type = kDontStore;
+
+ uint8_t data[IP_PACKET_SIZE];
+ size_t length = 0;
+ };
+ std::vector<StoredPacket> stored_packets_ GUARDED_BY(critsect_);
};
} // namespace webrtc
#endif // WEBRTC_MODULES_RTP_RTCP_RTP_PACKET_HISTORY_H_
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_packet_history_unittest.cc b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_packet_history_unittest.cc
index fe33b01e069..f3b5556a974 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_packet_history_unittest.cc
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_packet_history_unittest.cc
@@ -70,8 +70,8 @@ TEST_F(RtpPacketHistoryTest, NoStoreStatus) {
size_t len = 0;
int64_t capture_time_ms = fake_clock_.TimeInMilliseconds();
CreateRtpPacket(kSeqNum, kSsrc, kPayload, kTimestamp, packet_, &len);
- EXPECT_EQ(0, hist_->PutRTPPacket(packet_, len, kMaxPacketLength,
- capture_time_ms, kAllowRetransmission));
+ EXPECT_EQ(0, hist_->PutRTPPacket(packet_, len, capture_time_ms,
+ kAllowRetransmission));
// Packet should not be stored.
len = kMaxPacketLength;
int64_t time;
@@ -84,8 +84,7 @@ TEST_F(RtpPacketHistoryTest, DontStore) {
size_t len = 0;
int64_t capture_time_ms = fake_clock_.TimeInMilliseconds();
CreateRtpPacket(kSeqNum, kSsrc, kPayload, kTimestamp, packet_, &len);
- EXPECT_EQ(0, hist_->PutRTPPacket(packet_, len, kMaxPacketLength,
- capture_time_ms, kDontStore));
+ EXPECT_EQ(0, hist_->PutRTPPacket(packet_, len, capture_time_ms, kDontStore));
// Packet should not be stored.
len = kMaxPacketLength;
@@ -97,11 +96,8 @@ TEST_F(RtpPacketHistoryTest, DontStore) {
TEST_F(RtpPacketHistoryTest, PutRtpPacket_TooLargePacketLength) {
hist_->SetStorePacketsStatus(true, 10);
int64_t capture_time_ms = fake_clock_.TimeInMilliseconds();
- EXPECT_EQ(-1, hist_->PutRTPPacket(packet_,
- kMaxPacketLength + 1,
- kMaxPacketLength,
- capture_time_ms,
- kAllowRetransmission));
+ EXPECT_EQ(-1, hist_->PutRTPPacket(packet_, kMaxPacketLength + 1,
+ capture_time_ms, kAllowRetransmission));
}
TEST_F(RtpPacketHistoryTest, GetRtpPacket_NotStored) {
@@ -119,8 +115,8 @@ TEST_F(RtpPacketHistoryTest, PutRtpPacket) {
EXPECT_FALSE(hist_->HasRTPPacket(kSeqNum));
int64_t capture_time_ms = fake_clock_.TimeInMilliseconds();
- EXPECT_EQ(0, hist_->PutRTPPacket(packet_, len, kMaxPacketLength,
- capture_time_ms, kAllowRetransmission));
+ EXPECT_EQ(0, hist_->PutRTPPacket(packet_, len, capture_time_ms,
+ kAllowRetransmission));
EXPECT_TRUE(hist_->HasRTPPacket(kSeqNum));
}
@@ -129,8 +125,8 @@ TEST_F(RtpPacketHistoryTest, GetRtpPacket) {
size_t len = 0;
int64_t capture_time_ms = 1;
CreateRtpPacket(kSeqNum, kSsrc, kPayload, kTimestamp, packet_, &len);
- EXPECT_EQ(0, hist_->PutRTPPacket(packet_, len, kMaxPacketLength,
- capture_time_ms, kAllowRetransmission));
+ EXPECT_EQ(0, hist_->PutRTPPacket(packet_, len, capture_time_ms,
+ kAllowRetransmission));
size_t len_out = kMaxPacketLength;
int64_t time;
@@ -149,8 +145,7 @@ TEST_F(RtpPacketHistoryTest, NoCaptureTime) {
fake_clock_.AdvanceTimeMilliseconds(1);
int64_t capture_time_ms = fake_clock_.TimeInMilliseconds();
CreateRtpPacket(kSeqNum, kSsrc, kPayload, kTimestamp, packet_, &len);
- EXPECT_EQ(0, hist_->PutRTPPacket(packet_, len, kMaxPacketLength,
- -1, kAllowRetransmission));
+ EXPECT_EQ(0, hist_->PutRTPPacket(packet_, len, -1, kAllowRetransmission));
size_t len_out = kMaxPacketLength;
int64_t time;
@@ -168,8 +163,8 @@ TEST_F(RtpPacketHistoryTest, DontRetransmit) {
size_t len = 0;
int64_t capture_time_ms = fake_clock_.TimeInMilliseconds();
CreateRtpPacket(kSeqNum, kSsrc, kPayload, kTimestamp, packet_, &len);
- EXPECT_EQ(0, hist_->PutRTPPacket(packet_, len, kMaxPacketLength,
- capture_time_ms, kDontRetransmit));
+ EXPECT_EQ(
+ 0, hist_->PutRTPPacket(packet_, len, capture_time_ms, kDontRetransmit));
size_t len_out = kMaxPacketLength;
int64_t time;
@@ -184,8 +179,8 @@ TEST_F(RtpPacketHistoryTest, MinResendTime) {
size_t len = 0;
int64_t capture_time_ms = fake_clock_.TimeInMilliseconds();
CreateRtpPacket(kSeqNum, kSsrc, kPayload, kTimestamp, packet_, &len);
- EXPECT_EQ(0, hist_->PutRTPPacket(packet_, len, kMaxPacketLength,
- capture_time_ms, kAllowRetransmission));
+ EXPECT_EQ(0, hist_->PutRTPPacket(packet_, len, capture_time_ms,
+ kAllowRetransmission));
int64_t time;
len = kMaxPacketLength;
@@ -215,8 +210,8 @@ TEST_F(RtpPacketHistoryTest, DynamicExpansion) {
for (int i = 0; i < 4; ++i) {
len = 0;
CreateRtpPacket(kSeqNum + i, kSsrc, kPayload, kTimestamp, packet_, &len);
- EXPECT_EQ(0, hist_->PutRTPPacket(packet_, len, kMaxPacketLength,
- capture_time_ms, kAllowRetransmission));
+ EXPECT_EQ(0, hist_->PutRTPPacket(packet_, len, capture_time_ms,
+ kAllowRetransmission));
}
for (int i = 0; i < 4; ++i) {
len = kMaxPacketLength;
@@ -230,8 +225,8 @@ TEST_F(RtpPacketHistoryTest, DynamicExpansion) {
for (int i = 4; i < 20; ++i) {
len = 0;
CreateRtpPacket(kSeqNum + i, kSsrc, kPayload, kTimestamp, packet_, &len);
- EXPECT_EQ(0, hist_->PutRTPPacket(packet_, len, kMaxPacketLength,
- capture_time_ms, kAllowRetransmission));
+ EXPECT_EQ(0, hist_->PutRTPPacket(packet_, len, capture_time_ms,
+ kAllowRetransmission));
}
for (int i = 4; i < 20; ++i) {
len = kMaxPacketLength;
@@ -257,8 +252,8 @@ TEST_F(RtpPacketHistoryTest, FullExpansion) {
for (size_t i = 0; i < kMaxHistoryCapacity + 1; ++i) {
len = 0;
CreateRtpPacket(kSeqNum + i, kSsrc, kPayload, kTimestamp, packet_, &len);
- EXPECT_EQ(0, hist_->PutRTPPacket(packet_, len, kMaxPacketLength,
- capture_time_ms, kAllowRetransmission));
+ EXPECT_EQ(0, hist_->PutRTPPacket(packet_, len, capture_time_ms,
+ kAllowRetransmission));
}
fake_clock_.AdvanceTimeMilliseconds(100);
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_payload_registry.cc b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_payload_registry.cc
index 8e2ff1742ef..20e650c04b4 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_payload_registry.cc
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_payload_registry.cc
@@ -433,6 +433,8 @@ class RTPPayloadVideoStrategy : public RTPPayloadStrategy {
if (RtpUtility::StringCompare(payloadName, "VP8", 3)) {
videoType = kRtpVideoVp8;
+ } else if (RtpUtility::StringCompare(payloadName, "VP9", 3)) {
+ videoType = kRtpVideoVp9;
} else if (RtpUtility::StringCompare(payloadName, "H264", 4)) {
videoType = kRtpVideoH264;
} else if (RtpUtility::StringCompare(payloadName, "I420", 4)) {
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_receiver_audio.cc b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_receiver_audio.cc
index c9a1adf1967..d7bf4059405 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_receiver_audio.cc
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_receiver_audio.cc
@@ -20,17 +20,15 @@
namespace webrtc {
RTPReceiverStrategy* RTPReceiverStrategy::CreateAudioStrategy(
- int32_t id, RtpData* data_callback,
+ RtpData* data_callback,
RtpAudioFeedback* incoming_messages_callback) {
- return new RTPReceiverAudio(id, data_callback, incoming_messages_callback);
+ return new RTPReceiverAudio(data_callback, incoming_messages_callback);
}
-RTPReceiverAudio::RTPReceiverAudio(const int32_t id,
- RtpData* data_callback,
+RTPReceiverAudio::RTPReceiverAudio(RtpData* data_callback,
RtpAudioFeedback* incoming_messages_callback)
: RTPReceiverStrategy(data_callback),
TelephoneEventHandler(),
- id_(id),
last_received_frequency_(8000),
telephone_event_forward_to_decoder_(false),
telephone_event_payload_type_(-1),
@@ -263,16 +261,13 @@ int RTPReceiverAudio::Energy(uint8_t array_of_energy[kRtpCsrcSize]) const {
int32_t RTPReceiverAudio::InvokeOnInitializeDecoder(
RtpFeedback* callback,
- int32_t id,
int8_t payload_type,
const char payload_name[RTP_PAYLOAD_NAME_SIZE],
const PayloadUnion& specific_payload) const {
- if (-1 == callback->OnInitializeDecoder(id,
- payload_type,
- payload_name,
- specific_payload.Audio.frequency,
- specific_payload.Audio.channels,
- specific_payload.Audio.rate)) {
+ if (-1 ==
+ callback->OnInitializeDecoder(
+ payload_type, payload_name, specific_payload.Audio.frequency,
+ specific_payload.Audio.channels, specific_payload.Audio.rate)) {
LOG(LS_ERROR) << "Failed to create decoder for payload type: "
<< payload_name << "/" << static_cast<int>(payload_type);
return -1;
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_receiver_audio.h b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_receiver_audio.h
index a7efcbba344..176852e01ef 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_receiver_audio.h
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_receiver_audio.h
@@ -28,8 +28,7 @@ class CriticalSectionWrapper;
class RTPReceiverAudio : public RTPReceiverStrategy,
public TelephoneEventHandler {
public:
- RTPReceiverAudio(const int32_t id,
- RtpData* data_callback,
+ RTPReceiverAudio(RtpData* data_callback,
RtpAudioFeedback* incoming_messages_callback);
virtual ~RTPReceiverAudio() {}
@@ -74,7 +73,6 @@ class RTPReceiverAudio : public RTPReceiverStrategy,
int32_t InvokeOnInitializeDecoder(
RtpFeedback* callback,
- int32_t id,
int8_t payload_type,
const char payload_name[RTP_PAYLOAD_NAME_SIZE],
const PayloadUnion& specific_payload) const override;
@@ -106,8 +104,6 @@ class RTPReceiverAudio : public RTPReceiverStrategy,
const AudioPayload& audio_specific,
bool is_red);
- int32_t id_;
-
uint32_t last_received_frequency_;
bool telephone_event_forward_to_decoder_;
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_receiver_impl.cc b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_receiver_impl.cc
index 6be0c5a8273..40612a6ddfe 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_receiver_impl.cc
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_receiver_impl.cc
@@ -27,7 +27,7 @@ using RtpUtility::Payload;
using RtpUtility::StringCompare;
RtpReceiver* RtpReceiver::CreateVideoReceiver(
- int id, Clock* clock,
+ Clock* clock,
RtpData* incoming_payload_callback,
RtpFeedback* incoming_messages_callback,
RTPPayloadRegistry* rtp_payload_registry) {
@@ -36,13 +36,13 @@ RtpReceiver* RtpReceiver::CreateVideoReceiver(
if (!incoming_messages_callback)
incoming_messages_callback = NullObjectRtpFeedback();
return new RtpReceiverImpl(
- id, clock, NullObjectRtpAudioFeedback(), incoming_messages_callback,
+ clock, NullObjectRtpAudioFeedback(), incoming_messages_callback,
rtp_payload_registry,
RTPReceiverStrategy::CreateVideoStrategy(incoming_payload_callback));
}
RtpReceiver* RtpReceiver::CreateAudioReceiver(
- int id, Clock* clock,
+ Clock* clock,
RtpAudioFeedback* incoming_audio_feedback,
RtpData* incoming_payload_callback,
RtpFeedback* incoming_messages_callback,
@@ -54,25 +54,24 @@ RtpReceiver* RtpReceiver::CreateAudioReceiver(
if (!incoming_messages_callback)
incoming_messages_callback = NullObjectRtpFeedback();
return new RtpReceiverImpl(
- id, clock, incoming_audio_feedback, incoming_messages_callback,
+ clock, incoming_audio_feedback, incoming_messages_callback,
rtp_payload_registry,
- RTPReceiverStrategy::CreateAudioStrategy(id, incoming_payload_callback,
+ RTPReceiverStrategy::CreateAudioStrategy(incoming_payload_callback,
incoming_audio_feedback));
}
-RtpReceiverImpl::RtpReceiverImpl(int32_t id,
- Clock* clock,
- RtpAudioFeedback* incoming_audio_messages_callback,
- RtpFeedback* incoming_messages_callback,
- RTPPayloadRegistry* rtp_payload_registry,
- RTPReceiverStrategy* rtp_media_receiver)
+RtpReceiverImpl::RtpReceiverImpl(
+ Clock* clock,
+ RtpAudioFeedback* incoming_audio_messages_callback,
+ RtpFeedback* incoming_messages_callback,
+ RTPPayloadRegistry* rtp_payload_registry,
+ RTPReceiverStrategy* rtp_media_receiver)
: clock_(clock),
rtp_payload_registry_(rtp_payload_registry),
rtp_media_receiver_(rtp_media_receiver),
- id_(id),
cb_rtp_feedback_(incoming_messages_callback),
critical_section_rtp_receiver_(
- CriticalSectionWrapper::CreateCriticalSection()),
+ CriticalSectionWrapper::CreateCriticalSection()),
last_receive_time_(0),
last_received_payload_length_(0),
ssrc_(0),
@@ -90,8 +89,7 @@ RtpReceiverImpl::RtpReceiverImpl(int32_t id,
RtpReceiverImpl::~RtpReceiverImpl() {
for (int i = 0; i < num_csrcs_; ++i) {
- cb_rtp_feedback_->OnIncomingCSRCChanged(id_, current_remote_csrc_[i],
- false);
+ cb_rtp_feedback_->OnIncomingCSRCChanged(current_remote_csrc_[i], false);
}
}
@@ -299,13 +297,14 @@ void RtpReceiverImpl::CheckSSRCChanged(const RTPHeader& rtp_header) {
if (new_ssrc) {
// We need to get this to our RTCP sender and receiver.
// We need to do this outside critical section.
- cb_rtp_feedback_->OnIncomingSSRCChanged(id_, rtp_header.ssrc);
+ cb_rtp_feedback_->OnIncomingSSRCChanged(rtp_header.ssrc);
}
if (re_initialize_decoder) {
- if (-1 == cb_rtp_feedback_->OnInitializeDecoder(
- id_, rtp_header.payloadType, payload_name,
- rtp_header.payload_type_frequency, channels, rate)) {
+ if (-1 ==
+ cb_rtp_feedback_->OnInitializeDecoder(
+ rtp_header.payloadType, payload_name,
+ rtp_header.payload_type_frequency, channels, rate)) {
// New stream, same codec.
LOG(LS_ERROR) << "Failed to create decoder for payload type: "
<< static_cast<int>(rtp_header.payloadType);
@@ -397,9 +396,9 @@ int32_t RtpReceiverImpl::CheckPayloadChanged(const RTPHeader& rtp_header,
} // End critsect.
if (re_initialize_decoder) {
- if (-1 == rtp_media_receiver_->InvokeOnInitializeDecoder(
- cb_rtp_feedback_, id_, payload_type, payload_name,
- *specific_payload)) {
+ if (-1 ==
+ rtp_media_receiver_->InvokeOnInitializeDecoder(
+ cb_rtp_feedback_, payload_type, payload_name, *specific_payload)) {
return -1; // Wrong payload type.
}
}
@@ -456,7 +455,7 @@ void RtpReceiverImpl::CheckCSRC(const WebRtcRTPHeader& rtp_header) {
if (!found_match && csrc) {
// Didn't find it, report it as new.
have_called_callback = true;
- cb_rtp_feedback_->OnIncomingCSRCChanged(id_, csrc, true);
+ cb_rtp_feedback_->OnIncomingCSRCChanged(csrc, true);
}
}
// Search for old CSRC in new array.
@@ -473,7 +472,7 @@ void RtpReceiverImpl::CheckCSRC(const WebRtcRTPHeader& rtp_header) {
if (!found_match && csrc) {
// Did not find it, report as removed.
have_called_callback = true;
- cb_rtp_feedback_->OnIncomingCSRCChanged(id_, csrc, false);
+ cb_rtp_feedback_->OnIncomingCSRCChanged(csrc, false);
}
}
if (!have_called_callback) {
@@ -481,9 +480,9 @@ void RtpReceiverImpl::CheckCSRC(const WebRtcRTPHeader& rtp_header) {
// Using CSRC 0 to signal this event, not interop safe, other
// implementations might have CSRC 0 as a valid value.
if (num_csrcs_diff > 0) {
- cb_rtp_feedback_->OnIncomingCSRCChanged(id_, 0, true);
+ cb_rtp_feedback_->OnIncomingCSRCChanged(0, true);
} else if (num_csrcs_diff < 0) {
- cb_rtp_feedback_->OnIncomingCSRCChanged(id_, 0, false);
+ cb_rtp_feedback_->OnIncomingCSRCChanged(0, false);
}
}
}
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_receiver_impl.h b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_receiver_impl.h
index c904e1fcac9..d6fbf2e468b 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_receiver_impl.h
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_receiver_impl.h
@@ -25,8 +25,7 @@ class RtpReceiverImpl : public RtpReceiver {
// Callbacks passed in here may not be NULL (use Null Object callbacks if you
// want callbacks to do nothing). This class takes ownership of the media
// receiver but nothing else.
- RtpReceiverImpl(int32_t id,
- Clock* clock,
+ RtpReceiverImpl(Clock* clock,
RtpAudioFeedback* incoming_audio_messages_callback,
RtpFeedback* incoming_messages_callback,
RTPPayloadRegistry* rtp_payload_registry,
@@ -79,8 +78,6 @@ class RtpReceiverImpl : public RtpReceiver {
RTPPayloadRegistry* rtp_payload_registry_;
rtc::scoped_ptr<RTPReceiverStrategy> rtp_media_receiver_;
- int32_t id_;
-
RtpFeedback* cb_rtp_feedback_;
rtc::scoped_ptr<CriticalSectionWrapper> critical_section_rtp_receiver_;
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_receiver_strategy.h b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_receiver_strategy.h
index 9c09f8e43fb..a9e85ec450a 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_receiver_strategy.h
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_receiver_strategy.h
@@ -28,7 +28,7 @@ class RTPReceiverStrategy {
public:
static RTPReceiverStrategy* CreateVideoStrategy(RtpData* data_callback);
static RTPReceiverStrategy* CreateAudioStrategy(
- int32_t id, RtpData* data_callback,
+ RtpData* data_callback,
RtpAudioFeedback* incoming_messages_callback);
virtual ~RTPReceiverStrategy() {}
@@ -70,7 +70,6 @@ class RTPReceiverStrategy {
// Invokes the OnInitializeDecoder callback in a media-specific way.
virtual int32_t InvokeOnInitializeDecoder(
RtpFeedback* callback,
- int32_t id,
int8_t payload_type,
const char payload_name[RTP_PAYLOAD_NAME_SIZE],
const PayloadUnion& specific_payload) const = 0;
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_receiver_video.cc b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_receiver_video.cc
index ff64e49cafa..a8db0d293f6 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_receiver_video.cc
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_receiver_video.cc
@@ -61,7 +61,7 @@ int32_t RTPReceiverVideo::ParseRtpPacket(WebRtcRTPHeader* rtp_header,
rtp_header->header.timestamp);
rtp_header->type.Video.codec = specific_payload.Video.videoCodecType;
- DCHECK_GE(payload_length, rtp_header->header.paddingLength);
+ RTC_DCHECK_GE(payload_length, rtp_header->header.paddingLength);
const size_t payload_data_length =
payload_length - rtp_header->header.paddingLength;
@@ -111,14 +111,13 @@ RTPAliveType RTPReceiverVideo::ProcessDeadOrAlive(
int32_t RTPReceiverVideo::InvokeOnInitializeDecoder(
RtpFeedback* callback,
- int32_t id,
int8_t payload_type,
const char payload_name[RTP_PAYLOAD_NAME_SIZE],
const PayloadUnion& specific_payload) const {
// For video we just go with default values.
if (-1 ==
- callback->OnInitializeDecoder(
- id, payload_type, payload_name, kVideoPayloadTypeFrequency, 1, 0)) {
+ callback->OnInitializeDecoder(payload_type, payload_name,
+ kVideoPayloadTypeFrequency, 1, 0)) {
LOG(LS_ERROR) << "Failed to created decoder for payload type: "
<< static_cast<int>(payload_type);
return -1;
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_receiver_video.h b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_receiver_video.h
index 8528a7d6b7f..23128df6e1e 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_receiver_video.h
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_receiver_video.h
@@ -49,7 +49,6 @@ class RTPReceiverVideo : public RTPReceiverStrategy {
int32_t InvokeOnInitializeDecoder(
RtpFeedback* callback,
- int32_t id,
int8_t payload_type,
const char payload_name[RTP_PAYLOAD_NAME_SIZE],
const PayloadUnion& specific_payload) const override;
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.cc b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.cc
index d2e224dfa4e..6840d81e3e1 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.cc
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.cc
@@ -27,23 +27,23 @@
namespace webrtc {
RtpRtcp::Configuration::Configuration()
- : id(-1),
- audio(false),
+ : audio(false),
receiver_only(false),
- clock(NULL),
+ clock(nullptr),
receive_statistics(NullObjectReceiveStatistics()),
- outgoing_transport(NULL),
- intra_frame_callback(NULL),
- bandwidth_callback(NULL),
- rtt_stats(NULL),
- rtcp_packet_type_counter_observer(NULL),
+ outgoing_transport(nullptr),
+ intra_frame_callback(nullptr),
+ bandwidth_callback(nullptr),
+ transport_feedback_callback(nullptr),
+ rtt_stats(nullptr),
+ rtcp_packet_type_counter_observer(nullptr),
audio_messages(NullObjectRtpAudioFeedback()),
- remote_bitrate_estimator(NULL),
- paced_sender(NULL),
- send_bitrate_observer(NULL),
- send_frame_count_observer(NULL),
- send_side_delay_observer(NULL) {
-}
+ remote_bitrate_estimator(nullptr),
+ paced_sender(nullptr),
+ transport_sequence_number_allocator(nullptr),
+ send_bitrate_observer(nullptr),
+ send_frame_count_observer(nullptr),
+ send_side_delay_observer(nullptr) {}
RtpRtcp* RtpRtcp::CreateRtpRtcp(const RtpRtcp::Configuration& configuration) {
if (configuration.clock) {
@@ -59,29 +59,29 @@ RtpRtcp* RtpRtcp::CreateRtpRtcp(const RtpRtcp::Configuration& configuration) {
}
ModuleRtpRtcpImpl::ModuleRtpRtcpImpl(const Configuration& configuration)
- : rtp_sender_(configuration.id,
- configuration.audio,
+ : rtp_sender_(configuration.audio,
configuration.clock,
configuration.outgoing_transport,
configuration.audio_messages,
configuration.paced_sender,
+ configuration.transport_sequence_number_allocator,
+ configuration.transport_feedback_callback,
configuration.send_bitrate_observer,
configuration.send_frame_count_observer,
configuration.send_side_delay_observer),
- rtcp_sender_(configuration.id,
- configuration.audio,
+ rtcp_sender_(configuration.audio,
configuration.clock,
configuration.receive_statistics,
- configuration.rtcp_packet_type_counter_observer),
- rtcp_receiver_(configuration.id,
- configuration.clock,
+ configuration.rtcp_packet_type_counter_observer,
+ configuration.outgoing_transport),
+ rtcp_receiver_(configuration.clock,
configuration.receiver_only,
configuration.rtcp_packet_type_counter_observer,
configuration.bandwidth_callback,
configuration.intra_frame_callback,
+ configuration.transport_feedback_callback,
this),
clock_(configuration.clock),
- id_(configuration.id),
audio_(configuration.audio),
collision_detected_(false),
last_process_time_(configuration.clock->TimeInMilliseconds()),
@@ -100,9 +100,6 @@ ModuleRtpRtcpImpl::ModuleRtpRtcpImpl(const Configuration& configuration)
rtt_ms_(0) {
send_video_codec_.codecType = kVideoCodecUnknown;
- // TODO(pwestin) move to constructors of each rtp/rtcp sender/receiver object.
- rtcp_sender_.RegisterSendTransport(configuration.outgoing_transport);
-
// Make sure that RTCP objects are aware of our SSRC.
uint32_t SSRC = rtp_sender_.SSRC();
rtcp_sender_.SetSSRC(SSRC);
@@ -472,23 +469,20 @@ int32_t ModuleRtpRtcpImpl::SetTransportOverhead(
}
int32_t ModuleRtpRtcpImpl::SetMaxTransferUnit(const uint16_t mtu) {
- if (mtu > IP_PACKET_SIZE) {
- LOG(LS_ERROR) << "Invalid mtu: " << mtu;
- return -1;
- }
+ RTC_DCHECK_LE(mtu, IP_PACKET_SIZE) << "Invalid mtu: " << mtu;
return rtp_sender_.SetMaxPayloadLength(mtu - packet_overhead_,
packet_overhead_);
}
-RTCPMethod ModuleRtpRtcpImpl::RTCP() const {
- if (rtcp_sender_.Status() != kRtcpOff) {
+RtcpMode ModuleRtpRtcpImpl::RTCP() const {
+ if (rtcp_sender_.Status() != RtcpMode::kOff) {
return rtcp_receiver_.Status();
}
- return kRtcpOff;
+ return RtcpMode::kOff;
}
// Configure RTCP status i.e on/off.
-void ModuleRtpRtcpImpl::SetRTCPStatus(const RTCPMethod method) {
+void ModuleRtpRtcpImpl::SetRTCPStatus(const RtcpMode method) {
rtcp_sender_.SetRTCPStatus(method);
rtcp_receiver_.SetRTCPStatus(method);
}
@@ -604,6 +598,31 @@ void ModuleRtpRtcpImpl::GetSendStreamDataCounters(
rtp_sender_.GetDataCounters(rtp_counters, rtx_counters);
}
+void ModuleRtpRtcpImpl::GetRtpPacketLossStats(
+ bool outgoing,
+ uint32_t ssrc,
+ struct RtpPacketLossStats* loss_stats) const {
+ if (!loss_stats) return;
+ const PacketLossStats* stats_source = NULL;
+ if (outgoing) {
+ if (SSRC() == ssrc) {
+ stats_source = &send_loss_stats_;
+ }
+ } else {
+ if (rtcp_receiver_.RemoteSSRC() == ssrc) {
+ stats_source = &receive_loss_stats_;
+ }
+ }
+ if (stats_source) {
+ loss_stats->single_packet_loss_count =
+ stats_source->GetSingleLossCount();
+ loss_stats->multiple_packet_loss_event_count =
+ stats_source->GetMultipleLossEventCount();
+ loss_stats->multiple_packet_loss_packet_count =
+ stats_source->GetMultipleLossPacketCount();
+ }
+}
+
int32_t ModuleRtpRtcpImpl::RemoteRTCPStat(RTCPSenderInfo* sender_info) {
return rtcp_receiver_.SenderInfoReceived(sender_info);
}
@@ -628,15 +647,6 @@ void ModuleRtpRtcpImpl::SetREMBData(const uint32_t bitrate,
rtcp_sender_.SetREMBData(bitrate, ssrcs);
}
-// (IJ) Extended jitter report.
-bool ModuleRtpRtcpImpl::IJ() const {
- return rtcp_sender_.IJ();
-}
-
-void ModuleRtpRtcpImpl::SetIJStatus(const bool enable) {
- rtcp_sender_.SetIJStatus(enable);
-}
-
int32_t ModuleRtpRtcpImpl::RegisterSendRtpHeaderExtension(
const RTPExtensionType type,
const uint8_t id) {
@@ -677,6 +687,9 @@ int ModuleRtpRtcpImpl::SetSelectiveRetransmissions(uint8_t settings) {
// Send a Negative acknowledgment packet.
int32_t ModuleRtpRtcpImpl::SendNACK(const uint16_t* nack_list,
const uint16_t size) {
+ for (int i = 0; i < size; ++i) {
+ receive_loss_stats_.AddLostPacket(nack_list[i]);
+ }
uint16_t nack_length = size;
uint16_t start_id = 0;
int64_t now = clock_->TimeInMilliseconds();
@@ -749,6 +762,11 @@ RtcpStatisticsCallback* ModuleRtpRtcpImpl::GetRtcpStatisticsCallback() {
return rtcp_receiver_.GetRtcpStatisticsCallback();
}
+bool ModuleRtpRtcpImpl::SendFeedbackPacket(
+ const rtcp::TransportFeedback& packet) {
+ return rtcp_sender_.SendFeedbackPacket(packet);
+}
+
// Send a TelephoneEvent tone using RFC 2833 (4733).
int32_t ModuleRtpRtcpImpl::SendTelephoneEventOutband(
const uint8_t key,
@@ -809,20 +827,17 @@ int32_t ModuleRtpRtcpImpl::SendRTCPSliceLossIndication(
GetFeedbackState(), kRtcpSli, 0, 0, false, picture_id);
}
-int32_t ModuleRtpRtcpImpl::SetGenericFECStatus(
+void ModuleRtpRtcpImpl::SetGenericFECStatus(
const bool enable,
const uint8_t payload_type_red,
const uint8_t payload_type_fec) {
- return rtp_sender_.SetGenericFECStatus(enable,
- payload_type_red,
- payload_type_fec);
+ rtp_sender_.SetGenericFECStatus(enable, payload_type_red, payload_type_fec);
}
-int32_t ModuleRtpRtcpImpl::GenericFECStatus(
- bool& enable,
- uint8_t& payload_type_red,
- uint8_t& payload_type_fec) {
- return rtp_sender_.GenericFECStatus(&enable, &payload_type_red,
+void ModuleRtpRtcpImpl::GenericFECStatus(bool& enable,
+ uint8_t& payload_type_red,
+ uint8_t& payload_type_fec) {
+ rtp_sender_.GenericFECStatus(&enable, &payload_type_red,
&payload_type_fec);
}
@@ -846,7 +861,7 @@ void ModuleRtpRtcpImpl::SetRemoteSSRC(const uint32_t ssrc) {
// Configured via API ignore.
return;
}
- if (kRtcpOff != rtcp_sender_.Status()) {
+ if (RtcpMode::kOff != rtcp_sender_.Status()) {
// Send RTCP bye on the current SSRC.
SendRTCP(kRtcpBye);
}
@@ -892,6 +907,9 @@ bool ModuleRtpRtcpImpl::SendTimeOfXrRrReport(
void ModuleRtpRtcpImpl::OnReceivedNACK(
const std::list<uint16_t>& nack_sequence_numbers) {
+ for (uint16_t nack_sequence_number : nack_sequence_numbers) {
+ send_loss_stats_.AddLostPacket(nack_sequence_number);
+ }
if (!rtp_sender_.StorePackets() ||
nack_sequence_numbers.size() == 0) {
return;
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.h b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.h
index 9cd7e702632..c9b6686c0a2 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.h
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl.h
@@ -16,6 +16,7 @@
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp.h"
+#include "webrtc/modules/rtp_rtcp/source/packet_loss_stats.h"
#include "webrtc/modules/rtp_rtcp/source/rtcp_receiver.h"
#include "webrtc/modules/rtp_rtcp/source/rtcp_sender.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_sender.h"
@@ -125,10 +126,10 @@ class ModuleRtpRtcpImpl : public RtpRtcp {
// RTCP part.
// Get RTCP status.
- RTCPMethod RTCP() const override;
+ RtcpMode RTCP() const override;
// Configure RTCP status i.e on/off.
- void SetRTCPStatus(RTCPMethod method) override;
+ void SetRTCPStatus(RtcpMode method) override;
// Set RTCP CName.
int32_t SetCNAME(const char* c_name) override;
@@ -170,6 +171,11 @@ class ModuleRtpRtcpImpl : public RtpRtcp {
StreamDataCounters* rtp_counters,
StreamDataCounters* rtx_counters) const override;
+ void GetRtpPacketLossStats(
+ bool outgoing,
+ uint32_t ssrc,
+ struct RtpPacketLossStats* loss_stats) const override;
+
// Get received RTCP report, sender info.
int32_t RemoteRTCPStat(RTCPSenderInfo* sender_info) override;
@@ -185,11 +191,6 @@ class ModuleRtpRtcpImpl : public RtpRtcp {
void SetREMBData(uint32_t bitrate,
const std::vector<uint32_t>& ssrcs) override;
- // (IJ) Extended jitter report.
- bool IJ() const override;
-
- void SetIJStatus(bool enable) override;
-
// (TMMBR) Temporary Max Media Bit Rate.
bool TMMBR() const override;
@@ -227,6 +228,7 @@ class ModuleRtpRtcpImpl : public RtpRtcp {
RtcpStatisticsCallback* callback) override;
RtcpStatisticsCallback* GetRtcpStatisticsCallback() override;
+ bool SendFeedbackPacket(const rtcp::TransportFeedback& packet) override;
// (APP) Application specific data.
int32_t SetRTCPApplicationSpecificData(uint8_t sub_type,
uint32_t name,
@@ -274,13 +276,13 @@ class ModuleRtpRtcpImpl : public RtpRtcp {
void SetTargetSendBitrate(uint32_t bitrate_bps) override;
- int32_t SetGenericFECStatus(bool enable,
- uint8_t payload_type_red,
- uint8_t payload_type_fec) override;
+ void SetGenericFECStatus(bool enable,
+ uint8_t payload_type_red,
+ uint8_t payload_type_fec) override;
- int32_t GenericFECStatus(bool& enable,
- uint8_t& payload_type_red,
- uint8_t& payload_type_fec) override;
+ void GenericFECStatus(bool& enable,
+ uint8_t& payload_type_red,
+ uint8_t& payload_type_fec) override;
int32_t SetFecParameters(const FecProtectionParams* delta_params,
const FecProtectionParams* key_params) override;
@@ -351,7 +353,6 @@ class ModuleRtpRtcpImpl : public RtpRtcp {
bool TimeToSendFullNackList(int64_t now) const;
- int32_t id_;
const bool audio_;
bool collision_detected_;
int64_t last_process_time_;
@@ -374,6 +375,9 @@ class ModuleRtpRtcpImpl : public RtpRtcp {
RtcpRttStats* rtt_stats_;
+ PacketLossStats send_loss_stats_;
+ PacketLossStats receive_loss_stats_;
+
// The processed RTT from RtcpRttStats.
rtc::scoped_ptr<CriticalSectionWrapper> critical_section_rtt_;
int64_t rtt_ms_;
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc
index 12630f79a9a..03b91a9e33a 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc
@@ -12,7 +12,6 @@
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/common_types.h"
-#include "webrtc/modules/pacing/include/mock/mock_paced_sender.h"
#include "webrtc/modules/rtp_rtcp/interface/rtp_header_parser.h"
#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
#include "webrtc/modules/rtp_rtcp/source/rtcp_packet.h"
@@ -62,15 +61,17 @@ class SendTransport : public Transport,
clock_ = clock;
delay_ms_ = delay_ms;
}
- int SendPacket(int /*ch*/, const void* data, size_t len) override {
+ bool SendRtp(const uint8_t* data,
+ size_t len,
+ const PacketOptions& options) override {
RTPHeader header;
rtc::scoped_ptr<RtpHeaderParser> parser(RtpHeaderParser::Create());
EXPECT_TRUE(parser->Parse(static_cast<const uint8_t*>(data), len, &header));
++rtp_packets_sent_;
last_rtp_header_ = header;
- return static_cast<int>(len);
+ return true;
}
- int SendRTCPPacket(int /*ch*/, const void* data, size_t len) override {
+ bool SendRtcp(const uint8_t* data, size_t len) override {
test::RtcpPacketParser parser;
parser.Parse(static_cast<const uint8_t*>(data), len);
last_nack_list_ = parser.nack_item()->last_nack_list();
@@ -81,7 +82,7 @@ class SendTransport : public Transport,
EXPECT_TRUE(receiver_ != NULL);
EXPECT_EQ(0, receiver_->IncomingRtcpPacket(
static_cast<const uint8_t*>(data), len));
- return static_cast<int>(len);
+ return true;
}
ModuleRtpRtcpImpl* receiver_;
SimulatedClock* clock_;
@@ -104,7 +105,7 @@ class RtpRtcpModule : public RtcpPacketTypeCounterObserver {
config.rtt_stats = &rtt_stats_;
impl_.reset(new ModuleRtpRtcpImpl(config));
- impl_->SetRTCPStatus(kRtcpCompound);
+ impl_->SetRTCPStatus(RtcpMode::kCompound);
transport_.SimulateNetworkDelay(kOneWayNetworkDelayMs, clock);
}
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_sender.cc b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_sender.cc
index 0456688a898..252ffb2a3e5 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_sender.cc
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_sender.cc
@@ -97,15 +97,17 @@ class BitrateAggregator {
uint32_t ssrc_;
};
-RTPSender::RTPSender(int32_t id,
- bool audio,
- Clock* clock,
- Transport* transport,
- RtpAudioFeedback* audio_feedback,
- PacedSender* paced_sender,
- BitrateStatisticsObserver* bitrate_callback,
- FrameCountObserver* frame_count_observer,
- SendSideDelayObserver* send_side_delay_observer)
+RTPSender::RTPSender(
+ bool audio,
+ Clock* clock,
+ Transport* transport,
+ RtpAudioFeedback* audio_feedback,
+ RtpPacketSender* paced_sender,
+ TransportSequenceNumberAllocator* sequence_number_allocator,
+ TransportFeedbackObserver* transport_feedback_observer,
+ BitrateStatisticsObserver* bitrate_callback,
+ FrameCountObserver* frame_count_observer,
+ SendSideDelayObserver* send_side_delay_observer)
: clock_(clock),
// TODO(holmer): Remove this conversion when we remove the use of
// TickTime.
@@ -113,12 +115,12 @@ RTPSender::RTPSender(int32_t id,
TickTime::MillisecondTimestamp()),
bitrates_(new BitrateAggregator(bitrate_callback)),
total_bitrate_sent_(clock, bitrates_->total_bitrate_observer()),
- id_(id),
audio_configured_(audio),
- audio_(audio ? new RTPSenderAudio(id, clock, this, audio_feedback)
- : nullptr),
+ audio_(audio ? new RTPSenderAudio(clock, this, audio_feedback) : nullptr),
video_(audio ? nullptr : new RTPSenderVideo(clock, this)),
paced_sender_(paced_sender),
+ transport_sequence_number_allocator_(sequence_number_allocator),
+ transport_feedback_observer_(transport_feedback_observer),
last_capture_time_ms_sent_(0),
send_critsect_(CriticalSectionWrapper::CreateCriticalSection()),
transport_(transport),
@@ -355,10 +357,8 @@ int RTPSender::SendPayloadFrequency() const {
int32_t RTPSender::SetMaxPayloadLength(size_t max_payload_length,
uint16_t packet_over_head) {
// Sanity check.
- if (max_payload_length < 100 || max_payload_length > IP_PACKET_SIZE) {
- LOG(LS_ERROR) << "Invalid max payload length: " << max_payload_length;
- return -1;
- }
+ RTC_DCHECK(max_payload_length >= 100 && max_payload_length <= IP_PACKET_SIZE)
+ << "Invalid max payload length: " << max_payload_length;
CriticalSectionScoped cs(send_critsect_.get());
max_payload_length_ = max_payload_length;
packet_over_head_ = packet_over_head;
@@ -409,8 +409,8 @@ uint32_t RTPSender::RtxSsrc() const {
void RTPSender::SetRtxPayloadType(int payload_type,
int associated_payload_type) {
CriticalSectionScoped cs(send_critsect_.get());
- DCHECK_LE(payload_type, 127);
- DCHECK_LE(associated_payload_type, 127);
+ RTC_DCHECK_LE(payload_type, 127);
+ RTC_DCHECK_LE(associated_payload_type, 127);
if (payload_type < 0) {
LOG(LS_ERROR) << "Invalid RTX payload type: " << payload_type;
return;
@@ -504,7 +504,7 @@ int32_t RTPSender::SendOutgoingData(FrameType frame_type,
return -1;
}
- uint32_t ret_val;
+ int32_t ret_val;
if (audio_configured_) {
TRACE_EVENT_ASYNC_STEP1("webrtc", "Audio", capture_timestamp,
"Send", "type", FrameTypeToString(frame_type));
@@ -567,48 +567,39 @@ size_t RTPSender::TrySendRedundantPayloads(size_t bytes_to_send) {
return bytes_to_send - bytes_left;
}
-size_t RTPSender::BuildPaddingPacket(uint8_t* packet, size_t header_length) {
- size_t padding_bytes_in_packet = kMaxPaddingLength;
+void RTPSender::BuildPaddingPacket(uint8_t* packet,
+ size_t header_length,
+ size_t padding_length) {
packet[0] |= 0x20; // Set padding bit.
int32_t *data =
reinterpret_cast<int32_t *>(&(packet[header_length]));
// Fill data buffer with random data.
- for (size_t j = 0; j < (padding_bytes_in_packet >> 2); ++j) {
+ for (size_t j = 0; j < (padding_length >> 2); ++j) {
data[j] = rand(); // NOLINT
}
// Set number of padding bytes in the last byte of the packet.
- packet[header_length + padding_bytes_in_packet - 1] =
- static_cast<uint8_t>(padding_bytes_in_packet);
- return padding_bytes_in_packet;
-}
-
-size_t RTPSender::TrySendPadData(size_t bytes) {
- int64_t capture_time_ms;
- uint32_t timestamp;
- {
- CriticalSectionScoped cs(send_critsect_.get());
- timestamp = timestamp_;
- capture_time_ms = capture_time_ms_;
- if (last_timestamp_time_ms_ > 0) {
- timestamp +=
- (clock_->TimeInMilliseconds() - last_timestamp_time_ms_) * 90;
- capture_time_ms +=
- (clock_->TimeInMilliseconds() - last_timestamp_time_ms_);
- }
- }
- return SendPadData(timestamp, capture_time_ms, bytes);
-}
-
-size_t RTPSender::SendPadData(uint32_t timestamp,
- int64_t capture_time_ms,
- size_t bytes) {
- size_t padding_bytes_in_packet = 0;
+ packet[header_length + padding_length - 1] =
+ static_cast<uint8_t>(padding_length);
+}
+
+size_t RTPSender::SendPadData(size_t bytes,
+ bool timestamp_provided,
+ uint32_t timestamp,
+ int64_t capture_time_ms) {
+ // Always send full padding packets. This is accounted for by
+ // the RtpPacketSender, which will make sure we don't send too
+ // much padding even if a single packet turns out to be larger
+ // than requested.
+ size_t padding_bytes_in_packet =
+ std::min(MaxDataPayloadLength(), kMaxPaddingLength);
size_t bytes_sent = 0;
+ bool using_transport_seq = rtp_header_extension_map_.IsRegistered(
+ kRtpExtensionTransportSequenceNumber) &&
+ transport_sequence_number_allocator_;
for (; bytes > 0; bytes -= padding_bytes_in_packet) {
- // Always send full padding packets.
- if (bytes < kMaxPaddingLength)
- bytes = kMaxPaddingLength;
+ if (bytes < padding_bytes_in_packet)
+ bytes = padding_bytes_in_packet;
uint32_t ssrc;
uint16_t sequence_number;
@@ -616,8 +607,10 @@ size_t RTPSender::SendPadData(uint32_t timestamp,
bool over_rtx;
{
CriticalSectionScoped cs(send_critsect_.get());
- // Only send padding packets following the last packet of a frame,
- // indicated by the marker bit.
+ if (!timestamp_provided) {
+ timestamp = timestamp_;
+ capture_time_ms = capture_time_ms_;
+ }
if (rtx_ == kRtxOff) {
// Without RTX we can't send padding in the middle of frames.
if (!last_packet_marker_bit_)
@@ -633,6 +626,15 @@ size_t RTPSender::SendPadData(uint32_t timestamp,
if (!media_has_been_sent_ && !rtp_header_extension_map_.IsRegistered(
kRtpExtensionAbsoluteSendTime))
return 0;
+ // Only change the timestamp of padding packets sent over RTX.
+ // Padding-only packets over RTP have to be sent as part of a media
+ // frame (and therefore carry the same timestamp).
+ if (last_timestamp_time_ms_ > 0) {
+ timestamp +=
+ (clock_->TimeInMilliseconds() - last_timestamp_time_ms_) * 90;
+ capture_time_ms +=
+ (clock_->TimeInMilliseconds() - last_timestamp_time_ms_);
+ }
ssrc = ssrc_rtx_;
sequence_number = sequence_number_rtx_;
++sequence_number_rtx_;
@@ -645,9 +647,7 @@ size_t RTPSender::SendPadData(uint32_t timestamp,
size_t header_length =
CreateRtpHeader(padding_packet, payload_type, ssrc, false, timestamp,
sequence_number, std::vector<uint32_t>());
- assert(header_length != static_cast<size_t>(-1));
- padding_bytes_in_packet = BuildPaddingPacket(padding_packet, header_length);
- assert(padding_bytes_in_packet <= bytes);
+ BuildPaddingPacket(padding_packet, header_length, padding_bytes_in_packet);
size_t length = padding_bytes_in_packet + header_length;
int64_t now_ms = clock_->TimeInMilliseconds();
@@ -661,8 +661,21 @@ size_t RTPSender::SendPadData(uint32_t timestamp,
}
UpdateAbsoluteSendTime(padding_packet, length, rtp_header, now_ms);
- if (!SendPacketToNetwork(padding_packet, length))
+
+ PacketOptions options;
+ if (using_transport_seq) {
+ options.packet_id =
+ UpdateTransportSequenceNumber(padding_packet, length, rtp_header);
+ }
+
+ if (!SendPacketToNetwork(padding_packet, length, options))
break;
+
+ if (using_transport_seq && transport_feedback_observer_) {
+ transport_feedback_observer_->OnPacketSent(PacketInfo(
+ 0, now_ms, options.packet_id, length, true));
+ }
+
bytes_sent += padding_bytes_in_packet;
UpdateRtpStats(padding_packet, length, rtp_header, over_rtx, false);
}
@@ -700,7 +713,7 @@ int32_t RTPSender::ReSendPacket(uint16_t packet_id, int64_t min_resend_time) {
// TickTime.
int64_t corrected_capture_tims_ms = capture_time_ms + clock_delta_ms_;
if (!paced_sender_->SendPacket(
- PacedSender::kHighPriority, header.ssrc, header.sequenceNumber,
+ RtpPacketSender::kHighPriority, header.ssrc, header.sequenceNumber,
corrected_capture_tims_ms, length - header.headerLength, true)) {
// We can't send the packet right now.
// We will be called when it is time.
@@ -712,15 +725,21 @@ int32_t RTPSender::ReSendPacket(uint16_t packet_id, int64_t min_resend_time) {
CriticalSectionScoped lock(send_critsect_.get());
rtx = rtx_;
}
- return PrepareAndSendPacket(data_buffer, length, capture_time_ms,
- (rtx & kRtxRetransmitted) > 0, true) ?
- static_cast<int32_t>(length) : -1;
+ if (!PrepareAndSendPacket(data_buffer, length, capture_time_ms,
+ (rtx & kRtxRetransmitted) > 0, true)) {
+ return -1;
+ }
+ return static_cast<int32_t>(length);
}
-bool RTPSender::SendPacketToNetwork(const uint8_t *packet, size_t size) {
+bool RTPSender::SendPacketToNetwork(const uint8_t* packet,
+ size_t size,
+ const PacketOptions& options) {
int bytes_sent = -1;
if (transport_) {
- bytes_sent = transport_->SendPacket(id_, packet, size);
+ bytes_sent = transport_->SendRtp(packet, size, options)
+ ? static_cast<int>(size)
+ : -1;
}
TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("webrtc_rtp"),
"RTPSender::SendPacketToNetwork", "size", size, "sent",
@@ -899,11 +918,27 @@ bool RTPSender::PrepareAndSendPacket(uint8_t* buffer,
UpdateTransmissionTimeOffset(buffer_to_send_ptr, length, rtp_header,
diff_ms);
UpdateAbsoluteSendTime(buffer_to_send_ptr, length, rtp_header, now_ms);
- bool ret = SendPacketToNetwork(buffer_to_send_ptr, length);
+
+ // TODO(sprang): Potentially too much overhead in IsRegistered()?
+ bool using_transport_seq = rtp_header_extension_map_.IsRegistered(
+ kRtpExtensionTransportSequenceNumber) &&
+ transport_sequence_number_allocator_ &&
+ !is_retransmit;
+ PacketOptions options;
+ if (using_transport_seq) {
+ options.packet_id =
+ UpdateTransportSequenceNumber(buffer_to_send_ptr, length, rtp_header);
+ }
+
+ bool ret = SendPacketToNetwork(buffer_to_send_ptr, length, options);
if (ret) {
CriticalSectionScoped lock(send_critsect_.get());
media_has_been_sent_ = true;
}
+ if (using_transport_seq && transport_feedback_observer_) {
+ transport_feedback_observer_->OnPacketSent(
+ PacketInfo(0, now_ms, options.packet_id, length, true));
+ }
UpdateRtpStats(buffer_to_send_ptr, length, rtp_header, send_over_rtx,
is_retransmit);
return ret;
@@ -962,19 +997,22 @@ size_t RTPSender::TimeToSendPadding(size_t bytes) {
return 0;
{
CriticalSectionScoped cs(send_critsect_.get());
- if (!sending_media_) return 0;
+ if (!sending_media_)
+ return 0;
}
size_t bytes_sent = TrySendRedundantPayloads(bytes);
if (bytes_sent < bytes)
- bytes_sent += TrySendPadData(bytes - bytes_sent);
+ bytes_sent += SendPadData(bytes - bytes_sent, false, 0, 0);
return bytes_sent;
}
// TODO(pwestin): send in the RtpHeaderParser to avoid parsing it again.
-int32_t RTPSender::SendToNetwork(
- uint8_t *buffer, size_t payload_length, size_t rtp_header_length,
- int64_t capture_time_ms, StorageType storage,
- PacedSender::Priority priority) {
+int32_t RTPSender::SendToNetwork(uint8_t* buffer,
+ size_t payload_length,
+ size_t rtp_header_length,
+ int64_t capture_time_ms,
+ StorageType storage,
+ RtpPacketSender::Priority priority) {
RtpUtility::RtpHeaderParser rtp_parser(buffer,
payload_length + rtp_header_length);
RTPHeader rtp_header;
@@ -995,8 +1033,7 @@ int32_t RTPSender::SendToNetwork(
// Used for NACK and to spread out the transmission of packets.
if (packet_history_.PutRTPPacket(buffer, rtp_header_length + payload_length,
- max_payload_length_, capture_time_ms,
- storage) != 0) {
+ capture_time_ms, storage) != 0) {
return -1;
}
@@ -1024,7 +1061,7 @@ int32_t RTPSender::SendToNetwork(
}
size_t length = payload_length + rtp_header_length;
- bool sent = SendPacketToNetwork(buffer, length);
+ bool sent = SendPacketToNetwork(buffer, length, PacketOptions());
if (storage != kDontStore) {
// Mark the packet as sent in the history even if send failed. Dropping a
@@ -1214,7 +1251,8 @@ uint16_t RTPSender::BuildRTPHeaderExtension(uint8_t* data_buffer,
block_length = BuildVideoRotationExtension(extension_data);
break;
case kRtpExtensionTransportSequenceNumber:
- block_length = BuildTransportSequenceNumberExtension(extension_data);
+ block_length = BuildTransportSequenceNumberExtension(
+ extension_data, transport_sequence_number_);
break;
default:
assert(false);
@@ -1367,7 +1405,8 @@ uint8_t RTPSender::BuildVideoRotationExtension(uint8_t* data_buffer) const {
}
uint8_t RTPSender::BuildTransportSequenceNumberExtension(
- uint8_t* data_buffer) const {
+ uint8_t* data_buffer,
+ uint16_t sequence_number) const {
// 0 1 2
// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
@@ -1384,8 +1423,7 @@ uint8_t RTPSender::BuildTransportSequenceNumberExtension(
size_t pos = 0;
const uint8_t len = 1;
data_buffer[pos++] = (id << 4) + len;
- ByteWriter<uint16_t>::WriteBigEndian(data_buffer + pos,
- transport_sequence_number_);
+ ByteWriter<uint16_t>::WriteBigEndian(data_buffer + pos, sequence_number);
pos += 2;
assert(pos == kTransportSequenceNumberLength);
return kTransportSequenceNumberLength;
@@ -1428,35 +1466,62 @@ bool RTPSender::FindHeaderExtensionPosition(RTPExtensionType type,
return true;
}
-void RTPSender::UpdateTransmissionTimeOffset(uint8_t* rtp_packet,
- size_t rtp_packet_length,
- const RTPHeader& rtp_header,
- int64_t time_diff_ms) const {
- CriticalSectionScoped cs(send_critsect_.get());
+RTPSender::ExtensionStatus RTPSender::VerifyExtension(
+ RTPExtensionType extension_type,
+ uint8_t* rtp_packet,
+ size_t rtp_packet_length,
+ const RTPHeader& rtp_header,
+ size_t extension_length_bytes,
+ size_t* extension_offset) const {
// Get id.
uint8_t id = 0;
- if (rtp_header_extension_map_.GetId(kRtpExtensionTransmissionTimeOffset,
- &id) != 0) {
- // Not registered.
- return;
- }
+ if (rtp_header_extension_map_.GetId(extension_type, &id) != 0)
+ return ExtensionStatus::kNotRegistered;
size_t block_pos = 0;
- if (!FindHeaderExtensionPosition(kRtpExtensionTransmissionTimeOffset,
- rtp_packet, rtp_packet_length, rtp_header,
- &block_pos)) {
- LOG(LS_WARNING) << "Failed to update transmission time offset.";
- return;
+ if (!FindHeaderExtensionPosition(extension_type, rtp_packet,
+ rtp_packet_length, rtp_header, &block_pos))
+ return ExtensionStatus::kError;
+
+ // Verify that header contains extension.
+ if (!((rtp_packet[kRtpHeaderLength + rtp_header.numCSRCs] == 0xBE) &&
+ (rtp_packet[kRtpHeaderLength + rtp_header.numCSRCs + 1] == 0xDE))) {
+ LOG(LS_WARNING)
+ << "Failed to update absolute send time, hdr extension not found.";
+ return ExtensionStatus::kError;
}
// Verify first byte in block.
- const uint8_t first_block_byte = (id << 4) + 2;
- if (rtp_packet[block_pos] != first_block_byte) {
- LOG(LS_WARNING) << "Failed to update transmission time offset.";
- return;
+ const uint8_t first_block_byte = (id << 4) + (extension_length_bytes - 2);
+ if (rtp_packet[block_pos] != first_block_byte)
+ return ExtensionStatus::kError;
+
+ *extension_offset = block_pos;
+ return ExtensionStatus::kOk;
+}
+
+void RTPSender::UpdateTransmissionTimeOffset(uint8_t* rtp_packet,
+ size_t rtp_packet_length,
+ const RTPHeader& rtp_header,
+ int64_t time_diff_ms) const {
+ size_t offset;
+ CriticalSectionScoped cs(send_critsect_.get());
+ switch (VerifyExtension(kRtpExtensionTransmissionTimeOffset, rtp_packet,
+ rtp_packet_length, rtp_header,
+ kTransmissionTimeOffsetLength, &offset)) {
+ case ExtensionStatus::kNotRegistered:
+ return;
+ case ExtensionStatus::kError:
+ LOG(LS_WARNING) << "Failed to update transmission time offset.";
+ return;
+ case ExtensionStatus::kOk:
+ break;
+ default:
+ RTC_NOTREACHED();
}
+
// Update transmission offset field (converting to a 90 kHz timestamp).
- ByteWriter<int32_t, 3>::WriteBigEndian(rtp_packet + block_pos + 1,
+ ByteWriter<int32_t, 3>::WriteBigEndian(rtp_packet + offset + 1,
time_diff_ms * 90); // RTP timestamp.
}
@@ -1465,29 +1530,24 @@ bool RTPSender::UpdateAudioLevel(uint8_t* rtp_packet,
const RTPHeader& rtp_header,
bool is_voiced,
uint8_t dBov) const {
+ size_t offset;
CriticalSectionScoped cs(send_critsect_.get());
- // Get id.
- uint8_t id = 0;
- if (rtp_header_extension_map_.GetId(kRtpExtensionAudioLevel, &id) != 0) {
- // Not registered.
- return false;
- }
-
- size_t block_pos = 0;
- if (!FindHeaderExtensionPosition(kRtpExtensionAudioLevel, rtp_packet,
- rtp_packet_length, rtp_header, &block_pos)) {
- LOG(LS_WARNING) << "Failed to update audio level.";
- return false;
+ switch (VerifyExtension(kRtpExtensionAudioLevel, rtp_packet,
+ rtp_packet_length, rtp_header, kAudioLevelLength,
+ &offset)) {
+ case ExtensionStatus::kNotRegistered:
+ return false;
+ case ExtensionStatus::kError:
+ LOG(LS_WARNING) << "Failed to update audio level.";
+ return false;
+ case ExtensionStatus::kOk:
+ break;
+ default:
+ RTC_NOTREACHED();
}
- // Verify first byte in block.
- const uint8_t first_block_byte = (id << 4) + 0;
- if (rtp_packet[block_pos] != first_block_byte) {
- LOG(LS_WARNING) << "Failed to update audio level.";
- return false;
- }
- rtp_packet[block_pos + 1] = (is_voiced ? 0x80 : 0x00) + (dBov & 0x7f);
+ rtp_packet[offset + 1] = (is_voiced ? 0x80 : 0x00) + (dBov & 0x7f);
return true;
}
@@ -1495,37 +1555,24 @@ bool RTPSender::UpdateVideoRotation(uint8_t* rtp_packet,
size_t rtp_packet_length,
const RTPHeader& rtp_header,
VideoRotation rotation) const {
+ size_t offset;
CriticalSectionScoped cs(send_critsect_.get());
- // Get id.
- uint8_t id = 0;
- if (rtp_header_extension_map_.GetId(kRtpExtensionVideoRotation, &id) != 0) {
- // Not registered.
- return false;
- }
-
- size_t block_pos = 0;
- if (!FindHeaderExtensionPosition(kRtpExtensionVideoRotation, rtp_packet,
- rtp_packet_length, rtp_header, &block_pos)) {
- LOG(LS_WARNING) << "Failed to update video rotation (CVO).";
- return false;
- }
- // Get length until start of header extension block.
- int extension_block_pos =
- rtp_header_extension_map_.GetLengthUntilBlockStartInBytes(
- kRtpExtensionVideoRotation);
- if (extension_block_pos < 0) {
- // The feature is not enabled.
- return false;
+ switch (VerifyExtension(kRtpExtensionVideoRotation, rtp_packet,
+ rtp_packet_length, rtp_header, kVideoRotationLength,
+ &offset)) {
+ case ExtensionStatus::kNotRegistered:
+ return false;
+ case ExtensionStatus::kError:
+ LOG(LS_WARNING) << "Failed to update CVO.";
+ return false;
+ case ExtensionStatus::kOk:
+ break;
+ default:
+ RTC_NOTREACHED();
}
- // Verify first byte in block.
- const uint8_t first_block_byte = (id << 4) + 0;
- if (rtp_packet[block_pos] != first_block_byte) {
- LOG(LS_WARNING) << "Failed to update CVO.";
- return false;
- }
- rtp_packet[block_pos + 1] = ConvertVideoRotationToCVOByte(rotation);
+ rtp_packet[offset + 1] = ConvertVideoRotationToCVOByte(rotation);
return true;
}
@@ -1533,49 +1580,55 @@ void RTPSender::UpdateAbsoluteSendTime(uint8_t* rtp_packet,
size_t rtp_packet_length,
const RTPHeader& rtp_header,
int64_t now_ms) const {
+ size_t offset;
CriticalSectionScoped cs(send_critsect_.get());
- // Get id.
- uint8_t id = 0;
- if (rtp_header_extension_map_.GetId(kRtpExtensionAbsoluteSendTime,
- &id) != 0) {
- // Not registered.
- return;
- }
- // Get length until start of header extension block.
- int extension_block_pos =
- rtp_header_extension_map_.GetLengthUntilBlockStartInBytes(
- kRtpExtensionAbsoluteSendTime);
- if (extension_block_pos < 0) {
- // The feature is not enabled.
- return;
- }
- size_t block_pos =
- kRtpHeaderLength + rtp_header.numCSRCs + extension_block_pos;
- if (rtp_packet_length < block_pos + kAbsoluteSendTimeLength ||
- rtp_header.headerLength < block_pos + kAbsoluteSendTimeLength) {
- LOG(LS_WARNING) << "Failed to update absolute send time, invalid length.";
- return;
- }
- // Verify that header contains extension.
- if (!((rtp_packet[kRtpHeaderLength + rtp_header.numCSRCs] == 0xBE) &&
- (rtp_packet[kRtpHeaderLength + rtp_header.numCSRCs + 1] == 0xDE))) {
- LOG(LS_WARNING)
- << "Failed to update absolute send time, hdr extension not found.";
- return;
- }
- // Verify first byte in block.
- const uint8_t first_block_byte = (id << 4) + 2;
- if (rtp_packet[block_pos] != first_block_byte) {
- LOG(LS_WARNING) << "Failed to update absolute send time.";
- return;
+ switch (VerifyExtension(kRtpExtensionAbsoluteSendTime, rtp_packet,
+ rtp_packet_length, rtp_header,
+ kAbsoluteSendTimeLength, &offset)) {
+ case ExtensionStatus::kNotRegistered:
+ return;
+ case ExtensionStatus::kError:
+ LOG(LS_WARNING) << "Failed to update absolute send time";
+ return;
+ case ExtensionStatus::kOk:
+ break;
+ default:
+ RTC_NOTREACHED();
}
+
// Update absolute send time field (convert ms to 24-bit unsigned with 18 bit
// fractional part).
- ByteWriter<uint32_t, 3>::WriteBigEndian(rtp_packet + block_pos + 1,
+ ByteWriter<uint32_t, 3>::WriteBigEndian(rtp_packet + offset + 1,
((now_ms << 18) / 1000) & 0x00ffffff);
}
+uint16_t RTPSender::UpdateTransportSequenceNumber(
+ uint8_t* rtp_packet,
+ size_t rtp_packet_length,
+ const RTPHeader& rtp_header) const {
+ size_t offset;
+ CriticalSectionScoped cs(send_critsect_.get());
+
+ switch (VerifyExtension(kRtpExtensionTransportSequenceNumber, rtp_packet,
+ rtp_packet_length, rtp_header,
+ kTransportSequenceNumberLength, &offset)) {
+ case ExtensionStatus::kNotRegistered:
+ return 0;
+ case ExtensionStatus::kError:
+ LOG(LS_WARNING) << "Failed to update transport sequence number";
+ return 0;
+ case ExtensionStatus::kOk:
+ break;
+ default:
+ RTC_NOTREACHED();
+ }
+
+ uint16_t seq = transport_sequence_number_allocator_->AllocateSequenceNumber();
+ BuildTransportSequenceNumberExtension(rtp_packet + offset, seq);
+ return seq;
+}
+
void RTPSender::SetSendingStatus(bool enabled) {
if (enabled) {
uint32_t frequency_hz = SendPayloadFrequency();
@@ -1738,24 +1791,18 @@ int32_t RTPSender::SendRTPIntraRequest() {
return video_->SendRTPIntraRequest();
}
-int32_t RTPSender::SetGenericFECStatus(bool enable,
- uint8_t payload_type_red,
- uint8_t payload_type_fec) {
- if (audio_configured_) {
- return -1;
- }
+void RTPSender::SetGenericFECStatus(bool enable,
+ uint8_t payload_type_red,
+ uint8_t payload_type_fec) {
+ RTC_DCHECK(!audio_configured_);
video_->SetGenericFECStatus(enable, payload_type_red, payload_type_fec);
- return 0;
}
-int32_t RTPSender::GenericFECStatus(bool* enable,
+void RTPSender::GenericFECStatus(bool* enable,
uint8_t* payload_type_red,
uint8_t* payload_type_fec) const {
- if (audio_configured_) {
- return -1;
- }
+ RTC_DCHECK(!audio_configured_);
video_->GenericFECStatus(*enable, *payload_type_red, *payload_type_fec);
- return 0;
}
int32_t RTPSender::SetFecParameters(
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_sender.h b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_sender.h
index 61a1fb5ec79..57516f8fe4f 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_sender.h
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_sender.h
@@ -17,7 +17,6 @@
#include "webrtc/base/thread_annotations.h"
#include "webrtc/common_types.h"
-#include "webrtc/modules/pacing/include/paced_sender.h"
#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
#include "webrtc/modules/rtp_rtcp/source/bitrate.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_header_extension.h"
@@ -25,6 +24,7 @@
#include "webrtc/modules/rtp_rtcp/source/rtp_rtcp_config.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_utility.h"
#include "webrtc/modules/rtp_rtcp/source/ssrc_database.h"
+#include "webrtc/transport.h"
#define MAX_INIT_RTP_SEQ_NUMBER 32767 // 2^15 -1.
@@ -70,10 +70,12 @@ class RTPSenderInterface {
virtual uint16_t PacketOverHead() const = 0;
virtual uint16_t ActualSendBitrateKbit() const = 0;
- virtual int32_t SendToNetwork(
- uint8_t *data_buffer, size_t payload_length, size_t rtp_header_length,
- int64_t capture_time_ms, StorageType storage,
- PacedSender::Priority priority) = 0;
+ virtual int32_t SendToNetwork(uint8_t* data_buffer,
+ size_t payload_length,
+ size_t rtp_header_length,
+ int64_t capture_time_ms,
+ StorageType storage,
+ RtpPacketSender::Priority priority) = 0;
virtual bool UpdateVideoRotation(uint8_t* rtp_packet,
size_t rtp_packet_length,
@@ -85,12 +87,13 @@ class RTPSenderInterface {
class RTPSender : public RTPSenderInterface {
public:
- RTPSender(int32_t id,
- bool audio,
+ RTPSender(bool audio,
Clock* clock,
Transport* transport,
RtpAudioFeedback* audio_feedback,
- PacedSender* paced_sender,
+ RtpPacketSender* paced_sender,
+ TransportSequenceNumberAllocator* sequence_number_allocator,
+ TransportFeedbackObserver* transport_feedback_callback,
BitrateStatisticsObserver* bitrate_callback,
FrameCountObserver* frame_count_observer,
SendSideDelayObserver* send_side_delay_observer);
@@ -171,7 +174,27 @@ class RTPSender : public RTPSenderInterface {
uint8_t BuildAudioLevelExtension(uint8_t* data_buffer) const;
uint8_t BuildAbsoluteSendTimeExtension(uint8_t* data_buffer) const;
uint8_t BuildVideoRotationExtension(uint8_t* data_buffer) const;
- uint8_t BuildTransportSequenceNumberExtension(uint8_t* data_buffer) const;
+ uint8_t BuildTransportSequenceNumberExtension(uint8_t* data_buffer,
+ uint16_t sequence_number) const;
+
+ // Verifies that the specified extension is registered, and that it is
+ // present in rtp packet. If extension is not registered kNotRegistered is
+ // returned. If extension cannot be found in the rtp header, or if it is
+ // malformed, kError is returned. Otherwise *extension_offset is set to the
+ // offset of the extension from the beginning of the rtp packet and kOk is
+ // returned.
+ enum class ExtensionStatus {
+ kNotRegistered,
+ kOk,
+ kError,
+ };
+ ExtensionStatus VerifyExtension(RTPExtensionType extension_type,
+ uint8_t* rtp_packet,
+ size_t rtp_packet_length,
+ const RTPHeader& rtp_header,
+ size_t extension_length_bytes,
+ size_t* extension_offset) const
+ EXCLUSIVE_LOCKS_REQUIRED(send_critsect_.get());
bool UpdateAudioLevel(uint8_t* rtp_packet,
size_t rtp_packet_length,
@@ -235,7 +258,7 @@ class RTPSender : public RTPSenderInterface {
size_t rtp_header_length,
int64_t capture_time_ms,
StorageType storage,
- PacedSender::Priority priority) override;
+ RtpPacketSender::Priority priority) override;
// Audio.
@@ -263,19 +286,21 @@ class RTPSender : public RTPSenderInterface {
int32_t SendRTPIntraRequest();
// FEC.
- int32_t SetGenericFECStatus(bool enable,
- uint8_t payload_type_red,
- uint8_t payload_type_fec);
+ void SetGenericFECStatus(bool enable,
+ uint8_t payload_type_red,
+ uint8_t payload_type_fec);
- int32_t GenericFECStatus(bool *enable, uint8_t *payload_type_red,
- uint8_t *payload_type_fec) const;
+ void GenericFECStatus(bool* enable,
+ uint8_t* payload_type_red,
+ uint8_t* payload_type_fec) const;
int32_t SetFecParameters(const FecProtectionParams *delta_params,
const FecProtectionParams *key_params);
- size_t SendPadData(uint32_t timestamp,
- int64_t capture_time_ms,
- size_t bytes);
+ size_t SendPadData(size_t bytes,
+ bool timestamp_provided,
+ uint32_t timestamp,
+ int64_t capture_time_ms);
// Called on update of RTP statistics.
void RegisterRtpStatisticsCallback(StreamDataCountersCallback* callback);
@@ -317,14 +342,17 @@ class RTPSender : public RTPSenderInterface {
// Return the number of bytes sent. Note that both of these functions may
// return a larger value that their argument.
size_t TrySendRedundantPayloads(size_t bytes);
- size_t TrySendPadData(size_t bytes);
- size_t BuildPaddingPacket(uint8_t* packet, size_t header_length);
+ void BuildPaddingPacket(uint8_t* packet,
+ size_t header_length,
+ size_t padding_length);
void BuildRtxPacket(uint8_t* buffer, size_t* length,
uint8_t* buffer_rtx);
- bool SendPacketToNetwork(const uint8_t *packet, size_t size);
+ bool SendPacketToNetwork(const uint8_t* packet,
+ size_t size,
+ const PacketOptions& options);
void UpdateDelayStatistics(int64_t capture_time_ms, int64_t now_ms);
@@ -344,6 +372,12 @@ class RTPSender : public RTPSenderInterface {
size_t rtp_packet_length,
const RTPHeader& rtp_header,
int64_t now_ms) const;
+ // Update the transport sequence number of the packet using a new sequence
+ // number allocated by SequenceNumberAllocator. Returns the assigned sequence
+ // number, or 0 if extension could not be updated.
+ uint16_t UpdateTransportSequenceNumber(uint8_t* rtp_packet,
+ size_t rtp_packet_length,
+ const RTPHeader& rtp_header) const;
void UpdateRtpStats(const uint8_t* buffer,
size_t packet_length,
@@ -358,13 +392,13 @@ class RTPSender : public RTPSenderInterface {
rtc::scoped_ptr<BitrateAggregator> bitrates_;
Bitrate total_bitrate_sent_;
- int32_t id_;
-
const bool audio_configured_;
rtc::scoped_ptr<RTPSenderAudio> audio_;
rtc::scoped_ptr<RTPSenderVideo> video_;
- PacedSender *paced_sender_;
+ RtpPacketSender* const paced_sender_;
+ TransportSequenceNumberAllocator* const transport_sequence_number_allocator_;
+ TransportFeedbackObserver* const transport_feedback_observer_;
int64_t last_capture_time_ms_sent_;
rtc::scoped_ptr<CriticalSectionWrapper> send_critsect_;
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_sender_audio.cc b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_sender_audio.cc
index de728f08605..3f55db40389 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_sender_audio.cc
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_sender_audio.cc
@@ -13,6 +13,7 @@
#include <assert.h> //assert
#include <string.h> //memcpy
+#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
#include "webrtc/modules/rtp_rtcp/source/byte_io.h"
#include "webrtc/system_wrappers/interface/trace_event.h"
@@ -20,34 +21,31 @@ namespace webrtc {
static const int kDtmfFrequencyHz = 8000;
-RTPSenderAudio::RTPSenderAudio(const int32_t id,
- Clock* clock,
+RTPSenderAudio::RTPSenderAudio(Clock* clock,
RTPSender* rtpSender,
- RtpAudioFeedback* audio_feedback) :
- _id(id),
- _clock(clock),
- _rtpSender(rtpSender),
- _audioFeedback(audio_feedback),
- _sendAudioCritsect(CriticalSectionWrapper::CreateCriticalSection()),
- _packetSizeSamples(160),
- _dtmfEventIsOn(false),
- _dtmfEventFirstPacketSent(false),
- _dtmfPayloadType(-1),
- _dtmfTimestamp(0),
- _dtmfKey(0),
- _dtmfLengthSamples(0),
- _dtmfLevel(0),
- _dtmfTimeLastSent(0),
- _dtmfTimestampLastSent(0),
- _REDPayloadType(-1),
- _inbandVADactive(false),
- _cngNBPayloadType(-1),
- _cngWBPayloadType(-1),
- _cngSWBPayloadType(-1),
- _cngFBPayloadType(-1),
- _lastPayloadType(-1),
- _audioLevel_dBov(0) {
-}
+ RtpAudioFeedback* audio_feedback)
+ : _clock(clock),
+ _rtpSender(rtpSender),
+ _audioFeedback(audio_feedback),
+ _sendAudioCritsect(CriticalSectionWrapper::CreateCriticalSection()),
+ _packetSizeSamples(160),
+ _dtmfEventIsOn(false),
+ _dtmfEventFirstPacketSent(false),
+ _dtmfPayloadType(-1),
+ _dtmfTimestamp(0),
+ _dtmfKey(0),
+ _dtmfLengthSamples(0),
+ _dtmfLevel(0),
+ _dtmfTimeLastSent(0),
+ _dtmfTimestampLastSent(0),
+ _REDPayloadType(-1),
+ _inbandVADactive(false),
+ _cngNBPayloadType(-1),
+ _cngWBPayloadType(-1),
+ _cngSWBPayloadType(-1),
+ _cngFBPayloadType(-1),
+ _lastPayloadType(-1),
+ _audioLevel_dBov(0) {}
RTPSenderAudio::~RTPSenderAudio() {
}
@@ -204,7 +202,7 @@ int32_t RTPSenderAudio::SendAudio(
}
if (dtmfToneStarted) {
if (_audioFeedback)
- _audioFeedback->OnPlayTelephoneEvent(_id, key, dtmfLengthMS, _dtmfLevel);
+ _audioFeedback->OnPlayTelephoneEvent(key, dtmfLengthMS, _dtmfLevel);
}
// A source MAY send events and coded audio packets for the same time
@@ -371,7 +369,7 @@ int32_t RTPSenderAudio::SendAudio(
_rtpSender->SequenceNumber());
return _rtpSender->SendToNetwork(dataBuffer, payloadSize, rtpHeaderLength,
-1, kAllowRetransmission,
- PacedSender::kHighPriority);
+ RtpPacketSender::kHighPriority);
}
// Audio level magnitude and voice activity flag are set for each RTP packet
@@ -480,7 +478,7 @@ RTPSenderAudio::SendTelephoneEventPacket(bool ended,
_rtpSender->SequenceNumber());
retVal = _rtpSender->SendToNetwork(dtmfbuffer, 4, 12, -1,
kAllowRetransmission,
- PacedSender::kHighPriority);
+ RtpPacketSender::kHighPriority);
sendCount--;
}while (sendCount > 0 && retVal == 0);
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_sender_audio.h b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_sender_audio.h
index 762668a4e4d..dd16fe51b40 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_sender_audio.h
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_sender_audio.h
@@ -22,10 +22,9 @@ namespace webrtc {
class RTPSenderAudio: public DTMFqueue
{
public:
- RTPSenderAudio(const int32_t id,
- Clock* clock,
- RTPSender* rtpSender,
- RtpAudioFeedback* audio_feedback);
+ RTPSenderAudio(Clock* clock,
+ RTPSender* rtpSender,
+ RtpAudioFeedback* audio_feedback);
virtual ~RTPSenderAudio();
int32_t RegisterAudioPayload(const char payloadName[RTP_PAYLOAD_NAME_SIZE],
@@ -73,7 +72,6 @@ protected:
const int8_t payloadType);
private:
- const int32_t _id;
Clock* const _clock;
RTPSender* const _rtpSender;
RtpAudioFeedback* const _audioFeedback;
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_sender_unittest.cc b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_sender_unittest.cc
index 23300bbff60..305ea132e27 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_sender_unittest.cc
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_sender_unittest.cc
@@ -12,11 +12,11 @@
* This file includes unit tests for the RTPSender.
*/
+#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/base/buffer.h"
#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/pacing/include/mock/mock_paced_sender.h"
#include "webrtc/modules/rtp_rtcp/interface/rtp_cvo.h"
#include "webrtc/modules/rtp_rtcp/interface/rtp_header_parser.h"
#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
@@ -73,12 +73,14 @@ class LoopbackTransportTest : public webrtc::Transport {
: packets_sent_(0),
last_sent_packet_len_(0),
total_bytes_sent_(0),
- last_sent_packet_(NULL) {}
+ last_sent_packet_(nullptr) {}
~LoopbackTransportTest() {
STLDeleteContainerPointers(sent_packets_.begin(), sent_packets_.end());
}
- int SendPacket(int channel, const void *data, size_t len) override {
+ bool SendRtp(const uint8_t* data,
+ size_t len,
+ const PacketOptions& options) override {
packets_sent_++;
rtc::Buffer* buffer =
new rtc::Buffer(reinterpret_cast<const uint8_t*>(data), len);
@@ -86,10 +88,10 @@ class LoopbackTransportTest : public webrtc::Transport {
last_sent_packet_len_ = len;
total_bytes_sent_ += len;
sent_packets_.push_back(buffer);
- return static_cast<int>(len);
+ return true;
}
- int SendRTCPPacket(int channel, const void* data, size_t len) override {
- return -1;
+ bool SendRtcp(const uint8_t* data, size_t len) override {
+ return false;
}
int packets_sent_;
size_t last_sent_packet_len_;
@@ -100,6 +102,20 @@ class LoopbackTransportTest : public webrtc::Transport {
} // namespace
+class MockRtpPacketSender : public RtpPacketSender {
+ public:
+ MockRtpPacketSender() {}
+ virtual ~MockRtpPacketSender() {}
+
+ MOCK_METHOD6(SendPacket,
+ bool(Priority priority,
+ uint32_t ssrc,
+ uint16_t sequence_number,
+ int64_t capture_time_ms,
+ size_t bytes,
+ bool retransmission));
+};
+
class RtpSenderTest : public ::testing::Test {
protected:
RtpSenderTest()
@@ -114,13 +130,14 @@ class RtpSenderTest : public ::testing::Test {
}
void SetUp() override {
- rtp_sender_.reset(new RTPSender(0, false, &fake_clock_, &transport_, NULL,
- &mock_paced_sender_, NULL, NULL, NULL));
+ rtp_sender_.reset(new RTPSender(false, &fake_clock_, &transport_, nullptr,
+ &mock_paced_sender_, nullptr, nullptr,
+ nullptr, nullptr, nullptr));
rtp_sender_->SetSequenceNumber(kSeqNum);
}
SimulatedClock fake_clock_;
- MockPacedSender mock_paced_sender_;
+ MockRtpPacketSender mock_paced_sender_;
rtc::scoped_ptr<RTPSender> rtp_sender_;
int payload_;
LoopbackTransportTest transport_;
@@ -151,12 +168,9 @@ class RtpSenderTest : public ::testing::Test {
ASSERT_GE(rtp_length, 0);
// Packet should be stored in a send bucket.
- EXPECT_EQ(0, rtp_sender_->SendToNetwork(packet_,
- payload_length,
- rtp_length,
- capture_time_ms,
- kAllowRetransmission,
- PacedSender::kNormalPriority));
+ EXPECT_EQ(0, rtp_sender_->SendToNetwork(
+ packet_, payload_length, rtp_length, capture_time_ms,
+ kAllowRetransmission, RtpPacketSender::kNormalPriority));
}
};
@@ -308,7 +322,7 @@ TEST_F(RtpSenderTest, BuildRTPPacket) {
webrtc::RtpUtility::RtpHeaderParser rtp_parser(packet_, length);
webrtc::RTPHeader rtp_header;
- const bool valid_rtp_header = rtp_parser.Parse(rtp_header, NULL);
+ const bool valid_rtp_header = rtp_parser.Parse(rtp_header, nullptr);
ASSERT_TRUE(valid_rtp_header);
ASSERT_FALSE(rtp_parser.RTCP());
@@ -319,6 +333,7 @@ TEST_F(RtpSenderTest, BuildRTPPacket) {
EXPECT_FALSE(rtp_header.extension.hasAudioLevel);
EXPECT_EQ(0, rtp_header.extension.transmissionTimeOffset);
EXPECT_EQ(0u, rtp_header.extension.absoluteSendTime);
+ EXPECT_FALSE(rtp_header.extension.voiceActivity);
EXPECT_EQ(0u, rtp_header.extension.audioLevel);
EXPECT_EQ(0u, rtp_header.extension.videoRotation);
}
@@ -351,7 +366,7 @@ TEST_F(RtpSenderTest, BuildRTPPacketWithTransmissionOffsetExtension) {
// Parse without map extension
webrtc::RTPHeader rtp_header2;
- const bool valid_rtp_header2 = rtp_parser.Parse(rtp_header2, NULL);
+ const bool valid_rtp_header2 = rtp_parser.Parse(rtp_header2, nullptr);
ASSERT_TRUE(valid_rtp_header2);
VerifyRTPHeaderCommon(rtp_header2);
@@ -415,7 +430,7 @@ TEST_F(RtpSenderTest, BuildRTPPacketWithAbsoluteSendTimeExtension) {
// Parse without map extension
webrtc::RTPHeader rtp_header2;
- const bool valid_rtp_header2 = rtp_parser.Parse(rtp_header2, NULL);
+ const bool valid_rtp_header2 = rtp_parser.Parse(rtp_header2, nullptr);
ASSERT_TRUE(valid_rtp_header2);
VerifyRTPHeaderCommon(rtp_header2);
@@ -503,18 +518,18 @@ TEST_F(RtpSenderTest, BuildRTPPacketWithAudioLevelExtension) {
VerifyRTPHeaderCommon(rtp_header);
EXPECT_EQ(length, rtp_header.headerLength);
EXPECT_TRUE(rtp_header.extension.hasAudioLevel);
- // Expect kAudioLevel + 0x80 because we set "voiced" to true in the call to
- // UpdateAudioLevel(), above.
- EXPECT_EQ(kAudioLevel + 0x80u, rtp_header.extension.audioLevel);
+ EXPECT_TRUE(rtp_header.extension.voiceActivity);
+ EXPECT_EQ(kAudioLevel, rtp_header.extension.audioLevel);
// Parse without map extension
webrtc::RTPHeader rtp_header2;
- const bool valid_rtp_header2 = rtp_parser.Parse(rtp_header2, NULL);
+ const bool valid_rtp_header2 = rtp_parser.Parse(rtp_header2, nullptr);
ASSERT_TRUE(valid_rtp_header2);
VerifyRTPHeaderCommon(rtp_header2);
EXPECT_EQ(length, rtp_header2.headerLength);
EXPECT_FALSE(rtp_header2.extension.hasAudioLevel);
+ EXPECT_FALSE(rtp_header2.extension.voiceActivity);
EXPECT_EQ(0u, rtp_header2.extension.audioLevel);
}
@@ -565,13 +580,14 @@ TEST_F(RtpSenderTest, BuildRTPPacketWithHeaderExtensions) {
EXPECT_TRUE(rtp_header.extension.hasTransportSequenceNumber);
EXPECT_EQ(kTimeOffset, rtp_header.extension.transmissionTimeOffset);
EXPECT_EQ(kAbsoluteSendTime, rtp_header.extension.absoluteSendTime);
- EXPECT_EQ(kAudioLevel + 0x80u, rtp_header.extension.audioLevel);
+ EXPECT_TRUE(rtp_header.extension.voiceActivity);
+ EXPECT_EQ(kAudioLevel, rtp_header.extension.audioLevel);
EXPECT_EQ(kTransportSequenceNumber,
rtp_header.extension.transportSequenceNumber);
// Parse without map extension
webrtc::RTPHeader rtp_header2;
- const bool valid_rtp_header2 = rtp_parser.Parse(rtp_header2, NULL);
+ const bool valid_rtp_header2 = rtp_parser.Parse(rtp_header2, nullptr);
ASSERT_TRUE(valid_rtp_header2);
VerifyRTPHeaderCommon(rtp_header2);
@@ -583,14 +599,15 @@ TEST_F(RtpSenderTest, BuildRTPPacketWithHeaderExtensions) {
EXPECT_EQ(0, rtp_header2.extension.transmissionTimeOffset);
EXPECT_EQ(0u, rtp_header2.extension.absoluteSendTime);
+ EXPECT_FALSE(rtp_header2.extension.voiceActivity);
EXPECT_EQ(0u, rtp_header2.extension.audioLevel);
EXPECT_EQ(0u, rtp_header2.extension.transportSequenceNumber);
}
TEST_F(RtpSenderTest, TrafficSmoothingWithExtensions) {
EXPECT_CALL(mock_paced_sender_,
- SendPacket(PacedSender::kNormalPriority, _, kSeqNum, _, _, _)).
- WillOnce(testing::Return(false));
+ SendPacket(RtpPacketSender::kNormalPriority, _, kSeqNum, _, _, _))
+ .WillOnce(testing::Return(false));
rtp_sender_->SetStorePacketsStatus(true, 10);
EXPECT_EQ(0, rtp_sender_->RegisterRtpHeaderExtension(
@@ -605,12 +622,9 @@ TEST_F(RtpSenderTest, TrafficSmoothingWithExtensions) {
size_t rtp_length = static_cast<size_t>(rtp_length_int);
// Packet should be stored in a send bucket.
- EXPECT_EQ(0, rtp_sender_->SendToNetwork(packet_,
- 0,
- rtp_length,
- capture_time_ms,
- kAllowRetransmission,
- PacedSender::kNormalPriority));
+ EXPECT_EQ(0, rtp_sender_->SendToNetwork(packet_, 0, rtp_length,
+ capture_time_ms, kAllowRetransmission,
+ RtpPacketSender::kNormalPriority));
EXPECT_EQ(0, transport_.packets_sent_);
@@ -642,8 +656,8 @@ TEST_F(RtpSenderTest, TrafficSmoothingWithExtensions) {
TEST_F(RtpSenderTest, TrafficSmoothingRetransmits) {
EXPECT_CALL(mock_paced_sender_,
- SendPacket(PacedSender::kNormalPriority, _, kSeqNum, _, _, _)).
- WillOnce(testing::Return(false));
+ SendPacket(RtpPacketSender::kNormalPriority, _, kSeqNum, _, _, _))
+ .WillOnce(testing::Return(false));
rtp_sender_->SetStorePacketsStatus(true, 10);
EXPECT_EQ(0, rtp_sender_->RegisterRtpHeaderExtension(
@@ -658,18 +672,15 @@ TEST_F(RtpSenderTest, TrafficSmoothingRetransmits) {
size_t rtp_length = static_cast<size_t>(rtp_length_int);
// Packet should be stored in a send bucket.
- EXPECT_EQ(0, rtp_sender_->SendToNetwork(packet_,
- 0,
- rtp_length,
- capture_time_ms,
- kAllowRetransmission,
- PacedSender::kNormalPriority));
+ EXPECT_EQ(0, rtp_sender_->SendToNetwork(packet_, 0, rtp_length,
+ capture_time_ms, kAllowRetransmission,
+ RtpPacketSender::kNormalPriority));
EXPECT_EQ(0, transport_.packets_sent_);
EXPECT_CALL(mock_paced_sender_,
- SendPacket(PacedSender::kHighPriority, _, kSeqNum, _, _, _)).
- WillOnce(testing::Return(false));
+ SendPacket(RtpPacketSender::kHighPriority, _, kSeqNum, _, _, _))
+ .WillOnce(testing::Return(false));
const int kStoredTimeInMs = 100;
fake_clock_.AdvanceTimeMilliseconds(kStoredTimeInMs);
@@ -706,8 +717,8 @@ TEST_F(RtpSenderTest, TrafficSmoothingRetransmits) {
TEST_F(RtpSenderTest, SendPadding) {
// Make all (non-padding) packets go to send queue.
EXPECT_CALL(mock_paced_sender_,
- SendPacket(PacedSender::kNormalPriority, _, _, _, _, _)).
- WillRepeatedly(testing::Return(false));
+ SendPacket(RtpPacketSender::kNormalPriority, _, _, _, _, _))
+ .WillRepeatedly(testing::Return(false));
uint16_t seq_num = kSeqNum;
uint32_t timestamp = kTimestamp;
@@ -724,7 +735,7 @@ TEST_F(RtpSenderTest, SendPadding) {
// Create and set up parser.
rtc::scoped_ptr<webrtc::RtpHeaderParser> rtp_parser(
webrtc::RtpHeaderParser::Create());
- ASSERT_TRUE(rtp_parser.get() != NULL);
+ ASSERT_TRUE(rtp_parser.get() != nullptr);
rtp_parser->RegisterRtpHeaderExtension(kRtpExtensionTransmissionTimeOffset,
kTransmissionTimeOffsetExtensionId);
rtp_parser->RegisterRtpHeaderExtension(kRtpExtensionAbsoluteSendTime,
@@ -735,16 +746,14 @@ TEST_F(RtpSenderTest, SendPadding) {
int64_t capture_time_ms = fake_clock_.TimeInMilliseconds();
int rtp_length_int = rtp_sender_->BuildRTPheader(
packet_, kPayload, kMarkerBit, timestamp, capture_time_ms);
+ const uint32_t media_packet_timestamp = timestamp;
ASSERT_NE(-1, rtp_length_int);
size_t rtp_length = static_cast<size_t>(rtp_length_int);
// Packet should be stored in a send bucket.
- EXPECT_EQ(0, rtp_sender_->SendToNetwork(packet_,
- 0,
- rtp_length,
- capture_time_ms,
- kAllowRetransmission,
- PacedSender::kNormalPriority));
+ EXPECT_EQ(0, rtp_sender_->SendToNetwork(packet_, 0, rtp_length,
+ capture_time_ms, kAllowRetransmission,
+ RtpPacketSender::kNormalPriority));
int total_packets_sent = 0;
EXPECT_EQ(total_packets_sent, transport_.packets_sent_);
@@ -775,11 +784,13 @@ TEST_F(RtpSenderTest, SendPadding) {
&rtp_header));
EXPECT_EQ(kMaxPaddingLength, rtp_header.paddingLength);
- // Verify sequence number and timestamp.
+ // Verify sequence number and timestamp. The timestamp should be the same
+ // as the last media packet.
EXPECT_EQ(seq_num++, rtp_header.sequenceNumber);
- EXPECT_EQ(timestamp, rtp_header.timestamp);
+ EXPECT_EQ(media_packet_timestamp, rtp_header.timestamp);
// Verify transmission time offset.
- EXPECT_EQ(0, rtp_header.extension.transmissionTimeOffset);
+ int offset = timestamp - media_packet_timestamp;
+ EXPECT_EQ(offset, rtp_header.extension.transmissionTimeOffset);
uint64_t expected_send_time =
ConvertMsToAbsSendTime(fake_clock_.TimeInMilliseconds());
EXPECT_EQ(expected_send_time, rtp_header.extension.absoluteSendTime);
@@ -795,12 +806,9 @@ TEST_F(RtpSenderTest, SendPadding) {
rtp_length = static_cast<size_t>(rtp_length_int);
// Packet should be stored in a send bucket.
- EXPECT_EQ(0, rtp_sender_->SendToNetwork(packet_,
- 0,
- rtp_length,
- capture_time_ms,
- kAllowRetransmission,
- PacedSender::kNormalPriority));
+ EXPECT_EQ(0, rtp_sender_->SendToNetwork(packet_, 0, rtp_length,
+ capture_time_ms, kAllowRetransmission,
+ RtpPacketSender::kNormalPriority));
rtp_sender_->TimeToSendPacket(seq_num, capture_time_ms, false);
// Process send bucket.
@@ -822,14 +830,15 @@ TEST_F(RtpSenderTest, SendPadding) {
TEST_F(RtpSenderTest, SendRedundantPayloads) {
MockTransport transport;
- rtp_sender_.reset(new RTPSender(0, false, &fake_clock_, &transport, NULL,
- &mock_paced_sender_, NULL, NULL, NULL));
+ rtp_sender_.reset(new RTPSender(false, &fake_clock_, &transport, nullptr,
+ &mock_paced_sender_, nullptr, nullptr,
+ nullptr, nullptr, nullptr));
rtp_sender_->SetSequenceNumber(kSeqNum);
rtp_sender_->SetRtxPayloadType(kRtxPayload, kPayload);
// Make all packets go through the pacer.
EXPECT_CALL(mock_paced_sender_,
- SendPacket(PacedSender::kNormalPriority, _, _, _, _, _)).
- WillRepeatedly(testing::Return(false));
+ SendPacket(RtpPacketSender::kNormalPriority, _, _, _, _, _))
+ .WillRepeatedly(testing::Return(false));
uint16_t seq_num = kSeqNum;
rtp_sender_->SetStorePacketsStatus(true, 10);
@@ -845,7 +854,7 @@ TEST_F(RtpSenderTest, SendRedundantPayloads) {
// Create and set up parser.
rtc::scoped_ptr<webrtc::RtpHeaderParser> rtp_parser(
webrtc::RtpHeaderParser::Create());
- ASSERT_TRUE(rtp_parser.get() != NULL);
+ ASSERT_TRUE(rtp_parser.get() != nullptr);
rtp_parser->RegisterRtpHeaderExtension(kRtpExtensionTransmissionTimeOffset,
kTransmissionTimeOffsetExtensionId);
rtp_parser->RegisterRtpHeaderExtension(kRtpExtensionAbsoluteSendTime,
@@ -857,28 +866,27 @@ TEST_F(RtpSenderTest, SendRedundantPayloads) {
// Send 10 packets of increasing size.
for (size_t i = 0; i < kNumPayloadSizes; ++i) {
int64_t capture_time_ms = fake_clock_.TimeInMilliseconds();
- EXPECT_CALL(transport, SendPacket(_, _, _))
- .WillOnce(testing::ReturnArg<2>());
+ EXPECT_CALL(transport, SendRtp(_, _, _)).WillOnce(testing::Return(true));
SendPacket(capture_time_ms, kPayloadSizes[i]);
rtp_sender_->TimeToSendPacket(seq_num++, capture_time_ms, false);
fake_clock_.AdvanceTimeMilliseconds(33);
}
// The amount of padding to send it too small to send a payload packet.
- EXPECT_CALL(transport,
- SendPacket(_, _, kMaxPaddingSize + rtp_header_len))
- .WillOnce(testing::ReturnArg<2>());
+ EXPECT_CALL(transport, SendRtp(_, kMaxPaddingSize + rtp_header_len, _))
+ .WillOnce(testing::Return(true));
EXPECT_EQ(kMaxPaddingSize, rtp_sender_->TimeToSendPadding(49));
- EXPECT_CALL(transport, SendPacket(_, _, kPayloadSizes[0] +
- rtp_header_len + kRtxHeaderSize))
- .WillOnce(testing::ReturnArg<2>());
+ EXPECT_CALL(transport,
+ SendRtp(_, kPayloadSizes[0] + rtp_header_len + kRtxHeaderSize, _))
+ .WillOnce(testing::Return(true));
EXPECT_EQ(kPayloadSizes[0], rtp_sender_->TimeToSendPadding(500));
- EXPECT_CALL(transport, SendPacket(_, _, kPayloadSizes[kNumPayloadSizes - 1] +
- rtp_header_len + kRtxHeaderSize))
- .WillOnce(testing::ReturnArg<2>());
- EXPECT_CALL(transport, SendPacket(_, _, kMaxPaddingSize + rtp_header_len))
- .WillOnce(testing::ReturnArg<2>());
+ EXPECT_CALL(transport, SendRtp(_, kPayloadSizes[kNumPayloadSizes - 1] +
+ rtp_header_len + kRtxHeaderSize,
+ _))
+ .WillOnce(testing::Return(true));
+ EXPECT_CALL(transport, SendRtp(_, kMaxPaddingSize + rtp_header_len, _))
+ .WillOnce(testing::Return(true));
EXPECT_EQ(kPayloadSizes[kNumPayloadSizes - 1] + kMaxPaddingSize,
rtp_sender_->TimeToSendPadding(999));
}
@@ -891,9 +899,9 @@ TEST_F(RtpSenderTest, SendGenericVideo) {
uint8_t payload[] = {47, 11, 32, 93, 89};
// Send keyframe
- ASSERT_EQ(0, rtp_sender_->SendOutgoingData(kVideoFrameKey, payload_type, 1234,
- 4321, payload, sizeof(payload),
- NULL));
+ ASSERT_EQ(
+ 0, rtp_sender_->SendOutgoingData(kVideoFrameKey, payload_type, 1234, 4321,
+ payload, sizeof(payload), nullptr));
RtpUtility::RtpHeaderParser rtp_parser(transport_.last_sent_packet_,
transport_.last_sent_packet_len_);
@@ -919,7 +927,7 @@ TEST_F(RtpSenderTest, SendGenericVideo) {
ASSERT_EQ(0, rtp_sender_->SendOutgoingData(kVideoFrameDelta, payload_type,
1234, 4321, payload,
- sizeof(payload), NULL));
+ sizeof(payload), nullptr));
RtpUtility::RtpHeaderParser rtp_parser2(transport_.last_sent_packet_,
transport_.last_sent_packet_len_);
@@ -955,8 +963,9 @@ TEST_F(RtpSenderTest, FrameCountCallbacks) {
FrameCounts frame_counts_;
} callback;
- rtp_sender_.reset(new RTPSender(0, false, &fake_clock_, &transport_, NULL,
- &mock_paced_sender_, NULL, &callback, NULL));
+ rtp_sender_.reset(new RTPSender(false, &fake_clock_, &transport_, nullptr,
+ &mock_paced_sender_, nullptr, nullptr,
+ nullptr, &callback, nullptr));
char payload_name[RTP_PAYLOAD_NAME_SIZE] = "GENERIC";
const uint8_t payload_type = 127;
@@ -966,18 +975,18 @@ TEST_F(RtpSenderTest, FrameCountCallbacks) {
rtp_sender_->SetStorePacketsStatus(true, 1);
uint32_t ssrc = rtp_sender_->SSRC();
- ASSERT_EQ(0, rtp_sender_->SendOutgoingData(kVideoFrameKey, payload_type, 1234,
- 4321, payload, sizeof(payload),
- NULL));
+ ASSERT_EQ(
+ 0, rtp_sender_->SendOutgoingData(kVideoFrameKey, payload_type, 1234, 4321,
+ payload, sizeof(payload), nullptr));
EXPECT_EQ(1U, callback.num_calls_);
EXPECT_EQ(ssrc, callback.ssrc_);
EXPECT_EQ(1, callback.frame_counts_.key_frames);
EXPECT_EQ(0, callback.frame_counts_.delta_frames);
- ASSERT_EQ(0, rtp_sender_->SendOutgoingData(kVideoFrameDelta,
- payload_type, 1234, 4321, payload,
- sizeof(payload), NULL));
+ ASSERT_EQ(0, rtp_sender_->SendOutgoingData(kVideoFrameDelta, payload_type,
+ 1234, 4321, payload,
+ sizeof(payload), nullptr));
EXPECT_EQ(2U, callback.num_calls_);
EXPECT_EQ(ssrc, callback.ssrc_);
@@ -1007,8 +1016,9 @@ TEST_F(RtpSenderTest, BitrateCallbacks) {
BitrateStatistics total_stats_;
BitrateStatistics retransmit_stats_;
} callback;
- rtp_sender_.reset(new RTPSender(0, false, &fake_clock_, &transport_, NULL,
- &mock_paced_sender_, &callback, NULL, NULL));
+ rtp_sender_.reset(new RTPSender(false, &fake_clock_, &transport_, nullptr,
+ &mock_paced_sender_, nullptr, nullptr,
+ &callback, nullptr, nullptr));
// Simulate kNumPackets sent with kPacketInterval ms intervals.
const uint32_t kNumPackets = 15;
@@ -1065,8 +1075,9 @@ class RtpSenderAudioTest : public RtpSenderTest {
void SetUp() override {
payload_ = kAudioPayload;
- rtp_sender_.reset(new RTPSender(0, true, &fake_clock_, &transport_, NULL,
- &mock_paced_sender_, NULL, NULL, NULL));
+ rtp_sender_.reset(new RTPSender(true, &fake_clock_, &transport_, nullptr,
+ &mock_paced_sender_, nullptr, nullptr,
+ nullptr, nullptr, nullptr));
rtp_sender_->SetSequenceNumber(kSeqNum);
}
};
@@ -1117,9 +1128,9 @@ TEST_F(RtpSenderTest, StreamDataCountersCallbacks) {
rtp_sender_->RegisterRtpStatisticsCallback(&callback);
// Send a frame.
- ASSERT_EQ(0, rtp_sender_->SendOutgoingData(kVideoFrameKey, payload_type, 1234,
- 4321, payload, sizeof(payload),
- NULL));
+ ASSERT_EQ(
+ 0, rtp_sender_->SendOutgoingData(kVideoFrameKey, payload_type, 1234, 4321,
+ payload, sizeof(payload), nullptr));
StreamDataCounters expected;
expected.transmitted.payload_bytes = 6;
expected.transmitted.header_bytes = 12;
@@ -1162,14 +1173,14 @@ TEST_F(RtpSenderTest, StreamDataCountersCallbacks) {
rtp_sender_->SetFecParameters(&fec_params, &fec_params);
ASSERT_EQ(0, rtp_sender_->SendOutgoingData(kVideoFrameDelta, payload_type,
1234, 4321, payload,
- sizeof(payload), NULL));
+ sizeof(payload), nullptr));
expected.transmitted.payload_bytes = 40;
expected.transmitted.header_bytes = 60;
expected.transmitted.packets = 5;
expected.fec.packets = 1;
callback.Matches(ssrc, expected);
- rtp_sender_->RegisterRtpStatisticsCallback(NULL);
+ rtp_sender_->RegisterRtpStatisticsCallback(nullptr);
}
TEST_F(RtpSenderAudioTest, SendAudio) {
@@ -1179,9 +1190,9 @@ TEST_F(RtpSenderAudioTest, SendAudio) {
0, 1500));
uint8_t payload[] = {47, 11, 32, 93, 89};
- ASSERT_EQ(0, rtp_sender_->SendOutgoingData(kAudioFrameCN, payload_type, 1234,
- 4321, payload, sizeof(payload),
- NULL));
+ ASSERT_EQ(
+ 0, rtp_sender_->SendOutgoingData(kAudioFrameCN, payload_type, 1234, 4321,
+ payload, sizeof(payload), nullptr));
RtpUtility::RtpHeaderParser rtp_parser(transport_.last_sent_packet_,
transport_.last_sent_packet_len_);
@@ -1208,9 +1219,9 @@ TEST_F(RtpSenderAudioTest, SendAudioWithAudioLevelExtension) {
0, 1500));
uint8_t payload[] = {47, 11, 32, 93, 89};
- ASSERT_EQ(0, rtp_sender_->SendOutgoingData(kAudioFrameCN, payload_type, 1234,
- 4321, payload, sizeof(payload),
- NULL));
+ ASSERT_EQ(
+ 0, rtp_sender_->SendOutgoingData(kAudioFrameCN, payload_type, 1234, 4321,
+ payload, sizeof(payload), nullptr));
RtpUtility::RtpHeaderParser rtp_parser(transport_.last_sent_packet_,
transport_.last_sent_packet_len_);
@@ -1259,19 +1270,17 @@ TEST_F(RtpSenderAudioTest, CheckMarkerBitForTelephoneEvents) {
// The duration is calculated as the difference of current and last sent
// timestamp. So for first call it will skip since the duration is zero.
ASSERT_EQ(0, rtp_sender_->SendOutgoingData(kFrameEmpty, payload_type,
- capture_time_ms,
- 0, NULL, 0,
- NULL));
+ capture_time_ms, 0, nullptr, 0,
+ nullptr));
// DTMF Sample Length is (Frequency/1000) * Duration.
// So in this case, it is (8000/1000) * 500 = 4000.
// Sending it as two packets.
ASSERT_EQ(0, rtp_sender_->SendOutgoingData(kFrameEmpty, payload_type,
- capture_time_ms+2000,
- 0, NULL, 0,
- NULL));
+ capture_time_ms + 2000, 0, nullptr,
+ 0, nullptr));
rtc::scoped_ptr<webrtc::RtpHeaderParser> rtp_parser(
webrtc::RtpHeaderParser::Create());
- ASSERT_TRUE(rtp_parser.get() != NULL);
+ ASSERT_TRUE(rtp_parser.get() != nullptr);
webrtc::RTPHeader rtp_header;
ASSERT_TRUE(rtp_parser->Parse(transport_.last_sent_packet_,
transport_.last_sent_packet_len_,
@@ -1280,9 +1289,8 @@ TEST_F(RtpSenderAudioTest, CheckMarkerBitForTelephoneEvents) {
EXPECT_TRUE(rtp_header.markerBit);
ASSERT_EQ(0, rtp_sender_->SendOutgoingData(kFrameEmpty, payload_type,
- capture_time_ms+4000,
- 0, NULL, 0,
- NULL));
+ capture_time_ms + 4000, 0, nullptr,
+ 0, nullptr));
ASSERT_TRUE(rtp_parser->Parse(transport_.last_sent_packet_,
transport_.last_sent_packet_len_,
&rtp_header));
@@ -1343,6 +1351,35 @@ TEST_F(RtpSenderTest, BytesReportedCorrectly) {
rtx_stats.transmitted.TotalBytes());
}
+TEST_F(RtpSenderTest, RespectsNackBitrateLimit) {
+ const int32_t kPacketSize = 1400;
+ const int32_t kNumPackets = 30;
+
+ rtp_sender_->SetStorePacketsStatus(true, kNumPackets);
+ // Set bitrate (in kbps) to fit kNumPackets á kPacketSize bytes in one second.
+ rtp_sender_->SetTargetBitrate(kNumPackets * kPacketSize * 8);
+ const uint16_t kStartSequenceNumber = rtp_sender_->SequenceNumber();
+ std::list<uint16_t> sequence_numbers;
+ for (int32_t i = 0; i < kNumPackets; ++i) {
+ sequence_numbers.push_back(kStartSequenceNumber + i);
+ fake_clock_.AdvanceTimeMilliseconds(1);
+ SendPacket(fake_clock_.TimeInMilliseconds(), kPacketSize);
+ }
+ EXPECT_EQ(kNumPackets, transport_.packets_sent_);
+
+ fake_clock_.AdvanceTimeMilliseconds(1000 - kNumPackets);
+
+ // Resending should work - brings the bandwidth up to the limit.
+ // NACK bitrate is capped to the same bitrate as the encoder, since the max
+ // protection overhead is 50% (see MediaOptimization::SetTargetRates).
+ rtp_sender_->OnReceivedNACK(sequence_numbers, 0);
+ EXPECT_EQ(kNumPackets * 2, transport_.packets_sent_);
+
+ // Resending should not work, bandwidth exceeded.
+ rtp_sender_->OnReceivedNACK(sequence_numbers, 0);
+ EXPECT_EQ(kNumPackets * 2, transport_.packets_sent_);
+}
+
// Verify that all packets of a frame have CVO byte set.
TEST_F(RtpSenderVideoTest, SendVideoWithCVO) {
RTPVideoHeader hdr = {0};
@@ -1357,7 +1394,7 @@ TEST_F(RtpSenderVideoTest, SendVideoWithCVO) {
rtp_sender_->RtpHeaderExtensionTotalLength());
rtp_sender_video_->SendVideo(kRtpVideoGeneric, kVideoFrameKey, kPayload,
- kTimestamp, 0, packet_, sizeof(packet_), NULL,
+ kTimestamp, 0, packet_, sizeof(packet_), nullptr,
&hdr);
RtpHeaderExtensionMap map;
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_sender_video.cc b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_sender_video.cc
index 88bb5bbee14..a2008bf168a 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_sender_video.cc
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_sender_video.cc
@@ -21,6 +21,7 @@
#include "webrtc/modules/rtp_rtcp/source/producer_fec.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_format_video_generic.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_format_vp8.h"
+#include "webrtc/modules/rtp_rtcp/source/rtp_format_vp9.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
#include "webrtc/system_wrappers/interface/logging.h"
#include "webrtc/system_wrappers/interface/trace_event.h"
@@ -76,6 +77,8 @@ RtpUtility::Payload* RTPSenderVideo::CreateVideoPayload(
RtpVideoCodecTypes videoType = kRtpVideoGeneric;
if (RtpUtility::StringCompare(payloadName, "VP8", 3)) {
videoType = kRtpVideoVp8;
+ } else if (RtpUtility::StringCompare(payloadName, "VP9", 3)) {
+ videoType = kRtpVideoVp9;
} else if (RtpUtility::StringCompare(payloadName, "H264", 4)) {
videoType = kRtpVideoH264;
} else if (RtpUtility::StringCompare(payloadName, "I420", 4)) {
@@ -101,7 +104,7 @@ void RTPSenderVideo::SendVideoPacket(uint8_t* data_buffer,
StorageType storage) {
if (_rtpSender.SendToNetwork(data_buffer, payload_length, rtp_header_length,
capture_time_ms, storage,
- PacedSender::kNormalPriority) == 0) {
+ RtpPacketSender::kNormalPriority) == 0) {
_videoBitrate.Update(payload_length + rtp_header_length);
TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("webrtc_rtp"),
"Video::PacketNormal", "timestamp", capture_timestamp,
@@ -139,7 +142,7 @@ void RTPSenderVideo::SendVideoPacketAsRed(uint8_t* data_buffer,
fec_packets = producer_fec_.GetFecPackets(
_payloadTypeRED, _payloadTypeFEC, next_fec_sequence_number,
rtp_header_length);
- DCHECK_EQ(num_fec_packets, fec_packets.size());
+ RTC_DCHECK_EQ(num_fec_packets, fec_packets.size());
if (_retransmissionSettings & kRetransmitFECPackets)
fec_storage = kAllowRetransmission;
}
@@ -147,7 +150,7 @@ void RTPSenderVideo::SendVideoPacketAsRed(uint8_t* data_buffer,
if (_rtpSender.SendToNetwork(
red_packet->data(), red_packet->length() - rtp_header_length,
rtp_header_length, capture_time_ms, media_packet_storage,
- PacedSender::kNormalPriority) == 0) {
+ RtpPacketSender::kNormalPriority) == 0) {
_videoBitrate.Update(red_packet->length());
TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("webrtc_rtp"),
"Video::PacketRed", "timestamp", capture_timestamp,
@@ -159,7 +162,7 @@ void RTPSenderVideo::SendVideoPacketAsRed(uint8_t* data_buffer,
if (_rtpSender.SendToNetwork(
fec_packet->data(), fec_packet->length() - rtp_header_length,
rtp_header_length, capture_time_ms, fec_storage,
- PacedSender::kNormalPriority) == 0) {
+ RtpPacketSender::kNormalPriority) == 0) {
_fecOverheadRate.Update(fec_packet->length());
TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("webrtc_rtp"),
"Video::PacketFec", "timestamp", capture_timestamp,
@@ -189,8 +192,8 @@ int32_t RTPSenderVideo::SendRTPIntraRequest() {
TRACE_EVENT_INSTANT1(TRACE_DISABLED_BY_DEFAULT("webrtc_rtp"),
"Video::IntraRequest", "seqnum",
_rtpSender.SequenceNumber());
- return _rtpSender.SendToNetwork(
- data, 0, length, -1, kDontStore, PacedSender::kNormalPriority);
+ return _rtpSender.SendToNetwork(data, 0, length, -1, kDontStore,
+ RtpPacketSender::kNormalPriority);
}
void RTPSenderVideo::SetGenericFECStatus(const bool enable,
@@ -233,8 +236,8 @@ size_t RTPSenderVideo::FECPacketOverhead() const {
void RTPSenderVideo::SetFecParameters(const FecProtectionParams* delta_params,
const FecProtectionParams* key_params) {
CriticalSectionScoped cs(crit_.get());
- DCHECK(delta_params);
- DCHECK(key_params);
+ RTC_DCHECK(delta_params);
+ RTC_DCHECK(key_params);
delta_fec_params_ = *delta_params;
key_fec_params_ = *key_params;
}
@@ -310,14 +313,14 @@ int32_t RTPSenderVideo::SendVideo(const RtpVideoCodecTypes videoType,
// value sent.
// Here we are adding it to every packet of every frame at this point.
if (!rtpHdr) {
- DCHECK(!_rtpSender.IsRtpHeaderExtensionRegistered(
+ RTC_DCHECK(!_rtpSender.IsRtpHeaderExtensionRegistered(
kRtpExtensionVideoRotation));
} else if (cvo_mode == RTPSenderInterface::kCVOActivated) {
// Checking whether CVO header extension is registered will require taking
// a lock. It'll be a no-op if it's not registered.
// TODO(guoweis): For now, all packets sent will carry the CVO such that
// the RTP header length is consistent, although the receiver side will
- // only exam the packets with market bit set.
+    // only examine the packets with marker bit set.
size_t packetSize = payloadSize + rtp_header_length;
RtpUtility::RtpHeaderParser rtp_parser(dataBuffer, packetSize);
RTPHeader rtp_header;
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_utility.cc b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_utility.cc
index 0d083bd92a5..2727e7b8bc1 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_utility.cc
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/source/rtp_utility.cc
@@ -312,6 +312,7 @@ bool RtpHeaderParser::Parse(RTPHeader& header,
// May not be present in packet.
header.extension.hasAudioLevel = false;
+ header.extension.voiceActivity = false;
header.extension.audioLevel = 0;
// May not be present in packet.
@@ -423,14 +424,8 @@ void RtpHeaderParser::ParseOneByteExtensionHeader(
// | ID | len=0 |V| level |
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
//
-
- // Parse out the fields but only use it for debugging for now.
- // const uint8_t V = (*ptr & 0x80) >> 7;
- // const uint8_t level = (*ptr & 0x7f);
- // DEBUG_PRINT("RTP_AUDIO_LEVEL_UNIQUE_ID: ID=%u, len=%u, V=%u,
- // level=%u", ID, len, V, level);
-
- header.extension.audioLevel = ptr[0];
+ header.extension.audioLevel = ptr[0] & 0x7f;
+ header.extension.voiceActivity = (ptr[0] & 0x80) != 0;
header.extension.hasAudioLevel = true;
break;
}
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/source/vp8_partition_aggregator.h b/chromium/third_party/webrtc/modules/rtp_rtcp/source/vp8_partition_aggregator.h
index 67babcb330d..53b678f3b91 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/source/vp8_partition_aggregator.h
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/source/vp8_partition_aggregator.h
@@ -78,7 +78,7 @@ class PartitionTreeNode {
int min_parent_size_;
bool packet_start_;
- DISALLOW_COPY_AND_ASSIGN(PartitionTreeNode);
+ RTC_DISALLOW_COPY_AND_ASSIGN(PartitionTreeNode);
};
// Class that calculates the optimal aggregation of VP8 partitions smaller than
@@ -130,7 +130,7 @@ class Vp8PartitionAggregator {
size_t* size_vector_;
size_t largest_partition_size_;
- DISALLOW_COPY_AND_ASSIGN(Vp8PartitionAggregator);
+ RTC_DISALLOW_COPY_AND_ASSIGN(Vp8PartitionAggregator);
};
} // namespace
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/test/BWEStandAlone/TestSenderReceiver.cc b/chromium/third_party/webrtc/modules/rtp_rtcp/test/BWEStandAlone/TestSenderReceiver.cc
index e55d3634676..f3ca282667e 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/test/BWEStandAlone/TestSenderReceiver.cc
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/test/BWEStandAlone/TestSenderReceiver.cc
@@ -127,8 +127,7 @@ int32_t TestSenderReceiver::InitReceiver (const uint16_t rtpPort,
exit(1);
}
- if (_rtp->SetRTCPStatus(kRtcpNonCompound) != 0)
- {
+ if (_rtp->SetRTCPStatus(RtcpMode::kReducedSize) != 0) {
throw "_rtp->SetRTCPStatus";
exit(1);
}
@@ -343,8 +342,7 @@ int32_t TestSenderReceiver::InitSender (const uint32_t startBitrateKbps,
exit(1);
}
- if (_rtp->SetRTCPStatus(kRtcpNonCompound) != 0)
- {
+ if (_rtp->SetRTCPStatus(RtcpMode::kReducedSize) != 0) {
throw "_rtp->SetRTCPStatus";
exit(1);
}
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/test/testAPI/test_api.cc b/chromium/third_party/webrtc/modules/rtp_rtcp/test/testAPI/test_api.cc
index 5731efde9e4..0270e55802b 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/test/testAPI/test_api.cc
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/test/testAPI/test_api.cc
@@ -9,6 +9,7 @@
*/
#include "webrtc/modules/rtp_rtcp/test/testAPI/test_api.h"
+#include "webrtc/test/null_transport.h"
#include <algorithm>
#include <vector>
@@ -30,39 +31,39 @@ void LoopBackTransport::DropEveryNthPacket(int n) {
packet_loss_ = n;
}
-int LoopBackTransport::SendPacket(int channel, const void* data, size_t len) {
+bool LoopBackTransport::SendRtp(const uint8_t* data,
+ size_t len,
+ const PacketOptions& options) {
count_++;
if (packet_loss_ > 0) {
if ((count_ % packet_loss_) == 0) {
- return len;
+ return true;
}
}
RTPHeader header;
rtc::scoped_ptr<RtpHeaderParser> parser(RtpHeaderParser::Create());
if (!parser->Parse(static_cast<const uint8_t*>(data), len, &header)) {
- return -1;
+ return false;
}
PayloadUnion payload_specific;
if (!rtp_payload_registry_->GetPayloadSpecifics(header.payloadType,
&payload_specific)) {
- return -1;
+ return false;
}
receive_statistics_->IncomingPacket(header, len, false);
if (!rtp_receiver_->IncomingRtpPacket(header,
static_cast<const uint8_t*>(data), len,
payload_specific, true)) {
- return -1;
+ return false;
}
- return len;
+ return true;
}
-int LoopBackTransport::SendRTCPPacket(int channel,
- const void* data,
- size_t len) {
+bool LoopBackTransport::SendRtcp(const uint8_t* data, size_t len) {
if (rtp_rtcp_module_->IncomingRtcpPacket((const uint8_t*)data, len) < 0) {
- return -1;
+ return false;
}
- return static_cast<int>(len);
+ return true;
}
int32_t TestRtpReceiver::OnReceivedPayloadData(
@@ -82,7 +83,6 @@ class RtpRtcpAPITest : public ::testing::Test {
RtpRtcpAPITest() : fake_clock_(123456) {
test_csrcs_.push_back(1234);
test_csrcs_.push_back(2345);
- test_id = 123;
test_ssrc_ = 3456;
test_timestamp_ = 4567;
test_sequence_number_ = 2345;
@@ -91,17 +91,16 @@ class RtpRtcpAPITest : public ::testing::Test {
void SetUp() override {
RtpRtcp::Configuration configuration;
- configuration.id = test_id;
configuration.audio = true;
configuration.clock = &fake_clock_;
+ configuration.outgoing_transport = &null_transport_;
module_.reset(RtpRtcp::CreateRtpRtcp(configuration));
rtp_payload_registry_.reset(new RTPPayloadRegistry(
RTPPayloadStrategy::CreateStrategy(true)));
rtp_receiver_.reset(RtpReceiver::CreateAudioReceiver(
- test_id, &fake_clock_, NULL, NULL, NULL, rtp_payload_registry_.get()));
+ &fake_clock_, NULL, NULL, NULL, rtp_payload_registry_.get()));
}
- int test_id;
rtc::scoped_ptr<RTPPayloadRegistry> rtp_payload_registry_;
rtc::scoped_ptr<RtpReceiver> rtp_receiver_;
rtc::scoped_ptr<RtpRtcp> module_;
@@ -110,6 +109,7 @@ class RtpRtcpAPITest : public ::testing::Test {
uint16_t test_sequence_number_;
std::vector<uint32_t> test_csrcs_;
SimulatedClock fake_clock_;
+ test::NullTransport null_transport_;
};
TEST_F(RtpRtcpAPITest, Basic) {
@@ -125,8 +125,6 @@ TEST_F(RtpRtcpAPITest, Basic) {
}
TEST_F(RtpRtcpAPITest, MTU) {
- EXPECT_EQ(-1, module_->SetMaxTransferUnit(10));
- EXPECT_EQ(-1, module_->SetMaxTransferUnit(IP_PACKET_SIZE + 1));
EXPECT_EQ(0, module_->SetMaxTransferUnit(1234));
EXPECT_EQ(1234 - 20 - 8, module_->MaxPayloadLength());
@@ -143,9 +141,9 @@ TEST_F(RtpRtcpAPITest, SSRC) {
}
TEST_F(RtpRtcpAPITest, RTCP) {
- EXPECT_EQ(kRtcpOff, module_->RTCP());
- module_->SetRTCPStatus(kRtcpCompound);
- EXPECT_EQ(kRtcpCompound, module_->RTCP());
+ EXPECT_EQ(RtcpMode::kOff, module_->RTCP());
+ module_->SetRTCPStatus(RtcpMode::kCompound);
+ EXPECT_EQ(RtcpMode::kCompound, module_->RTCP());
EXPECT_EQ(0, module_->SetCNAME("john.doe@test.test"));
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/test/testAPI/test_api.h b/chromium/third_party/webrtc/modules/rtp_rtcp/test/testAPI/test_api.h
index 069cdc77df4..73334a8b261 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/test/testAPI/test_api.h
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/test/testAPI/test_api.h
@@ -17,12 +17,13 @@
#include "webrtc/modules/rtp_rtcp/interface/rtp_receiver.h"
#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp.h"
#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
+#include "webrtc/transport.h"
namespace webrtc {
// This class sends all its packet straight to the provided RtpRtcp module.
// with optional packet loss.
-class LoopBackTransport : public webrtc::Transport {
+class LoopBackTransport : public Transport {
public:
LoopBackTransport()
: count_(0),
@@ -35,8 +36,10 @@ class LoopBackTransport : public webrtc::Transport {
RtpReceiver* receiver,
ReceiveStatistics* receive_statistics);
void DropEveryNthPacket(int n);
- int SendPacket(int channel, const void* data, size_t len) override;
- int SendRTCPPacket(int channel, const void* data, size_t len) override;
+ bool SendRtp(const uint8_t* data,
+ size_t len,
+ const PacketOptions& options) override;
+ bool SendRtcp(const uint8_t* data, size_t len) override;
private:
int count_;
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/test/testAPI/test_api_audio.cc b/chromium/third_party/webrtc/modules/rtp_rtcp/test/testAPI/test_api_audio.cc
index 61923aa4476..745386d485c 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/test/testAPI/test_api_audio.cc
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/test/testAPI/test_api_audio.cc
@@ -61,8 +61,7 @@ class VerifyingAudioReceiver : public NullRtpData {
class RTPCallback : public NullRtpFeedback {
public:
- int32_t OnInitializeDecoder(const int32_t id,
- const int8_t payloadType,
+ int32_t OnInitializeDecoder(const int8_t payloadType,
const char payloadName[RTP_PAYLOAD_NAME_SIZE],
const int frequency,
const uint8_t channels,
@@ -80,7 +79,6 @@ class RtpRtcpAudioTest : public ::testing::Test {
RtpRtcpAudioTest() : fake_clock(123456) {
test_CSRC[0] = 1234;
test_CSRC[2] = 2345;
- test_id = 123;
test_ssrc = 3456;
test_timestamp = 4567;
test_sequence_number = 2345;
@@ -104,7 +102,6 @@ class RtpRtcpAudioTest : public ::testing::Test {
RTPPayloadStrategy::CreateStrategy(true)));
RtpRtcp::Configuration configuration;
- configuration.id = test_id;
configuration.audio = true;
configuration.clock = &fake_clock;
configuration.receive_statistics = receive_statistics1_.get();
@@ -113,18 +110,17 @@ class RtpRtcpAudioTest : public ::testing::Test {
module1 = RtpRtcp::CreateRtpRtcp(configuration);
rtp_receiver1_.reset(RtpReceiver::CreateAudioReceiver(
- test_id, &fake_clock, audioFeedback, data_receiver1, NULL,
+ &fake_clock, audioFeedback, data_receiver1, NULL,
rtp_payload_registry1_.get()));
- configuration.id = test_id + 1;
configuration.receive_statistics = receive_statistics2_.get();
configuration.outgoing_transport = transport2;
configuration.audio_messages = audioFeedback;
module2 = RtpRtcp::CreateRtpRtcp(configuration);
rtp_receiver2_.reset(RtpReceiver::CreateAudioReceiver(
- test_id + 1, &fake_clock, audioFeedback, data_receiver2, NULL,
- rtp_payload_registry2_.get()));
+ &fake_clock, audioFeedback, data_receiver2, NULL,
+ rtp_payload_registry2_.get()));
transport1->SetSendModule(module2, rtp_payload_registry2_.get(),
rtp_receiver2_.get(), receive_statistics2_.get());
@@ -143,7 +139,6 @@ class RtpRtcpAudioTest : public ::testing::Test {
delete rtp_callback;
}
- int test_id;
RtpRtcp* module1;
RtpRtcp* module2;
rtc::scoped_ptr<ReceiveStatistics> receive_statistics1_;
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/test/testAPI/test_api_rtcp.cc b/chromium/third_party/webrtc/modules/rtp_rtcp/test/testAPI/test_api_rtcp.cc
index 741b7ac0bd9..e9d81122b13 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/test/testAPI/test_api_rtcp.cc
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/test/testAPI/test_api_rtcp.cc
@@ -23,6 +23,7 @@
using namespace webrtc;
const uint64_t kTestPictureId = 12345678;
+const uint8_t kSliPictureId = 156;
class RtcpCallback : public RtcpIntraFrameObserver {
public:
@@ -38,7 +39,7 @@ class RtcpCallback : public RtcpIntraFrameObserver {
};
virtual void OnReceivedSLI(uint32_t ssrc,
uint8_t pictureId) {
- EXPECT_EQ(28, pictureId);
+ EXPECT_EQ(kSliPictureId & 0x3f, pictureId);
};
virtual void OnReceivedRPSI(uint32_t ssrc,
uint64_t pictureId) {
@@ -54,8 +55,7 @@ class TestRtpFeedback : public NullRtpFeedback {
TestRtpFeedback(RtpRtcp* rtp_rtcp) : rtp_rtcp_(rtp_rtcp) {}
virtual ~TestRtpFeedback() {}
- virtual void OnIncomingSSRCChanged(const int32_t id,
- const uint32_t ssrc) {
+ void OnIncomingSSRCChanged(const uint32_t ssrc) override {
rtp_rtcp_->SetRemoteSSRC(ssrc);
}
@@ -68,7 +68,6 @@ class RtpRtcpRtcpTest : public ::testing::Test {
RtpRtcpRtcpTest() : fake_clock(123456) {
test_csrcs.push_back(1234);
test_csrcs.push_back(2345);
- test_id = 123;
test_ssrc = 3456;
test_timestamp = 4567;
test_sequence_number = 2345;
@@ -86,7 +85,6 @@ class RtpRtcpRtcpTest : public ::testing::Test {
receive_statistics2_.reset(ReceiveStatistics::Create(&fake_clock));
RtpRtcp::Configuration configuration;
- configuration.id = test_id;
configuration.audio = true;
configuration.clock = &fake_clock;
configuration.receive_statistics = receive_statistics1_.get();
@@ -103,11 +101,10 @@ class RtpRtcpRtcpTest : public ::testing::Test {
rtp_feedback1_.reset(new TestRtpFeedback(module1));
rtp_receiver1_.reset(RtpReceiver::CreateAudioReceiver(
- test_id, &fake_clock, NULL, receiver, rtp_feedback1_.get(),
+ &fake_clock, NULL, receiver, rtp_feedback1_.get(),
rtp_payload_registry1_.get()));
configuration.receive_statistics = receive_statistics2_.get();
- configuration.id = test_id + 1;
configuration.outgoing_transport = transport2;
configuration.intra_frame_callback = myRTCPFeedback2;
@@ -116,7 +113,7 @@ class RtpRtcpRtcpTest : public ::testing::Test {
rtp_feedback2_.reset(new TestRtpFeedback(module2));
rtp_receiver2_.reset(RtpReceiver::CreateAudioReceiver(
- test_id + 1, &fake_clock, NULL, receiver, rtp_feedback2_.get(),
+ &fake_clock, NULL, receiver, rtp_feedback2_.get(),
rtp_payload_registry2_.get()));
transport1->SetSendModule(module2, rtp_payload_registry2_.get(),
@@ -126,8 +123,8 @@ class RtpRtcpRtcpTest : public ::testing::Test {
myRTCPFeedback1->SetModule(module1);
myRTCPFeedback2->SetModule(module2);
- module1->SetRTCPStatus(kRtcpCompound);
- module2->SetRTCPStatus(kRtcpCompound);
+ module1->SetRTCPStatus(RtcpMode::kCompound);
+ module2->SetRTCPStatus(RtcpMode::kCompound);
module2->SetSSRC(test_ssrc + 1);
module1->SetSSRC(test_ssrc);
@@ -178,7 +175,6 @@ class RtpRtcpRtcpTest : public ::testing::Test {
delete receiver;
}
- int test_id;
rtc::scoped_ptr<TestRtpFeedback> rtp_feedback1_;
rtc::scoped_ptr<TestRtpFeedback> rtp_feedback2_;
rtc::scoped_ptr<ReceiveStatistics> receive_statistics1_;
@@ -204,7 +200,7 @@ class RtpRtcpRtcpTest : public ::testing::Test {
TEST_F(RtpRtcpRtcpTest, RTCP_PLI_RPSI) {
EXPECT_EQ(0, module1->SendRTCPReferencePictureSelection(kTestPictureId));
- EXPECT_EQ(0, module1->SendRTCPSliceLossIndication(156));
+ EXPECT_EQ(0, module1->SendRTCPSliceLossIndication(kSliPictureId));
}
TEST_F(RtpRtcpRtcpTest, RTCP_CNAME) {
diff --git a/chromium/third_party/webrtc/modules/rtp_rtcp/test/testAPI/test_api_video.cc b/chromium/third_party/webrtc/modules/rtp_rtcp/test/testAPI/test_api_video.cc
index e28d5ceaf52..30a6a1c303f 100644
--- a/chromium/third_party/webrtc/modules/rtp_rtcp/test/testAPI/test_api_video.cc
+++ b/chromium/third_party/webrtc/modules/rtp_rtcp/test/testAPI/test_api_video.cc
@@ -33,13 +33,11 @@ namespace webrtc {
class RtpRtcpVideoTest : public ::testing::Test {
protected:
RtpRtcpVideoTest()
- : test_id_(123),
- rtp_payload_registry_(RTPPayloadStrategy::CreateStrategy(false)),
+ : rtp_payload_registry_(RTPPayloadStrategy::CreateStrategy(false)),
test_ssrc_(3456),
test_timestamp_(4567),
test_sequence_number_(2345),
- fake_clock(123456) {
- }
+ fake_clock(123456) {}
~RtpRtcpVideoTest() {}
virtual void SetUp() {
@@ -47,16 +45,15 @@ class RtpRtcpVideoTest : public ::testing::Test {
receiver_ = new TestRtpReceiver();
receive_statistics_.reset(ReceiveStatistics::Create(&fake_clock));
RtpRtcp::Configuration configuration;
- configuration.id = test_id_;
configuration.audio = false;
configuration.clock = &fake_clock;
configuration.outgoing_transport = transport_;
video_module_ = RtpRtcp::CreateRtpRtcp(configuration);
rtp_receiver_.reset(RtpReceiver::CreateVideoReceiver(
- test_id_, &fake_clock, receiver_, NULL, &rtp_payload_registry_));
+ &fake_clock, receiver_, NULL, &rtp_payload_registry_));
- video_module_->SetRTCPStatus(kRtcpCompound);
+ video_module_->SetRTCPStatus(RtcpMode::kCompound);
video_module_->SetSSRC(test_ssrc_);
rtp_receiver_->SetNACKStatus(kNackRtcp);
video_module_->SetStorePacketsStatus(true, 600);
diff --git a/chromium/third_party/webrtc/modules/utility/interface/audio_frame_operations.h b/chromium/third_party/webrtc/modules/utility/interface/audio_frame_operations.h
index f439dacbcff..c2af68ab1be 100644
--- a/chromium/third_party/webrtc/modules/utility/interface/audio_frame_operations.h
+++ b/chromium/third_party/webrtc/modules/utility/interface/audio_frame_operations.h
@@ -26,7 +26,7 @@ class AudioFrameOperations {
// operation, meaning src_audio and dst_audio must point to different
// buffers. It is the caller's responsibility to ensure that |dst_audio| is
// sufficiently large.
- static void MonoToStereo(const int16_t* src_audio, int samples_per_channel,
+ static void MonoToStereo(const int16_t* src_audio, size_t samples_per_channel,
int16_t* dst_audio);
// |frame.num_channels_| will be updated. This version checks for sufficient
// buffer size and that |num_channels_| is mono.
@@ -35,7 +35,7 @@ class AudioFrameOperations {
// Downmixes stereo |src_audio| to mono |dst_audio|. This is an in-place
// operation, meaning |src_audio| and |dst_audio| may point to the same
// buffer.
- static void StereoToMono(const int16_t* src_audio, int samples_per_channel,
+ static void StereoToMono(const int16_t* src_audio, size_t samples_per_channel,
int16_t* dst_audio);
// |frame.num_channels_| will be updated. This version checks that
// |num_channels_| is stereo.
diff --git a/chromium/third_party/webrtc/modules/utility/interface/file_player.h b/chromium/third_party/webrtc/modules/utility/interface/file_player.h
index fdce277bdb7..44f03e475a0 100644
--- a/chromium/third_party/webrtc/modules/utility/interface/file_player.h
+++ b/chromium/third_party/webrtc/modules/utility/interface/file_player.h
@@ -38,7 +38,7 @@ public:
// channel).
virtual int Get10msAudioFromFile(
int16_t* outBuffer,
- int& lengthInSamples,
+ size_t& lengthInSamples,
int frequencyInHz) = 0;
// Register callback for receiving file playing notifications.
diff --git a/chromium/third_party/webrtc/modules/utility/interface/file_recorder.h b/chromium/third_party/webrtc/modules/utility/interface/file_recorder.h
index 009fa7510e2..1e12673eebc 100644
--- a/chromium/third_party/webrtc/modules/utility/interface/file_recorder.h
+++ b/chromium/third_party/webrtc/modules/utility/interface/file_recorder.h
@@ -13,7 +13,6 @@
#include "webrtc/common_types.h"
#include "webrtc/engine_configurations.h"
-#include "webrtc/modules/audio_coding/main/interface/audio_coding_module_typedefs.h"
#include "webrtc/modules/interface/module_common_types.h"
#include "webrtc/modules/media_file/interface/media_file_defines.h"
#include "webrtc/system_wrappers/interface/tick_util.h"
@@ -40,14 +39,12 @@ public:
virtual int32_t StartRecordingAudioFile(
const char* fileName,
const CodecInst& codecInst,
- uint32_t notification,
- ACMAMRPackingFormat amrFormat = AMRFileStorage) = 0;
+ uint32_t notification) = 0;
virtual int32_t StartRecordingAudioFile(
OutStream& destStream,
const CodecInst& codecInst,
- uint32_t notification,
- ACMAMRPackingFormat amrFormat = AMRFileStorage) = 0;
+ uint32_t notification) = 0;
// Stop recording.
// Note: this API is for both audio and video.
@@ -74,7 +71,6 @@ public:
const char* fileName,
const CodecInst& audioCodecInst,
const VideoCodec& videoCodecInst,
- ACMAMRPackingFormat amrFormat = AMRFileStorage,
bool videoOnly = false) = 0;
// Record the video frame in videoFrame to AVI file.
diff --git a/chromium/third_party/webrtc/modules/utility/interface/helpers_android.h b/chromium/third_party/webrtc/modules/utility/interface/helpers_android.h
index 19ff09869ef..5c73fe45662 100644
--- a/chromium/third_party/webrtc/modules/utility/interface/helpers_android.h
+++ b/chromium/third_party/webrtc/modules/utility/interface/helpers_android.h
@@ -16,8 +16,8 @@
// Abort the process if |jni| has a Java exception pending.
// TODO(henrika): merge with CHECK_JNI_EXCEPTION() in jni_helpers.h.
-#define CHECK_EXCEPTION(jni) \
- CHECK(!jni->ExceptionCheck()) \
+#define CHECK_EXCEPTION(jni) \
+ RTC_CHECK(!jni->ExceptionCheck()) \
<< (jni->ExceptionDescribe(), jni->ExceptionClear(), "")
namespace webrtc {
@@ -31,8 +31,8 @@ JNIEnv* GetEnv(JavaVM* jvm);
jlong PointerTojlong(void* ptr);
// JNIEnv-helper methods that wraps the API which uses the JNI interface
-// pointer (JNIEnv*). It allows us to CHECK success and that no Java exception
-// is thrown while calling the method.
+// pointer (JNIEnv*). It allows us to RTC_CHECK success and that no Java
+// exception is thrown while calling the method.
jmethodID GetMethodID(
JNIEnv* jni, jclass c, const char* name, const char* signature);
diff --git a/chromium/third_party/webrtc/modules/utility/interface/helpers_ios.h b/chromium/third_party/webrtc/modules/utility/interface/helpers_ios.h
new file mode 100644
index 00000000000..1e6075faae9
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/utility/interface/helpers_ios.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_UTILITY_INTERFACE_HELPERS_IOS_H_
+#define WEBRTC_MODULES_UTILITY_INTERFACE_HELPERS_IOS_H_
+
+#if defined(WEBRTC_IOS)
+
+#include <string>
+
+namespace webrtc {
+namespace ios {
+
+bool CheckAndLogError(BOOL success, NSError* error);
+
+// Return thread ID as a string.
+std::string GetThreadId();
+
+// Return thread ID as string suitable for debug logging.
+std::string GetThreadInfo();
+
+// Returns [NSThread currentThread] description as string.
+// Example: <NSThread: 0x170066d80>{number = 1, name = main}
+std::string GetCurrentThreadDescription();
+
+// Returns the current name of the operating system.
+std::string GetSystemName();
+
+// Returns the current version of the operating system.
+std::string GetSystemVersion();
+
+// Returns the version of the operating system as a floating point value.
+float GetSystemVersionAsFloat();
+
+// Returns the device type.
+// Examples: "iPhone" and "iPod touch".
+std::string GetDeviceType();
+
+// Returns a more detailed device name.
+// Examples: "iPhone 5s (GSM)" and "iPhone 6 Plus".
+std::string GetDeviceName();
+
+} // namespace ios
+} // namespace webrtc
+
+#endif // defined(WEBRTC_IOS)
+
+#endif // WEBRTC_MODULES_UTILITY_INTERFACE_HELPERS_IOS_H_
diff --git a/chromium/third_party/webrtc/modules/utility/interface/process_thread.h b/chromium/third_party/webrtc/modules/utility/interface/process_thread.h
index 0e84506f1a6..451a5a301b5 100644
--- a/chromium/third_party/webrtc/modules/utility/interface/process_thread.h
+++ b/chromium/third_party/webrtc/modules/utility/interface/process_thread.h
@@ -29,7 +29,7 @@ class ProcessThread {
public:
virtual ~ProcessThread();
- static rtc::scoped_ptr<ProcessThread> Create();
+ static rtc::scoped_ptr<ProcessThread> Create(const char* thread_name);
// Starts the worker thread. Must be called from the construction thread.
virtual void Start() = 0;
diff --git a/chromium/third_party/webrtc/modules/utility/source/audio_frame_operations.cc b/chromium/third_party/webrtc/modules/utility/source/audio_frame_operations.cc
index e3b00104761..c07ca1fdf60 100644
--- a/chromium/third_party/webrtc/modules/utility/source/audio_frame_operations.cc
+++ b/chromium/third_party/webrtc/modules/utility/source/audio_frame_operations.cc
@@ -14,9 +14,9 @@
namespace webrtc {
void AudioFrameOperations::MonoToStereo(const int16_t* src_audio,
- int samples_per_channel,
+ size_t samples_per_channel,
int16_t* dst_audio) {
- for (int i = 0; i < samples_per_channel; i++) {
+ for (size_t i = 0; i < samples_per_channel; i++) {
dst_audio[2 * i] = src_audio[i];
dst_audio[2 * i + 1] = src_audio[i];
}
@@ -41,9 +41,9 @@ int AudioFrameOperations::MonoToStereo(AudioFrame* frame) {
}
void AudioFrameOperations::StereoToMono(const int16_t* src_audio,
- int samples_per_channel,
+ size_t samples_per_channel,
int16_t* dst_audio) {
- for (int i = 0; i < samples_per_channel; i++) {
+ for (size_t i = 0; i < samples_per_channel; i++) {
dst_audio[i] = (src_audio[2 * i] + src_audio[2 * i + 1]) >> 1;
}
}
@@ -62,7 +62,7 @@ int AudioFrameOperations::StereoToMono(AudioFrame* frame) {
void AudioFrameOperations::SwapStereoChannels(AudioFrame* frame) {
if (frame->num_channels_ != 2) return;
- for (int i = 0; i < frame->samples_per_channel_ * 2; i += 2) {
+ for (size_t i = 0; i < frame->samples_per_channel_ * 2; i += 2) {
int16_t temp_data = frame->data_[i];
frame->data_[i] = frame->data_[i + 1];
frame->data_[i + 1] = temp_data;
@@ -79,7 +79,7 @@ int AudioFrameOperations::Scale(float left, float right, AudioFrame& frame) {
return -1;
}
- for (int i = 0; i < frame.samples_per_channel_; i++) {
+ for (size_t i = 0; i < frame.samples_per_channel_; i++) {
frame.data_[2 * i] =
static_cast<int16_t>(left * frame.data_[2 * i]);
frame.data_[2 * i + 1] =
@@ -92,7 +92,7 @@ int AudioFrameOperations::ScaleWithSat(float scale, AudioFrame& frame) {
int32_t temp_data = 0;
// Ensure that the output result is saturated [-32768, +32767].
- for (int i = 0; i < frame.samples_per_channel_ * frame.num_channels_;
+ for (size_t i = 0; i < frame.samples_per_channel_ * frame.num_channels_;
i++) {
temp_data = static_cast<int32_t>(scale * frame.data_[i]);
if (temp_data < -32768) {
diff --git a/chromium/third_party/webrtc/modules/utility/source/audio_frame_operations_unittest.cc b/chromium/third_party/webrtc/modules/utility/source/audio_frame_operations_unittest.cc
index f4d881cf871..c278cdddcdc 100644
--- a/chromium/third_party/webrtc/modules/utility/source/audio_frame_operations_unittest.cc
+++ b/chromium/third_party/webrtc/modules/utility/source/audio_frame_operations_unittest.cc
@@ -28,14 +28,14 @@ class AudioFrameOperationsTest : public ::testing::Test {
};
void SetFrameData(AudioFrame* frame, int16_t left, int16_t right) {
- for (int i = 0; i < frame->samples_per_channel_ * 2; i += 2) {
+ for (size_t i = 0; i < frame->samples_per_channel_ * 2; i += 2) {
frame->data_[i] = left;
frame->data_[i + 1] = right;
}
}
void SetFrameData(AudioFrame* frame, int16_t data) {
- for (int i = 0; i < frame->samples_per_channel_; i++) {
+ for (size_t i = 0; i < frame->samples_per_channel_; i++) {
frame->data_[i] = data;
}
}
@@ -45,7 +45,7 @@ void VerifyFramesAreEqual(const AudioFrame& frame1, const AudioFrame& frame2) {
EXPECT_EQ(frame1.samples_per_channel_,
frame2.samples_per_channel_);
- for (int i = 0; i < frame1.samples_per_channel_ * frame1.num_channels_;
+ for (size_t i = 0; i < frame1.samples_per_channel_ * frame1.num_channels_;
i++) {
EXPECT_EQ(frame1.data_[i], frame2.data_[i]);
}
diff --git a/chromium/third_party/webrtc/modules/utility/source/coder.cc b/chromium/third_party/webrtc/modules/utility/source/coder.cc
index 1baeaef7214..4ec5f9b4e27 100644
--- a/chromium/third_party/webrtc/modules/utility/source/coder.cc
+++ b/chromium/third_party/webrtc/modules/utility/source/coder.cc
@@ -29,8 +29,7 @@ AudioCoder::~AudioCoder()
{
}
-int32_t AudioCoder::SetEncodeCodec(const CodecInst& codecInst,
- ACMAMRPackingFormat amrFormat)
+int32_t AudioCoder::SetEncodeCodec(const CodecInst& codecInst)
{
if(_acm->RegisterSendCodec((CodecInst&)codecInst) == -1)
{
@@ -39,8 +38,7 @@ int32_t AudioCoder::SetEncodeCodec(const CodecInst& codecInst,
return 0;
}
-int32_t AudioCoder::SetDecodeCodec(const CodecInst& codecInst,
- ACMAMRPackingFormat amrFormat)
+int32_t AudioCoder::SetDecodeCodec(const CodecInst& codecInst)
{
if(_acm->RegisterReceiveCodec((CodecInst&)codecInst) == -1)
{
diff --git a/chromium/third_party/webrtc/modules/utility/source/coder.h b/chromium/third_party/webrtc/modules/utility/source/coder.h
index 57eada18cc2..a57ba86d839 100644
--- a/chromium/third_party/webrtc/modules/utility/source/coder.h
+++ b/chromium/third_party/webrtc/modules/utility/source/coder.h
@@ -25,13 +25,9 @@ public:
AudioCoder(uint32_t instanceID);
~AudioCoder();
- int32_t SetEncodeCodec(
- const CodecInst& codecInst,
- ACMAMRPackingFormat amrFormat = AMRBandwidthEfficient);
+ int32_t SetEncodeCodec(const CodecInst& codecInst);
- int32_t SetDecodeCodec(
- const CodecInst& codecInst,
- ACMAMRPackingFormat amrFormat = AMRBandwidthEfficient);
+ int32_t SetDecodeCodec(const CodecInst& codecInst);
int32_t Decode(AudioFrame& decodedAudio, uint32_t sampFreqHz,
const int8_t* incomingPayload, size_t payloadLength);
diff --git a/chromium/third_party/webrtc/modules/utility/source/file_player_impl.cc b/chromium/third_party/webrtc/modules/utility/source/file_player_impl.cc
index df6a5bfbcaf..29ad9e3fe18 100644
--- a/chromium/third_party/webrtc/modules/utility/source/file_player_impl.cc
+++ b/chromium/third_party/webrtc/modules/utility/source/file_player_impl.cc
@@ -95,7 +95,7 @@ int32_t FilePlayerImpl::AudioCodec(CodecInst& audioCodec) const
int32_t FilePlayerImpl::Get10msAudioFromFile(
int16_t* outBuffer,
- int& lengthInSamples,
+ size_t& lengthInSamples,
int frequencyInHz)
{
if(_codec.plfreq == 0)
@@ -127,8 +127,7 @@ int32_t FilePlayerImpl::Get10msAudioFromFile(
return 0;
}
// One sample is two bytes.
- unresampledAudioFrame.samples_per_channel_ =
- (uint16_t)lengthInBytes >> 1;
+ unresampledAudioFrame.samples_per_channel_ = lengthInBytes >> 1;
} else {
// Decode will generate 10 ms of audio data. PlayoutAudioData(..)
@@ -156,14 +155,14 @@ int32_t FilePlayerImpl::Get10msAudioFromFile(
}
}
- int outLen = 0;
+ size_t outLen = 0;
if(_resampler.ResetIfNeeded(unresampledAudioFrame.sample_rate_hz_,
frequencyInHz, 1))
{
LOG(LS_WARNING) << "Get10msAudioFromFile() unexpected codec.";
// New sampling frequency. Update state.
- outLen = frequencyInHz / 100;
+ outLen = static_cast<size_t>(frequencyInHz / 100);
memset(outBuffer, 0, outLen * sizeof(int16_t));
return 0;
}
@@ -177,7 +176,7 @@ int32_t FilePlayerImpl::Get10msAudioFromFile(
if(_scaling != 1.0)
{
- for (int i = 0;i < outLen; i++)
+ for (size_t i = 0;i < outLen; i++)
{
outBuffer[i] = (int16_t)(outBuffer[i] * _scaling);
}
@@ -390,7 +389,7 @@ int32_t FilePlayerImpl::SetUpAudioDecoder()
return -1;
}
if( STR_CASE_CMP(_codec.plname, "L16") != 0 &&
- _audioDecoder.SetDecodeCodec(_codec,AMRFileStorage) == -1)
+ _audioDecoder.SetDecodeCodec(_codec) == -1)
{
LOG(LS_WARNING) << "SetUpAudioDecoder() codec " << _codec.plname
<< " not supported.";
diff --git a/chromium/third_party/webrtc/modules/utility/source/file_player_impl.h b/chromium/third_party/webrtc/modules/utility/source/file_player_impl.h
index f81e7101eab..8818b5caad7 100644
--- a/chromium/third_party/webrtc/modules/utility/source/file_player_impl.h
+++ b/chromium/third_party/webrtc/modules/utility/source/file_player_impl.h
@@ -31,7 +31,7 @@ public:
virtual int Get10msAudioFromFile(
int16_t* outBuffer,
- int& lengthInSamples,
+ size_t& lengthInSamples,
int frequencyInHz);
virtual int32_t RegisterModuleFileCallback(FileCallback* callback);
virtual int32_t StartPlayingFile(
diff --git a/chromium/third_party/webrtc/modules/utility/source/file_player_unittests.cc b/chromium/third_party/webrtc/modules/utility/source/file_player_unittests.cc
index c5f6fbaa55d..4b65acdeef0 100644
--- a/chromium/third_party/webrtc/modules/utility/source/file_player_unittests.cc
+++ b/chromium/third_party/webrtc/modules/utility/source/file_player_unittests.cc
@@ -62,12 +62,12 @@ class FilePlayerTest : public ::testing::Test {
rtc::Md5Digest checksum;
for (int i = 0; i < output_length_ms / 10; ++i) {
int16_t out[10 * kSampleRateHz / 1000] = {0};
- int num_samples;
+ size_t num_samples;
EXPECT_EQ(0,
player_->Get10msAudioFromFile(out, num_samples, kSampleRateHz));
checksum.Update(out, num_samples * sizeof(out[0]));
if (FLAGS_file_player_output) {
- ASSERT_EQ(static_cast<size_t>(num_samples),
+ ASSERT_EQ(num_samples,
fwrite(out, sizeof(out[0]), num_samples, output_file_));
}
}
diff --git a/chromium/third_party/webrtc/modules/utility/source/file_recorder_impl.cc b/chromium/third_party/webrtc/modules/utility/source/file_recorder_impl.cc
index e86afc69722..c11bbe993ff 100644
--- a/chromium/third_party/webrtc/modules/utility/source/file_recorder_impl.cc
+++ b/chromium/third_party/webrtc/modules/utility/source/file_recorder_impl.cc
@@ -32,7 +32,6 @@ FileRecorderImpl::FileRecorderImpl(uint32_t instanceID,
_fileFormat(fileFormat),
_moduleFile(MediaFile::CreateMediaFile(_instanceID)),
codec_info_(),
- _amrFormat(AMRFileStorage),
_audioBuffer(),
_audioEncoder(instanceID),
_audioResampler()
@@ -62,16 +61,13 @@ int32_t FileRecorderImpl::RegisterModuleFileCallback(
int32_t FileRecorderImpl::StartRecordingAudioFile(
const char* fileName,
const CodecInst& codecInst,
- uint32_t notificationTimeMs,
- ACMAMRPackingFormat amrFormat)
+ uint32_t notificationTimeMs)
{
if(_moduleFile == NULL)
{
return -1;
}
codec_info_ = codecInst;
- _amrFormat = amrFormat;
-
int32_t retVal = 0;
retVal =_moduleFile->StartRecordingAudioFile(fileName, _fileFormat,
codecInst,
@@ -97,12 +93,9 @@ int32_t FileRecorderImpl::StartRecordingAudioFile(
int32_t FileRecorderImpl::StartRecordingAudioFile(
OutStream& destStream,
const CodecInst& codecInst,
- uint32_t notificationTimeMs,
- ACMAMRPackingFormat amrFormat)
+ uint32_t notificationTimeMs)
{
codec_info_ = codecInst;
- _amrFormat = amrFormat;
-
int32_t retVal = _moduleFile->StartRecordingAudioStream(
destStream,
_fileFormat,
@@ -156,7 +149,7 @@ int32_t FileRecorderImpl::RecordAudioToFile(
tempAudioFrame.sample_rate_hz_ = incomingAudioFrame.sample_rate_hz_;
tempAudioFrame.samples_per_channel_ =
incomingAudioFrame.samples_per_channel_;
- for (uint16_t i = 0;
+ for (size_t i = 0;
i < (incomingAudioFrame.samples_per_channel_); i++)
{
// Sample value is the average of left and right buffer rounded to
@@ -174,7 +167,7 @@ int32_t FileRecorderImpl::RecordAudioToFile(
tempAudioFrame.sample_rate_hz_ = incomingAudioFrame.sample_rate_hz_;
tempAudioFrame.samples_per_channel_ =
incomingAudioFrame.samples_per_channel_;
- for (uint16_t i = 0;
+ for (size_t i = 0;
i < (incomingAudioFrame.samples_per_channel_); i++)
{
// Duplicate sample to both channels
@@ -210,7 +203,7 @@ int32_t FileRecorderImpl::RecordAudioToFile(
return -1;
}
} else {
- int outLen = 0;
+ size_t outLen = 0;
_audioResampler.ResetIfNeeded(ptrAudioFrame->sample_rate_hz_,
codec_info_.plfreq,
ptrAudioFrame->num_channels_);
@@ -240,7 +233,7 @@ int32_t FileRecorderImpl::SetUpAudioEncoder()
if (_fileFormat == kFileFormatPreencodedFile ||
STR_CASE_CMP(codec_info_.plname, "L16") != 0)
{
- if(_audioEncoder.SetEncodeCodec(codec_info_,_amrFormat) == -1)
+ if(_audioEncoder.SetEncodeCodec(codec_info_) == -1)
{
LOG(LS_ERROR) << "SetUpAudioEncoder() codec "
<< codec_info_.plname << " not supported.";
diff --git a/chromium/third_party/webrtc/modules/utility/source/file_recorder_impl.h b/chromium/third_party/webrtc/modules/utility/source/file_recorder_impl.h
index 1e83899be44..b4f507e6a74 100644
--- a/chromium/third_party/webrtc/modules/utility/source/file_recorder_impl.h
+++ b/chromium/third_party/webrtc/modules/utility/source/file_recorder_impl.h
@@ -50,13 +50,11 @@ public:
virtual int32_t StartRecordingAudioFile(
const char* fileName,
const CodecInst& codecInst,
- uint32_t notificationTimeMs,
- ACMAMRPackingFormat amrFormat = AMRFileStorage);
+ uint32_t notificationTimeMs) override;
virtual int32_t StartRecordingAudioFile(
OutStream& destStream,
const CodecInst& codecInst,
- uint32_t notificationTimeMs,
- ACMAMRPackingFormat amrFormat = AMRFileStorage);
+ uint32_t notificationTimeMs) override;
virtual int32_t StopRecording();
virtual bool IsRecording() const;
virtual int32_t codec_info(CodecInst& codecInst) const;
@@ -67,8 +65,7 @@ public:
const char* fileName,
const CodecInst& audioCodecInst,
const VideoCodec& videoCodecInst,
- ACMAMRPackingFormat amrFormat = AMRFileStorage,
- bool videoOnly = false)
+ bool videoOnly = false) override
{
return -1;
}
@@ -88,8 +85,6 @@ protected:
private:
CodecInst codec_info_;
- ACMAMRPackingFormat _amrFormat;
-
int8_t _audioBuffer[MAX_AUDIO_BUFFER_IN_BYTES];
AudioCoder _audioEncoder;
Resampler _audioResampler;
diff --git a/chromium/third_party/webrtc/modules/utility/source/helpers_android.cc b/chromium/third_party/webrtc/modules/utility/source/helpers_android.cc
index 175dd23f415..25652f237eb 100644
--- a/chromium/third_party/webrtc/modules/utility/source/helpers_android.cc
+++ b/chromium/third_party/webrtc/modules/utility/source/helpers_android.cc
@@ -25,8 +25,8 @@ namespace webrtc {
JNIEnv* GetEnv(JavaVM* jvm) {
void* env = NULL;
jint status = jvm->GetEnv(&env, JNI_VERSION_1_6);
- CHECK(((env != NULL) && (status == JNI_OK)) ||
- ((env == NULL) && (status == JNI_EDETACHED)))
+ RTC_CHECK(((env != NULL) && (status == JNI_OK)) ||
+ ((env == NULL) && (status == JNI_EDETACHED)))
<< "Unexpected GetEnv return: " << status << ":" << env;
return reinterpret_cast<JNIEnv*>(env);
}
@@ -41,7 +41,7 @@ jlong PointerTojlong(void* ptr) {
// conversion from pointer to integral type. intptr_t to jlong is a standard
// widening by the static_assert above.
jlong ret = reinterpret_cast<intptr_t>(ptr);
- DCHECK(reinterpret_cast<void*>(ret) == ptr);
+ RTC_DCHECK(reinterpret_cast<void*>(ret) == ptr);
return ret;
}
@@ -50,7 +50,7 @@ jmethodID GetMethodID (
jmethodID m = jni->GetMethodID(c, name, signature);
CHECK_EXCEPTION(jni) << "Error during GetMethodID: " << name << ", "
<< signature;
- CHECK(m) << name << ", " << signature;
+ RTC_CHECK(m) << name << ", " << signature;
return m;
}
@@ -59,21 +59,21 @@ jmethodID GetStaticMethodID (
jmethodID m = jni->GetStaticMethodID(c, name, signature);
CHECK_EXCEPTION(jni) << "Error during GetStaticMethodID: " << name << ", "
<< signature;
- CHECK(m) << name << ", " << signature;
+ RTC_CHECK(m) << name << ", " << signature;
return m;
}
jclass FindClass(JNIEnv* jni, const char* name) {
jclass c = jni->FindClass(name);
CHECK_EXCEPTION(jni) << "Error during FindClass: " << name;
- CHECK(c) << name;
+ RTC_CHECK(c) << name;
return c;
}
jobject NewGlobalRef(JNIEnv* jni, jobject o) {
jobject ret = jni->NewGlobalRef(o);
CHECK_EXCEPTION(jni) << "Error during NewGlobalRef";
- CHECK(ret);
+ RTC_CHECK(ret);
return ret;
}
@@ -85,8 +85,9 @@ void DeleteGlobalRef(JNIEnv* jni, jobject o) {
std::string GetThreadId() {
char buf[21]; // Big enough to hold a kuint64max plus terminating NULL.
int thread_id = gettid();
- CHECK_LT(snprintf(buf, sizeof(buf), "%i", thread_id),
- static_cast<int>(sizeof(buf))) << "Thread id is bigger than uint64??";
+ RTC_CHECK_LT(snprintf(buf, sizeof(buf), "%i", thread_id),
+ static_cast<int>(sizeof(buf)))
+ << "Thread id is bigger than uint64??";
return std::string(buf);
}
@@ -104,7 +105,7 @@ AttachThreadScoped::AttachThreadScoped(JavaVM* jvm)
ALOGD("Attaching thread to JVM%s", GetThreadInfo().c_str());
jint res = jvm->AttachCurrentThread(&env_, NULL);
attached_ = (res == JNI_OK);
- CHECK(attached_) << "AttachCurrentThread failed: " << res;
+ RTC_CHECK(attached_) << "AttachCurrentThread failed: " << res;
}
}
@@ -112,8 +113,8 @@ AttachThreadScoped::~AttachThreadScoped() {
if (attached_) {
ALOGD("Detaching thread from JVM%s", GetThreadInfo().c_str());
jint res = jvm_->DetachCurrentThread();
- CHECK(res == JNI_OK) << "DetachCurrentThread failed: " << res;
- CHECK(!GetEnv(jvm_));
+ RTC_CHECK(res == JNI_OK) << "DetachCurrentThread failed: " << res;
+ RTC_CHECK(!GetEnv(jvm_));
}
}
diff --git a/chromium/third_party/webrtc/modules/utility/source/helpers_ios.mm b/chromium/third_party/webrtc/modules/utility/source/helpers_ios.mm
new file mode 100644
index 00000000000..d36253072d7
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/utility/source/helpers_ios.mm
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#if defined(WEBRTC_IOS)
+
+#import <Foundation/Foundation.h>
+#import <sys/sysctl.h>
+#import <UIKit/UIKit.h>
+
+#include "webrtc/base/checks.h"
+#include "webrtc/base/logging.h"
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/modules/utility/interface/helpers_ios.h"
+
+namespace webrtc {
+namespace ios {
+
+// TODO(henrika): move to shared location.
+// See https://code.google.com/p/webrtc/issues/detail?id=4773 for details.
+NSString* NSStringFromStdString(const std::string& stdString) {
+ // std::string may contain null termination character so we construct
+ // using length.
+ return [[NSString alloc] initWithBytes:stdString.data()
+ length:stdString.length()
+ encoding:NSUTF8StringEncoding];
+}
+
+std::string StdStringFromNSString(NSString* nsString) {
+ NSData* charData = [nsString dataUsingEncoding:NSUTF8StringEncoding];
+ return std::string(reinterpret_cast<const char*>([charData bytes]),
+ [charData length]);
+}
+
+bool CheckAndLogError(BOOL success, NSError* error) {
+ if (!success) {
+ NSString* msg =
+ [NSString stringWithFormat:@"Error: %ld, %@, %@", (long)error.code,
+ error.localizedDescription,
+ error.localizedFailureReason];
+ LOG(LS_ERROR) << StdStringFromNSString(msg);
+ return false;
+ }
+ return true;
+}
+
+// TODO(henrika): see if it is possible to move to GetThreadName in
+// platform_thread.h and base it on pthread methods instead.
+std::string GetCurrentThreadDescription() {
+ NSString* name = [NSString stringWithFormat:@"%@", [NSThread currentThread]];
+ return StdStringFromNSString(name);
+}
+
+std::string GetSystemName() {
+ NSString* osName = [[UIDevice currentDevice] systemName];
+ return StdStringFromNSString(osName);
+}
+
+std::string GetSystemVersion() {
+ NSString* osVersion = [[UIDevice currentDevice] systemVersion];
+ return StdStringFromNSString(osVersion);
+}
+
+float GetSystemVersionAsFloat() {
+ NSString* osVersion = [[UIDevice currentDevice] systemVersion];
+ return osVersion.floatValue;
+}
+
+std::string GetDeviceType() {
+ NSString* deviceModel = [[UIDevice currentDevice] model];
+ return StdStringFromNSString(deviceModel);
+}
+
+std::string GetDeviceName() {
+ size_t size;
+ sysctlbyname("hw.machine", NULL, &size, NULL, 0);
+ rtc::scoped_ptr<char[]> machine;
+ machine.reset(new char[size]);
+ sysctlbyname("hw.machine", machine.get(), &size, NULL, 0);
+ std::string raw_name(machine.get());
+ if (!raw_name.compare("iPhone1,1"))
+ return std::string("iPhone 1G");
+ if (!raw_name.compare("iPhone1,2"))
+ return std::string("iPhone 3G");
+ if (!raw_name.compare("iPhone2,1"))
+ return std::string("iPhone 3GS");
+ if (!raw_name.compare("iPhone3,1"))
+ return std::string("iPhone 4");
+ if (!raw_name.compare("iPhone3,3"))
+ return std::string("Verizon iPhone 4");
+ if (!raw_name.compare("iPhone4,1"))
+ return std::string("iPhone 4S");
+ if (!raw_name.compare("iPhone5,1"))
+ return std::string("iPhone 5 (GSM)");
+ if (!raw_name.compare("iPhone5,2"))
+ return std::string("iPhone 5 (GSM+CDMA)");
+ if (!raw_name.compare("iPhone5,3"))
+ return std::string("iPhone 5c (GSM)");
+ if (!raw_name.compare("iPhone5,4"))
+ return std::string("iPhone 5c (GSM+CDMA)");
+ if (!raw_name.compare("iPhone6,1"))
+ return std::string("iPhone 5s (GSM)");
+ if (!raw_name.compare("iPhone6,2"))
+ return std::string("iPhone 5s (GSM+CDMA)");
+ if (!raw_name.compare("iPhone7,1"))
+ return std::string("iPhone 6 Plus");
+ if (!raw_name.compare("iPhone7,2"))
+ return std::string("iPhone 6");
+ if (!raw_name.compare("iPod1,1"))
+ return std::string("iPod Touch 1G");
+ if (!raw_name.compare("iPod2,1"))
+ return std::string("iPod Touch 2G");
+ if (!raw_name.compare("iPod3,1"))
+ return std::string("iPod Touch 3G");
+ if (!raw_name.compare("iPod4,1"))
+ return std::string("iPod Touch 4G");
+ if (!raw_name.compare("iPod5,1"))
+ return std::string("iPod Touch 5G");
+ if (!raw_name.compare("iPad1,1"))
+ return std::string("iPad");
+ if (!raw_name.compare("iPad2,1"))
+ return std::string("iPad 2 (WiFi)");
+ if (!raw_name.compare("iPad2,2"))
+ return std::string("iPad 2 (GSM)");
+ if (!raw_name.compare("iPad2,3"))
+ return std::string("iPad 2 (CDMA)");
+ if (!raw_name.compare("iPad2,4"))
+ return std::string("iPad 2 (WiFi)");
+ if (!raw_name.compare("iPad2,5"))
+ return std::string("iPad Mini (WiFi)");
+ if (!raw_name.compare("iPad2,6"))
+ return std::string("iPad Mini (GSM)");
+ if (!raw_name.compare("iPad2,7"))
+ return std::string("iPad Mini (GSM+CDMA)");
+ if (!raw_name.compare("iPad3,1"))
+ return std::string("iPad 3 (WiFi)");
+ if (!raw_name.compare("iPad3,2"))
+ return std::string("iPad 3 (GSM+CDMA)");
+ if (!raw_name.compare("iPad3,3"))
+ return std::string("iPad 3 (GSM)");
+ if (!raw_name.compare("iPad3,4"))
+ return std::string("iPad 4 (WiFi)");
+ if (!raw_name.compare("iPad3,5"))
+ return std::string("iPad 4 (GSM)");
+ if (!raw_name.compare("iPad3,6"))
+ return std::string("iPad 4 (GSM+CDMA)");
+ if (!raw_name.compare("iPad4,1"))
+ return std::string("iPad Air (WiFi)");
+ if (!raw_name.compare("iPad4,2"))
+ return std::string("iPad Air (Cellular)");
+ if (!raw_name.compare("iPad4,4"))
+ return std::string("iPad mini 2G (WiFi)");
+ if (!raw_name.compare("iPad4,5"))
+ return std::string("iPad mini 2G (Cellular)");
+ if (!raw_name.compare("i386"))
+ return std::string("Simulator");
+ if (!raw_name.compare("x86_64"))
+ return std::string("Simulator");
+ LOG(LS_WARNING) << "Failed to find device name";
+ return raw_name;
+}
+
+} // namespace ios
+} // namespace webrtc
+
+#endif // defined(WEBRTC_IOS)
diff --git a/chromium/third_party/webrtc/modules/utility/source/jvm_android.cc b/chromium/third_party/webrtc/modules/utility/source/jvm_android.cc
index 777b8d5fe71..648c1685eae 100644
--- a/chromium/third_party/webrtc/modules/utility/source/jvm_android.cc
+++ b/chromium/third_party/webrtc/modules/utility/source/jvm_android.cc
@@ -41,10 +41,10 @@ void LoadClasses(JNIEnv* jni) {
for (auto& c : loaded_classes) {
jclass localRef = FindClass(jni, c.name);
CHECK_EXCEPTION(jni) << "Error during FindClass: " << c.name;
- CHECK(localRef) << c.name;
+ RTC_CHECK(localRef) << c.name;
jclass globalRef = reinterpret_cast<jclass>(jni->NewGlobalRef(localRef));
CHECK_EXCEPTION(jni) << "Error during NewGlobalRef: " << c.name;
- CHECK(globalRef) << c.name;
+ RTC_CHECK(globalRef) << c.name;
c.clazz = globalRef;
}
}
@@ -61,7 +61,7 @@ jclass LookUpClass(const char* name) {
if (strcmp(c.name, name) == 0)
return c.clazz;
}
- CHECK(false) << "Unable to find class in lookup table";
+ RTC_CHECK(false) << "Unable to find class in lookup table";
return 0;
}
@@ -70,7 +70,7 @@ AttachCurrentThreadIfNeeded::AttachCurrentThreadIfNeeded()
: attached_(false) {
ALOGD("AttachCurrentThreadIfNeeded::ctor%s", GetThreadInfo().c_str());
JavaVM* jvm = JVM::GetInstance()->jvm();
- CHECK(jvm);
+ RTC_CHECK(jvm);
JNIEnv* jni = GetEnv(jvm);
if (!jni) {
ALOGD("Attaching thread to JVM");
@@ -82,11 +82,11 @@ AttachCurrentThreadIfNeeded::AttachCurrentThreadIfNeeded()
AttachCurrentThreadIfNeeded::~AttachCurrentThreadIfNeeded() {
ALOGD("AttachCurrentThreadIfNeeded::dtor%s", GetThreadInfo().c_str());
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
if (attached_) {
ALOGD("Detaching thread from JVM");
jint res = JVM::GetInstance()->jvm()->DetachCurrentThread();
- CHECK(res == JNI_OK) << "DetachCurrentThread failed: " << res;
+ RTC_CHECK(res == JNI_OK) << "DetachCurrentThread failed: " << res;
}
}
@@ -178,13 +178,13 @@ JNIEnvironment::JNIEnvironment(JNIEnv* jni) : jni_(jni) {
JNIEnvironment::~JNIEnvironment() {
ALOGD("JNIEnvironment::dtor%s", GetThreadInfo().c_str());
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
}
rtc::scoped_ptr<NativeRegistration> JNIEnvironment::RegisterNatives(
const char* name, const JNINativeMethod *methods, int num_methods) {
ALOGD("JNIEnvironment::RegisterNatives(%s)", name);
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
jclass clazz = LookUpClass(name);
jni_->RegisterNatives(clazz, methods, num_methods);
CHECK_EXCEPTION(jni_) << "Error during RegisterNatives";
@@ -193,7 +193,7 @@ rtc::scoped_ptr<NativeRegistration> JNIEnvironment::RegisterNatives(
}
std::string JNIEnvironment::JavaToStdString(const jstring& j_string) {
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
const char* jchars = jni_->GetStringUTFChars(j_string, nullptr);
CHECK_EXCEPTION(jni_);
const int size = jni_->GetStringUTFLength(j_string);
@@ -207,35 +207,35 @@ std::string JNIEnvironment::JavaToStdString(const jstring& j_string) {
// static
void JVM::Initialize(JavaVM* jvm, jobject context) {
ALOGD("JVM::Initialize%s", GetThreadInfo().c_str());
- CHECK(!g_jvm);
+ RTC_CHECK(!g_jvm);
g_jvm = new JVM(jvm, context);
}
// static
void JVM::Uninitialize() {
ALOGD("JVM::Uninitialize%s", GetThreadInfo().c_str());
- DCHECK(g_jvm);
+ RTC_DCHECK(g_jvm);
delete g_jvm;
g_jvm = nullptr;
}
// static
JVM* JVM::GetInstance() {
- DCHECK(g_jvm);
+ RTC_DCHECK(g_jvm);
return g_jvm;
}
JVM::JVM(JavaVM* jvm, jobject context)
: jvm_(jvm) {
ALOGD("JVM::JVM%s", GetThreadInfo().c_str());
- CHECK(jni()) << "AttachCurrentThread() must be called on this thread.";
+ RTC_CHECK(jni()) << "AttachCurrentThread() must be called on this thread.";
context_ = NewGlobalRef(jni(), context);
LoadClasses(jni());
}
JVM::~JVM() {
ALOGD("JVM::~JVM%s", GetThreadInfo().c_str());
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
FreeClassReferences(jni());
DeleteGlobalRef(jni(), context_);
}
@@ -257,7 +257,7 @@ rtc::scoped_ptr<JNIEnvironment> JVM::environment() {
JavaClass JVM::GetClass(const char* name) {
ALOGD("JVM::GetClass(%s)%s", name, GetThreadInfo().c_str());
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
return JavaClass(jni(), LookUpClass(name));
}
diff --git a/chromium/third_party/webrtc/modules/utility/source/process_thread_impl.cc b/chromium/third_party/webrtc/modules/utility/source/process_thread_impl.cc
index 4ce1675030a..df56fe39be6 100644
--- a/chromium/third_party/webrtc/modules/utility/source/process_thread_impl.cc
+++ b/chromium/third_party/webrtc/modules/utility/source/process_thread_impl.cc
@@ -25,12 +25,9 @@ const int64_t kCallProcessImmediately = -1;
int64_t GetNextCallbackTime(Module* module, int64_t time_now) {
int64_t interval = module->TimeUntilNextProcess();
- // Currently some implementations erroneously return error codes from
- // TimeUntilNextProcess(). So, as is, we correct that and log an error.
if (interval < 0) {
- LOG(LS_ERROR) << "TimeUntilNextProcess returned an invalid value "
- << interval;
- interval = 0;
+ // Falling behind, we should call the callback now.
+ return time_now;
}
return time_now + interval;
}
@@ -39,18 +36,21 @@ int64_t GetNextCallbackTime(Module* module, int64_t time_now) {
ProcessThread::~ProcessThread() {}
// static
-rtc::scoped_ptr<ProcessThread> ProcessThread::Create() {
- return rtc::scoped_ptr<ProcessThread>(new ProcessThreadImpl()).Pass();
+rtc::scoped_ptr<ProcessThread> ProcessThread::Create(
+ const char* thread_name) {
+ return rtc::scoped_ptr<ProcessThread>(new ProcessThreadImpl(thread_name))
+ .Pass();
}
-ProcessThreadImpl::ProcessThreadImpl()
- : wake_up_(EventWrapper::Create()), stop_(false) {
-}
+ProcessThreadImpl::ProcessThreadImpl(const char* thread_name)
+ : wake_up_(EventWrapper::Create()),
+ stop_(false),
+ thread_name_(thread_name) {}
ProcessThreadImpl::~ProcessThreadImpl() {
- DCHECK(thread_checker_.CalledOnValidThread());
- DCHECK(!thread_.get());
- DCHECK(!stop_);
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(!thread_.get());
+ RTC_DCHECK(!stop_);
while (!queue_.empty()) {
delete queue_.front();
@@ -59,12 +59,12 @@ ProcessThreadImpl::~ProcessThreadImpl() {
}
void ProcessThreadImpl::Start() {
- DCHECK(thread_checker_.CalledOnValidThread());
- DCHECK(!thread_.get());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(!thread_.get());
if (thread_.get())
return;
- DCHECK(!stop_);
+ RTC_DCHECK(!stop_);
{
// TODO(tommi): Since DeRegisterModule is currently being called from
@@ -76,13 +76,13 @@ void ProcessThreadImpl::Start() {
m.module->ProcessThreadAttached(this);
}
- thread_ = ThreadWrapper::CreateThread(
- &ProcessThreadImpl::Run, this, "ProcessThread");
- CHECK(thread_->Start());
+ thread_ = ThreadWrapper::CreateThread(&ProcessThreadImpl::Run, this,
+ thread_name_);
+ RTC_CHECK(thread_->Start());
}
void ProcessThreadImpl::Stop() {
- DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
if(!thread_.get())
return;
@@ -93,7 +93,7 @@ void ProcessThreadImpl::Stop() {
wake_up_->Set();
- CHECK(thread_->Stop());
+ RTC_CHECK(thread_->Stop());
stop_ = false;
// TODO(tommi): Since DeRegisterModule is currently being called from
@@ -130,15 +130,15 @@ void ProcessThreadImpl::PostTask(rtc::scoped_ptr<ProcessTask> task) {
}
void ProcessThreadImpl::RegisterModule(Module* module) {
- DCHECK(thread_checker_.CalledOnValidThread());
- DCHECK(module);
+ RTC_DCHECK(thread_checker_.CalledOnValidThread());
+ RTC_DCHECK(module);
#if (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON))
{
// Catch programmer error.
rtc::CritScope lock(&lock_);
for (const ModuleCallback& mc : modules_)
- DCHECK(mc.module != module);
+ RTC_DCHECK(mc.module != module);
}
#endif
@@ -162,7 +162,7 @@ void ProcessThreadImpl::RegisterModule(Module* module) {
void ProcessThreadImpl::DeRegisterModule(Module* module) {
// Allowed to be called on any thread.
// TODO(tommi): Disallow this ^^^
- DCHECK(module);
+ RTC_DCHECK(module);
{
rtc::CritScope lock(&lock_);
diff --git a/chromium/third_party/webrtc/modules/utility/source/process_thread_impl.h b/chromium/third_party/webrtc/modules/utility/source/process_thread_impl.h
index 1fd2bf3adc5..5101ea9ad00 100644
--- a/chromium/third_party/webrtc/modules/utility/source/process_thread_impl.h
+++ b/chromium/third_party/webrtc/modules/utility/source/process_thread_impl.h
@@ -25,7 +25,7 @@ namespace webrtc {
class ProcessThreadImpl : public ProcessThread {
public:
- ProcessThreadImpl();
+ explicit ProcessThreadImpl(const char* thread_name);
~ProcessThreadImpl() override;
void Start() override;
@@ -76,6 +76,7 @@ class ProcessThreadImpl : public ProcessThread {
// TODO(tommi): Support delayed tasks.
std::queue<ProcessTask*> queue_;
bool stop_;
+ const char* thread_name_;
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/utility/source/process_thread_impl_unittest.cc b/chromium/third_party/webrtc/modules/utility/source/process_thread_impl_unittest.cc
index cd1f956dd59..457a3693168 100644
--- a/chromium/third_party/webrtc/modules/utility/source/process_thread_impl_unittest.cc
+++ b/chromium/third_party/webrtc/modules/utility/source/process_thread_impl_unittest.cc
@@ -52,13 +52,13 @@ ACTION_P(SetTimestamp, ptr) {
}
TEST(ProcessThreadImpl, StartStop) {
- ProcessThreadImpl thread;
+ ProcessThreadImpl thread("ProcessThread");
thread.Start();
thread.Stop();
}
TEST(ProcessThreadImpl, MultipleStartStop) {
- ProcessThreadImpl thread;
+ ProcessThreadImpl thread("ProcessThread");
for (int i = 0; i < 5; ++i) {
thread.Start();
thread.Stop();
@@ -67,7 +67,7 @@ TEST(ProcessThreadImpl, MultipleStartStop) {
// Verifies that we get at least call back to Process() on the worker thread.
TEST(ProcessThreadImpl, ProcessCall) {
- ProcessThreadImpl thread;
+ ProcessThreadImpl thread("ProcessThread");
thread.Start();
rtc::scoped_ptr<EventWrapper> event(EventWrapper::Create());
@@ -89,7 +89,7 @@ TEST(ProcessThreadImpl, ProcessCall) {
// Same as ProcessCall except the module is registered before the
// call to Start().
TEST(ProcessThreadImpl, ProcessCall2) {
- ProcessThreadImpl thread;
+ ProcessThreadImpl thread("ProcessThread");
rtc::scoped_ptr<EventWrapper> event(EventWrapper::Create());
MockModule module;
@@ -111,7 +111,7 @@ TEST(ProcessThreadImpl, ProcessCall2) {
// Tests setting up a module for callbacks and then unregister that module.
// After unregistration, we should not receive any further callbacks.
TEST(ProcessThreadImpl, Deregister) {
- ProcessThreadImpl thread;
+ ProcessThreadImpl thread("ProcessThread");
rtc::scoped_ptr<EventWrapper> event(EventWrapper::Create());
int process_count = 0;
@@ -146,7 +146,7 @@ TEST(ProcessThreadImpl, Deregister) {
// time. There's some variance of timing built into it to reduce chance of
// flakiness on bots.
void ProcessCallAfterAFewMs(int64_t milliseconds) {
- ProcessThreadImpl thread;
+ ProcessThreadImpl thread("ProcessThread");
thread.Start();
rtc::scoped_ptr<EventWrapper> event(EventWrapper::Create());
@@ -211,7 +211,7 @@ TEST(ProcessThreadImpl, DISABLED_ProcessCallAfter200ms) {
// build bots.
// TODO(tommi): Fix.
TEST(ProcessThreadImpl, DISABLED_Process50Times) {
- ProcessThreadImpl thread;
+ ProcessThreadImpl thread("ProcessThread");
thread.Start();
rtc::scoped_ptr<EventWrapper> event(EventWrapper::Create());
@@ -244,7 +244,7 @@ TEST(ProcessThreadImpl, DISABLED_Process50Times) {
// Tests that we can wake up the worker thread to give us a callback right
// away when we know the thread is sleeping.
TEST(ProcessThreadImpl, WakeUp) {
- ProcessThreadImpl thread;
+ ProcessThreadImpl thread("ProcessThread");
thread.Start();
rtc::scoped_ptr<EventWrapper> started(EventWrapper::Create());
@@ -292,7 +292,7 @@ TEST(ProcessThreadImpl, WakeUp) {
// Tests that we can post a task that gets run straight away on the worker
// thread.
TEST(ProcessThreadImpl, PostTask) {
- ProcessThreadImpl thread;
+ ProcessThreadImpl thread("ProcessThread");
rtc::scoped_ptr<EventWrapper> task_ran(EventWrapper::Create());
rtc::scoped_ptr<RaiseEventTask> task(new RaiseEventTask(task_ran.get()));
thread.Start();
diff --git a/chromium/third_party/webrtc/modules/utility/utility.gypi b/chromium/third_party/webrtc/modules/utility/utility.gypi
index 1a203bf773e..38c9e3ebd96 100644
--- a/chromium/third_party/webrtc/modules/utility/utility.gypi
+++ b/chromium/third_party/webrtc/modules/utility/utility.gypi
@@ -22,6 +22,7 @@
'interface/file_player.h',
'interface/file_recorder.h',
'interface/helpers_android.h',
+ 'interface/helpers_ios.h',
'interface/jvm_android.h',
'interface/process_thread.h',
'source/audio_frame_operations.cc',
@@ -32,6 +33,7 @@
'source/file_recorder_impl.cc',
'source/file_recorder_impl.h',
'source/helpers_android.cc',
+ 'source/helpers_ios.mm',
'source/jvm_android.cc',
'source/process_thread_impl.cc',
'source/process_thread_impl.h',
diff --git a/chromium/third_party/webrtc/modules/video_capture/BUILD.gn b/chromium/third_party/webrtc/modules/video_capture/BUILD.gn
index f29e5b6c679..b0ed6f4e6ce 100644
--- a/chromium/third_party/webrtc/modules/video_capture/BUILD.gn
+++ b/chromium/third_party/webrtc/modules/video_capture/BUILD.gn
@@ -130,21 +130,6 @@ if (!build_with_chromium) {
deps += [ "//third_party/winsdk_samples" ]
}
- if (is_android) {
- sources = [
- "android/device_info_android.cc",
- "android/device_info_android.h",
- "android/video_capture_android.cc",
- "android/video_capture_android.h",
- ]
-
- if (rtc_build_json) {
- deps += [ "//third_party/jsoncpp" ]
- }
- if (rtc_build_icu) {
- deps += [ "//third_party/icu:icuuc" ]
- }
- }
if (is_ios) {
sources = [
"ios/device_info_ios.h",
diff --git a/chromium/third_party/webrtc/modules/video_capture/android/device_info_android.cc b/chromium/third_party/webrtc/modules/video_capture/android/device_info_android.cc
deleted file mode 100644
index 02075d5e194..00000000000
--- a/chromium/third_party/webrtc/modules/video_capture/android/device_info_android.cc
+++ /dev/null
@@ -1,256 +0,0 @@
-/*
- * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "webrtc/modules/video_capture/android/device_info_android.h"
-
-#include <algorithm>
-#include <sstream>
-#include <vector>
-
-#include "json/json.h"
-#include "unicode/unistr.h"
-#include "webrtc/modules/video_capture/android/video_capture_android.h"
-#include "webrtc/system_wrappers/interface/logging.h"
-#include "webrtc/system_wrappers/interface/ref_count.h"
-#include "webrtc/system_wrappers/interface/trace.h"
-
-namespace webrtc {
-
-namespace videocapturemodule {
-
-// Helper for storing lists of pairs of ints. Used e.g. for resolutions & FPS
-// ranges.
-typedef std::pair<int, int> IntPair;
-typedef std::vector<IntPair> IntPairs;
-
-static std::string IntPairsToString(const IntPairs& pairs, char separator) {
- std::stringstream stream;
- for (size_t i = 0; i < pairs.size(); ++i) {
- if (i > 0)
- stream << ", ";
- stream << "(" << pairs[i].first << separator << pairs[i].second << ")";
- }
- return stream.str();
-}
-
-struct AndroidCameraInfo {
- std::string name;
- bool front_facing;
- int orientation;
- IntPairs resolutions; // Pairs are: (width,height).
- // Pairs are (min,max) in units of FPS*1000 ("milli-frame-per-second").
- IntPairs mfpsRanges;
-
- std::string ToString() {
- std::stringstream stream;
- stream << "Name: [" << name << "], MFPS ranges: ["
- << IntPairsToString(mfpsRanges, ':')
- << "], front_facing: " << front_facing
- << ", orientation: " << orientation << ", resolutions: ["
- << IntPairsToString(resolutions, 'x') << "]";
- return stream.str();
- }
-};
-
-// Camera info; populated during DeviceInfoAndroid::Initialize() and immutable
-// thereafter.
-static std::vector<AndroidCameraInfo>* g_camera_info = NULL;
-
-// Set |*index| to the index of |name| in g_camera_info or return false if no
-// match found.
-static bool FindCameraIndexByName(const std::string& name, size_t* index) {
- for (size_t i = 0; i < g_camera_info->size(); ++i) {
- if (g_camera_info->at(i).name == name) {
- *index = i;
- return true;
- }
- }
- return false;
-}
-
-// Returns a pointer to the named member of g_camera_info, or NULL if no match
-// is found.
-static AndroidCameraInfo* FindCameraInfoByName(const std::string& name) {
- size_t index = 0;
- if (FindCameraIndexByName(name, &index))
- return &g_camera_info->at(index);
- return NULL;
-}
-
-// static
-void DeviceInfoAndroid::Initialize(JNIEnv* jni) {
- // TODO(henrike): this "if" would make a lot more sense as an assert, but
- // Java_org_webrtc_videoengineapp_ViEAndroidJavaAPI_GetVideoEngine() and
- // Java_org_webrtc_videoengineapp_ViEAndroidJavaAPI_Terminate() conspire to
- // prevent this. Once that code is made to only
- // VideoEngine::SetAndroidObjects() once per process, this can turn into an
- // assert.
- if (g_camera_info)
- return;
-
- g_camera_info = new std::vector<AndroidCameraInfo>();
- jclass j_info_class =
- jni->FindClass("org/webrtc/videoengine/VideoCaptureDeviceInfoAndroid");
- assert(j_info_class);
- jmethodID j_initialize = jni->GetStaticMethodID(
- j_info_class, "getDeviceInfo", "()Ljava/lang/String;");
- jstring j_json_info = static_cast<jstring>(
- jni->CallStaticObjectMethod(j_info_class, j_initialize));
-
- const jchar* jchars = jni->GetStringChars(j_json_info, NULL);
- icu::UnicodeString ustr(jchars, jni->GetStringLength(j_json_info));
- jni->ReleaseStringChars(j_json_info, jchars);
- std::string json_info;
- ustr.toUTF8String(json_info);
-
- Json::Value cameras;
- Json::Reader reader(Json::Features::strictMode());
- bool parsed = reader.parse(json_info, cameras);
- if (!parsed) {
- std::stringstream stream;
- stream << "Failed to parse configuration:\n"
- << reader.getFormattedErrorMessages();
- assert(false);
- return;
- }
- for (Json::ArrayIndex i = 0; i < cameras.size(); ++i) {
- const Json::Value& camera = cameras[i];
- AndroidCameraInfo info;
- info.name = camera["name"].asString();
- info.front_facing = camera["front_facing"].asBool();
- info.orientation = camera["orientation"].asInt();
- Json::Value sizes = camera["sizes"];
- for (Json::ArrayIndex j = 0; j < sizes.size(); ++j) {
- const Json::Value& size = sizes[j];
- info.resolutions.push_back(std::make_pair(
- size["width"].asInt(), size["height"].asInt()));
- }
- Json::Value mfpsRanges = camera["mfpsRanges"];
- for (Json::ArrayIndex j = 0; j < mfpsRanges.size(); ++j) {
- const Json::Value& mfpsRange = mfpsRanges[j];
- info.mfpsRanges.push_back(std::make_pair(mfpsRange["min_mfps"].asInt(),
- mfpsRange["max_mfps"].asInt()));
- }
- g_camera_info->push_back(info);
- }
-}
-
-void DeviceInfoAndroid::DeInitialize() {
- if (g_camera_info) {
- delete g_camera_info;
- g_camera_info = NULL;
- }
-}
-
-VideoCaptureModule::DeviceInfo* VideoCaptureImpl::CreateDeviceInfo(
- const int32_t id) {
- return new videocapturemodule::DeviceInfoAndroid(id);
-}
-
-DeviceInfoAndroid::DeviceInfoAndroid(const int32_t id) :
- DeviceInfoImpl(id) {
-}
-
-DeviceInfoAndroid::~DeviceInfoAndroid() {
-}
-
-bool DeviceInfoAndroid::FindCameraIndex(const char* deviceUniqueIdUTF8,
- size_t* index) {
- return FindCameraIndexByName(deviceUniqueIdUTF8, index);
-}
-
-int32_t DeviceInfoAndroid::Init() {
- return 0;
-}
-
-uint32_t DeviceInfoAndroid::NumberOfDevices() {
- return g_camera_info->size();
-}
-
-int32_t DeviceInfoAndroid::GetDeviceName(
- uint32_t deviceNumber,
- char* deviceNameUTF8,
- uint32_t deviceNameLength,
- char* deviceUniqueIdUTF8,
- uint32_t deviceUniqueIdUTF8Length,
- char* /*productUniqueIdUTF8*/,
- uint32_t /*productUniqueIdUTF8Length*/) {
- if (deviceNumber >= g_camera_info->size())
- return -1;
- const AndroidCameraInfo& info = g_camera_info->at(deviceNumber);
- if (info.name.length() + 1 > deviceNameLength ||
- info.name.length() + 1 > deviceUniqueIdUTF8Length) {
- return -1;
- }
- memcpy(deviceNameUTF8, info.name.c_str(), info.name.length() + 1);
- memcpy(deviceUniqueIdUTF8, info.name.c_str(), info.name.length() + 1);
- return 0;
-}
-
-int32_t DeviceInfoAndroid::CreateCapabilityMap(
- const char* deviceUniqueIdUTF8) {
- _captureCapabilities.clear();
- const AndroidCameraInfo* info = FindCameraInfoByName(deviceUniqueIdUTF8);
- if (info == NULL)
- return -1;
-
- for (size_t i = 0; i < info->resolutions.size(); ++i) {
- for (size_t j = 0; j < info->mfpsRanges.size(); ++j) {
- const IntPair& size = info->resolutions[i];
- const IntPair& mfpsRange = info->mfpsRanges[j];
- VideoCaptureCapability cap;
- cap.width = size.first;
- cap.height = size.second;
- cap.maxFPS = mfpsRange.second / 1000;
- cap.expectedCaptureDelay = kExpectedCaptureDelay;
- cap.rawType = kVideoNV21;
- _captureCapabilities.push_back(cap);
- }
- }
- return _captureCapabilities.size();
-}
-
-int32_t DeviceInfoAndroid::GetOrientation(const char* deviceUniqueIdUTF8,
- VideoRotation& orientation) {
- const AndroidCameraInfo* info = FindCameraInfoByName(deviceUniqueIdUTF8);
- if (info == NULL ||
- VideoCaptureImpl::RotationFromDegrees(info->orientation,
- &orientation) != 0) {
- return -1;
- }
- return 0;
-}
-
-void DeviceInfoAndroid::GetMFpsRange(const char* deviceUniqueIdUTF8,
- int max_fps_to_match,
- int* min_mfps, int* max_mfps) {
- const AndroidCameraInfo* info = FindCameraInfoByName(deviceUniqueIdUTF8);
- if (info == NULL)
- return;
- int desired_mfps = max_fps_to_match * 1000;
- int best_diff_mfps = 0;
- LOG(LS_INFO) << "Search for best target mfps " << desired_mfps;
- // Search for best fps range with preference shifted to constant fps modes.
- for (size_t i = 0; i < info->mfpsRanges.size(); ++i) {
- int diff_mfps = abs(info->mfpsRanges[i].first - desired_mfps) +
- abs(info->mfpsRanges[i].second - desired_mfps) +
- (info->mfpsRanges[i].second - info->mfpsRanges[i].first) / 2;
- LOG(LS_INFO) << "Fps range " << info->mfpsRanges[i].first << ":" <<
- info->mfpsRanges[i].second << ". Distance: " << diff_mfps;
- if (i == 0 || diff_mfps < best_diff_mfps) {
- best_diff_mfps = diff_mfps;
- *min_mfps = info->mfpsRanges[i].first;
- *max_mfps = info->mfpsRanges[i].second;
- }
- }
-}
-
-} // namespace videocapturemodule
-} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/video_capture/android/device_info_android.h b/chromium/third_party/webrtc/modules/video_capture/android/device_info_android.h
deleted file mode 100644
index 581312bcb31..00000000000
--- a/chromium/third_party/webrtc/modules/video_capture/android/device_info_android.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_VIDEO_CAPTURE_MAIN_SOURCE_ANDROID_DEVICE_INFO_ANDROID_H_
-#define WEBRTC_MODULES_VIDEO_CAPTURE_MAIN_SOURCE_ANDROID_DEVICE_INFO_ANDROID_H_
-
-#include <jni.h>
-
-#include "webrtc/modules/video_capture/device_info_impl.h"
-#include "webrtc/modules/video_capture/video_capture_impl.h"
-
-namespace webrtc
-{
-namespace videocapturemodule
-{
-
-class DeviceInfoAndroid : public DeviceInfoImpl {
- public:
- static void Initialize(JNIEnv* env);
- static void DeInitialize();
-
- DeviceInfoAndroid(int32_t id);
- virtual ~DeviceInfoAndroid();
-
- // Set |*index| to the index of the camera matching |deviceUniqueIdUTF8|, or
- // return false if no match.
- bool FindCameraIndex(const char* deviceUniqueIdUTF8, size_t* index);
-
- virtual int32_t Init();
- virtual uint32_t NumberOfDevices();
- virtual int32_t GetDeviceName(
- uint32_t deviceNumber,
- char* deviceNameUTF8,
- uint32_t deviceNameLength,
- char* deviceUniqueIdUTF8,
- uint32_t deviceUniqueIdUTF8Length,
- char* productUniqueIdUTF8 = 0,
- uint32_t productUniqueIdUTF8Length = 0);
- virtual int32_t CreateCapabilityMap(const char* deviceUniqueIdUTF8);
-
- virtual int32_t DisplayCaptureSettingsDialogBox(
- const char* /*deviceUniqueIdUTF8*/,
- const char* /*dialogTitleUTF8*/,
- void* /*parentWindow*/,
- uint32_t /*positionX*/,
- uint32_t /*positionY*/) { return -1; }
- virtual int32_t GetOrientation(const char* deviceUniqueIdUTF8,
- VideoRotation& orientation);
-
- // Populate |min_mfps| and |max_mfps| with the closest supported range of the
- // device to |max_fps_to_match|.
- void GetMFpsRange(const char* deviceUniqueIdUTF8,
- int max_fps_to_match,
- int* min_mfps,
- int* max_mfps);
-
- private:
- enum { kExpectedCaptureDelay = 190};
-};
-
-} // namespace videocapturemodule
-} // namespace webrtc
-
-#endif // WEBRTC_MODULES_VIDEO_CAPTURE_MAIN_SOURCE_ANDROID_DEVICE_INFO_ANDROID_H_
diff --git a/chromium/third_party/webrtc/modules/video_capture/android/video_capture_android.cc b/chromium/third_party/webrtc/modules/video_capture/android/video_capture_android.cc
deleted file mode 100644
index 272cec42f9f..00000000000
--- a/chromium/third_party/webrtc/modules/video_capture/android/video_capture_android.cc
+++ /dev/null
@@ -1,255 +0,0 @@
-/*
- * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "webrtc/modules/video_capture/android/video_capture_android.h"
-
-#include "webrtc/base/common.h"
-#include "webrtc/modules/utility/interface/helpers_android.h"
-#include "webrtc/modules/video_capture/android/device_info_android.h"
-#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
-#include "webrtc/system_wrappers/interface/logcat_trace_context.h"
-#include "webrtc/system_wrappers/interface/logging.h"
-#include "webrtc/system_wrappers/interface/ref_count.h"
-#include "webrtc/system_wrappers/interface/trace.h"
-
-static JavaVM* g_jvm = NULL;
-static jclass g_java_capturer_class = NULL; // VideoCaptureAndroid.class.
-static jobject g_context = NULL; // Owned android.content.Context.
-
-namespace webrtc {
-
-// Called by Java to get the global application context.
-jobject JNICALL GetContext(JNIEnv* env, jclass) {
- assert(g_context);
- return g_context;
-}
-
-// Called by Java when the camera has a new frame to deliver.
-void JNICALL ProvideCameraFrame(
- JNIEnv* env,
- jobject,
- jbyteArray javaCameraFrame,
- jint length,
- jint rotation,
- jlong timeStamp,
- jlong context) {
- webrtc::videocapturemodule::VideoCaptureAndroid* captureModule =
- reinterpret_cast<webrtc::videocapturemodule::VideoCaptureAndroid*>(
- context);
- jbyte* cameraFrame = env->GetByteArrayElements(javaCameraFrame, NULL);
- captureModule->OnIncomingFrame(
- reinterpret_cast<uint8_t*>(cameraFrame), length, rotation, 0);
- env->ReleaseByteArrayElements(javaCameraFrame, cameraFrame, JNI_ABORT);
-}
-
-int32_t SetCaptureAndroidVM(JavaVM* javaVM, jobject context) {
- if (javaVM) {
- assert(!g_jvm);
- g_jvm = javaVM;
- AttachThreadScoped ats(g_jvm);
- g_context = ats.env()->NewGlobalRef(context);
-
- videocapturemodule::DeviceInfoAndroid::Initialize(ats.env());
-
- jclass j_capture_class =
- ats.env()->FindClass("org/webrtc/videoengine/VideoCaptureAndroid");
- assert(j_capture_class);
- g_java_capturer_class =
- reinterpret_cast<jclass>(ats.env()->NewGlobalRef(j_capture_class));
- assert(g_java_capturer_class);
-
- JNINativeMethod native_methods[] = {
- {"GetContext",
- "()Landroid/content/Context;",
- reinterpret_cast<void*>(&GetContext)},
- {"ProvideCameraFrame",
- "([BIIJJ)V",
- reinterpret_cast<void*>(&ProvideCameraFrame)}};
- if (ats.env()->RegisterNatives(g_java_capturer_class,
- native_methods, 2) != 0)
- assert(false);
- } else {
- if (g_jvm) {
- AttachThreadScoped ats(g_jvm);
- ats.env()->UnregisterNatives(g_java_capturer_class);
- ats.env()->DeleteGlobalRef(g_java_capturer_class);
- g_java_capturer_class = NULL;
- ats.env()->DeleteGlobalRef(g_context);
- g_context = NULL;
- videocapturemodule::DeviceInfoAndroid::DeInitialize();
- g_jvm = NULL;
- }
- }
-
- return 0;
-}
-
-namespace videocapturemodule {
-
-VideoCaptureModule* VideoCaptureImpl::Create(
- const int32_t id,
- const char* deviceUniqueIdUTF8) {
- RefCountImpl<videocapturemodule::VideoCaptureAndroid>* implementation =
- new RefCountImpl<videocapturemodule::VideoCaptureAndroid>(id);
- if (implementation->Init(id, deviceUniqueIdUTF8) != 0) {
- delete implementation;
- implementation = NULL;
- }
- return implementation;
-}
-
-int32_t VideoCaptureAndroid::OnIncomingFrame(uint8_t* videoFrame,
- size_t videoFrameLength,
- int32_t degrees,
- int64_t captureTime) {
- if (!_captureStarted)
- return 0;
- VideoRotation current_rotation =
- (degrees <= 45 || degrees > 315) ? kVideoRotation_0 :
- (degrees > 45 && degrees <= 135) ? kVideoRotation_90 :
- (degrees > 135 && degrees <= 225) ? kVideoRotation_180 :
- (degrees > 225 && degrees <= 315) ? kVideoRotation_270 :
- kVideoRotation_0; // Impossible.
- if (_rotation != current_rotation) {
- LOG(LS_INFO) << "New camera rotation: " << degrees;
- _rotation = current_rotation;
- int32_t status = VideoCaptureImpl::SetCaptureRotation(_rotation);
- if (status != 0)
- return status;
- }
- return IncomingFrame(
- videoFrame, videoFrameLength, _captureCapability, captureTime);
-}
-
-VideoCaptureAndroid::VideoCaptureAndroid(const int32_t id)
- : VideoCaptureImpl(id),
- _deviceInfo(id),
- _jCapturer(NULL),
- _captureStarted(false) {
-}
-
-int32_t VideoCaptureAndroid::Init(const int32_t id,
- const char* deviceUniqueIdUTF8) {
- const int nameLength = strlen(deviceUniqueIdUTF8);
- if (nameLength >= kVideoCaptureUniqueNameLength)
- return -1;
-
- // Store the device name
- LOG(LS_INFO) << "VideoCaptureAndroid::Init: " << deviceUniqueIdUTF8;
- size_t camera_id = 0;
- if (!_deviceInfo.FindCameraIndex(deviceUniqueIdUTF8, &camera_id))
- return -1;
- _deviceUniqueId = new char[nameLength + 1];
- memcpy(_deviceUniqueId, deviceUniqueIdUTF8, nameLength + 1);
-
- AttachThreadScoped ats(g_jvm);
- JNIEnv* env = ats.env();
- jmethodID ctor = env->GetMethodID(g_java_capturer_class, "<init>", "(IJ)V");
- assert(ctor);
- jlong j_this = reinterpret_cast<intptr_t>(this);
- _jCapturer = env->NewGlobalRef(
- env->NewObject(g_java_capturer_class, ctor, camera_id, j_this));
- assert(_jCapturer);
- _rotation = kVideoRotation_0;
- return 0;
-}
-
-VideoCaptureAndroid::~VideoCaptureAndroid() {
- // Ensure Java camera is released even if our caller didn't explicitly Stop.
- if (_captureStarted)
- StopCapture();
- AttachThreadScoped ats(g_jvm);
- ats.env()->DeleteGlobalRef(_jCapturer);
-}
-
-int32_t VideoCaptureAndroid::StartCapture(
- const VideoCaptureCapability& capability) {
- CriticalSectionScoped cs(&_apiCs);
- AttachThreadScoped ats(g_jvm);
- JNIEnv* env = ats.env();
-
- if (_deviceInfo.GetBestMatchedCapability(
- _deviceUniqueId, capability, _captureCapability) < 0) {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, -1,
- "%s: GetBestMatchedCapability failed: %dx%d",
- __FUNCTION__, capability.width, capability.height);
- return -1;
- }
-
- _captureDelay = _captureCapability.expectedCaptureDelay;
-
- jmethodID j_start =
- env->GetMethodID(g_java_capturer_class, "startCapture", "(IIII)Z");
- assert(j_start);
- int min_mfps = 0;
- int max_mfps = 0;
- _deviceInfo.GetMFpsRange(_deviceUniqueId, _captureCapability.maxFPS,
- &min_mfps, &max_mfps);
- bool started = env->CallBooleanMethod(_jCapturer, j_start,
- _captureCapability.width,
- _captureCapability.height,
- min_mfps, max_mfps);
- if (started) {
- _requestedCapability = capability;
- _captureStarted = true;
- }
- return started ? 0 : -1;
-}
-
-int32_t VideoCaptureAndroid::StopCapture() {
- _apiCs.Enter();
- AttachThreadScoped ats(g_jvm);
- JNIEnv* env = ats.env();
-
- memset(&_requestedCapability, 0, sizeof(_requestedCapability));
- memset(&_captureCapability, 0, sizeof(_captureCapability));
- _captureStarted = false;
- // Exit critical section to avoid blocking camera thread inside
- // onIncomingFrame() call.
- _apiCs.Leave();
-
- jmethodID j_stop =
- env->GetMethodID(g_java_capturer_class, "stopCapture", "()Z");
- return env->CallBooleanMethod(_jCapturer, j_stop) ? 0 : -1;
-}
-
-bool VideoCaptureAndroid::CaptureStarted() {
- CriticalSectionScoped cs(&_apiCs);
- return _captureStarted;
-}
-
-int32_t VideoCaptureAndroid::CaptureSettings(
- VideoCaptureCapability& settings) {
- CriticalSectionScoped cs(&_apiCs);
- settings = _requestedCapability;
- return 0;
-}
-
-int32_t VideoCaptureAndroid::SetCaptureRotation(VideoRotation rotation) {
- int32_t status = VideoCaptureImpl::SetCaptureRotation(rotation);
- if (status != 0)
- return status;
-
- AttachThreadScoped ats(g_jvm);
- JNIEnv* env = ats.env();
-
- jmethodID j_spr =
- env->GetMethodID(g_java_capturer_class, "setPreviewRotation", "(I)V");
- assert(j_spr);
- int rotation_degrees;
- if (RotationInDegrees(rotation, &rotation_degrees) != 0) {
- assert(false);
- }
- env->CallVoidMethod(_jCapturer, j_spr, rotation_degrees);
- return 0;
-}
-
-} // namespace videocapturemodule
-} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/video_capture/android/video_capture_android.h b/chromium/third_party/webrtc/modules/video_capture/android/video_capture_android.h
deleted file mode 100644
index 8c1e7d3c8ba..00000000000
--- a/chromium/third_party/webrtc/modules/video_capture/android/video_capture_android.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_VIDEO_CAPTURE_MAIN_SOURCE_ANDROID_VIDEO_CAPTURE_ANDROID_H_
-#define WEBRTC_MODULES_VIDEO_CAPTURE_MAIN_SOURCE_ANDROID_VIDEO_CAPTURE_ANDROID_H_
-
-#include <jni.h>
-
-#include "webrtc/modules/video_capture/android/device_info_android.h"
-#include "webrtc/modules/video_capture/video_capture_impl.h"
-
-namespace webrtc {
-namespace videocapturemodule {
-
-class VideoCaptureAndroid : public VideoCaptureImpl {
- public:
- VideoCaptureAndroid(const int32_t id);
- virtual int32_t Init(const int32_t id, const char* deviceUniqueIdUTF8);
-
- virtual int32_t StartCapture(const VideoCaptureCapability& capability);
- virtual int32_t StopCapture();
- virtual bool CaptureStarted();
- virtual int32_t CaptureSettings(VideoCaptureCapability& settings);
- virtual int32_t SetCaptureRotation(VideoRotation rotation);
-
- int32_t OnIncomingFrame(uint8_t* videoFrame,
- size_t videoFrameLength,
- int32_t degrees,
- int64_t captureTime = 0);
-
- protected:
- virtual ~VideoCaptureAndroid();
-
- DeviceInfoAndroid _deviceInfo;
- jobject _jCapturer; // Global ref to Java VideoCaptureAndroid object.
- VideoCaptureCapability _captureCapability;
- VideoRotation _rotation;
- bool _captureStarted;
-};
-
-} // namespace videocapturemodule
-} // namespace webrtc
-#endif // WEBRTC_MODULES_VIDEO_CAPTURE_MAIN_SOURCE_ANDROID_VIDEO_CAPTURE_ANDROID_H_
diff --git a/chromium/third_party/webrtc/modules/video_capture/ensure_initialized.cc b/chromium/third_party/webrtc/modules/video_capture/ensure_initialized.cc
deleted file mode 100644
index 9d43d9f1bdf..00000000000
--- a/chromium/third_party/webrtc/modules/video_capture/ensure_initialized.cc
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-// Platform-specific initialization bits, if any, go here.
-
-#ifndef ANDROID
-
-namespace webrtc {
-namespace videocapturemodule {
-void EnsureInitialized() {}
-} // namespace videocapturemodule
-} // namespace webrtc
-
-#else
-
-#include <pthread.h>
-
-#include "base/android/jni_android.h"
-#include "webrtc/base/checks.h"
-#include "webrtc/modules/video_capture/video_capture_internal.h"
-
-namespace webrtc {
-namespace videocapturemodule {
-
-static pthread_once_t g_initialize_once = PTHREAD_ONCE_INIT;
-
-void EnsureInitializedOnce() {
- JNIEnv* jni = ::base::android::AttachCurrentThread();
- jobject context = ::base::android::GetApplicationContext();
- JavaVM* jvm = NULL;
- CHECK_EQ(0, jni->GetJavaVM(&jvm));
- CHECK_EQ(0, webrtc::SetCaptureAndroidVM(jvm, context));
-}
-
-void EnsureInitialized() {
- CHECK_EQ(0, pthread_once(&g_initialize_once, &EnsureInitializedOnce));
-}
-
-} // namespace videocapturemodule
-} // namespace webrtc
-
-#endif // !ANDROID
diff --git a/chromium/third_party/webrtc/modules/video_capture/linux/device_info_linux.cc b/chromium/third_party/webrtc/modules/video_capture/linux/device_info_linux.cc
index aac85d1d794..10c0981e34a 100644
--- a/chromium/third_party/webrtc/modules/video_capture/linux/device_info_linux.cc
+++ b/chromium/third_party/webrtc/modules/video_capture/linux/device_info_linux.cc
@@ -257,11 +257,12 @@ int32_t DeviceInfoLinux::FillCapabilities(int fd)
video_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
video_fmt.fmt.pix.sizeimage = 0;
- int totalFmts = 3;
+ int totalFmts = 4;
unsigned int videoFormats[] = {
V4L2_PIX_FMT_MJPEG,
V4L2_PIX_FMT_YUV420,
- V4L2_PIX_FMT_YUYV };
+ V4L2_PIX_FMT_YUYV,
+ V4L2_PIX_FMT_UYVY };
int sizes = 13;
unsigned int size[][2] = { { 128, 96 }, { 160, 120 }, { 176, 144 },
@@ -300,6 +301,10 @@ int32_t DeviceInfoLinux::FillCapabilities(int fd)
{
cap.rawType = kVideoMJPEG;
}
+ else if (videoFormats[fmts] == V4L2_PIX_FMT_UYVY)
+ {
+ cap.rawType = kVideoUYVY;
+ }
// get fps of current camera mode
// V4l2 does not have a stable method of knowing so we just guess.
diff --git a/chromium/third_party/webrtc/modules/video_capture/test/video_capture_unittest.cc b/chromium/third_party/webrtc/modules/video_capture/test/video_capture_unittest.cc
index 6d9b112bc55..81380f438c5 100644
--- a/chromium/third_party/webrtc/modules/video_capture/test/video_capture_unittest.cc
+++ b/chromium/third_party/webrtc/modules/video_capture/test/video_capture_unittest.cc
@@ -18,7 +18,6 @@
#include "webrtc/base/scoped_ref_ptr.h"
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
#include "webrtc/modules/utility/interface/process_thread.h"
-#include "webrtc/modules/video_capture/ensure_initialized.h"
#include "webrtc/modules/video_capture/include/video_capture.h"
#include "webrtc/modules/video_capture/include/video_capture_factory.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
@@ -235,7 +234,6 @@ class VideoCaptureTest : public testing::Test {
VideoCaptureTest() : number_of_devices_(0) {}
void SetUp() {
- webrtc::videocapturemodule::EnsureInitialized();
device_info_.reset(VideoCaptureFactory::CreateDeviceInfo(0));
assert(device_info_.get());
number_of_devices_ = device_info_->NumberOfDevices();
@@ -429,7 +427,7 @@ class VideoCaptureExternalTest : public testing::Test {
public:
void SetUp() {
capture_module_ = VideoCaptureFactory::Create(0, capture_input_interface_);
- process_module_ = webrtc::ProcessThread::Create();
+ process_module_ = webrtc::ProcessThread::Create("ProcessThread");
process_module_->Start();
process_module_->RegisterModule(capture_module_);
diff --git a/chromium/third_party/webrtc/modules/video_capture/video_capture.gypi b/chromium/third_party/webrtc/modules/video_capture/video_capture.gypi
index 9163c1cd254..f552df7758d 100644
--- a/chromium/third_party/webrtc/modules/video_capture/video_capture.gypi
+++ b/chromium/third_party/webrtc/modules/video_capture/video_capture.gypi
@@ -17,7 +17,6 @@
'type': 'static_library',
'dependencies': [
'webrtc_utility',
- '<(webrtc_root)/common.gyp:webrtc_common',
'<(webrtc_root)/common_video/common_video.gyp:common_video',
'<(webrtc_root)/system_wrappers/system_wrappers.gyp:system_wrappers',
],
@@ -54,11 +53,13 @@
{
'target_name': 'video_capture_module_internal_impl',
'type': 'static_library',
- 'dependencies': [
- 'video_capture_module',
- '<(webrtc_root)/common.gyp:webrtc_common',
- ],
'conditions': [
+ ['OS!="android"', {
+ 'dependencies': [
+ 'video_capture_module',
+ '<(webrtc_root)/common.gyp:webrtc_common',
+ ],
+ }],
['OS=="linux"', {
'sources': [
'linux/device_info_linux.cc',
@@ -115,26 +116,6 @@
],
},
}], # win
- ['OS=="android"', {
- 'sources': [
- 'android/device_info_android.cc',
- 'android/device_info_android.h',
- 'android/video_capture_android.cc',
- 'android/video_capture_android.h',
- ],
- 'conditions': [
- ['build_json==1', {
- 'dependencies': [
- '<(DEPTH)/third_party/jsoncpp/jsoncpp.gyp:jsoncpp',
- ],
- }],
- ['build_icu==1', {
- 'dependencies': [
- '<(DEPTH)/third_party/icu/icu.gyp:icuuc',
- ],
- }],
- ],
- }], # android
['OS=="ios"', {
'sources': [
'ios/device_info_ios.h',
@@ -164,7 +145,7 @@
},
],
}], # build_with_chromium==0
- ['include_tests==1', {
+ ['include_tests==1 and OS!="android"', {
'targets': [
{
'target_name': 'video_capture_tests',
@@ -177,8 +158,6 @@
'<(DEPTH)/testing/gtest.gyp:gtest',
],
'sources': [
- 'ensure_initialized.cc',
- 'ensure_initialized.h',
'test/video_capture_unittest.cc',
'test/video_capture_main_mac.mm',
],
@@ -198,18 +177,6 @@
'-lX11',
],
}],
- ['OS=="android"', {
- 'dependencies': [
- '<(DEPTH)/testing/android/native_test.gyp:native_test_native_code',
- ],
- # Need to disable error due to the line in
- # base/android/jni_android.h triggering it:
- # const BASE_EXPORT jobject GetApplicationContext()
- # error: type qualifiers ignored on function return type
- 'cflags': [
- '-Wno-ignored-qualifiers',
- ],
- }],
['OS=="mac"', {
'dependencies': [
# Link with a special main for mac so we can use the webcam.
@@ -231,36 +198,6 @@
] # conditions
},
], # targets
- 'conditions': [
- ['OS=="android"', {
- 'targets': [
- {
- 'target_name': 'video_capture_tests_apk_target',
- 'type': 'none',
- 'dependencies': [
- '<(apk_tests_path):video_capture_tests_apk',
- ],
- },
- ],
- }],
- ['test_isolation_mode != "noop"', {
- 'targets': [
- {
- 'target_name': 'video_capture_tests_run',
- 'type': 'none',
- 'dependencies': [
- 'video_capture_tests',
- ],
- 'includes': [
- '../../build/isolate.gypi',
- ],
- 'sources': [
- 'video_capture_tests.isolate',
- ],
- },
- ],
- }],
- ],
}],
],
}
diff --git a/chromium/third_party/webrtc/modules/video_capture/video_capture_factory.cc b/chromium/third_party/webrtc/modules/video_capture/video_capture_factory.cc
index 5b44a6c706c..f88f916ba47 100644
--- a/chromium/third_party/webrtc/modules/video_capture/video_capture_factory.cc
+++ b/chromium/third_party/webrtc/modules/video_capture/video_capture_factory.cc
@@ -17,7 +17,11 @@ namespace webrtc
VideoCaptureModule* VideoCaptureFactory::Create(const int32_t id,
const char* deviceUniqueIdUTF8) {
+#if defined(ANDROID)
+ return nullptr;
+#else
return videocapturemodule::VideoCaptureImpl::Create(id, deviceUniqueIdUTF8);
+#endif
}
VideoCaptureModule* VideoCaptureFactory::Create(const int32_t id,
@@ -27,7 +31,11 @@ VideoCaptureModule* VideoCaptureFactory::Create(const int32_t id,
VideoCaptureModule::DeviceInfo* VideoCaptureFactory::CreateDeviceInfo(
const int32_t id) {
+#if defined(ANDROID)
+ return nullptr;
+#else
return videocapturemodule::VideoCaptureImpl::CreateDeviceInfo(id);
+#endif
}
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/video_coding/codecs/h264/h264.cc b/chromium/third_party/webrtc/modules/video_coding/codecs/h264/h264.cc
index d4123a2e777..645ed2cad7a 100644
--- a/chromium/third_party/webrtc/modules/video_coding/codecs/h264/h264.cc
+++ b/chromium/third_party/webrtc/modules/video_coding/codecs/h264/h264.cc
@@ -36,7 +36,7 @@ bool IsH264CodecSupported() {
}
H264Encoder* H264Encoder::Create() {
- DCHECK(H264Encoder::IsSupported());
+ RTC_DCHECK(H264Encoder::IsSupported());
#if defined(WEBRTC_IOS) && defined(WEBRTC_VIDEO_TOOLBOX_SUPPORTED)
return new H264VideoToolboxEncoder();
#else
@@ -50,7 +50,7 @@ bool H264Encoder::IsSupported() {
}
H264Decoder* H264Decoder::Create() {
- DCHECK(H264Decoder::IsSupported());
+ RTC_DCHECK(H264Decoder::IsSupported());
#if defined(WEBRTC_IOS) && defined(WEBRTC_VIDEO_TOOLBOX_SUPPORTED)
return new H264VideoToolboxDecoder();
#else
diff --git a/chromium/third_party/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.cc b/chromium/third_party/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.cc
index e905fd0199c..36646a98771 100644
--- a/chromium/third_party/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.cc
+++ b/chromium/third_party/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.cc
@@ -47,9 +47,9 @@ struct FrameDecodeParams {
// instead once the pipeline supports it.
rtc::scoped_refptr<webrtc::VideoFrameBuffer> VideoFrameBufferForPixelBuffer(
CVPixelBufferRef pixel_buffer) {
- DCHECK(pixel_buffer);
- DCHECK(CVPixelBufferGetPixelFormatType(pixel_buffer) ==
- kCVPixelFormatType_420YpCbCr8BiPlanarFullRange);
+ RTC_DCHECK(pixel_buffer);
+ RTC_DCHECK(CVPixelBufferGetPixelFormatType(pixel_buffer) ==
+ kCVPixelFormatType_420YpCbCr8BiPlanarFullRange);
size_t width = CVPixelBufferGetWidthOfPlane(pixel_buffer, 0);
size_t height = CVPixelBufferGetHeightOfPlane(pixel_buffer, 0);
// TODO(tkchin): Use a frame buffer pool.
@@ -64,9 +64,9 @@ rtc::scoped_refptr<webrtc::VideoFrameBuffer> VideoFrameBufferForPixelBuffer(
int src_uv_stride = CVPixelBufferGetBytesPerRowOfPlane(pixel_buffer, 1);
int ret = libyuv::NV12ToI420(
src_y, src_y_stride, src_uv, src_uv_stride,
- buffer->data(webrtc::kYPlane), buffer->stride(webrtc::kYPlane),
- buffer->data(webrtc::kUPlane), buffer->stride(webrtc::kUPlane),
- buffer->data(webrtc::kVPlane), buffer->stride(webrtc::kVPlane),
+ buffer->MutableData(webrtc::kYPlane), buffer->stride(webrtc::kYPlane),
+ buffer->MutableData(webrtc::kUPlane), buffer->stride(webrtc::kUPlane),
+ buffer->MutableData(webrtc::kVPlane), buffer->stride(webrtc::kVPlane),
width, height);
CVPixelBufferUnlockBaseAddress(pixel_buffer, kCVPixelBufferLock_ReadOnly);
if (ret) {
@@ -125,7 +125,7 @@ int H264VideoToolboxDecoder::Decode(
const RTPFragmentationHeader* fragmentation,
const CodecSpecificInfo* codec_specific_info,
int64_t render_time_ms) {
- DCHECK(input_image._buffer);
+ RTC_DCHECK(input_image._buffer);
CMSampleBufferRef sample_buffer = nullptr;
if (!H264AnnexBBufferToCMSampleBuffer(input_image._buffer,
@@ -134,7 +134,7 @@ int H264VideoToolboxDecoder::Decode(
&sample_buffer)) {
return WEBRTC_VIDEO_CODEC_ERROR;
}
- DCHECK(sample_buffer);
+ RTC_DCHECK(sample_buffer);
// Check if the video format has changed, and reinitialize decoder if needed.
CMVideoFormatDescriptionRef description =
CMSampleBufferGetFormatDescription(sample_buffer);
@@ -160,7 +160,7 @@ int H264VideoToolboxDecoder::Decode(
int H264VideoToolboxDecoder::RegisterDecodeCompleteCallback(
DecodedImageCallback* callback) {
- DCHECK(!callback_);
+ RTC_DCHECK(!callback_);
callback_ = callback;
return WEBRTC_VIDEO_CODEC_OK;
}
@@ -238,7 +238,7 @@ int H264VideoToolboxDecoder::ResetDecompressionSession() {
}
void H264VideoToolboxDecoder::ConfigureDecompressionSession() {
- DCHECK(decompression_session_);
+ RTC_DCHECK(decompression_session_);
#if defined(WEBRTC_IOS)
VTSessionSetProperty(decompression_session_,
kVTDecompressionPropertyKey_RealTime, kCFBooleanTrue);
diff --git a/chromium/third_party/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.cc b/chromium/third_party/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.cc
index 3dfd6cf4386..fec32261b76 100644
--- a/chromium/third_party/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.cc
+++ b/chromium/third_party/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.cc
@@ -35,7 +35,7 @@ inline CFDictionaryRef CreateCFDictionary(CFTypeRef* keys,
// Copies characters from a CFStringRef into a std::string.
std::string CFStringToString(const CFStringRef cf_string) {
- DCHECK(cf_string);
+ RTC_DCHECK(cf_string);
std::string std_string;
// Get the size needed for UTF8 plus terminating character.
size_t buffer_size =
@@ -123,13 +123,13 @@ struct FrameEncodeParams {
// TODO(tkchin): See if encoder will accept i420 frames and compare performance.
bool CopyVideoFrameToPixelBuffer(const webrtc::VideoFrame& frame,
CVPixelBufferRef pixel_buffer) {
- DCHECK(pixel_buffer);
- DCHECK(CVPixelBufferGetPixelFormatType(pixel_buffer) ==
- kCVPixelFormatType_420YpCbCr8BiPlanarFullRange);
- DCHECK(CVPixelBufferGetHeightOfPlane(pixel_buffer, 0) ==
- static_cast<size_t>(frame.height()));
- DCHECK(CVPixelBufferGetWidthOfPlane(pixel_buffer, 0) ==
- static_cast<size_t>(frame.width()));
+ RTC_DCHECK(pixel_buffer);
+ RTC_DCHECK(CVPixelBufferGetPixelFormatType(pixel_buffer) ==
+ kCVPixelFormatType_420YpCbCr8BiPlanarFullRange);
+ RTC_DCHECK(CVPixelBufferGetHeightOfPlane(pixel_buffer, 0) ==
+ static_cast<size_t>(frame.height()));
+ RTC_DCHECK(CVPixelBufferGetWidthOfPlane(pixel_buffer, 0) ==
+ static_cast<size_t>(frame.width()));
CVReturn cvRet = CVPixelBufferLockBaseAddress(pixel_buffer, 0);
if (cvRet != kCVReturnSuccess) {
@@ -224,8 +224,8 @@ H264VideoToolboxEncoder::~H264VideoToolboxEncoder() {
int H264VideoToolboxEncoder::InitEncode(const VideoCodec* codec_settings,
int number_of_cores,
size_t max_payload_size) {
- DCHECK(codec_settings);
- DCHECK_EQ(codec_settings->codecType, kVideoCodecH264);
+ RTC_DCHECK(codec_settings);
+ RTC_DCHECK_EQ(codec_settings->codecType, kVideoCodecH264);
// TODO(tkchin): We may need to enforce width/height dimension restrictions
// to match what the encoder supports.
width_ = codec_settings->width;
@@ -266,7 +266,7 @@ int H264VideoToolboxEncoder::Encode(
// that the pool is empty.
return WEBRTC_VIDEO_CODEC_ERROR;
}
- DCHECK(pixel_buffer);
+ RTC_DCHECK(pixel_buffer);
if (!internal::CopyVideoFrameToPixelBuffer(input_image, pixel_buffer)) {
LOG(LS_ERROR) << "Failed to copy frame data.";
CVBufferRelease(pixel_buffer);
@@ -397,7 +397,7 @@ int H264VideoToolboxEncoder::ResetCompressionSession() {
}
void H264VideoToolboxEncoder::ConfigureCompressionSession() {
- DCHECK(compression_session_);
+ RTC_DCHECK(compression_session_);
internal::SetVTSessionProperty(compression_session_,
kVTCompressionPropertyKey_RealTime, true);
internal::SetVTSessionProperty(compression_session_,
diff --git a/chromium/third_party/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.cc b/chromium/third_party/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.cc
index 7d595a88ee3..43a7de0458a 100644
--- a/chromium/third_party/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.cc
+++ b/chromium/third_party/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_nalu.cc
@@ -29,8 +29,8 @@ bool H264CMSampleBufferToAnnexBBuffer(
bool is_keyframe,
rtc::Buffer* annexb_buffer,
webrtc::RTPFragmentationHeader** out_header) {
- DCHECK(avcc_sample_buffer);
- DCHECK(out_header);
+ RTC_DCHECK(avcc_sample_buffer);
+ RTC_DCHECK(out_header);
*out_header = nullptr;
// Get format description from the sample buffer.
@@ -51,8 +51,8 @@ bool H264CMSampleBufferToAnnexBBuffer(
return false;
}
// TODO(tkchin): handle other potential sizes.
- DCHECK_EQ(nalu_header_size, 4);
- DCHECK_EQ(param_set_count, 2u);
+ RTC_DCHECK_EQ(nalu_header_size, 4);
+ RTC_DCHECK_EQ(param_set_count, 2u);
// Truncate any previous data in the buffer without changing its capacity.
annexb_buffer->SetSize(0);
@@ -122,7 +122,7 @@ bool H264CMSampleBufferToAnnexBBuffer(
// The size type here must match |nalu_header_size|, we expect 4 bytes.
// Read the length of the next packet of data. Must convert from big endian
// to host endian.
- DCHECK_GE(bytes_remaining, (size_t)nalu_header_size);
+ RTC_DCHECK_GE(bytes_remaining, (size_t)nalu_header_size);
uint32_t* uint32_data_ptr = reinterpret_cast<uint32*>(data_ptr);
uint32_t packet_size = CFSwapInt32BigToHost(*uint32_data_ptr);
// Update buffer.
@@ -137,12 +137,12 @@ bool H264CMSampleBufferToAnnexBBuffer(
bytes_remaining -= bytes_written;
data_ptr += bytes_written;
}
- DCHECK_EQ(bytes_remaining, (size_t)0);
+ RTC_DCHECK_EQ(bytes_remaining, (size_t)0);
rtc::scoped_ptr<webrtc::RTPFragmentationHeader> header;
header.reset(new webrtc::RTPFragmentationHeader());
header->VerifyAndAllocateFragmentationHeader(frag_offsets.size());
- DCHECK_EQ(frag_lengths.size(), frag_offsets.size());
+ RTC_DCHECK_EQ(frag_lengths.size(), frag_offsets.size());
for (size_t i = 0; i < frag_offsets.size(); ++i) {
header->fragmentationOffset[i] = frag_offsets[i];
header->fragmentationLength[i] = frag_lengths[i];
@@ -159,8 +159,8 @@ bool H264AnnexBBufferToCMSampleBuffer(
size_t annexb_buffer_size,
CMVideoFormatDescriptionRef video_format,
CMSampleBufferRef* out_sample_buffer) {
- DCHECK(annexb_buffer);
- DCHECK(out_sample_buffer);
+ RTC_DCHECK(annexb_buffer);
+ RTC_DCHECK(out_sample_buffer);
*out_sample_buffer = nullptr;
// The buffer we receive via RTP has 00 00 00 01 start code artifically
@@ -193,7 +193,7 @@ bool H264AnnexBBufferToCMSampleBuffer(
return false;
}
} else {
- DCHECK(video_format);
+ RTC_DCHECK(video_format);
description = video_format;
// We don't need to retain, but it makes logic easier since we are creating
// in the other block.
@@ -241,7 +241,7 @@ bool H264AnnexBBufferToCMSampleBuffer(
CFRelease(contiguous_buffer);
return false;
}
- DCHECK(block_buffer_size == reader.BytesRemaining());
+ RTC_DCHECK(block_buffer_size == reader.BytesRemaining());
// Write Avcc NALUs into block buffer memory.
AvccBufferWriter writer(reinterpret_cast<uint8_t*>(data_ptr),
@@ -272,7 +272,7 @@ bool H264AnnexBBufferToCMSampleBuffer(
AnnexBBufferReader::AnnexBBufferReader(const uint8_t* annexb_buffer,
size_t length)
: start_(annexb_buffer), offset_(0), next_offset_(0), length_(length) {
- DCHECK(annexb_buffer);
+ RTC_DCHECK(annexb_buffer);
offset_ = FindNextNaluHeader(start_, length_, 0);
next_offset_ =
FindNextNaluHeader(start_, length_, offset_ + sizeof(kAnnexBHeaderBytes));
@@ -280,8 +280,8 @@ AnnexBBufferReader::AnnexBBufferReader(const uint8_t* annexb_buffer,
bool AnnexBBufferReader::ReadNalu(const uint8_t** out_nalu,
size_t* out_length) {
- DCHECK(out_nalu);
- DCHECK(out_length);
+ RTC_DCHECK(out_nalu);
+ RTC_DCHECK(out_length);
*out_nalu = nullptr;
*out_length = 0;
@@ -304,7 +304,7 @@ size_t AnnexBBufferReader::BytesRemaining() const {
size_t AnnexBBufferReader::FindNextNaluHeader(const uint8_t* start,
size_t length,
size_t offset) const {
- DCHECK(start);
+ RTC_DCHECK(start);
if (offset + sizeof(kAnnexBHeaderBytes) > length) {
return length;
}
@@ -329,7 +329,7 @@ size_t AnnexBBufferReader::FindNextNaluHeader(const uint8_t* start,
AvccBufferWriter::AvccBufferWriter(uint8_t* const avcc_buffer, size_t length)
: start_(avcc_buffer), offset_(0), length_(length) {
- DCHECK(avcc_buffer);
+ RTC_DCHECK(avcc_buffer);
}
bool AvccBufferWriter::WriteNalu(const uint8_t* data, size_t data_size) {
diff --git a/chromium/third_party/webrtc/modules/video_coding/codecs/interface/video_codec_interface.h b/chromium/third_party/webrtc/modules/video_coding/codecs/interface/video_codec_interface.h
index 6acd2d43201..6363ab7332b 100644
--- a/chromium/third_party/webrtc/modules/video_coding/codecs/interface/video_codec_interface.h
+++ b/chromium/third_party/webrtc/modules/video_coding/codecs/interface/video_codec_interface.h
@@ -43,16 +43,31 @@ struct CodecSpecificInfoVP8 {
};
struct CodecSpecificInfoVP9 {
- bool hasReceivedSLI;
- uint8_t pictureIdSLI;
- bool hasReceivedRPSI;
- uint64_t pictureIdRPSI;
- int16_t pictureId; // Negative value to skip pictureId.
- bool nonReference;
- uint8_t temporalIdx;
- bool layerSync;
- int tl0PicIdx; // Negative value to skip tl0PicIdx.
- int8_t keyIdx; // Negative value to skip keyIdx.
+ bool has_received_sli;
+ uint8_t picture_id_sli;
+ bool has_received_rpsi;
+ uint64_t picture_id_rpsi;
+ int16_t picture_id; // Negative value to skip pictureId.
+
+ bool inter_pic_predicted; // This layer frame is dependent on previously
+ // coded frame(s).
+ bool flexible_mode;
+ bool ss_data_available;
+
+ int tl0_pic_idx; // Negative value to skip tl0PicIdx.
+ uint8_t temporal_idx;
+ uint8_t spatial_idx;
+ bool temporal_up_switch;
+ bool inter_layer_predicted; // Frame is dependent on directly lower spatial
+ // layer frame.
+ uint8_t gof_idx;
+
+ // SS data.
+ size_t num_spatial_layers; // Always populated.
+ bool spatial_layer_resolution_present;
+ uint16_t width[kMaxVp9NumberOfSpatialLayers];
+ uint16_t height[kMaxVp9NumberOfSpatialLayers];
+ GofInfoVP9 gof;
};
struct CodecSpecificInfoGeneric {
diff --git a/chromium/third_party/webrtc/modules/video_coding/codecs/test/videoprocessor.cc b/chromium/third_party/webrtc/modules/video_coding/codecs/test/videoprocessor.cc
index e788f23b9de..888adb8939e 100644
--- a/chromium/third_party/webrtc/modules/video_coding/codecs/test/videoprocessor.cc
+++ b/chromium/third_party/webrtc/modules/video_coding/codecs/test/videoprocessor.cc
@@ -59,6 +59,7 @@ VideoProcessorImpl::VideoProcessorImpl(webrtc::VideoEncoder* encoder,
last_frame_missing_(false),
initialized_(false),
encoded_frame_size_(0),
+ encoded_frame_type_(kKeyFrame),
prev_time_stamp_(0),
num_dropped_frames_(0),
num_spatial_resizes_(0),
@@ -161,6 +162,10 @@ size_t VideoProcessorImpl::EncodedFrameSize() {
return encoded_frame_size_;
}
+VideoFrameType VideoProcessorImpl::EncodedFrameType() {
+ return encoded_frame_type_;
+}
+
int VideoProcessorImpl::NumberDroppedFrames() {
return num_dropped_frames_;
}
@@ -202,6 +207,7 @@ bool VideoProcessorImpl::ProcessFrame(int frame_number) {
// For dropped frames, we regard them as zero size encoded frames.
encoded_frame_size_ = 0;
+ encoded_frame_type_ = kDeltaFrame;
int32_t encode_result = encoder_->Encode(source_frame_, NULL, &frame_types);
@@ -233,6 +239,8 @@ void VideoProcessorImpl::FrameEncoded(const EncodedImage& encoded_image) {
// (encoder callback is only called for non-zero length frames).
encoded_frame_size_ = encoded_image._length;
+ encoded_frame_type_ = encoded_image._frameType;
+
TickTime encode_stop = TickTime::Now();
int frame_number = encoded_image._timeStamp;
FrameStatistic& stat = stats_->stats_[frame_number];
diff --git a/chromium/third_party/webrtc/modules/video_coding/codecs/test/videoprocessor.h b/chromium/third_party/webrtc/modules/video_coding/codecs/test/videoprocessor.h
index 186b84066a1..8c9cb1211ab 100644
--- a/chromium/third_party/webrtc/modules/video_coding/codecs/test/videoprocessor.h
+++ b/chromium/third_party/webrtc/modules/video_coding/codecs/test/videoprocessor.h
@@ -146,6 +146,9 @@ class VideoProcessor {
// encoder are regarded as zero size.
virtual size_t EncodedFrameSize() = 0;
+ // Return the encoded frame type (key or delta).
+ virtual VideoFrameType EncodedFrameType() = 0;
+
// Return the number of dropped frames.
virtual int NumberDroppedFrames() = 0;
@@ -179,6 +182,8 @@ class VideoProcessorImpl : public VideoProcessor {
void SetRates(int bit_rate, int frame_rate) override;
// Return the size of the encoded frame in bytes.
size_t EncodedFrameSize() override;
+ // Return the encoded frame type (key or delta).
+ VideoFrameType EncodedFrameType() override;
// Return the number of dropped frames.
int NumberDroppedFrames() override;
// Return the number of spatial resizes.
@@ -207,6 +212,7 @@ class VideoProcessorImpl : public VideoProcessor {
// If Init() has executed successfully.
bool initialized_;
size_t encoded_frame_size_;
+ VideoFrameType encoded_frame_type_;
int prev_time_stamp_;
int num_dropped_frames_;
int num_spatial_resizes_;
diff --git a/chromium/third_party/webrtc/modules/video_coding/codecs/test/videoprocessor_integrationtest.cc b/chromium/third_party/webrtc/modules/video_coding/codecs/test/videoprocessor_integrationtest.cc
index 6c0e1254fca..9f8ff49bee6 100644
--- a/chromium/third_party/webrtc/modules/video_coding/codecs/test/videoprocessor_integrationtest.cc
+++ b/chromium/third_party/webrtc/modules/video_coding/codecs/test/videoprocessor_integrationtest.cc
@@ -78,6 +78,7 @@ struct RateControlMetrics {
int max_encoding_rate_mismatch;
int max_time_hit_target;
int num_spatial_resizes;
+ int num_key_frames;
};
@@ -208,6 +209,8 @@ class VideoProcessorIntegrationTest: public testing::Test {
num_temporal_layers_;
config_.codec_settings->codecSpecific.VP9.frameDroppingOn =
frame_dropper_on_;
+ config_.codec_settings->codecSpecific.VP9.automaticResizeOn =
+ spatial_resize_on_;
config_.codec_settings->codecSpecific.VP9.keyFrameInterval =
kBaseKeyFrameInterval;
break;
@@ -307,7 +310,8 @@ class VideoProcessorIntegrationTest: public testing::Test {
int max_encoding_rate_mismatch,
int max_time_hit_target,
int max_num_dropped_frames,
- int num_spatial_resizes) {
+ int num_spatial_resizes,
+ int num_key_frames) {
int num_dropped_frames = processor_->NumberDroppedFrames();
int num_resize_actions = processor_->NumberSpatialResizes();
printf("For update #: %d,\n "
@@ -354,6 +358,7 @@ class VideoProcessorIntegrationTest: public testing::Test {
EXPECT_LE(num_frames_to_hit_target_, max_time_hit_target);
EXPECT_LE(num_dropped_frames, max_num_dropped_frames);
EXPECT_EQ(num_resize_actions, num_spatial_resizes);
+ EXPECT_EQ(num_key_frames_, num_key_frames);
}
// Layer index corresponding to frame number, for up to 3 layers.
@@ -406,15 +411,6 @@ class VideoProcessorIntegrationTest: public testing::Test {
}
}
- VideoFrameType FrameType(int frame_number) {
- if (frame_number == 0 || ((frame_number) % key_frame_interval_ == 0 &&
- key_frame_interval_ > 0)) {
- return kKeyFrame;
- } else {
- return kDeltaFrame;
- }
- }
-
void TearDown() {
delete processor_;
delete packet_manipulator_;
@@ -459,7 +455,8 @@ class VideoProcessorIntegrationTest: public testing::Test {
frame_number < num_frames) {
// Get the layer index for the frame |frame_number|.
LayerIndexForFrame(frame_number);
- frame_type = FrameType(frame_number);
+ // Get the frame_type.
+ frame_type = processor_->EncodedFrameType();
// Counter for whole sequence run.
++frame_number;
// Counters for each rate update.
@@ -477,7 +474,8 @@ class VideoProcessorIntegrationTest: public testing::Test {
rc_metrics[update_index].max_encoding_rate_mismatch,
rc_metrics[update_index].max_time_hit_target,
rc_metrics[update_index].max_num_dropped_frames,
- rc_metrics[update_index].num_spatial_resizes);
+ rc_metrics[update_index].num_spatial_resizes,
+ rc_metrics[update_index].num_key_frames);
// Update layer rates and the codec with new rates.
++update_index;
bit_rate_ = rate_profile.target_bit_rate[update_index];
@@ -495,7 +493,8 @@ class VideoProcessorIntegrationTest: public testing::Test {
rc_metrics[update_index].max_encoding_rate_mismatch,
rc_metrics[update_index].max_time_hit_target,
rc_metrics[update_index].max_num_dropped_frames,
- rc_metrics[update_index].num_spatial_resizes);
+ rc_metrics[update_index].num_spatial_resizes,
+ rc_metrics[update_index].num_key_frames);
EXPECT_EQ(num_frames, frame_number);
EXPECT_EQ(num_frames + 1, static_cast<int>(stats_.stats_.size()));
@@ -576,7 +575,8 @@ void SetRateControlMetrics(RateControlMetrics* rc_metrics,
int max_delta_frame_size_mismatch,
int max_encoding_rate_mismatch,
int max_time_hit_target,
- int num_spatial_resizes) {
+ int num_spatial_resizes,
+ int num_key_frames) {
rc_metrics[update_index].max_num_dropped_frames = max_num_dropped_frames;
rc_metrics[update_index].max_key_frame_size_mismatch =
max_key_frame_size_mismatch;
@@ -586,6 +586,7 @@ void SetRateControlMetrics(RateControlMetrics* rc_metrics,
max_encoding_rate_mismatch;
rc_metrics[update_index].max_time_hit_target = max_time_hit_target;
rc_metrics[update_index].num_spatial_resizes = num_spatial_resizes;
+ rc_metrics[update_index].num_key_frames = num_key_frames;
}
// VP9: Run with no packet loss and fixed bitrate. Quality should be very high.
@@ -606,7 +607,7 @@ TEST_F(VideoProcessorIntegrationTest, Process0PercentPacketLossVP9) {
SetQualityMetrics(&quality_metrics, 37.0, 36.0, 0.93, 0.92);
// Metrics for rate control.
RateControlMetrics rc_metrics[1];
- SetRateControlMetrics(rc_metrics, 0, 0, 40, 20, 10, 20, 0);
+ SetRateControlMetrics(rc_metrics, 0, 0, 40, 20, 10, 20, 0, 1);
ProcessFramesAndVerify(quality_metrics,
rate_profile,
process_settings,
@@ -630,7 +631,7 @@ TEST_F(VideoProcessorIntegrationTest, Process5PercentPacketLossVP9) {
SetQualityMetrics(&quality_metrics, 17.0, 14.0, 0.45, 0.36);
// Metrics for rate control.
RateControlMetrics rc_metrics[1];
- SetRateControlMetrics(rc_metrics, 0, 0, 40, 20, 10, 20, 0);
+ SetRateControlMetrics(rc_metrics, 0, 0, 40, 20, 10, 20, 0, 1);
ProcessFramesAndVerify(quality_metrics,
rate_profile,
process_settings,
@@ -659,9 +660,9 @@ TEST_F(VideoProcessorIntegrationTest, ProcessNoLossChangeBitRateVP9) {
SetQualityMetrics(&quality_metrics, 35.9, 30.0, 0.90, 0.85);
// Metrics for rate control.
RateControlMetrics rc_metrics[3];
- SetRateControlMetrics(rc_metrics, 0, 0, 30, 20, 20, 30, 0);
- SetRateControlMetrics(rc_metrics, 1, 2, 0, 20, 20, 60, 0);
- SetRateControlMetrics(rc_metrics, 2, 0, 0, 25, 20, 40, 0);
+ SetRateControlMetrics(rc_metrics, 0, 0, 30, 20, 20, 30, 0, 1);
+ SetRateControlMetrics(rc_metrics, 1, 2, 0, 20, 20, 60, 0, 0);
+ SetRateControlMetrics(rc_metrics, 2, 0, 0, 25, 20, 40, 0, 0);
ProcessFramesAndVerify(quality_metrics,
rate_profile,
process_settings,
@@ -694,9 +695,9 @@ TEST_F(VideoProcessorIntegrationTest,
SetQualityMetrics(&quality_metrics, 31.5, 18.0, 0.80, 0.44);
// Metrics for rate control.
RateControlMetrics rc_metrics[3];
- SetRateControlMetrics(rc_metrics, 0, 35, 50, 70, 15, 45, 0);
- SetRateControlMetrics(rc_metrics, 1, 10, 0, 40, 10, 30, 0);
- SetRateControlMetrics(rc_metrics, 2, 5, 0, 30, 5, 20, 0);
+ SetRateControlMetrics(rc_metrics, 0, 35, 50, 70, 15, 45, 0, 1);
+ SetRateControlMetrics(rc_metrics, 1, 10, 0, 40, 10, 30, 0, 0);
+ SetRateControlMetrics(rc_metrics, 2, 5, 0, 30, 5, 20, 0, 0);
ProcessFramesAndVerify(quality_metrics,
rate_profile,
process_settings,
@@ -719,7 +720,33 @@ TEST_F(VideoProcessorIntegrationTest, ProcessNoLossDenoiserOnVP9) {
SetQualityMetrics(&quality_metrics, 36.8, 35.8, 0.92, 0.91);
// Metrics for rate control.
RateControlMetrics rc_metrics[1];
- SetRateControlMetrics(rc_metrics, 0, 0, 40, 20, 10, 20, 0);
+ SetRateControlMetrics(rc_metrics, 0, 0, 40, 20, 10, 20, 0, 1);
+ ProcessFramesAndVerify(quality_metrics,
+ rate_profile,
+ process_settings,
+ rc_metrics);
+}
+
+// Run with no packet loss, at low bitrate.
+// spatial_resize is on, so expect one resize during the sequence,
+// resize happens on delta frame. Expect only one key frame (first frame).
+TEST_F(VideoProcessorIntegrationTest, ProcessNoLossSpatialResizeFrameDropVP9) {
+ config_.networking_config.packet_loss_probability = 0;
+ // Bitrate and frame rate profile.
+ RateProfile rate_profile;
+ SetRateProfilePars(&rate_profile, 0, 50, 30, 0);
+ rate_profile.frame_index_rate_update[1] = kNbrFramesLong + 1;
+ rate_profile.num_frames = kNbrFramesLong;
+ // Codec/network settings.
+ CodecConfigPars process_settings;
+ SetCodecParameters(&process_settings, kVideoCodecVP9, 0.0f, -1,
+ 1, false, false, true, true);
+ // Metrics for expected quality.
+ QualityMetrics quality_metrics;
+ SetQualityMetrics(&quality_metrics, 25.0, 13.0, 0.70, 0.40);
+ // Metrics for rate control.
+ RateControlMetrics rc_metrics[1];
+ SetRateControlMetrics(rc_metrics, 0, 170, 70, 120, 10, 80, 1, 1);
ProcessFramesAndVerify(quality_metrics,
rate_profile,
process_settings,
@@ -747,7 +774,7 @@ TEST_F(VideoProcessorIntegrationTest, ProcessZeroPacketLoss) {
SetQualityMetrics(&quality_metrics, 34.95, 33.0, 0.90, 0.89);
// Metrics for rate control.
RateControlMetrics rc_metrics[1];
- SetRateControlMetrics(rc_metrics, 0, 0, 40, 20, 10, 15, 0);
+ SetRateControlMetrics(rc_metrics, 0, 0, 40, 20, 10, 15, 0, 1);
ProcessFramesAndVerify(quality_metrics,
rate_profile,
process_settings,
@@ -771,7 +798,7 @@ TEST_F(VideoProcessorIntegrationTest, Process5PercentPacketLoss) {
SetQualityMetrics(&quality_metrics, 20.0, 16.0, 0.60, 0.40);
// Metrics for rate control.
RateControlMetrics rc_metrics[1];
- SetRateControlMetrics(rc_metrics, 0, 0, 40, 20, 10, 15, 0);
+ SetRateControlMetrics(rc_metrics, 0, 0, 40, 20, 10, 15, 0, 1);
ProcessFramesAndVerify(quality_metrics,
rate_profile,
process_settings,
@@ -795,7 +822,7 @@ TEST_F(VideoProcessorIntegrationTest, Process10PercentPacketLoss) {
SetQualityMetrics(&quality_metrics, 19.0, 16.0, 0.50, 0.35);
// Metrics for rate control.
RateControlMetrics rc_metrics[1];
- SetRateControlMetrics(rc_metrics, 0, 0, 40, 20, 10, 15, 0);
+ SetRateControlMetrics(rc_metrics, 0, 0, 40, 20, 10, 15, 0, 1);
ProcessFramesAndVerify(quality_metrics,
rate_profile,
process_settings,
@@ -833,9 +860,9 @@ TEST_F(VideoProcessorIntegrationTest,
SetQualityMetrics(&quality_metrics, 34.0, 32.0, 0.85, 0.80);
// Metrics for rate control.
RateControlMetrics rc_metrics[3];
- SetRateControlMetrics(rc_metrics, 0, 0, 45, 20, 10, 15, 0);
- SetRateControlMetrics(rc_metrics, 1, 0, 0, 25, 20, 10, 0);
- SetRateControlMetrics(rc_metrics, 2, 0, 0, 25, 15, 10, 0);
+ SetRateControlMetrics(rc_metrics, 0, 0, 45, 20, 10, 15, 0, 1);
+ SetRateControlMetrics(rc_metrics, 1, 0, 0, 25, 20, 10, 0, 0);
+ SetRateControlMetrics(rc_metrics, 2, 0, 0, 25, 15, 10, 0, 0);
ProcessFramesAndVerify(quality_metrics,
rate_profile,
process_settings,
@@ -868,9 +895,9 @@ TEST_F(VideoProcessorIntegrationTest,
SetQualityMetrics(&quality_metrics, 31.0, 22.0, 0.80, 0.65);
// Metrics for rate control.
RateControlMetrics rc_metrics[3];
- SetRateControlMetrics(rc_metrics, 0, 40, 20, 75, 15, 60, 0);
- SetRateControlMetrics(rc_metrics, 1, 10, 0, 25, 10, 35, 0);
- SetRateControlMetrics(rc_metrics, 2, 0, 0, 20, 10, 15, 0);
+ SetRateControlMetrics(rc_metrics, 0, 40, 20, 75, 15, 60, 0, 1);
+ SetRateControlMetrics(rc_metrics, 1, 10, 0, 25, 10, 35, 0, 0);
+ SetRateControlMetrics(rc_metrics, 2, 0, 0, 20, 10, 15, 0, 0);
ProcessFramesAndVerify(quality_metrics,
rate_profile,
process_settings,
@@ -878,7 +905,7 @@ TEST_F(VideoProcessorIntegrationTest,
}
// Run with no packet loss, at low bitrate. During this time we should've
-// resized once.
+// resized once. Expect 2 key frames generated (first and one for resize).
TEST_F(VideoProcessorIntegrationTest,
DISABLED_ON_ANDROID(ProcessNoLossSpatialResizeFrameDropVP8)) {
config_.networking_config.packet_loss_probability = 0;
@@ -889,14 +916,14 @@ TEST_F(VideoProcessorIntegrationTest,
rate_profile.num_frames = kNbrFramesLong;
// Codec/network settings.
CodecConfigPars process_settings;
- SetCodecParameters(&process_settings, kVideoCodecVP8, 0.0f, kNbrFramesLong,
+ SetCodecParameters(&process_settings, kVideoCodecVP8, 0.0f, -1,
1, false, true, true, true);
// Metrics for expected quality.
QualityMetrics quality_metrics;
SetQualityMetrics(&quality_metrics, 25.0, 15.0, 0.70, 0.40);
// Metrics for rate control.
RateControlMetrics rc_metrics[1];
- SetRateControlMetrics(rc_metrics, 0, 160, 60, 120, 20, 70, 1);
+ SetRateControlMetrics(rc_metrics, 0, 160, 60, 120, 20, 70, 1, 2);
ProcessFramesAndVerify(quality_metrics,
rate_profile,
process_settings,
@@ -926,8 +953,8 @@ TEST_F(VideoProcessorIntegrationTest,
SetQualityMetrics(&quality_metrics, 32.5, 30.0, 0.85, 0.80);
// Metrics for rate control.
RateControlMetrics rc_metrics[2];
- SetRateControlMetrics(rc_metrics, 0, 0, 20, 30, 10, 10, 0);
- SetRateControlMetrics(rc_metrics, 1, 0, 0, 30, 15, 10, 0);
+ SetRateControlMetrics(rc_metrics, 0, 0, 20, 30, 10, 10, 0, 1);
+ SetRateControlMetrics(rc_metrics, 1, 0, 0, 30, 15, 10, 0, 0);
ProcessFramesAndVerify(quality_metrics,
rate_profile,
process_settings,
diff --git a/chromium/third_party/webrtc/modules/video_coding/codecs/vp8/screenshare_layers.cc b/chromium/third_party/webrtc/modules/video_coding/codecs/vp8/screenshare_layers.cc
index ecaf3dd4a59..0fbb2a6c401 100644
--- a/chromium/third_party/webrtc/modules/video_coding/codecs/vp8/screenshare_layers.cc
+++ b/chromium/third_party/webrtc/modules/video_coding/codecs/vp8/screenshare_layers.cc
@@ -132,13 +132,17 @@ bool ScreenshareLayers::ConfigureBitrates(int bitrate_kbps,
int target_bitrate_kbps = bitrate_kbps;
if (cfg != nullptr) {
- // Calculate a codec target bitrate. This may be higher than TL0, gaining
- // quality at the expense of frame rate at TL0. Constraints:
- // - TL0 frame rate should not be less than framerate / kMaxTL0FpsReduction.
- // - Target rate * kAcceptableTargetOvershoot should not exceed TL1 rate.
- target_bitrate_kbps =
- std::min(bitrate_kbps * kMaxTL0FpsReduction,
- max_bitrate_kbps / kAcceptableTargetOvershoot);
+ if (number_of_temporal_layers_ > 1) {
+ // Calculate a codec target bitrate. This may be higher than TL0, gaining
+ // quality at the expense of frame rate at TL0. Constraints:
+ // - TL0 frame rate no less than framerate / kMaxTL0FpsReduction.
+ // - Target rate * kAcceptableTargetOvershoot should not exceed TL1 rate.
+ target_bitrate_kbps =
+ std::min(bitrate_kbps * kMaxTL0FpsReduction,
+ max_bitrate_kbps / kAcceptableTargetOvershoot);
+
+ cfg->rc_target_bitrate = std::max(bitrate_kbps, target_bitrate_kbps);
+ }
// Don't reconfigure qp limits during quality boost frames.
if (layers_[active_layer_].state != TemporalLayer::State::kQualityBoost) {
@@ -152,8 +156,6 @@ bool ScreenshareLayers::ConfigureBitrates(int bitrate_kbps,
layers_[0].enhanced_max_qp = min_qp_ + (((max_qp_ - min_qp_) * 80) / 100);
layers_[1].enhanced_max_qp = min_qp_ + (((max_qp_ - min_qp_) * 85) / 100);
}
-
- cfg->rc_target_bitrate = std::max(bitrate_kbps, target_bitrate_kbps);
}
int avg_frame_size = (target_bitrate_kbps * 1000) / (8 * framerate);
@@ -169,6 +171,7 @@ void ScreenshareLayers::FrameEncoded(unsigned int size,
layers_[active_layer_].state = TemporalLayer::State::kDropped;
return;
}
+
if (layers_[active_layer_].state == TemporalLayer::State::kDropped) {
layers_[active_layer_].state = TemporalLayer::State::kQualityBoost;
}
@@ -217,14 +220,14 @@ bool ScreenshareLayers::TimeToSync(int64_t timestamp) const {
RTC_NOTREACHED();
return false;
}
- DCHECK_NE(-1, layers_[0].last_qp);
+ RTC_DCHECK_NE(-1, layers_[0].last_qp);
if (layers_[1].last_qp == -1) {
// First frame in TL1 should only depend on TL0 since there are no
// previous frames in TL1.
return true;
}
- DCHECK_NE(-1, last_sync_timestamp_);
+ RTC_DCHECK_NE(-1, last_sync_timestamp_);
int64_t timestamp_diff = timestamp - last_sync_timestamp_;
if (timestamp_diff > kMaxTimeBetweenSyncs) {
// After a certain time, force a sync frame.
@@ -241,7 +244,7 @@ bool ScreenshareLayers::TimeToSync(int64_t timestamp) const {
}
bool ScreenshareLayers::UpdateConfiguration(vpx_codec_enc_cfg_t* cfg) {
- if (max_qp_ == -1)
+ if (max_qp_ == -1 || number_of_temporal_layers_ <= 1)
return false;
// If layer is in the quality boost state (following a dropped frame), update
diff --git a/chromium/third_party/webrtc/modules/video_coding/codecs/vp8/screenshare_layers_unittest.cc b/chromium/third_party/webrtc/modules/video_coding/codecs/vp8/screenshare_layers_unittest.cc
index 198be2a09fb..628e3365685 100644
--- a/chromium/third_party/webrtc/modules/video_coding/codecs/vp8/screenshare_layers_unittest.cc
+++ b/chromium/third_party/webrtc/modules/video_coding/codecs/vp8/screenshare_layers_unittest.cc
@@ -112,15 +112,16 @@ TEST_F(ScreenshareLayerTest, 1Layer) {
CodecSpecificInfoVP8 vp8_info;
// One layer screenshare should not use the frame dropper as all frames will
// belong to the base layer.
+ const int kSingleLayerFlags = 0;
flags = layers_->EncodeFlags(timestamp);
- EXPECT_EQ(0, flags);
+ EXPECT_EQ(kSingleLayerFlags, flags);
layers_->PopulateCodecSpecific(false, &vp8_info, timestamp);
EXPECT_EQ(static_cast<uint8_t>(kNoTemporalIdx), vp8_info.temporalIdx);
EXPECT_FALSE(vp8_info.layerSync);
EXPECT_EQ(kNoTl0PicIdx, vp8_info.tl0PicIdx);
layers_->FrameEncoded(frame_size_, timestamp, kDefaultQp);
flags = layers_->EncodeFlags(timestamp);
- EXPECT_EQ(0, flags);
+ EXPECT_EQ(kSingleLayerFlags, flags);
timestamp += kTimestampDelta5Fps;
layers_->PopulateCodecSpecific(false, &vp8_info, timestamp);
EXPECT_EQ(static_cast<uint8_t>(kNoTemporalIdx), vp8_info.temporalIdx);
diff --git a/chromium/third_party/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.cc b/chromium/third_party/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.cc
index ee7fd859b53..7c4164c8ced 100644
--- a/chromium/third_party/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.cc
+++ b/chromium/third_party/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.cc
@@ -504,4 +504,8 @@ void SimulcastEncoderAdapter::OnDroppedFrame() {
streaminfos_[0].encoder->OnDroppedFrame();
}
+int SimulcastEncoderAdapter::GetTargetFramerate() {
+ return streaminfos_[0].encoder->GetTargetFramerate();
+}
+
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.h b/chromium/third_party/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.h
index cd782342b55..bca1e00a71f 100644
--- a/chromium/third_party/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.h
+++ b/chromium/third_party/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.h
@@ -57,6 +57,8 @@ class SimulcastEncoderAdapter : public VP8Encoder {
void OnDroppedFrame() override;
+ int GetTargetFramerate() override;
+
private:
struct StreamInfo {
StreamInfo()
diff --git a/chromium/third_party/webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.h b/chromium/third_party/webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.h
index 2e436a91fca..8e365a9ee69 100644
--- a/chromium/third_party/webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.h
+++ b/chromium/third_party/webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.h
@@ -17,7 +17,6 @@
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/common.h"
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
-#include "webrtc/experiments.h"
#include "webrtc/modules/video_coding/codecs/interface/mock/mock_video_codec_interface.h"
#include "webrtc/modules/video_coding/codecs/vp8/include/vp8.h"
#include "webrtc/modules/video_coding/codecs/vp8/temporal_layers.h"
diff --git a/chromium/third_party/webrtc/modules/video_coding/codecs/vp8/vp8.gyp b/chromium/third_party/webrtc/modules/video_coding/codecs/vp8/vp8.gyp
index c92509c8b54..a60a4766da1 100644
--- a/chromium/third_party/webrtc/modules/video_coding/codecs/vp8/vp8.gyp
+++ b/chromium/third_party/webrtc/modules/video_coding/codecs/vp8/vp8.gyp
@@ -23,7 +23,7 @@
'conditions': [
['build_libvpx==1', {
'dependencies': [
- '<(libvpx_dir)/libvpx.gyp:libvpx',
+ '<(libvpx_dir)/libvpx.gyp:libvpx_new',
],
}],
],
diff --git a/chromium/third_party/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc b/chromium/third_party/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc
index 05e0799b527..5714a07f2e3 100644
--- a/chromium/third_party/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc
+++ b/chromium/third_party/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc
@@ -23,7 +23,6 @@
#include "webrtc/common.h"
#include "webrtc/common_types.h"
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
-#include "webrtc/experiments.h"
#include "webrtc/modules/interface/module_common_types.h"
#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
#include "webrtc/modules/video_coding/codecs/vp8/include/vp8_common_types.h"
@@ -580,7 +579,11 @@ int VP8EncoderImpl::InitEncode(const VideoCodec* inst,
}
rps_.Init();
- quality_scaler_.Init(codec_.qpMax / QualityScaler::kDefaultLowQpDenominator);
+ // Disable both high-QP limits and framedropping. Both are handled by libvpx
+ // internally.
+ const int kDisabledBadQpThreshold = 64;
+ quality_scaler_.Init(codec_.qpMax / QualityScaler::kDefaultLowQpDenominator,
+ kDisabledBadQpThreshold, false);
quality_scaler_.ReportFramerate(codec_.maxFramerate);
return InitAndSetControlSettings();
@@ -661,7 +664,9 @@ int VP8EncoderImpl::InitAndSetControlSettings() {
denoiser_state : kDenoiserOff);
}
for (size_t i = 0; i < encoders_.size(); ++i) {
- vpx_codec_control(&(encoders_[i]), VP8E_SET_STATIC_THRESHOLD, 1);
+ // Allow more screen content to be detected as static.
+ vpx_codec_control(&(encoders_[i]), VP8E_SET_STATIC_THRESHOLD,
+ codec_.mode == kScreensharing ? 300 : 1);
vpx_codec_control(&(encoders_[i]), VP8E_SET_CPUUSED, cpu_speed_[i]);
vpx_codec_control(&(encoders_[i]), VP8E_SET_TOKEN_PARTITIONS,
static_cast<vp8e_token_partitions>(token_partitions_));
@@ -709,6 +714,8 @@ int VP8EncoderImpl::Encode(const VideoFrame& frame,
const bool use_quality_scaler = encoders_.size() == 1 &&
configurations_[0].rc_dropframe_thresh > 0 &&
codec_.codecSpecific.VP8.automaticResizeOn;
+ if (use_quality_scaler)
+ quality_scaler_.OnEncodeFrame(frame);
const VideoFrame& input_image =
use_quality_scaler ? quality_scaler_.GetScaledFrame(frame) : frame;
@@ -723,8 +730,8 @@ int VP8EncoderImpl::Encode(const VideoFrame& frame,
// |raw_images_[0]|, the resolution of these frames must match. Note that
// |input_image| might be scaled from |frame|. In that case, the resolution of
// |raw_images_[0]| should have been updated in UpdateCodecFrameSize.
- DCHECK_EQ(input_image.width(), static_cast<int>(raw_images_[0].d_w));
- DCHECK_EQ(input_image.height(), static_cast<int>(raw_images_[0].d_h));
+ RTC_DCHECK_EQ(input_image.width(), static_cast<int>(raw_images_[0].d_w));
+ RTC_DCHECK_EQ(input_image.height(), static_cast<int>(raw_images_[0].d_h));
// Image in vpx_image_t format.
// Input image is const. VP8's raw image is not defined as const.
@@ -889,6 +896,11 @@ int VP8EncoderImpl::Encode(const VideoFrame& frame,
int VP8EncoderImpl::UpdateCodecFrameSize(const VideoFrame& input_image) {
codec_.width = input_image.width();
codec_.height = input_image.height();
+ if (codec_.numberOfSimulcastStreams <= 1) {
+ // For now scaling is only used for single-layer streams.
+ codec_.simulcastStream[0].width = input_image.width();
+ codec_.simulcastStream[0].height = input_image.height();
+ }
// Update the cpu_speed setting for resolution change.
vpx_codec_control(&(encoders_[0]),
VP8E_SET_CPUUSED,
diff --git a/chromium/third_party/webrtc/modules/video_coding/codecs/vp9/vp9.gyp b/chromium/third_party/webrtc/modules/video_coding/codecs/vp9/vp9.gyp
index ac7e67a79c4..752521c5cb8 100644
--- a/chromium/third_party/webrtc/modules/video_coding/codecs/vp9/vp9.gyp
+++ b/chromium/third_party/webrtc/modules/video_coding/codecs/vp9/vp9.gyp
@@ -22,7 +22,7 @@
'conditions': [
['build_libvpx==1', {
'dependencies': [
- '<(libvpx_dir)/libvpx.gyp:libvpx',
+ '<(libvpx_dir)/libvpx.gyp:libvpx_new',
],
}],
['build_vp9==1', {
diff --git a/chromium/third_party/webrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.cc b/chromium/third_party/webrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.cc
index 6e16bc1468f..ce600ec1a5b 100644
--- a/chromium/third_party/webrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.cc
+++ b/chromium/third_party/webrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.cc
@@ -34,7 +34,7 @@ void Vp9FrameBufferPool::Vp9FrameBuffer::SetSize(size_t size) {
bool Vp9FrameBufferPool::InitializeVpxUsePool(
vpx_codec_ctx* vpx_codec_context) {
- DCHECK(vpx_codec_context);
+ RTC_DCHECK(vpx_codec_context);
// Tell libvpx to use this pool.
if (vpx_codec_set_frame_buffer_functions(
// In which context to use these callback functions.
@@ -53,7 +53,7 @@ bool Vp9FrameBufferPool::InitializeVpxUsePool(
rtc::scoped_refptr<Vp9FrameBufferPool::Vp9FrameBuffer>
Vp9FrameBufferPool::GetFrameBuffer(size_t min_size) {
- DCHECK_GT(min_size, 0u);
+ RTC_DCHECK_GT(min_size, 0u);
rtc::scoped_refptr<Vp9FrameBuffer> available_buffer = nullptr;
{
rtc::CritScope cs(&buffers_lock_);
@@ -101,8 +101,8 @@ void Vp9FrameBufferPool::ClearPool() {
int32 Vp9FrameBufferPool::VpxGetFrameBuffer(void* user_priv,
size_t min_size,
vpx_codec_frame_buffer* fb) {
- DCHECK(user_priv);
- DCHECK(fb);
+ RTC_DCHECK(user_priv);
+ RTC_DCHECK(fb);
Vp9FrameBufferPool* pool = static_cast<Vp9FrameBufferPool*>(user_priv);
rtc::scoped_refptr<Vp9FrameBuffer> buffer = pool->GetFrameBuffer(min_size);
@@ -120,8 +120,8 @@ int32 Vp9FrameBufferPool::VpxGetFrameBuffer(void* user_priv,
// static
int32 Vp9FrameBufferPool::VpxReleaseFrameBuffer(void* user_priv,
vpx_codec_frame_buffer* fb) {
- DCHECK(user_priv);
- DCHECK(fb);
+ RTC_DCHECK(user_priv);
+ RTC_DCHECK(fb);
Vp9FrameBuffer* buffer = static_cast<Vp9FrameBuffer*>(fb->priv);
if (buffer != nullptr) {
buffer->Release();
diff --git a/chromium/third_party/webrtc/modules/video_coding/codecs/vp9/vp9_impl.cc b/chromium/third_party/webrtc/modules/video_coding/codecs/vp9/vp9_impl.cc
index cd91fa3bdee..d4a4d7d7e33 100644
--- a/chromium/third_party/webrtc/modules/video_coding/codecs/vp9/vp9_impl.cc
+++ b/chromium/third_party/webrtc/modules/video_coding/codecs/vp9/vp9_impl.cc
@@ -57,6 +57,12 @@ VP9Encoder* VP9Encoder::Create() {
return new VP9EncoderImpl();
}
+void VP9EncoderImpl::EncoderOutputCodedPacketCallback(vpx_codec_cx_pkt* pkt,
+ void* user_data) {
+ VP9EncoderImpl* enc = (VP9EncoderImpl*)(user_data);
+ enc->GetEncodedLayerFrame(pkt);
+}
+
VP9EncoderImpl::VP9EncoderImpl()
: encoded_image_(),
encoded_complete_callback_(NULL),
@@ -67,7 +73,12 @@ VP9EncoderImpl::VP9EncoderImpl()
rc_max_intra_target_(0),
encoder_(NULL),
config_(NULL),
- raw_(NULL) {
+ raw_(NULL),
+ input_image_(NULL),
+ tl0_pic_idx_(0),
+ gof_idx_(0),
+ num_temporal_layers_(0),
+ num_spatial_layers_(0) {
memset(&codec_, 0, sizeof(codec_));
uint32_t seed = static_cast<uint32_t>(TickTime::MillisecondTimestamp());
srand(seed);
@@ -101,6 +112,55 @@ int VP9EncoderImpl::Release() {
return WEBRTC_VIDEO_CODEC_OK;
}
+bool VP9EncoderImpl::SetSvcRates() {
+ float rate_ratio[VPX_MAX_LAYERS] = {0};
+ float total = 0;
+ uint8_t i = 0;
+
+ for (i = 0; i < num_spatial_layers_; ++i) {
+ if (svc_internal_.svc_params.scaling_factor_num[i] <= 0 ||
+ svc_internal_.svc_params.scaling_factor_den[i] <= 0) {
+ return false;
+ }
+ rate_ratio[i] = static_cast<float>(
+ svc_internal_.svc_params.scaling_factor_num[i]) /
+ svc_internal_.svc_params.scaling_factor_den[i];
+ total += rate_ratio[i];
+ }
+
+ for (i = 0; i < num_spatial_layers_; ++i) {
+ config_->ss_target_bitrate[i] = static_cast<unsigned int>(
+ config_->rc_target_bitrate * rate_ratio[i] / total);
+ if (num_temporal_layers_ == 1) {
+ config_->layer_target_bitrate[i] = config_->ss_target_bitrate[i];
+ } else if (num_temporal_layers_ == 2) {
+ config_->layer_target_bitrate[i * num_temporal_layers_] =
+ config_->ss_target_bitrate[i] * 2 / 3;
+ config_->layer_target_bitrate[i * num_temporal_layers_ + 1] =
+ config_->ss_target_bitrate[i];
+ } else if (num_temporal_layers_ == 3) {
+ config_->layer_target_bitrate[i * num_temporal_layers_] =
+ config_->ss_target_bitrate[i] / 2;
+ config_->layer_target_bitrate[i * num_temporal_layers_ + 1] =
+ config_->layer_target_bitrate[i * num_temporal_layers_] +
+ (config_->ss_target_bitrate[i] / 4);
+ config_->layer_target_bitrate[i * num_temporal_layers_ + 2] =
+ config_->ss_target_bitrate[i];
+ } else {
+ return false;
+ }
+ }
+
+ // For now, temporal layers only supported when having one spatial layer.
+ if (num_spatial_layers_ == 1) {
+ for (i = 0; i < num_temporal_layers_; ++i) {
+ config_->ts_target_bitrate[i] = config_->layer_target_bitrate[i];
+ }
+ }
+
+ return true;
+}
+
int VP9EncoderImpl::SetRates(uint32_t new_bitrate_kbit,
uint32_t new_framerate) {
if (!inited_) {
@@ -118,6 +178,11 @@ int VP9EncoderImpl::SetRates(uint32_t new_bitrate_kbit,
}
config_->rc_target_bitrate = new_bitrate_kbit;
codec_.maxFramerate = new_framerate;
+
+ if (!SetSvcRates()) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+
// Update encoder context
if (vpx_codec_enc_config_set(encoder_, config_)) {
return WEBRTC_VIDEO_CODEC_ERROR;
@@ -144,6 +209,13 @@ int VP9EncoderImpl::InitEncode(const VideoCodec* inst,
if (number_of_cores < 1) {
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
+ if (inst->codecSpecific.VP9.numberOfTemporalLayers > 3) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+ // libvpx currently supports only one or two spatial layers.
+ if (inst->codecSpecific.VP9.numberOfSpatialLayers > 2) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
int retVal = Release();
if (retVal < 0) {
return retVal;
@@ -158,6 +230,12 @@ int VP9EncoderImpl::InitEncode(const VideoCodec* inst,
if (&codec_ != inst) {
codec_ = *inst;
}
+
+ num_spatial_layers_ = inst->codecSpecific.VP9.numberOfSpatialLayers;
+ num_temporal_layers_ = inst->codecSpecific.VP9.numberOfTemporalLayers;
+ if (num_temporal_layers_ == 0)
+ num_temporal_layers_ = 1;
+
// Random start 16 bits is enough.
picture_id_ = static_cast<uint16_t>(rand()) & 0x7FFF;
// Allocate memory for encoded image
@@ -205,17 +283,63 @@ int VP9EncoderImpl::InitEncode(const VideoCodec* inst,
} else {
config_->kf_mode = VPX_KF_DISABLED;
}
+ config_->rc_resize_allowed = inst->codecSpecific.VP9.automaticResizeOn ?
+ 1 : 0;
// Determine number of threads based on the image size and #cores.
config_->g_threads = NumberOfThreads(config_->g_w,
config_->g_h,
number_of_cores);
+
cpu_speed_ = GetCpuSpeed(config_->g_w, config_->g_h);
+
+ // TODO(asapersson): Check configuration of temporal switch up and increase
+ // pattern length.
+ if (num_temporal_layers_ == 1) {
+ gof_.SetGofInfoVP9(kTemporalStructureMode1);
+ config_->temporal_layering_mode = VP9E_TEMPORAL_LAYERING_MODE_NOLAYERING;
+ config_->ts_number_layers = 1;
+ config_->ts_rate_decimator[0] = 1;
+ config_->ts_periodicity = 1;
+ config_->ts_layer_id[0] = 0;
+ } else if (num_temporal_layers_ == 2) {
+ gof_.SetGofInfoVP9(kTemporalStructureMode2);
+ config_->temporal_layering_mode = VP9E_TEMPORAL_LAYERING_MODE_0101;
+ config_->ts_number_layers = 2;
+ config_->ts_rate_decimator[0] = 2;
+ config_->ts_rate_decimator[1] = 1;
+ config_->ts_periodicity = 2;
+ config_->ts_layer_id[0] = 0;
+ config_->ts_layer_id[1] = 1;
+ } else if (num_temporal_layers_ == 3) {
+ gof_.SetGofInfoVP9(kTemporalStructureMode3);
+ config_->temporal_layering_mode = VP9E_TEMPORAL_LAYERING_MODE_0212;
+ config_->ts_number_layers = 3;
+ config_->ts_rate_decimator[0] = 4;
+ config_->ts_rate_decimator[1] = 2;
+ config_->ts_rate_decimator[2] = 1;
+ config_->ts_periodicity = 4;
+ config_->ts_layer_id[0] = 0;
+ config_->ts_layer_id[1] = 2;
+ config_->ts_layer_id[2] = 1;
+ config_->ts_layer_id[3] = 2;
+ } else {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+
+ tl0_pic_idx_ = static_cast<uint8_t>(rand());
+
return InitAndSetControlSettings(inst);
}
int VP9EncoderImpl::NumberOfThreads(int width,
int height,
int number_of_cores) {
+ // For the current libvpx library, only 1 thread is supported when SVC is
+ // turned on.
+ if (num_temporal_layers_ > 1 || num_spatial_layers_ > 1) {
+ return 1;
+ }
+
// Keep the number of encoder threads equal to the possible number of column
// tiles, which is (1, 2, 4, 8). See comments below for VP9E_SET_TILE_COLUMNS.
if (width * height >= 1280 * 720 && number_of_cores > 4) {
@@ -229,6 +353,22 @@ int VP9EncoderImpl::NumberOfThreads(int width,
}
int VP9EncoderImpl::InitAndSetControlSettings(const VideoCodec* inst) {
+ config_->ss_number_layers = num_spatial_layers_;
+
+ int scaling_factor_num = 256;
+ for (int i = num_spatial_layers_ - 1; i >= 0; --i) {
+ svc_internal_.svc_params.max_quantizers[i] = config_->rc_max_quantizer;
+ svc_internal_.svc_params.min_quantizers[i] = config_->rc_min_quantizer;
+ // 1:2 scaling in each dimension.
+ svc_internal_.svc_params.scaling_factor_num[i] = scaling_factor_num;
+ svc_internal_.svc_params.scaling_factor_den[i] = 256;
+ scaling_factor_num /= 2;
+ }
+
+ if (!SetSvcRates()) {
+ return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+ }
+
if (vpx_codec_enc_init(encoder_, vpx_codec_vp9_cx(), config_, 0)) {
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
}
@@ -237,6 +377,19 @@ int VP9EncoderImpl::InitAndSetControlSettings(const VideoCodec* inst) {
rc_max_intra_target_);
vpx_codec_control(encoder_, VP9E_SET_AQ_MODE,
inst->codecSpecific.VP9.adaptiveQpMode ? 3 : 0);
+
+ vpx_codec_control(
+ encoder_, VP9E_SET_SVC,
+ (num_temporal_layers_ > 1 || num_spatial_layers_ > 1) ? 1 : 0);
+ if (num_temporal_layers_ > 1 || num_spatial_layers_ > 1) {
+ vpx_codec_control(encoder_, VP9E_SET_SVC_PARAMETERS,
+ &svc_internal_.svc_params);
+ }
+ // Register callback for getting each spatial layer.
+ vpx_codec_priv_output_cx_pkt_cb_pair_t cbp = {
+ VP9EncoderImpl::EncoderOutputCodedPacketCallback, (void*)(this)};
+ vpx_codec_control(encoder_, VP9E_REGISTER_CX_CALLBACK, (void*)(&cbp));
+
// Control function to set the number of column tiles in encoding a frame, in
// log2 unit: e.g., 0 = 1 tile column, 1 = 2 tile columns, 2 = 4 tile columns.
// The number tile columns will be capped by the encoder based on image size
@@ -248,6 +401,12 @@ int VP9EncoderImpl::InitAndSetControlSettings(const VideoCodec* inst) {
vpx_codec_control(encoder_, VP9E_SET_NOISE_SENSITIVITY,
inst->codecSpecific.VP9.denoisingOn ? 1 : 0);
#endif
+ if (codec_.mode == kScreensharing) {
+ // Adjust internal parameters to screen content.
+ vpx_codec_control(encoder_, VP9E_SET_TUNE_CONTENT, 1);
+ }
+ // Enable encoder skip of static/low content blocks.
+ vpx_codec_control(encoder_, VP8E_SET_STATIC_THRESHOLD, 1);
inited_ = true;
return WEBRTC_VIDEO_CODEC_OK;
}
@@ -284,8 +443,15 @@ int VP9EncoderImpl::Encode(const VideoFrame& input_image,
if (frame_types && frame_types->size() > 0) {
frame_type = (*frame_types)[0];
}
- DCHECK_EQ(input_image.width(), static_cast<int>(raw_->d_w));
- DCHECK_EQ(input_image.height(), static_cast<int>(raw_->d_h));
+ RTC_DCHECK_EQ(input_image.width(), static_cast<int>(raw_->d_w));
+ RTC_DCHECK_EQ(input_image.height(), static_cast<int>(raw_->d_h));
+
+ // Set input image for use in the callback.
+ // This was necessary since you need some information from input_image.
+ // You can save only the necessary information (such as timestamp) instead of
+ // doing this.
+ input_image_ = &input_image;
+
// Image in vpx_image_t format.
// Input image is const. VPX's raw image is not defined as const.
raw_->planes[VPX_PLANE_Y] = const_cast<uint8_t*>(input_image.buffer(kYPlane));
@@ -308,7 +474,8 @@ int VP9EncoderImpl::Encode(const VideoFrame& input_image,
return WEBRTC_VIDEO_CODEC_ERROR;
}
timestamp_ += duration;
- return GetEncodedPartitions(input_image);
+
+ return WEBRTC_VIDEO_CODEC_OK;
}
void VP9EncoderImpl::PopulateCodecSpecific(CodecSpecificInfo* codec_specific,
@@ -317,20 +484,87 @@ void VP9EncoderImpl::PopulateCodecSpecific(CodecSpecificInfo* codec_specific,
assert(codec_specific != NULL);
codec_specific->codecType = kVideoCodecVP9;
CodecSpecificInfoVP9 *vp9_info = &(codec_specific->codecSpecific.VP9);
- vp9_info->pictureId = picture_id_;
- vp9_info->keyIdx = kNoKeyIdx;
- vp9_info->nonReference = (pkt.data.frame.flags & VPX_FRAME_IS_DROPPABLE) != 0;
- // TODO(marpan): Temporal layers are supported in the current VP9 version,
- // but for now use 1 temporal layer encoding. Will update this when temporal
- // layer support for VP9 is added in webrtc.
- vp9_info->temporalIdx = kNoTemporalIdx;
- vp9_info->layerSync = false;
- vp9_info->tl0PicIdx = kNoTl0PicIdx;
- picture_id_ = (picture_id_ + 1) & 0x7FFF;
+ // TODO(asapersson): Set correct values.
+ vp9_info->inter_pic_predicted =
+ (pkt.data.frame.flags & VPX_FRAME_IS_KEY) ? false : true;
+ vp9_info->flexible_mode = codec_.codecSpecific.VP9.flexibleMode;
+ vp9_info->ss_data_available = ((pkt.data.frame.flags & VPX_FRAME_IS_KEY) &&
+ !codec_.codecSpecific.VP9.flexibleMode)
+ ? true
+ : false;
+ if (pkt.data.frame.flags & VPX_FRAME_IS_KEY) {
+ gof_idx_ = 0;
+ }
+
+ vpx_svc_layer_id_t layer_id = {0};
+ vpx_codec_control(encoder_, VP9E_GET_SVC_LAYER_ID, &layer_id);
+
+ assert(num_temporal_layers_ > 0);
+ assert(num_spatial_layers_ > 0);
+ if (num_temporal_layers_ == 1) {
+ assert(layer_id.temporal_layer_id == 0);
+ vp9_info->temporal_idx = kNoTemporalIdx;
+ } else {
+ vp9_info->temporal_idx = layer_id.temporal_layer_id;
+ }
+ if (num_spatial_layers_ == 1) {
+ assert(layer_id.spatial_layer_id == 0);
+ vp9_info->spatial_idx = kNoSpatialIdx;
+ } else {
+ vp9_info->spatial_idx = layer_id.spatial_layer_id;
+ }
+ if (layer_id.spatial_layer_id != 0) {
+ vp9_info->ss_data_available = false;
+ }
+
+ if (vp9_info->flexible_mode) {
+ vp9_info->gof_idx = kNoGofIdx;
+ } else {
+ vp9_info->gof_idx =
+ static_cast<uint8_t>(gof_idx_++ % gof_.num_frames_in_gof);
+ }
+
+ // TODO(asapersson): this info has to be obtained from the encoder.
+ vp9_info->temporal_up_switch = true;
+
+ if (layer_id.spatial_layer_id == 0) {
+ picture_id_ = (picture_id_ + 1) & 0x7FFF;
+ // TODO(asapersson): this info has to be obtained from the encoder.
+ vp9_info->inter_layer_predicted = false;
+ } else {
+ // TODO(asapersson): this info has to be obtained from the encoder.
+ vp9_info->inter_layer_predicted = true;
+ }
+
+ vp9_info->picture_id = picture_id_;
+
+ if (!vp9_info->flexible_mode) {
+ if (layer_id.temporal_layer_id == 0 && layer_id.spatial_layer_id == 0) {
+ tl0_pic_idx_++;
+ }
+ vp9_info->tl0_pic_idx = tl0_pic_idx_;
+ }
+
+ // Always populate this, so that the packetizer can properly set the marker
+ // bit.
+ vp9_info->num_spatial_layers = num_spatial_layers_;
+ if (vp9_info->ss_data_available) {
+ vp9_info->spatial_layer_resolution_present = true;
+ for (size_t i = 0; i < vp9_info->num_spatial_layers; ++i) {
+ vp9_info->width[i] = codec_.width *
+ svc_internal_.svc_params.scaling_factor_num[i] /
+ svc_internal_.svc_params.scaling_factor_den[i];
+ vp9_info->height[i] = codec_.height *
+ svc_internal_.svc_params.scaling_factor_num[i] /
+ svc_internal_.svc_params.scaling_factor_den[i];
+ }
+ if (!vp9_info->flexible_mode) {
+ vp9_info->gof.CopyGofInfoVP9(gof_);
+ }
+ }
}
-int VP9EncoderImpl::GetEncodedPartitions(const VideoFrame& input_image) {
- vpx_codec_iter_t iter = NULL;
+int VP9EncoderImpl::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) {
encoded_image_._length = 0;
encoded_image_._frameType = kDeltaFrame;
RTPFragmentationHeader frag_info;
@@ -339,44 +573,33 @@ int VP9EncoderImpl::GetEncodedPartitions(const VideoFrame& input_image) {
frag_info.VerifyAndAllocateFragmentationHeader(1);
int part_idx = 0;
CodecSpecificInfo codec_specific;
- const vpx_codec_cx_pkt_t *pkt = NULL;
- while ((pkt = vpx_codec_get_cx_data(encoder_, &iter)) != NULL) {
- switch (pkt->kind) {
- case VPX_CODEC_CX_FRAME_PKT: {
- memcpy(&encoded_image_._buffer[encoded_image_._length],
- pkt->data.frame.buf,
- pkt->data.frame.sz);
- frag_info.fragmentationOffset[part_idx] = encoded_image_._length;
- frag_info.fragmentationLength[part_idx] =
- static_cast<uint32_t>(pkt->data.frame.sz);
- frag_info.fragmentationPlType[part_idx] = 0;
- frag_info.fragmentationTimeDiff[part_idx] = 0;
- encoded_image_._length += static_cast<uint32_t>(pkt->data.frame.sz);
- assert(encoded_image_._length <= encoded_image_._size);
- break;
- }
- default: {
- break;
- }
- }
- // End of frame.
- if ((pkt->data.frame.flags & VPX_FRAME_IS_FRAGMENT) == 0) {
- // Check if encoded frame is a key frame.
- if (pkt->data.frame.flags & VPX_FRAME_IS_KEY) {
- encoded_image_._frameType = kKeyFrame;
- }
- PopulateCodecSpecific(&codec_specific, *pkt, input_image.timestamp());
- break;
- }
+
+ assert(pkt->kind == VPX_CODEC_CX_FRAME_PKT);
+ memcpy(&encoded_image_._buffer[encoded_image_._length], pkt->data.frame.buf,
+ pkt->data.frame.sz);
+ frag_info.fragmentationOffset[part_idx] = encoded_image_._length;
+ frag_info.fragmentationLength[part_idx] =
+ static_cast<uint32_t>(pkt->data.frame.sz);
+ frag_info.fragmentationPlType[part_idx] = 0;
+ frag_info.fragmentationTimeDiff[part_idx] = 0;
+ encoded_image_._length += static_cast<uint32_t>(pkt->data.frame.sz);
+ assert(encoded_image_._length <= encoded_image_._size);
+
+ // End of frame.
+ // Check if encoded frame is a key frame.
+ if (pkt->data.frame.flags & VPX_FRAME_IS_KEY) {
+ encoded_image_._frameType = kKeyFrame;
}
+ PopulateCodecSpecific(&codec_specific, *pkt, input_image_->timestamp());
+
if (encoded_image_._length > 0) {
TRACE_COUNTER1("webrtc", "EncodedFrameSize", encoded_image_._length);
- encoded_image_._timeStamp = input_image.timestamp();
- encoded_image_.capture_time_ms_ = input_image.render_time_ms();
+ encoded_image_._timeStamp = input_image_->timestamp();
+ encoded_image_.capture_time_ms_ = input_image_->render_time_ms();
encoded_image_._encodedHeight = raw_->d_h;
encoded_image_._encodedWidth = raw_->d_w;
encoded_complete_callback_->Encoded(encoded_image_, &codec_specific,
- &frag_info);
+ &frag_info);
}
return WEBRTC_VIDEO_CODEC_OK;
}
@@ -524,7 +747,6 @@ int VP9DecoderImpl::ReturnFrame(const vpx_image_t* img, uint32_t timestamp) {
rtc::scoped_refptr<WrappedI420Buffer> img_wrapped_buffer(
new rtc::RefCountedObject<webrtc::WrappedI420Buffer>(
img->d_w, img->d_h,
- img->d_w, img->d_h,
img->planes[VPX_PLANE_Y], img->stride[VPX_PLANE_Y],
img->planes[VPX_PLANE_U], img->stride[VPX_PLANE_U],
img->planes[VPX_PLANE_V], img->stride[VPX_PLANE_V],
diff --git a/chromium/third_party/webrtc/modules/video_coding/codecs/vp9/vp9_impl.h b/chromium/third_party/webrtc/modules/video_coding/codecs/vp9/vp9_impl.h
index 5775952d60c..c164a63980f 100644
--- a/chromium/third_party/webrtc/modules/video_coding/codecs/vp9/vp9_impl.h
+++ b/chromium/third_party/webrtc/modules/video_coding/codecs/vp9/vp9_impl.h
@@ -15,6 +15,7 @@
#include "webrtc/modules/video_coding/codecs/vp9/include/vp9.h"
#include "webrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.h"
+#include "vpx/svc_context.h"
#include "vpx/vpx_decoder.h"
#include "vpx/vpx_encoder.h"
@@ -55,7 +56,13 @@ class VP9EncoderImpl : public VP9Encoder {
const vpx_codec_cx_pkt& pkt,
uint32_t timestamp);
- int GetEncodedPartitions(const VideoFrame& input_image);
+ bool SetSvcRates();
+
+ virtual int GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt);
+
+ // Callback function for outputting packets per spatial layer.
+ static void EncoderOutputCodedPacketCallback(vpx_codec_cx_pkt* pkt,
+ void* user_data);
// Determine maximum target for Intra frames
//
@@ -76,6 +83,14 @@ class VP9EncoderImpl : public VP9Encoder {
vpx_codec_ctx_t* encoder_;
vpx_codec_enc_cfg_t* config_;
vpx_image_t* raw_;
+ SvcInternal_t svc_internal_;
+ const VideoFrame* input_image_;
+ GofInfoVP9 gof_; // Contains each frame's temporal information for
+ // non-flexible mode.
+ uint8_t tl0_pic_idx_; // Only used in non-flexible mode.
+ size_t gof_idx_; // Only used in non-flexible mode.
+ uint8_t num_temporal_layers_;
+ uint8_t num_spatial_layers_;
};
diff --git a/chromium/third_party/webrtc/modules/video_coding/main/interface/video_coding_defines.h b/chromium/third_party/webrtc/modules/video_coding/main/interface/video_coding_defines.h
index 259e75e6a8d..fd38d644156 100644
--- a/chromium/third_party/webrtc/modules/video_coding/main/interface/video_coding_defines.h
+++ b/chromium/third_party/webrtc/modules/video_coding/main/interface/video_coding_defines.h
@@ -40,13 +40,9 @@ enum { kDefaultStartBitrateKbps = 300 };
enum VCMVideoProtection {
kProtectionNone,
- kProtectionNack, // Both send-side and receive-side
- kProtectionNackSender, // Send-side only
- kProtectionNackReceiver, // Receive-side only
+ kProtectionNack,
kProtectionFEC,
kProtectionNackFEC,
- kProtectionKeyOnLoss,
- kProtectionKeyOnKeyLoss,
};
enum VCMTemporalDecimation {
@@ -80,7 +76,7 @@ class VCMReceiveCallback {
return -1;
}
// Called when the current receive codec changes.
- virtual void IncomingCodecChanged(const VideoCodec& codec) {}
+ virtual void OnIncomingPayloadType(int payload_type) {}
protected:
virtual ~VCMReceiveCallback() {
@@ -182,6 +178,8 @@ class VCMQMSettingsCallback {
const uint32_t width,
const uint32_t height) = 0;
+ virtual void SetTargetFramerate(int frame_rate) = 0;
+
protected:
virtual ~VCMQMSettingsCallback() {
}
diff --git a/chromium/third_party/webrtc/modules/video_coding/main/source/codec_database.cc b/chromium/third_party/webrtc/modules/video_coding/main/source/codec_database.cc
index 5f899368b17..e27314d535c 100644
--- a/chromium/third_party/webrtc/modules/video_coding/main/source/codec_database.cc
+++ b/chromium/third_party/webrtc/modules/video_coding/main/source/codec_database.cc
@@ -61,7 +61,9 @@ VideoCodecVP9 VideoEncoder::GetDefaultVp9Settings() {
vp9_settings.frameDroppingOn = true;
vp9_settings.keyFrameInterval = 3000;
vp9_settings.adaptiveQpMode = true;
-
+ vp9_settings.automaticResizeOn = false;
+ vp9_settings.numberOfSpatialLayers = 1;
+ vp9_settings.flexibleMode = false;
return vp9_settings;
}
@@ -240,15 +242,15 @@ bool VCMCodecDataBase::SetSendCodec(
int number_of_cores,
size_t max_payload_size,
VCMEncodedFrameCallback* encoded_frame_callback) {
- DCHECK(send_codec);
+ RTC_DCHECK(send_codec);
if (max_payload_size == 0) {
max_payload_size = kDefaultPayloadSize;
}
- DCHECK_GE(number_of_cores, 1);
- DCHECK_GE(send_codec->plType, 1);
+ RTC_DCHECK_GE(number_of_cores, 1);
+ RTC_DCHECK_GE(send_codec->plType, 1);
// Make sure the start bit rate is sane...
- DCHECK_LE(send_codec->startBitrate, 1000000u);
- DCHECK(send_codec->codecType != kVideoCodecUnknown);
+ RTC_DCHECK_LE(send_codec->startBitrate, 1000000u);
+ RTC_DCHECK(send_codec->codecType != kVideoCodecUnknown);
bool reset_required = pending_encoder_reset_;
if (number_of_cores_ != number_of_cores) {
number_of_cores_ = number_of_cores;
@@ -580,7 +582,7 @@ VCMGenericDecoder* VCMCodecDataBase::GetDecoder(
return NULL;
}
VCMReceiveCallback* callback = decoded_frame_callback->UserReceiveCallback();
- if (callback) callback->IncomingCodecChanged(receive_codec_);
+ if (callback) callback->OnIncomingPayloadType(receive_codec_.plType);
if (ptr_decoder_->RegisterDecodeCompleteCallback(decoded_frame_callback)
< 0) {
ReleaseDecoder(ptr_decoder_);
diff --git a/chromium/third_party/webrtc/modules/video_coding/main/source/encoded_frame.cc b/chromium/third_party/webrtc/modules/video_coding/main/source/encoded_frame.cc
index 2830399dd6e..0fa44250d7e 100644
--- a/chromium/third_party/webrtc/modules/video_coding/main/source/encoded_frame.cc
+++ b/chromium/third_party/webrtc/modules/video_coding/main/source/encoded_frame.cc
@@ -132,6 +132,67 @@ void VCMEncodedFrame::CopyCodecSpecific(const RTPVideoHeader* header)
}
break;
}
+ case kRtpVideoVp9: {
+ if (_codecSpecificInfo.codecType != kVideoCodecVP9) {
+ // This is the first packet for this frame.
+ _codecSpecificInfo.codecSpecific.VP9.picture_id = -1;
+ _codecSpecificInfo.codecSpecific.VP9.temporal_idx = 0;
+ _codecSpecificInfo.codecSpecific.VP9.spatial_idx = 0;
+ _codecSpecificInfo.codecSpecific.VP9.gof_idx = 0;
+ _codecSpecificInfo.codecSpecific.VP9.inter_layer_predicted = false;
+ _codecSpecificInfo.codecSpecific.VP9.tl0_pic_idx = -1;
+ _codecSpecificInfo.codecType = kVideoCodecVP9;
+ }
+ _codecSpecificInfo.codecSpecific.VP9.inter_pic_predicted =
+ header->codecHeader.VP9.inter_pic_predicted;
+ _codecSpecificInfo.codecSpecific.VP9.flexible_mode =
+ header->codecHeader.VP9.flexible_mode;
+ _codecSpecificInfo.codecSpecific.VP9.ss_data_available =
+ header->codecHeader.VP9.ss_data_available;
+ if (header->codecHeader.VP9.picture_id != kNoPictureId) {
+ _codecSpecificInfo.codecSpecific.VP9.picture_id =
+ header->codecHeader.VP9.picture_id;
+ }
+ if (header->codecHeader.VP9.tl0_pic_idx != kNoTl0PicIdx) {
+ _codecSpecificInfo.codecSpecific.VP9.tl0_pic_idx =
+ header->codecHeader.VP9.tl0_pic_idx;
+ }
+ if (header->codecHeader.VP9.temporal_idx != kNoTemporalIdx) {
+ _codecSpecificInfo.codecSpecific.VP9.temporal_idx =
+ header->codecHeader.VP9.temporal_idx;
+ _codecSpecificInfo.codecSpecific.VP9.temporal_up_switch =
+ header->codecHeader.VP9.temporal_up_switch;
+ }
+ if (header->codecHeader.VP9.spatial_idx != kNoSpatialIdx) {
+ _codecSpecificInfo.codecSpecific.VP9.spatial_idx =
+ header->codecHeader.VP9.spatial_idx;
+ _codecSpecificInfo.codecSpecific.VP9.inter_layer_predicted =
+ header->codecHeader.VP9.inter_layer_predicted;
+ }
+ if (header->codecHeader.VP9.gof_idx != kNoGofIdx) {
+ _codecSpecificInfo.codecSpecific.VP9.gof_idx =
+ header->codecHeader.VP9.gof_idx;
+ }
+ if (header->codecHeader.VP9.ss_data_available) {
+ _codecSpecificInfo.codecSpecific.VP9.num_spatial_layers =
+ header->codecHeader.VP9.num_spatial_layers;
+ _codecSpecificInfo.codecSpecific.VP9
+ .spatial_layer_resolution_present =
+ header->codecHeader.VP9.spatial_layer_resolution_present;
+ if (header->codecHeader.VP9.spatial_layer_resolution_present) {
+ for (size_t i = 0; i < header->codecHeader.VP9.num_spatial_layers;
+ ++i) {
+ _codecSpecificInfo.codecSpecific.VP9.width[i] =
+ header->codecHeader.VP9.width[i];
+ _codecSpecificInfo.codecSpecific.VP9.height[i] =
+ header->codecHeader.VP9.height[i];
+ }
+ }
+ _codecSpecificInfo.codecSpecific.VP9.gof.CopyGofInfoVP9(
+ header->codecHeader.VP9.gof);
+ }
+ break;
+ }
case kRtpVideoH264: {
_codecSpecificInfo.codecType = kVideoCodecH264;
break;
diff --git a/chromium/third_party/webrtc/modules/video_coding/main/source/frame_buffer.cc b/chromium/third_party/webrtc/modules/video_coding/main/source/frame_buffer.cc
index 8bd375893d9..82a755ab4f4 100644
--- a/chromium/third_party/webrtc/modules/video_coding/main/source/frame_buffer.cc
+++ b/chromium/third_party/webrtc/modules/video_coding/main/source/frame_buffer.cc
@@ -154,7 +154,7 @@ VCMFrameBuffer::InsertPacket(const VCMPacket& packet,
// frame (I-frame or IDR frame in H.264 (AVC), or an IRAP picture in H.265
// (HEVC)).
if (packet.markerBit) {
- DCHECK(!_rotation_set);
+ RTC_DCHECK(!_rotation_set);
_rotation = packet.codecSpecificHeader.rotation;
_rotation_set = true;
}
diff --git a/chromium/third_party/webrtc/modules/video_coding/main/source/generic_decoder.cc b/chromium/third_party/webrtc/modules/video_coding/main/source/generic_decoder.cc
index 1cd67dfddfb..f874e163c83 100644
--- a/chromium/third_party/webrtc/modules/video_coding/main/source/generic_decoder.cc
+++ b/chromium/third_party/webrtc/modules/video_coding/main/source/generic_decoder.cc
@@ -53,8 +53,7 @@ int32_t VCMDecodedFrameCallback::Decoded(VideoFrame& decodedImage) {
VCMReceiveCallback* callback;
{
CriticalSectionScoped cs(_critSect);
- frameInfo = static_cast<VCMFrameInformation*>(
- _timestampMap.Pop(decodedImage.timestamp()));
+ frameInfo = _timestampMap.Pop(decodedImage.timestamp());
callback = _receiveCallback;
}
@@ -103,10 +102,10 @@ uint64_t VCMDecodedFrameCallback::LastReceivedPictureID() const
return _lastReceivedPictureID;
}
-int32_t VCMDecodedFrameCallback::Map(uint32_t timestamp, VCMFrameInformation* frameInfo)
-{
- CriticalSectionScoped cs(_critSect);
- return _timestampMap.Add(timestamp, frameInfo);
+void VCMDecodedFrameCallback::Map(uint32_t timestamp,
+ VCMFrameInformation* frameInfo) {
+ CriticalSectionScoped cs(_critSect);
+ _timestampMap.Add(timestamp, frameInfo);
}
int32_t VCMDecodedFrameCallback::Pop(uint32_t timestamp)
diff --git a/chromium/third_party/webrtc/modules/video_coding/main/source/generic_decoder.h b/chromium/third_party/webrtc/modules/video_coding/main/source/generic_decoder.h
index ce649988ef4..09929e64f47 100644
--- a/chromium/third_party/webrtc/modules/video_coding/main/source/generic_decoder.h
+++ b/chromium/third_party/webrtc/modules/video_coding/main/source/generic_decoder.h
@@ -46,7 +46,7 @@ public:
uint64_t LastReceivedPictureID() const;
- int32_t Map(uint32_t timestamp, VCMFrameInformation* frameInfo);
+ void Map(uint32_t timestamp, VCMFrameInformation* frameInfo);
int32_t Pop(uint32_t timestamp);
private:
diff --git a/chromium/third_party/webrtc/modules/video_coding/main/source/generic_encoder.cc b/chromium/third_party/webrtc/modules/video_coding/main/source/generic_encoder.cc
index f2cdd599ae8..31c3f1715f8 100644
--- a/chromium/third_party/webrtc/modules/video_coding/main/source/generic_encoder.cc
+++ b/chromium/third_party/webrtc/modules/video_coding/main/source/generic_encoder.cc
@@ -21,7 +21,7 @@ namespace {
// Map information from info into rtp. If no relevant information is found
// in info, rtp is set to NULL.
void CopyCodecSpecific(const CodecSpecificInfo* info, RTPVideoHeader* rtp) {
- DCHECK(info);
+ RTC_DCHECK(info);
switch (info->codecType) {
case kVideoCodecVP8: {
rtp->codec = kRtpVideoVp8;
@@ -36,6 +36,43 @@ void CopyCodecSpecific(const CodecSpecificInfo* info, RTPVideoHeader* rtp) {
rtp->simulcastIdx = info->codecSpecific.VP8.simulcastIdx;
return;
}
+ case kVideoCodecVP9: {
+ rtp->codec = kRtpVideoVp9;
+ rtp->codecHeader.VP9.InitRTPVideoHeaderVP9();
+ rtp->codecHeader.VP9.inter_pic_predicted =
+ info->codecSpecific.VP9.inter_pic_predicted;
+ rtp->codecHeader.VP9.flexible_mode =
+ info->codecSpecific.VP9.flexible_mode;
+ rtp->codecHeader.VP9.ss_data_available =
+ info->codecSpecific.VP9.ss_data_available;
+ rtp->codecHeader.VP9.picture_id = info->codecSpecific.VP9.picture_id;
+ rtp->codecHeader.VP9.tl0_pic_idx = info->codecSpecific.VP9.tl0_pic_idx;
+ rtp->codecHeader.VP9.temporal_idx = info->codecSpecific.VP9.temporal_idx;
+ rtp->codecHeader.VP9.spatial_idx = info->codecSpecific.VP9.spatial_idx;
+ rtp->codecHeader.VP9.temporal_up_switch =
+ info->codecSpecific.VP9.temporal_up_switch;
+ rtp->codecHeader.VP9.inter_layer_predicted =
+ info->codecSpecific.VP9.inter_layer_predicted;
+ rtp->codecHeader.VP9.gof_idx = info->codecSpecific.VP9.gof_idx;
+
+ // Packetizer needs to know the number of spatial layers to correctly set
+ // the marker bit, even when the number won't be written in the packet.
+ rtp->codecHeader.VP9.num_spatial_layers =
+ info->codecSpecific.VP9.num_spatial_layers;
+ if (info->codecSpecific.VP9.ss_data_available) {
+ rtp->codecHeader.VP9.spatial_layer_resolution_present =
+ info->codecSpecific.VP9.spatial_layer_resolution_present;
+ if (info->codecSpecific.VP9.spatial_layer_resolution_present) {
+ for (size_t i = 0; i < info->codecSpecific.VP9.num_spatial_layers;
+ ++i) {
+ rtp->codecHeader.VP9.width[i] = info->codecSpecific.VP9.width[i];
+ rtp->codecHeader.VP9.height[i] = info->codecSpecific.VP9.height[i];
+ }
+ }
+ rtp->codecHeader.VP9.gof.CopyGofInfoVP9(info->codecSpecific.VP9.gof);
+ }
+ return;
+ }
case kVideoCodecH264:
rtp->codec = kRtpVideoH264;
return;
@@ -214,6 +251,10 @@ bool VCMGenericEncoder::SupportsNativeHandle() const {
return encoder_->SupportsNativeHandle();
}
+int VCMGenericEncoder::GetTargetFramerate() {
+ return encoder_->GetTargetFramerate();
+}
+
/***************************
* Callback Implementation
***************************/
diff --git a/chromium/third_party/webrtc/modules/video_coding/main/source/generic_encoder.h b/chromium/third_party/webrtc/modules/video_coding/main/source/generic_encoder.h
index 862c06ba206..25235b6b467 100644
--- a/chromium/third_party/webrtc/modules/video_coding/main/source/generic_encoder.h
+++ b/chromium/third_party/webrtc/modules/video_coding/main/source/generic_encoder.h
@@ -140,6 +140,8 @@ public:
bool SupportsNativeHandle() const;
+ int GetTargetFramerate();
+
private:
VideoEncoder* const encoder_;
VideoEncoderRateObserver* const rate_observer_;
diff --git a/chromium/third_party/webrtc/modules/video_coding/main/source/jitter_buffer.cc b/chromium/third_party/webrtc/modules/video_coding/main/source/jitter_buffer.cc
index 9156cc1a61d..49c2325d80a 100644
--- a/chromium/third_party/webrtc/modules/video_coding/main/source/jitter_buffer.cc
+++ b/chromium/third_party/webrtc/modules/video_coding/main/source/jitter_buffer.cc
@@ -125,6 +125,8 @@ VCMJitterBuffer::VCMJitterBuffer(Clock* clock,
incomplete_frames_(),
last_decoded_state_(),
first_packet_since_reset_(true),
+ last_gof_timestamp_(0),
+ last_gof_valid_(false),
stats_callback_(NULL),
incoming_frame_rate_(0),
incoming_frame_count_(0),
@@ -220,6 +222,7 @@ void VCMJitterBuffer::Start() {
first_packet_since_reset_ = true;
rtt_ms_ = kDefaultRtt;
last_decoded_state_.Reset();
+ last_gof_valid_ = false;
}
void VCMJitterBuffer::Stop() {
@@ -227,6 +230,8 @@ void VCMJitterBuffer::Stop() {
UpdateHistograms();
running_ = false;
last_decoded_state_.Reset();
+ last_gof_valid_ = false;
+
// Make sure all frames are free and reset.
for (FrameList::iterator it = decodable_frames_.begin();
it != decodable_frames_.end(); ++it) {
@@ -257,6 +262,7 @@ void VCMJitterBuffer::Flush() {
decodable_frames_.Reset(&free_frames_);
incomplete_frames_.Reset(&free_frames_);
last_decoded_state_.Reset(); // TODO(mikhal): sync reset.
+ last_gof_valid_ = false;
num_consecutive_old_packets_ = 0;
// Also reset the jitter and delay estimates
jitter_estimate_.Reset();
@@ -586,6 +592,38 @@ VCMFrameBufferEnum VCMJitterBuffer::InsertPacket(const VCMPacket& packet,
return kOldPacket;
}
+ if (packet.codec == kVideoCodecVP9) {
+ // TODO(asapersson): Move this code to appropriate place.
+ // TODO(asapersson): Handle out of order GOF.
+ if (packet.codecSpecificHeader.codecHeader.VP9.flexible_mode) {
+ // TODO(asapersson): Add support for flexible mode.
+ return kGeneralError;
+ }
+ if (packet.codecSpecificHeader.codecHeader.VP9.ss_data_available) {
+ if (!last_gof_valid_ ||
+ IsNewerTimestamp(packet.timestamp, last_gof_timestamp_)) {
+ last_gof_.CopyGofInfoVP9(
+ packet.codecSpecificHeader.codecHeader.VP9.gof);
+ last_gof_timestamp_ = packet.timestamp;
+ last_gof_valid_ = true;
+ }
+ }
+ if (last_gof_valid_ &&
+ !packet.codecSpecificHeader.codecHeader.VP9.flexible_mode) {
+ uint8_t gof_idx = packet.codecSpecificHeader.codecHeader.VP9.gof_idx;
+ if (gof_idx != kNoGofIdx) {
+ if (gof_idx >= last_gof_.num_frames_in_gof) {
+ LOG(LS_WARNING) << "Incorrect gof_idx: " << gof_idx;
+ return kGeneralError;
+ }
+ RTPVideoTypeHeader* hdr = const_cast<RTPVideoTypeHeader*>(
+ &packet.codecSpecificHeader.codecHeader);
+ hdr->VP9.temporal_idx = last_gof_.temporal_idx[gof_idx];
+ hdr->VP9.temporal_up_switch = last_gof_.temporal_up_switch[gof_idx];
+ }
+ }
+ }
+
num_consecutive_old_packets_ = 0;
VCMFrameBuffer* frame;
diff --git a/chromium/third_party/webrtc/modules/video_coding/main/source/jitter_buffer.h b/chromium/third_party/webrtc/modules/video_coding/main/source/jitter_buffer.h
index 455ac2643cf..8a05f1ff178 100644
--- a/chromium/third_party/webrtc/modules/video_coding/main/source/jitter_buffer.h
+++ b/chromium/third_party/webrtc/modules/video_coding/main/source/jitter_buffer.h
@@ -307,6 +307,10 @@ class VCMJitterBuffer {
FrameList incomplete_frames_ GUARDED_BY(crit_sect_);
VCMDecodingState last_decoded_state_ GUARDED_BY(crit_sect_);
bool first_packet_since_reset_;
+ // Contains last received frame's temporal information for non-flexible mode.
+ GofInfoVP9 last_gof_;
+ uint32_t last_gof_timestamp_;
+ bool last_gof_valid_;
// Statistics.
VCMReceiveStatisticsCallback* stats_callback_ GUARDED_BY(crit_sect_);
@@ -356,7 +360,7 @@ class VCMJitterBuffer {
// average_packets_per_frame converges fast if we have fewer than this many
// frames.
int frame_counter_;
- DISALLOW_COPY_AND_ASSIGN(VCMJitterBuffer);
+ RTC_DISALLOW_COPY_AND_ASSIGN(VCMJitterBuffer);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/video_coding/main/source/jitter_buffer_common.h b/chromium/third_party/webrtc/modules/video_coding/main/source/jitter_buffer_common.h
index 49a5cb97d2c..97af78087af 100644
--- a/chromium/third_party/webrtc/modules/video_coding/main/source/jitter_buffer_common.h
+++ b/chromium/third_party/webrtc/modules/video_coding/main/source/jitter_buffer_common.h
@@ -26,11 +26,13 @@ enum { kPacketsPerFrameMultiplier = 5 };
enum { kFastConvergeThreshold = 5};
enum VCMJitterBufferEnum {
- kMaxConsecutiveOldFrames = 60,
- kMaxConsecutiveOldPackets = 300,
- kMaxPacketsInSession = 800,
- kBufferIncStepSizeBytes = 30000, // >20 packets.
- kMaxJBFrameSizeBytes = 4000000 // sanity don't go above 4Mbyte.
+ kMaxConsecutiveOldFrames = 60,
+ kMaxConsecutiveOldPackets = 300,
+ // TODO(sprang): Reduce this limit once codecs don't sometimes wildly
+ // overshoot bitrate target.
+ kMaxPacketsInSession = 1400, // Allows ~2MB frames.
+ kBufferIncStepSizeBytes = 30000, // >20 packets.
+ kMaxJBFrameSizeBytes = 4000000 // sanity don't go above 4Mbyte.
};
enum VCMFrameBufferEnum {
diff --git a/chromium/third_party/webrtc/modules/video_coding/main/source/media_opt_util.cc b/chromium/third_party/webrtc/modules/video_coding/main/source/media_opt_util.cc
index d929cbc35a3..51decbed97d 100644
--- a/chromium/third_party/webrtc/modules/video_coding/main/source/media_opt_util.cc
+++ b/chromium/third_party/webrtc/modules/video_coding/main/source/media_opt_util.cc
@@ -519,7 +519,6 @@ VCMFecMethod::UpdateParameters(const VCMProtectionParameters* parameters)
return true;
}
VCMLossProtectionLogic::VCMLossProtectionLogic(int64_t nowMs):
-_selectedMethod(NULL),
_currentParameters(),
_rtt(0),
_lossPr(0.0f),
@@ -548,25 +547,21 @@ VCMLossProtectionLogic::~VCMLossProtectionLogic()
void VCMLossProtectionLogic::SetMethod(
enum VCMProtectionMethodEnum newMethodType) {
- if (_selectedMethod != nullptr) {
- if (_selectedMethod->Type() == newMethodType)
- return;
- // Remove old method.
- delete _selectedMethod;
- }
+ if (_selectedMethod && _selectedMethod->Type() == newMethodType)
+ return;
switch(newMethodType) {
case kNack:
- _selectedMethod = new VCMNackMethod();
+ _selectedMethod.reset(new VCMNackMethod());
break;
case kFec:
- _selectedMethod = new VCMFecMethod();
+ _selectedMethod.reset(new VCMFecMethod());
break;
case kNackFec:
- _selectedMethod = new VCMNackFecMethod(kLowRttNackMs, -1);
+ _selectedMethod.reset(new VCMNackFecMethod(kLowRttNackMs, -1));
break;
case kNone:
- _selectedMethod = nullptr;
+ _selectedMethod.reset();
break;
}
UpdateMethod();
@@ -726,10 +721,8 @@ void VCMLossProtectionLogic::UpdateNumLayers(int numLayers) {
bool
VCMLossProtectionLogic::UpdateMethod()
{
- if (_selectedMethod == NULL)
- {
- return false;
- }
+ if (!_selectedMethod)
+ return false;
_currentParameters.rtt = _rtt;
_currentParameters.lossPr = _lossPr;
_currentParameters.bitRate = _bitRate;
@@ -748,11 +741,11 @@ VCMLossProtectionLogic::UpdateMethod()
VCMProtectionMethod*
VCMLossProtectionLogic::SelectedMethod() const
{
- return _selectedMethod;
+ return _selectedMethod.get();
}
VCMProtectionMethodEnum VCMLossProtectionLogic::SelectedType() const {
- return _selectedMethod == nullptr ? kNone : _selectedMethod->Type();
+ return _selectedMethod ? _selectedMethod->Type() : kNone;
}
void
@@ -773,11 +766,8 @@ VCMLossProtectionLogic::Reset(int64_t nowMs)
Release();
}
-void
-VCMLossProtectionLogic::Release()
-{
- delete _selectedMethod;
- _selectedMethod = NULL;
+void VCMLossProtectionLogic::Release() {
+ _selectedMethod.reset();
}
} // namespace media_optimization
diff --git a/chromium/third_party/webrtc/modules/video_coding/main/source/media_opt_util.h b/chromium/third_party/webrtc/modules/video_coding/main/source/media_opt_util.h
index 498238768f6..62d067ab3ad 100644
--- a/chromium/third_party/webrtc/modules/video_coding/main/source/media_opt_util.h
+++ b/chromium/third_party/webrtc/modules/video_coding/main/source/media_opt_util.h
@@ -15,6 +15,7 @@
#include <stdlib.h>
#include "webrtc/base/exp_filter.h"
+#include "webrtc/base/scoped_ptr.h"
#include "webrtc/modules/video_coding/main/source/internal_defines.h"
#include "webrtc/modules/video_coding/main/source/qm_select.h"
#include "webrtc/system_wrappers/interface/trace.h"
@@ -335,7 +336,7 @@ private:
// Sets the available loss protection methods.
void UpdateMaxLossHistory(uint8_t lossPr255, int64_t now);
uint8_t MaxFilteredLossPr(int64_t nowMs) const;
- VCMProtectionMethod* _selectedMethod;
+ rtc::scoped_ptr<VCMProtectionMethod> _selectedMethod;
VCMProtectionParameters _currentParameters;
int64_t _rtt;
float _lossPr;
diff --git a/chromium/third_party/webrtc/modules/video_coding/main/source/media_optimization.cc b/chromium/third_party/webrtc/modules/video_coding/main/source/media_optimization.cc
index 4dbdf44d588..7f60c6c999d 100644
--- a/chromium/third_party/webrtc/modules/video_coding/main/source/media_optimization.cc
+++ b/chromium/third_party/webrtc/modules/video_coding/main/source/media_optimization.cc
@@ -317,13 +317,8 @@ uint32_t MediaOptimization::SetTargetRates(
return target_bit_rate_;
}
-void MediaOptimization::EnableProtectionMethod(bool enable,
- VCMProtectionMethodEnum method) {
+void MediaOptimization::SetProtectionMethod(VCMProtectionMethodEnum method) {
CriticalSectionScoped lock(crit_sect_.get());
- if (!enable && loss_prot_logic_->SelectedType() != method)
- return;
- if (!enable)
- method = kNone;
loss_prot_logic_->SetMethod(method);
}
diff --git a/chromium/third_party/webrtc/modules/video_coding/main/source/media_optimization.h b/chromium/third_party/webrtc/modules/video_coding/main/source/media_optimization.h
index e0010db4e2b..c3bb3a85994 100644
--- a/chromium/third_party/webrtc/modules/video_coding/main/source/media_optimization.h
+++ b/chromium/third_party/webrtc/modules/video_coding/main/source/media_optimization.h
@@ -62,7 +62,7 @@ class MediaOptimization {
VCMProtectionCallback* protection_callback,
VCMQMSettingsCallback* qmsettings_callback);
- void EnableProtectionMethod(bool enable, VCMProtectionMethodEnum method);
+ void SetProtectionMethod(VCMProtectionMethodEnum method);
void EnableQM(bool enable);
void EnableFrameDropper(bool enable);
diff --git a/chromium/third_party/webrtc/modules/video_coding/main/source/packet.cc b/chromium/third_party/webrtc/modules/video_coding/main/source/packet.cc
index c9eb482ed0e..88838f35f8a 100644
--- a/chromium/third_party/webrtc/modules/video_coding/main/source/packet.cc
+++ b/chromium/third_party/webrtc/modules/video_coding/main/source/packet.cc
@@ -118,6 +118,18 @@ void VCMPacket::CopyCodecSpecifics(const RTPVideoHeader& videoHeader) {
codec = kVideoCodecVP8;
return;
+ case kRtpVideoVp9:
+ if (isFirstPacket && markerBit)
+ completeNALU = kNaluComplete;
+ else if (isFirstPacket)
+ completeNALU = kNaluStart;
+ else if (markerBit)
+ completeNALU = kNaluEnd;
+ else
+ completeNALU = kNaluIncomplete;
+
+ codec = kVideoCodecVP9;
+ return;
case kRtpVideoH264:
isFirstPacket = videoHeader.isFirstPacket;
if (isFirstPacket)
diff --git a/chromium/third_party/webrtc/modules/video_coding/main/source/receiver_unittest.cc b/chromium/third_party/webrtc/modules/video_coding/main/source/receiver_unittest.cc
index dc63e810029..eb5e4718ce7 100644
--- a/chromium/third_party/webrtc/modules/video_coding/main/source/receiver_unittest.cc
+++ b/chromium/third_party/webrtc/modules/video_coding/main/source/receiver_unittest.cc
@@ -348,7 +348,7 @@ class SimulatedClockWithFrames : public SimulatedClock {
bool frame_injected = false;
while (!timestamps_.empty() &&
timestamps_.front().arrive_time <= end_time) {
- DCHECK(timestamps_.front().arrive_time >= start_time);
+ RTC_DCHECK(timestamps_.front().arrive_time >= start_time);
SimulatedClock::AdvanceTimeMicroseconds(timestamps_.front().arrive_time -
TimeInMicroseconds());
@@ -376,7 +376,7 @@ class SimulatedClockWithFrames : public SimulatedClock {
size_t size) {
int64_t previous_arrive_timestamp = 0;
for (size_t i = 0; i < size; i++) {
- CHECK(arrive_timestamps[i] >= previous_arrive_timestamp);
+ RTC_CHECK(arrive_timestamps[i] >= previous_arrive_timestamp);
timestamps_.push(TimestampPair(arrive_timestamps[i] * 1000,
render_timestamps[i] * 1000));
previous_arrive_timestamp = arrive_timestamps[i];
diff --git a/chromium/third_party/webrtc/modules/video_coding/main/source/session_info.cc b/chromium/third_party/webrtc/modules/video_coding/main/source/session_info.cc
index 8eba432643c..bf6bcb3c00e 100644
--- a/chromium/third_party/webrtc/modules/video_coding/main/source/session_info.cc
+++ b/chromium/third_party/webrtc/modules/video_coding/main/source/session_info.cc
@@ -59,31 +59,52 @@ int VCMSessionInfo::HighSequenceNumber() const {
}
int VCMSessionInfo::PictureId() const {
- if (packets_.empty() ||
- packets_.front().codecSpecificHeader.codec != kRtpVideoVp8)
+ if (packets_.empty())
return kNoPictureId;
- return packets_.front().codecSpecificHeader.codecHeader.VP8.pictureId;
+ if (packets_.front().codecSpecificHeader.codec == kRtpVideoVp8) {
+ return packets_.front().codecSpecificHeader.codecHeader.VP8.pictureId;
+ } else if (packets_.front().codecSpecificHeader.codec == kRtpVideoVp9) {
+ return packets_.front().codecSpecificHeader.codecHeader.VP9.picture_id;
+ } else {
+ return kNoPictureId;
+ }
}
int VCMSessionInfo::TemporalId() const {
- if (packets_.empty() ||
- packets_.front().codecSpecificHeader.codec != kRtpVideoVp8)
+ if (packets_.empty())
return kNoTemporalIdx;
- return packets_.front().codecSpecificHeader.codecHeader.VP8.temporalIdx;
+ if (packets_.front().codecSpecificHeader.codec == kRtpVideoVp8) {
+ return packets_.front().codecSpecificHeader.codecHeader.VP8.temporalIdx;
+ } else if (packets_.front().codecSpecificHeader.codec == kRtpVideoVp9) {
+ return packets_.front().codecSpecificHeader.codecHeader.VP9.temporal_idx;
+ } else {
+ return kNoTemporalIdx;
+ }
}
bool VCMSessionInfo::LayerSync() const {
- if (packets_.empty() ||
- packets_.front().codecSpecificHeader.codec != kRtpVideoVp8)
+ if (packets_.empty())
+ return false;
+ if (packets_.front().codecSpecificHeader.codec == kRtpVideoVp8) {
+ return packets_.front().codecSpecificHeader.codecHeader.VP8.layerSync;
+ } else if (packets_.front().codecSpecificHeader.codec == kRtpVideoVp9) {
+ return
+ packets_.front().codecSpecificHeader.codecHeader.VP9.temporal_up_switch;
+ } else {
return false;
- return packets_.front().codecSpecificHeader.codecHeader.VP8.layerSync;
+ }
}
int VCMSessionInfo::Tl0PicId() const {
- if (packets_.empty() ||
- packets_.front().codecSpecificHeader.codec != kRtpVideoVp8)
+ if (packets_.empty())
return kNoTl0PicIdx;
- return packets_.front().codecSpecificHeader.codecHeader.VP8.tl0PicIdx;
+ if (packets_.front().codecSpecificHeader.codec == kRtpVideoVp8) {
+ return packets_.front().codecSpecificHeader.codecHeader.VP8.tl0PicIdx;
+ } else if (packets_.front().codecSpecificHeader.codec == kRtpVideoVp9) {
+ return packets_.front().codecSpecificHeader.codecHeader.VP9.tl0_pic_idx;
+ } else {
+ return kNoTl0PicIdx;
+ }
}
bool VCMSessionInfo::NonReference() const {
@@ -133,6 +154,8 @@ size_t VCMSessionInfo::InsertBuffer(uint8_t* frame_buffer,
// We handle H.264 STAP-A packets in a special way as we need to remove the
// two length bytes between each NAL unit, and potentially add start codes.
+ // TODO(pbos): Remove H264 parsing from this step and use a fragmentation
+ // header supplied by the H264 depacketizer.
const size_t kH264NALHeaderLengthInBytes = 1;
const size_t kLengthFieldLength = 2;
if (packet.codecSpecificHeader.codec == kRtpVideoH264 &&
diff --git a/chromium/third_party/webrtc/modules/video_coding/main/source/test/stream_generator.h b/chromium/third_party/webrtc/modules/video_coding/main/source/test/stream_generator.h
index e3a2e79cd37..7902d167061 100644
--- a/chromium/third_party/webrtc/modules/video_coding/main/source/test/stream_generator.h
+++ b/chromium/third_party/webrtc/modules/video_coding/main/source/test/stream_generator.h
@@ -64,7 +64,7 @@ class StreamGenerator {
int64_t start_time_;
uint8_t packet_buffer_[kMaxPacketSize];
- DISALLOW_COPY_AND_ASSIGN(StreamGenerator);
+ RTC_DISALLOW_COPY_AND_ASSIGN(StreamGenerator);
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/video_coding/main/source/timestamp_map.cc b/chromium/third_party/webrtc/modules/video_coding/main/source/timestamp_map.cc
index f3806bb87fe..c68a5af7ba4 100644
--- a/chromium/third_party/webrtc/modules/video_coding/main/source/timestamp_map.cc
+++ b/chromium/third_party/webrtc/modules/video_coding/main/source/timestamp_map.cc
@@ -10,90 +10,56 @@
#include <assert.h>
#include <stdlib.h>
+
+#include "webrtc/modules/interface/module_common_types.h"
#include "webrtc/modules/video_coding/main/source/timestamp_map.h"
namespace webrtc {
-// Constructor. Optional parameter specifies maximum number of
-// coexisting timers.
-VCMTimestampMap::VCMTimestampMap(int32_t length):
- _nextAddIx(0),
- _nextPopIx(0)
-{
- if (length <= 0)
- {
- // default
- length = 10;
- }
-
- _map = new VCMTimestampDataTuple[length];
- _length = length;
-}
-
-// Destructor.
-VCMTimestampMap::~VCMTimestampMap()
-{
- delete [] _map;
+VCMTimestampMap::VCMTimestampMap(size_t capacity)
+ : ring_buffer_(new TimestampDataTuple[capacity]),
+ capacity_(capacity),
+ next_add_idx_(0),
+ next_pop_idx_(0) {
}
-// Empty the list of timers.
-void
-VCMTimestampMap::Reset()
-{
- _nextAddIx = 0;
- _nextPopIx = 0;
+VCMTimestampMap::~VCMTimestampMap() {
}
-int32_t
-VCMTimestampMap::Add(uint32_t timestamp, void* data)
-{
- _map[_nextAddIx].timestamp = timestamp;
- _map[_nextAddIx].data = data;
- _nextAddIx = (_nextAddIx + 1) % _length;
+void VCMTimestampMap::Add(uint32_t timestamp, VCMFrameInformation* data) {
+ ring_buffer_[next_add_idx_].timestamp = timestamp;
+ ring_buffer_[next_add_idx_].data = data;
+ next_add_idx_ = (next_add_idx_ + 1) % capacity_;
- if (_nextAddIx == _nextPopIx)
- {
- // Circular list full; forget oldest entry
- _nextPopIx = (_nextPopIx + 1) % _length;
- return -1;
- }
- return 0;
+ if (next_add_idx_ == next_pop_idx_) {
+ // Circular list full; forget oldest entry.
+ next_pop_idx_ = (next_pop_idx_ + 1) % capacity_;
+ }
}
-void*
-VCMTimestampMap::Pop(uint32_t timestamp)
-{
- while (!IsEmpty())
- {
- if (_map[_nextPopIx].timestamp == timestamp)
- {
- // found start time for this timestamp
- void* data = _map[_nextPopIx].data;
- _map[_nextPopIx].data = NULL;
- _nextPopIx = (_nextPopIx + 1) % _length;
- return data;
- }
- else if (_map[_nextPopIx].timestamp > timestamp)
- {
- // the timestamp we are looking for is not in the list
- assert(_nextPopIx < _length && _nextPopIx >= 0);
- return NULL;
- }
-
- // not in this position, check next (and forget this position)
- _nextPopIx = (_nextPopIx + 1) % _length;
+VCMFrameInformation* VCMTimestampMap::Pop(uint32_t timestamp) {
+ while (!IsEmpty()) {
+ if (ring_buffer_[next_pop_idx_].timestamp == timestamp) {
+ // Found start time for this timestamp.
+ VCMFrameInformation* data = ring_buffer_[next_pop_idx_].data;
+ ring_buffer_[next_pop_idx_].data = nullptr;
+ next_pop_idx_ = (next_pop_idx_ + 1) % capacity_;
+ return data;
+ } else if (IsNewerTimestamp(ring_buffer_[next_pop_idx_].timestamp,
+ timestamp)) {
+ // The timestamp we are looking for is not in the list.
+ return nullptr;
}
- // could not find matching timestamp in list
- assert(_nextPopIx < _length && _nextPopIx >= 0);
- return NULL;
-}
+ // Not in this position, check next (and forget this position).
+ next_pop_idx_ = (next_pop_idx_ + 1) % capacity_;
+ }
-// Check if no timers are currently running
-bool
-VCMTimestampMap::IsEmpty() const
-{
- return (_nextAddIx == _nextPopIx);
+ // Could not find matching timestamp in list.
+ return nullptr;
}
+bool VCMTimestampMap::IsEmpty() const {
+ return (next_add_idx_ == next_pop_idx_);
+}
}
diff --git a/chromium/third_party/webrtc/modules/video_coding/main/source/timestamp_map.h b/chromium/third_party/webrtc/modules/video_coding/main/source/timestamp_map.h
index 14e06290ff9..3d6f1bca0fb 100644
--- a/chromium/third_party/webrtc/modules/video_coding/main/source/timestamp_map.h
+++ b/chromium/third_party/webrtc/modules/video_coding/main/source/timestamp_map.h
@@ -11,40 +11,35 @@
#ifndef WEBRTC_MODULES_VIDEO_CODING_TIMESTAMP_MAP_H_
#define WEBRTC_MODULES_VIDEO_CODING_TIMESTAMP_MAP_H_
+#include "webrtc/base/scoped_ptr.h"
#include "webrtc/typedefs.h"
-namespace webrtc
-{
+namespace webrtc {
-struct VCMTimestampDataTuple
-{
- uint32_t timestamp;
- void* data;
-};
-
-class VCMTimestampMap
-{
-public:
- // Constructor. Optional parameter specifies maximum number of
- // timestamps in map.
- VCMTimestampMap(const int32_t length = 10);
+struct VCMFrameInformation;
- // Destructor.
- ~VCMTimestampMap();
+class VCMTimestampMap {
+ public:
+ explicit VCMTimestampMap(size_t capacity);
+ ~VCMTimestampMap();
- // Empty the map
- void Reset();
+ // Empty the map.
+ void Reset();
- int32_t Add(uint32_t timestamp, void* data);
- void* Pop(uint32_t timestamp);
+ void Add(uint32_t timestamp, VCMFrameInformation* data);
+ VCMFrameInformation* Pop(uint32_t timestamp);
-private:
- bool IsEmpty() const;
+ private:
+ struct TimestampDataTuple {
+ uint32_t timestamp;
+ VCMFrameInformation* data;
+ };
+ bool IsEmpty() const;
- VCMTimestampDataTuple* _map;
- int32_t _nextAddIx;
- int32_t _nextPopIx;
- int32_t _length;
+ rtc::scoped_ptr<TimestampDataTuple[]> ring_buffer_;
+ const size_t capacity_;
+ size_t next_add_idx_;
+ size_t next_pop_idx_;
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/video_coding/main/source/video_coding_impl.cc b/chromium/third_party/webrtc/modules/video_coding/main/source/video_coding_impl.cc
index c207f00f0e6..e0cf4796231 100644
--- a/chromium/third_party/webrtc/modules/video_coding/main/source/video_coding_impl.cc
+++ b/chromium/third_party/webrtc/modules/video_coding/main/source/video_coding_impl.cc
@@ -168,7 +168,9 @@ class VideoCodingModuleImpl : public VideoCodingModule {
int32_t SetVideoProtection(VCMVideoProtection videoProtection,
bool enable) override {
- sender_->SetVideoProtection(enable, videoProtection);
+ // TODO(pbos): Remove enable from receive-side protection modes as well.
+ if (enable)
+ sender_->SetVideoProtection(videoProtection);
return receiver_->SetVideoProtection(videoProtection, enable);
}
diff --git a/chromium/third_party/webrtc/modules/video_coding/main/source/video_coding_impl.h b/chromium/third_party/webrtc/modules/video_coding/main/source/video_coding_impl.h
index d738a7cef27..86a8ca01cb8 100644
--- a/chromium/third_party/webrtc/modules/video_coding/main/source/video_coding_impl.h
+++ b/chromium/third_party/webrtc/modules/video_coding/main/source/video_coding_impl.h
@@ -93,12 +93,11 @@ class VideoSender {
int32_t SetChannelParameters(uint32_t target_bitrate, // bits/s.
uint8_t lossRate,
int64_t rtt);
- int32_t UpdateEncoderParameters();
int32_t RegisterTransportCallback(VCMPacketizationCallback* transport);
int32_t RegisterSendStatisticsCallback(VCMSendStatisticsCallback* sendStats);
int32_t RegisterProtectionCallback(VCMProtectionCallback* protection);
- void SetVideoProtection(bool enable, VCMVideoProtection videoProtection);
+ void SetVideoProtection(VCMVideoProtection videoProtection);
int32_t AddVideoFrame(const VideoFrame& videoFrame,
const VideoContentMetrics* _contentMetrics,
@@ -114,17 +113,28 @@ class VideoSender {
int32_t Process();
private:
- Clock* clock_;
+ struct EncoderParameters {
+ uint32_t target_bitrate;
+ uint8_t loss_rate;
+ int64_t rtt;
+ uint32_t input_frame_rate;
+ bool updated;
+ };
+
+ void SetEncoderParameters(EncoderParameters params)
+ EXCLUSIVE_LOCKS_REQUIRED(send_crit_);
+
+ Clock* const clock_;
rtc::scoped_ptr<CriticalSectionWrapper> process_crit_sect_;
- CriticalSectionWrapper* _sendCritSect;
+ mutable rtc::CriticalSection send_crit_;
VCMGenericEncoder* _encoder;
VCMEncodedFrameCallback _encodedFrameCallback;
std::vector<FrameType> _nextFrameTypes;
media_optimization::MediaOptimization _mediaOpt;
- VCMSendStatisticsCallback* _sendStatsCallback;
- VCMCodecDataBase _codecDataBase;
- bool frame_dropper_enabled_;
+ VCMSendStatisticsCallback* _sendStatsCallback GUARDED_BY(process_crit_sect_);
+ VCMCodecDataBase _codecDataBase GUARDED_BY(send_crit_);
+ bool frame_dropper_enabled_ GUARDED_BY(send_crit_);
VCMProcessTimer _sendStatsTimer;
// Must be accessed on the construction thread of VideoSender.
@@ -135,13 +145,7 @@ class VideoSender {
VCMProtectionCallback* protection_callback_;
rtc::CriticalSection params_lock_;
- struct EncoderParameters {
- uint32_t target_bitrate;
- uint8_t loss_rate;
- int64_t rtt;
- uint32_t input_frame_rate;
- bool updated;
- } encoder_params_ GUARDED_BY(params_lock_);
+ EncoderParameters encoder_params_ GUARDED_BY(params_lock_);
};
class VideoReceiver {
@@ -206,14 +210,6 @@ class VideoReceiver {
int32_t RequestSliceLossIndication(const uint64_t pictureID) const;
private:
- enum VCMKeyRequestMode {
- kKeyOnError, // Normal mode, request key frames on decoder error
- kKeyOnKeyLoss, // Request key frames on decoder error and on packet loss
- // in key frames.
- kKeyOnLoss, // Request key frames on decoder error and on packet loss
- // in any frame
- };
-
Clock* const clock_;
rtc::scoped_ptr<CriticalSectionWrapper> process_crit_sect_;
CriticalSectionWrapper* _receiveCritSect;
@@ -234,7 +230,6 @@ class VideoReceiver {
FILE* _bitStreamBeforeDecoder;
#endif
VCMFrameBuffer _frameFromFile;
- VCMKeyRequestMode _keyRequestMode;
bool _scheduleKeyRequest GUARDED_BY(process_crit_sect_);
size_t max_nack_list_size_ GUARDED_BY(process_crit_sect_);
EncodedImageCallback* pre_decode_image_callback_ GUARDED_BY(_receiveCritSect);
diff --git a/chromium/third_party/webrtc/modules/video_coding/main/source/video_receiver.cc b/chromium/third_party/webrtc/modules/video_coding/main/source/video_receiver.cc
index 08e6208c367..7371f9d3372 100644
--- a/chromium/third_party/webrtc/modules/video_coding/main/source/video_receiver.cc
+++ b/chromium/third_party/webrtc/modules/video_coding/main/source/video_receiver.cc
@@ -8,6 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
+#include "webrtc/base/checks.h"
#include "webrtc/common_types.h"
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
@@ -41,7 +42,6 @@ VideoReceiver::VideoReceiver(Clock* clock, EventFactory* event_factory)
_bitStreamBeforeDecoder(NULL),
#endif
_frameFromFile(),
- _keyRequestMode(kKeyOnError),
_scheduleKeyRequest(false),
max_nack_list_size_(0),
pre_decode_image_callback_(NULL),
@@ -187,64 +187,25 @@ int32_t VideoReceiver::SetVideoProtection(VCMVideoProtection videoProtection,
// By default, do not decode with errors.
_receiver.SetDecodeErrorMode(kNoErrors);
switch (videoProtection) {
- case kProtectionNack:
- case kProtectionNackReceiver: {
- CriticalSectionScoped cs(_receiveCritSect);
- if (enable) {
- // Enable NACK and always wait for retransmits.
- _receiver.SetNackMode(kNack, -1, -1);
- } else {
- _receiver.SetNackMode(kNoNack, -1, -1);
- }
- break;
- }
-
- case kProtectionKeyOnLoss: {
- CriticalSectionScoped cs(_receiveCritSect);
- if (enable) {
- _keyRequestMode = kKeyOnLoss;
- _receiver.SetDecodeErrorMode(kWithErrors);
- } else if (_keyRequestMode == kKeyOnLoss) {
- _keyRequestMode = kKeyOnError; // default mode
- } else {
- return VCM_PARAMETER_ERROR;
- }
- break;
- }
-
- case kProtectionKeyOnKeyLoss: {
- CriticalSectionScoped cs(_receiveCritSect);
- if (enable) {
- _keyRequestMode = kKeyOnKeyLoss;
- } else if (_keyRequestMode == kKeyOnKeyLoss) {
- _keyRequestMode = kKeyOnError; // default mode
- } else {
- return VCM_PARAMETER_ERROR;
- }
+ case kProtectionNack: {
+ RTC_DCHECK(enable);
+ _receiver.SetNackMode(kNack, -1, -1);
break;
}
case kProtectionNackFEC: {
CriticalSectionScoped cs(_receiveCritSect);
- if (enable) {
- // Enable hybrid NACK/FEC. Always wait for retransmissions
- // and don't add extra delay when RTT is above
- // kLowRttNackMs.
- _receiver.SetNackMode(kNack, media_optimization::kLowRttNackMs, -1);
- _receiver.SetDecodeErrorMode(kNoErrors);
- _receiver.SetDecodeErrorMode(kNoErrors);
- } else {
- _receiver.SetNackMode(kNoNack, -1, -1);
- }
+ RTC_DCHECK(enable);
+ _receiver.SetNackMode(kNack, media_optimization::kLowRttNackMs, -1);
+ _receiver.SetDecodeErrorMode(kNoErrors);
break;
}
- case kProtectionNackSender:
case kProtectionFEC:
- // Ignore encoder modes.
- return VCM_OK;
case kProtectionNone:
- // TODO(pbos): Implement like sender and remove enable parameter. Ignored
- // for now.
+ // No receiver-side protection.
+ RTC_DCHECK(enable);
+ _receiver.SetNackMode(kNoNack, -1, -1);
+ _receiver.SetDecodeErrorMode(kWithErrors);
break;
}
return VCM_OK;
@@ -432,22 +393,8 @@ int32_t VideoReceiver::Decode(const VCMEncodedFrame& frame) {
_decodedFrameCallback.LastReceivedPictureID() + 1);
}
if (!frame.Complete() || frame.MissingFrame()) {
- switch (_keyRequestMode) {
- case kKeyOnKeyLoss: {
- if (frame.FrameType() == kVideoFrameKey) {
- request_key_frame = true;
- ret = VCM_OK;
- }
- break;
- }
- case kKeyOnLoss: {
- request_key_frame = true;
- ret = VCM_OK;
- break;
- }
- default:
- break;
- }
+ request_key_frame = true;
+ ret = VCM_OK;
}
if (request_key_frame) {
CriticalSectionScoped cs(process_crit_sect_.get());
@@ -565,16 +512,10 @@ int VideoReceiver::SetReceiverRobustnessMode(
switch (robustnessMode) {
case VideoCodingModule::kNone:
_receiver.SetNackMode(kNoNack, -1, -1);
- if (decode_error_mode == kNoErrors) {
- _keyRequestMode = kKeyOnLoss;
- } else {
- _keyRequestMode = kKeyOnError;
- }
break;
case VideoCodingModule::kHardNack:
// Always wait for retransmissions (except when decoding with errors).
_receiver.SetNackMode(kNack, -1, -1);
- _keyRequestMode = kKeyOnError; // TODO(hlundin): On long NACK list?
break;
case VideoCodingModule::kSoftNack:
#if 1
@@ -584,7 +525,6 @@ int VideoReceiver::SetReceiverRobustnessMode(
// Enable hybrid NACK/FEC. Always wait for retransmissions and don't add
// extra delay when RTT is above kLowRttNackMs.
_receiver.SetNackMode(kNack, media_optimization::kLowRttNackMs, -1);
- _keyRequestMode = kKeyOnError;
break;
#endif
case VideoCodingModule::kReferenceSelection:
diff --git a/chromium/third_party/webrtc/modules/video_coding/main/source/video_sender.cc b/chromium/third_party/webrtc/modules/video_coding/main/source/video_sender.cc
index fb04b0e871e..c59d05afcd8 100644
--- a/chromium/third_party/webrtc/modules/video_coding/main/source/video_sender.cc
+++ b/chromium/third_party/webrtc/modules/video_coding/main/source/video_sender.cc
@@ -30,7 +30,6 @@ VideoSender::VideoSender(Clock* clock,
VCMQMSettingsCallback* qm_settings_callback)
: clock_(clock),
process_crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
- _sendCritSect(CriticalSectionWrapper::CreateCriticalSection()),
_encoder(nullptr),
_encodedFrameCallback(post_encode_callback),
_nextFrameTypes(1, kVideoFrameDelta),
@@ -51,9 +50,7 @@ VideoSender::VideoSender(Clock* clock,
main_thread_.DetachFromThread();
}
-VideoSender::~VideoSender() {
- delete _sendCritSect;
-}
+VideoSender::~VideoSender() {}
int32_t VideoSender::Process() {
int32_t returnValue = VCM_OK;
@@ -87,8 +84,8 @@ int64_t VideoSender::TimeUntilNextProcess() {
int32_t VideoSender::RegisterSendCodec(const VideoCodec* sendCodec,
uint32_t numberOfCores,
uint32_t maxPayloadSize) {
- DCHECK(main_thread_.CalledOnValidThread());
- CriticalSectionScoped cs(_sendCritSect);
+ RTC_DCHECK(main_thread_.CalledOnValidThread());
+ rtc::CritScope lock(&send_crit_);
if (sendCodec == nullptr) {
return VCM_PARAMETER_ERROR;
}
@@ -136,12 +133,12 @@ int32_t VideoSender::RegisterSendCodec(const VideoCodec* sendCodec,
}
const VideoCodec& VideoSender::GetSendCodec() const {
- DCHECK(main_thread_.CalledOnValidThread());
+ RTC_DCHECK(main_thread_.CalledOnValidThread());
return current_codec_;
}
int32_t VideoSender::SendCodecBlocking(VideoCodec* currentSendCodec) const {
- CriticalSectionScoped cs(_sendCritSect);
+ rtc::CritScope lock(&send_crit_);
if (currentSendCodec == nullptr) {
return VCM_PARAMETER_ERROR;
}
@@ -149,7 +146,7 @@ int32_t VideoSender::SendCodecBlocking(VideoCodec* currentSendCodec) const {
}
VideoCodecType VideoSender::SendCodecBlocking() const {
- CriticalSectionScoped cs(_sendCritSect);
+ rtc::CritScope lock(&send_crit_);
return _codecDataBase.SendCodec();
}
@@ -158,9 +155,9 @@ VideoCodecType VideoSender::SendCodecBlocking() const {
int32_t VideoSender::RegisterExternalEncoder(VideoEncoder* externalEncoder,
uint8_t payloadType,
bool internalSource /*= false*/) {
- DCHECK(main_thread_.CalledOnValidThread());
+ RTC_DCHECK(main_thread_.CalledOnValidThread());
- CriticalSectionScoped cs(_sendCritSect);
+ rtc::CritScope lock(&send_crit_);
if (externalEncoder == nullptr) {
bool wasSendCodec = false;
@@ -180,7 +177,7 @@ int32_t VideoSender::RegisterExternalEncoder(VideoEncoder* externalEncoder,
// Get codec config parameters
int32_t VideoSender::CodecConfigParameters(uint8_t* buffer,
int32_t size) const {
- CriticalSectionScoped cs(_sendCritSect);
+ rtc::CritScope lock(&send_crit_);
if (_encoder != nullptr) {
return _encoder->CodecConfigParameters(buffer, size);
}
@@ -196,7 +193,7 @@ int32_t VideoSender::SentFrameCount(VCMFrameCount* frameCount) {
// Get encode bitrate
int VideoSender::Bitrate(unsigned int* bitrate) const {
- DCHECK(main_thread_.CalledOnValidThread());
+ RTC_DCHECK(main_thread_.CalledOnValidThread());
// Since we're running on the thread that's the only thread known to modify
// the value of _encoder, we don't need to grab the lock here.
@@ -210,7 +207,7 @@ int VideoSender::Bitrate(unsigned int* bitrate) const {
// Get encode frame rate
int VideoSender::FrameRate(unsigned int* framerate) const {
- DCHECK(main_thread_.CalledOnValidThread());
+ RTC_DCHECK(main_thread_.CalledOnValidThread());
// Since we're running on the thread that's the only thread known to modify
// the value of _encoder, we don't need to grab the lock here.
@@ -238,37 +235,24 @@ int32_t VideoSender::SetChannelParameters(uint32_t target_bitrate,
return VCM_OK;
}
-int32_t VideoSender::UpdateEncoderParameters() {
- EncoderParameters params;
- {
- rtc::CritScope cs(&params_lock_);
- params = encoder_params_;
- encoder_params_.updated = false;
- }
-
+void VideoSender::SetEncoderParameters(EncoderParameters params) {
if (!params.updated || params.target_bitrate == 0)
- return VCM_OK;
-
- CriticalSectionScoped sendCs(_sendCritSect);
- int32_t ret = VCM_UNINITIALIZED;
- static_assert(VCM_UNINITIALIZED < 0, "VCM_UNINITIALIZED must be negative.");
+ return;
if (params.input_frame_rate == 0) {
// No frame rate estimate available, use default.
params.input_frame_rate = current_codec_.maxFramerate;
}
if (_encoder != nullptr) {
- ret = _encoder->SetChannelParameters(params.loss_rate, params.rtt);
- if (ret >= 0) {
- ret = _encoder->SetRates(params.target_bitrate, params.input_frame_rate);
- }
+ _encoder->SetChannelParameters(params.loss_rate, params.rtt);
+ _encoder->SetRates(params.target_bitrate, params.input_frame_rate);
}
- return ret;
+ return;
}
int32_t VideoSender::RegisterTransportCallback(
VCMPacketizationCallback* transport) {
- CriticalSectionScoped cs(_sendCritSect);
+ rtc::CritScope lock(&send_crit_);
_encodedFrameCallback.SetMediaOpt(&_mediaOpt);
_encodedFrameCallback.SetTransportCallback(transport);
return VCM_OK;
@@ -290,42 +274,41 @@ int32_t VideoSender::RegisterSendStatisticsCallback(
// used in this class.
int32_t VideoSender::RegisterProtectionCallback(
VCMProtectionCallback* protection_callback) {
- DCHECK(protection_callback == nullptr || protection_callback_ == nullptr);
+ RTC_DCHECK(protection_callback == nullptr || protection_callback_ == nullptr);
protection_callback_ = protection_callback;
return VCM_OK;
}
// Enable or disable a video protection method.
-void VideoSender::SetVideoProtection(bool enable,
- VCMVideoProtection videoProtection) {
- CriticalSectionScoped cs(_sendCritSect);
+void VideoSender::SetVideoProtection(VCMVideoProtection videoProtection) {
+ rtc::CritScope lock(&send_crit_);
switch (videoProtection) {
case kProtectionNone:
- _mediaOpt.EnableProtectionMethod(enable, media_optimization::kNone);
+ _mediaOpt.SetProtectionMethod(media_optimization::kNone);
break;
case kProtectionNack:
- case kProtectionNackSender:
- _mediaOpt.EnableProtectionMethod(enable, media_optimization::kNack);
+ _mediaOpt.SetProtectionMethod(media_optimization::kNack);
break;
case kProtectionNackFEC:
- _mediaOpt.EnableProtectionMethod(enable, media_optimization::kNackFec);
+ _mediaOpt.SetProtectionMethod(media_optimization::kNackFec);
break;
case kProtectionFEC:
- _mediaOpt.EnableProtectionMethod(enable, media_optimization::kFec);
+ _mediaOpt.SetProtectionMethod(media_optimization::kFec);
break;
- case kProtectionNackReceiver:
- case kProtectionKeyOnLoss:
- case kProtectionKeyOnKeyLoss:
- // Ignore receiver modes.
- return;
}
}
// Add one raw video frame to the encoder, blocking.
int32_t VideoSender::AddVideoFrame(const VideoFrame& videoFrame,
const VideoContentMetrics* contentMetrics,
const CodecSpecificInfo* codecSpecificInfo) {
- UpdateEncoderParameters();
- CriticalSectionScoped cs(_sendCritSect);
+ EncoderParameters encoder_params;
+ {
+ rtc::CritScope lock(&params_lock_);
+ encoder_params = encoder_params_;
+ encoder_params_.updated = false;
+ }
+ rtc::CritScope lock(&send_crit_);
+ SetEncoderParameters(encoder_params);
if (_encoder == nullptr) {
return VCM_UNINITIALIZED;
}
@@ -351,7 +334,7 @@ int32_t VideoSender::AddVideoFrame(const VideoFrame& videoFrame,
// This module only supports software encoding.
// TODO(pbos): Offload conversion from the encoder thread.
converted_frame = converted_frame.ConvertNativeToI420Frame();
- CHECK(!converted_frame.IsZeroSize())
+ RTC_CHECK(!converted_frame.IsZeroSize())
<< "Frame conversion failed, won't be able to encode frame.";
}
int32_t ret =
@@ -363,11 +346,13 @@ int32_t VideoSender::AddVideoFrame(const VideoFrame& videoFrame,
for (size_t i = 0; i < _nextFrameTypes.size(); ++i) {
_nextFrameTypes[i] = kVideoFrameDelta; // Default frame type.
}
+ if (qm_settings_callback_)
+ qm_settings_callback_->SetTargetFramerate(_encoder->GetTargetFramerate());
return VCM_OK;
}
int32_t VideoSender::IntraFrameRequest(int stream_index) {
- CriticalSectionScoped cs(_sendCritSect);
+ rtc::CritScope lock(&send_crit_);
if (stream_index < 0 ||
static_cast<unsigned int>(stream_index) >= _nextFrameTypes.size()) {
return -1;
@@ -384,14 +369,14 @@ int32_t VideoSender::IntraFrameRequest(int stream_index) {
}
int32_t VideoSender::EnableFrameDropper(bool enable) {
- CriticalSectionScoped cs(_sendCritSect);
+ rtc::CritScope lock(&send_crit_);
frame_dropper_enabled_ = enable;
_mediaOpt.EnableFrameDropper(enable);
return VCM_OK;
}
void VideoSender::SuspendBelowMinBitrate() {
- DCHECK(main_thread_.CalledOnValidThread());
+ RTC_DCHECK(main_thread_.CalledOnValidThread());
int threshold_bps;
if (current_codec_.numberOfSimulcastStreams == 0) {
threshold_bps = current_codec_.minBitrate * 1000;
@@ -405,7 +390,7 @@ void VideoSender::SuspendBelowMinBitrate() {
}
bool VideoSender::VideoSuspended() const {
- CriticalSectionScoped cs(_sendCritSect);
+ rtc::CritScope lock(&send_crit_);
return _mediaOpt.IsVideoSuspended();
}
} // namespace vcm
diff --git a/chromium/third_party/webrtc/modules/video_coding/main/test/rtp_player.cc b/chromium/third_party/webrtc/modules/video_coding/main/test/rtp_player.cc
index c7a2f660d71..74a5b95877c 100644
--- a/chromium/third_party/webrtc/modules/video_coding/main/test/rtp_player.cc
+++ b/chromium/third_party/webrtc/modules/video_coding/main/test/rtp_player.cc
@@ -66,7 +66,7 @@ class RawRtpPacket {
uint32_t ssrc_;
uint16_t seq_num_;
- DISALLOW_IMPLICIT_CONSTRUCTORS(RawRtpPacket);
+ RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(RawRtpPacket);
};
class LostPackets {
@@ -182,7 +182,7 @@ class LostPackets {
Clock* clock_;
int64_t rtt_ms_;
- DISALLOW_IMPLICIT_CONSTRUCTORS(LostPackets);
+ RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(LostPackets);
};
class SsrcHandlers {
@@ -217,11 +217,10 @@ class SsrcHandlers {
RtpRtcp::Configuration configuration;
configuration.clock = clock;
- configuration.id = 1;
configuration.audio = false;
handler->rtp_module_.reset(RtpReceiver::CreateVideoReceiver(
- configuration.id, configuration.clock, handler->payload_sink_.get(),
- NULL, handler->rtp_payload_registry_.get()));
+ configuration.clock, handler->payload_sink_.get(), NULL,
+ handler->rtp_payload_registry_.get()));
if (handler->rtp_module_.get() == NULL) {
return -1;
}
@@ -305,7 +304,7 @@ class SsrcHandlers {
const PayloadTypes& payload_types_;
LostPackets* lost_packets_;
- DISALLOW_COPY_AND_ASSIGN(Handler);
+ RTC_DISALLOW_COPY_AND_ASSIGN(Handler);
};
typedef std::map<uint32_t, Handler*> HandlerMap;
@@ -315,7 +314,7 @@ class SsrcHandlers {
PayloadTypes payload_types_;
HandlerMap handlers_;
- DISALLOW_IMPLICIT_CONSTRUCTORS(SsrcHandlers);
+ RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(SsrcHandlers);
};
class RtpPlayerImpl : public RtpPlayerInterface {
@@ -467,7 +466,7 @@ class RtpPlayerImpl : public RtpPlayerInterface {
bool reordering_;
rtc::scoped_ptr<RawRtpPacket> reorder_buffer_;
- DISALLOW_IMPLICIT_CONSTRUCTORS(RtpPlayerImpl);
+ RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(RtpPlayerImpl);
};
RtpPlayerInterface* Create(const std::string& input_filename,
diff --git a/chromium/third_party/webrtc/modules/video_coding/main/test/test_util.h b/chromium/third_party/webrtc/modules/video_coding/main/test/test_util.h
index 11fb6d4ddac..55cf4b91af7 100644
--- a/chromium/third_party/webrtc/modules/video_coding/main/test/test_util.h
+++ b/chromium/third_party/webrtc/modules/video_coding/main/test/test_util.h
@@ -61,14 +61,13 @@ class FileOutputFrameReceiver : public webrtc::VCMReceiveCallback {
private:
std::string out_filename_;
- uint32_t ssrc_;
FILE* out_file_;
FILE* timing_file_;
int width_;
int height_;
int count_;
- DISALLOW_IMPLICIT_CONSTRUCTORS(FileOutputFrameReceiver);
+ RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(FileOutputFrameReceiver);
};
class CmdArgs {
diff --git a/chromium/third_party/webrtc/modules/video_coding/main/test/vcm_payload_sink_factory.cc b/chromium/third_party/webrtc/modules/video_coding/main/test/vcm_payload_sink_factory.cc
index 2fe646ed740..6a6362b9d3b 100644
--- a/chromium/third_party/webrtc/modules/video_coding/main/test/vcm_payload_sink_factory.cc
+++ b/chromium/third_party/webrtc/modules/video_coding/main/test/vcm_payload_sink_factory.cc
@@ -95,7 +95,7 @@ class VcmPayloadSinkFactory::VcmPayloadSink
rtc::scoped_ptr<VideoCodingModule> vcm_;
rtc::scoped_ptr<FileOutputFrameReceiver> frame_receiver_;
- DISALLOW_IMPLICIT_CONSTRUCTORS(VcmPayloadSink);
+ RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(VcmPayloadSink);
};
VcmPayloadSinkFactory::VcmPayloadSinkFactory(
diff --git a/chromium/third_party/webrtc/modules/video_coding/main/test/vcm_payload_sink_factory.h b/chromium/third_party/webrtc/modules/video_coding/main/test/vcm_payload_sink_factory.h
index ca0ed560549..ec94bdc3822 100644
--- a/chromium/third_party/webrtc/modules/video_coding/main/test/vcm_payload_sink_factory.h
+++ b/chromium/third_party/webrtc/modules/video_coding/main/test/vcm_payload_sink_factory.h
@@ -57,7 +57,7 @@ class VcmPayloadSinkFactory : public PayloadSinkFactoryInterface {
rtc::scoped_ptr<CriticalSectionWrapper> crit_sect_;
Sinks sinks_;
- DISALLOW_IMPLICIT_CONSTRUCTORS(VcmPayloadSinkFactory);
+ RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(VcmPayloadSinkFactory);
};
} // namespace rtpplayer
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/video_coding/utility/include/quality_scaler.h b/chromium/third_party/webrtc/modules/video_coding/utility/include/quality_scaler.h
index 1d6c917ec4e..e92c55ddc06 100644
--- a/chromium/third_party/webrtc/modules/video_coding/utility/include/quality_scaler.h
+++ b/chromium/third_party/webrtc/modules/video_coding/utility/include/quality_scaler.h
@@ -25,14 +25,18 @@ class QualityScaler {
};
QualityScaler();
- void Init(int low_qp_threshold);
+ void Init(int low_qp_threshold,
+ int high_qp_threshold,
+ bool use_framerate_reduction);
void SetMinResolution(int min_width, int min_height);
void ReportFramerate(int framerate);
void ReportQP(int qp);
void ReportDroppedFrame();
void Reset(int framerate, int bitrate, int width, int height);
- Resolution GetScaledResolution(const VideoFrame& frame);
+ void OnEncodeFrame(const VideoFrame& frame);
+ Resolution GetScaledResolution() const;
const VideoFrame& GetScaledFrame(const VideoFrame& frame);
+ int GetTargetFramerate() const;
private:
void AdjustScale(bool up);
@@ -42,11 +46,17 @@ class QualityScaler {
VideoFrame scaled_frame_;
size_t num_samples_;
+ int framerate_;
+ int target_framerate_;
int low_qp_threshold_;
+ int high_qp_threshold_;
MovingAverage<int> framedrop_percent_;
MovingAverage<int> average_qp_;
+ Resolution res_;
int downscale_shift_;
+ int framerate_down_;
+ bool use_framerate_reduction_;
int min_width_;
int min_height_;
};
diff --git a/chromium/third_party/webrtc/modules/video_coding/utility/quality_scaler.cc b/chromium/third_party/webrtc/modules/video_coding/utility/quality_scaler.cc
index 7a2a9c02615..ec7715230ed 100644
--- a/chromium/third_party/webrtc/modules/video_coding/utility/quality_scaler.cc
+++ b/chromium/third_party/webrtc/modules/video_coding/utility/quality_scaler.cc
@@ -24,13 +24,19 @@ QualityScaler::QualityScaler()
: num_samples_(0),
low_qp_threshold_(-1),
downscale_shift_(0),
+ framerate_down_(false),
min_width_(kDefaultMinDownscaleDimension),
min_height_(kDefaultMinDownscaleDimension) {
}
-void QualityScaler::Init(int low_qp_threshold) {
+void QualityScaler::Init(int low_qp_threshold,
+ int high_qp_threshold,
+ bool use_framerate_reduction) {
ClearSamples();
low_qp_threshold_ = low_qp_threshold;
+ high_qp_threshold_ = high_qp_threshold;
+ use_framerate_reduction_ = use_framerate_reduction;
+ target_framerate_ = -1;
}
void QualityScaler::SetMinResolution(int min_width, int min_height) {
@@ -42,6 +48,7 @@ void QualityScaler::SetMinResolution(int min_width, int min_height) {
void QualityScaler::ReportFramerate(int framerate) {
num_samples_ = static_cast<size_t>(
kMeasureSeconds * (framerate < kMinFps ? kMinFps : framerate));
+ framerate_ = framerate;
}
void QualityScaler::ReportQP(int qp) {
@@ -53,41 +60,67 @@ void QualityScaler::ReportDroppedFrame() {
framedrop_percent_.AddSample(100);
}
-QualityScaler::Resolution QualityScaler::GetScaledResolution(
- const VideoFrame& frame) {
+void QualityScaler::OnEncodeFrame(const VideoFrame& frame) {
// Should be set through InitEncode -> Should be set by now.
assert(low_qp_threshold_ >= 0);
assert(num_samples_ > 0);
-
- Resolution res;
- res.width = frame.width();
- res.height = frame.height();
+ res_.width = frame.width();
+ res_.height = frame.height();
// Update scale factor.
int avg_drop = 0;
int avg_qp = 0;
- if (framedrop_percent_.GetAverage(num_samples_, &avg_drop) &&
- avg_drop >= kFramedropPercentThreshold) {
- AdjustScale(false);
+
+ // When encoder consistently overshoots, framerate reduction and spatial
+ // resizing will be triggered to get a smoother video.
+ if ((framedrop_percent_.GetAverage(num_samples_, &avg_drop) &&
+ avg_drop >= kFramedropPercentThreshold) ||
+ (average_qp_.GetAverage(num_samples_, &avg_qp) &&
+ avg_qp > high_qp_threshold_)) {
+ // Reducing frame rate before spatial resolution change.
+ // Reduce frame rate only when it is above a certain number.
+ // Only one reduction is allowed for now.
+ // TODO(jackychen): Allow more than one framerate reduction.
+ if (use_framerate_reduction_ && !framerate_down_ && framerate_ >= 20) {
+ target_framerate_ = framerate_ / 2;
+ framerate_down_ = true;
+ // If frame rate has been updated, clear the buffer. We don't want
+ // spatial resolution to change right after frame rate change.
+ ClearSamples();
+ } else {
+ AdjustScale(false);
+ }
} else if (average_qp_.GetAverage(num_samples_, &avg_qp) &&
avg_qp <= low_qp_threshold_) {
- AdjustScale(true);
+ if (use_framerate_reduction_ && framerate_down_) {
+ target_framerate_ = -1;
+ framerate_down_ = false;
+ ClearSamples();
+ } else {
+ AdjustScale(true);
+ }
}
assert(downscale_shift_ >= 0);
for (int shift = downscale_shift_;
- shift > 0 && (res.width >> 1 >= min_width_) &&
- (res.height >> 1 >= min_height_);
+ shift > 0 && (res_.width / 2 >= min_width_) &&
+ (res_.height / 2 >= min_height_);
--shift) {
- res.width >>= 1;
- res.height >>= 1;
+ res_.width /= 2;
+ res_.height /= 2;
}
+}
+
+QualityScaler::Resolution QualityScaler::GetScaledResolution() const {
+ return res_;
+}
- return res;
+int QualityScaler::GetTargetFramerate() const {
+ return target_framerate_;
}
const VideoFrame& QualityScaler::GetScaledFrame(const VideoFrame& frame) {
- Resolution res = GetScaledResolution(frame);
+ Resolution res = GetScaledResolution();
if (res.width == frame.width())
return frame;
diff --git a/chromium/third_party/webrtc/modules/video_coding/utility/quality_scaler_unittest.cc b/chromium/third_party/webrtc/modules/video_coding/utility/quality_scaler_unittest.cc
index c09dffb8288..2ce1107472e 100644
--- a/chromium/third_party/webrtc/modules/video_coding/utility/quality_scaler_unittest.cc
+++ b/chromium/third_party/webrtc/modules/video_coding/utility/quality_scaler_unittest.cc
@@ -21,22 +21,38 @@ static const int kHeight = 1080;
static const int kFramerate = 30;
static const int kLowQp = 15;
static const int kNormalQp = 30;
+static const int kHighQp = 40;
static const int kMaxQp = 56;
} // namespace
class QualityScalerTest : public ::testing::Test {
+ public:
+ // Temporal and spatial resolution.
+ struct Resolution {
+ int framerate;
+ int width;
+ int height;
+ };
protected:
- enum ScaleDirection { kScaleDown, kScaleUp };
+ enum ScaleDirection {
+ kKeepScaleAtHighQp,
+ kScaleDown,
+ kScaleDownAboveHighQp,
+ kScaleUp
+ };
+ enum BadQualityMetric { kDropFrame, kReportLowQP };
QualityScalerTest() {
input_frame_.CreateEmptyFrame(
kWidth, kHeight, kWidth, kHalfWidth, kHalfWidth);
- qs_.Init(kMaxQp / QualityScaler::kDefaultLowQpDenominator);
+ qs_.Init(kMaxQp / QualityScaler::kDefaultLowQpDenominator, kHighQp, false);
qs_.ReportFramerate(kFramerate);
+ qs_.OnEncodeFrame(input_frame_);
}
bool TriggerScale(ScaleDirection scale_direction) {
- int initial_width = qs_.GetScaledResolution(input_frame_).width;
+ qs_.OnEncodeFrame(input_frame_);
+ int initial_width = qs_.GetScaledResolution().width;
for (int i = 0; i < kFramerate * kNumSeconds; ++i) {
switch (scale_direction) {
case kScaleUp:
@@ -45,9 +61,15 @@ class QualityScalerTest : public ::testing::Test {
case kScaleDown:
qs_.ReportDroppedFrame();
break;
+ case kKeepScaleAtHighQp:
+ qs_.ReportQP(kHighQp);
+ break;
+ case kScaleDownAboveHighQp:
+ qs_.ReportQP(kHighQp + 1);
+ break;
}
-
- if (qs_.GetScaledResolution(input_frame_).width != initial_width)
+ qs_.OnEncodeFrame(input_frame_);
+ if (qs_.GetScaledResolution().width != initial_width)
return true;
}
@@ -60,7 +82,8 @@ class QualityScalerTest : public ::testing::Test {
}
void ExpectScaleUsingReportedResolution() {
- QualityScaler::Resolution res = qs_.GetScaledResolution(input_frame_);
+ qs_.OnEncodeFrame(input_frame_);
+ QualityScaler::Resolution res = qs_.GetScaledResolution();
const VideoFrame& scaled_frame = qs_.GetScaledFrame(input_frame_);
EXPECT_EQ(res.width, scaled_frame.width());
EXPECT_EQ(res.height, scaled_frame.height());
@@ -70,6 +93,14 @@ class QualityScalerTest : public ::testing::Test {
void DoesNotDownscaleFrameDimensions(int width, int height);
+ Resolution TriggerResolutionChange(BadQualityMetric dropframe_lowqp,
+ int num_second,
+ int initial_framerate);
+
+ void VerifyQualityAdaptation(int initial_framerate, int seconds,
+ bool expect_spatial_resize,
+ bool expect_framerate_reduction);
+
void DownscaleEndsAt(int input_width,
int input_height,
int end_width,
@@ -84,7 +115,8 @@ TEST_F(QualityScalerTest, UsesOriginalFrameInitially) {
}
TEST_F(QualityScalerTest, ReportsOriginalResolutionInitially) {
- QualityScaler::Resolution res = qs_.GetScaledResolution(input_frame_);
+ qs_.OnEncodeFrame(input_frame_);
+ QualityScaler::Resolution res = qs_.GetScaledResolution();
EXPECT_EQ(input_frame_.width(), res.width);
EXPECT_EQ(input_frame_.height(), res.height);
}
@@ -92,7 +124,23 @@ TEST_F(QualityScalerTest, ReportsOriginalResolutionInitially) {
TEST_F(QualityScalerTest, DownscalesAfterContinuousFramedrop) {
EXPECT_TRUE(TriggerScale(kScaleDown)) << "No downscale within " << kNumSeconds
<< " seconds.";
- QualityScaler::Resolution res = qs_.GetScaledResolution(input_frame_);
+ QualityScaler::Resolution res = qs_.GetScaledResolution();
+ EXPECT_LT(res.width, input_frame_.width());
+ EXPECT_LT(res.height, input_frame_.height());
+}
+
+TEST_F(QualityScalerTest, KeepsScaleAtHighQp) {
+ EXPECT_FALSE(TriggerScale(kKeepScaleAtHighQp))
+ << "Downscale at high threshold which should keep scale.";
+ QualityScaler::Resolution res = qs_.GetScaledResolution();
+ EXPECT_EQ(res.width, input_frame_.width());
+ EXPECT_EQ(res.height, input_frame_.height());
+}
+
+TEST_F(QualityScalerTest, DownscalesAboveHighQp) {
+ EXPECT_TRUE(TriggerScale(kScaleDownAboveHighQp))
+ << "No downscale within " << kNumSeconds << " seconds.";
+ QualityScaler::Resolution res = qs_.GetScaledResolution();
EXPECT_LT(res.width, input_frame_.width());
EXPECT_LT(res.height, input_frame_.height());
}
@@ -102,7 +150,8 @@ TEST_F(QualityScalerTest, DownscalesAfterTwoThirdsFramedrop) {
qs_.ReportQP(kNormalQp);
qs_.ReportDroppedFrame();
qs_.ReportDroppedFrame();
- if (qs_.GetScaledResolution(input_frame_).width < input_frame_.width())
+ qs_.OnEncodeFrame(input_frame_);
+ if (qs_.GetScaledResolution().width < input_frame_.width())
return;
}
@@ -112,7 +161,8 @@ TEST_F(QualityScalerTest, DownscalesAfterTwoThirdsFramedrop) {
TEST_F(QualityScalerTest, DoesNotDownscaleOnNormalQp) {
for (int i = 0; i < kFramerate * kNumSeconds; ++i) {
qs_.ReportQP(kNormalQp);
- ASSERT_EQ(input_frame_.width(), qs_.GetScaledResolution(input_frame_).width)
+ qs_.OnEncodeFrame(input_frame_);
+ ASSERT_EQ(input_frame_.width(), qs_.GetScaledResolution().width)
<< "Unexpected scale on half framedrop.";
}
}
@@ -120,11 +170,13 @@ TEST_F(QualityScalerTest, DoesNotDownscaleOnNormalQp) {
TEST_F(QualityScalerTest, DoesNotDownscaleAfterHalfFramedrop) {
for (int i = 0; i < kFramerate * kNumSeconds / 2; ++i) {
qs_.ReportQP(kNormalQp);
- ASSERT_EQ(input_frame_.width(), qs_.GetScaledResolution(input_frame_).width)
+ qs_.OnEncodeFrame(input_frame_);
+ ASSERT_EQ(input_frame_.width(), qs_.GetScaledResolution().width)
<< "Unexpected scale on half framedrop.";
qs_.ReportDroppedFrame();
- ASSERT_EQ(input_frame_.width(), qs_.GetScaledResolution(input_frame_).width)
+ qs_.OnEncodeFrame(input_frame_);
+ ASSERT_EQ(input_frame_.width(), qs_.GetScaledResolution().width)
<< "Unexpected scale on half framedrop.";
}
}
@@ -139,7 +191,8 @@ void QualityScalerTest::ContinuouslyDownscalesByHalfDimensionsAndBackUp() {
while (min_dimension >= 2 * QualityScaler::kDefaultMinDownscaleDimension) {
EXPECT_TRUE(TriggerScale(kScaleDown)) << "No downscale within "
<< kNumSeconds << " seconds.";
- QualityScaler::Resolution res = qs_.GetScaledResolution(input_frame_);
+ qs_.OnEncodeFrame(input_frame_);
+ QualityScaler::Resolution res = qs_.GetScaledResolution();
min_dimension = res.width < res.height ? res.width : res.height;
++current_shift;
ASSERT_EQ(input_frame_.width() >> current_shift, res.width);
@@ -151,7 +204,8 @@ void QualityScalerTest::ContinuouslyDownscalesByHalfDimensionsAndBackUp() {
while (min_dimension < initial_min_dimension) {
EXPECT_TRUE(TriggerScale(kScaleUp)) << "No upscale within " << kNumSeconds
<< " seconds.";
- QualityScaler::Resolution res = qs_.GetScaledResolution(input_frame_);
+ qs_.OnEncodeFrame(input_frame_);
+ QualityScaler::Resolution res = qs_.GetScaledResolution();
min_dimension = res.width < res.height ? res.width : res.height;
--current_shift;
ASSERT_EQ(input_frame_.width() >> current_shift, res.width);
@@ -186,7 +240,8 @@ void QualityScalerTest::DoesNotDownscaleFrameDimensions(int width, int height) {
for (int i = 0; i < kFramerate * kNumSeconds; ++i) {
qs_.ReportDroppedFrame();
- ASSERT_EQ(input_frame_.width(), qs_.GetScaledResolution(input_frame_).width)
+ qs_.OnEncodeFrame(input_frame_);
+ ASSERT_EQ(input_frame_.width(), qs_.GetScaledResolution().width)
<< "Unexpected scale of minimal-size frame.";
}
}
@@ -203,6 +258,96 @@ TEST_F(QualityScalerTest, DoesNotDownscaleFrom1Px) {
DoesNotDownscaleFrameDimensions(1, 1);
}
+QualityScalerTest::Resolution QualityScalerTest::TriggerResolutionChange(
+ BadQualityMetric dropframe_lowqp, int num_second, int initial_framerate) {
+ QualityScalerTest::Resolution res;
+ res.framerate = initial_framerate;
+ qs_.OnEncodeFrame(input_frame_);
+ res.width = qs_.GetScaledResolution().width;
+ res.height = qs_.GetScaledResolution().height;
+ for (int i = 0; i < kFramerate * num_second; ++i) {
+ switch (dropframe_lowqp) {
+ case kReportLowQP:
+ qs_.ReportQP(kLowQp);
+ break;
+ case kDropFrame:
+ qs_.ReportDroppedFrame();
+ break;
+ }
+ qs_.OnEncodeFrame(input_frame_);
+ // Simulate the case when SetRates is called right after reducing
+ // framerate.
+ qs_.ReportFramerate(initial_framerate);
+ res.framerate = qs_.GetTargetFramerate();
+ if (res.framerate != -1)
+ qs_.ReportFramerate(res.framerate);
+ res.width = qs_.GetScaledResolution().width;
+ res.height = qs_.GetScaledResolution().height;
+ }
+ return res;
+}
+
+void QualityScalerTest::VerifyQualityAdaptation(
+ int initial_framerate, int seconds, bool expect_spatial_resize,
+ bool expect_framerate_reduction) {
+ const int kDisabledBadQpThreshold = kMaxQp + 1;
+ qs_.Init(kMaxQp / QualityScaler::kDefaultLowQpDenominator,
+ kDisabledBadQpThreshold, true);
+ qs_.OnEncodeFrame(input_frame_);
+ int init_width = qs_.GetScaledResolution().width;
+ int init_height = qs_.GetScaledResolution().height;
+
+ // Test reducing framerate by dropping frame continuously.
+ QualityScalerTest::Resolution res = TriggerResolutionChange(
+ kDropFrame, seconds, initial_framerate);
+
+ if (expect_framerate_reduction) {
+ EXPECT_LT(res.framerate, initial_framerate);
+ } else {
+ // No framerate reduction, video decimator should be disabled.
+ EXPECT_EQ(-1, res.framerate);
+ }
+
+ if (expect_spatial_resize) {
+ EXPECT_LT(res.width, init_width);
+ EXPECT_LT(res.height, init_height);
+ } else {
+ EXPECT_EQ(init_width, res.width);
+ EXPECT_EQ(init_height, res.height);
+ }
+
+ // The "seconds * 1.5" is to ensure spatial resolution to recover.
+ // For example, in 10 seconds test, framerate reduction happens in the first
+ // 5 seconds from 30fps to 15fps and causes the buffer size to be half of the
+ // original one. Then it will take only 75 samples to downscale (twice in 150
+ // samples). So to recover the resolution changes, we need more than 10
+ // seconds (i.e, seconds * 1.5). This is because the framerate increases
+ // before spatial size recovers, so it will take 150 samples to recover
+ // spatial size (300 for twice).
+ res = TriggerResolutionChange(kReportLowQP, seconds * 1.5, initial_framerate);
+ EXPECT_EQ(-1, res.framerate);
+ EXPECT_EQ(init_width, res.width);
+ EXPECT_EQ(init_height, res.height);
+}
+
+// In 5 seconds test, only framerate adjusting should happen.
+TEST_F(QualityScalerTest, ChangeFramerateOnly) {
+ VerifyQualityAdaptation(kFramerate, 5, false, true);
+}
+
+// In 10 seconds test, framerate adjusting and scaling are both
+// triggered, it shows that scaling would happen after framerate
+// adjusting.
+TEST_F(QualityScalerTest, ChangeFramerateAndSpatialSize) {
+ VerifyQualityAdaptation(kFramerate, 10, true, true);
+}
+
+// When starting from a low framerate, only spatial size will be changed.
+TEST_F(QualityScalerTest, ChangeSpatialSizeOnly) {
+ qs_.ReportFramerate(kFramerate >> 1);
+ VerifyQualityAdaptation(kFramerate >> 1, 10, true, false);
+}
+
TEST_F(QualityScalerTest, DoesNotDownscaleBelow2xDefaultMinDimensionsWidth) {
DoesNotDownscaleFrameDimensions(
2 * QualityScaler::kDefaultMinDownscaleDimension - 1, 1000);
@@ -227,7 +372,7 @@ void QualityScalerTest::DownscaleEndsAt(int input_width,
// Drop all frames to force-trigger downscaling.
while (true) {
TriggerScale(kScaleDown);
- QualityScaler::Resolution res = qs_.GetScaledResolution(input_frame_);
+ QualityScaler::Resolution res = qs_.GetScaledResolution();
if (last_width == res.width) {
EXPECT_EQ(last_height, res.height);
EXPECT_EQ(end_width, res.width);
diff --git a/chromium/third_party/webrtc/modules/video_processing/main/interface/video_processing.h b/chromium/third_party/webrtc/modules/video_processing/main/interface/video_processing.h
index 0cab9fcc584..30af99fb8e6 100644
--- a/chromium/third_party/webrtc/modules/video_processing/main/interface/video_processing.h
+++ b/chromium/third_party/webrtc/modules/video_processing/main/interface/video_processing.h
@@ -76,7 +76,7 @@ class VideoProcessingModule : public Module {
\return Pointer to a VPM object.
*/
- static VideoProcessingModule* Create(int32_t id);
+ static VideoProcessingModule* Create();
/**
Destroys a VPM object.
@@ -214,6 +214,8 @@ class VideoProcessingModule : public Module {
uint32_t height,
uint32_t frame_rate) = 0;
+ virtual void SetTargetFramerate(int frame_rate) {}
+
/**
Get decimated(target) frame rate
*/
diff --git a/chromium/third_party/webrtc/modules/video_processing/main/source/frame_preprocessor.cc b/chromium/third_party/webrtc/modules/video_processing/main/source/frame_preprocessor.cc
index cf4bc8113ec..a9d77c2e0c1 100644
--- a/chromium/third_party/webrtc/modules/video_processing/main/source/frame_preprocessor.cc
+++ b/chromium/third_party/webrtc/modules/video_processing/main/source/frame_preprocessor.cc
@@ -38,7 +38,6 @@ void VPMFramePreprocessor::Reset() {
frame_cnt_ = 0;
}
-
void VPMFramePreprocessor::EnableTemporalDecimation(bool enable) {
vd_->EnableTemporalDecimation(enable);
}
@@ -62,12 +61,19 @@ int32_t VPMFramePreprocessor::SetTargetResolution(
if (ret_val < 0) return ret_val;
- ret_val = vd_->SetTargetFramerate(frame_rate);
- if (ret_val < 0) return ret_val;
-
+ vd_->SetTargetFramerate(frame_rate);
return VPM_OK;
}
+void VPMFramePreprocessor::SetTargetFramerate(int frame_rate) {
+ if (frame_rate == -1) {
+ vd_->EnableTemporalDecimation(false);
+ } else {
+ vd_->EnableTemporalDecimation(true);
+ vd_->SetTargetFramerate(frame_rate);
+ }
+}
+
void VPMFramePreprocessor::UpdateIncomingframe_rate() {
vd_->UpdateIncomingframe_rate();
}
diff --git a/chromium/third_party/webrtc/modules/video_processing/main/source/frame_preprocessor.h b/chromium/third_party/webrtc/modules/video_processing/main/source/frame_preprocessor.h
index 81e92ed9eb8..895e457cc60 100644
--- a/chromium/third_party/webrtc/modules/video_processing/main/source/frame_preprocessor.h
+++ b/chromium/third_party/webrtc/modules/video_processing/main/source/frame_preprocessor.h
@@ -41,6 +41,9 @@ class VPMFramePreprocessor {
int32_t SetTargetResolution(uint32_t width, uint32_t height,
uint32_t frame_rate);
+ // Set target frame rate.
+ void SetTargetFramerate(int frame_rate);
+
// Update incoming frame rate/dimension.
void UpdateIncomingframe_rate();
diff --git a/chromium/third_party/webrtc/modules/video_processing/main/source/video_decimator.cc b/chromium/third_party/webrtc/modules/video_processing/main/source/video_decimator.cc
index bf05bd71545..9991c4fda79 100644
--- a/chromium/third_party/webrtc/modules/video_processing/main/source/video_decimator.cc
+++ b/chromium/third_party/webrtc/modules/video_processing/main/source/video_decimator.cc
@@ -8,6 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
+#include "webrtc/base/checks.h"
#include "webrtc/modules/video_processing/main/interface/video_processing.h"
#include "webrtc/modules/video_processing/main/source/video_decimator.h"
#include "webrtc/system_wrappers/interface/tick_util.h"
@@ -36,11 +37,9 @@ void VPMVideoDecimator::EnableTemporalDecimation(bool enable) {
enable_temporal_decimation_ = enable;
}
-int32_t VPMVideoDecimator::SetTargetFramerate(uint32_t frame_rate) {
- if (frame_rate == 0) return VPM_PARAMETER_ERROR;
-
+void VPMVideoDecimator::SetTargetFramerate(int frame_rate) {
+ RTC_DCHECK(frame_rate);
target_frame_rate_ = frame_rate;
- return VPM_OK;
}
bool VPMVideoDecimator::DropFrame() {
diff --git a/chromium/third_party/webrtc/modules/video_processing/main/source/video_decimator.h b/chromium/third_party/webrtc/modules/video_processing/main/source/video_decimator.h
index fca74aeae15..3d4573caf84 100644
--- a/chromium/third_party/webrtc/modules/video_processing/main/source/video_decimator.h
+++ b/chromium/third_party/webrtc/modules/video_processing/main/source/video_decimator.h
@@ -25,7 +25,7 @@ class VPMVideoDecimator {
void EnableTemporalDecimation(bool enable);
- int32_t SetTargetFramerate(uint32_t frame_rate);
+ void SetTargetFramerate(int frame_rate);
bool DropFrame();
diff --git a/chromium/third_party/webrtc/modules/video_processing/main/source/video_processing_impl.cc b/chromium/third_party/webrtc/modules/video_processing/main/source/video_processing_impl.cc
index afb8290dc99..c04c5663119 100644
--- a/chromium/third_party/webrtc/modules/video_processing/main/source/video_processing_impl.cc
+++ b/chromium/third_party/webrtc/modules/video_processing/main/source/video_processing_impl.cc
@@ -37,8 +37,8 @@ void SetSubSampling(VideoProcessingModule::FrameStats* stats,
}
} // namespace
-VideoProcessingModule* VideoProcessingModule::Create(const int32_t id) {
- return new VideoProcessingModuleImpl(id);
+VideoProcessingModule* VideoProcessingModule::Create() {
+ return new VideoProcessingModuleImpl();
}
void VideoProcessingModule::Destroy(VideoProcessingModule* module) {
@@ -46,16 +46,11 @@ void VideoProcessingModule::Destroy(VideoProcessingModule* module) {
delete static_cast<VideoProcessingModuleImpl*>(module);
}
-VideoProcessingModuleImpl::VideoProcessingModuleImpl(const int32_t id)
- : mutex_(*CriticalSectionWrapper::CreateCriticalSection()) {
-}
-
-VideoProcessingModuleImpl::~VideoProcessingModuleImpl() {
- delete &mutex_;
-}
+VideoProcessingModuleImpl::VideoProcessingModuleImpl() {}
+VideoProcessingModuleImpl::~VideoProcessingModuleImpl() {}
void VideoProcessingModuleImpl::Reset() {
- CriticalSectionScoped mutex(&mutex_);
+ rtc::CritScope mutex(&mutex_);
deflickering_.Reset();
brightness_detection_.Reset();
frame_pre_processor_.Reset();
@@ -117,66 +112,71 @@ int32_t VideoProcessingModule::Brighten(VideoFrame* frame, int delta) {
int32_t VideoProcessingModuleImpl::Deflickering(VideoFrame* frame,
FrameStats* stats) {
- CriticalSectionScoped mutex(&mutex_);
+ rtc::CritScope mutex(&mutex_);
return deflickering_.ProcessFrame(frame, stats);
}
int32_t VideoProcessingModuleImpl::BrightnessDetection(
const VideoFrame& frame,
const FrameStats& stats) {
- CriticalSectionScoped mutex(&mutex_);
+ rtc::CritScope mutex(&mutex_);
return brightness_detection_.ProcessFrame(frame, stats);
}
void VideoProcessingModuleImpl::EnableTemporalDecimation(bool enable) {
- CriticalSectionScoped mutex(&mutex_);
+ rtc::CritScope mutex(&mutex_);
frame_pre_processor_.EnableTemporalDecimation(enable);
}
void VideoProcessingModuleImpl::SetInputFrameResampleMode(VideoFrameResampling
resampling_mode) {
- CriticalSectionScoped cs(&mutex_);
+ rtc::CritScope cs(&mutex_);
frame_pre_processor_.SetInputFrameResampleMode(resampling_mode);
}
int32_t VideoProcessingModuleImpl::SetTargetResolution(uint32_t width,
uint32_t height,
uint32_t frame_rate) {
- CriticalSectionScoped cs(&mutex_);
+ rtc::CritScope cs(&mutex_);
return frame_pre_processor_.SetTargetResolution(width, height, frame_rate);
}
+void VideoProcessingModuleImpl::SetTargetFramerate(int frame_rate) {
+ rtc::CritScope cs(&mutex_);
+ frame_pre_processor_.SetTargetFramerate(frame_rate);
+}
+
uint32_t VideoProcessingModuleImpl::Decimatedframe_rate() {
- CriticalSectionScoped cs(&mutex_);
+ rtc::CritScope cs(&mutex_);
return frame_pre_processor_.Decimatedframe_rate();
}
uint32_t VideoProcessingModuleImpl::DecimatedWidth() const {
- CriticalSectionScoped cs(&mutex_);
+ rtc::CritScope cs(&mutex_);
return frame_pre_processor_.DecimatedWidth();
}
uint32_t VideoProcessingModuleImpl::DecimatedHeight() const {
- CriticalSectionScoped cs(&mutex_);
+ rtc::CritScope cs(&mutex_);
return frame_pre_processor_.DecimatedHeight();
}
int32_t VideoProcessingModuleImpl::PreprocessFrame(
const VideoFrame& frame,
VideoFrame** processed_frame) {
- CriticalSectionScoped mutex(&mutex_);
+ rtc::CritScope mutex(&mutex_);
return frame_pre_processor_.PreprocessFrame(frame, processed_frame);
}
VideoContentMetrics* VideoProcessingModuleImpl::ContentMetrics() const {
- CriticalSectionScoped mutex(&mutex_);
+ rtc::CritScope mutex(&mutex_);
return frame_pre_processor_.ContentMetrics();
}
void VideoProcessingModuleImpl::EnableContentAnalysis(bool enable) {
- CriticalSectionScoped mutex(&mutex_);
+ rtc::CritScope mutex(&mutex_);
frame_pre_processor_.EnableContentAnalysis(enable);
}
diff --git a/chromium/third_party/webrtc/modules/video_processing/main/source/video_processing_impl.h b/chromium/third_party/webrtc/modules/video_processing/main/source/video_processing_impl.h
index b78c019fdb6..fed5197f497 100644
--- a/chromium/third_party/webrtc/modules/video_processing/main/source/video_processing_impl.h
+++ b/chromium/third_party/webrtc/modules/video_processing/main/source/video_processing_impl.h
@@ -11,6 +11,7 @@
#ifndef WEBRTC_MODULE_VIDEO_PROCESSING_IMPL_H
#define WEBRTC_MODULE_VIDEO_PROCESSING_IMPL_H
+#include "webrtc/base/criticalsection.h"
#include "webrtc/modules/video_processing/main/interface/video_processing.h"
#include "webrtc/modules/video_processing/main/source/brighten.h"
#include "webrtc/modules/video_processing/main/source/brightness_detection.h"
@@ -22,9 +23,8 @@ class CriticalSectionWrapper;
class VideoProcessingModuleImpl : public VideoProcessingModule {
public:
- VideoProcessingModuleImpl(int32_t id);
-
- virtual ~VideoProcessingModuleImpl();
+ VideoProcessingModuleImpl();
+ ~VideoProcessingModuleImpl() override;
void Reset() override;
@@ -48,6 +48,8 @@ class VideoProcessingModuleImpl : public VideoProcessingModule {
uint32_t height,
uint32_t frame_rate) override;
+ void SetTargetFramerate(int frame_rate) override;
+
// Get decimated values: frame rate/dimension
uint32_t Decimatedframe_rate() override;
uint32_t DecimatedWidth() const override;
@@ -62,8 +64,8 @@ class VideoProcessingModuleImpl : public VideoProcessingModule {
VideoContentMetrics* ContentMetrics() const override;
private:
- CriticalSectionWrapper& mutex_;
- VPMDeflickering deflickering_;
+ mutable rtc::CriticalSection mutex_;
+ VPMDeflickering deflickering_ GUARDED_BY(mutex_);
VPMBrightnessDetection brightness_detection_;
VPMFramePreprocessor frame_pre_processor_;
};
diff --git a/chromium/third_party/webrtc/modules/video_processing/main/test/unit_test/video_processing_unittest.cc b/chromium/third_party/webrtc/modules/video_processing/main/test/unit_test/video_processing_unittest.cc
index 5e74ec02e37..99984fa0029 100644
--- a/chromium/third_party/webrtc/modules/video_processing/main/test/unit_test/video_processing_unittest.cc
+++ b/chromium/third_party/webrtc/modules/video_processing/main/test/unit_test/video_processing_unittest.cc
@@ -66,7 +66,7 @@ VideoProcessingModuleTest::VideoProcessingModuleTest()
frame_length_(CalcBufferSize(kI420, width_, height_)) {}
void VideoProcessingModuleTest::SetUp() {
- vpm_ = VideoProcessingModule::Create(0);
+ vpm_ = VideoProcessingModule::Create();
ASSERT_TRUE(vpm_ != NULL);
ASSERT_EQ(0, video_frame_.CreateEmptyFrame(width_, height_, width_,
diff --git a/chromium/third_party/webrtc/modules/video_render/BUILD.gn b/chromium/third_party/webrtc/modules/video_render/BUILD.gn
index 8e67cc6fe05..80f23870aae 100644
--- a/chromium/third_party/webrtc/modules/video_render/BUILD.gn
+++ b/chromium/third_party/webrtc/modules/video_render/BUILD.gn
@@ -161,7 +161,7 @@ if (!build_with_chromium) {
deps += [ "../..:webrtc_common" ]
- cflags += [ "-fobjc-arc" ] # CLANG_ENABLE_OBJC_ARC = YES.
+ cflags = [ "-fobjc-arc" ] # CLANG_ENABLE_OBJC_ARC = YES.
}
all_dependent_configs = [ ":video_render_internal_impl_config" ]
diff --git a/chromium/third_party/webrtc/modules/video_render/video_render.gypi b/chromium/third_party/webrtc/modules/video_render/video_render.gypi
index c7b0d7f17a9..63f69b0a63a 100644
--- a/chromium/third_party/webrtc/modules/video_render/video_render.gypi
+++ b/chromium/third_party/webrtc/modules/video_render/video_render.gypi
@@ -197,25 +197,6 @@
] # conditions
}, # video_render_module_test
], # targets
- 'conditions': [
- ['test_isolation_mode != "noop"', {
- 'targets': [
- {
- 'target_name': 'video_render_tests_run',
- 'type': 'none',
- 'dependencies': [
- 'video_render_tests',
- ],
- 'includes': [
- '../../build/isolate.gypi',
- ],
- 'sources': [
- 'video_render_tests.isolate',
- ],
- },
- ],
- }],
- ],
}], # include_tests==1
], # conditions
}
diff --git a/chromium/third_party/webrtc/modules/video_render/windows/video_render_direct3d9.cc b/chromium/third_party/webrtc/modules/video_render/windows/video_render_direct3d9.cc
index ae37110e4af..dab4e7a6085 100644
--- a/chromium/third_party/webrtc/modules/video_render/windows/video_render_direct3d9.cc
+++ b/chromium/third_party/webrtc/modules/video_render/windows/video_render_direct3d9.cc
@@ -602,9 +602,6 @@ int VideoRenderDirect3D9::UpdateRenderSurface()
_pd3dDevice->SetStreamSource(0, _pVB, 0, sizeof(CUSTOMVERTEX));
_pd3dDevice->SetFVF(D3DFVF_CUSTOMVERTEX);
- D3DXMATRIX matWorld;
- D3DXMATRIX matWorldTemp;
-
//draw all the channels
//get texture from the channels
LPDIRECT3DTEXTURE9 textureFromChannel = NULL;
diff --git a/chromium/third_party/webrtc/modules/video_render/windows/video_render_direct3d9.h b/chromium/third_party/webrtc/modules/video_render/windows/video_render_direct3d9.h
index 5a2f1f9f611..15379b8f987 100644
--- a/chromium/third_party/webrtc/modules/video_render/windows/video_render_direct3d9.h
+++ b/chromium/third_party/webrtc/modules/video_render/windows/video_render_direct3d9.h
@@ -14,7 +14,6 @@
#include "webrtc/modules/video_render/windows/i_video_render_win.h"
#include <d3d9.h>
-#include <d3dx9.h>
#include <ddraw.h>
#include <Map>
diff --git a/chromium/third_party/webrtc/modules/video_render/video_render_tests.isolate b/chromium/third_party/webrtc/modules/video_render_tests.isolate
index ca59a8c3a72..ca59a8c3a72 100644
--- a/chromium/third_party/webrtc/modules/video_render/video_render_tests.isolate
+++ b/chromium/third_party/webrtc/modules/video_render_tests.isolate