author     Allan Sandfeld Jensen <allan.jensen@theqtcompany.com>  2016-07-14 17:41:05 +0200
committer  Allan Sandfeld Jensen <allan.jensen@qt.io>  2016-08-04 12:37:36 +0000
commit     399c965b6064c440ddcf4015f5f8e9d131c7a0a6 (patch)
tree       6b06b60ff365abef0e13b3503d593a0df48d20e8 /chromium/third_party/webrtc/modules/audio_coding/neteq
parent     7366110654eec46f21b6824f302356426f48cd74 (diff)
download   qtwebengine-chromium-399c965b6064c440ddcf4015f5f8e9d131c7a0a6.tar.gz
BASELINE: Update Chromium to 52.0.2743.76 and Ninja to 1.7.1
Change-Id: I382f51b959689505a60f8b707255ecb344f7d8b4
Reviewed-by: Michael Brüning <michael.bruning@qt.io>
Diffstat (limited to 'chromium/third_party/webrtc/modules/audio_coding/neteq')
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/audio_decoder_impl.cc  101
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/audio_decoder_impl.h  38
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/audio_multi_vector.cc  8
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/audio_vector.cc  347
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/audio_vector.h  38
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/audio_vector_unittest.cc  8
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/background_noise.cc  17
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/comfort_noise.cc  42
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/comfort_noise.h  3
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/cross_correlation.cc  62
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/cross_correlation.h  50
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic.cc  61
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic.h  28
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic_fax.cc  11
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic_fax.h  18
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic_normal.cc  24
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic_normal.h  31
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic_unittest.cc  43
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/decoder_database.cc  127
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/decoder_database.h  53
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/decoder_database_unittest.cc  37
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/delay_manager.cc  33
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/delay_manager.h  17
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/delay_manager_unittest.cc  19
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/delay_peak_detector.cc  50
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/delay_peak_detector.h  20
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/delay_peak_detector_unittest.cc  20
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/dsp_helper.cc  18
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/dsp_helper.h  7
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/expand.cc  88
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/expand.h  10
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/expand_unittest.cc  36
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/include/neteq.h  7
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/merge.cc  75
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/merge.h  13
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/mock/mock_decoder_database.h  3
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/mock/mock_delay_manager.h  5
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/mock/mock_delay_peak_detector.h  5
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/mock/mock_packet_buffer.h  4
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/neteq.cc  46
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/neteq.gypi  21
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_external_decoder_unittest.cc  4
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_impl.cc  205
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_impl.h  57
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_impl_unittest.cc  489
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_network_stats_unittest.cc  23
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_stereo_unittest.cc  7
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_tests.gypi  5
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_unittest.cc  763
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/normal.cc  23
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/normal_unittest.cc  50
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/packet.cc  19
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/packet.h  23
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/packet_buffer.cc  15
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/packet_buffer.h  9
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/packet_buffer_unittest.cc  33
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/payload_splitter.cc  9
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/payload_splitter_unittest.cc  47
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/test/RTPencode.cc  41
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/test/neteq_isac_quality_test.cc  4
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/test/neteq_opus_quality_test.cc  4
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/tick_timer.cc  25
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/tick_timer.h  110
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/tick_timer_unittest.cc  135
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/time_stretch.cc  14
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/timestamp_scaler_unittest.cc  51
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.cc  4
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/tools/neteq_performance_test.cc  5
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.cc  4
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/tools/neteq_rtpplay.cc  4
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/tools/rtc_event_log_source.cc  112
-rw-r--r--  chromium/third_party/webrtc/modules/audio_coding/neteq/tools/rtc_event_log_source.h  11
72 files changed, 2528 insertions, 1421 deletions
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/audio_decoder_impl.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/audio_decoder_impl.cc
index d800cc7dbe9..762c3859837 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/audio_decoder_impl.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/audio_decoder_impl.cc
@@ -13,7 +13,6 @@
#include <assert.h>
#include "webrtc/base/checks.h"
-#include "webrtc/modules/audio_coding/codecs/cng/webrtc_cng.h"
#include "webrtc/modules/audio_coding/codecs/g711/audio_decoder_pcm.h"
#ifdef WEBRTC_CODEC_G722
#include "webrtc/modules/audio_coding/codecs/g722/audio_decoder_g722.h"
@@ -36,43 +35,6 @@
namespace webrtc {
-AudioDecoderCng::AudioDecoderCng() {
- RTC_CHECK_EQ(0, WebRtcCng_CreateDec(&dec_state_));
- WebRtcCng_InitDec(dec_state_);
-}
-
-AudioDecoderCng::~AudioDecoderCng() {
- WebRtcCng_FreeDec(dec_state_);
-}
-
-void AudioDecoderCng::Reset() {
- WebRtcCng_InitDec(dec_state_);
-}
-
-int AudioDecoderCng::IncomingPacket(const uint8_t* payload,
- size_t payload_len,
- uint16_t rtp_sequence_number,
- uint32_t rtp_timestamp,
- uint32_t arrival_timestamp) {
- return -1;
-}
-
-CNG_dec_inst* AudioDecoderCng::CngDecoderInstance() {
- return dec_state_;
-}
-
-size_t AudioDecoderCng::Channels() const {
- return 1;
-}
-
-int AudioDecoderCng::DecodeInternal(const uint8_t* encoded,
- size_t encoded_len,
- int sample_rate_hz,
- int16_t* decoded,
- SpeechType* speech_type) {
- return -1;
-}
-
bool CodecSupported(NetEqDecoder codec_type) {
switch (codec_type) {
case NetEqDecoder::kDecoderPCMu:
@@ -175,67 +137,4 @@ int CodecSampleRateHz(NetEqDecoder codec_type) {
}
}
-AudioDecoder* CreateAudioDecoder(NetEqDecoder codec_type) {
- if (!CodecSupported(codec_type)) {
- return NULL;
- }
- switch (codec_type) {
- case NetEqDecoder::kDecoderPCMu:
- return new AudioDecoderPcmU(1);
- case NetEqDecoder::kDecoderPCMa:
- return new AudioDecoderPcmA(1);
- case NetEqDecoder::kDecoderPCMu_2ch:
- return new AudioDecoderPcmU(2);
- case NetEqDecoder::kDecoderPCMa_2ch:
- return new AudioDecoderPcmA(2);
-#ifdef WEBRTC_CODEC_ILBC
- case NetEqDecoder::kDecoderILBC:
- return new AudioDecoderIlbc;
-#endif
-#if defined(WEBRTC_CODEC_ISACFX)
- case NetEqDecoder::kDecoderISAC:
- return new AudioDecoderIsacFix();
-#elif defined(WEBRTC_CODEC_ISAC)
- case NetEqDecoder::kDecoderISAC:
- case NetEqDecoder::kDecoderISACswb:
- return new AudioDecoderIsac();
-#endif
- case NetEqDecoder::kDecoderPCM16B:
- case NetEqDecoder::kDecoderPCM16Bwb:
- case NetEqDecoder::kDecoderPCM16Bswb32kHz:
- case NetEqDecoder::kDecoderPCM16Bswb48kHz:
- return new AudioDecoderPcm16B(1);
- case NetEqDecoder::kDecoderPCM16B_2ch:
- case NetEqDecoder::kDecoderPCM16Bwb_2ch:
- case NetEqDecoder::kDecoderPCM16Bswb32kHz_2ch:
- case NetEqDecoder::kDecoderPCM16Bswb48kHz_2ch:
- return new AudioDecoderPcm16B(2);
- case NetEqDecoder::kDecoderPCM16B_5ch:
- return new AudioDecoderPcm16B(5);
-#ifdef WEBRTC_CODEC_G722
- case NetEqDecoder::kDecoderG722:
- return new AudioDecoderG722;
- case NetEqDecoder::kDecoderG722_2ch:
- return new AudioDecoderG722Stereo;
-#endif
-#ifdef WEBRTC_CODEC_OPUS
- case NetEqDecoder::kDecoderOpus:
- return new AudioDecoderOpus(1);
- case NetEqDecoder::kDecoderOpus_2ch:
- return new AudioDecoderOpus(2);
-#endif
- case NetEqDecoder::kDecoderCNGnb:
- case NetEqDecoder::kDecoderCNGwb:
- case NetEqDecoder::kDecoderCNGswb32kHz:
- case NetEqDecoder::kDecoderCNGswb48kHz:
- return new AudioDecoderCng;
- case NetEqDecoder::kDecoderRED:
- case NetEqDecoder::kDecoderAVT:
- case NetEqDecoder::kDecoderArbitrary:
- default: {
- return NULL;
- }
- }
-}
-
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/audio_decoder_impl.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/audio_decoder_impl.h
index bc8bdd9626d..579ccb36f7a 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/audio_decoder_impl.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/audio_decoder_impl.h
@@ -16,7 +16,6 @@
#include "webrtc/engine_configurations.h"
#include "webrtc/base/constructormagic.h"
#include "webrtc/modules/audio_coding/codecs/audio_decoder.h"
-#include "webrtc/modules/audio_coding/codecs/cng/webrtc_cng.h"
#ifdef WEBRTC_CODEC_G722
#include "webrtc/modules/audio_coding/codecs/g722/g722_interface.h"
#endif
@@ -25,38 +24,6 @@
namespace webrtc {
-// AudioDecoderCng is a special type of AudioDecoder. It inherits from
-// AudioDecoder just to fit in the DecoderDatabase. None of the class methods
-// should be used, except constructor, destructor, and accessors.
-// TODO(hlundin): Consider the possibility to create a super-class to
-// AudioDecoder that is stored in DecoderDatabase. Then AudioDecoder and a
-// specific CngDecoder class could both inherit from that class.
-class AudioDecoderCng : public AudioDecoder {
- public:
- explicit AudioDecoderCng();
- ~AudioDecoderCng() override;
- void Reset() override;
- int IncomingPacket(const uint8_t* payload,
- size_t payload_len,
- uint16_t rtp_sequence_number,
- uint32_t rtp_timestamp,
- uint32_t arrival_timestamp) override;
-
- CNG_dec_inst* CngDecoderInstance() override;
- size_t Channels() const override;
-
- protected:
- int DecodeInternal(const uint8_t* encoded,
- size_t encoded_len,
- int sample_rate_hz,
- int16_t* decoded,
- SpeechType* speech_type) override;
-
- private:
- CNG_dec_inst* dec_state_;
- RTC_DISALLOW_COPY_AND_ASSIGN(AudioDecoderCng);
-};
-
using NetEqDecoder = acm2::RentACodec::NetEqDecoder;
// Returns true if |codec_type| is supported.
@@ -65,10 +32,5 @@ bool CodecSupported(NetEqDecoder codec_type);
// Returns the sample rate for |codec_type|.
int CodecSampleRateHz(NetEqDecoder codec_type);
-// Creates an AudioDecoder object of type |codec_type|. Returns NULL for for
-// unsupported codecs, and when creating an AudioDecoder is not applicable
-// (e.g., for RED and DTMF/AVT types).
-AudioDecoder* CreateAudioDecoder(NetEqDecoder codec_type);
-
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_NETEQ_AUDIO_DECODER_IMPL_H_
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/audio_multi_vector.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/audio_multi_vector.cc
index bd38c43903a..c80909d7b58 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/audio_multi_vector.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/audio_multi_vector.cc
@@ -106,7 +106,7 @@ void AudioMultiVector::PushBackFromIndex(const AudioMultiVector& append_this,
assert(num_channels_ == append_this.num_channels_);
if (num_channels_ == append_this.num_channels_) {
for (size_t i = 0; i < num_channels_; ++i) {
- channels_[i]->PushBack(&append_this[i][index], length);
+ channels_[i]->PushBack(append_this[i], length, index);
}
}
}
@@ -133,14 +133,14 @@ size_t AudioMultiVector::ReadInterleavedFromIndex(size_t start_index,
int16_t* destination) const {
RTC_DCHECK(destination);
size_t index = 0; // Number of elements written to |destination| so far.
- assert(start_index <= Size());
+ RTC_DCHECK_LE(start_index, Size());
start_index = std::min(start_index, Size());
if (length + start_index > Size()) {
length = Size() - start_index;
}
if (num_channels_ == 1) {
// Special case to avoid the nested for loop below.
- memcpy(destination, &(*this)[0][start_index], length * sizeof(int16_t));
+ (*this)[0].CopyTo(length, start_index, destination);
return length;
}
for (size_t i = 0; i < length; ++i) {
@@ -167,7 +167,7 @@ void AudioMultiVector::OverwriteAt(const AudioMultiVector& insert_this,
length = std::min(length, insert_this.Size());
if (num_channels_ == insert_this.num_channels_) {
for (size_t i = 0; i < num_channels_; ++i) {
- channels_[i]->OverwriteAt(&insert_this[i][0], length, position);
+ channels_[i]->OverwriteAt(insert_this[i], length, position);
}
}
}
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/audio_vector.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/audio_vector.cc
index 013e1d89ad9..ea737a55424 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/audio_vector.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/audio_vector.cc
@@ -15,124 +15,236 @@
#include <algorithm>
#include <memory>
+#include "webrtc/base/checks.h"
#include "webrtc/typedefs.h"
namespace webrtc {
AudioVector::AudioVector()
- : array_(new int16_t[kDefaultInitialSize]),
- first_free_ix_(0),
- capacity_(kDefaultInitialSize) {
+ : AudioVector(kDefaultInitialSize) {
+ Clear();
}
AudioVector::AudioVector(size_t initial_size)
- : array_(new int16_t[initial_size]),
- first_free_ix_(initial_size),
- capacity_(initial_size) {
- memset(array_.get(), 0, initial_size * sizeof(int16_t));
+ : array_(new int16_t[initial_size + 1]),
+ capacity_(initial_size + 1),
+ begin_index_(0),
+ end_index_(capacity_ - 1) {
+ memset(array_.get(), 0, capacity_ * sizeof(int16_t));
}
AudioVector::~AudioVector() = default;
void AudioVector::Clear() {
- first_free_ix_ = 0;
+ end_index_ = begin_index_ = 0;
}
void AudioVector::CopyTo(AudioVector* copy_to) const {
- if (copy_to) {
- copy_to->Reserve(Size());
- assert(copy_to->capacity_ >= Size());
- memcpy(copy_to->array_.get(), array_.get(), Size() * sizeof(int16_t));
- copy_to->first_free_ix_ = first_free_ix_;
+ RTC_DCHECK(copy_to);
+ copy_to->Reserve(Size());
+ CopyTo(Size(), 0, copy_to->array_.get());
+ copy_to->begin_index_ = 0;
+ copy_to->end_index_ = Size();
+}
+
+void AudioVector::CopyTo(
+ size_t length, size_t position, int16_t* copy_to) const {
+ if (length == 0)
+ return;
+ length = std::min(length, Size() - position);
+ const size_t copy_index = (begin_index_ + position) % capacity_;
+ const size_t first_chunk_length =
+ std::min(length, capacity_ - copy_index);
+ memcpy(copy_to, &array_[copy_index],
+ first_chunk_length * sizeof(int16_t));
+ const size_t remaining_length = length - first_chunk_length;
+ if (remaining_length > 0) {
+ memcpy(&copy_to[first_chunk_length], array_.get(),
+ remaining_length * sizeof(int16_t));
}
}
void AudioVector::PushFront(const AudioVector& prepend_this) {
- size_t insert_length = prepend_this.Size();
- Reserve(Size() + insert_length);
- memmove(&array_[insert_length], &array_[0], Size() * sizeof(int16_t));
- memcpy(&array_[0], &prepend_this.array_[0], insert_length * sizeof(int16_t));
- first_free_ix_ += insert_length;
+ const size_t length = prepend_this.Size();
+ if (length == 0)
+ return;
+
+ // Although the subsequent calling to PushFront does Reserve in it, it is
+ // always more efficient to do a big Reserve first.
+ Reserve(Size() + length);
+
+ const size_t first_chunk_length =
+ std::min(length, prepend_this.capacity_ - prepend_this.begin_index_);
+ const size_t remaining_length = length - first_chunk_length;
+ if (remaining_length > 0)
+ PushFront(prepend_this.array_.get(), remaining_length);
+ PushFront(&prepend_this.array_[prepend_this.begin_index_],
+ first_chunk_length);
}
void AudioVector::PushFront(const int16_t* prepend_this, size_t length) {
- // Same operation as InsertAt beginning.
- InsertAt(prepend_this, length, 0);
+ if (length == 0)
+ return;
+ Reserve(Size() + length);
+ const size_t first_chunk_length = std::min(length, begin_index_);
+ memcpy(&array_[begin_index_ - first_chunk_length],
+ &prepend_this[length - first_chunk_length],
+ first_chunk_length * sizeof(int16_t));
+ const size_t remaining_length = length - first_chunk_length;
+ if (remaining_length > 0) {
+ memcpy(&array_[capacity_ - remaining_length], prepend_this,
+ remaining_length * sizeof(int16_t));
+ }
+ begin_index_ = (begin_index_ + capacity_ - length) % capacity_;
}
void AudioVector::PushBack(const AudioVector& append_this) {
- PushBack(append_this.array_.get(), append_this.Size());
+ PushBack(append_this, append_this.Size(), 0);
+}
+
+void AudioVector::PushBack(
+ const AudioVector& append_this, size_t length, size_t position) {
+ RTC_DCHECK_LE(position, append_this.Size());
+ RTC_DCHECK_LE(length, append_this.Size() - position);
+
+ if (length == 0)
+ return;
+
+ // Although the subsequent calling to PushBack does Reserve in it, it is
+ // always more efficient to do a big Reserve first.
+ Reserve(Size() + length);
+
+ const size_t start_index =
+ (append_this.begin_index_ + position) % append_this.capacity_;
+ const size_t first_chunk_length = std::min(
+ length, append_this.capacity_ - start_index);
+ PushBack(&append_this.array_[start_index], first_chunk_length);
+
+ const size_t remaining_length = length - first_chunk_length;
+ if (remaining_length > 0)
+ PushBack(append_this.array_.get(), remaining_length);
}
void AudioVector::PushBack(const int16_t* append_this, size_t length) {
+ if (length == 0)
+ return;
Reserve(Size() + length);
- memcpy(&array_[first_free_ix_], append_this, length * sizeof(int16_t));
- first_free_ix_ += length;
+ const size_t first_chunk_length = std::min(length, capacity_ - end_index_);
+ memcpy(&array_[end_index_], append_this,
+ first_chunk_length * sizeof(int16_t));
+ const size_t remaining_length = length - first_chunk_length;
+ if (remaining_length > 0) {
+ memcpy(array_.get(), &append_this[first_chunk_length],
+ remaining_length * sizeof(int16_t));
+ }
+ end_index_ = (end_index_ + length) % capacity_;
}
void AudioVector::PopFront(size_t length) {
- if (length >= Size()) {
- // Remove all elements.
- Clear();
- } else {
- size_t remaining_samples = Size() - length;
- memmove(&array_[0], &array_[length], remaining_samples * sizeof(int16_t));
- first_free_ix_ -= length;
- }
+ if (length == 0)
+ return;
+ length = std::min(length, Size());
+ begin_index_ = (begin_index_ + length) % capacity_;
}
void AudioVector::PopBack(size_t length) {
+ if (length == 0)
+ return;
// Never remove more than what is in the array.
length = std::min(length, Size());
- first_free_ix_ -= length;
+ end_index_ = (end_index_ + capacity_ - length) % capacity_;
}
void AudioVector::Extend(size_t extra_length) {
- Reserve(Size() + extra_length);
- memset(&array_[first_free_ix_], 0, extra_length * sizeof(int16_t));
- first_free_ix_ += extra_length;
+ if (extra_length == 0)
+ return;
+ InsertZerosByPushBack(extra_length, Size());
}
void AudioVector::InsertAt(const int16_t* insert_this,
size_t length,
size_t position) {
- Reserve(Size() + length);
- // Cap the position at the current vector length, to be sure the iterator
- // does not extend beyond the end of the vector.
+ if (length == 0)
+ return;
+ // Cap the insert position at the current array length.
position = std::min(Size(), position);
- int16_t* insert_position_ptr = &array_[position];
- size_t samples_to_move = Size() - position;
- memmove(insert_position_ptr + length, insert_position_ptr,
- samples_to_move * sizeof(int16_t));
- memcpy(insert_position_ptr, insert_this, length * sizeof(int16_t));
- first_free_ix_ += length;
+
+ // When inserting to a position closer to the beginning, it is more efficient
+ // to insert by pushing front than to insert by pushing back, since less data
+ // will be moved, vice versa.
+ if (position <= Size() - position) {
+ InsertByPushFront(insert_this, length, position);
+ } else {
+ InsertByPushBack(insert_this, length, position);
+ }
}
void AudioVector::InsertZerosAt(size_t length,
size_t position) {
- Reserve(Size() + length);
- // Cap the position at the current vector length, to be sure the iterator
- // does not extend beyond the end of the vector.
- position = std::min(capacity_, position);
- int16_t* insert_position_ptr = &array_[position];
- size_t samples_to_move = Size() - position;
- memmove(insert_position_ptr + length, insert_position_ptr,
- samples_to_move * sizeof(int16_t));
- memset(insert_position_ptr, 0, length * sizeof(int16_t));
- first_free_ix_ += length;
+ if (length == 0)
+ return;
+ // Cap the insert position at the current array length.
+ position = std::min(Size(), position);
+
+ // When inserting to a position closer to the beginning, it is more efficient
+ // to insert by pushing front than to insert by pushing back, since less data
+ // will be moved, vice versa.
+ if (position <= Size() - position) {
+ InsertZerosByPushFront(length, position);
+ } else {
+ InsertZerosByPushBack(length, position);
+ }
+}
+
+void AudioVector::OverwriteAt(const AudioVector& insert_this,
+ size_t length,
+ size_t position) {
+ RTC_DCHECK_LE(length, insert_this.Size());
+ if (length == 0)
+ return;
+
+ // Cap the insert position at the current array length.
+ position = std::min(Size(), position);
+
+ // Although the subsequent calling to OverwriteAt does Reserve in it, it is
+ // always more efficient to do a big Reserve first.
+ size_t new_size = std::max(Size(), position + length);
+ Reserve(new_size);
+
+ const size_t first_chunk_length =
+ std::min(length, insert_this.capacity_ - insert_this.begin_index_);
+ OverwriteAt(&insert_this.array_[insert_this.begin_index_], first_chunk_length,
+ position);
+ const size_t remaining_length = length - first_chunk_length;
+ if (remaining_length > 0) {
+ OverwriteAt(insert_this.array_.get(), remaining_length,
+ position + first_chunk_length);
+ }
}
void AudioVector::OverwriteAt(const int16_t* insert_this,
size_t length,
size_t position) {
+ if (length == 0)
+ return;
// Cap the insert position at the current array length.
position = std::min(Size(), position);
- Reserve(position + length);
- memcpy(&array_[position], insert_this, length * sizeof(int16_t));
- if (position + length > Size()) {
- // Array was expanded.
- first_free_ix_ += position + length - Size();
+
+ size_t new_size = std::max(Size(), position + length);
+ Reserve(new_size);
+
+ const size_t overwrite_index = (begin_index_ + position) % capacity_;
+ const size_t first_chunk_length =
+ std::min(length, capacity_ - overwrite_index);
+ memcpy(&array_[overwrite_index], insert_this,
+ first_chunk_length * sizeof(int16_t));
+ const size_t remaining_length = length - first_chunk_length;
+ if (remaining_length > 0) {
+ memcpy(array_.get(), &insert_this[first_chunk_length],
+ remaining_length * sizeof(int16_t));
}
+
+ end_index_ = (begin_index_ + new_size) % capacity_;
}
void AudioVector::CrossFade(const AudioVector& append_this,
@@ -142,7 +254,7 @@ void AudioVector::CrossFade(const AudioVector& append_this,
assert(fade_length <= append_this.Size());
fade_length = std::min(fade_length, Size());
fade_length = std::min(fade_length, append_this.Size());
- size_t position = Size() - fade_length;
+ size_t position = Size() - fade_length + begin_index_;
// Cross fade the overlapping regions.
// |alpha| is the mixing factor in Q14.
// TODO(hlundin): Consider skipping +1 in the denominator to produce a
@@ -151,41 +263,132 @@ void AudioVector::CrossFade(const AudioVector& append_this,
int alpha = 16384;
for (size_t i = 0; i < fade_length; ++i) {
alpha -= alpha_step;
- array_[position + i] = (alpha * array_[position + i] +
- (16384 - alpha) * append_this[i] + 8192) >> 14;
+ array_[(position + i) % capacity_] =
+ (alpha * array_[(position + i) % capacity_] +
+ (16384 - alpha) * append_this[i] + 8192) >> 14;
}
assert(alpha >= 0); // Verify that the slope was correct.
// Append what is left of |append_this|.
size_t samples_to_push_back = append_this.Size() - fade_length;
if (samples_to_push_back > 0)
- PushBack(&append_this[fade_length], samples_to_push_back);
+ PushBack(append_this, samples_to_push_back, fade_length);
}
// Returns the number of elements in this AudioVector.
size_t AudioVector::Size() const {
- return first_free_ix_;
+ return (end_index_ + capacity_ - begin_index_) % capacity_;
}
// Returns true if this AudioVector is empty.
bool AudioVector::Empty() const {
- return first_free_ix_ == 0;
+ return begin_index_ == end_index_;
}
const int16_t& AudioVector::operator[](size_t index) const {
- return array_[index];
+ return array_[(begin_index_ + index) % capacity_];
}
int16_t& AudioVector::operator[](size_t index) {
- return array_[index];
+ return array_[(begin_index_ + index) % capacity_];
}
void AudioVector::Reserve(size_t n) {
- if (capacity_ < n) {
- std::unique_ptr<int16_t[]> temp_array(new int16_t[n]);
- memcpy(temp_array.get(), array_.get(), Size() * sizeof(int16_t));
- array_.swap(temp_array);
- capacity_ = n;
+ if (capacity_ > n)
+ return;
+ const size_t length = Size();
+ // Reserve one more sample to remove the ambiguity between empty vector and
+ // full vector. Therefore |begin_index_| == |end_index_| indicates empty
+ // vector, and |begin_index_| == (|end_index_| + 1) % capacity indicates
+ // full vector.
+ std::unique_ptr<int16_t[]> temp_array(new int16_t[n + 1]);
+ CopyTo(length, 0, temp_array.get());
+ array_.swap(temp_array);
+ begin_index_ = 0;
+ end_index_ = length;
+ capacity_ = n + 1;
+}
+
+void AudioVector::InsertByPushBack(const int16_t* insert_this,
+ size_t length,
+ size_t position) {
+ const size_t move_chunk_length = Size() - position;
+ std::unique_ptr<int16_t[]> temp_array(nullptr);
+ if (move_chunk_length > 0) {
+ // TODO(minyue): see if it is possible to avoid copying to a buffer.
+ temp_array.reset(new int16_t[move_chunk_length]);
+ CopyTo(move_chunk_length, position, temp_array.get());
+ PopBack(move_chunk_length);
+ }
+
+ Reserve(Size() + length + move_chunk_length);
+ PushBack(insert_this, length);
+ if (move_chunk_length > 0)
+ PushBack(temp_array.get(), move_chunk_length);
+}
+
+void AudioVector::InsertByPushFront(const int16_t* insert_this,
+ size_t length,
+ size_t position) {
+ std::unique_ptr<int16_t[]> temp_array(nullptr);
+ if (position > 0) {
+ // TODO(minyue): see if it is possible to avoid copying to a buffer.
+ temp_array.reset(new int16_t[position]);
+ CopyTo(position, 0, temp_array.get());
+ PopFront(position);
+ }
+
+ Reserve(Size() + length + position);
+ PushFront(insert_this, length);
+ if (position > 0)
+ PushFront(temp_array.get(), position);
+}
+
+void AudioVector::InsertZerosByPushBack(size_t length,
+ size_t position) {
+ const size_t move_chunk_length = Size() - position;
+ std::unique_ptr<int16_t[]> temp_array(nullptr);
+ if (move_chunk_length > 0) {
+ temp_array.reset(new int16_t[move_chunk_length]);
+ CopyTo(move_chunk_length, position, temp_array.get());
+ PopBack(move_chunk_length);
}
+
+ Reserve(Size() + length + move_chunk_length);
+
+ const size_t first_zero_chunk_length =
+ std::min(length, capacity_ - end_index_);
+ memset(&array_[end_index_], 0, first_zero_chunk_length * sizeof(int16_t));
+ const size_t remaining_zero_length = length - first_zero_chunk_length;
+ if (remaining_zero_length > 0)
+ memset(array_.get(), 0, remaining_zero_length * sizeof(int16_t));
+ end_index_ = (end_index_ + length) % capacity_;
+
+ if (move_chunk_length > 0)
+ PushBack(temp_array.get(), move_chunk_length);
+}
+
+void AudioVector::InsertZerosByPushFront(size_t length,
+ size_t position) {
+ std::unique_ptr<int16_t[]> temp_array(nullptr);
+ if (position > 0) {
+ temp_array.reset(new int16_t[position]);
+ CopyTo(position, 0, temp_array.get());
+ PopFront(position);
+ }
+
+ Reserve(Size() + length + position);
+
+ const size_t first_zero_chunk_length = std::min(length, begin_index_);
+ memset(&array_[begin_index_ - first_zero_chunk_length], 0,
+ first_zero_chunk_length * sizeof(int16_t));
+ const size_t remaining_zero_length = length - first_zero_chunk_length;
+ if (remaining_zero_length > 0)
+ memset(&array_[capacity_ - remaining_zero_length], 0,
+ remaining_zero_length * sizeof(int16_t));
+ begin_index_ = (begin_index_ + capacity_ - length) % capacity_;
+
+ if (position > 0)
+ PushFront(temp_array.get(), position);
}
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/audio_vector.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/audio_vector.h
index 15297f9bc8c..756292aa783 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/audio_vector.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/audio_vector.h
@@ -37,6 +37,9 @@ class AudioVector {
// |copy_to| will be an exact replica of this object.
virtual void CopyTo(AudioVector* copy_to) const;
+ // Copies |length| values from |position| in this vector to |copy_to|.
+ virtual void CopyTo(size_t length, size_t position, int16_t* copy_to) const;
+
// Prepends the contents of AudioVector |prepend_this| to this object. The
// length of this object is increased with the length of |prepend_this|.
virtual void PushFront(const AudioVector& prepend_this);
@@ -48,6 +51,12 @@ class AudioVector {
// Same as PushFront but will append to the end of this object.
virtual void PushBack(const AudioVector& append_this);
+ // Appends a segment of |append_this| to the end of this object. The segment
+ // starts from |position| and has |length| samples.
+ virtual void PushBack(const AudioVector& append_this,
+ size_t length,
+ size_t position);
+
// Same as PushFront but will append to the end of this object.
virtual void PushBack(const int16_t* append_this, size_t length);
@@ -71,6 +80,15 @@ class AudioVector {
// Like InsertAt, but inserts |length| zero elements at |position|.
virtual void InsertZerosAt(size_t length, size_t position);
+ // Overwrites |length| elements of this AudioVector starting from |position|
+ // with first values in |AudioVector|. The definition of |position|
+ // is the same as for InsertAt(). If |length| and |position| are selected
+ // such that the new data extends beyond the end of the current AudioVector,
+ // the vector is extended to accommodate the new data.
+ virtual void OverwriteAt(const AudioVector& insert_this,
+ size_t length,
+ size_t position);
+
// Overwrites |length| elements of this AudioVector with values taken from the
// array |insert_this|, starting at |position|. The definition of |position|
// is the same as for InsertAt(). If |length| and |position| are selected
@@ -100,11 +118,27 @@ class AudioVector {
void Reserve(size_t n);
+ void InsertByPushBack(const int16_t* insert_this, size_t length,
+ size_t position);
+
+ void InsertByPushFront(const int16_t* insert_this, size_t length,
+ size_t position);
+
+ void InsertZerosByPushBack(size_t length, size_t position);
+
+ void InsertZerosByPushFront(size_t length, size_t position);
+
std::unique_ptr<int16_t[]> array_;
- size_t first_free_ix_; // The first index after the last sample in array_.
- // Note that this index may point outside of array_.
+
size_t capacity_; // Allocated number of samples in the array.
+ // The index of the first sample in |array_|, except when
+ // |begin_index_ == end_index_|, which indicates an empty buffer.
+ size_t begin_index_;
+
+ // The index of the sample after the last sample in |array_|.
+ size_t end_index_;
+
RTC_DISALLOW_COPY_AND_ASSIGN(AudioVector);
};
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/audio_vector_unittest.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/audio_vector_unittest.cc
index 08009863455..cee7e586695 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/audio_vector_unittest.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/audio_vector_unittest.cc
@@ -82,14 +82,6 @@ TEST_F(AudioVectorTest, PushBackAndCopy) {
EXPECT_TRUE(vec_copy.Empty());
}
-// Try to copy to a NULL pointer. Nothing should happen.
-TEST_F(AudioVectorTest, CopyToNull) {
- AudioVector vec;
- AudioVector* vec_copy = NULL;
- vec.PushBack(array_, array_length());
- vec.CopyTo(vec_copy);
-}
-
// Test the PushBack method with another AudioVector as input argument.
TEST_F(AudioVectorTest, PushBackVector) {
static const size_t kLength = 10;
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/background_noise.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/background_noise.cc
index 7e7a6325e97..9cfd6cb40ed 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/background_noise.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/background_noise.cc
@@ -17,6 +17,7 @@
#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
#include "webrtc/modules/audio_coding/neteq/audio_multi_vector.h"
+#include "webrtc/modules/audio_coding/neteq/cross_correlation.h"
#include "webrtc/modules/audio_coding/neteq/post_decode_vad.h"
namespace webrtc {
@@ -58,10 +59,7 @@ void BackgroundNoise::Update(const AudioMultiVector& input,
ChannelParameters& parameters = channel_parameters_[channel_ix];
int16_t temp_signal_array[kVecLen + kMaxLpcOrder] = {0};
int16_t* temp_signal = &temp_signal_array[kMaxLpcOrder];
- memcpy(temp_signal,
- &input[channel_ix][input.Size() - kVecLen],
- sizeof(int16_t) * kVecLen);
-
+ input[channel_ix].CopyTo(kVecLen, input.Size() - kVecLen, temp_signal);
int32_t sample_energy = CalculateAutoCorrelation(temp_signal, kVecLen,
auto_correlation);
@@ -169,15 +167,10 @@ int16_t BackgroundNoise::ScaleShift(size_t channel) const {
int32_t BackgroundNoise::CalculateAutoCorrelation(
const int16_t* signal, size_t length, int32_t* auto_correlation) const {
- int16_t signal_max = WebRtcSpl_MaxAbsValueW16(signal, length);
- int correlation_scale = kLogVecLen -
- WebRtcSpl_NormW32(signal_max * signal_max);
- correlation_scale = std::max(0, correlation_scale);
-
static const int kCorrelationStep = -1;
- WebRtcSpl_CrossCorrelation(auto_correlation, signal, signal, length,
- kMaxLpcOrder + 1, correlation_scale,
- kCorrelationStep);
+ const int correlation_scale =
+ CrossCorrelationWithAutoShift(signal, signal, length, kMaxLpcOrder + 1,
+ kCorrelationStep, auto_correlation);
// Number of shifts to normalize energy to energy/sample.
int energy_sample_shift = kLogVecLen - correlation_scale;
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/comfort_noise.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/comfort_noise.cc
index a5b08469bea..90b02daf712 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/comfort_noise.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/comfort_noise.cc
@@ -14,7 +14,6 @@
#include "webrtc/base/logging.h"
#include "webrtc/modules/audio_coding/codecs/audio_decoder.h"
-#include "webrtc/modules/audio_coding/codecs/cng/webrtc_cng.h"
#include "webrtc/modules/audio_coding/neteq/decoder_database.h"
#include "webrtc/modules/audio_coding/neteq/dsp_helper.h"
#include "webrtc/modules/audio_coding/neteq/sync_buffer.h"
@@ -23,31 +22,23 @@ namespace webrtc {
void ComfortNoise::Reset() {
first_call_ = true;
- internal_error_code_ = 0;
}
int ComfortNoise::UpdateParameters(Packet* packet) {
assert(packet); // Existence is verified by caller.
// Get comfort noise decoder.
- AudioDecoder* cng_decoder = decoder_database_->GetDecoder(
- packet->header.payloadType);
- if (!cng_decoder) {
+ if (decoder_database_->SetActiveCngDecoder(packet->header.payloadType)
+ != kOK) {
delete [] packet->payload;
delete packet;
return kUnknownPayloadType;
}
- decoder_database_->SetActiveCngDecoder(packet->header.payloadType);
- CNG_dec_inst* cng_inst = cng_decoder->CngDecoderInstance();
- int16_t ret = WebRtcCng_UpdateSid(cng_inst,
- packet->payload,
- packet->payload_length);
+ ComfortNoiseDecoder* cng_decoder = decoder_database_->GetActiveCngDecoder();
+ RTC_DCHECK(cng_decoder);
+ cng_decoder->UpdateSid(rtc::ArrayView<const uint8_t>(
+ packet->payload, packet->payload_length));
delete [] packet->payload;
delete packet;
- if (ret < 0) {
- internal_error_code_ = WebRtcCng_GetErrorCodeDec(cng_inst);
- LOG(LS_ERROR) << "WebRtcCng_UpdateSid produced " << internal_error_code_;
- return kInternalError;
- }
return kOK;
}
@@ -63,30 +54,31 @@ int ComfortNoise::Generate(size_t requested_length,
}
size_t number_of_samples = requested_length;
- int16_t new_period = 0;
+ bool new_period = false;
if (first_call_) {
// Generate noise and overlap slightly with old data.
number_of_samples = requested_length + overlap_length_;
- new_period = 1;
+ new_period = true;
}
output->AssertSize(number_of_samples);
// Get the decoder from the database.
- AudioDecoder* cng_decoder = decoder_database_->GetActiveCngDecoder();
+ ComfortNoiseDecoder* cng_decoder = decoder_database_->GetActiveCngDecoder();
if (!cng_decoder) {
LOG(LS_ERROR) << "Unknwown payload type";
return kUnknownPayloadType;
}
- CNG_dec_inst* cng_inst = cng_decoder->CngDecoderInstance();
- // The expression &(*output)[0][0] is a pointer to the first element in
- // the first channel.
- if (WebRtcCng_Generate(cng_inst, &(*output)[0][0], number_of_samples,
- new_period) < 0) {
+
+ std::unique_ptr<int16_t[]> temp(new int16_t[number_of_samples]);
+ if (!cng_decoder->Generate(
+ rtc::ArrayView<int16_t>(temp.get(), number_of_samples),
+ new_period)) {
// Error returned.
output->Zeros(requested_length);
- internal_error_code_ = WebRtcCng_GetErrorCodeDec(cng_inst);
- LOG(LS_ERROR) << "WebRtcCng_Generate produced " << internal_error_code_;
+ LOG(LS_ERROR) <<
+ "ComfortNoiseDecoder::Genererate failed to generate comfort noise";
return kInternalError;
}
+ (*output)[0].OverwriteAt(temp.get(), number_of_samples, 0);
if (first_call_) {
// Set tapering window parameters. Values are in Q15.
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/comfort_noise.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/comfort_noise.h
index 1fc22586637..f877bf63efb 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/comfort_noise.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/comfort_noise.h
@@ -38,8 +38,7 @@ class ComfortNoise {
first_call_(true),
overlap_length_(5 * fs_hz_ / 8000),
decoder_database_(decoder_database),
- sync_buffer_(sync_buffer),
- internal_error_code_(0) {
+ sync_buffer_(sync_buffer) {
}
// Resets the state. Should be called before each new comfort noise period.
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/cross_correlation.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/cross_correlation.cc
new file mode 100644
index 00000000000..ad89ab8a139
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/cross_correlation.cc
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_coding/neteq/cross_correlation.h"
+
+#include <cstdlib>
+#include <limits>
+
+#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
+
+namespace webrtc {
+
+// This function decides the overflow-protecting scaling and calls
+// WebRtcSpl_CrossCorrelation.
+int CrossCorrelationWithAutoShift(const int16_t* sequence_1,
+ const int16_t* sequence_2,
+ size_t sequence_1_length,
+ size_t cross_correlation_length,
+ int cross_correlation_step,
+ int32_t* cross_correlation) {
+ // Find the maximum absolute value of sequence_1 and 2.
+ const int16_t max_1 = WebRtcSpl_MaxAbsValueW16(sequence_1, sequence_1_length);
+ const int sequence_2_shift =
+ cross_correlation_step * (static_cast<int>(cross_correlation_length) - 1);
+ const int16_t* sequence_2_start =
+ sequence_2_shift >= 0 ? sequence_2 : sequence_2 + sequence_2_shift;
+ const size_t sequence_2_length =
+ sequence_1_length + std::abs(sequence_2_shift);
+ const int16_t max_2 =
+ WebRtcSpl_MaxAbsValueW16(sequence_2_start, sequence_2_length);
+
+ // In order to avoid overflow when computing the sum we should scale the
+ // samples so that (in_vector_length * max_1 * max_2) will not overflow.
+ // Expected scaling fulfills
+ // 1) sufficient:
+ // sequence_1_length * (max_1 * max_2 >> scaling) <= 0x7fffffff;
+ // 2) necessary:
+ // if (scaling > 0)
+ // sequence_1_length * (max_1 * max_2 >> (scaling - 1)) > 0x7fffffff;
+ // The following calculation fulfills 1) and almost fulfills 2).
+ // There are some corner cases that 2) is not satisfied, e.g.,
+ // max_1 = 17, max_2 = 30848, sequence_1_length = 4095, in such case,
+ // optimal scaling is 0, while the following calculation results in 1.
+ const int32_t factor = (max_1 * max_2) / (std::numeric_limits<int32_t>::max()
+ / static_cast<int32_t>(sequence_1_length));
+ const int scaling = factor == 0 ? 0 : 31 - WebRtcSpl_NormW32(factor);
+
+ WebRtcSpl_CrossCorrelation(cross_correlation, sequence_1, sequence_2,
+ sequence_1_length, cross_correlation_length,
+ scaling, cross_correlation_step);
+
+ return scaling;
+}
+
+} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/cross_correlation.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/cross_correlation.h
new file mode 100644
index 00000000000..db14141027c
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/cross_correlation.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_CODING_NETEQ_CROSS_CORRELATION_H_
+#define WEBRTC_MODULES_AUDIO_CODING_NETEQ_CROSS_CORRELATION_H_
+
+#include "webrtc/common_types.h"
+
+namespace webrtc {
+
+// The function calculates the cross-correlation between two sequences
+// |sequence_1| and |sequence_2|. |sequence_1| is taken as reference, with
+// |sequence_1_length| as its length. |sequence_2| slides for the calculation of
+// cross-correlation. The result will be saved in |cross_correlation|.
+// |cross_correlation_length| correlation points are calculated.
+// The corresponding lag starts from 0, and increases with a step of
+// |cross_correlation_step|. The result is without normalization. To avoid
+// overflow, the result will be right shifted. The amount of shifts will be
+// returned.
+//
+// Input:
+// - sequence_1 : First sequence (reference).
+// - sequence_2 : Second sequence (sliding during calculation).
+// - sequence_1_length : Length of |sequence_1|.
+// - cross_correlation_length : Number of cross-correlations to calculate.
+// - cross_correlation_step : Step in the lag for the cross-correlation.
+//
+// Output:
+// - cross_correlation : The cross-correlation in Q(-right_shifts)
+//
+// Return:
+// Number of right shifts in cross_correlation.
+
+int CrossCorrelationWithAutoShift(const int16_t* sequence_1,
+ const int16_t* sequence_2,
+ size_t sequence_1_length,
+ size_t cross_correlation_length,
+ int cross_correlation_step,
+ int32_t* cross_correlation);
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_CODING_NETEQ_CROSS_CORRELATION_H_
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic.cc
index 39bb4662c71..545d1d62455 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic.cc
@@ -29,26 +29,19 @@ DecisionLogic* DecisionLogic::Create(int fs_hz,
DecoderDatabase* decoder_database,
const PacketBuffer& packet_buffer,
DelayManager* delay_manager,
- BufferLevelFilter* buffer_level_filter) {
+ BufferLevelFilter* buffer_level_filter,
+ const TickTimer* tick_timer) {
switch (playout_mode) {
case kPlayoutOn:
case kPlayoutStreaming:
- return new DecisionLogicNormal(fs_hz,
- output_size_samples,
- playout_mode,
- decoder_database,
- packet_buffer,
- delay_manager,
- buffer_level_filter);
+ return new DecisionLogicNormal(
+ fs_hz, output_size_samples, playout_mode, decoder_database,
+ packet_buffer, delay_manager, buffer_level_filter, tick_timer);
case kPlayoutFax:
case kPlayoutOff:
- return new DecisionLogicFax(fs_hz,
- output_size_samples,
- playout_mode,
- decoder_database,
- packet_buffer,
- delay_manager,
- buffer_level_filter);
+ return new DecisionLogicFax(
+ fs_hz, output_size_samples, playout_mode, decoder_database,
+ packet_buffer, delay_manager, buffer_level_filter, tick_timer);
}
// This line cannot be reached, but must be here to avoid compiler errors.
assert(false);
@@ -61,30 +54,34 @@ DecisionLogic::DecisionLogic(int fs_hz,
DecoderDatabase* decoder_database,
const PacketBuffer& packet_buffer,
DelayManager* delay_manager,
- BufferLevelFilter* buffer_level_filter)
+ BufferLevelFilter* buffer_level_filter,
+ const TickTimer* tick_timer)
: decoder_database_(decoder_database),
packet_buffer_(packet_buffer),
delay_manager_(delay_manager),
buffer_level_filter_(buffer_level_filter),
+ tick_timer_(tick_timer),
cng_state_(kCngOff),
- generated_noise_samples_(0),
packet_length_samples_(0),
sample_memory_(0),
prev_time_scale_(false),
- timescale_hold_off_(kMinTimescaleInterval),
+ timescale_countdown_(
+ tick_timer_->GetNewCountdown(kMinTimescaleInterval + 1)),
num_consecutive_expands_(0),
playout_mode_(playout_mode) {
delay_manager_->set_streaming_mode(playout_mode_ == kPlayoutStreaming);
SetSampleRate(fs_hz, output_size_samples);
}
+DecisionLogic::~DecisionLogic() = default;
+
void DecisionLogic::Reset() {
cng_state_ = kCngOff;
- generated_noise_samples_ = 0;
+ noise_fast_forward_ = 0;
packet_length_samples_ = 0;
sample_memory_ = 0;
prev_time_scale_ = false;
- timescale_hold_off_ = 0;
+ timescale_countdown_.reset();
num_consecutive_expands_ = 0;
}
@@ -92,7 +89,8 @@ void DecisionLogic::SoftReset() {
packet_length_samples_ = 0;
sample_memory_ = 0;
prev_time_scale_ = false;
- timescale_hold_off_ = kMinTimescaleInterval;
+ timescale_countdown_ =
+ tick_timer_->GetNewCountdown(kMinTimescaleInterval + 1);
}
void DecisionLogic::SetSampleRate(int fs_hz, size_t output_size_samples) {
@@ -107,15 +105,15 @@ Operations DecisionLogic::GetDecision(const SyncBuffer& sync_buffer,
size_t decoder_frame_length,
const RTPHeader* packet_header,
Modes prev_mode,
- bool play_dtmf, bool* reset_decoder) {
+ bool play_dtmf,
+ size_t generated_noise_samples,
+ bool* reset_decoder) {
if (prev_mode == kModeRfc3389Cng ||
prev_mode == kModeCodecInternalCng ||
prev_mode == kModeExpand) {
// If last mode was CNG (or Expand, since this could be covering up for
- // a lost CNG packet), increase the |generated_noise_samples_| counter.
- generated_noise_samples_ += output_size_samples_;
- // Remember that CNG is on. This is needed if comfort noise is interrupted
- // by DTMF.
+ // a lost CNG packet), remember that CNG is on. This is needed if comfort
+ // noise is interrupted by DTMF.
if (prev_mode == kModeRfc3389Cng) {
cng_state_ = kCngRfc3389On;
} else if (prev_mode == kModeCodecInternalCng) {
@@ -139,7 +137,7 @@ Operations DecisionLogic::GetDecision(const SyncBuffer& sync_buffer,
return GetDecisionSpecialized(sync_buffer, expand, decoder_frame_length,
packet_header, prev_mode, play_dtmf,
- reset_decoder);
+ reset_decoder, generated_noise_samples);
}
void DecisionLogic::ExpandDecision(Operations operation) {
@@ -152,10 +150,6 @@ void DecisionLogic::ExpandDecision(Operations operation) {
void DecisionLogic::FilterBufferLevel(size_t buffer_size_samples,
Modes prev_mode) {
- const int elapsed_time_ms =
- static_cast<int>(output_size_samples_ / (8 * fs_mult_));
- delay_manager_->UpdateCounters(elapsed_time_ms);
-
// Do not update buffer history if currently playing CNG since it will bias
// the filtered buffer level.
if ((prev_mode != kModeRfc3389Cng) && (prev_mode != kModeCodecInternalCng)) {
@@ -170,14 +164,13 @@ void DecisionLogic::FilterBufferLevel(size_t buffer_size_samples,
int sample_memory_local = 0;
if (prev_time_scale_) {
sample_memory_local = sample_memory_;
- timescale_hold_off_ = kMinTimescaleInterval;
+ timescale_countdown_ =
+ tick_timer_->GetNewCountdown(kMinTimescaleInterval);
}
buffer_level_filter_->Update(buffer_size_packets, sample_memory_local,
packet_length_samples_);
prev_time_scale_ = false;
}
-
- timescale_hold_off_ = std::max(timescale_hold_off_ - 1, 0);
}
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic.h
index 72121b7aac5..008655d1a2b 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic.h
@@ -14,6 +14,7 @@
#include "webrtc/base/constructormagic.h"
#include "webrtc/modules/audio_coding/neteq/defines.h"
#include "webrtc/modules/audio_coding/neteq/include/neteq.h"
+#include "webrtc/modules/audio_coding/neteq/tick_timer.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -39,7 +40,8 @@ class DecisionLogic {
DecoderDatabase* decoder_database,
const PacketBuffer& packet_buffer,
DelayManager* delay_manager,
- BufferLevelFilter* buffer_level_filter);
+ BufferLevelFilter* buffer_level_filter,
+ const TickTimer* tick_timer);
// Constructor.
DecisionLogic(int fs_hz,
@@ -48,10 +50,10 @@ class DecisionLogic {
DecoderDatabase* decoder_database,
const PacketBuffer& packet_buffer,
DelayManager* delay_manager,
- BufferLevelFilter* buffer_level_filter);
+ BufferLevelFilter* buffer_level_filter,
+ const TickTimer* tick_timer);
- // Destructor.
- virtual ~DecisionLogic() {}
+ virtual ~DecisionLogic();
// Resets object to a clean state.
void Reset();
@@ -79,6 +81,7 @@ class DecisionLogic {
const RTPHeader* packet_header,
Modes prev_mode,
bool play_dtmf,
+ size_t generated_noise_samples,
bool* reset_decoder);
// These methods test the |cng_state_| for different conditions.
@@ -101,10 +104,7 @@ class DecisionLogic {
// Accessors and mutators.
void set_sample_memory(int32_t value) { sample_memory_ = value; }
- size_t generated_noise_samples() const { return generated_noise_samples_; }
- void set_generated_noise_samples(size_t value) {
- generated_noise_samples_ = value;
- }
+ size_t noise_fast_forward() const { return noise_fast_forward_; }
size_t packet_length_samples() const { return packet_length_samples_; }
void set_packet_length_samples(size_t value) {
packet_length_samples_ = value;
@@ -113,8 +113,8 @@ class DecisionLogic {
NetEqPlayoutMode playout_mode() const { return playout_mode_; }
protected:
- // The value 6 sets maximum time-stretch rate to about 100 ms/s.
- static const int kMinTimescaleInterval = 6;
+ // The value 5 sets maximum time-stretch rate to about 100 ms/s.
+ static const int kMinTimescaleInterval = 5;
enum CngState {
kCngOff,
@@ -138,7 +138,8 @@ class DecisionLogic {
const RTPHeader* packet_header,
Modes prev_mode,
bool play_dtmf,
- bool* reset_decoder) = 0;
+ bool* reset_decoder,
+ size_t generated_noise_samples) = 0;
// Updates the |buffer_level_filter_| with the current buffer level
// |buffer_size_packets|.
@@ -148,15 +149,16 @@ class DecisionLogic {
const PacketBuffer& packet_buffer_;
DelayManager* delay_manager_;
BufferLevelFilter* buffer_level_filter_;
+ const TickTimer* tick_timer_;
int fs_mult_;
size_t output_size_samples_;
CngState cng_state_; // Remember if comfort noise is interrupted by other
// event (e.g., DTMF).
- size_t generated_noise_samples_;
+ size_t noise_fast_forward_ = 0;
size_t packet_length_samples_;
int sample_memory_;
bool prev_time_scale_;
- int timescale_hold_off_;
+ std::unique_ptr<TickTimer::Countdown> timescale_countdown_;
int num_consecutive_expands_;
const NetEqPlayoutMode playout_mode_;
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic_fax.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic_fax.cc
index ddea64425f2..aace402a7de 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic_fax.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic_fax.cc
@@ -26,7 +26,8 @@ Operations DecisionLogicFax::GetDecisionSpecialized(
const RTPHeader* packet_header,
Modes prev_mode,
bool play_dtmf,
- bool* reset_decoder) {
+ bool* reset_decoder,
+ size_t generated_noise_samples) {
assert(playout_mode_ == kPlayoutFax || playout_mode_ == kPlayoutOff);
uint32_t target_timestamp = sync_buffer.end_timestamp();
uint32_t available_timestamp = 0;
@@ -37,7 +38,7 @@ Operations DecisionLogicFax::GetDecisionSpecialized(
decoder_database_->IsComfortNoise(packet_header->payloadType);
}
if (is_cng_packet) {
- if (static_cast<int32_t>((generated_noise_samples_ + target_timestamp)
+ if (static_cast<int32_t>((generated_noise_samples + target_timestamp)
- available_timestamp) >= 0) {
// Time to play this packet now.
return kRfc3389Cng;
@@ -70,13 +71,13 @@ Operations DecisionLogicFax::GetDecisionSpecialized(
} else if (target_timestamp == available_timestamp) {
return kNormal;
} else {
- if (static_cast<int32_t>((generated_noise_samples_ + target_timestamp)
+ if (static_cast<int32_t>((generated_noise_samples + target_timestamp)
- available_timestamp) >= 0) {
return kNormal;
} else {
// If currently playing comfort noise, continue with that. Do not
- // increase the timestamp counter since generated_noise_samples_ will
- // be increased.
+ // increase the timestamp counter since generated_noise_stopwatch_ in
+ // NetEqImpl will take care of the time-keeping.
if (cng_state_ == kCngRfc3389On) {
return kRfc3389CngNoPacket;
} else if (cng_state_ == kCngInternalOn) {
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic_fax.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic_fax.h
index 204dcc168a3..6958f908b1c 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic_fax.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic_fax.h
@@ -28,11 +28,16 @@ class DecisionLogicFax : public DecisionLogic {
DecoderDatabase* decoder_database,
const PacketBuffer& packet_buffer,
DelayManager* delay_manager,
- BufferLevelFilter* buffer_level_filter)
- : DecisionLogic(fs_hz, output_size_samples, playout_mode,
- decoder_database, packet_buffer, delay_manager,
- buffer_level_filter) {
- }
+ BufferLevelFilter* buffer_level_filter,
+ const TickTimer* tick_timer)
+ : DecisionLogic(fs_hz,
+ output_size_samples,
+ playout_mode,
+ decoder_database,
+ packet_buffer,
+ delay_manager,
+ buffer_level_filter,
+ tick_timer) {}
protected:
// Returns the operation that should be done next. |sync_buffer| and |expand|
@@ -50,7 +55,8 @@ class DecisionLogicFax : public DecisionLogic {
const RTPHeader* packet_header,
Modes prev_mode,
bool play_dtmf,
- bool* reset_decoder) override;
+ bool* reset_decoder,
+ size_t generated_noise_samples) override;
private:
RTC_DISALLOW_COPY_AND_ASSIGN(DecisionLogicFax);
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic_normal.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic_normal.cc
index 0252d1cdfaf..37a75d7f5ad 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic_normal.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic_normal.cc
@@ -31,7 +31,8 @@ Operations DecisionLogicNormal::GetDecisionSpecialized(
const RTPHeader* packet_header,
Modes prev_mode,
bool play_dtmf,
- bool* reset_decoder) {
+ bool* reset_decoder,
+ size_t generated_noise_samples) {
assert(playout_mode_ == kPlayoutOn || playout_mode_ == kPlayoutStreaming);
// Guard for errors, to avoid getting stuck in error mode.
if (prev_mode == kModeError) {
@@ -52,7 +53,8 @@ Operations DecisionLogicNormal::GetDecisionSpecialized(
}
if (is_cng_packet) {
- return CngOperation(prev_mode, target_timestamp, available_timestamp);
+ return CngOperation(prev_mode, target_timestamp, available_timestamp,
+ generated_noise_samples);
}
// Handle the case with no packet at all available (except maybe DTMF).
@@ -76,7 +78,8 @@ Operations DecisionLogicNormal::GetDecisionSpecialized(
available_timestamp, target_timestamp, five_seconds_samples)) {
return FuturePacketAvailable(sync_buffer, expand, decoder_frame_length,
prev_mode, target_timestamp,
- available_timestamp, play_dtmf);
+ available_timestamp, play_dtmf,
+ generated_noise_samples);
} else {
// This implies that available_timestamp < target_timestamp, which can
// happen when a new stream or codec is received. Signal for a reset.
@@ -86,10 +89,11 @@ Operations DecisionLogicNormal::GetDecisionSpecialized(
Operations DecisionLogicNormal::CngOperation(Modes prev_mode,
uint32_t target_timestamp,
- uint32_t available_timestamp) {
+ uint32_t available_timestamp,
+ size_t generated_noise_samples) {
// Signed difference between target and available timestamp.
int32_t timestamp_diff = static_cast<int32_t>(
- static_cast<uint32_t>(generated_noise_samples_ + target_timestamp) -
+ static_cast<uint32_t>(generated_noise_samples + target_timestamp) -
available_timestamp);
int32_t optimal_level_samp = static_cast<int32_t>(
(delay_manager_->TargetLevel() * packet_length_samples_) >> 8);
@@ -97,9 +101,9 @@ Operations DecisionLogicNormal::CngOperation(Modes prev_mode,
if (excess_waiting_time_samp > optimal_level_samp / 2) {
// The waiting time for this packet will be longer than 1.5
- // times the wanted buffer delay. Advance the clock to cut
+ // times the wanted buffer delay. Apply fast-forward to cut the
// waiting time down to the optimal.
- generated_noise_samples_ += excess_waiting_time_samp;
+ noise_fast_forward_ += excess_waiting_time_samp;
timestamp_diff += excess_waiting_time_samp;
}
@@ -109,6 +113,7 @@ Operations DecisionLogicNormal::CngOperation(Modes prev_mode,
return kRfc3389CngNoPacket;
} else {
// Otherwise, go for the CNG packet now.
+ noise_fast_forward_ = 0;
return kRfc3389Cng;
}
}
@@ -153,7 +158,8 @@ Operations DecisionLogicNormal::FuturePacketAvailable(
Modes prev_mode,
uint32_t target_timestamp,
uint32_t available_timestamp,
- bool play_dtmf) {
+ bool play_dtmf,
+ size_t generated_noise_samples) {
// Required packet is not available, but a future packet is.
// Check if we should continue with an ongoing expand because the new packet
// is too far into the future.
@@ -184,7 +190,7 @@ Operations DecisionLogicNormal::FuturePacketAvailable(
// safety precaution), but make sure that the number of samples in buffer
// is no higher than 4 times the optimal level. (Note that TargetLevel()
// is in Q8.)
- if (static_cast<uint32_t>(generated_noise_samples_ + target_timestamp) >=
+ if (static_cast<uint32_t>(generated_noise_samples + target_timestamp) >=
available_timestamp ||
cur_size_samples >
((delay_manager_->TargetLevel() * packet_length_samples_) >> 8) *
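The fast-forward branch in CngOperation is easiest to follow with numbers. TargetLevel() is in Q8, so the optimal buffer level in samples is (target_level * packet_length_samples) >> 8; whenever the wait for the next CNG packet exceeds that level by more than half of it (i.e., more than 1.5 times the wanted delay), the excess is added to noise_fast_forward_. A worked sketch with assumed values, reconstructing the excess as the wait beyond the optimal level:

#include <cstddef>
#include <cstdint>
#include <iostream>

int main() {
  const int target_level_q8 = 4 << 8;      // Target of 4 packets, in Q8.
  const int packet_length_samples = 960;   // 20 ms at 48 kHz.
  const int optimal_level_samp =
      (target_level_q8 * packet_length_samples) >> 8;  // 3840 samples.

  // Suppose the next CNG packet lies 7000 samples in the future.
  int32_t timestamp_diff = -7000;
  const int32_t excess_waiting_time =
      -timestamp_diff - optimal_level_samp;  // 3160 samples beyond optimal.

  size_t noise_fast_forward = 0;
  if (excess_waiting_time > optimal_level_samp / 2) {
    // Waiting time exceeds 1.5x the wanted buffer delay; fast-forward the
    // noise generation so the wait shrinks back to the optimal level.
    noise_fast_forward += excess_waiting_time;
    timestamp_diff += excess_waiting_time;
  }
  std::cout << "fast-forward by " << noise_fast_forward << " samples, "
            << "remaining wait " << -timestamp_diff << " samples\n";
  return 0;
}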
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic_normal.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic_normal.h
index 7465906a381..aa0edf3152a 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic_normal.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic_normal.h
@@ -28,11 +28,16 @@ class DecisionLogicNormal : public DecisionLogic {
DecoderDatabase* decoder_database,
const PacketBuffer& packet_buffer,
DelayManager* delay_manager,
- BufferLevelFilter* buffer_level_filter)
- : DecisionLogic(fs_hz, output_size_samples, playout_mode,
- decoder_database, packet_buffer, delay_manager,
- buffer_level_filter) {
- }
+ BufferLevelFilter* buffer_level_filter,
+ const TickTimer* tick_timer)
+ : DecisionLogic(fs_hz,
+ output_size_samples,
+ playout_mode,
+ decoder_database,
+ packet_buffer,
+ delay_manager,
+ buffer_level_filter,
+ tick_timer) {}
protected:
static const int kAllowMergeWithoutExpandMs = 20; // 20 ms.
@@ -54,7 +59,8 @@ class DecisionLogicNormal : public DecisionLogic {
const RTPHeader* packet_header,
Modes prev_mode,
bool play_dtmf,
- bool* reset_decoder) override;
+ bool* reset_decoder,
+ size_t generated_noise_samples) override;
// Returns the operation to do given that the expected packet is not
// available, but a packet further into the future is at hand.
@@ -65,7 +71,8 @@ class DecisionLogicNormal : public DecisionLogic {
Modes prev_mode,
uint32_t target_timestamp,
uint32_t available_timestamp,
- bool play_dtmf);
+ bool play_dtmf,
+ size_t generated_noise_samples);
// Returns the operation to do given that the expected packet is available.
virtual Operations ExpectedPacketAvailable(Modes prev_mode, bool play_dtmf);
@@ -77,12 +84,16 @@ class DecisionLogicNormal : public DecisionLogic {
private:
// Returns the operation given that the next available packet is a comfort
// noise payload (RFC 3389 only, not codec-internal).
- Operations CngOperation(Modes prev_mode, uint32_t target_timestamp,
- uint32_t available_timestamp);
+ Operations CngOperation(Modes prev_mode,
+ uint32_t target_timestamp,
+ uint32_t available_timestamp,
+ size_t generated_noise_samples);
// Checks if enough time has elapsed since the last successful timescale
// operation was done (i.e., accelerate or preemptive expand).
- bool TimescaleAllowed() const { return timescale_hold_off_ == 0; }
+ bool TimescaleAllowed() const {
+ return !timescale_countdown_ || timescale_countdown_->Finished();
+ }
// Checks if the current (filtered) buffer level is under the target level.
bool UnderTargetLevel() const;
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic_unittest.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic_unittest.cc
index 499f9464347..ebb366890b3 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic_unittest.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/decision_logic_unittest.cc
@@ -11,45 +11,42 @@
// Unit tests for DecisionLogic class and derived classes.
#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/modules/audio_coding/codecs/mock/mock_audio_decoder_factory.h"
#include "webrtc/modules/audio_coding/neteq/buffer_level_filter.h"
#include "webrtc/modules/audio_coding/neteq/decoder_database.h"
#include "webrtc/modules/audio_coding/neteq/decision_logic.h"
#include "webrtc/modules/audio_coding/neteq/delay_manager.h"
#include "webrtc/modules/audio_coding/neteq/delay_peak_detector.h"
#include "webrtc/modules/audio_coding/neteq/packet_buffer.h"
+#include "webrtc/modules/audio_coding/neteq/tick_timer.h"
namespace webrtc {
TEST(DecisionLogic, CreateAndDestroy) {
int fs_hz = 8000;
int output_size_samples = fs_hz / 100; // Samples per 10 ms.
- DecoderDatabase decoder_database;
- PacketBuffer packet_buffer(10);
- DelayPeakDetector delay_peak_detector;
- DelayManager delay_manager(240, &delay_peak_detector);
+ DecoderDatabase decoder_database(
+ std::unique_ptr<MockAudioDecoderFactory>(new MockAudioDecoderFactory));
+ TickTimer tick_timer;
+ PacketBuffer packet_buffer(10, &tick_timer);
+ DelayPeakDetector delay_peak_detector(&tick_timer);
+ DelayManager delay_manager(240, &delay_peak_detector, &tick_timer);
BufferLevelFilter buffer_level_filter;
- DecisionLogic* logic = DecisionLogic::Create(fs_hz, output_size_samples,
- kPlayoutOn, &decoder_database,
- packet_buffer, &delay_manager,
- &buffer_level_filter);
+ DecisionLogic* logic = DecisionLogic::Create(
+ fs_hz, output_size_samples, kPlayoutOn, &decoder_database, packet_buffer,
+ &delay_manager, &buffer_level_filter, &tick_timer);
delete logic;
- logic = DecisionLogic::Create(fs_hz, output_size_samples,
- kPlayoutStreaming,
- &decoder_database,
- packet_buffer, &delay_manager,
- &buffer_level_filter);
+ logic = DecisionLogic::Create(
+ fs_hz, output_size_samples, kPlayoutStreaming, &decoder_database,
+ packet_buffer, &delay_manager, &buffer_level_filter, &tick_timer);
delete logic;
- logic = DecisionLogic::Create(fs_hz, output_size_samples,
- kPlayoutFax,
- &decoder_database,
- packet_buffer, &delay_manager,
- &buffer_level_filter);
+ logic = DecisionLogic::Create(
+ fs_hz, output_size_samples, kPlayoutFax, &decoder_database, packet_buffer,
+ &delay_manager, &buffer_level_filter, &tick_timer);
delete logic;
- logic = DecisionLogic::Create(fs_hz, output_size_samples,
- kPlayoutOff,
- &decoder_database,
- packet_buffer, &delay_manager,
- &buffer_level_filter);
+ logic = DecisionLogic::Create(
+ fs_hz, output_size_samples, kPlayoutOff, &decoder_database, packet_buffer,
+ &delay_manager, &buffer_level_filter, &tick_timer);
delete logic;
}
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/decoder_database.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/decoder_database.cc
index 92d4bab1e4a..4fddf75ce26 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/decoder_database.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/decoder_database.cc
@@ -19,13 +19,39 @@
namespace webrtc {
-DecoderDatabase::DecoderDatabase()
- : active_decoder_(-1), active_cng_decoder_(-1) {}
+DecoderDatabase::DecoderDatabase(
+ std::unique_ptr<AudioDecoderFactory> decoder_factory)
+ : active_decoder_type_(-1),
+ active_cng_decoder_type_(-1),
+ decoder_factory_(std::move(decoder_factory)) {}
-DecoderDatabase::~DecoderDatabase() {}
+DecoderDatabase::~DecoderDatabase() = default;
-DecoderDatabase::DecoderInfo::~DecoderInfo() {
- if (!external) delete decoder;
+DecoderDatabase::DecoderInfo::DecoderInfo(NetEqDecoder ct,
+ const std::string& nm,
+ int fs,
+ AudioDecoder* ext_dec)
+ : codec_type(ct),
+ name(nm),
+ fs_hz(fs),
+ external_decoder(ext_dec),
+ audio_format_(acm2::RentACodec::NetEqDecoderToSdpAudioFormat(ct)) {}
+
+DecoderDatabase::DecoderInfo::DecoderInfo(DecoderInfo&&) = default;
+DecoderDatabase::DecoderInfo::~DecoderInfo() = default;
+
+AudioDecoder* DecoderDatabase::DecoderInfo::GetDecoder(
+ AudioDecoderFactory* factory) {
+ if (external_decoder) {
+ RTC_DCHECK(!decoder_);
+ return external_decoder;
+ }
+ RTC_DCHECK(audio_format_);
+ if (!decoder_) {
+ decoder_ = factory->MakeAudioDecoder(*audio_format_);
+ }
+ RTC_DCHECK(decoder_) << "Failed to create: " << *audio_format_;
+ return decoder_.get();
}
bool DecoderDatabase::Empty() const { return decoders_.empty(); }
@@ -34,8 +60,8 @@ int DecoderDatabase::Size() const { return static_cast<int>(decoders_.size()); }
void DecoderDatabase::Reset() {
decoders_.clear();
- active_decoder_ = -1;
- active_cng_decoder_ = -1;
+ active_decoder_type_ = -1;
+ active_cng_decoder_type_ = -1;
}
int DecoderDatabase::RegisterPayload(uint8_t rtp_payload_type,
@@ -48,8 +74,9 @@ int DecoderDatabase::RegisterPayload(uint8_t rtp_payload_type,
return kCodecNotSupported;
}
const int fs_hz = CodecSampleRateHz(codec_type);
- DecoderInfo info(codec_type, name, fs_hz, NULL, false);
- auto ret = decoders_.insert(std::make_pair(rtp_payload_type, info));
+ DecoderInfo info(codec_type, name, fs_hz, nullptr);
+ auto ret =
+ decoders_.insert(std::make_pair(rtp_payload_type, std::move(info)));
if (ret.second == false) {
// Database already contains a decoder with type |rtp_payload_type|.
return kDecoderExists;
@@ -75,8 +102,8 @@ int DecoderDatabase::InsertExternal(uint8_t rtp_payload_type,
return kInvalidPointer;
}
std::pair<DecoderMap::iterator, bool> ret;
- DecoderInfo info(codec_type, codec_name, fs_hz, decoder, true);
- ret = decoders_.insert(std::make_pair(rtp_payload_type, info));
+ DecoderInfo info(codec_type, codec_name, fs_hz, decoder);
+ ret = decoders_.insert(std::make_pair(rtp_payload_type, std::move(info)));
if (ret.second == false) {
// Database already contains a decoder with type |rtp_payload_type|.
return kDecoderExists;
@@ -89,11 +116,11 @@ int DecoderDatabase::Remove(uint8_t rtp_payload_type) {
// No decoder with that |rtp_payload_type|.
return kDecoderNotFound;
}
- if (active_decoder_ == rtp_payload_type) {
- active_decoder_ = -1; // No active decoder.
+ if (active_decoder_type_ == rtp_payload_type) {
+ active_decoder_type_ = -1; // No active decoder.
}
- if (active_cng_decoder_ == rtp_payload_type) {
- active_cng_decoder_ = -1; // No active CNG decoder.
+ if (active_cng_decoder_type_ == rtp_payload_type) {
+ active_cng_decoder_type_ = -1; // No active CNG decoder.
}
return kOK;
}
@@ -122,7 +149,8 @@ uint8_t DecoderDatabase::GetRtpPayloadType(
}
AudioDecoder* DecoderDatabase::GetDecoder(uint8_t rtp_payload_type) {
- if (IsDtmf(rtp_payload_type) || IsRed(rtp_payload_type)) {
+ if (IsDtmf(rtp_payload_type) || IsRed(rtp_payload_type) ||
+ IsComfortNoise(rtp_payload_type)) {
// These are not real decoders.
return NULL;
}
@@ -132,13 +160,7 @@ AudioDecoder* DecoderDatabase::GetDecoder(uint8_t rtp_payload_type) {
return NULL;
}
DecoderInfo* info = &(*it).second;
- if (!info->decoder) {
- // Create the decoder object.
- AudioDecoder* decoder = CreateAudioDecoder(info->codec_type);
- assert(decoder); // Should not be able to have an unsupported codec here.
- info->decoder = decoder;
- }
- return info->decoder;
+ return info->GetDecoder(decoder_factory_.get());
}
bool DecoderDatabase::IsType(uint8_t rtp_payload_type,
@@ -152,14 +174,16 @@ bool DecoderDatabase::IsType(uint8_t rtp_payload_type,
}
bool DecoderDatabase::IsComfortNoise(uint8_t rtp_payload_type) const {
- if (IsType(rtp_payload_type, NetEqDecoder::kDecoderCNGnb) ||
- IsType(rtp_payload_type, NetEqDecoder::kDecoderCNGwb) ||
- IsType(rtp_payload_type, NetEqDecoder::kDecoderCNGswb32kHz) ||
- IsType(rtp_payload_type, NetEqDecoder::kDecoderCNGswb48kHz)) {
- return true;
- } else {
+ DecoderMap::const_iterator it = decoders_.find(rtp_payload_type);
+ if (it == decoders_.end()) {
+ // Decoder not found.
return false;
}
+ const auto& type = it->second.codec_type;
+ return type == NetEqDecoder::kDecoderCNGnb
+ || type == NetEqDecoder::kDecoderCNGwb
+ || type == NetEqDecoder::kDecoderCNGswb32kHz
+ || type == NetEqDecoder::kDecoderCNGswb48kHz;
}
bool DecoderDatabase::IsDtmf(uint8_t rtp_payload_type) const {
@@ -178,37 +202,33 @@ int DecoderDatabase::SetActiveDecoder(uint8_t rtp_payload_type,
// Decoder not found.
return kDecoderNotFound;
}
+ RTC_CHECK(!IsComfortNoise(rtp_payload_type));
assert(new_decoder);
*new_decoder = false;
- if (active_decoder_ < 0) {
+ if (active_decoder_type_ < 0) {
// This is the first active decoder.
*new_decoder = true;
- } else if (active_decoder_ != rtp_payload_type) {
+ } else if (active_decoder_type_ != rtp_payload_type) {
// Moving from one active decoder to another. Delete the first one.
- DecoderMap::iterator it = decoders_.find(active_decoder_);
+ DecoderMap::iterator it = decoders_.find(active_decoder_type_);
if (it == decoders_.end()) {
// Decoder not found. This should not be possible.
assert(false);
return kDecoderNotFound;
}
- if (!(*it).second.external) {
- // Delete the AudioDecoder object, unless it is an externally created
- // decoder.
- delete (*it).second.decoder;
- (*it).second.decoder = NULL;
- }
+ it->second.DropDecoder();
*new_decoder = true;
}
- active_decoder_ = rtp_payload_type;
+ active_decoder_type_ = rtp_payload_type;
return kOK;
}
AudioDecoder* DecoderDatabase::GetActiveDecoder() {
- if (active_decoder_ < 0) {
+ if (active_decoder_type_ < 0) {
// No active decoder.
return NULL;
}
- return GetDecoder(active_decoder_);
+ return GetDecoder(active_decoder_type_);
}
int DecoderDatabase::SetActiveCngDecoder(uint8_t rtp_payload_type) {
@@ -218,31 +238,32 @@ int DecoderDatabase::SetActiveCngDecoder(uint8_t rtp_payload_type) {
// Decoder not found.
return kDecoderNotFound;
}
- if (active_cng_decoder_ >= 0 && active_cng_decoder_ != rtp_payload_type) {
+ if (active_cng_decoder_type_ >= 0 &&
+ active_cng_decoder_type_ != rtp_payload_type) {
// Moving from one active CNG decoder to another. Delete the first one.
- DecoderMap::iterator it = decoders_.find(active_cng_decoder_);
+ DecoderMap::iterator it = decoders_.find(active_cng_decoder_type_);
if (it == decoders_.end()) {
// Decoder not found. This should not be possible.
assert(false);
return kDecoderNotFound;
}
- if (!(*it).second.external) {
- // Delete the AudioDecoder object, unless it is an externally created
- // decoder.
- delete (*it).second.decoder;
- (*it).second.decoder = NULL;
- }
+ // The CNG decoder should never be provided externally.
+ RTC_CHECK(!it->second.external_decoder);
+ active_cng_decoder_.reset();
}
- active_cng_decoder_ = rtp_payload_type;
+ active_cng_decoder_type_ = rtp_payload_type;
return kOK;
}
-AudioDecoder* DecoderDatabase::GetActiveCngDecoder() {
- if (active_cng_decoder_ < 0) {
+ComfortNoiseDecoder* DecoderDatabase::GetActiveCngDecoder() {
+ if (active_cng_decoder_type_ < 0) {
// No active CNG decoder.
return NULL;
}
- return GetDecoder(active_cng_decoder_);
+ if (!active_cng_decoder_) {
+ active_cng_decoder_.reset(new ComfortNoiseDecoder);
+ }
+ return active_cng_decoder_.get();
}
int DecoderDatabase::CheckPayloadTypes(const PacketList& packet_list) const {
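The new DecoderInfo owns its AudioDecoder through a unique_ptr and builds it on first use from the injected factory; an externally supplied decoder is returned as-is and never deleted. A compact sketch of that ownership pattern with simplified stand-in types (not the WebRTC classes):

#include <memory>
#include <string>

struct Decoder { std::string codec; };

struct Factory {
  std::unique_ptr<Decoder> Make(const std::string& codec) const {
    return std::unique_ptr<Decoder>(new Decoder{codec});
  }
};

class Info {
 public:
  Info(std::string codec, Decoder* external)
      : codec_(std::move(codec)), external_(external) {}

  Decoder* Get(const Factory& factory) {
    if (external_) return external_;             // External decoders win.
    if (!owned_) owned_ = factory.Make(codec_);  // Create lazily, once.
    return owned_.get();
  }
  void Drop() { owned_.reset(); }  // Safe: can be recreated on demand.

 private:
  const std::string codec_;
  Decoder* const external_;
  std::unique_ptr<Decoder> owned_;
};

int main() {
  Factory factory;
  Info pcmu("pcmu", nullptr);
  Decoder* first = pcmu.Get(factory);
  Decoder* second = pcmu.Get(factory);
  return first == second ? 0 : 1;  // Cached: same object both times.
}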
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/decoder_database.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/decoder_database.h
index 01ff0c9fdb3..3a40e08c8a6 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/decoder_database.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/decoder_database.h
@@ -12,10 +12,14 @@
#define WEBRTC_MODULES_AUDIO_CODING_NETEQ_DECODER_DATABASE_H_
#include <map>
+#include <memory>
#include <string>
#include "webrtc/base/constructormagic.h"
#include "webrtc/common_types.h" // NULL
+#include "webrtc/modules/audio_coding/codecs/audio_decoder_factory.h"
+#include "webrtc/modules/audio_coding/codecs/audio_format.h"
+#include "webrtc/modules/audio_coding/codecs/cng/webrtc_cng.h"
#include "webrtc/modules/audio_coding/neteq/audio_decoder_impl.h"
#include "webrtc/modules/audio_coding/neteq/packet.h"
#include "webrtc/typedefs.h"
@@ -34,37 +38,38 @@ class DecoderDatabase {
kInvalidPointer = -6
};
- // Struct used to store decoder info in the database.
- struct DecoderInfo {
- DecoderInfo() = default;
- DecoderInfo(NetEqDecoder ct, int fs, AudioDecoder* dec, bool ext)
- : DecoderInfo(ct, "", fs, dec, ext) {}
+ // Class that stores decoder info in the database.
+ class DecoderInfo {
+ public:
DecoderInfo(NetEqDecoder ct,
const std::string& nm,
int fs,
- AudioDecoder* dec,
- bool ext)
- : codec_type(ct),
- name(nm),
- fs_hz(fs),
- rtp_sample_rate_hz(fs),
- decoder(dec),
- external(ext) {}
+ AudioDecoder* ext_dec);
+ DecoderInfo(DecoderInfo&&);
~DecoderInfo();
- NetEqDecoder codec_type = NetEqDecoder::kDecoderArbitrary;
- std::string name;
- int fs_hz = 8000;
- int rtp_sample_rate_hz = 8000;
- AudioDecoder* decoder = nullptr;
- bool external = false;
+ // Get the AudioDecoder object, creating it first if necessary.
+ AudioDecoder* GetDecoder(AudioDecoderFactory* factory);
+
+ // Delete the AudioDecoder object, unless it's external. (This means we can
+ // always recreate it later if we need it.)
+ void DropDecoder() { decoder_.reset(); }
+
+ const NetEqDecoder codec_type;
+ const std::string name;
+ const int fs_hz;
+ AudioDecoder* const external_decoder;
+
+ private:
+ const rtc::Optional<SdpAudioFormat> audio_format_;
+ std::unique_ptr<AudioDecoder> decoder_;
};
// Maximum value for 8 bits, and an invalid RTP payload type (since it is
// only 7 bits).
static const uint8_t kRtpPayloadTypeError = 0xFF;
- DecoderDatabase();
+ DecoderDatabase(std::unique_ptr<AudioDecoderFactory> decoder_factory);
virtual ~DecoderDatabase();
@@ -142,7 +147,7 @@ class DecoderDatabase {
// Returns the current active comfort noise decoder, or NULL if no active
// comfort noise decoder exists.
- virtual AudioDecoder* GetActiveCngDecoder();
+ virtual ComfortNoiseDecoder* GetActiveCngDecoder();
// Returns kOK if all packets in |packet_list| carry payload types that are
// registered in the database. Otherwise, returns kDecoderNotFound.
@@ -152,8 +157,10 @@ class DecoderDatabase {
typedef std::map<uint8_t, DecoderInfo> DecoderMap;
DecoderMap decoders_;
- int active_decoder_;
- int active_cng_decoder_;
+ int active_decoder_type_;
+ int active_cng_decoder_type_;
+ std::unique_ptr<ComfortNoiseDecoder> active_cng_decoder_;
+ const std::unique_ptr<AudioDecoderFactory> decoder_factory_;
RTC_DISALLOW_COPY_AND_ASSIGN(DecoderDatabase);
};
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/decoder_database_unittest.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/decoder_database_unittest.cc
index 85aaef11431..91ca606d65b 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/decoder_database_unittest.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/decoder_database_unittest.cc
@@ -19,17 +19,21 @@
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/modules/audio_coding/neteq/mock/mock_audio_decoder.h"
+#include "webrtc/modules/audio_coding/codecs/builtin_audio_decoder_factory.h"
+#include "webrtc/modules/audio_coding/codecs/mock/mock_audio_decoder_factory.h"
namespace webrtc {
TEST(DecoderDatabase, CreateAndDestroy) {
- DecoderDatabase db;
+ std::unique_ptr<MockAudioDecoderFactory> factory(new MockAudioDecoderFactory);
+ DecoderDatabase db(std::move(factory));
EXPECT_EQ(0, db.Size());
EXPECT_TRUE(db.Empty());
}
TEST(DecoderDatabase, InsertAndRemove) {
- DecoderDatabase db;
+ std::unique_ptr<MockAudioDecoderFactory> factory(new MockAudioDecoderFactory);
+ DecoderDatabase db(std::move(factory));
const uint8_t kPayloadType = 0;
const std::string kCodecName = "Robert\'); DROP TABLE Students;";
EXPECT_EQ(
@@ -43,7 +47,8 @@ TEST(DecoderDatabase, InsertAndRemove) {
}
TEST(DecoderDatabase, GetDecoderInfo) {
- DecoderDatabase db;
+ std::unique_ptr<MockAudioDecoderFactory> factory(new MockAudioDecoderFactory);
+ DecoderDatabase db(std::move(factory));
const uint8_t kPayloadType = 0;
const std::string kCodecName = "Robert\'); DROP TABLE Students;";
EXPECT_EQ(
@@ -53,16 +58,16 @@ TEST(DecoderDatabase, GetDecoderInfo) {
info = db.GetDecoderInfo(kPayloadType);
ASSERT_TRUE(info != NULL);
EXPECT_EQ(NetEqDecoder::kDecoderPCMu, info->codec_type);
- EXPECT_EQ(NULL, info->decoder);
+ EXPECT_EQ(nullptr, info->external_decoder);
EXPECT_EQ(8000, info->fs_hz);
EXPECT_EQ(kCodecName, info->name);
- EXPECT_FALSE(info->external);
info = db.GetDecoderInfo(kPayloadType + 1); // Other payload type.
EXPECT_TRUE(info == NULL); // Should not be found.
}
TEST(DecoderDatabase, GetRtpPayloadType) {
- DecoderDatabase db;
+ std::unique_ptr<MockAudioDecoderFactory> factory(new MockAudioDecoderFactory);
+ DecoderDatabase db(std::move(factory));
const uint8_t kPayloadType = 0;
const std::string kCodecName = "Robert\'); DROP TABLE Students;";
EXPECT_EQ(
@@ -76,7 +81,7 @@ TEST(DecoderDatabase, GetRtpPayloadType) {
}
TEST(DecoderDatabase, GetDecoder) {
- DecoderDatabase db;
+ DecoderDatabase db(CreateBuiltinAudioDecoderFactory());
const uint8_t kPayloadType = 0;
const std::string kCodecName = "Robert\'); DROP TABLE Students;";
EXPECT_EQ(DecoderDatabase::kOK,
@@ -87,7 +92,8 @@ TEST(DecoderDatabase, GetDecoder) {
}
TEST(DecoderDatabase, TypeTests) {
- DecoderDatabase db;
+ std::unique_ptr<MockAudioDecoderFactory> factory(new MockAudioDecoderFactory);
+ DecoderDatabase db(std::move(factory));
const uint8_t kPayloadTypePcmU = 0;
const uint8_t kPayloadTypeCng = 13;
const uint8_t kPayloadTypeDtmf = 100;
@@ -122,7 +128,8 @@ TEST(DecoderDatabase, TypeTests) {
}
TEST(DecoderDatabase, ExternalDecoder) {
- DecoderDatabase db;
+ std::unique_ptr<MockAudioDecoderFactory> factory(new MockAudioDecoderFactory);
+ DecoderDatabase db(std::move(factory));
const uint8_t kPayloadType = 0;
const std::string kCodecName = "Robert\'); DROP TABLE Students;";
MockAudioDecoder decoder;
@@ -139,9 +146,8 @@ TEST(DecoderDatabase, ExternalDecoder) {
ASSERT_TRUE(info != NULL);
EXPECT_EQ(NetEqDecoder::kDecoderPCMu, info->codec_type);
EXPECT_EQ(kCodecName, info->name);
- EXPECT_EQ(&decoder, info->decoder);
+ EXPECT_EQ(&decoder, info->external_decoder);
EXPECT_EQ(8000, info->fs_hz);
- EXPECT_TRUE(info->external);
// Expect not to delete the decoder when removing it from the database, since
// it was declared externally.
EXPECT_CALL(decoder, Die()).Times(0);
@@ -152,7 +158,8 @@ TEST(DecoderDatabase, ExternalDecoder) {
}
TEST(DecoderDatabase, CheckPayloadTypes) {
- DecoderDatabase db;
+ std::unique_ptr<MockAudioDecoderFactory> factory(new MockAudioDecoderFactory);
+ DecoderDatabase db(std::move(factory));
// Load a number of payloads into the database. Payload types are 0, 1, ...,
// while the decoder type is the same for all payload types (this does not
// matter for the test).
@@ -196,7 +203,7 @@ TEST(DecoderDatabase, CheckPayloadTypes) {
// Test the methods for setting and getting active speech and CNG decoders.
TEST(DecoderDatabase, IF_ISAC(ActiveDecoders)) {
- DecoderDatabase db;
+ DecoderDatabase db(CreateBuiltinAudioDecoderFactory());
// Load payload types.
ASSERT_EQ(DecoderDatabase::kOK,
db.RegisterPayload(0, NetEqDecoder::kDecoderPCMu, "pcmu"));
@@ -233,8 +240,8 @@ TEST(DecoderDatabase, IF_ISAC(ActiveDecoders)) {
// Set active CNG codec.
EXPECT_EQ(DecoderDatabase::kOK, db.SetActiveCngDecoder(13));
- decoder = db.GetActiveCngDecoder();
- ASSERT_FALSE(decoder == NULL); // Should get a decoder here.
+ ComfortNoiseDecoder* cng = db.GetActiveCngDecoder();
+ ASSERT_FALSE(cng == NULL); // Should get a decoder here.
// Remove the active CNG decoder, and verify that the active becomes NULL.
EXPECT_EQ(DecoderDatabase::kOK, db.Remove(13));
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/delay_manager.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/delay_manager.cc
index af49f00f8af..84bda7cf699 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/delay_manager.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/delay_manager.cc
@@ -24,12 +24,13 @@
namespace webrtc {
DelayManager::DelayManager(size_t max_packets_in_buffer,
- DelayPeakDetector* peak_detector)
+ DelayPeakDetector* peak_detector,
+ const TickTimer* tick_timer)
: first_packet_received_(false),
max_packets_in_buffer_(max_packets_in_buffer),
iat_vector_(kMaxIat + 1, 0),
iat_factor_(0),
- packet_iat_count_ms_(0),
+ tick_timer_(tick_timer),
base_target_level_(4), // In Q0 domain.
target_level_(base_target_level_ << 8), // In Q8 domain.
packet_len_ms_(0),
@@ -41,7 +42,6 @@ DelayManager::DelayManager(size_t max_packets_in_buffer,
maximum_delay_ms_(target_level_),
iat_cumulative_sum_(0),
max_iat_cumulative_sum_(0),
- max_timer_ms_(0),
peak_detector_(*peak_detector),
last_pack_cng_or_dtmf_(1) {
assert(peak_detector); // Should never be NULL.
@@ -79,7 +79,7 @@ int DelayManager::Update(uint16_t sequence_number,
if (!first_packet_received_) {
// Prepare for next packet arrival.
- packet_iat_count_ms_ = 0;
+ packet_iat_stopwatch_ = tick_timer_->GetNewStopwatch();
last_seq_no_ = sequence_number;
last_timestamp_ = timestamp;
first_packet_received_ = true;
@@ -106,7 +106,7 @@ int DelayManager::Update(uint16_t sequence_number,
// Calculate inter-arrival time (IAT) in integer "packet times"
// (rounding down). This is the value used as index to the histogram
// vector |iat_vector_|.
- int iat_packets = packet_iat_count_ms_ / packet_len_ms;
+ int iat_packets = packet_iat_stopwatch_->ElapsedMs() / packet_len_ms;
if (streaming_mode_) {
UpdateCumulativeSums(packet_len_ms, sequence_number);
@@ -137,7 +137,7 @@ int DelayManager::Update(uint16_t sequence_number,
} // End if (packet_len_ms > 0).
// Prepare for next packet arrival.
- packet_iat_count_ms_ = 0;
+ packet_iat_stopwatch_ = tick_timer_->GetNewStopwatch();
last_seq_no_ = sequence_number;
last_timestamp_ = timestamp;
return 0;
@@ -147,7 +147,8 @@ void DelayManager::UpdateCumulativeSums(int packet_len_ms,
uint16_t sequence_number) {
// Calculate IAT in Q8, including fractions of a packet (i.e., more
// accurate than |iat_packets|).
- int iat_packets_q8 = (packet_iat_count_ms_ << 8) / packet_len_ms;
+ int iat_packets_q8 =
+ (packet_iat_stopwatch_->ElapsedMs() << 8) / packet_len_ms;
// Calculate cumulative sum IAT with sequence number compensation. The sum
// is zero if there is no clock-drift.
iat_cumulative_sum_ += (iat_packets_q8 -
@@ -159,9 +160,9 @@ void DelayManager::UpdateCumulativeSums(int packet_len_ms,
if (iat_cumulative_sum_ > max_iat_cumulative_sum_) {
// Found a new maximum.
max_iat_cumulative_sum_ = iat_cumulative_sum_;
- max_timer_ms_ = 0;
+ max_iat_stopwatch_ = tick_timer_->GetNewStopwatch();
}
- if (max_timer_ms_ > kMaxStreamingPeakPeriodMs) {
+ if (max_iat_stopwatch_->ElapsedMs() > kMaxStreamingPeakPeriodMs) {
// Too long since the last maximum was observed; decrease max value.
max_iat_cumulative_sum_ -= kCumulativeSumDrift;
}
@@ -299,7 +300,7 @@ int DelayManager::SetPacketAudioLength(int length_ms) {
}
packet_len_ms_ = length_ms;
peak_detector_.SetPacketAudioLength(packet_len_ms_);
- packet_iat_count_ms_ = 0;
+ packet_iat_stopwatch_ = tick_timer_->GetNewStopwatch();
last_pack_cng_or_dtmf_ = 1; // TODO(hlundin): Legacy. Remove?
return 0;
}
@@ -311,8 +312,8 @@ void DelayManager::Reset() {
peak_detector_.Reset();
ResetHistogram(); // Resets target levels too.
iat_factor_ = 0; // Adapt the histogram faster for the first few packets.
- packet_iat_count_ms_ = 0;
- max_timer_ms_ = 0;
+ packet_iat_stopwatch_ = tick_timer_->GetNewStopwatch();
+ max_iat_stopwatch_ = tick_timer_->GetNewStopwatch();
iat_cumulative_sum_ = 0;
max_iat_cumulative_sum_ = 0;
last_pack_cng_or_dtmf_ = 1;
@@ -340,14 +341,10 @@ bool DelayManager::PeakFound() const {
return peak_detector_.peak_found();
}
-void DelayManager::UpdateCounters(int elapsed_time_ms) {
- packet_iat_count_ms_ += elapsed_time_ms;
- peak_detector_.IncrementCounter(elapsed_time_ms);
- max_timer_ms_ += elapsed_time_ms;
+void DelayManager::ResetPacketIatCount() {
+ packet_iat_stopwatch_ = tick_timer_->GetNewStopwatch();
}
-void DelayManager::ResetPacketIatCount() { packet_iat_count_ms_ = 0; }
-
// Note that |low_limit| and |higher_limit| are not assigned to
// |minimum_delay_ms_| and |maximum_delay_ms_| defined by the client of this
// class. They are computed from |target_level_| and used for decision making.
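With the stopwatch in place, the inter-arrival time is read as elapsed milliseconds and converted both to whole packet times (the histogram index) and to Q8 packet times (the streaming-mode cumulative sum). A worked example with assumed values:

#include <iostream>

int main() {
  const int packet_len_ms = 20;  // 20 ms packets.
  const int elapsed_ms = 45;     // Stopwatch reading at packet arrival.

  // Same two conversions as in Update() and UpdateCumulativeSums().
  const int iat_packets = elapsed_ms / packet_len_ms;            // 2, rounded down.
  const int iat_packets_q8 = (elapsed_ms << 8) / packet_len_ms;  // 576 = 2.25 in Q8.

  std::cout << "histogram index: " << iat_packets
            << ", Q8 IAT: " << iat_packets_q8 << " ("
            << iat_packets_q8 / 256.0 << " packets)\n";
  return 0;
}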
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/delay_manager.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/delay_manager.h
index 785fced15df..6f3c14aea9d 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/delay_manager.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/delay_manager.h
@@ -13,10 +13,12 @@
#include <string.h> // Provide access to size_t.
+#include <memory>
#include <vector>
#include "webrtc/base/constructormagic.h"
#include "webrtc/modules/audio_coding/neteq/audio_decoder_impl.h"
+#include "webrtc/modules/audio_coding/neteq/tick_timer.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -32,7 +34,9 @@ class DelayManager {
// buffer can hold no more than |max_packets_in_buffer| packets (i.e., this
// is the number of packet slots in the buffer). Supply a PeakDetector
// object to the DelayManager.
- DelayManager(size_t max_packets_in_buffer, DelayPeakDetector* peak_detector);
+ DelayManager(size_t max_packets_in_buffer,
+ DelayPeakDetector* peak_detector,
+ const TickTimer* tick_timer);
virtual ~DelayManager();
@@ -75,10 +79,6 @@ class DelayManager {
// DelayPeakDetector object.
virtual bool PeakFound() const;
- // Notifies the counters in DelayManager and DelayPeakDetector that
- // |elapsed_time_ms| have elapsed.
- virtual void UpdateCounters(int elapsed_time_ms);
-
// Reset the inter-arrival time counter to 0.
virtual void ResetPacketIatCount();
@@ -135,7 +135,9 @@ class DelayManager {
const size_t max_packets_in_buffer_; // Capacity of the packet buffer.
IATVector iat_vector_; // Histogram of inter-arrival times.
int iat_factor_; // Forgetting factor for updating the IAT histogram (Q15).
- int packet_iat_count_ms_; // Milliseconds elapsed since last packet.
+ const TickTimer* tick_timer_;
+ // Time elapsed since last packet.
+ std::unique_ptr<TickTimer::Stopwatch> packet_iat_stopwatch_;
int base_target_level_; // Currently preferred buffer level before peak
// detection and streaming mode (Q0).
// TODO(turajs) change the comment according to the implementation of
@@ -153,7 +155,8 @@ class DelayManager {
int maximum_delay_ms_; // Externally set maximum allowed delay.
int iat_cumulative_sum_; // Cumulative sum of delta inter-arrival times.
int max_iat_cumulative_sum_; // Max of |iat_cumulative_sum_|.
- int max_timer_ms_; // Time elapsed since maximum was observed.
+ // Time elapsed since maximum was observed.
+ std::unique_ptr<TickTimer::Stopwatch> max_iat_stopwatch_;
DelayPeakDetector& peak_detector_;
int last_pack_cng_or_dtmf_;
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/delay_manager_unittest.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/delay_manager_unittest.cc
index f231c3da301..3290e9cca68 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/delay_manager_unittest.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/delay_manager_unittest.cc
@@ -39,21 +39,19 @@ class DelayManagerTest : public ::testing::Test {
void IncreaseTime(int inc_ms);
DelayManager* dm_;
+ TickTimer tick_timer_;
MockDelayPeakDetector detector_;
uint16_t seq_no_;
uint32_t ts_;
};
DelayManagerTest::DelayManagerTest()
- : dm_(NULL),
- seq_no_(0x1234),
- ts_(0x12345678) {
-}
+ : dm_(NULL), detector_(&tick_timer_), seq_no_(0x1234), ts_(0x12345678) {}
void DelayManagerTest::SetUp() {
EXPECT_CALL(detector_, Reset())
.Times(1);
- dm_ = new DelayManager(kMaxNumberOfPackets, &detector_);
+ dm_ = new DelayManager(kMaxNumberOfPackets, &detector_, &tick_timer_);
}
void DelayManagerTest::SetPacketAudioLength(int lengt_ms) {
@@ -69,9 +67,7 @@ void DelayManagerTest::InsertNextPacket() {
void DelayManagerTest::IncreaseTime(int inc_ms) {
for (int t = 0; t < inc_ms; t += kTimeStepMs) {
- EXPECT_CALL(detector_, IncrementCounter(kTimeStepMs))
- .Times(1);
- dm_->UpdateCounters(kTimeStepMs);
+ tick_timer_.Increment();
}
}
void DelayManagerTest::TearDown() {
@@ -115,13 +111,6 @@ TEST_F(DelayManagerTest, PeakFound) {
EXPECT_FALSE(dm_->PeakFound());
}
-TEST_F(DelayManagerTest, UpdateCounters) {
- // Expect DelayManager to pass on the counter update to the detector.
- EXPECT_CALL(detector_, IncrementCounter(kTimeStepMs))
- .Times(1);
- dm_->UpdateCounters(kTimeStepMs);
-}
-
TEST_F(DelayManagerTest, UpdateNormal) {
SetPacketAudioLength(kFrameSizeMs);
// First packet arrival.
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/delay_peak_detector.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/delay_peak_detector.cc
index 712c7788aca..ce9133bdaed 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/delay_peak_detector.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/delay_peak_detector.cc
@@ -12,6 +12,9 @@
#include <algorithm> // max
+#include "webrtc/base/checks.h"
+#include "webrtc/base/safe_conversions.h"
+
namespace webrtc {
// The DelayPeakDetector keeps track of severe inter-arrival times, called
@@ -23,14 +26,15 @@ namespace webrtc {
DelayPeakDetector::~DelayPeakDetector() = default;
-DelayPeakDetector::DelayPeakDetector()
- : peak_found_(false),
- peak_detection_threshold_(0),
- peak_period_counter_ms_(-1) {
+DelayPeakDetector::DelayPeakDetector(const TickTimer* tick_timer)
+ : peak_found_(false),
+ peak_detection_threshold_(0),
+ tick_timer_(tick_timer) {
+ RTC_DCHECK(!peak_period_stopwatch_);
}
void DelayPeakDetector::Reset() {
- peak_period_counter_ms_ = -1; // Indicate that next peak is the first.
+ peak_period_stopwatch_.reset();
peak_found_ = false;
peak_history_.clear();
}
@@ -55,38 +59,40 @@ int DelayPeakDetector::MaxPeakHeight() const {
return max_height;
}
-int DelayPeakDetector::MaxPeakPeriod() const {
- int max_period = -1; // Returns -1 for an empty history.
- std::list<Peak>::const_iterator it;
- for (it = peak_history_.begin(); it != peak_history_.end(); ++it) {
- max_period = std::max(max_period, it->period_ms);
+uint64_t DelayPeakDetector::MaxPeakPeriod() const {
+ auto max_period_element = std::max_element(
+ peak_history_.begin(), peak_history_.end(),
+ [](Peak a, Peak b) { return a.period_ms < b.period_ms; });
+ if (max_period_element == peak_history_.end()) {
+ return 0; // |peak_history_| is empty.
}
- return max_period;
+ RTC_DCHECK_GT(max_period_element->period_ms, 0u);
+ return max_period_element->period_ms;
}
bool DelayPeakDetector::Update(int inter_arrival_time, int target_level) {
if (inter_arrival_time > target_level + peak_detection_threshold_ ||
inter_arrival_time > 2 * target_level) {
// A delay peak is observed.
- if (peak_period_counter_ms_ == -1) {
+ if (!peak_period_stopwatch_) {
// This is the first peak. Reset the period counter.
- peak_period_counter_ms_ = 0;
- } else if (peak_period_counter_ms_ <= kMaxPeakPeriodMs) {
+ peak_period_stopwatch_ = tick_timer_->GetNewStopwatch();
+ } else if (peak_period_stopwatch_->ElapsedMs() <= kMaxPeakPeriodMs) {
// This is not the first peak, and the period is valid.
// Store peak data in the vector.
Peak peak_data;
- peak_data.period_ms = peak_period_counter_ms_;
+ peak_data.period_ms = peak_period_stopwatch_->ElapsedMs();
peak_data.peak_height_packets = inter_arrival_time;
peak_history_.push_back(peak_data);
while (peak_history_.size() > kMaxNumPeaks) {
// Delete the oldest data point.
peak_history_.pop_front();
}
- peak_period_counter_ms_ = 0;
- } else if (peak_period_counter_ms_ <= 2 * kMaxPeakPeriodMs) {
+ peak_period_stopwatch_ = tick_timer_->GetNewStopwatch();
+ } else if (peak_period_stopwatch_->ElapsedMs() <= 2 * kMaxPeakPeriodMs) {
// Invalid peak due to too long period. Reset period counter and start
// looking for next peak.
- peak_period_counter_ms_ = 0;
+ peak_period_stopwatch_ = tick_timer_->GetNewStopwatch();
} else {
// More than 2 times the maximum period has elapsed since the last peak
// was registered. It seems that the network conditions have changed.
@@ -97,16 +103,10 @@ bool DelayPeakDetector::Update(int inter_arrival_time, int target_level) {
return CheckPeakConditions();
}
-void DelayPeakDetector::IncrementCounter(int inc_ms) {
- if (peak_period_counter_ms_ >= 0) {
- peak_period_counter_ms_ += inc_ms;
- }
-}
-
bool DelayPeakDetector::CheckPeakConditions() {
size_t s = peak_history_.size();
if (s >= kMinPeaksToTrigger &&
- peak_period_counter_ms_ <= 2 * MaxPeakPeriod()) {
+ peak_period_stopwatch_->ElapsedMs() <= 2 * MaxPeakPeriod()) {
peak_found_ = true;
} else {
peak_found_ = false;
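The rewritten MaxPeakPeriod() is a straightforward std::max_element over the peak history, returning 0 for an empty history (hence the switch from a signed return with -1 to uint64_t). A stand-alone version of the same pattern:

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <list>

struct Peak {
  uint64_t period_ms;
  int peak_height_packets;
};

uint64_t MaxPeakPeriod(const std::list<Peak>& history) {
  auto it = std::max_element(
      history.begin(), history.end(),
      [](const Peak& a, const Peak& b) { return a.period_ms < b.period_ms; });
  return it == history.end() ? 0 : it->period_ms;  // 0 for an empty history.
}

int main() {
  std::list<Peak> history;
  std::cout << MaxPeakPeriod(history) << "\n";  // 0: empty history.
  history.push_back({1200, 3});
  history.push_back({6000, 4});
  history.push_back({3000, 2});
  std::cout << MaxPeakPeriod(history) << "\n";  // 6000.
  return 0;
}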
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/delay_peak_detector.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/delay_peak_detector.h
index 69433b45248..f57d3bd71e5 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/delay_peak_detector.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/delay_peak_detector.h
@@ -14,14 +14,16 @@
#include <string.h> // size_t
#include <list>
+#include <memory>
#include "webrtc/base/constructormagic.h"
+#include "webrtc/modules/audio_coding/neteq/tick_timer.h"
namespace webrtc {
class DelayPeakDetector {
public:
- DelayPeakDetector();
+ DelayPeakDetector(const TickTimer* tick_timer);
virtual ~DelayPeakDetector();
virtual void Reset();
@@ -37,20 +39,15 @@ class DelayPeakDetector {
// delay peaks have been observed recently. The unit is number of packets.
virtual int MaxPeakHeight() const;
- // Calculates and returns the maximum delay peak distance in ms.
- // Returns -1 if no delay peaks have been observed recently.
- virtual int MaxPeakPeriod() const;
+ // Calculates and returns the maximum delay peak distance in ms (strictly
+ // larger than 0), or 0 if no delay peaks have been observed recently.
+ virtual uint64_t MaxPeakPeriod() const;
// Updates the DelayPeakDetector with a new inter-arrival time (in packets)
// and the current target buffer level (needed to decide if a peak is observed
// or not). Returns true if peak-mode is active, false if not.
virtual bool Update(int inter_arrival_time, int target_level);
- // Increments the |peak_period_counter_ms_| with |inc_ms|. Only increments
- // the counter if it is non-negative. A negative denotes that no peak has
- // been observed.
- virtual void IncrementCounter(int inc_ms);
-
private:
static const size_t kMaxNumPeaks = 8;
static const size_t kMinPeaksToTrigger = 2;
@@ -58,7 +55,7 @@ class DelayPeakDetector {
static const int kMaxPeakPeriodMs = 10000;
typedef struct {
- int period_ms;
+ uint64_t period_ms;
int peak_height_packets;
} Peak;
@@ -67,7 +64,8 @@ class DelayPeakDetector {
std::list<Peak> peak_history_;
bool peak_found_;
int peak_detection_threshold_;
- int peak_period_counter_ms_;
+ const TickTimer* tick_timer_;
+ std::unique_ptr<TickTimer::Stopwatch> peak_period_stopwatch_;
RTC_DISALLOW_COPY_AND_ASSIGN(DelayPeakDetector);
};
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/delay_peak_detector_unittest.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/delay_peak_detector_unittest.cc
index c40f3991b04..32b36b25ef4 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/delay_peak_detector_unittest.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/delay_peak_detector_unittest.cc
@@ -17,22 +17,25 @@
namespace webrtc {
TEST(DelayPeakDetector, CreateAndDestroy) {
- DelayPeakDetector* detector = new DelayPeakDetector();
+ TickTimer tick_timer;
+ DelayPeakDetector* detector = new DelayPeakDetector(&tick_timer);
EXPECT_FALSE(detector->peak_found());
delete detector;
}
TEST(DelayPeakDetector, EmptyHistory) {
- DelayPeakDetector detector;
+ TickTimer tick_timer;
+ DelayPeakDetector detector(&tick_timer);
EXPECT_EQ(-1, detector.MaxPeakHeight());
- EXPECT_EQ(-1, detector.MaxPeakPeriod());
+ EXPECT_EQ(0u, detector.MaxPeakPeriod());
}
// Inject a series of packet arrivals into the detector. Three of the packets
// have suffered delays. After the third delay peak, peak-mode is expected to
// start. This should then continue until it is disengaged due to lack of peaks.
TEST(DelayPeakDetector, TriggerPeakMode) {
- DelayPeakDetector detector;
+ TickTimer tick_timer;
+ DelayPeakDetector detector(&tick_timer);
const int kPacketSizeMs = 30;
detector.SetPacketAudioLength(kPacketSizeMs);
@@ -52,7 +55,7 @@ TEST(DelayPeakDetector, TriggerPeakMode) {
// Third delay peak. Trigger peak-mode after this packet.
arrival_times_ms[400] += kPeakDelayMs;
// The second peak period is the longest, 200 packets.
- const int kWorstPeakPeriod = 200 * kPacketSizeMs;
+ const uint64_t kWorstPeakPeriod = 200 * kPacketSizeMs;
int peak_mode_start_ms = arrival_times_ms[400];
// Expect to disengage after no peaks are observed for two period times.
int peak_mode_end_ms = peak_mode_start_ms + 2 * kWorstPeakPeriod;
@@ -74,7 +77,7 @@ TEST(DelayPeakDetector, TriggerPeakMode) {
}
++next;
}
- detector.IncrementCounter(10);
+ tick_timer.Increment();
time += 10; // Increase time 10 ms.
}
}
@@ -83,7 +86,8 @@ TEST(DelayPeakDetector, TriggerPeakMode) {
// 2, in order to raise the bar for delay peaks to inter-arrival times > 4.
// The delay pattern has peaks with delay = 3, thus should not trigger.
TEST(DelayPeakDetector, DoNotTriggerPeakMode) {
- DelayPeakDetector detector;
+ TickTimer tick_timer;
+ DelayPeakDetector detector(&tick_timer);
const int kPacketSizeMs = 30;
detector.SetPacketAudioLength(kPacketSizeMs);
@@ -114,7 +118,7 @@ TEST(DelayPeakDetector, DoNotTriggerPeakMode) {
EXPECT_FALSE(detector.Update(iat_packets, kTargetBufferLevel));
++next;
}
- detector.IncrementCounter(10);
+ tick_timer.Increment();
time += 10; // Increase time 10 ms.
}
}
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/dsp_helper.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/dsp_helper.cc
index 4188914c86c..32756650942 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/dsp_helper.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/dsp_helper.cc
@@ -80,6 +80,22 @@ int DspHelper::RampSignal(int16_t* signal,
return RampSignal(signal, length, factor, increment, signal);
}
+int DspHelper::RampSignal(AudioVector* signal,
+ size_t start_index,
+ size_t length,
+ int factor,
+ int increment) {
+ int factor_q20 = (factor << 6) + 32;
+ // TODO(hlundin): Add 32 to factor_q20 when converting back to Q14?
+ for (size_t i = start_index; i < start_index + length; ++i) {
+ (*signal)[i] = (factor * (*signal)[i] + 8192) >> 14;
+ factor_q20 += increment;
+ factor_q20 = std::max(factor_q20, 0); // Never go negative.
+ factor = std::min(factor_q20 >> 6, 16384);
+ }
+ return factor;
+}
+
int DspHelper::RampSignal(AudioMultiVector* signal,
size_t start_index,
size_t length,
@@ -94,7 +110,7 @@ int DspHelper::RampSignal(AudioMultiVector* signal,
// Loop over the channels, starting at the same |factor| each time.
for (size_t channel = 0; channel < signal->Channels(); ++channel) {
end_factor =
- RampSignal(&(*signal)[channel][start_index], length, factor, increment);
+ RampSignal(&(*signal)[channel], start_index, length, factor, increment);
}
return end_factor;
}
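The new AudioVector overload of RampSignal keeps the same fixed-point scheme as the pointer version: |factor| is a Q14 gain, tracked internally in Q20 so that small per-sample |increment| values are not lost to truncation. A self-contained copy of the arithmetic on a plain array, with assumed input values:

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iostream>

int RampSignal(int16_t* signal, size_t length, int factor, int increment) {
  int factor_q20 = (factor << 6) + 32;
  for (size_t i = 0; i < length; ++i) {
    signal[i] = (factor * signal[i] + 8192) >> 14;  // Apply Q14 gain, rounded.
    factor_q20 += increment;
    factor_q20 = std::max(factor_q20, 0);           // Never go negative.
    factor = std::min(factor_q20 >> 6, 16384);      // Cap at unity gain (Q14).
  }
  return factor;
}

int main() {
  int16_t samples[4] = {1000, 1000, 1000, 1000};
  // Start at half gain (8192 in Q14) and ramp up by 64 Q20 steps per sample.
  const int end_factor = RampSignal(samples, 4, 8192, 64);
  for (int16_t s : samples) std::cout << s << " ";  // 500 500 500 500
  std::cout << "\nend factor (Q14): " << end_factor << "\n";
  return 0;
}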
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/dsp_helper.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/dsp_helper.h
index 269c2eb0f25..23543fe383e 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/dsp_helper.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/dsp_helper.h
@@ -67,6 +67,13 @@ class DspHelper {
// Same as above, but processes |length| samples from |signal|, starting at
// |start_index|.
+ static int RampSignal(AudioVector* signal,
+ size_t start_index,
+ size_t length,
+ int factor,
+ int increment);
+
+ // Same as above, but for an AudioMultiVector.
static int RampSignal(AudioMultiVector* signal,
size_t start_index,
size_t length,
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/expand.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/expand.cc
index ef7af46597e..963f4bdb6c0 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/expand.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/expand.cc
@@ -19,6 +19,7 @@
#include "webrtc/base/safe_conversions.h"
#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
#include "webrtc/modules/audio_coding/neteq/background_noise.h"
+#include "webrtc/modules/audio_coding/neteq/cross_correlation.h"
#include "webrtc/modules/audio_coding/neteq/dsp_helper.h"
#include "webrtc/modules/audio_coding/neteq/random_vector.h"
#include "webrtc/modules/audio_coding/neteq/statistics_calculator.h"
@@ -111,25 +112,33 @@ int Expand::Process(AudioMultiVector* output) {
// Use only expand_vector0.
assert(expansion_vector_position + temp_length <=
parameters.expand_vector0.Size());
- memcpy(voiced_vector_storage,
- &parameters.expand_vector0[expansion_vector_position],
- sizeof(int16_t) * temp_length);
+ parameters.expand_vector0.CopyTo(temp_length, expansion_vector_position,
+ voiced_vector_storage);
} else if (current_lag_index_ == 1) {
+ std::unique_ptr<int16_t[]> temp_0(new int16_t[temp_length]);
+ parameters.expand_vector0.CopyTo(temp_length, expansion_vector_position,
+ temp_0.get());
+ std::unique_ptr<int16_t[]> temp_1(new int16_t[temp_length]);
+ parameters.expand_vector1.CopyTo(temp_length, expansion_vector_position,
+ temp_1.get());
// Mix 3/4 of expand_vector0 with 1/4 of expand_vector1.
- WebRtcSpl_ScaleAndAddVectorsWithRound(
- &parameters.expand_vector0[expansion_vector_position], 3,
- &parameters.expand_vector1[expansion_vector_position], 1, 2,
- voiced_vector_storage, temp_length);
+ WebRtcSpl_ScaleAndAddVectorsWithRound(temp_0.get(), 3, temp_1.get(), 1, 2,
+ voiced_vector_storage, temp_length);
} else if (current_lag_index_ == 2) {
// Mix 1/2 of expand_vector0 with 1/2 of expand_vector1.
assert(expansion_vector_position + temp_length <=
parameters.expand_vector0.Size());
assert(expansion_vector_position + temp_length <=
parameters.expand_vector1.Size());
- WebRtcSpl_ScaleAndAddVectorsWithRound(
- &parameters.expand_vector0[expansion_vector_position], 1,
- &parameters.expand_vector1[expansion_vector_position], 1, 1,
- voiced_vector_storage, temp_length);
+
+ std::unique_ptr<int16_t[]> temp_0(new int16_t[temp_length]);
+ parameters.expand_vector0.CopyTo(temp_length, expansion_vector_position,
+ temp_0.get());
+ std::unique_ptr<int16_t[]> temp_1(new int16_t[temp_length]);
+ parameters.expand_vector1.CopyTo(temp_length, expansion_vector_position,
+ temp_1.get());
+ WebRtcSpl_ScaleAndAddVectorsWithRound(temp_0.get(), 1, temp_1.get(), 1, 1,
+ voiced_vector_storage, temp_length);
}
// Get tapering window parameters. Values are in Q15.
@@ -298,8 +307,7 @@ int Expand::Process(AudioMultiVector* output) {
} else {
assert(output->Size() == current_lag);
}
- memcpy(&(*output)[channel_ix][0], temp_data,
- sizeof(temp_data[0]) * current_lag);
+ (*output)[channel_ix].OverwriteAt(temp_data, current_lag, 0);
}
// Increase call number and cap it.
@@ -326,6 +334,17 @@ void Expand::SetParametersForMergeAfterExpand() {
stop_muting_ = true;
}
+bool Expand::Muted() const {
+ if (first_expand_ || stop_muting_)
+ return false;
+ RTC_DCHECK(channel_parameters_);
+ for (size_t ch = 0; ch < num_channels_; ++ch) {
+ if (channel_parameters_[ch].mute_factor != 0)
+ return false;
+ }
+ return true;
+}
+
size_t Expand::overlap_length() const {
return overlap_length_;
}
@@ -372,19 +391,20 @@ void Expand::AnalyzeSignal(int16_t* random_vector) {
size_t fs_mult_lpc_analysis_len = fs_mult * kLpcAnalysisLength;
const size_t signal_length = static_cast<size_t>(256 * fs_mult);
- const int16_t* audio_history =
- &(*sync_buffer_)[0][sync_buffer_->Size() - signal_length];
+
+ const size_t audio_history_position = sync_buffer_->Size() - signal_length;
+ std::unique_ptr<int16_t[]> audio_history(new int16_t[signal_length]);
+ (*sync_buffer_)[0].CopyTo(signal_length, audio_history_position,
+ audio_history.get());
// Initialize.
InitializeForAnExpandPeriod();
// Calculate correlation in downsampled domain (4 kHz sample rate).
- int correlation_scale;
size_t correlation_length = 51; // TODO(hlundin): Legacy bit-exactness.
// If it is decided to break bit-exactness |correlation_length| should be
// initialized to the return value of Correlation().
- Correlation(audio_history, signal_length, correlation_vector,
- &correlation_scale);
+ Correlation(audio_history.get(), signal_length, correlation_vector);
// Find peaks in correlation vector.
DspHelper::PeakDetection(correlation_vector, correlation_length,
@@ -455,7 +475,7 @@ void Expand::AnalyzeSignal(int16_t* random_vector) {
&audio_history[signal_length - correlation_length - start_index
- correlation_lags],
correlation_length + start_index + correlation_lags - 1);
- correlation_scale = (31 - WebRtcSpl_NormW32(signal_max * signal_max)) +
+ int correlation_scale = (31 - WebRtcSpl_NormW32(signal_max * signal_max)) +
(31 - WebRtcSpl_NormW32(static_cast<int32_t>(correlation_length))) - 31;
correlation_scale = std::max(0, correlation_scale);
@@ -541,12 +561,14 @@ void Expand::AnalyzeSignal(int16_t* random_vector) {
parameters.expand_vector1.Extend(
expansion_length - parameters.expand_vector1.Size());
}
- WebRtcSpl_AffineTransformVector(&parameters.expand_vector1[0],
+ std::unique_ptr<int16_t[]> temp_1(new int16_t[expansion_length]);
+ WebRtcSpl_AffineTransformVector(temp_1.get(),
const_cast<int16_t*>(vector2),
amplitude_ratio,
4096,
13,
expansion_length);
+ parameters.expand_vector1.OverwriteAt(temp_1.get(), expansion_length, 0);
} else {
// Energy change constraint not fulfilled. Only use last vector.
parameters.expand_vector0.Clear();
@@ -582,13 +604,6 @@ void Expand::AnalyzeSignal(int16_t* random_vector) {
}
// Calculate the LPC and the gain of the filters.
- // Calculate scale value needed for auto-correlation.
- correlation_scale = WebRtcSpl_MaxAbsValueW16(
- &(audio_history[signal_length - fs_mult_lpc_analysis_len]),
- fs_mult_lpc_analysis_len);
-
- correlation_scale = std::min(16 - WebRtcSpl_NormW32(correlation_scale), 0);
- correlation_scale = std::max(correlation_scale * 2 + 7, 0);
// Calculate kUnvoicedLpcOrder + 1 lags of the auto-correlation function.
size_t temp_index = signal_length - fs_mult_lpc_analysis_len -
@@ -601,11 +616,9 @@ void Expand::AnalyzeSignal(int16_t* random_vector) {
memcpy(&temp_signal[kUnvoicedLpcOrder],
&audio_history[temp_index + kUnvoicedLpcOrder],
sizeof(int16_t) * fs_mult_lpc_analysis_len);
- WebRtcSpl_CrossCorrelation(auto_correlation,
- &temp_signal[kUnvoicedLpcOrder],
- &temp_signal[kUnvoicedLpcOrder],
- fs_mult_lpc_analysis_len, kUnvoicedLpcOrder + 1,
- correlation_scale, -1);
+ CrossCorrelationWithAutoShift(
+ &temp_signal[kUnvoicedLpcOrder], &temp_signal[kUnvoicedLpcOrder],
+ fs_mult_lpc_analysis_len, kUnvoicedLpcOrder + 1, -1, auto_correlation);
delete [] temp_signal;
// Verify that variance is positive.
@@ -766,8 +779,7 @@ Expand::ChannelParameters::ChannelParameters()
void Expand::Correlation(const int16_t* input,
size_t input_length,
- int16_t* output,
- int* output_scale) const {
+ int16_t* output) const {
// Set parameters depending on sample rate.
const int16_t* filter_coefficients;
size_t num_coefficients;
@@ -814,13 +826,11 @@ void Expand::Correlation(const int16_t* input,
downsampled_input, norm_shift);
int32_t correlation[kNumCorrelationLags];
- static const int kCorrelationShift = 6;
- WebRtcSpl_CrossCorrelation(
- correlation,
+ CrossCorrelationWithAutoShift(
&downsampled_input[kDownsampledLength - kCorrelationLength],
&downsampled_input[kDownsampledLength - kCorrelationLength
- kCorrelationStartLag],
- kCorrelationLength, kNumCorrelationLags, kCorrelationShift, -1);
+ kCorrelationLength, kNumCorrelationLags, -1, correlation);
// Normalize and move data from 32-bit to 16-bit vector.
int32_t max_correlation = WebRtcSpl_MaxAbsValueW32(correlation,
@@ -829,8 +839,6 @@ void Expand::Correlation(const int16_t* input,
std::max(18 - WebRtcSpl_NormW32(max_correlation), 0));
WebRtcSpl_VectorBitShiftW32ToW16(output, kNumCorrelationLags, correlation,
norm_shift2);
- // Total scale factor (right shifts) of correlation value.
- *output_scale = 2 * norm_shift + kCorrelationShift + norm_shift2;
}
void Expand::UpdateLagIndex() {
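The new temp_0/temp_1 buffers feed WebRtcSpl_ScaleAndAddVectorsWithRound, which mixes the two expand vectors with integer weights and a rounding right shift: scales 3 and 1 with a shift of 2 give a rounded 3/4 + 1/4 blend. A simplified stand-in (plain loop, not the SPL routine) showing the same arithmetic:

#include <cstddef>
#include <cstdint>
#include <iostream>

void ScaleAndAddWithRound(const int16_t* a, int scale_a,
                          const int16_t* b, int scale_b,
                          int right_shifts, int16_t* out, size_t length) {
  const int round = 1 << (right_shifts - 1);
  for (size_t i = 0; i < length; ++i)
    out[i] = static_cast<int16_t>(
        (scale_a * a[i] + scale_b * b[i] + round) >> right_shifts);
}

int main() {
  const int16_t v0[4] = {400, -400, 1000, 0};
  const int16_t v1[4] = {0, 400, 1000, -100};
  int16_t mixed[4];
  ScaleAndAddWithRound(v0, 3, v1, 1, 2, mixed, 4);  // 3/4 * v0 + 1/4 * v1.
  for (int16_t s : mixed) std::cout << s << " ";    // 300 -200 1000 -25
  std::cout << "\n";
  return 0;
}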
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/expand.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/expand.h
index 7f61bf3b18c..0feba3693a1 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/expand.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/expand.h
@@ -62,6 +62,10 @@ class Expand {
return channel_parameters_[channel].mute_factor;
}
+ // Returns true if expansion has been faded down to zero amplitude (for all
+ // channels); false otherwise.
+ bool Muted() const;
+
// Accessors and mutators.
virtual size_t overlap_length() const;
size_t max_lag() const { return max_lag_; }
@@ -120,12 +124,10 @@ class Expand {
// Calculate the auto-correlation of |input|, with length |input_length|
// samples. The correlation is calculated from a downsampled version of
- // |input|, and is written to |output|. The scale factor is written to
- // |output_scale|.
+ // |input|, and is written to |output|.
void Correlation(const int16_t* input,
size_t input_length,
- int16_t* output,
- int* output_scale) const;
+ int16_t* output) const;
void UpdateLagIndex();
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/expand_unittest.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/expand_unittest.cc
index 1441704102d..f19487ab17d 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/expand_unittest.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/expand_unittest.cc
@@ -93,8 +93,9 @@ class ExpandTest : public ::testing::Test {
ASSERT_TRUE(input_file_.Seek(speech_start_samples));
// Pre-load the sync buffer with speech data.
- ASSERT_TRUE(
- input_file_.Read(sync_buffer_.Size(), &sync_buffer_.Channel(0)[0]));
+ std::unique_ptr<int16_t[]> temp(new int16_t[sync_buffer_.Size()]);
+ ASSERT_TRUE(input_file_.Read(sync_buffer_.Size(), temp.get()));
+ sync_buffer_.Channel(0).OverwriteAt(temp.get(), sync_buffer_.Size(), 0);
ASSERT_EQ(1u, num_channels_) << "Fix: Must populate all channels.";
}
@@ -169,6 +170,37 @@ TEST_F(ExpandTest, CheckOutageStatsAfterReset) {
statistics_.last_outage_duration_ms());
}
+namespace {
+// Runs expand until Muted() returns true. Times out after 1000 calls.
+void ExpandUntilMuted(size_t num_channels, Expand* expand) {
+ EXPECT_FALSE(expand->Muted()) << "Instance is muted from the start";
+ AudioMultiVector output(num_channels);
+ int num_calls = 0;
+ while (!expand->Muted()) {
+ ASSERT_LT(num_calls++, 1000) << "Test timed out";
+ EXPECT_EQ(0, expand->Process(&output));
+ }
+}
+} // namespace
+
+// Verifies that Muted() returns true after a long expand period. Also verifies
+// that Muted() is reset to false after calling Reset(),
+// SetParametersForMergeAfterExpand() and SetParametersForNormalAfterExpand().
+TEST_F(ExpandTest, Muted) {
+ ExpandUntilMuted(num_channels_, &expand_);
+ expand_.Reset();
+ EXPECT_FALSE(expand_.Muted()); // Should be back to unmuted.
+
+ ExpandUntilMuted(num_channels_, &expand_);
+ expand_.SetParametersForMergeAfterExpand();
+ EXPECT_FALSE(expand_.Muted()); // Should be back to unmuted.
+
+ expand_.Reset(); // Must reset in order to start a new expand period.
+ ExpandUntilMuted(num_channels_, &expand_);
+ expand_.SetParametersForNormalAfterExpand();
+ EXPECT_FALSE(expand_.Muted()); // Should be back to unmuted.
+}
+
// TODO(hlundin): Write more tests.
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/include/neteq.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/include/neteq.h
index 89b0c543244..3a9de1d2606 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/include/neteq.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/include/neteq.h
@@ -93,6 +93,7 @@ class NetEq {
BackgroundNoiseMode background_noise_mode;
NetEqPlayoutMode playout_mode;
bool enable_fast_accelerate;
+ bool enable_muted_state = false;
};
enum ReturnCodes {
@@ -161,8 +162,12 @@ class NetEq {
// |num_channels_|, |sample_rate_hz_|, |samples_per_channel_|, and
// |vad_activity_| are updated upon success. If an error is returned, some
// fields may not have been updated.
+ // If muted state is enabled (through Config::enable_muted_state), |muted|
+ // may be set to true after a prolonged expand period. When this happens, the
+ // |data_| in |audio_frame| is not written, but should be interpreted as being
+ // all zeros.
// Returns kOK on success, or kFail in case of an error.
- virtual int GetAudio(AudioFrame* audio_frame) = 0;
+ virtual int GetAudio(AudioFrame* audio_frame, bool* muted) = 0;
// Associates |rtp_payload_type| with |codec| and |codec_name|, and stores the
// information in the codec database. Returns 0 on success, -1 on failure.
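The new Config::enable_muted_state flag and the |muted| out-parameter added to GetAudio() above change the pull contract: once NetEq has expanded long enough for the output to fade to silence and the packet buffer is empty, it can skip the signal processing and simply report a muted frame. A minimal caller-side sketch, assuming the NetEq instance was created with enable_muted_state and is fed packets elsewhere, and using only the types visible in this patch:

#include <cstring>

#include "webrtc/modules/audio_coding/neteq/include/neteq.h"
#include "webrtc/modules/include/module_common_types.h"

// Sketch only: pulls one 10 ms frame and substitutes silence when NetEq
// reports a muted frame.
int PullOneFrame(webrtc::NetEq* neteq, webrtc::AudioFrame* frame) {
  bool muted = false;
  int error = neteq->GetAudio(frame, &muted);
  if (error != webrtc::NetEq::kOK)
    return error;
  if (muted) {
    // Per the header comment above, |data_| is not written in this case and
    // should be interpreted as all zeros.
    std::memset(frame->data_, 0,
                frame->samples_per_channel_ * frame->num_channels_ *
                    sizeof(int16_t));
  }
  return webrtc::NetEq::kOK;
}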
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/merge.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/merge.cc
index 9aed91f7887..299682f60d4 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/merge.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/merge.cc
@@ -18,6 +18,7 @@
#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
#include "webrtc/modules/audio_coding/neteq/audio_multi_vector.h"
+#include "webrtc/modules/audio_coding/neteq/cross_correlation.h"
#include "webrtc/modules/audio_coding/neteq/dsp_helper.h"
#include "webrtc/modules/audio_coding/neteq/expand.h"
#include "webrtc/modules/audio_coding/neteq/sync_buffer.h"
@@ -38,6 +39,8 @@ Merge::Merge(int fs_hz,
assert(num_channels_ > 0);
}
+Merge::~Merge() = default;
+
size_t Merge::Process(int16_t* input, size_t input_length,
int16_t* external_mute_factor_array,
AudioMultiVector* output) {
@@ -60,13 +63,16 @@ size_t Merge::Process(int16_t* input, size_t input_length,
size_t best_correlation_index = 0;
size_t output_length = 0;
+ std::unique_ptr<int16_t[]> input_channel(
+ new int16_t[input_length_per_channel]);
+ std::unique_ptr<int16_t[]> expanded_channel(new int16_t[expanded_length]);
for (size_t channel = 0; channel < num_channels_; ++channel) {
- int16_t* input_channel = &input_vector[channel][0];
- int16_t* expanded_channel = &expanded_[channel][0];
- int16_t expanded_max, input_max;
+ input_vector[channel].CopyTo(
+ input_length_per_channel, 0, input_channel.get());
+ expanded_[channel].CopyTo(expanded_length, 0, expanded_channel.get());
+
int16_t new_mute_factor = SignalScaling(
- input_channel, input_length_per_channel, expanded_channel,
- &expanded_max, &input_max);
+ input_channel.get(), input_length_per_channel, expanded_channel.get());
// Adjust muting factor (product of "main" muting factor and expand muting
// factor).
@@ -84,18 +90,16 @@ size_t Merge::Process(int16_t* input, size_t input_length,
// Downsample, correlate, and find strongest correlation period for the
// master (i.e., first) channel only.
// Downsample to 4kHz sample rate.
- Downsample(input_channel, input_length_per_channel, expanded_channel,
- expanded_length);
+ Downsample(input_channel.get(), input_length_per_channel,
+ expanded_channel.get(), expanded_length);
// Calculate the lag of the strongest correlation period.
best_correlation_index = CorrelateAndPeakSearch(
- expanded_max, input_max, old_length,
- input_length_per_channel, expand_period);
+ old_length, input_length_per_channel, expand_period);
}
- static const int kTempDataSize = 3600;
- int16_t temp_data[kTempDataSize]; // TODO(hlundin) Remove this.
- int16_t* decoded_output = temp_data + best_correlation_index;
+ temp_data_.resize(input_length_per_channel + best_correlation_index);
+ int16_t* decoded_output = temp_data_.data() + best_correlation_index;
// Mute the new decoded data if needed (and unmute it linearly).
// This is the overlapping part of expanded_signal.
@@ -109,7 +113,7 @@ size_t Merge::Process(int16_t* input, size_t input_length,
// and so on.
int increment = 4194 / fs_mult_;
*external_mute_factor =
- static_cast<int16_t>(DspHelper::RampSignal(input_channel,
+ static_cast<int16_t>(DspHelper::RampSignal(input_channel.get(),
interpolation_length,
*external_mute_factor,
increment));
@@ -129,10 +133,10 @@ size_t Merge::Process(int16_t* input, size_t input_length,
int16_t increment =
static_cast<int16_t>(16384 / (interpolation_length + 1)); // In Q14.
int16_t mute_factor = 16384 - increment;
- memmove(temp_data, expanded_channel,
+ memmove(temp_data_.data(), expanded_channel.get(),
sizeof(int16_t) * best_correlation_index);
DspHelper::CrossFade(&expanded_channel[best_correlation_index],
- input_channel, interpolation_length,
+ input_channel.get(), interpolation_length,
&mute_factor, increment, decoded_output);
output_length = best_correlation_index + input_length_per_channel;
@@ -142,8 +146,7 @@ size_t Merge::Process(int16_t* input, size_t input_length,
} else {
assert(output->Size() == output_length);
}
- memcpy(&(*output)[channel][0], temp_data,
- sizeof(temp_data[0]) * output_length);
+ (*output)[channel].OverwriteAt(temp_data_.data(), output_length, 0);
}
// Copy back the first part of the data to |sync_buffer_| and remove it from
@@ -204,29 +207,26 @@ size_t Merge::GetExpandedSignal(size_t* old_length, size_t* expand_period) {
}
int16_t Merge::SignalScaling(const int16_t* input, size_t input_length,
- const int16_t* expanded_signal,
- int16_t* expanded_max, int16_t* input_max) const {
+ const int16_t* expanded_signal) const {
// Adjust muting factor if new vector is more or less of the BGN energy.
const size_t mod_input_length =
std::min(static_cast<size_t>(64 * fs_mult_), input_length);
- *expanded_max = WebRtcSpl_MaxAbsValueW16(expanded_signal, mod_input_length);
- *input_max = WebRtcSpl_MaxAbsValueW16(input, mod_input_length);
-
- // Calculate energy of expanded signal.
- // |log_fs_mult| is log2(fs_mult_), but is not exact for 48000 Hz.
- int log_fs_mult = 30 - WebRtcSpl_NormW32(fs_mult_);
- int expanded_shift = 6 + log_fs_mult
- - WebRtcSpl_NormW32(*expanded_max * *expanded_max);
- expanded_shift = std::max(expanded_shift, 0);
+ const int16_t expanded_max =
+ WebRtcSpl_MaxAbsValueW16(expanded_signal, mod_input_length);
+ int32_t factor = (expanded_max * expanded_max) /
+ (std::numeric_limits<int32_t>::max() /
+ static_cast<int32_t>(mod_input_length));
+ const int expanded_shift = factor == 0 ? 0 : 31 - WebRtcSpl_NormW32(factor);
int32_t energy_expanded = WebRtcSpl_DotProductWithScale(expanded_signal,
expanded_signal,
mod_input_length,
expanded_shift);
// Calculate energy of input signal.
- int input_shift = 6 + log_fs_mult -
- WebRtcSpl_NormW32(*input_max * *input_max);
- input_shift = std::max(input_shift, 0);
+ const int16_t input_max = WebRtcSpl_MaxAbsValueW16(input, mod_input_length);
+ factor = (input_max * input_max) / (std::numeric_limits<int32_t>::max() /
+ static_cast<int32_t>(mod_input_length));
+ const int input_shift = factor == 0 ? 0 : 31 - WebRtcSpl_NormW32(factor);
int32_t energy_input = WebRtcSpl_DotProductWithScale(input, input,
mod_input_length,
input_shift);
@@ -307,22 +307,17 @@ void Merge::Downsample(const int16_t* input, size_t input_length,
}
}
-size_t Merge::CorrelateAndPeakSearch(int16_t expanded_max, int16_t input_max,
- size_t start_position, size_t input_length,
+size_t Merge::CorrelateAndPeakSearch(size_t start_position, size_t input_length,
size_t expand_period) const {
// Calculate correlation without any normalization.
const size_t max_corr_length = kMaxCorrelationLength;
size_t stop_position_downsamp =
std::min(max_corr_length, expand_->max_lag() / (fs_mult_ * 2) + 1);
- int correlation_shift = 0;
- if (expanded_max * input_max > 26843546) {
- correlation_shift = 3;
- }
int32_t correlation[kMaxCorrelationLength];
- WebRtcSpl_CrossCorrelation(correlation, input_downsampled_,
- expanded_downsampled_, kInputDownsampLength,
- stop_position_downsamp, correlation_shift, 1);
+ CrossCorrelationWithAutoShift(input_downsampled_, expanded_downsampled_,
+ kInputDownsampLength, stop_position_downsamp, 1,
+ correlation);
// Normalize correlation to 14 bits and copy to a 16-bit array.
const size_t pad_length = expand_->overlap_length() - 1;
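The rewritten SignalScaling() above no longer passes the per-channel peak values back to the caller; each energy sum now derives its own right shift directly from max^2 and the summation length, so the 32-bit dot product cannot overflow. Roughly, the shift is the number of bits by which max^2 * length exceeds 2^31, clamped at zero. A small standalone sketch of that bound (standard C++ only, not the WebRTC fixed-point helpers) is:

#include <cstddef>
#include <cstdint>
#include <limits>

// Sketch only: pick a right shift so that summing |length| squared samples,
// each bounded by |max_abs|^2, stays within a signed 32-bit accumulator.
// Mirrors the idea behind the new SignalScaling() shifts, not its bit math.
int EnergyShiftSketch(int16_t max_abs, size_t length) {
  const int64_t worst_case =
      static_cast<int64_t>(max_abs) * max_abs * static_cast<int64_t>(length);
  int shift = 0;
  while ((worst_case >> shift) > std::numeric_limits<int32_t>::max())
    ++shift;
  return shift;
}

// Example: full-scale 16-bit samples over 512 terms give a worst case just
// under 2^39, so the sketch returns a shift of 8.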
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/merge.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/merge.h
index a168502c271..48f09a16727 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/merge.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/merge.h
@@ -37,7 +37,7 @@ class Merge {
size_t num_channels,
Expand* expand,
SyncBuffer* sync_buffer);
- virtual ~Merge() {}
+ virtual ~Merge();
// The main method to produce the audio data. The decoded data is supplied in
// |input|, having |input_length| samples in total for all channels
@@ -69,11 +69,10 @@ class Merge {
// of samples that were taken from the |sync_buffer_|.
size_t GetExpandedSignal(size_t* old_length, size_t* expand_period);
- // Analyzes |input| and |expanded_signal| to find maximum values. Returns
- // a muting factor (Q14) to be used on the new data.
+ // Analyzes |input| and |expanded_signal| and returns a muting factor (Q14)
+ // to be used on the new data.
int16_t SignalScaling(const int16_t* input, size_t input_length,
- const int16_t* expanded_signal,
- int16_t* expanded_max, int16_t* input_max) const;
+ const int16_t* expanded_signal) const;
// Downsamples |input| (|input_length| samples) and |expanded_signal| to
// 4 kHz sample rate. The downsampled signals are written to
@@ -84,8 +83,7 @@ class Merge {
// Calculates cross-correlation between |input_downsampled_| and
// |expanded_downsampled_|, and finds the correlation maximum. The maximizing
// lag is returned.
- size_t CorrelateAndPeakSearch(int16_t expanded_max, int16_t input_max,
- size_t start_position, size_t input_length,
+ size_t CorrelateAndPeakSearch(size_t start_position, size_t input_length,
size_t expand_period) const;
const int fs_mult_; // fs_hz_ / 8000.
@@ -95,6 +93,7 @@ class Merge {
int16_t expanded_downsampled_[kExpandDownsampLength];
int16_t input_downsampled_[kInputDownsampLength];
AudioMultiVector expanded_;
+ std::vector<int16_t> temp_data_;
RTC_DISALLOW_COPY_AND_ASSIGN(Merge);
};
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/mock/mock_decoder_database.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/mock/mock_decoder_database.h
index 1b4a3c9da5b..60ae0f6501e 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/mock/mock_decoder_database.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/mock/mock_decoder_database.h
@@ -21,6 +21,7 @@ namespace webrtc {
class MockDecoderDatabase : public DecoderDatabase {
public:
+ MockDecoderDatabase() : DecoderDatabase(nullptr) {}
virtual ~MockDecoderDatabase() { Die(); }
MOCK_METHOD0(Die, void());
MOCK_CONST_METHOD0(Empty,
@@ -59,7 +60,7 @@ class MockDecoderDatabase : public DecoderDatabase {
MOCK_METHOD1(SetActiveCngDecoder,
int(uint8_t rtp_payload_type));
MOCK_METHOD0(GetActiveCngDecoder,
- AudioDecoder*());
+ ComfortNoiseDecoder*());
MOCK_CONST_METHOD1(CheckPayloadTypes,
int(const PacketList& packet_list));
};
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/mock/mock_delay_manager.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/mock/mock_delay_manager.h
index 6fb85854d77..7ceea70621f 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/mock/mock_delay_manager.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/mock/mock_delay_manager.h
@@ -20,8 +20,9 @@ namespace webrtc {
class MockDelayManager : public DelayManager {
public:
MockDelayManager(size_t max_packets_in_buffer,
- DelayPeakDetector* peak_detector)
- : DelayManager(max_packets_in_buffer, peak_detector) {}
+ DelayPeakDetector* peak_detector,
+ const TickTimer* tick_timer)
+ : DelayManager(max_packets_in_buffer, peak_detector, tick_timer) {}
virtual ~MockDelayManager() { Die(); }
MOCK_METHOD0(Die, void());
MOCK_CONST_METHOD0(iat_vector,
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/mock/mock_delay_peak_detector.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/mock/mock_delay_peak_detector.h
index fa5cd7ed061..5564fba312c 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/mock/mock_delay_peak_detector.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/mock/mock_delay_peak_detector.h
@@ -19,15 +19,16 @@ namespace webrtc {
class MockDelayPeakDetector : public DelayPeakDetector {
public:
+ MockDelayPeakDetector(const TickTimer* tick_timer)
+ : DelayPeakDetector(tick_timer) {}
virtual ~MockDelayPeakDetector() { Die(); }
MOCK_METHOD0(Die, void());
MOCK_METHOD0(Reset, void());
MOCK_METHOD1(SetPacketAudioLength, void(int length_ms));
MOCK_METHOD0(peak_found, bool());
MOCK_CONST_METHOD0(MaxPeakHeight, int());
- MOCK_CONST_METHOD0(MaxPeakPeriod, int());
+ MOCK_CONST_METHOD0(MaxPeakPeriod, uint64_t());
MOCK_METHOD2(Update, bool(int inter_arrival_time, int target_level));
- MOCK_METHOD1(IncrementCounter, void(int inc_ms));
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/mock/mock_packet_buffer.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/mock/mock_packet_buffer.h
index 97e54d83a5e..6bb95901d8c 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/mock/mock_packet_buffer.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/mock/mock_packet_buffer.h
@@ -19,8 +19,8 @@ namespace webrtc {
class MockPacketBuffer : public PacketBuffer {
public:
- MockPacketBuffer(size_t max_number_of_packets)
- : PacketBuffer(max_number_of_packets) {}
+ MockPacketBuffer(size_t max_number_of_packets, const TickTimer* tick_timer)
+ : PacketBuffer(max_number_of_packets, tick_timer) {}
virtual ~MockPacketBuffer() { Die(); }
MOCK_METHOD0(Die, void());
MOCK_METHOD0(Flush,
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq.cc
index c31dbdc1a3c..2d1ce724cab 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq.cc
@@ -10,21 +10,10 @@
#include "webrtc/modules/audio_coding/neteq/include/neteq.h"
+#include <memory>
#include <sstream>
-#include "webrtc/modules/audio_coding/neteq/accelerate.h"
-#include "webrtc/modules/audio_coding/neteq/buffer_level_filter.h"
-#include "webrtc/modules/audio_coding/neteq/decoder_database.h"
-#include "webrtc/modules/audio_coding/neteq/delay_manager.h"
-#include "webrtc/modules/audio_coding/neteq/delay_peak_detector.h"
-#include "webrtc/modules/audio_coding/neteq/dtmf_buffer.h"
-#include "webrtc/modules/audio_coding/neteq/dtmf_tone_generator.h"
-#include "webrtc/modules/audio_coding/neteq/expand.h"
#include "webrtc/modules/audio_coding/neteq/neteq_impl.h"
-#include "webrtc/modules/audio_coding/neteq/packet_buffer.h"
-#include "webrtc/modules/audio_coding/neteq/payload_splitter.h"
-#include "webrtc/modules/audio_coding/neteq/preemptive_expand.h"
-#include "webrtc/modules/audio_coding/neteq/timestamp_scaler.h"
namespace webrtc {
@@ -37,41 +26,16 @@ std::string NetEq::Config::ToString() const {
<< ", max_packets_in_buffer=" << max_packets_in_buffer
<< ", background_noise_mode=" << background_noise_mode
<< ", playout_mode=" << playout_mode
- << ", enable_fast_accelerate=" << enable_fast_accelerate;
+ << ", enable_fast_accelerate="
+ << (enable_fast_accelerate ? "true" : "false")
+ << ", enable_muted_state=" << (enable_muted_state ? "true" : "false");
return ss.str();
}
// Creates all classes needed and inject them into a new NetEqImpl object.
// Return the new object.
NetEq* NetEq::Create(const NetEq::Config& config) {
- BufferLevelFilter* buffer_level_filter = new BufferLevelFilter;
- DecoderDatabase* decoder_database = new DecoderDatabase;
- DelayPeakDetector* delay_peak_detector = new DelayPeakDetector;
- DelayManager* delay_manager =
- new DelayManager(config.max_packets_in_buffer, delay_peak_detector);
- delay_manager->SetMaximumDelay(config.max_delay_ms);
- DtmfBuffer* dtmf_buffer = new DtmfBuffer(config.sample_rate_hz);
- DtmfToneGenerator* dtmf_tone_generator = new DtmfToneGenerator;
- PacketBuffer* packet_buffer = new PacketBuffer(config.max_packets_in_buffer);
- PayloadSplitter* payload_splitter = new PayloadSplitter;
- TimestampScaler* timestamp_scaler = new TimestampScaler(*decoder_database);
- AccelerateFactory* accelerate_factory = new AccelerateFactory;
- ExpandFactory* expand_factory = new ExpandFactory;
- PreemptiveExpandFactory* preemptive_expand_factory =
- new PreemptiveExpandFactory;
- return new NetEqImpl(config,
- buffer_level_filter,
- decoder_database,
- delay_manager,
- delay_peak_detector,
- dtmf_buffer,
- dtmf_tone_generator,
- packet_buffer,
- payload_splitter,
- timestamp_scaler,
- accelerate_factory,
- expand_factory,
- preemptive_expand_factory);
+ return new NetEqImpl(config, NetEqImpl::Dependencies(config));
}
} // namespace webrtc
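NetEq::Create() no longer wires up a dozen raw-pointer collaborators by hand; the NetEqImpl::Dependencies constructor (declared in neteq_impl.h further down) builds the default objects, and the NetEqImpl constructor takes ownership by moving them in. The production call site is unchanged. A sketch of the two construction paths, assuming only the types shown in this patch:

#include <utility>

#include "webrtc/modules/audio_coding/neteq/include/neteq.h"
#include "webrtc/modules/audio_coding/neteq/neteq_impl.h"

// Sketch only. The usual factory path stays as before:
webrtc::NetEq* CreateDefault() {
  webrtc::NetEq::Config config;
  config.sample_rate_hz = 16000;
  config.enable_muted_state = true;  // New option from this patch.
  return webrtc::NetEq::Create(config);
}

// Tests (see neteq_impl_unittest.cc below) can instead build a Dependencies
// struct, swap individual members for mocks, and move it into NetEqImpl.
webrtc::NetEqImpl* CreateWithCustomDeps(const webrtc::NetEq::Config& config) {
  webrtc::NetEqImpl::Dependencies deps(config);
  // e.g. deps.packet_buffer = std::move(some_replacement);
  return new webrtc::NetEqImpl(config, std::move(deps));
}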
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq.gypi b/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq.gypi
index ead9586f5ce..e92567eef5b 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq.gypi
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq.gypi
@@ -51,6 +51,8 @@
'dependencies': [
'<@(neteq_dependencies)',
'<(webrtc_root)/common.gyp:webrtc_common',
+ 'builtin_audio_decoder_factory',
+ 'rent_a_codec',
],
'defines': [
'<@(neteq_defines)',
@@ -73,6 +75,8 @@
'buffer_level_filter.h',
'comfort_noise.cc',
'comfort_noise.h',
+ 'cross_correlation.cc',
+ 'cross_correlation.h',
'decision_logic.cc',
'decision_logic.h',
'decision_logic_fax.cc',
@@ -105,6 +109,8 @@
'statistics_calculator.h',
'normal.cc',
'normal.h',
+ 'packet.cc',
+ 'packet.h',
'packet_buffer.cc',
'packet_buffer.h',
'payload_splitter.cc',
@@ -119,6 +125,8 @@
'rtcp.h',
'sync_buffer.cc',
'sync_buffer.h',
+ 'tick_timer.cc',
+ 'tick_timer.h',
'timestamp_scaler.cc',
'timestamp_scaler.h',
'time_stretch.cc',
@@ -206,19 +214,6 @@
],
}, # neteq_unittest_tools
], # targets
- 'conditions': [
- ['OS=="android"', {
- 'targets': [
- {
- 'target_name': 'audio_decoder_unittests_apk_target',
- 'type': 'none',
- 'dependencies': [
- '<(apk_tests_path):audio_decoder_unittests_apk',
- ],
- },
- ],
- }],
- ],
}], # include_tests
], # conditions
}
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_external_decoder_unittest.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_external_decoder_unittest.cc
index 50c24a3b73a..25fa1a7365c 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_external_decoder_unittest.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_external_decoder_unittest.cc
@@ -189,7 +189,9 @@ class NetEqExternalVsInternalDecoderTest : public NetEqExternalDecoderUnitTest,
void GetAndVerifyOutput() override {
// Get audio from internal decoder instance.
- EXPECT_EQ(NetEq::kOK, neteq_internal_->GetAudio(&output_internal_));
+ bool muted;
+ EXPECT_EQ(NetEq::kOK, neteq_internal_->GetAudio(&output_internal_, &muted));
+ ASSERT_FALSE(muted);
EXPECT_EQ(1u, output_internal_.num_channels_);
EXPECT_EQ(static_cast<size_t>(kOutputLengthMs * sample_rate_hz_ / 1000),
output_internal_.samples_per_channel_);
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_impl.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_impl.cc
index db37e716d66..7f8661bae89 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_impl.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_impl.cc
@@ -14,6 +14,7 @@
#include <memory.h> // memset
#include <algorithm>
+#include <vector>
#include "webrtc/base/checks.h"
#include "webrtc/base/logging.h"
@@ -21,6 +22,7 @@
#include "webrtc/base/trace_event.h"
#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
#include "webrtc/modules/audio_coding/codecs/audio_decoder.h"
+#include "webrtc/modules/audio_coding/codecs/builtin_audio_decoder_factory.h"
#include "webrtc/modules/audio_coding/neteq/accelerate.h"
#include "webrtc/modules/audio_coding/neteq/background_noise.h"
#include "webrtc/modules/audio_coding/neteq/buffer_level_filter.h"
@@ -42,6 +44,7 @@
#include "webrtc/modules/audio_coding/neteq/post_decode_vad.h"
#include "webrtc/modules/audio_coding/neteq/preemptive_expand.h"
#include "webrtc/modules/audio_coding/neteq/sync_buffer.h"
+#include "webrtc/modules/audio_coding/neteq/tick_timer.h"
#include "webrtc/modules/audio_coding/neteq/timestamp_scaler.h"
#include "webrtc/modules/include/module_common_types.h"
@@ -52,33 +55,43 @@
namespace webrtc {
+NetEqImpl::Dependencies::Dependencies(const NetEq::Config& config)
+ : tick_timer(new TickTimer),
+ buffer_level_filter(new BufferLevelFilter),
+ decoder_database(new DecoderDatabase(CreateBuiltinAudioDecoderFactory())),
+ delay_peak_detector(new DelayPeakDetector(tick_timer.get())),
+ delay_manager(new DelayManager(config.max_packets_in_buffer,
+ delay_peak_detector.get(),
+ tick_timer.get())),
+ dtmf_buffer(new DtmfBuffer(config.sample_rate_hz)),
+ dtmf_tone_generator(new DtmfToneGenerator),
+ packet_buffer(
+ new PacketBuffer(config.max_packets_in_buffer, tick_timer.get())),
+ payload_splitter(new PayloadSplitter),
+ timestamp_scaler(new TimestampScaler(*decoder_database)),
+ accelerate_factory(new AccelerateFactory),
+ expand_factory(new ExpandFactory),
+ preemptive_expand_factory(new PreemptiveExpandFactory) {}
+
+NetEqImpl::Dependencies::~Dependencies() = default;
+
NetEqImpl::NetEqImpl(const NetEq::Config& config,
- BufferLevelFilter* buffer_level_filter,
- DecoderDatabase* decoder_database,
- DelayManager* delay_manager,
- DelayPeakDetector* delay_peak_detector,
- DtmfBuffer* dtmf_buffer,
- DtmfToneGenerator* dtmf_tone_generator,
- PacketBuffer* packet_buffer,
- PayloadSplitter* payload_splitter,
- TimestampScaler* timestamp_scaler,
- AccelerateFactory* accelerate_factory,
- ExpandFactory* expand_factory,
- PreemptiveExpandFactory* preemptive_expand_factory,
+ Dependencies&& deps,
bool create_components)
- : buffer_level_filter_(buffer_level_filter),
- decoder_database_(decoder_database),
- delay_manager_(delay_manager),
- delay_peak_detector_(delay_peak_detector),
- dtmf_buffer_(dtmf_buffer),
- dtmf_tone_generator_(dtmf_tone_generator),
- packet_buffer_(packet_buffer),
- payload_splitter_(payload_splitter),
- timestamp_scaler_(timestamp_scaler),
+ : tick_timer_(std::move(deps.tick_timer)),
+ buffer_level_filter_(std::move(deps.buffer_level_filter)),
+ decoder_database_(std::move(deps.decoder_database)),
+ delay_manager_(std::move(deps.delay_manager)),
+ delay_peak_detector_(std::move(deps.delay_peak_detector)),
+ dtmf_buffer_(std::move(deps.dtmf_buffer)),
+ dtmf_tone_generator_(std::move(deps.dtmf_tone_generator)),
+ packet_buffer_(std::move(deps.packet_buffer)),
+ payload_splitter_(std::move(deps.payload_splitter)),
+ timestamp_scaler_(std::move(deps.timestamp_scaler)),
vad_(new PostDecodeVad()),
- expand_factory_(expand_factory),
- accelerate_factory_(accelerate_factory),
- preemptive_expand_factory_(preemptive_expand_factory),
+ expand_factory_(std::move(deps.expand_factory)),
+ accelerate_factory_(std::move(deps.accelerate_factory)),
+ preemptive_expand_factory_(std::move(deps.preemptive_expand_factory)),
last_mode_(kModeNormal),
decoded_buffer_length_(kMaxFrameSize),
decoded_buffer_(new int16_t[decoded_buffer_length_]),
@@ -95,7 +108,8 @@ NetEqImpl::NetEqImpl(const NetEq::Config& config,
background_noise_mode_(config.background_noise_mode),
playout_mode_(config.playout_mode),
enable_fast_accelerate_(config.enable_fast_accelerate),
- nack_enabled_(false) {
+ nack_enabled_(false),
+ enable_muted_state_(config.enable_muted_state) {
LOG(LS_INFO) << "NetEq config: " << config.ToString();
int fs = config.sample_rate_hz;
if (fs != 8000 && fs != 16000 && fs != 32000 && fs != 48000) {
@@ -103,6 +117,7 @@ NetEqImpl::NetEqImpl(const NetEq::Config& config,
"Changing to 8000 Hz.";
fs = 8000;
}
+ delay_manager_->SetMaximumDelay(config.max_delay_ms);
fs_hz_ = fs;
fs_mult_ = fs / 8000;
last_output_sample_rate_hz_ = fs;
@@ -191,10 +206,10 @@ void SetAudioFrameActivityAndType(bool vad_enabled,
}
} // namespace
-int NetEqImpl::GetAudio(AudioFrame* audio_frame) {
+int NetEqImpl::GetAudio(AudioFrame* audio_frame, bool* muted) {
TRACE_EVENT0("webrtc", "NetEqImpl::GetAudio");
rtc::CritScope lock(&crit_sect_);
- int error = GetAudioInternal(audio_frame);
+ int error = GetAudioInternal(audio_frame, muted);
RTC_DCHECK_EQ(
audio_frame->sample_rate_hz_,
rtc::checked_cast<int>(audio_frame->samples_per_channel_ * 100));
@@ -487,6 +502,11 @@ const SyncBuffer* NetEqImpl::sync_buffer_for_test() const {
return sync_buffer_.get();
}
+Operations NetEqImpl::last_operation_for_test() const {
+ rtc::CritScope lock(&crit_sect_);
+ return last_operation_;
+}
+
// Methods below this line are private.
int NetEqImpl::InsertPacketInternal(const WebRtcRTPHeader& rtp_header,
@@ -532,7 +552,8 @@ int NetEqImpl::InsertPacketInternal(const WebRtcRTPHeader& rtp_header,
packet->header.numCSRCs = 0;
packet->payload_length = payload.size();
packet->primary = true;
- packet->waiting_time = 0;
+ // Waiting time will be set upon inserting the packet in the buffer.
+ RTC_DCHECK(!packet->waiting_time);
packet->payload = new uint8_t[packet->payload_length];
packet->sync_packet = is_sync_packet;
if (!packet->payload) {
@@ -664,13 +685,15 @@ int NetEqImpl::InsertPacketInternal(const WebRtcRTPHeader& rtp_header,
}
}
- // Update bandwidth estimate, if the packet is not sync-packet.
- if (!packet_list.empty() && !packet_list.front()->sync_packet) {
+ // Update bandwidth estimate, if the packet is not sync-packet nor comfort
+ // noise.
+ if (!packet_list.empty() && !packet_list.front()->sync_packet &&
+ !decoder_database_->IsComfortNoise(main_header.payloadType)) {
// The list can be empty here if we got nothing but DTMF payloads.
AudioDecoder* decoder =
decoder_database_->GetDecoder(main_header.payloadType);
assert(decoder); // Should always get a valid object, since we have
- // already checked that the payload types are known.
+ // already checked that the payload types are known.
decoder->IncomingPacket(packet_list.front()->payload,
packet_list.front()->payload_length,
packet_list.front()->header.sequenceNumber,
@@ -728,14 +751,18 @@ int NetEqImpl::InsertPacketInternal(const WebRtcRTPHeader& rtp_header,
const RTPHeader* rtp_header = packet_buffer_->NextRtpHeader();
assert(rtp_header);
int payload_type = rtp_header->payloadType;
- AudioDecoder* decoder = decoder_database_->GetDecoder(payload_type);
- assert(decoder); // Payloads are already checked to be valid.
+ size_t channels = 1;
+ if (!decoder_database_->IsComfortNoise(payload_type)) {
+ AudioDecoder* decoder = decoder_database_->GetDecoder(payload_type);
+ assert(decoder); // Payloads are already checked to be valid.
+ channels = decoder->Channels();
+ }
const DecoderDatabase::DecoderInfo* decoder_info =
decoder_database_->GetDecoderInfo(payload_type);
assert(decoder_info);
if (decoder_info->fs_hz != fs_hz_ ||
- decoder->Channels() != algorithm_buffer_->Channels()) {
- SetSampleRateAndChannels(decoder_info->fs_hz, decoder->Channels());
+ channels != algorithm_buffer_->Channels()) {
+ SetSampleRateAndChannels(decoder_info->fs_hz, channels);
}
if (nack_enabled_) {
RTC_DCHECK(nack_);
@@ -783,11 +810,32 @@ int NetEqImpl::InsertPacketInternal(const WebRtcRTPHeader& rtp_header,
return 0;
}
-int NetEqImpl::GetAudioInternal(AudioFrame* audio_frame) {
+int NetEqImpl::GetAudioInternal(AudioFrame* audio_frame, bool* muted) {
PacketList packet_list;
DtmfEvent dtmf_event;
Operations operation;
bool play_dtmf;
+ *muted = false;
+ tick_timer_->Increment();
+ stats_.IncreaseCounter(output_size_samples_, fs_hz_);
+
+ // Check for muted state.
+ if (enable_muted_state_ && expand_->Muted() && packet_buffer_->Empty()) {
+ RTC_DCHECK_EQ(last_mode_, kModeExpand);
+ playout_timestamp_ += static_cast<uint32_t>(output_size_samples_);
+ audio_frame->sample_rate_hz_ = fs_hz_;
+ audio_frame->samples_per_channel_ = output_size_samples_;
+ audio_frame->timestamp_ =
+ first_packet_
+ ? 0
+ : timestamp_scaler_->ToExternal(playout_timestamp_) -
+ static_cast<uint32_t>(audio_frame->samples_per_channel_);
+ audio_frame->num_channels_ = sync_buffer_->Channels();
+ stats_.ExpandedNoiseSamples(output_size_samples_);
+ *muted = true;
+ return 0;
+ }
+
int return_value = GetDecision(&operation, &packet_list, &dtmf_event,
&play_dtmf);
if (return_value != 0) {
@@ -806,6 +854,11 @@ int NetEqImpl::GetAudioInternal(AudioFrame* audio_frame) {
vad_->Update(decoded_buffer_.get(), static_cast<size_t>(length), speech_type,
sid_frame_available, fs_hz_);
+ if (sid_frame_available || speech_type == AudioDecoder::kComfortNoise) {
+ // Start a new stopwatch since we are decoding a new CNG packet.
+ generated_noise_stopwatch_ = tick_timer_->GetNewStopwatch();
+ }
+
algorithm_buffer_->Clear();
switch (operation) {
case kNormal: {
@@ -884,6 +937,7 @@ int NetEqImpl::GetAudioInternal(AudioFrame* audio_frame) {
return kInvalidOperation;
}
} // End of switch.
+ last_operation_ = operation;
if (return_value < 0) {
return return_value;
}
@@ -978,6 +1032,12 @@ int NetEqImpl::GetAudioInternal(AudioFrame* audio_frame) {
: timestamp_scaler_->ToExternal(playout_timestamp_) -
static_cast<uint32_t>(audio_frame->samples_per_channel_);
+ if (!(last_mode_ == kModeRfc3389Cng ||
+ last_mode_ == kModeCodecInternalCng ||
+ last_mode_ == kModeExpand)) {
+ generated_noise_stopwatch_.reset();
+ }
+
if (decode_return_value) return decode_return_value;
return return_value;
}
@@ -990,10 +1050,6 @@ int NetEqImpl::GetDecision(Operations* operation,
*play_dtmf = false;
*operation = kUndefined;
- // Increment time counters.
- packet_buffer_->IncrementWaitingTimes();
- stats_.IncreaseCounter(output_size_samples_, fs_hz_);
-
assert(sync_buffer_.get());
uint32_t end_timestamp = sync_buffer_->end_timestamp();
if (!new_codec_) {
@@ -1002,14 +1058,22 @@ int NetEqImpl::GetDecision(Operations* operation,
}
const RTPHeader* header = packet_buffer_->NextRtpHeader();
+ RTC_DCHECK(!generated_noise_stopwatch_ ||
+ generated_noise_stopwatch_->ElapsedTicks() >= 1);
+ uint64_t generated_noise_samples =
+ generated_noise_stopwatch_
+ ? (generated_noise_stopwatch_->ElapsedTicks() - 1) *
+ output_size_samples_ +
+ decision_logic_->noise_fast_forward()
+ : 0;
+
if (decision_logic_->CngRfc3389On() || last_mode_ == kModeRfc3389Cng) {
// Because of timestamp peculiarities, we have to "manually" disallow using
// a CNG packet with the same timestamp as the one that was last played.
// This can happen when using redundancy and will cause the timing to shift.
while (header && decoder_database_->IsComfortNoise(header->payloadType) &&
(end_timestamp >= header->timestamp ||
- end_timestamp + decision_logic_->generated_noise_samples() >
- header->timestamp)) {
+ end_timestamp + generated_noise_samples > header->timestamp)) {
// Don't use this packet, discard it.
if (packet_buffer_->DiscardNextPacket() != PacketBuffer::kOK) {
assert(false); // Must be ok by design.
@@ -1037,7 +1101,7 @@ int NetEqImpl::GetDecision(Operations* operation,
// Check if it is time to play a DTMF event.
if (dtmf_buffer_->GetEvent(
static_cast<uint32_t>(
- end_timestamp + decision_logic_->generated_noise_samples()),
+ end_timestamp + generated_noise_samples),
dtmf_event)) {
*play_dtmf = true;
}
@@ -1045,13 +1109,14 @@ int NetEqImpl::GetDecision(Operations* operation,
// Get instruction.
assert(sync_buffer_.get());
assert(expand_.get());
- *operation = decision_logic_->GetDecision(*sync_buffer_,
- *expand_,
- decoder_frame_length_,
- header,
- last_mode_,
- *play_dtmf,
- &reset_decoder_);
+ generated_noise_samples =
+ generated_noise_stopwatch_
+ ? generated_noise_stopwatch_->ElapsedTicks() * output_size_samples_ +
+ decision_logic_->noise_fast_forward()
+ : 0;
+ *operation = decision_logic_->GetDecision(
+ *sync_buffer_, *expand_, decoder_frame_length_, header, last_mode_,
+ *play_dtmf, generated_noise_samples, &reset_decoder_);
// Check if we already have enough samples in the |sync_buffer_|. If so,
// change decision to normal, unless the decision was merge, accelerate, or
@@ -1124,15 +1189,19 @@ int NetEqImpl::GetDecision(Operations* operation,
// TODO(hlundin): Write test for this.
// Update timestamp.
timestamp_ = end_timestamp;
- if (decision_logic_->generated_noise_samples() > 0 &&
- last_mode_ != kModeDtmf) {
+ const uint64_t generated_noise_samples =
+ generated_noise_stopwatch_
+ ? generated_noise_stopwatch_->ElapsedTicks() *
+ output_size_samples_ +
+ decision_logic_->noise_fast_forward()
+ : 0;
+ if (generated_noise_samples > 0 && last_mode_ != kModeDtmf) {
// Make a jump in timestamp due to the recently played comfort noise.
uint32_t timestamp_jump =
- static_cast<uint32_t>(decision_logic_->generated_noise_samples());
+ static_cast<uint32_t>(generated_noise_samples);
sync_buffer_->IncreaseEndTimestamp(timestamp_jump);
timestamp_ += timestamp_jump;
}
- decision_logic_->set_generated_noise_samples(0);
return 0;
}
case kAccelerate:
@@ -1215,9 +1284,6 @@ int NetEqImpl::GetDecision(Operations* operation,
// We are about to decode and use a non-CNG packet.
decision_logic_->SetCngOff();
}
- // Reset CNG timestamp as a new packet will be delivered.
- // (Also if this is a CNG packet, since playedOutTS is updated.)
- decision_logic_->set_generated_noise_samples(0);
extracted_samples = ExtractPackets(required_samples, packet_list);
if (extracted_samples < 0) {
@@ -1297,7 +1363,7 @@ int NetEqImpl::Decode(PacketList* packet_list, Operations* operation,
decoder->Reset();
// Reset comfort noise decoder.
- AudioDecoder* cng_decoder = decoder_database_->GetActiveCngDecoder();
+ ComfortNoiseDecoder* cng_decoder = decoder_database_->GetActiveCngDecoder();
if (cng_decoder)
cng_decoder->Reset();
@@ -1550,6 +1616,12 @@ int NetEqImpl::DoExpand(bool play_dtmf) {
if (!play_dtmf) {
dtmf_tone_generator_->Reset();
}
+
+ if (!generated_noise_stopwatch_) {
+ // Start a new stopwatch since we may be covering for a lost CNG packet.
+ generated_noise_stopwatch_ = tick_timer_->GetNewStopwatch();
+ }
+
return 0;
}
@@ -1920,8 +1992,7 @@ int NetEqImpl::ExtractPackets(size_t required_samples,
return -1;
}
stats_.PacketsDiscarded(discard_count);
- // Store waiting time in ms; packets->waiting_time is in "output blocks".
- stats_.StoreWaitingTime(packet->waiting_time * kOutputSizeMs);
+ stats_.StoreWaitingTime(packet->waiting_time->ElapsedMs());
assert(packet->payload_length > 0);
packet_list->push_back(packet); // Store packet in list.
@@ -1955,7 +2026,7 @@ int NetEqImpl::ExtractPackets(size_t required_samples,
stats_.SecondaryDecodedSamples(packet_duration);
}
}
- } else {
+ } else if (!decoder_database_->IsComfortNoise(packet->header.payloadType)) {
LOG(LS_WARNING) << "Unknown payload type "
<< static_cast<int>(packet->header.payloadType);
assert(false);
@@ -2023,7 +2094,7 @@ void NetEqImpl::SetSampleRateAndChannels(int fs_hz, size_t channels) {
mute_factor_array_[i] = 16384; // 1.0 in Q14.
}
- AudioDecoder* cng_decoder = decoder_database_->GetActiveCngDecoder();
+ ComfortNoiseDecoder* cng_decoder = decoder_database_->GetActiveCngDecoder();
if (cng_decoder)
cng_decoder->Reset();
@@ -2094,11 +2165,9 @@ NetEqImpl::OutputType NetEqImpl::LastOutputType() {
}
void NetEqImpl::CreateDecisionLogic() {
- decision_logic_.reset(DecisionLogic::Create(fs_hz_, output_size_samples_,
- playout_mode_,
- decoder_database_.get(),
- *packet_buffer_.get(),
- delay_manager_.get(),
- buffer_level_filter_.get()));
+ decision_logic_.reset(DecisionLogic::Create(
+ fs_hz_, output_size_samples_, playout_mode_, decoder_database_.get(),
+ *packet_buffer_.get(), delay_manager_.get(), buffer_level_filter_.get(),
+ tick_timer_.get()));
}
} // namespace webrtc
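The manual counters (packet waiting times counted in output blocks, generated_noise_samples accumulated inside DecisionLogic) are replaced above by a shared TickTimer that NetEqImpl increments once per GetAudio() call; interested parties just hold a Stopwatch and read off elapsed ticks, as in stats_.StoreWaitingTime(packet->waiting_time->ElapsedMs()). A standalone sketch of that pattern follows (plain C++, not the tick_timer.h actually added by this change):

#include <cstdint>
#include <memory>

// Sketch of the tick-timer pattern only.
class TickTimerSketch {
 public:
  class Stopwatch {
   public:
    explicit Stopwatch(const uint64_t& ticks) : ticks_(ticks), start_(ticks) {}
    uint64_t ElapsedTicks() const { return ticks_ - start_; }
   private:
    const uint64_t& ticks_;  // Refers to the owning timer's counter.
    const uint64_t start_;
  };

  void Increment() { ++ticks_; }  // Called once per 10 ms GetAudio() call.
  std::unique_ptr<Stopwatch> GetNewStopwatch() const {
    return std::unique_ptr<Stopwatch>(new Stopwatch(ticks_));
  }

 private:
  uint64_t ticks_ = 0;
};

// Usage: the owner calls Increment() every frame; a packet stores
// GetNewStopwatch() on insertion and later reports ElapsedTicks() * 10 as its
// waiting time in milliseconds (10 ms per tick). A stopwatch must not outlive
// its timer.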
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_impl.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_impl.h
index 75055a7b47f..cc5550411f2 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_impl.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_impl.h
@@ -24,6 +24,7 @@
#include "webrtc/modules/audio_coding/neteq/random_vector.h"
#include "webrtc/modules/audio_coding/neteq/rtcp.h"
#include "webrtc/modules/audio_coding/neteq/statistics_calculator.h"
+#include "webrtc/modules/audio_coding/neteq/tick_timer.h"
#include "webrtc/typedefs.h"
namespace webrtc {
@@ -65,21 +66,33 @@ class NetEqImpl : public webrtc::NetEq {
kVadPassive
};
- // Creates a new NetEqImpl object. The object will assume ownership of all
- // injected dependencies, and will delete them when done.
+ struct Dependencies {
+ // The constructor populates the Dependencies struct with the default
+ // implementations of the objects. They can all be replaced by the user
+ // before sending the struct to the NetEqImpl constructor. However, there
+ // are dependencies between some of the classes inside the struct, so
+ // swapping out one may make it necessary to re-create another one.
+ explicit Dependencies(const NetEq::Config& config);
+ ~Dependencies();
+
+ std::unique_ptr<TickTimer> tick_timer;
+ std::unique_ptr<BufferLevelFilter> buffer_level_filter;
+ std::unique_ptr<DecoderDatabase> decoder_database;
+ std::unique_ptr<DelayPeakDetector> delay_peak_detector;
+ std::unique_ptr<DelayManager> delay_manager;
+ std::unique_ptr<DtmfBuffer> dtmf_buffer;
+ std::unique_ptr<DtmfToneGenerator> dtmf_tone_generator;
+ std::unique_ptr<PacketBuffer> packet_buffer;
+ std::unique_ptr<PayloadSplitter> payload_splitter;
+ std::unique_ptr<TimestampScaler> timestamp_scaler;
+ std::unique_ptr<AccelerateFactory> accelerate_factory;
+ std::unique_ptr<ExpandFactory> expand_factory;
+ std::unique_ptr<PreemptiveExpandFactory> preemptive_expand_factory;
+ };
+
+ // Creates a new NetEqImpl object.
NetEqImpl(const NetEq::Config& config,
- BufferLevelFilter* buffer_level_filter,
- DecoderDatabase* decoder_database,
- DelayManager* delay_manager,
- DelayPeakDetector* delay_peak_detector,
- DtmfBuffer* dtmf_buffer,
- DtmfToneGenerator* dtmf_tone_generator,
- PacketBuffer* packet_buffer,
- PayloadSplitter* payload_splitter,
- TimestampScaler* timestamp_scaler,
- AccelerateFactory* accelerate_factory,
- ExpandFactory* expand_factory,
- PreemptiveExpandFactory* preemptive_expand_factory,
+ Dependencies&& deps,
bool create_components = true);
~NetEqImpl() override;
@@ -104,7 +117,7 @@ class NetEqImpl : public webrtc::NetEq {
int InsertSyncPacket(const WebRtcRTPHeader& rtp_header,
uint32_t receive_timestamp) override;
- int GetAudio(AudioFrame* audio_frame) override;
+ int GetAudio(AudioFrame* audio_frame, bool* muted) override;
int RegisterPayloadType(NetEqDecoder codec,
const std::string& codec_name,
@@ -191,12 +204,15 @@ class NetEqImpl : public webrtc::NetEq {
// This accessor method is only intended for testing purposes.
const SyncBuffer* sync_buffer_for_test() const;
+ Operations last_operation_for_test() const;
protected:
static const int kOutputSizeMs = 10;
- static const size_t kMaxFrameSize = 2880; // 60 ms @ 48 kHz.
+ static const size_t kMaxFrameSize = 5760; // 120 ms @ 48 kHz.
// TODO(hlundin): Provide a better value for kSyncBufferSize.
- static const size_t kSyncBufferSize = 2 * kMaxFrameSize;
+ // Current value is kMaxFrameSize + 60 ms * 48 kHz, which is enough for
+ // calculating correlations of current frame against history.
+ static const size_t kSyncBufferSize = kMaxFrameSize + 60 * 48;
// Inserts a new packet into NetEq. This is used by the InsertPacket method
// above. Returns 0 on success, otherwise an error code.
@@ -209,7 +225,7 @@ class NetEqImpl : public webrtc::NetEq {
// Delivers 10 ms of audio data. The data is written to |audio_frame|.
// Returns 0 on success, otherwise an error code.
- int GetAudioInternal(AudioFrame* audio_frame)
+ int GetAudioInternal(AudioFrame* audio_frame, bool* muted)
EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
// Provides a decision to the GetAudioInternal method. The decision what to
@@ -328,6 +344,7 @@ class NetEqImpl : public webrtc::NetEq {
virtual void CreateDecisionLogic() EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
rtc::CriticalSection crit_sect_;
+ const std::unique_ptr<TickTimer> tick_timer_ GUARDED_BY(crit_sect_);
const std::unique_ptr<BufferLevelFilter> buffer_level_filter_
GUARDED_BY(crit_sect_);
const std::unique_ptr<DecoderDatabase> decoder_database_
@@ -369,6 +386,7 @@ class NetEqImpl : public webrtc::NetEq {
size_t output_size_samples_ GUARDED_BY(crit_sect_);
size_t decoder_frame_length_ GUARDED_BY(crit_sect_);
Modes last_mode_ GUARDED_BY(crit_sect_);
+ Operations last_operation_ GUARDED_BY(crit_sect_);
std::unique_ptr<int16_t[]> mute_factor_array_ GUARDED_BY(crit_sect_);
size_t decoded_buffer_length_ GUARDED_BY(crit_sect_);
std::unique_ptr<int16_t[]> decoded_buffer_ GUARDED_BY(crit_sect_);
@@ -387,8 +405,11 @@ class NetEqImpl : public webrtc::NetEq {
bool enable_fast_accelerate_ GUARDED_BY(crit_sect_);
std::unique_ptr<Nack> nack_ GUARDED_BY(crit_sect_);
bool nack_enabled_ GUARDED_BY(crit_sect_);
+ const bool enable_muted_state_ GUARDED_BY(crit_sect_);
AudioFrame::VADActivity last_vad_activity_ GUARDED_BY(crit_sect_) =
AudioFrame::kVadPassive;
+ std::unique_ptr<TickTimer::Stopwatch> generated_noise_stopwatch_
+ GUARDED_BY(crit_sect_);
private:
RTC_DISALLOW_COPY_AND_ASSIGN(NetEqImpl);
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_impl_unittest.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_impl_unittest.cc
index 561c0459bfb..43db87f4fa7 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_impl_unittest.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_impl_unittest.cc
@@ -8,6 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
+#include <memory>
+
#include "webrtc/modules/audio_coding/neteq/include/neteq.h"
#include "webrtc/modules/audio_coding/neteq/neteq_impl.h"
@@ -54,111 +56,82 @@ int DeletePacketsAndReturnOk(PacketList* packet_list) {
class NetEqImplTest : public ::testing::Test {
protected:
- NetEqImplTest()
- : neteq_(NULL),
- config_(),
- mock_buffer_level_filter_(NULL),
- buffer_level_filter_(NULL),
- use_mock_buffer_level_filter_(true),
- mock_decoder_database_(NULL),
- decoder_database_(NULL),
- use_mock_decoder_database_(true),
- mock_delay_peak_detector_(NULL),
- delay_peak_detector_(NULL),
- use_mock_delay_peak_detector_(true),
- mock_delay_manager_(NULL),
- delay_manager_(NULL),
- use_mock_delay_manager_(true),
- mock_dtmf_buffer_(NULL),
- dtmf_buffer_(NULL),
- use_mock_dtmf_buffer_(true),
- mock_dtmf_tone_generator_(NULL),
- dtmf_tone_generator_(NULL),
- use_mock_dtmf_tone_generator_(true),
- mock_packet_buffer_(NULL),
- packet_buffer_(NULL),
- use_mock_packet_buffer_(true),
- mock_payload_splitter_(NULL),
- payload_splitter_(NULL),
- use_mock_payload_splitter_(true),
- timestamp_scaler_(NULL) {
- config_.sample_rate_hz = 8000;
- }
+ NetEqImplTest() { config_.sample_rate_hz = 8000; }
void CreateInstance() {
+ NetEqImpl::Dependencies deps(config_);
+
+ // Get a local pointer to NetEq's TickTimer object.
+ tick_timer_ = deps.tick_timer.get();
+
if (use_mock_buffer_level_filter_) {
- mock_buffer_level_filter_ = new MockBufferLevelFilter;
- buffer_level_filter_ = mock_buffer_level_filter_;
- } else {
- buffer_level_filter_ = new BufferLevelFilter;
+ std::unique_ptr<MockBufferLevelFilter> mock(new MockBufferLevelFilter);
+ mock_buffer_level_filter_ = mock.get();
+ deps.buffer_level_filter = std::move(mock);
}
+ buffer_level_filter_ = deps.buffer_level_filter.get();
+
if (use_mock_decoder_database_) {
- mock_decoder_database_ = new MockDecoderDatabase;
+ std::unique_ptr<MockDecoderDatabase> mock(new MockDecoderDatabase);
+ mock_decoder_database_ = mock.get();
EXPECT_CALL(*mock_decoder_database_, GetActiveCngDecoder())
.WillOnce(ReturnNull());
- decoder_database_ = mock_decoder_database_;
- } else {
- decoder_database_ = new DecoderDatabase;
+ deps.decoder_database = std::move(mock);
}
+ decoder_database_ = deps.decoder_database.get();
+
if (use_mock_delay_peak_detector_) {
- mock_delay_peak_detector_ = new MockDelayPeakDetector;
+ std::unique_ptr<MockDelayPeakDetector> mock(
+ new MockDelayPeakDetector(tick_timer_));
+ mock_delay_peak_detector_ = mock.get();
EXPECT_CALL(*mock_delay_peak_detector_, Reset()).Times(1);
- delay_peak_detector_ = mock_delay_peak_detector_;
- } else {
- delay_peak_detector_ = new DelayPeakDetector;
+ deps.delay_peak_detector = std::move(mock);
}
+ delay_peak_detector_ = deps.delay_peak_detector.get();
+
if (use_mock_delay_manager_) {
- mock_delay_manager_ = new MockDelayManager(config_.max_packets_in_buffer,
- delay_peak_detector_);
+ std::unique_ptr<MockDelayManager> mock(new MockDelayManager(
+ config_.max_packets_in_buffer, delay_peak_detector_, tick_timer_));
+ mock_delay_manager_ = mock.get();
EXPECT_CALL(*mock_delay_manager_, set_streaming_mode(false)).Times(1);
- delay_manager_ = mock_delay_manager_;
- } else {
- delay_manager_ =
- new DelayManager(config_.max_packets_in_buffer, delay_peak_detector_);
+ deps.delay_manager = std::move(mock);
}
+ delay_manager_ = deps.delay_manager.get();
+
if (use_mock_dtmf_buffer_) {
- mock_dtmf_buffer_ = new MockDtmfBuffer(config_.sample_rate_hz);
- dtmf_buffer_ = mock_dtmf_buffer_;
- } else {
- dtmf_buffer_ = new DtmfBuffer(config_.sample_rate_hz);
+ std::unique_ptr<MockDtmfBuffer> mock(
+ new MockDtmfBuffer(config_.sample_rate_hz));
+ mock_dtmf_buffer_ = mock.get();
+ deps.dtmf_buffer = std::move(mock);
}
+ dtmf_buffer_ = deps.dtmf_buffer.get();
+
if (use_mock_dtmf_tone_generator_) {
- mock_dtmf_tone_generator_ = new MockDtmfToneGenerator;
- dtmf_tone_generator_ = mock_dtmf_tone_generator_;
- } else {
- dtmf_tone_generator_ = new DtmfToneGenerator;
+ std::unique_ptr<MockDtmfToneGenerator> mock(new MockDtmfToneGenerator);
+ mock_dtmf_tone_generator_ = mock.get();
+ deps.dtmf_tone_generator = std::move(mock);
}
+ dtmf_tone_generator_ = deps.dtmf_tone_generator.get();
+
if (use_mock_packet_buffer_) {
- mock_packet_buffer_ = new MockPacketBuffer(config_.max_packets_in_buffer);
- packet_buffer_ = mock_packet_buffer_;
- } else {
- packet_buffer_ = new PacketBuffer(config_.max_packets_in_buffer);
+ std::unique_ptr<MockPacketBuffer> mock(
+ new MockPacketBuffer(config_.max_packets_in_buffer, tick_timer_));
+ mock_packet_buffer_ = mock.get();
+ deps.packet_buffer = std::move(mock);
}
+ packet_buffer_ = deps.packet_buffer.get();
+
if (use_mock_payload_splitter_) {
- mock_payload_splitter_ = new MockPayloadSplitter;
- payload_splitter_ = mock_payload_splitter_;
- } else {
- payload_splitter_ = new PayloadSplitter;
+ std::unique_ptr<MockPayloadSplitter> mock(new MockPayloadSplitter);
+ mock_payload_splitter_ = mock.get();
+ deps.payload_splitter = std::move(mock);
}
- timestamp_scaler_ = new TimestampScaler(*decoder_database_);
- AccelerateFactory* accelerate_factory = new AccelerateFactory;
- ExpandFactory* expand_factory = new ExpandFactory;
- PreemptiveExpandFactory* preemptive_expand_factory =
- new PreemptiveExpandFactory;
-
- neteq_ = new NetEqImpl(config_,
- buffer_level_filter_,
- decoder_database_,
- delay_manager_,
- delay_peak_detector_,
- dtmf_buffer_,
- dtmf_tone_generator_,
- packet_buffer_,
- payload_splitter_,
- timestamp_scaler_,
- accelerate_factory,
- expand_factory,
- preemptive_expand_factory);
+ payload_splitter_ = deps.payload_splitter.get();
+
+ deps.timestamp_scaler = std::unique_ptr<TimestampScaler>(
+ new TimestampScaler(*deps.decoder_database.get()));
+
+ neteq_.reset(new NetEqImpl(config_, std::move(deps)));
ASSERT_TRUE(neteq_ != NULL);
}
@@ -196,36 +169,35 @@ class NetEqImplTest : public ::testing::Test {
if (use_mock_packet_buffer_) {
EXPECT_CALL(*mock_packet_buffer_, Die()).Times(1);
}
- delete neteq_;
}
- NetEqImpl* neteq_;
+ std::unique_ptr<NetEqImpl> neteq_;
NetEq::Config config_;
- MockBufferLevelFilter* mock_buffer_level_filter_;
- BufferLevelFilter* buffer_level_filter_;
- bool use_mock_buffer_level_filter_;
- MockDecoderDatabase* mock_decoder_database_;
- DecoderDatabase* decoder_database_;
- bool use_mock_decoder_database_;
- MockDelayPeakDetector* mock_delay_peak_detector_;
- DelayPeakDetector* delay_peak_detector_;
- bool use_mock_delay_peak_detector_;
- MockDelayManager* mock_delay_manager_;
- DelayManager* delay_manager_;
- bool use_mock_delay_manager_;
- MockDtmfBuffer* mock_dtmf_buffer_;
- DtmfBuffer* dtmf_buffer_;
- bool use_mock_dtmf_buffer_;
- MockDtmfToneGenerator* mock_dtmf_tone_generator_;
- DtmfToneGenerator* dtmf_tone_generator_;
- bool use_mock_dtmf_tone_generator_;
- MockPacketBuffer* mock_packet_buffer_;
- PacketBuffer* packet_buffer_;
- bool use_mock_packet_buffer_;
- MockPayloadSplitter* mock_payload_splitter_;
- PayloadSplitter* payload_splitter_;
- bool use_mock_payload_splitter_;
- TimestampScaler* timestamp_scaler_;
+ TickTimer* tick_timer_ = nullptr;
+ MockBufferLevelFilter* mock_buffer_level_filter_ = nullptr;
+ BufferLevelFilter* buffer_level_filter_ = nullptr;
+ bool use_mock_buffer_level_filter_ = true;
+ MockDecoderDatabase* mock_decoder_database_ = nullptr;
+ DecoderDatabase* decoder_database_ = nullptr;
+ bool use_mock_decoder_database_ = true;
+ MockDelayPeakDetector* mock_delay_peak_detector_ = nullptr;
+ DelayPeakDetector* delay_peak_detector_ = nullptr;
+ bool use_mock_delay_peak_detector_ = true;
+ MockDelayManager* mock_delay_manager_ = nullptr;
+ DelayManager* delay_manager_ = nullptr;
+ bool use_mock_delay_manager_ = true;
+ MockDtmfBuffer* mock_dtmf_buffer_ = nullptr;
+ DtmfBuffer* dtmf_buffer_ = nullptr;
+ bool use_mock_dtmf_buffer_ = true;
+ MockDtmfToneGenerator* mock_dtmf_tone_generator_ = nullptr;
+ DtmfToneGenerator* dtmf_tone_generator_ = nullptr;
+ bool use_mock_dtmf_tone_generator_ = true;
+ MockPacketBuffer* mock_packet_buffer_ = nullptr;
+ PacketBuffer* packet_buffer_ = nullptr;
+ bool use_mock_packet_buffer_ = true;
+ MockPayloadSplitter* mock_payload_splitter_ = nullptr;
+ PayloadSplitter* payload_splitter_ = nullptr;
+ bool use_mock_payload_splitter_ = true;
};
@@ -301,8 +273,8 @@ TEST_F(NetEqImplTest, InsertPacket) {
.WillRepeatedly(Return(&mock_decoder));
EXPECT_CALL(*mock_decoder_database_, IsComfortNoise(kPayloadType))
.WillRepeatedly(Return(false)); // This is not CNG.
- DecoderDatabase::DecoderInfo info;
- info.codec_type = NetEqDecoder::kDecoderPCMu;
+ DecoderDatabase::DecoderInfo info(NetEqDecoder::kDecoderPCMu, "", 8000,
+ nullptr);
EXPECT_CALL(*mock_decoder_database_, GetDecoderInfo(kPayloadType))
.WillRepeatedly(Return(&info));
@@ -356,6 +328,9 @@ TEST_F(NetEqImplTest, InsertPacket) {
}
// Expectations for payload splitter.
+ EXPECT_CALL(*mock_payload_splitter_, SplitFec(_, _))
+ .Times(2)
+ .WillRepeatedly(Return(PayloadSplitter::kOK));
EXPECT_CALL(*mock_payload_splitter_, SplitAudio(_, _))
.Times(2)
.WillRepeatedly(Return(PayloadSplitter::kOK));
@@ -466,7 +441,9 @@ TEST_F(NetEqImplTest, VerifyTimestampPropagation) {
// Pull audio once.
const size_t kMaxOutputSize = static_cast<size_t>(10 * kSampleRateHz / 1000);
AudioFrame output;
- EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output));
+ bool muted;
+ EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted));
+ ASSERT_FALSE(muted);
ASSERT_EQ(kMaxOutputSize, output.samples_per_channel_);
EXPECT_EQ(1u, output.num_channels_);
EXPECT_EQ(AudioFrame::kNormalSpeech, output.speech_type_);
@@ -521,6 +498,8 @@ TEST_F(NetEqImplTest, ReorderedPacket) {
EXPECT_CALL(mock_decoder, Channels()).WillRepeatedly(Return(1));
EXPECT_CALL(mock_decoder, IncomingPacket(_, kPayloadLengthBytes, _, _, _))
.WillRepeatedly(Return(0));
+ EXPECT_CALL(mock_decoder, PacketDuration(_, kPayloadLengthBytes))
+ .WillRepeatedly(Return(kPayloadLengthSamples));
int16_t dummy_output[kPayloadLengthSamples] = {0};
// The below expectation will make the mock decoder write
// |kPayloadLengthSamples| zeros to the output array, and mark it as speech.
@@ -541,7 +520,8 @@ TEST_F(NetEqImplTest, ReorderedPacket) {
// Pull audio once.
const size_t kMaxOutputSize = static_cast<size_t>(10 * kSampleRateHz / 1000);
AudioFrame output;
- EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output));
+ bool muted;
+ EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted));
ASSERT_EQ(kMaxOutputSize, output.samples_per_channel_);
EXPECT_EQ(1u, output.num_channels_);
EXPECT_EQ(AudioFrame::kNormalSpeech, output.speech_type_);
@@ -569,7 +549,7 @@ TEST_F(NetEqImplTest, ReorderedPacket) {
Return(kPayloadLengthSamples)));
// Pull audio once.
- EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output));
+ EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted));
ASSERT_EQ(kMaxOutputSize, output.samples_per_channel_);
EXPECT_EQ(1u, output.num_channels_);
EXPECT_EQ(AudioFrame::kNormalSpeech, output.speech_type_);
@@ -609,7 +589,8 @@ TEST_F(NetEqImplTest, FirstPacketUnknown) {
// Pull audio once.
const size_t kMaxOutputSize = static_cast<size_t>(10 * kSampleRateHz / 1000);
AudioFrame output;
- EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output));
+ bool muted;
+ EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted));
ASSERT_LE(output.samples_per_channel_, kMaxOutputSize);
EXPECT_EQ(kMaxOutputSize, output.samples_per_channel_);
EXPECT_EQ(1u, output.num_channels_);
@@ -630,7 +611,7 @@ TEST_F(NetEqImplTest, FirstPacketUnknown) {
// Pull audio repeatedly and make sure we get normal output, that is not PLC.
for (size_t i = 0; i < 3; ++i) {
- EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output));
+ EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted));
ASSERT_LE(output.samples_per_channel_, kMaxOutputSize);
EXPECT_EQ(kMaxOutputSize, output.samples_per_channel_);
EXPECT_EQ(1u, output.num_channels_);
@@ -734,7 +715,8 @@ TEST_F(NetEqImplTest, CodecInternalCng) {
50 * kSampleRateKhz, 10 * kSampleRateKhz
};
- EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output));
+ bool muted;
+ EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted));
rtc::Optional<uint32_t> last_timestamp = neteq_->GetPlayoutTimestamp();
ASSERT_TRUE(last_timestamp);
@@ -756,7 +738,7 @@ TEST_F(NetEqImplTest, CodecInternalCng) {
ASSERT_EQ(kMaxOutputSize, output.samples_per_channel_);
EXPECT_EQ(1u, output.num_channels_);
EXPECT_EQ(expected_type[i - 1], output.speech_type_);
- EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output));
+ EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted));
SCOPED_TRACE("");
verify_timestamp(neteq_->GetPlayoutTimestamp(), i);
}
@@ -772,7 +754,7 @@ TEST_F(NetEqImplTest, CodecInternalCng) {
ASSERT_EQ(kMaxOutputSize, output.samples_per_channel_);
EXPECT_EQ(1u, output.num_channels_);
EXPECT_EQ(expected_type[i - 1], output.speech_type_);
- EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output));
+ EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted));
SCOPED_TRACE("");
verify_timestamp(neteq_->GetPlayoutTimestamp(), i);
}
@@ -786,7 +768,7 @@ TEST_F(NetEqImplTest, CodecInternalCng) {
TEST_F(NetEqImplTest, UnsupportedDecoder) {
UseNoMocks();
CreateInstance();
- static const size_t kNetEqMaxFrameSize = 2880; // 60 ms @ 48 kHz.
+ static const size_t kNetEqMaxFrameSize = 5760; // 120 ms @ 48 kHz.
static const size_t kChannels = 2;
const uint8_t kPayloadType = 17; // Just an arbitrary number.
@@ -796,7 +778,7 @@ TEST_F(NetEqImplTest, UnsupportedDecoder) {
const size_t kPayloadLengthSamples =
static_cast<size_t>(10 * kSampleRateHz / 1000); // 10 ms.
const size_t kPayloadLengthBytes = 1;
- uint8_t payload[kPayloadLengthBytes]= {0};
+ uint8_t payload[kPayloadLengthBytes] = {0};
int16_t dummy_output[kPayloadLengthSamples * kChannels] = {0};
WebRtcRTPHeader rtp_header;
rtp_header.header.payloadType = kPayloadType;
@@ -806,11 +788,15 @@ TEST_F(NetEqImplTest, UnsupportedDecoder) {
class MockAudioDecoder : public AudioDecoder {
public:
- void Reset() override {}
+ // TODO(nisse): Valid overrides commented out, because the gmock
+ // methods don't use any override declarations, and we want to avoid
+ // warnings from -Winconsistent-missing-override. See
+ // http://crbug.com/428099.
+ void Reset() /* override */ {}
MOCK_CONST_METHOD2(PacketDuration, int(const uint8_t*, size_t));
MOCK_METHOD5(DecodeInternal, int(const uint8_t*, size_t, int, int16_t*,
SpeechType*));
- size_t Channels() const override { return kChannels; }
+ size_t Channels() const /* override */ { return kChannels; }
} decoder_;
const uint8_t kFirstPayloadValue = 1;
@@ -860,9 +846,10 @@ TEST_F(NetEqImplTest, UnsupportedDecoder) {
neteq_->InsertPacket(rtp_header, payload, kReceiveTime));
AudioFrame output;
+ bool muted;
// First call to GetAudio will try to decode the "faulty" packet.
// Expect kFail return value...
- EXPECT_EQ(NetEq::kFail, neteq_->GetAudio(&output));
+ EXPECT_EQ(NetEq::kFail, neteq_->GetAudio(&output, &muted));
// ... and kOtherDecoderError error code.
EXPECT_EQ(NetEq::kOtherDecoderError, neteq_->LastError());
// Output size and number of channels should be correct.
@@ -872,7 +859,7 @@ TEST_F(NetEqImplTest, UnsupportedDecoder) {
// Second call to GetAudio will decode the packet that is ok. No errors are
// expected.
- EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output));
+ EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted));
EXPECT_EQ(kExpectedOutputSize, output.samples_per_channel_ * kChannels);
EXPECT_EQ(kChannels, output.num_channels_);
}
@@ -965,7 +952,8 @@ TEST_F(NetEqImplTest, DecodedPayloadTooShort) {
// Pull audio once.
const size_t kMaxOutputSize = static_cast<size_t>(10 * kSampleRateHz / 1000);
AudioFrame output;
- EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output));
+ bool muted;
+ EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted));
ASSERT_EQ(kMaxOutputSize, output.samples_per_channel_);
EXPECT_EQ(1u, output.num_channels_);
EXPECT_EQ(AudioFrame::kNormalSpeech, output.speech_type_);
@@ -1057,13 +1045,14 @@ TEST_F(NetEqImplTest, DecodingError) {
// Pull audio.
const size_t kMaxOutputSize = static_cast<size_t>(10 * kSampleRateHz / 1000);
AudioFrame output;
- EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output));
+ bool muted;
+ EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted));
EXPECT_EQ(kMaxOutputSize, output.samples_per_channel_);
EXPECT_EQ(1u, output.num_channels_);
EXPECT_EQ(AudioFrame::kNormalSpeech, output.speech_type_);
// Pull audio again. Decoder fails.
- EXPECT_EQ(NetEq::kFail, neteq_->GetAudio(&output));
+ EXPECT_EQ(NetEq::kFail, neteq_->GetAudio(&output, &muted));
EXPECT_EQ(NetEq::kDecoderErrorCode, neteq_->LastError());
EXPECT_EQ(kDecoderErrorCode, neteq_->LastDecoderError());
EXPECT_EQ(kMaxOutputSize, output.samples_per_channel_);
@@ -1072,13 +1061,13 @@ TEST_F(NetEqImplTest, DecodingError) {
// returned.
// Pull audio again, should continue an expansion.
- EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output));
+ EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted));
EXPECT_EQ(kMaxOutputSize, output.samples_per_channel_);
EXPECT_EQ(1u, output.num_channels_);
EXPECT_EQ(AudioFrame::kPLC, output.speech_type_);
// Pull audio again, should behave normal.
- EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output));
+ EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted));
EXPECT_EQ(kMaxOutputSize, output.samples_per_channel_);
EXPECT_EQ(1u, output.num_channels_);
EXPECT_EQ(AudioFrame::kNormalSpeech, output.speech_type_);
@@ -1166,13 +1155,14 @@ TEST_F(NetEqImplTest, DecodingErrorDuringInternalCng) {
// Pull audio.
const size_t kMaxOutputSize = static_cast<size_t>(10 * kSampleRateHz / 1000);
AudioFrame output;
- EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output));
+ bool muted;
+ EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted));
EXPECT_EQ(kMaxOutputSize, output.samples_per_channel_);
EXPECT_EQ(1u, output.num_channels_);
EXPECT_EQ(AudioFrame::kCNG, output.speech_type_);
// Pull audio again. Decoder fails.
- EXPECT_EQ(NetEq::kFail, neteq_->GetAudio(&output));
+ EXPECT_EQ(NetEq::kFail, neteq_->GetAudio(&output, &muted));
EXPECT_EQ(NetEq::kDecoderErrorCode, neteq_->LastError());
EXPECT_EQ(kDecoderErrorCode, neteq_->LastDecoderError());
EXPECT_EQ(kMaxOutputSize, output.samples_per_channel_);
@@ -1181,7 +1171,7 @@ TEST_F(NetEqImplTest, DecodingErrorDuringInternalCng) {
// returned.
// Pull audio again, should resume codec CNG.
- EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output));
+ EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted));
EXPECT_EQ(kMaxOutputSize, output.samples_per_channel_);
EXPECT_EQ(1u, output.num_channels_);
EXPECT_EQ(AudioFrame::kCNG, output.speech_type_);
@@ -1198,4 +1188,235 @@ TEST_F(NetEqImplTest, InitialLastOutputSampleRate) {
EXPECT_EQ(48000, neteq_->last_output_sample_rate_hz());
}
+TEST_F(NetEqImplTest, TickTimerIncrement) {
+ UseNoMocks();
+ CreateInstance();
+ ASSERT_TRUE(tick_timer_);
+ EXPECT_EQ(0u, tick_timer_->ticks());
+ AudioFrame output;
+ bool muted;
+ EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted));
+ EXPECT_EQ(1u, tick_timer_->ticks());
+}
+
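+// Test decoder that produces 120 ms of stereo output per decode call, filled
+// with an increasing sample ramp, and reports the speech type given at
+// construction.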
+class Decoder120ms : public AudioDecoder {
+ public:
+ Decoder120ms(SpeechType speech_type)
+ : next_value_(1),
+ speech_type_(speech_type) {}
+
+ int DecodeInternal(const uint8_t* encoded,
+ size_t encoded_len,
+ int sample_rate_hz,
+ int16_t* decoded,
+ SpeechType* speech_type) override {
+ size_t decoded_len =
+ rtc::CheckedDivExact(sample_rate_hz, 1000) * 120 * Channels();
+ for (size_t i = 0; i < decoded_len; ++i) {
+ decoded[i] = next_value_++;
+ }
+ *speech_type = speech_type_;
+ return decoded_len;
+ }
+
+ void Reset() override { next_value_ = 1; }
+ size_t Channels() const override { return 2; }
+
+ private:
+ int16_t next_value_;
+ SpeechType speech_type_;
+};
+
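+// Test fixture that feeds NetEq with 120 ms packets through an external
+// decoder, to exercise the operations on long frames.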
+class NetEqImplTest120ms : public NetEqImplTest {
+ protected:
+ NetEqImplTest120ms() : NetEqImplTest() {}
+ virtual ~NetEqImplTest120ms() {}
+
+ void CreateInstanceNoMocks() {
+ UseNoMocks();
+ CreateInstance();
+ }
+
+ void CreateInstanceWithDelayManagerMock() {
+ UseNoMocks();
+ use_mock_delay_manager_ = true;
+ CreateInstance();
+ }
+
+ uint32_t timestamp_diff_between_packets() const {
+ return rtc::CheckedDivExact(kSamplingFreq_, 1000u) * 120;
+ }
+
+ uint32_t first_timestamp() const { return 10u; }
+
+ void GetFirstPacket() {
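+    // Pull 12 x 10 ms of audio to play out the whole 120 ms of the first
+    // packet.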
+ bool muted;
+ for (int i = 0; i < 12; i++) {
+ EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output_, &muted));
+ EXPECT_FALSE(muted);
+ }
+ }
+
+ void InsertPacket(uint32_t timestamp) {
+ WebRtcRTPHeader rtp_header;
+ rtp_header.header.payloadType = kPayloadType;
+ rtp_header.header.sequenceNumber = sequence_number_;
+ rtp_header.header.timestamp = timestamp;
+ rtp_header.header.ssrc = 15;
+ const size_t kPayloadLengthBytes = 1; // This can be arbitrary.
+ uint8_t payload[kPayloadLengthBytes] = {0};
+ EXPECT_EQ(NetEq::kOK, neteq_->InsertPacket(rtp_header, payload, 10));
+ sequence_number_++;
+ }
+
+ void Register120msCodec(AudioDecoder::SpeechType speech_type) {
+ decoder_.reset(new Decoder120ms(speech_type));
+ ASSERT_EQ(2u, decoder_->Channels());
+ EXPECT_EQ(NetEq::kOK, neteq_->RegisterExternalDecoder(
+ decoder_.get(), NetEqDecoder::kDecoderOpus_2ch,
+ "120ms codec", kPayloadType, kSamplingFreq_));
+ }
+
+ std::unique_ptr<Decoder120ms> decoder_;
+ AudioFrame output_;
+ const uint32_t kPayloadType = 17;
+ const uint32_t kSamplingFreq_ = 48000;
+ uint16_t sequence_number_ = 1;
+};
+
+TEST_F(NetEqImplTest120ms, AudioRepetition) {
+ config_.playout_mode = kPlayoutFax;
+ CreateInstanceNoMocks();
+ Register120msCodec(AudioDecoder::kSpeech);
+
+ InsertPacket(first_timestamp());
+ GetFirstPacket();
+
+ bool muted;
+ EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output_, &muted));
+ EXPECT_EQ(kAudioRepetition, neteq_->last_operation_for_test());
+}
+
+TEST_F(NetEqImplTest120ms, AlternativePlc) {
+ config_.playout_mode = kPlayoutOff;
+ CreateInstanceNoMocks();
+ Register120msCodec(AudioDecoder::kSpeech);
+
+ InsertPacket(first_timestamp());
+ GetFirstPacket();
+
+ bool muted;
+ EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output_, &muted));
+ EXPECT_EQ(kAlternativePlc, neteq_->last_operation_for_test());
+}
+
+TEST_F(NetEqImplTest120ms, CodecInternalCng) {
+ CreateInstanceNoMocks();
+ Register120msCodec(AudioDecoder::kComfortNoise);
+
+ InsertPacket(first_timestamp());
+ GetFirstPacket();
+
+ bool muted;
+ EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output_, &muted));
+ EXPECT_EQ(kCodecInternalCng, neteq_->last_operation_for_test());
+}
+
+TEST_F(NetEqImplTest120ms, Normal) {
+ CreateInstanceNoMocks();
+ Register120msCodec(AudioDecoder::kSpeech);
+
+ InsertPacket(first_timestamp());
+ GetFirstPacket();
+
+ EXPECT_EQ(kNormal, neteq_->last_operation_for_test());
+}
+
+TEST_F(NetEqImplTest120ms, Merge) {
+ CreateInstanceWithDelayManagerMock();
+
+ Register120msCodec(AudioDecoder::kSpeech);
+ InsertPacket(first_timestamp());
+
+ GetFirstPacket();
+ bool muted;
+ EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output_, &muted));
+
+ InsertPacket(first_timestamp() + 2 * timestamp_diff_between_packets());
+
+ // Delay manager reports a target level which should cause a Merge.
+ EXPECT_CALL(*mock_delay_manager_, TargetLevel()).WillOnce(Return(-10));
+
+ EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output_, &muted));
+ EXPECT_EQ(kMerge, neteq_->last_operation_for_test());
+}
+
+TEST_F(NetEqImplTest120ms, Expand) {
+ CreateInstanceNoMocks();
+ Register120msCodec(AudioDecoder::kSpeech);
+
+ InsertPacket(first_timestamp());
+ GetFirstPacket();
+
+ bool muted;
+ EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output_, &muted));
+ EXPECT_EQ(kExpand, neteq_->last_operation_for_test());
+}
+
+TEST_F(NetEqImplTest120ms, FastAccelerate) {
+ CreateInstanceWithDelayManagerMock();
+ Register120msCodec(AudioDecoder::kSpeech);
+
+ InsertPacket(first_timestamp());
+ GetFirstPacket();
+ InsertPacket(first_timestamp() + timestamp_diff_between_packets());
+
+  // Delay manager reports buffer limits which should cause a FastAccelerate.
+ EXPECT_CALL(*mock_delay_manager_, BufferLimits(_, _))
+ .Times(1)
+ .WillOnce(DoAll(SetArgPointee<0>(0), SetArgPointee<1>(0)));
+
+ bool muted;
+ EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output_, &muted));
+ EXPECT_EQ(kFastAccelerate, neteq_->last_operation_for_test());
+}
+
+TEST_F(NetEqImplTest120ms, PreemptiveExpand) {
+ CreateInstanceWithDelayManagerMock();
+ Register120msCodec(AudioDecoder::kSpeech);
+
+ InsertPacket(first_timestamp());
+ GetFirstPacket();
+
+ InsertPacket(first_timestamp() + timestamp_diff_between_packets());
+
+  // Delay manager reports buffer limits which should cause a PreemptiveExpand.
+ EXPECT_CALL(*mock_delay_manager_, BufferLimits(_, _))
+ .Times(1)
+ .WillOnce(DoAll(SetArgPointee<0>(100), SetArgPointee<1>(100)));
+
+ bool muted;
+ EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output_, &muted));
+ EXPECT_EQ(kPreemptiveExpand, neteq_->last_operation_for_test());
+}
+
+TEST_F(NetEqImplTest120ms, Accelerate) {
+ CreateInstanceWithDelayManagerMock();
+ Register120msCodec(AudioDecoder::kSpeech);
+
+ InsertPacket(first_timestamp());
+ GetFirstPacket();
+
+ InsertPacket(first_timestamp() + timestamp_diff_between_packets());
+
+  // Delay manager reports buffer limits which should cause an Accelerate.
+ EXPECT_CALL(*mock_delay_manager_, BufferLimits(_, _))
+ .Times(1)
+ .WillOnce(DoAll(SetArgPointee<0>(1), SetArgPointee<1>(2)));
+
+ bool muted;
+ EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output_, &muted));
+ EXPECT_EQ(kAccelerate, neteq_->last_operation_for_test());
+}
+
}  // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_network_stats_unittest.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_network_stats_unittest.cc
index 770ebd57835..1a77abcd505 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_network_stats_unittest.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_network_stats_unittest.cc
@@ -24,31 +24,36 @@ using ::testing::Return;
class MockAudioDecoder final : public AudioDecoder {
public:
+ // TODO(nisse): Valid overrides commented out, because the gmock
+ // methods don't use any override declarations, and we want to avoid
+ // warnings from -Winconsistent-missing-override. See
+ // http://crbug.com/428099.
static const int kPacketDuration = 960; // 48 kHz * 20 ms
explicit MockAudioDecoder(size_t num_channels)
: num_channels_(num_channels), fec_enabled_(false) {
}
- ~MockAudioDecoder() override { Die(); }
+ ~MockAudioDecoder() /* override */ { Die(); }
MOCK_METHOD0(Die, void());
MOCK_METHOD0(Reset, void());
int PacketDuration(const uint8_t* encoded,
- size_t encoded_len) const override {
+ size_t encoded_len) const /* override */ {
return kPacketDuration;
}
int PacketDurationRedundant(const uint8_t* encoded,
- size_t encoded_len) const override {
+ size_t encoded_len) const /* override */ {
return kPacketDuration;
}
- bool PacketHasFec(const uint8_t* encoded, size_t encoded_len) const override {
+ bool PacketHasFec(
+ const uint8_t* encoded, size_t encoded_len) const /* override */ {
return fec_enabled_;
}
- size_t Channels() const override { return num_channels_; }
+ size_t Channels() const /* override */ { return num_channels_; }
void set_fec_enabled(bool enable_fec) { fec_enabled_ = enable_fec; }
@@ -60,7 +65,7 @@ class MockAudioDecoder final : public AudioDecoder {
size_t encoded_len,
int /*sample_rate_hz*/,
int16_t* decoded,
- SpeechType* speech_type) override {
+ SpeechType* speech_type) /* override */ {
*speech_type = kSpeech;
memset(decoded, 0, sizeof(int16_t) * kPacketDuration * Channels());
return kPacketDuration * Channels();
@@ -70,7 +75,7 @@ class MockAudioDecoder final : public AudioDecoder {
size_t encoded_len,
int sample_rate_hz,
int16_t* decoded,
- SpeechType* speech_type) override {
+ SpeechType* speech_type) /* override */ {
return DecodeInternal(encoded, encoded_len, sample_rate_hz, decoded,
speech_type);
}
@@ -294,7 +299,3 @@ TEST(NetEqNetworkStatsTest, NoiseExpansionTest) {
} // namespace test
} // namespace webrtc
-
-
-
-
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_stereo_unittest.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_stereo_unittest.cc
index 4ee17d2a446..e1a9922b0b4 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_stereo_unittest.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_stereo_unittest.cc
@@ -212,11 +212,14 @@ class NetEqStereoTest : public ::testing::TestWithParam<TestParameters> {
} while (Lost()); // If lost, immediately read the next packet.
}
// Get audio from mono instance.
- EXPECT_EQ(NetEq::kOK, neteq_mono_->GetAudio(&output_));
+ bool muted;
+ EXPECT_EQ(NetEq::kOK, neteq_mono_->GetAudio(&output_, &muted));
+ ASSERT_FALSE(muted);
EXPECT_EQ(1u, output_.num_channels_);
EXPECT_EQ(output_size_samples_, output_.samples_per_channel_);
// Get audio from multi-channel instance.
- ASSERT_EQ(NetEq::kOK, neteq_->GetAudio(&output_multi_channel_));
+ ASSERT_EQ(NetEq::kOK, neteq_->GetAudio(&output_multi_channel_, &muted));
+ ASSERT_FALSE(muted);
EXPECT_EQ(num_channels_, output_multi_channel_.num_channels_);
EXPECT_EQ(output_size_samples_,
output_multi_channel_.samples_per_channel_);
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_tests.gypi b/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_tests.gypi
index f02d3deee9b..bb316e8a81d 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_tests.gypi
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_tests.gypi
@@ -14,9 +14,12 @@
'target_name': 'rtc_event_log_source',
'type': 'static_library',
'dependencies': [
- '<(webrtc_root)/webrtc.gyp:rtc_event_log',
+ '<(webrtc_root)/webrtc.gyp:rtc_event_log_parser',
'<(webrtc_root)/webrtc.gyp:rtc_event_log_proto',
],
+ 'export_dependent_settings': [
+ '<(webrtc_root)/webrtc.gyp:rtc_event_log_parser',
+ ],
'sources': [
'tools/rtc_event_log_source.h',
'tools/rtc_event_log_source.cc',
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_unittest.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_unittest.cc
index b6efe7d7d6b..cf8e5b474c2 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_unittest.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/neteq_unittest.cc
@@ -8,10 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-/*
- * This file includes unit tests for NetEQ.
- */
-
#include "webrtc/modules/audio_coding/neteq/include/neteq.h"
#include <math.h>
@@ -26,6 +22,8 @@
#include "gflags/gflags.h"
#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/sha1digest.h"
+#include "webrtc/base/stringencode.h"
#include "webrtc/modules/audio_coding/neteq/tools/audio_loop.h"
#include "webrtc/modules/audio_coding/neteq/tools/rtp_file_source.h"
#include "webrtc/modules/audio_coding/codecs/pcm16b/pcm16b.h"
@@ -45,6 +43,23 @@ DEFINE_bool(gen_ref, false, "Generate reference files.");
namespace {
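+// Returns the reference checksum that applies to the current platform
+// (Android, Win32, Win64 or the general one).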
+const std::string& PlatformChecksum(const std::string& checksum_general,
+ const std::string& checksum_android,
+ const std::string& checksum_win_32,
+ const std::string& checksum_win_64) {
+#ifdef WEBRTC_ANDROID
+ return checksum_android;
+#elif WEBRTC_WIN
+ #ifdef WEBRTC_ARCH_64_BITS
+ return checksum_win_64;
+ #else
+ return checksum_win_32;
+ #endif // WEBRTC_ARCH_64_BITS
+#else
+ return checksum_general;
+#endif // WEBRTC_WIN
+}
+
bool IsAllZero(const int16_t* buf, size_t buf_length) {
bool all_zero = true;
for (size_t n = 0; n < buf_length && all_zero; ++n)
@@ -89,186 +104,141 @@ void Convert(const webrtc::RtcpStatistics& stats_raw,
stats->set_jitter(stats_raw.jitter);
}
-void WriteMessage(FILE* file, const std::string& message) {
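+// Writes the length-prefixed message to |file| (if non-null) and feeds the
+// same bytes to |digest|.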
+void AddMessage(FILE* file, rtc::MessageDigest* digest,
+ const std::string& message) {
int32_t size = message.length();
- ASSERT_EQ(1u, fwrite(&size, sizeof(size), 1, file));
- if (size <= 0)
- return;
- ASSERT_EQ(static_cast<size_t>(size),
- fwrite(message.data(), sizeof(char), size, file));
+ if (file)
+ ASSERT_EQ(1u, fwrite(&size, sizeof(size), 1, file));
+ digest->Update(&size, sizeof(size));
+
+ if (file)
+ ASSERT_EQ(static_cast<size_t>(size),
+ fwrite(message.data(), sizeof(char), size, file));
+ digest->Update(message.data(), sizeof(char) * size);
}
-void ReadMessage(FILE* file, std::string* message) {
- int32_t size;
- ASSERT_EQ(1u, fread(&size, sizeof(size), 1, file));
- if (size <= 0)
- return;
- std::unique_ptr<char[]> buffer(new char[size]);
- ASSERT_EQ(static_cast<size_t>(size),
- fread(buffer.get(), sizeof(char), size, file));
- message->assign(buffer.get(), size);
-}
#endif // WEBRTC_NETEQ_UNITTEST_BITEXACT
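+// Registers the standard set of test codecs and payload types with |neteq|.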
+void LoadDecoders(webrtc::NetEq* neteq) {
+ // Load PCMu.
+ ASSERT_EQ(0, neteq->RegisterPayloadType(webrtc::NetEqDecoder::kDecoderPCMu,
+ "pcmu", 0));
+ // Load PCMa.
+ ASSERT_EQ(0, neteq->RegisterPayloadType(webrtc::NetEqDecoder::kDecoderPCMa,
+ "pcma", 8));
+#ifdef WEBRTC_CODEC_ILBC
+ // Load iLBC.
+ ASSERT_EQ(0, neteq->RegisterPayloadType(webrtc::NetEqDecoder::kDecoderILBC,
+ "ilbc", 102));
+#endif
+#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
+ // Load iSAC.
+ ASSERT_EQ(0, neteq->RegisterPayloadType(webrtc::NetEqDecoder::kDecoderISAC,
+ "isac", 103));
+#endif
+#ifdef WEBRTC_CODEC_ISAC
+ // Load iSAC SWB.
+ ASSERT_EQ(0, neteq->RegisterPayloadType(webrtc::NetEqDecoder::kDecoderISACswb,
+ "isac-swb", 104));
+#endif
+#ifdef WEBRTC_CODEC_OPUS
+ ASSERT_EQ(0, neteq->RegisterPayloadType(webrtc::NetEqDecoder::kDecoderOpus,
+ "opus", 111));
+#endif
+ // Load PCM16B nb.
+ ASSERT_EQ(0, neteq->RegisterPayloadType(webrtc::NetEqDecoder::kDecoderPCM16B,
+ "pcm16-nb", 93));
+ // Load PCM16B wb.
+ ASSERT_EQ(0, neteq->RegisterPayloadType(
+ webrtc::NetEqDecoder::kDecoderPCM16Bwb, "pcm16-wb", 94));
+ // Load PCM16B swb32.
+ ASSERT_EQ(
+ 0, neteq->RegisterPayloadType(
+ webrtc::NetEqDecoder::kDecoderPCM16Bswb32kHz, "pcm16-swb32", 95));
+ // Load CNG 8 kHz.
+ ASSERT_EQ(0, neteq->RegisterPayloadType(webrtc::NetEqDecoder::kDecoderCNGnb,
+ "cng-nb", 13));
+ // Load CNG 16 kHz.
+ ASSERT_EQ(0, neteq->RegisterPayloadType(webrtc::NetEqDecoder::kDecoderCNGwb,
+ "cng-wb", 98));
+}
} // namespace
namespace webrtc {
-class RefFiles {
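+// Collects test results, feeds them to a SHA-1 message digest for checksum
+// verification, and optionally writes them to an output file.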
+class ResultSink {
public:
- RefFiles(const std::string& input_file, const std::string& output_file);
- ~RefFiles();
- template<class T> void ProcessReference(const T& test_results);
- template<typename T, size_t n> void ProcessReference(
- const T (&test_results)[n],
- size_t length);
- template<typename T, size_t n> void WriteToFile(
- const T (&test_results)[n],
- size_t length);
- template<typename T, size_t n> void ReadFromFileAndCompare(
+ explicit ResultSink(const std::string& output_file);
+ ~ResultSink();
+
+ template<typename T, size_t n> void AddResult(
const T (&test_results)[n],
size_t length);
- void WriteToFile(const NetEqNetworkStatistics& stats);
- void ReadFromFileAndCompare(const NetEqNetworkStatistics& stats);
- void WriteToFile(const RtcpStatistics& stats);
- void ReadFromFileAndCompare(const RtcpStatistics& stats);
- FILE* input_fp_;
+ void AddResult(const NetEqNetworkStatistics& stats);
+ void AddResult(const RtcpStatistics& stats);
+
+ void VerifyChecksum(const std::string& ref_check_sum);
+
+ private:
FILE* output_fp_;
+ std::unique_ptr<rtc::MessageDigest> digest_;
};
-RefFiles::RefFiles(const std::string &input_file,
- const std::string &output_file)
- : input_fp_(NULL),
- output_fp_(NULL) {
- if (!input_file.empty()) {
- input_fp_ = fopen(input_file.c_str(), "rb");
- EXPECT_TRUE(input_fp_ != NULL);
- }
+ResultSink::ResultSink(const std::string &output_file)
+ : output_fp_(nullptr),
+ digest_(new rtc::Sha1Digest()) {
if (!output_file.empty()) {
output_fp_ = fopen(output_file.c_str(), "wb");
EXPECT_TRUE(output_fp_ != NULL);
}
}
-RefFiles::~RefFiles() {
- if (input_fp_) {
- EXPECT_EQ(EOF, fgetc(input_fp_)); // Make sure that we reached the end.
- fclose(input_fp_);
- }
- if (output_fp_) fclose(output_fp_);
-}
-
-template<class T>
-void RefFiles::ProcessReference(const T& test_results) {
- WriteToFile(test_results);
- ReadFromFileAndCompare(test_results);
+ResultSink::~ResultSink() {
+ if (output_fp_)
+ fclose(output_fp_);
}
template<typename T, size_t n>
-void RefFiles::ProcessReference(const T (&test_results)[n], size_t length) {
- WriteToFile(test_results, length);
- ReadFromFileAndCompare(test_results, length);
-}
-
-template<typename T, size_t n>
-void RefFiles::WriteToFile(const T (&test_results)[n], size_t length) {
+void ResultSink::AddResult(const T (&test_results)[n], size_t length) {
if (output_fp_) {
ASSERT_EQ(length, fwrite(&test_results, sizeof(T), length, output_fp_));
}
+ digest_->Update(&test_results, sizeof(T) * length);
}
-template<typename T, size_t n>
-void RefFiles::ReadFromFileAndCompare(const T (&test_results)[n],
- size_t length) {
- if (input_fp_) {
- // Read from ref file.
- T* ref = new T[length];
- ASSERT_EQ(length, fread(ref, sizeof(T), length, input_fp_));
- // Compare
- ASSERT_EQ(0, memcmp(&test_results, ref, sizeof(T) * length));
- delete [] ref;
- }
-}
-
-void RefFiles::WriteToFile(const NetEqNetworkStatistics& stats_raw) {
+void ResultSink::AddResult(const NetEqNetworkStatistics& stats_raw) {
#ifdef WEBRTC_NETEQ_UNITTEST_BITEXACT
- if (!output_fp_)
- return;
neteq_unittest::NetEqNetworkStatistics stats;
Convert(stats_raw, &stats);
std::string stats_string;
ASSERT_TRUE(stats.SerializeToString(&stats_string));
- WriteMessage(output_fp_, stats_string);
+ AddMessage(output_fp_, digest_.get(), stats_string);
#else
FAIL() << "Writing to reference file requires Proto Buffer.";
#endif // WEBRTC_NETEQ_UNITTEST_BITEXACT
}
-void RefFiles::ReadFromFileAndCompare(
- const NetEqNetworkStatistics& stats) {
+void ResultSink::AddResult(const RtcpStatistics& stats_raw) {
#ifdef WEBRTC_NETEQ_UNITTEST_BITEXACT
- if (!input_fp_)
- return;
-
- std::string stats_string;
- ReadMessage(input_fp_, &stats_string);
- neteq_unittest::NetEqNetworkStatistics ref_stats;
- ASSERT_TRUE(ref_stats.ParseFromString(stats_string));
-
- // Compare
- ASSERT_EQ(stats.current_buffer_size_ms, ref_stats.current_buffer_size_ms());
- ASSERT_EQ(stats.preferred_buffer_size_ms,
- ref_stats.preferred_buffer_size_ms());
- ASSERT_EQ(stats.jitter_peaks_found, ref_stats.jitter_peaks_found());
- ASSERT_EQ(stats.packet_loss_rate, ref_stats.packet_loss_rate());
- ASSERT_EQ(stats.packet_discard_rate, ref_stats.packet_discard_rate());
- ASSERT_EQ(stats.expand_rate, ref_stats.expand_rate());
- ASSERT_EQ(stats.preemptive_rate, ref_stats.preemptive_rate());
- ASSERT_EQ(stats.accelerate_rate, ref_stats.accelerate_rate());
- ASSERT_EQ(stats.clockdrift_ppm, ref_stats.clockdrift_ppm());
- ASSERT_EQ(stats.added_zero_samples, ref_stats.added_zero_samples());
- ASSERT_EQ(stats.secondary_decoded_rate, ref_stats.secondary_decoded_rate());
- ASSERT_LE(stats.speech_expand_rate, ref_stats.expand_rate());
-#else
- FAIL() << "Reading from reference file requires Proto Buffer.";
-#endif // WEBRTC_NETEQ_UNITTEST_BITEXACT
-}
-
-void RefFiles::WriteToFile(const RtcpStatistics& stats_raw) {
-#ifdef WEBRTC_NETEQ_UNITTEST_BITEXACT
- if (!output_fp_)
- return;
neteq_unittest::RtcpStatistics stats;
Convert(stats_raw, &stats);
std::string stats_string;
ASSERT_TRUE(stats.SerializeToString(&stats_string));
- WriteMessage(output_fp_, stats_string);
+ AddMessage(output_fp_, digest_.get(), stats_string);
#else
FAIL() << "Writing to reference file requires Proto Buffer.";
#endif // WEBRTC_NETEQ_UNITTEST_BITEXACT
}
-void RefFiles::ReadFromFileAndCompare(const RtcpStatistics& stats) {
-#ifdef WEBRTC_NETEQ_UNITTEST_BITEXACT
- if (!input_fp_)
- return;
- std::string stats_string;
- ReadMessage(input_fp_, &stats_string);
- neteq_unittest::RtcpStatistics ref_stats;
- ASSERT_TRUE(ref_stats.ParseFromString(stats_string));
-
- // Compare
- ASSERT_EQ(stats.fraction_lost, ref_stats.fraction_lost());
- ASSERT_EQ(stats.cumulative_lost, ref_stats.cumulative_lost());
- ASSERT_EQ(stats.extended_max_sequence_number,
- ref_stats.extended_max_sequence_number());
- ASSERT_EQ(stats.jitter, ref_stats.jitter());
-#else
- FAIL() << "Reading from reference file requires Proto Buffer.";
-#endif // WEBRTC_NETEQ_UNITTEST_BITEXACT
+void ResultSink::VerifyChecksum(const std::string& checksum) {
+ std::vector<char> buffer;
+ buffer.resize(digest_->Size());
+ digest_->Finish(&buffer[0], buffer.size());
+ const std::string result = rtc::hex_encode(&buffer[0], digest_->Size());
+ EXPECT_EQ(checksum, result);
}
class NetEqDecodingTest : public ::testing::Test {
@@ -286,14 +256,14 @@ class NetEqDecodingTest : public ::testing::Test {
virtual void SetUp();
virtual void TearDown();
void SelectDecoders(NetEqDecoder* used_codec);
- void LoadDecoders();
void OpenInputFile(const std::string &rtp_file);
void Process();
void DecodeAndCompare(const std::string& rtp_file,
- const std::string& ref_file,
- const std::string& stat_ref_file,
- const std::string& rtcp_ref_file);
+ const std::string& output_checksum,
+ const std::string& network_stats_checksum,
+ const std::string& rtcp_stats_checksum,
+ bool gen_ref);
static void PopulateRtpInfo(int frame_index,
int timestamp,
@@ -350,56 +320,13 @@ void NetEqDecodingTest::SetUp() {
ASSERT_EQ(0, neteq_->NetworkStatistics(&stat));
algorithmic_delay_ms_ = stat.current_buffer_size_ms;
ASSERT_TRUE(neteq_);
- LoadDecoders();
+ LoadDecoders(neteq_);
}
void NetEqDecodingTest::TearDown() {
delete neteq_;
}
-void NetEqDecodingTest::LoadDecoders() {
- // Load PCMu.
- ASSERT_EQ(0,
- neteq_->RegisterPayloadType(NetEqDecoder::kDecoderPCMu, "pcmu", 0));
- // Load PCMa.
- ASSERT_EQ(0,
- neteq_->RegisterPayloadType(NetEqDecoder::kDecoderPCMa, "pcma", 8));
-#ifdef WEBRTC_CODEC_ILBC
- // Load iLBC.
- ASSERT_EQ(
- 0, neteq_->RegisterPayloadType(NetEqDecoder::kDecoderILBC, "ilbc", 102));
-#endif
-#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
- // Load iSAC.
- ASSERT_EQ(
- 0, neteq_->RegisterPayloadType(NetEqDecoder::kDecoderISAC, "isac", 103));
-#endif
-#ifdef WEBRTC_CODEC_ISAC
- // Load iSAC SWB.
- ASSERT_EQ(0, neteq_->RegisterPayloadType(NetEqDecoder::kDecoderISACswb,
- "isac-swb", 104));
-#endif
-#ifdef WEBRTC_CODEC_OPUS
- ASSERT_EQ(0, neteq_->RegisterPayloadType(NetEqDecoder::kDecoderOpus,
- "opus", 111));
-#endif
- // Load PCM16B nb.
- ASSERT_EQ(0, neteq_->RegisterPayloadType(NetEqDecoder::kDecoderPCM16B,
- "pcm16-nb", 93));
- // Load PCM16B wb.
- ASSERT_EQ(0, neteq_->RegisterPayloadType(NetEqDecoder::kDecoderPCM16Bwb,
- "pcm16-wb", 94));
- // Load PCM16B swb32.
- ASSERT_EQ(0, neteq_->RegisterPayloadType(NetEqDecoder::kDecoderPCM16Bswb32kHz,
- "pcm16-swb32", 95));
- // Load CNG 8 kHz.
- ASSERT_EQ(0, neteq_->RegisterPayloadType(NetEqDecoder::kDecoderCNGnb,
- "cng-nb", 13));
- // Load CNG 16 kHz.
- ASSERT_EQ(0, neteq_->RegisterPayloadType(NetEqDecoder::kDecoderCNGwb,
- "cng-wb", 98));
-}
-
void NetEqDecodingTest::OpenInputFile(const std::string &rtp_file) {
rtp_source_.reset(test::RtpFileSource::Create(rtp_file));
}
@@ -426,7 +353,9 @@ void NetEqDecodingTest::Process() {
}
// Get audio from NetEq.
- ASSERT_EQ(0, neteq_->GetAudio(&out_frame_));
+ bool muted;
+ ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
+ ASSERT_FALSE(muted);
ASSERT_TRUE((out_frame_.samples_per_channel_ == kBlockSize8kHz) ||
(out_frame_.samples_per_channel_ == kBlockSize16kHz) ||
(out_frame_.samples_per_channel_ == kBlockSize32kHz) ||
@@ -438,29 +367,25 @@ void NetEqDecodingTest::Process() {
sim_clock_ += kTimeStepMs;
}
-void NetEqDecodingTest::DecodeAndCompare(const std::string& rtp_file,
- const std::string& ref_file,
- const std::string& stat_ref_file,
- const std::string& rtcp_ref_file) {
+void NetEqDecodingTest::DecodeAndCompare(
+ const std::string& rtp_file,
+ const std::string& output_checksum,
+ const std::string& network_stats_checksum,
+ const std::string& rtcp_stats_checksum,
+ bool gen_ref) {
OpenInputFile(rtp_file);
- std::string ref_out_file = "";
- if (ref_file.empty()) {
- ref_out_file = webrtc::test::OutputPath() + "neteq_universal_ref.pcm";
- }
- RefFiles ref_files(ref_file, ref_out_file);
+ std::string ref_out_file =
+ gen_ref ? webrtc::test::OutputPath() + "neteq_universal_ref.pcm" : "";
+ ResultSink output(ref_out_file);
- std::string stat_out_file = "";
- if (stat_ref_file.empty()) {
- stat_out_file = webrtc::test::OutputPath() + "neteq_network_stats.dat";
- }
- RefFiles network_stat_files(stat_ref_file, stat_out_file);
+ std::string stat_out_file =
+ gen_ref ? webrtc::test::OutputPath() + "neteq_network_stats.dat" : "";
+ ResultSink network_stats(stat_out_file);
- std::string rtcp_out_file = "";
- if (rtcp_ref_file.empty()) {
- rtcp_out_file = webrtc::test::OutputPath() + "neteq_rtcp_stats.dat";
- }
- RefFiles rtcp_stat_files(rtcp_ref_file, rtcp_out_file);
+ std::string rtcp_out_file =
+ gen_ref ? webrtc::test::OutputPath() + "neteq_rtcp_stats.dat" : "";
+ ResultSink rtcp_stats(rtcp_out_file);
packet_.reset(rtp_source_->NextPacket());
int i = 0;
@@ -469,25 +394,33 @@ void NetEqDecodingTest::DecodeAndCompare(const std::string& rtp_file,
ss << "Lap number " << i++ << " in DecodeAndCompare while loop";
SCOPED_TRACE(ss.str()); // Print out the parameter values on failure.
ASSERT_NO_FATAL_FAILURE(Process());
- ASSERT_NO_FATAL_FAILURE(ref_files.ProcessReference(
+ ASSERT_NO_FATAL_FAILURE(output.AddResult(
out_frame_.data_, out_frame_.samples_per_channel_));
// Query the network statistics API once per second
if (sim_clock_ % 1000 == 0) {
// Process NetworkStatistics.
- NetEqNetworkStatistics network_stats;
- ASSERT_EQ(0, neteq_->NetworkStatistics(&network_stats));
- ASSERT_NO_FATAL_FAILURE(
- network_stat_files.ProcessReference(network_stats));
+ NetEqNetworkStatistics current_network_stats;
+ ASSERT_EQ(0, neteq_->NetworkStatistics(&current_network_stats));
+ ASSERT_NO_FATAL_FAILURE(network_stats.AddResult(current_network_stats));
+
// Compare with CurrentDelay, which should be identical.
- EXPECT_EQ(network_stats.current_buffer_size_ms, neteq_->CurrentDelayMs());
+ EXPECT_EQ(current_network_stats.current_buffer_size_ms,
+ neteq_->CurrentDelayMs());
// Process RTCPstat.
- RtcpStatistics rtcp_stats;
- neteq_->GetRtcpStatistics(&rtcp_stats);
- ASSERT_NO_FATAL_FAILURE(rtcp_stat_files.ProcessReference(rtcp_stats));
+ RtcpStatistics current_rtcp_stats;
+ neteq_->GetRtcpStatistics(&current_rtcp_stats);
+ ASSERT_NO_FATAL_FAILURE(rtcp_stats.AddResult(current_rtcp_stats));
}
}
+
+ SCOPED_TRACE("Check output audio.");
+ output.VerifyChecksum(output_checksum);
+ SCOPED_TRACE("Check network stats.");
+ network_stats.VerifyChecksum(network_stats_checksum);
+ SCOPED_TRACE("Check rtcp stats.");
+ rtcp_stats.VerifyChecksum(rtcp_stats_checksum);
}
void NetEqDecodingTest::PopulateRtpInfo(int frame_index,
@@ -525,31 +458,30 @@ void NetEqDecodingTest::PopulateCng(int frame_index,
TEST_F(NetEqDecodingTest, MAYBE_TestBitExactness) {
const std::string input_rtp_file =
webrtc::test::ResourcePath("audio_coding/neteq_universal_new", "rtp");
- // Note that neteq4_universal_ref.pcm and neteq4_universal_ref_win_32.pcm
- // are identical. The latter could have been removed, but if clients still
- // have a copy of the file, the test will fail.
- const std::string input_ref_file =
- webrtc::test::ResourcePath("audio_coding/neteq4_universal_ref", "pcm");
-#if defined(_MSC_VER) && (_MSC_VER >= 1700)
- // For Visual Studio 2012 and later, we will have to use the generic reference
- // file, rather than the windows-specific one.
- const std::string network_stat_ref_file = webrtc::test::ProjectRootPath() +
- "resources/audio_coding/neteq4_network_stats.dat";
-#else
- const std::string network_stat_ref_file =
- webrtc::test::ResourcePath("audio_coding/neteq4_network_stats", "dat");
-#endif
- const std::string rtcp_stat_ref_file =
- webrtc::test::ResourcePath("audio_coding/neteq4_rtcp_stats", "dat");
-
- if (FLAGS_gen_ref) {
- DecodeAndCompare(input_rtp_file, "", "", "");
- } else {
- DecodeAndCompare(input_rtp_file,
- input_ref_file,
- network_stat_ref_file,
- rtcp_stat_ref_file);
- }
+
+ const std::string output_checksum = PlatformChecksum(
+ "472ebe1126f41fdb6b5c63c87f625a52e7604e49",
+ "d2a6b6ff54b340cf9f961c7f07768d86b3761073",
+ "472ebe1126f41fdb6b5c63c87f625a52e7604e49",
+ "f9749813dbc3fb59dae761de518fec65b8407c5b");
+
+ const std::string network_stats_checksum = PlatformChecksum(
+ "2cf380a05ee07080bd72471e8ec7777a39644ec9",
+ "01be67dc4c3b8e74743a45cbd8684c0535dec9ad",
+ "2cf380a05ee07080bd72471e8ec7777a39644ec9",
+ "2cf380a05ee07080bd72471e8ec7777a39644ec9");
+
+ const std::string rtcp_stats_checksum = PlatformChecksum(
+ "b8880bf9fed2487efbddcb8d94b9937a29ae521d",
+ "f3f7b3d3e71d7e635240b5373b57df6a7e4ce9d4",
+ "b8880bf9fed2487efbddcb8d94b9937a29ae521d",
+ "b8880bf9fed2487efbddcb8d94b9937a29ae521d");
+
+ DecodeAndCompare(input_rtp_file,
+ output_checksum,
+ network_stats_checksum,
+ rtcp_stats_checksum,
+ FLAGS_gen_ref);
}
#if !defined(WEBRTC_IOS) && !defined(WEBRTC_ANDROID) && \
@@ -562,26 +494,30 @@ TEST_F(NetEqDecodingTest, MAYBE_TestBitExactness) {
TEST_F(NetEqDecodingTest, MAYBE_TestOpusBitExactness) {
const std::string input_rtp_file =
webrtc::test::ResourcePath("audio_coding/neteq_opus", "rtp");
- const std::string input_ref_file =
- // The pcm files were generated by using Opus v1.1.2 to decode the RTC
- // file generated by Opus v1.1
- webrtc::test::ResourcePath("audio_coding/neteq4_opus_ref", "pcm");
- const std::string network_stat_ref_file =
- // The network stats file was generated when using Opus v1.1.2 to decode
- // the RTC file generated by Opus v1.1
- webrtc::test::ResourcePath("audio_coding/neteq4_opus_network_stats",
- "dat");
- const std::string rtcp_stat_ref_file =
- webrtc::test::ResourcePath("audio_coding/neteq4_opus_rtcp_stats", "dat");
-
- if (FLAGS_gen_ref) {
- DecodeAndCompare(input_rtp_file, "", "", "");
- } else {
- DecodeAndCompare(input_rtp_file,
- input_ref_file,
- network_stat_ref_file,
- rtcp_stat_ref_file);
- }
+
+ const std::string output_checksum = PlatformChecksum(
+ "19ad24b4a1eb7a9620e6da09f98c49aa5792ade4",
+ "19ad24b4a1eb7a9620e6da09f98c49aa5792ade4",
+ "19ad24b4a1eb7a9620e6da09f98c49aa5792ade4",
+ "19ad24b4a1eb7a9620e6da09f98c49aa5792ade4");
+
+ const std::string network_stats_checksum = PlatformChecksum(
+ "6eab76efbde753d4dde38983445ca16b4ce59b39",
+ "6eab76efbde753d4dde38983445ca16b4ce59b39",
+ "6eab76efbde753d4dde38983445ca16b4ce59b39",
+ "6eab76efbde753d4dde38983445ca16b4ce59b39");
+
+ const std::string rtcp_stats_checksum = PlatformChecksum(
+ "e37c797e3de6a64dda88c9ade7a013d022a2e1e0",
+ "e37c797e3de6a64dda88c9ade7a013d022a2e1e0",
+ "e37c797e3de6a64dda88c9ade7a013d022a2e1e0",
+ "e37c797e3de6a64dda88c9ade7a013d022a2e1e0");
+
+ DecodeAndCompare(input_rtp_file,
+ output_checksum,
+ network_stats_checksum,
+ rtcp_stats_checksum,
+ FLAGS_gen_ref);
}
// Use fax mode to avoid time-scaling. This is to simplify the testing of
@@ -610,7 +546,8 @@ TEST_F(NetEqDecodingTestFaxMode, TestFrameWaitingTimeStatistics) {
}
// Pull out all data.
for (size_t i = 0; i < num_frames; ++i) {
- ASSERT_EQ(0, neteq_->GetAudio(&out_frame_));
+ bool muted;
+ ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
}
@@ -651,7 +588,8 @@ TEST_F(NetEqDecodingTest, TestAverageInterArrivalTimeNegative) {
}
// Pull out data once.
- ASSERT_EQ(0, neteq_->GetAudio(&out_frame_));
+ bool muted;
+ ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
}
@@ -678,7 +616,8 @@ TEST_F(NetEqDecodingTest, TestAverageInterArrivalTimePositive) {
}
// Pull out data once.
- ASSERT_EQ(0, neteq_->GetAudio(&out_frame_));
+ bool muted;
+ ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
}
@@ -699,6 +638,7 @@ void NetEqDecodingTest::LongCngWithClockDrift(double drift_factor,
const size_t kPayloadBytes = kSamples * 2;
double next_input_time_ms = 0.0;
double t_ms;
+ bool muted;
// Insert speech for 5 seconds.
const int kSpeechDurationMs = 5000;
@@ -715,7 +655,7 @@ void NetEqDecodingTest::LongCngWithClockDrift(double drift_factor,
next_input_time_ms += static_cast<double>(kFrameSizeMs) * drift_factor;
}
// Pull out data once.
- ASSERT_EQ(0, neteq_->GetAudio(&out_frame_));
+ ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
}
@@ -744,7 +684,7 @@ void NetEqDecodingTest::LongCngWithClockDrift(double drift_factor,
next_input_time_ms += static_cast<double>(kCngPeriodMs) * drift_factor;
}
// Pull out data once.
- ASSERT_EQ(0, neteq_->GetAudio(&out_frame_));
+ ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
}
@@ -757,7 +697,7 @@ void NetEqDecodingTest::LongCngWithClockDrift(double drift_factor,
const double loop_end_time = t_ms + network_freeze_ms;
for (; t_ms < loop_end_time; t_ms += 10) {
// Pull out data once.
- ASSERT_EQ(0, neteq_->GetAudio(&out_frame_));
+ ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);
}
@@ -769,7 +709,7 @@ void NetEqDecodingTest::LongCngWithClockDrift(double drift_factor,
if (pull_once && next_input_time_ms >= pull_time_ms) {
pull_once = false;
// Pull out data once.
- ASSERT_EQ(0, neteq_->GetAudio(&out_frame_));
+ ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);
t_ms += 10;
@@ -803,7 +743,7 @@ void NetEqDecodingTest::LongCngWithClockDrift(double drift_factor,
next_input_time_ms += kFrameSizeMs * drift_factor;
}
// Pull out data once.
- ASSERT_EQ(0, neteq_->GetAudio(&out_frame_));
+ ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
// Increase clock.
t_ms += 10;
@@ -931,7 +871,9 @@ TEST_F(NetEqDecodingTest, MAYBE_DecoderError) {
for (size_t i = 0; i < AudioFrame::kMaxDataSizeSamples; ++i) {
out_frame_.data_[i] = 1;
}
- EXPECT_EQ(NetEq::kFail, neteq_->GetAudio(&out_frame_));
+ bool muted;
+ EXPECT_EQ(NetEq::kFail, neteq_->GetAudio(&out_frame_, &muted));
+ ASSERT_FALSE(muted);
// Verify that there is a decoder error to check.
EXPECT_EQ(NetEq::kDecoderErrorCode, neteq_->LastError());
@@ -968,7 +910,9 @@ TEST_F(NetEqDecodingTest, GetAudioBeforeInsertPacket) {
for (size_t i = 0; i < AudioFrame::kMaxDataSizeSamples; ++i) {
out_frame_.data_[i] = 1;
}
- EXPECT_EQ(0, neteq_->GetAudio(&out_frame_));
+ bool muted;
+ EXPECT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
+ ASSERT_FALSE(muted);
// Verify that the first block of samples is set to 0.
static const int kExpectedOutputLength =
kInitSampleRateHz / 100; // 10 ms at initial sample rate.
@@ -1020,6 +964,7 @@ class NetEqBgnTest : public NetEqDecodingTest {
rtp_info.header.payloadType = payload_type;
uint32_t receive_timestamp = 0;
+ bool muted;
for (int n = 0; n < 10; ++n) { // Insert few packets and get audio.
auto block = input.GetNextBlock();
ASSERT_EQ(expected_samples_per_channel, block.size());
@@ -1031,7 +976,7 @@ class NetEqBgnTest : public NetEqDecodingTest {
payload, enc_len_bytes),
receive_timestamp));
output.Reset();
- ASSERT_EQ(0, neteq_->GetAudio(&output));
+ ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));
ASSERT_EQ(1u, output.num_channels_);
ASSERT_EQ(expected_samples_per_channel, output.samples_per_channel_);
ASSERT_EQ(AudioFrame::kNormalSpeech, output.speech_type_);
@@ -1047,7 +992,7 @@ class NetEqBgnTest : public NetEqDecodingTest {
// Get audio without inserting packets, expecting PLC and PLC-to-CNG. Pull
// one frame without checking speech-type. This is the first frame pulled
// without inserting any packet, and might not be labeled as PLC.
- ASSERT_EQ(0, neteq_->GetAudio(&output));
+ ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));
ASSERT_EQ(1u, output.num_channels_);
ASSERT_EQ(expected_samples_per_channel, output.samples_per_channel_);
@@ -1062,7 +1007,8 @@ class NetEqBgnTest : public NetEqDecodingTest {
for (int n = 0; n < kFadingThreshold + kNumPlcToCngTestFrames; ++n) {
output.Reset();
memset(output.data_, 1, sizeof(output.data_)); // Set to non-zero.
- ASSERT_EQ(0, neteq_->GetAudio(&output));
+ ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));
+ ASSERT_FALSE(muted);
ASSERT_EQ(1u, output.num_channels_);
ASSERT_EQ(expected_samples_per_channel, output.samples_per_channel_);
if (output.speech_type_ == AudioFrame::kPLCCNG) {
@@ -1236,9 +1182,10 @@ TEST_F(NetEqDecodingTest, SyncPacketDecode) {
// Insert some packets which decode to noise. We are not interested in
// actual decoded values.
uint32_t receive_timestamp = 0;
+ bool muted;
for (int n = 0; n < 100; ++n) {
ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, receive_timestamp));
- ASSERT_EQ(0, neteq_->GetAudio(&output));
+ ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));
ASSERT_EQ(kBlockSize16kHz, output.samples_per_channel_);
ASSERT_EQ(1u, output.num_channels_);
@@ -1254,7 +1201,8 @@ TEST_F(NetEqDecodingTest, SyncPacketDecode) {
// Insert sync-packets, the decoded sequence should be all-zero.
for (int n = 0; n < kNumSyncPackets; ++n) {
ASSERT_EQ(0, neteq_->InsertSyncPacket(rtp_info, receive_timestamp));
- ASSERT_EQ(0, neteq_->GetAudio(&output));
+ ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));
+ ASSERT_FALSE(muted);
ASSERT_EQ(kBlockSize16kHz, output.samples_per_channel_);
ASSERT_EQ(1u, output.num_channels_);
if (n > algorithmic_frame_delay) {
@@ -1270,7 +1218,8 @@ TEST_F(NetEqDecodingTest, SyncPacketDecode) {
// network statistics would show some packet loss.
for (int n = 0; n <= algorithmic_frame_delay + 10; ++n) {
ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, receive_timestamp));
- ASSERT_EQ(0, neteq_->GetAudio(&output));
+ ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));
+ ASSERT_FALSE(muted);
if (n >= algorithmic_frame_delay + 1) {
// Expect that this frame contain samples from regular RTP.
EXPECT_TRUE(IsAllNonZero(
@@ -1306,9 +1255,10 @@ TEST_F(NetEqDecodingTest, SyncPacketBufferSizeAndOverridenByNetworkPackets) {
// actual decoded values.
uint32_t receive_timestamp = 0;
int algorithmic_frame_delay = algorithmic_delay_ms_ / 10 + 1;
+ bool muted;
for (int n = 0; n < algorithmic_frame_delay; ++n) {
ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, receive_timestamp));
- ASSERT_EQ(0, neteq_->GetAudio(&output));
+ ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));
ASSERT_EQ(kBlockSize16kHz, output.samples_per_channel_);
ASSERT_EQ(1u, output.num_channels_);
rtp_info.header.sequenceNumber++;
@@ -1345,7 +1295,8 @@ TEST_F(NetEqDecodingTest, SyncPacketBufferSizeAndOverridenByNetworkPackets) {
// Decode.
for (int n = 0; n < kNumSyncPackets; ++n) {
- ASSERT_EQ(0, neteq_->GetAudio(&output));
+ ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));
+ ASSERT_FALSE(muted);
ASSERT_EQ(kBlockSize16kHz, output.samples_per_channel_);
ASSERT_EQ(1u, output.num_channels_);
EXPECT_TRUE(IsAllNonZero(
@@ -1412,7 +1363,8 @@ void NetEqDecodingTest::WrapTest(uint16_t start_seq_no,
}
// Pull out data once.
AudioFrame output;
- ASSERT_EQ(0, neteq_->GetAudio(&output));
+ bool muted;
+ ASSERT_EQ(0, neteq_->GetAudio(&output, &muted));
ASSERT_EQ(kBlockSize16kHz, output.samples_per_channel_);
ASSERT_EQ(1u, output.num_channels_);
@@ -1468,6 +1420,7 @@ void NetEqDecodingTest::DuplicateCng() {
// correct.
uint8_t payload[kPayloadBytes] = {0};
WebRtcRTPHeader rtp_info;
+ bool muted;
for (int i = 0; i < 3; ++i) {
PopulateRtpInfo(seq_no, timestamp, &rtp_info);
ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
@@ -1475,7 +1428,7 @@ void NetEqDecodingTest::DuplicateCng() {
timestamp += kSamples;
// Pull audio once.
- ASSERT_EQ(0, neteq_->GetAudio(&out_frame_));
+ ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
}
// Verify speech output.
@@ -1492,7 +1445,7 @@ void NetEqDecodingTest::DuplicateCng() {
rtp_info, rtc::ArrayView<const uint8_t>(payload, payload_len), 0));
// Pull audio once and make sure CNG is played.
- ASSERT_EQ(0, neteq_->GetAudio(&out_frame_));
+ ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);
EXPECT_FALSE(PlayoutTimestamp()); // Returns empty value during CNG.
@@ -1508,7 +1461,7 @@ void NetEqDecodingTest::DuplicateCng() {
// Pull audio until we have played |kCngPeriodMs| of CNG. Start at 10 ms since
// we have already pulled out CNG once.
for (int cng_time_ms = 10; cng_time_ms < kCngPeriodMs; cng_time_ms += 10) {
- ASSERT_EQ(0, neteq_->GetAudio(&out_frame_));
+ ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);
EXPECT_FALSE(PlayoutTimestamp()); // Returns empty value during CNG.
@@ -1523,7 +1476,7 @@ void NetEqDecodingTest::DuplicateCng() {
ASSERT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
// Pull audio once and verify that the output is speech again.
- ASSERT_EQ(0, neteq_->GetAudio(&out_frame_));
+ ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_);
rtc::Optional<uint32_t> playout_timestamp = PlayoutTimestamp();
@@ -1561,7 +1514,8 @@ TEST_F(NetEqDecodingTest, CngFirst) {
timestamp += kCngPeriodSamples;
// Pull audio once and make sure CNG is played.
- ASSERT_EQ(0, neteq_->GetAudio(&out_frame_));
+ bool muted;
+ ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
EXPECT_EQ(AudioFrame::kCNG, out_frame_.speech_type_);
@@ -1573,10 +1527,261 @@ TEST_F(NetEqDecodingTest, CngFirst) {
timestamp += kSamples;
// Pull audio once.
- ASSERT_EQ(0, neteq_->GetAudio(&out_frame_));
+ ASSERT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
ASSERT_EQ(kBlockSize16kHz, out_frame_.samples_per_channel_);
}
// Verify speech output.
EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_);
}
+
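+// Test fixture that runs the decoding tests with NetEq's muted state enabled.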
+class NetEqDecodingTestWithMutedState : public NetEqDecodingTest {
+ public:
+ NetEqDecodingTestWithMutedState() : NetEqDecodingTest() {
+ config_.enable_muted_state = true;
+ }
+
+ protected:
+ static constexpr size_t kSamples = 10 * 16;
+ static constexpr size_t kPayloadBytes = kSamples * 2;
+
+ void InsertPacket(uint32_t rtp_timestamp) {
+ uint8_t payload[kPayloadBytes] = {0};
+ WebRtcRTPHeader rtp_info;
+ PopulateRtpInfo(0, rtp_timestamp, &rtp_info);
+ EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
+ }
+
+ bool GetAudioReturnMuted() {
+ bool muted;
+ EXPECT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
+ return muted;
+ }
+
+ void GetAudioUntilMuted() {
+ while (!GetAudioReturnMuted()) {
+ ASSERT_LT(counter_++, 1000) << "Test timed out";
+ }
+ }
+
+ void GetAudioUntilNormal() {
+ bool muted = false;
+ while (out_frame_.speech_type_ != AudioFrame::kNormalSpeech) {
+ EXPECT_EQ(0, neteq_->GetAudio(&out_frame_, &muted));
+ ASSERT_LT(counter_++, 1000) << "Test timed out";
+ }
+ EXPECT_FALSE(muted);
+ }
+
+ int counter_ = 0;
+};
+
+// Verifies that NetEq goes in and out of muted state as expected.
+TEST_F(NetEqDecodingTestWithMutedState, MutedState) {
+ // Insert one speech packet.
+ InsertPacket(0);
+ // Pull out audio once and expect it not to be muted.
+ EXPECT_FALSE(GetAudioReturnMuted());
+ // Pull data until faded out.
+ GetAudioUntilMuted();
+
+ // Verify that output audio is not written during muted mode. Other parameters
+ // should be correct, though.
+ AudioFrame new_frame;
+ for (auto& d : new_frame.data_) {
+ d = 17;
+ }
+ bool muted;
+ EXPECT_EQ(0, neteq_->GetAudio(&new_frame, &muted));
+ EXPECT_TRUE(muted);
+ for (auto d : new_frame.data_) {
+ EXPECT_EQ(17, d);
+ }
+ EXPECT_EQ(out_frame_.timestamp_ + out_frame_.samples_per_channel_,
+ new_frame.timestamp_);
+ EXPECT_EQ(out_frame_.samples_per_channel_, new_frame.samples_per_channel_);
+ EXPECT_EQ(out_frame_.sample_rate_hz_, new_frame.sample_rate_hz_);
+ EXPECT_EQ(out_frame_.num_channels_, new_frame.num_channels_);
+ EXPECT_EQ(out_frame_.speech_type_, new_frame.speech_type_);
+ EXPECT_EQ(out_frame_.vad_activity_, new_frame.vad_activity_);
+
+ // Insert new data. Timestamp is corrected for the time elapsed since the last
+ // packet. Verify that normal operation resumes.
+ InsertPacket(kSamples * counter_);
+ GetAudioUntilNormal();
+
+ NetEqNetworkStatistics stats;
+ EXPECT_EQ(0, neteq_->NetworkStatistics(&stats));
+ // NetEqNetworkStatistics::expand_rate tells the fraction of samples that were
+  // concealment samples, in Q14 (16384 = 100%). The vast majority should be
+ // concealment samples in this test.
+ EXPECT_GT(stats.expand_rate, 14000);
+ // And, it should be greater than the speech_expand_rate.
+ EXPECT_GT(stats.expand_rate, stats.speech_expand_rate);
+}
+
+// Verifies that NetEq goes out of muted state when given a delayed packet.
+TEST_F(NetEqDecodingTestWithMutedState, MutedStateDelayedPacket) {
+ // Insert one speech packet.
+ InsertPacket(0);
+ // Pull out audio once and expect it not to be muted.
+ EXPECT_FALSE(GetAudioReturnMuted());
+ // Pull data until faded out.
+ GetAudioUntilMuted();
+  // Insert new data. Timestamp is only corrected for half of the time
+ // elapsed since the last packet. That is, the new packet is delayed. Verify
+ // that normal operation resumes.
+ InsertPacket(kSamples * counter_ / 2);
+ GetAudioUntilNormal();
+}
+
+// Verifies that NetEq goes out of muted state when given a future packet.
+TEST_F(NetEqDecodingTestWithMutedState, MutedStateFuturePacket) {
+ // Insert one speech packet.
+ InsertPacket(0);
+ // Pull out audio once and expect it not to be muted.
+ EXPECT_FALSE(GetAudioReturnMuted());
+ // Pull data until faded out.
+ GetAudioUntilMuted();
+ // Insert new data. Timestamp is over-corrected for the time elapsed since the
+ // last packet. That is, the new packet is too early. Verify that normal
+ // operation resumes.
+ InsertPacket(kSamples * counter_ * 2);
+ GetAudioUntilNormal();
+}
+
+// Verifies that NetEq goes out of muted state when given an old packet.
+TEST_F(NetEqDecodingTestWithMutedState, MutedStateOldPacket) {
+ // Insert one speech packet.
+ InsertPacket(0);
+ // Pull out audio once and expect it not to be muted.
+ EXPECT_FALSE(GetAudioReturnMuted());
+ // Pull data until faded out.
+ GetAudioUntilMuted();
+
+ EXPECT_NE(AudioFrame::kNormalSpeech, out_frame_.speech_type_);
+ // Insert packet which is older than the first packet.
+ InsertPacket(kSamples * (counter_ - 1000));
+ EXPECT_FALSE(GetAudioReturnMuted());
+ EXPECT_EQ(AudioFrame::kNormalSpeech, out_frame_.speech_type_);
+}
+
+class NetEqDecodingTestTwoInstances : public NetEqDecodingTest {
+ public:
+ NetEqDecodingTestTwoInstances() : NetEqDecodingTest() {}
+
+ void SetUp() override {
+ NetEqDecodingTest::SetUp();
+ config2_ = config_;
+ }
+
+ void CreateSecondInstance() {
+ neteq2_.reset(NetEq::Create(config2_));
+ ASSERT_TRUE(neteq2_);
+ LoadDecoders(neteq2_.get());
+ }
+
+ protected:
+ std::unique_ptr<NetEq> neteq2_;
+ NetEq::Config config2_;
+};
+
+namespace {
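+// Compares all AudioFrame metadata fields but ignores the audio samples.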
+::testing::AssertionResult AudioFramesEqualExceptData(const AudioFrame& a,
+ const AudioFrame& b) {
+ if (a.timestamp_ != b.timestamp_)
+ return ::testing::AssertionFailure() << "timestamp_ diff (" << a.timestamp_
+ << " != " << b.timestamp_ << ")";
+ if (a.sample_rate_hz_ != b.sample_rate_hz_)
+ return ::testing::AssertionFailure() << "sample_rate_hz_ diff ("
+ << a.sample_rate_hz_
+ << " != " << b.sample_rate_hz_ << ")";
+ if (a.samples_per_channel_ != b.samples_per_channel_)
+ return ::testing::AssertionFailure()
+ << "samples_per_channel_ diff (" << a.samples_per_channel_
+ << " != " << b.samples_per_channel_ << ")";
+ if (a.num_channels_ != b.num_channels_)
+ return ::testing::AssertionFailure() << "num_channels_ diff ("
+ << a.num_channels_
+ << " != " << b.num_channels_ << ")";
+ if (a.speech_type_ != b.speech_type_)
+ return ::testing::AssertionFailure() << "speech_type_ diff ("
+ << a.speech_type_
+ << " != " << b.speech_type_ << ")";
+ if (a.vad_activity_ != b.vad_activity_)
+ return ::testing::AssertionFailure() << "vad_activity_ diff ("
+ << a.vad_activity_
+ << " != " << b.vad_activity_ << ")";
+ return ::testing::AssertionSuccess();
+}
+
+::testing::AssertionResult AudioFramesEqual(const AudioFrame& a,
+ const AudioFrame& b) {
+ ::testing::AssertionResult res = AudioFramesEqualExceptData(a, b);
+ if (!res)
+ return res;
+ if (memcmp(
+ a.data_, b.data_,
+ a.samples_per_channel_ * a.num_channels_ * sizeof(a.data_[0])) != 0) {
+ return ::testing::AssertionFailure() << "data_ diff";
+ }
+ return ::testing::AssertionSuccess();
+}
+
+} // namespace
+
+TEST_F(NetEqDecodingTestTwoInstances, CompareMutedStateOnOff) {
+ ASSERT_FALSE(config_.enable_muted_state);
+ config2_.enable_muted_state = true;
+ CreateSecondInstance();
+
+ // Insert one speech packet into both NetEqs.
+ const size_t kSamples = 10 * 16;
+ const size_t kPayloadBytes = kSamples * 2;
+ uint8_t payload[kPayloadBytes] = {0};
+ WebRtcRTPHeader rtp_info;
+ PopulateRtpInfo(0, 0, &rtp_info);
+ EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
+ EXPECT_EQ(0, neteq2_->InsertPacket(rtp_info, payload, 0));
+
+ AudioFrame out_frame1, out_frame2;
+ bool muted;
+ for (int i = 0; i < 1000; ++i) {
+ std::ostringstream ss;
+ ss << "i = " << i;
+ SCOPED_TRACE(ss.str()); // Print out the loop iterator on failure.
+ EXPECT_EQ(0, neteq_->GetAudio(&out_frame1, &muted));
+ EXPECT_FALSE(muted);
+ EXPECT_EQ(0, neteq2_->GetAudio(&out_frame2, &muted));
+ if (muted) {
+ EXPECT_TRUE(AudioFramesEqualExceptData(out_frame1, out_frame2));
+ } else {
+ EXPECT_TRUE(AudioFramesEqual(out_frame1, out_frame2));
+ }
+ }
+ EXPECT_TRUE(muted);
+
+ // Insert new data. Timestamp is corrected for the time elapsed since the last
+ // packet.
+ PopulateRtpInfo(0, kSamples * 1000, &rtp_info);
+ EXPECT_EQ(0, neteq_->InsertPacket(rtp_info, payload, 0));
+ EXPECT_EQ(0, neteq2_->InsertPacket(rtp_info, payload, 0));
+
+ int counter = 0;
+ while (out_frame1.speech_type_ != AudioFrame::kNormalSpeech) {
+ ASSERT_LT(counter++, 1000) << "Test timed out";
+ std::ostringstream ss;
+ ss << "counter = " << counter;
+ SCOPED_TRACE(ss.str()); // Print out the loop iterator on failure.
+ EXPECT_EQ(0, neteq_->GetAudio(&out_frame1, &muted));
+ EXPECT_FALSE(muted);
+ EXPECT_EQ(0, neteq2_->GetAudio(&out_frame2, &muted));
+ if (muted) {
+ EXPECT_TRUE(AudioFramesEqualExceptData(out_frame1, out_frame2));
+ } else {
+ EXPECT_TRUE(AudioFramesEqual(out_frame1, out_frame2));
+ }
+ }
+ EXPECT_FALSE(muted);
+}
+
} // namespace webrtc
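The tests above exercise the new muted-state path of NetEq::GetAudio(), which now reports muted output through a bool out-parameter. A minimal caller-side sketch, assuming only the API visible in this patch (Config::enable_muted_state, InsertPacket() and GetAudio(frame, &muted)); the playout policy in the comments is illustrative, not part of the patch:

#include "webrtc/modules/audio_coding/neteq/include/neteq.h"
#include "webrtc/modules/include/module_common_types.h"

void PullOneFrame(webrtc::NetEq* neteq) {
  webrtc::AudioFrame frame;
  bool muted = false;
  // Pull one 10 ms frame. With enable_muted_state set, NetEq stops
  // synthesizing audio once expansion has faded out and flags the frame
  // as muted instead.
  if (neteq->GetAudio(&frame, &muted) != webrtc::NetEq::kOK)
    return;
  if (muted) {
    // The caller decides what to play out (e.g. silence) until a new
    // packet is inserted and normal decoding resumes.
  }
}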
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/normal.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/normal.cc
index 9bddfe77657..f99b3f200f4 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/normal.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/normal.cc
@@ -16,7 +16,6 @@
#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
#include "webrtc/modules/audio_coding/codecs/audio_decoder.h"
-#include "webrtc/modules/audio_coding/codecs/cng/webrtc_cng.h"
#include "webrtc/modules/audio_coding/neteq/audio_multi_vector.h"
#include "webrtc/modules/audio_coding/neteq/background_noise.h"
#include "webrtc/modules/audio_coding/neteq/decoder_database.h"
@@ -43,7 +42,6 @@ int Normal::Process(const int16_t* input,
return 0;
}
output->PushBackInterleaved(input, length);
- int16_t* signal = &(*output)[0][0];
const int fs_mult = fs_hz_ / 8000;
assert(fs_mult > 0);
@@ -64,24 +62,26 @@ int Normal::Process(const int16_t* input,
expand_->Process(&expanded);
expand_->Reset();
+ size_t length_per_channel = length / output->Channels();
+ std::unique_ptr<int16_t[]> signal(new int16_t[length_per_channel]);
for (size_t channel_ix = 0; channel_ix < output->Channels(); ++channel_ix) {
// Adjust muting factor (main muting factor times expand muting factor).
external_mute_factor_array[channel_ix] = static_cast<int16_t>(
(external_mute_factor_array[channel_ix] *
expand_->MuteFactor(channel_ix)) >> 14);
- int16_t* signal = &(*output)[channel_ix][0];
- size_t length_per_channel = length / output->Channels();
+ (*output)[channel_ix].CopyTo(length_per_channel, 0, signal.get());
+
// Find largest absolute value in new data.
int16_t decoded_max =
- WebRtcSpl_MaxAbsValueW16(signal, length_per_channel);
+ WebRtcSpl_MaxAbsValueW16(signal.get(), length_per_channel);
// Adjust muting factor if needed (to BGN level).
size_t energy_length =
std::min(static_cast<size_t>(fs_mult * 64), length_per_channel);
int scaling = 6 + fs_shift
- WebRtcSpl_NormW32(decoded_max * decoded_max);
scaling = std::max(scaling, 0); // |scaling| should always be >= 0.
- int32_t energy = WebRtcSpl_DotProductWithScale(signal, signal,
+ int32_t energy = WebRtcSpl_DotProductWithScale(signal.get(), signal.get(),
energy_length, scaling);
int32_t scaled_energy_length =
static_cast<int32_t>(energy_length >> scaling);
@@ -149,19 +149,18 @@ int Normal::Process(const int16_t* input,
int16_t cng_output[kCngLength];
// Reset mute factor and start up fresh.
external_mute_factor_array[0] = 16384;
- AudioDecoder* cng_decoder = decoder_database_->GetActiveCngDecoder();
+ ComfortNoiseDecoder* cng_decoder = decoder_database_->GetActiveCngDecoder();
if (cng_decoder) {
// Generate long enough for 32kHz.
- if (WebRtcCng_Generate(cng_decoder->CngDecoderInstance(), cng_output,
- kCngLength, 0) < 0) {
+ if (!cng_decoder->Generate(cng_output, 0)) {
// Error returned; set return vector to all zeros.
memset(cng_output, 0, sizeof(cng_output));
}
} else {
// If no CNG instance is defined, just copy from the decoded data.
// (This will result in interpolating the decoded with itself.)
- memcpy(cng_output, signal, fs_mult * 8 * sizeof(int16_t));
+ (*output)[0].CopyTo(fs_mult * 8, 0, cng_output);
}
// Interpolate the CNG into the new vector.
// (NB/WB/SWB32/SWB48 8/16/32/48 samples.)
@@ -171,8 +170,8 @@ int Normal::Process(const int16_t* input,
for (size_t i = 0; i < static_cast<size_t>(8 * fs_mult); i++) {
// TODO(hlundin): Add 16 instead of 8 for correct rounding. Keeping 8 now
// for legacy bit-exactness.
- signal[i] =
- (fraction * signal[i] + (32 - fraction) * cng_output[i] + 8) >> 5;
+ (*output)[0][i] = (fraction * (*output)[0][i] +
+ (32 - fraction) * cng_output[i] + 8) >> 5;
fraction += increment;
}
} else if (external_mute_factor_array[0] < 16384) {
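The hunks above stop aliasing the output vector through a raw int16_t pointer and instead copy each channel into a scratch buffer before running the fixed-point SPL routines. A minimal sketch of that pattern, assuming AudioVector::CopyTo(length, position, destination) as used in this patch; the energy computation is reduced to the max-value call for brevity:

size_t length_per_channel = length / output->Channels();
std::unique_ptr<int16_t[]> scratch(new int16_t[length_per_channel]);
for (size_t ch = 0; ch < output->Channels(); ++ch) {
  // Copy the channel into contiguous storage; the SPL helpers expect a
  // plain int16_t array rather than a pointer into the AudioMultiVector.
  (*output)[ch].CopyTo(length_per_channel, 0, scratch.get());
  const int16_t max_abs =
      WebRtcSpl_MaxAbsValueW16(scratch.get(), length_per_channel);
  (void)max_abs;  // Used to derive the scaling factor in Normal::Process().
}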
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/normal_unittest.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/normal_unittest.cc
index f98e99a82d8..5e1fc131e50 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/normal_unittest.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/normal_unittest.cc
@@ -27,9 +27,20 @@
#include "webrtc/modules/audio_coding/neteq/sync_buffer.h"
using ::testing::_;
+using ::testing::Invoke;
namespace webrtc {
+namespace {
+
+int ExpandProcess120ms(AudioMultiVector* output) {
+ AudioMultiVector dummy_audio(1, 11520u);
+ dummy_audio.CopyTo(output);
+ return 0;
+}
+
+} // namespace
+
TEST(Normal, CreateAndDestroy) {
MockDecoderDatabase db;
int fs = 8000;
@@ -121,6 +132,45 @@ TEST(Normal, InputLengthAndChannelsDoNotMatch) {
EXPECT_CALL(expand, Die()); // Called when |expand| goes out of scope.
}
+TEST(Normal, LastModeExpand120msPacket) {
+ WebRtcSpl_Init();
+ MockDecoderDatabase db;
+ const int kFs = 48000;
+ const size_t kPacketsizeBytes = 11520u;
+ const size_t kChannels = 1;
+ BackgroundNoise bgn(kChannels);
+ SyncBuffer sync_buffer(kChannels, 1000);
+ RandomVector random_vector;
+ StatisticsCalculator statistics;
+ MockExpand expand(&bgn, &sync_buffer, &random_vector, &statistics, kFs,
+ kChannels);
+ Normal normal(kFs, &db, bgn, &expand);
+
+ int16_t input[kPacketsizeBytes] = {0};
+
+ std::unique_ptr<int16_t[]> mute_factor_array(new int16_t[kChannels]);
+ for (size_t i = 0; i < kChannels; ++i) {
+ mute_factor_array[i] = 16384;
+ }
+
+ AudioMultiVector output(kChannels);
+
+ EXPECT_CALL(expand, SetParametersForNormalAfterExpand());
+ EXPECT_CALL(expand, Process(_)).WillOnce(Invoke(ExpandProcess120ms));
+ EXPECT_CALL(expand, Reset());
+ EXPECT_EQ(static_cast<int>(kPacketsizeBytes),
+ normal.Process(input,
+ kPacketsizeBytes,
+ kModeExpand,
+ mute_factor_array.get(),
+ &output));
+
+ EXPECT_EQ(kPacketsizeBytes, output.Size());
+
+ EXPECT_CALL(db, Die()); // Called when |db| goes out of scope.
+ EXPECT_CALL(expand, Die()); // Called when |expand| goes out of scope.
+}
+
// TODO(hlundin): Write more tests.
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/packet.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/packet.cc
new file mode 100644
index 00000000000..8a19fe4d592
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/packet.cc
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_coding/neteq/packet.h"
+
+namespace webrtc {
+
+Packet::Packet() = default;
+
+Packet::~Packet() = default;
+
+} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/packet.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/packet.h
index 64b325e027a..d6f64c7e088 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/packet.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/packet.h
@@ -12,7 +12,9 @@
#define WEBRTC_MODULES_AUDIO_CODING_NETEQ_PACKET_H_
#include <list>
+#include <memory>
+#include "webrtc/modules/audio_coding/neteq/tick_timer.h"
#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/typedefs.h"
@@ -21,20 +23,15 @@ namespace webrtc {
// Struct for holding RTP packets.
struct Packet {
RTPHeader header;
- uint8_t* payload; // Datagram excluding RTP header and header extension.
- size_t payload_length;
- bool primary; // Primary, i.e., not redundant payload.
- int waiting_time;
- bool sync_packet;
+ // Datagram excluding RTP header and header extension.
+ uint8_t* payload = nullptr;
+ size_t payload_length = 0;
+ bool primary = true; // Primary, i.e., not redundant payload.
+ bool sync_packet = false;
+ std::unique_ptr<TickTimer::Stopwatch> waiting_time;
- // Constructor.
- Packet()
- : payload(NULL),
- payload_length(0),
- primary(true),
- waiting_time(0),
- sync_packet(false) {
- }
+ Packet();
+ ~Packet();
// Comparison operators. Establish a packet ordering based on (1) timestamp,
// (2) sequence number, (3) regular packet vs sync-packet and (4) redundancy.
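With the struct above, per-packet waiting time is now tracked by a TickTimer::Stopwatch owned by the packet instead of a manually incremented counter. A minimal sketch of the flow, assuming the stopwatch assignment done by PacketBuffer::InsertPacket() later in this patch:

#include "webrtc/modules/audio_coding/neteq/packet.h"
#include "webrtc/modules/audio_coding/neteq/tick_timer.h"

uint64_t WaitingTimeMs(webrtc::TickTimer* tick_timer, webrtc::Packet* packet) {
  // Started when the packet enters the buffer (PacketBuffer::InsertPacket).
  packet->waiting_time = tick_timer->GetNewStopwatch();
  // The owner of |tick_timer| calls Increment() once per 10 ms tick; the
  // elapsed waiting time can then be read directly from the stopwatch.
  return packet->waiting_time->ElapsedMs();
}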
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/packet_buffer.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/packet_buffer.cc
index c89de12318b..f1b898e34cf 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/packet_buffer.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/packet_buffer.cc
@@ -19,6 +19,7 @@
#include "webrtc/base/logging.h"
#include "webrtc/modules/audio_coding/codecs/audio_decoder.h"
#include "webrtc/modules/audio_coding/neteq/decoder_database.h"
+#include "webrtc/modules/audio_coding/neteq/tick_timer.h"
namespace webrtc {
@@ -37,8 +38,9 @@ class NewTimestampIsLarger {
const Packet* new_packet_;
};
-PacketBuffer::PacketBuffer(size_t max_number_of_packets)
- : max_number_of_packets_(max_number_of_packets) {}
+PacketBuffer::PacketBuffer(size_t max_number_of_packets,
+ const TickTimer* tick_timer)
+ : max_number_of_packets_(max_number_of_packets), tick_timer_(tick_timer) {}
// Destructor. All packets in the buffer will be destroyed.
PacketBuffer::~PacketBuffer() {
@@ -65,6 +67,8 @@ int PacketBuffer::InsertPacket(Packet* packet) {
int return_val = kOK;
+ packet->waiting_time = tick_timer_->GetNewStopwatch();
+
if (buffer_.size() >= max_number_of_packets_) {
// Buffer is full. Flush it.
Flush();
@@ -268,13 +272,6 @@ size_t PacketBuffer::NumSamplesInBuffer(DecoderDatabase* decoder_database,
return num_samples;
}
-void PacketBuffer::IncrementWaitingTimes(int inc) {
- PacketList::iterator it;
- for (it = buffer_.begin(); it != buffer_.end(); ++it) {
- (*it)->waiting_time += inc;
- }
-}
-
bool PacketBuffer::DeleteFirstPacket(PacketList* packet_list) {
if (packet_list->empty()) {
return false;
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/packet_buffer.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/packet_buffer.h
index 03c11e61b6e..6867b4cb37e 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/packet_buffer.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/packet_buffer.h
@@ -17,8 +17,8 @@
namespace webrtc {
-// Forward declaration.
class DecoderDatabase;
+class TickTimer;
// This is the actual buffer holding the packets before decoding.
class PacketBuffer {
@@ -34,7 +34,7 @@ class PacketBuffer {
// Constructor creates a buffer which can hold a maximum of
// |max_number_of_packets| packets.
- PacketBuffer(size_t max_number_of_packets);
+ PacketBuffer(size_t max_number_of_packets, const TickTimer* tick_timer);
// Deletes all packets in the buffer before destroying the buffer.
virtual ~PacketBuffer();
@@ -116,10 +116,6 @@ class PacketBuffer {
virtual size_t NumSamplesInBuffer(DecoderDatabase* decoder_database,
size_t last_decoded_length) const;
- // Increase the waiting time counter for every packet in the buffer by |inc|.
- // The default value for |inc| is 1.
- virtual void IncrementWaitingTimes(int inc = 1);
-
virtual void BufferStat(int* num_packets, int* max_num_packets) const;
// Static method that properly deletes the first packet, and its payload
@@ -148,6 +144,7 @@ class PacketBuffer {
private:
size_t max_number_of_packets_;
PacketList buffer_;
+ const TickTimer* tick_timer_;
RTC_DISALLOW_COPY_AND_ASSIGN(PacketBuffer);
};
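Since PacketBuffer only stores the raw tick_timer pointer, the caller must keep the TickTimer alive for the buffer's whole lifetime, as the updated tests below do:

webrtc::TickTimer tick_timer;
webrtc::PacketBuffer buffer(10, &tick_timer);  // Holds at most 10 packets.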
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/packet_buffer_unittest.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/packet_buffer_unittest.cc
index 435b6c848dc..da353010857 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/packet_buffer_unittest.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/packet_buffer_unittest.cc
@@ -16,6 +16,7 @@
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/modules/audio_coding/neteq/mock/mock_decoder_database.h"
#include "webrtc/modules/audio_coding/neteq/packet.h"
+#include "webrtc/modules/audio_coding/neteq/tick_timer.h"
using ::testing::Return;
using ::testing::_;
@@ -80,13 +81,15 @@ struct PacketsToInsert {
// Start of test definitions.
TEST(PacketBuffer, CreateAndDestroy) {
- PacketBuffer* buffer = new PacketBuffer(10); // 10 packets.
+ TickTimer tick_timer;
+ PacketBuffer* buffer = new PacketBuffer(10, &tick_timer); // 10 packets.
EXPECT_TRUE(buffer->Empty());
delete buffer;
}
TEST(PacketBuffer, InsertPacket) {
- PacketBuffer buffer(10); // 10 packets.
+ TickTimer tick_timer;
+ PacketBuffer buffer(10, &tick_timer); // 10 packets.
PacketGenerator gen(17u, 4711u, 0, 10);
const int payload_len = 100;
@@ -107,7 +110,8 @@ TEST(PacketBuffer, InsertPacket) {
// Test to flush buffer.
TEST(PacketBuffer, FlushBuffer) {
- PacketBuffer buffer(10); // 10 packets.
+ TickTimer tick_timer;
+ PacketBuffer buffer(10, &tick_timer); // 10 packets.
PacketGenerator gen(0, 0, 0, 10);
const int payload_len = 10;
@@ -127,7 +131,8 @@ TEST(PacketBuffer, FlushBuffer) {
// Test to fill the buffer over the limits, and verify that it flushes.
TEST(PacketBuffer, OverfillBuffer) {
- PacketBuffer buffer(10); // 10 packets.
+ TickTimer tick_timer;
+ PacketBuffer buffer(10, &tick_timer); // 10 packets.
PacketGenerator gen(0, 0, 0, 10);
// Insert 10 small packets; should be ok.
@@ -156,7 +161,8 @@ TEST(PacketBuffer, OverfillBuffer) {
// Test inserting a list of packets.
TEST(PacketBuffer, InsertPacketList) {
- PacketBuffer buffer(10); // 10 packets.
+ TickTimer tick_timer;
+ PacketBuffer buffer(10, &tick_timer); // 10 packets.
PacketGenerator gen(0, 0, 0, 10);
PacketList list;
const int payload_len = 10;
@@ -192,7 +198,8 @@ TEST(PacketBuffer, InsertPacketList) {
// Expecting the buffer to flush.
// TODO(hlundin): Remove this test when legacy operation is no longer needed.
TEST(PacketBuffer, InsertPacketListChangePayloadType) {
- PacketBuffer buffer(10); // 10 packets.
+ TickTimer tick_timer;
+ PacketBuffer buffer(10, &tick_timer); // 10 packets.
PacketGenerator gen(0, 0, 0, 10);
PacketList list;
const int payload_len = 10;
@@ -230,7 +237,8 @@ TEST(PacketBuffer, InsertPacketListChangePayloadType) {
}
TEST(PacketBuffer, ExtractOrderRedundancy) {
- PacketBuffer buffer(100); // 100 packets.
+ TickTimer tick_timer;
+ PacketBuffer buffer(100, &tick_timer); // 100 packets.
const int kPackets = 18;
const int kFrameSize = 10;
const int kPayloadLength = 10;
@@ -289,7 +297,8 @@ TEST(PacketBuffer, ExtractOrderRedundancy) {
}
TEST(PacketBuffer, DiscardPackets) {
- PacketBuffer buffer(100); // 100 packets.
+ TickTimer tick_timer;
+ PacketBuffer buffer(100, &tick_timer); // 100 packets.
const uint16_t start_seq_no = 17;
const uint32_t start_ts = 4711;
const uint32_t ts_increment = 10;
@@ -318,7 +327,8 @@ TEST(PacketBuffer, DiscardPackets) {
}
TEST(PacketBuffer, Reordering) {
- PacketBuffer buffer(100); // 100 packets.
+ TickTimer tick_timer;
+ PacketBuffer buffer(100, &tick_timer); // 100 packets.
const uint16_t start_seq_no = 17;
const uint32_t start_ts = 4711;
const uint32_t ts_increment = 10;
@@ -373,8 +383,9 @@ TEST(PacketBuffer, Failures) {
const uint32_t ts_increment = 10;
int payload_len = 100;
PacketGenerator gen(start_seq_no, start_ts, 0, ts_increment);
+ TickTimer tick_timer;
- PacketBuffer* buffer = new PacketBuffer(100); // 100 packets.
+ PacketBuffer* buffer = new PacketBuffer(100, &tick_timer); // 100 packets.
Packet* packet = NULL;
EXPECT_EQ(PacketBuffer::kInvalidPacket, buffer->InsertPacket(packet));
packet = gen.NextPacket(payload_len);
@@ -404,7 +415,7 @@ TEST(PacketBuffer, Failures) {
// Insert packet list of three packets, where the second packet has an invalid
// payload. Expect first packet to be inserted, and the remaining two to be
// discarded.
- buffer = new PacketBuffer(100); // 100 packets.
+ buffer = new PacketBuffer(100, &tick_timer); // 100 packets.
PacketList list;
list.push_back(gen.NextPacket(payload_len)); // Valid packet.
packet = gen.NextPacket(payload_len);
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/payload_splitter.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/payload_splitter.cc
index 85307181341..530e9d064dc 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/payload_splitter.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/payload_splitter.cc
@@ -12,6 +12,7 @@
#include <assert.h>
+#include "webrtc/base/checks.h"
#include "webrtc/base/logging.h"
#include "webrtc/modules/audio_coding/neteq/decoder_database.h"
@@ -143,8 +144,9 @@ int PayloadSplitter::SplitFec(PacketList* packet_list,
// Not an FEC packet.
AudioDecoder* decoder = decoder_database->GetDecoder(payload_type);
- // decoder should not return NULL.
- assert(decoder != NULL);
+ // |decoder| should not be NULL, except for comfort noise payloads, which
+ // are handled separately.
+ assert(decoder != NULL || decoder_database->IsComfortNoise(payload_type));
if (!decoder ||
!decoder->PacketHasFec(packet->payload, packet->payload_length)) {
++it;
@@ -167,8 +169,9 @@ int PayloadSplitter::SplitFec(PacketList* packet_list,
memcpy(new_packet->payload, packet->payload, packet->payload_length);
new_packet->payload_length = packet->payload_length;
new_packet->primary = false;
- new_packet->waiting_time = packet->waiting_time;
new_packet->sync_packet = packet->sync_packet;
+ // Waiting time should not be set here.
+ RTC_DCHECK(!packet->waiting_time);
packet_list->insert(it, new_packet);
break;
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/payload_splitter_unittest.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/payload_splitter_unittest.cc
index a68e8d68a98..63772452da6 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/payload_splitter_unittest.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/payload_splitter_unittest.cc
@@ -18,6 +18,8 @@
#include <utility> // pair
#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/modules/audio_coding/codecs/builtin_audio_decoder_factory.h"
+#include "webrtc/modules/audio_coding/codecs/mock/mock_audio_decoder_factory.h"
#include "webrtc/modules/audio_coding/neteq/mock/mock_decoder_database.h"
#include "webrtc/modules/audio_coding/neteq/packet.h"
@@ -309,7 +311,8 @@ TEST(RedPayloadSplitter, CheckRedPayloads) {
// Use a real DecoderDatabase object here instead of a mock, since it is
// easier to just register the payload types and let the actual implementation
// do its job.
- DecoderDatabase decoder_database;
+ std::unique_ptr<MockAudioDecoderFactory> factory(new MockAudioDecoderFactory);
+ DecoderDatabase decoder_database(std::move(factory));
decoder_database.RegisterPayload(0, NetEqDecoder::kDecoderCNGnb, "cng-nb");
decoder_database.RegisterPayload(1, NetEqDecoder::kDecoderPCMu, "pcmu");
decoder_database.RegisterPayload(2, NetEqDecoder::kDecoderAVT, "avt");
@@ -372,33 +375,33 @@ TEST(AudioPayloadSplitter, NonSplittable) {
// codec types.
// Use scoped pointers to avoid having to delete them later.
std::unique_ptr<DecoderDatabase::DecoderInfo> info0(
- new DecoderDatabase::DecoderInfo(NetEqDecoder::kDecoderISAC, 16000, NULL,
- false));
+ new DecoderDatabase::DecoderInfo(NetEqDecoder::kDecoderISAC, "", 16000,
+ nullptr));
EXPECT_CALL(decoder_database, GetDecoderInfo(0))
.WillRepeatedly(Return(info0.get()));
std::unique_ptr<DecoderDatabase::DecoderInfo> info1(
- new DecoderDatabase::DecoderInfo(NetEqDecoder::kDecoderISACswb, 32000,
- NULL, false));
+ new DecoderDatabase::DecoderInfo(NetEqDecoder::kDecoderISACswb, "", 32000,
+ nullptr));
EXPECT_CALL(decoder_database, GetDecoderInfo(1))
.WillRepeatedly(Return(info1.get()));
std::unique_ptr<DecoderDatabase::DecoderInfo> info2(
- new DecoderDatabase::DecoderInfo(NetEqDecoder::kDecoderRED, 8000, NULL,
- false));
+ new DecoderDatabase::DecoderInfo(NetEqDecoder::kDecoderRED, "", 8000,
+ nullptr));
EXPECT_CALL(decoder_database, GetDecoderInfo(2))
.WillRepeatedly(Return(info2.get()));
std::unique_ptr<DecoderDatabase::DecoderInfo> info3(
- new DecoderDatabase::DecoderInfo(NetEqDecoder::kDecoderAVT, 8000, NULL,
- false));
+ new DecoderDatabase::DecoderInfo(NetEqDecoder::kDecoderAVT, "", 8000,
+ nullptr));
EXPECT_CALL(decoder_database, GetDecoderInfo(3))
.WillRepeatedly(Return(info3.get()));
std::unique_ptr<DecoderDatabase::DecoderInfo> info4(
- new DecoderDatabase::DecoderInfo(NetEqDecoder::kDecoderCNGnb, 8000, NULL,
- false));
+ new DecoderDatabase::DecoderInfo(NetEqDecoder::kDecoderCNGnb, "", 8000,
+ nullptr));
EXPECT_CALL(decoder_database, GetDecoderInfo(4))
.WillRepeatedly(Return(info4.get()));
std::unique_ptr<DecoderDatabase::DecoderInfo> info5(
- new DecoderDatabase::DecoderInfo(NetEqDecoder::kDecoderArbitrary, 8000,
- NULL, false));
+ new DecoderDatabase::DecoderInfo(NetEqDecoder::kDecoderArbitrary, "",
+ 8000, nullptr));
EXPECT_CALL(decoder_database, GetDecoderInfo(5))
.WillRepeatedly(Return(info5.get()));
@@ -536,7 +539,7 @@ TEST_P(SplitBySamplesTest, PayloadSizes) {
// Use scoped pointers to avoid having to delete them later.
// (Sample rate is set to 8000 Hz, but does not matter.)
std::unique_ptr<DecoderDatabase::DecoderInfo> info(
- new DecoderDatabase::DecoderInfo(decoder_type_, 8000, NULL, false));
+ new DecoderDatabase::DecoderInfo(decoder_type_, "", 8000, nullptr));
EXPECT_CALL(decoder_database, GetDecoderInfo(kPayloadType))
.WillRepeatedly(Return(info.get()));
@@ -623,8 +626,8 @@ TEST_P(SplitIlbcTest, NumFrames) {
// codec types.
// Use scoped pointers to avoid having to delete them later.
std::unique_ptr<DecoderDatabase::DecoderInfo> info(
- new DecoderDatabase::DecoderInfo(NetEqDecoder::kDecoderILBC, 8000, NULL,
- false));
+ new DecoderDatabase::DecoderInfo(NetEqDecoder::kDecoderILBC, "", 8000,
+ nullptr));
EXPECT_CALL(decoder_database, GetDecoderInfo(kPayloadType))
.WillRepeatedly(Return(info.get()));
@@ -687,8 +690,8 @@ TEST(IlbcPayloadSplitter, TooLargePayload) {
MockDecoderDatabase decoder_database;
std::unique_ptr<DecoderDatabase::DecoderInfo> info(
- new DecoderDatabase::DecoderInfo(NetEqDecoder::kDecoderILBC, 8000, NULL,
- false));
+ new DecoderDatabase::DecoderInfo(NetEqDecoder::kDecoderILBC, "", 8000,
+ nullptr));
EXPECT_CALL(decoder_database, GetDecoderInfo(kPayloadType))
.WillRepeatedly(Return(info.get()));
@@ -719,8 +722,8 @@ TEST(IlbcPayloadSplitter, UnevenPayload) {
MockDecoderDatabase decoder_database;
std::unique_ptr<DecoderDatabase::DecoderInfo> info(
- new DecoderDatabase::DecoderInfo(NetEqDecoder::kDecoderILBC, 8000, NULL,
- false));
+ new DecoderDatabase::DecoderInfo(NetEqDecoder::kDecoderILBC, "", 8000,
+ nullptr));
EXPECT_CALL(decoder_database, GetDecoderInfo(kPayloadType))
.WillRepeatedly(Return(info.get()));
@@ -743,7 +746,7 @@ TEST(IlbcPayloadSplitter, UnevenPayload) {
TEST(FecPayloadSplitter, MixedPayload) {
PacketList packet_list;
- DecoderDatabase decoder_database;
+ DecoderDatabase decoder_database(CreateBuiltinAudioDecoderFactory());
decoder_database.RegisterPayload(0, NetEqDecoder::kDecoderOpus, "opus");
decoder_database.RegisterPayload(1, NetEqDecoder::kDecoderPCMu, "pcmu");
@@ -798,7 +801,7 @@ TEST(FecPayloadSplitter, MixedPayload) {
TEST(FecPayloadSplitter, EmbedFecInRed) {
PacketList packet_list;
- DecoderDatabase decoder_database;
+ DecoderDatabase decoder_database(CreateBuiltinAudioDecoderFactory());
const int kTimestampOffset = 20 * 48; // 20 ms * 48 kHz.
uint8_t payload_types[] = {0, 0};
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/test/RTPencode.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/test/RTPencode.cc
index 45586ee111c..149f2826582 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/test/RTPencode.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/test/RTPencode.cc
@@ -265,7 +265,7 @@ GSMFR_encinst_t* GSMFRenc_inst[2];
#endif
#if (defined(CODEC_CNGCODEC8) || defined(CODEC_CNGCODEC16) || \
defined(CODEC_CNGCODEC32) || defined(CODEC_CNGCODEC48))
-CNG_enc_inst* CNGenc_inst[2];
+webrtc::ComfortNoiseEncoder* CNG_encoder[2];
#endif
#ifdef CODEC_SPEEX_8
SPEEX_encinst_t* SPEEX8enc_inst[2];
@@ -928,18 +928,8 @@ int NetEQTest_init_coders(webrtc::NetEqDecoder coder,
#if (defined(CODEC_CNGCODEC8) || defined(CODEC_CNGCODEC16) || \
defined(CODEC_CNGCODEC32) || defined(CODEC_CNGCODEC48))
- ok = WebRtcCng_CreateEnc(&CNGenc_inst[k]);
- if (ok != 0) {
- printf("Error: Couldn't allocate memory for CNG encoding instance\n");
- exit(0);
- }
if (sampfreq <= 16000) {
- ok = WebRtcCng_InitEnc(CNGenc_inst[k], sampfreq, 200, 5);
- if (ok == -1) {
- printf("Error: Initialization of CNG struct failed. Error code %d\n",
- WebRtcCng_GetErrorCodeEnc(CNGenc_inst[k]));
- exit(0);
- }
+ CNG_encoder[k] = new webrtc::ComfortNoiseEncoder(sampfreq, 200, 5);
}
#endif
@@ -1461,7 +1451,8 @@ int NetEQTest_free_coders(webrtc::NetEqDecoder coder, size_t numChannels) {
WebRtcVad_Free(VAD_inst[k]);
#if (defined(CODEC_CNGCODEC8) || defined(CODEC_CNGCODEC16) || \
defined(CODEC_CNGCODEC32) || defined(CODEC_CNGCODEC48))
- WebRtcCng_FreeEnc(CNGenc_inst[k]);
+ delete CNG_encoder[k];
+ CNG_encoder[k] = nullptr;
#endif
switch (coder) {
@@ -1600,7 +1591,7 @@ size_t NetEQTest_encode(webrtc::NetEqDecoder coder,
size_t numChannels) {
size_t cdlen = 0;
int16_t* tempdata;
- static int first_cng = 1;
+ static bool first_cng = true;
size_t tempLen;
*vad = 1;
@@ -1608,9 +1599,9 @@ size_t NetEQTest_encode(webrtc::NetEqDecoder coder,
if (useVAD) {
*vad = 0;
- size_t sampleRate_10 = static_cast<size_t>(10 * sampleRate / 1000);
- size_t sampleRate_20 = static_cast<size_t>(20 * sampleRate / 1000);
- size_t sampleRate_30 = static_cast<size_t>(30 * sampleRate / 1000);
+ const size_t sampleRate_10 = static_cast<size_t>(10 * sampleRate / 1000);
+ const size_t sampleRate_20 = static_cast<size_t>(20 * sampleRate / 1000);
+ const size_t sampleRate_30 = static_cast<size_t>(30 * sampleRate / 1000);
for (size_t k = 0; k < numChannels; k++) {
tempLen = frameLen;
tempdata = &indata[k * frameLen];
@@ -1642,16 +1633,22 @@ size_t NetEQTest_encode(webrtc::NetEqDecoder coder,
if (!*vad) {
// all channels are silent
+ rtc::Buffer workaround;
cdlen = 0;
for (size_t k = 0; k < numChannels; k++) {
- WebRtcCng_Encode(CNGenc_inst[k], &indata[k * frameLen],
- (frameLen <= 640 ? frameLen : 640) /* max 640 */,
- encoded, &tempLen, first_cng);
+ workaround.Clear();
+ tempLen = CNG_encoder[k]->Encode(
+ rtc::ArrayView<const int16_t>(
+ &indata[k * frameLen],
+ (frameLen <= 640 ? frameLen : 640) /* max 640 */),
+ first_cng,
+ &workaround);
+ memcpy(encoded, workaround.data(), tempLen);
encoded += tempLen;
cdlen += tempLen;
}
*vad = 0;
- first_cng = 0;
+ first_cng = false;
return (cdlen);
}
}
@@ -1734,7 +1731,7 @@ size_t NetEQTest_encode(webrtc::NetEqDecoder coder,
} // end for
- first_cng = 1;
+ first_cng = true;
return (totalLen);
}
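The encoder-side change above replaces the C CNG API with the ComfortNoiseEncoder class, which appends its output to an rtc::Buffer (hence the |workaround| copy into the flat |encoded| array). A minimal sketch, with the constructor arguments assumed to mean sample rate, SID interval in ms and quality, as suggested by the WebRtcCng_InitEnc() call it replaces:

webrtc::ComfortNoiseEncoder cng_encoder(8000, 200, 5);
rtc::Buffer sid;
int16_t silence[80] = {0};  // 10 ms of silence at 8 kHz.
// The bool flag corresponds to |first_cng| above (assumed to force a SID frame).
size_t bytes = cng_encoder.Encode(
    rtc::ArrayView<const int16_t>(silence, 80), true, &sid);
// |sid| now holds |bytes| bytes of encoded comfort-noise parameters.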
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/test/neteq_isac_quality_test.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/test/neteq_isac_quality_test.cc
index 2ebd1927bc4..62bfc1b3cb5 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/test/neteq_isac_quality_test.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/test/neteq_isac_quality_test.cc
@@ -43,8 +43,8 @@ class NetEqIsacQualityTest : public NetEqQualityTest {
NetEqIsacQualityTest();
void SetUp() override;
void TearDown() override;
- virtual int EncodeBlock(int16_t* in_data, size_t block_size_samples,
- rtc::Buffer* payload, size_t max_bytes);
+ int EncodeBlock(int16_t* in_data, size_t block_size_samples,
+ rtc::Buffer* payload, size_t max_bytes) override;
private:
ISACFIX_MainStruct* isac_encoder_;
int bit_rate_kbps_;
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/test/neteq_opus_quality_test.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/test/neteq_opus_quality_test.cc
index baa0d67aded..a6117a4c5b6 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/test/neteq_opus_quality_test.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/test/neteq_opus_quality_test.cc
@@ -103,8 +103,8 @@ class NetEqOpusQualityTest : public NetEqQualityTest {
NetEqOpusQualityTest();
void SetUp() override;
void TearDown() override;
- virtual int EncodeBlock(int16_t* in_data, size_t block_size_samples,
- rtc::Buffer* payload, size_t max_bytes);
+ int EncodeBlock(int16_t* in_data, size_t block_size_samples,
+ rtc::Buffer* payload, size_t max_bytes) override;
private:
WebRtcOpusEncInst* opus_encoder_;
OpusRepacketizer* repacketizer_;
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/tick_timer.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/tick_timer.cc
new file mode 100644
index 00000000000..4a1b9b7b1fe
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/tick_timer.cc
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_coding/neteq/tick_timer.h"
+
+namespace webrtc {
+
+TickTimer::Stopwatch::Stopwatch(const TickTimer& ticktimer)
+ : ticktimer_(ticktimer), starttick_(ticktimer.ticks()) {}
+
+TickTimer::Countdown::Countdown(const TickTimer& ticktimer,
+ uint64_t ticks_to_count)
+ : stopwatch_(ticktimer.GetNewStopwatch()),
+ ticks_to_count_(ticks_to_count) {}
+
+TickTimer::Countdown::~Countdown() = default;
+
+} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/tick_timer.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/tick_timer.h
new file mode 100644
index 00000000000..8f17f435967
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/tick_timer.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_CODING_NETEQ_TICK_TIMER_H_
+#define WEBRTC_MODULES_AUDIO_CODING_NETEQ_TICK_TIMER_H_
+
+#include <memory>
+
+#include "webrtc/base/checks.h"
+#include "webrtc/base/constructormagic.h"
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+
+// Implements a time counter. The counter is advanced with the Increment()
+// methods, and is queried with the ticks() accessor. It is assumed that one
+// "tick" och the counter corresponds to 10 ms.
+// A TickTimer object can provide two types of associated time-measuring
+// objects: Stopwatch and Countdown.
+class TickTimer {
+ public:
+ // Stopwatch measures time elapsed since it was started, by querying the
+ // associated TickTimer for the current time. The intended use is to request a
+ // new Stopwatch object from a TickTimer object with the GetNewStopwatch()
+ // method. Note: since the Stopwatch object contains a reference to the
+ // TickTimer it is associated with, it cannot outlive the TickTimer.
+ class Stopwatch {
+ public:
+ explicit Stopwatch(const TickTimer& ticktimer);
+
+ uint64_t ElapsedTicks() const { return ticktimer_.ticks() - starttick_; }
+
+ uint64_t ElapsedMs() const {
+ const uint64_t elapsed_ticks = ticktimer_.ticks() - starttick_;
+ const int ms_per_tick = ticktimer_.ms_per_tick();
+ return elapsed_ticks < UINT64_MAX / ms_per_tick
+ ? elapsed_ticks * ms_per_tick
+ : UINT64_MAX;
+ }
+
+ private:
+ const TickTimer& ticktimer_;
+ const uint64_t starttick_;
+ };
+
+ // Countdown counts down from a given start value with each tick of the
+ // associated TickTimer, until zero is reached. The Finished() method will
+ // return true if zero has been reached, false otherwise. The intended use is
+ // to request a new Countdown object from a TickTimer object with the
+ // GetNewCountdown() method. Note: since the Countdown object contains a
+ // reference to the TickTimer it is associated with, it cannot outlive the
+ // TickTimer.
+ class Countdown {
+ public:
+ Countdown(const TickTimer& ticktimer, uint64_t ticks_to_count);
+
+ ~Countdown();
+
+ bool Finished() const {
+ return stopwatch_->ElapsedTicks() >= ticks_to_count_;
+ }
+
+ private:
+ const std::unique_ptr<Stopwatch> stopwatch_;
+ const uint64_t ticks_to_count_;
+ };
+
+ TickTimer() : TickTimer(10) {}
+ explicit TickTimer(int ms_per_tick) : ms_per_tick_(ms_per_tick) {
+ RTC_DCHECK_GT(ms_per_tick_, 0);
+ }
+
+ void Increment() { ++ticks_; }
+
+ // Mainly intended for testing.
+ void Increment(uint64_t x) { ticks_ += x; }
+
+ uint64_t ticks() const { return ticks_; }
+
+ int ms_per_tick() const { return ms_per_tick_; }
+
+ // Returns a new Stopwatch object, based on the current TickTimer. Note that
+ // the new Stopwatch object contains a reference to the current TickTimer,
+ // and must therefore not outlive the TickTimer.
+ std::unique_ptr<Stopwatch> GetNewStopwatch() const {
+ return std::unique_ptr<Stopwatch>(new Stopwatch(*this));
+ }
+
+ // Returns a new Countdown object, based on the current TickTimer. Note that
+ // the new Countdown object contains a reference to the current TickTimer,
+ // and must therefore not outlive the TickTimer.
+ std::unique_ptr<Countdown> GetNewCountdown(uint64_t ticks_to_count) const {
+ return std::unique_ptr<Countdown>(new Countdown(*this, ticks_to_count));
+ }
+
+ private:
+ uint64_t ticks_ = 0;
+ const int ms_per_tick_;
+ RTC_DISALLOW_COPY_AND_ASSIGN(TickTimer);
+};
+
+} // namespace webrtc
+#endif // WEBRTC_MODULES_AUDIO_CODING_NETEQ_TICK_TIMER_H_
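A minimal usage sketch of the timer defined above, mirroring the unit tests that follow: the owner advances the timer once per tick, and Stopwatch/Countdown objects derive elapsed time and expiry from it.

webrtc::TickTimer timer;                    // 10 ms per tick by default.
auto stopwatch = timer.GetNewStopwatch();   // Must not outlive |timer|.
auto countdown = timer.GetNewCountdown(3);  // Finishes after three ticks.
timer.Increment();                          // One 10 ms tick has passed.
uint64_t elapsed_ms = stopwatch->ElapsedMs();  // == 10.
bool done = countdown->Finished();             // Still false after one tick.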
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/tick_timer_unittest.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/tick_timer_unittest.cc
new file mode 100644
index 00000000000..55edcf5b292
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/tick_timer_unittest.cc
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <memory>
+
+#include "webrtc/modules/audio_coding/neteq/tick_timer.h"
+
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace webrtc {
+
+// Verify that the default value for ms_per_tick is 10.
+TEST(TickTimer, DefaultMsPerTick) {
+ TickTimer tt;
+ EXPECT_EQ(10, tt.ms_per_tick());
+}
+
+TEST(TickTimer, CustomMsPerTick) {
+ TickTimer tt(17);
+ EXPECT_EQ(17, tt.ms_per_tick());
+}
+
+TEST(TickTimer, Increment) {
+ TickTimer tt;
+ EXPECT_EQ(0u, tt.ticks());
+ tt.Increment();
+ EXPECT_EQ(1u, tt.ticks());
+
+ for (int i = 0; i < 17; ++i) {
+ tt.Increment();
+ }
+ EXPECT_EQ(18u, tt.ticks());
+
+ tt.Increment(17);
+ EXPECT_EQ(35u, tt.ticks());
+}
+
+TEST(TickTimer, WrapAround) {
+ TickTimer tt;
+ tt.Increment(UINT64_MAX);
+ EXPECT_EQ(UINT64_MAX, tt.ticks());
+ tt.Increment();
+ EXPECT_EQ(0u, tt.ticks());
+}
+
+TEST(TickTimer, Stopwatch) {
+ TickTimer tt;
+ // Increment it a "random" number of steps.
+ tt.Increment(17);
+
+ std::unique_ptr<TickTimer::Stopwatch> sw = tt.GetNewStopwatch();
+ ASSERT_TRUE(sw);
+
+ EXPECT_EQ(0u, sw->ElapsedTicks()); // Starts at zero.
+ EXPECT_EQ(0u, sw->ElapsedMs());
+ tt.Increment();
+ EXPECT_EQ(1u, sw->ElapsedTicks()); // Increases with the TickTimer.
+ EXPECT_EQ(10u, sw->ElapsedMs());
+}
+
+TEST(TickTimer, StopwatchWrapAround) {
+ TickTimer tt;
+ tt.Increment(UINT64_MAX);
+
+ std::unique_ptr<TickTimer::Stopwatch> sw = tt.GetNewStopwatch();
+ ASSERT_TRUE(sw);
+
+ tt.Increment();
+ EXPECT_EQ(0u, tt.ticks());
+ EXPECT_EQ(1u, sw->ElapsedTicks());
+ EXPECT_EQ(10u, sw->ElapsedMs());
+
+ tt.Increment();
+ EXPECT_EQ(1u, tt.ticks());
+ EXPECT_EQ(2u, sw->ElapsedTicks());
+ EXPECT_EQ(20u, sw->ElapsedMs());
+}
+
+TEST(TickTimer, StopwatchMsOverflow) {
+ TickTimer tt;
+ std::unique_ptr<TickTimer::Stopwatch> sw = tt.GetNewStopwatch();
+ ASSERT_TRUE(sw);
+
+ tt.Increment(UINT64_MAX / 10);
+ EXPECT_EQ(UINT64_MAX, sw->ElapsedMs());
+
+ tt.Increment();
+ EXPECT_EQ(UINT64_MAX, sw->ElapsedMs());
+
+ tt.Increment(UINT64_MAX - tt.ticks());
+ EXPECT_EQ(UINT64_MAX, tt.ticks());
+ EXPECT_EQ(UINT64_MAX, sw->ElapsedMs());
+}
+
+TEST(TickTimer, StopwatchWithCustomTicktime) {
+ const int kMsPerTick = 17;
+ TickTimer tt(kMsPerTick);
+ std::unique_ptr<TickTimer::Stopwatch> sw = tt.GetNewStopwatch();
+ ASSERT_TRUE(sw);
+
+ EXPECT_EQ(0u, sw->ElapsedMs());
+ tt.Increment();
+ EXPECT_EQ(static_cast<uint64_t>(kMsPerTick), sw->ElapsedMs());
+}
+
+TEST(TickTimer, Countdown) {
+ TickTimer tt;
+ // Increment it a "random" number of steps.
+ tt.Increment(4711);
+
+ std::unique_ptr<TickTimer::Countdown> cd = tt.GetNewCountdown(17);
+ ASSERT_TRUE(cd);
+
+ EXPECT_FALSE(cd->Finished());
+ tt.Increment();
+ EXPECT_FALSE(cd->Finished());
+
+ tt.Increment(16); // Total increment is now 17.
+ EXPECT_TRUE(cd->Finished());
+
+ // Further increments do not change the state.
+ tt.Increment();
+ EXPECT_TRUE(cd->Finished());
+ tt.Increment(1234);
+ EXPECT_TRUE(cd->Finished());
+}
+} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/time_stretch.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/time_stretch.cc
index 6a91ea487b5..880b1f82ea5 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/time_stretch.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/time_stretch.cc
@@ -16,6 +16,7 @@
#include "webrtc/base/safe_conversions.h"
#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
#include "webrtc/modules/audio_coding/neteq/background_noise.h"
+#include "webrtc/modules/audio_coding/neteq/cross_correlation.h"
#include "webrtc/modules/audio_coding/neteq/dsp_helper.h"
namespace webrtc {
@@ -158,20 +159,15 @@ TimeStretch::ReturnCodes TimeStretch::Process(const int16_t* input,
}
void TimeStretch::AutoCorrelation() {
- // Set scaling factor for cross correlation to protect against overflow.
- int scaling = kLogCorrelationLen - WebRtcSpl_NormW32(
- max_input_value_ * max_input_value_);
- scaling = std::max(0, scaling);
-
// Calculate correlation from lag kMinLag to lag kMaxLag in 4 kHz domain.
int32_t auto_corr[kCorrelationLen];
- WebRtcSpl_CrossCorrelation(auto_corr, &downsampled_input_[kMaxLag],
- &downsampled_input_[kMaxLag - kMinLag],
- kCorrelationLen, kMaxLag - kMinLag, scaling, -1);
+ CrossCorrelationWithAutoShift(
+ &downsampled_input_[kMaxLag], &downsampled_input_[kMaxLag - kMinLag],
+ kCorrelationLen, kMaxLag - kMinLag, -1, auto_corr);
// Normalize correlation to 14 bits and write to |auto_correlation_|.
int32_t max_corr = WebRtcSpl_MaxAbsValueW32(auto_corr, kCorrelationLen);
- scaling = std::max(0, 17 - WebRtcSpl_NormW32(max_corr));
+ int scaling = std::max(0, 17 - WebRtcSpl_NormW32(max_corr));
WebRtcSpl_VectorBitShiftW32ToW16(auto_correlation_, kCorrelationLen,
auto_corr, scaling);
}
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/timestamp_scaler_unittest.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/timestamp_scaler_unittest.cc
index b1cb45d2014..adaf16223b6 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/timestamp_scaler_unittest.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/timestamp_scaler_unittest.cc
@@ -23,9 +23,9 @@ namespace webrtc {
TEST(TimestampScaler, TestNoScaling) {
MockDecoderDatabase db;
- DecoderDatabase::DecoderInfo info;
- info.codec_type =
- NetEqDecoder::kDecoderPCMu; // Does not use scaled timestamps.
+ // Use PCMu, because it doesn't use scaled timestamps.
+ const DecoderDatabase::DecoderInfo info(NetEqDecoder::kDecoderPCMu, "", 8000,
+ nullptr);
static const uint8_t kRtpPayloadType = 0;
EXPECT_CALL(db, GetDecoderInfo(kRtpPayloadType))
.WillRepeatedly(Return(&info));
@@ -44,9 +44,9 @@ TEST(TimestampScaler, TestNoScaling) {
TEST(TimestampScaler, TestNoScalingLargeStep) {
MockDecoderDatabase db;
- DecoderDatabase::DecoderInfo info;
- info.codec_type =
- NetEqDecoder::kDecoderPCMu; // Does not use scaled timestamps.
+ // Use PCMu, because it doesn't use scaled timestamps.
+ const DecoderDatabase::DecoderInfo info(NetEqDecoder::kDecoderPCMu, "", 8000,
+ nullptr);
static const uint8_t kRtpPayloadType = 0;
EXPECT_CALL(db, GetDecoderInfo(kRtpPayloadType))
.WillRepeatedly(Return(&info));
@@ -70,8 +70,9 @@ TEST(TimestampScaler, TestNoScalingLargeStep) {
TEST(TimestampScaler, TestG722) {
MockDecoderDatabase db;
- DecoderDatabase::DecoderInfo info;
- info.codec_type = NetEqDecoder::kDecoderG722; // Uses a factor 2 scaling.
+ // Use G722, which has a factor 2 scaling.
+ const DecoderDatabase::DecoderInfo info(NetEqDecoder::kDecoderG722, "", 16000,
+ nullptr);
static const uint8_t kRtpPayloadType = 17;
EXPECT_CALL(db, GetDecoderInfo(kRtpPayloadType))
.WillRepeatedly(Return(&info));
@@ -94,8 +95,9 @@ TEST(TimestampScaler, TestG722) {
TEST(TimestampScaler, TestG722LargeStep) {
MockDecoderDatabase db;
- DecoderDatabase::DecoderInfo info;
- info.codec_type = NetEqDecoder::kDecoderG722; // Uses a factor 2 scaling.
+ // Use G722, which has a factor 2 scaling.
+ const DecoderDatabase::DecoderInfo info(NetEqDecoder::kDecoderG722, "", 16000,
+ nullptr);
static const uint8_t kRtpPayloadType = 17;
EXPECT_CALL(db, GetDecoderInfo(kRtpPayloadType))
.WillRepeatedly(Return(&info));
@@ -122,10 +124,11 @@ TEST(TimestampScaler, TestG722LargeStep) {
TEST(TimestampScaler, TestG722WithCng) {
MockDecoderDatabase db;
- DecoderDatabase::DecoderInfo info_g722, info_cng;
- info_g722.codec_type =
- NetEqDecoder::kDecoderG722; // Uses a factor 2 scaling.
- info_cng.codec_type = NetEqDecoder::kDecoderCNGwb;
+ // Use G722, which has a factor 2 scaling.
+ const DecoderDatabase::DecoderInfo info_g722(NetEqDecoder::kDecoderG722, "",
+ 16000, nullptr);
+ const DecoderDatabase::DecoderInfo info_cng(NetEqDecoder::kDecoderCNGwb, "",
+ 16000, nullptr);
static const uint8_t kRtpPayloadTypeG722 = 17;
static const uint8_t kRtpPayloadTypeCng = 13;
EXPECT_CALL(db, GetDecoderInfo(kRtpPayloadTypeG722))
@@ -164,9 +167,9 @@ TEST(TimestampScaler, TestG722WithCng) {
// as many tests here.
TEST(TimestampScaler, TestG722Packet) {
MockDecoderDatabase db;
- DecoderDatabase::DecoderInfo info;
- info.codec_type =
- NetEqDecoder::kDecoderG722; // Does uses a factor 2 scaling.
+ // Use G722, which has a factor 2 scaling.
+ const DecoderDatabase::DecoderInfo info(NetEqDecoder::kDecoderG722, "", 16000,
+ nullptr);
static const uint8_t kRtpPayloadType = 17;
EXPECT_CALL(db, GetDecoderInfo(kRtpPayloadType))
.WillRepeatedly(Return(&info));
@@ -193,8 +196,9 @@ TEST(TimestampScaler, TestG722Packet) {
// we are not doing as many tests here.
TEST(TimestampScaler, TestG722PacketList) {
MockDecoderDatabase db;
- DecoderDatabase::DecoderInfo info;
- info.codec_type = NetEqDecoder::kDecoderG722; // Uses a factor 2 scaling.
+ // Use G722, which has a factor 2 scaling.
+ const DecoderDatabase::DecoderInfo info(NetEqDecoder::kDecoderG722, "", 16000,
+ nullptr);
static const uint8_t kRtpPayloadType = 17;
EXPECT_CALL(db, GetDecoderInfo(kRtpPayloadType))
.WillRepeatedly(Return(&info));
@@ -222,8 +226,9 @@ TEST(TimestampScaler, TestG722PacketList) {
TEST(TimestampScaler, TestG722Reset) {
MockDecoderDatabase db;
- DecoderDatabase::DecoderInfo info;
- info.codec_type = NetEqDecoder::kDecoderG722; // Uses a factor 2 scaling.
+ // Use G722, which has a factor 2 scaling.
+ const DecoderDatabase::DecoderInfo info(NetEqDecoder::kDecoderG722, "", 16000,
+ nullptr);
static const uint8_t kRtpPayloadType = 17;
EXPECT_CALL(db, GetDecoderInfo(kRtpPayloadType))
.WillRepeatedly(Return(&info));
@@ -262,8 +267,8 @@ TEST(TimestampScaler, TestG722Reset) {
// timestamp scaler.
TEST(TimestampScaler, TestOpusLargeStep) {
MockDecoderDatabase db;
- DecoderDatabase::DecoderInfo info;
- info.codec_type = NetEqDecoder::kDecoderOpus;
+ const DecoderDatabase::DecoderInfo info(NetEqDecoder::kDecoderOpus, "", 48000,
+ nullptr);
static const uint8_t kRtpPayloadType = 17;
EXPECT_CALL(db, GetDecoderInfo(kRtpPayloadType))
.WillRepeatedly(Return(&info));
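These tests all switch from the old default-constructed DecoderInfo to its new constructor. A minimal sketch of that form, with the argument meanings (codec type, codec name, sample rate in Hz, external decoder or nullptr) inferred from the calls above:

const webrtc::DecoderDatabase::DecoderInfo info(
    webrtc::NetEqDecoder::kDecoderPCMu, "", 8000, nullptr);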
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.cc
index 2608d9a03b7..7a51256af2d 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.cc
@@ -45,7 +45,9 @@ void NetEqExternalDecoderTest::InsertPacket(
void NetEqExternalDecoderTest::GetOutputAudio(AudioFrame* output) {
// Get audio from regular instance.
- EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(output));
+ bool muted;
+ EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(output, &muted));
+ ASSERT_FALSE(muted);
EXPECT_EQ(channels_, output->num_channels_);
EXPECT_EQ(static_cast<size_t>(kOutputLengthMs * sample_rate_hz_ / 1000),
output->samples_per_channel_);
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/neteq_performance_test.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/neteq_performance_test.cc
index 59402a2029b..d0052c28a8d 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/neteq_performance_test.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/neteq_performance_test.cc
@@ -10,6 +10,7 @@
#include "webrtc/modules/audio_coding/neteq/tools/neteq_performance_test.h"
+#include "webrtc/base/checks.h"
#include "webrtc/modules/audio_coding/codecs/pcm16b/pcm16b.h"
#include "webrtc/modules/audio_coding/neteq/include/neteq.h"
#include "webrtc/modules/audio_coding/neteq/tools/audio_loop.h"
@@ -105,7 +106,9 @@ int64_t NetEqPerformanceTest::Run(int runtime_ms,
}
// Get output audio, but don't do anything with it.
- int error = neteq->GetAudio(&out_frame);
+ bool muted;
+ int error = neteq->GetAudio(&out_frame, &muted);
+ RTC_CHECK(!muted);
if (error != NetEq::kOK)
return -1;
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.cc
index 5f874ad8dbe..2983cebe9d4 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.cc
@@ -391,7 +391,9 @@ int NetEqQualityTest::Transmit() {
}
int NetEqQualityTest::DecodeBlock() {
- int ret = neteq_->GetAudio(&out_frame_);
+ bool muted;
+ int ret = neteq_->GetAudio(&out_frame_, &muted);
+ RTC_CHECK(!muted);
if (ret != NetEq::kOK) {
return -1;
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/neteq_rtpplay.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/neteq_rtpplay.cc
index fdb66714cfb..1d462b3c9f2 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/neteq_rtpplay.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/neteq_rtpplay.cc
@@ -605,7 +605,9 @@ int main(int argc, char* argv[]) {
// Check if it is time to get output audio.
while (time_now_ms >= next_output_time_ms && output_event_available) {
webrtc::AudioFrame out_frame;
- int error = neteq->GetAudio(&out_frame);
+ bool muted;
+ int error = neteq->GetAudio(&out_frame, &muted);
+ RTC_CHECK(!muted);
if (error != NetEq::kOK) {
std::cerr << "GetAudio returned error code " <<
neteq->LastError() << std::endl;
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/rtc_event_log_source.cc b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/rtc_event_log_source.cc
index dad72eaecd1..9192839be30 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/rtc_event_log_source.cc
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/rtc_event_log_source.cc
@@ -16,51 +16,15 @@
#include <limits>
#include "webrtc/base/checks.h"
+#include "webrtc/call.h"
#include "webrtc/call/rtc_event_log.h"
#include "webrtc/modules/audio_coding/neteq/tools/packet.h"
#include "webrtc/modules/rtp_rtcp/include/rtp_header_parser.h"
-// Files generated at build-time by the protobuf compiler.
-#ifdef WEBRTC_ANDROID_PLATFORM_BUILD
-#include "external/webrtc/webrtc/call/rtc_event_log.pb.h"
-#else
-#include "webrtc/call/rtc_event_log.pb.h"
-#endif
namespace webrtc {
namespace test {
-namespace {
-
-const rtclog::RtpPacket* GetRtpPacket(const rtclog::Event& event) {
- if (!event.has_type() || event.type() != rtclog::Event::RTP_EVENT)
- return nullptr;
- if (!event.has_timestamp_us() || !event.has_rtp_packet())
- return nullptr;
- const rtclog::RtpPacket& rtp_packet = event.rtp_packet();
- if (!rtp_packet.has_type() || rtp_packet.type() != rtclog::AUDIO ||
- !rtp_packet.has_incoming() || !rtp_packet.incoming() ||
- !rtp_packet.has_packet_length() || rtp_packet.packet_length() == 0 ||
- !rtp_packet.has_header() || rtp_packet.header().size() == 0 ||
- rtp_packet.packet_length() < rtp_packet.header().size())
- return nullptr;
- return &rtp_packet;
-}
-
-const rtclog::AudioPlayoutEvent* GetAudioPlayoutEvent(
- const rtclog::Event& event) {
- if (!event.has_type() || event.type() != rtclog::Event::AUDIO_PLAYOUT_EVENT)
- return nullptr;
- if (!event.has_timestamp_us() || !event.has_audio_playout_event())
- return nullptr;
- const rtclog::AudioPlayoutEvent& playout_event = event.audio_playout_event();
- if (!playout_event.has_local_ssrc())
- return nullptr;
- return &playout_event;
-}
-
-} // namespace
-
RtcEventLogSource* RtcEventLogSource::Create(const std::string& file_name) {
RtcEventLogSource* source = new RtcEventLogSource();
RTC_CHECK(source->OpenFile(file_name));
@@ -76,42 +40,57 @@ bool RtcEventLogSource::RegisterRtpHeaderExtension(RTPExtensionType type,
}
Packet* RtcEventLogSource::NextPacket() {
- while (rtp_packet_index_ < event_log_->stream_size()) {
- const rtclog::Event& event = event_log_->stream(rtp_packet_index_);
- const rtclog::RtpPacket* rtp_packet = GetRtpPacket(event);
- rtp_packet_index_++;
- if (rtp_packet) {
- uint8_t* packet_header = new uint8_t[rtp_packet->header().size()];
- memcpy(packet_header, rtp_packet->header().data(),
- rtp_packet->header().size());
- Packet* packet = new Packet(packet_header, rtp_packet->header().size(),
- rtp_packet->packet_length(),
- event.timestamp_us() / 1000, *parser_.get());
- if (packet->valid_header()) {
- // Check if the packet should not be filtered out.
- if (!filter_.test(packet->header().payloadType) &&
- !(use_ssrc_filter_ && packet->header().ssrc != ssrc_))
- return packet;
- } else {
- std::cout << "Warning: Packet with index " << (rtp_packet_index_ - 1)
- << " has an invalid header and will be ignored." << std::endl;
+ while (rtp_packet_index_ < parsed_stream_.GetNumberOfEvents()) {
+ if (parsed_stream_.GetEventType(rtp_packet_index_) ==
+ ParsedRtcEventLog::RTP_EVENT) {
+ PacketDirection direction;
+ MediaType media_type;
+ size_t header_length;
+ size_t packet_length;
+ uint64_t timestamp_us = parsed_stream_.GetTimestamp(rtp_packet_index_);
+ parsed_stream_.GetRtpHeader(rtp_packet_index_, &direction, &media_type,
+ nullptr, &header_length, &packet_length);
+ if (direction == kIncomingPacket && media_type == MediaType::AUDIO) {
+ uint8_t* packet_header = new uint8_t[header_length];
+ parsed_stream_.GetRtpHeader(rtp_packet_index_, nullptr, nullptr,
+ packet_header, nullptr, nullptr);
+ Packet* packet = new Packet(packet_header, header_length, packet_length,
+ static_cast<double>(timestamp_us) / 1000,
+ *parser_.get());
+ if (packet->valid_header()) {
+ // Check if the packet should not be filtered out.
+ if (!filter_.test(packet->header().payloadType) &&
+ !(use_ssrc_filter_ && packet->header().ssrc != ssrc_)) {
+ rtp_packet_index_++;
+ return packet;
+ }
+ } else {
+ std::cout << "Warning: Packet with index " << rtp_packet_index_
+ << " has an invalid header and will be ignored."
+ << std::endl;
+ }
+ // The packet has either an invalid header or needs to be filtered out,
+ // so it can be deleted.
+ delete packet;
}
- // The packet has either an invalid header or needs to be filtered out, so
- // it can be deleted.
- delete packet;
}
+ rtp_packet_index_++;
}
return nullptr;
}
int64_t RtcEventLogSource::NextAudioOutputEventMs() {
- while (audio_output_index_ < event_log_->stream_size()) {
- const rtclog::Event& event = event_log_->stream(audio_output_index_);
- const rtclog::AudioPlayoutEvent* playout_event =
- GetAudioPlayoutEvent(event);
+ while (audio_output_index_ < parsed_stream_.GetNumberOfEvents()) {
+ if (parsed_stream_.GetEventType(audio_output_index_) ==
+ ParsedRtcEventLog::AUDIO_PLAYOUT_EVENT) {
+ uint64_t timestamp_us = parsed_stream_.GetTimestamp(audio_output_index_);
+ // We call GetAudioPlayout only to check that the protobuf event is
+ // well-formed.
+ parsed_stream_.GetAudioPlayout(audio_output_index_, nullptr);
+ audio_output_index_++;
+ return timestamp_us / 1000;
+ }
audio_output_index_++;
- if (playout_event)
- return event.timestamp_us() / 1000;
}
return std::numeric_limits<int64_t>::max();
}
@@ -120,8 +99,7 @@ RtcEventLogSource::RtcEventLogSource()
: PacketSource(), parser_(RtpHeaderParser::Create()) {}
bool RtcEventLogSource::OpenFile(const std::string& file_name) {
- event_log_.reset(new rtclog::EventStream());
- return RtcEventLog::ParseRtcEventLog(file_name, event_log_.get());
+ return parsed_stream_.ParseFile(file_name);
}
} // namespace test
diff --git a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/rtc_event_log_source.h b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/rtc_event_log_source.h
index 312338ee087..ad7add154c5 100644
--- a/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/rtc_event_log_source.h
+++ b/chromium/third_party/webrtc/modules/audio_coding/neteq/tools/rtc_event_log_source.h
@@ -15,6 +15,7 @@
#include <string>
#include "webrtc/base/constructormagic.h"
+#include "webrtc/call/rtc_event_log_parser.h"
#include "webrtc/modules/audio_coding/neteq/tools/packet_source.h"
#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
@@ -22,10 +23,6 @@ namespace webrtc {
class RtpHeaderParser;
-namespace rtclog {
-class EventStream;
-} // namespace rtclog
-
namespace test {
class Packet;
@@ -55,10 +52,10 @@ class RtcEventLogSource : public PacketSource {
bool OpenFile(const std::string& file_name);
- int rtp_packet_index_ = 0;
- int audio_output_index_ = 0;
+ size_t rtp_packet_index_ = 0;
+ size_t audio_output_index_ = 0;
- std::unique_ptr<rtclog::EventStream> event_log_;
+ ParsedRtcEventLog parsed_stream_;
std::unique_ptr<RtpHeaderParser> parser_;
RTC_DISALLOW_COPY_AND_ASSIGN(RtcEventLogSource);