author     Allan Sandfeld Jensen <allan.jensen@theqtcompany.com>  2016-07-14 17:41:05 +0200
committer  Allan Sandfeld Jensen <allan.jensen@qt.io>             2016-08-04 12:37:36 +0000
commit     399c965b6064c440ddcf4015f5f8e9d131c7a0a6 (patch)
tree       6b06b60ff365abef0e13b3503d593a0df48d20e8 /chromium/content/renderer/media/webrtc
parent     7366110654eec46f21b6824f302356426f48cd74 (diff)
BASELINE: Update Chromium to 52.0.2743.76 and Ninja to 1.7.1
Change-Id: I382f51b959689505a60f8b707255ecb344f7d8b4
Reviewed-by: Michael Brüning <michael.bruning@qt.io>
Diffstat (limited to 'chromium/content/renderer/media/webrtc')
-rw-r--r--  chromium/content/renderer/media/webrtc/media_stream_remote_audio_track.cc          | 237
-rw-r--r--  chromium/content/renderer/media/webrtc/media_stream_remote_audio_track.h           |  89
-rw-r--r--  chromium/content/renderer/media/webrtc/media_stream_remote_video_source.cc         |  26
-rw-r--r--  chromium/content/renderer/media/webrtc/media_stream_remote_video_source_unittest.cc |   8
-rw-r--r--  chromium/content/renderer/media/webrtc/media_stream_track_metrics.cc               |   2
-rw-r--r--  chromium/content/renderer/media/webrtc/media_stream_video_webrtc_sink.cc           |   5
-rw-r--r--  chromium/content/renderer/media/webrtc/mock_peer_connection_dependency_factory.cc  |  59
-rw-r--r--  chromium/content/renderer/media/webrtc/mock_peer_connection_dependency_factory.h   |  36
-rw-r--r--  chromium/content/renderer/media/webrtc/peer_connection_dependency_factory.cc       | 224
-rw-r--r--  chromium/content/renderer/media/webrtc/peer_connection_dependency_factory.h        |  53
-rw-r--r--  chromium/content/renderer/media/webrtc/peer_connection_remote_audio_source.cc      | 153
-rw-r--r--  chromium/content/renderer/media/webrtc/peer_connection_remote_audio_source.h       | 102
-rw-r--r--  chromium/content/renderer/media/webrtc/processed_local_audio_source.cc             | 374
-rw-r--r--  chromium/content/renderer/media/webrtc/processed_local_audio_source.h              | 143
-rw-r--r--  chromium/content/renderer/media/webrtc/processed_local_audio_source_unittest.cc    | 227
-rw-r--r--  chromium/content/renderer/media/webrtc/stun_field_trial.cc                         |   3
-rw-r--r--  chromium/content/renderer/media/webrtc/webrtc_audio_sink.cc                        | 196
-rw-r--r--  chromium/content/renderer/media/webrtc/webrtc_audio_sink.h                         | 183
-rw-r--r--  chromium/content/renderer/media/webrtc/webrtc_audio_sink_adapter.cc                |  51
-rw-r--r--  chromium/content/renderer/media/webrtc/webrtc_audio_sink_adapter.h                 |  51
-rw-r--r--  chromium/content/renderer/media/webrtc/webrtc_local_audio_track_adapter.cc         | 161
-rw-r--r--  chromium/content/renderer/media/webrtc/webrtc_local_audio_track_adapter.h          | 107
-rw-r--r--  chromium/content/renderer/media/webrtc/webrtc_local_audio_track_adapter_unittest.cc | 102
-rw-r--r--  chromium/content/renderer/media/webrtc/webrtc_media_stream_adapter.cc              |  96
-rw-r--r--  chromium/content/renderer/media/webrtc/webrtc_media_stream_adapter.h               |  16
-rw-r--r--  chromium/content/renderer/media/webrtc/webrtc_media_stream_adapter_unittest.cc     |  46
26 files changed, 1530 insertions(+), 1220 deletions(-)
diff --git a/chromium/content/renderer/media/webrtc/media_stream_remote_audio_track.cc b/chromium/content/renderer/media/webrtc/media_stream_remote_audio_track.cc
deleted file mode 100644
index e3940ab72b3..00000000000
--- a/chromium/content/renderer/media/webrtc/media_stream_remote_audio_track.cc
+++ /dev/null
@@ -1,237 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/renderer/media/webrtc/media_stream_remote_audio_track.h"
-
-#include <stddef.h>
-
-#include <list>
-
-#include "base/logging.h"
-#include "content/public/renderer/media_stream_audio_sink.h"
-#include "third_party/webrtc/api/mediastreaminterface.h"
-
-namespace content {
-
-class MediaStreamRemoteAudioSource::AudioSink
- : public webrtc::AudioTrackSinkInterface {
- public:
- AudioSink() {
- }
- ~AudioSink() override {
- DCHECK(sinks_.empty());
- }
-
- void Add(MediaStreamAudioSink* sink, MediaStreamAudioTrack* track,
- bool enabled) {
- DCHECK(thread_checker_.CalledOnValidThread());
- SinkInfo info(sink, track, enabled);
- base::AutoLock lock(lock_);
- sinks_.push_back(info);
- }
-
- void Remove(MediaStreamAudioSink* sink, MediaStreamAudioTrack* track) {
- DCHECK(thread_checker_.CalledOnValidThread());
- base::AutoLock lock(lock_);
- sinks_.remove_if([&sink, &track](const SinkInfo& info) {
- return info.sink == sink && info.track == track;
- });
- }
-
- void SetEnabled(MediaStreamAudioTrack* track, bool enabled) {
- DCHECK(thread_checker_.CalledOnValidThread());
- base::AutoLock lock(lock_);
- for (SinkInfo& info : sinks_) {
- if (info.track == track)
- info.enabled = enabled;
- }
- }
-
- void RemoveAll(MediaStreamAudioTrack* track) {
- base::AutoLock lock(lock_);
- sinks_.remove_if([&track](const SinkInfo& info) {
- return info.track == track;
- });
- }
-
- bool IsNeeded() const {
- DCHECK(thread_checker_.CalledOnValidThread());
- return !sinks_.empty();
- }
-
- private:
- void OnData(const void* audio_data, int bits_per_sample, int sample_rate,
- size_t number_of_channels, size_t number_of_frames) override {
- if (!audio_bus_ ||
- static_cast<size_t>(audio_bus_->channels()) != number_of_channels ||
- static_cast<size_t>(audio_bus_->frames()) != number_of_frames) {
- audio_bus_ = media::AudioBus::Create(number_of_channels,
- number_of_frames);
- }
-
- audio_bus_->FromInterleaved(audio_data, number_of_frames,
- bits_per_sample / 8);
-
- bool format_changed = false;
- if (params_.format() != media::AudioParameters::AUDIO_PCM_LOW_LATENCY ||
- static_cast<size_t>(params_.channels()) != number_of_channels ||
- params_.sample_rate() != sample_rate ||
- static_cast<size_t>(params_.frames_per_buffer()) != number_of_frames) {
- params_ = media::AudioParameters(
- media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
- media::GuessChannelLayout(number_of_channels),
- sample_rate, 16, number_of_frames);
- format_changed = true;
- }
-
- // TODO(tommi): We should get the timestamp from WebRTC.
- base::TimeTicks estimated_capture_time(base::TimeTicks::Now());
-
- base::AutoLock lock(lock_);
- for (const SinkInfo& info : sinks_) {
- if (info.enabled) {
- if (format_changed)
- info.sink->OnSetFormat(params_);
- info.sink->OnData(*audio_bus_.get(), estimated_capture_time);
- }
- }
- }
-
- mutable base::Lock lock_;
- struct SinkInfo {
- SinkInfo(MediaStreamAudioSink* sink, MediaStreamAudioTrack* track,
- bool enabled) : sink(sink), track(track), enabled(enabled) {}
- MediaStreamAudioSink* sink;
- MediaStreamAudioTrack* track;
- bool enabled;
- };
- std::list<SinkInfo> sinks_;
- base::ThreadChecker thread_checker_;
- media::AudioParameters params_; // Only used on the callback thread.
- std::unique_ptr<media::AudioBus>
- audio_bus_; // Only used on the callback thread.
-};
-
-MediaStreamRemoteAudioTrack::MediaStreamRemoteAudioTrack(
- const blink::WebMediaStreamSource& source, bool enabled)
- : MediaStreamAudioTrack(false), source_(source), enabled_(enabled) {
- DCHECK(source.getExtraData()); // Make sure the source has a native source.
-}
-
-MediaStreamRemoteAudioTrack::~MediaStreamRemoteAudioTrack() {
- DCHECK(main_render_thread_checker_.CalledOnValidThread());
- // Ensure the track is stopped.
- MediaStreamAudioTrack::Stop();
-}
-
-void MediaStreamRemoteAudioTrack::SetEnabled(bool enabled) {
- DCHECK(main_render_thread_checker_.CalledOnValidThread());
-
- // This affects the shared state of the source for whether or not it's a part
- // of the mixed audio that's rendered for remote tracks from WebRTC.
- // All tracks from the same source will share this state and thus can step
- // on each other's toes.
- // This is also why we can't check the |enabled_| state for equality with
- // |enabled| before setting the mixing enabled state. |enabled_| and the
- // shared state might not be the same.
- source()->SetEnabledForMixing(enabled);
-
- enabled_ = enabled;
- source()->SetSinksEnabled(this, enabled);
-}
-
-void MediaStreamRemoteAudioTrack::OnStop() {
- DCHECK(main_render_thread_checker_.CalledOnValidThread());
- DVLOG(1) << "MediaStreamRemoteAudioTrack::OnStop()";
-
- source()->RemoveAll(this);
-
- // Stop means that a track should be stopped permanently. But
- // since there is no proper way of doing that on a remote track, we can
- // at least disable the track. Blink will not call down to the content layer
- // after a track has been stopped.
- SetEnabled(false);
-}
-
-void MediaStreamRemoteAudioTrack::AddSink(MediaStreamAudioSink* sink) {
- DCHECK(main_render_thread_checker_.CalledOnValidThread());
- return source()->AddSink(sink, this, enabled_);
-}
-
-void MediaStreamRemoteAudioTrack::RemoveSink(MediaStreamAudioSink* sink) {
- DCHECK(main_render_thread_checker_.CalledOnValidThread());
- return source()->RemoveSink(sink, this);
-}
-
-media::AudioParameters MediaStreamRemoteAudioTrack::GetOutputFormat() const {
- DCHECK(main_render_thread_checker_.CalledOnValidThread());
- // This method is not implemented on purpose and should be removed.
- // TODO(tommi): See comment for GetOutputFormat in MediaStreamAudioTrack.
- NOTIMPLEMENTED();
- return media::AudioParameters();
-}
-
-webrtc::AudioTrackInterface* MediaStreamRemoteAudioTrack::GetAudioAdapter() {
- DCHECK(main_render_thread_checker_.CalledOnValidThread());
- return source()->GetAudioAdapter();
-}
-
-MediaStreamRemoteAudioSource* MediaStreamRemoteAudioTrack::source() const {
- return static_cast<MediaStreamRemoteAudioSource*>(source_.getExtraData());
-}
-
-MediaStreamRemoteAudioSource::MediaStreamRemoteAudioSource(
- const scoped_refptr<webrtc::AudioTrackInterface>& track) : track_(track) {}
-
-MediaStreamRemoteAudioSource::~MediaStreamRemoteAudioSource() {
- DCHECK(thread_checker_.CalledOnValidThread());
-}
-
-void MediaStreamRemoteAudioSource::SetEnabledForMixing(bool enabled) {
- DCHECK(thread_checker_.CalledOnValidThread());
- track_->set_enabled(enabled);
-}
-
-void MediaStreamRemoteAudioSource::AddSink(MediaStreamAudioSink* sink,
- MediaStreamAudioTrack* track,
- bool enabled) {
- DCHECK(thread_checker_.CalledOnValidThread());
- if (!sink_) {
- sink_.reset(new AudioSink());
- track_->AddSink(sink_.get());
- }
-
- sink_->Add(sink, track, enabled);
-}
-
-void MediaStreamRemoteAudioSource::RemoveSink(MediaStreamAudioSink* sink,
- MediaStreamAudioTrack* track) {
- DCHECK(thread_checker_.CalledOnValidThread());
- DCHECK(sink_);
-
- sink_->Remove(sink, track);
-
- if (!sink_->IsNeeded()) {
- track_->RemoveSink(sink_.get());
- sink_.reset();
- }
-}
-
-void MediaStreamRemoteAudioSource::SetSinksEnabled(MediaStreamAudioTrack* track,
- bool enabled) {
- if (sink_)
- sink_->SetEnabled(track, enabled);
-}
-
-void MediaStreamRemoteAudioSource::RemoveAll(MediaStreamAudioTrack* track) {
- if (sink_)
- sink_->RemoveAll(track);
-}
-
-webrtc::AudioTrackInterface* MediaStreamRemoteAudioSource::GetAudioAdapter() {
- DCHECK(thread_checker_.CalledOnValidThread());
- return track_.get();
-}
-
-} // namespace content
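Note: the deleted OnData() above converts interleaved 16-bit PCM into a planar media::AudioBus, recreating the bus only when the channel or frame count changes; the same logic reappears in the new peer_connection_remote_audio_source.cc later in this diff. A minimal standalone sketch of that deinterleaving step, with std::vector standing in for media::AudioBus (illustrative names, not Chromium's):

#include <cstddef>
#include <cstdint>
#include <vector>

// Reallocate the planar buffer only when the incoming format changes, then
// deinterleave 16-bit PCM into per-channel float planes (roughly what
// media::AudioBus::FromInterleaved() does for the sinks).
struct PlanarBuffer {
  size_t channels = 0, frames = 0;
  std::vector<std::vector<float>> planes;  // planes[channel][frame]
};

void Deinterleave(const int16_t* interleaved, size_t channels, size_t frames,
                  PlanarBuffer* out) {
  if (out->channels != channels || out->frames != frames) {
    out->channels = channels;
    out->frames = frames;
    out->planes.assign(channels, std::vector<float>(frames));
  }
  for (size_t ch = 0; ch < channels; ++ch)
    for (size_t f = 0; f < frames; ++f)
      out->planes[ch][f] = interleaved[f * channels + ch] / 32768.0f;
}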
diff --git a/chromium/content/renderer/media/webrtc/media_stream_remote_audio_track.h b/chromium/content/renderer/media/webrtc/media_stream_remote_audio_track.h
deleted file mode 100644
index 9e48dfb40d7..00000000000
--- a/chromium/content/renderer/media/webrtc/media_stream_remote_audio_track.h
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_RENDERER_MEDIA_WEBRTC_MEDIA_STREAM_REMOTE_AUDIO_TRACK_H_
-#define CONTENT_RENDERER_MEDIA_WEBRTC_MEDIA_STREAM_REMOTE_AUDIO_TRACK_H_
-
-#include "base/memory/ref_counted.h"
-#include "base/threading/thread_checker.h"
-#include "content/renderer/media/media_stream_audio_track.h"
-#include "third_party/WebKit/public/platform/WebMediaStreamSource.h"
-
-namespace content {
-
-class MediaStreamRemoteAudioSource;
-
-// MediaStreamRemoteAudioTrack is a WebRTC specific implementation of an
-// audio track received from a PeerConnection.
-// TODO(tommi): Chrome shouldn't have to care about remote vs local so
-// we should have a single track implementation that delegates to the
-// sources that do different things depending on the type of source.
-class MediaStreamRemoteAudioTrack : public MediaStreamAudioTrack {
- public:
- explicit MediaStreamRemoteAudioTrack(
- const blink::WebMediaStreamSource& source, bool enabled);
- ~MediaStreamRemoteAudioTrack() override;
-
- // MediaStreamTrack override.
- void SetEnabled(bool enabled) override;
-
- // MediaStreamAudioTrack overrides.
- void AddSink(MediaStreamAudioSink* sink) override;
- void RemoveSink(MediaStreamAudioSink* sink) override;
- media::AudioParameters GetOutputFormat() const override;
-
- webrtc::AudioTrackInterface* GetAudioAdapter() override;
-
- private:
- // MediaStreamAudioTrack override.
- void OnStop() final;
-
- MediaStreamRemoteAudioSource* source() const;
-
- blink::WebMediaStreamSource source_;
- bool enabled_;
-};
-
-// Inheriting from ExtraData directly since MediaStreamAudioSource has
-// too much unrelated bloat.
-// TODO(tommi): MediaStreamAudioSource needs refactoring.
-// TODO(miu): On it! ;-)
-class MediaStreamRemoteAudioSource
- : public blink::WebMediaStreamSource::ExtraData {
- public:
- explicit MediaStreamRemoteAudioSource(
- const scoped_refptr<webrtc::AudioTrackInterface>& track);
- ~MediaStreamRemoteAudioSource() override;
-
- // Controls whether or not the source is included in the main, mixed, audio
- // output from WebRTC as rendered by WebRtcAudioRenderer (media players).
- void SetEnabledForMixing(bool enabled);
-
- // Adds an audio sink for a track belonging to this source.
- // |enabled| is the enabled state of the track and can be updated via
- // a call to SetSinksEnabled.
- void AddSink(MediaStreamAudioSink* sink, MediaStreamAudioTrack* track,
- bool enabled);
-
- // Removes an audio sink for a track belonging to this source.
- void RemoveSink(MediaStreamAudioSink* sink, MediaStreamAudioTrack* track);
-
- // Turns audio callbacks on/off for all sinks belonging to a track.
- void SetSinksEnabled(MediaStreamAudioTrack* track, bool enabled);
-
- // Removes all sinks belonging to a track.
- void RemoveAll(MediaStreamAudioTrack* track);
-
- webrtc::AudioTrackInterface* GetAudioAdapter();
-
- private:
- class AudioSink;
- std::unique_ptr<AudioSink> sink_;
- const scoped_refptr<webrtc::AudioTrackInterface> track_;
- base::ThreadChecker thread_checker_;
-};
-
-} // namespace content
-
-#endif // CONTENT_RENDERER_MEDIA_WEBRTC_MEDIA_STREAM_REMOTE_AUDIO_TRACK_H_
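Note: the AddSink/RemoveSink/SetSinksEnabled surface declared above boils down to a fan-out: one lock-guarded list of (sink, track, enabled) entries, mutated on the main thread and iterated on the audio callback thread. A rough sketch of that pattern with std::mutex in place of base::Lock (illustrative types, not Chromium's):

#include <list>
#include <mutex>

struct Sink {  // Stand-in for MediaStreamAudioSink.
  virtual void OnData(const float* frames, int n) = 0;
  virtual ~Sink() = default;
};
struct Track {};  // Stand-in for MediaStreamAudioTrack.

class SinkFanOut {
 public:
  void Add(Sink* sink, Track* track, bool enabled) {
    std::lock_guard<std::mutex> lock(mutex_);
    sinks_.push_back({sink, track, enabled});
  }
  void Remove(Sink* sink, Track* track) {
    std::lock_guard<std::mutex> lock(mutex_);
    sinks_.remove_if(
        [&](const Info& i) { return i.sink == sink && i.track == track; });
  }
  // Called from the audio callback thread.
  void Deliver(const float* frames, int n) {
    std::lock_guard<std::mutex> lock(mutex_);
    for (const Info& i : sinks_)
      if (i.enabled) i.sink->OnData(frames, n);
  }

 private:
  struct Info { Sink* sink; Track* track; bool enabled; };
  std::mutex mutex_;
  std::list<Info> sinks_;
};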
diff --git a/chromium/content/renderer/media/webrtc/media_stream_remote_video_source.cc b/chromium/content/renderer/media/webrtc/media_stream_remote_video_source.cc
index 03b6125128f..928b1804c68 100644
--- a/chromium/content/renderer/media/webrtc/media_stream_remote_video_source.cc
+++ b/chromium/content/renderer/media/webrtc/media_stream_remote_video_source.cc
@@ -10,7 +10,6 @@
#include "base/bind.h"
#include "base/callback_helpers.h"
#include "base/location.h"
-#include "base/threading/thread_checker.h"
#include "base/trace_event/trace_event.h"
#include "content/renderer/media/webrtc/track_observer.h"
#include "media/base/bind_to_current_loop.h"
@@ -19,7 +18,6 @@
#include "media/base/video_util.h"
#include "third_party/webrtc/media/base/videoframe.h"
#include "third_party/webrtc/media/base/videosinkinterface.h"
-#include "third_party/webrtc/system_wrappers/include/tick_util.h"
namespace content {
@@ -46,9 +44,6 @@ class MediaStreamRemoteVideoSource::RemoteVideoSourceDelegate
const scoped_refptr<media::VideoFrame>& video_frame);
private:
- // Bound to the render thread.
- base::ThreadChecker thread_checker_;
-
scoped_refptr<base::SingleThreadTaskRunner> io_task_runner_;
// |frame_callback_| is accessed on the IO thread.
@@ -71,8 +66,7 @@ MediaStreamRemoteVideoSource::RemoteVideoSourceDelegate::
// the offset, 2) the rate (i.e., one clock runs faster than the other).
// See http://crbug/516700
time_diff_(base::TimeTicks::Now() - base::TimeTicks() -
- base::TimeDelta::FromMicroseconds(
- webrtc::TickTime::MicrosecondTimestamp())) {}
+ base::TimeDelta::FromMicroseconds(rtc::TimeMicros())) {}
MediaStreamRemoteVideoSource::
RemoteVideoSourceDelegate::~RemoteVideoSourceDelegate() {
@@ -95,15 +89,16 @@ void MediaStreamRemoteVideoSource::RemoteVideoSourceDelegate::OnFrame(
incoming_timestamp - start_timestamp_;
scoped_refptr<media::VideoFrame> video_frame;
- if (incoming_frame.GetNativeHandle() != NULL) {
+ if (incoming_frame.video_frame_buffer()->native_handle() != NULL) {
video_frame =
- static_cast<media::VideoFrame*>(incoming_frame.GetNativeHandle());
+ static_cast<media::VideoFrame*>(
+ incoming_frame.video_frame_buffer()->native_handle());
video_frame->set_timestamp(elapsed_timestamp);
} else {
const cricket::VideoFrame* frame =
incoming_frame.GetCopyWithRotationApplied();
- gfx::Size size(frame->GetWidth(), frame->GetHeight());
+ gfx::Size size(frame->width(), frame->height());
// Make a shallow copy. Both |frame| and |video_frame| will share a single
// reference counted frame buffer. Const cast and hope no one will overwrite
@@ -112,10 +107,13 @@ void MediaStreamRemoteVideoSource::RemoteVideoSourceDelegate::OnFrame(
// need to const cast here.
video_frame = media::VideoFrame::WrapExternalYuvData(
media::PIXEL_FORMAT_YV12, size, gfx::Rect(size), size,
- frame->GetYPitch(), frame->GetUPitch(), frame->GetVPitch(),
- const_cast<uint8_t*>(frame->GetYPlane()),
- const_cast<uint8_t*>(frame->GetUPlane()),
- const_cast<uint8_t*>(frame->GetVPlane()), elapsed_timestamp);
+ frame->video_frame_buffer()->StrideY(),
+ frame->video_frame_buffer()->StrideU(),
+ frame->video_frame_buffer()->StrideV(),
+ const_cast<uint8_t*>(frame->video_frame_buffer()->DataY()),
+ const_cast<uint8_t*>(frame->video_frame_buffer()->DataU()),
+ const_cast<uint8_t*>(frame->video_frame_buffer()->DataV()),
+ elapsed_timestamp);
if (!video_frame)
return;
video_frame->AddDestructionObserver(
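Note: the behavioral constant in this hunk is the clock mapping. The delegate captures, once at construction, the offset between Chrome's base::TimeTicks and the WebRTC clock (now read via rtc::TimeMicros() instead of the removed webrtc::TickTime) and applies it to every incoming frame timestamp. A rough standalone sketch of that mapping, with std::chrono standing in for base::TimeTicks and a stubbed WebRTC clock (these names are illustrative):

#include <chrono>
#include <cstdint>

using Clock = std::chrono::steady_clock;

// Stub standing in for rtc::TimeMicros(); the real one reads WebRTC's clock.
int64_t RtcTimeMicros() {
  return std::chrono::duration_cast<std::chrono::microseconds>(
             Clock::now().time_since_epoch()).count();
}

class ClockMapper {
 public:
  // Capture the render-clock / WebRTC-clock offset once, at construction.
  ClockMapper()
      : time_diff_(Clock::now().time_since_epoch() -
                   std::chrono::microseconds(RtcTimeMicros())) {}

  // Express a WebRTC frame timestamp (in microseconds) on the render clock.
  Clock::time_point ToRenderTime(int64_t rtc_us) const {
    return Clock::time_point(std::chrono::duration_cast<Clock::duration>(
        std::chrono::microseconds(rtc_us) + time_diff_));
  }

 private:
  const std::chrono::nanoseconds time_diff_;
};

As the comment referenced at crbug/516700 notes, a fixed offset captured once can drift if the two clocks tick at slightly different rates; this sketch shares that limitation.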
diff --git a/chromium/content/renderer/media/webrtc/media_stream_remote_video_source_unittest.cc b/chromium/content/renderer/media/webrtc/media_stream_remote_video_source_unittest.cc
index 52a580bc233..5cd94260e38 100644
--- a/chromium/content/renderer/media/webrtc/media_stream_remote_video_source_unittest.cc
+++ b/chromium/content/renderer/media/webrtc/media_stream_remote_video_source_unittest.cc
@@ -9,7 +9,7 @@
#include "base/message_loop/message_loop.h"
#include "base/run_loop.h"
#include "base/strings/utf_string_conversions.h"
-#include "base/thread_task_runner_handle.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "content/child/child_process.h"
#include "content/renderer/media/media_stream_video_track.h"
#include "content/renderer/media/mock_media_stream_video_sink.h"
@@ -53,7 +53,7 @@ class MediaStreamRemoteVideoSourceTest
webkit_source_.initialize(base::UTF8ToUTF16("dummy_source_id"),
blink::WebMediaStreamSource::TypeVideo,
base::UTF8ToUTF16("dummy_source_name"),
- true /* remote */ , true /* readonly */);
+ true /* remote */);
webkit_source_.setExtraData(remote_source_);
}
@@ -123,7 +123,7 @@ TEST_F(MediaStreamRemoteVideoSourceTest, StartTrack) {
EXPECT_EQ(1, NumberOfSuccessConstraintsCallbacks());
MockMediaStreamVideoSink sink;
- track->AddSink(&sink, sink.GetDeliverFrameCB());
+ track->AddSink(&sink, sink.GetDeliverFrameCB(), false);
base::RunLoop run_loop;
base::Closure quit_closure = run_loop.QuitClosure();
EXPECT_CALL(sink, OnVideoFrame()).WillOnce(
@@ -141,7 +141,7 @@ TEST_F(MediaStreamRemoteVideoSourceTest, RemoteTrackStop) {
std::unique_ptr<MediaStreamVideoTrack> track(CreateTrack());
MockMediaStreamVideoSink sink;
- track->AddSink(&sink, sink.GetDeliverFrameCB());
+ track->AddSink(&sink, sink.GetDeliverFrameCB(), false);
EXPECT_EQ(blink::WebMediaStreamSource::ReadyStateLive, sink.state());
EXPECT_EQ(blink::WebMediaStreamSource::ReadyStateLive,
webkit_source().getReadyState());
diff --git a/chromium/content/renderer/media/webrtc/media_stream_track_metrics.cc b/chromium/content/renderer/media/webrtc/media_stream_track_metrics.cc
index 21f2da67a52..d52860823e6 100644
--- a/chromium/content/renderer/media/webrtc/media_stream_track_metrics.cc
+++ b/chromium/content/renderer/media/webrtc/media_stream_track_metrics.cc
@@ -9,7 +9,7 @@
#include <string>
#include "base/md5.h"
-#include "base/thread_task_runner_handle.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "content/common/media/media_stream_track_metrics_host_messages.h"
#include "content/renderer/render_thread_impl.h"
#include "third_party/webrtc/api/mediastreaminterface.h"
diff --git a/chromium/content/renderer/media/webrtc/media_stream_video_webrtc_sink.cc b/chromium/content/renderer/media/webrtc/media_stream_video_webrtc_sink.cc
index 2fc6e2aefab..b5ed1fdecda 100644
--- a/chromium/content/renderer/media/webrtc/media_stream_video_webrtc_sink.cc
+++ b/chromium/content/renderer/media/webrtc/media_stream_video_webrtc_sink.cc
@@ -9,7 +9,7 @@
#include "base/single_thread_task_runner.h"
#include "base/strings/utf_string_conversions.h"
#include "base/synchronization/lock.h"
-#include "base/thread_task_runner_handle.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "base/timer/timer.h"
#include "content/common/media/media_stream_options.h"
#include "content/public/renderer/media_stream_utils.h"
@@ -246,7 +246,8 @@ MediaStreamVideoWebRtcSink::MediaStreamVideoWebRtcSink(
MediaStreamVideoSink::ConnectToTrack(
track,
- base::Bind(&WebRtcVideoSourceAdapter::OnVideoFrameOnIO, source_adapter_));
+ base::Bind(&WebRtcVideoSourceAdapter::OnVideoFrameOnIO, source_adapter_),
+ false);
DVLOG(3) << "MediaStreamVideoWebRtcSink ctor() : is_screencast "
<< is_screencast;
diff --git a/chromium/content/renderer/media/webrtc/mock_peer_connection_dependency_factory.cc b/chromium/content/renderer/media/webrtc/mock_peer_connection_dependency_factory.cc
index 982291bdbf8..354615d21cf 100644
--- a/chromium/content/renderer/media/webrtc/mock_peer_connection_dependency_factory.cc
+++ b/chromium/content/renderer/media/webrtc/mock_peer_connection_dependency_factory.cc
@@ -9,11 +9,7 @@
#include "base/logging.h"
#include "base/strings/utf_string_conversions.h"
#include "content/renderer/media/mock_peer_connection_impl.h"
-#include "content/renderer/media/webaudio_capturer_source.h"
-#include "content/renderer/media/webrtc/webrtc_local_audio_track_adapter.h"
#include "content/renderer/media/webrtc/webrtc_video_capturer_adapter.h"
-#include "content/renderer/media/webrtc_audio_capturer.h"
-#include "content/renderer/media/webrtc_local_audio_track.h"
#include "third_party/WebKit/public/platform/WebMediaStreamTrack.h"
#include "third_party/webrtc/api/mediastreaminterface.h"
#include "third_party/webrtc/base/scoped_ref_ptr.h"
@@ -156,30 +152,6 @@ class MockRtcVideoCapturer : public WebRtcVideoCapturerAdapter {
int height_;
};
-MockAudioSource::MockAudioSource(const cricket::AudioOptions& options,
- bool remote)
- : remote_(remote), state_(MediaSourceInterface::kLive) {}
-
-MockAudioSource::~MockAudioSource() {}
-
-void MockAudioSource::RegisterObserver(webrtc::ObserverInterface* observer) {
- DCHECK(observers_.find(observer) == observers_.end());
- observers_.insert(observer);
-}
-
-void MockAudioSource::UnregisterObserver(webrtc::ObserverInterface* observer) {
- DCHECK(observers_.find(observer) != observers_.end());
- observers_.erase(observer);
-}
-
-webrtc::MediaSourceInterface::SourceState MockAudioSource::state() const {
- return state_;
-}
-
-bool MockAudioSource::remote() const {
- return remote_;
-}
-
scoped_refptr<MockWebRtcAudioTrack> MockWebRtcAudioTrack::Create(
const std::string& id) {
return new rtc::RefCountedObject<MockWebRtcAudioTrack>(id);
@@ -380,7 +352,9 @@ class MockIceCandidate : public IceCandidateInterface {
MockPeerConnectionDependencyFactory::MockPeerConnectionDependencyFactory()
: PeerConnectionDependencyFactory(NULL),
- fail_to_create_next_audio_capturer_(false) {
+ signaling_thread_("MockPCFactory WebRtc Signaling Thread") {
+ EnsureWebRtcAudioDeviceImpl();
+ CHECK(signaling_thread_.Start());
}
MockPeerConnectionDependencyFactory::~MockPeerConnectionDependencyFactory() {}
@@ -393,14 +367,6 @@ MockPeerConnectionDependencyFactory::CreatePeerConnection(
return new rtc::RefCountedObject<MockPeerConnectionImpl>(this, observer);
}
-scoped_refptr<webrtc::AudioSourceInterface>
-MockPeerConnectionDependencyFactory::CreateLocalAudioSource(
- const cricket::AudioOptions& options) {
- last_audio_source_ =
- new rtc::RefCountedObject<MockAudioSource>(options, false);
- return last_audio_source_;
-}
-
WebRtcVideoCapturerAdapter*
MockPeerConnectionDependencyFactory::CreateVideoCapturer(
bool is_screen_capture) {
@@ -416,9 +382,6 @@ MockPeerConnectionDependencyFactory::CreateVideoSource(
return nullptr;
}
-void MockPeerConnectionDependencyFactory::CreateWebAudioSource(
- blink::WebMediaStreamSource* source) {}
-
scoped_refptr<webrtc::MediaStreamInterface>
MockPeerConnectionDependencyFactory::CreateLocalMediaStream(
const std::string& label) {
@@ -458,19 +421,9 @@ MockPeerConnectionDependencyFactory::CreateIceCandidate(
return new MockIceCandidate(sdp_mid, sdp_mline_index, sdp);
}
-std::unique_ptr<WebRtcAudioCapturer>
-MockPeerConnectionDependencyFactory::CreateAudioCapturer(
- int render_frame_id,
- const StreamDeviceInfo& device_info,
- const blink::WebMediaConstraints& constraints,
- MediaStreamAudioSource* audio_source) {
- if (fail_to_create_next_audio_capturer_) {
- fail_to_create_next_audio_capturer_ = false;
- return NULL;
- }
- DCHECK(audio_source);
- return WebRtcAudioCapturer::CreateCapturer(-1, device_info, constraints, NULL,
- audio_source);
+scoped_refptr<base::SingleThreadTaskRunner>
+MockPeerConnectionDependencyFactory::GetWebRtcSignalingThread() const {
+ return signaling_thread_.task_runner();
}
} // namespace content
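Note: the mock factory now owns a dedicated signaling thread and hands tests its task runner, replacing the removed audio-capturer hooks. As a rough illustration of the property that buys, here is a minimal single-thread task queue in plain C++ (std::thread standing in for base::Thread; none of these names are Chromium's): tasks posted from any thread run in FIFO order on one thread the test fully controls.

#include <condition_variable>
#include <functional>
#include <mutex>
#include <queue>
#include <thread>

class TaskThread {
 public:
  TaskThread() : worker_([this] { Loop(); }) {}
  ~TaskThread() {
    { std::lock_guard<std::mutex> lock(mutex_); done_ = true; }
    cv_.notify_one();
    worker_.join();  // Drains queued tasks, then exits.
  }
  void PostTask(std::function<void()> task) {
    { std::lock_guard<std::mutex> lock(mutex_); queue_.push(std::move(task)); }
    cv_.notify_one();
  }

 private:
  void Loop() {
    for (;;) {
      std::function<void()> task;
      {
        std::unique_lock<std::mutex> lock(mutex_);
        cv_.wait(lock, [this] { return done_ || !queue_.empty(); });
        if (queue_.empty()) return;  // done_ was set and queue is drained.
        task = std::move(queue_.front());
        queue_.pop();
      }
      task();  // Run outside the lock.
    }
  }
  std::mutex mutex_;
  std::condition_variable cv_;
  std::queue<std::function<void()>> queue_;
  bool done_ = false;
  std::thread worker_;  // Declared last so Loop() sees initialized members.
};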
diff --git a/chromium/content/renderer/media/webrtc/mock_peer_connection_dependency_factory.h b/chromium/content/renderer/media/webrtc/mock_peer_connection_dependency_factory.h
index 39881c633d6..49a03f5eb2d 100644
--- a/chromium/content/renderer/media/webrtc/mock_peer_connection_dependency_factory.h
+++ b/chromium/content/renderer/media/webrtc/mock_peer_connection_dependency_factory.h
@@ -19,24 +19,6 @@ namespace content {
typedef std::set<webrtc::ObserverInterface*> ObserverSet;
-class MockAudioSource : public webrtc::AudioSourceInterface {
- public:
- explicit MockAudioSource(const cricket::AudioOptions& options, bool remote);
-
- void RegisterObserver(webrtc::ObserverInterface* observer) override;
- void UnregisterObserver(webrtc::ObserverInterface* observer) override;
- MediaSourceInterface::SourceState state() const override;
- bool remote() const override;
-
- protected:
- ~MockAudioSource() override;
-
- private:
- bool remote_;
- ObserverSet observers_;
- MediaSourceInterface::SourceState state_;
-};
-
class MockWebRtcAudioTrack : public webrtc::AudioTrackInterface {
public:
static scoped_refptr<MockWebRtcAudioTrack> Create(const std::string& id);
@@ -144,13 +126,10 @@ class MockPeerConnectionDependencyFactory
const webrtc::PeerConnectionInterface::RTCConfiguration& config,
blink::WebFrame* frame,
webrtc::PeerConnectionObserver* observer) override;
- scoped_refptr<webrtc::AudioSourceInterface> CreateLocalAudioSource(
- const cricket::AudioOptions& options) override;
WebRtcVideoCapturerAdapter* CreateVideoCapturer(
bool is_screen_capture) override;
scoped_refptr<webrtc::VideoTrackSourceInterface> CreateVideoSource(
cricket::VideoCapturer* capturer) override;
- void CreateWebAudioSource(blink::WebMediaStreamSource* source) override;
scoped_refptr<webrtc::MediaStreamInterface> CreateLocalMediaStream(
const std::string& label) override;
scoped_refptr<webrtc::VideoTrackInterface> CreateLocalVideoTrack(
@@ -168,20 +147,11 @@ class MockPeerConnectionDependencyFactory
int sdp_mline_index,
const std::string& sdp) override;
- std::unique_ptr<WebRtcAudioCapturer> CreateAudioCapturer(
- int render_frame_id,
- const StreamDeviceInfo& device_info,
- const blink::WebMediaConstraints& constraints,
- MediaStreamAudioSource* audio_source) override;
- void FailToCreateNextAudioCapturer() {
- fail_to_create_next_audio_capturer_ = true;
- }
-
- MockAudioSource* last_audio_source() { return last_audio_source_.get(); }
+ scoped_refptr<base::SingleThreadTaskRunner> GetWebRtcSignalingThread()
+ const override;
private:
- bool fail_to_create_next_audio_capturer_;
- scoped_refptr <MockAudioSource> last_audio_source_;
+ base::Thread signaling_thread_;
DISALLOW_COPY_AND_ASSIGN(MockPeerConnectionDependencyFactory);
};
diff --git a/chromium/content/renderer/media/webrtc/peer_connection_dependency_factory.cc b/chromium/content/renderer/media/webrtc/peer_connection_dependency_factory.cc
index 0885aefb1e6..76888542d54 100644
--- a/chromium/content/renderer/media/webrtc/peer_connection_dependency_factory.cc
+++ b/chromium/content/renderer/media/webrtc/peer_connection_dependency_factory.cc
@@ -30,23 +30,15 @@
#include "content/public/common/webrtc_ip_handling_policy.h"
#include "content/public/renderer/content_renderer_client.h"
#include "content/renderer/media/media_stream.h"
-#include "content/renderer/media/media_stream_audio_processor.h"
-#include "content/renderer/media/media_stream_audio_processor_options.h"
-#include "content/renderer/media/media_stream_audio_source.h"
-#include "content/renderer/media/media_stream_constraints_util.h"
#include "content/renderer/media/media_stream_video_source.h"
#include "content/renderer/media/media_stream_video_track.h"
#include "content/renderer/media/peer_connection_identity_store.h"
#include "content/renderer/media/rtc_peer_connection_handler.h"
#include "content/renderer/media/rtc_video_decoder_factory.h"
#include "content/renderer/media/rtc_video_encoder_factory.h"
-#include "content/renderer/media/webaudio_capturer_source.h"
-#include "content/renderer/media/webrtc/media_stream_remote_audio_track.h"
#include "content/renderer/media/webrtc/stun_field_trial.h"
-#include "content/renderer/media/webrtc/webrtc_local_audio_track_adapter.h"
#include "content/renderer/media/webrtc/webrtc_video_capturer_adapter.h"
#include "content/renderer/media/webrtc_audio_device_impl.h"
-#include "content/renderer/media/webrtc_local_audio_track.h"
#include "content/renderer/media/webrtc_logging.h"
#include "content/renderer/media/webrtc_uma_histograms.h"
#include "content/renderer/p2p/empty_network_manager.h"
@@ -72,7 +64,6 @@
#include "third_party/webrtc/api/dtlsidentitystore.h"
#include "third_party/webrtc/api/mediaconstraintsinterface.h"
#include "third_party/webrtc/base/ssladapter.h"
-#include "third_party/webrtc/media/base/mediachannel.h"
#include "third_party/webrtc/modules/video_coding/codecs/h264/include/h264.h"
#if defined(OS_ANDROID)
@@ -116,7 +107,7 @@ PeerConnectionDependencyFactory::PeerConnectionDependencyFactory(
PeerConnectionDependencyFactory::~PeerConnectionDependencyFactory() {
DVLOG(1) << "~PeerConnectionDependencyFactory()";
- DCHECK(pc_factory_ == NULL);
+ DCHECK(!pc_factory_);
}
blink::WebRTCPeerConnectionHandler*
@@ -130,91 +121,6 @@ PeerConnectionDependencyFactory::CreateRTCPeerConnectionHandler(
return new RTCPeerConnectionHandler(client, this);
}
-bool PeerConnectionDependencyFactory::InitializeMediaStreamAudioSource(
- int render_frame_id,
- const blink::WebMediaConstraints& audio_constraints,
- MediaStreamAudioSource* source_data) {
- DVLOG(1) << "InitializeMediaStreamAudioSources()";
-
- // Do additional source initialization if the audio source is a valid
- // microphone or tab audio.
-
- StreamDeviceInfo device_info = source_data->device_info();
-
- cricket::AudioOptions options;
- // Apply relevant constraints.
- options.echo_cancellation = ConstraintToOptional(
- audio_constraints, &blink::WebMediaTrackConstraintSet::echoCancellation);
- options.delay_agnostic_aec = ConstraintToOptional(
- audio_constraints,
- &blink::WebMediaTrackConstraintSet::googDAEchoCancellation);
- options.auto_gain_control = ConstraintToOptional(
- audio_constraints,
- &blink::WebMediaTrackConstraintSet::googAutoGainControl);
- options.experimental_agc = ConstraintToOptional(
- audio_constraints,
- &blink::WebMediaTrackConstraintSet::googExperimentalAutoGainControl);
- options.noise_suppression = ConstraintToOptional(
- audio_constraints,
- &blink::WebMediaTrackConstraintSet::googNoiseSuppression);
- options.experimental_ns = ConstraintToOptional(
- audio_constraints,
- &blink::WebMediaTrackConstraintSet::googExperimentalNoiseSuppression);
- options.highpass_filter = ConstraintToOptional(
- audio_constraints,
- &blink::WebMediaTrackConstraintSet::googHighpassFilter);
- options.typing_detection = ConstraintToOptional(
- audio_constraints,
- &blink::WebMediaTrackConstraintSet::googTypingNoiseDetection);
- options.stereo_swapping = ConstraintToOptional(
- audio_constraints,
- &blink::WebMediaTrackConstraintSet::googAudioMirroring);
-
- MediaAudioConstraints::ApplyFixedAudioConstraints(&options);
-
- if (device_info.device.input.effects &
- media::AudioParameters::ECHO_CANCELLER) {
- // TODO(hta): Figure out if we should be looking at echoCancellation.
- // Previous code had googEchoCancellation only.
- const blink::BooleanConstraint& echoCancellation =
- audio_constraints.basic().googEchoCancellation;
- if (echoCancellation.hasExact() && !echoCancellation.exact()) {
- device_info.device.input.effects &=
- ~media::AudioParameters::ECHO_CANCELLER;
- }
- options.echo_cancellation = rtc::Optional<bool>(false);
- }
-
- std::unique_ptr<WebRtcAudioCapturer> capturer = CreateAudioCapturer(
- render_frame_id, device_info, audio_constraints, source_data);
- if (!capturer.get()) {
- const std::string log_string =
- "PCDF::InitializeMediaStreamAudioSource: fails to create capturer";
- WebRtcLogMessage(log_string);
- DVLOG(1) << log_string;
- // TODO(xians): Don't we need to check if source_observer is observing
- // something? If not, then it looks like we have a leak here.
- // OTOH, if it _is_ observing something, then the callback might
- // be called multiple times which is likely also a bug.
- return false;
- }
- source_data->SetAudioCapturer(std::move(capturer));
-
- // Creates a LocalAudioSource object which holds audio options.
- // TODO(xians): The option should apply to the track instead of the source.
- // TODO(perkj): Move audio constraints parsing to Chrome.
- // Currently there are a few constraints that are parsed by libjingle and
- // the state is set to ended if parsing fails.
- scoped_refptr<webrtc::AudioSourceInterface> rtc_source(
- CreateLocalAudioSource(options).get());
- if (rtc_source->state() != webrtc::MediaSourceInterface::kLive) {
- DLOG(WARNING) << "Failed to create rtc LocalAudioSource.";
- return false;
- }
- source_data->SetLocalAudioSource(rtc_source.get());
- return true;
-}
-
WebRtcVideoCapturerAdapter*
PeerConnectionDependencyFactory::CreateVideoCapturer(
bool is_screeencast) {
@@ -275,6 +181,8 @@ void PeerConnectionDependencyFactory::CreatePeerConnectionFactory() {
jingle_glue::JingleThreadWrapper::EnsureForCurrentMessageLoop();
jingle_glue::JingleThreadWrapper::current()->set_send_allowed(true);
+ EnsureWebRtcAudioDeviceImpl();
+
CHECK(chrome_signaling_thread_.Start());
CHECK(chrome_worker_thread_.Start());
@@ -326,8 +234,6 @@ void PeerConnectionDependencyFactory::InitializeSignalingThread(
jingle_glue::JingleThreadWrapper::current()->set_send_allowed(true);
signaling_thread_ = jingle_glue::JingleThreadWrapper::current();
- EnsureWebRtcAudioDeviceImpl();
-
socket_factory_.reset(
new IpcPacketSocketFactory(p2p_socket_dispatcher_.get()));
@@ -384,7 +290,7 @@ PeerConnectionDependencyFactory::CreatePeerConnection(
if (!GetPcFactory().get())
return NULL;
- rtc::scoped_ptr<PeerConnectionIdentityStore> identity_store(
+ std::unique_ptr<PeerConnectionIdentityStore> identity_store(
new PeerConnectionIdentityStore(
base::ThreadTaskRunnerHandle::Get(), GetWebRtcSignalingThread(),
GURL(web_frame->document().url()),
@@ -493,7 +399,7 @@ PeerConnectionDependencyFactory::CreatePeerConnection(
} else {
network_manager.reset(new EmptyNetworkManager(network_manager_));
}
- rtc::scoped_ptr<P2PPortAllocator> port_allocator(new P2PPortAllocator(
+ std::unique_ptr<P2PPortAllocator> port_allocator(new P2PPortAllocator(
p2p_socket_dispatcher_, std::move(network_manager), socket_factory_.get(),
port_config, requesting_origin, chrome_worker_thread_.task_runner()));
@@ -504,21 +410,9 @@ PeerConnectionDependencyFactory::CreatePeerConnection(
}
// static
-void PeerConnectionDependencyFactory::SetDefaultCertificate(
- webrtc::PeerConnectionInterface::RTCConfiguration* config) {
- if (config->certificates.empty()) {
- rtc::scoped_ptr<rtc::SSLIdentity> identity(rtc::SSLIdentity::Generate(
- webrtc::kIdentityName, rtc::KeyParams::ECDSA(rtc::EC_NIST_P256)));
- rtc::scoped_refptr<rtc::RTCCertificate> certificate =
- rtc::RTCCertificate::Create(std::move(identity));
- config->certificates.push_back(certificate);
- }
-}
-
-// static
rtc::scoped_refptr<rtc::RTCCertificate>
PeerConnectionDependencyFactory::GenerateDefaultCertificate() {
- rtc::scoped_ptr<rtc::SSLIdentity> identity(rtc::SSLIdentity::Generate(
+ std::unique_ptr<rtc::SSLIdentity> identity(rtc::SSLIdentity::Generate(
webrtc::kIdentityName, rtc::KeyParams::ECDSA(rtc::EC_NIST_P256)));
return rtc::RTCCertificate::Create(std::move(identity));
}
@@ -529,92 +423,6 @@ PeerConnectionDependencyFactory::CreateLocalMediaStream(
return GetPcFactory()->CreateLocalMediaStream(label).get();
}
-scoped_refptr<webrtc::AudioSourceInterface>
-PeerConnectionDependencyFactory::CreateLocalAudioSource(
- const cricket::AudioOptions& options) {
- scoped_refptr<webrtc::AudioSourceInterface> source =
- GetPcFactory()->CreateAudioSource(options).get();
- return source;
-}
-
-void PeerConnectionDependencyFactory::CreateLocalAudioTrack(
- const blink::WebMediaStreamTrack& track) {
- blink::WebMediaStreamSource source = track.source();
- DCHECK_EQ(source.getType(), blink::WebMediaStreamSource::TypeAudio);
- MediaStreamAudioSource* source_data = MediaStreamAudioSource::From(source);
-
- if (!source_data) {
- if (source.requiresAudioConsumer()) {
- // We're adding a WebAudio MediaStream.
- // Create a specific capturer for each WebAudio consumer.
- CreateWebAudioSource(&source);
- source_data = MediaStreamAudioSource::From(source);
- DCHECK(source_data->webaudio_capturer());
- } else {
- NOTREACHED() << "Local track missing MediaStreamAudioSource instance.";
- return;
- }
- }
-
- // Creates an adapter to hold all the libjingle objects.
- scoped_refptr<WebRtcLocalAudioTrackAdapter> adapter(
- WebRtcLocalAudioTrackAdapter::Create(track.id().utf8(),
- source_data->local_audio_source()));
- static_cast<webrtc::AudioTrackInterface*>(adapter.get())->set_enabled(
- track.isEnabled());
-
- // TODO(xians): Merge |source| to the capturer(). We can't do this today
- // because only one capturer() is supported while one |source| is created
- // for each audio track.
- std::unique_ptr<WebRtcLocalAudioTrack> audio_track(
- new WebRtcLocalAudioTrack(adapter.get()));
-
- // Start the source and connect the audio data flow to the track.
- //
- // TODO(miu): This logic will me moved to MediaStreamAudioSource (or a
- // subclass of it) in soon-upcoming changes.
- audio_track->Start(base::Bind(&MediaStreamAudioSource::StopAudioDeliveryTo,
- source_data->GetWeakPtr(),
- audio_track.get()));
- if (source_data->webaudio_capturer())
- source_data->webaudio_capturer()->Start(audio_track.get());
- else if (source_data->audio_capturer())
- source_data->audio_capturer()->AddTrack(audio_track.get());
- else
- NOTREACHED();
-
- // Pass the ownership of the native local audio track to the blink track.
- blink::WebMediaStreamTrack writable_track = track;
- writable_track.setExtraData(audio_track.release());
-}
-
-void PeerConnectionDependencyFactory::CreateRemoteAudioTrack(
- const blink::WebMediaStreamTrack& track) {
- blink::WebMediaStreamSource source = track.source();
- DCHECK_EQ(source.getType(), blink::WebMediaStreamSource::TypeAudio);
- DCHECK(source.remote());
- DCHECK(MediaStreamAudioSource::From(source));
-
- blink::WebMediaStreamTrack writable_track = track;
- writable_track.setExtraData(
- new MediaStreamRemoteAudioTrack(source, track.isEnabled()));
-}
-
-void PeerConnectionDependencyFactory::CreateWebAudioSource(
- blink::WebMediaStreamSource* source) {
- DVLOG(1) << "PeerConnectionDependencyFactory::CreateWebAudioSource()";
-
- MediaStreamAudioSource* source_data = new MediaStreamAudioSource();
- source_data->SetWebAudioCapturer(
- base::WrapUnique(new WebAudioCapturerSource(source)));
-
- // Create a LocalAudioSource object which holds audio options.
- // SetLocalAudioSource() affects core audio parts in third_party/Libjingle.
- cricket::AudioOptions options;
- source_data->SetLocalAudioSource(CreateLocalAudioSource(options).get());
- source->setExtraData(source_data);
-}
-
scoped_refptr<webrtc::VideoTrackInterface>
PeerConnectionDependencyFactory::CreateLocalVideoTrack(
const std::string& id,
@@ -665,6 +473,8 @@ void PeerConnectionDependencyFactory::StopRtcEventLog() {
WebRtcAudioDeviceImpl*
PeerConnectionDependencyFactory::GetWebRtcAudioDevice() {
+ DCHECK(CalledOnValidThread());
+ EnsureWebRtcAudioDeviceImpl();
return audio_device_.get();
}
@@ -753,23 +563,6 @@ void PeerConnectionDependencyFactory::CleanupPeerConnectionFactory() {
}
}
-std::unique_ptr<WebRtcAudioCapturer>
-PeerConnectionDependencyFactory::CreateAudioCapturer(
- int render_frame_id,
- const StreamDeviceInfo& device_info,
- const blink::WebMediaConstraints& constraints,
- MediaStreamAudioSource* audio_source) {
- // TODO(xians): Handle the cases when gUM is called without a proper render
- // view, for example, by an extension.
- DCHECK_GE(render_frame_id, 0);
-
- EnsureWebRtcAudioDeviceImpl();
- DCHECK(GetWebRtcAudioDevice());
- return WebRtcAudioCapturer::CreateCapturer(
- render_frame_id, device_info, constraints, GetWebRtcAudioDevice(),
- audio_source);
-}
-
void PeerConnectionDependencyFactory::EnsureInitialized() {
DCHECK(CalledOnValidThread());
GetPcFactory();
@@ -791,6 +584,7 @@ PeerConnectionDependencyFactory::GetWebRtcSignalingThread() const {
}
void PeerConnectionDependencyFactory::EnsureWebRtcAudioDeviceImpl() {
+ DCHECK(CalledOnValidThread());
if (audio_device_.get())
return;
diff --git a/chromium/content/renderer/media/webrtc/peer_connection_dependency_factory.h b/chromium/content/renderer/media/webrtc/peer_connection_dependency_factory.h
index e58174311a6..e5e017c5a7c 100644
--- a/chromium/content/renderer/media/webrtc/peer_connection_dependency_factory.h
+++ b/chromium/content/renderer/media/webrtc/peer_connection_dependency_factory.h
@@ -9,10 +9,10 @@
#include "base/files/file.h"
#include "base/macros.h"
+#include "base/message_loop/message_loop.h"
#include "base/single_thread_task_runner.h"
#include "base/threading/thread.h"
#include "content/common/content_export.h"
-#include "content/public/renderer/render_process_observer.h"
#include "content/renderer/media/webrtc/stun_field_trial.h"
#include "content/renderer/p2p/socket_dispatcher.h"
#include "ipc/ipc_platform_file.h"
@@ -47,11 +47,7 @@ namespace content {
class IpcNetworkManager;
class IpcPacketSocketFactory;
-class MediaStreamAudioSource;
-class WebAudioCapturerSource;
-class WebRtcAudioCapturer;
class WebRtcAudioDeviceImpl;
-class WebRtcLocalAudioTrack;
class WebRtcLoggingHandlerImpl;
class WebRtcLoggingMessageFilter;
class WebRtcVideoCapturerAdapter;
@@ -71,36 +67,18 @@ class CONTENT_EXPORT PeerConnectionDependencyFactory
blink::WebRTCPeerConnectionHandler* CreateRTCPeerConnectionHandler(
blink::WebRTCPeerConnectionHandlerClient* client);
- // Add an ECDSA certificate to |config| in case it contains no certificate.
- static void SetDefaultCertificate(
- webrtc::PeerConnectionInterface::RTCConfiguration* config);
-
+ // Generate an ECDSA certificate.
static rtc::scoped_refptr<rtc::RTCCertificate> GenerateDefaultCertificate();
// Asks the PeerConnection factory to create a Local MediaStream object.
virtual scoped_refptr<webrtc::MediaStreamInterface>
CreateLocalMediaStream(const std::string& label);
- // InitializeMediaStreamAudioSource initialize a MediaStream source object
- // for audio input.
- bool InitializeMediaStreamAudioSource(
- int render_frame_id,
- const blink::WebMediaConstraints& audio_constraints,
- MediaStreamAudioSource* source_data);
-
// Creates an implementation of a cricket::VideoCapturer object that can be
// used when creating a libjingle webrtc::VideoTrackSourceInterface object.
virtual WebRtcVideoCapturerAdapter* CreateVideoCapturer(
bool is_screen_capture);
- // Creates an instance of WebRtcLocalAudioTrack and stores it
- // in the extraData field of |track|.
- void CreateLocalAudioTrack(const blink::WebMediaStreamTrack& track);
-
- // Creates an instance of MediaStreamRemoteAudioTrack and associates with the
- // |track| object.
- void CreateRemoteAudioTrack(const blink::WebMediaStreamTrack& track);
-
// Asks the PeerConnection factory to create a Local VideoTrack object.
virtual scoped_refptr<webrtc::VideoTrackInterface> CreateLocalVideoTrack(
const std::string& id,
@@ -143,20 +121,10 @@ class CONTENT_EXPORT PeerConnectionDependencyFactory
void EnsureInitialized();
scoped_refptr<base::SingleThreadTaskRunner> GetWebRtcWorkerThread() const;
- scoped_refptr<base::SingleThreadTaskRunner> GetWebRtcSignalingThread() const;
+ virtual scoped_refptr<base::SingleThreadTaskRunner> GetWebRtcSignalingThread()
+ const;
protected:
- // Asks the PeerConnection factory to create a Local Audio Source.
- virtual scoped_refptr<webrtc::AudioSourceInterface> CreateLocalAudioSource(
- const cricket::AudioOptions& options);
-
- // Creates a media::AudioCapturerSource with an implementation that is
- // specific for a WebAudio source. The created WebAudioCapturerSource
- // instance will function as audio source instead of the default
- // WebRtcAudioCapturer. Ownership of the new WebAudioCapturerSource is
- // transferred to |source|.
- virtual void CreateWebAudioSource(blink::WebMediaStreamSource* source);
-
// Asks the PeerConnection factory to create a Local VideoTrack object with
// the video source using |capturer|.
virtual scoped_refptr<webrtc::VideoTrackInterface>
@@ -167,14 +135,8 @@ class CONTENT_EXPORT PeerConnectionDependencyFactory
GetPcFactory();
virtual bool PeerConnectionFactoryCreated();
- // Returns a new capturer or existing capturer based on the |render_frame_id|
- // and |device_info|; if both are valid, it reuses existing capture if any --
- // otherwise it creates a new capturer.
- virtual std::unique_ptr<WebRtcAudioCapturer> CreateAudioCapturer(
- int render_frame_id,
- const StreamDeviceInfo& device_info,
- const blink::WebMediaConstraints& constraints,
- MediaStreamAudioSource* audio_source);
+ // Helper method to create a WebRtcAudioDeviceImpl.
+ void EnsureWebRtcAudioDeviceImpl();
private:
// Implement base::MessageLoop::DestructionObserver.
@@ -202,9 +164,6 @@ class CONTENT_EXPORT PeerConnectionDependencyFactory
void DeleteIpcNetworkManager();
void CleanupPeerConnectionFactory();
- // Helper method to create a WebRtcAudioDeviceImpl.
- void EnsureWebRtcAudioDeviceImpl();
-
// We own network_manager_, must be deleted on the worker thread.
// The network manager uses |p2p_socket_dispatcher_|.
IpcNetworkManager* network_manager_;
diff --git a/chromium/content/renderer/media/webrtc/peer_connection_remote_audio_source.cc b/chromium/content/renderer/media/webrtc/peer_connection_remote_audio_source.cc
new file mode 100644
index 00000000000..46d7318c3b2
--- /dev/null
+++ b/chromium/content/renderer/media/webrtc/peer_connection_remote_audio_source.cc
@@ -0,0 +1,153 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/renderer/media/webrtc/peer_connection_remote_audio_source.h"
+
+#include "base/logging.h"
+#include "base/time/time.h"
+#include "media/base/audio_bus.h"
+
+namespace content {
+
+namespace {
+// Used as an identifier for the down-casters.
+void* const kClassIdentifier = const_cast<void**>(&kClassIdentifier);
+} // namespace
+
+PeerConnectionRemoteAudioTrack::PeerConnectionRemoteAudioTrack(
+ scoped_refptr<webrtc::AudioTrackInterface> track_interface)
+ : MediaStreamAudioTrack(false /* is_local_track */),
+ track_interface_(std::move(track_interface)) {
+ DVLOG(1)
+ << "PeerConnectionRemoteAudioTrack::PeerConnectionRemoteAudioTrack()";
+}
+
+PeerConnectionRemoteAudioTrack::~PeerConnectionRemoteAudioTrack() {
+ DVLOG(1)
+ << "PeerConnectionRemoteAudioTrack::~PeerConnectionRemoteAudioTrack()";
+ // Ensure the track is stopped.
+ MediaStreamAudioTrack::Stop();
+}
+
+// static
+PeerConnectionRemoteAudioTrack* PeerConnectionRemoteAudioTrack::From(
+ MediaStreamAudioTrack* track) {
+ if (track && track->GetClassIdentifier() == kClassIdentifier)
+ return static_cast<PeerConnectionRemoteAudioTrack*>(track);
+ return nullptr;
+}
+
+void PeerConnectionRemoteAudioTrack::SetEnabled(bool enabled) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ // This affects the shared state of the source for whether or not it's a part
+ // of the mixed audio that's rendered for remote tracks from WebRTC.
+ // All tracks from the same source will share this state and thus can step
+ // on each other's toes.
+ // This is also why we can't check the enabled state for equality with
+ // |enabled| before setting the mixing enabled state. This track's enabled
+ // state and the shared state might not be the same.
+ track_interface_->set_enabled(enabled);
+
+ MediaStreamAudioTrack::SetEnabled(enabled);
+}
+
+void* PeerConnectionRemoteAudioTrack::GetClassIdentifier() const {
+ return kClassIdentifier;
+}
+
+PeerConnectionRemoteAudioSource::PeerConnectionRemoteAudioSource(
+ scoped_refptr<webrtc::AudioTrackInterface> track_interface)
+ : MediaStreamAudioSource(false /* is_local_source */),
+ track_interface_(std::move(track_interface)),
+ is_sink_of_peer_connection_(false) {
+ DCHECK(track_interface_);
+ DVLOG(1)
+ << "PeerConnectionRemoteAudioSource::PeerConnectionRemoteAudioSource()";
+}
+
+PeerConnectionRemoteAudioSource::~PeerConnectionRemoteAudioSource() {
+ DVLOG(1)
+ << "PeerConnectionRemoteAudioSource::~PeerConnectionRemoteAudioSource()";
+ EnsureSourceIsStopped();
+}
+
+std::unique_ptr<MediaStreamAudioTrack>
+PeerConnectionRemoteAudioSource::CreateMediaStreamAudioTrack(
+ const std::string& id) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ return std::unique_ptr<MediaStreamAudioTrack>(
+ new PeerConnectionRemoteAudioTrack(track_interface_));
+}
+
+bool PeerConnectionRemoteAudioSource::EnsureSourceIsStarted() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ if (is_sink_of_peer_connection_)
+ return true;
+ VLOG(1) << "Starting PeerConnection remote audio source with id="
+ << track_interface_->id();
+ track_interface_->AddSink(this);
+ is_sink_of_peer_connection_ = true;
+ return true;
+}
+
+void PeerConnectionRemoteAudioSource::EnsureSourceIsStopped() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ if (is_sink_of_peer_connection_) {
+ track_interface_->RemoveSink(this);
+ is_sink_of_peer_connection_ = false;
+ VLOG(1) << "Stopped PeerConnection remote audio source with id="
+ << track_interface_->id();
+ }
+}
+
+void PeerConnectionRemoteAudioSource::OnData(const void* audio_data,
+ int bits_per_sample,
+ int sample_rate,
+ size_t number_of_channels,
+ size_t number_of_frames) {
+ // Debug builds: Note that this lock isn't meant to synchronize anything.
+ // Instead, it is being used as a run-time check to ensure there isn't already
+ // another thread executing this method. The reason we don't use
+ // base::ThreadChecker here is because we shouldn't be making assumptions
+ // about the private threading model of libjingle. For example, it would be
+ // legitimate for libjingle to use a different thread to invoke this method
+ // whenever the audio format changes.
+#ifndef NDEBUG
+ const bool is_only_thread_here = single_audio_thread_guard_.Try();
+ DCHECK(is_only_thread_here);
+#endif
+
+ // TODO(tommi): We should get the timestamp from WebRTC.
+ base::TimeTicks playout_time(base::TimeTicks::Now());
+
+ if (!audio_bus_ ||
+ static_cast<size_t>(audio_bus_->channels()) != number_of_channels ||
+ static_cast<size_t>(audio_bus_->frames()) != number_of_frames) {
+ audio_bus_ = media::AudioBus::Create(number_of_channels, number_of_frames);
+ }
+
+ audio_bus_->FromInterleaved(audio_data, number_of_frames,
+ bits_per_sample / 8);
+
+ media::AudioParameters params = MediaStreamAudioSource::GetAudioParameters();
+ if (!params.IsValid() ||
+ params.format() != media::AudioParameters::AUDIO_PCM_LOW_LATENCY ||
+ static_cast<size_t>(params.channels()) != number_of_channels ||
+ params.sample_rate() != sample_rate ||
+ static_cast<size_t>(params.frames_per_buffer()) != number_of_frames) {
+ MediaStreamAudioSource::SetFormat(
+ media::AudioParameters(media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ media::GuessChannelLayout(number_of_channels),
+ sample_rate, bits_per_sample, number_of_frames));
+ }
+
+ MediaStreamAudioSource::DeliverDataToTracks(*audio_bus_, playout_time);
+
+#ifndef NDEBUG
+ single_audio_thread_guard_.Release();
+#endif
+}
+
+} // namespace content
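Note the debug-only guard in the new OnData() above: rather than a thread checker (the comment explains libjingle may legitimately switch delivery threads, e.g. on a format change), it only asserts that two threads are never inside the callback at the same time. A minimal sketch of that idiom, with std::mutex::try_lock() standing in for base::Lock::Try():

#include <cassert>
#include <mutex>

class CallbackGuardExample {
 public:
  void OnData() {
#ifndef NDEBUG
    // Not used for synchronization: if try_lock() ever fails, a second
    // thread is concurrently inside the callback, which is a bug.
    const bool only_thread_here = guard_.try_lock();
    assert(only_thread_here && "concurrent OnData() calls detected");
#endif
    // ... consume the audio buffer ...
#ifndef NDEBUG
    guard_.unlock();
#endif
  }

 private:
#ifndef NDEBUG
  std::mutex guard_;
#endif
};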
diff --git a/chromium/content/renderer/media/webrtc/peer_connection_remote_audio_source.h b/chromium/content/renderer/media/webrtc/peer_connection_remote_audio_source.h
new file mode 100644
index 00000000000..aa4f15d5b0e
--- /dev/null
+++ b/chromium/content/renderer/media/webrtc/peer_connection_remote_audio_source.h
@@ -0,0 +1,102 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_RENDERER_MEDIA_WEBRTC_PEER_CONNECTION_REMOTE_AUDIO_SOURCE_H_
+#define CONTENT_RENDERER_MEDIA_WEBRTC_PEER_CONNECTION_REMOTE_AUDIO_SOURCE_H_
+
+#include <memory>
+
+#include "base/memory/ref_counted.h"
+#include "base/synchronization/lock.h"
+#include "content/renderer/media/media_stream_audio_source.h"
+#include "content/renderer/media/media_stream_audio_track.h"
+#include "third_party/webrtc/api/mediastreaminterface.h"
+
+namespace media {
+class AudioBus;
+}
+
+namespace content {
+
+// PeerConnectionRemoteAudioTrack is a WebRTC specific implementation of an
+// audio track whose data is sourced from a PeerConnection.
+class PeerConnectionRemoteAudioTrack final
+ : NON_EXPORTED_BASE(public MediaStreamAudioTrack) {
+ public:
+ explicit PeerConnectionRemoteAudioTrack(
+ scoped_refptr<webrtc::AudioTrackInterface> track_interface);
+ ~PeerConnectionRemoteAudioTrack() final;
+
+ // If |track| is an instance of PeerConnectionRemoteAudioTrack, return a
+ // type-casted pointer to it. Otherwise, return null.
+ static PeerConnectionRemoteAudioTrack* From(MediaStreamAudioTrack* track);
+
+ webrtc::AudioTrackInterface* track_interface() const {
+ return track_interface_.get();
+ }
+
+ // MediaStreamAudioTrack override.
+ void SetEnabled(bool enabled) override;
+
+ private:
+ // MediaStreamAudioTrack overrides.
+ void* GetClassIdentifier() const final;
+
+ const scoped_refptr<webrtc::AudioTrackInterface> track_interface_;
+
+ // In debug builds, check that all methods that could cause object graph
+ // or data flow changes are being called on the main thread.
+ base::ThreadChecker thread_checker_;
+
+ DISALLOW_COPY_AND_ASSIGN(PeerConnectionRemoteAudioTrack);
+};
+
+// Represents the audio provided by the receiving end of a PeerConnection.
+class PeerConnectionRemoteAudioSource final
+ : NON_EXPORTED_BASE(public MediaStreamAudioSource),
+ NON_EXPORTED_BASE(protected webrtc::AudioTrackSinkInterface) {
+ public:
+ explicit PeerConnectionRemoteAudioSource(
+ scoped_refptr<webrtc::AudioTrackInterface> track_interface);
+ ~PeerConnectionRemoteAudioSource() final;
+
+ protected:
+ // MediaStreamAudioSource implementation.
+ std::unique_ptr<MediaStreamAudioTrack> CreateMediaStreamAudioTrack(
+ const std::string& id) final;
+ bool EnsureSourceIsStarted() final;
+ void EnsureSourceIsStopped() final;
+
+ // webrtc::AudioTrackSinkInterface implementation.
+ void OnData(const void* audio_data, int bits_per_sample, int sample_rate,
+ size_t number_of_channels, size_t number_of_frames) final;
+
+ private:
+ // Interface to the implementation that calls OnData().
+ const scoped_refptr<webrtc::AudioTrackInterface> track_interface_;
+
+ // In debug builds, check that all methods that could cause object graph
+ // or data flow changes are being called on the main thread.
+ base::ThreadChecker thread_checker_;
+
+ // True if |this| is receiving an audio flow as a sink of the remote
+ // PeerConnection via |track_interface_|.
+ bool is_sink_of_peer_connection_;
+
+ // Buffer for converting from interleaved signed-integer PCM samples to the
+ // planar float format. Only used on the thread that calls OnData().
+ std::unique_ptr<media::AudioBus> audio_bus_;
+
+ // In debug builds, use a "try lock" to sanity-check that there are no
+ // concurrent calls to OnData(). See notes in OnData() implementation.
+#ifndef NDEBUG
+ base::Lock single_audio_thread_guard_;
+#endif
+
+ DISALLOW_COPY_AND_ASSIGN(PeerConnectionRemoteAudioSource);
+};
+
+} // namespace content
+
+#endif // CONTENT_RENDERER_MEDIA_WEBRTC_PEER_CONNECTION_REMOTE_AUDIO_SOURCE_H_
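
Per the |audio_bus_| comment above, OnData() receives interleaved signed 16-bit PCM and converts it to the planar float format used by the rest of the pipeline. Below is a hedged re-implementation of that conversion in plain C++ (the patch itself routes this through a media::AudioBus; ToPlanarFloat is an illustrative name):

#include <cstddef>
#include <cstdint>
#include <vector>

// De-interleave signed 16-bit PCM into one float vector per channel, with
// samples scaled into [-1.0, 1.0).
std::vector<std::vector<float>> ToPlanarFloat(const int16_t* interleaved,
                                              size_t number_of_channels,
                                              size_t number_of_frames) {
  std::vector<std::vector<float>> planar(
      number_of_channels, std::vector<float>(number_of_frames));
  for (size_t frame = 0; frame < number_of_frames; ++frame) {
    for (size_t ch = 0; ch < number_of_channels; ++ch) {
      planar[ch][frame] =
          interleaved[frame * number_of_channels + ch] / 32768.0f;
    }
  }
  return planar;
}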
diff --git a/chromium/content/renderer/media/webrtc/processed_local_audio_source.cc b/chromium/content/renderer/media/webrtc/processed_local_audio_source.cc
new file mode 100644
index 00000000000..b25469372a3
--- /dev/null
+++ b/chromium/content/renderer/media/webrtc/processed_local_audio_source.cc
@@ -0,0 +1,374 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/renderer/media/webrtc/processed_local_audio_source.h"
+
+#include "base/logging.h"
+#include "base/metrics/histogram.h"
+#include "base/strings/stringprintf.h"
+#include "content/renderer/media/audio_device_factory.h"
+#include "content/renderer/media/media_stream_audio_processor_options.h"
+#include "content/renderer/media/media_stream_constraints_util.h"
+#include "content/renderer/media/webrtc/peer_connection_dependency_factory.h"
+#include "content/renderer/media/webrtc_audio_device_impl.h"
+#include "content/renderer/media/webrtc_logging.h"
+#include "content/renderer/render_frame_impl.h"
+#include "media/audio/sample_rates.h"
+#include "media/base/channel_layout.h"
+#include "third_party/webrtc/api/mediaconstraintsinterface.h"
+#include "third_party/webrtc/media/base/mediachannel.h"
+
+namespace content {
+
+namespace {
+// Used as an identifier for ProcessedLocalAudioSource::From().
+void* const kClassIdentifier = const_cast<void**>(&kClassIdentifier);
+} // namespace
+
+ProcessedLocalAudioSource::ProcessedLocalAudioSource(
+ int consumer_render_frame_id,
+ const StreamDeviceInfo& device_info,
+ PeerConnectionDependencyFactory* factory)
+ : MediaStreamAudioSource(true /* is_local_source */),
+ consumer_render_frame_id_(consumer_render_frame_id),
+ pc_factory_(factory),
+ volume_(0),
+ allow_invalid_render_frame_id_for_testing_(false) {
+ DCHECK(pc_factory_);
+ DVLOG(1) << "ProcessedLocalAudioSource::ProcessedLocalAudioSource()";
+ MediaStreamSource::SetDeviceInfo(device_info);
+}
+
+ProcessedLocalAudioSource::~ProcessedLocalAudioSource() {
+ DVLOG(1) << "ProcessedLocalAudioSource::~ProcessedLocalAudioSource()";
+ EnsureSourceIsStopped();
+}
+
+// static
+ProcessedLocalAudioSource* ProcessedLocalAudioSource::From(
+ MediaStreamAudioSource* source) {
+ if (source && source->GetClassIdentifier() == kClassIdentifier)
+ return static_cast<ProcessedLocalAudioSource*>(source);
+ return nullptr;
+}
+
+void ProcessedLocalAudioSource::SetSourceConstraints(
+ const blink::WebMediaConstraints& constraints) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(!constraints.isNull());
+ DCHECK(!source_);
+ constraints_ = constraints;
+}
+
+void* ProcessedLocalAudioSource::GetClassIdentifier() const {
+ return kClassIdentifier;
+}
+
+bool ProcessedLocalAudioSource::EnsureSourceIsStarted() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ {
+ base::AutoLock auto_lock(source_lock_);
+ if (source_)
+ return true;
+ }
+
+ // Sanity-check that the consuming RenderFrame still exists. This is required
+ // to initialize the audio source.
+ if (!allow_invalid_render_frame_id_for_testing_ &&
+ !RenderFrameImpl::FromRoutingID(consumer_render_frame_id_)) {
+ WebRtcLogMessage("ProcessedLocalAudioSource::EnsureSourceIsStarted() fails "
+ " because the render frame does not exist.");
+ return false;
+ }
+
+ WebRtcLogMessage(base::StringPrintf(
+ "ProcessedLocalAudioSource::EnsureSourceIsStarted. render_frame_id=%d"
+ ", channel_layout=%d, sample_rate=%d, buffer_size=%d"
+ ", session_id=%d, paired_output_sample_rate=%d"
+ ", paired_output_frames_per_buffer=%d, effects=%d. ",
+ consumer_render_frame_id_, device_info().device.input.channel_layout,
+ device_info().device.input.sample_rate,
+ device_info().device.input.frames_per_buffer, device_info().session_id,
+ device_info().device.matched_output.sample_rate,
+ device_info().device.matched_output.frames_per_buffer,
+ device_info().device.input.effects));
+
+ // Sanity-check that the constraints, plus the additional input effects, are
+ // valid when combined.
+ const MediaAudioConstraints audio_constraints(
+ constraints_, device_info().device.input.effects);
+ if (!audio_constraints.IsValid()) {
+ WebRtcLogMessage("ProcessedLocalAudioSource::EnsureSourceIsStarted() fails "
+ " because MediaAudioConstraints are not valid.");
+ return false;
+ }
+
+ if (device_info().device.input.effects &
+ media::AudioParameters::ECHO_CANCELLER) {
+ // TODO(hta): Figure out if we should be looking at echoCancellation.
+ // Previous code had googEchoCancellation only.
+ const blink::BooleanConstraint& echoCancellation =
+ constraints_.basic().googEchoCancellation;
+ if (echoCancellation.hasExact() && !echoCancellation.exact()) {
+ StreamDeviceInfo modified_device_info(device_info());
+ modified_device_info.device.input.effects &=
+ ~media::AudioParameters::ECHO_CANCELLER;
+ SetDeviceInfo(modified_device_info);
+ }
+ }
+
+ // Create the MediaStreamAudioProcessor, bound to the WebRTC audio device
+ // module.
+ WebRtcAudioDeviceImpl* const rtc_audio_device =
+ pc_factory_->GetWebRtcAudioDevice();
+ if (!rtc_audio_device) {
+ WebRtcLogMessage("ProcessedLocalAudioSource::EnsureSourceIsStarted() fails "
+ " because there is no WebRtcAudioDeviceImpl instance.");
+ return false;
+ }
+ audio_processor_ = new rtc::RefCountedObject<MediaStreamAudioProcessor>(
+ constraints_, device_info().device.input, rtc_audio_device);
+
+ // If the KEYBOARD_MIC effect is set, change the layout to the corresponding
+ // layout that includes the keyboard mic.
+ media::ChannelLayout channel_layout = static_cast<media::ChannelLayout>(
+ device_info().device.input.channel_layout);
+ if ((device_info().device.input.effects &
+ media::AudioParameters::KEYBOARD_MIC) &&
+ audio_constraints.GetGoogExperimentalNoiseSuppression()) {
+ if (channel_layout == media::CHANNEL_LAYOUT_STEREO) {
+ channel_layout = media::CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC;
+ DVLOG(1) << "Changed stereo layout to stereo + keyboard mic layout due "
+ << "to KEYBOARD_MIC effect.";
+ } else {
+ DVLOG(1) << "KEYBOARD_MIC effect ignored, not compatible with layout "
+ << channel_layout;
+ }
+ }
+
+ DVLOG(1) << "Audio input hardware channel layout: " << channel_layout;
+ UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioInputChannelLayout",
+ channel_layout, media::CHANNEL_LAYOUT_MAX + 1);
+
+ // Verify that the reported input channel configuration is supported.
+ if (channel_layout != media::CHANNEL_LAYOUT_MONO &&
+ channel_layout != media::CHANNEL_LAYOUT_STEREO &&
+ channel_layout != media::CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC) {
+ WebRtcLogMessage(base::StringPrintf(
+ "ProcessedLocalAudioSource::EnsureSourceIsStarted() fails "
+ " because the input channel layout (%d) is not supported.",
+ static_cast<int>(channel_layout)));
+ return false;
+ }
+
+ DVLOG(1) << "Audio input hardware sample rate: "
+ << device_info().device.input.sample_rate;
+ media::AudioSampleRate asr;
+ if (media::ToAudioSampleRate(device_info().device.input.sample_rate, &asr)) {
+ UMA_HISTOGRAM_ENUMERATION(
+ "WebRTC.AudioInputSampleRate", asr, media::kAudioSampleRateMax + 1);
+ } else {
+ UMA_HISTOGRAM_COUNTS("WebRTC.AudioInputSampleRateUnexpected",
+ device_info().device.input.sample_rate);
+ }
+
+ // Determine the audio format required of the AudioCapturerSource. Then, pass
+ // that to the |audio_processor_| and set the output format of this
+ // ProcessedLocalAudioSource to the processor's output format.
+ media::AudioParameters params(
+ media::AudioParameters::AUDIO_PCM_LOW_LATENCY, channel_layout,
+ device_info().device.input.sample_rate, 16,
+ GetBufferSize(device_info().device.input.sample_rate));
+ params.set_effects(device_info().device.input.effects);
+ DCHECK(params.IsValid());
+ audio_processor_->OnCaptureFormatChanged(params);
+ MediaStreamAudioSource::SetFormat(audio_processor_->OutputFormat());
+
+ // Start the source.
+ VLOG(1) << "Starting WebRTC audio source for consumption by render frame "
+ << consumer_render_frame_id_ << " with input parameters={"
+ << params.AsHumanReadableString() << "} and output parameters={"
+ << GetAudioParameters().AsHumanReadableString() << '}';
+ scoped_refptr<media::AudioCapturerSource> new_source =
+ AudioDeviceFactory::NewAudioCapturerSource(consumer_render_frame_id_);
+ new_source->Initialize(params, this, device_info().session_id);
+ // We need to set the AGC control before starting the stream.
+ new_source->SetAutomaticGainControl(true);
+ {
+ base::AutoLock auto_lock(source_lock_);
+ source_ = std::move(new_source);
+ }
+ source_->Start();
+
+ // Register this source with the WebRtcAudioDeviceImpl.
+ rtc_audio_device->AddAudioCapturer(this);
+
+ return true;
+}
+
+void ProcessedLocalAudioSource::EnsureSourceIsStopped() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ scoped_refptr<media::AudioCapturerSource> source_to_stop;
+ {
+ base::AutoLock auto_lock(source_lock_);
+ if (!source_)
+ return;
+ source_to_stop = std::move(source_);
+ }
+
+ if (WebRtcAudioDeviceImpl* rtc_audio_device =
+ pc_factory_->GetWebRtcAudioDevice()) {
+ rtc_audio_device->RemoveAudioCapturer(this);
+ }
+
+ source_to_stop->Stop();
+
+ // Stop the audio processor to avoid feeding render data into the processor.
+ audio_processor_->Stop();
+
+ VLOG(1) << "Stopped WebRTC audio pipeline for consumption by render frame "
+ << consumer_render_frame_id_ << '.';
+}
+
+void ProcessedLocalAudioSource::SetVolume(int volume) {
+ DVLOG(1) << "ProcessedLocalAudioSource::SetVolume()";
+ DCHECK_LE(volume, MaxVolume());
+
+ const double normalized_volume = static_cast<double>(volume) / MaxVolume();
+
+ // Hold a strong reference to |source_| while its SetVolume() method is
+ // called, to prevent the object from being destroyed on another thread in
+ // the meantime. It's possible that |source_| will be stopped on another
+ // thread while SetVolume() is being called here, but this is safe: the
+ // operation will simply be ignored.
+ scoped_refptr<media::AudioCapturerSource> maybe_source;
+ {
+ base::AutoLock auto_lock(source_lock_);
+ maybe_source = source_;
+ }
+ if (maybe_source)
+ maybe_source->SetVolume(normalized_volume);
+}
+
+int ProcessedLocalAudioSource::Volume() const {
+ // Note: Using NoBarrier_Load() because the timing of visibility of the
+ // updated volume information on other threads can be relaxed.
+ return base::subtle::NoBarrier_Load(&volume_);
+}
+
+int ProcessedLocalAudioSource::MaxVolume() const {
+ return WebRtcAudioDeviceImpl::kMaxVolumeLevel;
+}
+
+void ProcessedLocalAudioSource::Capture(const media::AudioBus* audio_bus,
+ int audio_delay_milliseconds,
+ double volume,
+ bool key_pressed) {
+#if defined(OS_WIN) || defined(OS_MACOSX)
+ DCHECK_LE(volume, 1.0);
+#elif (defined(OS_LINUX) && !defined(OS_CHROMEOS)) || defined(OS_OPENBSD)
+ // We have a special situation on Linux where the microphone volume can be
+ // "higher than maximum". The input volume slider in the sound preferences
+ // allows the user to set a scaling that is higher than 100%. It means that
+ // even if the reported maximum level is N, the actual microphone level can
+ // go up to 1.5*N, which corresponds to a normalized |volume| of 1.5.
+ DCHECK_LE(volume, 1.6);
+#endif
+
+ // TODO(miu): Plumbing is needed to determine the actual capture timestamp
+ // of the audio, instead of just snapshotting TimeTicks::Now(), for proper
+ // audio/video sync. http://crbug.com/335335
+ const base::TimeTicks reference_clock_snapshot = base::TimeTicks::Now();
+
+ // Map the internal volume range of [0.0, 1.0] into the [0, 255] range used
+ // by the AGC. The volume can be higher than 255 on Linux, and it will be
+ // clamped to 255 since the AGC does not allow values out of range.
+ int current_volume = static_cast<int>((volume * MaxVolume()) + 0.5);
+ // Note: Using NoBarrier_Store() because the timing of visibility of the
+ // updated volume information on other threads can be relaxed.
+ base::subtle::NoBarrier_Store(&volume_, current_volume);
+ current_volume = std::min(current_volume, MaxVolume());
+
+ // Sanity-check the input audio format in debug builds. Then, notify the
+ // tracks if the format has changed.
+ //
+ // Locking is not needed here to read the audio input/output parameters
+ // because the audio processor format changes only occur while audio capture
+ // is stopped.
+ DCHECK(audio_processor_->InputFormat().IsValid());
+ DCHECK_EQ(audio_bus->channels(), audio_processor_->InputFormat().channels());
+ DCHECK_EQ(audio_bus->frames(),
+ audio_processor_->InputFormat().frames_per_buffer());
+
+ // Figure out whether the pre-processed data has any energy. This information
+ // will be passed to the level calculator to force it to report energy in
+ // case the post-processed data is zeroed by the audio processing.
+ const bool force_report_nonzero_energy = !audio_bus->AreFramesZero();
+
+ // Push the data to the processor for processing.
+ audio_processor_->PushCaptureData(
+ *audio_bus,
+ base::TimeDelta::FromMilliseconds(audio_delay_milliseconds));
+
+ // Process and consume the data in the processor until not enough data
+ // remains in it.
+ media::AudioBus* processed_data = nullptr;
+ base::TimeDelta processed_data_audio_delay;
+ int new_volume = 0;
+ while (audio_processor_->ProcessAndConsumeData(
+ current_volume, key_pressed,
+ &processed_data, &processed_data_audio_delay, &new_volume)) {
+ DCHECK(processed_data);
+
+ level_calculator_.Calculate(*processed_data, force_report_nonzero_energy);
+
+ MediaStreamAudioSource::DeliverDataToTracks(
+ *processed_data, reference_clock_snapshot - processed_data_audio_delay);
+
+ if (new_volume) {
+ SetVolume(new_volume);
+
+ // Update the |current_volume| to avoid passing the old volume to AGC.
+ current_volume = new_volume;
+ }
+ }
+}
+
+void ProcessedLocalAudioSource::OnCaptureError(const std::string& message) {
+ WebRtcLogMessage("ProcessedLocalAudioSource::OnCaptureError: " + message);
+}
+
+media::AudioParameters ProcessedLocalAudioSource::GetInputFormat() const {
+ return audio_processor_ ? audio_processor_->InputFormat()
+ : media::AudioParameters();
+}
+
+int ProcessedLocalAudioSource::GetBufferSize(int sample_rate) const {
+ DCHECK(thread_checker_.CalledOnValidThread());
+#if defined(OS_ANDROID)
+ // TODO(henrika): Re-evaluate whether to use same logic as other platforms.
+ return (2 * sample_rate / 100);
+#endif
+
+ // If audio processing is turned on, require 10ms buffers.
+ if (audio_processor_->has_audio_processing())
+ return (sample_rate / 100);
+
+ // If audio processing is off and the native hardware buffer size was
+ // provided, use it. It can be harmful, in terms of CPU/power consumption, to
+ // use smaller buffer sizes than the native size (http://crbug.com/362261).
+ if (int hardware_buffer_size = device_info().device.input.frames_per_buffer)
+ return hardware_buffer_size;
+
+ // If the buffer size is missing from the StreamDeviceInfo, provide 10ms as a
+ // fall-back.
+ //
+ // TODO(miu): Identify where/why the buffer size might be missing, fix the
+ // code, and then require it here.
+ return (sample_rate / 100);
+}
+
+} // namespace content
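
Two pieces of arithmetic above are easy to sanity-check in isolation: the [0.0, 1.0] to [0, 255] AGC volume mapping in Capture(), and the 10 ms buffer sizing in GetBufferSize(). A standalone sketch (function names are illustrative, not from the patch):

#include <algorithm>

// Map a normalized capture volume to [0, max_volume] for the AGC, rounding
// to nearest and clamping, as Capture() does. On Linux the normalized input
// can exceed 1.0, which is why the clamp is needed.
int ToAgcVolume(double normalized_volume, int max_volume) {
  const int volume = static_cast<int>(normalized_volume * max_volume + 0.5);
  return std::min(volume, max_volume);
}

// Frames per buffer covering 10 ms, as required when processing is enabled.
int TenMsBufferSize(int sample_rate) {
  return sample_rate / 100;
}

// Examples: ToAgcVolume(0.9, 255) == 230; ToAgcVolume(1.5, 255) == 255
// (clamped); TenMsBufferSize(48000) == 480 frames.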
diff --git a/chromium/content/renderer/media/webrtc/processed_local_audio_source.h b/chromium/content/renderer/media/webrtc/processed_local_audio_source.h
new file mode 100644
index 00000000000..5d2e38862bd
--- /dev/null
+++ b/chromium/content/renderer/media/webrtc/processed_local_audio_source.h
@@ -0,0 +1,143 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_RENDERER_MEDIA_WEBRTC_PROCESSED_LOCAL_AUDIO_SOURCE_H_
+#define CONTENT_RENDERER_MEDIA_WEBRTC_PROCESSED_LOCAL_AUDIO_SOURCE_H_
+
+#include "base/atomicops.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/synchronization/lock.h"
+#include "content/common/media/media_stream_options.h"
+#include "content/renderer/media/media_stream_audio_level_calculator.h"
+#include "content/renderer/media/media_stream_audio_processor.h"
+#include "content/renderer/media/media_stream_audio_source.h"
+#include "media/base/audio_capturer_source.h"
+#include "third_party/WebKit/public/platform/WebMediaConstraints.h"
+
+namespace media {
+class AudioBus;
+}
+
+namespace webrtc {
+class AudioSourceInterface;
+}
+
+namespace content {
+
+class PeerConnectionDependencyFactory;
+
+// Represents a local source of audio data that is routed through the WebRTC
+// audio pipeline for post-processing (e.g., for echo cancellation during a
+// video conferencing call). Owns a media::AudioCapturerSource and the
+// MediaStreamAudioProcessor that modifies its audio. Modified audio is
+// delivered to one or more MediaStreamAudioTracks.
+class CONTENT_EXPORT ProcessedLocalAudioSource final
+ : NON_EXPORTED_BASE(public MediaStreamAudioSource),
+ NON_EXPORTED_BASE(public media::AudioCapturerSource::CaptureCallback) {
+ public:
+ // |consumer_render_frame_id| references the RenderFrame that will consume the
+ // audio data. Audio parameters and (optionally) a pre-existing audio session
+ // ID are derived from |device_info|. |factory| must outlive this instance.
+ ProcessedLocalAudioSource(int consumer_render_frame_id,
+ const StreamDeviceInfo& device_info,
+ PeerConnectionDependencyFactory* factory);
+
+ ~ProcessedLocalAudioSource() final;
+
+ // If |source| is an instance of ProcessedLocalAudioSource, return a
+ // type-casted pointer to it. Otherwise, return null.
+ static ProcessedLocalAudioSource* From(MediaStreamAudioSource* source);
+
+ // Non-browser unit tests cannot provide RenderFrame implementations at
+ // run-time. This is used to skip the otherwise mandatory check for a valid
+ // render frame ID when the source is started.
+ void SetAllowInvalidRenderFrameIdForTesting(bool allowed) {
+ allow_invalid_render_frame_id_for_testing_ = allowed;
+ }
+
+ // Gets/Sets source constraints. Using this is optional, but must be done
+ // before the first call to ConnectToTrack().
+ const blink::WebMediaConstraints& source_constraints() const {
+ return constraints_;
+ }
+ void SetSourceConstraints(const blink::WebMediaConstraints& constraints);
+
+ // The following accessors are not valid until after the source is started
+ // (when the first track is connected).
+ const scoped_refptr<MediaStreamAudioProcessor>& audio_processor() const {
+ return audio_processor_;
+ }
+ const scoped_refptr<MediaStreamAudioLevelCalculator::Level>& audio_level()
+ const {
+ return level_calculator_.level();
+ }
+
+ // Thread-safe volume accessors used by WebRtcAudioDeviceImpl.
+ void SetVolume(int volume);
+ int Volume() const;
+ int MaxVolume() const;
+
+ // Audio parameters utilized by the source of the audio capturer.
+ // TODO(phoglund): Think over the implications of this accessor and if we can
+ // remove it.
+ media::AudioParameters GetInputFormat() const;
+
+ protected:
+ // MediaStreamAudioSource implementation.
+ void* GetClassIdentifier() const final;
+ bool EnsureSourceIsStarted() final;
+ void EnsureSourceIsStopped() final;
+
+ // AudioCapturerSource::CaptureCallback implementation.
+ // Called on the AudioCapturerSource audio thread.
+ void Capture(const media::AudioBus* audio_source,
+ int audio_delay_milliseconds,
+ double volume,
+ bool key_pressed) override;
+ void OnCaptureError(const std::string& message) override;
+
+ private:
+ // Helper function to get the source buffer size based on whether audio
+ // processing will take place.
+ int GetBufferSize(int sample_rate) const;
+
+ // The RenderFrame that will consume the audio data. Used when creating
+ // AudioCapturerSources.
+ const int consumer_render_frame_id_;
+
+ PeerConnectionDependencyFactory* const pc_factory_;
+
+ // In debug builds, check that all methods that could cause object graph
+ // or data flow changes are being called on the main thread.
+ base::ThreadChecker thread_checker_;
+
+ // Cached audio constraints for the capturer.
+ blink::WebMediaConstraints constraints_;
+
+ // Audio processor performing processing such as FIFO buffering, AGC, AEC,
+ // and NS. Its output is delivered in 10 ms chunks.
+ scoped_refptr<MediaStreamAudioProcessor> audio_processor_;
+
+ // The device created by the AudioDeviceFactory in EnsureSourceIsStarted().
+ scoped_refptr<media::AudioCapturerSource> source_;
+
+ // Lock used to ensure thread-safe access to |source_| by SetVolume().
+ mutable base::Lock source_lock_;
+
+ // Stores the latest microphone volume received in a Capture() callback.
+ // Range is [0, 255].
+ base::subtle::Atomic32 volume_;
+
+ // Used to calculate the signal level that shows in the UI.
+ MediaStreamAudioLevelCalculator level_calculator_;
+
+ bool allow_invalid_render_frame_id_for_testing_;
+
+ DISALLOW_COPY_AND_ASSIGN(ProcessedLocalAudioSource);
+};
+
+} // namespace content
+
+#endif // CONTENT_RENDERER_MEDIA_WEBRTC_PROCESSED_LOCAL_AUDIO_SOURCE_H_
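
ProcessedLocalAudioSource::From() (defined in the .cc above) stands in for RTTI: the class exposes the address of a file-local static as its identifier, and From() down-casts only when the identifier matches. The same pattern in miniature, with hypothetical names:

// A lightweight substitute for dynamic_cast: the subclass advertises the
// address of a unique static as its type tag.
struct Base {
  virtual ~Base() = default;
  virtual void* GetClassIdentifier() const { return nullptr; }
};

struct Derived : Base {
  static void* kClassIdentifier;
  void* GetClassIdentifier() const override { return kClassIdentifier; }

  // Returns |base| down-cast to Derived, or null if it is some other type.
  static Derived* From(Base* base) {
    if (base && base->GetClassIdentifier() == kClassIdentifier)
      return static_cast<Derived*>(base);
    return nullptr;
  }
};

// The address of the static itself serves as the unique identifier value.
void* Derived::kClassIdentifier = &Derived::kClassIdentifier;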
diff --git a/chromium/content/renderer/media/webrtc/processed_local_audio_source_unittest.cc b/chromium/content/renderer/media/webrtc/processed_local_audio_source_unittest.cc
new file mode 100644
index 00000000000..0abec8ea56d
--- /dev/null
+++ b/chromium/content/renderer/media/webrtc/processed_local_audio_source_unittest.cc
@@ -0,0 +1,227 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/logging.h"
+#include "build/build_config.h"
+#include "content/public/renderer/media_stream_audio_sink.h"
+#include "content/renderer/media/media_stream_audio_track.h"
+#include "content/renderer/media/mock_audio_device_factory.h"
+#include "content/renderer/media/mock_constraint_factory.h"
+#include "content/renderer/media/webrtc/mock_peer_connection_dependency_factory.h"
+#include "content/renderer/media/webrtc/processed_local_audio_source.h"
+#include "media/base/audio_bus.h"
+#include "media/base/audio_parameters.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/WebKit/public/platform/WebMediaConstraints.h"
+#include "third_party/WebKit/public/web/WebHeap.h"
+
+using ::testing::_;
+using ::testing::AtLeast;
+using ::testing::Invoke;
+using ::testing::WithArg;
+
+namespace content {
+
+namespace {
+
+// Audio parameters for the VerifyAudioFlowWithoutAudioProcessing test.
+constexpr int kSampleRate = 48000;
+constexpr media::ChannelLayout kChannelLayout = media::CHANNEL_LAYOUT_STEREO;
+constexpr int kRequestedBufferSize = 512;
+
+// On Android, ProcessedLocalAudioSource forces a 20ms buffer size from the
+// input device.
+#if defined(OS_ANDROID)
+constexpr int kExpectedSourceBufferSize = kSampleRate / 50;
+#else
+constexpr int kExpectedSourceBufferSize = kRequestedBufferSize;
+#endif
+
+// In either case, even though audio processing is turned off, the
+// MediaStreamAudioProcessor will force the use of 10ms buffer sizes on the
+// output end of its FIFO.
+constexpr int kExpectedOutputBufferSize = kSampleRate / 100;
+
+class MockMediaStreamAudioSink : public MediaStreamAudioSink {
+ public:
+ MockMediaStreamAudioSink() {}
+ ~MockMediaStreamAudioSink() override {}
+
+ void OnData(const media::AudioBus& audio_bus,
+ base::TimeTicks estimated_capture_time) override {
+ EXPECT_EQ(audio_bus.channels(), params_.channels());
+ EXPECT_EQ(audio_bus.frames(), params_.frames_per_buffer());
+ EXPECT_FALSE(estimated_capture_time.is_null());
+ OnDataCallback();
+ }
+ MOCK_METHOD0(OnDataCallback, void());
+
+ void OnSetFormat(const media::AudioParameters& params) override {
+ params_ = params;
+ FormatIsSet(params_);
+ }
+ MOCK_METHOD1(FormatIsSet, void(const media::AudioParameters& params));
+
+ private:
+ media::AudioParameters params_;
+};
+
+} // namespace
+
+class ProcessedLocalAudioSourceTest : public testing::Test {
+ protected:
+ ProcessedLocalAudioSourceTest() {}
+
+ ~ProcessedLocalAudioSourceTest() override {}
+
+ void SetUp() override {
+ blink_audio_source_.initialize(blink::WebString::fromUTF8("audio_label"),
+ blink::WebMediaStreamSource::TypeAudio,
+ blink::WebString::fromUTF8("audio_track"),
+ false /* remote */);
+ blink_audio_track_.initialize(blink_audio_source_.id(),
+ blink_audio_source_);
+ }
+
+ void TearDown() override {
+ blink_audio_track_.reset();
+ blink_audio_source_.reset();
+ blink::WebHeap::collectAllGarbageForTesting();
+ }
+
+ void CreateProcessedLocalAudioSource(
+ const blink::WebMediaConstraints& constraints) {
+ ProcessedLocalAudioSource* const source =
+ new ProcessedLocalAudioSource(
+ -1 /* consumer_render_frame_id is N/A for non-browser tests */,
+ StreamDeviceInfo(MEDIA_DEVICE_AUDIO_CAPTURE, "Mock audio device",
+ "mock_audio_device_id", kSampleRate,
+ kChannelLayout, kRequestedBufferSize),
+ &mock_dependency_factory_);
+ source->SetAllowInvalidRenderFrameIdForTesting(true);
+ source->SetSourceConstraints(constraints);
+ blink_audio_source_.setExtraData(source); // Takes ownership.
+ }
+
+ void CheckSourceFormatMatches(const media::AudioParameters& params) {
+ EXPECT_EQ(kSampleRate, params.sample_rate());
+ EXPECT_EQ(kChannelLayout, params.channel_layout());
+ EXPECT_EQ(kExpectedSourceBufferSize, params.frames_per_buffer());
+ }
+
+ void CheckOutputFormatMatches(const media::AudioParameters& params) {
+ EXPECT_EQ(kSampleRate, params.sample_rate());
+ EXPECT_EQ(kChannelLayout, params.channel_layout());
+ EXPECT_EQ(kExpectedOutputBufferSize, params.frames_per_buffer());
+ }
+
+ MockAudioDeviceFactory* mock_audio_device_factory() {
+ return &mock_audio_device_factory_;
+ }
+
+ media::AudioCapturerSource::CaptureCallback* capture_source_callback() const {
+ return static_cast<media::AudioCapturerSource::CaptureCallback*>(
+ ProcessedLocalAudioSource::From(audio_source()));
+ }
+
+ MediaStreamAudioSource* audio_source() const {
+ return MediaStreamAudioSource::From(blink_audio_source_);
+ }
+
+ const blink::WebMediaStreamTrack& blink_audio_track() {
+ return blink_audio_track_;
+ }
+
+ private:
+ MockAudioDeviceFactory mock_audio_device_factory_;
+ MockPeerConnectionDependencyFactory mock_dependency_factory_;
+ blink::WebMediaStreamSource blink_audio_source_;
+ blink::WebMediaStreamTrack blink_audio_track_;
+};
+
+// Tests a basic end-to-end start-up, track+sink connections, audio flow, and
+// shut-down. The unit tests in media_stream_audio_unittest.cc provide more
+// comprehensive testing of the object graph connections and multi-threading
+// concerns.
+TEST_F(ProcessedLocalAudioSourceTest, VerifyAudioFlowWithoutAudioProcessing) {
+ using ThisTest =
+ ProcessedLocalAudioSourceTest_VerifyAudioFlowWithoutAudioProcessing_Test;
+
+ // Turn off the default constraints so the sink will get audio in chunks of
+ // the native buffer size.
+ MockConstraintFactory constraint_factory;
+ constraint_factory.DisableDefaultAudioConstraints();
+
+ CreateProcessedLocalAudioSource(
+ constraint_factory.CreateWebMediaConstraints());
+
+ // Connect the track, and expect the MockCapturerSource to be initialized and
+ // started by ProcessedLocalAudioSource.
+ EXPECT_CALL(*mock_audio_device_factory()->mock_capturer_source(),
+ Initialize(_, capture_source_callback(), -1))
+ .WillOnce(WithArg<0>(Invoke(this, &ThisTest::CheckSourceFormatMatches)));
+ EXPECT_CALL(*mock_audio_device_factory()->mock_capturer_source(),
+ SetAutomaticGainControl(true));
+ EXPECT_CALL(*mock_audio_device_factory()->mock_capturer_source(), Start());
+ ASSERT_TRUE(audio_source()->ConnectToTrack(blink_audio_track()));
+ CheckOutputFormatMatches(audio_source()->GetAudioParameters());
+
+ // Connect a sink to the track.
+ std::unique_ptr<MockMediaStreamAudioSink> sink(
+ new MockMediaStreamAudioSink());
+ EXPECT_CALL(*sink, FormatIsSet(_))
+ .WillOnce(Invoke(this, &ThisTest::CheckOutputFormatMatches));
+ MediaStreamAudioTrack::From(blink_audio_track())->AddSink(sink.get());
+
+ // Feed audio data into the ProcessedLocalAudioSource and expect it to reach
+ // the sink.
+ int delay_ms = 65;
+ bool key_pressed = true;
+ double volume = 0.9;
+ std::unique_ptr<media::AudioBus> audio_bus =
+ media::AudioBus::Create(2, kExpectedSourceBufferSize);
+ audio_bus->Zero();
+ EXPECT_CALL(*sink, OnDataCallback()).Times(AtLeast(1));
+ capture_source_callback()->Capture(audio_bus.get(), delay_ms, volume,
+ key_pressed);
+
+ // Expect the ProcessedLocalAudioSource to auto-stop the MockCapturerSource
+ // when the track is stopped.
+ EXPECT_CALL(*mock_audio_device_factory()->mock_capturer_source(), Stop());
+ MediaStreamAudioTrack::From(blink_audio_track())->Stop();
+}
+
+// Tests that the source is not started when invalid audio constraints are
+// present.
+TEST_F(ProcessedLocalAudioSourceTest, FailToStartWithWrongConstraints) {
+ MockConstraintFactory constraint_factory;
+ // Set a non-audio constraint, which causes the audio constraints to be
+ // considered invalid when the source is started.
+ constraint_factory.basic().width.setExact(240);
+
+ CreateProcessedLocalAudioSource(
+ constraint_factory.CreateWebMediaConstraints());
+
+ // Expect that the MockCapturerSource is never initialized/started and that
+ // the ConnectToTrack() operation fails due to the invalid constraint.
+ EXPECT_CALL(*mock_audio_device_factory()->mock_capturer_source(),
+ Initialize(_, capture_source_callback(), -1))
+ .Times(0);
+ EXPECT_CALL(*mock_audio_device_factory()->mock_capturer_source(),
+ SetAutomaticGainControl(true)).Times(0);
+ EXPECT_CALL(*mock_audio_device_factory()->mock_capturer_source(), Start())
+ .Times(0);
+ EXPECT_FALSE(audio_source()->ConnectToTrack(blink_audio_track()));
+
+ // Even though ConnectToTrack() failed, there should still have been a new
+ // MediaStreamAudioTrack instance created, owned by the
+ // blink::WebMediaStreamTrack.
+ EXPECT_TRUE(MediaStreamAudioTrack::From(blink_audio_track()));
+}
+
+// TODO(miu): There's a lot of logic in ProcessedLocalAudioSource around
+// constraints processing and validation that should have unit testing.
+
+} // namespace content
diff --git a/chromium/content/renderer/media/webrtc/stun_field_trial.cc b/chromium/content/renderer/media/webrtc/stun_field_trial.cc
index 5f203f26f82..6e9635b315f 100644
--- a/chromium/content/renderer/media/webrtc/stun_field_trial.cc
+++ b/chromium/content/renderer/media/webrtc/stun_field_trial.cc
@@ -40,7 +40,8 @@ enum NatType {
};
// This needs to match "NatType" in histograms.xml.
-const char* NatTypeNames[] = {"NoNAT", "UnknownNAT", "SymNAT", "NonSymNAT"};
+const char* const NatTypeNames[] =
+ {"NoNAT", "UnknownNAT", "SymNAT", "NonSymNAT"};
static_assert(arraysize(NatTypeNames) == NAT_TYPE_MAX,
"NatType enums must match names");
diff --git a/chromium/content/renderer/media/webrtc/webrtc_audio_sink.cc b/chromium/content/renderer/media/webrtc/webrtc_audio_sink.cc
new file mode 100644
index 00000000000..213b2b0d650
--- /dev/null
+++ b/chromium/content/renderer/media/webrtc/webrtc_audio_sink.cc
@@ -0,0 +1,196 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/renderer/media/webrtc/webrtc_audio_sink.h"
+
+#include <algorithm>
+#include <limits>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/location.h"
+#include "base/logging.h"
+#include "base/message_loop/message_loop.h"
+
+namespace content {
+
+WebRtcAudioSink::WebRtcAudioSink(
+ const std::string& label,
+ scoped_refptr<webrtc::AudioSourceInterface> track_source,
+ scoped_refptr<base::SingleThreadTaskRunner> signaling_task_runner)
+ : adapter_(new rtc::RefCountedObject<Adapter>(
+ label, std::move(track_source), std::move(signaling_task_runner))),
+ fifo_(base::Bind(&WebRtcAudioSink::DeliverRebufferedAudio,
+ base::Unretained(this))) {
+ DVLOG(1) << "WebRtcAudioSink::WebRtcAudioSink()";
+}
+
+WebRtcAudioSink::~WebRtcAudioSink() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DVLOG(1) << "WebRtcAudioSink::~WebRtcAudioSink()";
+}
+
+void WebRtcAudioSink::SetAudioProcessor(
+ scoped_refptr<MediaStreamAudioProcessor> processor) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(processor.get());
+ adapter_->set_processor(std::move(processor));
+}
+
+void WebRtcAudioSink::SetLevel(
+ scoped_refptr<MediaStreamAudioLevelCalculator::Level> level) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(level.get());
+ adapter_->set_level(std::move(level));
+}
+
+void WebRtcAudioSink::OnEnabledChanged(bool enabled) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ adapter_->signaling_task_runner()->PostTask(
+ FROM_HERE,
+ base::Bind(
+ base::IgnoreResult(&WebRtcAudioSink::Adapter::set_enabled),
+ adapter_, enabled));
+}
+
+void WebRtcAudioSink::OnData(const media::AudioBus& audio_bus,
+ base::TimeTicks estimated_capture_time) {
+ DCHECK(audio_thread_checker_.CalledOnValidThread());
+ // The following will result in zero, one, or multiple synchronous calls to
+ // DeliverRebufferedAudio().
+ fifo_.Push(audio_bus);
+}
+
+void WebRtcAudioSink::OnSetFormat(const media::AudioParameters& params) {
+ // On a format change, the thread delivering audio might have also changed.
+ audio_thread_checker_.DetachFromThread();
+ DCHECK(audio_thread_checker_.CalledOnValidThread());
+
+ DCHECK(params.IsValid());
+ params_ = params;
+ // Make sure that our params always reflect a buffer size of 10ms.
+ params_.set_frames_per_buffer(params_.sample_rate() / 100);
+ fifo_.Reset(params_.frames_per_buffer());
+ const int num_pcm16_data_elements =
+ params_.frames_per_buffer() * params_.channels();
+ interleaved_data_.reset(new int16_t[num_pcm16_data_elements]);
+}
+
+void WebRtcAudioSink::DeliverRebufferedAudio(const media::AudioBus& audio_bus,
+ int frame_delay) {
+ DCHECK(audio_thread_checker_.CalledOnValidThread());
+ DCHECK(params_.IsValid());
+
+ // TODO(miu): Why doesn't a WebRTC sink care about reference time passed to
+ // OnData(), and the |frame_delay| here? How is AV sync achieved otherwise?
+
+ // TODO(henrika): Remove this conversion once the interface in libjingle
+ // supports float vectors.
+ audio_bus.ToInterleaved(audio_bus.frames(),
+ sizeof(interleaved_data_[0]),
+ interleaved_data_.get());
+ adapter_->DeliverPCMToWebRtcSinks(interleaved_data_.get(),
+ params_.sample_rate(),
+ audio_bus.channels(),
+ audio_bus.frames());
+}
+
+namespace {
+// TODO(miu): MediaStreamAudioProcessor destructor requires this nonsense.
+void DereferenceOnMainThread(
+ const scoped_refptr<MediaStreamAudioProcessor>& processor) {}
+} // namespace
+
+WebRtcAudioSink::Adapter::Adapter(
+ const std::string& label,
+ scoped_refptr<webrtc::AudioSourceInterface> source,
+ scoped_refptr<base::SingleThreadTaskRunner> signaling_task_runner)
+ : webrtc::MediaStreamTrack<webrtc::AudioTrackInterface>(label),
+ source_(std::move(source)),
+ signaling_task_runner_(std::move(signaling_task_runner)),
+ main_task_runner_(base::MessageLoop::current()->task_runner()) {
+ DCHECK(signaling_task_runner_);
+}
+
+WebRtcAudioSink::Adapter::~Adapter() {
+ if (audio_processor_) {
+ main_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&DereferenceOnMainThread, std::move(audio_processor_)));
+ }
+}
+
+void WebRtcAudioSink::Adapter::DeliverPCMToWebRtcSinks(
+ const int16_t* audio_data,
+ int sample_rate,
+ size_t number_of_channels,
+ size_t number_of_frames) {
+ base::AutoLock auto_lock(lock_);
+ for (webrtc::AudioTrackSinkInterface* sink : sinks_) {
+ sink->OnData(audio_data, sizeof(int16_t) * 8, sample_rate,
+ number_of_channels, number_of_frames);
+ }
+}
+
+std::string WebRtcAudioSink::Adapter::kind() const {
+ return webrtc::MediaStreamTrackInterface::kAudioKind;
+}
+
+bool WebRtcAudioSink::Adapter::set_enabled(bool enable) {
+ DCHECK(!signaling_task_runner_ ||
+ signaling_task_runner_->RunsTasksOnCurrentThread());
+ return webrtc::MediaStreamTrack<webrtc::AudioTrackInterface>::
+ set_enabled(enable);
+}
+
+void WebRtcAudioSink::Adapter::AddSink(webrtc::AudioTrackSinkInterface* sink) {
+ DCHECK(!signaling_task_runner_ ||
+ signaling_task_runner_->RunsTasksOnCurrentThread());
+ DCHECK(sink);
+ base::AutoLock auto_lock(lock_);
+ DCHECK(std::find(sinks_.begin(), sinks_.end(), sink) == sinks_.end());
+ sinks_.push_back(sink);
+}
+
+void WebRtcAudioSink::Adapter::RemoveSink(
+ webrtc::AudioTrackSinkInterface* sink) {
+ DCHECK(!signaling_task_runner_ ||
+ signaling_task_runner_->RunsTasksOnCurrentThread());
+ base::AutoLock auto_lock(lock_);
+ const auto it = std::find(sinks_.begin(), sinks_.end(), sink);
+ if (it != sinks_.end())
+ sinks_.erase(it);
+}
+
+bool WebRtcAudioSink::Adapter::GetSignalLevel(int* level) {
+ DCHECK(!signaling_task_runner_ ||
+ signaling_task_runner_->RunsTasksOnCurrentThread());
+
+ // |level_| is only set once, so it's safe to read without first acquiring a
+ // mutex.
+ if (!level_)
+ return false;
+ const float signal_level = level_->GetCurrent();
+ DCHECK_GE(signal_level, 0.0f);
+ DCHECK_LE(signal_level, 1.0f);
+ // Convert from float in range [0.0,1.0] to an int in range [0,32767].
+ *level = static_cast<int>(signal_level * std::numeric_limits<int16_t>::max() +
+ 0.5f /* rounding to nearest int */);
+ return true;
+}
+
+rtc::scoped_refptr<webrtc::AudioProcessorInterface>
+WebRtcAudioSink::Adapter::GetAudioProcessor() {
+ DCHECK(!signaling_task_runner_ ||
+ signaling_task_runner_->RunsTasksOnCurrentThread());
+ return audio_processor_.get();
+}
+
+webrtc::AudioSourceInterface* WebRtcAudioSink::Adapter::GetSource() const {
+ DCHECK(!signaling_task_runner_ ||
+ signaling_task_runner_->RunsTasksOnCurrentThread());
+ return source_.get();
+}
+
+} // namespace content
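
OnData() above relies on media::AudioPushFifo to turn arbitrarily sized input buffers into exact 10 ms chunks, which is why a single Push() can trigger zero, one, or several DeliverRebufferedAudio() calls. A simplified mono re-implementation of that re-chunking, for intuition only (PushFifo is an illustrative name, not the real class):

#include <cstddef>
#include <functional>
#include <vector>

// Accumulates input buffers of any size and synchronously emits fixed-size
// chunks, like the AudioPushFifo backing WebRtcAudioSink::OnData().
class PushFifo {
 public:
  PushFifo(size_t frames_per_chunk,
           std::function<void(const std::vector<float>&)> callback)
      : frames_per_chunk_(frames_per_chunk), callback_(std::move(callback)) {}

  void Push(const std::vector<float>& input) {
    pending_.insert(pending_.end(), input.begin(), input.end());
    // Zero, one, or multiple synchronous callbacks per push.
    while (pending_.size() >= frames_per_chunk_) {
      callback_(std::vector<float>(
          pending_.begin(), pending_.begin() + frames_per_chunk_));
      pending_.erase(pending_.begin(), pending_.begin() + frames_per_chunk_);
    }
  }

 private:
  const size_t frames_per_chunk_;
  const std::function<void(const std::vector<float>&)> callback_;
  std::vector<float> pending_;
};

// Example: pushing 512-frame buffers with 480-frame (10 ms at 48 kHz) chunks
// emits one chunk per push; the 32-frame remainder accumulates until a later
// push yields two chunks.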
diff --git a/chromium/content/renderer/media/webrtc/webrtc_audio_sink.h b/chromium/content/renderer/media/webrtc/webrtc_audio_sink.h
new file mode 100644
index 00000000000..ce302fa88dc
--- /dev/null
+++ b/chromium/content/renderer/media/webrtc/webrtc_audio_sink.h
@@ -0,0 +1,183 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_RENDERER_MEDIA_WEBRTC_WEBRTC_AUDIO_SINK_H_
+#define CONTENT_RENDERER_MEDIA_WEBRTC_WEBRTC_AUDIO_SINK_H_
+
+#include <stdint.h>
+
+#include <memory>
+#include <vector>
+
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/single_thread_task_runner.h"
+#include "base/synchronization/lock.h"
+#include "content/common/content_export.h"
+#include "content/public/renderer/media_stream_audio_sink.h"
+#include "content/renderer/media/media_stream_audio_level_calculator.h"
+#include "content/renderer/media/media_stream_audio_processor.h"
+#include "media/base/audio_parameters.h"
+#include "media/base/audio_push_fifo.h"
+#include "third_party/webrtc/api/mediastreamtrack.h"
+#include "third_party/webrtc/media/base/audiorenderer.h"
+
+namespace content {
+
+// Provides an implementation of the MediaStreamAudioSink which re-chunks audio
+// data into the 10ms chunks required by WebRTC and then delivers the audio to
+// one or more objects implementing the webrtc::AudioTrackSinkInterface.
+//
+// The inner class, Adapter, implements the webrtc::AudioTrackInterface and
+// manages one or more "WebRTC sinks" (i.e., instances of
+// webrtc::AudioTrackSinkInterface) which are added/removed on the WebRTC
+// signaling thread.
+class CONTENT_EXPORT WebRtcAudioSink : public MediaStreamAudioSink {
+ public:
+ WebRtcAudioSink(
+ const std::string& label,
+ scoped_refptr<webrtc::AudioSourceInterface> track_source,
+ scoped_refptr<base::SingleThreadTaskRunner> signaling_task_runner);
+
+ ~WebRtcAudioSink() override;
+
+ webrtc::AudioTrackInterface* webrtc_audio_track() const {
+ return adapter_.get();
+ }
+
+ // Set the object that provides shared access to the current audio signal
+ // level. This is passed via the Adapter to libjingle. This method may only
+ // be called once, before the audio data flow starts, and before any calls to
+ // Adapter::GetSignalLevel() might be made.
+ void SetLevel(scoped_refptr<MediaStreamAudioLevelCalculator::Level> level);
+
+ // Set the processor that applies signal processing on the data from the
+ // source. This is passed via the Adapter to libjingle. This method may only
+ // be called once, before the audio data flow starts, and before any calls to
+ // GetAudioProcessor() might be made.
+ void SetAudioProcessor(scoped_refptr<MediaStreamAudioProcessor> processor);
+
+ // MediaStreamSink override.
+ void OnEnabledChanged(bool enabled) override;
+
+ private:
+ // Private implementation of the webrtc::AudioTrackInterface whose control
+ // methods are all called on the WebRTC signaling thread. This class is
+ // ref-counted, per the requirements of webrtc::AudioTrackInterface.
+ class Adapter
+ : NON_EXPORTED_BASE(
+ public webrtc::MediaStreamTrack<webrtc::AudioTrackInterface>) {
+ public:
+ Adapter(const std::string& label,
+ scoped_refptr<webrtc::AudioSourceInterface> source,
+ scoped_refptr<base::SingleThreadTaskRunner> signaling_task_runner);
+
+ base::SingleThreadTaskRunner* signaling_task_runner() const {
+ return signaling_task_runner_.get();
+ }
+
+ // These setters are called before the audio data flow starts, and before
+ // any methods called on the signaling thread reference these objects.
+ void set_processor(scoped_refptr<MediaStreamAudioProcessor> processor) {
+ audio_processor_ = std::move(processor);
+ }
+ void set_level(
+ scoped_refptr<MediaStreamAudioLevelCalculator::Level> level) {
+ level_ = std::move(level);
+ }
+
+ // Delivers a 10ms chunk of audio to all WebRTC sinks managed by this
+ // Adapter. This is called on the audio thread.
+ void DeliverPCMToWebRtcSinks(const int16_t* audio_data,
+ int sample_rate,
+ size_t number_of_channels,
+ size_t number_of_frames);
+
+ // webrtc::MediaStreamTrack implementation.
+ std::string kind() const override;
+ bool set_enabled(bool enable) override;
+
+ // webrtc::AudioTrackInterface implementation.
+ void AddSink(webrtc::AudioTrackSinkInterface* sink) override;
+ void RemoveSink(webrtc::AudioTrackSinkInterface* sink) override;
+ bool GetSignalLevel(int* level) override;
+ rtc::scoped_refptr<webrtc::AudioProcessorInterface> GetAudioProcessor()
+ override;
+ webrtc::AudioSourceInterface* GetSource() const override;
+
+ protected:
+ ~Adapter() override;
+
+ private:
+ const scoped_refptr<webrtc::AudioSourceInterface> source_;
+
+ // Task runner for operations that must be done on libjingle's signaling
+ // thread.
+ const scoped_refptr<base::SingleThreadTaskRunner> signaling_task_runner_;
+
+ // Task runner used for the final de-referencing of |audio_processor_| at
+ // destruction time.
+ //
+ // TODO(miu): Remove this once MediaStreamAudioProcessor is fixed.
+ const scoped_refptr<base::SingleThreadTaskRunner> main_task_runner_;
+
+ // The audio processor that applies audio post-processing to the source
+ // audio. This is null if there is no audio processing taking place
+ // upstream. This must be set before calls to GetAudioProcessor() are made.
+ scoped_refptr<MediaStreamAudioProcessor> audio_processor_;
+
+ // Thread-safe accessor to current audio signal level. This may be null, if
+ // not applicable to the current use case. This must be set before calls to
+ // GetSignalLevel() are made.
+ scoped_refptr<MediaStreamAudioLevelCalculator::Level> level_;
+
+ // Lock that protects concurrent access to the |sinks_| list.
+ base::Lock lock_;
+
+ // A vector of pointers to unowned WebRTC-internal objects which each
+ // receive the audio data.
+ std::vector<webrtc::AudioTrackSinkInterface*> sinks_;
+
+ DISALLOW_COPY_AND_ASSIGN(Adapter);
+ };
+
+ // MediaStreamAudioSink implementation.
+ void OnData(const media::AudioBus& audio_bus,
+ base::TimeTicks estimated_capture_time) override;
+ void OnSetFormat(const media::AudioParameters& params) override;
+
+ // Called by AudioPushFifo zero or more times during the call to OnData().
+ // Delivers audio data with the required 10ms buffer size to |adapter_|.
+ void DeliverRebufferedAudio(const media::AudioBus& audio_bus,
+ int frame_delay);
+
+ // Owner of the WebRTC sinks. May outlive this WebRtcAudioSink (if references
+ // are held by libjingle).
+ const scoped_refptr<Adapter> adapter_;
+
+ // The current format of the audio passing through this sink.
+ media::AudioParameters params_;
+
+ // Lightweight FIFO used for re-chunking audio into the 10ms chunks required
+ // by the WebRTC sinks.
+ media::AudioPushFifo fifo_;
+
+ // Buffer used to convert audio into the interleaved signed 16-bit integer
+ // samples required by the WebRTC sinks.
+ std::unique_ptr<int16_t[]> interleaved_data_;
+
+ // In debug builds, check that WebRtcAudioSink's public methods are all being
+ // called on the main render thread.
+ base::ThreadChecker thread_checker_;
+
+ // Used to DCHECK that OnSetFormat() and OnData() are called on the same
+ // thread.
+ base::ThreadChecker audio_thread_checker_;
+
+ DISALLOW_COPY_AND_ASSIGN(WebRtcAudioSink);
+};
+
+} // namespace content
+
+#endif // CONTENT_RENDERER_MEDIA_WEBRTC_WEBRTC_AUDIO_SINK_H_
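
Adapter::GetSignalLevel() (in the .cc above) converts the shared float level into the integer range libjingle expects. The mapping in isolation, as a hedged sketch:

#include <cstdint>
#include <limits>

// Convert a signal level in [0.0, 1.0] to [0, 32767], rounding to nearest,
// mirroring Adapter::GetSignalLevel().
int ToWebRtcSignalLevel(float signal_level) {
  return static_cast<int>(
      signal_level * std::numeric_limits<int16_t>::max() + 0.5f);
}

// Examples: 0.0f -> 0, 0.5f -> 16384, 1.0f -> 32767.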
diff --git a/chromium/content/renderer/media/webrtc/webrtc_audio_sink_adapter.cc b/chromium/content/renderer/media/webrtc/webrtc_audio_sink_adapter.cc
deleted file mode 100644
index 2679aff9648..00000000000
--- a/chromium/content/renderer/media/webrtc/webrtc_audio_sink_adapter.cc
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/logging.h"
-#include "content/renderer/media/webrtc/webrtc_audio_sink_adapter.h"
-#include "media/base/audio_bus.h"
-#include "third_party/webrtc/api/mediastreaminterface.h"
-
-namespace content {
-
-WebRtcAudioSinkAdapter::WebRtcAudioSinkAdapter(
- webrtc::AudioTrackSinkInterface* sink)
- : sink_(sink) {
- DCHECK(sink);
-}
-
-WebRtcAudioSinkAdapter::~WebRtcAudioSinkAdapter() {
-}
-
-bool WebRtcAudioSinkAdapter::IsEqual(
- const webrtc::AudioTrackSinkInterface* other) const {
- return (other == sink_);
-}
-
-void WebRtcAudioSinkAdapter::OnData(const media::AudioBus& audio_bus,
- base::TimeTicks estimated_capture_time) {
- DCHECK_EQ(audio_bus.frames(), params_.frames_per_buffer());
- DCHECK_EQ(audio_bus.channels(), params_.channels());
- // TODO(henrika): Remove this conversion once the interface in libjingle
- // supports float vectors.
- audio_bus.ToInterleaved(audio_bus.frames(),
- sizeof(interleaved_data_[0]),
- interleaved_data_.get());
- sink_->OnData(interleaved_data_.get(),
- 16,
- params_.sample_rate(),
- audio_bus.channels(),
- audio_bus.frames());
-}
-
-void WebRtcAudioSinkAdapter::OnSetFormat(
- const media::AudioParameters& params) {
- DCHECK(params.IsValid());
- params_ = params;
- const int num_pcm16_data_elements =
- params_.frames_per_buffer() * params_.channels();
- interleaved_data_.reset(new int16_t[num_pcm16_data_elements]);
-}
-
-} // namespace content
diff --git a/chromium/content/renderer/media/webrtc/webrtc_audio_sink_adapter.h b/chromium/content/renderer/media/webrtc/webrtc_audio_sink_adapter.h
deleted file mode 100644
index 726441156bd..00000000000
--- a/chromium/content/renderer/media/webrtc/webrtc_audio_sink_adapter.h
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_RENDERER_MEDIA_WEBRTC_WEBRTC_AUDIO_SINK_ADAPTER_H_
-#define CONTENT_RENDERER_MEDIA_WEBRTC_WEBRTC_AUDIO_SINK_ADAPTER_H_
-
-#include <stdint.h>
-
-#include <memory>
-
-#include "base/macros.h"
-#include "content/public/renderer/media_stream_audio_sink.h"
-#include "media/audio/audio_parameters.h"
-
-namespace webrtc {
-class AudioTrackSinkInterface;
-} // namespace webrtc
-
-namespace content {
-
-// Adapter to the webrtc::AudioTrackSinkInterface of the audio track.
-// This class is used in between the MediaStreamAudioSink and
-// webrtc::AudioTrackSinkInterface. It gets data callback via the
-// MediaStreamAudioSink::OnData() interface and pass the data to
-// webrtc::AudioTrackSinkInterface.
-class WebRtcAudioSinkAdapter : public MediaStreamAudioSink {
- public:
- explicit WebRtcAudioSinkAdapter(
- webrtc::AudioTrackSinkInterface* sink);
- ~WebRtcAudioSinkAdapter() override;
-
- bool IsEqual(const webrtc::AudioTrackSinkInterface* other) const;
-
- private:
- // MediaStreamAudioSink implementation.
- void OnData(const media::AudioBus& audio_bus,
- base::TimeTicks estimated_capture_time) override;
- void OnSetFormat(const media::AudioParameters& params) override;
-
- webrtc::AudioTrackSinkInterface* const sink_;
-
- media::AudioParameters params_;
- std::unique_ptr<int16_t[]> interleaved_data_;
-
- DISALLOW_COPY_AND_ASSIGN(WebRtcAudioSinkAdapter);
-};
-
-} // namespace content
-
-#endif // CONTENT_RENDERER_MEDIA_WEBRTC_WEBRTC_AUDIO_SINK_ADAPTER_H_
diff --git a/chromium/content/renderer/media/webrtc/webrtc_local_audio_track_adapter.cc b/chromium/content/renderer/media/webrtc/webrtc_local_audio_track_adapter.cc
deleted file mode 100644
index c86881b07a9..00000000000
--- a/chromium/content/renderer/media/webrtc/webrtc_local_audio_track_adapter.cc
+++ /dev/null
@@ -1,161 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/renderer/media/webrtc/webrtc_local_audio_track_adapter.h"
-
-#include "base/location.h"
-#include "base/logging.h"
-#include "content/renderer/media/media_stream_audio_processor.h"
-#include "content/renderer/media/webrtc/peer_connection_dependency_factory.h"
-#include "content/renderer/media/webrtc/webrtc_audio_sink_adapter.h"
-#include "content/renderer/media/webrtc_local_audio_track.h"
-#include "content/renderer/render_thread_impl.h"
-#include "third_party/webrtc/api/mediastreaminterface.h"
-
-namespace content {
-
-static const char kAudioTrackKind[] = "audio";
-
-scoped_refptr<WebRtcLocalAudioTrackAdapter>
-WebRtcLocalAudioTrackAdapter::Create(
- const std::string& label,
- webrtc::AudioSourceInterface* track_source) {
- // TODO(tommi): Change this so that the signaling thread is one of the
- // parameters to this method.
- scoped_refptr<base::SingleThreadTaskRunner> signaling_task_runner;
- RenderThreadImpl* current = RenderThreadImpl::current();
- if (current) {
- PeerConnectionDependencyFactory* pc_factory =
- current->GetPeerConnectionDependencyFactory();
- signaling_task_runner = pc_factory->GetWebRtcSignalingThread();
- LOG_IF(ERROR, !signaling_task_runner) << "No signaling thread!";
- } else {
- LOG(WARNING) << "Assuming single-threaded operation for unit test.";
- }
-
- rtc::RefCountedObject<WebRtcLocalAudioTrackAdapter>* adapter =
- new rtc::RefCountedObject<WebRtcLocalAudioTrackAdapter>(
- label, track_source, std::move(signaling_task_runner));
- return adapter;
-}
-
-WebRtcLocalAudioTrackAdapter::WebRtcLocalAudioTrackAdapter(
- const std::string& label,
- webrtc::AudioSourceInterface* track_source,
- scoped_refptr<base::SingleThreadTaskRunner> signaling_task_runner)
- : webrtc::MediaStreamTrack<webrtc::AudioTrackInterface>(label),
- owner_(NULL),
- track_source_(track_source),
- signaling_task_runner_(std::move(signaling_task_runner)) {}
-
-WebRtcLocalAudioTrackAdapter::~WebRtcLocalAudioTrackAdapter() {
-}
-
-void WebRtcLocalAudioTrackAdapter::Initialize(WebRtcLocalAudioTrack* owner) {
- DCHECK(!owner_);
- DCHECK(owner);
- owner_ = owner;
-}
-
-void WebRtcLocalAudioTrackAdapter::SetAudioProcessor(
- scoped_refptr<MediaStreamAudioProcessor> processor) {
- DCHECK(processor.get());
- DCHECK(!audio_processor_);
- audio_processor_ = std::move(processor);
-}
-
-void WebRtcLocalAudioTrackAdapter::SetLevel(
- scoped_refptr<MediaStreamAudioLevelCalculator::Level> level) {
- DCHECK(level.get());
- DCHECK(!level_);
- level_ = std::move(level);
-}
-
-std::string WebRtcLocalAudioTrackAdapter::kind() const {
- return kAudioTrackKind;
-}
-
-bool WebRtcLocalAudioTrackAdapter::set_enabled(bool enable) {
- // If we're not called on the signaling thread, we need to post a task to
- // change the state on the correct thread.
- if (signaling_task_runner_ &&
- !signaling_task_runner_->BelongsToCurrentThread()) {
- signaling_task_runner_->PostTask(FROM_HERE,
- base::Bind(
- base::IgnoreResult(&WebRtcLocalAudioTrackAdapter::set_enabled),
- this, enable));
- return true;
- }
-
- return webrtc::MediaStreamTrack<webrtc::AudioTrackInterface>::
- set_enabled(enable);
-}
-
-void WebRtcLocalAudioTrackAdapter::AddSink(
- webrtc::AudioTrackSinkInterface* sink) {
- DCHECK(!signaling_task_runner_ ||
- signaling_task_runner_->RunsTasksOnCurrentThread());
- DCHECK(sink);
-#ifndef NDEBUG
- // Verify that |sink| has not been added.
- for (ScopedVector<WebRtcAudioSinkAdapter>::const_iterator it =
- sink_adapters_.begin();
- it != sink_adapters_.end(); ++it) {
- DCHECK(!(*it)->IsEqual(sink));
- }
-#endif
-
- std::unique_ptr<WebRtcAudioSinkAdapter> adapter(
- new WebRtcAudioSinkAdapter(sink));
- owner_->AddSink(adapter.get());
- sink_adapters_.push_back(adapter.release());
-}
-
-void WebRtcLocalAudioTrackAdapter::RemoveSink(
- webrtc::AudioTrackSinkInterface* sink) {
- DCHECK(!signaling_task_runner_ ||
- signaling_task_runner_->RunsTasksOnCurrentThread());
- DCHECK(sink);
- for (ScopedVector<WebRtcAudioSinkAdapter>::iterator it =
- sink_adapters_.begin();
- it != sink_adapters_.end(); ++it) {
- if ((*it)->IsEqual(sink)) {
- owner_->RemoveSink(*it);
- sink_adapters_.erase(it);
- return;
- }
- }
-}
-
-bool WebRtcLocalAudioTrackAdapter::GetSignalLevel(int* level) {
- DCHECK(!signaling_task_runner_ ||
- signaling_task_runner_->RunsTasksOnCurrentThread());
-
- // |level_| is only set once, so it's safe to read without first acquiring a
- // mutex.
- if (!level_)
- return false;
- const float signal_level = level_->GetCurrent();
- DCHECK_GE(signal_level, 0.0f);
- DCHECK_LE(signal_level, 1.0f);
- // Convert from float in range [0.0,1.0] to an int in range [0,32767].
- *level = static_cast<int>(signal_level * std::numeric_limits<int16_t>::max() +
- 0.5f /* rounding to nearest int */);
- return true;
-}
-
-rtc::scoped_refptr<webrtc::AudioProcessorInterface>
-WebRtcLocalAudioTrackAdapter::GetAudioProcessor() {
- DCHECK(!signaling_task_runner_ ||
- signaling_task_runner_->RunsTasksOnCurrentThread());
- return audio_processor_.get();
-}
-
-webrtc::AudioSourceInterface* WebRtcLocalAudioTrackAdapter::GetSource() const {
- DCHECK(!signaling_task_runner_ ||
- signaling_task_runner_->RunsTasksOnCurrentThread());
- return track_source_;
-}
-
-} // namespace content
diff --git a/chromium/content/renderer/media/webrtc/webrtc_local_audio_track_adapter.h b/chromium/content/renderer/media/webrtc/webrtc_local_audio_track_adapter.h
deleted file mode 100644
index 72b80194b08..00000000000
--- a/chromium/content/renderer/media/webrtc/webrtc_local_audio_track_adapter.h
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_RENDERER_MEDIA_WEBRTC_WEBRTC_LOCAL_AUDIO_TRACK_ADAPTER_H_
-#define CONTENT_RENDERER_MEDIA_WEBRTC_WEBRTC_LOCAL_AUDIO_TRACK_ADAPTER_H_
-
-#include <vector>
-
-#include "base/memory/ref_counted.h"
-#include "base/memory/scoped_vector.h"
-#include "base/single_thread_task_runner.h"
-#include "base/synchronization/lock.h"
-#include "content/common/content_export.h"
-#include "content/renderer/media/media_stream_audio_level_calculator.h"
-#include "content/renderer/media/media_stream_audio_processor.h"
-#include "third_party/webrtc/api/mediastreamtrack.h"
-#include "third_party/webrtc/media/base/audiorenderer.h"
-
-namespace cricket {
-class AudioRenderer;
-}
-
-namespace webrtc {
-class AudioSourceInterface;
-class AudioProcessorInterface;
-}
-
-namespace content {
-
-class MediaStreamAudioProcessor;
-class WebRtcAudioSinkAdapter;
-class WebRtcLocalAudioTrack;
-
-// Provides an implementation of the webrtc::AudioTrackInterface that can be
-// bound/unbound to/from a MediaStreamAudioTrack. In other words, this is an
-// adapter that sits between the media stream object graph and WebRtc's object
-// graph and proxies between the two.
-class CONTENT_EXPORT WebRtcLocalAudioTrackAdapter
- : NON_EXPORTED_BASE(
- public webrtc::MediaStreamTrack<webrtc::AudioTrackInterface>) {
- public:
- static scoped_refptr<WebRtcLocalAudioTrackAdapter> Create(
- const std::string& label,
- webrtc::AudioSourceInterface* track_source);
-
- WebRtcLocalAudioTrackAdapter(
- const std::string& label,
- webrtc::AudioSourceInterface* track_source,
- scoped_refptr<base::SingleThreadTaskRunner> signaling_task_runner);
-
- ~WebRtcLocalAudioTrackAdapter() override;
-
- void Initialize(WebRtcLocalAudioTrack* owner);
-
- // Set the object that provides shared access to the current audio signal
- // level. This method may only be called once, before the audio data flow
- // starts, and before any calls to GetSignalLevel() might be made.
- void SetLevel(scoped_refptr<MediaStreamAudioLevelCalculator::Level> level);
-
- // Method called by the WebRtcLocalAudioTrack to set the processor that
- // applies signal processing to the data of the track.
- // This class will keep a reference to the |processor|.
- // Called on the main render thread.
- // This method may only be called once, before the audio data flow starts, and
- // before any calls to GetAudioProcessor() might be made.
- void SetAudioProcessor(scoped_refptr<MediaStreamAudioProcessor> processor);
-
- // webrtc::MediaStreamTrack implementation.
- std::string kind() const override;
- bool set_enabled(bool enable) override;
-
- private:
- // webrtc::AudioTrackInterface implementation.
- void AddSink(webrtc::AudioTrackSinkInterface* sink) override;
- void RemoveSink(webrtc::AudioTrackSinkInterface* sink) override;
- bool GetSignalLevel(int* level) override;
- rtc::scoped_refptr<webrtc::AudioProcessorInterface> GetAudioProcessor()
- override;
- webrtc::AudioSourceInterface* GetSource() const override;
-
- // Weak reference.
- WebRtcLocalAudioTrack* owner_;
-
- // The source of the audio track which handles the audio constraints.
- // TODO(xians): merge |track_source_| to |capturer_| in WebRtcLocalAudioTrack.
- rtc::scoped_refptr<webrtc::AudioSourceInterface> track_source_;
-
- // Libjingle's signaling thread.
- const scoped_refptr<base::SingleThreadTaskRunner> signaling_task_runner_;
-
- // The audio processor that applies audio processing to the data of the
- // audio track. This must be set before calls to GetAudioProcessor() are made.
- scoped_refptr<MediaStreamAudioProcessor> audio_processor_;
-
- // A vector of the peer connection sink adapters which receive the audio data
- // from the audio track.
- ScopedVector<WebRtcAudioSinkAdapter> sink_adapters_;
-
- // Thread-safe accessor to the current audio signal level. This must be set
- // before calls to GetSignalLevel() are made.
- scoped_refptr<MediaStreamAudioLevelCalculator::Level> level_;
-};
-
-} // namespace content
-
-#endif // CONTENT_RENDERER_MEDIA_WEBRTC_WEBRTC_LOCAL_AUDIO_TRACK_ADAPTER_H_
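
The deleted header encodes an initialization-order contract: SetLevel() and SetAudioProcessor() must each be called exactly once, before audio data flows and before GetSignalLevel() or GetAudioProcessor() can be queried. A hypothetical setup sequence honoring that contract (a sketch against the declarations above; |owner|, |calculator|, and |processor| are assumed to exist):

    scoped_refptr<WebRtcLocalAudioTrackAdapter> adapter =
        WebRtcLocalAudioTrackAdapter::Create("label", nullptr /* source */);
    adapter->Initialize(owner);             // Bind the owning track first.
    adapter->SetLevel(calculator.level());  // Before any GetSignalLevel().
    adapter->SetAudioProcessor(processor);  // Before any GetAudioProcessor().
    // Only now may capture start and sinks be added.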
diff --git a/chromium/content/renderer/media/webrtc/webrtc_local_audio_track_adapter_unittest.cc b/chromium/content/renderer/media/webrtc/webrtc_local_audio_track_adapter_unittest.cc
deleted file mode 100644
index 0a30d4ec0e3..00000000000
--- a/chromium/content/renderer/media/webrtc/webrtc_local_audio_track_adapter_unittest.cc
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <stddef.h>
-
-#include "content/renderer/media/media_stream_audio_level_calculator.h"
-#include "content/renderer/media/webrtc/webrtc_local_audio_track_adapter.h"
-#include "content/renderer/media/webrtc_audio_capturer.h"
-#include "content/renderer/media/webrtc_local_audio_track.h"
-#include "testing/gmock/include/gmock/gmock.h"
-#include "testing/gtest/include/gtest/gtest.h"
-#include "third_party/webrtc/api/mediastreaminterface.h"
-
-using ::testing::_;
-using ::testing::AnyNumber;
-
-namespace content {
-
-namespace {
-
-class MockWebRtcAudioSink : public webrtc::AudioTrackSinkInterface {
- public:
- MockWebRtcAudioSink() {}
- ~MockWebRtcAudioSink() {}
- MOCK_METHOD5(OnData, void(const void* audio_data,
- int bits_per_sample,
- int sample_rate,
- size_t number_of_channels,
- size_t number_of_frames));
-};
-
-} // namespace
-
-class WebRtcLocalAudioTrackAdapterTest : public ::testing::Test {
- public:
- WebRtcLocalAudioTrackAdapterTest()
- : params_(media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
- media::CHANNEL_LAYOUT_STEREO, 48000, 16, 480),
- adapter_(WebRtcLocalAudioTrackAdapter::Create(std::string(), NULL)) {
- track_.reset(new WebRtcLocalAudioTrack(adapter_.get()));
- }
-
- protected:
- void SetUp() override {
- track_->OnSetFormat(params_);
- EXPECT_TRUE(track_->GetAudioAdapter()->enabled());
- }
-
- media::AudioParameters params_;
- scoped_refptr<WebRtcLocalAudioTrackAdapter> adapter_;
- std::unique_ptr<WebRtcLocalAudioTrack> track_;
-};
-
-// Adds a WebRtcAudioSink to, and then removes it from, a local audio track.
-TEST_F(WebRtcLocalAudioTrackAdapterTest, AddAndRemoveSink) {
- // Add a sink to the webrtc track.
- std::unique_ptr<MockWebRtcAudioSink> sink(new MockWebRtcAudioSink());
- webrtc::AudioTrackInterface* webrtc_track =
- static_cast<webrtc::AudioTrackInterface*>(adapter_.get());
- webrtc_track->AddSink(sink.get());
-
- // Send a packet via |track_|; the data should reach the sink attached to
- // |adapter_|.
- const std::unique_ptr<media::AudioBus> audio_bus =
- media::AudioBus::Create(params_);
- // While this test is not checking the signal data being passed around, the
- // implementation in WebRtcLocalAudioTrack reads the data for its signal level
- // computation. Initialize all samples to zero to make the memory sanitizer
- // happy.
- audio_bus->Zero();
-
- base::TimeTicks estimated_capture_time = base::TimeTicks::Now();
- EXPECT_CALL(*sink,
- OnData(_, 16, params_.sample_rate(), params_.channels(),
- params_.frames_per_buffer()));
- track_->Capture(*audio_bus, estimated_capture_time);
-
- // Remove the sink from the webrtc track.
- webrtc_track->RemoveSink(sink.get());
- sink.reset();
-
- // Verify that no more callbacks get into the sink.
- estimated_capture_time +=
- params_.frames_per_buffer() * base::TimeDelta::FromSeconds(1) /
- params_.sample_rate();
- track_->Capture(*audio_bus, estimated_capture_time);
-}
-
-TEST_F(WebRtcLocalAudioTrackAdapterTest, GetSignalLevel) {
- webrtc::AudioTrackInterface* webrtc_track =
- static_cast<webrtc::AudioTrackInterface*>(adapter_.get());
- int signal_level = -1;
- EXPECT_FALSE(webrtc_track->GetSignalLevel(&signal_level));
- MediaStreamAudioLevelCalculator calculator;
- adapter_->SetLevel(calculator.level());
- signal_level = -1;
- EXPECT_TRUE(webrtc_track->GetSignalLevel(&signal_level));
- EXPECT_EQ(0, signal_level);
-}
-
-} // namespace content
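
The removed AddAndRemoveSink test advances the estimated capture time by exactly one buffer duration, frames_per_buffer / sample_rate, before the second Capture() call. That arithmetic in isolation, as a sketch mirroring the base::TimeDelta expression the test used:

    #include "base/time/time.h"

    // One buffer of 480 frames at 48000 Hz lasts 10 ms, matching the
    // AudioParameters used by the fixture above.
    base::TimeDelta BufferDuration(int frames_per_buffer, int sample_rate) {
      return frames_per_buffer * base::TimeDelta::FromSeconds(1) / sample_rate;
    }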
diff --git a/chromium/content/renderer/media/webrtc/webrtc_media_stream_adapter.cc b/chromium/content/renderer/media/webrtc/webrtc_media_stream_adapter.cc
index 3c34d794dde..b5ecec76e94 100644
--- a/chromium/content/renderer/media/webrtc/webrtc_media_stream_adapter.cc
+++ b/chromium/content/renderer/media/webrtc/webrtc_media_stream_adapter.cc
@@ -5,11 +5,12 @@
#include "content/renderer/media/webrtc/webrtc_media_stream_adapter.h"
#include "base/logging.h"
-#include "content/renderer/media/media_stream_audio_source.h"
#include "content/renderer/media/media_stream_audio_track.h"
#include "content/renderer/media/media_stream_track.h"
#include "content/renderer/media/webrtc/media_stream_video_webrtc_sink.h"
#include "content/renderer/media/webrtc/peer_connection_dependency_factory.h"
+#include "content/renderer/media/webrtc/processed_local_audio_source.h"
+#include "content/renderer/media/webrtc/webrtc_audio_sink.h"
#include "third_party/WebKit/public/platform/WebMediaStreamSource.h"
#include "third_party/WebKit/public/platform/WebMediaStreamTrack.h"
#include "third_party/WebKit/public/platform/WebString.h"
@@ -27,12 +28,12 @@ WebRtcMediaStreamAdapter::WebRtcMediaStreamAdapter(
blink::WebVector<blink::WebMediaStreamTrack> audio_tracks;
web_stream_.audioTracks(audio_tracks);
for (blink::WebMediaStreamTrack& audio_track : audio_tracks)
- CreateAudioTrack(audio_track);
+ AddAudioSinkToTrack(audio_track);
blink::WebVector<blink::WebMediaStreamTrack> video_tracks;
web_stream_.videoTracks(video_tracks);
for (blink::WebMediaStreamTrack& video_track : video_tracks)
- CreateVideoTrack(video_track);
+ AddVideoSinkToTrack(video_track);
MediaStream* const native_stream = MediaStream::GetMediaStream(web_stream_);
native_stream->AddObserver(this);
@@ -41,72 +42,105 @@ WebRtcMediaStreamAdapter::WebRtcMediaStreamAdapter(
WebRtcMediaStreamAdapter::~WebRtcMediaStreamAdapter() {
MediaStream* const native_stream = MediaStream::GetMediaStream(web_stream_);
native_stream->RemoveObserver(this);
+
+ blink::WebVector<blink::WebMediaStreamTrack> audio_tracks;
+ web_stream_.audioTracks(audio_tracks);
+ for (blink::WebMediaStreamTrack& audio_track : audio_tracks)
+ TrackRemoved(audio_track);
+ DCHECK(audio_sinks_.empty());
+ blink::WebVector<blink::WebMediaStreamTrack> video_tracks;
+ web_stream_.videoTracks(video_tracks);
+ for (blink::WebMediaStreamTrack& video_track : video_tracks)
+ TrackRemoved(video_track);
+ DCHECK(video_sinks_.empty());
}
void WebRtcMediaStreamAdapter::TrackAdded(
const blink::WebMediaStreamTrack& track) {
if (track.source().getType() == blink::WebMediaStreamSource::TypeAudio)
- CreateAudioTrack(track);
+ AddAudioSinkToTrack(track);
else
- CreateVideoTrack(track);
+ AddVideoSinkToTrack(track);
}
void WebRtcMediaStreamAdapter::TrackRemoved(
const blink::WebMediaStreamTrack& track) {
const std::string track_id = track.id().utf8();
if (track.source().getType() == blink::WebMediaStreamSource::TypeAudio) {
- webrtc_media_stream_->RemoveTrack(
- webrtc_media_stream_->FindAudioTrack(track_id));
+ scoped_refptr<webrtc::AudioTrackInterface> webrtc_track =
+ make_scoped_refptr(
+ webrtc_media_stream_->FindAudioTrack(track_id).get());
+ if (!webrtc_track)
+ return;
+ webrtc_media_stream_->RemoveTrack(webrtc_track.get());
+
+ for (auto it = audio_sinks_.begin(); it != audio_sinks_.end(); ++it) {
+ if ((*it)->webrtc_audio_track() == webrtc_track.get()) {
+ if (auto* media_stream_track = MediaStreamAudioTrack::From(track))
+ media_stream_track->RemoveSink(it->get());
+ audio_sinks_.erase(it);
+ break;
+ }
+ }
} else {
DCHECK_EQ(track.source().getType(), blink::WebMediaStreamSource::TypeVideo);
scoped_refptr<webrtc::VideoTrackInterface> webrtc_track =
- webrtc_media_stream_->FindVideoTrack(track_id).get();
+ make_scoped_refptr(
+ webrtc_media_stream_->FindVideoTrack(track_id).get());
+ if (!webrtc_track)
+ return;
webrtc_media_stream_->RemoveTrack(webrtc_track.get());
- for (ScopedVector<MediaStreamVideoWebRtcSink>::iterator it =
- video_adapters_.begin(); it != video_adapters_.end(); ++it) {
+ for (auto it = video_sinks_.begin(); it != video_sinks_.end(); ++it) {
if ((*it)->webrtc_video_track() == webrtc_track.get()) {
- video_adapters_.erase(it);
+ video_sinks_.erase(it);
break;
}
}
}
}
-void WebRtcMediaStreamAdapter::CreateAudioTrack(
+void WebRtcMediaStreamAdapter::AddAudioSinkToTrack(
const blink::WebMediaStreamTrack& track) {
- DCHECK_EQ(track.source().getType(), blink::WebMediaStreamSource::TypeAudio);
- // A media stream is being connected to a peer connection, so enable
- // peer-connection mode for the sources.
MediaStreamAudioTrack* native_track = MediaStreamAudioTrack::From(track);
if (!native_track) {
DLOG(ERROR) << "No native track for blink audio track.";
return;
}
- webrtc::AudioTrackInterface* audio_track = native_track->GetAudioAdapter();
- if (!audio_track) {
- DLOG(ERROR) << "Audio track doesn't support webrtc.";
- return;
- }
-
- if (native_track->is_local_track()) {
- const blink::WebMediaStreamSource& source = track.source();
- MediaStreamAudioSource* audio_source = MediaStreamAudioSource::From(source);
- if (audio_source && audio_source->audio_capturer())
- audio_source->audio_capturer()->EnablePeerConnectionMode();
+ // Non-WebRtc remote sources and local sources do not provide an instance of
+ // the webrtc::AudioSourceInterface, and also do not need references to the
+ // audio level calculator or audio processor passed to the sink.
+ webrtc::AudioSourceInterface* source_interface = nullptr;
+ WebRtcAudioSink* audio_sink = new WebRtcAudioSink(
+ track.id().utf8(), source_interface,
+ factory_->GetWebRtcSignalingThread());
+
+ if (auto* media_stream_source = ProcessedLocalAudioSource::From(
+ MediaStreamAudioSource::From(track.source()))) {
+ audio_sink->SetLevel(media_stream_source->audio_level());
+ // The sink only grabs stats from the audio processor. Stats are only
+ // available if audio processing is turned on. Therefore, only provide the
+ // sink a reference to the processor if audio processing is turned on.
+ if (auto processor = media_stream_source->audio_processor()) {
+ if (processor->has_audio_processing())
+ audio_sink->SetAudioProcessor(processor);
+ }
}
- webrtc_media_stream_->AddTrack(audio_track);
+ audio_sinks_.push_back(std::unique_ptr<WebRtcAudioSink>(audio_sink));
+ native_track->AddSink(audio_sink);
+ webrtc_media_stream_->AddTrack(audio_sink->webrtc_audio_track());
}
-void WebRtcMediaStreamAdapter::CreateVideoTrack(
+void WebRtcMediaStreamAdapter::AddVideoSinkToTrack(
const blink::WebMediaStreamTrack& track) {
DCHECK_EQ(track.source().getType(), blink::WebMediaStreamSource::TypeVideo);
- MediaStreamVideoWebRtcSink* adapter =
+ MediaStreamVideoWebRtcSink* video_sink =
new MediaStreamVideoWebRtcSink(track, factory_);
- video_adapters_.push_back(adapter);
- webrtc_media_stream_->AddTrack(adapter->webrtc_video_track());
+ video_sinks_.push_back(
+ std::unique_ptr<MediaStreamVideoWebRtcSink>(video_sink));
+ webrtc_media_stream_->AddTrack(video_sink->webrtc_video_track());
}
} // namespace content
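
After this change each audio track is bridged by an adapter-owned WebRtcAudioSink, and TrackRemoved() erases the sink whose webrtc_audio_track() matches the track being removed. The matching-and-erase pattern in isolation, as a sketch with stand-in types (Sink and WebrtcTrack are illustrative, not the Chromium classes):

    #include <algorithm>
    #include <memory>
    #include <vector>

    struct WebrtcTrack {};
    struct Sink {
      explicit Sink(WebrtcTrack* t) : track(t) {}
      WebrtcTrack* webrtc_audio_track() const { return track; }
      WebrtcTrack* track;
    };

    // Finds the sink bridging |track| and erases it; the unique_ptr in
    // the vector then destroys the sink, as in TrackRemoved() above.
    void RemoveSinkFor(std::vector<std::unique_ptr<Sink>>& sinks,
                       WebrtcTrack* track) {
      auto it = std::find_if(sinks.begin(), sinks.end(),
                             [track](const std::unique_ptr<Sink>& s) {
                               return s->webrtc_audio_track() == track;
                             });
      if (it != sinks.end())
        sinks.erase(it);
    }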
diff --git a/chromium/content/renderer/media/webrtc/webrtc_media_stream_adapter.h b/chromium/content/renderer/media/webrtc/webrtc_media_stream_adapter.h
index c4052550242..77c5dbd2c0e 100644
--- a/chromium/content/renderer/media/webrtc/webrtc_media_stream_adapter.h
+++ b/chromium/content/renderer/media/webrtc/webrtc_media_stream_adapter.h
@@ -5,9 +5,11 @@
#ifndef CONTENT_RENDERER_MEDIA_WEBRTC_WEBRTC_MEDIA_STREAM_ADAPTER_H_
#define CONTENT_RENDERER_MEDIA_WEBRTC_WEBRTC_MEDIA_STREAM_ADAPTER_H_
+#include <memory>
+#include <vector>
+
#include "base/macros.h"
#include "base/memory/ref_counted.h"
-#include "base/memory/scoped_vector.h"
#include "content/common/content_export.h"
#include "content/renderer/media/media_stream.h"
#include "third_party/WebKit/public/platform/WebMediaStream.h"
@@ -17,6 +19,7 @@ namespace content {
class PeerConnectionDependencyFactory;
class MediaStreamVideoWebRtcSink;
+class WebRtcAudioSink;
// WebRtcMediaStreamAdapter is an adapter between a blink::WebMediaStream
// object and a webrtc MediaStream that is currently sent on a PeerConnection.
@@ -33,11 +36,11 @@ class CONTENT_EXPORT WebRtcMediaStreamAdapter
PeerConnectionDependencyFactory* factory);
~WebRtcMediaStreamAdapter() override;
- bool IsEqual(const blink::WebMediaStream& web_stream) {
+ bool IsEqual(const blink::WebMediaStream& web_stream) const {
return web_stream_.getExtraData() == web_stream.getExtraData();
}
- webrtc::MediaStreamInterface* webrtc_media_stream() {
+ webrtc::MediaStreamInterface* webrtc_media_stream() const {
return webrtc_media_stream_.get();
}
@@ -47,8 +50,8 @@ class CONTENT_EXPORT WebRtcMediaStreamAdapter
void TrackRemoved(const blink::WebMediaStreamTrack& track) override;
private:
- void CreateAudioTrack(const blink::WebMediaStreamTrack& track);
- void CreateVideoTrack(const blink::WebMediaStreamTrack& track);
+ void AddAudioSinkToTrack(const blink::WebMediaStreamTrack& track);
+ void AddVideoSinkToTrack(const blink::WebMediaStreamTrack& track);
const blink::WebMediaStream web_stream_;
@@ -57,7 +60,8 @@ class CONTENT_EXPORT WebRtcMediaStreamAdapter
PeerConnectionDependencyFactory* const factory_;
scoped_refptr<webrtc::MediaStreamInterface> webrtc_media_stream_;
- ScopedVector<MediaStreamVideoWebRtcSink> video_adapters_;
+ std::vector<std::unique_ptr<WebRtcAudioSink>> audio_sinks_;
+ std::vector<std::unique_ptr<MediaStreamVideoWebRtcSink>> video_sinks_;
DISALLOW_COPY_AND_ASSIGN(WebRtcMediaStreamAdapter);
};
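
The header swaps base::ScopedVector for std::vector<std::unique_ptr<T>>, which expresses the same exclusive ownership with standard types. The transfer-in idiom in miniature (SinkLike is a stand-in for WebRtcAudioSink; with C++14, std::make_unique avoids naming the raw pointer that AddAudioSinkToTrack() wraps by hand):

    #include <memory>
    #include <vector>

    struct SinkLike {};  // Stand-in for WebRtcAudioSink.

    void Demo() {
      std::vector<std::unique_ptr<SinkLike>> sinks;
      sinks.push_back(std::make_unique<SinkLike>());
    }  // Sinks are destroyed here, as ScopedVector used to guarantee.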
diff --git a/chromium/content/renderer/media/webrtc/webrtc_media_stream_adapter_unittest.cc b/chromium/content/renderer/media/webrtc/webrtc_media_stream_adapter_unittest.cc
index f52ccdae568..4b29a852385 100644
--- a/chromium/content/renderer/media/webrtc/webrtc_media_stream_adapter_unittest.cc
+++ b/chromium/content/renderer/media/webrtc/webrtc_media_stream_adapter_unittest.cc
@@ -11,13 +11,14 @@
#include "base/message_loop/message_loop.h"
#include "content/child/child_process.h"
#include "content/renderer/media/media_stream.h"
-#include "content/renderer/media/media_stream_audio_source.h"
#include "content/renderer/media/media_stream_video_source.h"
#include "content/renderer/media/media_stream_video_track.h"
+#include "content/renderer/media/mock_audio_device_factory.h"
+#include "content/renderer/media/mock_constraint_factory.h"
#include "content/renderer/media/mock_media_stream_video_source.h"
#include "content/renderer/media/webrtc/mock_peer_connection_dependency_factory.h"
-#include "content/renderer/media/webrtc/webrtc_local_audio_track_adapter.h"
-#include "content/renderer/media/webrtc_local_audio_track.h"
+#include "content/renderer/media/webrtc/processed_local_audio_source.h"
+#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "third_party/WebKit/public/platform/WebMediaStream.h"
#include "third_party/WebKit/public/platform/WebMediaStreamSource.h"
@@ -25,6 +26,8 @@
#include "third_party/WebKit/public/platform/WebVector.h"
#include "third_party/WebKit/public/web/WebHeap.h"
+using ::testing::_;
+
namespace content {
class WebRtcMediaStreamAdapterTest : public ::testing::Test {
@@ -47,16 +50,28 @@ class WebRtcMediaStreamAdapterTest : public ::testing::Test {
audio_source.initialize("audio",
blink::WebMediaStreamSource::TypeAudio,
"audio",
- false /* remote */, true /* readonly */);
- audio_source.setExtraData(new MediaStreamAudioSource());
-
+ false /* remote */);
+ ProcessedLocalAudioSource* const source =
+ new ProcessedLocalAudioSource(
+ -1 /* consumer_render_frame_id is N/A for non-browser tests */,
+ StreamDeviceInfo(MEDIA_DEVICE_AUDIO_CAPTURE, "Mock audio device",
+ "mock_audio_device_id",
+ media::AudioParameters::kAudioCDSampleRate,
+ media::CHANNEL_LAYOUT_STEREO,
+ media::AudioParameters::kAudioCDSampleRate / 50),
+ dependency_factory_.get());
+ source->SetAllowInvalidRenderFrameIdForTesting(true);
+ source->SetSourceConstraints(
+ MockConstraintFactory().CreateWebMediaConstraints());
+ audio_source.setExtraData(source); // Takes ownership.
audio_track_vector[0].initialize(audio_source);
- scoped_refptr<WebRtcLocalAudioTrackAdapter> adapter(
- WebRtcLocalAudioTrackAdapter::Create(
- audio_track_vector[0].id().utf8(), nullptr));
- std::unique_ptr<WebRtcLocalAudioTrack> native_track(
- new WebRtcLocalAudioTrack(adapter.get()));
- audio_track_vector[0].setExtraData(native_track.release());
+ EXPECT_CALL(*mock_audio_device_factory_.mock_capturer_source(),
+ Initialize(_, _, -1));
+ EXPECT_CALL(*mock_audio_device_factory_.mock_capturer_source(),
+ SetAutomaticGainControl(true));
+ EXPECT_CALL(*mock_audio_device_factory_.mock_capturer_source(), Start());
+ EXPECT_CALL(*mock_audio_device_factory_.mock_capturer_source(), Stop());
+ CHECK(source->ConnectToTrack(audio_track_vector[0]));
}
blink::WebVector<blink::WebMediaStreamTrack> video_track_vector(
@@ -67,7 +82,7 @@ class WebRtcMediaStreamAdapterTest : public ::testing::Test {
video_source.initialize("video",
blink::WebMediaStreamSource::TypeVideo,
"video",
- false /* remote */, true /* readonly */);
+ false /* remote */);
MediaStreamVideoSource* native_source =
new MockMediaStreamVideoSource(false);
video_source.setExtraData(native_source);
@@ -104,11 +119,12 @@ class WebRtcMediaStreamAdapterTest : public ::testing::Test {
return adapter_->webrtc_media_stream();
}
- protected:
+ private:
base::MessageLoop message_loop_;
std::unique_ptr<ChildProcess> child_process_;
std::unique_ptr<MockPeerConnectionDependencyFactory> dependency_factory_;
std::unique_ptr<WebRtcMediaStreamAdapter> adapter_;
+ MockAudioDeviceFactory mock_audio_device_factory_;
};
TEST_F(WebRtcMediaStreamAdapterTest, CreateWebRtcMediaStream) {
@@ -126,7 +142,7 @@ TEST_F(WebRtcMediaStreamAdapterTest,
audio_source.initialize("audio source",
blink::WebMediaStreamSource::TypeAudio,
"something",
- false /* remote */, true /* readonly */);
+ false /* remote */);
blink::WebVector<blink::WebMediaStreamTrack> audio_tracks(
static_cast<size_t>(1));